name (string, 1–473k chars) | code (string, 7–647k chars) | asm (string, 4–3.39M chars) | file (string, 8–196 chars)
---|---|---|---|
ncnn::im2col_sgemm_pack8to1_int8_sse_xop(ncnn::Mat const&, ncnn::Mat&, ncnn::Mat const&, ncnn::Option const&) | void im2col_sgemm_pack8to1_int8_sse_xop(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt)
{
im2col_sgemm_pack8to1_int8_sse(bottom_im2col, top_blob, kernel, opt);
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x88, %rsp
movq %rcx, %rax
movq %rdx, 0x10(%rsp)
movq %rdi, %r12
movslq 0x2c(%rdi), %r13
movl 0x30(%rdi), %ebx
movl 0x38(%rdi), %r15d
movq %rsi, 0x28(%rsp)
movslq 0x38(%rsi), %rcx
movq %rcx, 0x18(%rsp)
leaq 0x30(%rsp), %rdi
andq $0x0, 0x40(%rdi)
movl %r13d, %ecx
shrl %ecx
movl %r13d, %r10d
andl $0x1, %r10d
addl %ecx, %r10d
cmpq $0x2, %r13
setge %cl
vpxor %xmm0, %xmm0, %xmm0
vmovdqu %xmm0, 0xc(%rdi)
vmovdqa %xmm0, (%rdi)
vmovdqu %xmm0, 0x2c(%rdi)
vmovdqa %xmm0, 0x20(%rdi)
cmovll %r13d, %r10d
movl %ebx, %esi
shll %cl, %esi
movq 0x10(%rax), %rax
movq %rax, (%rsp)
pushq $0x8
popq %r8
pushq $0x8
popq %r9
movl %r15d, %edx
movl %r10d, %ecx
callq 0x628f2
movl %r13d, %eax
sarl %eax
xorl %ecx, %ecx
testl %ebx, %ebx
movl %ebx, %edx
movl $0x0, %ebx
movl %edx, 0x24(%rsp)
cmovgl %edx, %ebx
testl %r15d, %r15d
movl $0x0, %edi
cmovgl %r15d, %edi
testl %eax, %eax
cmovlel %ecx, %eax
leaq (,%r13,8), %r8
xorl %edx, %edx
cmpq %rax, %rdx
je 0x138d13
movq 0x70(%rsp), %r9
imulq %rdx, %r9
imulq 0x40(%rsp), %r9
addq 0x30(%rsp), %r9
xorl %r10d, %r10d
cmpq %rdi, %r10
je 0x138d0a
movq 0x40(%r12), %r11
movq (%r12), %rsi
imulq 0x10(%r12), %r11
addq %rcx, %rsi
imulq %r10, %r11
addq %rsi, %r11
movl %ebx, %ebp
subl $0x1, %ebp
jb 0x138d05
vmovdqu (%r11), %xmm0
vmovdqu %xmm0, (%r9)
addq $0x10, %r9
addq %r8, %r11
jmp 0x138ced
incq %r10
jmp 0x138ccd
incq %rdx
addq $0x10, %rcx
jmp 0x138cb1
movq %r13, %rcx
andq $-0x2, %rcx
movq 0x30(%rsp), %r9
leaq (,%rcx,8), %r10
cmpq %r13, %rcx
jge 0x138d8c
movl %ecx, %eax
cltd
pushq $0x2
popq %rsi
idivl %esi
addl %eax, %edx
movslq %edx, %rax
imulq 0x70(%rsp), %rax
imulq 0x40(%rsp), %rax
addq %r9, %rax
movq (%r12), %rdx
addq %r10, %rdx
xorl %ebp, %ebp
cmpq %rdi, %rbp
je 0x138d83
movq 0x40(%r12), %r14
imulq 0x10(%r12), %r14
imulq %rbp, %r14
addq %rdx, %r14
movl %ebx, %esi
subl $0x1, %esi
jb 0x138d7e
movq (%r14), %r11
movq %r11, (%rax)
addq $0x8, %rax
addq %r8, %r14
jmp 0x138d6a
incq %rbp
jmp 0x138d51
incq %rcx
addq $0x8, %r10
jmp 0x138d27
movq 0x18(%rsp), %rax
movl %eax, %ebx
sarl $0x2, %ebx
movq 0x28(%rsp), %rcx
movq (%rcx), %rdx
movq %rdx, 0x80(%rsp)
movq 0x10(%rcx), %rdx
imulq 0x40(%rcx), %rdx
movq %rdx, 0x78(%rsp)
imull 0x24(%rsp), %r15d
xorl %esi, %esi
testl %r15d, %r15d
cmovlel %esi, %r15d
testl %ebx, %ebx
cmovlel %esi, %ebx
vpxor %xmm0, %xmm0, %xmm0
cmpq %rbx, %rsi
je 0x1390aa
leaq (,%rsi,4), %rdi
movq 0x78(%rsp), %rcx
imulq %rcx, %rdi
movq 0x80(%rsp), %rax
addq %rax, %rdi
leaq 0x1(,%rsi,4), %r8
imulq %rcx, %r8
addq %rax, %r8
leaq 0x2(,%rsi,4), %r9
imulq %rcx, %r9
addq %rax, %r9
leaq 0x3(,%rsi,4), %r10
imulq %rcx, %r10
addq %rax, %r10
movq 0x10(%rsp), %rax
movq 0x40(%rax), %r14
imulq 0x10(%rax), %r14
movq 0x30(%rsp), %r12
imulq %rsi, %r14
addq (%rax), %r14
movq 0x40(%rsp), %rbp
imulq 0x70(%rsp), %rbp
xorl %r11d, %r11d
movq %r11, %rax
orq $0x1, %rax
cmpq %r13, %rax
jge 0x138f9f
vpxor %xmm1, %xmm1, %xmm1
xorl %eax, %eax
movl %r15d, %ecx
vpxor %xmm2, %xmm2, %xmm2
vpxor %xmm3, %xmm3, %xmm3
vpxor %xmm4, %xmm4, %xmm4
vpxor %xmm5, %xmm5, %xmm5
vpxor %xmm6, %xmm6, %xmm6
vpxor %xmm7, %xmm7, %xmm7
vpxor %xmm8, %xmm8, %xmm8
subl $0x1, %ecx
jb 0x138ef7
vmovdqu (%r12,%rax), %xmm9
vpcmpgtb %xmm9, %xmm0, %xmm10
vpunpcklbw %xmm10, %xmm9, %xmm11 # xmm11 = xmm9[0],xmm10[0],xmm9[1],xmm10[1],xmm9[2],xmm10[2],xmm9[3],xmm10[3],xmm9[4],xmm10[4],xmm9[5],xmm10[5],xmm9[6],xmm10[6],xmm9[7],xmm10[7]
vpunpckhbw %xmm10, %xmm9, %xmm9 # xmm9 = xmm9[8],xmm10[8],xmm9[9],xmm10[9],xmm9[10],xmm10[10],xmm9[11],xmm10[11],xmm9[12],xmm10[12],xmm9[13],xmm10[13],xmm9[14],xmm10[14],xmm9[15],xmm10[15]
vmovdqu (%r14,%rax,2), %xmm10
vmovdqu 0x10(%r14,%rax,2), %xmm12
vpcmpgtb %xmm10, %xmm0, %xmm13
vpcmpgtb %xmm12, %xmm0, %xmm14
vpunpcklbw %xmm13, %xmm10, %xmm15 # xmm15 = xmm10[0],xmm13[0],xmm10[1],xmm13[1],xmm10[2],xmm13[2],xmm10[3],xmm13[3],xmm10[4],xmm13[4],xmm10[5],xmm13[5],xmm10[6],xmm13[6],xmm10[7],xmm13[7]
vpunpckhbw %xmm13, %xmm10, %xmm10 # xmm10 = xmm10[8],xmm13[8],xmm10[9],xmm13[9],xmm10[10],xmm13[10],xmm10[11],xmm13[11],xmm10[12],xmm13[12],xmm10[13],xmm13[13],xmm10[14],xmm13[14],xmm10[15],xmm13[15]
vpunpcklbw %xmm14, %xmm12, %xmm13 # xmm13 = xmm12[0],xmm14[0],xmm12[1],xmm14[1],xmm12[2],xmm14[2],xmm12[3],xmm14[3],xmm12[4],xmm14[4],xmm12[5],xmm14[5],xmm12[6],xmm14[6],xmm12[7],xmm14[7]
vpunpckhbw %xmm14, %xmm12, %xmm12 # xmm12 = xmm12[8],xmm14[8],xmm12[9],xmm14[9],xmm12[10],xmm14[10],xmm12[11],xmm14[11],xmm12[12],xmm14[12],xmm12[13],xmm14[13],xmm12[14],xmm14[14],xmm12[15],xmm14[15]
vpmadcswd %xmm8, %xmm15, %xmm11, %xmm8
vpmadcswd %xmm7, %xmm10, %xmm11, %xmm7
vpmadcswd %xmm6, %xmm13, %xmm11, %xmm6
vpmadcswd %xmm5, %xmm12, %xmm11, %xmm5
vpmadcswd %xmm4, %xmm15, %xmm9, %xmm4
vpmadcswd %xmm3, %xmm10, %xmm9, %xmm3
vpmadcswd %xmm2, %xmm13, %xmm9, %xmm2
vpmadcswd %xmm1, %xmm12, %xmm9, %xmm1
addq $0x10, %rax
jmp 0x138e7c
vpunpckldq %xmm7, %xmm8, %xmm9 # xmm9 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
vpunpckldq %xmm5, %xmm6, %xmm10 # xmm10 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
vpunpckhdq %xmm7, %xmm8, %xmm7 # xmm7 = xmm8[2],xmm7[2],xmm8[3],xmm7[3]
vpunpckhdq %xmm5, %xmm6, %xmm5 # xmm5 = xmm6[2],xmm5[2],xmm6[3],xmm5[3]
vpunpckldq %xmm3, %xmm4, %xmm6 # xmm6 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
vpunpckldq %xmm1, %xmm2, %xmm8 # xmm8 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
vpunpckhdq %xmm3, %xmm4, %xmm3 # xmm3 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
vpunpckhdq %xmm1, %xmm2, %xmm1 # xmm1 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
vpunpcklqdq %xmm10, %xmm9, %xmm2 # xmm2 = xmm9[0],xmm10[0]
vpunpckhqdq %xmm10, %xmm9, %xmm4 # xmm4 = xmm9[1],xmm10[1]
vpaddd %xmm4, %xmm2, %xmm2
vpunpcklqdq %xmm5, %xmm7, %xmm4 # xmm4 = xmm7[0],xmm5[0]
vpunpckhqdq %xmm5, %xmm7, %xmm5 # xmm5 = xmm7[1],xmm5[1]
vpaddd %xmm4, %xmm5, %xmm4
vpaddd %xmm4, %xmm2, %xmm2
vpunpcklqdq %xmm8, %xmm6, %xmm4 # xmm4 = xmm6[0],xmm8[0]
vpunpckhqdq %xmm8, %xmm6, %xmm5 # xmm5 = xmm6[1],xmm8[1]
vpaddd %xmm5, %xmm4, %xmm4
vpunpcklqdq %xmm1, %xmm3, %xmm5 # xmm5 = xmm3[0],xmm1[0]
vmovd %xmm2, (%rdi)
vpextrd $0x1, %xmm2, (%r8)
vpextrd $0x2, %xmm2, (%r9)
vpextrd $0x3, %xmm2, (%r10)
vpunpckhqdq %xmm1, %xmm3, %xmm1 # xmm1 = xmm3[1],xmm1[1]
vpaddd %xmm5, %xmm1, %xmm1
vpaddd %xmm1, %xmm4, %xmm1
vmovd %xmm1, 0x4(%rdi)
vpextrd $0x1, %xmm1, 0x4(%r8)
vpextrd $0x2, %xmm1, 0x4(%r9)
vpextrd $0x3, %xmm1, 0x4(%r10)
addq $0x8, %rdi
addq $0x8, %r8
addq $0x8, %r9
addq $0x8, %r10
addq $0x2, %r11
addq %rbp, %r12
jmp 0x138e46
movq 0x30(%rsp), %r14
movq 0x40(%rsp), %r12
movq 0x10(%rsp), %rax
movq 0x40(%rax), %rdx
imulq %rsi, %rdx
imulq 0x10(%rax), %rdx
addq (%rax), %rdx
imulq 0x70(%rsp), %r12
cmpl %r13d, %r11d
jge 0x1390a2
movl %r11d, %ecx
shrl %ecx
movl %r11d, %eax
andl $0x1, %eax
addl %ecx, %eax
imulq %r12, %rax
addq %r14, %rax
vpxor %xmm1, %xmm1, %xmm1
xorl %ecx, %ecx
movq %rdx, %rbp
vpxor %xmm2, %xmm2, %xmm2
vpxor %xmm3, %xmm3, %xmm3
vpxor %xmm4, %xmm4, %xmm4
cmpl %ecx, %r15d
je 0x139048
vpmovsxbw (%rax,%rcx,8), %xmm5
vmovdqu (%rbp), %xmm6
vmovdqu 0x10(%rbp), %xmm7
vpcmpgtb %xmm6, %xmm0, %xmm8
vpcmpgtb %xmm7, %xmm0, %xmm9
vpunpcklbw %xmm8, %xmm6, %xmm10 # xmm10 = xmm6[0],xmm8[0],xmm6[1],xmm8[1],xmm6[2],xmm8[2],xmm6[3],xmm8[3],xmm6[4],xmm8[4],xmm6[5],xmm8[5],xmm6[6],xmm8[6],xmm6[7],xmm8[7]
vpunpckhbw %xmm8, %xmm6, %xmm6 # xmm6 = xmm6[8],xmm8[8],xmm6[9],xmm8[9],xmm6[10],xmm8[10],xmm6[11],xmm8[11],xmm6[12],xmm8[12],xmm6[13],xmm8[13],xmm6[14],xmm8[14],xmm6[15],xmm8[15]
vpunpcklbw %xmm9, %xmm7, %xmm8 # xmm8 = xmm7[0],xmm9[0],xmm7[1],xmm9[1],xmm7[2],xmm9[2],xmm7[3],xmm9[3],xmm7[4],xmm9[4],xmm7[5],xmm9[5],xmm7[6],xmm9[6],xmm7[7],xmm9[7]
vpunpckhbw %xmm9, %xmm7, %xmm7 # xmm7 = xmm7[8],xmm9[8],xmm7[9],xmm9[9],xmm7[10],xmm9[10],xmm7[11],xmm9[11],xmm7[12],xmm9[12],xmm7[13],xmm9[13],xmm7[14],xmm9[14],xmm7[15],xmm9[15]
vpmadcswd %xmm1, %xmm10, %xmm5, %xmm1
vpmadcswd %xmm2, %xmm6, %xmm5, %xmm2
vpmadcswd %xmm3, %xmm8, %xmm5, %xmm3
vpmadcswd %xmm4, %xmm7, %xmm5, %xmm4
addq $0x20, %rbp
incq %rcx
jmp 0x138ff6
vpunpckldq %xmm2, %xmm1, %xmm5 # xmm5 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
vpunpckldq %xmm4, %xmm3, %xmm6 # xmm6 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
vpunpckhdq %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
vpunpckhdq %xmm4, %xmm3, %xmm2 # xmm2 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
vpunpcklqdq %xmm6, %xmm5, %xmm3 # xmm3 = xmm5[0],xmm6[0]
vpunpckhqdq %xmm6, %xmm5, %xmm4 # xmm4 = xmm5[1],xmm6[1]
vpaddd %xmm4, %xmm3, %xmm3
vpunpcklqdq %xmm2, %xmm1, %xmm4 # xmm4 = xmm1[0],xmm2[0]
vpunpckhqdq %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[1],xmm2[1]
vpaddd %xmm4, %xmm1, %xmm1
vpaddd %xmm1, %xmm3, %xmm1
vmovd %xmm1, (%rdi)
vpextrd $0x1, %xmm1, (%r8)
vpextrd $0x2, %xmm1, (%r9)
vpextrd $0x3, %xmm1, (%r10)
addq $0x4, %rdi
addq $0x4, %r8
addq $0x4, %r9
addq $0x4, %r10
incl %r11d
jmp 0x138fc4
incq %rsi
jmp 0x138dcc
movq 0x18(%rsp), %rcx
andq $-0x4, %rcx
movq 0x28(%rsp), %rax
movq 0x10(%rax), %rsi
imulq 0x40(%rax), %rsi
movq (%rax), %rdi
movl %r15d, %r8d
pushq $0x4
popq %r9
vpxor %xmm0, %xmm0, %xmm0
cmpq 0x18(%rsp), %rcx
jge 0x139213
movq 0x30(%rsp), %r11
movq 0x40(%rsp), %r14
imulq 0x70(%rsp), %r14
movl %ecx, %eax
cltd
idivl %r9d
movq %rsi, %r10
imulq %rcx, %r10
addl %eax, %edx
movslq %edx, %rax
movq 0x10(%rsp), %rdx
movq 0x40(%rdx), %r15
imulq %rax, %r15
imulq 0x10(%rdx), %r15
addq %rdi, %r10
addq (%rdx), %r15
xorl %edx, %edx
movq %rdx, %rbx
orq $0x1, %rbx
cmpq %r13, %rbx
jge 0x139196
movq %rdx, %r12
shrq %r12
imulq %r14, %r12
addq %r11, %r12
vpxor %xmm1, %xmm1, %xmm1
xorl %ebp, %ebp
vpxor %xmm2, %xmm2, %xmm2
cmpl %ebp, %r8d
je 0x13916b
vmovdqu (%r12), %xmm3
vpcmpgtb %xmm3, %xmm0, %xmm4
vpunpcklbw %xmm4, %xmm3, %xmm5 # xmm5 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
vpunpckhbw %xmm4, %xmm3, %xmm3 # xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
vpmovsxbw (%r15,%rbp,8), %xmm4
vpmadcswd %xmm1, %xmm4, %xmm5, %xmm1
vpmadcswd %xmm2, %xmm4, %xmm3, %xmm2
addq $0x10, %r12
incq %rbp
jmp 0x139139
vpshufd $0xee, %xmm1, %xmm3 # xmm3 = xmm1[2,3,2,3]
vpaddd %xmm1, %xmm3, %xmm1
vpshufd $0xee, %xmm2, %xmm3 # xmm3 = xmm2[2,3,2,3]
vpaddd %xmm2, %xmm3, %xmm2
vphaddd %xmm2, %xmm1, %xmm1
vpshufd $0xe8, %xmm1, %xmm1 # xmm1 = xmm1[0,2,2,3]
vmovq %xmm1, (%r10)
addq $0x8, %r10
addq $0x2, %rdx
jmp 0x139116
movq 0x30(%rsp), %r11
movq 0x40(%rsp), %r14
movq 0x10(%rsp), %rbx
imulq 0x40(%rbx), %rax
imulq 0x10(%rbx), %rax
addq (%rbx), %rax
imulq 0x70(%rsp), %r14
cmpl %r13d, %edx
jge 0x13920b
movl %edx, %ebx
shrl %ebx
movl %edx, %r15d
andl $0x1, %r15d
addl %ebx, %r15d
imulq %r14, %r15
addq %r11, %r15
vpxor %xmm1, %xmm1, %xmm1
xorl %ebx, %ebx
cmpl %ebx, %r8d
je 0x1391f4
vpmovsxbw (%r15,%rbx,8), %xmm2
vpmovsxbw (%rax,%rbx,8), %xmm3
vpmadcswd %xmm1, %xmm3, %xmm2, %xmm1
incq %rbx
jmp 0x1391d8
vphaddd %xmm1, %xmm1, %xmm1
vphaddd %xmm1, %xmm1, %xmm1
vmovd %xmm1, (%r10)
addq $0x4, %r10
incl %edx
jmp 0x1391b8
incq %rcx
jmp 0x1390cf
movq 0x38(%rsp), %rax
testq %rax, %rax
je 0x139241
lock
decl (%rax)
jne 0x139241
movq 0x30(%rsp), %rsi
movq 0x50(%rsp), %rdi
testq %rdi, %rdi
je 0x139239
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x139241
movq %rsi, %rdi
callq 0x5f3e0
addq $0x88, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x13928e
movq %rax, %rbx
movq 0x38(%rsp), %rax
testq %rax, %rax
je 0x139286
lock
decl (%rax)
jne 0x139286
movq 0x30(%rsp), %rsi
movq 0x50(%rsp), %rdi
testq %rdi, %rdi
jne 0x139280
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x139286
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
| /csukuangfj[P]ncnn/src/layer/x86/convolution_x86_xop.cpp |
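The XOP row above is a one-line forwarder: the generic `im2col_sgemm_pack8to1_int8_sse` implementation is presumably recompiled in this `_xop` translation unit with XOP enabled, which is why the listing is full of `vpmadcswd` even though the C++ body only delegates. In the asm, the int8 operands are first widened to int16 in-register (`vpcmpgtb` against zero builds each byte's sign mask, `vpunpcklbw`/`vpunpckhbw` interleave it in), then reduced with `vpmadcswd`, which fuses a `pmaddwd`-style horizontal multiply-add with an accumulate, so eight int32 accumulators (`xmm1`–`xmm8`) stay live across the whole reduction loop. A minimal scalar sketch of that per-lane accumulation (illustrative C++, not ncnn code; it ignores the saturating `vpmadcsswd` variant):

#include <cstdint>
#include <cstdio>

// acc.i32[i] += a.i16[2i] * b.i16[2i] + a.i16[2i+1] * b.i16[2i+1]
// -- the operation one vpmadcswd performs across a 128-bit register.
static void madd_accumulate(const int16_t a[8], const int16_t b[8], int32_t acc[4])
{
    for (int i = 0; i < 4; i++)
        acc[i] += (int32_t)a[2 * i] * b[2 * i] + (int32_t)a[2 * i + 1] * b[2 * i + 1];
}

int main()
{
    const int16_t a[8] = {1, 2, 3, 4, 5, 6, 7, 8};
    const int16_t b[8] = {1, -1, 2, -2, 3, -3, 4, -4};
    int32_t acc[4] = {0, 0, 0, 0};
    madd_accumulate(a, b, acc);
    for (int i = 0; i < 4; i++)
        printf("acc[%d] = %d\n", i, acc[i]); // prints -1, -2, -3, -4
}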
ncnn::crop_pack4_sse(ncnn::Mat const&, ncnn::Mat&, int, int) | static void crop_pack4_sse(const Mat& src, Mat& dst, int top, int left)
{
int w = dst.w;
int h = dst.h;
int right = src.w - dst.w - left;
const float* ptr = src.row(top) + left * 4;
float* outptr = dst;
for (int y = 0; y < h; y++)
{
for (int x = 0; x < w; x++)
{
__m128 _p = _mm_loadu_ps(ptr);
_mm_storeu_ps(outptr, _p);
ptr += 4;
outptr += 4;
}
ptr += (left + right) * 4;
}
} | movl 0x2c(%rsi), %eax
movl 0x30(%rsi), %r8d
movslq 0x2c(%rdi), %r9
movslq %edx, %rdx
imulq %r9, %rdx
subl %eax, %r9d
imulq 0x10(%rdi), %rdx
addq (%rdi), %rdx
shll $0x2, %ecx
movslq %ecx, %rcx
leaq (%rdx,%rcx,4), %rcx
movq (%rsi), %rdx
shll $0x2, %r9d
movslq %r9d, %rsi
xorl %edi, %edi
testl %eax, %eax
cmovlel %edi, %eax
testl %r8d, %r8d
cmovlel %edi, %r8d
shlq $0x2, %rsi
movl %eax, %r9d
cmpl %r8d, %edi
je 0x1409f4
subl $0x1, %r9d
jb 0x1409ed
movups (%rcx), %xmm0
movups %xmm0, (%rdx)
addq $0x10, %rcx
addq $0x10, %rdx
jmp 0x1409d7
addq %rsi, %rcx
incl %edi
jmp 0x1409cf
retq
nop
| /csukuangfj[P]ncnn/src/layer/x86/crop_x86.cpp |
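The crop kernel above copies w packed-4 vectors per output row, then jumps the source pointer past the cropped margins; note that `(left + right) * 4` is exactly `(src.w - dst.w) * 4`, the number of floats outside the window in each source row. A scalar sketch of the same pointer walk (hypothetical names, not the ncnn API):

#include <cstdio>
#include <vector>

// Reference crop of a w x h window of 4-float packed elements out of a
// src_w-wide packed image, starting at element (top, left).
static void crop_pack4_ref(const float* src, int src_w,
                           float* dst, int w, int h, int top, int left)
{
    int right = src_w - w - left;
    const float* ptr = src + (top * src_w + left) * 4;
    for (int y = 0; y < h; y++)
    {
        for (int x = 0; x < w * 4; x++)
            *dst++ = *ptr++;
        ptr += (left + right) * 4; // skip this row's right margin and the next row's left margin
    }
}

int main()
{
    // 4x3 packed-4 source holding 0..47; crop a 2x2 window at top=1, left=1
    std::vector<float> src(4 * 3 * 4);
    for (size_t i = 0; i < src.size(); i++)
        src[i] = (float)i;
    std::vector<float> dst(2 * 2 * 4, 0.f);
    crop_pack4_ref(src.data(), 4, dst.data(), 2, 2, 1, 1);
    printf("%g %g %g\n", dst[0], dst[4], dst[8]); // 20 24 36
}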
ncnn::Deconvolution_x86::forward(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const | int Deconvolution_x86::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
// deconvolve with NxN kernel
// value = value + bias
int w = bottom_blob.w;
int h = bottom_blob.h;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
// NCNN_LOGE("Deconvolution input %d x %d pad = %d %d ksize=%d %d stride=%d %d", w, h, pad_w, pad_h, kernel_w, kernel_h, stride_w, stride_h);
const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;
int outw = (w - 1) * stride_w + kernel_extent_w + output_pad_right;
int outh = (h - 1) * stride_h + kernel_extent_h + output_pad_bottom;
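// e.g. with w = 5, stride_w = 2, kernel_w = 3, dilation_w = 1 and no output
// padding (illustrative values): kernel_extent_w = 1 * (3 - 1) + 1 = 3 and
// outw = (5 - 1) * 2 + 3 = 11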
int out_elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
#if __AVX512F__
out_elempack = num_output % 16 == 0 ? 16 : num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#elif __AVX__
out_elempack = num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#else
out_elempack = num_output % 4 == 0 ? 4 : 1;
#endif
}
#endif // __SSE2__
size_t out_elemsize = elemsize / elempack * out_elempack;
int out_channels = num_output / out_elempack;
Mat top_blob_bordered;
if (pad_left > 0 || pad_right > 0 || pad_top > 0 || pad_bottom > 0 || (output_w > 0 && output_h > 0))
{
top_blob_bordered.create(outw, outh, out_channels, out_elemsize, out_elempack, opt.workspace_allocator);
}
else
{
top_blob_bordered = top_blob;
top_blob_bordered.create(outw, outh, out_channels, out_elemsize, out_elempack, opt.blob_allocator);
}
if (top_blob_bordered.empty())
return -100;
const int maxk = kernel_w * kernel_h;
if (opt.use_sgemm_convolution)
{
// sgemm
Mat bottom_blob_2 = bottom_blob;
{
bottom_blob_2.w = bottom_blob.w * bottom_blob.h;
bottom_blob_2.h = 1;
}
Mat top_col2im;
Option opt_b = opt;
opt_b.blob_allocator = top_blob_bordered.allocator;
gemm->forward(bottom_blob_2, top_col2im, opt_b);
{
// col2im
const int gap = (outw * stride_h - w * stride_w) * out_elempack;
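// the j-loop below advances ptr by w * stride_w elements per input row, so
// adding gap (outw * stride_h - w * stride_w more elements) lands it at the
// same column offset stride_h output rows further down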
#if __SSE2__
#if __AVX__
#if __AVX512F__
if (out_elempack == 16)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < out_channels; p++)
{
const float* sptr = top_col2im.row(p * maxk);
Mat outm = top_blob_bordered.channel(p);
if (bias_data.empty())
{
outm.fill(_mm512_setzero_ps());
}
else
{
outm.fill(_mm512_load_ps((const float*)bias_data + p * 16));
}
for (int u = 0; u < kernel_h; u++)
{
for (int v = 0; v < kernel_w; v++)
{
float* ptr = outm.row(dilation_h * u) + dilation_w * v * 16;
for (int i = 0; i < h; i++)
{
for (int j = 0; j < w; j++)
{
__m512 _val = _mm512_load_ps(ptr);
__m512 _s = _mm512_load_ps(sptr);
_val = _mm512_add_ps(_val, _s);
_mm512_store_ps(ptr, _val);
ptr += stride_w * 16;
sptr += 16;
}
ptr += gap;
}
}
}
}
}
#endif // __AVX512F__
if (out_elempack == 8)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < out_channels; p++)
{
const float* sptr = top_col2im.row(p * maxk);
Mat outm = top_blob_bordered.channel(p);
if (bias_data.empty())
{
outm.fill(_mm256_setzero_ps());
}
else
{
outm.fill(_mm256_load_ps((const float*)bias_data + p * 8));
}
for (int u = 0; u < kernel_h; u++)
{
for (int v = 0; v < kernel_w; v++)
{
float* ptr = outm.row(dilation_h * u) + dilation_w * v * 8;
for (int i = 0; i < h; i++)
{
for (int j = 0; j < w; j++)
{
__m256 _val = _mm256_load_ps(ptr);
__m256 _s = _mm256_load_ps(sptr);
_val = _mm256_add_ps(_val, _s);
_mm256_store_ps(ptr, _val);
ptr += stride_w * 8;
sptr += 8;
}
ptr += gap;
}
}
}
}
}
#endif // __AVX__
if (out_elempack == 4)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < out_channels; p++)
{
const float* sptr = top_col2im.row(p * maxk);
Mat outm = top_blob_bordered.channel(p);
if (bias_data.empty())
{
outm.fill(_mm_setzero_ps());
}
else
{
outm.fill(_mm_load_ps((const float*)bias_data + p * 4));
}
for (int u = 0; u < kernel_h; u++)
{
for (int v = 0; v < kernel_w; v++)
{
float* ptr = outm.row(dilation_h * u) + dilation_w * v * 4;
for (int i = 0; i < h; i++)
{
for (int j = 0; j < w; j++)
{
__m128 _val = _mm_load_ps(ptr);
__m128 _s = _mm_load_ps(sptr);
_val = _mm_add_ps(_val, _s);
_mm_store_ps(ptr, _val);
ptr += stride_w * 4;
sptr += 4;
}
ptr += gap;
}
}
}
}
}
#endif // __SSE2__
if (out_elempack == 1)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < out_channels; p++)
{
const float* sptr = top_col2im.row(p * maxk);
Mat outm = top_blob_bordered.channel(p);
const float bias = bias_data.empty() ? 0.f : bias_data[p];
outm.fill(bias);
for (int u = 0; u < kernel_h; u++)
{
for (int v = 0; v < kernel_w; v++)
{
float* ptr = outm.row(dilation_h * u) + dilation_w * v;
for (int i = 0; i < h; i++)
{
for (int j = 0; j < w; j++)
{
ptr[0] += sptr[0];
ptr += stride_w;
sptr += 1;
}
ptr += gap;
}
}
}
}
}
}
if (activation)
{
activation->forward_inplace(top_blob_bordered, opt);
}
}
else
{
#if __SSE2__
#if __AVX__
#if __AVX512F__
if (elempack == 16 && out_elempack == 16)
{
deconvolution_pack16_avx512(bottom_blob, top_blob_bordered, weight_data_tm, bias_data, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, activation_type, activation_params, opt);
}
if (elempack == 8 && out_elempack == 16)
{
deconvolution_pack8to16_avx512(bottom_blob, top_blob_bordered, weight_data_tm, bias_data, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, activation_type, activation_params, opt);
}
if (elempack == 16 && out_elempack == 8)
{
deconvolution_pack16to8_avx512(bottom_blob, top_blob_bordered, weight_data_tm, bias_data, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, activation_type, activation_params, opt);
}
if (elempack == 4 && out_elempack == 16)
{
deconvolution_pack4to16_avx512(bottom_blob, top_blob_bordered, weight_data_tm, bias_data, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, activation_type, activation_params, opt);
}
if (elempack == 16 && out_elempack == 4)
{
deconvolution_pack16to4_avx512(bottom_blob, top_blob_bordered, weight_data_tm, bias_data, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, activation_type, activation_params, opt);
}
if (elempack == 1 && out_elempack == 16)
{
deconvolution_pack1to16_avx512(bottom_blob, top_blob_bordered, weight_data_tm, bias_data, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, activation_type, activation_params, opt);
}
if (elempack == 16 && out_elempack == 1)
{
deconvolution_pack16to1_avx512(bottom_blob, top_blob_bordered, weight_data_tm, bias_data, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, activation_type, activation_params, opt);
}
#endif // __AVX512F__
if (elempack == 8 && out_elempack == 8)
{
deconvolution_pack8_avx(bottom_blob, top_blob_bordered, weight_data_tm, bias_data, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, activation_type, activation_params, opt);
}
if (elempack == 4 && out_elempack == 8)
{
deconvolution_pack4to8_avx(bottom_blob, top_blob_bordered, weight_data_tm, bias_data, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, activation_type, activation_params, opt);
}
if (elempack == 8 && out_elempack == 4)
{
deconvolution_pack8to4_avx(bottom_blob, top_blob_bordered, weight_data_tm, bias_data, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, activation_type, activation_params, opt);
}
if (elempack == 1 && out_elempack == 8)
{
deconvolution_pack1to8_avx(bottom_blob, top_blob_bordered, weight_data_tm, bias_data, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, activation_type, activation_params, opt);
}
if (elempack == 8 && out_elempack == 1)
{
deconvolution_pack8to1_avx(bottom_blob, top_blob_bordered, weight_data_tm, bias_data, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, activation_type, activation_params, opt);
}
#endif // __AVX__
if (elempack == 4 && out_elempack == 4)
{
deconvolution_pack4_sse(bottom_blob, top_blob_bordered, weight_data_tm, bias_data, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, activation_type, activation_params, opt);
}
if (elempack == 1 && out_elempack == 4)
{
deconvolution_pack1to4_sse(bottom_blob, top_blob_bordered, weight_data_tm, bias_data, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, activation_type, activation_params, opt);
}
if (elempack == 4 && out_elempack == 1)
{
deconvolution_pack4to1_sse(bottom_blob, top_blob_bordered, weight_data_tm, bias_data, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, activation_type, activation_params, opt);
}
#endif // __SSE2__
if (elempack == 1 && out_elempack == 1)
{
// num_output
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < num_output; p++)
{
float* outptr = top_blob_bordered.channel(p);
// shadowed variables for fewer openmp task args
const int w = bottom_blob.w;
const int h = bottom_blob.h;
const int channels = bottom_blob.c;
const int outw = top_blob_bordered.w;
const int outh = top_blob_bordered.h;
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
float sum = 0.f;
if (bias_term)
{
sum = bias_data[p];
}
const float* kptr = (const float*)weight_data_tm.channel(p);
// channels
for (int q = 0; q < channels; q++)
{
const Mat m = bottom_blob.channel(q);
for (int y = 0; y < kernel_h; y++)
{
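// input row sy feeds output row i through kernel tap y exactly when
// sys == sy * stride_h; skip taps where sys is negative, not divisible
// by the stride, or maps past the input height (the x/sxs checks
// below mirror this horizontally)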
int sys = (i + y * dilation_h - (kernel_extent_h - 1));
if (sys < 0 || sys % stride_h != 0)
continue;
int sy = sys / stride_h;
if (sy >= h)
continue;
const float* sptr = m.row(sy);
for (int x = 0; x < kernel_w; x++)
{
int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
if (sxs < 0 || sxs % stride_w != 0)
continue;
int sx = sxs / stride_w;
if (sx >= w)
continue;
float val = sptr[sx];
int k = y * kernel_w + x;
float w = kptr[k];
sum += val * w;
}
}
kptr += maxk;
}
sum = activation_ss(sum, activation_type, activation_params);
outptr[j] = sum;
}
outptr += outw;
}
}
}
}
cut_padding(top_blob_bordered, top_blob, opt);
if (top_blob.empty())
return -100;
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x258, %rsp # imm = 0x258
movq %rdx, 0x160(%rsp)
movq %rdi, %r15
movq (%rdi), %rdi
movq -0x18(%rdi), %rax
movl 0xd4(%r15,%rax), %edx
decl %edx
imull 0xdc(%r15,%rax), %edx
movl %edx, 0x144(%rsp)
movl 0x2c(%rsi), %r13d
leal -0x1(%r13), %r12d
imull 0xe4(%r15,%rax), %r12d
movl 0xd8(%r15,%rax), %edx
decl %edx
imull 0xe0(%r15,%rax), %edx
movl %edx, 0x140(%rsp)
movl 0x30(%rsi), %r14d
leal -0x1(%r14), %ebp
imull 0xe8(%r15,%rax), %ebp
movl 0xd0(%r15,%rax), %r8d
movl 0xfc(%r15,%rax), %r11d
movl 0x100(%r15,%rax), %r10d
testb $0x3, %r8b
sete %r9b
movq %rcx, 0x130(%rsp)
andb 0x27(%rcx), %r9b
movq 0x10(%rsi), %rax
movq %rsi, 0x70(%rsp)
movslq 0x18(%rsi), %rsi
xorl %edx, %edx
movq %rsi, 0x148(%rsp)
divq %rsi
movq %rax, %rbx
movb %r9b, 0xf(%rsp)
movzbl %r9b, %ecx
leal (%rcx,%rcx,2), %r9d
incl %r9d
addb %cl, %cl
shlq %cl, %rbx
movl %r8d, %eax
cltd
idivl %r9d
movq %rax, 0xa0(%rsp)
andq $0x0, 0x120(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, 0xe0(%rsp)
movups %xmm0, 0xec(%rsp)
movaps %xmm0, 0x100(%rsp)
movups %xmm0, 0x10c(%rsp)
movq -0x18(%rdi), %rax
cmpl $0x0, 0xec(%r15,%rax)
pushq $0x10
popq %rsi
movq %rcx, 0x10(%rsp)
jg 0x154c2e
cmpl $0x0, 0xf0(%r15,%rax)
jg 0x154c2e
cmpl $0x0, 0xf4(%r15,%rax)
jg 0x154c2e
cmpl $0x0, 0xf8(%r15,%rax)
jle 0x156a97
addl 0x144(%rsp), %r12d
addl 0x140(%rsp), %ebp
addl %r11d, %r12d
incl %r12d
leal 0x1(%r10,%rbp), %edx
movq 0x130(%rsp), %rax
movq (%rax,%rsi), %rax
movq %rax, (%rsp)
leaq 0xe0(%rsp), %rdi
movl %r12d, %esi
movq 0xa0(%rsp), %rcx
movq %rbx, %r8
callq 0x628f2
pushq $-0x64
popq %rbx
cmpq $0x0, 0xe0(%rsp)
je 0x156a4c
movslq 0x118(%rsp), %rcx
movq 0x120(%rsp), %rax
imulq %rcx, %rax
testq %rax, %rax
je 0x156a4c
movq %rcx, %rsi
movq (%r15), %rdx
movq -0x18(%rdx), %rax
movslq 0xd4(%r15,%rax), %rcx
movq %rax, 0x40(%rsp)
movl 0xd8(%r15,%rax), %eax
movl %eax, 0x48(%rsp)
movq %rcx, %rdi
imull %ecx, %eax
movq 0x130(%rsp), %rcx
cmpb $0x1, 0x1d(%rcx)
cltq
movq %rax, 0x158(%rsp)
jne 0x154fda
movq 0x70(%rsp), %rdi
movq (%rdi), %rax
movq %rax, 0x178(%rsp)
movq 0x8(%rdi), %rcx
movq %rcx, 0x180(%rsp)
movq 0x10(%rdi), %rax
movq %rax, 0x188(%rsp)
movl 0x18(%rdi), %eax
movl %eax, 0x190(%rsp)
movq 0x20(%rdi), %rax
movq %rax, 0x198(%rsp)
movups 0x28(%rdi), %xmm0
movl 0x2c(%rdi), %edx
movl 0x30(%rdi), %eax
movups %xmm0, 0x1a0(%rsp)
movl 0x38(%rdi), %esi
movl %esi, 0x1b0(%rsp)
movq 0x40(%rdi), %rsi
movq %rsi, 0x1b8(%rsp)
testq %rcx, %rcx
je 0x154d5b
lock
incl (%rcx)
movl 0x2c(%rdi), %edx
movl 0x30(%rdi), %eax
imull %edx, %eax
leaq 0x178(%rsp), %rsi
leaq 0x1c0(%rsp), %rdx
andq $0x0, 0x40(%rdx)
movl %eax, 0x2c(%rsi)
movl $0x1, 0x30(%rsi)
xorps %xmm0, %xmm0
movaps %xmm0, (%rdx)
movups %xmm0, 0xc(%rdx)
movaps %xmm0, 0x20(%rdx)
movups %xmm0, 0x2c(%rdx)
movq 0x130(%rsp), %rax
movups (%rax), %xmm0
movups 0x10(%rax), %xmm1
movups 0x20(%rax), %xmm2
movups 0x30(%rax), %xmm3
leaq 0x210(%rsp), %rcx
movaps %xmm3, 0x30(%rcx)
movaps %xmm2, 0x20(%rcx)
movaps %xmm1, 0x10(%rcx)
movaps %xmm0, (%rcx)
movq 0x100(%rsp), %rax
movq %rax, 0x8(%rcx)
movq 0x10(%r15), %rdi
movq (%rdi), %rax
callq *0x38(%rax)
movq (%r15), %rsi
movq -0x18(%rsi), %r8
imull 0xe8(%r15,%r8), %r12d
movl 0xe4(%r15,%r8), %eax
imull %r13d, %eax
subl %eax, %r12d
movq 0x10(%rsp), %rcx
shll %cl, %r12d
movslq %r12d, %rdi
cmpb $0x0, 0xf(%rsp)
je 0x156751
xorl %ecx, %ecx
testl %r13d, %r13d
cmovlel %ecx, %r13d
testl %r14d, %r14d
cmovlel %ecx, %r14d
movq 0xa0(%rsp), %rax
testl %eax, %eax
cmovlel %ecx, %eax
movq %rax, 0xa0(%rsp)
xorps %xmm0, %xmm0
xorl %edx, %edx
cmpq 0xa0(%rsp), %rdx
je 0x15697a
movq %rdx, %rax
imulq 0x158(%rsp), %rax
movslq 0x1ec(%rsp), %r10
imulq %rax, %r10
imulq 0x1d0(%rsp), %r10
addq 0x1c0(%rsp), %r10
movslq 0x10c(%rsp), %r12
movslq 0x110(%rsp), %rcx
movq 0x120(%rsp), %r11
movq %rdx, 0x10(%rsp)
imulq %rdx, %r11
movq 0xf0(%rsp), %rbx
imulq %rbx, %r11
addq 0xe0(%rsp), %r11
movl 0x114(%rsp), %ebp
imulq %r12, %rcx
movq %rbx, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rbx
cmpl $0x4, 0x108(%rsp)
cmoveq %rcx, %rax
movq 0x1a8(%r15,%r8), %rcx
testq %rcx, %rcx
je 0x154f17
movslq 0x1e0(%r15,%r8), %rdx
imulq 0x1e8(%r15,%r8), %rdx
testq %rdx, %rdx
je 0x154f17
movq 0x10(%rsp), %rdx
shlq $0x4, %rdx
movaps (%rcx,%rdx), %xmm1
imull %eax, %ebp
testl %ebp, %ebp
movl $0x0, %eax
cmovlel %eax, %ebp
movq %r11, %rax
subl $0x1, %ebp
jb 0x154f35
movups %xmm1, (%rax)
addq $0x10, %rax
jmp 0x154f09
imull %eax, %ebp
testl %ebp, %ebp
movl $0x0, %eax
cmovlel %eax, %ebp
movq %r11, %rax
subl $0x1, %ebp
jb 0x154f35
movups %xmm0, (%rax)
addq $0x10, %rax
jmp 0x154f27
imulq %r12, %rbx
movq -0x18(%rsi), %r8
xorl %eax, %eax
cmpl 0xd8(%r15,%r8), %eax
jge 0x154fcd
xorl %edx, %edx
cmpl 0xd4(%r15,%r8), %edx
jge 0x154fc6
movslq 0xe0(%r15,%r8), %rcx
movslq %eax, %r12
imulq %rcx, %r12
imulq %rbx, %r12
addq %r11, %r12
movl 0xdc(%r15,%r8), %ecx
imull %edx, %ecx
shll $0x2, %ecx
movslq %ecx, %rcx
leaq (%r12,%rcx,4), %r12
xorl %ebp, %ebp
cmpl %r14d, %ebp
je 0x154fc2
movl %r13d, %ecx
subl $0x1, %ecx
jb 0x154fba
movaps (%r10), %xmm1
addps (%r12), %xmm1
movaps %xmm1, (%r12)
movq -0x18(%rsi), %r8
movslq 0xe4(%r15,%r8), %r9
shlq $0x4, %r9
addq %r9, %r12
addq $0x10, %r10
jmp 0x154f8e
leaq (%r12,%rdi,4), %r12
incl %ebp
jmp 0x154f86
incl %edx
jmp 0x154f4f
incl %eax
jmp 0x154f3f
movq 0x10(%rsp), %rdx
incq %rdx
jmp 0x154e32
movq %rdx, 0x150(%rsp)
cmpl $0x4, 0x148(%rsp)
setne %al
movb 0xf(%rsp), %cl
xorb $0x1, %cl
movb %cl, 0xf(%rsp)
orb %cl, %al
movq 0x70(%rsp), %rdx
movl 0x48(%rsp), %r13d
jne 0x15572e
movq %rdi, %r14
movl %r14d, %eax
movq 0x40(%rsp), %rcx
movl 0xdc(%r15,%rcx), %r11d
movl 0xe0(%r15,%rcx), %edi
movl %edi, 0x50(%rsp)
movl 0xe4(%r15,%rcx), %edi
movl 0xe8(%r15,%rcx), %r8d
movl %r8d, 0xa0(%rsp)
movl 0x114(%r15,%rcx), %r9d
movq 0x1a8(%r15,%rcx), %r10
movl %r14d, %ecx
imull %r13d, %ecx
shll $0x4, %ecx
xorl %ebx, %ebx
testl %r14d, %r14d
movl $0x0, %r8d
cmovgl %r14d, %r8d
testl %r13d, %r13d
movl $0x0, %r12d
cmovgl %r13d, %r12d
movq %r12, 0x30(%rsp)
movslq %ecx, %rcx
testl %esi, %esi
cmovlel %ebx, %esi
movq %rsi, 0x138(%rsp)
xorl %esi, %esi
shlq $0x2, %rcx
movq %rcx, 0xc8(%rsp)
shlq $0x6, %r14
pushq $0x1
popq %rcx
subl %eax, %ecx
imull %r11d, %ecx
movl %ecx, 0x60(%rsp)
shlq $0x6, %r8
decl %r9d
movq %r9, 0xc0(%rsp)
xorps %xmm0, %xmm0
movaps 0x29bf4b(%rip), %xmm15 # 0x3f1010
movaps 0x29bf53(%rip), %xmm9 # 0x3f1020
movaps 0x29bf5b(%rip), %xmm8 # 0x3f1030
movaps 0x298fc4(%rip), %xmm5 # 0x3ee0a0
movaps 0x29bf5d(%rip), %xmm6 # 0x3f1040
movq %r10, 0xb8(%rsp)
movaps 0x29bf7d(%rip), %xmm11 # 0x3f1070
movaps 0x29bf85(%rip), %xmm12 # 0x3f1080
movaps 0x29bf8d(%rip), %xmm13 # 0x3f1090
cmpq 0x138(%rsp), %rsi
je 0x15572e
movq 0x120(%rsp), %rax
imulq %rsi, %rax
imulq 0xf0(%rsp), %rax
addq 0xe0(%rsp), %rax
movl 0x2c(%rdx), %r12d
movl 0x30(%rdx), %ecx
movl %ecx, 0x38(%rsp)
movl 0x38(%rdx), %ecx
movq %rsi, 0x80(%rsp)
shlq $0x4, %rsi
movq %rsi, 0x78(%rsp)
xorl %edx, %edx
testl %ecx, %ecx
cmovlel %edx, %ecx
movq %rcx, 0x58(%rsp)
movl 0x10c(%rsp), %ecx
testl %ecx, %ecx
cmovlel %edx, %ecx
movl %ecx, 0x68(%rsp)
movl 0x110(%rsp), %ecx
testl %ecx, %ecx
cmovlel %edx, %ecx
movl %ecx, 0x64(%rsp)
movl 0x50(%rsp), %esi
cmpl 0x64(%rsp), %edx
je 0x155719
xorl %ecx, %ecx
movl 0x60(%rsp), %r9d
movl %r9d, 0x20(%rsp)
movl %edx, 0x10(%rsp)
cmpl 0x68(%rsp), %ecx
je 0x155712
testq %r10, %r10
movq %rax, 0x18(%rsp)
movl %ecx, 0x28(%rsp)
je 0x1551bb
movq 0x78(%rsp), %rax
movups (%r10,%rax), %xmm1
jmp 0x1551be
xorps %xmm1, %xmm1
movq 0x18(%r15), %rax
movq 0x70(%rsp), %rdx
movslq 0x2c(%rdx), %rbx
movq (%rdx), %rcx
movq %rcx, 0x90(%rsp)
movq 0x10(%rdx), %rcx
movq 0x40(%rdx), %rdx
imulq %rcx, %rdx
movq %rdx, 0x88(%rsp)
imulq %rcx, %rbx
movq 0x58(%r15), %rcx
imulq 0x80(%rsp), %rcx
imulq 0x28(%r15), %rcx
leaq (%rax,%rcx), %r9
addq $0x30, %r9
xorl %eax, %eax
movq 0x30(%rsp), %rcx
cmpq 0x58(%rsp), %rax
je 0x155338
movq 0x88(%rsp), %rbp
movq %rax, 0x98(%rsp)
imulq %rax, %rbp
addq 0x90(%rsp), %rbp
movq %r9, 0xd0(%rsp)
xorl %r10d, %r10d
cmpq %rcx, %r10
je 0x155318
movl %r10d, %eax
subl %r13d, %eax
incl %eax
imull %esi, %eax
addl 0x10(%rsp), %eax
js 0x15530d
cltd
idivl 0xa0(%rsp)
testl %edx, %edx
jne 0x15530d
cmpl 0x38(%rsp), %eax
jge 0x15530d
movq %r14, %r13
movslq %eax, %rsi
imulq %rbx, %rsi
addq %rbp, %rsi
movl 0x20(%rsp), %ecx
xorl %r14d, %r14d
cmpq %r14, %r8
je 0x1552fc
testl %ecx, %ecx
js 0x1552f3
movl %ecx, %eax
cltd
idivl %edi
testl %edx, %edx
jne 0x1552f3
cmpl %r12d, %eax
jge 0x1552f3
shll $0x2, %eax
cltq
movss (%rsi,%rax,4), %xmm2
movss 0x4(%rsi,%rax,4), %xmm3
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
mulps -0x30(%r9,%r14), %xmm2
movss 0x8(%rsi,%rax,4), %xmm7
addps %xmm1, %xmm2
movss 0xc(%rsi,%rax,4), %xmm1
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
mulps -0x20(%r9,%r14), %xmm3
mulps -0x10(%r9,%r14), %xmm7
addps %xmm3, %xmm7
addps %xmm2, %xmm7
mulps (%r9,%r14), %xmm1
addps %xmm7, %xmm1
addq $0x40, %r14
addl %r11d, %ecx
jmp 0x15528d
movq %r13, %r14
movl 0x48(%rsp), %r13d
movl 0x50(%rsp), %esi
movq 0x30(%rsp), %rcx
incq %r10
addq %r14, %r9
jmp 0x155241
movq 0x98(%rsp), %rax
incq %rax
movq 0xd0(%rsp), %r9
addq 0xc8(%rsp), %r9
jmp 0x15520f
movq 0xc0(%rsp), %rax
cmpl $0x5, %eax
ja 0x1556fc
leaq 0x29dd18(%rip), %rcx # 0x3f3068
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
movaps %xmm1, %xmm7
maxps %xmm0, %xmm7
movq 0xb8(%rsp), %r10
movl 0x10(%rsp), %edx
jmpq *%rax
movq 0x40(%rsp), %rax
movq 0x118(%r15,%rax), %rax
minps %xmm0, %xmm1
movss (%rax), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
mulps %xmm1, %xmm2
addps %xmm2, %xmm7
jmp 0x1556e1
movaps %xmm1, %xmm2
minps %xmm15, %xmm2
maxps %xmm9, %xmm2
movaps %xmm2, %xmm14
mulps %xmm8, %xmm14
addps %xmm5, %xmm14
cvttps2dq %xmm14, %xmm7
cvtdq2ps %xmm7, %xmm7
cmpltps %xmm7, %xmm14
andps %xmm6, %xmm14
subps %xmm14, %xmm7
movaps %xmm15, %xmm8
cvttps2dq %xmm7, %xmm15
movaps 0x29bc86(%rip), %xmm0 # 0x3f1050
mulps %xmm0, %xmm7
subps %xmm7, %xmm2
movaps %xmm2, %xmm7
mulps %xmm2, %xmm7
movaps %xmm2, %xmm14
movaps 0x29bc7e(%rip), %xmm10 # 0x3f1060
mulps %xmm10, %xmm14
addps %xmm11, %xmm14
mulps %xmm2, %xmm14
addps %xmm12, %xmm14
mulps %xmm2, %xmm14
addps %xmm13, %xmm14
mulps %xmm2, %xmm14
xorps %xmm4, %xmm4
movaps %xmm5, %xmm0
movaps 0x29bc95(%rip), %xmm5 # 0x3f10a0
addps %xmm5, %xmm14
mulps %xmm2, %xmm14
addps %xmm0, %xmm14
mulps %xmm7, %xmm14
addps %xmm6, %xmm2
addps %xmm14, %xmm2
pslld $0x17, %xmm15
paddd %xmm6, %xmm15
mulps %xmm2, %xmm15
addps %xmm6, %xmm15
movaps %xmm15, %xmm2
maxps 0x29bc6f(%rip), %xmm15 # 0x3f10b0
movaps %xmm15, %xmm7
andps 0x29bc73(%rip), %xmm15 # 0x3f10c0
orps %xmm0, %xmm15
movaps %xmm15, %xmm3
cmpltps 0x29bc83(%rip), %xmm3 # 0x3f10e0
movaps %xmm3, %xmm14
andps %xmm15, %xmm14
movaps 0x29bc83(%rip), %xmm10 # 0x3f10f0
addps %xmm10, %xmm15
addps %xmm14, %xmm15
cmpleps %xmm4, %xmm2
psrld $0x17, %xmm7
paddd 0x29bc4a(%rip), %xmm7 # 0x3f10d0
cvtdq2ps %xmm7, %xmm14
andps %xmm6, %xmm3
subps %xmm3, %xmm14
movaps %xmm15, %xmm7
mulps 0x29bc64(%rip), %xmm7 # 0x3f1100
addps 0x29bc6d(%rip), %xmm7 # 0x3f1110
mulps %xmm15, %xmm7
addps 0x29bc72(%rip), %xmm7 # 0x3f1120
mulps %xmm15, %xmm7
addps 0x29bc77(%rip), %xmm7 # 0x3f1130
mulps %xmm15, %xmm7
addps 0x29bc7c(%rip), %xmm7 # 0x3f1140
mulps %xmm15, %xmm7
addps 0x29bc81(%rip), %xmm7 # 0x3f1150
mulps %xmm15, %xmm7
addps 0x29bc86(%rip), %xmm7 # 0x3f1160
mulps %xmm15, %xmm7
addps 0x29bc8b(%rip), %xmm7 # 0x3f1170
mulps %xmm15, %xmm7
addps 0x29bc90(%rip), %xmm7 # 0x3f1180
mulps %xmm15, %xmm7
movaps 0x29bb55(%rip), %xmm4 # 0x3f1050
mulps %xmm4, %xmm14
addps %xmm15, %xmm14
mulps %xmm15, %xmm15
addps 0x29bc82(%rip), %xmm7 # 0x3f1190
mulps %xmm15, %xmm7
movaps %xmm8, %xmm15
movaps 0x29bb12(%rip), %xmm8 # 0x3f1030
addps %xmm7, %xmm14
mulps 0x29c796(%rip), %xmm14 # 0x3f1cc0
movaps %xmm2, %xmm3
andnps %xmm14, %xmm3
andps 0x29c798(%rip), %xmm2 # 0x3f1cd0
orps %xmm3, %xmm2
minps %xmm15, %xmm2
maxps %xmm9, %xmm2
movaps %xmm2, %xmm3
mulps %xmm8, %xmm3
addps %xmm0, %xmm3
cvttps2dq %xmm3, %xmm7
cvtdq2ps %xmm7, %xmm7
cmpltps %xmm7, %xmm3
andps %xmm6, %xmm3
subps %xmm3, %xmm7
cvttps2dq %xmm7, %xmm14
mulps %xmm4, %xmm7
subps %xmm7, %xmm2
movaps %xmm2, %xmm3
mulps %xmm2, %xmm3
movaps %xmm2, %xmm7
mulps 0x29bae7(%rip), %xmm7 # 0x3f1060
addps %xmm11, %xmm7
mulps %xmm2, %xmm7
addps %xmm12, %xmm7
mulps %xmm2, %xmm7
addps %xmm13, %xmm7
mulps %xmm2, %xmm7
addps %xmm5, %xmm7
movaps %xmm0, %xmm5
xorps %xmm0, %xmm0
mulps %xmm2, %xmm7
addps %xmm5, %xmm7
mulps %xmm3, %xmm7
addps %xmm6, %xmm2
addps %xmm7, %xmm2
pslld $0x17, %xmm14
paddd %xmm6, %xmm14
mulps %xmm2, %xmm14
addps %xmm6, %xmm14
rcpps %xmm14, %xmm2
movaps %xmm2, %xmm7
addps %xmm2, %xmm7
mulps %xmm7, %xmm14
movaps 0x29c712(%rip), %xmm3 # 0x3f1ce0
subps %xmm14, %xmm3
mulps %xmm2, %xmm3
addps %xmm10, %xmm7
addps %xmm3, %xmm7
jmp 0x1556de
movq 0x40(%rsp), %rax
movq 0x118(%r15,%rax), %rax
movss (%rax), %xmm2
movss 0x4(%rax), %xmm7
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
maxps %xmm2, %xmm1
minps %xmm7, %xmm1
movaps %xmm1, %xmm7
jmp 0x1556e1
xorps 0x298a7c(%rip), %xmm1 # 0x3ee090
minps %xmm15, %xmm1
maxps %xmm9, %xmm1
movaps %xmm1, %xmm7
mulps %xmm8, %xmm7
addps %xmm5, %xmm7
cvttps2dq %xmm7, %xmm2
cvtdq2ps %xmm2, %xmm2
cmpltps %xmm2, %xmm7
andps %xmm6, %xmm7
subps %xmm7, %xmm2
cvttps2dq %xmm2, %xmm14
mulps 0x29c6ad(%rip), %xmm2 # 0x3f1cf0
addps %xmm1, %xmm2
movaps %xmm2, %xmm1
mulps %xmm2, %xmm1
movaps %xmm2, %xmm7
mulps 0x29ba0a(%rip), %xmm7 # 0x3f1060
addps 0x29ba13(%rip), %xmm7 # 0x3f1070
mulps %xmm2, %xmm7
addps 0x29ba19(%rip), %xmm7 # 0x3f1080
mulps %xmm2, %xmm7
addps 0x29ba1f(%rip), %xmm7 # 0x3f1090
mulps %xmm2, %xmm7
addps 0x29ba25(%rip), %xmm7 # 0x3f10a0
mulps %xmm2, %xmm7
addps %xmm5, %xmm7
mulps %xmm1, %xmm7
addps %xmm6, %xmm2
addps %xmm7, %xmm2
pslld $0x17, %xmm14
paddd %xmm6, %xmm14
mulps %xmm2, %xmm14
addps %xmm6, %xmm14
rcpps %xmm14, %xmm1
mulps %xmm1, %xmm14
movaps %xmm6, %xmm7
subps %xmm14, %xmm7
mulps %xmm1, %xmm7
addps %xmm1, %xmm7
jmp 0x1556e1
movq 0x40(%rsp), %rax
movq 0x118(%r15,%rax), %rax
movss (%rax), %xmm7
movss 0x4(%rax), %xmm2
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
mulps %xmm1, %xmm7
addps %xmm2, %xmm7
maxps %xmm0, %xmm7
minps %xmm6, %xmm7
mulps %xmm1, %xmm7
movq 0x18(%rsp), %rax
movl 0x28(%rsp), %ecx
movups %xmm7, (%rax)
addq $0x10, %rax
incl %ecx
incl 0x20(%rsp)
jmp 0x155197
movaps %xmm1, %xmm7
movq 0xb8(%rsp), %r10
movq 0x18(%rsp), %rax
movl 0x10(%rsp), %edx
jmp 0x1556e6
incl %edx
jmp 0x15517d
movq 0x80(%rsp), %rsi
incq %rsi
movq 0x70(%rsp), %rdx
jmp 0x155103
cmpl $0x1, 0x148(%rsp)
setne %al
orb 0xf(%rsp), %al
jne 0x155e33
movq 0x150(%rsp), %rax
movq -0x18(%rax), %rcx
movslq 0xd4(%r15,%rcx), %rax
movl 0xd8(%r15,%rcx), %ebp
movl 0xdc(%r15,%rcx), %r8d
movl 0xe0(%r15,%rcx), %r14d
movl 0xe4(%r15,%rcx), %r10d
movl 0xe8(%r15,%rcx), %ebx
movl 0x114(%r15,%rcx), %edi
movl 0x118(%rsp), %r9d
movq %rcx, 0x40(%rsp)
movq 0x1a8(%r15,%rcx), %rcx
movq %rcx, 0xc0(%rsp)
movl %eax, %ecx
imull %ebp, %ecx
shll $0x2, %ecx
movslq %ecx, %rcx
xorl %r11d, %r11d
testl %eax, %eax
movl $0x0, %esi
cmovgl %eax, %esi
testl %ebp, %ebp
movl $0x0, %r13d
cmovgl %ebp, %r13d
testl %r9d, %r9d
cmovlel %r11d, %r9d
movq %r9, 0x138(%rsp)
shlq $0x2, %rcx
movq %rcx, 0x18(%rsp)
pushq $0x1
popq %rcx
subl %eax, %ecx
movq %rax, %r11
xorl %eax, %eax
shlq $0x4, %r11
imull %r8d, %ecx
movl %ecx, 0x60(%rsp)
shlq $0x4, %rsi
decl %edi
xorps %xmm0, %xmm0
movaps 0x29b809(%rip), %xmm15 # 0x3f1010
movaps 0x29b811(%rip), %xmm9 # 0x3f1020
movaps 0x29b819(%rip), %xmm8 # 0x3f1030
movaps 0x298882(%rip), %xmm5 # 0x3ee0a0
movaps 0x29b81b(%rip), %xmm6 # 0x3f1040
movq %rdi, 0xb8(%rsp)
movq %r13, 0x50(%rsp)
movq %r11, 0x48(%rsp)
movaps 0x29b831(%rip), %xmm11 # 0x3f1070
movaps 0x29b839(%rip), %xmm12 # 0x3f1080
movaps 0x29b841(%rip), %xmm13 # 0x3f1090
cmpq 0x138(%rsp), %rax
je 0x155e33
movq 0x120(%rsp), %rcx
imulq %rax, %rcx
imulq 0xf0(%rsp), %rcx
addq 0xe0(%rsp), %rcx
movq %rcx, 0x28(%rsp)
movl 0x2c(%rdx), %ecx
movl %ecx, 0x10(%rsp)
movl 0x30(%rdx), %ecx
movl %ecx, 0xa0(%rsp)
movl 0x38(%rdx), %ecx
movq %rax, 0x80(%rsp)
shlq $0x4, %rax
movq %rax, 0x78(%rsp)
xorl %r12d, %r12d
testl %ecx, %ecx
cmovlel %r12d, %ecx
movq %rcx, 0x58(%rsp)
movl 0x10c(%rsp), %eax
testl %eax, %eax
cmovlel %r12d, %eax
movl %eax, 0x68(%rsp)
movl 0x110(%rsp), %eax
testl %eax, %eax
cmovlel %r12d, %eax
movl %eax, 0x64(%rsp)
movq 0xc0(%rsp), %rax
cmpl 0x64(%rsp), %r12d
je 0x155e1e
xorl %edx, %edx
movl 0x60(%rsp), %ecx
movl %ecx, 0x20(%rsp)
cmpl 0x68(%rsp), %edx
je 0x155e16
testq %rax, %rax
je 0x15590b
movq 0x78(%rsp), %rcx
movups (%rax,%rcx), %xmm1
jmp 0x15590e
xorps %xmm1, %xmm1
movq 0x70(%rsp), %rcx
movslq 0x2c(%rcx), %rdi
movq (%rcx), %rax
movq %rax, 0x88(%rsp)
movq 0x10(%rcx), %rax
movq 0x40(%rcx), %rcx
imulq %rax, %rcx
movq %rcx, 0xc8(%rsp)
movq 0x58(%r15), %rcx
imulq 0x80(%rsp), %rcx
imulq 0x28(%r15), %rcx
imulq %rax, %rdi
movq %rdi, 0x30(%rsp)
addq 0x18(%r15), %rcx
xorl %eax, %eax
movl %edx, 0x90(%rsp)
cmpq 0x58(%rsp), %rax
je 0x155a55
movq 0xc8(%rsp), %rdx
movq %rax, 0x98(%rsp)
imulq %rax, %rdx
addq 0x88(%rsp), %rdx
movq %rdx, 0x38(%rsp)
movq %rcx, 0xd0(%rsp)
movq %rcx, %rdi
xorl %r9d, %r9d
cmpq %r13, %r9
je 0x155a31
movl %r9d, %eax
subl %ebp, %eax
incl %eax
imull %r14d, %eax
addl %r12d, %eax
js 0x155a26
cltd
idivl %ebx
testl %edx, %edx
jne 0x155a26
cmpl 0xa0(%rsp), %eax
jge 0x155a26
movl %ebx, %r11d
movl %r14d, %ebx
movl %ebp, %r13d
movslq %eax, %r14
imulq 0x30(%rsp), %r14
addq 0x38(%rsp), %r14
movl 0x20(%rsp), %ecx
xorl %ebp, %ebp
cmpq %rbp, %rsi
je 0x155a13
testl %ecx, %ecx
js 0x155a0a
movl %ecx, %eax
cltd
idivl %r10d
testl %edx, %edx
jne 0x155a0a
cmpl 0x10(%rsp), %eax
jge 0x155a0a
cltq
movss (%r14,%rax,4), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
mulps (%rdi,%rbp), %xmm2
addps %xmm2, %xmm1
addq $0x10, %rbp
addl %r8d, %ecx
jmp 0x1559de
movl %r13d, %ebp
movl %ebx, %r14d
movl %r11d, %ebx
movq 0x50(%rsp), %r13
movq 0x48(%rsp), %r11
incq %r9
addq %r11, %rdi
jmp 0x155998
movq 0x98(%rsp), %rax
incq %rax
movq 0xd0(%rsp), %rcx
addq 0x18(%rsp), %rcx
movl 0x90(%rsp), %edx
jmp 0x15595e
movq 0xb8(%rsp), %rax
cmpl $0x5, %eax
ja 0x155d16
leaq 0x29d613(%rip), %rcx # 0x3f3080
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
movaps %xmm1, %xmm7
maxps %xmm0, %xmm7
jmpq *%rax
movq 0x40(%rsp), %rax
movq 0x118(%r15,%rax), %rax
minps %xmm0, %xmm1
movss (%rax), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
mulps %xmm1, %xmm2
addps %xmm2, %xmm7
jmp 0x155df2
movaps %xmm1, %xmm2
minps %xmm15, %xmm2
maxps %xmm9, %xmm2
movaps %xmm2, %xmm14
mulps %xmm8, %xmm14
addps %xmm5, %xmm14
cvttps2dq %xmm14, %xmm7
cvtdq2ps %xmm7, %xmm7
cmpltps %xmm7, %xmm14
andps %xmm6, %xmm14
subps %xmm14, %xmm7
movaps %xmm15, %xmm8
cvttps2dq %xmm7, %xmm15
movaps 0x29b575(%rip), %xmm0 # 0x3f1050
mulps %xmm0, %xmm7
subps %xmm7, %xmm2
movaps %xmm2, %xmm7
mulps %xmm2, %xmm7
movaps %xmm2, %xmm14
movaps 0x29b56d(%rip), %xmm10 # 0x3f1060
mulps %xmm10, %xmm14
addps %xmm11, %xmm14
mulps %xmm2, %xmm14
addps %xmm12, %xmm14
mulps %xmm2, %xmm14
addps %xmm13, %xmm14
mulps %xmm2, %xmm14
xorps %xmm4, %xmm4
movaps %xmm5, %xmm0
movaps 0x29b584(%rip), %xmm5 # 0x3f10a0
addps %xmm5, %xmm14
mulps %xmm2, %xmm14
addps %xmm0, %xmm14
mulps %xmm7, %xmm14
addps %xmm6, %xmm2
addps %xmm14, %xmm2
pslld $0x17, %xmm15
paddd %xmm6, %xmm15
mulps %xmm2, %xmm15
addps %xmm6, %xmm15
movaps %xmm15, %xmm2
maxps 0x29b55e(%rip), %xmm15 # 0x3f10b0
movaps %xmm15, %xmm7
andps 0x29b562(%rip), %xmm15 # 0x3f10c0
orps %xmm0, %xmm15
movaps %xmm15, %xmm3
cmpltps 0x29b572(%rip), %xmm3 # 0x3f10e0
movaps %xmm3, %xmm14
andps %xmm15, %xmm14
movaps 0x29b572(%rip), %xmm10 # 0x3f10f0
addps %xmm10, %xmm15
addps %xmm14, %xmm15
cmpleps %xmm4, %xmm2
psrld $0x17, %xmm7
paddd 0x29b539(%rip), %xmm7 # 0x3f10d0
cvtdq2ps %xmm7, %xmm14
andps %xmm6, %xmm3
subps %xmm3, %xmm14
movaps %xmm15, %xmm7
mulps 0x29b553(%rip), %xmm7 # 0x3f1100
addps 0x29b55c(%rip), %xmm7 # 0x3f1110
mulps %xmm15, %xmm7
addps 0x29b561(%rip), %xmm7 # 0x3f1120
mulps %xmm15, %xmm7
addps 0x29b566(%rip), %xmm7 # 0x3f1130
mulps %xmm15, %xmm7
addps 0x29b56b(%rip), %xmm7 # 0x3f1140
mulps %xmm15, %xmm7
addps 0x29b570(%rip), %xmm7 # 0x3f1150
mulps %xmm15, %xmm7
addps 0x29b575(%rip), %xmm7 # 0x3f1160
mulps %xmm15, %xmm7
addps 0x29b57a(%rip), %xmm7 # 0x3f1170
mulps %xmm15, %xmm7
addps 0x29b57f(%rip), %xmm7 # 0x3f1180
mulps %xmm15, %xmm7
movaps 0x29b444(%rip), %xmm4 # 0x3f1050
mulps %xmm4, %xmm14
addps %xmm15, %xmm14
mulps %xmm15, %xmm15
addps 0x29b571(%rip), %xmm7 # 0x3f1190
mulps %xmm15, %xmm7
movaps %xmm8, %xmm15
movaps 0x29b401(%rip), %xmm8 # 0x3f1030
addps %xmm7, %xmm14
mulps 0x29c085(%rip), %xmm14 # 0x3f1cc0
movaps %xmm2, %xmm3
andnps %xmm14, %xmm3
andps 0x29c087(%rip), %xmm2 # 0x3f1cd0
orps %xmm3, %xmm2
minps %xmm15, %xmm2
maxps %xmm9, %xmm2
movaps %xmm2, %xmm3
mulps %xmm8, %xmm3
addps %xmm0, %xmm3
cvttps2dq %xmm3, %xmm7
cvtdq2ps %xmm7, %xmm7
cmpltps %xmm7, %xmm3
andps %xmm6, %xmm3
subps %xmm3, %xmm7
cvttps2dq %xmm7, %xmm14
mulps %xmm4, %xmm7
subps %xmm7, %xmm2
movaps %xmm2, %xmm3
mulps %xmm2, %xmm3
movaps %xmm2, %xmm7
mulps 0x29b3d6(%rip), %xmm7 # 0x3f1060
addps %xmm11, %xmm7
mulps %xmm2, %xmm7
addps %xmm12, %xmm7
mulps %xmm2, %xmm7
addps %xmm13, %xmm7
mulps %xmm2, %xmm7
addps %xmm5, %xmm7
movaps %xmm0, %xmm5
xorps %xmm0, %xmm0
mulps %xmm2, %xmm7
addps %xmm5, %xmm7
mulps %xmm3, %xmm7
addps %xmm6, %xmm2
addps %xmm7, %xmm2
pslld $0x17, %xmm14
paddd %xmm6, %xmm14
mulps %xmm2, %xmm14
addps %xmm6, %xmm14
rcpps %xmm14, %xmm2
movaps %xmm2, %xmm7
addps %xmm2, %xmm7
mulps %xmm7, %xmm14
movaps 0x29c001(%rip), %xmm3 # 0x3f1ce0
subps %xmm14, %xmm3
mulps %xmm2, %xmm3
addps %xmm10, %xmm7
addps %xmm3, %xmm7
jmp 0x155def
movq 0x40(%rsp), %rax
movq 0x118(%r15,%rax), %rax
movss (%rax), %xmm2
movss 0x4(%rax), %xmm7
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
maxps %xmm2, %xmm1
minps %xmm7, %xmm1
movaps %xmm1, %xmm7
jmp 0x155df2
xorps 0x29836b(%rip), %xmm1 # 0x3ee090
minps %xmm15, %xmm1
maxps %xmm9, %xmm1
movaps %xmm1, %xmm7
mulps %xmm8, %xmm7
addps %xmm5, %xmm7
cvttps2dq %xmm7, %xmm2
cvtdq2ps %xmm2, %xmm2
cmpltps %xmm2, %xmm7
andps %xmm6, %xmm7
subps %xmm7, %xmm2
cvttps2dq %xmm2, %xmm14
mulps 0x29bf9c(%rip), %xmm2 # 0x3f1cf0
addps %xmm1, %xmm2
movaps %xmm2, %xmm1
mulps %xmm2, %xmm1
movaps %xmm2, %xmm7
mulps 0x29b2f9(%rip), %xmm7 # 0x3f1060
addps 0x29b302(%rip), %xmm7 # 0x3f1070
mulps %xmm2, %xmm7
addps 0x29b308(%rip), %xmm7 # 0x3f1080
mulps %xmm2, %xmm7
addps 0x29b30e(%rip), %xmm7 # 0x3f1090
mulps %xmm2, %xmm7
addps 0x29b314(%rip), %xmm7 # 0x3f10a0
mulps %xmm2, %xmm7
addps %xmm5, %xmm7
mulps %xmm1, %xmm7
addps %xmm6, %xmm2
addps %xmm7, %xmm2
pslld $0x17, %xmm14
paddd %xmm6, %xmm14
mulps %xmm2, %xmm14
addps %xmm6, %xmm14
rcpps %xmm14, %xmm1
mulps %xmm1, %xmm14
movaps %xmm6, %xmm7
subps %xmm14, %xmm7
mulps %xmm1, %xmm7
addps %xmm1, %xmm7
jmp 0x155df2
movq 0x40(%rsp), %rax
movq 0x118(%r15,%rax), %rax
movss (%rax), %xmm7
movss 0x4(%rax), %xmm2
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
mulps %xmm1, %xmm7
addps %xmm2, %xmm7
maxps %xmm0, %xmm7
minps %xmm6, %xmm7
mulps %xmm1, %xmm7
movq 0x28(%rsp), %rax
movups %xmm7, (%rax)
addq $0x10, %rax
movq %rax, 0x28(%rsp)
incl %edx
incl 0x20(%rsp)
movq 0xc0(%rsp), %rax
jmp 0x1558f1
incl %r12d
jmp 0x1558dc
movq 0x80(%rsp), %rax
incq %rax
movq 0x70(%rsp), %rdx
jmp 0x15584f
cmpl $0x4, 0x148(%rsp)
jne 0x1562f5
cmpb $0x0, 0xf(%rsp)
je 0x1562f5
movq 0x150(%rsp), %rax
movq -0x18(%rax), %rsi
movslq 0xd4(%r15,%rsi), %rax
movl 0xd8(%r15,%rsi), %edi
movl 0xdc(%r15,%rsi), %r14d
movl 0xe0(%r15,%rsi), %r10d
movl 0x114(%r15,%rsi), %r8d
movl 0x118(%rsp), %r9d
movq 0xf0(%rsp), %rcx
imulq 0x120(%rsp), %rcx
movq %rcx, 0x170(%rsp)
movl %eax, %ecx
imull %edi, %ecx
shll $0x2, %ecx
xorl %r11d, %r11d
testl %eax, %eax
movl $0x0, %ebx
cmovgl %eax, %ebx
testl %edi, %edi
movl $0x0, %edx
cmovgl %edi, %edx
movq %rdx, 0x30(%rsp)
movq 0x70(%rsp), %rdx
movl 0x38(%rdx), %r12d
testl %r12d, %r12d
cmovlel %r11d, %r12d
movq %r12, 0x58(%rsp)
movl 0x10c(%rsp), %ebp
testl %ebp, %ebp
cmovlel %r11d, %ebp
movl %ebp, 0x80(%rsp)
movl 0x110(%rsp), %ebp
testl %ebp, %ebp
cmovlel %r11d, %ebp
movl %ebp, 0x64(%rsp)
movslq %ecx, %rcx
testl %r9d, %r9d
cmovlel %r11d, %r9d
movq %r9, 0x150(%rsp)
shlq $0x2, %rcx
movq %rcx, 0xc8(%rsp)
pushq $0x1
popq %rcx
subl %eax, %ecx
shlq $0x4, %rax
movq %rax, 0x10(%rsp)
imull %r14d, %ecx
movl %ecx, 0x60(%rsp)
shlq $0x4, %rbx
decl %r8d
movq %r8, 0xb8(%rsp)
movl 0xe4(%r15,%rsi), %r12d
movl 0xe8(%r15,%rsi), %eax
movl %eax, 0xa0(%rsp)
movq %rsi, 0x138(%rsp)
movq 0x1a8(%r15,%rsi), %rcx
movq 0xe0(%rsp), %rax
movq %rax, 0x168(%rsp)
movl 0x2c(%rdx), %r13d
movl 0x30(%rdx), %eax
movl %eax, 0x38(%rsp)
movl %edi, 0x50(%rsp)
movl %r10d, 0x48(%rsp)
movq %rcx, 0x68(%rsp)
cmpq 0x150(%rsp), %r11
je 0x1562f5
movq 0x170(%rsp), %rdx
movq %r11, 0x78(%rsp)
imulq %r11, %rdx
addq 0x168(%rsp), %rdx
movq %rdx, 0x18(%rsp)
xorl %ebp, %ebp
cmpl 0x64(%rsp), %ebp
je 0x1562e8
movq 0x138(%rsp), %rax
movq 0x118(%r15,%rax), %rax
movq %rax, 0x40(%rsp)
movq 0x58(%r15), %rax
imulq 0x78(%rsp), %rax
imulq 0x28(%r15), %rax
addq 0x18(%r15), %rax
movq %rax, 0xc0(%rsp)
xorl %eax, %eax
movl 0x60(%rsp), %edx
movl %edx, 0x20(%rsp)
cmpl 0x80(%rsp), %eax
je 0x1562e1
movl %eax, 0x28(%rsp)
testq %rcx, %rcx
je 0x156026
movq 0x78(%rsp), %rax
movss (%rcx,%rax,4), %xmm0
jmp 0x156029
xorps %xmm0, %xmm0
movq 0x70(%rsp), %rcx
movslq 0x2c(%rcx), %rsi
movq (%rcx), %rax
movq %rax, 0x90(%rsp)
movq 0x10(%rcx), %rax
movq 0x40(%rcx), %rcx
imulq %rax, %rcx
movq %rcx, 0x88(%rsp)
imulq %rax, %rsi
xorps %xmm1, %xmm1
movq 0xc0(%rsp), %r9
xorl %eax, %eax
movq 0x30(%rsp), %rcx
cmpq 0x58(%rsp), %rax
je 0x15613d
movq 0x88(%rsp), %r11
movq %rax, 0x98(%rsp)
imulq %rax, %r11
addq 0x90(%rsp), %r11
movq %r9, 0xd0(%rsp)
xorl %r8d, %r8d
cmpq %rcx, %r8
je 0x15611d
movl %r8d, %eax
subl %edi, %eax
incl %eax
imull %r10d, %eax
addl %ebp, %eax
js 0x156110
cltd
idivl 0xa0(%rsp)
testl %edx, %edx
jne 0x156110
cmpl 0x38(%rsp), %eax
jge 0x156110
movslq %eax, %rdi
imulq %rsi, %rdi
addq %r11, %rdi
movl 0x20(%rsp), %ecx
xorl %r10d, %r10d
cmpq %r10, %rbx
je 0x156102
testl %ecx, %ecx
js 0x1560f9
movl %ecx, %eax
cltd
idivl %r12d
testl %edx, %edx
jne 0x1560f9
cmpl %r13d, %eax
jge 0x1560f9
shll $0x2, %eax
cltq
movaps (%r9,%r10), %xmm2
mulps (%rdi,%rax,4), %xmm2
addps %xmm2, %xmm1
addq $0x10, %r10
addl %r14d, %ecx
jmp 0x1560d0
movl 0x50(%rsp), %edi
movl 0x48(%rsp), %r10d
movq 0x30(%rsp), %rcx
incq %r8
addq 0x10(%rsp), %r9
jmp 0x156099
movq 0x98(%rsp), %rax
incq %rax
movq 0xd0(%rsp), %r9
addq 0xc8(%rsp), %r9
jmp 0x156067
movaps %xmm1, %xmm4
unpckhpd %xmm1, %xmm4 # xmm4 = xmm4[1],xmm1[1]
addps %xmm1, %xmm4
addss %xmm4, %xmm0
shufps $0x55, %xmm4, %xmm4 # xmm4 = xmm4[1,1,1,1]
addss %xmm0, %xmm4
movq 0xb8(%rsp), %rax
cmpl $0x5, %eax
ja 0x1562b8
leaq 0x29cf2d(%rip), %rcx # 0x3f3098
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
movq 0x68(%rsp), %rcx
jmpq *%rax
maxss 0x297e8f(%rip), %xmm4 # 0x3ee010
movaps %xmm4, %xmm0
jmp 0x1562c0
movaps %xmm4, %xmm0
movaps %xmm4, 0xd0(%rsp)
callq 0x5f410
addss 0x298ae7(%rip), %xmm0 # 0x3eec88
callq 0x5f200
callq 0x5f160
movq 0x68(%rsp), %rcx
movl 0x48(%rsp), %r10d
movl 0x50(%rsp), %edi
mulss 0xd0(%rsp), %xmm0
jmp 0x1562c0
movq 0x40(%rsp), %rax
maxss (%rax), %xmm4
movss 0x4(%rax), %xmm1
ucomiss %xmm1, %xmm4
movaps %xmm4, %xmm0
ja 0x1562b3
jmp 0x1562c0
movss 0x29afca(%rip), %xmm2 # 0x3f11b8
minss %xmm2, %xmm4
movaps %xmm4, %xmm0
xorps 0x297e94(%rip), %xmm0 # 0x3ee090
cmpltss 0x29afb7(%rip), %xmm4 # 0x3f11bc
movaps %xmm4, %xmm1
andnps %xmm0, %xmm1
andps %xmm2, %xmm4
orps %xmm1, %xmm4
movaps %xmm4, %xmm0
callq 0x5f410
movq 0x68(%rsp), %rcx
movl 0x48(%rsp), %r10d
movl 0x50(%rsp), %edi
movaps %xmm0, %xmm1
movss 0x298a56(%rip), %xmm0 # 0x3eec88
addss %xmm0, %xmm1
divss %xmm1, %xmm0
jmp 0x1562c0
xorps %xmm0, %xmm0
cmpltss %xmm4, %xmm0
movaps %xmm0, %xmm1
movss 0x298a36(%rip), %xmm2 # 0x3eec88
andps %xmm2, %xmm1
movq 0x40(%rsp), %rax
movss (%rax), %xmm2
andnps %xmm2, %xmm0
orps %xmm1, %xmm0
mulss %xmm4, %xmm0
jmp 0x1562c0
movq 0x40(%rsp), %rax
movss (%rax), %xmm1
movss 0x4(%rax), %xmm2
movaps %xmm2, %xmm3
xorps 0x297e0e(%rip), %xmm3 # 0x3ee090
divss %xmm1, %xmm3
xorps %xmm0, %xmm0
ucomiss %xmm3, %xmm4
jb 0x1562c0
movss 0x2989f2(%rip), %xmm0 # 0x3eec88
divss %xmm1, %xmm0
addss %xmm0, %xmm3
ucomiss %xmm3, %xmm4
ja 0x156181
mulss %xmm4, %xmm1
addss %xmm2, %xmm1
mulss %xmm4, %xmm1
movaps %xmm1, %xmm0
jmp 0x1562c0
movaps %xmm4, %xmm0
movq 0x68(%rsp), %rcx
movq 0x18(%rsp), %rax
movss %xmm0, (%rax)
addq $0x4, %rax
movq %rax, 0x18(%rsp)
movl 0x28(%rsp), %eax
incl %eax
incl 0x20(%rsp)
jmp 0x156004
incl %ebp
jmp 0x155fc0
movq 0x78(%rsp), %r11
incq %r11
jmp 0x155f92
cmpl $0x1, 0x148(%rsp)
setne %al
movb 0xf(%rsp), %cl
xorb $0x1, %cl
orb %al, %cl
movq 0x70(%rsp), %rax
jne 0x156a07
movq (%r15), %rcx
movq %rcx, 0xc0(%rsp)
movq 0xe0(%rsp), %rcx
movq %rcx, 0xb8(%rsp)
movq 0x120(%rsp), %rcx
imulq 0xf0(%rsp), %rcx
movq %rcx, 0x78(%rsp)
movl 0x2c(%rax), %ebp
movl 0x30(%rax), %ecx
movl %ecx, 0x10(%rsp)
movl 0x38(%rax), %eax
movslq 0x10c(%rsp), %rcx
xorl %edx, %edx
testl %eax, %eax
cmovlel %edx, %eax
movq %rax, 0x20(%rsp)
testl %ecx, %ecx
movl $0x0, %esi
movq %rcx, 0x80(%rsp)
cmovgl %ecx, %esi
movq %rsi, 0x90(%rsp)
movl 0x110(%rsp), %eax
testl %eax, %eax
cmovlel %edx, %eax
movl %eax, 0x68(%rsp)
shlq $0x2, 0x158(%rsp)
movq 0xc0(%rsp), %rax
movq -0x18(%rax), %rax
movslq 0xd0(%r15,%rax), %rax
cmpq %rax, %rdx
jge 0x156a07
movq 0x78(%rsp), %rax
imulq %rdx, %rax
addq 0xb8(%rsp), %rax
movq %rax, 0x58(%rsp)
xorl %r12d, %r12d
movq %rdx, 0x18(%rsp)
cmpl 0x68(%rsp), %r12d
je 0x156749
movq (%r15), %rax
movq %rax, 0x88(%rsp)
movl %r12d, 0x28(%rsp)
movq 0x58(%r15), %rcx
imulq %rdx, %rcx
imulq 0x28(%r15), %rcx
subl 0x140(%rsp), %r12d
addq 0x18(%r15), %rcx
movq %rcx, 0xc8(%rsp)
xorl %edx, %edx
cmpq 0x90(%rsp), %rdx
je 0x156721
movq 0x88(%rsp), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0x10c(%r15,%rax)
movq %rax, 0x98(%rsp)
je 0x156453
movq 0x1a8(%r15,%rax), %rax
movq 0x18(%rsp), %rcx
movss (%rax,%rcx,4), %xmm4
jmp 0x156456
xorps %xmm4, %xmm4
movq 0x70(%rsp), %rcx
movslq 0x2c(%rcx), %rsi
movq (%rcx), %rax
movq %rax, 0x30(%rsp)
movq 0x10(%rcx), %rax
movq 0x40(%rcx), %rcx
imulq %rax, %rcx
movq %rcx, 0x50(%rsp)
movq (%r15), %rcx
movq %rcx, 0x48(%rsp)
imulq %rax, %rsi
movq %rsi, 0xa0(%rsp)
movq %rdx, 0xd0(%rsp)
movl %edx, %r10d
subl 0x144(%rsp), %r10d
movq 0xc8(%rsp), %r11
xorl %eax, %eax
cmpq 0x20(%rsp), %rax
je 0x156595
movq 0x50(%rsp), %r8
movq %rax, 0x38(%rsp)
imulq %rax, %r8
addq 0x30(%rsp), %r8
movq 0x48(%rsp), %rax
movq -0x18(%rax), %r14
movl 0xd8(%r15,%r14), %edi
xorl %r9d, %r9d
testl %edi, %edi
cmovlel %r9d, %edi
cmpl %edi, %r9d
je 0x156580
movl 0xe0(%r15,%r14), %eax
imull %r9d, %eax
addl %r12d, %eax
js 0x156578
cltd
idivl 0xe8(%r15,%r14)
testl %edx, %edx
jne 0x156578
cmpl 0x10(%rsp), %eax
jge 0x156578
movslq %eax, %rcx
imulq 0xa0(%rsp), %rcx
addq %r8, %rcx
movl 0xd4(%r15,%r14), %eax
testl %eax, %eax
movl $0x0, %ebx
cmovgl %eax, %ebx
imull %r9d, %eax
cltq
leaq (%r11,%rax,4), %r13
xorl %esi, %esi
cmpq %rsi, %rbx
je 0x156578
movl 0xdc(%r15,%r14), %eax
imull %esi, %eax
addl %r10d, %eax
js 0x156573
cltd
idivl 0xe4(%r15,%r14)
testl %edx, %edx
jne 0x156573
cmpl %ebp, %eax
jge 0x156573
cltq
movss (%r13,%rsi,4), %xmm0
mulss (%rcx,%rax,4), %xmm0
addss %xmm0, %xmm4
incq %rsi
jmp 0x15653b
incl %r9d
jmp 0x1564e1
movq 0x38(%rsp), %rax
incq %rax
addq 0x158(%rsp), %r11
jmp 0x1564a9
movq 0x98(%rsp), %rsi
movl 0x114(%r15,%rsi), %eax
decl %eax
cmpl $0x5, %eax
ja 0x156704
leaq 0x29caf9(%rip), %rcx # 0x3f30b0
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
movq 0xd0(%rsp), %rdx
jmpq *%rax
maxss 0x297a40(%rip), %xmm4 # 0x3ee010
movaps %xmm4, %xmm0
jmp 0x15670f
movaps %xmm4, %xmm0
movaps %xmm4, 0xa0(%rsp)
callq 0x5f410
addss 0x298698(%rip), %xmm0 # 0x3eec88
callq 0x5f200
callq 0x5f160
movq 0xd0(%rsp), %rdx
mulss 0xa0(%rsp), %xmm0
jmp 0x15670f
movq 0x118(%r15,%rsi), %rax
maxss (%rax), %xmm4
movss 0x4(%rax), %xmm1
ucomiss %xmm1, %xmm4
movaps %xmm4, %xmm0
ja 0x1566ff
jmp 0x15670f
movss 0x29ab7e(%rip), %xmm2 # 0x3f11b8
minss %xmm2, %xmm4
movaps %xmm4, %xmm0
xorps 0x297a48(%rip), %xmm0 # 0x3ee090
cmpltss 0x29ab6b(%rip), %xmm4 # 0x3f11bc
movaps %xmm4, %xmm1
andnps %xmm0, %xmm1
andps %xmm2, %xmm4
orps %xmm1, %xmm4
movaps %xmm4, %xmm0
callq 0x5f410
movq 0xd0(%rsp), %rdx
movaps %xmm0, %xmm1
movss 0x298610(%rip), %xmm0 # 0x3eec88
addss %xmm0, %xmm1
divss %xmm1, %xmm0
jmp 0x15670f
movq 0x118(%r15,%rsi), %rax
movss (%rax), %xmm1
xorps %xmm0, %xmm0
cmpltss %xmm4, %xmm0
movaps %xmm0, %xmm2
andnps %xmm1, %xmm2
movss 0x2985e1(%rip), %xmm1 # 0x3eec88
andps %xmm1, %xmm0
orps %xmm2, %xmm0
mulss %xmm4, %xmm0
jmp 0x15670f
movq 0x118(%r15,%rsi), %rax
movss (%rax), %xmm1
movss 0x4(%rax), %xmm2
movaps %xmm2, %xmm3
xorps 0x2979c2(%rip), %xmm3 # 0x3ee090
divss %xmm1, %xmm3
xorps %xmm0, %xmm0
ucomiss %xmm3, %xmm4
jb 0x15670f
movss 0x2985a6(%rip), %xmm0 # 0x3eec88
divss %xmm1, %xmm0
addss %xmm0, %xmm3
ucomiss %xmm3, %xmm4
ja 0x1565d0
mulss %xmm4, %xmm1
addss %xmm2, %xmm1
mulss %xmm4, %xmm1
movaps %xmm1, %xmm0
jmp 0x15670f
movaps %xmm4, %xmm0
movq 0xd0(%rsp), %rdx
movq 0x58(%rsp), %rax
movss %xmm0, (%rax,%rdx,4)
incq %rdx
jmp 0x156412
movq 0x80(%rsp), %rax
movq 0x58(%rsp), %rcx
leaq (%rcx,%rax,4), %rcx
movq %rcx, 0x58(%rsp)
movl 0x28(%rsp), %r12d
incl %r12d
movq 0x18(%rsp), %rdx
jmp 0x1563d4
incq %rdx
jmp 0x156399
movslq 0x1ec(%rsp), %rcx
imulq 0x1d0(%rsp), %rcx
movq 0x1c0(%rsp), %rax
movq %rax, 0x30(%rsp)
movslq 0x10c(%rsp), %r11
movslq 0x110(%rsp), %rax
movq 0xf0(%rsp), %rdx
movq 0x120(%rsp), %rbp
imulq %rdx, %rbp
imulq %r11, %rax
movq %rdx, %r9
movq %rax, 0x50(%rsp)
imulq %rax, %r9
addq $0xf, %r9
andq $-0x10, %r9
movq %r9, 0x98(%rsp)
movq %rdx, 0x48(%rsp)
imulq %rdx, %r11
xorl %edx, %edx
testl %r13d, %r13d
cmovlel %edx, %r13d
movl 0x114(%rsp), %eax
movl %eax, 0x58(%rsp)
testl %r14d, %r14d
cmovlel %edx, %r14d
movq 0xe0(%rsp), %r9
movq 0xa0(%rsp), %rax
testl %eax, %eax
cmovlel %edx, %eax
movq %rax, 0xa0(%rsp)
movl 0x108(%rsp), %eax
movl %eax, 0x88(%rsp)
imulq 0x158(%rsp), %rcx
movq %rcx, 0x20(%rsp)
movq %r9, 0x90(%rsp)
movq %r9, 0x38(%rsp)
xorl %eax, %eax
movq %rbp, 0xd0(%rsp)
cmpq 0xa0(%rsp), %rax
je 0x15697a
movq %rax, 0x10(%rsp)
movq 0x98(%rsp), %rax
xorl %edx, %edx
divq 0x48(%rsp)
cmpl $0x4, 0x88(%rsp)
cmoveq 0x50(%rsp), %rax
movq 0x1a8(%r15,%r8), %rcx
xorps %xmm0, %xmm0
testq %rcx, %rcx
je 0x15688e
movslq 0x1e0(%r15,%r8), %rdx
imulq 0x1e8(%r15,%r8), %rdx
testq %rdx, %rdx
movq 0x38(%rsp), %r8
je 0x156893
movq 0x10(%rsp), %rdx
movss (%rcx,%rdx,4), %xmm0
jmp 0x156893
movq 0x38(%rsp), %r8
movq 0x10(%rsp), %rcx
movq %rcx, %rdx
imulq 0x20(%rsp), %rdx
addq 0x30(%rsp), %rdx
imulq %rcx, %rbp
addq 0x90(%rsp), %rbp
imull 0x58(%rsp), %eax
testl %eax, %eax
movl $0x0, %ecx
cmovlel %ecx, %eax
xorl %ecx, %ecx
cmpl %ecx, %eax
je 0x1568d2
movss %xmm0, (%r8,%rcx,4)
incq %rcx
jmp 0x1568c3
movq -0x18(%rsi), %r8
xorl %eax, %eax
cmpl 0xd8(%r15,%r8), %eax
jge 0x156960
xorl %r9d, %r9d
cmpl 0xd4(%r15,%r8), %r9d
jge 0x156959
movslq 0xe0(%r15,%r8), %rcx
movslq %eax, %r10
imulq %rcx, %r10
imulq %r11, %r10
addq %rbp, %r10
movslq 0xdc(%r15,%r8), %rcx
movslq %r9d, %r12
imulq %rcx, %r12
leaq (%r10,%r12,4), %rcx
xorl %r10d, %r10d
cmpl %r14d, %r10d
je 0x156954
movl %r13d, %r12d
subl $0x1, %r12d
jb 0x15694b
movss (%rcx), %xmm0
addss (%rdx), %xmm0
movss %xmm0, (%rcx)
movq -0x18(%rsi), %r8
movslq 0xe4(%r15,%r8), %rbx
leaq (%rcx,%rbx,4), %rcx
addq $0x4, %rdx
jmp 0x156923
leaq (%rcx,%rdi,4), %rcx
incl %r10d
jmp 0x15691b
incl %r9d
jmp 0x1568e5
incl %eax
jmp 0x1568d8
movq 0x10(%rsp), %rax
incq %rax
movq 0xd0(%rsp), %rbp
addq %rbp, 0x38(%rsp)
jmp 0x156827
movq 0x8(%r15), %rdi
testq %rdi, %rdi
je 0x156999
movq (%rdi), %rax
leaq 0xe0(%rsp), %rsi
movq 0x130(%rsp), %rdx
callq *0x48(%rax)
movq 0x1c8(%rsp), %rax
testq %rax, %rax
je 0x1569d0
lock
decl (%rax)
jne 0x1569d0
movq 0x1c0(%rsp), %rsi
movq 0x1e0(%rsp), %rdi
testq %rdi, %rdi
je 0x1569c8
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1569d0
movq %rsi, %rdi
callq 0x5f3e0
movq 0x180(%rsp), %rax
testq %rax, %rax
je 0x156a07
lock
decl (%rax)
jne 0x156a07
movq 0x178(%rsp), %rsi
movq 0x198(%rsp), %rdi
testq %rdi, %rdi
je 0x1569ff
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x156a07
movq %rsi, %rdi
callq 0x5f3e0
movq (%r15), %rax
addq -0x18(%rax), %r15
leaq 0xe0(%rsp), %rsi
movq %r15, %rdi
movq 0x160(%rsp), %rbx
movq %rbx, %rdx
movq 0x130(%rsp), %rcx
callq 0x153cf6
cmpq $0x0, (%rbx)
pushq $-0x64
popq %rcx
je 0x156a4a
movslq 0x38(%rbx), %rax
imulq 0x40(%rbx), %rax
xorl %ebx, %ebx
testq %rax, %rax
jne 0x156a4c
movl %ecx, %ebx
movq 0xe8(%rsp), %rax
testq %rax, %rax
je 0x156a83
lock
decl (%rax)
jne 0x156a83
movq 0xe0(%rsp), %rsi
movq 0x100(%rsp), %rdi
testq %rdi, %rdi
je 0x156a7b
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x156a83
movq %rsi, %rdi
callq 0x5f3e0
movl %ebx, %eax
addq $0x258, %rsp # imm = 0x258
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
cmpl $0x0, 0x104(%r15,%rax)
jle 0x156ab1
cmpl $0x0, 0x108(%r15,%rax)
jg 0x154c2e
pushq $0x8
popq %rsi
leaq 0xe0(%rsp), %rax
movq 0x160(%rsp), %rcx
cmpq %rcx, %rax
je 0x154c2e
movq 0x8(%rcx), %rax
testq %rax, %rax
je 0x156b43
lock
incl (%rax)
movq 0xe8(%rsp), %rax
testq %rax, %rax
je 0x156b43
lock
decl (%rax)
jne 0x156b43
movl %r9d, 0x30(%rsp)
movq %r11, 0x20(%rsp)
movq %r10, 0x38(%rsp)
movq 0xe0(%rsp), %rsi
movq 0x100(%rsp), %rdi
testq %rdi, %rdi
je 0x156b29
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x38(%rsp), %r10
movq 0x20(%rsp), %r11
movl 0x30(%rsp), %r9d
pushq $0x8
popq %rsi
jmp 0x156b43
movq %rsi, %rdi
callq 0x5f3e0
movq 0x38(%rsp), %r10
movq 0x20(%rsp), %r11
movl 0x30(%rsp), %r9d
pushq $0x8
popq %rsi
movq 0x160(%rsp), %rcx
movups (%rcx), %xmm0
movaps %xmm0, 0xe0(%rsp)
movq 0x10(%rcx), %rax
movq %rax, 0xf0(%rsp)
movl 0x18(%rcx), %eax
movl %eax, 0xf8(%rsp)
movq 0x20(%rcx), %rax
movq %rax, 0x100(%rsp)
movups 0x28(%rcx), %xmm0
movups %xmm0, 0x108(%rsp)
movl 0x38(%rcx), %eax
movl %eax, 0x118(%rsp)
movq 0x40(%rcx), %rax
movq %rax, 0x120(%rsp)
jmp 0x154c2e
jmp 0x156c69
jmp 0x156c69
jmp 0x156c69
movq %rax, %rbx
movq 0x1c8(%rsp), %rax
testq %rax, %rax
je 0x156be8
lock
decl (%rax)
jne 0x156be8
movq 0x1c0(%rsp), %rsi
movq 0x1e0(%rsp), %rdi
testq %rdi, %rdi
jne 0x156be2
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x156be8
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x180(%rsp), %rax
testq %rax, %rax
je 0x156c2a
lock
decl (%rax)
jne 0x156c2a
movq 0x178(%rsp), %rsi
movq 0x198(%rsp), %rdi
testq %rdi, %rdi
jne 0x156c19
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x156c2a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x156c2a
jmp 0x156c69
jmp 0x156c69
jmp 0x156c27
movq %rax, %rbx
movq 0xe8(%rsp), %rax
testq %rax, %rax
je 0x156c61
lock
decl (%rax)
jne 0x156c61
movq 0xe0(%rsp), %rsi
movq 0x100(%rsp), %rdi
testq %rdi, %rdi
jne 0x156c5b
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x156c61
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
nop
| /csukuangfj[P]ncnn/src/layer/x86/deconvolution_x86.cpp |
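The tail above ends in a six-way jump table on activation_type; by ncnn's usual convention, types 1 through 6 are relu, leaky relu, clip, sigmoid, mish and hardswish. Below is a minimal scalar sketch of that dispatch, assuming those standard definitions; apply_activation and params are illustrative names, not ncnn helpers, and the per-case math is written from the convention rather than transcribed from the listing. The libm calls in the listing (0x5f410, 0x5f200, 0x5f160) presumably correspond to expf, logf and tanhf, but that is an inference from the surrounding arithmetic.

#include <algorithm>
#include <cmath>

// Scalar sketch of the activation dispatch, assuming ncnn's usual
// activation_type numbering (0 = identity, 1 = relu, ..., 6 = hardswish).
static float apply_activation(float v, int activation_type, const float* params)
{
    switch (activation_type)
    {
    case 1: // relu
        return std::max(v, 0.f);
    case 2: // leaky relu, params[0] = negative slope
        return v > 0.f ? v : v * params[0];
    case 3: // clip to [params[0], params[1]]
        return std::min(std::max(v, params[0]), params[1]);
    case 4: // sigmoid
        return 1.f / (1.f + std::exp(-v));
    case 5: // mish: v * tanh(ln(1 + exp(v)))
        return v * std::tanh(std::log(1.f + std::exp(v)));
    case 6: // hardswish, params[0] = alpha, params[1] = beta
    {
        const float lower = -params[1] / params[0];
        const float upper = 1.f / params[0] + lower;
        if (v < lower) return 0.f;
        if (v > upper) return v;
        return v * (v * params[0] + params[1]);
    }
    }
    return v; // type 0: no activation
}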
virtual thunk to ncnn::Deconvolution_x86_avx::create_pipeline(ncnn::Option const&) | int Deconvolution_x86_avx::create_pipeline(const Option& opt)
{
activation = create_activation_layer(activation_type, activation_params, opt);
const int maxk = kernel_w * kernel_h;
int num_input = weight_data_size / maxk / num_output;
int elempack = 1;
int out_elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
#if __AVX512F__
elempack = num_input % 16 == 0 ? 16 : num_input % 8 == 0 ? 8 : num_input % 4 == 0 ? 4 : 1;
out_elempack = num_output % 16 == 0 ? 16 : num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#elif __AVX__
elempack = num_input % 8 == 0 ? 8 : num_input % 4 == 0 ? 4 : 1;
out_elempack = num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#else
elempack = num_input % 4 == 0 ? 4 : 1;
out_elempack = num_output % 4 == 0 ? 4 : 1;
#endif
}
#endif // __SSE2__
if (opt.use_sgemm_convolution)
{
const int maxk = kernel_w * kernel_h;
gemm = ncnn::create_layer(ncnn::LayerType::Gemm);
ncnn::ParamDict pd;
pd.set(2, 1); // transA
pd.set(3, 0); // transB
pd.set(4, 1); // constantA
pd.set(5, 0); // constantB
pd.set(6, 1); // constantC
pd.set(7, maxk * num_output); // M = maxk*num_output
pd.set(8, 0); // N = size
pd.set(9, num_input); // K = inch
pd.set(10, -1); // constant_broadcast_type_C = null
pd.set(11, 0); // output_N1M
pd.set(12, out_elempack);
gemm->load_param(pd);
// maxk-inch-outch to pa-maxk-outch/pa-inch
Mat tmp;
{
Mat weight_data_r2 = weight_data.reshape(maxk, num_input, num_output);
tmp.create(maxk * num_output, num_input);
for (int p = 0; p < num_input; p += 1)
{
float* g00 = tmp.row(p);
for (int q = 0; q + (out_elempack - 1) < num_output; q += out_elempack)
{
for (int k = 0; k < maxk; k++)
{
for (int i = 0; i < out_elempack; i++)
{
const float* k00 = weight_data_r2.channel(q + i).row(p);
g00[0] = k00[k];
g00++;
}
}
}
}
}
ncnn::Mat weights[1];
weights[0] = tmp;
gemm->load_model(ModelBinFromMatArray(weights));
gemm->create_pipeline(opt);
}
else
{
Mat weight_data_transposed(weight_data.w);
{
float* pt = weight_data_transposed;
const float* p = weight_data;
for (int i = 0; i < num_input * num_output; i++)
{
for (int k = 0; k < maxk; k++)
{
pt[maxk - 1 - k] = p[k];
}
p += maxk;
pt += maxk;
}
}
// src = kw-kh-inch-outch
// dst = pb-pa-kw-kh-inch/pa-outch/pb
Mat weight_data_r2 = weight_data_transposed.reshape(maxk, num_input, num_output);
weight_data_tm.create(maxk, num_input / elempack, num_output / out_elempack, (size_t)4u * elempack * out_elempack, elempack * out_elempack);
for (int q = 0; q + (out_elempack - 1) < num_output; q += out_elempack)
{
float* g00 = weight_data_tm.channel(q / out_elempack);
for (int p = 0; p + (elempack - 1) < num_input; p += elempack)
{
for (int k = 0; k < maxk; k++)
{
for (int i = 0; i < elempack; i++)
{
for (int j = 0; j < out_elempack; j++)
{
const float* k00 = weight_data_r2.channel(q + j).row(p + i);
g00[0] = k00[k];
g00++;
}
}
}
}
}
}
if (opt.lightmode)
{
weight_data.release();
}
return 0;
} | pushq %rax
movq (%rdi), %rax
addq -0x30(%rax), %rdi
callq 0x164cde
xorl %eax, %eax
popq %rcx
retq
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/deconvolution_x86_avx.cpp |
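The non-sgemm branch of create_pipeline above reverses each kw*kh window of weight_data (pt[maxk - 1 - k] = p[k]) so the deconvolution can later be computed convolution-style. A standalone sketch of just that flip, with std::vector standing in for ncnn::Mat storage and flip_kernels an illustrative name:

#include <cstddef>
#include <vector>

// Reverse every maxk-long window: dst[maxk-1-k] = src[k], as in the
// weight_data_transposed loop above.
std::vector<float> flip_kernels(const std::vector<float>& weights, int maxk)
{
    std::vector<float> flipped(weights.size());
    for (std::size_t base = 0; base + maxk <= weights.size(); base += maxk)
        for (int k = 0; k < maxk; k++)
            flipped[base + maxk - 1 - k] = weights[base + k];
    return flipped;
}

For example, with maxk = 3, {a,b,c, d,e,f} becomes {c,b,a, f,e,d}: each 3-tap kernel is mirrored while the kernel order is preserved.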
ncnn::Deconvolution_x86_avx::destroy_pipeline(ncnn::Option const&) | int Deconvolution_x86_avx::destroy_pipeline(const Option& opt)
{
if (activation)
{
activation->destroy_pipeline(opt);
delete activation;
activation = 0;
}
if (gemm)
{
gemm->destroy_pipeline(opt);
delete gemm;
gemm = 0;
}
return 0;
} | pushq %r14
pushq %rbx
pushq %rax
movq %rsi, %r14
movq %rdi, %rbx
movq 0x8(%rdi), %rdi
testq %rdi, %rdi
je 0x165840
movq (%rdi), %rax
movq %r14, %rsi
callq *0x28(%rax)
movq 0x8(%rbx), %rdi
testq %rdi, %rdi
je 0x16583b
movq (%rdi), %rax
callq *0x8(%rax)
andq $0x0, 0x8(%rbx)
movq 0x10(%rbx), %rdi
testq %rdi, %rdi
je 0x165866
movq (%rdi), %rax
movq %r14, %rsi
callq *0x28(%rax)
movq 0x10(%rbx), %rdi
testq %rdi, %rdi
je 0x165861
movq (%rdi), %rax
callq *0x8(%rax)
andq $0x0, 0x10(%rbx)
xorl %eax, %eax
addq $0x8, %rsp
popq %rbx
popq %r14
retq
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/deconvolution_x86_avx.cpp |
ncnn::InnerProduct_x86::create_pipeline_int8_x86(ncnn::Option const&) | int InnerProduct_x86::create_pipeline_int8_x86(const Option& opt)
{
const int num_input = weight_data_size / num_output;
int out_elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
out_elempack = num_output % 8 == 0 ? 8 : 1;
}
#endif // __SSE2__
// src = inch-outch
// dst = pb-inch-outch/pb
{
Mat weight_data_r2 = weight_data.reshape(num_input, num_output);
weight_data_tm.create(num_input, num_output / out_elempack, (size_t)out_elempack, out_elempack);
for (int q = 0; q + (out_elempack - 1) < num_output; q += out_elempack)
{
signed char* g0 = weight_data_tm.row<signed char>(q / out_elempack);
for (int p = 0; p < num_input; p++)
{
for (int j = 0; j < out_elempack; j++)
{
*g0++ = weight_data_r2.row<signed char>(q + j)[p];
}
}
}
}
scale_in_data.create(num_output);
for (int p = 0; p < num_output; p++)
{
// dequantize
float scale_in;
if (weight_data_int8_scales[p] == 0)
scale_in = 0;
else
scale_in = 1.f / (bottom_blob_int8_scales[0] * weight_data_int8_scales[p]);
scale_in_data[p] = scale_in;
}
if (opt.lightmode)
{
weight_data.release();
}
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x48, %rsp
movq %rsi, %rbx
movq %rdi, %r14
movq (%rdi), %rax
movq -0x18(%rax), %rax
leaq 0x130(%rdi,%rax), %rsi
movl -0x60(%rsi), %ecx
movl -0x58(%rsi), %eax
cltd
idivl %ecx
movl %eax, %r15d
testb $0x7, %cl
sete %bpl
andb 0x27(%rbx), %bpl
pushq $0x8
popq %rax
pushq $0x1
popq %r12
cmovnel %eax, %r12d
xorl %r13d, %r13d
movq %rsp, %rdi
movl %r15d, %edx
xorl %r8d, %r8d
callq 0x62e4e
leaq 0x10(%r14), %rdi
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xd0(%r14,%rax), %eax
cltd
idivl %r12d
movl %r15d, %esi
movl %eax, %edx
movq %r12, %rcx
movl %r12d, %r8d
xorl %r9d, %r9d
callq 0x627de
leal -0x1(%r12), %eax
movq (%r14), %rdx
movzbl %bpl, %ecx
leal (%rcx,%rcx,2), %ecx
testl %r15d, %r15d
cmovlel %r13d, %r15d
leaq (%rax,%r13), %rsi
movq -0x18(%rdx), %rdi
movslq 0xd0(%r14,%rdi), %rdi
cmpq %rdi, %rsi
jge 0x175b27
movl %r13d, %esi
shrl %cl, %esi
movslq 0x3c(%r14), %rdi
imulq %rdi, %rsi
imulq 0x20(%r14), %rsi
addq 0x10(%r14), %rsi
xorl %edi, %edi
cmpq %r15, %rdi
je 0x175b22
xorl %r8d, %r8d
cmpq %r8, %r12
je 0x175b1a
leaq (%r8,%r13), %r9
movslq 0x2c(%rsp), %r10
imulq %r9, %r10
imulq 0x10(%rsp), %r10
addq (%rsp), %r10
movb (%rdi,%r10), %r9b
movb %r9b, (%rsi,%r8)
incq %r8
jmp 0x175af1
incq %rdi
addq %r8, %rsi
jmp 0x175ae9
addq %r12, %r13
jmp 0x175abc
movq 0x8(%rsp), %rax
testq %rax, %rax
je 0x175b54
lock
decl (%rax)
jne 0x175b54
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
je 0x175b4c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x175b54
movq %rsi, %rdi
callq 0x5f3e0
leaq 0x58(%r14), %rdi
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xd0(%r14,%rax), %esi
xorl %r12d, %r12d
pushq $0x4
popq %rdx
xorl %ecx, %ecx
callq 0x635fa
movq (%r14), %rax
movq 0x58(%r14), %rcx
movss 0x279105(%rip), %xmm0 # 0x3eec88
movq -0x18(%rax), %rdx
leaq (%r14,%rdx), %r15
movslq 0xd0(%r14,%rdx), %rdx
cmpq %rdx, %r12
jge 0x175bca
movq 0x1c0(%r15), %rdx
movss (%rdx,%r12,4), %xmm1
xorps %xmm2, %xmm2
ucomiss %xmm1, %xmm2
je 0x175bbf
movq 0x208(%r15), %rdx
mulss (%rdx), %xmm1
movaps %xmm0, %xmm2
divss %xmm1, %xmm2
movss %xmm2, (%rcx,%r12,4)
incq %r12
jmp 0x175b83
cmpb $0x0, (%rbx)
je 0x175c2c
leaq 0x130(%r15), %rbx
movq 0x138(%r15), %rax
testq %rax, %rax
je 0x175c0a
lock
decl (%rax)
jne 0x175c0a
movq 0x130(%r15), %rsi
movq 0x150(%r15), %rdi
testq %rdi, %rdi
je 0x175c02
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x175c0a
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x170(%r15)
xorps %xmm0, %xmm0
movups %xmm0, 0xc(%rbx)
movups %xmm0, (%rbx)
movups %xmm0, 0x158(%r15)
andl $0x0, 0x168(%r15)
xorl %eax, %eax
addq $0x48, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x175c77
movq %rax, %rbx
movq 0x8(%rsp), %rax
testq %rax, %rax
je 0x175c6f
lock
decl (%rax)
jne 0x175c6f
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
jne 0x175c69
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x175c6f
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
nop
| /csukuangfj[P]ncnn/src/layer/x86/innerproduct_x86.cpp |
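The scale_in loop above folds two quantization scales into one dequantization factor: an int32 accumulator built from int8 inputs and int8 weights is returned to float by multiplying with 1 / (bottom_scale * weight_scale), and a zero weight scale maps to scale_in = 0 to avoid dividing by zero. A small self-contained sketch with made-up scale values:

#include <cstdio>

// Worked sketch of scale_in from create_pipeline_int8_x86 above.
// All numeric values are illustrative, not taken from a real model.
int main()
{
    const float bottom_blob_int8_scale = 63.5f; // input quantization scale (assumed)
    const float weight_data_int8_scale = 127.f; // per-channel weight scale (assumed)

    float scale_in = 0.f;
    if (weight_data_int8_scale != 0.f)
        scale_in = 1.f / (bottom_blob_int8_scale * weight_data_int8_scale);

    const int acc = 8064; // e.g. an int8 dot-product accumulator
    std::printf("dequantized: %f\n", acc * scale_in); // roughly 1.0 for these values
    return 0;
}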
ncnn::Input::Input() | Input::Input()
{
one_blob_only = true;
support_inplace = true;
support_vulkan = true;
support_packing = true;
support_bf16_storage = true;
support_image_storage = true;
} | pushq %rbx
movq %rdi, %rbx
callq 0x77e84
leaq 0x2a7e68(%rip), %rax # 0x4836d0
movq %rax, (%rbx)
movb $0x1, %al
movb %al, 0xf(%rbx)
movb %al, 0xc(%rbx)
movl $0x1010101, 0x8(%rbx) # imm = 0x1010101
popq %rbx
retq
| /csukuangfj[P]ncnn/src/layer/input.cpp |
ncnn::Log::forward_inplace(ncnn::Mat&, ncnn::Option const&) const | int Log::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
{
int w = bottom_top_blob.w;
int h = bottom_top_blob.h;
int channels = bottom_top_blob.c;
int size = w * h;
if (base == -1.f)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
float* ptr = bottom_top_blob.channel(q);
for (int i = 0; i < size; i++)
{
ptr[i] = logf(shift + ptr[i] * scale);
}
}
}
else
{
float log_base_inv = 1.f / logf(base);
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
float* ptr = bottom_top_blob.channel(q);
for (int i = 0; i < size; i++)
{
ptr[i] = logf(shift + ptr[i] * scale) * log_base_inv;
}
}
}
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x18, %rsp
movq %rsi, %r13
movq %rdi, %rbx
movl 0x30(%rsi), %r15d
movl 0x38(%rsi), %r12d
imull 0x2c(%rsi), %r15d
movss 0xd0(%rdi), %xmm0
movss 0x21584b(%rip), %xmm1 # 0x3f11f0
ucomiss %xmm0, %xmm1
jne 0x1dba13
movq (%r13), %r14
movq 0x40(%r13), %rax
xorl %ebp, %ebp
testl %r15d, %r15d
cmovlel %ebp, %r15d
testl %r12d, %r12d
cmovlel %ebp, %r12d
imulq 0x10(%r13), %rax
movq %rax, 0x8(%rsp)
cmpq %r12, %rbp
je 0x1dba94
movq %r12, %r13
xorl %r12d, %r12d
cmpq %r12, %r15
je 0x1dba06
movss 0xd4(%rbx), %xmm0
mulss (%r14,%r12,4), %xmm0
addss 0xd8(%rbx), %xmm0
callq 0x5f200
movss %xmm0, (%r14,%r12,4)
incq %r12
jmp 0x1db9db
incq %rbp
addq 0x8(%rsp), %r14
movq %r13, %r12
jmp 0x1db9cc
callq 0x5f200
movq %r13, %rcx
movq (%r13), %r13
xorl %ebp, %ebp
testl %r15d, %r15d
cmovlel %ebp, %r15d
movq 0x40(%rcx), %rax
testl %r12d, %r12d
cmovlel %ebp, %r12d
movss 0x21324d(%rip), %xmm1 # 0x3eec88
divss %xmm0, %xmm1
movss %xmm1, 0x8(%rsp)
imulq 0x10(%rcx), %rax
movq %rax, 0x10(%rsp)
cmpq %r12, %rbp
je 0x1dba94
xorl %r14d, %r14d
cmpq %r14, %r15
je 0x1dba8a
movss 0xd4(%rbx), %xmm0
mulss (%r13,%r14,4), %xmm0
addss 0xd8(%rbx), %xmm0
callq 0x5f200
mulss 0x8(%rsp), %xmm0
movss %xmm0, (%r13,%r14,4)
incq %r14
jmp 0x1dba57
incq %rbp
addq 0x10(%rsp), %r13
jmp 0x1dba4f
xorl %eax, %eax
addq $0x18, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
| /csukuangfj[P]ncnn/src/layer/log.cpp |
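Both branches above compute y = log_base(shift + x * scale); base == -1 selects the natural log, and the general case uses the change-of-base identity log_b(v) = ln(v) / ln(b), which is why 1 / logf(base) is computed once before the loops. A scalar sketch of the same math:

#include <cmath>

// Scalar form of Log::forward_inplace's per-element math; parameter names
// mirror the members used in the source above.
float log_op(float x, float base, float scale, float shift)
{
    const float v = shift + x * scale;
    if (base == -1.f)
        return std::log(v);
    return std::log(v) * (1.f / std::log(base)); // loop-invariant 1/ln(base)
}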
ncnn::LRN::forward_inplace(ncnn::Mat&, ncnn::Option const&) const | int LRN::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
{
int w = bottom_top_blob.w;
int h = bottom_top_blob.h;
int channels = bottom_top_blob.c;
size_t elemsize = bottom_top_blob.elemsize;
int size = w * h;
// squared values with local_size padding
Mat square_blob;
square_blob.create(w, h, channels, elemsize, opt.workspace_allocator);
if (square_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
const float* ptr = bottom_top_blob.channel(q);
float* outptr = square_blob.channel(q);
for (int i = 0; i < size; i++)
{
outptr[i] = ptr[i] * ptr[i];
}
}
if (region_type == NormRegion_ACROSS_CHANNELS)
{
Mat square_sum;
square_sum.create(w, h, channels, elemsize, opt.workspace_allocator);
if (square_sum.empty())
return -100;
square_sum.fill(0.f);
const float alpha_div_size = alpha / local_size;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
// square sum
float* ssptr = square_sum.channel(q);
for (int p = q - local_size / 2; p <= q + local_size / 2; p++)
{
if (p < 0 || p >= channels)
continue;
const float* sptr = square_blob.channel(p);
for (int i = 0; i < size; i++)
{
ssptr[i] += sptr[i];
}
}
float* ptr = bottom_top_blob.channel(q);
for (int i = 0; i < size; i++)
{
ptr[i] = ptr[i] * powf(bias + alpha_div_size * ssptr[i], -beta);
}
}
}
else if (region_type == NormRegion_WITHIN_CHANNEL)
{
int outw = w;
int outh = h;
Mat square_blob_bordered = square_blob;
int pad = local_size / 2;
if (pad > 0)
{
Option opt_b = opt;
opt_b.blob_allocator = opt.workspace_allocator;
opt_b.use_packing_layout = false;
copy_make_border(square_blob, square_blob_bordered, pad, local_size - pad - 1, pad, local_size - pad - 1, BORDER_CONSTANT, 0.f, opt_b);
if (square_blob_bordered.empty())
return -100;
w = square_blob_bordered.w;
h = square_blob_bordered.h;
}
const int maxk = local_size * local_size;
const float alpha_div_size = alpha / maxk;
// norm window offsets
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
int gap = w - local_size;
for (int i = 0; i < local_size; i++)
{
for (int j = 0; j < local_size; j++)
{
space_ofs[p1] = p2;
p1++;
p2++;
}
p2 += gap;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
float* ptr = bottom_top_blob.channel(q);
const Mat m = square_blob_bordered.channel(q);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
const float* sptr = m.row(i) + j;
float ss = 0.f;
for (int k = 0; k < maxk; k++)
{
float val = sptr[space_ofs[k]];
ss += val;
}
ptr[j] = ptr[j] * powf(bias + alpha_div_size * ss, -beta);
}
ptr += outw;
}
}
}
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x158, %rsp # imm = 0x158
movq %rsi, %rbp
movl 0x2c(%rsi), %esi
movl 0x30(%rbp), %ecx
movl 0x38(%rbp), %r13d
movq 0x10(%rbp), %r15
leaq 0x90(%rsp), %rax
andq $0x0, 0x40(%rax)
movq %rdx, %r14
movq %rdi, %rbx
xorps %xmm0, %xmm0
movaps %xmm0, (%rax)
movups %xmm0, 0xc(%rax)
movaps %xmm0, 0x20(%rax)
movups %xmm0, 0x2c(%rax)
movq 0x10(%rdx), %r9
movq %rax, %rdi
movq %rsi, %r12
movq %rcx, (%rsp)
movl %ecx, %edx
movl %r13d, %ecx
movq %r15, %r8
callq 0x63810
movq 0x90(%rsp), %rcx
pushq $-0x64
popq %r10
testq %rcx, %rcx
je 0x1dbc70
movq 0xd0(%rsp), %rdx
movslq 0xc8(%rsp), %rax
movq %rdx, %rsi
imulq %rax, %rsi
testq %rsi, %rsi
je 0x1dbc70
movq %r15, %r11
movq %r14, 0x18(%rsp)
movq %r12, %rsi
movslq %esi, %rdi
movq %rdi, 0x78(%rsp)
movq (%rsp), %r14
imull %esi, %r14d
movq (%rbp), %rsi
movq 0x40(%rbp), %rdi
imulq 0xa0(%rsp), %rdx
xorl %r8d, %r8d
testl %r14d, %r14d
cmovlel %r8d, %r14d
testl %r13d, %r13d
movl $0x0, %r15d
cmovgl %r13d, %r15d
imulq 0x10(%rbp), %rdi
cmpq %r15, %r8
je 0x1dbcb1
xorl %r9d, %r9d
cmpq %r9, %r14
je 0x1dbc65
movss (%rsi,%r9,4), %xmm0
mulss %xmm0, %xmm0
movss %xmm0, (%rcx,%r9,4)
incq %r9
jmp 0x1dbc4b
incq %r8
addq %rdx, %rcx
addq %rdi, %rsi
jmp 0x1dbc43
movl %r10d, %r14d
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0x1dbe5c
lock
decl (%rax)
jne 0x1dbe5c
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
je 0x1dbe54
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1dbe5c
movl 0xd0(%rbx), %ecx
testl %ecx, %ecx
je 0x1dbddf
xorl %r14d, %r14d
cmpl $0x1, %ecx
jne 0x1dbc73
movq 0x90(%rsp), %rdx
movq 0x98(%rsp), %rcx
movq %rdx, 0x20(%rsp)
movq %rcx, 0x28(%rsp)
movq 0xa0(%rsp), %rdx
movq %rdx, 0x30(%rsp)
movl 0xa8(%rsp), %edx
movl %edx, 0x38(%rsp)
movq 0xb0(%rsp), %rdx
movq %rdx, 0x40(%rsp)
movups 0xb8(%rsp), %xmm0
movups %xmm0, 0x48(%rsp)
movl %eax, 0x58(%rsp)
movq 0xd0(%rsp), %rax
movq %rax, 0x60(%rsp)
testq %rcx, %rcx
je 0x1dbd2c
lock
incl (%rcx)
movl 0xd4(%rbx), %r13d
cmpl $0x2, %r13d
jl 0x1dbea9
movl %r10d, %r14d
movl %r13d, %edx
shrl %edx
movq 0x18(%rsp), %r11
movups (%r11), %xmm0
movups 0x10(%r11), %xmm1
movups 0x20(%r11), %xmm2
movups 0x30(%r11), %xmm3
leaq 0x110(%rsp), %rax
movaps %xmm3, 0x30(%rax)
movaps %xmm2, 0x20(%rax)
movaps %xmm1, 0x10(%rax)
movaps %xmm0, (%rax)
movq 0x10(%r11), %rcx
movq %rcx, 0x8(%rax)
movb $0x0, 0x27(%rax)
movl %edx, %ecx
notl %ecx
addl %r13d, %ecx
leaq 0x90(%rsp), %rdi
leaq 0x20(%rsp), %rsi
xorps %xmm0, %xmm0
movl %edx, %r8d
movl %ecx, %r9d
pushq %rax
pushq $0x0
callq 0x6466c
popq %rax
popq %rcx
cmpq $0x0, 0x20(%rsp)
movl %r14d, %ecx
je 0x1dc271
movslq 0x58(%rsp), %rax
imulq 0x60(%rsp), %rax
testq %rax, %rax
je 0x1dc271
movq %rbp, 0x10(%rsp)
movl 0x4c(%rsp), %ebp
movl 0xd4(%rbx), %r13d
jmp 0x1dbeb1
leaq 0x20(%rsp), %rdi
andq $0x0, 0x40(%rdi)
xorps %xmm0, %xmm0
movaps %xmm0, (%rdi)
movups %xmm0, 0xc(%rdi)
movaps %xmm0, 0x20(%rdi)
movups %xmm0, 0x2c(%rdi)
movq 0x18(%rsp), %rax
movq 0x10(%rax), %r9
movl %r12d, %esi
movq (%rsp), %rdx
movl %r13d, %ecx
movq %r11, %r8
callq 0x63810
movq 0x20(%rsp), %rax
testq %rax, %rax
pushq $-0x64
popq %rdi
je 0x1dbe71
movq 0x60(%rsp), %rcx
movslq 0x58(%rsp), %rdx
movq %rcx, %rsi
imulq %rdx, %rsi
testq %rsi, %rsi
je 0x1dbe71
imull %edx, %ecx
xorl %edx, %edx
testl %ecx, %ecx
cmovlel %edx, %ecx
cmpl %edx, %ecx
je 0x1dc0f2
andl $0x0, (%rax,%rdx,4)
incq %rdx
jmp 0x1dbe43
movq %rsi, %rdi
callq 0x5f3e0
movl %r14d, %eax
addq $0x158, %rsp # imm = 0x158
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x1dc2a7
lock
decl (%rax)
jne 0x1dc2a7
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x1dc29c
movq (%rdi), %rax
callq *0x18(%rax)
pushq $-0x64
popq %rdi
jmp 0x1dc2a7
movq %rbp, 0x10(%rsp)
movl %r12d, %ebp
imull %r13d, %r13d
movss 0xd8(%rbx), %xmm0
movss %xmm0, 0x8(%rsp)
leaq 0x110(%rsp), %rdi
leaq 0xf(%rsp), %rdx
movq %r13, %rsi
callq 0x73bbe
cvtsi2ss %r13d, %xmm0
movq 0x110(%rsp), %r14
movl 0xd4(%rbx), %ecx
subl %ecx, %ebp
xorl %eax, %eax
xorl %edx, %edx
xorl %esi, %esi
cmpl %ecx, %esi
jge 0x1dbf24
cltq
leaq (%r14,%rax,4), %r8
movl %edx, %r9d
xorl %edi, %edi
cmpl %ecx, %edi
jge 0x1dbf19
leaq (%r9,%rdi), %rcx
movl %ecx, (%r8,%rdi,4)
movl 0xd4(%rbx), %ecx
incq %rdi
jmp 0x1dbf02
addl %ebp, %edx
addl %edi, %edx
incl %esi
addq %rdi, %rax
jmp 0x1dbef3
movq %rbx, 0x18(%rsp)
movq 0x10(%rsp), %rcx
movq 0x40(%rcx), %rax
imulq 0x10(%rcx), %rax
movq %rax, 0xf8(%rsp)
movslq 0x4c(%rsp), %rdx
movq 0x30(%rsp), %rax
movq 0x60(%rsp), %rsi
imulq %rax, %rsi
movq %rsi, 0xf0(%rsp)
imulq %rax, %rdx
movq %rdx, 0x108(%rsp)
xorl %eax, %eax
movq %r12, %rbx
testl %ebx, %ebx
cmovlel %eax, %ebx
movq (%rcx), %rcx
movq %rcx, 0xe8(%rsp)
movq (%rsp), %r12
testl %r12d, %r12d
cmovlel %eax, %r12d
movq %r12, (%rsp)
xorl %eax, %eax
movq 0x20(%rsp), %rcx
movq %rcx, 0xe0(%rsp)
movss 0x212ce7(%rip), %xmm1 # 0x3eec88
divss %xmm0, %xmm1
movss %xmm1, 0x10(%rsp)
movq %r15, 0x70(%rsp)
cmpq %r15, %rax
je 0x1dc0b0
movq 0xf8(%rsp), %r12
imulq %rax, %r12
addq 0xe8(%rsp), %r12
movq 0xf0(%rsp), %rcx
movq %rax, 0x100(%rsp)
imulq %rax, %rcx
addq 0xe0(%rsp), %rcx
movq %rcx, 0x80(%rsp)
xorl %eax, %eax
cmpq (%rsp), %rax
je 0x1dc09b
movq 0x108(%rsp), %rbp
movq %rax, 0x88(%rsp)
imulq %rax, %rbp
addq 0x80(%rsp), %rbp
xorl %r15d, %r15d
cmpq %rbx, %r15
je 0x1dc082
leaq (,%r15,4), %rax
addq %rbp, %rax
xorps %xmm0, %xmm0
xorl %ecx, %ecx
cmpq %rcx, %r13
je 0x1dc044
movslq (%r14,%rcx,4), %rdx
addss (%rax,%rdx,4), %xmm0
incq %rcx
jmp 0x1dc031
mulss 0x8(%rsp), %xmm0
mulss 0x10(%rsp), %xmm0
movq 0x18(%rsp), %rax
addss 0xe0(%rax), %xmm0
movss 0xdc(%rax), %xmm1
xorps 0x212024(%rip), %xmm1 # 0x3ee090
callq 0x5f0e0
mulss (%r12,%r15,4), %xmm0
movss %xmm0, (%r12,%r15,4)
incq %r15
jmp 0x1dc01c
movq 0x78(%rsp), %rax
leaq (%r12,%rax,4), %r12
movq 0x88(%rsp), %rax
incq %rax
jmp 0x1dbff3
movq 0x100(%rsp), %rax
incq %rax
movq 0x70(%rsp), %r15
jmp 0x1dbfb0
leaq 0x110(%rsp), %rdi
callq 0x624be
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x1dc2b7
lock
decl (%rax)
jne 0x1dc2b7
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x1dc29a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1dc2b7
movq %r15, 0x70(%rsp)
movl 0xd4(%rbx), %ecx
cvtsi2ss %ecx, %xmm0
movss 0xd8(%rbx), %xmm1
movss %xmm1, 0x18(%rsp)
movq 0x20(%rsp), %r12
movq 0x60(%rsp), %rdi
pushq $-0x2
popq %rsi
movl %ecx, %eax
cltd
idivl %esi
movq %rax, (%rsp)
pushq $0x2
popq %rsi
movl %ecx, %eax
cltd
idivl %esi
movq (%rbp), %r15
movq 0x40(%rbp), %rcx
cltq
movq %rax, 0x78(%rsp)
movss 0x212b44(%rip), %xmm1 # 0x3eec88
divss %xmm0, %xmm1
movss %xmm1, 0x8(%rsp)
imulq 0x30(%rsp), %rdi
movq %rdi, 0x88(%rsp)
imulq 0x10(%rbp), %rcx
movq %rcx, 0x80(%rsp)
xorl %edx, %edx
cmpq 0x70(%rsp), %rdx
je 0x1dc24b
movq (%rsp), %rax
addl %edx, %eax
movq 0x78(%rsp), %rcx
movq %rdx, 0x10(%rsp)
addq %rdx, %rcx
movq 0x90(%rsp), %rdx
movq 0xd0(%rsp), %rsi
imulq 0xa0(%rsp), %rsi
movslq %eax, %rdi
cmpq %rdi, %rcx
jl 0x1dc1e8
testl %eax, %eax
sets %dil
cmpl %r13d, %eax
setge %r8b
orb %dil, %r8b
jne 0x1dc1e4
movl %eax, %edi
imulq %rsi, %rdi
addq %rdx, %rdi
xorl %r8d, %r8d
cmpq %r8, %r14
je 0x1dc1e4
movss (%r12,%r8,4), %xmm0
addss (%rdi,%r8,4), %xmm0
movss %xmm0, (%r12,%r8,4)
incq %r8
jmp 0x1dc1c8
incl %eax
jmp 0x1dc1a2
xorl %ebp, %ebp
cmpq %rbp, %r14
je 0x1dc22e
movss (%r12,%rbp,4), %xmm0
mulss 0x18(%rsp), %xmm0
mulss 0x8(%rsp), %xmm0
addss 0xe0(%rbx), %xmm0
movss 0xdc(%rbx), %xmm1
xorps 0x211e78(%rip), %xmm1 # 0x3ee090
callq 0x5f0e0
mulss (%r15,%rbp,4), %xmm0
movss %xmm0, (%r15,%rbp,4)
incq %rbp
jmp 0x1dc1ea
movq 0x10(%rsp), %rdx
incq %rdx
addq 0x88(%rsp), %r12
addq 0x80(%rsp), %r15
jmp 0x1dc16b
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x1dc2b7
lock
decl (%rax)
jne 0x1dc2b7
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x1dc2af
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1dc2b7
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x1dc2ca
lock
decl (%rax)
jne 0x1dc2ca
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x1dc2bf
movq (%rdi), %rax
callq *0x18(%rax)
movl %r14d, %ecx
jmp 0x1dc2ca
jmp 0x1dc2af
movq %rsi, %rdi
callq 0x5f3e0
pushq $-0x64
popq %rdi
movl %edi, %r14d
jmp 0x1dbc73
movq %rsi, %rdi
callq 0x5f3e0
xorl %r14d, %r14d
jmp 0x1dbc73
movq %rsi, %rdi
callq 0x5f3e0
movl %r14d, %ecx
movl %ecx, %r14d
jmp 0x1dbc73
jmp 0x1dc38e
jmp 0x1dc38e
jmp 0x1dc38e
jmp 0x1dc38e
jmp 0x1dc2e8
movq %rax, %rbx
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x1dc34f
lock
decl (%rax)
jne 0x1dc34f
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0x1dc30b
jmp 0x1dc336
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1dc34f
jmp 0x1dc38e
movq %rax, %rbx
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x1dc34f
lock
decl (%rax)
jne 0x1dc34f
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0x1dc340
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x1dc34f
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1dc34f
jmp 0x1dc38e
jmp 0x1dc38e
movq %rax, %rbx
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0x1dc386
lock
decl (%rax)
jne 0x1dc386
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
jne 0x1dc380
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x1dc386
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
| /csukuangfj[P]ncnn/src/layer/lrn.cpp |
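Both normalization regions above apply the same pointwise factor: each value is multiplied by (bias + alpha_div_size * ss)^-beta, where ss is a sum of squared neighbours, taken across channels in one branch and across a local_size x local_size spatial window in the other. A minimal scalar sketch of the ACROSS_CHANNELS path, assuming a dense c-h-w float layout instead of ncnn::Mat:

#include <cmath>
#include <vector>

// Scalar sketch of LRN's ACROSS_CHANNELS branch. `size` is w*h, matching
// the source above; allocators and OpenMP are deliberately omitted.
void lrn_across_channels(std::vector<float>& data, int channels, int size,
                         int local_size, float alpha, float beta, float bias)
{
    const float alpha_div_size = alpha / local_size;
    std::vector<float> out(data.size());
    for (int q = 0; q < channels; q++)
    {
        for (int i = 0; i < size; i++)
        {
            float ss = 0.f;
            for (int p = q - local_size / 2; p <= q + local_size / 2; p++)
            {
                if (p < 0 || p >= channels)
                    continue;
                const float v = data[p * size + i];
                ss += v * v;
            }
            out[q * size + i] = data[q * size + i]
                              * std::pow(bias + alpha_div_size * ss, -beta);
        }
    }
    data.swap(out);
}

The source instead precomputes square_blob once and accumulates it into square_sum per output channel, which avoids re-squaring each value local_size times.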
virtual thunk to ncnn::LRN_x86::forward_inplace(ncnn::Mat&, ncnn::Option const&) const | int LRN_x86::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
{
int w = bottom_top_blob.w;
int h = bottom_top_blob.h;
int channels = bottom_top_blob.c;
size_t elemsize = bottom_top_blob.elemsize;
int size = w * h;
// squared values with local_size padding
Mat square_blob;
square_blob.create(w, h, channels, elemsize, opt.workspace_allocator);
if (square_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
const float* ptr = bottom_top_blob.channel(q);
float* outptr = square_blob.channel(q);
int i = 0;
#if __AVX__
for (; i + 7 < size; i += 8)
{
__m256 _p = _mm256_loadu_ps(ptr);
__m256 _outp = _mm256_mul_ps(_p, _p);
_mm256_storeu_ps(outptr, _outp);
ptr += 8;
outptr += 8;
}
#endif // __AVX__
for (; i < size; i++)
{
*outptr = *ptr * *ptr;
ptr++;
outptr++;
}
}
if (region_type == NormRegion_ACROSS_CHANNELS)
{
Mat square_sum;
square_sum.create(w, h, channels, elemsize, opt.workspace_allocator);
if (square_sum.empty())
return -100;
square_sum.fill(0.f);
const float alpha_div_size = alpha / local_size;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
// square sum
for (int p = q - local_size / 2; p <= q + local_size / 2; p++)
{
if (p < 0 || p >= channels)
continue;
const float* sptr = square_blob.channel(p);
float* ssptr = square_sum.channel(q);
int i = 0;
#if __AVX__
for (; i + 7 < size; i += 8)
{
__m256 _sp = _mm256_loadu_ps(sptr);
__m256 _ssp = _mm256_loadu_ps(ssptr);
_ssp = _mm256_add_ps(_ssp, _sp);
_mm256_storeu_ps(ssptr, _ssp);
sptr += 8;
ssptr += 8;
}
#endif // __AVX__
for (; i < size; i++)
{
*ssptr += *sptr;
sptr++;
ssptr++;
}
}
float* ptr = bottom_top_blob.channel(q);
float* ssptr = square_sum.channel(q);
int i = 0;
#if __AVX__
__m256 _bias = _mm256_set1_ps(bias);
__m256 _ads = _mm256_set1_ps(alpha_div_size);
__m256 _mb = _mm256_set1_ps(-beta);
for (; i + 7 < size; i += 8)
{
__m256 _p = _mm256_loadu_ps(ptr);
__m256 _ssp = _mm256_loadu_ps(ssptr);
_ssp = _mm256_mul_ps(_ssp, _ads);
_ssp = _mm256_add_ps(_ssp, _bias);
_ssp = pow256_ps(_ssp, _mb);
_p = _mm256_mul_ps(_p, _ssp);
_mm256_storeu_ps(ptr, _p);
ssptr += 8;
ptr += 8;
}
#endif // __AVX__
for (; i < size; i++)
{
*ptr = *ptr * powf(bias + alpha_div_size * *ssptr, -beta);
ssptr++;
ptr++;
}
}
}
else if (region_type == NormRegion_WITHIN_CHANNEL)
{
int outw = w;
int outh = h;
Mat square_blob_bordered = square_blob;
int pad = local_size / 2;
if (pad > 0)
{
Option opt_b = opt;
opt_b.blob_allocator = opt.workspace_allocator;
copy_make_border(square_blob, square_blob_bordered, pad, local_size - pad - 1, pad, local_size - pad - 1, BORDER_CONSTANT, 0.f, opt_b);
if (square_blob_bordered.empty())
return -100;
w = square_blob_bordered.w;
h = square_blob_bordered.h;
}
const int maxk = local_size * local_size;
const float alpha_div_size = alpha / maxk;
// norm window offsets
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
int gap = w - local_size;
for (int i = 0; i < local_size; i++)
{
for (int j = 0; j < local_size; j++)
{
space_ofs[p1] = p2;
p1++;
p2++;
}
p2 += gap;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
float* ptr = bottom_top_blob.channel(q);
const Mat m = square_blob_bordered.channel(q);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
const float* sptr = m.row(i) + j;
float ss = 0.f;
for (int k = 0; k < maxk; k++)
{
float val = sptr[space_ofs[k]];
ss += val;
}
ptr[j] = ptr[j] * powf(bias + alpha_div_size * ss, -beta);
}
ptr += outw;
}
}
}
return 0;
} | movq (%rdi), %rax
addq -0x58(%rax), %rdi
jmp 0x1dc3b0
| /csukuangfj[P]ncnn/src/layer/x86/lrn_x86.cpp |
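The three-instruction bodies of these "virtual thunk" entries are this-pointer adjustments required by the Itanium C++ ABI when an override is reached through a virtual base: the thunk loads an adjustment stored in the vtable (here at offset -0x58) and tail-jumps to the real implementation. A minimal sketch of the C++ shape that makes a compiler emit such thunks; the class names are illustrative, not ncnn's actual hierarchy:

// With a virtual base, calling f through a Base* must first adjust the
// this pointer by a vcall offset read from the vtable, which is exactly
// the movq/addq/jmp pattern in the listing above.
struct Base
{
    virtual int f(int) = 0;
    virtual ~Base() {}
};
struct Mid : virtual Base
{
};
struct Impl : Mid
{
    int f(int x) { return x + 1; } // reached via a virtual thunk from Base*
};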
ncnn::LRN_x86_avx512::forward_inplace(ncnn::Mat&, ncnn::Option const&) const | int LRN_x86_avx512::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
{
int w = bottom_top_blob.w;
int h = bottom_top_blob.h;
int channels = bottom_top_blob.c;
size_t elemsize = bottom_top_blob.elemsize;
int size = w * h;
// squared values with local_size padding
Mat square_blob;
square_blob.create(w, h, channels, elemsize, opt.workspace_allocator);
if (square_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
const float* ptr = bottom_top_blob.channel(q);
float* outptr = square_blob.channel(q);
int i = 0;
#if __AVX__
for (; i + 7 < size; i += 8)
{
__m256 _p = _mm256_loadu_ps(ptr);
__m256 _outp = _mm256_mul_ps(_p, _p);
_mm256_storeu_ps(outptr, _outp);
ptr += 8;
outptr += 8;
}
#endif // __AVX__
for (; i < size; i++)
{
*outptr = *ptr * *ptr;
ptr++;
outptr++;
}
}
if (region_type == NormRegion_ACROSS_CHANNELS)
{
Mat square_sum;
square_sum.create(w, h, channels, elemsize, opt.workspace_allocator);
if (square_sum.empty())
return -100;
square_sum.fill(0.f);
const float alpha_div_size = alpha / local_size;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
// square sum
for (int p = q - local_size / 2; p <= q + local_size / 2; p++)
{
if (p < 0 || p >= channels)
continue;
const float* sptr = square_blob.channel(p);
float* ssptr = square_sum.channel(q);
int i = 0;
#if __AVX__
for (; i + 7 < size; i += 8)
{
__m256 _sp = _mm256_loadu_ps(sptr);
__m256 _ssp = _mm256_loadu_ps(ssptr);
_ssp = _mm256_add_ps(_ssp, _sp);
_mm256_storeu_ps(ssptr, _ssp);
sptr += 8;
ssptr += 8;
}
#endif // __AVX__
for (; i < size; i++)
{
*ssptr += *sptr;
sptr++;
ssptr++;
}
}
float* ptr = bottom_top_blob.channel(q);
float* ssptr = square_sum.channel(q);
int i = 0;
#if __AVX__
__m256 _bias = _mm256_set1_ps(bias);
__m256 _ads = _mm256_set1_ps(alpha_div_size);
__m256 _mb = _mm256_set1_ps(-beta);
for (; i + 7 < size; i += 8)
{
__m256 _p = _mm256_loadu_ps(ptr);
__m256 _ssp = _mm256_loadu_ps(ssptr);
_ssp = _mm256_mul_ps(_ssp, _ads);
_ssp = _mm256_add_ps(_ssp, _bias);
_ssp = pow256_ps(_ssp, _mb);
_p = _mm256_mul_ps(_p, _ssp);
_mm256_storeu_ps(ptr, _p);
ssptr += 8;
ptr += 8;
}
#endif // __AVX__
for (; i < size; i++)
{
*ptr = *ptr * powf(bias + alpha_div_size * *ssptr, -beta);
ssptr++;
ptr++;
}
}
}
else if (region_type == NormRegion_WITHIN_CHANNEL)
{
int outw = w;
int outh = h;
Mat square_blob_bordered = square_blob;
int pad = local_size / 2;
if (pad > 0)
{
Option opt_b = opt;
opt_b.blob_allocator = opt.workspace_allocator;
copy_make_border(square_blob, square_blob_bordered, pad, local_size - pad - 1, pad, local_size - pad - 1, BORDER_CONSTANT, 0.f, opt_b);
if (square_blob_bordered.empty())
return -100;
w = square_blob_bordered.w;
h = square_blob_bordered.h;
}
const int maxk = local_size * local_size;
const float alpha_div_size = alpha / maxk;
// norm window offsets
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
int gap = w - local_size;
for (int i = 0; i < local_size; i++)
{
for (int j = 0; j < local_size; j++)
{
space_ofs[p1] = p2;
p1++;
p2++;
}
p2 += gap;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
float* ptr = bottom_top_blob.channel(q);
const Mat m = square_blob_bordered.channel(q);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
const float* sptr = m.row(i) + j;
float ss = 0.f;
for (int k = 0; k < maxk; k++)
{
float val = sptr[space_ofs[k]];
ss += val;
}
ptr[j] = ptr[j] * powf(bias + alpha_div_size * ss, -beta);
}
ptr += outw;
}
}
}
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x178, %rsp # imm = 0x178
movq %rsi, %r15
movl 0x2c(%rsi), %esi
movl 0x30(%r15), %ecx
movl 0x38(%r15), %r12d
movq 0x10(%r15), %r13
leaq 0x90(%rsp), %rax
andq $0x0, 0x40(%rax)
movq %rdx, %r14
movq %rdi, %rbx
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, (%rax)
vmovups %xmm0, 0xc(%rax)
vmovaps %xmm0, 0x20(%rax)
vmovups %xmm0, 0x2c(%rax)
movq 0x10(%rdx), %r9
movq %rax, %rdi
movq %rsi, (%rsp)
movq %rcx, 0x10(%rsp)
movl %ecx, %edx
movl %r12d, %ecx
movq %r13, %r8
callq 0x63810
pushq $-0x64
popq %rbp
cmpq $0x0, 0x90(%rsp)
je 0x1dd6c2
movslq 0xc8(%rsp), %rax
imulq 0xd0(%rsp), %rax
testq %rax, %rax
je 0x1dd6c2
movq %r13, 0x110(%rsp)
movq %r14, 0x80(%rsp)
movq (%rsp), %rcx
movslq %ecx, %rax
movq %rax, 0x108(%rsp)
movq 0x10(%rsp), %rax
movl %eax, %r13d
imull %ecx, %r13d
xorl %eax, %eax
testl %r12d, %r12d
movl $0x0, %ecx
cmovgl %r12d, %ecx
movq %rcx, 0x68(%rsp)
movq %rbx, 0x18(%rsp)
cmpq 0x68(%rsp), %rax
je 0x1dcdff
movq (%r15), %rcx
movq 0x10(%r15), %r8
movq 0x40(%r15), %rdi
movq %rdi, %r9
imulq %rax, %r9
imulq %r8, %r9
addq %rcx, %r9
movq 0x90(%rsp), %rdx
movq 0xa0(%rsp), %r11
movq 0xd0(%rsp), %r10
movq %r10, %rbx
imulq %rax, %rbx
imulq %r11, %rbx
addq %rdx, %rbx
xorl %esi, %esi
xorl %r14d, %r14d
leal 0x7(%r14), %ebp
cmpl %r13d, %ebp
jge 0x1dcdc4
vmovups (%r9), %ymm0
vmulps %ymm0, %ymm0, %ymm0
vmovups %ymm0, (%rbx)
addq $0x20, %r9
addq $0x20, %rbx
addl $0x8, %r14d
addq $0x8, %rsi
jmp 0x1dcd9c
imulq %r11, %r10
imulq %rax, %r10
addq %r10, %rdx
imulq %r8, %rdi
imulq %rax, %rdi
addq %rdi, %rcx
movq 0x18(%rsp), %rbx
cmpl %r13d, %esi
jge 0x1dcdf7
vmovss (%rcx,%rsi,4), %xmm0
vmulss %xmm0, %xmm0, %xmm0
vmovss %xmm0, (%rdx,%rsi,4)
incq %rsi
jmp 0x1dcddf
incq %rax
jmp 0x1dcd4d
movq (%rbx), %rax
movq -0x18(%rax), %rcx
movl 0xd0(%rbx,%rcx), %ecx
testl %ecx, %ecx
je 0x1dcf42
xorl %ebp, %ebp
cmpl $0x1, %ecx
jne 0x1dd6c2
movq 0x98(%rsp), %rcx
vmovaps 0x90(%rsp), %xmm0
vmovaps %xmm0, 0x20(%rsp)
movq 0xa0(%rsp), %rdx
movq %rdx, 0x30(%rsp)
movl 0xa8(%rsp), %edx
movl %edx, 0x38(%rsp)
movq 0xb0(%rsp), %rdx
movq %rdx, 0x40(%rsp)
vmovups 0xb8(%rsp), %xmm0
vmovups %xmm0, 0x48(%rsp)
movl 0xc8(%rsp), %edx
movl %edx, 0x58(%rsp)
movq 0xd0(%rsp), %rdx
movq %rdx, 0x60(%rsp)
testq %rcx, %rcx
je 0x1dce8e
lock
incl (%rcx)
movq (%rbx), %rax
movq -0x18(%rax), %rcx
movl 0xd4(%rbx,%rcx), %r13d
cmpl $0x2, %r13d
jl 0x1dcffe
shrl %r13d
movq 0x80(%rsp), %rcx
vmovups (%rcx), %zmm0
leaq 0x130(%rsp), %r10
vmovups %zmm0, (%r10)
movq 0x10(%rcx), %rcx
movq %rcx, 0x8(%r10)
movq -0x18(%rax), %rax
movl %r13d, %ecx
notl %ecx
addl 0xd4(%rbx,%rax), %ecx
leaq 0x90(%rsp), %rdi
leaq 0x20(%rsp), %rsi
vxorps %xmm0, %xmm0, %xmm0
movl %r13d, %edx
movl %r13d, %r8d
movl %ecx, %r9d
pushq %r10
pushq $0x0
vzeroupper
callq 0x6466c
popq %rax
popq %rcx
cmpq $0x0, 0x20(%rsp)
pushq $-0x64
popq %rbp
je 0x1dd68b
movslq 0x58(%rsp), %rax
imulq 0x60(%rsp), %rax
testq %rax, %rax
je 0x1dd68b
movq %r15, %r14
movl 0x4c(%rsp), %r15d
movq (%rbx), %rax
movq -0x18(%rax), %rcx
movl 0xd4(%rbx,%rcx), %r13d
jmp 0x1dd008
leaq 0x20(%rsp), %rdi
andq $0x0, 0x40(%rdi)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, (%rdi)
vmovups %xmm0, 0xc(%rdi)
vmovaps %xmm0, 0x20(%rdi)
vmovups %xmm0, 0x2c(%rdi)
movq 0x80(%rsp), %rax
movq 0x10(%rax), %r9
movq (%rsp), %rsi
movq 0x10(%rsp), %rdx
movl %r12d, %ecx
movq 0x110(%rsp), %r8
vzeroupper
callq 0x63810
movq 0x20(%rsp), %rax
testq %rax, %rax
je 0x1dcfc6
movq 0x60(%rsp), %rcx
movslq 0x58(%rsp), %rdx
movq %rcx, %rsi
imulq %rdx, %rsi
testq %rsi, %rsi
je 0x1dcfc6
imull %edx, %ecx
xorl %edx, %edx
testl %ecx, %ecx
cmovlel %edx, %ecx
cmpl %edx, %ecx
je 0x1dd26a
andl $0x0, (%rax,%rdx,4)
incq %rdx
jmp 0x1dcfb5
movq 0x28(%rsp), %rax
testq %rax, %rax
pushq $-0x64
popq %rbp
je 0x1dd6c2
lock
decl (%rax)
jne 0x1dd6c2
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x1dd6b3
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1dd6c2
movq %r15, %r14
movq (%rsp), %rax
movl %eax, %r15d
imull %r13d, %r13d
vmovss 0xd8(%rbx,%rcx), %xmm0
vmovss %xmm0, 0x80(%rsp)
leaq 0x130(%rsp), %rdi
leaq 0xf(%rsp), %rdx
movq %r13, %rsi
vzeroupper
callq 0x73bbe
vcvtsi2ss %r13d, %xmm1, %xmm0
movq 0x130(%rsp), %rbp
movq (%rbx), %rax
movq -0x18(%rax), %rcx
movl 0xd4(%rbx,%rcx), %edx
subl %edx, %r15d
xorl %ecx, %ecx
xorl %esi, %esi
xorl %edi, %edi
cmpl %edx, %edi
jge 0x1dd09c
movslq %ecx, %rcx
leaq (,%rcx,4), %r9
addq %rbp, %r9
movl %esi, %r10d
xorl %r8d, %r8d
cmpl %edx, %r8d
jge 0x1dd08f
leaq (%r10,%r8), %rdx
movl %edx, (%r9,%r8,4)
movq -0x18(%rax), %rdx
movl 0xd4(%rbx,%rdx), %edx
incq %r8
jmp 0x1dd072
addl %r15d, %esi
addl %r8d, %esi
incl %edi
addq %r8, %rcx
jmp 0x1dd05a
movq (%r14), %rax
movq %rax, 0xf0(%rsp)
movq 0x40(%r14), %rax
imulq 0x10(%r14), %rax
movq %rax, 0xe8(%rsp)
movslq 0x4c(%rsp), %rcx
movq 0x30(%rsp), %rax
movq 0x60(%rsp), %rdx
imulq %rax, %rdx
movq %rdx, 0xe0(%rsp)
imulq %rax, %rcx
movq %rcx, 0x100(%rsp)
xorl %ecx, %ecx
movq (%rsp), %rax
testl %eax, %eax
cmovlel %ecx, %eax
movq %rax, (%rsp)
movq 0x20(%rsp), %rax
movq %rax, 0xd8(%rsp)
movq 0x10(%rsp), %rax
testl %eax, %eax
cmovlel %ecx, %eax
movq %rax, 0x10(%rsp)
vmovss 0x211b76(%rip), %xmm1 # 0x3eec88
vdivss %xmm0, %xmm1, %xmm0
vmovss %xmm0, 0x110(%rsp)
cmpq 0x68(%rsp), %rcx
je 0x1dd228
movq 0xe8(%rsp), %r14
imulq %rcx, %r14
addq 0xf0(%rsp), %r14
movq 0xe0(%rsp), %rax
movq %rcx, 0xf8(%rsp)
imulq %rcx, %rax
addq 0xd8(%rsp), %rax
movq %rax, 0x70(%rsp)
xorl %eax, %eax
cmpq 0x10(%rsp), %rax
je 0x1dd218
movq 0x100(%rsp), %r15
movq %rax, 0x78(%rsp)
imulq %rax, %r15
addq 0x70(%rsp), %r15
movq (%rbx), %r12
xorl %ebx, %ebx
cmpq (%rsp), %rbx
je 0x1dd1fa
leaq (%r15,%rbx,4), %rax
vxorps %xmm0, %xmm0, %xmm0
xorl %ecx, %ecx
cmpq %rcx, %r13
je 0x1dd1ab
movslq (%rbp,%rcx,4), %rdx
vaddss (%rax,%rdx,4), %xmm0, %xmm0
incq %rcx
jmp 0x1dd197
movq -0x18(%r12), %rax
vmulss 0x80(%rsp), %xmm0, %xmm0
movq 0x18(%rsp), %rcx
vmovss 0x110(%rsp), %xmm1
vfmadd213ss 0xe0(%rcx,%rax), %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + mem
vmovss 0xdc(%rcx,%rax), %xmm1
vxorps 0x213fd0(%rip){1to4}, %xmm1, %xmm1 # 0x3f11b4
callq 0x5f0e0
vmulss (%r14,%rbx,4), %xmm0, %xmm0
vmovss %xmm0, (%r14,%rbx,4)
incq %rbx
jmp 0x1dd187
movq 0x108(%rsp), %rax
leaq (%r14,%rax,4), %r14
movq 0x78(%rsp), %rax
incq %rax
movq 0x18(%rsp), %rbx
jmp 0x1dd161
movq 0xf8(%rsp), %rcx
incq %rcx
jmp 0x1dd11f
leaq 0x130(%rsp), %rdi
callq 0x624be
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x1dd6c0
lock
decl (%rax)
jne 0x1dd6c0
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x1dd6b1
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1dd6c0
movq %r15, 0x78(%rsp)
movq (%rbx), %rax
movq %rax, (%rsp)
movq -0x18(%rax), %rax
vmovss 0xd8(%rbx,%rax), %xmm0
vcvtsi2ssl 0xd4(%rbx,%rax), %xmm1, %xmm1
vdivss %xmm1, %xmm0, %xmm0
vmovaps %xmm0, 0x80(%rsp)
vbroadcastss %xmm0, %ymm0
vmovups %ymm0, 0x110(%rsp)
xorl %r15d, %r15d
pushq $0x2
popq %r14
movl %r12d, 0x70(%rsp)
cmpq 0x68(%rsp), %r15
je 0x1dd662
movq (%rsp), %rax
movq -0x18(%rax), %r9
movl 0xd4(%rbx,%r9), %esi
movl %esi, %eax
cltd
pushq $-0x2
popq %rcx
idivl %ecx
movl %eax, %ecx
addl %r15d, %ecx
movl %esi, %eax
cltd
idivl %r14d
addl %r15d, %eax
cmpl %eax, %ecx
jg 0x1dd3ae
movl %ecx, %ecx
testl %ecx, %ecx
sets %al
cmpl %r12d, %ecx
setge %dl
orb %al, %dl
jne 0x1dd3a7
movq 0x90(%rsp), %rax
movq 0xa0(%rsp), %r8
movq 0xd0(%rsp), %rdi
movq %rdi, %r9
imulq %rcx, %r9
imulq %r8, %r9
addq %rax, %r9
movq 0x60(%rsp), %r10
imulq %r15, %r10
imulq 0x30(%rsp), %r10
movq 0x20(%rsp), %rdx
leaq (%rdx,%r10), %r11
xorl %esi, %esi
xorl %ebx, %ebx
leal 0x7(%rbx), %ebp
cmpl %r13d, %ebp
jge 0x1dd36b
vmovups (%r11), %ymm0
vaddps (%r9), %ymm0, %ymm0
vmovups %ymm0, (%r11)
addq $0x20, %r9
addq $0x20, %r11
addl $0x8, %ebx
addq $0x8, %rsi
jmp 0x1dd343
addq %r10, %rdx
imulq %r8, %rdi
imulq %rcx, %rdi
addq %rdi, %rax
movq 0x18(%rsp), %rbx
cmpl %r13d, %esi
jge 0x1dd397
vmovss (%rdx,%rsi,4), %xmm0
vaddss (%rax,%rsi,4), %xmm0, %xmm0
vmovss %xmm0, (%rdx,%rsi,4)
incq %rsi
jmp 0x1dd37e
movq (%rsp), %rax
movq -0x18(%rax), %r9
movl 0xd4(%rbx,%r9), %esi
incl %ecx
jmp 0x1dd2db
movq 0x78(%rsp), %rax
movq (%rax), %r12
movq 0x10(%rax), %rcx
movq 0x40(%rax), %rax
movq %rax, %rdi
imulq %r15, %rdi
imulq %rcx, %rdi
addq %r12, %rdi
movq 0x20(%rsp), %rbp
movq 0x30(%rsp), %rsi
movq 0x60(%rsp), %rdx
movq %rdx, %r8
imulq %r15, %r8
imulq %rsi, %r8
addq %rbp, %r8
vbroadcastss 0xe0(%rbx,%r9), %ymm0
vmovss 0xdc(%rbx,%r9), %xmm1
vxorps 0x213dad(%rip){1to4}, %xmm1, %xmm1 # 0x3f11b4
vbroadcastss %xmm1, %ymm1
xorl %r14d, %r14d
xorl %r9d, %r9d
leal 0x7(%r9), %r10d
cmpl %r13d, %r10d
jge 0x1dd5e8
vmovups (%r8), %ymm2
vfmadd132ps 0x110(%rsp), %ymm0, %ymm2 # ymm2 = (ymm2 * mem) + ymm0
vcmpleps 0x215587(%rip), %ymm2, %k1 # 0x3f29c0
vmaxps 0x213d9d(%rip){1to8}, %ymm2, %ymm2 # 0x3f11e0
vpsrld $0x17, %ymm2, %ymm3
vpbroadcastd 0x213d93(%rip), %ymm4 # 0x3f11e4
vpternlogd $0xea, 0x210bb8(%rip){1to8}, %ymm4, %ymm2 # 0x3ee014
vcmpltps 0x213d85(%rip){1to8}, %ymm2, %k2 # 0x3f11ec
vaddps 0x213d7f(%rip){1to8}, %ymm2, %ymm4 # 0x3f11f0
vaddps %ymm2, %ymm4, %ymm4 {%k2}
vmulps %ymm4, %ymm4, %ymm2
vbroadcastss 0x213d70(%rip), %ymm5 # 0x3f11f4
vfmadd213ps 0x213d6a(%rip){1to8}, %ymm4, %ymm5 # ymm5 = (ymm4 * ymm5) + mem
vfmadd213ps 0x213d64(%rip){1to8}, %ymm4, %ymm5 # ymm5 = (ymm4 * ymm5) + mem
vfmadd213ps 0x213d5e(%rip){1to8}, %ymm4, %ymm5 # ymm5 = (ymm4 * ymm5) + mem
vfmadd213ps 0x213d58(%rip){1to8}, %ymm4, %ymm5 # ymm5 = (ymm4 * ymm5) + mem
vfmadd213ps 0x213d52(%rip){1to8}, %ymm4, %ymm5 # ymm5 = (ymm4 * ymm5) + mem
vfmadd213ps 0x213d4c(%rip){1to8}, %ymm4, %ymm5 # ymm5 = (ymm4 * ymm5) + mem
vfmadd213ps 0x213d46(%rip){1to8}, %ymm4, %ymm5 # ymm5 = (ymm4 * ymm5) + mem
vfmadd213ps 0x213d40(%rip){1to8}, %ymm4, %ymm5 # ymm5 = (ymm4 * ymm5) + mem
vmulps %ymm4, %ymm2, %ymm6
vmulps %ymm5, %ymm6, %ymm5
vpaddd 0x213d02(%rip){1to8}, %ymm3, %ymm3 # 0x3f11e8
vcvtdq2ps %ymm3, %ymm3
vbroadcastss 0x211795(%rip), %ymm6 # 0x3eec88
vsubps %ymm6, %ymm3, %ymm3 {%k2}
vbroadcastss 0x213cc6(%rip), %ymm7 # 0x3f11c8
vfmadd231ps %ymm7, %ymm3, %ymm5 # ymm5 = (ymm3 * ymm7) + ymm5
vbroadcastss 0x210b04(%rip), %ymm8 # 0x3ee014
vfmsub231ps %ymm2, %ymm8, %ymm5 # ymm5 = (ymm8 * ymm2) - ymm5
vsubps %ymm4, %ymm5, %ymm2
vbroadcastss 0x213ca2(%rip), %ymm5 # 0x3f11c4
vfmsub231ps %ymm3, %ymm5, %ymm2 # ymm2 = (ymm5 * ymm3) - ymm2
vpcmpeqd %ymm3, %ymm3, %ymm3
vmovaps %ymm3, %ymm2 {%k1}
vmulps %ymm1, %ymm2, %ymm2
vminps 0x213c79(%rip){1to8}, %ymm2, %ymm2 # 0x3f11b8
vmaxps 0x213c73(%rip){1to8}, %ymm2, %ymm2 # 0x3f11bc
vmovaps %ymm8, %ymm3
vfmadd231ps 0x213c69(%rip){1to8}, %ymm2, %ymm3 # ymm3 = (ymm2 * mem) + ymm3
vroundps $0x1, %ymm3, %ymm4
vcmpltps %ymm4, %ymm3, %k1
vsubps %ymm6, %ymm4, %ymm4 {%k1}
vfmsub231ps %ymm5, %ymm4, %ymm2 # ymm2 = (ymm4 * ymm5) - ymm2
vfnmsub231ps %ymm7, %ymm4, %ymm2 # ymm2 = -(ymm4 * ymm7) - ymm2
vmulps %ymm2, %ymm2, %ymm3
vbroadcastss 0x213c4b(%rip), %ymm5 # 0x3f11cc
vfmadd213ps 0x213c45(%rip){1to8}, %ymm2, %ymm5 # ymm5 = (ymm2 * ymm5) + mem
vfmadd213ps 0x213c3f(%rip){1to8}, %ymm2, %ymm5 # ymm5 = (ymm2 * ymm5) + mem
vfmadd213ps 0x213c39(%rip){1to8}, %ymm2, %ymm5 # ymm5 = (ymm2 * ymm5) + mem
vfmadd213ps 0x213c33(%rip){1to8}, %ymm2, %ymm5 # ymm5 = (ymm2 * ymm5) + mem
vfmadd213ps %ymm8, %ymm2, %ymm5 # ymm5 = (ymm2 * ymm5) + ymm8
vfmadd213ps %ymm2, %ymm3, %ymm5 # ymm5 = (ymm3 * ymm5) + ymm2
vcvttps2dq %ymm4, %ymm2
vpslld $0x17, %ymm2, %ymm2
vpaddd 0x2116c2(%rip){1to8}, %ymm2, %ymm2 # 0x3eec88
vmulps (%rdi), %ymm2, %ymm2
vfmadd213ps %ymm2, %ymm5, %ymm2 # ymm2 = (ymm5 * ymm2) + ymm2
vmovups %ymm2, (%rdi)
addq $0x20, %r8
addq $0x20, %rdi
addl $0x8, %r9d
addq $0x8, %r14
jmp 0x1dd412
imulq %rcx, %rax
imulq %r15, %rax
addq %rax, %r12
imulq %rsi, %rdx
imulq %r15, %rdx
addq %rdx, %rbp
cmpl %r13d, %r14d
jge 0x1dd651
movq (%rsp), %rax
movq -0x18(%rax), %rax
vmovss (%rbp,%r14,4), %xmm0
vmovaps 0x80(%rsp), %xmm1
vfmadd213ss 0xe0(%rbx,%rax), %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + mem
vmovss 0xdc(%rbx,%rax), %xmm1
vxorps 0x213b7c(%rip){1to4}, %xmm1, %xmm1 # 0x3f11b4
vzeroupper
callq 0x5f0e0
vmulss (%r12,%r14,4), %xmm0, %xmm0
vmovss %xmm0, (%r12,%r14,4)
incq %r14
jmp 0x1dd5fe
incq %r15
movl 0x70(%rsp), %r12d
pushq $0x2
popq %r14
jmp 0x1dd2b3
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x1dd6c0
lock
decl (%rax)
jne 0x1dd6c0
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x1dd6b5
movq (%rdi), %rax
vzeroupper
callq *0x18(%rax)
jmp 0x1dd6c0
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x1dd6c2
lock
decl (%rax)
jne 0x1dd6c2
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x1dd716
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1dd6c2
jmp 0x1dd6b5
jmp 0x1dd716
movq %rsi, %rdi
vzeroupper
callq 0x5f3e0
xorl %ebp, %ebp
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0x1dd6ff
lock
decl (%rax)
jne 0x1dd6ff
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
je 0x1dd6f4
movq (%rdi), %rax
vzeroupper
callq *0x18(%rax)
jmp 0x1dd6ff
movq %rsi, %rdi
vzeroupper
callq 0x5f3e0
movl %ebp, %eax
addq $0x178, %rsp # imm = 0x178
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x1dd6c2
jmp 0x1dd7dc
jmp 0x1dd7dc
jmp 0x1dd7dc
jmp 0x1dd7dc
jmp 0x1dd736
movq %rax, %rbx
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x1dd79d
lock
decl (%rax)
jne 0x1dd79d
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0x1dd759
jmp 0x1dd784
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1dd79d
jmp 0x1dd7dc
movq %rax, %rbx
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x1dd79d
lock
decl (%rax)
jne 0x1dd79d
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0x1dd78e
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x1dd79d
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1dd79d
jmp 0x1dd7dc
jmp 0x1dd7dc
movq %rax, %rbx
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0x1dd7d4
lock
decl (%rax)
jne 0x1dd7d4
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
jne 0x1dd7ce
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x1dd7d4
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/lrn_x86_avx512.cpp |
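Nearly every loop in the AVX builds above follows one pattern: an 8-wide body using _mm256 intrinsics, then a scalar loop for the size % 8 leftovers. A stripped-down sketch of the first such loop (the squaring pass), assuming an AVX-capable target and -mavx or equivalent; square_avx is an illustrative name:

#include <immintrin.h>

// 8-wide square with scalar tail, mirroring the first omp loop in the
// source above.
void square_avx(const float* ptr, float* outptr, int size)
{
    int i = 0;
    for (; i + 7 < size; i += 8)
    {
        const __m256 p = _mm256_loadu_ps(ptr + i);
        _mm256_storeu_ps(outptr + i, _mm256_mul_ps(p, p));
    }
    for (; i < size; i++) // leftover elements
        outptr[i] = ptr[i] * ptr[i];
}

Note that even the avx512 build keeps 256-bit vectors for this layer; the wider EVEX features visible in its listing ({1to8} broadcasts, k-mask compares) appear mainly inside the inlined pow256_ps.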
virtual thunk to ncnn::LRN_x86_avx512::forward_inplace(ncnn::Mat&, ncnn::Option const&) const | int LRN_x86_avx512::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
{
int w = bottom_top_blob.w;
int h = bottom_top_blob.h;
int channels = bottom_top_blob.c;
size_t elemsize = bottom_top_blob.elemsize;
int size = w * h;
// squared values with local_size padding
Mat square_blob;
square_blob.create(w, h, channels, elemsize, opt.workspace_allocator);
if (square_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
const float* ptr = bottom_top_blob.channel(q);
float* outptr = square_blob.channel(q);
int i = 0;
#if __AVX__
for (; i + 7 < size; i += 8)
{
__m256 _p = _mm256_loadu_ps(ptr);
__m256 _outp = _mm256_mul_ps(_p, _p);
_mm256_storeu_ps(outptr, _outp);
ptr += 8;
outptr += 8;
}
#endif // __AVX__
for (; i < size; i++)
{
*outptr = *ptr * *ptr;
ptr++;
outptr++;
}
}
if (region_type == NormRegion_ACROSS_CHANNELS)
{
Mat square_sum;
square_sum.create(w, h, channels, elemsize, opt.workspace_allocator);
if (square_sum.empty())
return -100;
square_sum.fill(0.f);
const float alpha_div_size = alpha / local_size;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
// square sum
for (int p = q - local_size / 2; p <= q + local_size / 2; p++)
{
if (p < 0 || p >= channels)
continue;
const float* sptr = square_blob.channel(p);
float* ssptr = square_sum.channel(q);
int i = 0;
#if __AVX__
for (; i + 7 < size; i += 8)
{
__m256 _sp = _mm256_loadu_ps(sptr);
__m256 _ssp = _mm256_loadu_ps(ssptr);
_ssp = _mm256_add_ps(_ssp, _sp);
_mm256_storeu_ps(ssptr, _ssp);
sptr += 8;
ssptr += 8;
}
#endif // __AVX__
for (; i < size; i++)
{
*ssptr += *sptr;
sptr++;
ssptr++;
}
}
float* ptr = bottom_top_blob.channel(q);
float* ssptr = square_sum.channel(q);
int i = 0;
#if __AVX__
__m256 _bias = _mm256_set1_ps(bias);
__m256 _ads = _mm256_set1_ps(alpha_div_size);
__m256 _mb = _mm256_set1_ps(-beta);
for (; i + 7 < size; i += 8)
{
__m256 _p = _mm256_loadu_ps(ptr);
__m256 _ssp = _mm256_loadu_ps(ssptr);
_ssp = _mm256_mul_ps(_ssp, _ads);
_ssp = _mm256_add_ps(_ssp, _bias);
_ssp = pow256_ps(_ssp, _mb);
_p = _mm256_mul_ps(_p, _ssp);
_mm256_storeu_ps(ptr, _p);
ssptr += 8;
ptr += 8;
}
#endif // __AVX__
for (; i < size; i++)
{
*ptr = *ptr * powf(bias + alpha_div_size * *ssptr, -beta);
ssptr++;
ptr++;
}
}
}
else if (region_type == NormRegion_WITHIN_CHANNEL)
{
int outw = w;
int outh = h;
Mat square_blob_bordered = square_blob;
int pad = local_size / 2;
if (pad > 0)
{
Option opt_b = opt;
opt_b.blob_allocator = opt.workspace_allocator;
copy_make_border(square_blob, square_blob_bordered, pad, local_size - pad - 1, pad, local_size - pad - 1, BORDER_CONSTANT, 0.f, opt_b);
if (square_blob_bordered.empty())
return -100;
w = square_blob_bordered.w;
h = square_blob_bordered.h;
}
const int maxk = local_size * local_size;
const float alpha_div_size = alpha / maxk;
// norm window offsets
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
int gap = w - local_size;
for (int i = 0; i < local_size; i++)
{
for (int j = 0; j < local_size; j++)
{
space_ofs[p1] = p2;
p1++;
p2++;
}
p2 += gap;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
float* ptr = bottom_top_blob.channel(q);
const Mat m = square_blob_bordered.channel(q);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
const float* sptr = m.row(i) + j;
float ss = 0.f;
for (int k = 0; k < maxk; k++)
{
float val = sptr[space_ofs[k]];
ss += val;
}
ptr[j] = ptr[j] * powf(bias + alpha_div_size * ss, -beta);
}
ptr += outw;
}
}
}
return 0;
} | movq (%rdi), %rax
addq -0x58(%rax), %rdi
jmp 0x1dcc74
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/lrn_x86_avx512.cpp |
ncnn::LRN_x86_fma::forward_inplace(ncnn::Mat&, ncnn::Option const&) const | int LRN_x86_fma::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
{
int w = bottom_top_blob.w;
int h = bottom_top_blob.h;
int channels = bottom_top_blob.c;
size_t elemsize = bottom_top_blob.elemsize;
int size = w * h;
// squared values with local_size padding
Mat square_blob;
square_blob.create(w, h, channels, elemsize, opt.workspace_allocator);
if (square_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
const float* ptr = bottom_top_blob.channel(q);
float* outptr = square_blob.channel(q);
int i = 0;
#if __AVX__
for (; i + 7 < size; i += 8)
{
__m256 _p = _mm256_loadu_ps(ptr);
__m256 _outp = _mm256_mul_ps(_p, _p);
_mm256_storeu_ps(outptr, _outp);
ptr += 8;
outptr += 8;
}
#endif // __AVX__
for (; i < size; i++)
{
*outptr = *ptr * *ptr;
ptr++;
outptr++;
}
}
if (region_type == NormRegion_ACROSS_CHANNELS)
{
Mat square_sum;
square_sum.create(w, h, channels, elemsize, opt.workspace_allocator);
if (square_sum.empty())
return -100;
square_sum.fill(0.f);
const float alpha_div_size = alpha / local_size;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
// square sum
for (int p = q - local_size / 2; p <= q + local_size / 2; p++)
{
if (p < 0 || p >= channels)
continue;
const float* sptr = square_blob.channel(p);
float* ssptr = square_sum.channel(q);
int i = 0;
#if __AVX__
for (; i + 7 < size; i += 8)
{
__m256 _sp = _mm256_loadu_ps(sptr);
__m256 _ssp = _mm256_loadu_ps(ssptr);
_ssp = _mm256_add_ps(_ssp, _sp);
_mm256_storeu_ps(ssptr, _ssp);
sptr += 8;
ssptr += 8;
}
#endif // __AVX__
for (; i < size; i++)
{
*ssptr += *sptr;
sptr++;
ssptr++;
}
}
float* ptr = bottom_top_blob.channel(q);
float* ssptr = square_sum.channel(q);
int i = 0;
#if __AVX__
__m256 _bias = _mm256_set1_ps(bias);
__m256 _ads = _mm256_set1_ps(alpha_div_size);
__m256 _mb = _mm256_set1_ps(-beta);
for (; i + 7 < size; i += 8)
{
__m256 _p = _mm256_loadu_ps(ptr);
__m256 _ssp = _mm256_loadu_ps(ssptr);
_ssp = _mm256_mul_ps(_ssp, _ads);
_ssp = _mm256_add_ps(_ssp, _bias);
_ssp = pow256_ps(_ssp, _mb);
_p = _mm256_mul_ps(_p, _ssp);
_mm256_storeu_ps(ptr, _p);
ssptr += 8;
ptr += 8;
}
#endif // __AVX__
for (; i < size; i++)
{
*ptr = *ptr * powf(bias + alpha_div_size * *ssptr, -beta);
ssptr++;
ptr++;
}
}
}
else if (region_type == NormRegion_WITHIN_CHANNEL)
{
int outw = w;
int outh = h;
Mat square_blob_bordered = square_blob;
int pad = local_size / 2;
if (pad > 0)
{
Option opt_b = opt;
opt_b.blob_allocator = opt.workspace_allocator;
copy_make_border(square_blob, square_blob_bordered, pad, local_size - pad - 1, pad, local_size - pad - 1, BORDER_CONSTANT, 0.f, opt_b);
if (square_blob_bordered.empty())
return -100;
w = square_blob_bordered.w;
h = square_blob_bordered.h;
}
const int maxk = local_size * local_size;
const float alpha_div_size = alpha / maxk;
// norm window offsets
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
int gap = w - local_size;
for (int i = 0; i < local_size; i++)
{
for (int j = 0; j < local_size; j++)
{
space_ofs[p1] = p2;
p1++;
p2++;
}
p2 += gap;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
float* ptr = bottom_top_blob.channel(q);
const Mat m = square_blob_bordered.channel(q);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
const float* sptr = m.row(i) + j;
float ss = 0.f;
for (int k = 0; k < maxk; k++)
{
float val = sptr[space_ofs[k]];
ss += val;
}
ptr[j] = ptr[j] * powf(bias + alpha_div_size * ss, -beta);
}
ptr += outw;
}
}
}
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x178, %rsp # imm = 0x178
movq %rsi, %r15
movl 0x2c(%rsi), %esi
movl 0x30(%r15), %ecx
movl 0x38(%r15), %r12d
movq 0x10(%r15), %r13
leaq 0x90(%rsp), %rax
andq $0x0, 0x40(%rax)
movq %rdx, %r14
movq %rdi, %rbx
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, (%rax)
vmovups %xmm0, 0xc(%rax)
vmovaps %xmm0, 0x20(%rax)
vmovups %xmm0, 0x2c(%rax)
movq 0x10(%rdx), %r9
movq %rax, %rdi
movq %rsi, (%rsp)
movq %rcx, 0x10(%rsp)
movl %ecx, %edx
movl %r12d, %ecx
movq %r13, %r8
callq 0x63810
pushq $-0x64
popq %rbp
cmpq $0x0, 0x90(%rsp)
je 0x1de30e
movslq 0xc8(%rsp), %rax
imulq 0xd0(%rsp), %rax
testq %rax, %rax
je 0x1de30e
movq %r13, 0x110(%rsp)
movq %r14, 0x80(%rsp)
movq (%rsp), %rcx
movslq %ecx, %rax
movq %rax, 0x108(%rsp)
movq 0x10(%rsp), %rax
movl %eax, %r13d
imull %ecx, %r13d
xorl %eax, %eax
testl %r12d, %r12d
movl $0x0, %ecx
cmovgl %r12d, %ecx
movq %rcx, 0x68(%rsp)
movq %rbx, 0x18(%rsp)
cmpq 0x68(%rsp), %rax
je 0x1dd9c3
movq (%r15), %rcx
movq 0x10(%r15), %r8
movq 0x40(%r15), %rdi
movq %rdi, %r9
imulq %rax, %r9
imulq %r8, %r9
addq %rcx, %r9
movq 0x90(%rsp), %rdx
movq 0xa0(%rsp), %r11
movq 0xd0(%rsp), %r10
movq %r10, %rbx
imulq %rax, %rbx
imulq %r11, %rbx
addq %rdx, %rbx
xorl %esi, %esi
xorl %r14d, %r14d
leal 0x7(%r14), %ebp
cmpl %r13d, %ebp
jge 0x1dd988
vmovups (%r9), %ymm0
vmulps %ymm0, %ymm0, %ymm0
vmovups %ymm0, (%rbx)
addq $0x20, %r9
addq $0x20, %rbx
addl $0x8, %r14d
addq $0x8, %rsi
jmp 0x1dd960
imulq %r11, %r10
imulq %rax, %r10
addq %r10, %rdx
imulq %r8, %rdi
imulq %rax, %rdi
addq %rdi, %rcx
movq 0x18(%rsp), %rbx
cmpl %r13d, %esi
jge 0x1dd9bb
vmovss (%rcx,%rsi,4), %xmm0
vmulss %xmm0, %xmm0, %xmm0
vmovss %xmm0, (%rdx,%rsi,4)
incq %rsi
jmp 0x1dd9a3
incq %rax
jmp 0x1dd911
movq (%rbx), %rax
movq -0x18(%rax), %rcx
movl 0xd0(%rbx,%rcx), %ecx
testl %ecx, %ecx
je 0x1ddb0e
xorl %ebp, %ebp
cmpl $0x1, %ecx
jne 0x1de30e
movq 0x98(%rsp), %rcx
vmovaps 0x90(%rsp), %xmm0
vmovaps %xmm0, 0x20(%rsp)
movq 0xa0(%rsp), %rdx
movq %rdx, 0x30(%rsp)
movl 0xa8(%rsp), %edx
movl %edx, 0x38(%rsp)
movq 0xb0(%rsp), %rdx
movq %rdx, 0x40(%rsp)
vmovups 0xb8(%rsp), %xmm0
vmovups %xmm0, 0x48(%rsp)
movl 0xc8(%rsp), %edx
movl %edx, 0x58(%rsp)
movq 0xd0(%rsp), %rdx
movq %rdx, 0x60(%rsp)
testq %rcx, %rcx
je 0x1dda52
lock
incl (%rcx)
movq (%rbx), %rax
movq -0x18(%rax), %rcx
movl 0xd4(%rbx,%rcx), %r13d
cmpl $0x2, %r13d
jl 0x1ddbca
shrl %r13d
movq 0x80(%rsp), %rcx
vmovups (%rcx), %ymm0
vmovups 0x20(%rcx), %ymm1
leaq 0x130(%rsp), %r10
vmovups %ymm1, 0x20(%r10)
vmovups %ymm0, (%r10)
movq 0x10(%rcx), %rcx
movq %rcx, 0x8(%r10)
movq -0x18(%rax), %rax
movl %r13d, %ecx
notl %ecx
addl 0xd4(%rbx,%rax), %ecx
leaq 0x90(%rsp), %rdi
leaq 0x20(%rsp), %rsi
vxorps %xmm0, %xmm0, %xmm0
movl %r13d, %edx
movl %r13d, %r8d
movl %ecx, %r9d
pushq %r10
pushq $0x0
vzeroupper
callq 0x6466c
popq %rax
popq %rcx
cmpq $0x0, 0x20(%rsp)
pushq $-0x64
popq %rbp
je 0x1de2d7
movslq 0x58(%rsp), %rax
imulq 0x60(%rsp), %rax
testq %rax, %rax
je 0x1de2d7
movq %r15, %r14
movl 0x4c(%rsp), %r15d
movq (%rbx), %rax
movq -0x18(%rax), %rcx
movl 0xd4(%rbx,%rcx), %r13d
jmp 0x1ddbd4
leaq 0x20(%rsp), %rdi
andq $0x0, 0x40(%rdi)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, (%rdi)
vmovups %xmm0, 0xc(%rdi)
vmovaps %xmm0, 0x20(%rdi)
vmovups %xmm0, 0x2c(%rdi)
movq 0x80(%rsp), %rax
movq 0x10(%rax), %r9
movq (%rsp), %rsi
movq 0x10(%rsp), %rdx
movl %r12d, %ecx
movq 0x110(%rsp), %r8
vzeroupper
callq 0x63810
movq 0x20(%rsp), %rax
testq %rax, %rax
je 0x1ddb92
movq 0x60(%rsp), %rcx
movslq 0x58(%rsp), %rdx
movq %rcx, %rsi
imulq %rdx, %rsi
testq %rsi, %rsi
je 0x1ddb92
imull %edx, %ecx
xorl %edx, %edx
testl %ecx, %ecx
cmovlel %edx, %ecx
cmpl %edx, %ecx
je 0x1dde3b
andl $0x0, (%rax,%rdx,4)
incq %rdx
jmp 0x1ddb81
movq 0x28(%rsp), %rax
testq %rax, %rax
pushq $-0x64
popq %rbp
je 0x1de30e
lock
decl (%rax)
jne 0x1de30e
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x1de2ff
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1de30e
movq %r15, %r14
movq (%rsp), %rax
movl %eax, %r15d
imull %r13d, %r13d
vmovss 0xd8(%rbx,%rcx), %xmm0
vmovss %xmm0, 0x80(%rsp)
leaq 0x130(%rsp), %rdi
leaq 0xf(%rsp), %rdx
movq %r13, %rsi
vzeroupper
callq 0x73bbe
vcvtsi2ss %r13d, %xmm2, %xmm0
movq 0x130(%rsp), %rbp
movq (%rbx), %rax
movq -0x18(%rax), %rcx
movl 0xd4(%rbx,%rcx), %edx
subl %edx, %r15d
xorl %ecx, %ecx
xorl %esi, %esi
xorl %edi, %edi
cmpl %edx, %edi
jge 0x1ddc68
movslq %ecx, %rcx
leaq (,%rcx,4), %r9
addq %rbp, %r9
movl %esi, %r10d
xorl %r8d, %r8d
cmpl %edx, %r8d
jge 0x1ddc5b
leaq (%r10,%r8), %rdx
movl %edx, (%r9,%r8,4)
movq -0x18(%rax), %rdx
movl 0xd4(%rbx,%rdx), %edx
incq %r8
jmp 0x1ddc3e
addl %r15d, %esi
addl %r8d, %esi
incl %edi
addq %r8, %rcx
jmp 0x1ddc26
movq 0x40(%r14), %rax
imulq 0x10(%r14), %rax
movq %rax, 0xf0(%rsp)
movslq 0x4c(%rsp), %rcx
movq 0x30(%rsp), %rax
movq 0x60(%rsp), %rdx
imulq %rax, %rdx
movq %rdx, 0xe8(%rsp)
imulq %rax, %rcx
movq %rcx, 0x100(%rsp)
xorl %ecx, %ecx
movq (%rsp), %rax
testl %eax, %eax
cmovlel %ecx, %eax
movq %rax, (%rsp)
movq (%r14), %rax
movq %rax, 0xe0(%rsp)
xorl %ecx, %ecx
movq 0x10(%rsp), %rax
testl %eax, %eax
cmovlel %ecx, %eax
movq %rax, 0x10(%rsp)
movq 0x20(%rsp), %rax
movq %rax, 0xd8(%rsp)
vmovss 0x210fa8(%rip), %xmm1 # 0x3eec88
vdivss %xmm0, %xmm1, %xmm0
vmovss %xmm0, 0x110(%rsp)
cmpq 0x68(%rsp), %rcx
je 0x1dddf9
movq 0xf0(%rsp), %r14
imulq %rcx, %r14
addq 0xe0(%rsp), %r14
movq 0xe8(%rsp), %rax
movq %rcx, 0xf8(%rsp)
imulq %rcx, %rax
addq 0xd8(%rsp), %rax
movq %rax, 0x70(%rsp)
xorl %eax, %eax
cmpq 0x10(%rsp), %rax
je 0x1ddde9
movq 0x100(%rsp), %r15
movq %rax, 0x78(%rsp)
imulq %rax, %r15
addq 0x70(%rsp), %r15
movq (%rbx), %r12
xorl %ebx, %ebx
cmpq (%rsp), %rbx
je 0x1dddcb
leaq (%r15,%rbx,4), %rax
vxorps %xmm0, %xmm0, %xmm0
xorl %ecx, %ecx
cmpq %rcx, %r13
je 0x1ddd79
movslq (%rbp,%rcx,4), %rdx
vaddss (%rax,%rdx,4), %xmm0, %xmm0
incq %rcx
jmp 0x1ddd65
movq -0x18(%r12), %rax
vmulss 0x80(%rsp), %xmm0, %xmm0
movq 0x18(%rsp), %rcx
vmovss 0x110(%rsp), %xmm1
vfmadd213ss 0xe0(%rcx,%rax), %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + mem
vmovss 0xdc(%rcx,%rax), %xmm1
vbroadcastss 0x213403(%rip), %xmm2 # 0x3f11b4
vxorps %xmm2, %xmm1, %xmm1
callq 0x5f0e0
vmulss (%r14,%rbx,4), %xmm0, %xmm0
vmovss %xmm0, (%r14,%rbx,4)
incq %rbx
jmp 0x1ddd55
movq 0x108(%rsp), %rax
leaq (%r14,%rax,4), %r14
movq 0x78(%rsp), %rax
incq %rax
movq 0x18(%rsp), %rbx
jmp 0x1ddd2f
movq 0xf8(%rsp), %rcx
incq %rcx
jmp 0x1ddced
leaq 0x130(%rsp), %rdi
callq 0x624be
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x1de30c
lock
decl (%rax)
jne 0x1de30c
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x1de2fd
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1de30c
movq %r15, 0x78(%rsp)
movq (%rbx), %rax
movq %rax, (%rsp)
movq -0x18(%rax), %rax
vmovss 0xd8(%rbx,%rax), %xmm0
vcvtsi2ssl 0xd4(%rbx,%rax), %xmm1, %xmm1
vdivss %xmm1, %xmm0, %xmm0
vmovaps %xmm0, 0x80(%rsp)
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
vmovups %ymm0, 0x110(%rsp)
xorl %r15d, %r15d
pushq $0x2
popq %r14
movl %r12d, 0x70(%rsp)
cmpq 0x68(%rsp), %r15
je 0x1de2ae
movq (%rsp), %rax
movq -0x18(%rax), %r9
movl 0xd4(%rbx,%r9), %esi
movl %esi, %eax
cltd
pushq $-0x2
popq %rcx
idivl %ecx
movl %eax, %ecx
addl %r15d, %ecx
movl %esi, %eax
cltd
idivl %r14d
addl %r15d, %eax
cmpl %eax, %ecx
jg 0x1ddf85
movl %ecx, %ecx
testl %ecx, %ecx
sets %al
cmpl %r12d, %ecx
setge %dl
orb %al, %dl
jne 0x1ddf7e
movq 0x90(%rsp), %rax
movq 0xa0(%rsp), %r8
movq 0xd0(%rsp), %rdi
movq %rdi, %r9
imulq %rcx, %r9
imulq %r8, %r9
addq %rax, %r9
movq 0x60(%rsp), %r10
imulq %r15, %r10
imulq 0x30(%rsp), %r10
movq 0x20(%rsp), %rdx
leaq (%rdx,%r10), %r11
xorl %esi, %esi
xorl %ebx, %ebx
leal 0x7(%rbx), %ebp
cmpl %r13d, %ebp
jge 0x1ddf42
vmovups (%r11), %ymm0
vaddps (%r9), %ymm0, %ymm0
vmovups %ymm0, (%r11)
addq $0x20, %r9
addq $0x20, %r11
addl $0x8, %ebx
addq $0x8, %rsi
jmp 0x1ddf1a
addq %r10, %rdx
imulq %r8, %rdi
imulq %rcx, %rdi
addq %rdi, %rax
movq 0x18(%rsp), %rbx
cmpl %r13d, %esi
jge 0x1ddf6e
vmovss (%rdx,%rsi,4), %xmm0
vaddss (%rax,%rsi,4), %xmm0, %xmm0
vmovss %xmm0, (%rdx,%rsi,4)
incq %rsi
jmp 0x1ddf55
movq (%rsp), %rax
movq -0x18(%rax), %r9
movl 0xd4(%rbx,%r9), %esi
incl %ecx
jmp 0x1ddeb2
movq 0x78(%rsp), %rax
movq (%rax), %r12
movq 0x10(%rax), %rcx
movq 0x40(%rax), %rax
movq %rax, %rdi
imulq %r15, %rdi
imulq %rcx, %rdi
addq %r12, %rdi
movq 0x20(%rsp), %rbp
movq 0x30(%rsp), %rsi
movq 0x60(%rsp), %rdx
movq %rdx, %r8
imulq %r15, %r8
imulq %rsi, %r8
addq %rbp, %r8
vbroadcastss 0xe0(%rbx,%r9), %ymm0
vbroadcastss 0xdc(%rbx,%r9), %xmm1
vbroadcastss 0x2131d7(%rip), %xmm2 # 0x3f11b4
vxorps %xmm2, %xmm1, %xmm1
vinsertf128 $0x1, %xmm1, %ymm1, %ymm1
xorl %r14d, %r14d
xorl %r9d, %r9d
leal 0x7(%r9), %r10d
cmpl %r13d, %r10d
jge 0x1de231
vmovups (%r8), %ymm2
vfmadd132ps 0x110(%rsp), %ymm0, %ymm2 # ymm2 = (ymm2 * mem) + ymm0
vbroadcastss 0x2131ce(%rip), %ymm3 # 0x3f11e0
vmaxps %ymm3, %ymm2, %ymm3
vpsrld $0x17, %xmm3, %xmm4
vextractf128 $0x1, %ymm3, %xmm5
vpsrld $0x17, %xmm5, %xmm5
vbroadcastss 0x2131b5(%rip), %ymm6 # 0x3f11e4
vandps %ymm6, %ymm3, %ymm3
vbroadcastss 0x20ffd8(%rip), %ymm8 # 0x3ee014
vorps %ymm3, %ymm8, %ymm3
vbroadcastss 0x2131a3(%rip), %ymm6 # 0x3f11ec
vcmpleps %ymm3, %ymm6, %ymm6
vandnps %ymm3, %ymm6, %ymm7
vbroadcastss 0x213195(%rip), %ymm9 # 0x3f11f0
vaddps %ymm3, %ymm9, %ymm3
vaddps %ymm7, %ymm3, %ymm3
vextractf128 $0x1, %ymm6, %xmm7
vpsubd %xmm7, %xmm5, %xmm5
vbroadcastss 0x21388e(%rip), %xmm7 # 0x3f1904
vpaddd %xmm7, %xmm5, %xmm5
vpsubd %xmm6, %xmm4, %xmm4
vpaddd %xmm7, %xmm4, %xmm4
vinsertf128 $0x1, %xmm5, %ymm4, %ymm4
vmulps %ymm3, %ymm3, %ymm5
vbroadcastss 0x21315f(%rip), %ymm6 # 0x3f11f4
vbroadcastss 0x21315a(%rip), %ymm7 # 0x3f11f8
vfmadd213ps %ymm7, %ymm3, %ymm6 # ymm6 = (ymm3 * ymm6) + ymm7
vbroadcastss 0x213150(%rip), %ymm7 # 0x3f11fc
vfmadd213ps %ymm7, %ymm3, %ymm6 # ymm6 = (ymm3 * ymm6) + ymm7
vbroadcastss 0x213146(%rip), %ymm7 # 0x3f1200
vfmadd213ps %ymm7, %ymm3, %ymm6 # ymm6 = (ymm3 * ymm6) + ymm7
vbroadcastss 0x21313c(%rip), %ymm7 # 0x3f1204
vfmadd213ps %ymm7, %ymm3, %ymm6 # ymm6 = (ymm3 * ymm6) + ymm7
vbroadcastss 0x213132(%rip), %ymm7 # 0x3f1208
vfmadd213ps %ymm7, %ymm3, %ymm6 # ymm6 = (ymm3 * ymm6) + ymm7
vbroadcastss 0x213128(%rip), %ymm7 # 0x3f120c
vfmadd213ps %ymm7, %ymm3, %ymm6 # ymm6 = (ymm3 * ymm6) + ymm7
vbroadcastss 0x21311e(%rip), %ymm7 # 0x3f1210
vfmadd213ps %ymm7, %ymm3, %ymm6 # ymm6 = (ymm3 * ymm6) + ymm7
vbroadcastss 0x213114(%rip), %ymm7 # 0x3f1214
vfmadd213ps %ymm7, %ymm3, %ymm6 # ymm6 = (ymm3 * ymm6) + ymm7
vmulps %ymm3, %ymm5, %ymm7
vmulps %ymm6, %ymm7, %ymm6
vcvtdq2ps %ymm4, %ymm4
vbroadcastss 0x2130ae(%rip), %ymm7 # 0x3f11c8
vfmadd231ps %ymm7, %ymm4, %ymm6 # ymm6 = (ymm4 * ymm7) + ymm6
vfmsub231ps %ymm5, %ymm8, %ymm6 # ymm6 = (ymm8 * ymm5) - ymm6
vcmpleps 0x214893(%rip), %ymm2, %ymm2 # 0x3f29c0
vsubps %ymm3, %ymm6, %ymm3
vbroadcastss 0x21308a(%rip), %ymm5 # 0x3f11c4
vfmsub231ps %ymm4, %ymm5, %ymm3 # ymm3 = (ymm5 * ymm4) - ymm3
vorps %ymm3, %ymm2, %ymm2
vmulps %ymm1, %ymm2, %ymm2
vbroadcastss 0x213068(%rip), %ymm3 # 0x3f11b8
vminps %ymm3, %ymm2, %ymm2
vbroadcastss 0x21305f(%rip), %ymm3 # 0x3f11bc
vmaxps %ymm3, %ymm2, %ymm2
vbroadcastss 0x213056(%rip), %ymm3 # 0x3f11c0
vfmadd213ps %ymm8, %ymm2, %ymm3 # ymm3 = (ymm2 * ymm3) + ymm8
vroundps $0x1, %ymm3, %ymm4
vcmpltps %ymm4, %ymm3, %ymm3
vbroadcastss 0x210b05(%rip), %ymm6 # 0x3eec88
vandps %ymm6, %ymm3, %ymm3
vsubps %ymm3, %ymm4, %ymm3
vfmsub231ps %ymm5, %ymm3, %ymm2 # ymm2 = (ymm3 * ymm5) - ymm2
vfnmsub231ps %ymm7, %ymm3, %ymm2 # ymm2 = -(ymm3 * ymm7) - ymm2
vmulps %ymm2, %ymm2, %ymm4
vbroadcastss 0x21302a(%rip), %ymm5 # 0x3f11cc
vbroadcastss 0x213025(%rip), %ymm6 # 0x3f11d0
vfmadd213ps %ymm6, %ymm2, %ymm5 # ymm5 = (ymm2 * ymm5) + ymm6
vbroadcastss 0x21301b(%rip), %ymm6 # 0x3f11d4
vfmadd213ps %ymm6, %ymm2, %ymm5 # ymm5 = (ymm2 * ymm5) + ymm6
vbroadcastss 0x213011(%rip), %ymm6 # 0x3f11d8
vfmadd213ps %ymm6, %ymm2, %ymm5 # ymm5 = (ymm2 * ymm5) + ymm6
vbroadcastss 0x213007(%rip), %ymm6 # 0x3f11dc
vfmadd213ps %ymm6, %ymm2, %ymm5 # ymm5 = (ymm2 * ymm5) + ymm6
vfmadd213ps %ymm8, %ymm2, %ymm5 # ymm5 = (ymm2 * ymm5) + ymm8
vfmadd213ps %ymm2, %ymm4, %ymm5 # ymm5 = (ymm4 * ymm5) + ymm2
vcvttps2dq %ymm3, %ymm2
vpslld $0x17, %xmm2, %xmm3
vextractf128 $0x1, %ymm2, %xmm2
vpslld $0x17, %xmm2, %xmm2
vbroadcastss 0x210a87(%rip), %xmm4 # 0x3eec88
vpaddd %xmm4, %xmm2, %xmm2
vpaddd %xmm4, %xmm3, %xmm3
vinsertf128 $0x1, %xmm2, %ymm3, %ymm2
vmulps (%rdi), %ymm2, %ymm2
vfmadd213ps %ymm2, %ymm5, %ymm2 # ymm2 = (ymm5 * ymm2) + ymm2
vmovups %ymm2, (%rdi)
addq $0x20, %r8
addq $0x20, %rdi
addl $0x8, %r9d
addq $0x8, %r14
jmp 0x1ddfed
imulq %rcx, %rax
imulq %r15, %rax
addq %rax, %r12
imulq %rsi, %rdx
imulq %r15, %rdx
addq %rdx, %rbp
cmpl %r13d, %r14d
jge 0x1de29d
movq (%rsp), %rax
movq -0x18(%rax), %rax
vmovss (%rbp,%r14,4), %xmm0
vmovaps 0x80(%rsp), %xmm1
vfmadd213ss 0xe0(%rbx,%rax), %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + mem
vmovss 0xdc(%rbx,%rax), %xmm1
vbroadcastss 0x212f34(%rip), %xmm2 # 0x3f11b4
vxorps %xmm2, %xmm1, %xmm1
vzeroupper
callq 0x5f0e0
vmulss (%r12,%r14,4), %xmm0, %xmm0
vmovss %xmm0, (%r12,%r14,4)
incq %r14
jmp 0x1de247
incq %r15
movl 0x70(%rsp), %r12d
pushq $0x2
popq %r14
jmp 0x1dde8a
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x1de30c
lock
decl (%rax)
jne 0x1de30c
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x1de301
movq (%rdi), %rax
vzeroupper
callq *0x18(%rax)
jmp 0x1de30c
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x1de30e
lock
decl (%rax)
jne 0x1de30e
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x1de362
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1de30e
jmp 0x1de301
jmp 0x1de362
movq %rsi, %rdi
vzeroupper
callq 0x5f3e0
xorl %ebp, %ebp
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0x1de34b
lock
decl (%rax)
jne 0x1de34b
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
je 0x1de340
movq (%rdi), %rax
vzeroupper
callq *0x18(%rax)
jmp 0x1de34b
movq %rsi, %rdi
vzeroupper
callq 0x5f3e0
movl %ebp, %eax
addq $0x178, %rsp # imm = 0x178
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x1de30e
jmp 0x1de428
jmp 0x1de428
jmp 0x1de428
jmp 0x1de428
jmp 0x1de382
movq %rax, %rbx
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x1de3e9
lock
decl (%rax)
jne 0x1de3e9
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0x1de3a5
jmp 0x1de3d0
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1de3e9
jmp 0x1de428
movq %rax, %rbx
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x1de3e9
lock
decl (%rax)
jne 0x1de3e9
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0x1de3da
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x1de3e9
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1de3e9
jmp 0x1de428
jmp 0x1de428
movq %rax, %rbx
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0x1de420
lock
decl (%rax)
jne 0x1de420
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
jne 0x1de41a
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x1de420
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/lrn_x86_fma.cpp |
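The long `vbroadcastss`/`vfmadd213ps` chain in the listing above is `pow256_ps` fully inlined: the power is evaluated as exp(log(x) * y) using polynomial approximations of log and exp. A minimal scalar sketch of that identity, assuming only `<cmath>` (`my_powf` is a hypothetical name, not an ncnn function):

#include <cmath>

// Scalar model of what the vectorized pow256_ps computes: for x > 0
// (here x = bias + alpha_div_size * sum, with bias > 0 in practice),
// pow(x, y) == exp(y * log(x)).
static inline float my_powf(float x, float y)
{
    return expf(y * logf(x));
}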
virtual thunk to ncnn::LRN_x86_fma::forward_inplace(ncnn::Mat&, ncnn::Option const&) const | int LRN_x86_fma::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
{
int w = bottom_top_blob.w;
int h = bottom_top_blob.h;
int channels = bottom_top_blob.c;
size_t elemsize = bottom_top_blob.elemsize;
int size = w * h;
// squared values with local_size padding
Mat square_blob;
square_blob.create(w, h, channels, elemsize, opt.workspace_allocator);
if (square_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
const float* ptr = bottom_top_blob.channel(q);
float* outptr = square_blob.channel(q);
int i = 0;
#if __AVX__
for (; i + 7 < size; i += 8)
{
__m256 _p = _mm256_loadu_ps(ptr);
__m256 _outp = _mm256_mul_ps(_p, _p);
_mm256_storeu_ps(outptr, _outp);
ptr += 8;
outptr += 8;
}
#endif // __AVX__
for (; i < size; i++)
{
*outptr = *ptr * *ptr;
ptr++;
outptr++;
}
}
if (region_type == NormRegion_ACROSS_CHANNELS)
{
Mat square_sum;
square_sum.create(w, h, channels, elemsize, opt.workspace_allocator);
if (square_sum.empty())
return -100;
square_sum.fill(0.f);
const float alpha_div_size = alpha / local_size;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
// square sum
for (int p = q - local_size / 2; p <= q + local_size / 2; p++)
{
if (p < 0 || p >= channels)
continue;
const float* sptr = square_blob.channel(p);
float* ssptr = square_sum.channel(q);
int i = 0;
#if __AVX__
for (; i + 7 < size; i += 8)
{
__m256 _sp = _mm256_loadu_ps(sptr);
__m256 _ssp = _mm256_loadu_ps(ssptr);
_ssp = _mm256_add_ps(_ssp, _sp);
_mm256_storeu_ps(ssptr, _ssp);
sptr += 8;
ssptr += 8;
}
#endif // __AVX__
for (; i < size; i++)
{
*ssptr += *sptr;
sptr++;
ssptr++;
}
}
float* ptr = bottom_top_blob.channel(q);
float* ssptr = square_sum.channel(q);
int i = 0;
#if __AVX__
__m256 _bias = _mm256_set1_ps(bias);
__m256 _ads = _mm256_set1_ps(alpha_div_size);
__m256 _mb = _mm256_set1_ps(-beta);
for (; i + 7 < size; i += 8)
{
__m256 _p = _mm256_loadu_ps(ptr);
__m256 _ssp = _mm256_loadu_ps(ssptr);
_ssp = _mm256_mul_ps(_ssp, _ads);
_ssp = _mm256_add_ps(_ssp, _bias);
_ssp = pow256_ps(_ssp, _mb);
_p = _mm256_mul_ps(_p, _ssp);
_mm256_storeu_ps(ptr, _p);
ssptr += 8;
ptr += 8;
}
#endif // __AVX__
for (; i < size; i++)
{
*ptr = *ptr * powf(bias + alpha_div_size * *ssptr, -beta);
ssptr++;
ptr++;
}
}
}
else if (region_type == NormRegion_WITHIN_CHANNEL)
{
int outw = w;
int outh = h;
Mat square_blob_bordered = square_blob;
int pad = local_size / 2;
if (pad > 0)
{
Option opt_b = opt;
opt_b.blob_allocator = opt.workspace_allocator;
copy_make_border(square_blob, square_blob_bordered, pad, local_size - pad - 1, pad, local_size - pad - 1, BORDER_CONSTANT, 0.f, opt_b);
if (square_blob_bordered.empty())
return -100;
w = square_blob_bordered.w;
h = square_blob_bordered.h;
}
const int maxk = local_size * local_size;
const float alpha_div_size = alpha / maxk;
// norm window offsets
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
int gap = w - local_size;
for (int i = 0; i < local_size; i++)
{
for (int j = 0; j < local_size; j++)
{
space_ofs[p1] = p2;
p1++;
p2++;
}
p2 += gap;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
float* ptr = bottom_top_blob.channel(q);
const Mat m = square_blob_bordered.channel(q);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
const float* sptr = m.row(i) + j;
float ss = 0.f;
for (int k = 0; k < maxk; k++)
{
float val = sptr[space_ofs[k]];
ss += val;
}
ptr[j] = ptr[j] * powf(bias + alpha_div_size * ss, -beta);
}
ptr += outw;
}
}
}
return 0;
} | movq (%rdi), %rax
addq -0x58(%rax), %rdi
jmp 0x1dd838
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/lrn_x86_fma.cpp |
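The three instructions above are the entire thunk: load the vtable pointer, read the `this`-adjustment stored at offset -0x58 in the vtable, apply it to `%rdi`, and tail-jump into the real `forward_inplace`. Such thunks appear because the layer class inherits virtually from its base. A minimal sketch of the pattern, with hypothetical class names:

// Hypothetical reduction of the situation that makes the compiler emit a
// "virtual thunk": a virtual base whose override lives in a derived class
// at a different this-offset.
struct Base
{
    virtual int forward() { return 0; }
    virtual ~Base() {}
};

struct Derived : virtual Base
{
    int forward() override { return 1; } // also gets "virtual thunk to Derived::forward"
};

int call(Base* b)
{
    // Dispatch may land on the thunk, which fixes up `this` before
    // falling through to Derived::forward.
    return b->forward();
}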
ncnn::MemoryData::load_model(ncnn::ModelBin const&) | int MemoryData::load_model(const ModelBin& mb)
{
if (d != 0)
{
data = mb.load(w, h, d, c, 1);
}
else if (c != 0)
{
data = mb.load(w, h, c, 1);
}
else if (h != 0)
{
data = mb.load(w, h, 1);
}
else if (w != 0)
{
data = mb.load(w, 1);
}
    else // 0 0 0: w, h, d, c are all zero
{
data.create(1);
}
if (data.empty())
return -100;
return 0;
} | pushq %r14
pushq %rbx
subq $0x58, %rsp
movq %rdi, %rbx
movl 0xd8(%rdi), %r8d
testl %r8d, %r8d
je 0x1df233
movl 0xd0(%rbx), %edx
movl 0xd4(%rbx), %ecx
movl 0xdc(%rbx), %r9d
movq (%rsi), %rax
movl $0x1, (%rsp)
leaq 0x10(%rsp), %r14
movq %r14, %rdi
callq *0x28(%rax)
leaq 0xe0(%rbx), %rcx
movq 0x8(%r14), %rax
cmpq %r14, %rcx
je 0x1df407
testq %rax, %rax
je 0x1df1f8
lock
incl (%rax)
movq 0xe8(%rbx), %rax
testq %rax, %rax
je 0x1df3ab
lock
decl (%rax)
jne 0x1df3ab
movq 0xe0(%rbx), %rsi
movq 0x100(%rbx), %rdi
testq %rdi, %rdi
je 0x1df3a3
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1df3ab
movl 0xdc(%rbx), %r8d
testl %r8d, %r8d
je 0x1df2b4
movl 0xd0(%rbx), %edx
movl 0xd4(%rbx), %ecx
movq (%rsi), %rax
leaq 0x10(%rsp), %r14
pushq $0x1
popq %r9
movq %r14, %rdi
callq *0x20(%rax)
leaq 0xe0(%rbx), %rcx
movq 0x8(%r14), %rax
cmpq %r14, %rcx
je 0x1df4b4
testq %rax, %rax
je 0x1df279
lock
incl (%rax)
movq 0xe8(%rbx), %rax
testq %rax, %rax
je 0x1df458
lock
decl (%rax)
jne 0x1df458
movq 0xe0(%rbx), %rsi
movq 0x100(%rbx), %rdi
testq %rdi, %rdi
je 0x1df450
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1df458
movl 0xd4(%rbx), %ecx
testl %ecx, %ecx
je 0x1df32d
movl 0xd0(%rbx), %edx
movq (%rsi), %rax
leaq 0x10(%rsp), %r14
pushq $0x1
popq %r8
movq %r14, %rdi
callq *0x18(%rax)
leaq 0xe0(%rbx), %rcx
movq 0x8(%r14), %rax
cmpq %r14, %rcx
je 0x1df548
testq %rax, %rax
je 0x1df2f2
lock
incl (%rax)
movq 0xe8(%rbx), %rax
testq %rax, %rax
je 0x1df4ec
lock
decl (%rax)
jne 0x1df4ec
movq 0xe0(%rbx), %rsi
movq 0x100(%rbx), %rdi
testq %rdi, %rdi
je 0x1df4e4
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1df4ec
movl 0xd0(%rbx), %edx
testl %edx, %edx
je 0x1df437
movq (%rsi), %rax
leaq 0x10(%rsp), %r14
pushq $0x1
popq %rcx
movq %r14, %rdi
callq *0x10(%rax)
leaq 0xe0(%rbx), %rcx
movq 0x8(%r14), %rax
cmpq %r14, %rcx
je 0x1df5dc
testq %rax, %rax
je 0x1df368
lock
incl (%rax)
movq 0xe8(%rbx), %rax
testq %rax, %rax
je 0x1df580
lock
decl (%rax)
jne 0x1df580
movq 0xe0(%rbx), %rsi
movq 0x100(%rbx), %rdi
testq %rdi, %rdi
je 0x1df578
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1df580
movq %rsi, %rdi
callq 0x5f3e0
movq 0x10(%rsp), %rax
movq %rax, 0xe0(%rbx)
movq 0x18(%rsp), %rax
movq %rax, 0xe8(%rbx)
movq 0x20(%rsp), %rcx
movq %rcx, 0xf0(%rbx)
movl 0x28(%rsp), %ecx
movl %ecx, 0xf8(%rbx)
movq 0x30(%rsp), %rcx
movq %rcx, 0x100(%rbx)
movups 0x38(%rsp), %xmm0
movups %xmm0, 0x108(%rbx)
movl 0x48(%rsp), %ecx
movl %ecx, 0x118(%rbx)
movq 0x50(%rsp), %rcx
movq %rcx, 0x120(%rbx)
testq %rax, %rax
je 0x1df605
lock
decl (%rax)
jne 0x1df605
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x1df5fd
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1df605
leaq 0xe0(%rbx), %rdi
pushq $0x1
popq %rsi
pushq $0x4
popq %rdx
xorl %ecx, %ecx
callq 0x635fa
jmp 0x1df605
movq %rsi, %rdi
callq 0x5f3e0
movq 0x10(%rsp), %rax
movq %rax, 0xe0(%rbx)
movq 0x18(%rsp), %rax
movq %rax, 0xe8(%rbx)
movq 0x20(%rsp), %rcx
movq %rcx, 0xf0(%rbx)
movl 0x28(%rsp), %ecx
movl %ecx, 0xf8(%rbx)
movq 0x30(%rsp), %rcx
movq %rcx, 0x100(%rbx)
movups 0x38(%rsp), %xmm0
movups %xmm0, 0x108(%rbx)
movl 0x48(%rsp), %ecx
movl %ecx, 0x118(%rbx)
movq 0x50(%rsp), %rcx
movq %rcx, 0x120(%rbx)
testq %rax, %rax
je 0x1df605
lock
decl (%rax)
jne 0x1df605
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x1df5fd
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1df605
movq %rsi, %rdi
callq 0x5f3e0
movq 0x10(%rsp), %rax
movq %rax, 0xe0(%rbx)
movq 0x18(%rsp), %rax
movq %rax, 0xe8(%rbx)
movq 0x20(%rsp), %rcx
movq %rcx, 0xf0(%rbx)
movl 0x28(%rsp), %ecx
movl %ecx, 0xf8(%rbx)
movq 0x30(%rsp), %rcx
movq %rcx, 0x100(%rbx)
movups 0x38(%rsp), %xmm0
movups %xmm0, 0x108(%rbx)
movl 0x48(%rsp), %ecx
movl %ecx, 0x118(%rbx)
movq 0x50(%rsp), %rcx
movq %rcx, 0x120(%rbx)
testq %rax, %rax
je 0x1df605
lock
decl (%rax)
jne 0x1df605
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x1df5fd
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1df605
movq %rsi, %rdi
callq 0x5f3e0
movq 0x10(%rsp), %rax
movq %rax, 0xe0(%rbx)
movq 0x18(%rsp), %rax
movq %rax, 0xe8(%rbx)
movq 0x20(%rsp), %rcx
movq %rcx, 0xf0(%rbx)
movl 0x28(%rsp), %ecx
movl %ecx, 0xf8(%rbx)
movq 0x30(%rsp), %rcx
movq %rcx, 0x100(%rbx)
movups 0x38(%rsp), %xmm0
movups %xmm0, 0x108(%rbx)
movl 0x48(%rsp), %ecx
movl %ecx, 0x118(%rbx)
movq 0x50(%rsp), %rcx
movq %rcx, 0x120(%rbx)
testq %rax, %rax
je 0x1df605
lock
decl (%rax)
jne 0x1df605
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x1df5fd
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1df605
movq %rsi, %rdi
callq 0x5f3e0
cmpq $0x0, 0xe0(%rbx)
je 0x1df625
movslq 0x118(%rbx), %rcx
imulq 0x120(%rbx), %rcx
xorl %eax, %eax
testq %rcx, %rcx
jne 0x1df628
pushq $-0x64
popq %rax
addq $0x58, %rsp
popq %rbx
popq %r14
retq
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x1df6fd
lock
decl (%rax)
jne 0x1df6fd
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x1df6ed
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1df6fd
jmp 0x1df707
jmp 0x1df707
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x1df6fd
lock
decl (%rax)
jne 0x1df6fd
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x1df6ed
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1df6fd
jmp 0x1df707
jmp 0x1df707
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x1df6fd
lock
decl (%rax)
jne 0x1df6fd
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x1df6ed
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1df6fd
jmp 0x1df707
jmp 0x1df707
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x1df6fd
lock
decl (%rax)
jne 0x1df6fd
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
jne 0x1df6f7
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x1df6fd
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
jmp 0x1df707
movq %rax, %rdi
callq 0x61d68
nop
| /csukuangfj[P]ncnn/src/layer/memorydata.cpp |
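`load_model` dispatches on whichever extent is non-zero, highest rank first: a 4D weight is detected by `d`, a 3D one by `c`, and so on down to a single scalar when every extent is zero. A self-contained sketch of the same dispatch order, with a hypothetical `Shape` standing in for the layer's `w`/`h`/`d`/`c` fields:

#include <cstdio>

// Hypothetical stand-in for the shape members of MemoryData.
struct Shape
{
    int w, h, d, c;
};

// Mirrors the branch order in load_model: probe the highest-rank extent
// first, falling back to data.create(1) when everything is zero.
int rank(const Shape& s)
{
    if (s.d != 0) return 4; // mb.load(w, h, d, c, 1)
    if (s.c != 0) return 3; // mb.load(w, h, c, 1)
    if (s.h != 0) return 2; // mb.load(w, h, 1)
    if (s.w != 0) return 1; // mb.load(w, 1)
    return 0;               // 0 0 0 -> data.create(1)
}

int main()
{
    Shape s = {8, 4, 0, 3};
    std::printf("rank = %d\n", rank(s)); // prints "rank = 3"
    return 0;
}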
ncnn::MVN::forward(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const | int MVN::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int size = w * h;
top_blob.create(w, h, channels, elemsize, opt.blob_allocator);
if (top_blob.empty())
return -100;
// prepare sum per channel
Mat sum(channels, elemsize, opt.workspace_allocator);
if (sum.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
const float* ptr = bottom_blob.channel(q);
float s = 0.f;
for (int i = 0; i < size; i++)
{
s += ptr[i];
}
sum[q] = s;
}
if (across_channels)
{
// compute mean across channels
float mean = 0.f;
for (int q = 0; q < channels; q++)
{
mean += sum[q];
}
mean = mean / (channels * size);
// subtract mean
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
const float* ptr = bottom_blob.channel(q);
float* outptr = top_blob.channel(q);
for (int i = 0; i < size; i++)
{
outptr[i] = ptr[i] - mean;
}
}
}
else
{
// subtract mean
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
const float* ptr = bottom_blob.channel(q);
float* outptr = top_blob.channel(q);
float mean = sum[q] / size;
for (int i = 0; i < size; i++)
{
outptr[i] = ptr[i] - mean;
}
}
}
if (normalize_variance)
{
// prepare squared sum per channel
Mat sqsum(channels, elemsize, opt.workspace_allocator);
if (sqsum.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
const float* ptr = top_blob.channel(q);
float s = 0.f;
for (int i = 0; i < size; i++)
{
s += ptr[i] * ptr[i];
}
sqsum[q] = s;
}
if (across_channels)
{
// compute squared mean across channels
float sqmean = 0.f;
for (int q = 0; q < channels; q++)
{
sqmean += sqsum[q];
}
sqmean = sqmean / (channels * size);
// normalize variance
float norm_var = sqrtf(sqmean) + eps;
float norm_var_inv = 1.f / norm_var;
// apply normalize_variance
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
float* outptr = top_blob.channel(q);
for (int i = 0; i < size; i++)
{
outptr[i] = outptr[i] * norm_var_inv;
}
}
}
else
{
// apply normalize_variance
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
float* outptr = top_blob.channel(q);
float sqmean = sqsum[q] / size;
            float norm_var = sqrtf(sqmean) + eps;
float norm_var_inv = 1.f / norm_var;
for (int i = 0; i < size; i++)
{
outptr[i] = outptr[i] * norm_var_inv;
}
}
}
}
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xb8, %rsp
movq %rdx, %r14
movq %rsi, %r12
movq %rdi, 0x8(%rsp)
movl 0x2c(%rsi), %ebp
movl 0x30(%rsi), %r15d
movl 0x38(%rsi), %r13d
movq 0x10(%rsi), %rbx
movq %rcx, 0x10(%rsp)
movq 0x8(%rcx), %r9
movq %rdx, %rdi
movl %ebp, %esi
movl %r15d, %edx
movl %r13d, %ecx
movq %rbx, %r8
callq 0x63810
pushq $-0x64
popq %rax
cmpq $0x0, (%r14)
je 0x1dfd1f
movslq 0x38(%r14), %rcx
imulq 0x40(%r14), %rcx
testq %rcx, %rcx
je 0x1dfd1f
movl %r13d, %esi
movl %r15d, %r13d
movq 0x10(%rsp), %rax
movq 0x10(%rax), %rcx
leaq 0x70(%rsp), %r15
andq $0x0, 0x40(%r15)
xorps %xmm0, %xmm0
movaps %xmm0, (%r15)
movups %xmm0, 0xc(%r15)
movaps %xmm0, 0x20(%r15)
movups %xmm0, 0x2c(%r15)
movq %r15, %rdi
movq %rbx, %rdx
movl %esi, %ebx
movq %rdx, 0x18(%rsp)
callq 0x635fa
movq (%r15), %r8
pushq $-0x64
popq %rax
testq %r8, %r8
je 0x1dfce8
movslq 0xa8(%rsp), %rcx
imulq 0xb0(%rsp), %rcx
testq %rcx, %rcx
je 0x1dfce8
imull %ebp, %r13d
movq (%r12), %rcx
movq 0x40(%r12), %rdx
imulq 0x10(%r12), %rdx
xorl %esi, %esi
testl %r13d, %r13d
movl $0x0, %r15d
movl %r13d, 0x4(%rsp)
cmovgl %r13d, %r15d
movl %ebx, %r13d
testl %ebx, %ebx
movl $0x0, %ebp
cmovgl %ebx, %ebp
movq 0x8(%rsp), %r10
cmpq %rbp, %rsi
je 0x1df9f8
xorps %xmm0, %xmm0
xorl %edi, %edi
cmpq %rdi, %r15
je 0x1df9ea
addss (%rcx,%rdi,4), %xmm0
incq %rdi
jmp 0x1df9db
movss %xmm0, (%r8,%rsi,4)
incq %rsi
addq %rdx, %rcx
jmp 0x1df9d1
cmpl $0x0, 0xd4(%r10)
je 0x1dfa79
xorps %xmm0, %xmm0
movq 0x70(%rsp), %rdx
xorl %ecx, %ecx
cmpq %rcx, %rbp
je 0x1dfa1b
addss (%rdx,%rcx,4), %xmm0
incq %rcx
jmp 0x1dfa0c
movl %r13d, %ecx
imull 0x4(%rsp), %ecx
cvtsi2ss %ecx, %xmm1
divss %xmm1, %xmm0
movq (%r12), %r9
movq 0x40(%r12), %rcx
movq 0x40(%r14), %rdx
imulq 0x10(%r14), %rdx
movq (%r14), %rsi
imulq 0x10(%r12), %rcx
xorl %edi, %edi
cmpq %rbp, %rdi
je 0x1dfae5
xorl %r8d, %r8d
cmpq %r8, %r15
je 0x1dfa6e
movss (%r9,%r8,4), %xmm1
subss %xmm0, %xmm1
movss %xmm1, (%rsi,%r8,4)
incq %r8
jmp 0x1dfa54
incq %rdi
addq %rdx, %rsi
addq %rcx, %r9
jmp 0x1dfa48
movq (%r12), %r11
movq 0x40(%r12), %rcx
movq (%r14), %rdx
movq 0x40(%r14), %rsi
imulq 0x10(%r14), %rsi
movq 0x70(%rsp), %rdi
cvtsi2ssl 0x4(%rsp), %xmm1
movss 0x20f1e7(%rip), %xmm0 # 0x3eec88
divss %xmm1, %xmm0
imulq 0x10(%r12), %rcx
xorl %r8d, %r8d
cmpq %rbp, %r8
je 0x1dfae5
movss (%rdi,%r8,4), %xmm1
mulss %xmm0, %xmm1
xorl %r9d, %r9d
cmpq %r9, %r15
je 0x1dfada
movss (%r11,%r9,4), %xmm2
subss %xmm1, %xmm2
movss %xmm2, (%rdx,%r9,4)
incq %r9
jmp 0x1dfac0
incq %r8
addq %rsi, %rdx
addq %rcx, %r11
jmp 0x1dfaae
cmpl $0x0, 0xd0(%r10)
je 0x1dfce6
movl %eax, %ebx
movq 0x10(%rsp), %rax
movq 0x10(%rax), %rcx
leaq 0x20(%rsp), %rdi
andq $0x0, 0x40(%rdi)
xorps %xmm0, %xmm0
movaps %xmm0, (%rdi)
movups %xmm0, 0xc(%rdi)
movaps %xmm0, 0x20(%rdi)
movups %xmm0, 0x2c(%rdi)
movl %r13d, %esi
movq 0x18(%rsp), %rdx
callq 0x635fa
movq 0x20(%rsp), %r9
testq %r9, %r9
movl %ebx, %eax
je 0x1dfb85
movslq 0x58(%rsp), %rcx
imulq 0x60(%rsp), %rcx
testq %rcx, %rcx
je 0x1dfb85
movq (%r14), %rcx
movq 0x40(%r14), %rdx
imulq 0x10(%r14), %rdx
xorl %esi, %esi
movq 0x8(%rsp), %r8
cmpq %rbp, %rsi
je 0x1dfbbc
xorps %xmm0, %xmm0
xorl %edi, %edi
cmpq %rdi, %r15
je 0x1dfb77
movss (%rcx,%rdi,4), %xmm1
mulss %xmm1, %xmm1
addss %xmm1, %xmm0
incq %rdi
jmp 0x1dfb60
movss %xmm0, (%r9,%rsi,4)
incq %rsi
addq %rdx, %rcx
jmp 0x1dfb56
movq 0x28(%rsp), %rcx
testq %rcx, %rcx
je 0x1dfce8
lock
decl (%rcx)
jne 0x1dfce8
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x1dfcd2
movq (%rdi), %rax
callq *0x18(%rax)
movl %ebx, %eax
jmp 0x1dfce8
cmpl $0x0, 0xd4(%r8)
je 0x1dfc40
xorps %xmm0, %xmm0
movq 0x20(%rsp), %rax
xorl %ecx, %ecx
cmpq %rcx, %rbp
je 0x1dfbdf
addss (%rax,%rcx,4), %xmm0
incq %rcx
jmp 0x1dfbd0
imull 0x4(%rsp), %r13d
cvtsi2ss %r13d, %xmm1
divss %xmm1, %xmm0
sqrtss %xmm0, %xmm1
addss 0xd8(%r8), %xmm1
movq 0x40(%r14), %rax
imulq 0x10(%r14), %rax
movq (%r14), %rcx
movss 0x20f079(%rip), %xmm0 # 0x3eec88
divss %xmm1, %xmm0
xorl %edx, %edx
cmpq %rbp, %rdx
je 0x1dfcac
xorl %esi, %esi
cmpq %rsi, %r15
je 0x1dfc38
movss (%rcx,%rsi,4), %xmm1
mulss %xmm0, %xmm1
movss %xmm1, (%rcx,%rsi,4)
incq %rsi
jmp 0x1dfc20
incq %rdx
addq %rax, %rcx
jmp 0x1dfc15
movq (%r14), %rax
movq 0x40(%r14), %rcx
imulq 0x10(%r14), %rcx
movq 0x20(%rsp), %rdx
cvtsi2ssl 0x4(%rsp), %xmm2
movss 0x20f029(%rip), %xmm0 # 0x3eec88
movaps %xmm0, %xmm1
divss %xmm2, %xmm1
xorl %esi, %esi
cmpq %rbp, %rsi
je 0x1dfcac
movss (%rdx,%rsi,4), %xmm2
mulss %xmm1, %xmm2
sqrtss %xmm2, %xmm3
addss 0xd8(%r8), %xmm3
movaps %xmm0, %xmm2
divss %xmm3, %xmm2
xorl %edi, %edi
cmpq %rdi, %r15
je 0x1dfca4
movss (%rax,%rdi,4), %xmm3
mulss %xmm2, %xmm3
movss %xmm3, (%rax,%rdi,4)
incq %rdi
jmp 0x1dfc8c
incq %rsi
addq %rcx, %rax
jmp 0x1dfc68
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x1dfce6
lock
decl (%rax)
jne 0x1dfce6
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x1dfcde
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1dfce6
movq %rsi, %rdi
callq 0x5f3e0
movl %ebx, %eax
jmp 0x1dfce8
movq %rsi, %rdi
callq 0x5f3e0
xorl %eax, %eax
movq 0x78(%rsp), %rcx
testq %rcx, %rcx
je 0x1dfd1f
lock
decl (%rcx)
jne 0x1dfd1f
movl %eax, %ebx
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x1dfd15
movq (%rdi), %rax
callq *0x18(%rax)
movl %ebx, %eax
jmp 0x1dfd1f
movq %rsi, %rdi
callq 0x5f3e0
movl %ebx, %eax
addq $0xb8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x1dfd35
jmp 0x1dfd35
movq %rax, %rdi
callq 0x61d68
movq %rax, %rbx
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x1dfd71
lock
decl (%rax)
jne 0x1dfd71
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
jne 0x1dfd6b
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x1dfd71
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
jmp 0x1dfd35
nop
| /csukuangfj[P]ncnn/src/layer/mvn.cpp |
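Per channel (with `across_channels` off), the layer computes y = (x - mean) / (sqrt(mean((x - mean)^2)) + eps) in three passes: channel sums, mean subtraction, then variance normalization. A single-channel reference sketch of the same arithmetic (a hypothetical helper, not ncnn API; assumes a non-empty input):

#include <cmath>
#include <vector>

// Reference of the per-channel MVN path: subtract the mean, then scale by
// 1 / (sqrt(mean of squared residuals) + eps).
void mvn_channel(std::vector<float>& x, float eps)
{
    const int n = (int)x.size();
    float mean = 0.f;
    for (float v : x) mean += v;
    mean /= n;

    float sqmean = 0.f;
    for (float& v : x)
    {
        v -= mean;       // first pass of the layer stores x - mean
        sqmean += v * v; // second pass accumulates squared residuals
    }
    sqmean /= n;

    const float norm_var_inv = 1.f / (sqrtf(sqmean) + eps);
    for (float& v : x)
        v *= norm_var_inv;
}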
ncnn::Pooling::load_param(ncnn::ParamDict const&) | int Pooling::load_param(const ParamDict& pd)
{
pooling_type = pd.get(0, 0);
kernel_w = pd.get(1, 0);
kernel_h = pd.get(11, kernel_w);
stride_w = pd.get(2, 1);
stride_h = pd.get(12, stride_w);
pad_left = pd.get(3, 0);
pad_right = pd.get(14, pad_left);
pad_top = pd.get(13, pad_left);
pad_bottom = pd.get(15, pad_top);
global_pooling = pd.get(4, 0);
pad_mode = pd.get(5, 0);
avgpool_count_include_pad = pd.get(6, 0);
adaptive_pooling = pd.get(7, 0);
out_w = pd.get(8, 0);
out_h = pd.get(18, out_w);
return 0;
} | pushq %rbp
pushq %r14
pushq %rbx
movq %rsi, %r14
movq %rdi, %rbx
movq %rsi, %rdi
xorl %esi, %esi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0xd0(%rbx)
pushq $0x1
popq %rbp
movq %r14, %rdi
movl %ebp, %esi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0xd4(%rbx)
pushq $0xb
popq %rsi
movq %r14, %rdi
movl %eax, %edx
callq 0x718a6
movl %eax, 0xd8(%rbx)
pushq $0x2
popq %rsi
movq %r14, %rdi
movl %ebp, %edx
callq 0x718a6
movl %eax, 0xdc(%rbx)
pushq $0xc
popq %rsi
movq %r14, %rdi
movl %eax, %edx
callq 0x718a6
movl %eax, 0xe0(%rbx)
pushq $0x3
popq %rsi
movq %r14, %rdi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0xe4(%rbx)
pushq $0xe
popq %rsi
movq %r14, %rdi
movl %eax, %edx
callq 0x718a6
movl %eax, 0xe8(%rbx)
movl 0xe4(%rbx), %edx
pushq $0xd
popq %rsi
movq %r14, %rdi
callq 0x718a6
movl %eax, 0xec(%rbx)
pushq $0xf
popq %rsi
movq %r14, %rdi
movl %eax, %edx
callq 0x718a6
movl %eax, 0xf0(%rbx)
pushq $0x4
popq %rsi
movq %r14, %rdi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0xf4(%rbx)
pushq $0x5
popq %rsi
movq %r14, %rdi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0xf8(%rbx)
pushq $0x6
popq %rsi
movq %r14, %rdi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0xfc(%rbx)
pushq $0x7
popq %rsi
movq %r14, %rdi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0x100(%rbx)
pushq $0x8
popq %rsi
movq %r14, %rdi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0x104(%rbx)
pushq $0x12
popq %rsi
movq %r14, %rdi
movl %eax, %edx
callq 0x718a6
movl %eax, 0x108(%rbx)
xorl %eax, %eax
popq %rbx
popq %r14
popq %rbp
retq
nop
| /csukuangfj[P]ncnn/src/layer/pooling.cpp |
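Note the cascading defaults: `kernel_h` defaults to `kernel_w`, `stride_h` to `stride_w`, `pad_right` and `pad_top` fall back to `pad_left`, and `pad_bottom` falls back to `pad_top`, so a param file only has to spell out the asymmetric cases. A miniature sketch of that get-with-default pattern (`MiniParamDict` is hypothetical, not the real ParamDict):

#include <map>

// Miniature of ParamDict::get(id, default): return the stored value for
// an id, or the caller-supplied default when the id is absent.
struct MiniParamDict
{
    std::map<int, int> kv;
    int get(int id, int def) const
    {
        auto it = kv.find(id);
        return it == kv.end() ? def : it->second;
    }
};

int main()
{
    MiniParamDict pd;
    pd.kv[1] = 3; // kernel_w given, id 11 (kernel_h) omitted
    int kernel_w = pd.get(1, 0);
    int kernel_h = pd.get(11, kernel_w); // cascades to 3
    return kernel_h == 3 ? 0 : 1;
}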
ncnn::Pooling_x86_fma::create_pipeline(ncnn::Option const&) | int Pooling_x86_fma::create_pipeline(const Option& /*opt*/)
{
if (adaptive_pooling)
{
support_packing = false;
support_bf16_storage = false;
support_fp16_storage = false;
support_int8_storage = false;
support_tensor_storage = false;
}
return 0;
} | movq (%rdi), %rax
movq -0x18(%rax), %rcx
cmpl $0x0, 0x100(%rdi,%rcx)
je 0x1e431f
xorl %edx, %edx
movb %dl, 0xb(%rdi,%rcx)
movq -0x18(%rax), %rcx
movb %dl, 0xc(%rdi,%rcx)
movq -0x18(%rax), %rcx
movb %dl, 0xd(%rdi,%rcx)
movq -0x18(%rax), %rcx
movb %dl, 0xe(%rdi,%rcx)
movq -0x18(%rax), %rax
movb %dl, 0x10(%rdi,%rax)
xorl %eax, %eax
retq
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/pooling_x86_fma.cpp |
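Clearing the `support_*` flags during pipeline creation declares, before any forward call, that the adaptive-pooling path only handles plain unpacked fp32; the framework then converts blob layouts at the layer boundary instead of inside the layer. A rough sketch of that handshake under hypothetical names (the real conversion lives in ncnn's Net/Layer plumbing, not in this function):

// Hypothetical capability check: the runtime unpacks/casts blobs the
// layer has declared it cannot take.
struct LayerCaps
{
    bool support_packing = true;
    bool support_bf16_storage = true;
};

struct Blob
{
    int elempack = 4;  // e.g. 4 floats packed per element
    bool bf16 = false;
};

void convert_for(const LayerCaps& caps, Blob& b)
{
    if (!caps.support_packing && b.elempack != 1)
        b.elempack = 1; // stand-in for a convert_packing(...) call
    if (!caps.support_bf16_storage && b.bf16)
        b.bf16 = false; // stand-in for a cast back to fp32
}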
ncnn::binary_op_broadcast_inner(ncnn::Mat const&, ncnn::Mat const&, ncnn::Mat&, int, ncnn::Option const&) | static int binary_op_broadcast_inner(const Mat& a, const Mat& b, Mat& c, int op_type, const Option& opt)
{
// squeeze inner axes
Mat b2 = b;
if (b.dims == 2 && b.w == 1)
b2 = b.reshape(b.h);
else if (b.dims == 3 && b.h == 1)
b2 = b.reshape(b.c);
else if (b.dims == 3 && b.w == 1)
b2 = b.reshape(b.h, b.c);
else if (b.dims == 4 && b.d == 1)
b2 = b.reshape(b.c);
else if (b.dims == 4 && b.h == 1)
b2 = b.reshape(b.d, b.c);
else if (b.dims == 4 && b.w == 1)
b2 = b.reshape(b.h, b.d, b.c);
using namespace BinaryOp_x86_fma_functor;
if (op_type == BinaryOp::Operation_ADD) return binary_op_broadcast_inner<binary_op_add>(a, b2, c, opt);
if (op_type == BinaryOp::Operation_SUB) return binary_op_broadcast_inner<binary_op_sub>(a, b2, c, opt);
if (op_type == BinaryOp::Operation_MUL) return binary_op_broadcast_inner<binary_op_mul>(a, b2, c, opt);
if (op_type == BinaryOp::Operation_DIV) return binary_op_broadcast_inner<binary_op_div>(a, b2, c, opt);
if (op_type == BinaryOp::Operation_MAX) return binary_op_broadcast_inner<binary_op_max>(a, b2, c, opt);
if (op_type == BinaryOp::Operation_MIN) return binary_op_broadcast_inner<binary_op_min>(a, b2, c, opt);
if (op_type == BinaryOp::Operation_POW) return binary_op_broadcast_inner<binary_op_pow>(a, b2, c, opt);
if (op_type == BinaryOp::Operation_RSUB) return binary_op_broadcast_inner<binary_op_rsub>(a, b2, c, opt);
if (op_type == BinaryOp::Operation_RDIV) return binary_op_broadcast_inner<binary_op_rdiv>(a, b2, c, opt);
if (op_type == BinaryOp::Operation_RPOW) return binary_op_broadcast_inner<binary_op_rpow>(a, b2, c, opt);
if (op_type == BinaryOp::Operation_ATAN2) return binary_op_broadcast_inner<binary_op_atan2>(a, b2, c, opt);
if (op_type == BinaryOp::Operation_RATAN2) return binary_op_broadcast_inner<binary_op_ratan2>(a, b2, c, opt);
// should never reach here
return 0;
} | pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x1a0, %rsp # imm = 0x1A0
movl %ecx, %r13d
movq %rdx, 0x48(%rsp)
movq %rdi, 0x20(%rsp)
movq (%rsi), %rbx
movq 0x8(%rsi), %rcx
movq 0x10(%rsi), %rax
movq %rax, 0x78(%rsp)
movq 0x20(%rsi), %rax
movq %rax, 0x50(%rsp)
vmovq 0x28(%rsi), %xmm1
movq 0x40(%rsi), %rax
movq %rax, 0x98(%rsp)
vmovdqa %xmm1, %xmm0
testq %rcx, %rcx
je 0x264d73
lock
incl (%rcx)
vmovq 0x28(%rsi), %xmm0
movq %rcx, 0x58(%rsp)
vmovd %xmm0, %ecx
movl %ecx, %eax
xorl $0x2, %eax
vpextrd $0x1, %xmm0, %edi
xorl $0x1, %edi
orl %edi, %eax
jne 0x264de3
movl 0x30(%rsi), %edx
leaq 0x140(%rsp), %rdi
xorl %ecx, %ecx
callq 0x62c8a
movq 0x148(%rsp), %rax
testq %rax, %rax
je 0x264db0
lock
incl (%rax)
movq 0x58(%rsp), %rax
testq %rax, %rax
je 0x264f79
lock
decl (%rax)
jne 0x264f79
movq 0x50(%rsp), %rdi
testq %rdi, %rdi
je 0x264f71
movq (%rdi), %rax
movq %rbx, %rsi
callq *0x18(%rax)
jmp 0x264f79
movl %ecx, %eax
xorl $0x3, %eax
movl 0x30(%rsi), %edx
movl %edx, %r8d
xorl $0x1, %r8d
movl %eax, %r9d
orl %r8d, %r9d
jne 0x264e4f
movl 0x38(%rsi), %edx
leaq 0x140(%rsp), %rdi
xorl %ecx, %ecx
callq 0x62c8a
movq 0x148(%rsp), %rax
testq %rax, %rax
je 0x264e1c
lock
incl (%rax)
movq 0x58(%rsp), %rax
testq %rax, %rax
je 0x26506e
lock
decl (%rax)
jne 0x26506e
movq 0x50(%rsp), %rdi
testq %rdi, %rdi
je 0x265066
movq (%rdi), %rax
movq %rbx, %rsi
callq *0x18(%rax)
jmp 0x26506e
orl %edi, %eax
jne 0x264ea9
movl 0x38(%rsi), %ecx
leaq 0x140(%rsp), %rdi
xorl %r8d, %r8d
callq 0x62e4e
movq 0x148(%rsp), %rax
testq %rax, %rax
je 0x264e76
lock
incl (%rax)
movq 0x58(%rsp), %rax
testq %rax, %rax
je 0x265102
lock
decl (%rax)
jne 0x265102
movq 0x50(%rsp), %rdi
testq %rdi, %rdi
je 0x2650fa
movq (%rdi), %rax
movq %rbx, %rsi
callq *0x18(%rax)
jmp 0x265102
xorl $0x4, %ecx
movl 0x34(%rsi), %eax
movl %eax, %r9d
xorl $0x1, %r9d
orl %ecx, %r9d
jne 0x264f10
movl 0x38(%rsi), %edx
leaq 0x140(%rsp), %rdi
xorl %ecx, %ecx
callq 0x62c8a
movq 0x148(%rsp), %rax
testq %rax, %rax
je 0x264edd
lock
incl (%rax)
movq 0x58(%rsp), %rax
testq %rax, %rax
je 0x265196
lock
decl (%rax)
jne 0x265196
movq 0x50(%rsp), %rdi
testq %rdi, %rdi
je 0x26518e
movq (%rdi), %rax
movq %rbx, %rsi
callq *0x18(%rax)
jmp 0x265196
orl %ecx, %r8d
jne 0x265005
movl 0x38(%rsi), %ecx
leaq 0x140(%rsp), %rdi
movl %eax, %edx
xorl %r8d, %r8d
callq 0x62e4e
movq 0x148(%rsp), %rax
testq %rax, %rax
je 0x264f3e
lock
incl (%rax)
movq 0x58(%rsp), %rax
testq %rax, %rax
je 0x26522a
lock
decl (%rax)
jne 0x26522a
movq 0x50(%rsp), %rdi
testq %rdi, %rdi
je 0x265222
movq (%rdi), %rax
movq %rbx, %rsi
callq *0x18(%rax)
jmp 0x26522a
movq %rbx, %rdi
callq 0x5f3e0
movq 0x140(%rsp), %rbx
movq 0x148(%rsp), %r14
movq 0x150(%rsp), %rax
movq %rax, 0x78(%rsp)
movq 0x160(%rsp), %rax
movq %rax, 0x50(%rsp)
vmovq 0x168(%rsp), %xmm1
movq 0x180(%rsp), %rax
movq %rax, 0x98(%rsp)
testq %r14, %r14
je 0x26534c
lock
decl (%r14)
jne 0x26534c
vmovdqa %xmm1, 0x80(%rsp)
movq 0x140(%rsp), %rsi
movq 0x160(%rsp), %rdi
testq %rdi, %rdi
je 0x26533b
movq (%rdi), %rax
callq *0x18(%rax)
vmovdqa 0x80(%rsp), %xmm1
jmp 0x26534c
orl %edi, %ecx
jne 0x265351
movl 0x38(%rsi), %r8d
leaq 0x140(%rsp), %rdi
movl %eax, %ecx
xorl %r9d, %r9d
callq 0x63020
movq 0x148(%rsp), %rax
testq %rax, %rax
je 0x265033
lock
incl (%rax)
movq 0x58(%rsp), %rax
testq %rax, %rax
je 0x2652be
lock
decl (%rax)
jne 0x2652be
movq 0x50(%rsp), %rdi
testq %rdi, %rdi
je 0x2652b6
movq (%rdi), %rax
movq %rbx, %rsi
callq *0x18(%rax)
jmp 0x2652be
movq %rbx, %rdi
callq 0x5f3e0
movq 0x140(%rsp), %rbx
movq 0x148(%rsp), %r14
movq 0x150(%rsp), %rax
movq %rax, 0x78(%rsp)
movq 0x160(%rsp), %rax
movq %rax, 0x50(%rsp)
vmovq 0x168(%rsp), %xmm1
movq 0x180(%rsp), %rax
movq %rax, 0x98(%rsp)
testq %r14, %r14
je 0x26534c
lock
decl (%r14)
jne 0x26534c
vmovdqa %xmm1, 0x80(%rsp)
movq 0x140(%rsp), %rsi
movq 0x160(%rsp), %rdi
testq %rdi, %rdi
je 0x26533b
movq (%rdi), %rax
callq *0x18(%rax)
vmovdqa 0x80(%rsp), %xmm1
jmp 0x26534c
movq %rbx, %rdi
callq 0x5f3e0
movq 0x140(%rsp), %rbx
movq 0x148(%rsp), %r14
movq 0x150(%rsp), %rax
movq %rax, 0x78(%rsp)
movq 0x160(%rsp), %rax
movq %rax, 0x50(%rsp)
vmovq 0x168(%rsp), %xmm1
movq 0x180(%rsp), %rax
movq %rax, 0x98(%rsp)
testq %r14, %r14
je 0x26534c
lock
decl (%r14)
jne 0x26534c
vmovdqa %xmm1, 0x80(%rsp)
movq 0x140(%rsp), %rsi
movq 0x160(%rsp), %rdi
testq %rdi, %rdi
je 0x26533b
movq (%rdi), %rax
callq *0x18(%rax)
vmovdqa 0x80(%rsp), %xmm1
jmp 0x26534c
movq %rbx, %rdi
callq 0x5f3e0
movq 0x140(%rsp), %rbx
movq 0x148(%rsp), %r14
movq 0x150(%rsp), %rax
movq %rax, 0x78(%rsp)
movq 0x160(%rsp), %rax
movq %rax, 0x50(%rsp)
vmovq 0x168(%rsp), %xmm1
movq 0x180(%rsp), %rax
movq %rax, 0x98(%rsp)
testq %r14, %r14
je 0x26534c
lock
decl (%r14)
jne 0x26534c
vmovdqa %xmm1, 0x80(%rsp)
movq 0x140(%rsp), %rsi
movq 0x160(%rsp), %rdi
testq %rdi, %rdi
je 0x26533b
movq (%rdi), %rax
callq *0x18(%rax)
vmovdqa 0x80(%rsp), %xmm1
jmp 0x26534c
movq %rbx, %rdi
callq 0x5f3e0
movq 0x140(%rsp), %rbx
movq 0x148(%rsp), %r14
movq 0x150(%rsp), %rax
movq %rax, 0x78(%rsp)
movq 0x160(%rsp), %rax
movq %rax, 0x50(%rsp)
vmovq 0x168(%rsp), %xmm1
movq 0x180(%rsp), %rax
movq %rax, 0x98(%rsp)
testq %r14, %r14
je 0x26534c
lock
decl (%r14)
jne 0x26534c
vmovdqa %xmm1, 0x80(%rsp)
movq 0x140(%rsp), %rsi
movq 0x160(%rsp), %rdi
testq %rdi, %rdi
je 0x26533b
movq (%rdi), %rax
callq *0x18(%rax)
vmovdqa 0x80(%rsp), %xmm1
jmp 0x26534c
movq %rbx, %rdi
callq 0x5f3e0
movq 0x140(%rsp), %rbx
movq 0x148(%rsp), %r14
movq 0x150(%rsp), %rax
movq %rax, 0x78(%rsp)
movq 0x160(%rsp), %rax
movq %rax, 0x50(%rsp)
vmovq 0x168(%rsp), %xmm1
movq 0x180(%rsp), %rax
movq %rax, 0x98(%rsp)
testq %r14, %r14
je 0x26534c
lock
decl (%r14)
jne 0x26534c
vmovdqa %xmm1, 0x80(%rsp)
movq 0x140(%rsp), %rsi
movq 0x160(%rsp), %rdi
testq %rdi, %rdi
je 0x26533b
movq (%rdi), %rax
callq *0x18(%rax)
vmovdqa 0x80(%rsp), %xmm1
jmp 0x26534c
movq %rsi, %rdi
callq 0x5f3e0
vmovdqa 0x80(%rsp), %xmm1
movq %r14, 0x58(%rsp)
movq %rbx, 0x18(%rsp)
cmpl $0xb, %r13d
ja 0x26bab3
movl %r13d, %eax
leaq 0x192102(%rip), %rcx # 0x3f746c
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
vmovd %xmm1, 0x38(%rsp)
vpextrd $0x1, %xmm1, 0x34(%rsp)
jmpq *%rax
movq 0x20(%rsp), %rdx
movl 0x2c(%rdx), %eax
movl 0x30(%rdx), %ecx
movl 0x34(%rdx), %r13d
movl 0x38(%rdx), %esi
movq %rsi, 0x60(%rsp)
movl 0x18(%rdx), %edi
movl 0x28(%rdx), %esi
cmpl $0x2, %esi
movq 0x18(%rsp), %r10
movq %r13, 0x80(%rsp)
jne 0x268001
cmpl $0x1, 0x38(%rsp)
jne 0x268001
movl %edi, %r8d
imull %eax, %r8d
xorl %r14d, %r14d
testl %ecx, %ecx
movl $0x0, %edx
cmovgl %ecx, %edx
movq %rdx, 0x40(%rsp)
cmpq 0x40(%rsp), %r14
je 0x267ff9
vmovd (%r10,%r14,4), %xmm0
cmpl $0x4, %edi
jne 0x2653fd
movq %r14, %rsi
shlq $0x4, %rsi
vmovdqu (%r10,%rsi), %xmm1
jmp 0x265416
vpshufd $0x0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
cmpl $0x8, %edi
jne 0x265416
movq %r14, %rsi
shlq $0x5, %rsi
vmovups (%r10,%rsi), %ymm2
jmp 0x26541c
vinsertf128 $0x1, %xmm1, %ymm1, %ymm2
movq 0x20(%rsp), %rdx
movslq 0x2c(%rdx), %rsi
movq %r14, %r13
movq %rsi, 0xb0(%rsp)
imulq %rsi, %r13
movq (%rdx), %r11
movq 0x10(%rdx), %r12
imulq %r12, %r13
addq %r11, %r13
movq 0x48(%rsp), %rdx
movslq 0x2c(%rdx), %rsi
movq %r14, %r9
movq %rsi, 0x28(%rsp)
imulq %rsi, %r9
movq (%rdx), %rbx
movq 0x10(%rdx), %r10
imulq %r10, %r9
addq %rbx, %r9
xorl %r15d, %r15d
xorl %esi, %esi
leal 0x7(%rsi), %edx
cmpl %r8d, %edx
jge 0x2654a8
vaddps (%r13), %ymm2, %ymm3
vmovups %ymm3, (%r9)
addq $0x20, %r13
addq $0x20, %r9
addl $0x8, %esi
addq $0x8, %r15
jmp 0x26546a
vaddps (%r13), %xmm1, %xmm2
vmovups %xmm2, (%r9)
addq $0x10, %r13
addq $0x10, %r9
addl $0x4, %esi
addq $0x4, %r15
leal 0x3(%rsi), %edx
cmpl %r8d, %edx
jl 0x26548e
imulq %r14, %r10
imulq 0x28(%rsp), %r10
addq %r10, %rbx
imulq %r14, %r12
imulq 0xb0(%rsp), %r12
addq %r12, %r11
movq 0x80(%rsp), %r13
cmpl %r8d, %r15d
jge 0x2654eb
vaddss (%r11,%r15,4), %xmm0, %xmm1
vmovss %xmm1, (%rbx,%r15,4)
incq %r15
jmp 0x2654d5
incq %r14
movq 0x18(%rsp), %r10
jmp 0x2653d8
movq 0x20(%rsp), %rax
movl 0x2c(%rax), %ecx
movl %ecx, 0x80(%rsp)
movl 0x30(%rax), %ecx
movq %rcx, 0x40(%rsp)
movl 0x34(%rax), %ecx
movq %rcx, 0xf8(%rsp)
movl 0x38(%rax), %ecx
movq %rcx, 0xf0(%rsp)
movl 0x18(%rax), %r12d
movl 0x28(%rax), %eax
cmpl $0x2, %eax
movq 0x18(%rsp), %rbx
movl %r12d, 0xdc(%rsp)
jne 0x26966d
cmpl $0x1, 0x38(%rsp)
jne 0x26966d
movl %r12d, %r15d
imull 0x80(%rsp), %r15d
xorl %r13d, %r13d
movq 0x40(%rsp), %rax
testl %eax, %eax
movl $0x0, %ecx
cmovgl %eax, %ecx
movq %rcx, 0x130(%rsp)
cmpq 0x130(%rsp), %r13
je 0x269665
vmovss (%rbx,%r13,4), %xmm0
cmpl $0x4, %r12d
vmovaps %xmm0, 0xb0(%rsp)
jne 0x2655ad
movq %r13, %rax
shlq $0x4, %rax
vmovups (%rbx,%rax), %xmm0
vmovaps %xmm0, 0xe0(%rsp)
jmp 0x2655cf
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmovaps %xmm0, 0xe0(%rsp)
cmpl $0x8, %r12d
jne 0x2655cf
movq %r13, %rax
shlq $0x5, %rax
vmovups (%rbx,%rax), %ymm0
jmp 0x2655d5
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
movq 0x20(%rsp), %rax
movslq 0x2c(%rax), %rcx
movq %r13, %r12
movq %rcx, 0xa0(%rsp)
imulq %rcx, %r12
movq (%rax), %rcx
movq 0x10(%rax), %rax
movq %rax, 0xd0(%rsp)
imulq %rax, %r12
movq %rcx, 0x28(%rsp)
addq %rcx, %r12
movq 0x48(%rsp), %rax
movslq 0x2c(%rax), %rcx
movq %r13, 0xa8(%rsp)
movq %rcx, 0xc8(%rsp)
imulq %rcx, %r13
movq (%rax), %rcx
movq 0x10(%rax), %rax
movq %rax, 0x138(%rsp)
imulq %rax, %r13
movq %rcx, 0x60(%rsp)
addq %rcx, %r13
vmovaps %ymm0, 0x140(%rsp)
xorl %r14d, %r14d
xorl %ebx, %ebx
leal 0x7(%rbx), %eax
cmpl %r15d, %eax
jge 0x2656db
vmovdqu (%r12), %ymm0
vmovdqa %ymm0, 0x100(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x100(%rsp), %rsi
leaq 0x140(%rsp), %rdx
callq 0x26d916
vmovdqu %ymm0, (%r13)
addq $0x20, %r12
addq $0x20, %r13
addl $0x8, %ebx
addq $0x8, %r14
jmp 0x26564e
vmovdqu (%r12), %xmm0
vmovdqa %xmm0, 0x100(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x100(%rsp), %rsi
leaq 0xe0(%rsp), %rdx
vzeroupper
callq 0x26daa0
vmovdqu %xmm0, (%r13)
addq $0x10, %r12
addq $0x10, %r13
addl $0x4, %ebx
addq $0x4, %r14
leal 0x3(%rbx), %eax
cmpl %r15d, %eax
jl 0x26569a
movq 0xa8(%rsp), %r13
movq 0x138(%rsp), %rax
imulq %r13, %rax
imulq 0xc8(%rsp), %rax
addq %rax, 0x60(%rsp)
movq 0xd0(%rsp), %rax
imulq %r13, %rax
imulq 0xa0(%rsp), %rax
addq %rax, 0x28(%rsp)
movq 0x18(%rsp), %rbx
movl 0xdc(%rsp), %r12d
cmpl %r15d, %r14d
vmovdqa 0xb0(%rsp), %xmm0
jge 0x26575d
movq 0x28(%rsp), %rax
vmovd (%rax,%r14,4), %xmm1
vzeroupper
callq 0x5f170
movq 0x60(%rsp), %rax
vmovd %xmm0, (%rax,%r14,4)
incq %r14
jmp 0x26572c
incq %r13
jmp 0x265573
movq 0x20(%rsp), %rdx
movl 0x2c(%rdx), %eax
movl 0x30(%rdx), %ecx
movl 0x34(%rdx), %r13d
movl 0x38(%rdx), %esi
movq %rsi, 0x60(%rsp)
movl 0x18(%rdx), %edi
movl 0x28(%rdx), %esi
cmpl $0x2, %esi
movq 0x18(%rsp), %r10
movq %r13, 0x80(%rsp)
jne 0x266967
cmpl $0x1, 0x38(%rsp)
jne 0x266967
movl %edi, %r8d
imull %eax, %r8d
xorl %r14d, %r14d
testl %ecx, %ecx
movl $0x0, %edx
cmovgl %ecx, %edx
movq %rdx, 0x40(%rsp)
cmpq 0x40(%rsp), %r14
je 0x26695f
vmovd (%r10,%r14,4), %xmm0
cmpl $0x4, %edi
jne 0x2657e1
movq %r14, %rsi
shlq $0x4, %rsi
vmovdqu (%r10,%rsi), %xmm1
jmp 0x2657fa
vpshufd $0x0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
cmpl $0x8, %edi
jne 0x2657fa
movq %r14, %rsi
shlq $0x5, %rsi
vmovups (%r10,%rsi), %ymm2
jmp 0x265800
vinsertf128 $0x1, %xmm1, %ymm1, %ymm2
movq 0x20(%rsp), %rdx
movslq 0x2c(%rdx), %rsi
movq %r14, %r13
movq %rsi, 0xb0(%rsp)
imulq %rsi, %r13
movq (%rdx), %r11
movq 0x10(%rdx), %r12
imulq %r12, %r13
addq %r11, %r13
movq 0x48(%rsp), %rdx
movslq 0x2c(%rdx), %rsi
movq %r14, %r9
movq %rsi, 0x28(%rsp)
imulq %rsi, %r9
movq (%rdx), %rbx
movq 0x10(%rdx), %r10
imulq %r10, %r9
addq %rbx, %r9
xorl %r15d, %r15d
xorl %esi, %esi
leal 0x7(%rsi), %edx
cmpl %r8d, %edx
jge 0x26588c
vmaxps (%r13), %ymm2, %ymm3
vmovups %ymm3, (%r9)
addq $0x20, %r13
addq $0x20, %r9
addl $0x8, %esi
addq $0x8, %r15
jmp 0x26584e
vmaxps (%r13), %xmm1, %xmm2
vmovups %xmm2, (%r9)
addq $0x10, %r13
addq $0x10, %r9
addl $0x4, %esi
addq $0x4, %r15
leal 0x3(%rsi), %edx
cmpl %r8d, %edx
jl 0x265872
imulq %r14, %r10
imulq 0x28(%rsp), %r10
addq %r10, %rbx
imulq %r14, %r12
imulq 0xb0(%rsp), %r12
addq %r12, %r11
movq 0x80(%rsp), %r13
cmpl %r8d, %r15d
jge 0x2658cf
vmaxss (%r11,%r15,4), %xmm0, %xmm1
vmovss %xmm1, (%rbx,%r15,4)
incq %r15
jmp 0x2658b9
incq %r14
movq 0x18(%rsp), %r10
jmp 0x2657bc
movq 0x20(%rsp), %rax
movl 0x2c(%rax), %ecx
movl %ecx, 0x80(%rsp)
movl 0x30(%rax), %ecx
movq %rcx, 0x40(%rsp)
movl 0x34(%rax), %ecx
movq %rcx, 0xf8(%rsp)
movl 0x38(%rax), %ecx
movq %rcx, 0xf0(%rsp)
movl 0x18(%rax), %r12d
movl 0x28(%rax), %eax
cmpl $0x2, %eax
movq 0x18(%rsp), %rbx
movl %r12d, 0xdc(%rsp)
jne 0x2698b9
cmpl $0x1, 0x38(%rsp)
jne 0x2698b9
movl %r12d, %r15d
imull 0x80(%rsp), %r15d
xorl %r13d, %r13d
movq 0x40(%rsp), %rax
testl %eax, %eax
movl $0x0, %ecx
cmovgl %eax, %ecx
movq %rcx, 0x130(%rsp)
cmpq 0x130(%rsp), %r13
je 0x2698b1
vmovss (%rbx,%r13,4), %xmm0
cmpl $0x4, %r12d
vmovaps %xmm0, 0xb0(%rsp)
jne 0x265991
movq %r13, %rax
shlq $0x4, %rax
vmovups (%rbx,%rax), %xmm0
vmovaps %xmm0, 0xe0(%rsp)
jmp 0x2659b3
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmovaps %xmm0, 0xe0(%rsp)
cmpl $0x8, %r12d
jne 0x2659b3
movq %r13, %rax
shlq $0x5, %rax
vmovups (%rbx,%rax), %ymm0
jmp 0x2659b9
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
movq 0x20(%rsp), %rax
movslq 0x2c(%rax), %rcx
movq %r13, %r12
movq %rcx, 0xa0(%rsp)
imulq %rcx, %r12
movq (%rax), %rcx
movq 0x10(%rax), %rax
movq %rax, 0xd0(%rsp)
imulq %rax, %r12
movq %rcx, 0x28(%rsp)
addq %rcx, %r12
movq 0x48(%rsp), %rax
movslq 0x2c(%rax), %rcx
movq %r13, 0xa8(%rsp)
movq %rcx, 0xc8(%rsp)
imulq %rcx, %r13
movq (%rax), %rcx
movq 0x10(%rax), %rax
movq %rax, 0x138(%rsp)
imulq %rax, %r13
movq %rcx, 0x60(%rsp)
addq %rcx, %r13
vmovaps %ymm0, 0x140(%rsp)
xorl %r14d, %r14d
xorl %ebx, %ebx
leal 0x7(%rbx), %eax
cmpl %r15d, %eax
jge 0x265abf
vmovdqu (%r12), %ymm0
vmovdqa %ymm0, 0x100(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x100(%rsp), %rsi
leaq 0x140(%rsp), %rdx
callq 0x26d220
vmovdqu %ymm0, (%r13)
addq $0x20, %r12
addq $0x20, %r13
addl $0x8, %ebx
addq $0x8, %r14
jmp 0x265a32
vmovdqu (%r12), %xmm0
vmovdqa %xmm0, 0x100(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x100(%rsp), %rsi
leaq 0xe0(%rsp), %rdx
vzeroupper
callq 0x26d430
vmovdqu %xmm0, (%r13)
addq $0x10, %r12
addq $0x10, %r13
addl $0x4, %ebx
addq $0x4, %r14
leal 0x3(%rbx), %eax
cmpl %r15d, %eax
jl 0x265a7e
movq 0xa8(%rsp), %r13
movq 0x138(%rsp), %rax
imulq %r13, %rax
imulq 0xc8(%rsp), %rax
addq %rax, 0x60(%rsp)
movq 0xd0(%rsp), %rax
imulq %r13, %rax
imulq 0xa0(%rsp), %rax
addq %rax, 0x28(%rsp)
movq 0x18(%rsp), %rbx
movl 0xdc(%rsp), %r12d
cmpl %r15d, %r14d
vmovdqa 0xb0(%rsp), %xmm0
jge 0x265b41
movq 0x28(%rsp), %rax
vmovd (%rax,%r14,4), %xmm1
vzeroupper
callq 0x5f0e0
movq 0x60(%rsp), %rax
vmovd %xmm0, (%rax,%r14,4)
incq %r14
jmp 0x265b10
incq %r13
jmp 0x265957
movq 0x20(%rsp), %rdx
movl 0x2c(%rdx), %eax
movl 0x30(%rdx), %ecx
movl 0x34(%rdx), %r13d
movl 0x38(%rdx), %esi
movq %rsi, 0x60(%rsp)
movl 0x18(%rdx), %edi
movl 0x28(%rdx), %esi
cmpl $0x2, %esi
movq 0x18(%rsp), %r10
movq %r13, 0x80(%rsp)
jne 0x266ed2
cmpl $0x1, 0x38(%rsp)
jne 0x266ed2
movl %edi, %r8d
imull %eax, %r8d
xorl %r14d, %r14d
testl %ecx, %ecx
movl $0x0, %edx
cmovgl %ecx, %edx
movq %rdx, 0x40(%rsp)
cmpq 0x40(%rsp), %r14
je 0x266eca
vmovd (%r10,%r14,4), %xmm0
cmpl $0x4, %edi
jne 0x265bc5
movq %r14, %rsi
shlq $0x4, %rsi
vmovdqu (%r10,%rsi), %xmm1
jmp 0x265bde
vpshufd $0x0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
cmpl $0x8, %edi
jne 0x265bde
movq %r14, %rsi
shlq $0x5, %rsi
vmovups (%r10,%rsi), %ymm2
jmp 0x265be4
vinsertf128 $0x1, %xmm1, %ymm1, %ymm2
movq 0x20(%rsp), %rdx
movslq 0x2c(%rdx), %rsi
movq %r14, %r13
movq %rsi, 0xb0(%rsp)
imulq %rsi, %r13
movq (%rdx), %r11
movq 0x10(%rdx), %r12
imulq %r12, %r13
addq %r11, %r13
movq 0x48(%rsp), %rdx
movslq 0x2c(%rdx), %rsi
movq %r14, %r9
movq %rsi, 0x28(%rsp)
imulq %rsi, %r9
movq (%rdx), %rbx
movq 0x10(%rdx), %r10
imulq %r10, %r9
addq %rbx, %r9
xorl %r15d, %r15d
xorl %esi, %esi
leal 0x7(%rsi), %edx
cmpl %r8d, %edx
jge 0x265c70
vmulps (%r13), %ymm2, %ymm3
vmovups %ymm3, (%r9)
addq $0x20, %r13
addq $0x20, %r9
addl $0x8, %esi
addq $0x8, %r15
jmp 0x265c32
vmulps (%r13), %xmm1, %xmm2
vmovups %xmm2, (%r9)
addq $0x10, %r13
addq $0x10, %r9
addl $0x4, %esi
addq $0x4, %r15
leal 0x3(%rsi), %edx
cmpl %r8d, %edx
jl 0x265c56
imulq %r14, %r10
imulq 0x28(%rsp), %r10
addq %r10, %rbx
imulq %r14, %r12
imulq 0xb0(%rsp), %r12
addq %r12, %r11
movq 0x80(%rsp), %r13
cmpl %r8d, %r15d
jge 0x265cb3
vmulss (%r11,%r15,4), %xmm0, %xmm1
vmovss %xmm1, (%rbx,%r15,4)
incq %r15
jmp 0x265c9d
incq %r14
movq 0x18(%rsp), %r10
jmp 0x265ba0
movq 0x20(%rsp), %rdx
movl 0x2c(%rdx), %eax
movl 0x30(%rdx), %ecx
movl 0x34(%rdx), %r13d
movl 0x38(%rdx), %esi
movq %rsi, 0x60(%rsp)
movl 0x18(%rdx), %edi
movl 0x28(%rdx), %esi
cmpl $0x2, %esi
movq 0x18(%rsp), %r10
movq %r13, 0x80(%rsp)
jne 0x26743d
cmpl $0x1, 0x38(%rsp)
jne 0x26743d
movl %edi, %r8d
imull %eax, %r8d
xorl %r14d, %r14d
testl %ecx, %ecx
movl $0x0, %edx
cmovgl %ecx, %edx
movq %rdx, 0x40(%rsp)
vbroadcastss 0x188f68(%rip), %ymm0 # 0x3eec88
vbroadcastss 0x188f5f(%rip), %xmm1 # 0x3eec88
vmovss 0x188f57(%rip), %xmm2 # 0x3eec88
cmpq 0x40(%rsp), %r14
je 0x267435
vmovss (%r10,%r14,4), %xmm3
cmpl $0x4, %edi
jne 0x265d56
movq %r14, %rsi
shlq $0x4, %rsi
vmovups (%r10,%rsi), %xmm4
jmp 0x265d6f
vshufps $0x0, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[0,0,0,0]
cmpl $0x8, %edi
jne 0x265d6f
movq %r14, %rsi
shlq $0x5, %rsi
vmovups (%r10,%rsi), %ymm5
jmp 0x265d75
vinsertf128 $0x1, %xmm4, %ymm4, %ymm5
movq 0x20(%rsp), %rdx
movslq 0x2c(%rdx), %rsi
movq %r14, %r13
movq %rsi, 0xb0(%rsp)
imulq %rsi, %r13
movq (%rdx), %r11
movq 0x10(%rdx), %r15
imulq %r15, %r13
addq %r11, %r13
movq 0x48(%rsp), %rdx
movslq 0x2c(%rdx), %rsi
movq %r14, %r9
movq %rsi, 0x28(%rsp)
imulq %rsi, %r9
movq (%rdx), %rbx
movq 0x10(%rdx), %r10
imulq %r10, %r9
addq %rbx, %r9
vrcpps %ymm5, %ymm6
vfmsub213ps %ymm0, %ymm6, %ymm5 # ymm5 = (ymm6 * ymm5) - ymm0
vfnmadd132ps %ymm6, %ymm6, %ymm5 # ymm5 = -(ymm5 * ymm6) + ymm6
xorl %r12d, %r12d
xorl %esi, %esi
leal 0x7(%rsi), %edx
cmpl %r8d, %edx
jge 0x265df5
vmulps (%r13), %ymm5, %ymm6
vmovups %ymm6, (%r9)
addq $0x20, %r13
addq $0x20, %r9
addl $0x8, %esi
addq $0x8, %r12
jmp 0x265dd1
vrcpps %xmm4, %xmm5
vfmsub213ps %xmm1, %xmm5, %xmm4 # xmm4 = (xmm5 * xmm4) - xmm1
vfnmadd132ps %xmm5, %xmm5, %xmm4 # xmm4 = -(xmm4 * xmm5) + xmm5
leal 0x3(%rsi), %edx
cmpl %r8d, %edx
jge 0x265e27
vmulps (%r13), %xmm4, %xmm5
vmovups %xmm5, (%r9)
addq $0x10, %r13
addq $0x10, %r9
addl $0x4, %esi
addq $0x4, %r12
jmp 0x265e03
vdivss %xmm3, %xmm2, %xmm3
imulq %r14, %r10
imulq 0x28(%rsp), %r10
addq %r10, %rbx
imulq %r14, %r15
imulq 0xb0(%rsp), %r15
addq %r15, %r11
movq 0x80(%rsp), %r13
cmpl %r8d, %r12d
jge 0x265e66
vmulss (%r11,%r12,4), %xmm3, %xmm4
vmovss %xmm4, (%rbx,%r12,4)
incq %r12
jmp 0x265e50
incq %r14
movq 0x18(%rsp), %r10
jmp 0x265d31
movq 0x20(%rsp), %rdx
movl 0x2c(%rdx), %eax
movl 0x30(%rdx), %ecx
movl 0x34(%rdx), %r13d
movl 0x38(%rdx), %esi
movq %rsi, 0x60(%rsp)
movl 0x18(%rdx), %edi
movl 0x28(%rdx), %esi
cmpl $0x2, %esi
movq 0x18(%rsp), %r10
movq %r13, 0x80(%rsp)
jne 0x267a96
cmpl $0x1, 0x38(%rsp)
jne 0x267a96
movl %edi, %r8d
imull %eax, %r8d
xorl %r14d, %r14d
testl %ecx, %ecx
movl $0x0, %edx
cmovgl %ecx, %edx
movq %rdx, 0x40(%rsp)
cmpq 0x40(%rsp), %r14
je 0x267a8e
vmovd (%r10,%r14,4), %xmm0
cmpl $0x4, %edi
jne 0x265eef
movq %r14, %rsi
shlq $0x4, %rsi
vmovdqu (%r10,%rsi), %xmm1
jmp 0x265f08
vpshufd $0x0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
cmpl $0x8, %edi
jne 0x265f08
movq %r14, %rsi
shlq $0x5, %rsi
vmovups (%r10,%rsi), %ymm2
jmp 0x265f0e
vinsertf128 $0x1, %xmm1, %ymm1, %ymm2
movq 0x20(%rsp), %rdx
movslq 0x2c(%rdx), %rsi
movq %r14, %r13
movq %rsi, 0xb0(%rsp)
imulq %rsi, %r13
movq (%rdx), %r11
movq 0x10(%rdx), %r12
imulq %r12, %r13
addq %r11, %r13
movq 0x48(%rsp), %rdx
movslq 0x2c(%rdx), %rsi
movq %r14, %r9
movq %rsi, 0x28(%rsp)
imulq %rsi, %r9
movq (%rdx), %rbx
movq 0x10(%rdx), %r10
imulq %r10, %r9
addq %rbx, %r9
xorl %r15d, %r15d
xorl %esi, %esi
leal 0x7(%rsi), %edx
cmpl %r8d, %edx
jge 0x265f9a
vsubps (%r13), %ymm2, %ymm3
vmovups %ymm3, (%r9)
addq $0x20, %r13
addq $0x20, %r9
addl $0x8, %esi
addq $0x8, %r15
jmp 0x265f5c
vsubps (%r13), %xmm1, %xmm2
vmovups %xmm2, (%r9)
addq $0x10, %r13
addq $0x10, %r9
addl $0x4, %esi
addq $0x4, %r15
leal 0x3(%rsi), %edx
cmpl %r8d, %edx
jl 0x265f80
imulq %r14, %r10
imulq 0x28(%rsp), %r10
addq %r10, %rbx
imulq %r14, %r12
imulq 0xb0(%rsp), %r12
addq %r12, %r11
movq 0x80(%rsp), %r13
cmpl %r8d, %r15d
jge 0x265fdd
vsubss (%r11,%r15,4), %xmm0, %xmm1
vmovss %xmm1, (%rbx,%r15,4)
incq %r15
jmp 0x265fc7
incq %r14
movq 0x18(%rsp), %r10
jmp 0x265eca
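# Operand order swapped relative to the case above: the input row is
# loaded first and the broadcast value is subtracted from it
# (input - broadcast).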
movq 0x20(%rsp), %rdx
movl 0x2c(%rdx), %eax
movl 0x30(%rdx), %ecx
movl 0x34(%rdx), %r13d
movl 0x38(%rdx), %esi
movq %rsi, 0x60(%rsp)
movl 0x18(%rdx), %edi
movl 0x28(%rdx), %esi
cmpl $0x2, %esi
movq 0x18(%rsp), %r10
movq %r13, 0x80(%rsp)
jne 0x26816b
cmpl $0x1, 0x38(%rsp)
jne 0x26816b
movl %edi, %r8d
imull %eax, %r8d
xorl %r15d, %r15d
testl %ecx, %ecx
movl $0x0, %edx
cmovgl %ecx, %edx
movq %rdx, 0x40(%rsp)
cmpq 0x40(%rsp), %r15
je 0x268163
vmovd (%r10,%r15,4), %xmm0
cmpl $0x4, %edi
jne 0x266066
movq %r15, %rsi
shlq $0x4, %rsi
vmovdqu (%r10,%rsi), %xmm1
jmp 0x26607f
vpshufd $0x0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
cmpl $0x8, %edi
jne 0x26607f
movq %r15, %rsi
shlq $0x5, %rsi
vmovups (%r10,%rsi), %ymm2
jmp 0x266085
vinsertf128 $0x1, %xmm1, %ymm1, %ymm2
movq 0x20(%rsp), %rdx
movslq 0x2c(%rdx), %rsi
movq %r15, %r13
movq %rsi, 0xb0(%rsp)
imulq %rsi, %r13
movq (%rdx), %r11
movq 0x10(%rdx), %r12
imulq %r12, %r13
addq %r11, %r13
movq 0x48(%rsp), %rdx
movslq 0x2c(%rdx), %rsi
movq %r15, %r9
movq %rsi, 0x28(%rsp)
imulq %rsi, %r9
movq (%rdx), %rbx
movq 0x10(%rdx), %r10
imulq %r10, %r9
addq %rbx, %r9
xorl %r14d, %r14d
xorl %esi, %esi
leal 0x7(%rsi), %edx
cmpl %r8d, %edx
jge 0x266119
vmovups (%r13), %ymm3
vsubps %ymm2, %ymm3, %ymm3
vmovups %ymm3, (%r9)
addq $0x20, %r13
addq $0x20, %r9
addl $0x8, %esi
addq $0x8, %r14
jmp 0x2660d3
vmovups (%r13), %xmm2
vsubps %xmm1, %xmm2, %xmm2
vmovups %xmm2, (%r9)
addq $0x10, %r13
addq $0x10, %r9
addl $0x4, %esi
addq $0x4, %r14
leal 0x3(%rsi), %edx
cmpl %r8d, %edx
jl 0x2660fb
imulq %r15, %r10
imulq 0x28(%rsp), %r10
addq %r10, %rbx
imulq %r15, %r12
imulq 0xb0(%rsp), %r12
addq %r12, %r11
movq 0x80(%rsp), %r13
cmpl %r8d, %r14d
jge 0x266160
vmovss (%r11,%r14,4), %xmm1
vsubss %xmm0, %xmm1, %xmm1
vmovss %xmm1, (%rbx,%r14,4)
incq %r14
jmp 0x266146
incq %r15
movq 0x18(%rsp), %r10
jmp 0x266041
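# Row-broadcast elementwise minimum: vminps/vminss against the
# splatted per-row value, same 8/4/1-lane loop shape as above.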
movq 0x20(%rsp), %rdx
movl 0x2c(%rdx), %eax
movl 0x30(%rdx), %ecx
movl 0x34(%rdx), %r13d
movl 0x38(%rdx), %esi
movq %rsi, 0x60(%rsp)
movl 0x18(%rdx), %edi
movl 0x28(%rdx), %esi
cmpl $0x2, %esi
movq 0x18(%rsp), %r10
movq %r13, 0x80(%rsp)
jne 0x268b07
cmpl $0x1, 0x38(%rsp)
jne 0x268b07
movl %edi, %r8d
imull %eax, %r8d
xorl %r14d, %r14d
testl %ecx, %ecx
movl $0x0, %edx
cmovgl %ecx, %edx
movq %rdx, 0x40(%rsp)
cmpq 0x40(%rsp), %r14
je 0x268aff
vmovd (%r10,%r14,4), %xmm0
cmpl $0x4, %edi
jne 0x2661e9
movq %r14, %rsi
shlq $0x4, %rsi
vmovdqu (%r10,%rsi), %xmm1
jmp 0x266202
vpshufd $0x0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
cmpl $0x8, %edi
jne 0x266202
movq %r14, %rsi
shlq $0x5, %rsi
vmovups (%r10,%rsi), %ymm2
jmp 0x266208
vinsertf128 $0x1, %xmm1, %ymm1, %ymm2
movq 0x20(%rsp), %rdx
movslq 0x2c(%rdx), %rsi
movq %r14, %r13
movq %rsi, 0xb0(%rsp)
imulq %rsi, %r13
movq (%rdx), %r11
movq 0x10(%rdx), %r12
imulq %r12, %r13
addq %r11, %r13
movq 0x48(%rsp), %rdx
movslq 0x2c(%rdx), %rsi
movq %r14, %r9
movq %rsi, 0x28(%rsp)
imulq %rsi, %r9
movq (%rdx), %rbx
movq 0x10(%rdx), %r10
imulq %r10, %r9
addq %rbx, %r9
xorl %r15d, %r15d
xorl %esi, %esi
leal 0x7(%rsi), %edx
cmpl %r8d, %edx
jge 0x266294
vminps (%r13), %ymm2, %ymm3
vmovups %ymm3, (%r9)
addq $0x20, %r13
addq $0x20, %r9
addl $0x8, %esi
addq $0x8, %r15
jmp 0x266256
vminps (%r13), %xmm1, %xmm2
vmovups %xmm2, (%r9)
addq $0x10, %r13
addq $0x10, %r9
addl $0x4, %esi
addq $0x4, %r15
leal 0x3(%rsi), %edx
cmpl %r8d, %edx
jl 0x26627a
imulq %r14, %r10
imulq 0x28(%rsp), %r10
addq %r10, %rbx
imulq %r14, %r12
imulq 0xb0(%rsp), %r12
addq %r12, %r11
movq 0x80(%rsp), %r13
cmpl %r8d, %r15d
jge 0x2662d7
vminss (%r11,%r15,4), %xmm0, %xmm1
vmovss %xmm1, (%rbx,%r15,4)
incq %r15
jmp 0x2662c1
incq %r14
movq 0x18(%rsp), %r10
jmp 0x2661c4
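# Row-broadcast case that is not inlined: each ymm/xmm chunk is
# spilled to a stack slot and a vector helper is called (0x26ce36 for
# 8 lanes, 0x26d046 for 4), with a scalar call to 0x5f0e0 for the
# tail. The dump does not name these targets; a pow-style binary
# function would fit this calling pattern, but that is an assumption.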
movq 0x20(%rsp), %rax
movl 0x2c(%rax), %ecx
movl %ecx, 0x80(%rsp)
movl 0x30(%rax), %ecx
movq %rcx, 0x40(%rsp)
movl 0x34(%rax), %ecx
movq %rcx, 0xf8(%rsp)
movl 0x38(%rax), %ecx
movq %rcx, 0xf0(%rsp)
movl 0x18(%rax), %r12d
movl 0x28(%rax), %eax
cmpl $0x2, %eax
movq 0x18(%rsp), %rbx
movl %r12d, 0xdc(%rsp)
jne 0x269b05
cmpl $0x1, 0x38(%rsp)
jne 0x269b05
movl %r12d, %r15d
imull 0x80(%rsp), %r15d
xorl %r13d, %r13d
movq 0x40(%rsp), %rax
testl %eax, %eax
movl $0x0, %ecx
cmovgl %eax, %ecx
movq %rcx, 0x130(%rsp)
cmpq 0x130(%rsp), %r13
je 0x269afd
vmovss (%rbx,%r13,4), %xmm0
cmpl $0x4, %r12d
vmovaps %xmm0, 0x60(%rsp)
jne 0x266396
movq %r13, %rax
shlq $0x4, %rax
vmovups (%rbx,%rax), %xmm0
vmovaps %xmm0, 0xe0(%rsp)
jmp 0x2663b8
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmovaps %xmm0, 0xe0(%rsp)
cmpl $0x8, %r12d
jne 0x2663b8
movq %r13, %rax
shlq $0x5, %rax
vmovdqu (%rbx,%rax), %ymm0
jmp 0x2663be
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
movq 0x20(%rsp), %rax
movslq 0x2c(%rax), %rcx
movq %r13, %r12
movq %rcx, 0xa0(%rsp)
imulq %rcx, %r12
movq (%rax), %rcx
movq 0x10(%rax), %rax
movq %rax, 0xd0(%rsp)
imulq %rax, %r12
movq %rcx, 0xb0(%rsp)
addq %rcx, %r12
movq 0x48(%rsp), %rax
movslq 0x2c(%rax), %rcx
movq %r13, 0xa8(%rsp)
movq %rcx, 0xc8(%rsp)
imulq %rcx, %r13
movq (%rax), %rcx
movq 0x10(%rax), %rax
movq %rax, 0x138(%rsp)
imulq %rax, %r13
movq %rcx, 0x28(%rsp)
addq %rcx, %r13
vmovdqa %ymm0, 0x140(%rsp)
xorl %r14d, %r14d
xorl %ebx, %ebx
leal 0x7(%rbx), %eax
cmpl %r15d, %eax
jge 0x2664c7
vmovdqu (%r12), %ymm0
vmovdqa %ymm0, 0x100(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x100(%rsp), %rsi
leaq 0x140(%rsp), %rdx
callq 0x26ce36
vmovdqu %ymm0, (%r13)
addq $0x20, %r12
addq $0x20, %r13
addl $0x8, %ebx
addq $0x8, %r14
jmp 0x26643a
vmovdqu (%r12), %xmm0
vmovdqa %xmm0, 0x100(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x100(%rsp), %rsi
leaq 0xe0(%rsp), %rdx
vzeroupper
callq 0x26d046
vmovdqu %xmm0, (%r13)
addq $0x10, %r12
addq $0x10, %r13
addl $0x4, %ebx
addq $0x4, %r14
leal 0x3(%rbx), %eax
cmpl %r15d, %eax
jl 0x266486
movq 0xa8(%rsp), %r13
movq 0x138(%rsp), %rax
imulq %r13, %rax
imulq 0xc8(%rsp), %rax
addq %rax, 0x28(%rsp)
movq 0xd0(%rsp), %rax
imulq %r13, %rax
imulq 0xa0(%rsp), %rax
addq %rax, 0xb0(%rsp)
movq 0x18(%rsp), %rbx
movl 0xdc(%rsp), %r12d
cmpl %r15d, %r14d
jge 0x26654c
movq 0xb0(%rsp), %rax
vmovd (%rax,%r14,4), %xmm0
vmovdqa 0x60(%rsp), %xmm1
vzeroupper
callq 0x5f0e0
movq 0x28(%rsp), %rax
vmovd %xmm0, (%rax,%r14,4)
incq %r14
jmp 0x26651b
incq %r13
jmp 0x26635f
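# Reversed division: vrcpps is applied to the input, the result is
# scaled by the broadcast value, and one Newton-Raphson step refines
# it (broadcast / input); the scalar tail uses vdivss directly.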
movq 0x20(%rsp), %rdx
movl 0x2c(%rdx), %eax
movl 0x30(%rdx), %ecx
movl 0x34(%rdx), %r13d
movl 0x38(%rdx), %esi
movq %rsi, 0x60(%rsp)
movl 0x18(%rdx), %edi
movl 0x28(%rdx), %esi
cmpl $0x2, %esi
movq 0x18(%rsp), %r10
movq %r13, 0x80(%rsp)
jne 0x269072
cmpl $0x1, 0x38(%rsp)
jne 0x269072
movl %edi, %r8d
imull %eax, %r8d
xorl %r14d, %r14d
testl %ecx, %ecx
movl $0x0, %edx
cmovgl %ecx, %edx
movq %rdx, 0x40(%rsp)
cmpq 0x40(%rsp), %r14
je 0x26906a
vmovd (%r10,%r14,4), %xmm0
cmpl $0x4, %edi
jne 0x2665d0
movq %r14, %rsi
shlq $0x4, %rsi
vmovdqu (%r10,%rsi), %xmm1
jmp 0x2665e9
vpshufd $0x0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
cmpl $0x8, %edi
jne 0x2665e9
movq %r14, %rsi
shlq $0x5, %rsi
vmovups (%r10,%rsi), %ymm2
jmp 0x2665ef
vinsertf128 $0x1, %xmm1, %ymm1, %ymm2
movq 0x20(%rsp), %rdx
movslq 0x2c(%rdx), %rsi
movq %r14, %r13
movq %rsi, 0xb0(%rsp)
imulq %rsi, %r13
movq (%rdx), %r11
movq 0x10(%rdx), %r12
imulq %r12, %r13
addq %r11, %r13
movq 0x48(%rsp), %rdx
movslq 0x2c(%rdx), %rsi
movq %r14, %r9
movq %rsi, 0x28(%rsp)
imulq %rsi, %r9
movq (%rdx), %rbx
movq 0x10(%rdx), %r10
imulq %r10, %r9
addq %rbx, %r9
xorl %r15d, %r15d
xorl %esi, %esi
leal 0x7(%rsi), %edx
cmpl %r8d, %edx
jge 0x26669f
vmovups (%r13), %ymm3
vrcpps %ymm3, %ymm4
vmulps %ymm4, %ymm2, %ymm5
vfmsub213ps %ymm2, %ymm5, %ymm3 # ymm3 = (ymm5 * ymm3) - ymm2
vfnmadd213ps %ymm5, %ymm4, %ymm3 # ymm3 = -(ymm4 * ymm3) + ymm5
vmovups %ymm3, (%r9)
addq $0x20, %r13
addq $0x20, %r9
addl $0x8, %esi
addq $0x8, %r15
jmp 0x26663d
vmovups (%r13), %xmm2
vrcpps %xmm2, %xmm3
vmulps %xmm3, %xmm1, %xmm4
vfmsub213ps %xmm1, %xmm4, %xmm2 # xmm2 = (xmm4 * xmm2) - xmm1
vfnmadd213ps %xmm4, %xmm3, %xmm2 # xmm2 = -(xmm3 * xmm2) + xmm4
vmovups %xmm2, (%r9)
addq $0x10, %r13
addq $0x10, %r9
addl $0x4, %esi
addq $0x4, %r15
leal 0x3(%rsi), %edx
cmpl %r8d, %edx
jl 0x266673
imulq %r14, %r10
imulq 0x28(%rsp), %r10
addq %r10, %rbx
imulq %r14, %r12
imulq 0xb0(%rsp), %r12
addq %r12, %r11
movq 0x80(%rsp), %r13
cmpl %r8d, %r15d
jge 0x2666e2
vdivss (%r11,%r15,4), %xmm0, %xmm1
vmovss %xmm1, (%rbx,%r15,4)
incq %r15
jmp 0x2666cc
incq %r14
movq 0x18(%rsp), %r10
jmp 0x2665ab
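# Second out-of-line case, same spill-and-call shape as before but
# with different helper entry points (0x26d60a/0x26d794 for the
# vector chunks, 0x5f170 for the scalar tail); presumably the
# reversed form of the same unnamed binary function.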
movq 0x20(%rsp), %rax
movl 0x2c(%rax), %ecx
movl %ecx, 0x80(%rsp)
movl 0x30(%rax), %ecx
movq %rcx, 0x40(%rsp)
movl 0x34(%rax), %ecx
movq %rcx, 0xf8(%rsp)
movl 0x38(%rax), %ecx
movq %rcx, 0xf0(%rsp)
movl 0x18(%rax), %r12d
movl 0x28(%rax), %eax
cmpl $0x2, %eax
movq 0x18(%rsp), %rbx
movl %r12d, 0xdc(%rsp)
jne 0x269d54
cmpl $0x1, 0x38(%rsp)
jne 0x269d54
movl %r12d, %r15d
imull 0x80(%rsp), %r15d
xorl %r13d, %r13d
movq 0x40(%rsp), %rax
testl %eax, %eax
movl $0x0, %ecx
cmovgl %eax, %ecx
movq %rcx, 0x130(%rsp)
cmpq 0x130(%rsp), %r13
je 0x269d4c
vmovss (%rbx,%r13,4), %xmm0
cmpl $0x4, %r12d
vmovaps %xmm0, 0x60(%rsp)
jne 0x2667a1
movq %r13, %rax
shlq $0x4, %rax
vmovups (%rbx,%rax), %xmm0
vmovaps %xmm0, 0xe0(%rsp)
jmp 0x2667c3
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmovaps %xmm0, 0xe0(%rsp)
cmpl $0x8, %r12d
jne 0x2667c3
movq %r13, %rax
shlq $0x5, %rax
vmovdqu (%rbx,%rax), %ymm0
jmp 0x2667c9
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
movq 0x20(%rsp), %rax
movslq 0x2c(%rax), %rcx
movq %r13, %r12
movq %rcx, 0xa0(%rsp)
imulq %rcx, %r12
movq (%rax), %rcx
movq 0x10(%rax), %rax
movq %rax, 0xd0(%rsp)
imulq %rax, %r12
movq %rcx, 0xb0(%rsp)
addq %rcx, %r12
movq 0x48(%rsp), %rax
movslq 0x2c(%rax), %rcx
movq %r13, 0xa8(%rsp)
movq %rcx, 0xc8(%rsp)
imulq %rcx, %r13
movq (%rax), %rcx
movq 0x10(%rax), %rax
movq %rax, 0x138(%rsp)
imulq %rax, %r13
movq %rcx, 0x28(%rsp)
addq %rcx, %r13
vmovdqa %ymm0, 0x140(%rsp)
xorl %r14d, %r14d
xorl %ebx, %ebx
leal 0x7(%rbx), %eax
cmpl %r15d, %eax
jge 0x2668d2
vmovdqu (%r12), %ymm0
vmovdqa %ymm0, 0x100(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x100(%rsp), %rsi
leaq 0x140(%rsp), %rdx
callq 0x26d60a
vmovdqu %ymm0, (%r13)
addq $0x20, %r12
addq $0x20, %r13
addl $0x8, %ebx
addq $0x8, %r14
jmp 0x266845
vmovdqu (%r12), %xmm0
vmovdqa %xmm0, 0x100(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x100(%rsp), %rsi
leaq 0xe0(%rsp), %rdx
vzeroupper
callq 0x26d794
vmovdqu %xmm0, (%r13)
addq $0x10, %r12
addq $0x10, %r13
addl $0x4, %ebx
addq $0x4, %r14
leal 0x3(%rbx), %eax
cmpl %r15d, %eax
jl 0x266891
movq 0xa8(%rsp), %r13
movq 0x138(%rsp), %rax
imulq %r13, %rax
imulq 0xc8(%rsp), %rax
addq %rax, 0x28(%rsp)
movq 0xd0(%rsp), %rax
imulq %r13, %rax
imulq 0xa0(%rsp), %rax
addq %rax, 0xb0(%rsp)
movq 0x18(%rsp), %rbx
movl 0xdc(%rsp), %r12d
cmpl %r15d, %r14d
jge 0x266957
movq 0xb0(%rsp), %rax
vmovd (%rax,%r14,4), %xmm0
vmovdqa 0x60(%rsp), %xmm1
vzeroupper
callq 0x5f170
movq 0x28(%rsp), %rax
vmovd %xmm0, (%rax,%r14,4)
incq %r14
jmp 0x266926
incq %r13
jmp 0x26676a
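# From here on the kernel stamps out one template per broadcast
# layout. The next four cases compute an elementwise maximum
# (vmaxps/vmaxss): dims 3-or-4 with selector 1, dims==3 with
# selector 2, dims==4 with selector 2, and dims==4 with selector 3.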
movq 0x20(%rsp), %rdx
movl 0x28(%rdx), %esi
leal -0x3(%rsi), %edx
cmpl $0x1, %edx
ja 0x266ad1
cmpl $0x1, 0x38(%rsp)
jne 0x266ad1
movl %edi, %r8d
imull %eax, %r8d
imull %ecx, %r8d
imull %r13d, %r8d
xorl %r9d, %r9d
movq 0x60(%rsp), %rdx
testl %edx, %edx
movl $0x0, %esi
cmovgl %edx, %esi
movq %rsi, 0x40(%rsp)
cmpq 0x40(%rsp), %r9
je 0x266ac9
vmovd (%r10,%r9,4), %xmm0
cmpl $0x4, %edi
jne 0x2669c9
movq %r9, %rdx
shlq $0x4, %rdx
vmovdqu (%r10,%rdx), %xmm1
jmp 0x2669e2
vpshufd $0x0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
cmpl $0x8, %edi
jne 0x2669e2
movq %r9, %rdx
shlq $0x5, %rdx
vmovups (%r10,%rdx), %ymm2
jmp 0x2669e8
vinsertf128 $0x1, %xmm1, %ymm1, %ymm2
movq 0x20(%rsp), %rdx
movq 0x40(%rdx), %r14
movq %r14, %r13
imulq %r9, %r13
movq (%rdx), %r11
movq 0x10(%rdx), %rdx
movq %rdx, 0xb0(%rsp)
imulq %rdx, %r13
addq %r11, %r13
movq 0x48(%rsp), %rdx
movq 0x40(%rdx), %r10
movq %r10, %rsi
imulq %r9, %rsi
movq (%rdx), %rbx
movq 0x10(%rdx), %rdx
movq %rdx, 0x28(%rsp)
imulq %rdx, %rsi
addq %rbx, %rsi
xorl %r15d, %r15d
xorl %r12d, %r12d
leal 0x7(%r12), %edx
cmpl %r8d, %edx
jge 0x266a77
vmaxps (%r13), %ymm2, %ymm3
vmovups %ymm3, (%rsi)
addq $0x20, %r13
addq $0x20, %rsi
addl $0x8, %r12d
addq $0x8, %r15
jmp 0x266a37
vmaxps (%r13), %xmm1, %xmm2
vmovups %xmm2, (%rsi)
addq $0x10, %r13
addq $0x10, %rsi
addl $0x4, %r12d
addq $0x4, %r15
leal 0x3(%r12), %edx
cmpl %r8d, %edx
jl 0x266a5d
imulq 0x28(%rsp), %r10
imulq %r9, %r10
addq %r10, %rbx
imulq 0xb0(%rsp), %r14
imulq %r9, %r14
addq %r14, %r11
movq 0x80(%rsp), %r13
cmpl %r8d, %r15d
jge 0x266abc
vmaxss (%r11,%r15,4), %xmm0, %xmm1
vmovss %xmm1, (%rbx,%r15,4)
incq %r15
jmp 0x266aa6
incq %r9
movq 0x18(%rsp), %r10
jmp 0x2669a4
movq 0x20(%rsp), %rdx
movl 0x28(%rdx), %esi
cmpl $0x3, %esi
jne 0x266c17
cmpl $0x2, 0x38(%rsp)
jne 0x266c17
movl %edi, %r8d
imull %eax, %r8d
xorl %r9d, %r9d
testl %ecx, %ecx
movl $0x0, %r10d
cmovgl %ecx, %r10d
movq 0x60(%rsp), %rdx
testl %edx, %edx
movl $0x0, %r11d
cmovgl %edx, %r11d
movslq 0x34(%rsp), %rbx
imulq 0x78(%rsp), %rbx
cmpq %r11, %r9
je 0x266c0f
movq 0x20(%rsp), %rdx
movq 0x40(%rdx), %r14
imulq %r9, %r14
imulq 0x10(%rdx), %r14
addq (%rdx), %r14
movq %rbx, %r15
imulq %r9, %r15
movq 0x48(%rsp), %rdx
movq 0x40(%rdx), %r12
imulq %r9, %r12
imulq 0x10(%rdx), %r12
addq 0x18(%rsp), %r15
addq (%rdx), %r12
xorl %r13d, %r13d
cmpq %r10, %r13
je 0x266bff
vmovss (%r15,%r13,4), %xmm0
cmpl $0x4, %edi
jne 0x266b7c
movq %r13, %rdx
shlq $0x4, %rdx
vmovups (%r15,%rdx), %xmm1
jmp 0x266b95
vshufps $0x0, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
cmpl $0x8, %edi
jne 0x266b95
movq %r13, %rdx
shlq $0x5, %rdx
vmovups (%r15,%rdx), %ymm2
jmp 0x266b9b
vinsertf128 $0x1, %xmm1, %ymm1, %ymm2
xorl %esi, %esi
leal 0x7(%rsi), %edx
cmpl %r8d, %edx
jge 0x266bd3
vmaxps (%r14), %ymm2, %ymm3
vmovups %ymm3, (%r12)
addq $0x20, %r14
addq $0x20, %r12
addl $0x8, %esi
jmp 0x266b9d
vmaxps (%r14), %xmm1, %xmm2
vmovups %xmm2, (%r12)
addq $0x10, %r14
addq $0x10, %r12
addl $0x4, %esi
leal 0x3(%rsi), %edx
cmpl %r8d, %edx
jl 0x266bbd
jmp 0x266bf2
vmaxss (%r14), %xmm0, %xmm1
vmovss %xmm1, (%r12)
addq $0x4, %r14
addq $0x4, %r12
incl %esi
cmpl %r8d, %esi
jl 0x266bdd
incq %r13
jmp 0x266b59
incq %r9
movq 0x80(%rsp), %r13
jmp 0x266b17
movq 0x20(%rsp), %rdx
movl 0x28(%rdx), %esi
cmpl $0x4, %esi
jne 0x266d62
cmpl $0x2, 0x38(%rsp)
jne 0x266d62
movl %edi, %r8d
imull %eax, %r8d
imull %ecx, %r8d
xorl %r9d, %r9d
testl %r13d, %r13d
movl $0x0, %r10d
cmovgl %r13d, %r10d
movq 0x60(%rsp), %rdx
testl %edx, %edx
movl $0x0, %r11d
cmovgl %edx, %r11d
movslq 0x34(%rsp), %rbx
imulq 0x78(%rsp), %rbx
cmpq %r11, %r9
je 0x266d5a
movq 0x20(%rsp), %rdx
movq 0x40(%rdx), %r14
imulq %r9, %r14
imulq 0x10(%rdx), %r14
addq (%rdx), %r14
movq %rbx, %r15
imulq %r9, %r15
movq 0x48(%rsp), %rdx
movq 0x40(%rdx), %r12
imulq %r9, %r12
imulq 0x10(%rdx), %r12
addq 0x18(%rsp), %r15
addq (%rdx), %r12
xorl %r13d, %r13d
cmpq %r10, %r13
je 0x266d4a
vmovss (%r15,%r13,4), %xmm0
cmpl $0x4, %edi
jne 0x266cc7
movq %r13, %rdx
shlq $0x4, %rdx
vmovups (%r15,%rdx), %xmm1
jmp 0x266ce0
vshufps $0x0, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
cmpl $0x8, %edi
jne 0x266ce0
movq %r13, %rdx
shlq $0x5, %rdx
vmovups (%r15,%rdx), %ymm2
jmp 0x266ce6
vinsertf128 $0x1, %xmm1, %ymm1, %ymm2
xorl %esi, %esi
leal 0x7(%rsi), %edx
cmpl %r8d, %edx
jge 0x266d1e
vmaxps (%r14), %ymm2, %ymm3
vmovups %ymm3, (%r12)
addq $0x20, %r14
addq $0x20, %r12
addl $0x8, %esi
jmp 0x266ce8
vmaxps (%r14), %xmm1, %xmm2
vmovups %xmm2, (%r12)
addq $0x10, %r14
addq $0x10, %r12
addl $0x4, %esi
leal 0x3(%rsi), %edx
cmpl %r8d, %edx
jl 0x266d08
jmp 0x266d3d
vmaxss (%r14), %xmm0, %xmm1
vmovss %xmm1, (%r12)
addq $0x4, %r14
addq $0x4, %r12
incl %esi
cmpl %r8d, %esi
jl 0x266d28
incq %r13
jmp 0x266ca4
incq %r9
movq 0x80(%rsp), %r13
jmp 0x266c62
movq 0x20(%rsp), %rdx
movl 0x28(%rdx), %esi
cmpl $0x4, %esi
jne 0x26bab3
cmpl $0x3, 0x38(%rsp)
jne 0x26bab3
imull %edi, %eax
xorl %r8d, %r8d
testl %ecx, %ecx
cmovlel %r8d, %ecx
testl %r13d, %r13d
cmovlel %r8d, %r13d
movq 0x60(%rsp), %rdx
testl %edx, %edx
cmovlel %r8d, %edx
movq %rdx, 0x60(%rsp)
movq 0x98(%rsp), %rdx
movq 0x78(%rsp), %rsi
imulq %rsi, %rdx
movq %rdx, 0x98(%rsp)
movslq 0x34(%rsp), %r9
imulq %rsi, %r9
cmpq 0x60(%rsp), %r8
je 0x26bab3
movq 0x20(%rsp), %rdx
movq 0x40(%rdx), %r10
imulq %r8, %r10
imulq 0x10(%rdx), %r10
addq (%rdx), %r10
movq 0x48(%rsp), %rdx
movq 0x40(%rdx), %r11
imulq %r8, %r11
imulq 0x10(%rdx), %r11
addq (%rdx), %r11
movq 0x98(%rsp), %rbx
imulq %r8, %rbx
addq 0x18(%rsp), %rbx
xorl %r14d, %r14d
cmpq %r13, %r14
je 0x266ec2
movq %r9, %r15
imulq %r14, %r15
addq %rbx, %r15
xorl %r12d, %r12d
cmpq %rcx, %r12
je 0x266eba
vmovss (%r15,%r12,4), %xmm0
cmpl $0x4, %edi
jne 0x266e3d
movq %r12, %rdx
shlq $0x4, %rdx
vmovups (%r15,%rdx), %xmm1
jmp 0x266e56
vshufps $0x0, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
cmpl $0x8, %edi
jne 0x266e56
movq %r12, %rdx
shlq $0x5, %rdx
vmovups (%r15,%rdx), %ymm2
jmp 0x266e5c
vinsertf128 $0x1, %xmm1, %ymm1, %ymm2
xorl %esi, %esi
leal 0x7(%rsi), %edx
cmpl %eax, %edx
jge 0x266e91
vmaxps (%r10), %ymm2, %ymm3
vmovups %ymm3, (%r11)
addq $0x20, %r10
addq $0x20, %r11
addl $0x8, %esi
jmp 0x266e5e
vmaxps (%r10), %xmm1, %xmm2
vmovups %xmm2, (%r11)
addq $0x10, %r10
addq $0x10, %r11
addl $0x4, %esi
leal 0x3(%rsi), %edx
cmpl %eax, %edx
jl 0x266e7c
jmp 0x266eae
vmaxss (%r10), %xmm0, %xmm1
vmovss %xmm1, (%r11)
addq $0x4, %r10
addq $0x4, %r11
incl %esi
cmpl %eax, %esi
jl 0x266e9a
incq %r12
jmp 0x266e1a
incq %r14
jmp 0x266e04
incq %r8
jmp 0x266dbb
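# The same four broadcast layouts, repeated for multiplication
# (vmulps/vmulss).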
movq 0x20(%rsp), %rdx
movl 0x28(%rdx), %esi
leal -0x3(%rsi), %edx
cmpl $0x1, %edx
ja 0x26703c
cmpl $0x1, 0x38(%rsp)
jne 0x26703c
movl %edi, %r8d
imull %eax, %r8d
imull %ecx, %r8d
imull %r13d, %r8d
xorl %r9d, %r9d
movq 0x60(%rsp), %rdx
testl %edx, %edx
movl $0x0, %esi
cmovgl %edx, %esi
movq %rsi, 0x40(%rsp)
cmpq 0x40(%rsp), %r9
je 0x267034
vmovd (%r10,%r9,4), %xmm0
cmpl $0x4, %edi
jne 0x266f34
movq %r9, %rdx
shlq $0x4, %rdx
vmovdqu (%r10,%rdx), %xmm1
jmp 0x266f4d
vpshufd $0x0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
cmpl $0x8, %edi
jne 0x266f4d
movq %r9, %rdx
shlq $0x5, %rdx
vmovups (%r10,%rdx), %ymm2
jmp 0x266f53
vinsertf128 $0x1, %xmm1, %ymm1, %ymm2
movq 0x20(%rsp), %rdx
movq 0x40(%rdx), %r14
movq %r14, %r13
imulq %r9, %r13
movq (%rdx), %r11
movq 0x10(%rdx), %rdx
movq %rdx, 0xb0(%rsp)
imulq %rdx, %r13
addq %r11, %r13
movq 0x48(%rsp), %rdx
movq 0x40(%rdx), %r10
movq %r10, %rsi
imulq %r9, %rsi
movq (%rdx), %rbx
movq 0x10(%rdx), %rdx
movq %rdx, 0x28(%rsp)
imulq %rdx, %rsi
addq %rbx, %rsi
xorl %r15d, %r15d
xorl %r12d, %r12d
leal 0x7(%r12), %edx
cmpl %r8d, %edx
jge 0x266fe2
vmulps (%r13), %ymm2, %ymm3
vmovups %ymm3, (%rsi)
addq $0x20, %r13
addq $0x20, %rsi
addl $0x8, %r12d
addq $0x8, %r15
jmp 0x266fa2
vmulps (%r13), %xmm1, %xmm2
vmovups %xmm2, (%rsi)
addq $0x10, %r13
addq $0x10, %rsi
addl $0x4, %r12d
addq $0x4, %r15
leal 0x3(%r12), %edx
cmpl %r8d, %edx
jl 0x266fc8
imulq 0x28(%rsp), %r10
imulq %r9, %r10
addq %r10, %rbx
imulq 0xb0(%rsp), %r14
imulq %r9, %r14
addq %r14, %r11
movq 0x80(%rsp), %r13
cmpl %r8d, %r15d
jge 0x267027
vmulss (%r11,%r15,4), %xmm0, %xmm1
vmovss %xmm1, (%rbx,%r15,4)
incq %r15
jmp 0x267011
incq %r9
movq 0x18(%rsp), %r10
jmp 0x266f0f
movq 0x20(%rsp), %rdx
movl 0x28(%rdx), %esi
cmpl $0x3, %esi
jne 0x267182
cmpl $0x2, 0x38(%rsp)
jne 0x267182
movl %edi, %r8d
imull %eax, %r8d
xorl %r9d, %r9d
testl %ecx, %ecx
movl $0x0, %r10d
cmovgl %ecx, %r10d
movq 0x60(%rsp), %rdx
testl %edx, %edx
movl $0x0, %r11d
cmovgl %edx, %r11d
movslq 0x34(%rsp), %rbx
imulq 0x78(%rsp), %rbx
cmpq %r11, %r9
je 0x26717a
movq 0x20(%rsp), %rdx
movq 0x40(%rdx), %r14
imulq %r9, %r14
imulq 0x10(%rdx), %r14
addq (%rdx), %r14
movq %rbx, %r15
imulq %r9, %r15
movq 0x48(%rsp), %rdx
movq 0x40(%rdx), %r12
imulq %r9, %r12
imulq 0x10(%rdx), %r12
addq 0x18(%rsp), %r15
addq (%rdx), %r12
xorl %r13d, %r13d
cmpq %r10, %r13
je 0x26716a
vmovss (%r15,%r13,4), %xmm0
cmpl $0x4, %edi
jne 0x2670e7
movq %r13, %rdx
shlq $0x4, %rdx
vmovups (%r15,%rdx), %xmm1
jmp 0x267100
vshufps $0x0, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
cmpl $0x8, %edi
jne 0x267100
movq %r13, %rdx
shlq $0x5, %rdx
vmovups (%r15,%rdx), %ymm2
jmp 0x267106
vinsertf128 $0x1, %xmm1, %ymm1, %ymm2
xorl %esi, %esi
leal 0x7(%rsi), %edx
cmpl %r8d, %edx
jge 0x26713e
vmulps (%r14), %ymm2, %ymm3
vmovups %ymm3, (%r12)
addq $0x20, %r14
addq $0x20, %r12
addl $0x8, %esi
jmp 0x267108
vmulps (%r14), %xmm1, %xmm2
vmovups %xmm2, (%r12)
addq $0x10, %r14
addq $0x10, %r12
addl $0x4, %esi
leal 0x3(%rsi), %edx
cmpl %r8d, %edx
jl 0x267128
jmp 0x26715d
vmulss (%r14), %xmm0, %xmm1
vmovss %xmm1, (%r12)
addq $0x4, %r14
addq $0x4, %r12
incl %esi
cmpl %r8d, %esi
jl 0x267148
incq %r13
jmp 0x2670c4
incq %r9
movq 0x80(%rsp), %r13
jmp 0x267082
movq 0x20(%rsp), %rdx
movl 0x28(%rdx), %esi
cmpl $0x4, %esi
jne 0x2672cd
cmpl $0x2, 0x38(%rsp)
jne 0x2672cd
movl %edi, %r8d
imull %eax, %r8d
imull %ecx, %r8d
xorl %r9d, %r9d
testl %r13d, %r13d
movl $0x0, %r10d
cmovgl %r13d, %r10d
movq 0x60(%rsp), %rdx
testl %edx, %edx
movl $0x0, %r11d
cmovgl %edx, %r11d
movslq 0x34(%rsp), %rbx
imulq 0x78(%rsp), %rbx
cmpq %r11, %r9
je 0x2672c5
movq 0x20(%rsp), %rdx
movq 0x40(%rdx), %r14
imulq %r9, %r14
imulq 0x10(%rdx), %r14
addq (%rdx), %r14
movq %rbx, %r15
imulq %r9, %r15
movq 0x48(%rsp), %rdx
movq 0x40(%rdx), %r12
imulq %r9, %r12
imulq 0x10(%rdx), %r12
addq 0x18(%rsp), %r15
addq (%rdx), %r12
xorl %r13d, %r13d
cmpq %r10, %r13
je 0x2672b5
vmovss (%r15,%r13,4), %xmm0
cmpl $0x4, %edi
jne 0x267232
movq %r13, %rdx
shlq $0x4, %rdx
vmovups (%r15,%rdx), %xmm1
jmp 0x26724b
vshufps $0x0, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
cmpl $0x8, %edi
jne 0x26724b
movq %r13, %rdx
shlq $0x5, %rdx
vmovups (%r15,%rdx), %ymm2
jmp 0x267251
vinsertf128 $0x1, %xmm1, %ymm1, %ymm2
xorl %esi, %esi
leal 0x7(%rsi), %edx
cmpl %r8d, %edx
jge 0x267289
vmulps (%r14), %ymm2, %ymm3
vmovups %ymm3, (%r12)
addq $0x20, %r14
addq $0x20, %r12
addl $0x8, %esi
jmp 0x267253
vmulps (%r14), %xmm1, %xmm2
vmovups %xmm2, (%r12)
addq $0x10, %r14
addq $0x10, %r12
addl $0x4, %esi
leal 0x3(%rsi), %edx
cmpl %r8d, %edx
jl 0x267273
jmp 0x2672a8
vmulss (%r14), %xmm0, %xmm1
vmovss %xmm1, (%r12)
addq $0x4, %r14
addq $0x4, %r12
incl %esi
cmpl %r8d, %esi
jl 0x267293
incq %r13
jmp 0x26720f
incq %r9
movq 0x80(%rsp), %r13
jmp 0x2671cd
movq 0x20(%rsp), %rdx
movl 0x28(%rdx), %esi
cmpl $0x4, %esi
jne 0x26bab3
cmpl $0x3, 0x38(%rsp)
jne 0x26bab3
imull %edi, %eax
xorl %r8d, %r8d
testl %ecx, %ecx
cmovlel %r8d, %ecx
testl %r13d, %r13d
cmovlel %r8d, %r13d
movq 0x60(%rsp), %rdx
testl %edx, %edx
cmovlel %r8d, %edx
movq %rdx, 0x60(%rsp)
movq 0x98(%rsp), %rdx
movq 0x78(%rsp), %rsi
imulq %rsi, %rdx
movq %rdx, 0x98(%rsp)
movslq 0x34(%rsp), %r9
imulq %rsi, %r9
cmpq 0x60(%rsp), %r8
je 0x26bab3
movq 0x20(%rsp), %rdx
movq 0x40(%rdx), %r10
imulq %r8, %r10
imulq 0x10(%rdx), %r10
addq (%rdx), %r10
movq 0x48(%rsp), %rdx
movq 0x40(%rdx), %r11
imulq %r8, %r11
imulq 0x10(%rdx), %r11
addq (%rdx), %r11
movq 0x98(%rsp), %rbx
imulq %r8, %rbx
addq 0x18(%rsp), %rbx
xorl %r14d, %r14d
cmpq %r13, %r14
je 0x26742d
movq %r9, %r15
imulq %r14, %r15
addq %rbx, %r15
xorl %r12d, %r12d
cmpq %rcx, %r12
je 0x267425
vmovss (%r15,%r12,4), %xmm0
cmpl $0x4, %edi
jne 0x2673a8
movq %r12, %rdx
shlq $0x4, %rdx
vmovups (%r15,%rdx), %xmm1
jmp 0x2673c1
vshufps $0x0, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
cmpl $0x8, %edi
jne 0x2673c1
movq %r12, %rdx
shlq $0x5, %rdx
vmovups (%r15,%rdx), %ymm2
jmp 0x2673c7
vinsertf128 $0x1, %xmm1, %ymm1, %ymm2
xorl %esi, %esi
leal 0x7(%rsi), %edx
cmpl %eax, %edx
jge 0x2673fc
vmulps (%r10), %ymm2, %ymm3
vmovups %ymm3, (%r11)
addq $0x20, %r10
addq $0x20, %r11
addl $0x8, %esi
jmp 0x2673c9
vmulps (%r10), %xmm1, %xmm2
vmovups %xmm2, (%r11)
addq $0x10, %r10
addq $0x10, %r11
addl $0x4, %esi
leal 0x3(%rsi), %edx
cmpl %eax, %edx
jl 0x2673e7
jmp 0x267419
vmulss (%r10), %xmm0, %xmm1
vmovss %xmm1, (%r11)
addq $0x4, %r10
addq $0x4, %r11
incl %esi
cmpl %eax, %esi
jl 0x267405
incq %r12
jmp 0x267385
incq %r14
jmp 0x26736f
incq %r8
jmp 0x267326
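# The same four layouts for division by the broadcast value, again
# via vrcpps plus one Newton-Raphson refinement against the
# 1.0f-style constant at 0x3eec88, with vdivss in the scalar tails.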
movq 0x20(%rsp), %rdx
movl 0x28(%rdx), %esi
leal -0x3(%rsi), %edx
cmpl $0x1, %edx
ja 0x2675e1
cmpl $0x1, 0x38(%rsp)
jne 0x2675e1
movl %edi, %r8d
imull %eax, %r8d
imull %ecx, %r8d
imull %r13d, %r8d
xorl %r9d, %r9d
movq 0x60(%rsp), %rdx
testl %edx, %edx
movl $0x0, %esi
cmovgl %edx, %esi
movq %rsi, 0x40(%rsp)
vbroadcastss 0x187805(%rip), %ymm0 # 0x3eec88
vbroadcastss 0x1877fc(%rip), %xmm1 # 0x3eec88
vmovss 0x1877f4(%rip), %xmm2 # 0x3eec88
cmpq 0x40(%rsp), %r9
je 0x2675d9
vmovss (%r10,%r9,4), %xmm3
cmpl $0x4, %edi
jne 0x2674b9
movq %r9, %rdx
shlq $0x4, %rdx
vmovups (%r10,%rdx), %xmm4
jmp 0x2674d2
vshufps $0x0, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[0,0,0,0]
cmpl $0x8, %edi
jne 0x2674d2
movq %r9, %rdx
shlq $0x5, %rdx
vmovups (%r10,%rdx), %ymm5
jmp 0x2674d8
vinsertf128 $0x1, %xmm4, %ymm4, %ymm5
movq 0x20(%rsp), %rdx
movq 0x40(%rdx), %r14
movq %r14, %r13
imulq %r9, %r13
movq (%rdx), %r11
movq 0x10(%rdx), %rdx
movq %rdx, 0xb0(%rsp)
imulq %rdx, %r13
addq %r11, %r13
movq 0x48(%rsp), %rdx
movq 0x40(%rdx), %r12
movq %r12, %rsi
imulq %r9, %rsi
movq (%rdx), %rbx
movq 0x10(%rdx), %rdx
movq %rdx, 0x28(%rsp)
imulq %rdx, %rsi
addq %rbx, %rsi
vrcpps %ymm5, %ymm6
vfmsub213ps %ymm0, %ymm6, %ymm5 # ymm5 = (ymm6 * ymm5) - ymm0
vfnmadd132ps %ymm6, %ymm6, %ymm5 # ymm5 = -(ymm5 * ymm6) + ymm6
xorl %r15d, %r15d
xorl %r10d, %r10d
leal 0x7(%r10), %edx
cmpl %r8d, %edx
jge 0x26755a
vmulps (%r13), %ymm5, %ymm6
vmovups %ymm6, (%rsi)
addq $0x20, %r13
addq $0x20, %rsi
addl $0x8, %r10d
addq $0x8, %r15
jmp 0x267535
vrcpps %xmm4, %xmm5
vfmsub213ps %xmm1, %xmm5, %xmm4 # xmm4 = (xmm5 * xmm4) - xmm1
vfnmadd132ps %xmm5, %xmm5, %xmm4 # xmm4 = -(xmm4 * xmm5) + xmm5
leal 0x3(%r10), %edx
cmpl %r8d, %edx
jge 0x26758d
vmulps (%r13), %xmm4, %xmm5
vmovups %xmm5, (%rsi)
addq $0x10, %r13
addq $0x10, %rsi
addl $0x4, %r10d
addq $0x4, %r15
jmp 0x267568
vdivss %xmm3, %xmm2, %xmm3
imulq 0x28(%rsp), %r12
imulq %r9, %r12
addq %r12, %rbx
imulq 0xb0(%rsp), %r14
imulq %r9, %r14
addq %r14, %r11
movq 0x80(%rsp), %r13
cmpl %r8d, %r15d
jge 0x2675cc
vmulss (%r11,%r15,4), %xmm3, %xmm4
vmovss %xmm4, (%rbx,%r15,4)
incq %r15
jmp 0x2675b6
incq %r9
movq 0x18(%rsp), %r10
jmp 0x267494
movq 0x20(%rsp), %rdx
movl 0x28(%rdx), %esi
cmpl $0x3, %esi
jne 0x267763
cmpl $0x2, 0x38(%rsp)
jne 0x267763
movl %edi, %r8d
imull %eax, %r8d
xorl %r9d, %r9d
testl %ecx, %ecx
movl $0x0, %r10d
cmovgl %ecx, %r10d
movq 0x60(%rsp), %rdx
testl %edx, %edx
movl $0x0, %r11d
cmovgl %edx, %r11d
movslq 0x34(%rsp), %rbx
imulq 0x78(%rsp), %rbx
vbroadcastss 0x187658(%rip), %ymm0 # 0x3eec88
vbroadcastss 0x18764f(%rip), %xmm1 # 0x3eec88
vmovss 0x187647(%rip), %xmm2 # 0x3eec88
cmpq %r11, %r9
je 0x26775b
movq 0x20(%rsp), %rdx
movq 0x40(%rdx), %r14
imulq %r9, %r14
imulq 0x10(%rdx), %r14
addq (%rdx), %r14
movq %rbx, %r15
imulq %r9, %r15
movq 0x48(%rsp), %rdx
movq 0x40(%rdx), %r12
imulq %r9, %r12
imulq 0x10(%rdx), %r12
addq 0x18(%rsp), %r15
addq (%rdx), %r12
xorl %r13d, %r13d
cmpq %r10, %r13
je 0x26774b
vmovss (%r15,%r13,4), %xmm3
cmpl $0x4, %edi
jne 0x2676a6
movq %r13, %rdx
shlq $0x4, %rdx
vmovups (%r15,%rdx), %xmm4
jmp 0x2676bf
vshufps $0x0, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[0,0,0,0]
cmpl $0x8, %edi
jne 0x2676bf
movq %r13, %rdx
shlq $0x5, %rdx
vmovups (%r15,%rdx), %ymm5
jmp 0x2676c5
vinsertf128 $0x1, %xmm4, %ymm4, %ymm5
vrcpps %ymm5, %ymm6
vfmsub213ps %ymm0, %ymm6, %ymm5 # ymm5 = (ymm6 * ymm5) - ymm0
vfnmadd132ps %ymm6, %ymm6, %ymm5 # ymm5 = -(ymm5 * ymm6) + ymm6
xorl %esi, %esi
leal 0x7(%rsi), %edx
cmpl %r8d, %edx
jge 0x2676f5
vmulps (%r14), %ymm5, %ymm6
vmovups %ymm6, (%r12)
addq $0x20, %r14
addq $0x20, %r12
addl $0x8, %esi
jmp 0x2676d5
vrcpps %xmm4, %xmm5
vfmsub213ps %xmm1, %xmm5, %xmm4 # xmm4 = (xmm5 * xmm4) - xmm1
vfnmadd132ps %xmm5, %xmm5, %xmm4 # xmm4 = -(xmm4 * xmm5) + xmm5
leal 0x3(%rsi), %edx
cmpl %r8d, %edx
jge 0x267723
vmulps (%r14), %xmm4, %xmm5
vmovups %xmm5, (%r12)
addq $0x10, %r14
addq $0x10, %r12
addl $0x4, %esi
jmp 0x267703
vdivss %xmm3, %xmm2, %xmm3
cmpl %r8d, %esi
jge 0x267743
vmulss (%r14), %xmm3, %xmm4
vmovss %xmm4, (%r12)
addq $0x4, %r14
addq $0x4, %r12
incl %esi
jmp 0x267727
incq %r13
jmp 0x267683
incq %r9
movq 0x80(%rsp), %r13
jmp 0x267641
movq 0x20(%rsp), %rdx
movl 0x28(%rdx), %esi
cmpl $0x4, %esi
jne 0x2678ea
cmpl $0x2, 0x38(%rsp)
jne 0x2678ea
movl %edi, %r8d
imull %eax, %r8d
imull %ecx, %r8d
xorl %r9d, %r9d
testl %r13d, %r13d
movl $0x0, %r10d
cmovgl %r13d, %r10d
movq 0x60(%rsp), %rdx
testl %edx, %edx
movl $0x0, %r11d
cmovgl %edx, %r11d
movslq 0x34(%rsp), %rbx
imulq 0x78(%rsp), %rbx
vbroadcastss 0x1874d1(%rip), %ymm0 # 0x3eec88
vbroadcastss 0x1874c8(%rip), %xmm1 # 0x3eec88
vmovss 0x1874c0(%rip), %xmm2 # 0x3eec88
cmpq %r11, %r9
je 0x2678e2
movq 0x20(%rsp), %rdx
movq 0x40(%rdx), %r14
imulq %r9, %r14
imulq 0x10(%rdx), %r14
addq (%rdx), %r14
movq %rbx, %r15
imulq %r9, %r15
movq 0x48(%rsp), %rdx
movq 0x40(%rdx), %r12
imulq %r9, %r12
imulq 0x10(%rdx), %r12
addq 0x18(%rsp), %r15
addq (%rdx), %r12
xorl %r13d, %r13d
cmpq %r10, %r13
je 0x2678d2
vmovss (%r15,%r13,4), %xmm3
cmpl $0x4, %edi
jne 0x26782d
movq %r13, %rdx
shlq $0x4, %rdx
vmovups (%r15,%rdx), %xmm4
jmp 0x267846
vshufps $0x0, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[0,0,0,0]
cmpl $0x8, %edi
jne 0x267846
movq %r13, %rdx
shlq $0x5, %rdx
vmovups (%r15,%rdx), %ymm5
jmp 0x26784c
vinsertf128 $0x1, %xmm4, %ymm4, %ymm5
vrcpps %ymm5, %ymm6
vfmsub213ps %ymm0, %ymm6, %ymm5 # ymm5 = (ymm6 * ymm5) - ymm0
vfnmadd132ps %ymm6, %ymm6, %ymm5 # ymm5 = -(ymm5 * ymm6) + ymm6
xorl %esi, %esi
leal 0x7(%rsi), %edx
cmpl %r8d, %edx
jge 0x26787c
vmulps (%r14), %ymm5, %ymm6
vmovups %ymm6, (%r12)
addq $0x20, %r14
addq $0x20, %r12
addl $0x8, %esi
jmp 0x26785c
vrcpps %xmm4, %xmm5
vfmsub213ps %xmm1, %xmm5, %xmm4 # xmm4 = (xmm5 * xmm4) - xmm1
vfnmadd132ps %xmm5, %xmm5, %xmm4 # xmm4 = -(xmm4 * xmm5) + xmm5
leal 0x3(%rsi), %edx
cmpl %r8d, %edx
jge 0x2678aa
vmulps (%r14), %xmm4, %xmm5
vmovups %xmm5, (%r12)
addq $0x10, %r14
addq $0x10, %r12
addl $0x4, %esi
jmp 0x26788a
vdivss %xmm3, %xmm2, %xmm3
cmpl %r8d, %esi
jge 0x2678ca
vmulss (%r14), %xmm3, %xmm4
vmovss %xmm4, (%r12)
addq $0x4, %r14
addq $0x4, %r12
incl %esi
jmp 0x2678ae
incq %r13
jmp 0x26780a
incq %r9
movq 0x80(%rsp), %r13
jmp 0x2677c8
movq 0x20(%rsp), %rdx
movl 0x28(%rdx), %esi
cmpl $0x4, %esi
jne 0x26bab3
cmpl $0x3, 0x38(%rsp)
jne 0x26bab3
imull %edi, %eax
xorl %r8d, %r8d
testl %ecx, %ecx
cmovlel %r8d, %ecx
testl %r13d, %r13d
cmovlel %r8d, %r13d
movq 0x60(%rsp), %rdx
testl %edx, %edx
cmovlel %r8d, %edx
movq %rdx, 0x60(%rsp)
movq 0x98(%rsp), %rdx
movq 0x78(%rsp), %rsi
imulq %rsi, %rdx
movq %rdx, 0x98(%rsp)
movslq 0x34(%rsp), %r9
imulq %rsi, %r9
vbroadcastss 0x18733c(%rip), %ymm0 # 0x3eec88
vbroadcastss 0x187333(%rip), %xmm1 # 0x3eec88
vmovss 0x18732b(%rip), %xmm2 # 0x3eec88
cmpq 0x60(%rsp), %r8
je 0x26bab3
movq 0x20(%rsp), %rdx
movq 0x40(%rdx), %r10
imulq %r8, %r10
imulq 0x10(%rdx), %r10
addq (%rdx), %r10
movq 0x48(%rsp), %rdx
movq 0x40(%rdx), %r11
imulq %r8, %r11
imulq 0x10(%rdx), %r11
addq (%rdx), %r11
movq 0x98(%rsp), %rbx
imulq %r8, %rbx
addq 0x18(%rsp), %rbx
xorl %r14d, %r14d
cmpq %r13, %r14
je 0x267a86
movq %r9, %r15
imulq %r14, %r15
addq %rbx, %r15
xorl %r12d, %r12d
cmpq %rcx, %r12
je 0x267a7e
vmovss (%r15,%r12,4), %xmm3
cmpl $0x4, %edi
jne 0x2679df
movq %r12, %rdx
shlq $0x4, %rdx
vmovups (%r15,%rdx), %xmm4
jmp 0x2679f8
vshufps $0x0, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[0,0,0,0]
cmpl $0x8, %edi
jne 0x2679f8
movq %r12, %rdx
shlq $0x5, %rdx
vmovups (%r15,%rdx), %ymm5
jmp 0x2679fe
vinsertf128 $0x1, %xmm4, %ymm4, %ymm5
vrcpps %ymm5, %ymm6
vfmsub213ps %ymm0, %ymm6, %ymm5 # ymm5 = (ymm6 * ymm5) - ymm0
vfnmadd132ps %ymm6, %ymm6, %ymm5 # ymm5 = -(ymm5 * ymm6) + ymm6
xorl %esi, %esi
leal 0x7(%rsi), %edx
cmpl %eax, %edx
jge 0x267a2c
vmulps (%r10), %ymm5, %ymm6
vmovups %ymm6, (%r11)
addq $0x20, %r10
addq $0x20, %r11
addl $0x8, %esi
jmp 0x267a0e
vrcpps %xmm4, %xmm5
vfmsub213ps %xmm1, %xmm5, %xmm4 # xmm4 = (xmm5 * xmm4) - xmm1
vfnmadd132ps %xmm5, %xmm5, %xmm4 # xmm4 = -(xmm4 * xmm5) + xmm5
leal 0x3(%rsi), %edx
cmpl %eax, %edx
jge 0x267a58
vmulps (%r10), %xmm4, %xmm5
vmovups %xmm5, (%r11)
addq $0x10, %r10
addq $0x10, %r11
addl $0x4, %esi
jmp 0x267a3a
vdivss %xmm3, %xmm2, %xmm3
cmpl %eax, %esi
jge 0x267a76
vmulss (%r10), %xmm3, %xmm4
vmovss %xmm4, (%r11)
addq $0x4, %r10
addq $0x4, %r11
incl %esi
jmp 0x267a5c
incq %r12
jmp 0x2679bc
incq %r14
jmp 0x2679a6
incq %r8
jmp 0x26795d
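# The same four layouts for reversed subtraction (broadcast - input).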
movq 0x20(%rsp), %rdx
movl 0x28(%rdx), %esi
leal -0x3(%rsi), %edx
cmpl $0x1, %edx
ja 0x267c00
cmpl $0x1, 0x38(%rsp)
jne 0x267c00
movl %edi, %r8d
imull %eax, %r8d
imull %ecx, %r8d
imull %r13d, %r8d
xorl %r9d, %r9d
movq 0x60(%rsp), %rdx
testl %edx, %edx
movl $0x0, %esi
cmovgl %edx, %esi
movq %rsi, 0x40(%rsp)
cmpq 0x40(%rsp), %r9
je 0x267bf8
vmovd (%r10,%r9,4), %xmm0
cmpl $0x4, %edi
jne 0x267af8
movq %r9, %rdx
shlq $0x4, %rdx
vmovdqu (%r10,%rdx), %xmm1
jmp 0x267b11
vpshufd $0x0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
cmpl $0x8, %edi
jne 0x267b11
movq %r9, %rdx
shlq $0x5, %rdx
vmovups (%r10,%rdx), %ymm2
jmp 0x267b17
vinsertf128 $0x1, %xmm1, %ymm1, %ymm2
movq 0x20(%rsp), %rdx
movq 0x40(%rdx), %r14
movq %r14, %r13
imulq %r9, %r13
movq (%rdx), %r11
movq 0x10(%rdx), %rdx
movq %rdx, 0xb0(%rsp)
imulq %rdx, %r13
addq %r11, %r13
movq 0x48(%rsp), %rdx
movq 0x40(%rdx), %r10
movq %r10, %rsi
imulq %r9, %rsi
movq (%rdx), %rbx
movq 0x10(%rdx), %rdx
movq %rdx, 0x28(%rsp)
imulq %rdx, %rsi
addq %rbx, %rsi
xorl %r15d, %r15d
xorl %r12d, %r12d
leal 0x7(%r12), %edx
cmpl %r8d, %edx
jge 0x267ba6
vsubps (%r13), %ymm2, %ymm3
vmovups %ymm3, (%rsi)
addq $0x20, %r13
addq $0x20, %rsi
addl $0x8, %r12d
addq $0x8, %r15
jmp 0x267b66
vsubps (%r13), %xmm1, %xmm2
vmovups %xmm2, (%rsi)
addq $0x10, %r13
addq $0x10, %rsi
addl $0x4, %r12d
addq $0x4, %r15
leal 0x3(%r12), %edx
cmpl %r8d, %edx
jl 0x267b8c
imulq 0x28(%rsp), %r10
imulq %r9, %r10
addq %r10, %rbx
imulq 0xb0(%rsp), %r14
imulq %r9, %r14
addq %r14, %r11
movq 0x80(%rsp), %r13
cmpl %r8d, %r15d
jge 0x267beb
vsubss (%r11,%r15,4), %xmm0, %xmm1
vmovss %xmm1, (%rbx,%r15,4)
incq %r15
jmp 0x267bd5
incq %r9
movq 0x18(%rsp), %r10
jmp 0x267ad3
movq 0x20(%rsp), %rdx
movl 0x28(%rdx), %esi
cmpl $0x3, %esi
jne 0x267d46
cmpl $0x2, 0x38(%rsp)
jne 0x267d46
movl %edi, %r8d
imull %eax, %r8d
xorl %r9d, %r9d
testl %ecx, %ecx
movl $0x0, %r10d
cmovgl %ecx, %r10d
movq 0x60(%rsp), %rdx
testl %edx, %edx
movl $0x0, %r11d
cmovgl %edx, %r11d
movslq 0x34(%rsp), %rbx
imulq 0x78(%rsp), %rbx
cmpq %r11, %r9
je 0x267d3e
movq 0x20(%rsp), %rdx
movq 0x40(%rdx), %r14
imulq %r9, %r14
imulq 0x10(%rdx), %r14
addq (%rdx), %r14
movq %rbx, %r15
imulq %r9, %r15
movq 0x48(%rsp), %rdx
movq 0x40(%rdx), %r12
imulq %r9, %r12
imulq 0x10(%rdx), %r12
addq 0x18(%rsp), %r15
addq (%rdx), %r12
xorl %r13d, %r13d
cmpq %r10, %r13
je 0x267d2e
vmovss (%r15,%r13,4), %xmm0
cmpl $0x4, %edi
jne 0x267cab
movq %r13, %rdx
shlq $0x4, %rdx
vmovups (%r15,%rdx), %xmm1
jmp 0x267cc4
vshufps $0x0, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
cmpl $0x8, %edi
jne 0x267cc4
movq %r13, %rdx
shlq $0x5, %rdx
vmovups (%r15,%rdx), %ymm2
jmp 0x267cca
vinsertf128 $0x1, %xmm1, %ymm1, %ymm2
xorl %esi, %esi
leal 0x7(%rsi), %edx
cmpl %r8d, %edx
jge 0x267d02
vsubps (%r14), %ymm2, %ymm3
vmovups %ymm3, (%r12)
addq $0x20, %r14
addq $0x20, %r12
addl $0x8, %esi
jmp 0x267ccc
vsubps (%r14), %xmm1, %xmm2
vmovups %xmm2, (%r12)
addq $0x10, %r14
addq $0x10, %r12
addl $0x4, %esi
leal 0x3(%rsi), %edx
cmpl %r8d, %edx
jl 0x267cec
jmp 0x267d21
vsubss (%r14), %xmm0, %xmm1
vmovss %xmm1, (%r12)
addq $0x4, %r14
addq $0x4, %r12
incl %esi
cmpl %r8d, %esi
jl 0x267d0c
incq %r13
jmp 0x267c88
incq %r9
movq 0x80(%rsp), %r13
jmp 0x267c46
movq 0x20(%rsp), %rdx
movl 0x28(%rdx), %esi
cmpl $0x4, %esi
jne 0x267e91
cmpl $0x2, 0x38(%rsp)
jne 0x267e91
movl %edi, %r8d
imull %eax, %r8d
imull %ecx, %r8d
xorl %r9d, %r9d
testl %r13d, %r13d
movl $0x0, %r10d
cmovgl %r13d, %r10d
movq 0x60(%rsp), %rdx
testl %edx, %edx
movl $0x0, %r11d
cmovgl %edx, %r11d
movslq 0x34(%rsp), %rbx
imulq 0x78(%rsp), %rbx
cmpq %r11, %r9
je 0x267e89
movq 0x20(%rsp), %rdx
movq 0x40(%rdx), %r14
imulq %r9, %r14
imulq 0x10(%rdx), %r14
addq (%rdx), %r14
movq %rbx, %r15
imulq %r9, %r15
movq 0x48(%rsp), %rdx
movq 0x40(%rdx), %r12
imulq %r9, %r12
imulq 0x10(%rdx), %r12
addq 0x18(%rsp), %r15
addq (%rdx), %r12
xorl %r13d, %r13d
cmpq %r10, %r13
je 0x267e79
vmovss (%r15,%r13,4), %xmm0
cmpl $0x4, %edi
jne 0x267df6
movq %r13, %rdx
shlq $0x4, %rdx
vmovups (%r15,%rdx), %xmm1
jmp 0x267e0f
vshufps $0x0, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
cmpl $0x8, %edi
jne 0x267e0f
movq %r13, %rdx
shlq $0x5, %rdx
vmovups (%r15,%rdx), %ymm2
jmp 0x267e15
vinsertf128 $0x1, %xmm1, %ymm1, %ymm2
xorl %esi, %esi
leal 0x7(%rsi), %edx
cmpl %r8d, %edx
jge 0x267e4d
vsubps (%r14), %ymm2, %ymm3
vmovups %ymm3, (%r12)
addq $0x20, %r14
addq $0x20, %r12
addl $0x8, %esi
jmp 0x267e17
vsubps (%r14), %xmm1, %xmm2
vmovups %xmm2, (%r12)
addq $0x10, %r14
addq $0x10, %r12
addl $0x4, %esi
leal 0x3(%rsi), %edx
cmpl %r8d, %edx
jl 0x267e37
jmp 0x267e6c
vsubss (%r14), %xmm0, %xmm1
vmovss %xmm1, (%r12)
addq $0x4, %r14
addq $0x4, %r12
incl %esi
cmpl %r8d, %esi
jl 0x267e57
incq %r13
jmp 0x267dd3
incq %r9
movq 0x80(%rsp), %r13
jmp 0x267d91
movq 0x20(%rsp), %rdx
movl 0x28(%rdx), %esi
cmpl $0x4, %esi
jne 0x26bab3
cmpl $0x3, 0x38(%rsp)
jne 0x26bab3
imull %edi, %eax
xorl %r8d, %r8d
testl %ecx, %ecx
cmovlel %r8d, %ecx
testl %r13d, %r13d
cmovlel %r8d, %r13d
movq 0x60(%rsp), %rdx
testl %edx, %edx
cmovlel %r8d, %edx
movq %rdx, 0x60(%rsp)
movq 0x98(%rsp), %rdx
movq 0x78(%rsp), %rsi
imulq %rsi, %rdx
movq %rdx, 0x98(%rsp)
movslq 0x34(%rsp), %r9
imulq %rsi, %r9
cmpq 0x60(%rsp), %r8
je 0x26bab3
movq 0x20(%rsp), %rdx
movq 0x40(%rdx), %r10
imulq %r8, %r10
imulq 0x10(%rdx), %r10
addq (%rdx), %r10
movq 0x48(%rsp), %rdx
movq 0x40(%rdx), %r11
imulq %r8, %r11
imulq 0x10(%rdx), %r11
addq (%rdx), %r11
movq 0x98(%rsp), %rbx
imulq %r8, %rbx
addq 0x18(%rsp), %rbx
xorl %r14d, %r14d
cmpq %r13, %r14
je 0x267ff1
movq %r9, %r15
imulq %r14, %r15
addq %rbx, %r15
xorl %r12d, %r12d
cmpq %rcx, %r12
je 0x267fe9
vmovss (%r15,%r12,4), %xmm0
cmpl $0x4, %edi
jne 0x267f6c
movq %r12, %rdx
shlq $0x4, %rdx
vmovups (%r15,%rdx), %xmm1
jmp 0x267f85
vshufps $0x0, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
cmpl $0x8, %edi
jne 0x267f85
movq %r12, %rdx
shlq $0x5, %rdx
vmovups (%r15,%rdx), %ymm2
jmp 0x267f8b
vinsertf128 $0x1, %xmm1, %ymm1, %ymm2
xorl %esi, %esi
leal 0x7(%rsi), %edx
cmpl %eax, %edx
jge 0x267fc0
vsubps (%r10), %ymm2, %ymm3
vmovups %ymm3, (%r11)
addq $0x20, %r10
addq $0x20, %r11
addl $0x8, %esi
jmp 0x267f8d
vsubps (%r10), %xmm1, %xmm2
vmovups %xmm2, (%r11)
addq $0x10, %r10
addq $0x10, %r11
addl $0x4, %esi
leal 0x3(%rsi), %edx
cmpl %eax, %edx
jl 0x267fab
jmp 0x267fdd
vsubss (%r10), %xmm0, %xmm1
vmovss %xmm1, (%r11)
addq $0x4, %r10
addq $0x4, %r11
incl %esi
cmpl %eax, %esi
jl 0x267fc9
incq %r12
jmp 0x267f49
incq %r14
jmp 0x267f33
incq %r8
jmp 0x267eea
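# Addition and plain subtraction follow, interleaved pairwise: each of
# the four broadcast layouts gets a vaddps/vaddss case immediately
# followed by a case that loads the input first and subtracts the
# broadcast value from it (input - broadcast).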
movq 0x20(%rsp), %rdx
movl 0x28(%rdx), %esi
leal -0x3(%rsi), %edx
cmpl $0x1, %edx
ja 0x2682e1
cmpl $0x1, 0x38(%rsp)
jne 0x2682e1
movl %edi, %r8d
imull %eax, %r8d
imull %ecx, %r8d
imull %r13d, %r8d
xorl %r9d, %r9d
movq 0x60(%rsp), %rdx
testl %edx, %edx
movl $0x0, %esi
cmovgl %edx, %esi
movq %rsi, 0x40(%rsp)
cmpq 0x40(%rsp), %r9
je 0x2682d9
vmovd (%r10,%r9,4), %xmm0
cmpl $0x4, %edi
jne 0x268063
movq %r9, %rdx
shlq $0x4, %rdx
vmovdqu (%r10,%rdx), %xmm1
jmp 0x26807c
vpshufd $0x0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
cmpl $0x8, %edi
jne 0x26807c
movq %r9, %rdx
shlq $0x5, %rdx
vmovups (%r10,%rdx), %ymm2
jmp 0x268082
vinsertf128 $0x1, %xmm1, %ymm1, %ymm2
movq 0x20(%rsp), %rdx
movq 0x40(%rdx), %r14
movq %r14, %r13
imulq %r9, %r13
movq (%rdx), %r11
movq 0x10(%rdx), %rdx
movq %rdx, 0xb0(%rsp)
imulq %rdx, %r13
addq %r11, %r13
movq 0x48(%rsp), %rdx
movq 0x40(%rdx), %r10
movq %r10, %rsi
imulq %r9, %rsi
movq (%rdx), %rbx
movq 0x10(%rdx), %rdx
movq %rdx, 0x28(%rsp)
imulq %rdx, %rsi
addq %rbx, %rsi
xorl %r15d, %r15d
xorl %r12d, %r12d
leal 0x7(%r12), %edx
cmpl %r8d, %edx
jge 0x268111
vaddps (%r13), %ymm2, %ymm3
vmovups %ymm3, (%rsi)
addq $0x20, %r13
addq $0x20, %rsi
addl $0x8, %r12d
addq $0x8, %r15
jmp 0x2680d1
vaddps (%r13), %xmm1, %xmm2
vmovups %xmm2, (%rsi)
addq $0x10, %r13
addq $0x10, %rsi
addl $0x4, %r12d
addq $0x4, %r15
leal 0x3(%r12), %edx
cmpl %r8d, %edx
jl 0x2680f7
imulq 0x28(%rsp), %r10
imulq %r9, %r10
addq %r10, %rbx
imulq 0xb0(%rsp), %r14
imulq %r9, %r14
addq %r14, %r11
movq 0x80(%rsp), %r13
cmpl %r8d, %r15d
jge 0x268156
vaddss (%r11,%r15,4), %xmm0, %xmm1
vmovss %xmm1, (%rbx,%r15,4)
incq %r15
jmp 0x268140
incq %r9
movq 0x18(%rsp), %r10
jmp 0x26803e
movq 0x20(%rsp), %rdx
movl 0x28(%rdx), %esi
leal -0x3(%rsi), %edx
cmpl $0x1, %edx
ja 0x268427
cmpl $0x1, 0x38(%rsp)
jne 0x268427
movl %edi, %r8d
imull %eax, %r8d
imull %ecx, %r8d
imull %r13d, %r8d
xorl %r9d, %r9d
movq 0x60(%rsp), %rdx
testl %edx, %edx
movl $0x0, %esi
cmovgl %edx, %esi
movq %rsi, 0x40(%rsp)
cmpq 0x40(%rsp), %r9
je 0x26841f
vmovd (%r10,%r9,4), %xmm0
cmpl $0x4, %edi
jne 0x2681cd
movq %r9, %rdx
shlq $0x4, %rdx
vmovdqu (%r10,%rdx), %xmm1
jmp 0x2681e6
vpshufd $0x0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
cmpl $0x8, %edi
jne 0x2681e6
movq %r9, %rdx
shlq $0x5, %rdx
vmovups (%r10,%rdx), %ymm2
jmp 0x2681ec
vinsertf128 $0x1, %xmm1, %ymm1, %ymm2
movq 0x20(%rsp), %rdx
movq 0x40(%rdx), %r14
movq %r14, %r13
imulq %r9, %r13
movq (%rdx), %r11
movq 0x10(%rdx), %rdx
movq %rdx, 0xb0(%rsp)
imulq %rdx, %r13
addq %r11, %r13
movq 0x48(%rsp), %rdx
movq 0x40(%rdx), %r10
movq %r10, %rsi
imulq %r9, %rsi
movq (%rdx), %rbx
movq 0x10(%rdx), %rdx
movq %rdx, 0x28(%rsp)
imulq %rdx, %rsi
addq %rbx, %rsi
xorl %r15d, %r15d
xorl %r12d, %r12d
leal 0x7(%r12), %edx
cmpl %r8d, %edx
jge 0x268283
vmovups (%r13), %ymm3
vsubps %ymm2, %ymm3, %ymm3
vmovups %ymm3, (%rsi)
addq $0x20, %r13
addq $0x20, %rsi
addl $0x8, %r12d
addq $0x8, %r15
jmp 0x26823b
vmovups (%r13), %xmm2
vsubps %xmm1, %xmm2, %xmm2
vmovups %xmm2, (%rsi)
addq $0x10, %r13
addq $0x10, %rsi
addl $0x4, %r12d
addq $0x4, %r15
leal 0x3(%r12), %edx
cmpl %r8d, %edx
jl 0x268265
imulq 0x28(%rsp), %r10
imulq %r9, %r10
addq %r10, %rbx
imulq 0xb0(%rsp), %r14
imulq %r9, %r14
addq %r14, %r11
movq 0x80(%rsp), %r13
cmpl %r8d, %r15d
jge 0x2682cc
vmovss (%r11,%r15,4), %xmm1
vsubss %xmm0, %xmm1, %xmm1
vmovss %xmm1, (%rbx,%r15,4)
incq %r15
jmp 0x2682b2
incq %r9
movq 0x18(%rsp), %r10
jmp 0x2681a8
movq 0x20(%rsp), %rdx
movl 0x28(%rdx), %esi
cmpl $0x3, %esi
jne 0x268579
cmpl $0x2, 0x38(%rsp)
jne 0x268579
movl %edi, %r8d
imull %eax, %r8d
xorl %r9d, %r9d
testl %ecx, %ecx
movl $0x0, %r10d
cmovgl %ecx, %r10d
movq 0x60(%rsp), %rdx
testl %edx, %edx
movl $0x0, %r11d
cmovgl %edx, %r11d
movslq 0x34(%rsp), %rbx
imulq 0x78(%rsp), %rbx
cmpq %r11, %r9
je 0x268571
movq 0x20(%rsp), %rdx
movq 0x40(%rdx), %r14
imulq %r9, %r14
imulq 0x10(%rdx), %r14
addq (%rdx), %r14
movq %rbx, %r15
imulq %r9, %r15
movq 0x48(%rsp), %rdx
movq 0x40(%rdx), %r12
imulq %r9, %r12
imulq 0x10(%rdx), %r12
addq 0x18(%rsp), %r15
addq (%rdx), %r12
xorl %r13d, %r13d
cmpq %r10, %r13
je 0x26840f
vmovss (%r15,%r13,4), %xmm0
cmpl $0x4, %edi
jne 0x26838c
movq %r13, %rdx
shlq $0x4, %rdx
vmovups (%r15,%rdx), %xmm1
jmp 0x2683a5
vshufps $0x0, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
cmpl $0x8, %edi
jne 0x2683a5
movq %r13, %rdx
shlq $0x5, %rdx
vmovups (%r15,%rdx), %ymm2
jmp 0x2683ab
vinsertf128 $0x1, %xmm1, %ymm1, %ymm2
xorl %esi, %esi
leal 0x7(%rsi), %edx
cmpl %r8d, %edx
jge 0x2683e3
vaddps (%r14), %ymm2, %ymm3
vmovups %ymm3, (%r12)
addq $0x20, %r14
addq $0x20, %r12
addl $0x8, %esi
jmp 0x2683ad
vaddps (%r14), %xmm1, %xmm2
vmovups %xmm2, (%r12)
addq $0x10, %r14
addq $0x10, %r12
addl $0x4, %esi
leal 0x3(%rsi), %edx
cmpl %r8d, %edx
jl 0x2683cd
jmp 0x268402
vaddss (%r14), %xmm0, %xmm1
vmovss %xmm1, (%r12)
addq $0x4, %r14
addq $0x4, %r12
incl %esi
cmpl %r8d, %esi
jl 0x2683ed
incq %r13
jmp 0x268369
incq %r9
movq 0x80(%rsp), %r13
jmp 0x268327
movq 0x20(%rsp), %rdx
movl 0x28(%rdx), %esi
cmpl $0x3, %esi
jne 0x2686c4
cmpl $0x2, 0x38(%rsp)
jne 0x2686c4
movl %edi, %r8d
imull %eax, %r8d
xorl %r9d, %r9d
testl %ecx, %ecx
movl $0x0, %r10d
cmovgl %ecx, %r10d
movq 0x60(%rsp), %rdx
testl %edx, %edx
movl $0x0, %r11d
cmovgl %edx, %r11d
movslq 0x34(%rsp), %rbx
imulq 0x78(%rsp), %rbx
cmpq %r11, %r9
je 0x2686bc
movq 0x20(%rsp), %rdx
movq 0x40(%rdx), %r14
imulq %r9, %r14
imulq 0x10(%rdx), %r14
addq (%rdx), %r14
movq %rbx, %r15
imulq %r9, %r15
movq 0x48(%rsp), %rdx
movq 0x40(%rdx), %r12
imulq %r9, %r12
imulq 0x10(%rdx), %r12
addq 0x18(%rsp), %r15
addq (%rdx), %r12
xorl %r13d, %r13d
cmpq %r10, %r13
je 0x268561
vmovss (%r15,%r13,4), %xmm0
cmpl $0x4, %edi
jne 0x2684d2
movq %r13, %rdx
shlq $0x4, %rdx
vmovups (%r15,%rdx), %xmm1
jmp 0x2684eb
vshufps $0x0, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
cmpl $0x8, %edi
jne 0x2684eb
movq %r13, %rdx
shlq $0x5, %rdx
vmovups (%r15,%rdx), %ymm2
jmp 0x2684f1
vinsertf128 $0x1, %xmm1, %ymm1, %ymm2
xorl %esi, %esi
leal 0x7(%rsi), %edx
cmpl %r8d, %edx
jge 0x268531
vmovups (%r14), %ymm3
vsubps %ymm2, %ymm3, %ymm3
vmovups %ymm3, (%r12)
addq $0x20, %r14
addq $0x20, %r12
addl $0x8, %esi
jmp 0x2684f3
vmovups (%r14), %xmm2
vsubps %xmm1, %xmm2, %xmm2
vmovups %xmm2, (%r12)
addq $0x10, %r14
addq $0x10, %r12
addl $0x4, %esi
leal 0x3(%rsi), %edx
cmpl %r8d, %edx
jl 0x268517
jmp 0x268554
vmovss (%r14), %xmm1
vsubss %xmm0, %xmm1, %xmm1
vmovss %xmm1, (%r12)
addq $0x4, %r14
addq $0x4, %r12
incl %esi
cmpl %r8d, %esi
jl 0x26853b
incq %r13
jmp 0x2684af
incq %r9
movq 0x80(%rsp), %r13
jmp 0x26846d
movq 0x20(%rsp), %rdx
movl 0x28(%rdx), %esi
cmpl $0x4, %esi
jne 0x26881b
cmpl $0x2, 0x38(%rsp)
jne 0x26881b
movl %edi, %r8d
imull %eax, %r8d
imull %ecx, %r8d
xorl %r9d, %r9d
testl %r13d, %r13d
movl $0x0, %r10d
cmovgl %r13d, %r10d
movq 0x60(%rsp), %rdx
testl %edx, %edx
movl $0x0, %r11d
cmovgl %edx, %r11d
movslq 0x34(%rsp), %rbx
imulq 0x78(%rsp), %rbx
cmpq %r11, %r9
je 0x268813
movq 0x20(%rsp), %rdx
movq 0x40(%rdx), %r14
imulq %r9, %r14
imulq 0x10(%rdx), %r14
addq (%rdx), %r14
movq %rbx, %r15
imulq %r9, %r15
movq 0x48(%rsp), %rdx
movq 0x40(%rdx), %r12
imulq %r9, %r12
imulq 0x10(%rdx), %r12
addq 0x18(%rsp), %r15
addq (%rdx), %r12
xorl %r13d, %r13d
cmpq %r10, %r13
je 0x2686ac
vmovss (%r15,%r13,4), %xmm0
cmpl $0x4, %edi
jne 0x268629
movq %r13, %rdx
shlq $0x4, %rdx
vmovups (%r15,%rdx), %xmm1
jmp 0x268642
vshufps $0x0, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
cmpl $0x8, %edi
jne 0x268642
movq %r13, %rdx
shlq $0x5, %rdx
vmovups (%r15,%rdx), %ymm2
jmp 0x268648
vinsertf128 $0x1, %xmm1, %ymm1, %ymm2
xorl %esi, %esi
leal 0x7(%rsi), %edx
cmpl %r8d, %edx
jge 0x268680
vaddps (%r14), %ymm2, %ymm3
vmovups %ymm3, (%r12)
addq $0x20, %r14
addq $0x20, %r12
addl $0x8, %esi
jmp 0x26864a
vaddps (%r14), %xmm1, %xmm2
vmovups %xmm2, (%r12)
addq $0x10, %r14
addq $0x10, %r12
addl $0x4, %esi
leal 0x3(%rsi), %edx
cmpl %r8d, %edx
jl 0x26866a
jmp 0x26869f
vaddss (%r14), %xmm0, %xmm1
vmovss %xmm1, (%r12)
addq $0x4, %r14
addq $0x4, %r12
incl %esi
cmpl %r8d, %esi
jl 0x26868a
incq %r13
jmp 0x268606
incq %r9
movq 0x80(%rsp), %r13
jmp 0x2685c4
movq 0x20(%rsp), %rdx
movl 0x28(%rdx), %esi
cmpl $0x4, %esi
jne 0x26898b
cmpl $0x2, 0x38(%rsp)
jne 0x26898b
movl %edi, %r8d
imull %eax, %r8d
imull %ecx, %r8d
xorl %r9d, %r9d
testl %r13d, %r13d
movl $0x0, %r10d
cmovgl %r13d, %r10d
movq 0x60(%rsp), %rdx
testl %edx, %edx
movl $0x0, %r11d
cmovgl %edx, %r11d
movslq 0x34(%rsp), %rbx
imulq 0x78(%rsp), %rbx
cmpq %r11, %r9
je 0x268983
movq 0x20(%rsp), %rdx
movq 0x40(%rdx), %r14
imulq %r9, %r14
imulq 0x10(%rdx), %r14
addq (%rdx), %r14
movq %rbx, %r15
imulq %r9, %r15
movq 0x48(%rsp), %rdx
movq 0x40(%rdx), %r12
imulq %r9, %r12
imulq 0x10(%rdx), %r12
addq 0x18(%rsp), %r15
addq (%rdx), %r12
xorl %r13d, %r13d
cmpq %r10, %r13
je 0x268803
vmovss (%r15,%r13,4), %xmm0
cmpl $0x4, %edi
jne 0x268774
movq %r13, %rdx
shlq $0x4, %rdx
vmovups (%r15,%rdx), %xmm1
jmp 0x26878d
vshufps $0x0, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
cmpl $0x8, %edi
jne 0x26878d
movq %r13, %rdx
shlq $0x5, %rdx
vmovups (%r15,%rdx), %ymm2
jmp 0x268793
vinsertf128 $0x1, %xmm1, %ymm1, %ymm2
xorl %esi, %esi
leal 0x7(%rsi), %edx
cmpl %r8d, %edx
jge 0x2687d3
vmovups (%r14), %ymm3
vsubps %ymm2, %ymm3, %ymm3
vmovups %ymm3, (%r12)
addq $0x20, %r14
addq $0x20, %r12
addl $0x8, %esi
jmp 0x268795
vmovups (%r14), %xmm2
vsubps %xmm1, %xmm2, %xmm2
vmovups %xmm2, (%r12)
addq $0x10, %r14
addq $0x10, %r12
addl $0x4, %esi
leal 0x3(%rsi), %edx
cmpl %r8d, %edx
jl 0x2687b9
jmp 0x2687f6
vmovss (%r14), %xmm1
vsubss %xmm0, %xmm1, %xmm1
vmovss %xmm1, (%r12)
addq $0x4, %r14
addq $0x4, %r12
incl %esi
cmpl %r8d, %esi
jl 0x2687dd
incq %r13
jmp 0x268751
incq %r9
movq 0x80(%rsp), %r13
jmp 0x26870f
movq 0x20(%rsp), %rdx
movl 0x28(%rdx), %esi
cmpl $0x4, %esi
jne 0x26bab3
cmpl $0x3, 0x38(%rsp)
jne 0x26bab3
imull %edi, %eax
xorl %r8d, %r8d
testl %ecx, %ecx
cmovlel %r8d, %ecx
testl %r13d, %r13d
cmovlel %r8d, %r13d
movq 0x60(%rsp), %rdx
testl %edx, %edx
cmovlel %r8d, %edx
movq %rdx, 0x60(%rsp)
movq 0x98(%rsp), %rdx
movq 0x78(%rsp), %rsi
imulq %rsi, %rdx
movq %rdx, 0x98(%rsp)
movslq 0x34(%rsp), %r9
imulq %rsi, %r9
cmpq 0x60(%rsp), %r8
je 0x26bab3
movq 0x20(%rsp), %rdx
movq 0x40(%rdx), %r10
imulq %r8, %r10
imulq 0x10(%rdx), %r10
addq (%rdx), %r10
movq 0x48(%rsp), %rdx
movq 0x40(%rdx), %r11
imulq %r8, %r11
imulq 0x10(%rdx), %r11
addq (%rdx), %r11
movq 0x98(%rsp), %rbx
imulq %r8, %rbx
addq 0x18(%rsp), %rbx
xorl %r14d, %r14d
cmpq %r13, %r14
je 0x26897b
movq %r9, %r15
imulq %r14, %r15
addq %rbx, %r15
xorl %r12d, %r12d
cmpq %rcx, %r12
je 0x268973
vmovss (%r15,%r12,4), %xmm0
cmpl $0x4, %edi
jne 0x2688f6
movq %r12, %rdx
shlq $0x4, %rdx
vmovups (%r15,%rdx), %xmm1
jmp 0x26890f
vshufps $0x0, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
cmpl $0x8, %edi
jne 0x26890f
movq %r12, %rdx
shlq $0x5, %rdx
vmovups (%r15,%rdx), %ymm2
jmp 0x268915
vinsertf128 $0x1, %xmm1, %ymm1, %ymm2
xorl %esi, %esi
leal 0x7(%rsi), %edx
cmpl %eax, %edx
jge 0x26894a
vaddps (%r10), %ymm2, %ymm3
vmovups %ymm3, (%r11)
addq $0x20, %r10
addq $0x20, %r11
addl $0x8, %esi
jmp 0x268917
vaddps (%r10), %xmm1, %xmm2
vmovups %xmm2, (%r11)
addq $0x10, %r10
addq $0x10, %r11
addl $0x4, %esi
leal 0x3(%rsi), %edx
cmpl %eax, %edx
jl 0x268935
jmp 0x268967
vaddss (%r10), %xmm0, %xmm1
vmovss %xmm1, (%r11)
addq $0x4, %r10
addq $0x4, %r11
incl %esi
cmpl %eax, %esi
jl 0x268953
incq %r12
jmp 0x2688d3
incq %r14
jmp 0x2688bd
incq %r8
jmp 0x268874
movq 0x20(%rsp), %rdx
movl 0x28(%rdx), %esi
cmpl $0x4, %esi
jne 0x26bab3
cmpl $0x3, 0x38(%rsp)
jne 0x26bab3
imull %edi, %eax
xorl %r8d, %r8d
testl %ecx, %ecx
cmovlel %r8d, %ecx
testl %r13d, %r13d
cmovlel %r8d, %r13d
movq 0x60(%rsp), %rdx
testl %edx, %edx
cmovlel %r8d, %edx
movq %rdx, 0x60(%rsp)
movq 0x98(%rsp), %rdx
movq 0x78(%rsp), %rsi
imulq %rsi, %rdx
movq %rdx, 0x98(%rsp)
movslq 0x34(%rsp), %r9
imulq %rsi, %r9
cmpq 0x60(%rsp), %r8
je 0x26bab3
movq 0x20(%rsp), %rdx
movq 0x40(%rdx), %r10
imulq %r8, %r10
imulq 0x10(%rdx), %r10
addq (%rdx), %r10
movq 0x48(%rsp), %rdx
movq 0x40(%rdx), %r11
imulq %r8, %r11
imulq 0x10(%rdx), %r11
addq (%rdx), %r11
movq 0x98(%rsp), %rbx
imulq %r8, %rbx
addq 0x18(%rsp), %rbx
xorl %r14d, %r14d
cmpq %r13, %r14
je 0x268af7
movq %r9, %r15
imulq %r14, %r15
addq %rbx, %r15
xorl %r12d, %r12d
cmpq %rcx, %r12
je 0x268aef
vmovss (%r15,%r12,4), %xmm0
cmpl $0x4, %edi
jne 0x268a66
movq %r12, %rdx
shlq $0x4, %rdx
vmovups (%r15,%rdx), %xmm1
jmp 0x268a7f
vshufps $0x0, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
cmpl $0x8, %edi
jne 0x268a7f
movq %r12, %rdx
shlq $0x5, %rdx
vmovups (%r15,%rdx), %ymm2
jmp 0x268a85
vinsertf128 $0x1, %xmm1, %ymm1, %ymm2
xorl %esi, %esi
leal 0x7(%rsi), %edx
cmpl %eax, %edx
jge 0x268ac2
vmovups (%r10), %ymm3
vsubps %ymm2, %ymm3, %ymm3
vmovups %ymm3, (%r11)
addq $0x20, %r10
addq $0x20, %r11
addl $0x8, %esi
jmp 0x268a87
vmovups (%r10), %xmm2
vsubps %xmm1, %xmm2, %xmm2
vmovups %xmm2, (%r11)
addq $0x10, %r10
addq $0x10, %r11
addl $0x4, %esi
leal 0x3(%rsi), %edx
cmpl %eax, %edx
jl 0x268aa9
jmp 0x268ae3
vmovss (%r10), %xmm1
vsubss %xmm0, %xmm1, %xmm1
vmovss %xmm1, (%r11)
addq $0x4, %r10
addq $0x4, %r11
incl %esi
cmpl %eax, %esi
jl 0x268acb
incq %r12
jmp 0x268a43
incq %r14
jmp 0x268a2d
incq %r8
jmp 0x2689e4
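# The four layouts once more for the elementwise minimum
# (vminps/vminss).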
movq 0x20(%rsp), %rdx
movl 0x28(%rdx), %esi
leal -0x3(%rsi), %edx
cmpl $0x1, %edx
ja 0x268c71
cmpl $0x1, 0x38(%rsp)
jne 0x268c71
movl %edi, %r8d
imull %eax, %r8d
imull %ecx, %r8d
imull %r13d, %r8d
xorl %r9d, %r9d
movq 0x60(%rsp), %rdx
testl %edx, %edx
movl $0x0, %esi
cmovgl %edx, %esi
movq %rsi, 0x40(%rsp)
cmpq 0x40(%rsp), %r9
je 0x268c69
vmovd (%r10,%r9,4), %xmm0
cmpl $0x4, %edi
jne 0x268b69
movq %r9, %rdx
shlq $0x4, %rdx
vmovdqu (%r10,%rdx), %xmm1
jmp 0x268b82
vpshufd $0x0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
cmpl $0x8, %edi
jne 0x268b82
movq %r9, %rdx
shlq $0x5, %rdx
vmovups (%r10,%rdx), %ymm2
jmp 0x268b88
vinsertf128 $0x1, %xmm1, %ymm1, %ymm2
movq 0x20(%rsp), %rdx
movq 0x40(%rdx), %r14
movq %r14, %r13
imulq %r9, %r13
movq (%rdx), %r11
movq 0x10(%rdx), %rdx
movq %rdx, 0xb0(%rsp)
imulq %rdx, %r13
addq %r11, %r13
movq 0x48(%rsp), %rdx
movq 0x40(%rdx), %r10
movq %r10, %rsi
imulq %r9, %rsi
movq (%rdx), %rbx
movq 0x10(%rdx), %rdx
movq %rdx, 0x28(%rsp)
imulq %rdx, %rsi
addq %rbx, %rsi
xorl %r15d, %r15d
xorl %r12d, %r12d
leal 0x7(%r12), %edx
cmpl %r8d, %edx
jge 0x268c17
vminps (%r13), %ymm2, %ymm3
vmovups %ymm3, (%rsi)
addq $0x20, %r13
addq $0x20, %rsi
addl $0x8, %r12d
addq $0x8, %r15
jmp 0x268bd7
vminps (%r13), %xmm1, %xmm2
vmovups %xmm2, (%rsi)
addq $0x10, %r13
addq $0x10, %rsi
addl $0x4, %r12d
addq $0x4, %r15
leal 0x3(%r12), %edx
cmpl %r8d, %edx
jl 0x268bfd
imulq 0x28(%rsp), %r10
imulq %r9, %r10
addq %r10, %rbx
imulq 0xb0(%rsp), %r14
imulq %r9, %r14
addq %r14, %r11
movq 0x80(%rsp), %r13
cmpl %r8d, %r15d
jge 0x268c5c
vminss (%r11,%r15,4), %xmm0, %xmm1
vmovss %xmm1, (%rbx,%r15,4)
incq %r15
jmp 0x268c46
incq %r9
movq 0x18(%rsp), %r10
jmp 0x268b44
movq 0x20(%rsp), %rdx
movl 0x28(%rdx), %esi
cmpl $0x3, %esi
jne 0x268db7
cmpl $0x2, 0x38(%rsp)
jne 0x268db7
movl %edi, %r8d
imull %eax, %r8d
xorl %r9d, %r9d
testl %ecx, %ecx
movl $0x0, %r10d
cmovgl %ecx, %r10d
movq 0x60(%rsp), %rdx
testl %edx, %edx
movl $0x0, %r11d
cmovgl %edx, %r11d
movslq 0x34(%rsp), %rbx
imulq 0x78(%rsp), %rbx
cmpq %r11, %r9
je 0x268daf
movq 0x20(%rsp), %rdx
movq 0x40(%rdx), %r14
imulq %r9, %r14
imulq 0x10(%rdx), %r14
addq (%rdx), %r14
movq %rbx, %r15
imulq %r9, %r15
movq 0x48(%rsp), %rdx
movq 0x40(%rdx), %r12
imulq %r9, %r12
imulq 0x10(%rdx), %r12
addq 0x18(%rsp), %r15
addq (%rdx), %r12
xorl %r13d, %r13d
cmpq %r10, %r13
je 0x268d9f
vmovss (%r15,%r13,4), %xmm0
cmpl $0x4, %edi
jne 0x268d1c
movq %r13, %rdx
shlq $0x4, %rdx
vmovups (%r15,%rdx), %xmm1
jmp 0x268d35
vshufps $0x0, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
cmpl $0x8, %edi
jne 0x268d35
movq %r13, %rdx
shlq $0x5, %rdx
vmovups (%r15,%rdx), %ymm2
jmp 0x268d3b
vinsertf128 $0x1, %xmm1, %ymm1, %ymm2
xorl %esi, %esi
leal 0x7(%rsi), %edx
cmpl %r8d, %edx
jge 0x268d73
vminps (%r14), %ymm2, %ymm3
vmovups %ymm3, (%r12)
addq $0x20, %r14
addq $0x20, %r12
addl $0x8, %esi
jmp 0x268d3d
vminps (%r14), %xmm1, %xmm2
vmovups %xmm2, (%r12)
addq $0x10, %r14
addq $0x10, %r12
addl $0x4, %esi
leal 0x3(%rsi), %edx
cmpl %r8d, %edx
jl 0x268d5d
jmp 0x268d92
vminss (%r14), %xmm0, %xmm1
vmovss %xmm1, (%r12)
addq $0x4, %r14
addq $0x4, %r12
incl %esi
cmpl %r8d, %esi
jl 0x268d7d
incq %r13
jmp 0x268cf9
incq %r9
movq 0x80(%rsp), %r13
jmp 0x268cb7
movq 0x20(%rsp), %rdx
movl 0x28(%rdx), %esi
cmpl $0x4, %esi
jne 0x268f02
cmpl $0x2, 0x38(%rsp)
jne 0x268f02
movl %edi, %r8d
imull %eax, %r8d
imull %ecx, %r8d
xorl %r9d, %r9d
testl %r13d, %r13d
movl $0x0, %r10d
cmovgl %r13d, %r10d
movq 0x60(%rsp), %rdx
testl %edx, %edx
movl $0x0, %r11d
cmovgl %edx, %r11d
movslq 0x34(%rsp), %rbx
imulq 0x78(%rsp), %rbx
cmpq %r11, %r9
je 0x268efa
movq 0x20(%rsp), %rdx
movq 0x40(%rdx), %r14
imulq %r9, %r14
imulq 0x10(%rdx), %r14
addq (%rdx), %r14
movq %rbx, %r15
imulq %r9, %r15
movq 0x48(%rsp), %rdx
movq 0x40(%rdx), %r12
imulq %r9, %r12
imulq 0x10(%rdx), %r12
addq 0x18(%rsp), %r15
addq (%rdx), %r12
xorl %r13d, %r13d
cmpq %r10, %r13
je 0x268eea
vmovss (%r15,%r13,4), %xmm0
cmpl $0x4, %edi
jne 0x268e67
movq %r13, %rdx
shlq $0x4, %rdx
vmovups (%r15,%rdx), %xmm1
jmp 0x268e80
vshufps $0x0, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
cmpl $0x8, %edi
jne 0x268e80
movq %r13, %rdx
shlq $0x5, %rdx
vmovups (%r15,%rdx), %ymm2
jmp 0x268e86
vinsertf128 $0x1, %xmm1, %ymm1, %ymm2
xorl %esi, %esi
leal 0x7(%rsi), %edx
cmpl %r8d, %edx
jge 0x268ebe
vminps (%r14), %ymm2, %ymm3
vmovups %ymm3, (%r12)
addq $0x20, %r14
addq $0x20, %r12
addl $0x8, %esi
jmp 0x268e88
vminps (%r14), %xmm1, %xmm2
vmovups %xmm2, (%r12)
addq $0x10, %r14
addq $0x10, %r12
addl $0x4, %esi
leal 0x3(%rsi), %edx
cmpl %r8d, %edx
jl 0x268ea8
jmp 0x268edd
vminss (%r14), %xmm0, %xmm1
vmovss %xmm1, (%r12)
addq $0x4, %r14
addq $0x4, %r12
incl %esi
cmpl %r8d, %esi
jl 0x268ec8
incq %r13
jmp 0x268e44
incq %r9
movq 0x80(%rsp), %r13
jmp 0x268e02
movq 0x20(%rsp), %rdx
movl 0x28(%rdx), %esi
cmpl $0x4, %esi
jne 0x26bab3
cmpl $0x3, 0x38(%rsp)
jne 0x26bab3
imull %edi, %eax
xorl %r8d, %r8d
testl %ecx, %ecx
cmovlel %r8d, %ecx
testl %r13d, %r13d
cmovlel %r8d, %r13d
movq 0x60(%rsp), %rdx
testl %edx, %edx
cmovlel %r8d, %edx
movq %rdx, 0x60(%rsp)
movq 0x98(%rsp), %rdx
movq 0x78(%rsp), %rsi
imulq %rsi, %rdx
movq %rdx, 0x98(%rsp)
movslq 0x34(%rsp), %r9
imulq %rsi, %r9
cmpq 0x60(%rsp), %r8
je 0x26bab3
movq 0x20(%rsp), %rdx
movq 0x40(%rdx), %r10
imulq %r8, %r10
imulq 0x10(%rdx), %r10
addq (%rdx), %r10
movq 0x48(%rsp), %rdx
movq 0x40(%rdx), %r11
imulq %r8, %r11
imulq 0x10(%rdx), %r11
addq (%rdx), %r11
movq 0x98(%rsp), %rbx
imulq %r8, %rbx
addq 0x18(%rsp), %rbx
xorl %r14d, %r14d
cmpq %r13, %r14
je 0x269062
movq %r9, %r15
imulq %r14, %r15
addq %rbx, %r15
xorl %r12d, %r12d
cmpq %rcx, %r12
je 0x26905a
vmovss (%r15,%r12,4), %xmm0
cmpl $0x4, %edi
jne 0x268fdd
movq %r12, %rdx
shlq $0x4, %rdx
vmovups (%r15,%rdx), %xmm1
jmp 0x268ff6
vshufps $0x0, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
cmpl $0x8, %edi
jne 0x268ff6
movq %r12, %rdx
shlq $0x5, %rdx
vmovups (%r15,%rdx), %ymm2
jmp 0x268ffc
vinsertf128 $0x1, %xmm1, %ymm1, %ymm2
xorl %esi, %esi
leal 0x7(%rsi), %edx
cmpl %eax, %edx
jge 0x269031
vminps (%r10), %ymm2, %ymm3
vmovups %ymm3, (%r11)
addq $0x20, %r10
addq $0x20, %r11
addl $0x8, %esi
jmp 0x268ffe
vminps (%r10), %xmm1, %xmm2
vmovups %xmm2, (%r11)
addq $0x10, %r10
addq $0x10, %r11
addl $0x4, %esi
leal 0x3(%rsi), %edx
cmpl %eax, %edx
jl 0x26901c
jmp 0x26904e
vminss (%r10), %xmm0, %xmm1
vmovss %xmm1, (%r11)
addq $0x4, %r10
addq $0x4, %r11
incl %esi
cmpl %eax, %esi
jl 0x26903a
incq %r12
jmp 0x268fba
incq %r14
jmp 0x268fa4
incq %r8
jmp 0x268f5b
movq 0x20(%rsp), %rdx
movl 0x28(%rdx), %esi
leal -0x3(%rsi), %edx
cmpl $0x1, %edx
ja 0x269200
cmpl $0x1, 0x38(%rsp)
jne 0x269200
movl %edi, %r8d
imull %eax, %r8d
imull %ecx, %r8d
imull %r13d, %r8d
xorl %r9d, %r9d
movq 0x60(%rsp), %rdx
testl %edx, %edx
movl $0x0, %esi
cmovgl %edx, %esi
movq %rsi, 0x40(%rsp)
cmpq 0x40(%rsp), %r9
je 0x2691f8
vmovd (%r10,%r9,4), %xmm0
cmpl $0x4, %edi
jne 0x2690d4
movq %r9, %rdx
shlq $0x4, %rdx
vmovdqu (%r10,%rdx), %xmm1
jmp 0x2690ed
vpshufd $0x0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
cmpl $0x8, %edi
jne 0x2690ed
movq %r9, %rdx
shlq $0x5, %rdx
vmovups (%r10,%rdx), %ymm2
jmp 0x2690f3
vinsertf128 $0x1, %xmm1, %ymm1, %ymm2
movq 0x20(%rsp), %rdx
movq 0x40(%rdx), %r14
movq %r14, %r13
imulq %r9, %r13
movq (%rdx), %r11
movq 0x10(%rdx), %rdx
movq %rdx, 0xb0(%rsp)
imulq %rdx, %r13
addq %r11, %r13
movq 0x48(%rsp), %rdx
movq 0x40(%rdx), %r10
movq %r10, %rsi
imulq %r9, %rsi
movq (%rdx), %rbx
movq 0x10(%rdx), %rdx
movq %rdx, 0x28(%rsp)
imulq %rdx, %rsi
addq %rbx, %rsi
xorl %r15d, %r15d
xorl %r12d, %r12d
leal 0x7(%r12), %edx
cmpl %r8d, %edx
jge 0x2691a6
vmovups (%r13), %ymm3
vrcpps %ymm3, %ymm4
vmulps %ymm4, %ymm2, %ymm5
vfmsub213ps %ymm2, %ymm5, %ymm3 # ymm3 = (ymm5 * ymm3) - ymm2
vfnmadd213ps %ymm5, %ymm4, %ymm3 # ymm3 = -(ymm4 * ymm3) + ymm5
vmovups %ymm3, (%rsi)
addq $0x20, %r13
addq $0x20, %rsi
addl $0x8, %r12d
addq $0x8, %r15
jmp 0x269142
vmovups (%r13), %xmm2
vrcpps %xmm2, %xmm3
vmulps %xmm3, %xmm1, %xmm4
vfmsub213ps %xmm1, %xmm4, %xmm2 # xmm2 = (xmm4 * xmm2) - xmm1
vfnmadd213ps %xmm4, %xmm3, %xmm2 # xmm2 = -(xmm3 * xmm2) + xmm4
vmovups %xmm2, (%rsi)
addq $0x10, %r13
addq $0x10, %rsi
addl $0x4, %r12d
addq $0x4, %r15
leal 0x3(%r12), %edx
cmpl %r8d, %edx
jl 0x26917a
imulq 0x28(%rsp), %r10
imulq %r9, %r10
addq %r10, %rbx
imulq 0xb0(%rsp), %r14
imulq %r9, %r14
addq %r14, %r11
movq 0x80(%rsp), %r13
cmpl %r8d, %r15d
jge 0x2691eb
vdivss (%r11,%r15,4), %xmm0, %xmm1
vmovss %xmm1, (%rbx,%r15,4)
incq %r15
jmp 0x2691d5
incq %r9
movq 0x18(%rsp), %r10
jmp 0x2690af
movq 0x20(%rsp), %rdx
movl 0x28(%rdx), %esi
cmpl $0x3, %esi
jne 0x26936a
cmpl $0x2, 0x38(%rsp)
jne 0x26936a
movl %edi, %r8d
imull %eax, %r8d
xorl %r9d, %r9d
testl %ecx, %ecx
movl $0x0, %r10d
cmovgl %ecx, %r10d
movq 0x60(%rsp), %rdx
testl %edx, %edx
movl $0x0, %r11d
cmovgl %edx, %r11d
movslq 0x34(%rsp), %rbx
imulq 0x78(%rsp), %rbx
cmpq %r11, %r9
je 0x269362
movq 0x20(%rsp), %rdx
movq 0x40(%rdx), %r14
imulq %r9, %r14
imulq 0x10(%rdx), %r14
addq (%rdx), %r14
movq %rbx, %r15
imulq %r9, %r15
movq 0x48(%rsp), %rdx
movq 0x40(%rdx), %r12
imulq %r9, %r12
imulq 0x10(%rdx), %r12
addq 0x18(%rsp), %r15
addq (%rdx), %r12
xorl %r13d, %r13d
cmpq %r10, %r13
je 0x269352
vmovss (%r15,%r13,4), %xmm0
cmpl $0x4, %edi
jne 0x2692ab
movq %r13, %rdx
shlq $0x4, %rdx
vmovups (%r15,%rdx), %xmm1
jmp 0x2692c4
vshufps $0x0, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
cmpl $0x8, %edi
jne 0x2692c4
movq %r13, %rdx
shlq $0x5, %rdx
vmovups (%r15,%rdx), %ymm2
jmp 0x2692ca
vinsertf128 $0x1, %xmm1, %ymm1, %ymm2
xorl %esi, %esi
leal 0x7(%rsi), %edx
cmpl %r8d, %edx
jge 0x269326
vmovups (%r14), %ymm3
vrcpps %ymm3, %ymm4
vmulps %ymm4, %ymm2, %ymm5
vfmsub213ps %ymm2, %ymm5, %ymm3 # ymm3 = (ymm5 * ymm3) - ymm2
vfnmadd213ps %ymm5, %ymm4, %ymm3 # ymm3 = -(ymm4 * ymm3) + ymm5
vmovups %ymm3, (%r12)
addq $0x20, %r14
addq $0x20, %r12
addl $0x8, %esi
jmp 0x2692cc
vmovups (%r14), %xmm2
vrcpps %xmm2, %xmm3
vmulps %xmm3, %xmm1, %xmm4
vfmsub213ps %xmm1, %xmm4, %xmm2 # xmm2 = (xmm4 * xmm2) - xmm1
vfnmadd213ps %xmm4, %xmm3, %xmm2 # xmm2 = -(xmm3 * xmm2) + xmm4
vmovups %xmm2, (%r12)
addq $0x10, %r14
addq $0x10, %r12
addl $0x4, %esi
leal 0x3(%rsi), %edx
cmpl %r8d, %edx
jl 0x2692fe
jmp 0x269345
vdivss (%r14), %xmm0, %xmm1
vmovss %xmm1, (%r12)
addq $0x4, %r14
addq $0x4, %r12
incl %esi
cmpl %r8d, %esi
jl 0x269330
incq %r13
jmp 0x269288
incq %r9
movq 0x80(%rsp), %r13
jmp 0x269246
movq 0x20(%rsp), %rdx
movl 0x28(%rdx), %esi
cmpl $0x4, %esi
jne 0x2694d9
cmpl $0x2, 0x38(%rsp)
jne 0x2694d9
movl %edi, %r8d
imull %eax, %r8d
imull %ecx, %r8d
xorl %r9d, %r9d
testl %r13d, %r13d
movl $0x0, %r10d
cmovgl %r13d, %r10d
movq 0x60(%rsp), %rdx
testl %edx, %edx
movl $0x0, %r11d
cmovgl %edx, %r11d
movslq 0x34(%rsp), %rbx
imulq 0x78(%rsp), %rbx
cmpq %r11, %r9
je 0x2694d1
movq 0x20(%rsp), %rdx
movq 0x40(%rdx), %r14
imulq %r9, %r14
imulq 0x10(%rdx), %r14
addq (%rdx), %r14
movq %rbx, %r15
imulq %r9, %r15
movq 0x48(%rsp), %rdx
movq 0x40(%rdx), %r12
imulq %r9, %r12
imulq 0x10(%rdx), %r12
addq 0x18(%rsp), %r15
addq (%rdx), %r12
xorl %r13d, %r13d
cmpq %r10, %r13
je 0x2694c1
vmovss (%r15,%r13,4), %xmm0
cmpl $0x4, %edi
jne 0x26941a
movq %r13, %rdx
shlq $0x4, %rdx
vmovups (%r15,%rdx), %xmm1
jmp 0x269433
vshufps $0x0, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
cmpl $0x8, %edi
jne 0x269433
movq %r13, %rdx
shlq $0x5, %rdx
vmovups (%r15,%rdx), %ymm2
jmp 0x269439
vinsertf128 $0x1, %xmm1, %ymm1, %ymm2
xorl %esi, %esi
leal 0x7(%rsi), %edx
cmpl %r8d, %edx
jge 0x269495
vmovups (%r14), %ymm3
vrcpps %ymm3, %ymm4
vmulps %ymm4, %ymm2, %ymm5
vfmsub213ps %ymm2, %ymm5, %ymm3 # ymm3 = (ymm5 * ymm3) - ymm2
vfnmadd213ps %ymm5, %ymm4, %ymm3 # ymm3 = -(ymm4 * ymm3) + ymm5
vmovups %ymm3, (%r12)
addq $0x20, %r14
addq $0x20, %r12
addl $0x8, %esi
jmp 0x26943b
vmovups (%r14), %xmm2
vrcpps %xmm2, %xmm3
vmulps %xmm3, %xmm1, %xmm4
vfmsub213ps %xmm1, %xmm4, %xmm2 # xmm2 = (xmm4 * xmm2) - xmm1
vfnmadd213ps %xmm4, %xmm3, %xmm2 # xmm2 = -(xmm3 * xmm2) + xmm4
vmovups %xmm2, (%r12)
addq $0x10, %r14
addq $0x10, %r12
addl $0x4, %esi
leal 0x3(%rsi), %edx
cmpl %r8d, %edx
jl 0x26946d
jmp 0x2694b4
vdivss (%r14), %xmm0, %xmm1
vmovss %xmm1, (%r12)
addq $0x4, %r14
addq $0x4, %r12
incl %esi
cmpl %r8d, %esi
jl 0x26949f
incq %r13
jmp 0x2693f7
incq %r9
movq 0x80(%rsp), %r13
jmp 0x2693b5
movq 0x20(%rsp), %rdx
movl 0x28(%rdx), %esi
cmpl $0x4, %esi
jne 0x26bab3
cmpl $0x3, 0x38(%rsp)
jne 0x26bab3
imull %edi, %eax
xorl %r8d, %r8d
testl %ecx, %ecx
cmovlel %r8d, %ecx
testl %r13d, %r13d
cmovlel %r8d, %r13d
movq 0x60(%rsp), %rdx
testl %edx, %edx
cmovlel %r8d, %edx
movq %rdx, 0x60(%rsp)
movq 0x98(%rsp), %rdx
movq 0x78(%rsp), %rsi
imulq %rsi, %rdx
movq %rdx, 0x98(%rsp)
movslq 0x34(%rsp), %r9
imulq %rsi, %r9
cmpq 0x60(%rsp), %r8
je 0x26bab3
movq 0x20(%rsp), %rdx
movq 0x40(%rdx), %r10
imulq %r8, %r10
imulq 0x10(%rdx), %r10
addq (%rdx), %r10
movq 0x48(%rsp), %rdx
movq 0x40(%rdx), %r11
imulq %r8, %r11
imulq 0x10(%rdx), %r11
addq (%rdx), %r11
movq 0x98(%rsp), %rbx
imulq %r8, %rbx
addq 0x18(%rsp), %rbx
xorl %r14d, %r14d
cmpq %r13, %r14
je 0x26965d
movq %r9, %r15
imulq %r14, %r15
addq %rbx, %r15
xorl %r12d, %r12d
cmpq %rcx, %r12
je 0x269655
vmovss (%r15,%r12,4), %xmm0
cmpl $0x4, %edi
jne 0x2695b4
movq %r12, %rdx
shlq $0x4, %rdx
vmovups (%r15,%rdx), %xmm1
jmp 0x2695cd
vshufps $0x0, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
cmpl $0x8, %edi
jne 0x2695cd
movq %r12, %rdx
shlq $0x5, %rdx
vmovups (%r15,%rdx), %ymm2
jmp 0x2695d3
vinsertf128 $0x1, %xmm1, %ymm1, %ymm2
xorl %esi, %esi
leal 0x7(%rsi), %edx
cmpl %eax, %edx
jge 0x26962c
vmovups (%r10), %ymm3
vrcpps %ymm3, %ymm4
vmulps %ymm4, %ymm2, %ymm5
vfmsub213ps %ymm2, %ymm5, %ymm3 # ymm3 = (ymm5 * ymm3) - ymm2
vfnmadd213ps %ymm5, %ymm4, %ymm3 # ymm3 = -(ymm4 * ymm3) + ymm5
vmovups %ymm3, (%r11)
addq $0x20, %r10
addq $0x20, %r11
addl $0x8, %esi
jmp 0x2695d5
vmovups (%r10), %xmm2
vrcpps %xmm2, %xmm3
vmulps %xmm3, %xmm1, %xmm4
vfmsub213ps %xmm1, %xmm4, %xmm2 # xmm2 = (xmm4 * xmm2) - xmm1
vfnmadd213ps %xmm4, %xmm3, %xmm2 # xmm2 = -(xmm3 * xmm2) + xmm4
vmovups %xmm2, (%r11)
addq $0x10, %r10
addq $0x10, %r11
addl $0x4, %esi
leal 0x3(%rsi), %edx
cmpl %eax, %edx
jl 0x269605
jmp 0x269649
vdivss (%r10), %xmm0, %xmm1
vmovss %xmm1, (%r11)
addq $0x4, %r10
addq $0x4, %r11
incl %esi
cmpl %eax, %esi
jl 0x269635
incq %r12
jmp 0x269591
incq %r14
jmp 0x26957b
incq %r8
jmp 0x269532
movq 0x20(%rsp), %rax
movl 0x28(%rax), %eax
leal -0x3(%rax), %ecx
cmpl $0x1, %ecx
ja 0x269fa3
cmpl $0x1, 0x38(%rsp)
jne 0x269fa3
movl %r12d, %r15d
imull 0x80(%rsp), %r15d
imull 0x40(%rsp), %r15d
imull 0xf8(%rsp), %r15d
xorl %r13d, %r13d
movq 0xf0(%rsp), %rax
testl %eax, %eax
movl $0x0, %ecx
cmovgl %eax, %ecx
movq %rcx, 0x130(%rsp)
cmpq 0x130(%rsp), %r13
je 0x269f9b
vmovss (%rbx,%r13,4), %xmm0
cmpl $0x4, %r12d
vmovaps %xmm0, 0xb0(%rsp)
jne 0x2696f6
movq %r13, %rax
shlq $0x4, %rax
vmovups (%rbx,%rax), %xmm0
vmovaps %xmm0, 0xe0(%rsp)
jmp 0x269718
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmovaps %xmm0, 0xe0(%rsp)
cmpl $0x8, %r12d
jne 0x269718
movq %r13, %rax
shlq $0x5, %rax
vmovups (%rbx,%rax), %ymm0
jmp 0x26971e
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
movq 0x20(%rsp), %rax
movq 0x40(%rax), %r12
movq %r12, 0xa0(%rsp)
imulq %r13, %r12
movq (%rax), %rcx
movq 0x10(%rax), %rax
movq %rax, 0xd0(%rsp)
imulq %rax, %r12
movq %rcx, 0x28(%rsp)
addq %rcx, %r12
movq 0x48(%rsp), %rax
movq 0x40(%rax), %rdx
movq %r13, %rcx
movq %rdx, 0xc8(%rsp)
movq %rdx, %r13
movq %rcx, 0xa8(%rsp)
imulq %rcx, %r13
movq (%rax), %rcx
movq 0x10(%rax), %rax
movq %rax, 0x138(%rsp)
imulq %rax, %r13
movq %rcx, 0x60(%rsp)
addq %rcx, %r13
vmovaps %ymm0, 0x140(%rsp)
xorl %r14d, %r14d
xorl %ebx, %ebx
leal 0x7(%rbx), %eax
cmpl %r15d, %eax
jge 0x269827
vmovdqu (%r12), %ymm0
vmovdqa %ymm0, 0x100(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x100(%rsp), %rsi
leaq 0x140(%rsp), %rdx
callq 0x26d916
vmovdqu %ymm0, (%r13)
addq $0x20, %r12
addq $0x20, %r13
addl $0x8, %ebx
addq $0x8, %r14
jmp 0x26979a
vmovdqu (%r12), %xmm0
vmovdqa %xmm0, 0x100(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x100(%rsp), %rsi
leaq 0xe0(%rsp), %rdx
vzeroupper
callq 0x26daa0
vmovdqu %xmm0, (%r13)
addq $0x10, %r12
addq $0x10, %r13
addl $0x4, %ebx
addq $0x4, %r14
leal 0x3(%rbx), %eax
cmpl %r15d, %eax
jl 0x2697e6
movq 0xc8(%rsp), %rax
imulq 0x138(%rsp), %rax
movq 0xa8(%rsp), %r13
imulq %r13, %rax
addq %rax, 0x60(%rsp)
movq 0xa0(%rsp), %rax
imulq 0xd0(%rsp), %rax
imulq %r13, %rax
addq %rax, 0x28(%rsp)
movq 0x18(%rsp), %rbx
movl 0xdc(%rsp), %r12d
cmpl %r15d, %r14d
vmovdqa 0xb0(%rsp), %xmm0
jge 0x2698a9
movq 0x28(%rsp), %rax
vmovd (%rax,%r14,4), %xmm1
vzeroupper
callq 0x5f170
movq 0x60(%rsp), %rax
vmovd %xmm0, (%rax,%r14,4)
incq %r14
jmp 0x269878
incq %r13
jmp 0x2696bc
movq 0x20(%rsp), %rax
movl 0x28(%rax), %eax
leal -0x3(%rax), %ecx
cmpl $0x1, %ecx
ja 0x26a1bc
cmpl $0x1, 0x38(%rsp)
jne 0x26a1bc
movl %r12d, %r15d
imull 0x80(%rsp), %r15d
imull 0x40(%rsp), %r15d
imull 0xf8(%rsp), %r15d
xorl %r13d, %r13d
movq 0xf0(%rsp), %rax
testl %eax, %eax
movl $0x0, %ecx
cmovgl %eax, %ecx
movq %rcx, 0x130(%rsp)
cmpq 0x130(%rsp), %r13
je 0x26a1b4
vmovss (%rbx,%r13,4), %xmm0
cmpl $0x4, %r12d
vmovaps %xmm0, 0xb0(%rsp)
jne 0x269942
movq %r13, %rax
shlq $0x4, %rax
vmovups (%rbx,%rax), %xmm0
vmovaps %xmm0, 0xe0(%rsp)
jmp 0x269964
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmovaps %xmm0, 0xe0(%rsp)
cmpl $0x8, %r12d
jne 0x269964
movq %r13, %rax
shlq $0x5, %rax
vmovups (%rbx,%rax), %ymm0
jmp 0x26996a
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
movq 0x20(%rsp), %rax
movq 0x40(%rax), %r12
movq %r12, 0xa0(%rsp)
imulq %r13, %r12
movq (%rax), %rcx
movq 0x10(%rax), %rax
movq %rax, 0xd0(%rsp)
imulq %rax, %r12
movq %rcx, 0x28(%rsp)
addq %rcx, %r12
movq 0x48(%rsp), %rax
movq 0x40(%rax), %rdx
movq %r13, %rcx
movq %rdx, 0xc8(%rsp)
movq %rdx, %r13
movq %rcx, 0xa8(%rsp)
imulq %rcx, %r13
movq (%rax), %rcx
movq 0x10(%rax), %rax
movq %rax, 0x138(%rsp)
imulq %rax, %r13
movq %rcx, 0x60(%rsp)
addq %rcx, %r13
vmovaps %ymm0, 0x140(%rsp)
xorl %r14d, %r14d
xorl %ebx, %ebx
leal 0x7(%rbx), %eax
cmpl %r15d, %eax
jge 0x269a73
vmovdqu (%r12), %ymm0
vmovdqa %ymm0, 0x100(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x100(%rsp), %rsi
leaq 0x140(%rsp), %rdx
callq 0x26d220
vmovdqu %ymm0, (%r13)
addq $0x20, %r12
addq $0x20, %r13
addl $0x8, %ebx
addq $0x8, %r14
jmp 0x2699e6
vmovdqu (%r12), %xmm0
vmovdqa %xmm0, 0x100(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x100(%rsp), %rsi
leaq 0xe0(%rsp), %rdx
vzeroupper
callq 0x26d430
vmovdqu %xmm0, (%r13)
addq $0x10, %r12
addq $0x10, %r13
addl $0x4, %ebx
addq $0x4, %r14
leal 0x3(%rbx), %eax
cmpl %r15d, %eax
jl 0x269a32
movq 0xc8(%rsp), %rax
imulq 0x138(%rsp), %rax
movq 0xa8(%rsp), %r13
imulq %r13, %rax
addq %rax, 0x60(%rsp)
movq 0xa0(%rsp), %rax
imulq 0xd0(%rsp), %rax
imulq %r13, %rax
addq %rax, 0x28(%rsp)
movq 0x18(%rsp), %rbx
movl 0xdc(%rsp), %r12d
cmpl %r15d, %r14d
vmovdqa 0xb0(%rsp), %xmm0
jge 0x269af5
movq 0x28(%rsp), %rax
vmovd (%rax,%r14,4), %xmm1
vzeroupper
callq 0x5f0e0
movq 0x60(%rsp), %rax
vmovd %xmm0, (%rax,%r14,4)
incq %r14
jmp 0x269ac4
incq %r13
jmp 0x269908
movq 0x20(%rsp), %rax
movl 0x28(%rax), %eax
leal -0x3(%rax), %ecx
cmpl $0x1, %ecx
ja 0x26a3d5
cmpl $0x1, 0x38(%rsp)
jne 0x26a3d5
movl %r12d, %r15d
imull 0x80(%rsp), %r15d
imull 0x40(%rsp), %r15d
imull 0xf8(%rsp), %r15d
xorl %r13d, %r13d
movq 0xf0(%rsp), %rax
testl %eax, %eax
movl $0x0, %ecx
cmovgl %eax, %ecx
movq %rcx, 0x130(%rsp)
cmpq 0x130(%rsp), %r13
je 0x26a3cd
vmovss (%rbx,%r13,4), %xmm0
cmpl $0x4, %r12d
vmovaps %xmm0, 0x60(%rsp)
jne 0x269b8b
movq %r13, %rax
shlq $0x4, %rax
vmovups (%rbx,%rax), %xmm0
vmovaps %xmm0, 0xe0(%rsp)
jmp 0x269bad
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmovaps %xmm0, 0xe0(%rsp)
cmpl $0x8, %r12d
jne 0x269bad
movq %r13, %rax
shlq $0x5, %rax
vmovdqu (%rbx,%rax), %ymm0
jmp 0x269bb3
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
movq 0x20(%rsp), %rax
movq 0x40(%rax), %r12
movq %r12, 0xa0(%rsp)
imulq %r13, %r12
movq (%rax), %rcx
movq 0x10(%rax), %rax
movq %rax, 0xd0(%rsp)
imulq %rax, %r12
movq %rcx, 0xb0(%rsp)
addq %rcx, %r12
movq 0x48(%rsp), %rax
movq 0x40(%rax), %rdx
movq %r13, %rcx
movq %rdx, 0xc8(%rsp)
movq %rdx, %r13
movq %rcx, 0xa8(%rsp)
imulq %rcx, %r13
movq (%rax), %rcx
movq 0x10(%rax), %rax
movq %rax, 0x138(%rsp)
imulq %rax, %r13
movq %rcx, 0x28(%rsp)
addq %rcx, %r13
vmovdqa %ymm0, 0x140(%rsp)
xorl %r14d, %r14d
xorl %ebx, %ebx
leal 0x7(%rbx), %eax
cmpl %r15d, %eax
jge 0x269cbf
vmovdqu (%r12), %ymm0
vmovdqa %ymm0, 0x100(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x100(%rsp), %rsi
leaq 0x140(%rsp), %rdx
callq 0x26ce36
vmovdqu %ymm0, (%r13)
addq $0x20, %r12
addq $0x20, %r13
addl $0x8, %ebx
addq $0x8, %r14
jmp 0x269c32
vmovdqu (%r12), %xmm0
vmovdqa %xmm0, 0x100(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x100(%rsp), %rsi
leaq 0xe0(%rsp), %rdx
vzeroupper
callq 0x26d046
vmovdqu %xmm0, (%r13)
addq $0x10, %r12
addq $0x10, %r13
addl $0x4, %ebx
addq $0x4, %r14
leal 0x3(%rbx), %eax
cmpl %r15d, %eax
jl 0x269c7e
movq 0xc8(%rsp), %rax
imulq 0x138(%rsp), %rax
movq 0xa8(%rsp), %r13
imulq %r13, %rax
addq %rax, 0x28(%rsp)
movq 0xa0(%rsp), %rax
imulq 0xd0(%rsp), %rax
imulq %r13, %rax
addq %rax, 0xb0(%rsp)
movq 0x18(%rsp), %rbx
movl 0xdc(%rsp), %r12d
cmpl %r15d, %r14d
jge 0x269d44
movq 0xb0(%rsp), %rax
vmovd (%rax,%r14,4), %xmm0
vmovdqa 0x60(%rsp), %xmm1
vzeroupper
callq 0x5f0e0
movq 0x28(%rsp), %rax
vmovd %xmm0, (%rax,%r14,4)
incq %r14
jmp 0x269d13
incq %r13
jmp 0x269b54
movq 0x20(%rsp), %rax
movl 0x28(%rax), %eax
leal -0x3(%rax), %ecx
cmpl $0x1, %ecx
ja 0x26a5ee
cmpl $0x1, 0x38(%rsp)
jne 0x26a5ee
movl %r12d, %r15d
imull 0x80(%rsp), %r15d
imull 0x40(%rsp), %r15d
imull 0xf8(%rsp), %r15d
xorl %r13d, %r13d
movq 0xf0(%rsp), %rax
testl %eax, %eax
movl $0x0, %ecx
cmovgl %eax, %ecx
movq %rcx, 0x130(%rsp)
cmpq 0x130(%rsp), %r13
je 0x26a5e6
vmovss (%rbx,%r13,4), %xmm0
cmpl $0x4, %r12d
vmovaps %xmm0, 0x60(%rsp)
jne 0x269dda
movq %r13, %rax
shlq $0x4, %rax
vmovups (%rbx,%rax), %xmm0
vmovaps %xmm0, 0xe0(%rsp)
jmp 0x269dfc
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmovaps %xmm0, 0xe0(%rsp)
cmpl $0x8, %r12d
jne 0x269dfc
movq %r13, %rax
shlq $0x5, %rax
vmovdqu (%rbx,%rax), %ymm0
jmp 0x269e02
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
movq 0x20(%rsp), %rax
movq 0x40(%rax), %r12
movq %r12, 0xa0(%rsp)
imulq %r13, %r12
movq (%rax), %rcx
movq 0x10(%rax), %rax
movq %rax, 0xd0(%rsp)
imulq %rax, %r12
movq %rcx, 0xb0(%rsp)
addq %rcx, %r12
movq 0x48(%rsp), %rax
movq 0x40(%rax), %rdx
movq %r13, %rcx
movq %rdx, 0xc8(%rsp)
movq %rdx, %r13
movq %rcx, 0xa8(%rsp)
imulq %rcx, %r13
movq (%rax), %rcx
movq 0x10(%rax), %rax
movq %rax, 0x138(%rsp)
imulq %rax, %r13
movq %rcx, 0x28(%rsp)
addq %rcx, %r13
vmovdqa %ymm0, 0x140(%rsp)
xorl %r14d, %r14d
xorl %ebx, %ebx
leal 0x7(%rbx), %eax
cmpl %r15d, %eax
jge 0x269f0e
vmovdqu (%r12), %ymm0
vmovdqa %ymm0, 0x100(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x100(%rsp), %rsi
leaq 0x140(%rsp), %rdx
callq 0x26d60a
vmovdqu %ymm0, (%r13)
addq $0x20, %r12
addq $0x20, %r13
addl $0x8, %ebx
addq $0x8, %r14
jmp 0x269e81
vmovdqu (%r12), %xmm0
vmovdqa %xmm0, 0x100(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x100(%rsp), %rsi
leaq 0xe0(%rsp), %rdx
vzeroupper
callq 0x26d794
vmovdqu %xmm0, (%r13)
addq $0x10, %r12
addq $0x10, %r13
addl $0x4, %ebx
addq $0x4, %r14
leal 0x3(%rbx), %eax
cmpl %r15d, %eax
jl 0x269ecd
movq 0xc8(%rsp), %rax
imulq 0x138(%rsp), %rax
movq 0xa8(%rsp), %r13
imulq %r13, %rax
addq %rax, 0x28(%rsp)
movq 0xa0(%rsp), %rax
imulq 0xd0(%rsp), %rax
imulq %r13, %rax
addq %rax, 0xb0(%rsp)
movq 0x18(%rsp), %rbx
movl 0xdc(%rsp), %r12d
cmpl %r15d, %r14d
jge 0x269f93
movq 0xb0(%rsp), %rax
vmovd (%rax,%r14,4), %xmm0
vmovdqa 0x60(%rsp), %xmm1
vzeroupper
callq 0x5f170
movq 0x28(%rsp), %rax
vmovd %xmm0, (%rax,%r14,4)
incq %r14
jmp 0x269f62
incq %r13
jmp 0x269da3
movq 0x20(%rsp), %rax
movl 0x28(%rax), %eax
cmpl $0x3, %eax
jne 0x26a807
cmpl $0x2, 0x38(%rsp)
jne 0x26a807
movl %r12d, %r15d
imull 0x80(%rsp), %r15d
xorl %ecx, %ecx
movq 0x40(%rsp), %rax
testl %eax, %eax
movl $0x0, %edx
cmovgl %eax, %edx
movq %rdx, 0x60(%rsp)
movq 0xf0(%rsp), %rax
testl %eax, %eax
movl $0x0, %edx
cmovgl %eax, %edx
movq %rdx, 0xd0(%rsp)
movslq 0x34(%rsp), %rax
imulq 0x78(%rsp), %rax
movq %rax, 0xc8(%rsp)
cmpq 0xd0(%rsp), %rcx
je 0x26a7ff
movq 0x20(%rsp), %rax
movq 0x40(%rax), %r13
imulq %rcx, %r13
imulq 0x10(%rax), %r13
addq (%rax), %r13
movq 0xc8(%rsp), %rsi
imulq %rcx, %rsi
movq %rbx, %rax
movq %rcx, %rdx
movq 0x48(%rsp), %rcx
movq 0x40(%rcx), %rbx
movq %rdx, 0xa0(%rsp)
imulq %rdx, %rbx
imulq 0x10(%rcx), %rbx
addq %rax, %rsi
addq (%rcx), %rbx
xorl %ecx, %ecx
movq %rsi, 0xa8(%rsp)
cmpq 0x60(%rsp), %rcx
je 0x26a19f
vmovd (%rsi,%rcx,4), %xmm1
cmpl $0x4, %r12d
movq %rcx, 0x28(%rsp)
vmovdqa %xmm1, 0xb0(%rsp)
jne 0x26a0a0
movq %rcx, %rax
shlq $0x4, %rax
vmovdqu (%rsi,%rax), %xmm0
vmovdqa %xmm0, 0xe0(%rsp)
jmp 0x26a0c2
vpshufd $0x0, %xmm1, %xmm0 # xmm0 = xmm1[0,0,0,0]
vmovdqa %xmm0, 0xe0(%rsp)
cmpl $0x8, %r12d
jne 0x26a0c2
movq %rcx, %rax
shlq $0x5, %rax
vmovdqu (%rsi,%rax), %ymm0
jmp 0x26a0c8
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x140(%rsp)
xorl %r14d, %r14d
leal 0x7(%r14), %eax
cmpl %r15d, %eax
jge 0x26a154
vmovdqu (%r13), %ymm0
vmovdqa %ymm0, 0x100(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x100(%rsp), %rsi
leaq 0x140(%rsp), %rdx
callq 0x26d916
vmovdqu %ymm0, (%rbx)
addq $0x20, %r13
addq $0x20, %rbx
addl $0x8, %r14d
jmp 0x26a0d4
vmovdqu (%r13), %xmm0
vmovdqa %xmm0, 0x100(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x100(%rsp), %rsi
leaq 0xe0(%rsp), %rdx
vzeroupper
callq 0x26daa0
vmovdqu %xmm0, (%rbx)
addq $0x10, %r13
addq $0x10, %rbx
addl $0x4, %r14d
leal 0x3(%r14), %eax
cmpl %r15d, %eax
jl 0x26a118
jmp 0x26a185
vmovd (%r13), %xmm1
vmovdqa 0xb0(%rsp), %xmm0
vzeroupper
callq 0x5f170
vmovd %xmm0, (%rbx)
addq $0x4, %r13
addq $0x4, %rbx
incl %r14d
cmpl %r15d, %r14d
jl 0x26a15f
movq 0x28(%rsp), %rcx
incq %rcx
movq 0xa8(%rsp), %rsi
jmp 0x26a065
movq 0xa0(%rsp), %rcx
incq %rcx
movq 0x18(%rsp), %rbx
jmp 0x26a006
movq 0x20(%rsp), %rax
movl 0x28(%rax), %eax
cmpl $0x3, %eax
jne 0x26aa29
cmpl $0x2, 0x38(%rsp)
jne 0x26aa29
movl %r12d, %r15d
imull 0x80(%rsp), %r15d
xorl %ecx, %ecx
movq 0x40(%rsp), %rax
testl %eax, %eax
movl $0x0, %edx
cmovgl %eax, %edx
movq %rdx, 0x60(%rsp)
movq 0xf0(%rsp), %rax
testl %eax, %eax
movl $0x0, %edx
cmovgl %eax, %edx
movq %rdx, 0xd0(%rsp)
movslq 0x34(%rsp), %rax
imulq 0x78(%rsp), %rax
movq %rax, 0xc8(%rsp)
cmpq 0xd0(%rsp), %rcx
je 0x26aa21
movq 0x20(%rsp), %rax
movq 0x40(%rax), %r13
imulq %rcx, %r13
imulq 0x10(%rax), %r13
addq (%rax), %r13
movq 0xc8(%rsp), %rsi
imulq %rcx, %rsi
movq %rbx, %rax
movq %rcx, %rdx
movq 0x48(%rsp), %rcx
movq 0x40(%rcx), %rbx
movq %rdx, 0xa0(%rsp)
imulq %rdx, %rbx
imulq 0x10(%rcx), %rbx
addq %rax, %rsi
addq (%rcx), %rbx
xorl %ecx, %ecx
movq %rsi, 0xa8(%rsp)
cmpq 0x60(%rsp), %rcx
je 0x26a3b8
vmovd (%rsi,%rcx,4), %xmm1
cmpl $0x4, %r12d
movq %rcx, 0x28(%rsp)
vmovdqa %xmm1, 0xb0(%rsp)
jne 0x26a2b9
movq %rcx, %rax
shlq $0x4, %rax
vmovdqu (%rsi,%rax), %xmm0
vmovdqa %xmm0, 0xe0(%rsp)
jmp 0x26a2db
vpshufd $0x0, %xmm1, %xmm0 # xmm0 = xmm1[0,0,0,0]
vmovdqa %xmm0, 0xe0(%rsp)
cmpl $0x8, %r12d
jne 0x26a2db
movq %rcx, %rax
shlq $0x5, %rax
vmovdqu (%rsi,%rax), %ymm0
jmp 0x26a2e1
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x140(%rsp)
xorl %r14d, %r14d
leal 0x7(%r14), %eax
cmpl %r15d, %eax
jge 0x26a36d
vmovdqu (%r13), %ymm0
vmovdqa %ymm0, 0x100(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x100(%rsp), %rsi
leaq 0x140(%rsp), %rdx
callq 0x26d220
vmovdqu %ymm0, (%rbx)
addq $0x20, %r13
addq $0x20, %rbx
addl $0x8, %r14d
jmp 0x26a2ed
vmovdqu (%r13), %xmm0
vmovdqa %xmm0, 0x100(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x100(%rsp), %rsi
leaq 0xe0(%rsp), %rdx
vzeroupper
callq 0x26d430
vmovdqu %xmm0, (%rbx)
addq $0x10, %r13
addq $0x10, %rbx
addl $0x4, %r14d
leal 0x3(%r14), %eax
cmpl %r15d, %eax
jl 0x26a331
jmp 0x26a39e
vmovd (%r13), %xmm1
vmovdqa 0xb0(%rsp), %xmm0
vzeroupper
callq 0x5f0e0
vmovd %xmm0, (%rbx)
addq $0x4, %r13
addq $0x4, %rbx
incl %r14d
cmpl %r15d, %r14d
jl 0x26a378
movq 0x28(%rsp), %rcx
incq %rcx
movq 0xa8(%rsp), %rsi
jmp 0x26a27e
movq 0xa0(%rsp), %rcx
incq %rcx
movq 0x18(%rsp), %rbx
jmp 0x26a21f
movq 0x20(%rsp), %rax
movl 0x28(%rax), %eax
cmpl $0x3, %eax
jne 0x26ac4b
cmpl $0x2, 0x38(%rsp)
jne 0x26ac4b
movl %r12d, %r15d
imull 0x80(%rsp), %r15d
xorl %ecx, %ecx
movq 0x40(%rsp), %rax
testl %eax, %eax
movl $0x0, %edx
cmovgl %eax, %edx
movq %rdx, 0x60(%rsp)
movq 0xf0(%rsp), %rax
testl %eax, %eax
movl $0x0, %edx
cmovgl %eax, %edx
movq %rdx, 0xd0(%rsp)
movslq 0x34(%rsp), %rax
imulq 0x78(%rsp), %rax
movq %rax, 0xc8(%rsp)
cmpq 0xd0(%rsp), %rcx
je 0x26ac43
movq 0x20(%rsp), %rax
movq 0x40(%rax), %r13
imulq %rcx, %r13
imulq 0x10(%rax), %r13
addq (%rax), %r13
movq 0xc8(%rsp), %rsi
imulq %rcx, %rsi
movq %rbx, %rax
movq %rcx, %rdx
movq 0x48(%rsp), %rcx
movq 0x40(%rcx), %rbx
movq %rdx, 0xa0(%rsp)
imulq %rdx, %rbx
imulq 0x10(%rcx), %rbx
addq %rax, %rsi
addq (%rcx), %rbx
xorl %ecx, %ecx
movq %rsi, 0xa8(%rsp)
cmpq 0x60(%rsp), %rcx
je 0x26a5d1
vmovss (%rsi,%rcx,4), %xmm0
cmpl $0x4, %r12d
movq %rcx, 0x28(%rsp)
vmovaps %xmm0, 0xb0(%rsp)
jne 0x26a4d2
movq %rcx, %rax
shlq $0x4, %rax
vmovups (%rsi,%rax), %xmm0
vmovaps %xmm0, 0xe0(%rsp)
jmp 0x26a4f4
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmovaps %xmm0, 0xe0(%rsp)
cmpl $0x8, %r12d
jne 0x26a4f4
movq %rcx, %rax
shlq $0x5, %rax
vmovdqu (%rsi,%rax), %ymm0
jmp 0x26a4fa
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x140(%rsp)
xorl %r14d, %r14d
leal 0x7(%r14), %eax
cmpl %r15d, %eax
jge 0x26a586
vmovdqu (%r13), %ymm0
vmovdqa %ymm0, 0x100(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x100(%rsp), %rsi
leaq 0x140(%rsp), %rdx
callq 0x26ce36
vmovdqu %ymm0, (%rbx)
addq $0x20, %r13
addq $0x20, %rbx
addl $0x8, %r14d
jmp 0x26a506
vmovdqu (%r13), %xmm0
vmovdqa %xmm0, 0x100(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x100(%rsp), %rsi
leaq 0xe0(%rsp), %rdx
vzeroupper
callq 0x26d046
vmovdqu %xmm0, (%rbx)
addq $0x10, %r13
addq $0x10, %rbx
addl $0x4, %r14d
leal 0x3(%r14), %eax
cmpl %r15d, %eax
jl 0x26a54a
jmp 0x26a5b7
vmovd (%r13), %xmm0
vmovdqa 0xb0(%rsp), %xmm1
vzeroupper
callq 0x5f0e0
vmovd %xmm0, (%rbx)
addq $0x4, %r13
addq $0x4, %rbx
incl %r14d
cmpl %r15d, %r14d
jl 0x26a591
movq 0x28(%rsp), %rcx
incq %rcx
movq 0xa8(%rsp), %rsi
jmp 0x26a497
movq 0xa0(%rsp), %rcx
incq %rcx
movq 0x18(%rsp), %rbx
jmp 0x26a438
movq 0x20(%rsp), %rax
movl 0x28(%rax), %eax
cmpl $0x3, %eax
jne 0x26ae6d
cmpl $0x2, 0x38(%rsp)
jne 0x26ae6d
movl %r12d, %r15d
imull 0x80(%rsp), %r15d
xorl %ecx, %ecx
movq 0x40(%rsp), %rax
testl %eax, %eax
movl $0x0, %edx
cmovgl %eax, %edx
movq %rdx, 0x60(%rsp)
movq 0xf0(%rsp), %rax
testl %eax, %eax
movl $0x0, %edx
cmovgl %eax, %edx
movq %rdx, 0xd0(%rsp)
movslq 0x34(%rsp), %rax
imulq 0x78(%rsp), %rax
movq %rax, 0xc8(%rsp)
cmpq 0xd0(%rsp), %rcx
je 0x26ae65
movq 0x20(%rsp), %rax
movq 0x40(%rax), %r13
imulq %rcx, %r13
imulq 0x10(%rax), %r13
addq (%rax), %r13
movq 0xc8(%rsp), %rsi
imulq %rcx, %rsi
movq %rbx, %rax
movq %rcx, %rdx
movq 0x48(%rsp), %rcx
movq 0x40(%rcx), %rbx
movq %rdx, 0xa0(%rsp)
imulq %rdx, %rbx
imulq 0x10(%rcx), %rbx
addq %rax, %rsi
addq (%rcx), %rbx
xorl %ecx, %ecx
movq %rsi, 0xa8(%rsp)
cmpq 0x60(%rsp), %rcx
je 0x26a7ea
vmovss (%rsi,%rcx,4), %xmm0
cmpl $0x4, %r12d
movq %rcx, 0x28(%rsp)
vmovaps %xmm0, 0xb0(%rsp)
jne 0x26a6eb
movq %rcx, %rax
shlq $0x4, %rax
vmovups (%rsi,%rax), %xmm0
vmovaps %xmm0, 0xe0(%rsp)
jmp 0x26a70d
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmovaps %xmm0, 0xe0(%rsp)
cmpl $0x8, %r12d
jne 0x26a70d
movq %rcx, %rax
shlq $0x5, %rax
vmovdqu (%rsi,%rax), %ymm0
jmp 0x26a713
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x140(%rsp)
xorl %r14d, %r14d
leal 0x7(%r14), %eax
cmpl %r15d, %eax
jge 0x26a79f
vmovdqu (%r13), %ymm0
vmovdqa %ymm0, 0x100(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x100(%rsp), %rsi
leaq 0x140(%rsp), %rdx
callq 0x26d60a
vmovdqu %ymm0, (%rbx)
addq $0x20, %r13
addq $0x20, %rbx
addl $0x8, %r14d
jmp 0x26a71f
vmovdqu (%r13), %xmm0
vmovdqa %xmm0, 0x100(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x100(%rsp), %rsi
leaq 0xe0(%rsp), %rdx
vzeroupper
callq 0x26d794
vmovdqu %xmm0, (%rbx)
addq $0x10, %r13
addq $0x10, %rbx
addl $0x4, %r14d
leal 0x3(%r14), %eax
cmpl %r15d, %eax
jl 0x26a763
jmp 0x26a7d0
vmovd (%r13), %xmm0
vmovdqa 0xb0(%rsp), %xmm1
vzeroupper
callq 0x5f170
vmovd %xmm0, (%rbx)
addq $0x4, %r13
addq $0x4, %rbx
incl %r14d
cmpl %r15d, %r14d
jl 0x26a7aa
movq 0x28(%rsp), %rcx
incq %rcx
movq 0xa8(%rsp), %rsi
jmp 0x26a6b0
movq 0xa0(%rsp), %rcx
incq %rcx
movq 0x18(%rsp), %rbx
jmp 0x26a651
movq 0x20(%rsp), %rax
movl 0x28(%rax), %eax
cmpl $0x4, %eax
jne 0x26b08f
cmpl $0x2, 0x38(%rsp)
jne 0x26b08f
movl %r12d, %r15d
imull 0x80(%rsp), %r15d
imull 0x40(%rsp), %r15d
xorl %ecx, %ecx
movq 0xf8(%rsp), %rax
testl %eax, %eax
movl $0x0, %edx
cmovgl %eax, %edx
movq %rdx, 0x60(%rsp)
movq 0xf0(%rsp), %rax
testl %eax, %eax
movl $0x0, %edx
cmovgl %eax, %edx
movq %rdx, 0xd0(%rsp)
movslq 0x34(%rsp), %rax
imulq 0x78(%rsp), %rax
movq %rax, 0xc8(%rsp)
cmpq 0xd0(%rsp), %rcx
je 0x26b087
movq 0x20(%rsp), %rax
movq 0x40(%rax), %r13
imulq %rcx, %r13
imulq 0x10(%rax), %r13
addq (%rax), %r13
movq 0xc8(%rsp), %rsi
imulq %rcx, %rsi
movq %rbx, %rax
movq %rcx, %rdx
movq 0x48(%rsp), %rcx
movq 0x40(%rcx), %rbx
movq %rdx, 0xa0(%rsp)
imulq %rdx, %rbx
imulq 0x10(%rcx), %rbx
addq %rax, %rsi
addq (%rcx), %rbx
xorl %ecx, %ecx
movq %rsi, 0xa8(%rsp)
cmpq 0x60(%rsp), %rcx
je 0x26aa0c
vmovd (%rsi,%rcx,4), %xmm1
cmpl $0x4, %r12d
movq %rcx, 0x28(%rsp)
vmovdqa %xmm1, 0xb0(%rsp)
jne 0x26a90d
movq %rcx, %rax
shlq $0x4, %rax
vmovdqu (%rsi,%rax), %xmm0
vmovdqa %xmm0, 0xe0(%rsp)
jmp 0x26a92f
vpshufd $0x0, %xmm1, %xmm0 # xmm0 = xmm1[0,0,0,0]
vmovdqa %xmm0, 0xe0(%rsp)
cmpl $0x8, %r12d
jne 0x26a92f
movq %rcx, %rax
shlq $0x5, %rax
vmovdqu (%rsi,%rax), %ymm0
jmp 0x26a935
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x140(%rsp)
xorl %r14d, %r14d
leal 0x7(%r14), %eax
cmpl %r15d, %eax
jge 0x26a9c1
vmovdqu (%r13), %ymm0
vmovdqa %ymm0, 0x100(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x100(%rsp), %rsi
leaq 0x140(%rsp), %rdx
callq 0x26d916
vmovdqu %ymm0, (%rbx)
addq $0x20, %r13
addq $0x20, %rbx
addl $0x8, %r14d
jmp 0x26a941
vmovdqu (%r13), %xmm0
vmovdqa %xmm0, 0x100(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x100(%rsp), %rsi
leaq 0xe0(%rsp), %rdx
vzeroupper
callq 0x26daa0
vmovdqu %xmm0, (%rbx)
addq $0x10, %r13
addq $0x10, %rbx
addl $0x4, %r14d
leal 0x3(%r14), %eax
cmpl %r15d, %eax
jl 0x26a985
jmp 0x26a9f2
vmovd (%r13), %xmm1
vmovdqa 0xb0(%rsp), %xmm0
vzeroupper
callq 0x5f170
vmovd %xmm0, (%rbx)
addq $0x4, %r13
addq $0x4, %rbx
incl %r14d
cmpl %r15d, %r14d
jl 0x26a9cc
movq 0x28(%rsp), %rcx
incq %rcx
movq 0xa8(%rsp), %rsi
jmp 0x26a8d2
movq 0xa0(%rsp), %rcx
incq %rcx
movq 0x18(%rsp), %rbx
jmp 0x26a873
movq 0x20(%rsp), %rax
movl 0x28(%rax), %eax
cmpl $0x4, %eax
jne 0x26b31a
cmpl $0x2, 0x38(%rsp)
jne 0x26b31a
movl %r12d, %r15d
imull 0x80(%rsp), %r15d
imull 0x40(%rsp), %r15d
xorl %ecx, %ecx
movq 0xf8(%rsp), %rax
testl %eax, %eax
movl $0x0, %edx
cmovgl %eax, %edx
movq %rdx, 0x60(%rsp)
movq 0xf0(%rsp), %rax
testl %eax, %eax
movl $0x0, %edx
cmovgl %eax, %edx
movq %rdx, 0xd0(%rsp)
movslq 0x34(%rsp), %rax
imulq 0x78(%rsp), %rax
movq %rax, 0xc8(%rsp)
cmpq 0xd0(%rsp), %rcx
je 0x26b312
movq 0x20(%rsp), %rax
movq 0x40(%rax), %r13
imulq %rcx, %r13
imulq 0x10(%rax), %r13
addq (%rax), %r13
movq 0xc8(%rsp), %rsi
imulq %rcx, %rsi
movq %rbx, %rax
movq %rcx, %rdx
movq 0x48(%rsp), %rcx
movq 0x40(%rcx), %rbx
movq %rdx, 0xa0(%rsp)
imulq %rdx, %rbx
imulq 0x10(%rcx), %rbx
addq %rax, %rsi
addq (%rcx), %rbx
xorl %ecx, %ecx
movq %rsi, 0xa8(%rsp)
cmpq 0x60(%rsp), %rcx
je 0x26ac2e
vmovd (%rsi,%rcx,4), %xmm1
cmpl $0x4, %r12d
movq %rcx, 0x28(%rsp)
vmovdqa %xmm1, 0xb0(%rsp)
jne 0x26ab2f
movq %rcx, %rax
shlq $0x4, %rax
vmovdqu (%rsi,%rax), %xmm0
vmovdqa %xmm0, 0xe0(%rsp)
jmp 0x26ab51
vpshufd $0x0, %xmm1, %xmm0 # xmm0 = xmm1[0,0,0,0]
vmovdqa %xmm0, 0xe0(%rsp)
cmpl $0x8, %r12d
jne 0x26ab51
movq %rcx, %rax
shlq $0x5, %rax
vmovdqu (%rsi,%rax), %ymm0
jmp 0x26ab57
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x140(%rsp)
xorl %r14d, %r14d
leal 0x7(%r14), %eax
cmpl %r15d, %eax
jge 0x26abe3
vmovdqu (%r13), %ymm0
vmovdqa %ymm0, 0x100(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x100(%rsp), %rsi
leaq 0x140(%rsp), %rdx
callq 0x26d220
vmovdqu %ymm0, (%rbx)
addq $0x20, %r13
addq $0x20, %rbx
addl $0x8, %r14d
jmp 0x26ab63
vmovdqu (%r13), %xmm0
vmovdqa %xmm0, 0x100(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x100(%rsp), %rsi
leaq 0xe0(%rsp), %rdx
vzeroupper
callq 0x26d430
vmovdqu %xmm0, (%rbx)
addq $0x10, %r13
addq $0x10, %rbx
addl $0x4, %r14d
leal 0x3(%r14), %eax
cmpl %r15d, %eax
jl 0x26aba7
jmp 0x26ac14
vmovd (%r13), %xmm1
vmovdqa 0xb0(%rsp), %xmm0
vzeroupper
callq 0x5f0e0
vmovd %xmm0, (%rbx)
addq $0x4, %r13
addq $0x4, %rbx
incl %r14d
cmpl %r15d, %r14d
jl 0x26abee
movq 0x28(%rsp), %rcx
incq %rcx
movq 0xa8(%rsp), %rsi
jmp 0x26aaf4
movq 0xa0(%rsp), %rcx
incq %rcx
movq 0x18(%rsp), %rbx
jmp 0x26aa95
movq 0x20(%rsp), %rax
movl 0x28(%rax), %eax
cmpl $0x4, %eax
jne 0x26b5a5
cmpl $0x2, 0x38(%rsp)
jne 0x26b5a5
movl %r12d, %r15d
imull 0x80(%rsp), %r15d
imull 0x40(%rsp), %r15d
xorl %ecx, %ecx
movq 0xf8(%rsp), %rax
testl %eax, %eax
movl $0x0, %edx
cmovgl %eax, %edx
movq %rdx, 0x60(%rsp)
movq 0xf0(%rsp), %rax
testl %eax, %eax
movl $0x0, %edx
cmovgl %eax, %edx
movq %rdx, 0xd0(%rsp)
movslq 0x34(%rsp), %rax
imulq 0x78(%rsp), %rax
movq %rax, 0xc8(%rsp)
cmpq 0xd0(%rsp), %rcx
je 0x26b59d
movq 0x20(%rsp), %rax
movq 0x40(%rax), %r13
imulq %rcx, %r13
imulq 0x10(%rax), %r13
addq (%rax), %r13
movq 0xc8(%rsp), %rsi
imulq %rcx, %rsi
movq %rbx, %rax
movq %rcx, %rdx
movq 0x48(%rsp), %rcx
movq 0x40(%rcx), %rbx
movq %rdx, 0xa0(%rsp)
imulq %rdx, %rbx
imulq 0x10(%rcx), %rbx
addq %rax, %rsi
addq (%rcx), %rbx
xorl %ecx, %ecx
movq %rsi, 0xa8(%rsp)
cmpq 0x60(%rsp), %rcx
je 0x26ae50
vmovss (%rsi,%rcx,4), %xmm0
cmpl $0x4, %r12d
movq %rcx, 0x28(%rsp)
vmovaps %xmm0, 0xb0(%rsp)
jne 0x26ad51
movq %rcx, %rax
shlq $0x4, %rax
vmovups (%rsi,%rax), %xmm0
vmovaps %xmm0, 0xe0(%rsp)
jmp 0x26ad73
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmovaps %xmm0, 0xe0(%rsp)
cmpl $0x8, %r12d
jne 0x26ad73
movq %rcx, %rax
shlq $0x5, %rax
vmovdqu (%rsi,%rax), %ymm0
jmp 0x26ad79
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x140(%rsp)
xorl %r14d, %r14d
leal 0x7(%r14), %eax
cmpl %r15d, %eax
jge 0x26ae05
vmovdqu (%r13), %ymm0
vmovdqa %ymm0, 0x100(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x100(%rsp), %rsi
leaq 0x140(%rsp), %rdx
callq 0x26ce36
vmovdqu %ymm0, (%rbx)
addq $0x20, %r13
addq $0x20, %rbx
addl $0x8, %r14d
jmp 0x26ad85
vmovdqu (%r13), %xmm0
vmovdqa %xmm0, 0x100(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x100(%rsp), %rsi
leaq 0xe0(%rsp), %rdx
vzeroupper
callq 0x26d046
vmovdqu %xmm0, (%rbx)
addq $0x10, %r13
addq $0x10, %rbx
addl $0x4, %r14d
leal 0x3(%r14), %eax
cmpl %r15d, %eax
jl 0x26adc9
jmp 0x26ae36
vmovd (%r13), %xmm0
vmovdqa 0xb0(%rsp), %xmm1
vzeroupper
callq 0x5f0e0
vmovd %xmm0, (%rbx)
addq $0x4, %r13
addq $0x4, %rbx
incl %r14d
cmpl %r15d, %r14d
jl 0x26ae10
movq 0x28(%rsp), %rcx
incq %rcx
movq 0xa8(%rsp), %rsi
jmp 0x26ad16
movq 0xa0(%rsp), %rcx
incq %rcx
movq 0x18(%rsp), %rbx
jmp 0x26acb7
movq 0x20(%rsp), %rax
movl 0x28(%rax), %eax
cmpl $0x4, %eax
jne 0x26b830
cmpl $0x2, 0x38(%rsp)
jne 0x26b830
movl %r12d, %r15d
imull 0x80(%rsp), %r15d
imull 0x40(%rsp), %r15d
xorl %ecx, %ecx
movq 0xf8(%rsp), %rax
testl %eax, %eax
movl $0x0, %edx
cmovgl %eax, %edx
movq %rdx, 0x60(%rsp)
movq 0xf0(%rsp), %rax
testl %eax, %eax
movl $0x0, %edx
cmovgl %eax, %edx
movq %rdx, 0xd0(%rsp)
movslq 0x34(%rsp), %rax
imulq 0x78(%rsp), %rax
movq %rax, 0xc8(%rsp)
cmpq 0xd0(%rsp), %rcx
je 0x26b828
movq 0x20(%rsp), %rax
movq 0x40(%rax), %r13
imulq %rcx, %r13
imulq 0x10(%rax), %r13
addq (%rax), %r13
movq 0xc8(%rsp), %rsi
imulq %rcx, %rsi
movq %rbx, %rax
movq %rcx, %rdx
movq 0x48(%rsp), %rcx
movq 0x40(%rcx), %rbx
movq %rdx, 0xa0(%rsp)
imulq %rdx, %rbx
imulq 0x10(%rcx), %rbx
addq %rax, %rsi
addq (%rcx), %rbx
xorl %ecx, %ecx
movq %rsi, 0xa8(%rsp)
cmpq 0x60(%rsp), %rcx
je 0x26b072
vmovss (%rsi,%rcx,4), %xmm0
cmpl $0x4, %r12d
movq %rcx, 0x28(%rsp)
vmovaps %xmm0, 0xb0(%rsp)
jne 0x26af73
movq %rcx, %rax
shlq $0x4, %rax
vmovups (%rsi,%rax), %xmm0
vmovaps %xmm0, 0xe0(%rsp)
jmp 0x26af95
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmovaps %xmm0, 0xe0(%rsp)
cmpl $0x8, %r12d
jne 0x26af95
movq %rcx, %rax
shlq $0x5, %rax
vmovdqu (%rsi,%rax), %ymm0
jmp 0x26af9b
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x140(%rsp)
xorl %r14d, %r14d
leal 0x7(%r14), %eax
cmpl %r15d, %eax
jge 0x26b027
vmovdqu (%r13), %ymm0
vmovdqa %ymm0, 0x100(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x100(%rsp), %rsi
leaq 0x140(%rsp), %rdx
callq 0x26d60a
vmovdqu %ymm0, (%rbx)
addq $0x20, %r13
addq $0x20, %rbx
addl $0x8, %r14d
jmp 0x26afa7
vmovdqu (%r13), %xmm0
vmovdqa %xmm0, 0x100(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x100(%rsp), %rsi
leaq 0xe0(%rsp), %rdx
vzeroupper
callq 0x26d794
vmovdqu %xmm0, (%rbx)
addq $0x10, %r13
addq $0x10, %rbx
addl $0x4, %r14d
leal 0x3(%r14), %eax
cmpl %r15d, %eax
jl 0x26afeb
jmp 0x26b058
vmovd (%r13), %xmm0
vmovdqa 0xb0(%rsp), %xmm1
vzeroupper
callq 0x5f170
vmovd %xmm0, (%rbx)
addq $0x4, %r13
addq $0x4, %rbx
incl %r14d
cmpl %r15d, %r14d
jl 0x26b032
movq 0x28(%rsp), %rcx
incq %rcx
movq 0xa8(%rsp), %rsi
jmp 0x26af38
movq 0xa0(%rsp), %rcx
incq %rcx
movq 0x18(%rsp), %rbx
jmp 0x26aed9
movq 0x20(%rsp), %rax
movl 0x28(%rax), %eax
cmpl $0x4, %eax
jne 0x26bab3
cmpl $0x3, 0x38(%rsp)
jne 0x26bab3
movl 0x80(%rsp), %eax
imull %r12d, %eax
movl %eax, 0x80(%rsp)
xorl %edx, %edx
movq 0x40(%rsp), %rax
testl %eax, %eax
cmovlel %edx, %eax
movq %rax, 0x40(%rsp)
movq 0xf8(%rsp), %rax
testl %eax, %eax
cmovlel %edx, %eax
movq %rax, 0xf8(%rsp)
movq 0xf0(%rsp), %rax
testl %eax, %eax
cmovlel %edx, %eax
movq %rax, 0xf0(%rsp)
movq 0x98(%rsp), %rax
movq 0x78(%rsp), %rcx
imulq %rcx, %rax
movq %rax, 0x98(%rsp)
movslq 0x34(%rsp), %rax
imulq %rcx, %rax
movq %rax, 0x78(%rsp)
leaq 0x17(%rsp), %r13
leaq 0x100(%rsp), %r14
cmpq 0xf0(%rsp), %rdx
je 0x26bab3
movq 0x20(%rsp), %rax
movq 0x40(%rax), %rbx
imulq %rdx, %rbx
imulq 0x10(%rax), %rbx
addq (%rax), %rbx
movq 0x48(%rsp), %rax
movq 0x40(%rax), %r15
imulq %rdx, %r15
imulq 0x10(%rax), %r15
addq (%rax), %r15
movq 0x98(%rsp), %rax
movq %rdx, 0xa0(%rsp)
imulq %rdx, %rax
addq 0x18(%rsp), %rax
movq %rax, 0x38(%rsp)
xorl %eax, %eax
cmpq 0xf8(%rsp), %rax
je 0x26b302
movq 0x78(%rsp), %rcx
imulq %rax, %rcx
addq 0x38(%rsp), %rcx
movq %rcx, 0x60(%rsp)
xorl %ecx, %ecx
movq %rax, 0xa8(%rsp)
cmpq 0x40(%rsp), %rcx
je 0x26b2fa
movq 0x60(%rsp), %rax
vmovd (%rax,%rcx,4), %xmm1
cmpl $0x4, %r12d
movq %rcx, 0x28(%rsp)
vmovdqa %xmm1, 0xb0(%rsp)
jne 0x26b1ee
movq 0x28(%rsp), %rax
shlq $0x4, %rax
movq 0x60(%rsp), %rcx
vmovdqu (%rcx,%rax), %xmm0
vmovdqa %xmm0, 0xe0(%rsp)
jmp 0x26b217
vpshufd $0x0, %xmm1, %xmm0 # xmm0 = xmm1[0,0,0,0]
vmovdqa %xmm0, 0xe0(%rsp)
cmpl $0x8, %r12d
jne 0x26b217
movq 0x28(%rsp), %rax
shlq $0x5, %rax
movq 0x60(%rsp), %rcx
vmovdqu (%rcx,%rax), %ymm0
jmp 0x26b21d
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x140(%rsp)
xorl %r12d, %r12d
leal 0x7(%r12), %eax
cmpl 0x80(%rsp), %eax
jge 0x26b29e
vmovdqu (%rbx), %ymm0
vmovdqa %ymm0, 0x100(%rsp)
movq %r13, %rdi
movq %r14, %rsi
leaq 0x140(%rsp), %rdx
callq 0x26d916
vmovdqu %ymm0, (%r15)
addq $0x20, %rbx
addq $0x20, %r15
addl $0x8, %r12d
jmp 0x26b229
vmovdqu (%rbx), %xmm0
vmovdqa %xmm0, 0x100(%rsp)
movq %r13, %rdi
movq %r14, %rsi
leaq 0xe0(%rsp), %rdx
vzeroupper
callq 0x26daa0
vmovdqu %xmm0, (%r15)
addq $0x10, %rbx
addq $0x10, %r15
addl $0x4, %r12d
leal 0x3(%r12), %eax
cmpl 0x80(%rsp), %eax
jl 0x26b26a
jmp 0x26b2d3
vmovd (%rbx), %xmm1
vmovdqa 0xb0(%rsp), %xmm0
vzeroupper
callq 0x5f170
vmovd %xmm0, (%r15)
addq $0x4, %rbx
addq $0x4, %r15
incl %r12d
cmpl 0x80(%rsp), %r12d
jl 0x26b2ae
movq 0x28(%rsp), %rcx
incq %rcx
movl 0xdc(%rsp), %r12d
movq 0xa8(%rsp), %rax
jmp 0x26b1a7
incq %rax
jmp 0x26b17c
movq 0xa0(%rsp), %rdx
incq %rdx
jmp 0x26b124
movq 0x20(%rsp), %rax
movl 0x28(%rax), %eax
cmpl $0x4, %eax
jne 0x26bab3
cmpl $0x3, 0x38(%rsp)
jne 0x26bab3
movl 0x80(%rsp), %eax
imull %r12d, %eax
movl %eax, 0x80(%rsp)
xorl %edx, %edx
movq 0x40(%rsp), %rax
testl %eax, %eax
cmovlel %edx, %eax
movq %rax, 0x40(%rsp)
movq 0xf8(%rsp), %rax
testl %eax, %eax
cmovlel %edx, %eax
movq %rax, 0xf8(%rsp)
movq 0xf0(%rsp), %rax
testl %eax, %eax
cmovlel %edx, %eax
movq %rax, 0xf0(%rsp)
movq 0x98(%rsp), %rax
movq 0x78(%rsp), %rcx
imulq %rcx, %rax
movq %rax, 0x98(%rsp)
movslq 0x34(%rsp), %rax
imulq %rcx, %rax
movq %rax, 0x78(%rsp)
leaq 0x17(%rsp), %r13
leaq 0x100(%rsp), %r14
cmpq 0xf0(%rsp), %rdx
je 0x26bab3
movq 0x20(%rsp), %rax
movq 0x40(%rax), %rbx
imulq %rdx, %rbx
imulq 0x10(%rax), %rbx
addq (%rax), %rbx
movq 0x48(%rsp), %rax
movq 0x40(%rax), %r15
imulq %rdx, %r15
imulq 0x10(%rax), %r15
addq (%rax), %r15
movq 0x98(%rsp), %rax
movq %rdx, 0xa0(%rsp)
imulq %rdx, %rax
addq 0x18(%rsp), %rax
movq %rax, 0x38(%rsp)
xorl %eax, %eax
cmpq 0xf8(%rsp), %rax
je 0x26b58d
movq 0x78(%rsp), %rcx
imulq %rax, %rcx
addq 0x38(%rsp), %rcx
movq %rcx, 0x60(%rsp)
xorl %ecx, %ecx
movq %rax, 0xa8(%rsp)
cmpq 0x40(%rsp), %rcx
je 0x26b585
movq 0x60(%rsp), %rax
vmovd (%rax,%rcx,4), %xmm1
cmpl $0x4, %r12d
movq %rcx, 0x28(%rsp)
vmovdqa %xmm1, 0xb0(%rsp)
jne 0x26b479
movq 0x28(%rsp), %rax
shlq $0x4, %rax
movq 0x60(%rsp), %rcx
vmovdqu (%rcx,%rax), %xmm0
vmovdqa %xmm0, 0xe0(%rsp)
jmp 0x26b4a2
vpshufd $0x0, %xmm1, %xmm0 # xmm0 = xmm1[0,0,0,0]
vmovdqa %xmm0, 0xe0(%rsp)
cmpl $0x8, %r12d
jne 0x26b4a2
movq 0x28(%rsp), %rax
shlq $0x5, %rax
movq 0x60(%rsp), %rcx
vmovdqu (%rcx,%rax), %ymm0
jmp 0x26b4a8
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x140(%rsp)
xorl %r12d, %r12d
leal 0x7(%r12), %eax
cmpl 0x80(%rsp), %eax
jge 0x26b529
vmovdqu (%rbx), %ymm0
vmovdqa %ymm0, 0x100(%rsp)
movq %r13, %rdi
movq %r14, %rsi
leaq 0x140(%rsp), %rdx
callq 0x26d220
vmovdqu %ymm0, (%r15)
addq $0x20, %rbx
addq $0x20, %r15
addl $0x8, %r12d
jmp 0x26b4b4
vmovdqu (%rbx), %xmm0
vmovdqa %xmm0, 0x100(%rsp)
movq %r13, %rdi
movq %r14, %rsi
leaq 0xe0(%rsp), %rdx
vzeroupper
callq 0x26d430
vmovdqu %xmm0, (%r15)
addq $0x10, %rbx
addq $0x10, %r15
addl $0x4, %r12d
leal 0x3(%r12), %eax
cmpl 0x80(%rsp), %eax
jl 0x26b4f5
jmp 0x26b55e
vmovd (%rbx), %xmm1
vmovdqa 0xb0(%rsp), %xmm0
vzeroupper
callq 0x5f0e0
vmovd %xmm0, (%r15)
addq $0x4, %rbx
addq $0x4, %r15
incl %r12d
cmpl 0x80(%rsp), %r12d
jl 0x26b539
movq 0x28(%rsp), %rcx
incq %rcx
movl 0xdc(%rsp), %r12d
movq 0xa8(%rsp), %rax
jmp 0x26b432
incq %rax
jmp 0x26b407
movq 0xa0(%rsp), %rdx
incq %rdx
jmp 0x26b3af
movq 0x20(%rsp), %rax
movl 0x28(%rax), %eax
cmpl $0x4, %eax
jne 0x26bab3
cmpl $0x3, 0x38(%rsp)
jne 0x26bab3
movl 0x80(%rsp), %eax
imull %r12d, %eax
movl %eax, 0x80(%rsp)
xorl %edx, %edx
movq 0x40(%rsp), %rax
testl %eax, %eax
cmovlel %edx, %eax
movq %rax, 0x40(%rsp)
movq 0xf8(%rsp), %rax
testl %eax, %eax
cmovlel %edx, %eax
movq %rax, 0xf8(%rsp)
movq 0xf0(%rsp), %rax
testl %eax, %eax
cmovlel %edx, %eax
movq %rax, 0xf0(%rsp)
movq 0x98(%rsp), %rax
movq 0x78(%rsp), %rcx
imulq %rcx, %rax
movq %rax, 0x98(%rsp)
movslq 0x34(%rsp), %rax
imulq %rcx, %rax
movq %rax, 0x78(%rsp)
leaq 0x17(%rsp), %r13
leaq 0x100(%rsp), %r14
cmpq 0xf0(%rsp), %rdx
je 0x26bab3
movq 0x20(%rsp), %rax
movq 0x40(%rax), %rbx
imulq %rdx, %rbx
imulq 0x10(%rax), %rbx
addq (%rax), %rbx
movq 0x48(%rsp), %rax
movq 0x40(%rax), %r15
imulq %rdx, %r15
imulq 0x10(%rax), %r15
addq (%rax), %r15
movq 0x98(%rsp), %rax
movq %rdx, 0xa0(%rsp)
imulq %rdx, %rax
addq 0x18(%rsp), %rax
movq %rax, 0x38(%rsp)
xorl %eax, %eax
cmpq 0xf8(%rsp), %rax
je 0x26b818
movq 0x78(%rsp), %rcx
imulq %rax, %rcx
addq 0x38(%rsp), %rcx
movq %rcx, 0x60(%rsp)
xorl %ecx, %ecx
movq %rax, 0xa8(%rsp)
cmpq 0x40(%rsp), %rcx
je 0x26b810
movq 0x60(%rsp), %rax
vmovss (%rax,%rcx,4), %xmm0
cmpl $0x4, %r12d
movq %rcx, 0x28(%rsp)
vmovaps %xmm0, 0xb0(%rsp)
jne 0x26b704
movq 0x28(%rsp), %rax
shlq $0x4, %rax
movq 0x60(%rsp), %rcx
vmovups (%rcx,%rax), %xmm0
vmovaps %xmm0, 0xe0(%rsp)
jmp 0x26b72d
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmovaps %xmm0, 0xe0(%rsp)
cmpl $0x8, %r12d
jne 0x26b72d
movq 0x28(%rsp), %rax
shlq $0x5, %rax
movq 0x60(%rsp), %rcx
vmovdqu (%rcx,%rax), %ymm0
jmp 0x26b733
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x140(%rsp)
xorl %r12d, %r12d
leal 0x7(%r12), %eax
cmpl 0x80(%rsp), %eax
jge 0x26b7b4
vmovdqu (%rbx), %ymm0
vmovdqa %ymm0, 0x100(%rsp)
movq %r13, %rdi
movq %r14, %rsi
leaq 0x140(%rsp), %rdx
callq 0x26ce36
vmovdqu %ymm0, (%r15)
addq $0x20, %rbx
addq $0x20, %r15
addl $0x8, %r12d
jmp 0x26b73f
vmovdqu (%rbx), %xmm0
vmovdqa %xmm0, 0x100(%rsp)
movq %r13, %rdi
movq %r14, %rsi
leaq 0xe0(%rsp), %rdx
vzeroupper
callq 0x26d046
vmovdqu %xmm0, (%r15)
addq $0x10, %rbx
addq $0x10, %r15
addl $0x4, %r12d
leal 0x3(%r12), %eax
cmpl 0x80(%rsp), %eax
jl 0x26b780
jmp 0x26b7e9
vmovd (%rbx), %xmm0
vmovdqa 0xb0(%rsp), %xmm1
vzeroupper
callq 0x5f0e0
vmovd %xmm0, (%r15)
addq $0x4, %rbx
addq $0x4, %r15
incl %r12d
cmpl 0x80(%rsp), %r12d
jl 0x26b7c4
movq 0x28(%rsp), %rcx
incq %rcx
movl 0xdc(%rsp), %r12d
movq 0xa8(%rsp), %rax
jmp 0x26b6bd
incq %rax
jmp 0x26b692
movq 0xa0(%rsp), %rdx
incq %rdx
jmp 0x26b63a
movq 0x20(%rsp), %rax
movl 0x28(%rax), %eax
cmpl $0x4, %eax
jne 0x26bab3
cmpl $0x3, 0x38(%rsp)
jne 0x26bab3
movl 0x80(%rsp), %eax
imull %r12d, %eax
movl %eax, 0x80(%rsp)
xorl %edx, %edx
movq 0x40(%rsp), %rax
testl %eax, %eax
cmovlel %edx, %eax
movq %rax, 0x40(%rsp)
movq 0xf8(%rsp), %rax
testl %eax, %eax
cmovlel %edx, %eax
movq %rax, 0xf8(%rsp)
movq 0xf0(%rsp), %rax
testl %eax, %eax
cmovlel %edx, %eax
movq %rax, 0xf0(%rsp)
movq 0x98(%rsp), %rax
movq 0x78(%rsp), %rcx
imulq %rcx, %rax
movq %rax, 0x98(%rsp)
movslq 0x34(%rsp), %rax
imulq %rcx, %rax
movq %rax, 0x78(%rsp)
leaq 0x17(%rsp), %r13
leaq 0x100(%rsp), %r14
cmpq 0xf0(%rsp), %rdx
je 0x26bab3
movq 0x20(%rsp), %rax
movq 0x40(%rax), %rbx
imulq %rdx, %rbx
imulq 0x10(%rax), %rbx
addq (%rax), %rbx
movq 0x48(%rsp), %rax
movq 0x40(%rax), %r15
imulq %rdx, %r15
imulq 0x10(%rax), %r15
addq (%rax), %r15
movq 0x98(%rsp), %rax
movq %rdx, 0xa0(%rsp)
imulq %rdx, %rax
addq 0x18(%rsp), %rax
movq %rax, 0x38(%rsp)
xorl %eax, %eax
cmpq 0xf8(%rsp), %rax
je 0x26baa3
movq 0x78(%rsp), %rcx
imulq %rax, %rcx
addq 0x38(%rsp), %rcx
movq %rcx, 0x60(%rsp)
xorl %ecx, %ecx
movq %rax, 0xa8(%rsp)
cmpq 0x40(%rsp), %rcx
je 0x26ba9b
movq 0x60(%rsp), %rax
vmovss (%rax,%rcx,4), %xmm0
cmpl $0x4, %r12d
movq %rcx, 0x28(%rsp)
vmovaps %xmm0, 0xb0(%rsp)
jne 0x26b98f
movq 0x28(%rsp), %rax
shlq $0x4, %rax
movq 0x60(%rsp), %rcx
vmovups (%rcx,%rax), %xmm0
vmovaps %xmm0, 0xe0(%rsp)
jmp 0x26b9b8
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmovaps %xmm0, 0xe0(%rsp)
cmpl $0x8, %r12d
jne 0x26b9b8
movq 0x28(%rsp), %rax
shlq $0x5, %rax
movq 0x60(%rsp), %rcx
vmovdqu (%rcx,%rax), %ymm0
jmp 0x26b9be
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x140(%rsp)
xorl %r12d, %r12d
leal 0x7(%r12), %eax
cmpl 0x80(%rsp), %eax
jge 0x26ba3f
vmovdqu (%rbx), %ymm0
vmovdqa %ymm0, 0x100(%rsp)
movq %r13, %rdi
movq %r14, %rsi
leaq 0x140(%rsp), %rdx
callq 0x26d60a
vmovdqu %ymm0, (%r15)
addq $0x20, %rbx
addq $0x20, %r15
addl $0x8, %r12d
jmp 0x26b9ca
vmovdqu (%rbx), %xmm0
vmovdqa %xmm0, 0x100(%rsp)
movq %r13, %rdi
movq %r14, %rsi
leaq 0xe0(%rsp), %rdx
vzeroupper
callq 0x26d794
vmovdqu %xmm0, (%r15)
addq $0x10, %rbx
addq $0x10, %r15
addl $0x4, %r12d
leal 0x3(%r12), %eax
cmpl 0x80(%rsp), %eax
jl 0x26ba0b
jmp 0x26ba74
vmovd (%rbx), %xmm0
vmovdqa 0xb0(%rsp), %xmm1
vzeroupper
callq 0x5f170
vmovd %xmm0, (%r15)
addq $0x4, %rbx
addq $0x4, %r15
incl %r12d
cmpl 0x80(%rsp), %r12d
jl 0x26ba4f
movq 0x28(%rsp), %rcx
incq %rcx
movl 0xdc(%rsp), %r12d
movq 0xa8(%rsp), %rax
jmp 0x26b948
incq %rax
jmp 0x26b91d
movq 0xa0(%rsp), %rdx
incq %rdx
jmp 0x26b8c5
movq 0x58(%rsp), %rax
testq %rax, %rax
je 0x26bae9
lock
decl (%rax)
jne 0x26bae9
movq 0x50(%rsp), %rdi
testq %rdi, %rdi
je 0x26badc
movq (%rdi), %rax
movq 0x18(%rsp), %rsi
vzeroupper
callq *0x18(%rax)
jmp 0x26bae9
movq 0x18(%rsp), %rdi
vzeroupper
callq 0x5f3e0
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
jmp 0x26bd56
movq %rax, %r15
movq 0x148(%rsp), %rax
testq %rax, %rax
je 0x26bcc1
lock
decl (%rax)
jne 0x26bcc1
movq 0x140(%rsp), %rsi
movq 0x160(%rsp), %rdi
testq %rdi, %rdi
je 0x26bc9e
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x26bcc1
jmp 0x26bd56
jmp 0x26bd56
movq %rax, %r15
movq 0x148(%rsp), %rax
testq %rax, %rax
je 0x26bcc1
lock
decl (%rax)
jne 0x26bcc1
movq 0x140(%rsp), %rsi
movq 0x160(%rsp), %rdi
testq %rdi, %rdi
je 0x26bc9e
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x26bcc1
jmp 0x26bd56
jmp 0x26bd56
movq %rax, %r15
movq 0x148(%rsp), %rax
testq %rax, %rax
je 0x26bcc1
lock
decl (%rax)
jne 0x26bcc1
movq 0x140(%rsp), %rsi
movq 0x160(%rsp), %rdi
testq %rdi, %rdi
je 0x26bc9e
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x26bcc1
jmp 0x26bd56
jmp 0x26bcbe
jmp 0x26bd56
movq %rax, %r15
movq 0x148(%rsp), %rax
testq %rax, %rax
je 0x26bcc1
lock
decl (%rax)
jne 0x26bcc1
movq 0x140(%rsp), %rsi
movq 0x160(%rsp), %rdi
testq %rdi, %rdi
je 0x26bc9e
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x26bcc1
jmp 0x26bd56
jmp 0x26bcbe
jmp 0x26bd56
movq %rax, %r15
movq 0x148(%rsp), %rax
testq %rax, %rax
je 0x26bcc1
lock
decl (%rax)
jne 0x26bcc1
movq 0x140(%rsp), %rsi
movq 0x160(%rsp), %rdi
testq %rdi, %rdi
je 0x26bc9e
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x26bcc1
jmp 0x26bd56
jmp 0x26bcbe
jmp 0x26bd56
movq %rax, %r15
movq 0x148(%rsp), %rax
testq %rax, %rax
je 0x26bcc1
lock
decl (%rax)
jne 0x26bcc1
movq 0x140(%rsp), %rsi
movq 0x160(%rsp), %rdi
testq %rdi, %rdi
jne 0x26bca8
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x26bcc1
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x26bcc1
jmp 0x26bd56
jmp 0x26bcbe
jmp 0x26bd56
jmp 0x26bcbe
movq %rax, %r15
movq %rbx, %rsi
jmp 0x26bd1c
jmp 0x26bd14
jmp 0x26bd14
jmp 0x26bd14
jmp 0x26bd14
jmp 0x26bd14
jmp 0x26bd14
jmp 0x26bd14
jmp 0x26bd14
jmp 0x26bd14
jmp 0x26bd14
jmp 0x26bd14
jmp 0x26bd14
jmp 0x26bd14
jmp 0x26bd14
jmp 0x26bd14
jmp 0x26bd14
jmp 0x26bd14
jmp 0x26bd14
jmp 0x26bd14
jmp 0x26bd14
jmp 0x26bd14
jmp 0x26bd14
jmp 0x26bd14
jmp 0x26bd14
jmp 0x26bd14
jmp 0x26bd14
jmp 0x26bd14
jmp 0x26bd14
jmp 0x26bd14
jmp 0x26bd14
jmp 0x26bd14
jmp 0x26bd14
jmp 0x26bd14
jmp 0x26bd14
jmp 0x26bd14
jmp 0x26bd14
jmp 0x26bd14
jmp 0x26bd14
jmp 0x26bd14
movq %rax, %r15
movq 0x18(%rsp), %rsi
movq 0x50(%rsp), %rdi
movq 0x58(%rsp), %rax
testq %rax, %rax
je 0x26bd4b
lock
decl (%rax)
jne 0x26bd4b
testq %rdi, %rdi
jne 0x26bd42
movq %rsi, %rdi
vzeroupper
callq 0x5f3e0
jmp 0x26bd4b
movq (%rdi), %rax
vzeroupper
callq *0x18(%rax)
movq %r15, %rdi
vzeroupper
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/binaryop_x86_fma.cpp |
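The compiled binary-op kernel above follows a consistent shape in every broadcast case: an 8-lane ymm loop, a 4-lane xmm loop, then a scalar remainder. On the division paths the wide loops avoid vdivps, instead refining the hardware reciprocal from vrcpps with one fused-multiply-add Newton-Raphson step (the vfmsub213ps/vfnmadd213ps pairs in the listing). A minimal intrinsics sketch of that refinement, assuming FMA3 is available; the helper name div8_ps is illustrative and not part of ncnn:

#include <immintrin.h>

// Approximate b / x, mirroring the vrcpps + FMA pattern in the listing.
static inline __m256 div8_ps(__m256 b, __m256 x)
{
    __m256 r = _mm256_rcp_ps(x);         // r ~= 1/x, about 12-bit accurate
    __m256 q = _mm256_mul_ps(b, r);      // q ~= b/x, first guess
    __m256 e = _mm256_fmsub_ps(q, x, b); // e  = q*x - b  (residual)
    return _mm256_fnmadd_ps(r, e, q);    // q - r*e: one Newton step
}

One refinement step roughly squares the relative error of vrcpps, bringing the quotient close to full single precision; only the scalar tail falls back to an exact vdivss.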
virtual thunk to ncnn::BinaryOp_x86_fma::forward(std::vector<ncnn::Mat, std::allocator<ncnn::Mat>> const&, std::vector<ncnn::Mat, std::allocator<ncnn::Mat>>&, ncnn::Option const&) const | int BinaryOp_x86_fma::forward(const std::vector<Mat>& bottom_blobs, std::vector<Mat>& top_blobs, const Option& opt) const
{
const bool b_is_scalar = bottom_blobs[1].w * bottom_blobs[1].h * bottom_blobs[1].d * bottom_blobs[1].c * bottom_blobs[1].elempack == 1;
const bool a_rank_is_lower = bottom_blobs[0].dims < bottom_blobs[1].dims && !b_is_scalar;
const bool a_size_is_lower = bottom_blobs[0].w * bottom_blobs[0].h * bottom_blobs[0].d * bottom_blobs[0].c * bottom_blobs[0].elempack < bottom_blobs[1].w * bottom_blobs[1].h * bottom_blobs[1].d * bottom_blobs[1].c * bottom_blobs[1].elempack;
const bool a_is_lower = a_rank_is_lower || (!a_rank_is_lower && a_size_is_lower);
const Mat& A = a_is_lower ? bottom_blobs[1] : bottom_blobs[0];
const Mat& B = a_is_lower ? bottom_blobs[0] : bottom_blobs[1];
const int op_type_r = a_is_lower ? get_reverse_op_type(op_type) : op_type;
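    // inputs may have been swapped above so that A is the larger operand;
    // get_reverse_op_type() then remaps non-commutative ops (e.g. sub <-> rsub)
    // so the numerical result is unchanged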
Mat& top_blob = top_blobs[0];
top_blob.create_like(A, opt.blob_allocator);
if (top_blob.empty())
return -100;
// B is a scalar
if (B.w * B.h * B.d * B.c * B.elempack == 1)
{
return binary_op_scalar(A, B[0], top_blob, op_type_r, opt);
}
// no broadcast
if (A.dims == B.dims && A.w == B.w && A.h == B.h && A.d == B.d && A.c == B.c && A.elempack == B.elempack)
{
return binary_op_no_broadcast(A, B, top_blob, op_type_r, opt);
}
// broadcast B for inner axis
if ((B.dims < A.dims)
|| (A.dims == 2 && B.w == 1 && B.h == A.h)
|| (A.dims == 3 && B.w == 1 && B.h == 1 && B.c == A.c)
|| (A.dims == 3 && B.w == 1 && B.h == A.h && B.c == A.c)
|| (A.dims == 4 && B.w == 1 && B.h == 1 && B.d == 1 && B.c == A.c)
|| (A.dims == 4 && B.w == 1 && B.h == 1 && B.d == A.d && B.c == A.c)
|| (A.dims == 4 && B.w == 1 && B.h == A.h && B.d == A.d && B.c == A.c))
{
return binary_op_broadcast_inner(A, B, top_blob, op_type_r, opt);
}
// broadcast B for outer axis
if (B.elempack == 1 && ((A.dims == 2 && B.w == A.w && B.h == 1) || (A.dims == 3 && B.w == A.w && B.h == 1 && B.c == 1) || (A.dims == 3 && B.w == A.w && B.h == A.h && B.c == 1) || (A.dims == 4 && B.w == A.w && B.h == 1 && B.d == 1 && B.c == 1) || (A.dims == 4 && B.w == A.w && B.h == A.h && B.d == 1 && B.c == 1) || (A.dims == 4 && B.w == A.w && B.h == A.h && B.d == A.d && B.c == 1)))
{
return binary_op_broadcast_outer(A, B, top_blob, op_type_r, opt);
}
// some special broadcast rule here
if (A.dims == 3 && B.dims == 3 && A.w == B.w && B.h == 1 && A.c == B.c)
{
return binary_op_broadcast_20(A, B, top_blob, op_type_r, opt);
}
return 0;
} | movq (%rdi), %rax
addq -0x40(%rax), %rdi
jmp 0x25fe94
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/binaryop_x86_fma.cpp |
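The three-instruction body above is a classic virtual thunk: it loads the vtable pointer, adds the this-adjustment stored at a negative vtable offset (-0x40(%rax)), and tail-jumps into the real forward(). Thunks of this shape appear when a method is overridden across a virtual base, because the adjustment is only known at run time. A hedged sketch of a hierarchy that makes a compiler emit one; the class names are illustrative, not ncnn's actual layer tree:

struct Layer {
    virtual ~Layer() = default;
    virtual int forward() const = 0;
};
struct BinaryOp : virtual Layer {};            // virtual base
struct BinaryOp_x86 : BinaryOp {
    int forward() const override { return 0; } // reached via the thunk
};

Calling forward() through a Layer* enters the thunk first, which adjusts this from the virtual-base subobject to the full BinaryOp_x86 object before the override runs.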
ncnn::BinaryOp_x86_fma::forward_inplace(ncnn::Mat&, ncnn::Option const&) const | int BinaryOp_x86_fma::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
{
using namespace BinaryOp_x86_fma_functor;
if (op_type == Operation_ADD) return binary_op_scalar_inplace<binary_op_add>(bottom_top_blob, b, opt);
if (op_type == Operation_SUB) return binary_op_scalar_inplace<binary_op_sub>(bottom_top_blob, b, opt);
if (op_type == Operation_MUL) return binary_op_scalar_inplace<binary_op_mul>(bottom_top_blob, b, opt);
if (op_type == Operation_DIV) return binary_op_scalar_inplace<binary_op_div>(bottom_top_blob, b, opt);
if (op_type == Operation_MAX) return binary_op_scalar_inplace<binary_op_max>(bottom_top_blob, b, opt);
if (op_type == Operation_MIN) return binary_op_scalar_inplace<binary_op_min>(bottom_top_blob, b, opt);
if (op_type == Operation_POW) return binary_op_scalar_inplace<binary_op_pow>(bottom_top_blob, b, opt);
if (op_type == Operation_RSUB) return binary_op_scalar_inplace<binary_op_rsub>(bottom_top_blob, b, opt);
if (op_type == Operation_RDIV) return binary_op_scalar_inplace<binary_op_rdiv>(bottom_top_blob, b, opt);
if (op_type == Operation_RPOW) return binary_op_scalar_inplace<binary_op_rpow>(bottom_top_blob, b, opt);
if (op_type == Operation_ATAN2) return binary_op_scalar_inplace<binary_op_atan2>(bottom_top_blob, b, opt);
if (op_type == Operation_RATAN2) return binary_op_scalar_inplace<binary_op_ratan2>(bottom_top_blob, b, opt);
return 0;
} | pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0xe0, %rsp
movq (%rdi), %rax
movq -0x18(%rax), %rdx
movl 0xd0(%rdi,%rdx), %eax
cmpq $0xb, %rax
ja 0x26c8c4
movq %rsi, %r14
leaq 0x18b6f8(%rip), %rcx # 0x3f749c
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
movq %rsi, 0x38(%rsp)
jmpq *%rax
vbroadcastss 0xd8(%rdi,%rdx), %ymm0
movl 0x30(%r14), %eax
movl 0x38(%r14), %ecx
imull 0x2c(%r14), %eax
imull 0x34(%r14), %eax
imull 0x18(%r14), %eax
xorl %edx, %edx
testl %ecx, %ecx
cmovlel %edx, %ecx
cmpq %rcx, %rdx
je 0x26c8c4
movq (%r14), %rsi
movq 0x10(%r14), %r9
movq 0x40(%r14), %r8
movq %r8, %r10
imulq %rdx, %r10
imulq %r9, %r10
addq %rsi, %r10
xorl %edi, %edi
xorl %r11d, %r11d
leal 0x7(%r11), %ebx
cmpl %eax, %ebx
jge 0x26be37
vaddps (%r10), %ymm0, %ymm1
vmovups %ymm1, (%r10)
addq $0x20, %r10
addl $0x8, %r11d
addq $0x8, %rdi
jmp 0x26be01
vaddps (%r10), %xmm0, %xmm1
vmovaps %xmm1, (%r10)
addq $0x10, %r10
addl $0x4, %r11d
addq $0x4, %rdi
leal 0x3(%r11), %ebx
cmpl %eax, %ebx
jl 0x26be21
imulq %r9, %r8
imulq %rdx, %r8
addq %r8, %rsi
cmpl %eax, %edi
jge 0x26be5d
vaddss (%rsi,%rdi,4), %xmm0, %xmm1
vmovss %xmm1, (%rsi,%rdi,4)
incq %rdi
jmp 0x26be4a
incq %rdx
jmp 0x26bdda
vbroadcastss 0xd8(%rdi,%rdx), %ymm0
vmovaps %ymm0, 0x40(%rsp)
movl 0x30(%r14), %ebx
movl 0x38(%r14), %eax
imull 0x2c(%r14), %ebx
imull 0x34(%r14), %ebx
imull 0x18(%r14), %ebx
xorl %r15d, %r15d
testl %eax, %eax
cmovlel %r15d, %eax
movq %rax, 0x18(%rsp)
cmpq 0x18(%rsp), %r15
je 0x26c8c4
movq (%r14), %r12
movq 0x10(%r14), %rax
movq 0x40(%r14), %r14
movq %r14, 0x28(%rsp)
movq %r15, 0x30(%rsp)
imulq %r15, %r14
movq %rax, 0x20(%rsp)
imulq %rax, %r14
addq %r12, %r14
vmovaps 0x40(%rsp), %ymm0
vmovaps %ymm0, 0xa0(%rsp)
xorl %r13d, %r13d
xorl %r15d, %r15d
leal 0x7(%r15), %eax
cmpl %ebx, %eax
jge 0x26bf1c
vmovups (%r14), %ymm0
vmovaps %ymm0, 0x60(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x60(%rsp), %rsi
leaq 0xa0(%rsp), %rdx
callq 0x26d916
vmovups %ymm0, (%r14)
addq $0x20, %r14
addl $0x8, %r15d
addq $0x8, %r13
jmp 0x26bedf
vmovaps 0x40(%rsp), %ymm0
vmovaps %xmm0, 0x60(%rsp)
leal 0x3(%r15), %eax
cmpl %ebx, %eax
jge 0x26bf6b
vmovaps (%r14), %xmm0
vmovaps %xmm0, 0x90(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x90(%rsp), %rsi
leaq 0x60(%rsp), %rdx
vzeroupper
callq 0x26daa0
vmovaps %xmm0, (%r14)
addq $0x10, %r14
addl $0x4, %r15d
addq $0x4, %r13
jmp 0x26bf28
movq 0x28(%rsp), %rax
imulq 0x20(%rsp), %rax
movq 0x30(%rsp), %r15
imulq %r15, %rax
addq %rax, %r12
movq 0x38(%rsp), %r14
cmpl %ebx, %r13d
jge 0x26bfab
vmovss (%r12,%r13,4), %xmm1
vmovaps 0x40(%rsp), %ymm0
vzeroupper
callq 0x5f170
vmovss %xmm0, (%r12,%r13,4)
incq %r13
jmp 0x26bf87
incq %r15
jmp 0x26be9a
vbroadcastss 0xd8(%rdi,%rdx), %ymm0
movl 0x30(%r14), %eax
movl 0x38(%r14), %ecx
imull 0x2c(%r14), %eax
imull 0x34(%r14), %eax
imull 0x18(%r14), %eax
xorl %edx, %edx
testl %ecx, %ecx
cmovlel %edx, %ecx
cmpq %rcx, %rdx
je 0x26c8c4
movq (%r14), %rsi
movq 0x10(%r14), %r9
movq 0x40(%r14), %r8
movq %r8, %r10
imulq %rdx, %r10
imulq %r9, %r10
addq %rsi, %r10
xorl %edi, %edi
xorl %r11d, %r11d
leal 0x7(%r11), %ebx
cmpl %eax, %ebx
jge 0x26c038
vmaxps (%r10), %ymm0, %ymm1
vmovups %ymm1, (%r10)
addq $0x20, %r10
addl $0x8, %r11d
addq $0x8, %rdi
jmp 0x26c002
vmaxps (%r10), %xmm0, %xmm1
vmovaps %xmm1, (%r10)
addq $0x10, %r10
addl $0x4, %r11d
addq $0x4, %rdi
leal 0x3(%r11), %ebx
cmpl %eax, %ebx
jl 0x26c022
imulq %r9, %r8
imulq %rdx, %r8
addq %r8, %rsi
cmpl %eax, %edi
jge 0x26c05e
vmaxss (%rsi,%rdi,4), %xmm0, %xmm1
vmovss %xmm1, (%rsi,%rdi,4)
incq %rdi
jmp 0x26c04b
incq %rdx
jmp 0x26bfdb
vbroadcastss 0xd8(%rdi,%rdx), %ymm0
vmovaps %ymm0, 0x40(%rsp)
movl 0x30(%r14), %ebx
movl 0x38(%r14), %eax
imull 0x2c(%r14), %ebx
imull 0x34(%r14), %ebx
imull 0x18(%r14), %ebx
xorl %r15d, %r15d
testl %eax, %eax
cmovlel %r15d, %eax
movq %rax, 0x18(%rsp)
cmpq 0x18(%rsp), %r15
je 0x26c8c4
movq (%r14), %r12
movq 0x10(%r14), %rax
movq 0x40(%r14), %r14
movq %r14, 0x28(%rsp)
movq %r15, 0x30(%rsp)
imulq %r15, %r14
movq %rax, 0x20(%rsp)
imulq %rax, %r14
addq %r12, %r14
vmovaps 0x40(%rsp), %ymm0
vmovaps %ymm0, 0xa0(%rsp)
xorl %r13d, %r13d
xorl %r15d, %r15d
leal 0x7(%r15), %eax
cmpl %ebx, %eax
jge 0x26c11d
vmovups (%r14), %ymm0
vmovaps %ymm0, 0x60(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x60(%rsp), %rsi
leaq 0xa0(%rsp), %rdx
callq 0x26d220
vmovups %ymm0, (%r14)
addq $0x20, %r14
addl $0x8, %r15d
addq $0x8, %r13
jmp 0x26c0e0
vmovaps 0x40(%rsp), %ymm0
vmovaps %xmm0, 0x60(%rsp)
leal 0x3(%r15), %eax
cmpl %ebx, %eax
jge 0x26c16c
vmovaps (%r14), %xmm0
vmovaps %xmm0, 0x90(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x90(%rsp), %rsi
leaq 0x60(%rsp), %rdx
vzeroupper
callq 0x26d430
vmovaps %xmm0, (%r14)
addq $0x10, %r14
addl $0x4, %r15d
addq $0x4, %r13
jmp 0x26c129
movq 0x28(%rsp), %rax
imulq 0x20(%rsp), %rax
movq 0x30(%rsp), %r15
imulq %r15, %rax
addq %rax, %r12
movq 0x38(%rsp), %r14
cmpl %ebx, %r13d
jge 0x26c1ac
vmovss (%r12,%r13,4), %xmm1
vmovaps 0x40(%rsp), %ymm0
vzeroupper
callq 0x5f0e0
vmovss %xmm0, (%r12,%r13,4)
incq %r13
jmp 0x26c188
incq %r15
jmp 0x26c09b
vbroadcastss 0xd8(%rdi,%rdx), %ymm0
movl 0x30(%r14), %eax
movl 0x38(%r14), %ecx
imull 0x2c(%r14), %eax
imull 0x34(%r14), %eax
imull 0x18(%r14), %eax
xorl %edx, %edx
testl %ecx, %ecx
cmovlel %edx, %ecx
cmpq %rcx, %rdx
je 0x26c8c4
movq (%r14), %rsi
movq 0x10(%r14), %r9
movq 0x40(%r14), %r8
movq %r8, %r10
imulq %rdx, %r10
imulq %r9, %r10
addq %rsi, %r10
xorl %edi, %edi
xorl %r11d, %r11d
leal 0x7(%r11), %ebx
cmpl %eax, %ebx
jge 0x26c239
vmulps (%r10), %ymm0, %ymm1
vmovups %ymm1, (%r10)
addq $0x20, %r10
addl $0x8, %r11d
addq $0x8, %rdi
jmp 0x26c203
vmulps (%r10), %xmm0, %xmm1
vmovaps %xmm1, (%r10)
addq $0x10, %r10
addl $0x4, %r11d
addq $0x4, %rdi
leal 0x3(%r11), %ebx
cmpl %eax, %ebx
jl 0x26c223
imulq %r9, %r8
imulq %rdx, %r8
addq %r8, %rsi
cmpl %eax, %edi
jge 0x26c25f
vmulss (%rsi,%rdi,4), %xmm0, %xmm1
vmovss %xmm1, (%rsi,%rdi,4)
incq %rdi
jmp 0x26c24c
incq %rdx
jmp 0x26c1dc
movl 0x30(%r14), %eax
imull 0x2c(%r14), %eax
imull 0x34(%r14), %eax
movl 0x38(%r14), %ecx
imull 0x18(%r14), %eax
vmovss 0x182a02(%rip), %xmm0 # 0x3eec88
vdivss 0xd8(%rdi,%rdx), %xmm0, %xmm0
vshufps $0x0, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm2
xorl %edx, %edx
testl %ecx, %ecx
cmovlel %edx, %ecx
cmpq %rcx, %rdx
je 0x26c8c4
movq (%r14), %rsi
movq 0x10(%r14), %r9
movq 0x40(%r14), %r8
movq %r8, %r10
imulq %rdx, %r10
imulq %r9, %r10
addq %rsi, %r10
xorl %edi, %edi
xorl %r11d, %r11d
leal 0x7(%r11), %ebx
cmpl %eax, %ebx
jge 0x26c2fe
vmulps (%r10), %ymm2, %ymm3
vmovups %ymm3, (%r10)
addq $0x20, %r10
addl $0x8, %r11d
addq $0x8, %rdi
jmp 0x26c2c8
vmulps (%r10), %xmm1, %xmm3
vmovaps %xmm3, (%r10)
addq $0x10, %r10
addl $0x4, %r11d
addq $0x4, %rdi
leal 0x3(%r11), %ebx
cmpl %eax, %ebx
jl 0x26c2e8
imulq %r9, %r8
imulq %rdx, %r8
addq %r8, %rsi
cmpl %eax, %edi
jge 0x26c324
vmulss (%rsi,%rdi,4), %xmm0, %xmm3
vmovss %xmm3, (%rsi,%rdi,4)
incq %rdi
jmp 0x26c311
incq %rdx
jmp 0x26c2a1
vbroadcastss 0xd8(%rdi,%rdx), %ymm0
movl 0x30(%r14), %eax
movl 0x38(%r14), %ecx
imull 0x2c(%r14), %eax
imull 0x34(%r14), %eax
imull 0x18(%r14), %eax
xorl %edx, %edx
testl %ecx, %ecx
cmovlel %edx, %ecx
cmpq %rcx, %rdx
je 0x26c8c4
movq (%r14), %rsi
movq 0x10(%r14), %r9
movq 0x40(%r14), %r8
movq %r8, %r10
imulq %rdx, %r10
imulq %r9, %r10
addq %rsi, %r10
xorl %edi, %edi
xorl %r11d, %r11d
leal 0x7(%r11), %ebx
cmpl %eax, %ebx
jge 0x26c3b1
vsubps (%r10), %ymm0, %ymm1
vmovups %ymm1, (%r10)
addq $0x20, %r10
addl $0x8, %r11d
addq $0x8, %rdi
jmp 0x26c37b
vsubps (%r10), %xmm0, %xmm1
vmovaps %xmm1, (%r10)
addq $0x10, %r10
addl $0x4, %r11d
addq $0x4, %rdi
leal 0x3(%r11), %ebx
cmpl %eax, %ebx
jl 0x26c39b
imulq %r9, %r8
imulq %rdx, %r8
addq %r8, %rsi
cmpl %eax, %edi
jge 0x26c3d7
vsubss (%rsi,%rdi,4), %xmm0, %xmm1
vmovss %xmm1, (%rsi,%rdi,4)
incq %rdi
jmp 0x26c3c4
incq %rdx
jmp 0x26c354
vbroadcastss 0xd8(%rdi,%rdx), %ymm0
movl 0x30(%r14), %eax
movl 0x38(%r14), %ecx
imull 0x2c(%r14), %eax
imull 0x34(%r14), %eax
imull 0x18(%r14), %eax
xorl %edx, %edx
testl %ecx, %ecx
cmovlel %edx, %ecx
cmpq %rcx, %rdx
je 0x26c8c4
movq (%r14), %rsi
movq 0x10(%r14), %r9
movq 0x40(%r14), %r8
movq %r8, %r10
imulq %rdx, %r10
imulq %r9, %r10
addq %rsi, %r10
xorl %edi, %edi
xorl %r11d, %r11d
leal 0x7(%r11), %ebx
cmpl %eax, %ebx
jge 0x26c46c
vmovups (%r10), %ymm1
vsubps %ymm0, %ymm1, %ymm1
vmovups %ymm1, (%r10)
addq $0x20, %r10
addl $0x8, %r11d
addq $0x8, %rdi
jmp 0x26c42e
vmovaps (%r10), %xmm1
vsubps %xmm0, %xmm1, %xmm1
vmovaps %xmm1, (%r10)
addq $0x10, %r10
addl $0x4, %r11d
addq $0x4, %rdi
leal 0x3(%r11), %ebx
cmpl %eax, %ebx
jl 0x26c452
imulq %r9, %r8
imulq %rdx, %r8
addq %r8, %rsi
cmpl %eax, %edi
jge 0x26c496
vmovss (%rsi,%rdi,4), %xmm1
vsubss %xmm0, %xmm1, %xmm1
vmovss %xmm1, (%rsi,%rdi,4)
incq %rdi
jmp 0x26c47f
incq %rdx
jmp 0x26c407
vbroadcastss 0xd8(%rdi,%rdx), %ymm0
movl 0x30(%r14), %eax
movl 0x38(%r14), %ecx
imull 0x2c(%r14), %eax
imull 0x34(%r14), %eax
imull 0x18(%r14), %eax
xorl %edx, %edx
testl %ecx, %ecx
cmovlel %edx, %ecx
cmpq %rcx, %rdx
je 0x26c8c4
movq (%r14), %rsi
movq 0x10(%r14), %r9
movq 0x40(%r14), %r8
movq %r8, %r10
imulq %rdx, %r10
imulq %r9, %r10
addq %rsi, %r10
xorl %edi, %edi
xorl %r11d, %r11d
leal 0x7(%r11), %ebx
cmpl %eax, %ebx
jge 0x26c523
vminps (%r10), %ymm0, %ymm1
vmovups %ymm1, (%r10)
addq $0x20, %r10
addl $0x8, %r11d
addq $0x8, %rdi
jmp 0x26c4ed
vminps (%r10), %xmm0, %xmm1
vmovaps %xmm1, (%r10)
addq $0x10, %r10
addl $0x4, %r11d
addq $0x4, %rdi
leal 0x3(%r11), %ebx
cmpl %eax, %ebx
jl 0x26c50d
imulq %r9, %r8
imulq %rdx, %r8
addq %r8, %rsi
cmpl %eax, %edi
jge 0x26c549
vminss (%rsi,%rdi,4), %xmm0, %xmm1
vmovss %xmm1, (%rsi,%rdi,4)
incq %rdi
jmp 0x26c536
incq %rdx
jmp 0x26c4c6
vbroadcastss 0xd8(%rdi,%rdx), %ymm0
vmovaps %ymm0, 0x40(%rsp)
movl 0x30(%r14), %ebx
movl 0x38(%r14), %eax
imull 0x2c(%r14), %ebx
imull 0x34(%r14), %ebx
imull 0x18(%r14), %ebx
xorl %r15d, %r15d
testl %eax, %eax
cmovlel %r15d, %eax
movq %rax, 0x18(%rsp)
cmpq 0x18(%rsp), %r15
je 0x26c8c4
movq (%r14), %r12
movq 0x10(%r14), %rax
movq 0x40(%r14), %r14
movq %r14, 0x28(%rsp)
movq %r15, 0x30(%rsp)
imulq %r15, %r14
movq %rax, 0x20(%rsp)
imulq %rax, %r14
addq %r12, %r14
vmovaps 0x40(%rsp), %ymm0
vmovaps %ymm0, 0xa0(%rsp)
xorl %r13d, %r13d
xorl %r15d, %r15d
leal 0x7(%r15), %eax
cmpl %ebx, %eax
jge 0x26c608
vmovups (%r14), %ymm0
vmovaps %ymm0, 0x60(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x60(%rsp), %rsi
leaq 0xa0(%rsp), %rdx
callq 0x26ce36
vmovups %ymm0, (%r14)
addq $0x20, %r14
addl $0x8, %r15d
addq $0x8, %r13
jmp 0x26c5cb
vmovaps 0x40(%rsp), %ymm0
vmovaps %xmm0, 0x60(%rsp)
leal 0x3(%r15), %eax
cmpl %ebx, %eax
jge 0x26c657
vmovaps (%r14), %xmm0
vmovaps %xmm0, 0x90(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x90(%rsp), %rsi
leaq 0x60(%rsp), %rdx
vzeroupper
callq 0x26d046
vmovaps %xmm0, (%r14)
addq $0x10, %r14
addl $0x4, %r15d
addq $0x4, %r13
jmp 0x26c614
movq 0x28(%rsp), %rax
imulq 0x20(%rsp), %rax
movq 0x30(%rsp), %r15
imulq %r15, %rax
addq %rax, %r12
movq 0x38(%rsp), %r14
cmpl %ebx, %r13d
jge 0x26c697
vmovss (%r12,%r13,4), %xmm0
vmovaps 0x40(%rsp), %ymm1
vzeroupper
callq 0x5f0e0
vmovss %xmm0, (%r12,%r13,4)
incq %r13
jmp 0x26c673
incq %r15
jmp 0x26c586
vbroadcastss 0xd8(%rdi,%rdx), %ymm0
vmovaps %ymm0, 0x40(%rsp)
movl 0x30(%r14), %ebx
movl 0x38(%r14), %eax
imull 0x2c(%r14), %ebx
imull 0x34(%r14), %ebx
imull 0x18(%r14), %ebx
xorl %r15d, %r15d
testl %eax, %eax
cmovlel %r15d, %eax
movq %rax, 0x18(%rsp)
cmpq 0x18(%rsp), %r15
je 0x26c8c4
movq (%r14), %r12
movq 0x10(%r14), %rax
movq 0x40(%r14), %r14
movq %r14, 0x28(%rsp)
movq %r15, 0x30(%rsp)
imulq %r15, %r14
movq %rax, 0x20(%rsp)
imulq %rax, %r14
addq %r12, %r14
vmovaps 0x40(%rsp), %ymm0
vmovaps %ymm0, 0xa0(%rsp)
xorl %r13d, %r13d
xorl %r15d, %r15d
leal 0x7(%r15), %eax
cmpl %ebx, %eax
jge 0x26c756
vmovups (%r14), %ymm0
vmovaps %ymm0, 0x60(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x60(%rsp), %rsi
leaq 0xa0(%rsp), %rdx
callq 0x26d60a
vmovups %ymm0, (%r14)
addq $0x20, %r14
addl $0x8, %r15d
addq $0x8, %r13
jmp 0x26c719
vmovaps 0x40(%rsp), %ymm0
vmovaps %xmm0, 0x60(%rsp)
leal 0x3(%r15), %eax
cmpl %ebx, %eax
jge 0x26c7a5
vmovaps (%r14), %xmm0
vmovaps %xmm0, 0x90(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x90(%rsp), %rsi
leaq 0x60(%rsp), %rdx
vzeroupper
callq 0x26d794
vmovaps %xmm0, (%r14)
addq $0x10, %r14
addl $0x4, %r15d
addq $0x4, %r13
jmp 0x26c762
movq 0x28(%rsp), %rax
imulq 0x20(%rsp), %rax
movq 0x30(%rsp), %r15
imulq %r15, %rax
addq %rax, %r12
movq 0x38(%rsp), %r14
cmpl %ebx, %r13d
jge 0x26c7e5
vmovss (%r12,%r13,4), %xmm0
vmovaps 0x40(%rsp), %ymm1
vzeroupper
callq 0x5f170
vmovss %xmm0, (%r12,%r13,4)
incq %r13
jmp 0x26c7c1
incq %r15
jmp 0x26c6d4
vbroadcastss 0xd8(%rdi,%rdx), %ymm0
movl 0x30(%r14), %eax
movl 0x38(%r14), %ecx
imull 0x2c(%r14), %eax
imull 0x34(%r14), %eax
imull 0x18(%r14), %eax
xorl %edx, %edx
testl %ecx, %ecx
cmovlel %edx, %ecx
cmpq %rcx, %rdx
je 0x26c8c4
movq (%r14), %rsi
movq 0x10(%r14), %r9
movq 0x40(%r14), %r8
movq %r8, %r10
imulq %rdx, %r10
imulq %r9, %r10
addq %rsi, %r10
xorl %edi, %edi
xorl %r11d, %r11d
leal 0x7(%r11), %ebx
cmpl %eax, %ebx
jge 0x26c896
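# RDIV inner loop: computes b / x with vrcpps plus one FMA Newton-Raphson
# step (q = b*rcp(x); q -= (q*x - b)*rcp(x)) rather than a full-latency
# vdivps; the scalar tail below keeps the exact vdivss.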
vmovups (%r10), %ymm1
vrcpps %ymm1, %ymm2
vmulps %ymm2, %ymm0, %ymm3
vfmsub213ps %ymm0, %ymm3, %ymm1 # ymm1 = (ymm3 * ymm1) - ymm0
vfnmadd213ps %ymm3, %ymm2, %ymm1 # ymm1 = -(ymm2 * ymm1) + ymm3
vmovups %ymm1, (%r10)
addq $0x20, %r10
addl $0x8, %r11d
addq $0x8, %rdi
jmp 0x26c83c
vmovaps (%r10), %xmm1
vrcpps %xmm1, %xmm2
vmulps %xmm2, %xmm0, %xmm3
vfmsub213ps %xmm0, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm1) - xmm0
vfnmadd213ps %xmm3, %xmm2, %xmm1 # xmm1 = -(xmm2 * xmm1) + xmm3
vmovaps %xmm1, (%r10)
addq $0x10, %r10
addl $0x4, %r11d
addq $0x4, %rdi
leal 0x3(%r11), %ebx
cmpl %eax, %ebx
jl 0x26c86e
imulq %r9, %r8
imulq %rdx, %r8
addq %r8, %rsi
cmpl %eax, %edi
jge 0x26c8bc
vdivss (%rsi,%rdi,4), %xmm0, %xmm1
vmovss %xmm1, (%rsi,%rdi,4)
incq %rdi
jmp 0x26c8a9
incq %rdx
jmp 0x26c815
xorl %eax, %eax
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/binaryop_x86_fma.cpp |
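Every branch above shares one shape: the scalar operand `b` is broadcast once (vbroadcastss), then each channel is swept 8 floats at a time with AVX, 4 at a time with SSE, and element by element for the tail. A minimal sketch of that structure, using the `func`/`func_pack4`/`func_pack8` member names that the binary_op_no_broadcast template further down also uses; this is an illustration of the emitted loop pattern, not ncnn's exact template:

#include <immintrin.h>

// Sketch only: models the 8/4/1 tail-loop pattern the compiler emitted above.
template<typename Op>
static void scalar_inplace_sketch(float* ptr, int size, float b)
{
    Op op;
    __m256 b8 = _mm256_set1_ps(b); // one vbroadcastss, hoisted out of all loops
    __m128 b4 = _mm_set1_ps(b);
    int i = 0;
    for (; i + 7 < size; i += 8)   // vmovups / op / vmovups, 32 bytes per step
    {
        __m256 p = _mm256_loadu_ps(ptr + i);
        _mm256_storeu_ps(ptr + i, op.func_pack8(p, b8));
    }
    for (; i + 3 < size; i += 4)   // vmovaps: the 4-wide loop assumes alignment
    {
        __m128 p = _mm_load_ps(ptr + i);
        _mm_store_ps(ptr + i, op.func_pack4(p, b4));
    }
    for (; i < size; i++)          // scalar tail, one vaddss/vmaxss/... per float
        ptr[i] = op.func(ptr[i], b);
}

For POW/ATAN2-style ops the packed bodies are out-of-line calls (callq 0x26d916 and friends) instead of single instructions, which is why those branches spill the broadcast value to the stack and reload it around every call.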
virtual thunk to ncnn::BinaryOp_x86_fma::forward_inplace(ncnn::Mat&, ncnn::Option const&) const | int BinaryOp_x86_fma::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
{
using namespace BinaryOp_x86_fma_functor;
if (op_type == Operation_ADD) return binary_op_scalar_inplace<binary_op_add>(bottom_top_blob, b, opt);
if (op_type == Operation_SUB) return binary_op_scalar_inplace<binary_op_sub>(bottom_top_blob, b, opt);
if (op_type == Operation_MUL) return binary_op_scalar_inplace<binary_op_mul>(bottom_top_blob, b, opt);
if (op_type == Operation_DIV) return binary_op_scalar_inplace<binary_op_div>(bottom_top_blob, b, opt);
if (op_type == Operation_MAX) return binary_op_scalar_inplace<binary_op_max>(bottom_top_blob, b, opt);
if (op_type == Operation_MIN) return binary_op_scalar_inplace<binary_op_min>(bottom_top_blob, b, opt);
if (op_type == Operation_POW) return binary_op_scalar_inplace<binary_op_pow>(bottom_top_blob, b, opt);
if (op_type == Operation_RSUB) return binary_op_scalar_inplace<binary_op_rsub>(bottom_top_blob, b, opt);
if (op_type == Operation_RDIV) return binary_op_scalar_inplace<binary_op_rdiv>(bottom_top_blob, b, opt);
if (op_type == Operation_RPOW) return binary_op_scalar_inplace<binary_op_rpow>(bottom_top_blob, b, opt);
if (op_type == Operation_ATAN2) return binary_op_scalar_inplace<binary_op_atan2>(bottom_top_blob, b, opt);
if (op_type == Operation_RATAN2) return binary_op_scalar_inplace<binary_op_ratan2>(bottom_top_blob, b, opt);
return 0;
} | pushq %rax
movq (%rdi), %rax
addq -0x58(%rax), %rdi
callq 0x26bd6a
xorl %eax, %eax
popq %rcx
retq
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/binaryop_x86_fma.cpp |
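The thunk is three instructions of substance: load the vptr, add the `this`-adjustment stored at vptr-0x58, and call the non-virtual entry at 0x26bd6a. The trailing xorl %eax,%eax works because the compiler knows forward_inplace always returns 0, so it rematerializes the return value instead of forwarding it. A hedged C++ model of the mechanism; the offset and call target come from the listing, while the types and names are illustrative:

#include <cstddef>

extern int forward_inplace_impl(void* self, void* blob, const void* opt); // 0x26bd6a

int forward_inplace_virtual_thunk(void* self, void* blob, const void* opt)
{
    char* vptr = *(char**)self;                             // movq (%rdi), %rax
    std::ptrdiff_t delta = *(std::ptrdiff_t*)(vptr - 0x58); // -0x58(%rax)
    forward_inplace_impl((char*)self + delta, blob, opt);   // callq 0x26bd6a
    return 0;                                               // xorl %eax, %eax
}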
int ncnn::binary_op_no_broadcast<ncnn::BinaryOp_x86_fma_functor::binary_op_sub>(ncnn::Mat const&, ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) | static int binary_op_no_broadcast(const Mat& a, const Mat& b, Mat& c, const Option& opt)
{
Op op;
const int channels = a.c;
const int size = a.w * a.h * a.d * a.elempack;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
const float* ptr = a.channel(q);
const float* ptr1 = b.channel(q);
float* outptr = c.channel(q);
int i = 0;
#if __SSE2__
#if __AVX__
#if __AVX512F__
for (; i + 15 < size; i += 16)
{
__m512 _p = _mm512_loadu_ps(ptr);
__m512 _p1 = _mm512_loadu_ps(ptr1);
__m512 _outp = op.func_pack16(_p, _p1);
_mm512_storeu_ps(outptr, _outp);
ptr += 16;
ptr1 += 16;
outptr += 16;
}
#endif // __AVX512F__
for (; i + 7 < size; i += 8)
{
__m256 _p = _mm256_loadu_ps(ptr);
__m256 _p1 = _mm256_loadu_ps(ptr1);
__m256 _outp = op.func_pack8(_p, _p1);
_mm256_storeu_ps(outptr, _outp);
ptr += 8;
ptr1 += 8;
outptr += 8;
}
#endif // __AVX__
for (; i + 3 < size; i += 4)
{
__m128 _p = _mm_load_ps(ptr);
__m128 _p1 = _mm_load_ps(ptr1);
__m128 _outp = op.func_pack4(_p, _p1);
_mm_store_ps(outptr, _outp);
ptr += 4;
ptr1 += 4;
outptr += 4;
}
#endif // __SSE2__
for (; i < size; i++)
{
*outptr = op.func(*ptr, *ptr1);
ptr += 1;
ptr1 += 1;
outptr += 1;
}
}
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %rbx
movl 0x30(%rdi), %eax
movl 0x38(%rdi), %ecx
imull 0x2c(%rdi), %eax
imull 0x34(%rdi), %eax
imull 0x18(%rdi), %eax
xorl %r8d, %r8d
testl %ecx, %ecx
cmovlel %r8d, %ecx
cmpq %rcx, %r8
je 0x26c9bc
movq 0x10(%rsi), %r9
imulq 0x40(%rsi), %r9
movq 0x10(%rdi), %r10
imulq %r8, %r9
addq (%rsi), %r9
movq 0x10(%rdx), %r11
imulq 0x40(%rdx), %r11
imulq %r8, %r11
addq (%rdx), %r11
imulq 0x40(%rdi), %r10
imulq %r8, %r10
addq (%rdi), %r10
xorl %ebx, %ebx
xorl %r14d, %r14d
xorl %r15d, %r15d
leal 0x7(%r15), %ebp
cmpl %eax, %ebp
jge 0x26c991
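# Per-channel pointers: %r10 walks a, %r9 walks b, %r11 walks c; the 8-wide
# body below stores c = a - b, with 4-wide and scalar tails following.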
vmovups (%r10,%r14), %ymm0
vsubps (%r9,%r14), %ymm0, %ymm0
vmovups %ymm0, (%r11,%r14)
addl $0x8, %r15d
addq $0x20, %r14
addq $0x8, %rbx
jmp 0x26c94b
vmovaps (%r10,%r14), %xmm0
vsubps (%r9,%r14), %xmm0, %xmm0
vmovaps %xmm0, (%r11,%r14)
addl $0x4, %r15d
addq $0x10, %r14
addq $0x4, %rbx
leal 0x3(%r15), %ebp
cmpl %eax, %ebp
jl 0x26c973
jmp 0x26c9b0
vmovss (%r10,%rbx,4), %xmm0
vsubss (%r9,%rbx,4), %xmm0, %xmm0
vmovss %xmm0, (%r11,%rbx,4)
incq %rbx
cmpl %eax, %ebx
jl 0x26c99b
incq %r8
jmp 0x26c90a
popq %rbx
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/binaryop_x86_fma.cpp |
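The template above is generic over `Op`; for this instantiation the functor reduces to plain subtraction at each width, which is why the asm body is a single vsubps/vsubss per step. A sketch of what such a functor looks like, assuming the member names used in the template (the exact ncnn definition may differ in preprocessor guards and qualifiers):

#include <immintrin.h>

struct binary_op_sub_sketch
{
    float func(const float& x, const float& y) const { return x - y; }
    __m128 func_pack4(const __m128& x, const __m128& y) const { return _mm_sub_ps(x, y); }
    __m256 func_pack8(const __m256& x, const __m256& y) const { return _mm256_sub_ps(x, y); }
};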
int ncnn::binary_op_no_broadcast<ncnn::BinaryOp_x86_fma_functor::binary_op_div>(ncnn::Mat const&, ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) | static int binary_op_no_broadcast(const Mat& a, const Mat& b, Mat& c, const Option& opt)
{
Op op;
const int channels = a.c;
const int size = a.w * a.h * a.d * a.elempack;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
const float* ptr = a.channel(q);
const float* ptr1 = b.channel(q);
float* outptr = c.channel(q);
int i = 0;
#if __SSE2__
#if __AVX__
#if __AVX512F__
for (; i + 15 < size; i += 16)
{
__m512 _p = _mm512_loadu_ps(ptr);
__m512 _p1 = _mm512_loadu_ps(ptr1);
__m512 _outp = op.func_pack16(_p, _p1);
_mm512_storeu_ps(outptr, _outp);
ptr += 16;
ptr1 += 16;
outptr += 16;
}
#endif // __AVX512F__
for (; i + 7 < size; i += 8)
{
__m256 _p = _mm256_loadu_ps(ptr);
__m256 _p1 = _mm256_loadu_ps(ptr1);
__m256 _outp = op.func_pack8(_p, _p1);
_mm256_storeu_ps(outptr, _outp);
ptr += 8;
ptr1 += 8;
outptr += 8;
}
#endif // __AVX__
for (; i + 3 < size; i += 4)
{
__m128 _p = _mm_load_ps(ptr);
__m128 _p1 = _mm_load_ps(ptr1);
__m128 _outp = op.func_pack4(_p, _p1);
_mm_store_ps(outptr, _outp);
ptr += 4;
ptr1 += 4;
outptr += 4;
}
#endif // __SSE2__
for (; i < size; i++)
{
*outptr = op.func(*ptr, *ptr1);
ptr += 1;
ptr1 += 1;
outptr += 1;
}
}
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %rbx
movl 0x30(%rdi), %eax
movl 0x38(%rdi), %ecx
imull 0x2c(%rdi), %eax
imull 0x34(%rdi), %eax
imull 0x18(%rdi), %eax
xorl %r8d, %r8d
testl %ecx, %ecx
cmovlel %r8d, %ecx
cmpq %rcx, %r8
je 0x26cabd
movq 0x10(%rsi), %r9
imulq 0x40(%rsi), %r9
movq 0x10(%rdi), %r10
imulq %r8, %r9
addq (%rsi), %r9
movq 0x10(%rdx), %r11
imulq 0x40(%rdx), %r11
imulq %r8, %r11
addq (%rdx), %r11
imulq 0x40(%rdi), %r10
imulq %r8, %r10
addq (%rdi), %r10
xorl %ebx, %ebx
xorl %r14d, %r14d
xorl %r15d, %r15d
leal 0x7(%r15), %ebp
cmpl %eax, %ebp
jge 0x26ca92
vmovups (%r9,%r14), %ymm0
vrcpps %ymm0, %ymm1
vmovups (%r10,%r14), %ymm2
vmulps %ymm1, %ymm2, %ymm3
vfmsub213ps %ymm2, %ymm3, %ymm0 # ymm0 = (ymm3 * ymm0) - ymm2
vfnmadd213ps %ymm3, %ymm1, %ymm0 # ymm0 = -(ymm1 * ymm0) + ymm3
vmovups %ymm0, (%r11,%r14)
addl $0x8, %r15d
addq $0x20, %r14
addq $0x8, %rbx
jmp 0x26ca28
vmovaps (%r10,%r14), %xmm0
vmovaps (%r9,%r14), %xmm1
vrcpps %xmm1, %xmm2
vmulps %xmm2, %xmm0, %xmm3
vfmsub213ps %xmm0, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm1) - xmm0
vfnmadd213ps %xmm3, %xmm2, %xmm1 # xmm1 = -(xmm2 * xmm1) + xmm3
vmovaps %xmm1, (%r11,%r14)
addl $0x4, %r15d
addq $0x10, %r14
addq $0x4, %rbx
leal 0x3(%r15), %ebp
cmpl %eax, %ebp
jl 0x26ca62
jmp 0x26cab1
vmovss (%r10,%rbx,4), %xmm0
vdivss (%r9,%rbx,4), %xmm0, %xmm0
vmovss %xmm0, (%r11,%rbx,4)
incq %rbx
cmpl %eax, %ebx
jl 0x26ca9c
incq %r8
jmp 0x26c9e7
popq %rbx
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/binaryop_x86_fma.cpp |
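Unlike the sub instantiation, the div body is not a single instruction: the packed paths replace vdivps with vrcpps plus one FMA Newton-Raphson refinement, trading a couple of ULPs for much lower latency, while the scalar tail keeps the exact vdivss. A scalar-model sketch of the packed math, mirroring the annotated FMA comments in the listing above; an illustration, not an ncnn helper:

#include <immintrin.h>

static inline __m256 div_nr_sketch(__m256 a, __m256 b)
{
    __m256 r = _mm256_rcp_ps(b);         // r ~= 1/b, about 12 bits of precision
    __m256 q = _mm256_mul_ps(a, r);      // first approximation of a/b
    __m256 e = _mm256_fmsub_ps(q, b, a); // residual e = q*b - a
    return _mm256_fnmadd_ps(e, r, q);    // one Newton-Raphson step: q - e*r
}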
ncnn::UnaryOp_x86_fma::forward_inplace(ncnn::Mat&, ncnn::Option const&) const | int UnaryOp_x86_fma::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
{
using namespace UnaryOp_x86_fma_functor;
if (op_type == Operation_ABS)
return unary_op_inplace<unary_op_abs>(bottom_top_blob, opt);
if (op_type == Operation_NEG)
return unary_op_inplace<unary_op_neg>(bottom_top_blob, opt);
if (op_type == Operation_FLOOR)
return unary_op_inplace<unary_op_floor>(bottom_top_blob, opt);
if (op_type == Operation_CEIL)
return unary_op_inplace<unary_op_ceil>(bottom_top_blob, opt);
if (op_type == Operation_SQUARE)
return unary_op_inplace<unary_op_square>(bottom_top_blob, opt);
if (op_type == Operation_SQRT)
return unary_op_inplace<unary_op_sqrt>(bottom_top_blob, opt);
if (op_type == Operation_RSQRT)
return unary_op_inplace<unary_op_rsqrt>(bottom_top_blob, opt);
if (op_type == Operation_EXP)
return unary_op_inplace<unary_op_exp>(bottom_top_blob, opt);
if (op_type == Operation_LOG)
return unary_op_inplace<unary_op_log>(bottom_top_blob, opt);
if (op_type == Operation_SIN)
return unary_op_inplace<unary_op_sin>(bottom_top_blob, opt);
if (op_type == Operation_COS)
return unary_op_inplace<unary_op_cos>(bottom_top_blob, opt);
if (op_type == Operation_TAN)
return unary_op_inplace<unary_op_tan>(bottom_top_blob, opt);
if (op_type == Operation_ASIN)
return unary_op_inplace<unary_op_asin>(bottom_top_blob, opt);
if (op_type == Operation_ACOS)
return unary_op_inplace<unary_op_acos>(bottom_top_blob, opt);
if (op_type == Operation_ATAN)
return unary_op_inplace<unary_op_atan>(bottom_top_blob, opt);
if (op_type == Operation_RECIPROCAL)
return unary_op_inplace<unary_op_reciprocal>(bottom_top_blob, opt);
if (op_type == Operation_TANH)
return unary_op_inplace<unary_op_tanh>(bottom_top_blob, opt);
if (op_type == Operation_LOG10)
return unary_op_inplace<unary_op_log10>(bottom_top_blob, opt);
return 0;
} | pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x80, %rsp
movq %rsi, 0x18(%rsp)
movq (%rdi), %rax
movq -0x18(%rax), %rax
movl 0xd0(%rdi,%rax), %eax
cmpq $0x11, %rax
ja 0x281716
leaq 0x177d68(%rip), %rcx # 0x3f8348
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
movq 0x18(%rsp), %rdx
movl 0x30(%rdx), %eax
movl 0x38(%rdx), %ecx
imull 0x2c(%rdx), %eax
imull 0x34(%rdx), %eax
imull 0x18(%rdx), %eax
xorl %edx, %edx
testl %ecx, %ecx
cmovlel %edx, %ecx
vbroadcastss 0x1712f0(%rip), %ymm0 # 0x3f1900
vbroadcastss 0x1712e7(%rip), %xmm1 # 0x3f1900
cmpq %rcx, %rdx
je 0x281716
movq 0x18(%rsp), %rdi
movq (%rdi), %rsi
movq 0x10(%rdi), %r9
movq 0x40(%rdi), %r8
movq %r8, %r10
imulq %rdx, %r10
imulq %r9, %r10
addq %rsi, %r10
xorl %edi, %edi
xorl %r11d, %r11d
leal 0x7(%r11), %ebx
cmpl %eax, %ebx
jge 0x28067b
vandps (%r10), %ymm0, %ymm2
vmovups %ymm2, (%r10)
addq $0x20, %r10
addl $0x8, %r11d
addq $0x8, %rdi
jmp 0x280645
vandps (%r10), %xmm1, %xmm2
vmovaps %xmm2, (%r10)
addq $0x10, %r10
addl $0x4, %r11d
addq $0x4, %rdi
leal 0x3(%r11), %ebx
cmpl %eax, %ebx
jl 0x280665
imulq %r9, %r8
imulq %rdx, %r8
addq %r8, %rsi
cmpl %eax, %edi
jge 0x2806a5
vmovss (%rsi,%rdi,4), %xmm2
vandps %xmm1, %xmm2, %xmm2
vmovss %xmm2, (%rsi,%rdi,4)
incq %rdi
jmp 0x28068e
incq %rdx
jmp 0x280619
movq 0x18(%rsp), %rax
movl 0x30(%rax), %r12d
movl 0x38(%rax), %ecx
imull 0x2c(%rax), %r12d
imull 0x34(%rax), %r12d
imull 0x18(%rax), %r12d
xorl %r15d, %r15d
testl %ecx, %ecx
cmovlel %r15d, %ecx
movq %rcx, 0x20(%rsp)
cmpq 0x20(%rsp), %r15
je 0x281716
movq 0x18(%rsp), %rax
movq (%rax), %rbx
movq 0x10(%rax), %rcx
movq 0x40(%rax), %r14
movq %r14, 0x30(%rsp)
movq %r15, 0x38(%rsp)
imulq %r15, %r14
movq %rcx, 0x28(%rsp)
imulq %rcx, %r14
addq %rbx, %r14
xorl %r13d, %r13d
xorl %r15d, %r15d
leal 0x7(%r15), %eax
cmpl %r12d, %eax
jge 0x280775
vmovups (%r14), %ymm0
vmovaps %ymm0, 0x40(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x40(%rsp), %rsi
callq 0x2828e6
vmovups %ymm0, (%r14)
addq $0x20, %r14
addl $0x8, %r15d
addq $0x8, %r13
jmp 0x280711
vmovaps (%r14), %xmm0
vmovaps %xmm0, 0x40(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x40(%rsp), %rsi
vzeroupper
callq 0x2829f4
vmovaps %xmm0, (%r14)
addq $0x10, %r14
addl $0x4, %r15d
addq $0x4, %r13
leal 0x3(%r15), %eax
cmpl %r12d, %eax
jl 0x280747
movq 0x30(%rsp), %rax
imulq 0x28(%rsp), %rax
movq 0x38(%rsp), %r15
imulq %r15, %rax
addq %rax, %rbx
cmpl %r12d, %r13d
jge 0x2807b3
vmovss (%rbx,%r13,4), %xmm0
vzeroupper
callq 0x5f160
vmovss %xmm0, (%rbx,%r13,4)
incq %r13
jmp 0x280795
incq %r15
jmp 0x2806d6
movq 0x18(%rsp), %rax
movl 0x30(%rax), %r12d
movl 0x38(%rax), %ecx
imull 0x2c(%rax), %r12d
imull 0x34(%rax), %r12d
imull 0x18(%rax), %r12d
xorl %r15d, %r15d
testl %ecx, %ecx
cmovlel %r15d, %ecx
movq %rcx, 0x20(%rsp)
cmpq 0x20(%rsp), %r15
je 0x281716
movq 0x18(%rsp), %rax
movq (%rax), %rbx
movq 0x10(%rax), %rcx
movq 0x40(%rax), %r14
movq %r14, 0x30(%rsp)
movq %r15, 0x38(%rsp)
imulq %r15, %r14
movq %rcx, 0x28(%rsp)
imulq %rcx, %r14
addq %rbx, %r14
xorl %r13d, %r13d
xorl %r15d, %r15d
leal 0x7(%r15), %eax
cmpl %r12d, %eax
jge 0x280883
vmovups (%r14), %ymm0
vmovaps %ymm0, 0x40(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x40(%rsp), %rsi
callq 0x282726
vmovups %ymm0, (%r14)
addq $0x20, %r14
addl $0x8, %r15d
addq $0x8, %r13
jmp 0x28081f
vmovaps (%r14), %xmm0
vmovaps %xmm0, 0x40(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x40(%rsp), %rsi
vzeroupper
callq 0x282806
vmovaps %xmm0, (%r14)
addq $0x10, %r14
addl $0x4, %r15d
addq $0x4, %r13
leal 0x3(%r15), %eax
cmpl %r12d, %eax
jl 0x280855
movq 0x30(%rsp), %rax
imulq 0x28(%rsp), %rax
movq 0x38(%rsp), %r15
imulq %r15, %rax
addq %rax, %rbx
cmpl %r12d, %r13d
jge 0x2808c1
vmovss (%rbx,%r13,4), %xmm0
vzeroupper
callq 0x5f540
vmovss %xmm0, (%rbx,%r13,4)
incq %r13
jmp 0x2808a3
incq %r15
jmp 0x2807e4
movq 0x18(%rsp), %rax
movl 0x30(%rax), %r12d
movl 0x38(%rax), %ecx
imull 0x2c(%rax), %r12d
imull 0x34(%rax), %r12d
imull 0x18(%rax), %r12d
xorl %r15d, %r15d
testl %ecx, %ecx
cmovlel %r15d, %ecx
movq %rcx, 0x20(%rsp)
cmpq 0x20(%rsp), %r15
je 0x281716
movq 0x18(%rsp), %rax
movq (%rax), %rbx
movq 0x10(%rax), %rcx
movq 0x40(%rax), %r14
movq %r14, 0x30(%rsp)
movq %r15, 0x38(%rsp)
imulq %r15, %r14
movq %rcx, 0x28(%rsp)
imulq %rcx, %r14
addq %rbx, %r14
xorl %r13d, %r13d
xorl %r15d, %r15d
leal 0x7(%r15), %eax
cmpl %r12d, %eax
jge 0x280991
vmovups (%r14), %ymm0
vmovaps %ymm0, 0x40(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x40(%rsp), %rsi
callq 0x282016
vmovups %ymm0, (%r14)
addq $0x20, %r14
addl $0x8, %r15d
addq $0x8, %r13
jmp 0x28092d
vmovaps (%r14), %xmm0
vmovaps %xmm0, 0x40(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x40(%rsp), %rsi
vzeroupper
callq 0x2821ce
vmovaps %xmm0, (%r14)
addq $0x10, %r14
addl $0x4, %r15d
addq $0x4, %r13
leal 0x3(%r15), %eax
cmpl %r12d, %eax
jl 0x280963
movq 0x30(%rsp), %rax
imulq 0x28(%rsp), %rax
movq 0x38(%rsp), %r15
imulq %r15, %rax
addq %rax, %rbx
cmpl %r12d, %r13d
jge 0x2809cf
vmovss (%rbx,%r13,4), %xmm0
vzeroupper
callq 0x5f4c0
vmovss %xmm0, (%rbx,%r13,4)
incq %r13
jmp 0x2809b1
incq %r15
jmp 0x2808f2
movq 0x18(%rsp), %rdx
movl 0x30(%rdx), %eax
movl 0x38(%rdx), %ecx
imull 0x2c(%rdx), %eax
imull 0x34(%rdx), %eax
imull 0x18(%rdx), %eax
xorl %edx, %edx
testl %ecx, %ecx
cmovlel %edx, %ecx
cmpq %rcx, %rdx
je 0x281716
movq 0x18(%rsp), %rdi
movq (%rdi), %rsi
movq 0x10(%rdi), %r9
movq 0x40(%rdi), %r8
movq %r8, %r10
imulq %rdx, %r10
imulq %r9, %r10
addq %rsi, %r10
xorl %edi, %edi
xorl %r11d, %r11d
leal 0x7(%r11), %ebx
cmpl %eax, %ebx
jge 0x280a5f
vmovups (%r10), %ymm0
vmulps %ymm0, %ymm0, %ymm0
vmovups %ymm0, (%r10)
addq $0x20, %r10
addl $0x8, %r11d
addq $0x8, %rdi
jmp 0x280a21
vmovaps (%r10), %xmm0
vmulps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, (%r10)
addq $0x10, %r10
addl $0x4, %r11d
addq $0x4, %rdi
leal 0x3(%r11), %ebx
cmpl %eax, %ebx
jl 0x280a45
imulq %r9, %r8
imulq %rdx, %r8
addq %r8, %rsi
cmpl %eax, %edi
jge 0x280a89
vmovss (%rsi,%rdi,4), %xmm0
vmulss %xmm0, %xmm0, %xmm0
vmovss %xmm0, (%rsi,%rdi,4)
incq %rdi
jmp 0x280a72
incq %rdx
jmp 0x2809f5
movq 0x18(%rsp), %rax
movl 0x30(%rax), %r12d
movl 0x38(%rax), %ecx
imull 0x2c(%rax), %r12d
imull 0x34(%rax), %r12d
imull 0x18(%rax), %r12d
xorl %r15d, %r15d
testl %ecx, %ecx
cmovlel %r15d, %ecx
movq %rcx, 0x20(%rsp)
cmpq 0x20(%rsp), %r15
je 0x281716
movq 0x18(%rsp), %rax
movq (%rax), %rbx
movq 0x10(%rax), %rcx
movq 0x40(%rax), %r14
movq %r14, 0x30(%rsp)
movq %r15, 0x38(%rsp)
imulq %r15, %r14
movq %rcx, 0x28(%rsp)
imulq %rcx, %r14
addq %rbx, %r14
xorl %r13d, %r13d
xorl %r15d, %r15d
leal 0x7(%r15), %eax
cmpl %r12d, %eax
jge 0x280b59
vmovups (%r14), %ymm0
vmovaps %ymm0, 0x40(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x40(%rsp), %rsi
callq 0x282330
vmovups %ymm0, (%r14)
addq $0x20, %r14
addl $0x8, %r15d
addq $0x8, %r13
jmp 0x280af5
vmovaps (%r14), %xmm0
vmovaps %xmm0, 0x40(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x40(%rsp), %rsi
vzeroupper
callq 0x282422
vmovaps %xmm0, (%r14)
addq $0x10, %r14
addl $0x4, %r15d
addq $0x4, %r13
leal 0x3(%r15), %eax
cmpl %r12d, %eax
jl 0x280b2b
movq 0x30(%rsp), %rax
imulq 0x28(%rsp), %rax
movq 0x38(%rsp), %r15
imulq %r15, %rax
addq %rax, %rbx
cmpl %r12d, %r13d
jge 0x280b97
vmovss (%rbx,%r13,4), %xmm0
vzeroupper
callq 0x5f240
vmovss %xmm0, (%rbx,%r13,4)
incq %r13
jmp 0x280b79
incq %r15
jmp 0x280aba
movq 0x18(%rsp), %rax
movl 0x30(%rax), %r12d
movl 0x38(%rax), %ecx
imull 0x2c(%rax), %r12d
imull 0x34(%rax), %r12d
imull 0x18(%rax), %r12d
xorl %r15d, %r15d
testl %ecx, %ecx
cmovlel %r15d, %ecx
movq %rcx, 0x20(%rsp)
cmpq 0x20(%rsp), %r15
je 0x281716
movq 0x18(%rsp), %rax
movq (%rax), %rbx
movq 0x10(%rax), %rcx
movq 0x40(%rax), %r14
movq %r14, 0x30(%rsp)
movq %r15, 0x38(%rsp)
imulq %r15, %r14
movq %rcx, 0x28(%rsp)
imulq %rcx, %r14
addq %rbx, %r14
xorl %r13d, %r13d
xorl %r15d, %r15d
leal 0x7(%r15), %eax
cmpl %r12d, %eax
jge 0x280c67
vmovups (%r14), %ymm0
vmovaps %ymm0, 0x40(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x40(%rsp), %rsi
callq 0x281b3e
vmovups %ymm0, (%r14)
addq $0x20, %r14
addl $0x8, %r15d
addq $0x8, %r13
jmp 0x280c03
vmovaps (%r14), %xmm0
vmovaps %xmm0, 0x40(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x40(%rsp), %rsi
vzeroupper
callq 0x281c96
vmovaps %xmm0, (%r14)
addq $0x10, %r14
addl $0x4, %r15d
addq $0x4, %r13
leal 0x3(%r15), %eax
cmpl %r12d, %eax
jl 0x280c39
movq 0x30(%rsp), %rax
imulq 0x28(%rsp), %rax
movq 0x38(%rsp), %r15
imulq %r15, %rax
addq %rax, %rbx
cmpl %r12d, %r13d
jge 0x280ca5
vmovss (%rbx,%r13,4), %xmm0
vzeroupper
callq 0x5f3b0
vmovss %xmm0, (%rbx,%r13,4)
incq %r13
jmp 0x280c87
incq %r15
jmp 0x280bc8
movq 0x18(%rsp), %rdx
movl 0x30(%rdx), %eax
movl 0x38(%rdx), %ecx
imull 0x2c(%rdx), %eax
imull 0x34(%rdx), %eax
imull 0x18(%rdx), %eax
xorl %edx, %edx
testl %ecx, %ecx
cmovlel %edx, %ecx
cmpq %rcx, %rdx
je 0x281716
movq 0x18(%rsp), %rdi
movq (%rdi), %rsi
movq 0x10(%rdi), %r9
movq 0x40(%rdi), %r8
movq %r8, %r10
imulq %rdx, %r10
imulq %r9, %r10
addq %rsi, %r10
xorl %edi, %edi
xorl %r11d, %r11d
leal 0x7(%r11), %ebx
cmpl %eax, %ebx
jge 0x280d2f
vroundps $0x1, (%r10), %ymm0
vmovups %ymm0, (%r10)
addq $0x20, %r10
addl $0x8, %r11d
addq $0x8, %rdi
jmp 0x280cf7
vroundps $0x1, (%r10), %xmm0
vmovaps %xmm0, (%r10)
addq $0x10, %r10
addl $0x4, %r11d
addq $0x4, %rdi
leal 0x3(%r11), %ebx
cmpl %eax, %ebx
jl 0x280d18
imulq %r9, %r8
imulq %rdx, %r8
addq %r8, %rsi
cmpl %eax, %edi
jge 0x280d57
vroundss $0x9, (%rsi,%rdi,4), %xmm1, %xmm0
vmovss %xmm0, (%rsi,%rdi,4)
incq %rdi
jmp 0x280d42
incq %rdx
jmp 0x280ccb
movq 0x18(%rsp), %rdx
movl 0x30(%rdx), %eax
movl 0x38(%rdx), %ecx
imull 0x2c(%rdx), %eax
imull 0x34(%rdx), %eax
imull 0x18(%rdx), %eax
xorl %edx, %edx
testl %ecx, %ecx
cmovlel %edx, %ecx
vbroadcastss 0x16df02(%rip), %ymm0 # 0x3eec88
vbroadcastss 0x16def9(%rip), %xmm1 # 0x3eec88
vmovss 0x16def1(%rip), %xmm2 # 0x3eec88
cmpq %rcx, %rdx
je 0x281716
movq 0x18(%rsp), %rdi
movq (%rdi), %rsi
movq 0x10(%rdi), %r9
movq 0x40(%rdi), %r8
movq %r8, %r10
imulq %rdx, %r10
imulq %r9, %r10
addq %rsi, %r10
xorl %edi, %edi
xorl %r11d, %r11d
leal 0x7(%r11), %ebx
cmpl %eax, %ebx
jge 0x280e15
vmovups (%r10), %ymm3
vrcpps %ymm3, %ymm4
vfmsub213ps %ymm0, %ymm4, %ymm3 # ymm3 = (ymm4 * ymm3) - ymm0
vfnmadd132ps %ymm4, %ymm4, %ymm3 # ymm3 = -(ymm3 * ymm4) + ymm4
vmovups %ymm3, (%r10)
addq $0x20, %r10
addl $0x8, %r11d
addq $0x8, %rdi
jmp 0x280dc3
vmovaps (%r10), %xmm3
vrcpps %xmm3, %xmm4
vfmsub213ps %xmm1, %xmm4, %xmm3 # xmm3 = (xmm4 * xmm3) - xmm1
vfnmadd132ps %xmm4, %xmm4, %xmm3 # xmm3 = -(xmm3 * xmm4) + xmm4
vmovaps %xmm3, (%r10)
addq $0x10, %r10
addl $0x4, %r11d
addq $0x4, %rdi
leal 0x3(%r11), %ebx
cmpl %eax, %ebx
jl 0x280df1
imulq %r9, %r8
imulq %rdx, %r8
addq %r8, %rsi
cmpl %eax, %edi
jge 0x280e3b
vdivss (%rsi,%rdi,4), %xmm2, %xmm3
vmovss %xmm3, (%rsi,%rdi,4)
incq %rdi
jmp 0x280e28
incq %rdx
jmp 0x280d97
movq 0x18(%rsp), %rdx
movl 0x30(%rdx), %eax
movl 0x38(%rdx), %ecx
imull 0x2c(%rdx), %eax
imull 0x34(%rdx), %eax
imull 0x18(%rdx), %eax
xorl %edx, %edx
testl %ecx, %ecx
cmovlel %edx, %ecx
cmpq %rcx, %rdx
je 0x281716
movq 0x18(%rsp), %rdi
movq (%rdi), %rsi
movq 0x10(%rdi), %r9
movq 0x40(%rdi), %r8
movq %r8, %r10
imulq %rdx, %r10
imulq %r9, %r10
addq %rsi, %r10
xorl %edi, %edi
xorl %r11d, %r11d
leal 0x7(%r11), %ebx
cmpl %eax, %ebx
jge 0x280ec5
vroundps $0x2, (%r10), %ymm0
vmovups %ymm0, (%r10)
addq $0x20, %r10
addl $0x8, %r11d
addq $0x8, %rdi
jmp 0x280e8d
vroundps $0x2, (%r10), %xmm0
vmovaps %xmm0, (%r10)
addq $0x10, %r10
addl $0x4, %r11d
addq $0x4, %rdi
leal 0x3(%r11), %ebx
cmpl %eax, %ebx
jl 0x280eae
imulq %r9, %r8
imulq %rdx, %r8
addq %r8, %rsi
cmpl %eax, %edi
jge 0x280eed
vroundss $0xa, (%rsi,%rdi,4), %xmm1, %xmm0
vmovss %xmm0, (%rsi,%rdi,4)
incq %rdi
jmp 0x280ed8
incq %rdx
jmp 0x280e61
movq 0x18(%rsp), %rax
movl 0x30(%rax), %r12d
movl 0x38(%rax), %ecx
imull 0x2c(%rax), %r12d
imull 0x34(%rax), %r12d
imull 0x18(%rax), %r12d
xorl %r15d, %r15d
testl %ecx, %ecx
cmovlel %r15d, %ecx
movq %rcx, 0x20(%rsp)
cmpq 0x20(%rsp), %r15
je 0x281716
movq 0x18(%rsp), %rax
movq (%rax), %rbx
movq 0x10(%rax), %rcx
movq 0x40(%rax), %r14
movq %r14, 0x30(%rsp)
movq %r15, 0x38(%rsp)
imulq %r15, %r14
movq %rcx, 0x28(%rsp)
imulq %rcx, %r14
addq %rbx, %r14
xorl %r13d, %r13d
xorl %r15d, %r15d
leal 0x7(%r15), %eax
cmpl %r12d, %eax
jge 0x280fbd
vmovups (%r14), %ymm0
vmovaps %ymm0, 0x40(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x40(%rsp), %rsi
callq 0x28173c
vmovups %ymm0, (%r14)
addq $0x20, %r14
addl $0x8, %r15d
addq $0x8, %r13
jmp 0x280f59
vmovaps (%r14), %xmm0
vmovaps %xmm0, 0x40(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x40(%rsp), %rsi
vzeroupper
callq 0x281826
vmovaps %xmm0, (%r14)
addq $0x10, %r14
addl $0x4, %r15d
addq $0x4, %r13
leal 0x3(%r15), %eax
cmpl %r12d, %eax
jl 0x280f8f
movq 0x30(%rsp), %rax
imulq 0x28(%rsp), %rax
movq 0x38(%rsp), %r15
imulq %r15, %rax
addq %rax, %rbx
cmpl %r12d, %r13d
jge 0x280ffb
vmovss (%rbx,%r13,4), %xmm0
vzeroupper
callq 0x5f410
vmovss %xmm0, (%rbx,%r13,4)
incq %r13
jmp 0x280fdd
incq %r15
jmp 0x280f1e
movq 0x18(%rsp), %rdx
movl 0x30(%rdx), %eax
movl 0x38(%rdx), %ecx
imull 0x2c(%rdx), %eax
imull 0x34(%rdx), %eax
imull 0x18(%rdx), %eax
xorl %edx, %edx
testl %ecx, %ecx
cmovlel %edx, %ecx
vbroadcastss 0x17018a(%rip), %ymm0 # 0x3f11b4
vbroadcastss 0x170181(%rip), %xmm1 # 0x3f11b4
cmpq %rcx, %rdx
je 0x281716
movq 0x18(%rsp), %rdi
movq (%rdi), %rsi
movq 0x10(%rdi), %r9
movq 0x40(%rdi), %r8
movq %r8, %r10
imulq %rdx, %r10
imulq %r9, %r10
addq %rsi, %r10
xorl %edi, %edi
xorl %r11d, %r11d
leal 0x7(%r11), %ebx
cmpl %eax, %ebx
jge 0x281095
vxorps (%r10), %ymm0, %ymm2
vmovups %ymm2, (%r10)
addq $0x20, %r10
addl $0x8, %r11d
addq $0x8, %rdi
jmp 0x28105f
vxorps (%r10), %xmm1, %xmm2
vmovaps %xmm2, (%r10)
addq $0x10, %r10
addl $0x4, %r11d
addq $0x4, %rdi
leal 0x3(%r11), %ebx
cmpl %eax, %ebx
jl 0x28107f
imulq %r9, %r8
imulq %rdx, %r8
addq %r8, %rsi
cmpl %eax, %edi
jge 0x2810bf
vmovss (%rsi,%rdi,4), %xmm2
vxorps %xmm1, %xmm2, %xmm2
vmovss %xmm2, (%rsi,%rdi,4)
incq %rdi
jmp 0x2810a8
incq %rdx
jmp 0x281033
movq 0x18(%rsp), %rax
movl 0x30(%rax), %r12d
movl 0x38(%rax), %ecx
imull 0x2c(%rax), %r12d
imull 0x34(%rax), %r12d
imull 0x18(%rax), %r12d
xorl %r15d, %r15d
testl %ecx, %ecx
cmovlel %r15d, %ecx
movq %rcx, 0x20(%rsp)
cmpq 0x20(%rsp), %r15
je 0x281716
movq 0x18(%rsp), %rax
movq (%rax), %rbx
movq 0x10(%rax), %rcx
movq 0x40(%rax), %r14
movq %r14, 0x30(%rsp)
movq %r15, 0x38(%rsp)
imulq %r15, %r14
movq %rcx, 0x28(%rsp)
imulq %rcx, %r14
addq %rbx, %r14
xorl %r13d, %r13d
xorl %r15d, %r15d
leal 0x7(%r15), %eax
cmpl %r12d, %eax
jge 0x28118f
vmovups (%r14), %ymm0
vmovaps %ymm0, 0x40(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x40(%rsp), %rsi
callq 0x282afa
vmovups %ymm0, (%r14)
addq $0x20, %r14
addl $0x8, %r15d
addq $0x8, %r13
jmp 0x28112b
vmovaps (%r14), %xmm0
vmovaps %xmm0, 0x40(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x40(%rsp), %rsi
vzeroupper
callq 0x282c36
vmovaps %xmm0, (%r14)
addq $0x10, %r14
addl $0x4, %r15d
addq $0x4, %r13
leal 0x3(%r15), %eax
cmpl %r12d, %eax
jl 0x281161
movq 0x30(%rsp), %rax
imulq 0x28(%rsp), %rax
movq 0x38(%rsp), %r15
imulq %r15, %rax
addq %rax, %rbx
cmpl %r12d, %r13d
jge 0x2811cd
vmovss (%rbx,%r13,4), %xmm0
vzeroupper
callq 0x5f560
vmovss %xmm0, (%rbx,%r13,4)
incq %r13
jmp 0x2811af
incq %r15
jmp 0x2810f0
movq 0x18(%rsp), %rdx
movl 0x30(%rdx), %eax
movl 0x38(%rdx), %ecx
imull 0x2c(%rdx), %eax
imull 0x34(%rdx), %eax
imull 0x18(%rdx), %eax
xorl %edx, %edx
testl %ecx, %ecx
cmovlel %edx, %ecx
vbroadcastss 0x1745c8(%rip), %ymm0 # 0x3f57c4
vbroadcastss 0x17071f(%rip), %ymm1 # 0x3f1924
vbroadcastss 0x1706f2(%rip), %ymm2 # 0x3f1900
vbroadcastss 0x16ffc9(%rip), %ymm3 # 0x3f11e0
vbroadcastss 0x1745a4(%rip), %xmm4 # 0x3f57c4
vbroadcastss 0x1706fb(%rip), %xmm5 # 0x3f1924
vbroadcastss 0x1706ce(%rip), %xmm6 # 0x3f1900
vbroadcastss 0x16ffa5(%rip), %xmm7 # 0x3f11e0
cmpq %rcx, %rdx
je 0x281716
movq 0x18(%rsp), %rdi
movq (%rdi), %rsi
movq 0x10(%rdi), %r9
movq 0x40(%rdi), %r8
movq %r8, %r10
imulq %rdx, %r10
imulq %r9, %r10
addq %rsi, %r10
xorl %edi, %edi
xorl %r11d, %r11d
leal 0x7(%r11), %ebx
cmpl %eax, %ebx
jge 0x2812eb
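# SQRT branch, packed body: sqrt(x) via vrsqrtps plus one Newton-Raphson step,
# then lanes whose |x| falls below the 0x3f11e0 constant (presumably the
# smallest normal float) are masked to zero so rsqrt(0) = inf cannot leak.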
vmovups (%r10), %ymm8
vrsqrtps %ymm8, %ymm9
vmulps %ymm9, %ymm8, %ymm10
vfmadd213ps %ymm0, %ymm10, %ymm9 # ymm9 = (ymm10 * ymm9) + ymm0
vmulps %ymm1, %ymm10, %ymm10
vmulps %ymm9, %ymm10, %ymm9
vandps %ymm2, %ymm8, %ymm8
vcmpleps %ymm8, %ymm3, %ymm8
vandps %ymm9, %ymm8, %ymm8
vmovups %ymm8, (%r10)
addq $0x20, %r10
addl $0x8, %r11d
addq $0x8, %rdi
jmp 0x281267
vmovaps (%r10), %xmm8
vrsqrtps %xmm8, %xmm9
vmulps %xmm9, %xmm8, %xmm10
vfmadd213ps %xmm4, %xmm10, %xmm9 # xmm9 = (xmm10 * xmm9) + xmm4
vmulps %xmm5, %xmm10, %xmm10
vmulps %xmm9, %xmm10, %xmm9
vandps %xmm6, %xmm8, %xmm8
vcmpleps %xmm8, %xmm7, %xmm8
vandps %xmm9, %xmm8, %xmm8
vmovaps %xmm8, (%r10)
addq $0x10, %r10
addl $0x4, %r11d
addq $0x4, %rdi
leal 0x3(%r11), %ebx
cmpl %eax, %ebx
jl 0x2812ae
imulq %r9, %r8
imulq %rdx, %r8
addq %r8, %rsi
cmpl %eax, %edi
jge 0x281311
vsqrtss (%rsi,%rdi,4), %xmm11, %xmm8
vmovss %xmm8, (%rsi,%rdi,4)
incq %rdi
jmp 0x2812fe
incq %rdx
jmp 0x28123b
movq 0x18(%rsp), %rax
movl 0x30(%rax), %r12d
movl 0x38(%rax), %ecx
imull 0x2c(%rax), %r12d
imull 0x34(%rax), %r12d
imull 0x18(%rax), %r12d
xorl %r15d, %r15d
testl %ecx, %ecx
cmovlel %r15d, %ecx
movq %rcx, 0x20(%rsp)
cmpq 0x20(%rsp), %r15
je 0x281716
movq 0x18(%rsp), %rax
movq (%rax), %rbx
movq 0x10(%rax), %rcx
movq 0x40(%rax), %r14
movq %r14, 0x30(%rsp)
movq %r15, 0x38(%rsp)
imulq %r15, %r14
movq %rcx, 0x28(%rsp)
imulq %rcx, %r14
addq %rbx, %r14
xorl %r13d, %r13d
xorl %r15d, %r15d
leal 0x7(%r15), %eax
cmpl %r12d, %eax
jge 0x2813e1
vmovups (%r14), %ymm0
vmovaps %ymm0, 0x40(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x40(%rsp), %rsi
callq 0x282512
vmovups %ymm0, (%r14)
addq $0x20, %r14
addl $0x8, %r15d
addq $0x8, %r13
jmp 0x28137d
vmovaps (%r14), %xmm0
vmovaps %xmm0, 0x40(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x40(%rsp), %rsi
vzeroupper
callq 0x28261c
vmovaps %xmm0, (%r14)
addq $0x10, %r14
addl $0x4, %r15d
addq $0x4, %r13
leal 0x3(%r15), %eax
cmpl %r12d, %eax
jl 0x2813b3
movq 0x30(%rsp), %rax
imulq 0x28(%rsp), %rax
movq 0x38(%rsp), %r15
imulq %r15, %rax
addq %rax, %rbx
cmpl %r12d, %r13d
jge 0x28141f
vmovss (%rbx,%r13,4), %xmm0
vzeroupper
callq 0x5f0a0
vmovss %xmm0, (%rbx,%r13,4)
incq %r13
jmp 0x281401
incq %r15
jmp 0x281342
movq 0x18(%rsp), %rdx
movl 0x30(%rdx), %eax
movl 0x38(%rdx), %ecx
imull 0x2c(%rdx), %eax
imull 0x34(%rdx), %eax
imull 0x18(%rdx), %eax
xorl %edx, %edx
testl %ecx, %ecx
cmovlel %edx, %ecx
vmovss 0x174377(%rip), %xmm0 # 0x3f57c4
vmovss 0x1704cf(%rip), %xmm1 # 0x3f1924
cmpq %rcx, %rdx
je 0x281716
movq 0x18(%rsp), %rdi
movq (%rdi), %rsi
movq 0x10(%rdi), %r9
movq 0x40(%rdi), %r8
movq %r8, %r10
imulq %rdx, %r10
imulq %r9, %r10
addq %rsi, %r10
xorl %edi, %edi
xorl %r11d, %r11d
leal 0x7(%r11), %ebx
cmpl %eax, %ebx
jge 0x2814b7
vrsqrtps (%r10), %ymm2
vmovups %ymm2, (%r10)
addq $0x20, %r10
addl $0x8, %r11d
addq $0x8, %rdi
jmp 0x281481
vrsqrtps (%r10), %xmm2
vmovaps %xmm2, (%r10)
addq $0x10, %r10
addl $0x4, %r11d
addq $0x4, %rdi
leal 0x3(%r11), %ebx
cmpl %eax, %ebx
jl 0x2814a1
imulq %r9, %r8
imulq %rdx, %r8
addq %r8, %rsi
cmpl %eax, %edi
jge 0x2814f2
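# Scalar tail of the RSQRT branch: note the asymmetry with the packed loop
# above, which stores the raw vrsqrtps approximation unrefined, while this
# tail refines rsqrtss with one Newton-Raphson step (fmadd/mul against the
# 0x3f57c4 and 0x3f1924 constants, which look like -3.0f and -0.5f).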
vmovss (%rsi,%rdi,4), %xmm2
vrsqrtss %xmm2, %xmm2, %xmm3
vmulss %xmm3, %xmm2, %xmm2
vfmadd213ss %xmm0, %xmm3, %xmm2 # xmm2 = (xmm3 * xmm2) + xmm0
vmulss %xmm1, %xmm3, %xmm3
vmulss %xmm2, %xmm3, %xmm2
vmovss %xmm2, (%rsi,%rdi,4)
incq %rdi
jmp 0x2814ca
incq %rdx
jmp 0x281455
movq 0x18(%rsp), %rax
movl 0x30(%rax), %r12d
movl 0x38(%rax), %ecx
imull 0x2c(%rax), %r12d
imull 0x34(%rax), %r12d
imull 0x18(%rax), %r12d
xorl %r15d, %r15d
testl %ecx, %ecx
cmovlel %r15d, %ecx
movq %rcx, 0x20(%rsp)
cmpq 0x20(%rsp), %r15
je 0x281716
movq 0x18(%rsp), %rax
movq (%rax), %rbx
movq 0x10(%rax), %rcx
movq 0x40(%rax), %r14
movq %r14, 0x30(%rsp)
movq %r15, 0x38(%rsp)
imulq %r15, %r14
movq %rcx, 0x28(%rsp)
imulq %rcx, %r14
addq %rbx, %r14
xorl %r13d, %r13d
xorl %r15d, %r15d
leal 0x7(%r15), %eax
cmpl %r12d, %eax
jge 0x2815c2
vmovups (%r14), %ymm0
vmovaps %ymm0, 0x40(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x40(%rsp), %rsi
callq 0x281dac
vmovups %ymm0, (%r14)
addq $0x20, %r14
addl $0x8, %r15d
addq $0x8, %r13
jmp 0x28155e
vmovaps (%r14), %xmm0
vmovaps %xmm0, 0x40(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x40(%rsp), %rsi
vzeroupper
callq 0x281f00
vmovaps %xmm0, (%r14)
addq $0x10, %r14
addl $0x4, %r15d
addq $0x4, %r13
leal 0x3(%r15), %eax
cmpl %r12d, %eax
jl 0x281594
movq 0x30(%rsp), %rax
imulq 0x28(%rsp), %rax
movq 0x38(%rsp), %r15
imulq %r15, %rax
addq %rax, %rbx
cmpl %r12d, %r13d
jge 0x281600
vmovss (%rbx,%r13,4), %xmm0
vzeroupper
callq 0x5f4a0
vmovss %xmm0, (%rbx,%r13,4)
incq %r13
jmp 0x2815e2
incq %r15
jmp 0x281523
movq 0x18(%rsp), %rax
movl 0x30(%rax), %r12d
movl 0x38(%rax), %ecx
imull 0x2c(%rax), %r12d
imull 0x34(%rax), %r12d
imull 0x18(%rax), %r12d
xorl %r15d, %r15d
testl %ecx, %ecx
cmovlel %r15d, %ecx
movq %rcx, 0x20(%rsp)
cmpq 0x20(%rsp), %r15
je 0x281716
movq 0x18(%rsp), %rax
movq (%rax), %rbx
movq 0x10(%rax), %rcx
movq 0x40(%rax), %r14
movq %r14, 0x30(%rsp)
movq %r15, 0x38(%rsp)
imulq %r15, %r14
movq %rcx, 0x28(%rsp)
imulq %rcx, %r14
addq %rbx, %r14
xorl %r13d, %r13d
xorl %r15d, %r15d
leal 0x7(%r15), %eax
cmpl %r12d, %eax
jge 0x2816d0
vmovups (%r14), %ymm0
vmovaps %ymm0, 0x40(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x40(%rsp), %rsi
callq 0x2818f4
vmovups %ymm0, (%r14)
addq $0x20, %r14
addl $0x8, %r15d
addq $0x8, %r13
jmp 0x28166c
vmovaps (%r14), %xmm0
vmovaps %xmm0, 0x40(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x40(%rsp), %rsi
vzeroupper
callq 0x281a22
vmovaps %xmm0, (%r14)
addq $0x10, %r14
addl $0x4, %r15d
addq $0x4, %r13
leal 0x3(%r15), %eax
cmpl %r12d, %eax
jl 0x2816a2
movq 0x30(%rsp), %rax
imulq 0x28(%rsp), %rax
movq 0x38(%rsp), %r15
imulq %r15, %rax
addq %rax, %rbx
cmpl %r12d, %r13d
jge 0x28170e
vmovss (%rbx,%r13,4), %xmm0
vzeroupper
callq 0x5f200
vmovss %xmm0, (%rbx,%r13,4)
incq %r13
jmp 0x2816f0
incq %r15
jmp 0x281631
xorl %eax, %eax
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/unaryop_x86_fma.cpp |
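In the SQRT branch above, the compiler lowered the square root to vrsqrtps plus one Newton-Raphson step and a mask against tiny inputs, a typical fast-math style lowering. A packed sketch of that math, assuming the four broadcast constants decode to -3.0f, -0.5f, a sign-stripping mask, and the smallest normal float (all four read off the listing, not verified against the binary's data section):

#include <immintrin.h>
#include <cfloat>

static inline __m256 sqrt_rsqrt_sketch(__m256 x)
{
    __m256 r   = _mm256_rsqrt_ps(x);                           // r ~= 1/sqrt(x)
    __m256 t   = _mm256_mul_ps(x, r);                          // t = x*r ~= sqrt(x)
    __m256 h   = _mm256_fmadd_ps(t, r, _mm256_set1_ps(-3.0f)); // x*r*r - 3
    __m256 y   = _mm256_mul_ps(_mm256_mul_ps(t, _mm256_set1_ps(-0.5f)), h);
    __m256 abs = _mm256_andnot_ps(_mm256_set1_ps(-0.0f), x);   // clear sign bit
    __m256 ok  = _mm256_cmp_ps(_mm256_set1_ps(FLT_MIN), abs, _CMP_LE_OS);
    return _mm256_and_ps(y, ok); // zero out lanes where rsqrt would blow up
}

The result is 0.5*t*(3 - x*r*r) with t = x*r, which is exactly the standard one-step refinement of x*rsqrt(x) toward sqrt(x).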
ncnn::UnaryOp_x86_fma_functor::unary_op_exp::func_pack8(float vector[8] const&) const | static NCNN_FORCEINLINE __m256 exp256_ps(__m256 x)
{
__m256 tmp = _mm256_setzero_ps(), fx;
__m256i imm0;
__m256 one = *(__m256*)_ps256_1;
x = _mm256_min_ps(x, *(__m256*)_ps256_exp_hi);
x = _mm256_max_ps(x, *(__m256*)_ps256_exp_lo);
/* express exp(x) as exp(g + n*log(2)) */
fx = _mm256_comp_fmadd_ps(x, *(__m256*)_ps256_cephes_LOG2EF, *(__m256*)_ps256_0p5);
/* how to perform a floorf with SSE: just below */
//imm0 = _mm256_cvttps_epi32(fx);
//tmp = _mm256_cvtepi32_ps(imm0);
tmp = _mm256_floor_ps(fx);
/* if greater, subtract 1 */
//__m256 mask = _mm256_cmpgt_ps(tmp, fx);
__m256 mask = _mm256_cmp_ps(tmp, fx, _CMP_GT_OS);
mask = _mm256_and_ps(mask, one);
fx = _mm256_sub_ps(tmp, mask);
// x = x - fx * exp_C1
x = _mm256_comp_fnmadd_ps(fx, *(__m256*)_ps256_cephes_exp_C1, x);
// x = x - fx * exp_C2
x = _mm256_comp_fnmadd_ps(fx, *(__m256*)_ps256_cephes_exp_C2, x);
tmp = _mm256_mul_ps(x, x);
__m256 y = *(__m256*)_ps256_cephes_exp_p0;
y = _mm256_comp_fmadd_ps(y, x, *(__m256*)_ps256_cephes_exp_p1);
y = _mm256_comp_fmadd_ps(y, x, *(__m256*)_ps256_cephes_exp_p2);
y = _mm256_comp_fmadd_ps(y, x, *(__m256*)_ps256_cephes_exp_p3);
y = _mm256_comp_fmadd_ps(y, x, *(__m256*)_ps256_cephes_exp_p4);
y = _mm256_comp_fmadd_ps(y, x, *(__m256*)_ps256_cephes_exp_p5);
y = _mm256_comp_fmadd_ps(y, tmp, x);
y = _mm256_add_ps(y, one);
/* build 2^n */
imm0 = _mm256_cvttps_epi32(fx);
// another two AVX2 instructions
imm0 = _mm256_comp_add_epi32(imm0, *(__m256i*)_pi32_256_0x7f);
imm0 = _mm256_comp_slli_epi32(imm0, 23);
__m256 pow2n = _mm256_castsi256_ps(imm0);
y = _mm256_mul_ps(y, pow2n);
return y;
} | vbroadcastss 0x16fa73(%rip), %ymm0 # 0x3f11b8
vminps (%rsi), %ymm0, %ymm0
vbroadcastss 0x16fa6a(%rip), %ymm1 # 0x3f11bc
vmaxps %ymm1, %ymm0, %ymm1
vbroadcastss 0x16c8b5(%rip), %ymm0 # 0x3ee014
vbroadcastss 0x16fa58(%rip), %ymm2 # 0x3f11c0
vfmadd213ps %ymm0, %ymm1, %ymm2 # ymm2 = (ymm1 * ymm2) + ymm0
vroundps $0x1, %ymm2, %ymm3
vbroadcastss 0x16d50c(%rip), %ymm4 # 0x3eec88
vcmpltps %ymm3, %ymm2, %ymm2
vandps %ymm4, %ymm2, %ymm2
vbroadcastss 0x16fa36(%rip), %ymm4 # 0x3f11c4
vsubps %ymm2, %ymm3, %ymm2
vfmsub213ps %ymm1, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm4) - ymm1
vbroadcastss 0x170f7c(%rip), %ymm1 # 0x3f271c
vbroadcastss 0x16fa27(%rip), %ymm3 # 0x3f11d0
vbroadcastss 0x16fa1a(%rip), %ymm5 # 0x3f11cc
vfmsub213ps %ymm4, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm1) - ymm4
vfmadd213ps %ymm3, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm3
vbroadcastss 0x16fa0f(%rip), %ymm3 # 0x3f11d4
vmulps %ymm1, %ymm1, %ymm4
vfmadd231ps %ymm5, %ymm1, %ymm3 # ymm3 = (ymm1 * ymm5) + ymm3
vbroadcastss 0x16fa01(%rip), %ymm5 # 0x3f11d8
vfmadd231ps %ymm3, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm3) + ymm5
vbroadcastss 0x16f9f7(%rip), %ymm3 # 0x3f11dc
vfmadd231ps %ymm5, %ymm1, %ymm3 # ymm3 = (ymm1 * ymm5) + ymm3
vfmadd213ps %ymm0, %ymm1, %ymm3 # ymm3 = (ymm1 * ymm3) + ymm0
vfmadd213ps %ymm1, %ymm4, %ymm3 # ymm3 = (ymm4 * ymm3) + ymm1
vcvttps2dq %ymm2, %ymm0
vpslld $0x17, %xmm0, %xmm1
vextractf128 $0x1, %ymm0, %xmm0
vpslld $0x17, %xmm0, %xmm0
vbroadcastss 0x16d477(%rip), %xmm2 # 0x3eec88
vpaddd %xmm2, %xmm0, %xmm0
vpaddd %xmm2, %xmm1, %xmm1
vinsertf128 $0x1, %xmm0, %ymm1, %ymm0
vfmadd213ps %ymm0, %ymm3, %ymm0 # ymm0 = (ymm3 * ymm0) + ymm0
retq
nop
| /csukuangfj[P]ncnn/src/layer/x86/avx_mathfun.h |
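Two details in the tail of this listing reward a second look. First, the source's `y + one` never appears as an add: the final vfmadd213ps computes y*pow2n + pow2n, i.e. (y + 1)*2^n in one instruction. Second, 2^n itself is built purely in the integer domain; the compiler folded (n + 0x7f) << 23 into (n << 23) + 0x3f800000 (the constant at 0x3eec88 is 1.0f, whose bit pattern is 127 << 23), which yields the same bits. A standalone scalar model of that trick:

#include <cstdint>
#include <cstring>

// Builds the float 2^n by writing n+127 into the exponent field directly.
// Valid for normal results, i.e. -126 <= n <= 127.
static inline float pow2n_sketch(int n)
{
    uint32_t bits = (uint32_t)(n + 127) << 23; // biased exponent, zero mantissa
    float f;
    std::memcpy(&f, &bits, sizeof f);          // bit-cast without conversion
    return f;
}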
ncnn::UnaryOp_x86_avx::forward_inplace(ncnn::Mat&, ncnn::Option const&) const | int UnaryOp_x86_avx::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
{
using namespace UnaryOp_x86_avx_functor;
if (op_type == Operation_ABS)
return unary_op_inplace<unary_op_abs>(bottom_top_blob, opt);
if (op_type == Operation_NEG)
return unary_op_inplace<unary_op_neg>(bottom_top_blob, opt);
if (op_type == Operation_FLOOR)
return unary_op_inplace<unary_op_floor>(bottom_top_blob, opt);
if (op_type == Operation_CEIL)
return unary_op_inplace<unary_op_ceil>(bottom_top_blob, opt);
if (op_type == Operation_SQUARE)
return unary_op_inplace<unary_op_square>(bottom_top_blob, opt);
if (op_type == Operation_SQRT)
return unary_op_inplace<unary_op_sqrt>(bottom_top_blob, opt);
if (op_type == Operation_RSQRT)
return unary_op_inplace<unary_op_rsqrt>(bottom_top_blob, opt);
if (op_type == Operation_EXP)
return unary_op_inplace<unary_op_exp>(bottom_top_blob, opt);
if (op_type == Operation_LOG)
return unary_op_inplace<unary_op_log>(bottom_top_blob, opt);
if (op_type == Operation_SIN)
return unary_op_inplace<unary_op_sin>(bottom_top_blob, opt);
if (op_type == Operation_COS)
return unary_op_inplace<unary_op_cos>(bottom_top_blob, opt);
if (op_type == Operation_TAN)
return unary_op_inplace<unary_op_tan>(bottom_top_blob, opt);
if (op_type == Operation_ASIN)
return unary_op_inplace<unary_op_asin>(bottom_top_blob, opt);
if (op_type == Operation_ACOS)
return unary_op_inplace<unary_op_acos>(bottom_top_blob, opt);
if (op_type == Operation_ATAN)
return unary_op_inplace<unary_op_atan>(bottom_top_blob, opt);
if (op_type == Operation_RECIPROCAL)
return unary_op_inplace<unary_op_reciprocal>(bottom_top_blob, opt);
if (op_type == Operation_TANH)
return unary_op_inplace<unary_op_tanh>(bottom_top_blob, opt);
if (op_type == Operation_LOG10)
return unary_op_inplace<unary_op_log10>(bottom_top_blob, opt);
return 0;
} | pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x80, %rsp
movq %rsi, 0x18(%rsp)
movq (%rdi), %rax
movq -0x18(%rax), %rax
movl 0xd0(%rdi,%rax), %eax
cmpq $0x11, %rax
ja 0x283f35
leaq 0x1755e4(%rip), %rcx # 0x3f83c8
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
movq 0x18(%rsp), %rdx
movl 0x30(%rdx), %eax
movl 0x38(%rdx), %ecx
imull 0x2c(%rdx), %eax
imull 0x34(%rdx), %eax
imull 0x18(%rdx), %eax
xorl %edx, %edx
testl %ecx, %ecx
cmovlel %edx, %ecx
vbroadcastss 0x16eaec(%rip), %ymm0 # 0x3f1900
vbroadcastss 0x16eae3(%rip), %xmm1 # 0x3f1900
cmpq %rcx, %rdx
je 0x283f35
movq 0x18(%rsp), %rdi
movq (%rdi), %rsi
movq 0x10(%rdi), %r9
movq 0x40(%rdi), %r8
movq %r8, %r10
imulq %rdx, %r10
imulq %r9, %r10
addq %rsi, %r10
xorl %edi, %edi
xorl %r11d, %r11d
leal 0x7(%r11), %ebx
cmpl %eax, %ebx
jge 0x282e7f
vandps (%r10), %ymm0, %ymm2
vmovups %ymm2, (%r10)
addq $0x20, %r10
addl $0x8, %r11d
addq $0x8, %rdi
jmp 0x282e49
vandps (%r10), %xmm1, %xmm2
vmovaps %xmm2, (%r10)
addq $0x10, %r10
addl $0x4, %r11d
addq $0x4, %rdi
leal 0x3(%r11), %ebx
cmpl %eax, %ebx
jl 0x282e69
imulq %r9, %r8
imulq %rdx, %r8
addq %r8, %rsi
cmpl %eax, %edi
jge 0x282ea9
vmovss (%rsi,%rdi,4), %xmm2
vandps %xmm1, %xmm2, %xmm2
vmovss %xmm2, (%rsi,%rdi,4)
incq %rdi
jmp 0x282e92
incq %rdx
jmp 0x282e1d
movq 0x18(%rsp), %rax
movl 0x30(%rax), %r12d
movl 0x38(%rax), %ecx
imull 0x2c(%rax), %r12d
imull 0x34(%rax), %r12d
imull 0x18(%rax), %r12d
xorl %r15d, %r15d
testl %ecx, %ecx
cmovlel %r15d, %ecx
movq %rcx, 0x20(%rsp)
cmpq 0x20(%rsp), %r15
je 0x283f35
movq 0x18(%rsp), %rax
movq (%rax), %rbx
movq 0x10(%rax), %rcx
movq 0x40(%rax), %r14
movq %r14, 0x30(%rsp)
movq %r15, 0x38(%rsp)
imulq %r15, %r14
movq %rcx, 0x28(%rsp)
imulq %rcx, %r14
addq %rbx, %r14
xorl %r13d, %r13d
xorl %r15d, %r15d
leal 0x7(%r15), %eax
cmpl %r12d, %eax
jge 0x282f79
vmovups (%r14), %ymm0
vmovaps %ymm0, 0x40(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x40(%rsp), %rsi
callq 0x2851c4
vmovups %ymm0, (%r14)
addq $0x20, %r14
addl $0x8, %r15d
addq $0x8, %r13
jmp 0x282f15
vmovaps (%r14), %xmm0
vmovaps %xmm0, 0x40(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x40(%rsp), %rsi
vzeroupper
callq 0x2852fa
vmovaps %xmm0, (%r14)
addq $0x10, %r14
addl $0x4, %r15d
addq $0x4, %r13
leal 0x3(%r15), %eax
cmpl %r12d, %eax
jl 0x282f4b
movq 0x30(%rsp), %rax
imulq 0x28(%rsp), %rax
movq 0x38(%rsp), %r15
imulq %r15, %rax
addq %rax, %rbx
cmpl %r12d, %r13d
jge 0x282fb7
vmovss (%rbx,%r13,4), %xmm0
vzeroupper
callq 0x5f160
vmovss %xmm0, (%rbx,%r13,4)
incq %r13
jmp 0x282f99
incq %r15
jmp 0x282eda
movq 0x18(%rsp), %rax
movl 0x30(%rax), %r12d
movl 0x38(%rax), %ecx
imull 0x2c(%rax), %r12d
imull 0x34(%rax), %r12d
imull 0x18(%rax), %r12d
xorl %r15d, %r15d
testl %ecx, %ecx
cmovlel %r15d, %ecx
movq %rcx, 0x20(%rsp)
cmpq 0x20(%rsp), %r15
je 0x283f35
movq 0x18(%rsp), %rax
movq (%rax), %rbx
movq 0x10(%rax), %rcx
movq 0x40(%rax), %r14
movq %r14, 0x30(%rsp)
movq %r15, 0x38(%rsp)
imulq %r15, %r14
movq %rcx, 0x28(%rsp)
imulq %rcx, %r14
addq %rbx, %r14
xorl %r13d, %r13d
xorl %r15d, %r15d
leal 0x7(%r15), %eax
cmpl %r12d, %eax
jge 0x283087
vmovups (%r14), %ymm0
vmovaps %ymm0, 0x40(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x40(%rsp), %rsi
callq 0x284fc0
vmovups %ymm0, (%r14)
addq $0x20, %r14
addl $0x8, %r15d
addq $0x8, %r13
jmp 0x283023
vmovaps (%r14), %xmm0
vmovaps %xmm0, 0x40(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x40(%rsp), %rsi
vzeroupper
callq 0x2850c2
vmovaps %xmm0, (%r14)
addq $0x10, %r14
addl $0x4, %r15d
addq $0x4, %r13
leal 0x3(%r15), %eax
cmpl %r12d, %eax
jl 0x283059
movq 0x30(%rsp), %rax
imulq 0x28(%rsp), %rax
movq 0x38(%rsp), %r15
imulq %r15, %rax
addq %rax, %rbx
cmpl %r12d, %r13d
jge 0x2830c5
vmovss (%rbx,%r13,4), %xmm0
vzeroupper
callq 0x5f540
vmovss %xmm0, (%rbx,%r13,4)
incq %r13
jmp 0x2830a7
incq %r15
jmp 0x282fe8
movq 0x18(%rsp), %rax
movl 0x30(%rax), %r12d
movl 0x38(%rax), %ecx
imull 0x2c(%rax), %r12d
imull 0x34(%rax), %r12d
imull 0x18(%rax), %r12d
xorl %r15d, %r15d
testl %ecx, %ecx
cmovlel %r15d, %ecx
movq %rcx, 0x20(%rsp)
cmpq 0x20(%rsp), %r15
je 0x283f35
movq 0x18(%rsp), %rax
movq (%rax), %rbx
movq 0x10(%rax), %rcx
movq 0x40(%rax), %r14
movq %r14, 0x30(%rsp)
movq %r15, 0x38(%rsp)
imulq %r15, %r14
movq %rcx, 0x28(%rsp)
imulq %rcx, %r14
addq %rbx, %r14
xorl %r13d, %r13d
xorl %r15d, %r15d
leal 0x7(%r15), %eax
cmpl %r12d, %eax
jge 0x283195
vmovups (%r14), %ymm0
vmovaps %ymm0, 0x40(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x40(%rsp), %rsi
callq 0x28484e
vmovups %ymm0, (%r14)
addq $0x20, %r14
addl $0x8, %r15d
addq $0x8, %r13
jmp 0x283131
vmovaps (%r14), %xmm0
vmovaps %xmm0, 0x40(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x40(%rsp), %rsi
vzeroupper
callq 0x284a08
vmovaps %xmm0, (%r14)
addq $0x10, %r14
addl $0x4, %r15d
addq $0x4, %r13
leal 0x3(%r15), %eax
cmpl %r12d, %eax
jl 0x283167
movq 0x30(%rsp), %rax
imulq 0x28(%rsp), %rax
movq 0x38(%rsp), %r15
imulq %r15, %rax
addq %rax, %rbx
cmpl %r12d, %r13d
jge 0x2831d3
vmovss (%rbx,%r13,4), %xmm0
vzeroupper
callq 0x5f4c0
vmovss %xmm0, (%rbx,%r13,4)
incq %r13
jmp 0x2831b5
incq %r15
jmp 0x2830f6
movq 0x18(%rsp), %rdx
movl 0x30(%rdx), %eax
movl 0x38(%rdx), %ecx
imull 0x2c(%rdx), %eax
imull 0x34(%rdx), %eax
imull 0x18(%rdx), %eax
xorl %edx, %edx
testl %ecx, %ecx
cmovlel %edx, %ecx
cmpq %rcx, %rdx
je 0x283f35
movq 0x18(%rsp), %rdi
movq (%rdi), %rsi
movq 0x10(%rdi), %r9
movq 0x40(%rdi), %r8
movq %r8, %r10
imulq %rdx, %r10
imulq %r9, %r10
addq %rsi, %r10
xorl %edi, %edi
xorl %r11d, %r11d
leal 0x7(%r11), %ebx
cmpl %eax, %ebx
jge 0x283263
vmovups (%r10), %ymm0
vmulps %ymm0, %ymm0, %ymm0
vmovups %ymm0, (%r10)
addq $0x20, %r10
addl $0x8, %r11d
addq $0x8, %rdi
jmp 0x283225
vmovaps (%r10), %xmm0
vmulps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, (%r10)
addq $0x10, %r10
addl $0x4, %r11d
addq $0x4, %rdi
leal 0x3(%r11), %ebx
cmpl %eax, %ebx
jl 0x283249
imulq %r9, %r8
imulq %rdx, %r8
addq %r8, %rsi
cmpl %eax, %edi
jge 0x28328d
vmovss (%rsi,%rdi,4), %xmm0
vmulss %xmm0, %xmm0, %xmm0
vmovss %xmm0, (%rsi,%rdi,4)
incq %rdi
jmp 0x283276
incq %rdx
jmp 0x2831f9
movq 0x18(%rsp), %rax
movl 0x30(%rax), %r12d
movl 0x38(%rax), %ecx
imull 0x2c(%rax), %r12d
imull 0x34(%rax), %r12d
imull 0x18(%rax), %r12d
xorl %r15d, %r15d
testl %ecx, %ecx
cmovlel %r15d, %ecx
movq %rcx, 0x20(%rsp)
cmpq 0x20(%rsp), %r15
je 0x283f35
movq 0x18(%rsp), %rax
movq (%rax), %rbx
movq 0x10(%rax), %rcx
movq 0x40(%rax), %r14
movq %r14, 0x30(%rsp)
movq %r15, 0x38(%rsp)
imulq %r15, %r14
movq %rcx, 0x28(%rsp)
imulq %rcx, %r14
addq %rbx, %r14
xorl %r13d, %r13d
xorl %r15d, %r15d
leal 0x7(%r15), %eax
cmpl %r12d, %eax
jge 0x28335d
vmovups (%r14), %ymm0
vmovaps %ymm0, 0x40(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x40(%rsp), %rsi
callq 0x284b6c
vmovups %ymm0, (%r14)
addq $0x20, %r14
addl $0x8, %r15d
addq $0x8, %r13
jmp 0x2832f9
vmovaps (%r14), %xmm0
vmovaps %xmm0, 0x40(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x40(%rsp), %rsi
vzeroupper
callq 0x284c78
vmovaps %xmm0, (%r14)
addq $0x10, %r14
addl $0x4, %r15d
addq $0x4, %r13
leal 0x3(%r15), %eax
cmpl %r12d, %eax
jl 0x28332f
movq 0x30(%rsp), %rax
imulq 0x28(%rsp), %rax
movq 0x38(%rsp), %r15
imulq %r15, %rax
addq %rax, %rbx
cmpl %r12d, %r13d
jge 0x28339b
vmovss (%rbx,%r13,4), %xmm0
vzeroupper
callq 0x5f240
vmovss %xmm0, (%rbx,%r13,4)
incq %r13
jmp 0x28337d
incq %r15
jmp 0x2832be
movq 0x18(%rsp), %rax
movl 0x30(%rax), %r12d
movl 0x38(%rax), %ecx
imull 0x2c(%rax), %r12d
imull 0x34(%rax), %r12d
imull 0x18(%rax), %r12d
xorl %r15d, %r15d
testl %ecx, %ecx
cmovlel %r15d, %ecx
movq %rcx, 0x20(%rsp)
cmpq 0x20(%rsp), %r15
je 0x283f35
movq 0x18(%rsp), %rax
movq (%rax), %rbx
movq 0x10(%rax), %rcx
movq 0x40(%rax), %r14
movq %r14, 0x30(%rsp)
movq %r15, 0x38(%rsp)
imulq %r15, %r14
movq %rcx, 0x28(%rsp)
imulq %rcx, %r14
addq %rbx, %r14
xorl %r13d, %r13d
xorl %r15d, %r15d
leal 0x7(%r15), %eax
cmpl %r12d, %eax
jge 0x28346b
vmovups (%r14), %ymm0
vmovaps %ymm0, 0x40(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x40(%rsp), %rsi
callq 0x2843a2
vmovups %ymm0, (%r14)
addq $0x20, %r14
addl $0x8, %r15d
addq $0x8, %r13
jmp 0x283407
vmovaps (%r14), %xmm0
vmovaps %xmm0, 0x40(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x40(%rsp), %rsi
vzeroupper
callq 0x2844f0
vmovaps %xmm0, (%r14)
addq $0x10, %r14
addl $0x4, %r15d
addq $0x4, %r13
leal 0x3(%r15), %eax
cmpl %r12d, %eax
jl 0x28343d
movq 0x30(%rsp), %rax
imulq 0x28(%rsp), %rax
movq 0x38(%rsp), %r15
imulq %r15, %rax
addq %rax, %rbx
cmpl %r12d, %r13d
jge 0x2834a9
vmovss (%rbx,%r13,4), %xmm0
vzeroupper
callq 0x5f3b0
vmovss %xmm0, (%rbx,%r13,4)
incq %r13
jmp 0x28348b
incq %r15
jmp 0x2833cc
movq 0x18(%rsp), %rdx
movl 0x30(%rdx), %eax
movl 0x38(%rdx), %ecx
imull 0x2c(%rdx), %eax
imull 0x34(%rdx), %eax
imull 0x18(%rdx), %eax
xorl %edx, %edx
testl %ecx, %ecx
cmovlel %edx, %ecx
cmpq %rcx, %rdx
je 0x283f35
movq 0x18(%rsp), %rdi
movq (%rdi), %rsi
movq 0x10(%rdi), %r9
movq 0x40(%rdi), %r8
movq %r8, %r10
imulq %rdx, %r10
imulq %r9, %r10
addq %rsi, %r10
xorl %edi, %edi
xorl %r11d, %r11d
leal 0x7(%r11), %ebx
cmpl %eax, %ebx
jge 0x283533
vroundps $0x1, (%r10), %ymm0
vmovups %ymm0, (%r10)
addq $0x20, %r10
addl $0x8, %r11d
addq $0x8, %rdi
jmp 0x2834fb
vroundps $0x1, (%r10), %xmm0
vmovaps %xmm0, (%r10)
addq $0x10, %r10
addl $0x4, %r11d
addq $0x4, %rdi
leal 0x3(%r11), %ebx
cmpl %eax, %ebx
jl 0x28351c
imulq %r9, %r8
imulq %rdx, %r8
addq %r8, %rsi
cmpl %eax, %edi
jge 0x28355b
vroundss $0x9, (%rsi,%rdi,4), %xmm1, %xmm0
vmovss %xmm0, (%rsi,%rdi,4)
incq %rdi
jmp 0x283546
incq %rdx
jmp 0x2834cf
movq 0x18(%rsp), %rdx
movl 0x30(%rdx), %eax
movl 0x38(%rdx), %ecx
imull 0x2c(%rdx), %eax
imull 0x34(%rdx), %eax
imull 0x18(%rdx), %eax
xorl %edx, %edx
testl %ecx, %ecx
cmovlel %edx, %ecx
vbroadcastss 0x16b6fe(%rip), %ymm0 # 0x3eec88
vbroadcastss 0x16b6f5(%rip), %xmm1 # 0x3eec88
vmovss 0x16b6ed(%rip), %xmm2 # 0x3eec88
cmpq %rcx, %rdx
je 0x283f35
movq 0x18(%rsp), %rdi
movq (%rdi), %rsi
movq 0x10(%rdi), %r9
movq 0x40(%rdi), %r8
movq %r8, %r10
imulq %rdx, %r10
imulq %r9, %r10
addq %rsi, %r10
xorl %edi, %edi
xorl %r11d, %r11d
leal 0x7(%r11), %ebx
cmpl %eax, %ebx
jge 0x283625
vmovups (%r10), %ymm3
vrcpps %ymm3, %ymm4
vmulps %ymm4, %ymm3, %ymm3
vsubps %ymm3, %ymm0, %ymm3
vmulps %ymm3, %ymm4, %ymm3
vaddps %ymm3, %ymm4, %ymm3
vmovups %ymm3, (%r10)
addq $0x20, %r10
addl $0x8, %r11d
addq $0x8, %rdi
jmp 0x2835c7
vmovaps (%r10), %xmm3
vrcpps %xmm3, %xmm4
vmulps %xmm4, %xmm3, %xmm3
vsubps %xmm3, %xmm1, %xmm3
vmulps %xmm3, %xmm4, %xmm3
vaddps %xmm3, %xmm4, %xmm3
vmovaps %xmm3, (%r10)
addq $0x10, %r10
addl $0x4, %r11d
addq $0x4, %rdi
leal 0x3(%r11), %ebx
cmpl %eax, %ebx
jl 0x2835fb
imulq %r9, %r8
imulq %rdx, %r8
addq %r8, %rsi
cmpl %eax, %edi
jge 0x28364b
vdivss (%rsi,%rdi,4), %xmm2, %xmm3
vmovss %xmm3, (%rsi,%rdi,4)
incq %rdi
jmp 0x283638
incq %rdx
jmp 0x28359b
movq 0x18(%rsp), %rdx
movl 0x30(%rdx), %eax
movl 0x38(%rdx), %ecx
imull 0x2c(%rdx), %eax
imull 0x34(%rdx), %eax
imull 0x18(%rdx), %eax
xorl %edx, %edx
testl %ecx, %ecx
cmovlel %edx, %ecx
cmpq %rcx, %rdx
je 0x283f35
movq 0x18(%rsp), %rdi
movq (%rdi), %rsi
movq 0x10(%rdi), %r9
movq 0x40(%rdi), %r8
movq %r8, %r10
imulq %rdx, %r10
imulq %r9, %r10
addq %rsi, %r10
xorl %edi, %edi
xorl %r11d, %r11d
leal 0x7(%r11), %ebx
cmpl %eax, %ebx
jge 0x2836d5
vroundps $0x2, (%r10), %ymm0
vmovups %ymm0, (%r10)
addq $0x20, %r10
addl $0x8, %r11d
addq $0x8, %rdi
jmp 0x28369d
vroundps $0x2, (%r10), %xmm0
vmovaps %xmm0, (%r10)
addq $0x10, %r10
addl $0x4, %r11d
addq $0x4, %rdi
leal 0x3(%r11), %ebx
cmpl %eax, %ebx
jl 0x2836be
imulq %r9, %r8
imulq %rdx, %r8
addq %r8, %rsi
cmpl %eax, %edi
jge 0x2836fd
vroundss $0xa, (%rsi,%rdi,4), %xmm1, %xmm0
vmovss %xmm0, (%rsi,%rdi,4)
incq %rdi
jmp 0x2836e8
incq %rdx
jmp 0x283671
movq 0x18(%rsp), %rax
movl 0x30(%rax), %r12d
movl 0x38(%rax), %ecx
imull 0x2c(%rax), %r12d
imull 0x34(%rax), %r12d
imull 0x18(%rax), %r12d
xorl %r15d, %r15d
testl %ecx, %ecx
cmovlel %r15d, %ecx
movq %rcx, 0x20(%rsp)
cmpq 0x20(%rsp), %r15
je 0x283f35
movq 0x18(%rsp), %rax
movq (%rax), %rbx
movq 0x10(%rax), %rcx
movq 0x40(%rax), %r14
movq %r14, 0x30(%rsp)
movq %r15, 0x38(%rsp)
imulq %r15, %r14
movq %rcx, 0x28(%rsp)
imulq %rcx, %r14
addq %rbx, %r14
xorl %r13d, %r13d
xorl %r15d, %r15d
leal 0x7(%r15), %eax
cmpl %r12d, %eax
jge 0x2837cd
vmovups (%r14), %ymm0
vmovaps %ymm0, 0x40(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x40(%rsp), %rsi
callq 0x283f5c
vmovups %ymm0, (%r14)
addq $0x20, %r14
addl $0x8, %r15d
addq $0x8, %r13
jmp 0x283769
vmovaps (%r14), %xmm0
vmovaps %xmm0, 0x40(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x40(%rsp), %rsi
vzeroupper
callq 0x284052
vmovaps %xmm0, (%r14)
addq $0x10, %r14
addl $0x4, %r15d
addq $0x4, %r13
leal 0x3(%r15), %eax
cmpl %r12d, %eax
jl 0x28379f
movq 0x30(%rsp), %rax
imulq 0x28(%rsp), %rax
movq 0x38(%rsp), %r15
imulq %r15, %rax
addq %rax, %rbx
cmpl %r12d, %r13d
jge 0x28380b
vmovss (%rbx,%r13,4), %xmm0
vzeroupper
callq 0x5f410
vmovss %xmm0, (%rbx,%r13,4)
incq %r13
jmp 0x2837ed
incq %r15
jmp 0x28372e
movq 0x18(%rsp), %rdx
movl 0x30(%rdx), %eax
movl 0x38(%rdx), %ecx
imull 0x2c(%rdx), %eax
imull 0x34(%rdx), %eax
imull 0x18(%rdx), %eax
xorl %edx, %edx
testl %ecx, %ecx
cmovlel %edx, %ecx
vbroadcastss 0x16d97a(%rip), %ymm0 # 0x3f11b4
vbroadcastss 0x16d971(%rip), %xmm1 # 0x3f11b4
cmpq %rcx, %rdx
je 0x283f35
movq 0x18(%rsp), %rdi
movq (%rdi), %rsi
movq 0x10(%rdi), %r9
movq 0x40(%rdi), %r8
movq %r8, %r10
imulq %rdx, %r10
imulq %r9, %r10
addq %rsi, %r10
xorl %edi, %edi
xorl %r11d, %r11d
leal 0x7(%r11), %ebx
cmpl %eax, %ebx
jge 0x2838a5
vxorps (%r10), %ymm0, %ymm2
vmovups %ymm2, (%r10)
addq $0x20, %r10
addl $0x8, %r11d
addq $0x8, %rdi
jmp 0x28386f
vxorps (%r10), %xmm1, %xmm2
vmovaps %xmm2, (%r10)
addq $0x10, %r10
addl $0x4, %r11d
addq $0x4, %rdi
leal 0x3(%r11), %ebx
cmpl %eax, %ebx
jl 0x28388f
imulq %r9, %r8
imulq %rdx, %r8
addq %r8, %rsi
cmpl %eax, %edi
jge 0x2838cf
vmovss (%rsi,%rdi,4), %xmm2
vxorps %xmm1, %xmm2, %xmm2
vmovss %xmm2, (%rsi,%rdi,4)
incq %rdi
jmp 0x2838b8
incq %rdx
jmp 0x283843
movq 0x18(%rsp), %rax
movl 0x30(%rax), %r12d
movl 0x38(%rax), %ecx
imull 0x2c(%rax), %r12d
imull 0x34(%rax), %r12d
imull 0x18(%rax), %r12d
xorl %r15d, %r15d
testl %ecx, %ecx
cmovlel %r15d, %ecx
movq %rcx, 0x20(%rsp)
cmpq 0x20(%rsp), %r15
je 0x283f35
movq 0x18(%rsp), %rax
movq (%rax), %rbx
movq 0x10(%rax), %rcx
movq 0x40(%rax), %r14
movq %r14, 0x30(%rsp)
movq %r15, 0x38(%rsp)
imulq %r15, %r14
movq %rcx, 0x28(%rsp)
imulq %rcx, %r14
addq %rbx, %r14
xorl %r13d, %r13d
xorl %r15d, %r15d
leal 0x7(%r15), %eax
cmpl %r12d, %eax
jge 0x28399f
vmovups (%r14), %ymm0
vmovaps %ymm0, 0x40(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x40(%rsp), %rsi
callq 0x285414
vmovups %ymm0, (%r14)
addq $0x20, %r14
addl $0x8, %r15d
addq $0x8, %r13
jmp 0x28393b
vmovaps (%r14), %xmm0
vmovaps %xmm0, 0x40(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x40(%rsp), %rsi
vzeroupper
callq 0x285564
vmovaps %xmm0, (%r14)
addq $0x10, %r14
addl $0x4, %r15d
addq $0x4, %r13
leal 0x3(%r15), %eax
cmpl %r12d, %eax
jl 0x283971
movq 0x30(%rsp), %rax
imulq 0x28(%rsp), %rax
movq 0x38(%rsp), %r15
imulq %r15, %rax
addq %rax, %rbx
cmpl %r12d, %r13d
jge 0x2839dd
vmovss (%rbx,%r13,4), %xmm0
vzeroupper
callq 0x5f560
vmovss %xmm0, (%rbx,%r13,4)
incq %r13
jmp 0x2839bf
incq %r15
jmp 0x283900
movq 0x18(%rsp), %rdx
movl 0x30(%rdx), %eax
movl 0x38(%rdx), %ecx
imull 0x2c(%rdx), %eax
imull 0x34(%rdx), %eax
imull 0x18(%rdx), %eax
xorl %edx, %edx
testl %ecx, %ecx
cmovlel %edx, %ecx
vbroadcastss 0x16df18(%rip), %ymm0 # 0x3f1924
vbroadcastss 0x171daf(%rip), %ymm1 # 0x3f57c4
vbroadcastss 0x16dee2(%rip), %ymm2 # 0x3f1900
vbroadcastss 0x16d7b9(%rip), %ymm3 # 0x3f11e0
vbroadcastss 0x16def4(%rip), %xmm4 # 0x3f1924
vbroadcastss 0x171d8b(%rip), %xmm5 # 0x3f57c4
vbroadcastss 0x16debe(%rip), %xmm6 # 0x3f1900
vbroadcastss 0x16d795(%rip), %xmm7 # 0x3f11e0
cmpq %rcx, %rdx
je 0x283f35
movq 0x18(%rsp), %rdi
movq (%rdi), %rsi
movq 0x10(%rdi), %r9
movq 0x40(%rdi), %r8
movq %r8, %r10
imulq %rdx, %r10
imulq %r9, %r10
addq %rsi, %r10
xorl %edi, %edi
xorl %r11d, %r11d
leal 0x7(%r11), %ebx
cmpl %eax, %ebx
jge 0x283b07
vmovups (%r10), %ymm8
vrsqrtps %ymm8, %ymm9
vmulps %ymm9, %ymm8, %ymm10
vmulps %ymm0, %ymm10, %ymm11
vmulps %ymm9, %ymm10, %ymm9
vaddps %ymm1, %ymm9, %ymm9
vmulps %ymm9, %ymm11, %ymm9
vandps %ymm2, %ymm8, %ymm8
vcmpleps %ymm8, %ymm3, %ymm8
vandps %ymm9, %ymm8, %ymm8
vmovups %ymm8, (%r10)
addq $0x20, %r10
addl $0x8, %r11d
addq $0x8, %rdi
jmp 0x283a77
vmovaps (%r10), %xmm8
vrsqrtps %xmm8, %xmm9
vmulps %xmm9, %xmm8, %xmm10
vmulps %xmm4, %xmm10, %xmm11
vmulps %xmm9, %xmm10, %xmm9
vaddps %xmm5, %xmm9, %xmm9
vmulps %xmm9, %xmm11, %xmm9
vandps %xmm6, %xmm8, %xmm8
vcmpleps %xmm8, %xmm7, %xmm8
vandps %xmm9, %xmm8, %xmm8
vmovaps %xmm8, (%r10)
addq $0x10, %r10
addl $0x4, %r11d
addq $0x4, %rdi
leal 0x3(%r11), %ebx
cmpl %eax, %ebx
jl 0x283ac6
imulq %r9, %r8
imulq %rdx, %r8
addq %r8, %rsi
cmpl %eax, %edi
jge 0x283b2d
vsqrtss (%rsi,%rdi,4), %xmm12, %xmm8
vmovss %xmm8, (%rsi,%rdi,4)
incq %rdi
jmp 0x283b1a
incq %rdx
jmp 0x283a4b
movq 0x18(%rsp), %rax
movl 0x30(%rax), %r12d
movl 0x38(%rax), %ecx
imull 0x2c(%rax), %r12d
imull 0x34(%rax), %r12d
imull 0x18(%rax), %r12d
xorl %r15d, %r15d
testl %ecx, %ecx
cmovlel %r15d, %ecx
movq %rcx, 0x20(%rsp)
cmpq 0x20(%rsp), %r15
je 0x283f35
movq 0x18(%rsp), %rax
movq (%rax), %rbx
movq 0x10(%rax), %rcx
movq 0x40(%rax), %r14
movq %r14, 0x30(%rsp)
movq %r15, 0x38(%rsp)
imulq %r15, %r14
movq %rcx, 0x28(%rsp)
imulq %rcx, %r14
addq %rbx, %r14
xorl %r13d, %r13d
xorl %r15d, %r15d
leal 0x7(%r15), %eax
cmpl %r12d, %eax
jge 0x283bfd
vmovups (%r14), %ymm0
vmovaps %ymm0, 0x40(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x40(%rsp), %rsi
callq 0x284d84
vmovups %ymm0, (%r14)
addq $0x20, %r14
addl $0x8, %r15d
addq $0x8, %r13
jmp 0x283b99
vmovaps (%r14), %xmm0
vmovaps %xmm0, 0x40(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x40(%rsp), %rsi
vzeroupper
callq 0x284ea2
vmovaps %xmm0, (%r14)
addq $0x10, %r14
addl $0x4, %r15d
addq $0x4, %r13
leal 0x3(%r15), %eax
cmpl %r12d, %eax
jl 0x283bcf
movq 0x30(%rsp), %rax
imulq 0x28(%rsp), %rax
movq 0x38(%rsp), %r15
imulq %r15, %rax
addq %rax, %rbx
cmpl %r12d, %r13d
jge 0x283c3b
vmovss (%rbx,%r13,4), %xmm0
vzeroupper
callq 0x5f0a0
vmovss %xmm0, (%rbx,%r13,4)
incq %r13
jmp 0x283c1d
incq %r15
jmp 0x283b5e
movq 0x18(%rsp), %rdx
movl 0x30(%rdx), %eax
movl 0x38(%rdx), %ecx
imull 0x2c(%rdx), %eax
imull 0x34(%rdx), %eax
imull 0x18(%rdx), %eax
xorl %edx, %edx
testl %ecx, %ecx
cmovlel %edx, %ecx
vmovss 0x171b5b(%rip), %xmm0 # 0x3f57c4
vmovss 0x16dcb3(%rip), %xmm1 # 0x3f1924
cmpq %rcx, %rdx
je 0x283f35
movq 0x18(%rsp), %rdi
movq (%rdi), %rsi
movq 0x10(%rdi), %r9
movq 0x40(%rdi), %r8
movq %r8, %r10
imulq %rdx, %r10
imulq %r9, %r10
addq %rsi, %r10
xorl %edi, %edi
xorl %r11d, %r11d
leal 0x7(%r11), %ebx
cmpl %eax, %ebx
jge 0x283cd3
vrsqrtps (%r10), %ymm2
vmovups %ymm2, (%r10)
addq $0x20, %r10
addl $0x8, %r11d
addq $0x8, %rdi
jmp 0x283c9d
vrsqrtps (%r10), %xmm2
vmovaps %xmm2, (%r10)
addq $0x10, %r10
addl $0x4, %r11d
addq $0x4, %rdi
leal 0x3(%r11), %ebx
cmpl %eax, %ebx
jl 0x283cbd
imulq %r9, %r8
imulq %rdx, %r8
addq %r8, %rsi
cmpl %eax, %edi
jge 0x283d11
vmovss (%rsi,%rdi,4), %xmm2
vrsqrtss %xmm2, %xmm2, %xmm3
vmulss %xmm3, %xmm2, %xmm2
vmulss %xmm3, %xmm2, %xmm2
vaddss %xmm0, %xmm2, %xmm2
vmulss %xmm1, %xmm3, %xmm3
vmulss %xmm2, %xmm3, %xmm2
vmovss %xmm2, (%rsi,%rdi,4)
incq %rdi
jmp 0x283ce6
incq %rdx
jmp 0x283c71
movq 0x18(%rsp), %rax
movl 0x30(%rax), %r12d
movl 0x38(%rax), %ecx
imull 0x2c(%rax), %r12d
imull 0x34(%rax), %r12d
imull 0x18(%rax), %r12d
xorl %r15d, %r15d
testl %ecx, %ecx
cmovlel %r15d, %ecx
movq %rcx, 0x20(%rsp)
cmpq 0x20(%rsp), %r15
je 0x283f35
movq 0x18(%rsp), %rax
movq (%rax), %rbx
movq 0x10(%rax), %rcx
movq 0x40(%rax), %r14
movq %r14, 0x30(%rsp)
movq %r15, 0x38(%rsp)
imulq %r15, %r14
movq %rcx, 0x28(%rsp)
imulq %rcx, %r14
addq %rbx, %r14
xorl %r13d, %r13d
xorl %r15d, %r15d
leal 0x7(%r15), %eax
cmpl %r12d, %eax
jge 0x283de1
vmovups (%r14), %ymm0
vmovaps %ymm0, 0x40(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x40(%rsp), %rsi
callq 0x2845fa
vmovups %ymm0, (%r14)
addq $0x20, %r14
addl $0x8, %r15d
addq $0x8, %r13
jmp 0x283d7d
vmovaps (%r14), %xmm0
vmovaps %xmm0, 0x40(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x40(%rsp), %rsi
vzeroupper
callq 0x284744
vmovaps %xmm0, (%r14)
addq $0x10, %r14
addl $0x4, %r15d
addq $0x4, %r13
leal 0x3(%r15), %eax
cmpl %r12d, %eax
jl 0x283db3
movq 0x30(%rsp), %rax
imulq 0x28(%rsp), %rax
movq 0x38(%rsp), %r15
imulq %r15, %rax
addq %rax, %rbx
cmpl %r12d, %r13d
jge 0x283e1f
vmovss (%rbx,%r13,4), %xmm0
vzeroupper
callq 0x5f4a0
vmovss %xmm0, (%rbx,%r13,4)
incq %r13
jmp 0x283e01
incq %r15
jmp 0x283d42
movq 0x18(%rsp), %rax
movl 0x30(%rax), %r12d
movl 0x38(%rax), %ecx
imull 0x2c(%rax), %r12d
imull 0x34(%rax), %r12d
imull 0x18(%rax), %r12d
xorl %r15d, %r15d
testl %ecx, %ecx
cmovlel %r15d, %ecx
movq %rcx, 0x20(%rsp)
cmpq 0x20(%rsp), %r15
je 0x283f35
movq 0x18(%rsp), %rax
movq (%rax), %rbx
movq 0x10(%rax), %rcx
movq 0x40(%rax), %r14
movq %r14, 0x30(%rsp)
movq %r15, 0x38(%rsp)
imulq %r15, %r14
movq %rcx, 0x28(%rsp)
imulq %rcx, %r14
addq %rbx, %r14
xorl %r13d, %r13d
xorl %r15d, %r15d
leal 0x7(%r15), %eax
cmpl %r12d, %eax
jge 0x283eef
vmovups (%r14), %ymm0
vmovaps %ymm0, 0x40(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x40(%rsp), %rsi
callq 0x28412c
vmovups %ymm0, (%r14)
addq $0x20, %r14
addl $0x8, %r15d
addq $0x8, %r13
jmp 0x283e8b
vmovaps (%r14), %xmm0
vmovaps %xmm0, 0x40(%rsp)
leaq 0x17(%rsp), %rdi
leaq 0x40(%rsp), %rsi
vzeroupper
callq 0x284270
vmovaps %xmm0, (%r14)
addq $0x10, %r14
addl $0x4, %r15d
addq $0x4, %r13
leal 0x3(%r15), %eax
cmpl %r12d, %eax
jl 0x283ec1
movq 0x30(%rsp), %rax
imulq 0x28(%rsp), %rax
movq 0x38(%rsp), %r15
imulq %r15, %rax
addq %rax, %rbx
cmpl %r12d, %r13d
jge 0x283f2d
vmovss (%rbx,%r13,4), %xmm0
vzeroupper
callq 0x5f200
vmovss %xmm0, (%rbx,%r13,4)
incq %r13
jmp 0x283f0f
incq %r15
jmp 0x283e50
xorl %eax, %eax
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/unaryop_x86_avx.cpp |
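A note on the inplace loop above: the reciprocal branch earlier in this listing refines the vrcpps estimate with one Newton-Raphson step (the vmulps/vsubps/vmulps/vaddps sequence around the estimate). A minimal intrinsics sketch of that refinement, an illustration rather than ncnn code:

#include <immintrin.h>

// One Newton-Raphson step on the ~12-bit vrcpps estimate:
//   r' = r + r*(1 - x*r), which roughly doubles the effective precision.
static inline __m256 recip_refined(__m256 x)
{
    __m256 r = _mm256_rcp_ps(x);                        // initial estimate of 1/x
    __m256 e = _mm256_sub_ps(_mm256_set1_ps(1.0f),
                             _mm256_mul_ps(x, r));      // e = 1 - x*r
    return _mm256_add_ps(r, _mm256_mul_ps(r, e));       // r + r*e
}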
ncnn::UnaryOp_x86_avx_functor::unary_op_exp::func_pack8(float vector[8] const&) const | static NCNN_FORCEINLINE __m256 exp256_ps(__m256 x)
{
__m256 tmp = _mm256_setzero_ps(), fx;
__m256i imm0;
__m256 one = *(__m256*)_ps256_1;
x = _mm256_min_ps(x, *(__m256*)_ps256_exp_hi);
x = _mm256_max_ps(x, *(__m256*)_ps256_exp_lo);
/* express exp(x) as exp(g + n*log(2)) */
fx = _mm256_comp_fmadd_ps(x, *(__m256*)_ps256_cephes_LOG2EF, *(__m256*)_ps256_0p5);
/* how to perform a floorf with SSE: just below */
//imm0 = _mm256_cvttps_epi32(fx);
//tmp = _mm256_cvtepi32_ps(imm0);
tmp = _mm256_floor_ps(fx);
/* if greater, subtract 1 */
//__m256 mask = _mm256_cmpgt_ps(tmp, fx);
__m256 mask = _mm256_cmp_ps(tmp, fx, _CMP_GT_OS);
mask = _mm256_and_ps(mask, one);
fx = _mm256_sub_ps(tmp, mask);
// x = x - fx * exp_C1
x = _mm256_comp_fnmadd_ps(fx, *(__m256*)_ps256_cephes_exp_C1, x);
// x = x - fx * exp_C2
x = _mm256_comp_fnmadd_ps(fx, *(__m256*)_ps256_cephes_exp_C2, x);
tmp = _mm256_mul_ps(x, x);
__m256 y = *(__m256*)_ps256_cephes_exp_p0;
y = _mm256_comp_fmadd_ps(y, x, *(__m256*)_ps256_cephes_exp_p1);
y = _mm256_comp_fmadd_ps(y, x, *(__m256*)_ps256_cephes_exp_p2);
y = _mm256_comp_fmadd_ps(y, x, *(__m256*)_ps256_cephes_exp_p3);
y = _mm256_comp_fmadd_ps(y, x, *(__m256*)_ps256_cephes_exp_p4);
y = _mm256_comp_fmadd_ps(y, x, *(__m256*)_ps256_cephes_exp_p5);
y = _mm256_comp_fmadd_ps(y, tmp, x);
y = _mm256_add_ps(y, one);
/* build 2^n */
imm0 = _mm256_cvttps_epi32(fx);
// another two AVX2 instructions
imm0 = _mm256_comp_add_epi32(imm0, *(__m256i*)_pi32_256_0x7f);
imm0 = _mm256_comp_slli_epi32(imm0, 23);
__m256 pow2n = _mm256_castsi256_ps(imm0);
y = _mm256_mul_ps(y, pow2n);
return y;
} | vbroadcastss 0x16d253(%rip), %ymm0 # 0x3f11b8
vminps (%rsi), %ymm0, %ymm0
vbroadcastss 0x16d24a(%rip), %ymm1 # 0x3f11bc
vmaxps %ymm1, %ymm0, %ymm0
vbroadcastss 0x16d241(%rip), %ymm1 # 0x3f11c0
vmulps %ymm1, %ymm0, %ymm1
vbroadcastss 0x16a088(%rip), %ymm2 # 0x3ee014
vaddps %ymm2, %ymm1, %ymm1
vroundps $0x1, %ymm1, %ymm3
vcmpltps %ymm3, %ymm1, %ymm1
vbroadcastss 0x16ace4(%rip), %ymm4 # 0x3eec88
vandps %ymm4, %ymm1, %ymm1
vsubps %ymm1, %ymm3, %ymm1
vbroadcastss 0x16ea2b(%rip), %ymm3 # 0x3f29e0
vmulps %ymm3, %ymm1, %ymm3
vaddps %ymm3, %ymm0, %ymm0
vbroadcastss 0x16d206(%rip), %ymm3 # 0x3f11cc
vmulps %ymm0, %ymm0, %ymm5
vmulps %ymm3, %ymm0, %ymm3
vbroadcastss 0x16d1f9(%rip), %ymm6 # 0x3f11d0
vaddps %ymm6, %ymm3, %ymm3
vmulps %ymm0, %ymm3, %ymm3
vbroadcastss 0x16d1ec(%rip), %ymm6 # 0x3f11d4
vaddps %ymm6, %ymm3, %ymm3
vmulps %ymm0, %ymm3, %ymm3
vbroadcastss 0x16d1df(%rip), %ymm6 # 0x3f11d8
vaddps %ymm6, %ymm3, %ymm3
vmulps %ymm0, %ymm3, %ymm3
vbroadcastss 0x16d1d2(%rip), %ymm6 # 0x3f11dc
vaddps %ymm6, %ymm3, %ymm3
vmulps %ymm0, %ymm3, %ymm3
vaddps %ymm2, %ymm3, %ymm2
vmulps %ymm2, %ymm5, %ymm2
vaddps %ymm4, %ymm0, %ymm0
vaddps %ymm2, %ymm0, %ymm0
vcvttps2dq %ymm1, %ymm1
vpslld $0x17, %xmm1, %xmm2
vextractf128 $0x1, %ymm1, %xmm1
vpslld $0x17, %xmm1, %xmm1
vbroadcastss 0x16ac49(%rip), %xmm3 # 0x3eec88
vpaddd %xmm3, %xmm1, %xmm1
vpaddd %xmm3, %xmm2, %xmm2
vinsertf128 $0x1, %xmm1, %ymm2, %ymm1
vmulps %ymm1, %ymm0, %ymm0
retq
| /csukuangfj[P]ncnn/src/layer/x86/avx_mathfun.h |
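A scalar restatement of the range reduction exp256_ps performs per lane may help when reading the listing above. It assumes the standard Cephes constants from sse_mathfun/avx_mathfun and is a sketch, not ncnn code:

#include <algorithm>
#include <cmath>

// exp(x) = 2^n * exp(g), n = floor(x*log2(e) + 0.5), g = x - n*ln(2),
// with ln(2) split into two constants (C1, C2) for extra precision.
static float exp_cephes_scalar(float x)
{
    x = std::min(x, 88.3762626647950f);   // _ps256_exp_hi
    x = std::max(x, -88.3762626647949f);  // _ps256_exp_lo
    float fx = std::floor(x * 1.44269504088896341f + 0.5f); // n
    x -= fx * 0.693359375f;               // cephes_exp_C1
    x -= fx * -2.12194440e-4f;            // cephes_exp_C2
    float z = x * x;
    float y = 1.9875691500e-4f;           // cephes_exp_p0 .. p5
    y = y * x + 1.3981999507e-3f;
    y = y * x + 8.3334519073e-3f;
    y = y * x + 4.1665795894e-2f;
    y = y * x + 1.6666665459e-1f;
    y = y * x + 5.0000001201e-1f;
    y = y * z + x + 1.0f;
    return std::ldexp(y, (int)fx);        // build 2^n via the exponent field
}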
ncnn::UnaryOp_x86_avx_functor::unary_op_cos::func_pack8(float vector[8] const&) const | static NCNN_FORCEINLINE __m256 cos256_ps(__m256 x)
{ // any x
__m256 xmm1, xmm2 = _mm256_setzero_ps(), xmm3, y;
__m256i imm0, imm2;
#ifndef __AVX2__
__m128i imm0_1, imm0_2;
__m128i imm2_1, imm2_2;
#endif
/* take the absolute value */
x = _mm256_and_ps(x, *(__m256*)_ps256_inv_sign_mask);
/* scale by 4/Pi */
y = _mm256_mul_ps(x, *(__m256*)_ps256_cephes_FOPI);
#ifdef __AVX2__
/* store the integer part of y in mm0 */
imm2 = _mm256_cvttps_epi32(y);
/* j=(j+1) & (~1) (see the cephes sources) */
imm2 = _mm256_comp_add_epi32(imm2, *(__m256i*)_pi32_256_1);
imm2 = _mm256_and_si256(imm2, *(__m256i*)_pi32_256_inv1);
y = _mm256_cvtepi32_ps(imm2);
imm2 = _mm256_comp_sub_epi32(imm2, *(__m256i*)_pi32_256_2);
/* get the swap sign flag */
imm0 = _mm256_andnot_si256(imm2, *(__m256i*)_pi32_256_4);
imm0 = _mm256_comp_slli_epi32(imm0, 29);
/* get the polynom selection mask */
imm2 = _mm256_and_si256(imm2, *(__m256i*)_pi32_256_2);
imm2 = _mm256_cmpeq_epi32(imm2, *(__m256i*)_pi32_256_0);
#else
/* we use SSE2 routines to perform the integer ops */
COPY_IMM_TO_XMM(_mm256_cvttps_epi32(y), imm2_1, imm2_2);
imm2_1 = _mm_add_epi32(imm2_1, *(__m128i*)_pi32avx_1);
imm2_2 = _mm_add_epi32(imm2_2, *(__m128i*)_pi32avx_1);
imm2_1 = _mm_and_si128(imm2_1, *(__m128i*)_pi32avx_inv1);
imm2_2 = _mm_and_si128(imm2_2, *(__m128i*)_pi32avx_inv1);
COPY_XMM_TO_IMM(imm2_1, imm2_2, imm2);
y = _mm256_cvtepi32_ps(imm2);
imm2_1 = _mm_sub_epi32(imm2_1, *(__m128i*)_pi32avx_2);
imm2_2 = _mm_sub_epi32(imm2_2, *(__m128i*)_pi32avx_2);
imm0_1 = _mm_andnot_si128(imm2_1, *(__m128i*)_pi32avx_4);
imm0_2 = _mm_andnot_si128(imm2_2, *(__m128i*)_pi32avx_4);
imm0_1 = _mm_slli_epi32(imm0_1, 29);
imm0_2 = _mm_slli_epi32(imm0_2, 29);
COPY_XMM_TO_IMM(imm0_1, imm0_2, imm0);
imm2_1 = _mm_and_si128(imm2_1, *(__m128i*)_pi32avx_2);
imm2_2 = _mm_and_si128(imm2_2, *(__m128i*)_pi32avx_2);
imm2_1 = _mm_cmpeq_epi32(imm2_1, _mm_setzero_si128());
imm2_2 = _mm_cmpeq_epi32(imm2_2, _mm_setzero_si128());
COPY_XMM_TO_IMM(imm2_1, imm2_2, imm2);
#endif
__m256 sign_bit = _mm256_castsi256_ps(imm0);
__m256 poly_mask = _mm256_castsi256_ps(imm2);
/* The magic pass: "Extended precision modular arithmetic"
x = ((x - y * DP1) - y * DP2) - y * DP3; */
xmm1 = *(__m256*)_ps256_minus_cephes_DP1;
xmm2 = *(__m256*)_ps256_minus_cephes_DP2;
xmm3 = *(__m256*)_ps256_minus_cephes_DP3;
x = _mm256_comp_fmadd_ps(y, xmm1, x);
x = _mm256_comp_fmadd_ps(y, xmm2, x);
x = _mm256_comp_fmadd_ps(y, xmm3, x);
    /* Evaluate the first polynomial (0 <= x <= Pi/4) */
y = *(__m256*)_ps256_coscof_p0;
__m256 z = _mm256_mul_ps(x, x);
y = _mm256_comp_fmadd_ps(y, z, *(__m256*)_ps256_coscof_p1);
y = _mm256_comp_fmadd_ps(y, z, *(__m256*)_ps256_coscof_p2);
y = _mm256_mul_ps(y, z);
y = _mm256_mul_ps(y, z);
// y = y - z * 0.5
y = _mm256_comp_fnmadd_ps(z, *(__m256*)_ps256_0p5, y);
y = _mm256_add_ps(y, *(__m256*)_ps256_1);
    /* Evaluate the second polynomial (Pi/4 <= x <= 0) */
__m256 y2 = *(__m256*)_ps256_sincof_p0;
y2 = _mm256_mul_ps(y2, z);
y2 = _mm256_add_ps(y2, *(__m256*)_ps256_sincof_p1);
y2 = _mm256_mul_ps(y2, z);
y2 = _mm256_add_ps(y2, *(__m256*)_ps256_sincof_p2);
y2 = _mm256_mul_ps(y2, z);
y2 = _mm256_mul_ps(y2, x);
y2 = _mm256_add_ps(y2, x);
    /* select the correct result from the two polynomials */
xmm3 = poly_mask;
y2 = _mm256_and_ps(xmm3, y2); //, xmm3);
y = _mm256_andnot_ps(xmm3, y);
y = _mm256_add_ps(y, y2);
/* update the sign */
y = _mm256_xor_ps(y, sign_bit);
return y;
} | vbroadcastss 0x16d2fd(%rip), %ymm0 # 0x3f1900
vandps (%rsi), %ymm0, %ymm2
vbroadcastss 0x173240(%rip), %ymm0 # 0x3f7850
vmulps %ymm0, %ymm2, %ymm0
vcvttps2dq %ymm0, %ymm0
vpcmpeqd %xmm1, %xmm1, %xmm1
vpsubd %xmm1, %xmm0, %xmm3
vextractf128 $0x1, %ymm0, %xmm0
vpsubd %xmm1, %xmm0, %xmm0
vmovddup 0x173d7e(%rip), %xmm1 # xmm1 = mem[0,0]
vpand %xmm1, %xmm3, %xmm3
vpand %xmm1, %xmm0, %xmm0
vinsertf128 $0x1, %xmm0, %ymm3, %ymm1
vcvtdq2ps %ymm1, %ymm4
vbroadcastss 0x173207(%rip), %xmm1 # 0x3f7854
vpaddd %xmm1, %xmm3, %xmm3
vpaddd %xmm1, %xmm0, %xmm5
vpslld $0x1d, %xmm3, %xmm0
vmovddup 0x173d56(%rip), %xmm1 # xmm1 = mem[0,0]
vpandn %xmm1, %xmm0, %xmm0
vpslld $0x1d, %xmm5, %xmm6
vpandn %xmm1, %xmm6, %xmm1
vinsertf128 $0x1, %xmm1, %ymm0, %ymm0
vbroadcastss 0x1731da(%rip), %xmm6 # 0x3f7858
vpand %xmm6, %xmm3, %xmm1
vbroadcastss 0x173d35(%rip), %ymm3 # 0x3f83c0
vpand %xmm6, %xmm5, %xmm5
vmulps %ymm3, %ymm4, %ymm3
vaddps %ymm2, %ymm3, %ymm2
vmulps %ymm2, %ymm2, %ymm3
vbroadcastss 0x1731c4(%rip), %ymm4 # 0x3f7868
vbroadcastss 0x1731bf(%rip), %ymm6 # 0x3f786c
vmulps %ymm4, %ymm3, %ymm4
vaddps %ymm6, %ymm4, %ymm4
vbroadcastss 0x1731b2(%rip), %ymm6 # 0x3f7870
vmulps %ymm3, %ymm4, %ymm4
vaddps %ymm6, %ymm4, %ymm4
vbroadcastss 0x16d255(%rip), %ymm6 # 0x3f1924
vmulps %ymm3, %ymm4, %ymm4
vaddps %ymm6, %ymm4, %ymm4
vbroadcastss 0x16a5a8(%rip), %ymm6 # 0x3eec88
vmulps %ymm3, %ymm4, %ymm4
vaddps %ymm6, %ymm4, %ymm4
vbroadcastss 0x173183(%rip), %ymm6 # 0x3f7874
vmulps %ymm6, %ymm3, %ymm6
vbroadcastss 0x17317a(%rip), %ymm7 # 0x3f7878
vaddps %ymm7, %ymm6, %ymm6
vmulps %ymm3, %ymm6, %ymm6
vbroadcastss 0x17316d(%rip), %ymm7 # 0x3f787c
vaddps %ymm7, %ymm6, %ymm6
vmulps %ymm2, %ymm3, %ymm3
vmulps %ymm6, %ymm3, %ymm3
vaddps %ymm2, %ymm3, %ymm2
vinsertf128 $0x1, %xmm5, %ymm1, %ymm1
vcvtdq2ps %ymm1, %ymm1
vxorps %xmm3, %xmm3, %xmm3
vcmpeqps %ymm3, %ymm1, %ymm1
vandps %ymm2, %ymm1, %ymm2
vandnps %ymm4, %ymm1, %ymm1
vaddps %ymm1, %ymm2, %ymm1
vxorps %ymm1, %ymm0, %ymm0
retq
nop
| /csukuangfj[P]ncnn/src/layer/x86/avx_mathfun.h |
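The quadrant bookkeeping in cos256_ps is easier to follow in scalar form. The sketch below mirrors the integer logic (j = (j+1) & ~1, sign from (~j) & 4, polynomial choice from j & 2) with the usual Cephes constants; it is illustrative, not ncnn code:

#include <cmath>

static float cos_cephes_scalar(float x)
{
    x = std::fabs(x);                          // take the absolute value
    int j = (int)(x * 1.27323954473516f);      // scale by 4/pi (cephes_FOPI)
    j = (j + 1) & ~1;                          // round the octant up to even
    float y = (float)j;
    j -= 2;
    bool negate = ((~j) & 4) != 0;             // swap-sign flag
    bool use_sin_poly = (j & 2) == 0;          // polynomial selection mask
    // "Extended precision modular arithmetic": x -= y*pi/4 in three parts
    x = ((x - y * 0.78515625f)
         - y * 2.4187564849853515625e-4f)
        - y * 3.77489497744594108e-8f;
    float z = x * x;
    float r;
    if (use_sin_poly)
        r = ((-1.9515295891e-4f * z + 8.3321608736e-3f) * z
             - 1.6666654611e-1f) * z * x + x;
    else
        r = ((2.443315711809948e-5f * z - 1.388731625493765e-3f) * z
             + 4.166664568298827e-2f) * z * z - 0.5f * z + 1.0f;
    return negate ? -r : r;
}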
ncnn::UnaryOp_x86_avx_functor::unary_op_log10::func_pack8(float vector[8] const&) const | static NCNN_FORCEINLINE __m256 log256_ps(__m256 x)
{
__m256i imm0;
__m256 one = *(__m256*)_ps256_1;
//__m256 invalid_mask = _mm256_cmple_ps(x, _mm256_setzero_ps());
__m256 invalid_mask = _mm256_cmp_ps(x, _mm256_setzero_ps(), _CMP_LE_OS);
x = _mm256_max_ps(x, *(__m256*)_ps256_min_norm_pos); /* cut off denormalized stuff */
// can be done with AVX2
imm0 = _mm256_comp_srli_epi32(_mm256_castps_si256(x), 23);
/* keep only the fractional part */
x = _mm256_and_ps(x, *(__m256*)_ps256_inv_mant_mask);
x = _mm256_or_ps(x, *(__m256*)_ps256_0p5);
// this is again another AVX2 instruction
imm0 = _mm256_comp_sub_epi32(imm0, *(__m256i*)_pi32_256_0x7f);
__m256 e = _mm256_cvtepi32_ps(imm0);
e = _mm256_add_ps(e, one);
/* part2:
if( x < SQRTHF ) {
e -= 1;
x = x + x - 1.0;
} else { x = x - 1.0; }
*/
//__m256 mask = _mm256_cmplt_ps(x, *(__m256*)_ps256_cephes_SQRTHF);
__m256 mask = _mm256_cmp_ps(x, *(__m256*)_ps256_cephes_SQRTHF, _CMP_LT_OS);
__m256 tmp = _mm256_and_ps(x, mask);
x = _mm256_sub_ps(x, one);
e = _mm256_sub_ps(e, _mm256_and_ps(one, mask));
x = _mm256_add_ps(x, tmp);
__m256 z = _mm256_mul_ps(x, x);
__m256 y = *(__m256*)_ps256_cephes_log_p0;
y = _mm256_comp_fmadd_ps(y, x, *(__m256*)_ps256_cephes_log_p1);
y = _mm256_comp_fmadd_ps(y, x, *(__m256*)_ps256_cephes_log_p2);
y = _mm256_comp_fmadd_ps(y, x, *(__m256*)_ps256_cephes_log_p3);
y = _mm256_comp_fmadd_ps(y, x, *(__m256*)_ps256_cephes_log_p4);
y = _mm256_comp_fmadd_ps(y, x, *(__m256*)_ps256_cephes_log_p5);
y = _mm256_comp_fmadd_ps(y, x, *(__m256*)_ps256_cephes_log_p6);
y = _mm256_comp_fmadd_ps(y, x, *(__m256*)_ps256_cephes_log_p7);
y = _mm256_comp_fmadd_ps(y, x, *(__m256*)_ps256_cephes_log_p8);
y = _mm256_mul_ps(y, x);
y = _mm256_mul_ps(y, z);
y = _mm256_comp_fmadd_ps(e, *(__m256*)_ps256_cephes_log_q1, y);
//y = -z * 0.5 + y
y = _mm256_comp_fnmadd_ps(z, *(__m256*)_ps256_0p5, y);
x = _mm256_add_ps(x, y);
x = _mm256_comp_fmadd_ps(e, *(__m256*)_ps256_cephes_log_q2, x);
y = _mm256_or_ps(x, invalid_mask); // negative arg will be NAN
return y;
} | vbroadcastss 0x16bdc3(%rip), %ymm0 # 0x3f11e0
vmaxps (%rsi), %ymm0, %ymm0
vpsrld $0x17, %xmm0, %xmm1
vextractf128 $0x1, %ymm0, %xmm2
vpsrld $0x17, %xmm2, %xmm2
vbroadcastss 0x16bdaa(%rip), %ymm3 # 0x3f11e4
vandps %ymm3, %ymm0, %ymm0
vbroadcastss 0x168bcd(%rip), %ymm3 # 0x3ee014
vorps %ymm3, %ymm0, %ymm0
vbroadcastss 0x16c4b0(%rip), %xmm3 # 0x3f1904
vbroadcastss 0x16bd8f(%rip), %ymm4 # 0x3f11ec
vcmpleps %ymm0, %ymm4, %ymm4
vandnps %ymm0, %ymm4, %ymm5
vbroadcastss 0x16bd81(%rip), %ymm6 # 0x3f11f0
vaddps %ymm6, %ymm0, %ymm0
vaddps %ymm5, %ymm0, %ymm0
vextractf128 $0x1, %ymm4, %xmm5
vpsubd %xmm5, %xmm2, %xmm2
vpaddd %xmm3, %xmm2, %xmm2
vpsubd %xmm4, %xmm1, %xmm1
vpaddd %xmm3, %xmm1, %xmm1
vinsertf128 $0x1, %xmm2, %ymm1, %ymm1
vcvtdq2ps %ymm1, %ymm2
vmulps %ymm0, %ymm0, %ymm1
vbroadcastss 0x16bd50(%rip), %ymm3 # 0x3f11f4
vbroadcastss 0x16bd4b(%rip), %ymm4 # 0x3f11f8
vmulps %ymm3, %ymm0, %ymm3
vaddps %ymm4, %ymm3, %ymm3
vbroadcastss 0x16bd3e(%rip), %ymm4 # 0x3f11fc
vmulps %ymm0, %ymm3, %ymm3
vaddps %ymm4, %ymm3, %ymm3
vbroadcastss 0x16bd31(%rip), %ymm4 # 0x3f1200
vmulps %ymm0, %ymm3, %ymm3
vaddps %ymm4, %ymm3, %ymm3
vbroadcastss 0x16bd24(%rip), %ymm4 # 0x3f1204
vmulps %ymm0, %ymm3, %ymm3
vaddps %ymm4, %ymm3, %ymm3
vbroadcastss 0x16bd17(%rip), %ymm4 # 0x3f1208
vmulps %ymm0, %ymm3, %ymm3
vaddps %ymm4, %ymm3, %ymm3
vbroadcastss 0x16bd0a(%rip), %ymm4 # 0x3f120c
vmulps %ymm0, %ymm3, %ymm3
vaddps %ymm4, %ymm3, %ymm3
vbroadcastss 0x16bcfd(%rip), %ymm4 # 0x3f1210
vmulps %ymm0, %ymm3, %ymm3
vaddps %ymm4, %ymm3, %ymm3
vbroadcastss 0x16bcf0(%rip), %ymm4 # 0x3f1214
vmulps %ymm0, %ymm3, %ymm3
vaddps %ymm4, %ymm3, %ymm3
vbroadcastss 0x16c3eb(%rip), %ymm4 # 0x3f1920
vmulps %ymm0, %ymm3, %ymm3
vmulps %ymm4, %ymm2, %ymm2
vbroadcastss 0x16c3de(%rip), %ymm4 # 0x3f1924
vaddps %ymm4, %ymm3, %ymm3
vmulps %ymm3, %ymm1, %ymm1
vaddps %ymm0, %ymm2, %ymm0
vaddps %ymm1, %ymm0, %ymm0
vbroadcastss 0x172e65(%rip), %ymm1 # 0x3f83c4
vmulps %ymm1, %ymm0, %ymm0
retq
| /csukuangfj[P]ncnn/src/layer/x86/avx_mathfun.h |
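Although the functor is unary_op_log10, the source shown is the natural-log kernel; the final vmulps against a broadcast constant in the listing is consistent with scaling ln(x) by log10(e) ~= 0.4342945, though that constant's value is inferred here, not read from the binary. A scalar sketch of the decomposition log256_ps performs (illustrative, not ncnn code):

#include <cmath>

// Split x = m * 2^e with m in [0.5, 1); shift m into [sqrt(1/2), sqrt(2))
// by folding one exponent step, then evaluate a polynomial in (m - 1).
static float log_cephes_scalar(float x)
{
    int e;
    float m = std::frexp(x, &e);          // assumes x > 0 (the vector code
                                          // NaN-masks x <= 0 separately)
    if (m < 0.707106781186547524f)        // cephes_SQRTHF
    {
        e -= 1;
        m = m + m - 1.0f;
    }
    else
    {
        m = m - 1.0f;
    }
    float z = m * m;
    float y = 7.0376836292e-2f;           // cephes_log_p0 .. p8
    y = y * m - 1.1514610310e-1f;
    y = y * m + 1.1676998740e-1f;
    y = y * m - 1.2420140846e-1f;
    y = y * m + 1.4249322787e-1f;
    y = y * m - 1.6668057665e-1f;
    y = y * m + 2.0000714765e-1f;
    y = y * m - 2.4999993993e-1f;
    y = y * m + 3.3333331174e-1f;
    y = y * m * z;
    y += e * -2.12194440e-4f;             // cephes_log_q1
    y -= 0.5f * z;
    m += y;
    m += e * 0.693359375f;                // cephes_log_q2
    return m;                             // ln(x)
}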
ncnn::ConvolutionDepthWise::load_model(ncnn::ModelBin const&) | int ConvolutionDepthWise::load_model(const ModelBin& mb)
{
if (dynamic_weight)
return 0;
weight_data = mb.load(weight_data_size, 0);
if (weight_data.empty())
return -100;
if (bias_term)
{
bias_data = mb.load(num_output, 1);
if (bias_data.empty())
return -100;
}
#if NCNN_INT8
if (int8_scale_term == 1 || int8_scale_term == 101)
{
weight_data_int8_scales = mb.load(group, 1);
bottom_blob_int8_scales = mb.load(1, 1);
float bottom_blob_int8_scale = bottom_blob_int8_scales[0];
bottom_blob_int8_scales = Mat(group);
bottom_blob_int8_scales.fill(bottom_blob_int8_scale);
}
else if (int8_scale_term == 2 || int8_scale_term == 102)
{
weight_data_int8_scales = mb.load(1, 1);
bottom_blob_int8_scales = mb.load(1, 1);
// extend group if only one provided
float weight_data_int8_scale = weight_data_int8_scales[0];
weight_data_int8_scales = Mat(group);
weight_data_int8_scales.fill(weight_data_int8_scale);
float bottom_blob_int8_scale = bottom_blob_int8_scales[0];
bottom_blob_int8_scales = Mat(group);
bottom_blob_int8_scales.fill(bottom_blob_int8_scale);
}
if (int8_scale_term > 100)
{
top_blob_int8_scales = mb.load(1, 1);
float top_blob_int8_scale = top_blob_int8_scales[0];
top_blob_int8_scales = Mat(group);
top_blob_int8_scales.fill(top_blob_int8_scale);
}
#endif // NCNN_INT8
return 0;
} | pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x60, %rsp
cmpl $0x0, 0x160(%rdi)
je 0x285b4c
xorl %r12d, %r12d
movl %r12d, %eax
addq $0x60, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
retq
movq %rsi, %r15
movq %rdi, %rbx
movl 0x104(%rdi), %edx
movq (%rsi), %rax
leaq 0x10(%rsp), %r14
movq %r14, %rdi
xorl %ecx, %ecx
callq *0x10(%rax)
leaq 0x168(%rbx), %r13
movq 0x8(%r14), %rax
cmpq %r14, %r13
je 0x285c14
testq %rax, %rax
je 0x285b84
lock
incl (%rax)
movq 0x170(%rbx), %rax
testq %rax, %rax
je 0x285bb8
lock
decl (%rax)
jne 0x285bb8
movq 0x168(%rbx), %rsi
movq 0x188(%rbx), %rdi
testq %rdi, %rdi
je 0x285bb0
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x285bb8
movq %rsi, %rdi
callq 0x5f3e0
movq 0x10(%rsp), %rax
movq %rax, 0x168(%rbx)
movq 0x18(%rsp), %rax
movq %rax, 0x170(%rbx)
movq 0x20(%rsp), %rcx
movq %rcx, 0x178(%rbx)
movl 0x28(%rsp), %ecx
movl %ecx, 0x180(%rbx)
movq 0x30(%rsp), %rcx
movq %rcx, 0x188(%rbx)
movups 0x38(%rsp), %xmm0
movups %xmm0, 0x190(%rbx)
movl 0x48(%rsp), %ecx
movl %ecx, 0x1a0(%rbx)
movq 0x50(%rsp), %rcx
movq %rcx, 0x1a8(%rbx)
testq %rax, %rax
je 0x285c3d
lock
decl (%rax)
jne 0x285c3d
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x285c35
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x285c3d
movq %rsi, %rdi
callq 0x5f3e0
pushq $-0x64
popq %r12
cmpq $0x0, (%r13)
je 0x285b3b
movslq 0x1a0(%rbx), %rax
imulq 0x1a8(%rbx), %rax
testq %rax, %rax
je 0x285b3b
cmpl $0x0, 0x100(%rbx)
je 0x285d7f
movl 0xd0(%rbx), %edx
movq (%r15), %rax
pushq $0x1
popq %rcx
movq %r14, %rdi
movq %r15, %rsi
callq *0x10(%rax)
leaq 0x1b0(%rbx), %r13
movq 0x18(%rsp), %rax
cmpq %r14, %r13
je 0x285d33
testq %rax, %rax
je 0x285ca3
lock
incl (%rax)
movq 0x1b8(%rbx), %rax
testq %rax, %rax
je 0x285cd7
lock
decl (%rax)
jne 0x285cd7
movq 0x1b0(%rbx), %rsi
movq 0x1d0(%rbx), %rdi
testq %rdi, %rdi
je 0x285ccf
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x285cd7
movq %rsi, %rdi
callq 0x5f3e0
movq 0x10(%rsp), %rax
movq %rax, 0x1b0(%rbx)
movq 0x18(%rsp), %rax
movq %rax, 0x1b8(%rbx)
movq 0x20(%rsp), %rcx
movq %rcx, 0x1c0(%rbx)
movl 0x28(%rsp), %ecx
movl %ecx, 0x1c8(%rbx)
movq 0x30(%rsp), %rcx
movq %rcx, 0x1d0(%rbx)
movups 0x38(%rsp), %xmm0
movups %xmm0, 0x1d8(%rbx)
movl 0x48(%rsp), %ecx
movl %ecx, 0x1e8(%rbx)
movq 0x50(%rsp), %rcx
movq %rcx, 0x1f0(%rbx)
testq %rax, %rax
je 0x285d5c
lock
decl (%rax)
jne 0x285d5c
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x285d54
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x285d5c
movq %rsi, %rdi
callq 0x5f3e0
cmpq $0x0, (%r13)
je 0x285b3b
movslq 0x1e8(%rbx), %rax
imulq 0x1f0(%rbx), %rax
testq %rax, %rax
je 0x285b3b
movl 0x10c(%rbx), %eax
cmpl $0x1, %eax
je 0x285d9d
cmpl $0x2, %eax
je 0x285e0a
cmpl $0x66, %eax
je 0x285e0a
cmpl $0x65, %eax
jne 0x286582
movl 0x108(%rbx), %edx
movq (%r15), %rax
pushq $0x1
popq %rcx
movq %r14, %rdi
movq %r15, %rsi
callq *0x10(%rax)
leaq 0x1f8(%rbx), %rcx
movq 0x18(%rsp), %rax
cmpq %r14, %rcx
je 0x285fbe
testq %rax, %rax
je 0x285dcf
lock
incl (%rax)
movq 0x200(%rbx), %rax
testq %rax, %rax
je 0x285f62
lock
decl (%rax)
jne 0x285f62
movq 0x1f8(%rbx), %rsi
movq 0x218(%rbx), %rdi
testq %rdi, %rdi
je 0x285f5a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x285f62
movq (%r15), %rax
pushq $0x1
popq %rcx
movq %r14, %rdi
movq %r15, %rsi
movl %ecx, %edx
callq *0x10(%rax)
leaq 0x1f8(%rbx), %r13
movq 0x18(%rsp), %rax
cmpq %r14, %r13
je 0x285ec8
testq %rax, %rax
je 0x285e38
lock
incl (%rax)
movq 0x200(%rbx), %rax
testq %rax, %rax
je 0x285e6c
lock
decl (%rax)
jne 0x285e6c
movq 0x1f8(%rbx), %rsi
movq 0x218(%rbx), %rdi
testq %rdi, %rdi
je 0x285e64
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x285e6c
movq %rsi, %rdi
callq 0x5f3e0
movq 0x10(%rsp), %rax
movq %rax, 0x1f8(%rbx)
movq 0x18(%rsp), %rax
movq %rax, 0x200(%rbx)
movq 0x20(%rsp), %rcx
movq %rcx, 0x208(%rbx)
movl 0x28(%rsp), %ecx
movl %ecx, 0x210(%rbx)
movq 0x30(%rsp), %rcx
movq %rcx, 0x218(%rbx)
movups 0x38(%rsp), %xmm0
movups %xmm0, 0x220(%rbx)
movl 0x48(%rsp), %ecx
movl %ecx, 0x230(%rbx)
movq 0x50(%rsp), %rcx
movq %rcx, 0x238(%rbx)
testq %rax, %rax
je 0x285ef1
lock
decl (%rax)
jne 0x285ef1
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x285ee9
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x285ef1
movq %rsi, %rdi
callq 0x5f3e0
movq (%r15), %rax
pushq $0x1
popq %rcx
movq %r14, %rdi
movq %r15, %rsi
movl %ecx, %edx
callq *0x10(%rax)
leaq 0x240(%rbx), %r12
movq 0x18(%rsp), %rax
cmpq %r14, %r12
je 0x2860b4
testq %rax, %rax
je 0x285f1f
lock
incl (%rax)
movq 0x248(%rbx), %rax
testq %rax, %rax
je 0x286058
lock
decl (%rax)
jne 0x286058
movq 0x240(%rbx), %rsi
movq 0x260(%rbx), %rdi
testq %rdi, %rdi
je 0x286050
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x286058
movq %rsi, %rdi
callq 0x5f3e0
movq 0x10(%rsp), %rax
movq %rax, 0x1f8(%rbx)
movq 0x18(%rsp), %rax
movq %rax, 0x200(%rbx)
movq 0x20(%rsp), %rcx
movq %rcx, 0x208(%rbx)
movl 0x28(%rsp), %ecx
movl %ecx, 0x210(%rbx)
movq 0x30(%rsp), %rcx
movq %rcx, 0x218(%rbx)
movups 0x38(%rsp), %xmm0
movups %xmm0, 0x220(%rbx)
movl 0x48(%rsp), %ecx
movl %ecx, 0x230(%rbx)
movq 0x50(%rsp), %rcx
movq %rcx, 0x238(%rbx)
testq %rax, %rax
je 0x285fe7
lock
decl (%rax)
jne 0x285fe7
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x285fdf
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x285fe7
movq %rsi, %rdi
callq 0x5f3e0
movq (%r15), %rax
pushq $0x1
popq %rcx
movq %r14, %rdi
movq %r15, %rsi
movl %ecx, %edx
callq *0x10(%rax)
leaq 0x240(%rbx), %r12
movq 0x18(%rsp), %rax
cmpq %r14, %r12
je 0x2861d3
testq %rax, %rax
je 0x286015
lock
incl (%rax)
movq 0x248(%rbx), %rax
testq %rax, %rax
je 0x286177
lock
decl (%rax)
jne 0x286177
movq 0x240(%rbx), %rsi
movq 0x260(%rbx), %rdi
testq %rdi, %rdi
je 0x28616f
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x286177
movq %rsi, %rdi
callq 0x5f3e0
movq 0x10(%rsp), %rax
movq %rax, 0x240(%rbx)
movq 0x18(%rsp), %rax
movq %rax, 0x248(%rbx)
movq 0x20(%rsp), %rcx
movq %rcx, 0x250(%rbx)
movl 0x28(%rsp), %ecx
movl %ecx, 0x258(%rbx)
movq 0x30(%rsp), %rcx
movq %rcx, 0x260(%rbx)
movups 0x38(%rsp), %xmm0
movups %xmm0, 0x268(%rbx)
movl 0x48(%rsp), %ecx
movl %ecx, 0x278(%rbx)
movq 0x50(%rsp), %rcx
movq %rcx, 0x280(%rbx)
testq %rax, %rax
je 0x2860dd
lock
decl (%rax)
jne 0x2860dd
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x2860d5
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2860dd
movq %rsi, %rdi
callq 0x5f3e0
movq 0x1f8(%rbx), %rax
movss (%rax), %xmm0
movss %xmm0, 0xc(%rsp)
movl 0x108(%rbx), %esi
andq $0x0, 0x50(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, 0x10(%rsp)
movups %xmm0, 0x1c(%rsp)
movaps %xmm0, 0x30(%rsp)
movups %xmm0, 0x3c(%rsp)
pushq $0x4
popq %rdx
movq %r14, %rdi
xorl %ecx, %ecx
callq 0x635fa
movq 0x18(%rsp), %rax
cmpq %r14, %r13
je 0x2862f2
testq %rax, %rax
je 0x286134
lock
incl (%rax)
movq 0x200(%rbx), %rax
testq %rax, %rax
je 0x286296
lock
decl (%rax)
jne 0x286296
movq 0x1f8(%rbx), %rsi
movq 0x218(%rbx), %rdi
testq %rdi, %rdi
je 0x28628e
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x286296
movq %rsi, %rdi
callq 0x5f3e0
movq 0x10(%rsp), %rax
movq %rax, 0x240(%rbx)
movq 0x18(%rsp), %rax
movq %rax, 0x248(%rbx)
movq 0x20(%rsp), %rcx
movq %rcx, 0x250(%rbx)
movl 0x28(%rsp), %ecx
movl %ecx, 0x258(%rbx)
movq 0x30(%rsp), %rcx
movq %rcx, 0x260(%rbx)
movups 0x38(%rsp), %xmm0
movups %xmm0, 0x268(%rbx)
movl 0x48(%rsp), %ecx
movl %ecx, 0x278(%rbx)
movq 0x50(%rsp), %rcx
movq %rcx, 0x280(%rbx)
testq %rax, %rax
je 0x2861fc
lock
decl (%rax)
jne 0x2861fc
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x2861f4
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2861fc
movq %rsi, %rdi
callq 0x5f3e0
movq 0x240(%rbx), %rax
movss (%rax), %xmm0
movss %xmm0, 0xc(%rsp)
movl 0x108(%rbx), %esi
andq $0x0, 0x50(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, 0x10(%rsp)
movups %xmm0, 0x1c(%rsp)
movaps %xmm0, 0x30(%rsp)
movups %xmm0, 0x3c(%rsp)
pushq $0x4
popq %rdx
movq %r14, %rdi
xorl %ecx, %ecx
callq 0x635fa
movq 0x18(%rsp), %rax
cmpq %r14, %r12
je 0x28644c
testq %rax, %rax
je 0x286253
lock
incl (%rax)
movq 0x248(%rbx), %rax
testq %rax, %rax
je 0x2863f0
lock
decl (%rax)
jne 0x2863f0
movq 0x240(%rbx), %rsi
movq 0x260(%rbx), %rdi
testq %rdi, %rdi
je 0x2863e8
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2863f0
movq %rsi, %rdi
callq 0x5f3e0
movq 0x10(%rsp), %rax
movq %rax, 0x1f8(%rbx)
movq 0x18(%rsp), %rax
movq %rax, 0x200(%rbx)
movq 0x20(%rsp), %rcx
movq %rcx, 0x208(%rbx)
movl 0x28(%rsp), %ecx
movl %ecx, 0x210(%rbx)
movq 0x30(%rsp), %rcx
movq %rcx, 0x218(%rbx)
movups 0x38(%rsp), %xmm0
movups %xmm0, 0x220(%rbx)
movl 0x48(%rsp), %ecx
movl %ecx, 0x230(%rbx)
movq 0x50(%rsp), %rcx
movq %rcx, 0x238(%rbx)
testq %rax, %rax
movss 0xc(%rsp), %xmm0
je 0x28632d
lock
decl (%rax)
jne 0x28632d
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x28631f
movq (%rdi), %rax
callq *0x18(%rax)
movss 0xc(%rsp), %xmm0
jmp 0x28632d
movq %rsi, %rdi
callq 0x5f3e0
movss 0xc(%rsp), %xmm0
movl 0x230(%rbx), %eax
imull 0x238(%rbx), %eax
movq 0x1f8(%rbx), %rcx
xorl %edx, %edx
testl %eax, %eax
cmovlel %edx, %eax
cmpl %edx, %eax
je 0x286356
movss %xmm0, (%rcx,%rdx,4)
incq %rdx
jmp 0x286348
movq 0x240(%rbx), %rax
movss (%rax), %xmm0
movss %xmm0, 0xc(%rsp)
movl 0x108(%rbx), %esi
andq $0x0, 0x50(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, 0x10(%rsp)
movups %xmm0, 0x1c(%rsp)
movaps %xmm0, 0x30(%rsp)
movups %xmm0, 0x3c(%rsp)
pushq $0x4
popq %rdx
movq %r14, %rdi
xorl %ecx, %ecx
callq 0x635fa
movq 0x18(%rsp), %rax
cmpq %r14, %r12
je 0x286518
testq %rax, %rax
je 0x2863ad
lock
incl (%rax)
movq 0x248(%rbx), %rax
testq %rax, %rax
je 0x2864bc
lock
decl (%rax)
jne 0x2864bc
movq 0x240(%rbx), %rsi
movq 0x260(%rbx), %rdi
testq %rdi, %rdi
je 0x2864b4
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2864bc
movq %rsi, %rdi
callq 0x5f3e0
movq 0x10(%rsp), %rax
movq %rax, 0x240(%rbx)
movq 0x18(%rsp), %rax
movq %rax, 0x248(%rbx)
movq 0x20(%rsp), %rcx
movq %rcx, 0x250(%rbx)
movl 0x28(%rsp), %ecx
movl %ecx, 0x258(%rbx)
movq 0x30(%rsp), %rcx
movq %rcx, 0x260(%rbx)
movups 0x38(%rsp), %xmm0
movups %xmm0, 0x268(%rbx)
movl 0x48(%rsp), %ecx
movl %ecx, 0x278(%rbx)
movq 0x50(%rsp), %rcx
movq %rcx, 0x280(%rbx)
testq %rax, %rax
movss 0xc(%rsp), %xmm0
je 0x286487
lock
decl (%rax)
jne 0x286487
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x286479
movq (%rdi), %rax
callq *0x18(%rax)
movss 0xc(%rsp), %xmm0
jmp 0x286487
movq %rsi, %rdi
callq 0x5f3e0
movss 0xc(%rsp), %xmm0
movl 0x278(%rbx), %eax
imull 0x280(%rbx), %eax
movq 0x240(%rbx), %rcx
xorl %edx, %edx
testl %eax, %eax
cmovlel %edx, %eax
cmpl %edx, %eax
je 0x28657c
movss %xmm0, (%rcx,%rdx,4)
incq %rdx
jmp 0x2864a2
movq %rsi, %rdi
callq 0x5f3e0
movq 0x10(%rsp), %rax
movq %rax, 0x240(%rbx)
movq 0x18(%rsp), %rax
movq %rax, 0x248(%rbx)
movq 0x20(%rsp), %rcx
movq %rcx, 0x250(%rbx)
movl 0x28(%rsp), %ecx
movl %ecx, 0x258(%rbx)
movq 0x30(%rsp), %rcx
movq %rcx, 0x260(%rbx)
movups 0x38(%rsp), %xmm0
movups %xmm0, 0x268(%rbx)
movl 0x48(%rsp), %ecx
movl %ecx, 0x278(%rbx)
movq 0x50(%rsp), %rcx
movq %rcx, 0x280(%rbx)
testq %rax, %rax
movss 0xc(%rsp), %xmm0
je 0x286553
lock
decl (%rax)
jne 0x286553
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x286545
movq (%rdi), %rax
callq *0x18(%rax)
movss 0xc(%rsp), %xmm0
jmp 0x286553
movq %rsi, %rdi
callq 0x5f3e0
movss 0xc(%rsp), %xmm0
movl 0x278(%rbx), %eax
imull 0x280(%rbx), %eax
movq 0x240(%rbx), %rcx
xorl %edx, %edx
testl %eax, %eax
cmovlel %edx, %eax
cmpl %edx, %eax
je 0x28657c
movss %xmm0, (%rcx,%rdx,4)
incq %rdx
jmp 0x28656e
movl 0x10c(%rbx), %eax
cmpl $0x65, %eax
jl 0x285b38
movq (%r15), %rax
pushq $0x1
popq %rcx
movq %r14, %rdi
movq %r15, %rsi
movl %ecx, %edx
callq *0x10(%rax)
leaq 0x288(%rbx), %r15
movq 0x18(%rsp), %rax
cmpq %r14, %r15
je 0x286649
testq %rax, %rax
je 0x2865b9
lock
incl (%rax)
movq 0x290(%rbx), %rax
testq %rax, %rax
je 0x2865ed
lock
decl (%rax)
jne 0x2865ed
movq 0x288(%rbx), %rsi
movq 0x2a8(%rbx), %rdi
testq %rdi, %rdi
je 0x2865e5
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2865ed
movq %rsi, %rdi
callq 0x5f3e0
movq 0x10(%rsp), %rax
movq %rax, 0x288(%rbx)
movq 0x18(%rsp), %rax
movq %rax, 0x290(%rbx)
movq 0x20(%rsp), %rcx
movq %rcx, 0x298(%rbx)
movl 0x28(%rsp), %ecx
movl %ecx, 0x2a0(%rbx)
movq 0x30(%rsp), %rcx
movq %rcx, 0x2a8(%rbx)
movups 0x38(%rsp), %xmm0
movups %xmm0, 0x2b0(%rbx)
movl 0x48(%rsp), %ecx
movl %ecx, 0x2c0(%rbx)
movq 0x50(%rsp), %rcx
movq %rcx, 0x2c8(%rbx)
testq %rax, %rax
je 0x286672
lock
decl (%rax)
jne 0x286672
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x28666a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x286672
movq %rsi, %rdi
callq 0x5f3e0
movq 0x288(%rbx), %rax
movss (%rax), %xmm0
movss %xmm0, 0xc(%rsp)
movl 0x108(%rbx), %esi
andq $0x0, 0x50(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, 0x10(%rsp)
movups %xmm0, 0x1c(%rsp)
movaps %xmm0, 0x30(%rsp)
movups %xmm0, 0x3c(%rsp)
pushq $0x4
popq %rdx
movq %r14, %rdi
xorl %ecx, %ecx
callq 0x635fa
movq 0x18(%rsp), %rax
cmpq %r14, %r15
je 0x286759
testq %rax, %rax
je 0x2866c9
lock
incl (%rax)
movq 0x290(%rbx), %rax
testq %rax, %rax
je 0x2866fd
lock
decl (%rax)
jne 0x2866fd
movq 0x288(%rbx), %rsi
movq 0x2a8(%rbx), %rdi
testq %rdi, %rdi
je 0x2866f5
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2866fd
movq %rsi, %rdi
callq 0x5f3e0
movq 0x10(%rsp), %rax
movq %rax, 0x288(%rbx)
movq 0x18(%rsp), %rax
movq %rax, 0x290(%rbx)
movq 0x20(%rsp), %rcx
movq %rcx, 0x298(%rbx)
movl 0x28(%rsp), %ecx
movl %ecx, 0x2a0(%rbx)
movq 0x30(%rsp), %rcx
movq %rcx, 0x2a8(%rbx)
movups 0x38(%rsp), %xmm0
movups %xmm0, 0x2b0(%rbx)
movl 0x48(%rsp), %ecx
movl %ecx, 0x2c0(%rbx)
movq 0x50(%rsp), %rcx
movq %rcx, 0x2c8(%rbx)
testq %rax, %rax
movss 0xc(%rsp), %xmm0
je 0x286794
lock
decl (%rax)
jne 0x286794
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x286786
movq (%rdi), %rax
callq *0x18(%rax)
movss 0xc(%rsp), %xmm0
jmp 0x286794
movq %rsi, %rdi
callq 0x5f3e0
movss 0xc(%rsp), %xmm0
movl 0x2c0(%rbx), %eax
imull 0x2c8(%rbx), %eax
movq 0x288(%rbx), %rcx
xorl %r12d, %r12d
testl %eax, %eax
cmovlel %r12d, %eax
xorl %edx, %edx
cmpl %edx, %eax
je 0x285b3b
movss %xmm0, (%rcx,%rdx,4)
incq %rdx
jmp 0x2867b3
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x286a48
lock
decl (%rax)
jne 0x286a48
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x286a40
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x286a48
jmp 0x286a5a
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x286a48
lock
decl (%rax)
jne 0x286a48
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x286a40
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x286a48
jmp 0x286a5a
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x286a48
lock
decl (%rax)
jne 0x286a48
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x286a40
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x286a48
jmp 0x286a5a
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x286a48
lock
decl (%rax)
jne 0x286a48
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x286a40
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x286a48
jmp 0x286a5a
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x286a48
lock
decl (%rax)
jne 0x286a48
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x286a40
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x286a48
jmp 0x286a5a
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x286a48
lock
decl (%rax)
jne 0x286a48
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x286a40
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x286a48
jmp 0x286a5a
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x286a48
lock
decl (%rax)
jne 0x286a48
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x286a40
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x286a48
jmp 0x286a5a
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x286a48
lock
decl (%rax)
jne 0x286a48
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x286a40
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x286a48
jmp 0x286a5a
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x286a48
lock
decl (%rax)
jne 0x286a48
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x286a40
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x286a48
jmp 0x286a5a
jmp 0x286a5a
jmp 0x286a5a
jmp 0x286a5a
jmp 0x286a5a
jmp 0x286a5a
jmp 0x286a5a
jmp 0x286a5a
jmp 0x286a5a
jmp 0x286a5a
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x286a48
lock
decl (%rax)
jne 0x286a48
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x286a40
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x286a48
jmp 0x286a5a
jmp 0x286a5a
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x286a48
lock
decl (%rax)
jne 0x286a48
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
jne 0x286a50
movq %rsi, %rdi
callq 0x5f3e0
movq %rbx, %rdi
callq 0x5f340
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x286a48
jmp 0x286a5a
movq %rax, %rdi
callq 0x61d68
| /csukuangfj[P]ncnn/src/layer/convolutiondepthwise.cpp |
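load_model's int8 branches all end in the same broadcast pattern: a single stored scale is expanded into a Mat with one entry per group (Mat(group) followed by fill(scale)). A hedged stand-alone sketch of that pattern, using std::vector in place of ncnn::Mat:

#include <vector>

// Equivalent of: scales = Mat(group); scales.fill(scale);
// (illustrative; ncnn's Mat allocates aligned, refcounted storage instead)
static std::vector<float> broadcast_scale(float scale, int group)
{
    return std::vector<float>(static_cast<size_t>(group > 0 ? group : 0), scale);
}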
ncnn::ConvolutionDepthWise::forward(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const | int ConvolutionDepthWise::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
    // convolve with NxN kernel
// value = value + bias
#if NCNN_INT8
if (opt.use_int8_inference && weight_data.elemsize == (size_t)1u)
{
return forward_int8(bottom_blob, top_blob, opt);
}
#endif
// NCNN_LOGE("ConvolutionDepthWise input %d x %d pad = %d %d ksize=%d %d stride=%d %d", w, h, pad_w, pad_h, kernel_w, kernel_h, stride_w, stride_h);
Mat bottom_blob_bordered;
make_padding(bottom_blob, bottom_blob_bordered, opt);
if (bottom_blob_bordered.empty())
return -100;
const int w = bottom_blob_bordered.w;
const int h = bottom_blob_bordered.h;
const size_t elemsize = bottom_blob_bordered.elemsize;
const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;
const int outw = (w - kernel_extent_w) / stride_w + 1;
const int outh = (h - kernel_extent_h) / stride_h + 1;
top_blob.create(outw, outh, num_output, elemsize, opt.blob_allocator);
if (top_blob.empty())
return -100;
int ret = convolutiondepthwise(bottom_blob_bordered, top_blob, weight_data, bias_data, kernel_w, kernel_h, stride_w, stride_h, dilation_w, dilation_h, group, activation_type, activation_params, opt);
if (ret != 0)
return ret;
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x48, %rsp
movq %rcx, %r12
movq %rdx, %r15
movq %rdi, %r14
cmpb $0x1, 0x1e(%rcx)
jne 0x286fbf
cmpq $0x1, 0x178(%r14)
jne 0x286fbf
movq %r14, %rdi
movq %r15, %rdx
movq %r12, %rcx
addq $0x48, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp 0x287170
movq %rsp, %rdx
andq $0x0, 0x40(%rdx)
xorps %xmm0, %xmm0
movaps %xmm0, (%rdx)
movups %xmm0, 0xc(%rdx)
movaps %xmm0, 0x20(%rdx)
movups %xmm0, 0x2c(%rdx)
movq %r14, %rdi
movq %r12, %rcx
callq 0x287daa
pushq $-0x64
popq %rbx
cmpq $0x0, (%rsp)
je 0x2870eb
movslq 0x38(%rsp), %rax
imulq 0x40(%rsp), %rax
testq %rax, %rax
je 0x2870eb
movq 0x10(%rsp), %r8
movl 0xd4(%r14), %eax
decl %eax
imull 0xdc(%r14), %eax
movl 0xd0(%r14), %ecx
notl %eax
movl 0xd8(%r14), %esi
decl %esi
imull 0xe0(%r14), %esi
addl 0x2c(%rsp), %eax
cltd
idivl 0xe4(%r14)
movl %eax, %edi
notl %esi
addl 0x30(%rsp), %esi
movl %esi, %eax
cltd
idivl 0xe8(%r14)
leal 0x1(%rdi), %esi
leal 0x1(%rax), %edx
movq 0x8(%r12), %r9
movq %r15, %rdi
callq 0x63810
cmpq $0x0, (%r15)
je 0x2870eb
movslq 0x38(%r15), %rax
imulq 0x40(%r15), %rax
testq %rax, %rax
je 0x2870eb
leaq 0x168(%r14), %rdx
leaq 0x1b0(%r14), %rcx
movl 0xd4(%r14), %r8d
movl 0xd8(%r14), %r9d
movl 0xe4(%r14), %eax
movl 0xe8(%r14), %r10d
movl 0xdc(%r14), %r11d
movl 0xe0(%r14), %r12d
movl 0x108(%r14), %r13d
movl 0x110(%r14), %ebp
addq $0x118, %r14 # imm = 0x118
xorl %ebx, %ebx
subq $0x8, %rsp
leaq 0x8(%rsp), %rdi
movq %r15, %rsi
pushq %r14
pushq %rbp
pushq %r13
pushq %r12
pushq %r11
pushq %r10
pushq %rax
callq 0x287dbf
addq $0x40, %rsp
movq 0x8(%rsp), %rax
testq %rax, %rax
je 0x287118
lock
decl (%rax)
jne 0x287118
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
je 0x287110
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x287118
movq %rsi, %rdi
callq 0x5f3e0
movl %ebx, %eax
addq $0x48, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x28712f
jmp 0x287167
jmp 0x28712f
movq %rax, %rbx
movq 0x8(%rsp), %rax
testq %rax, %rax
je 0x28715f
lock
decl (%rax)
jne 0x28715f
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
jne 0x287159
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x28715f
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
nop
| /csukuangfj[P]ncnn/src/layer/convolutiondepthwise.cpp |
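The output-size arithmetic in forward() above is worth pinning down with numbers; a minimal, runnable sketch (not ncnn code):

#include <cstdio>

// kernel_extent = dilation * (kernel - 1) + 1
// out           = (in - kernel_extent) / stride + 1
static int conv_out_size(int in, int kernel, int dilation, int stride)
{
    const int kernel_extent = dilation * (kernel - 1) + 1;
    return (in - kernel_extent) / stride + 1;
}

int main()
{
    // e.g. a 34-wide padded input, 3-wide kernel, dilation 2, stride 1:
    // extent = 2*(3-1)+1 = 5, out = (34-5)/1 + 1 = 30
    std::printf("%d\n", conv_out_size(34, 3, 2, 1)); // prints 30
    return 0;
}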
ncnn::ConvolutionDepthWise::forward_int8(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const | int ConvolutionDepthWise::forward_int8(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
    // convolve with NxN kernel
// value = value + bias
int w = bottom_blob.w;
int h = bottom_blob.h;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
if (channels % group != 0 || num_output % group != 0)
{
// reject invalid group
return -100;
}
// NCNN_LOGE("ConvolutionDepthWise input %d x %d pad = %d %d ksize=%d %d stride=%d %d", w, h, pad_w, pad_h, kernel_w, kernel_h, stride_w, stride_h);
const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;
Mat bottom_blob_int8 = bottom_blob;
if (elemsize != 1)
{
const int channels_g = channels / group;
Mat scales(channels);
{
float* ps = scales;
for (int g = 0; g < group; g++)
{
float scale = bottom_blob_int8_scales[g];
for (int q = 0; q < channels_g; q++)
{
*ps++ = scale;
}
}
}
Option opt_q = opt;
opt_q.blob_allocator = opt.workspace_allocator;
quantize_to_int8(bottom_blob, bottom_blob_int8, scales, opt_q);
}
Mat bottom_blob_bordered;
make_padding(bottom_blob_int8, bottom_blob_bordered, opt);
if (bottom_blob_bordered.empty())
return -100;
w = bottom_blob_bordered.w;
h = bottom_blob_bordered.h;
int outw = (w - kernel_extent_w) / stride_w + 1;
int outh = (h - kernel_extent_h) / stride_h + 1;
const int maxk = kernel_w * kernel_h;
// kernel offsets
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
int gap = w * dilation_h - kernel_w * dilation_w;
for (int i = 0; i < kernel_h; i++)
{
for (int j = 0; j < kernel_w; j++)
{
space_ofs[p1] = p2;
p1++;
p2 += dilation_w;
}
p2 += gap;
}
}
// int8
bool use_int8_requantize = int8_scale_term > 100;
size_t out_elemsize = use_int8_requantize ? 1u : 4u;
top_blob.create(outw, outh, num_output, out_elemsize, opt.blob_allocator);
if (top_blob.empty())
return -100;
// depth-wise
if (channels == group && group == num_output)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
signed char* outptr = top_blob.channel(g);
const signed char* kptr = (const signed char*)weight_data + maxk * g;
const Mat m = bottom_blob_bordered.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
int sum = 0;
const signed char* sptr = m.row<signed char>(i * stride_h) + j * stride_w;
for (int k = 0; k < maxk; k++)
{
signed char val = sptr[space_ofs[k]];
signed char w = kptr[k];
sum += val * w;
}
float scale_in;
if (weight_data_int8_scales[g] == 0)
scale_in = 0;
else
scale_in = 1.f / (bottom_blob_int8_scales[g] * weight_data_int8_scales[g]);
float sumfp32 = sum * scale_in;
if (bias_term)
sumfp32 += bias_data[g];
sumfp32 = activation_ss(sumfp32, activation_type, activation_params);
if (use_int8_requantize)
{
// requantize
float scale_out = top_blob_int8_scales[g];
signed char sums8 = float2int8(sumfp32 * scale_out);
outptr[0] = sums8;
outptr += 1;
}
else
{
// dequantize
((float*)outptr)[0] = sumfp32;
outptr += 4;
}
}
}
}
}
else
{
// group convolution
const int channels_g = channels / group;
const int num_output_g = num_output / group;
#ifdef _WIN32
#pragma omp parallel for num_threads(opt.num_threads)
#else // _WIN32
#pragma omp parallel for collapse(2) num_threads(opt.num_threads)
#endif // _WIN32
for (int g = 0; g < group; g++)
{
for (int p = 0; p < num_output_g; p++)
{
signed char* outptr = top_blob.channel(g * num_output_g + p);
const signed char* weight_data_ptr = (const signed char*)weight_data + maxk * channels_g * num_output_g * g;
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
int sum = 0;
const signed char* kptr = weight_data_ptr + maxk * channels_g * p;
// channels_g
for (int q = 0; q < channels_g; q++)
{
const Mat m = bottom_blob_bordered.channel(channels_g * g + q);
const signed char* sptr = m.row<signed char>(i * stride_h) + j * stride_w;
for (int k = 0; k < maxk; k++)
{
signed char val = sptr[space_ofs[k]];
signed char w = kptr[k];
sum += val * w;
}
kptr += maxk;
}
float scale_in;
if (weight_data_int8_scales[g] == 0)
scale_in = 0;
else
scale_in = 1.f / (bottom_blob_int8_scales[g] * weight_data_int8_scales[g]);
float sumfp32 = sum * scale_in;
if (bias_term)
sumfp32 += bias_data[g * num_output_g + p];
sumfp32 = activation_ss(sumfp32, activation_type, activation_params);
if (use_int8_requantize)
{
// requantize
float scale_out = top_blob_int8_scales[g];
signed char sums8 = float2int8(sumfp32 * scale_out);
outptr[0] = sums8;
outptr += 1;
}
else
{
// dequantize
((float*)outptr)[0] = sumfp32;
outptr += 4;
}
}
}
}
}
}
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x1a8, %rsp # imm = 0x1A8
movq %rcx, %r14
movq %rdx, %r8
movl 0x38(%rsi), %ebp
movl 0x108(%rdi), %ecx
movl %ebp, %eax
cltd
idivl %ecx
pushq $-0x64
popq %rbx
testl %edx, %edx
jne 0x2871a9
movl 0xd0(%rdi), %eax
cltd
idivl %ecx
testl %edx, %edx
je 0x2871bd
movl %ebx, %eax
addq $0x1a8, %rsp # imm = 0x1A8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq %rsi, %r13
movq %r8, 0x58(%rsp)
movl 0xd4(%rdi), %r15d
decl %r15d
imull 0xdc(%rdi), %r15d
movl 0xd8(%rdi), %r12d
decl %r12d
movq %rdi, 0x8(%rsp)
imull 0xe0(%rdi), %r12d
movq 0x10(%rsi), %rax
movq (%rsi), %rcx
movq %rcx, 0x118(%rsp)
movq 0x8(%rsi), %rcx
movq %rcx, 0x120(%rsp)
movq %rax, 0x128(%rsp)
movl 0x18(%rsi), %edx
movl %edx, 0x130(%rsp)
movq 0x20(%rsi), %rdx
movq %rdx, 0x138(%rsp)
movups 0x28(%rsi), %xmm0
movups %xmm0, 0x140(%rsp)
movl %ebp, 0x150(%rsp)
movq 0x40(%rsi), %rdx
movq %rdx, 0x158(%rsp)
testq %rcx, %rcx
je 0x28724e
lock
incl (%rcx)
cmpq $0x1, %rax
movl %ebp, 0x10(%rsp)
je 0x28735c
movq %r14, 0x18(%rsp)
movq 0x8(%rsp), %r14
movl 0x108(%r14), %eax
movl %eax, 0x30(%rsp)
leaq 0x70(%rsp), %rdi
andq $0x0, 0x40(%rdi)
xorps %xmm0, %xmm0
movaps %xmm0, (%rdi)
movups %xmm0, 0xc(%rdi)
movaps %xmm0, 0x20(%rdi)
movups %xmm0, 0x2c(%rdi)
xorl %ebx, %ebx
pushq $0x4
popq %rdx
movl %ebp, %esi
xorl %ecx, %ecx
callq 0x635fa
movl %ebp, %eax
cltd
idivl 0x30(%rsp)
movq 0x70(%rsp), %rcx
movl 0x108(%r14), %edx
testl %eax, %eax
cmovlel %ebx, %eax
movq 0x240(%r14), %rsi
testl %edx, %edx
cmovlel %ebx, %edx
movq 0x18(%rsp), %r14
cmpq %rdx, %rbx
je 0x2872e4
movss (%rsi,%rbx,4), %xmm0
movl %eax, %edi
subl $0x1, %edi
jb 0x2872df
movss %xmm0, (%rcx)
addq $0x4, %rcx
jmp 0x2872d0
incq %rbx
jmp 0x2872c4
movups (%r14), %xmm0
movups 0x10(%r14), %xmm1
movups 0x20(%r14), %xmm2
movups 0x30(%r14), %xmm3
leaq 0x160(%rsp), %rcx
movaps %xmm3, 0x30(%rcx)
movaps %xmm2, 0x20(%rcx)
movaps %xmm1, 0x10(%rcx)
movaps %xmm0, (%rcx)
movq 0x10(%r14), %rax
movq %rax, 0x8(%rcx)
leaq 0x118(%rsp), %rsi
leaq 0x70(%rsp), %rdx
movq %r13, %rdi
callq 0x652e3
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x28735c
lock
decl (%rax)
jne 0x28735c
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x287354
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x28735c
movq %rsi, %rdi
callq 0x5f3e0
leaq 0x70(%rsp), %rdx
andq $0x0, 0x40(%rdx)
xorps %xmm0, %xmm0
movaps %xmm0, (%rdx)
movups %xmm0, 0xc(%rdx)
movaps %xmm0, 0x20(%rdx)
movups %xmm0, 0x2c(%rdx)
leaq 0x118(%rsp), %rsi
movq 0x8(%rsp), %rdi
movq %r14, %rcx
callq 0x287daa
pushq $-0x64
popq %rbx
cmpq $0x0, 0x70(%rsp)
je 0x287c5f
movslq 0xa8(%rsp), %rax
imulq 0xb0(%rsp), %rax
testq %rax, %rax
je 0x287c5f
movq %r14, 0x18(%rsp)
notl %r15d
movl 0x9c(%rsp), %ebx
addl %ebx, %r15d
movl %r15d, %eax
cltd
movq 0x8(%rsp), %r14
idivl 0xe4(%r14)
movl %eax, %r15d
notl %r12d
addl 0xa0(%rsp), %r12d
movl %r12d, %eax
cltd
idivl 0xe8(%r14)
movl %eax, %r12d
movslq 0xd4(%r14), %rax
movslq 0xd8(%r14), %r13
imulq %rax, %r13
leaq 0x160(%rsp), %rdi
leaq 0x27(%rsp), %rdx
movq %r13, %rsi
callq 0x73bbe
movq %r15, 0x28(%rsp)
leal 0x1(%r15), %esi
movq %r12, 0x60(%rsp)
leal 0x1(%r12), %edx
imull 0xe0(%r14), %ebx
movq 0x160(%rsp), %rbp
movl 0xd4(%r14), %eax
movl 0xdc(%r14), %ecx
imull %eax, %ecx
subl %ecx, %ebx
xorl %ecx, %ecx
xorl %edi, %edi
xorl %r8d, %r8d
movq 0x58(%rsp), %r14
movq 0x8(%rsp), %r9
cmpl 0xd8(%r9), %ecx
jge 0x2874a4
movslq %r8d, %r8
leaq (,%r8,4), %r10
addq %rbp, %r10
xorl %r9d, %r9d
cmpl %eax, %r9d
jge 0x28749b
movl %edi, (%r10,%r9,4)
movq 0x8(%rsp), %rax
addl 0xdc(%rax), %edi
movl 0xd4(%rax), %eax
incq %r9
jmp 0x28747c
addl %ebx, %edi
incl %ecx
addq %r9, %r8
jmp 0x28745d
movq 0x8(%rsp), %rax
movl 0xd0(%rax), %ecx
movl 0x10c(%rax), %edi
xorl %eax, %eax
movl %edi, 0x44(%rsp)
cmpl $0x65, %edi
setl %al
leaq 0x1(%rax,%rax,2), %r8
movq 0x18(%rsp), %rax
movq 0x8(%rax), %r9
movq %r14, %rdi
callq 0x63810
pushq $-0x64
popq %rax
movq %rax, 0x50(%rsp)
cmpq $0x0, (%r14)
movq 0x28(%rsp), %r8
movl 0x10(%rsp), %eax
je 0x287c4d
movslq 0x38(%r14), %rcx
imulq 0x40(%r14), %rcx
testq %rcx, %rcx
je 0x287c4d
movq 0x8(%rsp), %rdx
movl 0xd0(%rdx), %ecx
movl 0x108(%rdx), %edi
movl %eax, %esi
xorl %edi, %esi
movl %edi, %edx
xorl %ecx, %edx
orl %esi, %edx
jne 0x28784a
movq $0x0, 0x50(%rsp)
testl %r13d, %r13d
movl $0x0, %ebx
cmovgl %r13d, %ebx
movq $0x0, 0x48(%rsp)
xorl %r9d, %r9d
movq 0x8(%rsp), %rdx
cltq
cmpq %rax, %r9
jge 0x287c4d
movq 0x40(%r14), %r12
imulq %r9, %r12
imulq 0x10(%r14), %r12
addq (%r14), %r12
movslq 0x9c(%rsp), %rcx
movq 0xb0(%rsp), %rsi
imulq %r9, %rsi
movq 0x80(%rsp), %rax
imulq %rax, %rsi
addq 0x70(%rsp), %rsi
movq %rsi, 0x30(%rsp)
imulq %rax, %rcx
movq %rcx, 0x10(%rsp)
movq 0x168(%rdx), %r15
addq 0x48(%rsp), %r15
xorl %eax, %eax
movq %r9, 0x38(%rsp)
cmpl 0x60(%rsp), %eax
jg 0x287832
xorl %r14d, %r14d
movl %eax, 0xc0(%rsp)
cltq
movq %rax, 0x18(%rsp)
cmpl %r8d, %r14d
jg 0x287824
movslq 0xe8(%rdx), %rax
imulq 0x18(%rsp), %rax
imulq 0x10(%rsp), %rax
movslq 0xe4(%rdx), %rdx
movslq %r14d, %rcx
imulq %rdx, %rcx
addq 0x30(%rsp), %rcx
addq %rax, %rcx
xorl %edx, %edx
xorl %eax, %eax
cmpq %rdx, %rbx
je 0x28761a
movslq (%rbp,%rdx,4), %rsi
movsbl (%rcx,%rsi), %esi
movsbl (%r15,%rdx), %edi
imull %esi, %edi
addl %edi, %eax
incq %rdx
jmp 0x2875fd
movq 0x8(%rsp), %rdx
movq 0x1f8(%rdx), %rcx
movss (%rcx,%r9,4), %xmm1
xorps %xmm0, %xmm0
ucomiss %xmm1, %xmm0
je 0x28764d
movq 0x240(%rdx), %rcx
mulss (%rcx,%r9,4), %xmm1
movss 0x16763f(%rip), %xmm0 # 0x3eec88
divss %xmm1, %xmm0
cvtsi2ss %eax, %xmm4
mulss %xmm0, %xmm4
cmpl $0x0, 0x100(%rdx)
je 0x28766b
movq 0x1b0(%rdx), %rax
addss (%rax,%r9,4), %xmm4
movl 0x110(%rdx), %eax
decl %eax
cmpl $0x5, %eax
ja 0x287690
leaq 0x170dd9(%rip), %rcx # 0x3f8458
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
maxss 0x166980(%rip), %xmm4 # 0x3ee010
movaps %xmm4, %xmm0
jmp 0x2877ca
movaps %xmm4, %xmm0
movaps %xmm4, 0xd0(%rsp)
callq 0x5f410
addss 0x1675d8(%rip), %xmm0 # 0x3eec88
callq 0x5f200
callq 0x5f160
movq 0x38(%rsp), %r9
movq 0x28(%rsp), %r8
movq 0x8(%rsp), %rdx
mulss 0xd0(%rsp), %xmm0
jmp 0x2877ca
movq 0x118(%rdx), %rax
maxss (%rax), %xmm4
movss 0x4(%rax), %xmm1
ucomiss %xmm1, %xmm4
movaps %xmm4, %xmm0
ja 0x2877c7
jmp 0x2877ca
movss 0x169ab8(%rip), %xmm2 # 0x3f11b8
minss %xmm2, %xmm4
movaps %xmm4, %xmm0
xorps 0x166982(%rip), %xmm0 # 0x3ee090
cmpltss 0x169aa5(%rip), %xmm4 # 0x3f11bc
movaps %xmm4, %xmm1
andnps %xmm0, %xmm1
andps %xmm2, %xmm4
orps %xmm1, %xmm4
movaps %xmm4, %xmm0
callq 0x5f410
movq 0x38(%rsp), %r9
movq 0x28(%rsp), %r8
movq 0x8(%rsp), %rdx
movaps %xmm0, %xmm1
movss 0x167543(%rip), %xmm0 # 0x3eec88
addss %xmm0, %xmm1
divss %xmm1, %xmm0
jmp 0x2877ca
movq 0x118(%rdx), %rax
xorps %xmm0, %xmm0
cmpltss %xmm4, %xmm0
movaps %xmm0, %xmm1
movss 0x16751f(%rip), %xmm2 # 0x3eec88
andps %xmm2, %xmm1
movss (%rax), %xmm2
andnps %xmm2, %xmm0
orps %xmm1, %xmm0
mulss %xmm4, %xmm0
jmp 0x2877ca
movq 0x118(%rdx), %rax
movss (%rax), %xmm1
movss 0x4(%rax), %xmm2
movaps %xmm2, %xmm3
xorps 0x1668fa(%rip), %xmm3 # 0x3ee090
divss %xmm1, %xmm3
xorps %xmm0, %xmm0
ucomiss %xmm3, %xmm4
jb 0x2877ca
movss 0x1674de(%rip), %xmm0 # 0x3eec88
divss %xmm1, %xmm0
addss %xmm0, %xmm3
ucomiss %xmm3, %xmm4
ja 0x287690
mulss %xmm4, %xmm1
addss %xmm2, %xmm1
mulss %xmm4, %xmm1
movaps %xmm1, %xmm0
cmpl $0x65, 0x44(%rsp)
jl 0x287810
movq 0x288(%rdx), %rax
mulss (%rax,%r9,4), %xmm0
callq 0x5f2d0
movq 0x38(%rsp), %r9
movq 0x28(%rsp), %r8
movq 0x8(%rsp), %rdx
cvttss2si %xmm0, %eax
cmpl $-0x7e, %eax
pushq $-0x7f
popq %rcx
cmovll %ecx, %eax
cmpl $0x7f, %eax
pushq $0x7f
popq %rcx
cmovgel %ecx, %eax
movb %al, (%r12)
pushq $0x1
jmp 0x287818
movss %xmm0, (%r12)
pushq $0x4
popq %rax
addq %rax, %r12
incl %r14d
jmp 0x2875c7
movl 0xc0(%rsp), %eax
incl %eax
jmp 0x2875ac
incq %r9
movl 0x108(%rdx), %eax
addq %r13, 0x48(%rsp)
movq 0x58(%rsp), %r14
jmp 0x28754b
cltd
idivl %edi
movl %eax, %esi
movl %ecx, %eax
cltd
idivl %edi
movl %esi, %ecx
imull %r13d, %ecx
movl %eax, %edx
imull %ecx, %edx
movl %edx, 0x6c(%rsp)
movq $0x0, 0x50(%rsp)
testl %r13d, %r13d
movl $0x0, %r12d
cmovgl %r13d, %r12d
testl %esi, %esi
movl $0x0, %r15d
cmovgl %esi, %r15d
movslq %ecx, %rcx
movq %rcx, 0xf8(%rsp)
testl %eax, %eax
movl $0x0, %ecx
cmovgl %eax, %ecx
movq %rcx, 0x110(%rsp)
movslq %esi, %rcx
movq %rcx, 0xf0(%rsp)
cltq
movq %rax, 0xe8(%rsp)
xorl %ebx, %ebx
movq 0x8(%rsp), %rdx
movq 0x60(%rsp), %rcx
movslq %edi, %rax
cmpq %rax, %rbx
jge 0x287c4d
movl 0x6c(%rsp), %eax
imull %ebx, %eax
movq %rbx, %rsi
imulq 0xe8(%rsp), %rsi
movq %rsi, 0x108(%rsp)
cltq
movq %rax, 0x100(%rsp)
movq %rbx, 0x10(%rsp)
imulq 0xf0(%rsp), %rbx
xorl %eax, %eax
cmpq 0x110(%rsp), %rax
je 0x287c3a
movq %rax, %rsi
movq 0x108(%rsp), %rax
leaq (%rsi,%rax), %r9
movq 0x58(%rsp), %rax
movq 0x40(%rax), %rdi
movq %r9, 0xd0(%rsp)
imulq %r9, %rdi
imulq 0x10(%rax), %rdi
addq (%rax), %rdi
movq 0x168(%rdx), %rax
addq 0x100(%rsp), %rax
movq %rsi, 0x48(%rsp)
imulq 0xf8(%rsp), %rsi
addq %rax, %rsi
movq %rsi, 0x38(%rsp)
xorl %eax, %eax
cmpl %ecx, %eax
jg 0x287c2d
movl %eax, 0x30(%rsp)
xorl %r14d, %r14d
cmpl %r8d, %r14d
jg 0x287c1d
movq %rdi, 0x18(%rsp)
movslq 0x9c(%rsp), %rcx
movq %rdx, %r8
movq 0x80(%rsp), %rdx
movq 0xb0(%rsp), %rax
movslq 0xe8(%r8), %rsi
movslq 0x30(%rsp), %rdi
imulq %rsi, %rdi
imulq %rdx, %rcx
imulq %rdi, %rcx
addq 0x70(%rsp), %rcx
imulq %rdx, %rax
movslq 0xe4(%r8), %rsi
movslq %r14d, %rdx
imulq %rsi, %rdx
addq %rcx, %rdx
xorl %esi, %esi
xorl %ecx, %ecx
movq 0x38(%rsp), %rdi
cmpq %r15, %rsi
je 0x287a13
leaq (%rsi,%rbx), %r8
imulq %rax, %r8
addq %rdx, %r8
xorl %r9d, %r9d
cmpq %r9, %r12
je 0x287a0b
movslq (%rbp,%r9,4), %r10
movsbl (%r8,%r10), %r10d
movsbl (%rdi,%r9), %r11d
imull %r10d, %r11d
addl %r11d, %ecx
incq %r9
jmp 0x2879eb
addq %r13, %rdi
incq %rsi
jmp 0x2879d8
movq 0x8(%rsp), %rdx
movq 0x1f8(%rdx), %rax
movq 0x10(%rsp), %rsi
movss (%rax,%rsi,4), %xmm1
xorps %xmm0, %xmm0
ucomiss %xmm1, %xmm0
je 0x287a49
movq 0x240(%rdx), %rax
mulss (%rax,%rsi,4), %xmm1
movss 0x167243(%rip), %xmm0 # 0x3eec88
divss %xmm1, %xmm0
cvtsi2ss %ecx, %xmm4
mulss %xmm0, %xmm4
cmpl $0x0, 0x100(%rdx)
je 0x287a6e
movq 0x1b0(%rdx), %rax
movq 0xd0(%rsp), %rcx
addss (%rax,%rcx,4), %xmm4
movl 0x110(%rdx), %eax
decl %eax
cmpl $0x5, %eax
ja 0x287a93
leaq 0x1709be(%rip), %rcx # 0x3f8440
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
maxss 0x16657d(%rip), %xmm4 # 0x3ee010
movaps %xmm4, %xmm0
jmp 0x287bc3
movaps %xmm4, %xmm0
movaps %xmm4, 0xc0(%rsp)
callq 0x5f410
addss 0x1671d5(%rip), %xmm0 # 0x3eec88
callq 0x5f200
callq 0x5f160
movq 0x10(%rsp), %rsi
movq 0x8(%rsp), %rdx
mulss 0xc0(%rsp), %xmm0
jmp 0x287bc3
movq 0x118(%rdx), %rax
maxss (%rax), %xmm4
movss 0x4(%rax), %xmm1
ucomiss %xmm1, %xmm4
movaps %xmm4, %xmm0
ja 0x287bc0
jmp 0x287bc3
movss 0x1696ba(%rip), %xmm2 # 0x3f11b8
minss %xmm2, %xmm4
movaps %xmm4, %xmm0
xorps 0x166584(%rip), %xmm0 # 0x3ee090
cmpltss 0x1696a7(%rip), %xmm4 # 0x3f11bc
movaps %xmm4, %xmm1
andnps %xmm0, %xmm1
andps %xmm2, %xmm4
orps %xmm1, %xmm4
movaps %xmm4, %xmm0
callq 0x5f410
movq 0x10(%rsp), %rsi
movq 0x8(%rsp), %rdx
movaps %xmm0, %xmm1
movss 0x16714a(%rip), %xmm0 # 0x3eec88
addss %xmm0, %xmm1
divss %xmm1, %xmm0
jmp 0x287bc3
movq 0x118(%rdx), %rax
xorps %xmm0, %xmm0
cmpltss %xmm4, %xmm0
movaps %xmm0, %xmm1
movss 0x167126(%rip), %xmm2 # 0x3eec88
andps %xmm2, %xmm1
movss (%rax), %xmm2
andnps %xmm2, %xmm0
orps %xmm1, %xmm0
mulss %xmm4, %xmm0
jmp 0x287bc3
movq 0x118(%rdx), %rax
movss (%rax), %xmm1
movss 0x4(%rax), %xmm2
movaps %xmm2, %xmm3
xorps 0x166501(%rip), %xmm3 # 0x3ee090
divss %xmm1, %xmm3
xorps %xmm0, %xmm0
ucomiss %xmm3, %xmm4
jb 0x287bc3
movss 0x1670e5(%rip), %xmm0 # 0x3eec88
divss %xmm1, %xmm0
addss %xmm0, %xmm3
ucomiss %xmm3, %xmm4
ja 0x287a93
mulss %xmm4, %xmm1
addss %xmm2, %xmm1
mulss %xmm4, %xmm1
movaps %xmm1, %xmm0
cmpl $0x65, 0x44(%rsp)
jl 0x287c01
movq 0x288(%rdx), %rax
mulss (%rax,%rsi,4), %xmm0
callq 0x5f2d0
movq 0x8(%rsp), %rdx
cvttss2si %xmm0, %eax
cmpl $-0x7e, %eax
pushq $-0x7f
popq %rcx
cmovll %ecx, %eax
cmpl $0x7f, %eax
pushq $0x7f
popq %rcx
cmovgel %ecx, %eax
movq 0x18(%rsp), %rdi
movb %al, (%rdi)
pushq $0x1
jmp 0x287c0c
movq 0x18(%rsp), %rdi
movss %xmm0, (%rdi)
pushq $0x4
popq %rax
movq 0x28(%rsp), %r8
addq %rax, %rdi
incl %r14d
jmp 0x287974
movl 0x30(%rsp), %eax
incl %eax
movq 0x60(%rsp), %rcx
jmp 0x287965
movq 0x48(%rsp), %rax
incq %rax
jmp 0x287904
movq 0x10(%rsp), %rbx
incq %rbx
movl 0x108(%rdx), %edi
jmp 0x2878c3
leaq 0x160(%rsp), %rdi
callq 0x624be
movq 0x50(%rsp), %rbx
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x287c90
lock
decl (%rax)
jne 0x287c90
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x287c88
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x287c90
movq %rsi, %rdi
callq 0x5f3e0
movq 0x120(%rsp), %rax
testq %rax, %rax
je 0x2871a9
lock
decl (%rax)
jne 0x2871a9
movq 0x118(%rsp), %rsi
movq 0x138(%rsp), %rdi
testq %rdi, %rdi
je 0x287cca
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2871a9
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2871a9
jmp 0x287da2
jmp 0x287da2
jmp 0x287da2
movq %rax, %rbx
leaq 0x160(%rsp), %rdi
callq 0x624be
jmp 0x287d30
jmp 0x287d2d
movq %rax, %rbx
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x287d61
lock
decl (%rax)
jne 0x287d61
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x287d51
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x287d61
jmp 0x287da2
movq %rax, %rbx
jmp 0x287d61
movq %rax, %rbx
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x287d61
lock
decl (%rax)
jne 0x287d61
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
jne 0x287d5b
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x287d61
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x120(%rsp), %rax
testq %rax, %rax
je 0x287d98
lock
decl (%rax)
jne 0x287d98
movq 0x118(%rsp), %rsi
movq 0x138(%rsp), %rdi
testq %rdi, %rdi
jne 0x287d92
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x287d98
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
jmp 0x287da2
movq %rax, %rdi
callq 0x61d68
| /csukuangfj[P]ncnn/src/layer/convolutiondepthwise.cpp |
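Note on the jump-table dispatch seen repeatedly in the listing above: each `jmpq *%rax` selects one scalar activation branch (maxss against zero for ReLU, maxss/minss on two params for clip, the exp-call chain for sigmoid, and a bounded (x*a+b)*x tail for hard-swish). A minimal C++ sketch of such a helper, assuming ncnn's usual activation_type ordering (1=ReLU, 2=Leaky ReLU, 3=Clip, 4=Sigmoid); the type-to-branch mapping is our assumption, not read off this listing:

#include <algorithm>
#include <cmath>

// Hypothetical scalar activation helper mirroring the inlined branches above.
// params stands in for ncnn's activation_params array.
static float activation_ss_sketch(float v, int type, const float* params)
{
    switch (type)
    {
    case 1: return std::max(v, 0.f);                             // ReLU
    case 2: return v > 0.f ? v : v * params[0];                  // Leaky ReLU
    case 3: return std::min(std::max(v, params[0]), params[1]);  // Clip
    case 4: return 1.f / (1.f + std::exp(-v));                   // Sigmoid
    default: return v;                                           // pass-through
    }
}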
ncnn::convolutiondepthwise(ncnn::Mat const&, ncnn::Mat&, ncnn::Mat const&, ncnn::Mat const&, int, int, int, int, int, int, int, int, ncnn::Mat const&, ncnn::Option const&) | static int convolutiondepthwise(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data, const Mat& bias_data, int kernel_w, int kernel_h, int stride_w, int stride_h, int dilation_w, int dilation_h, int group, int activation_type, const Mat& activation_params, const Option& opt)
{
const int w = bottom_blob.w;
const int inch = bottom_blob.c;
const int outw = top_blob.w;
const int outh = top_blob.h;
const int outch = top_blob.c;
const int bias_term = bias_data.empty() ? 0 : 1;
const int maxk = kernel_w * kernel_h;
// kernel offsets
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
int gap = w * dilation_h - kernel_w * dilation_w;
for (int i = 0; i < kernel_h; i++)
{
for (int j = 0; j < kernel_w; j++)
{
space_ofs[p1] = p2;
p1++;
p2 += dilation_w;
}
p2 += gap;
}
}
// depth-wise
if (inch == group && group == outch)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
float* outptr = top_blob.channel(g);
const float* kptr = (const float*)weight_data + maxk * g;
const Mat m = bottom_blob.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
float sum = 0.f;
if (bias_term)
sum = bias_data[g];
const float* sptr = m.row(i * stride_h) + j * stride_w;
for (int k = 0; k < maxk; k++)
{
float val = sptr[space_ofs[k]];
float w = kptr[k];
sum += val * w;
}
outptr[j] = activation_ss(sum, activation_type, activation_params);
}
outptr += outw;
}
}
}
else
{
// group convolution
const int inch_g = inch / group;
const int outch_g = outch / group;
#ifdef _WIN32
#pragma omp parallel for num_threads(opt.num_threads)
#else
#pragma omp parallel for collapse(2) num_threads(opt.num_threads)
#endif
for (int g = 0; g < group; g++)
{
for (int p = 0; p < outch_g; p++)
{
float* outptr = top_blob.channel(g * outch_g + p);
const float* weight_data_ptr = (const float*)weight_data + maxk * inch_g * outch_g * g;
// shadowed variables to reduce the number of OpenMP task arguments
const int outw = top_blob.w;
const int outh = top_blob.h;
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
float sum = 0.f;
if (bias_term)
sum = bias_data[outch_g * g + p];
const float* kptr = weight_data_ptr + maxk * inch_g * p;
for (int q = 0; q < inch_g; q++)
{
const Mat m = bottom_blob.channel(inch_g * g + q);
const float* sptr = m.row(i * stride_h) + j * stride_w;
for (int k = 0; k < maxk; k++)
{
float val = sptr[space_ofs[k]];
float w = kptr[k];
sum += val * w;
}
kptr += maxk;
}
outptr[j] = activation_ss(sum, activation_type, activation_params);
}
outptr += outw;
}
}
}
}
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x128, %rsp # imm = 0x128
movl %r9d, %ebp
movl %r8d, %r15d
movq %rdx, 0xa0(%rsp)
movq %rsi, 0x58(%rsp)
movl 0x170(%rsp), %ebx
movq %rdi, 0x28(%rsp)
movl 0x2c(%rdi), %r12d
cmpq $0x0, (%rcx)
movq %rcx, 0xa8(%rsp)
je 0x287e14
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
sete 0xf(%rsp)
jmp 0x287e19
movb $0x1, 0xf(%rsp)
movl 0x180(%rsp), %r13d
movq 0x28(%rsp), %rax
movl 0x38(%rax), %eax
movl %eax, 0x40(%rsp)
movq 0x58(%rsp), %rax
movslq 0x2c(%rax), %rcx
movq %rcx, 0x48(%rsp)
movl 0x30(%rax), %ecx
movq %rcx, 0x18(%rsp)
movl 0x38(%rax), %eax
movl %eax, 0x10(%rsp)
movl %ebp, %eax
imull %r15d, %eax
movslq %eax, %r14
leaq 0x110(%rsp), %rdi
leaq 0x27(%rsp), %rdx
movq %r14, %rsi
callq 0x73bbe
movl %ebp, %r9d
imull 0x178(%rsp), %r12d
movl %ebx, %eax
imull %r15d, %eax
subl %eax, %r12d
xorl %eax, %eax
testl %r15d, %r15d
cmovlel %eax, %r15d
testl %ebp, %ebp
cmovlel %eax, %r9d
leaq 0x110(%rsp), %rcx
movq (%rcx), %rbp
movl %r15d, %ecx
xorl %edx, %edx
xorl %esi, %esi
cmpl %r9d, %eax
je 0x287ec8
movslq %ecx, %rdi
movslq %esi, %r8
leal (%r15,%r8), %esi
cmpq %r8, %rdi
je 0x287ebe
movl %edx, (%rbp,%r8,4)
incq %r8
addl %ebx, %edx
jmp 0x287ead
addl %r12d, %edx
incl %eax
addl %r15d, %ecx
jmp 0x287e9e
movl 0x40(%rsp), %eax
movl %eax, %edx
xorl %r13d, %edx
movl 0x10(%rsp), %esi
movl %esi, %ecx
xorl %r13d, %ecx
orl %edx, %ecx
movb 0xf(%rsp), %r10b
movl 0x188(%rsp), %r11d
jne 0x288230
movq 0x58(%rsp), %rax
movq (%rax), %rcx
movq %rcx, 0x80(%rsp)
movq 0x40(%rax), %rcx
imulq 0x10(%rax), %rcx
movq %rcx, 0x78(%rsp)
movq %r13, %rdx
movq 0xa0(%rsp), %rax
movq (%rax), %r13
movq 0x28(%rsp), %rsi
movslq 0x2c(%rsi), %rax
movq (%rsi), %rcx
movq %rcx, 0x70(%rsp)
movq 0x10(%rsi), %rcx
movq 0x40(%rsi), %rsi
imulq %rcx, %rsi
movq %rsi, 0x68(%rsp)
imulq %rcx, %rax
xorl %esi, %esi
testl %r14d, %r14d
movl $0x0, %r12d
cmovgl %r14d, %r12d
movslq 0x160(%rsp), %rdi
movq 0x48(%rsp), %rcx
testl %ecx, %ecx
movl $0x0, %r8d
cmovgl %ecx, %r8d
movslq 0x168(%rsp), %rcx
movq 0x18(%rsp), %r15
testl %r15d, %r15d
cmovlel %esi, %r15d
movq %r15, 0x18(%rsp)
testl %edx, %edx
cmovlel %esi, %edx
movq %rdx, 0x60(%rsp)
imulq %rax, %rcx
movq %rcx, 0x88(%rsp)
shlq $0x2, %r14
leaq 0x1704e5(%rip), %r9 # 0x3f8488
movq %rdi, 0x28(%rsp)
movq %r8, 0x50(%rsp)
cmpq 0x60(%rsp), %rsi
je 0x288651
movq 0x78(%rsp), %rax
imulq %rsi, %rax
addq 0x80(%rsp), %rax
movq %rax, 0x10(%rsp)
movq 0x68(%rsp), %rax
imulq %rsi, %rax
addq 0x70(%rsp), %rax
movq %rax, 0xb0(%rsp)
xorl %ebx, %ebx
movq %rsi, 0x30(%rsp)
cmpq 0x18(%rsp), %rbx
je 0x288225
movq 0xa8(%rsp), %rax
movq (%rax), %rax
movq %rax, 0x40(%rsp)
movq %rbx, 0x90(%rsp)
imulq 0x88(%rsp), %rbx
addq 0xb0(%rsp), %rbx
movq 0x190(%rsp), %rax
movq (%rax), %rax
movq %rax, 0x38(%rsp)
xorl %r15d, %r15d
cmpq %r8, %r15
je 0x288202
xorps %xmm4, %xmm4
testb %r10b, %r10b
jne 0x28804d
movq 0x40(%rsp), %rax
movss (%rax,%rsi,4), %xmm4
movq %r15, %rax
imulq %rdi, %rax
leaq (%rbx,%rax,4), %rax
xorl %ecx, %ecx
cmpq %rcx, %r12
je 0x288079
movslq (%rbp,%rcx,4), %rdx
movss (%r13,%rcx,4), %xmm0
mulss (%rax,%rdx,4), %xmm0
addss %xmm0, %xmm4
incq %rcx
jmp 0x28805a
leal -0x1(%r11), %eax
cmpl $0x5, %eax
ja 0x288093
movslq (%r9,%rax,4), %rax
addq %r9, %rax
jmpq *%rax
maxss 0x165f7d(%rip), %xmm4 # 0x3ee010
movaps %xmm4, %xmm0
jmp 0x2881ef
movaps %xmm4, %xmm0
movaps %xmm4, 0xc0(%rsp)
callq 0x5f410
addss 0x166bd5(%rip), %xmm0 # 0x3eec88
callq 0x5f200
callq 0x5f160
leaq 0x1703c4(%rip), %r9 # 0x3f8488
movq 0x50(%rsp), %r8
movq 0x28(%rsp), %rdi
movq 0x30(%rsp), %rsi
movl 0x188(%rsp), %r11d
movb 0xf(%rsp), %r10b
mulss 0xc0(%rsp), %xmm0
jmp 0x2881ef
movq 0x38(%rsp), %rax
maxss (%rax), %xmm4
movss 0x4(%rax), %xmm1
ucomiss %xmm1, %xmm4
movaps %xmm4, %xmm0
ja 0x2881ec
jmp 0x2881ef
movss 0x1690a3(%rip), %xmm2 # 0x3f11b8
minss %xmm2, %xmm4
movaps %xmm4, %xmm0
xorps 0x165f6d(%rip), %xmm0 # 0x3ee090
cmpltss 0x169090(%rip), %xmm4 # 0x3f11bc
movaps %xmm4, %xmm1
andnps %xmm0, %xmm1
andps %xmm2, %xmm4
orps %xmm1, %xmm4
movaps %xmm4, %xmm0
callq 0x5f410
leaq 0x170341(%rip), %r9 # 0x3f8488
movq 0x50(%rsp), %r8
movq 0x28(%rsp), %rdi
movq 0x30(%rsp), %rsi
movl 0x188(%rsp), %r11d
movb 0xf(%rsp), %r10b
movaps %xmm0, %xmm1
movss 0x166b1a(%rip), %xmm0 # 0x3eec88
addss %xmm0, %xmm1
divss %xmm1, %xmm0
jmp 0x2881ef
xorps %xmm0, %xmm0
cmpltss %xmm4, %xmm0
movaps %xmm0, %xmm1
movss 0x166afd(%rip), %xmm2 # 0x3eec88
andps %xmm2, %xmm1
movq 0x38(%rsp), %rax
movss (%rax), %xmm2
andnps %xmm2, %xmm0
orps %xmm1, %xmm0
mulss %xmm4, %xmm0
jmp 0x2881ef
movq 0x38(%rsp), %rax
movss (%rax), %xmm1
movss 0x4(%rax), %xmm2
movaps %xmm2, %xmm3
xorps 0x165ed5(%rip), %xmm3 # 0x3ee090
divss %xmm1, %xmm3
xorps %xmm0, %xmm0
ucomiss %xmm3, %xmm4
jb 0x2881ef
movss 0x166ab9(%rip), %xmm0 # 0x3eec88
divss %xmm1, %xmm0
addss %xmm0, %xmm3
ucomiss %xmm3, %xmm4
ja 0x288093
mulss %xmm4, %xmm1
addss %xmm2, %xmm1
mulss %xmm4, %xmm1
movaps %xmm1, %xmm0
movq 0x10(%rsp), %rax
movss %xmm0, (%rax,%r15,4)
incq %r15
jmp 0x288032
movq 0x48(%rsp), %rax
movq 0x10(%rsp), %rcx
leaq (%rcx,%rax,4), %rcx
movq %rcx, 0x10(%rsp)
movq 0x90(%rsp), %rbx
incq %rbx
jmp 0x287feb
incq %rsi
addq %r14, %r13
jmp 0x287fad
cltd
idivl %r13d
movl %eax, %ecx
movl %esi, %eax
cltd
idivl %r13d
movl %ecx, %edx
imull %r14d, %edx
xorl %esi, %esi
testl %r14d, %r14d
movl $0x0, %ebx
cmovgl %r14d, %ebx
testl %ecx, %ecx
movl $0x0, %r12d
cmovgl %ecx, %r12d
movslq 0x160(%rsp), %rdi
movq %rdi, 0x50(%rsp)
movslq 0x168(%rsp), %rdi
movq %rdi, 0x70(%rsp)
testl %eax, %eax
movl $0x0, %edi
cmovgl %eax, %edi
movq %rdi, 0x68(%rsp)
movslq %edx, %rdx
movslq %ecx, %rcx
cltq
testl %r13d, %r13d
cmovlel %esi, %r13d
movq %rax, 0xd8(%rsp)
movq %rcx, 0xe0(%rsp)
imull %ecx, %eax
imull %r14d, %eax
movl %eax, 0x98(%rsp)
shlq $0x2, %rdx
movq %rdx, 0xf0(%rsp)
shlq $0x2, %r14
xorl %eax, %eax
xorl %ecx, %ecx
movq %r13, 0x60(%rsp)
cmpq %r13, %rcx
je 0x288651
movl %eax, 0x9c(%rsp)
movslq %eax, %rdx
shlq $0x2, %rdx
movq %rcx, %rsi
movq 0x58(%rsp), %rax
movq 0x40(%rax), %rdi
imulq 0x10(%rax), %rdi
movq %rdi, 0x100(%rsp)
imulq 0xd8(%rsp), %rsi
movq %rsi, 0x108(%rsp)
movslq 0x2c(%rax), %rsi
movq %rcx, 0xe8(%rsp)
movq %rcx, %r13
imulq 0xe0(%rsp), %r13
testl %esi, %esi
movl $0x0, %edi
movq %rsi, 0x78(%rsp)
cmovgl %esi, %edi
movq %rdi, 0x40(%rsp)
movl 0x30(%rax), %esi
testl %esi, %esi
movl $0x0, %ecx
cmovlel %ecx, %esi
movq %rsi, 0x88(%rsp)
movq (%rax), %rax
movq %rax, 0xf8(%rsp)
movq 0xa0(%rsp), %rax
addq (%rax), %rdx
movq %rdx, 0x30(%rsp)
xorl %ecx, %ecx
cmpq 0x68(%rsp), %rcx
je 0x28862e
movq 0x108(%rsp), %rax
leaq (%rcx,%rax), %rdx
movq 0x100(%rsp), %rsi
movq %rdx, 0x90(%rsp)
imulq %rdx, %rsi
addq 0xf8(%rsp), %rsi
movq %rsi, 0x10(%rsp)
xorl %edx, %edx
movq %rcx, 0x80(%rsp)
cmpq 0x88(%rsp), %rdx
je 0x288614
movq 0xa8(%rsp), %rax
movq (%rax), %rax
movq %rax, 0xc0(%rsp)
movq %rdx, 0x48(%rsp)
imulq 0x70(%rsp), %rdx
movq %rdx, 0x38(%rsp)
movq 0x190(%rsp), %rax
movq (%rax), %rax
movq %rax, 0x18(%rsp)
xorl %r15d, %r15d
cmpq 0x40(%rsp), %r15
je 0x2885ec
xorps %xmm4, %xmm4
testb %r10b, %r10b
jne 0x288418
movq 0x90(%rsp), %rax
movq 0xc0(%rsp), %rcx
movss (%rcx,%rax,4), %xmm4
movq 0x28(%rsp), %rsi
movslq 0x2c(%rsi), %rcx
imulq 0x38(%rsp), %rcx
movq 0x10(%rsi), %rdx
movq 0x40(%rsi), %rax
imulq %rdx, %rax
imulq %rdx, %rcx
addq (%rsi), %rcx
movq %r15, %rdx
imulq 0x50(%rsp), %rdx
leaq (%rcx,%rdx,4), %rcx
movq 0x30(%rsp), %rdx
xorl %esi, %esi
cmpq %r12, %rsi
je 0x288488
leaq (%rsi,%r13), %rdi
imulq %rax, %rdi
addq %rcx, %rdi
xorl %r8d, %r8d
cmpq %r8, %rbx
je 0x288480
movslq (%rbp,%r8,4), %r9
movss (%rdx,%r8,4), %xmm0
mulss (%rdi,%r9,4), %xmm0
addss %xmm0, %xmm4
incq %r8
jmp 0x288461
incq %rsi
addq %r14, %rdx
jmp 0x28844e
leal -0x1(%r11), %eax
cmpl $0x5, %eax
ja 0x2884a9
leaq 0x16ffd8(%rip), %rcx # 0x3f8470
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
maxss 0x165b67(%rip), %xmm4 # 0x3ee010
movaps %xmm4, %xmm0
jmp 0x2885d9
movaps %xmm4, %xmm0
movaps %xmm4, 0xb0(%rsp)
callq 0x5f410
addss 0x1667bf(%rip), %xmm0 # 0x3eec88
callq 0x5f200
callq 0x5f160
movl 0x188(%rsp), %r11d
movb 0xf(%rsp), %r10b
mulss 0xb0(%rsp), %xmm0
jmp 0x2885d9
movq 0x18(%rsp), %rax
maxss (%rax), %xmm4
movss 0x4(%rax), %xmm1
ucomiss %xmm1, %xmm4
movaps %xmm4, %xmm0
ja 0x2885d6
jmp 0x2885d9
movss 0x168ca3(%rip), %xmm2 # 0x3f11b8
minss %xmm2, %xmm4
movaps %xmm4, %xmm0
xorps 0x165b6d(%rip), %xmm0 # 0x3ee090
cmpltss 0x168c90(%rip), %xmm4 # 0x3f11bc
movaps %xmm4, %xmm1
andnps %xmm0, %xmm1
andps %xmm2, %xmm4
orps %xmm1, %xmm4
movaps %xmm4, %xmm0
callq 0x5f410
movl 0x188(%rsp), %r11d
movb 0xf(%rsp), %r10b
movaps %xmm0, %xmm1
movss 0x166730(%rip), %xmm0 # 0x3eec88
addss %xmm0, %xmm1
divss %xmm1, %xmm0
jmp 0x2885d9
xorps %xmm0, %xmm0
cmpltss %xmm4, %xmm0
movaps %xmm0, %xmm1
movss 0x166713(%rip), %xmm2 # 0x3eec88
andps %xmm2, %xmm1
movq 0x18(%rsp), %rax
movss (%rax), %xmm2
andnps %xmm2, %xmm0
orps %xmm1, %xmm0
mulss %xmm4, %xmm0
jmp 0x2885d9
movq 0x18(%rsp), %rax
movss (%rax), %xmm1
movss 0x4(%rax), %xmm2
movaps %xmm2, %xmm3
xorps 0x165aeb(%rip), %xmm3 # 0x3ee090
divss %xmm1, %xmm3
xorps %xmm0, %xmm0
ucomiss %xmm3, %xmm4
jb 0x2885d9
movss 0x1666cf(%rip), %xmm0 # 0x3eec88
divss %xmm1, %xmm0
addss %xmm0, %xmm3
ucomiss %xmm3, %xmm4
ja 0x2884a9
mulss %xmm4, %xmm1
addss %xmm2, %xmm1
mulss %xmm4, %xmm1
movaps %xmm1, %xmm0
movq 0x10(%rsp), %rax
movss %xmm0, (%rax,%r15,4)
incq %r15
jmp 0x2883f0
movq 0x78(%rsp), %rax
movq 0x10(%rsp), %rcx
leaq (%rcx,%rax,4), %rcx
movq %rcx, 0x10(%rsp)
movq 0x48(%rsp), %rdx
incq %rdx
movq 0x80(%rsp), %rcx
jmp 0x2883ac
incq %rcx
movq 0x30(%rsp), %rax
addq 0xf0(%rsp), %rax
movq %rax, 0x30(%rsp)
jmp 0x28836a
movq 0xe8(%rsp), %rcx
incq %rcx
movl 0x9c(%rsp), %eax
addl 0x98(%rsp), %eax
movq 0x60(%rsp), %r13
jmp 0x2882cb
leaq 0x110(%rsp), %rdi
callq 0x624be
addq $0x128, %rsp # imm = 0x128
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
| /csukuangfj[P]ncnn/src/layer/convolutiondepthwise.cpp |
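The space_ofs table built at the top of convolutiondepthwise() linearizes the dilated kernel taps into flat offsets within one input channel: tap (i, j) lands at i*dilation_h*w + j*dilation_w. A self-contained sketch with assumed sizes (w=10, 3x3 kernel, dilation 2) running the same loop:

#include <cstdio>

int main()
{
    const int w = 10, kernel_w = 3, kernel_h = 3;
    const int dilation_w = 2, dilation_h = 2;
    int space_ofs[9];
    int p1 = 0, p2 = 0;
    int gap = w * dilation_h - kernel_w * dilation_w; // jump to the next kernel row
    for (int i = 0; i < kernel_h; i++)
    {
        for (int j = 0; j < kernel_w; j++)
        {
            space_ofs[p1++] = p2;
            p2 += dilation_w;
        }
        p2 += gap;
    }
    for (int k = 0; k < kernel_w * kernel_h; k++)
        printf("%d ", space_ofs[k]); // prints: 0 2 4 20 22 24 40 42 44
    return 0;
}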
ncnn::ConvolutionDepthWise_x86::create_pipeline(ncnn::Option const&) | int ConvolutionDepthWise_x86::create_pipeline(const Option& opt)
{
if (dynamic_weight)
return 0;
activation = create_activation_layer(activation_type, activation_params, opt);
#if NCNN_INT8
if (opt.use_int8_inference && weight_data.elemsize == (size_t)1u)
{
return create_pipeline_int8_x86(opt);
}
#endif
const int maxk = kernel_w * kernel_h;
int channels = (weight_data_size / group) / maxk / (num_output / group) * group;
// depth-wise
if (channels == group && group == num_output)
{
int elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
#if __AVX512F__
elempack = channels % 16 == 0 ? 16 : channels % 8 == 0 ? 8 : channels % 4 == 0 ? 4 : 1;
#elif __AVX__
elempack = channels % 8 == 0 ? 8 : channels % 4 == 0 ? 4 : 1;
#else
elempack = channels % 4 == 0 ? 4 : 1;
#endif
}
#endif // __SSE2__
#if __SSE2__
#if __AVX__
// pack16
#if __AVX512F__
if (elempack == 16)
{
Mat weight_data_r2 = weight_data.reshape(maxk, group);
convert_packing(weight_data_r2, weight_data_tm, 16, opt);
}
#endif // __AVX512F__
// pack8
if (elempack == 8)
{
Mat weight_data_r2 = weight_data.reshape(maxk, group);
convert_packing(weight_data_r2, weight_data_tm, 8, opt);
}
#endif // __AVX__
// pack4
if (elempack == 4)
{
Mat weight_data_r2 = weight_data.reshape(maxk, group);
convert_packing(weight_data_r2, weight_data_tm, 4, opt);
}
#endif // __SSE2__
if (elempack == 1)
{
// depth-wise specific
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
weight_data_tm = weight_data;
}
else if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
weight_data_tm = weight_data;
}
else
{
create_group_ops(opt);
}
}
if (opt.lightmode)
{
weight_data.release();
}
return 0;
}
// group convolution
create_group_ops(opt);
if (opt.lightmode)
{
weight_data.release();
}
return 0;
} | pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x50, %rsp
movq (%rdi), %rax
movq -0x18(%rax), %r13
cmpl $0x0, 0x160(%rdi,%r13)
je 0x288dd5
xorl %eax, %eax
addq $0x50, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
retq
movq %rsi, %r14
movq %rdi, %rbx
movl 0x110(%rdi,%r13), %ecx
decl %ecx
cmpl $0x5, %ecx
ja 0x289171
leaq 0x16f6cb(%rip), %rax # 0x3f84c0
movslq (%rax,%rcx,4), %rcx
addq %rax, %rcx
jmpq *%rcx
pushq $0x1a
popq %rdi
callq 0x782bf
movq %rax, %r15
leaq 0x8(%rsp), %r12
movq %r12, %rdi
callq 0x71548
movq (%r15), %rax
movq %r15, %rdi
movq %r12, %rsi
callq *0x10(%rax)
jmp 0x288f68
pushq $0x47
popq %rdi
callq 0x782bf
movq %rax, %r15
leaq 0x8(%rsp), %r12
movq %r12, %rdi
callq 0x71548
movq (%r15), %rax
movq %r15, %rdi
movq %r12, %rsi
callq *0x10(%rax)
jmp 0x288f68
pushq $0x36
popq %rdi
callq 0x782bf
movq %rax, %r15
leaq 0x8(%rsp), %r12
movq %r12, %rdi
callq 0x71548
movq 0x118(%rbx,%r13), %rax
movss (%rax), %xmm0
movq %r12, %rdi
xorl %esi, %esi
callq 0x71952
movq 0x118(%rbx,%r13), %rax
movss 0x4(%rax), %xmm0
leaq 0x8(%rsp), %rdi
pushq $0x1
popq %rsi
callq 0x71952
movq (%r15), %rax
leaq 0x8(%rsp), %rsi
movq %r15, %rdi
callq *0x10(%rax)
jmp 0x288f68
pushq $0x1e
popq %rdi
callq 0x782bf
movq %rax, %r15
leaq 0x8(%rsp), %r12
movq %r12, %rdi
callq 0x71548
movq (%r15), %rax
movq %r15, %rdi
movq %r12, %rsi
callq *0x10(%rax)
jmp 0x288f68
pushq $0x1a
popq %rdi
callq 0x782bf
movq %rax, %r15
leaq 0x8(%rsp), %r12
movq %r12, %rdi
callq 0x71548
movq 0x118(%rbx,%r13), %rax
movss (%rax), %xmm0
movq %r12, %rdi
xorl %esi, %esi
callq 0x71952
movq (%r15), %rax
leaq 0x8(%rsp), %rsi
movq %r15, %rdi
callq *0x10(%rax)
jmp 0x288f68
pushq $0x43
popq %rdi
callq 0x782bf
movq %rax, %r15
leaq 0x8(%rsp), %r12
movq %r12, %rdi
callq 0x71548
movq 0x118(%rbx,%r13), %rax
movss (%rax), %xmm0
movq %r12, %rdi
xorl %esi, %esi
callq 0x71952
movq 0x118(%rbx,%r13), %rax
movss 0x4(%rax), %xmm0
leaq 0x8(%rsp), %rdi
pushq $0x1
popq %rsi
callq 0x71952
movq (%r15), %rax
leaq 0x8(%rsp), %rsi
movq %r15, %rdi
callq *0x10(%rax)
leaq 0x8(%rsp), %rdi
callq 0x71614
movq (%r15), %rax
movq %r15, %rdi
movq %r14, %rsi
callq *0x20(%rax)
movq (%rbx), %rax
movq %r15, 0x8(%rbx)
movq -0x18(%rax), %r15
cmpb $0x1, 0x1e(%r14)
jne 0x288fab
cmpq $0x1, 0x178(%rbx,%r15)
jne 0x288fab
movq %rbx, %rdi
movq %r14, %rsi
callq 0x289278
jmp 0x288dc5
movl 0xd0(%rbx,%r15), %esi
movl 0xd4(%rbx,%r15), %r10d
movl 0xd8(%rbx,%r15), %r11d
movl %r11d, %r8d
imull %r10d, %r8d
movl 0x104(%rbx,%r15), %eax
movl 0x108(%rbx,%r15), %ecx
cltd
idivl %ecx
cltd
idivl %r8d
movl %eax, %edi
movl %esi, %eax
cltd
idivl %ecx
movl %eax, %r9d
movl %edi, %eax
cltd
idivl %r9d
cmpl %esi, %ecx
jne 0x28911e
imull %ecx, %eax
cmpl %ecx, %eax
jne 0x28911e
leaq (%rbx,%r15), %rsi
testb $0x3, %cl
jne 0x289075
cmpb $0x0, 0x27(%r14)
je 0x289075
addq $0x168, %rsi # imm = 0x168
leaq 0x8(%rsp), %r15
movq %r15, %rdi
movl %r8d, %edx
xorl %r8d, %r8d
callq 0x62e4e
leaq 0x28(%rbx), %rsi
pushq $0x4
popq %rdx
movq %r15, %rdi
movq %r14, %rcx
callq 0x64e3b
movq 0x10(%rsp), %rax
testq %rax, %rax
je 0x289129
lock
decl (%rax)
jne 0x289129
movq 0x8(%rsp), %rsi
movq 0x28(%rsp), %rdi
testq %rdi, %rdi
je 0x2891a8
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x289129
xorl $0x3, %r10d
xorl $0x3, %r11d
orl %r10d, %r11d
jne 0x28911e
cmpl $0x1, 0xdc(%rsi)
jne 0x2890aa
cmpl $0x1, 0xe0(%rsi)
jne 0x2890aa
cmpl $0x1, 0xe4(%rsi)
jne 0x2890aa
cmpl $0x1, 0xe8(%rsi)
je 0x2890ce
cmpl $0x1, 0xdc(%rsi)
jne 0x28911e
cmpl $0x1, 0xe0(%rsi)
jne 0x28911e
cmpl $0x2, 0xe4(%rsi)
jne 0x28911e
cmpl $0x2, 0xe8(%rsi)
jne 0x28911e
addq $0x168, %r15 # imm = 0x168
cmpq $0x28, %r15
je 0x289129
movq 0x8(%rbx,%r15), %rax
testq %rax, %rax
je 0x2890e8
lock
incl (%rax)
leaq 0x28(%rbx), %r12
movq 0x30(%rbx), %rax
testq %rax, %rax
je 0x2891bd
lock
decl (%rax)
jne 0x2891bd
movq 0x28(%rbx), %rsi
movq 0x48(%rbx), %rdi
testq %rdi, %rdi
je 0x2891b5
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2891bd
movq %rbx, %rdi
movq %r14, %rsi
callq 0x2894d8
cmpb $0x1, (%r14)
jne 0x288dc5
movq (%rbx), %rax
movq -0x18(%rax), %rax
leaq (%rbx,%rax), %r14
addq %rax, %rbx
addq $0x168, %rbx # imm = 0x168
movq 0x8(%rbx), %rax
testq %rax, %rax
je 0x289181
lock
decl (%rax)
jne 0x289181
movq 0x168(%r14), %rsi
movq 0x188(%r14), %rdi
testq %rdi, %rdi
je 0x289179
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x289181
xorl %r15d, %r15d
jmp 0x288f81
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x1a8(%r14)
xorps %xmm0, %xmm0
movups %xmm0, 0xc(%rbx)
movups %xmm0, (%rbx)
movups %xmm0, 0x190(%r14)
andl $0x0, 0x1a0(%r14)
jmp 0x288dc5
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x289129
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x68(%rbx)
xorps %xmm0, %xmm0
movups %xmm0, 0xc(%r12)
movups %xmm0, (%r12)
andl $0x0, 0x60(%rbx)
movups %xmm0, 0x50(%rbx)
movups (%rbx,%r15), %xmm0
movups %xmm0, 0x28(%rbx)
movq 0x10(%rbx,%r15), %rax
movq %rax, 0x38(%rbx)
movl 0x18(%rbx,%r15), %eax
movl %eax, 0x40(%rbx)
movq 0x20(%rbx,%r15), %rax
movq %rax, 0x48(%rbx)
movups 0x28(%rbx,%r15), %xmm0
movups %xmm0, 0x50(%rbx)
movl 0x38(%rbx,%r15), %eax
movl %eax, 0x60(%rbx)
movq 0x40(%rbx,%r15), %rax
movq %rax, 0x68(%rbx)
jmp 0x289129
jmp 0x289256
jmp 0x289262
jmp 0x289262
jmp 0x289262
movq %rax, %rbx
movq 0x10(%rsp), %rax
testq %rax, %rax
je 0x28926f
lock
decl (%rax)
jne 0x28926f
movq 0x8(%rsp), %rsi
movq 0x28(%rsp), %rdi
testq %rdi, %rdi
jne 0x28924e
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x28926f
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x28926f
movq %rax, %rdi
callq 0x61d68
jmp 0x289262
jmp 0x289262
movq %rax, %rbx
leaq 0x8(%rsp), %rdi
callq 0x71614
movq %rbx, %rdi
callq 0x5f340
nop
| /csukuangfj[P]ncnn/src/layer/x86/convolutiondepthwise_x86.cpp |
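The elempack selection in create_pipeline() is a divisibility ladder over the channel count. A small sketch with the compile-time __AVX512F__/__AVX__ tiers folded into runtime flags (the flags are our harness; the arithmetic is the source's):

// has_avx512/has_avx are assumed inputs here; the real code picks one chain
// at compile time.
static int choose_elempack(int channels, bool use_packing_layout,
                           bool has_avx512, bool has_avx)
{
    if (!use_packing_layout)
        return 1;
    if (has_avx512)
        return channels % 16 == 0 ? 16 : channels % 8 == 0 ? 8 : channels % 4 == 0 ? 4 : 1;
    if (has_avx)
        return channels % 8 == 0 ? 8 : channels % 4 == 0 ? 4 : 1;
    return channels % 4 == 0 ? 4 : 1; // plain SSE2
}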
ncnn::ConvolutionDepthWise_x86::create_pipeline_int8_x86(ncnn::Option const&) | int ConvolutionDepthWise_x86::create_pipeline_int8_x86(const Option& opt)
{
const int maxk = kernel_w * kernel_h;
int channels = (weight_data_size / group) / maxk / (num_output / group) * group;
// depth-wise
if (channels == group && group == num_output)
{
int elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
elempack = channels % 8 == 0 ? 8 : 1;
}
#endif // __SSE2__
if (elempack == 8)
{
Mat weight_data_r2 = weight_data.reshape(maxk, group);
convert_packing(weight_data_r2, weight_data_tm, 8, opt);
}
if (elempack == 1)
{
weight_data_tm = weight_data;
}
return 0;
}
// group convolution
create_group_ops(opt);
if (opt.lightmode)
{
weight_data.release();
}
return 0;
} | pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $0x48, %rsp
movq %rsi, %r15
movq %rdi, %rbx
movq (%rdi), %rax
movq -0x18(%rax), %r14
movl 0xd8(%rdi,%r14), %r8d
imull 0xd4(%rdi,%r14), %r8d
movl 0xd0(%rdi,%r14), %esi
movl 0x104(%rdi,%r14), %eax
movl 0x108(%rdi,%r14), %ecx
cltd
idivl %ecx
cltd
idivl %r8d
movl %eax, %edi
movl %esi, %eax
cltd
idivl %ecx
movl %eax, %r9d
movl %edi, %eax
cltd
idivl %r9d
cmpl %esi, %ecx
jne 0x28935a
imull %ecx, %eax
cmpl %ecx, %eax
jne 0x28935a
testb $0x7, %cl
jne 0x2893ad
cmpb $0x0, 0x27(%r15)
je 0x2893ad
addq %rbx, %r14
addq $0x168, %r14 # imm = 0x168
movq %rsp, %r12
movq %r12, %rdi
movq %r14, %rsi
movl %r8d, %edx
xorl %r8d, %r8d
callq 0x62e4e
addq $0x28, %rbx
pushq $0x8
popq %rdx
movq %r12, %rdi
movq %rbx, %rsi
movq %r15, %rcx
callq 0x64e3b
movq 0x8(%rsp), %rax
testq %rax, %rax
je 0x289487
lock
decl (%rax)
jne 0x289487
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
je 0x28941e
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x289487
movq %rbx, %rdi
movq %r15, %rsi
callq 0x2894d8
cmpb $0x1, (%r15)
jne 0x289487
movq (%rbx), %rax
movq -0x18(%rax), %rax
leaq (%rbx,%rax), %r14
addq %rax, %rbx
addq $0x168, %rbx # imm = 0x168
movq 0x8(%rbx), %rax
testq %rax, %rax
je 0x2893fa
lock
decl (%rax)
jne 0x2893fa
movq 0x168(%r14), %rsi
movq 0x188(%r14), %rdi
testq %rdi, %rdi
je 0x2893f2
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2893fa
addq $0x168, %r14 # imm = 0x168
cmpq $0x28, %r14
je 0x289487
movq 0x8(%rbx,%r14), %rax
testq %rax, %rax
je 0x2893cb
lock
incl (%rax)
leaq 0x28(%rbx), %r15
movq 0x30(%rbx), %rax
testq %rax, %rax
je 0x289430
lock
decl (%rax)
jne 0x289430
movq 0x28(%rbx), %rsi
movq 0x48(%rbx), %rdi
testq %rdi, %rdi
je 0x289428
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x289430
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x1a8(%r14)
xorps %xmm0, %xmm0
movups %xmm0, 0xc(%rbx)
movups %xmm0, (%rbx)
movups %xmm0, 0x190(%r14)
andl $0x0, 0x1a0(%r14)
jmp 0x289487
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x289487
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x68(%rbx)
xorps %xmm0, %xmm0
movups %xmm0, 0xc(%r15)
movups %xmm0, (%r15)
andl $0x0, 0x60(%rbx)
movups %xmm0, 0x50(%rbx)
movups (%rbx,%r14), %xmm0
movups %xmm0, 0x28(%rbx)
movq 0x10(%rbx,%r14), %rax
movq %rax, 0x38(%rbx)
movl 0x18(%rbx,%r14), %eax
movl %eax, 0x40(%rbx)
movq 0x20(%rbx,%r14), %rax
movq %rax, 0x48(%rbx)
movups 0x28(%rbx,%r14), %xmm0
movups %xmm0, 0x50(%rbx)
movl 0x38(%rbx,%r14), %eax
movl %eax, 0x60(%rbx)
movq 0x40(%rbx,%r14), %rax
movq %rax, 0x68(%rbx)
xorl %eax, %eax
addq $0x48, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
retq
jmp 0x2894cf
movq %rax, %rbx
movq 0x8(%rsp), %rax
testq %rax, %rax
je 0x2894c7
lock
decl (%rax)
jne 0x2894c7
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
jne 0x2894c1
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2894c7
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
nop
| /csukuangfj[P]ncnn/src/layer/x86/convolutiondepthwise_x86.cpp |
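Both create_pipeline paths recover channels from weight_data_size with the same integer arithmetic before testing channels == group && group == num_output. A worked example with assumed sizes (32-channel depthwise 3x3) showing why a true depthwise layer passes the test:

#include <cstdio>

int main()
{
    int group = 32, num_output = 32, maxk = 3 * 3;
    int weight_data_size = maxk * group; // depthwise weights: 9 * 32 = 288
    int channels = (weight_data_size / group) / maxk
                   / (num_output / group) * group; // (288/32)/9 / 1 * 32 = 32
    printf("%d\n", channels == group && group == num_output); // prints 1
    return 0;
}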
ncnn::ConvolutionDepthWise_x86::create_group_ops(ncnn::Option const&) | int ConvolutionDepthWise_x86::create_group_ops(const Option& opt)
{
// create Convolution op for each group
const int maxk = kernel_w * kernel_h;
int channels = (weight_data_size / group) / maxk / (num_output / group) * group;
for (int i = 0; i < (int)group_ops.size(); i++)
delete group_ops[i];
group_ops.clear();
const int channels_g = channels / group;
const int num_output_g = num_output / group;
group_ops.resize(group);
for (int g = 0; g < group; g++)
{
Mat weight_data_g = weight_data.range(maxk * channels_g * num_output_g * g, maxk * channels_g * num_output_g).clone();
Mat bias_data_g;
if (bias_term)
bias_data_g = bias_data.range(num_output_g * g, num_output_g);
ncnn::Layer* op = ncnn::create_layer(ncnn::LayerType::Convolution);
// set param
ncnn::ParamDict pd;
pd.set(0, num_output_g); // num_output
pd.set(1, kernel_w);
pd.set(11, kernel_h);
pd.set(2, dilation_w);
pd.set(12, dilation_h);
pd.set(3, stride_w);
pd.set(13, stride_h);
pd.set(4, 0); // pad_w
pd.set(14, 0); // pad_h
pd.set(5, bias_term);
pd.set(6, maxk * channels_g * num_output_g); // weight_data_size
pd.set(8, int8_scale_term);
pd.set(9, activation_type);
pd.set(10, activation_params);
op->load_param(pd);
// set weights
if (bias_term)
{
ncnn::Mat weights[5];
weights[0] = weight_data_g;
weights[1] = bias_data_g;
#if NCNN_INT8
if (int8_scale_term)
{
Mat weight_data_int8_scales_g(num_output_g);
weight_data_int8_scales_g.fill(weight_data_int8_scales[g]);
weights[2] = weight_data_int8_scales_g;
weights[3] = bottom_blob_int8_scales.range(g, 1);
}
if (int8_scale_term > 100)
{
weights[4] = top_blob_int8_scales.range(g, 1);
}
#endif
op->load_model(ModelBinFromMatArray(weights));
}
else
{
ncnn::Mat weights[4];
weights[0] = weight_data_g;
#if NCNN_INT8
if (int8_scale_term)
{
Mat weight_data_int8_scales_g(num_output_g);
weight_data_int8_scales_g.fill(weight_data_int8_scales[g]);
weights[1] = weight_data_int8_scales_g;
weights[2] = bottom_blob_int8_scales.range(g, 1);
}
if (int8_scale_term > 100)
{
weights[3] = top_blob_int8_scales.range(g, 1);
}
#endif
op->load_model(ModelBinFromMatArray(weights));
}
op->create_pipeline(opt);
group_ops[g] = op;
}
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x278, %rsp # imm = 0x278
movq %rsi, 0x250(%rsp)
movq %rdi, %r14
movq (%rdi), %rax
movq -0x18(%rax), %rdx
movl 0xd0(%rdi,%rdx), %ecx
movl 0xd8(%rdi,%rdx), %ebp
imull 0xd4(%rdi,%rdx), %ebp
movl 0x104(%rdi,%rdx), %eax
movl 0x108(%rdi,%rdx), %r15d
cltd
idivl %r15d
cltd
idivl %ebp
movl %eax, %esi
movl %ecx, %eax
cltd
idivl %r15d
movl %eax, %ecx
movl %esi, %eax
cltd
idivl %ecx
movl %eax, %ebx
leaq 0x10(%rdi), %rax
movq %rax, 0x1e0(%rsp)
xorl %r12d, %r12d
movq 0x10(%r14), %rax
movq 0x18(%r14), %rcx
movq %rcx, %rdx
subq %rax, %rdx
shrq $0x3, %rdx
movslq %edx, %rdx
cmpq %rdx, %r12
jge 0x289575
movq (%rax,%r12,8), %rdi
testq %rdi, %rdi
je 0x289570
movq (%rdi), %rax
callq *0x8(%rax)
incq %r12
jmp 0x289547
imull %r15d, %ebx
cmpq %rax, %rcx
je 0x289582
movq %rax, 0x18(%r14)
movq (%r14), %rax
movq -0x18(%rax), %rcx
movslq 0x108(%r14,%rcx), %rsi
movl %ebx, %eax
cltd
idivl %esi
movl %eax, %ebx
movl 0xd0(%r14,%rcx), %eax
cltd
idivl %esi
movl %eax, %r15d
movq 0x1e0(%rsp), %rdi
callq 0x6fbc2
leaq 0x118(%r14), %rax
movq %rax, 0x258(%rsp)
imull %ebp, %ebx
imull %r15d, %ebx
movl %ebx, 0x6c(%rsp)
movslq %ebx, %rax
movq %rax, 0x260(%rsp)
movslq %r15d, %rax
movq %rax, 0x248(%rsp)
pushq $0x1
popq %rbx
xorl %edx, %edx
movl %r15d, 0x14(%rsp)
movq (%r14), %rax
movq -0x18(%rax), %rax
movslq 0x108(%r14,%rax), %rcx
cmpq %rcx, %rdx
jge 0x28a2ad
movq %rdx, %rcx
movq %rdx, 0x8(%rsp)
movq 0x260(%rsp), %rdi
imulq %rdi, %rcx
movq 0x178(%r14,%rax), %rdx
imulq %rdx, %rcx
addq 0x168(%r14,%rax), %rcx
movl 0x180(%r14,%rax), %esi
movq 0x188(%r14,%rax), %rax
movq %rcx, 0x70(%rsp)
andq $0x0, 0x78(%rsp)
movq %rdx, 0x80(%rsp)
movl %esi, 0x88(%rsp)
movq %rax, 0x90(%rsp)
movl %ebx, 0x98(%rsp)
movl %edi, 0x9c(%rsp)
movabsq $0x100000001, %rax # imm = 0x100000001
movq %rax, 0xa0(%rsp)
movl %ebx, 0xa8(%rsp)
movq %rdi, 0xb0(%rsp)
leaq 0x200(%rsp), %rdi
leaq 0x70(%rsp), %rsi
xorl %edx, %edx
callq 0x624f0
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x2896d3
lock
decl (%rax)
jne 0x2896d3
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x2896cb
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2896d3
movq %rsi, %rdi
callq 0x5f3e0
movq (%r14), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0x100(%r14,%rax)
je 0x289743
movq 0x8(%rsp), %r15
movq 0x248(%rsp), %rcx
imulq %rcx, %r15
movq 0x1c0(%r14,%rax), %rsi
movq %rsi, 0x1f8(%rsp)
imulq %rsi, %r15
addq 0x1b0(%r14,%rax), %r15
movl 0x1c8(%r14,%rax), %edx
movl %edx, 0x1c(%rsp)
movq 0x1d0(%r14,%rax), %rax
movq %rax, 0x1f0(%rsp)
movl %ebx, %r13d
movl 0x14(%rsp), %eax
movl %eax, 0x18(%rsp)
movq %rcx, 0x1e8(%rsp)
jmp 0x28977d
xorl %r15d, %r15d
movq $0x0, 0x1f8(%rsp)
movl $0x0, 0x1c(%rsp)
movq $0x0, 0x1f0(%rsp)
xorl %r13d, %r13d
movl $0x0, 0x18(%rsp)
movq $0x0, 0x1e8(%rsp)
pushq $0x6
popq %rdi
callq 0x782bf
movq %rax, %rbp
leaq 0x268(%rsp), %r12
movq %r12, %rdi
callq 0x71548
movq %r12, %rdi
xorl %esi, %esi
movl 0x14(%rsp), %edx
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xd4(%r14,%rax), %edx
movq %r12, %rdi
movl %ebx, %esi
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xd8(%r14,%rax), %edx
movq %r12, %rdi
pushq $0xb
popq %rsi
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xdc(%r14,%rax), %edx
movq %r12, %rdi
pushq $0x2
popq %rsi
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xe0(%r14,%rax), %edx
movq %r12, %rdi
pushq $0xc
popq %rsi
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xe4(%r14,%rax), %edx
movq %r12, %rdi
pushq $0x3
popq %rsi
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xe8(%r14,%rax), %edx
movq %r12, %rdi
pushq $0xd
popq %rsi
callq 0x7193a
movq %r12, %rdi
pushq $0x4
popq %rsi
xorl %edx, %edx
callq 0x7193a
movq %r12, %rdi
pushq $0xe
popq %rsi
xorl %edx, %edx
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0x100(%r14,%rax), %edx
movq %r12, %rdi
pushq $0x5
popq %rsi
callq 0x7193a
movq %r12, %rdi
pushq $0x6
popq %rsi
movl 0x6c(%rsp), %edx
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0x10c(%r14,%rax), %edx
movq %r12, %rdi
pushq $0x8
popq %rsi
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0x110(%r14,%rax), %edx
movq %r12, %rdi
pushq $0x9
popq %rsi
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rdx
addq 0x258(%rsp), %rdx
movq %r12, %rdi
pushq $0xa
popq %rsi
callq 0x7196c
movq (%rbp), %rax
movq %rbp, %rdi
movq %r12, %rsi
callq *0x10(%rax)
movq (%r14), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0x100(%r14,%rax)
je 0x28995e
pushq $0x40
popq %rax
xorps %xmm0, %xmm0
andq $0x0, 0x70(%rsp,%rax)
movups %xmm0, 0x30(%rsp,%rax)
movups %xmm0, 0x3c(%rsp,%rax)
movups %xmm0, 0x50(%rsp,%rax)
movups %xmm0, 0x5c(%rsp,%rax)
addq $0x48, %rax
cmpq $0x1a8, %rax # imm = 0x1A8
jne 0x2898f7
movq 0x208(%rsp), %rax
testq %rax, %rax
je 0x28992d
lock
incl (%rax)
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x2899da
lock
decl (%rax)
jne 0x2899da
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x2899d2
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2899da
pushq $0x40
popq %rax
xorps %xmm0, %xmm0
andq $0x0, 0x70(%rsp,%rax)
movups %xmm0, 0x30(%rsp,%rax)
movups %xmm0, 0x3c(%rsp,%rax)
movups %xmm0, 0x50(%rsp,%rax)
movups %xmm0, 0x5c(%rsp,%rax)
addq $0x48, %rax
cmpq $0x160, %rax # imm = 0x160
jne 0x289964
movq 0x208(%rsp), %rax
testq %rax, %rax
je 0x28999a
lock
incl (%rax)
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x289d81
lock
decl (%rax)
jne 0x289d81
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x289d79
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x289d81
movq %rsi, %rdi
callq 0x5f3e0
movaps 0x200(%rsp), %xmm0
movaps %xmm0, 0x70(%rsp)
movq 0x210(%rsp), %rax
movq %rax, 0x80(%rsp)
movl 0x218(%rsp), %eax
movl %eax, 0x88(%rsp)
movq 0x220(%rsp), %rax
movq %rax, 0x90(%rsp)
movups 0x228(%rsp), %xmm0
movups %xmm0, 0x98(%rsp)
movl 0x238(%rsp), %eax
movl %eax, 0xa8(%rsp)
movq 0x240(%rsp), %rax
movq %rax, 0xb0(%rsp)
movq 0xc0(%rsp), %rax
testq %rax, %rax
je 0x289a7a
lock
decl (%rax)
jne 0x289a7a
movq 0xb8(%rsp), %rsi
movq 0xd8(%rsp), %rdi
testq %rdi, %rdi
je 0x289a72
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x289a7a
movq %rsi, %rdi
callq 0x5f3e0
movq %r15, 0xb8(%rsp)
andq $0x0, 0xc0(%rsp)
movq 0x1f8(%rsp), %rax
movq %rax, 0xc8(%rsp)
movl 0x1c(%rsp), %eax
movl %eax, 0xd0(%rsp)
movq 0x1f0(%rsp), %rax
movq %rax, 0xd8(%rsp)
movl %r13d, 0xe0(%rsp)
movl 0x18(%rsp), %eax
movl %eax, 0xe4(%rsp)
movl %r13d, 0xe8(%rsp)
movl %r13d, 0xec(%rsp)
movl %r13d, 0xf0(%rsp)
movq 0x1e8(%rsp), %rax
movq %rax, 0xf8(%rsp)
movq (%r14), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0x10c(%r14,%rax)
je 0x289d03
andq $0x0, 0x60(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, 0x20(%rsp)
movups %xmm0, 0x2c(%rsp)
leaq 0x40(%rsp), %rax
movups %xmm0, 0xc(%rax)
movaps %xmm0, (%rax)
leaq 0x20(%rsp), %rdi
movl 0x14(%rsp), %esi
pushq $0x4
popq %rdx
xorl %ecx, %ecx
callq 0x635fa
movq (%r14), %rax
movq -0x18(%rax), %rax
movq 0x1f8(%r14,%rax), %rax
movq 0x8(%rsp), %rcx
movss (%rax,%rcx,4), %xmm0
movl 0x58(%rsp), %eax
imull 0x60(%rsp), %eax
movq 0x20(%rsp), %rcx
testl %eax, %eax
movl $0x0, %edx
cmovlel %edx, %eax
xorl %edx, %edx
cmpl %edx, %eax
je 0x289b7a
movss %xmm0, (%rcx,%rdx,4)
incq %rdx
jmp 0x289b6c
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x289b87
lock
incl (%rax)
movq 0x108(%rsp), %rax
testq %rax, %rax
je 0x289bbe
lock
decl (%rax)
jne 0x289bbe
movq 0x100(%rsp), %rsi
movq 0x120(%rsp), %rdi
testq %rdi, %rdi
je 0x289bb6
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x289bbe
movq %rsi, %rdi
callq 0x5f3e0
movaps 0x20(%rsp), %xmm0
movaps %xmm0, 0x100(%rsp)
movq 0x30(%rsp), %rax
movq %rax, 0x110(%rsp)
movl 0x38(%rsp), %eax
movl %eax, 0x118(%rsp)
movq 0x40(%rsp), %rax
movq %rax, 0x120(%rsp)
movups 0x48(%rsp), %xmm0
movups %xmm0, 0x128(%rsp)
movl 0x58(%rsp), %eax
movl %eax, 0x138(%rsp)
movq 0x60(%rsp), %rax
movq %rax, 0x140(%rsp)
movq (%r14), %rax
movq -0x18(%rax), %rax
movq 0x250(%r14,%rax), %r15
movq %r15, %r13
imulq 0x8(%rsp), %r13
addq 0x240(%r14,%rax), %r13
movl 0x258(%r14,%rax), %r12d
movq 0x260(%r14,%rax), %rbx
movq 0x150(%rsp), %rax
testq %rax, %rax
je 0x289c7c
lock
decl (%rax)
jne 0x289c7c
movq 0x148(%rsp), %rsi
movq 0x168(%rsp), %rdi
testq %rdi, %rdi
je 0x289c74
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x289c7c
movq %rsi, %rdi
callq 0x5f3e0
movq %r13, 0x148(%rsp)
andq $0x0, 0x150(%rsp)
movq %r15, 0x158(%rsp)
movl %r12d, 0x160(%rsp)
movq %rbx, 0x168(%rsp)
movaps 0x16e784(%rip), %xmm0 # 0x3f8430
movaps %xmm0, 0x170(%rsp)
movl $0x1, 0x180(%rsp)
movq $0x1, 0x188(%rsp)
movq 0x28(%rsp), %rax
testq %rax, %rax
pushq $0x1
popq %rbx
je 0x289cfc
lock
decl (%rax)
jne 0x289cfc
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x289cf4
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x289cfc
movq %rsi, %rdi
callq 0x5f3e0
movq (%r14), %rax
movq -0x18(%rax), %rax
cmpl $0x65, 0x10c(%r14,%rax)
jl 0x289f17
movq 0x298(%r14,%rax), %r15
movq %r15, %r13
imulq 0x8(%rsp), %r13
addq 0x288(%r14,%rax), %r13
movl 0x2a0(%r14,%rax), %ebx
movq 0x2a8(%r14,%rax), %r12
movq 0x198(%rsp), %rax
testq %rax, %rax
je 0x289ec6
lock
decl (%rax)
jne 0x289ec6
movq 0x190(%rsp), %rsi
movq 0x1b0(%rsp), %rdi
testq %rdi, %rdi
je 0x289ebe
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x289ec6
movq %rsi, %rdi
callq 0x5f3e0
movaps 0x200(%rsp), %xmm0
movaps %xmm0, 0x70(%rsp)
movq 0x210(%rsp), %rax
movq %rax, 0x80(%rsp)
movl 0x218(%rsp), %eax
movl %eax, 0x88(%rsp)
movq 0x220(%rsp), %rax
movq %rax, 0x90(%rsp)
movups 0x228(%rsp), %xmm0
movups %xmm0, 0x98(%rsp)
movl 0x238(%rsp), %eax
movl %eax, 0xa8(%rsp)
movq 0x240(%rsp), %rax
movq %rax, 0xb0(%rsp)
movq (%r14), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0x10c(%r14,%rax)
je 0x28a0ef
andq $0x0, 0x60(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, 0x20(%rsp)
movups %xmm0, 0x2c(%rsp)
leaq 0x40(%rsp), %rax
movups %xmm0, 0xc(%rax)
movaps %xmm0, (%rax)
leaq 0x20(%rsp), %rdi
movl 0x14(%rsp), %esi
pushq $0x4
popq %rdx
xorl %ecx, %ecx
callq 0x635fa
movq (%r14), %rax
movq -0x18(%rax), %rax
movq 0x1f8(%r14,%rax), %rax
movq 0x8(%rsp), %rcx
movss (%rax,%rcx,4), %xmm0
movl 0x58(%rsp), %eax
imull 0x60(%rsp), %eax
movq 0x20(%rsp), %rcx
testl %eax, %eax
movl $0x0, %edx
cmovlel %edx, %eax
xorl %edx, %edx
cmpl %edx, %eax
je 0x289e73
movss %xmm0, (%rcx,%rdx,4)
incq %rdx
jmp 0x289e65
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x289e80
lock
incl (%rax)
movq 0xc0(%rsp), %rax
testq %rax, %rax
je 0x289fab
lock
decl (%rax)
jne 0x289fab
movq 0xb8(%rsp), %rsi
movq 0xd8(%rsp), %rdi
testq %rdi, %rdi
je 0x289fa3
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x289fab
movq %rsi, %rdi
callq 0x5f3e0
movq %r13, 0x190(%rsp)
andq $0x0, 0x198(%rsp)
movq %r15, 0x1a0(%rsp)
movl %ebx, 0x1a8(%rsp)
movq %r12, 0x1b0(%rsp)
movaps 0x16e53b(%rip), %xmm0 # 0x3f8430
movups %xmm0, 0x1b8(%rsp)
movl $0x1, 0x1c8(%rsp)
movq $0x1, 0x1d0(%rsp)
pushq $0x1
popq %rbx
leaq 0x20(%rsp), %r15
movq %r15, %rdi
leaq 0x70(%rsp), %rsi
callq 0x6b00e
movq (%rbp), %rax
movq %rbp, %rdi
movq %r15, %rsi
callq *0x18(%rax)
movq %r15, %rdi
callq 0x6b03a
movl $0x120, %r15d # imm = 0x120
movq 0x78(%rsp,%r15), %rax
testq %rax, %rax
je 0x289f75
lock
decl (%rax)
jne 0x289f75
movq 0x70(%rsp,%r15), %rsi
movq 0x90(%rsp,%r15), %rdi
testq %rdi, %rdi
je 0x289f6d
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x289f75
movq %rsi, %rdi
callq 0x5f3e0
leaq (%rsp,%r15), %rax
addq $0x70, %rax
andq $0x0, 0x40(%rax)
xorps %xmm0, %xmm0
movups %xmm0, 0xc(%rax)
movups %xmm0, (%rax)
andl $0x0, 0x38(%rax)
movups %xmm0, 0x28(%rax)
addq $-0x48, %r15
cmpq $-0x48, %r15
jne 0x289f44
jmp 0x28a236
movq %rsi, %rdi
callq 0x5f3e0
movaps 0x20(%rsp), %xmm0
movups %xmm0, 0xb8(%rsp)
movq 0x30(%rsp), %rax
movq %rax, 0xc8(%rsp)
movl 0x38(%rsp), %eax
movl %eax, 0xd0(%rsp)
movq 0x40(%rsp), %rax
movq %rax, 0xd8(%rsp)
movups 0x48(%rsp), %xmm0
movaps %xmm0, 0xe0(%rsp)
movl 0x58(%rsp), %eax
movl %eax, 0xf0(%rsp)
movq 0x60(%rsp), %rax
movq %rax, 0xf8(%rsp)
movq (%r14), %rax
movq -0x18(%rax), %rax
movq 0x250(%r14,%rax), %r15
movq %r15, %r13
imulq 0x8(%rsp), %r13
addq 0x240(%r14,%rax), %r13
movl 0x258(%r14,%rax), %ebx
movq 0x260(%r14,%rax), %r12
movq 0x108(%rsp), %rax
testq %rax, %rax
je 0x28a069
lock
decl (%rax)
jne 0x28a069
movq 0x100(%rsp), %rsi
movq 0x120(%rsp), %rdi
testq %rdi, %rdi
je 0x28a061
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x28a069
movq %rsi, %rdi
callq 0x5f3e0
movq %r13, 0x100(%rsp)
andq $0x0, 0x108(%rsp)
movq %r15, 0x110(%rsp)
movl %ebx, 0x118(%rsp)
movq %r12, 0x120(%rsp)
movaps 0x16e398(%rip), %xmm0 # 0x3f8430
movups %xmm0, 0x128(%rsp)
movl $0x1, 0x138(%rsp)
movq $0x1, 0x140(%rsp)
movq 0x28(%rsp), %rax
testq %rax, %rax
pushq $0x1
popq %rbx
je 0x28a0e8
lock
decl (%rax)
jne 0x28a0e8
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x28a0e0
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x28a0e8
movq %rsi, %rdi
callq 0x5f3e0
movq (%r14), %rax
movq -0x18(%rax), %rax
cmpl $0x65, 0x10c(%r14,%rax)
jl 0x28a1af
movq 0x298(%r14,%rax), %r15
movq %r15, %r13
imulq 0x8(%rsp), %r13
addq 0x288(%r14,%rax), %r13
movl 0x2a0(%r14,%rax), %ebx
movq 0x2a8(%r14,%rax), %r12
movq 0x150(%rsp), %rax
testq %rax, %rax
je 0x28a15e
lock
decl (%rax)
jne 0x28a15e
movq 0x148(%rsp), %rsi
movq 0x168(%rsp), %rdi
testq %rdi, %rdi
je 0x28a156
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x28a15e
movq %rsi, %rdi
callq 0x5f3e0
movq %r13, 0x148(%rsp)
andq $0x0, 0x150(%rsp)
movq %r15, 0x158(%rsp)
movl %ebx, 0x160(%rsp)
movq %r12, 0x168(%rsp)
movaps 0x16e2a3(%rip), %xmm0 # 0x3f8430
movaps %xmm0, 0x170(%rsp)
movl $0x1, 0x180(%rsp)
movq $0x1, 0x188(%rsp)
pushq $0x1
popq %rbx
leaq 0x20(%rsp), %r15
movq %r15, %rdi
leaq 0x70(%rsp), %rsi
callq 0x6b00e
movq (%rbp), %rax
movq %rbp, %rdi
movq %r15, %rsi
callq *0x18(%rax)
movq %r15, %rdi
callq 0x6b03a
movl $0xd8, %r15d
movq 0x78(%rsp,%r15), %rax
testq %rax, %rax
je 0x28a20d
lock
decl (%rax)
jne 0x28a20d
movq 0x70(%rsp,%r15), %rsi
movq 0x90(%rsp,%r15), %rdi
testq %rdi, %rdi
je 0x28a205
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x28a20d
movq %rsi, %rdi
callq 0x5f3e0
leaq (%rsp,%r15), %rax
addq $0x70, %rax
andq $0x0, 0x40(%rax)
xorps %xmm0, %xmm0
movups %xmm0, 0xc(%rax)
movups %xmm0, (%rax)
andl $0x0, 0x38(%rax)
movups %xmm0, 0x28(%rax)
addq $-0x48, %r15
cmpq $-0x48, %r15
jne 0x28a1dc
movq (%rbp), %rax
movq %rbp, %rdi
movq 0x250(%rsp), %rsi
callq *0x20(%rax)
leaq 0x268(%rsp), %rdi
movq 0x1e0(%rsp), %rax
movq (%rax), %rax
movq 0x8(%rsp), %rcx
movq %rbp, (%rax,%rcx,8)
callq 0x71614
movq 0x208(%rsp), %rax
testq %rax, %rax
je 0x28a2a0
lock
decl (%rax)
jne 0x28a2a0
movq 0x200(%rsp), %rsi
movq 0x220(%rsp), %rdi
testq %rdi, %rdi
je 0x28a298
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x28a2a0
movq %rsi, %rdi
callq 0x5f3e0
movq 0x8(%rsp), %rdx
incq %rdx
jmp 0x2895ed
xorl %eax, %eax
addq $0x278, %rsp # imm = 0x278
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x28a375
jmp 0x28a500
jmp 0x28a2cd
movq %rax, %rbx
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x28a387
lock
decl (%rax)
jne 0x28a387
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0x28a303
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x28a387
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x28a387
jmp 0x28a500
jmp 0x28a3f9
jmp 0x28a375
jmp 0x28a319
movq %rax, %rbx
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x28a40b
lock
decl (%rax)
jne 0x28a40b
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0x28a34f
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x28a40b
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x28a40b
jmp 0x28a500
jmp 0x28a500
jmp 0x28a500
jmp 0x28a500
jmp 0x28a375
jmp 0x28a3f9
movq %rax, %rbx
jmp 0x28a387
movq %rax, %rbx
leaq 0x20(%rsp), %rdi
callq 0x6b03a
movl $0xd8, %r14d
xorps %xmm0, %xmm0
movq 0x78(%rsp,%r14), %rax
testq %rax, %rax
je 0x28a3c7
lock
decl (%rax)
jne 0x28a3c7
movq 0x70(%rsp,%r14), %rsi
movq 0x90(%rsp,%r14), %rdi
testq %rdi, %rdi
je 0x28a3bc
movq (%rdi), %rax
callq *0x18(%rax)
xorps %xmm0, %xmm0
jmp 0x28a3c7
movq %rsi, %rdi
callq 0x5f3e0
xorps %xmm0, %xmm0
leaq (%rsp,%r14), %rax
addq $0x70, %rax
andq $0x0, 0x40(%rax)
movups %xmm0, 0xc(%rax)
movups %xmm0, (%rax)
andl $0x0, 0x38(%rax)
movups %xmm0, 0x28(%rax)
addq $-0x48, %r14
cmpq $-0x48, %r14
jne 0x28a390
jmp 0x28a4b4
jmp 0x28a500
jmp 0x28a3f9
movq %rax, %rbx
jmp 0x28a40b
movq %rax, %rbx
leaq 0x20(%rsp), %rdi
callq 0x6b03a
movl $0x120, %r14d # imm = 0x120
xorps %xmm0, %xmm0
movq 0x78(%rsp,%r14), %rax
testq %rax, %rax
je 0x28a44b
lock
decl (%rax)
jne 0x28a44b
movq 0x70(%rsp,%r14), %rsi
movq 0x90(%rsp,%r14), %rdi
testq %rdi, %rdi
je 0x28a440
movq (%rdi), %rax
callq *0x18(%rax)
xorps %xmm0, %xmm0
jmp 0x28a44b
movq %rsi, %rdi
callq 0x5f3e0
xorps %xmm0, %xmm0
leaq (%rsp,%r14), %rax
addq $0x70, %rax
andq $0x0, 0x40(%rax)
movups %xmm0, 0xc(%rax)
movups %xmm0, (%rax)
andl $0x0, 0x38(%rax)
movups %xmm0, 0x28(%rax)
addq $-0x48, %r14
cmpq $-0x48, %r14
jne 0x28a414
jmp 0x28a4b4
jmp 0x28a500
jmp 0x28a4a8
movq %rax, %rbx
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x28a4f8
lock
decl (%rax)
jne 0x28a4f8
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x28a4e8
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x28a4f8
jmp 0x28a500
movq %rax, %rbx
jmp 0x28a4c1
jmp 0x28a500
jmp 0x28a500
movq %rax, %rbx
leaq 0x268(%rsp), %rdi
callq 0x71614
movq 0x208(%rsp), %rax
testq %rax, %rax
je 0x28a4f8
lock
decl (%rax)
jne 0x28a4f8
movq 0x200(%rsp), %rsi
movq 0x220(%rsp), %rdi
testq %rdi, %rdi
jne 0x28a4f2
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x28a4f8
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
| /csukuangfj[P]ncnn/src/layer/x86/convolutiondepthwise_x86.cpp |
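create_group_ops() carves the flat weight blob into per-group slices with range(offset, size). A sketch of just the offset arithmetic under assumed sizes (three groups, 3x3 kernel, 4 input and 2 output channels per group); the ncnn Mat call is left as a comment so the snippet stands alone:

#include <cstdio>

int main()
{
    const int maxk = 3 * 3, channels_g = 4, num_output_g = 2, group = 3;
    const int elems = maxk * channels_g * num_output_g; // 72 weights per group
    for (int g = 0; g < group; g++)
    {
        int offset = elems * g;
        // Mat weight_data_g = weight_data.range(offset, elems).clone();
        printf("group %d: [%d, %d)\n", g, offset, offset + elems); // 0, 72, 144
    }
    return 0;
}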
virtual thunk to ncnn::ConvolutionDepthWise_x86::create_pipeline(ncnn::Option const&) | int ConvolutionDepthWise_x86::create_pipeline(const Option& opt)
{
if (dynamic_weight)
return 0;
activation = create_activation_layer(activation_type, activation_params, opt);
#if NCNN_INT8
if (opt.use_int8_inference && weight_data.elemsize == (size_t)1u)
{
return create_pipeline_int8_x86(opt);
}
#endif
const int maxk = kernel_w * kernel_h;
int channels = (weight_data_size / group) / maxk / (num_output / group) * group;
// depth-wise
if (channels == group && group == num_output)
{
int elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
#if __AVX512F__
elempack = channels % 16 == 0 ? 16 : channels % 8 == 0 ? 8 : channels % 4 == 0 ? 4 : 1;
#elif __AVX__
elempack = channels % 8 == 0 ? 8 : channels % 4 == 0 ? 4 : 1;
#else
elempack = channels % 4 == 0 ? 4 : 1;
#endif
}
#endif // __SSE2__
#if __SSE2__
#if __AVX__
// pack16
#if __AVX512F__
if (elempack == 16)
{
Mat weight_data_r2 = weight_data.reshape(maxk, group);
convert_packing(weight_data_r2, weight_data_tm, 16, opt);
}
#endif // __AVX512F__
// pack8
if (elempack == 8)
{
Mat weight_data_r2 = weight_data.reshape(maxk, group);
convert_packing(weight_data_r2, weight_data_tm, 8, opt);
}
#endif // __AVX__
// pack4
if (elempack == 4)
{
Mat weight_data_r2 = weight_data.reshape(maxk, group);
convert_packing(weight_data_r2, weight_data_tm, 4, opt);
}
#endif // __SSE2__
if (elempack == 1)
{
// depth-wise specific
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
weight_data_tm = weight_data;
}
else if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
weight_data_tm = weight_data;
}
else
{
create_group_ops(opt);
}
}
if (opt.lightmode)
{
weight_data.release();
}
return 0;
}
// group convolution
create_group_ops(opt);
if (opt.lightmode)
{
weight_data.release();
}
return 0;
} | pushq %rax
movq (%rdi), %rax
addq -0x30(%rax), %rdi
callq 0x288da6
xorl %eax, %eax
popq %rcx
retq
nop
| /csukuangfj[P]ncnn/src/layer/x86/convolutiondepthwise_x86.cpp |
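The three-instruction thunk above (load the vtable, add the adjustment stored at a negative vtable offset to %rdi, call the real body) is what "virtual thunk" means: the x86 layer class reaches its base through virtual inheritance, so the `this` displacement is only known at runtime. A minimal reproduction with our own toy class names:

struct Layer { virtual int create_pipeline() { return 0; } virtual ~Layer() {} };
struct Impl : virtual Layer { int create_pipeline() override { return 1; } };

int call(Layer* l)
{
    // Dispatching through Layer* lands on a compiler-generated thunk that,
    // like `addq -0x30(%rax), %rdi` above, rebases `this` onto the complete
    // Impl object via an offset read from the vtable, then enters Impl's body.
    return l->create_pipeline();
}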
ncnn::ConvolutionDepthWise_x86::destroy_pipeline(ncnn::Option const&) | int ConvolutionDepthWise_x86::destroy_pipeline(const Option& opt)
{
if (activation)
{
activation->destroy_pipeline(opt);
delete activation;
activation = 0;
}
for (int i = 0; i < (int)group_ops.size(); i++)
{
group_ops[i]->destroy_pipeline(opt);
delete group_ops[i];
}
group_ops.clear();
return 0;
} | pushq %r15
pushq %r14
pushq %rbx
movq %rsi, %r14
movq %rdi, %rbx
movq 0x8(%rdi), %rdi
testq %rdi, %rdi
je 0x28a54b
movq (%rdi), %rax
movq %r14, %rsi
callq *0x28(%rax)
movq 0x8(%rbx), %rdi
testq %rdi, %rdi
je 0x28a546
movq (%rdi), %rax
callq *0x8(%rax)
andq $0x0, 0x8(%rbx)
xorl %r15d, %r15d
movq 0x10(%rbx), %rax
movq 0x18(%rbx), %rcx
movq %rcx, %rdx
subq %rax, %rdx
shrq $0x3, %rdx
movslq %edx, %rdx
cmpq %rdx, %r15
jge 0x28a58d
movq (%rax,%r15,8), %rdi
movq (%rdi), %rax
movq %r14, %rsi
callq *0x28(%rax)
movq 0x10(%rbx), %rax
movq (%rax,%r15,8), %rdi
testq %rdi, %rdi
je 0x28a588
movq (%rdi), %rax
callq *0x8(%rax)
incq %r15
jmp 0x28a54e
cmpq %rax, %rcx
je 0x28a596
movq %rax, 0x18(%rbx)
xorl %eax, %eax
popq %rbx
popq %r14
popq %r15
retq
| /csukuangfj[P]ncnn/src/layer/x86/convolutiondepthwise_x86.cpp |
virtual thunk to ncnn::ConvolutionDepthWise_x86::destroy_pipeline(ncnn::Option const&) | int ConvolutionDepthWise_x86::destroy_pipeline(const Option& opt)
{
if (activation)
{
activation->destroy_pipeline(opt);
delete activation;
activation = 0;
}
for (int i = 0; i < (int)group_ops.size(); i++)
{
group_ops[i]->destroy_pipeline(opt);
delete group_ops[i];
}
group_ops.clear();
return 0;
} | pushq %rax
movq (%rdi), %rax
addq -0x38(%rax), %rdi
callq 0x28a51a
xorl %eax, %eax
popq %rcx
retq
nop
| /csukuangfj[P]ncnn/src/layer/x86/convolutiondepthwise_x86.cpp |
ncnn::ConvolutionDepthWise_x86::forward(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const | int ConvolutionDepthWise_x86::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
#if NCNN_INT8
if (opt.use_int8_inference && int8_scale_term)
{
return forward_int8_x86(bottom_blob, top_blob, opt);
}
#endif
int w = bottom_blob.w;
int h = bottom_blob.h;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;
Mat bottom_blob_bordered;
make_padding(bottom_blob, bottom_blob_bordered, opt);
if (bottom_blob_bordered.empty())
return -100;
w = bottom_blob_bordered.w;
h = bottom_blob_bordered.h;
int outw = (w - kernel_extent_w) / stride_w + 1;
int outh = (h - kernel_extent_h) / stride_h + 1;
int out_elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
#if __AVX512F__
out_elempack = num_output % 16 == 0 ? 16 : num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#elif __AVX__
out_elempack = num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#else
out_elempack = num_output % 4 == 0 ? 4 : 1;
#endif
}
#endif // __SSE2__
size_t out_elemsize = elemsize / elempack * out_elempack;
top_blob.create(outw, outh, num_output / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
// depth-wise
if (channels * elempack == group && group == num_output)
{
#if __SSE2__
#if __AVX__
#if __AVX512F__
if (elempack == 16)
{
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw3x3s1_pack16_avx512(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw3x3s2_pack16_avx512(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 5 && kernel_h == 5 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw5x5s1_pack16_avx512(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 5 && kernel_h == 5 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw5x5s2_pack16_avx512(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
else
{
const int maxk = kernel_w * kernel_h;
// kernel offsets
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
int gap = w * dilation_h - kernel_w * dilation_w;
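// each tap advances p2 by dilation_w; gap then jumps from the end of one
// kernel row to the start of the next (dilation_h input rows down), so
// space_ofs[k] holds tap k's element offset from the window's top-left sample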
for (int i = 0; i < kernel_h; i++)
{
for (int j = 0; j < kernel_w; j++)
{
space_ofs[p1] = p2;
p1++;
p2 += dilation_w;
}
p2 += gap;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < channels; g++)
{
float* outptr = top_blob.channel(g);
const float* kptr = (const float*)weight_data_tm + maxk * g * 16;
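// weights for group g: maxk taps of 16 floats each, stored contiguously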
const Mat m = bottom_blob_bordered.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__m512 _sum = _mm512_set1_ps(0.f);
if (bias_term)
{
_sum = _mm512_loadu_ps(((const float*)bias_data) + g * 16);
}
const float* sptr = m.row(i * stride_h) + j * stride_w * 16;
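// sptr is the top-left packed element of this output position's receptive field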
for (int k = 0; k < maxk; k++)
{
__m512 _val = _mm512_loadu_ps(sptr + space_ofs[k] * 16);
__m512 _w = _mm512_loadu_ps(kptr + k * 16);
_sum = _mm512_fmadd_ps(_val, _w, _sum);
}
_mm512_storeu_ps(outptr, _sum);
outptr += 16;
}
}
}
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
}
#endif // __AVX512F__
if (elempack == 8)
{
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw3x3s1_pack8_avx(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw3x3s2_pack8_avx(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 5 && kernel_h == 5 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw5x5s1_pack8_avx(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 5 && kernel_h == 5 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw5x5s2_pack8_avx(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
else
{
const int maxk = kernel_w * kernel_h;
// kernel offsets
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
int gap = w * dilation_h - kernel_w * dilation_w;
for (int i = 0; i < kernel_h; i++)
{
for (int j = 0; j < kernel_w; j++)
{
space_ofs[p1] = p2;
p1++;
p2 += dilation_w;
}
p2 += gap;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < channels; g++)
{
float* outptr = top_blob.channel(g);
const float* kptr = (const float*)weight_data_tm + maxk * g * 8;
const Mat m = bottom_blob_bordered.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__m256 _sum = _mm256_set1_ps(0.f);
if (bias_term)
{
_sum = _mm256_loadu_ps(((const float*)bias_data) + g * 8);
}
const float* sptr = m.row(i * stride_h) + j * stride_w * 8;
for (int k = 0; k < maxk; k++)
{
__m256 _val = _mm256_loadu_ps(sptr + space_ofs[k] * 8);
__m256 _w = _mm256_loadu_ps(kptr + k * 8);
_sum = _mm256_comp_fmadd_ps(_val, _w, _sum);
}
_mm256_storeu_ps(outptr + j * 8, _sum);
}
outptr += outw * 8;
}
}
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
}
#endif // __AVX__
if (elempack == 4)
{
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw3x3s1_pack4_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw3x3s2_pack4_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 5 && kernel_h == 5 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw5x5s1_pack4_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 5 && kernel_h == 5 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw5x5s2_pack4_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
{
const int maxk = kernel_w * kernel_h;
// kernel offsets
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
int gap = w * dilation_h - kernel_w * dilation_w;
for (int i = 0; i < kernel_h; i++)
{
for (int j = 0; j < kernel_w; j++)
{
space_ofs[p1] = p2;
p1++;
p2 += dilation_w;
}
p2 += gap;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < channels; g++)
{
float* outptr = top_blob.channel(g);
const float* kptr = (const float*)weight_data_tm + maxk * g * 4;
const Mat m = bottom_blob_bordered.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__m128 _sum = _mm_set1_ps(0.f);
if (bias_term)
{
_sum = _mm_loadu_ps(((const float*)bias_data) + g * 4);
}
const float* sptr = m.row(i * stride_h) + j * stride_w * 4;
for (int k = 0; k < maxk; k++)
{
__m128 _val = _mm_loadu_ps(sptr + space_ofs[k] * 4);
__m128 _w = _mm_loadu_ps(kptr + k * 4);
_sum = _mm_add_ps(_mm_mul_ps(_val, _w), _sum);
}
_sum = activation_sse(_sum, activation_type, activation_params);
_mm_storeu_ps(outptr + j * 4, _sum);
}
outptr += outw * 4;
}
}
return 0;
}
}
#endif // __SSE2__
if (elempack == 1)
{
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw3x3s1_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw3x3s2_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
}
}
// group convolution
const int channels_g = channels * elempack / group;
const int num_output_g = num_output / group;
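// per-group input/output channel counts; g_elempack and out_g_elempack below are the widest packings that divide them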
int g_elempack = 1;
int out_g_elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
#if __AVX512F__
g_elempack = channels_g % 16 == 0 ? 16 : channels_g % 8 == 0 ? 8 : channels_g % 4 == 0 ? 4 : 1;
out_g_elempack = num_output_g % 16 == 0 ? 16 : num_output_g % 8 == 0 ? 8 : num_output_g % 4 == 0 ? 4 : 1;
#elif __AVX__
g_elempack = channels_g % 8 == 0 ? 8 : channels_g % 4 == 0 ? 4 : 1;
out_g_elempack = num_output_g % 8 == 0 ? 8 : num_output_g % 4 == 0 ? 4 : 1;
#else
g_elempack = channels_g % 4 == 0 ? 4 : 1;
out_g_elempack = num_output_g % 4 == 0 ? 4 : 1;
#endif
}
#endif // __SSE2__
// unpacking
Mat bottom_blob_bordered_unpacked = bottom_blob_bordered;
if (elempack > g_elempack)
{
Option opt_p = opt;
opt_p.blob_allocator = opt.workspace_allocator;
convert_packing(bottom_blob_bordered, bottom_blob_bordered_unpacked, g_elempack, opt_p);
}
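// each group's convolution runs at g_elempack granularity; the output is repacked to out_elempack afterwards if needed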
Mat top_blob_unpacked = top_blob;
if (out_g_elempack < out_elempack)
{
top_blob_unpacked.create(outw, outh, num_output / out_g_elempack, out_elemsize / out_elempack * out_g_elempack, out_g_elempack, opt.workspace_allocator);
if (top_blob_unpacked.empty())
return -100;
}
for (int g = 0; g < group; g++)
{
const Mat bottom_blob_bordered_g = bottom_blob_bordered_unpacked.channel_range(channels_g * g / g_elempack, channels_g / g_elempack);
Mat top_blob_g = top_blob_unpacked.channel_range(num_output_g * g / out_g_elempack, num_output_g / out_g_elempack);
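// channel_range() takes packed-channel indices, hence the divisions by g_elempack and out_g_elempack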
const ncnn::Layer* op = group_ops[g];
Option opt_g = opt;
opt_g.blob_allocator = top_blob_unpacked.allocator;
// forward
op->forward(bottom_blob_bordered_g, top_blob_g, opt_g);
}
// packing
if (out_g_elempack < out_elempack)
{
convert_packing(top_blob_unpacked, top_blob, out_elempack, opt);
}
else
{
top_blob = top_blob_unpacked;
}
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x2c8, %rsp # imm = 0x2C8
movq %rcx, %r14
movq (%rdi), %rax
movq -0x18(%rax), %rcx
cmpb $0x1, 0x1e(%r14)
jne 0x28a5f5
cmpl $0x0, 0x10c(%rdi,%rcx)
je 0x28a5f5
movq %r14, %rcx
addq $0x2c8, %rsp # imm = 0x2C8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp 0x28d5aa
movq %rdx, 0x48(%rsp)
movl 0x38(%rsi), %edx
movq %rdx, 0x50(%rsp)
movq 0x10(%rsi), %r13
movslq 0x18(%rsi), %r15
movl 0xd4(%rdi,%rcx), %ebx
decl %ebx
imull 0xdc(%rdi,%rcx), %ebx
movq %rdi, %r8
movl 0xd8(%rdi,%rcx), %ebp
decl %ebp
imull 0xe0(%rdi,%rcx), %ebp
leaq 0xb0(%rsp), %rdx
andq $0x0, 0x40(%rdx)
xorps %xmm0, %xmm0
movaps %xmm0, (%rdx)
movups %xmm0, 0xc(%rdx)
movaps %xmm0, 0x20(%rdx)
movups %xmm0, 0x2c(%rdx)
movq -0x18(%rax), %rdi
movq %r8, 0x88(%rsp)
addq %r8, %rdi
movq %r14, %rcx
callq 0x287daa
pushq $-0x64
popq %r12
cmpq $0x0, 0xb0(%rsp)
je 0x28d3c9
movslq 0xe8(%rsp), %rax
imulq 0xf0(%rsp), %rax
testq %rax, %rax
je 0x28d3c9
movq %r15, %r10
notl %ebx
movl 0xdc(%rsp), %eax
movl %eax, 0x10(%rsp)
addl %eax, %ebx
movq 0x88(%rsp), %rsi
movq (%rsi), %rax
movq -0x18(%rax), %rcx
movl %ebx, %eax
cltd
idivl 0xe4(%rsi,%rcx)
movl %eax, %edi
notl %ebp
addl 0xe0(%rsp), %ebp
movl %ebp, %eax
movq %rsi, %rbp
cltd
idivl 0xe8(%rsi,%rcx)
movq %rdi, 0xa0(%rsp)
leal 0x1(%rdi), %esi
movq %rax, 0x20(%rsp)
leal 0x1(%rax), %r8d
movl 0xd0(%rbp,%rcx), %edi
testb $0x3, %dil
sete %al
andb 0x27(%r14), %al
movzbl %al, %r15d
leal (%r15,%r15,2), %r9d
incl %r9d
movq %r13, %rax
xorl %edx, %edx
movq %r10, %rbx
divq %r10
movq %rax, %r13
addb %r15b, %r15b
movl %r15d, %ecx
shlq %cl, %r13
movl %edi, %eax
cltd
idivl %r9d
movq %r14, 0x190(%rsp)
movq 0x8(%r14), %rcx
movq %rcx, (%rsp)
movq 0x48(%rsp), %r14
movq %r14, %rdi
movl %esi, 0x60(%rsp)
movl %r8d, 0x70(%rsp)
movl %r8d, %edx
movl %eax, %ecx
movq %r13, %r8
movl %r9d, 0x30(%rsp)
callq 0x628f2
movq (%r14), %r10
testq %r10, %r10
je 0x28d3c9
movq %r13, %r11
movq 0x40(%r14), %r13
movslq 0x38(%r14), %rax
imulq %r13, %rax
testq %rax, %rax
je 0x28d3c9
movq %rbx, %r9
movl %r9d, %eax
imull 0x50(%rsp), %eax
movq (%rbp), %rcx
movq -0x18(%rcx), %r8
movl 0xd0(%rbp,%r8), %ecx
movl 0x108(%rbp,%r8), %esi
cmpl %esi, %eax
jne 0x28beda
cmpl %ecx, %eax
jne 0x28beda
cmpl $0x1, %r9d
je 0x28b2ef
movl %eax, %ecx
cmpl $0x4, %r9d
jne 0x28beda
movl 0xd4(%rbp,%r8), %eax
movl 0xd8(%rbp,%r8), %r15d
cmpl $0x5, %eax
je 0x28b672
cmpl $0x3, %eax
jne 0x28ccf0
cmpl $0x3, %r15d
jne 0x28ccf0
cmpl $0x1, 0xdc(%rbp,%r8)
jne 0x28c434
cmpl $0x1, 0xe0(%rbp,%r8)
jne 0x28c434
cmpl $0x1, 0xe4(%rbp,%r8)
jne 0x28c434
cmpl $0x1, 0xe8(%rbp,%r8)
jne 0x28c434
movl 0x2c(%r14), %eax
movl 0x30(%r14), %ecx
movl 0xe8(%rsp), %edx
movq 0x1b0(%rbp,%r8), %rsi
xorl %edi, %edi
testl %ecx, %ecx
cmovlel %edi, %ecx
testl %edx, %edx
cmovlel %edi, %edx
movq %rdx, 0x1e8(%rsp)
movl $0x80, %r8d
movq %rsi, 0x1e0(%rsp)
cmpq 0x1e8(%rsp), %rdi
je 0x28d415
testq %rsi, %rsi
je 0x28a884
movq %rdi, %rdx
shlq $0x4, %rdx
movups (%rsi,%rdx), %xmm0
jmp 0x28a887
xorps %xmm0, %xmm0
movaps %xmm0, 0x10(%rsp)
movq 0x40(%r14), %r9
imulq %rdi, %r9
imulq 0x10(%r14), %r9
addq (%r14), %r9
movq 0x28(%rbp), %rdx
movslq 0x54(%rbp), %rsi
imulq %rdi, %rsi
imulq 0x38(%rbp), %rsi
movslq 0xdc(%rsp), %r10
movq 0xf0(%rsp), %r13
movq %rdi, %r14
imulq %rdi, %r13
movq 0xc0(%rsp), %r11
imulq %r11, %r13
addq 0xb0(%rsp), %r13
imulq %r11, %r10
leaq (%r10,%r13), %rbp
leaq (,%r10,2), %r11
addq %r13, %r11
movaps (%rdx,%rsi), %xmm0
movaps %xmm0, 0x20(%rsp)
movaps 0x10(%rdx,%rsi), %xmm0
movaps %xmm0, 0x50(%rsp)
movaps 0x20(%rdx,%rsi), %xmm0
movaps %xmm0, 0x110(%rsp)
movaps 0x30(%rdx,%rsi), %xmm0
movaps %xmm0, 0x90(%rsp)
movaps 0x40(%rdx,%rsi), %xmm0
movaps %xmm0, 0x30(%rsp)
movaps 0x50(%rdx,%rsi), %xmm0
movaps %xmm0, 0xa0(%rsp)
movaps 0x60(%rdx,%rsi), %xmm0
movaps %xmm0, 0x60(%rsp)
movaps 0x70(%rdx,%rsi), %xmm0
movaps %xmm0, 0x100(%rsp)
movaps 0x80(%rdx,%rsi), %xmm0
movaps %xmm0, 0x70(%rsp)
xorl %r10d, %r10d
cmpl %ecx, %r10d
je 0x28b2cf
leaq 0x20(%r13), %r15
leaq 0x20(%rbp), %rbx
leaq 0x20(%r11), %rdx
xorl %esi, %esi
xorl %r12d, %r12d
leal 0x7(%r12), %edi
cmpl %eax, %edi
jge 0x28b0bf
movaps (%r13,%rsi), %xmm0
mulps 0x20(%rsp), %xmm0
addps 0x10(%rsp), %xmm0
movaps 0x10(%r13,%rsi), %xmm3
movaps 0x20(%r13,%rsi), %xmm1
movaps 0x30(%r13,%rsi), %xmm9
movaps %xmm9, 0x180(%rsp)
movaps %xmm3, %xmm2
mulps 0x50(%rsp), %xmm2
movaps %xmm1, %xmm4
movaps 0x110(%rsp), %xmm8
mulps %xmm8, %xmm4
addps %xmm2, %xmm4
addps %xmm0, %xmm4
movaps (%rbp,%rsi), %xmm12
movaps 0x90(%rsp), %xmm7
mulps %xmm7, %xmm12
movaps 0x10(%rbp,%rsi), %xmm0
movaps 0x20(%rbp,%rsi), %xmm2
movaps 0x30(%rbp,%rsi), %xmm5
movaps %xmm5, 0x120(%rsp)
movaps %xmm0, %xmm13
mulps 0x30(%rsp), %xmm13
addps %xmm12, %xmm13
movaps %xmm2, %xmm14
mulps 0xa0(%rsp), %xmm14
addps %xmm13, %xmm14
addps %xmm4, %xmm14
movaps (%r11,%rsi), %xmm15
movaps 0x60(%rsp), %xmm11
mulps %xmm11, %xmm15
movaps 0x10(%r11,%rsi), %xmm5
movaps 0x20(%r11,%rsi), %xmm4
movaps 0x30(%r11,%rsi), %xmm12
movaps %xmm5, %xmm13
movaps 0x100(%rsp), %xmm10
mulps %xmm10, %xmm13
addps %xmm15, %xmm13
movaps %xmm4, %xmm15
mulps 0x70(%rsp), %xmm15
addps %xmm13, %xmm15
addps %xmm14, %xmm15
movaps %xmm15, (%r9,%rsi)
mulps 0x20(%rsp), %xmm3
addps 0x10(%rsp), %xmm3
movaps %xmm1, %xmm13
movaps 0x50(%rsp), %xmm15
mulps %xmm15, %xmm13
movaps %xmm9, %xmm14
mulps %xmm8, %xmm14
mulps %xmm7, %xmm0
addps %xmm13, %xmm0
addps %xmm3, %xmm0
movaps %xmm2, %xmm3
mulps 0x30(%rsp), %xmm3
movaps 0x120(%rsp), %xmm9
movaps %xmm9, %xmm13
movaps 0xa0(%rsp), %xmm6
mulps %xmm6, %xmm13
addps %xmm14, %xmm13
mulps %xmm11, %xmm5
addps %xmm3, %xmm5
movaps %xmm4, %xmm3
mulps %xmm10, %xmm3
addps %xmm5, %xmm3
addps %xmm0, %xmm3
movaps %xmm12, %xmm5
movaps 0x70(%rsp), %xmm10
mulps %xmm10, %xmm5
addps %xmm13, %xmm5
addps %xmm3, %xmm5
movaps 0x40(%r13,%rsi), %xmm0
movaps 0x40(%rbp,%rsi), %xmm3
movaps 0x40(%r11,%rsi), %xmm13
movaps %xmm5, 0x10(%r9,%rsi)
mulps 0x20(%rsp), %xmm1
addps 0x10(%rsp), %xmm1
movaps 0x180(%rsp), %xmm11
movaps %xmm11, %xmm5
mulps %xmm15, %xmm5
movaps %xmm0, %xmm14
mulps %xmm8, %xmm14
mulps %xmm7, %xmm2
movaps 0x30(%rsp), %xmm8
mulps %xmm8, %xmm9
addps %xmm5, %xmm9
movaps %xmm3, %xmm5
mulps %xmm6, %xmm5
addps %xmm14, %xmm5
mulps 0x60(%rsp), %xmm4
addps %xmm2, %xmm4
addps %xmm1, %xmm4
movaps %xmm12, %xmm1
movaps 0x100(%rsp), %xmm6
mulps %xmm6, %xmm1
addps %xmm9, %xmm1
addps %xmm4, %xmm1
movaps %xmm13, %xmm2
mulps %xmm10, %xmm2
addps %xmm5, %xmm2
addps %xmm1, %xmm2
movaps 0x50(%r13,%rsi), %xmm4
movaps 0x50(%rbp,%rsi), %xmm1
movaps 0x50(%r11,%rsi), %xmm14
movaps %xmm2, 0x20(%r9,%rsi)
movaps %xmm11, %xmm2
mulps 0x20(%rsp), %xmm2
addps 0x10(%rsp), %xmm2
movaps %xmm2, %xmm9
movaps %xmm0, %xmm2
mulps 0x50(%rsp), %xmm2
movaps %xmm4, %xmm5
mulps 0x110(%rsp), %xmm5
movaps 0x120(%rsp), %xmm10
mulps %xmm7, %xmm10
movaps %xmm3, %xmm15
mulps %xmm8, %xmm15
addps %xmm2, %xmm15
movaps %xmm1, %xmm2
movaps %xmm1, %xmm8
mulps 0xa0(%rsp), %xmm2
addps %xmm5, %xmm2
movaps 0x60(%rsp), %xmm7
mulps %xmm7, %xmm12
addps %xmm10, %xmm12
addps %xmm9, %xmm12
movaps %xmm13, %xmm5
mulps %xmm6, %xmm5
addps %xmm15, %xmm5
addps %xmm12, %xmm5
movaps %xmm14, %xmm10
movaps 0x70(%rsp), %xmm6
mulps %xmm6, %xmm10
addps %xmm2, %xmm10
addps %xmm5, %xmm10
movaps 0x60(%r13,%rsi), %xmm15
movaps 0x60(%rbp,%rsi), %xmm1
movaps %xmm1, 0x120(%rsp)
movaps 0x60(%r11,%rsi), %xmm12
movaps %xmm10, 0x30(%r9,%rsi)
mulps 0x20(%rsp), %xmm0
addps 0x10(%rsp), %xmm0
movaps %xmm4, %xmm5
movaps 0x50(%rsp), %xmm9
mulps %xmm9, %xmm5
movaps %xmm15, %xmm10
movaps 0x110(%rsp), %xmm2
mulps %xmm2, %xmm10
mulps 0x90(%rsp), %xmm3
movaps %xmm8, %xmm11
mulps 0x30(%rsp), %xmm11
addps %xmm5, %xmm11
movaps %xmm1, %xmm5
mulps 0xa0(%rsp), %xmm5
addps %xmm10, %xmm5
mulps %xmm7, %xmm13
addps %xmm3, %xmm13
addps %xmm0, %xmm13
movaps %xmm14, %xmm0
mulps 0x100(%rsp), %xmm0
addps %xmm11, %xmm0
addps %xmm13, %xmm0
movaps %xmm12, %xmm3
mulps %xmm6, %xmm3
addps %xmm5, %xmm3
addps %xmm0, %xmm3
movaps 0x70(%r13,%rsi), %xmm11
movaps 0x70(%rbp,%rsi), %xmm13
movaps 0x70(%r11,%rsi), %xmm10
movaps %xmm3, 0x40(%r9,%rsi)
mulps 0x20(%rsp), %xmm4
addps 0x10(%rsp), %xmm4
movaps %xmm15, %xmm0
mulps %xmm9, %xmm0
movaps %xmm9, %xmm7
movaps %xmm11, %xmm3
mulps %xmm2, %xmm3
movaps 0x90(%rsp), %xmm6
mulps %xmm6, %xmm8
movaps 0x120(%rsp), %xmm5
movaps 0x30(%rsp), %xmm2
mulps %xmm2, %xmm5
addps %xmm0, %xmm5
movaps %xmm13, %xmm0
movaps %xmm13, 0x180(%rsp)
movaps 0xa0(%rsp), %xmm9
mulps %xmm9, %xmm0
addps %xmm3, %xmm0
mulps 0x60(%rsp), %xmm14
addps %xmm8, %xmm14
addps %xmm4, %xmm14
movaps %xmm12, %xmm1
movaps 0x100(%rsp), %xmm8
mulps %xmm8, %xmm1
addps %xmm5, %xmm1
addps %xmm14, %xmm1
movaps %xmm10, %xmm4
mulps 0x70(%rsp), %xmm4
addps %xmm0, %xmm4
addps %xmm1, %xmm4
movaps 0x80(%r13,%rsi), %xmm3
movaps 0x80(%rbp,%rsi), %xmm1
movaps 0x80(%r11,%rsi), %xmm0
movaps %xmm4, 0x50(%r9,%rsi)
movaps %xmm11, %xmm4
mulps %xmm7, %xmm4
movaps %xmm3, %xmm5
movaps 0x110(%rsp), %xmm7
mulps %xmm7, %xmm5
movaps %xmm13, %xmm14
mulps %xmm2, %xmm14
addps %xmm4, %xmm14
movaps %xmm1, %xmm4
mulps %xmm9, %xmm4
addps %xmm5, %xmm4
movaps 0x120(%rsp), %xmm2
mulps %xmm6, %xmm2
movaps 0x60(%rsp), %xmm13
mulps %xmm13, %xmm12
addps %xmm2, %xmm12
mulps 0x20(%rsp), %xmm15
addps 0x10(%rsp), %xmm15
addps %xmm15, %xmm12
movaps %xmm10, %xmm2
mulps %xmm8, %xmm2
addps %xmm14, %xmm2
addps %xmm12, %xmm2
movaps %xmm0, %xmm5
movaps 0x70(%rsp), %xmm6
mulps %xmm6, %xmm5
addps %xmm4, %xmm5
addps %xmm2, %xmm5
movaps 0x90(%r13,%rsi), %xmm2
mulps %xmm7, %xmm2
movaps 0x90(%rbp,%rsi), %xmm4
mulps %xmm9, %xmm4
addps %xmm2, %xmm4
movaps 0x90(%r11,%rsi), %xmm2
mulps %xmm6, %xmm2
addps %xmm4, %xmm2
movaps %xmm5, 0x60(%r9,%rsi)
mulps 0x50(%rsp), %xmm3
mulps 0x30(%rsp), %xmm1
addps %xmm3, %xmm1
movaps 0x180(%rsp), %xmm3
mulps 0x90(%rsp), %xmm3
mulps %xmm13, %xmm10
addps %xmm3, %xmm10
mulps 0x20(%rsp), %xmm11
addps 0x10(%rsp), %xmm11
addps %xmm11, %xmm10
mulps %xmm8, %xmm0
addps %xmm1, %xmm0
addps %xmm10, %xmm0
addps %xmm2, %xmm0
movaps %xmm0, 0x70(%r9,%rsi)
addl $0x8, %r12d
addq %r8, %rsi
addq %r8, %r15
addq %r8, %rbx
addq %r8, %rdx
jmp 0x28a970
movaps (%r13,%rsi), %xmm0
mulps 0x20(%rsp), %xmm0
addps 0x10(%rsp), %xmm0
movaps 0x10(%r13,%rsi), %xmm3
movaps 0x20(%r13,%rsi), %xmm13
movaps 0x30(%r13,%rsi), %xmm11
movaps %xmm3, %xmm1
mulps 0x50(%rsp), %xmm1
movaps %xmm13, %xmm4
movaps 0x110(%rsp), %xmm2
mulps %xmm2, %xmm4
movaps %xmm2, %xmm7
addps %xmm1, %xmm4
addps %xmm0, %xmm4
movaps (%rbp,%rsi), %xmm1
mulps 0x90(%rsp), %xmm1
movaps 0x10(%rbp,%rsi), %xmm2
movaps 0x20(%rbp,%rsi), %xmm8
movaps 0x30(%rbp,%rsi), %xmm9
movaps %xmm2, %xmm5
mulps 0x30(%rsp), %xmm5
addps %xmm1, %xmm5
movaps %xmm8, %xmm14
movaps 0xa0(%rsp), %xmm6
mulps %xmm6, %xmm14
addps %xmm5, %xmm14
addps %xmm4, %xmm14
movaps (%r11,%rsi), %xmm5
mulps 0x60(%rsp), %xmm5
movaps 0x10(%r11,%rsi), %xmm4
movaps 0x20(%r11,%rsi), %xmm1
movaps 0x30(%r11,%rsi), %xmm12
movaps %xmm4, %xmm15
movaps 0x100(%rsp), %xmm10
mulps %xmm10, %xmm15
addps %xmm5, %xmm15
movaps %xmm1, %xmm5
movaps 0x70(%rsp), %xmm0
mulps %xmm0, %xmm5
addps %xmm15, %xmm5
addps %xmm14, %xmm5
movaps %xmm5, (%r9,%rsi)
mulps 0x20(%rsp), %xmm3
addps 0x10(%rsp), %xmm3
movaps %xmm13, %xmm5
movaps 0x50(%rsp), %xmm15
mulps %xmm15, %xmm5
movaps %xmm11, %xmm14
mulps %xmm7, %xmm14
movaps 0x90(%rsp), %xmm7
mulps %xmm7, %xmm2
addps %xmm5, %xmm2
addps %xmm3, %xmm2
movaps %xmm8, %xmm3
movaps 0x30(%rsp), %xmm0
mulps %xmm0, %xmm3
movaps %xmm9, %xmm5
movaps %xmm9, 0x120(%rsp)
mulps %xmm6, %xmm5
addps %xmm14, %xmm5
movaps 0x60(%rsp), %xmm6
mulps %xmm6, %xmm4
addps %xmm3, %xmm4
movaps %xmm1, %xmm3
mulps %xmm10, %xmm3
addps %xmm4, %xmm3
addps %xmm2, %xmm3
movaps %xmm12, %xmm14
mulps 0x70(%rsp), %xmm14
addps %xmm5, %xmm14
addps %xmm3, %xmm14
movaps 0x40(%r13,%rsi), %xmm4
movaps 0x40(%rbp,%rsi), %xmm3
movaps 0x40(%r11,%rsi), %xmm2
movaps %xmm14, 0x10(%r9,%rsi)
movaps %xmm11, %xmm5
mulps %xmm15, %xmm5
movaps %xmm4, %xmm14
movaps 0x110(%rsp), %xmm10
mulps %xmm10, %xmm14
movaps %xmm9, %xmm15
mulps %xmm0, %xmm15
addps %xmm5, %xmm15
movaps %xmm3, %xmm5
movaps 0xa0(%rsp), %xmm9
mulps %xmm9, %xmm5
addps %xmm14, %xmm5
movaps 0x10(%rsp), %xmm14
mulps %xmm7, %xmm8
mulps %xmm6, %xmm1
addps %xmm8, %xmm1
mulps 0x20(%rsp), %xmm13
addps %xmm14, %xmm13
addps %xmm13, %xmm1
movaps %xmm12, %xmm0
movaps 0x100(%rsp), %xmm6
mulps %xmm6, %xmm0
addps %xmm15, %xmm0
addps %xmm1, %xmm0
movaps %xmm2, %xmm1
movaps 0x70(%rsp), %xmm7
mulps %xmm7, %xmm1
addps %xmm5, %xmm1
addps %xmm0, %xmm1
movaps 0x50(%r13,%rsi), %xmm0
mulps %xmm10, %xmm0
movaps 0x50(%rbp,%rsi), %xmm5
mulps %xmm9, %xmm5
addps %xmm0, %xmm5
movaps 0x50(%r11,%rsi), %xmm0
mulps %xmm7, %xmm0
addps %xmm5, %xmm0
movaps %xmm1, 0x20(%r9,%rsi)
mulps 0x50(%rsp), %xmm4
mulps 0x30(%rsp), %xmm3
addps %xmm4, %xmm3
movaps 0x120(%rsp), %xmm1
mulps 0x90(%rsp), %xmm1
mulps 0x60(%rsp), %xmm12
addps %xmm1, %xmm12
mulps 0x20(%rsp), %xmm11
movaps %xmm14, 0x10(%rsp)
addps %xmm14, %xmm11
addps %xmm11, %xmm12
mulps %xmm6, %xmm2
addps %xmm3, %xmm2
addps %xmm12, %xmm2
addps %xmm0, %xmm2
movaps %xmm2, 0x30(%r9,%rsi)
addl $0x4, %r12d
addq $0x40, %rsi
addq $0x40, %r15
addq $0x40, %rbx
addq $0x40, %rdx
leal 0x3(%r12), %edi
cmpl %eax, %edi
jl 0x28ae58
jmp 0x28b20f
movaps 0x10(%r13,%rsi), %xmm4
movaps 0x20(%r13,%rsi), %xmm10
movaps 0x10(%rbp,%rsi), %xmm0
movaps 0x20(%rbp,%rsi), %xmm6
movaps 0x10(%r11,%rsi), %xmm3
movaps 0x20(%r11,%rsi), %xmm1
movaps (%r13,%rsi), %xmm5
mulps 0x20(%rsp), %xmm5
addps 0x10(%rsp), %xmm5
movaps %xmm4, %xmm11
mulps 0x50(%rsp), %xmm11
movaps %xmm10, %xmm12
movaps 0x110(%rsp), %xmm7
mulps %xmm7, %xmm12
addps %xmm11, %xmm12
addps %xmm5, %xmm12
movaps (%rbp,%rsi), %xmm5
movaps 0x90(%rsp), %xmm14
mulps %xmm14, %xmm5
movaps %xmm0, %xmm11
mulps 0x30(%rsp), %xmm11
addps %xmm5, %xmm11
movaps %xmm6, %xmm5
movaps 0xa0(%rsp), %xmm15
mulps %xmm15, %xmm5
addps %xmm11, %xmm5
addps %xmm12, %xmm5
movaps (%r11,%rsi), %xmm11
movaps 0x60(%rsp), %xmm8
mulps %xmm8, %xmm11
movaps %xmm3, %xmm12
movaps 0x100(%rsp), %xmm9
mulps %xmm9, %xmm12
addps %xmm11, %xmm12
movaps %xmm1, %xmm13
movaps 0x70(%rsp), %xmm2
mulps %xmm2, %xmm13
addps %xmm12, %xmm13
addps %xmm5, %xmm13
movaps 0x30(%r13,%rsi), %xmm5
mulps %xmm7, %xmm5
movaps 0x30(%rbp,%rsi), %xmm12
mulps %xmm15, %xmm12
addps %xmm5, %xmm12
movaps 0x30(%r11,%rsi), %xmm11
mulps %xmm2, %xmm11
addps %xmm12, %xmm11
movaps %xmm13, (%r9,%rsi)
mulps 0x50(%rsp), %xmm10
mulps %xmm14, %xmm0
addps %xmm10, %xmm0
mulps 0x20(%rsp), %xmm4
addps 0x10(%rsp), %xmm4
addps %xmm4, %xmm0
mulps 0x30(%rsp), %xmm6
mulps %xmm8, %xmm3
addps %xmm6, %xmm3
mulps %xmm9, %xmm1
addps %xmm3, %xmm1
addps %xmm0, %xmm1
addps %xmm11, %xmm1
movaps %xmm1, 0x10(%r9,%rsi)
addl $0x2, %r12d
addq $0x20, %rsi
addq $0x20, %r15
addq $0x20, %rbx
addq $0x20, %rdx
leal 0x1(%r12), %edi
cmpl %eax, %edi
jl 0x28b0d1
addq %rsi, %r9
cmpl %eax, %r12d
jge 0x28b2be
movaps -0x20(%r15), %xmm0
mulps 0x20(%rsp), %xmm0
addps 0x10(%rsp), %xmm0
movaps -0x10(%r15), %xmm1
mulps 0x50(%rsp), %xmm1
movaps (%r15), %xmm2
mulps 0x110(%rsp), %xmm2
addps %xmm1, %xmm2
addps %xmm0, %xmm2
movaps -0x20(%rbx), %xmm0
mulps 0x90(%rsp), %xmm0
movaps -0x10(%rbx), %xmm1
mulps 0x30(%rsp), %xmm1
addps %xmm0, %xmm1
movaps (%rbx), %xmm0
mulps 0xa0(%rsp), %xmm0
addps %xmm1, %xmm0
addps %xmm2, %xmm0
movaps -0x20(%rdx), %xmm1
mulps 0x60(%rsp), %xmm1
movaps -0x10(%rdx), %xmm2
mulps 0x100(%rsp), %xmm2
addps %xmm1, %xmm2
movaps (%rdx), %xmm1
mulps 0x70(%rsp), %xmm1
addps %xmm2, %xmm1
addps %xmm0, %xmm1
movaps %xmm1, (%r9)
addq $0x10, %r9
incl %r12d
addq $0x10, %r15
addq $0x10, %rbx
addq $0x10, %rdx
jmp 0x28b21f
incl %r10d
movq %r15, %r13
movq %rbx, %rbp
movq %rdx, %r11
jmp 0x28a956
movq %r14, %rdi
incq %rdi
movq 0x48(%rsp), %r14
movq 0x88(%rsp), %rbp
movq 0x1e0(%rsp), %rsi
jmp 0x28a864
cmpl $0x3, 0xd4(%rbp,%r8)
movl %eax, %ecx
jne 0x28beda
cmpl $0x3, 0xd8(%rbp,%r8)
movl %eax, %ecx
jne 0x28beda
cmpl $0x1, 0xdc(%rbp,%r8)
movq %r10, 0x20(%rsp)
jne 0x28bd03
cmpl $0x1, 0xe0(%rbp,%r8)
jne 0x28bd03
cmpl $0x1, 0xe4(%rbp,%r8)
jne 0x28bd03
cmpl $0x1, 0xe8(%rbp,%r8)
jne 0x28bd03
movslq 0xdc(%rsp), %rax
movl 0x2c(%r14), %edx
movl 0x30(%r14), %edi
movl 0xe8(%rsp), %ecx
movq 0x28(%rbp), %r9
imulq 0x10(%r14), %r13
movq 0x1b0(%rbp,%r8), %rsi
movslq %edx, %r8
movq 0xb0(%rsp), %r11
movq %r11, 0x110(%rsp)
movq 0xc0(%rsp), %r11
imulq 0xf0(%rsp), %r11
movq %r11, 0x100(%rsp)
leaq (%rax,%rax), %r11
movq %r11, 0x120(%rsp)
leaq (%rax,%rax,2), %r11
movq %r11, 0x180(%rsp)
movq %rax, 0x70(%rsp)
addl $0x2, %eax
movslq %eax, %rbp
xorl %eax, %eax
testl %ecx, %ecx
cmovlel %eax, %ecx
movq %rcx, 0x50(%rsp)
shlq $0x2, %rbp
movq %r8, 0xa0(%rsp)
leaq (,%r8,4), %rcx
movq %rcx, 0x10(%rsp)
movq %rsi, 0x90(%rsp)
movq %r13, 0x60(%rsp)
cmpq 0x50(%rsp), %rax
je 0x28d41b
testq %rsi, %rsi
je 0x28b412
movss (%rsi,%rax,4), %xmm0
jmp 0x28b415
xorps %xmm0, %xmm0
movq 0xa0(%rsp), %r14
movq %r13, %rsi
imulq %rax, %rsi
addq %r10, %rsi
imulq $0x24, %rax, %r13
movq 0x100(%rsp), %r15
movq %rax, 0x30(%rsp)
imulq %rax, %r15
addq 0x110(%rsp), %r15
movq 0x70(%rsp), %rax
leaq (%r15,%rax,4), %rax
movq 0x120(%rsp), %rcx
leaq (%r15,%rcx,4), %r8
movq 0x180(%rsp), %rcx
leaq (%r15,%rcx,4), %r11
xorl %r10d, %r10d
movq %rsi, %rbx
movl %r10d, %ecx
orl $0x1, %ecx
cmpl %edi, %ecx
jge 0x28b645
leaq (%rsi,%r14,4), %rsi
xorl %r12d, %r12d
movl %edx, %ecx
testl %ecx, %ecx
jle 0x28b582
movsd 0x4(%r15,%r12), %xmm3
movsd 0x4(%rax,%r12), %xmm6
movsd (%r8,%r12), %xmm5
movups (%r9,%r13), %xmm1
movups 0x10(%r9,%r13), %xmm2
movss (%r15,%r12), %xmm4
unpcklps %xmm3, %xmm4 # xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
movss (%rax,%r12), %xmm7
movaps %xmm7, %xmm8
shufps $0xd4, %xmm3, %xmm8 # xmm8 = xmm8[0,1],xmm3[1,3]
shufps $0x24, %xmm8, %xmm4 # xmm4 = xmm4[0,1],xmm8[2,0]
movaps %xmm6, %xmm9
movlhps %xmm5, %xmm9 # xmm9 = xmm9[0],xmm5[0]
mulps %xmm1, %xmm4
mulps %xmm2, %xmm9
addps %xmm4, %xmm9
movss 0x8(%r8,%r12), %xmm8
movss 0x20(%r9,%r13), %xmm3
movaps %xmm3, %xmm4
mulss %xmm8, %xmm4
movaps %xmm9, %xmm10
unpckhpd %xmm9, %xmm10 # xmm10 = xmm10[1],xmm9[1]
addps %xmm9, %xmm10
movaps %xmm10, %xmm9
shufps $0x55, %xmm10, %xmm9 # xmm9 = xmm9[1,1],xmm10[1,1]
addss %xmm10, %xmm9
addss %xmm0, %xmm4
addss %xmm9, %xmm4
movsd (%r11,%r12), %xmm9
movaps %xmm1, %xmm10
unpcklps %xmm6, %xmm10 # xmm10 = xmm10[0],xmm6[0],xmm10[1],xmm6[1]
shufps $0xd4, %xmm5, %xmm8 # xmm8 = xmm8[0,1],xmm5[1,3]
shufps $0xd4, %xmm6, %xmm5 # xmm5 = xmm5[0,1],xmm6[1,3]
shufps $0x24, %xmm5, %xmm10 # xmm10 = xmm10[0,1],xmm5[2,0]
shufps $0x42, %xmm9, %xmm8 # xmm8 = xmm8[2,0],xmm9[0,1]
movss %xmm7, %xmm1 # xmm1 = xmm7[0],xmm1[1,2,3]
mulps %xmm10, %xmm1
mulps %xmm2, %xmm8
addps %xmm1, %xmm8
mulss 0x8(%r11,%r12), %xmm3
movaps %xmm8, %xmm1
unpckhpd %xmm8, %xmm1 # xmm1 = xmm1[1],xmm8[1]
addps %xmm8, %xmm1
movaps %xmm1, %xmm2
shufps $0x55, %xmm1, %xmm2 # xmm2 = xmm2[1,1],xmm1[1,1]
addss %xmm1, %xmm2
addss %xmm0, %xmm3
addss %xmm2, %xmm3
movss %xmm4, (%rbx,%r12)
movss %xmm3, (%rsi,%r12)
decl %ecx
addq $0x4, %r12
jmp 0x28b482
addq %rbp, %r15
addq %r12, %r15
addq %rbp, %rax
addq %r12, %rax
addq %rbp, %r8
addq %r12, %r8
addq %rbp, %r11
addq %r12, %r11
addq 0x10(%rsp), %rbx
addq %r12, %rbx
addl $0x2, %r10d
addq %r12, %rsi
jmp 0x28b46b
xorl %ecx, %ecx
movl %edx, %esi
testl %esi, %esi
jle 0x28b62a
movsd 0x4(%r15,%rcx), %xmm1
movups (%r9,%r13), %xmm2
movups 0x10(%r9,%r13), %xmm3
movss (%r15,%rcx), %xmm4
unpcklps %xmm1, %xmm4 # xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
movss (%rax,%rcx), %xmm5
shufps $0xd4, %xmm1, %xmm5 # xmm5 = xmm5[0,1],xmm1[1,3]
shufps $0x24, %xmm5, %xmm4 # xmm4 = xmm4[0,1],xmm5[2,0]
mulps %xmm2, %xmm4
movsd 0x4(%rax,%rcx), %xmm1
movhps (%r8,%rcx), %xmm1 # xmm1 = xmm1[0,1],mem[0,1]
mulps %xmm3, %xmm1
movss 0x20(%r9,%r13), %xmm2
mulss 0x8(%r8,%rcx), %xmm2
addps %xmm4, %xmm1
movaps %xmm1, %xmm3
unpckhpd %xmm1, %xmm3 # xmm3 = xmm3[1],xmm1[1]
addps %xmm1, %xmm3
movaps %xmm3, %xmm1
shufps $0x55, %xmm3, %xmm1 # xmm1 = xmm1[1,1],xmm3[1,1]
addss %xmm3, %xmm1
addss %xmm0, %xmm2
addss %xmm1, %xmm2
movss %xmm2, (%rbx,%rcx)
decl %esi
addq $0x4, %rcx
jmp 0x28b5b2
addq %rcx, %r15
addq $0x8, %r15
addq %rcx, %rax
addq $0x8, %rax
addq %rcx, %r8
addq $0x8, %r8
incl %r10d
addq %rcx, %rbx
cmpl %edi, %r10d
jl 0x28b5ae
movq 0x30(%rsp), %rax
incq %rax
movq 0x48(%rsp), %r14
movq 0x20(%rsp), %r10
movq 0x60(%rsp), %r13
movq 0x90(%rsp), %rsi
jmp 0x28b3fb
cmpl $0x5, %r15d
jne 0x28ccf0
cmpl $0x1, 0xdc(%rbp,%r8)
jne 0x28c9de
cmpl $0x1, 0xe0(%rbp,%r8)
jne 0x28c9de
cmpl $0x1, 0xe4(%rbp,%r8)
jne 0x28c9de
cmpl $0x1, 0xe8(%rbp,%r8)
jne 0x28c9de
movl 0x2c(%r14), %ecx
movl 0x30(%r14), %edx
movl 0xdc(%rsp), %eax
movl 0xe8(%rsp), %esi
movq 0x1b0(%rbp,%r8), %r9
movl %edx, %r8d
leal 0x10(,%rax,4), %eax
movslq %eax, %rdi
leal (,%rcx,4), %eax
cltq
xorl %edx, %edx
testl %ecx, %ecx
cmovlel %edx, %ecx
movq %rcx, 0x10(%rsp)
testl %esi, %esi
cmovlel %edx, %esi
movq %rsi, 0x50(%rsp)
shlq $0x2, %rax
movq %rax, 0x20(%rsp)
shlq $0x2, %rdi
movq %r9, 0x60(%rsp)
cmpq 0x50(%rsp), %rdx
je 0x28d415
testq %r9, %r9
je 0x28b732
movq %rdx, %rax
shlq $0x4, %rax
movups (%r9,%rax), %xmm0
jmp 0x28b735
xorps %xmm0, %xmm0
movq 0x40(%r14), %r10
imulq %rdx, %r10
movq 0x10(%r14), %rax
imulq %rax, %r10
addq (%r14), %r10
movslq 0x2c(%r14), %r9
movq 0x28(%rbp), %r11
movslq 0x54(%rbp), %rbx
imulq %rdx, %rbx
imulq 0x38(%rbp), %rbx
imulq %rax, %r9
addq %r10, %r9
movslq 0xdc(%rsp), %rax
movq 0xf0(%rsp), %r15
movq %rdx, 0x30(%rsp)
imulq %rdx, %r15
movq 0xc0(%rsp), %rcx
imulq %rcx, %r15
addq 0xb0(%rsp), %r15
imulq %rcx, %rax
leaq (%r15,%rax), %r12
leaq (%r15,%rax,2), %r13
leaq (%rax,%rax,2), %rbp
addq %r15, %rbp
leaq (%r15,%rax,4), %rdx
leaq (%rax,%rax,4), %rax
addq %r15, %rax
xorl %esi, %esi
movl %esi, %ecx
orl $0x1, %ecx
cmpl %r8d, %ecx
jge 0x28baf4
movl %r8d, %r14d
movq 0x10(%rsp), %rcx
xorl %r8d, %r8d
subl $0x1, %ecx
jb 0x28bab4
movaps (%r11,%rbx), %xmm6
movaps 0x10(%r11,%rbx), %xmm8
movaps 0x20(%r11,%rbx), %xmm2
movaps 0x30(%r11,%rbx), %xmm3
movaps 0x40(%r11,%rbx), %xmm5
movaps (%r15,%r8), %xmm1
mulps %xmm6, %xmm1
addps %xmm0, %xmm1
movaps 0x10(%r15,%r8), %xmm4
mulps %xmm8, %xmm4
movaps 0x20(%r15,%r8), %xmm7
mulps %xmm2, %xmm7
addps %xmm4, %xmm7
addps %xmm1, %xmm7
movaps 0x30(%r15,%r8), %xmm1
mulps %xmm3, %xmm1
movaps 0x40(%r15,%r8), %xmm14
mulps %xmm5, %xmm14
addps %xmm1, %xmm14
movaps (%r12,%r8), %xmm1
movaps 0x10(%r12,%r8), %xmm13
movaps 0x20(%r12,%r8), %xmm12
movaps 0x30(%r12,%r8), %xmm10
movaps 0x40(%r12,%r8), %xmm4
mulps %xmm1, %xmm6
addps %xmm0, %xmm6
mulps %xmm13, %xmm8
mulps %xmm12, %xmm2
addps %xmm8, %xmm2
addps %xmm6, %xmm2
mulps %xmm10, %xmm3
mulps %xmm4, %xmm5
addps %xmm3, %xmm5
movaps 0x50(%r11,%rbx), %xmm6
movaps 0x60(%r11,%rbx), %xmm11
movaps 0x70(%r11,%rbx), %xmm9
movaps 0x80(%r11,%rbx), %xmm8
movaps 0x90(%r11,%rbx), %xmm3
mulps %xmm6, %xmm1
addps %xmm14, %xmm1
addps %xmm7, %xmm1
mulps %xmm11, %xmm13
mulps %xmm9, %xmm12
addps %xmm13, %xmm12
mulps %xmm8, %xmm10
addps %xmm12, %xmm10
mulps %xmm3, %xmm4
addps %xmm10, %xmm4
addps %xmm1, %xmm4
movaps (%r13,%r8), %xmm13
movaps 0x10(%r13,%r8), %xmm12
movaps 0x20(%r13,%r8), %xmm10
movaps 0x30(%r13,%r8), %xmm7
movaps 0x40(%r13,%r8), %xmm1
mulps %xmm13, %xmm6
addps %xmm5, %xmm6
addps %xmm2, %xmm6
mulps %xmm12, %xmm11
mulps %xmm10, %xmm9
addps %xmm11, %xmm9
mulps %xmm7, %xmm8
addps %xmm9, %xmm8
mulps %xmm1, %xmm3
addps %xmm8, %xmm3
addps %xmm6, %xmm3
movaps 0xa0(%r11,%rbx), %xmm14
movaps 0xb0(%r11,%rbx), %xmm11
movaps 0xc0(%r11,%rbx), %xmm8
movaps 0xd0(%r11,%rbx), %xmm5
movaps 0xe0(%r11,%rbx), %xmm2
mulps %xmm14, %xmm13
mulps %xmm11, %xmm12
addps %xmm13, %xmm12
mulps %xmm8, %xmm10
addps %xmm12, %xmm10
mulps %xmm5, %xmm7
addps %xmm10, %xmm7
mulps %xmm2, %xmm1
addps %xmm7, %xmm1
addps %xmm4, %xmm1
movaps (%rbp,%r8), %xmm13
movaps 0x10(%rbp,%r8), %xmm10
movaps 0x20(%rbp,%r8), %xmm9
movaps 0x30(%rbp,%r8), %xmm6
movaps 0x40(%rbp,%r8), %xmm4
mulps %xmm13, %xmm14
mulps %xmm10, %xmm11
addps %xmm14, %xmm11
mulps %xmm9, %xmm8
addps %xmm11, %xmm8
mulps %xmm6, %xmm5
addps %xmm8, %xmm5
mulps %xmm4, %xmm2
addps %xmm5, %xmm2
addps %xmm3, %xmm2
movaps 0xf0(%r11,%rbx), %xmm14
movaps 0x100(%r11,%rbx), %xmm12
movaps 0x110(%r11,%rbx), %xmm8
movaps 0x120(%r11,%rbx), %xmm7
movaps 0x130(%r11,%rbx), %xmm3
mulps %xmm14, %xmm13
mulps %xmm12, %xmm10
addps %xmm13, %xmm10
mulps %xmm8, %xmm9
addps %xmm10, %xmm9
mulps %xmm7, %xmm6
addps %xmm9, %xmm6
mulps %xmm3, %xmm4
addps %xmm6, %xmm4
movaps (%rdx,%r8), %xmm6
movaps 0x10(%rdx,%r8), %xmm13
movaps 0x20(%rdx,%r8), %xmm11
movaps 0x30(%rdx,%r8), %xmm9
movaps 0x40(%rdx,%r8), %xmm5
mulps %xmm6, %xmm14
mulps %xmm13, %xmm12
addps %xmm14, %xmm12
mulps %xmm11, %xmm8
addps %xmm12, %xmm8
mulps %xmm9, %xmm7
addps %xmm8, %xmm7
mulps %xmm5, %xmm3
addps %xmm7, %xmm3
movaps 0x140(%r11,%rbx), %xmm7
movaps 0x150(%r11,%rbx), %xmm14
movaps 0x160(%r11,%rbx), %xmm12
movaps 0x170(%r11,%rbx), %xmm10
movaps 0x180(%r11,%rbx), %xmm8
mulps %xmm7, %xmm6
addps %xmm4, %xmm6
addps %xmm1, %xmm6
mulps %xmm14, %xmm13
mulps %xmm12, %xmm11
addps %xmm13, %xmm11
mulps %xmm10, %xmm9
addps %xmm11, %xmm9
mulps %xmm8, %xmm5
addps %xmm9, %xmm5
addps %xmm6, %xmm5
mulps (%rax,%r8), %xmm7
addps %xmm3, %xmm7
mulps 0x10(%rax,%r8), %xmm14
mulps 0x20(%rax,%r8), %xmm12
addps %xmm2, %xmm7
addps %xmm14, %xmm12
mulps 0x30(%rax,%r8), %xmm10
addps %xmm12, %xmm10
mulps 0x40(%rax,%r8), %xmm8
addps %xmm10, %xmm8
addps %xmm7, %xmm8
movaps %xmm5, (%r10,%r8)
movaps %xmm8, (%r9,%r8)
addq $0x10, %r8
jmp 0x28b7ca
addq %rdi, %r15
addq %r8, %r15
addq %rdi, %r12
addq %r8, %r12
addq %rdi, %r13
addq %r8, %r13
addq %rdi, %rbp
addq %r8, %rbp
addq %rdi, %rdx
addq %r8, %rdx
addq %rdi, %rax
addq %r8, %rax
movq 0x20(%rsp), %rcx
addq %rcx, %r10
addq %r8, %r10
addq %rcx, %r9
addq %r8, %r9
addl $0x2, %esi
movl %r14d, %r8d
jmp 0x28b7b1
movq 0x60(%rsp), %r9
cmpl %r8d, %esi
jge 0x28bce9
movq 0x10(%rsp), %rax
movl %eax, %ecx
xorl %eax, %eax
subl $0x1, %ecx
jb 0x28bcbe
movaps (%r11,%rbx), %xmm1
movaps 0x10(%r11,%rbx), %xmm2
movaps 0x20(%r11,%rbx), %xmm3
movaps 0x30(%r11,%rbx), %xmm4
mulps (%r15,%rax), %xmm1
movaps 0x40(%r11,%rbx), %xmm5
addps %xmm0, %xmm1
mulps 0x10(%r15,%rax), %xmm2
mulps 0x20(%r15,%rax), %xmm3
addps %xmm2, %xmm3
addps %xmm1, %xmm3
mulps 0x30(%r15,%rax), %xmm4
mulps 0x40(%r15,%rax), %xmm5
addps %xmm4, %xmm5
movaps 0x50(%r11,%rbx), %xmm1
movaps 0x60(%r11,%rbx), %xmm4
movaps 0x70(%r11,%rbx), %xmm6
movaps 0x80(%r11,%rbx), %xmm7
movaps 0x90(%r11,%rbx), %xmm2
mulps (%r12,%rax), %xmm1
addps %xmm5, %xmm1
addps %xmm3, %xmm1
mulps 0x10(%r12,%rax), %xmm4
mulps 0x20(%r12,%rax), %xmm6
mulps 0x30(%r12,%rax), %xmm7
addps %xmm4, %xmm6
addps %xmm6, %xmm7
mulps 0x40(%r12,%rax), %xmm2
addps %xmm7, %xmm2
addps %xmm1, %xmm2
movaps 0xa0(%r11,%rbx), %xmm3
movaps 0xb0(%r11,%rbx), %xmm4
movaps 0xc0(%r11,%rbx), %xmm5
movaps 0xd0(%r11,%rbx), %xmm6
mulps (%r13,%rax), %xmm3
movaps 0xe0(%r11,%rbx), %xmm1
mulps 0x10(%r13,%rax), %xmm4
addps %xmm3, %xmm4
mulps 0x20(%r13,%rax), %xmm5
mulps 0x30(%r13,%rax), %xmm6
addps %xmm4, %xmm5
addps %xmm5, %xmm6
mulps 0x40(%r13,%rax), %xmm1
addps %xmm6, %xmm1
addps %xmm2, %xmm1
movaps 0xf0(%r11,%rbx), %xmm2
movaps 0x100(%r11,%rbx), %xmm3
movaps 0x110(%r11,%rbx), %xmm4
movaps 0x120(%r11,%rbx), %xmm5
mulps (%rbp,%rax), %xmm2
movaps 0x130(%r11,%rbx), %xmm6
mulps 0x10(%rbp,%rax), %xmm3
addps %xmm2, %xmm3
mulps 0x20(%rbp,%rax), %xmm4
mulps 0x30(%rbp,%rax), %xmm5
addps %xmm3, %xmm4
addps %xmm4, %xmm5
mulps 0x40(%rbp,%rax), %xmm6
addps %xmm5, %xmm6
movaps 0x140(%r11,%rbx), %xmm2
movaps 0x150(%r11,%rbx), %xmm3
movaps 0x160(%r11,%rbx), %xmm4
movaps 0x170(%r11,%rbx), %xmm5
movaps 0x180(%r11,%rbx), %xmm7
mulps (%rdx,%rax), %xmm2
addps %xmm6, %xmm2
addps %xmm1, %xmm2
mulps 0x10(%rdx,%rax), %xmm3
mulps 0x20(%rdx,%rax), %xmm4
mulps 0x30(%rdx,%rax), %xmm5
addps %xmm3, %xmm4
addps %xmm4, %xmm5
mulps 0x40(%rdx,%rax), %xmm7
addps %xmm5, %xmm7
addps %xmm2, %xmm7
movaps %xmm7, (%r10,%rax)
addq $0x10, %rax
jmp 0x28bb0b
addq %rax, %r15
addq $0x40, %r15
leaq 0x40(%r12,%rax), %r12
addq %rax, %r13
addq $0x40, %r13
addq %rax, %rbp
addq $0x40, %rbp
addq %rax, %rdx
addq $0x40, %rdx
incl %esi
addq %rax, %r10
jmp 0x28baf9
movq 0x30(%rsp), %rdx
incq %rdx
movq 0x48(%rsp), %r14
movq 0x88(%rsp), %rbp
jmp 0x28b714
cmpl $0x1, 0xdc(%rbp,%r8)
movl %eax, %ecx
jne 0x28beda
cmpl $0x1, 0xe0(%rbp,%r8)
movl %eax, %ecx
jne 0x28beda
cmpl $0x2, 0xe4(%rbp,%r8)
movl %eax, %ecx
jne 0x28beda
cmpl $0x2, 0xe8(%rbp,%r8)
movl %eax, %ecx
jne 0x28beda
movslq 0xdc(%rsp), %rdx
movl 0x2c(%r14), %eax
movl 0x30(%r14), %edi
movl 0xe8(%rsp), %esi
movl %edx, %ecx
subl %eax, %ecx
addl %ecx, %ecx
movq 0x28(%rbp), %r9
movq 0x1b0(%rbp,%r8), %r8
imulq 0x10(%r14), %r13
movq 0xb0(%rsp), %r11
movq %r11, 0x60(%rsp)
movq 0xc0(%rsp), %r11
imulq 0xf0(%rsp), %r11
movq %r11, 0x70(%rsp)
movq %rdx, 0x30(%rsp)
addq %rdx, %rdx
movq %rdx, 0x90(%rsp)
movslq %ecx, %r15
xorl %r12d, %r12d
testl %edi, %edi
cmovlel %r12d, %edi
testl %esi, %esi
cmovlel %r12d, %esi
movq %rsi, 0x10(%rsp)
shlq $0x2, %r15
movq %r8, 0x50(%rsp)
cmpq 0x10(%rsp), %r12
je 0x28d415
testq %r8, %r8
je 0x28bde0
movss (%r8,%r12,4), %xmm0
jmp 0x28bde3
xorps %xmm0, %xmm0
movq %r13, %r8
imulq %r12, %r13
addq %r10, %r13
imulq $0x24, %r12, %rbp
movq 0x70(%rsp), %r11
imulq %r12, %r11
addq 0x60(%rsp), %r11
movq 0x30(%rsp), %rcx
leaq (%r11,%rcx,4), %r10
movq 0x90(%rsp), %rcx
leaq (%r11,%rcx,4), %rbx
xorl %esi, %esi
cmpl %edi, %esi
je 0x28beb8
xorl %edx, %edx
movl %eax, %ecx
testl %ecx, %ecx
jle 0x28be9f
movsd 0x4(%r11,%rdx), %xmm1
movups (%r9,%rbp), %xmm2
movups 0x10(%r9,%rbp), %xmm3
movss (%r11,%rdx), %xmm4
unpcklps %xmm1, %xmm4 # xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
movss (%r10,%rdx), %xmm5
shufps $0xd4, %xmm1, %xmm5 # xmm5 = xmm5[0,1],xmm1[1,3]
shufps $0x24, %xmm5, %xmm4 # xmm4 = xmm4[0,1],xmm5[2,0]
mulps %xmm2, %xmm4
movsd 0x4(%r10,%rdx), %xmm1
movhps (%rbx,%rdx), %xmm1 # xmm1 = xmm1[0,1],mem[0,1]
mulps %xmm3, %xmm1
addps %xmm4, %xmm1
movss 0x20(%r9,%rbp), %xmm2
mulss 0x8(%rbx,%rdx), %xmm2
addss %xmm0, %xmm2
movaps %xmm1, %xmm3
unpckhpd %xmm1, %xmm3 # xmm3 = xmm3[1],xmm1[1]
addps %xmm1, %xmm3
movaps %xmm3, %xmm1
shufps $0x55, %xmm3, %xmm1 # xmm1 = xmm1[1,1],xmm3[1,1]
addss %xmm3, %xmm1
addss %xmm2, %xmm1
movss %xmm1, (%r13)
addq $0x4, %r13
decl %ecx
addq $0x8, %rdx
jmp 0x28be22
addq %r15, %r11
addq %rdx, %r11
addq %r15, %r10
addq %rdx, %r10
addq %r15, %rbx
addq %rdx, %rbx
incl %esi
jmp 0x28be16
incq %r12
movq 0x48(%rsp), %r14
movq 0x88(%rsp), %rbp
movq 0x20(%rsp), %r10
movq %r8, %r13
movq 0x50(%rsp), %r8
jmp 0x28bdc8
cltd
idivl %esi
movl %eax, %edi
movl %ecx, %eax
cltd
idivl %esi
movl %eax, %r10d
movq 0x190(%rsp), %r14
cmpb $0x1, 0x27(%r14)
jne 0x28bf18
xorl %eax, %eax
testb $0x3, %dil
sete %al
leal (%rax,%rax,2), %eax
incl %eax
movl %eax, 0x20(%rsp)
xorl %eax, %eax
testb $0x3, %r10b
sete %al
leal (%rax,%rax,2), %eax
incl %eax
jmp 0x28bf1f
pushq $0x1
popq %rax
movl %eax, 0x20(%rsp)
movl %eax, 0x10(%rsp)
movl 0x30(%rsp), %ebx
movq %r11, %r13
movq 0xb0(%rsp), %rcx
movq 0xb8(%rsp), %rax
movq %rcx, 0x198(%rsp)
movq %rax, 0x1a0(%rsp)
movq 0xc0(%rsp), %rcx
movq %rcx, 0x1a8(%rsp)
movl 0xc8(%rsp), %ecx
movl %ecx, 0x1b0(%rsp)
movq 0xd0(%rsp), %rcx
movq %rcx, 0x1b8(%rsp)
movups 0xd8(%rsp), %xmm0
movups %xmm0, 0x1c0(%rsp)
movl 0xe8(%rsp), %ecx
movl %ecx, 0x1d0(%rsp)
movq 0xf0(%rsp), %rcx
movq %rcx, 0x1d8(%rsp)
testq %rax, %rax
je 0x28bfae
lock
incl (%rax)
cmpl 0x20(%rsp), %r9d
jle 0x28c00a
movl %r10d, %r12d
movl %edi, %ebp
movups (%r14), %xmm0
movups 0x10(%r14), %xmm1
movups 0x20(%r14), %xmm2
movups 0x30(%r14), %xmm3
leaq 0x130(%rsp), %rcx
movaps %xmm3, 0x30(%rcx)
movaps %xmm2, 0x20(%rcx)
movaps %xmm1, 0x10(%rcx)
movaps %xmm0, (%rcx)
movq 0x10(%r14), %rax
movq %rax, 0x8(%rcx)
leaq 0xb0(%rsp), %rdi
leaq 0x198(%rsp), %rsi
movl 0x20(%rsp), %edx
callq 0x64e3b
movl %ebp, %edi
movl %r12d, %r10d
movq 0x48(%rsp), %rdx
movq (%rdx), %rax
movq %rax, 0x130(%rsp)
movq 0x8(%rdx), %rax
movq %rax, 0x138(%rsp)
movq 0x10(%rdx), %rcx
movq %rcx, 0x140(%rsp)
movl 0x18(%rdx), %ecx
movl %ecx, 0x148(%rsp)
movq 0x20(%rdx), %rcx
movq %rcx, 0x150(%rsp)
movups 0x28(%rdx), %xmm0
movups %xmm0, 0x158(%rsp)
movl 0x38(%rdx), %ecx
movl %ecx, 0x168(%rsp)
movq 0x40(%rdx), %rcx
movq %rcx, 0x170(%rsp)
testq %rax, %rax
je 0x28c072
lock
incl (%rax)
cmpl %ebx, 0x10(%rsp)
jae 0x28c0fe
movq 0x88(%rsp), %rcx
movq (%rcx), %rax
movq -0x18(%rax), %rax
movl 0xd0(%rcx,%rax), %eax
cltd
movl 0x10(%rsp), %r9d
idivl %r9d
movl %r15d, %ecx
shrq %cl, %r13
movl %edi, %ebx
movl %r9d, %r8d
imulq %r13, %r8
movq 0x10(%r14), %rcx
movq %rcx, (%rsp)
leaq 0x130(%rsp), %rdi
movl 0x60(%rsp), %esi
movl 0x70(%rsp), %edx
movl %eax, %ecx
movl %r10d, %ebp
callq 0x628f2
pushq $-0x64
popq %r12
cmpq $0x0, 0x130(%rsp)
je 0x28d35b
movl %ebp, %r10d
movl %ebx, %edi
movslq 0x168(%rsp), %rax
imulq 0x170(%rsp), %rax
testq %rax, %rax
je 0x28d35b
xorl %r15d, %r15d
xorl %r12d, %r12d
xorl %ebx, %ebx
movq 0x88(%rsp), %r9
movq (%r9), %rax
movq -0x18(%rax), %rax
movslq 0x108(%r9,%rax), %rax
cmpq %rax, %rbx
jge 0x28c3ab
movl %r15d, %eax
cltd
movl 0x20(%rsp), %esi
idivl %esi
movl %eax, %ecx
movl %edi, %ebp
movl %edi, %eax
cltd
idivl %esi
movl %eax, %edx
movslq 0x1c4(%rsp), %rdi
movslq 0x1c8(%rsp), %rsi
movslq 0x1cc(%rsp), %rax
movslq %ecx, %r8
imulq 0x1d8(%rsp), %r8
movq 0x1a8(%rsp), %rcx
imulq %rcx, %r8
addq 0x198(%rsp), %r8
movq %r8, 0x238(%rsp)
movl 0x1b0(%rsp), %r8d
andq $0x0, 0x240(%rsp)
movq %rcx, 0x248(%rsp)
movl %r8d, 0x250(%rsp)
movq 0x1b8(%rsp), %r8
movq %r8, 0x258(%rsp)
movl %edi, 0x264(%rsp)
movl %esi, 0x268(%rsp)
movl %eax, 0x26c(%rsp)
movl %edx, 0x270(%rsp)
imulq %rdi, %rsi
imulq %rcx, %rax
imulq %rsi, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rcx
movq %rax, 0x278(%rsp)
movl 0x1c0(%rsp), %eax
movl %eax, 0x260(%rsp)
movl %r12d, %eax
cltd
movl 0x10(%rsp), %esi
idivl %esi
movl %eax, %ecx
movl %r10d, %r13d
movl %r10d, %eax
cltd
idivl %esi
movl %eax, %edx
movslq 0x15c(%rsp), %r8
movslq 0x160(%rsp), %rdi
movslq 0x164(%rsp), %rax
movslq %ecx, %rcx
imulq 0x170(%rsp), %rcx
movq 0x140(%rsp), %rsi
imulq %rsi, %rcx
addq 0x130(%rsp), %rcx
movq %rcx, 0x1f0(%rsp)
movl 0x148(%rsp), %ecx
andq $0x0, 0x1f8(%rsp)
movq %rsi, 0x200(%rsp)
movl %ecx, 0x208(%rsp)
movq 0x150(%rsp), %rcx
movq %rcx, 0x210(%rsp)
movl %r8d, 0x21c(%rsp)
movl %edi, 0x220(%rsp)
movl %eax, 0x224(%rsp)
movl %edx, 0x228(%rsp)
imulq %r8, %rdi
imulq %rsi, %rax
imulq %rdi, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rsi
movq %rax, 0x230(%rsp)
movl 0x158(%rsp), %eax
movl %eax, 0x218(%rsp)
movq 0x10(%r9), %rax
movq (%rax,%rbx,8), %rdi
movups (%r14), %xmm0
movups 0x10(%r14), %xmm1
movups 0x20(%r14), %xmm2
movups 0x30(%r14), %xmm3
movaps %xmm0, 0x280(%rsp)
movaps %xmm3, 0x2b0(%rsp)
movaps %xmm2, 0x2a0(%rsp)
movaps %xmm1, 0x290(%rsp)
movq %rcx, 0x288(%rsp)
movq (%rdi), %rax
leaq 0x238(%rsp), %rsi
leaq 0x1f0(%rsp), %rdx
leaq 0x280(%rsp), %rcx
callq *0x38(%rax)
movq 0x1f8(%rsp), %rax
testq %rax, %rax
je 0x28c361
lock
decl (%rax)
jne 0x28c361
movq 0x1f0(%rsp), %rsi
movq 0x210(%rsp), %rdi
testq %rdi, %rdi
je 0x28c359
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x28c361
movq %rsi, %rdi
callq 0x5f3e0
movq 0x240(%rsp), %rax
testq %rax, %rax
je 0x28c398
lock
decl (%rax)
jne 0x28c398
movq 0x238(%rsp), %rsi
movq 0x258(%rsp), %rdi
testq %rdi, %rdi
je 0x28c390
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x28c398
movq %rsi, %rdi
callq 0x5f3e0
incq %rbx
movl %r13d, %r10d
addl %r13d, %r12d
movl %ebp, %edi
addl %ebp, %r15d
jmp 0x28c106
movl 0x30(%rsp), %edx
cmpl %edx, 0x10(%rsp)
jae 0x28c3d2
leaq 0x130(%rsp), %rdi
movq 0x48(%rsp), %rsi
movq %r14, %rcx
callq 0x64e3b
xorl %r12d, %r12d
jmp 0x28d35b
xorl %r12d, %r12d
leaq 0x130(%rsp), %rax
movq 0x48(%rsp), %rcx
cmpq %rcx, %rax
je 0x28d35b
movq 0x138(%rsp), %rax
testq %rax, %rax
je 0x28c3fb
lock
incl (%rax)
movq 0x8(%rcx), %rax
testq %rax, %rax
je 0x28d307
lock
decl (%rax)
jne 0x28d307
movq 0x48(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
je 0x28d2fc
movq (%rdi), %rax
callq *0x18(%rax)
xorl %r12d, %r12d
jmp 0x28d307
pushq $0x3
popq %r15
cmpl $0x1, 0xdc(%rbp,%r8)
jne 0x28ccf0
cmpl $0x1, 0xe0(%rbp,%r8)
jne 0x28ccf0
cmpl $0x2, 0xe4(%rbp,%r8)
jne 0x28ccf0
cmpl $0x2, 0xe8(%rbp,%r8)
jne 0x28ccf0
movl 0x2c(%r14), %eax
movl 0x30(%r14), %ecx
movl 0xdc(%rsp), %edi
movl 0xe8(%rsp), %edx
subl %eax, %edi
shll $0x3, %edi
movq 0x1b0(%rbp,%r8), %rsi
movslq %edi, %rdi
xorl %r8d, %r8d
testl %ecx, %ecx
cmovlel %r8d, %ecx
testl %edx, %edx
cmovlel %r8d, %edx
shlq $0x2, %rdi
cmpq %rdx, %r8
je 0x28d415
testq %rsi, %rsi
je 0x28c4c9
movq %r8, %r9
shlq $0x4, %r9
movups (%rsi,%r9), %xmm15
jmp 0x28c4cd
xorps %xmm15, %xmm15
movq 0x40(%r14), %r9
imulq %r8, %r9
imulq 0x10(%r14), %r9
addq (%r14), %r9
movq 0x28(%rbp), %r15
movslq 0x54(%rbp), %r12
imulq %r8, %r12
imulq 0x38(%rbp), %r12
movslq 0xdc(%rsp), %rbx
movq 0xf0(%rsp), %r10
imulq %r8, %r10
movq 0xc0(%rsp), %r11
imulq %r11, %r10
addq 0xb0(%rsp), %r10
imulq %r11, %rbx
leaq (%r10,%rbx), %r11
leaq (%r10,%rbx,2), %rbx
movaps (%r15,%r12), %xmm3
movaps 0x10(%r15,%r12), %xmm4
movaps 0x20(%r15,%r12), %xmm5
movaps 0x30(%r15,%r12), %xmm14
movaps 0x40(%r15,%r12), %xmm7
movaps 0x50(%r15,%r12), %xmm6
movaps 0x60(%r15,%r12), %xmm8
movaps 0x70(%r15,%r12), %xmm0
movaps %xmm0, 0x30(%rsp)
movaps 0x80(%r15,%r12), %xmm0
movaps %xmm0, 0x60(%rsp)
xorl %ebp, %ebp
movaps %xmm5, 0x70(%rsp)
movaps %xmm6, 0x90(%rsp)
movaps %xmm8, 0xa0(%rsp)
movaps %xmm3, 0x20(%rsp)
movaps %xmm15, 0x50(%rsp)
movaps %xmm4, 0x10(%rsp)
cmpl %ecx, %ebp
je 0x28c9ce
xorl %r15d, %r15d
xorl %r12d, %r12d
leal 0x3(%r12), %r13d
cmpl %eax, %r13d
jge 0x28c7f1
movaps (%r10,%r15), %xmm11
mulps %xmm3, %xmm11
addps %xmm15, %xmm11
movaps 0x10(%r10,%r15), %xmm12
mulps %xmm4, %xmm12
movaps 0x20(%r10,%r15), %xmm13
movaps 0x40(%r10,%r15), %xmm10
movaps %xmm13, %xmm15
movaps 0x70(%rsp), %xmm6
mulps %xmm6, %xmm15
addps %xmm12, %xmm15
addps %xmm11, %xmm15
movaps (%r11,%r15), %xmm11
mulps %xmm14, %xmm11
movaps 0x10(%r11,%r15), %xmm12
mulps %xmm7, %xmm12
addps %xmm11, %xmm12
movaps 0x20(%r11,%r15), %xmm0
movaps 0x40(%r11,%r15), %xmm11
movaps %xmm7, %xmm8
movaps %xmm14, %xmm7
movaps %xmm0, %xmm14
movaps 0x90(%rsp), %xmm9
mulps %xmm9, %xmm14
addps %xmm12, %xmm14
addps %xmm15, %xmm14
movaps (%rbx,%r15), %xmm12
movaps 0xa0(%rsp), %xmm5
mulps %xmm5, %xmm12
movaps 0x10(%rbx,%r15), %xmm1
movaps 0x30(%rsp), %xmm4
mulps %xmm4, %xmm1
addps %xmm12, %xmm1
movaps 0x20(%rbx,%r15), %xmm2
movaps 0x40(%rbx,%r15), %xmm12
movaps %xmm2, %xmm15
movaps 0x60(%rsp), %xmm3
mulps %xmm3, %xmm15
addps %xmm1, %xmm15
addps %xmm14, %xmm15
movaps 0x30(%r10,%r15), %xmm1
mulps 0x10(%rsp), %xmm1
movaps 0x30(%r11,%r15), %xmm14
mulps %xmm8, %xmm14
addps %xmm1, %xmm14
movaps 0x30(%rbx,%r15), %xmm1
mulps %xmm4, %xmm1
addps %xmm14, %xmm1
movaps %xmm15, (%r9)
mulps 0x20(%rsp), %xmm13
addps 0x50(%rsp), %xmm13
movaps %xmm10, %xmm14
mulps %xmm6, %xmm14
mulps %xmm7, %xmm0
movaps %xmm11, %xmm15
mulps %xmm9, %xmm15
addps %xmm14, %xmm15
mulps %xmm5, %xmm2
addps %xmm0, %xmm2
addps %xmm13, %xmm2
addps %xmm1, %xmm2
movaps %xmm12, %xmm0
mulps %xmm3, %xmm0
addps %xmm15, %xmm0
addps %xmm2, %xmm0
movaps 0x60(%r10,%r15), %xmm14
movaps 0x60(%r11,%r15), %xmm15
movaps 0x60(%rbx,%r15), %xmm13
movaps 0x50(%r10,%r15), %xmm1
mulps 0x10(%rsp), %xmm1
movaps 0x50(%r11,%r15), %xmm2
mulps %xmm8, %xmm2
addps %xmm1, %xmm2
movaps 0x50(%rbx,%r15), %xmm1
mulps %xmm4, %xmm1
addps %xmm2, %xmm1
movaps %xmm0, 0x10(%r9)
movaps %xmm14, %xmm0
mulps %xmm6, %xmm0
movaps %xmm15, %xmm2
mulps %xmm9, %xmm2
addps %xmm0, %xmm2
mulps %xmm7, %xmm11
mulps %xmm5, %xmm12
addps %xmm11, %xmm12
mulps 0x20(%rsp), %xmm10
addps 0x50(%rsp), %xmm10
addps %xmm10, %xmm12
addps %xmm1, %xmm12
movaps %xmm13, %xmm0
mulps %xmm3, %xmm0
addps %xmm2, %xmm0
addps %xmm12, %xmm0
movaps 0x70(%r10,%r15), %xmm1
mulps 0x10(%rsp), %xmm1
movaps 0x80(%r10,%r15), %xmm2
mulps %xmm6, %xmm2
movaps 0x70(%r11,%r15), %xmm10
mulps %xmm8, %xmm10
addps %xmm1, %xmm10
movaps 0x80(%r11,%r15), %xmm1
mulps %xmm9, %xmm1
addps %xmm2, %xmm1
movaps 0x70(%rbx,%r15), %xmm2
mulps %xmm4, %xmm2
addps %xmm10, %xmm2
movaps 0x80(%rbx,%r15), %xmm10
mulps %xmm3, %xmm10
addps %xmm1, %xmm10
movaps %xmm0, 0x20(%r9)
mulps %xmm7, %xmm15
mulps %xmm5, %xmm13
addps %xmm15, %xmm13
movaps 0x50(%rsp), %xmm15
movaps 0x20(%rsp), %xmm3
movaps 0x10(%rsp), %xmm4
mulps %xmm3, %xmm14
addps %xmm15, %xmm14
addps %xmm14, %xmm13
movaps %xmm7, %xmm14
movaps %xmm8, %xmm7
addps %xmm2, %xmm13
addps %xmm10, %xmm13
movaps %xmm13, 0x30(%r9)
addq $0x40, %r9
addl $0x4, %r12d
subq $-0x80, %r15
jmp 0x28c59a
movaps 0x90(%rsp), %xmm6
movaps 0xa0(%rsp), %xmm8
leal 0x1(%r12), %r13d
cmpl %eax, %r13d
jge 0x28c91e
movaps 0x20(%r10,%r15), %xmm11
movaps 0x20(%r11,%r15), %xmm12
movaps 0x20(%rbx,%r15), %xmm10
movaps (%r10,%r15), %xmm0
mulps %xmm3, %xmm0
addps %xmm15, %xmm0
movaps 0x10(%r10,%r15), %xmm1
mulps %xmm4, %xmm1
movaps %xmm11, %xmm2
movaps 0x70(%rsp), %xmm13
mulps %xmm13, %xmm2
addps %xmm1, %xmm2
addps %xmm0, %xmm2
movaps (%r11,%r15), %xmm0
mulps %xmm14, %xmm0
movaps 0x10(%r11,%r15), %xmm1
mulps %xmm7, %xmm1
addps %xmm0, %xmm1
movaps %xmm12, %xmm0
mulps %xmm6, %xmm0
addps %xmm1, %xmm0
addps %xmm2, %xmm0
movaps (%rbx,%r15), %xmm1
mulps %xmm8, %xmm1
movaps 0x10(%rbx,%r15), %xmm2
movaps 0x30(%rsp), %xmm9
mulps %xmm9, %xmm2
addps %xmm1, %xmm2
movaps %xmm10, %xmm1
movaps 0x60(%rsp), %xmm5
mulps %xmm5, %xmm1
addps %xmm2, %xmm1
addps %xmm0, %xmm1
movaps 0x30(%r10,%r15), %xmm0
mulps %xmm4, %xmm0
movaps 0x40(%r10,%r15), %xmm2
mulps %xmm13, %xmm2
movaps 0x30(%r11,%r15), %xmm13
mulps %xmm7, %xmm13
addps %xmm0, %xmm13
movaps 0x40(%r11,%r15), %xmm0
mulps %xmm6, %xmm0
addps %xmm2, %xmm0
movaps 0x30(%rbx,%r15), %xmm2
mulps %xmm9, %xmm2
addps %xmm13, %xmm2
movaps 0x40(%rbx,%r15), %xmm13
mulps %xmm5, %xmm13
addps %xmm0, %xmm13
movaps %xmm1, (%r9)
mulps %xmm14, %xmm12
mulps %xmm8, %xmm10
addps %xmm12, %xmm10
mulps %xmm3, %xmm11
addps %xmm15, %xmm11
addps %xmm11, %xmm10
addps %xmm2, %xmm10
addps %xmm13, %xmm10
movaps %xmm10, 0x10(%r9)
addq $0x20, %r9
addl $0x2, %r12d
addq $0x40, %r15
jmp 0x28c802
movq 0x48(%rsp), %r14
movaps 0x70(%rsp), %xmm5
cmpl %eax, %r12d
jge 0x28c9b5
movaps (%r10,%r15), %xmm0
mulps %xmm3, %xmm0
addps %xmm15, %xmm0
movaps 0x10(%r10,%r15), %xmm1
mulps %xmm4, %xmm1
movaps 0x20(%r10,%r15), %xmm2
mulps %xmm5, %xmm2
addps %xmm1, %xmm2
addps %xmm0, %xmm2
movaps (%r11,%r15), %xmm0
mulps %xmm14, %xmm0
movaps 0x10(%r11,%r15), %xmm1
mulps %xmm7, %xmm1
addps %xmm0, %xmm1
movaps 0x20(%r11,%r15), %xmm0
mulps %xmm6, %xmm0
addps %xmm1, %xmm0
addps %xmm2, %xmm0
movaps (%rbx,%r15), %xmm1
mulps %xmm8, %xmm1
movaps 0x10(%rbx,%r15), %xmm2
mulps 0x30(%rsp), %xmm2
addps %xmm1, %xmm2
movaps 0x20(%rbx,%r15), %xmm1
mulps 0x60(%rsp), %xmm1
addps %xmm2, %xmm1
addps %xmm0, %xmm1
movaps %xmm1, (%r9)
addq $0x10, %r9
incl %r12d
addq $0x20, %r15
jmp 0x28c928
addq %rdi, %r10
addq %r15, %r10
addq %rdi, %r11
addq %r15, %r11
addq %rdi, %rbx
addq %r15, %rbx
incl %ebp
jmp 0x28c58c
incq %r8
movq 0x88(%rsp), %rbp
jmp 0x28c4ad
pushq $0x5
popq %r15
cmpl $0x1, 0xdc(%rbp,%r8)
jne 0x28ccf0
cmpl $0x1, 0xe0(%rbp,%r8)
jne 0x28ccf0
cmpl $0x2, 0xe4(%rbp,%r8)
jne 0x28ccf0
cmpl $0x2, 0xe8(%rbp,%r8)
jne 0x28ccf0
movl 0x2c(%r14), %edi
movl 0x30(%r14), %ecx
movl 0xdc(%rsp), %eax
movl 0xe8(%rsp), %edx
subl %edi, %eax
shll $0x3, %eax
movq 0x1b0(%rbp,%r8), %rsi
xorl %r15d, %r15d
testl %edi, %edi
cmovlel %r15d, %edi
movslq %eax, %r8
testl %ecx, %ecx
cmovlel %r15d, %ecx
testl %edx, %edx
cmovlel %r15d, %edx
movq %rdx, 0x10(%rsp)
shlq $0x2, %r8
movq %rsi, 0x20(%rsp)
cmpq 0x10(%rsp), %r15
je 0x28d415
testq %rsi, %rsi
je 0x28ca84
movq %r15, %rax
shlq $0x4, %rax
movups (%rsi,%rax), %xmm0
jmp 0x28ca87
xorps %xmm0, %xmm0
movq 0x40(%r14), %r9
imulq %r15, %r9
imulq 0x10(%r14), %r9
addq (%r14), %r9
movq 0x28(%rbp), %r10
movslq 0x54(%rbp), %r11
imulq %r15, %r11
imulq 0x38(%rbp), %r11
movq 0xf0(%rsp), %rbx
movq %r15, %r14
imulq %r15, %rbx
movq 0xc0(%rsp), %rax
imulq %rax, %rbx
addq 0xb0(%rsp), %rbx
movslq 0xdc(%rsp), %rdx
imulq %rax, %rdx
leaq (%rbx,%rdx), %r15
leaq (%rbx,%rdx,2), %r12
leaq (%rdx,%rdx,2), %r13
addq %rbx, %r13
leaq (%rbx,%rdx,4), %rbp
xorl %edx, %edx
cmpl %ecx, %edx
je 0x28ccd3
movl %edi, %eax
xorl %esi, %esi
subl $0x1, %eax
jb 0x28ccae
movaps (%r10,%r11), %xmm2
movaps 0x10(%r10,%r11), %xmm3
movaps 0x20(%r10,%r11), %xmm1
movaps 0x30(%r10,%r11), %xmm4
movaps 0x40(%r10,%r11), %xmm5
mulps (%rbx,%rsi), %xmm2
mulps 0x10(%rbx,%rsi), %xmm3
mulps 0x20(%rbx,%rsi), %xmm1
addps %xmm0, %xmm2
addps %xmm3, %xmm1
mulps 0x30(%rbx,%rsi), %xmm4
addps %xmm2, %xmm1
mulps 0x40(%rbx,%rsi), %xmm5
addps %xmm4, %xmm5
movaps 0x50(%r10,%r11), %xmm3
movaps 0x60(%r10,%r11), %xmm4
movaps 0x70(%r10,%r11), %xmm6
movaps 0x80(%r10,%r11), %xmm7
movaps 0x90(%r10,%r11), %xmm2
mulps (%r15,%rsi), %xmm3
addps %xmm5, %xmm3
mulps 0x10(%r15,%rsi), %xmm4
mulps 0x20(%r15,%rsi), %xmm6
addps %xmm1, %xmm3
addps %xmm4, %xmm6
mulps 0x30(%r15,%rsi), %xmm7
addps %xmm6, %xmm7
mulps 0x40(%r15,%rsi), %xmm2
addps %xmm7, %xmm2
addps %xmm3, %xmm2
movaps 0xa0(%r10,%r11), %xmm3
movaps 0xb0(%r10,%r11), %xmm4
movaps 0xc0(%r10,%r11), %xmm5
movaps 0xd0(%r10,%r11), %xmm6
movaps 0xe0(%r10,%r11), %xmm1
mulps (%r12,%rsi), %xmm3
mulps 0x10(%r12,%rsi), %xmm4
mulps 0x20(%r12,%rsi), %xmm5
addps %xmm3, %xmm4
addps %xmm4, %xmm5
mulps 0x30(%r12,%rsi), %xmm6
addps %xmm5, %xmm6
mulps 0x40(%r12,%rsi), %xmm1
addps %xmm6, %xmm1
addps %xmm2, %xmm1
movaps 0xf0(%r10,%r11), %xmm2
movaps 0x100(%r10,%r11), %xmm3
movaps 0x110(%r10,%r11), %xmm4
movaps 0x120(%r10,%r11), %xmm5
movaps 0x130(%r10,%r11), %xmm6
mulps (%r13,%rsi), %xmm2
mulps 0x10(%r13,%rsi), %xmm3
mulps 0x20(%r13,%rsi), %xmm4
addps %xmm2, %xmm3
addps %xmm3, %xmm4
mulps 0x30(%r13,%rsi), %xmm5
addps %xmm4, %xmm5
mulps 0x40(%r13,%rsi), %xmm6
addps %xmm5, %xmm6
movaps 0x140(%r10,%r11), %xmm2
movaps 0x150(%r10,%r11), %xmm3
movaps 0x160(%r10,%r11), %xmm4
movaps 0x170(%r10,%r11), %xmm5
movaps 0x180(%r10,%r11), %xmm7
mulps (%rbp,%rsi), %xmm2
addps %xmm6, %xmm2
mulps 0x10(%rbp,%rsi), %xmm3
mulps 0x20(%rbp,%rsi), %xmm4
addps %xmm1, %xmm2
addps %xmm3, %xmm4
mulps 0x30(%rbp,%rsi), %xmm5
addps %xmm4, %xmm5
mulps 0x40(%rbp,%rsi), %xmm7
addps %xmm5, %xmm7
addps %xmm2, %xmm7
movaps %xmm7, (%r9)
addq $0x10, %r9
addq $0x20, %rsi
jmp 0x28caf8
addq %r8, %rbx
addq %rsi, %rbx
addq %r8, %r15
addq %rsi, %r15
addq %r8, %r12
addq %rsi, %r12
addq %r8, %r13
addq %rsi, %r13
addq %r8, %rbp
addq %rsi, %rbp
incl %edx
jmp 0x28caec
movq %r14, %r15
incq %r15
movq 0x48(%rsp), %r14
movq 0x88(%rsp), %rbp
movq 0x20(%rsp), %rsi
jmp 0x28ca67
imull %eax, %r15d
movslq %r15d, %rsi
leaq 0x198(%rsp), %rdi
leaq 0x130(%rsp), %rdx
callq 0x73bbe
movq (%rbp), %rcx
movq -0x18(%rcx), %rdx
movl 0x10(%rsp), %r11d
imull 0xe0(%rbp,%rdx), %r11d
movq 0x198(%rsp), %rax
movl 0xdc(%rbp,%rdx), %esi
imull 0xd4(%rbp,%rdx), %esi
subl %esi, %r11d
xorl %esi, %esi
xorl %edi, %edi
xorl %r8d, %r8d
cmpl 0xd8(%rbp,%rdx), %r8d
jge 0x28cd80
movslq %edi, %rdi
leaq (%rax,%rdi,4), %r10
xorl %r9d, %r9d
cmpl 0xd4(%rbp,%rdx), %r9d
jge 0x28cd75
movl %esi, (%r10,%r9,4)
movq -0x18(%rcx), %rdx
addl 0xdc(%rbp,%rdx), %esi
incq %r9
jmp 0x28cd57
addl %r11d, %esi
incl %r8d
addq %r9, %rdi
jmp 0x28cd43
leal (,%r15,4), %ecx
movl %ecx, 0x90(%rsp)
movl 0x60(%rsp), %edx
shll $0x2, %edx
xorl %ecx, %ecx
testl %r15d, %r15d
cmovlel %ecx, %r15d
movslq %edx, %rdx
movq %rdx, 0x30(%rsp)
movq 0x50(%rsp), %r12
testl %r12d, %r12d
cmovlel %ecx, %r12d
movq %r12, 0x50(%rsp)
movslq 0xa0(%rsp), %rdi
shlq $0x2, %r15
leaq 0x16b70d(%rip), %r8 # 0x3f84d8
xorps %xmm0, %xmm0
movaps 0x16423a(%rip), %xmm15 # 0x3f1010
movaps 0x164243(%rip), %xmm4 # 0x3f1020
movaps 0x16424c(%rip), %xmm6 # 0x3f1030
movaps 0x1612b5(%rip), %xmm5 # 0x3ee0a0
movaps 0x16424d(%rip), %xmm8 # 0x3f1040
xorl %edx, %edx
movaps 0x164253(%rip), %xmm9 # 0x3f1050
movaps 0x16426b(%rip), %xmm11 # 0x3f1070
movaps 0x164273(%rip), %xmm12 # 0x3f1080
movaps 0x16427b(%rip), %xmm13 # 0x3f1090
cmpq 0x50(%rsp), %rdx
je 0x28d2e7
movq %rcx, 0x60(%rsp)
movslq %ecx, %r10
movq 0x40(%r14), %r11
imulq %rdx, %r11
imulq 0x10(%r14), %r11
shlq $0x2, %r10
addq (%r14), %r11
movslq 0xdc(%rsp), %rbx
movq 0xf0(%rsp), %r13
imulq %rdx, %r13
movq 0xc0(%rsp), %rcx
imulq %rcx, %r13
addq 0xb0(%rsp), %r13
imulq %rcx, %rbx
movq %rdx, 0x70(%rsp)
shlq $0x4, %rdx
movq %rdx, 0x10(%rsp)
addq 0x28(%rbp), %r10
xorl %ecx, %ecx
cmpl 0x20(%rsp), %ecx
jg 0x28d2c9
movq (%rbp), %rdx
xorl %r9d, %r9d
cmpq %rdi, %r9
jg 0x28d2b9
movq -0x18(%rdx), %r12
cmpl $0x0, 0x100(%rbp,%r12)
je 0x28ceb9
movq 0x1b0(%rbp,%r12), %rsi
movq 0x10(%rsp), %r14
movups (%rsi,%r14), %xmm1
jmp 0x28cebc
xorps %xmm1, %xmm1
movslq 0xe8(%rbp,%r12), %rsi
movq %rbp, %r14
movslq %ecx, %rbp
imulq %rsi, %rbp
imulq %rbx, %rbp
addq %r13, %rbp
movl 0xe4(%r14,%r12), %esi
imull %r9d, %esi
shll $0x2, %esi
movslq %esi, %rsi
leaq (,%rsi,4), %rsi
addq %rbp, %rsi
xorl %ebp, %ebp
cmpq %rbp, %r15
je 0x28cf17
movslq (%rax,%rbp), %r14
shlq $0x4, %r14
movups (%rsi,%r14), %xmm2
movups (%r10,%rbp,4), %xmm3
mulps %xmm2, %xmm3
addps %xmm3, %xmm1
addq $0x4, %rbp
jmp 0x28cef4
movq 0x88(%rsp), %rbp
movl 0x110(%rbp,%r12), %esi
decl %esi
cmpl $0x5, %esi
ja 0x28d1cc
movslq (%r8,%rsi,4), %rsi
addq %r8, %rsi
movaps %xmm1, %xmm7
maxps %xmm0, %xmm7
jmpq *%rsi
movq 0x118(%rbp,%r12), %rsi
minps %xmm0, %xmm1
movss (%rsi), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
mulps %xmm1, %xmm2
addps %xmm2, %xmm7
jmp 0x28d2a5
movaps %xmm1, %xmm2
minps %xmm15, %xmm2
maxps %xmm4, %xmm2
movaps %xmm2, %xmm14
mulps %xmm6, %xmm14
addps %xmm5, %xmm14
cvttps2dq %xmm14, %xmm7
cvtdq2ps %xmm7, %xmm7
cmpltps %xmm7, %xmm14
andps %xmm8, %xmm14
subps %xmm14, %xmm7
movaps %xmm15, %xmm4
cvttps2dq %xmm7, %xmm15
mulps %xmm9, %xmm7
subps %xmm7, %xmm2
movaps %xmm2, %xmm7
mulps %xmm2, %xmm7
movaps %xmm2, %xmm14
movaps 0x1640b4(%rip), %xmm10 # 0x3f1060
mulps %xmm10, %xmm14
addps %xmm11, %xmm14
mulps %xmm2, %xmm14
addps %xmm12, %xmm14
mulps %xmm2, %xmm14
addps %xmm13, %xmm14
mulps %xmm2, %xmm14
xorps %xmm6, %xmm6
movaps %xmm5, %xmm0
movaps 0x1640cb(%rip), %xmm5 # 0x3f10a0
addps %xmm5, %xmm14
mulps %xmm2, %xmm14
addps %xmm0, %xmm14
mulps %xmm7, %xmm14
addps %xmm8, %xmm2
addps %xmm14, %xmm2
pslld $0x17, %xmm15
paddd %xmm8, %xmm15
mulps %xmm2, %xmm15
addps %xmm8, %xmm15
movaps %xmm15, %xmm2
maxps 0x1640a4(%rip), %xmm15 # 0x3f10b0
movaps %xmm15, %xmm7
andps 0x1640a8(%rip), %xmm15 # 0x3f10c0
orps %xmm0, %xmm15
movaps %xmm15, %xmm3
cmpltps 0x1640b8(%rip), %xmm3 # 0x3f10e0
movaps %xmm3, %xmm14
andps %xmm15, %xmm14
movaps 0x1640b8(%rip), %xmm10 # 0x3f10f0
addps %xmm10, %xmm15
addps %xmm14, %xmm15
cmpleps %xmm6, %xmm2
psrld $0x17, %xmm7
paddd 0x16407f(%rip), %xmm7 # 0x3f10d0
cvtdq2ps %xmm7, %xmm14
andps %xmm8, %xmm3
subps %xmm3, %xmm14
movaps %xmm15, %xmm7
mulps 0x164098(%rip), %xmm7 # 0x3f1100
addps 0x1640a1(%rip), %xmm7 # 0x3f1110
mulps %xmm15, %xmm7
addps 0x1640a6(%rip), %xmm7 # 0x3f1120
mulps %xmm15, %xmm7
addps 0x1640ab(%rip), %xmm7 # 0x3f1130
mulps %xmm15, %xmm7
addps 0x1640b0(%rip), %xmm7 # 0x3f1140
mulps %xmm15, %xmm7
addps 0x1640b5(%rip), %xmm7 # 0x3f1150
mulps %xmm15, %xmm7
addps 0x1640ba(%rip), %xmm7 # 0x3f1160
mulps %xmm15, %xmm7
addps 0x1640bf(%rip), %xmm7 # 0x3f1170
mulps %xmm15, %xmm7
addps 0x1640c4(%rip), %xmm7 # 0x3f1180
mulps %xmm15, %xmm7
mulps %xmm9, %xmm14
addps %xmm15, %xmm14
mulps %xmm15, %xmm15
addps 0x1640bd(%rip), %xmm7 # 0x3f1190
mulps %xmm15, %xmm7
movaps %xmm4, %xmm15
movaps 0x163f3e(%rip), %xmm4 # 0x3f1020
movaps 0x163f47(%rip), %xmm6 # 0x3f1030
addps %xmm7, %xmm14
mulps 0x164bcb(%rip), %xmm14 # 0x3f1cc0
movaps %xmm2, %xmm3
andnps %xmm14, %xmm3
andps 0x164bcd(%rip), %xmm2 # 0x3f1cd0
orps %xmm3, %xmm2
minps %xmm15, %xmm2
maxps %xmm4, %xmm2
movaps %xmm2, %xmm3
mulps %xmm6, %xmm3
addps %xmm0, %xmm3
cvttps2dq %xmm3, %xmm7
cvtdq2ps %xmm7, %xmm7
cmpltps %xmm7, %xmm3
andps %xmm8, %xmm3
subps %xmm3, %xmm7
cvttps2dq %xmm7, %xmm14
mulps %xmm9, %xmm7
subps %xmm7, %xmm2
movaps %xmm2, %xmm3
mulps %xmm2, %xmm3
movaps %xmm2, %xmm7
mulps 0x163f1c(%rip), %xmm7 # 0x3f1060
addps %xmm11, %xmm7
mulps %xmm2, %xmm7
addps %xmm12, %xmm7
mulps %xmm2, %xmm7
addps %xmm13, %xmm7
mulps %xmm2, %xmm7
addps %xmm5, %xmm7
movaps %xmm0, %xmm5
xorps %xmm0, %xmm0
mulps %xmm2, %xmm7
addps %xmm5, %xmm7
mulps %xmm3, %xmm7
addps %xmm8, %xmm2
addps %xmm7, %xmm2
pslld $0x17, %xmm14
paddd %xmm8, %xmm14
mulps %xmm2, %xmm14
addps %xmm8, %xmm14
rcpps %xmm14, %xmm2
movaps %xmm2, %xmm7
addps %xmm2, %xmm7
mulps %xmm7, %xmm14
movaps 0x164b46(%rip), %xmm3 # 0x3f1ce0
subps %xmm14, %xmm3
mulps %xmm2, %xmm3
addps %xmm10, %xmm7
addps %xmm3, %xmm7
jmp 0x28d2a2
movq 0x118(%rbp,%r12), %rsi
movss (%rsi), %xmm2
movss 0x4(%rsi), %xmm7
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
maxps %xmm2, %xmm1
minps %xmm7, %xmm1
movaps %xmm1, %xmm7
jmp 0x28d2a5
xorps 0x160eb5(%rip), %xmm1 # 0x3ee090
minps %xmm15, %xmm1
maxps %xmm4, %xmm1
movaps %xmm1, %xmm7
mulps %xmm6, %xmm7
addps %xmm5, %xmm7
cvttps2dq %xmm7, %xmm2
cvtdq2ps %xmm2, %xmm2
cmpltps %xmm2, %xmm7
andps %xmm8, %xmm7
subps %xmm7, %xmm2
cvttps2dq %xmm2, %xmm14
mulps 0x164ae7(%rip), %xmm2 # 0x3f1cf0
addps %xmm1, %xmm2
movaps %xmm2, %xmm1
mulps %xmm2, %xmm1
movaps %xmm2, %xmm7
mulps 0x163e44(%rip), %xmm7 # 0x3f1060
addps 0x163e4d(%rip), %xmm7 # 0x3f1070
mulps %xmm2, %xmm7
addps 0x163e53(%rip), %xmm7 # 0x3f1080
mulps %xmm2, %xmm7
addps 0x163e59(%rip), %xmm7 # 0x3f1090
mulps %xmm2, %xmm7
addps 0x163e5f(%rip), %xmm7 # 0x3f10a0
mulps %xmm2, %xmm7
addps %xmm5, %xmm7
mulps %xmm1, %xmm7
addps %xmm8, %xmm2
addps %xmm7, %xmm2
pslld $0x17, %xmm14
paddd %xmm8, %xmm14
mulps %xmm2, %xmm14
addps %xmm8, %xmm14
rcpps %xmm14, %xmm1
mulps %xmm1, %xmm14
movaps %xmm8, %xmm7
subps %xmm14, %xmm7
mulps %xmm1, %xmm7
addps %xmm1, %xmm7
jmp 0x28d2a5
movq 0x118(%rbp,%r12), %rsi
movss (%rsi), %xmm7
movss 0x4(%rsi), %xmm2
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
mulps %xmm1, %xmm7
addps %xmm2, %xmm7
maxps %xmm0, %xmm7
minps %xmm8, %xmm7
mulps %xmm1, %xmm7
movq %r9, %rsi
shlq $0x4, %rsi
movups %xmm7, (%r11,%rsi)
incq %r9
jmp 0x28ce8d
movq 0x30(%rsp), %rdx
leaq (%r11,%rdx,4), %r11
incl %ecx
jmp 0x28ce7c
movq 0x70(%rsp), %rdx
incq %rdx
movq 0x60(%rsp), %rcx
addl 0x90(%rsp), %ecx
movq 0x48(%rsp), %r14
jmp 0x28ce15
leaq 0x198(%rsp), %rdi
callq 0x624be
xorl %r12d, %r12d
jmp 0x28d3c9
movq %rsi, %rdi
callq 0x5f3e0
xorl %r12d, %r12d
movaps 0x130(%rsp), %xmm0
movq 0x48(%rsp), %rcx
movups %xmm0, (%rcx)
movq 0x140(%rsp), %rax
movq %rax, 0x10(%rcx)
movl 0x148(%rsp), %eax
movl %eax, 0x18(%rcx)
movq 0x150(%rsp), %rax
movq %rax, 0x20(%rcx)
movups 0x158(%rsp), %xmm0
movups %xmm0, 0x28(%rcx)
movl 0x168(%rsp), %eax
movl %eax, 0x38(%rcx)
movq 0x170(%rsp), %rax
movq %rax, 0x40(%rcx)
movq 0x138(%rsp), %rax
testq %rax, %rax
je 0x28d392
lock
decl (%rax)
jne 0x28d392
movq 0x130(%rsp), %rsi
movq 0x150(%rsp), %rdi
testq %rdi, %rdi
je 0x28d38a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x28d392
movq %rsi, %rdi
callq 0x5f3e0
movq 0x1a0(%rsp), %rax
testq %rax, %rax
je 0x28d3c9
lock
decl (%rax)
jne 0x28d3c9
movq 0x198(%rsp), %rsi
movq 0x1b8(%rsp), %rdi
testq %rdi, %rdi
je 0x28d3c1
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x28d3c9
movq %rsi, %rdi
callq 0x5f3e0
movq 0xb8(%rsp), %rax
testq %rax, %rax
je 0x28d400
lock
decl (%rax)
jne 0x28d400
movq 0xb0(%rsp), %rsi
movq 0xd0(%rsp), %rdi
testq %rdi, %rdi
je 0x28d3f8
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x28d400
movq %rsi, %rdi
callq 0x5f3e0
movl %r12d, %eax
addq $0x2c8, %rsp # imm = 0x2C8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq 0x8(%rbp), %rdi
jmp 0x28d427
movq 0x88(%rsp), %rax
movq 0x8(%rax), %rdi
testq %rdi, %rdi
je 0x28d2f4
movq (%rdi), %rax
movq %r14, %rsi
movq 0x190(%rsp), %rdx
callq *0x48(%rax)
jmp 0x28d2f4
jmp 0x28d473
jmp 0x28d5a1
jmp 0x28d5a1
movq %rax, %rbx
jmp 0x28d523
movq %rax, %rbx
jmp 0x28d4ec
jmp 0x28d5a1
jmp 0x28d473
jmp 0x28d5a1
jmp 0x28d5a1
movq %rax, %rbx
jmp 0x28d55a
movq %rax, %rbx
movq 0x1f8(%rsp), %rax
testq %rax, %rax
je 0x28d4b5
lock
decl (%rax)
jne 0x28d4b5
movq 0x1f0(%rsp), %rsi
movq 0x210(%rsp), %rdi
testq %rdi, %rdi
jne 0x28d4af
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x28d4b5
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x240(%rsp), %rax
testq %rax, %rax
je 0x28d4ec
lock
decl (%rax)
jne 0x28d4ec
movq 0x238(%rsp), %rsi
movq 0x258(%rsp), %rdi
testq %rdi, %rdi
jne 0x28d4e6
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x28d4ec
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x138(%rsp), %rax
testq %rax, %rax
je 0x28d523
lock
decl (%rax)
jne 0x28d523
movq 0x130(%rsp), %rsi
movq 0x150(%rsp), %rdi
testq %rdi, %rdi
jne 0x28d51d
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x28d523
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x1a0(%rsp), %rax
testq %rax, %rax
je 0x28d55a
lock
decl (%rax)
jne 0x28d55a
movq 0x198(%rsp), %rsi
movq 0x1b8(%rsp), %rdi
testq %rdi, %rdi
jne 0x28d554
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x28d55a
movq (%rdi), %rax
callq *0x18(%rax)
movq 0xb8(%rsp), %rax
testq %rax, %rax
je 0x28d591
lock
decl (%rax)
jne 0x28d591
movq 0xb0(%rsp), %rsi
movq 0xd0(%rsp), %rdi
testq %rdi, %rdi
jne 0x28d58b
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x28d591
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
jmp 0x28d5a1
jmp 0x28d5a1
jmp 0x28d5a1
jmp 0x28d5a1
movq %rax, %rdi
callq 0x61d68
nop
| /csukuangfj[P]ncnn/src/layer/x86/convolutiondepthwise_x86.cpp |
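Before the next record, a minimal standalone sketch of the dilation-aware tap-offset table that the inner loops in the assembly above index through and that the C++ sources below build as space_ofs. All sizes here are illustrative sample values, not anything taken from the records.

// space_ofs[k] is the element offset of kernel tap k relative to the top-left
// of the sliding window, measured in the (padded) input layout used below.
#include <cstdio>
#include <vector>

int main()
{
    const int w = 8, kernel_w = 3, kernel_h = 3, dilation_w = 2, dilation_h = 2;
    std::vector<int> space_ofs(kernel_w * kernel_h);
    int p1 = 0;
    int p2 = 0;
    // after one kernel row is consumed, gap jumps p2 to the next dilated input row
    const int gap = w * dilation_h - kernel_w * dilation_w;
    for (int i = 0; i < kernel_h; i++)
    {
        for (int j = 0; j < kernel_w; j++)
        {
            space_ofs[p1] = p2;
            p1++;
            p2 += dilation_w;
        }
        p2 += gap;
    }
    for (int k = 0; k < kernel_w * kernel_h; k++)
        printf("%d ", space_ofs[k]); // prints: 0 2 4 16 18 20 32 34 36
    printf("\n");
    return 0;
}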
virtual thunk to ncnn::ConvolutionDepthWise_x86::forward(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const | int ConvolutionDepthWise_x86::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
#if NCNN_INT8
if (opt.use_int8_inference && int8_scale_term)
{
return forward_int8_x86(bottom_blob, top_blob, opt);
}
#endif
int w = bottom_blob.w;
int h = bottom_blob.h;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
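// a dilated k-tap kernel spans dilation * (k - 1) + 1 input pixels; this
// effective extent, not kernel_w/kernel_h itself, drives the output sizing below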
const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;
Mat bottom_blob_bordered;
make_padding(bottom_blob, bottom_blob_bordered, opt);
if (bottom_blob_bordered.empty())
return -100;
w = bottom_blob_bordered.w;
h = bottom_blob_bordered.h;
int outw = (w - kernel_extent_w) / stride_w + 1;
int outh = (h - kernel_extent_h) / stride_h + 1;
int out_elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
#if __AVX512F__
out_elempack = num_output % 16 == 0 ? 16 : num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#elif __AVX__
out_elempack = num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#else
out_elempack = num_output % 4 == 0 ? 4 : 1;
#endif
}
#endif // __SSE2__
size_t out_elemsize = elemsize / elempack * out_elempack;
top_blob.create(outw, outh, num_output / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
// depth-wise
if (channels * elempack == group && group == num_output)
{
#if __SSE2__
#if __AVX__
#if __AVX512F__
if (elempack == 16)
{
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw3x3s1_pack16_avx512(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw3x3s2_pack16_avx512(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 5 && kernel_h == 5 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw5x5s1_pack16_avx512(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 5 && kernel_h == 5 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw5x5s2_pack16_avx512(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
else
{
const int maxk = kernel_w * kernel_h;
// kernel offsets
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
int gap = w * dilation_h - kernel_w * dilation_w;
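// the inner loop leaves p2 at kernel_w * dilation_w past the row start;
// adding gap lands it on the first tap of the next dilated kernel row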
for (int i = 0; i < kernel_h; i++)
{
for (int j = 0; j < kernel_w; j++)
{
space_ofs[p1] = p2;
p1++;
p2 += dilation_w;
}
p2 += gap;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < channels; g++)
{
float* outptr = top_blob.channel(g);
const float* kptr = (const float*)weight_data_tm + maxk * g * 16;
const Mat m = bottom_blob_bordered.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__m512 _sum = _mm512_set1_ps(0.f);
if (bias_term)
{
_sum = _mm512_loadu_ps(((const float*)bias_data) + g * 16);
}
const float* sptr = m.row(i * stride_h) + j * stride_w * 16;
for (int k = 0; k < maxk; k++)
{
__m512 _val = _mm512_loadu_ps(sptr + space_ofs[k] * 16);
__m512 _w = _mm512_loadu_ps(kptr + k * 16);
_sum = _mm512_fmadd_ps(_val, _w, _sum);
}
_mm512_storeu_ps(outptr, _sum);
outptr += 16;
}
}
}
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
}
#endif // __AVX512F__
if (elempack == 8)
{
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw3x3s1_pack8_avx(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw3x3s2_pack8_avx(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 5 && kernel_h == 5 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw5x5s1_pack8_avx(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 5 && kernel_h == 5 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw5x5s2_pack8_avx(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
else
{
const int maxk = kernel_w * kernel_h;
// kernel offsets
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
int gap = w * dilation_h - kernel_w * dilation_w;
for (int i = 0; i < kernel_h; i++)
{
for (int j = 0; j < kernel_w; j++)
{
space_ofs[p1] = p2;
p1++;
p2 += dilation_w;
}
p2 += gap;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < channels; g++)
{
float* outptr = top_blob.channel(g);
const float* kptr = (const float*)weight_data_tm + maxk * g * 8;
const Mat m = bottom_blob_bordered.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__m256 _sum = _mm256_set1_ps(0.f);
if (bias_term)
{
_sum = _mm256_loadu_ps(((const float*)bias_data) + g * 8);
}
const float* sptr = m.row(i * stride_h) + j * stride_w * 8;
for (int k = 0; k < maxk; k++)
{
__m256 _val = _mm256_loadu_ps(sptr + space_ofs[k] * 8);
__m256 _w = _mm256_loadu_ps(kptr + k * 8);
_sum = _mm256_comp_fmadd_ps(_val, _w, _sum);
}
_mm256_storeu_ps(outptr + j * 8, _sum);
}
outptr += outw * 8;
}
}
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
}
#endif // __AVX__
if (elempack == 4)
{
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw3x3s1_pack4_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw3x3s2_pack4_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 5 && kernel_h == 5 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw5x5s1_pack4_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 5 && kernel_h == 5 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw5x5s2_pack4_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
{
const int maxk = kernel_w * kernel_h;
// kernel offsets
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
int gap = w * dilation_h - kernel_w * dilation_w;
for (int i = 0; i < kernel_h; i++)
{
for (int j = 0; j < kernel_w; j++)
{
space_ofs[p1] = p2;
p1++;
p2 += dilation_w;
}
p2 += gap;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < channels; g++)
{
float* outptr = top_blob.channel(g);
const float* kptr = (const float*)weight_data_tm + maxk * g * 4;
const Mat m = bottom_blob_bordered.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__m128 _sum = _mm_set1_ps(0.f);
if (bias_term)
{
_sum = _mm_loadu_ps(((const float*)bias_data) + g * 4);
}
const float* sptr = m.row(i * stride_h) + j * stride_w * 4;
for (int k = 0; k < maxk; k++)
{
__m128 _val = _mm_loadu_ps(sptr + space_ofs[k] * 4);
__m128 _w = _mm_loadu_ps(kptr + k * 4);
_sum = _mm_add_ps(_mm_mul_ps(_val, _w), _sum);
}
_sum = activation_sse(_sum, activation_type, activation_params);
_mm_storeu_ps(outptr + j * 4, _sum);
}
outptr += outw * 4;
}
}
return 0;
}
}
#endif // __SSE2__
if (elempack == 1)
{
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw3x3s1_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw3x3s2_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
}
}
// group convolution
const int channels_g = channels * elempack / group;
const int num_output_g = num_output / group;
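// each of the group convolutions below maps channels_g input channels to num_output_g output channels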
int g_elempack = 1;
int out_g_elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
#if __AVX512F__
g_elempack = channels_g % 16 == 0 ? 16 : channels_g % 8 == 0 ? 8 : channels_g % 4 == 0 ? 4 : 1;
out_g_elempack = num_output_g % 16 == 0 ? 16 : num_output_g % 8 == 0 ? 8 : num_output_g % 4 == 0 ? 4 : 1;
#elif __AVX__
g_elempack = channels_g % 8 == 0 ? 8 : channels_g % 4 == 0 ? 4 : 1;
out_g_elempack = num_output_g % 8 == 0 ? 8 : num_output_g % 4 == 0 ? 4 : 1;
#else
g_elempack = channels_g % 4 == 0 ? 4 : 1;
out_g_elempack = num_output_g % 4 == 0 ? 4 : 1;
#endif
}
#endif // __SSE2__
// unpacking
Mat bottom_blob_bordered_unpacked = bottom_blob_bordered;
if (elempack > g_elempack)
{
Option opt_p = opt;
opt_p.blob_allocator = opt.workspace_allocator;
convert_packing(bottom_blob_bordered, bottom_blob_bordered_unpacked, g_elempack, opt_p);
}
Mat top_blob_unpacked = top_blob;
if (out_g_elempack < out_elempack)
{
top_blob_unpacked.create(outw, outh, num_output / out_g_elempack, out_elemsize / out_elempack * out_g_elempack, out_g_elempack, opt.workspace_allocator);
if (top_blob_unpacked.empty())
return -100;
}
for (int g = 0; g < group; g++)
{
const Mat bottom_blob_bordered_g = bottom_blob_bordered_unpacked.channel_range(channels_g * g / g_elempack, channels_g / g_elempack);
Mat top_blob_g = top_blob_unpacked.channel_range(num_output_g * g / out_g_elempack, num_output_g / out_g_elempack);
const ncnn::Layer* op = group_ops[g];
Option opt_g = opt;
opt_g.blob_allocator = top_blob_unpacked.allocator;
// forward
op->forward(bottom_blob_bordered_g, top_blob_g, opt_g);
}
// packing
if (out_g_elempack < out_elempack)
{
convert_packing(top_blob_unpacked, top_blob, out_elempack, opt);
}
else
{
top_blob = top_blob_unpacked;
}
return 0;
} | movq (%rdi), %rax
addq -0x48(%rax), %rdi
jmp 0x28a5b0
| /csukuangfj[P]ncnn/src/layer/x86/convolutiondepthwise_x86.cpp |
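A worked example of the output-geometry arithmetic from the forward() source above, assuming make_padding has already run so no pad terms appear; the sample sizes are illustrative.

#include <cstdio>

int main()
{
    const int w = 16, kernel_w = 3, dilation_w = 2, stride_w = 2;
    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1; // 5: span of a dilated 3-tap kernel
    const int outw = (w - kernel_extent_w) / stride_w + 1;       // (16 - 5) / 2 + 1 = 6
    printf("kernel_extent_w = %d, outw = %d\n", kernel_extent_w, outw);
    return 0;
}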
ncnn::ConvolutionDepthWise_x86::forward(std::vector<ncnn::Mat, std::allocator<ncnn::Mat>> const&, std::vector<ncnn::Mat, std::allocator<ncnn::Mat>>&, ncnn::Option const&) const | int ConvolutionDepthWise_x86::forward(const std::vector<Mat>& bottom_blobs, std::vector<Mat>& top_blobs, const Option& opt) const
{
const Mat& bottom_blob = bottom_blobs[0];
const Mat& _weight_data = bottom_blobs[1];
Mat& top_blob = top_blobs[0];
const int _kernel_w = _weight_data.w;
const int _kernel_h = _weight_data.h;
const int _num_output = _weight_data.c * _weight_data.elempack;
Mat weight_data_flattened;
flatten(_weight_data, weight_data_flattened, opt);
if (weight_data_flattened.empty())
return -100;
// weight_data_flattened as pack1
weight_data_flattened.w *= weight_data_flattened.elempack;
weight_data_flattened.elemsize /= weight_data_flattened.elempack;
weight_data_flattened.elempack = 1;
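// same underlying bytes: w grows by the old pack factor while the per-element
// size shrinks by it, so the flattened view is byte-for-byte identical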
Mat bias_data_flattened;
if (bias_term)
{
const Mat& _bias_data = bottom_blobs[2];
flatten(_bias_data, bias_data_flattened, opt);
if (bias_data_flattened.empty())
return -100;
// bias_data_flattened as pack1
bias_data_flattened.w *= bias_data_flattened.elempack;
bias_data_flattened.elemsize /= bias_data_flattened.elempack;
bias_data_flattened.elempack = 1;
}
ncnn::Layer* op = ncnn::create_layer(ncnn::LayerType::ConvolutionDepthWise);
ncnn::ParamDict pd;
pd.set(0, _num_output);
pd.set(1, _kernel_w);
pd.set(11, _kernel_h);
pd.set(2, dilation_w);
pd.set(12, dilation_h);
pd.set(3, stride_w);
pd.set(13, stride_h);
pd.set(4, pad_left);
pd.set(15, pad_right);
pd.set(14, pad_top);
pd.set(16, pad_bottom);
pd.set(18, pad_value);
pd.set(5, bias_term);
pd.set(6, weight_data_flattened.w);
pd.set(7, group);
pd.set(8, int8_scale_term);
pd.set(9, activation_type);
pd.set(10, activation_params);
op->load_param(pd);
ncnn::Mat weights[2];
weights[0] = weight_data_flattened;
weights[1] = bias_data_flattened;
op->load_model(ncnn::ModelBinFromMatArray(weights));
op->create_pipeline(opt);
op->forward(bottom_blob, top_blob, opt);
op->destroy_pipeline(opt);
delete op;
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x168, %rsp # imm = 0x168
movq %rsi, %rbp
movq %rdi, %r13
movq (%rsi), %r14
leaq 0x48(%r14), %rdi
movq (%rdx), %rax
movq %rax, 0xb8(%rsp)
movl 0x60(%r14), %ebx
movl 0x74(%r14), %eax
movl %eax, 0x1c(%rsp)
movl 0x78(%r14), %eax
movl %eax, 0x18(%rsp)
imull 0x80(%r14), %ebx
leaq 0x70(%rsp), %rsi
andq $0x0, 0x40(%rsi)
xorps %xmm0, %xmm0
movaps %xmm0, (%rsi)
movups %xmm0, 0xc(%rsi)
movaps %xmm0, 0x20(%rsi)
movups %xmm0, 0x2c(%rsi)
movq %rcx, %r15
movq %rcx, %rdx
callq 0x64ee7
pushq $-0x64
popq %r12
cmpq $0x0, 0x70(%rsp)
je 0x2904be
movslq 0xa8(%rsp), %rax
imulq 0xb0(%rsp), %rax
testq %rax, %rax
je 0x2904be
movslq 0x88(%rsp), %rcx
movl 0x9c(%rsp), %eax
imull %ecx, %eax
movl %eax, 0x9c(%rsp)
movq 0x80(%rsp), %rax
xorl %edx, %edx
divq %rcx
movq %rax, 0x80(%rsp)
movl $0x1, 0x88(%rsp)
andq $0x0, 0x60(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, 0x20(%rsp)
movups %xmm0, 0x2c(%rsp)
movaps %xmm0, 0x40(%rsp)
movups %xmm0, 0x4c(%rsp)
movq (%r13), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0x100(%r13,%rax)
je 0x29001e
movl $0x90, %edi
addq (%rbp), %rdi
leaq 0x20(%rsp), %rsi
movq %r15, %rdx
callq 0x64ee7
pushq $-0x64
popq %r12
cmpq $0x0, 0x20(%rsp)
je 0x290490
movslq 0x58(%rsp), %rax
imulq 0x60(%rsp), %rax
testq %rax, %rax
je 0x290490
movslq 0x38(%rsp), %rcx
movl 0x4c(%rsp), %eax
imull %ecx, %eax
movl %eax, 0x4c(%rsp)
movq 0x30(%rsp), %rax
xorl %edx, %edx
divq %rcx
movq %rax, 0x30(%rsp)
movl $0x1, 0x38(%rsp)
pushq $0x2a
popq %rdi
callq 0x782bf
movq %rax, %r12
leaq 0x8(%rsp), %rdi
callq 0x71548
leaq 0x8(%rsp), %rdi
xorl %esi, %esi
movl %ebx, %edx
callq 0x7193a
leaq 0x8(%rsp), %rdi
pushq $0x1
popq %rsi
movl 0x1c(%rsp), %edx
callq 0x7193a
leaq 0x8(%rsp), %rdi
pushq $0xb
popq %rsi
movl 0x18(%rsp), %edx
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0xdc(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0x2
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0xe0(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0xc
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0xe4(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0x3
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0xe8(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0xd
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0xec(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0x4
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0xf0(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0xf
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0xf4(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0xe
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0xf8(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0x10
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movss 0xfc(%r13,%rax), %xmm0
leaq 0x8(%rsp), %rdi
pushq $0x12
popq %rsi
callq 0x71952
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0x100(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0x5
popq %rsi
callq 0x7193a
movl 0x9c(%rsp), %edx
leaq 0x8(%rsp), %rdi
pushq $0x6
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0x108(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0x7
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0x10c(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0x8
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0x110(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0x9
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
leaq (%rax,%r13), %rdx
addq $0x118, %rdx # imm = 0x118
leaq 0x8(%rsp), %rdi
pushq $0xa
popq %rsi
callq 0x7196c
movq (%r12), %rax
leaq 0x8(%rsp), %rsi
movq %r12, %rdi
callq *0x10(%rax)
andq $0x0, 0x110(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, 0xd0(%rsp)
movups %xmm0, 0xdc(%rsp)
movaps %xmm0, 0xf0(%rsp)
movups %xmm0, 0xfc(%rsp)
andq $0x0, 0x158(%rsp)
movups %xmm0, 0x118(%rsp)
movups %xmm0, 0x124(%rsp)
movups %xmm0, 0x138(%rsp)
movups %xmm0, 0x144(%rsp)
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x290283
lock
incl (%rax)
movq 0xd8(%rsp), %rax
testq %rax, %rax
je 0x2902ba
lock
decl (%rax)
jne 0x2902ba
movq 0xd0(%rsp), %rsi
movq 0xf0(%rsp), %rdi
testq %rdi, %rdi
je 0x2902b2
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2902ba
movq %rsi, %rdi
callq 0x5f3e0
movaps 0x70(%rsp), %xmm0
movaps %xmm0, 0xd0(%rsp)
movq 0x80(%rsp), %rax
movq %rax, 0xe0(%rsp)
movl 0x88(%rsp), %eax
movl %eax, 0xe8(%rsp)
movq 0x90(%rsp), %rax
movq %rax, 0xf0(%rsp)
movups 0x98(%rsp), %xmm0
movups %xmm0, 0xf8(%rsp)
movl 0xa8(%rsp), %eax
movl %eax, 0x108(%rsp)
movq 0xb0(%rsp), %rax
movq %rax, 0x110(%rsp)
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x290330
lock
incl (%rax)
movq 0x120(%rsp), %rax
testq %rax, %rax
je 0x290367
lock
decl (%rax)
jne 0x290367
movq 0x118(%rsp), %rsi
movq 0x138(%rsp), %rdi
testq %rdi, %rdi
je 0x29035f
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x290367
movq %rsi, %rdi
callq 0x5f3e0
movaps 0x20(%rsp), %xmm0
leaq 0xd0(%rsp), %rsi
movups %xmm0, 0x48(%rsi)
movq 0x30(%rsp), %rax
movq %rax, 0x58(%rsi)
movl 0x38(%rsp), %eax
movl %eax, 0x60(%rsi)
movq 0x40(%rsp), %rax
movq %rax, 0x68(%rsi)
movups 0x48(%rsp), %xmm0
movaps %xmm0, 0x70(%rsi)
movl 0x58(%rsp), %eax
movl %eax, 0x80(%rsi)
movq 0x60(%rsp), %rax
movq %rax, 0x88(%rsi)
leaq 0xc0(%rsp), %rdi
callq 0x6b00e
movq (%r12), %rax
leaq 0xc0(%rsp), %rsi
movq %r12, %rdi
callq *0x18(%rax)
leaq 0xc0(%rsp), %rdi
callq 0x6b03a
movq (%r12), %rax
movq %r12, %rdi
movq %r15, %rsi
callq *0x20(%rax)
movq (%r12), %rax
movq %r12, %rdi
movq %r14, %rsi
movq 0xb8(%rsp), %rdx
movq %r15, %rcx
callq *0x38(%rax)
movq (%r12), %rax
movq %r12, %rdi
movq %r15, %rsi
callq *0x28(%rax)
movq (%r12), %rax
movq %r12, %rdi
callq *0x8(%rax)
pushq $0x48
popq %rbx
xorps %xmm0, %xmm0
movq 0xd8(%rsp,%rbx), %rax
testq %rax, %rax
je 0x29045b
lock
decl (%rax)
jne 0x29045b
movq 0xd0(%rsp,%rbx), %rsi
movq 0xf0(%rsp,%rbx), %rdi
testq %rdi, %rdi
je 0x290450
movq (%rdi), %rax
callq *0x18(%rax)
xorps %xmm0, %xmm0
jmp 0x29045b
movq %rsi, %rdi
callq 0x5f3e0
xorps %xmm0, %xmm0
leaq (%rsp,%rbx), %rax
addq $0xd0, %rax
andq $0x0, 0x40(%rax)
movups %xmm0, 0xc(%rax)
movups %xmm0, (%rax)
andl $0x0, 0x38(%rax)
movups %xmm0, 0x28(%rax)
addq $-0x48, %rbx
cmpq $-0x48, %rbx
jne 0x29041e
leaq 0x8(%rsp), %rdi
callq 0x71614
xorl %r12d, %r12d
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x2904be
lock
decl (%rax)
jne 0x2904be
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x2904b6
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2904be
movq %rsi, %rdi
callq 0x5f3e0
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x2904ef
lock
decl (%rax)
jne 0x2904ef
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x2904e7
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2904ef
movq %rsi, %rdi
callq 0x5f3e0
movl %r12d, %eax
addq $0x168, %rsp # imm = 0x168
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x29061e
jmp 0x29061e
movq %rax, %rbx
leaq 0xc0(%rsp), %rdi
callq 0x6b03a
jmp 0x290531
jmp 0x29052e
jmp 0x290526
jmp 0x290526
movq %rax, %rbx
jmp 0x2905b5
movq %rax, %rbx
pushq $0x48
popq %r14
xorps %xmm0, %xmm0
movq 0xd8(%rsp,%r14), %rax
testq %rax, %rax
je 0x290575
lock
decl (%rax)
jne 0x290575
movq 0xd0(%rsp,%r14), %rsi
movq 0xf0(%rsp,%r14), %rdi
testq %rdi, %rdi
je 0x29056a
movq (%rdi), %rax
callq *0x18(%rax)
xorps %xmm0, %xmm0
jmp 0x290575
movq %rsi, %rdi
callq 0x5f3e0
xorps %xmm0, %xmm0
leaq (%rsp,%r14), %rax
addq $0xd0, %rax
andq $0x0, 0x40(%rax)
movups %xmm0, 0xc(%rax)
movups %xmm0, (%rax)
andl $0x0, 0x38(%rax)
movups %xmm0, 0x28(%rax)
addq $-0x48, %r14
cmpq $-0x48, %r14
jne 0x290538
jmp 0x2905ab
jmp 0x29061e
movq %rax, %rbx
jmp 0x2905e3
jmp 0x29061e
movq %rax, %rbx
leaq 0x8(%rsp), %rdi
callq 0x71614
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x2905e3
lock
decl (%rax)
jne 0x2905e3
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0x2905dd
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2905e3
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x290614
lock
decl (%rax)
jne 0x290614
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
jne 0x29060e
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x290614
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
jmp 0x29061e
movq %rax, %rdi
callq 0x61d68
| /csukuangfj[P]ncnn/src/layer/x86/convolutiondepthwise_x86.cpp |
virtual thunk to ncnn::ConvolutionDepthWise_x86::forward(std::vector<ncnn::Mat, std::allocator<ncnn::Mat>> const&, std::vector<ncnn::Mat, std::allocator<ncnn::Mat>>&, ncnn::Option const&) const | int ConvolutionDepthWise_x86::forward(const std::vector<Mat>& bottom_blobs, std::vector<Mat>& top_blobs, const Option& opt) const
{
const Mat& bottom_blob = bottom_blobs[0];
const Mat& _weight_data = bottom_blobs[1];
Mat& top_blob = top_blobs[0];
const int _kernel_w = _weight_data.w;
const int _kernel_h = _weight_data.h;
const int _num_output = _weight_data.c * _weight_data.elempack;
Mat weight_data_flattened;
flatten(_weight_data, weight_data_flattened, opt);
if (weight_data_flattened.empty())
return -100;
// weight_data_flattened as pack1
weight_data_flattened.w *= weight_data_flattened.elempack;
weight_data_flattened.elemsize /= weight_data_flattened.elempack;
weight_data_flattened.elempack = 1;
Mat bias_data_flattened;
if (bias_term)
{
const Mat& _bias_data = bottom_blobs[2];
flatten(_bias_data, bias_data_flattened, opt);
if (bias_data_flattened.empty())
return -100;
// bias_data_flattened as pack1
bias_data_flattened.w *= bias_data_flattened.elempack;
bias_data_flattened.elemsize /= bias_data_flattened.elempack;
bias_data_flattened.elempack = 1;
}
ncnn::Layer* op = ncnn::create_layer(ncnn::LayerType::ConvolutionDepthWise);
ncnn::ParamDict pd;
pd.set(0, _num_output);
pd.set(1, _kernel_w);
pd.set(11, _kernel_h);
pd.set(2, dilation_w);
pd.set(12, dilation_h);
pd.set(3, stride_w);
pd.set(13, stride_h);
pd.set(4, pad_left);
pd.set(15, pad_right);
pd.set(14, pad_top);
pd.set(16, pad_bottom);
pd.set(18, pad_value);
pd.set(5, bias_term);
pd.set(6, weight_data_flattened.w);
pd.set(7, group);
pd.set(8, int8_scale_term);
pd.set(9, activation_type);
pd.set(10, activation_params);
op->load_param(pd);
ncnn::Mat weights[2];
weights[0] = weight_data_flattened;
weights[1] = bias_data_flattened;
op->load_model(ncnn::ModelBinFromMatArray(weights));
op->create_pipeline(opt);
op->forward(bottom_blob, top_blob, opt);
op->destroy_pipeline(opt);
delete op;
return 0;
} | movq (%rdi), %rax
addq -0x40(%rax), %rdi
jmp 0x28febe
| /csukuangfj[P]ncnn/src/layer/x86/convolutiondepthwise_x86.cpp |
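The two records above reduce to one pattern: instantiate a plain ConvolutionDepthWise layer at runtime and hand it the flattened weights. A condensed sketch of that pattern follows; the header names, the helper name run_dynamic_convdw, and passing in a prebuilt ParamDict are assumptions for illustration, while every ncnn call used appears verbatim in the source above.

#include "layer.h"     // ncnn::create_layer, ncnn::Layer (header names assumed)
#include "modelbin.h"  // ncnn::ModelBinFromMatArray
#include "paramdict.h" // ncnn::ParamDict

static int run_dynamic_convdw(const ncnn::Mat& bottom_blob, ncnn::Mat& top_blob,
                              const ncnn::Mat& weight_flat, const ncnn::Mat& bias_flat,
                              const ncnn::ParamDict& pd, const ncnn::Option& opt)
{
    ncnn::Layer* op = ncnn::create_layer(ncnn::LayerType::ConvolutionDepthWise);
    op->load_param(pd); // pd carries num_output, kernel/stride/dilation/pad, group, ...

    ncnn::Mat weights[2];
    weights[0] = weight_flat; // elempack already folded to 1, as in the source above
    weights[1] = bias_flat;
    op->load_model(ncnn::ModelBinFromMatArray(weights));

    op->create_pipeline(opt);
    op->forward(bottom_blob, top_blob, opt);
    op->destroy_pipeline(opt);
    delete op;
    return 0;
}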
ncnn::ConvolutionDepthWise_x86_avx512::create_group_ops(ncnn::Option const&) | int ConvolutionDepthWise_x86_avx512::create_group_ops(const Option& opt)
{
// create Convolution op for each group
const int maxk = kernel_w * kernel_h;
int channels = (weight_data_size / group) / maxk / (num_output / group) * group;
for (int i = 0; i < (int)group_ops.size(); i++)
delete group_ops[i];
group_ops.clear();
const int channels_g = channels / group;
const int num_output_g = num_output / group;
group_ops.resize(group);
for (int g = 0; g < group; g++)
{
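// each group's weights are one contiguous block of maxk * channels_g * num_output_g
// elements; clone() below detaches the slice from the shared weight buffer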
Mat weight_data_g = weight_data.range(maxk * channels_g * num_output_g * g, maxk * channels_g * num_output_g).clone();
Mat bias_data_g;
if (bias_term)
bias_data_g = bias_data.range(num_output_g * g, num_output_g);
ncnn::Layer* op = ncnn::create_layer(ncnn::LayerType::Convolution);
// set param
ncnn::ParamDict pd;
pd.set(0, num_output_g); // num_output
pd.set(1, kernel_w);
pd.set(11, kernel_h);
pd.set(2, dilation_w);
pd.set(12, dilation_h);
pd.set(3, stride_w);
pd.set(13, stride_h);
pd.set(4, 0); // pad_w
pd.set(14, 0); // pad_h
pd.set(5, bias_term);
pd.set(6, maxk * channels_g * num_output_g); // weight_data_size
pd.set(8, int8_scale_term);
pd.set(9, activation_type);
pd.set(10, activation_params);
op->load_param(pd);
// set weights
if (bias_term)
{
ncnn::Mat weights[5];
weights[0] = weight_data_g;
weights[1] = bias_data_g;
#if NCNN_INT8
if (int8_scale_term)
{
Mat weight_data_int8_scales_g(num_output_g);
weight_data_int8_scales_g.fill(weight_data_int8_scales[g]);
weights[2] = weight_data_int8_scales_g;
weights[3] = bottom_blob_int8_scales.range(g, 1);
}
if (int8_scale_term > 100)
{
weights[4] = top_blob_int8_scales.range(g, 1);
}
#endif
op->load_model(ModelBinFromMatArray(weights));
}
else
{
ncnn::Mat weights[4];
weights[0] = weight_data_g;
#if NCNN_INT8
if (int8_scale_term)
{
Mat weight_data_int8_scales_g(num_output_g);
weight_data_int8_scales_g.fill(weight_data_int8_scales[g]);
weights[1] = weight_data_int8_scales_g;
weights[2] = bottom_blob_int8_scales.range(g, 1);
}
if (int8_scale_term > 100)
{
weights[3] = top_blob_int8_scales.range(g, 1);
}
#endif
op->load_model(ModelBinFromMatArray(weights));
}
op->create_pipeline(opt);
group_ops[g] = op;
}
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x278, %rsp # imm = 0x278
movq %rsi, 0x250(%rsp)
movq %rdi, %r14
movq (%rdi), %rax
movq -0x18(%rax), %rdx
movl 0xd0(%rdi,%rdx), %ecx
movl 0xd8(%rdi,%rdx), %ebp
imull 0xd4(%rdi,%rdx), %ebp
movl 0x104(%rdi,%rdx), %eax
movl 0x108(%rdi,%rdx), %r15d
cltd
idivl %r15d
cltd
idivl %ebp
movl %eax, %esi
movl %ecx, %eax
cltd
idivl %r15d
movl %eax, %ecx
movl %esi, %eax
cltd
idivl %ecx
movl %eax, %ebx
leaq 0x10(%rdi), %rax
movq %rax, 0x1e0(%rsp)
xorl %r12d, %r12d
movq 0x10(%r14), %rax
movq 0x18(%r14), %rcx
movq %rcx, %rdx
subq %rax, %rdx
shrq $0x3, %rdx
movslq %edx, %rdx
cmpq %rdx, %r12
jge 0x29104f
movq (%rax,%r12,8), %rdi
testq %rdi, %rdi
je 0x29104a
movq (%rdi), %rax
callq *0x8(%rax)
incq %r12
jmp 0x291021
imull %r15d, %ebx
cmpq %rax, %rcx
je 0x29105c
movq %rax, 0x18(%r14)
movq (%r14), %rax
movq -0x18(%rax), %rcx
movslq 0x108(%r14,%rcx), %rsi
movl %ebx, %eax
cltd
idivl %esi
movl %eax, %ebx
movl 0xd0(%r14,%rcx), %eax
cltd
idivl %esi
movl %eax, %r15d
movq 0x1e0(%rsp), %rdi
callq 0x6fbc2
leaq 0x118(%r14), %rax
movq %rax, 0x258(%rsp)
imull %ebp, %ebx
imull %r15d, %ebx
movl %ebx, 0x6c(%rsp)
movslq %ebx, %rax
movq %rax, 0x260(%rsp)
movslq %r15d, %rax
movq %rax, 0x248(%rsp)
pushq $0x1
popq %rbx
xorl %edx, %edx
movl %r15d, 0x14(%rsp)
movq (%r14), %rax
movq -0x18(%rax), %rax
movslq 0x108(%r14,%rax), %rcx
cmpq %rcx, %rdx
jge 0x291dc6
movq %rdx, %rcx
movq %rdx, 0x8(%rsp)
movq 0x260(%rsp), %rdi
imulq %rdi, %rcx
movq 0x178(%r14,%rax), %rdx
imulq %rdx, %rcx
addq 0x168(%r14,%rax), %rcx
movl 0x180(%r14,%rax), %esi
movq 0x188(%r14,%rax), %rax
movq %rcx, 0x70(%rsp)
andq $0x0, 0x78(%rsp)
movq %rdx, 0x80(%rsp)
movl %esi, 0x88(%rsp)
movq %rax, 0x90(%rsp)
movl %ebx, 0x98(%rsp)
movl %edi, 0x9c(%rsp)
movabsq $0x100000001, %rax # imm = 0x100000001
movq %rax, 0xa0(%rsp)
movl %ebx, 0xa8(%rsp)
movq %rdi, 0xb0(%rsp)
leaq 0x200(%rsp), %rdi
leaq 0x70(%rsp), %rsi
xorl %edx, %edx
callq 0x624f0
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x2911ad
lock
decl (%rax)
jne 0x2911ad
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x2911a5
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2911ad
movq %rsi, %rdi
callq 0x5f3e0
movq (%r14), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0x100(%r14,%rax)
je 0x29121d
movq 0x8(%rsp), %r15
movq 0x248(%rsp), %rcx
imulq %rcx, %r15
movq 0x1c0(%r14,%rax), %rsi
movq %rsi, 0x1f8(%rsp)
imulq %rsi, %r15
addq 0x1b0(%r14,%rax), %r15
movl 0x1c8(%r14,%rax), %edx
movl %edx, 0x1c(%rsp)
movq 0x1d0(%r14,%rax), %rax
movq %rax, 0x1f0(%rsp)
movl %ebx, %r13d
movl 0x14(%rsp), %eax
movl %eax, 0x18(%rsp)
movq %rcx, 0x1e8(%rsp)
jmp 0x291257
xorl %r15d, %r15d
movq $0x0, 0x1f8(%rsp)
movl $0x0, 0x1c(%rsp)
movq $0x0, 0x1f0(%rsp)
xorl %r13d, %r13d
movl $0x0, 0x18(%rsp)
movq $0x0, 0x1e8(%rsp)
pushq $0x6
popq %rdi
callq 0x782bf
movq %rax, %rbp
leaq 0x268(%rsp), %r12
movq %r12, %rdi
callq 0x71548
movq %r12, %rdi
xorl %esi, %esi
movl 0x14(%rsp), %edx
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xd4(%r14,%rax), %edx
movq %r12, %rdi
movl %ebx, %esi
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xd8(%r14,%rax), %edx
movq %r12, %rdi
pushq $0xb
popq %rsi
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xdc(%r14,%rax), %edx
movq %r12, %rdi
pushq $0x2
popq %rsi
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xe0(%r14,%rax), %edx
movq %r12, %rdi
pushq $0xc
popq %rsi
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xe4(%r14,%rax), %edx
movq %r12, %rdi
pushq $0x3
popq %rsi
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xe8(%r14,%rax), %edx
movq %r12, %rdi
pushq $0xd
popq %rsi
callq 0x7193a
movq %r12, %rdi
pushq $0x4
popq %rsi
xorl %edx, %edx
callq 0x7193a
movq %r12, %rdi
pushq $0xe
popq %rsi
xorl %edx, %edx
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0x100(%r14,%rax), %edx
movq %r12, %rdi
pushq $0x5
popq %rsi
callq 0x7193a
movq %r12, %rdi
pushq $0x6
popq %rsi
movl 0x6c(%rsp), %edx
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0x10c(%r14,%rax), %edx
movq %r12, %rdi
pushq $0x8
popq %rsi
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0x110(%r14,%rax), %edx
movq %r12, %rdi
pushq $0x9
popq %rsi
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rdx
addq 0x258(%rsp), %rdx
movq %r12, %rdi
pushq $0xa
popq %rsi
callq 0x7196c
movq (%rbp), %rax
movq %rbp, %rdi
movq %r12, %rsi
callq *0x10(%rax)
movq (%r14), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0x100(%r14,%rax)
je 0x291444
pushq $0x40
popq %rax
vxorps %xmm0, %xmm0, %xmm0
andq $0x0, 0x70(%rsp,%rax)
vmovups %xmm0, 0x30(%rsp,%rax)
vmovups %xmm0, 0x3c(%rsp,%rax)
vmovups %xmm0, 0x50(%rsp,%rax)
vmovups %xmm0, 0x5c(%rsp,%rax)
addq $0x48, %rax
cmpq $0x1a8, %rax # imm = 0x1A8
jne 0x2913d2
movq 0x208(%rsp), %rax
testq %rax, %rax
je 0x29140c
lock
incl (%rax)
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x2914c5
lock
decl (%rax)
jne 0x2914c5
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x2914bd
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2914c5
pushq $0x40
popq %rax
vxorps %xmm0, %xmm0, %xmm0
andq $0x0, 0x70(%rsp,%rax)
vmovups %xmm0, 0x30(%rsp,%rax)
vmovups %xmm0, 0x3c(%rsp,%rax)
vmovups %xmm0, 0x50(%rsp,%rax)
vmovups %xmm0, 0x5c(%rsp,%rax)
addq $0x48, %rax
cmpq $0x160, %rax # imm = 0x160
jne 0x29144b
movq 0x208(%rsp), %rax
testq %rax, %rax
je 0x291485
lock
incl (%rax)
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x29187c
lock
decl (%rax)
jne 0x29187c
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x291874
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x29187c
movq %rsi, %rdi
callq 0x5f3e0
vmovaps 0x200(%rsp), %xmm0
vmovaps %xmm0, 0x70(%rsp)
movq 0x210(%rsp), %rax
movq %rax, 0x80(%rsp)
movl 0x218(%rsp), %eax
movl %eax, 0x88(%rsp)
movq 0x220(%rsp), %rax
movq %rax, 0x90(%rsp)
vmovups 0x228(%rsp), %xmm0
vmovups %xmm0, 0x98(%rsp)
movl 0x238(%rsp), %eax
movl %eax, 0xa8(%rsp)
movq 0x240(%rsp), %rax
movq %rax, 0xb0(%rsp)
movq 0xc0(%rsp), %rax
testq %rax, %rax
je 0x291569
lock
decl (%rax)
jne 0x291569
movq 0xb8(%rsp), %rsi
movq 0xd8(%rsp), %rdi
testq %rdi, %rdi
je 0x291561
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x291569
movq %rsi, %rdi
callq 0x5f3e0
movq %r15, 0xb8(%rsp)
andq $0x0, 0xc0(%rsp)
movq 0x1f8(%rsp), %rax
movq %rax, 0xc8(%rsp)
movl 0x1c(%rsp), %eax
movl %eax, 0xd0(%rsp)
movq 0x1f0(%rsp), %rax
movq %rax, 0xd8(%rsp)
movl %r13d, 0xe0(%rsp)
movl 0x18(%rsp), %eax
movl %eax, 0xe4(%rsp)
movl %r13d, 0xe8(%rsp)
movl %r13d, 0xec(%rsp)
movl %r13d, 0xf0(%rsp)
movq 0x1e8(%rsp), %rax
movq %rax, 0xf8(%rsp)
movq (%r14), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0x10c(%r14,%rax)
je 0x2917fe
andq $0x0, 0x60(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
vmovups %xmm0, 0x2c(%rsp)
leaq 0x40(%rsp), %rax
vmovups %xmm0, 0xc(%rax)
vmovaps %xmm0, (%rax)
leaq 0x20(%rsp), %rdi
movl 0x14(%rsp), %esi
pushq $0x4
popq %rdx
xorl %ecx, %ecx
callq 0x635fa
movq (%r14), %rax
movq -0x18(%rax), %rax
movq 0x1f8(%r14,%rax), %rax
movq 0x8(%rsp), %rcx
vmovss (%rax,%rcx,4), %xmm0
movl 0x58(%rsp), %eax
imull 0x60(%rsp), %eax
movq 0x20(%rsp), %rcx
testl %eax, %eax
movl $0x0, %edx
cmovlel %edx, %eax
xorl %edx, %edx
cmpl %edx, %eax
je 0x29166e
vmovss %xmm0, (%rcx,%rdx,4)
incq %rdx
jmp 0x291660
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x29167b
lock
incl (%rax)
movq 0x108(%rsp), %rax
testq %rax, %rax
je 0x2916b2
lock
decl (%rax)
jne 0x2916b2
movq 0x100(%rsp), %rsi
movq 0x120(%rsp), %rdi
testq %rdi, %rdi
je 0x2916aa
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2916b2
movq %rsi, %rdi
callq 0x5f3e0
vmovaps 0x20(%rsp), %xmm0
vmovaps %xmm0, 0x100(%rsp)
movq 0x30(%rsp), %rax
movq %rax, 0x110(%rsp)
movl 0x38(%rsp), %eax
movl %eax, 0x118(%rsp)
movq 0x40(%rsp), %rax
movq %rax, 0x120(%rsp)
vmovups 0x48(%rsp), %xmm0
vmovups %xmm0, 0x128(%rsp)
movl 0x58(%rsp), %eax
movl %eax, 0x138(%rsp)
movq 0x60(%rsp), %rax
movq %rax, 0x140(%rsp)
movq (%r14), %rax
movq -0x18(%rax), %rax
movq 0x250(%r14,%rax), %r15
movq %r15, %r13
imulq 0x8(%rsp), %r13
addq 0x240(%r14,%rax), %r13
movl 0x258(%r14,%rax), %r12d
movq 0x260(%r14,%rax), %rbx
movq 0x150(%rsp), %rax
testq %rax, %rax
je 0x291774
lock
decl (%rax)
jne 0x291774
movq 0x148(%rsp), %rsi
movq 0x168(%rsp), %rdi
testq %rdi, %rdi
je 0x29176c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x291774
movq %rsi, %rdi
callq 0x5f3e0
movq %r13, 0x148(%rsp)
andq $0x0, 0x150(%rsp)
movq %r15, 0x158(%rsp)
movl %r12d, 0x160(%rsp)
movq %rbx, 0x168(%rsp)
vbroadcastss 0x166e5a(%rip), %xmm0 # 0x3f8600
vmovaps %xmm0, 0x170(%rsp)
movl $0x1, 0x180(%rsp)
movq $0x1, 0x188(%rsp)
movq 0x28(%rsp), %rax
testq %rax, %rax
pushq $0x1
popq %rbx
je 0x2917f7
lock
decl (%rax)
jne 0x2917f7
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x2917ef
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2917f7
movq %rsi, %rdi
callq 0x5f3e0
movq (%r14), %rax
movq -0x18(%rax), %rax
cmpl $0x65, 0x10c(%r14,%rax)
jl 0x291a1e
movq 0x298(%r14,%rax), %r15
movq %r15, %r13
imulq 0x8(%rsp), %r13
addq 0x288(%r14,%rax), %r13
movl 0x2a0(%r14,%rax), %ebx
movq 0x2a8(%r14,%rax), %r12
movq 0x198(%rsp), %rax
testq %rax, %rax
je 0x2919ca
lock
decl (%rax)
jne 0x2919ca
movq 0x190(%rsp), %rsi
movq 0x1b0(%rsp), %rdi
testq %rdi, %rdi
je 0x2919c2
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2919ca
movq %rsi, %rdi
callq 0x5f3e0
vmovaps 0x200(%rsp), %xmm0
vmovaps %xmm0, 0x70(%rsp)
movq 0x210(%rsp), %rax
movq %rax, 0x80(%rsp)
movl 0x218(%rsp), %eax
movl %eax, 0x88(%rsp)
movq 0x220(%rsp), %rax
movq %rax, 0x90(%rsp)
vmovups 0x228(%rsp), %xmm0
vmovups %xmm0, 0x98(%rsp)
movl 0x238(%rsp), %eax
movl %eax, 0xa8(%rsp)
movq 0x240(%rsp), %rax
movq %rax, 0xb0(%rsp)
movq (%r14), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0x10c(%r14,%rax)
je 0x291c01
andq $0x0, 0x60(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
vmovups %xmm0, 0x2c(%rsp)
leaq 0x40(%rsp), %rax
vmovups %xmm0, 0xc(%rax)
vmovaps %xmm0, (%rax)
leaq 0x20(%rsp), %rdi
movl 0x14(%rsp), %esi
pushq $0x4
popq %rdx
xorl %ecx, %ecx
callq 0x635fa
movq (%r14), %rax
movq -0x18(%rax), %rax
movq 0x1f8(%r14,%rax), %rax
movq 0x8(%rsp), %rcx
vmovss (%rax,%rcx,4), %xmm0
movl 0x58(%rsp), %eax
imull 0x60(%rsp), %eax
movq 0x20(%rsp), %rcx
testl %eax, %eax
movl $0x0, %edx
cmovlel %edx, %eax
xorl %edx, %edx
cmpl %edx, %eax
je 0x291977
vmovss %xmm0, (%rcx,%rdx,4)
incq %rdx
jmp 0x291969
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x291984
lock
incl (%rax)
movq 0xc0(%rsp), %rax
testq %rax, %rax
je 0x291ab6
lock
decl (%rax)
jne 0x291ab6
movq 0xb8(%rsp), %rsi
movq 0xd8(%rsp), %rdi
testq %rdi, %rdi
je 0x291aae
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x291ab6
movq %rsi, %rdi
callq 0x5f3e0
movq %r13, 0x190(%rsp)
andq $0x0, 0x198(%rsp)
movq %r15, 0x1a0(%rsp)
movl %ebx, 0x1a8(%rsp)
movq %r12, 0x1b0(%rsp)
vbroadcastss 0x166c05(%rip), %xmm0 # 0x3f8600
vmovups %xmm0, 0x1b8(%rsp)
movl $0x1, 0x1c8(%rsp)
movq $0x1, 0x1d0(%rsp)
pushq $0x1
popq %rbx
leaq 0x20(%rsp), %r15
movq %r15, %rdi
leaq 0x70(%rsp), %rsi
callq 0x6b00e
movq (%rbp), %rax
movq %rbp, %rdi
movq %r15, %rsi
callq *0x18(%rax)
movq %r15, %rdi
callq 0x6b03a
movl $0x120, %r15d # imm = 0x120
movq 0x78(%rsp,%r15), %rax
testq %rax, %rax
je 0x291a7c
lock
decl (%rax)
jne 0x291a7c
movq 0x70(%rsp,%r15), %rsi
movq 0x90(%rsp,%r15), %rdi
testq %rdi, %rdi
je 0x291a74
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x291a7c
movq %rsi, %rdi
callq 0x5f3e0
leaq (%rsp,%r15), %rax
addq $0x70, %rax
andq $0x0, 0x40(%rax)
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, 0xc(%rax)
vmovups %xmm0, (%rax)
andl $0x0, 0x38(%rax)
vmovups %xmm0, 0x28(%rax)
addq $-0x48, %r15
cmpq $-0x48, %r15
jne 0x291a4b
jmp 0x291d4f
movq %rsi, %rdi
callq 0x5f3e0
vmovaps 0x20(%rsp), %xmm0
vmovups %xmm0, 0xb8(%rsp)
movq 0x30(%rsp), %rax
movq %rax, 0xc8(%rsp)
movl 0x38(%rsp), %eax
movl %eax, 0xd0(%rsp)
movq 0x40(%rsp), %rax
movq %rax, 0xd8(%rsp)
vmovups 0x48(%rsp), %xmm0
vmovaps %xmm0, 0xe0(%rsp)
movl 0x58(%rsp), %eax
movl %eax, 0xf0(%rsp)
movq 0x60(%rsp), %rax
movq %rax, 0xf8(%rsp)
movq (%r14), %rax
movq -0x18(%rax), %rax
movq 0x250(%r14,%rax), %r15
movq %r15, %r13
imulq 0x8(%rsp), %r13
addq 0x240(%r14,%rax), %r13
movl 0x258(%r14,%rax), %ebx
movq 0x260(%r14,%rax), %r12
movq 0x108(%rsp), %rax
testq %rax, %rax
je 0x291b78
lock
decl (%rax)
jne 0x291b78
movq 0x100(%rsp), %rsi
movq 0x120(%rsp), %rdi
testq %rdi, %rdi
je 0x291b70
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x291b78
movq %rsi, %rdi
callq 0x5f3e0
movq %r13, 0x100(%rsp)
andq $0x0, 0x108(%rsp)
movq %r15, 0x110(%rsp)
movl %ebx, 0x118(%rsp)
movq %r12, 0x120(%rsp)
vbroadcastss 0x166a57(%rip), %xmm0 # 0x3f8600
vmovups %xmm0, 0x128(%rsp)
movl $0x1, 0x138(%rsp)
movq $0x1, 0x140(%rsp)
movq 0x28(%rsp), %rax
testq %rax, %rax
pushq $0x1
popq %rbx
je 0x291bfa
lock
decl (%rax)
jne 0x291bfa
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x291bf2
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x291bfa
movq %rsi, %rdi
callq 0x5f3e0
movq (%r14), %rax
movq -0x18(%rax), %rax
cmpl $0x65, 0x10c(%r14,%rax)
jl 0x291cc4
movq 0x298(%r14,%rax), %r15
movq %r15, %r13
imulq 0x8(%rsp), %r13
addq 0x288(%r14,%rax), %r13
movl 0x2a0(%r14,%rax), %ebx
movq 0x2a8(%r14,%rax), %r12
movq 0x150(%rsp), %rax
testq %rax, %rax
je 0x291c70
lock
decl (%rax)
jne 0x291c70
movq 0x148(%rsp), %rsi
movq 0x168(%rsp), %rdi
testq %rdi, %rdi
je 0x291c68
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x291c70
movq %rsi, %rdi
callq 0x5f3e0
movq %r13, 0x148(%rsp)
andq $0x0, 0x150(%rsp)
movq %r15, 0x158(%rsp)
movl %ebx, 0x160(%rsp)
movq %r12, 0x168(%rsp)
vbroadcastss 0x16695f(%rip), %xmm0 # 0x3f8600
vmovaps %xmm0, 0x170(%rsp)
movl $0x1, 0x180(%rsp)
movq $0x1, 0x188(%rsp)
pushq $0x1
popq %rbx
leaq 0x20(%rsp), %r15
movq %r15, %rdi
leaq 0x70(%rsp), %rsi
callq 0x6b00e
movq (%rbp), %rax
movq %rbp, %rdi
movq %r15, %rsi
callq *0x18(%rax)
movq %r15, %rdi
callq 0x6b03a
movl $0xd8, %r15d
movq 0x78(%rsp,%r15), %rax
testq %rax, %rax
je 0x291d22
lock
decl (%rax)
jne 0x291d22
movq 0x70(%rsp,%r15), %rsi
movq 0x90(%rsp,%r15), %rdi
testq %rdi, %rdi
je 0x291d1a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x291d22
movq %rsi, %rdi
callq 0x5f3e0
leaq (%rsp,%r15), %rax
addq $0x70, %rax
andq $0x0, 0x40(%rax)
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, 0xc(%rax)
vmovups %xmm0, (%rax)
andl $0x0, 0x38(%rax)
vmovups %xmm0, 0x28(%rax)
addq $-0x48, %r15
cmpq $-0x48, %r15
jne 0x291cf1
movq (%rbp), %rax
movq %rbp, %rdi
movq 0x250(%rsp), %rsi
callq *0x20(%rax)
leaq 0x268(%rsp), %rdi
movq 0x1e0(%rsp), %rax
movq (%rax), %rax
movq 0x8(%rsp), %rcx
movq %rbp, (%rax,%rcx,8)
callq 0x71614
movq 0x208(%rsp), %rax
testq %rax, %rax
je 0x291db9
lock
decl (%rax)
jne 0x291db9
movq 0x200(%rsp), %rsi
movq 0x220(%rsp), %rdi
testq %rdi, %rdi
je 0x291db1
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x291db9
movq %rsi, %rdi
callq 0x5f3e0
movq 0x8(%rsp), %rdx
incq %rdx
jmp 0x2910c7
xorl %eax, %eax
addq $0x278, %rsp # imm = 0x278
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x291e8e
jmp 0x292025
jmp 0x291de6
movq %rax, %rbx
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x291ea0
lock
decl (%rax)
jne 0x291ea0
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0x291e1c
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x291ea0
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x291ea0
jmp 0x292025
jmp 0x291f18
jmp 0x291e8e
jmp 0x291e32
movq %rax, %rbx
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x291f2a
lock
decl (%rax)
jne 0x291f2a
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0x291e68
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x291f2a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x291f2a
jmp 0x292025
jmp 0x292025
jmp 0x292025
jmp 0x292025
jmp 0x291e8e
jmp 0x291f18
movq %rax, %rbx
jmp 0x291ea0
movq %rax, %rbx
leaq 0x20(%rsp), %rdi
callq 0x6b03a
movl $0xd8, %r14d
vxorps %xmm0, %xmm0, %xmm0
movq 0x78(%rsp,%r14), %rax
testq %rax, %rax
je 0x291ee3
lock
decl (%rax)
jne 0x291ee3
movq 0x70(%rsp,%r14), %rsi
movq 0x90(%rsp,%r14), %rdi
testq %rdi, %rdi
je 0x291ed7
movq (%rdi), %rax
callq *0x18(%rax)
vxorps %xmm0, %xmm0, %xmm0
jmp 0x291ee3
movq %rsi, %rdi
callq 0x5f3e0
vxorps %xmm0, %xmm0, %xmm0
leaq (%rsp,%r14), %rax
addq $0x70, %rax
andq $0x0, 0x40(%rax)
vmovups %xmm0, 0xc(%rax)
vmovups %xmm0, (%rax)
andl $0x0, 0x38(%rax)
vmovups %xmm0, 0x28(%rax)
addq $-0x48, %r14
cmpq $-0x48, %r14
jne 0x291eaa
jmp 0x291fd9
jmp 0x292025
jmp 0x291f18
movq %rax, %rbx
jmp 0x291f2a
movq %rax, %rbx
leaq 0x20(%rsp), %rdi
callq 0x6b03a
movl $0x120, %r14d # imm = 0x120
vxorps %xmm0, %xmm0, %xmm0
movq 0x78(%rsp,%r14), %rax
testq %rax, %rax
je 0x291f6d
lock
decl (%rax)
jne 0x291f6d
movq 0x70(%rsp,%r14), %rsi
movq 0x90(%rsp,%r14), %rdi
testq %rdi, %rdi
je 0x291f61
movq (%rdi), %rax
callq *0x18(%rax)
vxorps %xmm0, %xmm0, %xmm0
jmp 0x291f6d
movq %rsi, %rdi
callq 0x5f3e0
vxorps %xmm0, %xmm0, %xmm0
leaq (%rsp,%r14), %rax
addq $0x70, %rax
andq $0x0, 0x40(%rax)
vmovups %xmm0, 0xc(%rax)
vmovups %xmm0, (%rax)
andl $0x0, 0x38(%rax)
vmovups %xmm0, 0x28(%rax)
addq $-0x48, %r14
cmpq $-0x48, %r14
jne 0x291f34
jmp 0x291fd9
jmp 0x292025
jmp 0x291fcd
movq %rax, %rbx
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x29201d
lock
decl (%rax)
jne 0x29201d
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x29200d
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x29201d
jmp 0x292025
movq %rax, %rbx
jmp 0x291fe6
jmp 0x292025
jmp 0x292025
movq %rax, %rbx
leaq 0x268(%rsp), %rdi
callq 0x71614
movq 0x208(%rsp), %rax
testq %rax, %rax
je 0x29201d
lock
decl (%rax)
jne 0x29201d
movq 0x200(%rsp), %rsi
movq 0x220(%rsp), %rdi
testq %rdi, %rdi
jne 0x292017
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x29201d
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/convolutiondepthwise_x86_avx512.cpp |
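A worked example of the channel back-computation at the top of create_group_ops() above: weight_data_size holds the total weight count, so dividing out group, maxk, and the per-group output count recovers the input channel count. The sample numbers are illustrative.

#include <cstdio>

int main()
{
    const int group = 4, num_output = 8, kernel_w = 3, kernel_h = 3;
    const int maxk = kernel_w * kernel_h; // 9
    const int channels_true = 8;          // what the computation should recover
    const int weight_data_size = maxk * (channels_true / group) * (num_output / group) * group; // 144
    const int channels = (weight_data_size / group) / maxk / (num_output / group) * group;
    printf("channels = %d\n", channels);  // 8
    return 0;
}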
ncnn::ConvolutionDepthWise_x86_avx512::destroy_pipeline(ncnn::Option const&) | int ConvolutionDepthWise_x86_avx512::destroy_pipeline(const Option& opt)
{
if (activation)
{
activation->destroy_pipeline(opt);
delete activation;
activation = 0;
}
for (int i = 0; i < (int)group_ops.size(); i++)
{
group_ops[i]->destroy_pipeline(opt);
delete group_ops[i];
}
group_ops.clear();
return 0;
} | pushq %r15
pushq %r14
pushq %rbx
movq %rsi, %r14
movq %rdi, %rbx
movq 0x8(%rdi), %rdi
testq %rdi, %rdi
je 0x292071
movq (%rdi), %rax
movq %r14, %rsi
callq *0x28(%rax)
movq 0x8(%rbx), %rdi
testq %rdi, %rdi
je 0x29206c
movq (%rdi), %rax
callq *0x8(%rax)
andq $0x0, 0x8(%rbx)
xorl %r15d, %r15d
movq 0x10(%rbx), %rax
movq 0x18(%rbx), %rcx
movq %rcx, %rdx
subq %rax, %rdx
shrq $0x3, %rdx
movslq %edx, %rdx
cmpq %rdx, %r15
jge 0x2920b3
movq (%rax,%r15,8), %rdi
movq (%rdi), %rax
movq %r14, %rsi
callq *0x28(%rax)
movq 0x10(%rbx), %rax
movq (%rax,%r15,8), %rdi
testq %rdi, %rdi
je 0x2920ae
movq (%rdi), %rax
callq *0x8(%rax)
incq %r15
jmp 0x292074
cmpq %rax, %rcx
je 0x2920bc
movq %rax, 0x18(%rbx)
xorl %eax, %eax
popq %rbx
popq %r14
popq %r15
retq
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/convolutiondepthwise_x86_avx512.cpp |
virtual thunk to ncnn::ConvolutionDepthWise_x86_avx512::destroy_pipeline(ncnn::Option const&) | int ConvolutionDepthWise_x86_avx512::destroy_pipeline(const Option& opt)
{
if (activation)
{
activation->destroy_pipeline(opt);
delete activation;
activation = 0;
}
for (int i = 0; i < (int)group_ops.size(); i++)
{
group_ops[i]->destroy_pipeline(opt);
delete group_ops[i];
}
group_ops.clear();
return 0;
} | pushq %rax
movq (%rdi), %rax
addq -0x38(%rax), %rdi
callq 0x292040
xorl %eax, %eax
popq %rcx
retq
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/convolutiondepthwise_x86_avx512.cpp |
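The handful of "virtual thunk" records in this file are this-pointer adjustors: under multiple inheritance the address of an object differs depending on which base a virtual call enters through, so the compiler emits a few instructions that shift `this` (the addq -0x38(%rax), %rdi above) before tail-jumping to the real body. A minimal reproduction of the situation, unrelated to ncnn and purely illustrative:

#include <cstdio>

struct A { virtual void f() {} int a; };
struct B { virtual void g() {} int b; };
struct C : A, B {
    void g() { printf("adjusted this = %p\n", (void*)this); }
};

int main()
{
    C c;
    B* pb = &c;  // pb points at the B subobject inside c, not at c itself
    pb->g();     // dispatched through a thunk that subtracts the B offset from `this`
    return 0;
}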
ncnn::ConvolutionDepthWise_x86_avx512::forward_int8_x86(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const | int ConvolutionDepthWise_x86_avx512::forward_int8_x86(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int channels = bottom_blob.c;
int elempack = bottom_blob.elempack;
int elembits = bottom_blob.elembits();
const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;
Mat bottom_blob_int8 = bottom_blob;
if (elembits != 8)
{
const int channels_g = channels * elempack / group;
Mat scales(channels * elempack);
{
float* ps = scales;
for (int g = 0; g < group; g++)
{
float scale = bottom_blob_int8_scales[g];
for (int q = 0; q < channels_g; q++)
{
*ps++ = scale;
}
}
}
Option opt_q = opt;
opt_q.blob_allocator = opt.workspace_allocator;
quantize_to_int8(bottom_blob, bottom_blob_int8, scales, opt_q);
}
Mat bottom_blob_bordered;
make_padding(bottom_blob_int8, bottom_blob_bordered, opt);
if (bottom_blob_bordered.empty())
return -100;
w = bottom_blob_bordered.w;
h = bottom_blob_bordered.h;
channels = bottom_blob_bordered.c;
elempack = bottom_blob_bordered.elempack;
int outw = (w - kernel_extent_w) / stride_w + 1;
int outh = (h - kernel_extent_h) / stride_h + 1;
// depth-wise
if (channels * elempack == group && group == num_output)
{
int out_elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
out_elempack = num_output % 8 == 0 ? 8 : 1;
}
#endif // __SSE2__
bool use_int8_requantize = int8_scale_term > 100;
size_t out_elemsize = use_int8_requantize ? 1u * out_elempack : 4u * out_elempack;
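// requantized output stays int8 (1 byte per lane); otherwise results are dequantized to fp32 (4 bytes per lane)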
top_blob.create(outw, outh, num_output / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#if __SSE2__
if (elempack == 8)
{
{
const int maxk = kernel_w * kernel_h;
// kernel offsets
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
int gap = w * dilation_h - kernel_w * dilation_w;
for (int i = 0; i < kernel_h; i++)
{
for (int j = 0; j < kernel_w; j++)
{
space_ofs[p1] = p2;
p1++;
p2 += dilation_w;
}
p2 += gap;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < channels; g++)
{
signed char* outptr_s8 = top_blob.channel(g);
float* outptr_f32 = top_blob.channel(g);
const signed char* kptr = (const signed char*)weight_data_tm + maxk * g * 8;
const Mat m = bottom_blob_bordered.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__m128i _sum0 = _mm_setzero_si128();
__m128i _sum1 = _mm_setzero_si128();
const signed char* sptr = m.row<const signed char>(i * stride_h) + j * stride_w * 8;
for (int k = 0; k < maxk; k++)
{
// TODO use _mm_cvtepi8_epi16 on sse4.1
__m128i _val = _mm_loadl_epi64((const __m128i*)(sptr + space_ofs[k] * 8));
_val = _mm_unpacklo_epi8(_val, _mm_cmpgt_epi8(_mm_setzero_si128(), _val));
__m128i _w = _mm_loadl_epi64((const __m128i*)(kptr + k * 8));
_w = _mm_unpacklo_epi8(_w, _mm_cmpgt_epi8(_mm_setzero_si128(), _w));
__m128i _sl = _mm_mullo_epi16(_val, _w);
__m128i _sh = _mm_mulhi_epi16(_val, _w);
__m128i _s0 = _mm_unpacklo_epi16(_sl, _sh);
__m128i _s1 = _mm_unpackhi_epi16(_sl, _sh);
_sum0 = _mm_add_epi32(_sum0, _s0);
_sum1 = _mm_add_epi32(_sum1, _s1);
}
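// _sum0/_sum1 now hold eight int32 partial sums: the int8 inputs were sign-extended to int16, and the mullo/mulhi pairs recombined into full 32-bit products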
__m128 _scale_in0;
__m128 _scale_in1;
{
__m128 _bottom_blob_int8_scales0 = _mm_loadu_ps((const float*)bottom_blob_int8_scales + g * 8);
__m128 _bottom_blob_int8_scales1 = _mm_loadu_ps((const float*)bottom_blob_int8_scales + g * 8 + 4);
__m128 _weight_data_int8_scales0 = _mm_loadu_ps((const float*)weight_data_int8_scales + g * 8);
__m128 _weight_data_int8_scales1 = _mm_loadu_ps((const float*)weight_data_int8_scales + g * 8 + 4);
_scale_in0 = _mm_rcp_ps(_mm_mul_ps(_bottom_blob_int8_scales0, _weight_data_int8_scales0));
_scale_in1 = _mm_rcp_ps(_mm_mul_ps(_bottom_blob_int8_scales1, _weight_data_int8_scales1));
__m128 _m0 = _mm_cmpneq_ps(_weight_data_int8_scales0, _mm_setzero_ps());
__m128 _m1 = _mm_cmpneq_ps(_weight_data_int8_scales1, _mm_setzero_ps());
_scale_in0 = _mm_and_ps(_scale_in0, _m0);
_scale_in1 = _mm_and_ps(_scale_in1, _m1);
}
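// scale_in approximates 1 / (input_scale * weight_scale) via _mm_rcp_ps, masked to zero for channels whose weight scale is zero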
__m128 _sumfp32_0 = _mm_mul_ps(_mm_cvtepi32_ps(_sum0), _scale_in0);
__m128 _sumfp32_1 = _mm_mul_ps(_mm_cvtepi32_ps(_sum1), _scale_in1);
if (bias_term)
{
__m128 _bias0 = _mm_loadu_ps((const float*)bias_data + g * 8);
__m128 _bias1 = _mm_loadu_ps((const float*)bias_data + g * 8 + 4);
_sumfp32_0 = _mm_add_ps(_sumfp32_0, _bias0);
_sumfp32_1 = _mm_add_ps(_sumfp32_1, _bias1);
}
_sumfp32_0 = activation_sse(_sumfp32_0, activation_type, activation_params);
_sumfp32_1 = activation_sse(_sumfp32_1, activation_type, activation_params);
if (use_int8_requantize)
{
// requantize (activation was already applied above)
__m128 _scale_out0 = _mm_loadu_ps((const float*)top_blob_int8_scales + g * 8);
__m128 _scale_out1 = _mm_loadu_ps((const float*)top_blob_int8_scales + g * 8 + 4);
_sumfp32_0 = _mm_mul_ps(_sumfp32_0, _scale_out0);
_sumfp32_1 = _mm_mul_ps(_sumfp32_1, _scale_out1);
int64_t _sum8 = float2int8_sse(_sumfp32_0, _sumfp32_1);
*(int64_t*)outptr_s8 = _sum8;
outptr_s8 += 8;
}
else
{
// dequantize (activation was already applied above)
_mm_storeu_ps(outptr_f32, _sumfp32_0);
_mm_storeu_ps(outptr_f32 + 4, _sumfp32_1);
outptr_f32 += 8;
}
}
}
}
}
}
#endif // __SSE2__
if (elempack == 1)
{
if (kernel_w == 3 && kernel_h == 3 && stride_w == 1 && stride_h == 1 && dilation_w == 1 && dilation_h == 1 && (activation_type == 0 || activation_type == 1))
{
if (use_int8_requantize)
{
std::vector<float> requantize_scales;
for (int g = 0; g < group; g++)
{
float scale_in;
if (weight_data_int8_scales[g] == 0)
scale_in = 0;
else
scale_in = 1.f / (bottom_blob_int8_scales[g] * weight_data_int8_scales[g]);
float scale_out = top_blob_int8_scales[g];
requantize_scales.push_back(scale_in);
requantize_scales.push_back(scale_out);
}
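// requantize_scales is interleaved as [scale_in(g), scale_out(g)] per group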
convdw3x3s1_int8_requant_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, requantize_scales, opt);
}
else
{
std::vector<float> dequantize_scales;
for (int g = 0; g < group; g++)
{
float top_rescale = 1.f / (bottom_blob_int8_scales[g] * weight_data_int8_scales[g]);
dequantize_scales.push_back(top_rescale);
}
convdw3x3s1_int8_dequant_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, dequantize_scales, opt);
}
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
}
else if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2 && (activation_type == 0 || activation_type == 1))
{
if (use_int8_requantize)
{
std::vector<float> requantize_scales;
for (int g = 0; g < group; g++)
{
float scale_in;
if (weight_data_int8_scales[g] == 0)
scale_in = 0;
else
scale_in = 1.f / (bottom_blob_int8_scales[g] * weight_data_int8_scales[g]);
float scale_out = top_blob_int8_scales[g];
requantize_scales.push_back(scale_in);
requantize_scales.push_back(scale_out);
}
convdw3x3s2_int8_requant_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, requantize_scales, opt);
}
else
{
std::vector<float> dequantize_scales;
for (int g = 0; g < group; g++)
{
float top_rescale = 1.f / (bottom_blob_int8_scales[g] * weight_data_int8_scales[g]);
dequantize_scales.push_back(top_rescale);
}
convdw3x3s2_int8_dequant_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, dequantize_scales, opt);
}
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
}
else
{
const int maxk = kernel_w * kernel_h;
// kernel offsets
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
int gap = w * dilation_h - kernel_w * dilation_w;
for (int i = 0; i < kernel_h; i++)
{
for (int j = 0; j < kernel_w; j++)
{
space_ofs[p1] = p2;
p1++;
p2 += dilation_w;
}
p2 += gap;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
signed char* outptr_s8 = top_blob.channel(g);
float* outptr_f32 = top_blob.channel(g);
const signed char* kptr = (const signed char*)weight_data_tm + maxk * g;
const Mat m = bottom_blob_bordered.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
int sum = 0;
const signed char* sptr = m.row<const signed char>(i * stride_h) + j * stride_w;
for (int k = 0; k < maxk; k++)
{
signed char val = sptr[space_ofs[k]];
signed char w = kptr[k];
sum += val * w;
}
float scale_in;
if (weight_data_int8_scales[g] == 0)
scale_in = 0;
else
scale_in = 1.f / (bottom_blob_int8_scales[g] * weight_data_int8_scales[g]);
float sumfp32 = sum * scale_in;
if (bias_term)
sumfp32 += bias_data[g];
sumfp32 = activation_ss(sumfp32, activation_type, activation_params);
if (use_int8_requantize)
{
// requantize
float scale_out = top_blob_int8_scales[g];
signed char sums8 = float2int8(sumfp32 * scale_out);
outptr_s8[0] = sums8;
outptr_s8 += 1;
}
else
{
// dequantize
outptr_f32[0] = sumfp32;
outptr_f32 += 1;
}
}
}
}
}
}
return 0;
}
bool use_int8_requantize = int8_scale_term > 100;
int out_elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
if (use_int8_requantize)
out_elempack = num_output % 8 == 0 ? 8 : 1;
else
out_elempack = num_output % 4 == 0 ? 4 : 1;
}
#endif // __SSE2__
size_t out_elemsize = use_int8_requantize ? 1u * out_elempack : 4u * out_elempack;
top_blob.create(outw, outh, num_output / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
// group convolution
const int channels_g = channels * elempack / group;
const int num_output_g = num_output / group;
int g_elempack = 1;
int out_g_elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
g_elempack = channels_g % 8 == 0 ? 8 : 1;
if (use_int8_requantize)
out_g_elempack = num_output_g % 8 == 0 ? 8 : 1;
else
out_g_elempack = num_output_g % 4 == 0 ? 4 : 1;
}
#endif // __SSE2__
// unpacking
Mat bottom_blob_bordered_unpacked = bottom_blob_bordered;
if (elempack > g_elempack)
{
Option opt_p = opt;
opt_p.blob_allocator = opt.workspace_allocator;
convert_packing(bottom_blob_bordered, bottom_blob_bordered_unpacked, g_elempack, opt_p);
}
Mat top_blob_unpacked = top_blob;
if (out_g_elempack < out_elempack)
{
top_blob_unpacked.create(outw, outh, num_output / out_g_elempack, out_elemsize / out_elempack * out_g_elempack, out_g_elempack, opt.workspace_allocator);
if (top_blob_unpacked.empty())
return -100;
}
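// run each group's channel slice through its own pre-built inner Convolution op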
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
const Mat bottom_blob_bordered_g = bottom_blob_bordered_unpacked.channel_range(channels_g * g / g_elempack, channels_g / g_elempack);
Mat top_blob_g = top_blob_unpacked.channel_range(num_output_g * g / out_g_elempack, num_output_g / out_g_elempack);
const ncnn::Layer* op = group_ops[g];
Option opt_g = opt;
opt_g.blob_allocator = top_blob_unpacked.allocator;
// forward
op->forward(bottom_blob_bordered_g, top_blob_g, opt_g);
}
// packing
if (out_g_elempack < out_elempack)
{
convert_packing(top_blob_unpacked, top_blob, out_elempack, opt);
}
else
{
top_blob = top_blob_unpacked;
}
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x318, %rsp # imm = 0x318
movq %rcx, 0x30(%rsp)
movq %rdx, 0x20(%rsp)
movq %rsi, %r15
movl 0x38(%rsi), %ecx
movl 0x18(%rsi), %r12d
movq 0x10(%rsi), %rsi
testl %r12d, %r12d
je 0x2975b9
leal (,%rsi,8), %eax
cltd
idivl %r12d
cmpl $0x8, %eax
sete %dl
jmp 0x2975bb
xorl %edx, %edx
movq (%rdi), %rax
movq -0x18(%rax), %r9
movq %rdi, %r8
movl 0xd4(%rdi,%r9), %ebp
movl 0xd8(%rdi,%r9), %ebx
decl %ebp
imull 0xdc(%rdi,%r9), %ebp
decl %ebx
imull 0xe0(%rdi,%r9), %ebx
movq 0x8(%r15), %rdi
vmovups (%r15), %xmm0
vmovaps %xmm0, 0x1e0(%rsp)
movq %rsi, 0x1f0(%rsp)
movl %r12d, 0x1f8(%rsp)
movq 0x20(%r15), %rsi
movq %rsi, 0x200(%rsp)
vmovdqu 0x28(%r15), %xmm0
vmovdqu %xmm0, 0x208(%rsp)
movl %ecx, 0x218(%rsp)
movq 0x40(%r15), %rsi
movq %rsi, 0x220(%rsp)
testq %rdi, %rdi
je 0x297646
lock
incl (%rdi)
movq (%r8), %rax
movq %r8, 0x10(%rsp)
testb %dl, %dl
je 0x29765e
movq 0x20(%rsp), %r15
movq 0x10(%rsp), %rcx
jmp 0x297770
imull %ecx, %r12d
movq -0x18(%rax), %rax
movq 0x10(%rsp), %r13
movl 0x108(%r13,%rax), %eax
movl %eax, 0x18(%rsp)
leaq 0x70(%rsp), %rdi
andq $0x0, 0x40(%rdi)
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, (%rdi)
vmovdqu %xmm0, 0xc(%rdi)
vmovdqa %xmm0, 0x20(%rdi)
vmovdqu %xmm0, 0x2c(%rdi)
xorl %r14d, %r14d
pushq $0x4
popq %rdx
movl %r12d, %esi
xorl %ecx, %ecx
callq 0x635fa
movl %r12d, %eax
cltd
idivl 0x18(%rsp)
movq 0x70(%rsp), %rcx
movq (%r13), %rdx
testl %eax, %eax
cmovlel %r14d, %eax
movq 0x10(%rsp), %r8
movq -0x18(%rdx), %rsi
movslq 0x108(%r8,%rsi), %rdi
cmpq %rdi, %r14
jge 0x2976f9
movq 0x240(%r8,%rsi), %rsi
vmovd (%rsi,%r14,4), %xmm0
movl %eax, %esi
subl $0x1, %esi
jb 0x2976f4
vmovd %xmm0, (%rcx)
addq $0x4, %rcx
jmp 0x2976e5
incq %r14
jmp 0x2976c4
movq 0x30(%rsp), %rax
vmovups (%rax), %zmm0
leaq 0x130(%rsp), %rcx
vmovups %zmm0, (%rcx)
movq 0x10(%rax), %rax
movq %rax, 0x8(%rcx)
leaq 0x1e0(%rsp), %rsi
leaq 0x70(%rsp), %rdx
movq %r15, %rdi
vzeroupper
callq 0x652e3
movq 0x78(%rsp), %rax
testq %rax, %rax
movq 0x20(%rsp), %r15
je 0x297768
lock
decl (%rax)
jne 0x297768
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x297760
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x297768
movq %rsi, %rdi
callq 0x5f3e0
movq 0x10(%rsp), %rcx
movq (%rcx), %rax
leaq 0x70(%rsp), %rdx
andq $0x0, 0x40(%rdx)
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, (%rdx)
vmovdqu %xmm0, 0xc(%rdx)
vmovdqa %xmm0, 0x20(%rdx)
vmovdqu %xmm0, 0x2c(%rdx)
movq -0x18(%rax), %rdi
addq %rcx, %rdi
leaq 0x1e0(%rsp), %rsi
movq 0x30(%rsp), %rcx
callq 0x287daa
pushq $-0x64
popq %rax
cmpq $0x0, 0x70(%rsp)
movl %eax, 0x48(%rsp)
je 0x298fc6
movslq 0xa8(%rsp), %r14
movq 0xb0(%rsp), %rax
imulq %r14, %rax
testq %rax, %rax
je 0x298fc6
notl %ebp
movl 0x88(%rsp), %r13d
movl 0x9c(%rsp), %r12d
addl %r12d, %ebp
movq 0x10(%rsp), %rdx
movq (%rdx), %rax
movq -0x18(%rax), %rcx
movl %ebp, %eax
movq %rdx, %rbp
cltd
idivl 0xe4(%rbp,%rcx)
movl %eax, %esi
notl %ebx
addl 0xa0(%rsp), %ebx
movl %ebx, %eax
cltd
idivl 0xe8(%rbp,%rcx)
movl %eax, %edx
movq %rsi, 0x18(%rsp)
incl %esi
movq %rdx, 0x60(%rsp)
leal 0x1(%rdx), %r10d
movl %r13d, %ebx
imull %r14d, %ebx
pushq $0x8
popq %r11
cmpl 0x108(%rbp,%rcx), %ebx
jne 0x297980
cmpl 0xd0(%rbp,%rcx), %ebx
jne 0x297980
movq 0x30(%rsp), %rdi
cmpb $0x0, 0x27(%rdi)
pushq $0x1
popq %rax
cmovel %eax, %r11d
testb $0x7, %bl
cmovnel %eax, %r11d
movl 0x10c(%rbp,%rcx), %eax
leal (,%r11,4), %r8d
movl %eax, 0x38(%rsp)
cmpl $0x65, %eax
cmovgel %r11d, %r8d
movl %ebx, %eax
cltd
idivl %r11d
movq 0x8(%rdi), %rcx
movq %rcx, (%rsp)
movq %r15, %rdi
movl %r10d, %edx
movl %eax, %ecx
movl %r11d, %r9d
callq 0x628f2
cmpq $0x0, (%r15)
je 0x298fc6
movslq 0x38(%r15), %rax
imulq 0x40(%r15), %rax
testq %rax, %rax
je 0x298fc6
cmpl $0x1, %r13d
je 0x297f2e
movl $0x0, 0x48(%rsp)
cmpl $0x8, %r13d
jne 0x298fc6
movq (%rbp), %rax
movq -0x18(%rax), %rax
movslq 0xd4(%rbp,%rax), %rcx
movslq 0xd8(%rbp,%rax), %r15
imulq %rcx, %r15
leaq 0x130(%rsp), %rdi
leaq 0xe0(%rsp), %rdx
movq %r15, %rsi
callq 0x73bbe
movq (%rbp), %rcx
movq -0x18(%rcx), %rdx
imull 0xe0(%rbp,%rdx), %r12d
movq 0x130(%rsp), %rax
movl 0xdc(%rbp,%rdx), %esi
imull 0xd4(%rbp,%rdx), %esi
subl %esi, %r12d
xorl %esi, %esi
xorl %edi, %edi
xorl %r8d, %r8d
cmpl 0xd8(%rbp,%rdx), %esi
jge 0x298067
movslq %r8d, %r8
leaq (%rax,%r8,4), %r10
xorl %r9d, %r9d
cmpl 0xd4(%rbp,%rdx), %r9d
jge 0x297976
movl %edi, (%r10,%r9,4)
movq -0x18(%rcx), %rdx
addl 0xdc(%rbp,%rdx), %edi
incq %r9
jmp 0x297958
addl %r12d, %edi
incl %esi
addq %r9, %r8
jmp 0x297941
movl 0xd0(%rbp,%rcx), %eax
movl 0x10c(%rbp,%rcx), %r14d
movq 0x30(%rsp), %rcx
cmpb $0x1, 0x27(%rcx)
jne 0x2979ac
cmpl $0x65, %r14d
jl 0x2979b2
testb $0x7, %al
pushq $0x1
popq %r9
cmovel %r11d, %r9d
jmp 0x2979c0
pushq $0x1
popq %r9
jmp 0x2979c0
xorl %ecx, %ecx
testb $0x3, %al
sete %cl
leal (%rcx,%rcx,2), %r9d
incl %r9d
leal (,%r9,4), %r12d
cmpl $0x65, %r14d
cmovgel %r9d, %r12d
cltd
idivl %r9d
movq 0x30(%rsp), %rcx
movq 0x8(%rcx), %rcx
movq %rcx, (%rsp)
movq %r15, %rdi
movl %esi, 0x28(%rsp)
movl %r10d, 0x40(%rsp)
movl %r10d, %edx
movl %eax, %ecx
movq %r12, %r8
movq %r9, 0x38(%rsp)
callq 0x628f2
cmpq $0x0, (%r15)
je 0x298fc6
movslq 0x38(%r15), %rax
imulq 0x40(%r15), %rax
testq %rax, %rax
je 0x298fc6
movq (%rbp), %rax
movq -0x18(%rax), %rax
movl 0xd0(%rbp,%rax), %ecx
movl 0x108(%rbp,%rax), %esi
movl %ebx, %eax
cltd
idivl %esi
movl %eax, %edi
movl %ecx, %eax
cltd
idivl %esi
movl %eax, %ebp
movq 0x30(%rsp), %rax
cmpb $0x1, 0x27(%rax)
jne 0x297a73
testb $0x7, %dil
pushq $0x1
popq %rbx
movl %ebx, %r15d
pushq $0x8
popq %rax
cmovel %eax, %r15d
cmpl $0x65, %r14d
movl 0x28(%rsp), %esi
movl 0x40(%rsp), %r9d
jl 0x297a85
testb $0x7, %bpl
cmovel %eax, %ebx
jmp 0x297a93
pushq $0x1
popq %r15
movl %r15d, %ebx
movl 0x28(%rsp), %esi
movl 0x40(%rsp), %r9d
jmp 0x297a93
xorl %eax, %eax
testb $0x3, %bpl
sete %al
leal (%rax,%rax,2), %ebx
incl %ebx
movq 0x78(%rsp), %rax
vmovaps 0x70(%rsp), %xmm0
vmovaps %xmm0, 0x130(%rsp)
movq 0x80(%rsp), %rcx
movq %rcx, 0x140(%rsp)
movl 0x88(%rsp), %ecx
movl %ecx, 0x148(%rsp)
movq 0x90(%rsp), %rcx
movq %rcx, 0x150(%rsp)
vmovups 0x98(%rsp), %xmm0
vmovups %xmm0, 0x158(%rsp)
movl 0xa8(%rsp), %ecx
movl %ecx, 0x168(%rsp)
movq 0xb0(%rsp), %rcx
movq %rcx, 0x170(%rsp)
testq %rax, %rax
je 0x297b0d
lock
incl (%rax)
cmpl %r15d, %r13d
movl %edi, 0x18(%rsp)
jle 0x297b5c
movq 0x30(%rsp), %rax
vmovups (%rax), %zmm0
leaq 0xe0(%rsp), %rcx
vmovups %zmm0, (%rcx)
movq 0x10(%rax), %rax
movq %rax, 0x8(%rcx)
leaq 0x70(%rsp), %rdi
leaq 0x130(%rsp), %rsi
movl %r15d, %edx
vzeroupper
callq 0x64e3b
movl 0x18(%rsp), %edi
movl 0x28(%rsp), %esi
movl 0x40(%rsp), %r9d
movq 0x20(%rsp), %rdx
movq 0x8(%rdx), %rax
vmovups (%rdx), %xmm0
vmovaps %xmm0, 0xe0(%rsp)
movq 0x10(%rdx), %rcx
movq %rcx, 0xf0(%rsp)
movl 0x18(%rdx), %ecx
movl %ecx, 0xf8(%rsp)
movq 0x20(%rdx), %rcx
movq %rcx, 0x100(%rsp)
vmovups 0x28(%rdx), %xmm0
vmovups %xmm0, 0x108(%rsp)
movl 0x38(%rdx), %ecx
movl %ecx, 0x118(%rsp)
movq 0x40(%rdx), %rcx
movq %rcx, 0x120(%rsp)
testq %rax, %rax
je 0x297bc0
lock
incl (%rax)
cmpl 0x38(%rsp), %ebx
jae 0x297c3f
movq 0x10(%rsp), %rcx
movq (%rcx), %rax
movq -0x18(%rax), %rax
movl 0xd0(%rcx,%rax), %eax
cltd
idivl %ebx
movl %eax, %ecx
movzbl %r12b, %eax
divb 0x38(%rsp)
movzbl %al, %r8d
imull %ebx, %r8d
movq 0x30(%rsp), %rax
movq 0x10(%rax), %rax
movq %rax, (%rsp)
leaq 0xe0(%rsp), %rdi
movl %r9d, %edx
movl %ebx, %r9d
callq 0x628f2
movl 0x18(%rsp), %edi
pushq $-0x64
popq %r14
cmpq $0x0, 0xe0(%rsp)
je 0x298f53
movslq 0x118(%rsp), %rax
imulq 0x120(%rsp), %rax
testq %rax, %rax
je 0x298f53
xorl %r12d, %r12d
xorl %r13d, %r13d
xorl %r14d, %r14d
movq 0x10(%rsp), %r8
movq (%r8), %rax
movq -0x18(%rax), %rax
movslq 0x108(%r8,%rax), %rax
cmpq %rax, %r14
jge 0x297eaa
movl %r12d, %eax
cltd
idivl %r15d
movl %eax, %ecx
movl %edi, %eax
cltd
idivl %r15d
movslq %ecx, %rdx
imulq 0x170(%rsp), %rdx
movq 0x140(%rsp), %rcx
imulq %rcx, %rdx
addq 0x130(%rsp), %rdx
movq %rdx, 0x190(%rsp)
andq $0x0, 0x198(%rsp)
movl 0x148(%rsp), %edx
movq 0x150(%rsp), %rsi
movq %rcx, 0x1a0(%rsp)
movl %edx, 0x1a8(%rsp)
movq %rsi, 0x1b0(%rsp)
movl %eax, 0x1c8(%rsp)
vmovups 0x158(%rsp), %xmm0
movslq 0x164(%rsp), %rax
movslq 0x15c(%rsp), %rdx
movslq 0x160(%rsp), %rsi
imulq %rdx, %rsi
imulq %rcx, %rax
imulq %rsi, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rcx
movq %rax, 0x1d0(%rsp)
vmovups %xmm0, 0x1b8(%rsp)
movl %r13d, %eax
cltd
idivl %ebx
movl %eax, %ecx
movl %ebp, %eax
cltd
idivl %ebx
movslq %ecx, %rdx
imulq 0x120(%rsp), %rdx
movq 0xf0(%rsp), %rcx
imulq %rcx, %rdx
addq 0xe0(%rsp), %rdx
movq %rdx, 0x228(%rsp)
andq $0x0, 0x230(%rsp)
movl 0xf8(%rsp), %edx
movq 0x100(%rsp), %rsi
movq %rcx, 0x238(%rsp)
movl %edx, 0x240(%rsp)
movq %rsi, 0x248(%rsp)
movl %eax, 0x260(%rsp)
vmovups 0x108(%rsp), %xmm0
movslq 0x114(%rsp), %rax
movslq 0x10c(%rsp), %rdx
movslq 0x110(%rsp), %rdi
imulq %rdx, %rdi
imulq %rcx, %rax
imulq %rdi, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rcx
movq %rax, 0x268(%rsp)
vmovups %xmm0, 0x250(%rsp)
movq 0x10(%r8), %rax
movq (%rax,%r14,8), %rdi
movq 0x30(%rsp), %rax
vmovups (%rax), %zmm0
vmovups %zmm0, 0x2d0(%rsp)
movq %rsi, 0x2d8(%rsp)
movq (%rdi), %rax
leaq 0x190(%rsp), %rsi
leaq 0x228(%rsp), %rdx
leaq 0x2d0(%rsp), %rcx
vzeroupper
callq *0x38(%rax)
movq 0x230(%rsp), %rax
testq %rax, %rax
movl 0x18(%rsp), %edi
je 0x297e5d
lock
decl (%rax)
jne 0x297e5d
movq 0x228(%rsp), %rsi
movq 0x248(%rsp), %rdi
testq %rdi, %rdi
je 0x297e51
movq (%rdi), %rax
callq *0x18(%rax)
movl 0x18(%rsp), %edi
jmp 0x297e5d
movq %rsi, %rdi
callq 0x5f3e0
movl 0x18(%rsp), %edi
movq 0x198(%rsp), %rax
testq %rax, %rax
je 0x297e9c
lock
decl (%rax)
jne 0x297e9c
movq 0x190(%rsp), %rsi
movq 0x1b0(%rsp), %rdi
testq %rdi, %rdi
je 0x297e90
movq (%rdi), %rax
callq *0x18(%rax)
movl 0x18(%rsp), %edi
jmp 0x297e9c
movq %rsi, %rdi
callq 0x5f3e0
movl 0x18(%rsp), %edi
incq %r14
addl %ebp, %r13d
addl %edi, %r12d
jmp 0x297c48
movq 0x38(%rsp), %rdx
xorl %r14d, %r14d
cmpl %edx, %ebx
jae 0x297ed2
leaq 0xe0(%rsp), %rdi
movq 0x20(%rsp), %rsi
movq 0x30(%rsp), %rcx
callq 0x64e3b
jmp 0x298f53
leaq 0xe0(%rsp), %rax
movq 0x20(%rsp), %rcx
cmpq %rcx, %rax
je 0x298f53
movq 0xe8(%rsp), %rax
testq %rax, %rax
je 0x297ef8
lock
incl (%rax)
movq 0x8(%rcx), %rax
testq %rax, %rax
je 0x298efb
lock
decl (%rax)
jne 0x298efb
movq 0x20(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
je 0x298ef3
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x298efb
movq (%rbp), %rax
movq -0x18(%rax), %rdx
movl 0xd4(%rbp,%rdx), %ecx
movl 0xd8(%rbp,%rdx), %r13d
movl %ecx, %esi
xorl $0x3, %esi
movl %r13d, %edi
xorl $0x3, %edi
orl %esi, %edi
jne 0x298ab0
cmpl $0x1, 0xe4(%rbp,%rdx)
jne 0x29899d
cmpl $0x1, 0xe8(%rbp,%rdx)
jne 0x29899d
cmpl $0x1, 0xdc(%rbp,%rdx)
jne 0x29899d
cmpl $0x1, 0xe0(%rbp,%rdx)
jne 0x29899d
cmpl $0x1, 0x110(%rbp,%rdx)
ja 0x29899d
movl $0x0, 0x48(%rsp)
cmpl $0x65, 0x38(%rsp)
jl 0x299046
vpxor %xmm0, %xmm0, %xmm0
leaq 0x130(%rsp), %r13
vmovdqa %xmm0, (%r13)
andq $0x0, 0x10(%r13)
xorl %ebx, %ebx
movq %rbp, %rcx
leaq 0xe0(%rsp), %rbp
leaq 0x190(%rsp), %r12
movq -0x18(%rax), %r14
movslq 0x108(%rcx,%r14), %rax
cmpq %rax, %rbx
jge 0x299347
movq 0x1f8(%rcx,%r14), %rax
vmovss (%rax,%rbx,4), %xmm1
vpxor %xmm0, %xmm0, %xmm0
vucomiss %xmm1, %xmm0
je 0x298022
movq 0x240(%rcx,%r14), %rax
vmulss (%rax,%rbx,4), %xmm1, %xmm0
vmovss 0x156c6a(%rip), %xmm1 # 0x3eec88
vdivss %xmm0, %xmm1, %xmm0
vmovss %xmm0, 0xe0(%rsp)
movq 0x288(%rcx,%r14), %rax
vmovd (%rax,%rbx,4), %xmm0
vmovd %xmm0, 0x190(%rsp)
movq %r13, %rdi
movq %rbp, %rsi
callq 0x1ea12c
movq %r13, %rdi
movq %r12, %rsi
callq 0x1ea12c
incq %rbx
movq 0x10(%rsp), %rcx
movq (%rcx), %rax
jmp 0x297fdd
leal (,%r15,8), %ecx
movl %ecx, 0x40(%rsp)
xorl %edx, %edx
testl %r15d, %r15d
cmovlel %edx, %r15d
testl %r14d, %r14d
cmovlel %edx, %r14d
vpxor %xmm0, %xmm0, %xmm0
vbroadcastss 0x159124(%rip), %xmm1 # 0x3f11b4
vpbroadcastd 0x155f7b(%rip), %xmm2 # 0x3ee014
vpbroadcastw 0x1604fd(%rip), %xmm25 # 0x3f85a0
vpbroadcastw 0x1604f5(%rip), %xmm26 # 0x3f85a2
vbroadcastss 0x159101(%rip), %xmm27 # 0x3f11b8
vbroadcastss 0x1590fb(%rip), %xmm28 # 0x3f11bc
vbroadcastss 0x1590f6(%rip), %xmm7 # 0x3f11c0
vbroadcastss 0x155f41(%rip), %xmm8 # 0x3ee014
vbroadcastss 0x156bac(%rip), %xmm9 # 0x3eec88
vbroadcastss 0x1590df(%rip), %xmm10 # 0x3f11c4
vbroadcastss 0x1590da(%rip), %xmm11 # 0x3f11c8
vbroadcastss 0x1590d8(%rip), %xmm24 # 0x3f11d0
vbroadcastss 0x1590cb(%rip), %xmm13 # 0x3f11cc
vbroadcastss 0x1590ca(%rip), %xmm14 # 0x3f11d4
vbroadcastss 0x1590c5(%rip), %xmm15 # 0x3f11d8
vbroadcastss 0x1590bf(%rip), %xmm16 # 0x3f11dc
vpbroadcastd 0x156b61(%rip), %xmm17 # 0x3eec88
xorl %esi, %esi
movq %r14, 0x28(%rsp)
vbroadcastss 0x1590bc(%rip), %xmm20 # 0x3f11f4
vbroadcastss 0x1590ba(%rip), %xmm23 # 0x3f11fc
cmpq %r14, %rsi
je 0x298ed9
movq 0x20(%rsp), %rcx
movq 0x10(%rcx), %r8
imulq %rsi, %r8
imulq 0x40(%rcx), %r8
movq %rdx, 0x30(%rsp)
movslq %edx, %r9
addq (%rcx), %r8
movslq 0x9c(%rsp), %r10
movq 0xb0(%rsp), %r11
imulq %rsi, %r11
movq 0x80(%rsp), %rcx
imulq %rcx, %r11
addq 0x70(%rsp), %r11
imulq %rcx, %r10
movq %rsi, 0x48(%rsp)
leaq (,%rsi,8), %rbx
addq 0x28(%rbp), %r9
xorl %ecx, %ecx
movq %r8, %rdx
cmpl 0x60(%rsp), %ecx
jg 0x298982
movq (%rbp), %rdi
xorl %r13d, %r13d
cmpl 0x18(%rsp), %r13d
jg 0x29897b
movq -0x18(%rdi), %rsi
movslq 0xe8(%rbp,%rsi), %r14
movslq %ecx, %r12
imulq %r14, %r12
imulq %r10, %r12
movl 0xe4(%rbp,%rsi), %ebp
imull %r13d, %ebp
shll $0x3, %ebp
movslq %ebp, %r14
addq %r11, %r14
addq %r12, %r14
vpxor %xmm3, %xmm3, %xmm3
xorl %r12d, %r12d
vpxor %xmm4, %xmm4, %xmm4
cmpq %r12, %r15
je 0x29823f
movslq (%rax,%r12,4), %rbp
vmovq (%r14,%rbp,8), %xmm5
vpcmpgtb %xmm5, %xmm0, %xmm6
vpunpcklbw %xmm6, %xmm5, %xmm5 # xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3],xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7]
vmovq (%r9,%r12,8), %xmm6
vpcmpgtb %xmm6, %xmm0, %xmm12
vpunpcklbw %xmm12, %xmm6, %xmm6 # xmm6 = xmm6[0],xmm12[0],xmm6[1],xmm12[1],xmm6[2],xmm12[2],xmm6[3],xmm12[3],xmm6[4],xmm12[4],xmm6[5],xmm12[5],xmm6[6],xmm12[6],xmm6[7],xmm12[7]
vpmullw %xmm5, %xmm6, %xmm12
vpmulhw %xmm6, %xmm5, %xmm5
vpunpcklwd %xmm5, %xmm12, %xmm6 # xmm6 = xmm12[0],xmm5[0],xmm12[1],xmm5[1],xmm12[2],xmm5[2],xmm12[3],xmm5[3]
vpunpckhwd %xmm5, %xmm12, %xmm5 # xmm5 = xmm12[4],xmm5[4],xmm12[5],xmm5[5],xmm12[6],xmm5[6],xmm12[7],xmm5[7]
vpaddd %xmm6, %xmm4, %xmm4
vpaddd %xmm5, %xmm3, %xmm3
incq %r12
jmp 0x2981fc
movq 0x10(%rsp), %rbp
movq 0x1f8(%rbp,%rsi), %r14
movq 0x240(%rbp,%rsi), %r12
vmovups (%r14,%rbx,4), %xmm21
vmovups 0x10(%r14,%rbx,4), %xmm22
vmulps (%r12,%rbx,4), %xmm21, %xmm5
vmulps 0x10(%r12,%rbx,4), %xmm22, %xmm6
vrcpps %xmm5, %xmm5
vrcpps %xmm6, %xmm6
vcmpneqps %xmm0, %xmm21, %k1
vcmpneqps %xmm0, %xmm22, %k2
vmovaps %xmm5, %xmm5 {%k1} {z}
vmovaps %xmm6, %xmm6 {%k2} {z}
vcvtdq2ps %xmm4, %xmm4
vmulps %xmm4, %xmm5, %xmm22
vcvtdq2ps %xmm3, %xmm3
vmulps %xmm3, %xmm6, %xmm21
cmpl $0x0, 0x100(%rbp,%rsi)
je 0x2982c9
movq 0x1b0(%rbp,%rsi), %r14
vaddps (%r14,%rbx,4), %xmm22, %xmm22
vaddps 0x10(%r14,%rbx,4), %xmm21, %xmm21
movl 0x110(%rbp,%rsi), %r14d
decl %r14d
cmpl $0x5, %r14d
ja 0x2988fa
leaq 0x1602d7(%rip), %r12 # 0x3f85bc
movslq (%r12,%r14,4), %r14
addq %r12, %r14
vmaxps %xmm0, %xmm22, %xmm3
vmaxps %xmm0, %xmm21, %xmm4
jmpq *%r14
movq 0x118(%rbp,%rsi), %r14
vbroadcastss (%r14), %xmm5
vminps %xmm0, %xmm22, %xmm6
vfmadd231ps %xmm6, %xmm5, %xmm3 # xmm3 = (xmm5 * xmm6) + xmm3
vminps %xmm0, %xmm21, %xmm6
vfmadd231ps %xmm6, %xmm5, %xmm4 # xmm4 = (xmm5 * xmm6) + xmm4
jmp 0x298906
vminps %xmm27, %xmm22, %xmm3
vmaxps %xmm28, %xmm3, %xmm3
vmovaps %xmm7, %xmm4
vfmadd213ps %xmm8, %xmm3, %xmm4 # xmm4 = (xmm3 * xmm4) + xmm8
vcvttps2dq %xmm4, %xmm5
vcvtdq2ps %xmm5, %xmm5
vcmpltps %xmm5, %xmm4, %k1
vsubps %xmm9, %xmm5, %xmm5 {%k1}
vfmsub231ps %xmm10, %xmm5, %xmm3 # xmm3 = (xmm5 * xmm10) - xmm3
vfnmsub231ps %xmm11, %xmm5, %xmm3 # xmm3 = -(xmm5 * xmm11) - xmm3
vmulps %xmm3, %xmm3, %xmm4
vmovaps %xmm13, %xmm6
vfmadd213ps %xmm24, %xmm3, %xmm6 # xmm6 = (xmm3 * xmm6) + xmm24
vfmadd213ps %xmm14, %xmm3, %xmm6 # xmm6 = (xmm3 * xmm6) + xmm14
vfmadd213ps %xmm15, %xmm3, %xmm6 # xmm6 = (xmm3 * xmm6) + xmm15
vfmadd213ps %xmm16, %xmm3, %xmm6 # xmm6 = (xmm3 * xmm6) + xmm16
vfmadd213ps %xmm8, %xmm3, %xmm6 # xmm6 = (xmm3 * xmm6) + xmm8
vfmadd213ps %xmm3, %xmm4, %xmm6 # xmm6 = (xmm4 * xmm6) + xmm3
vaddps %xmm6, %xmm9, %xmm3
vcvttps2dq %xmm5, %xmm4
vpslld $0x17, %xmm4, %xmm4
vpaddd %xmm17, %xmm4, %xmm4
vfmadd213ps %xmm9, %xmm3, %xmm4 # xmm4 = (xmm3 * xmm4) + xmm9
vcmpleps %xmm0, %xmm4, %k1
vbroadcastss 0x158e38(%rip), %xmm29 # 0x3f11e0
vmaxps %xmm29, %xmm4, %xmm3
vpsrld $0x17, %xmm3, %xmm4
vpbroadcastd 0x158e27(%rip), %xmm30 # 0x3f11e4
vpternlogd $0xec, %xmm30, %xmm2, %xmm3
vmovaps %xmm1, %xmm30
vmovaps %xmm13, %xmm1
vmovdqa64 %xmm17, %xmm13
vmovaps %xmm16, %xmm17
vmovaps %xmm15, %xmm16
vmovaps %xmm14, %xmm15
vmovaps %xmm7, %xmm14
vmovaps %xmm24, %xmm7
vpbroadcastd 0x158def(%rip), %xmm24 # 0x3f11e8
vpaddd %xmm24, %xmm4, %xmm4
vcvtdq2ps %xmm4, %xmm4
vbroadcastss 0x158ddf(%rip), %xmm31 # 0x3f11ec
vcmpltps %xmm31, %xmm3, %k2
vbroadcastss 0x158dd2(%rip), %xmm18 # 0x3f11f0
vaddps %xmm18, %xmm3, %xmm5
vsubps %xmm9, %xmm4, %xmm4 {%k2}
vaddps %xmm3, %xmm5, %xmm5 {%k2}
vmulps %xmm5, %xmm5, %xmm3
vmovaps %xmm20, %xmm6
vbroadcastss 0x158db4(%rip), %xmm19 # 0x3f11f8
vfmadd213ps %xmm19, %xmm5, %xmm6 # xmm6 = (xmm5 * xmm6) + xmm19
vfmadd213ps %xmm23, %xmm5, %xmm6 # xmm6 = (xmm5 * xmm6) + xmm23
vbroadcastss 0x158da6(%rip), %xmm18 # 0x3f1200
vfmadd213ps %xmm18, %xmm5, %xmm6 # xmm6 = (xmm5 * xmm6) + xmm18
vbroadcastss 0x158d9a(%rip), %xmm19 # 0x3f1204
vfmadd213ps %xmm19, %xmm5, %xmm6 # xmm6 = (xmm5 * xmm6) + xmm19
vbroadcastss 0x158d8e(%rip), %xmm24 # 0x3f1208
vfmadd213ps %xmm24, %xmm5, %xmm6 # xmm6 = (xmm5 * xmm6) + xmm24
vbroadcastss 0x158d82(%rip), %xmm29 # 0x3f120c
vfmadd213ps %xmm29, %xmm5, %xmm6 # xmm6 = (xmm5 * xmm6) + xmm29
vbroadcastss 0x158d76(%rip), %xmm31 # 0x3f1210
vfmadd213ps %xmm31, %xmm5, %xmm6 # xmm6 = (xmm5 * xmm6) + xmm31
vbroadcastss 0x158d6b(%rip), %xmm12 # 0x3f1214
vfmadd213ps %xmm12, %xmm5, %xmm6 # xmm6 = (xmm5 * xmm6) + xmm12
vmulps %xmm5, %xmm3, %xmm12
vmulps %xmm6, %xmm12, %xmm6
vfmadd231ps %xmm11, %xmm4, %xmm6 # xmm6 = (xmm4 * xmm11) + xmm6
vfmsub231ps %xmm3, %xmm8, %xmm6 # xmm6 = (xmm8 * xmm3) - xmm6
vsubps %xmm5, %xmm6, %xmm3
vfmsub231ps %xmm4, %xmm10, %xmm3 # xmm3 = (xmm10 * xmm4) - xmm3
vbroadcastss 0x15a246(%rip), %xmm4 # 0x3f2718
vmulps %xmm4, %xmm3, %xmm3
vbroadcastss 0x159421(%rip), %xmm4 # 0x3f1900
vmovaps %xmm4, %xmm3 {%k1}
vminps %xmm27, %xmm3, %xmm3
vmaxps %xmm28, %xmm3, %xmm3
vmovaps %xmm14, %xmm4
vfmadd213ps %xmm8, %xmm3, %xmm4 # xmm4 = (xmm3 * xmm4) + xmm8
vcvttps2dq %xmm4, %xmm5
vcvtdq2ps %xmm5, %xmm5
vcmpltps %xmm5, %xmm4, %k1
vsubps %xmm9, %xmm5, %xmm5 {%k1}
vfmsub231ps %xmm10, %xmm5, %xmm3 # xmm3 = (xmm5 * xmm10) - xmm3
vfnmsub231ps %xmm11, %xmm5, %xmm3 # xmm3 = -(xmm5 * xmm11) - xmm3
vmulps %xmm3, %xmm3, %xmm4
vmovaps %xmm1, %xmm6
vfmadd213ps %xmm7, %xmm3, %xmm6 # xmm6 = (xmm3 * xmm6) + xmm7
vfmadd213ps %xmm15, %xmm3, %xmm6 # xmm6 = (xmm3 * xmm6) + xmm15
vfmadd213ps %xmm16, %xmm3, %xmm6 # xmm6 = (xmm3 * xmm6) + xmm16
vfmadd213ps %xmm17, %xmm3, %xmm6 # xmm6 = (xmm3 * xmm6) + xmm17
vfmadd213ps %xmm8, %xmm3, %xmm6 # xmm6 = (xmm3 * xmm6) + xmm8
vfmadd213ps %xmm3, %xmm4, %xmm6 # xmm6 = (xmm4 * xmm6) + xmm3
vaddps %xmm6, %xmm9, %xmm4
vcvttps2dq %xmm5, %xmm3
vpslld $0x17, %xmm3, %xmm3
vpaddd %xmm3, %xmm13, %xmm3
vfmadd213ps %xmm9, %xmm4, %xmm3 # xmm3 = (xmm4 * xmm3) + xmm9
vrcpps %xmm3, %xmm4
vaddps %xmm4, %xmm4, %xmm5
vbroadcastss 0x15a1a0(%rip), %xmm6 # 0x3f2708
vfmsub213ps %xmm6, %xmm5, %xmm3 # xmm3 = (xmm5 * xmm3) - xmm6
vfnmadd213ps %xmm5, %xmm4, %xmm3 # xmm3 = -(xmm4 * xmm3) + xmm5
vminps %xmm27, %xmm21, %xmm4
vmaxps %xmm28, %xmm4, %xmm4
vmovaps %xmm14, %xmm5
vfmadd213ps %xmm8, %xmm4, %xmm5 # xmm5 = (xmm4 * xmm5) + xmm8
vcvttps2dq %xmm5, %xmm6
vcvtdq2ps %xmm6, %xmm6
vcmpltps %xmm6, %xmm5, %k1
vsubps %xmm9, %xmm6, %xmm6 {%k1}
vfmsub231ps %xmm10, %xmm6, %xmm4 # xmm4 = (xmm6 * xmm10) - xmm4
vfnmsub231ps %xmm11, %xmm6, %xmm4 # xmm4 = -(xmm6 * xmm11) - xmm4
vmulps %xmm4, %xmm4, %xmm5
vmovaps %xmm1, %xmm12
vfmadd213ps %xmm7, %xmm4, %xmm12 # xmm12 = (xmm4 * xmm12) + xmm7
vfmadd213ps %xmm15, %xmm4, %xmm12 # xmm12 = (xmm4 * xmm12) + xmm15
vfmadd213ps %xmm16, %xmm4, %xmm12 # xmm12 = (xmm4 * xmm12) + xmm16
vfmadd213ps %xmm17, %xmm4, %xmm12 # xmm12 = (xmm4 * xmm12) + xmm17
vfmadd213ps %xmm8, %xmm4, %xmm12 # xmm12 = (xmm4 * xmm12) + xmm8
vfmadd213ps %xmm4, %xmm5, %xmm12 # xmm12 = (xmm5 * xmm12) + xmm4
vaddps %xmm9, %xmm12, %xmm4
vcvttps2dq %xmm6, %xmm5
vpslld $0x17, %xmm5, %xmm5
vpaddd %xmm5, %xmm13, %xmm5
vfmadd213ps %xmm9, %xmm4, %xmm5 # xmm5 = (xmm4 * xmm5) + xmm9
vcmpleps %xmm0, %xmm5, %k1
vmaxps 0x158bea(%rip){1to4}, %xmm5, %xmm4 # 0x3f11e0
vpsrld $0x17, %xmm4, %xmm5
vpternlogd $0xec, 0x158bde(%rip){1to4}, %xmm2, %xmm4 # 0x3f11e4
vcmpltps 0x158bdb(%rip){1to4}, %xmm4, %k2 # 0x3f11ec
vaddps 0x158bd5(%rip){1to4}, %xmm4, %xmm6 # 0x3f11f0
vaddps %xmm4, %xmm6, %xmm6 {%k2}
vmulps %xmm6, %xmm6, %xmm4
vmovaps %xmm20, %xmm12
vfmadd213ps 0x158bc3(%rip){1to4}, %xmm6, %xmm12 # xmm12 = (xmm6 * xmm12) + mem
vfmadd213ps %xmm23, %xmm6, %xmm12 # xmm12 = (xmm6 * xmm12) + xmm23
vfmadd213ps %xmm18, %xmm6, %xmm12 # xmm12 = (xmm6 * xmm12) + xmm18
vfmadd213ps %xmm19, %xmm6, %xmm12 # xmm12 = (xmm6 * xmm12) + xmm19
vfmadd213ps %xmm24, %xmm6, %xmm12 # xmm12 = (xmm6 * xmm12) + xmm24
vfmadd213ps %xmm29, %xmm6, %xmm12 # xmm12 = (xmm6 * xmm12) + xmm29
vfmadd213ps %xmm31, %xmm6, %xmm12 # xmm12 = (xmm6 * xmm12) + xmm31
vfmadd213ps 0x158bb1(%rip){1to4}, %xmm6, %xmm12 # xmm12 = (xmm6 * xmm12) + mem
vmulps %xmm6, %xmm4, %xmm29
vmulps %xmm12, %xmm29, %xmm12
vpaddd 0x158b6f(%rip){1to4}, %xmm5, %xmm5 # 0x3f11e8
vmovaps %xmm7, %xmm24
vmovaps %xmm14, %xmm7
vmovaps %xmm15, %xmm14
vmovaps %xmm16, %xmm15
vmovaps %xmm17, %xmm16
vmovdqa64 %xmm13, %xmm17
vmovaps %xmm1, %xmm13
vmovaps %xmm30, %xmm1
vcvtdq2ps %xmm5, %xmm5
vsubps %xmm9, %xmm5, %xmm5 {%k2}
vfmadd231ps %xmm11, %xmm5, %xmm12 # xmm12 = (xmm5 * xmm11) + xmm12
vfmsub231ps %xmm4, %xmm8, %xmm12 # xmm12 = (xmm8 * xmm4) - xmm12
vsubps %xmm6, %xmm12, %xmm4
vfmsub231ps %xmm5, %xmm10, %xmm4 # xmm4 = (xmm10 * xmm5) - xmm4
vmulps 0x15a04d(%rip){1to4}, %xmm4, %xmm4 # 0x3f2718
vbroadcastss 0x15922c(%rip), %xmm5 # 0x3f1900
vmovaps %xmm5, %xmm4 {%k1}
vminps %xmm27, %xmm4, %xmm4
vmaxps %xmm28, %xmm4, %xmm4
vmovaps %xmm7, %xmm5
vfmadd213ps %xmm8, %xmm4, %xmm5 # xmm5 = (xmm4 * xmm5) + xmm8
vcvttps2dq %xmm5, %xmm6
vcvtdq2ps %xmm6, %xmm6
vcmpltps %xmm6, %xmm5, %k1
vsubps %xmm9, %xmm6, %xmm6 {%k1}
vfmsub231ps %xmm10, %xmm6, %xmm4 # xmm4 = (xmm6 * xmm10) - xmm4
vfnmsub231ps %xmm11, %xmm6, %xmm4 # xmm4 = -(xmm6 * xmm11) - xmm4
vmulps %xmm4, %xmm4, %xmm5
vmovaps %xmm13, %xmm12
vfmadd213ps %xmm24, %xmm4, %xmm12 # xmm12 = (xmm4 * xmm12) + xmm24
vfmadd213ps %xmm14, %xmm4, %xmm12 # xmm12 = (xmm4 * xmm12) + xmm14
vfmadd213ps %xmm15, %xmm4, %xmm12 # xmm12 = (xmm4 * xmm12) + xmm15
vfmadd213ps %xmm16, %xmm4, %xmm12 # xmm12 = (xmm4 * xmm12) + xmm16
vfmadd213ps %xmm8, %xmm4, %xmm12 # xmm12 = (xmm4 * xmm12) + xmm8
vfmadd213ps %xmm4, %xmm5, %xmm12 # xmm12 = (xmm5 * xmm12) + xmm4
vaddps %xmm9, %xmm12, %xmm5
vcvttps2dq %xmm6, %xmm4
vpslld $0x17, %xmm4, %xmm4
vpaddd %xmm17, %xmm4, %xmm4
vfmadd213ps %xmm9, %xmm5, %xmm4 # xmm4 = (xmm5 * xmm4) + xmm9
vrcpps %xmm4, %xmm5
vaddps %xmm5, %xmm5, %xmm6
vfmsub213ps 0x159fa6(%rip){1to4}, %xmm6, %xmm4 # xmm4 = (xmm6 * xmm4) - mem
vfnmadd213ps %xmm6, %xmm5, %xmm4 # xmm4 = -(xmm5 * xmm4) + xmm6
vfmsub213ps %xmm22, %xmm22, %xmm3 # xmm3 = (xmm22 * xmm3) - xmm22
vfmsub213ps %xmm21, %xmm21, %xmm4 # xmm4 = (xmm21 * xmm4) - xmm21
jmp 0x298906
movq 0x118(%rbp,%rsi), %r14
vbroadcastss (%r14), %xmm4
vbroadcastss 0x4(%r14), %xmm5
vmaxps %xmm4, %xmm22, %xmm3
vminps %xmm5, %xmm3, %xmm3
vmaxps %xmm4, %xmm21, %xmm4
vminps %xmm5, %xmm4, %xmm4
jmp 0x298906
vxorps %xmm1, %xmm22, %xmm3
vminps %xmm27, %xmm3, %xmm3
vmaxps %xmm28, %xmm3, %xmm3
vmovaps %xmm7, %xmm4
vfmadd213ps %xmm8, %xmm3, %xmm4 # xmm4 = (xmm3 * xmm4) + xmm8
vcvttps2dq %xmm4, %xmm5
vcvtdq2ps %xmm5, %xmm5
vcmpltps %xmm5, %xmm4, %k1
vsubps %xmm9, %xmm5, %xmm5 {%k1}
vfmsub231ps %xmm10, %xmm5, %xmm3 # xmm3 = (xmm5 * xmm10) - xmm3
vfnmsub231ps %xmm11, %xmm5, %xmm3 # xmm3 = -(xmm5 * xmm11) - xmm3
vmulps %xmm3, %xmm3, %xmm4
vmovaps %xmm13, %xmm6
vfmadd213ps %xmm24, %xmm3, %xmm6 # xmm6 = (xmm3 * xmm6) + xmm24
vfmadd213ps %xmm14, %xmm3, %xmm6 # xmm6 = (xmm3 * xmm6) + xmm14
vfmadd213ps %xmm15, %xmm3, %xmm6 # xmm6 = (xmm3 * xmm6) + xmm15
vfmadd213ps %xmm16, %xmm3, %xmm6 # xmm6 = (xmm3 * xmm6) + xmm16
vfmadd213ps %xmm8, %xmm3, %xmm6 # xmm6 = (xmm3 * xmm6) + xmm8
vfmadd213ps %xmm3, %xmm4, %xmm6 # xmm6 = (xmm4 * xmm6) + xmm3
vaddps %xmm6, %xmm9, %xmm4
vcvttps2dq %xmm5, %xmm3
vpslld $0x17, %xmm3, %xmm3
vpaddd %xmm17, %xmm3, %xmm3
vfmadd213ps %xmm9, %xmm4, %xmm3 # xmm3 = (xmm4 * xmm3) + xmm9
vrcpps %xmm3, %xmm4
vfmsub213ps %xmm9, %xmm4, %xmm3 # xmm3 = (xmm4 * xmm3) - xmm9
vfnmadd132ps %xmm4, %xmm4, %xmm3 # xmm3 = -(xmm3 * xmm4) + xmm4
vxorps %xmm1, %xmm21, %xmm4
vminps %xmm27, %xmm4, %xmm4
vmaxps %xmm28, %xmm4, %xmm4
vmovaps %xmm7, %xmm5
vfmadd213ps %xmm8, %xmm4, %xmm5 # xmm5 = (xmm4 * xmm5) + xmm8
vcvttps2dq %xmm5, %xmm6
vcvtdq2ps %xmm6, %xmm6
vcmpltps %xmm6, %xmm5, %k1
vsubps %xmm9, %xmm6, %xmm6 {%k1}
vfmsub231ps %xmm10, %xmm6, %xmm4 # xmm4 = (xmm6 * xmm10) - xmm4
vfnmsub231ps %xmm11, %xmm6, %xmm4 # xmm4 = -(xmm6 * xmm11) - xmm4
vmulps %xmm4, %xmm4, %xmm5
vmovaps %xmm13, %xmm12
vfmadd213ps %xmm24, %xmm4, %xmm12 # xmm12 = (xmm4 * xmm12) + xmm24
vfmadd213ps %xmm14, %xmm4, %xmm12 # xmm12 = (xmm4 * xmm12) + xmm14
vfmadd213ps %xmm15, %xmm4, %xmm12 # xmm12 = (xmm4 * xmm12) + xmm15
vfmadd213ps %xmm16, %xmm4, %xmm12 # xmm12 = (xmm4 * xmm12) + xmm16
vfmadd213ps %xmm8, %xmm4, %xmm12 # xmm12 = (xmm4 * xmm12) + xmm8
vfmadd213ps %xmm4, %xmm5, %xmm12 # xmm12 = (xmm5 * xmm12) + xmm4
vaddps %xmm9, %xmm12, %xmm5
vcvttps2dq %xmm6, %xmm4
vpslld $0x17, %xmm4, %xmm4
vpaddd %xmm17, %xmm4, %xmm4
vfmadd213ps %xmm9, %xmm5, %xmm4 # xmm4 = (xmm5 * xmm4) + xmm9
vrcpps %xmm4, %xmm5
vfmsub213ps %xmm9, %xmm5, %xmm4 # xmm4 = (xmm5 * xmm4) - xmm9
vfnmadd132ps %xmm5, %xmm5, %xmm4 # xmm4 = -(xmm4 * xmm5) + xmm5
jmp 0x298906
movq 0x118(%rbp,%rsi), %r14
vbroadcastss (%r14), %xmm4
vbroadcastss 0x4(%r14), %xmm5
vmovaps %xmm22, %xmm3
vfmadd213ps %xmm5, %xmm4, %xmm3 # xmm3 = (xmm4 * xmm3) + xmm5
vmaxps %xmm0, %xmm3, %xmm3
vminps %xmm3, %xmm9, %xmm3
vmulps %xmm22, %xmm3, %xmm3
vfmadd213ps %xmm5, %xmm21, %xmm4 # xmm4 = (xmm21 * xmm4) + xmm5
vmaxps %xmm0, %xmm4, %xmm4
vminps %xmm4, %xmm9, %xmm4
vmulps %xmm21, %xmm4, %xmm4
jmp 0x298906
vmovaps %xmm22, %xmm3
vmovaps %xmm21, %xmm4
cmpl $0x65, 0x38(%rsp)
jl 0x298964
movq 0x288(%rbp,%rsi), %rsi
vmulps (%rsi,%rbx,4), %xmm3, %xmm3
vmulps 0x10(%rsi,%rbx,4), %xmm4, %xmm4
vmovdqa %xmm2, %xmm5
vpternlogd $0xf8, %xmm1, %xmm3, %xmm5
vmovdqa %xmm2, %xmm6
vpternlogd $0xf8, %xmm1, %xmm4, %xmm6
vaddps %xmm5, %xmm3, %xmm3
vaddps %xmm6, %xmm4, %xmm4
vcvttps2dq %xmm3, %xmm3
vcvttps2dq %xmm4, %xmm4
vpackssdw %xmm4, %xmm3, %xmm3
vpminsw %xmm25, %xmm3, %xmm3
vpmaxsw %xmm26, %xmm3, %xmm3
vpacksswb %xmm3, %xmm3, %xmm3
vmovq %xmm3, (%rdx)
addq $0x8, %rdx
jmp 0x298973
vmovups %xmm3, (%r8)
vmovups %xmm4, 0x10(%r8)
addq $0x20, %r8
incl %r13d
jmp 0x2981b8
incl %ecx
jmp 0x2981a7
movq 0x48(%rsp), %rsi
incq %rsi
movq 0x30(%rsp), %rdx
addl 0x40(%rsp), %edx
movq 0x28(%rsp), %r14
jmp 0x298142
pushq $0x3
popq %r13
cmpl $0x1, 0xdc(%rbp,%rdx)
jne 0x298ab0
cmpl $0x1, 0xe0(%rbp,%rdx)
jne 0x298ab0
cmpl $0x2, 0xe4(%rbp,%rdx)
jne 0x298ab0
cmpl $0x2, 0xe8(%rbp,%rdx)
jne 0x298ab0
cmpl $0x1, 0x110(%rbp,%rdx)
ja 0x298ab0
movl $0x0, 0x48(%rsp)
cmpl $0x65, 0x38(%rsp)
jl 0x299539
vpxor %xmm0, %xmm0, %xmm0
leaq 0x130(%rsp), %r13
vmovdqa %xmm0, (%r13)
andq $0x0, 0x10(%r13)
xorl %ebx, %ebx
movq %rbp, %rcx
leaq 0xe0(%rsp), %rbp
leaq 0x190(%rsp), %r12
movq -0x18(%rax), %r14
movslq 0x108(%rcx,%r14), %rax
cmpq %rax, %rbx
jge 0x299868
movq 0x1f8(%rcx,%r14), %rax
vmovss (%rax,%rbx,4), %xmm1
vpxor %xmm0, %xmm0, %xmm0
vucomiss %xmm1, %xmm0
je 0x298a6b
movq 0x240(%rcx,%r14), %rax
vmulss (%rax,%rbx,4), %xmm1, %xmm0
vmovss 0x156221(%rip), %xmm1 # 0x3eec88
vdivss %xmm0, %xmm1, %xmm0
vmovss %xmm0, 0xe0(%rsp)
movq 0x288(%rcx,%r14), %rax
vmovd (%rax,%rbx,4), %xmm0
vmovd %xmm0, 0x190(%rsp)
movq %r13, %rdi
movq %rbp, %rsi
callq 0x1ea12c
movq %r13, %rdi
movq %r12, %rsi
callq 0x1ea12c
incq %rbx
movq 0x10(%rsp), %rcx
movq (%rcx), %rax
jmp 0x298a26
imull %ecx, %r13d
movslq %r13d, %rsi
leaq 0x130(%rsp), %rdi
leaq 0xe0(%rsp), %rdx
movq %rsi, 0xc0(%rsp)
callq 0x73bbe
movq (%rbp), %rax
movq %rax, 0x58(%rsp)
movq -0x18(%rax), %rax
imull 0xe0(%rbp,%rax), %r12d
movq 0x130(%rsp), %rbx
movl 0xdc(%rbp,%rax), %ecx
imull 0xd4(%rbp,%rax), %ecx
subl %ecx, %r12d
xorl %ecx, %ecx
xorl %edx, %edx
xorl %esi, %esi
cmpl 0xd8(%rbp,%rax), %esi
jge 0x298b48
movslq %ecx, %rcx
leaq (%rbx,%rcx,4), %r8
xorl %edi, %edi
cmpl 0xd4(%rbp,%rax), %edi
jge 0x298b3e
movl %edx, (%r8,%rdi,4)
movq 0x58(%rsp), %rax
movq -0x18(%rax), %rax
addl 0xdc(%rbp,%rax), %edx
incq %rdi
jmp 0x298b1c
addl %r12d, %edx
incl %esi
addq %rdi, %rcx
jmp 0x298b0a
xorl %edx, %edx
testl %r13d, %r13d
movl $0x0, %ecx
movq %rcx, 0x50(%rsp)
cmovlel %edx, %r13d
vbroadcastss 0x158650(%rip), %xmm4 # 0x3f11b4
leaq 0x15fa39(%rip), %r8 # 0x3f85a4
xorl %ebp, %ebp
movq 0x10(%rsp), %rcx
movslq 0x108(%rcx,%rax), %rax
cmpq %rax, %rbp
jge 0x298ed9
movq 0x10(%r15), %r9
imulq %rbp, %r9
imulq 0x40(%r15), %r9
addq (%r15), %r9
movslq 0x9c(%rsp), %r10
movq 0xb0(%rsp), %r11
imulq %rbp, %r11
movq 0x80(%rsp), %rax
imulq %rax, %r11
addq 0x70(%rsp), %r11
imulq %rax, %r10
movq 0x28(%rcx), %r15
addq 0x50(%rsp), %r15
xorl %eax, %eax
movq %r9, 0x28(%rsp)
movq %r10, 0x40(%rsp)
movq %r11, 0x68(%rsp)
movq %rbp, 0xc8(%rsp)
cmpl 0x60(%rsp), %eax
jg 0x298eb1
movl %eax, 0x30(%rsp)
movq 0x10(%rsp), %rax
movq (%rax), %rax
movq %rax, 0x48(%rsp)
xorl %r14d, %r14d
cmpl 0x18(%rsp), %r14d
jg 0x298ea6
movq 0x48(%rsp), %rax
movq -0x18(%rax), %r12
movq 0x10(%rsp), %rcx
movslq 0xe8(%rcx,%r12), %rax
movslq 0x30(%rsp), %rdx
imulq %rax, %rdx
imulq %r10, %rdx
movslq 0xe4(%rcx,%r12), %rax
movslq %r14d, %rcx
imulq %rax, %rcx
addq %r11, %rcx
addq %rdx, %rcx
xorl %edx, %edx
xorl %eax, %eax
cmpq %rdx, %r13
je 0x298c5f
movslq (%rbx,%rdx,4), %rsi
movsbl (%rcx,%rsi), %esi
movsbl (%r15,%rdx), %edi
imull %esi, %edi
addl %edi, %eax
incq %rdx
jmp 0x298c43
movq 0x10(%rsp), %rdx
movq 0x1f8(%rdx,%r12), %rcx
vmovss (%rcx,%rbp,4), %xmm1
vxorps %xmm0, %xmm0, %xmm0
vucomiss %xmm1, %xmm0
je 0x298c94
movq 0x240(%rdx,%r12), %rcx
vmulss (%rcx,%rbp,4), %xmm1, %xmm0
vmovss 0x155ff8(%rip), %xmm1 # 0x3eec88
vdivss %xmm0, %xmm1, %xmm0
vcvtsi2ss %eax, %xmm6, %xmm1
vmulss %xmm1, %xmm0, %xmm5
cmpl $0x0, 0x100(%rdx,%r12)
je 0x298cb4
movq 0x1b0(%rdx,%r12), %rax
vaddss (%rax,%rbp,4), %xmm5, %xmm5
movl 0x110(%rdx,%r12), %eax
decl %eax
cmpl $0x5, %eax
ja 0x298e2c
movslq (%r8,%rax,4), %rax
addq %r8, %rax
jmpq *%rax
vmaxss 0x155338(%rip), %xmm5, %xmm0 # 0x3ee010
jmp 0x298e30
vmovaps %xmm5, %xmm0
movq %r9, 0xd8(%rsp)
vmovss %xmm5, 0xd0(%rsp)
callq 0x5f410
vaddss 0x155f89(%rip), %xmm0, %xmm0 # 0x3eec88
callq 0x5f200
callq 0x5f160
movq 0x68(%rsp), %r11
movq 0x40(%rsp), %r10
movq 0xd8(%rsp), %r9
leaq 0x15f882(%rip), %r8 # 0x3f85a4
vbroadcastss 0x158489(%rip), %xmm4 # 0x3f11b4
vmulss 0xd0(%rsp), %xmm0, %xmm0
jmp 0x298e30
movq 0x10(%rsp), %rax
movq 0x118(%rax,%r12), %rax
vmovss 0x4(%rax), %xmm1
vmaxss (%rax), %xmm5, %xmm0
vucomiss %xmm1, %xmm0
jbe 0x298e30
vmovaps %xmm1, %xmm0
jmp 0x298e30
vmovss 0x15844e(%rip), %xmm2 # 0x3f11b8
vminss %xmm2, %xmm5, %xmm1
vxorps %xmm4, %xmm1, %xmm0
vcmpltss 0x15843f(%rip), %xmm1, %k1 # 0x3f11bc
vmovss %xmm2, %xmm0, %xmm0 {%k1}
movq %r9, %rbp
callq 0x5f410
movq 0x68(%rsp), %r11
movq 0x40(%rsp), %r10
movq %rbp, %r9
movq 0xc8(%rsp), %rbp
leaq 0x15f7fd(%rip), %r8 # 0x3f85a4
vbroadcastss 0x158404(%rip), %xmm4 # 0x3f11b4
vmovss 0x155ed0(%rip), %xmm1 # 0x3eec88
vaddss %xmm1, %xmm0, %xmm0
vdivss %xmm0, %xmm1, %xmm0
jmp 0x298e30
movq 0x10(%rsp), %rax
movq 0x118(%rax,%r12), %rax
vmovss (%rax), %xmm0
vcmpgtss 0x155232(%rip), %xmm5, %k1 # 0x3ee010
vmovss 0x155ea0(%rip), %xmm0 {%k1} # 0x3eec88
vmulss %xmm5, %xmm0, %xmm0
jmp 0x298e30
movq 0x10(%rsp), %rax
movq 0x118(%rax,%r12), %rax
vmovss (%rax), %xmm1
vmovss 0x4(%rax), %xmm2
vxorps %xmm4, %xmm2, %xmm0
vdivss %xmm1, %xmm0, %xmm3
vxorps %xmm0, %xmm0, %xmm0
vucomiss %xmm3, %xmm5
jb 0x298e30
vmovss 0x155e6a(%rip), %xmm0 # 0x3eec88
vdivss %xmm1, %xmm0, %xmm0
vaddss %xmm0, %xmm3, %xmm0
vucomiss %xmm0, %xmm5
jbe 0x298e9b
vmovaps %xmm5, %xmm0
cmpl $0x65, 0x38(%rsp)
jl 0x298e81
movq 0x10(%rsp), %rax
movq 0x288(%rax,%r12), %rax
vmulss (%rax,%rbp,4), %xmm0, %xmm0
vpbroadcastd 0x15f7b2(%rip), %xmm1 # 0x3f8604
vpternlogd $0xf8, %xmm4, %xmm0, %xmm1
vaddss %xmm1, %xmm0, %xmm0
vroundss $0xb, %xmm0, %xmm0, %xmm0
vcvttss2si %xmm0, %eax
cmpl $-0x7e, %eax
pushq $-0x7f
popq %rcx
cmovll %ecx, %eax
cmpl $0x7f, %eax
pushq $0x7f
popq %rcx
cmovgel %ecx, %eax
movb %al, (%r9)
incq %r9
jmp 0x298e93
movq 0x28(%rsp), %rax
vmovss %xmm0, (%rax)
addq $0x4, %rax
movq %rax, 0x28(%rsp)
incl %r14d
jmp 0x298bfc
vfmadd213ss %xmm2, %xmm5, %xmm1 # xmm1 = (xmm5 * xmm1) + xmm2
vmulss %xmm5, %xmm1, %xmm0
jmp 0x298e30
movl 0x30(%rsp), %eax
incl %eax
jmp 0x298bde
incq %rbp
movq 0x58(%rsp), %rax
movq -0x18(%rax), %rax
movq 0x50(%rsp), %rcx
addq 0xc0(%rsp), %rcx
movq %rcx, 0x50(%rsp)
movq 0x20(%rsp), %r15
jmp 0x298b6d
leaq 0x130(%rsp), %rdi
callq 0x624be
movl $0x0, 0x48(%rsp)
jmp 0x298fc6
movq %rsi, %rdi
callq 0x5f3e0
vmovaps 0xe0(%rsp), %xmm0
movq 0x20(%rsp), %rcx
vmovups %xmm0, (%rcx)
movq 0xf0(%rsp), %rax
movq %rax, 0x10(%rcx)
movl 0xf8(%rsp), %eax
movl %eax, 0x18(%rcx)
movq 0x100(%rsp), %rax
movq %rax, 0x20(%rcx)
vmovups 0x108(%rsp), %xmm0
vmovups %xmm0, 0x28(%rcx)
movl 0x118(%rsp), %eax
movl %eax, 0x38(%rcx)
movq 0x120(%rsp), %rax
movq %rax, 0x40(%rcx)
movq 0xe8(%rsp), %rax
testq %rax, %rax
je 0x298f8a
lock
decl (%rax)
jne 0x298f8a
movq 0xe0(%rsp), %rsi
movq 0x100(%rsp), %rdi
testq %rdi, %rdi
je 0x298f82
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x298f8a
movq %rsi, %rdi
callq 0x5f3e0
movl %r14d, 0x48(%rsp)
movq 0x138(%rsp), %rax
testq %rax, %rax
je 0x298fc6
lock
decl (%rax)
jne 0x298fc6
movq 0x130(%rsp), %rsi
movq 0x150(%rsp), %rdi
testq %rdi, %rdi
je 0x298fbe
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x298fc6
movq %rsi, %rdi
callq 0x5f3e0
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x298ff7
lock
decl (%rax)
jne 0x298ff7
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x298fef
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x298ff7
movq %rsi, %rdi
callq 0x5f3e0
movq 0x1e8(%rsp), %rax
testq %rax, %rax
movl 0x48(%rsp), %ebx
je 0x299032
lock
decl (%rax)
jne 0x299032
movq 0x1e0(%rsp), %rsi
movq 0x200(%rsp), %rdi
testq %rdi, %rdi
je 0x29902a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x299032
movq %rsi, %rdi
callq 0x5f3e0
movl %ebx, %eax
addq $0x318, %rsp # imm = 0x318
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
vpxor %xmm0, %xmm0, %xmm0
leaq 0x130(%rsp), %rbx
andq $0x0, 0x10(%rbx)
vmovdqa %xmm0, (%rbx)
xorl %r14d, %r14d
leaq 0xe0(%rsp), %r15
movq -0x18(%rax), %r12
movslq 0x108(%rbp,%r12), %rax
cmpq %rax, %r14
jge 0x2990bc
movq 0x1f8(%rbp,%r12), %rax
movq 0x240(%rbp,%r12), %rcx
vmovss (%rax,%r14,4), %xmm0
vmulss (%rcx,%r14,4), %xmm0, %xmm0
vmovss 0x155bed(%rip), %xmm1 # 0x3eec88
vdivss %xmm0, %xmm1, %xmm0
vmovss %xmm0, 0xe0(%rsp)
movq %rbx, %rdi
movq %r15, %rsi
callq 0x1ea12c
incq %r14
movq (%rbp), %rax
jmp 0x299066
leaq 0x2a0(%rsp), %rdi
leaq 0x130(%rsp), %rsi
callq 0x290632
movslq 0x9c(%rsp), %rdx
movq 0x20(%rsp), %rcx
movslq 0x2c(%rcx), %rdi
movslq 0x30(%rcx), %rax
movl 0x38(%rcx), %esi
movq 0x28(%rbp), %r9
movq 0x1b0(%rbp,%r12), %r10
movl 0x34(%rcx), %r8d
movl %r8d, 0x68(%rsp)
movq (%rcx), %r8
movq 0x10(%rcx), %r11
movq 0x40(%rcx), %r15
imulq %r11, %r15
movq %rax, %rbx
imulq %rdi, %rbx
movq %r11, 0xd0(%rsp)
movq %rbx, 0x60(%rsp)
imulq %rbx, %r11
addq $0xf, %r11
movq 0x80(%rsp), %rbx
imulq 0xb0(%rsp), %rbx
movq %rbx, 0x50(%rsp)
andq $-0x10, %r11
movq %r11, 0x58(%rsp)
xorl %r11d, %r11d
testl %eax, %eax
movl $0x0, %ebp
cmovgl %eax, %ebp
movl 0x28(%rcx), %eax
movl %eax, 0xc0(%rsp)
testl %esi, %esi
cmovlel %r11d, %esi
movq %rsi, 0x38(%rsp)
vpxor %xmm0, %xmm0, %xmm0
movq 0x70(%rsp), %rax
movq %rax, 0x188(%rsp)
movq %rdx, 0x28(%rsp)
leaq (%rdx,%rdx), %rax
movq %rax, 0x180(%rsp)
movq 0x2a0(%rsp), %rax
movq %rax, 0x178(%rsp)
movq %r8, 0xd8(%rsp)
xorl %esi, %esi
movq %r10, 0x40(%rsp)
movq %r15, 0xc8(%rsp)
cmpq 0x38(%rsp), %rsi
je 0x29933a
movq 0x58(%rsp), %rax
xorl %edx, %edx
divq 0xd0(%rsp)
cmpl $0x4, 0xc0(%rsp)
cmoveq 0x60(%rsp), %rax
testq %r10, %r10
je 0x2991e8
vmovss (%r10,%rsi,4), %xmm2
jmp 0x2991ec
vxorps %xmm2, %xmm2, %xmm2
imulq %rsi, %r15
addq 0xd8(%rsp), %r15
movq 0x178(%rsp), %rcx
vmovss (%rcx,%rsi,4), %xmm1
imull 0x68(%rsp), %eax
testl %eax, %eax
movl $0x0, %ecx
cmovlel %ecx, %eax
xorl %ecx, %ecx
cmpl %ecx, %eax
je 0x299225
vmovss %xmm2, (%r8,%rcx,4)
incq %rcx
jmp 0x299216
leaq (%rsi,%rsi,8), %rax
movq 0x50(%rsp), %r13
movq %rsi, 0x18(%rsp)
imulq %rsi, %r13
addq 0x188(%rsp), %r13
movq 0x28(%rsp), %rcx
leaq (%rcx,%r13), %r12
movq 0x180(%rsp), %rcx
leaq (%rcx,%r13), %r11
xorl %ebx, %ebx
cmpl %ebp, %ebx
je 0x29931d
xorl %ecx, %ecx
movq %r15, %rdx
movl %edi, %r10d
testl %r10d, %r10d
jle 0x2992fe
movzwl (%r13,%rcx), %r14d
vmovd %r14d, %xmm2
vpinsrb $0x2, 0x2(%r13,%rcx), %xmm2, %xmm2
vpinsrb $0x3, (%r12,%rcx), %xmm2, %xmm2
vpinsrb $0x4, 0x1(%r12,%rcx), %xmm2, %xmm2
vpinsrb $0x5, 0x2(%r12,%rcx), %xmm2, %xmm2
vpinsrb $0x6, (%r11,%rcx), %xmm2, %xmm2
vpinsrb $0x7, 0x1(%r11,%rcx), %xmm2, %xmm2
vpmovsxbw (%r9,%rax), %xmm3
vpmovsxbw %xmm2, %xmm2
vpmaddwd %xmm2, %xmm3, %xmm2
vphaddd %xmm2, %xmm0, %xmm2
movsbl 0x2(%r11,%rcx), %r14d
movsbl 0x8(%r9,%rax), %esi
imull %r14d, %esi
vphaddd %xmm2, %xmm2, %xmm2
vphaddd %xmm2, %xmm2, %xmm2
vmovd %xmm2, %r14d
addl %esi, %r14d
vcvtsi2ss %r14d, %xmm4, %xmm2
vfmadd213ss (%r15,%rcx,4), %xmm1, %xmm2 # xmm2 = (xmm1 * xmm2) + mem
vmovss %xmm2, (%r15,%rcx,4)
addq $0x4, %rdx
decl %r10d
incq %rcx
jmp 0x299266
addq %rcx, %r13
addq $0x2, %r13
addq %rcx, %r12
addq $0x2, %r12
addq %rcx, %r11
addq $0x2, %r11
incl %ebx
movq %rdx, %r15
jmp 0x299256
movq 0x18(%rsp), %rsi
incq %rsi
movq 0xc8(%rsp), %r15
addq %r15, %r8
movq 0x40(%rsp), %r10
jmp 0x2991b3
leaq 0x2a0(%rsp), %rdi
jmp 0x29984c
leaq 0x2b8(%rsp), %rdi
leaq 0x130(%rsp), %rsi
callq 0x290632
movslq 0x9c(%rsp), %rsi
movq 0x20(%rsp), %r15
movl 0x2c(%r15), %eax
movl 0x30(%r15), %edx
movl 0x38(%r15), %r9d
movq 0x10(%rsp), %rcx
movq 0x28(%rcx), %rdi
movq 0x1b0(%rcx,%r14), %r8
movq %rsi, 0x38(%rsp)
leaq (%rsi,%rsi), %rcx
movq %rcx, 0x28(%rsp)
xorl %r10d, %r10d
testl %edx, %edx
cmovlel %r10d, %edx
testl %r9d, %r9d
cmovlel %r10d, %r9d
movq %r9, 0x18(%rsp)
vpxor %xmm0, %xmm0, %xmm0
vpbroadcastd 0x15f24e(%rip), %xmm1 # 0x3f8604
vpbroadcastd 0x157df5(%rip), %xmm2 # 0x3f11b4
cmpq 0x18(%rsp), %r10
je 0x29952c
testq %r8, %r8
je 0x2993d7
vmovss (%r8,%r10,4), %xmm3
jmp 0x2993db
vxorps %xmm3, %xmm3, %xmm3
movq 0x40(%r15), %r11
imulq %r10, %r11
imulq 0x10(%r15), %r11
addq (%r15), %r11
movq 0x2b8(%rsp), %rcx
vmovss (%rcx,%r10,8), %xmm4
vmovss 0x4(%rcx,%r10,8), %xmm5
movq 0xb0(%rsp), %rbx
imulq %r10, %rbx
imulq 0x80(%rsp), %rbx
addq 0x70(%rsp), %rbx
leaq (%r10,%r10,8), %r14
movq 0x38(%rsp), %rcx
leaq (%rbx,%rcx), %r15
movq 0x28(%rsp), %rcx
leaq (%rbx,%rcx), %r12
xorl %ebp, %ebp
cmpl %edx, %ebp
je 0x29951f
xorl %r13d, %r13d
movl %eax, %ecx
testl %ecx, %ecx
jle 0x299500
movzwl (%rbx,%r13), %r9d
vmovd %r9d, %xmm6
vpinsrb $0x2, 0x2(%rbx,%r13), %xmm6, %xmm6
vpinsrb $0x3, (%r15,%r13), %xmm6, %xmm6
vpinsrb $0x4, 0x1(%r15,%r13), %xmm6, %xmm6
vpinsrb $0x5, 0x2(%r15,%r13), %xmm6, %xmm6
vpinsrb $0x6, (%r12,%r13), %xmm6, %xmm6
vpinsrb $0x7, 0x1(%r12,%r13), %xmm6, %xmm6
vpmovsxbw (%rdi,%r14), %xmm7
vpmovsxbw %xmm6, %xmm6
vpmaddwd %xmm6, %xmm7, %xmm6
vphaddd %xmm6, %xmm0, %xmm6
movsbl 0x2(%r12,%r13), %r9d
movsbl 0x8(%rdi,%r14), %esi
imull %r9d, %esi
vphaddd %xmm6, %xmm6, %xmm6
vphaddd %xmm6, %xmm6, %xmm6
vmovd %xmm6, %r9d
addl %esi, %r9d
vcvtsi2ss %r9d, %xmm8, %xmm6
vfmadd213ss %xmm3, %xmm4, %xmm6 # xmm6 = (xmm4 * xmm6) + xmm3
vmulss %xmm5, %xmm6, %xmm6
vmovdqa %xmm1, %xmm7
vpternlogd $0xf8, %xmm2, %xmm6, %xmm7
vaddss %xmm7, %xmm6, %xmm6
vroundss $0xb, %xmm6, %xmm6, %xmm6
vcvttss2si %xmm6, %esi
cmpl $-0x7e, %esi
pushq $-0x7f
popq %r9
cmovll %r9d, %esi
cmpl $0x7f, %esi
pushq $0x7f
popq %r9
cmovgel %r9d, %esi
movb %sil, (%r11,%r13)
decl %ecx
incq %r13
jmp 0x29943f
addq %r13, %rbx
addq $0x2, %rbx
addq %r13, %r15
addq $0x2, %r15
addq %r13, %r12
addq $0x2, %r12
incl %ebp
addq %r13, %r11
jmp 0x299432
incq %r10
movq 0x20(%rsp), %r15
jmp 0x2993bf
leaq 0x2b8(%rsp), %rdi
jmp 0x299a63
vpxor %xmm0, %xmm0, %xmm0
leaq 0x130(%rsp), %rbx
andq $0x0, 0x10(%rbx)
vmovdqa %xmm0, (%rbx)
xorl %r14d, %r14d
leaq 0xe0(%rsp), %r15
movq -0x18(%rax), %r12
movslq 0x108(%rbp,%r12), %rax
cmpq %rax, %r14
jge 0x2995af
movq 0x1f8(%rbp,%r12), %rax
movq 0x240(%rbp,%r12), %rcx
vmovss (%rax,%r14,4), %xmm0
vmulss (%rcx,%r14,4), %xmm0, %xmm0
vmovss 0x1556fa(%rip), %xmm1 # 0x3eec88
vdivss %xmm0, %xmm1, %xmm0
vmovss %xmm0, 0xe0(%rsp)
movq %rbx, %rdi
movq %r15, %rsi
callq 0x1ea12c
incq %r14
movq (%rbp), %rax
jmp 0x299559
leaq 0x270(%rsp), %rdi
leaq 0x130(%rsp), %rsi
callq 0x290632
movslq 0x9c(%rsp), %rsi
movq 0x20(%rsp), %rdx
movslq 0x2c(%rdx), %rdi
movslq 0x30(%rdx), %rcx
movl 0x38(%rdx), %r8d
movl %esi, %eax
subl %edi, %eax
addl %eax, %eax
movq 0x28(%rbp), %r9
movq 0x1b0(%rbp,%r12), %r10
movl 0x34(%rdx), %r11d
movl %r11d, 0xd8(%rsp)
movq 0x10(%rdx), %r11
movq 0x40(%rdx), %r14
imulq %r11, %r14
movq %rcx, %rbx
movq %rdi, 0x18(%rsp)
imulq %rdi, %rbx
movq %r11, 0xd0(%rsp)
movq %rbx, 0x60(%rsp)
imulq %rbx, %r11
addq $0xf, %r11
andq $-0x10, %r11
movq %r11, 0x58(%rsp)
movq 0x80(%rsp), %rdi
imulq 0xb0(%rsp), %rdi
movq %rdi, 0x50(%rsp)
xorl %edi, %edi
testl %ecx, %ecx
movl $0x0, %ebp
cmovgl %ecx, %ebp
movq (%rdx), %r13
testl %r8d, %r8d
cmovlel %edi, %r8d
movq %r8, 0x28(%rsp)
movslq %eax, %r11
vpxor %xmm0, %xmm0, %xmm0
movl 0x28(%rdx), %eax
movl %eax, 0x188(%rsp)
movq 0x70(%rsp), %rax
movq %rax, 0x180(%rsp)
movq %rsi, 0x40(%rsp)
leaq (%rsi,%rsi), %rax
movq %rax, 0x178(%rsp)
movq 0x270(%rsp), %rax
movq %rax, 0x1d8(%rsp)
movq %r13, 0xc0(%rsp)
xorl %edi, %edi
movq %r10, 0x68(%rsp)
movq %r14, 0xc8(%rsp)
cmpq 0x28(%rsp), %rdi
je 0x299844
movq 0x58(%rsp), %rax
xorl %edx, %edx
divq 0xd0(%rsp)
cmpl $0x4, 0x188(%rsp)
cmoveq 0x60(%rsp), %rax
testq %r10, %r10
je 0x2996ed
vmovss (%r10,%rdi,4), %xmm2
jmp 0x2996f1
vxorps %xmm2, %xmm2, %xmm2
imulq %rdi, %r14
addq 0xc0(%rsp), %r14
movq 0x1d8(%rsp), %rcx
vmovss (%rcx,%rdi,4), %xmm1
imull 0xd8(%rsp), %eax
testl %eax, %eax
movl $0x0, %ecx
cmovlel %ecx, %eax
xorl %ecx, %ecx
cmpl %ecx, %eax
je 0x29972e
vmovss %xmm2, (%r13,%rcx,4)
incq %rcx
jmp 0x29971e
leaq (%rdi,%rdi,8), %rax
movq 0x50(%rsp), %rsi
movq %rdi, 0x38(%rsp)
imulq %rdi, %rsi
addq 0x180(%rsp), %rsi
movq 0x40(%rsp), %rcx
leaq (%rsi,%rcx), %rbx
movq 0x178(%rsp), %rcx
leaq (%rsi,%rcx), %r8
xorl %r15d, %r15d
cmpl %ebp, %r15d
je 0x299827
xorl %r10d, %r10d
movq %r14, %rdx
movq 0x18(%rsp), %rcx
testl %ecx, %ecx
jle 0x29980a
movzwl (%rsi,%r10), %r12d
vmovd %r12d, %xmm2
vpinsrb $0x2, 0x2(%rsi,%r10), %xmm2, %xmm2
vpinsrb $0x3, (%rbx,%r10), %xmm2, %xmm2
vpinsrb $0x4, 0x1(%rbx,%r10), %xmm2, %xmm2
vpinsrb $0x5, 0x2(%rbx,%r10), %xmm2, %xmm2
vpinsrb $0x6, (%r8,%r10), %xmm2, %xmm2
vpinsrb $0x7, 0x1(%r8,%r10), %xmm2, %xmm2
vpmovsxbw (%r9,%rax), %xmm3
vpmovsxbw %xmm2, %xmm2
vpmaddwd %xmm2, %xmm3, %xmm2
vphaddd %xmm2, %xmm0, %xmm2
movsbl 0x2(%r8,%r10), %r12d
movsbl 0x8(%r9,%rax), %edi
imull %r12d, %edi
vphaddd %xmm2, %xmm2, %xmm2
vphaddd %xmm2, %xmm2, %xmm2
vmovd %xmm2, %r12d
addl %edi, %r12d
vcvtsi2ss %r12d, %xmm4, %xmm2
vfmadd213ss (%r14,%r10,2), %xmm1, %xmm2 # xmm2 = (xmm1 * xmm2) + mem
vmovss %xmm2, (%r14,%r10,2)
addq $0x4, %rdx
decl %ecx
addq $0x2, %r10
jmp 0x299774
addq %r11, %rsi
addq %r10, %rsi
addq %r11, %rbx
addq %r10, %rbx
addq %r11, %r8
addq %r10, %r8
incl %r15d
movq %rdx, %r14
jmp 0x299760
movq 0x38(%rsp), %rdi
incq %rdi
movq 0xc8(%rsp), %r14
addq %r14, %r13
movq 0x68(%rsp), %r10
jmp 0x2996b8
leaq 0x270(%rsp), %rdi
callq 0x621c2
leaq 0x130(%rsp), %rdi
callq 0x621c2
movq 0x20(%rsp), %r15
jmp 0x299a75
leaq 0x288(%rsp), %rdi
leaq 0x130(%rsp), %rsi
callq 0x290632
movslq 0x9c(%rsp), %r8
movq 0x20(%rsp), %r15
movl 0x2c(%r15), %eax
movl 0x30(%r15), %edx
movl 0x38(%r15), %r9d
movl %r8d, %ecx
subl %eax, %ecx
addl %ecx, %ecx
movq 0x10(%rsp), %rsi
movq 0x28(%rsi), %rdi
movq 0x1b0(%rsi,%r14), %rsi
movq %r8, 0x38(%rsp)
addq %r8, %r8
movq %r8, 0x40(%rsp)
movslq %ecx, %r10
xorl %r11d, %r11d
testl %edx, %edx
cmovlel %r11d, %edx
testl %r9d, %r9d
cmovlel %r11d, %r9d
movq %r9, 0x18(%rsp)
vpxor %xmm0, %xmm0, %xmm0
vpbroadcastd 0x15ed24(%rip), %xmm1 # 0x3f8604
vpbroadcastd 0x1578cb(%rip), %xmm2 # 0x3f11b4
movq %rsi, 0x28(%rsp)
cmpq 0x18(%rsp), %r11
je 0x299a5b
testq %rsi, %rsi
je 0x299906
vmovss (%rsi,%r11,4), %xmm3
jmp 0x29990a
vxorps %xmm3, %xmm3, %xmm3
movq 0x40(%r15), %rbx
imulq %r11, %rbx
imulq 0x10(%r15), %rbx
addq (%r15), %rbx
movq 0x288(%rsp), %rcx
vmovss (%rcx,%r11,8), %xmm4
vmovss 0x4(%rcx,%r11,8), %xmm5
movq 0xb0(%rsp), %r14
imulq %r11, %r14
imulq 0x80(%rsp), %r14
addq 0x70(%rsp), %r14
leaq (%r11,%r11,8), %r15
movq 0x38(%rsp), %rcx
leaq (%r14,%rcx), %r12
movq 0x40(%rsp), %rcx
leaq (%r14,%rcx), %r13
xorl %ebp, %ebp
cmpl %edx, %ebp
je 0x299a49
xorl %r9d, %r9d
movl %eax, %ecx
testl %ecx, %ecx
jle 0x299a30
movzwl (%r14,%r9), %esi
vmovd %esi, %xmm6
vpinsrb $0x2, 0x2(%r14,%r9), %xmm6, %xmm6
vpinsrb $0x3, (%r12,%r9), %xmm6, %xmm6
vpinsrb $0x4, 0x1(%r12,%r9), %xmm6, %xmm6
vpinsrb $0x5, 0x2(%r12,%r9), %xmm6, %xmm6
vpinsrb $0x6, (%r13,%r9), %xmm6, %xmm6
vpinsrb $0x7, 0x1(%r13,%r9), %xmm6, %xmm6
vpmovsxbw (%rdi,%r15), %xmm7
vpmovsxbw %xmm6, %xmm6
vpmaddwd %xmm6, %xmm7, %xmm6
vphaddd %xmm6, %xmm0, %xmm6
movsbl 0x2(%r13,%r9), %esi
movsbl 0x8(%rdi,%r15), %r8d
imull %esi, %r8d
vphaddd %xmm6, %xmm6, %xmm6
vphaddd %xmm6, %xmm6, %xmm6
vmovd %xmm6, %esi
addl %r8d, %esi
vcvtsi2ss %esi, %xmm8, %xmm6
vfmadd213ss %xmm3, %xmm4, %xmm6 # xmm6 = (xmm4 * xmm6) + xmm3
vmulss %xmm5, %xmm6, %xmm6
vmovdqa %xmm1, %xmm7
vpternlogd $0xf8, %xmm2, %xmm6, %xmm7
vaddss %xmm7, %xmm6, %xmm6
vroundss $0xb, %xmm6, %xmm6, %xmm6
vcvttss2si %xmm6, %esi
cmpl $-0x7e, %esi
pushq $-0x7f
popq %r8
cmovll %r8d, %esi
cmpl $0x7f, %esi
pushq $0x7f
popq %r8
cmovgel %r8d, %esi
movb %sil, (%rbx)
incq %rbx
decl %ecx
addq $0x2, %r9
jmp 0x29996e
addq %r10, %r14
addq %r9, %r14
addq %r10, %r12
addq %r9, %r12
addq %r10, %r13
addq %r9, %r13
incl %ebp
jmp 0x299961
incq %r11
movq 0x20(%rsp), %r15
movq 0x28(%rsp), %rsi
jmp 0x2998ee
leaq 0x288(%rsp), %rdi
callq 0x621c2
leaq 0x130(%rsp), %rdi
callq 0x621c2
movq 0x10(%rsp), %rax
movq 0x8(%rax), %rdi
testq %rdi, %rdi
je 0x298fc6
movq (%rdi), %rax
movq %r15, %rsi
movq 0x30(%rsp), %rdx
callq *0x48(%rax)
jmp 0x298fc6
jmp 0x299aa8
jmp 0x299aa8
jmp 0x299aa8
jmp 0x299aa8
jmp 0x299aa8
jmp 0x299aa8
jmp 0x299aa8
movq %rax, %rbx
leaq 0x130(%rsp), %rdi
callq 0x621c2
jmp 0x299c2a
jmp 0x299b43
jmp 0x299b43
jmp 0x299ca4
jmp 0x299ca4
movq %rax, %rbx
jmp 0x299bf3
jmp 0x299ca4
jmp 0x299b43
movq %rax, %rbx
jmp 0x299bbc
jmp 0x299ca4
jmp 0x299ca4
jmp 0x299b43
jmp 0x299ca4
jmp 0x299ca4
movq %rax, %rbx
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x299c5b
lock
decl (%rax)
jne 0x299c5b
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x299c4b
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x299c5b
jmp 0x299ca4
movq %rax, %rbx
jmp 0x299c5b
movq %rax, %rbx
jmp 0x299c2a
movq %rax, %rbx
movq 0x230(%rsp), %rax
testq %rax, %rax
je 0x299b85
lock
decl (%rax)
jne 0x299b85
movq 0x228(%rsp), %rsi
movq 0x248(%rsp), %rdi
testq %rdi, %rdi
jne 0x299b7f
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x299b85
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x198(%rsp), %rax
testq %rax, %rax
je 0x299bbc
lock
decl (%rax)
jne 0x299bbc
movq 0x190(%rsp), %rsi
movq 0x1b0(%rsp), %rdi
testq %rdi, %rdi
jne 0x299bb6
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x299bbc
movq (%rdi), %rax
callq *0x18(%rax)
movq 0xe8(%rsp), %rax
testq %rax, %rax
je 0x299bf3
lock
decl (%rax)
jne 0x299bf3
movq 0xe0(%rsp), %rsi
movq 0x100(%rsp), %rdi
testq %rdi, %rdi
jne 0x299bed
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x299bf3
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x138(%rsp), %rax
testq %rax, %rax
je 0x299c2a
lock
decl (%rax)
jne 0x299c2a
movq 0x130(%rsp), %rsi
movq 0x150(%rsp), %rdi
testq %rdi, %rdi
jne 0x299c24
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x299c2a
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x299c5b
lock
decl (%rax)
jne 0x299c5b
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
jne 0x299c55
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x299c5b
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x1e8(%rsp), %rax
testq %rax, %rax
je 0x299c92
lock
decl (%rax)
jne 0x299c92
movq 0x1e0(%rsp), %rsi
movq 0x200(%rsp), %rdi
testq %rdi, %rdi
jne 0x299c8c
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x299c92
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
jmp 0x299ca4
jmp 0x299ca4
jmp 0x299ca4
jmp 0x299ca4
jmp 0x299ca4
movq %rax, %rdi
callq 0x61d68
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/convolutiondepthwise_x86_avx512.cpp |
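The scalar requantize path above ends in float2int8, which the assembly implements by folding the sign of the value into 0.5, adding, truncating toward zero, then clamping. A minimal sketch of that rounding, assumed to mirror ncnn's float2int8 helper rather than copied from it:

// round half away from zero, then saturate to the symmetric int8 range
static inline signed char float2int8_sketch(float v)
{
    int i = (int)(v + (v >= 0.f ? 0.5f : -0.5f)); // matches the sign-folded +0.5 / truncate sequence above
    if (i > 127) return 127;
    if (i < -127) return -127; // -128 is never produced
    return (signed char)i;
}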
virtual thunk to ncnn::ConvolutionDepthWise_x86_avx512::forward(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const | int ConvolutionDepthWise_x86_avx512::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
#if NCNN_INT8
if (opt.use_int8_inference && int8_scale_term)
{
return forward_int8_x86(bottom_blob, top_blob, opt);
}
#endif
int w = bottom_blob.w;
int h = bottom_blob.h;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;
Mat bottom_blob_bordered;
make_padding(bottom_blob, bottom_blob_bordered, opt);
if (bottom_blob_bordered.empty())
return -100;
w = bottom_blob_bordered.w;
h = bottom_blob_bordered.h;
int outw = (w - kernel_extent_w) / stride_w + 1;
int outh = (h - kernel_extent_h) / stride_h + 1;
int out_elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
#if __AVX512F__
out_elempack = num_output % 16 == 0 ? 16 : num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#elif __AVX__
out_elempack = num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#else
out_elempack = num_output % 4 == 0 ? 4 : 1;
#endif
}
#endif // __SSE2__
size_t out_elemsize = elemsize / elempack * out_elempack;
top_blob.create(outw, outh, num_output / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
// depth-wise
if (channels * elempack == group && group == num_output)
{
#if __SSE2__
#if __AVX__
#if __AVX512F__
if (elempack == 16)
{
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw3x3s1_pack16_avx512(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw3x3s2_pack16_avx512(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 5 && kernel_h == 5 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw5x5s1_pack16_avx512(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 5 && kernel_h == 5 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw5x5s2_pack16_avx512(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
else
{
const int maxk = kernel_w * kernel_h;
// kernel offsets
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
int gap = w * dilation_h - kernel_w * dilation_w;
for (int i = 0; i < kernel_h; i++)
{
for (int j = 0; j < kernel_w; j++)
{
space_ofs[p1] = p2;
p1++;
p2 += dilation_w;
}
p2 += gap;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < channels; g++)
{
float* outptr = top_blob.channel(g);
const float* kptr = (const float*)weight_data_tm + maxk * g * 16;
const Mat m = bottom_blob_bordered.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__m512 _sum = _mm512_set1_ps(0.f);
if (bias_term)
{
_sum = _mm512_loadu_ps(((const float*)bias_data) + g * 16);
}
const float* sptr = m.row(i * stride_h) + j * stride_w * 16;
for (int k = 0; k < maxk; k++)
{
__m512 _val = _mm512_loadu_ps(sptr + space_ofs[k] * 16);
__m512 _w = _mm512_loadu_ps(kptr + k * 16);
_sum = _mm512_fmadd_ps(_val, _w, _sum);
}
_mm512_storeu_ps(outptr, _sum);
outptr += 16;
}
}
}
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
}
#endif // __AVX512F__
if (elempack == 8)
{
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw3x3s1_pack8_avx(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw3x3s2_pack8_avx(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 5 && kernel_h == 5 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw5x5s1_pack8_avx(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 5 && kernel_h == 5 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw5x5s2_pack8_avx(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
else
{
const int maxk = kernel_w * kernel_h;
// kernel offsets
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
int gap = w * dilation_h - kernel_w * dilation_w;
for (int i = 0; i < kernel_h; i++)
{
for (int j = 0; j < kernel_w; j++)
{
space_ofs[p1] = p2;
p1++;
p2 += dilation_w;
}
p2 += gap;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < channels; g++)
{
float* outptr = top_blob.channel(g);
const float* kptr = (const float*)weight_data_tm + maxk * g * 8;
const Mat m = bottom_blob_bordered.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__m256 _sum = _mm256_set1_ps(0.f);
if (bias_term)
{
_sum = _mm256_loadu_ps(((const float*)bias_data) + g * 8);
}
const float* sptr = m.row(i * stride_h) + j * stride_w * 8;
for (int k = 0; k < maxk; k++)
{
__m256 _val = _mm256_loadu_ps(sptr + space_ofs[k] * 8);
__m256 _w = _mm256_loadu_ps(kptr + k * 8);
_sum = _mm256_comp_fmadd_ps(_val, _w, _sum);
}
_mm256_storeu_ps(outptr + j * 8, _sum);
}
outptr += outw * 8;
}
}
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
}
#endif // __AVX__
if (elempack == 4)
{
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw3x3s1_pack4_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw3x3s2_pack4_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 5 && kernel_h == 5 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw5x5s1_pack4_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 5 && kernel_h == 5 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw5x5s2_pack4_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
{
const int maxk = kernel_w * kernel_h;
// kernel offsets
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
int gap = w * dilation_h - kernel_w * dilation_w;
for (int i = 0; i < kernel_h; i++)
{
for (int j = 0; j < kernel_w; j++)
{
space_ofs[p1] = p2;
p1++;
p2 += dilation_w;
}
p2 += gap;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < channels; g++)
{
float* outptr = top_blob.channel(g);
const float* kptr = (const float*)weight_data_tm + maxk * g * 4;
const Mat m = bottom_blob_bordered.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__m128 _sum = _mm_set1_ps(0.f);
if (bias_term)
{
_sum = _mm_loadu_ps(((const float*)bias_data) + g * 4);
}
const float* sptr = m.row(i * stride_h) + j * stride_w * 4;
for (int k = 0; k < maxk; k++)
{
__m128 _val = _mm_loadu_ps(sptr + space_ofs[k] * 4);
__m128 _w = _mm_loadu_ps(kptr + k * 4);
_sum = _mm_add_ps(_mm_mul_ps(_val, _w), _sum);
}
_sum = activation_sse(_sum, activation_type, activation_params);
_mm_storeu_ps(outptr + j * 4, _sum);
}
outptr += outw * 4;
}
}
return 0;
}
}
#endif // __SSE2__
if (elempack == 1)
{
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw3x3s1_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw3x3s2_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
}
}
// group convolution
const int channels_g = channels * elempack / group;
const int num_output_g = num_output / group;
int g_elempack = 1;
int out_g_elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
#if __AVX512F__
g_elempack = channels_g % 16 == 0 ? 16 : channels_g % 8 == 0 ? 8 : channels_g % 4 == 0 ? 4 : 1;
out_g_elempack = num_output_g % 16 == 0 ? 16 : num_output_g % 8 == 0 ? 8 : num_output_g % 4 == 0 ? 4 : 1;
#elif __AVX__
g_elempack = channels_g % 8 == 0 ? 8 : channels_g % 4 == 0 ? 4 : 1;
out_g_elempack = num_output_g % 8 == 0 ? 8 : num_output_g % 4 == 0 ? 4 : 1;
#else
g_elempack = channels_g % 4 == 0 ? 4 : 1;
out_g_elempack = num_output_g % 4 == 0 ? 4 : 1;
#endif
}
#endif // __SSE2__
// unpacking
Mat bottom_blob_bordered_unpacked = bottom_blob_bordered;
if (elempack > g_elempack)
{
Option opt_p = opt;
opt_p.blob_allocator = opt.workspace_allocator;
convert_packing(bottom_blob_bordered, bottom_blob_bordered_unpacked, g_elempack, opt_p);
}
Mat top_blob_unpacked = top_blob;
if (out_g_elempack < out_elempack)
{
top_blob_unpacked.create(outw, outh, num_output / out_g_elempack, out_elemsize / out_elempack * out_g_elempack, out_g_elempack, opt.workspace_allocator);
if (top_blob_unpacked.empty())
return -100;
}
for (int g = 0; g < group; g++)
{
const Mat bottom_blob_bordered_g = bottom_blob_bordered_unpacked.channel_range(channels_g * g / g_elempack, channels_g / g_elempack);
Mat top_blob_g = top_blob_unpacked.channel_range(num_output_g * g / out_g_elempack, num_output_g / out_g_elempack);
const ncnn::Layer* op = group_ops[g];
Option opt_g = opt;
opt_g.blob_allocator = top_blob_unpacked.allocator;
// forward
op->forward(bottom_blob_bordered_g, top_blob_g, opt_g);
}
// packing
if (out_g_elempack < out_elempack)
{
convert_packing(top_blob_unpacked, top_blob, out_elempack, opt);
}
else
{
top_blob = top_blob_unpacked;
}
return 0;
} | movq (%rdi), %rax
addq -0x48(%rax), %rdi
jmp 0x2920d6
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/convolutiondepthwise_x86_avx512.cpp |
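The space_ofs table built inside forward() flattens the dilated kernel into row-relative element offsets, so the inner accumulation loop needs only one indexed load per tap. A self-contained sketch of that construction, same arithmetic as the source but wrapped in an illustrative helper:

#include <vector>

// Element offsets of each kernel tap within the bordered input, relative to
// the top-left tap; w is the bordered input width in elements.
std::vector<int> make_space_ofs(int w, int kernel_w, int kernel_h, int dilation_w, int dilation_h)
{
    std::vector<int> space_ofs(kernel_w * kernel_h);
    int p1 = 0;
    int p2 = 0;
    const int gap = w * dilation_h - kernel_w * dilation_w; // hop to the next kernel row
    for (int i = 0; i < kernel_h; i++)
    {
        for (int j = 0; j < kernel_w; j++)
        {
            space_ofs[p1++] = p2;
            p2 += dilation_w;
        }
        p2 += gap;
    }
    return space_ofs;
}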
ncnn::ConvolutionDepthWise_x86_avx512::forward(std::vector<ncnn::Mat, std::allocator<ncnn::Mat>> const&, std::vector<ncnn::Mat, std::allocator<ncnn::Mat>>&, ncnn::Option const&) const | int ConvolutionDepthWise_x86_avx512::forward(const std::vector<Mat>& bottom_blobs, std::vector<Mat>& top_blobs, const Option& opt) const
{
const Mat& bottom_blob = bottom_blobs[0];
const Mat& _weight_data = bottom_blobs[1];
Mat& top_blob = top_blobs[0];
const int _kernel_w = _weight_data.w;
const int _kernel_h = _weight_data.h;
const int _num_output = _weight_data.c * _weight_data.elempack;
Mat weight_data_flattened;
flatten(_weight_data, weight_data_flattened, opt);
if (weight_data_flattened.empty())
return -100;
// weight_data_flattened as pack1
weight_data_flattened.w *= weight_data_flattened.elempack;
weight_data_flattened.elemsize /= weight_data_flattened.elempack;
weight_data_flattened.elempack = 1;
Mat bias_data_flattened;
if (bias_term)
{
const Mat& _bias_data = bottom_blobs[2];
flatten(_bias_data, bias_data_flattened, opt);
if (bias_data_flattened.empty())
return -100;
// bias_data_flattened as pack1
bias_data_flattened.w *= bias_data_flattened.elempack;
bias_data_flattened.elemsize /= bias_data_flattened.elempack;
bias_data_flattened.elempack = 1;
}
ncnn::Layer* op = ncnn::create_layer(ncnn::LayerType::ConvolutionDepthWise);
ncnn::ParamDict pd;
pd.set(0, _num_output);
pd.set(1, _kernel_w);
pd.set(11, _kernel_h);
pd.set(2, dilation_w);
pd.set(12, dilation_h);
pd.set(3, stride_w);
pd.set(13, stride_h);
pd.set(4, pad_left);
pd.set(15, pad_right);
pd.set(14, pad_top);
pd.set(16, pad_bottom);
pd.set(18, pad_value);
pd.set(5, bias_term);
pd.set(6, weight_data_flattened.w);
pd.set(7, group);
pd.set(8, int8_scale_term);
pd.set(9, activation_type);
pd.set(10, activation_params);
op->load_param(pd);
ncnn::Mat weights[2];
weights[0] = weight_data_flattened;
weights[1] = bias_data_flattened;
op->load_model(ncnn::ModelBinFromMatArray(weights));
op->create_pipeline(opt);
op->forward(bottom_blob, top_blob, opt);
op->destroy_pipeline(opt);
delete op;
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x168, %rsp # imm = 0x168
movq %rsi, %rbp
movq %rdi, %r13
movq (%rsi), %r14
leaq 0x48(%r14), %rdi
movq (%rdx), %rax
movq %rax, 0xb8(%rsp)
movl 0x60(%r14), %ebx
movl 0x74(%r14), %eax
movl %eax, 0x1c(%rsp)
movl 0x78(%r14), %eax
movl %eax, 0x18(%rsp)
imull 0x80(%r14), %ebx
leaq 0x70(%rsp), %rsi
andq $0x0, 0x40(%rsi)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, (%rsi)
vmovups %xmm0, 0xc(%rsi)
vmovaps %xmm0, 0x20(%rsi)
vmovups %xmm0, 0x2c(%rsi)
movq %rcx, %r15
movq %rcx, %rdx
callq 0x64ee7
pushq $-0x64
popq %r12
cmpq $0x0, 0x70(%rsp)
je 0x29a2d9
movslq 0xa8(%rsp), %rax
imulq 0xb0(%rsp), %rax
testq %rax, %rax
je 0x29a2d9
movslq 0x88(%rsp), %rcx
movl 0x9c(%rsp), %eax
imull %ecx, %eax
movl %eax, 0x9c(%rsp)
movq 0x80(%rsp), %rax
xorl %edx, %edx
divq %rcx
movq %rax, 0x80(%rsp)
movl $0x1, 0x88(%rsp)
andq $0x0, 0x60(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
vmovups %xmm0, 0x2c(%rsp)
vmovaps %xmm0, 0x40(%rsp)
vmovups %xmm0, 0x4c(%rsp)
movq (%r13), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0x100(%r13,%rax)
je 0x299e22
movl $0x90, %edi
addq (%rbp), %rdi
leaq 0x20(%rsp), %rsi
movq %r15, %rdx
callq 0x64ee7
pushq $-0x64
popq %r12
cmpq $0x0, 0x20(%rsp)
je 0x29a2ab
movslq 0x58(%rsp), %rax
imulq 0x60(%rsp), %rax
testq %rax, %rax
je 0x29a2ab
movslq 0x38(%rsp), %rcx
movl 0x4c(%rsp), %eax
imull %ecx, %eax
movl %eax, 0x4c(%rsp)
movq 0x30(%rsp), %rax
xorl %edx, %edx
divq %rcx
movq %rax, 0x30(%rsp)
movl $0x1, 0x38(%rsp)
pushq $0x2a
popq %rdi
callq 0x782bf
movq %rax, %r12
leaq 0x8(%rsp), %rdi
callq 0x71548
leaq 0x8(%rsp), %rdi
xorl %esi, %esi
movl %ebx, %edx
callq 0x7193a
leaq 0x8(%rsp), %rdi
pushq $0x1
popq %rsi
movl 0x1c(%rsp), %edx
callq 0x7193a
leaq 0x8(%rsp), %rdi
pushq $0xb
popq %rsi
movl 0x18(%rsp), %edx
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0xdc(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0x2
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0xe0(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0xc
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0xe4(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0x3
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0xe8(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0xd
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0xec(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0x4
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0xf0(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0xf
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0xf4(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0xe
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0xf8(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0x10
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
vmovss 0xfc(%r13,%rax), %xmm0
leaq 0x8(%rsp), %rdi
pushq $0x12
popq %rsi
callq 0x71952
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0x100(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0x5
popq %rsi
callq 0x7193a
movl 0x9c(%rsp), %edx
leaq 0x8(%rsp), %rdi
pushq $0x6
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0x108(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0x7
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0x10c(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0x8
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0x110(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0x9
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
leaq (%rax,%r13), %rdx
addq $0x118, %rdx # imm = 0x118
leaq 0x8(%rsp), %rdi
pushq $0xa
popq %rsi
callq 0x7196c
movq (%r12), %rax
leaq 0x8(%rsp), %rsi
movq %r12, %rdi
callq *0x10(%rax)
andq $0x0, 0x110(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0xd0(%rsp)
vmovups %xmm0, 0xdc(%rsp)
vmovaps %xmm0, 0xf0(%rsp)
vmovups %xmm0, 0xfc(%rsp)
andq $0x0, 0x158(%rsp)
vmovups %xmm0, 0x118(%rsp)
vmovups %xmm0, 0x124(%rsp)
vmovups %xmm0, 0x138(%rsp)
vmovups %xmm0, 0x144(%rsp)
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x29a090
lock
incl (%rax)
movq 0xd8(%rsp), %rax
testq %rax, %rax
je 0x29a0c7
lock
decl (%rax)
jne 0x29a0c7
movq 0xd0(%rsp), %rsi
movq 0xf0(%rsp), %rdi
testq %rdi, %rdi
je 0x29a0bf
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x29a0c7
movq %rsi, %rdi
callq 0x5f3e0
vmovaps 0x70(%rsp), %xmm0
vmovaps %xmm0, 0xd0(%rsp)
movq 0x80(%rsp), %rax
movq %rax, 0xe0(%rsp)
movl 0x88(%rsp), %eax
movl %eax, 0xe8(%rsp)
movq 0x90(%rsp), %rax
movq %rax, 0xf0(%rsp)
vmovups 0x98(%rsp), %xmm0
vmovups %xmm0, 0xf8(%rsp)
movl 0xa8(%rsp), %eax
movl %eax, 0x108(%rsp)
movq 0xb0(%rsp), %rax
movq %rax, 0x110(%rsp)
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x29a141
lock
incl (%rax)
movq 0x120(%rsp), %rax
testq %rax, %rax
je 0x29a178
lock
decl (%rax)
jne 0x29a178
movq 0x118(%rsp), %rsi
movq 0x138(%rsp), %rdi
testq %rdi, %rdi
je 0x29a170
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x29a178
movq %rsi, %rdi
callq 0x5f3e0
vmovaps 0x20(%rsp), %xmm0
leaq 0xd0(%rsp), %rsi
vmovups %xmm0, 0x48(%rsi)
movq 0x30(%rsp), %rax
movq %rax, 0x58(%rsi)
movl 0x38(%rsp), %eax
movl %eax, 0x60(%rsi)
movq 0x40(%rsp), %rax
movq %rax, 0x68(%rsi)
vmovups 0x48(%rsp), %xmm0
vmovaps %xmm0, 0x70(%rsi)
movl 0x58(%rsp), %eax
movl %eax, 0x80(%rsi)
movq 0x60(%rsp), %rax
movq %rax, 0x88(%rsi)
leaq 0xc0(%rsp), %rdi
callq 0x6b00e
movq (%r12), %rax
leaq 0xc0(%rsp), %rsi
movq %r12, %rdi
callq *0x18(%rax)
leaq 0xc0(%rsp), %rdi
callq 0x6b03a
movq (%r12), %rax
movq %r12, %rdi
movq %r15, %rsi
callq *0x20(%rax)
movq (%r12), %rax
movq %r12, %rdi
movq %r14, %rsi
movq 0xb8(%rsp), %rdx
movq %r15, %rcx
callq *0x38(%rax)
movq (%r12), %rax
movq %r12, %rdi
movq %r15, %rsi
callq *0x28(%rax)
movq (%r12), %rax
movq %r12, %rdi
callq *0x8(%rax)
pushq $0x48
popq %rbx
vxorps %xmm0, %xmm0, %xmm0
movq 0xd8(%rsp,%rbx), %rax
testq %rax, %rax
je 0x29a273
lock
decl (%rax)
jne 0x29a273
movq 0xd0(%rsp,%rbx), %rsi
movq 0xf0(%rsp,%rbx), %rdi
testq %rdi, %rdi
je 0x29a267
movq (%rdi), %rax
callq *0x18(%rax)
vxorps %xmm0, %xmm0, %xmm0
jmp 0x29a273
movq %rsi, %rdi
callq 0x5f3e0
vxorps %xmm0, %xmm0, %xmm0
leaq (%rsp,%rbx), %rax
addq $0xd0, %rax
andq $0x0, 0x40(%rax)
vmovups %xmm0, 0xc(%rax)
vmovups %xmm0, (%rax)
andl $0x0, 0x38(%rax)
vmovups %xmm0, 0x28(%rax)
addq $-0x48, %rbx
cmpq $-0x48, %rbx
jne 0x29a234
leaq 0x8(%rsp), %rdi
callq 0x71614
xorl %r12d, %r12d
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x29a2d9
lock
decl (%rax)
jne 0x29a2d9
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x29a2d1
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x29a2d9
movq %rsi, %rdi
callq 0x5f3e0
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x29a30a
lock
decl (%rax)
jne 0x29a30a
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x29a302
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x29a30a
movq %rsi, %rdi
callq 0x5f3e0
movl %r12d, %eax
addq $0x168, %rsp # imm = 0x168
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x29a43f
jmp 0x29a43f
movq %rax, %rbx
leaq 0xc0(%rsp), %rdi
callq 0x6b03a
jmp 0x29a34c
jmp 0x29a349
jmp 0x29a341
jmp 0x29a341
movq %rax, %rbx
jmp 0x29a3d6
movq %rax, %rbx
pushq $0x48
popq %r14
vxorps %xmm0, %xmm0, %xmm0
movq 0xd8(%rsp,%r14), %rax
testq %rax, %rax
je 0x29a393
lock
decl (%rax)
jne 0x29a393
movq 0xd0(%rsp,%r14), %rsi
movq 0xf0(%rsp,%r14), %rdi
testq %rdi, %rdi
je 0x29a387
movq (%rdi), %rax
callq *0x18(%rax)
vxorps %xmm0, %xmm0, %xmm0
jmp 0x29a393
movq %rsi, %rdi
callq 0x5f3e0
vxorps %xmm0, %xmm0, %xmm0
leaq (%rsp,%r14), %rax
addq $0xd0, %rax
andq $0x0, 0x40(%rax)
vmovups %xmm0, 0xc(%rax)
vmovups %xmm0, (%rax)
andl $0x0, 0x38(%rax)
vmovups %xmm0, 0x28(%rax)
addq $-0x48, %r14
cmpq $-0x48, %r14
jne 0x29a354
jmp 0x29a3cc
jmp 0x29a43f
movq %rax, %rbx
jmp 0x29a404
jmp 0x29a43f
movq %rax, %rbx
leaq 0x8(%rsp), %rdi
callq 0x71614
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x29a404
lock
decl (%rax)
jne 0x29a404
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0x29a3fe
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x29a404
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x29a435
lock
decl (%rax)
jne 0x29a435
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
jne 0x29a42f
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x29a435
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
jmp 0x29a43f
movq %rax, %rdi
callq 0x61d68
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/convolutiondepthwise_x86_avx512.cpp |
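flatten() can return a packed Mat, so the code above folds the pack factor into w and elemsize before handing the blob to the freshly built ConvolutionDepthWise op. The reinterpretation touches no data; a sketch of just that adjustment, using ncnn::Mat's real fields (the helper name is illustrative):

#include "mat.h" // ncnn::Mat

// View a flattened, possibly packed Mat as elempack=1 in place.
// Before: m.w counts packed groups; after: m.w counts scalars.
static void view_as_pack1(ncnn::Mat& m)
{
    m.w *= m.elempack;
    m.elemsize /= m.elempack;
    m.elempack = 1;
}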
virtual thunk to ncnn::ConvolutionDepthWise_x86_avx512::forward(std::vector<ncnn::Mat, std::allocator<ncnn::Mat>> const&, std::vector<ncnn::Mat, std::allocator<ncnn::Mat>>&, ncnn::Option const&) const | int ConvolutionDepthWise_x86_avx512::forward(const std::vector<Mat>& bottom_blobs, std::vector<Mat>& top_blobs, const Option& opt) const
{
const Mat& bottom_blob = bottom_blobs[0];
const Mat& _weight_data = bottom_blobs[1];
Mat& top_blob = top_blobs[0];
const int _kernel_w = _weight_data.w;
const int _kernel_h = _weight_data.h;
const int _num_output = _weight_data.c * _weight_data.elempack;
Mat weight_data_flattened;
flatten(_weight_data, weight_data_flattened, opt);
if (weight_data_flattened.empty())
return -100;
// weight_data_flattened as pack1
weight_data_flattened.w *= weight_data_flattened.elempack;
weight_data_flattened.elemsize /= weight_data_flattened.elempack;
weight_data_flattened.elempack = 1;
Mat bias_data_flattened;
if (bias_term)
{
const Mat& _bias_data = bottom_blobs[2];
flatten(_bias_data, bias_data_flattened, opt);
if (bias_data_flattened.empty())
return -100;
// bias_data_flattened as pack1
bias_data_flattened.w *= bias_data_flattened.elempack;
bias_data_flattened.elemsize /= bias_data_flattened.elempack;
bias_data_flattened.elempack = 1;
}
ncnn::Layer* op = ncnn::create_layer(ncnn::LayerType::ConvolutionDepthWise);
ncnn::ParamDict pd;
pd.set(0, _num_output);
pd.set(1, _kernel_w);
pd.set(11, _kernel_h);
pd.set(2, dilation_w);
pd.set(12, dilation_h);
pd.set(3, stride_w);
pd.set(13, stride_h);
pd.set(4, pad_left);
pd.set(15, pad_right);
pd.set(14, pad_top);
pd.set(16, pad_bottom);
pd.set(18, pad_value);
pd.set(5, bias_term);
pd.set(6, weight_data_flattened.w);
pd.set(7, group);
pd.set(8, int8_scale_term);
pd.set(9, activation_type);
pd.set(10, activation_params);
op->load_param(pd);
ncnn::Mat weights[2];
weights[0] = weight_data_flattened;
weights[1] = bias_data_flattened;
op->load_model(ncnn::ModelBinFromMatArray(weights));
op->create_pipeline(opt);
op->forward(bottom_blob, top_blob, opt);
op->destroy_pipeline(opt);
delete op;
return 0;
} | movq (%rdi), %rax
addq -0x40(%rax), %rdi
jmp 0x299cb8
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/convolutiondepthwise_x86_avx512.cpp |
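The three-instruction body above is the virtual thunk itself: under the Itanium C++ ABI it loads the vptr, applies the vcall offset stored at a negative index in the vtable (-0x40 bytes here), and tail-jumps to the shared implementation. A rough C++ model of the adjustment; thunk_forward, forward_fn and real_forward are illustrative stand-ins:

#include <cstddef>

// Illustrative model of the thunk's this-adjustment (Itanium ABI vcall
// offset); real_forward stands in for the shared non-virtual entry point.
typedef int (*forward_fn)(void* self);

static int thunk_forward(void* self, forward_fn real_forward)
{
    char* p = static_cast<char*>(self);
    char* vptr = *reinterpret_cast<char**>(p);            // movq (%rdi), %rax
    p += *reinterpret_cast<std::ptrdiff_t*>(vptr - 0x40); // addq -0x40(%rax), %rdi
    return real_forward(p);                               // jmp 0x299cb8
}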
ncnn::ConvolutionDepthWise_x86_fma::create_pipeline(ncnn::Option const&) | int ConvolutionDepthWise_x86_fma::create_pipeline(const Option& opt)
{
if (dynamic_weight)
return 0;
activation = create_activation_layer(activation_type, activation_params, opt);
#if NCNN_INT8
if (opt.use_int8_inference && weight_data.elemsize == (size_t)1u)
{
return create_pipeline_int8_x86(opt);
}
#endif
const int maxk = kernel_w * kernel_h;
int channels = (weight_data_size / group) / maxk / (num_output / group) * group;
// depth-wise
if (channels == group && group == num_output)
{
int elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
#if __AVX512F__
elempack = channels % 16 == 0 ? 16 : channels % 8 == 0 ? 8 : channels % 4 == 0 ? 4 : 1;
#elif __AVX__
elempack = channels % 8 == 0 ? 8 : channels % 4 == 0 ? 4 : 1;
#else
elempack = channels % 4 == 0 ? 4 : 1;
#endif
}
#endif // __SSE2__
#if __SSE2__
#if __AVX__
// pack16
#if __AVX512F__
if (elempack == 16)
{
Mat weight_data_r2 = weight_data.reshape(maxk, group);
convert_packing(weight_data_r2, weight_data_tm, 16, opt);
}
#endif // __AVX512F__
// pack8
if (elempack == 8)
{
Mat weight_data_r2 = weight_data.reshape(maxk, group);
convert_packing(weight_data_r2, weight_data_tm, 8, opt);
}
#endif // __AVX__
// pack4
if (elempack == 4)
{
Mat weight_data_r2 = weight_data.reshape(maxk, group);
convert_packing(weight_data_r2, weight_data_tm, 4, opt);
}
#endif // __SSE2__
if (elempack == 1)
{
// depth-wise specific
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
weight_data_tm = weight_data;
}
else if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
weight_data_tm = weight_data;
}
else
{
create_group_ops(opt);
}
}
if (opt.lightmode)
{
weight_data.release();
}
return 0;
}
// group convolution
create_group_ops(opt);
if (opt.lightmode)
{
weight_data.release();
}
return 0;
} | pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x50, %rsp
movq (%rdi), %rax
movq -0x18(%rax), %r13
cmpl $0x0, 0x160(%rdi,%r13)
je 0x29a51d
xorl %eax, %eax
addq $0x50, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
retq
movq %rsi, %r14
movq %rdi, %rbx
movl 0x110(%rdi,%r13), %ecx
decl %ecx
cmpl $0x5, %ecx
ja 0x29a874
leaq 0x15e0e3(%rip), %rax # 0x3f8620
movslq (%rax,%rcx,4), %rcx
addq %rax, %rcx
jmpq *%rcx
pushq $0x1a
popq %rdi
callq 0x782bf
movq %rax, %r15
leaq 0x8(%rsp), %r12
movq %r12, %rdi
callq 0x71548
movq (%r15), %rax
movq %r15, %rdi
movq %r12, %rsi
callq *0x10(%rax)
jmp 0x29a6b0
pushq $0x47
popq %rdi
callq 0x782bf
movq %rax, %r15
leaq 0x8(%rsp), %r12
movq %r12, %rdi
callq 0x71548
movq (%r15), %rax
movq %r15, %rdi
movq %r12, %rsi
callq *0x10(%rax)
jmp 0x29a6b0
pushq $0x36
popq %rdi
callq 0x782bf
movq %rax, %r15
leaq 0x8(%rsp), %r12
movq %r12, %rdi
callq 0x71548
movq 0x118(%rbx,%r13), %rax
vmovss (%rax), %xmm0
movq %r12, %rdi
xorl %esi, %esi
callq 0x71952
movq 0x118(%rbx,%r13), %rax
vmovss 0x4(%rax), %xmm0
leaq 0x8(%rsp), %rdi
pushq $0x1
popq %rsi
callq 0x71952
movq (%r15), %rax
leaq 0x8(%rsp), %rsi
movq %r15, %rdi
callq *0x10(%rax)
jmp 0x29a6b0
pushq $0x1e
popq %rdi
callq 0x782bf
movq %rax, %r15
leaq 0x8(%rsp), %r12
movq %r12, %rdi
callq 0x71548
movq (%r15), %rax
movq %r15, %rdi
movq %r12, %rsi
callq *0x10(%rax)
jmp 0x29a6b0
pushq $0x1a
popq %rdi
callq 0x782bf
movq %rax, %r15
leaq 0x8(%rsp), %r12
movq %r12, %rdi
callq 0x71548
movq 0x118(%rbx,%r13), %rax
vmovss (%rax), %xmm0
movq %r12, %rdi
xorl %esi, %esi
callq 0x71952
movq (%r15), %rax
leaq 0x8(%rsp), %rsi
movq %r15, %rdi
callq *0x10(%rax)
jmp 0x29a6b0
pushq $0x43
popq %rdi
callq 0x782bf
movq %rax, %r15
leaq 0x8(%rsp), %r12
movq %r12, %rdi
callq 0x71548
movq 0x118(%rbx,%r13), %rax
vmovss (%rax), %xmm0
movq %r12, %rdi
xorl %esi, %esi
callq 0x71952
movq 0x118(%rbx,%r13), %rax
vmovss 0x4(%rax), %xmm0
leaq 0x8(%rsp), %rdi
pushq $0x1
popq %rsi
callq 0x71952
movq (%r15), %rax
leaq 0x8(%rsp), %rsi
movq %r15, %rdi
callq *0x10(%rax)
leaq 0x8(%rsp), %rdi
callq 0x71614
movq (%r15), %rax
movq %r15, %rdi
movq %r14, %rsi
callq *0x20(%rax)
movq (%rbx), %rax
movq %r15, 0x8(%rbx)
movq -0x18(%rax), %r15
cmpb $0x1, 0x1e(%r14)
jne 0x29a6f3
cmpq $0x1, 0x178(%rbx,%r15)
jne 0x29a6f3
movq %rbx, %rdi
movq %r14, %rsi
callq 0x29aa72
jmp 0x29a50d
movl 0xd0(%rbx,%r15), %esi
movl 0xd4(%rbx,%r15), %r10d
movl 0xd8(%rbx,%r15), %r11d
movl %r11d, %r8d
imull %r10d, %r8d
movl 0x104(%rbx,%r15), %eax
movl 0x108(%rbx,%r15), %ecx
cltd
idivl %ecx
cltd
idivl %r8d
movl %eax, %edi
movl %esi, %eax
cltd
idivl %ecx
movl %eax, %r9d
movl %edi, %eax
cltd
idivl %r9d
cmpl %esi, %ecx
jne 0x29a812
imull %ecx, %eax
cmpl %ecx, %eax
jne 0x29a812
leaq (%rbx,%r15), %rsi
cmpb $0x1, 0x27(%r14)
jne 0x29a769
testb $0x7, %cl
je 0x29a87c
testb $0x3, %cl
je 0x29a8dd
xorl $0x3, %r10d
xorl $0x3, %r11d
orl %r10d, %r11d
jne 0x29a812
cmpl $0x1, 0xdc(%rsi)
jne 0x29a79e
cmpl $0x1, 0xe0(%rsi)
jne 0x29a79e
cmpl $0x1, 0xe4(%rsi)
jne 0x29a79e
cmpl $0x1, 0xe8(%rsi)
je 0x29a7c2
cmpl $0x1, 0xdc(%rsi)
jne 0x29a812
cmpl $0x1, 0xe0(%rsi)
jne 0x29a812
cmpl $0x2, 0xe4(%rsi)
jne 0x29a812
cmpl $0x2, 0xe8(%rsi)
jne 0x29a812
addq $0x168, %r15 # imm = 0x168
cmpq $0x28, %r15
je 0x29a81d
movq 0x8(%rbx,%r15), %rax
testq %rax, %rax
je 0x29a7dc
lock
incl (%rax)
leaq 0x28(%rbx), %r12
movq 0x30(%rbx), %rax
testq %rax, %rax
je 0x29a982
lock
decl (%rax)
jne 0x29a982
movq 0x28(%rbx), %rsi
movq 0x48(%rbx), %rdi
testq %rdi, %rdi
je 0x29a97a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x29a982
movq %rbx, %rdi
movq %r14, %rsi
callq 0x29acde
cmpb $0x1, (%r14)
jne 0x29a50d
movq (%rbx), %rax
movq -0x18(%rax), %rax
leaq (%rbx,%rax), %r14
addq %rax, %rbx
addq $0x168, %rbx # imm = 0x168
movq 0x8(%rbx), %rax
testq %rax, %rax
je 0x29a942
lock
decl (%rax)
jne 0x29a942
movq 0x168(%r14), %rsi
movq 0x188(%r14), %rdi
testq %rdi, %rdi
je 0x29a93a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x29a942
xorl %r15d, %r15d
jmp 0x29a6c9
addq $0x168, %rsi # imm = 0x168
leaq 0x8(%rsp), %r15
movq %r15, %rdi
movl %r8d, %edx
xorl %r8d, %r8d
callq 0x62e4e
leaq 0x28(%rbx), %rsi
pushq $0x8
popq %rdx
movq %r15, %rdi
movq %r14, %rcx
callq 0x64e3b
movq 0x10(%rsp), %rax
testq %rax, %rax
je 0x29a81d
lock
decl (%rax)
jne 0x29a81d
movq 0x8(%rsp), %rsi
movq 0x28(%rsp), %rdi
testq %rdi, %rdi
je 0x29a96d
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x29a81d
addq $0x168, %rsi # imm = 0x168
leaq 0x8(%rsp), %r15
movq %r15, %rdi
movl %r8d, %edx
xorl %r8d, %r8d
callq 0x62e4e
leaq 0x28(%rbx), %rsi
pushq $0x4
popq %rdx
movq %r15, %rdi
movq %r14, %rcx
callq 0x64e3b
movq 0x10(%rsp), %rax
testq %rax, %rax
je 0x29a81d
lock
decl (%rax)
jne 0x29a81d
movq 0x8(%rsp), %rsi
movq 0x28(%rsp), %rdi
testq %rdi, %rdi
je 0x29a96d
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x29a81d
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x1a8(%r14)
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, 0xc(%rbx)
vmovups %xmm0, (%rbx)
vmovups %xmm0, 0x190(%r14)
andl $0x0, 0x1a0(%r14)
jmp 0x29a50d
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x29a81d
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x68(%rbx)
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, 0xc(%r12)
vmovups %xmm0, (%r12)
andl $0x0, 0x60(%rbx)
vmovups %xmm0, 0x50(%rbx)
vmovups (%rbx,%r15), %xmm0
vmovups %xmm0, 0x28(%rbx)
movq 0x10(%rbx,%r15), %rax
movq %rax, 0x38(%rbx)
movl 0x18(%rbx,%r15), %eax
movl %eax, 0x40(%rbx)
movq 0x20(%rbx,%r15), %rax
movq %rax, 0x48(%rbx)
vmovups 0x28(%rbx,%r15), %xmm0
vmovups %xmm0, 0x50(%rbx)
movl 0x38(%rbx,%r15), %eax
movl %eax, 0x60(%rbx)
movq 0x40(%rbx,%r15), %rax
movq %rax, 0x68(%rbx)
jmp 0x29a81d
jmp 0x29aa4a
jmp 0x29aa4a
movq %rax, %rbx
movq 0x10(%rsp), %rax
testq %rax, %rax
je 0x29aa69
lock
decl (%rax)
jne 0x29aa69
movq 0x8(%rsp), %rsi
movq 0x28(%rsp), %rdi
testq %rdi, %rdi
je 0x29aa38
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x29aa69
jmp 0x29aa4a
movq %rax, %rbx
movq 0x10(%rsp), %rax
testq %rax, %rax
je 0x29aa69
lock
decl (%rax)
jne 0x29aa69
movq 0x8(%rsp), %rsi
movq 0x28(%rsp), %rdi
testq %rdi, %rdi
jne 0x29aa42
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x29aa69
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x29aa69
movq %rax, %rdi
callq 0x61d68
jmp 0x29aa5c
jmp 0x29aa5c
jmp 0x29aa5c
jmp 0x29aa5c
jmp 0x29aa5c
movq %rax, %rbx
leaq 0x8(%rsp), %rdi
callq 0x71614
movq %rbx, %rdi
callq 0x5f340
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/convolutiondepthwise_x86_fma.cpp |
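create_pipeline() keys the weight repacking on the widest SIMD pack that evenly divides the depth-wise channel count; for this _fma (AVX2) build that should be the __AVX__ branch of the source. A standalone sketch of the selection:

// Widest pack size for the AVX build: 8 floats (__m256) if the channel
// count allows it, else 4 (__m128), else scalar.
static int pick_elempack_avx(int channels, bool use_packing_layout)
{
    if (!use_packing_layout)
        return 1;
    if (channels % 8 == 0) return 8;
    if (channels % 4 == 0) return 4;
    return 1;
}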
ncnn::ConvolutionDepthWise_x86_fma::create_group_ops(ncnn::Option const&) | int ConvolutionDepthWise_x86_fma::create_group_ops(const Option& opt)
{
// create Convolution op for each group
const int maxk = kernel_w * kernel_h;
int channels = (weight_data_size / group) / maxk / (num_output / group) * group;
for (int i = 0; i < (int)group_ops.size(); i++)
delete group_ops[i];
group_ops.clear();
const int channels_g = channels / group;
const int num_output_g = num_output / group;
group_ops.resize(group);
for (int g = 0; g < group; g++)
{
Mat weight_data_g = weight_data.range(maxk * channels_g * num_output_g * g, maxk * channels_g * num_output_g).clone();
Mat bias_data_g;
if (bias_term)
bias_data_g = bias_data.range(num_output_g * g, num_output_g);
ncnn::Layer* op = ncnn::create_layer(ncnn::LayerType::Convolution);
// set param
ncnn::ParamDict pd;
pd.set(0, num_output_g); // num_output
pd.set(1, kernel_w);
pd.set(11, kernel_h);
pd.set(2, dilation_w);
pd.set(12, dilation_h);
pd.set(3, stride_w);
pd.set(13, stride_h);
pd.set(4, 0); // pad_w
pd.set(14, 0); // pad_h
pd.set(5, bias_term);
pd.set(6, maxk * channels_g * num_output_g); // weight_data_size
pd.set(8, int8_scale_term);
pd.set(9, activation_type);
pd.set(10, activation_params);
op->load_param(pd);
// set weights
if (bias_term)
{
ncnn::Mat weights[5];
weights[0] = weight_data_g;
weights[1] = bias_data_g;
#if NCNN_INT8
if (int8_scale_term)
{
Mat weight_data_int8_scales_g(num_output_g);
weight_data_int8_scales_g.fill(weight_data_int8_scales[g]);
weights[2] = weight_data_int8_scales_g;
weights[3] = bottom_blob_int8_scales.range(g, 1);
}
if (int8_scale_term > 100)
{
weights[4] = top_blob_int8_scales.range(g, 1);
}
#endif
op->load_model(ModelBinFromMatArray(weights));
}
else
{
ncnn::Mat weights[4];
weights[0] = weight_data_g;
#if NCNN_INT8
if (int8_scale_term)
{
Mat weight_data_int8_scales_g(num_output_g);
weight_data_int8_scales_g.fill(weight_data_int8_scales[g]);
weights[1] = weight_data_int8_scales_g;
weights[2] = bottom_blob_int8_scales.range(g, 1);
}
if (int8_scale_term > 100)
{
weights[3] = top_blob_int8_scales.range(g, 1);
}
#endif
op->load_model(ModelBinFromMatArray(weights));
}
op->create_pipeline(opt);
group_ops[g] = op;
}
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x278, %rsp # imm = 0x278
movq %rsi, 0x250(%rsp)
movq %rdi, %r14
movq (%rdi), %rax
movq -0x18(%rax), %rdx
movl 0xd0(%rdi,%rdx), %ecx
movl 0xd8(%rdi,%rdx), %ebp
imull 0xd4(%rdi,%rdx), %ebp
movl 0x104(%rdi,%rdx), %eax
movl 0x108(%rdi,%rdx), %r15d
cltd
idivl %r15d
cltd
idivl %ebp
movl %eax, %esi
movl %ecx, %eax
cltd
idivl %r15d
movl %eax, %ecx
movl %esi, %eax
cltd
idivl %ecx
movl %eax, %ebx
leaq 0x10(%rdi), %rax
movq %rax, 0x1e0(%rsp)
xorl %r12d, %r12d
movq 0x10(%r14), %rax
movq 0x18(%r14), %rcx
movq %rcx, %rdx
subq %rax, %rdx
shrq $0x3, %rdx
movslq %edx, %rdx
cmpq %rdx, %r12
jge 0x29ad7b
movq (%rax,%r12,8), %rdi
testq %rdi, %rdi
je 0x29ad76
movq (%rdi), %rax
callq *0x8(%rax)
incq %r12
jmp 0x29ad4d
imull %r15d, %ebx
cmpq %rax, %rcx
je 0x29ad88
movq %rax, 0x18(%r14)
movq (%r14), %rax
movq -0x18(%rax), %rcx
movslq 0x108(%r14,%rcx), %rsi
movl %ebx, %eax
cltd
idivl %esi
movl %eax, %ebx
movl 0xd0(%r14,%rcx), %eax
cltd
idivl %esi
movl %eax, %r15d
movq 0x1e0(%rsp), %rdi
callq 0x6fbc2
leaq 0x118(%r14), %rax
movq %rax, 0x258(%rsp)
imull %ebp, %ebx
imull %r15d, %ebx
movl %ebx, 0x6c(%rsp)
movslq %ebx, %rax
movq %rax, 0x260(%rsp)
movslq %r15d, %rax
movq %rax, 0x248(%rsp)
pushq $0x1
popq %rbx
xorl %edx, %edx
movl %r15d, 0x14(%rsp)
movq (%r14), %rax
movq -0x18(%rax), %rax
movslq 0x108(%r14,%rax), %rcx
cmpq %rcx, %rdx
jge 0x29baf2
movq %rdx, %rcx
movq %rdx, 0x8(%rsp)
movq 0x260(%rsp), %rdi
imulq %rdi, %rcx
movq 0x178(%r14,%rax), %rdx
imulq %rdx, %rcx
addq 0x168(%r14,%rax), %rcx
movl 0x180(%r14,%rax), %esi
movq 0x188(%r14,%rax), %rax
movq %rcx, 0x70(%rsp)
andq $0x0, 0x78(%rsp)
movq %rdx, 0x80(%rsp)
movl %esi, 0x88(%rsp)
movq %rax, 0x90(%rsp)
movl %ebx, 0x98(%rsp)
movl %edi, 0x9c(%rsp)
movabsq $0x100000001, %rax # imm = 0x100000001
movq %rax, 0xa0(%rsp)
movl %ebx, 0xa8(%rsp)
movq %rdi, 0xb0(%rsp)
leaq 0x200(%rsp), %rdi
leaq 0x70(%rsp), %rsi
xorl %edx, %edx
callq 0x624f0
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x29aed9
lock
decl (%rax)
jne 0x29aed9
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x29aed1
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x29aed9
movq %rsi, %rdi
callq 0x5f3e0
movq (%r14), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0x100(%r14,%rax)
je 0x29af49
movq 0x8(%rsp), %r15
movq 0x248(%rsp), %rcx
imulq %rcx, %r15
movq 0x1c0(%r14,%rax), %rsi
movq %rsi, 0x1f8(%rsp)
imulq %rsi, %r15
addq 0x1b0(%r14,%rax), %r15
movl 0x1c8(%r14,%rax), %edx
movl %edx, 0x1c(%rsp)
movq 0x1d0(%r14,%rax), %rax
movq %rax, 0x1f0(%rsp)
movl %ebx, %r13d
movl 0x14(%rsp), %eax
movl %eax, 0x18(%rsp)
movq %rcx, 0x1e8(%rsp)
jmp 0x29af83
xorl %r15d, %r15d
movq $0x0, 0x1f8(%rsp)
movl $0x0, 0x1c(%rsp)
movq $0x0, 0x1f0(%rsp)
xorl %r13d, %r13d
movl $0x0, 0x18(%rsp)
movq $0x0, 0x1e8(%rsp)
pushq $0x6
popq %rdi
callq 0x782bf
movq %rax, %rbp
leaq 0x268(%rsp), %r12
movq %r12, %rdi
callq 0x71548
movq %r12, %rdi
xorl %esi, %esi
movl 0x14(%rsp), %edx
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xd4(%r14,%rax), %edx
movq %r12, %rdi
movl %ebx, %esi
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xd8(%r14,%rax), %edx
movq %r12, %rdi
pushq $0xb
popq %rsi
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xdc(%r14,%rax), %edx
movq %r12, %rdi
pushq $0x2
popq %rsi
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xe0(%r14,%rax), %edx
movq %r12, %rdi
pushq $0xc
popq %rsi
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xe4(%r14,%rax), %edx
movq %r12, %rdi
pushq $0x3
popq %rsi
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xe8(%r14,%rax), %edx
movq %r12, %rdi
pushq $0xd
popq %rsi
callq 0x7193a
movq %r12, %rdi
pushq $0x4
popq %rsi
xorl %edx, %edx
callq 0x7193a
movq %r12, %rdi
pushq $0xe
popq %rsi
xorl %edx, %edx
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0x100(%r14,%rax), %edx
movq %r12, %rdi
pushq $0x5
popq %rsi
callq 0x7193a
movq %r12, %rdi
pushq $0x6
popq %rsi
movl 0x6c(%rsp), %edx
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0x10c(%r14,%rax), %edx
movq %r12, %rdi
pushq $0x8
popq %rsi
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0x110(%r14,%rax), %edx
movq %r12, %rdi
pushq $0x9
popq %rsi
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rdx
addq 0x258(%rsp), %rdx
movq %r12, %rdi
pushq $0xa
popq %rsi
callq 0x7196c
movq (%rbp), %rax
movq %rbp, %rdi
movq %r12, %rsi
callq *0x10(%rax)
movq (%r14), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0x100(%r14,%rax)
je 0x29b170
pushq $0x40
popq %rax
vxorps %xmm0, %xmm0, %xmm0
andq $0x0, 0x70(%rsp,%rax)
vmovups %xmm0, 0x30(%rsp,%rax)
vmovups %xmm0, 0x3c(%rsp,%rax)
vmovups %xmm0, 0x50(%rsp,%rax)
vmovups %xmm0, 0x5c(%rsp,%rax)
addq $0x48, %rax
cmpq $0x1a8, %rax # imm = 0x1A8
jne 0x29b0fe
movq 0x208(%rsp), %rax
testq %rax, %rax
je 0x29b138
lock
incl (%rax)
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x29b1f1
lock
decl (%rax)
jne 0x29b1f1
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x29b1e9
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x29b1f1
pushq $0x40
popq %rax
vxorps %xmm0, %xmm0, %xmm0
andq $0x0, 0x70(%rsp,%rax)
vmovups %xmm0, 0x30(%rsp,%rax)
vmovups %xmm0, 0x3c(%rsp,%rax)
vmovups %xmm0, 0x50(%rsp,%rax)
vmovups %xmm0, 0x5c(%rsp,%rax)
addq $0x48, %rax
cmpq $0x160, %rax # imm = 0x160
jne 0x29b177
movq 0x208(%rsp), %rax
testq %rax, %rax
je 0x29b1b1
lock
incl (%rax)
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x29b5a8
lock
decl (%rax)
jne 0x29b5a8
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x29b5a0
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x29b5a8
movq %rsi, %rdi
callq 0x5f3e0
vmovaps 0x200(%rsp), %xmm0
vmovaps %xmm0, 0x70(%rsp)
movq 0x210(%rsp), %rax
movq %rax, 0x80(%rsp)
movl 0x218(%rsp), %eax
movl %eax, 0x88(%rsp)
movq 0x220(%rsp), %rax
movq %rax, 0x90(%rsp)
vmovups 0x228(%rsp), %xmm0
vmovups %xmm0, 0x98(%rsp)
movl 0x238(%rsp), %eax
movl %eax, 0xa8(%rsp)
movq 0x240(%rsp), %rax
movq %rax, 0xb0(%rsp)
movq 0xc0(%rsp), %rax
testq %rax, %rax
je 0x29b295
lock
decl (%rax)
jne 0x29b295
movq 0xb8(%rsp), %rsi
movq 0xd8(%rsp), %rdi
testq %rdi, %rdi
je 0x29b28d
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x29b295
movq %rsi, %rdi
callq 0x5f3e0
movq %r15, 0xb8(%rsp)
andq $0x0, 0xc0(%rsp)
movq 0x1f8(%rsp), %rax
movq %rax, 0xc8(%rsp)
movl 0x1c(%rsp), %eax
movl %eax, 0xd0(%rsp)
movq 0x1f0(%rsp), %rax
movq %rax, 0xd8(%rsp)
movl %r13d, 0xe0(%rsp)
movl 0x18(%rsp), %eax
movl %eax, 0xe4(%rsp)
movl %r13d, 0xe8(%rsp)
movl %r13d, 0xec(%rsp)
movl %r13d, 0xf0(%rsp)
movq 0x1e8(%rsp), %rax
movq %rax, 0xf8(%rsp)
movq (%r14), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0x10c(%r14,%rax)
je 0x29b52a
andq $0x0, 0x60(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
vmovups %xmm0, 0x2c(%rsp)
leaq 0x40(%rsp), %rax
vmovups %xmm0, 0xc(%rax)
vmovaps %xmm0, (%rax)
leaq 0x20(%rsp), %rdi
movl 0x14(%rsp), %esi
pushq $0x4
popq %rdx
xorl %ecx, %ecx
callq 0x635fa
movq (%r14), %rax
movq -0x18(%rax), %rax
movq 0x1f8(%r14,%rax), %rax
movq 0x8(%rsp), %rcx
vmovss (%rax,%rcx,4), %xmm0
movl 0x58(%rsp), %eax
imull 0x60(%rsp), %eax
movq 0x20(%rsp), %rcx
testl %eax, %eax
movl $0x0, %edx
cmovlel %edx, %eax
xorl %edx, %edx
cmpl %edx, %eax
je 0x29b39a
vmovss %xmm0, (%rcx,%rdx,4)
incq %rdx
jmp 0x29b38c
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x29b3a7
lock
incl (%rax)
movq 0x108(%rsp), %rax
testq %rax, %rax
je 0x29b3de
lock
decl (%rax)
jne 0x29b3de
movq 0x100(%rsp), %rsi
movq 0x120(%rsp), %rdi
testq %rdi, %rdi
je 0x29b3d6
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x29b3de
movq %rsi, %rdi
callq 0x5f3e0
vmovaps 0x20(%rsp), %xmm0
vmovaps %xmm0, 0x100(%rsp)
movq 0x30(%rsp), %rax
movq %rax, 0x110(%rsp)
movl 0x38(%rsp), %eax
movl %eax, 0x118(%rsp)
movq 0x40(%rsp), %rax
movq %rax, 0x120(%rsp)
vmovups 0x48(%rsp), %xmm0
vmovups %xmm0, 0x128(%rsp)
movl 0x58(%rsp), %eax
movl %eax, 0x138(%rsp)
movq 0x60(%rsp), %rax
movq %rax, 0x140(%rsp)
movq (%r14), %rax
movq -0x18(%rax), %rax
movq 0x250(%r14,%rax), %r15
movq %r15, %r13
imulq 0x8(%rsp), %r13
addq 0x240(%r14,%rax), %r13
movl 0x258(%r14,%rax), %r12d
movq 0x260(%r14,%rax), %rbx
movq 0x150(%rsp), %rax
testq %rax, %rax
je 0x29b4a0
lock
decl (%rax)
jne 0x29b4a0
movq 0x148(%rsp), %rsi
movq 0x168(%rsp), %rdi
testq %rdi, %rdi
je 0x29b498
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x29b4a0
movq %rsi, %rdi
callq 0x5f3e0
movq %r13, 0x148(%rsp)
andq $0x0, 0x150(%rsp)
movq %r15, 0x158(%rsp)
movl %r12d, 0x160(%rsp)
movq %rbx, 0x168(%rsp)
vbroadcastss 0x15d12e(%rip), %xmm0 # 0x3f8600
vmovaps %xmm0, 0x170(%rsp)
movl $0x1, 0x180(%rsp)
movq $0x1, 0x188(%rsp)
movq 0x28(%rsp), %rax
testq %rax, %rax
pushq $0x1
popq %rbx
je 0x29b523
lock
decl (%rax)
jne 0x29b523
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x29b51b
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x29b523
movq %rsi, %rdi
callq 0x5f3e0
movq (%r14), %rax
movq -0x18(%rax), %rax
cmpl $0x65, 0x10c(%r14,%rax)
jl 0x29b74a
movq 0x298(%r14,%rax), %r15
movq %r15, %r13
imulq 0x8(%rsp), %r13
addq 0x288(%r14,%rax), %r13
movl 0x2a0(%r14,%rax), %ebx
movq 0x2a8(%r14,%rax), %r12
movq 0x198(%rsp), %rax
testq %rax, %rax
je 0x29b6f6
lock
decl (%rax)
jne 0x29b6f6
movq 0x190(%rsp), %rsi
movq 0x1b0(%rsp), %rdi
testq %rdi, %rdi
je 0x29b6ee
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x29b6f6
movq %rsi, %rdi
callq 0x5f3e0
vmovaps 0x200(%rsp), %xmm0
vmovaps %xmm0, 0x70(%rsp)
movq 0x210(%rsp), %rax
movq %rax, 0x80(%rsp)
movl 0x218(%rsp), %eax
movl %eax, 0x88(%rsp)
movq 0x220(%rsp), %rax
movq %rax, 0x90(%rsp)
vmovups 0x228(%rsp), %xmm0
vmovups %xmm0, 0x98(%rsp)
movl 0x238(%rsp), %eax
movl %eax, 0xa8(%rsp)
movq 0x240(%rsp), %rax
movq %rax, 0xb0(%rsp)
movq (%r14), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0x10c(%r14,%rax)
je 0x29b92d
andq $0x0, 0x60(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
vmovups %xmm0, 0x2c(%rsp)
leaq 0x40(%rsp), %rax
vmovups %xmm0, 0xc(%rax)
vmovaps %xmm0, (%rax)
leaq 0x20(%rsp), %rdi
movl 0x14(%rsp), %esi
pushq $0x4
popq %rdx
xorl %ecx, %ecx
callq 0x635fa
movq (%r14), %rax
movq -0x18(%rax), %rax
movq 0x1f8(%r14,%rax), %rax
movq 0x8(%rsp), %rcx
vmovss (%rax,%rcx,4), %xmm0
movl 0x58(%rsp), %eax
imull 0x60(%rsp), %eax
movq 0x20(%rsp), %rcx
testl %eax, %eax
movl $0x0, %edx
cmovlel %edx, %eax
xorl %edx, %edx
cmpl %edx, %eax
je 0x29b6a3
vmovss %xmm0, (%rcx,%rdx,4)
incq %rdx
jmp 0x29b695
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x29b6b0
lock
incl (%rax)
movq 0xc0(%rsp), %rax
testq %rax, %rax
je 0x29b7e2
lock
decl (%rax)
jne 0x29b7e2
movq 0xb8(%rsp), %rsi
movq 0xd8(%rsp), %rdi
testq %rdi, %rdi
je 0x29b7da
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x29b7e2
movq %rsi, %rdi
callq 0x5f3e0
movq %r13, 0x190(%rsp)
andq $0x0, 0x198(%rsp)
movq %r15, 0x1a0(%rsp)
movl %ebx, 0x1a8(%rsp)
movq %r12, 0x1b0(%rsp)
vbroadcastss 0x15ced9(%rip), %xmm0 # 0x3f8600
vmovups %xmm0, 0x1b8(%rsp)
movl $0x1, 0x1c8(%rsp)
movq $0x1, 0x1d0(%rsp)
pushq $0x1
popq %rbx
leaq 0x20(%rsp), %r15
movq %r15, %rdi
leaq 0x70(%rsp), %rsi
callq 0x6b00e
movq (%rbp), %rax
movq %rbp, %rdi
movq %r15, %rsi
callq *0x18(%rax)
movq %r15, %rdi
callq 0x6b03a
movl $0x120, %r15d # imm = 0x120
movq 0x78(%rsp,%r15), %rax
testq %rax, %rax
je 0x29b7a8
lock
decl (%rax)
jne 0x29b7a8
movq 0x70(%rsp,%r15), %rsi
movq 0x90(%rsp,%r15), %rdi
testq %rdi, %rdi
je 0x29b7a0
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x29b7a8
movq %rsi, %rdi
callq 0x5f3e0
leaq (%rsp,%r15), %rax
addq $0x70, %rax
andq $0x0, 0x40(%rax)
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, 0xc(%rax)
vmovups %xmm0, (%rax)
andl $0x0, 0x38(%rax)
vmovups %xmm0, 0x28(%rax)
addq $-0x48, %r15
cmpq $-0x48, %r15
jne 0x29b777
jmp 0x29ba7b
movq %rsi, %rdi
callq 0x5f3e0
vmovaps 0x20(%rsp), %xmm0
vmovups %xmm0, 0xb8(%rsp)
movq 0x30(%rsp), %rax
movq %rax, 0xc8(%rsp)
movl 0x38(%rsp), %eax
movl %eax, 0xd0(%rsp)
movq 0x40(%rsp), %rax
movq %rax, 0xd8(%rsp)
vmovups 0x48(%rsp), %xmm0
vmovaps %xmm0, 0xe0(%rsp)
movl 0x58(%rsp), %eax
movl %eax, 0xf0(%rsp)
movq 0x60(%rsp), %rax
movq %rax, 0xf8(%rsp)
movq (%r14), %rax
movq -0x18(%rax), %rax
movq 0x250(%r14,%rax), %r15
movq %r15, %r13
imulq 0x8(%rsp), %r13
addq 0x240(%r14,%rax), %r13
movl 0x258(%r14,%rax), %ebx
movq 0x260(%r14,%rax), %r12
movq 0x108(%rsp), %rax
testq %rax, %rax
je 0x29b8a4
lock
decl (%rax)
jne 0x29b8a4
movq 0x100(%rsp), %rsi
movq 0x120(%rsp), %rdi
testq %rdi, %rdi
je 0x29b89c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x29b8a4
movq %rsi, %rdi
callq 0x5f3e0
movq %r13, 0x100(%rsp)
andq $0x0, 0x108(%rsp)
movq %r15, 0x110(%rsp)
movl %ebx, 0x118(%rsp)
movq %r12, 0x120(%rsp)
vbroadcastss 0x15cd2b(%rip), %xmm0 # 0x3f8600
vmovups %xmm0, 0x128(%rsp)
movl $0x1, 0x138(%rsp)
movq $0x1, 0x140(%rsp)
movq 0x28(%rsp), %rax
testq %rax, %rax
pushq $0x1
popq %rbx
je 0x29b926
lock
decl (%rax)
jne 0x29b926
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x29b91e
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x29b926
movq %rsi, %rdi
callq 0x5f3e0
movq (%r14), %rax
movq -0x18(%rax), %rax
cmpl $0x65, 0x10c(%r14,%rax)
jl 0x29b9f0
movq 0x298(%r14,%rax), %r15
movq %r15, %r13
imulq 0x8(%rsp), %r13
addq 0x288(%r14,%rax), %r13
movl 0x2a0(%r14,%rax), %ebx
movq 0x2a8(%r14,%rax), %r12
movq 0x150(%rsp), %rax
testq %rax, %rax
je 0x29b99c
lock
decl (%rax)
jne 0x29b99c
movq 0x148(%rsp), %rsi
movq 0x168(%rsp), %rdi
testq %rdi, %rdi
je 0x29b994
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x29b99c
movq %rsi, %rdi
callq 0x5f3e0
movq %r13, 0x148(%rsp)
andq $0x0, 0x150(%rsp)
movq %r15, 0x158(%rsp)
movl %ebx, 0x160(%rsp)
movq %r12, 0x168(%rsp)
vbroadcastss 0x15cc33(%rip), %xmm0 # 0x3f8600
vmovaps %xmm0, 0x170(%rsp)
movl $0x1, 0x180(%rsp)
movq $0x1, 0x188(%rsp)
pushq $0x1
popq %rbx
leaq 0x20(%rsp), %r15
movq %r15, %rdi
leaq 0x70(%rsp), %rsi
callq 0x6b00e
movq (%rbp), %rax
movq %rbp, %rdi
movq %r15, %rsi
callq *0x18(%rax)
movq %r15, %rdi
callq 0x6b03a
movl $0xd8, %r15d
movq 0x78(%rsp,%r15), %rax
testq %rax, %rax
je 0x29ba4e
lock
decl (%rax)
jne 0x29ba4e
movq 0x70(%rsp,%r15), %rsi
movq 0x90(%rsp,%r15), %rdi
testq %rdi, %rdi
je 0x29ba46
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x29ba4e
movq %rsi, %rdi
callq 0x5f3e0
leaq (%rsp,%r15), %rax
addq $0x70, %rax
andq $0x0, 0x40(%rax)
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, 0xc(%rax)
vmovups %xmm0, (%rax)
andl $0x0, 0x38(%rax)
vmovups %xmm0, 0x28(%rax)
addq $-0x48, %r15
cmpq $-0x48, %r15
jne 0x29ba1d
movq (%rbp), %rax
movq %rbp, %rdi
movq 0x250(%rsp), %rsi
callq *0x20(%rax)
leaq 0x268(%rsp), %rdi
movq 0x1e0(%rsp), %rax
movq (%rax), %rax
movq 0x8(%rsp), %rcx
movq %rbp, (%rax,%rcx,8)
callq 0x71614
movq 0x208(%rsp), %rax
testq %rax, %rax
je 0x29bae5
lock
decl (%rax)
jne 0x29bae5
movq 0x200(%rsp), %rsi
movq 0x220(%rsp), %rdi
testq %rdi, %rdi
je 0x29badd
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x29bae5
movq %rsi, %rdi
callq 0x5f3e0
movq 0x8(%rsp), %rdx
incq %rdx
jmp 0x29adf3
xorl %eax, %eax
addq $0x278, %rsp # imm = 0x278
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x29bbba
jmp 0x29bd51
jmp 0x29bb12
movq %rax, %rbx
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x29bbcc
lock
decl (%rax)
jne 0x29bbcc
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0x29bb48
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x29bbcc
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x29bbcc
jmp 0x29bd51
jmp 0x29bc44
jmp 0x29bbba
jmp 0x29bb5e
movq %rax, %rbx
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x29bc56
lock
decl (%rax)
jne 0x29bc56
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0x29bb94
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x29bc56
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x29bc56
jmp 0x29bd51
jmp 0x29bd51
jmp 0x29bd51
jmp 0x29bd51
jmp 0x29bbba
jmp 0x29bc44
movq %rax, %rbx
jmp 0x29bbcc
movq %rax, %rbx
leaq 0x20(%rsp), %rdi
callq 0x6b03a
movl $0xd8, %r14d
vxorps %xmm0, %xmm0, %xmm0
movq 0x78(%rsp,%r14), %rax
testq %rax, %rax
je 0x29bc0f
lock
decl (%rax)
jne 0x29bc0f
movq 0x70(%rsp,%r14), %rsi
movq 0x90(%rsp,%r14), %rdi
testq %rdi, %rdi
je 0x29bc03
movq (%rdi), %rax
callq *0x18(%rax)
vxorps %xmm0, %xmm0, %xmm0
jmp 0x29bc0f
movq %rsi, %rdi
callq 0x5f3e0
vxorps %xmm0, %xmm0, %xmm0
leaq (%rsp,%r14), %rax
addq $0x70, %rax
andq $0x0, 0x40(%rax)
vmovups %xmm0, 0xc(%rax)
vmovups %xmm0, (%rax)
andl $0x0, 0x38(%rax)
vmovups %xmm0, 0x28(%rax)
addq $-0x48, %r14
cmpq $-0x48, %r14
jne 0x29bbd6
jmp 0x29bd05
jmp 0x29bd51
jmp 0x29bc44
movq %rax, %rbx
jmp 0x29bc56
movq %rax, %rbx
leaq 0x20(%rsp), %rdi
callq 0x6b03a
movl $0x120, %r14d # imm = 0x120
vxorps %xmm0, %xmm0, %xmm0
movq 0x78(%rsp,%r14), %rax
testq %rax, %rax
je 0x29bc99
lock
decl (%rax)
jne 0x29bc99
movq 0x70(%rsp,%r14), %rsi
movq 0x90(%rsp,%r14), %rdi
testq %rdi, %rdi
je 0x29bc8d
movq (%rdi), %rax
callq *0x18(%rax)
vxorps %xmm0, %xmm0, %xmm0
jmp 0x29bc99
movq %rsi, %rdi
callq 0x5f3e0
vxorps %xmm0, %xmm0, %xmm0
leaq (%rsp,%r14), %rax
addq $0x70, %rax
andq $0x0, 0x40(%rax)
vmovups %xmm0, 0xc(%rax)
vmovups %xmm0, (%rax)
andl $0x0, 0x38(%rax)
vmovups %xmm0, 0x28(%rax)
addq $-0x48, %r14
cmpq $-0x48, %r14
jne 0x29bc60
jmp 0x29bd05
jmp 0x29bd51
jmp 0x29bcf9
movq %rax, %rbx
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x29bd49
lock
decl (%rax)
jne 0x29bd49
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x29bd39
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x29bd49
jmp 0x29bd51
movq %rax, %rbx
jmp 0x29bd12
jmp 0x29bd51
jmp 0x29bd51
movq %rax, %rbx
leaq 0x268(%rsp), %rdi
callq 0x71614
movq 0x208(%rsp), %rax
testq %rax, %rax
je 0x29bd49
lock
decl (%rax)
jne 0x29bd49
movq 0x200(%rsp), %rsi
movq 0x220(%rsp), %rdi
testq %rdi, %rdi
jne 0x29bd43
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x29bd49
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/convolutiondepthwise_x86_fma.cpp |
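Each per-group Convolution op receives a contiguous slice of the flat weight blob; inside create_group_ops() the slice arithmetic reduces to the fragment below (maxk, channels_g and num_output_g as defined in the source):

// Group g owns maxk * channels_g * num_output_g weight scalars, laid out
// back to back, plus num_output_g bias scalars when bias_term is set.
const int weights_per_group = maxk * channels_g * num_output_g;
Mat weight_data_g = weight_data.range(weights_per_group * g, weights_per_group).clone();
Mat bias_data_g;
if (bias_term)
    bias_data_g = bias_data.range(num_output_g * g, num_output_g);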
virtual thunk to ncnn::ConvolutionDepthWise_x86_fma::create_pipeline(ncnn::Option const&) | int ConvolutionDepthWise_x86_fma::create_pipeline(const Option& opt)
{
if (dynamic_weight)
return 0;
activation = create_activation_layer(activation_type, activation_params, opt);
#if NCNN_INT8
if (opt.use_int8_inference && weight_data.elemsize == (size_t)1u)
{
return create_pipeline_int8_x86(opt);
}
#endif
const int maxk = kernel_w * kernel_h;
int channels = (weight_data_size / group) / maxk / (num_output / group) * group;
// depth-wise
if (channels == group && group == num_output)
{
int elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
#if __AVX512F__
elempack = channels % 16 == 0 ? 16 : channels % 8 == 0 ? 8 : channels % 4 == 0 ? 4 : 1;
#elif __AVX__
elempack = channels % 8 == 0 ? 8 : channels % 4 == 0 ? 4 : 1;
#else
elempack = channels % 4 == 0 ? 4 : 1;
#endif
}
#endif // __SSE2__
#if __SSE2__
#if __AVX__
// pack16
#if __AVX512F__
if (elempack == 16)
{
Mat weight_data_r2 = weight_data.reshape(maxk, group);
convert_packing(weight_data_r2, weight_data_tm, 16, opt);
}
#endif // __AVX512F__
// pack8
if (elempack == 8)
{
Mat weight_data_r2 = weight_data.reshape(maxk, group);
convert_packing(weight_data_r2, weight_data_tm, 8, opt);
}
#endif // __AVX__
// pack4
if (elempack == 4)
{
Mat weight_data_r2 = weight_data.reshape(maxk, group);
convert_packing(weight_data_r2, weight_data_tm, 4, opt);
}
#endif // __SSE2__
if (elempack == 1)
{
// depth-wise specific
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
weight_data_tm = weight_data;
}
else if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
weight_data_tm = weight_data;
}
else
{
create_group_ops(opt);
}
}
if (opt.lightmode)
{
weight_data.release();
}
return 0;
}
// group convolution
create_group_ops(opt);
if (opt.lightmode)
{
weight_data.release();
}
return 0;
} | pushq %rax
movq (%rdi), %rax
addq -0x30(%rax), %rdi
callq 0x29a4ee
xorl %eax, %eax
popq %rcx
retq
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/convolutiondepthwise_x86_fma.cpp |
ncnn::ConvolutionDepthWise_x86_fma::destroy_pipeline(ncnn::Option const&) | int ConvolutionDepthWise_x86_fma::destroy_pipeline(const Option& opt)
{
if (activation)
{
activation->destroy_pipeline(opt);
delete activation;
activation = 0;
}
for (int i = 0; i < (int)group_ops.size(); i++)
{
group_ops[i]->destroy_pipeline(opt);
delete group_ops[i];
}
group_ops.clear();
return 0;
} | pushq %r15
pushq %r14
pushq %rbx
movq %rsi, %r14
movq %rdi, %rbx
movq 0x8(%rdi), %rdi
testq %rdi, %rdi
je 0x29bd9d
movq (%rdi), %rax
movq %r14, %rsi
callq *0x28(%rax)
movq 0x8(%rbx), %rdi
testq %rdi, %rdi
je 0x29bd98
movq (%rdi), %rax
callq *0x8(%rax)
andq $0x0, 0x8(%rbx)
xorl %r15d, %r15d
movq 0x10(%rbx), %rax
movq 0x18(%rbx), %rcx
movq %rcx, %rdx
subq %rax, %rdx
shrq $0x3, %rdx
movslq %edx, %rdx
cmpq %rdx, %r15
jge 0x29bddf
movq (%rax,%r15,8), %rdi
movq (%rdi), %rax
movq %r14, %rsi
callq *0x28(%rax)
movq 0x10(%rbx), %rax
movq (%rax,%r15,8), %rdi
testq %rdi, %rdi
je 0x29bdda
movq (%rdi), %rax
callq *0x8(%rax)
incq %r15
jmp 0x29bda0
cmpq %rax, %rcx
je 0x29bde8
movq %rax, 0x18(%rbx)
xorl %eax, %eax
popq %rbx
popq %r14
popq %r15
retq
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/convolutiondepthwise_x86_fma.cpp |
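Note the two-step teardown in destroy_pipeline(): each group op's own pipeline is destroyed before the op is deleted, mirroring create_group_ops(). A minimal sketch of the same contract, as it would appear inside the member function:

// Stop each child op's pipeline first, then release it; clear the vector
// last so group_ops never holds dangling pointers mid-loop.
for (size_t i = 0; i < group_ops.size(); i++)
{
    group_ops[i]->destroy_pipeline(opt);
    delete group_ops[i];
}
group_ops.clear();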
virtual thunk to ncnn::ConvolutionDepthWise_x86_fma::destroy_pipeline(ncnn::Option const&) | int ConvolutionDepthWise_x86_fma::destroy_pipeline(const Option& opt)
{
if (activation)
{
activation->destroy_pipeline(opt);
delete activation;
activation = 0;
}
for (int i = 0; i < (int)group_ops.size(); i++)
{
group_ops[i]->destroy_pipeline(opt);
delete group_ops[i];
}
group_ops.clear();
return 0;
} | pushq %rax
movq (%rdi), %rax
addq -0x38(%rax), %rdi
callq 0x29bd6c
xorl %eax, %eax
popq %rcx
retq
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/convolutiondepthwise_x86_fma.cpp |
ncnn::ConvolutionDepthWise_x86_fma::forward(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const | int ConvolutionDepthWise_x86_fma::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
#if NCNN_INT8
if (opt.use_int8_inference && int8_scale_term)
{
return forward_int8_x86(bottom_blob, top_blob, opt);
}
#endif
int w = bottom_blob.w;
int h = bottom_blob.h;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;
Mat bottom_blob_bordered;
make_padding(bottom_blob, bottom_blob_bordered, opt);
if (bottom_blob_bordered.empty())
return -100;
w = bottom_blob_bordered.w;
h = bottom_blob_bordered.h;
int outw = (w - kernel_extent_w) / stride_w + 1;
int outh = (h - kernel_extent_h) / stride_h + 1;
int out_elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
#if __AVX512F__
out_elempack = num_output % 16 == 0 ? 16 : num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#elif __AVX__
out_elempack = num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#else
out_elempack = num_output % 4 == 0 ? 4 : 1;
#endif
}
#endif // __SSE2__
size_t out_elemsize = elemsize / elempack * out_elempack;
top_blob.create(outw, outh, num_output / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
// depth-wise
if (channels * elempack == group && group == num_output)
{
#if __SSE2__
#if __AVX__
#if __AVX512F__
if (elempack == 16)
{
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw3x3s1_pack16_avx512(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw3x3s2_pack16_avx512(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 5 && kernel_h == 5 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw5x5s1_pack16_avx512(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 5 && kernel_h == 5 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw5x5s2_pack16_avx512(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
else
{
const int maxk = kernel_w * kernel_h;
// kernel offsets
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
int gap = w * dilation_h - kernel_w * dilation_w;
for (int i = 0; i < kernel_h; i++)
{
for (int j = 0; j < kernel_w; j++)
{
space_ofs[p1] = p2;
p1++;
p2 += dilation_w;
}
p2 += gap;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < channels; g++)
{
float* outptr = top_blob.channel(g);
const float* kptr = (const float*)weight_data_tm + maxk * g * 16;
const Mat m = bottom_blob_bordered.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__m512 _sum = _mm512_set1_ps(0.f);
if (bias_term)
{
_sum = _mm512_loadu_ps(((const float*)bias_data) + g * 16);
}
const float* sptr = m.row(i * stride_h) + j * stride_w * 16;
for (int k = 0; k < maxk; k++)
{
__m512 _val = _mm512_loadu_ps(sptr + space_ofs[k] * 16);
__m512 _w = _mm512_loadu_ps(kptr + k * 16);
_sum = _mm512_fmadd_ps(_val, _w, _sum);
}
_mm512_storeu_ps(outptr, _sum);
outptr += 16;
}
}
}
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
}
#endif // __AVX512F__
if (elempack == 8)
{
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw3x3s1_pack8_avx(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw3x3s2_pack8_avx(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 5 && kernel_h == 5 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw5x5s1_pack8_avx(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 5 && kernel_h == 5 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw5x5s2_pack8_avx(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
else
{
const int maxk = kernel_w * kernel_h;
// kernel offsets
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
int gap = w * dilation_h - kernel_w * dilation_w;
for (int i = 0; i < kernel_h; i++)
{
for (int j = 0; j < kernel_w; j++)
{
space_ofs[p1] = p2;
p1++;
p2 += dilation_w;
}
p2 += gap;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < channels; g++)
{
float* outptr = top_blob.channel(g);
const float* kptr = (const float*)weight_data_tm + maxk * g * 8;
const Mat m = bottom_blob_bordered.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__m256 _sum = _mm256_set1_ps(0.f);
if (bias_term)
{
_sum = _mm256_loadu_ps(((const float*)bias_data) + g * 8);
}
const float* sptr = m.row(i * stride_h) + j * stride_w * 8;
for (int k = 0; k < maxk; k++)
{
__m256 _val = _mm256_loadu_ps(sptr + space_ofs[k] * 8);
__m256 _w = _mm256_loadu_ps(kptr + k * 8);
_sum = _mm256_comp_fmadd_ps(_val, _w, _sum);
}
_mm256_storeu_ps(outptr + j * 8, _sum);
}
outptr += outw * 8;
}
}
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
}
#endif // __AVX__
if (elempack == 4)
{
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw3x3s1_pack4_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw3x3s2_pack4_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 5 && kernel_h == 5 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw5x5s1_pack4_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 5 && kernel_h == 5 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw5x5s2_pack4_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
{
const int maxk = kernel_w * kernel_h;
// kernel offsets
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
int gap = w * dilation_h - kernel_w * dilation_w;
for (int i = 0; i < kernel_h; i++)
{
for (int j = 0; j < kernel_w; j++)
{
space_ofs[p1] = p2;
p1++;
p2 += dilation_w;
}
p2 += gap;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < channels; g++)
{
float* outptr = top_blob.channel(g);
const float* kptr = (const float*)weight_data_tm + maxk * g * 4;
const Mat m = bottom_blob_bordered.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__m128 _sum = _mm_set1_ps(0.f);
if (bias_term)
{
_sum = _mm_loadu_ps(((const float*)bias_data) + g * 4);
}
const float* sptr = m.row(i * stride_h) + j * stride_w * 4;
for (int k = 0; k < maxk; k++)
{
__m128 _val = _mm_loadu_ps(sptr + space_ofs[k] * 4);
__m128 _w = _mm_loadu_ps(kptr + k * 4);
_sum = _mm_add_ps(_mm_mul_ps(_val, _w), _sum);
}
_sum = activation_sse(_sum, activation_type, activation_params);
_mm_storeu_ps(outptr + j * 4, _sum);
}
outptr += outw * 4;
}
}
return 0;
}
}
#endif // __SSE2__
if (elempack == 1)
{
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw3x3s1_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw3x3s2_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
}
}
// group convolution
const int channels_g = channels * elempack / group;
const int num_output_g = num_output / group;
int g_elempack = 1;
int out_g_elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
#if __AVX512F__
g_elempack = channels_g % 16 == 0 ? 16 : channels_g % 8 == 0 ? 8 : channels_g % 4 == 0 ? 4 : 1;
out_g_elempack = num_output_g % 16 == 0 ? 16 : num_output_g % 8 == 0 ? 8 : num_output_g % 4 == 0 ? 4 : 1;
#elif __AVX__
g_elempack = channels_g % 8 == 0 ? 8 : channels_g % 4 == 0 ? 4 : 1;
out_g_elempack = num_output_g % 8 == 0 ? 8 : num_output_g % 4 == 0 ? 4 : 1;
#else
g_elempack = channels_g % 4 == 0 ? 4 : 1;
out_g_elempack = num_output_g % 4 == 0 ? 4 : 1;
#endif
}
#endif // __SSE2__
// unpacking
Mat bottom_blob_bordered_unpacked = bottom_blob_bordered;
if (elempack > g_elempack)
{
Option opt_p = opt;
opt_p.blob_allocator = opt.workspace_allocator;
convert_packing(bottom_blob_bordered, bottom_blob_bordered_unpacked, g_elempack, opt_p);
}
Mat top_blob_unpacked = top_blob;
if (out_g_elempack < out_elempack)
{
top_blob_unpacked.create(outw, outh, num_output / out_g_elempack, out_elemsize / out_elempack * out_g_elempack, out_g_elempack, opt.workspace_allocator);
if (top_blob_unpacked.empty())
return -100;
}
for (int g = 0; g < group; g++)
{
const Mat bottom_blob_bordered_g = bottom_blob_bordered_unpacked.channel_range(channels_g * g / g_elempack, channels_g / g_elempack);
Mat top_blob_g = top_blob_unpacked.channel_range(num_output_g * g / out_g_elempack, num_output_g / out_g_elempack);
const ncnn::Layer* op = group_ops[g];
Option opt_g = opt;
opt_g.blob_allocator = top_blob_unpacked.allocator;
// forward
op->forward(bottom_blob_bordered_g, top_blob_g, opt_g);
}
// packing
if (out_g_elempack < out_elempack)
{
convert_packing(top_blob_unpacked, top_blob, out_elempack, opt);
}
else
{
top_blob = top_blob_unpacked;
}
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x2e8, %rsp # imm = 0x2E8
movq %rdi, %r14
movq (%rdi), %rax
movq -0x18(%rax), %rdi
movq %rcx, 0x58(%rsp)
cmpb $0x1, 0x1e(%rcx)
jne 0x29be51
cmpl $0x0, 0x10c(%r14,%rdi)
je 0x29be51
movq %r14, %rdi
movq 0x58(%rsp), %rcx
addq $0x2e8, %rsp # imm = 0x2E8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp 0x29ff92
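# note: this prologue implements the NCNN_INT8 early-out from the C++ above —
# it tests opt.use_int8_inference (the byte at 0x1e(%rcx), assuming this Option
# layout) and int8_scale_term (0x10c off the vbase-adjusted this), then unwinds
# the frame and tail-jumps to what is presumably forward_int8_x86 at 0x29ff92.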
movq %rdx, 0x28(%rsp)
movl 0x38(%rsi), %ecx
movq %rcx, 0x60(%rsp)
movq 0x10(%rsi), %r13
movslq 0x18(%rsi), %r15
movl 0xd4(%r14,%rdi), %ebx
decl %ebx
imull 0xdc(%r14,%rdi), %ebx
movl 0xd8(%r14,%rdi), %ebp
decl %ebp
imull 0xe0(%r14,%rdi), %ebp
leaq 0x80(%rsp), %rdx
andq $0x0, 0x40(%rdx)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, (%rdx)
vmovups %xmm0, 0xc(%rdx)
vmovaps %xmm0, 0x20(%rdx)
vmovups %xmm0, 0x2c(%rdx)
movq -0x18(%rax), %rdi
addq %r14, %rdi
movq 0x58(%rsp), %rcx
callq 0x287daa
pushq $-0x64
popq %r12
cmpq $0x0, 0x80(%rsp)
je 0x29fd9f
movslq 0xb8(%rsp), %rax
imulq 0xc0(%rsp), %rax
testq %rax, %rax
je 0x29fd9f
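# note: bottom_blob_bordered is a zero-initialized 64-byte Mat header on the
# stack at 0x80(%rsp); the call at 0x287daa is presumably make_padding. The two
# checks mirror bottom_blob_bordered.empty(): data pointer == 0, or
# c (0xb8) * cstep (0xc0) == 0 — either way control jumps to the shared
# `return -100` path at 0x29fd9f (%r12 was preloaded with -0x64 = -100).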
notl %ebx
notl %ebp
movl 0xac(%rsp), %edi
addl %edi, %ebx
movq (%r14), %rax
movq -0x18(%rax), %rcx
movl %ebx, %eax
cltd
idivl 0xe4(%r14,%rcx)
movl %eax, %ebx
leal 0x1(%rbx), %esi
addl 0xb0(%rsp), %ebp
movl %ebp, %eax
cltd
idivl 0xe8(%r14,%rcx)
movq %rax, 0x30(%rsp)
leal 0x1(%rax), %r8d
movl 0xd0(%r14,%rcx), %ecx
movq 0x58(%rsp), %rax
cmpb $0x1, 0x27(%rax)
pushq $0x8
popq %rbp
movl %edi, 0x10(%rsp)
jne 0x29bf5d
xorl %eax, %eax
testb $0x3, %cl
sete %al
testb $0x7, %cl
leal 0x1(%rax,%rax,2), %r9d
cmovel %ebp, %r9d
jmp 0x29bf61
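# note: out_elempack selection for the __AVX__ branch of the source. The sete on
# (num_output & 3) feeds `leal 0x1(%rax,%rax,2)`, producing 4 when
# num_output % 4 == 0 and 1 otherwise, and the cmove overrides with 8 (kept in
# %ebp) when num_output % 8 == 0. If opt.use_packing_layout (byte 0x27 of the
# assumed Option layout) is false, the fallthrough below sets out_elempack = 1.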
pushq $0x1
popq %r9
movq %r13, %rax
xorl %edx, %edx
divq %r15
movq %rax, %r10
movl %r9d, %eax
movq %rax, 0x40(%rsp)
imulq %rax, %r10
movl %ecx, %eax
cltd
idivl %r9d
movq 0x58(%rsp), %rcx
movq 0x8(%rcx), %rcx
movq %rcx, (%rsp)
movq 0x28(%rsp), %r13
movq %r13, %rdi
movl %esi, 0x70(%rsp)
movl %r8d, 0xd0(%rsp)
movl %r8d, %edx
movl %eax, %ecx
movq %r10, 0xe0(%rsp)
movq %r10, %r8
callq 0x628f2
movq (%r13), %r9
testq %r9, %r9
je 0x29fd9f
movq 0x40(%r13), %r11
movslq 0x38(%r13), %rax
imulq %r11, %rax
testq %rax, %rax
je 0x29fd9f
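# note: out_elemsize = elemsize / elempack * out_elempack (the divq %r15 then
# imulq above), channel count = num_output / out_elempack (idivl %r9d); the call
# at 0x628f2 is presumably Mat::create for top_blob, followed by the same
# empty() test with the -100 return path at 0x29fd9f.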
movq %r15, %r10
movl %r10d, %eax
imull 0x60(%rsp), %eax
movq (%r14), %rcx
movq -0x18(%rcx), %rdi
movl 0xd0(%r14,%rdi), %ecx
movl 0x108(%r14,%rdi), %esi
cmpl %esi, %eax
jne 0x29e16e
cmpl %ecx, %eax
jne 0x29e16e
addq %r14, %rdi
cmpl $0x1, %r10d
je 0x29d338
movslq %ebx, %rbx
cmpl $0x4, %r10d
movq %r14, 0x8(%rsp)
je 0x29ca8e
movl %eax, %ecx
cmpl $0x8, %r10d
jne 0x29e16e
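# note: the compares above check channels * elempack == group (0x108) and
# group == num_output (0xd0), i.e. the depth-wise case; any mismatch goes to the
# grouped-convolution code at 0x29e16e. Within the depth-wise case the code
# dispatches on elempack: 1 -> scalar path at 0x29d338, 4 -> pack4 path at
# 0x29ca8e, 8 -> falls through below.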
movl 0xd4(%rdi), %eax
movl 0xd8(%rdi), %r15d
cmpl $0x5, %eax
je 0x29dca2
cmpl $0x3, %eax
jne 0x29fa97
cmpl $0x3, %r15d
jne 0x29fa97
cmpl $0x1, 0xdc(%rdi)
jne 0x29f37c
cmpl $0x1, 0xe0(%rdi)
jne 0x29f37c
cmpl $0x1, 0xe4(%rdi)
jne 0x29f37c
cmpl $0x1, 0xe8(%rdi)
jne 0x29f37c
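# note: kernel 3x3, dilation 1 and stride 1 all confirmed, so what follows
# appears to be convdw3x3s1_pack8_avx inlined into forward(); the per-channel
# bias is loaded from bias_data (0x1b0 off the layer) into %ymm0, or zeroed via
# the vxorps fallback just below when bias_data is null.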
movl 0x2c(%r13), %eax
movl 0x30(%r13), %ecx
movl 0xac(%rsp), %edx
movl 0xb8(%rsp), %esi
movq 0x1b0(%rdi), %r8
leal 0x10(,%rdx,8), %edx
movslq %edx, %rdi
leal (,%rax,8), %edx
movslq %edx, %rdx
xorl %r9d, %r9d
testl %esi, %esi
cmovlel %r9d, %esi
movq %rsi, 0x40(%rsp)
shlq $0x2, %rdx
movq %rdx, 0x10(%rsp)
shlq $0x2, %rdi
movq %r8, 0x60(%rsp)
cmpq 0x40(%rsp), %r9
je 0x29fdf4
testq %r8, %r8
je 0x29c100
movq %r9, %rdx
shlq $0x5, %rdx
vmovups (%r8,%rdx), %ymm0
jmp 0x29c104
vxorps %xmm0, %xmm0, %xmm0
movq 0x40(%r13), %r10
imulq %r9, %r10
movq 0x10(%r13), %rsi
imulq %rsi, %r10
addq (%r13), %r10
movslq 0x2c(%r13), %rdx
movslq 0x54(%r14), %r11
imulq %r9, %r11
imulq 0x38(%r14), %r11
movq 0x28(%r14), %rbx
imulq %rsi, %rdx
addq %r10, %rdx
movslq 0xac(%rsp), %rsi
movq 0xc0(%rsp), %r15
movq %r9, 0x30(%rsp)
imulq %r9, %r15
movq 0x90(%rsp), %r8
imulq %r8, %r15
addq 0x80(%rsp), %r15
imulq %r8, %rsi
leaq (%r15,%rsi), %r12
leaq (%r15,%rsi,2), %r13
leaq (%rsi,%rsi,2), %rsi
addq %r15, %rsi
xorl %ebp, %ebp
movl %ebp, %r8d
orl $0x1, %r8d
cmpl %ecx, %r8d
jge 0x29ca6f
xorl %r9d, %r9d
xorl %r8d, %r8d
leal 0x3(%r8), %r14d
cmpl %eax, %r14d
jge 0x29c5c0
vmovaps 0x20(%r15,%r9), %ymm3
vmovaps 0x40(%r15,%r9), %ymm4
vmovaps 0x60(%r15,%r9), %ymm5
vmovaps 0x80(%r15,%r9), %ymm6
vmovaps (%rbx,%r11), %ymm1
vmovaps 0x20(%rbx,%r11), %ymm13
vmovaps 0x40(%rbx,%r11), %ymm12
vmovaps 0x60(%rbx,%r11), %ymm9
vmovaps (%r15,%r9), %ymm2
vfmadd213ps %ymm0, %ymm1, %ymm2 # ymm2 = (ymm1 * ymm2) + ymm0
vfmadd231ps %ymm3, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm3) + ymm2
vfmadd213ps %ymm0, %ymm1, %ymm3 # ymm3 = (ymm1 * ymm3) + ymm0
vfmadd231ps %ymm4, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm4) + ymm3
vfmadd231ps %ymm4, %ymm12, %ymm2 # ymm2 = (ymm12 * ymm4) + ymm2
vfmadd213ps %ymm0, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm0
vfmadd231ps %ymm5, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm5) + ymm4
vfmadd231ps %ymm5, %ymm12, %ymm3 # ymm3 = (ymm12 * ymm5) + ymm3
vfmadd213ps %ymm0, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm0
vfmadd231ps %ymm6, %ymm13, %ymm5 # ymm5 = (ymm13 * ymm6) + ymm5
vfmadd231ps %ymm6, %ymm12, %ymm4 # ymm4 = (ymm12 * ymm6) + ymm4
vfmadd231ps 0xa0(%r15,%r9), %ymm12, %ymm5 # ymm5 = (ymm12 * mem) + ymm5
vmovaps 0x80(%rbx,%r11), %ymm11
vmovaps 0xa0(%rbx,%r11), %ymm8
vmovaps (%r12,%r9), %ymm6
vmovaps 0x20(%r12,%r9), %ymm7
vmovaps 0x40(%r12,%r9), %ymm10
vmovaps 0x60(%r12,%r9), %ymm15
vmovaps 0x80(%r12,%r9), %ymm14
vfmadd231ps %ymm6, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm6) + ymm2
vfmadd213ps %ymm0, %ymm1, %ymm6 # ymm6 = (ymm1 * ymm6) + ymm0
vfmadd231ps %ymm7, %ymm9, %ymm3 # ymm3 = (ymm9 * ymm7) + ymm3
vfmadd231ps %ymm7, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm7) + ymm6
vfmadd231ps %ymm7, %ymm11, %ymm2 # ymm2 = (ymm11 * ymm7) + ymm2
vfmadd213ps %ymm0, %ymm1, %ymm7 # ymm7 = (ymm1 * ymm7) + ymm0
vfmadd231ps %ymm10, %ymm9, %ymm4 # ymm4 = (ymm9 * ymm10) + ymm4
vfmadd231ps %ymm10, %ymm13, %ymm7 # ymm7 = (ymm13 * ymm10) + ymm7
vfmadd231ps %ymm10, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm10) + ymm3
vfmadd231ps %ymm10, %ymm12, %ymm6 # ymm6 = (ymm12 * ymm10) + ymm6
vfmadd231ps %ymm10, %ymm8, %ymm2 # ymm2 = (ymm8 * ymm10) + ymm2
vfmadd213ps %ymm0, %ymm1, %ymm10 # ymm10 = (ymm1 * ymm10) + ymm0
vfmadd213ps %ymm0, %ymm15, %ymm1 # ymm1 = (ymm15 * ymm1) + ymm0
vfmadd231ps %ymm15, %ymm13, %ymm10 # ymm10 = (ymm13 * ymm15) + ymm10
vfmadd231ps %ymm13, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm13) + ymm1
vmovaps 0xa0(%r12,%r9), %ymm13
vfmadd231ps %ymm15, %ymm9, %ymm5 # ymm5 = (ymm9 * ymm15) + ymm5
vfmadd231ps %ymm15, %ymm11, %ymm4 # ymm4 = (ymm11 * ymm15) + ymm4
vfmadd231ps %ymm14, %ymm11, %ymm5 # ymm5 = (ymm11 * ymm14) + ymm5
vfmadd231ps %ymm15, %ymm12, %ymm7 # ymm7 = (ymm12 * ymm15) + ymm7
vfmadd231ps %ymm14, %ymm12, %ymm10 # ymm10 = (ymm12 * ymm14) + ymm10
vfmadd231ps %ymm12, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm12) + ymm1
vfmadd231ps %ymm15, %ymm8, %ymm3 # ymm3 = (ymm8 * ymm15) + ymm3
vfmadd231ps %ymm14, %ymm8, %ymm4 # ymm4 = (ymm8 * ymm14) + ymm4
vfmadd231ps %ymm13, %ymm8, %ymm5 # ymm5 = (ymm8 * ymm13) + ymm5
vmovaps (%r13,%r9), %ymm14
vmovaps 0x20(%r13,%r9), %ymm15
vmovaps 0x40(%r13,%r9), %ymm13
vmovaps 0x60(%r13,%r9), %ymm12
vfmadd231ps %ymm14, %ymm9, %ymm6 # ymm6 = (ymm9 * ymm14) + ymm6
vfmadd231ps %ymm15, %ymm9, %ymm7 # ymm7 = (ymm9 * ymm15) + ymm7
vfmadd231ps %ymm13, %ymm9, %ymm10 # ymm10 = (ymm9 * ymm13) + ymm10
vfmadd231ps %ymm9, %ymm12, %ymm1 # ymm1 = (ymm12 * ymm9) + ymm1
vmovaps 0xc0(%rbx,%r11), %ymm9
vfmadd231ps %ymm14, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm14) + ymm2
vmovaps 0x80(%r13,%r9), %ymm14
vfmadd231ps %ymm15, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm15) + ymm6
vfmadd231ps %ymm13, %ymm11, %ymm7 # ymm7 = (ymm11 * ymm13) + ymm7
vfmadd231ps %ymm12, %ymm11, %ymm10 # ymm10 = (ymm11 * ymm12) + ymm10
vfmadd231ps %ymm11, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm11) + ymm1
vmovaps 0xe0(%rbx,%r11), %ymm11
vfmadd231ps %ymm15, %ymm9, %ymm3 # ymm3 = (ymm9 * ymm15) + ymm3
vfmadd231ps %ymm15, %ymm11, %ymm2 # ymm2 = (ymm11 * ymm15) + ymm2
vmovaps 0xa0(%r13,%r9), %ymm15
vfmadd231ps %ymm13, %ymm8, %ymm6 # ymm6 = (ymm8 * ymm13) + ymm6
vfmadd231ps %ymm12, %ymm8, %ymm7 # ymm7 = (ymm8 * ymm12) + ymm7
vfmadd231ps %ymm14, %ymm8, %ymm10 # ymm10 = (ymm8 * ymm14) + ymm10
vfmadd231ps %ymm8, %ymm15, %ymm1 # ymm1 = (ymm15 * ymm8) + ymm1
vmovaps 0x100(%rbx,%r11), %ymm8
vfmadd231ps %ymm13, %ymm9, %ymm4 # ymm4 = (ymm9 * ymm13) + ymm4
vfmadd231ps %ymm13, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm13) + ymm3
vfmadd231ps %ymm13, %ymm8, %ymm2 # ymm2 = (ymm8 * ymm13) + ymm2
vfmadd231ps %ymm12, %ymm9, %ymm5 # ymm5 = (ymm9 * ymm12) + ymm5
vfmadd231ps %ymm12, %ymm11, %ymm4 # ymm4 = (ymm11 * ymm12) + ymm4
vfmadd231ps %ymm14, %ymm11, %ymm5 # ymm5 = (ymm11 * ymm14) + ymm5
vfmadd231ps %ymm12, %ymm8, %ymm3 # ymm3 = (ymm8 * ymm12) + ymm3
vfmadd231ps %ymm14, %ymm8, %ymm4 # ymm4 = (ymm8 * ymm14) + ymm4
vfmadd231ps %ymm15, %ymm8, %ymm5 # ymm5 = (ymm8 * ymm15) + ymm5
vmovaps 0x20(%rsi,%r9), %ymm12
vmovaps 0x40(%rsi,%r9), %ymm13
vfmadd231ps (%rsi,%r9), %ymm9, %ymm6 # ymm6 = (ymm9 * mem) + ymm6
vmovaps 0x60(%rsi,%r9), %ymm14
vfmadd231ps %ymm12, %ymm9, %ymm7 # ymm7 = (ymm9 * ymm12) + ymm7
vfmadd231ps %ymm13, %ymm9, %ymm10 # ymm10 = (ymm9 * ymm13) + ymm10
vfmadd231ps %ymm9, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm9) + ymm1
vfmadd231ps %ymm12, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm12) + ymm6
vmovaps 0x80(%rsi,%r9), %ymm9
vfmadd231ps %ymm13, %ymm11, %ymm7 # ymm7 = (ymm11 * ymm13) + ymm7
vfmadd231ps %ymm14, %ymm11, %ymm10 # ymm10 = (ymm11 * ymm14) + ymm10
vfmadd231ps %ymm11, %ymm9, %ymm1 # ymm1 = (ymm9 * ymm11) + ymm1
vfmadd231ps %ymm13, %ymm8, %ymm6 # ymm6 = (ymm8 * ymm13) + ymm6
vfmadd231ps %ymm14, %ymm8, %ymm7 # ymm7 = (ymm8 * ymm14) + ymm7
vfmadd231ps %ymm9, %ymm8, %ymm10 # ymm10 = (ymm8 * ymm9) + ymm10
vfmadd231ps 0xa0(%rsi,%r9), %ymm8, %ymm1 # ymm1 = (ymm8 * mem) + ymm1
vmovaps %ymm2, (%r10,%r9)
vmovaps %ymm3, 0x20(%r10,%r9)
vmovaps %ymm4, 0x40(%r10,%r9)
vmovaps %ymm5, 0x60(%r10,%r9)
vmovaps %ymm6, (%rdx,%r9)
vmovaps %ymm7, 0x20(%rdx,%r9)
vmovaps %ymm10, 0x40(%rdx,%r9)
vmovaps %ymm1, 0x60(%rdx,%r9)
addl $0x4, %r8d
subq $-0x80, %r9
jmp 0x29c18c
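# note: end of the widest inner loop of the inlined 3x3s1 pack8 kernel — each
# iteration produces 4 output columns (8 floats each) for two output rows at
# once (row pointers in %r10 and %rdx), advancing 0x80 bytes; the blocks that
# follow are the 2-column and then 1-column tails for the same row pair.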
vmovaps 0x20(%r15,%r9), %ymm3
vmovaps 0x40(%r15,%r9), %ymm4
vmovaps (%rbx,%r11), %ymm1
vmovaps 0x20(%rbx,%r11), %ymm9
vmovaps 0x40(%rbx,%r11), %ymm8
vmovaps 0x60(%rbx,%r11), %ymm5
vmovaps (%r15,%r9), %ymm2
vfmadd213ps %ymm0, %ymm1, %ymm2 # ymm2 = (ymm1 * ymm2) + ymm0
vfmadd231ps %ymm3, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm3) + ymm2
vfmadd213ps %ymm0, %ymm1, %ymm3 # ymm3 = (ymm1 * ymm3) + ymm0
vfmadd231ps %ymm4, %ymm9, %ymm3 # ymm3 = (ymm9 * ymm4) + ymm3
vfmadd231ps %ymm4, %ymm8, %ymm2 # ymm2 = (ymm8 * ymm4) + ymm2
vfmadd231ps 0x60(%r15,%r9), %ymm8, %ymm3 # ymm3 = (ymm8 * mem) + ymm3
vmovaps 0x80(%rbx,%r11), %ymm7
vmovaps 0xa0(%rbx,%r11), %ymm6
vmovaps (%r12,%r9), %ymm4
vmovaps 0x20(%r12,%r9), %ymm10
vmovaps 0x40(%r12,%r9), %ymm11
vmovaps 0x60(%r12,%r9), %ymm12
vfmadd231ps %ymm4, %ymm5, %ymm2 # ymm2 = (ymm5 * ymm4) + ymm2
vfmadd231ps %ymm10, %ymm5, %ymm3 # ymm3 = (ymm5 * ymm10) + ymm3
vfmadd213ps %ymm0, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm0
vfmadd213ps %ymm0, %ymm10, %ymm1 # ymm1 = (ymm10 * ymm1) + ymm0
vfmadd231ps %ymm10, %ymm7, %ymm2 # ymm2 = (ymm7 * ymm10) + ymm2
vfmadd231ps %ymm11, %ymm7, %ymm3 # ymm3 = (ymm7 * ymm11) + ymm3
vfmadd231ps %ymm10, %ymm9, %ymm4 # ymm4 = (ymm9 * ymm10) + ymm4
vfmadd231ps %ymm9, %ymm11, %ymm1 # ymm1 = (ymm11 * ymm9) + ymm1
vfmadd231ps %ymm11, %ymm6, %ymm2 # ymm2 = (ymm6 * ymm11) + ymm2
vfmadd231ps %ymm12, %ymm6, %ymm3 # ymm3 = (ymm6 * ymm12) + ymm3
vfmadd231ps %ymm11, %ymm8, %ymm4 # ymm4 = (ymm8 * ymm11) + ymm4
vfmadd231ps %ymm12, %ymm8, %ymm1 # ymm1 = (ymm8 * ymm12) + ymm1
vmovaps 0xc0(%rbx,%r11), %ymm10
vmovaps 0xe0(%rbx,%r11), %ymm9
vmovaps 0x100(%rbx,%r11), %ymm8
vmovaps (%r13,%r9), %ymm11
vmovaps 0x20(%r13,%r9), %ymm12
vmovaps 0x40(%r13,%r9), %ymm13
vmovaps 0x60(%r13,%r9), %ymm14
vfmadd231ps %ymm11, %ymm10, %ymm2 # ymm2 = (ymm10 * ymm11) + ymm2
vfmadd231ps %ymm12, %ymm10, %ymm3 # ymm3 = (ymm10 * ymm12) + ymm3
vfmadd231ps %ymm11, %ymm5, %ymm4 # ymm4 = (ymm5 * ymm11) + ymm4
vfmadd231ps %ymm5, %ymm12, %ymm1 # ymm1 = (ymm12 * ymm5) + ymm1
vfmadd231ps %ymm12, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm12) + ymm2
vfmadd231ps %ymm13, %ymm9, %ymm3 # ymm3 = (ymm9 * ymm13) + ymm3
vfmadd231ps %ymm12, %ymm7, %ymm4 # ymm4 = (ymm7 * ymm12) + ymm4
vfmadd231ps %ymm7, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm7) + ymm1
vfmadd231ps %ymm13, %ymm8, %ymm2 # ymm2 = (ymm8 * ymm13) + ymm2
vfmadd231ps %ymm14, %ymm8, %ymm3 # ymm3 = (ymm8 * ymm14) + ymm3
vfmadd231ps %ymm13, %ymm6, %ymm4 # ymm4 = (ymm6 * ymm13) + ymm4
vfmadd231ps %ymm14, %ymm6, %ymm1 # ymm1 = (ymm6 * ymm14) + ymm1
vmovaps 0x20(%rsi,%r9), %ymm5
vmovaps 0x40(%rsi,%r9), %ymm6
vfmadd231ps (%rsi,%r9), %ymm10, %ymm4 # ymm4 = (ymm10 * mem) + ymm4
vfmadd231ps %ymm10, %ymm5, %ymm1 # ymm1 = (ymm5 * ymm10) + ymm1
vfmadd231ps %ymm5, %ymm9, %ymm4 # ymm4 = (ymm9 * ymm5) + ymm4
vfmadd231ps %ymm9, %ymm6, %ymm1 # ymm1 = (ymm6 * ymm9) + ymm1
vfmadd231ps %ymm6, %ymm8, %ymm4 # ymm4 = (ymm8 * ymm6) + ymm4
vfmadd231ps 0x60(%rsi,%r9), %ymm8, %ymm1 # ymm1 = (ymm8 * mem) + ymm1
vmovaps %ymm2, (%r10,%r9)
vmovaps %ymm3, 0x20(%r10,%r9)
vmovaps %ymm4, (%rdx,%r9)
vmovaps %ymm1, 0x20(%rdx,%r9)
addl $0x2, %r8d
addq $0x40, %r9
leal 0x1(%r8), %r14d
cmpl %eax, %r14d
jl 0x29c43f
movq 0x8(%rsp), %r14
cmpl %eax, %r8d
jge 0x29c6d2
vmovaps (%rbx,%r11), %ymm4
vmovaps 0x20(%rbx,%r11), %ymm5
vmovaps 0x40(%rbx,%r11), %ymm6
vmovaps (%r15,%r9), %ymm1
vfmadd213ps %ymm0, %ymm4, %ymm1 # ymm1 = (ymm4 * ymm1) + ymm0
vfmadd231ps 0x20(%r15,%r9), %ymm5, %ymm1 # ymm1 = (ymm5 * mem) + ymm1
vmovaps 0x60(%rbx,%r11), %ymm7
vfmadd231ps 0x40(%r15,%r9), %ymm6, %ymm1 # ymm1 = (ymm6 * mem) + ymm1
vmovaps 0x80(%rbx,%r11), %ymm8
vmovaps 0xa0(%rbx,%r11), %ymm3
vmovaps (%r12,%r9), %ymm2
vmovaps 0x20(%r12,%r9), %ymm9
vmovaps 0x40(%r12,%r9), %ymm10
vfmadd231ps %ymm2, %ymm7, %ymm1 # ymm1 = (ymm7 * ymm2) + ymm1
vfmadd213ps %ymm0, %ymm4, %ymm2 # ymm2 = (ymm4 * ymm2) + ymm0
vfmadd231ps %ymm9, %ymm8, %ymm1 # ymm1 = (ymm8 * ymm9) + ymm1
vfmadd231ps %ymm9, %ymm5, %ymm2 # ymm2 = (ymm5 * ymm9) + ymm2
vfmadd231ps %ymm10, %ymm3, %ymm1 # ymm1 = (ymm3 * ymm10) + ymm1
vfmadd231ps %ymm10, %ymm6, %ymm2 # ymm2 = (ymm6 * ymm10) + ymm2
vmovaps 0xc0(%rbx,%r11), %ymm4
vmovaps 0xe0(%rbx,%r11), %ymm5
vmovaps 0x100(%rbx,%r11), %ymm6
vmovaps (%r13,%r9), %ymm9
vmovaps 0x20(%r13,%r9), %ymm10
vmovaps 0x40(%r13,%r9), %ymm11
vfmadd231ps %ymm9, %ymm4, %ymm1 # ymm1 = (ymm4 * ymm9) + ymm1
vfmadd231ps %ymm9, %ymm7, %ymm2 # ymm2 = (ymm7 * ymm9) + ymm2
vfmadd231ps %ymm10, %ymm5, %ymm1 # ymm1 = (ymm5 * ymm10) + ymm1
vfmadd231ps %ymm10, %ymm8, %ymm2 # ymm2 = (ymm8 * ymm10) + ymm2
vfmadd231ps %ymm11, %ymm6, %ymm1 # ymm1 = (ymm6 * ymm11) + ymm1
vfmadd231ps %ymm11, %ymm3, %ymm2 # ymm2 = (ymm3 * ymm11) + ymm2
vfmadd231ps (%rsi,%r9), %ymm4, %ymm2 # ymm2 = (ymm4 * mem) + ymm2
vfmadd231ps 0x20(%rsi,%r9), %ymm5, %ymm2 # ymm2 = (ymm5 * mem) + ymm2
vfmadd231ps 0x40(%rsi,%r9), %ymm6, %ymm2 # ymm2 = (ymm6 * mem) + ymm2
vmovaps %ymm1, (%r10,%r9)
vmovaps %ymm2, (%rdx,%r9)
incl %r8d
addq $0x20, %r9
jmp 0x29c5d2
addq %rdi, %r15
addq %r9, %r15
addq %rdi, %r12
addq %r9, %r12
addq %rdi, %r13
addq %r9, %r13
addq %rdi, %rsi
addq %r9, %rsi
movq 0x10(%rsp), %r8
addq %r8, %r10
addq %r9, %r10
addq %r8, %rdx
addq %r9, %rdx
addl $0x2, %ebp
jmp 0x29c176
xorl %edx, %edx
xorl %esi, %esi
leal 0x3(%rsi), %r8d
cmpl %eax, %r8d
jge 0x29c9a5
vmovaps 0x20(%r15,%rdx), %ymm3
vmovaps 0x40(%r15,%rdx), %ymm4
vmovaps 0x60(%r15,%rdx), %ymm8
vmovaps 0x80(%r15,%rdx), %ymm6
vmovaps (%rbx,%r11), %ymm1
vmovaps 0x20(%rbx,%r11), %ymm9
vmovaps 0x40(%rbx,%r11), %ymm7
vmovaps 0x60(%rbx,%r11), %ymm5
vmovaps (%r15,%rdx), %ymm2
vfmadd213ps %ymm0, %ymm1, %ymm2 # ymm2 = (ymm1 * ymm2) + ymm0
vfmadd231ps %ymm3, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm3) + ymm2
vfmadd213ps %ymm0, %ymm1, %ymm3 # ymm3 = (ymm1 * ymm3) + ymm0
vfmadd231ps %ymm4, %ymm9, %ymm3 # ymm3 = (ymm9 * ymm4) + ymm3
vfmadd231ps %ymm4, %ymm7, %ymm2 # ymm2 = (ymm7 * ymm4) + ymm2
vfmadd213ps %ymm0, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm0
vfmadd213ps %ymm0, %ymm8, %ymm1 # ymm1 = (ymm8 * ymm1) + ymm0
vfmadd231ps %ymm8, %ymm9, %ymm4 # ymm4 = (ymm9 * ymm8) + ymm4
vfmadd231ps %ymm9, %ymm6, %ymm1 # ymm1 = (ymm6 * ymm9) + ymm1
vfmadd231ps %ymm8, %ymm7, %ymm3 # ymm3 = (ymm7 * ymm8) + ymm3
vfmadd231ps 0xa0(%r15,%rdx), %ymm7, %ymm1 # ymm1 = (ymm7 * mem) + ymm1
vfmadd231ps %ymm6, %ymm7, %ymm4 # ymm4 = (ymm7 * ymm6) + ymm4
vmovaps 0x80(%rbx,%r11), %ymm6
vmovaps 0xa0(%rbx,%r11), %ymm7
vmovaps 0x20(%r12,%rdx), %ymm8
vmovaps 0x40(%r12,%rdx), %ymm9
vmovaps 0x60(%r12,%rdx), %ymm10
vfmadd231ps (%r12,%rdx), %ymm5, %ymm2 # ymm2 = (ymm5 * mem) + ymm2
vmovaps 0x80(%r12,%rdx), %ymm11
vfmadd231ps %ymm8, %ymm5, %ymm3 # ymm3 = (ymm5 * ymm8) + ymm3
vfmadd231ps %ymm9, %ymm5, %ymm4 # ymm4 = (ymm5 * ymm9) + ymm4
vfmadd231ps %ymm5, %ymm10, %ymm1 # ymm1 = (ymm10 * ymm5) + ymm1
vfmadd231ps %ymm8, %ymm6, %ymm2 # ymm2 = (ymm6 * ymm8) + ymm2
vfmadd231ps %ymm9, %ymm6, %ymm3 # ymm3 = (ymm6 * ymm9) + ymm3
vfmadd231ps %ymm10, %ymm6, %ymm4 # ymm4 = (ymm6 * ymm10) + ymm4
vfmadd231ps %ymm6, %ymm11, %ymm1 # ymm1 = (ymm11 * ymm6) + ymm1
vfmadd231ps %ymm9, %ymm7, %ymm2 # ymm2 = (ymm7 * ymm9) + ymm2
vfmadd231ps %ymm10, %ymm7, %ymm3 # ymm3 = (ymm7 * ymm10) + ymm3
vfmadd231ps 0xa0(%r12,%rdx), %ymm7, %ymm1 # ymm1 = (ymm7 * mem) + ymm1
vfmadd231ps %ymm11, %ymm7, %ymm4 # ymm4 = (ymm7 * ymm11) + ymm4
vmovaps 0xc0(%rbx,%r11), %ymm5
vmovaps 0xe0(%rbx,%r11), %ymm6
vmovaps 0x100(%rbx,%r11), %ymm7
vmovaps 0x20(%r13,%rdx), %ymm8
vmovaps 0x40(%r13,%rdx), %ymm9
vmovaps 0x60(%r13,%rdx), %ymm10
vmovaps 0x80(%r13,%rdx), %ymm11
vfmadd231ps (%r13,%rdx), %ymm5, %ymm2 # ymm2 = (ymm5 * mem) + ymm2
vfmadd231ps %ymm8, %ymm5, %ymm3 # ymm3 = (ymm5 * ymm8) + ymm3
vfmadd231ps %ymm9, %ymm5, %ymm4 # ymm4 = (ymm5 * ymm9) + ymm4
vfmadd231ps %ymm5, %ymm10, %ymm1 # ymm1 = (ymm10 * ymm5) + ymm1
vfmadd231ps %ymm8, %ymm6, %ymm2 # ymm2 = (ymm6 * ymm8) + ymm2
vfmadd231ps %ymm9, %ymm6, %ymm3 # ymm3 = (ymm6 * ymm9) + ymm3
vfmadd231ps %ymm10, %ymm6, %ymm4 # ymm4 = (ymm6 * ymm10) + ymm4
vfmadd231ps %ymm6, %ymm11, %ymm1 # ymm1 = (ymm11 * ymm6) + ymm1
vfmadd231ps %ymm9, %ymm7, %ymm2 # ymm2 = (ymm7 * ymm9) + ymm2
vfmadd231ps %ymm10, %ymm7, %ymm3 # ymm3 = (ymm7 * ymm10) + ymm3
vfmadd231ps %ymm11, %ymm7, %ymm4 # ymm4 = (ymm7 * ymm11) + ymm4
vfmadd231ps 0xa0(%r13,%rdx), %ymm7, %ymm1 # ymm1 = (ymm7 * mem) + ymm1
vmovaps %ymm2, (%r10,%rdx)
vmovaps %ymm3, 0x20(%r10,%rdx)
vmovaps %ymm4, 0x40(%r10,%rdx)
vmovaps %ymm1, 0x60(%r10,%rdx)
addl $0x4, %esi
subq $-0x80, %rdx
jmp 0x29c707
vmovaps 0x20(%r15,%rdx), %ymm3
vmovaps 0x40(%r15,%rdx), %ymm4
vmovaps (%rbx,%r11), %ymm1
vmovaps 0x20(%rbx,%r11), %ymm5
vmovaps 0x40(%rbx,%r11), %ymm6
vmovaps 0x60(%rbx,%r11), %ymm7
vmovaps (%r15,%rdx), %ymm2
vfmadd213ps %ymm0, %ymm1, %ymm2 # ymm2 = (ymm1 * ymm2) + ymm0
vfmadd213ps %ymm0, %ymm3, %ymm1 # ymm1 = (ymm3 * ymm1) + ymm0
vfmadd231ps %ymm3, %ymm5, %ymm2 # ymm2 = (ymm5 * ymm3) + ymm2
vfmadd231ps %ymm5, %ymm4, %ymm1 # ymm1 = (ymm4 * ymm5) + ymm1
vfmadd231ps %ymm4, %ymm6, %ymm2 # ymm2 = (ymm6 * ymm4) + ymm2
vfmadd231ps 0x60(%r15,%rdx), %ymm6, %ymm1 # ymm1 = (ymm6 * mem) + ymm1
vmovaps 0x80(%rbx,%r11), %ymm3
vmovaps 0xa0(%rbx,%r11), %ymm4
vmovaps 0x20(%r12,%rdx), %ymm5
vfmadd231ps (%r12,%rdx), %ymm7, %ymm2 # ymm2 = (ymm7 * mem) + ymm2
vmovaps 0x40(%r12,%rdx), %ymm6
vfmadd231ps %ymm7, %ymm5, %ymm1 # ymm1 = (ymm5 * ymm7) + ymm1
vfmadd231ps %ymm5, %ymm3, %ymm2 # ymm2 = (ymm3 * ymm5) + ymm2
vfmadd231ps %ymm3, %ymm6, %ymm1 # ymm1 = (ymm6 * ymm3) + ymm1
vfmadd231ps %ymm6, %ymm4, %ymm2 # ymm2 = (ymm4 * ymm6) + ymm2
vfmadd231ps 0x60(%r12,%rdx), %ymm4, %ymm1 # ymm1 = (ymm4 * mem) + ymm1
vmovaps 0xc0(%rbx,%r11), %ymm3
vmovaps 0xe0(%rbx,%r11), %ymm4
vmovaps 0x100(%rbx,%r11), %ymm5
vmovaps 0x20(%r13,%rdx), %ymm6
vmovaps 0x40(%r13,%rdx), %ymm7
vfmadd231ps (%r13,%rdx), %ymm3, %ymm2 # ymm2 = (ymm3 * mem) + ymm2
vfmadd231ps %ymm3, %ymm6, %ymm1 # ymm1 = (ymm6 * ymm3) + ymm1
vfmadd231ps %ymm6, %ymm4, %ymm2 # ymm2 = (ymm4 * ymm6) + ymm2
vfmadd231ps %ymm4, %ymm7, %ymm1 # ymm1 = (ymm7 * ymm4) + ymm1
vfmadd231ps %ymm7, %ymm5, %ymm2 # ymm2 = (ymm5 * ymm7) + ymm2
vfmadd231ps 0x60(%r13,%rdx), %ymm5, %ymm1 # ymm1 = (ymm5 * mem) + ymm1
vmovaps %ymm2, (%r10,%rdx)
vmovaps %ymm1, 0x20(%r10,%rdx)
addl $0x2, %esi
addq $0x40, %rdx
leal 0x1(%rsi), %r8d
cmpl %eax, %r8d
jl 0x29c8b1
jmp 0x29ca4d
vmovaps (%rbx,%r11), %ymm1
vmovaps 0x20(%rbx,%r11), %ymm2
vmovaps 0x40(%rbx,%r11), %ymm3
vfmadd132ps (%r15,%rdx), %ymm0, %ymm1 # ymm1 = (ymm1 * mem) + ymm0
vfmadd231ps 0x20(%r15,%rdx), %ymm2, %ymm1 # ymm1 = (ymm2 * mem) + ymm1
vmovaps 0x60(%rbx,%r11), %ymm2
vfmadd231ps 0x40(%r15,%rdx), %ymm3, %ymm1 # ymm1 = (ymm3 * mem) + ymm1
vmovaps 0x80(%rbx,%r11), %ymm3
vfmadd231ps (%r12,%rdx), %ymm2, %ymm1 # ymm1 = (ymm2 * mem) + ymm1
vfmadd231ps 0x20(%r12,%rdx), %ymm3, %ymm1 # ymm1 = (ymm3 * mem) + ymm1
vmovaps 0xa0(%rbx,%r11), %ymm2
vfmadd231ps 0x40(%r12,%rdx), %ymm2, %ymm1 # ymm1 = (ymm2 * mem) + ymm1
vmovaps 0xc0(%rbx,%r11), %ymm2
vmovaps 0xe0(%rbx,%r11), %ymm3
vmovaps 0x100(%rbx,%r11), %ymm4
vfmadd132ps (%r13,%rdx), %ymm1, %ymm2 # ymm2 = (ymm2 * mem) + ymm1
vfmadd231ps 0x20(%r13,%rdx), %ymm3, %ymm2 # ymm2 = (ymm3 * mem) + ymm2
vfmadd231ps 0x40(%r13,%rdx), %ymm4, %ymm2 # ymm2 = (ymm4 * mem) + ymm2
vmovaps %ymm2, (%r10,%rdx)
incl %esi
addq $0x20, %rdx
cmpl %eax, %esi
jl 0x29c9b7
addq %rdx, %r15
addq $0x40, %r15
addq %rdx, %r12
addq $0x40, %r12
addq %rdx, %r13
addq $0x40, %r13
incl %ebp
addq %rdx, %r10
cmpl %ecx, %ebp
jl 0x29c703
movq 0x30(%rsp), %r9
incq %r9
movq 0x28(%rsp), %r13
movq 0x60(%rsp), %r8
jmp 0x29c0e1
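# note: the single-output-row remainder (taken when outh is odd) ends here;
# %r9 holds the channel index g, incremented before jumping back to the
# channel-loop head at 0x29c0e1 — this closes the inlined 3x3s1 pack8 kernel.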
movl 0xd4(%rdi), %eax
movl 0xd8(%rdi), %r15d
cmpl $0x5, %eax
je 0x29d684
cmpl $0x3, %eax
jne 0x29ed47
cmpl $0x3, %r15d
jne 0x29ed47
cmpl $0x1, 0xdc(%rdi)
jne 0x29e688
cmpl $0x1, 0xe0(%rdi)
jne 0x29e688
cmpl $0x1, 0xe4(%rdi)
jne 0x29e688
cmpl $0x1, 0xe8(%rdi)
jne 0x29e688
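# note: pack4 dispatch — a 5x5 kernel branches to 0x29d684, and anything other
# than 3x3 / dilation 1 / stride 1 leaves for the generic or 3x3s2 pack4 paths;
# what follows appears to be convdw3x3s1_pack4_sse inlined, the same structure
# as the pack8 kernel but on 128-bit xmm registers.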
movl 0x2c(%r13), %eax
movl 0x30(%r13), %ecx
movl 0xb8(%rsp), %edx
movq 0x1b0(%rdi), %rsi
xorl %edi, %edi
testl %ecx, %ecx
cmovlel %edi, %ecx
testl %edx, %edx
cmovlel %edi, %edx
movq %rdx, 0x1d8(%rsp)
movl $0x80, %r8d
movq %rsi, 0x1d0(%rsp)
cmpq 0x1d8(%rsp), %rdi
je 0x29fdf4
testq %rsi, %rsi
je 0x29cb44
movq %rdi, %rdx
shlq $0x4, %rdx
vmovups (%rsi,%rdx), %xmm0
jmp 0x29cb48
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x60(%rsp)
movq 0x40(%r13), %r9
imulq %rdi, %r9
imulq 0x10(%r13), %r9
addq (%r13), %r9
movq 0x28(%r14), %rdx
movslq 0x54(%r14), %rsi
imulq %rdi, %rsi
imulq 0x38(%r14), %rsi
movslq 0xac(%rsp), %r10
movq 0xc0(%rsp), %r13
imulq %rdi, %r13
movq 0x90(%rsp), %r11
imulq %r11, %r13
addq 0x80(%rsp), %r13
imulq %r11, %r10
leaq (%r10,%r13), %rbp
leaq (,%r10,2), %r11
addq %r13, %r11
vmovaps (%rdx,%rsi), %xmm0
vmovaps %xmm0, 0x70(%rsp)
vmovaps 0x10(%rdx,%rsi), %xmm0
vmovaps %xmm0, 0x10(%rsp)
vmovaps 0x20(%rdx,%rsi), %xmm0
vmovaps %xmm0, 0xd0(%rsp)
vmovaps 0x30(%rdx,%rsi), %xmm0
vmovaps %xmm0, 0xe0(%rsp)
vmovaps 0x40(%rdx,%rsi), %xmm15
vmovaps 0x50(%rdx,%rsi), %xmm0
vmovaps %xmm0, 0x170(%rsp)
vmovaps 0x60(%rdx,%rsi), %xmm0
vmovaps %xmm0, 0x30(%rsp)
vmovaps 0x70(%rdx,%rsi), %xmm0
vmovaps %xmm0, 0x160(%rsp)
vmovaps 0x80(%rdx,%rsi), %xmm0
vmovaps %xmm0, 0x150(%rsp)
xorl %r10d, %r10d
vmovaps %xmm15, 0x40(%rsp)
cmpl %ecx, %r10d
je 0x29d323
leaq 0x20(%r13), %r15
leaq 0x20(%rbp), %rbx
leaq 0x20(%r11), %rdx
xorl %esi, %esi
xorl %r12d, %r12d
leal 0x7(%r12), %r14d
cmpl %eax, %r14d
jge 0x29d152
vmovaps (%r13,%rsi), %xmm0
vmovaps 0x60(%rsp), %xmm14
vmovaps 0x70(%rsp), %xmm5
vfmadd213ps %xmm14, %xmm5, %xmm0 # xmm0 = (xmm5 * xmm0) + xmm14
vmovaps 0x10(%r13,%rsi), %xmm1
vmovaps 0x20(%r13,%rsi), %xmm11
vmovaps 0x30(%r13,%rsi), %xmm10
vfmadd231ps 0x10(%rsp), %xmm1, %xmm0 # xmm0 = (xmm1 * mem) + xmm0
vmovaps 0xd0(%rsp), %xmm6
vfmadd231ps %xmm11, %xmm6, %xmm0 # xmm0 = (xmm6 * xmm11) + xmm0
vmovaps 0xe0(%rsp), %xmm7
vfmadd231ps (%rbp,%rsi), %xmm7, %xmm0 # xmm0 = (xmm7 * mem) + xmm0
vmovaps 0x10(%rbp,%rsi), %xmm2
vmovaps 0x20(%rbp,%rsi), %xmm3
vfmadd231ps %xmm2, %xmm15, %xmm0 # xmm0 = (xmm15 * xmm2) + xmm0
vmovaps 0x170(%rsp), %xmm8
vfmadd231ps %xmm3, %xmm8, %xmm0 # xmm0 = (xmm8 * xmm3) + xmm0
vmovaps 0x30(%rsp), %xmm4
vfmadd231ps (%r11,%rsi), %xmm4, %xmm0 # xmm0 = (xmm4 * mem) + xmm0
vmovaps 0x30(%rbp,%rsi), %xmm15
vmovaps %xmm15, 0x280(%rsp)
vmovaps 0x10(%r11,%rsi), %xmm12
vmovaps 0x20(%r11,%rsi), %xmm13
vmovaps 0x30(%r11,%rsi), %xmm4
vmovaps %xmm4, 0xf0(%rsp)
vmovaps 0x160(%rsp), %xmm9
vfmadd231ps %xmm12, %xmm9, %xmm0 # xmm0 = (xmm9 * xmm12) + xmm0
vmovaps 0x150(%rsp), %xmm4
vfmadd231ps %xmm13, %xmm4, %xmm0 # xmm0 = (xmm4 * xmm13) + xmm0
vmovaps %xmm0, (%r9,%rsi)
vfmadd213ps %xmm14, %xmm5, %xmm1 # xmm1 = (xmm5 * xmm1) + xmm14
vfmadd231ps 0x10(%rsp), %xmm11, %xmm1 # xmm1 = (xmm11 * mem) + xmm1
vfmadd231ps %xmm10, %xmm6, %xmm1 # xmm1 = (xmm6 * xmm10) + xmm1
vfmadd231ps %xmm2, %xmm7, %xmm1 # xmm1 = (xmm7 * xmm2) + xmm1
vfmadd231ps 0x40(%rsp), %xmm3, %xmm1 # xmm1 = (xmm3 * mem) + xmm1
vfmadd231ps %xmm15, %xmm8, %xmm1 # xmm1 = (xmm8 * xmm15) + xmm1
vfmadd231ps 0x30(%rsp), %xmm12, %xmm1 # xmm1 = (xmm12 * mem) + xmm1
vfmadd231ps %xmm13, %xmm9, %xmm1 # xmm1 = (xmm9 * xmm13) + xmm1
vmovaps 0xf0(%rsp), %xmm0
vfmadd231ps %xmm0, %xmm4, %xmm1 # xmm1 = (xmm4 * xmm0) + xmm1
vmovaps 0x40(%r13,%rsi), %xmm12
vmovaps 0x40(%rbp,%rsi), %xmm2
vmovaps 0x40(%r11,%rsi), %xmm15
vmovaps %xmm15, 0x270(%rsp)
vmovaps %xmm1, 0x10(%r9,%rsi)
vfmadd213ps %xmm14, %xmm5, %xmm11 # xmm11 = (xmm5 * xmm11) + xmm14
vfmadd231ps 0x10(%rsp), %xmm10, %xmm11 # xmm11 = (xmm10 * mem) + xmm11
vfmadd231ps %xmm12, %xmm6, %xmm11 # xmm11 = (xmm6 * xmm12) + xmm11
vfmadd231ps %xmm3, %xmm7, %xmm11 # xmm11 = (xmm7 * xmm3) + xmm11
vmovaps 0x280(%rsp), %xmm15
vfmadd231ps 0x40(%rsp), %xmm15, %xmm11 # xmm11 = (xmm15 * mem) + xmm11
vfmadd231ps %xmm2, %xmm8, %xmm11 # xmm11 = (xmm8 * xmm2) + xmm11
vfmadd231ps 0x30(%rsp), %xmm13, %xmm11 # xmm11 = (xmm13 * mem) + xmm11
vfmadd231ps %xmm0, %xmm9, %xmm11 # xmm11 = (xmm9 * xmm0) + xmm11
vmovaps 0x270(%rsp), %xmm0
vfmadd231ps %xmm0, %xmm4, %xmm11 # xmm11 = (xmm4 * xmm0) + xmm11
vmovaps 0x50(%r13,%rsi), %xmm13
vmovaps 0x50(%rbp,%rsi), %xmm3
vmovaps 0x50(%r11,%rsi), %xmm1
vmovaps %xmm11, 0x20(%r9,%rsi)
vfmadd213ps %xmm14, %xmm5, %xmm10 # xmm10 = (xmm5 * xmm10) + xmm14
vfmadd231ps 0x10(%rsp), %xmm12, %xmm10 # xmm10 = (xmm12 * mem) + xmm10
vfmadd231ps %xmm13, %xmm6, %xmm10 # xmm10 = (xmm6 * xmm13) + xmm10
vfmadd231ps %xmm15, %xmm7, %xmm10 # xmm10 = (xmm7 * xmm15) + xmm10
vfmadd231ps 0x40(%rsp), %xmm2, %xmm10 # xmm10 = (xmm2 * mem) + xmm10
vfmadd231ps %xmm3, %xmm8, %xmm10 # xmm10 = (xmm8 * xmm3) + xmm10
vmovaps 0xf0(%rsp), %xmm11
vfmadd231ps 0x30(%rsp), %xmm11, %xmm10 # xmm10 = (xmm11 * mem) + xmm10
vfmadd231ps %xmm0, %xmm9, %xmm10 # xmm10 = (xmm9 * xmm0) + xmm10
vfmadd231ps %xmm1, %xmm4, %xmm10 # xmm10 = (xmm4 * xmm1) + xmm10
vmovaps %xmm1, 0x290(%rsp)
vmovaps 0x60(%r13,%rsi), %xmm11
vmovaps 0x60(%rbp,%rsi), %xmm15
vmovaps 0x60(%r11,%rsi), %xmm0
vmovaps %xmm10, 0x30(%r9,%rsi)
vfmadd213ps %xmm14, %xmm5, %xmm12 # xmm12 = (xmm5 * xmm12) + xmm14
vfmadd231ps 0x10(%rsp), %xmm13, %xmm12 # xmm12 = (xmm13 * mem) + xmm12
vfmadd231ps %xmm11, %xmm6, %xmm12 # xmm12 = (xmm6 * xmm11) + xmm12
vfmadd231ps %xmm2, %xmm7, %xmm12 # xmm12 = (xmm7 * xmm2) + xmm12
vfmadd231ps 0x40(%rsp), %xmm3, %xmm12 # xmm12 = (xmm3 * mem) + xmm12
vfmadd231ps %xmm15, %xmm8, %xmm12 # xmm12 = (xmm8 * xmm15) + xmm12
vmovaps 0x270(%rsp), %xmm2
vfmadd231ps 0x30(%rsp), %xmm2, %xmm12 # xmm12 = (xmm2 * mem) + xmm12
vfmadd231ps %xmm1, %xmm9, %xmm12 # xmm12 = (xmm9 * xmm1) + xmm12
vfmadd231ps %xmm0, %xmm4, %xmm12 # xmm12 = (xmm4 * xmm0) + xmm12
vmovaps %xmm0, 0xf0(%rsp)
vmovaps 0x70(%r13,%rsi), %xmm10
vmovaps 0x70(%rbp,%rsi), %xmm2
vmovaps 0x70(%r11,%rsi), %xmm1
vmovaps %xmm12, 0x40(%r9,%rsi)
vfmadd213ps %xmm14, %xmm5, %xmm13 # xmm13 = (xmm5 * xmm13) + xmm14
vfmadd231ps 0x10(%rsp), %xmm11, %xmm13 # xmm13 = (xmm11 * mem) + xmm13
vfmadd231ps %xmm10, %xmm6, %xmm13 # xmm13 = (xmm6 * xmm10) + xmm13
vfmadd231ps %xmm3, %xmm7, %xmm13 # xmm13 = (xmm7 * xmm3) + xmm13
vfmadd231ps 0x40(%rsp), %xmm15, %xmm13 # xmm13 = (xmm15 * mem) + xmm13
vfmadd231ps %xmm2, %xmm8, %xmm13 # xmm13 = (xmm8 * xmm2) + xmm13
vmovaps 0x290(%rsp), %xmm3
vfmadd231ps 0x30(%rsp), %xmm3, %xmm13 # xmm13 = (xmm3 * mem) + xmm13
vfmadd231ps %xmm0, %xmm9, %xmm13 # xmm13 = (xmm9 * xmm0) + xmm13
vfmadd231ps %xmm1, %xmm4, %xmm13 # xmm13 = (xmm4 * xmm1) + xmm13
vmovaps 0x80(%r13,%rsi), %xmm3
vmovaps 0x80(%rbp,%rsi), %xmm12
vmovaps 0x80(%r11,%rsi), %xmm0
vmovaps %xmm13, 0x50(%r9,%rsi)
vfmadd213ps %xmm14, %xmm5, %xmm11 # xmm11 = (xmm5 * xmm11) + xmm14
vfmadd231ps 0x10(%rsp), %xmm10, %xmm11 # xmm11 = (xmm10 * mem) + xmm11
vfmadd231ps %xmm3, %xmm6, %xmm11 # xmm11 = (xmm6 * xmm3) + xmm11
vfmadd231ps %xmm15, %xmm7, %xmm11 # xmm11 = (xmm7 * xmm15) + xmm11
vmovaps 0x40(%rsp), %xmm15
vfmadd231ps %xmm2, %xmm15, %xmm11 # xmm11 = (xmm15 * xmm2) + xmm11
vfmadd231ps %xmm12, %xmm8, %xmm11 # xmm11 = (xmm8 * xmm12) + xmm11
vmovaps 0xf0(%rsp), %xmm13
vfmadd231ps 0x30(%rsp), %xmm13, %xmm11 # xmm11 = (xmm13 * mem) + xmm11
vfmadd213ps %xmm14, %xmm5, %xmm10 # xmm10 = (xmm5 * xmm10) + xmm14
vfmadd231ps 0x10(%rsp), %xmm3, %xmm10 # xmm10 = (xmm3 * mem) + xmm10
vfmadd231ps 0x90(%r13,%rsi), %xmm6, %xmm10 # xmm10 = (xmm6 * mem) + xmm10
vfmadd231ps %xmm2, %xmm7, %xmm10 # xmm10 = (xmm7 * xmm2) + xmm10
vfmadd231ps %xmm12, %xmm15, %xmm10 # xmm10 = (xmm15 * xmm12) + xmm10
vfmadd231ps %xmm1, %xmm9, %xmm11 # xmm11 = (xmm9 * xmm1) + xmm11
vfmadd231ps 0x90(%rbp,%rsi), %xmm8, %xmm10 # xmm10 = (xmm8 * mem) + xmm10
vfmadd231ps 0x30(%rsp), %xmm1, %xmm10 # xmm10 = (xmm1 * mem) + xmm10
vfmadd231ps %xmm0, %xmm9, %xmm10 # xmm10 = (xmm9 * xmm0) + xmm10
vfmadd231ps 0x90(%r11,%rsi), %xmm4, %xmm10 # xmm10 = (xmm4 * mem) + xmm10
vfmadd231ps %xmm0, %xmm4, %xmm11 # xmm11 = (xmm4 * xmm0) + xmm11
vmovaps %xmm11, 0x60(%r9,%rsi)
vmovaps %xmm10, 0x70(%r9,%rsi)
addl $0x8, %r12d
addq %r8, %rsi
addq %r8, %r15
addq %r8, %rbx
addq %r8, %rdx
jmp 0x29cc45
vmovaps (%r13,%rsi), %xmm13
vmovaps 0x70(%rsp), %xmm12
vfmadd213ps 0x60(%rsp), %xmm12, %xmm13 # xmm13 = (xmm12 * xmm13) + mem
vmovaps 0x10(%r13,%rsi), %xmm1
vmovaps 0x20(%r13,%rsi), %xmm10
vmovaps 0x30(%r13,%rsi), %xmm11
vfmadd231ps 0x10(%rsp), %xmm1, %xmm13 # xmm13 = (xmm1 * mem) + xmm13
vmovaps 0xd0(%rsp), %xmm6
vfmadd231ps %xmm10, %xmm6, %xmm13 # xmm13 = (xmm6 * xmm10) + xmm13
vmovaps 0xe0(%rsp), %xmm7
vfmadd231ps (%rbp,%rsi), %xmm7, %xmm13 # xmm13 = (xmm7 * mem) + xmm13
vmovaps 0x10(%rbp,%rsi), %xmm14
vmovaps 0x20(%rbp,%rsi), %xmm5
vfmadd231ps %xmm14, %xmm15, %xmm13 # xmm13 = (xmm15 * xmm14) + xmm13
vmovaps 0x170(%rsp), %xmm8
vfmadd231ps %xmm5, %xmm8, %xmm13 # xmm13 = (xmm8 * xmm5) + xmm13
vmovaps 0x30(%rsp), %xmm0
vfmadd231ps (%r11,%rsi), %xmm0, %xmm13 # xmm13 = (xmm0 * mem) + xmm13
vmovaps 0x30(%rbp,%rsi), %xmm0
vmovaps 0x10(%r11,%rsi), %xmm15
vmovaps 0x20(%r11,%rsi), %xmm3
vmovaps 0x30(%r11,%rsi), %xmm2
vmovaps %xmm2, 0xf0(%rsp)
vmovaps 0x160(%rsp), %xmm9
vfmadd231ps %xmm15, %xmm9, %xmm13 # xmm13 = (xmm9 * xmm15) + xmm13
vmovaps 0x150(%rsp), %xmm4
vfmadd231ps %xmm3, %xmm4, %xmm13 # xmm13 = (xmm4 * xmm3) + xmm13
vmovaps %xmm13, (%r9,%rsi)
vmovaps 0x60(%rsp), %xmm13
vfmadd213ps %xmm13, %xmm12, %xmm1 # xmm1 = (xmm12 * xmm1) + xmm13
vfmadd231ps 0x10(%rsp), %xmm10, %xmm1 # xmm1 = (xmm10 * mem) + xmm1
vfmadd231ps %xmm11, %xmm6, %xmm1 # xmm1 = (xmm6 * xmm11) + xmm1
vfmadd231ps %xmm14, %xmm7, %xmm1 # xmm1 = (xmm7 * xmm14) + xmm1
vfmadd231ps 0x40(%rsp), %xmm5, %xmm1 # xmm1 = (xmm5 * mem) + xmm1
vfmadd231ps %xmm0, %xmm8, %xmm1 # xmm1 = (xmm8 * xmm0) + xmm1
vfmadd231ps 0x30(%rsp), %xmm15, %xmm1 # xmm1 = (xmm15 * mem) + xmm1
vfmadd231ps %xmm3, %xmm9, %xmm1 # xmm1 = (xmm9 * xmm3) + xmm1
vfmadd231ps %xmm2, %xmm4, %xmm1 # xmm1 = (xmm4 * xmm2) + xmm1
vmovaps 0x40(%r13,%rsi), %xmm14
vmovaps 0x40(%rbp,%rsi), %xmm15
vmovaps 0x40(%r11,%rsi), %xmm2
vmovaps %xmm1, 0x10(%r9,%rsi)
vfmadd213ps %xmm13, %xmm12, %xmm10 # xmm10 = (xmm12 * xmm10) + xmm13
vfmadd231ps 0x10(%rsp), %xmm11, %xmm10 # xmm10 = (xmm11 * mem) + xmm10
vfmadd231ps %xmm14, %xmm6, %xmm10 # xmm10 = (xmm6 * xmm14) + xmm10
vfmadd231ps %xmm5, %xmm7, %xmm10 # xmm10 = (xmm7 * xmm5) + xmm10
vfmadd231ps 0x40(%rsp), %xmm0, %xmm10 # xmm10 = (xmm0 * mem) + xmm10
vfmadd231ps %xmm15, %xmm8, %xmm10 # xmm10 = (xmm8 * xmm15) + xmm10
vfmadd231ps 0x30(%rsp), %xmm3, %xmm10 # xmm10 = (xmm3 * mem) + xmm10
vfmadd213ps %xmm13, %xmm12, %xmm11 # xmm11 = (xmm12 * xmm11) + xmm13
vfmadd231ps 0x10(%rsp), %xmm14, %xmm11 # xmm11 = (xmm14 * mem) + xmm11
vfmadd231ps 0x50(%r13,%rsi), %xmm6, %xmm11 # xmm11 = (xmm6 * mem) + xmm11
vfmadd231ps %xmm0, %xmm7, %xmm11 # xmm11 = (xmm7 * xmm0) + xmm11
vfmadd231ps 0x40(%rsp), %xmm15, %xmm11 # xmm11 = (xmm15 * mem) + xmm11
vmovaps 0x40(%rsp), %xmm15
vmovaps 0xf0(%rsp), %xmm1
vfmadd231ps %xmm1, %xmm9, %xmm10 # xmm10 = (xmm9 * xmm1) + xmm10
vfmadd231ps 0x50(%rbp,%rsi), %xmm8, %xmm11 # xmm11 = (xmm8 * mem) + xmm11
vfmadd231ps 0x30(%rsp), %xmm1, %xmm11 # xmm11 = (xmm1 * mem) + xmm11
vfmadd231ps %xmm2, %xmm9, %xmm11 # xmm11 = (xmm9 * xmm2) + xmm11
vfmadd231ps 0x50(%r11,%rsi), %xmm4, %xmm11 # xmm11 = (xmm4 * mem) + xmm11
vfmadd231ps %xmm2, %xmm4, %xmm10 # xmm10 = (xmm4 * xmm2) + xmm10
vmovaps %xmm10, 0x20(%r9,%rsi)
vmovaps %xmm11, 0x30(%r9,%rsi)
addl $0x4, %r12d
addq $0x40, %rsi
addq $0x40, %r15
addq $0x40, %rbx
addq $0x40, %rdx
leal 0x3(%r12), %r14d
cmpl %eax, %r14d
jl 0x29cfa2
jmp 0x29d25d
vmovaps 0x10(%r13,%rsi), %xmm0
vmovaps 0x20(%r13,%rsi), %xmm2
vmovaps (%r13,%rsi), %xmm1
vmovaps 0x60(%rsp), %xmm3
vmovaps 0x70(%rsp), %xmm4
vfmadd213ps %xmm3, %xmm4, %xmm1 # xmm1 = (xmm4 * xmm1) + xmm3
vmovaps 0x10(%rsp), %xmm6
vfmadd231ps %xmm0, %xmm6, %xmm1 # xmm1 = (xmm6 * xmm0) + xmm1
vmovaps 0xd0(%rsp), %xmm5
vfmadd231ps %xmm2, %xmm5, %xmm1 # xmm1 = (xmm5 * xmm2) + xmm1
vfmadd213ps %xmm3, %xmm4, %xmm0 # xmm0 = (xmm4 * xmm0) + xmm3
vfmadd231ps %xmm2, %xmm6, %xmm0 # xmm0 = (xmm6 * xmm2) + xmm0
vmovaps 0x10(%rbp,%rsi), %xmm2
vmovaps 0xe0(%rsp), %xmm3
vfmadd231ps (%rbp,%rsi), %xmm3, %xmm1 # xmm1 = (xmm3 * mem) + xmm1
vfmadd231ps 0x30(%r13,%rsi), %xmm5, %xmm0 # xmm0 = (xmm5 * mem) + xmm0
vfmadd231ps %xmm2, %xmm15, %xmm1 # xmm1 = (xmm15 * xmm2) + xmm1
vfmadd231ps %xmm2, %xmm3, %xmm0 # xmm0 = (xmm3 * xmm2) + xmm0
vmovaps 0x20(%rbp,%rsi), %xmm2
vmovaps 0x170(%rsp), %xmm3
vfmadd231ps %xmm2, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm2) + xmm1
vfmadd231ps %xmm2, %xmm15, %xmm0 # xmm0 = (xmm15 * xmm2) + xmm0
vmovaps 0x10(%r11,%rsi), %xmm2
vmovaps 0x30(%rsp), %xmm5
vfmadd231ps (%r11,%rsi), %xmm5, %xmm1 # xmm1 = (xmm5 * mem) + xmm1
vmovaps 0x160(%rsp), %xmm4
vfmadd231ps %xmm2, %xmm4, %xmm1 # xmm1 = (xmm4 * xmm2) + xmm1
vfmadd231ps 0x30(%rbp,%rsi), %xmm3, %xmm0 # xmm0 = (xmm3 * mem) + xmm0
vfmadd231ps %xmm2, %xmm5, %xmm0 # xmm0 = (xmm5 * xmm2) + xmm0
vmovaps 0x20(%r11,%rsi), %xmm2
vfmadd231ps %xmm2, %xmm4, %xmm0 # xmm0 = (xmm4 * xmm2) + xmm0
vmovaps 0x150(%rsp), %xmm3
vfmadd231ps 0x30(%r11,%rsi), %xmm3, %xmm0 # xmm0 = (xmm3 * mem) + xmm0
vfmadd231ps %xmm2, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm2) + xmm1
vmovaps %xmm1, (%r9,%rsi)
vmovaps %xmm0, 0x10(%r9,%rsi)
addl $0x2, %r12d
addq $0x20, %rsi
addq $0x20, %r15
addq $0x20, %rbx
addq $0x20, %rdx
leal 0x1(%r12), %r14d
cmpl %eax, %r14d
jl 0x29d165
addq %rsi, %r9
movq 0x8(%rsp), %r14
cmpl %eax, %r12d
jge 0x29d312
vmovaps -0x20(%r15), %xmm0
vmovaps 0x70(%rsp), %xmm1
vfmadd213ps 0x60(%rsp), %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + mem
vmovaps 0x10(%rsp), %xmm1
vfmadd231ps -0x10(%r15), %xmm1, %xmm0 # xmm0 = (xmm1 * mem) + xmm0
vmovaps 0xd0(%rsp), %xmm1
vfmadd231ps (%r15), %xmm1, %xmm0 # xmm0 = (xmm1 * mem) + xmm0
vmovaps 0xe0(%rsp), %xmm1
vfmadd231ps -0x20(%rbx), %xmm1, %xmm0 # xmm0 = (xmm1 * mem) + xmm0
vfmadd231ps -0x10(%rbx), %xmm15, %xmm0 # xmm0 = (xmm15 * mem) + xmm0
vmovaps 0x170(%rsp), %xmm1
vfmadd231ps (%rbx), %xmm1, %xmm0 # xmm0 = (xmm1 * mem) + xmm0
vmovaps 0x30(%rsp), %xmm1
vfmadd231ps -0x20(%rdx), %xmm1, %xmm0 # xmm0 = (xmm1 * mem) + xmm0
vmovaps 0x160(%rsp), %xmm1
vfmadd231ps -0x10(%rdx), %xmm1, %xmm0 # xmm0 = (xmm1 * mem) + xmm0
vmovaps 0x150(%rsp), %xmm1
vfmadd231ps (%rdx), %xmm1, %xmm0 # xmm0 = (xmm1 * mem) + xmm0
vmovaps %xmm0, (%r9)
addq $0x10, %r9
incl %r12d
addq $0x10, %r15
addq $0x10, %rbx
addq $0x10, %rdx
jmp 0x29d273
incl %r10d
movq %r15, %r13
movq %rbx, %rbp
movq %rdx, %r11
jmp 0x29cc2b
incq %rdi
movq 0x28(%rsp), %r13
movq 0x1d0(%rsp), %rsi
jmp 0x29cb23
cmpl $0x3, 0xd4(%rdi)
movl %eax, %ecx
jne 0x29e16e
cmpl $0x3, 0xd8(%rdi)
movl %eax, %ecx
jne 0x29e16e
cmpl $0x1, 0xdc(%rdi)
movq %r9, 0x30(%rsp)
jne 0x29dfac
cmpl $0x1, 0xe0(%rdi)
jne 0x29dfac
cmpl $0x1, 0xe4(%rdi)
jne 0x29dfac
cmpl $0x1, 0xe8(%rdi)
jne 0x29dfac
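# note: elempack == 1 dispatch — only 3x3 s1 (below) and 3x3 s2 (0x29dfac) are
# specialized, matching the C++ source; everything else goes to the grouped path
# at 0x29e16e. The scalar 3x3s1 body vectorizes across the 3x3 window with
# shuffles plus vhaddps horizontal adds, emitting two output rows per iteration.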
movslq 0xac(%rsp), %rax
movl 0x2c(%r13), %edx
movl 0x30(%r13), %r8d
movl 0xb8(%rsp), %ecx
movq 0x28(%r14), %r10
imulq 0x10(%r13), %r11
movq 0x1b0(%rdi), %rsi
movslq %edx, %rdi
movq 0x80(%rsp), %rbx
movq %rbx, 0x170(%rsp)
movq 0x90(%rsp), %rbx
imulq 0xc0(%rsp), %rbx
movq %rbx, 0x160(%rsp)
leaq (%rax,%rax), %rbx
movq %rbx, 0x150(%rsp)
leaq (%rax,%rax,2), %rbx
movq %rbx, 0xf0(%rsp)
movq %rax, 0xd0(%rsp)
addl $0x2, %eax
movslq %eax, %rbp
xorl %eax, %eax
testl %ecx, %ecx
cmovlel %eax, %ecx
movq %rcx, 0x60(%rsp)
shlq $0x2, %rbp
movq %rdi, 0x8(%rsp)
leaq (,%rdi,4), %rcx
movq %rcx, 0x10(%rsp)
movq %rsi, 0xe0(%rsp)
movq %r11, 0x70(%rsp)
cmpq 0x60(%rsp), %rax
je 0x29fdf4
testq %rsi, %rsi
je 0x29d44e
vmovss (%rsi,%rax,4), %xmm0
jmp 0x29d452
vxorps %xmm0, %xmm0, %xmm0
movq %r11, %rsi
imulq %rax, %rsi
addq %r9, %rsi
imulq $0x24, %rax, %r13
movq 0x160(%rsp), %r15
movq %rax, 0x40(%rsp)
imulq %rax, %r15
addq 0x170(%rsp), %r15
movq 0xd0(%rsp), %rax
leaq (%r15,%rax,4), %rax
movq 0x150(%rsp), %rcx
leaq (%r15,%rcx,4), %r9
movq 0xf0(%rsp), %rcx
leaq (%r15,%rcx,4), %r11
xorl %edi, %edi
movq %rsi, %rbx
movl %edi, %ecx
orl $0x1, %ecx
cmpl %r8d, %ecx
jge 0x29d5c7
movq 0x8(%rsp), %rcx
leaq (%rsi,%rcx,4), %rsi
xorl %r12d, %r12d
movl %edx, %ecx
testl %ecx, %ecx
jle 0x29d59c
vmovsd 0x4(%r15,%r12), %xmm1
vmovss (%r15,%r12), %xmm2
vmovlhps %xmm1, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm1[0]
vshufps $0xd8, %xmm1, %xmm2, %xmm1 # xmm1 = xmm2[0,2],xmm1[1,3]
vmovss (%rax,%r12), %xmm2
vinsertps $0x30, %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0,1,2],xmm2[0]
vmovups (%r10,%r13), %xmm3
vmovups 0x10(%r10,%r13), %xmm4
vmovsd 0x4(%rax,%r12), %xmm5
vmovss (%r9,%r12), %xmm6
vinsertps $0x20, %xmm6, %xmm5, %xmm7 # xmm7 = xmm5[0,1],xmm6[0],xmm5[3]
vinsertps $0x30, 0x4(%r9,%r12), %xmm7, %xmm7 # xmm7 = xmm7[0,1,2],mem[0]
vmulps %xmm7, %xmm4, %xmm7
vhaddps %xmm7, %xmm7, %xmm7
vmulps %xmm1, %xmm3, %xmm1
vhaddps %xmm7, %xmm7, %xmm7
vmovss 0x20(%r10,%r13), %xmm8
vhaddps %xmm1, %xmm1, %xmm1
vhaddps %xmm1, %xmm1, %xmm1
vaddss %xmm1, %xmm7, %xmm1
vfmadd231ss 0x8(%r9,%r12), %xmm8, %xmm1 # xmm1 = (xmm8 * mem) + xmm1
vmovlhps %xmm5, %xmm3, %xmm7 # xmm7 = xmm3[0],xmm5[0]
vshufps $0xd8, %xmm5, %xmm7, %xmm5 # xmm5 = xmm7[0,2],xmm5[1,3]
vinsertps $0x30, %xmm6, %xmm5, %xmm5 # xmm5 = xmm5[0,1,2],xmm6[0]
vmovsd 0x4(%r9,%r12), %xmm6
vmovhps (%r11,%r12), %xmm6, %xmm6 # xmm6 = xmm6[0,1],mem[0,1]
vmulps %xmm4, %xmm6, %xmm4
vhaddps %xmm4, %xmm4, %xmm4
vmovss %xmm2, %xmm3, %xmm2 # xmm2 = xmm2[0],xmm3[1,2,3]
vhaddps %xmm4, %xmm4, %xmm3
vmulps %xmm2, %xmm5, %xmm2
vhaddps %xmm2, %xmm2, %xmm2
vaddss %xmm0, %xmm1, %xmm1
vhaddps %xmm2, %xmm2, %xmm2
vaddss %xmm2, %xmm3, %xmm2
vfmadd231ss 0x8(%r11,%r12), %xmm8, %xmm2 # xmm2 = (xmm8 * mem) + xmm2
vaddss %xmm0, %xmm2, %xmm2
vmovss %xmm1, (%rbx,%r12)
vmovss %xmm2, (%rsi,%r12)
decl %ecx
addq $0x4, %r12
jmp 0x29d4be
addq %rbp, %r15
addq %r12, %r15
addq %rbp, %rax
addq %r12, %rax
addq %rbp, %r9
addq %r12, %r9
addq %rbp, %r11
addq %r12, %r11
addq 0x10(%rsp), %rbx
addq %r12, %rbx
addl $0x2, %edi
addq %r12, %rsi
jmp 0x29d4a2
movq 0x70(%rsp), %r11
cmpl %r8d, %edi
jge 0x29d665
xorl %ecx, %ecx
movl %edx, %esi
testl %esi, %esi
jle 0x29d646
vmovsd (%r15,%rcx), %xmm1
vinsertps $0x20, 0x8(%r15,%rcx), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
vinsertps $0x30, (%rax,%rcx), %xmm1, %xmm1 # xmm1 = xmm1[0,1,2],mem[0]
vmovsd 0x4(%rax,%rcx), %xmm2
vinsertf128 $0x1, %xmm2, %ymm1, %ymm2
vmovsd (%r9,%rcx), %xmm3
vinsertf128 $0x1, %xmm3, %ymm1, %ymm1
vshufpd $0x2, %ymm1, %ymm2, %ymm1 # ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[2]
vmulps (%r10,%r13), %ymm1, %ymm1
vextractf128 $0x1, %ymm1, %xmm2
vhaddps %xmm1, %xmm2, %xmm1
vhaddps %xmm1, %xmm1, %xmm1
vhaddps %xmm1, %xmm1, %xmm1
vmovss 0x20(%r10,%r13), %xmm2
vfmadd231ss 0x8(%r9,%rcx), %xmm2, %xmm1 # xmm1 = (xmm2 * mem) + xmm1
vaddss %xmm0, %xmm1, %xmm1
vmovss %xmm1, (%rbx,%rcx)
decl %esi
addq $0x4, %rcx
jmp 0x29d5d9
addq %rcx, %r15
addq $0x8, %r15
addq %rcx, %rax
addq $0x8, %rax
addq %rcx, %r9
addq $0x8, %r9
incl %edi
addq %rcx, %rbx
jmp 0x29d5cc
movq 0x40(%rsp), %rax
incq %rax
movq 0x28(%rsp), %r13
movq 0x30(%rsp), %r9
movq 0xe0(%rsp), %rsi
jmp 0x29d437
cmpl $0x5, %r15d
jne 0x29ed47
cmpl $0x1, 0xdc(%rdi)
jne 0x29ea4a
cmpl $0x1, 0xe0(%rdi)
jne 0x29ea4a
cmpl $0x1, 0xe4(%rdi)
jne 0x29ea4a
cmpl $0x1, 0xe8(%rdi)
jne 0x29ea4a
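# note: 5x5 / dilation 1 / stride 1 confirmed for the pack4 path; the FMA chains
# below appear to be convdw5x5s1_pack4_sse inlined — six input row pointers
# (%r15 through %rax) feed a loop producing two output rows per iteration, five
# taps per row.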
movl 0x2c(%r13), %r8d
movl 0x30(%r13), %eax
movl %eax, 0x8(%rsp)
movl 0xac(%rsp), %eax
movl 0xb8(%rsp), %ecx
movq 0x1b0(%rdi), %rdx
leal 0x10(,%rax,4), %eax
movslq %eax, %rdi
leal (,%r8,4), %eax
cltq
xorl %esi, %esi
testl %r8d, %r8d
cmovlel %esi, %r8d
testl %ecx, %ecx
cmovlel %esi, %ecx
movq %rcx, 0x60(%rsp)
shlq $0x2, %rax
movq %rax, 0x30(%rsp)
shlq $0x2, %rdi
movq %rdx, 0x70(%rsp)
movq %r8, 0x10(%rsp)
cmpq 0x60(%rsp), %rsi
je 0x29fdf4
testq %rdx, %rdx
je 0x29d73f
movq %rsi, %rax
shlq $0x4, %rax
vmovups (%rdx,%rax), %xmm0
jmp 0x29d743
vxorps %xmm0, %xmm0, %xmm0
movq 0x40(%r13), %r10
imulq %rsi, %r10
movq 0x10(%r13), %rax
imulq %rax, %r10
addq (%r13), %r10
movslq 0x2c(%r13), %r9
movq 0x28(%r14), %r11
movslq 0x54(%r14), %rbx
imulq %rsi, %rbx
imulq 0x38(%r14), %rbx
imulq %rax, %r9
addq %r10, %r9
movslq 0xac(%rsp), %rax
movq 0xc0(%rsp), %r15
movq %rsi, 0x40(%rsp)
imulq %rsi, %r15
movq 0x90(%rsp), %rcx
imulq %rcx, %r15
addq 0x80(%rsp), %r15
imulq %rcx, %rax
leaq (%r15,%rax), %r12
leaq (%r15,%rax,2), %r13
leaq (%rax,%rax,2), %rbp
addq %r15, %rbp
leaq (%r15,%rax,4), %rdx
leaq (%rax,%rax,4), %rax
addq %r15, %rax
xorl %esi, %esi
movl %esi, %ecx
orl $0x1, %ecx
cmpl 0x8(%rsp), %ecx
jge 0x29dc81
movl %r8d, %ecx
xorl %r8d, %r8d
subl $0x1, %ecx
jb 0x29da6f
vmovaps (%r11,%rbx), %xmm1
vmovaps 0x10(%r11,%rbx), %xmm3
vmovaps 0x20(%r11,%rbx), %xmm4
vmovaps 0x30(%r11,%rbx), %xmm5
vmovaps (%r15,%r8), %xmm2
vfmadd213ps %xmm0, %xmm1, %xmm2 # xmm2 = (xmm1 * xmm2) + xmm0
vfmadd231ps 0x10(%r15,%r8), %xmm3, %xmm2 # xmm2 = (xmm3 * mem) + xmm2
vfmadd231ps 0x20(%r15,%r8), %xmm4, %xmm2 # xmm2 = (xmm4 * mem) + xmm2
vfmadd231ps 0x30(%r15,%r8), %xmm5, %xmm2 # xmm2 = (xmm5 * mem) + xmm2
vmovaps 0x40(%r11,%rbx), %xmm6
vfmadd231ps 0x40(%r15,%r8), %xmm6, %xmm2 # xmm2 = (xmm6 * mem) + xmm2
vmovaps (%r12,%r8), %xmm7
vmovaps 0x10(%r12,%r8), %xmm8
vmovaps 0x20(%r12,%r8), %xmm9
vmovaps 0x30(%r12,%r8), %xmm10
vmovaps 0x40(%r12,%r8), %xmm11
vfmadd213ps %xmm0, %xmm7, %xmm1 # xmm1 = (xmm7 * xmm1) + xmm0
vfmadd231ps %xmm3, %xmm8, %xmm1 # xmm1 = (xmm8 * xmm3) + xmm1
vfmadd231ps %xmm4, %xmm9, %xmm1 # xmm1 = (xmm9 * xmm4) + xmm1
vfmadd231ps %xmm5, %xmm10, %xmm1 # xmm1 = (xmm10 * xmm5) + xmm1
vfmadd231ps %xmm6, %xmm11, %xmm1 # xmm1 = (xmm11 * xmm6) + xmm1
vmovaps 0x50(%r11,%rbx), %xmm3
vmovaps 0x60(%r11,%rbx), %xmm4
vmovaps 0x70(%r11,%rbx), %xmm5
vmovaps 0x80(%r11,%rbx), %xmm6
vmovaps 0x90(%r11,%rbx), %xmm12
vfmadd231ps %xmm7, %xmm3, %xmm2 # xmm2 = (xmm3 * xmm7) + xmm2
vfmadd231ps %xmm8, %xmm4, %xmm2 # xmm2 = (xmm4 * xmm8) + xmm2
vfmadd231ps %xmm9, %xmm5, %xmm2 # xmm2 = (xmm5 * xmm9) + xmm2
vfmadd231ps %xmm10, %xmm6, %xmm2 # xmm2 = (xmm6 * xmm10) + xmm2
vfmadd231ps %xmm11, %xmm12, %xmm2 # xmm2 = (xmm12 * xmm11) + xmm2
vmovaps (%r13,%r8), %xmm7
vmovaps 0x10(%r13,%r8), %xmm8
vmovaps 0x20(%r13,%r8), %xmm9
vmovaps 0x30(%r13,%r8), %xmm10
vmovaps 0x40(%r13,%r8), %xmm11
vfmadd231ps %xmm3, %xmm7, %xmm1 # xmm1 = (xmm7 * xmm3) + xmm1
vfmadd231ps %xmm4, %xmm8, %xmm1 # xmm1 = (xmm8 * xmm4) + xmm1
vfmadd231ps %xmm5, %xmm9, %xmm1 # xmm1 = (xmm9 * xmm5) + xmm1
vfmadd231ps %xmm6, %xmm10, %xmm1 # xmm1 = (xmm10 * xmm6) + xmm1
vfmadd231ps %xmm12, %xmm11, %xmm1 # xmm1 = (xmm11 * xmm12) + xmm1
vmovaps 0xa0(%r11,%rbx), %xmm3
vmovaps 0xb0(%r11,%rbx), %xmm4
vmovaps 0xc0(%r11,%rbx), %xmm5
vmovaps 0xd0(%r11,%rbx), %xmm6
vmovaps 0xe0(%r11,%rbx), %xmm12
vfmadd231ps %xmm7, %xmm3, %xmm2 # xmm2 = (xmm3 * xmm7) + xmm2
vfmadd231ps %xmm8, %xmm4, %xmm2 # xmm2 = (xmm4 * xmm8) + xmm2
vfmadd231ps %xmm9, %xmm5, %xmm2 # xmm2 = (xmm5 * xmm9) + xmm2
vfmadd231ps %xmm10, %xmm6, %xmm2 # xmm2 = (xmm6 * xmm10) + xmm2
vfmadd231ps %xmm11, %xmm12, %xmm2 # xmm2 = (xmm12 * xmm11) + xmm2
vmovaps (%rbp,%r8), %xmm7
vmovaps 0x10(%rbp,%r8), %xmm8
vmovaps 0x20(%rbp,%r8), %xmm9
vmovaps 0x30(%rbp,%r8), %xmm10
vmovaps 0x40(%rbp,%r8), %xmm11
vfmadd231ps %xmm3, %xmm7, %xmm1 # xmm1 = (xmm7 * xmm3) + xmm1
vfmadd231ps %xmm4, %xmm8, %xmm1 # xmm1 = (xmm8 * xmm4) + xmm1
vfmadd231ps %xmm5, %xmm9, %xmm1 # xmm1 = (xmm9 * xmm5) + xmm1
vfmadd231ps %xmm6, %xmm10, %xmm1 # xmm1 = (xmm10 * xmm6) + xmm1
vfmadd231ps %xmm12, %xmm11, %xmm1 # xmm1 = (xmm11 * xmm12) + xmm1
vmovaps 0xf0(%r11,%rbx), %xmm4
vmovaps 0x100(%r11,%rbx), %xmm5
vmovaps 0x110(%r11,%rbx), %xmm6
vmovaps 0x120(%r11,%rbx), %xmm12
vmovaps 0x130(%r11,%rbx), %xmm13
vfmadd231ps %xmm7, %xmm4, %xmm2 # xmm2 = (xmm4 * xmm7) + xmm2
vfmadd231ps %xmm8, %xmm5, %xmm2 # xmm2 = (xmm5 * xmm8) + xmm2
vfmadd231ps %xmm9, %xmm6, %xmm2 # xmm2 = (xmm6 * xmm9) + xmm2
vfmadd231ps %xmm10, %xmm12, %xmm2 # xmm2 = (xmm12 * xmm10) + xmm2
vfmadd231ps %xmm11, %xmm13, %xmm2 # xmm2 = (xmm13 * xmm11) + xmm2
vmovaps (%rdx,%r8), %xmm7
vmovaps 0x10(%rdx,%r8), %xmm8
vmovaps 0x20(%rdx,%r8), %xmm9
vmovaps 0x30(%rdx,%r8), %xmm10
vmovaps 0x40(%rdx,%r8), %xmm3
vfmadd231ps %xmm4, %xmm7, %xmm1 # xmm1 = (xmm7 * xmm4) + xmm1
vfmadd231ps %xmm5, %xmm8, %xmm1 # xmm1 = (xmm8 * xmm5) + xmm1
vfmadd231ps %xmm6, %xmm9, %xmm1 # xmm1 = (xmm9 * xmm6) + xmm1
vfmadd231ps %xmm12, %xmm10, %xmm1 # xmm1 = (xmm10 * xmm12) + xmm1
vfmadd231ps %xmm13, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm13) + xmm1
vmovaps 0x140(%r11,%rbx), %xmm4
vmovaps 0x150(%r11,%rbx), %xmm5
vmovaps 0x160(%r11,%rbx), %xmm6
vmovaps 0x170(%r11,%rbx), %xmm11
vmovaps 0x180(%r11,%rbx), %xmm12
vfmadd231ps %xmm7, %xmm4, %xmm2 # xmm2 = (xmm4 * xmm7) + xmm2
vfmadd231ps %xmm8, %xmm5, %xmm2 # xmm2 = (xmm5 * xmm8) + xmm2
vfmadd231ps %xmm9, %xmm6, %xmm2 # xmm2 = (xmm6 * xmm9) + xmm2
vfmadd231ps %xmm10, %xmm11, %xmm2 # xmm2 = (xmm11 * xmm10) + xmm2
vfmadd231ps (%rax,%r8), %xmm4, %xmm1 # xmm1 = (xmm4 * mem) + xmm1
vfmadd231ps 0x10(%rax,%r8), %xmm5, %xmm1 # xmm1 = (xmm5 * mem) + xmm1
vfmadd231ps 0x20(%rax,%r8), %xmm6, %xmm1 # xmm1 = (xmm6 * mem) + xmm1
vfmadd231ps 0x30(%rax,%r8), %xmm11, %xmm1 # xmm1 = (xmm11 * mem) + xmm1
vfmadd231ps 0x40(%rax,%r8), %xmm12, %xmm1 # xmm1 = (xmm12 * mem) + xmm1
vfmadd231ps %xmm3, %xmm12, %xmm2 # xmm2 = (xmm12 * xmm3) + xmm2
vmovaps %xmm2, (%r10,%r8)
vmovaps %xmm1, (%r9,%r8)
addq $0x10, %r8
jmp 0x29d7d5
addq %rdi, %r15
addq %r8, %r15
addq %rdi, %r12
addq %r8, %r12
addq %rdi, %r13
addq %r8, %r13
addq %rdi, %rbp
addq %r8, %rbp
addq %rdi, %rdx
addq %r8, %rdx
addq %rdi, %rax
addq %r8, %rax
movq 0x30(%rsp), %rcx
addq %rcx, %r10
addq %r8, %r10
addq %rcx, %r9
addq %r8, %r9
addl $0x2, %esi
movq 0x10(%rsp), %r8
jmp 0x29d7c0
movl %r8d, %ecx
xorl %eax, %eax
subl $0x1, %ecx
jb 0x29dc5b
vmovaps (%r11,%rbx), %xmm1
vmovaps 0x10(%r11,%rbx), %xmm2
vmovaps 0x20(%r11,%rbx), %xmm3
vmovaps 0x30(%r11,%rbx), %xmm4
vmovaps 0x40(%r11,%rbx), %xmm5
vfmadd132ps (%r15,%rax), %xmm0, %xmm1 # xmm1 = (xmm1 * mem) + xmm0
vfmadd231ps 0x10(%r15,%rax), %xmm2, %xmm1 # xmm1 = (xmm2 * mem) + xmm1
vfmadd231ps 0x20(%r15,%rax), %xmm3, %xmm1 # xmm1 = (xmm3 * mem) + xmm1
vfmadd231ps 0x30(%r15,%rax), %xmm4, %xmm1 # xmm1 = (xmm4 * mem) + xmm1
vfmadd231ps 0x40(%r15,%rax), %xmm5, %xmm1 # xmm1 = (xmm5 * mem) + xmm1
vmovaps 0x50(%r11,%rbx), %xmm2
vmovaps 0x60(%r11,%rbx), %xmm3
vmovaps 0x70(%r11,%rbx), %xmm4
vmovaps 0x80(%r11,%rbx), %xmm5
vmovaps 0x90(%r11,%rbx), %xmm6
vfmadd132ps (%r12,%rax), %xmm1, %xmm2 # xmm2 = (xmm2 * mem) + xmm1
vfmadd231ps 0x10(%r12,%rax), %xmm3, %xmm2 # xmm2 = (xmm3 * mem) + xmm2
vfmadd231ps 0x20(%r12,%rax), %xmm4, %xmm2 # xmm2 = (xmm4 * mem) + xmm2
vfmadd231ps 0x30(%r12,%rax), %xmm5, %xmm2 # xmm2 = (xmm5 * mem) + xmm2
vfmadd231ps 0x40(%r12,%rax), %xmm6, %xmm2 # xmm2 = (xmm6 * mem) + xmm2
vmovaps 0xa0(%r11,%rbx), %xmm1
vmovaps 0xb0(%r11,%rbx), %xmm3
vmovaps 0xc0(%r11,%rbx), %xmm4
vmovaps 0xd0(%r11,%rbx), %xmm5
vmovaps 0xe0(%r11,%rbx), %xmm6
vfmadd132ps (%r13,%rax), %xmm2, %xmm1 # xmm1 = (xmm1 * mem) + xmm2
vfmadd231ps 0x10(%r13,%rax), %xmm3, %xmm1 # xmm1 = (xmm3 * mem) + xmm1
vfmadd231ps 0x20(%r13,%rax), %xmm4, %xmm1 # xmm1 = (xmm4 * mem) + xmm1
vfmadd231ps 0x30(%r13,%rax), %xmm5, %xmm1 # xmm1 = (xmm5 * mem) + xmm1
vfmadd231ps 0x40(%r13,%rax), %xmm6, %xmm1 # xmm1 = (xmm6 * mem) + xmm1
vmovaps 0xf0(%r11,%rbx), %xmm2
vmovaps 0x100(%r11,%rbx), %xmm3
vmovaps 0x110(%r11,%rbx), %xmm4
vmovaps 0x120(%r11,%rbx), %xmm5
vfmadd132ps (%rbp,%rax), %xmm1, %xmm2 # xmm2 = (xmm2 * mem) + xmm1
vfmadd231ps 0x10(%rbp,%rax), %xmm3, %xmm2 # xmm2 = (xmm3 * mem) + xmm2
vfmadd231ps 0x20(%rbp,%rax), %xmm4, %xmm2 # xmm2 = (xmm4 * mem) + xmm2
vfmadd231ps 0x30(%rbp,%rax), %xmm5, %xmm2 # xmm2 = (xmm5 * mem) + xmm2
vmovaps 0x130(%r11,%rbx), %xmm1
vfmadd231ps 0x40(%rbp,%rax), %xmm1, %xmm2 # xmm2 = (xmm1 * mem) + xmm2
vmovaps 0x140(%r11,%rbx), %xmm1
vmovaps 0x150(%r11,%rbx), %xmm3
vmovaps 0x160(%r11,%rbx), %xmm4
vmovaps 0x170(%r11,%rbx), %xmm5
vmovaps 0x180(%r11,%rbx), %xmm6
vfmadd132ps (%rdx,%rax), %xmm2, %xmm1 # xmm1 = (xmm1 * mem) + xmm2
vfmadd231ps 0x10(%rdx,%rax), %xmm3, %xmm1 # xmm1 = (xmm3 * mem) + xmm1
vfmadd231ps 0x20(%rdx,%rax), %xmm4, %xmm1 # xmm1 = (xmm4 * mem) + xmm1
vfmadd231ps 0x30(%rdx,%rax), %xmm5, %xmm1 # xmm1 = (xmm5 * mem) + xmm1
vfmadd231ps 0x40(%rdx,%rax), %xmm6, %xmm1 # xmm1 = (xmm6 * mem) + xmm1
vmovaps %xmm1, (%r10,%rax)
addq $0x10, %rax
jmp 0x29dab6
addq %rax, %r15
addq $0x40, %r15
leaq 0x40(%r12,%rax), %r12
addq %rax, %r13
addq $0x40, %r13
addq %rax, %rbp
addq $0x40, %rbp
addq %rax, %rdx
addq $0x40, %rdx
incl %esi
addq %rax, %r10
cmpl 0x8(%rsp), %esi
jl 0x29dab1
movq 0x40(%rsp), %rsi
incq %rsi
movq 0x28(%rsp), %r13
movq 0x70(%rsp), %rdx
jmp 0x29d721
cmpl $0x5, %r15d
jne 0x29fa97
cmpl $0x1, 0xdc(%rdi)
jne 0x29f77f
cmpl $0x1, 0xe0(%rdi)
jne 0x29f77f
cmpl $0x1, 0xe4(%rdi)
jne 0x29f77f
cmpl $0x1, 0xe8(%rdi)
jne 0x29f77f
movl 0x2c(%r13), %esi
movl 0x30(%r13), %ecx
movl 0xb8(%rsp), %eax
xorl %edx, %edx
testl %esi, %esi
cmovlel %edx, %esi
movq 0x1b0(%rdi), %rdi
testl %ecx, %ecx
cmovlel %edx, %ecx
testl %eax, %eax
cmovlel %edx, %eax
movq %rax, 0x10(%rsp)
movl $0x80, %r8d
movq %rdi, 0x30(%rsp)
cmpq 0x10(%rsp), %rdx
je 0x29fdf4
testq %rdi, %rdi
je 0x29dd35
movq %rdx, %rax
shlq $0x5, %rax
vmovups (%rdi,%rax), %ymm0
jmp 0x29dd39
vxorps %xmm0, %xmm0, %xmm0
movq 0x40(%r13), %r9
imulq %rdx, %r9
imulq 0x10(%r13), %r9
addq (%r13), %r9
movq 0x28(%r14), %r10
movslq 0x54(%r14), %r11
imulq %rdx, %r11
imulq 0x38(%r14), %r11
movq 0xc0(%rsp), %rbx
movq %rdx, 0x8(%rsp)
imulq %rdx, %rbx
movq 0x90(%rsp), %rax
imulq %rax, %rbx
addq 0x80(%rsp), %rbx
movslq 0xac(%rsp), %rdx
imulq %rax, %rdx
leaq (%rbx,%rdx), %r15
leaq (%rbx,%rdx,2), %r12
leaq (%rdx,%rdx,2), %r13
addq %rbx, %r13
leaq (%rbx,%rdx,4), %rbp
xorl %edx, %edx
cmpl %ecx, %edx
je 0x29df95
movl %esi, %eax
xorl %edi, %edi
subl $0x1, %eax
jb 0x29df6d
vmovaps (%r10,%r11), %ymm1
vmovaps 0x20(%r10,%r11), %ymm2
vmovaps 0x40(%r10,%r11), %ymm3
vmovaps 0x60(%r10,%r11), %ymm4
vmovaps 0x80(%r10,%r11), %ymm5
vfmadd132ps (%rbx,%rdi), %ymm0, %ymm1 # ymm1 = (ymm1 * mem) + ymm0
vfmadd231ps 0x20(%rbx,%rdi), %ymm2, %ymm1 # ymm1 = (ymm2 * mem) + ymm1
vfmadd231ps 0x40(%rbx,%rdi), %ymm3, %ymm1 # ymm1 = (ymm3 * mem) + ymm1
vfmadd231ps 0x60(%rbx,%rdi), %ymm4, %ymm1 # ymm1 = (ymm4 * mem) + ymm1
vfmadd231ps 0x80(%rbx,%rdi), %ymm5, %ymm1 # ymm1 = (ymm5 * mem) + ymm1
vmovaps 0xa0(%r10,%r11), %ymm2
vmovaps 0xc0(%r10,%r11), %ymm3
vmovaps 0xe0(%r10,%r11), %ymm4
vmovaps 0x100(%r10,%r11), %ymm5
vmovaps 0x120(%r10,%r11), %ymm6
vfmadd132ps (%r15,%rdi), %ymm1, %ymm2 # ymm2 = (ymm2 * mem) + ymm1
vfmadd231ps 0x20(%r15,%rdi), %ymm3, %ymm2 # ymm2 = (ymm3 * mem) + ymm2
vfmadd231ps 0x40(%r15,%rdi), %ymm4, %ymm2 # ymm2 = (ymm4 * mem) + ymm2
vfmadd231ps 0x60(%r15,%rdi), %ymm5, %ymm2 # ymm2 = (ymm5 * mem) + ymm2
vfmadd231ps 0x80(%r15,%rdi), %ymm6, %ymm2 # ymm2 = (ymm6 * mem) + ymm2
vmovaps 0x140(%r10,%r11), %ymm1
vmovaps 0x160(%r10,%r11), %ymm3
vmovaps 0x180(%r10,%r11), %ymm4
vmovaps 0x1a0(%r10,%r11), %ymm5
vmovaps 0x1c0(%r10,%r11), %ymm6
vfmadd132ps (%r12,%rdi), %ymm2, %ymm1 # ymm1 = (ymm1 * mem) + ymm2
vfmadd231ps 0x20(%r12,%rdi), %ymm3, %ymm1 # ymm1 = (ymm3 * mem) + ymm1
vfmadd231ps 0x40(%r12,%rdi), %ymm4, %ymm1 # ymm1 = (ymm4 * mem) + ymm1
vfmadd231ps 0x60(%r12,%rdi), %ymm5, %ymm1 # ymm1 = (ymm5 * mem) + ymm1
vfmadd231ps 0x80(%r12,%rdi), %ymm6, %ymm1 # ymm1 = (ymm6 * mem) + ymm1
vmovaps 0x1e0(%r10,%r11), %ymm2
vmovaps 0x200(%r10,%r11), %ymm3
vmovaps 0x220(%r10,%r11), %ymm4
vmovaps 0x240(%r10,%r11), %ymm5
vfmadd132ps (%r13,%rdi), %ymm1, %ymm2 # ymm2 = (ymm2 * mem) + ymm1
vfmadd231ps 0x20(%r13,%rdi), %ymm3, %ymm2 # ymm2 = (ymm3 * mem) + ymm2
vfmadd231ps 0x40(%r13,%rdi), %ymm4, %ymm2 # ymm2 = (ymm4 * mem) + ymm2
vfmadd231ps 0x60(%r13,%rdi), %ymm5, %ymm2 # ymm2 = (ymm5 * mem) + ymm2
vmovaps 0x260(%r10,%r11), %ymm1
vfmadd231ps 0x80(%r13,%rdi), %ymm1, %ymm2 # ymm2 = (ymm1 * mem) + ymm2
vmovaps 0x280(%r10,%r11), %ymm1
vmovaps 0x2a0(%r10,%r11), %ymm3
vmovaps 0x2c0(%r10,%r11), %ymm4
vmovaps 0x2e0(%r10,%r11), %ymm5
vmovaps 0x300(%r10,%r11), %ymm6
vfmadd132ps (%rbp,%rdi), %ymm2, %ymm1 # ymm1 = (ymm1 * mem) + ymm2
vfmadd231ps 0x20(%rbp,%rdi), %ymm3, %ymm1 # ymm1 = (ymm3 * mem) + ymm1
vfmadd231ps 0x40(%rbp,%rdi), %ymm4, %ymm1 # ymm1 = (ymm4 * mem) + ymm1
vfmadd231ps 0x60(%rbp,%rdi), %ymm5, %ymm1 # ymm1 = (ymm5 * mem) + ymm1
vfmadd231ps 0x80(%rbp,%rdi), %ymm6, %ymm1 # ymm1 = (ymm6 * mem) + ymm1
vmovaps %ymm1, (%r9,%rdi)
addq $0x20, %rdi
jmp 0x29ddad
addq %rdi, %rbx
addq %r8, %rbx
addq %rdi, %r15
addq %r8, %r15
addq %rdi, %r12
addq %r8, %r12
addq %rdi, %r13
addq %r8, %r13
addq %rdi, %rbp
addq %r8, %rbp
incl %edx
addq %rdi, %r9
jmp 0x29dda1
movq 0x8(%rsp), %rdx
incq %rdx
movq 0x28(%rsp), %r13
movq 0x30(%rsp), %rdi
jmp 0x29dd17
cmpl $0x1, 0xdc(%rdi)
movl %eax, %ecx
jne 0x29e16e
cmpl $0x1, 0xe0(%rdi)
movl %eax, %ecx
jne 0x29e16e
cmpl $0x2, 0xe4(%rdi)
movl %eax, %ecx
jne 0x29e16e
cmpl $0x2, 0xe8(%rdi)
movl %eax, %ecx
jne 0x29e16e
movslq 0xac(%rsp), %rdx
movl 0x2c(%r13), %eax
movl 0x30(%r13), %r8d
movl 0xb8(%rsp), %esi
movl %edx, %ecx
subl %eax, %ecx
addl %ecx, %ecx
movq 0x28(%r14), %r10
movq 0x1b0(%rdi), %rdi
imulq 0x10(%r13), %r11
movq 0x80(%rsp), %rbx
movq %rbx, 0x60(%rsp)
movq 0x90(%rsp), %rbx
imulq 0xc0(%rsp), %rbx
movq %rbx, 0x70(%rsp)
movq %rdx, 0x10(%rsp)
addq %rdx, %rdx
movq %rdx, 0xd0(%rsp)
movslq %ecx, %r15
xorl %r12d, %r12d
testl %r8d, %r8d
cmovlel %r12d, %r8d
testl %esi, %esi
cmovlel %r12d, %esi
movq %rsi, 0x8(%rsp)
shlq $0x2, %r15
movq %rdi, 0x40(%rsp)
cmpq 0x8(%rsp), %r12
je 0x29fdf4
testq %rdi, %rdi
je 0x29e081
vmovss (%rdi,%r12,4), %xmm0
jmp 0x29e085
vxorps %xmm0, %xmm0, %xmm0
movq %r11, %rdi
movq %r11, %r13
imulq %r12, %r13
addq %r9, %r13
imulq $0x24, %r12, %rbp
movq 0x70(%rsp), %rbx
imulq %r12, %rbx
addq 0x60(%rsp), %rbx
movq 0x10(%rsp), %rcx
leaq (%rbx,%rcx,4), %r11
movq 0xd0(%rsp), %rcx
leaq (%rbx,%rcx,4), %rsi
xorl %r9d, %r9d
cmpl %r8d, %r9d
je 0x29e154
xorl %edx, %edx
movl %eax, %ecx
testl %ecx, %ecx
jle 0x29e13a
vmovsd (%rbx,%rdx), %xmm1
vinsertps $0x20, 0x8(%rbx,%rdx), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
vinsertps $0x30, (%r11,%rdx), %xmm1, %xmm1 # xmm1 = xmm1[0,1,2],mem[0]
vmovsd 0x4(%r11,%rdx), %xmm2
vinsertf128 $0x1, %xmm2, %ymm1, %ymm2
vmovsd (%rsi,%rdx), %xmm3
vinsertf128 $0x1, %xmm3, %ymm1, %ymm1
vshufpd $0x2, %ymm1, %ymm2, %ymm1 # ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[2]
vmulps (%r10,%rbp), %ymm1, %ymm1
vextractf128 $0x1, %ymm1, %xmm2
vhaddps %xmm1, %xmm2, %xmm1
vhaddps %xmm1, %xmm1, %xmm1
vhaddps %xmm1, %xmm1, %xmm1
vmovss 0x20(%r10,%rbp), %xmm2
vfmadd231ss 0x8(%rsi,%rdx), %xmm2, %xmm1 # xmm1 = (xmm2 * mem) + xmm1
vaddss %xmm0, %xmm1, %xmm1
vmovss %xmm1, (%r13)
addq $0x4, %r13
decl %ecx
addq $0x8, %rdx
jmp 0x29e0c9
addq %r15, %rbx
addq %rdx, %rbx
addq %r15, %r11
addq %rdx, %r11
addq %r15, %rsi
addq %rdx, %rsi
incl %r9d
jmp 0x29e0bc
incq %r12
movq 0x28(%rsp), %r13
movq 0x30(%rsp), %r9
movq %rdi, %r11
movq 0x40(%rsp), %rdi
jmp 0x29e069
cltd
idivl %esi
movl %eax, %edi
movl %ecx, %eax
cltd
idivl %esi
movl %eax, %r8d
movq 0x58(%rsp), %rax
cmpb $0x1, 0x27(%rax)
jne 0x29e1b4
xorl %eax, %eax
testb $0x3, %dil
sete %al
testb $0x7, %dil
leal 0x1(%rax,%rax,2), %eax
cmovel %ebp, %eax
movl %eax, 0x10(%rsp)
xorl %eax, %eax
testb $0x3, %r8b
sete %al
testb $0x7, %r8b
leal 0x1(%rax,%rax,2), %eax
cmovel %ebp, %eax
jmp 0x29e1bb
pushq $0x1
popq %rax
movl %eax, 0x10(%rsp)
movl %eax, 0x8(%rsp)
movq 0x40(%rsp), %r15
movq 0x88(%rsp), %rax
vmovaps 0x80(%rsp), %xmm0
vmovaps %xmm0, 0x180(%rsp)
movq 0x90(%rsp), %rcx
movq %rcx, 0x190(%rsp)
movl 0x98(%rsp), %ecx
movl %ecx, 0x198(%rsp)
movq 0xa0(%rsp), %rcx
movq %rcx, 0x1a0(%rsp)
vmovups 0xa8(%rsp), %xmm0
vmovups %xmm0, 0x1a8(%rsp)
movl 0xb8(%rsp), %ecx
movl %ecx, 0x1b8(%rsp)
movq 0xc0(%rsp), %rcx
movq %rcx, 0x1c0(%rsp)
testq %rax, %rax
je 0x29e244
lock
incl (%rax)
cmpl 0x10(%rsp), %r10d
jle 0x29e298
movl %r8d, %ebx
movl %edi, %ebp
movq 0x58(%rsp), %rax
vmovups (%rax), %ymm0
vmovups 0x20(%rax), %ymm1
leaq 0x100(%rsp), %rcx
vmovups %ymm1, 0x20(%rcx)
vmovups %ymm0, (%rcx)
movq 0x10(%rax), %rax
movq %rax, 0x8(%rcx)
leaq 0x80(%rsp), %rdi
leaq 0x180(%rsp), %rsi
movl 0x10(%rsp), %edx
vzeroupper
callq 0x64e3b
movl %ebp, %edi
movl %ebx, %r8d
movq 0x28(%rsp), %rdx
movq 0x8(%rdx), %rax
vmovups (%rdx), %xmm0
vmovaps %xmm0, 0x100(%rsp)
movq 0x10(%rdx), %rcx
movq %rcx, 0x110(%rsp)
movl 0x18(%rdx), %ecx
movl %ecx, 0x118(%rsp)
movq 0x20(%rdx), %rcx
movq %rcx, 0x120(%rsp)
vmovups 0x28(%rdx), %xmm0
vmovups %xmm0, 0x128(%rsp)
movl 0x38(%rdx), %ecx
movl %ecx, 0x138(%rsp)
movq 0x40(%rdx), %rcx
movq %rcx, 0x140(%rsp)
testq %rax, %rax
je 0x29e2fc
lock
incl (%rax)
cmpl %r15d, 0x8(%rsp)
jae 0x29e391
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xd0(%r14,%rax), %eax
cltd
movl 0x8(%rsp), %r9d
idivl %r9d
movl %eax, %ecx
movq 0xe0(%rsp), %rax
xorl %edx, %edx
divq %r15
movl %edi, %ebp
movl %r8d, %ebx
movl %r9d, %r8d
imulq %rax, %r8
movq 0x58(%rsp), %rax
movq 0x10(%rax), %rax
movq %rax, (%rsp)
leaq 0x100(%rsp), %rdi
movl 0x70(%rsp), %esi
movl 0xd0(%rsp), %edx
callq 0x628f2
pushq $-0x64
popq %r12
cmpq $0x0, 0x100(%rsp)
je 0x29fd31
movl %ebx, %r8d
movl %ebp, %edi
movslq 0x138(%rsp), %rax
imulq 0x140(%rsp), %rax
testq %rax, %rax
je 0x29fd31
xorl %r15d, %r15d
xorl %r12d, %r12d
xorl %ebp, %ebp
movq (%r14), %rax
movq -0x18(%rax), %rax
movslq 0x108(%r14,%rax), %rax
cmpq %rax, %rbp
jge 0x29e5ff
movl %r15d, %eax
cltd
movl 0x10(%rsp), %esi
idivl %esi
movl %eax, %ecx
movl %edi, %r13d
movl %edi, %eax
cltd
idivl %esi
movslq %ecx, %rdx
imulq 0x1c0(%rsp), %rdx
movq 0x190(%rsp), %rcx
imulq %rcx, %rdx
addq 0x180(%rsp), %rdx
movl 0x198(%rsp), %esi
movq 0x1a0(%rsp), %rdi
movq %rdx, 0x228(%rsp)
andq $0x0, 0x230(%rsp)
movq %rcx, 0x238(%rsp)
movl %esi, 0x240(%rsp)
movq %rdi, 0x248(%rsp)
movl %eax, 0x260(%rsp)
vmovups 0x1a8(%rsp), %xmm0
movslq 0x1b4(%rsp), %rax
movslq 0x1ac(%rsp), %rdx
movslq 0x1b0(%rsp), %rsi
imulq %rdx, %rsi
imulq %rcx, %rax
imulq %rsi, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rcx
movq %rax, 0x268(%rsp)
vmovups %xmm0, 0x250(%rsp)
movl %r12d, %eax
cltd
movl 0x8(%rsp), %esi
idivl %esi
movl %eax, %ecx
movl %r8d, %ebx
movl %r8d, %eax
cltd
idivl %esi
movslq %ecx, %rdx
imulq 0x140(%rsp), %rdx
movq 0x110(%rsp), %rsi
imulq %rsi, %rdx
addq 0x100(%rsp), %rdx
movl 0x118(%rsp), %edi
movq 0x120(%rsp), %rcx
movq %rdx, 0x1e0(%rsp)
andq $0x0, 0x1e8(%rsp)
movq %rsi, 0x1f0(%rsp)
movl %edi, 0x1f8(%rsp)
movq %rcx, 0x200(%rsp)
movl %eax, 0x218(%rsp)
vmovups 0x128(%rsp), %xmm0
movslq 0x134(%rsp), %rax
movslq 0x12c(%rsp), %rdx
movslq 0x130(%rsp), %rdi
imulq %rdx, %rdi
imulq %rsi, %rax
imulq %rdi, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rsi
movq %rax, 0x220(%rsp)
vmovups %xmm0, 0x208(%rsp)
movq 0x10(%r14), %rax
movq (%rax,%rbp,8), %rdi
movq 0x58(%rsp), %rax
vmovups (%rax), %ymm0
vmovups 0x20(%rax), %ymm1
vmovups %ymm0, 0x2a0(%rsp)
vmovups %ymm1, 0x2c0(%rsp)
movq %rcx, 0x2a8(%rsp)
movq (%rdi), %rax
leaq 0x228(%rsp), %rsi
leaq 0x1e0(%rsp), %rdx
leaq 0x2a0(%rsp), %rcx
vzeroupper
callq *0x38(%rax)
movq 0x1e8(%rsp), %rax
testq %rax, %rax
je 0x29e5b4
lock
decl (%rax)
jne 0x29e5b4
movq 0x1e0(%rsp), %rsi
movq 0x200(%rsp), %rdi
testq %rdi, %rdi
je 0x29e5ac
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x29e5b4
movq %rsi, %rdi
callq 0x5f3e0
movq 0x230(%rsp), %rax
testq %rax, %rax
je 0x29e5eb
lock
decl (%rax)
jne 0x29e5eb
movq 0x228(%rsp), %rsi
movq 0x248(%rsp), %rdi
testq %rdi, %rdi
je 0x29e5e3
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x29e5eb
movq %rsi, %rdi
callq 0x5f3e0
incq %rbp
movl %ebx, %r8d
addl %ebx, %r12d
movl %r13d, %edi
addl %r13d, %r15d
jmp 0x29e399
movq 0x40(%rsp), %rdx
cmpl %edx, 0x8(%rsp)
jae 0x29e629
xorl %r12d, %r12d
leaq 0x100(%rsp), %rdi
movq 0x28(%rsp), %rsi
movq 0x58(%rsp), %rcx
callq 0x64e3b
jmp 0x29fd31
xorl %r12d, %r12d
leaq 0x100(%rsp), %rax
movq 0x28(%rsp), %rcx
cmpq %rcx, %rax
je 0x29fd31
movq 0x108(%rsp), %rax
testq %rax, %rax
je 0x29e652
lock
incl (%rax)
movq 0x8(%rcx), %rax
testq %rax, %rax
je 0x29fcd9
lock
decl (%rax)
jne 0x29fcd9
movq 0x28(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
je 0x29fcd1
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x29fcd9
pushq $0x3
popq %r15
cmpl $0x1, 0xdc(%rdi)
jne 0x29ed47
cmpl $0x1, 0xe0(%rdi)
jne 0x29ed47
cmpl $0x2, 0xe4(%rdi)
jne 0x29ed47
cmpl $0x2, 0xe8(%rdi)
jne 0x29ed47
movl 0x2c(%r13), %eax
movl 0x30(%r13), %ecx
movl 0xac(%rsp), %r8d
movl 0xb8(%rsp), %edx
subl %eax, %r8d
shll $0x3, %r8d
movq 0x1b0(%rdi), %rsi
movslq %r8d, %rdi
xorl %r8d, %r8d
testl %ecx, %ecx
cmovlel %r8d, %ecx
testl %edx, %edx
cmovlel %r8d, %edx
shlq $0x2, %rdi
cmpq %rdx, %r8
je 0x29fdf4
testq %rsi, %rsi
je 0x29e718
movq %r8, %r9
shlq $0x4, %r9
vmovups (%rsi,%r9), %xmm0
jmp 0x29e71c
vxorps %xmm0, %xmm0, %xmm0
movq 0x40(%r13), %r9
imulq %r8, %r9
imulq 0x10(%r13), %r9
addq (%r13), %r9
movq %r14, %r10
movq 0x28(%r14), %r14
movslq 0x54(%r10), %r15
imulq %r8, %r15
imulq 0x38(%r10), %r15
movslq 0xac(%rsp), %rbx
movq 0xc0(%rsp), %r10
imulq %r8, %r10
movq 0x90(%rsp), %r11
imulq %r11, %r10
addq 0x80(%rsp), %r10
imulq %r11, %rbx
leaq (%r10,%rbx), %r11
leaq (%r10,%rbx,2), %rbx
vmovaps (%r14,%r15), %xmm1
vmovaps 0x10(%r14,%r15), %xmm2
vmovaps 0x20(%r14,%r15), %xmm3
vmovaps 0x30(%r14,%r15), %xmm4
vmovaps 0x40(%r14,%r15), %xmm5
vmovaps 0x50(%r14,%r15), %xmm6
vmovaps 0x60(%r14,%r15), %xmm7
vmovaps 0x70(%r14,%r15), %xmm8
vmovaps 0x80(%r14,%r15), %xmm9
xorl %ebp, %ebp
cmpl %ecx, %ebp
je 0x29ea3d
xorl %r15d, %r15d
xorl %r12d, %r12d
leal 0x3(%r12), %r14d
cmpl %eax, %r14d
jge 0x29e9be
vmovaps (%r10,%r15), %xmm12
vfmadd213ps %xmm0, %xmm1, %xmm12 # xmm12 = (xmm1 * xmm12) + xmm0
vfmadd231ps 0x10(%r10,%r15), %xmm2, %xmm12 # xmm12 = (xmm2 * mem) + xmm12
vmovaps 0x20(%r10,%r15), %xmm11
vfmadd231ps %xmm11, %xmm3, %xmm12 # xmm12 = (xmm3 * xmm11) + xmm12
vfmadd231ps (%r11,%r15), %xmm4, %xmm12 # xmm12 = (xmm4 * mem) + xmm12
vfmadd231ps 0x10(%r11,%r15), %xmm5, %xmm12 # xmm12 = (xmm5 * mem) + xmm12
vmovaps 0x20(%r11,%r15), %xmm13
vfmadd231ps %xmm13, %xmm6, %xmm12 # xmm12 = (xmm6 * xmm13) + xmm12
vfmadd231ps (%rbx,%r15), %xmm7, %xmm12 # xmm12 = (xmm7 * mem) + xmm12
vfmadd231ps 0x10(%rbx,%r15), %xmm8, %xmm12 # xmm12 = (xmm8 * mem) + xmm12
vmovaps 0x40(%r10,%r15), %xmm10
vfmadd213ps %xmm0, %xmm1, %xmm11 # xmm11 = (xmm1 * xmm11) + xmm0
vfmadd231ps 0x30(%r10,%r15), %xmm2, %xmm11 # xmm11 = (xmm2 * mem) + xmm11
vmovaps 0x40(%r11,%r15), %xmm14
vfmadd231ps %xmm10, %xmm3, %xmm11 # xmm11 = (xmm3 * xmm10) + xmm11
vfmadd231ps %xmm13, %xmm4, %xmm11 # xmm11 = (xmm4 * xmm13) + xmm11
vmovaps 0x20(%rbx,%r15), %xmm13
vfmadd231ps %xmm13, %xmm9, %xmm12 # xmm12 = (xmm9 * xmm13) + xmm12
vfmadd231ps 0x30(%r11,%r15), %xmm5, %xmm11 # xmm11 = (xmm5 * mem) + xmm11
vfmadd231ps %xmm14, %xmm6, %xmm11 # xmm11 = (xmm6 * xmm14) + xmm11
vfmadd231ps %xmm13, %xmm7, %xmm11 # xmm11 = (xmm7 * xmm13) + xmm11
vmovaps 0x40(%rbx,%r15), %xmm13
vfmadd231ps 0x30(%rbx,%r15), %xmm8, %xmm11 # xmm11 = (xmm8 * mem) + xmm11
vmovaps %xmm12, (%r9)
vfmadd231ps %xmm13, %xmm9, %xmm11 # xmm11 = (xmm9 * xmm13) + xmm11
vmovaps 0x60(%r10,%r15), %xmm12
vfmadd213ps %xmm0, %xmm1, %xmm10 # xmm10 = (xmm1 * xmm10) + xmm0
vfmadd231ps 0x50(%r10,%r15), %xmm2, %xmm10 # xmm10 = (xmm2 * mem) + xmm10
vfmadd231ps %xmm12, %xmm3, %xmm10 # xmm10 = (xmm3 * xmm12) + xmm10
vfmadd231ps %xmm14, %xmm4, %xmm10 # xmm10 = (xmm4 * xmm14) + xmm10
vmovaps 0x60(%r11,%r15), %xmm14
vfmadd231ps 0x50(%r11,%r15), %xmm5, %xmm10 # xmm10 = (xmm5 * mem) + xmm10
vfmadd231ps %xmm14, %xmm6, %xmm10 # xmm10 = (xmm6 * xmm14) + xmm10
vfmadd231ps %xmm13, %xmm7, %xmm10 # xmm10 = (xmm7 * xmm13) + xmm10
vmovaps 0x60(%rbx,%r15), %xmm13
vfmadd231ps 0x50(%rbx,%r15), %xmm8, %xmm10 # xmm10 = (xmm8 * mem) + xmm10
vmovaps %xmm11, 0x10(%r9)
vfmadd213ps %xmm0, %xmm1, %xmm12 # xmm12 = (xmm1 * xmm12) + xmm0
vfmadd231ps 0x70(%r10,%r15), %xmm2, %xmm12 # xmm12 = (xmm2 * mem) + xmm12
vfmadd231ps 0x80(%r10,%r15), %xmm3, %xmm12 # xmm12 = (xmm3 * mem) + xmm12
vfmadd231ps %xmm14, %xmm4, %xmm12 # xmm12 = (xmm4 * xmm14) + xmm12
vfmadd231ps 0x70(%r11,%r15), %xmm5, %xmm12 # xmm12 = (xmm5 * mem) + xmm12
vfmadd231ps 0x80(%r11,%r15), %xmm6, %xmm12 # xmm12 = (xmm6 * mem) + xmm12
vfmadd231ps %xmm13, %xmm7, %xmm12 # xmm12 = (xmm7 * xmm13) + xmm12
vfmadd231ps 0x70(%rbx,%r15), %xmm8, %xmm12 # xmm12 = (xmm8 * mem) + xmm12
vfmadd231ps %xmm13, %xmm9, %xmm10 # xmm10 = (xmm9 * xmm13) + xmm10
vfmadd231ps 0x80(%rbx,%r15), %xmm9, %xmm12 # xmm12 = (xmm9 * mem) + xmm12
vmovaps %xmm10, 0x20(%r9)
vmovaps %xmm12, 0x30(%r9)
addq $0x40, %r9
addl $0x4, %r12d
subq $-0x80, %r15
jmp 0x29e7c6
vmovaps (%r10,%r15), %xmm10
vfmadd213ps %xmm0, %xmm1, %xmm10 # xmm10 = (xmm1 * xmm10) + xmm0
vfmadd231ps 0x10(%r10,%r15), %xmm2, %xmm10 # xmm10 = (xmm2 * mem) + xmm10
vmovaps 0x20(%r10,%r15), %xmm11
vfmadd231ps %xmm11, %xmm3, %xmm10 # xmm10 = (xmm3 * xmm11) + xmm10
vfmadd231ps (%r11,%r15), %xmm4, %xmm10 # xmm10 = (xmm4 * mem) + xmm10
vmovaps 0x20(%r11,%r15), %xmm12
vfmadd231ps 0x10(%r11,%r15), %xmm5, %xmm10 # xmm10 = (xmm5 * mem) + xmm10
vfmadd231ps %xmm12, %xmm6, %xmm10 # xmm10 = (xmm6 * xmm12) + xmm10
vfmadd231ps (%rbx,%r15), %xmm7, %xmm10 # xmm10 = (xmm7 * mem) + xmm10
vfmadd231ps 0x10(%rbx,%r15), %xmm8, %xmm10 # xmm10 = (xmm8 * mem) + xmm10
vfmadd213ps %xmm0, %xmm1, %xmm11 # xmm11 = (xmm1 * xmm11) + xmm0
vfmadd231ps 0x30(%r10,%r15), %xmm2, %xmm11 # xmm11 = (xmm2 * mem) + xmm11
vfmadd231ps 0x40(%r10,%r15), %xmm3, %xmm11 # xmm11 = (xmm3 * mem) + xmm11
vfmadd231ps %xmm12, %xmm4, %xmm11 # xmm11 = (xmm4 * xmm12) + xmm11
vfmadd231ps 0x30(%r11,%r15), %xmm5, %xmm11 # xmm11 = (xmm5 * mem) + xmm11
vfmadd231ps 0x40(%r11,%r15), %xmm6, %xmm11 # xmm11 = (xmm6 * mem) + xmm11
vmovaps 0x20(%rbx,%r15), %xmm12
vfmadd231ps %xmm12, %xmm7, %xmm11 # xmm11 = (xmm7 * xmm12) + xmm11
vfmadd231ps 0x30(%rbx,%r15), %xmm8, %xmm11 # xmm11 = (xmm8 * mem) + xmm11
vfmadd231ps %xmm12, %xmm9, %xmm10 # xmm10 = (xmm9 * xmm12) + xmm10
vfmadd231ps 0x40(%rbx,%r15), %xmm9, %xmm11 # xmm11 = (xmm9 * mem) + xmm11
vmovaps %xmm10, (%r9)
vmovaps %xmm11, 0x10(%r9)
addq $0x20, %r9
addl $0x2, %r12d
addq $0x40, %r15
leal 0x1(%r12), %r14d
cmpl %eax, %r14d
jl 0x29e91e
jmp 0x29ea1f
vmovaps (%r10,%r15), %xmm10
vfmadd213ps %xmm0, %xmm1, %xmm10 # xmm10 = (xmm1 * xmm10) + xmm0
vfmadd231ps 0x10(%r10,%r15), %xmm2, %xmm10 # xmm10 = (xmm2 * mem) + xmm10
vfmadd231ps 0x20(%r10,%r15), %xmm3, %xmm10 # xmm10 = (xmm3 * mem) + xmm10
vfmadd231ps (%r11,%r15), %xmm4, %xmm10 # xmm10 = (xmm4 * mem) + xmm10
vfmadd231ps 0x10(%r11,%r15), %xmm5, %xmm10 # xmm10 = (xmm5 * mem) + xmm10
vfmadd231ps 0x20(%r11,%r15), %xmm6, %xmm10 # xmm10 = (xmm6 * mem) + xmm10
vfmadd231ps (%rbx,%r15), %xmm7, %xmm10 # xmm10 = (xmm7 * mem) + xmm10
vfmadd231ps 0x10(%rbx,%r15), %xmm8, %xmm10 # xmm10 = (xmm8 * mem) + xmm10
vfmadd231ps 0x20(%rbx,%r15), %xmm9, %xmm10 # xmm10 = (xmm9 * mem) + xmm10
vmovaps %xmm10, (%r9)
addq $0x10, %r9
incl %r12d
addq $0x20, %r15
cmpl %eax, %r12d
jl 0x29e9ce
addq %rdi, %r10
addq %r15, %r10
addq %rdi, %r11
addq %r15, %r11
addq %rdi, %rbx
addq %r15, %rbx
incl %ebp
jmp 0x29e7b8
incq %r8
movq 0x8(%rsp), %r14
jmp 0x29e6fb
pushq $0x5
popq %r15
cmpl $0x1, 0xdc(%rdi)
jne 0x29ed47
cmpl $0x1, 0xe0(%rdi)
jne 0x29ed47
cmpl $0x2, 0xe4(%rdi)
jne 0x29ed47
cmpl $0x2, 0xe8(%rdi)
jne 0x29ed47
movl 0x2c(%r13), %edx
movl 0x30(%r13), %ecx
movl 0xac(%rsp), %eax
movl 0xb8(%rsp), %esi
subl %edx, %eax
shll $0x3, %eax
movq 0x1b0(%rdi), %r9
xorl %r15d, %r15d
testl %edx, %edx
cmovlel %r15d, %edx
movl %edx, %edi
movslq %eax, %r8
testl %ecx, %ecx
cmovlel %r15d, %ecx
testl %esi, %esi
cmovlel %r15d, %esi
movq %rsi, 0x10(%rsp)
shlq $0x2, %r8
movq %r9, 0x30(%rsp)
cmpq 0x10(%rsp), %r15
je 0x29fdf4
testq %r9, %r9
je 0x29eaeb
movq %r15, %rax
shlq $0x4, %rax
vmovups (%r9,%rax), %xmm0
jmp 0x29eaef
vxorps %xmm0, %xmm0, %xmm0
movq 0x40(%r13), %r9
imulq %r15, %r9
imulq 0x10(%r13), %r9
addq (%r13), %r9
movq 0x28(%r14), %r10
movslq 0x54(%r14), %r11
imulq %r15, %r11
imulq 0x38(%r14), %r11
movq 0xc0(%rsp), %rbx
movq %r15, 0x8(%rsp)
imulq %r15, %rbx
movq 0x90(%rsp), %rax
imulq %rax, %rbx
addq 0x80(%rsp), %rbx
movslq 0xac(%rsp), %rdx
imulq %rax, %rdx
leaq (%rbx,%rdx), %r15
leaq (%rbx,%rdx,2), %r12
leaq (%rdx,%rdx,2), %r13
addq %rbx, %r13
leaq (%rbx,%rdx,4), %rbp
xorl %edx, %edx
cmpl %ecx, %edx
je 0x29ed30
movl %edi, %eax
xorl %esi, %esi
subl $0x1, %eax
jb 0x29ed0b
vmovaps (%r10,%r11), %xmm1
vmovaps 0x10(%r10,%r11), %xmm2
vmovaps 0x20(%r10,%r11), %xmm3
vmovaps 0x30(%r10,%r11), %xmm4
vmovaps 0x40(%r10,%r11), %xmm5
vfmadd132ps (%rbx,%rsi), %xmm0, %xmm1 # xmm1 = (xmm1 * mem) + xmm0
vfmadd231ps 0x10(%rbx,%rsi), %xmm2, %xmm1 # xmm1 = (xmm2 * mem) + xmm1
vfmadd231ps 0x20(%rbx,%rsi), %xmm3, %xmm1 # xmm1 = (xmm3 * mem) + xmm1
vfmadd231ps 0x30(%rbx,%rsi), %xmm4, %xmm1 # xmm1 = (xmm4 * mem) + xmm1
vfmadd231ps 0x40(%rbx,%rsi), %xmm5, %xmm1 # xmm1 = (xmm5 * mem) + xmm1
vmovaps 0x50(%r10,%r11), %xmm2
vmovaps 0x60(%r10,%r11), %xmm3
vmovaps 0x70(%r10,%r11), %xmm4
vmovaps 0x80(%r10,%r11), %xmm5
vmovaps 0x90(%r10,%r11), %xmm6
vfmadd132ps (%r15,%rsi), %xmm1, %xmm2 # xmm2 = (xmm2 * mem) + xmm1
vfmadd231ps 0x10(%r15,%rsi), %xmm3, %xmm2 # xmm2 = (xmm3 * mem) + xmm2
vfmadd231ps 0x20(%r15,%rsi), %xmm4, %xmm2 # xmm2 = (xmm4 * mem) + xmm2
vfmadd231ps 0x30(%r15,%rsi), %xmm5, %xmm2 # xmm2 = (xmm5 * mem) + xmm2
vfmadd231ps 0x40(%r15,%rsi), %xmm6, %xmm2 # xmm2 = (xmm6 * mem) + xmm2
vmovaps 0xa0(%r10,%r11), %xmm1
vmovaps 0xb0(%r10,%r11), %xmm3
vmovaps 0xc0(%r10,%r11), %xmm4
vmovaps 0xd0(%r10,%r11), %xmm5
vmovaps 0xe0(%r10,%r11), %xmm6
vfmadd132ps (%r12,%rsi), %xmm2, %xmm1 # xmm1 = (xmm1 * mem) + xmm2
vfmadd231ps 0x10(%r12,%rsi), %xmm3, %xmm1 # xmm1 = (xmm3 * mem) + xmm1
vfmadd231ps 0x20(%r12,%rsi), %xmm4, %xmm1 # xmm1 = (xmm4 * mem) + xmm1
vfmadd231ps 0x30(%r12,%rsi), %xmm5, %xmm1 # xmm1 = (xmm5 * mem) + xmm1
vfmadd231ps 0x40(%r12,%rsi), %xmm6, %xmm1 # xmm1 = (xmm6 * mem) + xmm1
vmovaps 0xf0(%r10,%r11), %xmm2
vmovaps 0x100(%r10,%r11), %xmm3
vmovaps 0x110(%r10,%r11), %xmm4
vmovaps 0x120(%r10,%r11), %xmm5
vmovaps 0x130(%r10,%r11), %xmm6
vfmadd132ps (%r13,%rsi), %xmm1, %xmm2 # xmm2 = (xmm2 * mem) + xmm1
vfmadd231ps 0x10(%r13,%rsi), %xmm3, %xmm2 # xmm2 = (xmm3 * mem) + xmm2
vfmadd231ps 0x20(%r13,%rsi), %xmm4, %xmm2 # xmm2 = (xmm4 * mem) + xmm2
vfmadd231ps 0x30(%r13,%rsi), %xmm5, %xmm2 # xmm2 = (xmm5 * mem) + xmm2
vfmadd231ps 0x40(%r13,%rsi), %xmm6, %xmm2 # xmm2 = (xmm6 * mem) + xmm2
vmovaps 0x140(%r10,%r11), %xmm1
vmovaps 0x150(%r10,%r11), %xmm3
vmovaps 0x160(%r10,%r11), %xmm4
vmovaps 0x170(%r10,%r11), %xmm5
vmovaps 0x180(%r10,%r11), %xmm6
vfmadd132ps (%rbp,%rsi), %xmm2, %xmm1 # xmm1 = (xmm1 * mem) + xmm2
vfmadd231ps 0x10(%rbp,%rsi), %xmm3, %xmm1 # xmm1 = (xmm3 * mem) + xmm1
vfmadd231ps 0x20(%rbp,%rsi), %xmm4, %xmm1 # xmm1 = (xmm4 * mem) + xmm1
vfmadd231ps 0x30(%rbp,%rsi), %xmm5, %xmm1 # xmm1 = (xmm5 * mem) + xmm1
vfmadd231ps 0x40(%rbp,%rsi), %xmm6, %xmm1 # xmm1 = (xmm6 * mem) + xmm1
vmovaps %xmm1, (%r9)
addq $0x10, %r9
addq $0x20, %rsi
jmp 0x29eb63
addq %r8, %rbx
addq %rsi, %rbx
addq %r8, %r15
addq %rsi, %r15
addq %r8, %r12
addq %rsi, %r12
addq %r8, %r13
addq %rsi, %r13
addq %r8, %rbp
addq %rsi, %rbp
incl %edx
jmp 0x29eb57
movq 0x8(%rsp), %r15
incq %r15
movq 0x28(%rsp), %r13
movq 0x30(%rsp), %r9
jmp 0x29eacc
imull %eax, %r15d
movslq %r15d, %rsi
leaq 0x180(%rsp), %rdi
leaq 0x100(%rsp), %rdx
callq 0x73bbe
movq (%r14), %rcx
movq -0x18(%rcx), %rdx
movl 0x10(%rsp), %r11d
imull 0xe0(%r14,%rdx), %r11d
movq 0x180(%rsp), %rax
movl 0xdc(%r14,%rdx), %esi
imull 0xd4(%r14,%rdx), %esi
subl %esi, %r11d
xorl %esi, %esi
xorl %edi, %edi
xorl %r8d, %r8d
cmpl 0xd8(%r14,%rdx), %r8d
jge 0x29edd9
movslq %esi, %rsi
leaq (%rax,%rsi,4), %r10
xorl %r9d, %r9d
cmpl 0xd4(%r14,%rdx), %r9d
jge 0x29edce
movl %edi, (%r10,%r9,4)
movq -0x18(%rcx), %rdx
addl 0xdc(%r14,%rdx), %edi
incq %r9
jmp 0x29edaf
addl %r11d, %edi
incl %r8d
addq %r9, %rsi
jmp 0x29ed9b
leal (,%r15,4), %ecx
movl %ecx, 0xe0(%rsp)
movl 0x70(%rsp), %ecx
shll $0x2, %ecx
movslq %ecx, %rcx
movq %rcx, 0x40(%rsp)
xorl %ecx, %ecx
testl %r15d, %r15d
cmovlel %ecx, %r15d
movq 0x60(%rsp), %r12
testl %r12d, %r12d
cmovlel %ecx, %r12d
movq %r12, 0x60(%rsp)
shlq $0x2, %r15
leaq 0x15981c(%rip), %rdi # 0x3f8638
vxorps %xmm10, %xmm10, %xmm10
vbroadcastss 0x14f1ea(%rip), %xmm5 # 0x3ee014
vbroadcastss 0x14fe55(%rip), %xmm14 # 0x3eec88
xorl %edx, %edx
vbroadcastss 0x15237e(%rip), %xmm12 # 0x3f11bc
vbroadcastss 0x152381(%rip), %xmm6 # 0x3f11c8
vbroadcastss 0x152380(%rip), %xmm3 # 0x3f11d0
cmpq 0x60(%rsp), %rdx
je 0x29fcb9
movq %rcx, 0x70(%rsp)
movslq %ecx, %r9
movq 0x40(%r13), %r10
imulq %rdx, %r10
imulq 0x10(%r13), %r10
shlq $0x2, %r9
addq (%r13), %r10
movslq 0xac(%rsp), %r11
movq 0xc0(%rsp), %r13
imulq %rdx, %r13
movq 0x90(%rsp), %rcx
imulq %rcx, %r13
addq 0x80(%rsp), %r13
imulq %rcx, %r11
movq %rdx, 0xd0(%rsp)
shlq $0x4, %rdx
movq %rdx, 0x10(%rsp)
addq 0x28(%r14), %r9
xorl %ecx, %ecx
cmpl 0x30(%rsp), %ecx
jg 0x29f35b
movq (%r14), %rsi
xorl %r8d, %r8d
cmpq %rbx, %r8
jg 0x29f34b
movq -0x18(%rsi), %r12
cmpl $0x0, 0x100(%r14,%r12)
je 0x29eef7
movq 0x1b0(%r14,%r12), %rdx
movq 0x10(%rsp), %rbp
vmovups (%rdx,%rbp), %xmm8
jmp 0x29eefc
vxorps %xmm8, %xmm8, %xmm8
movslq 0xe8(%r14,%r12), %rdx
movq %r14, %rbp
movslq %ecx, %r14
imulq %rdx, %r14
imulq %r11, %r14
addq %r13, %r14
movl 0xe4(%rbp,%r12), %edx
imull %r8d, %edx
shll $0x2, %edx
movslq %edx, %rdx
leaq (%r14,%rdx,4), %rdx
xorl %r14d, %r14d
cmpq %r14, %r15
je 0x29ef4d
movslq (%rax,%r14), %rbp
shlq $0x4, %rbp
vmovups (%r9,%r14,4), %xmm1
vfmadd231ps (%rdx,%rbp), %xmm1, %xmm8 # xmm8 = (xmm1 * mem) + xmm8
addq $0x4, %r14
jmp 0x29ef2e
movq 0x8(%rsp), %r14
movl 0x110(%r14,%r12), %edx
decl %edx
cmpl $0x5, %edx
ja 0x29f332
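# 6-way dispatch through a relative jump table; xmm1 = max(xmm8, 0) is precomputed before the indirect jump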
movslq (%rdi,%rdx,4), %rdx
addq %rdi, %rdx
vmaxps %xmm10, %xmm8, %xmm1
jmpq *%rdx
movq 0x118(%r14,%r12), %rdx
vminps %xmm10, %xmm8, %xmm8
vbroadcastss (%rdx), %xmm15
vfmadd231ps %xmm15, %xmm8, %xmm1 # xmm1 = (xmm8 * xmm15) + xmm1
jmp 0x29f336
vbroadcastss 0x152220(%rip), %xmm11 # 0x3f11b8
vminps %xmm11, %xmm8, %xmm1
vmaxps %xmm1, %xmm12, %xmm1
vbroadcastss 0x152216(%rip), %xmm13 # 0x3f11c0
vmovaps %xmm13, %xmm10
vfmadd213ps %xmm5, %xmm1, %xmm10 # xmm10 = (xmm1 * xmm10) + xmm5
vcvttps2dq %xmm10, %xmm15
vcvtdq2ps %xmm15, %xmm15
vcmpltps %xmm15, %xmm10, %xmm10
vandps %xmm14, %xmm10, %xmm10
vsubps %xmm10, %xmm15, %xmm10
vbroadcastss 0x1521ed(%rip), %xmm11 # 0x3f11c4
vfmsub231ps %xmm11, %xmm10, %xmm1 # xmm1 = (xmm10 * xmm11) - xmm1
vfnmsub231ps %xmm6, %xmm10, %xmm1 # xmm1 = -(xmm10 * xmm6) - xmm1
vmulps %xmm1, %xmm1, %xmm15
vbroadcastss 0x1521de(%rip), %xmm2 # 0x3f11cc
vfmadd213ps %xmm3, %xmm1, %xmm2 # xmm2 = (xmm1 * xmm2) + xmm3
vbroadcastss 0x1521d8(%rip), %xmm7 # 0x3f11d4
vfmadd213ps %xmm7, %xmm1, %xmm2 # xmm2 = (xmm1 * xmm2) + xmm7
vbroadcastss 0x1521ce(%rip), %xmm7 # 0x3f11d8
vfmadd213ps %xmm7, %xmm1, %xmm2 # xmm2 = (xmm1 * xmm2) + xmm7
vbroadcastss 0x1521c4(%rip), %xmm4 # 0x3f11dc
vfmadd213ps %xmm4, %xmm1, %xmm2 # xmm2 = (xmm1 * xmm2) + xmm4
vfmadd213ps %xmm5, %xmm1, %xmm2 # xmm2 = (xmm1 * xmm2) + xmm5
vfmadd213ps %xmm1, %xmm15, %xmm2 # xmm2 = (xmm15 * xmm2) + xmm1
vaddps %xmm2, %xmm14, %xmm2
vcvttps2dq %xmm10, %xmm1
vpslld $0x17, %xmm1, %xmm1
vpaddd %xmm1, %xmm14, %xmm1
vfmadd213ps %xmm14, %xmm2, %xmm1 # xmm1 = (xmm2 * xmm1) + xmm14
vbroadcastss 0x152199(%rip), %xmm2 # 0x3f11e0
vmaxps %xmm2, %xmm1, %xmm2
vpsrld $0x17, %xmm2, %xmm10
vbroadcastss 0x15218f(%rip), %xmm4 # 0x3f11e8
vpaddd %xmm4, %xmm10, %xmm10
vbroadcastss 0x15217e(%rip), %xmm4 # 0x3f11e4
vandps %xmm4, %xmm2, %xmm2
vorps %xmm5, %xmm2, %xmm2
vcvtdq2ps %xmm10, %xmm10
vbroadcastss 0x152170(%rip), %xmm4 # 0x3f11ec
vcmpltps %xmm4, %xmm2, %xmm15
vandps %xmm2, %xmm15, %xmm4
vbroadcastss 0x152162(%rip), %xmm9 # 0x3f11f0
vaddps %xmm2, %xmm9, %xmm2
vaddps %xmm4, %xmm2, %xmm2
vandps %xmm14, %xmm15, %xmm4
vsubps %xmm4, %xmm10, %xmm4
vmulps %xmm2, %xmm2, %xmm10
vbroadcastss 0x152148(%rip), %xmm15 # 0x3f11f4
vbroadcastss 0x152143(%rip), %xmm9 # 0x3f11f8
vfmadd213ps %xmm9, %xmm2, %xmm15 # xmm15 = (xmm2 * xmm15) + xmm9
vbroadcastss 0x152139(%rip), %xmm9 # 0x3f11fc
vfmadd213ps %xmm9, %xmm2, %xmm15 # xmm15 = (xmm2 * xmm15) + xmm9
vbroadcastss 0x15212f(%rip), %xmm9 # 0x3f1200
vfmadd213ps %xmm9, %xmm2, %xmm15 # xmm15 = (xmm2 * xmm15) + xmm9
vbroadcastss 0x152125(%rip), %xmm9 # 0x3f1204
vfmadd213ps %xmm9, %xmm2, %xmm15 # xmm15 = (xmm2 * xmm15) + xmm9
vbroadcastss 0x15211b(%rip), %xmm9 # 0x3f1208
vfmadd213ps %xmm9, %xmm2, %xmm15 # xmm15 = (xmm2 * xmm15) + xmm9
vbroadcastss 0x152111(%rip), %xmm9 # 0x3f120c
vfmadd213ps %xmm9, %xmm2, %xmm15 # xmm15 = (xmm2 * xmm15) + xmm9
vbroadcastss 0x152107(%rip), %xmm9 # 0x3f1210
vfmadd213ps %xmm9, %xmm2, %xmm15 # xmm15 = (xmm2 * xmm15) + xmm9
vbroadcastss 0x1520fd(%rip), %xmm9 # 0x3f1214
vfmadd213ps %xmm9, %xmm2, %xmm15 # xmm15 = (xmm2 * xmm15) + xmm9
vmulps %xmm2, %xmm10, %xmm9
vmulps %xmm15, %xmm9, %xmm9
vfmadd231ps %xmm6, %xmm4, %xmm9 # xmm9 = (xmm4 * xmm6) + xmm9
vfmsub231ps %xmm10, %xmm5, %xmm9 # xmm9 = (xmm5 * xmm10) - xmm9
vxorps %xmm10, %xmm10, %xmm10
vcmpleps %xmm10, %xmm1, %xmm1
vsubps %xmm2, %xmm9, %xmm2
vfnmadd231ps %xmm4, %xmm11, %xmm2 # xmm2 = -(xmm11 * xmm4) + xmm2
vaddps %xmm2, %xmm2, %xmm2
vbroadcastss 0x1527b0(%rip), %xmm4 # 0x3f1900
vblendvps %xmm1, %xmm4, %xmm2, %xmm1
vbroadcastss 0x152059(%rip), %xmm2 # 0x3f11b8
vminps %xmm2, %xmm1, %xmm1
vmaxps %xmm1, %xmm12, %xmm1
vfmadd213ps %xmm5, %xmm1, %xmm13 # xmm13 = (xmm1 * xmm13) + xmm5
vcvttps2dq %xmm13, %xmm4
vcvtdq2ps %xmm4, %xmm4
vcmpltps %xmm4, %xmm13, %xmm2
vandps %xmm2, %xmm14, %xmm2
vsubps %xmm2, %xmm4, %xmm2
vfmsub231ps %xmm11, %xmm2, %xmm1 # xmm1 = (xmm2 * xmm11) - xmm1
vfnmsub231ps %xmm6, %xmm2, %xmm1 # xmm1 = -(xmm2 * xmm6) - xmm1
vmulps %xmm1, %xmm1, %xmm4
vbroadcastss 0x152033(%rip), %xmm9 # 0x3f11cc
vfmadd213ps %xmm3, %xmm1, %xmm9 # xmm9 = (xmm1 * xmm9) + xmm3
vbroadcastss 0x15202d(%rip), %xmm0 # 0x3f11d4
vfmadd213ps %xmm0, %xmm1, %xmm9 # xmm9 = (xmm1 * xmm9) + xmm0
vfmadd213ps %xmm7, %xmm1, %xmm9 # xmm9 = (xmm1 * xmm9) + xmm7
vbroadcastss 0x152022(%rip), %xmm0 # 0x3f11dc
vfmadd213ps %xmm0, %xmm1, %xmm9 # xmm9 = (xmm1 * xmm9) + xmm0
vfmadd213ps %xmm5, %xmm1, %xmm9 # xmm9 = (xmm1 * xmm9) + xmm5
vfmadd213ps %xmm1, %xmm4, %xmm9 # xmm9 = (xmm4 * xmm9) + xmm1
vaddps %xmm14, %xmm9, %xmm4
vcvttps2dq %xmm2, %xmm1
vpslld $0x17, %xmm1, %xmm1
vpaddd %xmm1, %xmm14, %xmm1
vfmadd213ps %xmm14, %xmm4, %xmm1 # xmm1 = (xmm4 * xmm1) + xmm14
vrcpps %xmm1, %xmm2
vaddps %xmm2, %xmm2, %xmm4
vbroadcastss 0x153517(%rip), %xmm0 # 0x3f2708
vfmsub213ps %xmm0, %xmm4, %xmm1 # xmm1 = (xmm4 * xmm1) - xmm0
vfnmadd213ps %xmm4, %xmm2, %xmm1 # xmm1 = -(xmm2 * xmm1) + xmm4
vfmsub213ps %xmm8, %xmm8, %xmm1 # xmm1 = (xmm8 * xmm1) - xmm8
jmp 0x29f336
movq 0x118(%r14,%r12), %rdx
vbroadcastss (%rdx), %xmm1
vbroadcastss 0x4(%rdx), %xmm15
vmaxps %xmm1, %xmm8, %xmm1
vminps %xmm1, %xmm15, %xmm1
jmp 0x29f336
vbroadcastss 0x151f86(%rip), %xmm1 # 0x3f11b4
vxorps %xmm1, %xmm8, %xmm1
vbroadcastss 0x151f7d(%rip), %xmm2 # 0x3f11b8
vminps %xmm2, %xmm1, %xmm1
vbroadcastss 0x151f74(%rip), %xmm2 # 0x3f11bc
vmaxps %xmm2, %xmm1, %xmm1
vbroadcastss 0x151f6b(%rip), %xmm8 # 0x3f11c0
vfmadd213ps %xmm5, %xmm1, %xmm8 # xmm8 = (xmm1 * xmm8) + xmm5
vcvttps2dq %xmm8, %xmm15
vcvtdq2ps %xmm15, %xmm15
vcmpltps %xmm15, %xmm8, %xmm8
vandps %xmm14, %xmm8, %xmm8
vsubps %xmm8, %xmm15, %xmm8
vbroadcastss 0x151f47(%rip), %xmm2 # 0x3f11c4
vfmsub231ps %xmm2, %xmm8, %xmm1 # xmm1 = (xmm8 * xmm2) - xmm1
vbroadcastss 0x153491(%rip), %xmm2 # 0x3f271c
vfmsub231ps %xmm2, %xmm8, %xmm1 # xmm1 = (xmm8 * xmm2) - xmm1
vmulps %xmm1, %xmm1, %xmm15
vbroadcastss 0x151f2f(%rip), %xmm10 # 0x3f11cc
vbroadcastss 0x151f2a(%rip), %xmm2 # 0x3f11d0
vfmadd213ps %xmm2, %xmm1, %xmm10 # xmm10 = (xmm1 * xmm10) + xmm2
vbroadcastss 0x151f20(%rip), %xmm2 # 0x3f11d4
vfmadd213ps %xmm2, %xmm1, %xmm10 # xmm10 = (xmm1 * xmm10) + xmm2
vbroadcastss 0x151f16(%rip), %xmm0 # 0x3f11d8
vfmadd213ps %xmm0, %xmm1, %xmm10 # xmm10 = (xmm1 * xmm10) + xmm0
vbroadcastss 0x151f0c(%rip), %xmm0 # 0x3f11dc
vfmadd213ps %xmm0, %xmm1, %xmm10 # xmm10 = (xmm1 * xmm10) + xmm0
vfmadd213ps %xmm5, %xmm1, %xmm10 # xmm10 = (xmm1 * xmm10) + xmm5
vfmadd213ps %xmm1, %xmm15, %xmm10 # xmm10 = (xmm15 * xmm10) + xmm1
vaddps %xmm14, %xmm10, %xmm10
vcvttps2dq %xmm8, %xmm1
vpslld $0x17, %xmm1, %xmm1
vpaddd %xmm1, %xmm14, %xmm1
vfmadd213ps %xmm14, %xmm10, %xmm1 # xmm1 = (xmm10 * xmm1) + xmm14
vxorps %xmm10, %xmm10, %xmm10
vrcpps %xmm1, %xmm8
vfmsub213ps %xmm14, %xmm8, %xmm1 # xmm1 = (xmm8 * xmm1) - xmm14
vfnmadd132ps %xmm8, %xmm8, %xmm1 # xmm1 = -(xmm1 * xmm8) + xmm8
jmp 0x29f336
movq 0x118(%r14,%r12), %rdx
vbroadcastss (%rdx), %xmm1
vbroadcastss 0x4(%rdx), %xmm2
vfmadd231ps %xmm1, %xmm8, %xmm2 # xmm2 = (xmm8 * xmm1) + xmm2
vmaxps %xmm2, %xmm10, %xmm1
vminps %xmm1, %xmm14, %xmm1
vmulps %xmm1, %xmm8, %xmm1
jmp 0x29f336
vmovaps %xmm8, %xmm1
movq %r8, %rdx
shlq $0x4, %rdx
vmovups %xmm1, (%r10,%rdx)
incq %r8
jmp 0x29eecb
movq 0x40(%rsp), %rdx
leaq (%r10,%rdx,4), %r10
incl %ecx
jmp 0x29eebb
movq 0xd0(%rsp), %rdx
incq %rdx
movq 0x70(%rsp), %rcx
addl 0xe0(%rsp), %ecx
movq 0x28(%rsp), %r13
jmp 0x29ee50
pushq $0x3
popq %r15
cmpl $0x1, 0xdc(%rdi)
jne 0x29fa97
cmpl $0x1, 0xe0(%rdi)
jne 0x29fa97
cmpl $0x2, 0xe4(%rdi)
jne 0x29fa97
cmpl $0x2, 0xe8(%rdi)
jne 0x29fa97
movl 0x2c(%r13), %eax
movl 0x30(%r13), %ecx
movl 0xac(%rsp), %r8d
movl 0xb8(%rsp), %edx
subl %eax, %r8d
shll $0x4, %r8d
movq 0x1b0(%rdi), %rsi
movslq %r8d, %rdi
xorl %r8d, %r8d
testl %ecx, %ecx
cmovlel %r8d, %ecx
testl %edx, %edx
cmovlel %r8d, %edx
shlq $0x2, %rdi
cmpq %rdx, %r8
je 0x29fdf4
testq %rsi, %rsi
je 0x29f40c
movq %r8, %r9
shlq $0x5, %r9
vmovups (%rsi,%r9), %ymm0
jmp 0x29f410
vxorps %xmm0, %xmm0, %xmm0
movq 0x40(%r13), %r9
imulq %r8, %r9
imulq 0x10(%r13), %r9
addq (%r13), %r9
movq %r14, %r10
movq 0x28(%r14), %r14
movslq 0x54(%r10), %r15
imulq %r8, %r15
imulq 0x38(%r10), %r15
movslq 0xac(%rsp), %rbx
movq 0xc0(%rsp), %r10
imulq %r8, %r10
movq 0x90(%rsp), %r11
imulq %r11, %r10
addq 0x80(%rsp), %r10
imulq %r11, %rbx
leaq (%r10,%rbx), %r11
leaq (%r10,%rbx,2), %rbx
vmovaps (%r14,%r15), %ymm1
vmovaps 0x20(%r14,%r15), %ymm2
vmovaps 0x40(%r14,%r15), %ymm3
vmovaps 0x60(%r14,%r15), %ymm4
vmovaps 0x80(%r14,%r15), %ymm5
vmovaps 0xa0(%r14,%r15), %ymm6
vmovaps 0xc0(%r14,%r15), %ymm7
vmovaps 0xe0(%r14,%r15), %ymm8
vmovaps 0x100(%r14,%r15), %ymm9
xorl %ebp, %ebp
cmpl %ecx, %ebp
je 0x29f772
xorl %r15d, %r15d
xorl %r12d, %r12d
leal 0x3(%r12), %r14d
cmpl %eax, %r14d
jge 0x29f6f3
vmovaps 0x40(%r10,%r15), %ymm11
vmovaps 0x80(%r10,%r15), %ymm12
vmovaps 0xc0(%r10,%r15), %ymm14
vmovaps (%r10,%r15), %ymm10
vfmadd213ps %ymm0, %ymm1, %ymm10 # ymm10 = (ymm1 * ymm10) + ymm0
vfmadd231ps 0x20(%r10,%r15), %ymm2, %ymm10 # ymm10 = (ymm2 * mem) + ymm10
vfmadd231ps %ymm11, %ymm3, %ymm10 # ymm10 = (ymm3 * ymm11) + ymm10
vfmadd213ps %ymm0, %ymm1, %ymm11 # ymm11 = (ymm1 * ymm11) + ymm0
vfmadd231ps 0x60(%r10,%r15), %ymm2, %ymm11 # ymm11 = (ymm2 * mem) + ymm11
vfmadd231ps %ymm12, %ymm3, %ymm11 # ymm11 = (ymm3 * ymm12) + ymm11
vfmadd213ps %ymm0, %ymm1, %ymm12 # ymm12 = (ymm1 * ymm12) + ymm0
vmovaps %ymm14, %ymm13
vfmadd213ps %ymm0, %ymm1, %ymm13 # ymm13 = (ymm1 * ymm13) + ymm0
vfmadd231ps 0xa0(%r10,%r15), %ymm2, %ymm12 # ymm12 = (ymm2 * mem) + ymm12
vfmadd231ps 0xe0(%r10,%r15), %ymm2, %ymm13 # ymm13 = (ymm2 * mem) + ymm13
vfmadd231ps %ymm14, %ymm3, %ymm12 # ymm12 = (ymm3 * ymm14) + ymm12
vfmadd231ps 0x100(%r10,%r15), %ymm3, %ymm13 # ymm13 = (ymm3 * mem) + ymm13
vmovaps 0x40(%r11,%r15), %ymm14
vmovaps 0x80(%r11,%r15), %ymm15
vfmadd231ps (%r11,%r15), %ymm4, %ymm10 # ymm10 = (ymm4 * mem) + ymm10
vfmadd231ps %ymm14, %ymm4, %ymm11 # ymm11 = (ymm4 * ymm14) + ymm11
vfmadd231ps 0x20(%r11,%r15), %ymm5, %ymm10 # ymm10 = (ymm5 * mem) + ymm10
vfmadd231ps %ymm14, %ymm6, %ymm10 # ymm10 = (ymm6 * ymm14) + ymm10
vmovaps 0xc0(%r11,%r15), %ymm14
vfmadd231ps %ymm15, %ymm4, %ymm12 # ymm12 = (ymm4 * ymm15) + ymm12
vfmadd231ps %ymm14, %ymm4, %ymm13 # ymm13 = (ymm4 * ymm14) + ymm13
vfmadd231ps 0x60(%r11,%r15), %ymm5, %ymm11 # ymm11 = (ymm5 * mem) + ymm11
vfmadd231ps 0xa0(%r11,%r15), %ymm5, %ymm12 # ymm12 = (ymm5 * mem) + ymm12
vfmadd231ps 0xe0(%r11,%r15), %ymm5, %ymm13 # ymm13 = (ymm5 * mem) + ymm13
vfmadd231ps %ymm15, %ymm6, %ymm11 # ymm11 = (ymm6 * ymm15) + ymm11
vfmadd231ps %ymm14, %ymm6, %ymm12 # ymm12 = (ymm6 * ymm14) + ymm12
vfmadd231ps 0x100(%r11,%r15), %ymm6, %ymm13 # ymm13 = (ymm6 * mem) + ymm13
vmovaps 0x40(%rbx,%r15), %ymm14
vfmadd231ps (%rbx,%r15), %ymm7, %ymm10 # ymm10 = (ymm7 * mem) + ymm10
vfmadd231ps %ymm14, %ymm7, %ymm11 # ymm11 = (ymm7 * ymm14) + ymm11
vfmadd231ps 0x20(%rbx,%r15), %ymm8, %ymm10 # ymm10 = (ymm8 * mem) + ymm10
vfmadd231ps %ymm14, %ymm9, %ymm10 # ymm10 = (ymm9 * ymm14) + ymm10
vmovaps 0x80(%rbx,%r15), %ymm14
vfmadd231ps %ymm14, %ymm7, %ymm12 # ymm12 = (ymm7 * ymm14) + ymm12
vfmadd231ps 0x60(%rbx,%r15), %ymm8, %ymm11 # ymm11 = (ymm8 * mem) + ymm11
vfmadd231ps %ymm14, %ymm9, %ymm11 # ymm11 = (ymm9 * ymm14) + ymm11
vmovaps 0xc0(%rbx,%r15), %ymm14
vfmadd231ps %ymm14, %ymm7, %ymm13 # ymm13 = (ymm7 * ymm14) + ymm13
vfmadd231ps 0xa0(%rbx,%r15), %ymm8, %ymm12 # ymm12 = (ymm8 * mem) + ymm12
vfmadd231ps 0xe0(%rbx,%r15), %ymm8, %ymm13 # ymm13 = (ymm8 * mem) + ymm13
vfmadd231ps 0x100(%rbx,%r15), %ymm9, %ymm13 # ymm13 = (ymm9 * mem) + ymm13
vfmadd231ps %ymm14, %ymm9, %ymm12 # ymm12 = (ymm9 * ymm14) + ymm12
vmovaps %ymm10, (%r9)
vmovaps %ymm11, 0x20(%r9)
vmovaps %ymm12, 0x40(%r9)
vmovaps %ymm13, 0x60(%r9)
subq $-0x80, %r9
addl $0x4, %r12d
addq $0x100, %r15 # imm = 0x100
jmp 0x29f4c6
vmovaps (%r10,%r15), %ymm10
vfmadd213ps %ymm0, %ymm1, %ymm10 # ymm10 = (ymm1 * ymm10) + ymm0
vfmadd231ps 0x20(%r10,%r15), %ymm2, %ymm10 # ymm10 = (ymm2 * mem) + ymm10
vmovaps 0x40(%r10,%r15), %ymm11
vfmadd231ps %ymm11, %ymm3, %ymm10 # ymm10 = (ymm3 * ymm11) + ymm10
vfmadd213ps %ymm0, %ymm1, %ymm11 # ymm11 = (ymm1 * ymm11) + ymm0
vfmadd231ps 0x60(%r10,%r15), %ymm2, %ymm11 # ymm11 = (ymm2 * mem) + ymm11
vfmadd231ps 0x80(%r10,%r15), %ymm3, %ymm11 # ymm11 = (ymm3 * mem) + ymm11
vmovaps 0x40(%r11,%r15), %ymm12
vfmadd231ps (%r11,%r15), %ymm4, %ymm10 # ymm10 = (ymm4 * mem) + ymm10
vfmadd231ps %ymm12, %ymm4, %ymm11 # ymm11 = (ymm4 * ymm12) + ymm11
vfmadd231ps 0x20(%r11,%r15), %ymm5, %ymm10 # ymm10 = (ymm5 * mem) + ymm10
vfmadd231ps 0x60(%r11,%r15), %ymm5, %ymm11 # ymm11 = (ymm5 * mem) + ymm11
vfmadd231ps %ymm12, %ymm6, %ymm10 # ymm10 = (ymm6 * ymm12) + ymm10
vfmadd231ps 0x80(%r11,%r15), %ymm6, %ymm11 # ymm11 = (ymm6 * mem) + ymm11
vmovaps 0x40(%rbx,%r15), %ymm12
vfmadd231ps (%rbx,%r15), %ymm7, %ymm10 # ymm10 = (ymm7 * mem) + ymm10
vfmadd231ps %ymm12, %ymm7, %ymm11 # ymm11 = (ymm7 * ymm12) + ymm11
vfmadd231ps 0x20(%rbx,%r15), %ymm8, %ymm10 # ymm10 = (ymm8 * mem) + ymm10
vfmadd231ps 0x60(%rbx,%r15), %ymm8, %ymm11 # ymm11 = (ymm8 * mem) + ymm11
vfmadd231ps %ymm12, %ymm9, %ymm10 # ymm10 = (ymm9 * ymm12) + ymm10
vfmadd231ps 0x80(%rbx,%r15), %ymm9, %ymm11 # ymm11 = (ymm9 * mem) + ymm11
vmovaps %ymm10, (%r9)
vmovaps %ymm11, 0x20(%r9)
addq $0x40, %r9
addl $0x2, %r12d
subq $-0x80, %r15
leal 0x1(%r12), %r14d
cmpl %eax, %r14d
jl 0x29f64a
jmp 0x29f754
vmovaps (%r10,%r15), %ymm10
vfmadd213ps %ymm0, %ymm1, %ymm10 # ymm10 = (ymm1 * ymm10) + ymm0
vfmadd231ps 0x20(%r10,%r15), %ymm2, %ymm10 # ymm10 = (ymm2 * mem) + ymm10
vfmadd231ps 0x40(%r10,%r15), %ymm3, %ymm10 # ymm10 = (ymm3 * mem) + ymm10
vfmadd231ps (%r11,%r15), %ymm4, %ymm10 # ymm10 = (ymm4 * mem) + ymm10
vfmadd231ps 0x20(%r11,%r15), %ymm5, %ymm10 # ymm10 = (ymm5 * mem) + ymm10
vfmadd231ps 0x40(%r11,%r15), %ymm6, %ymm10 # ymm10 = (ymm6 * mem) + ymm10
vfmadd231ps (%rbx,%r15), %ymm7, %ymm10 # ymm10 = (ymm7 * mem) + ymm10
vfmadd231ps 0x20(%rbx,%r15), %ymm8, %ymm10 # ymm10 = (ymm8 * mem) + ymm10
vfmadd231ps 0x40(%rbx,%r15), %ymm9, %ymm10 # ymm10 = (ymm9 * mem) + ymm10
vmovaps %ymm10, (%r9)
addq $0x20, %r9
incl %r12d
addq $0x40, %r15
cmpl %eax, %r12d
jl 0x29f703
addq %rdi, %r10
addq %r15, %r10
addq %rdi, %r11
addq %r15, %r11
addq %rdi, %rbx
addq %r15, %rbx
incl %ebp
jmp 0x29f4b8
incq %r8
movq 0x8(%rsp), %r14
jmp 0x29f3ef
pushq $0x5
popq %r15
cmpl $0x1, 0xdc(%rdi)
jne 0x29fa97
cmpl $0x1, 0xe0(%rdi)
jne 0x29fa97
cmpl $0x2, 0xe4(%rdi)
jne 0x29fa97
cmpl $0x2, 0xe8(%rdi)
jne 0x29fa97
movl 0x2c(%r13), %edx
movl 0x30(%r13), %ecx
movl 0xac(%rsp), %eax
movl 0xb8(%rsp), %esi
subl %edx, %eax
shll $0x4, %eax
movq 0x1b0(%rdi), %r9
xorl %r15d, %r15d
testl %edx, %edx
cmovlel %r15d, %edx
movl %edx, %edi
movslq %eax, %r8
testl %ecx, %ecx
cmovlel %r15d, %ecx
testl %esi, %esi
cmovlel %r15d, %esi
movq %rsi, 0x10(%rsp)
shlq $0x2, %r8
movq %r9, 0x30(%rsp)
cmpq 0x10(%rsp), %r15
je 0x29fdf4
testq %r9, %r9
je 0x29f820
movq %r15, %rax
shlq $0x5, %rax
vmovups (%r9,%rax), %ymm0
jmp 0x29f824
vxorps %xmm0, %xmm0, %xmm0
movq 0x40(%r13), %r9
imulq %r15, %r9
imulq 0x10(%r13), %r9
addq (%r13), %r9
movq 0x28(%r14), %r10
movslq 0x54(%r14), %r11
imulq %r15, %r11
imulq 0x38(%r14), %r11
movq 0xc0(%rsp), %rbx
movq %r15, 0x8(%rsp)
imulq %r15, %rbx
movq 0x90(%rsp), %rax
imulq %rax, %rbx
addq 0x80(%rsp), %rbx
movslq 0xac(%rsp), %rdx
imulq %rax, %rdx
leaq (%rbx,%rdx), %r15
leaq (%rbx,%rdx,2), %r12
leaq (%rdx,%rdx,2), %r13
addq %rbx, %r13
leaq (%rbx,%rdx,4), %rbp
xorl %edx, %edx
cmpl %ecx, %edx
je 0x29fa80
movl %edi, %eax
xorl %esi, %esi
subl $0x1, %eax
jb 0x29fa5b
vmovaps (%r10,%r11), %ymm1
vmovaps 0x20(%r10,%r11), %ymm2
vmovaps 0x40(%r10,%r11), %ymm3
vmovaps 0x60(%r10,%r11), %ymm4
vmovaps 0x80(%r10,%r11), %ymm5
vfmadd132ps (%rbx,%rsi), %ymm0, %ymm1 # ymm1 = (ymm1 * mem) + ymm0
vfmadd231ps 0x20(%rbx,%rsi), %ymm2, %ymm1 # ymm1 = (ymm2 * mem) + ymm1
vfmadd231ps 0x40(%rbx,%rsi), %ymm3, %ymm1 # ymm1 = (ymm3 * mem) + ymm1
vfmadd231ps 0x60(%rbx,%rsi), %ymm4, %ymm1 # ymm1 = (ymm4 * mem) + ymm1
vfmadd231ps 0x80(%rbx,%rsi), %ymm5, %ymm1 # ymm1 = (ymm5 * mem) + ymm1
vmovaps 0xa0(%r10,%r11), %ymm2
vmovaps 0xc0(%r10,%r11), %ymm3
vmovaps 0xe0(%r10,%r11), %ymm4
vmovaps 0x100(%r10,%r11), %ymm5
vmovaps 0x120(%r10,%r11), %ymm6
vfmadd132ps (%r15,%rsi), %ymm1, %ymm2 # ymm2 = (ymm2 * mem) + ymm1
vfmadd231ps 0x20(%r15,%rsi), %ymm3, %ymm2 # ymm2 = (ymm3 * mem) + ymm2
vfmadd231ps 0x40(%r15,%rsi), %ymm4, %ymm2 # ymm2 = (ymm4 * mem) + ymm2
vfmadd231ps 0x60(%r15,%rsi), %ymm5, %ymm2 # ymm2 = (ymm5 * mem) + ymm2
vfmadd231ps 0x80(%r15,%rsi), %ymm6, %ymm2 # ymm2 = (ymm6 * mem) + ymm2
vmovaps 0x140(%r10,%r11), %ymm1
vmovaps 0x160(%r10,%r11), %ymm3
vmovaps 0x180(%r10,%r11), %ymm4
vmovaps 0x1a0(%r10,%r11), %ymm5
vmovaps 0x1c0(%r10,%r11), %ymm6
vfmadd132ps (%r12,%rsi), %ymm2, %ymm1 # ymm1 = (ymm1 * mem) + ymm2
vfmadd231ps 0x20(%r12,%rsi), %ymm3, %ymm1 # ymm1 = (ymm3 * mem) + ymm1
vfmadd231ps 0x40(%r12,%rsi), %ymm4, %ymm1 # ymm1 = (ymm4 * mem) + ymm1
vfmadd231ps 0x60(%r12,%rsi), %ymm5, %ymm1 # ymm1 = (ymm5 * mem) + ymm1
vfmadd231ps 0x80(%r12,%rsi), %ymm6, %ymm1 # ymm1 = (ymm6 * mem) + ymm1
vmovaps 0x1e0(%r10,%r11), %ymm2
vmovaps 0x200(%r10,%r11), %ymm3
vmovaps 0x220(%r10,%r11), %ymm4
vmovaps 0x240(%r10,%r11), %ymm5
vmovaps 0x260(%r10,%r11), %ymm6
vfmadd132ps (%r13,%rsi), %ymm1, %ymm2 # ymm2 = (ymm2 * mem) + ymm1
vfmadd231ps 0x20(%r13,%rsi), %ymm3, %ymm2 # ymm2 = (ymm3 * mem) + ymm2
vfmadd231ps 0x40(%r13,%rsi), %ymm4, %ymm2 # ymm2 = (ymm4 * mem) + ymm2
vfmadd231ps 0x60(%r13,%rsi), %ymm5, %ymm2 # ymm2 = (ymm5 * mem) + ymm2
vfmadd231ps 0x80(%r13,%rsi), %ymm6, %ymm2 # ymm2 = (ymm6 * mem) + ymm2
vmovaps 0x280(%r10,%r11), %ymm1
vmovaps 0x2a0(%r10,%r11), %ymm3
vmovaps 0x2c0(%r10,%r11), %ymm4
vmovaps 0x2e0(%r10,%r11), %ymm5
vmovaps 0x300(%r10,%r11), %ymm6
vfmadd132ps (%rbp,%rsi), %ymm2, %ymm1 # ymm1 = (ymm1 * mem) + ymm2
vfmadd231ps 0x20(%rbp,%rsi), %ymm3, %ymm1 # ymm1 = (ymm3 * mem) + ymm1
vfmadd231ps 0x40(%rbp,%rsi), %ymm4, %ymm1 # ymm1 = (ymm4 * mem) + ymm1
vfmadd231ps 0x60(%rbp,%rsi), %ymm5, %ymm1 # ymm1 = (ymm5 * mem) + ymm1
vfmadd231ps 0x80(%rbp,%rsi), %ymm6, %ymm1 # ymm1 = (ymm6 * mem) + ymm1
vmovaps %ymm1, (%r9)
addq $0x20, %r9
addq $0x40, %rsi
jmp 0x29f898
addq %r8, %rbx
addq %rsi, %rbx
addq %r8, %r15
addq %rsi, %r15
addq %r8, %r12
addq %rsi, %r12
addq %r8, %r13
addq %rsi, %r13
addq %r8, %rbp
addq %rsi, %rbp
incl %edx
jmp 0x29f88c
movq 0x8(%rsp), %r15
incq %r15
movq 0x28(%rsp), %r13
movq 0x30(%rsp), %r9
jmp 0x29f801
imull %eax, %r15d
movslq %r15d, %rsi
leaq 0x180(%rsp), %rdi
leaq 0x100(%rsp), %rdx
callq 0x73bbe
movq (%r14), %rcx
movq -0x18(%rcx), %rdx
movl 0x10(%rsp), %r11d
imull 0xe0(%r14,%rdx), %r11d
movq 0x180(%rsp), %rax
movl 0xdc(%r14,%rdx), %esi
imull 0xd4(%r14,%rdx), %esi
subl %esi, %r11d
xorl %esi, %esi
xorl %edi, %edi
xorl %r8d, %r8d
cmpl 0xd8(%r14,%rdx), %esi
jge 0x29fb28
movslq %r8d, %r8
leaq (%rax,%r8,4), %r10
xorl %r9d, %r9d
cmpl 0xd4(%r14,%rdx), %r9d
jge 0x29fb1e
movl %edi, (%r10,%r9,4)
movq -0x18(%rcx), %rdx
addl 0xdc(%r14,%rdx), %edi
incq %r9
jmp 0x29faff
addl %r11d, %edi
incl %esi
addq %r9, %r8
jmp 0x29faeb
leal (,%r15,8), %ecx
movl %ecx, 0xd0(%rsp)
movl 0x70(%rsp), %ecx
shll $0x3, %ecx
movslq %ecx, %rcx
movq %rcx, 0x10(%rsp)
xorl %ecx, %ecx
testl %r15d, %r15d
cmovlel %ecx, %r15d
movq 0x60(%rsp), %r12
testl %r12d, %r12d
cmovlel %ecx, %r12d
movq %r12, 0x60(%rsp)
shlq $0x2, %r15
xorl %edx, %edx
cmpq 0x60(%rsp), %rdx
je 0x29fc9f
movq %rcx, 0x40(%rsp)
movslq %ecx, %r8
movq 0x40(%r13), %r9
imulq %rdx, %r9
imulq 0x10(%r13), %r9
shlq $0x2, %r8
addq (%r13), %r9
movslq 0xac(%rsp), %r10
movq 0xc0(%rsp), %r11
imulq %rdx, %r11
movq 0x90(%rsp), %rcx
imulq %rcx, %r11
addq 0x80(%rsp), %r11
imulq %rcx, %r10
movq %rdx, 0x70(%rsp)
movq %rdx, %r13
shlq $0x5, %r13
addq 0x28(%r14), %r8
xorl %ebp, %ebp
cmpl 0x30(%rsp), %ebp
jg 0x29fc81
movq (%r14), %rcx
xorl %r12d, %r12d
cmpq %rbx, %r12
jg 0x29fc71
movq -0x18(%rcx), %rsi
cmpl $0x0, 0x100(%r14,%rsi)
je 0x29fc04
movq 0x1b0(%r14,%rsi), %rdi
vmovups (%rdi,%r13), %ymm0
jmp 0x29fc08
vxorps %xmm0, %xmm0, %xmm0
movslq 0xe8(%r14,%rsi), %rdi
movq %r14, %rdx
movslq %ebp, %r14
imulq %rdi, %r14
imulq %r10, %r14
addq %r11, %r14
movl 0xe4(%rdx,%rsi), %esi
imull %r12d, %esi
shll $0x3, %esi
movslq %esi, %rsi
leaq (%r14,%rsi,4), %rsi
xorl %edi, %edi
cmpq %rdi, %r15
je 0x29fc57
movslq (%rax,%rdi), %r14
shlq $0x5, %r14
vmovups (%rsi,%r14), %ymm1
vfmadd231ps (%r8,%rdi,8), %ymm1, %ymm0 # ymm0 = (ymm1 * mem) + ymm0
addq $0x4, %rdi
jmp 0x29fc38
movq %r12, %rsi
shlq $0x5, %rsi
vmovups %ymm0, (%r9,%rsi)
incq %r12
movq 0x8(%rsp), %r14
jmp 0x29fbdc
movq 0x10(%rsp), %rcx
leaq (%r9,%rcx,4), %r9
incl %ebp
jmp 0x29fbcc
movq 0x70(%rsp), %rdx
incq %rdx
movq 0x40(%rsp), %rcx
addl 0xd0(%rsp), %ecx
movq 0x28(%rsp), %r13
jmp 0x29fb66
movq 0x8(%r14), %rdi
testq %rdi, %rdi
je 0x29fcb9
movq (%rdi), %rax
movq %r13, %rsi
movq 0x58(%rsp), %rdx
vzeroupper
callq *0x48(%rax)
leaq 0x180(%rsp), %rdi
vzeroupper
callq 0x624be
xorl %r12d, %r12d
jmp 0x29fd9f
movq %rsi, %rdi
callq 0x5f3e0
vmovaps 0x100(%rsp), %xmm0
movq 0x28(%rsp), %rcx
vmovups %xmm0, (%rcx)
movq 0x110(%rsp), %rax
movq %rax, 0x10(%rcx)
movl 0x118(%rsp), %eax
movl %eax, 0x18(%rcx)
movq 0x120(%rsp), %rax
movq %rax, 0x20(%rcx)
vmovups 0x128(%rsp), %xmm0
vmovups %xmm0, 0x28(%rcx)
movl 0x138(%rsp), %eax
movl %eax, 0x38(%rcx)
movq 0x140(%rsp), %rax
movq %rax, 0x40(%rcx)
movq 0x108(%rsp), %rax
testq %rax, %rax
je 0x29fd68
lock
decl (%rax)
jne 0x29fd68
movq 0x100(%rsp), %rsi
movq 0x120(%rsp), %rdi
testq %rdi, %rdi
je 0x29fd60
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x29fd68
movq %rsi, %rdi
callq 0x5f3e0
movq 0x188(%rsp), %rax
testq %rax, %rax
je 0x29fd9f
lock
decl (%rax)
jne 0x29fd9f
movq 0x180(%rsp), %rsi
movq 0x1a0(%rsp), %rdi
testq %rdi, %rdi
je 0x29fd97
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x29fd9f
movq %rsi, %rdi
callq 0x5f3e0
movq 0x88(%rsp), %rax
testq %rax, %rax
je 0x29fddc
lock
decl (%rax)
jne 0x29fddc
movq 0x80(%rsp), %rsi
movq 0xa0(%rsp), %rdi
testq %rdi, %rdi
je 0x29fdd1
movq (%rdi), %rax
vzeroupper
callq *0x18(%rax)
jmp 0x29fddc
movq %rsi, %rdi
vzeroupper
callq 0x5f3e0
movl %r12d, %eax
addq $0x2e8, %rsp # imm = 0x2E8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
movq 0x8(%r14), %rdi
testq %rdi, %rdi
je 0x29fcc9
movq (%rdi), %rax
movq %r13, %rsi
movq 0x58(%rsp), %rdx
vzeroupper
callq *0x48(%rax)
jmp 0x29fcc9
movq %rax, %rbx
leaq 0x180(%rsp), %rdi
callq 0x624be
jmp 0x29ff42
jmp 0x29fe5b
jmp 0x29fe5b
jmp 0x29ff89
jmp 0x29ff89
movq %rax, %rbx
jmp 0x29ff0b
movq %rax, %rbx
jmp 0x29fed4
jmp 0x29ff89
jmp 0x29fe5b
jmp 0x29ff89
jmp 0x29ff89
movq %rax, %rbx
jmp 0x29ff42
movq %rax, %rbx
movq 0x1e8(%rsp), %rax
testq %rax, %rax
je 0x29fe9d
lock
decl (%rax)
jne 0x29fe9d
movq 0x1e0(%rsp), %rsi
movq 0x200(%rsp), %rdi
testq %rdi, %rdi
jne 0x29fe97
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x29fe9d
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x230(%rsp), %rax
testq %rax, %rax
je 0x29fed4
lock
decl (%rax)
jne 0x29fed4
movq 0x228(%rsp), %rsi
movq 0x248(%rsp), %rdi
testq %rdi, %rdi
jne 0x29fece
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x29fed4
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x108(%rsp), %rax
testq %rax, %rax
je 0x29ff0b
lock
decl (%rax)
jne 0x29ff0b
movq 0x100(%rsp), %rsi
movq 0x120(%rsp), %rdi
testq %rdi, %rdi
jne 0x29ff05
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x29ff0b
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x188(%rsp), %rax
testq %rax, %rax
je 0x29ff42
lock
decl (%rax)
jne 0x29ff42
movq 0x180(%rsp), %rsi
movq 0x1a0(%rsp), %rdi
testq %rdi, %rdi
jne 0x29ff3c
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x29ff42
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x88(%rsp), %rax
testq %rax, %rax
je 0x29ff79
lock
decl (%rax)
jne 0x29ff79
movq 0x80(%rsp), %rsi
movq 0xa0(%rsp), %rdi
testq %rdi, %rdi
jne 0x29ff73
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x29ff79
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
jmp 0x29ff89
jmp 0x29ff89
jmp 0x29ff89
jmp 0x29ff89
movq %rax, %rdi
callq 0x61d68
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/convolutiondepthwise_x86_fma.cpp |
ncnn::ConvolutionDepthWise_x86_fma::forward_int8_x86(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const | int ConvolutionDepthWise_x86_fma::forward_int8_x86(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int channels = bottom_blob.c;
int elempack = bottom_blob.elempack;
int elembits = bottom_blob.elembits();
const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;
Mat bottom_blob_int8 = bottom_blob;
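// quantize on the fly when the input is not already 8-bit, replicating each group's input scale across its channels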
if (elembits != 8)
{
const int channels_g = channels * elempack / group;
Mat scales(channels * elempack);
{
float* ps = scales;
for (int g = 0; g < group; g++)
{
float scale = bottom_blob_int8_scales[g];
for (int q = 0; q < channels_g; q++)
{
*ps++ = scale;
}
}
}
Option opt_q = opt;
opt_q.blob_allocator = opt.workspace_allocator;
quantize_to_int8(bottom_blob, bottom_blob_int8, scales, opt_q);
}
Mat bottom_blob_bordered;
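// pad the input borders according to the layer's padding parameters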
make_padding(bottom_blob_int8, bottom_blob_bordered, opt);
if (bottom_blob_bordered.empty())
return -100;
w = bottom_blob_bordered.w;
h = bottom_blob_bordered.h;
channels = bottom_blob_bordered.c;
elempack = bottom_blob_bordered.elempack;
int outw = (w - kernel_extent_w) / stride_w + 1;
int outh = (h - kernel_extent_h) / stride_h + 1;
// depth-wise
if (channels * elempack == group && group == num_output)
{
int out_elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
out_elempack = num_output % 8 == 0 ? 8 : 1;
}
#endif // __SSE2__
bool use_int8_requantize = int8_scale_term > 100;
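// requantized output is stored as int8 (1 byte per lane), dequantized output as fp32 (4 bytes per lane)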
size_t out_elemsize = use_int8_requantize ? 1u * out_elempack : 4u * out_elempack;
top_blob.create(outw, outh, num_output / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#if __SSE2__
if (elempack == 8)
{
{
const int maxk = kernel_w * kernel_h;
// kernel offsets
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
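// gap skips from the end of one dilated kernel row to the start of the next one in the input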
int gap = w * dilation_h - kernel_w * dilation_w;
for (int i = 0; i < kernel_h; i++)
{
for (int j = 0; j < kernel_w; j++)
{
space_ofs[p1] = p2;
p1++;
p2 += dilation_w;
}
p2 += gap;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < channels; g++)
{
signed char* outptr_s8 = top_blob.channel(g);
float* outptr_f32 = top_blob.channel(g);
const signed char* kptr = (const signed char*)weight_data_tm + maxk * g * 8;
const Mat m = bottom_blob_bordered.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__m128i _sum0 = _mm_setzero_si128();
__m128i _sum1 = _mm_setzero_si128();
const signed char* sptr = m.row<const signed char>(i * stride_h) + j * stride_w * 8;
for (int k = 0; k < maxk; k++)
{
// TODO use _mm_cvtepi8_epi16 on sse4.1
__m128i _val = _mm_loadl_epi64((const __m128i*)(sptr + space_ofs[k] * 8));
_val = _mm_unpacklo_epi8(_val, _mm_cmpgt_epi8(_mm_setzero_si128(), _val));
__m128i _w = _mm_loadl_epi64((const __m128i*)(kptr + k * 8));
_w = _mm_unpacklo_epi8(_w, _mm_cmpgt_epi8(_mm_setzero_si128(), _w));
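// widen the 16-bit products to 32 bits: mullo/mulhi give the low/high halves, unpacklo/hi interleave them into two int32 vectors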
__m128i _sl = _mm_mullo_epi16(_val, _w);
__m128i _sh = _mm_mulhi_epi16(_val, _w);
__m128i _s0 = _mm_unpacklo_epi16(_sl, _sh);
__m128i _s1 = _mm_unpackhi_epi16(_sl, _sh);
_sum0 = _mm_add_epi32(_sum0, _s0);
_sum1 = _mm_add_epi32(_sum1, _s1);
}
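// dequantize scale is 1 / (input_scale * weight_scale); the cmpneq mask zeroes lanes whose weight scale is 0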
__m128 _scale_in0;
__m128 _scale_in1;
{
__m128 _bottom_blob_int8_scales0 = _mm_loadu_ps((const float*)bottom_blob_int8_scales + g * 8);
__m128 _bottom_blob_int8_scales1 = _mm_loadu_ps((const float*)bottom_blob_int8_scales + g * 8 + 4);
__m128 _weight_data_int8_scales0 = _mm_loadu_ps((const float*)weight_data_int8_scales + g * 8);
__m128 _weight_data_int8_scales1 = _mm_loadu_ps((const float*)weight_data_int8_scales + g * 8 + 4);
_scale_in0 = _mm_rcp_ps(_mm_mul_ps(_bottom_blob_int8_scales0, _weight_data_int8_scales0));
_scale_in1 = _mm_rcp_ps(_mm_mul_ps(_bottom_blob_int8_scales1, _weight_data_int8_scales1));
__m128 _m0 = _mm_cmpneq_ps(_weight_data_int8_scales0, _mm_setzero_ps());
__m128 _m1 = _mm_cmpneq_ps(_weight_data_int8_scales1, _mm_setzero_ps());
_scale_in0 = _mm_and_ps(_scale_in0, _m0);
_scale_in1 = _mm_and_ps(_scale_in1, _m1);
}
__m128 _sumfp32_0 = _mm_mul_ps(_mm_cvtepi32_ps(_sum0), _scale_in0);
__m128 _sumfp32_1 = _mm_mul_ps(_mm_cvtepi32_ps(_sum1), _scale_in1);
if (bias_term)
{
__m128 _bias0 = _mm_loadu_ps((const float*)bias_data + g * 8);
__m128 _bias1 = _mm_loadu_ps((const float*)bias_data + g * 8 + 4);
_sumfp32_0 = _mm_add_ps(_sumfp32_0, _bias0);
_sumfp32_1 = _mm_add_ps(_sumfp32_1, _bias1);
}
_sumfp32_0 = activation_sse(_sumfp32_0, activation_type, activation_params);
_sumfp32_1 = activation_sse(_sumfp32_1, activation_type, activation_params);
if (use_int8_requantize)
{
// requantize and relu
__m128 _scale_out0 = _mm_loadu_ps((const float*)top_blob_int8_scales + g * 8);
__m128 _scale_out1 = _mm_loadu_ps((const float*)top_blob_int8_scales + g * 8 + 4);
_sumfp32_0 = _mm_mul_ps(_sumfp32_0, _scale_out0);
_sumfp32_1 = _mm_mul_ps(_sumfp32_1, _scale_out1);
int64_t _sum8 = float2int8_sse(_sumfp32_0, _sumfp32_1);
*(int64_t*)outptr_s8 = _sum8;
outptr_s8 += 8;
}
else
{
// dequantize and relu
_mm_storeu_ps(outptr_f32, _sumfp32_0);
_mm_storeu_ps(outptr_f32 + 4, _sumfp32_1);
outptr_f32 += 8;
}
}
}
}
}
}
#endif // __SSE2__
if (elempack == 1)
{
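// specialized 3x3 stride-1 and stride-2 kernels below; the generic sliding-window path handles everything else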
if (kernel_w == 3 && kernel_h == 3 && stride_w == 1 && stride_h == 1 && dilation_w == 1 && dilation_h == 1 && (activation_type == 0 || activation_type == 1))
{
if (use_int8_requantize)
{
std::vector<float> requantize_scales;
for (int g = 0; g < group; g++)
{
float scale_in;
if (weight_data_int8_scales[g] == 0)
scale_in = 0;
else
scale_in = 1.f / (bottom_blob_int8_scales[g] * weight_data_int8_scales[g]);
float scale_out = top_blob_int8_scales[g];
requantize_scales.push_back(scale_in);
requantize_scales.push_back(scale_out);
}
convdw3x3s1_int8_requant_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, requantize_scales, opt);
}
else
{
std::vector<float> dequantize_scales;
for (int g = 0; g < group; g++)
{
float top_rescale = 1.f / (bottom_blob_int8_scales[g] * weight_data_int8_scales[g]);
dequantize_scales.push_back(top_rescale);
}
convdw3x3s1_int8_dequant_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, dequantize_scales, opt);
}
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
}
else if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2 && (activation_type == 0 || activation_type == 1))
{
if (use_int8_requantize)
{
std::vector<float> requantize_scales;
for (int g = 0; g < group; g++)
{
float scale_in;
if (weight_data_int8_scales[g] == 0)
scale_in = 0;
else
scale_in = 1.f / (bottom_blob_int8_scales[g] * weight_data_int8_scales[g]);
float scale_out = top_blob_int8_scales[g];
requantize_scales.push_back(scale_in);
requantize_scales.push_back(scale_out);
}
convdw3x3s2_int8_requant_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, requantize_scales, opt);
}
else
{
std::vector<float> dequantize_scales;
for (int g = 0; g < group; g++)
{
float top_rescale = 1.f / (bottom_blob_int8_scales[g] * weight_data_int8_scales[g]);
dequantize_scales.push_back(top_rescale);
}
convdw3x3s2_int8_dequant_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, dequantize_scales, opt);
}
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
}
else
{
const int maxk = kernel_w * kernel_h;
// kernel offsets
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
int gap = w * dilation_h - kernel_w * dilation_w;
for (int i = 0; i < kernel_h; i++)
{
for (int j = 0; j < kernel_w; j++)
{
space_ofs[p1] = p2;
p1++;
p2 += dilation_w;
}
p2 += gap;
}
}
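// generic fallback: gather taps via space_ofs, accumulate in int32, then dequantize (and optionally requantize) per output element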
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
signed char* outptr_s8 = top_blob.channel(g);
float* outptr_f32 = top_blob.channel(g);
const signed char* kptr = (const signed char*)weight_data_tm + maxk * g;
const Mat m = bottom_blob_bordered.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
int sum = 0;
const signed char* sptr = m.row<const signed char>(i * stride_h) + j * stride_w;
for (int k = 0; k < maxk; k++)
{
signed char val = sptr[space_ofs[k]];
signed char wt = kptr[k];
sum += val * wt;
}
float scale_in;
if (weight_data_int8_scales[g] == 0)
scale_in = 0;
else
scale_in = 1.f / (bottom_blob_int8_scales[g] * weight_data_int8_scales[g]);
float sumfp32 = sum * scale_in;
if (bias_term)
sumfp32 += bias_data[g];
sumfp32 = activation_ss(sumfp32, activation_type, activation_params);
if (use_int8_requantize)
{
// requantize
float scale_out = top_blob_int8_scales[g];
signed char sums8 = float2int8(sumfp32 * scale_out);
outptr_s8[0] = sums8;
outptr_s8 += 1;
}
else
{
// already dequantized; store fp32 output
outptr_f32[0] = sumfp32;
outptr_f32 += 1;
}
}
}
}
}
}
return 0;
}
bool use_int8_requantize = int8_scale_term > 100;
int out_elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
if (use_int8_requantize)
out_elempack = num_output % 8 == 0 ? 8 : 1;
else
out_elempack = num_output % 4 == 0 ? 4 : 1;
}
#endif // __SSE2__
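// requantized output is int8 (1 byte per element); otherwise fp32 (4 bytes)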
size_t out_elemsize = use_int8_requantize ? 1u * out_elempack : 4u * out_elempack;
top_blob.create(outw, outh, num_output / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
// group convolution
const int channels_g = channels * elempack / group;
const int num_output_g = num_output / group;
int g_elempack = 1;
int out_g_elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
g_elempack = channels_g % 8 == 0 ? 8 : 1;
if (use_int8_requantize)
out_g_elempack = num_output_g % 8 == 0 ? 8 : 1;
else
out_g_elempack = num_output_g % 4 == 0 ? 4 : 1;
}
#endif // __SSE2__
// unpacking
Mat bottom_blob_bordered_unpacked = bottom_blob_bordered;
if (elempack > g_elempack)
{
Option opt_p = opt;
opt_p.blob_allocator = opt.workspace_allocator;
convert_packing(bottom_blob_bordered, bottom_blob_bordered_unpacked, g_elempack, opt_p);
}
Mat top_blob_unpacked = top_blob;
if (out_g_elempack < out_elempack)
{
top_blob_unpacked.create(outw, outh, num_output / out_g_elempack, out_elemsize / out_elempack * out_g_elempack, out_g_elempack, opt.workspace_allocator);
if (top_blob_unpacked.empty())
return -100;
}
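// forward each group's sub-blob through its pre-built per-group convolution op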
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
const Mat bottom_blob_bordered_g = bottom_blob_bordered_unpacked.channel_range(channels_g * g / g_elempack, channels_g / g_elempack);
Mat top_blob_g = top_blob_unpacked.channel_range(num_output_g * g / out_g_elempack, num_output_g / out_g_elempack);
const ncnn::Layer* op = group_ops[g];
Option opt_g = opt;
opt_g.blob_allocator = top_blob_unpacked.allocator;
// forward
op->forward(bottom_blob_bordered_g, top_blob_g, opt_g);
}
// packing
if (out_g_elempack < out_elempack)
{
convert_packing(top_blob_unpacked, top_blob, out_elempack, opt);
}
else
{
top_blob = top_blob_unpacked;
}
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x318, %rsp # imm = 0x318
movq %rcx, 0x30(%rsp)
movq %rdx, 0x20(%rsp)
movq %rsi, %r15
movl 0x38(%rsi), %ecx
movl 0x18(%rsi), %r12d
movq 0x10(%rsi), %rsi
testl %r12d, %r12d
je 0x29ffd3
leal (,%rsi,8), %eax
cltd
idivl %r12d
cmpl $0x8, %eax
sete %dl
jmp 0x29ffd5
xorl %edx, %edx
movq (%rdi), %rax
movq -0x18(%rax), %r9
movq %rdi, %r8
movl 0xd4(%rdi,%r9), %ebp
movl 0xd8(%rdi,%r9), %ebx
decl %ebp
imull 0xdc(%rdi,%r9), %ebp
decl %ebx
imull 0xe0(%rdi,%r9), %ebx
movq 0x8(%r15), %rdi
vmovups (%r15), %xmm0
vmovaps %xmm0, 0x1e0(%rsp)
movq %rsi, 0x1f0(%rsp)
movl %r12d, 0x1f8(%rsp)
movq 0x20(%r15), %rsi
movq %rsi, 0x200(%rsp)
vmovdqu 0x28(%r15), %xmm0
vmovdqu %xmm0, 0x208(%rsp)
movl %ecx, 0x218(%rsp)
movq 0x40(%r15), %rsi
movq %rsi, 0x220(%rsp)
testq %rdi, %rdi
je 0x2a0060
lock
incl (%rdi)
movq (%r8), %rax
movq %r8, 0x8(%rsp)
testb %dl, %dl
je 0x2a0073
movq 0x8(%rsp), %rcx
jmp 0x2a0186
imull %ecx, %r12d
movq -0x18(%rax), %rax
movq 0x8(%rsp), %r13
movl 0x108(%r13,%rax), %eax
movl %eax, 0x10(%rsp)
leaq 0x70(%rsp), %rdi
andq $0x0, 0x40(%rdi)
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, (%rdi)
vmovdqu %xmm0, 0xc(%rdi)
vmovdqa %xmm0, 0x20(%rdi)
vmovdqu %xmm0, 0x2c(%rdi)
xorl %r14d, %r14d
pushq $0x4
popq %rdx
movl %r12d, %esi
xorl %ecx, %ecx
callq 0x635fa
movl %r12d, %eax
cltd
idivl 0x10(%rsp)
movq 0x70(%rsp), %rcx
movq (%r13), %rdx
testl %eax, %eax
cmovlel %r14d, %eax
movq 0x8(%rsp), %r8
movq -0x18(%rdx), %rsi
movslq 0x108(%r8,%rsi), %rdi
cmpq %rdi, %r14
jge 0x2a010e
movq 0x240(%r8,%rsi), %rsi
vmovd (%rsi,%r14,4), %xmm0
movl %eax, %esi
subl $0x1, %esi
jb 0x2a0109
vmovd %xmm0, (%rcx)
addq $0x4, %rcx
jmp 0x2a00fa
incq %r14
jmp 0x2a00d9
movq 0x30(%rsp), %rax
vmovups (%rax), %ymm0
vmovups 0x20(%rax), %ymm1
leaq 0x130(%rsp), %rcx
vmovups %ymm1, 0x20(%rcx)
vmovups %ymm0, (%rcx)
movq 0x10(%rax), %rax
movq %rax, 0x8(%rcx)
leaq 0x1e0(%rsp), %rsi
leaq 0x70(%rsp), %rdx
movq %r15, %rdi
vzeroupper
callq 0x652e3
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x2a017e
lock
decl (%rax)
jne 0x2a017e
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x2a0176
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2a017e
movq %rsi, %rdi
callq 0x5f3e0
movq 0x8(%rsp), %rcx
movq (%rcx), %rax
leaq 0x70(%rsp), %rdx
andq $0x0, 0x40(%rdx)
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, (%rdx)
vmovdqu %xmm0, 0xc(%rdx)
vmovdqa %xmm0, 0x20(%rdx)
vmovdqu %xmm0, 0x2c(%rdx)
movq -0x18(%rax), %rdi
addq %rcx, %rdi
leaq 0x1e0(%rsp), %rsi
movq 0x30(%rsp), %rcx
callq 0x287daa
pushq $-0x64
popq %r12
cmpq $0x0, 0x70(%rsp)
je 0x2a1ab2
movslq 0xa8(%rsp), %r14
movq 0xb0(%rsp), %rax
imulq %r14, %rax
testq %rax, %rax
je 0x2a1ab2
notl %ebp
movl 0x88(%rsp), %r13d
movl 0x9c(%rsp), %edi
addl %edi, %ebp
movq 0x8(%rsp), %rdx
movq (%rdx), %rax
movq -0x18(%rax), %rcx
movl %ebp, %eax
movq %rdx, %rbp
cltd
idivl 0xe4(%rbp,%rcx)
movl %eax, %esi
notl %ebx
addl 0xa0(%rsp), %ebx
movl %ebx, %eax
cltd
idivl 0xe8(%rbp,%rcx)
movl %eax, %edx
movq %rsi, 0x10(%rsp)
incl %esi
movq %rdx, 0x68(%rsp)
leal 0x1(%rdx), %r10d
movl %r13d, %ebx
imull %r14d, %ebx
pushq $0x8
popq %r15
cmpl 0x108(%rbp,%rcx), %ebx
jne 0x2a039a
cmpl 0xd0(%rbp,%rcx), %ebx
jne 0x2a039a
movl %edi, 0x18(%rsp)
movq 0x30(%rsp), %rdi
cmpb $0x0, 0x27(%rdi)
pushq $0x1
popq %rax
cmovel %eax, %r15d
testb $0x7, %bl
cmovnel %eax, %r15d
movl 0x10c(%rbp,%rcx), %eax
leal (,%r15,4), %r8d
movl %eax, 0x38(%rsp)
cmpl $0x65, %eax
cmovgel %r15d, %r8d
movl %ebx, %eax
cltd
idivl %r15d
movq 0x8(%rdi), %rcx
movq %rcx, (%rsp)
movq 0x20(%rsp), %rbx
movq %rbx, %rdi
movl %r10d, %edx
movl %eax, %ecx
movl %r15d, %r9d
callq 0x628f2
cmpq $0x0, (%rbx)
je 0x2a1ab2
movslq 0x38(%rbx), %rax
imulq 0x40(%rbx), %rax
testq %rax, %rax
je 0x2a1ab2
xorl %r12d, %r12d
cmpl $0x1, %r13d
je 0x2a0977
cmpl $0x8, %r13d
jne 0x2a1ab2
movq (%rbp), %rax
movq -0x18(%rax), %rax
movslq 0xd4(%rbp,%rax), %rcx
movslq 0xd8(%rbp,%rax), %r15
imulq %rcx, %r15
leaq 0x130(%rsp), %rdi
leaq 0xe0(%rsp), %rdx
movq %r15, %rsi
callq 0x73bbe
movq (%rbp), %rcx
movq -0x18(%rcx), %rdx
movl 0x18(%rsp), %r11d
imull 0xe0(%rbp,%rdx), %r11d
movq 0x130(%rsp), %rax
movl 0xdc(%rbp,%rdx), %esi
imull 0xd4(%rbp,%rdx), %esi
subl %esi, %r11d
xorl %esi, %esi
xorl %edi, %edi
xorl %r8d, %r8d
cmpl 0xd8(%rbp,%rdx), %esi
jge 0x2a0aa8
movslq %r8d, %r8
leaq (%rax,%r8,4), %r10
xorl %r9d, %r9d
cmpl 0xd4(%rbp,%rdx), %r9d
jge 0x2a0390
movl %edi, (%r10,%r9,4)
movq -0x18(%rcx), %rdx
addl 0xdc(%rbp,%rdx), %edi
incq %r9
jmp 0x2a0372
addl %r11d, %edi
incl %esi
addq %r9, %r8
jmp 0x2a035b
movl 0xd0(%rbp,%rcx), %eax
movl 0x10c(%rbp,%rcx), %r14d
movq 0x30(%rsp), %rcx
cmpb $0x1, 0x27(%rcx)
movl %r13d, 0x40(%rsp)
movl %r12d, %r13d
jne 0x2a03ce
cmpl $0x65, %r14d
jl 0x2a03d4
testb $0x7, %al
pushq $0x1
popq %r9
cmovel %r15d, %r9d
jmp 0x2a03e2
pushq $0x1
popq %r9
jmp 0x2a03e2
xorl %ecx, %ecx
testb $0x3, %al
sete %cl
leal (%rcx,%rcx,2), %r9d
incl %r9d
leal (,%r9,4), %r8d
cmpl $0x65, %r14d
cmovgel %r9d, %r8d
cltd
idivl %r9d
movq 0x30(%rsp), %rcx
movq 0x8(%rcx), %rcx
movq %rcx, (%rsp)
movq 0x20(%rsp), %r12
movq %r12, %rdi
movl %esi, 0x18(%rsp)
movl %r10d, 0x28(%rsp)
movl %r10d, %edx
movl %eax, %ecx
movq %r8, 0x48(%rsp)
movq %r9, 0x38(%rsp)
callq 0x628f2
cmpq $0x0, (%r12)
je 0x2a04a1
movslq 0x38(%r12), %rax
imulq 0x40(%r12), %rax
testq %rax, %rax
movl %r13d, %r12d
movl 0x40(%rsp), %r8d
je 0x2a1ab2
movq (%rbp), %rax
movq -0x18(%rax), %rax
movl 0xd0(%rbp,%rax), %ecx
movl 0x108(%rbp,%rax), %esi
movl %ebx, %eax
cltd
idivl %esi
movl %eax, %edi
movl %ecx, %eax
cltd
idivl %esi
movl %eax, %ebp
movq 0x30(%rsp), %rax
cmpb $0x1, 0x27(%rax)
jne 0x2a04a9
testb $0x7, %dil
pushq $0x1
popq %rbx
movl %ebx, %r12d
cmovel %r15d, %r12d
cmpl $0x65, %r14d
movl 0x18(%rsp), %esi
movl 0x28(%rsp), %r9d
jl 0x2a04bb
testb $0x7, %bpl
cmovel %r15d, %ebx
jmp 0x2a04c9
movl %r13d, %r12d
jmp 0x2a1ab2
pushq $0x1
popq %r12
movl %r12d, %ebx
movl 0x18(%rsp), %esi
movl 0x28(%rsp), %r9d
jmp 0x2a04c9
xorl %eax, %eax
testb $0x3, %bpl
sete %al
leal (%rax,%rax,2), %ebx
incl %ebx
movq 0x78(%rsp), %rax
vmovaps 0x70(%rsp), %xmm0
vmovaps %xmm0, 0x130(%rsp)
movq 0x80(%rsp), %rcx
movq %rcx, 0x140(%rsp)
movl 0x88(%rsp), %ecx
movl %ecx, 0x148(%rsp)
movq 0x90(%rsp), %rcx
movq %rcx, 0x150(%rsp)
vmovups 0x98(%rsp), %xmm0
vmovups %xmm0, 0x158(%rsp)
movl 0xa8(%rsp), %ecx
movl %ecx, 0x168(%rsp)
movq 0xb0(%rsp), %rcx
movq %rcx, 0x170(%rsp)
testq %rax, %rax
movq 0x20(%rsp), %r14
je 0x2a0548
lock
incl (%rax)
cmpl %r12d, %r8d
movl %edi, 0x10(%rsp)
jle 0x2a059d
movq 0x30(%rsp), %rax
vmovups (%rax), %ymm0
vmovups 0x20(%rax), %ymm1
leaq 0xe0(%rsp), %rcx
vmovups %ymm1, 0x20(%rcx)
vmovups %ymm0, (%rcx)
movq 0x10(%rax), %rax
movq %rax, 0x8(%rcx)
leaq 0x70(%rsp), %rdi
leaq 0x130(%rsp), %rsi
movl %r12d, %edx
vzeroupper
callq 0x64e3b
movl 0x10(%rsp), %edi
movl 0x18(%rsp), %esi
movl 0x28(%rsp), %r9d
movq 0x8(%r14), %rax
vmovups (%r14), %xmm0
vmovaps %xmm0, 0xe0(%rsp)
movq 0x10(%r14), %rcx
movq %rcx, 0xf0(%rsp)
movl 0x18(%r14), %ecx
movl %ecx, 0xf8(%rsp)
movq 0x20(%r14), %rcx
movq %rcx, 0x100(%rsp)
vmovups 0x28(%r14), %xmm0
vmovups %xmm0, 0x108(%rsp)
movl 0x38(%r14), %ecx
movl %ecx, 0x118(%rsp)
movq 0x40(%r14), %rcx
movq %rcx, 0x120(%rsp)
testq %rax, %rax
je 0x2a0600
lock
incl (%rax)
cmpl 0x38(%rsp), %ebx
jae 0x2a0680
movq 0x8(%rsp), %rcx
movq (%rcx), %rax
movq -0x18(%rax), %rax
movl 0xd0(%rcx,%rax), %eax
cltd
idivl %ebx
movl %eax, %ecx
movzbl 0x48(%rsp), %eax
divb 0x38(%rsp)
movzbl %al, %r8d
imull %ebx, %r8d
movq 0x30(%rsp), %rax
movq 0x10(%rax), %rax
movq %rax, (%rsp)
leaq 0xe0(%rsp), %rdi
movl %r9d, %edx
movl %ebx, %r9d
callq 0x628f2
movl 0x10(%rsp), %edi
pushq $-0x64
popq %r14
cmpq $0x0, 0xe0(%rsp)
je 0x2a1a41
movslq 0x118(%rsp), %rax
imulq 0x120(%rsp), %rax
testq %rax, %rax
je 0x2a1a41
xorl %r15d, %r15d
xorl %r13d, %r13d
xorl %r14d, %r14d
movq 0x8(%rsp), %r8
movq (%r8), %rax
movq -0x18(%rax), %rax
movslq 0x108(%r8,%rax), %rax
cmpq %rax, %r14
jge 0x2a08f5
movl %r15d, %eax
cltd
idivl %r12d
movl %eax, %ecx
movl %edi, %eax
cltd
idivl %r12d
movslq %ecx, %rdx
imulq 0x170(%rsp), %rdx
movq 0x140(%rsp), %rcx
imulq %rcx, %rdx
addq 0x130(%rsp), %rdx
movl 0x148(%rsp), %esi
movq 0x150(%rsp), %rdi
movq %rdx, 0x198(%rsp)
andq $0x0, 0x1a0(%rsp)
movq %rcx, 0x1a8(%rsp)
movl %esi, 0x1b0(%rsp)
movq %rdi, 0x1b8(%rsp)
movl %eax, 0x1d0(%rsp)
vmovups 0x158(%rsp), %xmm0
movslq 0x164(%rsp), %rax
movslq 0x15c(%rsp), %rdx
movslq 0x160(%rsp), %rsi
imulq %rdx, %rsi
imulq %rcx, %rax
imulq %rsi, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rcx
movq %rax, 0x1d8(%rsp)
vmovups %xmm0, 0x1c0(%rsp)
movl %r13d, %eax
cltd
idivl %ebx
movl %eax, %ecx
movl %ebp, %eax
cltd
idivl %ebx
movslq %ecx, %rdx
imulq 0x120(%rsp), %rdx
movq 0xf0(%rsp), %rsi
imulq %rsi, %rdx
addq 0xe0(%rsp), %rdx
movl 0xf8(%rsp), %edi
movq 0x100(%rsp), %rcx
movq %rdx, 0x228(%rsp)
andq $0x0, 0x230(%rsp)
movq %rsi, 0x238(%rsp)
movl %edi, 0x240(%rsp)
movq %rcx, 0x248(%rsp)
movl %eax, 0x260(%rsp)
vmovups 0x108(%rsp), %xmm0
movslq 0x114(%rsp), %rax
movslq 0x10c(%rsp), %rdx
movslq 0x110(%rsp), %rdi
imulq %rdx, %rdi
imulq %rsi, %rax
imulq %rdi, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rsi
movq %rax, 0x268(%rsp)
vmovups %xmm0, 0x250(%rsp)
movq 0x10(%r8), %rax
movq (%rax,%r14,8), %rdi
movq 0x30(%rsp), %rax
vmovups (%rax), %ymm0
vmovups 0x20(%rax), %ymm1
vmovups %ymm0, 0x2d0(%rsp)
vmovups %ymm1, 0x2f0(%rsp)
movq %rcx, 0x2d8(%rsp)
movq (%rdi), %rax
leaq 0x198(%rsp), %rsi
leaq 0x228(%rsp), %rdx
leaq 0x2d0(%rsp), %rcx
vzeroupper
callq *0x38(%rax)
movq 0x230(%rsp), %rax
testq %rax, %rax
movl 0x10(%rsp), %edi
je 0x2a08a8
lock
decl (%rax)
jne 0x2a08a8
movq 0x228(%rsp), %rsi
movq 0x248(%rsp), %rdi
testq %rdi, %rdi
je 0x2a089c
movq (%rdi), %rax
callq *0x18(%rax)
movl 0x10(%rsp), %edi
jmp 0x2a08a8
movq %rsi, %rdi
callq 0x5f3e0
movl 0x10(%rsp), %edi
movq 0x1a0(%rsp), %rax
testq %rax, %rax
je 0x2a08e7
lock
decl (%rax)
jne 0x2a08e7
movq 0x198(%rsp), %rsi
movq 0x1b8(%rsp), %rdi
testq %rdi, %rdi
je 0x2a08db
movq (%rdi), %rax
callq *0x18(%rax)
movl 0x10(%rsp), %edi
jmp 0x2a08e7
movq %rsi, %rdi
callq 0x5f3e0
movl 0x10(%rsp), %edi
incq %r14
addl %ebp, %r13d
addl %edi, %r15d
jmp 0x2a0689
movq 0x38(%rsp), %rdx
cmpl %edx, %ebx
jae 0x2a091d
xorl %r14d, %r14d
leaq 0xe0(%rsp), %rdi
movq 0x20(%rsp), %rsi
movq 0x30(%rsp), %rcx
callq 0x64e3b
jmp 0x2a1a41
leaq 0xe0(%rsp), %rax
movq 0x20(%rsp), %rbx
xorl %r14d, %r14d
cmpq %rbx, %rax
je 0x2a1a41
movq 0xe8(%rsp), %rax
testq %rax, %rax
je 0x2a0946
lock
incl (%rax)
movq 0x8(%rbx), %rax
testq %rax, %rax
je 0x2a19ee
lock
decl (%rax)
jne 0x2a19ee
movq (%rbx), %rsi
movq 0x20(%rbx), %rdi
testq %rdi, %rdi
je 0x2a19e6
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2a19ee
movq (%rbp), %rax
movq -0x18(%rax), %rdx
movl 0xd4(%rbp,%rdx), %ecx
movl 0xd8(%rbp,%rdx), %r13d
movl %ecx, %esi
xorl $0x3, %esi
movl %r13d, %edi
xorl $0x3, %edi
orl %esi, %edi
movq 0x20(%rsp), %r14
jne 0x2a15a1
cmpl $0x1, 0xe4(%rbp,%rdx)
jne 0x2a1496
cmpl $0x1, 0xe8(%rbp,%rdx)
jne 0x2a1496
cmpl $0x1, 0xdc(%rbp,%rdx)
jne 0x2a1496
cmpl $0x1, 0xe0(%rbp,%rdx)
jne 0x2a1496
cmpl $0x1, 0x110(%rbp,%rdx)
ja 0x2a1496
cmpl $0x65, 0x38(%rsp)
jl 0x2a1b2f
vxorps %xmm0, %xmm0, %xmm0
leaq 0x130(%rsp), %rbx
vmovaps %xmm0, (%rbx)
andq $0x0, 0x10(%rbx)
xorl %r14d, %r14d
leaq 0xe0(%rsp), %r15
leaq 0x198(%rsp), %r12
movq -0x18(%rax), %r13
movslq 0x108(%rbp,%r13), %rax
cmpq %rax, %r14
jge 0x2a1e6c
movq 0x1f8(%rbp,%r13), %rax
vmovss (%rax,%r14,4), %xmm1
vxorps %xmm0, %xmm0, %xmm0
vucomiss %xmm1, %xmm0
je 0x2a0a66
movq 0x240(%rbp,%r13), %rax
vmulss (%rax,%r14,4), %xmm1, %xmm0
vmovss 0x14e226(%rip), %xmm1 # 0x3eec88
vdivss %xmm0, %xmm1, %xmm0
vmovss %xmm0, 0xe0(%rsp)
movq 0x288(%rbp,%r13), %rax
vmovss (%rax,%r14,4), %xmm0
vmovss %xmm0, 0x198(%rsp)
movq %rbx, %rdi
movq %r15, %rsi
callq 0x1ea12c
movq %rbx, %rdi
movq %r12, %rsi
callq 0x1ea12c
incq %r14
movq (%rbp), %rax
jmp 0x2a0a1f
leal (,%r15,8), %ecx
movl %ecx, 0x28(%rsp)
xorl %edx, %edx
testl %r15d, %r15d
cmovlel %edx, %r15d
testl %r14d, %r14d
cmovlel %edx, %r14d
vpxor %xmm0, %xmm0, %xmm0
xorl %esi, %esi
movq %r14, 0x18(%rsp)
cmpq %r14, %rsi
je 0x2a19d1
movq 0x20(%rsp), %rcx
movq 0x10(%rcx), %r8
imulq %rsi, %r8
imulq 0x40(%rcx), %r8
movq %rdx, 0x30(%rsp)
movslq %edx, %r9
addq (%rcx), %r8
movslq 0x9c(%rsp), %r10
movq 0xb0(%rsp), %r11
imulq %rsi, %r11
movq 0x80(%rsp), %rcx
imulq %rcx, %r11
addq 0x70(%rsp), %r11
imulq %rcx, %r10
movq %rsi, 0x40(%rsp)
leaq (,%rsi,8), %rbx
addq 0x28(%rbp), %r9
xorl %ecx, %ecx
movq %r8, %rdx
cmpl 0x68(%rsp), %ecx
jg 0x2a147b
movq (%rbp), %rdi
xorl %r13d, %r13d
cmpl 0x10(%rsp), %r13d
jg 0x2a1474
movq -0x18(%rdi), %rsi
movslq 0xe8(%rbp,%rsi), %r14
movslq %ecx, %r12
imulq %r14, %r12
imulq %r10, %r12
movl 0xe4(%rbp,%rsi), %ebp
imull %r13d, %ebp
shll $0x3, %ebp
movslq %ebp, %r14
addq %r11, %r14
addq %r12, %r14
vpxor %xmm3, %xmm3, %xmm3
xorl %r12d, %r12d
vpxor %xmm4, %xmm4, %xmm4
cmpq %r12, %r15
je 0x2a0bcc
movslq (%rax,%r12,4), %rbp
vmovq (%r14,%rbp,8), %xmm5
vpcmpgtb %xmm5, %xmm0, %xmm6
vpunpcklbw %xmm6, %xmm5, %xmm5 # xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3],xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7]
vmovq (%r9,%r12,8), %xmm6
vpcmpgtb %xmm6, %xmm0, %xmm11
vpunpcklbw %xmm11, %xmm6, %xmm6 # xmm6 = xmm6[0],xmm11[0],xmm6[1],xmm11[1],xmm6[2],xmm11[2],xmm6[3],xmm11[3],xmm6[4],xmm11[4],xmm6[5],xmm11[5],xmm6[6],xmm11[6],xmm6[7],xmm11[7]
vpmullw %xmm5, %xmm6, %xmm11
vpmulhw %xmm6, %xmm5, %xmm5
vpunpcklwd %xmm5, %xmm11, %xmm6 # xmm6 = xmm11[0],xmm5[0],xmm11[1],xmm5[1],xmm11[2],xmm5[2],xmm11[3],xmm5[3]
vpunpckhwd %xmm5, %xmm11, %xmm5 # xmm5 = xmm11[4],xmm5[4],xmm11[5],xmm5[5],xmm11[6],xmm5[6],xmm11[7],xmm5[7]
vpaddd %xmm6, %xmm4, %xmm4
vpaddd %xmm5, %xmm3, %xmm3
incq %r12
jmp 0x2a0b89
movq 0x8(%rsp), %rbp
movq 0x1f8(%rbp,%rsi), %r14
movq 0x240(%rbp,%rsi), %r12
vmovups (%r14,%rbx,4), %xmm5
vmovups 0x10(%r14,%rbx,4), %xmm6
vmulps (%r12,%rbx,4), %xmm5, %xmm11
vmulps 0x10(%r12,%rbx,4), %xmm6, %xmm13
vrcpps %xmm11, %xmm11
vrcpps %xmm13, %xmm13
vcmpneqps %xmm0, %xmm5, %xmm5
vandps %xmm5, %xmm11, %xmm5
vcmpneqps %xmm0, %xmm6, %xmm6
vandps %xmm6, %xmm13, %xmm6
vcvtdq2ps %xmm4, %xmm4
vmulps %xmm4, %xmm5, %xmm5
vcvtdq2ps %xmm3, %xmm3
vmulps %xmm3, %xmm6, %xmm6
cmpl $0x0, 0x100(%rbp,%rsi)
je 0x2a0c46
movq 0x1b0(%rbp,%rsi), %r14
vaddps (%r14,%rbx,4), %xmm5, %xmm5
vaddps 0x10(%r14,%rbx,4), %xmm6, %xmm6
movl 0x110(%rbp,%rsi), %r14d
decl %r14d
cmpl $0x5, %r14d
ja 0x2a13e7
leaq 0x157a06(%rip), %r12 # 0x3f8668
movslq (%r12,%r14,4), %r14
addq %r12, %r14
vmaxps %xmm0, %xmm5, %xmm4
vmaxps %xmm0, %xmm6, %xmm3
jmpq *%r14
movq 0x118(%rbp,%rsi), %r14
vminps %xmm0, %xmm5, %xmm5
vbroadcastss (%r14), %xmm11
vfmadd231ps %xmm5, %xmm11, %xmm4 # xmm4 = (xmm11 * xmm5) + xmm4
vminps %xmm0, %xmm6, %xmm5
vfmadd231ps %xmm5, %xmm11, %xmm3 # xmm3 = (xmm11 * xmm5) + xmm3
jmp 0x2a13ef
vbroadcastss 0x150517(%rip), %xmm1 # 0x3f11b8
vminps %xmm1, %xmm5, %xmm3
vbroadcastss 0x15050e(%rip), %xmm1 # 0x3f11bc
vmaxps %xmm1, %xmm3, %xmm3
vbroadcastss 0x150505(%rip), %xmm15 # 0x3f11c0
vmovaps %xmm15, %xmm4
vbroadcastss 0x14d34c(%rip), %xmm8 # 0x3ee014
vfmadd213ps %xmm8, %xmm3, %xmm4 # xmm4 = (xmm3 * xmm4) + xmm8
vcvttps2dq %xmm4, %xmm11
vcvtdq2ps %xmm11, %xmm11
vcmpltps %xmm11, %xmm4, %xmm4
vbroadcastss 0x14dfa3(%rip), %xmm1 # 0x3eec88
vandps %xmm1, %xmm4, %xmm4
vsubps %xmm4, %xmm11, %xmm4
vbroadcastss 0x1504ce(%rip), %xmm7 # 0x3f11c4
vfmsub231ps %xmm7, %xmm4, %xmm3 # xmm3 = (xmm4 * xmm7) - xmm3
vmovaps %xmm7, %xmm9
vbroadcastss 0x1504c0(%rip), %xmm7 # 0x3f11c8
vfnmsub231ps %xmm7, %xmm4, %xmm3 # xmm3 = -(xmm4 * xmm7) - xmm3
vmovaps %xmm7, %xmm2
vmulps %xmm3, %xmm3, %xmm11
vbroadcastss 0x1504ae(%rip), %xmm13 # 0x3f11cc
vbroadcastss 0x1504a9(%rip), %xmm7 # 0x3f11d0
vfmadd213ps %xmm7, %xmm3, %xmm13 # xmm13 = (xmm3 * xmm13) + xmm7
vbroadcastss 0x15049f(%rip), %xmm12 # 0x3f11d4
vfmadd213ps %xmm12, %xmm3, %xmm13 # xmm13 = (xmm3 * xmm13) + xmm12
vbroadcastss 0x150495(%rip), %xmm10 # 0x3f11d8
vfmadd213ps %xmm10, %xmm3, %xmm13 # xmm13 = (xmm3 * xmm13) + xmm10
vbroadcastss 0x15048b(%rip), %xmm7 # 0x3f11dc
vfmadd213ps %xmm7, %xmm3, %xmm13 # xmm13 = (xmm3 * xmm13) + xmm7
vfmadd213ps %xmm8, %xmm3, %xmm13 # xmm13 = (xmm3 * xmm13) + xmm8
vfmadd213ps %xmm3, %xmm11, %xmm13 # xmm13 = (xmm11 * xmm13) + xmm3
vaddps %xmm1, %xmm13, %xmm3
vcvttps2dq %xmm4, %xmm4
vpslld $0x17, %xmm4, %xmm4
vpaddd %xmm1, %xmm4, %xmm4
vfmadd213ps %xmm1, %xmm3, %xmm4 # xmm4 = (xmm3 * xmm4) + xmm1
vcmpleps %xmm0, %xmm4, %xmm3
vbroadcastss 0x15045c(%rip), %xmm7 # 0x3f11e0
vmaxps %xmm7, %xmm4, %xmm4
vpsrld $0x17, %xmm4, %xmm11
vbroadcastss 0x15044e(%rip), %xmm7 # 0x3f11e4
vandps %xmm7, %xmm4, %xmm4
vorps %xmm4, %xmm8, %xmm4
vbroadcastss 0x150441(%rip), %xmm7 # 0x3f11e8
vpaddd %xmm7, %xmm11, %xmm11
vcvtdq2ps %xmm11, %xmm11
vbroadcastss 0x150433(%rip), %xmm7 # 0x3f11ec
vcmpltps %xmm7, %xmm4, %xmm13
vandps %xmm4, %xmm13, %xmm7
vbroadcastss 0x150425(%rip), %xmm12 # 0x3f11f0
vaddps %xmm4, %xmm12, %xmm4
vaddps %xmm7, %xmm4, %xmm4
vandps %xmm1, %xmm13, %xmm7
vsubps %xmm7, %xmm11, %xmm7
vmulps %xmm4, %xmm4, %xmm11
vbroadcastss 0x15040c(%rip), %xmm13 # 0x3f11f4
vbroadcastss 0x150407(%rip), %xmm12 # 0x3f11f8
vfmadd213ps %xmm12, %xmm4, %xmm13 # xmm13 = (xmm4 * xmm13) + xmm12
vbroadcastss 0x1503fd(%rip), %xmm12 # 0x3f11fc
vfmadd213ps %xmm12, %xmm4, %xmm13 # xmm13 = (xmm4 * xmm13) + xmm12
vbroadcastss 0x1503f3(%rip), %xmm12 # 0x3f1200
vfmadd213ps %xmm12, %xmm4, %xmm13 # xmm13 = (xmm4 * xmm13) + xmm12
vbroadcastss 0x1503e9(%rip), %xmm12 # 0x3f1204
vfmadd213ps %xmm12, %xmm4, %xmm13 # xmm13 = (xmm4 * xmm13) + xmm12
vbroadcastss 0x1503df(%rip), %xmm12 # 0x3f1208
vfmadd213ps %xmm12, %xmm4, %xmm13 # xmm13 = (xmm4 * xmm13) + xmm12
vbroadcastss 0x1503d5(%rip), %xmm12 # 0x3f120c
vfmadd213ps %xmm12, %xmm4, %xmm13 # xmm13 = (xmm4 * xmm13) + xmm12
vbroadcastss 0x1503cb(%rip), %xmm12 # 0x3f1210
vfmadd213ps %xmm12, %xmm4, %xmm13 # xmm13 = (xmm4 * xmm13) + xmm12
vbroadcastss 0x1503c1(%rip), %xmm12 # 0x3f1214
vfmadd213ps %xmm12, %xmm4, %xmm13 # xmm13 = (xmm4 * xmm13) + xmm12
vmulps %xmm4, %xmm11, %xmm14
vmulps %xmm13, %xmm14, %xmm13
vmovaps %xmm2, %xmm14
vfmadd231ps %xmm2, %xmm7, %xmm13 # xmm13 = (xmm7 * xmm2) + xmm13
vfmsub231ps %xmm11, %xmm8, %xmm13 # xmm13 = (xmm8 * xmm11) - xmm13
vsubps %xmm4, %xmm13, %xmm4
vfmsub231ps %xmm7, %xmm9, %xmm4 # xmm4 = (xmm9 * xmm7) - xmm4
vbroadcastss 0x151897(%rip), %xmm2 # 0x3f2718
vmulps %xmm2, %xmm4, %xmm4
vbroadcastss 0x150a72(%rip), %xmm2 # 0x3f1900
vblendvps %xmm3, %xmm2, %xmm4, %xmm3
vbroadcastss 0x15031b(%rip), %xmm2 # 0x3f11b8
vminps %xmm2, %xmm3, %xmm3
vbroadcastss 0x150312(%rip), %xmm2 # 0x3f11bc
vmaxps %xmm2, %xmm3, %xmm3
vmovaps %xmm15, %xmm4
vfmadd213ps %xmm8, %xmm3, %xmm4 # xmm4 = (xmm3 * xmm4) + xmm8
vcvttps2dq %xmm4, %xmm7
vcvtdq2ps %xmm7, %xmm7
vcmpltps %xmm7, %xmm4, %xmm4
vandps %xmm1, %xmm4, %xmm4
vsubps %xmm4, %xmm7, %xmm4
vfmsub231ps %xmm9, %xmm4, %xmm3 # xmm3 = (xmm4 * xmm9) - xmm3
vfnmsub231ps %xmm14, %xmm4, %xmm3 # xmm3 = -(xmm4 * xmm14) - xmm3
vmovaps %xmm14, %xmm13
vmulps %xmm3, %xmm3, %xmm7
vbroadcastss 0x1502e4(%rip), %xmm9 # 0x3f11cc
vmovaps %xmm9, %xmm11
vbroadcastss 0x1502da(%rip), %xmm14 # 0x3f11d0
vfmadd213ps %xmm14, %xmm3, %xmm11 # xmm11 = (xmm3 * xmm11) + xmm14
vbroadcastss 0x1502d0(%rip), %xmm10 # 0x3f11d4
vfmadd213ps %xmm10, %xmm3, %xmm11 # xmm11 = (xmm3 * xmm11) + xmm10
vbroadcastss 0x1502c6(%rip), %xmm2 # 0x3f11d8
vfmadd213ps %xmm2, %xmm3, %xmm11 # xmm11 = (xmm3 * xmm11) + xmm2
vbroadcastss 0x1502bc(%rip), %xmm12 # 0x3f11dc
vfmadd213ps %xmm12, %xmm3, %xmm11 # xmm11 = (xmm3 * xmm11) + xmm12
vfmadd213ps %xmm8, %xmm3, %xmm11 # xmm11 = (xmm3 * xmm11) + xmm8
vfmadd213ps %xmm3, %xmm7, %xmm11 # xmm11 = (xmm7 * xmm11) + xmm3
vaddps %xmm1, %xmm11, %xmm3
vcvttps2dq %xmm4, %xmm4
vpslld $0x17, %xmm4, %xmm4
vpaddd %xmm1, %xmm4, %xmm4
vfmadd213ps %xmm1, %xmm3, %xmm4 # xmm4 = (xmm3 * xmm4) + xmm1
vrcpps %xmm4, %xmm3
vaddps %xmm3, %xmm3, %xmm7
vbroadcastss 0x1517b2(%rip), %xmm2 # 0x3f2708
vfmsub213ps %xmm2, %xmm7, %xmm4 # xmm4 = (xmm7 * xmm4) - xmm2
vfnmadd213ps %xmm7, %xmm3, %xmm4 # xmm4 = -(xmm3 * xmm4) + xmm7
vbroadcastss 0x15024f(%rip), %xmm2 # 0x3f11b8
vminps %xmm2, %xmm6, %xmm3
vbroadcastss 0x150246(%rip), %xmm2 # 0x3f11bc
vmaxps %xmm2, %xmm3, %xmm3
vmovaps %xmm15, %xmm7
vfmadd213ps %xmm8, %xmm3, %xmm7 # xmm7 = (xmm3 * xmm7) + xmm8
vcvttps2dq %xmm7, %xmm11
vcvtdq2ps %xmm11, %xmm11
vcmpltps %xmm11, %xmm7, %xmm7
vandps %xmm1, %xmm7, %xmm7
vsubps %xmm7, %xmm11, %xmm7
vbroadcastss 0x150221(%rip), %xmm2 # 0x3f11c4
vfmsub231ps %xmm2, %xmm7, %xmm3 # xmm3 = (xmm7 * xmm2) - xmm3
vfnmsub231ps %xmm13, %xmm7, %xmm3 # xmm3 = -(xmm7 * xmm13) - xmm3
vmulps %xmm3, %xmm3, %xmm11
vmovaps %xmm9, %xmm13
vfmadd213ps %xmm14, %xmm3, %xmm13 # xmm13 = (xmm3 * xmm13) + xmm14
vfmadd213ps %xmm10, %xmm3, %xmm13 # xmm13 = (xmm3 * xmm13) + xmm10
vbroadcastss 0x15020f(%rip), %xmm10 # 0x3f11d8
vfmadd213ps %xmm10, %xmm3, %xmm13 # xmm13 = (xmm3 * xmm13) + xmm10
vfmadd213ps %xmm12, %xmm3, %xmm13 # xmm13 = (xmm3 * xmm13) + xmm12
vfmadd213ps %xmm8, %xmm3, %xmm13 # xmm13 = (xmm3 * xmm13) + xmm8
vfmadd213ps %xmm3, %xmm11, %xmm13 # xmm13 = (xmm11 * xmm13) + xmm3
vaddps %xmm1, %xmm13, %xmm11
vcvttps2dq %xmm7, %xmm3
vpslld $0x17, %xmm3, %xmm3
vpaddd %xmm1, %xmm3, %xmm3
vfmadd213ps %xmm1, %xmm11, %xmm3 # xmm3 = (xmm11 * xmm3) + xmm1
vbroadcastss 0x1501e4(%rip), %xmm2 # 0x3f11e0
vmaxps %xmm2, %xmm3, %xmm7
vpsrld $0x17, %xmm7, %xmm11
vbroadcastss 0x1501d6(%rip), %xmm2 # 0x3f11e4
vandps %xmm2, %xmm7, %xmm7
vorps %xmm7, %xmm8, %xmm7
vbroadcastss 0x1501c9(%rip), %xmm2 # 0x3f11e8
vpaddd %xmm2, %xmm11, %xmm11
vcvtdq2ps %xmm11, %xmm11
vbroadcastss 0x1501bb(%rip), %xmm2 # 0x3f11ec
vcmpltps %xmm2, %xmm7, %xmm13
vandps %xmm7, %xmm13, %xmm14
vbroadcastss 0x1501ad(%rip), %xmm2 # 0x3f11f0
vaddps %xmm2, %xmm7, %xmm7
vaddps %xmm7, %xmm14, %xmm7
vandps %xmm1, %xmm13, %xmm13
vsubps %xmm13, %xmm11, %xmm11
vmulps %xmm7, %xmm7, %xmm13
vbroadcastss 0x150193(%rip), %xmm14 # 0x3f11f4
vbroadcastss 0x15018e(%rip), %xmm2 # 0x3f11f8
vfmadd213ps %xmm2, %xmm7, %xmm14 # xmm14 = (xmm7 * xmm14) + xmm2
vbroadcastss 0x150184(%rip), %xmm2 # 0x3f11fc
vfmadd213ps %xmm2, %xmm7, %xmm14 # xmm14 = (xmm7 * xmm14) + xmm2
vbroadcastss 0x15017a(%rip), %xmm2 # 0x3f1200
vfmadd213ps %xmm2, %xmm7, %xmm14 # xmm14 = (xmm7 * xmm14) + xmm2
vbroadcastss 0x150170(%rip), %xmm2 # 0x3f1204
vfmadd213ps %xmm2, %xmm7, %xmm14 # xmm14 = (xmm7 * xmm14) + xmm2
vbroadcastss 0x150166(%rip), %xmm2 # 0x3f1208
vfmadd213ps %xmm2, %xmm7, %xmm14 # xmm14 = (xmm7 * xmm14) + xmm2
vbroadcastss 0x15015c(%rip), %xmm2 # 0x3f120c
vfmadd213ps %xmm2, %xmm7, %xmm14 # xmm14 = (xmm7 * xmm14) + xmm2
vbroadcastss 0x150152(%rip), %xmm2 # 0x3f1210
vfmadd213ps %xmm2, %xmm7, %xmm14 # xmm14 = (xmm7 * xmm14) + xmm2
vbroadcastss 0x150148(%rip), %xmm2 # 0x3f1214
vfmadd213ps %xmm2, %xmm7, %xmm14 # xmm14 = (xmm7 * xmm14) + xmm2
vmovaps %xmm15, %xmm2
vmulps %xmm7, %xmm13, %xmm15
vmulps %xmm14, %xmm15, %xmm14
vmovaps %xmm2, %xmm15
vbroadcastss 0x1500dd(%rip), %xmm9 # 0x3f11c8
vfmadd231ps %xmm9, %xmm11, %xmm14 # xmm14 = (xmm11 * xmm9) + xmm14
vfmsub231ps %xmm13, %xmm8, %xmm14 # xmm14 = (xmm8 * xmm13) - xmm14
vcmpleps %xmm0, %xmm3, %xmm3
vsubps %xmm7, %xmm14, %xmm7
vbroadcastss 0x1500bd(%rip), %xmm12 # 0x3f11c4
vfmsub231ps %xmm11, %xmm12, %xmm7 # xmm7 = (xmm12 * xmm11) - xmm7
vbroadcastss 0x151603(%rip), %xmm2 # 0x3f2718
vmulps %xmm2, %xmm7, %xmm7
vbroadcastss 0x1507de(%rip), %xmm2 # 0x3f1900
vblendvps %xmm3, %xmm2, %xmm7, %xmm3
vbroadcastss 0x150087(%rip), %xmm2 # 0x3f11b8
vminps %xmm2, %xmm3, %xmm3
vbroadcastss 0x15007e(%rip), %xmm2 # 0x3f11bc
vmaxps %xmm2, %xmm3, %xmm3
vmovaps %xmm15, %xmm7
vfmadd213ps %xmm8, %xmm3, %xmm7 # xmm7 = (xmm3 * xmm7) + xmm8
vcvttps2dq %xmm7, %xmm11
vcvtdq2ps %xmm11, %xmm11
vcmpltps %xmm11, %xmm7, %xmm7
vandps %xmm1, %xmm7, %xmm7
vsubps %xmm7, %xmm11, %xmm7
vfmsub231ps %xmm12, %xmm7, %xmm3 # xmm3 = (xmm7 * xmm12) - xmm3
vfnmsub231ps %xmm9, %xmm7, %xmm3 # xmm3 = -(xmm7 * xmm9) - xmm3
vmulps %xmm3, %xmm3, %xmm11
vbroadcastss 0x150053(%rip), %xmm13 # 0x3f11cc
vbroadcastss 0x15004e(%rip), %xmm2 # 0x3f11d0
vfmadd213ps %xmm2, %xmm3, %xmm13 # xmm13 = (xmm3 * xmm13) + xmm2
vbroadcastss 0x150044(%rip), %xmm2 # 0x3f11d4
vfmadd213ps %xmm2, %xmm3, %xmm13 # xmm13 = (xmm3 * xmm13) + xmm2
vfmadd213ps %xmm10, %xmm3, %xmm13 # xmm13 = (xmm3 * xmm13) + xmm10
vbroadcastss 0x150039(%rip), %xmm2 # 0x3f11dc
vfmadd213ps %xmm2, %xmm3, %xmm13 # xmm13 = (xmm3 * xmm13) + xmm2
vfmadd213ps %xmm8, %xmm3, %xmm13 # xmm13 = (xmm3 * xmm13) + xmm8
vfmadd213ps %xmm3, %xmm11, %xmm13 # xmm13 = (xmm11 * xmm13) + xmm3
vaddps %xmm1, %xmm13, %xmm11
vcvttps2dq %xmm7, %xmm3
vpslld $0x17, %xmm3, %xmm3
vpaddd %xmm1, %xmm3, %xmm3
vfmadd213ps %xmm1, %xmm11, %xmm3 # xmm3 = (xmm11 * xmm3) + xmm1
vrcpps %xmm3, %xmm7
vaddps %xmm7, %xmm7, %xmm11
vbroadcastss 0x15152f(%rip), %xmm1 # 0x3f2708
vfmsub213ps %xmm1, %xmm11, %xmm3 # xmm3 = (xmm11 * xmm3) - xmm1
vfnmadd213ps %xmm11, %xmm7, %xmm3 # xmm3 = -(xmm7 * xmm3) + xmm11
vfmsub213ps %xmm5, %xmm5, %xmm4 # xmm4 = (xmm5 * xmm4) - xmm5
vfmsub213ps %xmm6, %xmm6, %xmm3 # xmm3 = (xmm6 * xmm3) - xmm6
jmp 0x2a13ef
movq 0x118(%rbp,%rsi), %r14
vbroadcastss (%r14), %xmm3
vbroadcastss 0x4(%r14), %xmm11
vmaxps %xmm3, %xmm5, %xmm4
vminps %xmm4, %xmm11, %xmm4
vmaxps %xmm3, %xmm6, %xmm3
vminps %xmm3, %xmm11, %xmm3
jmp 0x2a13ef
vbroadcastss 0x14ff91(%rip), %xmm1 # 0x3f11b4
vxorps %xmm1, %xmm5, %xmm3
vbroadcastss 0x14ff88(%rip), %xmm9 # 0x3f11b8
vminps %xmm3, %xmm9, %xmm3
vbroadcastss 0x14ff7f(%rip), %xmm10 # 0x3f11bc
vmaxps %xmm3, %xmm10, %xmm5
vbroadcastss 0x14ff76(%rip), %xmm8 # 0x3f11c0
vmovaps %xmm8, %xmm3
vbroadcastss 0x14cdbd(%rip), %xmm1 # 0x3ee014
vfmadd213ps %xmm1, %xmm5, %xmm3 # xmm3 = (xmm5 * xmm3) + xmm1
vcvttps2dq %xmm3, %xmm4
vcvtdq2ps %xmm4, %xmm4
vcmpltps %xmm4, %xmm3, %xmm3
vbroadcastss 0x14da16(%rip), %xmm2 # 0x3eec88
vandps %xmm2, %xmm3, %xmm3
vsubps %xmm3, %xmm4, %xmm3
vbroadcastss 0x14ff41(%rip), %xmm7 # 0x3f11c4
vfmsub231ps %xmm7, %xmm3, %xmm5 # xmm5 = (xmm3 * xmm7) - xmm5
vbroadcastss 0x14ff37(%rip), %xmm13 # 0x3f11c8
vfnmsub231ps %xmm13, %xmm3, %xmm5 # xmm5 = -(xmm3 * xmm13) - xmm5
vmulps %xmm5, %xmm5, %xmm11
vbroadcastss 0x14ff29(%rip), %xmm12 # 0x3f11cc
vmovaps %xmm12, %xmm4
vbroadcastss 0x14ff20(%rip), %xmm14 # 0x3f11d0
vfmadd213ps %xmm14, %xmm5, %xmm4 # xmm4 = (xmm5 * xmm4) + xmm14
vbroadcastss 0x14ff16(%rip), %xmm15 # 0x3f11d4
vfmadd213ps %xmm15, %xmm5, %xmm4 # xmm4 = (xmm5 * xmm4) + xmm15
vbroadcastss 0x14ff0c(%rip), %xmm7 # 0x3f11d8
vfmadd213ps %xmm7, %xmm5, %xmm4 # xmm4 = (xmm5 * xmm4) + xmm7
vbroadcastss 0x14ff02(%rip), %xmm7 # 0x3f11dc
vfmadd213ps %xmm7, %xmm5, %xmm4 # xmm4 = (xmm5 * xmm4) + xmm7
vfmadd213ps %xmm1, %xmm5, %xmm4 # xmm4 = (xmm5 * xmm4) + xmm1
vfmadd213ps %xmm5, %xmm11, %xmm4 # xmm4 = (xmm11 * xmm4) + xmm5
vbroadcastss 0x14fec2(%rip), %xmm5 # 0x3f11b4
vxorps %xmm5, %xmm6, %xmm5
vminps %xmm5, %xmm9, %xmm5
vmaxps %xmm5, %xmm10, %xmm5
vfmadd213ps %xmm1, %xmm5, %xmm8 # xmm8 = (xmm5 * xmm8) + xmm1
vcvttps2dq %xmm8, %xmm11
vcvtdq2ps %xmm11, %xmm11
vcmpltps %xmm11, %xmm8, %xmm6
vandps %xmm2, %xmm6, %xmm6
vsubps %xmm6, %xmm11, %xmm6
vbroadcastss 0x14fea0(%rip), %xmm8 # 0x3f11c4
vfmsub231ps %xmm8, %xmm6, %xmm5 # xmm5 = (xmm6 * xmm8) - xmm5
vfnmsub231ps %xmm13, %xmm6, %xmm5 # xmm5 = -(xmm6 * xmm13) - xmm5
vmulps %xmm5, %xmm5, %xmm11
vfmadd213ps %xmm14, %xmm5, %xmm12 # xmm12 = (xmm5 * xmm12) + xmm14
vfmadd213ps %xmm15, %xmm5, %xmm12 # xmm12 = (xmm5 * xmm12) + xmm15
vbroadcastss 0x14fe93(%rip), %xmm8 # 0x3f11d8
vfmadd213ps %xmm8, %xmm5, %xmm12 # xmm12 = (xmm5 * xmm12) + xmm8
vfmadd213ps %xmm7, %xmm5, %xmm12 # xmm12 = (xmm5 * xmm12) + xmm7
vfmadd213ps %xmm1, %xmm5, %xmm12 # xmm12 = (xmm5 * xmm12) + xmm1
vfmadd213ps %xmm5, %xmm11, %xmm12 # xmm12 = (xmm11 * xmm12) + xmm5
vaddps %xmm2, %xmm4, %xmm5
vcvttps2dq %xmm3, %xmm3
vpslld $0x17, %xmm3, %xmm3
vpaddd %xmm2, %xmm3, %xmm4
vfmadd213ps %xmm2, %xmm5, %xmm4 # xmm4 = (xmm5 * xmm4) + xmm2
vrcpps %xmm4, %xmm3
vfmsub213ps %xmm2, %xmm3, %xmm4 # xmm4 = (xmm3 * xmm4) - xmm2
vfnmadd132ps %xmm3, %xmm3, %xmm4 # xmm4 = -(xmm4 * xmm3) + xmm3
vaddps %xmm2, %xmm12, %xmm5
vcvttps2dq %xmm6, %xmm3
vpslld $0x17, %xmm3, %xmm3
vpaddd %xmm2, %xmm3, %xmm3
vfmadd213ps %xmm2, %xmm5, %xmm3 # xmm3 = (xmm5 * xmm3) + xmm2
vrcpps %xmm3, %xmm5
vfmsub213ps %xmm2, %xmm5, %xmm3 # xmm3 = (xmm5 * xmm3) - xmm2
vfnmadd132ps %xmm5, %xmm5, %xmm3 # xmm3 = -(xmm3 * xmm5) + xmm5
jmp 0x2a13ef
movq 0x118(%rbp,%rsi), %r14
vbroadcastss (%r14), %xmm3
vbroadcastss 0x4(%r14), %xmm7
vmovaps %xmm5, %xmm4
vfmadd213ps %xmm7, %xmm3, %xmm4 # xmm4 = (xmm3 * xmm4) + xmm7
vmaxps %xmm0, %xmm4, %xmm4
vbroadcastss 0x14d8bc(%rip), %xmm1 # 0x3eec88
vminps %xmm1, %xmm4, %xmm4
vmulps %xmm5, %xmm4, %xmm4
vfmadd213ps %xmm7, %xmm6, %xmm3 # xmm3 = (xmm6 * xmm3) + xmm7
vmaxps %xmm0, %xmm3, %xmm3
vminps %xmm1, %xmm3, %xmm3
vmulps %xmm6, %xmm3, %xmm3
jmp 0x2a13ef
vmovaps %xmm5, %xmm4
vmovaps %xmm6, %xmm3
cmpl $0x65, 0x38(%rsp)
jl 0x2a145d
movq 0x288(%rbp,%rsi), %rsi
vmulps (%rsi,%rbx,4), %xmm4, %xmm4
vmulps 0x10(%rsi,%rbx,4), %xmm3, %xmm3
vbroadcastss 0x14fda2(%rip), %xmm1 # 0x3f11b4
vandps %xmm1, %xmm4, %xmm5
vandps %xmm1, %xmm3, %xmm6
vbroadcastss 0x14cbf1(%rip), %xmm1 # 0x3ee014
vorps %xmm1, %xmm5, %xmm5
vorps %xmm1, %xmm6, %xmm6
vaddps %xmm5, %xmm4, %xmm4
vaddps %xmm6, %xmm3, %xmm3
vcvttps2dq %xmm4, %xmm4
vcvttps2dq %xmm3, %xmm3
vpackssdw %xmm3, %xmm4, %xmm3
vpminsw 0x157109(%rip), %xmm3, %xmm3 # 0x3f8550
vpmaxsw 0x157111(%rip), %xmm3, %xmm3 # 0x3f8560
vpacksswb %xmm3, %xmm3, %xmm3
vmovq %xmm3, (%rdx)
addq $0x8, %rdx
jmp 0x2a146c
vmovups %xmm4, (%r8)
vmovups %xmm3, 0x10(%r8)
addq $0x20, %r8
incl %r13d
jmp 0x2a0b45
incl %ecx
jmp 0x2a0b34
movq 0x40(%rsp), %rsi
incq %rsi
movq 0x30(%rsp), %rdx
addl 0x28(%rsp), %edx
movq 0x18(%rsp), %r14
jmp 0x2a0acf
pushq $0x3
popq %r13
cmpl $0x1, 0xdc(%rbp,%rdx)
jne 0x2a15a1
cmpl $0x1, 0xe0(%rbp,%rdx)
jne 0x2a15a1
cmpl $0x2, 0xe4(%rbp,%rdx)
jne 0x2a15a1
cmpl $0x2, 0xe8(%rbp,%rdx)
jne 0x2a15a1
cmpl $0x1, 0x110(%rbp,%rdx)
ja 0x2a15a1
cmpl $0x65, 0x38(%rsp)
jl 0x2a20a9
vxorps %xmm0, %xmm0, %xmm0
leaq 0x130(%rsp), %r13
vmovaps %xmm0, (%r13)
andq $0x0, 0x10(%r13)
xorl %ebx, %ebx
movq %rbp, %rcx
leaq 0xe0(%rsp), %rbp
leaq 0x198(%rsp), %r12
movq -0x18(%rax), %r14
movslq 0x108(%rcx,%r14), %rax
cmpq %rax, %rbx
jge 0x2a2400
movq 0x1f8(%rcx,%r14), %rax
vmovss (%rax,%rbx,4), %xmm1
vxorps %xmm0, %xmm0, %xmm0
vucomiss %xmm1, %xmm0
je 0x2a155c
movq 0x240(%rcx,%r14), %rax
vmulss (%rax,%rbx,4), %xmm1, %xmm0
vmovss 0x14d730(%rip), %xmm1 # 0x3eec88
vdivss %xmm0, %xmm1, %xmm0
vmovss %xmm0, 0xe0(%rsp)
movq 0x288(%rcx,%r14), %rax
vmovss (%rax,%rbx,4), %xmm0
vmovss %xmm0, 0x198(%rsp)
movq %r13, %rdi
movq %rbp, %rsi
callq 0x1ea12c
movq %r13, %rdi
movq %r12, %rsi
callq 0x1ea12c
incq %rbx
movq 0x8(%rsp), %rcx
movq (%rcx), %rax
jmp 0x2a1517
imull %ecx, %r13d
movslq %r13d, %rsi
leaq 0x130(%rsp), %rdi
leaq 0xe0(%rsp), %rdx
movq %rsi, 0xc8(%rsp)
callq 0x73bbe
movq (%rbp), %r9
movq -0x18(%r9), %rax
movl 0x18(%rsp), %r10d
imull 0xe0(%rbp,%rax), %r10d
movq 0x130(%rsp), %rbx
movl 0xdc(%rbp,%rax), %ecx
imull 0xd4(%rbp,%rax), %ecx
subl %ecx, %r10d
xorl %ecx, %ecx
xorl %edx, %edx
xorl %esi, %esi
cmpl 0xd8(%rbp,%rax), %esi
jge 0x2a1634
movslq %ecx, %rcx
leaq (%rbx,%rcx,4), %r8
xorl %edi, %edi
cmpl 0xd4(%rbp,%rax), %edi
jge 0x2a162a
movl %edx, (%r8,%rdi,4)
movq -0x18(%r9), %rax
addl 0xdc(%rbp,%rax), %edx
incq %rdi
jmp 0x2a160d
addl %r10d, %edx
incl %esi
addq %rdi, %rcx
jmp 0x2a15fb
xorl %edx, %edx
testl %r13d, %r13d
movl $0x0, %ecx
movq %rcx, 0x50(%rsp)
cmovlel %edx, %r13d
vbroadcastss 0x14fb64(%rip), %xmm4 # 0x3f11b4
leaq 0x156ff9(%rip), %r8 # 0x3f8650
xorl %ebp, %ebp
movq %r9, 0xc0(%rsp)
movq 0x8(%rsp), %rcx
movslq 0x108(%rcx,%rax), %rax
cmpq %rax, %rbp
jge 0x2a19d1
movq 0x10(%r14), %r9
imulq %rbp, %r9
imulq 0x40(%r14), %r9
addq (%r14), %r9
movslq 0x9c(%rsp), %r10
movq 0xb0(%rsp), %r11
imulq %rbp, %r11
movq 0x80(%rsp), %rax
imulq %rax, %r11
addq 0x70(%rsp), %r11
imulq %rax, %r10
movq 0x28(%rcx), %r15
addq 0x50(%rsp), %r15
xorl %eax, %eax
movq %r9, 0x18(%rsp)
movq %r10, 0x28(%rsp)
movq %r11, 0x48(%rsp)
movq %rbp, 0xd0(%rsp)
cmpl 0x68(%rsp), %eax
jg 0x2a19a6
movl %eax, 0x30(%rsp)
movq 0x8(%rsp), %rax
movq (%rax), %rax
movq %rax, 0x40(%rsp)
xorl %r14d, %r14d
cmpl 0x10(%rsp), %r14d
jg 0x2a199b
movq 0x40(%rsp), %rax
movq -0x18(%rax), %r12
movq 0x8(%rsp), %rcx
movslq 0xe8(%rcx,%r12), %rax
movslq 0x30(%rsp), %rdx
imulq %rax, %rdx
imulq %r10, %rdx
movslq 0xe4(%rcx,%r12), %rax
movslq %r14d, %rcx
imulq %rax, %rcx
addq %r11, %rcx
addq %rdx, %rcx
xorl %edx, %edx
xorl %eax, %eax
cmpq %rdx, %r13
je 0x2a1753
movslq (%rbx,%rdx,4), %rsi
movsbl (%rcx,%rsi), %esi
movsbl (%r15,%rdx), %edi
imull %esi, %edi
addl %edi, %eax
incq %rdx
jmp 0x2a1737
movq 0x8(%rsp), %rdx
movq 0x1f8(%rdx,%r12), %rcx
vmovss (%rcx,%rbp,4), %xmm1
vxorps %xmm0, %xmm0, %xmm0
vucomiss %xmm1, %xmm0
je 0x2a1788
movq 0x240(%rdx,%r12), %rcx
vmulss (%rcx,%rbp,4), %xmm1, %xmm0
vmovss 0x14d504(%rip), %xmm1 # 0x3eec88
vdivss %xmm0, %xmm1, %xmm0
vcvtsi2ss %eax, %xmm6, %xmm1
vmulss %xmm1, %xmm0, %xmm5
cmpl $0x0, 0x100(%rdx,%r12)
je 0x2a17a8
movq 0x1b0(%rdx,%r12), %rax
vaddss (%rax,%rbp,4), %xmm5, %xmm5
movl 0x110(%rdx,%r12), %eax
decl %eax
cmpl $0x5, %eax
ja 0x2a1920
movslq (%r8,%rax,4), %rax
addq %r8, %rax
jmpq *%rax
vmaxss 0x14c844(%rip), %xmm5, %xmm0 # 0x3ee010
jmp 0x2a1924
vmovaps %xmm5, %xmm0
movq %r9, 0x58(%rsp)
vmovss %xmm5, 0xd8(%rsp)
callq 0x5f410
vaddss 0x14d498(%rip), %xmm0, %xmm0 # 0x3eec88
callq 0x5f200
callq 0x5f160
movq 0x48(%rsp), %r11
movq 0x28(%rsp), %r10
movq 0x58(%rsp), %r9
leaq 0x156e40(%rip), %r8 # 0x3f8650
vbroadcastss 0x14f99b(%rip), %xmm4 # 0x3f11b4
vmulss 0xd8(%rsp), %xmm0, %xmm0
jmp 0x2a1924
movq 0x8(%rsp), %rax
movq 0x118(%rax,%r12), %rax
vmovss 0x4(%rax), %xmm1
vmaxss (%rax), %xmm5, %xmm0
vucomiss %xmm1, %xmm0
jbe 0x2a1924
vmovaps %xmm1, %xmm0
jmp 0x2a1924
vminss 0x14f960(%rip), %xmm5, %xmm0 # 0x3f11b8
vxorps %xmm4, %xmm0, %xmm1
vcmpltss 0x14f957(%rip), %xmm0, %xmm0 # 0x3f11bc
vbroadcastss 0x14f94a(%rip), %xmm2 # 0x3f11b8
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
movq %r9, %rbp
callq 0x5f410
movq 0x48(%rsp), %r11
movq 0x28(%rsp), %r10
movq %rbp, %r9
movq 0xd0(%rsp), %rbp
leaq 0x156db8(%rip), %r8 # 0x3f8650
vbroadcastss 0x14f913(%rip), %xmm4 # 0x3f11b4
vmovss 0x14d3df(%rip), %xmm1 # 0x3eec88
vaddss %xmm1, %xmm0, %xmm0
vdivss %xmm0, %xmm1, %xmm0
jmp 0x2a1924
movq 0x8(%rsp), %rax
movq 0x118(%rax,%r12), %rax
vxorps %xmm0, %xmm0, %xmm0
vcmpltss %xmm5, %xmm0, %xmm0
vmovss (%rax), %xmm1
vbroadcastss 0x14d3b2(%rip), %xmm2 # 0x3eec88
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmulss %xmm5, %xmm0, %xmm0
jmp 0x2a1924
movq 0x8(%rsp), %rax
movq 0x118(%rax,%r12), %rax
vmovss (%rax), %xmm1
vmovss 0x4(%rax), %xmm2
vxorps %xmm4, %xmm2, %xmm0
vdivss %xmm1, %xmm0, %xmm3
vxorps %xmm0, %xmm0, %xmm0
vucomiss %xmm3, %xmm5
jb 0x2a1924
vmovss 0x14d376(%rip), %xmm0 # 0x3eec88
vdivss %xmm1, %xmm0, %xmm0
vaddss %xmm0, %xmm3, %xmm0
vucomiss %xmm0, %xmm5
jbe 0x2a1990
vmovaps %xmm5, %xmm0
cmpl $0x65, 0x38(%rsp)
jl 0x2a1976
movq 0x8(%rsp), %rax
movq 0x288(%rax,%r12), %rax
vmulss (%rax,%rbp,4), %xmm0, %xmm0
vandps %xmm4, %xmm0, %xmm1
vbroadcastss 0x156cba(%rip), %xmm2 # 0x3f8604
vorps %xmm2, %xmm1, %xmm1
vaddss %xmm1, %xmm0, %xmm0
vroundss $0xb, %xmm0, %xmm0, %xmm0
vcvttss2si %xmm0, %eax
cmpl $-0x7e, %eax
pushq $-0x7f
popq %rcx
cmovll %ecx, %eax
cmpl $0x7f, %eax
pushq $0x7f
popq %rcx
cmovgel %ecx, %eax
movb %al, (%r9)
incq %r9
jmp 0x2a1988
movq 0x18(%rsp), %rax
vmovss %xmm0, (%rax)
addq $0x4, %rax
movq %rax, 0x18(%rsp)
incl %r14d
jmp 0x2a16f0
vfmadd213ss %xmm2, %xmm5, %xmm1 # xmm1 = (xmm5 * xmm1) + xmm2
vmulss %xmm5, %xmm1, %xmm0
jmp 0x2a1924
movl 0x30(%rsp), %eax
incl %eax
jmp 0x2a16d2
incq %rbp
movq 0xc0(%rsp), %r9
movq -0x18(%r9), %rax
movq 0x50(%rsp), %rcx
addq 0xc8(%rsp), %rcx
movq %rcx, 0x50(%rsp)
movq 0x20(%rsp), %r14
jmp 0x2a1661
leaq 0x130(%rsp), %rdi
callq 0x624be
xorl %r12d, %r12d
jmp 0x2a1ab2
movq %rsi, %rdi
callq 0x5f3e0
vmovaps 0xe0(%rsp), %xmm0
vmovups %xmm0, (%rbx)
movq 0xf0(%rsp), %rax
movq %rax, 0x10(%rbx)
movl 0xf8(%rsp), %eax
movl %eax, 0x18(%rbx)
movq 0x100(%rsp), %rax
movq %rax, 0x20(%rbx)
vmovups 0x108(%rsp), %xmm0
vmovups %xmm0, 0x28(%rbx)
movl 0x118(%rsp), %eax
movl %eax, 0x38(%rbx)
movq 0x120(%rsp), %rax
movq %rax, 0x40(%rbx)
movq 0xe8(%rsp), %rax
testq %rax, %rax
je 0x2a1a78
lock
decl (%rax)
jne 0x2a1a78
movq 0xe0(%rsp), %rsi
movq 0x100(%rsp), %rdi
testq %rdi, %rdi
je 0x2a1a70
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2a1a78
movq %rsi, %rdi
callq 0x5f3e0
movq 0x138(%rsp), %rax
testq %rax, %rax
movl %r14d, %r12d
je 0x2a1ab2
lock
decl (%rax)
jne 0x2a1ab2
movq 0x130(%rsp), %rsi
movq 0x150(%rsp), %rdi
testq %rdi, %rdi
je 0x2a1aaa
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2a1ab2
movq %rsi, %rdi
callq 0x5f3e0
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x2a1ae3
lock
decl (%rax)
jne 0x2a1ae3
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x2a1adb
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2a1ae3
movq %rsi, %rdi
callq 0x5f3e0
movq 0x1e8(%rsp), %rax
testq %rax, %rax
je 0x2a1b1a
lock
decl (%rax)
jne 0x2a1b1a
movq 0x1e0(%rsp), %rsi
movq 0x200(%rsp), %rdi
testq %rdi, %rdi
je 0x2a1b12
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2a1b1a
movq %rsi, %rdi
callq 0x5f3e0
movl %r12d, %eax
addq $0x318, %rsp # imm = 0x318
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
vxorps %xmm0, %xmm0, %xmm0
leaq 0x130(%rsp), %rbx
andq $0x0, 0x10(%rbx)
vmovaps %xmm0, (%rbx)
xorl %r14d, %r14d
leaq 0xe0(%rsp), %r15
movq -0x18(%rax), %r12
movslq 0x108(%rbp,%r12), %rax
cmpq %rax, %r14
jge 0x2a1ba5
movq 0x1f8(%rbp,%r12), %rax
movq 0x240(%rbp,%r12), %rcx
vmovss (%rax,%r14,4), %xmm0
vmulss (%rcx,%r14,4), %xmm0, %xmm0
vmovss 0x14d104(%rip), %xmm1 # 0x3eec88
vdivss %xmm0, %xmm1, %xmm0
vmovss %xmm0, 0xe0(%rsp)
movq %rbx, %rdi
movq %r15, %rsi
callq 0x1ea12c
incq %r14
movq (%rbp), %rax
jmp 0x2a1b4f
leaq 0x2a0(%rsp), %rdi
leaq 0x130(%rsp), %rsi
callq 0x290632
movslq 0x9c(%rsp), %rdx
movq 0x20(%rsp), %rcx
movslq 0x2c(%rcx), %rsi
movslq 0x30(%rcx), %rax
movl 0x38(%rcx), %edi
movq 0x28(%rbp), %r9
movq 0x1b0(%rbp,%r12), %r10
movl 0x34(%rcx), %r8d
movl %r8d, 0x58(%rsp)
movq (%rcx), %r8
movq 0x10(%rcx), %r11
movq 0x40(%rcx), %r15
imulq %r11, %r15
movq %rax, %rbx
movq %rsi, 0x38(%rsp)
imulq %rsi, %rbx
movq %r11, 0xd0(%rsp)
movq %rbx, 0x50(%rsp)
imulq %rbx, %r11
addq $0xf, %r11
andq $-0x10, %r11
movq %r11, 0xc8(%rsp)
movq 0x80(%rsp), %rsi
imulq 0xb0(%rsp), %rsi
movq %rsi, 0xc0(%rsp)
xorl %esi, %esi
testl %eax, %eax
movl $0x0, %r11d
cmovgl %eax, %r11d
movl %r11d, 0x10(%rsp)
movl 0x28(%rcx), %eax
movl %eax, 0x64(%rsp)
testl %edi, %edi
cmovlel %esi, %edi
movq %rdi, 0x18(%rsp)
movq 0x70(%rsp), %rax
movq %rax, 0x190(%rsp)
movq %rdx, 0x28(%rsp)
leaq (%rdx,%rdx), %rax
movq %rax, 0x188(%rsp)
movq 0x2a0(%rsp), %rax
movq %rax, 0x180(%rsp)
movq %r8, 0xd8(%rsp)
xorl %esi, %esi
movq %r10, 0x48(%rsp)
movq %r15, 0x68(%rsp)
cmpq 0x18(%rsp), %rsi
je 0x2a1e5f
movq 0xc8(%rsp), %rax
xorl %edx, %edx
divq 0xd0(%rsp)
cmpl $0x4, 0x64(%rsp)
cmoveq 0x50(%rsp), %rax
testq %r10, %r10
je 0x2a1cd7
vmovss (%r10,%rsi,4), %xmm1
jmp 0x2a1cdb
vxorps %xmm1, %xmm1, %xmm1
imulq %rsi, %r15
addq 0xd8(%rsp), %r15
movq 0x180(%rsp), %rcx
vmovss (%rcx,%rsi,4), %xmm0
imull 0x58(%rsp), %eax
testl %eax, %eax
movl $0x0, %ecx
cmovlel %ecx, %eax
xorl %ecx, %ecx
cmpl %ecx, %eax
je 0x2a1d14
vmovss %xmm1, (%r8,%rcx,4)
incq %rcx
jmp 0x2a1d05
leaq (%rsi,%rsi,8), %rax
movq 0xc0(%rsp), %r13
movq %rsi, 0x40(%rsp)
imulq %rsi, %r13
addq 0x190(%rsp), %r13
movq 0x28(%rsp), %rcx
leaq (%rcx,%r13), %r11
movq 0x188(%rsp), %rcx
leaq (%rcx,%r13), %rbx
xorl %r14d, %r14d
cmpl 0x10(%rsp), %r14d
je 0x2a1e45
xorl %ecx, %ecx
movq %r15, %rdx
movq 0x38(%rsp), %rsi
movl %esi, %r10d
testl %r10d, %r10d
jle 0x2a1e25
movsbl (%r13,%rcx), %r12d
movsbl (%r9,%rax), %esi
imull %r12d, %esi
movsbl 0x1(%r13,%rcx), %r12d
movsbl 0x1(%r9,%rax), %edi
imull %r12d, %edi
addl %esi, %edi
movsbl 0x2(%r13,%rcx), %esi
movsbl 0x2(%r9,%rax), %r12d
imull %esi, %r12d
movsbl (%r11,%rcx), %esi
movsbl 0x3(%r9,%rax), %ebp
imull %esi, %ebp
addl %r12d, %ebp
addl %edi, %ebp
movsbl 0x1(%r11,%rcx), %esi
movsbl 0x4(%r9,%rax), %edi
imull %esi, %edi
movsbl 0x2(%r11,%rcx), %esi
movsbl 0x5(%r9,%rax), %r12d
imull %esi, %r12d
addl %edi, %r12d
movsbl (%rbx,%rcx), %esi
movsbl 0x6(%r9,%rax), %edi
imull %esi, %edi
addl %r12d, %edi
addl %ebp, %edi
movsbl 0x1(%rbx,%rcx), %esi
movsbl 0x7(%r9,%rax), %ebp
imull %esi, %ebp
movsbl 0x2(%rbx,%rcx), %esi
movsbl 0x8(%r9,%rax), %r12d
imull %esi, %r12d
addl %ebp, %r12d
addl %edi, %r12d
vcvtsi2ss %r12d, %xmm2, %xmm1
vfmadd213ss (%r15,%rcx,4), %xmm0, %xmm1 # xmm1 = (xmm0 * xmm1) + mem
vmovss %xmm1, (%r15,%rcx,4)
addq $0x4, %rdx
decl %r10d
incq %rcx
jmp 0x2a1d61
addq %rcx, %r13
addq $0x2, %r13
addq %rcx, %r11
addq $0x2, %r11
addq %rcx, %rbx
addq $0x2, %rbx
incl %r14d
movq %rdx, %r15
jmp 0x2a1d49
movq 0x40(%rsp), %rsi
incq %rsi
movq 0x68(%rsp), %r15
addq %r15, %r8
movq 0x48(%rsp), %r10
jmp 0x2a1ca2
leaq 0x2a0(%rsp), %rdi
jmp 0x2a23e4
leaq 0x2b8(%rsp), %rdi
leaq 0x130(%rsp), %rsi
callq 0x290632
movslq 0x9c(%rsp), %rax
movq 0x20(%rsp), %r15
movl 0x2c(%r15), %ecx
movl %ecx, 0x38(%rsp)
movl 0x30(%r15), %ecx
movl 0x38(%r15), %edx
movq 0x28(%rbp), %rdi
movq 0x1b0(%rbp,%r13), %rsi
xorl %r9d, %r9d
testl %ecx, %ecx
cmovlel %r9d, %ecx
movl %ecx, 0x10(%rsp)
movq %rax, 0x28(%rsp)
addq %rax, %rax
movq %rax, 0x58(%rsp)
testl %edx, %edx
cmovlel %r9d, %edx
movq %rdx, 0x18(%rsp)
vbroadcastss 0x14f2dc(%rip), %xmm0 # 0x3f11b4
vbroadcastss 0x156723(%rip), %xmm1 # 0x3f8604
pushq $-0x7f
popq %r8
pushq $0x7f
popq %rbx
movq %rsi, 0x48(%rsp)
cmpq 0x18(%rsp), %r9
je 0x2a209c
testq %rsi, %rsi
je 0x2a1f05
vmovss (%rsi,%r9,4), %xmm2
jmp 0x2a1f09
vxorps %xmm2, %xmm2, %xmm2
movq 0x40(%r15), %r14
imulq %r9, %r14
imulq 0x10(%r15), %r14
addq (%r15), %r14
movq 0x2b8(%rsp), %rax
vmovss (%rax,%r9,8), %xmm3
vmovss 0x4(%rax,%r9,8), %xmm4
movq 0xb0(%rsp), %r15
imulq %r9, %r15
imulq 0x80(%rsp), %r15
addq 0x70(%rsp), %r15
movq %r9, 0x40(%rsp)
leaq (%r9,%r9,8), %r12
movq 0x28(%rsp), %rax
leaq (%r15,%rax), %r13
movq 0x58(%rsp), %rax
leaq (%r15,%rax), %rbp
xorl %eax, %eax
cmpl 0x10(%rsp), %eax
je 0x2a2085
xorl %r10d, %r10d
movl 0x38(%rsp), %ecx
testl %ecx, %ecx
jle 0x2a2066
movsbl (%r15,%r10), %r11d
movsbl (%rdi,%r12), %r9d
imull %r11d, %r9d
movsbl 0x1(%r15,%r10), %r11d
movsbl 0x1(%rdi,%r12), %esi
imull %r11d, %esi
addl %r9d, %esi
movsbl 0x2(%r15,%r10), %r9d
movsbl 0x2(%rdi,%r12), %r11d
imull %r9d, %r11d
movsbl (%r13,%r10), %r9d
movsbl 0x3(%rdi,%r12), %edx
imull %r9d, %edx
addl %r11d, %edx
addl %esi, %edx
movsbl 0x1(%r13,%r10), %esi
movsbl 0x4(%rdi,%r12), %r9d
imull %esi, %r9d
movsbl 0x2(%r13,%r10), %esi
movsbl 0x5(%rdi,%r12), %r11d
imull %esi, %r11d
addl %r9d, %r11d
movsbl (%rbp,%r10), %esi
movsbl 0x6(%rdi,%r12), %r9d
imull %esi, %r9d
addl %r11d, %r9d
addl %edx, %r9d
movsbl 0x1(%rbp,%r10), %edx
movsbl 0x7(%rdi,%r12), %esi
imull %edx, %esi
movsbl 0x2(%rbp,%r10), %edx
movsbl 0x8(%rdi,%r12), %r11d
imull %edx, %r11d
addl %esi, %r11d
addl %r9d, %r11d
vcvtsi2ss %r11d, %xmm7, %xmm5
vfmadd213ss %xmm2, %xmm3, %xmm5 # xmm5 = (xmm3 * xmm5) + xmm2
vmulss %xmm4, %xmm5, %xmm5
vandps %xmm0, %xmm5, %xmm6
vorps %xmm1, %xmm6, %xmm6
vaddss %xmm6, %xmm5, %xmm5
vroundss $0xb, %xmm5, %xmm5, %xmm5
vcvttss2si %xmm5, %r11d
cmpl $-0x7e, %r11d
jge 0x2a204f
movl %r8d, %r11d
cmpl $0x7f, %r11d
jl 0x2a2058
movl %ebx, %r11d
movb %r11b, (%r14,%r10)
decl %ecx
incq %r10
jmp 0x2a1f76
addq %r10, %r15
addq $0x2, %r15
addq %r10, %r13
addq $0x2, %r13
addq %r10, %rbp
addq $0x2, %rbp
incl %eax
addq %r10, %r14
jmp 0x2a1f65
movq 0x40(%rsp), %r9
incq %r9
movq 0x20(%rsp), %r15
movq 0x48(%rsp), %rsi
jmp 0x2a1eed
leaq 0x2b8(%rsp), %rdi
jmp 0x2a262f
vxorps %xmm0, %xmm0, %xmm0
leaq 0x130(%rsp), %rbx
andq $0x0, 0x10(%rbx)
vmovaps %xmm0, (%rbx)
xorl %r14d, %r14d
leaq 0xe0(%rsp), %r15
movq -0x18(%rax), %r12
movslq 0x108(%rbp,%r12), %rax
cmpq %rax, %r14
jge 0x2a211f
movq 0x1f8(%rbp,%r12), %rax
movq 0x240(%rbp,%r12), %rcx
vmovss (%rax,%r14,4), %xmm0
vmulss (%rcx,%r14,4), %xmm0, %xmm0
vmovss 0x14cb8a(%rip), %xmm1 # 0x3eec88
vdivss %xmm0, %xmm1, %xmm0
vmovss %xmm0, 0xe0(%rsp)
movq %rbx, %rdi
movq %r15, %rsi
callq 0x1ea12c
incq %r14
movq (%rbp), %rax
jmp 0x2a20c9
leaq 0x270(%rsp), %rdi
leaq 0x130(%rsp), %rsi
callq 0x290632
movslq 0x9c(%rsp), %rdi
movq 0x20(%rsp), %rdx
movslq 0x2c(%rdx), %rsi
movslq 0x30(%rdx), %rax
movl 0x38(%rdx), %r8d
movl %edi, %ecx
subl %esi, %ecx
addl %ecx, %ecx
movq 0x28(%rbp), %r9
movq 0x1b0(%rbp,%r12), %r10
movq 0x10(%rdx), %r11
movq 0x40(%rdx), %rbx
imulq %r11, %rbx
movq %rax, %r14
movq %rsi, 0x38(%rsp)
imulq %rsi, %r14
movq %r11, 0x58(%rsp)
movq %r14, 0xd0(%rsp)
imulq %r14, %r11
addq $0xf, %r11
andq $-0x10, %r11
movq %r11, 0x68(%rsp)
movq 0x80(%rsp), %rsi
imulq 0xb0(%rsp), %rsi
movq %rsi, 0x50(%rsp)
xorl %r11d, %r11d
testl %eax, %eax
movl $0x0, %esi
cmovgl %eax, %esi
movl %esi, 0x10(%rsp)
movl 0x34(%rdx), %eax
movl %eax, 0xc8(%rsp)
movq (%rdx), %r12
movslq %ecx, %rsi
testl %r8d, %r8d
cmovlel %r11d, %r8d
movq %r8, 0x18(%rsp)
movl 0x28(%rdx), %eax
movl %eax, 0x64(%rsp)
movq 0x70(%rsp), %rax
movq %rax, 0x190(%rsp)
movq %rdi, 0x28(%rsp)
leaq (%rdi,%rdi), %rax
movq %rax, 0x188(%rsp)
movq 0x270(%rsp), %rax
movq %rax, 0x180(%rsp)
movq %r12, 0xc0(%rsp)
xorl %edi, %edi
movq %r10, 0x48(%rsp)
movq %rbx, 0xd8(%rsp)
cmpq 0x18(%rsp), %rdi
je 0x2a23dc
movq 0x68(%rsp), %rax
xorl %edx, %edx
divq 0x58(%rsp)
cmpl $0x4, 0x64(%rsp)
cmoveq 0xd0(%rsp), %rax
testq %r10, %r10
je 0x2a2256
vmovss (%r10,%rdi,4), %xmm1
jmp 0x2a225a
vxorps %xmm1, %xmm1, %xmm1
movq %rbx, %rdx
imulq %rdi, %rdx
addq 0xc0(%rsp), %rdx
movq 0x180(%rsp), %rcx
vmovss (%rcx,%rdi,4), %xmm0
imull 0xc8(%rsp), %eax
testl %eax, %eax
movl $0x0, %ecx
cmovlel %ecx, %eax
xorl %ecx, %ecx
cmpl %ecx, %eax
je 0x2a2299
vmovss %xmm1, (%r12,%rcx,4)
incq %rcx
jmp 0x2a228a
leaq (%rdi,%rdi,8), %rax
movq 0x50(%rsp), %r8
movq %rdi, 0x40(%rsp)
imulq %rdi, %r8
addq 0x190(%rsp), %r8
movq 0x28(%rsp), %rcx
leaq (%r8,%rcx), %r13
movq 0x188(%rsp), %rcx
leaq (%r8,%rcx), %r11
xorl %r14d, %r14d
cmpl 0x10(%rsp), %r14d
je 0x2a23bf
xorl %ecx, %ecx
movq 0x38(%rsp), %rdi
movl %edi, %r10d
testl %r10d, %r10d
jle 0x2a23a5
movsbl (%r8,%rcx), %ebx
movsbl (%r9,%rax), %r15d
imull %ebx, %r15d
movsbl 0x1(%r8,%rcx), %ebx
movsbl 0x1(%r9,%rax), %edi
imull %ebx, %edi
addl %r15d, %edi
movsbl 0x2(%r8,%rcx), %ebx
movsbl 0x2(%r9,%rax), %r15d
imull %ebx, %r15d
movsbl (%r13,%rcx), %ebx
movsbl 0x3(%r9,%rax), %ebp
imull %ebx, %ebp
addl %r15d, %ebp
addl %edi, %ebp
movsbl 0x1(%r13,%rcx), %edi
movsbl 0x4(%r9,%rax), %ebx
imull %edi, %ebx
movsbl 0x2(%r13,%rcx), %edi
movsbl 0x5(%r9,%rax), %r15d
imull %edi, %r15d
addl %ebx, %r15d
movsbl (%r11,%rcx), %edi
movsbl 0x6(%r9,%rax), %ebx
imull %edi, %ebx
addl %r15d, %ebx
addl %ebp, %ebx
movsbl 0x1(%r11,%rcx), %edi
movsbl 0x7(%r9,%rax), %ebp
imull %edi, %ebp
movsbl 0x2(%r11,%rcx), %edi
movsbl 0x8(%r9,%rax), %r15d
imull %edi, %r15d
addl %ebp, %r15d
addl %ebx, %r15d
vcvtsi2ss %r15d, %xmm2, %xmm1
vfmadd213ss (%rdx), %xmm0, %xmm1 # xmm1 = (xmm0 * xmm1) + mem
vmovss %xmm1, (%rdx)
addq $0x4, %rdx
decl %r10d
addq $0x2, %rcx
jmp 0x2a22e0
addq %rsi, %r8
addq %rcx, %r8
addq %rsi, %r13
addq %rcx, %r13
addq %rsi, %r11
addq %rcx, %r11
incl %r14d
jmp 0x2a22cb
movq 0x40(%rsp), %rdi
incq %rdi
movq 0xd8(%rsp), %rbx
addq %rbx, %r12
movq 0x48(%rsp), %r10
jmp 0x2a2224
leaq 0x270(%rsp), %rdi
callq 0x621c2
leaq 0x130(%rsp), %rdi
callq 0x621c2
movq 0x20(%rsp), %r15
jmp 0x2a2641
leaq 0x288(%rsp), %rdi
leaq 0x130(%rsp), %rsi
callq 0x290632
movslq 0x9c(%rsp), %rsi
movq 0x20(%rsp), %r15
movl 0x2c(%r15), %eax
movl 0x30(%r15), %edx
movl 0x38(%r15), %r8d
movl %esi, %ecx
movl %eax, 0x10(%rsp)
subl %eax, %ecx
addl %ecx, %ecx
movq 0x8(%rsp), %rax
movq 0x28(%rax), %rdi
movq 0x1b0(%rax,%r14), %rax
movq %rsi, 0x18(%rsp)
addq %rsi, %rsi
movq %rsi, 0x48(%rsp)
movslq %ecx, %r10
xorl %esi, %esi
testl %edx, %edx
cmovlel %esi, %edx
testl %r8d, %r8d
cmovlel %esi, %r8d
movq %r8, 0x40(%rsp)
vbroadcastss 0x14ed3f(%rip), %xmm0 # 0x3f11b4
vbroadcastss 0x156186(%rip), %xmm1 # 0x3f8604
movq %rax, 0x28(%rsp)
cmpq 0x40(%rsp), %rsi
je 0x2a2627
testq %rax, %rax
je 0x2a249a
vmovss (%rax,%rsi,4), %xmm2
jmp 0x2a249e
vxorps %xmm2, %xmm2, %xmm2
movq 0x40(%r15), %rbx
imulq %rsi, %rbx
imulq 0x10(%r15), %rbx
addq (%r15), %rbx
movq 0x288(%rsp), %rcx
vmovss (%rcx,%rsi,8), %xmm3
vmovss 0x4(%rcx,%rsi,8), %xmm4
movq 0xb0(%rsp), %r14
imulq %rsi, %r14
imulq 0x80(%rsp), %r14
addq 0x70(%rsp), %r14
movq %rsi, 0x38(%rsp)
leaq (%rsi,%rsi,8), %r15
movq 0x18(%rsp), %rax
leaq (%r14,%rax), %r12
movq 0x48(%rsp), %rax
leaq (%r14,%rax), %r13
xorl %ebp, %ebp
cmpl %edx, %ebp
je 0x2a2610
xorl %r9d, %r9d
movl 0x10(%rsp), %ecx
testl %ecx, %ecx
jle 0x2a25f7
movsbl (%r14,%r9), %esi
movsbl (%rdi,%r15), %r8d
imull %esi, %r8d
movsbl 0x1(%r14,%r9), %esi
movsbl 0x1(%rdi,%r15), %r11d
imull %esi, %r11d
addl %r8d, %r11d
movsbl 0x2(%r14,%r9), %esi
movsbl 0x2(%rdi,%r15), %r8d
imull %esi, %r8d
movsbl (%r12,%r9), %esi
movsbl 0x3(%rdi,%r15), %eax
imull %esi, %eax
addl %r8d, %eax
addl %r11d, %eax
movsbl 0x1(%r12,%r9), %esi
movsbl 0x4(%rdi,%r15), %r8d
imull %esi, %r8d
movsbl 0x2(%r12,%r9), %esi
movsbl 0x5(%rdi,%r15), %r11d
imull %esi, %r11d
addl %r8d, %r11d
movsbl (%r13,%r9), %esi
movsbl 0x6(%rdi,%r15), %r8d
imull %esi, %r8d
addl %r11d, %r8d
addl %eax, %r8d
movsbl 0x1(%r13,%r9), %eax
movsbl 0x7(%rdi,%r15), %esi
imull %eax, %esi
movsbl 0x2(%r13,%r9), %eax
movsbl 0x8(%rdi,%r15), %r11d
imull %eax, %r11d
addl %esi, %r11d
addl %r8d, %r11d
vcvtsi2ss %r11d, %xmm7, %xmm5
vfmadd213ss %xmm2, %xmm3, %xmm5 # xmm5 = (xmm3 * xmm5) + xmm2
vmulss %xmm4, %xmm5, %xmm5
vandps %xmm0, %xmm5, %xmm6
vorps %xmm1, %xmm6, %xmm6
vaddss %xmm6, %xmm5, %xmm5
vroundss $0xb, %xmm5, %xmm5, %xmm5
vcvttss2si %xmm5, %esi
cmpl $-0x7e, %esi
jge 0x2a25de
pushq $-0x7f
popq %rsi
cmpl $0x7f, %esi
jl 0x2a25e6
pushq $0x7f
popq %rsi
movb %sil, (%rbx)
incq %rbx
decl %ecx
addq $0x2, %r9
jmp 0x2a2507
addq %r10, %r14
addq %r9, %r14
addq %r10, %r12
addq %r9, %r12
addq %r10, %r13
addq %r9, %r13
incl %ebp
jmp 0x2a24f8
movq 0x38(%rsp), %rsi
incq %rsi
movq 0x20(%rsp), %r15
movq 0x28(%rsp), %rax
jmp 0x2a2483
leaq 0x288(%rsp), %rdi
callq 0x621c2
leaq 0x130(%rsp), %rdi
callq 0x621c2
movq 0x8(%rsp), %rax
xorl %r12d, %r12d
movq 0x8(%rax), %rdi
testq %rdi, %rdi
je 0x2a1ab2
movq (%rdi), %rax
movq %r15, %rsi
movq 0x30(%rsp), %rdx
callq *0x48(%rax)
jmp 0x2a1ab2
jmp 0x2a2677
jmp 0x2a2677
jmp 0x2a2677
jmp 0x2a2677
jmp 0x2a2677
jmp 0x2a2677
jmp 0x2a2677
movq %rax, %rbx
leaq 0x130(%rsp), %rdi
callq 0x621c2
jmp 0x2a27f9
jmp 0x2a2712
jmp 0x2a2712
jmp 0x2a2873
jmp 0x2a2873
movq %rax, %rbx
jmp 0x2a27c2
jmp 0x2a2873
jmp 0x2a2712
movq %rax, %rbx
jmp 0x2a278b
jmp 0x2a2873
jmp 0x2a2873
jmp 0x2a2712
jmp 0x2a2873
jmp 0x2a2873
movq %rax, %rbx
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x2a282a
lock
decl (%rax)
jne 0x2a282a
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x2a281a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2a282a
jmp 0x2a2873
movq %rax, %rbx
jmp 0x2a282a
movq %rax, %rbx
jmp 0x2a27f9
movq %rax, %rbx
movq 0x230(%rsp), %rax
testq %rax, %rax
je 0x2a2754
lock
decl (%rax)
jne 0x2a2754
movq 0x228(%rsp), %rsi
movq 0x248(%rsp), %rdi
testq %rdi, %rdi
jne 0x2a274e
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2a2754
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x1a0(%rsp), %rax
testq %rax, %rax
je 0x2a278b
lock
decl (%rax)
jne 0x2a278b
movq 0x198(%rsp), %rsi
movq 0x1b8(%rsp), %rdi
testq %rdi, %rdi
jne 0x2a2785
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2a278b
movq (%rdi), %rax
callq *0x18(%rax)
movq 0xe8(%rsp), %rax
testq %rax, %rax
je 0x2a27c2
lock
decl (%rax)
jne 0x2a27c2
movq 0xe0(%rsp), %rsi
movq 0x100(%rsp), %rdi
testq %rdi, %rdi
jne 0x2a27bc
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2a27c2
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x138(%rsp), %rax
testq %rax, %rax
je 0x2a27f9
lock
decl (%rax)
jne 0x2a27f9
movq 0x130(%rsp), %rsi
movq 0x150(%rsp), %rdi
testq %rdi, %rdi
jne 0x2a27f3
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2a27f9
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x2a282a
lock
decl (%rax)
jne 0x2a282a
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
jne 0x2a2824
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2a282a
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x1e8(%rsp), %rax
testq %rax, %rax
je 0x2a2861
lock
decl (%rax)
jne 0x2a2861
movq 0x1e0(%rsp), %rsi
movq 0x200(%rsp), %rdi
testq %rdi, %rdi
jne 0x2a285b
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2a2861
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
jmp 0x2a2873
jmp 0x2a2873
jmp 0x2a2873
jmp 0x2a2873
jmp 0x2a2873
movq %rax, %rdi
callq 0x61d68
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/convolutiondepthwise_x86_fma.cpp |
virtual thunk to ncnn::ConvolutionDepthWise_x86_fma::forward(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const | int ConvolutionDepthWise_x86_fma::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
#if NCNN_INT8
if (opt.use_int8_inference && int8_scale_term)
{
return forward_int8_x86(bottom_blob, top_blob, opt);
}
#endif
int w = bottom_blob.w;
int h = bottom_blob.h;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;
Mat bottom_blob_bordered;
make_padding(bottom_blob, bottom_blob_bordered, opt);
if (bottom_blob_bordered.empty())
return -100;
w = bottom_blob_bordered.w;
h = bottom_blob_bordered.h;
int outw = (w - kernel_extent_w) / stride_w + 1;
int outh = (h - kernel_extent_h) / stride_h + 1;
int out_elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
#if __AVX512F__
out_elempack = num_output % 16 == 0 ? 16 : num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#elif __AVX__
out_elempack = num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#else
out_elempack = num_output % 4 == 0 ? 4 : 1;
#endif
}
#endif // __SSE2__
size_t out_elemsize = elemsize / elempack * out_elempack;
top_blob.create(outw, outh, num_output / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
// depth-wise
if (channels * elempack == group && group == num_output)
{
#if __SSE2__
#if __AVX__
#if __AVX512F__
if (elempack == 16)
{
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw3x3s1_pack16_avx512(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw3x3s2_pack16_avx512(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 5 && kernel_h == 5 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw5x5s1_pack16_avx512(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 5 && kernel_h == 5 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw5x5s2_pack16_avx512(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
else
{
const int maxk = kernel_w * kernel_h;
// kernel offsets
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
int gap = w * dilation_h - kernel_w * dilation_w;
for (int i = 0; i < kernel_h; i++)
{
for (int j = 0; j < kernel_w; j++)
{
space_ofs[p1] = p2;
p1++;
p2 += dilation_w;
}
p2 += gap;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < channels; g++)
{
float* outptr = top_blob.channel(g);
const float* kptr = (const float*)weight_data_tm + maxk * g * 16;
const Mat m = bottom_blob_bordered.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__m512 _sum = _mm512_set1_ps(0.f);
if (bias_term)
{
_sum = _mm512_loadu_ps(((const float*)bias_data) + g * 16);
}
const float* sptr = m.row(i * stride_h) + j * stride_w * 16;
for (int k = 0; k < maxk; k++)
{
__m512 _val = _mm512_loadu_ps(sptr + space_ofs[k] * 16);
__m512 _w = _mm512_loadu_ps(kptr + k * 16);
_sum = _mm512_fmadd_ps(_val, _w, _sum);
}
_mm512_storeu_ps(outptr, _sum);
outptr += 16;
}
}
}
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
}
#endif // __AVX512F__
if (elempack == 8)
{
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw3x3s1_pack8_avx(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw3x3s2_pack8_avx(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 5 && kernel_h == 5 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw5x5s1_pack8_avx(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 5 && kernel_h == 5 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw5x5s2_pack8_avx(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
else
{
const int maxk = kernel_w * kernel_h;
// kernel offsets
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
int gap = w * dilation_h - kernel_w * dilation_w;
for (int i = 0; i < kernel_h; i++)
{
for (int j = 0; j < kernel_w; j++)
{
space_ofs[p1] = p2;
p1++;
p2 += dilation_w;
}
p2 += gap;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < channels; g++)
{
float* outptr = top_blob.channel(g);
const float* kptr = (const float*)weight_data_tm + maxk * g * 8;
const Mat m = bottom_blob_bordered.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__m256 _sum = _mm256_set1_ps(0.f);
if (bias_term)
{
_sum = _mm256_loadu_ps(((const float*)bias_data) + g * 8);
}
const float* sptr = m.row(i * stride_h) + j * stride_w * 8;
for (int k = 0; k < maxk; k++)
{
__m256 _val = _mm256_loadu_ps(sptr + space_ofs[k] * 8);
__m256 _w = _mm256_loadu_ps(kptr + k * 8);
_sum = _mm256_comp_fmadd_ps(_val, _w, _sum);
}
_mm256_storeu_ps(outptr + j * 8, _sum);
}
outptr += outw * 8;
}
}
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
}
#endif // __AVX__
if (elempack == 4)
{
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw3x3s1_pack4_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw3x3s2_pack4_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 5 && kernel_h == 5 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw5x5s1_pack4_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 5 && kernel_h == 5 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw5x5s2_pack4_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
{
const int maxk = kernel_w * kernel_h;
// kernel offsets
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
int gap = w * dilation_h - kernel_w * dilation_w;
for (int i = 0; i < kernel_h; i++)
{
for (int j = 0; j < kernel_w; j++)
{
space_ofs[p1] = p2;
p1++;
p2 += dilation_w;
}
p2 += gap;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < channels; g++)
{
float* outptr = top_blob.channel(g);
const float* kptr = (const float*)weight_data_tm + maxk * g * 4;
const Mat m = bottom_blob_bordered.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__m128 _sum = _mm_set1_ps(0.f);
if (bias_term)
{
_sum = _mm_loadu_ps(((const float*)bias_data) + g * 4);
}
const float* sptr = m.row(i * stride_h) + j * stride_w * 4;
for (int k = 0; k < maxk; k++)
{
__m128 _val = _mm_loadu_ps(sptr + space_ofs[k] * 4);
__m128 _w = _mm_loadu_ps(kptr + k * 4);
_sum = _mm_add_ps(_mm_mul_ps(_val, _w), _sum);
}
_sum = activation_sse(_sum, activation_type, activation_params);
_mm_storeu_ps(outptr + j * 4, _sum);
}
outptr += outw * 4;
}
}
return 0;
}
}
#endif // __SSE2__
if (elempack == 1)
{
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw3x3s1_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw3x3s2_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
}
}
// group convolution
const int channels_g = channels * elempack / group;
const int num_output_g = num_output / group;
int g_elempack = 1;
int out_g_elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
#if __AVX512F__
g_elempack = channels_g % 16 == 0 ? 16 : channels_g % 8 == 0 ? 8 : channels_g % 4 == 0 ? 4 : 1;
out_g_elempack = num_output_g % 16 == 0 ? 16 : num_output_g % 8 == 0 ? 8 : num_output_g % 4 == 0 ? 4 : 1;
#elif __AVX__
g_elempack = channels_g % 8 == 0 ? 8 : channels_g % 4 == 0 ? 4 : 1;
out_g_elempack = num_output_g % 8 == 0 ? 8 : num_output_g % 4 == 0 ? 4 : 1;
#else
g_elempack = channels_g % 4 == 0 ? 4 : 1;
out_g_elempack = num_output_g % 4 == 0 ? 4 : 1;
#endif
}
#endif // __SSE2__
// unpacking
Mat bottom_blob_bordered_unpacked = bottom_blob_bordered;
if (elempack > g_elempack)
{
Option opt_p = opt;
opt_p.blob_allocator = opt.workspace_allocator;
convert_packing(bottom_blob_bordered, bottom_blob_bordered_unpacked, g_elempack, opt_p);
}
Mat top_blob_unpacked = top_blob;
if (out_g_elempack < out_elempack)
{
top_blob_unpacked.create(outw, outh, num_output / out_g_elempack, out_elemsize / out_elempack * out_g_elempack, out_g_elempack, opt.workspace_allocator);
if (top_blob_unpacked.empty())
return -100;
}
for (int g = 0; g < group; g++)
{
const Mat bottom_blob_bordered_g = bottom_blob_bordered_unpacked.channel_range(channels_g * g / g_elempack, channels_g / g_elempack);
Mat top_blob_g = top_blob_unpacked.channel_range(num_output_g * g / out_g_elempack, num_output_g / out_g_elempack);
const ncnn::Layer* op = group_ops[g];
Option opt_g = opt;
opt_g.blob_allocator = top_blob_unpacked.allocator;
// forward
op->forward(bottom_blob_bordered_g, top_blob_g, opt_g);
}
// packing
if (out_g_elempack < out_elempack)
{
convert_packing(top_blob_unpacked, top_blob, out_elempack, opt);
}
else
{
top_blob = top_blob_unpacked;
}
return 0;
} | movq (%rdi), %rax
addq -0x48(%rax), %rdi
jmp 0x29be02
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/convolutiondepthwise_x86_fma.cpp |
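Note: the three-instruction body above (`movq (%rdi), %rax; addq -0x48(%rax), %rdi; jmp ...`) is a virtual thunk. The `this` adjustment is loaded from a negative slot of the vtable rather than encoded as a constant, which is the pattern compilers emit when a class overrides a method inherited through a virtual base — ncnn's x86 layer classes appear to use such a hierarchy, though the sketch below uses illustrative names, not ncnn's actual classes:

struct LayerBase
{
    virtual ~LayerBase() {}
    virtual int forward() { return 0; }
};

// With a virtual base, the offset from the LayerBase subobject back to
// the full object is only known per most-derived type, so it is stored
// in the vtable -- exactly the -0x48(%rax) load above.
struct MixinA : virtual LayerBase
{
};

struct Impl : MixinA
{
    int forward() override { return 42; } // reached via "virtual thunk to Impl::forward"
};

int dispatch(LayerBase* p)
{
    return p->forward(); // lands on the thunk, which fixes up `this` and tail-jumps
}

The companion thunks later in this file read -0x40, -0x30, and -0x38 instead of -0x48; each offset selects the vcall-adjustment slot belonging to a different virtual member (the two forward overloads, create_pipeline, destroy_pipeline).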
ncnn::ConvolutionDepthWise_x86_fma::forward(std::vector<ncnn::Mat, std::allocator<ncnn::Mat>> const&, std::vector<ncnn::Mat, std::allocator<ncnn::Mat>>&, ncnn::Option const&) const | int ConvolutionDepthWise_x86_fma::forward(const std::vector<Mat>& bottom_blobs, std::vector<Mat>& top_blobs, const Option& opt) const
{
const Mat& bottom_blob = bottom_blobs[0];
const Mat& _weight_data = bottom_blobs[1];
Mat& top_blob = top_blobs[0];
const int _kernel_w = _weight_data.w;
const int _kernel_h = _weight_data.h;
const int _num_output = _weight_data.c * _weight_data.elempack;
Mat weight_data_flattened;
flatten(_weight_data, weight_data_flattened, opt);
if (weight_data_flattened.empty())
return -100;
// weight_data_flattened as pack1
weight_data_flattened.w *= weight_data_flattened.elempack;
weight_data_flattened.elemsize /= weight_data_flattened.elempack;
weight_data_flattened.elempack = 1;
Mat bias_data_flattened;
if (bias_term)
{
const Mat& _bias_data = bottom_blobs[2];
flatten(_bias_data, bias_data_flattened, opt);
if (bias_data_flattened.empty())
return -100;
// bias_data_flattened as pack1
bias_data_flattened.w *= bias_data_flattened.elempack;
bias_data_flattened.elemsize /= bias_data_flattened.elempack;
bias_data_flattened.elempack = 1;
}
ncnn::Layer* op = ncnn::create_layer(ncnn::LayerType::ConvolutionDepthWise);
ncnn::ParamDict pd;
pd.set(0, _num_output);
pd.set(1, _kernel_w);
pd.set(11, _kernel_h);
pd.set(2, dilation_w);
pd.set(12, dilation_h);
pd.set(3, stride_w);
pd.set(13, stride_h);
pd.set(4, pad_left);
pd.set(15, pad_right);
pd.set(14, pad_top);
pd.set(16, pad_bottom);
pd.set(18, pad_value);
pd.set(5, bias_term);
pd.set(6, weight_data_flattened.w);
pd.set(7, group);
pd.set(8, int8_scale_term);
pd.set(9, activation_type);
pd.set(10, activation_params);
op->load_param(pd);
ncnn::Mat weights[2];
weights[0] = weight_data_flattened;
weights[1] = bias_data_flattened;
op->load_model(ncnn::ModelBinFromMatArray(weights));
op->create_pipeline(opt);
op->forward(bottom_blob, top_blob, opt);
op->destroy_pipeline(opt);
delete op;
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x168, %rsp # imm = 0x168
movq %rsi, %rbp
movq %rdi, %r13
movq (%rsi), %r14
leaq 0x48(%r14), %rdi
movq (%rdx), %rax
movq %rax, 0xb8(%rsp)
movl 0x60(%r14), %ebx
movl 0x74(%r14), %eax
movl %eax, 0x1c(%rsp)
movl 0x78(%r14), %eax
movl %eax, 0x18(%rsp)
imull 0x80(%r14), %ebx
leaq 0x70(%rsp), %rsi
andq $0x0, 0x40(%rsi)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, (%rsi)
vmovups %xmm0, 0xc(%rsi)
vmovaps %xmm0, 0x20(%rsi)
vmovups %xmm0, 0x2c(%rsi)
movq %rcx, %r15
movq %rcx, %rdx
callq 0x64ee7
pushq $-0x64
popq %r12
cmpq $0x0, 0x70(%rsp)
je 0x2a2ea9
movslq 0xa8(%rsp), %rax
imulq 0xb0(%rsp), %rax
testq %rax, %rax
je 0x2a2ea9
movslq 0x88(%rsp), %rcx
movl 0x9c(%rsp), %eax
imull %ecx, %eax
movl %eax, 0x9c(%rsp)
movq 0x80(%rsp), %rax
xorl %edx, %edx
divq %rcx
movq %rax, 0x80(%rsp)
movl $0x1, 0x88(%rsp)
andq $0x0, 0x60(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
vmovups %xmm0, 0x2c(%rsp)
vmovaps %xmm0, 0x40(%rsp)
vmovups %xmm0, 0x4c(%rsp)
movq (%r13), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0x100(%r13,%rax)
je 0x2a29f2
movl $0x90, %edi
addq (%rbp), %rdi
leaq 0x20(%rsp), %rsi
movq %r15, %rdx
callq 0x64ee7
pushq $-0x64
popq %r12
cmpq $0x0, 0x20(%rsp)
je 0x2a2e7b
movslq 0x58(%rsp), %rax
imulq 0x60(%rsp), %rax
testq %rax, %rax
je 0x2a2e7b
movslq 0x38(%rsp), %rcx
movl 0x4c(%rsp), %eax
imull %ecx, %eax
movl %eax, 0x4c(%rsp)
movq 0x30(%rsp), %rax
xorl %edx, %edx
divq %rcx
movq %rax, 0x30(%rsp)
movl $0x1, 0x38(%rsp)
pushq $0x2a
popq %rdi
callq 0x782bf
movq %rax, %r12
leaq 0x8(%rsp), %rdi
callq 0x71548
leaq 0x8(%rsp), %rdi
xorl %esi, %esi
movl %ebx, %edx
callq 0x7193a
leaq 0x8(%rsp), %rdi
pushq $0x1
popq %rsi
movl 0x1c(%rsp), %edx
callq 0x7193a
leaq 0x8(%rsp), %rdi
pushq $0xb
popq %rsi
movl 0x18(%rsp), %edx
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0xdc(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0x2
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0xe0(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0xc
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0xe4(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0x3
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0xe8(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0xd
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0xec(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0x4
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0xf0(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0xf
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0xf4(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0xe
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0xf8(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0x10
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
vmovss 0xfc(%r13,%rax), %xmm0
leaq 0x8(%rsp), %rdi
pushq $0x12
popq %rsi
callq 0x71952
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0x100(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0x5
popq %rsi
callq 0x7193a
movl 0x9c(%rsp), %edx
leaq 0x8(%rsp), %rdi
pushq $0x6
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0x108(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0x7
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0x10c(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0x8
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0x110(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0x9
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
leaq (%rax,%r13), %rdx
addq $0x118, %rdx # imm = 0x118
leaq 0x8(%rsp), %rdi
pushq $0xa
popq %rsi
callq 0x7196c
movq (%r12), %rax
leaq 0x8(%rsp), %rsi
movq %r12, %rdi
callq *0x10(%rax)
andq $0x0, 0x110(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0xd0(%rsp)
vmovups %xmm0, 0xdc(%rsp)
vmovaps %xmm0, 0xf0(%rsp)
vmovups %xmm0, 0xfc(%rsp)
andq $0x0, 0x158(%rsp)
vmovups %xmm0, 0x118(%rsp)
vmovups %xmm0, 0x124(%rsp)
vmovups %xmm0, 0x138(%rsp)
vmovups %xmm0, 0x144(%rsp)
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x2a2c60
lock
incl (%rax)
movq 0xd8(%rsp), %rax
testq %rax, %rax
je 0x2a2c97
lock
decl (%rax)
jne 0x2a2c97
movq 0xd0(%rsp), %rsi
movq 0xf0(%rsp), %rdi
testq %rdi, %rdi
je 0x2a2c8f
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2a2c97
movq %rsi, %rdi
callq 0x5f3e0
vmovaps 0x70(%rsp), %xmm0
vmovaps %xmm0, 0xd0(%rsp)
movq 0x80(%rsp), %rax
movq %rax, 0xe0(%rsp)
movl 0x88(%rsp), %eax
movl %eax, 0xe8(%rsp)
movq 0x90(%rsp), %rax
movq %rax, 0xf0(%rsp)
vmovups 0x98(%rsp), %xmm0
vmovups %xmm0, 0xf8(%rsp)
movl 0xa8(%rsp), %eax
movl %eax, 0x108(%rsp)
movq 0xb0(%rsp), %rax
movq %rax, 0x110(%rsp)
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x2a2d11
lock
incl (%rax)
movq 0x120(%rsp), %rax
testq %rax, %rax
je 0x2a2d48
lock
decl (%rax)
jne 0x2a2d48
movq 0x118(%rsp), %rsi
movq 0x138(%rsp), %rdi
testq %rdi, %rdi
je 0x2a2d40
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2a2d48
movq %rsi, %rdi
callq 0x5f3e0
vmovaps 0x20(%rsp), %xmm0
leaq 0xd0(%rsp), %rsi
vmovups %xmm0, 0x48(%rsi)
movq 0x30(%rsp), %rax
movq %rax, 0x58(%rsi)
movl 0x38(%rsp), %eax
movl %eax, 0x60(%rsi)
movq 0x40(%rsp), %rax
movq %rax, 0x68(%rsi)
vmovups 0x48(%rsp), %xmm0
vmovaps %xmm0, 0x70(%rsi)
movl 0x58(%rsp), %eax
movl %eax, 0x80(%rsi)
movq 0x60(%rsp), %rax
movq %rax, 0x88(%rsi)
leaq 0xc0(%rsp), %rdi
callq 0x6b00e
movq (%r12), %rax
leaq 0xc0(%rsp), %rsi
movq %r12, %rdi
callq *0x18(%rax)
leaq 0xc0(%rsp), %rdi
callq 0x6b03a
movq (%r12), %rax
movq %r12, %rdi
movq %r15, %rsi
callq *0x20(%rax)
movq (%r12), %rax
movq %r12, %rdi
movq %r14, %rsi
movq 0xb8(%rsp), %rdx
movq %r15, %rcx
callq *0x38(%rax)
movq (%r12), %rax
movq %r12, %rdi
movq %r15, %rsi
callq *0x28(%rax)
movq (%r12), %rax
movq %r12, %rdi
callq *0x8(%rax)
pushq $0x48
popq %rbx
vxorps %xmm0, %xmm0, %xmm0
movq 0xd8(%rsp,%rbx), %rax
testq %rax, %rax
je 0x2a2e43
lock
decl (%rax)
jne 0x2a2e43
movq 0xd0(%rsp,%rbx), %rsi
movq 0xf0(%rsp,%rbx), %rdi
testq %rdi, %rdi
je 0x2a2e37
movq (%rdi), %rax
callq *0x18(%rax)
vxorps %xmm0, %xmm0, %xmm0
jmp 0x2a2e43
movq %rsi, %rdi
callq 0x5f3e0
vxorps %xmm0, %xmm0, %xmm0
leaq (%rsp,%rbx), %rax
addq $0xd0, %rax
andq $0x0, 0x40(%rax)
vmovups %xmm0, 0xc(%rax)
vmovups %xmm0, (%rax)
andl $0x0, 0x38(%rax)
vmovups %xmm0, 0x28(%rax)
addq $-0x48, %rbx
cmpq $-0x48, %rbx
jne 0x2a2e04
leaq 0x8(%rsp), %rdi
callq 0x71614
xorl %r12d, %r12d
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x2a2ea9
lock
decl (%rax)
jne 0x2a2ea9
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x2a2ea1
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2a2ea9
movq %rsi, %rdi
callq 0x5f3e0
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x2a2eda
lock
decl (%rax)
jne 0x2a2eda
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x2a2ed2
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2a2eda
movq %rsi, %rdi
callq 0x5f3e0
movl %r12d, %eax
addq $0x168, %rsp # imm = 0x168
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x2a300f
jmp 0x2a300f
movq %rax, %rbx
leaq 0xc0(%rsp), %rdi
callq 0x6b03a
jmp 0x2a2f1c
jmp 0x2a2f19
jmp 0x2a2f11
jmp 0x2a2f11
movq %rax, %rbx
jmp 0x2a2fa6
movq %rax, %rbx
pushq $0x48
popq %r14
vxorps %xmm0, %xmm0, %xmm0
movq 0xd8(%rsp,%r14), %rax
testq %rax, %rax
je 0x2a2f63
lock
decl (%rax)
jne 0x2a2f63
movq 0xd0(%rsp,%r14), %rsi
movq 0xf0(%rsp,%r14), %rdi
testq %rdi, %rdi
je 0x2a2f57
movq (%rdi), %rax
callq *0x18(%rax)
vxorps %xmm0, %xmm0, %xmm0
jmp 0x2a2f63
movq %rsi, %rdi
callq 0x5f3e0
vxorps %xmm0, %xmm0, %xmm0
leaq (%rsp,%r14), %rax
addq $0xd0, %rax
andq $0x0, 0x40(%rax)
vmovups %xmm0, 0xc(%rax)
vmovups %xmm0, (%rax)
andl $0x0, 0x38(%rax)
vmovups %xmm0, 0x28(%rax)
addq $-0x48, %r14
cmpq $-0x48, %r14
jne 0x2a2f24
jmp 0x2a2f9c
jmp 0x2a300f
movq %rax, %rbx
jmp 0x2a2fd4
jmp 0x2a300f
movq %rax, %rbx
leaq 0x8(%rsp), %rdi
callq 0x71614
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x2a2fd4
lock
decl (%rax)
jne 0x2a2fd4
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0x2a2fce
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2a2fd4
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x2a3005
lock
decl (%rax)
jne 0x2a3005
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
jne 0x2a2fff
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2a3005
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
jmp 0x2a300f
movq %rax, %rdi
callq 0x61d68
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/convolutiondepthwise_x86_fma.cpp |
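Note: the forward above re-expresses the flattened weight (and bias) blobs as pack1 by folding the packing factor out: `w *= elempack; elemsize /= elempack; elempack = 1;`. The total byte count `w * elemsize` is invariant; only the bookkeeping moves. A small self-contained sketch of that invariant (the struct is a stand-in, not ncnn::Mat):

#include <cassert>
#include <cstddef>

struct PackedDims
{
    int w;            // packed-element count
    size_t elemsize;  // bytes per packed element
    int elempack;     // lanes per packed element
};

PackedDims as_pack1(PackedDims m)
{
    // e.g. { w=4, elemsize=32, elempack=8 } -> { w=32, elemsize=4, elempack=1 }
    m.w *= m.elempack;
    m.elemsize /= m.elempack;
    m.elempack = 1;
    return m;
}

int main()
{
    PackedDims m{4, 32, 8};
    PackedDims p = as_pack1(m);
    assert(p.w * p.elemsize == m.w * m.elemsize); // 128 bytes either way
    return 0;
}

In the asm this is the `imull %ecx, %eax` / `divq %rcx` pair applied to 0x9c(%rsp) and 0x80(%rsp), once for the weights and once more for the bias when bias_term is set.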
virtual thunk to ncnn::ConvolutionDepthWise_x86_fma::forward(std::vector<ncnn::Mat, std::allocator<ncnn::Mat>> const&, std::vector<ncnn::Mat, std::allocator<ncnn::Mat>>&, ncnn::Option const&) const | int ConvolutionDepthWise_x86_fma::forward(const std::vector<Mat>& bottom_blobs, std::vector<Mat>& top_blobs, const Option& opt) const
{
const Mat& bottom_blob = bottom_blobs[0];
const Mat& _weight_data = bottom_blobs[1];
Mat& top_blob = top_blobs[0];
const int _kernel_w = _weight_data.w;
const int _kernel_h = _weight_data.h;
const int _num_output = _weight_data.c * _weight_data.elempack;
Mat weight_data_flattened;
flatten(_weight_data, weight_data_flattened, opt);
if (weight_data_flattened.empty())
return -100;
// weight_data_flattened as pack1
weight_data_flattened.w *= weight_data_flattened.elempack;
weight_data_flattened.elemsize /= weight_data_flattened.elempack;
weight_data_flattened.elempack = 1;
Mat bias_data_flattened;
if (bias_term)
{
const Mat& _bias_data = bottom_blobs[2];
flatten(_bias_data, bias_data_flattened, opt);
if (bias_data_flattened.empty())
return -100;
// bias_data_flattened as pack1
bias_data_flattened.w *= bias_data_flattened.elempack;
bias_data_flattened.elemsize /= bias_data_flattened.elempack;
bias_data_flattened.elempack = 1;
}
ncnn::Layer* op = ncnn::create_layer(ncnn::LayerType::ConvolutionDepthWise);
ncnn::ParamDict pd;
pd.set(0, _num_output);
pd.set(1, _kernel_w);
pd.set(11, _kernel_h);
pd.set(2, dilation_w);
pd.set(12, dilation_h);
pd.set(3, stride_w);
pd.set(13, stride_h);
pd.set(4, pad_left);
pd.set(15, pad_right);
pd.set(14, pad_top);
pd.set(16, pad_bottom);
pd.set(18, pad_value);
pd.set(5, bias_term);
pd.set(6, weight_data_flattened.w);
pd.set(7, group);
pd.set(8, int8_scale_term);
pd.set(9, activation_type);
pd.set(10, activation_params);
op->load_param(pd);
ncnn::Mat weights[2];
weights[0] = weight_data_flattened;
weights[1] = bias_data_flattened;
op->load_model(ncnn::ModelBinFromMatArray(weights));
op->create_pipeline(opt);
op->forward(bottom_blob, top_blob, opt);
op->destroy_pipeline(opt);
delete op;
return 0;
} | movq (%rdi), %rax
addq -0x40(%rax), %rdi
jmp 0x2a2888
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/convolutiondepthwise_x86_fma.cpp |
ncnn::ConvolutionDepthWise_x86_avx::create_pipeline(ncnn::Option const&) | int ConvolutionDepthWise_x86_avx::create_pipeline(const Option& opt)
{
if (dynamic_weight)
return 0;
activation = create_activation_layer(activation_type, activation_params, opt);
#if NCNN_INT8
if (opt.use_int8_inference && weight_data.elemsize == (size_t)1u)
{
return create_pipeline_int8_x86(opt);
}
#endif
const int maxk = kernel_w * kernel_h;
int channels = (weight_data_size / group) / maxk / (num_output / group) * group;
// depth-wise
if (channels == group && group == num_output)
{
int elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
#if __AVX512F__
elempack = channels % 16 == 0 ? 16 : channels % 8 == 0 ? 8 : channels % 4 == 0 ? 4 : 1;
#elif __AVX__
elempack = channels % 8 == 0 ? 8 : channels % 4 == 0 ? 4 : 1;
#else
elempack = channels % 4 == 0 ? 4 : 1;
#endif
}
#endif // __SSE2__
#if __SSE2__
#if __AVX__
// pack16
#if __AVX512F__
if (elempack == 16)
{
Mat weight_data_r2 = weight_data.reshape(maxk, group);
convert_packing(weight_data_r2, weight_data_tm, 16, opt);
}
#endif // __AVX512F__
// pack8
if (elempack == 8)
{
Mat weight_data_r2 = weight_data.reshape(maxk, group);
convert_packing(weight_data_r2, weight_data_tm, 8, opt);
}
#endif // __AVX__
// pack4
if (elempack == 4)
{
Mat weight_data_r2 = weight_data.reshape(maxk, group);
convert_packing(weight_data_r2, weight_data_tm, 4, opt);
}
#endif // __SSE2__
if (elempack == 1)
{
// depth-wise specific
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
weight_data_tm = weight_data;
}
else if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
weight_data_tm = weight_data;
}
else
{
create_group_ops(opt);
}
}
if (opt.lightmode)
{
weight_data.release();
}
return 0;
}
// group convolution
create_group_ops(opt);
if (opt.lightmode)
{
weight_data.release();
}
return 0;
} | pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x50, %rsp
movq (%rdi), %rax
movq -0x18(%rax), %r13
cmpl $0x0, 0x160(%rdi,%r13)
je 0x2a30ed
xorl %eax, %eax
addq $0x50, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
retq
movq %rsi, %r14
movq %rdi, %rbx
movl 0x110(%rdi,%r13), %ecx
decl %ecx
cmpl $0x5, %ecx
ja 0x2a3444
leaq 0x15559b(%rip), %rax # 0x3f86a8
movslq (%rax,%rcx,4), %rcx
addq %rax, %rcx
jmpq *%rcx
pushq $0x1a
popq %rdi
callq 0x782bf
movq %rax, %r15
leaq 0x8(%rsp), %r12
movq %r12, %rdi
callq 0x71548
movq (%r15), %rax
movq %r15, %rdi
movq %r12, %rsi
callq *0x10(%rax)
jmp 0x2a3280
pushq $0x47
popq %rdi
callq 0x782bf
movq %rax, %r15
leaq 0x8(%rsp), %r12
movq %r12, %rdi
callq 0x71548
movq (%r15), %rax
movq %r15, %rdi
movq %r12, %rsi
callq *0x10(%rax)
jmp 0x2a3280
pushq $0x36
popq %rdi
callq 0x782bf
movq %rax, %r15
leaq 0x8(%rsp), %r12
movq %r12, %rdi
callq 0x71548
movq 0x118(%rbx,%r13), %rax
vmovss (%rax), %xmm0
movq %r12, %rdi
xorl %esi, %esi
callq 0x71952
movq 0x118(%rbx,%r13), %rax
vmovss 0x4(%rax), %xmm0
leaq 0x8(%rsp), %rdi
pushq $0x1
popq %rsi
callq 0x71952
movq (%r15), %rax
leaq 0x8(%rsp), %rsi
movq %r15, %rdi
callq *0x10(%rax)
jmp 0x2a3280
pushq $0x1e
popq %rdi
callq 0x782bf
movq %rax, %r15
leaq 0x8(%rsp), %r12
movq %r12, %rdi
callq 0x71548
movq (%r15), %rax
movq %r15, %rdi
movq %r12, %rsi
callq *0x10(%rax)
jmp 0x2a3280
pushq $0x1a
popq %rdi
callq 0x782bf
movq %rax, %r15
leaq 0x8(%rsp), %r12
movq %r12, %rdi
callq 0x71548
movq 0x118(%rbx,%r13), %rax
vmovss (%rax), %xmm0
movq %r12, %rdi
xorl %esi, %esi
callq 0x71952
movq (%r15), %rax
leaq 0x8(%rsp), %rsi
movq %r15, %rdi
callq *0x10(%rax)
jmp 0x2a3280
pushq $0x43
popq %rdi
callq 0x782bf
movq %rax, %r15
leaq 0x8(%rsp), %r12
movq %r12, %rdi
callq 0x71548
movq 0x118(%rbx,%r13), %rax
vmovss (%rax), %xmm0
movq %r12, %rdi
xorl %esi, %esi
callq 0x71952
movq 0x118(%rbx,%r13), %rax
vmovss 0x4(%rax), %xmm0
leaq 0x8(%rsp), %rdi
pushq $0x1
popq %rsi
callq 0x71952
movq (%r15), %rax
leaq 0x8(%rsp), %rsi
movq %r15, %rdi
callq *0x10(%rax)
leaq 0x8(%rsp), %rdi
callq 0x71614
movq (%r15), %rax
movq %r15, %rdi
movq %r14, %rsi
callq *0x20(%rax)
movq (%rbx), %rax
movq %r15, 0x8(%rbx)
movq -0x18(%rax), %r15
cmpb $0x1, 0x1e(%r14)
jne 0x2a32c3
cmpq $0x1, 0x178(%rbx,%r15)
jne 0x2a32c3
movq %rbx, %rdi
movq %r14, %rsi
callq 0x2a3642
jmp 0x2a30dd
movl 0xd0(%rbx,%r15), %esi
movl 0xd4(%rbx,%r15), %r10d
movl 0xd8(%rbx,%r15), %r11d
movl %r11d, %r8d
imull %r10d, %r8d
movl 0x104(%rbx,%r15), %eax
movl 0x108(%rbx,%r15), %ecx
cltd
idivl %ecx
cltd
idivl %r8d
movl %eax, %edi
movl %esi, %eax
cltd
idivl %ecx
movl %eax, %r9d
movl %edi, %eax
cltd
idivl %r9d
cmpl %esi, %ecx
jne 0x2a33e2
imull %ecx, %eax
cmpl %ecx, %eax
jne 0x2a33e2
leaq (%rbx,%r15), %rsi
cmpb $0x1, 0x27(%r14)
jne 0x2a3339
testb $0x7, %cl
je 0x2a344c
testb $0x3, %cl
je 0x2a34ad
xorl $0x3, %r10d
xorl $0x3, %r11d
orl %r10d, %r11d
jne 0x2a33e2
cmpl $0x1, 0xdc(%rsi)
jne 0x2a336e
cmpl $0x1, 0xe0(%rsi)
jne 0x2a336e
cmpl $0x1, 0xe4(%rsi)
jne 0x2a336e
cmpl $0x1, 0xe8(%rsi)
je 0x2a3392
cmpl $0x1, 0xdc(%rsi)
jne 0x2a33e2
cmpl $0x1, 0xe0(%rsi)
jne 0x2a33e2
cmpl $0x2, 0xe4(%rsi)
jne 0x2a33e2
cmpl $0x2, 0xe8(%rsi)
jne 0x2a33e2
addq $0x168, %r15 # imm = 0x168
cmpq $0x28, %r15
je 0x2a33ed
movq 0x8(%rbx,%r15), %rax
testq %rax, %rax
je 0x2a33ac
lock
incl (%rax)
leaq 0x28(%rbx), %r12
movq 0x30(%rbx), %rax
testq %rax, %rax
je 0x2a3552
lock
decl (%rax)
jne 0x2a3552
movq 0x28(%rbx), %rsi
movq 0x48(%rbx), %rdi
testq %rdi, %rdi
je 0x2a354a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2a3552
movq %rbx, %rdi
movq %r14, %rsi
callq 0x2a38ae
cmpb $0x1, (%r14)
jne 0x2a30dd
movq (%rbx), %rax
movq -0x18(%rax), %rax
leaq (%rbx,%rax), %r14
addq %rax, %rbx
addq $0x168, %rbx # imm = 0x168
movq 0x8(%rbx), %rax
testq %rax, %rax
je 0x2a3512
lock
decl (%rax)
jne 0x2a3512
movq 0x168(%r14), %rsi
movq 0x188(%r14), %rdi
testq %rdi, %rdi
je 0x2a350a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2a3512
xorl %r15d, %r15d
jmp 0x2a3299
addq $0x168, %rsi # imm = 0x168
leaq 0x8(%rsp), %r15
movq %r15, %rdi
movl %r8d, %edx
xorl %r8d, %r8d
callq 0x62e4e
leaq 0x28(%rbx), %rsi
pushq $0x8
popq %rdx
movq %r15, %rdi
movq %r14, %rcx
callq 0x64e3b
movq 0x10(%rsp), %rax
testq %rax, %rax
je 0x2a33ed
lock
decl (%rax)
jne 0x2a33ed
movq 0x8(%rsp), %rsi
movq 0x28(%rsp), %rdi
testq %rdi, %rdi
je 0x2a353d
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2a33ed
addq $0x168, %rsi # imm = 0x168
leaq 0x8(%rsp), %r15
movq %r15, %rdi
movl %r8d, %edx
xorl %r8d, %r8d
callq 0x62e4e
leaq 0x28(%rbx), %rsi
pushq $0x4
popq %rdx
movq %r15, %rdi
movq %r14, %rcx
callq 0x64e3b
movq 0x10(%rsp), %rax
testq %rax, %rax
je 0x2a33ed
lock
decl (%rax)
jne 0x2a33ed
movq 0x8(%rsp), %rsi
movq 0x28(%rsp), %rdi
testq %rdi, %rdi
je 0x2a353d
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2a33ed
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x1a8(%r14)
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, 0xc(%rbx)
vmovups %xmm0, (%rbx)
vmovups %xmm0, 0x190(%r14)
andl $0x0, 0x1a0(%r14)
jmp 0x2a30dd
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2a33ed
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x68(%rbx)
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, 0xc(%r12)
vmovups %xmm0, (%r12)
andl $0x0, 0x60(%rbx)
vmovups %xmm0, 0x50(%rbx)
vmovups (%rbx,%r15), %xmm0
vmovups %xmm0, 0x28(%rbx)
movq 0x10(%rbx,%r15), %rax
movq %rax, 0x38(%rbx)
movl 0x18(%rbx,%r15), %eax
movl %eax, 0x40(%rbx)
movq 0x20(%rbx,%r15), %rax
movq %rax, 0x48(%rbx)
vmovups 0x28(%rbx,%r15), %xmm0
vmovups %xmm0, 0x50(%rbx)
movl 0x38(%rbx,%r15), %eax
movl %eax, 0x60(%rbx)
movq 0x40(%rbx,%r15), %rax
movq %rax, 0x68(%rbx)
jmp 0x2a33ed
jmp 0x2a361a
jmp 0x2a361a
movq %rax, %rbx
movq 0x10(%rsp), %rax
testq %rax, %rax
je 0x2a3639
lock
decl (%rax)
jne 0x2a3639
movq 0x8(%rsp), %rsi
movq 0x28(%rsp), %rdi
testq %rdi, %rdi
je 0x2a3608
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2a3639
jmp 0x2a361a
movq %rax, %rbx
movq 0x10(%rsp), %rax
testq %rax, %rax
je 0x2a3639
lock
decl (%rax)
jne 0x2a3639
movq 0x8(%rsp), %rsi
movq 0x28(%rsp), %rdi
testq %rdi, %rdi
jne 0x2a3612
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2a3639
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2a3639
movq %rax, %rdi
callq 0x61d68
jmp 0x2a362c
jmp 0x2a362c
jmp 0x2a362c
jmp 0x2a362c
jmp 0x2a362c
movq %rax, %rbx
leaq 0x8(%rsp), %rdi
callq 0x71614
movq %rbx, %rdi
callq 0x5f340
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/convolutiondepthwise_x86_avx.cpp |
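Note: the `testb $0x7, %cl` / `testb $0x3, %cl` pair in the listing above is the compiled form of the AVX packing choice in the source: prefer 8-wide, then 4-wide, else scalar. A direct restatement as a standalone helper (the function name is mine; the logic is the source's):

// Mirrors `elempack = channels % 8 == 0 ? 8 : channels % 4 == 0 ? 4 : 1;`
// from the __AVX__ branch of create_pipeline.
int choose_elempack_avx(int channels, bool use_packing_layout)
{
    if (!use_packing_layout)
        return 1;
    if (channels % 8 == 0) return 8; // a full __m256 of fp32
    if (channels % 4 == 0) return 4; // __m128 fallback
    return 1;
}

Because both divisors are powers of two, `channels % 8` and `channels % 4` reduce to the low-bit tests seen in the asm.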
ncnn::ConvolutionDepthWise_x86_avx::create_pipeline_int8_x86(ncnn::Option const&) | int ConvolutionDepthWise_x86_avx::create_pipeline_int8_x86(const Option& opt)
{
const int maxk = kernel_w * kernel_h;
int channels = (weight_data_size / group) / maxk / (num_output / group) * group;
// depth-wise
if (channels == group && group == num_output)
{
int elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
elempack = channels % 8 == 0 ? 8 : 1;
}
#endif // __SSE2__
if (elempack == 8)
{
Mat weight_data_r2 = weight_data.reshape(maxk, group);
convert_packing(weight_data_r2, weight_data_tm, 8, opt);
}
if (elempack == 1)
{
weight_data_tm = weight_data;
}
return 0;
}
// group convolution
create_group_ops(opt);
if (opt.lightmode)
{
weight_data.release();
}
return 0;
} | pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $0x48, %rsp
movq %rsi, %r15
movq %rdi, %rbx
movq (%rdi), %rax
movq -0x18(%rax), %r14
movl 0xd8(%rdi,%r14), %r8d
imull 0xd4(%rdi,%r14), %r8d
movl 0xd0(%rdi,%r14), %esi
movl 0x104(%rdi,%r14), %eax
movl 0x108(%rdi,%r14), %ecx
cltd
idivl %ecx
cltd
idivl %r8d
movl %eax, %edi
movl %esi, %eax
cltd
idivl %ecx
movl %eax, %r9d
movl %edi, %eax
cltd
idivl %r9d
cmpl %esi, %ecx
jne 0x2a3724
imull %ecx, %eax
cmpl %ecx, %eax
jne 0x2a3724
testb $0x7, %cl
jne 0x2a3777
cmpb $0x0, 0x27(%r15)
je 0x2a3777
addq %rbx, %r14
addq $0x168, %r14 # imm = 0x168
movq %rsp, %r12
movq %r12, %rdi
movq %r14, %rsi
movl %r8d, %edx
xorl %r8d, %r8d
callq 0x62e4e
addq $0x28, %rbx
pushq $0x8
popq %rdx
movq %r12, %rdi
movq %rbx, %rsi
movq %r15, %rcx
callq 0x64e3b
movq 0x8(%rsp), %rax
testq %rax, %rax
je 0x2a385d
lock
decl (%rax)
jne 0x2a385d
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
je 0x2a37ec
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2a385d
movq %rbx, %rdi
movq %r15, %rsi
callq 0x2a38ae
cmpb $0x1, (%r15)
jne 0x2a385d
movq (%rbx), %rax
movq -0x18(%rax), %rax
leaq (%rbx,%rax), %r14
addq %rax, %rbx
addq $0x168, %rbx # imm = 0x168
movq 0x8(%rbx), %rax
testq %rax, %rax
je 0x2a37c4
lock
decl (%rax)
jne 0x2a37c4
movq 0x168(%r14), %rsi
movq 0x188(%r14), %rdi
testq %rdi, %rdi
je 0x2a37bc
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2a37c4
addq $0x168, %r14 # imm = 0x168
cmpq $0x28, %r14
je 0x2a385d
movq 0x8(%rbx,%r14), %rax
testq %rax, %rax
je 0x2a3795
lock
incl (%rax)
leaq 0x28(%rbx), %r15
movq 0x30(%rbx), %rax
testq %rax, %rax
je 0x2a37fe
lock
decl (%rax)
jne 0x2a37fe
movq 0x28(%rbx), %rsi
movq 0x48(%rbx), %rdi
testq %rdi, %rdi
je 0x2a37f6
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2a37fe
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x1a8(%r14)
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, 0xc(%rbx)
vmovups %xmm0, (%rbx)
vmovups %xmm0, 0x190(%r14)
andl $0x0, 0x1a0(%r14)
jmp 0x2a385d
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2a385d
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x68(%rbx)
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, 0xc(%r15)
vmovups %xmm0, (%r15)
andl $0x0, 0x60(%rbx)
vmovups %xmm0, 0x50(%rbx)
vmovups (%rbx,%r14), %xmm0
vmovups %xmm0, 0x28(%rbx)
movq 0x10(%rbx,%r14), %rax
movq %rax, 0x38(%rbx)
movl 0x18(%rbx,%r14), %eax
movl %eax, 0x40(%rbx)
movq 0x20(%rbx,%r14), %rax
movq %rax, 0x48(%rbx)
vmovups 0x28(%rbx,%r14), %xmm0
vmovups %xmm0, 0x50(%rbx)
movl 0x38(%rbx,%r14), %eax
movl %eax, 0x60(%rbx)
movq 0x40(%rbx,%r14), %rax
movq %rax, 0x68(%rbx)
xorl %eax, %eax
addq $0x48, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
retq
jmp 0x2a38a5
movq %rax, %rbx
movq 0x8(%rsp), %rax
testq %rax, %rax
je 0x2a389d
lock
decl (%rax)
jne 0x2a389d
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
jne 0x2a3897
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2a389d
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/convolutiondepthwise_x86_avx.cpp |
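Note: the int8 pipeline above is simpler than the fp32 one — it only distinguishes pack8 from pack1 (a single `testb $0x7, %cl`, no 4-wide tier), presumably because the int8 depthwise kernels in this file are written for 8-lane data only. Restated under the same hedging as the helper above:

// Mirrors `elempack = channels % 8 == 0 ? 8 : 1;` from
// create_pipeline_int8_x86; there is deliberately no pack4 tier.
int choose_elempack_int8(int channels, bool use_packing_layout)
{
    return (use_packing_layout && channels % 8 == 0) ? 8 : 1;
}

When the pack8 path is not taken, weight_data is copied into weight_data_tm verbatim — the block of vmovups stores near the end of the listing.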
ncnn::ConvolutionDepthWise_x86_avx::create_group_ops(ncnn::Option const&) | int ConvolutionDepthWise_x86_avx::create_group_ops(const Option& opt)
{
// create Convolution op for each group
const int maxk = kernel_w * kernel_h;
int channels = (weight_data_size / group) / maxk / (num_output / group) * group;
for (int i = 0; i < (int)group_ops.size(); i++)
delete group_ops[i];
group_ops.clear();
const int channels_g = channels / group;
const int num_output_g = num_output / group;
group_ops.resize(group);
for (int g = 0; g < group; g++)
{
Mat weight_data_g = weight_data.range(maxk * channels_g * num_output_g * g, maxk * channels_g * num_output_g).clone();
Mat bias_data_g;
if (bias_term)
bias_data_g = bias_data.range(num_output_g * g, num_output_g);
ncnn::Layer* op = ncnn::create_layer(ncnn::LayerType::Convolution);
// set param
ncnn::ParamDict pd;
pd.set(0, num_output_g); // num_output
pd.set(1, kernel_w);
pd.set(11, kernel_h);
pd.set(2, dilation_w);
pd.set(12, dilation_h);
pd.set(3, stride_w);
pd.set(13, stride_h);
pd.set(4, 0); // pad_w
pd.set(14, 0); // pad_h
pd.set(5, bias_term);
pd.set(6, maxk * channels_g * num_output_g); // weight_data_size
pd.set(8, int8_scale_term);
pd.set(9, activation_type);
pd.set(10, activation_params);
op->load_param(pd);
// set weights
if (bias_term)
{
ncnn::Mat weights[5];
weights[0] = weight_data_g;
weights[1] = bias_data_g;
#if NCNN_INT8
if (int8_scale_term)
{
Mat weight_data_int8_scales_g(num_output_g);
weight_data_int8_scales_g.fill(weight_data_int8_scales[g]);
weights[2] = weight_data_int8_scales_g;
weights[3] = bottom_blob_int8_scales.range(g, 1);
}
if (int8_scale_term > 100)
{
weights[4] = top_blob_int8_scales.range(g, 1);
}
#endif
op->load_model(ModelBinFromMatArray(weights));
}
else
{
ncnn::Mat weights[4];
weights[0] = weight_data_g;
#if NCNN_INT8
if (int8_scale_term)
{
Mat weight_data_int8_scales_g(num_output_g);
weight_data_int8_scales_g.fill(weight_data_int8_scales[g]);
weights[1] = weight_data_int8_scales_g;
weights[2] = bottom_blob_int8_scales.range(g, 1);
}
if (int8_scale_term > 100)
{
weights[3] = top_blob_int8_scales.range(g, 1);
}
#endif
op->load_model(ModelBinFromMatArray(weights));
}
op->create_pipeline(opt);
group_ops[g] = op;
}
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x278, %rsp # imm = 0x278
movq %rsi, 0x250(%rsp)
movq %rdi, %r14
movq (%rdi), %rax
movq -0x18(%rax), %rdx
movl 0xd0(%rdi,%rdx), %ecx
movl 0xd8(%rdi,%rdx), %ebp
imull 0xd4(%rdi,%rdx), %ebp
movl 0x104(%rdi,%rdx), %eax
movl 0x108(%rdi,%rdx), %r15d
cltd
idivl %r15d
cltd
idivl %ebp
movl %eax, %esi
movl %ecx, %eax
cltd
idivl %r15d
movl %eax, %ecx
movl %esi, %eax
cltd
idivl %ecx
movl %eax, %ebx
leaq 0x10(%rdi), %rax
movq %rax, 0x1e0(%rsp)
xorl %r12d, %r12d
movq 0x10(%r14), %rax
movq 0x18(%r14), %rcx
movq %rcx, %rdx
subq %rax, %rdx
shrq $0x3, %rdx
movslq %edx, %rdx
cmpq %rdx, %r12
jge 0x2a394b
movq (%rax,%r12,8), %rdi
testq %rdi, %rdi
je 0x2a3946
movq (%rdi), %rax
callq *0x8(%rax)
incq %r12
jmp 0x2a391d
imull %r15d, %ebx
cmpq %rax, %rcx
je 0x2a3958
movq %rax, 0x18(%r14)
movq (%r14), %rax
movq -0x18(%rax), %rcx
movslq 0x108(%r14,%rcx), %rsi
movl %ebx, %eax
cltd
idivl %esi
movl %eax, %ebx
movl 0xd0(%r14,%rcx), %eax
cltd
idivl %esi
movl %eax, %r15d
movq 0x1e0(%rsp), %rdi
callq 0x6fbc2
leaq 0x118(%r14), %rax
movq %rax, 0x258(%rsp)
imull %ebp, %ebx
imull %r15d, %ebx
movl %ebx, 0x6c(%rsp)
movslq %ebx, %rax
movq %rax, 0x260(%rsp)
movslq %r15d, %rax
movq %rax, 0x248(%rsp)
pushq $0x1
popq %rbx
xorl %edx, %edx
movl %r15d, 0x14(%rsp)
movq (%r14), %rax
movq -0x18(%rax), %rax
movslq 0x108(%r14,%rax), %rcx
cmpq %rcx, %rdx
jge 0x2a46c2
movq %rdx, %rcx
movq %rdx, 0x8(%rsp)
movq 0x260(%rsp), %rdi
imulq %rdi, %rcx
movq 0x178(%r14,%rax), %rdx
imulq %rdx, %rcx
addq 0x168(%r14,%rax), %rcx
movl 0x180(%r14,%rax), %esi
movq 0x188(%r14,%rax), %rax
movq %rcx, 0x70(%rsp)
andq $0x0, 0x78(%rsp)
movq %rdx, 0x80(%rsp)
movl %esi, 0x88(%rsp)
movq %rax, 0x90(%rsp)
movl %ebx, 0x98(%rsp)
movl %edi, 0x9c(%rsp)
movabsq $0x100000001, %rax # imm = 0x100000001
movq %rax, 0xa0(%rsp)
movl %ebx, 0xa8(%rsp)
movq %rdi, 0xb0(%rsp)
leaq 0x200(%rsp), %rdi
leaq 0x70(%rsp), %rsi
xorl %edx, %edx
callq 0x624f0
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x2a3aa9
lock
decl (%rax)
jne 0x2a3aa9
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x2a3aa1
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2a3aa9
movq %rsi, %rdi
callq 0x5f3e0
movq (%r14), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0x100(%r14,%rax)
je 0x2a3b19
movq 0x8(%rsp), %r15
movq 0x248(%rsp), %rcx
imulq %rcx, %r15
movq 0x1c0(%r14,%rax), %rsi
movq %rsi, 0x1f8(%rsp)
imulq %rsi, %r15
addq 0x1b0(%r14,%rax), %r15
movl 0x1c8(%r14,%rax), %edx
movl %edx, 0x1c(%rsp)
movq 0x1d0(%r14,%rax), %rax
movq %rax, 0x1f0(%rsp)
movl %ebx, %r13d
movl 0x14(%rsp), %eax
movl %eax, 0x18(%rsp)
movq %rcx, 0x1e8(%rsp)
jmp 0x2a3b53
xorl %r15d, %r15d
movq $0x0, 0x1f8(%rsp)
movl $0x0, 0x1c(%rsp)
movq $0x0, 0x1f0(%rsp)
xorl %r13d, %r13d
movl $0x0, 0x18(%rsp)
movq $0x0, 0x1e8(%rsp)
pushq $0x6
popq %rdi
callq 0x782bf
movq %rax, %rbp
leaq 0x268(%rsp), %r12
movq %r12, %rdi
callq 0x71548
movq %r12, %rdi
xorl %esi, %esi
movl 0x14(%rsp), %edx
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xd4(%r14,%rax), %edx
movq %r12, %rdi
movl %ebx, %esi
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xd8(%r14,%rax), %edx
movq %r12, %rdi
pushq $0xb
popq %rsi
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xdc(%r14,%rax), %edx
movq %r12, %rdi
pushq $0x2
popq %rsi
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xe0(%r14,%rax), %edx
movq %r12, %rdi
pushq $0xc
popq %rsi
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xe4(%r14,%rax), %edx
movq %r12, %rdi
pushq $0x3
popq %rsi
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xe8(%r14,%rax), %edx
movq %r12, %rdi
pushq $0xd
popq %rsi
callq 0x7193a
movq %r12, %rdi
pushq $0x4
popq %rsi
xorl %edx, %edx
callq 0x7193a
movq %r12, %rdi
pushq $0xe
popq %rsi
xorl %edx, %edx
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0x100(%r14,%rax), %edx
movq %r12, %rdi
pushq $0x5
popq %rsi
callq 0x7193a
movq %r12, %rdi
pushq $0x6
popq %rsi
movl 0x6c(%rsp), %edx
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0x10c(%r14,%rax), %edx
movq %r12, %rdi
pushq $0x8
popq %rsi
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0x110(%r14,%rax), %edx
movq %r12, %rdi
pushq $0x9
popq %rsi
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rdx
addq 0x258(%rsp), %rdx
movq %r12, %rdi
pushq $0xa
popq %rsi
callq 0x7196c
movq (%rbp), %rax
movq %rbp, %rdi
movq %r12, %rsi
callq *0x10(%rax)
movq (%r14), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0x100(%r14,%rax)
je 0x2a3d40
pushq $0x40
popq %rax
vxorps %xmm0, %xmm0, %xmm0
andq $0x0, 0x70(%rsp,%rax)
vmovups %xmm0, 0x30(%rsp,%rax)
vmovups %xmm0, 0x3c(%rsp,%rax)
vmovups %xmm0, 0x50(%rsp,%rax)
vmovups %xmm0, 0x5c(%rsp,%rax)
addq $0x48, %rax
cmpq $0x1a8, %rax # imm = 0x1A8
jne 0x2a3cce
movq 0x208(%rsp), %rax
testq %rax, %rax
je 0x2a3d08
lock
incl (%rax)
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x2a3dc1
lock
decl (%rax)
jne 0x2a3dc1
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x2a3db9
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2a3dc1
pushq $0x40
popq %rax
vxorps %xmm0, %xmm0, %xmm0
andq $0x0, 0x70(%rsp,%rax)
vmovups %xmm0, 0x30(%rsp,%rax)
vmovups %xmm0, 0x3c(%rsp,%rax)
vmovups %xmm0, 0x50(%rsp,%rax)
vmovups %xmm0, 0x5c(%rsp,%rax)
addq $0x48, %rax
cmpq $0x160, %rax # imm = 0x160
jne 0x2a3d47
movq 0x208(%rsp), %rax
testq %rax, %rax
je 0x2a3d81
lock
incl (%rax)
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x2a4178
lock
decl (%rax)
jne 0x2a4178
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x2a4170
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2a4178
movq %rsi, %rdi
callq 0x5f3e0
vmovaps 0x200(%rsp), %xmm0
vmovaps %xmm0, 0x70(%rsp)
movq 0x210(%rsp), %rax
movq %rax, 0x80(%rsp)
movl 0x218(%rsp), %eax
movl %eax, 0x88(%rsp)
movq 0x220(%rsp), %rax
movq %rax, 0x90(%rsp)
vmovups 0x228(%rsp), %xmm0
vmovups %xmm0, 0x98(%rsp)
movl 0x238(%rsp), %eax
movl %eax, 0xa8(%rsp)
movq 0x240(%rsp), %rax
movq %rax, 0xb0(%rsp)
movq 0xc0(%rsp), %rax
testq %rax, %rax
je 0x2a3e65
lock
decl (%rax)
jne 0x2a3e65
movq 0xb8(%rsp), %rsi
movq 0xd8(%rsp), %rdi
testq %rdi, %rdi
je 0x2a3e5d
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2a3e65
movq %rsi, %rdi
callq 0x5f3e0
movq %r15, 0xb8(%rsp)
andq $0x0, 0xc0(%rsp)
movq 0x1f8(%rsp), %rax
movq %rax, 0xc8(%rsp)
movl 0x1c(%rsp), %eax
movl %eax, 0xd0(%rsp)
movq 0x1f0(%rsp), %rax
movq %rax, 0xd8(%rsp)
movl %r13d, 0xe0(%rsp)
movl 0x18(%rsp), %eax
movl %eax, 0xe4(%rsp)
movl %r13d, 0xe8(%rsp)
movl %r13d, 0xec(%rsp)
movl %r13d, 0xf0(%rsp)
movq 0x1e8(%rsp), %rax
movq %rax, 0xf8(%rsp)
movq (%r14), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0x10c(%r14,%rax)
je 0x2a40fa
andq $0x0, 0x60(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
vmovups %xmm0, 0x2c(%rsp)
leaq 0x40(%rsp), %rax
vmovups %xmm0, 0xc(%rax)
vmovaps %xmm0, (%rax)
leaq 0x20(%rsp), %rdi
movl 0x14(%rsp), %esi
pushq $0x4
popq %rdx
xorl %ecx, %ecx
callq 0x635fa
movq (%r14), %rax
movq -0x18(%rax), %rax
movq 0x1f8(%r14,%rax), %rax
movq 0x8(%rsp), %rcx
vmovss (%rax,%rcx,4), %xmm0
movl 0x58(%rsp), %eax
imull 0x60(%rsp), %eax
movq 0x20(%rsp), %rcx
testl %eax, %eax
movl $0x0, %edx
cmovlel %edx, %eax
xorl %edx, %edx
cmpl %edx, %eax
je 0x2a3f6a
vmovss %xmm0, (%rcx,%rdx,4)
incq %rdx
jmp 0x2a3f5c
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x2a3f77
lock
incl (%rax)
movq 0x108(%rsp), %rax
testq %rax, %rax
je 0x2a3fae
lock
decl (%rax)
jne 0x2a3fae
movq 0x100(%rsp), %rsi
movq 0x120(%rsp), %rdi
testq %rdi, %rdi
je 0x2a3fa6
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2a3fae
movq %rsi, %rdi
callq 0x5f3e0
vmovaps 0x20(%rsp), %xmm0
vmovaps %xmm0, 0x100(%rsp)
movq 0x30(%rsp), %rax
movq %rax, 0x110(%rsp)
movl 0x38(%rsp), %eax
movl %eax, 0x118(%rsp)
movq 0x40(%rsp), %rax
movq %rax, 0x120(%rsp)
vmovups 0x48(%rsp), %xmm0
vmovups %xmm0, 0x128(%rsp)
movl 0x58(%rsp), %eax
movl %eax, 0x138(%rsp)
movq 0x60(%rsp), %rax
movq %rax, 0x140(%rsp)
movq (%r14), %rax
movq -0x18(%rax), %rax
movq 0x250(%r14,%rax), %r15
movq %r15, %r13
imulq 0x8(%rsp), %r13
addq 0x240(%r14,%rax), %r13
movl 0x258(%r14,%rax), %r12d
movq 0x260(%r14,%rax), %rbx
movq 0x150(%rsp), %rax
testq %rax, %rax
je 0x2a4070
lock
decl (%rax)
jne 0x2a4070
movq 0x148(%rsp), %rsi
movq 0x168(%rsp), %rdi
testq %rdi, %rdi
je 0x2a4068
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2a4070
movq %rsi, %rdi
callq 0x5f3e0
movq %r13, 0x148(%rsp)
andq $0x0, 0x150(%rsp)
movq %r15, 0x158(%rsp)
movl %r12d, 0x160(%rsp)
movq %rbx, 0x168(%rsp)
vbroadcastss 0x15455e(%rip), %xmm0 # 0x3f8600
vmovaps %xmm0, 0x170(%rsp)
movl $0x1, 0x180(%rsp)
movq $0x1, 0x188(%rsp)
movq 0x28(%rsp), %rax
testq %rax, %rax
pushq $0x1
popq %rbx
je 0x2a40f3
lock
decl (%rax)
jne 0x2a40f3
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x2a40eb
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2a40f3
movq %rsi, %rdi
callq 0x5f3e0
movq (%r14), %rax
movq -0x18(%rax), %rax
cmpl $0x65, 0x10c(%r14,%rax)
jl 0x2a431a
movq 0x298(%r14,%rax), %r15
movq %r15, %r13
imulq 0x8(%rsp), %r13
addq 0x288(%r14,%rax), %r13
movl 0x2a0(%r14,%rax), %ebx
movq 0x2a8(%r14,%rax), %r12
movq 0x198(%rsp), %rax
testq %rax, %rax
je 0x2a42c6
lock
decl (%rax)
jne 0x2a42c6
movq 0x190(%rsp), %rsi
movq 0x1b0(%rsp), %rdi
testq %rdi, %rdi
je 0x2a42be
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2a42c6
movq %rsi, %rdi
callq 0x5f3e0
vmovaps 0x200(%rsp), %xmm0
vmovaps %xmm0, 0x70(%rsp)
movq 0x210(%rsp), %rax
movq %rax, 0x80(%rsp)
movl 0x218(%rsp), %eax
movl %eax, 0x88(%rsp)
movq 0x220(%rsp), %rax
movq %rax, 0x90(%rsp)
vmovups 0x228(%rsp), %xmm0
vmovups %xmm0, 0x98(%rsp)
movl 0x238(%rsp), %eax
movl %eax, 0xa8(%rsp)
movq 0x240(%rsp), %rax
movq %rax, 0xb0(%rsp)
movq (%r14), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0x10c(%r14,%rax)
je 0x2a44fd
andq $0x0, 0x60(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
vmovups %xmm0, 0x2c(%rsp)
leaq 0x40(%rsp), %rax
vmovups %xmm0, 0xc(%rax)
vmovaps %xmm0, (%rax)
leaq 0x20(%rsp), %rdi
movl 0x14(%rsp), %esi
pushq $0x4
popq %rdx
xorl %ecx, %ecx
callq 0x635fa
movq (%r14), %rax
movq -0x18(%rax), %rax
movq 0x1f8(%r14,%rax), %rax
movq 0x8(%rsp), %rcx
vmovss (%rax,%rcx,4), %xmm0
movl 0x58(%rsp), %eax
imull 0x60(%rsp), %eax
movq 0x20(%rsp), %rcx
testl %eax, %eax
movl $0x0, %edx
cmovlel %edx, %eax
xorl %edx, %edx
cmpl %edx, %eax
je 0x2a4273
vmovss %xmm0, (%rcx,%rdx,4)
incq %rdx
jmp 0x2a4265
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x2a4280
lock
incl (%rax)
movq 0xc0(%rsp), %rax
testq %rax, %rax
je 0x2a43b2
lock
decl (%rax)
jne 0x2a43b2
movq 0xb8(%rsp), %rsi
movq 0xd8(%rsp), %rdi
testq %rdi, %rdi
je 0x2a43aa
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2a43b2
movq %rsi, %rdi
callq 0x5f3e0
movq %r13, 0x190(%rsp)
andq $0x0, 0x198(%rsp)
movq %r15, 0x1a0(%rsp)
movl %ebx, 0x1a8(%rsp)
movq %r12, 0x1b0(%rsp)
vbroadcastss 0x154309(%rip), %xmm0 # 0x3f8600
vmovups %xmm0, 0x1b8(%rsp)
movl $0x1, 0x1c8(%rsp)
movq $0x1, 0x1d0(%rsp)
pushq $0x1
popq %rbx
leaq 0x20(%rsp), %r15
movq %r15, %rdi
leaq 0x70(%rsp), %rsi
callq 0x6b00e
movq (%rbp), %rax
movq %rbp, %rdi
movq %r15, %rsi
callq *0x18(%rax)
movq %r15, %rdi
callq 0x6b03a
movl $0x120, %r15d # imm = 0x120
movq 0x78(%rsp,%r15), %rax
testq %rax, %rax
je 0x2a4378
lock
decl (%rax)
jne 0x2a4378
movq 0x70(%rsp,%r15), %rsi
movq 0x90(%rsp,%r15), %rdi
testq %rdi, %rdi
je 0x2a4370
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2a4378
movq %rsi, %rdi
callq 0x5f3e0
leaq (%rsp,%r15), %rax
addq $0x70, %rax
andq $0x0, 0x40(%rax)
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, 0xc(%rax)
vmovups %xmm0, (%rax)
andl $0x0, 0x38(%rax)
vmovups %xmm0, 0x28(%rax)
addq $-0x48, %r15
cmpq $-0x48, %r15
jne 0x2a4347
jmp 0x2a464b
movq %rsi, %rdi
callq 0x5f3e0
vmovaps 0x20(%rsp), %xmm0
vmovups %xmm0, 0xb8(%rsp)
movq 0x30(%rsp), %rax
movq %rax, 0xc8(%rsp)
movl 0x38(%rsp), %eax
movl %eax, 0xd0(%rsp)
movq 0x40(%rsp), %rax
movq %rax, 0xd8(%rsp)
vmovups 0x48(%rsp), %xmm0
vmovaps %xmm0, 0xe0(%rsp)
movl 0x58(%rsp), %eax
movl %eax, 0xf0(%rsp)
movq 0x60(%rsp), %rax
movq %rax, 0xf8(%rsp)
movq (%r14), %rax
movq -0x18(%rax), %rax
movq 0x250(%r14,%rax), %r15
movq %r15, %r13
imulq 0x8(%rsp), %r13
addq 0x240(%r14,%rax), %r13
movl 0x258(%r14,%rax), %ebx
movq 0x260(%r14,%rax), %r12
movq 0x108(%rsp), %rax
testq %rax, %rax
je 0x2a4474
lock
decl (%rax)
jne 0x2a4474
movq 0x100(%rsp), %rsi
movq 0x120(%rsp), %rdi
testq %rdi, %rdi
je 0x2a446c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2a4474
movq %rsi, %rdi
callq 0x5f3e0
movq %r13, 0x100(%rsp)
andq $0x0, 0x108(%rsp)
movq %r15, 0x110(%rsp)
movl %ebx, 0x118(%rsp)
movq %r12, 0x120(%rsp)
vbroadcastss 0x15415b(%rip), %xmm0 # 0x3f8600
vmovups %xmm0, 0x128(%rsp)
movl $0x1, 0x138(%rsp)
movq $0x1, 0x140(%rsp)
movq 0x28(%rsp), %rax
testq %rax, %rax
pushq $0x1
popq %rbx
je 0x2a44f6
lock
decl (%rax)
jne 0x2a44f6
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x2a44ee
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2a44f6
movq %rsi, %rdi
callq 0x5f3e0
movq (%r14), %rax
movq -0x18(%rax), %rax
cmpl $0x65, 0x10c(%r14,%rax)
jl 0x2a45c0
movq 0x298(%r14,%rax), %r15
movq %r15, %r13
imulq 0x8(%rsp), %r13
addq 0x288(%r14,%rax), %r13
movl 0x2a0(%r14,%rax), %ebx
movq 0x2a8(%r14,%rax), %r12
movq 0x150(%rsp), %rax
testq %rax, %rax
je 0x2a456c
lock
decl (%rax)
jne 0x2a456c
movq 0x148(%rsp), %rsi
movq 0x168(%rsp), %rdi
testq %rdi, %rdi
je 0x2a4564
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2a456c
movq %rsi, %rdi
callq 0x5f3e0
movq %r13, 0x148(%rsp)
andq $0x0, 0x150(%rsp)
movq %r15, 0x158(%rsp)
movl %ebx, 0x160(%rsp)
movq %r12, 0x168(%rsp)
vbroadcastss 0x154063(%rip), %xmm0 # 0x3f8600
vmovaps %xmm0, 0x170(%rsp)
movl $0x1, 0x180(%rsp)
movq $0x1, 0x188(%rsp)
pushq $0x1
popq %rbx
leaq 0x20(%rsp), %r15
movq %r15, %rdi
leaq 0x70(%rsp), %rsi
callq 0x6b00e
movq (%rbp), %rax
movq %rbp, %rdi
movq %r15, %rsi
callq *0x18(%rax)
movq %r15, %rdi
callq 0x6b03a
movl $0xd8, %r15d
movq 0x78(%rsp,%r15), %rax
testq %rax, %rax
je 0x2a461e
lock
decl (%rax)
jne 0x2a461e
movq 0x70(%rsp,%r15), %rsi
movq 0x90(%rsp,%r15), %rdi
testq %rdi, %rdi
je 0x2a4616
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2a461e
movq %rsi, %rdi
callq 0x5f3e0
leaq (%rsp,%r15), %rax
addq $0x70, %rax
andq $0x0, 0x40(%rax)
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, 0xc(%rax)
vmovups %xmm0, (%rax)
andl $0x0, 0x38(%rax)
vmovups %xmm0, 0x28(%rax)
addq $-0x48, %r15
cmpq $-0x48, %r15
jne 0x2a45ed
movq (%rbp), %rax
movq %rbp, %rdi
movq 0x250(%rsp), %rsi
callq *0x20(%rax)
leaq 0x268(%rsp), %rdi
movq 0x1e0(%rsp), %rax
movq (%rax), %rax
movq 0x8(%rsp), %rcx
movq %rbp, (%rax,%rcx,8)
callq 0x71614
movq 0x208(%rsp), %rax
testq %rax, %rax
je 0x2a46b5
lock
decl (%rax)
jne 0x2a46b5
movq 0x200(%rsp), %rsi
movq 0x220(%rsp), %rdi
testq %rdi, %rdi
je 0x2a46ad
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2a46b5
movq %rsi, %rdi
callq 0x5f3e0
movq 0x8(%rsp), %rdx
incq %rdx
jmp 0x2a39c3
xorl %eax, %eax
addq $0x278, %rsp # imm = 0x278
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x2a478a
jmp 0x2a4921
jmp 0x2a46e2
movq %rax, %rbx
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x2a479c
lock
decl (%rax)
jne 0x2a479c
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0x2a4718
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2a479c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2a479c
jmp 0x2a4921
jmp 0x2a4814
jmp 0x2a478a
jmp 0x2a472e
movq %rax, %rbx
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x2a4826
lock
decl (%rax)
jne 0x2a4826
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0x2a4764
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2a4826
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2a4826
jmp 0x2a4921
jmp 0x2a4921
jmp 0x2a4921
jmp 0x2a4921
jmp 0x2a478a
jmp 0x2a4814
movq %rax, %rbx
jmp 0x2a479c
movq %rax, %rbx
leaq 0x20(%rsp), %rdi
callq 0x6b03a
movl $0xd8, %r14d
vxorps %xmm0, %xmm0, %xmm0
movq 0x78(%rsp,%r14), %rax
testq %rax, %rax
je 0x2a47df
lock
decl (%rax)
jne 0x2a47df
movq 0x70(%rsp,%r14), %rsi
movq 0x90(%rsp,%r14), %rdi
testq %rdi, %rdi
je 0x2a47d3
movq (%rdi), %rax
callq *0x18(%rax)
vxorps %xmm0, %xmm0, %xmm0
jmp 0x2a47df
movq %rsi, %rdi
callq 0x5f3e0
vxorps %xmm0, %xmm0, %xmm0
leaq (%rsp,%r14), %rax
addq $0x70, %rax
andq $0x0, 0x40(%rax)
vmovups %xmm0, 0xc(%rax)
vmovups %xmm0, (%rax)
andl $0x0, 0x38(%rax)
vmovups %xmm0, 0x28(%rax)
addq $-0x48, %r14
cmpq $-0x48, %r14
jne 0x2a47a6
jmp 0x2a48d5
jmp 0x2a4921
jmp 0x2a4814
movq %rax, %rbx
jmp 0x2a4826
movq %rax, %rbx
leaq 0x20(%rsp), %rdi
callq 0x6b03a
movl $0x120, %r14d # imm = 0x120
vxorps %xmm0, %xmm0, %xmm0
movq 0x78(%rsp,%r14), %rax
testq %rax, %rax
je 0x2a4869
lock
decl (%rax)
jne 0x2a4869
movq 0x70(%rsp,%r14), %rsi
movq 0x90(%rsp,%r14), %rdi
testq %rdi, %rdi
je 0x2a485d
movq (%rdi), %rax
callq *0x18(%rax)
vxorps %xmm0, %xmm0, %xmm0
jmp 0x2a4869
movq %rsi, %rdi
callq 0x5f3e0
vxorps %xmm0, %xmm0, %xmm0
leaq (%rsp,%r14), %rax
addq $0x70, %rax
andq $0x0, 0x40(%rax)
vmovups %xmm0, 0xc(%rax)
vmovups %xmm0, (%rax)
andl $0x0, 0x38(%rax)
vmovups %xmm0, 0x28(%rax)
addq $-0x48, %r14
cmpq $-0x48, %r14
jne 0x2a4830
jmp 0x2a48d5
jmp 0x2a4921
jmp 0x2a48c9
movq %rax, %rbx
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x2a4919
lock
decl (%rax)
jne 0x2a4919
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x2a4909
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2a4919
jmp 0x2a4921
movq %rax, %rbx
jmp 0x2a48e2
jmp 0x2a4921
jmp 0x2a4921
movq %rax, %rbx
leaq 0x268(%rsp), %rdi
callq 0x71614
movq 0x208(%rsp), %rax
testq %rax, %rax
je 0x2a4919
lock
decl (%rax)
jne 0x2a4919
movq 0x200(%rsp), %rsi
movq 0x220(%rsp), %rdi
testq %rdi, %rdi
jne 0x2a4913
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2a4919
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/convolutiondepthwise_x86_avx.cpp |
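Note: create_group_ops above slices weight_data into one contiguous run per group, `weight_data.range(maxk * channels_g * num_output_g * g, maxk * channels_g * num_output_g)` — the imulq chain near the top of the listing. A hedged sketch of that index math with a worked depthwise case (struct and function names are mine): for a 3x3 layer with group = num_output = 32 and weight_data_size = 288, channels = (288/32)/9/(32/32)*32 = 32, channels_g = num_output_g = 1, and group g owns 9 weights starting at offset 9*g.

#include <cstdio>

struct GroupSlice
{
    long offset; // element offset of group g's weights
    long count;  // elements per group
};

GroupSlice group_weights(int weight_data_size, int maxk,
                         int num_output, int group, int g)
{
    // Same derivation as the source's channels formula.
    int channels = (weight_data_size / group) / maxk / (num_output / group) * group;
    int channels_g = channels / group;
    int num_output_g = num_output / group;
    long per_group = (long)maxk * channels_g * num_output_g;
    return GroupSlice{ per_group * g, per_group };
}

int main()
{
    GroupSlice s = group_weights(288, 9, 32, 32, 5);
    std::printf("offset=%ld count=%ld\n", s.offset, s.count); // offset=45 count=9
    return 0;
}

Each slice is then cloned and fed to a fresh Convolution op through ParamDict/load_model, which accounts for the long middle stretch of the listing.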
virtual thunk to ncnn::ConvolutionDepthWise_x86_avx::create_pipeline(ncnn::Option const&) | int ConvolutionDepthWise_x86_avx::create_pipeline(const Option& opt)
{
if (dynamic_weight)
return 0;
activation = create_activation_layer(activation_type, activation_params, opt);
#if NCNN_INT8
if (opt.use_int8_inference && weight_data.elemsize == (size_t)1u)
{
return create_pipeline_int8_x86(opt);
}
#endif
const int maxk = kernel_w * kernel_h;
int channels = (weight_data_size / group) / maxk / (num_output / group) * group;
// depth-wise
if (channels == group && group == num_output)
{
int elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
#if __AVX512F__
elempack = channels % 16 == 0 ? 16 : channels % 8 == 0 ? 8 : channels % 4 == 0 ? 4 : 1;
#elif __AVX__
elempack = channels % 8 == 0 ? 8 : channels % 4 == 0 ? 4 : 1;
#else
elempack = channels % 4 == 0 ? 4 : 1;
#endif
}
#endif // __SSE2__
#if __SSE2__
#if __AVX__
// pack16
#if __AVX512F__
if (elempack == 16)
{
Mat weight_data_r2 = weight_data.reshape(maxk, group);
convert_packing(weight_data_r2, weight_data_tm, 16, opt);
}
#endif // __AVX512F__
// pack8
if (elempack == 8)
{
Mat weight_data_r2 = weight_data.reshape(maxk, group);
convert_packing(weight_data_r2, weight_data_tm, 8, opt);
}
#endif // __AVX__
// pack4
if (elempack == 4)
{
Mat weight_data_r2 = weight_data.reshape(maxk, group);
convert_packing(weight_data_r2, weight_data_tm, 4, opt);
}
#endif // __SSE2__
if (elempack == 1)
{
// depth-wise specific
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
weight_data_tm = weight_data;
}
else if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
weight_data_tm = weight_data;
}
else
{
create_group_ops(opt);
}
}
if (opt.lightmode)
{
weight_data.release();
}
return 0;
}
// group convolution
create_group_ops(opt);
if (opt.lightmode)
{
weight_data.release();
}
return 0;
} | pushq %rax
movq (%rdi), %rax
addq -0x30(%rax), %rdi
callq 0x2a30be
xorl %eax, %eax
popq %rcx
retq
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/convolutiondepthwise_x86_avx.cpp |
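The three instructions above are the entire virtual thunk: load the vtable, add the vcall offset stored at a fixed negative slot (-0x30 here), and call the real create_pipeline body with the adjusted this pointer. A sketch of that Itanium-ABI adjustment; real_create_pipeline stands in for the callq 0x2a30be target and the names are illustrative:

#include <cstddef>

int real_create_pipeline(void* self, const void* opt); // the callq 0x2a30be target

int thunk_create_pipeline(void* self, const void* opt)
{
    // movq (%rdi), %rax      : fetch the vtable pointer
    // addq -0x30(%rax), %rdi : add the vcall offset stored below the address point
    std::ptrdiff_t* vtable = *static_cast<std::ptrdiff_t**>(self);
    void* adjusted = static_cast<char*>(self) + vtable[-6]; // -0x30 bytes = 6 slots
    return real_create_pipeline(adjusted, opt);
}

Note that the emitted thunk here does not forward the callee's value: it executes xorl %eax, %eax after the call and returns a fresh 0 itself.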
ncnn::ConvolutionDepthWise_x86_avx::destroy_pipeline(ncnn::Option const&) | int ConvolutionDepthWise_x86_avx::destroy_pipeline(const Option& opt)
{
if (activation)
{
activation->destroy_pipeline(opt);
delete activation;
activation = 0;
}
for (int i = 0; i < (int)group_ops.size(); i++)
{
group_ops[i]->destroy_pipeline(opt);
delete group_ops[i];
}
group_ops.clear();
return 0;
} | pushq %r15
pushq %r14
pushq %rbx
movq %rsi, %r14
movq %rdi, %rbx
movq 0x8(%rdi), %rdi
testq %rdi, %rdi
je 0x2a496d
movq (%rdi), %rax
movq %r14, %rsi
callq *0x28(%rax)
movq 0x8(%rbx), %rdi
testq %rdi, %rdi
je 0x2a4968
movq (%rdi), %rax
callq *0x8(%rax)
andq $0x0, 0x8(%rbx)
xorl %r15d, %r15d
movq 0x10(%rbx), %rax
movq 0x18(%rbx), %rcx
movq %rcx, %rdx
subq %rax, %rdx
shrq $0x3, %rdx
movslq %edx, %rdx
cmpq %rdx, %r15
jge 0x2a49af
movq (%rax,%r15,8), %rdi
movq (%rdi), %rax
movq %r14, %rsi
callq *0x28(%rax)
movq 0x10(%rbx), %rax
movq (%rax,%r15,8), %rdi
testq %rdi, %rdi
je 0x2a49aa
movq (%rdi), %rax
callq *0x8(%rax)
incq %r15
jmp 0x2a4970
cmpq %rax, %rcx
je 0x2a49b8
movq %rax, 0x18(%rbx)
xorl %eax, %eax
popq %rbx
popq %r14
popq %r15
retq
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/convolutiondepthwise_x86_avx.cpp |
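One codegen detail worth noting above: group_ops.clear() costs a single store. The vector holds raw Layer* values, which are trivially destructible, so clear() just rewinds the end pointer, the movq %rax, 0x18(%rbx) guarded by the cmpq. A minimal reproduction, assuming nothing beyond standard std::vector semantics:

#include <vector>

void clear_ptrs(std::vector<void*>& v)
{
    v.clear(); // trivially destructible elements: compiles to end_ = begin_
}

The loop body also reloads 0x10(%rbx) and recomputes the size every iteration because, as far as the compiler can prove, the virtual destroy_pipeline and delete calls could mutate the vector.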
virtual thunk to ncnn::ConvolutionDepthWise_x86_avx::destroy_pipeline(ncnn::Option const&) | int ConvolutionDepthWise_x86_avx::destroy_pipeline(const Option& opt)
{
if (activation)
{
activation->destroy_pipeline(opt);
delete activation;
activation = 0;
}
for (int i = 0; i < (int)group_ops.size(); i++)
{
group_ops[i]->destroy_pipeline(opt);
delete group_ops[i];
}
group_ops.clear();
return 0;
} | pushq %rax
movq (%rdi), %rax
addq -0x38(%rax), %rdi
callq 0x2a493c
xorl %eax, %eax
popq %rcx
retq
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/convolutiondepthwise_x86_avx.cpp |
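The forward() entry that follows gathers each dilated kernel window through a precomputed offset table, so its inner loops pay nothing extra for dilation at run time. A standalone sketch of that space_ofs construction, identical in logic to the block inside forward() below (w is the padded input width in elements):

#include <vector>

std::vector<int> make_space_ofs(int w, int kernel_w, int kernel_h,
                                int dilation_w, int dilation_h)
{
    std::vector<int> space_ofs(kernel_w * kernel_h);
    int p1 = 0;
    int p2 = 0;
    const int gap = w * dilation_h - kernel_w * dilation_w; // jump to the next kernel row
    for (int i = 0; i < kernel_h; i++)
    {
        for (int j = 0; j < kernel_w; j++)
        {
            space_ofs[p1] = p2; // element offset of tap (i, j) from the window origin
            p1++;
            p2 += dilation_w;
        }
        p2 += gap;
    }
    return space_ofs;
}

With this table the scalar paths read sptr + space_ofs[k] * elempack for each tap k, one add per tap regardless of the dilation factors.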
ncnn::ConvolutionDepthWise_x86_avx::forward(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const | int ConvolutionDepthWise_x86_avx::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
#if NCNN_INT8
if (opt.use_int8_inference && int8_scale_term)
{
return forward_int8_x86(bottom_blob, top_blob, opt);
}
#endif
int w = bottom_blob.w;
int h = bottom_blob.h;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
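// extent of the dilated kernel in input pixels: a 3x3 kernel with dilation 2 spans 2 * (3 - 1) + 1 = 5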
const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;
Mat bottom_blob_bordered;
make_padding(bottom_blob, bottom_blob_bordered, opt);
if (bottom_blob_bordered.empty())
return -100;
w = bottom_blob_bordered.w;
h = bottom_blob_bordered.h;
int outw = (w - kernel_extent_w) / stride_w + 1;
int outh = (h - kernel_extent_h) / stride_h + 1;
int out_elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
#if __AVX512F__
out_elempack = num_output % 16 == 0 ? 16 : num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#elif __AVX__
out_elempack = num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#else
out_elempack = num_output % 4 == 0 ? 4 : 1;
#endif
}
#endif // __SSE2__
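// rescale elemsize so bytes per scalar stay constant under the new packing factor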
size_t out_elemsize = elemsize / elempack * out_elempack;
top_blob.create(outw, outh, num_output / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
// depth-wise
if (channels * elempack == group && group == num_output)
{
#if __SSE2__
#if __AVX__
#if __AVX512F__
if (elempack == 16)
{
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw3x3s1_pack16_avx512(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw3x3s2_pack16_avx512(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 5 && kernel_h == 5 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw5x5s1_pack16_avx512(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 5 && kernel_h == 5 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw5x5s2_pack16_avx512(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
else
{
const int maxk = kernel_w * kernel_h;
// kernel offsets
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
int gap = w * dilation_h - kernel_w * dilation_w;
for (int i = 0; i < kernel_h; i++)
{
for (int j = 0; j < kernel_w; j++)
{
space_ofs[p1] = p2;
p1++;
p2 += dilation_w;
}
p2 += gap;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < channels; g++)
{
float* outptr = top_blob.channel(g);
const float* kptr = (const float*)weight_data_tm + maxk * g * 16;
const Mat m = bottom_blob_bordered.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__m512 _sum = _mm512_set1_ps(0.f);
if (bias_term)
{
_sum = _mm512_loadu_ps(((const float*)bias_data) + g * 16);
}
const float* sptr = m.row(i * stride_h) + j * stride_w * 16;
for (int k = 0; k < maxk; k++)
{
__m512 _val = _mm512_loadu_ps(sptr + space_ofs[k] * 16);
__m512 _w = _mm512_loadu_ps(kptr + k * 16);
_sum = _mm512_fmadd_ps(_val, _w, _sum);
}
_mm512_storeu_ps(outptr, _sum);
outptr += 16;
}
}
}
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
}
#endif // __AVX512F__
if (elempack == 8)
{
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw3x3s1_pack8_avx(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw3x3s2_pack8_avx(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 5 && kernel_h == 5 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw5x5s1_pack8_avx(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 5 && kernel_h == 5 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw5x5s2_pack8_avx(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
else
{
const int maxk = kernel_w * kernel_h;
// kernel offsets
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
int gap = w * dilation_h - kernel_w * dilation_w;
for (int i = 0; i < kernel_h; i++)
{
for (int j = 0; j < kernel_w; j++)
{
space_ofs[p1] = p2;
p1++;
p2 += dilation_w;
}
p2 += gap;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < channels; g++)
{
float* outptr = top_blob.channel(g);
const float* kptr = (const float*)weight_data_tm + maxk * g * 8;
const Mat m = bottom_blob_bordered.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__m256 _sum = _mm256_set1_ps(0.f);
if (bias_term)
{
_sum = _mm256_loadu_ps(((const float*)bias_data) + g * 8);
}
const float* sptr = m.row(i * stride_h) + j * stride_w * 8;
for (int k = 0; k < maxk; k++)
{
__m256 _val = _mm256_loadu_ps(sptr + space_ofs[k] * 8);
__m256 _w = _mm256_loadu_ps(kptr + k * 8);
_sum = _mm256_comp_fmadd_ps(_val, _w, _sum);
}
_mm256_storeu_ps(outptr + j * 8, _sum);
}
outptr += outw * 8;
}
}
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
}
#endif // __AVX__
if (elempack == 4)
{
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw3x3s1_pack4_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw3x3s2_pack4_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 5 && kernel_h == 5 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw5x5s1_pack4_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 5 && kernel_h == 5 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw5x5s2_pack4_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
{
const int maxk = kernel_w * kernel_h;
// kernel offsets
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
int gap = w * dilation_h - kernel_w * dilation_w;
for (int i = 0; i < kernel_h; i++)
{
for (int j = 0; j < kernel_w; j++)
{
space_ofs[p1] = p2;
p1++;
p2 += dilation_w;
}
p2 += gap;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < channels; g++)
{
float* outptr = top_blob.channel(g);
const float* kptr = (const float*)weight_data_tm + maxk * g * 4;
const Mat m = bottom_blob_bordered.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__m128 _sum = _mm_set1_ps(0.f);
if (bias_term)
{
_sum = _mm_loadu_ps(((const float*)bias_data) + g * 4);
}
const float* sptr = m.row(i * stride_h) + j * stride_w * 4;
for (int k = 0; k < maxk; k++)
{
__m128 _val = _mm_loadu_ps(sptr + space_ofs[k] * 4);
__m128 _w = _mm_loadu_ps(kptr + k * 4);
_sum = _mm_add_ps(_mm_mul_ps(_val, _w), _sum);
}
_sum = activation_sse(_sum, activation_type, activation_params);
_mm_storeu_ps(outptr + j * 4, _sum);
}
outptr += outw * 4;
}
}
return 0;
}
}
#endif // __SSE2__
if (elempack == 1)
{
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw3x3s1_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw3x3s2_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
}
}
// group convolution
const int channels_g = channels * elempack / group;
const int num_output_g = num_output / group;
int g_elempack = 1;
int out_g_elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
#if __AVX512F__
g_elempack = channels_g % 16 == 0 ? 16 : channels_g % 8 == 0 ? 8 : channels_g % 4 == 0 ? 4 : 1;
out_g_elempack = num_output_g % 16 == 0 ? 16 : num_output_g % 8 == 0 ? 8 : num_output_g % 4 == 0 ? 4 : 1;
#elif __AVX__
g_elempack = channels_g % 8 == 0 ? 8 : channels_g % 4 == 0 ? 4 : 1;
out_g_elempack = num_output_g % 8 == 0 ? 8 : num_output_g % 4 == 0 ? 4 : 1;
#else
g_elempack = channels_g % 4 == 0 ? 4 : 1;
out_g_elempack = num_output_g % 4 == 0 ? 4 : 1;
#endif
}
#endif // __SSE2__
// unpacking
Mat bottom_blob_bordered_unpacked = bottom_blob_bordered;
if (elempack > g_elempack)
{
Option opt_p = opt;
opt_p.blob_allocator = opt.workspace_allocator;
convert_packing(bottom_blob_bordered, bottom_blob_bordered_unpacked, g_elempack, opt_p);
}
Mat top_blob_unpacked = top_blob;
if (out_g_elempack < out_elempack)
{
top_blob_unpacked.create(outw, outh, num_output / out_g_elempack, out_elemsize / out_elempack * out_g_elempack, out_g_elempack, opt.workspace_allocator);
if (top_blob_unpacked.empty())
return -100;
}
for (int g = 0; g < group; g++)
{
const Mat bottom_blob_bordered_g = bottom_blob_bordered_unpacked.channel_range(channels_g * g / g_elempack, channels_g / g_elempack);
Mat top_blob_g = top_blob_unpacked.channel_range(num_output_g * g / out_g_elempack, num_output_g / out_g_elempack);
const ncnn::Layer* op = group_ops[g];
Option opt_g = opt;
opt_g.blob_allocator = top_blob_unpacked.allocator;
// forward
op->forward(bottom_blob_bordered_g, top_blob_g, opt_g);
}
// packing
if (out_g_elempack < out_elempack)
{
convert_packing(top_blob_unpacked, top_blob, out_elempack, opt);
}
else
{
top_blob = top_blob_unpacked;
}
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x3b8, %rsp # imm = 0x3B8
movq %rdi, %r14
movq (%rdi), %rax
movq -0x18(%rax), %rdi
movq %rcx, 0x48(%rsp)
cmpb $0x1, 0x1e(%rcx)
jne 0x2a4a21
cmpl $0x0, 0x10c(%r14,%rdi)
je 0x2a4a21
movq %r14, %rdi
movq 0x48(%rsp), %rcx
addq $0x3b8, %rsp # imm = 0x3B8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp 0x2a98c2
movq %rdx, 0x18(%rsp)
movl 0x38(%rsi), %ecx
movq %rcx, 0xf0(%rsp)
movq 0x10(%rsi), %r13
movslq 0x18(%rsi), %r15
movl 0xd4(%r14,%rdi), %ebx
decl %ebx
imull 0xdc(%r14,%rdi), %ebx
movl 0xd8(%r14,%rdi), %ebp
decl %ebp
imull 0xe0(%r14,%rdi), %ebp
leaq 0x90(%rsp), %rdx
andq $0x0, 0x40(%rdx)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, (%rdx)
vmovups %xmm0, 0xc(%rdx)
vmovaps %xmm0, 0x20(%rdx)
vmovups %xmm0, 0x2c(%rdx)
movq -0x18(%rax), %rdi
addq %r14, %rdi
movq 0x48(%rsp), %rcx
callq 0x287daa
pushq $-0x64
popq %r12
cmpq $0x0, 0x90(%rsp)
je 0x2a96d0
movslq 0xc8(%rsp), %rax
imulq 0xd0(%rsp), %rax
testq %rax, %rax
je 0x2a96d0
notl %ebx
notl %ebp
movl 0xbc(%rsp), %edi
addl %edi, %ebx
movq (%r14), %rax
movq -0x18(%rax), %rcx
movl %ebx, %eax
cltd
idivl 0xe4(%r14,%rcx)
movl %eax, %ebx
leal 0x1(%rbx), %esi
addl 0xc0(%rsp), %ebp
movl %ebp, %eax
cltd
idivl 0xe8(%r14,%rcx)
movq %rax, 0x50(%rsp)
leal 0x1(%rax), %r8d
movl 0xd0(%r14,%rcx), %ecx
movq 0x48(%rsp), %rax
cmpb $0x1, 0x27(%rax)
pushq $0x8
popq %rbp
movl %edi, 0x20(%rsp)
jne 0x2a4b30
xorl %eax, %eax
testb $0x3, %cl
sete %al
testb $0x7, %cl
leal 0x1(%rax,%rax,2), %r9d
cmovel %ebp, %r9d
jmp 0x2a4b34
pushq $0x1
popq %r9
movq %r13, %rax
xorl %edx, %edx
divq %r15
movq %rax, %r10
movl %r9d, %eax
movq %rax, 0x70(%rsp)
imulq %rax, %r10
movl %ecx, %eax
cltd
idivl %r9d
movq 0x48(%rsp), %rcx
movq 0x8(%rcx), %rcx
movq %rcx, (%rsp)
movq 0x18(%rsp), %r13
movq %r13, %rdi
movl %esi, 0x110(%rsp)
movl %r8d, 0x130(%rsp)
movl %r8d, %edx
movl %eax, %ecx
movq %r10, 0x170(%rsp)
movq %r10, %r8
callq 0x628f2
movq (%r13), %r9
testq %r9, %r9
je 0x2a96d0
movq 0x40(%r13), %r11
movslq 0x38(%r13), %rax
imulq %r11, %rax
testq %rax, %rax
je 0x2a96d0
movq %r15, %r10
movl %r10d, %eax
imull 0xf0(%rsp), %eax
movq (%r14), %rcx
movq -0x18(%rcx), %rdi
movl 0xd0(%r14,%rdi), %ecx
movl 0x108(%r14,%rdi), %esi
cmpl %esi, %eax
jne 0x2a75fb
cmpl %ecx, %eax
jne 0x2a75fb
addq %r14, %rdi
cmpl $0x1, %r10d
je 0x2a6634
movslq %ebx, %rbx
cmpl $0x4, %r10d
movq %r14, 0x10(%rsp)
je 0x2a59a4
movl %eax, %ecx
cmpl $0x8, %r10d
jne 0x2a75fb
movl 0xd4(%rdi), %eax
movl 0xd8(%rdi), %r15d
cmpl $0x5, %eax
je 0x2a70c6
cmpl $0x3, %eax
jne 0x2a93b2
cmpl $0x3, %r15d
jne 0x2a93b2
cmpl $0x1, 0xdc(%rdi)
jne 0x2a8abf
cmpl $0x1, 0xe0(%rdi)
jne 0x2a8abf
cmpl $0x1, 0xe4(%rdi)
jne 0x2a8abf
cmpl $0x1, 0xe8(%rdi)
jne 0x2a8abf
movl 0x2c(%r13), %eax
movl 0x30(%r13), %ecx
movl 0xbc(%rsp), %edx
movl 0xc8(%rsp), %esi
movq 0x1b0(%rdi), %r8
leal 0x10(,%rdx,8), %edx
movslq %edx, %rdi
leal (,%rax,8), %edx
movslq %edx, %rdx
xorl %r9d, %r9d
testl %esi, %esi
cmovlel %r9d, %esi
movq %rsi, 0x160(%rsp)
shlq $0x2, %rdx
movq %rdx, 0x190(%rsp)
shlq $0x2, %rdi
movq %r8, 0x150(%rsp)
cmpq 0x160(%rsp), %r9
je 0x2a9725
testq %r8, %r8
je 0x2a4ce5
movq %r9, %rdx
shlq $0x5, %rdx
vmovups (%r8,%rdx), %ymm15
jmp 0x2a4cea
vxorps %xmm15, %xmm15, %xmm15
movq 0x40(%r13), %r10
imulq %r9, %r10
movq 0x10(%r13), %rsi
imulq %rsi, %r10
addq (%r13), %r10
movslq 0x2c(%r13), %rdx
movslq 0x54(%r14), %r11
imulq %r9, %r11
imulq 0x38(%r14), %r11
movq 0x28(%r14), %rbx
imulq %rsi, %rdx
addq %r10, %rdx
movslq 0xbc(%rsp), %rsi
movq 0xd0(%rsp), %r15
movq %r9, 0xe0(%rsp)
imulq %r9, %r15
movq 0xa0(%rsp), %r8
imulq %r8, %r15
addq 0x90(%rsp), %r15
imulq %r8, %rsi
leaq (%r15,%rsi), %r12
leaq (%r15,%rsi,2), %r13
leaq (%rsi,%rsi,2), %rsi
addq %r15, %rsi
xorl %ebp, %ebp
vmovups %ymm15, 0x20(%rsp)
movl %ebp, %r8d
orl $0x1, %r8d
cmpl %ecx, %r8d
jge 0x2a597f
xorl %r9d, %r9d
xorl %r8d, %r8d
leal 0x3(%r8), %r14d
cmpl %eax, %r14d
jge 0x2a53c6
vmovaps 0x20(%r15,%r9), %ymm0
vmovaps 0x40(%r15,%r9), %ymm2
vmovaps 0x60(%r15,%r9), %ymm6
vmovaps 0x80(%r15,%r9), %ymm7
vmovaps (%rbx,%r11), %ymm3
vmovaps 0x20(%rbx,%r11), %ymm5
vmovaps 0x40(%rbx,%r11), %ymm4
vmovaps 0x60(%rbx,%r11), %ymm1
vmulps (%r15,%r9), %ymm3, %ymm8
vaddps %ymm15, %ymm8, %ymm8
vmulps %ymm3, %ymm0, %ymm9
vaddps %ymm15, %ymm9, %ymm9
vmulps %ymm3, %ymm2, %ymm10
vaddps %ymm15, %ymm10, %ymm10
vmulps %ymm3, %ymm6, %ymm11
vaddps %ymm15, %ymm11, %ymm11
vmulps %ymm5, %ymm0, %ymm0
vmulps %ymm5, %ymm2, %ymm12
vmulps %ymm5, %ymm6, %ymm13
vmulps %ymm5, %ymm7, %ymm14
vmulps %ymm4, %ymm2, %ymm2
vaddps %ymm2, %ymm0, %ymm0
vaddps %ymm0, %ymm8, %ymm0
vmovups %ymm0, 0xf0(%rsp)
vmulps %ymm4, %ymm6, %ymm0
vaddps %ymm0, %ymm12, %ymm0
vaddps %ymm0, %ymm9, %ymm0
vmovups %ymm0, 0x110(%rsp)
vmulps %ymm4, %ymm7, %ymm0
vaddps %ymm0, %ymm13, %ymm0
vmulps 0xa0(%r15,%r9), %ymm4, %ymm2
vaddps %ymm0, %ymm10, %ymm0
vmovups %ymm0, 0x1f0(%rsp)
vaddps %ymm2, %ymm14, %ymm0
vaddps %ymm0, %ymm11, %ymm0
vmovups %ymm0, 0x130(%rsp)
vmovaps 0x80(%rbx,%r11), %ymm2
vmovaps (%r12,%r9), %ymm11
vmovaps 0x20(%r12,%r9), %ymm7
vmovaps 0x40(%r12,%r9), %ymm15
vmovaps 0x60(%r12,%r9), %ymm14
vmulps %ymm1, %ymm11, %ymm0
vmulps %ymm1, %ymm7, %ymm6
vmulps %ymm2, %ymm7, %ymm8
vaddps %ymm0, %ymm8, %ymm9
vmulps %ymm2, %ymm15, %ymm0
vaddps %ymm0, %ymm6, %ymm8
vmulps %ymm1, %ymm15, %ymm0
vmulps %ymm2, %ymm14, %ymm6
vaddps %ymm6, %ymm0, %ymm10
vmovaps 0x80(%r12,%r9), %ymm0
vmulps %ymm1, %ymm14, %ymm6
vmulps %ymm2, %ymm0, %ymm13
vaddps %ymm6, %ymm13, %ymm6
vmulps %ymm5, %ymm7, %ymm13
vmulps %ymm4, %ymm15, %ymm12
vaddps %ymm12, %ymm13, %ymm12
vmulps %ymm3, %ymm11, %ymm11
vaddps 0x20(%rsp), %ymm11, %ymm11
vaddps %ymm12, %ymm11, %ymm11
vmovups %ymm11, 0x50(%rsp)
vmulps %ymm5, %ymm15, %ymm11
vmulps %ymm4, %ymm14, %ymm12
vaddps %ymm12, %ymm11, %ymm11
vmulps %ymm3, %ymm7, %ymm7
vaddps 0x20(%rsp), %ymm7, %ymm7
vaddps %ymm7, %ymm11, %ymm7
vmovups %ymm7, 0x70(%rsp)
vmulps %ymm5, %ymm14, %ymm11
vmulps %ymm4, %ymm0, %ymm12
vaddps %ymm12, %ymm11, %ymm11
vmulps %ymm3, %ymm15, %ymm12
vaddps 0x20(%rsp), %ymm12, %ymm12
vaddps %ymm11, %ymm12, %ymm7
vmovups %ymm7, 0x170(%rsp)
vmulps %ymm5, %ymm0, %ymm5
vmovaps 0xa0(%r12,%r9), %ymm12
vmulps %ymm4, %ymm12, %ymm4
vaddps %ymm4, %ymm5, %ymm4
vmulps %ymm3, %ymm14, %ymm3
vaddps 0x20(%rsp), %ymm3, %ymm3
vaddps %ymm4, %ymm3, %ymm3
vmovups %ymm3, 0x270(%rsp)
vmovaps 0xa0(%rbx,%r11), %ymm13
vmulps %ymm13, %ymm15, %ymm4
vaddps %ymm4, %ymm9, %ymm4
vaddps 0xf0(%rsp), %ymm4, %ymm3
vmovups %ymm3, 0xf0(%rsp)
vmulps %ymm13, %ymm14, %ymm5
vaddps %ymm5, %ymm8, %ymm5
vaddps 0x110(%rsp), %ymm5, %ymm3
vmovups %ymm3, 0x110(%rsp)
vmulps %ymm0, %ymm13, %ymm0
vaddps %ymm0, %ymm10, %ymm0
vaddps 0x1f0(%rsp), %ymm0, %ymm0
vmovups %ymm0, 0x1f0(%rsp)
vmulps %ymm13, %ymm12, %ymm0
vaddps %ymm0, %ymm6, %ymm0
vaddps 0x130(%rsp), %ymm0, %ymm0
vmovups %ymm0, 0x130(%rsp)
vmovaps (%r13,%r9), %ymm6
vmovaps 0x20(%r13,%r9), %ymm9
vmovaps 0x40(%r13,%r9), %ymm10
vmulps %ymm1, %ymm6, %ymm12
vmulps %ymm2, %ymm9, %ymm14
vaddps %ymm14, %ymm12, %ymm12
vmulps %ymm1, %ymm9, %ymm14
vmulps %ymm2, %ymm10, %ymm15
vaddps %ymm15, %ymm14, %ymm8
vmovaps 0x60(%r13,%r9), %ymm14
vmulps %ymm1, %ymm10, %ymm15
vmulps %ymm2, %ymm14, %ymm7
vaddps %ymm7, %ymm15, %ymm7
vmulps %ymm1, %ymm14, %ymm1
vmovaps 0x80(%r13,%r9), %ymm15
vmulps %ymm2, %ymm15, %ymm2
vaddps %ymm2, %ymm1, %ymm5
vmovaps 0xc0(%rbx,%r11), %ymm1
vmulps %ymm1, %ymm6, %ymm6
vmovaps 0xe0(%rbx,%r11), %ymm2
vmulps %ymm2, %ymm9, %ymm11
vaddps %ymm6, %ymm11, %ymm11
vmulps %ymm1, %ymm9, %ymm6
vmulps %ymm2, %ymm10, %ymm9
vaddps %ymm6, %ymm9, %ymm0
vmulps %ymm1, %ymm10, %ymm6
vmulps %ymm2, %ymm14, %ymm9
vaddps %ymm6, %ymm9, %ymm3
vmulps %ymm1, %ymm14, %ymm6
vmulps %ymm2, %ymm15, %ymm9
vaddps %ymm6, %ymm9, %ymm4
vmulps %ymm13, %ymm10, %ymm6
vaddps %ymm6, %ymm12, %ymm6
vaddps 0x50(%rsp), %ymm6, %ymm6
vmovups %ymm6, 0x50(%rsp)
vmulps %ymm13, %ymm14, %ymm9
vaddps %ymm9, %ymm8, %ymm8
vaddps 0x70(%rsp), %ymm8, %ymm9
vmulps %ymm13, %ymm15, %ymm8
vaddps %ymm7, %ymm8, %ymm7
vaddps 0x170(%rsp), %ymm7, %ymm7
vmovaps 0xa0(%r13,%r9), %ymm8
vmulps %ymm13, %ymm8, %ymm12
vaddps %ymm5, %ymm12, %ymm5
vaddps 0x270(%rsp), %ymm5, %ymm13
vmovaps 0x100(%rbx,%r11), %ymm12
vmulps %ymm12, %ymm10, %ymm5
vaddps %ymm5, %ymm11, %ymm5
vaddps 0xf0(%rsp), %ymm5, %ymm10
vmulps %ymm12, %ymm14, %ymm5
vaddps %ymm5, %ymm0, %ymm0
vaddps 0x110(%rsp), %ymm0, %ymm5
vmulps %ymm12, %ymm15, %ymm0
vaddps %ymm0, %ymm3, %ymm0
vaddps 0x1f0(%rsp), %ymm0, %ymm3
vmulps %ymm12, %ymm8, %ymm0
vaddps %ymm0, %ymm4, %ymm0
vaddps 0x130(%rsp), %ymm0, %ymm0
vmovaps 0x20(%rsi,%r9), %ymm4
vmovaps 0x40(%rsi,%r9), %ymm8
vmulps (%rsi,%r9), %ymm1, %ymm11
vmovaps 0x60(%rsi,%r9), %ymm14
vmulps %ymm1, %ymm4, %ymm15
vmulps %ymm2, %ymm4, %ymm4
vaddps %ymm4, %ymm11, %ymm4
vmulps %ymm2, %ymm8, %ymm11
vaddps %ymm11, %ymm15, %ymm11
vmulps %ymm1, %ymm8, %ymm15
vmulps %ymm2, %ymm14, %ymm6
vaddps %ymm6, %ymm15, %ymm6
vmulps %ymm1, %ymm14, %ymm1
vmovaps 0x80(%rsi,%r9), %ymm15
vmulps %ymm2, %ymm15, %ymm2
vaddps %ymm2, %ymm1, %ymm1
vmulps %ymm12, %ymm8, %ymm2
vaddps %ymm2, %ymm4, %ymm2
vaddps 0x50(%rsp), %ymm2, %ymm2
vmulps %ymm12, %ymm14, %ymm4
vaddps %ymm4, %ymm11, %ymm4
vaddps %ymm4, %ymm9, %ymm4
vmulps %ymm12, %ymm15, %ymm8
vmovups 0x20(%rsp), %ymm15
vaddps %ymm6, %ymm8, %ymm6
vaddps %ymm6, %ymm7, %ymm6
vmulps 0xa0(%rsi,%r9), %ymm12, %ymm7
vaddps %ymm7, %ymm1, %ymm1
vaddps %ymm1, %ymm13, %ymm1
vmovaps %ymm10, (%r10,%r9)
vmovaps %ymm5, 0x20(%r10,%r9)
vmovaps %ymm3, 0x40(%r10,%r9)
vmovaps %ymm0, 0x60(%r10,%r9)
vmovaps %ymm2, (%rdx,%r9)
vmovaps %ymm4, 0x20(%rdx,%r9)
vmovaps %ymm6, 0x40(%rdx,%r9)
vmovaps %ymm1, 0x60(%rdx,%r9)
addl $0x4, %r8d
subq $-0x80, %r9
jmp 0x2a4d7b
vmovaps 0x20(%r15,%r9), %ymm1
vmovaps 0x40(%r15,%r9), %ymm3
vmovaps (%rbx,%r11), %ymm5
vmovaps 0x20(%rbx,%r11), %ymm0
vmovaps 0x40(%rbx,%r11), %ymm4
vmulps (%r15,%r9), %ymm5, %ymm6
vmovaps 0x60(%rbx,%r11), %ymm2
vaddps %ymm6, %ymm15, %ymm6
vmulps %ymm5, %ymm1, %ymm7
vaddps %ymm7, %ymm15, %ymm7
vmulps %ymm0, %ymm1, %ymm1
vmulps %ymm0, %ymm3, %ymm8
vmulps %ymm4, %ymm3, %ymm3
vaddps %ymm3, %ymm1, %ymm1
vaddps %ymm1, %ymm6, %ymm6
vmulps 0x60(%r15,%r9), %ymm4, %ymm1
vaddps %ymm1, %ymm8, %ymm1
vaddps %ymm1, %ymm7, %ymm7
vmovaps 0x80(%rbx,%r11), %ymm3
vmovaps 0xa0(%rbx,%r11), %ymm1
vmovaps (%r12,%r9), %ymm8
vmovaps 0x20(%r12,%r9), %ymm9
vmovaps 0x40(%r12,%r9), %ymm10
vmovaps 0x60(%r12,%r9), %ymm11
vmulps %ymm2, %ymm8, %ymm12
vmulps %ymm2, %ymm9, %ymm13
vmulps %ymm5, %ymm8, %ymm8
vaddps %ymm15, %ymm8, %ymm8
vmulps %ymm5, %ymm9, %ymm5
vaddps %ymm5, %ymm15, %ymm5
vmulps %ymm3, %ymm9, %ymm14
vaddps %ymm14, %ymm12, %ymm12
vmulps %ymm3, %ymm10, %ymm14
vaddps %ymm14, %ymm13, %ymm13
vmulps %ymm0, %ymm9, %ymm9
vmulps %ymm0, %ymm10, %ymm14
vmulps %ymm1, %ymm10, %ymm0
vaddps %ymm0, %ymm12, %ymm0
vaddps %ymm0, %ymm6, %ymm0
vmulps %ymm1, %ymm11, %ymm6
vaddps %ymm6, %ymm13, %ymm6
vaddps %ymm6, %ymm7, %ymm6
vmulps %ymm4, %ymm10, %ymm7
vaddps %ymm7, %ymm9, %ymm7
vaddps %ymm7, %ymm8, %ymm7
vmulps %ymm4, %ymm11, %ymm4
vaddps %ymm4, %ymm14, %ymm4
vaddps %ymm4, %ymm5, %ymm8
vmovaps 0xc0(%rbx,%r11), %ymm5
vmovaps 0xe0(%rbx,%r11), %ymm4
vmovaps (%r13,%r9), %ymm9
vmovaps 0x20(%r13,%r9), %ymm10
vmovaps 0x40(%r13,%r9), %ymm11
vmulps %ymm5, %ymm9, %ymm12
vmulps %ymm5, %ymm10, %ymm13
vmulps %ymm4, %ymm10, %ymm14
vaddps %ymm14, %ymm12, %ymm12
vmulps %ymm4, %ymm11, %ymm14
vaddps %ymm14, %ymm13, %ymm13
vmulps %ymm2, %ymm9, %ymm9
vmulps %ymm2, %ymm10, %ymm2
vmulps %ymm3, %ymm10, %ymm10
vaddps %ymm10, %ymm9, %ymm9
vmovaps 0x100(%rbx,%r11), %ymm10
vmulps %ymm3, %ymm11, %ymm3
vaddps %ymm3, %ymm2, %ymm2
vmulps %ymm10, %ymm11, %ymm3
vaddps %ymm3, %ymm12, %ymm3
vmovaps 0x60(%r13,%r9), %ymm12
vaddps %ymm3, %ymm0, %ymm0
vmulps %ymm10, %ymm12, %ymm3
vaddps %ymm3, %ymm13, %ymm3
vaddps %ymm3, %ymm6, %ymm3
vmulps %ymm1, %ymm11, %ymm6
vaddps %ymm6, %ymm9, %ymm6
vaddps %ymm6, %ymm7, %ymm6
vmulps %ymm1, %ymm12, %ymm1
vaddps %ymm1, %ymm2, %ymm1
vaddps %ymm1, %ymm8, %ymm1
vmulps (%rsi,%r9), %ymm5, %ymm2
vmovaps 0x20(%rsi,%r9), %ymm7
vmulps %ymm5, %ymm7, %ymm5
vmulps %ymm4, %ymm7, %ymm7
vaddps %ymm7, %ymm2, %ymm2
vmovaps 0x40(%rsi,%r9), %ymm7
vmulps %ymm4, %ymm7, %ymm4
vaddps %ymm4, %ymm5, %ymm4
vmulps %ymm7, %ymm10, %ymm5
vaddps %ymm5, %ymm2, %ymm2
vaddps %ymm2, %ymm6, %ymm2
vmulps 0x60(%rsi,%r9), %ymm10, %ymm5
vaddps %ymm5, %ymm4, %ymm4
vaddps %ymm4, %ymm1, %ymm1
vmovaps %ymm0, (%r10,%r9)
vmovaps %ymm3, 0x20(%r10,%r9)
vmovaps %ymm2, (%rdx,%r9)
vmovaps %ymm1, 0x20(%rdx,%r9)
addl $0x2, %r8d
addq $0x40, %r9
leal 0x1(%r8), %r14d
cmpl %eax, %r14d
jl 0x2a51d2
movq 0x10(%rsp), %r14
cmpl %eax, %r8d
jge 0x2a5510
vmovaps (%rbx,%r11), %ymm0
vmovaps 0x20(%rbx,%r11), %ymm1
vmovaps 0x40(%rbx,%r11), %ymm2
vmulps (%r15,%r9), %ymm0, %ymm3
vmovaps 0x60(%rbx,%r11), %ymm4
vaddps %ymm3, %ymm15, %ymm3
vmulps 0x20(%r15,%r9), %ymm1, %ymm5
vmulps 0x40(%r15,%r9), %ymm2, %ymm6
vaddps %ymm6, %ymm5, %ymm5
vaddps %ymm5, %ymm3, %ymm3
vmovaps 0x80(%rbx,%r11), %ymm5
vmovaps 0xa0(%rbx,%r11), %ymm6
vmovaps (%r12,%r9), %ymm7
vmovaps 0x20(%r12,%r9), %ymm8
vmovaps 0x40(%r12,%r9), %ymm9
vmulps %ymm4, %ymm7, %ymm10
vmulps %ymm0, %ymm7, %ymm0
vaddps %ymm0, %ymm15, %ymm0
vmulps %ymm5, %ymm8, %ymm7
vaddps %ymm7, %ymm10, %ymm7
vmulps %ymm1, %ymm8, %ymm1
vmulps %ymm6, %ymm9, %ymm8
vaddps %ymm7, %ymm8, %ymm7
vaddps %ymm7, %ymm3, %ymm3
vmulps %ymm2, %ymm9, %ymm2
vaddps %ymm2, %ymm1, %ymm1
vaddps %ymm1, %ymm0, %ymm0
vmovaps 0xc0(%rbx,%r11), %ymm1
vmovaps 0xe0(%rbx,%r11), %ymm2
vmovaps 0x100(%rbx,%r11), %ymm7
vmovaps (%r13,%r9), %ymm8
vmovaps 0x20(%r13,%r9), %ymm9
vmovaps 0x40(%r13,%r9), %ymm10
vmulps %ymm1, %ymm8, %ymm11
vmulps %ymm4, %ymm8, %ymm4
vmulps %ymm2, %ymm9, %ymm8
vaddps %ymm8, %ymm11, %ymm8
vmulps %ymm5, %ymm9, %ymm5
vaddps %ymm5, %ymm4, %ymm4
vmulps %ymm7, %ymm10, %ymm5
vaddps %ymm5, %ymm8, %ymm5
vaddps %ymm5, %ymm3, %ymm3
vmulps %ymm6, %ymm10, %ymm5
vaddps %ymm5, %ymm4, %ymm4
vmulps (%rsi,%r9), %ymm1, %ymm1
vaddps %ymm4, %ymm0, %ymm0
vmulps 0x20(%rsi,%r9), %ymm2, %ymm2
vaddps %ymm2, %ymm1, %ymm1
vmulps 0x40(%rsi,%r9), %ymm7, %ymm2
vaddps %ymm2, %ymm1, %ymm1
vaddps %ymm1, %ymm0, %ymm0
vmovaps %ymm3, (%r10,%r9)
vmovaps %ymm0, (%rdx,%r9)
incl %r8d
addq $0x20, %r9
jmp 0x2a53d8
addq %rdi, %r15
addq %r9, %r15
addq %rdi, %r12
addq %r9, %r12
addq %rdi, %r13
addq %r9, %r13
addq %rdi, %rsi
addq %r9, %rsi
movq 0x190(%rsp), %r8
addq %r8, %r10
addq %r9, %r10
addq %r8, %rdx
addq %r9, %rdx
addl $0x2, %ebp
jmp 0x2a4d65
xorl %edx, %edx
xorl %esi, %esi
leal 0x3(%rsi), %r8d
cmpl %eax, %r8d
jge 0x2a5891
vmovaps 0x20(%r15,%rdx), %ymm1
vmovaps 0x40(%r15,%rdx), %ymm2
vmovaps 0x60(%r15,%rdx), %ymm3
vmovaps 0x80(%r15,%rdx), %ymm4
vmovaps (%rbx,%r11), %ymm5
vmovaps 0x20(%rbx,%r11), %ymm6
vmovaps 0x40(%rbx,%r11), %ymm7
vmulps (%r15,%rdx), %ymm5, %ymm8
vmovaps 0x60(%rbx,%r11), %ymm0
vaddps %ymm15, %ymm8, %ymm8
vmulps %ymm5, %ymm1, %ymm9
vaddps %ymm15, %ymm9, %ymm9
vmulps %ymm5, %ymm2, %ymm10
vaddps %ymm15, %ymm10, %ymm10
vmulps %ymm5, %ymm3, %ymm5
vaddps %ymm5, %ymm15, %ymm5
vmulps %ymm6, %ymm1, %ymm1
vmulps %ymm6, %ymm2, %ymm11
vmulps %ymm6, %ymm3, %ymm12
vmulps %ymm6, %ymm4, %ymm6
vmulps %ymm7, %ymm2, %ymm2
vaddps %ymm2, %ymm1, %ymm1
vaddps %ymm1, %ymm8, %ymm1
vmulps %ymm7, %ymm3, %ymm2
vaddps %ymm2, %ymm11, %ymm2
vaddps %ymm2, %ymm9, %ymm2
vmulps %ymm7, %ymm4, %ymm3
vaddps %ymm3, %ymm12, %ymm3
vaddps %ymm3, %ymm10, %ymm3
vmulps 0xa0(%r15,%rdx), %ymm7, %ymm4
vaddps %ymm4, %ymm6, %ymm4
vaddps %ymm4, %ymm5, %ymm4
vmovaps 0x80(%rbx,%r11), %ymm5
vmovaps 0xa0(%rbx,%r11), %ymm6
vmovaps 0x20(%r12,%rdx), %ymm7
vmovaps 0x40(%r12,%rdx), %ymm8
vmovaps 0x60(%r12,%rdx), %ymm9
vmulps (%r12,%rdx), %ymm0, %ymm10
vmovaps 0x80(%r12,%rdx), %ymm11
vmulps %ymm0, %ymm7, %ymm12
vmulps %ymm0, %ymm8, %ymm13
vmulps %ymm0, %ymm9, %ymm0
vmulps %ymm5, %ymm7, %ymm7
vaddps %ymm7, %ymm10, %ymm7
vmulps %ymm5, %ymm8, %ymm10
vaddps %ymm10, %ymm12, %ymm10
vmulps %ymm5, %ymm9, %ymm12
vaddps %ymm12, %ymm13, %ymm12
vmulps %ymm5, %ymm11, %ymm5
vaddps %ymm5, %ymm0, %ymm0
vmulps %ymm6, %ymm8, %ymm5
vaddps %ymm5, %ymm7, %ymm5
vaddps %ymm5, %ymm1, %ymm1
vmulps %ymm6, %ymm9, %ymm5
vaddps %ymm5, %ymm10, %ymm5
vaddps %ymm5, %ymm2, %ymm2
vmulps %ymm6, %ymm11, %ymm5
vaddps %ymm5, %ymm12, %ymm5
vaddps %ymm5, %ymm3, %ymm3
vmulps 0xa0(%r12,%rdx), %ymm6, %ymm5
vaddps %ymm5, %ymm0, %ymm0
vaddps %ymm0, %ymm4, %ymm0
vmovaps 0xc0(%rbx,%r11), %ymm4
vmovaps 0xe0(%rbx,%r11), %ymm5
vmovaps 0x100(%rbx,%r11), %ymm6
vmovaps 0x20(%r13,%rdx), %ymm7
vmovaps 0x40(%r13,%rdx), %ymm8
vmovaps 0x60(%r13,%rdx), %ymm9
vmovaps 0x80(%r13,%rdx), %ymm10
vmulps (%r13,%rdx), %ymm4, %ymm11
vmulps %ymm4, %ymm7, %ymm12
vmulps %ymm4, %ymm8, %ymm13
vmulps %ymm4, %ymm9, %ymm4
vmulps %ymm5, %ymm7, %ymm7
vaddps %ymm7, %ymm11, %ymm7
vmulps %ymm5, %ymm8, %ymm11
vaddps %ymm11, %ymm12, %ymm11
vmulps %ymm5, %ymm9, %ymm12
vaddps %ymm12, %ymm13, %ymm12
vmulps %ymm5, %ymm10, %ymm5
vaddps %ymm5, %ymm4, %ymm4
vmulps %ymm6, %ymm8, %ymm5
vaddps %ymm5, %ymm7, %ymm5
vaddps %ymm5, %ymm1, %ymm1
vmulps %ymm6, %ymm9, %ymm5
vaddps %ymm5, %ymm11, %ymm5
vaddps %ymm5, %ymm2, %ymm2
vmulps %ymm6, %ymm10, %ymm5
vaddps %ymm5, %ymm12, %ymm5
vaddps %ymm5, %ymm3, %ymm3
vmulps 0xa0(%r13,%rdx), %ymm6, %ymm5
vaddps %ymm5, %ymm4, %ymm4
vaddps %ymm4, %ymm0, %ymm0
vmovaps %ymm1, (%r10,%rdx)
vmovaps %ymm2, 0x20(%r10,%rdx)
vmovaps %ymm3, 0x40(%r10,%rdx)
vmovaps %ymm0, 0x60(%r10,%rdx)
addl $0x4, %esi
subq $-0x80, %rdx
jmp 0x2a5548
vmovaps 0x20(%r15,%rdx), %ymm0
vmovaps 0x40(%r15,%rdx), %ymm1
vmovaps (%rbx,%r11), %ymm2
vmovaps 0x20(%rbx,%r11), %ymm3
vmovaps 0x40(%rbx,%r11), %ymm4
vmovaps 0x60(%rbx,%r11), %ymm5
vmulps (%r15,%rdx), %ymm2, %ymm6
vaddps %ymm6, %ymm15, %ymm6
vmulps %ymm2, %ymm0, %ymm2
vaddps %ymm2, %ymm15, %ymm2
vmulps %ymm3, %ymm0, %ymm0
vmulps %ymm3, %ymm1, %ymm3
vmulps %ymm4, %ymm1, %ymm1
vaddps %ymm1, %ymm0, %ymm0
vmulps 0x60(%r15,%rdx), %ymm4, %ymm1
vaddps %ymm0, %ymm6, %ymm0
vaddps %ymm1, %ymm3, %ymm1
vaddps %ymm1, %ymm2, %ymm1
vmovaps 0x80(%rbx,%r11), %ymm2
vmovaps 0xa0(%rbx,%r11), %ymm3
vmovaps 0x20(%r12,%rdx), %ymm4
vmulps (%r12,%rdx), %ymm5, %ymm6
vmovaps 0x40(%r12,%rdx), %ymm7
vmulps %ymm5, %ymm4, %ymm5
vmulps %ymm2, %ymm4, %ymm4
vaddps %ymm4, %ymm6, %ymm4
vmulps %ymm2, %ymm7, %ymm2
vaddps %ymm2, %ymm5, %ymm2
vmulps %ymm3, %ymm7, %ymm5
vaddps %ymm5, %ymm4, %ymm4
vaddps %ymm4, %ymm0, %ymm0
vmulps 0x60(%r12,%rdx), %ymm3, %ymm3
vaddps %ymm3, %ymm2, %ymm2
vaddps %ymm2, %ymm1, %ymm1
vmovaps 0xc0(%rbx,%r11), %ymm2
vmovaps 0xe0(%rbx,%r11), %ymm3
vmovaps 0x100(%rbx,%r11), %ymm4
vmovaps 0x20(%r13,%rdx), %ymm5
vmovaps 0x40(%r13,%rdx), %ymm6
vmulps (%r13,%rdx), %ymm2, %ymm7
vmulps %ymm2, %ymm5, %ymm2
vmulps %ymm3, %ymm5, %ymm5
vaddps %ymm5, %ymm7, %ymm5
vmulps %ymm3, %ymm6, %ymm3
vaddps %ymm3, %ymm2, %ymm2
vmulps %ymm4, %ymm6, %ymm3
vaddps %ymm3, %ymm5, %ymm3
vaddps %ymm3, %ymm0, %ymm0
vmulps 0x60(%r13,%rdx), %ymm4, %ymm3
vaddps %ymm3, %ymm2, %ymm2
vaddps %ymm2, %ymm1, %ymm1
vmovaps %ymm0, (%r10,%rdx)
vmovaps %ymm1, 0x20(%r10,%rdx)
addl $0x2, %esi
addq $0x40, %rdx
leal 0x1(%rsi), %r8d
cmpl %eax, %r8d
jl 0x2a5766
jmp 0x2a595d
vmovaps (%r15,%rdx), %ymm0
vmovaps 0x20(%r15,%rdx), %ymm1
vmovaps 0x40(%r15,%rdx), %ymm2
vmulps (%rbx,%r11), %ymm0, %ymm0
vmulps 0x20(%rbx,%r11), %ymm1, %ymm1
vaddps %ymm0, %ymm15, %ymm0
vmulps 0x40(%rbx,%r11), %ymm2, %ymm2
vaddps %ymm2, %ymm1, %ymm1
vaddps %ymm1, %ymm0, %ymm0
vmovaps (%r12,%rdx), %ymm1
vmovaps 0x20(%r12,%rdx), %ymm2
vmovaps 0x40(%r12,%rdx), %ymm3
vmulps 0x60(%rbx,%r11), %ymm1, %ymm1
vmulps 0x80(%rbx,%r11), %ymm2, %ymm2
vaddps %ymm2, %ymm1, %ymm1
vmulps 0xa0(%rbx,%r11), %ymm3, %ymm2
vaddps %ymm2, %ymm1, %ymm1
vaddps %ymm1, %ymm0, %ymm0
vmovaps (%r13,%rdx), %ymm1
vmovaps 0x20(%r13,%rdx), %ymm2
vmovaps 0x40(%r13,%rdx), %ymm3
vmulps 0xc0(%rbx,%r11), %ymm1, %ymm1
vmulps 0xe0(%rbx,%r11), %ymm2, %ymm2
vaddps %ymm2, %ymm1, %ymm1
vmulps 0x100(%rbx,%r11), %ymm3, %ymm2
vaddps %ymm2, %ymm1, %ymm1
vaddps %ymm1, %ymm0, %ymm0
vmovaps %ymm0, (%r10,%rdx)
incl %esi
addq $0x20, %rdx
cmpl %eax, %esi
jl 0x2a58a3
addq %rdx, %r15
addq $0x40, %r15
addq %rdx, %r12
addq $0x40, %r12
addq %rdx, %r13
addq $0x40, %r13
incl %ebp
addq %rdx, %r10
cmpl %ecx, %ebp
jl 0x2a5544
movq 0xe0(%rsp), %r9
incq %r9
movq 0x18(%rsp), %r13
movq 0x150(%rsp), %r8
jmp 0x2a4cc3
movl 0xd4(%rdi), %eax
movl 0xd8(%rdi), %r15d
cmpl $0x5, %eax
je 0x2a6998
cmpl $0x3, %eax
jne 0x2a841c
cmpl $0x3, %r15d
jne 0x2a841c
cmpl $0x1, 0xdc(%rdi)
jne 0x2a7b18
cmpl $0x1, 0xe0(%rdi)
jne 0x2a7b18
cmpl $0x1, 0xe4(%rdi)
jne 0x2a7b18
cmpl $0x1, 0xe8(%rdi)
jne 0x2a7b18
movl 0x2c(%r13), %eax
movl 0x30(%r13), %ecx
movl 0xc8(%rsp), %edx
movq 0x1b0(%rdi), %rsi
xorl %edi, %edi
testl %ecx, %ecx
cmovlel %edi, %ecx
testl %edx, %edx
cmovlel %edi, %edx
movq %rdx, 0x2b8(%rsp)
movl $0x80, %r8d
movq %rsi, 0x2b0(%rsp)
cmpq 0x2b8(%rsp), %rdi
je 0x2a9725
testq %rsi, %rsi
je 0x2a5a5a
movq %rdi, %rdx
shlq $0x4, %rdx
vmovups (%rsi,%rdx), %xmm0
jmp 0x2a5a5e
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
movq 0x40(%r13), %r9
imulq %rdi, %r9
imulq 0x10(%r13), %r9
addq (%r13), %r9
movq 0x28(%r14), %rdx
movslq 0x54(%r14), %rsi
imulq %rdi, %rsi
imulq 0x38(%r14), %rsi
movslq 0xbc(%rsp), %r10
movq 0xd0(%rsp), %r13
imulq %rdi, %r13
movq 0xa0(%rsp), %r11
imulq %r11, %r13
addq 0x90(%rsp), %r13
imulq %r11, %r10
leaq (%r10,%r13), %rbp
leaq (,%r10,2), %r11
addq %r13, %r11
vmovaps (%rdx,%rsi), %xmm0
vmovaps %xmm0, 0x50(%rsp)
vmovaps 0x10(%rdx,%rsi), %xmm0
vmovaps %xmm0, 0x70(%rsp)
vmovaps 0x20(%rdx,%rsi), %xmm0
vmovaps %xmm0, 0x110(%rsp)
vmovaps 0x30(%rdx,%rsi), %xmm0
vmovaps %xmm0, 0xf0(%rsp)
vmovaps 0x40(%rdx,%rsi), %xmm0
vmovaps %xmm0, 0x130(%rsp)
vmovaps 0x50(%rdx,%rsi), %xmm0
vmovaps %xmm0, 0x170(%rsp)
vmovaps 0x60(%rdx,%rsi), %xmm0
vmovaps %xmm0, 0x190(%rsp)
vmovaps 0x70(%rdx,%rsi), %xmm0
vmovaps %xmm0, 0x270(%rsp)
vmovaps 0x80(%rdx,%rsi), %xmm0
vmovaps %xmm0, 0x1f0(%rsp)
xorl %r10d, %r10d
cmpl %ecx, %r10d
je 0x2a661f
leaq 0x20(%r13), %r15
leaq 0x20(%rbp), %rbx
leaq 0x20(%r11), %rdx
xorl %esi, %esi
xorl %r12d, %r12d
leal 0x7(%r12), %r14d
cmpl %eax, %r14d
jge 0x2a63ad
vmovaps 0x50(%rsp), %xmm0
vmulps (%r13,%rsi), %xmm0, %xmm0
vaddps 0x20(%rsp), %xmm0, %xmm1
vmovaps 0x10(%r13,%rsi), %xmm3
vmovaps 0x20(%r13,%rsi), %xmm0
vmovaps %xmm0, 0xe0(%rsp)
vmovaps 0x30(%r13,%rsi), %xmm10
vmulps 0x70(%rsp), %xmm3, %xmm2
vmulps 0x110(%rsp), %xmm0, %xmm4
vaddps %xmm4, %xmm2, %xmm2
vmovaps 0xf0(%rsp), %xmm0
vmulps (%rbp,%rsi), %xmm0, %xmm4
vaddps %xmm2, %xmm1, %xmm1
vmovaps 0x10(%rbp,%rsi), %xmm13
vmovaps 0x20(%rbp,%rsi), %xmm0
vmovaps %xmm0, 0x260(%rsp)
vmovaps 0x30(%rbp,%rsi), %xmm2
vmovaps %xmm2, 0x160(%rsp)
vmovaps 0x130(%rsp), %xmm5
vmulps %xmm5, %xmm13, %xmm12
vmovaps %xmm5, %xmm9
vaddps %xmm4, %xmm12, %xmm4
vmulps 0x170(%rsp), %xmm0, %xmm12
vaddps %xmm4, %xmm12, %xmm4
vaddps %xmm4, %xmm1, %xmm1
vmovaps 0x190(%rsp), %xmm11
vmulps (%r11,%rsi), %xmm11, %xmm14
vmovaps 0x10(%r11,%rsi), %xmm15
vmovaps 0x20(%r11,%rsi), %xmm0
vmovaps 0x30(%r11,%rsi), %xmm12
vmovaps 0x270(%rsp), %xmm7
vmulps %xmm7, %xmm15, %xmm5
vaddps %xmm5, %xmm14, %xmm5
vmovaps 0x1f0(%rsp), %xmm8
vmulps %xmm0, %xmm8, %xmm14
vaddps %xmm5, %xmm14, %xmm5
vaddps %xmm5, %xmm1, %xmm1
vmovaps %xmm1, (%r9,%rsi)
vmulps 0x50(%rsp), %xmm3, %xmm1
vaddps 0x20(%rsp), %xmm1, %xmm1
vmovaps 0xe0(%rsp), %xmm2
vmulps 0x70(%rsp), %xmm2, %xmm3
vmovaps 0x110(%rsp), %xmm2
vmovaps %xmm10, %xmm14
vmovaps %xmm10, 0x2a0(%rsp)
vmulps %xmm2, %xmm10, %xmm5
vmovaps 0xf0(%rsp), %xmm4
vmulps %xmm4, %xmm13, %xmm13
vaddps %xmm3, %xmm13, %xmm3
vaddps %xmm3, %xmm1, %xmm1
vmovaps %xmm1, 0x150(%rsp)
vmovaps %xmm9, %xmm10
vmovaps 0x260(%rsp), %xmm9
vmulps %xmm10, %xmm9, %xmm3
vmovaps 0x160(%rsp), %xmm1
vmovaps 0x170(%rsp), %xmm6
vmulps %xmm6, %xmm1, %xmm13
vaddps %xmm5, %xmm13, %xmm5
vmulps %xmm11, %xmm15, %xmm13
vaddps %xmm3, %xmm13, %xmm3
vmulps %xmm7, %xmm0, %xmm13
vmovaps %xmm0, %xmm11
vaddps %xmm3, %xmm13, %xmm3
vaddps 0x150(%rsp), %xmm3, %xmm7
vmulps %xmm8, %xmm12, %xmm3
vmovaps %xmm12, 0x290(%rsp)
vaddps %xmm3, %xmm5, %xmm3
vaddps %xmm3, %xmm7, %xmm5
vmovaps 0x40(%r13,%rsi), %xmm3
vmovaps 0x40(%rbp,%rsi), %xmm7
vmovaps 0x40(%r11,%rsi), %xmm8
vmovaps %xmm5, 0x10(%r9,%rsi)
vmovaps 0xe0(%rsp), %xmm0
vmulps 0x50(%rsp), %xmm0, %xmm0
vaddps 0x20(%rsp), %xmm0, %xmm0
vmulps 0x70(%rsp), %xmm14, %xmm5
vmulps %xmm2, %xmm3, %xmm14
vmulps %xmm4, %xmm9, %xmm2
vmovaps %xmm10, %xmm13
vmulps %xmm1, %xmm10, %xmm15
vaddps %xmm5, %xmm15, %xmm5
vmulps %xmm6, %xmm7, %xmm15
vmovaps %xmm7, 0x150(%rsp)
vaddps %xmm15, %xmm14, %xmm14
vmovaps 0x190(%rsp), %xmm1
vmulps %xmm1, %xmm11, %xmm4
vmovaps %xmm1, %xmm9
vaddps %xmm4, %xmm2, %xmm2
vaddps %xmm2, %xmm0, %xmm0
vmovaps 0x270(%rsp), %xmm10
vmulps %xmm10, %xmm12, %xmm2
vaddps %xmm2, %xmm5, %xmm2
vaddps %xmm2, %xmm0, %xmm4
vmovaps %xmm8, %xmm0
vmovaps %xmm8, 0x350(%rsp)
vmovaps 0x1f0(%rsp), %xmm6
vmulps %xmm6, %xmm8, %xmm2
vaddps %xmm2, %xmm14, %xmm2
vaddps %xmm2, %xmm4, %xmm5
vmovaps 0x50(%r13,%rsi), %xmm1
vmovaps %xmm1, 0x260(%rsp)
vmovaps 0x50(%rbp,%rsi), %xmm4
vmovaps %xmm4, 0x360(%rsp)
vmovaps 0x50(%r11,%rsi), %xmm2
vmovaps %xmm2, 0xe0(%rsp)
vmovaps %xmm5, 0x20(%r9,%rsi)
vmovaps 0x2a0(%rsp), %xmm2
vmulps 0x50(%rsp), %xmm2, %xmm5
vaddps 0x20(%rsp), %xmm5, %xmm5
vmulps 0x70(%rsp), %xmm3, %xmm12
vmovaps 0x110(%rsp), %xmm2
vmulps %xmm2, %xmm1, %xmm14
vmovaps 0x160(%rsp), %xmm1
vmulps 0xf0(%rsp), %xmm1, %xmm11
vmulps %xmm7, %xmm13, %xmm15
vaddps %xmm15, %xmm12, %xmm8
vmovaps 0x170(%rsp), %xmm13
vmulps %xmm4, %xmm13, %xmm15
vaddps %xmm15, %xmm14, %xmm14
vmulps 0x290(%rsp), %xmm9, %xmm12
vmovaps %xmm9, %xmm15
vaddps %xmm12, %xmm11, %xmm11
vaddps %xmm5, %xmm11, %xmm5
vmulps %xmm0, %xmm10, %xmm11
vaddps %xmm11, %xmm8, %xmm10
vaddps %xmm5, %xmm10, %xmm5
vmovaps 0xe0(%rsp), %xmm0
vmulps %xmm6, %xmm0, %xmm10
vaddps %xmm10, %xmm14, %xmm10
vaddps %xmm5, %xmm10, %xmm5
vmovaps 0x60(%r13,%rsi), %xmm8
vmovaps 0x60(%rbp,%rsi), %xmm1
vmovaps %xmm1, 0x290(%rsp)
vmovaps 0x60(%r11,%rsi), %xmm14
vmovaps %xmm5, 0x30(%r9,%rsi)
vmulps 0x50(%rsp), %xmm3, %xmm3
vaddps 0x20(%rsp), %xmm3, %xmm3
vmovaps 0x260(%rsp), %xmm7
vmulps 0x70(%rsp), %xmm7, %xmm5
vmulps %xmm2, %xmm8, %xmm10
vmovaps %xmm8, 0x160(%rsp)
vmovaps %xmm2, %xmm11
vmovaps 0xf0(%rsp), %xmm4
vmulps 0x150(%rsp), %xmm4, %xmm2
vmovaps 0x130(%rsp), %xmm6
vmovaps 0x360(%rsp), %xmm9
vmulps %xmm6, %xmm9, %xmm12
vaddps %xmm5, %xmm12, %xmm5
vmulps %xmm1, %xmm13, %xmm12
vaddps %xmm12, %xmm10, %xmm10
vmulps 0x350(%rsp), %xmm15, %xmm12
vaddps %xmm2, %xmm12, %xmm1
vaddps %xmm1, %xmm3, %xmm1
vmovaps 0x270(%rsp), %xmm12
vmulps %xmm0, %xmm12, %xmm3
vaddps %xmm3, %xmm5, %xmm3
vaddps %xmm3, %xmm1, %xmm1
vmovaps 0x1f0(%rsp), %xmm2
vmulps %xmm2, %xmm14, %xmm3
vaddps %xmm3, %xmm10, %xmm3
vaddps %xmm3, %xmm1, %xmm1
vmovaps 0x70(%r13,%rsi), %xmm5
vmovaps %xmm5, 0x2a0(%rsp)
vmovaps 0x70(%rbp,%rsi), %xmm0
vmovaps 0x70(%r11,%rsi), %xmm10
vmovaps %xmm1, 0x40(%r9,%rsi)
vmulps 0x50(%rsp), %xmm7, %xmm1
vaddps 0x20(%rsp), %xmm1, %xmm1
vmulps 0x70(%rsp), %xmm8, %xmm3
vmulps %xmm5, %xmm11, %xmm7
vmulps %xmm4, %xmm9, %xmm13
vmovaps %xmm4, %xmm11
vmovaps 0x290(%rsp), %xmm9
vmulps %xmm6, %xmm9, %xmm5
vaddps %xmm5, %xmm3, %xmm3
vmovaps 0x170(%rsp), %xmm8
vmulps %xmm0, %xmm8, %xmm5
vmovaps %xmm0, %xmm4
vmovaps %xmm0, 0x150(%rsp)
vaddps %xmm5, %xmm7, %xmm5
vmulps 0xe0(%rsp), %xmm15, %xmm0
vaddps %xmm0, %xmm13, %xmm0
vaddps %xmm0, %xmm1, %xmm0
vmulps %xmm12, %xmm14, %xmm1
vaddps %xmm1, %xmm3, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vmulps %xmm2, %xmm10, %xmm1
vmovaps %xmm10, %xmm2
vaddps %xmm1, %xmm5, %xmm1
vaddps %xmm1, %xmm0, %xmm3
vmovaps 0x80(%r13,%rsi), %xmm1
vmovaps 0x80(%rbp,%rsi), %xmm10
vmovaps 0x80(%r11,%rsi), %xmm0
vmovaps %xmm0, 0xe0(%rsp)
vmovaps %xmm3, 0x50(%r9,%rsi)
vmovaps 0x2a0(%rsp), %xmm7
vmulps 0x70(%rsp), %xmm7, %xmm3
vmovaps 0x110(%rsp), %xmm13
vmulps %xmm1, %xmm13, %xmm0
vmulps %xmm6, %xmm4, %xmm5
vaddps %xmm5, %xmm3, %xmm3
vmulps %xmm8, %xmm10, %xmm5
vaddps %xmm5, %xmm0, %xmm4
vmovaps %xmm11, %xmm0
vmulps %xmm11, %xmm9, %xmm5
vmulps %xmm15, %xmm14, %xmm14
vmovaps %xmm15, %xmm6
vaddps %xmm5, %xmm14, %xmm5
vmovaps 0x50(%rsp), %xmm14
vmulps 0x160(%rsp), %xmm14, %xmm11
vaddps 0x20(%rsp), %xmm11, %xmm11
vaddps %xmm5, %xmm11, %xmm5
vmulps %xmm2, %xmm12, %xmm11
vmovaps %xmm2, %xmm15
vaddps %xmm3, %xmm11, %xmm3
vaddps %xmm3, %xmm5, %xmm3
vmovaps 0x1f0(%rsp), %xmm2
vmovaps 0xe0(%rsp), %xmm9
vmulps %xmm2, %xmm9, %xmm5
vaddps %xmm5, %xmm4, %xmm4
vaddps %xmm4, %xmm3, %xmm3
vmulps 0x90(%r13,%rsi), %xmm13, %xmm4
vmulps 0x90(%rbp,%rsi), %xmm8, %xmm5
vaddps %xmm5, %xmm4, %xmm4
vmulps 0x90(%r11,%rsi), %xmm2, %xmm5
vaddps %xmm5, %xmm4, %xmm4
vmovaps %xmm3, 0x60(%r9,%rsi)
vmulps 0x70(%rsp), %xmm1, %xmm2
vmulps 0x130(%rsp), %xmm10, %xmm1
vaddps %xmm1, %xmm2, %xmm1
vmulps 0x150(%rsp), %xmm0, %xmm2
vmulps %xmm6, %xmm15, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vmulps %xmm7, %xmm14, %xmm3
vaddps 0x20(%rsp), %xmm3, %xmm3
vaddps %xmm2, %xmm3, %xmm2
vmulps %xmm12, %xmm9, %xmm0
vaddps %xmm0, %xmm1, %xmm0
vaddps %xmm0, %xmm2, %xmm0
vaddps %xmm4, %xmm0, %xmm0
vmovaps %xmm0, 0x70(%r9,%rsi)
addl $0x8, %r12d
addq %r8, %rsi
addq %r8, %r15
addq %r8, %rbx
addq %r8, %rdx
jmp 0x2a5b61
vmovaps 0x50(%rsp), %xmm0
vmulps (%r13,%rsi), %xmm0, %xmm0
vaddps 0x20(%rsp), %xmm0, %xmm5
vmovaps 0x10(%r13,%rsi), %xmm1
vmovaps 0x20(%r13,%rsi), %xmm0
vmovaps 0x30(%r13,%rsi), %xmm2
vmovaps %xmm2, 0xe0(%rsp)
vmulps 0x70(%rsp), %xmm1, %xmm2
vmovaps 0x110(%rsp), %xmm12
vmulps %xmm0, %xmm12, %xmm3
vmovaps %xmm0, %xmm10
vmovaps %xmm0, 0x150(%rsp)
vaddps %xmm3, %xmm2, %xmm2
vmovaps 0xf0(%rsp), %xmm0
vmulps (%rbp,%rsi), %xmm0, %xmm3
vaddps %xmm2, %xmm5, %xmm2
vmovaps 0x10(%rbp,%rsi), %xmm4
vmovaps 0x20(%rbp,%rsi), %xmm13
vmovaps 0x30(%rbp,%rsi), %xmm5
vmovaps %xmm5, 0x160(%rsp)
vmulps 0x130(%rsp), %xmm4, %xmm5
vaddps %xmm5, %xmm3, %xmm3
vmovaps 0x170(%rsp), %xmm9
vmulps %xmm9, %xmm13, %xmm5
vaddps %xmm5, %xmm3, %xmm3
vaddps %xmm3, %xmm2, %xmm3
vmovaps 0x190(%rsp), %xmm11
vmulps (%r11,%rsi), %xmm11, %xmm5
vmovaps 0x10(%r11,%rsi), %xmm14
vmovaps 0x20(%r11,%rsi), %xmm8
vmovaps 0x30(%r11,%rsi), %xmm2
vmovaps 0x270(%rsp), %xmm6
vmulps %xmm6, %xmm14, %xmm15
vaddps %xmm5, %xmm15, %xmm5
vmovaps 0x1f0(%rsp), %xmm7
vmulps %xmm7, %xmm8, %xmm15
vaddps %xmm5, %xmm15, %xmm5
vaddps %xmm5, %xmm3, %xmm3
vmovaps %xmm3, (%r9,%rsi)
vmulps 0x50(%rsp), %xmm1, %xmm1
vaddps 0x20(%rsp), %xmm1, %xmm1
vmulps 0x70(%rsp), %xmm10, %xmm3
vmovaps 0xe0(%rsp), %xmm15
vmulps %xmm12, %xmm15, %xmm5
vmulps %xmm0, %xmm4, %xmm4
vaddps %xmm4, %xmm3, %xmm3
vaddps %xmm3, %xmm1, %xmm10
vmovaps 0x130(%rsp), %xmm1
vmulps %xmm1, %xmm13, %xmm3
vmovaps 0x160(%rsp), %xmm0
vmulps %xmm0, %xmm9, %xmm4
vaddps %xmm4, %xmm5, %xmm4
vmulps %xmm11, %xmm14, %xmm5
vaddps %xmm5, %xmm3, %xmm3
vmulps %xmm6, %xmm8, %xmm5
vaddps %xmm5, %xmm3, %xmm3
vaddps %xmm3, %xmm10, %xmm5
vmulps %xmm7, %xmm2, %xmm3
vmovaps %xmm2, %xmm10
vaddps %xmm3, %xmm4, %xmm3
vaddps %xmm3, %xmm5, %xmm5
vmovaps 0x40(%r13,%rsi), %xmm4
vmovaps 0x40(%rbp,%rsi), %xmm3
vmovaps 0x40(%r11,%rsi), %xmm2
vmovaps %xmm2, 0x260(%rsp)
vmovaps %xmm5, 0x10(%r9,%rsi)
vmulps 0x70(%rsp), %xmm15, %xmm5
vmulps %xmm4, %xmm12, %xmm14
vmulps %xmm1, %xmm0, %xmm15
vaddps %xmm5, %xmm15, %xmm5
vmulps %xmm3, %xmm9, %xmm15
vaddps %xmm15, %xmm14, %xmm14
vmovaps 0xf0(%rsp), %xmm15
vmulps %xmm15, %xmm13, %xmm0
vmulps %xmm11, %xmm8, %xmm2
vaddps %xmm2, %xmm0, %xmm0
vmovaps 0x150(%rsp), %xmm2
vmulps 0x50(%rsp), %xmm2, %xmm2
vmovaps 0x20(%rsp), %xmm13
vaddps %xmm2, %xmm13, %xmm2
vaddps %xmm0, %xmm2, %xmm0
vmulps %xmm6, %xmm10, %xmm2
vaddps %xmm2, %xmm5, %xmm2
vaddps %xmm2, %xmm0, %xmm0
vmovaps 0x260(%rsp), %xmm8
vmulps %xmm7, %xmm8, %xmm2
vaddps %xmm2, %xmm14, %xmm2
vaddps %xmm2, %xmm0, %xmm0
vmulps 0x50(%r13,%rsi), %xmm12, %xmm2
vmulps 0x50(%rbp,%rsi), %xmm9, %xmm5
vaddps %xmm5, %xmm2, %xmm2
vmulps 0x50(%r11,%rsi), %xmm7, %xmm5
vaddps %xmm5, %xmm2, %xmm2
vmovaps %xmm0, 0x20(%r9,%rsi)
vmulps 0x70(%rsp), %xmm4, %xmm0
vmulps %xmm1, %xmm3, %xmm3
vaddps %xmm3, %xmm0, %xmm0
vmulps 0x160(%rsp), %xmm15, %xmm3
vmulps %xmm11, %xmm10, %xmm4
vaddps %xmm4, %xmm3, %xmm3
vmovaps 0xe0(%rsp), %xmm1
vmulps 0x50(%rsp), %xmm1, %xmm4
vmovaps %xmm13, 0x20(%rsp)
vaddps %xmm4, %xmm13, %xmm4
vaddps %xmm3, %xmm4, %xmm3
vmulps %xmm6, %xmm8, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vaddps %xmm0, %xmm3, %xmm0
vaddps %xmm2, %xmm0, %xmm0
vmovaps %xmm0, 0x30(%r9,%rsi)
addl $0x4, %r12d
addq $0x40, %rsi
addq $0x40, %r15
addq $0x40, %rbx
addq $0x40, %rdx
leal 0x3(%r12), %r14d
cmpl %eax, %r14d
jl 0x2a6129
jmp 0x2a6534
vmovaps 0x10(%r13,%rsi), %xmm0
vmovaps %xmm0, 0xe0(%rsp)
vmovaps 0x20(%r13,%rsi), %xmm4
vmovaps 0x10(%rbp,%rsi), %xmm5
vmovaps 0x20(%rbp,%rsi), %xmm3
vmovaps %xmm3, 0x160(%rsp)
vmovaps 0x10(%r11,%rsi), %xmm2
vmovaps 0x50(%rsp), %xmm1
vmulps (%r13,%rsi), %xmm1, %xmm10
vmovaps 0x20(%r11,%rsi), %xmm1
vmovaps %xmm1, 0x150(%rsp)
vmovaps 0x20(%rsp), %xmm14
vaddps %xmm14, %xmm10, %xmm10
vmovaps 0x70(%rsp), %xmm13
vmulps %xmm0, %xmm13, %xmm11
vmovaps 0x110(%rsp), %xmm6
vmulps %xmm6, %xmm4, %xmm12
vaddps %xmm12, %xmm11, %xmm11
vaddps %xmm11, %xmm10, %xmm10
vmovaps 0xf0(%rsp), %xmm7
vmulps (%rbp,%rsi), %xmm7, %xmm11
vmovaps 0x130(%rsp), %xmm8
vmulps %xmm5, %xmm8, %xmm12
vaddps %xmm12, %xmm11, %xmm11
vmovaps 0x170(%rsp), %xmm9
vmulps %xmm3, %xmm9, %xmm12
vaddps %xmm12, %xmm11, %xmm11
vaddps %xmm11, %xmm10, %xmm10
vmovaps 0x190(%rsp), %xmm15
vmulps (%r11,%rsi), %xmm15, %xmm11
vmovaps 0x270(%rsp), %xmm3
vmulps %xmm3, %xmm2, %xmm12
vaddps %xmm12, %xmm11, %xmm11
vmovaps 0x1f0(%rsp), %xmm0
vmulps %xmm0, %xmm1, %xmm12
vaddps %xmm12, %xmm11, %xmm11
vaddps %xmm11, %xmm10, %xmm10
vmulps 0x30(%r13,%rsi), %xmm6, %xmm11
vmulps 0x30(%rbp,%rsi), %xmm9, %xmm12
vaddps %xmm12, %xmm11, %xmm11
vmulps 0x30(%r11,%rsi), %xmm0, %xmm12
vaddps %xmm12, %xmm11, %xmm11
vmovaps %xmm10, (%r9,%rsi)
vmulps %xmm4, %xmm13, %xmm4
vmulps %xmm7, %xmm5, %xmm5
vaddps %xmm5, %xmm4, %xmm4
vmovaps 0xe0(%rsp), %xmm0
vmulps 0x50(%rsp), %xmm0, %xmm0
vaddps %xmm0, %xmm14, %xmm0
vaddps %xmm4, %xmm0, %xmm0
vmulps 0x160(%rsp), %xmm8, %xmm1
vmulps %xmm2, %xmm15, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vmulps 0x150(%rsp), %xmm3, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vaddps %xmm0, %xmm11, %xmm0
vmovaps %xmm0, 0x10(%r9,%rsi)
addl $0x2, %r12d
addq $0x20, %rsi
addq $0x20, %r15
addq $0x20, %rbx
addq $0x20, %rdx
leal 0x1(%r12), %r14d
cmpl %eax, %r14d
jl 0x2a63c0
addq %rsi, %r9
movq 0x10(%rsp), %r14
cmpl %eax, %r12d
jge 0x2a660e
vmovaps 0x50(%rsp), %xmm0
vmulps -0x20(%r15), %xmm0, %xmm0
vaddps 0x20(%rsp), %xmm0, %xmm0
vmovaps 0x70(%rsp), %xmm1
vmulps -0x10(%r15), %xmm1, %xmm1
vmovaps 0x110(%rsp), %xmm2
vmulps (%r15), %xmm2, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vmovaps 0xf0(%rsp), %xmm1
vmulps -0x20(%rbx), %xmm1, %xmm1
vmovaps 0x130(%rsp), %xmm2
vmulps -0x10(%rbx), %xmm2, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vmovaps 0x170(%rsp), %xmm2
vmulps (%rbx), %xmm2, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vmovaps 0x190(%rsp), %xmm2
vmulps -0x20(%rdx), %xmm2, %xmm2
vmovaps 0x270(%rsp), %xmm3
vmulps -0x10(%rdx), %xmm3, %xmm3
vaddps %xmm1, %xmm0, %xmm0
vaddps %xmm3, %xmm2, %xmm1
vmovaps 0x1f0(%rsp), %xmm2
vmulps (%rdx), %xmm2, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vmovaps %xmm0, (%r9)
addq $0x10, %r9
incl %r12d
addq $0x10, %r15
addq $0x10, %rbx
addq $0x10, %rdx
jmp 0x2a654a
incl %r10d
movq %r15, %r13
movq %rbx, %rbp
movq %rdx, %r11
jmp 0x2a5b47
incq %rdi
movq 0x18(%rsp), %r13
movq 0x2b0(%rsp), %rsi
jmp 0x2a5a39
cmpl $0x3, 0xd4(%rdi)
movl %eax, %ecx
jne 0x2a75fb
cmpl $0x3, 0xd8(%rdi)
movl %eax, %ecx
jne 0x2a75fb
cmpl $0x1, 0xdc(%rdi)
movq %r9, 0x50(%rsp)
jne 0x2a742a
cmpl $0x1, 0xe0(%rdi)
jne 0x2a742a
cmpl $0x1, 0xe4(%rdi)
jne 0x2a742a
cmpl $0x1, 0xe8(%rdi)
jne 0x2a742a
movslq 0xbc(%rsp), %rax
movl 0x2c(%r13), %edx
movl 0x30(%r13), %r8d
movl 0xc8(%rsp), %ecx
movq 0x28(%r14), %r10
imulq 0x10(%r13), %r11
movq 0x1b0(%rdi), %rsi
movslq %edx, %rdi
movq 0x90(%rsp), %rbx
movq %rbx, 0x1f0(%rsp)
movq 0xa0(%rsp), %rbx
imulq 0xd0(%rsp), %rbx
movq %rbx, 0x270(%rsp)
leaq (%rax,%rax), %rbx
movq %rbx, 0x190(%rsp)
leaq (%rax,%rax,2), %rbx
movq %rbx, 0xe0(%rsp)
movq %rax, 0x130(%rsp)
addl $0x2, %eax
movslq %eax, %rbp
xorl %eax, %eax
testl %ecx, %ecx
cmovlel %eax, %ecx
movq %rcx, 0xf0(%rsp)
shlq $0x2, %rbp
movq %rdi, 0x10(%rsp)
leaq (,%rdi,4), %rcx
movq %rcx, 0x20(%rsp)
movq %rsi, 0x170(%rsp)
movq %r11, 0x110(%rsp)
cmpq 0xf0(%rsp), %rax
je 0x2a9725
testq %rsi, %rsi
je 0x2a6753
vmovss (%rsi,%rax,4), %xmm0
jmp 0x2a6757
vxorps %xmm0, %xmm0, %xmm0
movq %r11, %rsi
imulq %rax, %rsi
addq %r9, %rsi
imulq $0x24, %rax, %r13
movq 0x270(%rsp), %r15
movq %rax, 0x70(%rsp)
imulq %rax, %r15
addq 0x1f0(%rsp), %r15
movq 0x130(%rsp), %rax
leaq (%r15,%rax,4), %rax
movq 0x190(%rsp), %rcx
leaq (%r15,%rcx,4), %r9
movq 0xe0(%rsp), %rcx
leaq (%r15,%rcx,4), %r11
xorl %edi, %edi
movq %rsi, %rbx
movl %edi, %ecx
orl $0x1, %ecx
cmpl %r8d, %ecx
jge 0x2a68d4
movq 0x10(%rsp), %rcx
leaq (%rsi,%rcx,4), %rsi
xorl %r12d, %r12d
movl %edx, %ecx
testl %ecx, %ecx
jle 0x2a68a9
vmovsd 0x4(%r15,%r12), %xmm1
vmovss (%r15,%r12), %xmm2
vmovlhps %xmm1, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm1[0]
vshufps $0xd8, %xmm1, %xmm2, %xmm2 # xmm2 = xmm2[0,2],xmm1[1,3]
vmovss (%rax,%r12), %xmm1
vinsertps $0x30, %xmm1, %xmm2, %xmm2 # xmm2 = xmm2[0,1,2],xmm1[0]
vmovsd 0x4(%rax,%r12), %xmm3
vmovss (%r9,%r12), %xmm4
vinsertps $0x20, %xmm4, %xmm3, %xmm5 # xmm5 = xmm3[0,1],xmm4[0],xmm3[3]
vinsertps $0x30, 0x4(%r9,%r12), %xmm5, %xmm5 # xmm5 = xmm5[0,1,2],mem[0]
vmovups 0x10(%r10,%r13), %xmm6
vmovss 0x20(%r10,%r13), %xmm7
vmulss 0x8(%r9,%r12), %xmm7, %xmm8
vmulps %xmm5, %xmm6, %xmm5
vhaddps %xmm5, %xmm5, %xmm5
vmovups (%r10,%r13), %xmm9
vhaddps %xmm5, %xmm5, %xmm5
vmulps %xmm2, %xmm9, %xmm2
vhaddps %xmm2, %xmm2, %xmm2
vhaddps %xmm2, %xmm2, %xmm2
vaddss %xmm2, %xmm8, %xmm2
vmovlhps %xmm3, %xmm9, %xmm8 # xmm8 = xmm9[0],xmm3[0]
vshufps $0xd8, %xmm3, %xmm8, %xmm3 # xmm3 = xmm8[0,2],xmm3[1,3]
vinsertps $0x30, %xmm4, %xmm3, %xmm3 # xmm3 = xmm3[0,1,2],xmm4[0]
vmovsd 0x4(%r9,%r12), %xmm4
vmovhps (%r11,%r12), %xmm4, %xmm4 # xmm4 = xmm4[0,1],mem[0,1]
vmulss 0x8(%r11,%r12), %xmm7, %xmm7
vmulps %xmm6, %xmm4, %xmm4
vhaddps %xmm4, %xmm4, %xmm4
vmovss %xmm1, %xmm9, %xmm1 # xmm1 = xmm1[0],xmm9[1,2,3]
vhaddps %xmm4, %xmm4, %xmm4
vmulps %xmm1, %xmm3, %xmm1
vhaddps %xmm1, %xmm1, %xmm1
vaddss %xmm0, %xmm2, %xmm2
vhaddps %xmm1, %xmm1, %xmm1
vaddss %xmm2, %xmm5, %xmm2
vaddss %xmm7, %xmm4, %xmm3
vaddss %xmm0, %xmm1, %xmm1
vaddss %xmm1, %xmm3, %xmm1
vmovss %xmm2, (%rbx,%r12)
vmovss %xmm1, (%rsi,%r12)
decl %ecx
addq $0x4, %r12
jmp 0x2a67c3
addq %rbp, %r15
addq %r12, %r15
addq %rbp, %rax
addq %r12, %rax
addq %rbp, %r9
addq %r12, %r9
addq %rbp, %r11
addq %r12, %r11
addq 0x20(%rsp), %rbx
addq %r12, %rbx
addl $0x2, %edi
addq %r12, %rsi
jmp 0x2a67a7
movq 0x110(%rsp), %r11
cmpl %r8d, %edi
jge 0x2a6979
xorl %ecx, %ecx
movl %edx, %esi
testl %esi, %esi
jle 0x2a695a
vmovsd (%r15,%rcx), %xmm1
vinsertps $0x20, 0x8(%r15,%rcx), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
vinsertps $0x30, (%rax,%rcx), %xmm1, %xmm1 # xmm1 = xmm1[0,1,2],mem[0]
vmovsd 0x4(%rax,%rcx), %xmm2
vinsertf128 $0x1, %xmm2, %ymm1, %ymm2
vmovsd (%r9,%rcx), %xmm3
vinsertf128 $0x1, %xmm3, %ymm1, %ymm1
vshufpd $0x2, %ymm1, %ymm2, %ymm1 # ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[2]
vmulps (%r10,%r13), %ymm1, %ymm1
vmovss 0x20(%r10,%r13), %xmm2
vmulss 0x8(%r9,%rcx), %xmm2, %xmm2
vextractf128 $0x1, %ymm1, %xmm3
vhaddps %xmm1, %xmm3, %xmm1
vhaddps %xmm1, %xmm1, %xmm1
vhaddps %xmm1, %xmm1, %xmm1
vaddss %xmm0, %xmm2, %xmm2
vaddss %xmm1, %xmm2, %xmm1
vmovss %xmm1, (%rbx,%rcx)
decl %esi
addq $0x4, %rcx
jmp 0x2a68e9
addq %rcx, %r15
addq $0x8, %r15
addq %rcx, %rax
addq $0x8, %rax
addq %rcx, %r9
addq $0x8, %r9
incl %edi
addq %rcx, %rbx
jmp 0x2a68dc
movq 0x70(%rsp), %rax
incq %rax
movq 0x18(%rsp), %r13
movq 0x50(%rsp), %r9
movq 0x170(%rsp), %rsi
jmp 0x2a6739
cmpl $0x5, %r15d
jne 0x2a841c
cmpl $0x1, 0xdc(%rdi)
jne 0x2a80c5
cmpl $0x1, 0xe0(%rdi)
jne 0x2a80c5
cmpl $0x1, 0xe4(%rdi)
jne 0x2a80c5
cmpl $0x1, 0xe8(%rdi)
jne 0x2a80c5
movl 0x2c(%r13), %r8d
movl 0x30(%r13), %eax
movl %eax, 0x10(%rsp)
movl 0xbc(%rsp), %eax
movl 0xc8(%rsp), %ecx
movq 0x1b0(%rdi), %rdx
leal 0x10(,%rax,4), %eax
movslq %eax, %rdi
leal (,%r8,4), %eax
cltq
xorl %esi, %esi
testl %r8d, %r8d
cmovlel %esi, %r8d
testl %ecx, %ecx
cmovlel %esi, %ecx
movq %rcx, 0xf0(%rsp)
shlq $0x2, %rax
movq %rax, 0x50(%rsp)
shlq $0x2, %rdi
movq %rdx, 0x110(%rsp)
movq %r8, 0x20(%rsp)
cmpq 0xf0(%rsp), %rsi
je 0x2a9725
testq %rdx, %rdx
je 0x2a6a5c
movq %rsi, %rax
shlq $0x4, %rax
vmovups (%rdx,%rax), %xmm0
jmp 0x2a6a60
vxorps %xmm0, %xmm0, %xmm0
movq 0x40(%r13), %r10
imulq %rsi, %r10
movq 0x10(%r13), %rax
imulq %rax, %r10
addq (%r13), %r10
movslq 0x2c(%r13), %r9
movq 0x28(%r14), %r11
movslq 0x54(%r14), %rbx
imulq %rsi, %rbx
imulq 0x38(%r14), %rbx
imulq %rax, %r9
addq %r10, %r9
movslq 0xbc(%rsp), %rax
movq 0xd0(%rsp), %r15
movq %rsi, 0x70(%rsp)
imulq %rsi, %r15
movq 0xa0(%rsp), %rcx
imulq %rcx, %r15
addq 0x90(%rsp), %r15
imulq %rcx, %rax
leaq (%r15,%rax), %r12
leaq (%r15,%rax,2), %r13
leaq (%rax,%rax,2), %rbp
addq %r15, %rbp
leaq (%r15,%rax,4), %rdx
leaq (%rax,%rax,4), %rax
addq %r15, %rax
xorl %esi, %esi
movl %esi, %ecx
orl $0x1, %ecx
cmpl 0x10(%rsp), %ecx
jge 0x2a70a2
movl %r8d, %ecx
xorl %r8d, %r8d
subl $0x1, %ecx
jb 0x2a6e36
vmovaps (%r11,%rbx), %xmm1
vmovaps 0x10(%r11,%rbx), %xmm2
vmovaps 0x20(%r11,%rbx), %xmm3
vmovaps 0x30(%r11,%rbx), %xmm4
vmulps (%r15,%r8), %xmm1, %xmm5
vmovaps 0x40(%r11,%rbx), %xmm6
vaddps %xmm0, %xmm5, %xmm5
vmulps 0x10(%r15,%r8), %xmm2, %xmm7
vmulps 0x20(%r15,%r8), %xmm3, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vaddps %xmm7, %xmm5, %xmm5
vmulps 0x30(%r15,%r8), %xmm4, %xmm7
vmulps 0x40(%r15,%r8), %xmm6, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vmovaps (%r12,%r8), %xmm8
vmovaps 0x10(%r12,%r8), %xmm9
vmovaps 0x20(%r12,%r8), %xmm10
vmovaps 0x30(%r12,%r8), %xmm11
vmovaps 0x40(%r12,%r8), %xmm12
vmulps %xmm1, %xmm8, %xmm1
vaddps %xmm0, %xmm1, %xmm1
vmulps %xmm2, %xmm9, %xmm2
vmulps %xmm3, %xmm10, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vmulps %xmm4, %xmm11, %xmm2
vmulps %xmm6, %xmm12, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vmovaps 0x50(%r11,%rbx), %xmm3
vmovaps 0x60(%r11,%rbx), %xmm4
vmovaps 0x70(%r11,%rbx), %xmm6
vmovaps 0x80(%r11,%rbx), %xmm13
vmovaps 0x90(%r11,%rbx), %xmm14
vmulps %xmm3, %xmm8, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vaddps %xmm7, %xmm5, %xmm5
vmulps %xmm4, %xmm9, %xmm7
vmulps %xmm6, %xmm10, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vmulps %xmm11, %xmm13, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vmulps %xmm12, %xmm14, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vaddps %xmm7, %xmm5, %xmm5
vmovaps (%r13,%r8), %xmm7
vmovaps 0x10(%r13,%r8), %xmm8
vmovaps 0x20(%r13,%r8), %xmm9
vmovaps 0x30(%r13,%r8), %xmm10
vmovaps 0x40(%r13,%r8), %xmm11
vmulps %xmm3, %xmm7, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vmulps %xmm4, %xmm8, %xmm2
vmulps %xmm6, %xmm9, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vmulps %xmm13, %xmm10, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vmulps %xmm14, %xmm11, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vaddps %xmm2, %xmm1, %xmm2
vmovaps 0xa0(%r11,%rbx), %xmm3
vmovaps 0xb0(%r11,%rbx), %xmm4
vmovaps 0xc0(%r11,%rbx), %xmm6
vmovaps 0xd0(%r11,%rbx), %xmm12
vmovaps 0xe0(%r11,%rbx), %xmm13
vmulps %xmm7, %xmm3, %xmm1
vmulps %xmm4, %xmm8, %xmm7
vaddps %xmm7, %xmm1, %xmm1
vmulps %xmm6, %xmm9, %xmm7
vaddps %xmm7, %xmm1, %xmm1
vmulps %xmm10, %xmm12, %xmm7
vaddps %xmm7, %xmm1, %xmm1
vmulps %xmm11, %xmm13, %xmm7
vaddps %xmm7, %xmm1, %xmm1
vaddps %xmm1, %xmm5, %xmm1
vmovaps (%rbp,%r8), %xmm5
vmovaps 0x10(%rbp,%r8), %xmm7
vmovaps 0x20(%rbp,%r8), %xmm8
vmovaps 0x30(%rbp,%r8), %xmm9
vmovaps 0x40(%rbp,%r8), %xmm10
vmulps %xmm3, %xmm5, %xmm3
vmulps %xmm4, %xmm7, %xmm4
vaddps %xmm4, %xmm3, %xmm3
vmulps %xmm6, %xmm8, %xmm4
vaddps %xmm4, %xmm3, %xmm3
vmulps %xmm12, %xmm9, %xmm4
vaddps %xmm4, %xmm3, %xmm3
vmulps %xmm13, %xmm10, %xmm4
vaddps %xmm4, %xmm3, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vmovaps 0xf0(%r11,%rbx), %xmm3
vmovaps 0x100(%r11,%rbx), %xmm4
vmovaps 0x110(%r11,%rbx), %xmm6
vmovaps 0x120(%r11,%rbx), %xmm11
vmovaps 0x130(%r11,%rbx), %xmm12
vmulps %xmm5, %xmm3, %xmm5
vmulps %xmm7, %xmm4, %xmm7
vaddps %xmm7, %xmm5, %xmm5
vmulps %xmm6, %xmm8, %xmm7
vaddps %xmm7, %xmm5, %xmm5
vmulps %xmm9, %xmm11, %xmm7
vaddps %xmm7, %xmm5, %xmm5
vmulps %xmm10, %xmm12, %xmm7
vaddps %xmm7, %xmm5, %xmm5
vmovaps (%rdx,%r8), %xmm7
vmovaps 0x10(%rdx,%r8), %xmm8
vmovaps 0x20(%rdx,%r8), %xmm9
vmovaps 0x30(%rdx,%r8), %xmm10
vmovaps 0x40(%rdx,%r8), %xmm13
vmulps %xmm3, %xmm7, %xmm3
vmulps %xmm4, %xmm8, %xmm4
vaddps %xmm4, %xmm3, %xmm3
vmulps %xmm6, %xmm9, %xmm4
vaddps %xmm4, %xmm3, %xmm3
vmulps %xmm11, %xmm10, %xmm4
vaddps %xmm4, %xmm3, %xmm3
vmulps %xmm12, %xmm13, %xmm4
vaddps %xmm4, %xmm3, %xmm3
vmovaps 0x140(%r11,%rbx), %xmm4
vmovaps 0x150(%r11,%rbx), %xmm6
vmovaps 0x160(%r11,%rbx), %xmm11
vmovaps 0x170(%r11,%rbx), %xmm12
vmovaps 0x180(%r11,%rbx), %xmm14
vmulps %xmm7, %xmm4, %xmm7
vaddps %xmm7, %xmm5, %xmm5
vaddps %xmm5, %xmm1, %xmm1
vmulps %xmm6, %xmm8, %xmm5
vmulps %xmm9, %xmm11, %xmm7
vaddps %xmm7, %xmm5, %xmm5
vmulps %xmm10, %xmm12, %xmm7
vaddps %xmm7, %xmm5, %xmm5
vmulps %xmm13, %xmm14, %xmm7
vaddps %xmm7, %xmm5, %xmm5
vaddps %xmm5, %xmm1, %xmm1
vmulps (%rax,%r8), %xmm4, %xmm4
vaddps %xmm4, %xmm3, %xmm3
vmulps 0x10(%rax,%r8), %xmm6, %xmm4
vmulps 0x20(%rax,%r8), %xmm11, %xmm5
vaddps %xmm3, %xmm2, %xmm2
vaddps %xmm5, %xmm4, %xmm3
vmulps 0x30(%rax,%r8), %xmm12, %xmm4
vaddps %xmm4, %xmm3, %xmm3
vmulps 0x40(%rax,%r8), %xmm14, %xmm4
vaddps %xmm4, %xmm3, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vmovaps %xmm1, (%r10,%r8)
vmovaps %xmm2, (%r9,%r8)
addq $0x10, %r8
jmp 0x2a6af2
addq %rdi, %r15
addq %r8, %r15
addq %rdi, %r12
addq %r8, %r12
addq %rdi, %r13
addq %r8, %r13
addq %rdi, %rbp
addq %r8, %rbp
addq %rdi, %rdx
addq %r8, %rdx
addq %rdi, %rax
addq %r8, %rax
movq 0x50(%rsp), %rcx
addq %rcx, %r10
addq %r8, %r10
addq %rcx, %r9
addq %r8, %r9
addl $0x2, %esi
movq 0x20(%rsp), %r8
jmp 0x2a6add
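# What follows appears to be the single-row tail of the 5x5s1 pack4 kernel above: one xmm accumulator and one store per output instead of the paired (%r10)/(%r9) stores.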
movl %r8d, %ecx
xorl %eax, %eax
subl $0x1, %ecx
jb 0x2a707c
vmovaps (%r11,%rbx), %xmm1
vmovaps 0x10(%r11,%rbx), %xmm2
vmovaps 0x20(%r11,%rbx), %xmm3
vmovaps 0x30(%r11,%rbx), %xmm4
vmulps (%r15,%rax), %xmm1, %xmm1
vmovaps 0x40(%r11,%rbx), %xmm5
vaddps %xmm0, %xmm1, %xmm1
vmulps 0x10(%r15,%rax), %xmm2, %xmm2
vmulps 0x20(%r15,%rax), %xmm3, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vmulps 0x30(%r15,%rax), %xmm4, %xmm2
vmulps 0x40(%r15,%rax), %xmm5, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vmovaps 0x50(%r11,%rbx), %xmm3
vmovaps 0x60(%r11,%rbx), %xmm4
vmovaps 0x70(%r11,%rbx), %xmm5
vmovaps 0x80(%r11,%rbx), %xmm6
vmovaps 0x90(%r11,%rbx), %xmm7
vmulps (%r12,%rax), %xmm3, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vmulps 0x10(%r12,%rax), %xmm4, %xmm2
vmulps 0x20(%r12,%rax), %xmm5, %xmm3
vmulps 0x30(%r12,%rax), %xmm6, %xmm4
vaddps %xmm3, %xmm2, %xmm2
vaddps %xmm4, %xmm2, %xmm2
vmulps 0x40(%r12,%rax), %xmm7, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vmovaps 0xa0(%r11,%rbx), %xmm2
vmovaps 0xb0(%r11,%rbx), %xmm3
vmovaps 0xc0(%r11,%rbx), %xmm4
vmovaps 0xd0(%r11,%rbx), %xmm5
vmulps (%r13,%rax), %xmm2, %xmm2
vmovaps 0xe0(%r11,%rbx), %xmm6
vmulps 0x10(%r13,%rax), %xmm3, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vmulps 0x20(%r13,%rax), %xmm4, %xmm3
vmulps 0x30(%r13,%rax), %xmm5, %xmm4
vaddps %xmm3, %xmm2, %xmm2
vaddps %xmm4, %xmm2, %xmm2
vmulps 0x40(%r13,%rax), %xmm6, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vmovaps 0xf0(%r11,%rbx), %xmm2
vmovaps 0x100(%r11,%rbx), %xmm3
vmovaps 0x110(%r11,%rbx), %xmm4
vmovaps 0x120(%r11,%rbx), %xmm5
vmulps (%rbp,%rax), %xmm2, %xmm2
vmovaps 0x130(%r11,%rbx), %xmm6
vmulps 0x10(%rbp,%rax), %xmm3, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vmulps 0x20(%rbp,%rax), %xmm4, %xmm3
vmulps 0x30(%rbp,%rax), %xmm5, %xmm4
vaddps %xmm3, %xmm2, %xmm2
vaddps %xmm4, %xmm2, %xmm2
vmulps 0x40(%rbp,%rax), %xmm6, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vmovaps 0x140(%r11,%rbx), %xmm3
vmovaps 0x150(%r11,%rbx), %xmm4
vmovaps 0x160(%r11,%rbx), %xmm5
vmovaps 0x170(%r11,%rbx), %xmm6
vmovaps 0x180(%r11,%rbx), %xmm7
vmulps (%rdx,%rax), %xmm3, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vmulps 0x10(%rdx,%rax), %xmm4, %xmm2
vmulps 0x20(%rdx,%rax), %xmm5, %xmm3
vmulps 0x30(%rdx,%rax), %xmm6, %xmm4
vaddps %xmm3, %xmm2, %xmm2
vaddps %xmm4, %xmm2, %xmm2
vmulps 0x40(%rdx,%rax), %xmm7, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vmovaps %xmm1, (%r10,%rax)
addq $0x10, %rax
jmp 0x2a6e7d
addq %rax, %r15
addq $0x40, %r15
leaq 0x40(%r12,%rax), %r12
addq %rax, %r13
addq $0x40, %r13
addq %rax, %rbp
addq $0x40, %rbp
addq %rax, %rdx
addq $0x40, %rdx
incl %esi
addq %rax, %r10
cmpl 0x10(%rsp), %esi
jl 0x2a6e78
movq 0x70(%rsp), %rsi
incq %rsi
movq 0x18(%rsp), %r13
movq 0x110(%rsp), %rdx
jmp 0x2a6a3b
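# Dispatch for the next specialization: kernel==5 (r15d) with fields 0xdc-0xe8 (presumably dilation_w/h and stride_w/h) all == 1, i.e. a 5x5 stride-1 pack8 (ymm) depthwise path with 25 32-byte taps.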
cmpl $0x5, %r15d
jne 0x2a93b2
cmpl $0x1, 0xdc(%rdi)
jne 0x2a9040
cmpl $0x1, 0xe0(%rdi)
jne 0x2a9040
cmpl $0x1, 0xe4(%rdi)
jne 0x2a9040
cmpl $0x1, 0xe8(%rdi)
jne 0x2a9040
movl 0x2c(%r13), %esi
movl 0x30(%r13), %ecx
movl 0xc8(%rsp), %eax
xorl %edx, %edx
testl %esi, %esi
cmovlel %edx, %esi
movq 0x1b0(%rdi), %rdi
testl %ecx, %ecx
cmovlel %edx, %ecx
testl %eax, %eax
cmovlel %edx, %eax
movq %rax, 0x20(%rsp)
movl $0x80, %r8d
movq %rdi, 0x50(%rsp)
cmpq 0x20(%rsp), %rdx
je 0x2a9725
testq %rdi, %rdi
je 0x2a7159
movq %rdx, %rax
shlq $0x5, %rax
vmovups (%rdi,%rax), %ymm0
jmp 0x2a715d
vxorps %xmm0, %xmm0, %xmm0
movq 0x40(%r13), %r9
imulq %rdx, %r9
imulq 0x10(%r13), %r9
addq (%r13), %r9
movq 0x28(%r14), %r10
movslq 0x54(%r14), %r11
imulq %rdx, %r11
imulq 0x38(%r14), %r11
movq 0xd0(%rsp), %rbx
movq %rdx, 0x10(%rsp)
imulq %rdx, %rbx
movq 0xa0(%rsp), %rax
imulq %rax, %rbx
addq 0x90(%rsp), %rbx
movslq 0xbc(%rsp), %rdx
imulq %rax, %rdx
leaq (%rbx,%rdx), %r15
leaq (%rbx,%rdx,2), %r12
leaq (%rdx,%rdx,2), %r13
addq %rbx, %r13
leaq (%rbx,%rdx,4), %rbp
xorl %edx, %edx
cmpl %ecx, %edx
je 0x2a7413
movl %esi, %eax
xorl %edi, %edi
subl $0x1, %eax
jb 0x2a73eb
vmovaps (%r10,%r11), %ymm1
vmovaps 0x20(%r10,%r11), %ymm2
vmovaps 0x40(%r10,%r11), %ymm3
vmovaps 0x60(%r10,%r11), %ymm4
vmulps (%rbx,%rdi), %ymm1, %ymm1
vmovaps 0x80(%r10,%r11), %ymm5
vaddps %ymm0, %ymm1, %ymm1
vmulps 0x20(%rbx,%rdi), %ymm2, %ymm2
vmulps 0x40(%rbx,%rdi), %ymm3, %ymm3
vaddps %ymm3, %ymm2, %ymm2
vaddps %ymm2, %ymm1, %ymm1
vmulps 0x60(%rbx,%rdi), %ymm4, %ymm2
vmulps 0x80(%rbx,%rdi), %ymm5, %ymm3
vaddps %ymm3, %ymm2, %ymm2
vmovaps 0xa0(%r10,%r11), %ymm3
vmovaps 0xc0(%r10,%r11), %ymm4
vmovaps 0xe0(%r10,%r11), %ymm5
vmovaps 0x100(%r10,%r11), %ymm6
vmovaps 0x120(%r10,%r11), %ymm7
vmulps (%r15,%rdi), %ymm3, %ymm3
vaddps %ymm3, %ymm2, %ymm2
vaddps %ymm2, %ymm1, %ymm1
vmulps 0x20(%r15,%rdi), %ymm4, %ymm2
vmulps 0x40(%r15,%rdi), %ymm5, %ymm3
vmulps 0x60(%r15,%rdi), %ymm6, %ymm4
vaddps %ymm3, %ymm2, %ymm2
vaddps %ymm4, %ymm2, %ymm2
vmulps 0x80(%r15,%rdi), %ymm7, %ymm3
vaddps %ymm3, %ymm2, %ymm2
vaddps %ymm2, %ymm1, %ymm1
vmovaps 0x140(%r10,%r11), %ymm2
vmovaps 0x160(%r10,%r11), %ymm3
vmovaps 0x180(%r10,%r11), %ymm4
vmovaps 0x1a0(%r10,%r11), %ymm5
vmulps (%r12,%rdi), %ymm2, %ymm2
vmovaps 0x1c0(%r10,%r11), %ymm6
vmulps 0x20(%r12,%rdi), %ymm3, %ymm3
vaddps %ymm3, %ymm2, %ymm2
vmulps 0x40(%r12,%rdi), %ymm4, %ymm3
vmulps 0x60(%r12,%rdi), %ymm5, %ymm4
vaddps %ymm3, %ymm2, %ymm2
vaddps %ymm4, %ymm2, %ymm2
vmulps 0x80(%r12,%rdi), %ymm6, %ymm3
vaddps %ymm3, %ymm2, %ymm2
vaddps %ymm2, %ymm1, %ymm1
vmovaps 0x1e0(%r10,%r11), %ymm2
vmovaps 0x200(%r10,%r11), %ymm3
vmovaps 0x220(%r10,%r11), %ymm4
vmovaps 0x240(%r10,%r11), %ymm5
vmulps (%r13,%rdi), %ymm2, %ymm2
vmovaps 0x260(%r10,%r11), %ymm6
vmulps 0x20(%r13,%rdi), %ymm3, %ymm3
vaddps %ymm3, %ymm2, %ymm2
vmulps 0x40(%r13,%rdi), %ymm4, %ymm3
vmulps 0x60(%r13,%rdi), %ymm5, %ymm4
vaddps %ymm3, %ymm2, %ymm2
vaddps %ymm4, %ymm2, %ymm2
vmulps 0x80(%r13,%rdi), %ymm6, %ymm3
vaddps %ymm3, %ymm2, %ymm2
vmovaps 0x280(%r10,%r11), %ymm3
vmovaps 0x2a0(%r10,%r11), %ymm4
vmovaps 0x2c0(%r10,%r11), %ymm5
vmovaps 0x2e0(%r10,%r11), %ymm6
vmovaps 0x300(%r10,%r11), %ymm7
vmulps (%rbp,%rdi), %ymm3, %ymm3
vaddps %ymm3, %ymm2, %ymm2
vaddps %ymm2, %ymm1, %ymm1
vmulps 0x20(%rbp,%rdi), %ymm4, %ymm2
vmulps 0x40(%rbp,%rdi), %ymm5, %ymm3
vmulps 0x60(%rbp,%rdi), %ymm6, %ymm4
vaddps %ymm3, %ymm2, %ymm2
vaddps %ymm4, %ymm2, %ymm2
vmulps 0x80(%rbp,%rdi), %ymm7, %ymm3
vaddps %ymm3, %ymm2, %ymm2
vaddps %ymm2, %ymm1, %ymm1
vmovaps %ymm1, (%r9,%rdi)
addq $0x20, %rdi
jmp 0x2a71d1
addq %rdi, %rbx
addq %r8, %rbx
addq %rdi, %r15
addq %r8, %r15
addq %rdi, %r12
addq %r8, %r12
addq %rdi, %r13
addq %r8, %r13
addq %rdi, %rbp
addq %r8, %rbp
incl %edx
addq %rdi, %r9
jmp 0x2a71c5
movq 0x10(%rsp), %rdx
incq %rdx
movq 0x18(%rsp), %r13
movq 0x50(%rsp), %rdi
jmp 0x2a713b
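# Same field checks but with 0xe4/0xe8 == 2 (stride 2); the scalar-gather loop below, with its 0x24-byte (9-float) kernel and vhaddps reduction, looks like a 3x3 stride-2 pack1 path.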
cmpl $0x1, 0xdc(%rdi)
movl %eax, %ecx
jne 0x2a75fb
cmpl $0x1, 0xe0(%rdi)
movl %eax, %ecx
jne 0x2a75fb
cmpl $0x2, 0xe4(%rdi)
movl %eax, %ecx
jne 0x2a75fb
cmpl $0x2, 0xe8(%rdi)
movl %eax, %ecx
jne 0x2a75fb
movslq 0xbc(%rsp), %rdx
movl 0x2c(%r13), %eax
movl 0x30(%r13), %r8d
movl 0xc8(%rsp), %esi
movl %edx, %ecx
subl %eax, %ecx
addl %ecx, %ecx
movq 0x28(%r14), %r10
movq 0x1b0(%rdi), %rdi
imulq 0x10(%r13), %r11
movq 0x90(%rsp), %rbx
movq %rbx, 0xf0(%rsp)
movq 0xa0(%rsp), %rbx
imulq 0xd0(%rsp), %rbx
movq %rbx, 0x110(%rsp)
movq %rdx, 0x20(%rsp)
addq %rdx, %rdx
movq %rdx, 0x130(%rsp)
movslq %ecx, %r15
xorl %r12d, %r12d
testl %r8d, %r8d
cmovlel %r12d, %r8d
testl %esi, %esi
cmovlel %r12d, %esi
movq %rsi, 0x10(%rsp)
shlq $0x2, %r15
movq %rdi, 0x70(%rsp)
cmpq 0x10(%rsp), %r12
je 0x2a9725
testq %rdi, %rdi
je 0x2a7505
vmovss (%rdi,%r12,4), %xmm0
jmp 0x2a7509
vxorps %xmm0, %xmm0, %xmm0
movq %r11, %rdi
movq %r11, %r13
imulq %r12, %r13
addq %r9, %r13
imulq $0x24, %r12, %rbp
movq 0x110(%rsp), %r11
imulq %r12, %r11
addq 0xf0(%rsp), %r11
movq 0x20(%rsp), %rcx
leaq (%r11,%rcx,4), %rsi
movq 0x130(%rsp), %rcx
leaq (%r11,%rcx,4), %rbx
xorl %r9d, %r9d
cmpl %r8d, %r9d
je 0x2a75e1
xorl %edx, %edx
movl %eax, %ecx
testl %ecx, %ecx
jle 0x2a75c7
vmovsd (%r11,%rdx), %xmm1
vinsertps $0x20, 0x8(%r11,%rdx), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
vinsertps $0x30, (%rsi,%rdx), %xmm1, %xmm1 # xmm1 = xmm1[0,1,2],mem[0]
vmovsd 0x4(%rsi,%rdx), %xmm2
vinsertf128 $0x1, %xmm2, %ymm1, %ymm2
vmovsd (%rbx,%rdx), %xmm3
vinsertf128 $0x1, %xmm3, %ymm1, %ymm1
vshufpd $0x2, %ymm1, %ymm2, %ymm1 # ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[2]
vmulps (%r10,%rbp), %ymm1, %ymm1
vmovss 0x20(%r10,%rbp), %xmm2
vmulss 0x8(%rbx,%rdx), %xmm2, %xmm2
vextractf128 $0x1, %ymm1, %xmm3
vhaddps %xmm1, %xmm3, %xmm1
vaddss %xmm0, %xmm2, %xmm2
vhaddps %xmm1, %xmm1, %xmm1
vhaddps %xmm1, %xmm1, %xmm1
vaddss %xmm1, %xmm2, %xmm1
vmovss %xmm1, (%r13)
addq $0x4, %r13
decl %ecx
addq $0x8, %rdx
jmp 0x2a7553
addq %r15, %r11
addq %rdx, %r11
addq %r15, %rsi
addq %rdx, %rsi
addq %r15, %rbx
addq %rdx, %rbx
incl %r9d
jmp 0x2a7546
incq %r12
movq 0x18(%rsp), %r13
movq 0x50(%rsp), %r9
movq %rdi, %r11
movq 0x70(%rsp), %rdi
jmp 0x2a74ed
cltd
idivl %esi
movl %eax, %edi
movl %ecx, %eax
cltd
idivl %esi
movl %eax, %r8d
movq 0x48(%rsp), %rax
cmpb $0x1, 0x27(%rax)
jne 0x2a7641
xorl %eax, %eax
testb $0x3, %dil
sete %al
testb $0x7, %dil
leal 0x1(%rax,%rax,2), %eax
cmovel %ebp, %eax
movl %eax, 0x20(%rsp)
xorl %eax, %eax
testb $0x3, %r8b
sete %al
testb $0x7, %r8b
leal 0x1(%rax,%rax,2), %eax
cmovel %ebp, %eax
jmp 0x2a7648
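# Above: per-group channel counts via idivl, then elempack selection when Option byte 0x27 (use_packing_layout, presumably) is set: 8 if divisible by 8, else 4 if divisible by 4, else 1.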
pushq $0x1
popq %rax
movl %eax, 0x20(%rsp)
movl %eax, 0x10(%rsp)
movq 0x70(%rsp), %r15
movq 0x98(%rsp), %rax
vmovaps 0x90(%rsp), %xmm0
vmovaps %xmm0, 0x210(%rsp)
movq 0xa0(%rsp), %rcx
movq %rcx, 0x220(%rsp)
movl 0xa8(%rsp), %ecx
movl %ecx, 0x228(%rsp)
movq 0xb0(%rsp), %rcx
movq %rcx, 0x230(%rsp)
vmovups 0xb8(%rsp), %xmm0
vmovups %xmm0, 0x238(%rsp)
movl 0xc8(%rsp), %ecx
movl %ecx, 0x248(%rsp)
movq 0xd0(%rsp), %rcx
movq %rcx, 0x250(%rsp)
testq %rax, %rax
je 0x2a76d1
lock
incl (%rax)
cmpl 0x20(%rsp), %r10d
jle 0x2a7725
movl %r8d, %ebx
movl %edi, %ebp
movq 0x48(%rsp), %rax
vmovups (%rax), %ymm0
vmovups 0x20(%rax), %ymm1
leaq 0x1a0(%rsp), %rcx
vmovups %ymm1, 0x20(%rcx)
vmovups %ymm0, (%rcx)
movq 0x10(%rax), %rax
movq %rax, 0x8(%rcx)
leaq 0x90(%rsp), %rdi
leaq 0x210(%rsp), %rsi
movl 0x20(%rsp), %edx
vzeroupper
callq 0x64e3b
movl %ebp, %edi
movl %ebx, %r8d
movq 0x18(%rsp), %rdx
movq 0x8(%rdx), %rax
vmovups (%rdx), %xmm0
vmovaps %xmm0, 0x1a0(%rsp)
movq 0x10(%rdx), %rcx
movq %rcx, 0x1b0(%rsp)
movl 0x18(%rdx), %ecx
movl %ecx, 0x1b8(%rsp)
movq 0x20(%rdx), %rcx
movq %rcx, 0x1c0(%rsp)
vmovups 0x28(%rdx), %xmm0
vmovups %xmm0, 0x1c8(%rsp)
movl 0x38(%rdx), %ecx
movl %ecx, 0x1d8(%rsp)
movq 0x40(%rdx), %rcx
movq %rcx, 0x1e0(%rsp)
testq %rax, %rax
je 0x2a7789
lock
incl (%rax)
cmpl %r15d, 0x10(%rsp)
jae 0x2a7821
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xd0(%r14,%rax), %eax
cltd
movl 0x10(%rsp), %r9d
idivl %r9d
movl %eax, %ecx
movq 0x170(%rsp), %rax
xorl %edx, %edx
divq %r15
movl %edi, %ebp
movl %r8d, %ebx
movl %r9d, %r8d
imulq %rax, %r8
movq 0x48(%rsp), %rax
movq 0x10(%rax), %rax
movq %rax, (%rsp)
leaq 0x1a0(%rsp), %rdi
movl 0x110(%rsp), %esi
movl 0x130(%rsp), %edx
callq 0x628f2
pushq $-0x64
popq %r12
cmpq $0x0, 0x1a0(%rsp)
je 0x2a9662
movl %ebx, %r8d
movl %ebp, %edi
movslq 0x1d8(%rsp), %rax
imulq 0x1e0(%rsp), %rax
testq %rax, %rax
je 0x2a9662
xorl %r15d, %r15d
xorl %r12d, %r12d
xorl %ebp, %ebp
movq (%r14), %rax
movq -0x18(%rax), %rax
movslq 0x108(%r14,%rax), %rax
cmpq %rax, %rbp
jge 0x2a7a8f
movl %r15d, %eax
cltd
movl 0x20(%rsp), %esi
idivl %esi
movl %eax, %ecx
movl %edi, %r13d
movl %edi, %eax
cltd
idivl %esi
movslq %ecx, %rdx
imulq 0x250(%rsp), %rdx
movq 0x220(%rsp), %rcx
imulq %rcx, %rdx
addq 0x210(%rsp), %rdx
movl 0x228(%rsp), %esi
movq 0x230(%rsp), %rdi
movq %rdx, 0x308(%rsp)
andq $0x0, 0x310(%rsp)
movq %rcx, 0x318(%rsp)
movl %esi, 0x320(%rsp)
movq %rdi, 0x328(%rsp)
movl %eax, 0x340(%rsp)
vmovups 0x238(%rsp), %xmm0
movslq 0x244(%rsp), %rax
movslq 0x23c(%rsp), %rdx
movslq 0x240(%rsp), %rsi
imulq %rdx, %rsi
imulq %rcx, %rax
imulq %rsi, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rcx
movq %rax, 0x348(%rsp)
vmovups %xmm0, 0x330(%rsp)
movl %r12d, %eax
cltd
movl 0x10(%rsp), %esi
idivl %esi
movl %eax, %ecx
movl %r8d, %ebx
movl %r8d, %eax
cltd
idivl %esi
movslq %ecx, %rdx
imulq 0x1e0(%rsp), %rdx
movq 0x1b0(%rsp), %rsi
imulq %rsi, %rdx
addq 0x1a0(%rsp), %rdx
movl 0x1b8(%rsp), %edi
movq 0x1c0(%rsp), %rcx
movq %rdx, 0x2c0(%rsp)
andq $0x0, 0x2c8(%rsp)
movq %rsi, 0x2d0(%rsp)
movl %edi, 0x2d8(%rsp)
movq %rcx, 0x2e0(%rsp)
movl %eax, 0x2f8(%rsp)
vmovups 0x1c8(%rsp), %xmm0
movslq 0x1d4(%rsp), %rax
movslq 0x1cc(%rsp), %rdx
movslq 0x1d0(%rsp), %rdi
imulq %rdx, %rdi
imulq %rsi, %rax
imulq %rdi, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rsi
movq %rax, 0x300(%rsp)
vmovups %xmm0, 0x2e8(%rsp)
movq 0x10(%r14), %rax
movq (%rax,%rbp,8), %rdi
movq 0x48(%rsp), %rax
vmovups (%rax), %ymm0
vmovups 0x20(%rax), %ymm1
vmovups %ymm0, 0x370(%rsp)
vmovups %ymm1, 0x390(%rsp)
movq %rcx, 0x378(%rsp)
movq (%rdi), %rax
leaq 0x308(%rsp), %rsi
leaq 0x2c0(%rsp), %rdx
leaq 0x370(%rsp), %rcx
vzeroupper
callq *0x38(%rax)
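# Indirect call through vtable slot 0x38 on a layer fetched per-group from the pointer array at 0x10(%r14): presumably group_ops[g]->forward(bottom, top, opt).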
movq 0x2c8(%rsp), %rax
testq %rax, %rax
je 0x2a7a44
lock
decl (%rax)
jne 0x2a7a44
movq 0x2c0(%rsp), %rsi
movq 0x2e0(%rsp), %rdi
testq %rdi, %rdi
je 0x2a7a3c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2a7a44
movq %rsi, %rdi
callq 0x5f3e0
movq 0x310(%rsp), %rax
testq %rax, %rax
je 0x2a7a7b
lock
decl (%rax)
jne 0x2a7a7b
movq 0x308(%rsp), %rsi
movq 0x328(%rsp), %rdi
testq %rdi, %rdi
je 0x2a7a73
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2a7a7b
movq %rsi, %rdi
callq 0x5f3e0
incq %rbp
movl %ebx, %r8d
addl %ebx, %r12d
movl %r13d, %edi
addl %r13d, %r15d
jmp 0x2a7829
movq 0x70(%rsp), %rdx
cmpl %edx, 0x10(%rsp)
jae 0x2a7ab9
xorl %r12d, %r12d
leaq 0x1a0(%rsp), %rdi
movq 0x18(%rsp), %rsi
movq 0x48(%rsp), %rcx
callq 0x64e3b
jmp 0x2a9662
xorl %r12d, %r12d
leaq 0x1a0(%rsp), %rax
movq 0x18(%rsp), %rcx
cmpq %rcx, %rax
je 0x2a9662
movq 0x1a8(%rsp), %rax
testq %rax, %rax
je 0x2a7ae2
lock
incl (%rax)
movq 0x8(%rcx), %rax
testq %rax, %rax
je 0x2a960a
lock
decl (%rax)
jne 0x2a960a
movq 0x18(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
je 0x2a9602
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2a960a
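# r15d=3 plus dilation==1 and stride==2 checks: a hand-unrolled 3x3s2 pack4 kernel follows (nine xmm taps, partly spilled to the stack; 4/2/1 outputs per iteration).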
pushq $0x3
popq %r15
cmpl $0x1, 0xdc(%rdi)
jne 0x2a841c
cmpl $0x1, 0xe0(%rdi)
jne 0x2a841c
cmpl $0x2, 0xe4(%rdi)
jne 0x2a841c
cmpl $0x2, 0xe8(%rdi)
jne 0x2a841c
movl 0x2c(%r13), %eax
movl 0x30(%r13), %ecx
movl 0xbc(%rsp), %r8d
movl 0xc8(%rsp), %edx
subl %eax, %r8d
shll $0x3, %r8d
movq 0x1b0(%rdi), %rsi
movslq %r8d, %rdi
xorl %r8d, %r8d
testl %ecx, %ecx
cmovlel %r8d, %ecx
testl %edx, %edx
cmovlel %r8d, %edx
shlq $0x2, %rdi
cmpq %rdx, %r8
je 0x2a9725
testq %rsi, %rsi
je 0x2a7ba8
movq %r8, %r9
shlq $0x4, %r9
vmovups (%rsi,%r9), %xmm15
jmp 0x2a7bad
vxorps %xmm15, %xmm15, %xmm15
movq 0x40(%r13), %r9
imulq %r8, %r9
imulq 0x10(%r13), %r9
addq (%r13), %r9
movq %r14, %r10
movq 0x28(%r14), %r14
movslq 0x54(%r10), %r15
imulq %r8, %r15
imulq 0x38(%r10), %r15
movslq 0xbc(%rsp), %rbx
movq 0xd0(%rsp), %r10
imulq %r8, %r10
movq 0xa0(%rsp), %r11
imulq %r11, %r10
addq 0x90(%rsp), %r10
imulq %r11, %rbx
leaq (%r10,%rbx), %r11
leaq (%r10,%rbx,2), %rbx
vmovaps (%r14,%r15), %xmm3
vmovaps 0x10(%r14,%r15), %xmm0
vmovaps %xmm0, 0x20(%rsp)
vmovaps 0x20(%r14,%r15), %xmm0
vmovaps %xmm0, 0x170(%rsp)
vmovaps 0x30(%r14,%r15), %xmm6
vmovaps 0x40(%r14,%r15), %xmm7
vmovaps 0x50(%r14,%r15), %xmm14
vmovaps 0x60(%r14,%r15), %xmm0
vmovaps %xmm0, 0x50(%rsp)
vmovaps 0x70(%r14,%r15), %xmm0
vmovaps %xmm0, 0xf0(%rsp)
vmovaps 0x80(%r14,%r15), %xmm0
vmovaps %xmm0, 0x130(%rsp)
xorl %ebp, %ebp
vmovaps %xmm14, 0x1f0(%rsp)
vmovaps %xmm3, 0x70(%rsp)
vmovaps %xmm15, 0x110(%rsp)
cmpl %ecx, %ebp
je 0x2a80b8
xorl %r15d, %r15d
xorl %r12d, %r12d
leal 0x3(%r12), %r14d
cmpl %eax, %r14d
jge 0x2a7ed4
vmulps (%r10,%r15), %xmm3, %xmm10
vaddps %xmm15, %xmm10, %xmm11
vmovaps 0x20(%rsp), %xmm0
vmulps 0x10(%r10,%r15), %xmm0, %xmm12
vmovaps 0x20(%r10,%r15), %xmm14
vmovaps 0x40(%r10,%r15), %xmm10
vmovaps 0x170(%rsp), %xmm8
vmulps %xmm8, %xmm14, %xmm13
vaddps %xmm13, %xmm12, %xmm12
vaddps %xmm12, %xmm11, %xmm12
vmulps (%r11,%r15), %xmm6, %xmm11
vmulps 0x10(%r11,%r15), %xmm7, %xmm13
vaddps %xmm13, %xmm11, %xmm13
vmovaps 0x20(%r11,%r15), %xmm15
vmovaps 0x40(%r11,%r15), %xmm11
vmovaps 0x1f0(%rsp), %xmm9
vmulps %xmm9, %xmm15, %xmm0
vaddps %xmm0, %xmm13, %xmm0
vaddps %xmm0, %xmm12, %xmm0
vmovaps 0x50(%rsp), %xmm5
vmulps (%rbx,%r15), %xmm5, %xmm12
vmovaps 0xf0(%rsp), %xmm4
vmulps 0x10(%rbx,%r15), %xmm4, %xmm13
vaddps %xmm13, %xmm12, %xmm12
vmovaps 0x20(%rbx,%r15), %xmm1
vmovaps 0x40(%rbx,%r15), %xmm13
vmovaps 0x130(%rsp), %xmm3
vmulps %xmm3, %xmm1, %xmm2
vaddps %xmm2, %xmm12, %xmm2
vaddps %xmm2, %xmm0, %xmm0
vmovaps 0x20(%rsp), %xmm2
vmulps 0x30(%r10,%r15), %xmm2, %xmm2
vmulps 0x30(%r11,%r15), %xmm7, %xmm12
vaddps %xmm2, %xmm12, %xmm2
vmulps 0x30(%rbx,%r15), %xmm4, %xmm12
vaddps %xmm2, %xmm12, %xmm2
vmovaps %xmm0, (%r9)
vmulps 0x70(%rsp), %xmm14, %xmm0
vaddps 0x110(%rsp), %xmm0, %xmm0
vmulps %xmm8, %xmm10, %xmm12
vmulps %xmm6, %xmm15, %xmm14
vmulps %xmm9, %xmm11, %xmm15
vaddps %xmm15, %xmm12, %xmm12
vmulps %xmm5, %xmm1, %xmm1
vaddps %xmm1, %xmm14, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vaddps %xmm2, %xmm0, %xmm0
vmulps %xmm3, %xmm13, %xmm1
vaddps %xmm1, %xmm12, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vmovaps 0x60(%r10,%r15), %xmm12
vmovaps 0x60(%r11,%r15), %xmm14
vmovaps 0x20(%rsp), %xmm1
vmulps 0x50(%r10,%r15), %xmm1, %xmm1
vmulps 0x50(%r11,%r15), %xmm7, %xmm2
vmovaps 0x60(%rbx,%r15), %xmm15
vaddps %xmm2, %xmm1, %xmm1
vmulps 0x50(%rbx,%r15), %xmm4, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vmovaps %xmm0, 0x10(%r9)
vmulps %xmm8, %xmm12, %xmm0
vmulps %xmm9, %xmm14, %xmm2
vaddps %xmm2, %xmm0, %xmm0
vmulps 0x70(%rsp), %xmm10, %xmm2
vaddps 0x110(%rsp), %xmm2, %xmm2
vmulps %xmm6, %xmm11, %xmm10
vmulps %xmm5, %xmm13, %xmm11
vaddps %xmm11, %xmm10, %xmm10
vaddps %xmm2, %xmm10, %xmm2
vaddps %xmm1, %xmm2, %xmm1
vmulps %xmm3, %xmm15, %xmm2
vaddps %xmm2, %xmm0, %xmm0
vmovaps 0x20(%rsp), %xmm2
vmulps 0x70(%r10,%r15), %xmm2, %xmm2
vmulps 0x80(%r10,%r15), %xmm8, %xmm10
vmulps 0x70(%r11,%r15), %xmm7, %xmm11
vaddps %xmm0, %xmm1, %xmm0
vaddps %xmm2, %xmm11, %xmm1
vmulps 0x80(%r11,%r15), %xmm9, %xmm2
vaddps %xmm2, %xmm10, %xmm2
vmulps 0x70(%rbx,%r15), %xmm4, %xmm10
vaddps %xmm1, %xmm10, %xmm1
vmulps 0x80(%rbx,%r15), %xmm3, %xmm10
vaddps %xmm2, %xmm10, %xmm2
vmovaps %xmm0, 0x20(%r9)
vmulps %xmm6, %xmm14, %xmm0
vmulps %xmm5, %xmm15, %xmm10
vmovaps 0x110(%rsp), %xmm15
vmovaps 0x70(%rsp), %xmm3
vaddps %xmm0, %xmm10, %xmm0
vmulps %xmm3, %xmm12, %xmm10
vaddps %xmm15, %xmm10, %xmm10
vaddps %xmm0, %xmm10, %xmm0
vaddps %xmm1, %xmm0, %xmm0
vaddps %xmm2, %xmm0, %xmm0
vmovaps %xmm0, 0x30(%r9)
addq $0x40, %r9
addl $0x4, %r12d
subq $-0x80, %r15
jmp 0x2a7c96
vmovaps 0x1f0(%rsp), %xmm14
leal 0x1(%r12), %r14d
cmpl %eax, %r14d
jge 0x2a8096
vmovaps 0x20(%r10,%r15), %xmm10
vmovaps 0x20(%r11,%r15), %xmm11
vmulps (%r10,%r15), %xmm3, %xmm0
vmovaps 0x20(%rbx,%r15), %xmm12
vaddps %xmm0, %xmm15, %xmm0
vmovaps 0x20(%rsp), %xmm13
vmulps 0x10(%r10,%r15), %xmm13, %xmm1
vmovaps 0x170(%rsp), %xmm5
vmulps %xmm5, %xmm10, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vmulps (%r11,%r15), %xmm6, %xmm2
vaddps %xmm1, %xmm0, %xmm0
vmulps 0x10(%r11,%r15), %xmm7, %xmm1
vaddps %xmm1, %xmm2, %xmm1
vmulps %xmm14, %xmm11, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vmovaps 0x50(%rsp), %xmm9
vmulps (%rbx,%r15), %xmm9, %xmm2
vaddps %xmm1, %xmm0, %xmm0
vmovaps 0xf0(%rsp), %xmm8
vmulps 0x10(%rbx,%r15), %xmm8, %xmm1
vaddps %xmm1, %xmm2, %xmm1
vmovaps 0x130(%rsp), %xmm4
vmulps %xmm4, %xmm12, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vmulps 0x30(%r10,%r15), %xmm13, %xmm2
vmulps 0x40(%r10,%r15), %xmm5, %xmm13
vmovaps %xmm14, %xmm5
vmulps 0x30(%r11,%r15), %xmm7, %xmm14
vaddps %xmm1, %xmm0, %xmm0
vaddps %xmm2, %xmm14, %xmm1
vmovaps %xmm5, %xmm14
vmulps 0x40(%r11,%r15), %xmm5, %xmm2
vaddps %xmm2, %xmm13, %xmm2
vmulps 0x30(%rbx,%r15), %xmm8, %xmm13
vaddps %xmm1, %xmm13, %xmm1
vmulps 0x40(%rbx,%r15), %xmm4, %xmm13
vaddps %xmm2, %xmm13, %xmm2
vmovaps %xmm0, (%r9)
vmulps %xmm6, %xmm11, %xmm0
vmulps %xmm9, %xmm12, %xmm11
vaddps %xmm0, %xmm11, %xmm0
vmulps %xmm3, %xmm10, %xmm10
vaddps %xmm15, %xmm10, %xmm10
vaddps %xmm0, %xmm10, %xmm0
vaddps %xmm1, %xmm0, %xmm0
vaddps %xmm2, %xmm0, %xmm0
vmovaps %xmm0, 0x10(%r9)
addq $0x20, %r9
addl $0x2, %r12d
addq $0x40, %r15
jmp 0x2a7edd
vmulps (%r10,%r15), %xmm3, %xmm0
vaddps %xmm0, %xmm15, %xmm0
vmovaps 0x20(%rsp), %xmm1
vmulps 0x10(%r10,%r15), %xmm1, %xmm1
vmovaps 0x170(%rsp), %xmm2
vmulps 0x20(%r10,%r15), %xmm2, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vmulps (%r11,%r15), %xmm6, %xmm1
vmulps 0x10(%r11,%r15), %xmm7, %xmm2
vmulps 0x20(%r11,%r15), %xmm14, %xmm10
vaddps %xmm2, %xmm1, %xmm1
vaddps %xmm1, %xmm10, %xmm1
vmovaps 0x50(%rsp), %xmm2
vmulps (%rbx,%r15), %xmm2, %xmm2
vaddps %xmm1, %xmm0, %xmm0
vmovaps 0xf0(%rsp), %xmm1
vmulps 0x10(%rbx,%r15), %xmm1, %xmm1
vaddps %xmm1, %xmm2, %xmm1
vmovaps 0x130(%rsp), %xmm2
vmulps 0x20(%rbx,%r15), %xmm2, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vmovaps %xmm0, (%r9)
addq $0x10, %r9
incl %r12d
addq $0x20, %r15
cmpl %eax, %r12d
jl 0x2a7fff
addq %rdi, %r10
addq %r15, %r10
addq %rdi, %r11
addq %r15, %r11
addq %rdi, %rbx
addq %r15, %rbx
incl %ebp
jmp 0x2a7c88
incq %r8
movq 0x10(%rsp), %r14
jmp 0x2a7b8b
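# r15d=5 with dilation 1 and stride 2: the 5x5s2 pack4 variant, walking five input row pointers (rbx/r15/r12/r13/rbp) over 25 xmm taps.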
pushq $0x5
popq %r15
cmpl $0x1, 0xdc(%rdi)
jne 0x2a841c
cmpl $0x1, 0xe0(%rdi)
jne 0x2a841c
cmpl $0x2, 0xe4(%rdi)
jne 0x2a841c
cmpl $0x2, 0xe8(%rdi)
jne 0x2a841c
movl 0x2c(%r13), %edx
movl 0x30(%r13), %ecx
movl 0xbc(%rsp), %eax
movl 0xc8(%rsp), %esi
subl %edx, %eax
shll $0x3, %eax
movq 0x1b0(%rdi), %r9
xorl %r15d, %r15d
testl %edx, %edx
cmovlel %r15d, %edx
movl %edx, %edi
movslq %eax, %r8
testl %ecx, %ecx
cmovlel %r15d, %ecx
testl %esi, %esi
cmovlel %r15d, %esi
movq %rsi, 0x20(%rsp)
shlq $0x2, %r8
movq %r9, 0x50(%rsp)
cmpq 0x20(%rsp), %r15
je 0x2a9725
testq %r9, %r9
je 0x2a8166
movq %r15, %rax
shlq $0x4, %rax
vmovups (%r9,%rax), %xmm0
jmp 0x2a816a
vxorps %xmm0, %xmm0, %xmm0
movq 0x40(%r13), %r9
imulq %r15, %r9
imulq 0x10(%r13), %r9
addq (%r13), %r9
movq 0x28(%r14), %r10
movslq 0x54(%r14), %r11
imulq %r15, %r11
imulq 0x38(%r14), %r11
movq 0xd0(%rsp), %rbx
movq %r15, 0x10(%rsp)
imulq %r15, %rbx
movq 0xa0(%rsp), %rax
imulq %rax, %rbx
addq 0x90(%rsp), %rbx
movslq 0xbc(%rsp), %rdx
imulq %rax, %rdx
leaq (%rbx,%rdx), %r15
leaq (%rbx,%rdx,2), %r12
leaq (%rdx,%rdx,2), %r13
addq %rbx, %r13
leaq (%rbx,%rdx,4), %rbp
xorl %edx, %edx
cmpl %ecx, %edx
je 0x2a8405
movl %edi, %eax
xorl %esi, %esi
subl $0x1, %eax
jb 0x2a83e0
vmovaps (%r10,%r11), %xmm1
vmovaps 0x10(%r10,%r11), %xmm2
vmovaps 0x20(%r10,%r11), %xmm3
vmovaps 0x30(%r10,%r11), %xmm4
vmovaps 0x40(%r10,%r11), %xmm5
vmulps (%rbx,%rsi), %xmm1, %xmm1
vmulps 0x10(%rbx,%rsi), %xmm2, %xmm2
vmulps 0x20(%rbx,%rsi), %xmm3, %xmm3
vaddps %xmm0, %xmm1, %xmm1
vaddps %xmm3, %xmm2, %xmm2
vmulps 0x30(%rbx,%rsi), %xmm4, %xmm3
vaddps %xmm2, %xmm1, %xmm1
vmulps 0x40(%rbx,%rsi), %xmm5, %xmm2
vaddps %xmm2, %xmm3, %xmm2
vmovaps 0x50(%r10,%r11), %xmm3
vmovaps 0x60(%r10,%r11), %xmm4
vmovaps 0x70(%r10,%r11), %xmm5
vmovaps 0x80(%r10,%r11), %xmm6
vmovaps 0x90(%r10,%r11), %xmm7
vmulps (%r15,%rsi), %xmm3, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vmulps 0x10(%r15,%rsi), %xmm4, %xmm3
vmulps 0x20(%r15,%rsi), %xmm5, %xmm4
vaddps %xmm2, %xmm1, %xmm1
vaddps %xmm4, %xmm3, %xmm2
vmulps 0x30(%r15,%rsi), %xmm6, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vmulps 0x40(%r15,%rsi), %xmm7, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vmovaps 0xa0(%r10,%r11), %xmm2
vmovaps 0xb0(%r10,%r11), %xmm3
vmovaps 0xc0(%r10,%r11), %xmm4
vmovaps 0xd0(%r10,%r11), %xmm5
vmovaps 0xe0(%r10,%r11), %xmm6
vmulps (%r12,%rsi), %xmm2, %xmm2
vmulps 0x10(%r12,%rsi), %xmm3, %xmm3
vmulps 0x20(%r12,%rsi), %xmm4, %xmm4
vaddps %xmm3, %xmm2, %xmm2
vaddps %xmm4, %xmm2, %xmm2
vmulps 0x30(%r12,%rsi), %xmm5, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vmulps 0x40(%r12,%rsi), %xmm6, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vmovaps 0xf0(%r10,%r11), %xmm2
vmovaps 0x100(%r10,%r11), %xmm3
vmovaps 0x110(%r10,%r11), %xmm4
vmovaps 0x120(%r10,%r11), %xmm5
vmovaps 0x130(%r10,%r11), %xmm6
vmulps (%r13,%rsi), %xmm2, %xmm2
vmulps 0x10(%r13,%rsi), %xmm3, %xmm3
vmulps 0x20(%r13,%rsi), %xmm4, %xmm4
vaddps %xmm3, %xmm2, %xmm2
vaddps %xmm4, %xmm2, %xmm2
vmulps 0x30(%r13,%rsi), %xmm5, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vmulps 0x40(%r13,%rsi), %xmm6, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vmovaps 0x140(%r10,%r11), %xmm3
vmovaps 0x150(%r10,%r11), %xmm4
vmovaps 0x160(%r10,%r11), %xmm5
vmovaps 0x170(%r10,%r11), %xmm6
vmovaps 0x180(%r10,%r11), %xmm7
vmulps (%rbp,%rsi), %xmm3, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vmulps 0x10(%rbp,%rsi), %xmm4, %xmm3
vmulps 0x20(%rbp,%rsi), %xmm5, %xmm4
vaddps %xmm2, %xmm1, %xmm1
vaddps %xmm4, %xmm3, %xmm2
vmulps 0x30(%rbp,%rsi), %xmm6, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vmulps 0x40(%rbp,%rsi), %xmm7, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vmovaps %xmm1, (%r9)
addq $0x10, %r9
addq $0x20, %rsi
jmp 0x2a81de
addq %r8, %rbx
addq %rsi, %rbx
addq %r8, %r15
addq %rsi, %r15
addq %r8, %r12
addq %rsi, %r12
addq %r8, %r13
addq %rsi, %r13
addq %r8, %rbp
addq %rsi, %rbp
incl %edx
jmp 0x2a81d2
movq 0x10(%rsp), %r15
incq %r15
movq 0x18(%rsp), %r13
movq 0x50(%rsp), %r9
jmp 0x2a8147
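# Generic fallback: maxk = kernel_w*kernel_h, allocate an offset table (call 0x73bbe) and fill it with the same space_ofs recurrence that appears in the C++ source later in this file.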
imull %eax, %r15d
movslq %r15d, %rsi
leaq 0x210(%rsp), %rdi
leaq 0x1a0(%rsp), %rdx
callq 0x73bbe
movq (%r14), %rcx
movq -0x18(%rcx), %rdx
movl 0x20(%rsp), %r11d
imull 0xe0(%r14,%rdx), %r11d
movq 0x210(%rsp), %rax
movl 0xdc(%r14,%rdx), %esi
imull 0xd4(%r14,%rdx), %esi
subl %esi, %r11d
xorl %esi, %esi
xorl %edi, %edi
xorl %r8d, %r8d
cmpl 0xd8(%r14,%rdx), %r8d
jge 0x2a84ae
movslq %esi, %rsi
leaq (%rax,%rsi,4), %r10
xorl %r9d, %r9d
cmpl 0xd4(%r14,%rdx), %r9d
jge 0x2a84a3
movl %edi, (%r10,%r9,4)
movq -0x18(%rcx), %rdx
addl 0xdc(%r14,%rdx), %edi
incq %r9
jmp 0x2a8484
addl %r11d, %edi
incl %r8d
addq %r9, %rsi
jmp 0x2a8470
leal (,%r15,4), %ecx
movl %ecx, 0x170(%rsp)
movl 0x110(%rsp), %ecx
shll $0x2, %ecx
xorl %edx, %edx
testl %r15d, %r15d
cmovlel %edx, %r15d
movq 0xf0(%rsp), %r12
testl %r12d, %r12d
cmovlel %edx, %r12d
movq %r12, 0xf0(%rsp)
movslq %ecx, %rcx
movq %rcx, 0x70(%rsp)
shlq $0x2, %r15
leaq 0x1501c6(%rip), %rdi # 0x3f86c0
vxorps %xmm0, %xmm0, %xmm0
vbroadcastss 0x145b0d(%rip), %xmm5 # 0x3ee014
vbroadcastss 0x146778(%rip), %xmm6 # 0x3eec88
xorl %ecx, %ecx
vbroadcastss 0x148ca5(%rip), %xmm9 # 0x3f11c0
vbroadcastss 0x148ca8(%rip), %xmm10 # 0x3f11cc
vbroadcastss 0x148ca3(%rip), %xmm11 # 0x3f11d0
vbroadcastss 0x148c9e(%rip), %xmm12 # 0x3f11d4
vbroadcastss 0x148c99(%rip), %xmm13 # 0x3f11d8
cmpq 0xf0(%rsp), %rcx
je 0x2a95ea
movq %rdx, 0x110(%rsp)
movslq %edx, %r9
movq 0x40(%r13), %r10
imulq %rcx, %r10
imulq 0x10(%r13), %r10
shlq $0x2, %r9
addq (%r13), %r10
movslq 0xbc(%rsp), %r11
movq 0xd0(%rsp), %r13
imulq %rcx, %r13
movq %rcx, %rdx
movq 0xa0(%rsp), %rcx
imulq %rcx, %r13
addq 0x90(%rsp), %r13
imulq %rcx, %r11
movq %rdx, 0x130(%rsp)
shlq $0x4, %rdx
movq %rdx, 0x20(%rsp)
addq 0x28(%r14), %r9
xorl %ecx, %ecx
cmpl 0x50(%rsp), %ecx
jg 0x2a8a9b
movq (%r14), %rdx
xorl %r8d, %r8d
cmpq %rbx, %r8
jg 0x2a8a8b
movq -0x18(%rdx), %r12
cmpl $0x0, 0x100(%r14,%r12)
je 0x2a85ef
movq 0x1b0(%r14,%r12), %rsi
movq 0x20(%rsp), %rbp
vmovups (%rsi,%rbp), %xmm1
jmp 0x2a85f3
vxorps %xmm1, %xmm1, %xmm1
movslq 0xe8(%r14,%r12), %rsi
movq %r14, %rbp
movslq %ecx, %r14
imulq %rsi, %r14
imulq %r11, %r14
addq %r13, %r14
movl 0xe4(%rbp,%r12), %esi
imull %r8d, %esi
shll $0x2, %esi
movslq %esi, %rsi
leaq (%r14,%rsi,4), %rsi
xorl %r14d, %r14d
cmpq %r14, %r15
je 0x2a8647
movslq (%rax,%r14), %rbp
shlq $0x4, %rbp
vmovups (%r9,%r14,4), %xmm2
vmulps (%rsi,%rbp), %xmm2, %xmm2
vaddps %xmm1, %xmm2, %xmm1
addq $0x4, %r14
jmp 0x2a8625
movq 0x10(%rsp), %r14
movl 0x110(%r14,%r12), %esi
decl %esi
cmpl $0x5, %esi
ja 0x2a8a72
movslq (%rdi,%rsi,4), %rsi
addq %rdi, %rsi
vmaxps %xmm0, %xmm1, %xmm7
jmpq *%rsi
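# Activation switch on the (type-1) index with xmm7 = max(x, 0) precomputed; the cases below appear to be leaky-relu, an exp/log polynomial chain (mish-style), clip, sigmoid (negate + exp + rcpps) and hard-swish.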
movq 0x118(%r14,%r12), %rsi
vminps %xmm0, %xmm1, %xmm1
vbroadcastss (%rsi), %xmm15
vmulps %xmm1, %xmm15, %xmm1
vaddps %xmm7, %xmm1, %xmm7
jmp 0x2a8a76
vbroadcastss 0x148b25(%rip), %xmm4 # 0x3f11b8
vminps %xmm4, %xmm1, %xmm7
vbroadcastss 0x148b1c(%rip), %xmm8 # 0x3f11bc
vmaxps %xmm7, %xmm8, %xmm7
vmulps %xmm7, %xmm9, %xmm14
vaddps %xmm5, %xmm14, %xmm14
vcvttps2dq %xmm14, %xmm15
vcvtdq2ps %xmm15, %xmm15
vcmpltps %xmm15, %xmm14, %xmm14
vandps %xmm6, %xmm14, %xmm14
vsubps %xmm14, %xmm15, %xmm14
vbroadcastss 0x149252(%rip), %xmm4 # 0x3f1920
vmulps %xmm4, %xmm14, %xmm15
vsubps %xmm15, %xmm7, %xmm7
vmulps %xmm7, %xmm7, %xmm15
vmulps %xmm7, %xmm10, %xmm2
vaddps %xmm2, %xmm11, %xmm2
vmulps %xmm7, %xmm2, %xmm2
vaddps %xmm2, %xmm12, %xmm2
vmulps %xmm7, %xmm2, %xmm2
vaddps %xmm2, %xmm13, %xmm2
vmulps %xmm7, %xmm2, %xmm2
vmovaps %xmm5, %xmm0
vbroadcastss 0x148ad8(%rip), %xmm5 # 0x3f11dc
vaddps %xmm5, %xmm2, %xmm2
vmulps %xmm7, %xmm2, %xmm2
vaddps %xmm0, %xmm2, %xmm2
vmulps %xmm2, %xmm15, %xmm2
vaddps %xmm6, %xmm7, %xmm7
vaddps %xmm2, %xmm7, %xmm2
vcvttps2dq %xmm14, %xmm7
vpslld $0x17, %xmm7, %xmm7
vpaddd %xmm6, %xmm7, %xmm7
vmulps %xmm7, %xmm2, %xmm2
vaddps %xmm6, %xmm2, %xmm7
vbroadcastss 0x148aa5(%rip), %xmm2 # 0x3f11e0
vmaxps %xmm2, %xmm7, %xmm2
vpsrld $0x17, %xmm2, %xmm14
vbroadcastss 0x148a9b(%rip), %xmm3 # 0x3f11e8
vpaddd %xmm3, %xmm14, %xmm14
vbroadcastss 0x148a8a(%rip), %xmm3 # 0x3f11e4
vandps %xmm3, %xmm2, %xmm2
vorps %xmm0, %xmm2, %xmm2
vcvtdq2ps %xmm14, %xmm14
vbroadcastss 0x148a7c(%rip), %xmm3 # 0x3f11ec
vcmpltps %xmm3, %xmm2, %xmm15
vandps %xmm2, %xmm15, %xmm3
vbroadcastss 0x148a6e(%rip), %xmm8 # 0x3f11f0
vaddps %xmm2, %xmm8, %xmm2
vaddps %xmm3, %xmm2, %xmm2
vandps %xmm6, %xmm15, %xmm3
vsubps %xmm3, %xmm14, %xmm3
vmulps %xmm2, %xmm2, %xmm14
vbroadcastss 0x148a55(%rip), %xmm15 # 0x3f11f4
vmulps %xmm2, %xmm15, %xmm15
vbroadcastss 0x148a4c(%rip), %xmm5 # 0x3f11f8
vaddps %xmm5, %xmm15, %xmm15
vmulps %xmm2, %xmm15, %xmm15
vbroadcastss 0x148a3f(%rip), %xmm5 # 0x3f11fc
vaddps %xmm5, %xmm15, %xmm15
vmulps %xmm2, %xmm15, %xmm15
vbroadcastss 0x148a32(%rip), %xmm5 # 0x3f1200
vaddps %xmm5, %xmm15, %xmm15
vmulps %xmm2, %xmm15, %xmm15
vbroadcastss 0x148a25(%rip), %xmm5 # 0x3f1204
vaddps %xmm5, %xmm15, %xmm15
vmulps %xmm2, %xmm15, %xmm15
vbroadcastss 0x148a18(%rip), %xmm5 # 0x3f1208
vaddps %xmm5, %xmm15, %xmm15
vmulps %xmm2, %xmm15, %xmm15
vbroadcastss 0x148a0b(%rip), %xmm5 # 0x3f120c
vaddps %xmm5, %xmm15, %xmm15
vmulps %xmm2, %xmm15, %xmm15
vbroadcastss 0x1489fe(%rip), %xmm5 # 0x3f1210
vaddps %xmm5, %xmm15, %xmm15
vmulps %xmm2, %xmm15, %xmm15
vbroadcastss 0x1489f1(%rip), %xmm5 # 0x3f1214
vaddps %xmm5, %xmm15, %xmm15
vmulps %xmm2, %xmm15, %xmm15
vbroadcastss 0x1490f0(%rip), %xmm5 # 0x3f1924
vaddps %xmm5, %xmm15, %xmm15
vmulps %xmm15, %xmm14, %xmm14
vcmpleps 0x14583a(%rip), %xmm7, %xmm7 # 0x3ee080
vmulps %xmm4, %xmm3, %xmm3
vaddps %xmm2, %xmm3, %xmm2
vaddps %xmm2, %xmm14, %xmm2
vbroadcastss 0x149ebd(%rip), %xmm3 # 0x3f2718
vmulps %xmm3, %xmm2, %xmm2
vbroadcastss 0x149098(%rip), %xmm3 # 0x3f1900
vblendvps %xmm7, %xmm3, %xmm2, %xmm2
vbroadcastss 0x148941(%rip), %xmm3 # 0x3f11b8
vminps %xmm3, %xmm2, %xmm2
vbroadcastss 0x148938(%rip), %xmm3 # 0x3f11bc
vmaxps %xmm3, %xmm2, %xmm2
vmulps %xmm2, %xmm9, %xmm3
vaddps %xmm0, %xmm3, %xmm3
vcvttps2dq %xmm3, %xmm7
vcvtdq2ps %xmm7, %xmm7
vcmpltps %xmm7, %xmm3, %xmm3
vandps %xmm6, %xmm3, %xmm3
vsubps %xmm3, %xmm7, %xmm3
vmulps %xmm4, %xmm3, %xmm7
vsubps %xmm7, %xmm2, %xmm2
vmulps %xmm2, %xmm2, %xmm7
vmulps %xmm2, %xmm10, %xmm14
vaddps %xmm11, %xmm14, %xmm14
vmulps %xmm2, %xmm14, %xmm14
vaddps %xmm12, %xmm14, %xmm14
vmulps %xmm2, %xmm14, %xmm14
vaddps %xmm13, %xmm14, %xmm14
vmulps %xmm2, %xmm14, %xmm14
vbroadcastss 0x148903(%rip), %xmm4 # 0x3f11dc
vaddps %xmm4, %xmm14, %xmm14
vmovaps %xmm0, %xmm5
vxorps %xmm0, %xmm0, %xmm0
vmulps %xmm2, %xmm14, %xmm14
vaddps %xmm5, %xmm14, %xmm14
vmulps %xmm7, %xmm14, %xmm7
vaddps %xmm6, %xmm2, %xmm2
vaddps %xmm7, %xmm2, %xmm2
vcvttps2dq %xmm3, %xmm3
vpslld $0x17, %xmm3, %xmm3
vpaddd %xmm6, %xmm3, %xmm3
vmulps %xmm3, %xmm2, %xmm2
vaddps %xmm6, %xmm2, %xmm2
vrcpps %xmm2, %xmm3
vaddps %xmm3, %xmm3, %xmm7
vmulps %xmm7, %xmm2, %xmm2
vbroadcastss 0x149de5(%rip), %xmm4 # 0x3f2708
vsubps %xmm2, %xmm4, %xmm2
vmulps %xmm2, %xmm3, %xmm2
vaddps %xmm7, %xmm8, %xmm3
vaddps %xmm2, %xmm3, %xmm2
jmp 0x2a8a6c
movq 0x118(%r14,%r12), %rsi
vbroadcastss (%rsi), %xmm7
vbroadcastss 0x4(%rsi), %xmm15
vmaxps %xmm7, %xmm1, %xmm1
vminps %xmm1, %xmm15, %xmm7
jmp 0x2a8a76
vbroadcastss 0x148853(%rip), %xmm2 # 0x3f11b4
vxorps %xmm2, %xmm1, %xmm1
vbroadcastss 0x14884a(%rip), %xmm2 # 0x3f11b8
vminps %xmm2, %xmm1, %xmm1
vbroadcastss 0x148841(%rip), %xmm2 # 0x3f11bc
vmaxps %xmm2, %xmm1, %xmm1
vbroadcastss 0x148838(%rip), %xmm2 # 0x3f11c0
vmulps %xmm2, %xmm1, %xmm7
vaddps %xmm5, %xmm7, %xmm7
vcvttps2dq %xmm7, %xmm15
vcvtdq2ps %xmm15, %xmm15
vcmpltps %xmm15, %xmm7, %xmm7
vandps %xmm6, %xmm7, %xmm7
vsubps %xmm7, %xmm15, %xmm7
vbroadcastss 0x14a030(%rip), %xmm2 # 0x3f29e0
vmulps %xmm2, %xmm7, %xmm15
vaddps %xmm1, %xmm15, %xmm1
vmulps %xmm1, %xmm1, %xmm15
vbroadcastss 0x148807(%rip), %xmm2 # 0x3f11cc
vmulps %xmm2, %xmm1, %xmm14
vbroadcastss 0x1487fe(%rip), %xmm2 # 0x3f11d0
vaddps %xmm2, %xmm14, %xmm14
vmulps %xmm1, %xmm14, %xmm14
vbroadcastss 0x1487f1(%rip), %xmm2 # 0x3f11d4
vaddps %xmm2, %xmm14, %xmm14
vmulps %xmm1, %xmm14, %xmm14
vbroadcastss 0x1487e4(%rip), %xmm2 # 0x3f11d8
vaddps %xmm2, %xmm14, %xmm14
vmulps %xmm1, %xmm14, %xmm14
vbroadcastss 0x1487d7(%rip), %xmm2 # 0x3f11dc
vaddps %xmm2, %xmm14, %xmm14
vmulps %xmm1, %xmm14, %xmm14
vaddps %xmm5, %xmm14, %xmm14
vmulps %xmm14, %xmm15, %xmm14
vaddps %xmm6, %xmm1, %xmm1
vaddps %xmm1, %xmm14, %xmm1
vcvttps2dq %xmm7, %xmm7
vpslld $0x17, %xmm7, %xmm7
vpaddd %xmm6, %xmm7, %xmm7
vmulps %xmm7, %xmm1, %xmm1
vaddps %xmm6, %xmm1, %xmm1
vrcpps %xmm1, %xmm7
vmulps %xmm7, %xmm1, %xmm1
vsubps %xmm1, %xmm6, %xmm1
vmulps %xmm1, %xmm7, %xmm1
vaddps %xmm1, %xmm7, %xmm7
jmp 0x2a8a76
movq 0x118(%r14,%r12), %rsi
vbroadcastss (%rsi), %xmm2
vbroadcastss 0x4(%rsi), %xmm3
vmulps %xmm1, %xmm2, %xmm2
vaddps %xmm3, %xmm2, %xmm2
vmaxps %xmm0, %xmm2, %xmm2
vminps %xmm6, %xmm2, %xmm2
vmulps %xmm1, %xmm2, %xmm7
jmp 0x2a8a76
vmovaps %xmm1, %xmm7
movq %r8, %rsi
shlq $0x4, %rsi
vmovups %xmm7, (%r10,%rsi)
incq %r8
jmp 0x2a85c3
movq 0x70(%rsp), %rdx
leaq (%r10,%rdx,4), %r10
incl %ecx
jmp 0x2a85b3
movq 0x130(%rsp), %rcx
incq %rcx
movq 0x110(%rsp), %rdx
addl 0x170(%rsp), %edx
movq 0x18(%rsp), %r13
jmp 0x2a853f
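# Pack8 (ymm) dispatch: r15d=3 with dilation 1 and stride 2 -> 3x3s2 over 9 ymm taps, four outputs per unrolled iteration.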
pushq $0x3
popq %r15
cmpl $0x1, 0xdc(%rdi)
jne 0x2a93b2
cmpl $0x1, 0xe0(%rdi)
jne 0x2a93b2
cmpl $0x2, 0xe4(%rdi)
jne 0x2a93b2
cmpl $0x2, 0xe8(%rdi)
jne 0x2a93b2
movl 0x2c(%r13), %eax
movl 0x30(%r13), %ecx
movl 0xbc(%rsp), %r8d
movl 0xc8(%rsp), %edx
subl %eax, %r8d
shll $0x4, %r8d
movq 0x1b0(%rdi), %rsi
movslq %r8d, %rdi
xorl %r8d, %r8d
testl %ecx, %ecx
cmovlel %r8d, %ecx
testl %edx, %edx
cmovlel %r8d, %edx
shlq $0x2, %rdi
cmpq %rdx, %r8
je 0x2a9725
testq %rsi, %rsi
je 0x2a8b4f
movq %r8, %r9
shlq $0x5, %r9
vmovups (%rsi,%r9), %ymm10
jmp 0x2a8b54
vxorps %xmm10, %xmm10, %xmm10
movq 0x40(%r13), %r9
imulq %r8, %r9
imulq 0x10(%r13), %r9
addq (%r13), %r9
movq %r14, %r10
movq 0x28(%r14), %r14
movslq 0x54(%r10), %r15
imulq %r8, %r15
imulq 0x38(%r10), %r15
movslq 0xbc(%rsp), %rbx
movq 0xd0(%rsp), %r10
imulq %r8, %r10
movq 0xa0(%rsp), %r11
imulq %r11, %r10
addq 0x90(%rsp), %r10
imulq %r11, %rbx
leaq (%r10,%rbx), %r11
leaq (%r10,%rbx,2), %rbx
vmovaps (%r14,%r15), %ymm11
vmovaps 0x20(%r14,%r15), %ymm12
vmovaps 0x40(%r14,%r15), %ymm13
vmovaps 0x60(%r14,%r15), %ymm14
vmovaps 0x80(%r14,%r15), %ymm15
vmovaps 0xa0(%r14,%r15), %ymm6
vmovaps 0xc0(%r14,%r15), %ymm7
vmovaps 0xe0(%r14,%r15), %ymm8
vmovaps 0x100(%r14,%r15), %ymm9
xorl %ebp, %ebp
vmovups %ymm10, 0x20(%rsp)
vmovups %ymm11, 0x50(%rsp)
vmovups %ymm12, 0x70(%rsp)
vmovups %ymm13, 0xf0(%rsp)
vmovups %ymm14, 0x110(%rsp)
vmovups %ymm15, 0x130(%rsp)
vmovups %ymm6, 0x170(%rsp)
cmpl %ecx, %ebp
je 0x2a9033
xorl %r15d, %r15d
xorl %r12d, %r12d
leal 0x3(%r12), %r14d
cmpl %eax, %r14d
jge 0x2a8e76
vmovaps 0x40(%r10,%r15), %ymm10
vmovaps 0x80(%r10,%r15), %ymm11
vmovaps 0xc0(%r10,%r15), %ymm12
vmovups 0x50(%rsp), %ymm0
vmulps (%r10,%r15), %ymm0, %ymm13
vmovups 0x20(%rsp), %ymm1
vaddps %ymm1, %ymm13, %ymm13
vmulps %ymm0, %ymm10, %ymm14
vaddps %ymm1, %ymm14, %ymm14
vmulps %ymm0, %ymm11, %ymm15
vaddps %ymm1, %ymm15, %ymm15
vmulps %ymm0, %ymm12, %ymm0
vaddps %ymm1, %ymm0, %ymm0
vmovups 0x70(%rsp), %ymm4
vmulps 0x20(%r10,%r15), %ymm4, %ymm1
vmulps 0x60(%r10,%r15), %ymm4, %ymm2
vmulps 0xa0(%r10,%r15), %ymm4, %ymm3
vmulps 0xe0(%r10,%r15), %ymm4, %ymm4
vmovups 0xf0(%rsp), %ymm5
vmulps %ymm5, %ymm10, %ymm10
vaddps %ymm1, %ymm10, %ymm1
vaddps %ymm1, %ymm13, %ymm1
vmulps %ymm5, %ymm11, %ymm10
vaddps %ymm2, %ymm10, %ymm2
vaddps %ymm2, %ymm14, %ymm2
vmulps %ymm5, %ymm12, %ymm10
vaddps %ymm3, %ymm10, %ymm3
vmulps 0x100(%r10,%r15), %ymm5, %ymm10
vaddps %ymm3, %ymm15, %ymm3
vaddps %ymm4, %ymm10, %ymm4
vaddps %ymm4, %ymm0, %ymm0
vmovaps 0x40(%r11,%r15), %ymm4
vmovaps 0x80(%r11,%r15), %ymm10
vmovaps 0xc0(%r11,%r15), %ymm11
vmovups 0x110(%rsp), %ymm5
vmulps (%r11,%r15), %ymm5, %ymm12
vmulps %ymm5, %ymm4, %ymm13
vmulps %ymm5, %ymm10, %ymm14
vmovups 0x130(%rsp), %ymm6
vmulps 0x20(%r11,%r15), %ymm6, %ymm15
vmulps %ymm5, %ymm11, %ymm5
vaddps %ymm15, %ymm12, %ymm12
vmulps 0x60(%r11,%r15), %ymm6, %ymm15
vaddps %ymm15, %ymm13, %ymm13
vmulps 0xa0(%r11,%r15), %ymm6, %ymm15
vaddps %ymm15, %ymm14, %ymm14
vmulps 0xe0(%r11,%r15), %ymm6, %ymm15
vaddps %ymm5, %ymm15, %ymm5
vmovups 0x170(%rsp), %ymm6
vmulps %ymm6, %ymm4, %ymm4
vaddps %ymm4, %ymm12, %ymm4
vaddps %ymm4, %ymm1, %ymm1
vmulps %ymm6, %ymm10, %ymm4
vaddps %ymm4, %ymm13, %ymm4
vaddps %ymm4, %ymm2, %ymm2
vmulps %ymm6, %ymm11, %ymm4
vaddps %ymm4, %ymm14, %ymm4
vaddps %ymm4, %ymm3, %ymm3
vmulps 0x100(%r11,%r15), %ymm6, %ymm4
vaddps %ymm4, %ymm5, %ymm4
vaddps %ymm4, %ymm0, %ymm0
vmovaps 0x40(%rbx,%r15), %ymm4
vmovaps 0x80(%rbx,%r15), %ymm5
vmulps (%rbx,%r15), %ymm7, %ymm10
vmovaps 0xc0(%rbx,%r15), %ymm11
vmulps %ymm7, %ymm4, %ymm12
vmulps 0x20(%rbx,%r15), %ymm8, %ymm13
vmulps %ymm7, %ymm5, %ymm14
vaddps %ymm13, %ymm10, %ymm10
vmulps 0x60(%rbx,%r15), %ymm8, %ymm13
vmulps %ymm7, %ymm11, %ymm15
vaddps %ymm13, %ymm12, %ymm12
vmulps 0xa0(%rbx,%r15), %ymm8, %ymm13
vaddps %ymm13, %ymm14, %ymm13
vmulps 0xe0(%rbx,%r15), %ymm8, %ymm14
vaddps %ymm14, %ymm15, %ymm14
vmulps %ymm4, %ymm9, %ymm4
vaddps %ymm4, %ymm10, %ymm4
vaddps %ymm4, %ymm1, %ymm1
vmulps %ymm5, %ymm9, %ymm4
vaddps %ymm4, %ymm12, %ymm4
vaddps %ymm4, %ymm2, %ymm2
vmulps %ymm9, %ymm11, %ymm4
vaddps %ymm4, %ymm13, %ymm4
vaddps %ymm4, %ymm3, %ymm3
vmulps 0x100(%rbx,%r15), %ymm9, %ymm4
vaddps %ymm4, %ymm14, %ymm4
vaddps %ymm4, %ymm0, %ymm0
vmovaps %ymm1, (%r9)
vmovaps %ymm2, 0x20(%r9)
vmovaps %ymm3, 0x40(%r9)
vmovaps %ymm0, 0x60(%r9)
subq $-0x80, %r9
addl $0x4, %r12d
addq $0x100, %r15 # imm = 0x100
jmp 0x2a8c40
vmovups 0x20(%rsp), %ymm10
vmovups 0x50(%rsp), %ymm11
vmovups 0x70(%rsp), %ymm12
vmovups 0xf0(%rsp), %ymm13
vmovups 0x110(%rsp), %ymm14
vmovups 0x130(%rsp), %ymm15
vmovups 0x170(%rsp), %ymm6
leal 0x1(%r12), %r14d
cmpl %eax, %r14d
jge 0x2a9015
vmovaps 0x40(%r10,%r15), %ymm0
vmulps (%r10,%r15), %ymm11, %ymm1
vaddps %ymm1, %ymm10, %ymm1
vmulps %ymm0, %ymm11, %ymm2
vaddps %ymm2, %ymm10, %ymm2
vmulps 0x20(%r10,%r15), %ymm12, %ymm3
vmulps 0x60(%r10,%r15), %ymm12, %ymm4
vmulps %ymm0, %ymm13, %ymm0
vaddps %ymm0, %ymm3, %ymm0
vaddps %ymm0, %ymm1, %ymm0
vmulps 0x80(%r10,%r15), %ymm13, %ymm1
vaddps %ymm1, %ymm4, %ymm1
vaddps %ymm1, %ymm2, %ymm1
vmovaps 0x40(%r11,%r15), %ymm2
vmulps (%r11,%r15), %ymm14, %ymm3
vmulps %ymm2, %ymm14, %ymm4
vmulps 0x20(%r11,%r15), %ymm15, %ymm5
vaddps %ymm5, %ymm3, %ymm3
vmulps 0x60(%r11,%r15), %ymm15, %ymm5
vaddps %ymm5, %ymm4, %ymm4
vmulps %ymm6, %ymm2, %ymm2
vaddps %ymm2, %ymm3, %ymm2
vaddps %ymm2, %ymm0, %ymm0
vmulps 0x80(%r11,%r15), %ymm6, %ymm2
vaddps %ymm2, %ymm4, %ymm2
vaddps %ymm2, %ymm1, %ymm1
vmulps (%rbx,%r15), %ymm7, %ymm2
vmovaps 0x40(%rbx,%r15), %ymm3
vmulps %ymm7, %ymm3, %ymm4
vmulps 0x20(%rbx,%r15), %ymm8, %ymm5
vaddps %ymm5, %ymm2, %ymm2
vmulps 0x60(%rbx,%r15), %ymm8, %ymm5
vaddps %ymm5, %ymm4, %ymm4
vmulps %ymm3, %ymm9, %ymm3
vaddps %ymm3, %ymm2, %ymm2
vmulps 0x80(%rbx,%r15), %ymm9, %ymm3
vaddps %ymm2, %ymm0, %ymm0
vaddps %ymm3, %ymm4, %ymm2
vaddps %ymm2, %ymm1, %ymm1
vmovaps %ymm0, (%r9)
vmovaps %ymm1, 0x20(%r9)
addq $0x40, %r9
addl $0x2, %r12d
subq $-0x80, %r15
jmp 0x2a8eac
vmulps (%r10,%r15), %ymm11, %ymm0
vaddps %ymm0, %ymm10, %ymm0
vmulps 0x20(%r10,%r15), %ymm12, %ymm1
vmulps 0x40(%r10,%r15), %ymm13, %ymm2
vaddps %ymm2, %ymm1, %ymm1
vaddps %ymm1, %ymm0, %ymm0
vmulps (%r11,%r15), %ymm14, %ymm1
vmulps 0x20(%r11,%r15), %ymm15, %ymm2
vmulps 0x40(%r11,%r15), %ymm6, %ymm3
vaddps %ymm2, %ymm1, %ymm1
vaddps %ymm3, %ymm1, %ymm1
vmulps (%rbx,%r15), %ymm7, %ymm2
vaddps %ymm1, %ymm0, %ymm0
vmulps 0x20(%rbx,%r15), %ymm8, %ymm1
vaddps %ymm1, %ymm2, %ymm1
vmulps 0x40(%rbx,%r15), %ymm9, %ymm2
vaddps %ymm2, %ymm1, %ymm1
vaddps %ymm1, %ymm0, %ymm0
vmovaps %ymm0, (%r9)
addq $0x20, %r9
incl %r12d
addq $0x40, %r15
cmpl %eax, %r12d
jl 0x2a8fa5
addq %rdi, %r10
addq %r15, %r10
addq %rdi, %r11
addq %r15, %r11
addq %rdi, %rbx
addq %r15, %rbx
incl %ebp
jmp 0x2a8c32
incq %r8
movq 0x10(%rsp), %r14
jmp 0x2a8b32
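# Pack8 (ymm) dispatch: r15d=5 with dilation 1 and stride 2 -> 5x5s2, the same five-row / 25-tap structure as the pack4 variant above widened to ymm.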
pushq $0x5
popq %r15
cmpl $0x1, 0xdc(%rdi)
jne 0x2a93b2
cmpl $0x1, 0xe0(%rdi)
jne 0x2a93b2
cmpl $0x2, 0xe4(%rdi)
jne 0x2a93b2
cmpl $0x2, 0xe8(%rdi)
jne 0x2a93b2
movl 0x2c(%r13), %edx
movl 0x30(%r13), %ecx
movl 0xbc(%rsp), %eax
movl 0xc8(%rsp), %esi
subl %edx, %eax
shll $0x4, %eax
movq 0x1b0(%rdi), %r9
xorl %r15d, %r15d
testl %edx, %edx
cmovlel %r15d, %edx
movl %edx, %edi
movslq %eax, %r8
testl %ecx, %ecx
cmovlel %r15d, %ecx
testl %esi, %esi
cmovlel %r15d, %esi
movq %rsi, 0x20(%rsp)
shlq $0x2, %r8
movq %r9, 0x50(%rsp)
cmpq 0x20(%rsp), %r15
je 0x2a9725
testq %r9, %r9
je 0x2a90e1
movq %r15, %rax
shlq $0x5, %rax
vmovups (%r9,%rax), %ymm0
jmp 0x2a90e5
vxorps %xmm0, %xmm0, %xmm0
movq 0x40(%r13), %r9
imulq %r15, %r9
imulq 0x10(%r13), %r9
addq (%r13), %r9
movq 0x28(%r14), %r10
movslq 0x54(%r14), %r11
imulq %r15, %r11
imulq 0x38(%r14), %r11
movq 0xd0(%rsp), %rbx
movq %r15, 0x10(%rsp)
imulq %r15, %rbx
movq 0xa0(%rsp), %rax
imulq %rax, %rbx
addq 0x90(%rsp), %rbx
movslq 0xbc(%rsp), %rdx
imulq %rax, %rdx
leaq (%rbx,%rdx), %r15
leaq (%rbx,%rdx,2), %r12
leaq (%rdx,%rdx,2), %r13
addq %rbx, %r13
leaq (%rbx,%rdx,4), %rbp
xorl %edx, %edx
cmpl %ecx, %edx
je 0x2a939b
movl %edi, %eax
xorl %esi, %esi
subl $0x1, %eax
jb 0x2a9376
vmovaps (%r10,%r11), %ymm1
vmovaps 0x20(%r10,%r11), %ymm2
vmovaps 0x40(%r10,%r11), %ymm3
vmovaps 0x60(%r10,%r11), %ymm4
vmovaps 0x80(%r10,%r11), %ymm5
vmulps (%rbx,%rsi), %ymm1, %ymm1
vmulps 0x20(%rbx,%rsi), %ymm2, %ymm2
vmulps 0x40(%rbx,%rsi), %ymm3, %ymm3
vaddps %ymm0, %ymm1, %ymm1
vaddps %ymm3, %ymm2, %ymm2
vmulps 0x60(%rbx,%rsi), %ymm4, %ymm3
vaddps %ymm2, %ymm1, %ymm1
vmulps 0x80(%rbx,%rsi), %ymm5, %ymm2
vaddps %ymm2, %ymm3, %ymm2
vmovaps 0xa0(%r10,%r11), %ymm3
vmovaps 0xc0(%r10,%r11), %ymm4
vmovaps 0xe0(%r10,%r11), %ymm5
vmovaps 0x100(%r10,%r11), %ymm6
vmovaps 0x120(%r10,%r11), %ymm7
vmulps (%r15,%rsi), %ymm3, %ymm3
vaddps %ymm3, %ymm2, %ymm2
vmulps 0x20(%r15,%rsi), %ymm4, %ymm3
vmulps 0x40(%r15,%rsi), %ymm5, %ymm4
vaddps %ymm2, %ymm1, %ymm1
vaddps %ymm4, %ymm3, %ymm2
vmulps 0x60(%r15,%rsi), %ymm6, %ymm3
vaddps %ymm3, %ymm2, %ymm2
vmulps 0x80(%r15,%rsi), %ymm7, %ymm3
vaddps %ymm3, %ymm2, %ymm2
vaddps %ymm2, %ymm1, %ymm1
vmovaps 0x140(%r10,%r11), %ymm2
vmovaps 0x160(%r10,%r11), %ymm3
vmovaps 0x180(%r10,%r11), %ymm4
vmovaps 0x1a0(%r10,%r11), %ymm5
vmovaps 0x1c0(%r10,%r11), %ymm6
vmulps (%r12,%rsi), %ymm2, %ymm2
vmulps 0x20(%r12,%rsi), %ymm3, %ymm3
vmulps 0x40(%r12,%rsi), %ymm4, %ymm4
vaddps %ymm3, %ymm2, %ymm2
vaddps %ymm4, %ymm2, %ymm2
vmulps 0x60(%r12,%rsi), %ymm5, %ymm3
vaddps %ymm3, %ymm2, %ymm2
vmulps 0x80(%r12,%rsi), %ymm6, %ymm3
vaddps %ymm3, %ymm2, %ymm2
vaddps %ymm2, %ymm1, %ymm1
vmovaps 0x1e0(%r10,%r11), %ymm2
vmovaps 0x200(%r10,%r11), %ymm3
vmovaps 0x220(%r10,%r11), %ymm4
vmovaps 0x240(%r10,%r11), %ymm5
vmovaps 0x260(%r10,%r11), %ymm6
vmulps (%r13,%rsi), %ymm2, %ymm2
vmulps 0x20(%r13,%rsi), %ymm3, %ymm3
vmulps 0x40(%r13,%rsi), %ymm4, %ymm4
vaddps %ymm3, %ymm2, %ymm2
vaddps %ymm4, %ymm2, %ymm2
vmulps 0x60(%r13,%rsi), %ymm5, %ymm3
vaddps %ymm3, %ymm2, %ymm2
vmulps 0x80(%r13,%rsi), %ymm6, %ymm3
vaddps %ymm3, %ymm2, %ymm2
vmovaps 0x280(%r10,%r11), %ymm3
vmovaps 0x2a0(%r10,%r11), %ymm4
vmovaps 0x2c0(%r10,%r11), %ymm5
vmovaps 0x2e0(%r10,%r11), %ymm6
vmovaps 0x300(%r10,%r11), %ymm7
vmulps (%rbp,%rsi), %ymm3, %ymm3
vaddps %ymm3, %ymm2, %ymm2
vmulps 0x20(%rbp,%rsi), %ymm4, %ymm3
vmulps 0x40(%rbp,%rsi), %ymm5, %ymm4
vaddps %ymm2, %ymm1, %ymm1
vaddps %ymm4, %ymm3, %ymm2
vmulps 0x60(%rbp,%rsi), %ymm6, %ymm3
vaddps %ymm3, %ymm2, %ymm2
vmulps 0x80(%rbp,%rsi), %ymm7, %ymm3
vaddps %ymm3, %ymm2, %ymm2
vaddps %ymm2, %ymm1, %ymm1
vmovaps %ymm1, (%r9)
addq $0x20, %r9
addq $0x40, %rsi
jmp 0x2a9159
addq %r8, %rbx
addq %rsi, %rbx
addq %r8, %r15
addq %rsi, %r15
addq %r8, %r12
addq %rsi, %r12
addq %r8, %r13
addq %rsi, %r13
addq %r8, %rbp
addq %rsi, %rbp
incl %edx
jmp 0x2a914d
movq 0x10(%rsp), %r15
incq %r15
movq 0x18(%rsp), %r13
movq 0x50(%rsp), %r9
jmp 0x2a90c2
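# Generic pack8 fallback: rebuilds the space_ofs table, gathers one ymm (8 lanes) per tap, and leaves activation to the virtual forward_inplace call (*0x48(%rax)) further down.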
imull %eax, %r15d
movslq %r15d, %rsi
leaq 0x210(%rsp), %rdi
leaq 0x1a0(%rsp), %rdx
callq 0x73bbe
movq (%r14), %rcx
movq -0x18(%rcx), %rdx
movl 0x20(%rsp), %r11d
imull 0xe0(%r14,%rdx), %r11d
movq 0x210(%rsp), %rax
movl 0xdc(%r14,%rdx), %esi
imull 0xd4(%r14,%rdx), %esi
subl %esi, %r11d
xorl %esi, %esi
xorl %edi, %edi
xorl %r8d, %r8d
cmpl 0xd8(%r14,%rdx), %esi
jge 0x2a9443
movslq %r8d, %r8
leaq (%rax,%r8,4), %r10
xorl %r9d, %r9d
cmpl 0xd4(%r14,%rdx), %r9d
jge 0x2a9439
movl %edi, (%r10,%r9,4)
movq -0x18(%rcx), %rdx
addl 0xdc(%r14,%rdx), %edi
incq %r9
jmp 0x2a941a
addl %r11d, %edi
incl %esi
addq %r9, %r8
jmp 0x2a9406
leal (,%r15,8), %ecx
movl %ecx, 0x130(%rsp)
movl 0x110(%rsp), %ecx
shll $0x3, %ecx
movslq %ecx, %rcx
movq %rcx, 0x20(%rsp)
xorl %ecx, %ecx
testl %r15d, %r15d
cmovlel %ecx, %r15d
movq 0xf0(%rsp), %r12
testl %r12d, %r12d
cmovlel %ecx, %r12d
movq %r12, 0xf0(%rsp)
shlq $0x2, %r15
xorl %edx, %edx
cmpq 0xf0(%rsp), %rdx
je 0x2a95d0
movq %rcx, 0x70(%rsp)
movslq %ecx, %r8
movq 0x40(%r13), %r9
imulq %rdx, %r9
imulq 0x10(%r13), %r9
shlq $0x2, %r8
addq (%r13), %r9
movslq 0xbc(%rsp), %r10
movq 0xd0(%rsp), %r11
imulq %rdx, %r11
movq 0xa0(%rsp), %rcx
imulq %rcx, %r11
addq 0x90(%rsp), %r11
imulq %rcx, %r10
movq %rdx, 0x110(%rsp)
movq %rdx, %r13
shlq $0x5, %r13
addq 0x28(%r14), %r8
xorl %ebp, %ebp
cmpl 0x50(%rsp), %ebp
jg 0x2a95af
movq (%r14), %rcx
xorl %r12d, %r12d
cmpq %rbx, %r12
jg 0x2a959f
movq -0x18(%rcx), %rsi
cmpl $0x0, 0x100(%r14,%rsi)
je 0x2a952e
movq 0x1b0(%r14,%rsi), %rdi
vmovups (%rdi,%r13), %ymm0
jmp 0x2a9532
vxorps %xmm0, %xmm0, %xmm0
movslq 0xe8(%r14,%rsi), %rdi
movq %r14, %rdx
movslq %ebp, %r14
imulq %rdi, %r14
imulq %r10, %r14
addq %r11, %r14
movl 0xe4(%rdx,%rsi), %esi
imull %r12d, %esi
shll $0x3, %esi
movslq %esi, %rsi
leaq (%r14,%rsi,4), %rsi
xorl %edi, %edi
cmpq %rdi, %r15
je 0x2a9585
movslq (%rax,%rdi), %r14
shlq $0x5, %r14
vmovups (%r8,%rdi,8), %ymm1
vmulps (%rsi,%r14), %ymm1, %ymm1
vaddps %ymm0, %ymm1, %ymm0
addq $0x4, %rdi
jmp 0x2a9562
movq %r12, %rsi
shlq $0x5, %rsi
vmovups %ymm0, (%r9,%rsi)
incq %r12
movq 0x10(%rsp), %r14
jmp 0x2a9506
movq 0x20(%rsp), %rcx
leaq (%r9,%rcx,4), %r9
incl %ebp
jmp 0x2a94f6
movq 0x110(%rsp), %rdx
incq %rdx
movq 0x70(%rsp), %rcx
addl 0x130(%rsp), %ecx
movq 0x18(%rsp), %r13
jmp 0x2a948a
movq 0x8(%r14), %rdi
testq %rdi, %rdi
je 0x2a95ea
movq (%rdi), %rax
movq %r13, %rsi
movq 0x48(%rsp), %rdx
vzeroupper
callq *0x48(%rax)
leaq 0x210(%rsp), %rdi
vzeroupper
callq 0x624be
xorl %r12d, %r12d
jmp 0x2a96d0
movq %rsi, %rdi
callq 0x5f3e0
vmovaps 0x1a0(%rsp), %xmm0
movq 0x18(%rsp), %rcx
vmovups %xmm0, (%rcx)
movq 0x1b0(%rsp), %rax
movq %rax, 0x10(%rcx)
movl 0x1b8(%rsp), %eax
movl %eax, 0x18(%rcx)
movq 0x1c0(%rsp), %rax
movq %rax, 0x20(%rcx)
vmovups 0x1c8(%rsp), %xmm0
vmovups %xmm0, 0x28(%rcx)
movl 0x1d8(%rsp), %eax
movl %eax, 0x38(%rcx)
movq 0x1e0(%rsp), %rax
movq %rax, 0x40(%rcx)
movq 0x1a8(%rsp), %rax
testq %rax, %rax
je 0x2a9699
lock
decl (%rax)
jne 0x2a9699
movq 0x1a0(%rsp), %rsi
movq 0x1c0(%rsp), %rdi
testq %rdi, %rdi
je 0x2a9691
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2a9699
movq %rsi, %rdi
callq 0x5f3e0
movq 0x218(%rsp), %rax
testq %rax, %rax
je 0x2a96d0
lock
decl (%rax)
jne 0x2a96d0
movq 0x210(%rsp), %rsi
movq 0x230(%rsp), %rdi
testq %rdi, %rdi
je 0x2a96c8
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2a96d0
movq %rsi, %rdi
callq 0x5f3e0
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0x2a970d
lock
decl (%rax)
jne 0x2a970d
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
je 0x2a9702
movq (%rdi), %rax
vzeroupper
callq *0x18(%rax)
jmp 0x2a970d
movq %rsi, %rdi
vzeroupper
callq 0x5f3e0
movl %r12d, %eax
addq $0x3b8, %rsp # imm = 0x3B8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
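# Epilogue: the return value travels in r12d -> eax; it is 0 on the success paths and -100 (pushq $-0x64 earlier) when a blob allocation failed, matching the C++ return codes below.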
movq 0x8(%r14), %rdi
testq %rdi, %rdi
je 0x2a95fa
movq (%rdi), %rax
movq %r13, %rsi
movq 0x48(%rsp), %rdx
vzeroupper
callq *0x48(%rax)
jmp 0x2a95fa
movq %rax, %rbx
leaq 0x210(%rsp), %rdi
callq 0x624be
jmp 0x2a9873
jmp 0x2a978c
jmp 0x2a978c
jmp 0x2a98ba
jmp 0x2a98ba
movq %rax, %rbx
jmp 0x2a983c
movq %rax, %rbx
jmp 0x2a9805
jmp 0x2a98ba
jmp 0x2a978c
jmp 0x2a98ba
jmp 0x2a98ba
movq %rax, %rbx
jmp 0x2a9873
movq %rax, %rbx
movq 0x2c8(%rsp), %rax
testq %rax, %rax
je 0x2a97ce
lock
decl (%rax)
jne 0x2a97ce
movq 0x2c0(%rsp), %rsi
movq 0x2e0(%rsp), %rdi
testq %rdi, %rdi
jne 0x2a97c8
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2a97ce
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x310(%rsp), %rax
testq %rax, %rax
je 0x2a9805
lock
decl (%rax)
jne 0x2a9805
movq 0x308(%rsp), %rsi
movq 0x328(%rsp), %rdi
testq %rdi, %rdi
jne 0x2a97ff
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2a9805
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x1a8(%rsp), %rax
testq %rax, %rax
je 0x2a983c
lock
decl (%rax)
jne 0x2a983c
movq 0x1a0(%rsp), %rsi
movq 0x1c0(%rsp), %rdi
testq %rdi, %rdi
jne 0x2a9836
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2a983c
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x218(%rsp), %rax
testq %rax, %rax
je 0x2a9873
lock
decl (%rax)
jne 0x2a9873
movq 0x210(%rsp), %rsi
movq 0x230(%rsp), %rdi
testq %rdi, %rdi
jne 0x2a986d
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2a9873
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0x2a98aa
lock
decl (%rax)
jne 0x2a98aa
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
jne 0x2a98a4
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2a98aa
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
jmp 0x2a98ba
jmp 0x2a98ba
jmp 0x2a98ba
jmp 0x2a98ba
movq %rax, %rdi
callq 0x61d68
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/convolutiondepthwise_x86_avx.cpp |
ncnn::ConvolutionDepthWise_x86_avx::forward_int8_x86(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const | int ConvolutionDepthWise_x86_avx::forward_int8_x86(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int channels = bottom_blob.c;
int elempack = bottom_blob.elempack;
int elembits = bottom_blob.elembits();
const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;
Mat bottom_blob_int8 = bottom_blob;
if (elembits != 8)
{
const int channels_g = channels * elempack / group;
Mat scales(channels * elempack);
{
float* ps = scales;
for (int g = 0; g < group; g++)
{
float scale = bottom_blob_int8_scales[g];
for (int q = 0; q < channels_g; q++)
{
*ps++ = scale;
}
}
}
Option opt_q = opt;
opt_q.blob_allocator = opt.workspace_allocator;
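// route the temporary int8 copy through the workspace allocator so the blob allocator only ever sees real layer outputs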
quantize_to_int8(bottom_blob, bottom_blob_int8, scales, opt_q);
}
Mat bottom_blob_bordered;
make_padding(bottom_blob_int8, bottom_blob_bordered, opt);
if (bottom_blob_bordered.empty())
return -100;
w = bottom_blob_bordered.w;
h = bottom_blob_bordered.h;
channels = bottom_blob_bordered.c;
elempack = bottom_blob_bordered.elempack;
int outw = (w - kernel_extent_w) / stride_w + 1;
int outh = (h - kernel_extent_h) / stride_h + 1;
// depth-wise
if (channels * elempack == group && group == num_output)
{
int out_elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
out_elempack = num_output % 8 == 0 ? 8 : 1;
}
#endif // __SSE2__
bool use_int8_requantize = int8_scale_term > 100;
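// int8_scale_term > 100 means output scales (top_blob_int8_scales) were provided, so results can stay int8 (1 byte per lane) instead of fp32 (4 bytes)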
size_t out_elemsize = use_int8_requantize ? 1u * out_elempack : 4u * out_elempack;
top_blob.create(outw, outh, num_output / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#if __SSE2__
if (elempack == 8)
{
{
const int maxk = kernel_w * kernel_h;
// kernel offsets
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
int gap = w * dilation_h - kernel_w * dilation_w;
for (int i = 0; i < kernel_h; i++)
{
for (int j = 0; j < kernel_w; j++)
{
space_ofs[p1] = p2;
p1++;
p2 += dilation_w;
}
p2 += gap;
}
}
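// e.g. a 3x3 kernel with dilation 1 yields space_ofs = {0, 1, 2, w, w+1, w+2, 2*w, 2*w+1, 2*w+2}: tap offsets relative to the top-left sample of the window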
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < channels; g++)
{
signed char* outptr_s8 = top_blob.channel(g);
float* outptr_f32 = top_blob.channel(g);
const signed char* kptr = (const signed char*)weight_data_tm + maxk * g * 8;
const Mat m = bottom_blob_bordered.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__m128i _sum0 = _mm_setzero_si128();
__m128i _sum1 = _mm_setzero_si128();
const signed char* sptr = m.row<const signed char>(i * stride_h) + j * stride_w * 8;
for (int k = 0; k < maxk; k++)
{
// TODO use _mm_cvtepi8_epi16 on sse4.1
__m128i _val = _mm_loadl_epi64((const __m128i*)(sptr + space_ofs[k] * 8));
_val = _mm_unpacklo_epi8(_val, _mm_cmpgt_epi8(_mm_setzero_si128(), _val));
__m128i _w = _mm_loadl_epi64((const __m128i*)(kptr + k * 8));
_w = _mm_unpacklo_epi8(_w, _mm_cmpgt_epi8(_mm_setzero_si128(), _w));
__m128i _sl = _mm_mullo_epi16(_val, _w);
__m128i _sh = _mm_mulhi_epi16(_val, _w);
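// _mm_mullo_epi16/_mm_mulhi_epi16 give the low and high halves of each 16x16 product; interleaving them reassembles full 32-bit val*w terms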
__m128i _s0 = _mm_unpacklo_epi16(_sl, _sh);
__m128i _s1 = _mm_unpackhi_epi16(_sl, _sh);
_sum0 = _mm_add_epi32(_sum0, _s0);
_sum1 = _mm_add_epi32(_sum1, _s1);
}
__m128 _scale_in0;
__m128 _scale_in1;
{
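// scale_in = 1 / (input_scale * weight_scale) via the fast rcpps approximation; lanes whose weight scale is zero are masked to zero so the reciprocal of zero never propagates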
__m128 _bottom_blob_int8_scales0 = _mm_loadu_ps((const float*)bottom_blob_int8_scales + g * 8);
__m128 _bottom_blob_int8_scales1 = _mm_loadu_ps((const float*)bottom_blob_int8_scales + g * 8 + 4);
__m128 _weight_data_int8_scales0 = _mm_loadu_ps((const float*)weight_data_int8_scales + g * 8);
__m128 _weight_data_int8_scales1 = _mm_loadu_ps((const float*)weight_data_int8_scales + g * 8 + 4);
_scale_in0 = _mm_rcp_ps(_mm_mul_ps(_bottom_blob_int8_scales0, _weight_data_int8_scales0));
_scale_in1 = _mm_rcp_ps(_mm_mul_ps(_bottom_blob_int8_scales1, _weight_data_int8_scales1));
__m128 _m0 = _mm_cmpneq_ps(_weight_data_int8_scales0, _mm_setzero_ps());
__m128 _m1 = _mm_cmpneq_ps(_weight_data_int8_scales1, _mm_setzero_ps());
_scale_in0 = _mm_and_ps(_scale_in0, _m0);
_scale_in1 = _mm_and_ps(_scale_in1, _m1);
}
__m128 _sumfp32_0 = _mm_mul_ps(_mm_cvtepi32_ps(_sum0), _scale_in0);
__m128 _sumfp32_1 = _mm_mul_ps(_mm_cvtepi32_ps(_sum1), _scale_in1);
if (bias_term)
{
__m128 _bias0 = _mm_loadu_ps((const float*)bias_data + g * 8);
__m128 _bias1 = _mm_loadu_ps((const float*)bias_data + g * 8 + 4);
_sumfp32_0 = _mm_add_ps(_sumfp32_0, _bias0);
_sumfp32_1 = _mm_add_ps(_sumfp32_1, _bias1);
}
_sumfp32_0 = activation_sse(_sumfp32_0, activation_type, activation_params);
_sumfp32_1 = activation_sse(_sumfp32_1, activation_type, activation_params);
if (use_int8_requantize)
{
// requantize to int8 (the activation has already been applied above)
__m128 _scale_out0 = _mm_loadu_ps((const float*)top_blob_int8_scales + g * 8);
__m128 _scale_out1 = _mm_loadu_ps((const float*)top_blob_int8_scales + g * 8 + 4);
_sumfp32_0 = _mm_mul_ps(_sumfp32_0, _scale_out0);
_sumfp32_1 = _mm_mul_ps(_sumfp32_1, _scale_out1);
int64_t _sum8 = float2int8_sse(_sumfp32_0, _sumfp32_1);
*(int64_t*)outptr_s8 = _sum8;
outptr_s8 += 8;
}
else
{
// no requantization: store the dequantized fp32 result as-is
_mm_storeu_ps(outptr_f32, _sumfp32_0);
_mm_storeu_ps(outptr_f32 + 4, _sumfp32_1);
outptr_f32 += 8;
}
}
}
}
}
}
#endif // __SSE2__
if (elempack == 1)
{
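// geometry-specialized 3x3 stride-1/stride-2 kernels below; any other shape falls through to the generic space_ofs loop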
if (kernel_w == 3 && kernel_h == 3 && stride_w == 1 && stride_h == 1 && dilation_w == 1 && dilation_h == 1 && (activation_type == 0 || activation_type == 1))
{
if (use_int8_requantize)
{
std::vector<float> requantize_scales;
for (int g = 0; g < group; g++)
{
float scale_in;
if (weight_data_int8_scales[g] == 0)
scale_in = 0;
else
scale_in = 1.f / (bottom_blob_int8_scales[g] * weight_data_int8_scales[g]);
float scale_out = top_blob_int8_scales[g];
requantize_scales.push_back(scale_in);
requantize_scales.push_back(scale_out);
}
convdw3x3s1_int8_requant_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, requantize_scales, opt);
}
else
{
std::vector<float> dequantize_scales;
for (int g = 0; g < group; g++)
{
float top_rescale = 1.f / (bottom_blob_int8_scales[g] * weight_data_int8_scales[g]);
dequantize_scales.push_back(top_rescale);
}
convdw3x3s1_int8_dequant_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, dequantize_scales, opt);
}
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
}
else if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2 && (activation_type == 0 || activation_type == 1))
{
if (use_int8_requantize)
{
std::vector<float> requantize_scales;
for (int g = 0; g < group; g++)
{
float scale_in;
if (weight_data_int8_scales[g] == 0)
scale_in = 0;
else
scale_in = 1.f / (bottom_blob_int8_scales[g] * weight_data_int8_scales[g]);
float scale_out = top_blob_int8_scales[g];
requantize_scales.push_back(scale_in);
requantize_scales.push_back(scale_out);
}
convdw3x3s2_int8_requant_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, requantize_scales, opt);
}
else
{
std::vector<float> dequantize_scales;
for (int g = 0; g < group; g++)
{
float top_rescale = 1.f / (bottom_blob_int8_scales[g] * weight_data_int8_scales[g]);
dequantize_scales.push_back(top_rescale);
}
convdw3x3s2_int8_dequant_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, dequantize_scales, opt);
}
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
}
else
{
const int maxk = kernel_w * kernel_h;
// kernel offsets
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
int gap = w * dilation_h - kernel_w * dilation_w;
for (int i = 0; i < kernel_h; i++)
{
for (int j = 0; j < kernel_w; j++)
{
space_ofs[p1] = p2;
p1++;
p2 += dilation_w;
}
p2 += gap;
}
}
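// e.g. kernel_w = kernel_h = 3, dilation_w = dilation_h = 1, w = 7 gives
// gap = 7*1 - 3*1 = 4 and space_ofs = {0,1,2, 7,8,9, 14,15,16} -- the flat
// offsets of a 3x3 window inside a row-major plane that is w elements wide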
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
signed char* outptr_s8 = top_blob.channel(g);
float* outptr_f32 = top_blob.channel(g);
const signed char* kptr = (const signed char*)weight_data_tm + maxk * g;
const Mat m = bottom_blob_bordered.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
int sum = 0;
const signed char* sptr = m.row<const signed char>(i * stride_h) + j * stride_w;
for (int k = 0; k < maxk; k++)
{
signed char val = sptr[space_ofs[k]];
signed char w = kptr[k];
sum += val * w;
}
float scale_in;
if (weight_data_int8_scales[g] == 0)
scale_in = 0;
else
scale_in = 1.f / (bottom_blob_int8_scales[g] * weight_data_int8_scales[g]);
float sumfp32 = sum * scale_in;
if (bias_term)
sumfp32 += bias_data[g];
sumfp32 = activation_ss(sumfp32, activation_type, activation_params);
if (use_int8_requantize)
{
// requantize
float scale_out = top_blob_int8_scales[g];
signed char sums8 = float2int8(sumfp32 * scale_out);
outptr_s8[0] = sums8;
outptr_s8 += 1;
}
else
{
// dequantize
outptr_f32[0] = sumfp32;
outptr_f32 += 1;
}
}
}
}
}
}
return 0;
}
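// not depth-wise: fall back to the generic grouped path below, which slices
// per-group views and forwards each slice through its own ncnn::Layer
// held in group_ops[g]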
bool use_int8_requantize = int8_scale_term > 100;
int out_elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
if (use_int8_requantize)
out_elempack = num_output % 8 == 0 ? 8 : 1;
else
out_elempack = num_output % 4 == 0 ? 4 : 1;
}
#endif // __SSE2__
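// requantized output is int8 (1 byte per element), dequantized output is
// fp32 (4 bytes per element), so elemsize scales with the pack factor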
size_t out_elemsize = use_int8_requantize ? 1u * out_elempack : 4u * out_elempack;
top_blob.create(outw, outh, num_output / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
// group convolution
const int channels_g = channels * elempack / group;
const int num_output_g = num_output / group;
int g_elempack = 1;
int out_g_elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
g_elempack = channels_g % 8 == 0 ? 8 : 1;
if (use_int8_requantize)
out_g_elempack = num_output_g % 8 == 0 ? 8 : 1;
else
out_g_elempack = num_output_g % 4 == 0 ? 4 : 1;
}
#endif // __SSE2__
// unpacking
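// group boundaries need not align with the current pack factor, so repack
// the input down to g_elempack before taking per-group channel views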
Mat bottom_blob_bordered_unpacked = bottom_blob_bordered;
if (elempack > g_elempack)
{
Option opt_p = opt;
opt_p.blob_allocator = opt.workspace_allocator;
convert_packing(bottom_blob_bordered, bottom_blob_bordered_unpacked, g_elempack, opt_p);
}
Mat top_blob_unpacked = top_blob;
if (out_g_elempack < out_elempack)
{
top_blob_unpacked.create(outw, outh, num_output / out_g_elempack, out_elemsize / out_elempack * out_g_elempack, out_g_elempack, opt.workspace_allocator);
if (top_blob_unpacked.empty())
return -100;
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
const Mat bottom_blob_bordered_g = bottom_blob_bordered_unpacked.channel_range(channels_g * g / g_elempack, channels_g / g_elempack);
Mat top_blob_g = top_blob_unpacked.channel_range(num_output_g * g / out_g_elempack, num_output_g / out_g_elempack);
const ncnn::Layer* op = group_ops[g];
Option opt_g = opt;
opt_g.blob_allocator = top_blob_unpacked.allocator;
// forward
op->forward(bottom_blob_bordered_g, top_blob_g, opt_g);
}
// packing
if (out_g_elempack < out_elempack)
{
convert_packing(top_blob_unpacked, top_blob, out_elempack, opt);
}
else
{
top_blob = top_blob_unpacked;
}
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x318, %rsp # imm = 0x318
movq %rcx, 0x30(%rsp)
movq %rdx, 0x20(%rsp)
movq %rsi, %r15
movl 0x38(%rsi), %ecx
movl 0x18(%rsi), %r12d
movq 0x10(%rsi), %rsi
testl %r12d, %r12d
je 0x2a9903
leal (,%rsi,8), %eax
cltd
idivl %r12d
cmpl $0x8, %eax
sete %dl
jmp 0x2a9905
xorl %edx, %edx
movq (%rdi), %rax
movq -0x18(%rax), %r9
movq %rdi, %r8
movl 0xd4(%rdi,%r9), %ebp
movl 0xd8(%rdi,%r9), %ebx
decl %ebp
imull 0xdc(%rdi,%r9), %ebp
decl %ebx
imull 0xe0(%rdi,%r9), %ebx
movq 0x8(%r15), %rdi
vmovups (%r15), %xmm0
vmovaps %xmm0, 0x1e0(%rsp)
movq %rsi, 0x1f0(%rsp)
movl %r12d, 0x1f8(%rsp)
movq 0x20(%r15), %rsi
movq %rsi, 0x200(%rsp)
vmovdqu 0x28(%r15), %xmm0
vmovdqu %xmm0, 0x208(%rsp)
movl %ecx, 0x218(%rsp)
movq 0x40(%r15), %rsi
movq %rsi, 0x220(%rsp)
testq %rdi, %rdi
je 0x2a9990
lock
incl (%rdi)
movq (%r8), %rax
movq %r8, 0x8(%rsp)
testb %dl, %dl
je 0x2a99a3
movq 0x8(%rsp), %rcx
jmp 0x2a9ab6
imull %ecx, %r12d
movq -0x18(%rax), %rax
movq 0x8(%rsp), %r13
movl 0x108(%r13,%rax), %eax
movl %eax, 0x10(%rsp)
leaq 0x70(%rsp), %rdi
andq $0x0, 0x40(%rdi)
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, (%rdi)
vmovdqu %xmm0, 0xc(%rdi)
vmovdqa %xmm0, 0x20(%rdi)
vmovdqu %xmm0, 0x2c(%rdi)
xorl %r14d, %r14d
pushq $0x4
popq %rdx
movl %r12d, %esi
xorl %ecx, %ecx
callq 0x635fa
movl %r12d, %eax
cltd
idivl 0x10(%rsp)
movq 0x70(%rsp), %rcx
movq (%r13), %rdx
testl %eax, %eax
cmovlel %r14d, %eax
movq 0x8(%rsp), %r8
movq -0x18(%rdx), %rsi
movslq 0x108(%r8,%rsi), %rdi
cmpq %rdi, %r14
jge 0x2a9a3e
movq 0x240(%r8,%rsi), %rsi
vmovd (%rsi,%r14,4), %xmm0
movl %eax, %esi
subl $0x1, %esi
jb 0x2a9a39
vmovd %xmm0, (%rcx)
addq $0x4, %rcx
jmp 0x2a9a2a
incq %r14
jmp 0x2a9a09
movq 0x30(%rsp), %rax
vmovups (%rax), %ymm0
vmovups 0x20(%rax), %ymm1
leaq 0x130(%rsp), %rcx
vmovups %ymm1, 0x20(%rcx)
vmovups %ymm0, (%rcx)
movq 0x10(%rax), %rax
movq %rax, 0x8(%rcx)
leaq 0x1e0(%rsp), %rsi
leaq 0x70(%rsp), %rdx
movq %r15, %rdi
vzeroupper
callq 0x652e3
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x2a9aae
lock
decl (%rax)
jne 0x2a9aae
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x2a9aa6
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2a9aae
movq %rsi, %rdi
callq 0x5f3e0
movq 0x8(%rsp), %rcx
movq (%rcx), %rax
leaq 0x70(%rsp), %rdx
andq $0x0, 0x40(%rdx)
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, (%rdx)
vmovdqu %xmm0, 0xc(%rdx)
vmovdqa %xmm0, 0x20(%rdx)
vmovdqu %xmm0, 0x2c(%rdx)
movq -0x18(%rax), %rdi
addq %rcx, %rdi
leaq 0x1e0(%rsp), %rsi
movq 0x30(%rsp), %rcx
callq 0x287daa
pushq $-0x64
popq %r12
cmpq $0x0, 0x70(%rsp)
je 0x2ab463
movslq 0xa8(%rsp), %r14
movq 0xb0(%rsp), %rax
imulq %r14, %rax
testq %rax, %rax
je 0x2ab463
notl %ebp
movl 0x88(%rsp), %r13d
movl 0x9c(%rsp), %edi
addl %edi, %ebp
movq 0x8(%rsp), %rdx
movq (%rdx), %rax
movq -0x18(%rax), %rcx
movl %ebp, %eax
movq %rdx, %rbp
cltd
idivl 0xe4(%rbp,%rcx)
movl %eax, %esi
notl %ebx
addl 0xa0(%rsp), %ebx
movl %ebx, %eax
cltd
idivl 0xe8(%rbp,%rcx)
movl %eax, %edx
movq %rsi, 0x10(%rsp)
incl %esi
movq %rdx, 0x68(%rsp)
leal 0x1(%rdx), %r10d
movl %r13d, %ebx
imull %r14d, %ebx
pushq $0x8
popq %r15
cmpl 0x108(%rbp,%rcx), %ebx
jne 0x2a9cca
cmpl 0xd0(%rbp,%rcx), %ebx
jne 0x2a9cca
movl %edi, 0x18(%rsp)
movq 0x30(%rsp), %rdi
cmpb $0x0, 0x27(%rdi)
pushq $0x1
popq %rax
cmovel %eax, %r15d
testb $0x7, %bl
cmovnel %eax, %r15d
movl 0x10c(%rbp,%rcx), %eax
leal (,%r15,4), %r8d
movl %eax, 0x38(%rsp)
cmpl $0x65, %eax
cmovgel %r15d, %r8d
movl %ebx, %eax
cltd
idivl %r15d
movq 0x8(%rdi), %rcx
movq %rcx, (%rsp)
movq 0x20(%rsp), %rbx
movq %rbx, %rdi
movl %r10d, %edx
movl %eax, %ecx
movl %r15d, %r9d
callq 0x628f2
cmpq $0x0, (%rbx)
je 0x2ab463
movslq 0x38(%rbx), %rax
imulq 0x40(%rbx), %rax
testq %rax, %rax
je 0x2ab463
xorl %r12d, %r12d
cmpl $0x1, %r13d
je 0x2aa2a7
cmpl $0x8, %r13d
jne 0x2ab463
movq (%rbp), %rax
movq -0x18(%rax), %rax
movslq 0xd4(%rbp,%rax), %rcx
movslq 0xd8(%rbp,%rax), %r15
imulq %rcx, %r15
leaq 0x130(%rsp), %rdi
leaq 0xe0(%rsp), %rdx
movq %r15, %rsi
callq 0x73bbe
movq (%rbp), %rcx
movq -0x18(%rcx), %rdx
movl 0x18(%rsp), %r11d
imull 0xe0(%rbp,%rdx), %r11d
movq 0x130(%rsp), %rax
movl 0xdc(%rbp,%rdx), %esi
imull 0xd4(%rbp,%rdx), %esi
subl %esi, %r11d
xorl %esi, %esi
xorl %edi, %edi
xorl %r8d, %r8d
cmpl 0xd8(%rbp,%rdx), %esi
jge 0x2aa3d8
movslq %r8d, %r8
leaq (%rax,%r8,4), %r10
xorl %r9d, %r9d
cmpl 0xd4(%rbp,%rdx), %r9d
jge 0x2a9cc0
movl %edi, (%r10,%r9,4)
movq -0x18(%rcx), %rdx
addl 0xdc(%rbp,%rdx), %edi
incq %r9
jmp 0x2a9ca2
addl %r11d, %edi
incl %esi
addq %r9, %r8
jmp 0x2a9c8b
movl 0xd0(%rbp,%rcx), %eax
movl 0x10c(%rbp,%rcx), %r14d
movq 0x30(%rsp), %rcx
cmpb $0x1, 0x27(%rcx)
movl %r13d, 0x40(%rsp)
movl %r12d, %r13d
jne 0x2a9cfe
cmpl $0x65, %r14d
jl 0x2a9d04
testb $0x7, %al
pushq $0x1
popq %r9
cmovel %r15d, %r9d
jmp 0x2a9d12
pushq $0x1
popq %r9
jmp 0x2a9d12
xorl %ecx, %ecx
testb $0x3, %al
sete %cl
leal (%rcx,%rcx,2), %r9d
incl %r9d
leal (,%r9,4), %r8d
cmpl $0x65, %r14d
cmovgel %r9d, %r8d
cltd
idivl %r9d
movq 0x30(%rsp), %rcx
movq 0x8(%rcx), %rcx
movq %rcx, (%rsp)
movq 0x20(%rsp), %r12
movq %r12, %rdi
movl %esi, 0x18(%rsp)
movl %r10d, 0x28(%rsp)
movl %r10d, %edx
movl %eax, %ecx
movq %r8, 0x48(%rsp)
movq %r9, 0x38(%rsp)
callq 0x628f2
cmpq $0x0, (%r12)
je 0x2a9dd1
movslq 0x38(%r12), %rax
imulq 0x40(%r12), %rax
testq %rax, %rax
movl %r13d, %r12d
movl 0x40(%rsp), %r8d
je 0x2ab463
movq (%rbp), %rax
movq -0x18(%rax), %rax
movl 0xd0(%rbp,%rax), %ecx
movl 0x108(%rbp,%rax), %esi
movl %ebx, %eax
cltd
idivl %esi
movl %eax, %edi
movl %ecx, %eax
cltd
idivl %esi
movl %eax, %ebp
movq 0x30(%rsp), %rax
cmpb $0x1, 0x27(%rax)
jne 0x2a9dd9
testb $0x7, %dil
pushq $0x1
popq %rbx
movl %ebx, %r12d
cmovel %r15d, %r12d
cmpl $0x65, %r14d
movl 0x18(%rsp), %esi
movl 0x28(%rsp), %r9d
jl 0x2a9deb
testb $0x7, %bpl
cmovel %r15d, %ebx
jmp 0x2a9df9
movl %r13d, %r12d
jmp 0x2ab463
pushq $0x1
popq %r12
movl %r12d, %ebx
movl 0x18(%rsp), %esi
movl 0x28(%rsp), %r9d
jmp 0x2a9df9
xorl %eax, %eax
testb $0x3, %bpl
sete %al
leal (%rax,%rax,2), %ebx
incl %ebx
movq 0x78(%rsp), %rax
vmovaps 0x70(%rsp), %xmm0
vmovaps %xmm0, 0x130(%rsp)
movq 0x80(%rsp), %rcx
movq %rcx, 0x140(%rsp)
movl 0x88(%rsp), %ecx
movl %ecx, 0x148(%rsp)
movq 0x90(%rsp), %rcx
movq %rcx, 0x150(%rsp)
vmovups 0x98(%rsp), %xmm0
vmovups %xmm0, 0x158(%rsp)
movl 0xa8(%rsp), %ecx
movl %ecx, 0x168(%rsp)
movq 0xb0(%rsp), %rcx
movq %rcx, 0x170(%rsp)
testq %rax, %rax
movq 0x20(%rsp), %r14
je 0x2a9e78
lock
incl (%rax)
cmpl %r12d, %r8d
movl %edi, 0x10(%rsp)
jle 0x2a9ecd
movq 0x30(%rsp), %rax
vmovups (%rax), %ymm0
vmovups 0x20(%rax), %ymm1
leaq 0xe0(%rsp), %rcx
vmovups %ymm1, 0x20(%rcx)
vmovups %ymm0, (%rcx)
movq 0x10(%rax), %rax
movq %rax, 0x8(%rcx)
leaq 0x70(%rsp), %rdi
leaq 0x130(%rsp), %rsi
movl %r12d, %edx
vzeroupper
callq 0x64e3b
movl 0x10(%rsp), %edi
movl 0x18(%rsp), %esi
movl 0x28(%rsp), %r9d
movq 0x8(%r14), %rax
vmovups (%r14), %xmm0
vmovaps %xmm0, 0xe0(%rsp)
movq 0x10(%r14), %rcx
movq %rcx, 0xf0(%rsp)
movl 0x18(%r14), %ecx
movl %ecx, 0xf8(%rsp)
movq 0x20(%r14), %rcx
movq %rcx, 0x100(%rsp)
vmovups 0x28(%r14), %xmm0
vmovups %xmm0, 0x108(%rsp)
movl 0x38(%r14), %ecx
movl %ecx, 0x118(%rsp)
movq 0x40(%r14), %rcx
movq %rcx, 0x120(%rsp)
testq %rax, %rax
je 0x2a9f30
lock
incl (%rax)
cmpl 0x38(%rsp), %ebx
jae 0x2a9fb0
movq 0x8(%rsp), %rcx
movq (%rcx), %rax
movq -0x18(%rax), %rax
movl 0xd0(%rcx,%rax), %eax
cltd
idivl %ebx
movl %eax, %ecx
movzbl 0x48(%rsp), %eax
divb 0x38(%rsp)
movzbl %al, %r8d
imull %ebx, %r8d
movq 0x30(%rsp), %rax
movq 0x10(%rax), %rax
movq %rax, (%rsp)
leaq 0xe0(%rsp), %rdi
movl %r9d, %edx
movl %ebx, %r9d
callq 0x628f2
movl 0x10(%rsp), %edi
pushq $-0x64
popq %r14
cmpq $0x0, 0xe0(%rsp)
je 0x2ab3f2
movslq 0x118(%rsp), %rax
imulq 0x120(%rsp), %rax
testq %rax, %rax
je 0x2ab3f2
xorl %r15d, %r15d
xorl %r13d, %r13d
xorl %r14d, %r14d
movq 0x8(%rsp), %r8
movq (%r8), %rax
movq -0x18(%rax), %rax
movslq 0x108(%r8,%rax), %rax
cmpq %rax, %r14
jge 0x2aa225
movl %r15d, %eax
cltd
idivl %r12d
movl %eax, %ecx
movl %edi, %eax
cltd
idivl %r12d
movslq %ecx, %rdx
imulq 0x170(%rsp), %rdx
movq 0x140(%rsp), %rcx
imulq %rcx, %rdx
addq 0x130(%rsp), %rdx
movl 0x148(%rsp), %esi
movq 0x150(%rsp), %rdi
movq %rdx, 0x198(%rsp)
andq $0x0, 0x1a0(%rsp)
movq %rcx, 0x1a8(%rsp)
movl %esi, 0x1b0(%rsp)
movq %rdi, 0x1b8(%rsp)
movl %eax, 0x1d0(%rsp)
vmovups 0x158(%rsp), %xmm0
movslq 0x164(%rsp), %rax
movslq 0x15c(%rsp), %rdx
movslq 0x160(%rsp), %rsi
imulq %rdx, %rsi
imulq %rcx, %rax
imulq %rsi, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rcx
movq %rax, 0x1d8(%rsp)
vmovups %xmm0, 0x1c0(%rsp)
movl %r13d, %eax
cltd
idivl %ebx
movl %eax, %ecx
movl %ebp, %eax
cltd
idivl %ebx
movslq %ecx, %rdx
imulq 0x120(%rsp), %rdx
movq 0xf0(%rsp), %rsi
imulq %rsi, %rdx
addq 0xe0(%rsp), %rdx
movl 0xf8(%rsp), %edi
movq 0x100(%rsp), %rcx
movq %rdx, 0x228(%rsp)
andq $0x0, 0x230(%rsp)
movq %rsi, 0x238(%rsp)
movl %edi, 0x240(%rsp)
movq %rcx, 0x248(%rsp)
movl %eax, 0x260(%rsp)
vmovups 0x108(%rsp), %xmm0
movslq 0x114(%rsp), %rax
movslq 0x10c(%rsp), %rdx
movslq 0x110(%rsp), %rdi
imulq %rdx, %rdi
imulq %rsi, %rax
imulq %rdi, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rsi
movq %rax, 0x268(%rsp)
vmovups %xmm0, 0x250(%rsp)
movq 0x10(%r8), %rax
movq (%rax,%r14,8), %rdi
movq 0x30(%rsp), %rax
vmovups (%rax), %ymm0
vmovups 0x20(%rax), %ymm1
vmovups %ymm0, 0x2d0(%rsp)
vmovups %ymm1, 0x2f0(%rsp)
movq %rcx, 0x2d8(%rsp)
movq (%rdi), %rax
leaq 0x198(%rsp), %rsi
leaq 0x228(%rsp), %rdx
leaq 0x2d0(%rsp), %rcx
vzeroupper
callq *0x38(%rax)
movq 0x230(%rsp), %rax
testq %rax, %rax
movl 0x10(%rsp), %edi
je 0x2aa1d8
lock
decl (%rax)
jne 0x2aa1d8
movq 0x228(%rsp), %rsi
movq 0x248(%rsp), %rdi
testq %rdi, %rdi
je 0x2aa1cc
movq (%rdi), %rax
callq *0x18(%rax)
movl 0x10(%rsp), %edi
jmp 0x2aa1d8
movq %rsi, %rdi
callq 0x5f3e0
movl 0x10(%rsp), %edi
movq 0x1a0(%rsp), %rax
testq %rax, %rax
je 0x2aa217
lock
decl (%rax)
jne 0x2aa217
movq 0x198(%rsp), %rsi
movq 0x1b8(%rsp), %rdi
testq %rdi, %rdi
je 0x2aa20b
movq (%rdi), %rax
callq *0x18(%rax)
movl 0x10(%rsp), %edi
jmp 0x2aa217
movq %rsi, %rdi
callq 0x5f3e0
movl 0x10(%rsp), %edi
incq %r14
addl %ebp, %r13d
addl %edi, %r15d
jmp 0x2a9fb9
movq 0x38(%rsp), %rdx
cmpl %edx, %ebx
jae 0x2aa24d
xorl %r14d, %r14d
leaq 0xe0(%rsp), %rdi
movq 0x20(%rsp), %rsi
movq 0x30(%rsp), %rcx
callq 0x64e3b
jmp 0x2ab3f2
leaq 0xe0(%rsp), %rax
movq 0x20(%rsp), %rbx
xorl %r14d, %r14d
cmpq %rbx, %rax
je 0x2ab3f2
movq 0xe8(%rsp), %rax
testq %rax, %rax
je 0x2aa276
lock
incl (%rax)
movq 0x8(%rbx), %rax
testq %rax, %rax
je 0x2ab39f
lock
decl (%rax)
jne 0x2ab39f
movq (%rbx), %rsi
movq 0x20(%rbx), %rdi
testq %rdi, %rdi
je 0x2ab397
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2ab39f
movq (%rbp), %rax
movq -0x18(%rax), %rdx
movl 0xd4(%rbp,%rdx), %ecx
movl 0xd8(%rbp,%rdx), %r13d
movl %ecx, %esi
xorl $0x3, %esi
movl %r13d, %edi
xorl $0x3, %edi
orl %esi, %edi
movq 0x20(%rsp), %r14
jne 0x2aaf50
cmpl $0x1, 0xe4(%rbp,%rdx)
jne 0x2aae45
cmpl $0x1, 0xe8(%rbp,%rdx)
jne 0x2aae45
cmpl $0x1, 0xdc(%rbp,%rdx)
jne 0x2aae45
cmpl $0x1, 0xe0(%rbp,%rdx)
jne 0x2aae45
cmpl $0x1, 0x110(%rbp,%rdx)
ja 0x2aae45
cmpl $0x65, 0x38(%rsp)
jl 0x2ab4e0
vxorps %xmm0, %xmm0, %xmm0
leaq 0x130(%rsp), %rbx
vmovaps %xmm0, (%rbx)
andq $0x0, 0x10(%rbx)
xorl %r14d, %r14d
leaq 0xe0(%rsp), %r15
leaq 0x198(%rsp), %r12
movq -0x18(%rax), %r13
movslq 0x108(%rbp,%r13), %rax
cmpq %rax, %r14
jge 0x2ab821
movq 0x1f8(%rbp,%r13), %rax
vmovss (%rax,%r14,4), %xmm1
vxorps %xmm0, %xmm0, %xmm0
vucomiss %xmm1, %xmm0
je 0x2aa396
movq 0x240(%rbp,%r13), %rax
vmulss (%rax,%r14,4), %xmm1, %xmm0
vmovss 0x1448f6(%rip), %xmm1 # 0x3eec88
vdivss %xmm0, %xmm1, %xmm0
vmovss %xmm0, 0xe0(%rsp)
movq 0x288(%rbp,%r13), %rax
vmovss (%rax,%r14,4), %xmm0
vmovss %xmm0, 0x198(%rsp)
movq %rbx, %rdi
movq %r15, %rsi
callq 0x1ea12c
movq %rbx, %rdi
movq %r12, %rsi
callq 0x1ea12c
incq %r14
movq (%rbp), %rax
jmp 0x2aa34f
leal (,%r15,8), %ecx
movl %ecx, 0x28(%rsp)
xorl %edx, %edx
testl %r15d, %r15d
cmovlel %edx, %r15d
testl %r14d, %r14d
cmovlel %edx, %r14d
vpxor %xmm0, %xmm0, %xmm0
vbroadcastss 0x143c13(%rip), %xmm2 # 0x3ee014
vbroadcastss 0x14487e(%rip), %xmm8 # 0x3eec88
xorl %esi, %esi
movq %r14, 0x18(%rsp)
cmpq %r14, %rsi
je 0x2ab382
movq 0x20(%rsp), %rcx
movq 0x10(%rcx), %r8
imulq %rsi, %r8
imulq 0x40(%rcx), %r8
movq %rdx, 0x30(%rsp)
movslq %edx, %r9
addq (%rcx), %r8
movslq 0x9c(%rsp), %r10
movq 0xb0(%rsp), %r11
imulq %rsi, %r11
movq 0x80(%rsp), %rcx
imulq %rcx, %r11
addq 0x70(%rsp), %r11
imulq %rcx, %r10
movq %rsi, 0x40(%rsp)
leaq (,%rsi,8), %rbx
addq 0x28(%rbp), %r9
xorl %ecx, %ecx
movq %r8, %rdx
cmpl 0x68(%rsp), %ecx
jg 0x2aae2a
movq (%rbp), %rdi
xorl %r13d, %r13d
cmpl 0x10(%rsp), %r13d
jg 0x2aae23
movq -0x18(%rdi), %rsi
movslq 0xe8(%rbp,%rsi), %r14
movslq %ecx, %r12
imulq %r14, %r12
imulq %r10, %r12
movl 0xe4(%rbp,%rsi), %ebp
imull %r13d, %ebp
shll $0x3, %ebp
movslq %ebp, %r14
addq %r11, %r14
addq %r12, %r14
vpxor %xmm3, %xmm3, %xmm3
xorl %r12d, %r12d
vpxor %xmm4, %xmm4, %xmm4
cmpq %r12, %r15
je 0x2aa50d
movslq (%rax,%r12,4), %rbp
vmovq (%r14,%rbp,8), %xmm5
vpcmpgtb %xmm5, %xmm0, %xmm6
vpunpcklbw %xmm6, %xmm5, %xmm5 # xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3],xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7]
vmovq (%r9,%r12,8), %xmm6
vpcmpgtb %xmm6, %xmm0, %xmm7
vpunpcklbw %xmm7, %xmm6, %xmm6 # xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
vpmullw %xmm5, %xmm6, %xmm7
vpmulhw %xmm6, %xmm5, %xmm5
vpunpcklwd %xmm5, %xmm7, %xmm6 # xmm6 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3]
vpunpckhwd %xmm5, %xmm7, %xmm5 # xmm5 = xmm7[4],xmm5[4],xmm7[5],xmm5[5],xmm7[6],xmm5[6],xmm7[7],xmm5[7]
vpaddd %xmm6, %xmm4, %xmm4
vpaddd %xmm5, %xmm3, %xmm3
incq %r12
jmp 0x2aa4cb
movq 0x8(%rsp), %rbp
movq 0x1f8(%rbp,%rsi), %r14
movq 0x240(%rbp,%rsi), %r12
vmovups (%r14,%rbx,4), %xmm5
vmovups 0x10(%r14,%rbx,4), %xmm15
vmulps (%r12,%rbx,4), %xmm5, %xmm6
vmulps 0x10(%r12,%rbx,4), %xmm15, %xmm7
vrcpps %xmm6, %xmm6
vrcpps %xmm7, %xmm7
vcmpneqps %xmm0, %xmm5, %xmm5
vandps %xmm6, %xmm5, %xmm5
vcmpneqps %xmm0, %xmm15, %xmm6
vandps %xmm7, %xmm6, %xmm6
vcvtdq2ps %xmm4, %xmm4
vmulps %xmm4, %xmm5, %xmm4
vcvtdq2ps %xmm3, %xmm3
vmulps %xmm3, %xmm6, %xmm5
cmpl $0x0, 0x100(%rbp,%rsi)
je 0x2aa585
movq 0x1b0(%rbp,%rsi), %r14
vaddps (%r14,%rbx,4), %xmm4, %xmm4
vaddps 0x10(%r14,%rbx,4), %xmm5, %xmm5
movl 0x110(%rbp,%rsi), %r14d
decl %r14d
cmpl $0x5, %r14d
ja 0x2aad9f
leaq 0x14e14f(%rip), %r12 # 0x3f86f0
movslq (%r12,%r14,4), %r14
addq %r12, %r14
vmaxps %xmm0, %xmm4, %xmm15
vmaxps %xmm0, %xmm5, %xmm3
jmpq *%r14
movq 0x118(%rbp,%rsi), %r14
vminps %xmm0, %xmm4, %xmm4
vbroadcastss (%r14), %xmm6
vmulps %xmm4, %xmm6, %xmm4
vaddps %xmm4, %xmm15, %xmm15
vminps %xmm0, %xmm5, %xmm4
vmulps %xmm4, %xmm6, %xmm4
jmp 0x2aad5c
vbroadcastss 0x146bd6(%rip), %xmm1 # 0x3f11b8
vminps %xmm1, %xmm4, %xmm3
vbroadcastss 0x146bcd(%rip), %xmm1 # 0x3f11bc
vmaxps %xmm1, %xmm3, %xmm3
vbroadcastss 0x146bc4(%rip), %xmm1 # 0x3f11c0
vmulps %xmm1, %xmm3, %xmm6
vaddps %xmm2, %xmm6, %xmm6
vcvttps2dq %xmm6, %xmm7
vcvtdq2ps %xmm7, %xmm7
vcmpltps %xmm7, %xmm6, %xmm6
vandps %xmm6, %xmm8, %xmm6
vsubps %xmm6, %xmm7, %xmm6
vbroadcastss 0x1472fe(%rip), %xmm9 # 0x3f1920
vmulps %xmm6, %xmm9, %xmm7
vsubps %xmm7, %xmm3, %xmm3
vmulps %xmm3, %xmm3, %xmm7
vbroadcastss 0x146b95(%rip), %xmm10 # 0x3f11cc
vmulps %xmm3, %xmm10, %xmm15
vmovaps %xmm10, %xmm12
vbroadcastss 0x146b87(%rip), %xmm10 # 0x3f11d0
vaddps %xmm10, %xmm15, %xmm15
vmovaps %xmm10, %xmm13
vmulps %xmm3, %xmm15, %xmm15
vbroadcastss 0x146b74(%rip), %xmm11 # 0x3f11d4
vaddps %xmm11, %xmm15, %xmm15
vmovaps %xmm11, %xmm14
vmulps %xmm3, %xmm15, %xmm15
vbroadcastss 0x146b61(%rip), %xmm1 # 0x3f11d8
vaddps %xmm1, %xmm15, %xmm15
vmulps %xmm3, %xmm15, %xmm15
vbroadcastss 0x146b54(%rip), %xmm10 # 0x3f11dc
vaddps %xmm10, %xmm15, %xmm15
vmulps %xmm3, %xmm15, %xmm15
vaddps %xmm2, %xmm15, %xmm15
vmulps %xmm7, %xmm15, %xmm7
vaddps %xmm3, %xmm8, %xmm3
vaddps %xmm7, %xmm3, %xmm3
vcvttps2dq %xmm6, %xmm6
vpslld $0x17, %xmm6, %xmm6
vpaddd %xmm6, %xmm8, %xmm6
vmulps %xmm6, %xmm3, %xmm3
vaddps %xmm3, %xmm8, %xmm6
vcmpleps %xmm0, %xmm6, %xmm3
vbroadcastss 0x146b1c(%rip), %xmm7 # 0x3f11e0
vmaxps %xmm7, %xmm6, %xmm6
vpsrld $0x17, %xmm6, %xmm7
vbroadcastss 0x146b0e(%rip), %xmm10 # 0x3f11e4
vandps %xmm6, %xmm10, %xmm6
vorps %xmm2, %xmm6, %xmm6
vbroadcastss 0x146b01(%rip), %xmm10 # 0x3f11e8
vpaddd %xmm7, %xmm10, %xmm7
vcvtdq2ps %xmm7, %xmm7
vbroadcastss 0x146af4(%rip), %xmm10 # 0x3f11ec
vcmpltps %xmm10, %xmm6, %xmm15
vandps %xmm6, %xmm15, %xmm10
vbroadcastss 0x146ae5(%rip), %xmm1 # 0x3f11f0
vaddps %xmm1, %xmm6, %xmm6
vaddps %xmm6, %xmm10, %xmm6
vandps %xmm8, %xmm15, %xmm10
vsubps %xmm10, %xmm7, %xmm7
vmulps %xmm6, %xmm6, %xmm10
vbroadcastss 0x146aca(%rip), %xmm15 # 0x3f11f4
vmulps %xmm6, %xmm15, %xmm15
vbroadcastss 0x146ac1(%rip), %xmm11 # 0x3f11f8
vaddps %xmm11, %xmm15, %xmm15
vmulps %xmm6, %xmm15, %xmm15
vbroadcastss 0x146ab3(%rip), %xmm11 # 0x3f11fc
vaddps %xmm11, %xmm15, %xmm15
vmulps %xmm6, %xmm15, %xmm15
vbroadcastss 0x146aa5(%rip), %xmm11 # 0x3f1200
vaddps %xmm11, %xmm15, %xmm15
vmulps %xmm6, %xmm15, %xmm15
vbroadcastss 0x146a97(%rip), %xmm11 # 0x3f1204
vaddps %xmm11, %xmm15, %xmm15
vmulps %xmm6, %xmm15, %xmm15
vbroadcastss 0x146a89(%rip), %xmm11 # 0x3f1208
vaddps %xmm11, %xmm15, %xmm15
vmulps %xmm6, %xmm15, %xmm15
vbroadcastss 0x146a7b(%rip), %xmm11 # 0x3f120c
vaddps %xmm11, %xmm15, %xmm15
vmulps %xmm6, %xmm15, %xmm15
vbroadcastss 0x146a6d(%rip), %xmm11 # 0x3f1210
vaddps %xmm11, %xmm15, %xmm15
vmulps %xmm6, %xmm15, %xmm15
vbroadcastss 0x146a5f(%rip), %xmm11 # 0x3f1214
vaddps %xmm11, %xmm15, %xmm15
vmulps %xmm6, %xmm15, %xmm15
vmulps %xmm7, %xmm9, %xmm7
vbroadcastss 0x147159(%rip), %xmm11 # 0x3f1924
vaddps %xmm11, %xmm15, %xmm15
vmulps %xmm15, %xmm10, %xmm10
vaddps %xmm6, %xmm7, %xmm6
vaddps %xmm6, %xmm10, %xmm6
vbroadcastss 0x147f32(%rip), %xmm11 # 0x3f2718
vmulps %xmm6, %xmm11, %xmm6
vbroadcastss 0x14710d(%rip), %xmm11 # 0x3f1900
vblendvps %xmm3, %xmm11, %xmm6, %xmm3
vbroadcastss 0x1469b6(%rip), %xmm1 # 0x3f11b8
vminps %xmm1, %xmm3, %xmm3
vbroadcastss 0x1469ad(%rip), %xmm1 # 0x3f11bc
vmaxps %xmm1, %xmm3, %xmm3
vbroadcastss 0x1469a4(%rip), %xmm1 # 0x3f11c0
vmulps %xmm1, %xmm3, %xmm6
vaddps %xmm2, %xmm6, %xmm6
vcvttps2dq %xmm6, %xmm7
vcvtdq2ps %xmm7, %xmm7
vcmpltps %xmm7, %xmm6, %xmm6
vandps %xmm6, %xmm8, %xmm6
vsubps %xmm6, %xmm7, %xmm6
vmulps %xmm6, %xmm9, %xmm7
vsubps %xmm7, %xmm3, %xmm3
vmulps %xmm3, %xmm3, %xmm7
vmovaps %xmm12, %xmm1
vmulps %xmm3, %xmm12, %xmm10
vmovaps %xmm13, %xmm12
vaddps %xmm13, %xmm10, %xmm10
vmulps %xmm3, %xmm10, %xmm10
vmovaps %xmm14, %xmm13
vaddps %xmm14, %xmm10, %xmm10
vmulps %xmm3, %xmm10, %xmm10
vbroadcastss 0x146966(%rip), %xmm14 # 0x3f11d8
vaddps %xmm14, %xmm10, %xmm10
vmulps %xmm3, %xmm10, %xmm10
vbroadcastss 0x146958(%rip), %xmm11 # 0x3f11dc
vaddps %xmm11, %xmm10, %xmm10
vmulps %xmm3, %xmm10, %xmm10
vaddps %xmm2, %xmm10, %xmm10
vmulps %xmm7, %xmm10, %xmm7
vaddps %xmm3, %xmm8, %xmm3
vaddps %xmm7, %xmm3, %xmm3
vcvttps2dq %xmm6, %xmm6
vpslld $0x17, %xmm6, %xmm6
vpaddd %xmm6, %xmm8, %xmm6
vmulps %xmm6, %xmm3, %xmm3
vaddps %xmm3, %xmm8, %xmm3
vrcpps %xmm3, %xmm6
vaddps %xmm6, %xmm6, %xmm7
vmulps %xmm7, %xmm3, %xmm3
vbroadcastss 0x147e41(%rip), %xmm11 # 0x3f2708
vsubps %xmm3, %xmm11, %xmm3
vmulps %xmm3, %xmm6, %xmm3
vbroadcastss 0x146918(%rip), %xmm11 # 0x3f11f0
vaddps %xmm7, %xmm11, %xmm6
vaddps %xmm3, %xmm6, %xmm3
vmulps %xmm4, %xmm3, %xmm15
vbroadcastss 0x1468cb(%rip), %xmm3 # 0x3f11b8
vminps %xmm3, %xmm5, %xmm3
vbroadcastss 0x1468c2(%rip), %xmm4 # 0x3f11bc
vmaxps %xmm4, %xmm3, %xmm3
vbroadcastss 0x1468b9(%rip), %xmm4 # 0x3f11c0
vmulps %xmm4, %xmm3, %xmm4
vaddps %xmm2, %xmm4, %xmm4
vcvttps2dq %xmm4, %xmm6
vcvtdq2ps %xmm6, %xmm6
vcmpltps %xmm6, %xmm4, %xmm4
vandps %xmm4, %xmm8, %xmm4
vsubps %xmm4, %xmm6, %xmm4
vmulps %xmm4, %xmm9, %xmm6
vsubps %xmm6, %xmm3, %xmm3
vmulps %xmm3, %xmm3, %xmm6
vmulps %xmm1, %xmm3, %xmm7
vaddps %xmm7, %xmm12, %xmm7
vmulps %xmm3, %xmm7, %xmm7
vaddps %xmm7, %xmm13, %xmm7
vmovaps %xmm13, %xmm12
vmulps %xmm3, %xmm7, %xmm7
vaddps %xmm7, %xmm14, %xmm7
vmovaps %xmm14, %xmm13
vmulps %xmm3, %xmm7, %xmm7
vbroadcastss 0x14687d(%rip), %xmm14 # 0x3f11dc
vaddps %xmm7, %xmm14, %xmm7
vmulps %xmm3, %xmm7, %xmm7
vaddps %xmm2, %xmm7, %xmm7
vmulps %xmm7, %xmm6, %xmm6
vaddps %xmm3, %xmm8, %xmm3
vaddps %xmm6, %xmm3, %xmm3
vcvttps2dq %xmm4, %xmm4
vpslld $0x17, %xmm4, %xmm4
vpaddd %xmm4, %xmm8, %xmm4
vmulps %xmm4, %xmm3, %xmm3
vaddps %xmm3, %xmm8, %xmm3
vbroadcastss 0x14684b(%rip), %xmm1 # 0x3f11e0
vmaxps %xmm1, %xmm3, %xmm4
vpsrld $0x17, %xmm4, %xmm6
vbroadcastss 0x14683d(%rip), %xmm1 # 0x3f11e4
vandps %xmm1, %xmm4, %xmm4
vorps %xmm2, %xmm4, %xmm4
vbroadcastss 0x146830(%rip), %xmm1 # 0x3f11e8
vpaddd %xmm1, %xmm6, %xmm6
vcvtdq2ps %xmm6, %xmm6
vbroadcastss 0x146823(%rip), %xmm1 # 0x3f11ec
vcmpltps %xmm1, %xmm4, %xmm7
vandps %xmm4, %xmm7, %xmm10
vaddps %xmm4, %xmm11, %xmm4
vmovaps %xmm11, %xmm1
vaddps %xmm4, %xmm10, %xmm4
vandps %xmm7, %xmm8, %xmm7
vsubps %xmm7, %xmm6, %xmm6
vmulps %xmm4, %xmm4, %xmm7
vbroadcastss 0x146801(%rip), %xmm10 # 0x3f11f4
vmulps %xmm4, %xmm10, %xmm10
vbroadcastss 0x1467f8(%rip), %xmm11 # 0x3f11f8
vaddps %xmm11, %xmm10, %xmm10
vmulps %xmm4, %xmm10, %xmm10
vbroadcastss 0x1467ea(%rip), %xmm11 # 0x3f11fc
vaddps %xmm11, %xmm10, %xmm10
vmulps %xmm4, %xmm10, %xmm10
vbroadcastss 0x1467dc(%rip), %xmm11 # 0x3f1200
vaddps %xmm11, %xmm10, %xmm10
vmulps %xmm4, %xmm10, %xmm10
vbroadcastss 0x1467ce(%rip), %xmm11 # 0x3f1204
vaddps %xmm11, %xmm10, %xmm10
vmulps %xmm4, %xmm10, %xmm10
vbroadcastss 0x1467c0(%rip), %xmm11 # 0x3f1208
vaddps %xmm11, %xmm10, %xmm10
vmulps %xmm4, %xmm10, %xmm10
vbroadcastss 0x1467b2(%rip), %xmm11 # 0x3f120c
vaddps %xmm11, %xmm10, %xmm10
vmulps %xmm4, %xmm10, %xmm10
vbroadcastss 0x1467a4(%rip), %xmm11 # 0x3f1210
vaddps %xmm11, %xmm10, %xmm10
vmulps %xmm4, %xmm10, %xmm10
vbroadcastss 0x146796(%rip), %xmm11 # 0x3f1214
vaddps %xmm11, %xmm10, %xmm10
vmulps %xmm4, %xmm10, %xmm10
vbroadcastss 0x146e94(%rip), %xmm11 # 0x3f1924
vaddps %xmm11, %xmm10, %xmm10
vmulps %xmm7, %xmm10, %xmm7
vcmpleps %xmm0, %xmm3, %xmm3
vmulps %xmm6, %xmm9, %xmm6
vaddps %xmm4, %xmm6, %xmm4
vaddps %xmm7, %xmm4, %xmm4
vbroadcastss 0x147c65(%rip), %xmm6 # 0x3f2718
vmulps %xmm6, %xmm4, %xmm4
vbroadcastss 0x146e40(%rip), %xmm6 # 0x3f1900
vblendvps %xmm3, %xmm6, %xmm4, %xmm3
vbroadcastss 0x1466e9(%rip), %xmm4 # 0x3f11b8
vminps %xmm4, %xmm3, %xmm3
vbroadcastss 0x1466e0(%rip), %xmm4 # 0x3f11bc
vmaxps %xmm4, %xmm3, %xmm3
vbroadcastss 0x1466d7(%rip), %xmm4 # 0x3f11c0
vmulps %xmm4, %xmm3, %xmm4
vaddps %xmm2, %xmm4, %xmm4
vcvttps2dq %xmm4, %xmm6
vcvtdq2ps %xmm6, %xmm6
vcmpltps %xmm6, %xmm4, %xmm4
vandps %xmm4, %xmm8, %xmm4
vsubps %xmm4, %xmm6, %xmm4
vmulps %xmm4, %xmm9, %xmm6
vsubps %xmm6, %xmm3, %xmm3
vmulps %xmm3, %xmm3, %xmm6
vbroadcastss 0x1466b1(%rip), %xmm7 # 0x3f11cc
vmulps %xmm7, %xmm3, %xmm7
vbroadcastss 0x1466a8(%rip), %xmm9 # 0x3f11d0
vaddps %xmm7, %xmm9, %xmm7
vmulps %xmm3, %xmm7, %xmm7
vaddps %xmm7, %xmm12, %xmm7
vmulps %xmm3, %xmm7, %xmm7
vaddps %xmm7, %xmm13, %xmm7
vmulps %xmm3, %xmm7, %xmm7
vaddps %xmm7, %xmm14, %xmm7
vmulps %xmm3, %xmm7, %xmm7
vaddps %xmm2, %xmm7, %xmm7
vmulps %xmm7, %xmm6, %xmm6
vaddps %xmm3, %xmm8, %xmm3
vaddps %xmm6, %xmm3, %xmm3
vcvttps2dq %xmm4, %xmm4
vpslld $0x17, %xmm4, %xmm4
vpaddd %xmm4, %xmm8, %xmm4
vmulps %xmm4, %xmm3, %xmm3
vaddps %xmm3, %xmm8, %xmm3
vrcpps %xmm3, %xmm4
vaddps %xmm4, %xmm4, %xmm6
vmulps %xmm6, %xmm3, %xmm3
vbroadcastss 0x147b86(%rip), %xmm7 # 0x3f2708
vsubps %xmm3, %xmm7, %xmm3
vmulps %xmm3, %xmm4, %xmm3
vaddps %xmm1, %xmm6, %xmm4
vaddps %xmm3, %xmm4, %xmm3
jmp 0x2aad99
movq 0x118(%rbp,%rsi), %r14
vbroadcastss (%r14), %xmm3
vbroadcastss 0x4(%r14), %xmm6
vmaxps %xmm3, %xmm4, %xmm4
vminps %xmm6, %xmm4, %xmm15
vmaxps %xmm3, %xmm5, %xmm3
vminps %xmm6, %xmm3, %xmm3
jmp 0x2aada7
vbroadcastss 0x1465ec(%rip), %xmm12 # 0x3f11b4
vxorps %xmm4, %xmm12, %xmm3
vbroadcastss 0x1465e3(%rip), %xmm13 # 0x3f11b8
vminps %xmm3, %xmm13, %xmm3
vbroadcastss 0x1465da(%rip), %xmm14 # 0x3f11bc
vmaxps %xmm3, %xmm14, %xmm3
vbroadcastss 0x1465d1(%rip), %xmm9 # 0x3f11c0
vmulps %xmm3, %xmm9, %xmm4
vaddps %xmm2, %xmm4, %xmm4
vcvttps2dq %xmm4, %xmm6
vcvtdq2ps %xmm6, %xmm6
vcmpltps %xmm6, %xmm4, %xmm4
vandps %xmm4, %xmm8, %xmm4
vsubps %xmm4, %xmm6, %xmm4
vbroadcastss 0x146d0b(%rip), %xmm1 # 0x3f1920
vmulps %xmm1, %xmm4, %xmm6
vsubps %xmm6, %xmm3, %xmm3
vmulps %xmm3, %xmm3, %xmm6
vbroadcastss 0x1465a2(%rip), %xmm10 # 0x3f11cc
vmulps %xmm3, %xmm10, %xmm7
vbroadcastss 0x146599(%rip), %xmm11 # 0x3f11d0
vaddps %xmm7, %xmm11, %xmm7
vmulps %xmm3, %xmm7, %xmm7
vmovaps %xmm2, %xmm1
vbroadcastss 0x146588(%rip), %xmm2 # 0x3f11d4
vaddps %xmm2, %xmm7, %xmm7
vmulps %xmm3, %xmm7, %xmm7
vbroadcastss 0x14657b(%rip), %xmm10 # 0x3f11d8
vaddps %xmm7, %xmm10, %xmm7
vmulps %xmm3, %xmm7, %xmm7
vbroadcastss 0x14656e(%rip), %xmm11 # 0x3f11dc
vaddps %xmm7, %xmm11, %xmm7
vmulps %xmm3, %xmm7, %xmm7
vaddps %xmm1, %xmm7, %xmm7
vmulps %xmm7, %xmm6, %xmm6
vaddps %xmm3, %xmm8, %xmm3
vaddps %xmm6, %xmm3, %xmm3
vcvttps2dq %xmm4, %xmm4
vpslld $0x17, %xmm4, %xmm4
vpaddd %xmm4, %xmm8, %xmm4
vmulps %xmm4, %xmm3, %xmm3
vaddps %xmm3, %xmm8, %xmm3
vrcpps %xmm3, %xmm4
vmulps %xmm4, %xmm3, %xmm3
vsubps %xmm3, %xmm8, %xmm3
vmulps %xmm3, %xmm4, %xmm3
vaddps %xmm3, %xmm4, %xmm15
vxorps %xmm5, %xmm12, %xmm3
vminps %xmm3, %xmm13, %xmm3
vmaxps %xmm3, %xmm14, %xmm3
vmulps %xmm3, %xmm9, %xmm4
vaddps %xmm1, %xmm4, %xmm4
vcvttps2dq %xmm4, %xmm5
vcvtdq2ps %xmm5, %xmm5
vcmpltps %xmm5, %xmm4, %xmm4
vandps %xmm4, %xmm8, %xmm4
vsubps %xmm4, %xmm5, %xmm4
vbroadcastss 0x146c3f(%rip), %xmm5 # 0x3f1920
vmulps %xmm5, %xmm4, %xmm5
vsubps %xmm5, %xmm3, %xmm3
vmulps %xmm3, %xmm3, %xmm5
vbroadcastss 0x1464d6(%rip), %xmm6 # 0x3f11cc
vmulps %xmm6, %xmm3, %xmm6
vbroadcastss 0x1464cd(%rip), %xmm7 # 0x3f11d0
vaddps %xmm7, %xmm6, %xmm6
vmulps %xmm3, %xmm6, %xmm6
vaddps %xmm2, %xmm6, %xmm6
vmovaps %xmm1, %xmm2
vmulps %xmm3, %xmm6, %xmm6
vaddps %xmm6, %xmm10, %xmm6
vmulps %xmm3, %xmm6, %xmm6
vaddps %xmm6, %xmm11, %xmm6
vmulps %xmm3, %xmm6, %xmm6
vaddps %xmm1, %xmm6, %xmm6
vmulps %xmm6, %xmm5, %xmm5
vaddps %xmm3, %xmm8, %xmm3
vaddps %xmm5, %xmm3, %xmm3
vcvttps2dq %xmm4, %xmm4
vpslld $0x17, %xmm4, %xmm4
vpaddd %xmm4, %xmm8, %xmm4
vmulps %xmm4, %xmm3, %xmm3
vaddps %xmm3, %xmm8, %xmm3
vrcpps %xmm3, %xmm4
vmulps %xmm4, %xmm3, %xmm3
vsubps %xmm3, %xmm8, %xmm3
vmulps %xmm3, %xmm4, %xmm3
vaddps %xmm3, %xmm4, %xmm3
jmp 0x2aada7
movq 0x118(%rbp,%rsi), %r14
vbroadcastss (%r14), %xmm3
vbroadcastss 0x4(%r14), %xmm6
vmulps %xmm4, %xmm3, %xmm7
vaddps %xmm6, %xmm7, %xmm7
vmaxps %xmm0, %xmm7, %xmm7
vminps %xmm7, %xmm8, %xmm7
vmulps %xmm4, %xmm7, %xmm15
vmulps %xmm5, %xmm3, %xmm3
vaddps %xmm6, %xmm3, %xmm3
vmaxps %xmm0, %xmm3, %xmm3
vminps %xmm3, %xmm8, %xmm3
vmulps %xmm5, %xmm3, %xmm3
jmp 0x2aada7
vmovaps %xmm4, %xmm15
vmovaps %xmm5, %xmm3
cmpl $0x65, 0x38(%rsp)
jl 0x2aae0c
movq 0x288(%rbp,%rsi), %rsi
vmulps (%rsi,%rbx,4), %xmm15, %xmm4
vmulps 0x10(%rsi,%rbx,4), %xmm3, %xmm3
vbroadcastss 0x1463ea(%rip), %xmm1 # 0x3f11b4
vandps %xmm1, %xmm4, %xmm5
vandps %xmm1, %xmm3, %xmm6
vorps %xmm2, %xmm5, %xmm5
vorps %xmm2, %xmm6, %xmm6
vaddps %xmm5, %xmm4, %xmm4
vaddps %xmm6, %xmm3, %xmm3
vcvttps2dq %xmm4, %xmm4
vcvttps2dq %xmm3, %xmm3
vpackssdw %xmm3, %xmm4, %xmm3
vpminsw 0x14d75a(%rip), %xmm3, %xmm3 # 0x3f8550
vpmaxsw 0x14d762(%rip), %xmm3, %xmm3 # 0x3f8560
vpacksswb %xmm3, %xmm3, %xmm3
vmovq %xmm3, (%rdx)
addq $0x8, %rdx
jmp 0x2aae1b
vmovups %xmm15, (%r8)
vmovups %xmm3, 0x10(%r8)
addq $0x20, %r8
incl %r13d
jmp 0x2aa487
incl %ecx
jmp 0x2aa476
movq 0x40(%rsp), %rsi
incq %rsi
movq 0x30(%rsp), %rdx
addl 0x28(%rsp), %edx
movq 0x18(%rsp), %r14
jmp 0x2aa411
pushq $0x3
popq %r13
cmpl $0x1, 0xdc(%rbp,%rdx)
jne 0x2aaf50
cmpl $0x1, 0xe0(%rbp,%rdx)
jne 0x2aaf50
cmpl $0x2, 0xe4(%rbp,%rdx)
jne 0x2aaf50
cmpl $0x2, 0xe8(%rbp,%rdx)
jne 0x2aaf50
cmpl $0x1, 0x110(%rbp,%rdx)
ja 0x2aaf50
cmpl $0x65, 0x38(%rsp)
jl 0x2aba61
vxorps %xmm0, %xmm0, %xmm0
leaq 0x130(%rsp), %r13
vmovaps %xmm0, (%r13)
andq $0x0, 0x10(%r13)
xorl %ebx, %ebx
movq %rbp, %rcx
leaq 0xe0(%rsp), %rbp
leaq 0x198(%rsp), %r12
movq -0x18(%rax), %r14
movslq 0x108(%rcx,%r14), %rax
cmpq %rax, %rbx
jge 0x2abdbb
movq 0x1f8(%rcx,%r14), %rax
vmovss (%rax,%rbx,4), %xmm1
vxorps %xmm0, %xmm0, %xmm0
vucomiss %xmm1, %xmm0
je 0x2aaf0b
movq 0x240(%rcx,%r14), %rax
vmulss (%rax,%rbx,4), %xmm1, %xmm0
vmovss 0x143d81(%rip), %xmm1 # 0x3eec88
vdivss %xmm0, %xmm1, %xmm0
vmovss %xmm0, 0xe0(%rsp)
movq 0x288(%rcx,%r14), %rax
vmovss (%rax,%rbx,4), %xmm0
vmovss %xmm0, 0x198(%rsp)
movq %r13, %rdi
movq %rbp, %rsi
callq 0x1ea12c
movq %r13, %rdi
movq %r12, %rsi
callq 0x1ea12c
incq %rbx
movq 0x8(%rsp), %rcx
movq (%rcx), %rax
jmp 0x2aaec6
imull %ecx, %r13d
movslq %r13d, %rsi
leaq 0x130(%rsp), %rdi
leaq 0xe0(%rsp), %rdx
movq %rsi, 0xc8(%rsp)
callq 0x73bbe
movq (%rbp), %r9
movq -0x18(%r9), %rax
movl 0x18(%rsp), %r10d
imull 0xe0(%rbp,%rax), %r10d
movq 0x130(%rsp), %rbx
movl 0xdc(%rbp,%rax), %ecx
imull 0xd4(%rbp,%rax), %ecx
subl %ecx, %r10d
xorl %ecx, %ecx
xorl %edx, %edx
xorl %esi, %esi
cmpl 0xd8(%rbp,%rax), %esi
jge 0x2aafe3
movslq %ecx, %rcx
leaq (%rbx,%rcx,4), %r8
xorl %edi, %edi
cmpl 0xd4(%rbp,%rax), %edi
jge 0x2aafd9
movl %edx, (%r8,%rdi,4)
movq -0x18(%r9), %rax
addl 0xdc(%rbp,%rax), %edx
incq %rdi
jmp 0x2aafbc
addl %r10d, %edx
incl %esi
addq %rdi, %rcx
jmp 0x2aafaa
xorl %edx, %edx
testl %r13d, %r13d
movl $0x0, %ecx
movq %rcx, 0x50(%rsp)
cmovlel %edx, %r13d
vbroadcastss 0x1461b5(%rip), %xmm4 # 0x3f11b4
leaq 0x14d6d2(%rip), %r8 # 0x3f86d8
xorl %ebp, %ebp
movq %r9, 0xc0(%rsp)
movq 0x8(%rsp), %rcx
movslq 0x108(%rcx,%rax), %rax
cmpq %rax, %rbp
jge 0x2ab382
movq 0x10(%r14), %r9
imulq %rbp, %r9
imulq 0x40(%r14), %r9
addq (%r14), %r9
movslq 0x9c(%rsp), %r10
movq 0xb0(%rsp), %r11
imulq %rbp, %r11
movq 0x80(%rsp), %rax
imulq %rax, %r11
addq 0x70(%rsp), %r11
imulq %rax, %r10
movq 0x28(%rcx), %r15
addq 0x50(%rsp), %r15
xorl %eax, %eax
movq %r9, 0x18(%rsp)
movq %r10, 0x28(%rsp)
movq %r11, 0x48(%rsp)
movq %rbp, 0xd0(%rsp)
cmpl 0x68(%rsp), %eax
jg 0x2ab357
movl %eax, 0x30(%rsp)
movq 0x8(%rsp), %rax
movq (%rax), %rax
movq %rax, 0x40(%rsp)
xorl %r14d, %r14d
cmpl 0x10(%rsp), %r14d
jg 0x2ab34c
movq 0x40(%rsp), %rax
movq -0x18(%rax), %r12
movq 0x8(%rsp), %rcx
movslq 0xe8(%rcx,%r12), %rax
movslq 0x30(%rsp), %rdx
imulq %rax, %rdx
imulq %r10, %rdx
movslq 0xe4(%rcx,%r12), %rax
movslq %r14d, %rcx
imulq %rax, %rcx
addq %r11, %rcx
addq %rdx, %rcx
xorl %edx, %edx
xorl %eax, %eax
cmpq %rdx, %r13
je 0x2ab102
movslq (%rbx,%rdx,4), %rsi
movsbl (%rcx,%rsi), %esi
movsbl (%r15,%rdx), %edi
imull %esi, %edi
addl %edi, %eax
incq %rdx
jmp 0x2ab0e6
movq 0x8(%rsp), %rdx
movq 0x1f8(%rdx,%r12), %rcx
vmovss (%rcx,%rbp,4), %xmm1
vxorps %xmm0, %xmm0, %xmm0
vucomiss %xmm1, %xmm0
je 0x2ab137
movq 0x240(%rdx,%r12), %rcx
vmulss (%rcx,%rbp,4), %xmm1, %xmm0
vmovss 0x143b55(%rip), %xmm1 # 0x3eec88
vdivss %xmm0, %xmm1, %xmm0
vcvtsi2ss %eax, %xmm6, %xmm1
vmulss %xmm1, %xmm0, %xmm5
cmpl $0x0, 0x100(%rdx,%r12)
je 0x2ab157
movq 0x1b0(%rdx,%r12), %rax
vaddss (%rax,%rbp,4), %xmm5, %xmm5
movl 0x110(%rdx,%r12), %eax
decl %eax
cmpl $0x5, %eax
ja 0x2ab2cf
movslq (%r8,%rax,4), %rax
addq %r8, %rax
jmpq *%rax
vmaxss 0x142e95(%rip), %xmm5, %xmm0 # 0x3ee010
jmp 0x2ab2d3
vmovaps %xmm5, %xmm0
movq %r9, 0x58(%rsp)
vmovss %xmm5, 0xd8(%rsp)
callq 0x5f410
vaddss 0x143ae9(%rip), %xmm0, %xmm0 # 0x3eec88
callq 0x5f200
callq 0x5f160
movq 0x48(%rsp), %r11
movq 0x28(%rsp), %r10
movq 0x58(%rsp), %r9
leaq 0x14d519(%rip), %r8 # 0x3f86d8
vbroadcastss 0x145fec(%rip), %xmm4 # 0x3f11b4
vmulss 0xd8(%rsp), %xmm0, %xmm0
jmp 0x2ab2d3
movq 0x8(%rsp), %rax
movq 0x118(%rax,%r12), %rax
vmovss 0x4(%rax), %xmm1
vmaxss (%rax), %xmm5, %xmm0
vucomiss %xmm1, %xmm0
jbe 0x2ab2d3
vmovaps %xmm1, %xmm0
jmp 0x2ab2d3
vminss 0x145fb1(%rip), %xmm5, %xmm0 # 0x3f11b8
vxorps %xmm4, %xmm0, %xmm1
vcmpltss 0x145fa8(%rip), %xmm0, %xmm0 # 0x3f11bc
vbroadcastss 0x145f9b(%rip), %xmm2 # 0x3f11b8
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
movq %r9, %rbp
callq 0x5f410
movq 0x48(%rsp), %r11
movq 0x28(%rsp), %r10
movq %rbp, %r9
movq 0xd0(%rsp), %rbp
leaq 0x14d491(%rip), %r8 # 0x3f86d8
vbroadcastss 0x145f64(%rip), %xmm4 # 0x3f11b4
vmovss 0x143a30(%rip), %xmm1 # 0x3eec88
vaddss %xmm1, %xmm0, %xmm0
vdivss %xmm0, %xmm1, %xmm0
jmp 0x2ab2d3
movq 0x8(%rsp), %rax
movq 0x118(%rax,%r12), %rax
vxorps %xmm0, %xmm0, %xmm0
vcmpltss %xmm5, %xmm0, %xmm0
vmovss (%rax), %xmm1
vbroadcastss 0x143a03(%rip), %xmm2 # 0x3eec88
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmulss %xmm5, %xmm0, %xmm0
jmp 0x2ab2d3
movq 0x8(%rsp), %rax
movq 0x118(%rax,%r12), %rax
vmovss (%rax), %xmm1
vmovss 0x4(%rax), %xmm2
vxorps %xmm4, %xmm2, %xmm0
vdivss %xmm1, %xmm0, %xmm3
vxorps %xmm0, %xmm0, %xmm0
vucomiss %xmm3, %xmm5
jb 0x2ab2d3
vmovss 0x1439c7(%rip), %xmm0 # 0x3eec88
vdivss %xmm1, %xmm0, %xmm0
vaddss %xmm0, %xmm3, %xmm0
vucomiss %xmm0, %xmm5
jbe 0x2ab33f
vmovaps %xmm5, %xmm0
cmpl $0x65, 0x38(%rsp)
jl 0x2ab325
movq 0x8(%rsp), %rax
movq 0x288(%rax,%r12), %rax
vmulss (%rax,%rbp,4), %xmm0, %xmm0
vandps %xmm4, %xmm0, %xmm1
vbroadcastss 0x14d30b(%rip), %xmm2 # 0x3f8604
vorps %xmm2, %xmm1, %xmm1
vaddss %xmm1, %xmm0, %xmm0
vroundss $0xb, %xmm0, %xmm0, %xmm0
vcvttss2si %xmm0, %eax
cmpl $-0x7e, %eax
pushq $-0x7f
popq %rcx
cmovll %ecx, %eax
cmpl $0x7f, %eax
pushq $0x7f
popq %rcx
cmovgel %ecx, %eax
movb %al, (%r9)
incq %r9
jmp 0x2ab337
movq 0x18(%rsp), %rax
vmovss %xmm0, (%rax)
addq $0x4, %rax
movq %rax, 0x18(%rsp)
incl %r14d
jmp 0x2ab09f
vmulss %xmm5, %xmm1, %xmm0
vaddss %xmm2, %xmm0, %xmm0
jmp 0x2ab28b
movl 0x30(%rsp), %eax
incl %eax
jmp 0x2ab081
incq %rbp
movq 0xc0(%rsp), %r9
movq -0x18(%r9), %rax
movq 0x50(%rsp), %rcx
addq 0xc8(%rsp), %rcx
movq %rcx, 0x50(%rsp)
movq 0x20(%rsp), %r14
jmp 0x2ab010
leaq 0x130(%rsp), %rdi
callq 0x624be
xorl %r12d, %r12d
jmp 0x2ab463
movq %rsi, %rdi
callq 0x5f3e0
vmovaps 0xe0(%rsp), %xmm0
vmovups %xmm0, (%rbx)
movq 0xf0(%rsp), %rax
movq %rax, 0x10(%rbx)
movl 0xf8(%rsp), %eax
movl %eax, 0x18(%rbx)
movq 0x100(%rsp), %rax
movq %rax, 0x20(%rbx)
vmovups 0x108(%rsp), %xmm0
vmovups %xmm0, 0x28(%rbx)
movl 0x118(%rsp), %eax
movl %eax, 0x38(%rbx)
movq 0x120(%rsp), %rax
movq %rax, 0x40(%rbx)
movq 0xe8(%rsp), %rax
testq %rax, %rax
je 0x2ab429
lock
decl (%rax)
jne 0x2ab429
movq 0xe0(%rsp), %rsi
movq 0x100(%rsp), %rdi
testq %rdi, %rdi
je 0x2ab421
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2ab429
movq %rsi, %rdi
callq 0x5f3e0
movq 0x138(%rsp), %rax
testq %rax, %rax
movl %r14d, %r12d
je 0x2ab463
lock
decl (%rax)
jne 0x2ab463
movq 0x130(%rsp), %rsi
movq 0x150(%rsp), %rdi
testq %rdi, %rdi
je 0x2ab45b
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2ab463
movq %rsi, %rdi
callq 0x5f3e0
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x2ab494
lock
decl (%rax)
jne 0x2ab494
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x2ab48c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2ab494
movq %rsi, %rdi
callq 0x5f3e0
movq 0x1e8(%rsp), %rax
testq %rax, %rax
je 0x2ab4cb
lock
decl (%rax)
jne 0x2ab4cb
movq 0x1e0(%rsp), %rsi
movq 0x200(%rsp), %rdi
testq %rdi, %rdi
je 0x2ab4c3
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2ab4cb
movq %rsi, %rdi
callq 0x5f3e0
movl %r12d, %eax
addq $0x318, %rsp # imm = 0x318
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
vxorps %xmm0, %xmm0, %xmm0
leaq 0x130(%rsp), %rbx
andq $0x0, 0x10(%rbx)
vmovaps %xmm0, (%rbx)
xorl %r14d, %r14d
leaq 0xe0(%rsp), %r15
movq -0x18(%rax), %r12
movslq 0x108(%rbp,%r12), %rax
cmpq %rax, %r14
jge 0x2ab556
movq 0x1f8(%rbp,%r12), %rax
movq 0x240(%rbp,%r12), %rcx
vmovss (%rax,%r14,4), %xmm0
vmulss (%rcx,%r14,4), %xmm0, %xmm0
vmovss 0x143753(%rip), %xmm1 # 0x3eec88
vdivss %xmm0, %xmm1, %xmm0
vmovss %xmm0, 0xe0(%rsp)
movq %rbx, %rdi
movq %r15, %rsi
callq 0x1ea12c
incq %r14
movq (%rbp), %rax
jmp 0x2ab500
leaq 0x2a0(%rsp), %rdi
leaq 0x130(%rsp), %rsi
callq 0x290632
movslq 0x9c(%rsp), %rdx
movq 0x20(%rsp), %rcx
movslq 0x2c(%rcx), %rsi
movslq 0x30(%rcx), %rax
movl 0x38(%rcx), %edi
movq 0x28(%rbp), %r9
movq 0x1b0(%rbp,%r12), %r10
movl 0x34(%rcx), %r8d
movl %r8d, 0x58(%rsp)
movq (%rcx), %r8
movq 0x10(%rcx), %r11
movq 0x40(%rcx), %r15
imulq %r11, %r15
movq %rax, %rbx
movq %rsi, 0x38(%rsp)
imulq %rsi, %rbx
movq %r11, 0xd0(%rsp)
movq %rbx, 0x50(%rsp)
imulq %rbx, %r11
addq $0xf, %r11
andq $-0x10, %r11
movq %r11, 0xc8(%rsp)
movq 0x80(%rsp), %rsi
imulq 0xb0(%rsp), %rsi
movq %rsi, 0xc0(%rsp)
xorl %esi, %esi
testl %eax, %eax
movl $0x0, %r11d
cmovgl %eax, %r11d
movl %r11d, 0x10(%rsp)
movl 0x28(%rcx), %eax
movl %eax, 0x64(%rsp)
testl %edi, %edi
cmovlel %esi, %edi
movq %rdi, 0x18(%rsp)
movq 0x70(%rsp), %rax
movq %rax, 0x190(%rsp)
movq %rdx, 0x28(%rsp)
leaq (%rdx,%rdx), %rax
movq %rax, 0x188(%rsp)
movq 0x2a0(%rsp), %rax
movq %rax, 0x180(%rsp)
movq %r8, 0xd8(%rsp)
xorl %esi, %esi
movq %r10, 0x48(%rsp)
movq %r15, 0x68(%rsp)
cmpq 0x18(%rsp), %rsi
je 0x2ab814
movq 0xc8(%rsp), %rax
xorl %edx, %edx
divq 0xd0(%rsp)
cmpl $0x4, 0x64(%rsp)
cmoveq 0x50(%rsp), %rax
testq %r10, %r10
je 0x2ab688
vmovss (%r10,%rsi,4), %xmm1
jmp 0x2ab68c
vxorps %xmm1, %xmm1, %xmm1
imulq %rsi, %r15
addq 0xd8(%rsp), %r15
movq 0x180(%rsp), %rcx
vmovss (%rcx,%rsi,4), %xmm0
imull 0x58(%rsp), %eax
testl %eax, %eax
movl $0x0, %ecx
cmovlel %ecx, %eax
xorl %ecx, %ecx
cmpl %ecx, %eax
je 0x2ab6c5
vmovss %xmm1, (%r8,%rcx,4)
incq %rcx
jmp 0x2ab6b6
leaq (%rsi,%rsi,8), %rax
movq 0xc0(%rsp), %r13
movq %rsi, 0x40(%rsp)
imulq %rsi, %r13
addq 0x190(%rsp), %r13
movq 0x28(%rsp), %rcx
leaq (%rcx,%r13), %r11
movq 0x188(%rsp), %rcx
leaq (%rcx,%r13), %rbx
xorl %r14d, %r14d
cmpl 0x10(%rsp), %r14d
je 0x2ab7fa
xorl %ecx, %ecx
movq %r15, %rdx
movq 0x38(%rsp), %rsi
movl %esi, %r10d
testl %r10d, %r10d
jle 0x2ab7da
movsbl (%r13,%rcx), %r12d
movsbl (%r9,%rax), %esi
imull %r12d, %esi
movsbl 0x1(%r13,%rcx), %r12d
movsbl 0x1(%r9,%rax), %edi
imull %r12d, %edi
addl %esi, %edi
movsbl 0x2(%r13,%rcx), %esi
movsbl 0x2(%r9,%rax), %r12d
imull %esi, %r12d
movsbl (%r11,%rcx), %esi
movsbl 0x3(%r9,%rax), %ebp
imull %esi, %ebp
addl %r12d, %ebp
addl %edi, %ebp
movsbl 0x1(%r11,%rcx), %esi
movsbl 0x4(%r9,%rax), %edi
imull %esi, %edi
movsbl 0x2(%r11,%rcx), %esi
movsbl 0x5(%r9,%rax), %r12d
imull %esi, %r12d
addl %edi, %r12d
movsbl (%rbx,%rcx), %esi
movsbl 0x6(%r9,%rax), %edi
imull %esi, %edi
addl %r12d, %edi
addl %ebp, %edi
movsbl 0x1(%rbx,%rcx), %esi
movsbl 0x7(%r9,%rax), %ebp
imull %esi, %ebp
movsbl 0x2(%rbx,%rcx), %esi
movsbl 0x8(%r9,%rax), %r12d
imull %esi, %r12d
addl %ebp, %r12d
addl %edi, %r12d
vcvtsi2ss %r12d, %xmm2, %xmm1
vmulss %xmm1, %xmm0, %xmm1
vaddss (%r15,%rcx,4), %xmm1, %xmm1
vmovss %xmm1, (%r15,%rcx,4)
addq $0x4, %rdx
decl %r10d
incq %rcx
jmp 0x2ab712
addq %rcx, %r13
addq $0x2, %r13
addq %rcx, %r11
addq $0x2, %r11
addq %rcx, %rbx
addq $0x2, %rbx
incl %r14d
movq %rdx, %r15
jmp 0x2ab6fa
movq 0x40(%rsp), %rsi
incq %rsi
movq 0x68(%rsp), %r15
addq %r15, %r8
movq 0x48(%rsp), %r10
jmp 0x2ab653
leaq 0x2a0(%rsp), %rdi
jmp 0x2abd9f
leaq 0x2b8(%rsp), %rdi
leaq 0x130(%rsp), %rsi
callq 0x290632
movslq 0x9c(%rsp), %rax
movq 0x20(%rsp), %r15
movl 0x2c(%r15), %ecx
movl %ecx, 0x38(%rsp)
movl 0x30(%r15), %ecx
movl 0x38(%r15), %edx
movq 0x28(%rbp), %rdi
movq 0x1b0(%rbp,%r13), %rsi
xorl %r9d, %r9d
testl %ecx, %ecx
cmovlel %r9d, %ecx
movl %ecx, 0x10(%rsp)
movq %rax, 0x28(%rsp)
addq %rax, %rax
movq %rax, 0x58(%rsp)
testl %edx, %edx
cmovlel %r9d, %edx
movq %rdx, 0x18(%rsp)
vbroadcastss 0x145927(%rip), %xmm0 # 0x3f11b4
vbroadcastss 0x14cd6e(%rip), %xmm1 # 0x3f8604
pushq $-0x7f
popq %r8
pushq $0x7f
popq %rbx
movq %rsi, 0x48(%rsp)
cmpq 0x18(%rsp), %r9
je 0x2aba54
testq %rsi, %rsi
je 0x2ab8ba
vmovss (%rsi,%r9,4), %xmm2
jmp 0x2ab8be
vxorps %xmm2, %xmm2, %xmm2
movq 0x40(%r15), %r14
imulq %r9, %r14
imulq 0x10(%r15), %r14
addq (%r15), %r14
movq 0x2b8(%rsp), %rax
vmovss (%rax,%r9,8), %xmm3
vmovss 0x4(%rax,%r9,8), %xmm4
movq 0xb0(%rsp), %r15
imulq %r9, %r15
imulq 0x80(%rsp), %r15
addq 0x70(%rsp), %r15
movq %r9, 0x40(%rsp)
leaq (%r9,%r9,8), %r12
movq 0x28(%rsp), %rax
leaq (%r15,%rax), %r13
movq 0x58(%rsp), %rax
leaq (%r15,%rax), %rbp
xorl %eax, %eax
cmpl 0x10(%rsp), %eax
je 0x2aba3d
xorl %r10d, %r10d
movl 0x38(%rsp), %ecx
testl %ecx, %ecx
jle 0x2aba1e
movsbl (%r15,%r10), %r11d
movsbl (%rdi,%r12), %r9d
imull %r11d, %r9d
movsbl 0x1(%r15,%r10), %r11d
movsbl 0x1(%rdi,%r12), %esi
imull %r11d, %esi
addl %r9d, %esi
movsbl 0x2(%r15,%r10), %r9d
movsbl 0x2(%rdi,%r12), %r11d
imull %r9d, %r11d
movsbl (%r13,%r10), %r9d
movsbl 0x3(%rdi,%r12), %edx
imull %r9d, %edx
addl %r11d, %edx
addl %esi, %edx
movsbl 0x1(%r13,%r10), %esi
movsbl 0x4(%rdi,%r12), %r9d
imull %esi, %r9d
movsbl 0x2(%r13,%r10), %esi
movsbl 0x5(%rdi,%r12), %r11d
imull %esi, %r11d
addl %r9d, %r11d
movsbl (%rbp,%r10), %esi
movsbl 0x6(%rdi,%r12), %r9d
imull %esi, %r9d
addl %r11d, %r9d
addl %edx, %r9d
movsbl 0x1(%rbp,%r10), %edx
movsbl 0x7(%rdi,%r12), %esi
imull %edx, %esi
movsbl 0x2(%rbp,%r10), %edx
movsbl 0x8(%rdi,%r12), %r11d
imull %edx, %r11d
addl %esi, %r11d
addl %r9d, %r11d
vcvtsi2ss %r11d, %xmm7, %xmm5
vmulss %xmm5, %xmm3, %xmm5
vaddss %xmm2, %xmm5, %xmm5
vmulss %xmm4, %xmm5, %xmm5
vandps %xmm0, %xmm5, %xmm6
vorps %xmm1, %xmm6, %xmm6
vaddss %xmm6, %xmm5, %xmm5
vroundss $0xb, %xmm5, %xmm5, %xmm5
vcvttss2si %xmm5, %r11d
cmpl $-0x7e, %r11d
jge 0x2aba07
movl %r8d, %r11d
cmpl $0x7f, %r11d
jl 0x2aba10
movl %ebx, %r11d
movb %r11b, (%r14,%r10)
decl %ecx
incq %r10
jmp 0x2ab92b
addq %r10, %r15
addq $0x2, %r15
addq %r10, %r13
addq $0x2, %r13
addq %r10, %rbp
addq $0x2, %rbp
incl %eax
addq %r10, %r14
jmp 0x2ab91a
movq 0x40(%rsp), %r9
incq %r9
movq 0x20(%rsp), %r15
movq 0x48(%rsp), %rsi
jmp 0x2ab8a2
leaq 0x2b8(%rsp), %rdi
jmp 0x2abfed
vxorps %xmm0, %xmm0, %xmm0
leaq 0x130(%rsp), %rbx
andq $0x0, 0x10(%rbx)
vmovaps %xmm0, (%rbx)
xorl %r14d, %r14d
leaq 0xe0(%rsp), %r15
movq -0x18(%rax), %r12
movslq 0x108(%rbp,%r12), %rax
cmpq %rax, %r14
jge 0x2abad7
movq 0x1f8(%rbp,%r12), %rax
movq 0x240(%rbp,%r12), %rcx
vmovss (%rax,%r14,4), %xmm0
vmulss (%rcx,%r14,4), %xmm0, %xmm0
vmovss 0x1431d2(%rip), %xmm1 # 0x3eec88
vdivss %xmm0, %xmm1, %xmm0
vmovss %xmm0, 0xe0(%rsp)
movq %rbx, %rdi
movq %r15, %rsi
callq 0x1ea12c
incq %r14
movq (%rbp), %rax
jmp 0x2aba81
leaq 0x270(%rsp), %rdi
leaq 0x130(%rsp), %rsi
callq 0x290632
movslq 0x9c(%rsp), %rdi
movq 0x20(%rsp), %rdx
movslq 0x2c(%rdx), %rsi
movslq 0x30(%rdx), %rax
movl 0x38(%rdx), %r8d
movl %edi, %ecx
subl %esi, %ecx
addl %ecx, %ecx
movq 0x28(%rbp), %r9
movq 0x1b0(%rbp,%r12), %r10
movq 0x10(%rdx), %r11
movq 0x40(%rdx), %rbx
imulq %r11, %rbx
movq %rax, %r14
movq %rsi, 0x38(%rsp)
imulq %rsi, %r14
movq %r11, 0x58(%rsp)
movq %r14, 0xd0(%rsp)
imulq %r14, %r11
addq $0xf, %r11
andq $-0x10, %r11
movq %r11, 0x68(%rsp)
movq 0x80(%rsp), %rsi
imulq 0xb0(%rsp), %rsi
movq %rsi, 0x50(%rsp)
xorl %r11d, %r11d
testl %eax, %eax
movl $0x0, %esi
cmovgl %eax, %esi
movl %esi, 0x10(%rsp)
movl 0x34(%rdx), %eax
movl %eax, 0xc8(%rsp)
movq (%rdx), %r12
movslq %ecx, %rsi
testl %r8d, %r8d
cmovlel %r11d, %r8d
movq %r8, 0x18(%rsp)
movl 0x28(%rdx), %eax
movl %eax, 0x64(%rsp)
movq 0x70(%rsp), %rax
movq %rax, 0x190(%rsp)
movq %rdi, 0x28(%rsp)
leaq (%rdi,%rdi), %rax
movq %rax, 0x188(%rsp)
movq 0x270(%rsp), %rax
movq %rax, 0x180(%rsp)
movq %r12, 0xc0(%rsp)
xorl %edi, %edi
movq %r10, 0x48(%rsp)
movq %rbx, 0xd8(%rsp)
cmpq 0x18(%rsp), %rdi
je 0x2abd97
movq 0x68(%rsp), %rax
xorl %edx, %edx
divq 0x58(%rsp)
cmpl $0x4, 0x64(%rsp)
cmoveq 0xd0(%rsp), %rax
testq %r10, %r10
je 0x2abc0e
vmovss (%r10,%rdi,4), %xmm1
jmp 0x2abc12
vxorps %xmm1, %xmm1, %xmm1
movq %rbx, %rdx
imulq %rdi, %rdx
addq 0xc0(%rsp), %rdx
movq 0x180(%rsp), %rcx
vmovss (%rcx,%rdi,4), %xmm0
imull 0xc8(%rsp), %eax
testl %eax, %eax
movl $0x0, %ecx
cmovlel %ecx, %eax
xorl %ecx, %ecx
cmpl %ecx, %eax
je 0x2abc51
vmovss %xmm1, (%r12,%rcx,4)
incq %rcx
jmp 0x2abc42
leaq (%rdi,%rdi,8), %rax
movq 0x50(%rsp), %r8
movq %rdi, 0x40(%rsp)
imulq %rdi, %r8
addq 0x190(%rsp), %r8
movq 0x28(%rsp), %rcx
leaq (%r8,%rcx), %r13
movq 0x188(%rsp), %rcx
leaq (%r8,%rcx), %r11
xorl %r14d, %r14d
cmpl 0x10(%rsp), %r14d
je 0x2abd7a
xorl %ecx, %ecx
movq 0x38(%rsp), %rdi
movl %edi, %r10d
testl %r10d, %r10d
jle 0x2abd60
movsbl (%r8,%rcx), %ebx
movsbl (%r9,%rax), %r15d
imull %ebx, %r15d
movsbl 0x1(%r8,%rcx), %ebx
movsbl 0x1(%r9,%rax), %edi
imull %ebx, %edi
addl %r15d, %edi
movsbl 0x2(%r8,%rcx), %ebx
movsbl 0x2(%r9,%rax), %r15d
imull %ebx, %r15d
movsbl (%r13,%rcx), %ebx
movsbl 0x3(%r9,%rax), %ebp
imull %ebx, %ebp
addl %r15d, %ebp
addl %edi, %ebp
movsbl 0x1(%r13,%rcx), %edi
movsbl 0x4(%r9,%rax), %ebx
imull %edi, %ebx
movsbl 0x2(%r13,%rcx), %edi
movsbl 0x5(%r9,%rax), %r15d
imull %edi, %r15d
addl %ebx, %r15d
movsbl (%r11,%rcx), %edi
movsbl 0x6(%r9,%rax), %ebx
imull %edi, %ebx
addl %r15d, %ebx
addl %ebp, %ebx
movsbl 0x1(%r11,%rcx), %edi
movsbl 0x7(%r9,%rax), %ebp
imull %edi, %ebp
movsbl 0x2(%r11,%rcx), %edi
movsbl 0x8(%r9,%rax), %r15d
imull %edi, %r15d
addl %ebp, %r15d
addl %ebx, %r15d
vcvtsi2ss %r15d, %xmm2, %xmm1
vmulss %xmm1, %xmm0, %xmm1
vaddss (%rdx), %xmm1, %xmm1
vmovss %xmm1, (%rdx)
addq $0x4, %rdx
decl %r10d
addq $0x2, %rcx
jmp 0x2abc98
addq %rsi, %r8
addq %rcx, %r8
addq %rsi, %r13
addq %rcx, %r13
addq %rsi, %r11
addq %rcx, %r11
incl %r14d
jmp 0x2abc83
movq 0x40(%rsp), %rdi
incq %rdi
movq 0xd8(%rsp), %rbx
addq %rbx, %r12
movq 0x48(%rsp), %r10
jmp 0x2abbdc
leaq 0x270(%rsp), %rdi
callq 0x621c2
leaq 0x130(%rsp), %rdi
callq 0x621c2
movq 0x20(%rsp), %r15
jmp 0x2abfff
leaq 0x288(%rsp), %rdi
leaq 0x130(%rsp), %rsi
callq 0x290632
movslq 0x9c(%rsp), %rsi
movq 0x20(%rsp), %r15
movl 0x2c(%r15), %eax
movl 0x30(%r15), %edx
movl 0x38(%r15), %r8d
movl %esi, %ecx
movl %eax, 0x10(%rsp)
subl %eax, %ecx
addl %ecx, %ecx
movq 0x8(%rsp), %rax
movq 0x28(%rax), %rdi
movq 0x1b0(%rax,%r14), %rax
movq %rsi, 0x18(%rsp)
addq %rsi, %rsi
movq %rsi, 0x48(%rsp)
movslq %ecx, %r10
xorl %esi, %esi
testl %edx, %edx
cmovlel %esi, %edx
testl %r8d, %r8d
cmovlel %esi, %r8d
movq %r8, 0x40(%rsp)
vbroadcastss 0x145384(%rip), %xmm0 # 0x3f11b4
vbroadcastss 0x14c7cb(%rip), %xmm1 # 0x3f8604
movq %rax, 0x28(%rsp)
cmpq 0x40(%rsp), %rsi
je 0x2abfe5
testq %rax, %rax
je 0x2abe55
vmovss (%rax,%rsi,4), %xmm2
jmp 0x2abe59
vxorps %xmm2, %xmm2, %xmm2
movq 0x40(%r15), %rbx
imulq %rsi, %rbx
imulq 0x10(%r15), %rbx
addq (%r15), %rbx
movq 0x288(%rsp), %rcx
vmovss (%rcx,%rsi,8), %xmm3
vmovss 0x4(%rcx,%rsi,8), %xmm4
movq 0xb0(%rsp), %r14
imulq %rsi, %r14
imulq 0x80(%rsp), %r14
addq 0x70(%rsp), %r14
movq %rsi, 0x38(%rsp)
leaq (%rsi,%rsi,8), %r15
movq 0x18(%rsp), %rax
leaq (%r14,%rax), %r12
movq 0x48(%rsp), %rax
leaq (%r14,%rax), %r13
xorl %ebp, %ebp
cmpl %edx, %ebp
je 0x2abfce
xorl %r9d, %r9d
movl 0x10(%rsp), %ecx
testl %ecx, %ecx
jle 0x2abfb5
movsbl (%r14,%r9), %esi
movsbl (%rdi,%r15), %r8d
imull %esi, %r8d
movsbl 0x1(%r14,%r9), %esi
movsbl 0x1(%rdi,%r15), %r11d
imull %esi, %r11d
addl %r8d, %r11d
movsbl 0x2(%r14,%r9), %esi
movsbl 0x2(%rdi,%r15), %r8d
imull %esi, %r8d
movsbl (%r12,%r9), %esi
movsbl 0x3(%rdi,%r15), %eax
imull %esi, %eax
addl %r8d, %eax
addl %r11d, %eax
movsbl 0x1(%r12,%r9), %esi
movsbl 0x4(%rdi,%r15), %r8d
imull %esi, %r8d
movsbl 0x2(%r12,%r9), %esi
movsbl 0x5(%rdi,%r15), %r11d
imull %esi, %r11d
addl %r8d, %r11d
movsbl (%r13,%r9), %esi
movsbl 0x6(%rdi,%r15), %r8d
imull %esi, %r8d
addl %r11d, %r8d
addl %eax, %r8d
movsbl 0x1(%r13,%r9), %eax
movsbl 0x7(%rdi,%r15), %esi
imull %eax, %esi
movsbl 0x2(%r13,%r9), %eax
movsbl 0x8(%rdi,%r15), %r11d
imull %eax, %r11d
addl %esi, %r11d
addl %r8d, %r11d
vcvtsi2ss %r11d, %xmm7, %xmm5
vmulss %xmm5, %xmm3, %xmm5
vaddss %xmm2, %xmm5, %xmm5
vmulss %xmm4, %xmm5, %xmm5
vandps %xmm0, %xmm5, %xmm6
vorps %xmm1, %xmm6, %xmm6
vaddss %xmm6, %xmm5, %xmm5
vroundss $0xb, %xmm5, %xmm5, %xmm5
vcvttss2si %xmm5, %esi
cmpl $-0x7e, %esi
jge 0x2abf9c
pushq $-0x7f
popq %rsi
cmpl $0x7f, %esi
jl 0x2abfa4
pushq $0x7f
popq %rsi
movb %sil, (%rbx)
incq %rbx
decl %ecx
addq $0x2, %r9
jmp 0x2abec2
addq %r10, %r14
addq %r9, %r14
addq %r10, %r12
addq %r9, %r12
addq %r10, %r13
addq %r9, %r13
incl %ebp
jmp 0x2abeb3
movq 0x38(%rsp), %rsi
incq %rsi
movq 0x20(%rsp), %r15
movq 0x28(%rsp), %rax
jmp 0x2abe3e
leaq 0x288(%rsp), %rdi
callq 0x621c2
leaq 0x130(%rsp), %rdi
callq 0x621c2
movq 0x8(%rsp), %rax
xorl %r12d, %r12d
movq 0x8(%rax), %rdi
testq %rdi, %rdi
je 0x2ab463
movq (%rdi), %rax
movq %r15, %rsi
movq 0x30(%rsp), %rdx
callq *0x48(%rax)
jmp 0x2ab463
jmp 0x2ac035
jmp 0x2ac035
jmp 0x2ac035
jmp 0x2ac035
jmp 0x2ac035
jmp 0x2ac035
jmp 0x2ac035
movq %rax, %rbx
leaq 0x130(%rsp), %rdi
callq 0x621c2
jmp 0x2ac1b7
jmp 0x2ac0d0
jmp 0x2ac0d0
jmp 0x2ac231
jmp 0x2ac231
movq %rax, %rbx
jmp 0x2ac180
jmp 0x2ac231
jmp 0x2ac0d0
movq %rax, %rbx
jmp 0x2ac149
jmp 0x2ac231
jmp 0x2ac231
jmp 0x2ac0d0
jmp 0x2ac231
jmp 0x2ac231
movq %rax, %rbx
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x2ac1e8
lock
decl (%rax)
jne 0x2ac1e8
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x2ac1d8
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2ac1e8
jmp 0x2ac231
movq %rax, %rbx
jmp 0x2ac1e8
movq %rax, %rbx
jmp 0x2ac1b7
movq %rax, %rbx
movq 0x230(%rsp), %rax
testq %rax, %rax
je 0x2ac112
lock
decl (%rax)
jne 0x2ac112
movq 0x228(%rsp), %rsi
movq 0x248(%rsp), %rdi
testq %rdi, %rdi
jne 0x2ac10c
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2ac112
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x1a0(%rsp), %rax
testq %rax, %rax
je 0x2ac149
lock
decl (%rax)
jne 0x2ac149
movq 0x198(%rsp), %rsi
movq 0x1b8(%rsp), %rdi
testq %rdi, %rdi
jne 0x2ac143
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2ac149
movq (%rdi), %rax
callq *0x18(%rax)
movq 0xe8(%rsp), %rax
testq %rax, %rax
je 0x2ac180
lock
decl (%rax)
jne 0x2ac180
movq 0xe0(%rsp), %rsi
movq 0x100(%rsp), %rdi
testq %rdi, %rdi
jne 0x2ac17a
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2ac180
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x138(%rsp), %rax
testq %rax, %rax
je 0x2ac1b7
lock
decl (%rax)
jne 0x2ac1b7
movq 0x130(%rsp), %rsi
movq 0x150(%rsp), %rdi
testq %rdi, %rdi
jne 0x2ac1b1
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2ac1b7
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x2ac1e8
lock
decl (%rax)
jne 0x2ac1e8
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
jne 0x2ac1e2
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2ac1e8
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x1e8(%rsp), %rax
testq %rax, %rax
je 0x2ac21f
lock
decl (%rax)
jne 0x2ac21f
movq 0x1e0(%rsp), %rsi
movq 0x200(%rsp), %rdi
testq %rdi, %rdi
jne 0x2ac219
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2ac21f
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
jmp 0x2ac231
jmp 0x2ac231
jmp 0x2ac231
jmp 0x2ac231
jmp 0x2ac231
movq %rax, %rdi
callq 0x61d68
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/convolutiondepthwise_x86_avx.cpp |
virtual thunk to ncnn::ConvolutionDepthWise_x86_avx::forward(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const | int ConvolutionDepthWise_x86_avx::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
#if NCNN_INT8
if (opt.use_int8_inference && int8_scale_term)
{
return forward_int8_x86(bottom_blob, top_blob, opt);
}
#endif
int w = bottom_blob.w;
int h = bottom_blob.h;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;
Mat bottom_blob_bordered;
make_padding(bottom_blob, bottom_blob_bordered, opt);
if (bottom_blob_bordered.empty())
return -100;
w = bottom_blob_bordered.w;
h = bottom_blob_bordered.h;
int outw = (w - kernel_extent_w) / stride_w + 1;
int outh = (h - kernel_extent_h) / stride_h + 1;
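// standard convolution output size with the effective (dilated) kernel:
// out = (padded_in - kernel_extent) / stride + 1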
int out_elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
#if __AVX512F__
out_elempack = num_output % 16 == 0 ? 16 : num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#elif __AVX__
out_elempack = num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#else
out_elempack = num_output % 4 == 0 ? 4 : 1;
#endif
}
#endif // __SSE2__
size_t out_elemsize = elemsize / elempack * out_elempack;
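// keeps the per-scalar byte size while switching the pack factor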
top_blob.create(outw, outh, num_output / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
// depth-wise
if (channels * elempack == group && group == num_output)
{
#if __SSE2__
#if __AVX__
#if __AVX512F__
if (elempack == 16)
{
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw3x3s1_pack16_avx512(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw3x3s2_pack16_avx512(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 5 && kernel_h == 5 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw5x5s1_pack16_avx512(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 5 && kernel_h == 5 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw5x5s2_pack16_avx512(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
else
{
const int maxk = kernel_w * kernel_h;
// kernel offsets
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
int gap = w * dilation_h - kernel_w * dilation_w;
for (int i = 0; i < kernel_h; i++)
{
for (int j = 0; j < kernel_w; j++)
{
space_ofs[p1] = p2;
p1++;
p2 += dilation_w;
}
p2 += gap;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < channels; g++)
{
float* outptr = top_blob.channel(g);
const float* kptr = (const float*)weight_data_tm + maxk * g * 16;
const Mat m = bottom_blob_bordered.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__m512 _sum = _mm512_set1_ps(0.f);
if (bias_term)
{
_sum = _mm512_loadu_ps(((const float*)bias_data) + g * 16);
}
const float* sptr = m.row(i * stride_h) + j * stride_w * 16;
for (int k = 0; k < maxk; k++)
{
__m512 _val = _mm512_loadu_ps(sptr + space_ofs[k] * 16);
__m512 _w = _mm512_loadu_ps(kptr + k * 16);
_sum = _mm512_fmadd_ps(_val, _w, _sum);
}
_mm512_storeu_ps(outptr, _sum);
outptr += 16;
}
}
}
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
}
#endif // __AVX512F__
if (elempack == 8)
{
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw3x3s1_pack8_avx(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw3x3s2_pack8_avx(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 5 && kernel_h == 5 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw5x5s1_pack8_avx(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 5 && kernel_h == 5 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw5x5s2_pack8_avx(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
else
{
const int maxk = kernel_w * kernel_h;
// kernel offsets
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
int gap = w * dilation_h - kernel_w * dilation_w;
for (int i = 0; i < kernel_h; i++)
{
for (int j = 0; j < kernel_w; j++)
{
space_ofs[p1] = p2;
p1++;
p2 += dilation_w;
}
p2 += gap;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < channels; g++)
{
float* outptr = top_blob.channel(g);
const float* kptr = (const float*)weight_data_tm + maxk * g * 8;
const Mat m = bottom_blob_bordered.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__m256 _sum = _mm256_set1_ps(0.f);
if (bias_term)
{
_sum = _mm256_loadu_ps(((const float*)bias_data) + g * 8);
}
const float* sptr = m.row(i * stride_h) + j * stride_w * 8;
for (int k = 0; k < maxk; k++)
{
__m256 _val = _mm256_loadu_ps(sptr + space_ofs[k] * 8);
__m256 _w = _mm256_loadu_ps(kptr + k * 8);
_sum = _mm256_comp_fmadd_ps(_val, _w, _sum);
}
_mm256_storeu_ps(outptr + j * 8, _sum);
}
outptr += outw * 8;
}
}
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
}
#endif // __AVX__
if (elempack == 4)
{
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw3x3s1_pack4_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw3x3s2_pack4_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 5 && kernel_h == 5 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw5x5s1_pack4_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 5 && kernel_h == 5 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw5x5s2_pack4_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
{
const int maxk = kernel_w * kernel_h;
// kernel offsets
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
int gap = w * dilation_h - kernel_w * dilation_w;
for (int i = 0; i < kernel_h; i++)
{
for (int j = 0; j < kernel_w; j++)
{
space_ofs[p1] = p2;
p1++;
p2 += dilation_w;
}
p2 += gap;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < channels; g++)
{
float* outptr = top_blob.channel(g);
const float* kptr = (const float*)weight_data_tm + maxk * g * 4;
const Mat m = bottom_blob_bordered.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__m128 _sum = _mm_set1_ps(0.f);
if (bias_term)
{
_sum = _mm_loadu_ps(((const float*)bias_data) + g * 4);
}
const float* sptr = m.row(i * stride_h) + j * stride_w * 4;
for (int k = 0; k < maxk; k++)
{
__m128 _val = _mm_loadu_ps(sptr + space_ofs[k] * 4);
__m128 _w = _mm_loadu_ps(kptr + k * 4);
_sum = _mm_add_ps(_mm_mul_ps(_val, _w), _sum);
}
_sum = activation_sse(_sum, activation_type, activation_params);
_mm_storeu_ps(outptr + j * 4, _sum);
}
outptr += outw * 4;
}
}
return 0;
}
}
#endif // __SSE2__
if (elempack == 1)
{
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
{
convdw3x3s1_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2)
{
convdw3x3s2_sse(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, opt);
if (activation)
{
activation->forward_inplace(top_blob, opt);
}
return 0;
}
}
}
// group convolution
const int channels_g = channels * elempack / group;
const int num_output_g = num_output / group;
int g_elempack = 1;
int out_g_elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
#if __AVX512F__
g_elempack = channels_g % 16 == 0 ? 16 : channels_g % 8 == 0 ? 8 : channels_g % 4 == 0 ? 4 : 1;
out_g_elempack = num_output_g % 16 == 0 ? 16 : num_output_g % 8 == 0 ? 8 : num_output_g % 4 == 0 ? 4 : 1;
#elif __AVX__
g_elempack = channels_g % 8 == 0 ? 8 : channels_g % 4 == 0 ? 4 : 1;
out_g_elempack = num_output_g % 8 == 0 ? 8 : num_output_g % 4 == 0 ? 4 : 1;
#else
g_elempack = channels_g % 4 == 0 ? 4 : 1;
out_g_elempack = num_output_g % 4 == 0 ? 4 : 1;
#endif
}
#endif // __SSE2__
// unpacking
Mat bottom_blob_bordered_unpacked = bottom_blob_bordered;
if (elempack > g_elempack)
{
Option opt_p = opt;
opt_p.blob_allocator = opt.workspace_allocator;
convert_packing(bottom_blob_bordered, bottom_blob_bordered_unpacked, g_elempack, opt_p);
}
Mat top_blob_unpacked = top_blob;
if (out_g_elempack < out_elempack)
{
top_blob_unpacked.create(outw, outh, num_output / out_g_elempack, out_elemsize / out_elempack * out_g_elempack, out_g_elempack, opt.workspace_allocator);
if (top_blob_unpacked.empty())
return -100;
}
for (int g = 0; g < group; g++)
{
const Mat bottom_blob_bordered_g = bottom_blob_bordered_unpacked.channel_range(channels_g * g / g_elempack, channels_g / g_elempack);
Mat top_blob_g = top_blob_unpacked.channel_range(num_output_g * g / out_g_elempack, num_output_g / out_g_elempack);
const ncnn::Layer* op = group_ops[g];
Option opt_g = opt;
opt_g.blob_allocator = top_blob_unpacked.allocator;
// forward
op->forward(bottom_blob_bordered_g, top_blob_g, opt_g);
}
// packing
if (out_g_elempack < out_elempack)
{
convert_packing(top_blob_unpacked, top_blob, out_elempack, opt);
}
else
{
top_blob = top_blob_unpacked;
}
return 0;
} | movq (%rdi), %rax
addq -0x48(%rax), %rdi
jmp 0x2a49d2
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/convolutiondepthwise_x86_avx.cpp |
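The ternary chains that pick out_elempack, g_elempack, and out_g_elempack in the forward() source above all encode the same rule: use the widest SIMD packing that evenly divides the channel count. A minimal standalone sketch of that rule for the AVX build; pick_elempack_avx is a name introduced here for illustration:

// widest packing that evenly divides the channel count (AVX build: 8, 4, 1)
static int pick_elempack_avx(int channels)
{
    if (channels % 8 == 0) return 8; // one __m256 of floats per element
    if (channels % 4 == 0) return 4; // one __m128
    return 1;                        // scalar fallback
}

// out_elemsize then scales with the packing, exactly as in the source:
//   size_t out_elemsize = elemsize / elempack * pick_elempack_avx(num_output);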
ncnn::ConvolutionDepthWise_x86_avx::forward(std::vector<ncnn::Mat, std::allocator<ncnn::Mat>> const&, std::vector<ncnn::Mat, std::allocator<ncnn::Mat>>&, ncnn::Option const&) const | int ConvolutionDepthWise_x86_avx::forward(const std::vector<Mat>& bottom_blobs, std::vector<Mat>& top_blobs, const Option& opt) const
{
const Mat& bottom_blob = bottom_blobs[0];
const Mat& _weight_data = bottom_blobs[1];
Mat& top_blob = top_blobs[0];
const int _kernel_w = _weight_data.w;
const int _kernel_h = _weight_data.h;
const int _num_output = _weight_data.c * _weight_data.elempack;
Mat weight_data_flattened;
flatten(_weight_data, weight_data_flattened, opt);
if (weight_data_flattened.empty())
return -100;
// weight_data_flattened as pack1
weight_data_flattened.w *= weight_data_flattened.elempack;
weight_data_flattened.elemsize /= weight_data_flattened.elempack;
weight_data_flattened.elempack = 1;
Mat bias_data_flattened;
if (bias_term)
{
const Mat& _bias_data = bottom_blobs[2];
flatten(_bias_data, bias_data_flattened, opt);
if (bias_data_flattened.empty())
return -100;
// bias_data_flattened as pack1
bias_data_flattened.w *= bias_data_flattened.elempack;
bias_data_flattened.elemsize /= bias_data_flattened.elempack;
bias_data_flattened.elempack = 1;
}
ncnn::Layer* op = ncnn::create_layer(ncnn::LayerType::ConvolutionDepthWise);
ncnn::ParamDict pd;
pd.set(0, _num_output);
pd.set(1, _kernel_w);
pd.set(11, _kernel_h);
pd.set(2, dilation_w);
pd.set(12, dilation_h);
pd.set(3, stride_w);
pd.set(13, stride_h);
pd.set(4, pad_left);
pd.set(15, pad_right);
pd.set(14, pad_top);
pd.set(16, pad_bottom);
pd.set(18, pad_value);
pd.set(5, bias_term);
pd.set(6, weight_data_flattened.w);
pd.set(7, group);
pd.set(8, int8_scale_term);
pd.set(9, activation_type);
pd.set(10, activation_params);
op->load_param(pd);
ncnn::Mat weights[2];
weights[0] = weight_data_flattened;
weights[1] = bias_data_flattened;
op->load_model(ncnn::ModelBinFromMatArray(weights));
op->create_pipeline(opt);
op->forward(bottom_blob, top_blob, opt);
op->destroy_pipeline(opt);
delete op;
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x168, %rsp # imm = 0x168
movq %rsi, %rbp
movq %rdi, %r13
movq (%rsi), %r14
leaq 0x48(%r14), %rdi
movq (%rdx), %rax
movq %rax, 0xb8(%rsp)
movl 0x60(%r14), %ebx
movl 0x74(%r14), %eax
movl %eax, 0x1c(%rsp)
movl 0x78(%r14), %eax
movl %eax, 0x18(%rsp)
imull 0x80(%r14), %ebx
leaq 0x70(%rsp), %rsi
andq $0x0, 0x40(%rsi)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, (%rsi)
vmovups %xmm0, 0xc(%rsi)
vmovaps %xmm0, 0x20(%rsi)
vmovups %xmm0, 0x2c(%rsi)
movq %rcx, %r15
movq %rcx, %rdx
callq 0x64ee7
pushq $-0x64
popq %r12
cmpq $0x0, 0x70(%rsp)
je 0x2ac867
movslq 0xa8(%rsp), %rax
imulq 0xb0(%rsp), %rax
testq %rax, %rax
je 0x2ac867
movslq 0x88(%rsp), %rcx
movl 0x9c(%rsp), %eax
imull %ecx, %eax
movl %eax, 0x9c(%rsp)
movq 0x80(%rsp), %rax
xorl %edx, %edx
divq %rcx
movq %rax, 0x80(%rsp)
movl $0x1, 0x88(%rsp)
andq $0x0, 0x60(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
vmovups %xmm0, 0x2c(%rsp)
vmovaps %xmm0, 0x40(%rsp)
vmovups %xmm0, 0x4c(%rsp)
movq (%r13), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0x100(%r13,%rax)
je 0x2ac3b0
movl $0x90, %edi
addq (%rbp), %rdi
leaq 0x20(%rsp), %rsi
movq %r15, %rdx
callq 0x64ee7
pushq $-0x64
popq %r12
cmpq $0x0, 0x20(%rsp)
je 0x2ac839
movslq 0x58(%rsp), %rax
imulq 0x60(%rsp), %rax
testq %rax, %rax
je 0x2ac839
movslq 0x38(%rsp), %rcx
movl 0x4c(%rsp), %eax
imull %ecx, %eax
movl %eax, 0x4c(%rsp)
movq 0x30(%rsp), %rax
xorl %edx, %edx
divq %rcx
movq %rax, 0x30(%rsp)
movl $0x1, 0x38(%rsp)
pushq $0x2a
popq %rdi
callq 0x782bf
movq %rax, %r12
leaq 0x8(%rsp), %rdi
callq 0x71548
leaq 0x8(%rsp), %rdi
xorl %esi, %esi
movl %ebx, %edx
callq 0x7193a
leaq 0x8(%rsp), %rdi
pushq $0x1
popq %rsi
movl 0x1c(%rsp), %edx
callq 0x7193a
leaq 0x8(%rsp), %rdi
pushq $0xb
popq %rsi
movl 0x18(%rsp), %edx
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0xdc(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0x2
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0xe0(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0xc
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0xe4(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0x3
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0xe8(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0xd
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0xec(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0x4
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0xf0(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0xf
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0xf4(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0xe
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0xf8(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0x10
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
vmovss 0xfc(%r13,%rax), %xmm0
leaq 0x8(%rsp), %rdi
pushq $0x12
popq %rsi
callq 0x71952
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0x100(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0x5
popq %rsi
callq 0x7193a
movl 0x9c(%rsp), %edx
leaq 0x8(%rsp), %rdi
pushq $0x6
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0x108(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0x7
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0x10c(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0x8
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0x110(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0x9
popq %rsi
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
leaq (%rax,%r13), %rdx
addq $0x118, %rdx # imm = 0x118
leaq 0x8(%rsp), %rdi
pushq $0xa
popq %rsi
callq 0x7196c
movq (%r12), %rax
leaq 0x8(%rsp), %rsi
movq %r12, %rdi
callq *0x10(%rax)
andq $0x0, 0x110(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0xd0(%rsp)
vmovups %xmm0, 0xdc(%rsp)
vmovaps %xmm0, 0xf0(%rsp)
vmovups %xmm0, 0xfc(%rsp)
andq $0x0, 0x158(%rsp)
vmovups %xmm0, 0x118(%rsp)
vmovups %xmm0, 0x124(%rsp)
vmovups %xmm0, 0x138(%rsp)
vmovups %xmm0, 0x144(%rsp)
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x2ac61e
lock
incl (%rax)
movq 0xd8(%rsp), %rax
testq %rax, %rax
je 0x2ac655
lock
decl (%rax)
jne 0x2ac655
movq 0xd0(%rsp), %rsi
movq 0xf0(%rsp), %rdi
testq %rdi, %rdi
je 0x2ac64d
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2ac655
movq %rsi, %rdi
callq 0x5f3e0
vmovaps 0x70(%rsp), %xmm0
vmovaps %xmm0, 0xd0(%rsp)
movq 0x80(%rsp), %rax
movq %rax, 0xe0(%rsp)
movl 0x88(%rsp), %eax
movl %eax, 0xe8(%rsp)
movq 0x90(%rsp), %rax
movq %rax, 0xf0(%rsp)
vmovups 0x98(%rsp), %xmm0
vmovups %xmm0, 0xf8(%rsp)
movl 0xa8(%rsp), %eax
movl %eax, 0x108(%rsp)
movq 0xb0(%rsp), %rax
movq %rax, 0x110(%rsp)
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x2ac6cf
lock
incl (%rax)
movq 0x120(%rsp), %rax
testq %rax, %rax
je 0x2ac706
lock
decl (%rax)
jne 0x2ac706
movq 0x118(%rsp), %rsi
movq 0x138(%rsp), %rdi
testq %rdi, %rdi
je 0x2ac6fe
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2ac706
movq %rsi, %rdi
callq 0x5f3e0
vmovaps 0x20(%rsp), %xmm0
leaq 0xd0(%rsp), %rsi
vmovups %xmm0, 0x48(%rsi)
movq 0x30(%rsp), %rax
movq %rax, 0x58(%rsi)
movl 0x38(%rsp), %eax
movl %eax, 0x60(%rsi)
movq 0x40(%rsp), %rax
movq %rax, 0x68(%rsi)
vmovups 0x48(%rsp), %xmm0
vmovaps %xmm0, 0x70(%rsi)
movl 0x58(%rsp), %eax
movl %eax, 0x80(%rsi)
movq 0x60(%rsp), %rax
movq %rax, 0x88(%rsi)
leaq 0xc0(%rsp), %rdi
callq 0x6b00e
movq (%r12), %rax
leaq 0xc0(%rsp), %rsi
movq %r12, %rdi
callq *0x18(%rax)
leaq 0xc0(%rsp), %rdi
callq 0x6b03a
movq (%r12), %rax
movq %r12, %rdi
movq %r15, %rsi
callq *0x20(%rax)
movq (%r12), %rax
movq %r12, %rdi
movq %r14, %rsi
movq 0xb8(%rsp), %rdx
movq %r15, %rcx
callq *0x38(%rax)
movq (%r12), %rax
movq %r12, %rdi
movq %r15, %rsi
callq *0x28(%rax)
movq (%r12), %rax
movq %r12, %rdi
callq *0x8(%rax)
pushq $0x48
popq %rbx
vxorps %xmm0, %xmm0, %xmm0
movq 0xd8(%rsp,%rbx), %rax
testq %rax, %rax
je 0x2ac801
lock
decl (%rax)
jne 0x2ac801
movq 0xd0(%rsp,%rbx), %rsi
movq 0xf0(%rsp,%rbx), %rdi
testq %rdi, %rdi
je 0x2ac7f5
movq (%rdi), %rax
callq *0x18(%rax)
vxorps %xmm0, %xmm0, %xmm0
jmp 0x2ac801
movq %rsi, %rdi
callq 0x5f3e0
vxorps %xmm0, %xmm0, %xmm0
leaq (%rsp,%rbx), %rax
addq $0xd0, %rax
andq $0x0, 0x40(%rax)
vmovups %xmm0, 0xc(%rax)
vmovups %xmm0, (%rax)
andl $0x0, 0x38(%rax)
vmovups %xmm0, 0x28(%rax)
addq $-0x48, %rbx
cmpq $-0x48, %rbx
jne 0x2ac7c2
leaq 0x8(%rsp), %rdi
callq 0x71614
xorl %r12d, %r12d
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x2ac867
lock
decl (%rax)
jne 0x2ac867
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x2ac85f
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2ac867
movq %rsi, %rdi
callq 0x5f3e0
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x2ac898
lock
decl (%rax)
jne 0x2ac898
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x2ac890
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2ac898
movq %rsi, %rdi
callq 0x5f3e0
movl %r12d, %eax
addq $0x168, %rsp # imm = 0x168
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x2ac9cd
jmp 0x2ac9cd
movq %rax, %rbx
leaq 0xc0(%rsp), %rdi
callq 0x6b03a
jmp 0x2ac8da
jmp 0x2ac8d7
jmp 0x2ac8cf
jmp 0x2ac8cf
movq %rax, %rbx
jmp 0x2ac964
movq %rax, %rbx
pushq $0x48
popq %r14
vxorps %xmm0, %xmm0, %xmm0
movq 0xd8(%rsp,%r14), %rax
testq %rax, %rax
je 0x2ac921
lock
decl (%rax)
jne 0x2ac921
movq 0xd0(%rsp,%r14), %rsi
movq 0xf0(%rsp,%r14), %rdi
testq %rdi, %rdi
je 0x2ac915
movq (%rdi), %rax
callq *0x18(%rax)
vxorps %xmm0, %xmm0, %xmm0
jmp 0x2ac921
movq %rsi, %rdi
callq 0x5f3e0
vxorps %xmm0, %xmm0, %xmm0
leaq (%rsp,%r14), %rax
addq $0xd0, %rax
andq $0x0, 0x40(%rax)
vmovups %xmm0, 0xc(%rax)
vmovups %xmm0, (%rax)
andl $0x0, 0x38(%rax)
vmovups %xmm0, 0x28(%rax)
addq $-0x48, %r14
cmpq $-0x48, %r14
jne 0x2ac8e2
jmp 0x2ac95a
jmp 0x2ac9cd
movq %rax, %rbx
jmp 0x2ac992
jmp 0x2ac9cd
movq %rax, %rbx
leaq 0x8(%rsp), %rdi
callq 0x71614
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x2ac992
lock
decl (%rax)
jne 0x2ac992
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0x2ac98c
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2ac992
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x2ac9c3
lock
decl (%rax)
jne 0x2ac9c3
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
jne 0x2ac9bd
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2ac9c3
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
jmp 0x2ac9cd
movq %rax, %rdi
callq 0x61d68
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/convolutiondepthwise_x86_avx.cpp |
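forward(bottom_blobs, ...) above builds a stock ConvolutionDepthWise layer at runtime so that weights supplied as input blobs can be used. A hedged skeleton of that pattern, reduced to the calls visible in the source (create_layer, ParamDict::set, load_param); make_depthwise and the parameter values are illustrative:

#include "layer.h"      // ncnn
#include "layer_type.h" // ncnn
#include "paramdict.h"  // ncnn

static ncnn::Layer* make_depthwise(int num_output, int kernel, int group)
{
    ncnn::Layer* op = ncnn::create_layer(ncnn::LayerType::ConvolutionDepthWise);
    ncnn::ParamDict pd;
    pd.set(0, num_output); // id 0: num_output
    pd.set(1, kernel);     // id 1: kernel_w
    pd.set(11, kernel);    // id 11: kernel_h
    pd.set(7, group);      // id 7: group (== channels for true depthwise)
    op->load_param(pd);
    return op; // caller still runs load_model() and create_pipeline()
}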
virtual thunk to ncnn::ConvolutionDepthWise_x86_avx::forward(std::vector<ncnn::Mat, std::allocator<ncnn::Mat>> const&, std::vector<ncnn::Mat, std::allocator<ncnn::Mat>>&, ncnn::Option const&) const | int ConvolutionDepthWise_x86_avx::forward(const std::vector<Mat>& bottom_blobs, std::vector<Mat>& top_blobs, const Option& opt) const
{
const Mat& bottom_blob = bottom_blobs[0];
const Mat& _weight_data = bottom_blobs[1];
Mat& top_blob = top_blobs[0];
const int _kernel_w = _weight_data.w;
const int _kernel_h = _weight_data.h;
const int _num_output = _weight_data.c * _weight_data.elempack;
Mat weight_data_flattened;
flatten(_weight_data, weight_data_flattened, opt);
if (weight_data_flattened.empty())
return -100;
// weight_data_flattened as pack1
weight_data_flattened.w *= weight_data_flattened.elempack;
weight_data_flattened.elemsize /= weight_data_flattened.elempack;
weight_data_flattened.elempack = 1;
Mat bias_data_flattened;
if (bias_term)
{
const Mat& _bias_data = bottom_blobs[2];
flatten(_bias_data, bias_data_flattened, opt);
if (bias_data_flattened.empty())
return -100;
// bias_data_flattened as pack1
bias_data_flattened.w *= bias_data_flattened.elempack;
bias_data_flattened.elemsize /= bias_data_flattened.elempack;
bias_data_flattened.elempack = 1;
}
ncnn::Layer* op = ncnn::create_layer(ncnn::LayerType::ConvolutionDepthWise);
ncnn::ParamDict pd;
pd.set(0, _num_output);
pd.set(1, _kernel_w);
pd.set(11, _kernel_h);
pd.set(2, dilation_w);
pd.set(12, dilation_h);
pd.set(3, stride_w);
pd.set(13, stride_h);
pd.set(4, pad_left);
pd.set(15, pad_right);
pd.set(14, pad_top);
pd.set(16, pad_bottom);
pd.set(18, pad_value);
pd.set(5, bias_term);
pd.set(6, weight_data_flattened.w);
pd.set(7, group);
pd.set(8, int8_scale_term);
pd.set(9, activation_type);
pd.set(10, activation_params);
op->load_param(pd);
ncnn::Mat weights[2];
weights[0] = weight_data_flattened;
weights[1] = bias_data_flattened;
op->load_model(ncnn::ModelBinFromMatArray(weights));
op->create_pipeline(opt);
op->forward(bottom_blob, top_blob, opt);
op->destroy_pipeline(opt);
delete op;
return 0;
} | movq (%rdi), %rax
addq -0x40(%rax), %rdi
jmp 0x2ac246
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/convolutiondepthwise_x86_avx.cpp |
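The thunk body above is only three instructions: load the vtable, add the adjustment offset stored at -0x40 to this, and tail-jump into the real forward(). Such stubs appear because ncnn's per-ISA layer classes derive through a virtual base, so the this pointer seen through the base subobject differs from the derived object's and the offset must be read from the vtable at runtime. A minimal sketch of a class shape that makes the compiler emit one; the names here are illustrative, not ncnn's:

struct Layer   { virtual ~Layer() {} virtual int forward() const = 0; };
struct ConvDW : Layer { int forward() const { return 0; } };

// overriding through a virtual base forces a "virtual thunk" for forward()
struct ConvDW_x86 : virtual ConvDW { int forward() const { return 1; } };

int main()
{
    ConvDW_x86 obj;
    Layer* l = &obj;     // dispatch through the base lands on the thunk,
    return l->forward(); // which fixes up `this` before the real body runs
}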
ncnn::Padding::load_param(ncnn::ParamDict const&) | int Padding::load_param(const ParamDict& pd)
{
top = pd.get(0, 0);
bottom = pd.get(1, 0);
left = pd.get(2, 0);
right = pd.get(3, 0);
type = pd.get(4, 0);
value = pd.get(5, 0.f);
per_channel_pad_data_size = pd.get(6, 0);
front = pd.get(7, 0);
behind = pd.get(8, 0);
return 0;
} | pushq %r14
pushq %rbx
pushq %rax
movq %rsi, %r14
movq %rdi, %rbx
movq %rsi, %rdi
xorl %esi, %esi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0xd0(%rbx)
pushq $0x1
popq %rsi
movq %r14, %rdi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0xd4(%rbx)
pushq $0x2
popq %rsi
movq %r14, %rdi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0xd8(%rbx)
pushq $0x3
popq %rsi
movq %r14, %rdi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0xdc(%rbx)
pushq $0x4
popq %rsi
movq %r14, %rdi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0xe0(%rbx)
pushq $0x5
popq %rsi
xorps %xmm0, %xmm0
movq %r14, %rdi
callq 0x718c0
movss %xmm0, 0xe4(%rbx)
pushq $0x6
popq %rsi
movq %r14, %rdi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0xf0(%rbx)
pushq $0x7
popq %rsi
movq %r14, %rdi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0xe8(%rbx)
pushq $0x8
popq %rsi
movq %r14, %rdi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0xec(%rbx)
xorl %eax, %eax
addq $0x8, %rsp
popq %rbx
popq %r14
retq
nop
| /csukuangfj[P]ncnn/src/layer/padding.cpp |
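load_param() above maps numbered keys to fields (0=top, 1=bottom, 2=left, 3=right, 4=type, 5=value, 6=per_channel_pad_data_size, 7=front, 8=behind), and pd.get(id, fallback) returns the fallback when a key is absent. A hedged usage sketch of that default behavior; the include path is an assumption:

#include "layer/padding.h" // ncnn internal header, path assumed

static void demo_defaults()
{
    ncnn::ParamDict pd;
    pd.set(0, 2);         // only id 0 (top) is present
    ncnn::Padding layer;
    layer.load_param(pd); // ids 1..8 absent: ints fall back to 0, value to 0.f
}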
ncnn::Padding::load_model(ncnn::ModelBin const&) | int Padding::load_model(const ModelBin& mb)
{
if (per_channel_pad_data_size)
{
per_channel_pad_data = mb.load(per_channel_pad_data_size, 1);
}
return 0;
} | pushq %r14
pushq %rbx
subq $0x48, %rsp
movl 0xf0(%rdi), %edx
testl %edx, %edx
je 0x2acbe2
movq %rdi, %rbx
movq (%rsi), %rax
movq %rsp, %r14
pushq $0x1
popq %rcx
movq %r14, %rdi
callq *0x10(%rax)
leaq 0xf8(%rbx), %rcx
movq 0x8(%r14), %rax
cmpq %r14, %rcx
je 0x2acbba
testq %rax, %rax
je 0x2acb2b
lock
incl (%rax)
movq 0x100(%rbx), %rax
testq %rax, %rax
je 0x2acb5f
lock
decl (%rax)
jne 0x2acb5f
movq 0xf8(%rbx), %rsi
movq 0x118(%rbx), %rdi
testq %rdi, %rdi
je 0x2acb57
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2acb5f
movq %rsi, %rdi
callq 0x5f3e0
movq (%rsp), %rax
movq %rax, 0xf8(%rbx)
movq 0x8(%rsp), %rax
movq %rax, 0x100(%rbx)
movq 0x10(%rsp), %rcx
movq %rcx, 0x108(%rbx)
movl 0x18(%rsp), %ecx
movl %ecx, 0x110(%rbx)
movq 0x20(%rsp), %rcx
movq %rcx, 0x118(%rbx)
movups 0x28(%rsp), %xmm0
movups %xmm0, 0x120(%rbx)
movl 0x38(%rsp), %ecx
movl %ecx, 0x130(%rbx)
movq 0x40(%rsp), %rcx
movq %rcx, 0x138(%rbx)
testq %rax, %rax
je 0x2acbe2
lock
decl (%rax)
jne 0x2acbe2
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
je 0x2acbda
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2acbe2
movq %rsi, %rdi
callq 0x5f3e0
xorl %eax, %eax
addq $0x48, %rsp
popq %rbx
popq %r14
retq
movq %rax, %rbx
movq 0x8(%rsp), %rax
testq %rax, %rax
je 0x2acc1c
lock
decl (%rax)
jne 0x2acc1c
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
jne 0x2acc16
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2acc1c
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
jmp 0x2acc26
movq %rax, %rdi
callq 0x61d68
| /csukuangfj[P]ncnn/src/layer/padding.cpp |
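Beyond the mb.load() call, most of the assembly above is the ncnn::Mat copy-assignment that stores the loaded blob into per_channel_pad_data: bump the source refcount (lock incl), release the old payload (lock decl plus a free path), then copy the header fields. A minimal sketch of that protocol, assuming ncnn's NCNN_XADD macro; not the verbatim operator=:

#include "mat.h" // ncnn

static ncnn::Mat& assign_sketch(ncnn::Mat& dst, const ncnn::Mat& src)
{
    if (src.refcount) NCNN_XADD(src.refcount, 1); // share ownership first
    dst.release();                                // then drop the old buffer
    dst.data = src.data;                          // ... and copy the header
    dst.refcount = src.refcount;
    dst.elemsize = src.elemsize;
    dst.elempack = src.elempack;
    dst.allocator = src.allocator;
    dst.dims = src.dims; dst.w = src.w; dst.h = src.h;
    dst.d = src.d; dst.c = src.c; dst.cstep = src.cstep;
    return dst;
}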
ncnn::Padding::forward(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const | int Padding::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
if (top == 0 && bottom == 0 && left == 0 && right == 0 && front == 0 && behind == 0)
{
top_blob = bottom_blob;
return 0;
}
int w = bottom_blob.w;
int h = bottom_blob.h;
int d = bottom_blob.d;
int channels = bottom_blob.c;
int dims = bottom_blob.dims;
size_t elemsize = bottom_blob.elemsize;
int outw = w + left + right;
if (dims == 1)
{
top_blob.create(outw, elemsize, opt.blob_allocator);
if (top_blob.empty())
return -100;
if (elemsize == 1)
copy_make_border_image<signed char>(bottom_blob, top_blob, 0, left, type, static_cast<signed char>(value));
if (elemsize == 2)
copy_make_border_image<unsigned short>(bottom_blob, top_blob, 0, left, type, support_fp16_storage && opt.use_fp16_storage ? float32_to_float16(value) : float32_to_bfloat16(value));
if (elemsize == 4)
copy_make_border_image<float>(bottom_blob, top_blob, 0, left, type, value);
return 0;
}
int outh = h + top + bottom;
if (dims == 2)
{
top_blob.create(outw, outh, elemsize, opt.blob_allocator);
if (top_blob.empty())
return -100;
if (elemsize == 1)
copy_make_border_image<signed char>(bottom_blob, top_blob, top, left, type, static_cast<signed char>(value));
if (elemsize == 2)
copy_make_border_image<unsigned short>(bottom_blob, top_blob, top, left, type, support_fp16_storage && opt.use_fp16_storage ? float32_to_float16(value) : float32_to_bfloat16(value));
if (elemsize == 4)
copy_make_border_image<float>(bottom_blob, top_blob, top, left, type, value);
return 0;
}
if (dims == 3)
{
int outc = channels + front + behind;
top_blob.create(outw, outh, outc, elemsize, opt.blob_allocator);
if (top_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < outc; q++)
{
Mat borderm = top_blob.channel(q);
float pad_value = per_channel_pad_data_size ? per_channel_pad_data[q] : value;
// Channel padding
if (((q < front) || (q >= (channels + front))) && type == 0)
{
if (elemsize == 1)
{
borderm.fill(static_cast<signed char>(pad_value));
}
if (elemsize == 2)
{
borderm.fill(support_fp16_storage && opt.use_fp16_storage ? float32_to_float16(pad_value) : float32_to_bfloat16(pad_value));
}
if (elemsize == 4)
{
borderm.fill(pad_value);
}
}
else
{
int q_ = q - front;
if (type == 1)
{
q_ = q_ <= 0 ? 0 : q_;
q_ = q_ >= channels - 1 ? channels - 1 : q_;
}
if (type == 2)
{
q_ = abs(q_);
q_ = (channels - 1) - abs(q_ - (channels - 1));
}
const Mat m = bottom_blob.channel(q_);
if (elemsize == 1)
copy_make_border_image<signed char>(m, borderm, top, left, type, static_cast<signed char>(pad_value));
if (elemsize == 2)
copy_make_border_image<unsigned short>(m, borderm, top, left, type, support_fp16_storage && opt.use_fp16_storage ? float32_to_float16(pad_value) : float32_to_bfloat16(pad_value));
if (elemsize == 4)
copy_make_border_image<float>(m, borderm, top, left, type, pad_value);
}
}
return 0;
}
if (dims == 4)
{
int outd = d + front + behind;
top_blob.create(outw, outh, outd, channels, elemsize, opt.blob_allocator);
if (top_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
float pad_value = per_channel_pad_data_size ? per_channel_pad_data[q] : value;
for (int z = 0; z < outd; z++)
{
Mat borderm = top_blob.channel(q).depth(z);
// depth padding
if (((z < front) || (z >= (d + front))) && type == 0)
{
if (elemsize == 1)
{
borderm.fill(static_cast<signed char>(pad_value));
}
if (elemsize == 2)
{
borderm.fill(support_fp16_storage && opt.use_fp16_storage ? float32_to_float16(pad_value) : float32_to_bfloat16(pad_value));
}
if (elemsize == 4)
{
borderm.fill(pad_value);
}
}
else
{
int z_ = z - front;
if (type == 1)
{
z_ = z_ <= 0 ? 0 : z_;
z_ = z_ >= d - 1 ? d - 1 : z_;
}
if (type == 2)
{
z_ = abs(z_);
z_ = (d - 1) - abs(z_ - (d - 1));
}
const Mat m = bottom_blob.channel(q).depth(z_);
if (elemsize == 1)
copy_make_border_image<signed char>(m, borderm, top, left, type, static_cast<signed char>(pad_value));
if (elemsize == 2)
copy_make_border_image<unsigned short>(m, borderm, top, left, type, support_fp16_storage && opt.use_fp16_storage ? float32_to_float16(pad_value) : float32_to_bfloat16(pad_value));
if (elemsize == 4)
copy_make_border_image<float>(m, borderm, top, left, type, pad_value);
}
}
}
return 0;
}
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x118, %rsp # imm = 0x118
movq %rcx, 0x20(%rsp)
movq %rdx, %rbx
movq %rsi, %r14
movl 0xd0(%rdi), %eax
movl 0xd4(%rdi), %edx
testl %eax, %eax
sete %cl
testl %edx, %edx
sete %sil
movq 0xd8(%rdi), %xmm1
movq %rdi, 0x8(%rsp)
movq 0xe8(%rdi), %xmm0
movdqa %xmm1, %xmm2
punpcklqdq %xmm0, %xmm2 # xmm2 = xmm2[0],xmm0[0]
pxor %xmm3, %xmm3
pcmpeqd %xmm2, %xmm3
movmskps %xmm3, %edi
xorl $0xf, %edi
sete %dil
andb %cl, %sil
andb %dil, %sil
cmpb $0x1, %sil
jne 0x2acceb
movq $0x0, 0x10(%rsp)
cmpq %r14, %rbx
je 0x2ad8eb
movq 0x8(%r14), %rax
testq %rax, %rax
je 0x2accba
lock
incl (%rax)
movq 0x8(%rbx), %rax
testq %rax, %rax
je 0x2ad760
lock
decl (%rax)
jne 0x2ad760
movq (%rbx), %rsi
movq 0x20(%rbx), %rdi
testq %rdi, %rdi
je 0x2ad758
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2ad760
movl 0x28(%r14), %ecx
movq 0x10(%r14), %r8
pshufd $0x55, %xmm1, %xmm2 # xmm2 = xmm1[1,1,1,1]
paddd %xmm1, %xmm2
movd %xmm2, %esi
addl 0x2c(%r14), %esi
pushq $-0x64
popq %rdi
movq %rdi, 0x10(%rsp)
cmpl $0x1, %ecx
movq %r8, 0x28(%rsp)
jne 0x2acdaf
movq 0x20(%rsp), %rax
movq 0x8(%rax), %rcx
movq %rbx, %rdi
movq %r8, %rdx
callq 0x635fa
cmpq $0x0, (%rbx)
je 0x2ad8eb
movslq 0x38(%rbx), %rax
imulq 0x40(%rbx), %rax
testq %rax, %rax
je 0x2ad8eb
movq 0x28(%rsp), %rax
cmpq $0x4, %rax
je 0x2ad7ef
cmpq $0x2, %rax
je 0x2ad7b8
movq $0x0, 0x10(%rsp)
cmpq $0x1, %rax
jne 0x2ad8eb
movq 0x8(%rsp), %rax
movl 0xd8(%rax), %ecx
movl 0xe0(%rax), %r8d
cvttss2si 0xe4(%rax), %eax
movsbl %al, %r9d
movq $0x0, 0x10(%rsp)
movq %r14, %rdi
movq %rbx, %rsi
xorl %edx, %edx
callq 0x2ad910
jmp 0x2ad8eb
movl 0x38(%r14), %edi
movq %rdi, 0x40(%rsp)
addl %eax, %edx
addl 0x30(%r14), %edx
pshufd $0x55, %xmm0, %xmm1 # xmm1 = xmm0[1,1,1,1]
cmpl $0x4, %ecx
movq %r14, 0x60(%rsp)
je 0x2ad2bc
cmpl $0x3, %ecx
je 0x2ace6c
cmpl $0x2, %ecx
jne 0x2ad8e2
movq 0x20(%rsp), %rax
movq 0x8(%rax), %r8
movq %rbx, %rdi
movq 0x28(%rsp), %rcx
callq 0x636fa
cmpq $0x0, (%rbx)
je 0x2ad8eb
movslq 0x38(%rbx), %rax
imulq 0x40(%rbx), %rax
testq %rax, %rax
je 0x2ad8eb
movq 0x28(%rsp), %rax
cmpq $0x4, %rax
je 0x2ad892
cmpq $0x2, %rax
je 0x2ad854
cmpq $0x1, %rax
jne 0x2ad8e2
movq 0x8(%rsp), %rax
movl 0xd0(%rax), %edx
movl 0xd8(%rax), %ecx
movl 0xe0(%rax), %r8d
cvttss2si 0xe4(%rax), %eax
movsbl %al, %r9d
movq %r14, %rdi
movq %rbx, %rsi
callq 0x2ad910
jmp 0x2ad8e2
paddd %xmm0, %xmm1
movd %xmm1, %ecx
addl 0x40(%rsp), %ecx
movq 0x20(%rsp), %rax
movq 0x8(%rax), %r9
movq %rbx, %rdi
movq %rcx, %r14
movq 0x28(%rsp), %r8
callq 0x63810
cmpq $0x0, (%rbx)
je 0x2ad8eb
movslq 0x38(%rbx), %rax
imulq 0x40(%rbx), %rax
testq %rax, %rax
je 0x2ad8eb
movq 0x8(%rsp), %rax
addq $0xe4, %rax
movq %rax, 0x50(%rsp)
xorl %ecx, %ecx
testl %r14d, %r14d
movl $0x0, %eax
movq %rax, 0x10(%rsp)
cmovlel %ecx, %r14d
movq %r14, 0x48(%rsp)
movq 0x40(%rsp), %rax
decl %eax
movl %eax, 0x68(%rsp)
xorl %esi, %esi
cmpq 0x48(%rsp), %rsi
je 0x2ad8eb
movslq 0x2c(%rbx), %rax
movslq 0x30(%rbx), %rcx
movl 0x34(%rbx), %r14d
movq (%rbx), %r12
movq 0x10(%rbx), %rbp
movq 0x40(%rbx), %r13
movq %r13, %rdx
movq %rsi, 0x38(%rsp)
imulq %rsi, %rdx
imulq %rbp, %rdx
addq %r12, %rdx
movl 0x18(%rbx), %esi
movq 0x20(%rbx), %rdi
movq %rdx, 0xb8(%rsp)
andq $0x0, 0xc0(%rsp)
movq %rbp, 0xc8(%rsp)
movl %esi, 0xd0(%rsp)
movq %rdi, 0xd8(%rsp)
movl %eax, 0xe4(%rsp)
movl %ecx, 0xe8(%rsp)
movl $0x1, 0xec(%rsp)
movl %r14d, 0xf0(%rsp)
imulq %rax, %rcx
movq %rbp, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rbp
movq %rax, %r15
movq %rax, 0xf8(%rsp)
movl 0x28(%rbx), %eax
leal -0x1(%rax), %edx
movl %edx, 0xe0(%rsp)
cmpl $0x4, %eax
jne 0x2acfa7
movq %rcx, 0xf8(%rsp)
movq %rcx, %r15
movq 0x38(%rsp), %rdi
leaq (,%rdi,4), %rax
movq 0x8(%rsp), %rcx
addq 0xf8(%rcx), %rax
cmpl $0x0, 0xf0(%rcx)
cmoveq 0x50(%rsp), %rax
movd (%rax), %xmm0
movl 0xe0(%rcx), %r8d
movl 0xe8(%rcx), %edx
movslq %edx, %rax
cmpq %rax, %rdi
setl %cl
addl 0x40(%rsp), %eax
cltq
cmpq %rax, %rdi
setge %al
orb %cl, %al
testl %r8d, %r8d
sete %sil
andb %al, %sil
movd %xmm0, %ecx
cmpb $0x1, %sil
jne 0x2ad061
movq 0x28(%rsp), %rax
cmpq $0x4, %rax
je 0x2ad201
cmpq $0x2, %rax
je 0x2ad19f
cmpq $0x1, %rax
jne 0x2ad2af
cvttss2si %xmm0, %eax
imull %r15d, %r14d
testl %r14d, %r14d
movl $0x0, %ecx
cmovlel %ecx, %r14d
imulq %rbp, %r13
imulq 0x38(%rsp), %r13
addq %r13, %r12
xorl %ecx, %ecx
cmpq %rcx, %r14
je 0x2ad2af
movb %al, (%r12,%rcx)
incq %rcx
jmp 0x2ad04f
movl %edi, %eax
subl %edx, %eax
cmpl $0x2, %r8d
movl %r8d, %ebp
je 0x2ad08d
cmpl $0x1, %ebp
movq 0x60(%rsp), %r11
jne 0x2ad0a8
testl %eax, %eax
movl $0x0, %edx
cmovlel %edx, %eax
movl 0x68(%rsp), %edx
cmpl %edx, %eax
cmovgel %edx, %eax
jmp 0x2ad0a8
movl %eax, %edx
negl %edx
cmovsl %eax, %edx
movl 0x68(%rsp), %esi
subl %esi, %edx
movl %edx, %eax
negl %eax
cmovnsl %edx, %eax
addl %esi, %eax
movq 0x60(%rsp), %r11
movslq 0x2c(%r11), %rdx
movslq 0x30(%r11), %rsi
cltq
imulq 0x40(%r11), %rax
movq 0x10(%r11), %rdi
imulq %rdi, %rax
addq (%r11), %rax
movl 0x34(%r11), %r8d
movl 0x18(%r11), %r9d
movq 0x20(%r11), %r10
movq %rax, 0x70(%rsp)
andq $0x0, 0x78(%rsp)
movq %rdi, 0x80(%rsp)
movl %r9d, 0x88(%rsp)
movq %r10, 0x90(%rsp)
movl %edx, 0x9c(%rsp)
movl %esi, 0xa0(%rsp)
movl $0x1, 0xa4(%rsp)
movl %r8d, 0xa8(%rsp)
imulq %rdx, %rsi
movq %rdi, %rax
imulq %rsi, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rdi
movq %rax, 0xb0(%rsp)
movl 0x28(%r11), %eax
leal -0x1(%rax), %edx
movl %edx, 0x98(%rsp)
cmpl $0x4, %eax
jne 0x2ad14d
movq %rsi, 0xb0(%rsp)
movq 0x28(%rsp), %rax
cmpq $0x4, %rax
je 0x2ad234
cmpq $0x2, %rax
je 0x2ad1c9
cmpq $0x1, %rax
jne 0x2ad2af
movq 0x8(%rsp), %rax
movl 0xd0(%rax), %edx
movl 0xd8(%rax), %ecx
cvttss2si %xmm0, %eax
movsbl %al, %r9d
leaq 0x70(%rsp), %rdi
leaq 0xb8(%rsp), %rsi
movl %ebp, %r8d
callq 0x2ad910
jmp 0x2ad2af
movq 0x8(%rsp), %rax
cmpb $0x1, 0xd(%rax)
jne 0x2ad25c
movq 0x20(%rsp), %rax
cmpb $0x1, 0x22(%rax)
jne 0x2ad25c
callq 0x6460e
movl %eax, %ecx
jmp 0x2ad25f
movq 0x8(%rsp), %rax
movl 0xd0(%rax), %r15d
movl 0xd8(%rax), %r14d
cmpb $0x1, 0xd(%rax)
jne 0x2ad28d
movq 0x20(%rsp), %rax
cmpb $0x1, 0x22(%rax)
jne 0x2ad28d
callq 0x6460e
movl %eax, %ecx
jmp 0x2ad290
imull %r15d, %r14d
testl %r14d, %r14d
movl $0x0, %eax
cmovlel %eax, %r14d
imulq %rbp, %r13
imulq 0x38(%rsp), %r13
addq %r13, %r12
xorl %eax, %eax
cmpl %eax, %r14d
je 0x2ad2af
movss %xmm0, (%r12,%rax,4)
incq %rax
jmp 0x2ad220
movq 0x8(%rsp), %rax
movl 0xd0(%rax), %edx
movl 0xd8(%rax), %ecx
leaq 0x70(%rsp), %rdi
leaq 0xb8(%rsp), %rsi
movl %ebp, %r8d
callq 0x2ae697
jmp 0x2ad2af
shrl $0x10, %ecx
imull %r15d, %r14d
testl %r14d, %r14d
movl $0x0, %eax
cmovlel %eax, %r14d
imulq %rbp, %r13
imulq 0x38(%rsp), %r13
addq %r13, %r12
xorl %eax, %eax
cmpq %rax, %r14
je 0x2ad2af
movw %cx, (%r12,%rax,2)
incq %rax
jmp 0x2ad27e
shrl $0x10, %ecx
movzwl %cx, %r9d
leaq 0x70(%rsp), %rdi
leaq 0xb8(%rsp), %rsi
movl %r15d, %edx
movl %r14d, %ecx
movl %ebp, %r8d
callq 0x2adf5b
movq 0x38(%rsp), %rsi
incq %rsi
jmp 0x2acee2
movl 0x34(%r14), %eax
paddd %xmm0, %xmm1
movd %xmm1, %ecx
movq %rax, 0x100(%rsp)
addl %eax, %ecx
movq 0x20(%rsp), %rax
movq 0x8(%rax), %rax
movq %rax, (%rsp)
movq %rbx, %rdi
movq %rcx, %r14
movq 0x40(%rsp), %r8
movq 0x28(%rsp), %r9
callq 0x6393c
cmpq $0x0, (%rbx)
je 0x2ad8eb
movslq 0x38(%rbx), %rax
imulq 0x40(%rbx), %rax
testq %rax, %rax
je 0x2ad8eb
movq 0x8(%rsp), %rax
addq $0xe4, %rax
movq %rax, 0x108(%rsp)
xorl %ecx, %ecx
testl %r14d, %r14d
cmovlel %ecx, %r14d
movq %r14, 0x68(%rsp)
movq 0x100(%rsp), %rax
decl %eax
movl %eax, 0x5c(%rsp)
movq 0x40(%rsp), %rdx
testl %edx, %edx
movl $0x0, %eax
movq %rax, 0x10(%rsp)
cmovlel %ecx, %edx
movq %rdx, 0x40(%rsp)
xorl %edx, %edx
movq %rbx, 0x110(%rsp)
cmpq 0x40(%rsp), %rdx
je 0x2ad8eb
leaq (,%rdx,4), %rax
movq 0x8(%rsp), %rcx
addq 0xf8(%rcx), %rax
cmpl $0x0, 0xf0(%rcx)
cmoveq 0x108(%rsp), %rax
movd (%rax), %xmm0
movd %xmm0, %eax
shrl $0x10, %eax
movl %eax, 0x34(%rsp)
cvttss2si %xmm0, %eax
movl %eax, 0x38(%rsp)
xorl %edi, %edi
movss %xmm0, 0x1c(%rsp)
movq %rdx, 0x50(%rsp)
cmpq 0x68(%rsp), %rdi
je 0x2ad750
movslq 0x2c(%rbx), %r15
movslq 0x30(%rbx), %r14
movq (%rbx), %r13
movq %rbx, %rsi
movq 0x10(%rbx), %rbx
movq 0x40(%rsi), %r12
imulq %rdx, %r12
movq %r12, %rax
imulq %rbx, %rax
addq %r13, %rax
movl 0x18(%rsi), %ecx
movq %r14, %rbp
imulq %r15, %rbp
movq %rdi, %rdx
imulq %rbx, %rdx
imulq %rbp, %rdx
addq %rax, %rdx
movq 0x20(%rsi), %rax
movq %rdx, 0xb8(%rsp)
andq $0x0, 0xc0(%rsp)
movq %rbx, 0xc8(%rsp)
movl %ecx, 0xd0(%rsp)
movq %rax, 0xd8(%rsp)
movl $0x2, 0xe0(%rsp)
movl %r15d, 0xe4(%rsp)
movl %r14d, 0xe8(%rsp)
movabsq $0x100000001, %rax # imm = 0x100000001
movq %rax, 0xec(%rsp)
movq %rbp, 0xf8(%rsp)
movq 0x8(%rsp), %rsi
movl 0xe8(%rsi), %ecx
movslq %ecx, %rax
cmpq %rax, %rdi
setl %dl
addl 0x100(%rsp), %eax
cltq
cmpq %rax, %rdi
movl 0xe0(%rsi), %esi
setge %al
orb %dl, %al
testl %esi, %esi
sete %dl
andb %al, %dl
cmpb $0x1, %dl
movq %rdi, 0x48(%rsp)
jne 0x2ad4f3
movq 0x28(%rsp), %rax
cmpq $0x4, %rax
je 0x2ad6d4
cmpq $0x2, %rax
je 0x2ad627
cmpq $0x1, %rax
jne 0x2ad736
testl %ebp, %ebp
movl $0x0, %eax
cmovlel %eax, %ebp
imulq 0x48(%rsp), %r14
imulq %r15, %r14
addq %r12, %r14
imulq %r14, %rbx
addq %rbx, %r13
xorl %eax, %eax
cmpq %rax, %rbp
je 0x2ad736
movl 0x38(%rsp), %ecx
movb %cl, (%r13,%rax)
incq %rax
jmp 0x2ad4dc
movl %edi, %eax
subl %ecx, %eax
cmpl $0x2, %esi
je 0x2ad523
cmpl $0x1, %esi
movq 0x60(%rsp), %r9
movq 0x50(%rsp), %rdi
movl %esi, %r14d
jne 0x2ad546
testl %eax, %eax
movl $0x0, %ecx
cmovlel %ecx, %eax
movl 0x5c(%rsp), %ecx
cmpl %ecx, %eax
cmovgel %ecx, %eax
jmp 0x2ad546
movl %esi, %r14d
movl %eax, %ecx
negl %ecx
cmovsl %eax, %ecx
movl 0x5c(%rsp), %edx
subl %edx, %ecx
movl %ecx, %eax
negl %eax
cmovnsl %ecx, %eax
addl %edx, %eax
movq 0x60(%rsp), %r9
movq 0x50(%rsp), %rdi
movslq 0x2c(%r9), %rcx
movslq 0x30(%r9), %rdx
movq 0x40(%r9), %rsi
imulq %rdi, %rsi
movq 0x10(%r9), %rdi
imulq %rdi, %rsi
addq (%r9), %rsi
movl 0x18(%r9), %r8d
movq 0x20(%r9), %r9
movq %rdx, %r10
imulq %rcx, %r10
cltq
imulq %rdi, %rax
imulq %r10, %rax
addq %rsi, %rax
movq %rax, 0x70(%rsp)
andq $0x0, 0x78(%rsp)
movq %rdi, 0x80(%rsp)
movl %r8d, 0x88(%rsp)
movq %r9, 0x90(%rsp)
movl $0x2, 0x98(%rsp)
movl %ecx, 0x9c(%rsp)
movl %edx, 0xa0(%rsp)
movabsq $0x100000001, %rax # imm = 0x100000001
movq %rax, 0xa4(%rsp)
movq %r10, 0xb0(%rsp)
movq 0x28(%rsp), %rax
cmpq $0x4, %rax
je 0x2ad704
cmpq $0x2, %rax
je 0x2ad684
cmpq $0x1, %rax
jne 0x2ad736
movq 0x8(%rsp), %rax
movl 0xd0(%rax), %edx
movl 0xd8(%rax), %ecx
movsbl 0x38(%rsp), %r9d
leaq 0x70(%rsp), %rdi
leaq 0xb8(%rsp), %rsi
movl %r14d, %r8d
callq 0x2ad910
jmp 0x2ad730
movq 0x8(%rsp), %rax
cmpb $0x1, 0xd(%rax)
movl 0x34(%rsp), %eax
jne 0x2ad650
movq 0x20(%rsp), %rax
cmpb $0x1, 0x22(%rax)
movl 0x34(%rsp), %eax
jne 0x2ad650
callq 0x6460e
movss 0x1c(%rsp), %xmm0
testl %ebp, %ebp
movl $0x0, %ecx
cmovlel %ecx, %ebp
imulq 0x48(%rsp), %r14
imulq %r15, %r14
addq %r12, %r14
imulq %r14, %rbx
addq %rbx, %r13
xorl %ecx, %ecx
cmpq %rcx, %rbp
je 0x2ad736
movw %ax, (%r13,%rcx,2)
incq %rcx
jmp 0x2ad670
movq 0x8(%rsp), %rax
movl 0xd0(%rax), %r13d
movl 0xd8(%rax), %ebp
cmpb $0x1, 0xd(%rax)
movl 0x34(%rsp), %eax
jne 0x2ad6b4
movq 0x20(%rsp), %rax
cmpb $0x1, 0x22(%rax)
movl 0x34(%rsp), %eax
jne 0x2ad6b4
callq 0x6460e
movzwl %ax, %r9d
leaq 0x70(%rsp), %rdi
leaq 0xb8(%rsp), %rsi
movl %r13d, %edx
movl %ebp, %ecx
movl %r14d, %r8d
callq 0x2adf5b
jmp 0x2ad730
testl %ebp, %ebp
movl $0x0, %eax
cmovlel %eax, %ebp
imulq 0x48(%rsp), %r14
imulq %r15, %r14
addq %r12, %r14
imulq %r14, %rbx
addq %rbx, %r13
xorl %eax, %eax
cmpl %eax, %ebp
je 0x2ad736
movss %xmm0, (%r13,%rax,4)
incq %rax
jmp 0x2ad6f4
movq 0x8(%rsp), %rax
movl 0xd0(%rax), %edx
movl 0xd8(%rax), %ecx
leaq 0x70(%rsp), %rdi
leaq 0xb8(%rsp), %rsi
movl %r14d, %r8d
movss 0x1c(%rsp), %xmm0
callq 0x2ae697
movss 0x1c(%rsp), %xmm0
movq 0x48(%rsp), %rdi
incq %rdi
movq 0x110(%rsp), %rbx
movq 0x50(%rsp), %rdx
jmp 0x2ad3b5
incq %rdx
jmp 0x2ad362
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x40(%rbx)
pxor %xmm0, %xmm0
movdqu %xmm0, (%rbx)
movdqu %xmm0, 0xc(%rbx)
andl $0x0, 0x38(%rbx)
movdqu %xmm0, 0x28(%rbx)
movups (%r14), %xmm0
movups %xmm0, (%rbx)
movq 0x10(%r14), %rax
movq %rax, 0x10(%rbx)
movl 0x18(%r14), %eax
movl %eax, 0x18(%rbx)
movq 0x20(%r14), %rax
movq %rax, 0x20(%rbx)
movdqu 0x28(%r14), %xmm0
movdqu %xmm0, 0x28(%rbx)
movl 0x38(%r14), %eax
movl %eax, 0x38(%rbx)
movq 0x40(%r14), %rax
movq %rax, 0x40(%rbx)
jmp 0x2ad8eb
movq 0x8(%rsp), %rax
movl 0xd8(%rax), %ebp
movl 0xe0(%rax), %r15d
cmpb $0x1, 0xd(%rax)
jne 0x2ad824
movq 0x20(%rsp), %rax
cmpb $0x1, 0x22(%rax)
jne 0x2ad824
movq 0x8(%rsp), %rax
movd 0xe4(%rax), %xmm0
callq 0x6460e
jmp 0x2ad830
movq 0x8(%rsp), %rax
movl 0xd8(%rax), %ecx
movl 0xe0(%rax), %r8d
movd 0xe4(%rax), %xmm0
movq $0x0, 0x10(%rsp)
movq %r14, %rdi
movq %rbx, %rsi
xorl %edx, %edx
callq 0x2ae697
jmp 0x2ad8eb
movq 0x8(%rsp), %rax
movzwl 0xe6(%rax), %eax
movzwl %ax, %r9d
movq $0x0, 0x10(%rsp)
movq %r14, %rdi
movq %rbx, %rsi
xorl %edx, %edx
movl %ebp, %ecx
movl %r15d, %r8d
callq 0x2adf5b
jmp 0x2ad8eb
movq 0x8(%rsp), %rax
movl 0xd0(%rax), %ebp
movl 0xd8(%rax), %r15d
movl 0xe0(%rax), %r13d
cmpb $0x1, 0xd(%rax)
jne 0x2ad8bf
movq 0x20(%rsp), %rax
cmpb $0x1, 0x22(%rax)
jne 0x2ad8bf
movq 0x8(%rsp), %rax
movd 0xe4(%rax), %xmm0
callq 0x6460e
jmp 0x2ad8cb
movq 0x8(%rsp), %rax
movl 0xd0(%rax), %edx
movl 0xd8(%rax), %ecx
movl 0xe0(%rax), %r8d
movd 0xe4(%rax), %xmm0
movq %r14, %rdi
movq %rbx, %rsi
callq 0x2ae697
jmp 0x2ad8e2
movq 0x8(%rsp), %rax
movzwl 0xe6(%rax), %eax
movzwl %ax, %r9d
movq %r14, %rdi
movq %rbx, %rsi
movl %ebp, %edx
movl %r15d, %ecx
movl %r13d, %r8d
callq 0x2adf5b
movq $0x0, 0x10(%rsp)
movq 0x10(%rsp), %rax
addq $0x118, %rsp # imm = 0x118
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x2ad908
jmp 0x2ad908
jmp 0x2ad908
movq %rax, %rdi
callq 0x5f340
| /csukuangfj[P]ncnn/src/layer/padding.cpp |
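The index gymnastics in forward() above reduce to one remapping rule per pad type, applied identically to channels (dims==3) and depths (dims==4): type 1 replicates the edge, type 2 reflects around it, and type 0 positions outside the source are filled with a constant instead. A minimal standalone sketch; remap_index is a name introduced here:

#include <cstdlib>

// source index for output position q, given `front` leading pad slots and
// n source slots; type 1 = replicate (clamp), type 2 = reflect (mirror)
static int remap_index(int q, int front, int n, int type)
{
    int q_ = q - front;
    if (type == 1)
    {
        if (q_ < 0) q_ = 0;             // clamp below
        if (q_ > n - 1) q_ = n - 1;     // clamp above
    }
    if (type == 2)
    {
        q_ = std::abs(q_);                     // mirror the leading edge
        q_ = (n - 1) - std::abs(q_ - (n - 1)); // mirror the trailing edge
    }
    return q_;
}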
void ncnn::copy_make_border_image<signed char>(ncnn::Mat const&, ncnn::Mat&, int, int, int, signed char) | static void copy_make_border_image(const Mat& src, Mat& dst, int top, int left, int type, T v)
{
int w = dst.w;
int h = dst.h;
const T* ptr = src;
T* outptr = dst;
if (type == 0)
{
int y = 0;
// fill top
for (; y < top; y++)
{
int x = 0;
for (; x < w; x++)
{
outptr[x] = v;
}
outptr += w;
}
// fill center
for (; y < (top + src.h); y++)
{
int x = 0;
for (; x < left; x++)
{
outptr[x] = v;
}
if (src.w < 12)
{
for (; x < (left + src.w); x++)
{
outptr[x] = ptr[x - left];
}
}
else
{
memcpy(outptr + left, ptr, src.w * sizeof(T));
x += src.w;
}
for (; x < w; x++)
{
outptr[x] = v;
}
ptr += src.w;
outptr += w;
}
// fill bottom
for (; y < h; y++)
{
int x = 0;
for (; x < w; x++)
{
outptr[x] = v;
}
outptr += w;
}
}
if (type == 1)
{
int y = 0;
// fill top
for (; y < top; y++)
{
int x = 0;
for (; x < left; x++)
{
outptr[x] = ptr[0];
}
if (src.w < 12)
{
for (; x < (left + src.w); x++)
{
outptr[x] = ptr[x - left];
}
}
else
{
memcpy(outptr + left, ptr, src.w * sizeof(T));
x += src.w;
}
for (; x < w; x++)
{
outptr[x] = ptr[src.w - 1];
}
outptr += w;
}
// fill center
for (; y < (top + src.h); y++)
{
int x = 0;
for (; x < left; x++)
{
outptr[x] = ptr[0];
}
if (src.w < 12)
{
for (; x < (left + src.w); x++)
{
outptr[x] = ptr[x - left];
}
}
else
{
memcpy(outptr + left, ptr, src.w * sizeof(T));
x += src.w;
}
for (; x < w; x++)
{
outptr[x] = ptr[src.w - 1];
}
ptr += src.w;
outptr += w;
}
// fill bottom
ptr -= src.w;
for (; y < h; y++)
{
int x = 0;
for (; x < left; x++)
{
outptr[x] = ptr[0];
}
if (src.w < 12)
{
for (; x < (left + src.w); x++)
{
outptr[x] = ptr[x - left];
}
}
else
{
memcpy(outptr + left, ptr, src.w * sizeof(T));
x += src.w;
}
for (; x < w; x++)
{
outptr[x] = ptr[src.w - 1];
}
outptr += w;
}
}
if (type == 2)
{
int y = 0;
// fill top
ptr += top * src.w;
for (; y < top; y++)
{
int x = 0;
for (; x < left; x++)
{
outptr[x] = ptr[left - x];
}
if (src.w < 12)
{
for (; x < (left + src.w); x++)
{
outptr[x] = ptr[x - left];
}
}
else
{
memcpy(outptr + left, ptr, src.w * sizeof(T));
x += src.w;
}
for (; x < w; x++)
{
outptr[x] = ptr[src.w - (x - left - src.w) - 2];
}
outptr += w;
ptr -= src.w;
}
// fill center
for (; y < (top + src.h); y++)
{
int x = 0;
for (; x < left; x++)
{
outptr[x] = ptr[left - x];
}
if (src.w < 12)
{
for (; x < (left + src.w); x++)
{
outptr[x] = ptr[x - left];
}
}
else
{
memcpy(outptr + left, ptr, src.w * sizeof(T));
x += src.w;
}
for (; x < w; x++)
{
outptr[x] = ptr[src.w - (x - left - src.w) - 2];
}
ptr += src.w;
outptr += w;
}
// fill bottom
ptr -= 2 * src.w;
for (; y < h; y++)
{
int x = 0;
for (; x < left; x++)
{
outptr[x] = ptr[left - x];
}
if (src.w < 12)
{
for (; x < (left + src.w); x++)
{
outptr[x] = ptr[x - left];
}
}
else
{
memcpy(outptr + left, ptr, src.w * sizeof(T));
x += src.w;
}
for (; x < w; x++)
{
outptr[x] = ptr[src.w - (x - left - src.w) - 2];
}
outptr += w;
ptr -= src.w;
}
}
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x38, %rsp
movl %r9d, %r13d
movl %ecx, %r9d
movq %rdi, %r14
movslq 0x2c(%rsi), %r12
movl 0x30(%rsi), %eax
movl %eax, 0x2c(%rsp)
movq (%rdi), %r15
movq (%rsi), %rbp
movslq %ecx, %rax
movq %rax, (%rsp)
testl %r8d, %r8d
movl %ecx, 0xc(%rsp)
movl %edx, 0x8(%rsp)
jne 0x2ada5f
movl %r8d, 0x30(%rsp)
xorl %eax, %eax
testl %r12d, %r12d
movl $0x0, %ebx
cmovgl %r12d, %ebx
testl %edx, %edx
movl $0x0, %esi
cmovgl %edx, %esi
cmpl %esi, %eax
je 0x2ad989
xorl %ecx, %ecx
cmpq %rcx, %rbx
je 0x2ad982
movb %r13b, (%rbp,%rcx)
incq %rcx
jmp 0x2ad973
addq %r12, %rbp
incl %eax
jmp 0x2ad96d
xorl %edi, %edi
movq (%rsp), %rax
testl %eax, %eax
cmovgl %r9d, %edi
negq %rax
movq %rax, 0x18(%rsp)
movq %rdi, 0x20(%rsp)
movl 0x30(%r14), %eax
addl 0x8(%rsp), %eax
cmpl %eax, %esi
jge 0x2ada54
xorl %eax, %eax
cmpq %rax, %rdi
je 0x2ad9c3
movb %r13b, (%rbp,%rax)
incq %rax
jmp 0x2ad9b4
movslq 0x2c(%r14), %rdx
cmpq $0xb, %rdx
jg 0x2ad9f4
movq 0x18(%rsp), %rax
leaq (%r15,%rax), %rcx
movq %rdi, %rax
addl %r9d, %edx
movslq %edx, %rdx
cmpq %rdx, %rax
jge 0x2ada1c
movb (%rcx,%rax), %dl
movb %dl, (%rbp,%rax)
incq %rax
movl 0x2c(%r14), %edx
jmp 0x2ad9d9
movq (%rsp), %rax
leaq (%rax,%rbp), %rdi
movl %esi, 0x10(%rsp)
movq %r15, %rsi
callq 0x5f3c0
movq 0x20(%rsp), %rdi
movl 0x10(%rsp), %esi
movl 0xc(%rsp), %r9d
movl 0x2c(%r14), %eax
addl %edi, %eax
cltq
cmpq %r12, %rax
jge 0x2ada2d
movb %r13b, (%rbp,%rax)
incq %rax
jmp 0x2ada1e
movslq 0x2c(%r14), %rax
addq %rax, %r15
addq %r12, %rbp
incl %esi
jmp 0x2ad9a2
xorl %eax, %eax
cmpq %rax, %rbx
je 0x2ada4f
movb %r13b, (%rbp,%rax)
incq %rax
jmp 0x2ada40
addq %r12, %rbp
incl %esi
cmpl 0x2c(%rsp), %esi
jl 0x2ada3e
movl 0x30(%rsp), %r8d
cmpl $0x1, %r8d
jne 0x2adb3c
movl %r8d, 0x30(%rsp)
xorl %r8d, %r8d
movq (%rsp), %rax
testl %eax, %eax
movl $0x0, %ebx
cmovgl %r9d, %ebx
movl 0x8(%rsp), %ecx
testl %ecx, %ecx
movl $0x0, %esi
cmovgl %ecx, %esi
leaq (%rbx,%rbp), %rdi
movq %r15, %r13
subq %rax, %r13
movl %esi, 0x20(%rsp)
cmpl %esi, %r8d
je 0x2adb45
xorl %eax, %eax
cmpq %rax, %rbx
je 0x2adab8
movb (%r15), %cl
movb %cl, (%rbp,%rax)
incq %rax
jmp 0x2adaa7
movslq 0x2c(%r14), %rdx
cmpq $0xb, %rdx
jg 0x2adae2
movq %rbx, %rax
addl %r9d, %edx
movslq %edx, %rcx
cmpq %rcx, %rax
jge 0x2adb15
movb (%r13,%rax), %cl
movb %cl, (%rbp,%rax)
incq %rax
movl 0x2c(%r14), %edx
jmp 0x2adac5
movq (%rsp), %rax
movq %rdi, 0x10(%rsp)
leaq (%rax,%rbp), %rdi
movq %r15, %rsi
movq %r8, 0x18(%rsp)
callq 0x5f3c0
movq 0x18(%rsp), %r8
movq 0x10(%rsp), %rdi
movl 0x20(%rsp), %esi
movl 0xc(%rsp), %r9d
movl 0x2c(%r14), %eax
addl %ebx, %eax
cltq
cmpq %r12, %rax
jge 0x2adb2e
movslq 0x2c(%r14), %rcx
movb -0x1(%rcx,%r15), %cl
movb %cl, (%rbp,%rax)
incq %rax
jmp 0x2adb17
addq %r12, %rbp
incl %r8d
addq %r12, %rdi
jmp 0x2ada9c
movl 0x8(%rsp), %edx
jmp 0x2adcb4
movq (%rsp), %rax
negq %rax
movq %rax, 0x18(%rsp)
movl 0x30(%r14), %eax
addl 0x8(%rsp), %eax
cmpl %eax, %esi
jge 0x2adbf9
xorl %eax, %eax
cmpq %rax, %rbx
je 0x2adb74
movb (%r15), %cl
movb %cl, (%rbp,%rax)
incq %rax
jmp 0x2adb63
movslq 0x2c(%r14), %rdx
cmpq $0xb, %rdx
jg 0x2adba5
movq 0x18(%rsp), %rax
leaq (%r15,%rax), %rcx
movq %rbx, %rax
addl %r9d, %edx
movslq %edx, %rdx
cmpq %rdx, %rax
jge 0x2adbd0
movb (%rcx,%rax), %dl
movb %dl, (%rbp,%rax)
incq %rax
movl 0x2c(%r14), %edx
jmp 0x2adb8a
movq (%rsp), %rax
movq %rdi, 0x10(%rsp)
leaq (%rax,%rbp), %rdi
movl %esi, %r13d
movq %r15, %rsi
callq 0x5f3c0
movq 0x10(%rsp), %rdi
movl %r13d, %esi
movl 0xc(%rsp), %r9d
movl 0x2c(%r14), %eax
addl %ebx, %eax
cltq
movslq 0x2c(%r14), %rcx
cmpq %r12, %rax
jge 0x2adbe9
movb -0x1(%rcx,%r15), %cl
movb %cl, (%rbp,%rax)
incq %rax
jmp 0x2adbd2
addq %rcx, %r15
addq %r12, %rbp
incl %esi
addq %r12, %rdi
jmp 0x2adb51
movslq 0x2c(%r14), %rax
movq %r15, %r13
subq %rax, %r13
addq (%rsp), %rax
subq %rax, %r15
cmpl 0x2c(%rsp), %esi
jge 0x2adca8
xorl %eax, %eax
cmpq %rax, %rbx
je 0x2adc28
movb (%r13), %cl
movb %cl, (%rbp,%rax)
incq %rax
jmp 0x2adc16
movslq 0x2c(%r14), %rdx
cmpq $0xb, %rdx
jg 0x2adc55
movq %rdi, %rcx
movq %rbx, %rax
addl %r9d, %edx
movslq %edx, %rdx
cmpq %rdx, %rax
jge 0x2adc82
movb (%r15,%rax), %dl
movb %dl, (%rcx)
incq %rax
movl 0x2c(%r14), %edx
incq %rcx
jmp 0x2adc38
movq (%rsp), %rax
movq %rdi, 0x10(%rsp)
leaq (%rax,%rbp), %rdi
movl %esi, 0x20(%rsp)
movq %r13, %rsi
callq 0x5f3c0
movq 0x10(%rsp), %rdi
movl 0x20(%rsp), %esi
movl 0xc(%rsp), %r9d
movl 0x2c(%r14), %eax
addl %ebx, %eax
cltq
cmpq %r12, %rax
jge 0x2adc9b
movslq 0x2c(%r14), %rcx
movb -0x1(%r13,%rcx), %cl
movb %cl, (%rbp,%rax)
incq %rax
jmp 0x2adc84
addq %r12, %rbp
incl %esi
addq %r12, %rdi
jmp 0x2adc0a
movq %r13, %r15
movl 0x8(%rsp), %edx
movl 0x30(%rsp), %r8d
cmpl $0x2, %r8d
jne 0x2adf4c
movl 0x2c(%r14), %eax
movl %eax, %ecx
imull %edx, %ecx
movslq %ecx, %rcx
addq %rcx, %r15
xorl %esi, %esi
movq (%rsp), %rcx
testl %ecx, %ecx
movl $0x0, %r13d
cmovgl %ecx, %r13d
testl %edx, %edx
movl $0x0, %edi
cmovgl %edx, %edi
movq %rbp, %rbx
addq %r13, %rbx
movq %rcx, %rdx
negq %rdx
movq %rdx, 0x20(%rsp)
leal -0x2(%rcx), %r10d
movl %r10d, 0x18(%rsp)
movl %edi, 0x10(%rsp)
cmpl %edi, %esi
je 0x2addc4
movq (%rsp), %rax
addq %r15, %rax
xorl %ecx, %ecx
cmpq %rcx, %r13
je 0x2add2b
movb (%rax), %dl
movb %dl, (%rbp,%rcx)
incq %rcx
decq %rax
jmp 0x2add18
movslq 0x2c(%r14), %rdx
cmpq $0xb, %rdx
jg 0x2add5c
movq 0x20(%rsp), %rax
leaq (%r15,%rax), %rcx
movq %r13, %rax
addl %r9d, %edx
movslq %edx, %rdx
cmpq %rdx, %rax
jge 0x2add8b
movb (%rcx,%rax), %dl
movb %dl, (%rbp,%rax)
incq %rax
movl 0x2c(%r14), %edx
jmp 0x2add41
movq (%rsp), %rax
leaq (%rax,%rbp), %rdi
movq %rsi, 0x30(%rsp)
movq %r15, %rsi
callq 0x5f3c0
movq 0x30(%rsp), %rsi
movl 0x18(%rsp), %r10d
movl 0x10(%rsp), %edi
movl 0xc(%rsp), %r9d
movl 0x2c(%r14), %eax
addl %r13d, %eax
cltq
movl %r10d, %ecx
subl %eax, %ecx
cmpq %r12, %rax
jge 0x2addb0
movl 0x2c(%r14), %edx
leal (%rcx,%rdx,2), %edx
movslq %edx, %rdx
movb (%r15,%rdx), %dl
movb %dl, (%rbp,%rax)
incq %rax
decl %ecx
jmp 0x2add92
addq %r12, %rbp
movslq 0x2c(%r14), %rax
subq %rax, %r15
incl %esi
addq %r12, %rbx
jmp 0x2add07
movl 0x8(%rsp), %esi
movl 0x30(%r14), %ecx
addl %esi, %ecx
cmpl %ecx, %edi
jge 0x2ade88
movq (%rsp), %rax
addq %r15, %rax
xorl %ecx, %ecx
cmpq %rcx, %r13
je 0x2addf2
movb (%rax), %dl
movb %dl, (%rbp,%rcx)
incq %rcx
decq %rax
jmp 0x2adddf
movslq 0x2c(%r14), %rdx
cmpq $0xb, %rdx
jg 0x2ade23
movq 0x20(%rsp), %rax
leaq (%r15,%rax), %rcx
movq %r13, %rax
addl %r9d, %edx
movslq %edx, %rdx
cmpq %rdx, %rax
jge 0x2ade50
movb (%rcx,%rax), %dl
movb %dl, (%rbp,%rax)
incq %rax
movl 0x2c(%r14), %edx
jmp 0x2ade08
movq (%rsp), %rax
movl %edi, 0x10(%rsp)
leaq (%rax,%rbp), %rdi
movq %r15, %rsi
callq 0x5f3c0
movl 0x8(%rsp), %esi
movl 0x18(%rsp), %r10d
movl 0x10(%rsp), %edi
movl 0xc(%rsp), %r9d
movl 0x2c(%r14), %eax
addl %r13d, %eax
movslq %eax, %rcx
movl %r10d, %edx
subl %ecx, %edx
movl 0x2c(%r14), %eax
cmpq %r12, %rcx
jge 0x2ade75
leal (%rdx,%rax,2), %eax
cltq
movb (%r15,%rax), %al
movb %al, (%rbp,%rcx)
incq %rcx
decl %edx
jmp 0x2ade58
movslq %eax, %rcx
addq %rcx, %r15
addq %r12, %rbp
incl %edi
addq %r12, %rbx
jmp 0x2addc8
addl %eax, %eax
cltq
subq %rax, %r15
cmpl 0x2c(%rsp), %edi
jge 0x2adf4c
movq (%rsp), %rax
addq %r15, %rax
xorl %ecx, %ecx
cmpq %rcx, %r13
je 0x2adeb5
movb (%rax), %dl
movb %dl, (%rbp,%rcx)
incq %rcx
decq %rax
jmp 0x2adea2
movslq 0x2c(%r14), %rdx
cmpq $0xb, %rdx
jg 0x2adeea
movq 0x20(%rsp), %rax
leaq (%r15,%rax), %rcx
movq %rbx, %rsi
movq %r13, %rax
addl %r9d, %edx
movslq %edx, %rdx
cmpq %rdx, %rax
jge 0x2adf13
movb (%rcx,%rax), %dl
movb %dl, (%rsi)
incq %rax
movl 0x2c(%r14), %edx
incq %rsi
jmp 0x2adece
movq (%rsp), %rax
movl %edi, 0x10(%rsp)
leaq (%rax,%rbp), %rdi
movq %r15, %rsi
callq 0x5f3c0
movl 0x18(%rsp), %r10d
movl 0x10(%rsp), %edi
movl 0xc(%rsp), %r9d
movl 0x2c(%r14), %eax
addl %r13d, %eax
cltq
movl %r10d, %ecx
subl %eax, %ecx
cmpq %r12, %rax
jge 0x2adf38
movl 0x2c(%r14), %edx
leal (%rcx,%rdx,2), %edx
movslq %edx, %rdx
movb (%r15,%rdx), %dl
movb %dl, (%rbp,%rax)
incq %rax
decl %ecx
jmp 0x2adf1a
addq %r12, %rbp
movslq 0x2c(%r14), %rax
subq %rax, %r15
incl %edi
addq %r12, %rbx
jmp 0x2ade8f
addq $0x38, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
| /csukuangfj[P]ncnn/src/layer/padding.cpp |
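Each center row of the type==0 path above performs the same three steps, with a memcpy fast path once src.w reaches 12 elements. A minimal sketch of one such row for the signed char instantiation; pad_row_const is a name introduced here:

#include <cstring>

static void pad_row_const(signed char* out, const signed char* in,
                          int left, int srcw, int w, signed char v)
{
    for (int x = 0; x < left; x++)
        out[x] = v;                                     // left border
    memcpy(out + left, in, srcw * sizeof(signed char)); // bulk copy (the w >= 12 path)
    for (int x = left + srcw; x < w; x++)
        out[x] = v;                                     // right border
}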
void ncnn::copy_make_border_image<unsigned short>(ncnn::Mat const&, ncnn::Mat&, int, int, int, unsigned short) | static void copy_make_border_image(const Mat& src, Mat& dst, int top, int left, int type, T v)
{
int w = dst.w;
int h = dst.h;
const T* ptr = src;
T* outptr = dst;
if (type == 0)
{
int y = 0;
// fill top
for (; y < top; y++)
{
int x = 0;
for (; x < w; x++)
{
outptr[x] = v;
}
outptr += w;
}
// fill center
for (; y < (top + src.h); y++)
{
int x = 0;
for (; x < left; x++)
{
outptr[x] = v;
}
if (src.w < 12)
{
for (; x < (left + src.w); x++)
{
outptr[x] = ptr[x - left];
}
}
else
{
memcpy(outptr + left, ptr, src.w * sizeof(T));
x += src.w;
}
for (; x < w; x++)
{
outptr[x] = v;
}
ptr += src.w;
outptr += w;
}
// fill bottom
for (; y < h; y++)
{
int x = 0;
for (; x < w; x++)
{
outptr[x] = v;
}
outptr += w;
}
}
if (type == 1)
{
int y = 0;
// fill top
for (; y < top; y++)
{
int x = 0;
for (; x < left; x++)
{
outptr[x] = ptr[0];
}
if (src.w < 12)
{
for (; x < (left + src.w); x++)
{
outptr[x] = ptr[x - left];
}
}
else
{
memcpy(outptr + left, ptr, src.w * sizeof(T));
x += src.w;
}
for (; x < w; x++)
{
outptr[x] = ptr[src.w - 1];
}
outptr += w;
}
// fill center
for (; y < (top + src.h); y++)
{
int x = 0;
for (; x < left; x++)
{
outptr[x] = ptr[0];
}
if (src.w < 12)
{
for (; x < (left + src.w); x++)
{
outptr[x] = ptr[x - left];
}
}
else
{
memcpy(outptr + left, ptr, src.w * sizeof(T));
x += src.w;
}
for (; x < w; x++)
{
outptr[x] = ptr[src.w - 1];
}
ptr += src.w;
outptr += w;
}
// fill bottom
ptr -= src.w;
for (; y < h; y++)
{
int x = 0;
for (; x < left; x++)
{
outptr[x] = ptr[0];
}
if (src.w < 12)
{
for (; x < (left + src.w); x++)
{
outptr[x] = ptr[x - left];
}
}
else
{
memcpy(outptr + left, ptr, src.w * sizeof(T));
x += src.w;
}
for (; x < w; x++)
{
outptr[x] = ptr[src.w - 1];
}
outptr += w;
}
}
if (type == 2)
{
int y = 0;
// fill top
ptr += top * src.w;
for (; y < top; y++)
{
int x = 0;
for (; x < left; x++)
{
outptr[x] = ptr[left - x];
}
if (src.w < 12)
{
for (; x < (left + src.w); x++)
{
outptr[x] = ptr[x - left];
}
}
else
{
memcpy(outptr + left, ptr, src.w * sizeof(T));
x += src.w;
}
for (; x < w; x++)
{
outptr[x] = ptr[src.w - (x - left - src.w) - 2];
}
outptr += w;
ptr -= src.w;
}
// fill center
for (; y < (top + src.h); y++)
{
int x = 0;
for (; x < left; x++)
{
outptr[x] = ptr[left - x];
}
if (src.w < 12)
{
for (; x < (left + src.w); x++)
{
outptr[x] = ptr[x - left];
}
}
else
{
memcpy(outptr + left, ptr, src.w * sizeof(T));
x += src.w;
}
for (; x < w; x++)
{
outptr[x] = ptr[src.w - (x - left - src.w) - 2];
}
ptr += src.w;
outptr += w;
}
// fill bottom
ptr -= 2 * src.w;
for (; y < h; y++)
{
int x = 0;
for (; x < left; x++)
{
outptr[x] = ptr[left - x];
}
if (src.w < 12)
{
for (; x < (left + src.w); x++)
{
outptr[x] = ptr[x - left];
}
}
else
{
memcpy(outptr + left, ptr, src.w * sizeof(T));
x += src.w;
}
for (; x < w; x++)
{
outptr[x] = ptr[src.w - (x - left - src.w) - 2];
}
outptr += w;
ptr -= src.w;
}
}
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x58, %rsp
movslq 0x2c(%rsi), %rbx
movl 0x30(%rsi), %eax
movl %eax, 0x4c(%rsp)
movq (%rdi), %r12
movq (%rsi), %rbp
movq %rcx, 0x28(%rsp)
movslq %ecx, %rax
movq %rax, 0x18(%rsp)
testl %r8d, %r8d
movq %rdi, 0x40(%rsp)
movl %edx, 0xc(%rsp)
jne 0x2ae0d4
movl %r9d, %r13d
movl %r8d, 0x30(%rsp)
xorl %eax, %eax
testl %ebx, %ebx
movl $0x0, %r14d
cmovgl %ebx, %r14d
testl %edx, %edx
movl $0x0, %r9d
cmovgl %edx, %r9d
leaq (%rbx,%rbx), %r10
cmpl %r9d, %eax
je 0x2adfdd
xorl %ecx, %ecx
cmpq %rcx, %r14
je 0x2adfd6
movw %r13w, (%rbp,%rcx,2)
incq %rcx
jmp 0x2adfc6
incl %eax
addq %r10, %rbp
jmp 0x2adfbf
xorl %r15d, %r15d
movq 0x18(%rsp), %rax
testl %eax, %eax
cmovgl 0x28(%rsp), %r15d
addq %rax, %rax
negq %rax
movq %rax, 0x38(%rsp)
movq %r10, 0x20(%rsp)
movl 0x30(%rdi), %eax
addl 0xc(%rsp), %eax
cmpl %eax, %r9d
jge 0x2ae0ac
xorl %eax, %eax
cmpq %rax, %r15
je 0x2ae01f
movw %r13w, (%rbp,%rax,2)
incq %rax
jmp 0x2ae00f
movl 0x2c(%rdi), %edx
cmpl $0xb, %edx
jg 0x2ae052
movq 0x28(%rsp), %rax
addl %edx, %eax
movslq %eax, %rcx
movq 0x38(%rsp), %rax
leaq (%r12,%rax), %rsi
movq %r15, %rax
cmpq %rcx, %rax
jge 0x2ae088
movzwl (%rsi,%rax,2), %r8d
movw %r8w, (%rbp,%rax,2)
incq %rax
jmp 0x2ae03d
movq 0x18(%rsp), %rax
leaq (,%rax,2), %rdi
addq %rbp, %rdi
addq %rdx, %rdx
movq %r12, %rsi
movl %r9d, 0x10(%rsp)
callq 0x5f3c0
movq 0x20(%rsp), %r10
movl 0x10(%rsp), %r9d
movq 0x40(%rsp), %rdi
movl 0x2c(%rdi), %edx
leal (%rdx,%r15), %eax
cltq
cmpq %rbx, %rax
jge 0x2ae09a
movw %r13w, (%rbp,%rax,2)
incq %rax
jmp 0x2ae08a
movslq %edx, %rax
leaq (%r12,%rax,2), %r12
addq %r10, %rbp
incl %r9d
jmp 0x2adffd
movl 0x4c(%rsp), %ecx
cmpl %ecx, %r9d
jge 0x2ae0cf
xorl %eax, %eax
cmpq %rax, %r14
je 0x2ae0c7
movw %r13w, (%rbp,%rax,2)
incq %rax
jmp 0x2ae0b7
addq %r10, %rbp
incl %r9d
jmp 0x2ae0b0
movl 0x30(%rsp), %r8d
cmpl $0x1, %r8d
jne 0x2ae1dc
movl %r8d, 0x30(%rsp)
xorl %esi, %esi
movq 0x18(%rsp), %rax
testl %eax, %eax
movl $0x0, %r14d
cmovgl 0x28(%rsp), %r14d
movl 0xc(%rsp), %ecx
testl %ecx, %ecx
movl $0x0, %r8d
cmovgl %ecx, %r8d
leaq (%rbx,%rbx), %r9
leaq (%rbp,%r14,2), %r10
addq %rax, %rax
movq %r12, %r15
movq %rax, 0x50(%rsp)
subq %rax, %r15
movq %r9, 0x20(%rsp)
cmpl %r8d, %esi
je 0x2ae1e5
xorl %eax, %eax
cmpq %rax, %r14
je 0x2ae143
movzwl (%r12), %ecx
movw %cx, (%rbp,%rax,2)
incq %rax
jmp 0x2ae12f
movl 0x2c(%rdi), %edx
cmpl $0xb, %edx
jg 0x2ae169
addl 0x28(%rsp), %edx
movslq %edx, %rcx
movq %r14, %rax
cmpq %rcx, %rax
jge 0x2ae1b1
movzwl (%r15,%rax,2), %edx
movw %dx, (%rbp,%rax,2)
incq %rax
jmp 0x2ae155
movq 0x18(%rsp), %rax
leaq (,%rax,2), %rdi
addq %rbp, %rdi
addq %rdx, %rdx
movq %rsi, 0x38(%rsp)
movq %r12, %rsi
movl %r8d, %r13d
movq %r10, 0x10(%rsp)
callq 0x5f3c0
movq 0x38(%rsp), %rsi
movq 0x10(%rsp), %r10
movq 0x20(%rsp), %r9
movl %r13d, %r8d
movq 0x40(%rsp), %rdi
movl 0x2c(%rdi), %ecx
leal (%rcx,%r14), %eax
jmp 0x2ae1b4
movl 0x2c(%rdi), %ecx
decl %ecx
movslq %ecx, %rcx
cltq
cmpq %rbx, %rax
jge 0x2ae1cf
movzwl (%r12,%rcx,2), %edx
movw %dx, (%rbp,%rax,2)
incq %rax
jmp 0x2ae1bb
addq %r9, %rbp
incl %esi
addq %r9, %r10
jmp 0x2ae124
movl 0xc(%rsp), %ecx
jmp 0x2ae38a
movq 0x50(%rsp), %rax
negq %rax
movq %rax, 0x10(%rsp)
movl 0x30(%rdi), %eax
addl 0xc(%rsp), %eax
cmpl %eax, %r8d
jge 0x2ae2b4
xorl %eax, %eax
cmpq %rax, %r14
je 0x2ae218
movzwl (%r12), %ecx
movw %cx, (%rbp,%rax,2)
incq %rax
jmp 0x2ae204
movl 0x2c(%rdi), %edx
cmpl $0xb, %edx
jg 0x2ae246
addl 0x28(%rsp), %edx
movslq %edx, %rax
movq 0x10(%rsp), %rcx
leaq (%r12,%rcx), %rdx
movq %r14, %rcx
cmpq %rax, %rcx
jge 0x2ae280
movzwl (%rdx,%rcx,2), %esi
movw %si, (%rbp,%rcx,2)
incq %rcx
jmp 0x2ae233
movq 0x18(%rsp), %rax
leaq (,%rax,2), %rdi
addq %rbp, %rdi
addq %rdx, %rdx
movq %r12, %rsi
movl %r8d, %r13d
movq %r10, %r15
callq 0x5f3c0
movq %r15, %r10
movq 0x20(%rsp), %r9
movl %r13d, %r8d
movq 0x40(%rsp), %rdi
movl 0x2c(%rdi), %eax
leal (%rax,%r14), %ecx
jmp 0x2ae283
movl 0x2c(%rdi), %eax
leal -0x1(%rax), %edx
movslq %edx, %rdx
movslq %ecx, %rcx
cmpq %rbx, %rcx
jge 0x2ae2a0
movzwl (%r12,%rdx,2), %esi
movw %si, (%rbp,%rcx,2)
incq %rcx
jmp 0x2ae28c
cltq
leaq (%r12,%rax,2), %r12
addq %r9, %rbp
incl %r8d
addq %r9, %r10
jmp 0x2ae1f2
movslq 0x2c(%rdi), %rdx
movq %r12, %r13
subq %rdx, %r13
subq %rdx, %r13
movq 0x50(%rsp), %rax
leaq (%rax,%rdx,2), %rax
subq %rax, %r12
cmpl 0x4c(%rsp), %r8d
jge 0x2ae37e
xorl %eax, %eax
cmpq %rax, %r14
je 0x2ae2ee
movzwl (%r13), %ecx
movw %cx, (%rbp,%rax,2)
incq %rax
jmp 0x2ae2da
cmpl $0xb, %edx
jg 0x2ae316
addl 0x28(%rsp), %edx
movslq %edx, %rcx
movq %r10, %rdx
movq %r14, %rax
cmpq %rcx, %rax
jge 0x2ae353
movzwl (%r12,%rax,2), %esi
movw %si, (%rdx)
incq %rax
addq $0x2, %rdx
jmp 0x2ae300
movq 0x18(%rsp), %rax
leaq (,%rax,2), %rdi
addq %rbp, %rdi
addl %edx, %edx
movq %r13, %rsi
movl %r8d, %r15d
movq %r10, 0x10(%rsp)
callq 0x5f3c0
movq 0x10(%rsp), %r10
movq 0x20(%rsp), %r9
movl %r15d, %r8d
movq 0x40(%rsp), %rdi
movl 0x2c(%rdi), %edx
leal (%rdx,%r14), %eax
jmp 0x2ae356
movl 0x2c(%rdi), %edx
movslq %edx, %rcx
cltq
cmpq %rbx, %rax
jge 0x2ae370
movzwl -0x2(%r13,%rcx,2), %esi
movw %si, (%rbp,%rax,2)
incq %rax
jmp 0x2ae35b
addq %r9, %rbp
incl %r8d
addq %r9, %r10
jmp 0x2ae2cd
movq %r13, %r12
movl 0xc(%rsp), %ecx
movl 0x30(%rsp), %r8d
cmpl $0x2, %r8d
jne 0x2ae688
movl 0x2c(%rdi), %edx
movl %edx, %eax
imull %ecx, %eax
cltq
leaq (%r12,%rax,2), %r15
xorl %r8d, %r8d
movq 0x18(%rsp), %rax
testl %eax, %eax
movl $0x0, %r12d
cmovgl %eax, %r12d
testl %ecx, %ecx
movl $0x0, %r13d
cmovgl %ecx, %r13d
leaq (%rbx,%rbx), %r9
leaq (,%r12,2), %r10
addq %rbp, %r10
leaq (%rax,%rax), %r11
movq %r11, %rcx
negq %rcx
movq %rcx, 0x38(%rsp)
leal -0x2(%rax), %r14d
movq %r9, 0x10(%rsp)
movq %r11, 0x20(%rsp)
movq %r14, 0x30(%rsp)
movl %r13d, 0x50(%rsp)
cmpl %r13d, %r8d
je 0x2ae4cb
leaq (%r15,%r11), %rax
xorl %ecx, %ecx
cmpq %rcx, %r12
je 0x2ae41d
movzwl (%rax), %esi
movw %si, (%rbp,%rcx,2)
incq %rcx
addq $-0x2, %rax
jmp 0x2ae407
cmpl $0xb, %edx
jg 0x2ae448
addl 0x28(%rsp), %edx
movslq %edx, %rcx
movq 0x38(%rsp), %rax
leaq (%r15,%rax), %rdx
movq %r12, %rax
cmpq %rcx, %rax
jge 0x2ae490
movzwl (%rdx,%rax,2), %esi
movw %si, (%rbp,%rax,2)
incq %rax
jmp 0x2ae435
movq 0x18(%rsp), %rax
leaq (,%rax,2), %rdi
addq %rbp, %rdi
addl %edx, %edx
movq %r15, %rsi
movq %r10, %r13
movq %r8, %r14
callq 0x5f3c0
movq %r14, %r8
movq 0x20(%rsp), %r11
movq %r13, %r10
movl 0x50(%rsp), %r13d
movq 0x30(%rsp), %r14
movq 0x10(%rsp), %r9
movq 0x40(%rsp), %rdi
movl 0x2c(%rdi), %edx
leal (%rdx,%r12), %eax
jmp 0x2ae493
movl 0x2c(%rdi), %edx
cltq
leal (%r14,%rdx,2), %ecx
subl %eax, %ecx
cmpq %rbx, %rax
jge 0x2ae4b4
movslq %ecx, %rcx
movzwl (%r15,%rcx,2), %esi
movw %si, (%rbp,%rax,2)
incq %rax
decl %ecx
jmp 0x2ae49b
addq %r9, %rbp
movslq %edx, %rax
addq %rax, %rax
subq %rax, %r15
incl %r8d
addq %r9, %r10
jmp 0x2ae3f8
movl 0xc(%rsp), %r8d
movl 0x30(%rdi), %eax
addl %r8d, %eax
cmpl %eax, %r13d
jge 0x2ae5ac
leaq (%r15,%r11), %rax
xorl %ecx, %ecx
cmpq %rcx, %r12
je 0x2ae4fb
movzwl (%rax), %esi
movw %si, (%rbp,%rcx,2)
incq %rcx
addq $-0x2, %rax
jmp 0x2ae4e5
cmpl $0xb, %edx
jg 0x2ae526
addl 0x28(%rsp), %edx
movslq %edx, %rcx
movq 0x38(%rsp), %rax
leaq (%r15,%rax), %rdx
movq %r12, %rax
cmpq %rcx, %rax
jge 0x2ae573
movzwl (%rdx,%rax,2), %esi
movw %si, (%rbp,%rax,2)
incq %rax
jmp 0x2ae513
movq 0x18(%rsp), %rax
leaq (,%rax,2), %rdi
addq %rbp, %rdi
addl %edx, %edx
movq %r15, %rsi
movl %r13d, 0x50(%rsp)
movq %r10, %r13
movl %r8d, %r14d
callq 0x5f3c0
movl %r14d, %r8d
movq 0x20(%rsp), %r11
movq %r13, %r10
movl 0x50(%rsp), %r13d
movq 0x30(%rsp), %r14
movq 0x10(%rsp), %r9
movq 0x40(%rsp), %rdi
movl 0x2c(%rdi), %edx
leal (%rdx,%r12), %eax
jmp 0x2ae576
movl 0x2c(%rdi), %edx
cltq
leal (%r14,%rdx,2), %ecx
subl %eax, %ecx
cmpq %rbx, %rax
jge 0x2ae597
movslq %ecx, %rcx
movzwl (%r15,%rcx,2), %esi
movw %si, (%rbp,%rax,2)
incq %rax
decl %ecx
jmp 0x2ae57e
movslq %edx, %rax
leaq (%r15,%rax,2), %r15
addq %r9, %rbp
incl %r13d
addq %r9, %r10
jmp 0x2ae4d0
leal (%rdx,%rdx), %eax
cltq
addq %rax, %rax
subq %rax, %r15
cmpl 0x4c(%rsp), %r13d
jge 0x2ae688
leaq (%r15,%r11), %rax
xorl %ecx, %ecx
cmpq %rcx, %r12
je 0x2ae5de
movzwl (%rax), %esi
movw %si, (%rbp,%rcx,2)
incq %rcx
addq $-0x2, %rax
jmp 0x2ae5c8
cmpl $0xb, %edx
jg 0x2ae610
addl 0x28(%rsp), %edx
movslq %edx, %rcx
movq 0x38(%rsp), %rax
leaq (%r15,%rax), %rdx
movq %r10, %rsi
movq %r12, %rax
cmpq %rcx, %rax
jge 0x2ae64d
movzwl (%rdx,%rax,2), %r8d
movw %r8w, (%rsi)
incq %rax
addq $0x2, %rsi
jmp 0x2ae5f9
movq 0x18(%rsp), %rax
leaq (,%rax,2), %rdi
addq %rbp, %rdi
addl %edx, %edx
movq %r15, %rsi
movq %r10, %r14
callq 0x5f3c0
movq 0x20(%rsp), %r11
movq %r14, %r10
movq 0x30(%rsp), %r14
movq 0x10(%rsp), %r9
movq 0x40(%rsp), %rdi
movl 0x2c(%rdi), %edx
leal (%rdx,%r12), %eax
jmp 0x2ae650
movl 0x2c(%rdi), %edx
cltq
leal (%r14,%rdx,2), %ecx
subl %eax, %ecx
cmpq %rbx, %rax
jge 0x2ae671
movslq %ecx, %rcx
movzwl (%r15,%rcx,2), %esi
movw %si, (%rbp,%rax,2)
incq %rax
decl %ecx
jmp 0x2ae658
addq %r9, %rbp
movslq %edx, %rax
addq %rax, %rax
subq %rax, %r15
incl %r13d
addq %r9, %r10
jmp 0x2ae5b7
addq $0x58, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
| /csukuangfj[P]ncnn/src/layer/padding.cpp |
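The unsigned short instantiation above handles type 1 (replicate) by reading ptr[0] for the left overhang and ptr[src.w - 1] for the right one, and by not advancing ptr across the top rows (and rewinding it one row for the bottom) — every out-of-range coordinate is clamped to the nearest source pixel. A hedged per-pixel reformulation of that rule; clampi and pad_replicate are illustrative names, not ncnn API, and this form trades the original's memcpy fast path (rows of 12+ elements) for clarity:

#include <algorithm>
#include <cstddef>

static inline int clampi(int v, int lo, int hi)
{
    return std::min(std::max(v, lo), hi);
}

// Every destination pixel reads the nearest in-range source pixel.
void pad_replicate(const unsigned short* src, int srcw, int srch,
                   unsigned short* dst, int top, int left, int w, int h)
{
    for (int y = 0; y < h; y++)
        for (int x = 0; x < w; x++)
            dst[(size_t)y * w + x] =
                src[(size_t)clampi(y - top, 0, srch - 1) * srcw
                    + clampi(x - left, 0, srcw - 1)];
}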
void ncnn::copy_make_border_image<float>(ncnn::Mat const&, ncnn::Mat&, int, int, int, float) | static void copy_make_border_image(const Mat& src, Mat& dst, int top, int left, int type, T v)
{
int w = dst.w;
int h = dst.h;
const T* ptr = src;
T* outptr = dst;
if (type == 0)
{
int y = 0;
// fill top
for (; y < top; y++)
{
int x = 0;
for (; x < w; x++)
{
outptr[x] = v;
}
outptr += w;
}
// fill center
for (; y < (top + src.h); y++)
{
int x = 0;
for (; x < left; x++)
{
outptr[x] = v;
}
if (src.w < 12)
{
for (; x < (left + src.w); x++)
{
outptr[x] = ptr[x - left];
}
}
else
{
memcpy(outptr + left, ptr, src.w * sizeof(T));
x += src.w;
}
for (; x < w; x++)
{
outptr[x] = v;
}
ptr += src.w;
outptr += w;
}
// fill bottom
for (; y < h; y++)
{
int x = 0;
for (; x < w; x++)
{
outptr[x] = v;
}
outptr += w;
}
}
if (type == 1)
{
int y = 0;
// fill top
for (; y < top; y++)
{
int x = 0;
for (; x < left; x++)
{
outptr[x] = ptr[0];
}
if (src.w < 12)
{
for (; x < (left + src.w); x++)
{
outptr[x] = ptr[x - left];
}
}
else
{
memcpy(outptr + left, ptr, src.w * sizeof(T));
x += src.w;
}
for (; x < w; x++)
{
outptr[x] = ptr[src.w - 1];
}
outptr += w;
}
// fill center
for (; y < (top + src.h); y++)
{
int x = 0;
for (; x < left; x++)
{
outptr[x] = ptr[0];
}
if (src.w < 12)
{
for (; x < (left + src.w); x++)
{
outptr[x] = ptr[x - left];
}
}
else
{
memcpy(outptr + left, ptr, src.w * sizeof(T));
x += src.w;
}
for (; x < w; x++)
{
outptr[x] = ptr[src.w - 1];
}
ptr += src.w;
outptr += w;
}
// fill bottom
ptr -= src.w;
for (; y < h; y++)
{
int x = 0;
for (; x < left; x++)
{
outptr[x] = ptr[0];
}
if (src.w < 12)
{
for (; x < (left + src.w); x++)
{
outptr[x] = ptr[x - left];
}
}
else
{
memcpy(outptr + left, ptr, src.w * sizeof(T));
x += src.w;
}
for (; x < w; x++)
{
outptr[x] = ptr[src.w - 1];
}
outptr += w;
}
}
if (type == 2)
{
int y = 0;
// fill top
ptr += top * src.w;
for (; y < top; y++)
{
int x = 0;
for (; x < left; x++)
{
outptr[x] = ptr[left - x];
}
if (src.w < 12)
{
for (; x < (left + src.w); x++)
{
outptr[x] = ptr[x - left];
}
}
else
{
memcpy(outptr + left, ptr, src.w * sizeof(T));
x += src.w;
}
for (; x < w; x++)
{
outptr[x] = ptr[src.w - (x - left - src.w) - 2];
}
outptr += w;
ptr -= src.w;
}
// fill center
for (; y < (top + src.h); y++)
{
int x = 0;
for (; x < left; x++)
{
outptr[x] = ptr[left - x];
}
if (src.w < 12)
{
for (; x < (left + src.w); x++)
{
outptr[x] = ptr[x - left];
}
}
else
{
memcpy(outptr + left, ptr, src.w * sizeof(T));
x += src.w;
}
for (; x < w; x++)
{
outptr[x] = ptr[src.w - (x - left - src.w) - 2];
}
ptr += src.w;
outptr += w;
}
// fill bottom
ptr -= 2 * src.w;
for (; y < h; y++)
{
int x = 0;
for (; x < left; x++)
{
outptr[x] = ptr[left - x];
}
if (src.w < 12)
{
for (; x < (left + src.w); x++)
{
outptr[x] = ptr[x - left];
}
}
else
{
memcpy(outptr + left, ptr, src.w * sizeof(T));
x += src.w;
}
for (; x < w; x++)
{
outptr[x] = ptr[src.w - (x - left - src.w) - 2];
}
outptr += w;
ptr -= src.w;
}
}
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x58, %rsp
movslq 0x2c(%rsi), %rbx
movl 0x30(%rsi), %eax
movl %eax, 0x4c(%rsp)
movq (%rdi), %r12
movq (%rsi), %rbp
movq %rcx, 0x30(%rsp)
movslq %ecx, %rax
movq %rax, 0x18(%rsp)
testl %r8d, %r8d
movq %rdi, 0x40(%rsp)
movl %edx, 0xc(%rsp)
jne 0x2ae81b
movl %r8d, 0x38(%rsp)
xorl %eax, %eax
testl %ebx, %ebx
movl $0x0, %r14d
cmovgl %ebx, %r14d
testl %edx, %edx
movl $0x0, %r9d
cmovgl %edx, %r9d
leaq (,%rbx,4), %r15
cmpl %r9d, %eax
je 0x2ae71a
xorl %ecx, %ecx
cmpq %rcx, %r14
je 0x2ae713
movss %xmm0, (%rbp,%rcx,4)
incq %rcx
jmp 0x2ae703
incl %eax
addq %r15, %rbp
jmp 0x2ae6fc
xorl %r13d, %r13d
movq 0x18(%rsp), %rax
testl %eax, %eax
cmovgl 0x30(%rsp), %r13d
leaq (,%rax,4), %rax
negq %rax
movq %rax, 0x20(%rsp)
movss %xmm0, 0x28(%rsp)
movl 0x30(%rdi), %eax
addl 0xc(%rsp), %eax
cmpl %eax, %r9d
jge 0x2ae80b
xorl %eax, %eax
cmpq %rax, %r13
je 0x2ae762
movss %xmm0, (%rbp,%rax,4)
incq %rax
jmp 0x2ae752
movslq 0x2c(%rdi), %rdx
cmpq $0xb, %rdx
jg 0x2ae797
movq 0x30(%rsp), %rax
addl %edx, %eax
movslq %eax, %rcx
movq 0x20(%rsp), %rax
leaq (%r12,%rax), %rsi
movq %r13, %rax
cmpq %rcx, %rax
jge 0x2ae7cf
movss (%rsi,%rax,4), %xmm1
movss %xmm1, (%rbp,%rax,4)
incq %rax
jmp 0x2ae782
movq 0x18(%rsp), %rax
leaq (,%rax,4), %rdi
addq %rbp, %rdi
shlq $0x2, %rdx
movq %r12, %rsi
movl %r9d, 0x10(%rsp)
callq 0x5f3c0
movl 0x10(%rsp), %r9d
movss 0x28(%rsp), %xmm0
movq 0x40(%rsp), %rdi
movl 0x2c(%rdi), %edx
leal (%rdx,%r13), %eax
cltq
cmpq %rbx, %rax
jge 0x2ae7e1
movss %xmm0, (%rbp,%rax,4)
incq %rax
jmp 0x2ae7d1
movslq %edx, %rax
leaq (%r12,%rax,4), %r12
addq %r15, %rbp
incl %r9d
jmp 0x2ae740
xorl %eax, %eax
cmpq %rax, %r14
je 0x2ae805
movss %xmm0, (%rbp,%rax,4)
incq %rax
jmp 0x2ae7f5
addq %r15, %rbp
incl %r9d
cmpl 0x4c(%rsp), %r9d
jl 0x2ae7f3
movl 0xc(%rsp), %edx
movl 0x38(%rsp), %r8d
cmpl $0x1, %r8d
jne 0x2aeaef
movl %r8d, 0x38(%rsp)
xorl %r10d, %r10d
movq 0x18(%rsp), %rax
testl %eax, %eax
movl $0x0, %r14d
cmovgl 0x30(%rsp), %r14d
testl %edx, %edx
movl $0x0, %esi
cmovgl %edx, %esi
leaq (,%rbx,4), %r8
leaq (%rbp,%r14,4), %r9
leaq (,%rax,4), %rax
movq %r12, %r15
movq %rax, 0x50(%rsp)
subq %rax, %r15
movq %r8, 0x10(%rsp)
movl %esi, 0x28(%rsp)
cmpl %esi, %r10d
je 0x2ae92f
xorl %eax, %eax
cmpq %rax, %r14
je 0x2ae894
movss (%r12), %xmm0
movss %xmm0, (%rbp,%rax,4)
incq %rax
jmp 0x2ae87e
movslq 0x2c(%rdi), %rdx
cmpq $0xb, %rdx
jg 0x2ae8be
addl 0x30(%rsp), %edx
movslq %edx, %rcx
movq %r14, %rax
cmpq %rcx, %rax
jge 0x2ae901
movss (%r15,%rax,4), %xmm0
movss %xmm0, (%rbp,%rax,4)
incq %rax
jmp 0x2ae8a8
movq 0x18(%rsp), %rax
leaq (,%rax,4), %rdi
addq %rbp, %rdi
shlq $0x2, %rdx
movq %r12, %rsi
movq %r9, %r13
movq %r10, 0x20(%rsp)
callq 0x5f3c0
movq 0x20(%rsp), %r10
movq %r13, %r9
movq 0x10(%rsp), %r8
movl 0x28(%rsp), %esi
movq 0x40(%rsp), %rdi
movl 0x2c(%rdi), %ecx
leal (%rcx,%r14), %eax
jmp 0x2ae904
movl 0x2c(%rdi), %ecx
decl %ecx
movslq %ecx, %rcx
cltq
cmpq %rbx, %rax
jge 0x2ae921
movss (%r12,%rcx,4), %xmm0
movss %xmm0, (%rbp,%rax,4)
incq %rax
jmp 0x2ae90b
addq %r8, %rbp
incl %r10d
addq %r8, %r9
jmp 0x2ae873
movq 0x50(%rsp), %rax
negq %rax
movq %rax, 0x20(%rsp)
movl 0xc(%rsp), %r10d
movl 0x30(%rdi), %eax
addl %r10d, %eax
cmpl %eax, %esi
jge 0x2aea0e
xorl %eax, %eax
cmpq %rax, %r14
je 0x2ae967
movss (%r12), %xmm0
movss %xmm0, (%rbp,%rax,4)
incq %rax
jmp 0x2ae951
movslq 0x2c(%rdi), %rdx
cmpq $0xb, %rdx
jg 0x2ae999
addl 0x30(%rsp), %edx
movslq %edx, %rax
movq 0x20(%rsp), %rcx
leaq (%r12,%rcx), %rdx
movq %r14, %rcx
cmpq %rax, %rcx
jge 0x2ae9d9
movss (%rdx,%rcx,4), %xmm0
movss %xmm0, (%rbp,%rcx,4)
incq %rcx
jmp 0x2ae984
movq 0x18(%rsp), %rax
leaq (,%rax,4), %rdi
addq %rbp, %rdi
shlq $0x2, %rdx
movl %esi, %r13d
movq %r12, %rsi
movq %r9, %r15
callq 0x5f3c0
movq %r15, %r9
movq 0x10(%rsp), %r8
movl %r13d, %esi
movl 0xc(%rsp), %r10d
movq 0x40(%rsp), %rdi
movl 0x2c(%rdi), %eax
leal (%rax,%r14), %ecx
jmp 0x2ae9dc
movl 0x2c(%rdi), %eax
leal -0x1(%rax), %edx
movslq %edx, %rdx
movslq %ecx, %rcx
cmpq %rbx, %rcx
jge 0x2ae9fb
movss (%r12,%rdx,4), %xmm0
movss %xmm0, (%rbp,%rcx,4)
incq %rcx
jmp 0x2ae9e5
cltq
leaq (%r12,%rax,4), %r12
addq %r8, %rbp
incl %esi
addq %r8, %r9
jmp 0x2ae941
movslq 0x2c(%rdi), %rax
leaq (,%rax,4), %rcx
movq %r12, %r13
subq %rcx, %r13
movq 0x50(%rsp), %rcx
leaq (%rcx,%rax,4), %rcx
subq %rcx, %r12
cmpl 0x4c(%rsp), %esi
jge 0x2aeae3
xorl %ecx, %ecx
cmpq %rcx, %r14
je 0x2aea4e
movss (%r13), %xmm0
movss %xmm0, (%rbp,%rcx,4)
incq %rcx
jmp 0x2aea38
cmpl $0xb, %eax
jg 0x2aea77
addl 0x30(%rsp), %eax
cltq
movq %r9, %rdx
movq %r14, %rcx
cmpq %rax, %rcx
jge 0x2aeab6
movss (%r12,%rcx,4), %xmm0
movss %xmm0, (%rdx)
incq %rcx
addq $0x4, %rdx
jmp 0x2aea5f
movq 0x18(%rsp), %rcx
leaq (,%rcx,4), %rdi
addq %rbp, %rdi
movl %eax, %edx
shlq $0x2, %rdx
movl %esi, 0x28(%rsp)
movq %r13, %rsi
movq %r9, %r15
callq 0x5f3c0
movq %r15, %r9
movq 0x10(%rsp), %r8
movl 0x28(%rsp), %esi
movq 0x40(%rsp), %rdi
movl 0x2c(%rdi), %eax
leal (%rax,%r14), %ecx
jmp 0x2aeab9
movl 0x2c(%rdi), %eax
movslq %eax, %rdx
movslq %ecx, %rcx
cmpq %rbx, %rcx
jge 0x2aead6
movss -0x4(%r13,%rdx,4), %xmm0
movss %xmm0, (%rbp,%rcx,4)
incq %rcx
jmp 0x2aeabf
addq %r8, %rbp
incl %esi
addq %r8, %r9
jmp 0x2aea2c
movq %r13, %r12
movl 0x38(%rsp), %r8d
movl 0xc(%rsp), %edx
cmpl $0x2, %r8d
jne 0x2aedff
movl 0x2c(%rdi), %eax
movl %eax, %ecx
imull %edx, %ecx
movslq %ecx, %rcx
leaq (%r12,%rcx,4), %r15
xorl %esi, %esi
movq 0x18(%rsp), %rcx
testl %ecx, %ecx
movl $0x0, %r12d
cmovgl %ecx, %r12d
testl %edx, %edx
movl $0x0, %r9d
cmovgl %edx, %r9d
leaq (,%rbx,4), %rdx
movq %rdx, 0x10(%rsp)
leaq (,%r12,4), %r13
addq %rbp, %r13
leaq (,%rcx,4), %r10
movq %r10, %rdx
negq %rdx
movq %rdx, 0x20(%rsp)
leal -0x2(%rcx), %r11d
movq %r10, 0x28(%rsp)
movq %r11, 0x38(%rsp)
cmpl %r9d, %esi
je 0x2aec3c
leaq (%r15,%r10), %rcx
xorl %edx, %edx
cmpq %rdx, %r12
je 0x2aeb87
movss (%rcx), %xmm0
movss %xmm0, (%rbp,%rdx,4)
incq %rdx
addq $-0x4, %rcx
jmp 0x2aeb6f
cmpl $0xb, %eax
jg 0x2aebb3
addl 0x30(%rsp), %eax
cltq
movq 0x20(%rsp), %rcx
leaq (%r15,%rcx), %rdx
movq %r12, %rcx
cmpq %rax, %rcx
jge 0x2aebf9
movss (%rdx,%rcx,4), %xmm0
movss %xmm0, (%rbp,%rcx,4)
incq %rcx
jmp 0x2aeb9e
movq 0x18(%rsp), %rcx
leaq (,%rcx,4), %rdi
addq %rbp, %rdi
movl %eax, %edx
shlq $0x2, %rdx
movq %rsi, 0x50(%rsp)
movq %r15, %rsi
movl %r9d, %r14d
callq 0x5f3c0
movq 0x50(%rsp), %rsi
movq 0x38(%rsp), %r11
movq 0x28(%rsp), %r10
movl %r14d, %r9d
movq 0x40(%rsp), %rdi
movl 0x2c(%rdi), %eax
leal (%rax,%r12), %ecx
jmp 0x2aebfc
movl 0x2c(%rdi), %eax
movslq %ecx, %rcx
leal (%r11,%rax,2), %edx
subl %ecx, %edx
cmpq %rbx, %rcx
jge 0x2aec20
movslq %edx, %rdx
movss (%r15,%rdx,4), %xmm0
movss %xmm0, (%rbp,%rcx,4)
incq %rcx
decl %edx
jmp 0x2aec05
movq 0x10(%rsp), %rdx
addq %rdx, %rbp
movslq %eax, %rcx
shlq $0x2, %rcx
subq %rcx, %r15
incl %esi
addq %rdx, %r13
jmp 0x2aeb60
movq 0x10(%rsp), %r14
movl 0x30(%rdi), %ecx
addl 0xc(%rsp), %ecx
cmpl %ecx, %r9d
jge 0x2aed18
leaq (%r15,%r10), %rcx
xorl %edx, %edx
cmpq %rdx, %r12
je 0x2aec6f
movss (%rcx), %xmm0
movss %xmm0, (%rbp,%rdx,4)
incq %rdx
addq $-0x4, %rcx
jmp 0x2aec57
cmpl $0xb, %eax
jg 0x2aec9b
addl 0x30(%rsp), %eax
cltq
movq 0x20(%rsp), %rcx
leaq (%r15,%rcx), %rdx
movq %r12, %rcx
cmpq %rax, %rcx
jge 0x2aecdc
movss (%rdx,%rcx,4), %xmm0
movss %xmm0, (%rbp,%rcx,4)
incq %rcx
jmp 0x2aec86
movq 0x18(%rsp), %rcx
leaq (,%rcx,4), %rdi
addq %rbp, %rdi
movl %eax, %edx
shlq $0x2, %rdx
movq %r15, %rsi
movl %r9d, %r14d
callq 0x5f3c0
movq 0x38(%rsp), %r11
movq 0x28(%rsp), %r10
movl %r14d, %r9d
movq 0x10(%rsp), %r14
movq 0x40(%rsp), %rdi
movl 0x2c(%rdi), %eax
leal (%rax,%r12), %ecx
jmp 0x2aecdf
movl 0x2c(%rdi), %eax
movslq %ecx, %rcx
leal (%r11,%rax,2), %edx
subl %ecx, %edx
cmpq %rbx, %rcx
jge 0x2aed03
movslq %edx, %rdx
movss (%r15,%rdx,4), %xmm0
movss %xmm0, (%rbp,%rcx,4)
incq %rcx
decl %edx
jmp 0x2aece8
movslq %eax, %rcx
leaq (%r15,%rcx,4), %r15
addq %r14, %rbp
incl %r9d
addq %r14, %r13
jmp 0x2aec41
leal (%rax,%rax), %ecx
movslq %ecx, %rcx
shlq $0x2, %rcx
subq %rcx, %r15
cmpl 0x4c(%rsp), %r9d
jge 0x2aedff
leaq (%r15,%r10), %rcx
xorl %edx, %edx
cmpq %rdx, %r12
je 0x2aed4e
movss (%rcx), %xmm0
movss %xmm0, (%rbp,%rdx,4)
incq %rdx
addq $-0x4, %rcx
jmp 0x2aed36
cmpl $0xb, %eax
jg 0x2aed7f
addl 0x30(%rsp), %eax
cltq
movq 0x20(%rsp), %rcx
leaq (%r15,%rcx), %rdx
movq %r13, %rsi
movq %r12, %rcx
cmpq %rax, %rcx
jge 0x2aedc0
movss (%rdx,%rcx,4), %xmm0
movss %xmm0, (%rsi)
incq %rcx
addq $0x4, %rsi
jmp 0x2aed68
movq 0x18(%rsp), %rcx
leaq (,%rcx,4), %rdi
addq %rbp, %rdi
movl %eax, %edx
shlq $0x2, %rdx
movq %r15, %rsi
movl %r9d, %r14d
callq 0x5f3c0
movq 0x38(%rsp), %r11
movq 0x28(%rsp), %r10
movl %r14d, %r9d
movq 0x10(%rsp), %r14
movq 0x40(%rsp), %rdi
movl 0x2c(%rdi), %eax
leal (%rax,%r12), %ecx
jmp 0x2aedc3
movl 0x2c(%rdi), %eax
movslq %ecx, %rcx
leal (%r11,%rax,2), %edx
subl %ecx, %edx
cmpq %rbx, %rcx
jge 0x2aede7
movslq %edx, %rdx
movss (%r15,%rdx,4), %xmm0
movss %xmm0, (%rbp,%rcx,4)
incq %rcx
decl %edx
jmp 0x2aedcc
addq %r14, %rbp
movslq %eax, %rcx
shlq $0x2, %rcx
subq %rcx, %r15
incl %r9d
addq %r14, %r13
jmp 0x2aed25
addq $0x58, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
| /csukuangfj[P]ncnn/src/layer/padding.cpp |
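In the float instantiation above, type 2 (reflect) mirrors around the border pixel without repeating it. With sx = x - left as the source coordinate, ptr[left - x] is the left-overhang map sx -> -sx, and ptr[src.w - (x - left - src.w) - 2] simplifies to 2*(src.w - 1) - sx on the right; the row bookkeeping (ptr += top * src.w before the top loop, ptr -= 2 * src.w before the bottom loop) applies the same mirror vertically. A scalar sketch of that index rule, valid under the original's implicit assumption that left < src.w and top < src.h (one reflection suffices); mirror101 and pad_reflect are illustrative names, not ncnn API:

#include <cstddef>

// Mirror an out-of-range coordinate across the border pixel (reflect-101).
static inline int mirror101(int sx, int n)
{
    if (sx < 0) return -sx;                 // matches ptr[left - x]
    if (sx >= n) return 2 * (n - 1) - sx;   // matches ptr[src.w - (x-left-src.w) - 2]
    return sx;
}

void pad_reflect(const float* src, int srcw, int srch,
                 float* dst, int top, int left, int w, int h)
{
    for (int y = 0; y < h; y++)
        for (int x = 0; x < w; x++)
            dst[(size_t)y * w + x] =
                src[(size_t)mirror101(y - top, srch) * srcw
                    + mirror101(x - left, srcw)];
}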
ncnn::Padding_x86::forward(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const | int Padding_x86::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
if (top == 0 && bottom == 0 && left == 0 && right == 0 && front == 0 && behind == 0)
{
top_blob = bottom_blob;
return 0;
}
int elembits = bottom_blob.elembits();
if (elembits == 8)
return forward_int8(bottom_blob, top_blob, opt);
int w = bottom_blob.w;
int h = bottom_blob.h;
int d = bottom_blob.d;
int channels = bottom_blob.c;
int dims = bottom_blob.dims;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
#if __SSE2__
#if __AVX__
#if __AVX512F__
if (elempack == 16)
{
if (dims == 1)
{
int outw = w * elempack + left + right;
int out_elempack = outw % 16 == 0 ? 16 : outw % 8 == 0 ? 8 : outw % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (left % 16 == 0 && out_elempack == 16 && type == 0)
{
top_blob.create(outw / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
__m512 pad_value = _mm512_set1_ps(value);
padding_constant_pack16_avx512(bottom_blob, top_blob, 0, 0, left / 16, right / 16, pad_value);
return 0;
}
}
if (dims == 2)
{
int outw = w + left + right;
int outh = h * elempack + top + bottom;
int out_elempack = outh % 16 == 0 ? 16 : outh % 8 == 0 ? 8 : outh % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (top % 16 == 0 && out_elempack == 16 && type == 0)
{
top_blob.create(outw, outh / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
__m512 pad_value = _mm512_set1_ps(value);
padding_constant_pack16_avx512(bottom_blob, top_blob, top / 16, bottom / 16, left, right, pad_value);
return 0;
}
}
if (dims == 3)
{
int outw = w + left + right;
int outh = h + top + bottom;
int outc = channels * elempack + front + behind;
int out_elempack = outc % 16 == 0 ? 16 : outc % 8 == 0 ? 8 : outc % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (front % 16 == 0 && out_elempack == 16 && !(outc != channels * elempack && type != 0))
{
top_blob.create(outw, outh, outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
int front_ = front / elempack;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < outc / out_elempack; q++)
{
Mat borderm = top_blob.channel(q);
__m512 pad_value = per_channel_pad_data_size ? _mm512_loadu_ps((const float*)per_channel_pad_data + q * 16) : _mm512_set1_ps(value);
//Channel padding
if ((q - front_) < 0 || (q - front_) >= channels)
{
borderm.fill(pad_value);
}
else
{
const Mat m = bottom_blob.channel(q - front_);
if (type == 0)
padding_constant_pack16_avx512(m, borderm, top, bottom, left, right, pad_value);
if (type == 1)
padding_replicate_pack16_avx512(m, borderm, top, bottom, left, right);
if (type == 2)
padding_reflect_pack16_avx512(m, borderm, top, bottom, left, right);
}
}
return 0;
}
}
if (dims == 4)
{
int outw = w + left + right;
int outh = h + top + bottom;
int outd = d + front + behind;
if (type == 0)
{
top_blob.create(outw, outh, outd, channels, elemsize, elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
__m512 pad_value = per_channel_pad_data_size ? _mm512_loadu_ps((const float*)per_channel_pad_data + q * 16) : _mm512_set1_ps(value);
for (int z = 0; z < outd; z++)
{
Mat borderm = top_blob.channel(q).depth(z);
// depth padding
if ((z - front) < 0 || (z - front) >= d)
{
borderm.fill(pad_value);
}
else
{
const Mat m = bottom_blob.channel(q).depth(z - front);
padding_constant_pack16_avx512(m, borderm, top, bottom, left, right, pad_value);
}
}
}
return 0;
}
}
}
#endif // __AVX512F__
if (elempack == 8)
{
if (dims == 1)
{
int outw = w * elempack + left + right;
int out_elempack = outw % 8 == 0 ? 8 : outw % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (left % 8 == 0 && out_elempack == 8 && type == 0)
{
top_blob.create(outw / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
__m256 pad_value = _mm256_set1_ps(value);
padding_constant_pack8_avx(bottom_blob, top_blob, 0, 0, left / 8, right / 8, pad_value);
return 0;
}
}
if (dims == 2)
{
int outw = w + left + right;
int outh = h * elempack + top + bottom;
int out_elempack = outh % 8 == 0 ? 8 : outh % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (top % 8 == 0 && out_elempack == 8 && type == 0)
{
top_blob.create(outw, outh / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
__m256 pad_value = _mm256_set1_ps(value);
padding_constant_pack8_avx(bottom_blob, top_blob, top / 8, bottom / 8, left, right, pad_value);
return 0;
}
}
if (dims == 3)
{
int outw = w + left + right;
int outh = h + top + bottom;
int outc = channels * elempack + front + behind;
int out_elempack = outc % 8 == 0 ? 8 : outc % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (front % 8 == 0 && out_elempack == 8 && !(outc != channels * elempack && type != 0))
{
top_blob.create(outw, outh, outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
int front_ = front / elempack;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < outc / out_elempack; q++)
{
Mat borderm = top_blob.channel(q);
__m256 pad_value = per_channel_pad_data_size ? _mm256_loadu_ps((const float*)per_channel_pad_data + q * 8) : _mm256_set1_ps(value);
//Channel padding
if ((q - front_) < 0 || (q - front_) >= channels)
{
borderm.fill(pad_value);
}
else
{
const Mat m = bottom_blob.channel(q - front_);
if (type == 0)
padding_constant_pack8_avx(m, borderm, top, bottom, left, right, pad_value);
if (type == 1)
padding_replicate_pack8_avx(m, borderm, top, bottom, left, right);
if (type == 2)
padding_reflect_pack8_avx(m, borderm, top, bottom, left, right);
}
}
return 0;
}
}
if (dims == 4)
{
int outw = w + left + right;
int outh = h + top + bottom;
int outd = d + front + behind;
if (type == 0)
{
top_blob.create(outw, outh, outd, channels, elemsize, elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
__m256 pad_value = per_channel_pad_data_size ? _mm256_loadu_ps((const float*)per_channel_pad_data + q * 8) : _mm256_set1_ps(value);
for (int z = 0; z < outd; z++)
{
Mat borderm = top_blob.channel(q).depth(z);
// depth padding
if ((z - front) < 0 || (z - front) >= d)
{
borderm.fill(pad_value);
}
else
{
const Mat m = bottom_blob.channel(q).depth(z - front);
padding_constant_pack8_avx(m, borderm, top, bottom, left, right, pad_value);
}
}
}
return 0;
}
}
}
#endif // __AVX__
if (elempack == 4)
{
if (dims == 1)
{
int outw = w * elempack + left + right;
#if __AVX__
int out_elempack = outw % 8 == 0 ? 8 : outw % 4 == 0 ? 4 : 1;
#else
int out_elempack = outw % 4 == 0 ? 4 : 1;
#endif
size_t out_elemsize = elemsize / elempack * out_elempack;
if (left % 4 == 0 && out_elempack == 4 && type == 0)
{
top_blob.create(outw / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
__m128 pad_value = _mm_set1_ps(value);
padding_constant_pack4_sse(bottom_blob, top_blob, 0, 0, left / 4, right / 4, pad_value);
return 0;
}
}
if (dims == 2)
{
int outw = w + left + right;
int outh = h * elempack + top + bottom;
#if __AVX__
int out_elempack = outh % 8 == 0 ? 8 : outh % 4 == 0 ? 4 : 1;
#else
int out_elempack = outh % 4 == 0 ? 4 : 1;
#endif
size_t out_elemsize = elemsize / elempack * out_elempack;
if (top % 4 == 0 && out_elempack == 4 && type == 0)
{
top_blob.create(outw, outh / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
__m128 pad_value = _mm_set1_ps(value);
padding_constant_pack4_sse(bottom_blob, top_blob, top / 4, bottom / 4, left, right, pad_value);
return 0;
}
}
if (dims == 3)
{
int outw = w + left + right;
int outh = h + top + bottom;
int outc = channels * elempack + front + behind;
#if __AVX__
int out_elempack = outc % 8 == 0 ? 8 : outc % 4 == 0 ? 4 : 1;
#else
int out_elempack = outc % 4 == 0 ? 4 : 1;
#endif
size_t out_elemsize = elemsize / elempack * out_elempack;
if (front % 4 == 0 && out_elempack == 4 && !(outc != channels * elempack && type != 0))
{
top_blob.create(outw, outh, outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
int front_ = front / elempack;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < outc / out_elempack; q++)
{
Mat borderm = top_blob.channel(q);
__m128 pad_value = per_channel_pad_data_size ? _mm_loadu_ps((const float*)per_channel_pad_data + q * 4) : _mm_set1_ps(value);
//Channel padding
if ((q - front_) < 0 || (q - front_) >= channels)
{
borderm.fill(pad_value);
}
else
{
const Mat m = bottom_blob.channel(q - front_);
if (type == 0)
padding_constant_pack4_sse(m, borderm, top, bottom, left, right, pad_value);
if (type == 1)
padding_replicate_pack4_sse(m, borderm, top, bottom, left, right);
if (type == 2)
padding_reflect_pack4_sse(m, borderm, top, bottom, left, right);
}
}
return 0;
}
}
if (dims == 4)
{
int outw = w + left + right;
int outh = h + top + bottom;
int outd = d + front + behind;
if (type == 0)
{
top_blob.create(outw, outh, outd, channels, elemsize, elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
__m128 pad_value = per_channel_pad_data_size ? _mm_loadu_ps((const float*)per_channel_pad_data + q * 4) : _mm_set1_ps(value);
for (int z = 0; z < outd; z++)
{
Mat borderm = top_blob.channel(q).depth(z);
// depth padding
if ((z - front) < 0 || (z - front) >= d)
{
borderm.fill(pad_value);
}
else
{
const Mat m = bottom_blob.channel(q).depth(z - front);
padding_constant_pack4_sse(m, borderm, top, bottom, left, right, pad_value);
}
}
}
return 0;
}
}
}
#endif // __SSE2__
Mat bottom_blob_unpacked = bottom_blob;
if (elempack != 1)
{
Option opt_pack1 = opt;
opt_pack1.blob_allocator = opt.workspace_allocator;
convert_packing(bottom_blob, bottom_blob_unpacked, 1, opt_pack1);
}
return Padding::forward(bottom_blob_unpacked, top_blob, opt);
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xe8, %rsp
movq %rcx, %rbx
movq %rdx, %r15
movq %rsi, %rcx
movq (%rdi), %rax
movq -0x18(%rax), %rbp
movq %rdi, (%rsp)
movl 0xd0(%rdi,%rbp), %r10d
testl %r10d, %r10d
movq %rsi, 0x8(%rsp)
jne 0x2aeef1
movq (%rsp), %rax
cmpl $0x0, 0xd4(%rax,%rbp)
jne 0x2aeef1
movq (%rsp), %rax
cmpl $0x0, 0xd8(%rax,%rbp)
jne 0x2aeef1
movq (%rsp), %rax
cmpl $0x0, 0xdc(%rax,%rbp)
jne 0x2aeef1
movq (%rsp), %rax
cmpl $0x0, 0xe8(%rax,%rbp)
jne 0x2aeef1
movq (%rsp), %rax
cmpl $0x0, 0xec(%rax,%rbp)
je 0x2afc57
movl 0x18(%rcx), %r11d
movq 0x10(%rcx), %r9
testl %r11d, %r11d
je 0x2aef32
leal (,%r9,8), %eax
cltd
idivl %r11d
cmpl $0x8, %eax
jne 0x2aef32
movq (%rsp), %rdi
movq %rcx, %rsi
movq %r15, %rdx
movq %rbx, %rcx
addq $0xe8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp 0x2afd44
movl 0x28(%rcx), %r13d
movl 0x2c(%rcx), %esi
movl 0x30(%rcx), %r14d
movl 0x34(%rcx), %eax
movl %eax, 0x18(%rsp)
movl 0x38(%rcx), %eax
movq %rax, 0x10(%rsp)
cmpl $0x4, %r11d
jne 0x2afab9
leal -0x1(%r13), %eax
cmpl $0x3, %eax
ja 0x2afab9
leaq 0x1497d6(%rip), %rcx # 0x3f8740
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
pushq $-0x64
popq %rcx
movq %rcx, 0x20(%rsp)
jmpq *%rax
movq (%rsp), %rcx
movl 0xd8(%rcx,%rbp), %r8d
leal (%r8,%rsi,4), %eax
addl 0xdc(%rcx,%rbp), %eax
testb $0x3, %al
sete %r10b
movq %r9, %rdx
shrq $0x2, %rdx
leal (%r10,%r10), %ecx
shlq %cl, %rdx
testb $0x3, %r8b
sete %cl
andb %r10b, %cl
cmpb $0x1, %cl
movq 0x8(%rsp), %rcx
jne 0x2afab9
movq (%rsp), %rdi
cmpl $0x0, 0xe0(%rdi,%rbp)
jne 0x2afab9
sarl $0x2, %eax
movq 0x8(%rbx), %r8
pushq $0x4
popq %rbp
movq %r15, %rdi
movl %eax, %esi
movl %ebp, %ecx
callq 0x626da
cmpq $0x0, (%r15)
je 0x2afba1
movslq 0x38(%r15), %rax
imulq 0x40(%r15), %rax
testq %rax, %rax
je 0x2afba1
movq (%rsp), %rdx
movq (%rdx), %rax
movq -0x18(%rax), %rcx
movss 0xe4(%rdx,%rcx), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movl 0xd8(%rdx,%rcx), %eax
movl 0xdc(%rdx,%rcx), %ecx
cltd
idivl %ebp
movl %eax, %r8d
movl %ecx, %eax
cltd
idivl %ebp
movq $0x0, 0x20(%rsp)
movq 0x8(%rsp), %rdi
movq %r15, %rsi
xorl %edx, %edx
xorl %ecx, %ecx
movl %eax, %r9d
callq 0x2b0c1b
jmp 0x2afba1
movq (%rsp), %rdx
movl 0xe8(%rdx,%rbp), %eax
movq 0x10(%rsp), %rcx
leal (%rax,%rcx,4), %r12d
addl 0xec(%rdx,%rbp), %r12d
testb $0x3, %r12b
sete %dl
movq %r9, %r8
shrq $0x2, %r8
leal (%rdx,%rdx), %ecx
shlq %cl, %r8
testb $0x3, %al
sete %al
andb %dl, %al
cmpb $0x1, %al
movq 0x8(%rsp), %rcx
jne 0x2afab9
movq (%rsp), %rcx
movl 0xd8(%rcx,%rbp), %eax
addl %esi, %eax
addl 0xdc(%rcx,%rbp), %eax
addl %r14d, %r10d
addl 0xd4(%rcx,%rbp), %r10d
movq 0x10(%rsp), %rcx
leal (,%rcx,4), %ecx
cmpl %ecx, %r12d
movq 0x8(%rsp), %rcx
je 0x2af0e0
movq (%rsp), %rdx
cmpl $0x0, 0xe0(%rdx,%rbp)
jne 0x2afab9
sarl $0x2, %r12d
subq $0x8, %rsp
pushq $0x4
popq %r9
movq %r15, %rdi
movl %eax, %esi
movl %r10d, %edx
movl %r12d, %ecx
pushq 0x8(%rbx)
callq 0x628f2
addq $0x10, %rsp
cmpq $0x0, (%r15)
je 0x2afba1
movslq 0x38(%r15), %rax
imulq 0x40(%r15), %rax
testq %rax, %rax
je 0x2afba1
movq (%rsp), %rcx
movq (%rcx), %rax
movq -0x18(%rax), %rax
movl 0xe8(%rcx,%rax), %eax
pushq $-0x4
popq %rcx
cltd
idivl %ecx
movl %eax, %edx
xorl %ecx, %ecx
testl %r12d, %r12d
movl $0x0, %eax
movq %rax, 0x20(%rsp)
cmovlel %ecx, %r12d
movl %edx, 0xbc(%rsp)
movl %edx, %r14d
xorl %r9d, %r9d
movq %r12, 0xe0(%rsp)
movq (%rsp), %rbx
cmpq %r12, %r9
je 0x2afba1
movslq 0x2c(%r15), %rax
movslq 0x30(%r15), %rcx
movq 0x40(%r15), %r13
imulq %r9, %r13
movq 0x10(%r15), %rdi
imulq %rdi, %r13
addq (%r15), %r13
movl 0x34(%r15), %esi
movl 0x18(%r15), %edx
movq 0x20(%r15), %r8
movq %r13, 0x28(%rsp)
andq $0x0, 0x30(%rsp)
movq %rdi, 0x38(%rsp)
movl %edx, 0x40(%rsp)
movq %r8, 0x48(%rsp)
movl %eax, 0x54(%rsp)
movl %ecx, 0x58(%rsp)
movl $0x1, 0x5c(%rsp)
movl %esi, 0x60(%rsp)
imulq %rax, %rcx
movq %rdi, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rdi
movq %rax, 0x68(%rsp)
movl 0x28(%r15), %edx
leal -0x1(%rdx), %edi
movl %edi, 0x50(%rsp)
cmpl $0x4, %edx
jne 0x2af1f7
movq %rcx, 0x68(%rsp)
movq %rcx, %rax
movq (%rsp), %rdx
movq (%rdx), %rcx
movq -0x18(%rcx), %rdi
cmpl $0x0, 0xf0(%rdx,%rdi)
je 0x2af226
movq (%rsp), %rdx
movq 0xf8(%rdx,%rdi), %rdx
movq %r9, %r8
shlq $0x4, %r8
movups (%rdx,%r8), %xmm0
jmp 0x2af237
movq (%rsp), %rdx
movss 0xe4(%rdx,%rdi), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movl %r14d, %r14d
movl 0xbc(%rsp), %edx
movq %r9, 0x18(%rsp)
addl %r9d, %edx
setns %r8b
cmpl 0x10(%rsp), %edx
setl %r9b
testb %r9b, %r8b
je 0x2af417
movq %r14, 0xc0(%rsp)
movq 0x8(%rsp), %r10
movslq 0x2c(%r10), %rax
movslq 0x30(%r10), %rsi
movl 0x34(%r10), %r8d
movq (%r10), %r12
movq 0x10(%r10), %r14
movq 0x40(%r10), %r11
movl %edx, %ebp
imulq %r11, %rbp
imulq %r14, %rbp
addq %r12, %rbp
movl 0x18(%r10), %edx
movq 0x20(%r10), %r9
movq %rbp, 0x70(%rsp)
andq $0x0, 0x78(%rsp)
movq %r14, 0x80(%rsp)
movl %edx, 0x88(%rsp)
movq %r9, 0x90(%rsp)
movl %eax, 0x9c(%rsp)
movl %esi, 0xa0(%rsp)
movl $0x1, 0xa4(%rsp)
movl %r8d, 0xa8(%rsp)
imulq %rax, %rsi
movq %r14, %rax
imulq %rsi, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %r14
movq %rax, 0xb0(%rsp)
movl 0x28(%r10), %eax
leal -0x1(%rax), %edx
movl %edx, 0x98(%rsp)
cmpl $0x4, %eax
jne 0x2af315
movq %rsi, 0xb0(%rsp)
movq (%rsp), %rax
movl 0xe0(%rax,%rdi), %eax
testl %eax, %eax
jne 0x2af36f
movl 0xd0(%rbx,%rdi), %edx
movl 0xd4(%rbx,%rdi), %ecx
movl 0xd8(%rbx,%rdi), %r8d
movl 0xdc(%rbx,%rdi), %r9d
leaq 0x70(%rsp), %rdi
leaq 0x28(%rsp), %rsi
movq %r11, 0xd8(%rsp)
callq 0x2b0c1b
movq 0xd8(%rsp), %r11
movq (%rbx), %rcx
movq -0x18(%rcx), %rdi
movl 0xe0(%rbx,%rdi), %eax
cmpl $0x1, %eax
jne 0x2af521
movq (%rsp), %rsi
movl 0xd8(%rsi,%rdi), %eax
xorl %r8d, %r8d
testl %eax, %eax
cmovlel %r8d, %eax
movl 0xdc(%rsi,%rdi), %edx
testl %edx, %edx
cmovlel %r8d, %edx
movl 0xd0(%rsi,%rdi), %r9d
testl %r9d, %r9d
cmovlel %r8d, %r9d
movl 0xd4(%rsi,%rdi), %esi
imulq %r14, %r11
imulq 0xc0(%rsp), %r11
addq %r11, %r12
cmpl %r9d, %r8d
je 0x2af43d
movaps (%rbp), %xmm0
movl %eax, %edi
subl $0x1, %edi
jb 0x2af3da
movaps %xmm0, (%r13)
addq $0x10, %r13
jmp 0x2af3ca
xorl %edi, %edi
xorl %r10d, %r10d
cmpl 0x9c(%rsp), %r10d
jge 0x2af3fd
movaps (%r12,%rdi), %xmm0
movaps %xmm0, (%r13,%rdi)
incl %r10d
addq $0x10, %rdi
jmp 0x2af3df
addq %rdi, %r13
movl %edx, %edi
subl $0x1, %edi
jb 0x2af412
movaps %xmm0, (%r13)
addq $0x10, %r13
jmp 0x2af402
incl %r8d
jmp 0x2af3bf
imull %eax, %esi
testl %esi, %esi
movl $0x0, %eax
cmovlel %eax, %esi
movq 0x18(%rsp), %r9
subl $0x1, %esi
jb 0x2af7be
movups %xmm0, (%r13)
addq $0x10, %r13
jmp 0x2af429
xorl %edi, %edi
cmpl 0xa0(%rsp), %edi
jge 0x2af49b
movaps (%rbp), %xmm0
movl %eax, %r8d
subl $0x1, %r8d
jb 0x2af460
movaps %xmm0, (%r13)
addq $0x10, %r13
jmp 0x2af44f
xorl %r8d, %r8d
cmpl 0x9c(%rsp), %r8d
jge 0x2af483
movaps (%rbp), %xmm0
movaps %xmm0, (%r13)
addq $0x10, %rbp
addq $0x10, %r13
incl %r8d
jmp 0x2af463
movl %edx, %r8d
subl $0x1, %r8d
jb 0x2af497
movaps %xmm0, (%r13)
addq $0x10, %r13
jmp 0x2af486
incl %edi
jmp 0x2af43f
movl 0x9c(%rsp), %edi
shll $0x2, %edi
movslq %edi, %rdi
shlq $0x2, %rdi
subq %rdi, %rbp
xorl %edi, %edi
testl %esi, %esi
cmovlel %edi, %esi
cmpl %esi, %edi
je 0x2af512
movaps (%rbp), %xmm0
movl %eax, %r8d
subl $0x1, %r8d
jb 0x2af4d2
movaps %xmm0, (%r13)
addq $0x10, %r13
jmp 0x2af4c1
xorl %r8d, %r8d
xorl %r9d, %r9d
cmpl 0x9c(%rsp), %r9d
jge 0x2af4f7
movaps (%rbp,%r8), %xmm0
movaps %xmm0, (%r13,%r8)
incl %r9d
addq $0x10, %r8
jmp 0x2af4d8
addq %r8, %r13
movl %edx, %r8d
subl $0x1, %r8d
jb 0x2af50e
movaps %xmm0, (%r13)
addq $0x10, %r13
jmp 0x2af4fd
incl %edi
jmp 0x2af4b6
movq -0x18(%rcx), %rdi
movq (%rsp), %rax
movl 0xe0(%rax,%rdi), %eax
cmpl $0x2, %eax
pushq $-0x20
popq %r13
jne 0x2af748
movq (%rsp), %r8
movl 0xd0(%r8,%rdi), %r9d
movslq 0xd8(%r8,%rdi), %rax
movl 0x9c(%rsp), %ecx
imull %r9d, %ecx
shll $0x2, %ecx
movslq %ecx, %rcx
shlq $0x2, %rcx
addq 0x70(%rsp), %rcx
xorl %r10d, %r10d
testl %eax, %eax
movl $0x0, %edx
cmovgl %eax, %edx
movl 0xdc(%r8,%rdi), %esi
testl %esi, %esi
cmovlel %r10d, %esi
movl 0xd4(%r8,%rdi), %edi
testl %r9d, %r9d
cmovlel %r10d, %r9d
movq 0x28(%rsp), %r8
shlq $0x4, %rax
shlq $0x4, %rdx
shlq $0x4, %rsi
cmpl %r9d, %r10d
je 0x2af626
leaq (%rcx,%rax), %r11
xorl %r12d, %r12d
xorl %r14d, %r14d
movq %rdx, %r13
addq %r14, %r13
je 0x2af5c6
movaps (%r11,%r14), %xmm0
movaps %xmm0, (%r8,%r12)
addq $-0x10, %r14
addq $0x10, %r12
jmp 0x2af5aa
subq %r14, %r8
xorl %ebp, %ebp
movq %rcx, %r11
pushq $-0x20
popq %r13
cmpl 0x9c(%rsp), %ebp
jge 0x2af5ef
movaps (%r11), %xmm0
movaps %xmm0, (%r8)
addq $0x10, %r11
addq $0x10, %r8
incl %ebp
jmp 0x2af5d2
movq %r13, %r14
leaq (%rsi,%r14), %r12
cmpq $-0x20, %r12
je 0x2af60f
movaps (%r11,%r14), %xmm0
movaps %xmm0, (%r8)
addq $0x10, %r8
addq $-0x10, %r14
jmp 0x2af5f2
movslq 0x9c(%rsp), %r11
shlq $0x4, %r11
subq %r11, %rcx
incl %r10d
jmp 0x2af597
xorl %r9d, %r9d
cmpl 0xa0(%rsp), %r9d
jge 0x2af6a2
leaq (%rcx,%rax), %r10
xorl %r14d, %r14d
xorl %r11d, %r11d
movq %rdx, %r12
addq %r11, %r12
je 0x2af659
movaps (%r10,%r11), %xmm0
movaps %xmm0, (%r8,%r14)
addq $-0x10, %r11
addq $0x10, %r14
jmp 0x2af63d
subq %r11, %r8
xorl %r10d, %r10d
cmpl 0x9c(%rsp), %r10d
jge 0x2af67d
movaps (%rcx), %xmm0
movaps %xmm0, (%r8)
addq $0x10, %rcx
addq $0x10, %r8
incl %r10d
jmp 0x2af65f
movq %r13, %r10
leaq (%rsi,%r10), %r11
cmpq $-0x20, %r11
je 0x2af69d
movaps (%rcx,%r10), %xmm0
movaps %xmm0, (%r8)
addq $0x10, %r8
addq $-0x10, %r10
jmp 0x2af680
incl %r9d
jmp 0x2af629
movslq 0x9c(%rsp), %r9
shlq $0x5, %r9
subq %r9, %rcx
xorl %r9d, %r9d
testl %edi, %edi
cmovlel %r9d, %edi
cmpl %edi, %r9d
je 0x2af748
leaq (%rcx,%rax), %r10
xorl %r14d, %r14d
xorl %r11d, %r11d
movq %rdx, %r12
addq %r11, %r12
je 0x2af6e9
movaps (%r10,%r11), %xmm0
movaps %xmm0, (%r8,%r14)
addq $-0x10, %r11
addq $0x10, %r14
jmp 0x2af6cd
subq %r11, %r8
xorl %r11d, %r11d
movq %rcx, %r10
cmpl 0x9c(%rsp), %r11d
jge 0x2af711
movaps (%r10), %xmm0
movaps %xmm0, (%r8)
addq $0x10, %r10
addq $0x10, %r8
incl %r11d
jmp 0x2af6f2
movq %r13, %r11
leaq (%rsi,%r11), %r14
cmpq $-0x20, %r14
je 0x2af731
movaps (%r10,%r11), %xmm0
movaps %xmm0, (%r8)
addq $0x10, %r8
addq $-0x10, %r11
jmp 0x2af714
movslq 0x9c(%rsp), %r10
shlq $0x4, %r10
subq %r10, %rcx
incl %r9d
jmp 0x2af6ba
movq 0x78(%rsp), %rax
testq %rax, %rax
movq 0xc0(%rsp), %r14
je 0x2af781
lock
decl (%rax)
jne 0x2af781
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x2af779
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2af781
movq %rsi, %rdi
callq 0x5f3e0
movq 0x30(%rsp), %rax
testq %rax, %rax
movq 0x18(%rsp), %r9
je 0x2af7be
lock
decl (%rax)
jne 0x2af7be
movq 0x28(%rsp), %rsi
movq 0x48(%rsp), %rdi
testq %rdi, %rdi
je 0x2af7b1
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x18(%rsp), %r9
jmp 0x2af7be
movq %rsi, %rdi
callq 0x5f3e0
movq 0x18(%rsp), %r9
incq %r9
incl %r14d
movq 0xe0(%rsp), %r12
jmp 0x2af165
movq (%rsp), %rax
cmpl $0x0, 0xe0(%rax,%rbp)
movq 0x8(%rsp), %rcx
jne 0x2afab9
movq (%rsp), %rax
movl 0xe8(%rax,%rbp), %r12d
addl 0x18(%rsp), %r12d
addl 0xec(%rax,%rbp), %r12d
addl %r14d, %r10d
addl 0xd4(%rax,%rbp), %r10d
addl 0xd8(%rax,%rbp), %esi
addl 0xdc(%rax,%rbp), %esi
movq %r15, %rdi
movl %r10d, %edx
movl %r12d, %ecx
movq 0x10(%rsp), %r8
pushq 0x8(%rbx)
pushq $0x4
callq 0x62a26
addq $0x10, %rsp
cmpq $0x0, (%r15)
je 0x2afba1
movslq 0x38(%r15), %rax
imulq 0x40(%r15), %rax
testq %rax, %rax
je 0x2afba1
xorl %ecx, %ecx
testl %r12d, %r12d
cmovlel %ecx, %r12d
movq 0x10(%rsp), %rdx
testl %edx, %edx
movl $0x0, %eax
movq %rax, 0x20(%rsp)
cmovlel %ecx, %edx
movq %rdx, 0x10(%rsp)
movabsq $0x100000001, %r13 # imm = 0x100000001
xorl %ebx, %ebx
cmpq 0x10(%rsp), %rbx
je 0x2afba1
movq (%rsp), %rcx
movq (%rcx), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0xf0(%rcx,%rax)
je 0x2af8b9
movq (%rsp), %rcx
movq 0xf8(%rcx,%rax), %rax
movq %rbx, %rcx
shlq $0x4, %rcx
movups (%rax,%rcx), %xmm0
jmp 0x2af8ca
movq (%rsp), %rcx
movss 0xe4(%rcx,%rax), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
xorl %ebp, %ebp
movaps %xmm0, 0xc0(%rsp)
cmpq %r12, %rbp
je 0x2afa67
movslq 0x2c(%r15), %rdx
movslq 0x30(%r15), %rsi
movq 0x40(%r15), %rdi
imulq %rbx, %rdi
movq 0x10(%r15), %r8
imulq %r8, %rdi
addq (%r15), %rdi
movl 0x18(%r15), %r9d
movq 0x20(%r15), %r10
movq %rsi, %rax
imulq %rdx, %rax
movq %rbp, %rcx
imulq %r8, %rcx
imulq %rax, %rcx
addq %rdi, %rcx
movq %rcx, 0x28(%rsp)
andq $0x0, 0x30(%rsp)
movq %r8, 0x38(%rsp)
movl %r9d, 0x40(%rsp)
movq %r10, 0x48(%rsp)
movl $0x2, 0x50(%rsp)
movl %edx, 0x54(%rsp)
movl %esi, 0x58(%rsp)
movq %r13, 0x5c(%rsp)
movq %rax, 0x68(%rsp)
movq (%rsp), %rdi
movq (%rdi), %rdx
movq -0x18(%rdx), %rsi
movl 0xe8(%rdi,%rsi), %edi
movl %ebp, %edx
subl %edi, %edx
setns %dil
cmpl 0x18(%rsp), %edx
setl %r8b
testb %r8b, %dil
je 0x2afa4f
movq 0x8(%rsp), %rcx
movslq 0x2c(%rcx), %rax
movq %r15, %r14
movslq 0x30(%rcx), %r15
movq 0x40(%rcx), %rdi
imulq %rbx, %rdi
movq 0x10(%rcx), %r8
imulq %r8, %rdi
addq (%rcx), %rdi
movl 0x18(%rcx), %r9d
movq 0x20(%rcx), %r10
movq %r15, %rcx
imulq %rax, %rcx
movl %edx, %edx
movq %rcx, %r11
imulq %r8, %r11
imulq %rdx, %r11
addq %rdi, %r11
movq %r11, 0x70(%rsp)
andq $0x0, 0x78(%rsp)
movq %r8, 0x80(%rsp)
movl %r9d, 0x88(%rsp)
movq %r10, 0x90(%rsp)
movl $0x2, 0x98(%rsp)
movl %eax, 0x9c(%rsp)
movl %r15d, 0xa0(%rsp)
movq (%rsp), %rax
movq %r14, %r15
movq %r13, 0xa4(%rsp)
movq %rcx, 0xb0(%rsp)
movl 0xd0(%rax,%rsi), %edx
movl 0xd4(%rax,%rsi), %ecx
movl 0xd8(%rax,%rsi), %r8d
movl 0xdc(%rax,%rsi), %r9d
leaq 0x70(%rsp), %rdi
leaq 0x28(%rsp), %rsi
movaps 0xc0(%rsp), %xmm0
callq 0x2b0c1b
movaps 0xc0(%rsp), %xmm0
incq %rbp
jmp 0x2af8d4
testl %eax, %eax
movl $0x0, %edx
cmovlel %edx, %eax
subl $0x1, %eax
jb 0x2afa47
movups %xmm0, (%rcx)
addq $0x10, %rcx
jmp 0x2afa59
incq %rbx
jmp 0x2af880
leal (%r10,%r14,4), %edx
movq (%rsp), %rax
addl 0xd4(%rax,%rbp), %edx
testb $0x3, %dl
sete %r8b
movq %r9, %rax
shrq $0x2, %rax
leal (%r8,%r8), %ecx
shlq %cl, %rax
testb $0x3, %r10b
sete %cl
andb %r8b, %cl
cmpb $0x1, %cl
movq 0x8(%rsp), %rcx
jne 0x2afab9
movq (%rsp), %rdi
cmpl $0x0, 0xe0(%rdi,%rbp)
je 0x2afbb8
movq (%rcx), %rax
movq %rax, 0x28(%rsp)
movq 0x8(%rcx), %rax
movq %rax, 0x30(%rsp)
movq %r9, 0x38(%rsp)
movl %r11d, 0x40(%rsp)
movq %rcx, %rdx
movq 0x20(%rcx), %rcx
movq %rcx, 0x48(%rsp)
movl %r13d, 0x50(%rsp)
movl %esi, 0x54(%rsp)
movl %r14d, 0x58(%rsp)
movl 0x18(%rsp), %ecx
movl %ecx, 0x5c(%rsp)
movq 0x10(%rsp), %rcx
movl %ecx, 0x60(%rsp)
movq 0x40(%rdx), %rcx
movq %rcx, 0x68(%rsp)
testq %rax, %rax
je 0x2afb10
lock
incl (%rax)
cmpl $0x1, %r11d
je 0x2afb53
movups (%rbx), %xmm0
movups 0x10(%rbx), %xmm1
movups 0x20(%rbx), %xmm2
movups 0x30(%rbx), %xmm3
leaq 0x70(%rsp), %rcx
movaps %xmm3, 0x30(%rcx)
movaps %xmm2, 0x20(%rcx)
movaps %xmm1, 0x10(%rcx)
movaps %xmm0, (%rcx)
movq 0x10(%rbx), %rax
movq %rax, 0x8(%rcx)
leaq 0x28(%rsp), %rsi
pushq $0x1
popq %rdx
movq 0x8(%rsp), %rdi
callq 0x64e3b
movq (%rsp), %rdi
movq (%rdi), %rax
addq -0x18(%rax), %rdi
leaq 0x28(%rsp), %rsi
movq %r15, %rdx
movq %rbx, %rcx
callq 0x2acc2e
movq %rax, 0x20(%rsp)
movq 0x30(%rsp), %rax
testq %rax, %rax
je 0x2afba1
lock
decl (%rax)
jne 0x2afba1
movq 0x28(%rsp), %rsi
movq 0x48(%rsp), %rdi
testq %rdi, %rdi
je 0x2afb99
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2afba1
movq %rsi, %rdi
callq 0x5f3e0
movq 0x20(%rsp), %rax
addq $0xe8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq (%rsp), %rcx
addl 0xd8(%rcx,%rbp), %esi
addl 0xdc(%rcx,%rbp), %esi
sarl $0x2, %edx
movq 0x8(%rbx), %r9
pushq $0x4
popq %rbp
movq %r15, %rdi
movq %rax, %rcx
movl %ebp, %r8d
callq 0x627de
cmpq $0x0, (%r15)
je 0x2afba1
movslq 0x38(%r15), %rax
imulq 0x40(%r15), %rax
testq %rax, %rax
je 0x2afba1
movq (%rsp), %rdi
movq (%rdi), %rax
movq -0x18(%rax), %rsi
movss 0xe4(%rdi,%rsi), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movl 0xd0(%rdi,%rsi), %eax
movl 0xd4(%rdi,%rsi), %ecx
cltd
idivl %ebp
movl %eax, %r10d
movl %ecx, %eax
cltd
idivl %ebp
movl 0xd8(%rdi,%rsi), %r8d
movl 0xdc(%rdi,%rsi), %r9d
movq 0x8(%rsp), %rdi
movq %r15, %rsi
movl %r10d, %edx
movl %eax, %ecx
callq 0x2b0c1b
movq $0x0, 0x20(%rsp)
jmp 0x2afba1
movq $0x0, 0x20(%rsp)
cmpq %rcx, %r15
je 0x2afba1
movq 0x8(%rcx), %rax
testq %rax, %rax
je 0x2afc75
lock
incl (%rax)
movq 0x8(%r15), %rax
testq %rax, %rax
je 0x2afca4
lock
decl (%rax)
jne 0x2afca4
movq (%r15), %rsi
movq 0x20(%r15), %rdi
testq %rdi, %rdi
je 0x2afc97
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2afc9f
movq %rsi, %rdi
callq 0x5f3e0
movq 0x8(%rsp), %rcx
andq $0x0, 0x40(%r15)
xorps %xmm0, %xmm0
movups %xmm0, (%r15)
movups %xmm0, 0xc(%r15)
andl $0x0, 0x38(%r15)
movups %xmm0, 0x28(%r15)
movups (%rcx), %xmm0
movups %xmm0, (%r15)
movq 0x10(%rcx), %rax
movq %rax, 0x10(%r15)
movl 0x18(%rcx), %eax
movl %eax, 0x18(%r15)
movq 0x20(%rcx), %rax
movq %rax, 0x20(%r15)
movups 0x28(%rcx), %xmm0
movups %xmm0, 0x28(%r15)
movl 0x38(%rcx), %eax
movl %eax, 0x38(%r15)
movq 0x40(%rcx), %rax
movq %rax, 0x40(%r15)
jmp 0x2afba1
jmp 0x2afd3b
jmp 0x2afd3b
jmp 0x2afd3b
jmp 0x2afd02
movq %rax, %rbx
movq 0x30(%rsp), %rax
testq %rax, %rax
je 0x2afd33
lock
decl (%rax)
jne 0x2afd33
movq 0x28(%rsp), %rsi
movq 0x48(%rsp), %rdi
testq %rdi, %rdi
jne 0x2afd2d
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2afd33
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
nop
| /csukuangfj[P]ncnn/src/layer/x86/padding_x86.cpp |
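Padding_x86::forward_int8 below builds its packed pad value by truncating the float value to an integer (the cvttss2si in the listing) and replicating the low byte across all eight int8 lanes of an int64_t with an OR-of-shifts chain. For pad values that fit in an unsigned byte, the chain is equivalent to a single multiply by 0x0101010101010101; broadcast8 below is an illustrative sketch of that identity, not ncnn code (for negative values the upstream shift chain sign-fills the upper lanes instead, so the two forms differ there):

#include <cstdint>
#include <cstdio>

// Replicate one byte into all eight lanes: b * 0x0101010101010101
// equals b | (b << 8) | (b << 16) | ... | (b << 56) for 0 <= b <= 255.
static inline int64_t broadcast8(uint8_t b)
{
    return (int64_t)((uint64_t)b * 0x0101010101010101ull);
}

int main()
{
    printf("%016llx\n", (unsigned long long)broadcast8(0x7f)); // 7f7f7f7f7f7f7f7f
    return 0;
}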
ncnn::Padding_x86::forward_int8(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const | int Padding_x86::forward_int8(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int d = bottom_blob.d;
int channels = bottom_blob.c;
int dims = bottom_blob.dims;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
#if __SSE2__
if (elempack == 8)
{
if (dims == 1)
{
int outw = w * elempack + left + right;
int out_elempack = outw % 8 == 0 ? 8 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (left % 8 == 0 && out_elempack == 8 && type == 0)
{
top_blob.create(outw / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
int64_t v8 = (int64_t)value;
int64_t pad_value = v8 | (v8 << 8) | (v8 << 16) | (v8 << 24) | (v8 << 32) | (v8 << 40) | (v8 << 48) | (v8 << 56);
padding_constant_pack8_int8_sse(bottom_blob, top_blob, 0, 0, left / 8, right / 8, pad_value);
return 0;
}
}
if (dims == 2)
{
int outw = w + left + right;
int outh = h * elempack + top + bottom;
int out_elempack = outh % 8 == 0 ? 8 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (top % 8 == 0 && out_elempack == 8 && type == 0)
{
top_blob.create(outw, outh / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
int64_t v8 = (int64_t)value;
int64_t pad_value = v8 | (v8 << 8) | (v8 << 16) | (v8 << 24) | (v8 << 32) | (v8 << 40) | (v8 << 48) | (v8 << 56);
padding_constant_pack8_int8_sse(bottom_blob, top_blob, top / 8, bottom / 8, left, right, pad_value);
return 0;
}
}
if (dims == 3)
{
int outw = w + left + right;
int outh = h + top + bottom;
int outc = channels * elempack + front + behind;
int out_elempack = outc % 8 == 0 ? 8 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (front % 8 == 0 && out_elempack == 8 && !(outc != channels * elempack && type != 0))
{
top_blob.create(outw, outh, outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
int front_ = front / elempack;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < outc / out_elempack; q++)
{
Mat borderm = top_blob.channel(q);
// TODO perchannel
// int64_t pad_value = per_channel_pad_data_size ? vld1_s8(per_channel_pad_data + q * 8) : vdup_n_s8((signed char)value);
int64_t v8 = (int64_t)value;
int64_t pad_value = v8 | (v8 << 8) | (v8 << 16) | (v8 << 24) | (v8 << 32) | (v8 << 40) | (v8 << 48) | (v8 << 56);
//Channel padding
if ((q - front_) < 0 || (q - front_) >= channels)
{
borderm.fill<int64_t>(pad_value);
}
else
{
const Mat m = bottom_blob.channel(q - front_);
if (type == 0)
padding_constant_pack8_int8_sse(m, borderm, top, bottom, left, right, pad_value);
if (type == 1)
padding_replicate_pack8_int8_sse(m, borderm, top, bottom, left, right);
if (type == 2)
padding_reflect_pack8_int8_sse(m, borderm, top, bottom, left, right);
}
}
return 0;
}
}
if (dims == 4)
{
int outw = w + left + right;
int outh = h + top + bottom;
int outd = d + front + behind;
if (type == 0)
{
top_blob.create(outw, outh, outd, channels, elemsize, elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
// TODO perchannel
// int64_t pad_value = per_channel_pad_data_size ? vld1_s8(per_channel_pad_data + q * 8) : vdup_n_s8((signed char)value);
int64_t v8 = (int64_t)value;
int64_t pad_value = v8 | (v8 << 8) | (v8 << 16) | (v8 << 24) | (v8 << 32) | (v8 << 40) | (v8 << 48) | (v8 << 56);
for (int z = 0; z < outd; z++)
{
Mat borderm = top_blob.channel(q).depth(z);
// depth padding
if ((z - front) < 0 || (z - front) >= d)
{
borderm.fill<int64_t>(pad_value);
}
else
{
const Mat m = bottom_blob.channel(q).depth(z - front);
padding_constant_pack8_int8_sse(m, borderm, top, bottom, left, right, pad_value);
}
}
}
return 0;
}
}
}
#endif // __SSE2__
Mat bottom_blob_unpacked = bottom_blob;
if (elempack != 1)
{
Option opt_pack1 = opt;
opt_pack1.blob_allocator = opt.workspace_allocator;
convert_packing(bottom_blob, bottom_blob_unpacked, 1, opt_pack1);
}
return Padding::forward(bottom_blob_unpacked, top_blob, opt);
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x118, %rsp # imm = 0x118
movq %rcx, %r12
movq %rdx, %rbx
movq %rsi, %rax
movq %rdi, %r13
movl 0x2c(%rsi), %esi
movl 0x30(%rax), %edx
movl 0x34(%rax), %ecx
movl %ecx, 0x18(%rsp)
movl 0x38(%rax), %ebp
movq 0x10(%rax), %r9
movl 0x18(%rax), %edi
movq %rax, 0x28(%rsp)
movl 0x28(%rax), %r11d
cmpl $0x8, %edi
jne 0x2b09db
leal -0x1(%r11), %eax
cmpl $0x3, %eax
ja 0x2b09db
leaq 0x1489b2(%rip), %rcx # 0x3f8750
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
pushq $-0x64
popq %rcx
movq %rcx, 0x20(%rsp)
movq %r13, 0x10(%rsp)
jmpq *%rax
movq (%r13), %rax
movq -0x18(%rax), %r8
movl 0xd8(%r13,%r8), %r14d
leal (%r14,%rsi,8), %eax
addl 0xdc(%r13,%r8), %eax
xorl %r15d, %r15d
testb $0x7, %al
sete %r15b
movq %r9, %r10
shrq $0x3, %r10
leal (%r15,%r15,2), %ecx
shlq %cl, %r10
testb $0x7, %r14b
sete %cl
andb %cl, %r15b
cmpb $0x1, %r15b
jne 0x2b09db
cmpl $0x0, 0xe0(%r13,%r8)
jne 0x2b09db
sarl $0x3, %eax
movq 0x8(%r12), %r8
pushq $0x8
popq %rbp
movq %rbx, %rdi
movl %eax, %esi
movq %r10, %rdx
movl %ebp, %ecx
callq 0x626da
cmpq $0x0, (%rbx)
je 0x2b0ad5
movslq 0x38(%rbx), %rax
imulq 0x40(%rbx), %rax
testq %rax, %rax
je 0x2b0ad5
movq 0x10(%rsp), %r8
movq (%r8), %rax
movq -0x18(%rax), %rcx
cvttss2si 0xe4(%r8,%rcx), %rax
movq %rax, %rdx
shlq $0x8, %rdx
movq %rax, %rsi
shlq $0x10, %rsi
orq %rdx, %rsi
movq %rax, %rdx
shlq $0x18, %rdx
movq %rax, %rdi
shlq $0x20, %rdi
orq %rdx, %rdi
orq %rsi, %rdi
movq %rax, %rdx
shlq $0x28, %rdx
movq %rax, %rsi
shlq $0x30, %rsi
orq %rdx, %rsi
movq %rax, %r9
shlq $0x38, %r9
orq %rsi, %r9
orq %rdi, %r9
orq %rax, %r9
movl 0xd8(%r8,%rcx), %eax
movl 0xdc(%r8,%rcx), %ecx
cltd
idivl %ebp
movl %eax, %r8d
movl %ecx, %eax
cltd
idivl %ebp
movq %r9, (%rsp)
movq $0x0, 0x20(%rsp)
movq 0x28(%rsp), %rdi
movq %rbx, %rsi
xorl %edx, %edx
xorl %ecx, %ecx
movl %eax, %r9d
callq 0x2b0cbe
jmp 0x2b0ad5
movq (%r13), %rax
movq -0x18(%rax), %r10
movl 0xe8(%r13,%r10), %eax
leal (%rax,%rbp,8), %r15d
addl 0xec(%r13,%r10), %r15d
xorl %r14d, %r14d
testb $0x7, %r15b
sete %r14b
movq %r9, %r8
shrq $0x3, %r8
leal (%r14,%r14,2), %ecx
shlq %cl, %r8
testb $0x7, %al
sete %al
andb %al, %r14b
cmpb $0x1, %r14b
jne 0x2b09db
movl 0xd8(%r13,%r10), %eax
addl %esi, %eax
addl 0xdc(%r13,%r10), %eax
movl 0xd0(%r13,%r10), %ecx
addl %edx, %ecx
addl 0xd4(%r13,%r10), %ecx
movq %rbp, 0x30(%rsp)
movq 0x30(%rsp), %r14
leal (,%r14,8), %ebp
cmpl %ebp, %r15d
movq 0x30(%rsp), %rbp
je 0x2aff73
cmpl $0x0, 0xe0(%r13,%r10)
jne 0x2b09db
sarl $0x3, %r15d
movq 0x8(%r12), %rdx
movq %rdx, (%rsp)
pushq $0x8
popq %r9
movq %rbx, %rdi
movl %eax, %esi
movl %ecx, %edx
movl %r15d, %ecx
callq 0x628f2
cmpq $0x0, (%rbx)
je 0x2b0ad5
movslq 0x38(%rbx), %rax
imulq 0x40(%rbx), %rax
testq %rax, %rax
je 0x2b0ad5
movq %r15, %r13
movq 0x10(%rsp), %r12
movq (%r12), %rax
movq %rax, 0x98(%rsp)
movq -0x18(%rax), %rax
movl 0xe8(%r12,%rax), %eax
pushq $-0x8
popq %rcx
cltd
idivl %ecx
movl %eax, %edx
xorl %ecx, %ecx
testl %r13d, %r13d
movl $0x0, %eax
movq %rax, 0x20(%rsp)
cmovlel %ecx, %r13d
movl %edx, 0xa4(%rsp)
movl %edx, %r10d
xorl %ecx, %ecx
movq %r13, 0x110(%rsp)
cmpq %r13, %rcx
je 0x2b0ad5
movslq 0x2c(%rbx), %rax
movslq 0x30(%rbx), %r9
movl 0x34(%rbx), %esi
movq (%rbx), %rdi
movq 0x10(%rbx), %r11
movq 0x40(%rbx), %r8
movq %r8, %rbp
movq %rcx, 0x48(%rsp)
imulq %rcx, %rbp
imulq %r11, %rbp
addq %rdi, %rbp
movl 0x18(%rbx), %ecx
movq 0x20(%rbx), %rdx
movq %rbp, 0x50(%rsp)
andq $0x0, 0x58(%rsp)
movq %r11, 0x60(%rsp)
movl %ecx, 0x68(%rsp)
movq %rdx, 0x70(%rsp)
movl %eax, 0x7c(%rsp)
movl %r9d, 0x80(%rsp)
movl $0x1, 0x84(%rsp)
movl %esi, 0x88(%rsp)
imulq %rax, %r9
movq %r11, %rax
imulq %r9, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %r11
movq %rax, 0x90(%rsp)
movl 0x28(%rbx), %ecx
leal -0x1(%rcx), %edx
movl %edx, 0x78(%rsp)
cmpl $0x4, %ecx
jne 0x2b00a8
movq %r9, 0x90(%rsp)
movq %r9, %rax
movq 0x98(%rsp), %rcx
movq -0x18(%rcx), %r9
cvttss2si 0xe4(%r12,%r9), %rcx
movl %r10d, %r10d
movq %r10, 0x18(%rsp)
movq %rcx, %rdx
shlq $0x8, %rdx
movq %rcx, %r10
shlq $0x10, %r10
orq %rdx, %r10
movq %rcx, %rdx
shlq $0x18, %rdx
movq %rcx, %r14
shlq $0x20, %r14
orq %rdx, %r14
orq %r10, %r14
movq %rcx, %rdx
shlq $0x28, %rdx
movq %rcx, %r15
shlq $0x30, %r15
orq %rdx, %r15
movq %rcx, %r10
shlq $0x38, %r10
orq %r15, %r10
orq %r14, %r10
orq %rcx, %r10
movl 0xa4(%rsp), %ecx
movq 0x48(%rsp), %r15
addl %r15d, %ecx
setns %dl
cmpl 0x30(%rsp), %ecx
setl %r14b
testb %r14b, %dl
je 0x2b031e
movq 0x28(%rsp), %rsi
movslq 0x2c(%rsi), %rdi
movslq 0x30(%rsi), %r8
movl 0x34(%rsi), %eax
movq (%rsi), %r13
movq 0x10(%rsi), %r11
movq 0x40(%rsi), %rdx
movl %ecx, %r12d
movq %rdx, 0x38(%rsp)
imulq %rdx, %r12
imulq %r11, %r12
addq %r13, %r12
movl 0x18(%rsi), %ecx
movq 0x20(%rsi), %rdx
movq %r12, 0xc0(%rsp)
andq $0x0, 0xc8(%rsp)
movq %r11, 0xd0(%rsp)
movl %ecx, 0xd8(%rsp)
movq %rdx, 0xe0(%rsp)
movl %edi, 0xec(%rsp)
movl %r8d, 0xf0(%rsp)
movl $0x1, 0xf4(%rsp)
movl %eax, 0xf8(%rsp)
movq %r8, 0xb0(%rsp)
movq %r8, %rcx
movq %rdi, 0x40(%rsp)
imulq %rdi, %rcx
movq %r11, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
movq %r11, 0xa8(%rsp)
divq %r11
movq %rax, 0x100(%rsp)
movl 0x28(%rsi), %eax
leal -0x1(%rax), %edx
movl %edx, 0xe8(%rsp)
cmpl $0x4, %eax
jne 0x2b01ff
movq %rcx, 0x100(%rsp)
movq 0x10(%rsp), %r14
movl 0xe0(%r14,%r9), %eax
testl %eax, %eax
jne 0x2b025d
movl 0xd0(%r14,%r9), %edx
movl 0xd4(%r14,%r9), %ecx
movl 0xd8(%r14,%r9), %r8d
movl 0xdc(%r14,%r9), %r9d
movq %r10, (%rsp)
leaq 0xc0(%rsp), %rdi
leaq 0x50(%rsp), %rsi
callq 0x2b0cbe
movq (%r14), %rax
movq %rax, 0x98(%rsp)
movq -0x18(%rax), %r9
movl 0xe0(%r14,%r9), %eax
cmpl $0x1, %eax
jne 0x2b0463
movq 0x10(%rsp), %rax
movl 0xd0(%rax,%r9), %r8d
movl 0xd4(%rax,%r9), %ecx
movl %ecx, 0xa0(%rsp)
movl 0xd8(%rax,%r9), %edx
xorl %r10d, %r10d
testl %edx, %edx
cmovlel %r10d, %edx
movq 0x40(%rsp), %rcx
testl %ecx, %ecx
movl $0x0, %r11d
cmovgl %ecx, %r11d
movl 0xdc(%rax,%r9), %esi
testl %esi, %esi
cmovlel %r10d, %esi
testl %r8d, %r8d
cmovlel %r10d, %r8d
movq 0x38(%rsp), %r9
imulq 0xa8(%rsp), %r9
imulq 0x18(%rsp), %r9
addq %r13, %r9
movq %rbp, %rdi
cmpl %r8d, %r10d
je 0x2b034d
movl %edx, %ecx
subl $0x1, %ecx
jb 0x2b02ec
movq (%r12), %r14
movq %r14, (%rdi)
addq $0x8, %rdi
jmp 0x2b02da
xorl %r14d, %r14d
cmpl %r14d, %r11d
je 0x2b0304
movq (%r9,%r14,8), %rcx
movq %rcx, (%rdi)
addq $0x8, %rdi
incq %r14
jmp 0x2b02ef
movl %esi, %ecx
subl $0x1, %ecx
jb 0x2b0319
movq -0x8(%r9,%r14,8), %r15
movq %r15, (%rdi)
addq $0x8, %rdi
jmp 0x2b0306
incl %r10d
jmp 0x2b02d3
imull %eax, %esi
testl %esi, %esi
movl $0x0, %eax
cmovlel %eax, %esi
imulq %r11, %r8
imulq %r15, %r8
addq %r8, %rdi
xorl %eax, %eax
movq %r15, %rcx
cmpq %rax, %rsi
je 0x2b067e
movq %r10, (%rdi,%rax,8)
incq %rax
jmp 0x2b033b
movslq 0xec(%rsp), %rax
xorl %r11d, %r11d
testl %eax, %eax
movl $0x0, %r8d
movq %rax, 0x108(%rsp)
cmovgl %eax, %r8d
movq 0xb0(%rsp), %rax
testl %eax, %eax
movl $0x0, %r14d
cmovgl %eax, %r14d
movq %r12, %r9
cmpl %r14d, %r11d
je 0x2b03df
movl %edx, %ecx
subl $0x1, %ecx
jb 0x2b039b
movq (%r9), %r10
movq %r10, (%rdi)
addq $0x8, %rdi
jmp 0x2b038a
xorl %r15d, %r15d
xorl %r10d, %r10d
cmpl %r15d, %r8d
je 0x2b03b7
movq (%r9,%r15,8), %rcx
movq %rcx, (%rdi,%r15,8)
addq $-0x8, %r10
incq %r15
jmp 0x2b03a1
movq %r9, %rcx
subq %r10, %rcx
subq %r10, %rdi
movl %esi, %r10d
subl $0x1, %r10d
jb 0x2b03d7
movq -0x8(%r9,%r15,8), %rax
movq %rax, (%rdi)
addq $0x8, %rdi
jmp 0x2b03c3
incl %r11d
movq %rcx, %r9
jmp 0x2b0383
movq 0x108(%rsp), %rax
shlq $0x3, %rax
subq %rax, %r9
xorl %r10d, %r10d
movl 0xa0(%rsp), %r14d
testl %r14d, %r14d
cmovlel %r10d, %r14d
cmpl %r14d, %r10d
je 0x2b044a
movl %edx, %ecx
subl $0x1, %ecx
jb 0x2b0418
movq (%r9), %rax
movq %rax, (%rdi)
addq $0x8, %rdi
jmp 0x2b0407
xorl %r11d, %r11d
cmpl %r11d, %r8d
je 0x2b0430
movq (%r9,%r11,8), %rax
movq %rax, (%rdi)
addq $0x8, %rdi
incq %r11
jmp 0x2b041b
movl %esi, %ecx
subl $0x1, %ecx
jb 0x2b0445
movq -0x8(%r9,%r11,8), %rax
movq %rax, (%rdi)
addq $0x8, %rdi
jmp 0x2b0432
incl %r10d
jmp 0x2b0400
movq 0x98(%rsp), %rax
movq -0x18(%rax), %r9
movq 0x10(%rsp), %rax
movl 0xe0(%rax,%r9), %eax
cmpl $0x2, %eax
jne 0x2b066c
movq 0x10(%rsp), %rcx
movl 0xd0(%rcx,%r9), %r14d
movslq 0xd8(%rcx,%r9), %rax
movq %rcx, %r8
movl %r14d, %r11d
movq 0x40(%rsp), %rcx
imull %ecx, %r11d
movq %rcx, %rdx
negq %rdx
xorl %r15d, %r15d
testl %eax, %eax
movl $0x0, %esi
cmovgl %eax, %esi
testl %ecx, %ecx
movl $0x0, %edi
cmovgl %ecx, %edi
movl 0xdc(%r8,%r9), %r8d
testl %r8d, %r8d
cmovlel %r15d, %r8d
testl %r14d, %r14d
cmovlel %r15d, %r14d
movq 0x38(%rsp), %r10
imulq 0xa8(%rsp), %r10
movslq %r11d, %rcx
leaq (%r12,%rcx,8), %r11
imulq 0x18(%rsp), %r10
leaq (%r10,%rax,8), %r10
leaq (%r10,%rcx,8), %r10
movq 0x10(%rsp), %r12
movl 0xd4(%r12,%r9), %r9d
movl %r9d, 0x38(%rsp)
shlq $0x3, %rax
addq %r10, %r13
movq 0x40(%rsp), %rcx
leaq (,%rcx,8), %r10
negq %r10
negq %rsi
negq %r8
cmpl %r14d, %r15d
je 0x2b0575
xorl %ecx, %ecx
cmpq %rcx, %rsi
je 0x2b0533
movq (%r13,%rcx,8), %r12
movq %r12, (%rbp)
addq $0x8, %rbp
decq %rcx
jmp 0x2b051c
xorl %ecx, %ecx
movq %r11, %r12
cmpl %edi, %ecx
je 0x2b0550
movq (%r12), %r9
addq $0x8, %r12
movq %r9, (%rbp)
addq $0x8, %rbp
incl %ecx
jmp 0x2b0538
xorl %ecx, %ecx
cmpq %rcx, %r8
je 0x2b0569
movq -0x10(%r12,%rcx,8), %r9
movq %r9, (%rbp)
addq $0x8, %rbp
decq %rcx
jmp 0x2b0552
leaq (%r11,%rdx,8), %r11
incl %r15d
addq %r10, %r13
jmp 0x2b0515
xorl %r14d, %r14d
movq 0xb0(%rsp), %rcx
testl %ecx, %ecx
movl $0x0, %r15d
cmovgl %ecx, %r15d
movl 0x38(%rsp), %r13d
cmpl %r15d, %r14d
je 0x2b05ea
leaq (%r11,%rax), %rcx
xorl %r12d, %r12d
cmpq %r12, %rsi
je 0x2b05b3
movq (%rcx,%r12,8), %r9
movq %r9, (%rbp)
addq $0x8, %rbp
decq %r12
jmp 0x2b059d
xorl %ecx, %ecx
cmpl %edi, %ecx
je 0x2b05cc
movq (%r11), %r9
addq $0x8, %r11
movq %r9, (%rbp)
addq $0x8, %rbp
incl %ecx
jmp 0x2b05b5
xorl %ecx, %ecx
cmpq %rcx, %r8
je 0x2b05e5
movq -0x10(%r11,%rcx,8), %r9
movq %r9, (%rbp)
addq $0x8, %rbp
decq %rcx
jmp 0x2b05ce
incl %r14d
jmp 0x2b0591
movq 0x40(%rsp), %rcx
addl %ecx, %ecx
movslq %ecx, %rcx
shlq $0x3, %rcx
movq %r11, %r14
subq %rcx, %r14
xorl %r15d, %r15d
testl %r13d, %r13d
cmovlel %r15d, %r13d
subq %rcx, %rax
addq %r11, %rax
cmpl %r13d, %r15d
je 0x2b066c
xorl %ecx, %ecx
cmpq %rcx, %rsi
je 0x2b062b
movq (%rax,%rcx,8), %r9
movq %r9, (%rbp)
addq $0x8, %rbp
decq %rcx
jmp 0x2b0615
xorl %ecx, %ecx
movq %r14, %r11
cmpl %edi, %ecx
je 0x2b0647
movq (%r11), %r9
addq $0x8, %r11
movq %r9, (%rbp)
addq $0x8, %rbp
incl %ecx
jmp 0x2b0630
xorl %ecx, %ecx
cmpq %rcx, %r8
je 0x2b0660
movq -0x10(%r11,%rcx,8), %r9
movq %r9, (%rbp)
addq $0x8, %rbp
decq %rcx
jmp 0x2b0649
leaq (%r14,%rdx,8), %r14
incl %r15d
addq %r10, %rax
jmp 0x2b060e
movq 0x10(%rsp), %r12
movq 0x110(%rsp), %r13
movq 0x48(%rsp), %rcx
incq %rcx
movq 0x18(%rsp), %r10
incl %r10d
jmp 0x2afffe
movq (%r13), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0xe0(%r13,%rax)
jne 0x2b09db
movl 0xe8(%r13,%rax), %ecx
addl 0x18(%rsp), %ecx
addl 0xec(%r13,%rax), %ecx
addl 0xd0(%r13,%rax), %edx
addl 0xd4(%r13,%rax), %edx
addl 0xd8(%r13,%rax), %esi
addl 0xdc(%r13,%rax), %esi
movq 0x8(%r12), %rax
movq %rax, 0x8(%rsp)
movl $0x8, (%rsp)
movq %rbx, %rdi
movq %rcx, %r14
movl %ebp, %r8d
callq 0x62a26
cmpq $0x0, (%rbx)
je 0x2b0ad5
movslq 0x38(%rbx), %rax
imulq 0x40(%rbx), %rax
testq %rax, %rax
je 0x2b0ad5
xorl %ecx, %ecx
testl %r14d, %r14d
cmovlel %ecx, %r14d
movq %r14, 0x48(%rsp)
testl %ebp, %ebp
movl $0x0, %eax
movq %rax, 0x20(%rsp)
cmovlel %ecx, %ebp
movq %rbp, 0x30(%rsp)
movabsq $0x100000001, %r12 # imm = 0x100000001
xorl %ebp, %ebp
movq %rbx, 0xb8(%rsp)
movq 0x10(%rsp), %r13
cmpq 0x30(%rsp), %rbp
je 0x2b0ad5
movq 0x10(%rsp), %rcx
movq (%rcx), %rax
movq -0x18(%rax), %rax
cvttss2si 0xe4(%rcx,%rax), %rax
movq %rax, %rcx
shlq $0x8, %rcx
movq %rax, %rdx
shlq $0x10, %rdx
orq %rcx, %rdx
movq %rax, %rcx
shlq $0x18, %rcx
movq %rax, %rsi
shlq $0x20, %rsi
orq %rcx, %rsi
orq %rdx, %rsi
movq %rax, %rcx
shlq $0x28, %rcx
movq %rax, %rdx
shlq $0x30, %rdx
orq %rcx, %rdx
movq %rax, %r14
shlq $0x38, %r14
orq %rdx, %r14
orq %rsi, %r14
orq %rax, %r14
xorl %r15d, %r15d
cmpq 0x48(%rsp), %r15
je 0x2b0981
movslq 0x2c(%rbx), %rdi
movslq 0x30(%rbx), %rdx
movq (%rbx), %rax
movq 0x10(%rbx), %rsi
movq 0x40(%rbx), %r8
imulq %rbp, %r8
movq %r8, %r9
imulq %rsi, %r9
addq %rax, %r9
movl 0x18(%rbx), %r10d
movq %rdx, %rcx
imulq %rdi, %rcx
movq %r15, %r11
imulq %rsi, %r11
imulq %rcx, %r11
addq %r9, %r11
movq 0x20(%rbx), %r9
movq %r11, 0x50(%rsp)
andq $0x0, 0x58(%rsp)
movq %rsi, 0x60(%rsp)
movl %r10d, 0x68(%rsp)
movq %r9, 0x70(%rsp)
movl $0x2, 0x78(%rsp)
movl %edi, 0x7c(%rsp)
movl %edx, 0x80(%rsp)
movq %r12, 0x84(%rsp)
movq %rcx, 0x90(%rsp)
movq (%r13), %r9
movq -0x18(%r9), %r9
movl 0xe8(%r13,%r9), %r11d
movl %r15d, %r10d
subl %r11d, %r10d
setns %r11b
cmpl 0x18(%rsp), %r10d
movq %r12, %rbx
setl %r12b
testb %r12b, %r11b
je 0x2b0948
movq 0x28(%rsp), %r8
movslq 0x2c(%r8), %rax
movslq 0x30(%r8), %rcx
movq 0x40(%r8), %rdx
imulq %rbp, %rdx
movq 0x10(%r8), %rsi
imulq %rsi, %rdx
addq (%r8), %rdx
movl 0x18(%r8), %edi
movq 0x20(%r8), %r8
movq %rcx, %r11
imulq %rax, %r11
movl %r10d, %r10d
movq %r11, %r12
imulq %rsi, %r12
imulq %r10, %r12
addq %rdx, %r12
movq %r12, 0xc0(%rsp)
andq $0x0, 0xc8(%rsp)
movq %rsi, 0xd0(%rsp)
movl %edi, 0xd8(%rsp)
movq %r8, 0xe0(%rsp)
movl $0x2, 0xe8(%rsp)
movl %eax, 0xec(%rsp)
movl %ecx, 0xf0(%rsp)
movq %rbx, 0xf4(%rsp)
movq %r11, 0x100(%rsp)
movl 0xd0(%r13,%r9), %edx
movl 0xd4(%r13,%r9), %ecx
movl 0xd8(%r13,%r9), %r8d
movl 0xdc(%r13,%r9), %r9d
movq %r14, (%rsp)
leaq 0xc0(%rsp), %rdi
leaq 0x50(%rsp), %rsi
callq 0x2b0cbe
movq %rbx, %r12
movq 0xb8(%rsp), %rbx
incq %r15
jmp 0x2b07b9
testl %ecx, %ecx
movl $0x0, %r9d
cmovlel %r9d, %ecx
imulq %r15, %rdx
imulq %rdi, %rdx
addq %r8, %rdx
imulq %rdx, %rsi
addq %rsi, %rax
xorl %edx, %edx
movq %rbx, %r12
movq 0xb8(%rsp), %rbx
cmpq %rdx, %rcx
je 0x2b0940
movq %r14, (%rax,%rdx,8)
incq %rdx
jmp 0x2b0973
incq %rbp
jmp 0x2b074f
movq (%r13), %rax
movq -0x18(%rax), %r10
movl 0xd0(%r13,%r10), %r14d
leal (%r14,%rdx,8), %eax
addl 0xd4(%r13,%r10), %eax
xorl %r15d, %r15d
testb $0x7, %al
sete %r15b
movq %r9, %r8
shrq $0x3, %r8
leal (%r15,%r15,2), %ecx
shlq %cl, %r8
testb $0x7, %r14b
sete %cl
andb %cl, %r15b
cmpb $0x1, %r15b
jne 0x2b09db
cmpl $0x0, 0xe0(%r13,%r10)
je 0x2b0aec
movq 0x28(%rsp), %r8
movq (%r8), %rax
movq %rax, 0x50(%rsp)
movq 0x8(%r8), %rax
movq %rax, 0x58(%rsp)
movq %r9, 0x60(%rsp)
movl %edi, 0x68(%rsp)
movq 0x20(%r8), %rcx
movq %rcx, 0x70(%rsp)
movl %r11d, 0x78(%rsp)
movl %esi, 0x7c(%rsp)
movl %edx, 0x80(%rsp)
movl 0x18(%rsp), %ecx
movl %ecx, 0x84(%rsp)
movl %ebp, 0x88(%rsp)
movq 0x40(%r8), %rcx
movq %rcx, 0x90(%rsp)
testq %rax, %rax
je 0x2b0a39
lock
incl (%rax)
cmpl $0x1, %edi
je 0x2b0a87
movups (%r12), %xmm0
movups 0x10(%r12), %xmm1
movups 0x20(%r12), %xmm2
movups 0x30(%r12), %xmm3
leaq 0xc0(%rsp), %rcx
movaps %xmm3, 0x30(%rcx)
movaps %xmm2, 0x20(%rcx)
movaps %xmm1, 0x10(%rcx)
movaps %xmm0, (%rcx)
movq 0x10(%r12), %rax
movq %rax, 0x8(%rcx)
leaq 0x50(%rsp), %rsi
pushq $0x1
popq %rdx
movq 0x28(%rsp), %rdi
callq 0x64e3b
movq (%r13), %rax
addq -0x18(%rax), %r13
leaq 0x50(%rsp), %rsi
movq %r13, %rdi
movq %rbx, %rdx
movq %r12, %rcx
callq 0x2acc2e
movq %rax, 0x20(%rsp)
movq 0x58(%rsp), %rax
testq %rax, %rax
je 0x2b0ad5
lock
decl (%rax)
jne 0x2b0ad5
movq 0x50(%rsp), %rsi
movq 0x70(%rsp), %rdi
testq %rdi, %rdi
je 0x2b0acd
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2b0ad5
movq %rsi, %rdi
callq 0x5f3e0
movq 0x20(%rsp), %rax
addq $0x118, %rsp # imm = 0x118
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
addl 0xd8(%r13,%r10), %esi
addl 0xdc(%r13,%r10), %esi
sarl $0x3, %eax
movq 0x8(%r12), %r9
pushq $0x8
popq %rbp
movq %rbx, %rdi
movl %eax, %edx
movq %r8, %rcx
movl %ebp, %r8d
callq 0x627de
cmpq $0x0, (%rbx)
je 0x2b0ad5
movslq 0x38(%rbx), %rax
imulq 0x40(%rbx), %rax
testq %rax, %rax
je 0x2b0ad5
movq 0x10(%rsp), %r9
movq (%r9), %rax
movq -0x18(%rax), %rsi
cvttss2si 0xe4(%r9,%rsi), %rax
movq %rax, %rcx
shlq $0x8, %rcx
movq %rax, %rdx
shlq $0x10, %rdx
orq %rcx, %rdx
movq %rax, %rcx
shlq $0x18, %rcx
movq %rax, %rdi
shlq $0x20, %rdi
orq %rcx, %rdi
orq %rdx, %rdi
movq %rax, %rcx
shlq $0x28, %rcx
movq %rax, %rdx
shlq $0x30, %rdx
orq %rcx, %rdx
movq %rax, %r11
shlq $0x38, %r11
orq %rdx, %r11
orq %rdi, %r11
orq %rax, %r11
movl 0xd0(%r9,%rsi), %eax
movl 0xd4(%r9,%rsi), %ecx
cltd
idivl %ebp
movl %eax, %r10d
movl %ecx, %eax
cltd
idivl %ebp
movl 0xd8(%r9,%rsi), %r8d
movl 0xdc(%r9,%rsi), %r9d
movq %r11, (%rsp)
movq 0x28(%rsp), %rdi
movq %rbx, %rsi
movl %r10d, %edx
movl %eax, %ecx
callq 0x2b0cbe
movq $0x0, 0x20(%rsp)
jmp 0x2b0ad5
jmp 0x2b0c13
jmp 0x2b0bda
movq %rax, %rbx
movq 0x58(%rsp), %rax
testq %rax, %rax
je 0x2b0c0b
lock
decl (%rax)
jne 0x2b0c0b
movq 0x50(%rsp), %rsi
movq 0x70(%rsp), %rdi
testq %rdi, %rdi
jne 0x2b0c05
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2b0c0b
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
| /csukuangfj[P]ncnn/src/layer/x86/padding_x86.cpp |
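The scalar prologue in the assembly above (the cvttss2si followed by the shlq/orq chain) broadcasts the float pad value, truncated to int8, into all eight bytes of a general-purpose register before the pack8 fill loops run. A minimal standalone sketch of that splat in plain C++ — the helper name is mine, not ncnn's, and it assumes the value already fits in a signed byte, as the assembly does:

#include <cstdint>

// Mirror the cvttss2si + shlq/orq chain: replicate one pad byte into
// every byte of a 64-bit word (one int64_t = eight packed int8 lanes).
static inline int64_t splat_pad_int8x8(float value)
{
    int64_t v = (int64_t)value & 0xff; // truncate, keep the low byte
    int64_t x = v;
    x |= v << 8;
    x |= v << 16;
    x |= v << 24;
    x |= v << 32;
    x |= v << 40;
    x |= v << 48;
    x |= v << 56; // x now holds eight copies of the pad byte
    return x;
}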
virtual thunk to ncnn::Padding_x86::forward(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const | int Padding_x86::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
if (top == 0 && bottom == 0 && left == 0 && right == 0 && front == 0 && behind == 0)
{
top_blob = bottom_blob;
return 0;
}
int elembits = bottom_blob.elembits();
if (elembits == 8)
return forward_int8(bottom_blob, top_blob, opt);
int w = bottom_blob.w;
int h = bottom_blob.h;
int d = bottom_blob.d;
int channels = bottom_blob.c;
int dims = bottom_blob.dims;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
#if __SSE2__
#if __AVX__
#if __AVX512F__
if (elempack == 16)
{
if (dims == 1)
{
int outw = w * elempack + left + right;
int out_elempack = outw % 16 == 0 ? 16 : outw % 8 == 0 ? 8 : outw % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (left % 16 == 0 && out_elempack == 16 && type == 0)
{
top_blob.create(outw / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
__m512 pad_value = _mm512_set1_ps(value);
padding_constant_pack16_avx512(bottom_blob, top_blob, 0, 0, left / 16, right / 16, pad_value);
return 0;
}
}
if (dims == 2)
{
int outw = w + left + right;
int outh = h * elempack + top + bottom;
int out_elempack = outh % 16 == 0 ? 16 : outh % 8 == 0 ? 8 : outh % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (top % 16 == 0 && out_elempack == 16 && type == 0)
{
top_blob.create(outw, outh / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
__m512 pad_value = _mm512_set1_ps(value);
padding_constant_pack16_avx512(bottom_blob, top_blob, top / 16, bottom / 16, left, right, pad_value);
return 0;
}
}
if (dims == 3)
{
int outw = w + left + right;
int outh = h + top + bottom;
int outc = channels * elempack + front + behind;
int out_elempack = outc % 16 == 0 ? 16 : outc % 8 == 0 ? 8 : outc % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (front % 16 == 0 && out_elempack == 16 && !(outc != channels * elempack && type != 0))
{
top_blob.create(outw, outh, outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
int front_ = front / elempack;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < outc / out_elempack; q++)
{
Mat borderm = top_blob.channel(q);
__m512 pad_value = per_channel_pad_data_size ? _mm512_loadu_ps((const float*)per_channel_pad_data + q * 16) : _mm512_set1_ps(value);
//Channel padding
if ((q - front_) < 0 || (q - front_) >= channels)
{
borderm.fill(pad_value);
}
else
{
const Mat m = bottom_blob.channel(q - front_);
if (type == 0)
padding_constant_pack16_avx512(m, borderm, top, bottom, left, right, pad_value);
if (type == 1)
padding_replicate_pack16_avx512(m, borderm, top, bottom, left, right);
if (type == 2)
padding_reflect_pack16_avx512(m, borderm, top, bottom, left, right);
}
}
return 0;
}
}
if (dims == 4)
{
int outw = w + left + right;
int outh = h + top + bottom;
int outd = d + front + behind;
if (type == 0)
{
top_blob.create(outw, outh, outd, channels, elemsize, elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
__m512 pad_value = per_channel_pad_data_size ? _mm512_loadu_ps((const float*)per_channel_pad_data + q * 16) : _mm512_set1_ps(value);
for (int z = 0; z < outd; z++)
{
Mat borderm = top_blob.channel(q).depth(z);
// depth padding
if ((z - front) < 0 || (z - front) >= d)
{
borderm.fill(pad_value);
}
else
{
const Mat m = bottom_blob.channel(q).depth(z - front);
padding_constant_pack16_avx512(m, borderm, top, bottom, left, right, pad_value);
}
}
}
return 0;
}
}
}
#endif // __AVX512F__
if (elempack == 8)
{
if (dims == 1)
{
int outw = w * elempack + left + right;
int out_elempack = outw % 8 == 0 ? 8 : outw % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (left % 8 == 0 && out_elempack == 8 && type == 0)
{
top_blob.create(outw / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
__m256 pad_value = _mm256_set1_ps(value);
padding_constant_pack8_avx(bottom_blob, top_blob, 0, 0, left / 8, right / 8, pad_value);
return 0;
}
}
if (dims == 2)
{
int outw = w + left + right;
int outh = h * elempack + top + bottom;
int out_elempack = outh % 8 == 0 ? 8 : outh % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (top % 8 == 0 && out_elempack == 8 && type == 0)
{
top_blob.create(outw, outh / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
__m256 pad_value = _mm256_set1_ps(value);
padding_constant_pack8_avx(bottom_blob, top_blob, top / 8, bottom / 8, left, right, pad_value);
return 0;
}
}
if (dims == 3)
{
int outw = w + left + right;
int outh = h + top + bottom;
int outc = channels * elempack + front + behind;
int out_elempack = outc % 8 == 0 ? 8 : outc % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (front % 8 == 0 && out_elempack == 8 && !(outc != channels * elempack && type != 0))
{
top_blob.create(outw, outh, outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
int front_ = front / elempack;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < outc / out_elempack; q++)
{
Mat borderm = top_blob.channel(q);
__m256 pad_value = per_channel_pad_data_size ? _mm256_loadu_ps((const float*)per_channel_pad_data + q * 8) : _mm256_set1_ps(value);
// Channel padding
if ((q - front_) < 0 || (q - front_) >= channels)
{
borderm.fill(pad_value);
}
else
{
const Mat m = bottom_blob.channel(q - front_);
if (type == 0)
padding_constant_pack8_avx(m, borderm, top, bottom, left, right, pad_value);
if (type == 1)
padding_replicate_pack8_avx(m, borderm, top, bottom, left, right);
if (type == 2)
padding_reflect_pack8_avx(m, borderm, top, bottom, left, right);
}
}
return 0;
}
}
if (dims == 4)
{
int outw = w + left + right;
int outh = h + top + bottom;
int outd = d + front + behind;
if (type == 0)
{
top_blob.create(outw, outh, outd, channels, elemsize, elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
__m256 pad_value = per_channel_pad_data_size ? _mm256_loadu_ps((const float*)per_channel_pad_data + q * 8) : _mm256_set1_ps(value);
for (int z = 0; z < outd; z++)
{
Mat borderm = top_blob.channel(q).depth(z);
// depth padding
if ((z - front) < 0 || (z - front) >= d)
{
borderm.fill(pad_value);
}
else
{
const Mat m = bottom_blob.channel(q).depth(z - front);
padding_constant_pack8_avx(m, borderm, top, bottom, left, right, pad_value);
}
}
}
return 0;
}
}
}
#endif // __AVX__
if (elempack == 4)
{
if (dims == 1)
{
int outw = w * elempack + left + right;
#if __AVX__
int out_elempack = outw % 8 == 0 ? 8 : outw % 4 == 0 ? 4 : 1;
#else
int out_elempack = outw % 4 == 0 ? 4 : 1;
#endif
size_t out_elemsize = elemsize / elempack * out_elempack;
if (left % 4 == 0 && out_elempack == 4 && type == 0)
{
top_blob.create(outw / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
__m128 pad_value = _mm_set1_ps(value);
padding_constant_pack4_sse(bottom_blob, top_blob, 0, 0, left / 4, right / 4, pad_value);
return 0;
}
}
if (dims == 2)
{
int outw = w + left + right;
int outh = h * elempack + top + bottom;
#if __AVX__
int out_elempack = outh % 8 == 0 ? 8 : outh % 4 == 0 ? 4 : 1;
#else
int out_elempack = outh % 4 == 0 ? 4 : 1;
#endif
size_t out_elemsize = elemsize / elempack * out_elempack;
if (top % 4 == 0 && out_elempack == 4 && type == 0)
{
top_blob.create(outw, outh / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
__m128 pad_value = _mm_set1_ps(value);
padding_constant_pack4_sse(bottom_blob, top_blob, top / 4, bottom / 4, left, right, pad_value);
return 0;
}
}
if (dims == 3)
{
int outw = w + left + right;
int outh = h + top + bottom;
int outc = channels * elempack + front + behind;
#if __AVX__
int out_elempack = outc % 8 == 0 ? 8 : outc % 4 == 0 ? 4 : 1;
#else
int out_elempack = outc % 4 == 0 ? 4 : 1;
#endif
size_t out_elemsize = elemsize / elempack * out_elempack;
if (front % 4 == 0 && out_elempack == 4 && !(outc != channels * elempack && type != 0))
{
top_blob.create(outw, outh, outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
int front_ = front / elempack;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < outc / out_elempack; q++)
{
Mat borderm = top_blob.channel(q);
__m128 pad_value = per_channel_pad_data_size ? _mm_loadu_ps((const float*)per_channel_pad_data + q * 4) : _mm_set1_ps(value);
// Channel padding
if ((q - front_) < 0 || (q - front_) >= channels)
{
borderm.fill(pad_value);
}
else
{
const Mat m = bottom_blob.channel(q - front_);
if (type == 0)
padding_constant_pack4_sse(m, borderm, top, bottom, left, right, pad_value);
if (type == 1)
padding_replicate_pack4_sse(m, borderm, top, bottom, left, right);
if (type == 2)
padding_reflect_pack4_sse(m, borderm, top, bottom, left, right);
}
}
return 0;
}
}
if (dims == 4)
{
int outw = w + left + right;
int outh = h + top + bottom;
int outd = d + front + behind;
if (type == 0)
{
top_blob.create(outw, outh, outd, channels, elemsize, elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
__m128 pad_value = per_channel_pad_data_size ? _mm_loadu_ps((const float*)per_channel_pad_data + q * 4) : _mm_set1_ps(value);
for (int z = 0; z < outd; z++)
{
Mat borderm = top_blob.channel(q).depth(z);
// depth padding
if ((z - front) < 0 || (z - front) >= d)
{
borderm.fill(pad_value);
}
else
{
const Mat m = bottom_blob.channel(q).depth(z - front);
padding_constant_pack4_sse(m, borderm, top, bottom, left, right, pad_value);
}
}
}
return 0;
}
}
}
#endif // __SSE2__
Mat bottom_blob_unpacked = bottom_blob;
if (elempack != 1)
{
Option opt_pack1 = opt;
opt_pack1.blob_allocator = opt.workspace_allocator;
convert_packing(bottom_blob, bottom_blob_unpacked, 1, opt_pack1);
}
return Padding::forward(bottom_blob_unpacked, top_blob, opt);
} | movq (%rdi), %rax
addq -0x48(%rax), %rdi
jmp 0x2aee70
| /csukuangfj[P]ncnn/src/layer/x86/padding_x86.cpp |
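The three instructions above are a standard Itanium-ABI virtual thunk: load the vtable pointer, add the vcall this-adjustment stored at a negative vtable offset (-0x48 here, i.e. nine pointer slots below the address point), then tail-jump to the real Padding_x86::forward. An illustrative C++ rendering with hypothetical names — the real thunk is compiler-generated, not written by hand:

#include <cstdint>

using forward_fn = int (*)(void* self); // real target also takes the Mat/Option args

// Model of the thunk body: adjust `this`, then tail-call the target.
static int virtual_thunk(void* self, forward_fn target)
{
    intptr_t* vtable = *(intptr_t**)self; // movq (%rdi), %rax
    self = (char*)self + vtable[-9];      // addq -0x48(%rax), %rdi  (-0x48 = -9 slots)
    return target(self);                  // jmp ... (tail call)
}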
ncnn::padding_constant_pack8_int8_sse(ncnn::Mat const&, ncnn::Mat&, int, int, int, int, long) | static void padding_constant_pack8_int8_sse(const Mat& src, Mat& dst, int top, int bottom, int left, int right, int64_t _v)
{
const int64_t* ptr = src;
int64_t* outptr = dst;
// fill top
for (int y = 0; y < top; y++)
{
for (int x = 0; x < dst.w; x++)
{
*outptr++ = _v;
}
}
// fill center
for (int y = 0; y < src.h; y++)
{
for (int x = 0; x < left; x++)
{
*outptr++ = _v;
}
for (int x = 0; x < src.w; x++)
{
*outptr++ = *ptr++;
}
for (int x = 0; x < right; x++)
{
*outptr++ = _v;
}
}
// fill bottom
for (int y = 0; y < bottom; y++)
{
for (int x = 0; x < dst.w; x++)
{
*outptr++ = _v;
}
}
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
movq 0x30(%rsp), %rax
movq (%rdi), %r11
movl 0x2c(%rsi), %ebx
xorl %ebp, %ebp
testl %ebx, %ebx
cmovlel %ebp, %ebx
movq (%rsi), %r10
testl %edx, %edx
cmovlel %ebp, %edx
cmpl %edx, %ebp
je 0x2b0cfa
movl %ebx, %r14d
subl $0x1, %r14d
jb 0x2b0cf6
movq %rax, (%r10)
addq $0x8, %r10
jmp 0x2b0ce7
incl %ebp
jmp 0x2b0ce0
xorl %edx, %edx
testl %r8d, %r8d
cmovlel %edx, %r8d
movl 0x2c(%rdi), %ebx
testl %ebx, %ebx
cmovlel %edx, %ebx
testl %r9d, %r9d
cmovlel %edx, %r9d
movl 0x30(%rdi), %edi
testl %edi, %edi
cmovlel %edx, %edi
cmpl %edi, %edx
je 0x2b0d66
movl %r8d, %ebp
subl $0x1, %ebp
jb 0x2b0d2f
movq %rax, (%r10)
addq $0x8, %r10
jmp 0x2b0d21
xorl %r15d, %r15d
xorl %r14d, %r14d
cmpl %r15d, %ebx
je 0x2b0d4b
movq (%r11,%r15,8), %r12
movq %r12, (%r10,%r15,8)
addq $-0x8, %r14
incq %r15
jmp 0x2b0d35
subq %r14, %r11
subq %r14, %r10
movl %r9d, %ebp
subl $0x1, %ebp
jb 0x2b0d62
movq %rax, (%r10)
addq $0x8, %r10
jmp 0x2b0d54
incl %edx
jmp 0x2b0d1a
movl 0x2c(%rsi), %edx
xorl %esi, %esi
testl %edx, %edx
cmovlel %esi, %edx
testl %ecx, %ecx
cmovlel %esi, %ecx
movl %edx, %edi
cmpl %ecx, %esi
je 0x2b0d8d
subl $0x1, %edi
jb 0x2b0d89
movq %rax, (%r10)
addq $0x8, %r10
jmp 0x2b0d7b
incl %esi
jmp 0x2b0d75
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
nop
| /csukuangfj[P]ncnn/src/layer/x86/padding_pack8_int8.h |
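To make the top/center/bottom fill order of padding_constant_pack8_int8_sse concrete, here is a tiny self-contained harness on raw int64_t buffers (each int64_t standing for one pack8 int8 pixel) that pads a 2x2 grid with a one-element constant border — an illustrative driver, not part of ncnn:

#include <cstdint>
#include <cstdio>

// Same fill order as padding_constant_pack8_int8_sse above, on raw buffers:
// top rows of v, then [left | src row | right] per source row, then bottom.
static void pad_constant(const int64_t* src, int srcw, int srch,
                         int64_t* dst, int top, int bottom,
                         int left, int right, int64_t v)
{
    int dstw = srcw + left + right;
    int64_t* out = dst;
    for (int y = 0; y < top; y++)
        for (int x = 0; x < dstw; x++) *out++ = v;
    for (int y = 0; y < srch; y++)
    {
        for (int x = 0; x < left; x++) *out++ = v;
        for (int x = 0; x < srcw; x++) *out++ = *src++;
        for (int x = 0; x < right; x++) *out++ = v;
    }
    for (int y = 0; y < bottom; y++)
        for (int x = 0; x < dstw; x++) *out++ = v;
}

int main()
{
    const int64_t src[4] = {1, 2, 3, 4}; // 2x2 input
    int64_t dst[16];                     // 4x4 output with a 1-wide border of 0
    pad_constant(src, 2, 2, dst, 1, 1, 1, 1, /*v=*/0);
    for (int y = 0; y < 4; y++, puts(""))
        for (int x = 0; x < 4; x++) printf("%lld ", (long long)dst[y * 4 + x]);
    return 0;
}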
ncnn::Padding_x86_avx512::forward(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const | int Padding_x86_avx512::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
if (top == 0 && bottom == 0 && left == 0 && right == 0 && front == 0 && behind == 0)
{
top_blob = bottom_blob;
return 0;
}
int elembits = bottom_blob.elembits();
if (elembits == 8)
return forward_int8(bottom_blob, top_blob, opt);
int w = bottom_blob.w;
int h = bottom_blob.h;
int d = bottom_blob.d;
int channels = bottom_blob.c;
int dims = bottom_blob.dims;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
#if __SSE2__
#if __AVX__
#if __AVX512F__
if (elempack == 16)
{
if (dims == 1)
{
int outw = w * elempack + left + right;
int out_elempack = outw % 16 == 0 ? 16 : outw % 8 == 0 ? 8 : outw % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (left % 16 == 0 && out_elempack == 16 && type == 0)
{
top_blob.create(outw / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
__m512 pad_value = _mm512_set1_ps(value);
padding_constant_pack16_avx512(bottom_blob, top_blob, 0, 0, left / 16, right / 16, pad_value);
return 0;
}
}
if (dims == 2)
{
int outw = w + left + right;
int outh = h * elempack + top + bottom;
int out_elempack = outh % 16 == 0 ? 16 : outh % 8 == 0 ? 8 : outh % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (top % 16 == 0 && out_elempack == 16 && type == 0)
{
top_blob.create(outw, outh / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
__m512 pad_value = _mm512_set1_ps(value);
padding_constant_pack16_avx512(bottom_blob, top_blob, top / 16, bottom / 16, left, right, pad_value);
return 0;
}
}
if (dims == 3)
{
int outw = w + left + right;
int outh = h + top + bottom;
int outc = channels * elempack + front + behind;
int out_elempack = outc % 16 == 0 ? 16 : outc % 8 == 0 ? 8 : outc % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (front % 16 == 0 && out_elempack == 16 && !(outc != channels * elempack && type != 0))
{
top_blob.create(outw, outh, outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
int front_ = front / elempack;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < outc / out_elempack; q++)
{
Mat borderm = top_blob.channel(q);
__m512 pad_value = per_channel_pad_data_size ? _mm512_loadu_ps((const float*)per_channel_pad_data + q * 16) : _mm512_set1_ps(value);
// Channel padding
if ((q - front_) < 0 || (q - front_) >= channels)
{
borderm.fill(pad_value);
}
else
{
const Mat m = bottom_blob.channel(q - front_);
if (type == 0)
padding_constant_pack16_avx512(m, borderm, top, bottom, left, right, pad_value);
if (type == 1)
padding_replicate_pack16_avx512(m, borderm, top, bottom, left, right);
if (type == 2)
padding_reflect_pack16_avx512(m, borderm, top, bottom, left, right);
}
}
return 0;
}
}
if (dims == 4)
{
int outw = w + left + right;
int outh = h + top + bottom;
int outd = d + front + behind;
if (type == 0)
{
top_blob.create(outw, outh, outd, channels, elemsize, elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
__m512 pad_value = per_channel_pad_data_size ? _mm512_loadu_ps((const float*)per_channel_pad_data + q * 16) : _mm512_set1_ps(value);
for (int z = 0; z < outd; z++)
{
Mat borderm = top_blob.channel(q).depth(z);
// depth padding
if ((z - front) < 0 || (z - front) >= d)
{
borderm.fill(pad_value);
}
else
{
const Mat m = bottom_blob.channel(q).depth(z - front);
padding_constant_pack16_avx512(m, borderm, top, bottom, left, right, pad_value);
}
}
}
return 0;
}
}
}
#endif // __AVX512F__
if (elempack == 8)
{
if (dims == 1)
{
int outw = w * elempack + left + right;
int out_elempack = outw % 8 == 0 ? 8 : outw % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (left % 8 == 0 && out_elempack == 8 && type == 0)
{
top_blob.create(outw / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
__m256 pad_value = _mm256_set1_ps(value);
padding_constant_pack8_avx(bottom_blob, top_blob, 0, 0, left / 8, right / 8, pad_value);
return 0;
}
}
if (dims == 2)
{
int outw = w + left + right;
int outh = h * elempack + top + bottom;
int out_elempack = outh % 8 == 0 ? 8 : outh % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (top % 8 == 0 && out_elempack == 8 && type == 0)
{
top_blob.create(outw, outh / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
__m256 pad_value = _mm256_set1_ps(value);
padding_constant_pack8_avx(bottom_blob, top_blob, top / 8, bottom / 8, left, right, pad_value);
return 0;
}
}
if (dims == 3)
{
int outw = w + left + right;
int outh = h + top + bottom;
int outc = channels * elempack + front + behind;
int out_elempack = outc % 8 == 0 ? 8 : outc % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (front % 8 == 0 && out_elempack == 8 && !(outc != channels * elempack && type != 0))
{
top_blob.create(outw, outh, outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
int front_ = front / elempack;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < outc / out_elempack; q++)
{
Mat borderm = top_blob.channel(q);
__m256 pad_value = per_channel_pad_data_size ? _mm256_loadu_ps((const float*)per_channel_pad_data + q * 8) : _mm256_set1_ps(value);
// Channel padding
if ((q - front_) < 0 || (q - front_) >= channels)
{
borderm.fill(pad_value);
}
else
{
const Mat m = bottom_blob.channel(q - front_);
if (type == 0)
padding_constant_pack8_avx(m, borderm, top, bottom, left, right, pad_value);
if (type == 1)
padding_replicate_pack8_avx(m, borderm, top, bottom, left, right);
if (type == 2)
padding_reflect_pack8_avx(m, borderm, top, bottom, left, right);
}
}
return 0;
}
}
if (dims == 4)
{
int outw = w + left + right;
int outh = h + top + bottom;
int outd = d + front + behind;
if (type == 0)
{
top_blob.create(outw, outh, outd, channels, elemsize, elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
__m256 pad_value = per_channel_pad_data_size ? _mm256_loadu_ps((const float*)per_channel_pad_data + q * 8) : _mm256_set1_ps(value);
for (int z = 0; z < outd; z++)
{
Mat borderm = top_blob.channel(q).depth(z);
// depth padding
if ((z - front) < 0 || (z - front) >= d)
{
borderm.fill(pad_value);
}
else
{
const Mat m = bottom_blob.channel(q).depth(z - front);
padding_constant_pack8_avx(m, borderm, top, bottom, left, right, pad_value);
}
}
}
return 0;
}
}
}
#endif // __AVX__
if (elempack == 4)
{
if (dims == 1)
{
int outw = w * elempack + left + right;
#if __AVX__
int out_elempack = outw % 8 == 0 ? 8 : outw % 4 == 0 ? 4 : 1;
#else
int out_elempack = outw % 4 == 0 ? 4 : 1;
#endif
size_t out_elemsize = elemsize / elempack * out_elempack;
if (left % 4 == 0 && out_elempack == 4 && type == 0)
{
top_blob.create(outw / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
__m128 pad_value = _mm_set1_ps(value);
padding_constant_pack4_sse(bottom_blob, top_blob, 0, 0, left / 4, right / 4, pad_value);
return 0;
}
}
if (dims == 2)
{
int outw = w + left + right;
int outh = h * elempack + top + bottom;
#if __AVX__
int out_elempack = outh % 8 == 0 ? 8 : outh % 4 == 0 ? 4 : 1;
#else
int out_elempack = outh % 4 == 0 ? 4 : 1;
#endif
size_t out_elemsize = elemsize / elempack * out_elempack;
if (top % 4 == 0 && out_elempack == 4 && type == 0)
{
top_blob.create(outw, outh / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
__m128 pad_value = _mm_set1_ps(value);
padding_constant_pack4_sse(bottom_blob, top_blob, top / 4, bottom / 4, left, right, pad_value);
return 0;
}
}
if (dims == 3)
{
int outw = w + left + right;
int outh = h + top + bottom;
int outc = channels * elempack + front + behind;
#if __AVX__
int out_elempack = outc % 8 == 0 ? 8 : outc % 4 == 0 ? 4 : 1;
#else
int out_elempack = outc % 4 == 0 ? 4 : 1;
#endif
size_t out_elemsize = elemsize / elempack * out_elempack;
if (front % 4 == 0 && out_elempack == 4 && !(outc != channels * elempack && type != 0))
{
top_blob.create(outw, outh, outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
int front_ = front / elempack;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < outc / out_elempack; q++)
{
Mat borderm = top_blob.channel(q);
__m128 pad_value = per_channel_pad_data_size ? _mm_loadu_ps((const float*)per_channel_pad_data + q * 4) : _mm_set1_ps(value);
// Channel padding
if ((q - front_) < 0 || (q - front_) >= channels)
{
borderm.fill(pad_value);
}
else
{
const Mat m = bottom_blob.channel(q - front_);
if (type == 0)
padding_constant_pack4_sse(m, borderm, top, bottom, left, right, pad_value);
if (type == 1)
padding_replicate_pack4_sse(m, borderm, top, bottom, left, right);
if (type == 2)
padding_reflect_pack4_sse(m, borderm, top, bottom, left, right);
}
}
return 0;
}
}
if (dims == 4)
{
int outw = w + left + right;
int outh = h + top + bottom;
int outd = d + front + behind;
if (type == 0)
{
top_blob.create(outw, outh, outd, channels, elemsize, elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
__m128 pad_value = per_channel_pad_data_size ? _mm_loadu_ps((const float*)per_channel_pad_data + q * 4) : _mm_set1_ps(value);
for (int z = 0; z < outd; z++)
{
Mat borderm = top_blob.channel(q).depth(z);
// depth padding
if ((z - front) < 0 || (z - front) >= d)
{
borderm.fill(pad_value);
}
else
{
const Mat m = bottom_blob.channel(q).depth(z - front);
padding_constant_pack4_sse(m, borderm, top, bottom, left, right, pad_value);
}
}
}
return 0;
}
}
}
#endif // __SSE2__
Mat bottom_blob_unpacked = bottom_blob;
if (elempack != 1)
{
Option opt_pack1 = opt;
opt_pack1.blob_allocator = opt.workspace_allocator;
convert_packing(bottom_blob, bottom_blob_unpacked, 1, opt_pack1);
}
return Padding::forward(bottom_blob_unpacked, top_blob, opt);
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x118, %rsp # imm = 0x118
movq %rcx, %rbp
movq %rdx, 0x8(%rsp)
movq %rsi, %r14
movq (%rdi), %rax
movq -0x18(%rax), %rax
leaq (%rdi,%rax), %r11
movq %rdi, 0x10(%rsp)
movl 0xd0(%rdi,%rax), %r10d
testl %r10d, %r10d
jne 0x2b0e4f
cmpl $0x0, 0xd4(%r11)
jne 0x2b0e4f
cmpl $0x0, 0xd8(%r11)
jne 0x2b0e4f
cmpl $0x0, 0xdc(%r11)
jne 0x2b0e4f
cmpl $0x0, 0xe8(%r11)
jne 0x2b0e4f
cmpl $0x0, 0xec(%r11)
je 0x2b351e
movl 0x18(%r14), %r15d
movq 0x10(%r14), %r9
testl %r15d, %r15d
je 0x2b0e93
leal (,%r9,8), %eax
cltd
idivl %r15d
cmpl $0x8, %eax
jne 0x2b0ea5
movq 0x10(%rsp), %rdi
movq %r14, %rsi
movq 0x8(%rsp), %rdx
movq %rbp, %rcx
addq $0x118, %rsp # imm = 0x118
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp 0x2b362c
vmovdqu 0x2c(%r14), %xmm0
movq %r14, %rdi
movl 0x28(%r14), %r13d
jmp 0x2b1157
vmovdqu 0x2c(%r14), %xmm0
movq %r14, (%rsp)
movl 0x28(%r14), %ecx
vpextrd $0x2, %xmm0, %r12d
pushq $0x4
popq %r13
vpextrd $0x1, %xmm0, %eax
vmovd %xmm0, %esi
vpextrd $0x3, %xmm0, %edx
movq %rdx, 0x18(%rsp)
pushq $-0x64
popq %rdx
movq %rdx, 0x20(%rsp)
cmpl $0x4, %r15d
je 0x2b104a
cmpl $0x8, %r15d
je 0x2b0f4a
cmpl $0x10, %r15d
jne 0x2b1150
leal -0x1(%rcx), %edx
cmpl $0x3, %edx
ja 0x2b1150
leaq 0x147891(%rip), %rcx # 0x3f8798
movslq (%rcx,%rdx,4), %rdx
addq %rcx, %rdx
jmpq *%rdx
shll $0x4, %esi
movl 0xd8(%r11), %eax
addl %eax, %esi
addl 0xdc(%r11), %esi
testb $0xf, %sil
je 0x2b2bce
testb $0x7, %sil
je 0x2b3371
xorl %ecx, %ecx
testb $0x3, %sil
sete %cl
leal (%rcx,%rcx,2), %ecx
incl %ecx
jmp 0x2b3374
leal -0x1(%rcx), %edx
cmpl $0x3, %edx
ja 0x2b1150
leaq 0x14782b(%rip), %rcx # 0x3f8788
movslq (%rcx,%rdx,4), %rdx
addq %rcx, %rdx
jmpq *%rdx
movl 0xd8(%r11), %edx
leal (%rdx,%rsi,8), %eax
addl 0xdc(%r11), %eax
testb $0x3, %al
sete %cl
movq %r9, %r10
shrq $0x3, %r10
addb %cl, %cl
testb $0x7, %al
movzbl %cl, %esi
pushq $0x3
popq %rcx
cmovnel %esi, %ecx
shlq %cl, %r10
orl %eax, %edx
pushq $0x1
popq %r13
testb $0x7, %dl
jne 0x2b1153
cmpl $0x0, 0xe0(%r11)
movq (%rsp), %rdi
jne 0x2b1157
pushq $0x8
popq %r12
cltd
idivl %r12d
movq 0x8(%rbp), %r8
movq 0x8(%rsp), %rbx
movq %rbx, %rdi
movl %eax, %esi
movq %r10, %rdx
movl %r12d, %ecx
callq 0x626da
cmpq $0x0, (%rbx)
je 0x2b1239
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x2b1239
movq 0x10(%rsp), %rdx
movq (%rdx), %rax
movq -0x18(%rax), %rcx
vpbroadcastd 0xe4(%rdx,%rcx), %ymm0
movl 0xd8(%rdx,%rcx), %eax
movl 0xdc(%rdx,%rcx), %ecx
cltd
idivl %r12d
movl %eax, %r8d
movl %ecx, %eax
cltd
idivl %r12d
movq $0x0, 0x20(%rsp)
movq (%rsp), %rdi
movq 0x8(%rsp), %rsi
xorl %edx, %edx
xorl %ecx, %ecx
movl %eax, %r9d
callq 0x2b4597
jmp 0x2b1239
leal -0x1(%rcx), %edx
cmpl $0x3, %edx
ja 0x2b1150
leaq 0x14771b(%rip), %rcx # 0x3f8778
movslq (%rcx,%rdx,4), %rdx
addq %rcx, %rdx
jmpq *%rdx
movl 0xd8(%r11), %r8d
leal (%r8,%rsi,4), %esi
addl 0xdc(%r11), %esi
testb $0x3, %sil
sete %cl
movq %r9, %rdx
shrq $0x2, %rdx
addb %cl, %cl
movl %esi, %eax
andl $0x7, %eax
movzbl %cl, %r10d
pushq $0x3
popq %rcx
cmovnel %r10d, %ecx
shlq %cl, %rdx
pushq $0x1
popq %r13
testb $0x3, %r8b
jne 0x2b1153
cmpl $0x4, %eax
jne 0x2b1153
cmpl $0x0, 0xe0(%r11)
movq (%rsp), %rdi
jne 0x2b1157
sarl $0x2, %esi
movq 0x8(%rbp), %r8
pushq $0x4
popq %rbp
movq 0x8(%rsp), %rbx
movq %rbx, %rdi
movl %ebp, %ecx
callq 0x626da
cmpq $0x0, (%rbx)
je 0x2b1239
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x2b1239
movq 0x10(%rsp), %rdx
movq (%rdx), %rax
movq -0x18(%rax), %rcx
vpbroadcastd 0xe4(%rdx,%rcx), %xmm0
movl 0xd8(%rdx,%rcx), %eax
movl 0xdc(%rdx,%rcx), %ecx
cltd
idivl %ebp
movl %eax, %r8d
movl %ecx, %eax
cltd
idivl %ebp
movq $0x0, 0x20(%rsp)
movq (%rsp), %rdi
movq 0x8(%rsp), %rsi
xorl %edx, %edx
xorl %ecx, %ecx
movl %eax, %r9d
callq 0x2b4637
jmp 0x2b1239
movl %ecx, %r13d
movq (%rsp), %rdi
movq 0x8(%rdi), %rax
vmovups (%rdi), %xmm1
vmovaps %xmm1, 0x80(%rsp)
movq %r9, 0x90(%rsp)
movl %r15d, 0x98(%rsp)
movq 0x20(%rdi), %rcx
movq %rcx, 0xa0(%rsp)
movl %r13d, 0xa8(%rsp)
vmovdqu %xmm0, 0xac(%rsp)
movq 0x40(%rdi), %rcx
movq %rcx, 0xc0(%rsp)
testq %rax, %rax
je 0x2b11a9
lock
incl (%rax)
cmpl $0x1, %r15d
je 0x2b11dc
vmovdqu64 (%rbp), %zmm0
leaq 0x30(%rsp), %rcx
vmovdqu64 %zmm0, (%rcx)
movq 0x10(%rbp), %rax
movq %rax, 0x8(%rcx)
leaq 0x80(%rsp), %rsi
pushq $0x1
popq %rdx
vzeroupper
callq 0x64e3b
movq 0x10(%rsp), %rdi
movq (%rdi), %rax
addq -0x18(%rax), %rdi
leaq 0x80(%rsp), %rsi
movq 0x8(%rsp), %rdx
movq %rbp, %rcx
callq 0x2acc2e
movq %rax, 0x20(%rsp)
movq 0x88(%rsp), %rax
testq %rax, %rax
je 0x2b1239
lock
decl (%rax)
jne 0x2b1239
movq 0x80(%rsp), %rsi
movq 0xa0(%rsp), %rdi
testq %rdi, %rdi
je 0x2b1231
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2b1239
movq %rsi, %rdi
callq 0x5f3e0
movq 0x20(%rsp), %rax
addq $0x118, %rsp # imm = 0x118
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
movl 0xe8(%r11), %r14d
movq 0x18(%rsp), %rdi
leal (%r14,%rdi,8), %edx
addl 0xec(%r11), %edx
testb $0x3, %dl
sete %cl
movq %r9, %r8
shrq $0x3, %r8
addb %cl, %cl
testb $0x7, %dl
movzbl %cl, %ecx
pushq $0x3
popq %r13
cmovel %r13d, %ecx
shlq %cl, %r8
orl %edx, %r14d
testb $0x7, %r14b
jne 0x2b1153
addl 0xd8(%r11), %esi
addl 0xdc(%r11), %esi
addl %eax, %r10d
addl 0xd4(%r11), %r10d
leal (,%rdi,8), %eax
cmpl %eax, %edx
movq (%rsp), %rdi
je 0x2b12cc
cmpl $0x0, 0xe0(%r11)
jne 0x2b1157
pushq $0x8
popq %r9
movl %edx, %eax
cltd
idivl %r9d
subq $0x8, %rsp
movq 0x10(%rsp), %rbx
movq %rbx, %rdi
movl %r10d, %edx
movq %rax, %r14
movl %eax, %ecx
pushq 0x8(%rbp)
callq 0x628f2
addq $0x10, %rsp
cmpq $0x0, (%rbx)
je 0x2b1239
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x2b1239
movq 0x10(%rsp), %r12
movq (%r12), %rax
movq -0x18(%rax), %rax
movl 0xe8(%r12,%rax), %eax
pushq $-0x8
popq %rcx
cltd
idivl %ecx
movl %eax, %edx
xorl %ecx, %ecx
testl %r14d, %r14d
movl $0x0, %eax
movq %rax, 0x20(%rsp)
cmovlel %ecx, %r14d
movq %r14, 0xc8(%rsp)
movl %edx, 0x7c(%rsp)
movl %edx, %r11d
xorl %r9d, %r9d
cmpq 0xc8(%rsp), %r9
je 0x2b1239
movq 0x8(%rsp), %r10
movslq 0x2c(%r10), %rax
movslq 0x30(%r10), %rcx
movq 0x40(%r10), %r13
imulq %r9, %r13
movq 0x10(%r10), %rdi
imulq %rdi, %r13
addq (%r10), %r13
movl 0x34(%r10), %esi
movl 0x18(%r10), %edx
movq 0x20(%r10), %r8
movq %r13, 0x80(%rsp)
andq $0x0, 0x88(%rsp)
movq %rdi, 0x90(%rsp)
movl %edx, 0x98(%rsp)
movq %r8, 0xa0(%rsp)
movl %eax, 0xac(%rsp)
movl %ecx, 0xb0(%rsp)
movl $0x1, 0xb4(%rsp)
movl %esi, 0xb8(%rsp)
imulq %rax, %rcx
movq %rdi, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rdi
movq %rax, 0xc0(%rsp)
movl 0x28(%r10), %edx
leal -0x1(%rdx), %edi
movl %edi, 0xa8(%rsp)
cmpl $0x4, %edx
jne 0x2b1419
movq %rcx, 0xc0(%rsp)
movq %rcx, %rax
movq (%r12), %rcx
movq -0x18(%rcx), %rdi
cmpl $0x0, 0xf0(%r12,%rdi)
je 0x2b1443
movq 0xf8(%r12,%rdi), %rdx
movq %r9, %r8
shlq $0x5, %r8
vmovdqu (%rdx,%r8), %ymm0
jmp 0x2b144d
vpbroadcastd 0xe4(%r12,%rdi), %ymm0
movl %r11d, %r11d
movl 0x7c(%rsp), %edx
movq %r9, 0xd0(%rsp)
addl %r9d, %edx
setns %r8b
cmpl 0x18(%rsp), %edx
setl %r9b
testb %r9b, %r8b
je 0x2b160a
movq (%rsp), %r10
movslq 0x2c(%r10), %rax
movslq 0x30(%r10), %rsi
movl 0x34(%r10), %r8d
movq (%r10), %r12
movq 0x10(%r10), %r15
movq 0x40(%r10), %rbx
movl %edx, %ebp
imulq %rbx, %rbp
imulq %r15, %rbp
addq %r12, %rbp
movl 0x18(%r10), %edx
movq 0x20(%r10), %r9
movq %rbp, 0x30(%rsp)
andq $0x0, 0x38(%rsp)
movq %r15, 0x40(%rsp)
movl %edx, 0x48(%rsp)
movq %r9, 0x50(%rsp)
movl %eax, 0x5c(%rsp)
movl %esi, 0x60(%rsp)
movl $0x1, 0x64(%rsp)
movl %r8d, 0x68(%rsp)
imulq %rax, %rsi
movq %r15, %rax
imulq %rsi, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %r15
movq %rax, 0x70(%rsp)
movl 0x28(%r10), %eax
leal -0x1(%rax), %edx
movl %edx, 0x58(%rsp)
cmpl $0x4, %eax
jne 0x2b1504
movq %rsi, 0x70(%rsp)
movq 0x10(%rsp), %r14
movl 0xe0(%r14,%rdi), %eax
testl %eax, %eax
movq %r11, 0x28(%rsp)
jne 0x2b1560
movl 0xd0(%r14,%rdi), %edx
movl 0xd4(%r14,%rdi), %ecx
movl 0xd8(%r14,%rdi), %r8d
movl 0xdc(%r14,%rdi), %r9d
leaq 0x30(%rsp), %rdi
leaq 0x80(%rsp), %rsi
callq 0x2b4597
movq 0x28(%rsp), %r11
movq (%r14), %rcx
movq -0x18(%rcx), %rdi
movl 0xe0(%r14,%rdi), %eax
cmpl $0x1, %eax
jne 0x2b1634
movq 0x10(%rsp), %rsi
movl 0xd8(%rsi,%rdi), %eax
xorl %r8d, %r8d
testl %eax, %eax
cmovlel %r8d, %eax
movl 0xdc(%rsi,%rdi), %edx
testl %edx, %edx
cmovlel %r8d, %edx
movl 0xd0(%rsi,%rdi), %r9d
testl %r9d, %r9d
cmovlel %r8d, %r9d
movl 0xd4(%rsi,%rdi), %esi
imulq %r15, %rbx
imulq %r11, %rbx
addq %rbx, %r12
cmpl %r9d, %r8d
je 0x2b163e
vmovdqa (%rbp), %ymm0
movl %eax, %edi
subl $0x1, %edi
jb 0x2b15cd
vmovdqa %ymm0, (%r13)
addq $0x20, %r13
jmp 0x2b15bc
xorl %edi, %edi
xorl %r10d, %r10d
cmpl 0x5c(%rsp), %r10d
jge 0x2b15ef
vmovdqa (%r12,%rdi), %ymm0
vmovdqa %ymm0, (%r13,%rdi)
incl %r10d
addq $0x20, %rdi
jmp 0x2b15d2
addq %rdi, %r13
movl %edx, %edi
subl $0x1, %edi
jb 0x2b1605
vmovdqa %ymm0, (%r13)
addq $0x20, %r13
jmp 0x2b15f4
incl %r8d
jmp 0x2b15ac
imull %eax, %esi
testl %esi, %esi
movl $0x0, %eax
cmovlel %eax, %esi
movq 0xd0(%rsp), %r9
subl $0x1, %esi
jb 0x2b19db
vmovdqu %ymm0, (%r13)
addq $0x20, %r13
jmp 0x2b161f
movq 0x10(%rsp), %r12
jmp 0x2b1722
xorl %edi, %edi
cmpl 0x60(%rsp), %edi
jge 0x2b169b
vmovdqa (%rbp), %ymm0
movl %eax, %r8d
subl $0x1, %r8d
jb 0x2b1660
vmovdqa %ymm0, (%r13)
addq $0x20, %r13
jmp 0x2b164e
xorl %r8d, %r8d
cmpl 0x5c(%rsp), %r8d
jge 0x2b1682
vmovdqa (%rbp), %ymm0
vmovdqa %ymm0, (%r13)
addq $0x20, %rbp
addq $0x20, %r13
incl %r8d
jmp 0x2b1663
movl %edx, %r8d
subl $0x1, %r8d
jb 0x2b1697
vmovdqa %ymm0, (%r13)
addq $0x20, %r13
jmp 0x2b1685
incl %edi
jmp 0x2b1640
movl 0x5c(%rsp), %edi
shll $0x3, %edi
movslq %edi, %rdi
shlq $0x2, %rdi
subq %rdi, %rbp
xorl %edi, %edi
testl %esi, %esi
cmovlel %edi, %esi
cmpl %esi, %edi
je 0x2b1711
vmovdqa (%rbp), %ymm0
movl %eax, %r8d
subl $0x1, %r8d
jb 0x2b16d1
vmovdqa %ymm0, (%r13)
addq $0x20, %r13
jmp 0x2b16bf
xorl %r8d, %r8d
xorl %r9d, %r9d
cmpl 0x5c(%rsp), %r9d
jge 0x2b16f5
vmovdqa (%rbp,%r8), %ymm0
vmovdqa %ymm0, (%r13,%r8)
incl %r9d
addq $0x20, %r8
jmp 0x2b16d7
addq %r8, %r13
movl %edx, %r8d
subl $0x1, %r8d
jb 0x2b170d
vmovdqa %ymm0, (%r13)
addq $0x20, %r13
jmp 0x2b16fb
incl %edi
jmp 0x2b16b3
movq -0x18(%rcx), %rdi
movq 0x10(%rsp), %r12
movl 0xe0(%r12,%rdi), %eax
cmpl $0x2, %eax
pushq $-0x40
popq %r13
jne 0x2b1943
movl 0xd0(%r12,%rdi), %r9d
movslq 0xd8(%r12,%rdi), %rax
movl 0x5c(%rsp), %ecx
imull %r9d, %ecx
shll $0x3, %ecx
movslq %ecx, %rcx
shlq $0x2, %rcx
addq 0x30(%rsp), %rcx
xorl %r10d, %r10d
testl %eax, %eax
movl $0x0, %edx
cmovgl %eax, %edx
movl 0xdc(%r12,%rdi), %esi
testl %esi, %esi
cmovlel %r10d, %esi
movl 0xd4(%r12,%rdi), %edi
testl %r9d, %r9d
cmovlel %r10d, %r9d
movq 0x80(%rsp), %r8
shlq $0x5, %rax
shlq $0x5, %rdx
shlq $0x5, %rsi
cmpl %r9d, %r10d
je 0x2b1824
leaq (%rcx,%rax), %r11
xorl %r15d, %r15d
xorl %r14d, %r14d
movq %rdx, %r12
addq %r14, %r12
je 0x2b17c5
vmovdqa (%r11,%r14), %ymm0
vmovdqa %ymm0, (%r8,%r15)
addq $-0x20, %r14
addq $0x20, %r15
jmp 0x2b17a7
subq %r14, %r8
xorl %ebp, %ebp
movq %rcx, %r11
cmpl 0x5c(%rsp), %ebp
jge 0x2b17e9
vmovdqa (%r11), %ymm0
vmovdqa %ymm0, (%r8)
addq $0x20, %r11
addq $0x20, %r8
incl %ebp
jmp 0x2b17cd
movq %r13, %r14
movq 0x10(%rsp), %r12
leaq (%rsi,%r14), %r15
cmpq $-0x40, %r15
je 0x2b1810
vmovdqa (%r11,%r14), %ymm0
vmovdqa %ymm0, (%r8)
addq $0x20, %r8
addq $-0x20, %r14
jmp 0x2b17f1
movslq 0x5c(%rsp), %r11
shlq $0x5, %r11
subq %r11, %rcx
incl %r10d
jmp 0x2b1794
xorl %r9d, %r9d
cmpl 0x60(%rsp), %r9d
jge 0x2b18a0
leaq (%rcx,%rax), %r10
xorl %r14d, %r14d
xorl %r11d, %r11d
movq %rdx, %r15
addq %r11, %r15
je 0x2b1856
vmovdqa (%r10,%r11), %ymm0
vmovdqa %ymm0, (%r8,%r14)
addq $-0x20, %r11
addq $0x20, %r14
jmp 0x2b1838
subq %r11, %r8
xorl %r10d, %r10d
cmpl 0x5c(%rsp), %r10d
jge 0x2b1879
vmovdqa (%rcx), %ymm0
vmovdqa %ymm0, (%r8)
addq $0x20, %rcx
addq $0x20, %r8
incl %r10d
jmp 0x2b185c
movq %r13, %r10
leaq (%rsi,%r10), %r11
cmpq $-0x40, %r11
je 0x2b189b
vmovdqa (%rcx,%r10), %ymm0
vmovdqa %ymm0, (%r8)
addq $0x20, %r8
addq $-0x20, %r10
jmp 0x2b187c
incl %r9d
jmp 0x2b1827
movslq 0x5c(%rsp), %r9
shlq $0x6, %r9
subq %r9, %rcx
xorl %r9d, %r9d
testl %edi, %edi
cmovlel %r9d, %edi
cmpl %edi, %r9d
je 0x2b1943
leaq (%rcx,%rax), %r10
xorl %r14d, %r14d
xorl %r11d, %r11d
movq %rdx, %r15
addq %r11, %r15
je 0x2b18e6
vmovdqa (%r10,%r11), %ymm0
vmovdqa %ymm0, (%r8,%r14)
addq $-0x20, %r11
addq $0x20, %r14
jmp 0x2b18c8
subq %r11, %r8
xorl %r11d, %r11d
movq %rcx, %r10
cmpl 0x5c(%rsp), %r11d
jge 0x2b190d
vmovdqa (%r10), %ymm0
vmovdqa %ymm0, (%r8)
addq $0x20, %r10
addq $0x20, %r8
incl %r11d
jmp 0x2b18ef
movq %r13, %r11
leaq (%rsi,%r11), %r14
cmpq $-0x40, %r14
je 0x2b192f
vmovdqa (%r10,%r11), %ymm0
vmovdqa %ymm0, (%r8)
addq $0x20, %r8
addq $-0x20, %r11
jmp 0x2b1910
movslq 0x5c(%rsp), %r10
shlq $0x5, %r10
subq %r10, %rcx
incl %r9d
jmp 0x2b18b5
movq 0x38(%rsp), %rax
testq %rax, %rax
je 0x2b1977
lock
decl (%rax)
jne 0x2b1977
movq 0x30(%rsp), %rsi
movq 0x50(%rsp), %rdi
testq %rdi, %rdi
je 0x2b196c
movq (%rdi), %rax
vzeroupper
callq *0x18(%rax)
jmp 0x2b1977
movq %rsi, %rdi
vzeroupper
callq 0x5f3e0
movq 0x88(%rsp), %rax
testq %rax, %rax
movq 0x28(%rsp), %r11
movq 0xd0(%rsp), %r9
je 0x2b19db
lock
decl (%rax)
jne 0x2b19db
movq 0x80(%rsp), %rsi
movq 0xa0(%rsp), %rdi
testq %rdi, %rdi
je 0x2b19c3
movq (%rdi), %rax
vzeroupper
callq *0x18(%rax)
movq 0x28(%rsp), %r11
movq 0xd0(%rsp), %r9
jmp 0x2b19db
movq %rsi, %rdi
vzeroupper
callq 0x5f3e0
movq 0x28(%rsp), %r11
movq 0xd0(%rsp), %r9
incq %r9
incl %r11d
jmp 0x2b1359
cmpl $0x0, 0xe0(%r11)
movq (%rsp), %rdi
jne 0x2b1157
movl 0xe8(%r11), %r13d
addl %r12d, %r13d
addl 0xec(%r11), %r13d
addl %eax, %r10d
addl 0xd4(%r11), %r10d
addl 0xd8(%r11), %esi
addl 0xdc(%r11), %esi
movq 0x8(%rsp), %rbx
movq %rbx, %rdi
movl %r10d, %edx
movl %r13d, %ecx
movq 0x18(%rsp), %r8
pushq 0x8(%rbp)
pushq $0x8
callq 0x62a26
addq $0x10, %rsp
cmpq $0x0, (%rbx)
je 0x2b1239
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x2b1239
xorl %ecx, %ecx
testl %r13d, %r13d
cmovlel %ecx, %r13d
movq 0x18(%rsp), %rdx
testl %edx, %edx
movl $0x0, %eax
movq %rax, 0x20(%rsp)
cmovlel %ecx, %edx
movq %rdx, 0x18(%rsp)
movabsq $0x100000001, %r11 # imm = 0x100000001
xorl %r14d, %r14d
movq 0x10(%rsp), %rbp
cmpq 0x18(%rsp), %r14
je 0x2b1239
movq (%rbp), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0xf0(%rbp,%rax)
je 0x2b1aca
movq 0xf8(%rbp,%rax), %rax
movq %r14, %rcx
shlq $0x5, %rcx
vmovdqu (%rax,%rcx), %ymm0
jmp 0x2b1ad4
vpbroadcastd 0xe4(%rbp,%rax), %ymm0
xorl %r15d, %r15d
vmovdqu %ymm0, 0xd0(%rsp)
cmpq %r13, %r15
je 0x2b1c86
movq 0x8(%rsp), %rax
movslq 0x2c(%rax), %rdx
movslq 0x30(%rax), %rsi
movq 0x40(%rax), %rdi
imulq %r14, %rdi
movq 0x10(%rax), %r8
imulq %r8, %rdi
addq (%rax), %rdi
movl 0x18(%rax), %r9d
movq 0x20(%rax), %r10
movq %rsi, %rax
imulq %rdx, %rax
movq %r15, %rcx
imulq %r8, %rcx
imulq %rax, %rcx
addq %rdi, %rcx
movq %rcx, 0x80(%rsp)
andq $0x0, 0x88(%rsp)
movq %r8, 0x90(%rsp)
movl %r9d, 0x98(%rsp)
movq %r10, 0xa0(%rsp)
movl $0x2, 0xa8(%rsp)
movl %edx, 0xac(%rsp)
movl %esi, 0xb0(%rsp)
movq %r11, 0xb4(%rsp)
movq %rax, 0xc0(%rsp)
movq (%rbp), %rdx
movq -0x18(%rdx), %rsi
movl 0xe8(%rbp,%rsi), %edi
movl %r15d, %edx
subl %edi, %edx
setns %dil
cmpl %r12d, %edx
setl %r8b
testb %r8b, %dil
je 0x2b1c6d
movq (%rsp), %rcx
movslq 0x2c(%rcx), %rax
movslq 0x30(%rcx), %rbp
movq 0x40(%rcx), %rdi
imulq %r14, %rdi
movq 0x10(%rcx), %r8
imulq %r8, %rdi
addq (%rcx), %rdi
movl 0x18(%rcx), %r9d
movq 0x20(%rcx), %r10
movq %rbp, %rcx
imulq %rax, %rcx
movl %edx, %edx
movq %r13, %rbx
movq %r11, %r13
movq %rcx, %r11
imulq %r8, %r11
imulq %rdx, %r11
addq %rdi, %r11
movq %r11, 0x30(%rsp)
andq $0x0, 0x38(%rsp)
movq %r8, 0x40(%rsp)
movl %r9d, 0x48(%rsp)
movq %r10, 0x50(%rsp)
movl $0x2, 0x58(%rsp)
movl %eax, 0x5c(%rsp)
movl %ebp, 0x60(%rsp)
movq 0x10(%rsp), %rbp
movq %r13, 0x64(%rsp)
movq %rcx, 0x70(%rsp)
movl 0xd0(%rbp,%rsi), %edx
movl 0xd4(%rbp,%rsi), %ecx
movl 0xd8(%rbp,%rsi), %r8d
movl 0xdc(%rbp,%rsi), %r9d
leaq 0x30(%rsp), %rdi
leaq 0x80(%rsp), %rsi
vmovups 0xd0(%rsp), %ymm0
callq 0x2b4597
vmovdqu 0xd0(%rsp), %ymm0
movq %r13, %r11
movq %rbx, %r13
incq %r15
jmp 0x2b1ae0
testl %eax, %eax
movl $0x0, %edx
cmovlel %edx, %eax
subl $0x1, %eax
jb 0x2b1c65
vmovdqu %ymm0, (%rcx)
addq $0x20, %rcx
jmp 0x2b1c77
incq %r14
jmp 0x2b1a97
movl 0xe8(%r11), %r14d
movq 0x18(%rsp), %rdi
leal (%r14,%rdi,4), %edx
addl 0xec(%r11), %edx
testb $0x3, %dl
sete %cl
movq %r9, %r8
shrq $0x2, %r8
addb %cl, %cl
movq %rdx, %r12
andl $0x7, %edx
movzbl %cl, %ecx
pushq $0x3
popq %r13
cmovel %r13d, %ecx
shlq %cl, %r8
testb $0x3, %r14b
jne 0x2b1153
cmpl $0x4, %edx
jne 0x2b1153
addl 0xd8(%r11), %esi
addl 0xdc(%r11), %esi
addl %eax, %r10d
addl 0xd4(%r11), %r10d
leal (,%rdi,4), %eax
cmpl %eax, %r12d
movq (%rsp), %rdi
je 0x2b1d11
cmpl $0x0, 0xe0(%r11)
jne 0x2b1157
movq %r12, %rcx
sarl $0x2, %ecx
subq $0x8, %rsp
pushq $0x4
popq %r9
movq 0x10(%rsp), %rbx
movq %rbx, %rdi
movl %r10d, %edx
movq %rcx, %r14
pushq 0x8(%rbp)
callq 0x628f2
addq $0x10, %rsp
cmpq $0x0, (%rbx)
je 0x2b1239
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x2b1239
movq 0x10(%rsp), %r12
movq (%r12), %rax
movq -0x18(%rax), %rax
movl 0xe8(%r12,%rax), %eax
pushq $-0x4
popq %rcx
cltd
idivl %ecx
movl %eax, %edx
xorl %ecx, %ecx
testl %r14d, %r14d
movl $0x0, %eax
movq %rax, 0x20(%rsp)
cmovlel %ecx, %r14d
movq %r14, 0xc8(%rsp)
movl %edx, 0x7c(%rsp)
movl %edx, %r11d
xorl %r9d, %r9d
cmpq 0xc8(%rsp), %r9
je 0x2b1239
movq 0x8(%rsp), %r10
movslq 0x2c(%r10), %rax
movslq 0x30(%r10), %rcx
movq 0x40(%r10), %r13
imulq %r9, %r13
movq 0x10(%r10), %rdi
imulq %rdi, %r13
addq (%r10), %r13
movl 0x34(%r10), %esi
movl 0x18(%r10), %edx
movq 0x20(%r10), %r8
movq %r13, 0x80(%rsp)
andq $0x0, 0x88(%rsp)
movq %rdi, 0x90(%rsp)
movl %edx, 0x98(%rsp)
movq %r8, 0xa0(%rsp)
movl %eax, 0xac(%rsp)
movl %ecx, 0xb0(%rsp)
movl $0x1, 0xb4(%rsp)
movl %esi, 0xb8(%rsp)
imulq %rax, %rcx
movq %rdi, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rdi
movq %rax, 0xc0(%rsp)
movl 0x28(%r10), %edx
leal -0x1(%rdx), %edi
movl %edi, 0xa8(%rsp)
cmpl $0x4, %edx
jne 0x2b1e5c
movq %rcx, 0xc0(%rsp)
movq %rcx, %rax
movq (%r12), %rcx
movq -0x18(%rcx), %rdi
cmpl $0x0, 0xf0(%r12,%rdi)
je 0x2b1e86
movq 0xf8(%r12,%rdi), %rdx
movq %r9, %r8
shlq $0x4, %r8
vmovdqu (%rdx,%r8), %xmm0
jmp 0x2b1e90
vpbroadcastd 0xe4(%r12,%rdi), %xmm0
movl %r11d, %r11d
movl 0x7c(%rsp), %edx
movq %r9, 0xd0(%rsp)
addl %r9d, %edx
setns %r8b
cmpl 0x18(%rsp), %edx
setl %r9b
testb %r9b, %r8b
je 0x2b204d
movq (%rsp), %r10
movslq 0x2c(%r10), %rax
movslq 0x30(%r10), %rsi
movl 0x34(%r10), %r8d
movq (%r10), %r12
movq 0x10(%r10), %r15
movq 0x40(%r10), %rbx
movl %edx, %ebp
imulq %rbx, %rbp
imulq %r15, %rbp
addq %r12, %rbp
movl 0x18(%r10), %edx
movq 0x20(%r10), %r9
movq %rbp, 0x30(%rsp)
andq $0x0, 0x38(%rsp)
movq %r15, 0x40(%rsp)
movl %edx, 0x48(%rsp)
movq %r9, 0x50(%rsp)
movl %eax, 0x5c(%rsp)
movl %esi, 0x60(%rsp)
movl $0x1, 0x64(%rsp)
movl %r8d, 0x68(%rsp)
imulq %rax, %rsi
movq %r15, %rax
imulq %rsi, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %r15
movq %rax, 0x70(%rsp)
movl 0x28(%r10), %eax
leal -0x1(%rax), %edx
movl %edx, 0x58(%rsp)
cmpl $0x4, %eax
jne 0x2b1f47
movq %rsi, 0x70(%rsp)
movq 0x10(%rsp), %r14
movl 0xe0(%r14,%rdi), %eax
testl %eax, %eax
movq %r11, 0x28(%rsp)
jne 0x2b1fa3
movl 0xd0(%r14,%rdi), %edx
movl 0xd4(%r14,%rdi), %ecx
movl 0xd8(%r14,%rdi), %r8d
movl 0xdc(%r14,%rdi), %r9d
leaq 0x30(%rsp), %rdi
leaq 0x80(%rsp), %rsi
callq 0x2b4637
movq 0x28(%rsp), %r11
movq (%r14), %rcx
movq -0x18(%rcx), %rdi
movl 0xe0(%r14,%rdi), %eax
cmpl $0x1, %eax
jne 0x2b2077
movq 0x10(%rsp), %rsi
movl 0xd8(%rsi,%rdi), %eax
xorl %r8d, %r8d
testl %eax, %eax
cmovlel %r8d, %eax
movl 0xdc(%rsi,%rdi), %edx
testl %edx, %edx
cmovlel %r8d, %edx
movl 0xd0(%rsi,%rdi), %r9d
testl %r9d, %r9d
cmovlel %r8d, %r9d
movl 0xd4(%rsi,%rdi), %esi
imulq %r15, %rbx
imulq %r11, %rbx
addq %rbx, %r12
cmpl %r9d, %r8d
je 0x2b2081
vmovdqa (%rbp), %xmm0
movl %eax, %edi
subl $0x1, %edi
jb 0x2b2010
vmovdqa %xmm0, (%r13)
addq $0x10, %r13
jmp 0x2b1fff
xorl %edi, %edi
xorl %r10d, %r10d
cmpl 0x5c(%rsp), %r10d
jge 0x2b2032
vmovdqa (%r12,%rdi), %xmm0
vmovdqa %xmm0, (%r13,%rdi)
incl %r10d
addq $0x10, %rdi
jmp 0x2b2015
addq %rdi, %r13
movl %edx, %edi
subl $0x1, %edi
jb 0x2b2048
vmovdqa %xmm0, (%r13)
addq $0x10, %r13
jmp 0x2b2037
incl %r8d
jmp 0x2b1fef
imull %eax, %esi
testl %esi, %esi
movl $0x0, %eax
cmovlel %eax, %esi
movq 0xd0(%rsp), %r9
subl $0x1, %esi
jb 0x2b2412
vmovdqu %xmm0, (%r13)
addq $0x10, %r13
jmp 0x2b2062
movq 0x10(%rsp), %r12
jmp 0x2b2165
xorl %edi, %edi
cmpl 0x60(%rsp), %edi
jge 0x2b20de
vmovdqa (%rbp), %xmm0
movl %eax, %r8d
subl $0x1, %r8d
jb 0x2b20a3
vmovdqa %xmm0, (%r13)
addq $0x10, %r13
jmp 0x2b2091
xorl %r8d, %r8d
cmpl 0x5c(%rsp), %r8d
jge 0x2b20c5
vmovdqa (%rbp), %xmm0
vmovdqa %xmm0, (%r13)
addq $0x10, %rbp
addq $0x10, %r13
incl %r8d
jmp 0x2b20a6
movl %edx, %r8d
subl $0x1, %r8d
jb 0x2b20da
vmovdqa %xmm0, (%r13)
addq $0x10, %r13
jmp 0x2b20c8
incl %edi
jmp 0x2b2083
movl 0x5c(%rsp), %edi
shll $0x2, %edi
movslq %edi, %rdi
shlq $0x2, %rdi
subq %rdi, %rbp
xorl %edi, %edi
testl %esi, %esi
cmovlel %edi, %esi
cmpl %esi, %edi
je 0x2b2154
vmovdqa (%rbp), %xmm0
movl %eax, %r8d
subl $0x1, %r8d
jb 0x2b2114
vmovdqa %xmm0, (%r13)
addq $0x10, %r13
jmp 0x2b2102
xorl %r8d, %r8d
xorl %r9d, %r9d
cmpl 0x5c(%rsp), %r9d
jge 0x2b2138
vmovdqa (%rbp,%r8), %xmm0
vmovdqa %xmm0, (%r13,%r8)
incl %r9d
addq $0x10, %r8
jmp 0x2b211a
addq %r8, %r13
movl %edx, %r8d
subl $0x1, %r8d
jb 0x2b2150
vmovdqa %xmm0, (%r13)
addq $0x10, %r13
jmp 0x2b213e
incl %edi
jmp 0x2b20f6
movq -0x18(%rcx), %rdi
movq 0x10(%rsp), %r12
movl 0xe0(%r12,%rdi), %eax
cmpl $0x2, %eax
pushq $-0x20
popq %r13
jne 0x2b2386
movl 0xd0(%r12,%rdi), %r9d
movslq 0xd8(%r12,%rdi), %rax
movl 0x5c(%rsp), %ecx
imull %r9d, %ecx
shll $0x2, %ecx
movslq %ecx, %rcx
shlq $0x2, %rcx
addq 0x30(%rsp), %rcx
xorl %r10d, %r10d
testl %eax, %eax
movl $0x0, %edx
cmovgl %eax, %edx
movl 0xdc(%r12,%rdi), %esi
testl %esi, %esi
cmovlel %r10d, %esi
movl 0xd4(%r12,%rdi), %edi
testl %r9d, %r9d
cmovlel %r10d, %r9d
movq 0x80(%rsp), %r8
shlq $0x4, %rax
shlq $0x4, %rdx
shlq $0x4, %rsi
cmpl %r9d, %r10d
je 0x2b2267
leaq (%rcx,%rax), %r11
xorl %r15d, %r15d
xorl %r14d, %r14d
movq %rdx, %r12
addq %r14, %r12
je 0x2b2208
vmovdqa (%r11,%r14), %xmm0
vmovdqa %xmm0, (%r8,%r15)
addq $-0x10, %r14
addq $0x10, %r15
jmp 0x2b21ea
subq %r14, %r8
xorl %ebp, %ebp
movq %rcx, %r11
cmpl 0x5c(%rsp), %ebp
jge 0x2b222c
vmovdqa (%r11), %xmm0
vmovdqa %xmm0, (%r8)
addq $0x10, %r11
addq $0x10, %r8
incl %ebp
jmp 0x2b2210
movq %r13, %r14
movq 0x10(%rsp), %r12
leaq (%rsi,%r14), %r15
cmpq $-0x20, %r15
je 0x2b2253
vmovdqa (%r11,%r14), %xmm0
vmovdqa %xmm0, (%r8)
addq $0x10, %r8
addq $-0x10, %r14
jmp 0x2b2234
movslq 0x5c(%rsp), %r11
shlq $0x4, %r11
subq %r11, %rcx
incl %r10d
jmp 0x2b21d7
xorl %r9d, %r9d
cmpl 0x60(%rsp), %r9d
jge 0x2b22e3
leaq (%rcx,%rax), %r10
xorl %r14d, %r14d
xorl %r11d, %r11d
movq %rdx, %r15
addq %r11, %r15
je 0x2b2299
vmovdqa (%r10,%r11), %xmm0
vmovdqa %xmm0, (%r8,%r14)
addq $-0x10, %r11
addq $0x10, %r14
jmp 0x2b227b
subq %r11, %r8
xorl %r10d, %r10d
cmpl 0x5c(%rsp), %r10d
jge 0x2b22bc
vmovdqa (%rcx), %xmm0
vmovdqa %xmm0, (%r8)
addq $0x10, %rcx
addq $0x10, %r8
incl %r10d
jmp 0x2b229f
movq %r13, %r10
leaq (%rsi,%r10), %r11
cmpq $-0x20, %r11
je 0x2b22de
vmovdqa (%rcx,%r10), %xmm0
vmovdqa %xmm0, (%r8)
addq $0x10, %r8
addq $-0x10, %r10
jmp 0x2b22bf
incl %r9d
jmp 0x2b226a
movslq 0x5c(%rsp), %r9
shlq $0x5, %r9
subq %r9, %rcx
xorl %r9d, %r9d
testl %edi, %edi
cmovlel %r9d, %edi
cmpl %edi, %r9d
je 0x2b2386
leaq (%rcx,%rax), %r10
xorl %r14d, %r14d
xorl %r11d, %r11d
movq %rdx, %r15
addq %r11, %r15
je 0x2b2329
vmovdqa (%r10,%r11), %xmm0
vmovdqa %xmm0, (%r8,%r14)
addq $-0x10, %r11
addq $0x10, %r14
jmp 0x2b230b
subq %r11, %r8
xorl %r11d, %r11d
movq %rcx, %r10
cmpl 0x5c(%rsp), %r11d
jge 0x2b2350
vmovdqa (%r10), %xmm0
vmovdqa %xmm0, (%r8)
addq $0x10, %r10
addq $0x10, %r8
incl %r11d
jmp 0x2b2332
movq %r13, %r11
leaq (%rsi,%r11), %r14
cmpq $-0x20, %r14
je 0x2b2372
vmovdqa (%r10,%r11), %xmm0
vmovdqa %xmm0, (%r8)
addq $0x10, %r8
addq $-0x10, %r11
jmp 0x2b2353
movslq 0x5c(%rsp), %r10
shlq $0x4, %r10
subq %r10, %rcx
incl %r9d
jmp 0x2b22f8
movq 0x38(%rsp), %rax
testq %rax, %rax
je 0x2b23b4
lock
decl (%rax)
jne 0x2b23b4
movq 0x30(%rsp), %rsi
movq 0x50(%rsp), %rdi
testq %rdi, %rdi
je 0x2b23ac
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2b23b4
movq %rsi, %rdi
callq 0x5f3e0
movq 0x88(%rsp), %rax
testq %rax, %rax
movq 0x28(%rsp), %r11
movq 0xd0(%rsp), %r9
je 0x2b2412
lock
decl (%rax)
jne 0x2b2412
movq 0x80(%rsp), %rsi
movq 0xa0(%rsp), %rdi
testq %rdi, %rdi
je 0x2b23fd
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x28(%rsp), %r11
movq 0xd0(%rsp), %r9
jmp 0x2b2412
movq %rsi, %rdi
callq 0x5f3e0
movq 0x28(%rsp), %r11
movq 0xd0(%rsp), %r9
incq %r9
incl %r11d
jmp 0x2b1d9c
movq 0x18(%rsp), %rdx
shll $0x4, %edx
movl 0xe8(%r11), %r14d
leal (%r14,%rdx), %ecx
addl 0xec(%r11), %ecx
testb $0xf, %cl
je 0x2b2bca
testb $0x7, %cl
je 0x2b2bdc
xorl %r8d, %r8d
testb $0x3, %cl
sete %r8b
leal (%r8,%r8,2), %r8d
incl %r8d
jmp 0x2b2be0
leal (%r10,%rax,8), %eax
addl 0xd4(%r11), %eax
testb $0x3, %al
sete %cl
movq %r9, %r8
shrq $0x3, %r8
addb %cl, %cl
testb $0x7, %al
movzbl %cl, %edx
pushq $0x3
popq %rcx
cmovnel %edx, %ecx
shlq %cl, %r8
orl %eax, %r10d
pushq $0x2
popq %r13
testb $0x7, %r10b
jne 0x2b1153
cmpl $0x0, 0xe0(%r11)
movq (%rsp), %rdi
jne 0x2b1157
addl 0xd8(%r11), %esi
addl 0xdc(%r11), %esi
pushq $0x8
popq %r12
cltd
idivl %r12d
movq 0x8(%rbp), %r9
movq 0x8(%rsp), %rbx
movq %rbx, %rdi
movl %eax, %edx
movq %r8, %rcx
movl %r12d, %r8d
callq 0x627de
cmpq $0x0, (%rbx)
je 0x2b1239
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x2b1239
movq 0x10(%rsp), %rdi
movq (%rdi), %rax
movq -0x18(%rax), %rsi
vpbroadcastd 0xe4(%rdi,%rsi), %ymm0
movl 0xd0(%rdi,%rsi), %eax
movl 0xd4(%rdi,%rsi), %ecx
cltd
idivl %r12d
movl %eax, %r10d
movl %ecx, %eax
cltd
idivl %r12d
movl 0xd8(%rdi,%rsi), %r8d
movl 0xdc(%rdi,%rsi), %r9d
movq (%rsp), %rdi
movq 0x8(%rsp), %rsi
movl %r10d, %edx
movl %eax, %ecx
callq 0x2b4597
jmp 0x2b3510
cmpl $0x0, 0xe0(%r11)
movq (%rsp), %rdi
jne 0x2b1157
movl 0xe8(%r11), %r13d
addl %r12d, %r13d
addl 0xec(%r11), %r13d
addl %eax, %r10d
addl 0xd4(%r11), %r10d
addl 0xd8(%r11), %esi
addl 0xdc(%r11), %esi
movq 0x8(%rsp), %rbx
movq %rbx, %rdi
movl %r10d, %edx
movl %r13d, %ecx
movq 0x18(%rsp), %r8
pushq 0x8(%rbp)
pushq $0x4
callq 0x62a26
addq $0x10, %rsp
cmpq $0x0, (%rbx)
je 0x2b1239
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x2b1239
xorl %ecx, %ecx
testl %r13d, %r13d
cmovlel %ecx, %r13d
movq 0x18(%rsp), %rdx
testl %edx, %edx
movl $0x0, %eax
movq %rax, 0x20(%rsp)
cmovlel %ecx, %edx
movq %rdx, 0x18(%rsp)
movabsq $0x100000001, %r11 # imm = 0x100000001
xorl %r14d, %r14d
movq 0x10(%rsp), %rbp
cmpq 0x18(%rsp), %r14
je 0x2b1239
movq (%rbp), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0xf0(%rbp,%rax)
je 0x2b2636
movq 0xf8(%rbp,%rax), %rax
movq %r14, %rcx
shlq $0x4, %rcx
vmovdqu (%rax,%rcx), %xmm0
jmp 0x2b2640
vpbroadcastd 0xe4(%rbp,%rax), %xmm0
xorl %r15d, %r15d
vmovdqa %xmm0, 0xd0(%rsp)
cmpq %r13, %r15
je 0x2b27f8
movq 0x8(%rsp), %rax
movslq 0x2c(%rax), %rdx
movslq 0x30(%rax), %rsi
movq 0x40(%rax), %rdi
imulq %r14, %rdi
movq 0x10(%rax), %r8
imulq %r8, %rdi
addq (%rax), %rdi
movl 0x18(%rax), %r9d
movq 0x20(%rax), %r10
movq %rsi, %rax
imulq %rdx, %rax
movq %r15, %rcx
imulq %r8, %rcx
imulq %rax, %rcx
addq %rdi, %rcx
movq %rcx, 0x80(%rsp)
andq $0x0, 0x88(%rsp)
movq %r8, 0x90(%rsp)
movl %r9d, 0x98(%rsp)
movq %r10, 0xa0(%rsp)
movl $0x2, 0xa8(%rsp)
movl %edx, 0xac(%rsp)
movl %esi, 0xb0(%rsp)
movq %r11, 0xb4(%rsp)
movq %rax, 0xc0(%rsp)
movq (%rbp), %rdx
movq -0x18(%rdx), %rsi
movl 0xe8(%rbp,%rsi), %edi
movl %r15d, %edx
subl %edi, %edx
setns %dil
cmpl %r12d, %edx
setl %r8b
testb %r8b, %dil
je 0x2b27df
movq (%rsp), %rcx
movslq 0x2c(%rcx), %rax
movslq 0x30(%rcx), %rbp
movq 0x40(%rcx), %rdi
imulq %r14, %rdi
movq 0x10(%rcx), %r8
imulq %r8, %rdi
addq (%rcx), %rdi
movl 0x18(%rcx), %r9d
movq 0x20(%rcx), %r10
movq %rbp, %rcx
imulq %rax, %rcx
movl %edx, %edx
movq %r13, %rbx
movl %r12d, %r13d
movq %r11, %r12
movq %rcx, %r11
imulq %r8, %r11
imulq %rdx, %r11
addq %rdi, %r11
movq %r11, 0x30(%rsp)
andq $0x0, 0x38(%rsp)
movq %r8, 0x40(%rsp)
movl %r9d, 0x48(%rsp)
movq %r10, 0x50(%rsp)
movl $0x2, 0x58(%rsp)
movl %eax, 0x5c(%rsp)
movl %ebp, 0x60(%rsp)
movq 0x10(%rsp), %rbp
movq %r12, 0x64(%rsp)
movq %rcx, 0x70(%rsp)
movl 0xd0(%rbp,%rsi), %edx
movl 0xd4(%rbp,%rsi), %ecx
movl 0xd8(%rbp,%rsi), %r8d
movl 0xdc(%rbp,%rsi), %r9d
leaq 0x30(%rsp), %rdi
leaq 0x80(%rsp), %rsi
vmovaps 0xd0(%rsp), %xmm0
callq 0x2b4637
vmovdqa 0xd0(%rsp), %xmm0
movq %r12, %r11
movl %r13d, %r12d
movq %rbx, %r13
incq %r15
jmp 0x2b264c
testl %eax, %eax
movl $0x0, %edx
cmovlel %edx, %eax
subl $0x1, %eax
jb 0x2b27d7
vmovdqu %xmm0, (%rcx)
addq $0x10, %rcx
jmp 0x2b27e9
incq %r14
jmp 0x2b2603
cmpl $0x0, 0xe0(%r11)
movq (%rsp), %rdi
jne 0x2b1157
movl 0xe8(%r11), %r13d
addl %r12d, %r13d
addl 0xec(%r11), %r13d
addl %eax, %r10d
addl 0xd4(%r11), %r10d
addl 0xd8(%r11), %esi
addl 0xdc(%r11), %esi
movq 0x8(%rsp), %rbx
movq %rbx, %rdi
movl %r10d, %edx
movl %r13d, %ecx
movq 0x18(%rsp), %r8
pushq 0x8(%rbp)
pushq $0x10
callq 0x62a26
addq $0x10, %rsp
cmpq $0x0, (%rbx)
je 0x2b1239
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x2b1239
xorl %ecx, %ecx
testl %r13d, %r13d
cmovlel %ecx, %r13d
movq 0x18(%rsp), %rdx
testl %edx, %edx
movl $0x0, %eax
movq %rax, 0x20(%rsp)
cmovlel %ecx, %edx
movq %rdx, 0x18(%rsp)
movabsq $0x100000001, %rbx # imm = 0x100000001
xorl %r14d, %r14d
movq 0x10(%rsp), %rbp
cmpq 0x18(%rsp), %r14
je 0x2b1239
movq (%rbp), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0xf0(%rbp,%rax)
je 0x2b28e6
movq 0xf8(%rbp,%rax), %rax
movq %r14, %rcx
shlq $0x6, %rcx
vmovdqu64 (%rax,%rcx), %zmm0
jmp 0x2b28ee
vpbroadcastd 0xe4(%rbp,%rax), %zmm0
xorl %r15d, %r15d
vmovdqu64 %zmm0, 0xd0(%rsp)
cmpq %r13, %r15
je 0x2b2a9c
movq 0x8(%rsp), %rax
movslq 0x2c(%rax), %rdx
movslq 0x30(%rax), %rsi
movq 0x40(%rax), %rdi
imulq %r14, %rdi
movq 0x10(%rax), %r8
imulq %r8, %rdi
addq (%rax), %rdi
movl 0x18(%rax), %r9d
movq 0x20(%rax), %r10
movq %rsi, %rax
imulq %rdx, %rax
movq %r15, %rcx
imulq %r8, %rcx
imulq %rax, %rcx
addq %rdi, %rcx
movq %rcx, 0x80(%rsp)
andq $0x0, 0x88(%rsp)
movq %r8, 0x90(%rsp)
movl %r9d, 0x98(%rsp)
movq %r10, 0xa0(%rsp)
movl $0x2, 0xa8(%rsp)
movl %edx, 0xac(%rsp)
movl %esi, 0xb0(%rsp)
movq %rbx, 0xb4(%rsp)
movq %rax, 0xc0(%rsp)
movq (%rbp), %rdx
movq -0x18(%rdx), %rsi
movl 0xe8(%rbp,%rsi), %edi
movl %r15d, %edx
subl %edi, %edx
setns %dil
cmpl %r12d, %edx
setl %r8b
testb %r8b, %dil
je 0x2b2a81
movq (%rsp), %rcx
movslq 0x2c(%rcx), %rax
movslq 0x30(%rcx), %rbp
movq 0x40(%rcx), %rdi
imulq %r14, %rdi
movq 0x10(%rcx), %r8
imulq %r8, %rdi
addq (%rcx), %rdi
movl 0x18(%rcx), %r9d
movq 0x20(%rcx), %r10
movq %rbp, %rcx
imulq %rax, %rcx
movl %edx, %edx
movq %rcx, %r11
imulq %r8, %r11
imulq %rdx, %r11
addq %rdi, %r11
movq %r11, 0x30(%rsp)
andq $0x0, 0x38(%rsp)
movq %r8, 0x40(%rsp)
movl %r9d, 0x48(%rsp)
movq %r10, 0x50(%rsp)
movl $0x2, 0x58(%rsp)
movl %eax, 0x5c(%rsp)
movl %ebp, 0x60(%rsp)
movq 0x10(%rsp), %rbp
movq %rbx, 0x64(%rsp)
movq %rcx, 0x70(%rsp)
movl 0xd0(%rbp,%rsi), %edx
movl 0xd4(%rbp,%rsi), %ecx
movl 0xd8(%rbp,%rsi), %r8d
movl 0xdc(%rbp,%rsi), %r9d
leaq 0x30(%rsp), %rdi
leaq 0x80(%rsp), %rsi
vmovups 0xd0(%rsp), %zmm0
callq 0x2b44ec
vmovdqu64 0xd0(%rsp), %zmm0
incq %r15
jmp 0x2b28fc
testl %eax, %eax
movl $0x0, %edx
cmovlel %edx, %eax
subl $0x1, %eax
jb 0x2b2a79
vmovdqu64 %zmm0, (%rcx)
addq $0x40, %rcx
jmp 0x2b2a8b
incq %r14
jmp 0x2b28b1
leal (%r10,%rax,4), %edx
addl 0xd4(%r11), %edx
testb $0x3, %dl
sete %cl
movq %r9, %rax
shrq $0x2, %rax
addb %cl, %cl
movl %edx, %r8d
andl $0x7, %r8d
movzbl %cl, %r14d
pushq $0x3
popq %rcx
cmovnel %r14d, %ecx
shlq %cl, %rax
pushq $0x2
popq %r13
testb $0x3, %r10b
jne 0x2b1153
cmpl $0x4, %r8d
jne 0x2b1153
cmpl $0x0, 0xe0(%r11)
movq (%rsp), %rdi
jne 0x2b1157
addl 0xd8(%r11), %esi
addl 0xdc(%r11), %esi
sarl $0x2, %edx
movq 0x8(%rbp), %r9
pushq $0x4
popq %rbp
movq 0x8(%rsp), %rbx
movq %rbx, %rdi
movq %rax, %rcx
movl %ebp, %r8d
callq 0x627de
cmpq $0x0, (%rbx)
je 0x2b1239
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x2b1239
movq 0x10(%rsp), %rdi
movq (%rdi), %rax
movq -0x18(%rax), %rsi
vpbroadcastd 0xe4(%rdi,%rsi), %xmm0
movl 0xd0(%rdi,%rsi), %eax
movl 0xd4(%rdi,%rsi), %ecx
cltd
idivl %ebp
movl %eax, %r10d
movl %ecx, %eax
cltd
idivl %ebp
movl 0xd8(%rdi,%rsi), %r8d
movl 0xdc(%rdi,%rsi), %r9d
movq (%rsp), %rdi
movq 0x8(%rsp), %rsi
movl %r10d, %edx
movl %eax, %ecx
callq 0x2b4637
jmp 0x2b3510
shll $0x4, %eax
addl %r10d, %eax
addl 0xd4(%r11), %eax
testb $0xf, %al
je 0x2b2bd5
testb $0x7, %al
je 0x2b343b
xorl %ecx, %ecx
testb $0x3, %al
sete %cl
leal (%rcx,%rcx,2), %ecx
incl %ecx
jmp 0x2b343e
pushq $0x10
jmp 0x2b2bde
pushq $0x10
jmp 0x2b3373
pushq $0x10
jmp 0x2b343d
pushq $0x8
popq %r8
pushq $0x3
popq %r13
testb $0xf, %r14b
jne 0x2b1153
cmpl $0x10, %r8d
jne 0x2b1153
addl 0xd8(%r11), %esi
addl 0xdc(%r11), %esi
addl %eax, %r10d
addl 0xd4(%r11), %r10d
cmpl %edx, %ecx
movq (%rsp), %rdi
je 0x2b2c26
cmpl $0x0, 0xe0(%r11)
jne 0x2b1157
shrq $0x4, %r9
movl %r8d, %r8d
imulq %r9, %r8
pushq $0x10
popq %r9
movl %ecx, %eax
cltd
idivl %r9d
subq $0x8, %rsp
movq 0x10(%rsp), %rbx
movq %rbx, %rdi
movl %r10d, %edx
movq %rax, %r14
movl %eax, %ecx
pushq 0x8(%rbp)
callq 0x628f2
addq $0x10, %rsp
cmpq $0x0, (%rbx)
je 0x2b1239
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x2b1239
movq 0x10(%rsp), %r12
movq (%r12), %rax
movq -0x18(%rax), %rax
movl 0xe8(%r12,%rax), %eax
pushq $-0x10
popq %rcx
cltd
idivl %ecx
movl %eax, %edx
xorl %ecx, %ecx
testl %r14d, %r14d
movl $0x0, %eax
movq %rax, 0x20(%rsp)
cmovlel %ecx, %r14d
movq %r14, 0xc8(%rsp)
movl %edx, 0x7c(%rsp)
movl %edx, %r11d
xorl %r9d, %r9d
cmpq 0xc8(%rsp), %r9
je 0x2b1239
movq 0x8(%rsp), %r10
movslq 0x2c(%r10), %rax
movslq 0x30(%r10), %rcx
movq 0x40(%r10), %r13
imulq %r9, %r13
movq 0x10(%r10), %rdi
imulq %rdi, %r13
addq (%r10), %r13
movl 0x34(%r10), %esi
movl 0x18(%r10), %edx
movq 0x20(%r10), %r8
movq %r13, 0x80(%rsp)
andq $0x0, 0x88(%rsp)
movq %rdi, 0x90(%rsp)
movl %edx, 0x98(%rsp)
movq %r8, 0xa0(%rsp)
movl %eax, 0xac(%rsp)
movl %ecx, 0xb0(%rsp)
movl $0x1, 0xb4(%rsp)
movl %esi, 0xb8(%rsp)
imulq %rax, %rcx
movq %rdi, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rdi
movq %rax, 0xc0(%rsp)
movl 0x28(%r10), %edx
leal -0x1(%rdx), %edi
movl %edi, 0xa8(%rsp)
cmpl $0x4, %edx
jne 0x2b2d7e
movq %rcx, 0xc0(%rsp)
movq %rcx, %rax
movq (%r12), %rcx
movq -0x18(%rcx), %rdi
cmpl $0x0, 0xf0(%r12,%rdi)
je 0x2b2da9
movq 0xf8(%r12,%rdi), %rdx
movq %r9, %r8
shlq $0x6, %r8
vmovdqu64 (%rdx,%r8), %zmm0
jmp 0x2b2db1
vpbroadcastd 0xe4(%r12,%rdi), %zmm0
movl %r11d, %r11d
movl 0x7c(%rsp), %edx
movq %r9, 0xd0(%rsp)
addl %r9d, %edx
setns %r8b
cmpl 0x18(%rsp), %edx
setl %r9b
testb %r9b, %r8b
je 0x2b2f74
movq (%rsp), %r10
movslq 0x2c(%r10), %rax
movslq 0x30(%r10), %rsi
movl 0x34(%r10), %r8d
movq (%r10), %r12
movq 0x10(%r10), %r15
movq 0x40(%r10), %rbx
movl %edx, %ebp
imulq %rbx, %rbp
imulq %r15, %rbp
addq %r12, %rbp
movl 0x18(%r10), %edx
movq 0x20(%r10), %r9
movq %rbp, 0x30(%rsp)
andq $0x0, 0x38(%rsp)
movq %r15, 0x40(%rsp)
movl %edx, 0x48(%rsp)
movq %r9, 0x50(%rsp)
movl %eax, 0x5c(%rsp)
movl %esi, 0x60(%rsp)
movl $0x1, 0x64(%rsp)
movl %r8d, 0x68(%rsp)
imulq %rax, %rsi
movq %r15, %rax
imulq %rsi, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %r15
movq %rax, 0x70(%rsp)
movl 0x28(%r10), %eax
leal -0x1(%rax), %edx
movl %edx, 0x58(%rsp)
cmpl $0x4, %eax
jne 0x2b2e68
movq %rsi, 0x70(%rsp)
movq 0x10(%rsp), %r14
movl 0xe0(%r14,%rdi), %eax
testl %eax, %eax
movq %r11, 0x28(%rsp)
jne 0x2b2ec4
movl 0xd0(%r14,%rdi), %edx
movl 0xd4(%r14,%rdi), %ecx
movl 0xd8(%r14,%rdi), %r8d
movl 0xdc(%r14,%rdi), %r9d
leaq 0x30(%rsp), %rdi
leaq 0x80(%rsp), %rsi
callq 0x2b44ec
movq 0x28(%rsp), %r11
movq (%r14), %rcx
movq -0x18(%rcx), %rdi
movl 0xe0(%r14,%rdi), %eax
cmpl $0x1, %eax
jne 0x2b2f9f
movq 0x10(%rsp), %rsi
movl 0xd8(%rsi,%rdi), %eax
xorl %r8d, %r8d
testl %eax, %eax
cmovlel %r8d, %eax
movl 0xdc(%rsi,%rdi), %edx
testl %edx, %edx
cmovlel %r8d, %edx
movl 0xd0(%rsi,%rdi), %r9d
testl %r9d, %r9d
cmovlel %r8d, %r9d
movl 0xd4(%rsi,%rdi), %esi
imulq %r15, %rbx
imulq %r11, %rbx
addq %rbx, %r12
cmpl %r9d, %r8d
je 0x2b2fa9
vmovdqa64 (%rbp), %zmm0
movl %eax, %edi
subl $0x1, %edi
jb 0x2b2f34
vmovdqa64 %zmm0, (%r13)
addq $0x40, %r13
jmp 0x2b2f22
xorl %edi, %edi
xorl %r10d, %r10d
cmpl 0x5c(%rsp), %r10d
jge 0x2b2f58
vmovdqa64 (%r12,%rdi), %zmm0
vmovdqa64 %zmm0, (%r13,%rdi)
incl %r10d
addq $0x40, %rdi
jmp 0x2b2f39
addq %rdi, %r13
movl %edx, %edi
subl $0x1, %edi
jb 0x2b2f6f
vmovdqa64 %zmm0, (%r13)
addq $0x40, %r13
jmp 0x2b2f5d
incl %r8d
jmp 0x2b2f10
imull %eax, %esi
testl %esi, %esi
movl $0x0, %eax
cmovlel %eax, %esi
movq 0xd0(%rsp), %r9
subl $0x1, %esi
jb 0x2b3366
vmovdqu64 %zmm0, (%r13)
addq $0x40, %r13
jmp 0x2b2f89
movq 0x10(%rsp), %r12
jmp 0x2b309a
xorl %edi, %edi
cmpl 0x60(%rsp), %edi
jge 0x2b300d
vmovdqa64 (%rbp), %zmm0
movl %eax, %r8d
subl $0x1, %r8d
jb 0x2b2fce
vmovdqa64 %zmm0, (%r13)
addq $0x40, %r13
jmp 0x2b2fbb
xorl %r8d, %r8d
cmpl 0x5c(%rsp), %r8d
jge 0x2b2ff3
vmovdqa64 (%rbp), %zmm0
vmovdqa64 %zmm0, (%r13)
addq $0x40, %rbp
addq $0x40, %r13
incl %r8d
jmp 0x2b2fd1
movl %edx, %r8d
subl $0x1, %r8d
jb 0x2b3009
vmovdqa64 %zmm0, (%r13)
addq $0x40, %r13
jmp 0x2b2ff6
incl %edi
jmp 0x2b2fab
movl 0x5c(%rsp), %edi
shll $0x4, %edi
movslq %edi, %rdi
shlq $0x2, %rdi
subq %rdi, %rbp
xorl %edi, %edi
testl %esi, %esi
cmovlel %edi, %esi
cmpl %esi, %edi
je 0x2b3089
vmovdqa64 (%rbp), %zmm0
movl %eax, %r8d
subl $0x1, %r8d
jb 0x2b3046
vmovdqa64 %zmm0, (%r13)
addq $0x40, %r13
jmp 0x2b3033
xorl %r8d, %r8d
xorl %r9d, %r9d
cmpl 0x5c(%rsp), %r9d
jge 0x2b306c
vmovdqa64 (%rbp,%r8), %zmm0
vmovdqa64 %zmm0, (%r13,%r8)
incl %r9d
addq $0x40, %r8
jmp 0x2b304c
addq %r8, %r13
movl %edx, %r8d
subl $0x1, %r8d
jb 0x2b3085
vmovdqa64 %zmm0, (%r13)
addq $0x40, %r13
jmp 0x2b3072
incl %edi
jmp 0x2b3025
movq -0x18(%rcx), %rdi
movq 0x10(%rsp), %r12
movl 0xe0(%r12,%rdi), %eax
cmpl $0x2, %eax
pushq $-0x80
popq %r13
jne 0x2b32ce
movl 0xd0(%r12,%rdi), %r9d
movslq 0xd8(%r12,%rdi), %rax
movl 0x5c(%rsp), %ecx
imull %r9d, %ecx
shll $0x4, %ecx
movslq %ecx, %rcx
shlq $0x2, %rcx
addq 0x30(%rsp), %rcx
xorl %r10d, %r10d
testl %eax, %eax
movl $0x0, %edx
cmovgl %eax, %edx
movl 0xdc(%r12,%rdi), %esi
testl %esi, %esi
cmovlel %r10d, %esi
movl 0xd4(%r12,%rdi), %edi
testl %r9d, %r9d
cmovlel %r10d, %r9d
movq 0x80(%rsp), %r8
shlq $0x6, %rax
shlq $0x6, %rdx
shlq $0x6, %rsi
cmpl %r9d, %r10d
je 0x2b31a2
leaq (%rcx,%rax), %r11
xorl %r15d, %r15d
xorl %r14d, %r14d
movq %rdx, %r12
addq %r14, %r12
je 0x2b313f
vmovdqa64 (%r11,%r14), %zmm0
vmovdqa64 %zmm0, (%r8,%r15)
addq $-0x40, %r14
addq $0x40, %r15
jmp 0x2b311f
subq %r14, %r8
xorl %ebp, %ebp
movq %rcx, %r11
cmpl 0x5c(%rsp), %ebp
jge 0x2b3165
vmovdqa64 (%r11), %zmm0
vmovdqa64 %zmm0, (%r8)
addq $0x40, %r11
addq $0x40, %r8
incl %ebp
jmp 0x2b3147
movq %r13, %r14
movq 0x10(%rsp), %r12
leaq (%rsi,%r14), %r15
cmpq $-0x80, %r15
je 0x2b318e
vmovdqa64 (%r11,%r14), %zmm0
vmovdqa64 %zmm0, (%r8)
addq $0x40, %r8
addq $-0x40, %r14
jmp 0x2b316d
movslq 0x5c(%rsp), %r11
shlq $0x6, %r11
subq %r11, %rcx
incl %r10d
jmp 0x2b310c
xorl %r9d, %r9d
cmpl 0x60(%rsp), %r9d
jge 0x2b3225
leaq (%rcx,%rax), %r10
xorl %r14d, %r14d
xorl %r11d, %r11d
movq %rdx, %r15
addq %r11, %r15
je 0x2b31d6
vmovdqa64 (%r10,%r11), %zmm0
vmovdqa64 %zmm0, (%r8,%r14)
addq $-0x40, %r11
addq $0x40, %r14
jmp 0x2b31b6
subq %r11, %r8
xorl %r10d, %r10d
cmpl 0x5c(%rsp), %r10d
jge 0x2b31fc
vmovdqa64 (%rcx), %zmm0
vmovdqa64 %zmm0, (%r8)
addq $0x40, %rcx
addq $0x40, %r8
incl %r10d
jmp 0x2b31dc
movq %r13, %r10
leaq (%rsi,%r10), %r11
cmpq $-0x80, %r11
je 0x2b3220
vmovdqa64 (%rcx,%r10), %zmm0
vmovdqa64 %zmm0, (%r8)
addq $0x40, %r8
addq $-0x40, %r10
jmp 0x2b31ff
incl %r9d
jmp 0x2b31a5
movslq 0x5c(%rsp), %r9
shlq $0x7, %r9
subq %r9, %rcx
xorl %r9d, %r9d
testl %edi, %edi
cmovlel %r9d, %edi
cmpl %edi, %r9d
je 0x2b32ce
leaq (%rcx,%rax), %r10
xorl %r14d, %r14d
xorl %r11d, %r11d
movq %rdx, %r15
addq %r11, %r15
je 0x2b326d
vmovdqa64 (%r10,%r11), %zmm0
vmovdqa64 %zmm0, (%r8,%r14)
addq $-0x40, %r11
addq $0x40, %r14
jmp 0x2b324d
subq %r11, %r8
xorl %r11d, %r11d
movq %rcx, %r10
cmpl 0x5c(%rsp), %r11d
jge 0x2b3296
vmovdqa64 (%r10), %zmm0
vmovdqa64 %zmm0, (%r8)
addq $0x40, %r10
addq $0x40, %r8
incl %r11d
jmp 0x2b3276
movq %r13, %r11
leaq (%rsi,%r11), %r14
cmpq $-0x80, %r14
je 0x2b32ba
vmovdqa64 (%r10,%r11), %zmm0
vmovdqa64 %zmm0, (%r8)
addq $0x40, %r8
addq $-0x40, %r11
jmp 0x2b3299
movslq 0x5c(%rsp), %r10
shlq $0x6, %r10
subq %r10, %rcx
incl %r9d
jmp 0x2b323a
movq 0x38(%rsp), %rax
testq %rax, %rax
je 0x2b3302
lock
decl (%rax)
jne 0x2b3302
movq 0x30(%rsp), %rsi
movq 0x50(%rsp), %rdi
testq %rdi, %rdi
je 0x2b32f7
movq (%rdi), %rax
vzeroupper
callq *0x18(%rax)
jmp 0x2b3302
movq %rsi, %rdi
vzeroupper
callq 0x5f3e0
movq 0x88(%rsp), %rax
testq %rax, %rax
movq 0x28(%rsp), %r11
movq 0xd0(%rsp), %r9
je 0x2b3366
lock
decl (%rax)
jne 0x2b3366
movq 0x80(%rsp), %rsi
movq 0xa0(%rsp), %rdi
testq %rdi, %rdi
je 0x2b334e
movq (%rdi), %rax
vzeroupper
callq *0x18(%rax)
movq 0x28(%rsp), %r11
movq 0xd0(%rsp), %r9
jmp 0x2b3366
movq %rsi, %rdi
vzeroupper
callq 0x5f3e0
movq 0x28(%rsp), %r11
movq 0xd0(%rsp), %r9
incq %r9
incl %r11d
jmp 0x2b2cbe
pushq $0x8
popq %rcx
pushq $0x1
popq %r13
testb $0xf, %al
jne 0x2b1153
cmpl $0x10, %ecx
jne 0x2b1153
cmpl $0x0, 0xe0(%r11)
movq (%rsp), %rdi
jne 0x2b1157
shrq $0x4, %r9
movl %ecx, %ecx
imulq %r9, %rcx
pushq $0x10
popq %r12
movl %esi, %eax
cltd
idivl %r12d
movq 0x8(%rbp), %r8
movq 0x8(%rsp), %rbx
movq %rbx, %rdi
movl %eax, %esi
movq %rcx, %rdx
movl %r12d, %ecx
callq 0x626da
cmpq $0x0, (%rbx)
je 0x2b1239
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x2b1239
movq 0x10(%rsp), %rdx
movq (%rdx), %rax
movq -0x18(%rax), %rcx
vpbroadcastd 0xe4(%rdx,%rcx), %zmm0
movl 0xd8(%rdx,%rcx), %eax
movl 0xdc(%rdx,%rcx), %ecx
cltd
idivl %r12d
movl %eax, %r8d
movl %ecx, %eax
cltd
idivl %r12d
movq $0x0, 0x20(%rsp)
movq (%rsp), %rdi
movq 0x8(%rsp), %rsi
xorl %edx, %edx
xorl %ecx, %ecx
movl %eax, %r9d
callq 0x2b44ec
jmp 0x2b1239
pushq $0x8
popq %rcx
pushq $0x2
popq %r13
testb $0xf, %r10b
jne 0x2b1153
cmpl $0x10, %ecx
jne 0x2b1153
cmpl $0x0, 0xe0(%r11)
movq (%rsp), %rdi
jne 0x2b1157
addl 0xd8(%r11), %esi
addl 0xdc(%r11), %esi
shrq $0x4, %r9
movl %ecx, %ecx
imulq %r9, %rcx
pushq $0x10
popq %r12
cltd
idivl %r12d
movq 0x8(%rbp), %r9
movq 0x8(%rsp), %rbx
movq %rbx, %rdi
movl %eax, %edx
movl %r12d, %r8d
callq 0x627de
cmpq $0x0, (%rbx)
je 0x2b1239
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x2b1239
movq 0x10(%rsp), %rdi
movq (%rdi), %rax
movq -0x18(%rax), %rsi
vpbroadcastd 0xe4(%rdi,%rsi), %zmm0
movl 0xd0(%rdi,%rsi), %eax
movl 0xd4(%rdi,%rsi), %ecx
cltd
idivl %r12d
movl %eax, %r10d
movl %ecx, %eax
cltd
idivl %r12d
movl 0xd8(%rdi,%rsi), %r8d
movl 0xdc(%rdi,%rsi), %r9d
movq (%rsp), %rdi
movq 0x8(%rsp), %rsi
movl %r10d, %edx
movl %eax, %ecx
callq 0x2b44ec
movq $0x0, 0x20(%rsp)
jmp 0x2b1239
movq $0x0, 0x20(%rsp)
cmpq %r14, 0x8(%rsp)
je 0x2b1239
movq 0x8(%r14), %rax
testq %rax, %rax
je 0x2b353e
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x2b3572
lock
decl (%rax)
jne 0x2b3572
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
je 0x2b356a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2b3572
movq %rsi, %rdi
callq 0x5f3e0
movq 0x8(%rsp), %rcx
andq $0x0, 0x40(%rcx)
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, (%rcx)
vmovups %xmm0, 0xc(%rcx)
andl $0x0, 0x38(%rcx)
vmovups %xmm0, 0x28(%rcx)
vmovups (%r14), %xmm0
vmovups %xmm0, (%rcx)
movq 0x10(%r14), %rax
movq %rax, 0x10(%rcx)
movl 0x18(%r14), %eax
movl %eax, 0x18(%rcx)
movq 0x20(%r14), %rax
movq %rax, 0x20(%rcx)
vmovdqu 0x28(%r14), %xmm0
vmovdqu %xmm0, 0x28(%rcx)
movl 0x38(%r14), %eax
movl %eax, 0x38(%rcx)
movq 0x40(%r14), %rax
movq %rax, 0x40(%rcx)
jmp 0x2b1239
jmp 0x2b3623
jmp 0x2b3623
jmp 0x2b3623
jmp 0x2b3623
jmp 0x2b3623
jmp 0x2b3623
jmp 0x2b3623
jmp 0x2b35e1
movq %rax, %rbx
movq 0x88(%rsp), %rax
testq %rax, %rax
je 0x2b361b
lock
decl (%rax)
jne 0x2b361b
movq 0x80(%rsp), %rsi
movq 0xa0(%rsp), %rdi
testq %rdi, %rdi
jne 0x2b3615
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2b361b
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/padding_x86_avx512.cpp |
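The listing above repeatedly broadcasts a pad value into a zmm register (vpbroadcastd ... %zmm0) and then stores it in 64-byte strides (vmovdqu64 %zmm0, (%r13) followed by addq $0x40, %r13). A minimal intrinsics sketch of that constant-fill pattern, assuming an AVX-512 target; the helper name is illustrative, not ncnn API:

#include <immintrin.h>

// Broadcast one float into all 16 lanes, then store 64 bytes per iteration,
// mirroring the vpbroadcastd + vmovdqu64 fill loop in the listing above.
static void fill_f32_avx512(float* p, float v, int n16)
{
    __m512 pad = _mm512_set1_ps(v);
    for (int i = 0; i < n16; i++) // n16 = number of 16-float groups to fill
    {
        _mm512_storeu_ps(p, pad);
        p += 16;
    }
}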
ncnn::Padding_x86_avx512::forward_int8(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const | int Padding_x86_avx512::forward_int8(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int d = bottom_blob.d;
int channels = bottom_blob.c;
int dims = bottom_blob.dims;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
#if __SSE2__
if (elempack == 8)
{
if (dims == 1)
{
int outw = w * elempack + left + right;
int out_elempack = outw % 8 == 0 ? 8 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
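// fast path: constant padding where the left pad is a whole number of
// 8-lane packs, so the output stays in elempack=8 layout with no repacking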
if (left % 8 == 0 && out_elempack == 8 && type == 0)
{
top_blob.create(outw / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
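// replicate the truncated pad byte into all eight int8 lanes of an int64,
// so each packed element can be written with a single 64-bit store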
int64_t v8 = (int64_t)value;
int64_t pad_value = v8 | (v8 << 8) | (v8 << 16) | (v8 << 24) | (v8 << 32) | (v8 << 40) | (v8 << 48) | (v8 << 56);
padding_constant_pack8_int8_sse(bottom_blob, top_blob, 0, 0, left / 8, right / 8, pad_value);
return 0;
}
}
if (dims == 2)
{
int outw = w + left + right;
int outh = h * elempack + top + bottom;
int out_elempack = outh % 8 == 0 ? 8 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (top % 8 == 0 && out_elempack == 8 && type == 0)
{
top_blob.create(outw, outh / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
int64_t v8 = (int64_t)value;
int64_t pad_value = v8 | (v8 << 8) | (v8 << 16) | (v8 << 24) | (v8 << 32) | (v8 << 40) | (v8 << 48) | (v8 << 56);
padding_constant_pack8_int8_sse(bottom_blob, top_blob, top / 8, bottom / 8, left, right, pad_value);
return 0;
}
}
if (dims == 3)
{
int outw = w + left + right;
int outh = h + top + bottom;
int outc = channels * elempack + front + behind;
int out_elempack = outc % 8 == 0 ? 8 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
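// replicate/reflect padding cannot synthesize new channels, so the fast
// path requires an unchanged channel count unless the type is constant (0)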
if (front % 8 == 0 && out_elempack == 8 && !(outc != channels * elempack && type != 0))
{
top_blob.create(outw, outh, outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
int front_ = front / elempack;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < outc / out_elempack; q++)
{
Mat borderm = top_blob.channel(q);
// TODO perchannel
// int64_t pad_value = per_channel_pad_data_size ? vld1_s8(per_channel_pad_data + q * 8) : vdup_n_s8((signed char)value);
int64_t v8 = (int64_t)value;
int64_t pad_value = v8 | (v8 << 8) | (v8 << 16) | (v8 << 24) | (v8 << 32) | (v8 << 40) | (v8 << 48) | (v8 << 56);
//Channel padding
if ((q - front_) < 0 || (q - front_) >= channels)
{
borderm.fill<int64_t>(pad_value);
}
else
{
const Mat m = bottom_blob.channel(q - front_);
if (type == 0)
padding_constant_pack8_int8_sse(m, borderm, top, bottom, left, right, pad_value);
if (type == 1)
padding_replicate_pack8_int8_sse(m, borderm, top, bottom, left, right);
if (type == 2)
padding_reflect_pack8_int8_sse(m, borderm, top, bottom, left, right);
}
}
return 0;
}
}
if (dims == 4)
{
int outw = w + left + right;
int outh = h + top + bottom;
int outd = d + front + behind;
if (type == 0)
{
top_blob.create(outw, outh, outd, channels, elemsize, elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
// TODO perchannel
// int64_t pad_value = per_channel_pad_data_size ? vld1_s8(per_channel_pad_data + q * 8) : vdup_n_s8((signed char)value);
int64_t v8 = (int64_t)value;
int64_t pad_value = v8 | (v8 << 8) | (v8 << 16) | (v8 << 24) | (v8 << 32) | (v8 << 40) | (v8 << 48) | (v8 << 56);
for (int z = 0; z < outd; z++)
{
Mat borderm = top_blob.channel(q).depth(z);
// depth padding
if ((z - front) < 0 || (z - front) >= d)
{
borderm.fill<int64_t>(pad_value);
}
else
{
const Mat m = bottom_blob.channel(q).depth(z - front);
padding_constant_pack8_int8_sse(m, borderm, top, bottom, left, right, pad_value);
}
}
}
return 0;
}
}
}
#endif // __SSE2__
Mat bottom_blob_unpacked = bottom_blob;
if (elempack != 1)
{
Option opt_pack1 = opt;
opt_pack1.blob_allocator = opt.workspace_allocator;
convert_packing(bottom_blob, bottom_blob_unpacked, 1, opt_pack1);
}
return Padding::forward(bottom_blob_unpacked, top_blob, opt);
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x128, %rsp # imm = 0x128
movq %rcx, %r12
movq %rdx, %rbx
movq %rdi, %r14
movl 0x38(%rsi), %r15d
movq 0x10(%rsi), %r9
movl 0x18(%rsi), %edi
movq %rsi, 0x28(%rsp)
vmovdqu 0x28(%rsi), %xmm0
cmpl $0x8, %edi
jne 0x2b42dc
vmovd %xmm0, %eax
decl %eax
cmpl $0x3, %eax
ja 0x2b42dc
leaq 0x14512e(%rip), %rcx # 0x3f87a8
movslq (%rcx,%rax,4), %rax
vpextrd $0x1, %xmm0, %esi
addq %rcx, %rax
vpextrd $0x2, %xmm0, %edx
pushq $-0x64
popq %rcx
movq %rcx, 0x20(%rsp)
movq %r14, 0x18(%rsp)
jmpq *%rax
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xd8(%r14,%rax), %r8d
leal (%r8,%rsi,8), %esi
addl 0xdc(%r14,%rax), %esi
xorl %r10d, %r10d
testb $0x7, %sil
sete %r10b
movq %r9, %rdx
shrq $0x3, %rdx
leal (%r10,%r10,2), %ecx
shlq %cl, %rdx
testb $0x7, %r8b
sete %cl
andb %cl, %r10b
cmpb $0x1, %r10b
jne 0x2b42dc
cmpl $0x0, 0xe0(%r14,%rax)
jne 0x2b42dc
sarl $0x3, %esi
movq 0x8(%r12), %r8
pushq $0x8
popq %rbp
movq %rbx, %rdi
movl %ebp, %ecx
callq 0x626da
cmpq $0x0, (%rbx)
je 0x2b43a8
movslq 0x38(%rbx), %rax
imulq 0x40(%rbx), %rax
testq %rax, %rax
je 0x2b43a8
movq 0x18(%rsp), %r8
movq (%r8), %rax
movq -0x18(%rax), %rcx
vcvttss2si 0xe4(%r8,%rcx), %rax
movq %rax, %rdx
shlq $0x8, %rdx
movq %rax, %rsi
shlq $0x10, %rsi
orq %rdx, %rsi
movq %rax, %rdx
shlq $0x18, %rdx
movq %rax, %rdi
shlq $0x20, %rdi
orq %rdx, %rdi
orq %rsi, %rdi
movq %rax, %rdx
shlq $0x28, %rdx
movq %rax, %rsi
shlq $0x30, %rsi
orq %rdx, %rsi
movq %rax, %r9
shlq $0x38, %r9
orq %rsi, %r9
orq %rdi, %r9
orq %rax, %r9
movl 0xd8(%r8,%rcx), %eax
movl 0xdc(%r8,%rcx), %ecx
cltd
idivl %ebp
movl %eax, %r8d
movl %ecx, %eax
cltd
idivl %ebp
movq %r9, (%rsp)
movq $0x0, 0x20(%rsp)
movq 0x28(%rsp), %rdi
movq %rbx, %rsi
xorl %edx, %edx
xorl %ecx, %ecx
movl %eax, %r9d
callq 0x2b46e0
jmp 0x2b43a8
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xe8(%r14,%rax), %r10d
leal (%r10,%r15,8), %r13d
addl 0xec(%r14,%rax), %r13d
xorl %r11d, %r11d
testb $0x7, %r13b
sete %r11b
movq %r9, %r8
shrq $0x3, %r8
leal (%r11,%r11,2), %ecx
shlq %cl, %r8
testb $0x7, %r10b
sete %cl
andb %cl, %r11b
cmpb $0x1, %r11b
jne 0x2b42dc
addl 0xd8(%r14,%rax), %esi
addl 0xdc(%r14,%rax), %esi
addl 0xd0(%r14,%rax), %edx
addl 0xd4(%r14,%rax), %edx
leal (,%r15,8), %ecx
cmpl %ecx, %r13d
je 0x2b3845
cmpl $0x0, 0xe0(%r14,%rax)
jne 0x2b42dc
sarl $0x3, %r13d
movq 0x8(%r12), %rax
movq %rax, (%rsp)
pushq $0x8
popq %r9
movq %rbx, %rdi
movl %r13d, %ecx
callq 0x628f2
cmpq $0x0, (%rbx)
je 0x2b43a8
movslq 0x38(%rbx), %rax
imulq 0x40(%rbx), %rax
testq %rax, %rax
je 0x2b43a8
movq 0x18(%rsp), %rcx
movq (%rcx), %rax
movq %rax, 0x98(%rsp)
movq -0x18(%rax), %rax
movl 0xe8(%rcx,%rax), %eax
pushq $-0x8
popq %rcx
cltd
idivl %ecx
movl %eax, %r12d
xorl %ecx, %ecx
testl %r13d, %r13d
movl $0x0, %eax
movq %rax, 0x20(%rsp)
cmovlel %ecx, %r13d
movl %r12d, %r10d
xorl %ecx, %ecx
movq %r15, 0xa0(%rsp)
movq %r13, 0xc0(%rsp)
movl %r12d, 0xac(%rsp)
cmpq %r13, %rcx
je 0x2b43a8
movslq 0x2c(%rbx), %rax
movslq 0x30(%rbx), %r9
movl 0x34(%rbx), %esi
movq (%rbx), %rdi
movq 0x10(%rbx), %r11
movq 0x40(%rbx), %r8
movq %r8, %rbp
movq %rcx, 0x48(%rsp)
imulq %rcx, %rbp
imulq %r11, %rbp
addq %rdi, %rbp
movl 0x18(%rbx), %ecx
movq 0x20(%rbx), %rdx
movq %rbp, 0x50(%rsp)
andq $0x0, 0x58(%rsp)
movq %r11, 0x60(%rsp)
movl %ecx, 0x68(%rsp)
movq %rdx, 0x70(%rsp)
movl %eax, 0x7c(%rsp)
movl %r9d, 0x80(%rsp)
movl $0x1, 0x84(%rsp)
movl %esi, 0x88(%rsp)
imulq %rax, %r9
movq %r11, %rax
imulq %r9, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %r11
movq %rax, 0x90(%rsp)
movl 0x28(%rbx), %ecx
leal -0x1(%rcx), %edx
movl %edx, 0x78(%rsp)
cmpl $0x4, %ecx
jne 0x2b397b
movq %r9, 0x90(%rsp)
movq %r9, %rax
movq 0x98(%rsp), %rcx
movq -0x18(%rcx), %r9
movq 0x18(%rsp), %rcx
vcvttss2si 0xe4(%rcx,%r9), %rcx
movl %r10d, %r10d
movq %r10, 0x30(%rsp)
movq %rcx, %rdx
shlq $0x8, %rdx
movq %rcx, %r10
shlq $0x10, %r10
orq %rdx, %r10
movq %rcx, %rdx
shlq $0x18, %rdx
movq %rcx, %r14
shlq $0x20, %r14
orq %rdx, %r14
orq %r10, %r14
movq %rcx, %rdx
shlq $0x28, %rdx
movq %rcx, %r15
shlq $0x30, %r15
orq %rdx, %r15
movq %rcx, %r10
shlq $0x38, %r10
orq %r15, %r10
orq %r14, %r10
orq %rcx, %r10
movl %r12d, %ecx
movl %r12d, %r13d
movq 0x48(%rsp), %r12
addl %r12d, %ecx
setns %dl
cmpl 0xa0(%rsp), %ecx
setl %r14b
testb %r14b, %dl
je 0x2b3bfc
movq 0x28(%rsp), %rsi
movslq 0x2c(%rsi), %rdi
movslq 0x30(%rsi), %r8
movl 0x34(%rsi), %eax
movq (%rsi), %r13
movq 0x10(%rsi), %r11
movq 0x40(%rsi), %rdx
movl %ecx, %r12d
movq %rdx, 0x38(%rsp)
imulq %rdx, %r12
imulq %r11, %r12
addq %r13, %r12
movl 0x18(%rsi), %ecx
movq 0x20(%rsi), %rdx
movq %r12, 0xd0(%rsp)
andq $0x0, 0xd8(%rsp)
movq %r11, 0xe0(%rsp)
movl %ecx, 0xe8(%rsp)
movq %rdx, 0xf0(%rsp)
movl %edi, 0xfc(%rsp)
movl %r8d, 0x100(%rsp)
movl $0x1, 0x104(%rsp)
movl %eax, 0x108(%rsp)
movq %r8, 0xb8(%rsp)
movq %r8, %rcx
movq %rdi, 0x40(%rsp)
imulq %rdi, %rcx
movq %r11, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
movq %r11, 0xb0(%rsp)
divq %r11
movq %rax, 0x110(%rsp)
movl 0x28(%rsi), %eax
leal -0x1(%rax), %edx
movl %edx, 0xf8(%rsp)
cmpl $0x4, %eax
jne 0x2b3ad9
movq %rcx, 0x110(%rsp)
movq 0x18(%rsp), %r14
movl 0xe0(%r14,%r9), %eax
testl %eax, %eax
jne 0x2b3b37
movl 0xd0(%r14,%r9), %edx
movl 0xd4(%r14,%r9), %ecx
movl 0xd8(%r14,%r9), %r8d
movl 0xdc(%r14,%r9), %r9d
movq %r10, (%rsp)
leaq 0xd0(%rsp), %rdi
leaq 0x50(%rsp), %rsi
callq 0x2b46e0
movq (%r14), %rax
movq %rax, 0x98(%rsp)
movq -0x18(%rax), %r9
movl 0xe0(%r14,%r9), %eax
cmpl $0x1, %eax
jne 0x2b3d4c
movq 0x18(%rsp), %rax
movl 0xd0(%rax,%r9), %r8d
movl 0xd4(%rax,%r9), %ecx
movl %ecx, 0xa8(%rsp)
movl 0xd8(%rax,%r9), %edx
xorl %r10d, %r10d
testl %edx, %edx
cmovlel %r10d, %edx
movq 0x40(%rsp), %rcx
testl %ecx, %ecx
movl $0x0, %r11d
cmovgl %ecx, %r11d
movl 0xdc(%rax,%r9), %esi
testl %esi, %esi
cmovlel %r10d, %esi
testl %r8d, %r8d
cmovlel %r10d, %r8d
movq 0x38(%rsp), %r9
imulq 0xb0(%rsp), %r9
imulq 0x30(%rsp), %r9
addq %r13, %r9
movq %rbp, %rdi
cmpl %r8d, %r10d
je 0x2b3c36
movl %edx, %ecx
subl $0x1, %ecx
jb 0x2b3bca
movq (%r12), %r14
movq %r14, (%rdi)
addq $0x8, %rdi
jmp 0x2b3bb8
xorl %r14d, %r14d
cmpl %r14d, %r11d
je 0x2b3be2
movq (%r9,%r14,8), %rcx
movq %rcx, (%rdi)
addq $0x8, %rdi
incq %r14
jmp 0x2b3bcd
movl %esi, %ecx
subl $0x1, %ecx
jb 0x2b3bf7
movq -0x8(%r9,%r14,8), %r15
movq %r15, (%rdi)
addq $0x8, %rdi
jmp 0x2b3be4
incl %r10d
jmp 0x2b3bad
imull %eax, %esi
testl %esi, %esi
movl $0x0, %eax
cmovlel %eax, %esi
imulq %r11, %r8
imulq %r12, %r8
addq %r8, %rdi
xorl %eax, %eax
movq %r12, %rcx
movl %r13d, %r12d
movq 0xc0(%rsp), %r13
cmpq %rax, %rsi
je 0x2b3f6a
movq %r10, (%rdi,%rax,8)
incq %rax
jmp 0x2b3c24
movslq 0xfc(%rsp), %rax
xorl %r11d, %r11d
testl %eax, %eax
movl $0x0, %r8d
movq %rax, 0x120(%rsp)
cmovgl %eax, %r8d
movq 0xb8(%rsp), %rax
testl %eax, %eax
movl $0x0, %r14d
cmovgl %eax, %r14d
movq %r12, %r9
cmpl %r14d, %r11d
je 0x2b3cc8
movl %edx, %ecx
subl $0x1, %ecx
jb 0x2b3c84
movq (%r9), %r10
movq %r10, (%rdi)
addq $0x8, %rdi
jmp 0x2b3c73
xorl %r15d, %r15d
xorl %r10d, %r10d
cmpl %r15d, %r8d
je 0x2b3ca0
movq (%r9,%r15,8), %rcx
movq %rcx, (%rdi,%r15,8)
addq $-0x8, %r10
incq %r15
jmp 0x2b3c8a
movq %r9, %rcx
subq %r10, %rcx
subq %r10, %rdi
movl %esi, %r10d
subl $0x1, %r10d
jb 0x2b3cc0
movq -0x8(%r9,%r15,8), %rax
movq %rax, (%rdi)
addq $0x8, %rdi
jmp 0x2b3cac
incl %r11d
movq %rcx, %r9
jmp 0x2b3c6c
movq 0x120(%rsp), %rax
shlq $0x3, %rax
subq %rax, %r9
xorl %r10d, %r10d
movl 0xa8(%rsp), %r14d
testl %r14d, %r14d
cmovlel %r10d, %r14d
cmpl %r14d, %r10d
je 0x2b3d33
movl %edx, %ecx
subl $0x1, %ecx
jb 0x2b3d01
movq (%r9), %rax
movq %rax, (%rdi)
addq $0x8, %rdi
jmp 0x2b3cf0
xorl %r11d, %r11d
cmpl %r11d, %r8d
je 0x2b3d19
movq (%r9,%r11,8), %rax
movq %rax, (%rdi)
addq $0x8, %rdi
incq %r11
jmp 0x2b3d04
movl %esi, %ecx
subl $0x1, %ecx
jb 0x2b3d2e
movq -0x8(%r9,%r11,8), %rax
movq %rax, (%rdi)
addq $0x8, %rdi
jmp 0x2b3d1b
incl %r10d
jmp 0x2b3ce9
movq 0x98(%rsp), %rax
movq -0x18(%rax), %r9
movq 0x18(%rsp), %rax
movl 0xe0(%rax,%r9), %eax
cmpl $0x2, %eax
jne 0x2b3f55
movq 0x18(%rsp), %rcx
movl 0xd0(%rcx,%r9), %r14d
movslq 0xd8(%rcx,%r9), %rax
movq %rcx, %r8
movl %r14d, %r11d
movq 0x40(%rsp), %rcx
imull %ecx, %r11d
movq %rcx, %rdx
negq %rdx
xorl %r15d, %r15d
testl %eax, %eax
movl $0x0, %esi
cmovgl %eax, %esi
testl %ecx, %ecx
movl $0x0, %edi
cmovgl %ecx, %edi
movl 0xdc(%r8,%r9), %r8d
testl %r8d, %r8d
cmovlel %r15d, %r8d
testl %r14d, %r14d
cmovlel %r15d, %r14d
movq 0x38(%rsp), %r10
imulq 0xb0(%rsp), %r10
movslq %r11d, %rcx
leaq (%r12,%rcx,8), %r11
imulq 0x30(%rsp), %r10
leaq (%r10,%rax,8), %r10
leaq (%r10,%rcx,8), %r10
movq 0x18(%rsp), %r12
movl 0xd4(%r12,%r9), %r9d
movl %r9d, 0x38(%rsp)
shlq $0x3, %rax
addq %r10, %r13
movq 0x40(%rsp), %rcx
leaq (,%rcx,8), %r10
negq %r10
negq %rsi
negq %r8
cmpl %r14d, %r15d
je 0x2b3e5e
xorl %ecx, %ecx
cmpq %rcx, %rsi
je 0x2b3e1c
movq (%r13,%rcx,8), %r12
movq %r12, (%rbp)
addq $0x8, %rbp
decq %rcx
jmp 0x2b3e05
xorl %ecx, %ecx
movq %r11, %r12
cmpl %edi, %ecx
je 0x2b3e39
movq (%r12), %r9
addq $0x8, %r12
movq %r9, (%rbp)
addq $0x8, %rbp
incl %ecx
jmp 0x2b3e21
xorl %ecx, %ecx
cmpq %rcx, %r8
je 0x2b3e52
movq -0x10(%r12,%rcx,8), %r9
movq %r9, (%rbp)
addq $0x8, %rbp
decq %rcx
jmp 0x2b3e3b
leaq (%r11,%rdx,8), %r11
incl %r15d
addq %r10, %r13
jmp 0x2b3dfe
xorl %r14d, %r14d
movq 0xb8(%rsp), %rcx
testl %ecx, %ecx
movl $0x0, %r15d
cmovgl %ecx, %r15d
movl 0x38(%rsp), %r13d
cmpl %r15d, %r14d
je 0x2b3ed3
leaq (%r11,%rax), %rcx
xorl %r12d, %r12d
cmpq %r12, %rsi
je 0x2b3e9c
movq (%rcx,%r12,8), %r9
movq %r9, (%rbp)
addq $0x8, %rbp
decq %r12
jmp 0x2b3e86
xorl %ecx, %ecx
cmpl %edi, %ecx
je 0x2b3eb5
movq (%r11), %r9
addq $0x8, %r11
movq %r9, (%rbp)
addq $0x8, %rbp
incl %ecx
jmp 0x2b3e9e
xorl %ecx, %ecx
cmpq %rcx, %r8
je 0x2b3ece
movq -0x10(%r11,%rcx,8), %r9
movq %r9, (%rbp)
addq $0x8, %rbp
decq %rcx
jmp 0x2b3eb7
incl %r14d
jmp 0x2b3e7a
movq 0x40(%rsp), %rcx
addl %ecx, %ecx
movslq %ecx, %rcx
shlq $0x3, %rcx
movq %r11, %r14
subq %rcx, %r14
xorl %r15d, %r15d
testl %r13d, %r13d
cmovlel %r15d, %r13d
subq %rcx, %rax
addq %r11, %rax
cmpl %r13d, %r15d
je 0x2b3f55
xorl %ecx, %ecx
cmpq %rcx, %rsi
je 0x2b3f14
movq (%rax,%rcx,8), %r9
movq %r9, (%rbp)
addq $0x8, %rbp
decq %rcx
jmp 0x2b3efe
xorl %ecx, %ecx
movq %r14, %r11
cmpl %edi, %ecx
je 0x2b3f30
movq (%r11), %r9
addq $0x8, %r11
movq %r9, (%rbp)
addq $0x8, %rbp
incl %ecx
jmp 0x2b3f19
xorl %ecx, %ecx
cmpq %rcx, %r8
je 0x2b3f49
movq -0x10(%r11,%rcx,8), %r9
movq %r9, (%rbp)
addq $0x8, %rbp
decq %rcx
jmp 0x2b3f32
leaq (%r14,%rdx,8), %r14
incl %r15d
addq %r10, %rax
jmp 0x2b3ef7
movq 0xc0(%rsp), %r13
movl 0xac(%rsp), %r12d
movq 0x48(%rsp), %rcx
incq %rcx
movq 0x30(%rsp), %r10
incl %r10d
jmp 0x2b38d1
movq (%r14), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0xe0(%r14,%rax)
jne 0x2b42dc
vpextrd $0x3, %xmm0, %edi
movl 0xe8(%r14,%rax), %ecx
movl %edi, 0x48(%rsp)
addl %edi, %ecx
addl 0xec(%r14,%rax), %ecx
addl 0xd0(%r14,%rax), %edx
addl 0xd4(%r14,%rax), %edx
addl 0xd8(%r14,%rax), %esi
addl 0xdc(%r14,%rax), %esi
movq 0x8(%r12), %rax
movq %rax, 0x8(%rsp)
movl $0x8, (%rsp)
movq %rbx, %rdi
movq %rcx, %r14
movl %r15d, %r8d
callq 0x62a26
cmpq $0x0, (%rbx)
je 0x2b43a8
movslq 0x38(%rbx), %rax
imulq 0x40(%rbx), %rax
testq %rax, %rax
je 0x2b43a8
xorl %ecx, %ecx
testl %r14d, %r14d
cmovlel %ecx, %r14d
movq %r14, 0x30(%rsp)
testl %r15d, %r15d
movl $0x0, %eax
movq %rax, 0x20(%rsp)
cmovlel %ecx, %r15d
movq %r15, 0xa0(%rsp)
movabsq $0x100000001, %r13 # imm = 0x100000001
xorl %r14d, %r14d
movq 0x18(%rsp), %rbp
movq %rbx, 0xc8(%rsp)
cmpq 0xa0(%rsp), %r14
je 0x2b43a8
movq (%rbp), %rax
movq -0x18(%rax), %rax
vcvttss2si 0xe4(%rbp,%rax), %rax
movq %rax, %rcx
shlq $0x8, %rcx
movq %rax, %rdx
shlq $0x10, %rdx
orq %rcx, %rdx
movq %rax, %rcx
shlq $0x18, %rcx
movq %rax, %rsi
shlq $0x20, %rsi
orq %rcx, %rsi
orq %rdx, %rsi
movq %rax, %rcx
shlq $0x28, %rcx
movq %rax, %rdx
shlq $0x30, %rdx
orq %rcx, %rdx
movq %rax, %r15
shlq $0x38, %r15
orq %rdx, %r15
orq %rsi, %r15
orq %rax, %r15
xorl %r12d, %r12d
cmpq 0x30(%rsp), %r12
je 0x2b4282
movslq 0x2c(%rbx), %rdi
movslq 0x30(%rbx), %rdx
movq (%rbx), %rax
movq 0x10(%rbx), %rsi
movq 0x40(%rbx), %r8
imulq %r14, %r8
movq %r8, %r9
imulq %rsi, %r9
addq %rax, %r9
movl 0x18(%rbx), %r10d
movq %rdx, %rcx
imulq %rdi, %rcx
movq %r12, %r11
imulq %rsi, %r11
imulq %rcx, %r11
addq %r9, %r11
movq 0x20(%rbx), %r9
movq %r11, 0x50(%rsp)
andq $0x0, 0x58(%rsp)
movq %rsi, 0x60(%rsp)
movl %r10d, 0x68(%rsp)
movq %r9, 0x70(%rsp)
movl $0x2, 0x78(%rsp)
movl %edi, 0x7c(%rsp)
movl %edx, 0x80(%rsp)
movq %r13, 0x84(%rsp)
movq %rcx, 0x90(%rsp)
movq (%rbp), %r9
movq -0x18(%r9), %r9
movl 0xe8(%rbp,%r9), %r11d
movl %r12d, %r10d
subl %r11d, %r10d
setns %r11b
cmpl 0x48(%rsp), %r10d
movq %r13, %rbx
movq %rbp, %r13
setl %bpl
testb %bpl, %r11b
je 0x2b4246
movq 0x28(%rsp), %r8
movslq 0x2c(%r8), %rax
movslq 0x30(%r8), %rcx
movq 0x40(%r8), %rdx
imulq %r14, %rdx
movq 0x10(%r8), %rsi
imulq %rsi, %rdx
addq (%r8), %rdx
movl 0x18(%r8), %edi
movq 0x20(%r8), %r8
movq %rcx, %r11
imulq %rax, %r11
movl %r10d, %r10d
movq %r11, %rbp
imulq %rsi, %rbp
imulq %r10, %rbp
addq %rdx, %rbp
movq %rbp, 0xd0(%rsp)
andq $0x0, 0xd8(%rsp)
movq %rsi, 0xe0(%rsp)
movl %edi, 0xe8(%rsp)
movq %r8, 0xf0(%rsp)
movl $0x2, 0xf8(%rsp)
movl %eax, 0xfc(%rsp)
movl %ecx, 0x100(%rsp)
movq %rbx, 0x104(%rsp)
movq %r11, 0x110(%rsp)
movl 0xd0(%r13,%r9), %edx
movl 0xd4(%r13,%r9), %ecx
movl 0xd8(%r13,%r9), %r8d
movl 0xdc(%r13,%r9), %r9d
movq %r15, (%rsp)
leaq 0xd0(%rsp), %rdi
leaq 0x50(%rsp), %rsi
callq 0x2b46e0
movq %r13, %rbp
movq %rbx, %r13
movq 0xc8(%rsp), %rbx
incq %r12
jmp 0x2b40b1
testl %ecx, %ecx
movl $0x0, %r9d
cmovlel %r9d, %ecx
imulq %r12, %rdx
imulq %rdi, %rdx
addq %r8, %rdx
imulq %rdx, %rsi
addq %rsi, %rax
xorl %edx, %edx
movq %r13, %rbp
movq %rbx, %r13
movq 0xc8(%rsp), %rbx
cmpq %rdx, %rcx
je 0x2b423e
movq %r15, (%rax,%rdx,8)
incq %rdx
jmp 0x2b4274
incq %r14
jmp 0x2b4048
movq (%r14), %rax
movq -0x18(%rax), %r8
movl 0xd0(%r14,%r8), %r10d
leal (%r10,%rdx,8), %edx
addl 0xd4(%r14,%r8), %edx
xorl %r11d, %r11d
testb $0x7, %dl
sete %r11b
movq %r9, %rax
shrq $0x3, %rax
leal (%r11,%r11,2), %ecx
shlq %cl, %rax
testb $0x7, %r10b
sete %cl
andb %cl, %r11b
cmpb $0x1, %r11b
jne 0x2b42dc
cmpl $0x0, 0xe0(%r14,%r8)
je 0x2b43bf
movq 0x28(%rsp), %rdx
movq 0x8(%rdx), %rax
vmovups (%rdx), %xmm1
vmovaps %xmm1, 0x50(%rsp)
movq %r9, 0x60(%rsp)
movl %edi, 0x68(%rsp)
movq 0x20(%rdx), %rcx
movq %rcx, 0x70(%rsp)
vmovdqu %xmm0, 0x78(%rsp)
movl %r15d, 0x88(%rsp)
movq 0x40(%rdx), %rcx
movq %rcx, 0x90(%rsp)
testq %rax, %rax
je 0x2b4323
lock
incl (%rax)
cmpl $0x1, %edi
je 0x2b435b
vmovdqu64 (%r12), %zmm0
leaq 0xd0(%rsp), %rcx
vmovdqu64 %zmm0, (%rcx)
movq 0x10(%r12), %rax
movq %rax, 0x8(%rcx)
leaq 0x50(%rsp), %rsi
pushq $0x1
popq %rdx
movq 0x28(%rsp), %rdi
vzeroupper
callq 0x64e3b
movq (%r14), %rax
addq -0x18(%rax), %r14
leaq 0x50(%rsp), %rsi
movq %r14, %rdi
movq %rbx, %rdx
movq %r12, %rcx
callq 0x2acc2e
movq %rax, 0x20(%rsp)
movq 0x58(%rsp), %rax
testq %rax, %rax
je 0x2b43a8
lock
decl (%rax)
jne 0x2b43a8
movq 0x50(%rsp), %rsi
movq 0x70(%rsp), %rdi
testq %rdi, %rdi
je 0x2b43a0
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2b43a8
movq %rsi, %rdi
callq 0x5f3e0
movq 0x20(%rsp), %rax
addq $0x128, %rsp # imm = 0x128
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
addl 0xd8(%r14,%r8), %esi
addl 0xdc(%r14,%r8), %esi
sarl $0x3, %edx
movq 0x8(%r12), %r9
pushq $0x8
popq %rbp
movq %rbx, %rdi
movq %rax, %rcx
movl %ebp, %r8d
callq 0x627de
cmpq $0x0, (%rbx)
je 0x2b43a8
movslq 0x38(%rbx), %rax
imulq 0x40(%rbx), %rax
testq %rax, %rax
je 0x2b43a8
movq 0x18(%rsp), %r9
movq (%r9), %rax
movq -0x18(%rax), %rsi
vcvttss2si 0xe4(%r9,%rsi), %rax
movq %rax, %rcx
shlq $0x8, %rcx
movq %rax, %rdx
shlq $0x10, %rdx
orq %rcx, %rdx
movq %rax, %rcx
shlq $0x18, %rcx
movq %rax, %rdi
shlq $0x20, %rdi
orq %rcx, %rdi
orq %rdx, %rdi
movq %rax, %rcx
shlq $0x28, %rcx
movq %rax, %rdx
shlq $0x30, %rdx
orq %rcx, %rdx
movq %rax, %r11
shlq $0x38, %r11
orq %rdx, %r11
orq %rdi, %r11
orq %rax, %r11
movl 0xd0(%r9,%rsi), %eax
movl 0xd4(%r9,%rsi), %ecx
cltd
idivl %ebp
movl %eax, %r10d
movl %ecx, %eax
cltd
idivl %ebp
movl 0xd8(%r9,%rsi), %r8d
movl 0xdc(%r9,%rsi), %r9d
movq %r11, (%rsp)
movq 0x28(%rsp), %rdi
movq %rbx, %rsi
movl %r10d, %edx
movl %eax, %ecx
callq 0x2b46e0
movq $0x0, 0x20(%rsp)
jmp 0x2b43a8
jmp 0x2b44e4
jmp 0x2b44ab
movq %rax, %rbx
movq 0x58(%rsp), %rax
testq %rax, %rax
je 0x2b44dc
lock
decl (%rax)
jne 0x2b44dc
movq 0x50(%rsp), %rsi
movq 0x70(%rsp), %rdi
testq %rdi, %rdi
jne 0x2b44d6
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2b44dc
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/padding_x86_avx512.cpp |
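The pad-value construction in forward_int8 above packs one int8 into all eight byte lanes of an int64_t by shift-and-or. A self-contained sketch of the same expression (the helper name is hypothetical); note that, as written, it assumes the truncated value is non-negative, since a negative int64_t already carries sign bits in the upper lanes:

#include <cstdint>
#include <cstdio>

// Same shift-and-or replication as the v8 / pad_value lines above.
static int64_t replicate_int8x8(float value)
{
    int64_t v8 = (int64_t)value; // truncating float -> integer conversion
    return v8 | (v8 << 8) | (v8 << 16) | (v8 << 24)
              | (v8 << 32) | (v8 << 40) | (v8 << 48) | (v8 << 56);
}

int main()
{
    // 2.0f truncates to 2 and replicates to 0x0202020202020202.
    std::printf("%016llx\n", (unsigned long long)replicate_int8x8(2.0f));
    return 0;
}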
virtual thunk to ncnn::Padding_x86_avx512::forward(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const | int Padding_x86_avx512::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
if (top == 0 && bottom == 0 && left == 0 && right == 0 && front == 0 && behind == 0)
{
top_blob = bottom_blob;
return 0;
}
int elembits = bottom_blob.elembits();
if (elembits == 8)
return forward_int8(bottom_blob, top_blob, opt);
int w = bottom_blob.w;
int h = bottom_blob.h;
int d = bottom_blob.d;
int channels = bottom_blob.c;
int dims = bottom_blob.dims;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
#if __SSE2__
#if __AVX__
#if __AVX512F__
if (elempack == 16)
{
if (dims == 1)
{
int outw = w * elempack + left + right;
int out_elempack = outw % 16 == 0 ? 16 : outw % 8 == 0 ? 8 : outw % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (left % 16 == 0 && out_elempack == 16 && type == 0)
{
top_blob.create(outw / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
__m512 pad_value = _mm512_set1_ps(value);
padding_constant_pack16_avx512(bottom_blob, top_blob, 0, 0, left / 16, right / 16, pad_value);
return 0;
}
}
if (dims == 2)
{
int outw = w + left + right;
int outh = h * elempack + top + bottom;
int out_elempack = outh % 16 == 0 ? 16 : outh % 8 == 0 ? 8 : outh % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (top % 16 == 0 && out_elempack == 16 && type == 0)
{
top_blob.create(outw, outh / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
__m512 pad_value = _mm512_set1_ps(value);
padding_constant_pack16_avx512(bottom_blob, top_blob, top / 16, bottom / 16, left, right, pad_value);
return 0;
}
}
if (dims == 3)
{
int outw = w + left + right;
int outh = h + top + bottom;
int outc = channels * elempack + front + behind;
int out_elempack = outc % 16 == 0 ? 16 : outc % 8 == 0 ? 8 : outc % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (front % 16 == 0 && out_elempack == 16 && !(outc != channels * elempack && type != 0))
{
top_blob.create(outw, outh, outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
int front_ = front / elempack;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < outc / out_elempack; q++)
{
Mat borderm = top_blob.channel(q);
__m512 pad_value = per_channel_pad_data_size ? _mm512_loadu_ps((const float*)per_channel_pad_data + q * 16) : _mm512_set1_ps(value);
//Channel padding
if ((q - front_) < 0 || (q - front_) >= channels)
{
borderm.fill(pad_value);
}
else
{
const Mat m = bottom_blob.channel(q - front_);
if (type == 0)
padding_constant_pack16_avx512(m, borderm, top, bottom, left, right, pad_value);
if (type == 1)
padding_replicate_pack16_avx512(m, borderm, top, bottom, left, right);
if (type == 2)
padding_reflect_pack16_avx512(m, borderm, top, bottom, left, right);
}
}
return 0;
}
}
if (dims == 4)
{
int outw = w + left + right;
int outh = h + top + bottom;
int outd = d + front + behind;
if (type == 0)
{
top_blob.create(outw, outh, outd, channels, elemsize, elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
__m512 pad_value = per_channel_pad_data_size ? _mm512_loadu_ps((const float*)per_channel_pad_data + q * 16) : _mm512_set1_ps(value);
for (int z = 0; z < outd; z++)
{
Mat borderm = top_blob.channel(q).depth(z);
// depth padding
if ((z - front) < 0 || (z - front) >= d)
{
borderm.fill(pad_value);
}
else
{
const Mat m = bottom_blob.channel(q).depth(z - front);
padding_constant_pack16_avx512(m, borderm, top, bottom, left, right, pad_value);
}
}
}
return 0;
}
}
}
#endif // __AVX512F__
if (elempack == 8)
{
if (dims == 1)
{
int outw = w * elempack + left + right;
int out_elempack = outw % 8 == 0 ? 8 : outw % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (left % 8 == 0 && out_elempack == 8 && type == 0)
{
top_blob.create(outw / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
__m256 pad_value = _mm256_set1_ps(value);
padding_constant_pack8_avx(bottom_blob, top_blob, 0, 0, left / 8, right / 8, pad_value);
return 0;
}
}
if (dims == 2)
{
int outw = w + left + right;
int outh = h * elempack + top + bottom;
int out_elempack = outh % 8 == 0 ? 8 : outh % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (top % 8 == 0 && out_elempack == 8 && type == 0)
{
top_blob.create(outw, outh / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
__m256 pad_value = _mm256_set1_ps(value);
padding_constant_pack8_avx(bottom_blob, top_blob, top / 8, bottom / 8, left, right, pad_value);
return 0;
}
}
if (dims == 3)
{
int outw = w + left + right;
int outh = h + top + bottom;
int outc = channels * elempack + front + behind;
int out_elempack = outc % 8 == 0 ? 8 : outc % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (front % 8 == 0 && out_elempack == 8 && !(outc != channels * elempack && type != 0))
{
top_blob.create(outw, outh, outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
int front_ = front / elempack;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < outc / out_elempack; q++)
{
Mat borderm = top_blob.channel(q);
__m256 pad_value = per_channel_pad_data_size ? _mm256_loadu_ps((const float*)per_channel_pad_data + q * 8) : _mm256_set1_ps(value);
//Channel padding
if ((q - front_) < 0 || (q - front_) >= channels)
{
borderm.fill(pad_value);
}
else
{
const Mat m = bottom_blob.channel(q - front_);
if (type == 0)
padding_constant_pack8_avx(m, borderm, top, bottom, left, right, pad_value);
if (type == 1)
padding_replicate_pack8_avx(m, borderm, top, bottom, left, right);
if (type == 2)
padding_reflect_pack8_avx(m, borderm, top, bottom, left, right);
}
}
return 0;
}
}
if (dims == 4)
{
int outw = w + left + right;
int outh = h + top + bottom;
int outd = d + front + behind;
if (type == 0)
{
top_blob.create(outw, outh, outd, channels, elemsize, elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
__m256 pad_value = per_channel_pad_data_size ? _mm256_loadu_ps((const float*)per_channel_pad_data + q * 8) : _mm256_set1_ps(value);
for (int z = 0; z < outd; z++)
{
Mat borderm = top_blob.channel(q).depth(z);
// depth padding
if ((z - front) < 0 || (z - front) >= d)
{
borderm.fill(pad_value);
}
else
{
const Mat m = bottom_blob.channel(q).depth(z - front);
padding_constant_pack8_avx(m, borderm, top, bottom, left, right, pad_value);
}
}
}
return 0;
}
}
}
#endif // __AVX__
if (elempack == 4)
{
if (dims == 1)
{
int outw = w * elempack + left + right;
#if __AVX__
int out_elempack = outw % 8 == 0 ? 8 : outw % 4 == 0 ? 4 : 1;
#else
int out_elempack = outw % 4 == 0 ? 4 : 1;
#endif
size_t out_elemsize = elemsize / elempack * out_elempack;
if (left % 4 == 0 && out_elempack == 4 && type == 0)
{
top_blob.create(outw / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
__m128 pad_value = _mm_set1_ps(value);
padding_constant_pack4_sse(bottom_blob, top_blob, 0, 0, left / 4, right / 4, pad_value);
return 0;
}
}
if (dims == 2)
{
int outw = w + left + right;
int outh = h * elempack + top + bottom;
#if __AVX__
int out_elempack = outh % 8 == 0 ? 8 : outh % 4 == 0 ? 4 : 1;
#else
int out_elempack = outh % 4 == 0 ? 4 : 1;
#endif
size_t out_elemsize = elemsize / elempack * out_elempack;
if (top % 4 == 0 && out_elempack == 4 && type == 0)
{
top_blob.create(outw, outh / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
__m128 pad_value = _mm_set1_ps(value);
padding_constant_pack4_sse(bottom_blob, top_blob, top / 4, bottom / 4, left, right, pad_value);
return 0;
}
}
if (dims == 3)
{
int outw = w + left + right;
int outh = h + top + bottom;
int outc = channels * elempack + front + behind;
#if __AVX__
int out_elempack = outc % 8 == 0 ? 8 : outc % 4 == 0 ? 4 : 1;
#else
int out_elempack = outc % 4 == 0 ? 4 : 1;
#endif
size_t out_elemsize = elemsize / elempack * out_elempack;
if (front % 4 == 0 && out_elempack == 4 && !(outc != channels * elempack && type != 0))
{
top_blob.create(outw, outh, outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
int front_ = front / elempack;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < outc / out_elempack; q++)
{
Mat borderm = top_blob.channel(q);
__m128 pad_value = per_channel_pad_data_size ? _mm_loadu_ps((const float*)per_channel_pad_data + q * 4) : _mm_set1_ps(value);
//Channel padding
if ((q - front_) < 0 || (q - front_) >= channels)
{
borderm.fill(pad_value);
}
else
{
const Mat m = bottom_blob.channel(q - front_);
if (type == 0)
padding_constant_pack4_sse(m, borderm, top, bottom, left, right, pad_value);
if (type == 1)
padding_replicate_pack4_sse(m, borderm, top, bottom, left, right);
if (type == 2)
padding_reflect_pack4_sse(m, borderm, top, bottom, left, right);
}
}
return 0;
}
}
if (dims == 4)
{
int outw = w + left + right;
int outh = h + top + bottom;
int outd = d + front + behind;
if (type == 0)
{
top_blob.create(outw, outh, outd, channels, elemsize, elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
__m128 pad_value = per_channel_pad_data_size ? _mm_loadu_ps((const float*)per_channel_pad_data + q * 4) : _mm_set1_ps(value);
for (int z = 0; z < outd; z++)
{
Mat borderm = top_blob.channel(q).depth(z);
// depth padding
if ((z - front) < 0 || (z - front) >= d)
{
borderm.fill(pad_value);
}
else
{
const Mat m = bottom_blob.channel(q).depth(z - front);
padding_constant_pack4_sse(m, borderm, top, bottom, left, right, pad_value);
}
}
}
return 0;
}
}
}
#endif // __SSE2__
Mat bottom_blob_unpacked = bottom_blob;
if (elempack != 1)
{
Option opt_pack1 = opt;
opt_pack1.blob_allocator = opt.workspace_allocator;
convert_packing(bottom_blob, bottom_blob_unpacked, 1, opt_pack1);
}
return Padding::forward(bottom_blob_unpacked, top_blob, opt);
} | movq (%rdi), %rax
addq -0x48(%rax), %rdi
jmp 0x2b0de0
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/padding_x86_avx512.cpp |
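The three-instruction body above is the entire thunk: load the vptr, add the this-pointer adjustment stored at a negative vtable offset (-0x48), and tail-jump to the real forward. Such thunks are emitted when a method is reached through a virtual base, as in this minimal sketch (unrelated to ncnn, for illustration only):

// With virtual inheritance, the offset from the Base subobject back to the
// complete object is only known at runtime, so calls through Base* go via a
// thunk that reads the adjustment from the vtable before jumping.
struct Base
{
    virtual ~Base() {}
    virtual int forward() = 0;
};

struct Derived : virtual Base
{
    int forward() { return 0; } // reached through a virtual thunk from Base*
};

int call(Base* b)
{
    return b->forward();
}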
ncnn::padding_constant_pack8_int8_sse(ncnn::Mat const&, ncnn::Mat&, int, int, int, int, long) | static void padding_constant_pack8_int8_sse(const Mat& src, Mat& dst, int top, int bottom, int left, int right, int64_t _v)
{
const int64_t* ptr = src;
int64_t* outptr = dst;
// fill top
for (int y = 0; y < top; y++)
{
for (int x = 0; x < dst.w; x++)
{
*outptr++ = _v;
}
}
// fill center
for (int y = 0; y < src.h; y++)
{
for (int x = 0; x < left; x++)
{
*outptr++ = _v;
}
for (int x = 0; x < src.w; x++)
{
*outptr++ = *ptr++;
}
for (int x = 0; x < right; x++)
{
*outptr++ = _v;
}
}
// fill bottom
for (int y = 0; y < bottom; y++)
{
for (int x = 0; x < dst.w; x++)
{
*outptr++ = _v;
}
}
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
movq 0x30(%rsp), %rax
movq (%rdi), %r11
movl 0x2c(%rsi), %ebx
xorl %ebp, %ebp
testl %ebx, %ebx
cmovlel %ebp, %ebx
movq (%rsi), %r10
testl %edx, %edx
cmovlel %ebp, %edx
cmpl %edx, %ebp
je 0x2b471c
movl %ebx, %r14d
subl $0x1, %r14d
jb 0x2b4718
movq %rax, (%r10)
addq $0x8, %r10
jmp 0x2b4709
incl %ebp
jmp 0x2b4702
xorl %edx, %edx
testl %r8d, %r8d
cmovlel %edx, %r8d
movl 0x2c(%rdi), %ebx
testl %ebx, %ebx
cmovlel %edx, %ebx
testl %r9d, %r9d
cmovlel %edx, %r9d
movl 0x30(%rdi), %edi
testl %edi, %edi
cmovlel %edx, %edi
cmpl %edi, %edx
je 0x2b4788
movl %r8d, %ebp
subl $0x1, %ebp
jb 0x2b4751
movq %rax, (%r10)
addq $0x8, %r10
jmp 0x2b4743
xorl %r15d, %r15d
xorl %r14d, %r14d
cmpl %r15d, %ebx
je 0x2b476d
movq (%r11,%r15,8), %r12
movq %r12, (%r10,%r15,8)
addq $-0x8, %r14
incq %r15
jmp 0x2b4757
subq %r14, %r11
subq %r14, %r10
movl %r9d, %ebp
subl $0x1, %ebp
jb 0x2b4784
movq %rax, (%r10)
addq $0x8, %r10
jmp 0x2b4776
incl %edx
jmp 0x2b473c
movl 0x2c(%rsi), %edx
xorl %esi, %esi
testl %edx, %edx
cmovlel %esi, %edx
testl %ecx, %ecx
cmovlel %esi, %ecx
movl %edx, %edi
cmpl %ecx, %esi
je 0x2b47af
subl $0x1, %edi
jb 0x2b47ab
movq %rax, (%r10)
addq $0x8, %r10
jmp 0x2b479d
incl %esi
jmp 0x2b4797
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
| /csukuangfj[P]ncnn/src/layer/x86/padding_pack8_int8.h |
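In the pack8 int8 layout handled above, one pixel is eight int8 lanes stored contiguously, so both source and destination can be walked as plain int64 values and every border store or copy is a single 64-bit move. A minimal stand-alone sketch of the center loop, outside any ncnn types (the names and the vector-based interface are illustrative only):

// Hedged sketch: pad one pack8-int8 row of width w with left/right border pixels of _v.
#include <stdint.h>
#include <vector>

std::vector<int64_t> pad_row_pack8_int8(const int64_t* src, int w, int left, int right, int64_t _v)
{
    std::vector<int64_t> out;
    out.reserve(left + w + right);
    for (int x = 0; x < left; x++) out.push_back(_v);  // left border
    for (int x = 0; x < w; x++) out.push_back(src[x]); // payload: 8 int8 lanes per element
    for (int x = 0; x < right; x++) out.push_back(_v); // right border
    return out;
}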
ncnn::Padding_x86_fma::forward(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const | int Padding_x86_fma::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
if (top == 0 && bottom == 0 && left == 0 && right == 0 && front == 0 && behind == 0)
{
top_blob = bottom_blob;
return 0;
}
int elembits = bottom_blob.elembits();
if (elembits == 8)
return forward_int8(bottom_blob, top_blob, opt);
int w = bottom_blob.w;
int h = bottom_blob.h;
int d = bottom_blob.d;
int channels = bottom_blob.c;
int dims = bottom_blob.dims;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
#if __SSE2__
#if __AVX__
#if __AVX512F__
if (elempack == 16)
{
if (dims == 1)
{
int outw = w * elempack + left + right;
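// pick the widest pack (16/8/4/1) that still divides the padded length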
int out_elempack = outw % 16 == 0 ? 16 : outw % 8 == 0 ? 8 : outw % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (left % 16 == 0 && out_elempack == 16 && type == 0)
{
top_blob.create(outw / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
__m512 pad_value = _mm512_set1_ps(value);
padding_constant_pack16_avx512(bottom_blob, top_blob, 0, 0, left / 16, right / 16, pad_value);
return 0;
}
}
if (dims == 2)
{
int outw = w + left + right;
int outh = h * elempack + top + bottom;
int out_elempack = outh % 16 == 0 ? 16 : outh % 8 == 0 ? 8 : outh % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (top % 16 == 0 && out_elempack == 16 && type == 0)
{
top_blob.create(outw, outh / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
__m512 pad_value = _mm512_set1_ps(value);
padding_constant_pack16_avx512(bottom_blob, top_blob, top / 16, bottom / 16, left, right, pad_value);
return 0;
}
}
if (dims == 3)
{
int outw = w + left + right;
int outh = h + top + bottom;
int outc = channels * elempack + front + behind;
int out_elempack = outc % 16 == 0 ? 16 : outc % 8 == 0 ? 8 : outc % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
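// replicate/reflect padding cannot synthesize channels that are absent from the
// input, so a changed packed channel count (outc != channels * elempack) is only
// accepted for constant padding (type == 0)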
if (front % 16 == 0 && out_elempack == 16 && !(outc != channels * elempack && type != 0))
{
top_blob.create(outw, outh, outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
int front_ = front / elempack;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < outc / out_elempack; q++)
{
Mat borderm = top_blob.channel(q);
__m512 pad_value = per_channel_pad_data_size ? _mm512_loadu_ps((const float*)per_channel_pad_data + q * 16) : _mm512_set1_ps(value);
// channel padding
if ((q - front_) < 0 || (q - front_) >= channels)
{
borderm.fill(pad_value);
}
else
{
const Mat m = bottom_blob.channel(q - front_);
if (type == 0)
padding_constant_pack16_avx512(m, borderm, top, bottom, left, right, pad_value);
if (type == 1)
padding_replicate_pack16_avx512(m, borderm, top, bottom, left, right);
if (type == 2)
padding_reflect_pack16_avx512(m, borderm, top, bottom, left, right);
}
}
return 0;
}
}
if (dims == 4)
{
int outw = w + left + right;
int outh = h + top + bottom;
int outd = d + front + behind;
if (type == 0)
{
top_blob.create(outw, outh, outd, channels, elemsize, elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
__m512 pad_value = per_channel_pad_data_size ? _mm512_loadu_ps((const float*)per_channel_pad_data + q * 16) : _mm512_set1_ps(value);
for (int z = 0; z < outd; z++)
{
Mat borderm = top_blob.channel(q).depth(z);
// depth padding
if ((z - front) < 0 || (z - front) >= d)
{
borderm.fill(pad_value);
}
else
{
const Mat m = bottom_blob.channel(q).depth(z - front);
padding_constant_pack16_avx512(m, borderm, top, bottom, left, right, pad_value);
}
}
}
return 0;
}
}
}
#endif // __AVX512F__
if (elempack == 8)
{
if (dims == 1)
{
int outw = w * elempack + left + right;
int out_elempack = outw % 8 == 0 ? 8 : outw % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (left % 8 == 0 && out_elempack == 8 && type == 0)
{
top_blob.create(outw / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
__m256 pad_value = _mm256_set1_ps(value);
padding_constant_pack8_avx(bottom_blob, top_blob, 0, 0, left / 8, right / 8, pad_value);
return 0;
}
}
if (dims == 2)
{
int outw = w + left + right;
int outh = h * elempack + top + bottom;
int out_elempack = outh % 8 == 0 ? 8 : outh % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (top % 8 == 0 && out_elempack == 8 && type == 0)
{
top_blob.create(outw, outh / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
__m256 pad_value = _mm256_set1_ps(value);
padding_constant_pack8_avx(bottom_blob, top_blob, top / 8, bottom / 8, left, right, pad_value);
return 0;
}
}
if (dims == 3)
{
int outw = w + left + right;
int outh = h + top + bottom;
int outc = channels * elempack + front + behind;
int out_elempack = outc % 8 == 0 ? 8 : outc % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (front % 8 == 0 && out_elempack == 8 && !(outc != channels * elempack && type != 0))
{
top_blob.create(outw, outh, outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
int front_ = front / elempack;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < outc / out_elempack; q++)
{
Mat borderm = top_blob.channel(q);
__m256 pad_value = per_channel_pad_data_size ? _mm256_loadu_ps((const float*)per_channel_pad_data + q * 8) : _mm256_set1_ps(value);
// channel padding
if ((q - front_) < 0 || (q - front_) >= channels)
{
borderm.fill(pad_value);
}
else
{
const Mat m = bottom_blob.channel(q - front_);
if (type == 0)
padding_constant_pack8_avx(m, borderm, top, bottom, left, right, pad_value);
if (type == 1)
padding_replicate_pack8_avx(m, borderm, top, bottom, left, right);
if (type == 2)
padding_reflect_pack8_avx(m, borderm, top, bottom, left, right);
}
}
return 0;
}
}
if (dims == 4)
{
int outw = w + left + right;
int outh = h + top + bottom;
int outd = d + front + behind;
if (type == 0)
{
top_blob.create(outw, outh, outd, channels, elemsize, elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
__m256 pad_value = per_channel_pad_data_size ? _mm256_loadu_ps((const float*)per_channel_pad_data + q * 8) : _mm256_set1_ps(value);
for (int z = 0; z < outd; z++)
{
Mat borderm = top_blob.channel(q).depth(z);
// depth padding
if ((z - front) < 0 || (z - front) >= d)
{
borderm.fill(pad_value);
}
else
{
const Mat m = bottom_blob.channel(q).depth(z - front);
padding_constant_pack8_avx(m, borderm, top, bottom, left, right, pad_value);
}
}
}
return 0;
}
}
}
#endif // __AVX__
if (elempack == 4)
{
if (dims == 1)
{
int outw = w * elempack + left + right;
#if __AVX__
int out_elempack = outw % 8 == 0 ? 8 : outw % 4 == 0 ? 4 : 1;
#else
int out_elempack = outw % 4 == 0 ? 4 : 1;
#endif
size_t out_elemsize = elemsize / elempack * out_elempack;
if (left % 4 == 0 && out_elempack == 4 && type == 0)
{
top_blob.create(outw / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
__m128 pad_value = _mm_set1_ps(value);
padding_constant_pack4_sse(bottom_blob, top_blob, 0, 0, left / 4, right / 4, pad_value);
return 0;
}
}
if (dims == 2)
{
int outw = w + left + right;
int outh = h * elempack + top + bottom;
#if __AVX__
int out_elempack = outh % 8 == 0 ? 8 : outh % 4 == 0 ? 4 : 1;
#else
int out_elempack = outh % 4 == 0 ? 4 : 1;
#endif
size_t out_elemsize = elemsize / elempack * out_elempack;
if (top % 4 == 0 && out_elempack == 4 && type == 0)
{
top_blob.create(outw, outh / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
__m128 pad_value = _mm_set1_ps(value);
padding_constant_pack4_sse(bottom_blob, top_blob, top / 4, bottom / 4, left, right, pad_value);
return 0;
}
}
if (dims == 3)
{
int outw = w + left + right;
int outh = h + top + bottom;
int outc = channels * elempack + front + behind;
#if __AVX__
int out_elempack = outc % 8 == 0 ? 8 : outc % 4 == 0 ? 4 : 1;
#else
int out_elempack = outc % 4 == 0 ? 4 : 1;
#endif
size_t out_elemsize = elemsize / elempack * out_elempack;
if (front % 4 == 0 && out_elempack == 4 && !(outc != channels * elempack && type != 0))
{
top_blob.create(outw, outh, outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
int front_ = front / elempack;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < outc / out_elempack; q++)
{
Mat borderm = top_blob.channel(q);
__m128 pad_value = per_channel_pad_data_size ? _mm_loadu_ps((const float*)per_channel_pad_data + q * 4) : _mm_set1_ps(value);
// channel padding
if ((q - front_) < 0 || (q - front_) >= channels)
{
borderm.fill(pad_value);
}
else
{
const Mat m = bottom_blob.channel(q - front_);
if (type == 0)
padding_constant_pack4_sse(m, borderm, top, bottom, left, right, pad_value);
if (type == 1)
padding_replicate_pack4_sse(m, borderm, top, bottom, left, right);
if (type == 2)
padding_reflect_pack4_sse(m, borderm, top, bottom, left, right);
}
}
return 0;
}
}
if (dims == 4)
{
int outw = w + left + right;
int outh = h + top + bottom;
int outd = d + front + behind;
if (type == 0)
{
top_blob.create(outw, outh, outd, channels, elemsize, elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
__m128 pad_value = per_channel_pad_data_size ? _mm_loadu_ps((const float*)per_channel_pad_data + q * 4) : _mm_set1_ps(value);
for (int z = 0; z < outd; z++)
{
Mat borderm = top_blob.channel(q).depth(z);
// depth padding
if ((z - front) < 0 || (z - front) >= d)
{
borderm.fill(pad_value);
}
else
{
const Mat m = bottom_blob.channel(q).depth(z - front);
padding_constant_pack4_sse(m, borderm, top, bottom, left, right, pad_value);
}
}
}
return 0;
}
}
}
#endif // __SSE2__
Mat bottom_blob_unpacked = bottom_blob;
if (elempack != 1)
{
Option opt_pack1 = opt;
opt_pack1.blob_allocator = opt.workspace_allocator;
convert_packing(bottom_blob, bottom_blob_unpacked, 1, opt_pack1);
}
return Padding::forward(bottom_blob_unpacked, top_blob, opt);
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xf8, %rsp
movq %rcx, %rbp
movq %rdx, 0x8(%rsp)
movq %rsi, %r14
movq (%rdi), %rax
movq -0x18(%rax), %r11
movl 0xd0(%rdi,%r11), %r10d
testl %r10d, %r10d
jne 0x2b486b
cmpl $0x0, 0xd4(%rdi,%r11)
jne 0x2b486b
cmpl $0x0, 0xd8(%rdi,%r11)
jne 0x2b486b
cmpl $0x0, 0xdc(%rdi,%r11)
jne 0x2b486b
cmpl $0x0, 0xe8(%rdi,%r11)
jne 0x2b486b
cmpl $0x0, 0xec(%rdi,%r11)
je 0x2b62f3
movq %rdi, (%rsp)
movl 0x18(%r14), %r15d
movq 0x10(%r14), %r9
testl %r15d, %r15d
je 0x2b48b2
leal (,%r9,8), %eax
cltd
idivl %r15d
cmpl $0x8, %eax
jne 0x2b48c4
movq (%rsp), %rdi
movq %r14, %rsi
movq 0x8(%rsp), %rdx
movq %rbp, %rcx
addq $0xf8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp 0x2b63fc
vmovdqu 0x2c(%r14), %xmm0
movq %r14, %rdi
movl 0x28(%r14), %r13d
jmp 0x2b4b30
vmovdqu 0x2c(%r14), %xmm0
movq %r14, 0x10(%rsp)
movl 0x28(%r14), %eax
vpextrd $0x2, %xmm0, %r12d
pushq $0x4
popq %r13
vpextrd $0x1, %xmm0, %edx
vmovd %xmm0, %esi
vpextrd $0x3, %xmm0, %ecx
movq %rcx, 0x18(%rsp)
pushq $-0x64
popq %rcx
movq %rcx, 0x20(%rsp)
cmpl $0x4, %r15d
je 0x2b4a16
cmpl $0x8, %r15d
jne 0x2b4b28
leal -0x1(%rax), %ecx
cmpl $0x3, %ecx
ja 0x2b4b28
leaq 0x143ec3(%rip), %rax # 0x3f87e4
movslq (%rax,%rcx,4), %rcx
addq %rax, %rcx
pushq $0x8
popq %r14
jmpq *%rcx
movq (%rsp), %rdi
movl 0xd8(%rdi,%r11), %edx
leal (%rdx,%rsi,8), %eax
addl 0xdc(%rdi,%r11), %eax
testb $0x3, %al
sete %cl
movq %r9, %r10
shrq $0x3, %r10
addb %cl, %cl
testb $0x7, %al
movzbl %cl, %esi
pushq $0x3
popq %rcx
cmovnel %esi, %ecx
shlq %cl, %r10
orl %eax, %edx
pushq $0x1
popq %r13
testb $0x7, %dl
jne 0x2b4b2b
cmpl $0x0, 0xe0(%rdi,%r11)
movq 0x10(%rsp), %rdi
jne 0x2b4b30
cltd
idivl %r14d
movq 0x8(%rbp), %r8
movq 0x8(%rsp), %rbx
movq %rbx, %rdi
movl %eax, %esi
movq %r10, %rdx
movl %r14d, %ecx
callq 0x626da
cmpq $0x0, (%rbx)
je 0x2b4c17
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x2b4c17
movq (%rsp), %rdx
movq (%rdx), %rax
movq -0x18(%rax), %rcx
vbroadcastss 0xe4(%rdx,%rcx), %ymm0
movl 0xd8(%rdx,%rcx), %eax
movl 0xdc(%rdx,%rcx), %ecx
cltd
idivl %r14d
movl %eax, %r8d
movl %ecx, %eax
cltd
idivl %r14d
movq $0x0, 0x20(%rsp)
movq 0x10(%rsp), %rdi
movq 0x8(%rsp), %rsi
xorl %edx, %edx
xorl %ecx, %ecx
movl %eax, %r9d
callq 0x2b72c5
jmp 0x2b4c17
leal -0x1(%rax), %ecx
cmpl $0x3, %ecx
ja 0x2b4b28
leaq 0x143dab(%rip), %rax # 0x3f87d4
movslq (%rax,%rcx,4), %rcx
addq %rax, %rcx
jmpq *%rcx
movq (%rsp), %rax
movl 0xd8(%rax,%r11), %r8d
leal (%r8,%rsi,4), %esi
addl 0xdc(%rax,%r11), %esi
testb $0x3, %sil
sete %cl
movq %r9, %rdx
shrq $0x2, %rdx
addb %cl, %cl
movl %esi, %eax
andl $0x7, %eax
movzbl %cl, %r10d
pushq $0x3
popq %rcx
cmovnel %r10d, %ecx
shlq %cl, %rdx
pushq $0x1
popq %r13
testb $0x3, %r8b
jne 0x2b4b2b
cmpl $0x4, %eax
jne 0x2b4b2b
movq (%rsp), %rax
cmpl $0x0, 0xe0(%rax,%r11)
movq 0x10(%rsp), %rdi
jne 0x2b4b30
sarl $0x2, %esi
movq 0x8(%rbp), %r8
pushq $0x4
popq %rbp
movq 0x8(%rsp), %rbx
movq %rbx, %rdi
movl %ebp, %ecx
callq 0x626da
cmpq $0x0, (%rbx)
je 0x2b4c17
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x2b4c17
movq (%rsp), %rdx
movq (%rdx), %rax
movq -0x18(%rax), %rcx
vbroadcastss 0xe4(%rdx,%rcx), %xmm0
movl 0xd8(%rdx,%rcx), %eax
movl 0xdc(%rdx,%rcx), %ecx
cltd
idivl %ebp
movl %eax, %r8d
movl %ecx, %eax
cltd
idivl %ebp
movq $0x0, 0x20(%rsp)
movq 0x10(%rsp), %rdi
movq 0x8(%rsp), %rsi
xorl %edx, %edx
xorl %ecx, %ecx
movl %eax, %r9d
callq 0x2b7365
jmp 0x2b4c17
movl %eax, %r13d
movq 0x10(%rsp), %rdi
movq 0x8(%rdi), %rax
vmovups (%rdi), %xmm1
vmovaps %xmm1, 0x80(%rsp)
movq %r9, 0x90(%rsp)
movl %r15d, 0x98(%rsp)
movq 0x20(%rdi), %rcx
movq %rcx, 0xa0(%rsp)
movl %r13d, 0xa8(%rsp)
vmovdqu %xmm0, 0xac(%rsp)
movq 0x40(%rdi), %rcx
movq %rcx, 0xc0(%rsp)
testq %rax, %rax
je 0x2b4b82
lock
incl (%rax)
cmpl $0x1, %r15d
je 0x2b4bbb
vmovdqu (%rbp), %ymm0
vmovups 0x20(%rbp), %ymm1
leaq 0x30(%rsp), %rcx
vmovups %ymm1, 0x20(%rcx)
vmovdqu %ymm0, (%rcx)
movq 0x10(%rbp), %rax
movq %rax, 0x8(%rcx)
leaq 0x80(%rsp), %rsi
pushq $0x1
popq %rdx
vzeroupper
callq 0x64e3b
movq (%rsp), %rdi
movq (%rdi), %rax
addq -0x18(%rax), %rdi
leaq 0x80(%rsp), %rsi
movq 0x8(%rsp), %rdx
movq %rbp, %rcx
callq 0x2acc2e
movq %rax, 0x20(%rsp)
movq 0x88(%rsp), %rax
testq %rax, %rax
je 0x2b4c17
lock
decl (%rax)
jne 0x2b4c17
movq 0x80(%rsp), %rsi
movq 0xa0(%rsp), %rdi
testq %rdi, %rdi
je 0x2b4c0f
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2b4c17
movq %rsi, %rdi
callq 0x5f3e0
movq 0x20(%rsp), %rax
addq $0xf8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
pushq $0x3
popq %r13
movq (%rsp), %rdi
movl 0xe8(%rdi,%r11), %r14d
movq 0x18(%rsp), %r12
leal (%r14,%r12,8), %eax
addl 0xec(%rdi,%r11), %eax
testb $0x3, %al
sete %cl
movq %r9, %r8
shrq $0x3, %r8
addb %cl, %cl
testb $0x7, %al
movzbl %cl, %ecx
cmovel %r13d, %ecx
shlq %cl, %r8
orl %eax, %r14d
testb $0x7, %r14b
jne 0x2b4b2b
addl 0xd8(%rdi,%r11), %esi
addl 0xdc(%rdi,%r11), %esi
addl %edx, %r10d
addl 0xd4(%rdi,%r11), %r10d
leal (,%r12,8), %ecx
cmpl %ecx, %eax
movq 0x10(%rsp), %rdi
pushq $0x8
popq %r14
je 0x2b4cbc
movq (%rsp), %rcx
cmpl $0x0, 0xe0(%rcx,%r11)
jne 0x2b4b30
cltd
idivl %r14d
subq $0x8, %rsp
movq 0x10(%rsp), %rbx
movq %rbx, %rdi
movl %r10d, %edx
movl %r14d, %r9d
movq %rax, %r14
movl %eax, %ecx
pushq 0x8(%rbp)
callq 0x628f2
addq $0x10, %rsp
cmpq $0x0, (%rbx)
je 0x2b4c17
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x2b4c17
movq (%rsp), %r12
movq (%r12), %rax
movq -0x18(%rax), %rax
movl 0xe8(%r12,%rax), %eax
pushq $-0x8
popq %rcx
cltd
idivl %ecx
movl %eax, %edx
xorl %ecx, %ecx
testl %r14d, %r14d
movl $0x0, %eax
movq %rax, 0x20(%rsp)
cmovlel %ecx, %r14d
movq %r14, 0xf0(%rsp)
movl %edx, 0x7c(%rsp)
movl %edx, %r11d
xorl %r9d, %r9d
cmpq 0xf0(%rsp), %r9
je 0x2b4c17
movq 0x8(%rsp), %r10
movslq 0x2c(%r10), %rax
movslq 0x30(%r10), %rcx
movq 0x40(%r10), %r13
imulq %r9, %r13
movq 0x10(%r10), %rdi
imulq %rdi, %r13
addq (%r10), %r13
movl 0x34(%r10), %esi
movl 0x18(%r10), %edx
movq 0x20(%r10), %r8
movq %r13, 0x80(%rsp)
andq $0x0, 0x88(%rsp)
movq %rdi, 0x90(%rsp)
movl %edx, 0x98(%rsp)
movq %r8, 0xa0(%rsp)
movl %eax, 0xac(%rsp)
movl %ecx, 0xb0(%rsp)
movl $0x1, 0xb4(%rsp)
movl %esi, 0xb8(%rsp)
imulq %rax, %rcx
movq %rdi, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rdi
movq %rax, 0xc0(%rsp)
movl 0x28(%r10), %edx
leal -0x1(%rdx), %edi
movl %edi, 0xa8(%rsp)
cmpl $0x4, %edx
jne 0x2b4e05
movq %rcx, 0xc0(%rsp)
movq %rcx, %rax
movq (%r12), %rcx
movq -0x18(%rcx), %rdi
cmpl $0x0, 0xf0(%r12,%rdi)
je 0x2b4e2f
movq 0xf8(%r12,%rdi), %rdx
movq %r9, %r8
shlq $0x5, %r8
vmovups (%rdx,%r8), %ymm0
jmp 0x2b4e39
vbroadcastss 0xe4(%r12,%rdi), %ymm0
movl %r11d, %r11d
movl 0x7c(%rsp), %edx
movq %r9, 0xd0(%rsp)
addl %r9d, %edx
setns %r8b
cmpl 0x18(%rsp), %edx
setl %r9b
testb %r9b, %r8b
je 0x2b4ff5
movq 0x10(%rsp), %r10
movslq 0x2c(%r10), %rax
movslq 0x30(%r10), %rsi
movl 0x34(%r10), %r8d
movq (%r10), %r12
movq 0x10(%r10), %r15
movq 0x40(%r10), %rbx
movl %edx, %ebp
imulq %rbx, %rbp
imulq %r15, %rbp
addq %r12, %rbp
movl 0x18(%r10), %edx
movq 0x20(%r10), %r9
movq %rbp, 0x30(%rsp)
andq $0x0, 0x38(%rsp)
movq %r15, 0x40(%rsp)
movl %edx, 0x48(%rsp)
movq %r9, 0x50(%rsp)
movl %eax, 0x5c(%rsp)
movl %esi, 0x60(%rsp)
movl $0x1, 0x64(%rsp)
movl %r8d, 0x68(%rsp)
imulq %rax, %rsi
movq %r15, %rax
imulq %rsi, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %r15
movq %rax, 0x70(%rsp)
movl 0x28(%r10), %eax
leal -0x1(%rax), %edx
movl %edx, 0x58(%rsp)
cmpl $0x4, %eax
jne 0x2b4ef1
movq %rsi, 0x70(%rsp)
movq (%rsp), %r14
movl 0xe0(%r14,%rdi), %eax
testl %eax, %eax
movq %r11, 0x28(%rsp)
jne 0x2b4f4c
movl 0xd0(%r14,%rdi), %edx
movl 0xd4(%r14,%rdi), %ecx
movl 0xd8(%r14,%rdi), %r8d
movl 0xdc(%r14,%rdi), %r9d
leaq 0x30(%rsp), %rdi
leaq 0x80(%rsp), %rsi
callq 0x2b72c5
movq 0x28(%rsp), %r11
movq (%r14), %rcx
movq -0x18(%rcx), %rdi
movl 0xe0(%r14,%rdi), %eax
cmpl $0x1, %eax
jne 0x2b501f
movq (%rsp), %rsi
movl 0xd8(%rsi,%rdi), %eax
xorl %r8d, %r8d
testl %eax, %eax
cmovlel %r8d, %eax
movl 0xdc(%rsi,%rdi), %edx
testl %edx, %edx
cmovlel %r8d, %edx
movl 0xd0(%rsi,%rdi), %r9d
testl %r9d, %r9d
cmovlel %r8d, %r9d
movl 0xd4(%rsi,%rdi), %esi
imulq %r15, %rbx
imulq %r11, %rbx
addq %rbx, %r12
cmpl %r9d, %r8d
je 0x2b5028
vmovdqa (%rbp), %ymm0
movl %eax, %edi
subl $0x1, %edi
jb 0x2b4fb8
vmovdqa %ymm0, (%r13)
addq $0x20, %r13
jmp 0x2b4fa7
xorl %edi, %edi
xorl %r10d, %r10d
cmpl 0x5c(%rsp), %r10d
jge 0x2b4fda
vmovdqa (%r12,%rdi), %ymm0
vmovdqa %ymm0, (%r13,%rdi)
incl %r10d
addq $0x20, %rdi
jmp 0x2b4fbd
addq %rdi, %r13
movl %edx, %edi
subl $0x1, %edi
jb 0x2b4ff0
vmovdqa %ymm0, (%r13)
addq $0x20, %r13
jmp 0x2b4fdf
incl %r8d
jmp 0x2b4f97
imull %eax, %esi
testl %esi, %esi
movl $0x0, %eax
cmovlel %eax, %esi
movq 0xd0(%rsp), %r9
subl $0x1, %esi
jb 0x2b53c3
vmovups %ymm0, (%r13)
addq $0x20, %r13
jmp 0x2b500a
movq (%rsp), %r12
jmp 0x2b510b
xorl %edi, %edi
cmpl 0x60(%rsp), %edi
jge 0x2b5085
vmovdqa (%rbp), %ymm0
movl %eax, %r8d
subl $0x1, %r8d
jb 0x2b504a
vmovdqa %ymm0, (%r13)
addq $0x20, %r13
jmp 0x2b5038
xorl %r8d, %r8d
cmpl 0x5c(%rsp), %r8d
jge 0x2b506c
vmovdqa (%rbp), %ymm0
vmovdqa %ymm0, (%r13)
addq $0x20, %rbp
addq $0x20, %r13
incl %r8d
jmp 0x2b504d
movl %edx, %r8d
subl $0x1, %r8d
jb 0x2b5081
vmovdqa %ymm0, (%r13)
addq $0x20, %r13
jmp 0x2b506f
incl %edi
jmp 0x2b502a
movl 0x5c(%rsp), %edi
shll $0x3, %edi
movslq %edi, %rdi
shlq $0x2, %rdi
subq %rdi, %rbp
xorl %edi, %edi
testl %esi, %esi
cmovlel %edi, %esi
cmpl %esi, %edi
je 0x2b50fb
vmovdqa (%rbp), %ymm0
movl %eax, %r8d
subl $0x1, %r8d
jb 0x2b50bb
vmovdqa %ymm0, (%r13)
addq $0x20, %r13
jmp 0x2b50a9
xorl %r8d, %r8d
xorl %r9d, %r9d
cmpl 0x5c(%rsp), %r9d
jge 0x2b50df
vmovdqa (%rbp,%r8), %ymm0
vmovdqa %ymm0, (%r13,%r8)
incl %r9d
addq $0x20, %r8
jmp 0x2b50c1
addq %r8, %r13
movl %edx, %r8d
subl $0x1, %r8d
jb 0x2b50f7
vmovdqa %ymm0, (%r13)
addq $0x20, %r13
jmp 0x2b50e5
incl %edi
jmp 0x2b509d
movq -0x18(%rcx), %rdi
movq (%rsp), %r12
movl 0xe0(%r12,%rdi), %eax
cmpl $0x2, %eax
pushq $-0x40
popq %r13
jne 0x2b532b
movl 0xd0(%r12,%rdi), %r9d
movslq 0xd8(%r12,%rdi), %rax
movl 0x5c(%rsp), %ecx
imull %r9d, %ecx
shll $0x3, %ecx
movslq %ecx, %rcx
shlq $0x2, %rcx
addq 0x30(%rsp), %rcx
xorl %r10d, %r10d
testl %eax, %eax
movl $0x0, %edx
cmovgl %eax, %edx
movl 0xdc(%r12,%rdi), %esi
testl %esi, %esi
cmovlel %r10d, %esi
movl 0xd4(%r12,%rdi), %edi
testl %r9d, %r9d
cmovlel %r10d, %r9d
movq 0x80(%rsp), %r8
shlq $0x5, %rax
shlq $0x5, %rdx
shlq $0x5, %rsi
cmpl %r9d, %r10d
je 0x2b520c
leaq (%rcx,%rax), %r11
xorl %r15d, %r15d
xorl %r14d, %r14d
movq %rdx, %r12
addq %r14, %r12
je 0x2b51ae
vmovdqa (%r11,%r14), %ymm0
vmovdqa %ymm0, (%r8,%r15)
addq $-0x20, %r14
addq $0x20, %r15
jmp 0x2b5190
subq %r14, %r8
xorl %ebp, %ebp
movq %rcx, %r11
cmpl 0x5c(%rsp), %ebp
jge 0x2b51d2
vmovdqa (%r11), %ymm0
vmovdqa %ymm0, (%r8)
addq $0x20, %r11
addq $0x20, %r8
incl %ebp
jmp 0x2b51b6
movq %r13, %r14
movq (%rsp), %r12
leaq (%rsi,%r14), %r15
cmpq $-0x40, %r15
je 0x2b51f8
vmovdqa (%r11,%r14), %ymm0
vmovdqa %ymm0, (%r8)
addq $0x20, %r8
addq $-0x20, %r14
jmp 0x2b51d9
movslq 0x5c(%rsp), %r11
shlq $0x5, %r11
subq %r11, %rcx
incl %r10d
jmp 0x2b517d
xorl %r9d, %r9d
cmpl 0x60(%rsp), %r9d
jge 0x2b5288
leaq (%rcx,%rax), %r10
xorl %r14d, %r14d
xorl %r11d, %r11d
movq %rdx, %r15
addq %r11, %r15
je 0x2b523e
vmovdqa (%r10,%r11), %ymm0
vmovdqa %ymm0, (%r8,%r14)
addq $-0x20, %r11
addq $0x20, %r14
jmp 0x2b5220
subq %r11, %r8
xorl %r10d, %r10d
cmpl 0x5c(%rsp), %r10d
jge 0x2b5261
vmovdqa (%rcx), %ymm0
vmovdqa %ymm0, (%r8)
addq $0x20, %rcx
addq $0x20, %r8
incl %r10d
jmp 0x2b5244
movq %r13, %r10
leaq (%rsi,%r10), %r11
cmpq $-0x40, %r11
je 0x2b5283
vmovdqa (%rcx,%r10), %ymm0
vmovdqa %ymm0, (%r8)
addq $0x20, %r8
addq $-0x20, %r10
jmp 0x2b5264
incl %r9d
jmp 0x2b520f
movslq 0x5c(%rsp), %r9
shlq $0x6, %r9
subq %r9, %rcx
xorl %r9d, %r9d
testl %edi, %edi
cmovlel %r9d, %edi
cmpl %edi, %r9d
je 0x2b532b
leaq (%rcx,%rax), %r10
xorl %r14d, %r14d
xorl %r11d, %r11d
movq %rdx, %r15
addq %r11, %r15
je 0x2b52ce
vmovdqa (%r10,%r11), %ymm0
vmovdqa %ymm0, (%r8,%r14)
addq $-0x20, %r11
addq $0x20, %r14
jmp 0x2b52b0
subq %r11, %r8
xorl %r11d, %r11d
movq %rcx, %r10
cmpl 0x5c(%rsp), %r11d
jge 0x2b52f5
vmovdqa (%r10), %ymm0
vmovdqa %ymm0, (%r8)
addq $0x20, %r10
addq $0x20, %r8
incl %r11d
jmp 0x2b52d7
movq %r13, %r11
leaq (%rsi,%r11), %r14
cmpq $-0x40, %r14
je 0x2b5317
vmovdqa (%r10,%r11), %ymm0
vmovdqa %ymm0, (%r8)
addq $0x20, %r8
addq $-0x20, %r11
jmp 0x2b52f8
movslq 0x5c(%rsp), %r10
shlq $0x5, %r10
subq %r10, %rcx
incl %r9d
jmp 0x2b529d
movq 0x38(%rsp), %rax
testq %rax, %rax
je 0x2b535f
lock
decl (%rax)
jne 0x2b535f
movq 0x30(%rsp), %rsi
movq 0x50(%rsp), %rdi
testq %rdi, %rdi
je 0x2b5354
movq (%rdi), %rax
vzeroupper
callq *0x18(%rax)
jmp 0x2b535f
movq %rsi, %rdi
vzeroupper
callq 0x5f3e0
movq 0x88(%rsp), %rax
testq %rax, %rax
movq 0x28(%rsp), %r11
movq 0xd0(%rsp), %r9
je 0x2b53c3
lock
decl (%rax)
jne 0x2b53c3
movq 0x80(%rsp), %rsi
movq 0xa0(%rsp), %rdi
testq %rdi, %rdi
je 0x2b53ab
movq (%rdi), %rax
vzeroupper
callq *0x18(%rax)
movq 0x28(%rsp), %r11
movq 0xd0(%rsp), %r9
jmp 0x2b53c3
movq %rsi, %rdi
vzeroupper
callq 0x5f3e0
movq 0x28(%rsp), %r11
movq 0xd0(%rsp), %r9
incq %r9
incl %r11d
jmp 0x2b4d45
movq (%rsp), %rax
movl 0xe8(%rax,%r11), %r14d
movq 0x18(%rsp), %rdi
leal (%r14,%rdi,4), %r13d
addl 0xec(%rax,%r11), %r13d
testb $0x3, %r13b
sete %cl
movq %r9, %r8
shrq $0x2, %r8
addb %cl, %cl
movq %r13, %r12
movl %r13d, %eax
andl $0x7, %eax
movzbl %cl, %ecx
pushq $0x3
popq %r13
cmovel %r13d, %ecx
shlq %cl, %r8
testb $0x3, %r14b
jne 0x2b4b2b
cmpl $0x4, %eax
jne 0x2b4b2b
movq (%rsp), %rax
addl 0xd8(%rax,%r11), %esi
addl 0xdc(%rax,%r11), %esi
addl %edx, %r10d
addl 0xd4(%rax,%r11), %r10d
leal (,%rdi,4), %eax
cmpl %eax, %r12d
movq 0x10(%rsp), %rdi
je 0x2b5468
movq (%rsp), %rax
cmpl $0x0, 0xe0(%rax,%r11)
jne 0x2b4b30
movq %r12, %rcx
sarl $0x2, %ecx
subq $0x8, %rsp
pushq $0x4
popq %r9
movq 0x10(%rsp), %rbx
movq %rbx, %rdi
movl %r10d, %edx
movq %rcx, %r14
pushq 0x8(%rbp)
callq 0x628f2
addq $0x10, %rsp
cmpq $0x0, (%rbx)
je 0x2b4c17
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x2b4c17
movq (%rsp), %r12
movq (%r12), %rax
movq -0x18(%rax), %rax
movl 0xe8(%r12,%rax), %eax
pushq $-0x4
popq %rcx
cltd
idivl %ecx
movl %eax, %edx
xorl %ecx, %ecx
testl %r14d, %r14d
movl $0x0, %eax
movq %rax, 0x20(%rsp)
cmovlel %ecx, %r14d
movq %r14, 0xf0(%rsp)
movl %edx, 0x7c(%rsp)
movl %edx, %r11d
xorl %r9d, %r9d
cmpq 0xf0(%rsp), %r9
je 0x2b4c17
movq 0x8(%rsp), %r10
movslq 0x2c(%r10), %rax
movslq 0x30(%r10), %rcx
movq 0x40(%r10), %r13
imulq %r9, %r13
movq 0x10(%r10), %rdi
imulq %rdi, %r13
addq (%r10), %r13
movl 0x34(%r10), %esi
movl 0x18(%r10), %edx
movq 0x20(%r10), %r8
movq %r13, 0x80(%rsp)
andq $0x0, 0x88(%rsp)
movq %rdi, 0x90(%rsp)
movl %edx, 0x98(%rsp)
movq %r8, 0xa0(%rsp)
movl %eax, 0xac(%rsp)
movl %ecx, 0xb0(%rsp)
movl $0x1, 0xb4(%rsp)
movl %esi, 0xb8(%rsp)
imulq %rax, %rcx
movq %rdi, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rdi
movq %rax, 0xc0(%rsp)
movl 0x28(%r10), %edx
leal -0x1(%rdx), %edi
movl %edi, 0xa8(%rsp)
cmpl $0x4, %edx
jne 0x2b55b2
movq %rcx, 0xc0(%rsp)
movq %rcx, %rax
movq (%r12), %rcx
movq -0x18(%rcx), %rdi
cmpl $0x0, 0xf0(%r12,%rdi)
je 0x2b55dc
movq 0xf8(%r12,%rdi), %rdx
movq %r9, %r8
shlq $0x4, %r8
vmovups (%rdx,%r8), %xmm0
jmp 0x2b55e6
vbroadcastss 0xe4(%r12,%rdi), %xmm0
movl %r11d, %r11d
movl 0x7c(%rsp), %edx
movq %r9, 0xd0(%rsp)
addl %r9d, %edx
setns %r8b
cmpl 0x18(%rsp), %edx
setl %r9b
testb %r9b, %r8b
je 0x2b57a2
movq 0x10(%rsp), %r10
movslq 0x2c(%r10), %rax
movslq 0x30(%r10), %rsi
movl 0x34(%r10), %r8d
movq (%r10), %r12
movq 0x10(%r10), %r15
movq 0x40(%r10), %rbx
movl %edx, %ebp
imulq %rbx, %rbp
imulq %r15, %rbp
addq %r12, %rbp
movl 0x18(%r10), %edx
movq 0x20(%r10), %r9
movq %rbp, 0x30(%rsp)
andq $0x0, 0x38(%rsp)
movq %r15, 0x40(%rsp)
movl %edx, 0x48(%rsp)
movq %r9, 0x50(%rsp)
movl %eax, 0x5c(%rsp)
movl %esi, 0x60(%rsp)
movl $0x1, 0x64(%rsp)
movl %r8d, 0x68(%rsp)
imulq %rax, %rsi
movq %r15, %rax
imulq %rsi, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %r15
movq %rax, 0x70(%rsp)
movl 0x28(%r10), %eax
leal -0x1(%rax), %edx
movl %edx, 0x58(%rsp)
cmpl $0x4, %eax
jne 0x2b569e
movq %rsi, 0x70(%rsp)
movq (%rsp), %r14
movl 0xe0(%r14,%rdi), %eax
testl %eax, %eax
movq %r11, 0x28(%rsp)
jne 0x2b56f9
movl 0xd0(%r14,%rdi), %edx
movl 0xd4(%r14,%rdi), %ecx
movl 0xd8(%r14,%rdi), %r8d
movl 0xdc(%r14,%rdi), %r9d
leaq 0x30(%rsp), %rdi
leaq 0x80(%rsp), %rsi
callq 0x2b7365
movq 0x28(%rsp), %r11
movq (%r14), %rcx
movq -0x18(%rcx), %rdi
movl 0xe0(%r14,%rdi), %eax
cmpl $0x1, %eax
jne 0x2b57cc
movq (%rsp), %rsi
movl 0xd8(%rsi,%rdi), %eax
xorl %r8d, %r8d
testl %eax, %eax
cmovlel %r8d, %eax
movl 0xdc(%rsi,%rdi), %edx
testl %edx, %edx
cmovlel %r8d, %edx
movl 0xd0(%rsi,%rdi), %r9d
testl %r9d, %r9d
cmovlel %r8d, %r9d
movl 0xd4(%rsi,%rdi), %esi
imulq %r15, %rbx
imulq %r11, %rbx
addq %rbx, %r12
cmpl %r9d, %r8d
je 0x2b57d5
vmovdqa (%rbp), %xmm0
movl %eax, %edi
subl $0x1, %edi
jb 0x2b5765
vmovdqa %xmm0, (%r13)
addq $0x10, %r13
jmp 0x2b5754
xorl %edi, %edi
xorl %r10d, %r10d
cmpl 0x5c(%rsp), %r10d
jge 0x2b5787
vmovdqa (%r12,%rdi), %xmm0
vmovdqa %xmm0, (%r13,%rdi)
incl %r10d
addq $0x10, %rdi
jmp 0x2b576a
addq %rdi, %r13
movl %edx, %edi
subl $0x1, %edi
jb 0x2b579d
vmovdqa %xmm0, (%r13)
addq $0x10, %r13
jmp 0x2b578c
incl %r8d
jmp 0x2b5744
imull %eax, %esi
testl %esi, %esi
movl $0x0, %eax
cmovlel %eax, %esi
movq 0xd0(%rsp), %r9
subl $0x1, %esi
jb 0x2b5b64
vmovups %xmm0, (%r13)
addq $0x10, %r13
jmp 0x2b57b7
movq (%rsp), %r12
jmp 0x2b58b8
xorl %edi, %edi
cmpl 0x60(%rsp), %edi
jge 0x2b5832
vmovdqa (%rbp), %xmm0
movl %eax, %r8d
subl $0x1, %r8d
jb 0x2b57f7
vmovdqa %xmm0, (%r13)
addq $0x10, %r13
jmp 0x2b57e5
xorl %r8d, %r8d
cmpl 0x5c(%rsp), %r8d
jge 0x2b5819
vmovdqa (%rbp), %xmm0
vmovdqa %xmm0, (%r13)
addq $0x10, %rbp
addq $0x10, %r13
incl %r8d
jmp 0x2b57fa
movl %edx, %r8d
subl $0x1, %r8d
jb 0x2b582e
vmovdqa %xmm0, (%r13)
addq $0x10, %r13
jmp 0x2b581c
incl %edi
jmp 0x2b57d7
movl 0x5c(%rsp), %edi
shll $0x2, %edi
movslq %edi, %rdi
shlq $0x2, %rdi
subq %rdi, %rbp
xorl %edi, %edi
testl %esi, %esi
cmovlel %edi, %esi
cmpl %esi, %edi
je 0x2b58a8
vmovdqa (%rbp), %xmm0
movl %eax, %r8d
subl $0x1, %r8d
jb 0x2b5868
vmovdqa %xmm0, (%r13)
addq $0x10, %r13
jmp 0x2b5856
xorl %r8d, %r8d
xorl %r9d, %r9d
cmpl 0x5c(%rsp), %r9d
jge 0x2b588c
vmovdqa (%rbp,%r8), %xmm0
vmovdqa %xmm0, (%r13,%r8)
incl %r9d
addq $0x10, %r8
jmp 0x2b586e
addq %r8, %r13
movl %edx, %r8d
subl $0x1, %r8d
jb 0x2b58a4
vmovdqa %xmm0, (%r13)
addq $0x10, %r13
jmp 0x2b5892
incl %edi
jmp 0x2b584a
movq -0x18(%rcx), %rdi
movq (%rsp), %r12
movl 0xe0(%r12,%rdi), %eax
cmpl $0x2, %eax
pushq $-0x20
popq %r13
jne 0x2b5ad8
movl 0xd0(%r12,%rdi), %r9d
movslq 0xd8(%r12,%rdi), %rax
movl 0x5c(%rsp), %ecx
imull %r9d, %ecx
shll $0x2, %ecx
movslq %ecx, %rcx
shlq $0x2, %rcx
addq 0x30(%rsp), %rcx
xorl %r10d, %r10d
testl %eax, %eax
movl $0x0, %edx
cmovgl %eax, %edx
movl 0xdc(%r12,%rdi), %esi
testl %esi, %esi
cmovlel %r10d, %esi
movl 0xd4(%r12,%rdi), %edi
testl %r9d, %r9d
cmovlel %r10d, %r9d
movq 0x80(%rsp), %r8
shlq $0x4, %rax
shlq $0x4, %rdx
shlq $0x4, %rsi
cmpl %r9d, %r10d
je 0x2b59b9
leaq (%rcx,%rax), %r11
xorl %r15d, %r15d
xorl %r14d, %r14d
movq %rdx, %r12
addq %r14, %r12
je 0x2b595b
vmovdqa (%r11,%r14), %xmm0
vmovdqa %xmm0, (%r8,%r15)
addq $-0x10, %r14
addq $0x10, %r15
jmp 0x2b593d
subq %r14, %r8
xorl %ebp, %ebp
movq %rcx, %r11
cmpl 0x5c(%rsp), %ebp
jge 0x2b597f
vmovdqa (%r11), %xmm0
vmovdqa %xmm0, (%r8)
addq $0x10, %r11
addq $0x10, %r8
incl %ebp
jmp 0x2b5963
movq %r13, %r14
movq (%rsp), %r12
leaq (%rsi,%r14), %r15
cmpq $-0x20, %r15
je 0x2b59a5
vmovdqa (%r11,%r14), %xmm0
vmovdqa %xmm0, (%r8)
addq $0x10, %r8
addq $-0x10, %r14
jmp 0x2b5986
movslq 0x5c(%rsp), %r11
shlq $0x4, %r11
subq %r11, %rcx
incl %r10d
jmp 0x2b592a
xorl %r9d, %r9d
cmpl 0x60(%rsp), %r9d
jge 0x2b5a35
leaq (%rcx,%rax), %r10
xorl %r14d, %r14d
xorl %r11d, %r11d
movq %rdx, %r15
addq %r11, %r15
je 0x2b59eb
vmovdqa (%r10,%r11), %xmm0
vmovdqa %xmm0, (%r8,%r14)
addq $-0x10, %r11
addq $0x10, %r14
jmp 0x2b59cd
subq %r11, %r8
xorl %r10d, %r10d
cmpl 0x5c(%rsp), %r10d
jge 0x2b5a0e
vmovdqa (%rcx), %xmm0
vmovdqa %xmm0, (%r8)
addq $0x10, %rcx
addq $0x10, %r8
incl %r10d
jmp 0x2b59f1
movq %r13, %r10
leaq (%rsi,%r10), %r11
cmpq $-0x20, %r11
je 0x2b5a30
vmovdqa (%rcx,%r10), %xmm0
vmovdqa %xmm0, (%r8)
addq $0x10, %r8
addq $-0x10, %r10
jmp 0x2b5a11
incl %r9d
jmp 0x2b59bc
movslq 0x5c(%rsp), %r9
shlq $0x5, %r9
subq %r9, %rcx
xorl %r9d, %r9d
testl %edi, %edi
cmovlel %r9d, %edi
cmpl %edi, %r9d
je 0x2b5ad8
leaq (%rcx,%rax), %r10
xorl %r14d, %r14d
xorl %r11d, %r11d
movq %rdx, %r15
addq %r11, %r15
je 0x2b5a7b
vmovdqa (%r10,%r11), %xmm0
vmovdqa %xmm0, (%r8,%r14)
addq $-0x10, %r11
addq $0x10, %r14
jmp 0x2b5a5d
subq %r11, %r8
xorl %r11d, %r11d
movq %rcx, %r10
cmpl 0x5c(%rsp), %r11d
jge 0x2b5aa2
vmovdqa (%r10), %xmm0
vmovdqa %xmm0, (%r8)
addq $0x10, %r10
addq $0x10, %r8
incl %r11d
jmp 0x2b5a84
movq %r13, %r11
leaq (%rsi,%r11), %r14
cmpq $-0x20, %r14
je 0x2b5ac4
vmovdqa (%r10,%r11), %xmm0
vmovdqa %xmm0, (%r8)
addq $0x10, %r8
addq $-0x10, %r11
jmp 0x2b5aa5
movslq 0x5c(%rsp), %r10
shlq $0x4, %r10
subq %r10, %rcx
incl %r9d
jmp 0x2b5a4a
movq 0x38(%rsp), %rax
testq %rax, %rax
je 0x2b5b06
lock
decl (%rax)
jne 0x2b5b06
movq 0x30(%rsp), %rsi
movq 0x50(%rsp), %rdi
testq %rdi, %rdi
je 0x2b5afe
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2b5b06
movq %rsi, %rdi
callq 0x5f3e0
movq 0x88(%rsp), %rax
testq %rax, %rax
movq 0x28(%rsp), %r11
movq 0xd0(%rsp), %r9
je 0x2b5b64
lock
decl (%rax)
jne 0x2b5b64
movq 0x80(%rsp), %rsi
movq 0xa0(%rsp), %rdi
testq %rdi, %rdi
je 0x2b5b4f
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x28(%rsp), %r11
movq 0xd0(%rsp), %r9
jmp 0x2b5b64
movq %rsi, %rdi
callq 0x5f3e0
movq 0x28(%rsp), %r11
movq 0xd0(%rsp), %r9
incq %r9
incl %r11d
jmp 0x2b54f2
movq (%rsp), %rax
cmpl $0x0, 0xe0(%rax,%r11)
movq 0x10(%rsp), %rdi
jne 0x2b4b30
movq (%rsp), %rax
movl 0xe8(%rax,%r11), %r13d
addl %r12d, %r13d
addl 0xec(%rax,%r11), %r13d
addl %edx, %r10d
addl 0xd4(%rax,%r11), %r10d
addl 0xd8(%rax,%r11), %esi
addl 0xdc(%rax,%r11), %esi
movq 0x8(%rsp), %rbx
movq %rbx, %rdi
movl %r10d, %edx
movl %r13d, %ecx
movq 0x18(%rsp), %r8
pushq 0x8(%rbp)
pushq $0x8
callq 0x62a26
addq $0x10, %rsp
cmpq $0x0, (%rbx)
je 0x2b4c17
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x2b4c17
xorl %ecx, %ecx
testl %r13d, %r13d
cmovlel %ecx, %r13d
movq 0x18(%rsp), %rdx
testl %edx, %edx
movl $0x0, %eax
movq %rax, 0x20(%rsp)
cmovlel %ecx, %edx
movq %rdx, 0x18(%rsp)
movabsq $0x100000001, %r11 # imm = 0x100000001
xorl %r14d, %r14d
movq (%rsp), %rbp
cmpq 0x18(%rsp), %r14
je 0x2b4c17
movq (%rbp), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0xf0(%rbp,%rax)
je 0x2b5c61
movq 0xf8(%rbp,%rax), %rax
movq %r14, %rcx
shlq $0x5, %rcx
vmovups (%rax,%rcx), %ymm0
jmp 0x2b5c6b
vbroadcastss 0xe4(%rbp,%rax), %ymm0
xorl %r15d, %r15d
vmovups %ymm0, 0xd0(%rsp)
cmpq %r13, %r15
je 0x2b5e1d
movq 0x8(%rsp), %rax
movslq 0x2c(%rax), %rdx
movslq 0x30(%rax), %rsi
movq 0x40(%rax), %rdi
imulq %r14, %rdi
movq 0x10(%rax), %r8
imulq %r8, %rdi
addq (%rax), %rdi
movl 0x18(%rax), %r9d
movq 0x20(%rax), %r10
movq %rsi, %rax
imulq %rdx, %rax
movq %r15, %rcx
imulq %r8, %rcx
imulq %rax, %rcx
addq %rdi, %rcx
movq %rcx, 0x80(%rsp)
andq $0x0, 0x88(%rsp)
movq %r8, 0x90(%rsp)
movl %r9d, 0x98(%rsp)
movq %r10, 0xa0(%rsp)
movl $0x2, 0xa8(%rsp)
movl %edx, 0xac(%rsp)
movl %esi, 0xb0(%rsp)
movq %r11, 0xb4(%rsp)
movq %rax, 0xc0(%rsp)
movq (%rbp), %rdx
movq -0x18(%rdx), %rsi
movl 0xe8(%rbp,%rsi), %edi
movl %r15d, %edx
subl %edi, %edx
setns %dil
cmpl %r12d, %edx
setl %r8b
testb %r8b, %dil
je 0x2b5e04
movq 0x10(%rsp), %rcx
movslq 0x2c(%rcx), %rax
movslq 0x30(%rcx), %rbp
movq 0x40(%rcx), %rdi
imulq %r14, %rdi
movq 0x10(%rcx), %r8
imulq %r8, %rdi
addq (%rcx), %rdi
movl 0x18(%rcx), %r9d
movq 0x20(%rcx), %r10
movq %rbp, %rcx
imulq %rax, %rcx
movl %edx, %edx
movq %r13, %rbx
movq %r11, %r13
movq %rcx, %r11
imulq %r8, %r11
imulq %rdx, %r11
addq %rdi, %r11
movq %r11, 0x30(%rsp)
andq $0x0, 0x38(%rsp)
movq %r8, 0x40(%rsp)
movl %r9d, 0x48(%rsp)
movq %r10, 0x50(%rsp)
movl $0x2, 0x58(%rsp)
movl %eax, 0x5c(%rsp)
movl %ebp, 0x60(%rsp)
movq (%rsp), %rbp
movq %r13, 0x64(%rsp)
movq %rcx, 0x70(%rsp)
movl 0xd0(%rbp,%rsi), %edx
movl 0xd4(%rbp,%rsi), %ecx
movl 0xd8(%rbp,%rsi), %r8d
movl 0xdc(%rbp,%rsi), %r9d
leaq 0x30(%rsp), %rdi
leaq 0x80(%rsp), %rsi
vmovups 0xd0(%rsp), %ymm0
callq 0x2b72c5
vmovups 0xd0(%rsp), %ymm0
movq %r13, %r11
movq %rbx, %r13
incq %r15
jmp 0x2b5c77
testl %eax, %eax
movl $0x0, %edx
cmovlel %edx, %eax
subl $0x1, %eax
jb 0x2b5dfc
vmovups %ymm0, (%rcx)
addq $0x20, %rcx
jmp 0x2b5e0e
incq %r14
jmp 0x2b5c2e
leal (%r10,%rdx,8), %eax
movq (%rsp), %rdi
addl 0xd4(%rdi,%r11), %eax
testb $0x3, %al
sete %cl
movq %r9, %r8
shrq $0x3, %r8
addb %cl, %cl
testb $0x7, %al
movzbl %cl, %edx
pushq $0x3
popq %rcx
cmovnel %edx, %ecx
shlq %cl, %r8
orl %eax, %r10d
pushq $0x2
popq %r13
testb $0x7, %r10b
jne 0x2b4b2b
cmpl $0x0, 0xe0(%rdi,%r11)
movq 0x10(%rsp), %rdi
jne 0x2b4b30
movq (%rsp), %rcx
addl 0xd8(%rcx,%r11), %esi
addl 0xdc(%rcx,%r11), %esi
cltd
idivl %r14d
movq 0x8(%rbp), %r9
movq 0x8(%rsp), %rbx
movq %rbx, %rdi
movl %eax, %edx
movq %r8, %rcx
movl %r14d, %r8d
callq 0x627de
cmpq $0x0, (%rbx)
je 0x2b4c17
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x2b4c17
movq (%rsp), %rdi
movq (%rdi), %rax
movq -0x18(%rax), %rsi
vbroadcastss 0xe4(%rdi,%rsi), %ymm0
movl 0xd0(%rdi,%rsi), %eax
movl 0xd4(%rdi,%rsi), %ecx
cltd
idivl %r14d
movl %eax, %r10d
movl %ecx, %eax
cltd
idivl %r14d
movl 0xd8(%rdi,%rsi), %r8d
movl 0xdc(%rdi,%rsi), %r9d
movq 0x10(%rsp), %rdi
movq 0x8(%rsp), %rsi
movl %r10d, %edx
movl %eax, %ecx
callq 0x2b72c5
jmp 0x2b62e5
movq (%rsp), %rax
cmpl $0x0, 0xe0(%rax,%r11)
movq 0x10(%rsp), %rdi
jne 0x2b4b30
movq (%rsp), %rax
movl 0xe8(%rax,%r11), %r13d
addl %r12d, %r13d
addl 0xec(%rax,%r11), %r13d
addl %edx, %r10d
addl 0xd4(%rax,%r11), %r10d
addl 0xd8(%rax,%r11), %esi
addl 0xdc(%rax,%r11), %esi
movq 0x8(%rsp), %rbx
movq %rbx, %rdi
movl %r10d, %edx
movl %r13d, %ecx
movq 0x18(%rsp), %r8
pushq 0x8(%rbp)
pushq $0x4
callq 0x62a26
addq $0x10, %rsp
cmpq $0x0, (%rbx)
je 0x2b4c17
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x2b4c17
xorl %ecx, %ecx
testl %r13d, %r13d
cmovlel %ecx, %r13d
movq 0x18(%rsp), %rdx
testl %edx, %edx
movl $0x0, %eax
movq %rax, 0x20(%rsp)
cmovlel %ecx, %edx
movq %rdx, 0x18(%rsp)
movabsq $0x100000001, %r11 # imm = 0x100000001
xorl %r14d, %r14d
movq (%rsp), %rbp
cmpq 0x18(%rsp), %r14
je 0x2b4c17
movq (%rbp), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0xf0(%rbp,%rax)
je 0x2b6013
movq 0xf8(%rbp,%rax), %rax
movq %r14, %rcx
shlq $0x4, %rcx
vmovups (%rax,%rcx), %xmm0
jmp 0x2b601d
vbroadcastss 0xe4(%rbp,%rax), %xmm0
xorl %r15d, %r15d
vmovaps %xmm0, 0xd0(%rsp)
cmpq %r13, %r15
je 0x2b61d5
movq 0x8(%rsp), %rax
movslq 0x2c(%rax), %rdx
movslq 0x30(%rax), %rsi
movq 0x40(%rax), %rdi
imulq %r14, %rdi
movq 0x10(%rax), %r8
imulq %r8, %rdi
addq (%rax), %rdi
movl 0x18(%rax), %r9d
movq 0x20(%rax), %r10
movq %rsi, %rax
imulq %rdx, %rax
movq %r15, %rcx
imulq %r8, %rcx
imulq %rax, %rcx
addq %rdi, %rcx
movq %rcx, 0x80(%rsp)
andq $0x0, 0x88(%rsp)
movq %r8, 0x90(%rsp)
movl %r9d, 0x98(%rsp)
movq %r10, 0xa0(%rsp)
movl $0x2, 0xa8(%rsp)
movl %edx, 0xac(%rsp)
movl %esi, 0xb0(%rsp)
movq %r11, 0xb4(%rsp)
movq %rax, 0xc0(%rsp)
movq (%rbp), %rdx
movq -0x18(%rdx), %rsi
movl 0xe8(%rbp,%rsi), %edi
movl %r15d, %edx
subl %edi, %edx
setns %dil
cmpl %r12d, %edx
setl %r8b
testb %r8b, %dil
je 0x2b61bc
movq 0x10(%rsp), %rcx
movslq 0x2c(%rcx), %rax
movslq 0x30(%rcx), %rbp
movq 0x40(%rcx), %rdi
imulq %r14, %rdi
movq 0x10(%rcx), %r8
imulq %r8, %rdi
addq (%rcx), %rdi
movl 0x18(%rcx), %r9d
movq 0x20(%rcx), %r10
movq %rbp, %rcx
imulq %rax, %rcx
movl %edx, %edx
movq %r13, %rbx
movl %r12d, %r13d
movq %r11, %r12
movq %rcx, %r11
imulq %r8, %r11
imulq %rdx, %r11
addq %rdi, %r11
movq %r11, 0x30(%rsp)
andq $0x0, 0x38(%rsp)
movq %r8, 0x40(%rsp)
movl %r9d, 0x48(%rsp)
movq %r10, 0x50(%rsp)
movl $0x2, 0x58(%rsp)
movl %eax, 0x5c(%rsp)
movl %ebp, 0x60(%rsp)
movq (%rsp), %rbp
movq %r12, 0x64(%rsp)
movq %rcx, 0x70(%rsp)
movl 0xd0(%rbp,%rsi), %edx
movl 0xd4(%rbp,%rsi), %ecx
movl 0xd8(%rbp,%rsi), %r8d
movl 0xdc(%rbp,%rsi), %r9d
leaq 0x30(%rsp), %rdi
leaq 0x80(%rsp), %rsi
vmovaps 0xd0(%rsp), %xmm0
callq 0x2b7365
vmovaps 0xd0(%rsp), %xmm0
movq %r12, %r11
movl %r13d, %r12d
movq %rbx, %r13
incq %r15
jmp 0x2b6029
testl %eax, %eax
movl $0x0, %edx
cmovlel %edx, %eax
subl $0x1, %eax
jb 0x2b61b4
vmovups %xmm0, (%rcx)
addq $0x10, %rcx
jmp 0x2b61c6
incq %r14
jmp 0x2b5fe0
leal (%r10,%rdx,4), %edx
movq (%rsp), %rax
addl 0xd4(%rax,%r11), %edx
testb $0x3, %dl
sete %cl
movq %r9, %rax
shrq $0x2, %rax
addb %cl, %cl
movl %edx, %r8d
andl $0x7, %r8d
movzbl %cl, %r14d
pushq $0x3
popq %rcx
cmovnel %r14d, %ecx
shlq %cl, %rax
pushq $0x2
popq %r13
testb $0x3, %r10b
jne 0x2b4b2b
cmpl $0x4, %r8d
jne 0x2b4b2b
movq (%rsp), %rcx
cmpl $0x0, 0xe0(%rcx,%r11)
movq 0x10(%rsp), %rdi
jne 0x2b4b30
movq (%rsp), %rcx
addl 0xd8(%rcx,%r11), %esi
addl 0xdc(%rcx,%r11), %esi
sarl $0x2, %edx
movq 0x8(%rbp), %r9
pushq $0x4
popq %rbp
movq 0x8(%rsp), %rbx
movq %rbx, %rdi
movq %rax, %rcx
movl %ebp, %r8d
callq 0x627de
cmpq $0x0, (%rbx)
je 0x2b4c17
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x2b4c17
movq (%rsp), %rdi
movq (%rdi), %rax
movq -0x18(%rax), %rsi
vbroadcastss 0xe4(%rdi,%rsi), %xmm0
movl 0xd0(%rdi,%rsi), %eax
movl 0xd4(%rdi,%rsi), %ecx
cltd
idivl %ebp
movl %eax, %r10d
movl %ecx, %eax
cltd
idivl %ebp
movl 0xd8(%rdi,%rsi), %r8d
movl 0xdc(%rdi,%rsi), %r9d
movq 0x10(%rsp), %rdi
movq 0x8(%rsp), %rsi
movl %r10d, %edx
movl %eax, %ecx
callq 0x2b7365
movq $0x0, 0x20(%rsp)
jmp 0x2b4c17
movq $0x0, 0x20(%rsp)
cmpq %r14, 0x8(%rsp)
je 0x2b4c17
movq 0x8(%r14), %rax
testq %rax, %rax
je 0x2b6313
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x2b6347
lock
decl (%rax)
jne 0x2b6347
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
je 0x2b633f
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2b6347
movq %rsi, %rdi
callq 0x5f3e0
movq 0x8(%rsp), %rcx
andq $0x0, 0x40(%rcx)
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, (%rcx)
vmovups %xmm0, 0xc(%rcx)
andl $0x0, 0x38(%rcx)
vmovups %xmm0, 0x28(%rcx)
vmovups (%r14), %xmm0
vmovups %xmm0, (%rcx)
movq 0x10(%r14), %rax
movq %rax, 0x10(%rcx)
movl 0x18(%r14), %eax
movl %eax, 0x18(%rcx)
movq 0x20(%r14), %rax
movq %rax, 0x20(%rcx)
vmovdqu 0x28(%r14), %xmm0
vmovdqu %xmm0, 0x28(%rcx)
movl 0x38(%r14), %eax
movl %eax, 0x38(%rcx)
movq 0x40(%r14), %rax
movq %rax, 0x40(%rcx)
jmp 0x2b4c17
jmp 0x2b63f4
jmp 0x2b63f4
jmp 0x2b63f4
jmp 0x2b63f4
jmp 0x2b63f4
jmp 0x2b63b2
movq %rax, %rbx
movq 0x88(%rsp), %rax
testq %rax, %rax
je 0x2b63ec
lock
decl (%rax)
jne 0x2b63ec
movq 0x80(%rsp), %rsi
movq 0xa0(%rsp), %rdi
testq %rdi, %rdi
jne 0x2b63e6
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2b63ec
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/padding_x86_fma.cpp |
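Every packed branch of forward() above makes the same decision: choose the widest pack size that still divides the padded extent, then rescale the element size so the per-scalar byte count stays constant. Lifted into a stand-alone helper, the ladder looks like the sketch below (the pack16 rung exists only where __AVX512F__ is compiled in, as the preprocessor guards in the listing show; in this FMA build only the 8/4/1 rungs are live):

// Hedged sketch of the out_elempack selection repeated throughout forward() above.
static int pick_out_elempack(int n)
{
    if (n % 16 == 0) return 16; // AVX-512 rung, compiled only under __AVX512F__
    if (n % 8 == 0) return 8;   // AVX rung
    if (n % 4 == 0) return 4;   // SSE rung
    return 1;                   // scalar fallback
}
// followed by: size_t out_elemsize = elemsize / elempack * out_elempack;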
ncnn::Padding_x86_fma::forward_int8(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const | int Padding_x86_fma::forward_int8(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int d = bottom_blob.d;
int channels = bottom_blob.c;
int dims = bottom_blob.dims;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
#if __SSE2__
if (elempack == 8)
{
if (dims == 1)
{
int outw = w * elempack + left + right;
int out_elempack = outw % 8 == 0 ? 8 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (left % 8 == 0 && out_elempack == 8 && type == 0)
{
top_blob.create(outw / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
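// broadcast the (truncated) pad byte into all 8 int8 lanes of one int64 pixel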
int64_t v8 = (int64_t)value;
int64_t pad_value = v8 | (v8 << 8) | (v8 << 16) | (v8 << 24) | (v8 << 32) | (v8 << 40) | (v8 << 48) | (v8 << 56);
padding_constant_pack8_int8_sse(bottom_blob, top_blob, 0, 0, left / 8, right / 8, pad_value);
return 0;
}
}
if (dims == 2)
{
int outw = w + left + right;
int outh = h * elempack + top + bottom;
int out_elempack = outh % 8 == 0 ? 8 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (top % 8 == 0 && out_elempack == 8 && type == 0)
{
top_blob.create(outw, outh / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
int64_t v8 = (int64_t)value;
int64_t pad_value = v8 | (v8 << 8) | (v8 << 16) | (v8 << 24) | (v8 << 32) | (v8 << 40) | (v8 << 48) | (v8 << 56);
padding_constant_pack8_int8_sse(bottom_blob, top_blob, top / 8, bottom / 8, left, right, pad_value);
return 0;
}
}
if (dims == 3)
{
int outw = w + left + right;
int outh = h + top + bottom;
int outc = channels * elempack + front + behind;
int out_elempack = outc % 8 == 0 ? 8 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (front % 8 == 0 && out_elempack == 8 && !(outc != channels * elempack && type != 0))
{
top_blob.create(outw, outh, outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
int front_ = front / elempack;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < outc / out_elempack; q++)
{
Mat borderm = top_blob.channel(q);
// TODO perchannel
// int64_t pad_value = per_channel_pad_data_size ? vld1_s8(per_channel_pad_data + q * 8) : vdup_n_s8((signed char)value);
int64_t v8 = (int64_t)value;
int64_t pad_value = v8 | (v8 << 8) | (v8 << 16) | (v8 << 24) | (v8 << 32) | (v8 << 40) | (v8 << 48) | (v8 << 56);
// channel padding
if ((q - front_) < 0 || (q - front_) >= channels)
{
borderm.fill<int64_t>(pad_value);
}
else
{
const Mat m = bottom_blob.channel(q - front_);
if (type == 0)
padding_constant_pack8_int8_sse(m, borderm, top, bottom, left, right, pad_value);
if (type == 1)
padding_replicate_pack8_int8_sse(m, borderm, top, bottom, left, right);
if (type == 2)
padding_reflect_pack8_int8_sse(m, borderm, top, bottom, left, right);
}
}
return 0;
}
}
if (dims == 4)
{
int outw = w + left + right;
int outh = h + top + bottom;
int outd = d + front + behind;
if (type == 0)
{
top_blob.create(outw, outh, outd, channels, elemsize, elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
// TODO perchannel
// int64_t pad_value = per_channel_pad_data_size ? vld1_s8(per_channel_pad_data + q * 8) : vdup_n_s8((signed char)value);
int64_t v8 = (int64_t)value;
int64_t pad_value = v8 | (v8 << 8) | (v8 << 16) | (v8 << 24) | (v8 << 32) | (v8 << 40) | (v8 << 48) | (v8 << 56);
for (int z = 0; z < outd; z++)
{
Mat borderm = top_blob.channel(q).depth(z);
// depth padding
if ((z - front) < 0 || (z - front) >= d)
{
borderm.fill<int64_t>(pad_value);
}
else
{
const Mat m = bottom_blob.channel(q).depth(z - front);
padding_constant_pack8_int8_sse(m, borderm, top, bottom, left, right, pad_value);
}
}
}
return 0;
}
}
}
#endif // __SSE2__
Mat bottom_blob_unpacked = bottom_blob;
if (elempack != 1)
{
Option opt_pack1 = opt;
opt_pack1.blob_allocator = opt.workspace_allocator;
convert_packing(bottom_blob, bottom_blob_unpacked, 1, opt_pack1);
}
return Padding::forward(bottom_blob_unpacked, top_blob, opt);
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x128, %rsp # imm = 0x128
movq %rcx, %r12
movq %rdx, %rbx
movq %rdi, %r14
movl 0x38(%rsi), %r15d
movq 0x10(%rsi), %r9
movl 0x18(%rsi), %edi
movq %rsi, 0x28(%rsp)
vmovdqu 0x28(%rsi), %xmm0
cmpl $0x8, %edi
jne 0x2b70ac
vmovd %xmm0, %eax
decl %eax
cmpl $0x3, %eax
ja 0x2b70ac
leaq 0x1423aa(%rip), %rcx # 0x3f87f4
movslq (%rcx,%rax,4), %rax
vpextrd $0x1, %xmm0, %esi
addq %rcx, %rax
vpextrd $0x2, %xmm0, %edx
pushq $-0x64
popq %rcx
movq %rcx, 0x20(%rsp)
movq %r14, 0x18(%rsp)
jmpq *%rax
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xd8(%r14,%rax), %r8d
leal (%r8,%rsi,8), %esi
addl 0xdc(%r14,%rax), %esi
xorl %r10d, %r10d
testb $0x7, %sil
sete %r10b
movq %r9, %rdx
shrq $0x3, %rdx
leal (%r10,%r10,2), %ecx
shlq %cl, %rdx
testb $0x7, %r8b
sete %cl
andb %cl, %r10b
cmpb $0x1, %r10b
jne 0x2b70ac
cmpl $0x0, 0xe0(%r14,%rax)
jne 0x2b70ac
sarl $0x3, %esi
movq 0x8(%r12), %r8
pushq $0x8
popq %rbp
movq %rbx, %rdi
movl %ebp, %ecx
callq 0x626da
cmpq $0x0, (%rbx)
je 0x2b7181
movslq 0x38(%rbx), %rax
imulq 0x40(%rbx), %rax
testq %rax, %rax
je 0x2b7181
movq 0x18(%rsp), %r8
movq (%r8), %rax
movq -0x18(%rax), %rcx
vcvttss2si 0xe4(%r8,%rcx), %rax
movq %rax, %rdx
shlq $0x8, %rdx
movq %rax, %rsi
shlq $0x10, %rsi
orq %rdx, %rsi
movq %rax, %rdx
shlq $0x18, %rdx
movq %rax, %rdi
shlq $0x20, %rdi
orq %rdx, %rdi
orq %rsi, %rdi
movq %rax, %rdx
shlq $0x28, %rdx
movq %rax, %rsi
shlq $0x30, %rsi
orq %rdx, %rsi
movq %rax, %r9
shlq $0x38, %r9
orq %rsi, %r9
orq %rdi, %r9
orq %rax, %r9
movl 0xd8(%r8,%rcx), %eax
movl 0xdc(%r8,%rcx), %ecx
cltd
idivl %ebp
movl %eax, %r8d
movl %ecx, %eax
cltd
idivl %ebp
movq %r9, (%rsp)
movq $0x0, 0x20(%rsp)
movq 0x28(%rsp), %rdi
movq %rbx, %rsi
xorl %edx, %edx
xorl %ecx, %ecx
movl %eax, %r9d
callq 0x2b740e
jmp 0x2b7181
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xe8(%r14,%rax), %r10d
leal (%r10,%r15,8), %r13d
addl 0xec(%r14,%rax), %r13d
xorl %r11d, %r11d
testb $0x7, %r13b
sete %r11b
movq %r9, %r8
shrq $0x3, %r8
leal (%r11,%r11,2), %ecx
shlq %cl, %r8
testb $0x7, %r10b
sete %cl
andb %cl, %r11b
cmpb $0x1, %r11b
jne 0x2b70ac
addl 0xd8(%r14,%rax), %esi
addl 0xdc(%r14,%rax), %esi
addl 0xd0(%r14,%rax), %edx
addl 0xd4(%r14,%rax), %edx
leal (,%r15,8), %ecx
cmpl %ecx, %r13d
je 0x2b6615
cmpl $0x0, 0xe0(%r14,%rax)
jne 0x2b70ac
sarl $0x3, %r13d
movq 0x8(%r12), %rax
movq %rax, (%rsp)
pushq $0x8
popq %r9
movq %rbx, %rdi
movl %r13d, %ecx
callq 0x628f2
cmpq $0x0, (%rbx)
je 0x2b7181
movslq 0x38(%rbx), %rax
imulq 0x40(%rbx), %rax
testq %rax, %rax
je 0x2b7181
movq 0x18(%rsp), %rcx
movq (%rcx), %rax
movq %rax, 0x98(%rsp)
movq -0x18(%rax), %rax
movl 0xe8(%rcx,%rax), %eax
pushq $-0x8
popq %rcx
cltd
idivl %ecx
movl %eax, %r12d
xorl %ecx, %ecx
testl %r13d, %r13d
movl $0x0, %eax
movq %rax, 0x20(%rsp)
cmovlel %ecx, %r13d
movl %r12d, %r10d
xorl %ecx, %ecx
movq %r15, 0xa0(%rsp)
movq %r13, 0xc0(%rsp)
movl %r12d, 0xac(%rsp)
cmpq %r13, %rcx
je 0x2b7181
movslq 0x2c(%rbx), %rax
movslq 0x30(%rbx), %r9
movl 0x34(%rbx), %esi
movq (%rbx), %rdi
movq 0x10(%rbx), %r11
movq 0x40(%rbx), %r8
movq %r8, %rbp
movq %rcx, 0x48(%rsp)
imulq %rcx, %rbp
imulq %r11, %rbp
addq %rdi, %rbp
movl 0x18(%rbx), %ecx
movq 0x20(%rbx), %rdx
movq %rbp, 0x50(%rsp)
andq $0x0, 0x58(%rsp)
movq %r11, 0x60(%rsp)
movl %ecx, 0x68(%rsp)
movq %rdx, 0x70(%rsp)
movl %eax, 0x7c(%rsp)
movl %r9d, 0x80(%rsp)
movl $0x1, 0x84(%rsp)
movl %esi, 0x88(%rsp)
imulq %rax, %r9
movq %r11, %rax
imulq %r9, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %r11
movq %rax, 0x90(%rsp)
movl 0x28(%rbx), %ecx
leal -0x1(%rcx), %edx
movl %edx, 0x78(%rsp)
cmpl $0x4, %ecx
jne 0x2b674b
movq %r9, 0x90(%rsp)
movq %r9, %rax
movq 0x98(%rsp), %rcx
movq -0x18(%rcx), %r9
movq 0x18(%rsp), %rcx
vcvttss2si 0xe4(%rcx,%r9), %rcx
movl %r10d, %r10d
movq %r10, 0x30(%rsp)
movq %rcx, %rdx
shlq $0x8, %rdx
movq %rcx, %r10
shlq $0x10, %r10
orq %rdx, %r10
movq %rcx, %rdx
shlq $0x18, %rdx
movq %rcx, %r14
shlq $0x20, %r14
orq %rdx, %r14
orq %r10, %r14
movq %rcx, %rdx
shlq $0x28, %rdx
movq %rcx, %r15
shlq $0x30, %r15
orq %rdx, %r15
movq %rcx, %r10
shlq $0x38, %r10
orq %r15, %r10
orq %r14, %r10
orq %rcx, %r10
movl %r12d, %ecx
movl %r12d, %r13d
movq 0x48(%rsp), %r12
addl %r12d, %ecx
setns %dl
cmpl 0xa0(%rsp), %ecx
setl %r14b
testb %r14b, %dl
je 0x2b69cc
movq 0x28(%rsp), %rsi
movslq 0x2c(%rsi), %rdi
movslq 0x30(%rsi), %r8
movl 0x34(%rsi), %eax
movq (%rsi), %r13
movq 0x10(%rsi), %r11
movq 0x40(%rsi), %rdx
movl %ecx, %r12d
movq %rdx, 0x38(%rsp)
imulq %rdx, %r12
imulq %r11, %r12
addq %r13, %r12
movl 0x18(%rsi), %ecx
movq 0x20(%rsi), %rdx
movq %r12, 0xd0(%rsp)
andq $0x0, 0xd8(%rsp)
movq %r11, 0xe0(%rsp)
movl %ecx, 0xe8(%rsp)
movq %rdx, 0xf0(%rsp)
movl %edi, 0xfc(%rsp)
movl %r8d, 0x100(%rsp)
movl $0x1, 0x104(%rsp)
movl %eax, 0x108(%rsp)
movq %r8, 0xb8(%rsp)
movq %r8, %rcx
movq %rdi, 0x40(%rsp)
imulq %rdi, %rcx
movq %r11, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
movq %r11, 0xb0(%rsp)
divq %r11
movq %rax, 0x110(%rsp)
movl 0x28(%rsi), %eax
leal -0x1(%rax), %edx
movl %edx, 0xf8(%rsp)
cmpl $0x4, %eax
jne 0x2b68a9
movq %rcx, 0x110(%rsp)
movq 0x18(%rsp), %r14
movl 0xe0(%r14,%r9), %eax
testl %eax, %eax
jne 0x2b6907
movl 0xd0(%r14,%r9), %edx
movl 0xd4(%r14,%r9), %ecx
movl 0xd8(%r14,%r9), %r8d
movl 0xdc(%r14,%r9), %r9d
movq %r10, (%rsp)
leaq 0xd0(%rsp), %rdi
leaq 0x50(%rsp), %rsi
callq 0x2b740e
movq (%r14), %rax
movq %rax, 0x98(%rsp)
movq -0x18(%rax), %r9
movl 0xe0(%r14,%r9), %eax
cmpl $0x1, %eax
jne 0x2b6b1c
movq 0x18(%rsp), %rax
movl 0xd0(%rax,%r9), %r8d
movl 0xd4(%rax,%r9), %ecx
movl %ecx, 0xa8(%rsp)
movl 0xd8(%rax,%r9), %edx
xorl %r10d, %r10d
testl %edx, %edx
cmovlel %r10d, %edx
movq 0x40(%rsp), %rcx
testl %ecx, %ecx
movl $0x0, %r11d
cmovgl %ecx, %r11d
movl 0xdc(%rax,%r9), %esi
testl %esi, %esi
cmovlel %r10d, %esi
testl %r8d, %r8d
cmovlel %r10d, %r8d
movq 0x38(%rsp), %r9
imulq 0xb0(%rsp), %r9
imulq 0x30(%rsp), %r9
addq %r13, %r9
movq %rbp, %rdi
cmpl %r8d, %r10d
je 0x2b6a06
movl %edx, %ecx
subl $0x1, %ecx
jb 0x2b699a
movq (%r12), %r14
movq %r14, (%rdi)
addq $0x8, %rdi
jmp 0x2b6988
xorl %r14d, %r14d
cmpl %r14d, %r11d
je 0x2b69b2
movq (%r9,%r14,8), %rcx
movq %rcx, (%rdi)
addq $0x8, %rdi
incq %r14
jmp 0x2b699d
movl %esi, %ecx
subl $0x1, %ecx
jb 0x2b69c7
movq -0x8(%r9,%r14,8), %r15
movq %r15, (%rdi)
addq $0x8, %rdi
jmp 0x2b69b4
incl %r10d
jmp 0x2b697d
imull %eax, %esi
testl %esi, %esi
movl $0x0, %eax
cmovlel %eax, %esi
imulq %r11, %r8
imulq %r12, %r8
addq %r8, %rdi
xorl %eax, %eax
movq %r12, %rcx
movl %r13d, %r12d
movq 0xc0(%rsp), %r13
cmpq %rax, %rsi
je 0x2b6d3a
movq %r10, (%rdi,%rax,8)
incq %rax
jmp 0x2b69f4
movslq 0xfc(%rsp), %rax
xorl %r11d, %r11d
testl %eax, %eax
movl $0x0, %r8d
movq %rax, 0x120(%rsp)
cmovgl %eax, %r8d
movq 0xb8(%rsp), %rax
testl %eax, %eax
movl $0x0, %r14d
cmovgl %eax, %r14d
movq %r12, %r9
cmpl %r14d, %r11d
je 0x2b6a98
movl %edx, %ecx
subl $0x1, %ecx
jb 0x2b6a54
movq (%r9), %r10
movq %r10, (%rdi)
addq $0x8, %rdi
jmp 0x2b6a43
xorl %r15d, %r15d
xorl %r10d, %r10d
cmpl %r15d, %r8d
je 0x2b6a70
movq (%r9,%r15,8), %rcx
movq %rcx, (%rdi,%r15,8)
addq $-0x8, %r10
incq %r15
jmp 0x2b6a5a
movq %r9, %rcx
subq %r10, %rcx
subq %r10, %rdi
movl %esi, %r10d
subl $0x1, %r10d
jb 0x2b6a90
movq -0x8(%r9,%r15,8), %rax
movq %rax, (%rdi)
addq $0x8, %rdi
jmp 0x2b6a7c
incl %r11d
movq %rcx, %r9
jmp 0x2b6a3c
movq 0x120(%rsp), %rax
shlq $0x3, %rax
subq %rax, %r9
xorl %r10d, %r10d
movl 0xa8(%rsp), %r14d
testl %r14d, %r14d
cmovlel %r10d, %r14d
cmpl %r14d, %r10d
je 0x2b6b03
movl %edx, %ecx
subl $0x1, %ecx
jb 0x2b6ad1
movq (%r9), %rax
movq %rax, (%rdi)
addq $0x8, %rdi
jmp 0x2b6ac0
xorl %r11d, %r11d
cmpl %r11d, %r8d
je 0x2b6ae9
movq (%r9,%r11,8), %rax
movq %rax, (%rdi)
addq $0x8, %rdi
incq %r11
jmp 0x2b6ad4
movl %esi, %ecx
subl $0x1, %ecx
jb 0x2b6afe
movq -0x8(%r9,%r11,8), %rax
movq %rax, (%rdi)
addq $0x8, %rdi
jmp 0x2b6aeb
incl %r10d
jmp 0x2b6ab9
movq 0x98(%rsp), %rax
movq -0x18(%rax), %r9
movq 0x18(%rsp), %rax
movl 0xe0(%rax,%r9), %eax
cmpl $0x2, %eax
jne 0x2b6d25
movq 0x18(%rsp), %rcx
movl 0xd0(%rcx,%r9), %r14d
movslq 0xd8(%rcx,%r9), %rax
movq %rcx, %r8
movl %r14d, %r11d
movq 0x40(%rsp), %rcx
imull %ecx, %r11d
movq %rcx, %rdx
negq %rdx
xorl %r15d, %r15d
testl %eax, %eax
movl $0x0, %esi
cmovgl %eax, %esi
testl %ecx, %ecx
movl $0x0, %edi
cmovgl %ecx, %edi
movl 0xdc(%r8,%r9), %r8d
testl %r8d, %r8d
cmovlel %r15d, %r8d
testl %r14d, %r14d
cmovlel %r15d, %r14d
movq 0x38(%rsp), %r10
imulq 0xb0(%rsp), %r10
movslq %r11d, %rcx
leaq (%r12,%rcx,8), %r11
imulq 0x30(%rsp), %r10
leaq (%r10,%rax,8), %r10
leaq (%r10,%rcx,8), %r10
movq 0x18(%rsp), %r12
movl 0xd4(%r12,%r9), %r9d
movl %r9d, 0x38(%rsp)
shlq $0x3, %rax
addq %r10, %r13
movq 0x40(%rsp), %rcx
leaq (,%rcx,8), %r10
negq %r10
negq %rsi
negq %r8
cmpl %r14d, %r15d
je 0x2b6c2e
xorl %ecx, %ecx
cmpq %rcx, %rsi
je 0x2b6bec
movq (%r13,%rcx,8), %r12
movq %r12, (%rbp)
addq $0x8, %rbp
decq %rcx
jmp 0x2b6bd5
xorl %ecx, %ecx
movq %r11, %r12
cmpl %edi, %ecx
je 0x2b6c09
movq (%r12), %r9
addq $0x8, %r12
movq %r9, (%rbp)
addq $0x8, %rbp
incl %ecx
jmp 0x2b6bf1
xorl %ecx, %ecx
cmpq %rcx, %r8
je 0x2b6c22
movq -0x10(%r12,%rcx,8), %r9
movq %r9, (%rbp)
addq $0x8, %rbp
decq %rcx
jmp 0x2b6c0b
leaq (%r11,%rdx,8), %r11
incl %r15d
addq %r10, %r13
jmp 0x2b6bce
xorl %r14d, %r14d
movq 0xb8(%rsp), %rcx
testl %ecx, %ecx
movl $0x0, %r15d
cmovgl %ecx, %r15d
movl 0x38(%rsp), %r13d
cmpl %r15d, %r14d
je 0x2b6ca3
leaq (%r11,%rax), %rcx
xorl %r12d, %r12d
cmpq %r12, %rsi
je 0x2b6c6c
movq (%rcx,%r12,8), %r9
movq %r9, (%rbp)
addq $0x8, %rbp
decq %r12
jmp 0x2b6c56
xorl %ecx, %ecx
cmpl %edi, %ecx
je 0x2b6c85
movq (%r11), %r9
addq $0x8, %r11
movq %r9, (%rbp)
addq $0x8, %rbp
incl %ecx
jmp 0x2b6c6e
xorl %ecx, %ecx
cmpq %rcx, %r8
je 0x2b6c9e
movq -0x10(%r11,%rcx,8), %r9
movq %r9, (%rbp)
addq $0x8, %rbp
decq %rcx
jmp 0x2b6c87
incl %r14d
jmp 0x2b6c4a
movq 0x40(%rsp), %rcx
addl %ecx, %ecx
movslq %ecx, %rcx
shlq $0x3, %rcx
movq %r11, %r14
subq %rcx, %r14
xorl %r15d, %r15d
testl %r13d, %r13d
cmovlel %r15d, %r13d
subq %rcx, %rax
addq %r11, %rax
cmpl %r13d, %r15d
je 0x2b6d25
xorl %ecx, %ecx
cmpq %rcx, %rsi
je 0x2b6ce4
movq (%rax,%rcx,8), %r9
movq %r9, (%rbp)
addq $0x8, %rbp
decq %rcx
jmp 0x2b6cce
xorl %ecx, %ecx
movq %r14, %r11
cmpl %edi, %ecx
je 0x2b6d00
movq (%r11), %r9
addq $0x8, %r11
movq %r9, (%rbp)
addq $0x8, %rbp
incl %ecx
jmp 0x2b6ce9
xorl %ecx, %ecx
cmpq %rcx, %r8
je 0x2b6d19
movq -0x10(%r11,%rcx,8), %r9
movq %r9, (%rbp)
addq $0x8, %rbp
decq %rcx
jmp 0x2b6d02
leaq (%r14,%rdx,8), %r14
incl %r15d
addq %r10, %rax
jmp 0x2b6cc7
movq 0xc0(%rsp), %r13
movl 0xac(%rsp), %r12d
movq 0x48(%rsp), %rcx
incq %rcx
movq 0x30(%rsp), %r10
incl %r10d
jmp 0x2b66a1
movq (%r14), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0xe0(%r14,%rax)
jne 0x2b70ac
vpextrd $0x3, %xmm0, %edi
movl 0xe8(%r14,%rax), %ecx
movl %edi, 0x48(%rsp)
addl %edi, %ecx
addl 0xec(%r14,%rax), %ecx
addl 0xd0(%r14,%rax), %edx
addl 0xd4(%r14,%rax), %edx
addl 0xd8(%r14,%rax), %esi
addl 0xdc(%r14,%rax), %esi
movq 0x8(%r12), %rax
movq %rax, 0x8(%rsp)
movl $0x8, (%rsp)
movq %rbx, %rdi
movq %rcx, %r14
movl %r15d, %r8d
callq 0x62a26
cmpq $0x0, (%rbx)
je 0x2b7181
movslq 0x38(%rbx), %rax
imulq 0x40(%rbx), %rax
testq %rax, %rax
je 0x2b7181
xorl %ecx, %ecx
testl %r14d, %r14d
cmovlel %ecx, %r14d
movq %r14, 0x30(%rsp)
testl %r15d, %r15d
movl $0x0, %eax
movq %rax, 0x20(%rsp)
cmovlel %ecx, %r15d
movq %r15, 0xa0(%rsp)
movabsq $0x100000001, %r13 # imm = 0x100000001
xorl %r14d, %r14d
movq 0x18(%rsp), %rbp
movq %rbx, 0xc8(%rsp)
cmpq 0xa0(%rsp), %r14
je 0x2b7181
movq (%rbp), %rax
movq -0x18(%rax), %rax
vcvttss2si 0xe4(%rbp,%rax), %rax
movq %rax, %rcx
shlq $0x8, %rcx
movq %rax, %rdx
shlq $0x10, %rdx
orq %rcx, %rdx
movq %rax, %rcx
shlq $0x18, %rcx
movq %rax, %rsi
shlq $0x20, %rsi
orq %rcx, %rsi
orq %rdx, %rsi
movq %rax, %rcx
shlq $0x28, %rcx
movq %rax, %rdx
shlq $0x30, %rdx
orq %rcx, %rdx
movq %rax, %r15
shlq $0x38, %r15
orq %rdx, %r15
orq %rsi, %r15
orq %rax, %r15
xorl %r12d, %r12d
cmpq 0x30(%rsp), %r12
je 0x2b7052
movslq 0x2c(%rbx), %rdi
movslq 0x30(%rbx), %rdx
movq (%rbx), %rax
movq 0x10(%rbx), %rsi
movq 0x40(%rbx), %r8
imulq %r14, %r8
movq %r8, %r9
imulq %rsi, %r9
addq %rax, %r9
movl 0x18(%rbx), %r10d
movq %rdx, %rcx
imulq %rdi, %rcx
movq %r12, %r11
imulq %rsi, %r11
imulq %rcx, %r11
addq %r9, %r11
movq 0x20(%rbx), %r9
movq %r11, 0x50(%rsp)
andq $0x0, 0x58(%rsp)
movq %rsi, 0x60(%rsp)
movl %r10d, 0x68(%rsp)
movq %r9, 0x70(%rsp)
movl $0x2, 0x78(%rsp)
movl %edi, 0x7c(%rsp)
movl %edx, 0x80(%rsp)
movq %r13, 0x84(%rsp)
movq %rcx, 0x90(%rsp)
movq (%rbp), %r9
movq -0x18(%r9), %r9
movl 0xe8(%rbp,%r9), %r11d
movl %r12d, %r10d
subl %r11d, %r10d
setns %r11b
cmpl 0x48(%rsp), %r10d
movq %r13, %rbx
movq %rbp, %r13
setl %bpl
testb %bpl, %r11b
je 0x2b7016
movq 0x28(%rsp), %r8
movslq 0x2c(%r8), %rax
movslq 0x30(%r8), %rcx
movq 0x40(%r8), %rdx
imulq %r14, %rdx
movq 0x10(%r8), %rsi
imulq %rsi, %rdx
addq (%r8), %rdx
movl 0x18(%r8), %edi
movq 0x20(%r8), %r8
movq %rcx, %r11
imulq %rax, %r11
movl %r10d, %r10d
movq %r11, %rbp
imulq %rsi, %rbp
imulq %r10, %rbp
addq %rdx, %rbp
movq %rbp, 0xd0(%rsp)
andq $0x0, 0xd8(%rsp)
movq %rsi, 0xe0(%rsp)
movl %edi, 0xe8(%rsp)
movq %r8, 0xf0(%rsp)
movl $0x2, 0xf8(%rsp)
movl %eax, 0xfc(%rsp)
movl %ecx, 0x100(%rsp)
movq %rbx, 0x104(%rsp)
movq %r11, 0x110(%rsp)
movl 0xd0(%r13,%r9), %edx
movl 0xd4(%r13,%r9), %ecx
movl 0xd8(%r13,%r9), %r8d
movl 0xdc(%r13,%r9), %r9d
movq %r15, (%rsp)
leaq 0xd0(%rsp), %rdi
leaq 0x50(%rsp), %rsi
callq 0x2b740e
movq %r13, %rbp
movq %rbx, %r13
movq 0xc8(%rsp), %rbx
incq %r12
jmp 0x2b6e81
testl %ecx, %ecx
movl $0x0, %r9d
cmovlel %r9d, %ecx
imulq %r12, %rdx
imulq %rdi, %rdx
addq %r8, %rdx
imulq %rdx, %rsi
addq %rsi, %rax
xorl %edx, %edx
movq %r13, %rbp
movq %rbx, %r13
movq 0xc8(%rsp), %rbx
cmpq %rdx, %rcx
je 0x2b700e
movq %r15, (%rax,%rdx,8)
incq %rdx
jmp 0x2b7044
incq %r14
jmp 0x2b6e18
movq (%r14), %rax
movq -0x18(%rax), %r8
movl 0xd0(%r14,%r8), %r10d
leal (%r10,%rdx,8), %edx
addl 0xd4(%r14,%r8), %edx
xorl %r11d, %r11d
testb $0x7, %dl
sete %r11b
movq %r9, %rax
shrq $0x3, %rax
leal (%r11,%r11,2), %ecx
shlq %cl, %rax
testb $0x7, %r10b
sete %cl
andb %cl, %r11b
cmpb $0x1, %r11b
jne 0x2b70ac
cmpl $0x0, 0xe0(%r14,%r8)
je 0x2b7198
movq 0x28(%rsp), %rdx
movq 0x8(%rdx), %rax
vmovups (%rdx), %xmm1
vmovaps %xmm1, 0x50(%rsp)
movq %r9, 0x60(%rsp)
movl %edi, 0x68(%rsp)
movq 0x20(%rdx), %rcx
movq %rcx, 0x70(%rsp)
vmovdqu %xmm0, 0x78(%rsp)
movl %r15d, 0x88(%rsp)
movq 0x40(%rdx), %rcx
movq %rcx, 0x90(%rsp)
testq %rax, %rax
je 0x2b70f3
lock
incl (%rax)
cmpl $0x1, %edi
je 0x2b7134
vmovdqu (%r12), %ymm0
vmovups 0x20(%r12), %ymm1
leaq 0xd0(%rsp), %rcx
vmovups %ymm1, 0x20(%rcx)
vmovdqu %ymm0, (%rcx)
movq 0x10(%r12), %rax
movq %rax, 0x8(%rcx)
leaq 0x50(%rsp), %rsi
pushq $0x1
popq %rdx
movq 0x28(%rsp), %rdi
vzeroupper
callq 0x64e3b
movq (%r14), %rax
addq -0x18(%rax), %r14
leaq 0x50(%rsp), %rsi
movq %r14, %rdi
movq %rbx, %rdx
movq %r12, %rcx
callq 0x2acc2e
movq %rax, 0x20(%rsp)
movq 0x58(%rsp), %rax
testq %rax, %rax
je 0x2b7181
lock
decl (%rax)
jne 0x2b7181
movq 0x50(%rsp), %rsi
movq 0x70(%rsp), %rdi
testq %rdi, %rdi
je 0x2b7179
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2b7181
movq %rsi, %rdi
callq 0x5f3e0
movq 0x20(%rsp), %rax
addq $0x128, %rsp # imm = 0x128
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
addl 0xd8(%r14,%r8), %esi
addl 0xdc(%r14,%r8), %esi
sarl $0x3, %edx
movq 0x8(%r12), %r9
pushq $0x8
popq %rbp
movq %rbx, %rdi
movq %rax, %rcx
movl %ebp, %r8d
callq 0x627de
cmpq $0x0, (%rbx)
je 0x2b7181
movslq 0x38(%rbx), %rax
imulq 0x40(%rbx), %rax
testq %rax, %rax
je 0x2b7181
movq 0x18(%rsp), %r9
movq (%r9), %rax
movq -0x18(%rax), %rsi
vcvttss2si 0xe4(%r9,%rsi), %rax
movq %rax, %rcx
shlq $0x8, %rcx
movq %rax, %rdx
shlq $0x10, %rdx
orq %rcx, %rdx
movq %rax, %rcx
shlq $0x18, %rcx
movq %rax, %rdi
shlq $0x20, %rdi
orq %rcx, %rdi
orq %rdx, %rdi
movq %rax, %rcx
shlq $0x28, %rcx
movq %rax, %rdx
shlq $0x30, %rdx
orq %rcx, %rdx
movq %rax, %r11
shlq $0x38, %r11
orq %rdx, %r11
orq %rdi, %r11
orq %rax, %r11
movl 0xd0(%r9,%rsi), %eax
movl 0xd4(%r9,%rsi), %ecx
cltd
idivl %ebp
movl %eax, %r10d
movl %ecx, %eax
cltd
idivl %ebp
movl 0xd8(%r9,%rsi), %r8d
movl 0xdc(%r9,%rsi), %r9d
movq %r11, (%rsp)
movq 0x28(%rsp), %rdi
movq %rbx, %rsi
movl %r10d, %edx
movl %eax, %ecx
callq 0x2b740e
movq $0x0, 0x20(%rsp)
jmp 0x2b7181
jmp 0x2b72bd
jmp 0x2b7284
movq %rax, %rbx
movq 0x58(%rsp), %rax
testq %rax, %rax
je 0x2b72b5
lock
decl (%rax)
jne 0x2b72b5
movq 0x50(%rsp), %rsi
movq 0x70(%rsp), %rdi
testq %rdi, %rdi
jne 0x2b72af
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2b72b5
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/padding_x86_fma.cpp |
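The copy loops in the kernel above repeat the first and last source element of each row, which is the edge-replication pattern ncnn uses for replicate padding of 8-byte packed rows. A hedged scalar sketch of that pattern (an illustration written for this note, not the shipped kernel; one int64_t stands in for one packed element):

#include <stdint.h>

// Replicate-pad one packed row: repeat the leading element 'left' times,
// copy the interior, then repeat the trailing element 'right' times.
static void replicate_pad_row(const int64_t* ptr, int64_t* outptr,
                              int w, int left, int right)
{
    for (int x = 0; x < left; x++)
        *outptr++ = ptr[0];     // left border: first element repeated
    for (int x = 0; x < w; x++)
        *outptr++ = ptr[x];     // interior copied verbatim
    for (int x = 0; x < right; x++)
        *outptr++ = ptr[w - 1]; // right border: last element repeated
}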
virtual thunk to ncnn::Padding_x86_fma::forward(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const | int Padding_x86_fma::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
if (top == 0 && bottom == 0 && left == 0 && right == 0 && front == 0 && behind == 0)
{
top_blob = bottom_blob;
return 0;
}
int elembits = bottom_blob.elembits();
if (elembits == 8)
return forward_int8(bottom_blob, top_blob, opt);
int w = bottom_blob.w;
int h = bottom_blob.h;
int d = bottom_blob.d;
int channels = bottom_blob.c;
int dims = bottom_blob.dims;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
#if __SSE2__
#if __AVX__
#if __AVX512F__
if (elempack == 16)
{
if (dims == 1)
{
int outw = w * elempack + left + right;
int out_elempack = outw % 16 == 0 ? 16 : outw % 8 == 0 ? 8 : outw % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (left % 16 == 0 && out_elempack == 16 && type == 0)
{
top_blob.create(outw / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
__m512 pad_value = _mm512_set1_ps(value);
padding_constant_pack16_avx512(bottom_blob, top_blob, 0, 0, left / 16, right / 16, pad_value);
return 0;
}
}
if (dims == 2)
{
int outw = w + left + right;
int outh = h * elempack + top + bottom;
int out_elempack = outh % 16 == 0 ? 16 : outh % 8 == 0 ? 8 : outh % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (top % 16 == 0 && out_elempack == 16 && type == 0)
{
top_blob.create(outw, outh / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
__m512 pad_value = _mm512_set1_ps(value);
padding_constant_pack16_avx512(bottom_blob, top_blob, top / 16, bottom / 16, left, right, pad_value);
return 0;
}
}
if (dims == 3)
{
int outw = w + left + right;
int outh = h + top + bottom;
int outc = channels * elempack + front + behind;
int out_elempack = outc % 16 == 0 ? 16 : outc % 8 == 0 ? 8 : outc % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (front % 16 == 0 && out_elempack == 16 && !(outc != channels * elempack && type != 0))
{
top_blob.create(outw, outh, outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
int front_ = front / elempack;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < outc / out_elempack; q++)
{
Mat borderm = top_blob.channel(q);
__m512 pad_value = per_channel_pad_data_size ? _mm512_loadu_ps((const float*)per_channel_pad_data + q * 16) : _mm512_set1_ps(value);
//Channel padding
if ((q - front_) < 0 || (q - front_) >= channels)
{
borderm.fill(pad_value);
}
else
{
const Mat m = bottom_blob.channel(q - front_);
if (type == 0)
padding_constant_pack16_avx512(m, borderm, top, bottom, left, right, pad_value);
if (type == 1)
padding_replicate_pack16_avx512(m, borderm, top, bottom, left, right);
if (type == 2)
padding_reflect_pack16_avx512(m, borderm, top, bottom, left, right);
}
}
return 0;
}
}
if (dims == 4)
{
int outw = w + left + right;
int outh = h + top + bottom;
int outd = d + front + behind;
if (type == 0)
{
top_blob.create(outw, outh, outd, channels, elemsize, elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
__m512 pad_value = per_channel_pad_data_size ? _mm512_loadu_ps((const float*)per_channel_pad_data + q * 16) : _mm512_set1_ps(value);
for (int z = 0; z < outd; z++)
{
Mat borderm = top_blob.channel(q).depth(z);
// depth padding
if ((z - front) < 0 || (z - front) >= d)
{
borderm.fill(pad_value);
}
else
{
const Mat m = bottom_blob.channel(q).depth(z - front);
padding_constant_pack16_avx512(m, borderm, top, bottom, left, right, pad_value);
}
}
}
return 0;
}
}
}
#endif // __AVX512F__
if (elempack == 8)
{
if (dims == 1)
{
int outw = w * elempack + left + right;
int out_elempack = outw % 8 == 0 ? 8 : outw % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (left % 8 == 0 && out_elempack == 8 && type == 0)
{
top_blob.create(outw / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
__m256 pad_value = _mm256_set1_ps(value);
padding_constant_pack8_avx(bottom_blob, top_blob, 0, 0, left / 8, right / 8, pad_value);
return 0;
}
}
if (dims == 2)
{
int outw = w + left + right;
int outh = h * elempack + top + bottom;
int out_elempack = outh % 8 == 0 ? 8 : outh % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (top % 8 == 0 && out_elempack == 8 && type == 0)
{
top_blob.create(outw, outh / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
__m256 pad_value = _mm256_set1_ps(value);
padding_constant_pack8_avx(bottom_blob, top_blob, top / 8, bottom / 8, left, right, pad_value);
return 0;
}
}
if (dims == 3)
{
int outw = w + left + right;
int outh = h + top + bottom;
int outc = channels * elempack + front + behind;
int out_elempack = outc % 8 == 0 ? 8 : outc % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (front % 8 == 0 && out_elempack == 8 && !(outc != channels * elempack && type != 0))
{
top_blob.create(outw, outh, outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
int front_ = front / elempack;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < outc / out_elempack; q++)
{
Mat borderm = top_blob.channel(q);
__m256 pad_value = per_channel_pad_data_size ? _mm256_loadu_ps((const float*)per_channel_pad_data + q * 8) : _mm256_set1_ps(value);
//Channel padding
if ((q - front_) < 0 || (q - front_) >= channels)
{
borderm.fill(pad_value);
}
else
{
const Mat m = bottom_blob.channel(q - front_);
if (type == 0)
padding_constant_pack8_avx(m, borderm, top, bottom, left, right, pad_value);
if (type == 1)
padding_replicate_pack8_avx(m, borderm, top, bottom, left, right);
if (type == 2)
padding_reflect_pack8_avx(m, borderm, top, bottom, left, right);
}
}
return 0;
}
}
if (dims == 4)
{
int outw = w + left + right;
int outh = h + top + bottom;
int outd = d + front + behind;
if (type == 0)
{
top_blob.create(outw, outh, outd, channels, elemsize, elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
__m256 pad_value = per_channel_pad_data_size ? _mm256_loadu_ps((const float*)per_channel_pad_data + q * 8) : _mm256_set1_ps(value);
for (int z = 0; z < outd; z++)
{
Mat borderm = top_blob.channel(q).depth(z);
// depth padding
if ((z - front) < 0 || (z - front) >= d)
{
borderm.fill(pad_value);
}
else
{
const Mat m = bottom_blob.channel(q).depth(z - front);
padding_constant_pack8_avx(m, borderm, top, bottom, left, right, pad_value);
}
}
}
return 0;
}
}
}
#endif // __AVX__
if (elempack == 4)
{
if (dims == 1)
{
int outw = w * elempack + left + right;
#if __AVX__
int out_elempack = outw % 8 == 0 ? 8 : outw % 4 == 0 ? 4 : 1;
#else
int out_elempack = outw % 4 == 0 ? 4 : 1;
#endif
size_t out_elemsize = elemsize / elempack * out_elempack;
if (left % 4 == 0 && out_elempack == 4 && type == 0)
{
top_blob.create(outw / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
__m128 pad_value = _mm_set1_ps(value);
padding_constant_pack4_sse(bottom_blob, top_blob, 0, 0, left / 4, right / 4, pad_value);
return 0;
}
}
if (dims == 2)
{
int outw = w + left + right;
int outh = h * elempack + top + bottom;
#if __AVX__
int out_elempack = outh % 8 == 0 ? 8 : outh % 4 == 0 ? 4 : 1;
#else
int out_elempack = outh % 4 == 0 ? 4 : 1;
#endif
size_t out_elemsize = elemsize / elempack * out_elempack;
if (top % 4 == 0 && out_elempack == 4 && type == 0)
{
top_blob.create(outw, outh / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
__m128 pad_value = _mm_set1_ps(value);
padding_constant_pack4_sse(bottom_blob, top_blob, top / 4, bottom / 4, left, right, pad_value);
return 0;
}
}
if (dims == 3)
{
int outw = w + left + right;
int outh = h + top + bottom;
int outc = channels * elempack + front + behind;
#if __AVX__
int out_elempack = outc % 8 == 0 ? 8 : outc % 4 == 0 ? 4 : 1;
#else
int out_elempack = outc % 4 == 0 ? 4 : 1;
#endif
size_t out_elemsize = elemsize / elempack * out_elempack;
if (front % 4 == 0 && out_elempack == 4 && !(outc != channels * elempack && type != 0))
{
top_blob.create(outw, outh, outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
int front_ = front / elempack;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < outc / out_elempack; q++)
{
Mat borderm = top_blob.channel(q);
__m128 pad_value = per_channel_pad_data_size ? _mm_loadu_ps((const float*)per_channel_pad_data + q * 4) : _mm_set1_ps(value);
//Channel padding
if ((q - front_) < 0 || (q - front_) >= channels)
{
borderm.fill(pad_value);
}
else
{
const Mat m = bottom_blob.channel(q - front_);
if (type == 0)
padding_constant_pack4_sse(m, borderm, top, bottom, left, right, pad_value);
if (type == 1)
padding_replicate_pack4_sse(m, borderm, top, bottom, left, right);
if (type == 2)
padding_reflect_pack4_sse(m, borderm, top, bottom, left, right);
}
}
return 0;
}
}
if (dims == 4)
{
int outw = w + left + right;
int outh = h + top + bottom;
int outd = d + front + behind;
if (type == 0)
{
top_blob.create(outw, outh, outd, channels, elemsize, elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
__m128 pad_value = per_channel_pad_data_size ? _mm_loadu_ps((const float*)per_channel_pad_data + q * 4) : _mm_set1_ps(value);
for (int z = 0; z < outd; z++)
{
Mat borderm = top_blob.channel(q).depth(z);
// depth padding
if ((z - front) < 0 || (z - front) >= d)
{
borderm.fill(pad_value);
}
else
{
const Mat m = bottom_blob.channel(q).depth(z - front);
padding_constant_pack4_sse(m, borderm, top, bottom, left, right, pad_value);
}
}
}
return 0;
}
}
}
#endif // __SSE2__
Mat bottom_blob_unpacked = bottom_blob;
if (elempack != 1)
{
Option opt_pack1 = opt;
opt_pack1.blob_allocator = opt.workspace_allocator;
convert_packing(bottom_blob, bottom_blob_unpacked, 1, opt_pack1);
}
return Padding::forward(bottom_blob_unpacked, top_blob, opt);
} | movq (%rdi), %rax
addq -0x48(%rax), %rdi
jmp 0x2b4800
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/padding_x86_fma.cpp |
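All of the packed fast paths above reduce, at elempack == 1, to plain constant padding. A self-contained reference sketch (simplified to a 2D row-major float image; pad_value plays the role of the layer's value member):

#include <vector>

// Constant-pad a row-major w x h image into (w+left+right) x (h+top+bottom).
static std::vector<float> pad_constant(const std::vector<float>& src, int w, int h,
                                       int top, int bottom, int left, int right,
                                       float pad_value)
{
    const int outw = w + left + right;
    const int outh = h + top + bottom;
    std::vector<float> dst((size_t)outw * outh, pad_value); // borders pre-filled
    for (int y = 0; y < h; y++)
        for (int x = 0; x < w; x++)
            dst[(y + top) * (size_t)outw + (x + left)] = src[y * (size_t)w + x];
    return dst;
}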
ncnn::padding_constant_pack8_int8_sse(ncnn::Mat const&, ncnn::Mat&, int, int, int, int, long) | static void padding_constant_pack8_int8_sse(const Mat& src, Mat& dst, int top, int bottom, int left, int right, int64_t _v)
{
const int64_t* ptr = src;
int64_t* outptr = dst;
// fill top
for (int y = 0; y < top; y++)
{
for (int x = 0; x < dst.w; x++)
{
*outptr++ = _v;
}
}
// fill center
for (int y = 0; y < src.h; y++)
{
for (int x = 0; x < left; x++)
{
*outptr++ = _v;
}
for (int x = 0; x < src.w; x++)
{
*outptr++ = *ptr++;
}
for (int x = 0; x < right; x++)
{
*outptr++ = _v;
}
}
// fill bottom
for (int y = 0; y < bottom; y++)
{
for (int x = 0; x < dst.w; x++)
{
*outptr++ = _v;
}
}
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
movq 0x30(%rsp), %rax
movq (%rdi), %r11
movl 0x2c(%rsi), %ebx
xorl %ebp, %ebp
testl %ebx, %ebx
cmovlel %ebp, %ebx
movq (%rsi), %r10
testl %edx, %edx
cmovlel %ebp, %edx
cmpl %edx, %ebp
je 0x2b744a
movl %ebx, %r14d
subl $0x1, %r14d
jb 0x2b7446
movq %rax, (%r10)
addq $0x8, %r10
jmp 0x2b7437
incl %ebp
jmp 0x2b7430
xorl %edx, %edx
testl %r8d, %r8d
cmovlel %edx, %r8d
movl 0x2c(%rdi), %ebx
testl %ebx, %ebx
cmovlel %edx, %ebx
testl %r9d, %r9d
cmovlel %edx, %r9d
movl 0x30(%rdi), %edi
testl %edi, %edi
cmovlel %edx, %edi
cmpl %edi, %edx
je 0x2b74b6
movl %r8d, %ebp
subl $0x1, %ebp
jb 0x2b747f
movq %rax, (%r10)
addq $0x8, %r10
jmp 0x2b7471
xorl %r15d, %r15d
xorl %r14d, %r14d
cmpl %r15d, %ebx
je 0x2b749b
movq (%r11,%r15,8), %r12
movq %r12, (%r10,%r15,8)
addq $-0x8, %r14
incq %r15
jmp 0x2b7485
subq %r14, %r11
subq %r14, %r10
movl %r9d, %ebp
subl $0x1, %ebp
jb 0x2b74b2
movq %rax, (%r10)
addq $0x8, %r10
jmp 0x2b74a4
incl %edx
jmp 0x2b746a
movl 0x2c(%rsi), %edx
xorl %esi, %esi
testl %edx, %edx
cmovlel %esi, %edx
testl %ecx, %ecx
cmovlel %esi, %ecx
movl %edx, %edi
cmpl %ecx, %esi
je 0x2b74dd
subl $0x1, %edi
jb 0x2b74d9
movq %rax, (%r10)
addq $0x8, %r10
jmp 0x2b74cb
incl %esi
jmp 0x2b74c5
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
nop
| /csukuangfj[P]ncnn/src/layer/x86/padding_pack8_int8.h |
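The int64_t _v consumed above is eight int8 pad values packed into one word, so a single 64-bit store writes a whole pack8 element. A hedged sketch of how a caller could build that value from one scalar (the real caller performs an equivalent byte splat; this helper is illustrative):

#include <stdint.h>
#include <string.h>

// Splat one signed 8-bit value into the packed form expected by
// padding_constant_pack8_int8_sse.
static int64_t make_pack8_int8_value(int8_t v)
{
    int8_t lanes[8] = {v, v, v, v, v, v, v, v};
    int64_t packed;
    memcpy(&packed, lanes, sizeof(packed)); // reinterpret 8 lanes as one word
    return packed;
}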
ncnn::ExpandDims::load_param(ncnn::ParamDict const&) | int ExpandDims::load_param(const ParamDict& pd)
{
expand_w = pd.get(0, 0);
expand_h = pd.get(1, 0);
expand_d = pd.get(11, 0);
expand_c = pd.get(2, 0);
axes = pd.get(3, Mat());
return 0;
} | pushq %r15
pushq %r14
pushq %rbx
subq $0xa0, %rsp
movq %rsi, %r14
movq %rdi, %rbx
movq %rsi, %rdi
xorl %esi, %esi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0xd0(%rbx)
pushq $0x1
popq %rsi
movq %r14, %rdi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0xd4(%rbx)
pushq $0xb
popq %rsi
movq %r14, %rdi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0xd8(%rbx)
pushq $0x2
popq %rsi
movq %r14, %rdi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0xdc(%rbx)
leaq 0x50(%rsp), %rcx
andq $0x0, 0x40(%rcx)
xorps %xmm0, %xmm0
movaps %xmm0, (%rcx)
movups %xmm0, 0xc(%rcx)
movaps %xmm0, 0x20(%rcx)
movups %xmm0, 0x2c(%rcx)
movq %rsp, %r15
pushq $0x3
popq %rdx
movq %r15, %rdi
movq %r14, %rsi
callq 0x718da
leaq 0xe0(%rbx), %rcx
movq 0x8(%rsp), %rax
cmpq %r15, %rcx
je 0x2bc8e4
testq %rax, %rax
je 0x2bc855
lock
incl (%rax)
movq 0xe8(%rbx), %rax
testq %rax, %rax
je 0x2bc889
lock
decl (%rax)
jne 0x2bc889
movq 0xe0(%rbx), %rsi
movq 0x100(%rbx), %rdi
testq %rdi, %rdi
je 0x2bc881
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2bc889
movq %rsi, %rdi
callq 0x5f3e0
movq (%rsp), %rax
movq %rax, 0xe0(%rbx)
movq 0x8(%rsp), %rax
movq %rax, 0xe8(%rbx)
movq 0x10(%rsp), %rcx
movq %rcx, 0xf0(%rbx)
movl 0x18(%rsp), %ecx
movl %ecx, 0xf8(%rbx)
movq 0x20(%rsp), %rcx
movq %rcx, 0x100(%rbx)
movups 0x28(%rsp), %xmm0
movups %xmm0, 0x108(%rbx)
movl 0x38(%rsp), %ecx
movl %ecx, 0x118(%rbx)
movq 0x40(%rsp), %rcx
movq %rcx, 0x120(%rbx)
testq %rax, %rax
je 0x2bc90c
lock
decl (%rax)
jne 0x2bc90c
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
je 0x2bc904
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2bc90c
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x40(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, (%rsp)
movups %xmm0, 0xc(%rsp)
andl $0x0, 0x38(%rsp)
movups %xmm0, 0x28(%rsp)
movq 0x58(%rsp), %rax
testq %rax, %rax
je 0x2bc956
lock
decl (%rax)
jne 0x2bc956
movq 0x50(%rsp), %rsi
movq 0x70(%rsp), %rdi
testq %rdi, %rdi
je 0x2bc94e
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2bc956
movq %rsi, %rdi
callq 0x5f3e0
xorl %eax, %eax
addq $0xa0, %rsp
popq %rbx
popq %r14
popq %r15
retq
movq %rax, %rbx
movq 0x8(%rsp), %rax
testq %rax, %rax
je 0x2bc995
lock
decl (%rax)
jne 0x2bc995
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
jne 0x2bc98f
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2bc995
movq (%rdi), %rax
callq *0x18(%rax)
andq $0x0, 0x40(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, (%rsp)
movups %xmm0, 0xc(%rsp)
movups %xmm0, 0x28(%rsp)
andl $0x0, 0x38(%rsp)
jmp 0x2bc9bc
jmp 0x2bc9f2
jmp 0x2bc9f2
jmp 0x2bc9f2
movq %rax, %rbx
movq 0x58(%rsp), %rax
testq %rax, %rax
je 0x2bc9ea
lock
decl (%rax)
jne 0x2bc9ea
movq 0x50(%rsp), %rsi
movq 0x70(%rsp), %rdi
testq %rdi, %rdi
jne 0x2bc9e4
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2bc9ea
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
| /csukuangfj[P]ncnn/src/layer/expanddims.cpp |
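Each pd.get(id, default) call above reads one id=value pair from the layer's line in a .param file, falling back to the default when the id is absent; Mat-valued parameters such as axes use the array encoding -(23300 + id)=count,v0,v1,... A hypothetical line (layer and blob names invented for illustration) that sets expand_w = 1 and axes = {0}:

ExpandDims   exp0   1 1 in0 out0 0=1 -23303=1,0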
ncnn::PriorBox::load_param(ncnn::ParamDict const&) | int PriorBox::load_param(const ParamDict& pd)
{
min_sizes = pd.get(0, Mat());
max_sizes = pd.get(1, Mat());
aspect_ratios = pd.get(2, Mat());
variances[0] = pd.get(3, 0.1f);
variances[1] = pd.get(4, 0.1f);
variances[2] = pd.get(5, 0.2f);
variances[3] = pd.get(6, 0.2f);
flip = pd.get(7, 1);
clip = pd.get(8, 0);
image_width = pd.get(9, 0);
image_height = pd.get(10, 0);
step_width = pd.get(11, -233.f);
step_height = pd.get(12, -233.f);
offset = pd.get(13, 0.f);
step_mmdetection = pd.get(14, 0);
center_mmdetection = pd.get(15, 0);
return 0;
} | pushq %r15
pushq %r14
pushq %rbx
subq $0xa0, %rsp
movq %rsi, %r14
movq %rdi, %rbx
leaq 0x50(%rsp), %rcx
andq $0x0, 0x40(%rcx)
xorps %xmm0, %xmm0
movaps %xmm0, (%rcx)
movups %xmm0, 0xc(%rcx)
movaps %xmm0, 0x20(%rcx)
movups %xmm0, 0x2c(%rcx)
movq %rsp, %r15
movq %r15, %rdi
xorl %edx, %edx
callq 0x718da
leaq 0xd0(%rbx), %rcx
movq 0x8(%rsp), %rax
cmpq %r15, %rcx
je 0x2c0907
testq %rax, %rax
je 0x2c0878
lock
incl (%rax)
movq 0xd8(%rbx), %rax
testq %rax, %rax
je 0x2c08ac
lock
decl (%rax)
jne 0x2c08ac
movq 0xd0(%rbx), %rsi
movq 0xf0(%rbx), %rdi
testq %rdi, %rdi
je 0x2c08a4
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2c08ac
movq %rsi, %rdi
callq 0x5f3e0
movq (%rsp), %rax
movq %rax, 0xd0(%rbx)
movq 0x8(%rsp), %rax
movq %rax, 0xd8(%rbx)
movq 0x10(%rsp), %rcx
movq %rcx, 0xe0(%rbx)
movl 0x18(%rsp), %ecx
movl %ecx, 0xe8(%rbx)
movq 0x20(%rsp), %rcx
movq %rcx, 0xf0(%rbx)
movups 0x28(%rsp), %xmm0
movups %xmm0, 0xf8(%rbx)
movl 0x38(%rsp), %ecx
movl %ecx, 0x108(%rbx)
movq 0x40(%rsp), %rcx
movq %rcx, 0x110(%rbx)
testq %rax, %rax
je 0x2c092f
lock
decl (%rax)
jne 0x2c092f
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
je 0x2c0927
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2c092f
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x40(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, (%rsp)
movups %xmm0, 0xc(%rsp)
andl $0x0, 0x38(%rsp)
movups %xmm0, 0x28(%rsp)
movq 0x58(%rsp), %rax
testq %rax, %rax
je 0x2c097f
lock
decl (%rax)
jne 0x2c097f
movq 0x50(%rsp), %rsi
movq 0x70(%rsp), %rdi
testq %rdi, %rdi
je 0x2c0974
movq (%rdi), %rax
callq *0x18(%rax)
xorps %xmm0, %xmm0
jmp 0x2c097f
movq %rsi, %rdi
callq 0x5f3e0
xorps %xmm0, %xmm0
leaq 0x50(%rsp), %rcx
andq $0x0, 0x40(%rcx)
movaps %xmm0, (%rcx)
movups %xmm0, 0xc(%rcx)
movaps %xmm0, 0x20(%rcx)
movups %xmm0, 0x2c(%rcx)
movq %rsp, %r15
pushq $0x1
popq %rdx
movq %r15, %rdi
movq %r14, %rsi
callq 0x718da
leaq 0x118(%rbx), %rcx
movq 0x8(%rsp), %rax
cmpq %r15, %rcx
je 0x2c0a55
testq %rax, %rax
je 0x2c09c6
lock
incl (%rax)
movq 0x120(%rbx), %rax
testq %rax, %rax
je 0x2c09fa
lock
decl (%rax)
jne 0x2c09fa
movq 0x118(%rbx), %rsi
movq 0x138(%rbx), %rdi
testq %rdi, %rdi
je 0x2c09f2
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2c09fa
movq %rsi, %rdi
callq 0x5f3e0
movq (%rsp), %rax
movq %rax, 0x118(%rbx)
movq 0x8(%rsp), %rax
movq %rax, 0x120(%rbx)
movq 0x10(%rsp), %rcx
movq %rcx, 0x128(%rbx)
movl 0x18(%rsp), %ecx
movl %ecx, 0x130(%rbx)
movq 0x20(%rsp), %rcx
movq %rcx, 0x138(%rbx)
movups 0x28(%rsp), %xmm0
movups %xmm0, 0x140(%rbx)
movl 0x38(%rsp), %ecx
movl %ecx, 0x150(%rbx)
movq 0x40(%rsp), %rcx
movq %rcx, 0x158(%rbx)
testq %rax, %rax
je 0x2c0a7d
lock
decl (%rax)
jne 0x2c0a7d
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
je 0x2c0a75
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2c0a7d
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x40(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, (%rsp)
movups %xmm0, 0xc(%rsp)
andl $0x0, 0x38(%rsp)
movups %xmm0, 0x28(%rsp)
movq 0x58(%rsp), %rax
testq %rax, %rax
je 0x2c0acd
lock
decl (%rax)
jne 0x2c0acd
movq 0x50(%rsp), %rsi
movq 0x70(%rsp), %rdi
testq %rdi, %rdi
je 0x2c0ac2
movq (%rdi), %rax
callq *0x18(%rax)
xorps %xmm0, %xmm0
jmp 0x2c0acd
movq %rsi, %rdi
callq 0x5f3e0
xorps %xmm0, %xmm0
leaq 0x50(%rsp), %rcx
andq $0x0, 0x40(%rcx)
movaps %xmm0, (%rcx)
movups %xmm0, 0xc(%rcx)
movaps %xmm0, 0x20(%rcx)
movups %xmm0, 0x2c(%rcx)
movq %rsp, %r15
pushq $0x2
popq %rdx
movq %r15, %rdi
movq %r14, %rsi
callq 0x718da
leaq 0x160(%rbx), %rcx
movq 0x8(%rsp), %rax
cmpq %r15, %rcx
je 0x2c0ba3
testq %rax, %rax
je 0x2c0b14
lock
incl (%rax)
movq 0x168(%rbx), %rax
testq %rax, %rax
je 0x2c0b48
lock
decl (%rax)
jne 0x2c0b48
movq 0x160(%rbx), %rsi
movq 0x180(%rbx), %rdi
testq %rdi, %rdi
je 0x2c0b40
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2c0b48
movq %rsi, %rdi
callq 0x5f3e0
movq (%rsp), %rax
movq %rax, 0x160(%rbx)
movq 0x8(%rsp), %rax
movq %rax, 0x168(%rbx)
movq 0x10(%rsp), %rcx
movq %rcx, 0x170(%rbx)
movl 0x18(%rsp), %ecx
movl %ecx, 0x178(%rbx)
movq 0x20(%rsp), %rcx
movq %rcx, 0x180(%rbx)
movups 0x28(%rsp), %xmm0
movups %xmm0, 0x188(%rbx)
movl 0x38(%rsp), %ecx
movl %ecx, 0x198(%rbx)
movq 0x40(%rsp), %rcx
movq %rcx, 0x1a0(%rbx)
testq %rax, %rax
je 0x2c0bcb
lock
decl (%rax)
jne 0x2c0bcb
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
je 0x2c0bc3
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2c0bcb
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x40(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, (%rsp)
movups %xmm0, 0xc(%rsp)
andl $0x0, 0x38(%rsp)
movups %xmm0, 0x28(%rsp)
movq 0x58(%rsp), %rax
testq %rax, %rax
je 0x2c0c15
lock
decl (%rax)
jne 0x2c0c15
movq 0x50(%rsp), %rsi
movq 0x70(%rsp), %rdi
testq %rdi, %rdi
je 0x2c0c0d
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2c0c15
movq %rsi, %rdi
callq 0x5f3e0
pushq $0x3
popq %rsi
movss 0x132fe8(%rip), %xmm0 # 0x3f3c08
movq %r14, %rdi
callq 0x718c0
movss %xmm0, 0x1a8(%rbx)
pushq $0x4
popq %rsi
movq %r14, %rdi
movss 0x132fca(%rip), %xmm0 # 0x3f3c08
callq 0x718c0
movss %xmm0, 0x1ac(%rbx)
pushq $0x5
popq %rsi
movss 0x137c7e(%rip), %xmm0 # 0x3f88d4
movq %r14, %rdi
callq 0x718c0
movss %xmm0, 0x1b0(%rbx)
pushq $0x6
popq %rsi
movq %r14, %rdi
movss 0x137c60(%rip), %xmm0 # 0x3f88d4
callq 0x718c0
movss %xmm0, 0x1b4(%rbx)
pushq $0x7
popq %rsi
pushq $0x1
popq %rdx
movq %r14, %rdi
callq 0x718a6
movl %eax, 0x1b8(%rbx)
pushq $0x8
popq %rsi
movq %r14, %rdi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0x1bc(%rbx)
pushq $0x9
popq %rsi
movq %r14, %rdi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0x1c0(%rbx)
pushq $0xa
popq %rsi
movq %r14, %rdi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0x1c4(%rbx)
pushq $0xb
popq %rsi
movss 0x137bff(%rip), %xmm0 # 0x3f88d8
movq %r14, %rdi
callq 0x718c0
movss %xmm0, 0x1c8(%rbx)
pushq $0xc
popq %rsi
movq %r14, %rdi
movss 0x137be1(%rip), %xmm0 # 0x3f88d8
callq 0x718c0
movss %xmm0, 0x1cc(%rbx)
pushq $0xd
popq %rsi
xorps %xmm0, %xmm0
movq %r14, %rdi
callq 0x718c0
movss %xmm0, 0x1d0(%rbx)
pushq $0xe
popq %rsi
movq %r14, %rdi
xorl %edx, %edx
callq 0x718a6
testl %eax, %eax
setne 0x1d4(%rbx)
pushq $0xf
popq %rsi
movq %r14, %rdi
xorl %edx, %edx
callq 0x718a6
testl %eax, %eax
setne 0x1d5(%rbx)
xorl %eax, %eax
addq $0xa0, %rsp
popq %rbx
popq %r14
popq %r15
retq
movq %rax, %rbx
movq 0x8(%rsp), %rax
testq %rax, %rax
je 0x2c0d85
lock
decl (%rax)
jne 0x2c0d85
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
jne 0x2c0d7f
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2c0d85
movq (%rdi), %rax
callq *0x18(%rax)
andq $0x0, 0x40(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, (%rsp)
movups %xmm0, 0xc(%rsp)
movups %xmm0, 0x28(%rsp)
andl $0x0, 0x38(%rsp)
jmp 0x2c0e75
jmp 0x2c0f01
movq %rax, %rbx
movq 0x8(%rsp), %rax
testq %rax, %rax
je 0x2c0ddb
lock
decl (%rax)
jne 0x2c0ddb
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
jne 0x2c0dd5
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2c0ddb
movq (%rdi), %rax
callq *0x18(%rax)
andq $0x0, 0x40(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, (%rsp)
movups %xmm0, 0xc(%rsp)
movups %xmm0, 0x28(%rsp)
andl $0x0, 0x38(%rsp)
jmp 0x2c0ea0
jmp 0x2c0f01
movq %rax, %rbx
movq 0x8(%rsp), %rax
testq %rax, %rax
je 0x2c0e31
lock
decl (%rax)
jne 0x2c0e31
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
jne 0x2c0e2b
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2c0e31
movq (%rdi), %rax
callq *0x18(%rax)
andq $0x0, 0x40(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, (%rsp)
movups %xmm0, 0xc(%rsp)
movups %xmm0, 0x28(%rsp)
andl $0x0, 0x38(%rsp)
jmp 0x2c0ecb
jmp 0x2c0f01
jmp 0x2c0f01
jmp 0x2c0f01
jmp 0x2c0f01
jmp 0x2c0f01
jmp 0x2c0f01
jmp 0x2c0f01
movq %rax, %rbx
movq 0x58(%rsp), %rax
testq %rax, %rax
je 0x2c0ef1
lock
decl (%rax)
jne 0x2c0ef1
movq 0x50(%rsp), %rsi
movq 0x70(%rsp), %rdi
testq %rdi, %rdi
je 0x2c0ee9
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2c0ef1
jmp 0x2c0f01
movq %rax, %rbx
movq 0x58(%rsp), %rax
testq %rax, %rax
je 0x2c0ef1
lock
decl (%rax)
jne 0x2c0ef1
movq 0x50(%rsp), %rsi
movq 0x70(%rsp), %rdi
testq %rdi, %rdi
je 0x2c0ee9
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2c0ef1
jmp 0x2c0f01
movq %rax, %rbx
movq 0x58(%rsp), %rax
testq %rax, %rax
je 0x2c0ef1
lock
decl (%rax)
jne 0x2c0ef1
movq 0x50(%rsp), %rsi
movq 0x70(%rsp), %rdi
testq %rdi, %rdi
jne 0x2c0ef9
movq %rsi, %rdi
callq 0x5f3e0
movq %rbx, %rdi
callq 0x5f340
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2c0ef1
movq %rax, %rdi
callq 0x61d68
nop
| /csukuangfj[P]ncnn/src/layer/priorbox.cpp |
ncnn::DetectionOutput::load_param(ncnn::ParamDict const&) | int DetectionOutput::load_param(const ParamDict& pd)
{
num_class = pd.get(0, 0);
nms_threshold = pd.get(1, 0.05f);
nms_top_k = pd.get(2, 300);
keep_top_k = pd.get(3, 100);
confidence_threshold = pd.get(4, 0.5f);
variances[0] = pd.get(5, 0.1f);
variances[1] = pd.get(6, 0.1f);
variances[2] = pd.get(7, 0.2f);
variances[3] = pd.get(8, 0.2f);
return 0;
} | pushq %r14
pushq %rbx
pushq %rax
movq %rsi, %r14
movq %rdi, %rbx
movq %rsi, %rdi
xorl %esi, %esi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0xd0(%rbx)
pushq $0x1
popq %rsi
movss 0x1371b7(%rip), %xmm0 # 0x3f88f0
movq %r14, %rdi
callq 0x718c0
movss %xmm0, 0xd4(%rbx)
pushq $0x2
popq %rsi
movq %r14, %rdi
movl $0x12c, %edx # imm = 0x12C
callq 0x718a6
movl %eax, 0xd8(%rbx)
pushq $0x3
popq %rsi
pushq $0x64
popq %rdx
movq %r14, %rdi
callq 0x718a6
movl %eax, 0xdc(%rbx)
pushq $0x4
popq %rsi
movss 0x12c896(%rip), %xmm0 # 0x3ee014
movq %r14, %rdi
callq 0x718c0
movss %xmm0, 0xe0(%rbx)
pushq $0x5
popq %rsi
movss 0x13246f(%rip), %xmm0 # 0x3f3c08
movq %r14, %rdi
callq 0x718c0
movss %xmm0, 0xe4(%rbx)
pushq $0x6
popq %rsi
movq %r14, %rdi
movss 0x132451(%rip), %xmm0 # 0x3f3c08
callq 0x718c0
movss %xmm0, 0xe8(%rbx)
pushq $0x7
popq %rsi
movss 0x137105(%rip), %xmm0 # 0x3f88d4
movq %r14, %rdi
callq 0x718c0
movss %xmm0, 0xec(%rbx)
pushq $0x8
popq %rsi
movq %r14, %rdi
movss 0x1370e7(%rip), %xmm0 # 0x3f88d4
callq 0x718c0
movss %xmm0, 0xf0(%rbx)
xorl %eax, %eax
addq $0x8, %rsp
popq %rbx
popq %r14
retq
| /csukuangfj[P]ncnn/src/layer/detectionoutput.cpp |
ncnn::Interp::load_param(ncnn::ParamDict const&) | int Interp::load_param(const ParamDict& pd)
{
resize_type = pd.get(0, 0);
height_scale = pd.get(1, 1.f);
width_scale = pd.get(2, 1.f);
output_height = pd.get(3, 0);
output_width = pd.get(4, 0);
dynamic_target_size = pd.get(5, 0);
align_corner = pd.get(6, 0);
if (resize_type < 0 || resize_type > 3)
{
NCNN_LOGE("unsupported resize type %d", resize_type);
return -1;
}
if (dynamic_target_size == 1)
{
one_blob_only = false;
}
return 0;
} | pushq %r15
pushq %r14
pushq %rbx
movq %rsi, %r15
movq %rdi, %r14
xorl %ebx, %ebx
movq %rsi, %rdi
xorl %esi, %esi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0xd0(%r14)
pushq $0x1
popq %rsi
movss 0x12bc91(%rip), %xmm0 # 0x3eec88
movq %r15, %rdi
callq 0x718c0
movss %xmm0, 0xd8(%r14)
pushq $0x2
popq %rsi
movq %r15, %rdi
movss 0x12bc72(%rip), %xmm0 # 0x3eec88
callq 0x718c0
movss %xmm0, 0xd4(%r14)
pushq $0x3
popq %rsi
movq %r15, %rdi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0xe0(%r14)
pushq $0x4
popq %rsi
movq %r15, %rdi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0xdc(%r14)
pushq $0x5
popq %rsi
movq %r15, %rdi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0xe4(%r14)
pushq $0x6
popq %rsi
movq %r15, %rdi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0xe8(%r14)
movl 0xd0(%r14), %edx
cmpl $0x4, %edx
jae 0x2c3097
cmpl $0x1, 0xe4(%r14)
jne 0x2c308f
movb $0x0, 0x8(%r14)
movl %ebx, %eax
popq %rbx
popq %r14
popq %r15
retq
movq 0x1ccf3a(%rip), %rbx # 0x48ffd8
movq (%rbx), %rdi
leaq 0x1358a8(%rip), %rsi # 0x3f8950
xorl %eax, %eax
callq 0x5f150
movq (%rbx), %rsi
pushq $0xa
popq %rdi
callq 0x5f1c0
pushq $-0x1
popq %rbx
jmp 0x2c308f
nop
| /csukuangfj[P]ncnn/src/layer/interp.cpp |
ncnn::Interp::forward(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const | int Interp::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int outw = output_width;
int outh = output_height;
if (bottom_blob.dims == 1)
{
w = 1;
h = 1;
}
if (outw == 0 || outh == 0)
{
outw = static_cast<int>(w * width_scale);
outh = static_cast<int>(h * height_scale);
}
Mat reference_blob;
reference_blob.w = outw;
reference_blob.h = outh;
std::vector<Mat> bottom_blobs(2);
bottom_blobs[0] = bottom_blob;
bottom_blobs[1] = reference_blob;
std::vector<Mat> top_blobs(1);
int ret = forward(bottom_blobs, top_blobs, opt);
top_blob = top_blobs[0];
return ret;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x58, %rsp
movq %rcx, %r14
movq %rdx, %rbx
movq %rsi, %r12
movq %rdi, %r15
movq 0xdc(%rdi), %xmm0
movdqa %xmm0, 0x40(%rsp)
pshufd $0x50, %xmm0, %xmm0 # xmm0 = xmm0[0,0,1,1]
pxor %xmm1, %xmm1
pcmpeqd %xmm0, %xmm1
movmskpd %xmm1, %eax
testl %eax, %eax
je 0x2c314f
xorl %eax, %eax
cmpl $0x1, 0x28(%r12)
sete %al
movsd 0x2c(%r12), %xmm0
movd %eax, %xmm1
pshufd $0x50, %xmm1, %xmm1 # xmm1 = xmm1[0,0,1,1]
pslld $0x1f, %xmm1
psrad $0x1f, %xmm1
cvtdq2ps %xmm0, %xmm0
movdqa %xmm1, %xmm2
pandn %xmm0, %xmm2
pand 0x12af7b(%rip), %xmm1 # 0x3ee0b0
por %xmm2, %xmm1
movsd 0xd4(%r15), %xmm0
mulps %xmm1, %xmm0
cvttps2dq %xmm0, %xmm0
movdqa %xmm0, 0x40(%rsp)
leaq 0x28(%rsp), %r13
pushq $0x2
popq %rsi
leaq 0x10(%rsp), %rdx
movq %r13, %rdi
callq 0x6fa72
movq (%r13), %r13
cmpq %r12, %r13
je 0x2c3212
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x2c317e
lock
incl (%rax)
movq 0x8(%r13), %rax
testq %rax, %rax
je 0x2c31a9
lock
decl (%rax)
jne 0x2c31a9
movq (%r13), %rsi
movq 0x20(%r13), %rdi
testq %rdi, %rdi
je 0x2c31a1
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2c31a9
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x40(%r13)
pxor %xmm0, %xmm0
movdqu %xmm0, (%r13)
movdqu %xmm0, 0xc(%r13)
andl $0x0, 0x38(%r13)
movdqu %xmm0, 0x28(%r13)
movups (%r12), %xmm0
movups %xmm0, (%r13)
movq 0x10(%r12), %rax
movq %rax, 0x10(%r13)
movl 0x18(%r12), %eax
movl %eax, 0x18(%r13)
movq 0x20(%r12), %rax
movq %rax, 0x20(%r13)
movdqu 0x28(%r12), %xmm0
movdqu %xmm0, 0x28(%r13)
movl 0x38(%r12), %eax
movl %eax, 0x38(%r13)
movq 0x40(%r12), %rax
movq %rax, 0x40(%r13)
movq 0x28(%rsp), %r13
movq 0x50(%r13), %rax
testq %rax, %rax
je 0x2c323d
lock
decl (%rax)
jne 0x2c323d
movq 0x48(%r13), %rsi
movq 0x68(%r13), %rdi
testq %rdi, %rdi
je 0x2c3235
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2c323d
movq %rsi, %rdi
callq 0x5f3e0
leaq 0x48(%r13), %rax
pxor %xmm0, %xmm0
movdqu %xmm0, 0xc(%rax)
movdqu %xmm0, (%rax)
movdqu %xmm0, 0x68(%r13)
movdqa 0x40(%rsp), %xmm0
movq %xmm0, 0x74(%r13)
andq $0x0, 0x7c(%r13)
andq $0x0, 0x88(%r13)
leaq 0x10(%rsp), %rdi
pushq $0x1
popq %rsi
leaq 0xf(%rsp), %rdx
callq 0x6fa72
movq (%r15), %rax
leaq 0x28(%rsp), %rsi
leaq 0x10(%rsp), %rdx
movq %r15, %rdi
movq %r14, %rcx
callq *0x30(%rax)
movl %eax, %ebp
movq 0x10(%rsp), %r14
cmpq %rbx, %r14
je 0x2c332e
movq 0x8(%r14), %rax
testq %rax, %rax
je 0x2c32b1
lock
incl (%rax)
movq 0x8(%rbx), %rax
testq %rax, %rax
je 0x2c32db
lock
decl (%rax)
jne 0x2c32db
movq (%rbx), %rsi
movq 0x20(%rbx), %rdi
testq %rdi, %rdi
je 0x2c32d3
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2c32db
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x40(%rbx)
pxor %xmm0, %xmm0
movdqu %xmm0, (%rbx)
movdqu %xmm0, 0xc(%rbx)
andl $0x0, 0x38(%rbx)
movdqu %xmm0, 0x28(%rbx)
movups (%r14), %xmm0
movups %xmm0, (%rbx)
movq 0x10(%r14), %rax
movq %rax, 0x10(%rbx)
movl 0x18(%r14), %eax
movl %eax, 0x18(%rbx)
movq 0x20(%r14), %rax
movq %rax, 0x20(%rbx)
movdqu 0x28(%r14), %xmm0
movdqu %xmm0, 0x28(%rbx)
movl 0x38(%r14), %eax
movl %eax, 0x38(%rbx)
movq 0x40(%r14), %rax
movq %rax, 0x40(%rbx)
leaq 0x10(%rsp), %rdi
callq 0x6fac4
leaq 0x28(%rsp), %rdi
callq 0x6fac4
movl %ebp, %eax
addq $0x58, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x2c3355
movq %rax, %rbx
jmp 0x2c3367
movq %rax, %rbx
leaq 0x10(%rsp), %rdi
callq 0x6fac4
leaq 0x28(%rsp), %rdi
callq 0x6fac4
movq %rbx, %rdi
callq 0x5f340
nop
| /csukuangfj[P]ncnn/src/layer/interp.cpp |
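The single-blob overload above synthesizes a reference_blob whose only meaningful fields are w and h, then defers to the multi-blob forward. Note that when the output size comes from the scales it is truncated, not rounded. A minimal sketch of that size computation, mirroring the source (including the dims == 1 collapse to a 1x1 grid):

// Compute Interp's output size the way forward() does: an explicit
// output_width/output_height wins; otherwise scale and truncate toward zero.
static void interp_output_size(int w, int h, int dims,
                               int output_width, int output_height,
                               float width_scale, float height_scale,
                               int* outw, int* outh)
{
    if (dims == 1)
    {
        w = 1; // 1-D blobs are treated as a 1x1 spatial grid
        h = 1;
    }
    *outw = output_width;
    *outh = output_height;
    if (*outw == 0 || *outh == 0)
    {
        *outw = (int)(w * width_scale);   // truncation, not rounding
        *outh = (int)(h * height_scale);
    }
}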
ncnn::linear_coeffs(int, int, int*, float*, int) | static void linear_coeffs(int w, int outw, int* xofs, float* alpha, int align_corner)
{
double scale = (double)w / outw;
if (align_corner)
{
scale = (double)(w - 1) / (outw - 1);
}
for (int dx = 0; dx < outw; dx++)
{
float fx = (float)((dx + 0.5) * scale - 0.5);
if (align_corner)
{
fx = (float)(dx * scale);
}
int sx = (int)floorf(fx);
fx -= sx;
if (sx < 0)
{
sx = 0;
fx = 0.f;
}
if (sx >= w - 1)
{
sx = w - 2;
fx = 1.f;
}
xofs[dx] = sx;
alpha[dx * 2] = 1.f - fx;
alpha[dx * 2 + 1] = fx;
}
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x18, %rsp
movl %r8d, %ebx
movq %rcx, %r14
movq %rdx, 0x10(%rsp)
movl %esi, %r12d
movl %edi, %r13d
testl %r8d, %r8d
je 0x2c7078
leal -0x1(%r13), %ebp
cvtsi2sd %ebp, %xmm1
leal -0x1(%r12), %eax
cvtsi2sd %eax, %xmm0
divsd %xmm0, %xmm1
movsd %xmm1, (%rsp)
jmp 0x2c708f
cvtsi2sd %r13d, %xmm1
cvtsi2sd %r12d, %xmm0
divsd %xmm0, %xmm1
movsd %xmm1, (%rsp)
leal -0x1(%r13), %ebp
addl $-0x2, %r13d
xorl %eax, %eax
testl %r12d, %r12d
cmovlel %eax, %r12d
xorl %r15d, %r15d
cmpq %r15, %r12
je 0x2c7140
cvtsi2sd %r15d, %xmm0
movapd %xmm0, %xmm1
addsd 0x12700f(%rip), %xmm1 # 0x3ee0c8
mulsd (%rsp), %xmm1
addsd 0x127342(%rip), %xmm1 # 0x3ee408
testl %ebx, %ebx
je 0x2c70d3
mulsd (%rsp), %xmm0
movapd %xmm0, %xmm1
cvtsd2ss %xmm1, %xmm0
movss %xmm0, 0xc(%rsp)
callq 0x5f360
cvttss2si %xmm0, %eax
xorps %xmm1, %xmm1
testl %eax, %eax
js 0x2c70fe
cvttps2dq %xmm0, %xmm0
cvtdq2ps %xmm0, %xmm0
movss 0xc(%rsp), %xmm1
subss %xmm0, %xmm1
movl $0x0, %ecx
cmovlel %ecx, %eax
cmpl %ebp, %eax
jl 0x2c7112
movss 0x127b76(%rip), %xmm1 # 0x3eec88
cmovgel %r13d, %eax
movq 0x10(%rsp), %rcx
movl %eax, (%rcx,%r15,4)
movss 0x127b61(%rip), %xmm0 # 0x3eec88
subss %xmm1, %xmm0
movss %xmm0, (%r14,%r15,8)
movss %xmm1, 0x4(%r14,%r15,8)
incq %r15
jmp 0x2c709f
addq $0x18, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
| /csukuangfj[P]ncnn/src/layer/x86/interp_bilinear.h |
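Two invariants make linear_coeffs easy to sanity-check: each output column's pair of weights sums to 1, and the clamped source index stays in [0, w-2]. A minimal hypothetical harness (assumes the linear_coeffs definition above is visible in the same translation unit):

#include <assert.h>
#include <math.h>
#include <stdio.h>

int main()
{
    const int w = 8, outw = 5;
    int xofs[5];
    float alpha[10]; // two weights per output column
    linear_coeffs(w, outw, xofs, alpha, /*align_corner=*/0);
    for (int dx = 0; dx < outw; dx++)
    {
        assert(xofs[dx] >= 0 && xofs[dx] <= w - 2);                     // clamped index
        assert(fabsf(alpha[dx * 2] + alpha[dx * 2 + 1] - 1.f) < 1e-6f); // weights sum to 1
        printf("dx=%d sx=%d a0=%f a1=%f\n", dx, xofs[dx], alpha[dx * 2], alpha[dx * 2 + 1]);
    }
    return 0;
}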
ncnn::cubic_coeffs(int, int, int*, float*, int) | static void cubic_coeffs(int w, int outw, int* xofs, float* alpha, int align_corner)
{
double scale = (double)w / outw;
if (align_corner)
{
scale = (double)(w - 1) / (outw - 1);
}
for (int dx = 0; dx < outw; dx++)
{
float fx = (float)((dx + 0.5) * scale - 0.5);
if (align_corner)
{
fx = (float)(dx * scale);
}
int sx = static_cast<int>(floor(fx));
fx -= sx;
interpolate_cubic(fx, alpha + dx * 4);
if (sx <= -1)
{
sx = 1;
alpha[dx * 4 + 0] = 1.f - alpha[dx * 4 + 3];
alpha[dx * 4 + 1] = alpha[dx * 4 + 3];
alpha[dx * 4 + 2] = 0.f;
alpha[dx * 4 + 3] = 0.f;
}
if (sx == 0)
{
sx = 1;
alpha[dx * 4 + 0] = alpha[dx * 4 + 0] + alpha[dx * 4 + 1];
alpha[dx * 4 + 1] = alpha[dx * 4 + 2];
alpha[dx * 4 + 2] = alpha[dx * 4 + 3];
alpha[dx * 4 + 3] = 0.f;
}
if (sx == w - 2)
{
sx = w - 3;
alpha[dx * 4 + 3] = alpha[dx * 4 + 2] + alpha[dx * 4 + 3];
alpha[dx * 4 + 2] = alpha[dx * 4 + 1];
alpha[dx * 4 + 1] = alpha[dx * 4 + 0];
alpha[dx * 4 + 0] = 0.f;
}
if (sx >= w - 1)
{
sx = w - 3;
alpha[dx * 4 + 3] = 1.f - alpha[dx * 4 + 0];
alpha[dx * 4 + 2] = alpha[dx * 4 + 0];
alpha[dx * 4 + 1] = 0.f;
alpha[dx * 4 + 0] = 0.f;
}
xofs[dx] = sx;
}
} | testl %r8d, %r8d
je 0x2d3720
leal -0x1(%rdi), %eax
vcvtsi2sd %eax, %xmm0, %xmm0
leal -0x1(%rsi), %r9d
vcvtsi2sd %r9d, %xmm1, %xmm1
vdivsd %xmm1, %xmm0, %xmm0
jmp 0x2d372f
vcvtsi2sd %edi, %xmm0, %xmm0
vcvtsi2sd %esi, %xmm1, %xmm1
vdivsd %xmm1, %xmm0, %xmm0
leal -0x1(%rdi), %eax
pushq %rbp
pushq %r14
pushq %rbx
leal -0x2(%rdi), %r9d
addl $-0x3, %edi
xorl %r10d, %r10d
testl %esi, %esi
cmovlel %r10d, %esi
leaq 0xc(%rcx), %r11
vmovss 0x11b539(%rip), %xmm3 # 0x3eec88
vmovss 0x1251b9(%rip), %xmm4 # 0x3f8910
vmovss 0x124129(%rip), %xmm5 # 0x3f7888
vmovsd 0x1251b9(%rip), %xmm6 # 0x3f8920
vmovss 0x1251a5(%rip), %xmm7 # 0x3f8914
vbroadcastss 0x1251a0(%rip), %xmm8 # 0x3f8918
vbroadcastss 0x11b507(%rip), %xmm9 # 0x3eec88
vmovss 0x11ef9b(%rip), %xmm10 # 0x3f2724
vmovss 0x125187(%rip), %xmm11 # 0x3f8918
vbroadcastss 0x11da1a(%rip), %xmm12 # 0x3f11b4
movl $0x1, %ebx
cmpq %r10, %rsi
je 0x2d3901
vcvtsi2sd %r10d, %xmm15, %xmm13
vaddsd 0x11a913(%rip), %xmm13, %xmm14 # 0x3ee0c8
vmulsd %xmm0, %xmm14, %xmm14
vaddsd 0x11ac47(%rip), %xmm14, %xmm14 # 0x3ee408
testl %r8d, %r8d
je 0x2d37ca
vmulsd %xmm0, %xmm13, %xmm14
vcvtsd2ss %xmm14, %xmm14, %xmm13
vroundss $0x9, %xmm13, %xmm13, %xmm14
vcvttss2si %xmm14, %ebp
vsubss %xmm14, %xmm13, %xmm13
vaddss %xmm3, %xmm13, %xmm14
vsubss %xmm13, %xmm3, %xmm15
vmulss %xmm4, %xmm14, %xmm1
vaddss %xmm5, %xmm1, %xmm1
vblendps $0xe, %xmm9, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm9[1,2,3]
vinsertps $0x10, %xmm13, %xmm14, %xmm13 # xmm13 = xmm14[0],xmm13[0],xmm14[2,3]
vmulps %xmm13, %xmm13, %xmm14
vmulps %xmm6, %xmm13, %xmm13
vsubps %xmm13, %xmm7, %xmm2
vaddps %xmm8, %xmm13, %xmm13
vblendps $0x2, %xmm13, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm13[1],xmm2[2,3]
vmulps %xmm2, %xmm14, %xmm2
vaddps %xmm1, %xmm2, %xmm13
vmovlps %xmm13, -0xc(%r11)
vmulss %xmm15, %xmm15, %xmm1
vmulss %xmm10, %xmm15, %xmm2
vaddss %xmm2, %xmm11, %xmm2
vmulss %xmm2, %xmm1, %xmm1
vaddss %xmm3, %xmm1, %xmm2
vmovss %xmm2, -0x4(%r11)
vmovshdup %xmm13, %xmm2 # xmm2 = xmm13[1,1,3,3]
vaddss %xmm1, %xmm13, %xmm1
vaddss %xmm2, %xmm1, %xmm1
vxorps %xmm1, %xmm12, %xmm1
vmovss %xmm1, (%r11)
testl %ebp, %ebp
js 0x2d3878
jne 0x2d38a8
vaddss -0x8(%r11), %xmm13, %xmm13
vmovss %xmm13, -0xc(%r11)
vmovsd -0x4(%r11), %xmm1
vmovsd %xmm1, -0x8(%r11)
movq %r11, %r14
jmp 0x2d38a2
leaq (,%r10,4), %r14
leaq (%rcx,%r14,4), %r14
addq $0xc, %r14
vmovss (%r11), %xmm1
vsubss %xmm1, %xmm3, %xmm13
vmovss %xmm13, -0xc(%r11)
vmovss %xmm1, -0x8(%r11)
andl $0x0, -0x4(%r11)
andl $0x0, (%r14)
movl %ebx, %ebp
cmpl %r9d, %ebp
jne 0x2d38d1
vmovss (%r11), %xmm1
vaddss -0x4(%r11), %xmm1, %xmm1
vmovss -0x8(%r11), %xmm2
vmovss %xmm1, (%r11)
vmovss %xmm2, -0x4(%r11)
vmovss %xmm13, -0x8(%r11)
jmp 0x2d38ea
cmpl %eax, %ebp
jl 0x2d38f1
vsubss %xmm13, %xmm3, %xmm1
vmovss %xmm1, (%r11)
vmovss %xmm13, -0x4(%r11)
andl $0x0, -0x8(%r11)
andl $0x0, -0xc(%r11)
movl %edi, %ebp
movl %ebp, (%rdx,%r10,4)
incq %r10
addq $0x10, %r11
jmp 0x2d379f
popq %rbx
popq %r14
popq %rbp
retq
| /csukuangfj[P]ncnn/src/layer/x86/interp_bicubic.h |
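cubic_coeffs relies on interpolate_cubic, which is not shown above. The structure of the vectorized weights, including the final 1 - w0 - w1 - w2 term, matches the standard Keys cubic-convolution kernel with A = -0.75 (the OpenCV convention); a scalar version consistent with that, offered as a reference sketch rather than a verbatim copy of the header:

// Keys cubic kernel, A = -0.75: weights for the four taps around fx in [0, 1).
static inline void interpolate_cubic(float fx, float* coeffs)
{
    const float A = -0.75f;
    float fx0 = fx + 1.f;  // distance to the leftmost tap, in (1, 2)
    float fx1 = fx;        // distance to the near-left tap, in [0, 1)
    float fx2 = 1.f - fx;  // distance to the near-right tap, in (0, 1]
    coeffs[0] = A * fx0 * fx0 * fx0 - 5 * A * fx0 * fx0 + 8 * A * fx0 - 4 * A;
    coeffs[1] = (A + 2) * fx1 * fx1 * fx1 - (A + 3) * fx1 * fx1 + 1;
    coeffs[2] = (A + 2) * fx2 * fx2 * fx2 - (A + 3) * fx2 * fx2 + 1;
    coeffs[3] = 1.f - coeffs[0] - coeffs[1] - coeffs[2]; // exact partition of unity
}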
ncnn::DeconvolutionDepthWise::load_param(ncnn::ParamDict const&) | int DeconvolutionDepthWise::load_param(const ParamDict& pd)
{
num_output = pd.get(0, 0);
kernel_w = pd.get(1, 0);
kernel_h = pd.get(11, kernel_w);
dilation_w = pd.get(2, 1);
dilation_h = pd.get(12, dilation_w);
stride_w = pd.get(3, 1);
stride_h = pd.get(13, stride_w);
pad_left = pd.get(4, 0);
pad_right = pd.get(15, pad_left);
pad_top = pd.get(14, pad_left);
pad_bottom = pd.get(16, pad_top);
output_pad_right = pd.get(18, 0);
output_pad_bottom = pd.get(19, output_pad_right);
output_w = pd.get(20, 0);
output_h = pd.get(21, output_w);
bias_term = pd.get(5, 0);
weight_data_size = pd.get(6, 0);
group = pd.get(7, 1);
activation_type = pd.get(9, 0);
activation_params = pd.get(10, Mat());
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %rbx
subq $0x98, %rsp
movq %rsi, %r14
movq %rdi, %rbx
movq %rsi, %rdi
xorl %esi, %esi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0xd0(%rbx)
pushq $0x1
popq %rbp
movq %r14, %rdi
movl %ebp, %esi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0xd4(%rbx)
pushq $0xb
popq %rsi
movq %r14, %rdi
movl %eax, %edx
callq 0x718a6
movl %eax, 0xd8(%rbx)
pushq $0x2
popq %rsi
movq %r14, %rdi
movl %ebp, %edx
callq 0x718a6
movl %eax, 0xdc(%rbx)
pushq $0xc
popq %rsi
movq %r14, %rdi
movl %eax, %edx
callq 0x718a6
movl %eax, 0xe0(%rbx)
pushq $0x3
popq %rsi
movq %r14, %rdi
movl %ebp, %edx
callq 0x718a6
movl %eax, 0xe4(%rbx)
pushq $0xd
popq %rsi
movq %r14, %rdi
movl %eax, %edx
callq 0x718a6
movl %eax, 0xe8(%rbx)
pushq $0x4
popq %rsi
movq %r14, %rdi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0xec(%rbx)
pushq $0xf
popq %rsi
movq %r14, %rdi
movl %eax, %edx
callq 0x718a6
movl %eax, 0xf0(%rbx)
movl 0xec(%rbx), %edx
pushq $0xe
popq %rsi
movq %r14, %rdi
callq 0x718a6
movl %eax, 0xf4(%rbx)
pushq $0x10
popq %rsi
movq %r14, %rdi
movl %eax, %edx
callq 0x718a6
movl %eax, 0xf8(%rbx)
pushq $0x12
popq %rsi
movq %r14, %rdi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0xfc(%rbx)
pushq $0x13
popq %rsi
movq %r14, %rdi
movl %eax, %edx
callq 0x718a6
movl %eax, 0x100(%rbx)
pushq $0x14
popq %rsi
movq %r14, %rdi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0x104(%rbx)
pushq $0x15
popq %rsi
movq %r14, %rdi
movl %eax, %edx
callq 0x718a6
movl %eax, 0x108(%rbx)
pushq $0x5
popq %rsi
movq %r14, %rdi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0x10c(%rbx)
pushq $0x6
popq %rsi
movq %r14, %rdi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0x110(%rbx)
pushq $0x7
popq %rsi
movq %r14, %rdi
movl %ebp, %edx
callq 0x718a6
movl %eax, 0x114(%rbx)
pushq $0x9
popq %rsi
movq %r14, %rdi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0x118(%rbx)
leaq 0x50(%rsp), %rcx
andq $0x0, 0x40(%rcx)
xorps %xmm0, %xmm0
movaps %xmm0, (%rcx)
movups %xmm0, 0xc(%rcx)
movaps %xmm0, 0x20(%rcx)
movups %xmm0, 0x2c(%rcx)
movq %rsp, %r15
pushq $0xa
popq %rdx
movq %r15, %rdi
movq %r14, %rsi
callq 0x718da
leaq 0x120(%rbx), %rcx
movq 0x8(%rsp), %rax
cmpq %r15, %rcx
je 0x2d3bf6
testq %rax, %rax
je 0x2d3b67
lock
incl (%rax)
movq 0x128(%rbx), %rax
testq %rax, %rax
je 0x2d3b9b
lock
decl (%rax)
jne 0x2d3b9b
movq 0x120(%rbx), %rsi
movq 0x140(%rbx), %rdi
testq %rdi, %rdi
je 0x2d3b93
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2d3b9b
movq %rsi, %rdi
callq 0x5f3e0
movq (%rsp), %rax
movq %rax, 0x120(%rbx)
movq 0x8(%rsp), %rax
movq %rax, 0x128(%rbx)
movq 0x10(%rsp), %rcx
movq %rcx, 0x130(%rbx)
movl 0x18(%rsp), %ecx
movl %ecx, 0x138(%rbx)
movq 0x20(%rsp), %rcx
movq %rcx, 0x140(%rbx)
movups 0x28(%rsp), %xmm0
movups %xmm0, 0x148(%rbx)
movl 0x38(%rsp), %ecx
movl %ecx, 0x158(%rbx)
movq 0x40(%rsp), %rcx
movq %rcx, 0x160(%rbx)
testq %rax, %rax
je 0x2d3c1e
lock
decl (%rax)
jne 0x2d3c1e
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
je 0x2d3c16
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2d3c1e
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x40(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, (%rsp)
movups %xmm0, 0xc(%rsp)
andl $0x0, 0x38(%rsp)
movups %xmm0, 0x28(%rsp)
movq 0x58(%rsp), %rax
testq %rax, %rax
je 0x2d3c68
lock
decl (%rax)
jne 0x2d3c68
movq 0x50(%rsp), %rsi
movq 0x70(%rsp), %rdi
testq %rdi, %rdi
je 0x2d3c60
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2d3c68
movq %rsi, %rdi
callq 0x5f3e0
xorl %eax, %eax
addq $0x98, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
movq %rax, %rbx
movq 0x8(%rsp), %rax
testq %rax, %rax
je 0x2d3ca8
lock
decl (%rax)
jne 0x2d3ca8
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
jne 0x2d3ca2
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2d3ca8
movq (%rdi), %rax
callq *0x18(%rax)
andq $0x0, 0x40(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, (%rsp)
movups %xmm0, 0xc(%rsp)
movups %xmm0, 0x28(%rsp)
andl $0x0, 0x38(%rsp)
jmp 0x2d3ccf
jmp 0x2d3d05
jmp 0x2d3d05
jmp 0x2d3d05
movq %rax, %rbx
movq 0x58(%rsp), %rax
testq %rax, %rax
je 0x2d3cfd
lock
decl (%rax)
jne 0x2d3cfd
movq 0x50(%rsp), %rsi
movq 0x70(%rsp), %rdi
testq %rdi, %rdi
jne 0x2d3cf7
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2d3cfd
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
nop
| /csukuangfj[P]ncnn/src/layer/deconvolutiondepthwise.cpp |
ncnn::DeconvolutionDepthWise::forward(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const | int DeconvolutionDepthWise::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
int w = bottom_blob.w;
int h = bottom_blob.h;
size_t elemsize = bottom_blob.elemsize;
const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;
int outw = (w - 1) * stride_w + kernel_extent_w + output_pad_right;
int outh = (h - 1) * stride_h + kernel_extent_h + output_pad_bottom;
Mat top_blob_bordered;
if (pad_left > 0 || pad_right > 0 || pad_top > 0 || pad_bottom > 0 || (output_w > 0 && output_h > 0))
{
top_blob_bordered.create(outw, outh, num_output, elemsize, opt.workspace_allocator);
}
else
{
top_blob_bordered = top_blob;
top_blob_bordered.create(outw, outh, num_output, elemsize, opt.blob_allocator);
}
if (top_blob_bordered.empty())
return -100;
int ret = deconvolutiondepthwise(bottom_blob, top_blob_bordered, weight_data, bias_data, kernel_w, kernel_h, stride_w, stride_h, dilation_w, dilation_h, group, activation_type, activation_params, opt);
if (ret != 0)
return ret;
cut_padding(top_blob_bordered, top_blob, opt);
if (top_blob.empty())
return -100;
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x198, %rsp # imm = 0x198
movl 0x2c(%rsi), %r14d
movl 0x30(%rsi), %ebp
movl 0xd4(%rdi), %ebx
decl %ebx
imull 0xdc(%rdi), %ebx
movq %rsi, 0x20(%rsp)
movq 0x10(%rsi), %r8
decl %r14d
imull 0xe4(%rdi), %r14d
movl 0xd8(%rdi), %r12d
decl %r12d
imull 0xe0(%rdi), %r12d
decl %ebp
imull 0xe8(%rdi), %ebp
movl 0xfc(%rdi), %esi
movl 0x100(%rdi), %r10d
andq $0x0, 0xb0(%rsp)
movq %rcx, %r9
movq %rdx, %r13
pxor %xmm0, %xmm0
movdqa %xmm0, 0x70(%rsp)
movdqu %xmm0, 0x7c(%rsp)
movdqa %xmm0, 0x90(%rsp)
movdqu %xmm0, 0x9c(%rsp)
movdqu 0xec(%rdi), %xmm1
pcmpgtd %xmm0, %xmm1
movmskps %xmm1, %eax
pushq $0x10
popq %r15
testl %eax, %eax
movq %rdi, 0x18(%rsp)
movq %rcx, 0x118(%rsp)
jne 0x2d4172
cmpl $0x0, 0x104(%rdi)
jle 0x2d409b
movq 0x18(%rsp), %rax
cmpl $0x0, 0x108(%rax)
jg 0x2d4172
pushq $0x8
popq %r15
leaq 0x70(%rsp), %rax
cmpq %r13, %rax
je 0x2d4172
movq 0x8(%r13), %rax
testq %rax, %rax
je 0x2d4117
lock
incl (%rax)
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x2d4117
lock
decl (%rax)
jne 0x2d4117
movq %rsi, 0x8(%rsp)
movq %r10, 0x58(%rsp)
movq %r8, 0x10(%rsp)
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x2d4100
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x10(%rsp), %r8
movq 0x58(%rsp), %r10
movq 0x8(%rsp), %rsi
jmp 0x2d4117
movq %rsi, %rdi
callq 0x5f3e0
movq 0x10(%rsp), %r8
movq 0x58(%rsp), %r10
movq 0x8(%rsp), %rsi
movups (%r13), %xmm0
movaps %xmm0, 0x70(%rsp)
movq 0x10(%r13), %rax
movq %rax, 0x80(%rsp)
movl 0x18(%r13), %eax
movl %eax, 0x88(%rsp)
movq 0x20(%r13), %rax
movq %rax, 0x90(%rsp)
movdqu 0x28(%r13), %xmm0
movdqu %xmm0, 0x98(%rsp)
movl 0x38(%r13), %eax
movl %eax, 0xa8(%rsp)
movq 0x40(%r13), %rax
movq %rax, 0xb0(%rsp)
movq 0x118(%rsp), %r9
addl %r14d, %ebx
addl %ebp, %r12d
addl %ebx, %esi
incl %esi
leal 0x1(%r10,%r12), %edx
movq 0x18(%rsp), %rax
movl 0xd0(%rax), %ecx
movq (%r9,%r15), %r9
leaq 0x70(%rsp), %rdi
callq 0x63810
pushq $-0x64
popq %rbx
cmpq $0x0, 0x70(%rsp)
je 0x2d4c36
movslq 0xa8(%rsp), %rcx
movq 0xb0(%rsp), %rax
imulq %rcx, %rax
testq %rax, %rax
je 0x2d4c36
movq %r13, 0x150(%rsp)
movq %rcx, 0x40(%rsp)
movq 0x18(%rsp), %rax
movl 0xd4(%rax), %r14d
movl 0xd8(%rax), %r13d
movslq 0xe4(%rax), %rcx
movq %rcx, 0x58(%rsp)
movslq 0xe8(%rax), %rcx
movq %rcx, 0x60(%rsp)
movl 0xdc(%rax), %ebp
movl 0xe0(%rax), %ebx
movl 0x114(%rax), %ecx
movq %rcx, 0xc0(%rsp)
movl 0x118(%rax), %eax
movq %rax, 0x128(%rsp)
movq 0x20(%rsp), %rax
movl 0x38(%rax), %eax
movl %eax, 0x10(%rsp)
movl 0x9c(%rsp), %r15d
movl %r13d, %eax
imull %r14d, %eax
movslq %eax, %r12
leaq 0x180(%rsp), %rdi
leaq 0x37(%rsp), %rdx
movq %r12, %rsi
callq 0x73bbe
imull %ebx, %r15d
movl %ebp, %eax
imull %r14d, %eax
subl %eax, %r15d
xorl %eax, %eax
testl %r14d, %r14d
cmovlel %eax, %r14d
testl %r13d, %r13d
cmovlel %eax, %r13d
movq 0x180(%rsp), %rbx
movl %r14d, %ecx
xorl %edx, %edx
xorl %esi, %esi
cmpl %r13d, %eax
je 0x2d42ae
movslq %ecx, %rdi
movslq %esi, %r8
leal (%r8,%r14), %esi
cmpq %r8, %rdi
je 0x2d42a4
movl %edx, (%rbx,%r8,4)
incq %r8
addl %ebp, %edx
jmp 0x2d4294
addl %r15d, %edx
incl %eax
addl %r14d, %ecx
jmp 0x2d4285
movl 0x10(%rsp), %eax
movl %eax, %edx
movq 0xc0(%rsp), %rsi
xorl %esi, %edx
movq 0x40(%rsp), %r8
movl %r8d, %ecx
xorl %esi, %ecx
orl %edx, %ecx
jne 0x2d46d2
movq 0x20(%rsp), %rdi
movq 0x10(%rdi), %rax
imulq 0x40(%rdi), %rax
movq %rax, 0x38(%rsp)
movq 0x18(%rsp), %rsi
movq 0x1b0(%rsi), %rcx
movslq 0x9c(%rsp), %rax
movslq 0xa0(%rsp), %r9
movq 0x80(%rsp), %r10
movq 0xb0(%rsp), %r11
imulq %r10, %r11
movq %r9, %rdx
imulq %rax, %rdx
movq %r10, %r14
movq %rdx, 0x100(%rsp)
imulq %rdx, %r14
addq $0xf, %r14
andq $-0x10, %r14
movq %r14, 0xf8(%rsp)
movq %rcx, 0xc8(%rsp)
testq %rcx, %rcx
sete %cl
movslq 0x1e8(%rsi), %rdx
imulq 0x1f0(%rsi), %rdx
testq %rdx, %rdx
sete %bpl
movslq 0x2c(%rdi), %rdx
imull %eax, %r9d
xorl %r13d, %r13d
testl %r12d, %r12d
movl $0x0, %r14d
cmovgl %r12d, %r14d
testl %edx, %edx
movl $0x0, %r15d
movq %rdx, 0x50(%rsp)
cmovgl %edx, %r15d
movl 0x30(%rdi), %edx
testl %edx, %edx
cmovlel %r13d, %edx
movq %rdx, 0x8(%rsp)
movq (%rdi), %rdx
movq %rdx, 0xe8(%rsp)
testl %r9d, %r9d
cmovlel %r13d, %r9d
testl %r8d, %r8d
cmovlel %r13d, %r8d
movq 0x60(%rsp), %rdx
imulq %rax, %rdx
movq 0x168(%rsi), %r13
orb %cl, %bpl
movb %bpl, 0xf0(%rsp)
movq %r10, 0x130(%rsp)
imulq %r10, %rdx
movq %rdx, 0x60(%rsp)
shlq $0x2, %r12
movl 0xa4(%rsp), %eax
movl %eax, 0xe0(%rsp)
movq 0x70(%rsp), %rax
movl 0x98(%rsp), %edx
movl %edx, 0xd0(%rsp)
movq %rax, 0xd8(%rsp)
movq %rax, %rdi
xorl %esi, %esi
movq %r8, 0x40(%rsp)
movq %r9, 0x48(%rsp)
movq %r11, 0x68(%rsp)
cmpq %r8, %rsi
je 0x2d4bee
movq 0xf8(%rsp), %rax
xorl %edx, %edx
divq 0x130(%rsp)
cmpl $0x4, 0xd0(%rsp)
cmoveq 0x100(%rsp), %rax
pxor %xmm0, %xmm0
cmpb $0x0, 0xf0(%rsp)
jne 0x2d4461
movq 0xc8(%rsp), %rcx
movd (%rcx,%rsi,4), %xmm0
movq 0x38(%rsp), %rcx
imulq %rsi, %rcx
addq 0xe8(%rsp), %rcx
movq %r11, %rdx
movq %rsi, 0x28(%rsp)
imulq %rsi, %rdx
addq 0xd8(%rsp), %rdx
imull 0xe0(%rsp), %eax
testl %eax, %eax
movl $0x0, %esi
cmovlel %esi, %eax
xorl %esi, %esi
movq 0x18(%rsp), %rbp
cmpl %esi, %eax
je 0x2d44ad
movd %xmm0, (%rdi,%rsi,4)
incq %rsi
jmp 0x2d449f
xorl %eax, %eax
movq %rdi, 0x10(%rsp)
cmpq 0x8(%rsp), %rax
je 0x2d4526
movq 0x60(%rsp), %rsi
imulq %rax, %rsi
addq %rdx, %rsi
movq %rax, %rdi
imulq 0x50(%rsp), %rdi
leaq (%rcx,%rdi,4), %rdi
xorl %r8d, %r8d
cmpq %r15, %r8
je 0x2d451c
movq %r8, %r9
imulq 0x58(%rsp), %r9
leaq (%rsi,%r9,4), %r9
movss (%rdi,%r8,4), %xmm0
xorl %r10d, %r10d
cmpq %r10, %r14
je 0x2d4517
movss (%r13,%r10,4), %xmm1
mulss %xmm0, %xmm1
movslq (%rbx,%r10,4), %r11
addss (%r9,%r11,4), %xmm1
movss %xmm1, (%r9,%r11,4)
incq %r10
jmp 0x2d44f2
incq %r8
jmp 0x2d44d7
incq %rax
movq 0x10(%rsp), %rdi
jmp 0x2d44b4
movq 0x120(%rbp), %rax
movq %rax, 0x20(%rsp)
xorl %ebp, %ebp
movq 0x48(%rsp), %rcx
leaq 0x1244b4(%rip), %rdx # 0x3f89f4
cmpq %rbp, %rcx
je 0x2d46b5
movss (%rdi,%rbp,4), %xmm4
movq 0x128(%rsp), %rax
decl %eax
cmpl $0x5, %eax
ja 0x2d456e
movslq (%rdx,%rax,4), %rax
addq %rdx, %rax
jmpq *%rax
maxss 0x119aa2(%rip), %xmm4 # 0x3ee010
movaps %xmm4, %xmm0
jmp 0x2d46a8
movaps %xmm4, %xmm0
movaps %xmm4, 0x140(%rsp)
callq 0x5f410
addss 0x11a6fa(%rip), %xmm0 # 0x3eec88
callq 0x5f200
callq 0x5f160
movq 0x10(%rsp), %rdi
leaq 0x124450(%rip), %rdx # 0x3f89f4
movq 0x48(%rsp), %rcx
mulss 0x140(%rsp), %xmm0
jmp 0x2d46a8
movq 0x20(%rsp), %rax
maxss (%rax), %xmm4
movss 0x4(%rax), %xmm1
ucomiss %xmm1, %xmm4
movaps %xmm4, %xmm0
ja 0x2d46a5
jmp 0x2d46a8
movss 0x11cbda(%rip), %xmm2 # 0x3f11b8
minss %xmm2, %xmm4
movaps %xmm4, %xmm0
xorps 0x119aa4(%rip), %xmm0 # 0x3ee090
cmpltss 0x11cbc7(%rip), %xmm4 # 0x3f11bc
movaps %xmm4, %xmm1
andnps %xmm0, %xmm1
andps %xmm2, %xmm4
orps %xmm1, %xmm4
movaps %xmm4, %xmm0
callq 0x5f410
movq 0x10(%rsp), %rdi
leaq 0x1243df(%rip), %rdx # 0x3f89f4
movq 0x48(%rsp), %rcx
movaps %xmm0, %xmm1
movss 0x11a663(%rip), %xmm0 # 0x3eec88
addss %xmm0, %xmm1
divss %xmm1, %xmm0
jmp 0x2d46a8
pxor %xmm0, %xmm0
cmpltss %xmm4, %xmm0
movaps %xmm0, %xmm1
movss 0x11a645(%rip), %xmm2 # 0x3eec88
andps %xmm2, %xmm1
movq 0x20(%rsp), %rax
movss (%rax), %xmm2
andnps %xmm2, %xmm0
orps %xmm1, %xmm0
mulss %xmm4, %xmm0
jmp 0x2d46a8
movq 0x20(%rsp), %rax
movss (%rax), %xmm1
movss 0x4(%rax), %xmm2
movaps %xmm2, %xmm3
xorps 0x119a1d(%rip), %xmm3 # 0x3ee090
divss %xmm1, %xmm3
pxor %xmm0, %xmm0
ucomiss %xmm3, %xmm4
jb 0x2d46a8
movss 0x11a600(%rip), %xmm0 # 0x3eec88
divss %xmm1, %xmm0
addss %xmm0, %xmm3
ucomiss %xmm3, %xmm4
ja 0x2d456e
mulss %xmm4, %xmm1
addss %xmm2, %xmm1
mulss %xmm4, %xmm1
movaps %xmm1, %xmm0
movd %xmm0, (%rdi,%rbp,4)
incq %rbp
jmp 0x2d4540
movq 0x28(%rsp), %rsi
incq %rsi
movq 0x68(%rsp), %r11
addq %r11, %rdi
addq %r12, %r13
movq 0x40(%rsp), %r8
jmp 0x2d441a
cltd
idivl %esi
movl %eax, %ecx
movl %r8d, %eax
cltd
idivl %esi
movl %ecx, %edx
imull %r12d, %edx
xorl %edi, %edi
testl %r12d, %r12d
movl $0x0, %ebp
cmovgl %r12d, %ebp
testl %ecx, %ecx
movl $0x0, %r15d
cmovgl %ecx, %r15d
testl %eax, %eax
movl $0x0, %r8d
cmovgl %eax, %r8d
movq %r8, 0x100(%rsp)
movslq %edx, %rdx
movslq %ecx, %rcx
movq %rcx, 0x160(%rsp)
cltq
testl %esi, %esi
cmovlel %edi, %esi
movl %eax, %ecx
imull %edx, %ecx
movl %ecx, 0x10c(%rsp)
shlq $0x2, %rdx
movq %rdx, 0x178(%rsp)
shlq $0x2, %r12
xorl %ecx, %ecx
movq $0x0, 0x120(%rsp)
xorl %r14d, %r14d
movq %rsi, 0xc0(%rsp)
movq %rax, 0x158(%rsp)
cmpq %rsi, %r14
je 0x2d4bee
movl %ecx, 0x110(%rsp)
movslq %ecx, %rdi
shlq $0x2, %rdi
movq %r14, %rcx
imulq %rax, %rcx
movq %rcx, 0xf8(%rsp)
movslq 0x9c(%rsp), %r8
movslq 0xa0(%rsp), %r9
movq 0x80(%rsp), %r10
movq 0xb0(%rsp), %r11
imulq %r10, %r11
movq %r9, %rax
imulq %r8, %rax
movq %r10, %rcx
movq %rax, 0xe8(%rsp)
imulq %rax, %rcx
addq $0xf, %rcx
andq $-0x10, %rcx
movq %rcx, 0xe0(%rsp)
movq 0x18(%rsp), %rdx
movq 0x1b0(%rdx), %rax
movq %rax, 0x170(%rsp)
testq %rax, %rax
sete 0x8(%rsp)
movslq 0x1e8(%rdx), %rcx
imulq 0x1f0(%rdx), %rcx
testq %rcx, %rcx
sete %r13b
movq 0x20(%rsp), %rcx
movl 0x2c(%rcx), %eax
movq %r14, 0x168(%rsp)
imulq 0x160(%rsp), %r14
imull %r8d, %r9d
testl %eax, %eax
movl $0x0, %esi
cmovlel %esi, %eax
movq %rax, 0x10(%rsp)
movl 0x30(%rcx), %eax
testl %eax, %eax
cmovlel %esi, %eax
movq %rax, 0x140(%rsp)
testl %r9d, %r9d
cmovlel %esi, %r9d
movq %r9, 0x48(%rsp)
movq 0x70(%rsp), %rax
orb 0x8(%rsp), %r13b
movb %r13b, 0xd8(%rsp)
imulq 0x60(%rsp), %r8
movq %r10, 0xf0(%rsp)
imulq %r10, %r8
movq %r8, 0x40(%rsp)
movq %r11, 0x68(%rsp)
imulq 0x120(%rsp), %r11
addq 0x168(%rdx), %rdi
movq %rdi, 0x8(%rsp)
movq %rax, 0xd0(%rsp)
addq %rax, %r11
movl 0xa4(%rsp), %eax
movl %eax, 0xc8(%rsp)
movl 0x98(%rsp), %eax
movl %eax, 0x114(%rsp)
xorl %edx, %edx
cmpq 0x100(%rsp), %rdx
je 0x2d4bb8
movq 0xf8(%rsp), %rax
movq %rdx, 0x38(%rsp)
leaq (%rdx,%rax), %rsi
movq 0xe0(%rsp), %rax
xorl %edx, %edx
divq 0xf0(%rsp)
cmpl $0x4, 0x114(%rsp)
cmoveq 0xe8(%rsp), %rax
pxor %xmm0, %xmm0
cmpb $0x0, 0xd8(%rsp)
jne 0x2d4916
movq 0x170(%rsp), %rcx
movd (%rcx,%rsi,4), %xmm0
imulq 0x68(%rsp), %rsi
addq 0xd0(%rsp), %rsi
imull 0xc8(%rsp), %eax
testl %eax, %eax
movl $0x0, %ecx
cmovlel %ecx, %eax
xorl %ecx, %ecx
cmpl %ecx, %eax
je 0x2d4947
movd %xmm0, (%r11,%rcx,4)
incq %rcx
jmp 0x2d4938
xorl %eax, %eax
movq %r11, 0x50(%rsp)
movq %rsi, 0x28(%rsp)
cmpq 0x140(%rsp), %rax
je 0x2d4a0e
movq 0x40(%rsp), %rdx
imulq %rax, %rdx
addq %rsi, %rdx
xorl %esi, %esi
cmpq 0x10(%rsp), %rsi
je 0x2d49fc
movq %rsi, %rcx
imulq 0x58(%rsp), %rcx
leaq (%rdx,%rcx,4), %rdi
movq 0x20(%rsp), %r10
movslq 0x2c(%r10), %rcx
movq 0x10(%r10), %r9
movq 0x40(%r10), %r8
imulq %rax, %rcx
imulq %r9, %rcx
addq (%r10), %rcx
imulq %r9, %r8
leaq (%rcx,%rsi,4), %r9
movq 0x8(%rsp), %r10
xorl %r11d, %r11d
cmpq %r15, %r11
je 0x2d49f4
leaq (%r11,%r14), %rcx
imulq %r8, %rcx
movss (%r9,%rcx), %xmm0
xorl %ecx, %ecx
cmpq %rcx, %rbp
je 0x2d49ec
movss (%r10,%rcx,4), %xmm1
mulss %xmm0, %xmm1
movslq (%rbx,%rcx,4), %r13
addss (%rdi,%r13,4), %xmm1
movss %xmm1, (%rdi,%r13,4)
incq %rcx
jmp 0x2d49c8
incq %r11
addq %r12, %r10
jmp 0x2d49b3
incq %rsi
jmp 0x2d496f
incq %rax
movq 0x50(%rsp), %r11
movq 0x28(%rsp), %rsi
jmp 0x2d4953
movq 0x18(%rsp), %rax
movq 0x120(%rax), %rax
movq %rax, 0x28(%rsp)
xorl %r13d, %r13d
movq 0x38(%rsp), %rdx
cmpq %r13, 0x48(%rsp)
je 0x2d4b99
movss (%r11,%r13,4), %xmm4
movq 0x128(%rsp), %rax
decl %eax
cmpl $0x5, %eax
ja 0x2d4a5f
leaq 0x123f8e(%rip), %rcx # 0x3f89dc
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
maxss 0x1195b1(%rip), %xmm4 # 0x3ee010
movaps %xmm4, %xmm0
jmp 0x2d4b8b
movaps %xmm4, %xmm0
movaps %xmm4, 0x130(%rsp)
callq 0x5f410
addss 0x11a209(%rip), %xmm0 # 0x3eec88
callq 0x5f200
callq 0x5f160
movq 0x38(%rsp), %rdx
movq 0x50(%rsp), %r11
mulss 0x130(%rsp), %xmm0
jmp 0x2d4b8b
movq 0x28(%rsp), %rax
maxss (%rax), %xmm4
movss 0x4(%rax), %xmm1
ucomiss %xmm1, %xmm4
movaps %xmm4, %xmm0
ja 0x2d4b88
jmp 0x2d4b8b
movss 0x11c6f0(%rip), %xmm2 # 0x3f11b8
minss %xmm2, %xmm4
movaps %xmm4, %xmm0
xorps 0x1195ba(%rip), %xmm0 # 0x3ee090
cmpltss 0x11c6dd(%rip), %xmm4 # 0x3f11bc
movaps %xmm4, %xmm1
andnps %xmm0, %xmm1
andps %xmm2, %xmm4
orps %xmm1, %xmm4
movaps %xmm4, %xmm0
callq 0x5f410
movq 0x38(%rsp), %rdx
movq 0x50(%rsp), %r11
movaps %xmm0, %xmm1
movss 0x11a180(%rip), %xmm0 # 0x3eec88
addss %xmm0, %xmm1
divss %xmm1, %xmm0
jmp 0x2d4b8b
pxor %xmm0, %xmm0
cmpltss %xmm4, %xmm0
movaps %xmm0, %xmm1
movss 0x11a162(%rip), %xmm2 # 0x3eec88
andps %xmm2, %xmm1
movq 0x28(%rsp), %rax
movss (%rax), %xmm2
andnps %xmm2, %xmm0
orps %xmm1, %xmm0
mulss %xmm4, %xmm0
jmp 0x2d4b8b
movq 0x28(%rsp), %rax
movss (%rax), %xmm1
movss 0x4(%rax), %xmm2
movaps %xmm2, %xmm3
xorps 0x11953a(%rip), %xmm3 # 0x3ee090
divss %xmm1, %xmm3
pxor %xmm0, %xmm0
ucomiss %xmm3, %xmm4
jb 0x2d4b8b
movss 0x11a11d(%rip), %xmm0 # 0x3eec88
divss %xmm1, %xmm0
addss %xmm0, %xmm3
ucomiss %xmm3, %xmm4
ja 0x2d4a5f
mulss %xmm4, %xmm1
addss %xmm2, %xmm1
mulss %xmm4, %xmm1
movaps %xmm1, %xmm0
movd %xmm0, (%r11,%r13,4)
incq %r13
jmp 0x2d4a27
incq %rdx
addq 0x68(%rsp), %r11
movq 0x8(%rsp), %rax
addq 0x178(%rsp), %rax
movq %rax, 0x8(%rsp)
jmp 0x2d48b9
movq 0x168(%rsp), %r14
incq %r14
movq 0x158(%rsp), %rax
addq %rax, 0x120(%rsp)
movl 0x110(%rsp), %ecx
addl 0x10c(%rsp), %ecx
movq 0xc0(%rsp), %rsi
jmp 0x2d4763
leaq 0x180(%rsp), %rdi
callq 0x624be
leaq 0x70(%rsp), %rsi
movq 0x18(%rsp), %rdi
movq 0x150(%rsp), %rbx
movq %rbx, %rdx
movq 0x118(%rsp), %rcx
callq 0x2d4cc4
cmpq $0x0, (%rbx)
je 0x2d4c33
movslq 0x38(%rbx), %rax
imulq 0x40(%rbx), %rax
xorl %ebx, %ebx
testq %rax, %rax
jne 0x2d4c36
pushq $-0x64
popq %rbx
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x2d4c67
lock
decl (%rax)
jne 0x2d4c67
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x2d4c5f
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2d4c67
movq %rsi, %rdi
callq 0x5f3e0
movl %ebx, %eax
addq $0x198, %rsp # imm = 0x198
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x2d4cbb
jmp 0x2d4c7f
movq %rax, %rbx
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x2d4cb3
lock
decl (%rax)
jne 0x2d4cb3
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
jne 0x2d4cad
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2d4cb3
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
nop
| /csukuangfj[P]ncnn/src/layer/deconvolutiondepthwise.cpp |
ncnn::DeconvolutionDepthWise_x86::create_group_ops(ncnn::Option const&) | int DeconvolutionDepthWise_x86::create_group_ops(const Option& opt)
{
// create Deconvolution op for each group
const int maxk = kernel_w * kernel_h;
int channels = (weight_data_size / group) / maxk / (num_output / group) * group;
for (int i = 0; i < (int)group_ops.size(); i++)
delete group_ops[i];
group_ops.clear();
const int channels_g = channels / group;
const int num_output_g = num_output / group;
group_ops.resize(group);
for (int g = 0; g < group; g++)
{
Mat weight_data_g = weight_data.range(maxk * channels_g * num_output_g * g, maxk * channels_g * num_output_g).clone();
Mat bias_data_g;
if (bias_term)
bias_data_g = bias_data.range(num_output_g * g, num_output_g);
ncnn::Layer* op = ncnn::create_layer(ncnn::LayerType::Deconvolution);
// set param
ncnn::ParamDict pd;
pd.set(0, num_output_g); // num_output
pd.set(1, kernel_w);
pd.set(11, kernel_h);
pd.set(2, dilation_w);
pd.set(12, dilation_h);
pd.set(3, stride_w);
pd.set(13, stride_h);
pd.set(4, 0); // pad_w
pd.set(14, 0); // pad_h
pd.set(18, output_pad_right);
pd.set(19, output_pad_bottom);
pd.set(5, bias_term);
pd.set(6, maxk * channels_g * num_output_g); // weight_data_size
pd.set(9, activation_type);
pd.set(10, activation_params);
op->load_param(pd);
// set weights
if (bias_term)
{
ncnn::Mat weights[2];
weights[0] = weight_data_g;
weights[1] = bias_data_g;
op->load_model(ModelBinFromMatArray(weights));
}
else
{
ncnn::Mat weights[1];
weights[0] = weight_data_g;
op->load_model(ModelBinFromMatArray(weights));
}
op->create_pipeline(opt);
group_ops[g] = op;
}
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x158, %rsp # imm = 0x158
movq %rsi, 0x120(%rsp)
movq %rdi, %r14
movq (%rdi), %rax
movq -0x18(%rax), %rdx
movl 0xd0(%rdi,%rdx), %ecx
movl 0xd8(%rdi,%rdx), %ebp
imull 0xd4(%rdi,%rdx), %ebp
movl 0x110(%rdi,%rdx), %eax
movl 0x114(%rdi,%rdx), %r15d
cltd
idivl %r15d
cltd
idivl %ebp
movl %eax, %esi
movl %ecx, %eax
cltd
idivl %r15d
movl %eax, %ecx
movl %esi, %eax
cltd
idivl %ecx
movl %eax, %ebx
leaq 0x8(%rdi), %rax
movq %rax, 0xa8(%rsp)
xorl %r12d, %r12d
movq 0x8(%r14), %rax
movq 0x10(%r14), %rcx
movq %rcx, %rdx
subq %rax, %rdx
shrq $0x3, %rdx
movslq %edx, %rdx
cmpq %rdx, %r12
jge 0x2d5365
movq (%rax,%r12,8), %rdi
testq %rdi, %rdi
je 0x2d5360
movq (%rdi), %rax
callq *0x8(%rax)
incq %r12
jmp 0x2d5337
imull %r15d, %ebx
cmpq %rax, %rcx
je 0x2d5372
movq %rax, 0x10(%r14)
movq (%r14), %rax
movq -0x18(%rax), %rcx
movslq 0x114(%r14,%rcx), %rsi
movl %ebx, %eax
cltd
idivl %esi
movl %eax, %ebx
movl 0xd0(%r14,%rcx), %eax
cltd
idivl %esi
movl %eax, %r15d
movq 0xa8(%rsp), %rdi
callq 0x6fbc2
leaq 0x120(%r14), %rax
movq %rax, 0x128(%rsp)
imull %ebp, %ebx
imull %r15d, %ebx
movl %ebx, 0xa4(%rsp)
movslq %ebx, %rax
movq %rax, 0x130(%rsp)
movl %r15d, 0xc(%rsp)
movslq %r15d, %rax
movq %rax, 0x118(%rsp)
xorl %r12d, %r12d
leaq 0x148(%rsp), %r15
movq (%r14), %rax
movq -0x18(%rax), %rax
movslq 0x114(%r14,%rax), %rcx
cmpq %rcx, %r12
jge 0x2d5a6e
movq %r12, %rcx
movq 0x130(%rsp), %rdi
imulq %rdi, %rcx
movq 0x178(%r14,%rax), %rdx
imulq %rdx, %rcx
addq 0x168(%r14,%rax), %rcx
movl 0x180(%r14,%rax), %esi
movq 0x188(%r14,%rax), %rax
movq %rcx, 0x10(%rsp)
andq $0x0, 0x18(%rsp)
movq %rdx, 0x20(%rsp)
movl %esi, 0x28(%rsp)
movq %rax, 0x30(%rsp)
pushq $0x1
popq %rax
movl %eax, 0x38(%rsp)
movl %edi, 0x3c(%rsp)
movabsq $0x100000001, %rcx # imm = 0x100000001
movq %rcx, 0x40(%rsp)
movl %eax, 0x48(%rsp)
movq %rdi, 0x50(%rsp)
leaq 0xd0(%rsp), %rdi
leaq 0x10(%rsp), %rsi
xorl %edx, %edx
callq 0x624f0
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x2d54af
lock
decl (%rax)
jne 0x2d54af
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x2d54a7
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2d54af
movq %rsi, %rdi
callq 0x5f3e0
movq (%r14), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0x10c(%r14,%rax)
je 0x2d5521
movq %r12, %r13
movq 0x118(%rsp), %rcx
imulq %rcx, %r13
movq 0x1c0(%r14,%rax), %rsi
movq %rsi, 0xc0(%rsp)
imulq %rsi, %r13
addq 0x1b0(%r14,%rax), %r13
movl 0x1c8(%r14,%rax), %edx
movl %edx, 0x8(%rsp)
movq 0x1d0(%r14,%rax), %rax
movq %rax, 0xb8(%rsp)
movl 0xc(%rsp), %eax
movq %rax, 0xc8(%rsp)
pushq $0x1
popq %rbx
movq %rcx, 0xb0(%rsp)
jmp 0x2d555e
movq $0x0, 0xc8(%rsp)
xorl %ebx, %ebx
movq $0x0, 0xb8(%rsp)
movl $0x0, 0x8(%rsp)
movq $0x0, 0xc0(%rsp)
xorl %r13d, %r13d
movq $0x0, 0xb0(%rsp)
pushq $0x8
popq %rdi
callq 0x782bf
movq %rax, %rbp
movq %r15, %rdi
callq 0x71548
movq %r15, %rdi
xorl %esi, %esi
movl 0xc(%rsp), %edx
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xd4(%r14,%rax), %edx
movq %r15, %rdi
pushq $0x1
popq %rsi
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xd8(%r14,%rax), %edx
movq %r15, %rdi
pushq $0xb
popq %rsi
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xdc(%r14,%rax), %edx
movq %r15, %rdi
pushq $0x2
popq %rsi
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xe0(%r14,%rax), %edx
movq %r15, %rdi
pushq $0xc
popq %rsi
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xe4(%r14,%rax), %edx
movq %r15, %rdi
pushq $0x3
popq %rsi
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xe8(%r14,%rax), %edx
movq %r15, %rdi
pushq $0xd
popq %rsi
callq 0x7193a
movq %r15, %rdi
pushq $0x4
popq %rsi
xorl %edx, %edx
callq 0x7193a
movq %r15, %rdi
pushq $0xe
popq %rsi
xorl %edx, %edx
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xfc(%r14,%rax), %edx
movq %r15, %rdi
pushq $0x12
popq %rsi
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0x100(%r14,%rax), %edx
movq %r15, %rdi
pushq $0x13
popq %rsi
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0x10c(%r14,%rax), %edx
movq %r15, %rdi
pushq $0x5
popq %rsi
callq 0x7193a
movq %r15, %rdi
pushq $0x6
popq %rsi
movl 0xa4(%rsp), %edx
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0x118(%r14,%rax), %edx
movq %r15, %rdi
pushq $0x9
popq %rsi
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rdx
addq 0x128(%rsp), %rdx
movq %r15, %rdi
pushq $0xa
popq %rsi
callq 0x7196c
movq (%rbp), %rax
movq %rbp, %rdi
movq %r15, %rsi
callq *0x10(%rax)
movq (%r14), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0x10c(%r14,%rax)
je 0x2d575e
andq $0x0, 0x50(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, 0x10(%rsp)
movups %xmm0, 0x1c(%rsp)
leaq 0x30(%rsp), %rax
movups %xmm0, 0xc(%rax)
movaps %xmm0, (%rax)
andq $0x0, 0x98(%rsp)
movups %xmm0, 0x34(%rax)
movups %xmm0, 0x28(%rax)
movups %xmm0, 0x54(%rax)
movups %xmm0, 0x48(%rax)
movq 0xd8(%rsp), %rax
testq %rax, %rax
je 0x2d5730
lock
incl (%rax)
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x2d57d6
lock
decl (%rax)
jne 0x2d57d6
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x2d57ce
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2d57d6
andq $0x0, 0x50(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, 0x10(%rsp)
movups %xmm0, 0x1c(%rsp)
leaq 0x30(%rsp), %rax
movups %xmm0, 0xc(%rax)
movaps %xmm0, (%rax)
movq 0xd8(%rsp), %rax
testq %rax, %rax
leaq 0x138(%rsp), %rbx
je 0x2d59b7
lock
incl (%rax)
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x2d59b7
lock
decl (%rax)
jne 0x2d59b7
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x2d59af
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2d59b7
movq %rsi, %rdi
callq 0x5f3e0
movaps 0xd0(%rsp), %xmm0
movaps %xmm0, 0x10(%rsp)
movq 0xe0(%rsp), %rax
movq %rax, 0x20(%rsp)
movl 0xe8(%rsp), %eax
movl %eax, 0x28(%rsp)
movq 0xf0(%rsp), %rax
movq %rax, 0x30(%rsp)
movups 0xf8(%rsp), %xmm0
movups %xmm0, 0x38(%rsp)
movl 0x108(%rsp), %eax
movl %eax, 0x48(%rsp)
movq 0x110(%rsp), %rax
movq %rax, 0x50(%rsp)
movq 0x60(%rsp), %rax
testq %rax, %rax
je 0x2d585b
lock
decl (%rax)
jne 0x2d585b
movq 0x58(%rsp), %rsi
movq 0x78(%rsp), %rdi
testq %rdi, %rdi
je 0x2d5853
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2d585b
movq %rsi, %rdi
callq 0x5f3e0
movq %r13, 0x58(%rsp)
andq $0x0, 0x60(%rsp)
movq 0xc0(%rsp), %rax
movq %rax, 0x68(%rsp)
movl 0x8(%rsp), %eax
movl %eax, 0x70(%rsp)
movq 0xb8(%rsp), %rax
movq %rax, 0x78(%rsp)
movl %ebx, 0x80(%rsp)
movq 0xc8(%rsp), %rax
movl %eax, 0x84(%rsp)
movl %ebx, 0x88(%rsp)
movl %ebx, 0x8c(%rsp)
movl %ebx, 0x90(%rsp)
movq 0xb0(%rsp), %rax
movq %rax, 0x98(%rsp)
leaq 0x138(%rsp), %rbx
movq %rbx, %rdi
leaq 0x10(%rsp), %rsi
callq 0x6b00e
movq (%rbp), %rax
movq %rbp, %rdi
movq %rbx, %rsi
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x6b03a
pushq $0x48
popq %rbx
movq 0x18(%rsp,%rbx), %rax
testq %rax, %rax
je 0x2d591e
lock
decl (%rax)
jne 0x2d591e
movq 0x10(%rsp,%rbx), %rsi
movq 0x30(%rsp,%rbx), %rdi
testq %rdi, %rdi
je 0x2d5916
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2d591e
movq %rsi, %rdi
callq 0x5f3e0
leaq (%rsp,%rbx), %rax
addq $0x10, %rax
andq $0x0, 0x40(%rax)
xorps %xmm0, %xmm0
movups %xmm0, 0xc(%rax)
movups %xmm0, (%rax)
andl $0x0, 0x38(%rax)
movups %xmm0, 0x28(%rax)
addq $-0x48, %rbx
cmpq $-0x48, %rbx
jne 0x2d58f0
movq (%rbp), %rax
movq %rbp, %rdi
movq 0x120(%rsp), %rsi
callq *0x20(%rax)
movq 0xa8(%rsp), %rax
movq (%rax), %rax
movq %rbp, (%rax,%r12,8)
movq %r15, %rdi
callq 0x71614
movq 0xd8(%rsp), %rax
testq %rax, %rax
je 0x2d59a7
lock
decl (%rax)
jne 0x2d59a7
movq 0xd0(%rsp), %rsi
movq 0xf0(%rsp), %rdi
testq %rdi, %rdi
je 0x2d599f
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2d59a7
movq %rsi, %rdi
callq 0x5f3e0
incq %r12
jmp 0x2d53e6
movq %rsi, %rdi
callq 0x5f3e0
movaps 0xd0(%rsp), %xmm0
movaps %xmm0, 0x10(%rsp)
movq 0xe0(%rsp), %rax
movq %rax, 0x20(%rsp)
movl 0xe8(%rsp), %eax
movl %eax, 0x28(%rsp)
movq 0xf0(%rsp), %rax
movq %rax, 0x30(%rsp)
movups 0xf8(%rsp), %xmm0
movups %xmm0, 0x38(%rsp)
movl 0x108(%rsp), %eax
movl %eax, 0x48(%rsp)
movq 0x110(%rsp), %rax
movq %rax, 0x50(%rsp)
movq %rbx, %rdi
leaq 0x10(%rsp), %rsi
callq 0x6b00e
movq (%rbp), %rax
movq %rbp, %rdi
movq %rbx, %rsi
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x6b03a
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x2d5947
lock
decl (%rax)
jne 0x2d5947
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x2d5a61
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2d5947
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2d5947
xorl %eax, %eax
addq $0x158, %rsp # imm = 0x158
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x2d5a95
jmp 0x2d5bed
jmp 0x2d5bed
jmp 0x2d5bed
jmp 0x2d5aff
movq %rax, %rbx
jmp 0x2d5aaa
movq %rax, %rbx
leaq 0x138(%rsp), %rdi
callq 0x6b03a
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x2d5ba1
lock
decl (%rax)
jne 0x2d5ba1
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
jne 0x2d5add
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2d5ba1
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2d5ba1
jmp 0x2d5bed
movq %rax, %rbx
leaq 0x138(%rsp), %rdi
callq 0x6b03a
jmp 0x2d5b02
movq %rax, %rbx
pushq $0x48
popq %r14
xorps %xmm0, %xmm0
movq 0x18(%rsp,%r14), %rax
testq %rax, %rax
je 0x2d5b3d
lock
decl (%rax)
jne 0x2d5b3d
movq 0x10(%rsp,%r14), %rsi
movq 0x30(%rsp,%r14), %rdi
testq %rdi, %rdi
je 0x2d5b32
movq (%rdi), %rax
callq *0x18(%rax)
xorps %xmm0, %xmm0
jmp 0x2d5b3d
movq %rsi, %rdi
callq 0x5f3e0
xorps %xmm0, %xmm0
leaq (%rsp,%r14), %rax
addq $0x10, %rax
andq $0x0, 0x40(%rax)
movups %xmm0, 0xc(%rax)
movups %xmm0, (%rax)
andl $0x0, 0x38(%rax)
movups %xmm0, 0x28(%rax)
addq $-0x48, %r14
cmpq $-0x48, %r14
jne 0x2d5b09
jmp 0x2d5ba1
jmp 0x2d5bed
jmp 0x2d5b97
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x2d5be5
lock
decl (%rax)
jne 0x2d5be5
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x2d5bd5
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2d5be5
jmp 0x2d5bed
movq %rax, %rbx
jmp 0x2d5bae
jmp 0x2d5bed
movq %rax, %rbx
leaq 0x148(%rsp), %rdi
callq 0x71614
movq 0xd8(%rsp), %rax
testq %rax, %rax
je 0x2d5be5
lock
decl (%rax)
jne 0x2d5be5
movq 0xd0(%rsp), %rsi
movq 0xf0(%rsp), %rdi
testq %rdi, %rdi
jne 0x2d5bdf
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2d5be5
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
nop
| /csukuangfj[P]ncnn/src/layer/x86/deconvolutiondepthwise_x86.cpp |