column  dtype          min    max
idx     int64          0      2.11M
name    stringlengths  1      118k
code    stringlengths  6      516k
asm     stringlengths  21     4.64M
file    stringlengths  39     143
opt     stringclasses  1 value
path    stringlengths  20     133
2,113,200
ncnn::UnaryOp_x86_functor::unary_op_neg::func_pack4(float vector[4] const&) const
__m128 func_pack4(const __m128& x) const { return _mm_sub_ps(_mm_setzero_ps(), x); }
movq %rdi, -0x40(%rsp) movq %rsi, -0x48(%rsp) xorps %xmm0, %xmm0 movaps %xmm0, -0x18(%rsp) movaps -0x18(%rsp), %xmm1 movq -0x48(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, -0x28(%rsp) movaps %xmm0, -0x38(%rsp) movaps -0x28(%rsp), %xmm0 subps -0x38(%rsp), %xmm0 retq nopw %cs:(%rax,%rax) nop
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/src/layer/x86/unaryop_x86.cpp
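Note on the record above: it negates four lanes by subtracting from zero. One subtlety worth flagging is that 0.0f - x and a sign-bit flip differ at x = +0.0f (subtraction yields +0.0f, XOR yields -0.0f, the true IEEE negation). A minimal standalone sketch comparing both variants; neg_sub and neg_xor are hypothetical names, not ncnn API:

#include <immintrin.h>
#include <cstdio>

// Two ways to negate four packed floats: subtract from zero (as in the
// record above) and XOR with the sign-bit mask -0.0f.
static __m128 neg_sub(__m128 x) { return _mm_sub_ps(_mm_setzero_ps(), x); }
static __m128 neg_xor(__m128 x) { return _mm_xor_ps(_mm_set1_ps(-0.0f), x); }

int main()
{
    __m128 v = _mm_setr_ps(1.5f, -2.0f, 0.0f, -0.0f);
    float a[4], b[4];
    _mm_storeu_ps(a, neg_sub(v));
    _mm_storeu_ps(b, neg_xor(v));
    for (int i = 0; i < 4; i++)
        printf("lane %d: sub=%g xor=%g\n", i, a[i], b[i]);
    return 0;
}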
2,113,201
ncnn::UnaryOp_x86_functor::unary_op_neg::func(float const&) const
float func(const float& x) const { return -x; }
movq %rdi, -0x8(%rsp) movq %rsi, -0x10(%rsp) movq -0x10(%rsp), %rax movss (%rax), %xmm0 movd %xmm0, %eax xorl $0x80000000, %eax # imm = 0x80000000 movd %eax, %xmm0 retq nopw %cs:(%rax,%rax) nopl (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/src/layer/x86/unaryop_x86.cpp
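The scalar record above shows that even at O0 the compiler lowers return -x; to an integer XOR of the sign bit (the xorl $0x80000000, %eax in the listing). A portable restatement of that bit trick; neg_bits is a hypothetical helper for illustration only:

#include <cstdint>
#include <cstring>
#include <cstdio>

// Negate a float by flipping its IEEE-754 sign bit, mirroring the
// xorl $0x80000000 in the listing above.
static float neg_bits(float x)
{
    uint32_t u;
    std::memcpy(&u, &x, sizeof u); // bit-cast without aliasing UB
    u ^= 0x80000000u;              // flip the sign bit
    std::memcpy(&x, &u, sizeof u);
    return x;
}

int main()
{
    printf("%g %g %g\n", neg_bits(3.25f), neg_bits(-1.0f), neg_bits(0.0f));
    return 0;
}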
2,113,202
ncnn::UnaryOp_x86_functor::unary_op_floor::func_pack4(float vector[4] const&) const
__m128 func_pack4(const __m128& x) const
{
#if __SSE4_1__
    return _mm_floor_ps(x);
#endif // __SSE4_1__
    // Use negative zero as the sign bit mask.
    const __m128 magic_negative_zero = _mm_set_ps1(-0.0f);
    // The smallest float that has no fractional part. (2^23)
    const __m128 magic_smallest_no_fraction = _mm_set_ps1(8388608.0f);
    // absolute = abs(x);
    __m128 absolute = _mm_andnot_ps(magic_negative_zero, x);
    // negative_mask = magic_negative_zero && x;
    __m128 negative_mask = _mm_and_ps(magic_negative_zero, x);
    // no_fraction = (magic_smallest_no_fraction < absolute);
    __m128 no_fraction = _mm_cmplt_ps(magic_smallest_no_fraction, absolute);
    // truncated = static_cast<float>(static_cast<int32_t>(absolute));
    __m128 truncated = _mm_cvtepi32_ps(_mm_cvttps_epi32(absolute));
    // truncated_with_sign = (truncated || negative_mask);
    __m128 truncated_with_sign = _mm_or_ps(truncated, negative_mask);
    // negative_fix = ((x < truncated_with_sign) ? 1.0f : 0.0f);
    __m128 negative_fix = _mm_and_ps(
        _mm_cmplt_ps(x, truncated_with_sign),
        _mm_set_ps1(1.0f));
    // fixed_result = truncated_with_sign - negative_fix;
    __m128 fixed_result = _mm_sub_ps(truncated_with_sign, negative_fix);
    // return ((x && no_fraction) || (!no_fraction && fixed_result));
    return _mm_or_ps(
        _mm_and_ps(x, no_fraction),
        _mm_andnot_ps(no_fraction, fixed_result));
}
subq $0x1f8, %rsp # imm = 0x1F8 movq %rdi, 0x18(%rsp) movq %rsi, 0x10(%rsp) movl $0x80000000, 0x2c(%rsp) # imm = 0x80000000 movss 0x2c(%rsp), %xmm0 movss %xmm0, 0x8c(%rsp) movss 0x8c(%rsp), %xmm0 shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0] movaps %xmm0, 0x70(%rsp) movaps 0x70(%rsp), %xmm0 movaps %xmm0, (%rsp) movl $0x4b000000, 0x28(%rsp) # imm = 0x4B000000 movss 0x28(%rsp), %xmm0 movss %xmm0, 0xac(%rsp) movss 0xac(%rsp), %xmm0 shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0] movaps %xmm0, 0x90(%rsp) movaps 0x90(%rsp), %xmm0 movaps %xmm0, -0x10(%rsp) movaps (%rsp), %xmm1 movq 0x10(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, 0x60(%rsp) movaps %xmm0, 0x50(%rsp) movaps 0x60(%rsp), %xmm0 pcmpeqd %xmm2, %xmm2 movaps 0x50(%rsp), %xmm1 pandn %xmm1, %xmm0 movaps %xmm0, -0x20(%rsp) movaps (%rsp), %xmm1 movq 0x10(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, 0x140(%rsp) movaps %xmm0, 0x130(%rsp) movaps 0x140(%rsp), %xmm0 movaps 0x130(%rsp), %xmm1 pand %xmm1, %xmm0 movaps %xmm0, -0x30(%rsp) movaps -0x10(%rsp), %xmm1 movaps -0x20(%rsp), %xmm0 movaps %xmm1, 0x180(%rsp) movaps %xmm0, 0x170(%rsp) movaps 0x180(%rsp), %xmm0 movaps 0x170(%rsp), %xmm1 cmpltps %xmm1, %xmm0 movaps %xmm0, -0x40(%rsp) movaps -0x20(%rsp), %xmm0 movaps %xmm0, 0x1a0(%rsp) cvttps2dq 0x1a0(%rsp), %xmm0 movaps %xmm0, 0x190(%rsp) cvtdq2ps 0x190(%rsp), %xmm0 movaps %xmm0, -0x50(%rsp) movaps -0x50(%rsp), %xmm1 movaps -0x30(%rsp), %xmm0 movaps %xmm1, 0x1e0(%rsp) movaps %xmm0, 0x1d0(%rsp) movaps 0x1e0(%rsp), %xmm0 movaps 0x1d0(%rsp), %xmm1 por %xmm1, %xmm0 movaps %xmm0, -0x60(%rsp) movq 0x10(%rsp), %rax movaps (%rax), %xmm1 movaps -0x60(%rsp), %xmm0 movaps %xmm1, 0x160(%rsp) movaps %xmm0, 0x150(%rsp) movaps 0x160(%rsp), %xmm1 movaps 0x150(%rsp), %xmm0 cmpltps %xmm0, %xmm1 movl $0x3f800000, 0x24(%rsp) # imm = 0x3F800000 movss 0x24(%rsp), %xmm0 movss %xmm0, 0xcc(%rsp) movss 0xcc(%rsp), %xmm0 shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0] movaps %xmm0, 0xb0(%rsp) movaps 0xb0(%rsp), %xmm0 movaps %xmm1, 0x120(%rsp) movaps %xmm0, 0x110(%rsp) movaps 0x120(%rsp), %xmm0 movaps 0x110(%rsp), %xmm1 pand %xmm1, %xmm0 movaps %xmm0, -0x70(%rsp) movaps -0x60(%rsp), %xmm1 movaps -0x70(%rsp), %xmm0 movaps %xmm1, 0xe0(%rsp) movaps %xmm0, 0xd0(%rsp) movaps 0xe0(%rsp), %xmm0 movaps 0xd0(%rsp), %xmm1 subps %xmm1, %xmm0 movaps %xmm0, -0x80(%rsp) movq 0x10(%rsp), %rax movaps (%rax), %xmm1 movaps -0x40(%rsp), %xmm0 movaps %xmm1, 0x100(%rsp) movaps %xmm0, 0xf0(%rsp) movaps 0x100(%rsp), %xmm1 movaps 0xf0(%rsp), %xmm0 pand %xmm0, %xmm1 movaps -0x40(%rsp), %xmm3 movaps -0x80(%rsp), %xmm0 movaps %xmm3, 0x40(%rsp) movaps %xmm0, 0x30(%rsp) movaps 0x40(%rsp), %xmm0 pxor %xmm2, %xmm0 movaps 0x30(%rsp), %xmm2 pand %xmm2, %xmm0 movaps %xmm1, 0x1c0(%rsp) movaps %xmm0, 0x1b0(%rsp) movaps 0x1c0(%rsp), %xmm0 movaps 0x1b0(%rsp), %xmm1 por %xmm1, %xmm0 addq $0x1f8, %rsp # imm = 0x1F8 retq nopw %cs:(%rax,%rax) nopl (%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/src/layer/x86/unaryop_x86.cpp
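The SSE2 fallback above implements floor branchlessly: truncate toward zero, reattach the sign, subtract 1 where truncation rounded a negative input up, and pass through any value whose magnitude is at least 2^23 (such floats already have no fractional part). A scalar restatement for reference; floor_ref is a hypothetical illustration, not part of ncnn:

#include <cmath>
#include <cstdio>

// Scalar mirror of the branchless SSE2 floor fallback above.
static float floor_ref(float x)
{
    if (std::fabs(x) >= 8388608.0f)  // 2^23: already an integer
        return x;
    float truncated = (float)(int)x; // round toward zero
    if (x < truncated)               // negative input was rounded up
        truncated -= 1.0f;
    return truncated;
}

int main()
{
    for (float t : { 2.5f, -2.5f, -3.0f, -0.25f, 8388609.0f })
        printf("floor_ref(%g) = %g (std::floor: %g)\n", t, floor_ref(t), std::floor(t));
    return 0;
}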
2,113,203
ncnn::UnaryOp_x86_functor::unary_op_floor::func(float const&) const
float func(const float& x) const { return (float)floor(x); }
subq $0x18, %rsp movq %rdi, 0x10(%rsp) movq %rsi, 0x8(%rsp) movq 0x8(%rsp), %rax movss (%rax), %xmm0 callq 0x8e720 addq $0x18, %rsp retq nopw %cs:(%rax,%rax) nopl (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/src/layer/x86/unaryop_x86.cpp
2,113,204
ncnn::UnaryOp_x86_functor::unary_op_ceil::func_pack4(float vector[4] const&) const
__m128 func_pack4(const __m128& x) const
{
#if __SSE4_1__
    return _mm_ceil_ps(x);
#endif // __SSE4_1__
    // Use negative zero as the sign bit mask.
    const __m128 magic_negative_zero = _mm_set_ps1(-0.0f);
    // The smallest float that has no fractional part. (2^23)
    const __m128 magic_smallest_no_fraction = _mm_set_ps1(8388608.0f);
    // absolute = abs(x);
    __m128 absolute = _mm_andnot_ps(magic_negative_zero, x);
    // negative_mask = magic_negative_zero && x;
    __m128 negative_mask = _mm_and_ps(magic_negative_zero, x);
    // no_fraction = (magic_smallest_no_fraction < absolute);
    __m128 no_fraction = _mm_cmplt_ps(magic_smallest_no_fraction, absolute);
    // truncated = static_cast<float>(static_cast<int32_t>(absolute));
    __m128 truncated = _mm_cvtepi32_ps(_mm_cvttps_epi32(absolute));
    // truncated_with_sign = (truncated || negative_mask);
    __m128 truncated_with_sign = _mm_or_ps(truncated, negative_mask);
    // positive_fix = ((x > -0.0f) && (x > truncated_with_sign) ? -1.0f : 0.0f);
    __m128 positive_fix = _mm_and_ps(
        _mm_and_ps(
            _mm_cmpgt_ps(x, magic_negative_zero),
            _mm_cmpgt_ps(x, truncated_with_sign)),
        _mm_set_ps1(-1.0f));
    // fixed_result = truncated_with_sign - positive_fix;
    __m128 fixed_result = _mm_sub_ps(truncated_with_sign, positive_fix);
    // return ((x && no_fraction) || (!no_fraction && fixed_result));
    return _mm_or_ps(
        _mm_and_ps(x, no_fraction),
        _mm_andnot_ps(no_fraction, fixed_result));
}
subq $0x238, %rsp # imm = 0x238 movq %rdi, 0x18(%rsp) movq %rsi, 0x10(%rsp) movl $0x80000000, 0x2c(%rsp) # imm = 0x80000000 movss 0x2c(%rsp), %xmm0 movss %xmm0, 0x8c(%rsp) movss 0x8c(%rsp), %xmm0 shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0] movaps %xmm0, 0x70(%rsp) movaps 0x70(%rsp), %xmm0 movaps %xmm0, (%rsp) movl $0x4b000000, 0x28(%rsp) # imm = 0x4B000000 movss 0x28(%rsp), %xmm0 movss %xmm0, 0xac(%rsp) movss 0xac(%rsp), %xmm0 shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0] movaps %xmm0, 0x90(%rsp) movaps 0x90(%rsp), %xmm0 movaps %xmm0, -0x10(%rsp) movaps (%rsp), %xmm1 movq 0x10(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, 0x60(%rsp) movaps %xmm0, 0x50(%rsp) movaps 0x60(%rsp), %xmm0 pcmpeqd %xmm2, %xmm2 movaps 0x50(%rsp), %xmm1 pandn %xmm1, %xmm0 movaps %xmm0, -0x20(%rsp) movaps (%rsp), %xmm1 movq 0x10(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, 0x160(%rsp) movaps %xmm0, 0x150(%rsp) movaps 0x160(%rsp), %xmm0 movaps 0x150(%rsp), %xmm1 pand %xmm1, %xmm0 movaps %xmm0, -0x30(%rsp) movaps -0x10(%rsp), %xmm1 movaps -0x20(%rsp), %xmm0 movaps %xmm1, 0x180(%rsp) movaps %xmm0, 0x170(%rsp) movaps 0x180(%rsp), %xmm0 movaps 0x170(%rsp), %xmm1 cmpltps %xmm1, %xmm0 movaps %xmm0, -0x40(%rsp) movaps -0x20(%rsp), %xmm0 movaps %xmm0, 0x1a0(%rsp) cvttps2dq 0x1a0(%rsp), %xmm0 movaps %xmm0, 0x190(%rsp) cvtdq2ps 0x190(%rsp), %xmm0 movaps %xmm0, -0x50(%rsp) movaps -0x50(%rsp), %xmm1 movaps -0x30(%rsp), %xmm0 movaps %xmm1, 0x1e0(%rsp) movaps %xmm0, 0x1d0(%rsp) movaps 0x1e0(%rsp), %xmm0 movaps 0x1d0(%rsp), %xmm1 por %xmm1, %xmm0 movaps %xmm0, -0x60(%rsp) movq 0x10(%rsp), %rax movaps (%rax), %xmm1 movaps (%rsp), %xmm0 movaps %xmm1, 0x220(%rsp) movaps %xmm0, 0x210(%rsp) movaps 0x210(%rsp), %xmm1 movaps 0x220(%rsp), %xmm0 cmpltps %xmm0, %xmm1 movq 0x10(%rsp), %rax movaps (%rax), %xmm3 movaps -0x60(%rsp), %xmm0 movaps %xmm3, 0x200(%rsp) movaps %xmm0, 0x1f0(%rsp) movaps 0x1f0(%rsp), %xmm0 movaps 0x200(%rsp), %xmm3 cmpltps %xmm3, %xmm0 movaps %xmm1, 0x140(%rsp) movaps %xmm0, 0x130(%rsp) movaps 0x140(%rsp), %xmm1 movaps 0x130(%rsp), %xmm0 pand %xmm0, %xmm1 movl $0xbf800000, 0x24(%rsp) # imm = 0xBF800000 movss 0x24(%rsp), %xmm0 movss %xmm0, 0xcc(%rsp) movss 0xcc(%rsp), %xmm0 shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0] movaps %xmm0, 0xb0(%rsp) movaps 0xb0(%rsp), %xmm0 movaps %xmm1, 0x120(%rsp) movaps %xmm0, 0x110(%rsp) movaps 0x120(%rsp), %xmm0 movaps 0x110(%rsp), %xmm1 pand %xmm1, %xmm0 movaps %xmm0, -0x70(%rsp) movaps -0x60(%rsp), %xmm1 movaps -0x70(%rsp), %xmm0 movaps %xmm1, 0xe0(%rsp) movaps %xmm0, 0xd0(%rsp) movaps 0xe0(%rsp), %xmm0 movaps 0xd0(%rsp), %xmm1 subps %xmm1, %xmm0 movaps %xmm0, -0x80(%rsp) movq 0x10(%rsp), %rax movaps (%rax), %xmm1 movaps -0x40(%rsp), %xmm0 movaps %xmm1, 0x100(%rsp) movaps %xmm0, 0xf0(%rsp) movaps 0x100(%rsp), %xmm1 movaps 0xf0(%rsp), %xmm0 pand %xmm0, %xmm1 movaps -0x40(%rsp), %xmm3 movaps -0x80(%rsp), %xmm0 movaps %xmm3, 0x40(%rsp) movaps %xmm0, 0x30(%rsp) movaps 0x40(%rsp), %xmm0 pxor %xmm2, %xmm0 movaps 0x30(%rsp), %xmm2 pand %xmm2, %xmm0 movaps %xmm1, 0x1c0(%rsp) movaps %xmm0, 0x1b0(%rsp) movaps 0x1c0(%rsp), %xmm0 movaps 0x1b0(%rsp), %xmm1 por %xmm1, %xmm0 addq $0x238, %rsp # imm = 0x238 retq nopw %cs:(%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/src/layer/x86/unaryop_x86.cpp
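The ceil fallback above reuses the floor machinery; only the fix-up direction changes: add 1 where truncation rounded a positive input down. A scalar restatement, again with a hypothetical name:

#include <cmath>
#include <cstdio>

// Scalar mirror of the branchless SSE2 ceil fallback above.
static float ceil_ref(float x)
{
    if (std::fabs(x) >= 8388608.0f)  // 2^23: already an integer
        return x;
    float truncated = (float)(int)x; // round toward zero
    if (x > truncated)               // positive input was rounded down
        truncated += 1.0f;
    return truncated;
}

int main()
{
    for (float t : { 2.5f, -2.5f, 3.0f, 0.25f })
        printf("ceil_ref(%g) = %g (std::ceil: %g)\n", t, ceil_ref(t), std::ceil(t));
    return 0;
}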
2,113,205
ncnn::UnaryOp_x86_functor::unary_op_ceil::func(float const&) const
float func(const float& x) const { return (float)ceil(x); }
subq $0x18, %rsp movq %rdi, 0x10(%rsp) movq %rsi, 0x8(%rsp) movq 0x8(%rsp), %rax movss (%rax), %xmm0 callq 0x1058980 addq $0x18, %rsp retq nopw %cs:(%rax,%rax) nopl (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/src/layer/x86/unaryop_x86.cpp
2,113,206
ncnn::UnaryOp_x86_functor::unary_op_square::func_pack4(float vector[4] const&) const
__m128 func_pack4(const __m128& x) const { return _mm_mul_ps(x, x); }
movq %rdi, -0x30(%rsp) movq %rsi, -0x38(%rsp) movq -0x38(%rsp), %rax movaps (%rax), %xmm1 movq -0x38(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, -0x18(%rsp) movaps %xmm0, -0x28(%rsp) movaps -0x18(%rsp), %xmm0 mulps -0x28(%rsp), %xmm0 retq nop
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/src/layer/x86/unaryop_x86.cpp
2,113,207
ncnn::UnaryOp_x86_functor::unary_op_square::func(float const&) const
float func(const float& x) const { return x * x; }
movq %rdi, -0x8(%rsp) movq %rsi, -0x10(%rsp) movq -0x10(%rsp), %rax movss (%rax), %xmm0 movq -0x10(%rsp), %rax mulss (%rax), %xmm0 retq nopl (%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/src/layer/x86/unaryop_x86.cpp
2,113,208
ncnn::UnaryOp_x86_functor::unary_op_sqrt::func_pack4(float vector[4] const&) const
__m128 func_pack4(const __m128& x) const { return _mm_sqrt_ps(x); }
movq %rdi, -0x20(%rsp) movq %rsi, -0x28(%rsp) movq -0x28(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm0, -0x18(%rsp) movaps -0x18(%rsp), %xmm2 rsqrtps %xmm2, %xmm3 movaps %xmm2, %xmm0 mulps %xmm3, %xmm0 movaps 0x7db5e4(%rip), %xmm4 # 0x1e1c9d0 movaps %xmm0, %xmm1 mulps %xmm4, %xmm1 mulps %xmm3, %xmm0 movaps 0x7db5e4(%rip), %xmm3 # 0x1e1c9e0 addps %xmm3, %xmm0 mulps %xmm0, %xmm1 movaps 0x7d6d17(%rip), %xmm0 # 0x1e18120 pand %xmm0, %xmm2 movaps 0x7ca6fc(%rip), %xmm0 # 0x1e0bb10 cmpleps %xmm2, %xmm0 pand %xmm1, %xmm0 retq nopl (%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/src/layer/x86/unaryop_x86.cpp
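Notably, the listing above contains no sqrtps: the compiler expanded _mm_sqrt_ps into rsqrtps plus one Newton-Raphson step and a final mask, a relaxed floating-point lowering. The mask zeroes lanes whose magnitude is below the smallest normal float, where the rsqrt estimate is unusable. A hypothetical reconstruction of that lowering; the constants are inferred from the usual refinement formula, not read out of the binary's constant pool:

#include <immintrin.h>
#include <cstdio>

// rsqrtps + one Newton-Raphson step, approximating sqrtps:
// sqrt(x) ~= (x*y0) * 0.5 * (3 - x*y0*y0), with y0 = rsqrtps(x).
static __m128 sqrt_nr(__m128 x)
{
    __m128 y0 = _mm_rsqrt_ps(x);               // ~12-bit estimate of 1/sqrt(x)
    __m128 xy = _mm_mul_ps(x, y0);
    __m128 r  = _mm_mul_ps(_mm_mul_ps(xy, _mm_set1_ps(-0.5f)),
                           _mm_sub_ps(_mm_mul_ps(xy, y0), _mm_set1_ps(3.0f)));
    // Zero out lanes below the smallest normal (this also handles x == 0,
    // where x*y0 is 0*inf = NaN).
    __m128 abs_x  = _mm_andnot_ps(_mm_set1_ps(-0.0f), x);
    __m128 normal = _mm_cmple_ps(_mm_set1_ps(1.17549435e-38f), abs_x);
    return _mm_and_ps(r, normal);
}

int main()
{
    float out[4];
    _mm_storeu_ps(out, sqrt_nr(_mm_setr_ps(4.0f, 2.0f, 0.0f, 100.0f)));
    printf("%f %f %f %f\n", out[0], out[1], out[2], out[3]);
    return 0;
}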
2,113,209
ncnn::UnaryOp_x86_functor::unary_op_sqrt::func(float const&) const
float func(const float& x) const { return (float)sqrt(x); }
subq $0x18, %rsp movq %rdi, 0x10(%rsp) movq %rsi, 0x8(%rsp) movq 0x8(%rsp), %rax movss (%rax), %xmm0 callq 0x671b0 addq $0x18, %rsp retq nopw %cs:(%rax,%rax) nopl (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/src/layer/x86/unaryop_x86.cpp
2,113,210
ncnn::UnaryOp_x86_functor::unary_op_rsqrt::func_pack4(float vector[4] const&) const
__m128 func_pack4(const __m128& x) const { return _mm_rsqrt_ps(x); }
movq %rdi, -0x20(%rsp) movq %rsi, -0x28(%rsp) movq -0x28(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm0, -0x18(%rsp) movaps -0x18(%rsp), %xmm0 rsqrtps %xmm0, %xmm0 retq
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/src/layer/x86/unaryop_x86.cpp
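Here _mm_rsqrt_ps compiles to a bare rsqrtps, whose relative error can be as large as 1.5 * 2^-12. Where that is too coarse, the standard remedy is one Newton-Raphson step; a sketch with a hypothetical helper, not what this record compiles to:

#include <immintrin.h>
#include <cstdio>

// One Newton-Raphson refinement of rsqrtps: y1 = y0*(1.5 - 0.5*x*y0*y0),
// roughly doubling the number of correct bits.
static __m128 rsqrt_refined(__m128 x)
{
    __m128 y0 = _mm_rsqrt_ps(x);
    __m128 h  = _mm_mul_ps(_mm_mul_ps(_mm_set1_ps(0.5f), x),
                           _mm_mul_ps(y0, y0));
    return _mm_mul_ps(y0, _mm_sub_ps(_mm_set1_ps(1.5f), h));
}

int main()
{
    float out[4];
    _mm_storeu_ps(out, rsqrt_refined(_mm_setr_ps(4.0f, 0.25f, 2.0f, 9.0f)));
    printf("%f %f %f %f\n", out[0], out[1], out[2], out[3]); // ~0.5 2 0.7071 0.3333
    return 0;
}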
2,113,211
ncnn::UnaryOp_x86_functor::unary_op_rsqrt::func(float const&) const
float func(const float& x) const { return (float)(1.f / sqrt(x)); }
subq $0x18, %rsp movq %rdi, 0x10(%rsp) movq %rsi, 0x8(%rsp) movq 0x8(%rsp), %rax movss (%rax), %xmm0 callq 0x671b0 movaps %xmm0, %xmm1 movss 0x7c7b6d(%rip), %xmm0 # 0x1e09004 divss %xmm1, %xmm0 addq $0x18, %rsp retq
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/src/layer/x86/unaryop_x86.cpp
2,113,212
ncnn::UnaryOp_x86_functor::unary_op_exp::func_pack4(float vector[4] const&) const
__m128 func_pack4(const __m128& x) const { return exp_ps(x); }
subq $0x568, %rsp # imm = 0x568 movq %rdi, -0x78(%rsp) movq %rsi, -0x80(%rsp) movq -0x80(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm0, (%rsp) xorps %xmm0, %xmm0 movaps %xmm0, 0x30(%rsp) movaps 0x30(%rsp), %xmm0 movaps %xmm0, -0x10(%rsp) movaps 0x7ca56a(%rip), %xmm0 # 0x1e0ba40 movaps %xmm0, -0x40(%rsp) movaps (%rsp), %xmm0 movaps %xmm0, 0x120(%rsp) movaps 0x7ca562(%rip), %xmm0 # 0x1e0ba50 movaps %xmm0, 0x110(%rsp) movaps 0x120(%rsp), %xmm0 movaps 0x110(%rsp), %xmm1 minps %xmm1, %xmm0 movaps %xmm0, (%rsp) movaps (%rsp), %xmm0 movaps %xmm0, 0x140(%rsp) movaps 0x7ca540(%rip), %xmm0 # 0x1e0ba60 movaps %xmm0, 0x130(%rsp) movaps 0x140(%rsp), %xmm0 movaps 0x130(%rsp), %xmm1 maxps %xmm1, %xmm0 movaps %xmm0, (%rsp) movaps (%rsp), %xmm0 movaps %xmm0, 0x100(%rsp) movaps 0x7ca51e(%rip), %xmm0 # 0x1e0ba70 movaps %xmm0, 0xf0(%rsp) movaps 0x100(%rsp), %xmm0 movaps 0xf0(%rsp), %xmm1 mulps %xmm1, %xmm0 movaps %xmm0, -0x20(%rsp) movaps -0x20(%rsp), %xmm0 movaps %xmm0, 0x180(%rsp) movaps 0x7ca4fa(%rip), %xmm0 # 0x1e0ba80 movaps %xmm0, 0x170(%rsp) movaps 0x180(%rsp), %xmm1 movaps 0x170(%rsp), %xmm2 addps %xmm2, %xmm1 movaps %xmm1, -0x20(%rsp) movaps -0x20(%rsp), %xmm1 movaps %xmm1, 0x80(%rsp) cvttps2dq 0x80(%rsp), %xmm1 movaps %xmm1, -0x30(%rsp) movaps -0x30(%rsp), %xmm1 movaps %xmm1, 0x60(%rsp) cvtdq2ps 0x60(%rsp), %xmm1 movaps %xmm1, -0x10(%rsp) movaps -0x10(%rsp), %xmm2 movaps -0x20(%rsp), %xmm1 movaps %xmm2, 0xa0(%rsp) movaps %xmm1, 0x90(%rsp) movaps 0x90(%rsp), %xmm1 movaps 0xa0(%rsp), %xmm2 cmpltps %xmm2, %xmm1 movaps %xmm1, -0x50(%rsp) movaps -0x50(%rsp), %xmm2 movaps -0x40(%rsp), %xmm1 movaps %xmm2, 0x50(%rsp) movaps %xmm1, 0x40(%rsp) movaps 0x50(%rsp), %xmm1 movaps 0x40(%rsp), %xmm2 pand %xmm2, %xmm1 movaps %xmm1, -0x50(%rsp) movaps -0x10(%rsp), %xmm2 movaps -0x50(%rsp), %xmm1 movaps %xmm2, 0x20(%rsp) movaps %xmm1, 0x10(%rsp) movaps 0x20(%rsp), %xmm1 movaps 0x10(%rsp), %xmm2 subps %xmm2, %xmm1 movaps %xmm1, -0x20(%rsp) movaps -0x20(%rsp), %xmm2 movaps (%rsp), %xmm1 movaps %xmm2, 0x220(%rsp) movaps 0x7ca423(%rip), %xmm2 # 0x1e0ba90 movaps %xmm2, 0x210(%rsp) movaps %xmm1, 0x200(%rsp) movaps 0x200(%rsp), %xmm2 movaps 0x220(%rsp), %xmm3 movaps 0x210(%rsp), %xmm1 movaps %xmm3, 0x260(%rsp) movaps %xmm1, 0x250(%rsp) movaps 0x260(%rsp), %xmm1 movaps 0x250(%rsp), %xmm3 mulps %xmm3, %xmm1 movaps %xmm2, 0x240(%rsp) movaps %xmm1, 0x230(%rsp) movaps 0x240(%rsp), %xmm1 movaps 0x230(%rsp), %xmm2 subps %xmm2, %xmm1 movaps %xmm1, (%rsp) movaps -0x20(%rsp), %xmm2 movaps (%rsp), %xmm1 movaps %xmm2, 0x1b0(%rsp) movaps 0x7ca3a9(%rip), %xmm2 # 0x1e0baa0 movaps %xmm2, 0x1a0(%rsp) movaps %xmm1, 0x190(%rsp) movaps 0x190(%rsp), %xmm2 movaps 0x1b0(%rsp), %xmm3 movaps 0x1a0(%rsp), %xmm1 movaps %xmm3, 0x1f0(%rsp) movaps %xmm1, 0x1e0(%rsp) movaps 0x1f0(%rsp), %xmm1 movaps 0x1e0(%rsp), %xmm3 mulps %xmm3, %xmm1 movaps %xmm2, 0x1d0(%rsp) movaps %xmm1, 0x1c0(%rsp) movaps 0x1d0(%rsp), %xmm1 movaps 0x1c0(%rsp), %xmm2 subps %xmm2, %xmm1 movaps %xmm1, (%rsp) movaps (%rsp), %xmm1 movaps %xmm1, 0xe0(%rsp) movaps %xmm1, 0xd0(%rsp) movaps 0xe0(%rsp), %xmm1 movaps 0xd0(%rsp), %xmm2 mulps %xmm2, %xmm1 movaps %xmm1, -0x10(%rsp) movaps 0x7ca314(%rip), %xmm1 # 0x1e0bab0 movaps %xmm1, -0x60(%rsp) movaps -0x60(%rsp), %xmm2 movaps (%rsp), %xmm1 movaps %xmm2, 0x4c0(%rsp) movaps %xmm1, 0x4b0(%rsp) movaps 0x7ca2ff(%rip), %xmm1 # 0x1e0bac0 movaps %xmm1, 0x4a0(%rsp) movaps 0x4c0(%rsp), %xmm2 movaps 0x4b0(%rsp), %xmm1 movaps %xmm2, 0x4e0(%rsp) movaps %xmm1, 0x4d0(%rsp) movaps 0x4e0(%rsp), %xmm2 movaps 0x4d0(%rsp), %xmm1 mulps %xmm1, %xmm2 movaps 
0x4a0(%rsp), %xmm1 movaps %xmm2, 0x500(%rsp) movaps %xmm1, 0x4f0(%rsp) movaps 0x500(%rsp), %xmm1 movaps 0x4f0(%rsp), %xmm2 addps %xmm2, %xmm1 movaps %xmm1, -0x60(%rsp) movaps -0x60(%rsp), %xmm2 movaps (%rsp), %xmm1 movaps %xmm2, 0x450(%rsp) movaps %xmm1, 0x440(%rsp) movaps 0x7ca284(%rip), %xmm1 # 0x1e0bad0 movaps %xmm1, 0x430(%rsp) movaps 0x450(%rsp), %xmm2 movaps 0x440(%rsp), %xmm1 movaps %xmm2, 0x470(%rsp) movaps %xmm1, 0x460(%rsp) movaps 0x470(%rsp), %xmm2 movaps 0x460(%rsp), %xmm1 mulps %xmm1, %xmm2 movaps 0x430(%rsp), %xmm1 movaps %xmm2, 0x490(%rsp) movaps %xmm1, 0x480(%rsp) movaps 0x490(%rsp), %xmm1 movaps 0x480(%rsp), %xmm2 addps %xmm2, %xmm1 movaps %xmm1, -0x60(%rsp) movaps -0x60(%rsp), %xmm2 movaps (%rsp), %xmm1 movaps %xmm2, 0x3e0(%rsp) movaps %xmm1, 0x3d0(%rsp) movaps 0x7ca209(%rip), %xmm1 # 0x1e0bae0 movaps %xmm1, 0x3c0(%rsp) movaps 0x3e0(%rsp), %xmm2 movaps 0x3d0(%rsp), %xmm1 movaps %xmm2, 0x400(%rsp) movaps %xmm1, 0x3f0(%rsp) movaps 0x400(%rsp), %xmm2 movaps 0x3f0(%rsp), %xmm1 mulps %xmm1, %xmm2 movaps 0x3c0(%rsp), %xmm1 movaps %xmm2, 0x420(%rsp) movaps %xmm1, 0x410(%rsp) movaps 0x420(%rsp), %xmm1 movaps 0x410(%rsp), %xmm2 addps %xmm2, %xmm1 movaps %xmm1, -0x60(%rsp) movaps -0x60(%rsp), %xmm2 movaps (%rsp), %xmm1 movaps %xmm2, 0x370(%rsp) movaps %xmm1, 0x360(%rsp) movaps 0x7ca18e(%rip), %xmm1 # 0x1e0baf0 movaps %xmm1, 0x350(%rsp) movaps 0x370(%rsp), %xmm2 movaps 0x360(%rsp), %xmm1 movaps %xmm2, 0x390(%rsp) movaps %xmm1, 0x380(%rsp) movaps 0x390(%rsp), %xmm2 movaps 0x380(%rsp), %xmm1 mulps %xmm1, %xmm2 movaps 0x350(%rsp), %xmm1 movaps %xmm2, 0x3b0(%rsp) movaps %xmm1, 0x3a0(%rsp) movaps 0x3b0(%rsp), %xmm1 movaps 0x3a0(%rsp), %xmm2 addps %xmm2, %xmm1 movaps %xmm1, -0x60(%rsp) movaps -0x60(%rsp), %xmm2 movaps (%rsp), %xmm1 movaps %xmm2, 0x300(%rsp) movaps %xmm1, 0x2f0(%rsp) movaps %xmm0, 0x2e0(%rsp) movaps 0x300(%rsp), %xmm1 movaps 0x2f0(%rsp), %xmm0 movaps %xmm1, 0x320(%rsp) movaps %xmm0, 0x310(%rsp) movaps 0x320(%rsp), %xmm1 movaps 0x310(%rsp), %xmm0 mulps %xmm0, %xmm1 movaps 0x2e0(%rsp), %xmm0 movaps %xmm1, 0x340(%rsp) movaps %xmm0, 0x330(%rsp) movaps 0x340(%rsp), %xmm0 movaps 0x330(%rsp), %xmm1 addps %xmm1, %xmm0 movaps %xmm0, -0x60(%rsp) movaps -0x60(%rsp), %xmm2 movaps -0x10(%rsp), %xmm1 movaps (%rsp), %xmm0 movaps %xmm2, 0x290(%rsp) movaps %xmm1, 0x280(%rsp) movaps %xmm0, 0x270(%rsp) movaps 0x290(%rsp), %xmm1 movaps 0x280(%rsp), %xmm0 movaps %xmm1, 0x2b0(%rsp) movaps %xmm0, 0x2a0(%rsp) movaps 0x2b0(%rsp), %xmm1 movaps 0x2a0(%rsp), %xmm0 mulps %xmm0, %xmm1 movaps 0x270(%rsp), %xmm0 movaps %xmm1, 0x2d0(%rsp) movaps %xmm0, 0x2c0(%rsp) movaps 0x2d0(%rsp), %xmm0 movaps 0x2c0(%rsp), %xmm1 addps %xmm1, %xmm0 movaps %xmm0, -0x60(%rsp) movaps -0x60(%rsp), %xmm1 movaps -0x40(%rsp), %xmm0 movaps %xmm1, 0x160(%rsp) movaps %xmm0, 0x150(%rsp) movaps 0x160(%rsp), %xmm0 movaps 0x150(%rsp), %xmm1 addps %xmm1, %xmm0 movaps %xmm0, -0x60(%rsp) movaps -0x20(%rsp), %xmm0 movaps %xmm0, 0x70(%rsp) cvttps2dq 0x70(%rsp), %xmm0 movaps %xmm0, -0x30(%rsp) movaps -0x30(%rsp), %xmm0 movaps %xmm0, 0x520(%rsp) movaps 0x7c9fcb(%rip), %xmm0 # 0x1e0bb00 movaps %xmm0, 0x510(%rsp) movdqa 0x520(%rsp), %xmm0 movdqa 0x510(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, -0x30(%rsp) movdqa -0x30(%rsp), %xmm0 movdqa %xmm0, 0x540(%rsp) movl $0x17, 0x53c(%rsp) movdqa 0x540(%rsp), %xmm0 movl 0x53c(%rsp), %eax movd %eax, %xmm1 pslld %xmm1, %xmm0 movdqa %xmm0, -0x30(%rsp) movdqa -0x30(%rsp), %xmm0 movdqa %xmm0, 0x550(%rsp) movdqa 0x550(%rsp), %xmm0 movaps %xmm0, -0x70(%rsp) movaps -0x60(%rsp), %xmm1 movaps -0x70(%rsp), 
%xmm0 movaps %xmm1, 0xc0(%rsp) movaps %xmm0, 0xb0(%rsp) movaps 0xc0(%rsp), %xmm0 mulps 0xb0(%rsp), %xmm0 movaps %xmm0, -0x60(%rsp) movaps -0x60(%rsp), %xmm0 addq $0x568, %rsp # imm = 0x568 retq nopw (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/src/layer/x86/unaryop_x86.cpp
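The long listing above is exp_ps, a Cephes-derived vector exponential: clamp the input, split x = n*ln2 + r using a two-part ln2 for accuracy, evaluate a degree-5 polynomial for e^r, and multiply by 2^n built directly in the IEEE-754 exponent field (the paddd/pslld $0x17 near the end). A scalar outline under those assumptions; the coefficients below are the classic Cephes expf ones and may not match the listing's constant pool bit-for-bit:

#include <cmath>
#include <cstdint>
#include <cstring>
#include <cstdio>

// Scalar outline of the exp_ps range reduction: exp(x) = 2^n * exp(r).
static float exp_ref(float x)
{
    x = std::fmin(std::fmax(x, -87.3365f), 88.3762f);     // keep 2^n normal
    float n = std::floor(x * 1.44269504f + 0.5f);         // round(x / ln2)
    float r = x - n * 0.693359375f - n * -2.12194440e-4f; // two-part ln2
    float p = 1.9875691500e-4f;
    p = p * r + 1.3981999507e-3f;
    p = p * r + 8.3334519073e-3f;
    p = p * r + 4.1665795894e-2f;
    p = p * r + 1.6666665459e-1f;
    p = p * r + 5.0000001201e-1f;
    float er = p * r * r + r + 1.0f;                      // e^r on [-ln2/2, ln2/2]
    uint32_t bits = (uint32_t)((int)n + 127) << 23;       // 2^n as float bits
    float scale;
    std::memcpy(&scale, &bits, sizeof scale);
    return er * scale;
}

int main()
{
    for (float t : { -1.0f, 0.0f, 1.0f, 10.0f })
        printf("exp_ref(%g) = %g (std::exp: %g)\n", t, exp_ref(t), std::exp(t));
    return 0;
}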
2,113,213
ncnn::UnaryOp_x86_functor::unary_op_exp::func(float const&) const
float func(const float& x) const { return (float)exp(x); }
subq $0x18, %rsp movq %rdi, 0x10(%rsp) movq %rsi, 0x8(%rsp) movq 0x8(%rsp), %rax movss (%rax), %xmm0 callq 0x100cf0 addq $0x18, %rsp retq nopw %cs:(%rax,%rax) nopl (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/src/layer/x86/unaryop_x86.cpp
2,113,214
ncnn::UnaryOp_x86_functor::unary_op_log::func_pack4(float vector[4] const&) const
__m128 func_pack4(const __m128& x) const { return log_ps(x); }
subq $0x768, %rsp # imm = 0x768 movq %rdi, -0x78(%rsp) movq %rsi, -0x80(%rsp) movq -0x80(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm0, 0x10(%rsp) movaps 0x7c9dfb(%rip), %xmm0 # 0x1e0ba40 movaps %xmm0, -0x10(%rsp) movaps 0x10(%rsp), %xmm1 xorps %xmm0, %xmm0 movaps %xmm0, 0x60(%rsp) movaps 0x60(%rsp), %xmm0 movaps %xmm1, 0x700(%rsp) movaps %xmm0, 0x6f0(%rsp) movaps 0x700(%rsp), %xmm0 movaps 0x6f0(%rsp), %xmm1 cmpleps %xmm1, %xmm0 movaps %xmm0, -0x20(%rsp) movaps 0x10(%rsp), %xmm0 movaps %xmm0, 0x1b0(%rsp) movaps 0x7c9e77(%rip), %xmm0 # 0x1e0bb10 movaps %xmm0, 0x1a0(%rsp) movaps 0x1b0(%rsp), %xmm0 movaps 0x1a0(%rsp), %xmm1 maxps %xmm1, %xmm0 movaps %xmm0, 0x10(%rsp) movaps 0x10(%rsp), %xmm0 movaps %xmm0, 0x730(%rsp) movaps 0x730(%rsp), %xmm0 movaps %xmm0, 0x720(%rsp) movl $0x17, 0x71c(%rsp) movaps 0x720(%rsp), %xmm0 movd 0x71c(%rsp), %xmm1 psrld %xmm1, %xmm0 movaps %xmm0, (%rsp) movaps 0x10(%rsp), %xmm0 movaps %xmm0, 0xc0(%rsp) movaps 0x7c9e12(%rip), %xmm0 # 0x1e0bb20 movaps %xmm0, 0xb0(%rsp) movaps 0xc0(%rsp), %xmm0 movaps 0xb0(%rsp), %xmm1 pand %xmm1, %xmm0 movaps %xmm0, 0x10(%rsp) movaps 0x10(%rsp), %xmm0 movaps %xmm0, 0x130(%rsp) movaps 0x7c9d3d(%rip), %xmm1 # 0x1e0ba80 movaps %xmm1, 0x120(%rsp) movaps 0x130(%rsp), %xmm0 movaps 0x120(%rsp), %xmm2 por %xmm2, %xmm0 movaps %xmm0, 0x10(%rsp) movaps (%rsp), %xmm0 movaps %xmm0, 0x750(%rsp) movaps 0x7c9d89(%rip), %xmm0 # 0x1e0bb00 movaps %xmm0, 0x740(%rsp) movaps 0x750(%rsp), %xmm0 movaps 0x740(%rsp), %xmm2 psubd %xmm2, %xmm0 movaps %xmm0, (%rsp) movaps (%rsp), %xmm0 movaps %xmm0, 0xf0(%rsp) cvtdq2ps 0xf0(%rsp), %xmm0 movaps %xmm0, -0x30(%rsp) movaps -0x30(%rsp), %xmm2 movaps -0x10(%rsp), %xmm0 movaps %xmm2, 0x210(%rsp) movaps %xmm0, 0x200(%rsp) movaps 0x210(%rsp), %xmm0 movaps 0x200(%rsp), %xmm2 addps %xmm2, %xmm0 movaps %xmm0, -0x30(%rsp) movaps 0x10(%rsp), %xmm0 movaps %xmm0, 0xe0(%rsp) movaps 0x7c9d3a(%rip), %xmm0 # 0x1e0bb30 movaps %xmm0, 0xd0(%rsp) movaps 0xe0(%rsp), %xmm0 movaps 0xd0(%rsp), %xmm2 cmpltps %xmm2, %xmm0 movaps %xmm0, -0x40(%rsp) movaps 0x10(%rsp), %xmm2 movaps -0x40(%rsp), %xmm0 movaps %xmm2, 0xa0(%rsp) movaps %xmm0, 0x90(%rsp) movaps 0xa0(%rsp), %xmm0 movaps 0x90(%rsp), %xmm2 pand %xmm2, %xmm0 movaps %xmm0, -0x50(%rsp) movaps 0x10(%rsp), %xmm2 movaps -0x10(%rsp), %xmm0 movaps %xmm2, 0x50(%rsp) movaps %xmm0, 0x40(%rsp) movaps 0x50(%rsp), %xmm0 movaps 0x40(%rsp), %xmm2 subps %xmm2, %xmm0 movaps %xmm0, 0x10(%rsp) movaps -0x30(%rsp), %xmm2 movaps -0x10(%rsp), %xmm3 movaps -0x40(%rsp), %xmm0 movaps %xmm3, 0x80(%rsp) movaps %xmm0, 0x70(%rsp) movaps 0x80(%rsp), %xmm0 movaps 0x70(%rsp), %xmm3 pand %xmm3, %xmm0 movaps %xmm2, 0x30(%rsp) movaps %xmm0, 0x20(%rsp) movaps 0x30(%rsp), %xmm0 movaps 0x20(%rsp), %xmm2 subps %xmm2, %xmm0 movaps %xmm0, -0x30(%rsp) movaps 0x10(%rsp), %xmm2 movaps -0x50(%rsp), %xmm0 movaps %xmm2, 0x1f0(%rsp) movaps %xmm0, 0x1e0(%rsp) movaps 0x1f0(%rsp), %xmm0 movaps 0x1e0(%rsp), %xmm2 addps %xmm2, %xmm0 movaps %xmm0, 0x10(%rsp) movaps 0x10(%rsp), %xmm0 movaps %xmm0, 0x190(%rsp) movaps %xmm0, 0x180(%rsp) movaps 0x190(%rsp), %xmm0 movaps 0x180(%rsp), %xmm2 mulps %xmm2, %xmm0 movaps %xmm0, -0x60(%rsp) movaps 0x7c9c21(%rip), %xmm0 # 0x1e0bb40 movaps %xmm0, -0x70(%rsp) movaps -0x70(%rsp), %xmm2 movaps 0x10(%rsp), %xmm0 movaps %xmm2, 0x6a0(%rsp) movaps %xmm0, 0x690(%rsp) movaps 0x7c9c0b(%rip), %xmm0 # 0x1e0bb50 movaps %xmm0, 0x680(%rsp) movaps 0x6a0(%rsp), %xmm2 movaps 0x690(%rsp), %xmm0 movaps %xmm2, 0x6c0(%rsp) movaps %xmm0, 0x6b0(%rsp) movaps 0x6c0(%rsp), %xmm2 movaps 0x6b0(%rsp), %xmm0 mulps %xmm0, %xmm2 movaps 
0x680(%rsp), %xmm0 movaps %xmm2, 0x6e0(%rsp) movaps %xmm0, 0x6d0(%rsp) movaps 0x6e0(%rsp), %xmm0 movaps 0x6d0(%rsp), %xmm2 addps %xmm2, %xmm0 movaps %xmm0, -0x70(%rsp) movaps -0x70(%rsp), %xmm2 movaps 0x10(%rsp), %xmm0 movaps %xmm2, 0x630(%rsp) movaps %xmm0, 0x620(%rsp) movaps 0x7c9b8f(%rip), %xmm0 # 0x1e0bb60 movaps %xmm0, 0x610(%rsp) movaps 0x630(%rsp), %xmm2 movaps 0x620(%rsp), %xmm0 movaps %xmm2, 0x650(%rsp) movaps %xmm0, 0x640(%rsp) movaps 0x650(%rsp), %xmm2 movaps 0x640(%rsp), %xmm0 mulps %xmm0, %xmm2 movaps 0x610(%rsp), %xmm0 movaps %xmm2, 0x670(%rsp) movaps %xmm0, 0x660(%rsp) movaps 0x670(%rsp), %xmm0 movaps 0x660(%rsp), %xmm2 addps %xmm2, %xmm0 movaps %xmm0, -0x70(%rsp) movaps -0x70(%rsp), %xmm2 movaps 0x10(%rsp), %xmm0 movaps %xmm2, 0x5c0(%rsp) movaps %xmm0, 0x5b0(%rsp) movaps 0x7c9b13(%rip), %xmm0 # 0x1e0bb70 movaps %xmm0, 0x5a0(%rsp) movaps 0x5c0(%rsp), %xmm2 movaps 0x5b0(%rsp), %xmm0 movaps %xmm2, 0x5e0(%rsp) movaps %xmm0, 0x5d0(%rsp) movaps 0x5e0(%rsp), %xmm2 movaps 0x5d0(%rsp), %xmm0 mulps %xmm0, %xmm2 movaps 0x5a0(%rsp), %xmm0 movaps %xmm2, 0x600(%rsp) movaps %xmm0, 0x5f0(%rsp) movaps 0x600(%rsp), %xmm0 movaps 0x5f0(%rsp), %xmm2 addps %xmm2, %xmm0 movaps %xmm0, -0x70(%rsp) movaps -0x70(%rsp), %xmm2 movaps 0x10(%rsp), %xmm0 movaps %xmm2, 0x550(%rsp) movaps %xmm0, 0x540(%rsp) movaps 0x7c9a97(%rip), %xmm0 # 0x1e0bb80 movaps %xmm0, 0x530(%rsp) movaps 0x550(%rsp), %xmm2 movaps 0x540(%rsp), %xmm0 movaps %xmm2, 0x570(%rsp) movaps %xmm0, 0x560(%rsp) movaps 0x570(%rsp), %xmm2 movaps 0x560(%rsp), %xmm0 mulps %xmm0, %xmm2 movaps 0x530(%rsp), %xmm0 movaps %xmm2, 0x590(%rsp) movaps %xmm0, 0x580(%rsp) movaps 0x590(%rsp), %xmm0 movaps 0x580(%rsp), %xmm2 addps %xmm2, %xmm0 movaps %xmm0, -0x70(%rsp) movaps -0x70(%rsp), %xmm2 movaps 0x10(%rsp), %xmm0 movaps %xmm2, 0x4e0(%rsp) movaps %xmm0, 0x4d0(%rsp) movaps 0x7c9a1b(%rip), %xmm0 # 0x1e0bb90 movaps %xmm0, 0x4c0(%rsp) movaps 0x4e0(%rsp), %xmm2 movaps 0x4d0(%rsp), %xmm0 movaps %xmm2, 0x500(%rsp) movaps %xmm0, 0x4f0(%rsp) movaps 0x500(%rsp), %xmm2 movaps 0x4f0(%rsp), %xmm0 mulps %xmm0, %xmm2 movaps 0x4c0(%rsp), %xmm0 movaps %xmm2, 0x520(%rsp) movaps %xmm0, 0x510(%rsp) movaps 0x520(%rsp), %xmm0 movaps 0x510(%rsp), %xmm2 addps %xmm2, %xmm0 movaps %xmm0, -0x70(%rsp) movaps -0x70(%rsp), %xmm2 movaps 0x10(%rsp), %xmm0 movaps %xmm2, 0x470(%rsp) movaps %xmm0, 0x460(%rsp) movaps 0x7c999f(%rip), %xmm0 # 0x1e0bba0 movaps %xmm0, 0x450(%rsp) movaps 0x470(%rsp), %xmm2 movaps 0x460(%rsp), %xmm0 movaps %xmm2, 0x490(%rsp) movaps %xmm0, 0x480(%rsp) movaps 0x490(%rsp), %xmm2 movaps 0x480(%rsp), %xmm0 mulps %xmm0, %xmm2 movaps 0x450(%rsp), %xmm0 movaps %xmm2, 0x4b0(%rsp) movaps %xmm0, 0x4a0(%rsp) movaps 0x4b0(%rsp), %xmm0 movaps 0x4a0(%rsp), %xmm2 addps %xmm2, %xmm0 movaps %xmm0, -0x70(%rsp) movaps -0x70(%rsp), %xmm2 movaps 0x10(%rsp), %xmm0 movaps %xmm2, 0x400(%rsp) movaps %xmm0, 0x3f0(%rsp) movaps 0x7c9923(%rip), %xmm0 # 0x1e0bbb0 movaps %xmm0, 0x3e0(%rsp) movaps 0x400(%rsp), %xmm2 movaps 0x3f0(%rsp), %xmm0 movaps %xmm2, 0x420(%rsp) movaps %xmm0, 0x410(%rsp) movaps 0x420(%rsp), %xmm2 movaps 0x410(%rsp), %xmm0 mulps %xmm0, %xmm2 movaps 0x3e0(%rsp), %xmm0 movaps %xmm2, 0x440(%rsp) movaps %xmm0, 0x430(%rsp) movaps 0x440(%rsp), %xmm0 movaps 0x430(%rsp), %xmm2 addps %xmm2, %xmm0 movaps %xmm0, -0x70(%rsp) movaps -0x70(%rsp), %xmm2 movaps 0x10(%rsp), %xmm0 movaps %xmm2, 0x390(%rsp) movaps %xmm0, 0x380(%rsp) movaps 0x7c98a7(%rip), %xmm0 # 0x1e0bbc0 movaps %xmm0, 0x370(%rsp) movaps 0x390(%rsp), %xmm2 movaps 0x380(%rsp), %xmm0 movaps %xmm2, 0x3b0(%rsp) movaps %xmm0, 
0x3a0(%rsp) movaps 0x3b0(%rsp), %xmm2 movaps 0x3a0(%rsp), %xmm0 mulps %xmm0, %xmm2 movaps 0x370(%rsp), %xmm0 movaps %xmm2, 0x3d0(%rsp) movaps %xmm0, 0x3c0(%rsp) movaps 0x3d0(%rsp), %xmm0 movaps 0x3c0(%rsp), %xmm2 addps %xmm2, %xmm0 movaps %xmm0, -0x70(%rsp) movaps -0x70(%rsp), %xmm2 movaps 0x10(%rsp), %xmm0 movaps %xmm2, 0x170(%rsp) movaps %xmm0, 0x160(%rsp) movaps 0x170(%rsp), %xmm0 movaps 0x160(%rsp), %xmm2 mulps %xmm2, %xmm0 movaps %xmm0, -0x70(%rsp) movaps -0x70(%rsp), %xmm2 movaps -0x60(%rsp), %xmm0 movaps %xmm2, 0x150(%rsp) movaps %xmm0, 0x140(%rsp) movaps 0x150(%rsp), %xmm0 movaps 0x140(%rsp), %xmm2 mulps %xmm2, %xmm0 movaps %xmm0, -0x70(%rsp) movaps -0x30(%rsp), %xmm2 movaps -0x70(%rsp), %xmm0 movaps %xmm2, 0x320(%rsp) movaps 0x7c969f(%rip), %xmm2 # 0x1e0baa0 movaps %xmm2, 0x310(%rsp) movaps %xmm0, 0x300(%rsp) movaps 0x320(%rsp), %xmm2 movaps 0x310(%rsp), %xmm0 movaps %xmm2, 0x340(%rsp) movaps %xmm0, 0x330(%rsp) movaps 0x340(%rsp), %xmm2 movaps 0x330(%rsp), %xmm0 mulps %xmm0, %xmm2 movaps 0x300(%rsp), %xmm0 movaps %xmm2, 0x360(%rsp) movaps %xmm0, 0x350(%rsp) movaps 0x360(%rsp), %xmm0 movaps 0x350(%rsp), %xmm2 addps %xmm2, %xmm0 movaps %xmm0, -0x70(%rsp) movaps -0x60(%rsp), %xmm2 movaps -0x70(%rsp), %xmm0 movaps %xmm2, 0x240(%rsp) movaps %xmm1, 0x230(%rsp) movaps %xmm0, 0x220(%rsp) movaps 0x220(%rsp), %xmm1 movaps 0x240(%rsp), %xmm2 movaps 0x230(%rsp), %xmm0 movaps %xmm2, 0x280(%rsp) movaps %xmm0, 0x270(%rsp) movaps 0x280(%rsp), %xmm0 movaps 0x270(%rsp), %xmm2 mulps %xmm2, %xmm0 movaps %xmm1, 0x260(%rsp) movaps %xmm0, 0x250(%rsp) movaps 0x260(%rsp), %xmm0 movaps 0x250(%rsp), %xmm1 subps %xmm1, %xmm0 movaps %xmm0, -0x70(%rsp) movaps 0x10(%rsp), %xmm1 movaps -0x70(%rsp), %xmm0 movaps %xmm1, 0x1d0(%rsp) movaps %xmm0, 0x1c0(%rsp) movaps 0x1d0(%rsp), %xmm0 movaps 0x1c0(%rsp), %xmm1 addps %xmm1, %xmm0 movaps %xmm0, 0x10(%rsp) movaps -0x30(%rsp), %xmm1 movaps 0x10(%rsp), %xmm0 movaps %xmm1, 0x2b0(%rsp) movaps 0x7c954c(%rip), %xmm1 # 0x1e0ba90 movaps %xmm1, 0x2a0(%rsp) movaps %xmm0, 0x290(%rsp) movaps 0x2b0(%rsp), %xmm1 movaps 0x2a0(%rsp), %xmm0 movaps %xmm1, 0x2d0(%rsp) movaps %xmm0, 0x2c0(%rsp) movaps 0x2d0(%rsp), %xmm1 mulps 0x2c0(%rsp), %xmm1 movaps 0x290(%rsp), %xmm0 movaps %xmm1, 0x2f0(%rsp) movaps %xmm0, 0x2e0(%rsp) movaps 0x2f0(%rsp), %xmm0 addps 0x2e0(%rsp), %xmm0 movaps %xmm0, 0x10(%rsp) movaps 0x10(%rsp), %xmm1 movaps -0x20(%rsp), %xmm0 movaps %xmm1, 0x110(%rsp) movaps %xmm0, 0x100(%rsp) movaps 0x110(%rsp), %xmm0 movaps 0x100(%rsp), %xmm1 por %xmm1, %xmm0 movaps %xmm0, 0x10(%rsp) movaps 0x10(%rsp), %xmm0 addq $0x768, %rsp # imm = 0x768 retq nopw %cs:(%rax,%rax) nopl (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/src/layer/x86/unaryop_x86.cpp
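log_ps follows the same Cephes pattern in reverse: the 23-bit right shift in the listing extracts the exponent e, the mantissa is rebuilt into [0.5, 1) and renormalized near 1, a degree-8 polynomial supplies log(1+m), and e*ln2 is re-added in two parts; the final por merges in a NaN mask for inputs <= 0. A scalar outline with the standard Cephes logf coefficients (assumed, not extracted from the binary):

#include <cmath>
#include <cstdint>
#include <cstring>
#include <cstdio>
#include <limits>

// Scalar outline of the log_ps decomposition: log(x) = log(m) + e*ln2.
static float log_ref(float x)
{
    if (x <= 0.0f) // the vector code ORs in an all-ones (NaN) mask instead
        return std::numeric_limits<float>::quiet_NaN();
    uint32_t bits;
    std::memcpy(&bits, &x, sizeof bits);
    int e = (int)(bits >> 23) - 126;            // x = m * 2^e with m in [0.5, 1)
    bits = (bits & 0x007fffffu) | 0x3f000000u;  // rebuild m in [0.5, 1)
    float m;
    std::memcpy(&m, &bits, sizeof m);
    if (m < 0.70710678f) { e -= 1; m += m; }    // keep m in [sqrt(0.5), sqrt(2))
    m -= 1.0f;
    float z = m * m;
    float p = 7.0376836292e-2f;
    p = p * m - 1.1514610310e-1f;
    p = p * m + 1.1676998740e-1f;
    p = p * m - 1.2420140846e-1f;
    p = p * m + 1.4249322787e-1f;
    p = p * m - 1.6668057665e-1f;
    p = p * m + 2.0000714765e-1f;
    p = p * m - 2.4999993993e-1f;
    p = p * m + 3.3333331174e-1f;
    float y = m * z * p - 0.5f * z + (float)e * -2.12194440e-4f;
    return m + y + (float)e * 0.693359375f;
}

int main()
{
    for (float t : { 0.5f, 1.0f, 2.718282f, 10.0f })
        printf("log_ref(%g) = %g (std::log: %g)\n", t, log_ref(t), std::log(t));
    return 0;
}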
2,113,215
ncnn::UnaryOp_x86_functor::unary_op_log::func(float const&) const
float func(const float& x) const { return (float)log(x); }
subq $0x18, %rsp movq %rdi, 0x10(%rsp) movq %rsi, 0x8(%rsp) movq 0x8(%rsp), %rax movss (%rax), %xmm0 callq 0x100cd0 addq $0x18, %rsp retq nopw %cs:(%rax,%rax) nopl (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/src/layer/x86/unaryop_x86.cpp
2,113,216
ncnn::UnaryOp_x86_functor::unary_op_sin::func_pack4(float vector[4] const&) const
__m128 func_pack4(const __m128& x) const { return sin_ps(x); }
subq $0x708, %rsp # imm = 0x708 movq %rdi, -0x78(%rsp) movq %rsi, -0x80(%rsp) movq -0x80(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm0, 0x40(%rsp) xorps %xmm0, %xmm0 movaps %xmm0, 0x70(%rsp) movaps 0x70(%rsp), %xmm0 movaps %xmm0, 0x20(%rsp) movaps 0x40(%rsp), %xmm0 movaps %xmm0, (%rsp) movaps 0x40(%rsp), %xmm0 movaps %xmm0, 0xd0(%rsp) movaps 0x7d5aa3(%rip), %xmm0 # 0x1e18120 movaps %xmm0, 0xc0(%rsp) movaps 0xd0(%rsp), %xmm0 movaps 0xc0(%rsp), %xmm1 pand %xmm1, %xmm0 movaps %xmm0, 0x40(%rsp) movaps (%rsp), %xmm0 movaps %xmm0, 0xb0(%rsp) movaps 0x7cd4af(%rip), %xmm0 # 0x1e0fb60 movaps %xmm0, 0xa0(%rsp) movaps 0xb0(%rsp), %xmm0 movaps 0xa0(%rsp), %xmm1 pand %xmm1, %xmm0 movaps %xmm0, (%rsp) movaps 0x40(%rsp), %xmm0 movaps %xmm0, 0x190(%rsp) movaps 0x7da30b(%rip), %xmm0 # 0x1e1c9f0 movaps %xmm0, 0x180(%rsp) movaps 0x190(%rsp), %xmm0 movaps 0x180(%rsp), %xmm1 mulps %xmm1, %xmm0 movaps %xmm0, -0x10(%rsp) movaps -0x10(%rsp), %xmm0 movaps %xmm0, 0xf0(%rsp) cvttps2dq 0xf0(%rsp), %xmm0 movaps %xmm0, -0x30(%rsp) movaps -0x30(%rsp), %xmm0 movaps %xmm0, 0x5e0(%rsp) movaps 0x7da2cc(%rip), %xmm0 # 0x1e1ca00 movaps %xmm0, 0x5d0(%rsp) movaps 0x5e0(%rsp), %xmm0 movaps 0x5d0(%rsp), %xmm1 paddd %xmm1, %xmm0 movaps %xmm0, -0x30(%rsp) movaps -0x30(%rsp), %xmm0 movaps %xmm0, 0x680(%rsp) movaps 0x7da2a7(%rip), %xmm0 # 0x1e1ca10 movaps %xmm0, 0x670(%rsp) movaps 0x680(%rsp), %xmm0 movaps 0x670(%rsp), %xmm1 pand %xmm1, %xmm0 movaps %xmm0, -0x30(%rsp) movaps -0x30(%rsp), %xmm0 movaps %xmm0, 0xe0(%rsp) cvtdq2ps 0xe0(%rsp), %xmm0 movaps %xmm0, -0x10(%rsp) movaps -0x30(%rsp), %xmm0 movaps %xmm0, 0x660(%rsp) movaps 0x7da268(%rip), %xmm0 # 0x1e1ca20 movaps %xmm0, 0x650(%rsp) movaps 0x660(%rsp), %xmm0 movaps 0x650(%rsp), %xmm1 pand %xmm1, %xmm0 movaps %xmm0, -0x20(%rsp) movaps -0x20(%rsp), %xmm0 movaps %xmm0, 0x600(%rsp) movl $0x1d, 0x5fc(%rsp) movaps 0x600(%rsp), %xmm0 movd 0x5fc(%rsp), %xmm1 pslld %xmm1, %xmm0 movaps %xmm0, -0x20(%rsp) movaps -0x30(%rsp), %xmm0 movaps %xmm0, 0x640(%rsp) movaps 0x7da211(%rip), %xmm0 # 0x1e1ca30 movaps %xmm0, 0x630(%rsp) movaps 0x640(%rsp), %xmm0 movaps 0x630(%rsp), %xmm1 pand %xmm1, %xmm0 movaps %xmm0, -0x30(%rsp) movaps -0x30(%rsp), %xmm1 xorps %xmm0, %xmm0 movaps %xmm0, 0x6b0(%rsp) movaps 0x6b0(%rsp), %xmm0 movaps %xmm1, 0x6a0(%rsp) movaps %xmm0, 0x690(%rsp) movaps 0x6a0(%rsp), %xmm0 movaps 0x690(%rsp), %xmm1 pcmpeqd %xmm1, %xmm0 movaps %xmm0, -0x30(%rsp) movaps -0x20(%rsp), %xmm0 movaps %xmm0, 0x620(%rsp) movaps 0x620(%rsp), %xmm0 movaps %xmm0, -0x40(%rsp) movaps -0x30(%rsp), %xmm0 movaps %xmm0, 0x610(%rsp) movaps 0x610(%rsp), %xmm0 movaps %xmm0, -0x50(%rsp) movaps (%rsp), %xmm1 movaps -0x40(%rsp), %xmm0 movaps %xmm1, 0x6f0(%rsp) movaps %xmm0, 0x6e0(%rsp) movaps 0x6f0(%rsp), %xmm0 movaps 0x6e0(%rsp), %xmm1 pxor %xmm1, %xmm0 movaps %xmm0, (%rsp) movaps 0x7da153(%rip), %xmm0 # 0x1e1ca40 movaps %xmm0, 0x30(%rsp) movaps 0x7da157(%rip), %xmm0 # 0x1e1ca50 movaps %xmm0, 0x20(%rsp) movaps 0x7da15b(%rip), %xmm0 # 0x1e1ca60 movaps %xmm0, 0x10(%rsp) movaps -0x10(%rsp), %xmm2 movaps 0x30(%rsp), %xmm1 movaps 0x40(%rsp), %xmm0 movaps %xmm2, 0x580(%rsp) movaps %xmm1, 0x570(%rsp) movaps %xmm0, 0x560(%rsp) movaps 0x580(%rsp), %xmm1 movaps 0x570(%rsp), %xmm0 movaps %xmm1, 0x5a0(%rsp) movaps %xmm0, 0x590(%rsp) movaps 0x5a0(%rsp), %xmm1 movaps 0x590(%rsp), %xmm0 mulps %xmm0, %xmm1 movaps 0x560(%rsp), %xmm0 movaps %xmm1, 0x5c0(%rsp) movaps %xmm0, 0x5b0(%rsp) movaps 0x5c0(%rsp), %xmm0 movaps 0x5b0(%rsp), %xmm1 addps %xmm1, %xmm0 movaps %xmm0, 0x40(%rsp) movaps -0x10(%rsp), %xmm2 movaps 0x20(%rsp), %xmm1 
movaps 0x40(%rsp), %xmm0 movaps %xmm2, 0x510(%rsp) movaps %xmm1, 0x500(%rsp) movaps %xmm0, 0x4f0(%rsp) movaps 0x510(%rsp), %xmm1 movaps 0x500(%rsp), %xmm0 movaps %xmm1, 0x530(%rsp) movaps %xmm0, 0x520(%rsp) movaps 0x530(%rsp), %xmm1 movaps 0x520(%rsp), %xmm0 mulps %xmm0, %xmm1 movaps 0x4f0(%rsp), %xmm0 movaps %xmm1, 0x550(%rsp) movaps %xmm0, 0x540(%rsp) movaps 0x550(%rsp), %xmm0 movaps 0x540(%rsp), %xmm1 addps %xmm1, %xmm0 movaps %xmm0, 0x40(%rsp) movaps -0x10(%rsp), %xmm2 movaps 0x10(%rsp), %xmm1 movaps 0x40(%rsp), %xmm0 movaps %xmm2, 0x4a0(%rsp) movaps %xmm1, 0x490(%rsp) movaps %xmm0, 0x480(%rsp) movaps 0x4a0(%rsp), %xmm1 movaps 0x490(%rsp), %xmm0 movaps %xmm1, 0x4c0(%rsp) movaps %xmm0, 0x4b0(%rsp) movaps 0x4c0(%rsp), %xmm1 movaps 0x4b0(%rsp), %xmm0 mulps %xmm0, %xmm1 movaps 0x480(%rsp), %xmm0 movaps %xmm1, 0x4e0(%rsp) movaps %xmm0, 0x4d0(%rsp) movaps 0x4e0(%rsp), %xmm0 movaps 0x4d0(%rsp), %xmm1 addps %xmm1, %xmm0 movaps %xmm0, 0x40(%rsp) movaps 0x7d9fc1(%rip), %xmm0 # 0x1e1ca70 movaps %xmm0, -0x10(%rsp) movaps 0x40(%rsp), %xmm0 movaps %xmm0, 0x170(%rsp) movaps %xmm0, 0x160(%rsp) movaps 0x170(%rsp), %xmm0 movaps 0x160(%rsp), %xmm1 mulps %xmm1, %xmm0 movaps %xmm0, -0x60(%rsp) movaps -0x10(%rsp), %xmm1 movaps -0x60(%rsp), %xmm0 movaps %xmm1, 0x430(%rsp) movaps %xmm0, 0x420(%rsp) movaps 0x7d9f7e(%rip), %xmm0 # 0x1e1ca80 movaps %xmm0, 0x410(%rsp) movaps 0x430(%rsp), %xmm1 movaps 0x420(%rsp), %xmm0 movaps %xmm1, 0x450(%rsp) movaps %xmm0, 0x440(%rsp) movaps 0x450(%rsp), %xmm1 movaps 0x440(%rsp), %xmm0 mulps %xmm0, %xmm1 movaps 0x410(%rsp), %xmm0 movaps %xmm1, 0x470(%rsp) movaps %xmm0, 0x460(%rsp) movaps 0x470(%rsp), %xmm0 movaps 0x460(%rsp), %xmm1 addps %xmm1, %xmm0 movaps %xmm0, -0x10(%rsp) movaps -0x10(%rsp), %xmm1 movaps -0x60(%rsp), %xmm0 movaps %xmm1, 0x3c0(%rsp) movaps %xmm0, 0x3b0(%rsp) movaps 0x7d9f02(%rip), %xmm0 # 0x1e1ca90 movaps %xmm0, 0x3a0(%rsp) movaps 0x3c0(%rsp), %xmm1 movaps 0x3b0(%rsp), %xmm0 movaps %xmm1, 0x3e0(%rsp) movaps %xmm0, 0x3d0(%rsp) movaps 0x3e0(%rsp), %xmm1 movaps 0x3d0(%rsp), %xmm0 mulps %xmm0, %xmm1 movaps 0x3a0(%rsp), %xmm0 movaps %xmm1, 0x400(%rsp) movaps %xmm0, 0x3f0(%rsp) movaps 0x400(%rsp), %xmm0 movaps 0x3f0(%rsp), %xmm1 addps %xmm1, %xmm0 movaps %xmm0, -0x10(%rsp) movaps -0x10(%rsp), %xmm1 movaps -0x60(%rsp), %xmm0 movaps %xmm1, 0x150(%rsp) movaps %xmm0, 0x140(%rsp) movaps 0x150(%rsp), %xmm0 movaps 0x140(%rsp), %xmm1 mulps %xmm1, %xmm0 movaps %xmm0, -0x10(%rsp) movaps -0x10(%rsp), %xmm1 movaps -0x60(%rsp), %xmm0 movaps %xmm1, 0x130(%rsp) movaps %xmm0, 0x120(%rsp) movaps 0x130(%rsp), %xmm0 movaps 0x120(%rsp), %xmm1 mulps %xmm1, %xmm0 movaps %xmm0, -0x10(%rsp) movaps -0x60(%rsp), %xmm1 movaps -0x10(%rsp), %xmm0 movaps %xmm1, 0x200(%rsp) movaps 0x7c8e0a(%rip), %xmm1 # 0x1e0ba80 movaps %xmm1, 0x1f0(%rsp) movaps %xmm0, 0x1e0(%rsp) movaps 0x1e0(%rsp), %xmm1 movaps 0x200(%rsp), %xmm2 movaps 0x1f0(%rsp), %xmm0 movaps %xmm2, 0x240(%rsp) movaps %xmm0, 0x230(%rsp) movaps 0x240(%rsp), %xmm0 movaps 0x230(%rsp), %xmm2 mulps %xmm2, %xmm0 movaps %xmm1, 0x220(%rsp) movaps %xmm0, 0x210(%rsp) movaps 0x220(%rsp), %xmm0 movaps 0x210(%rsp), %xmm1 subps %xmm1, %xmm0 movaps %xmm0, -0x10(%rsp) movaps -0x10(%rsp), %xmm0 movaps %xmm0, 0x1d0(%rsp) movaps 0x7c8d43(%rip), %xmm0 # 0x1e0ba40 movaps %xmm0, 0x1c0(%rsp) movaps 0x1d0(%rsp), %xmm0 movaps 0x1c0(%rsp), %xmm1 addps %xmm1, %xmm0 movaps %xmm0, -0x10(%rsp) movaps 0x7d9d7c(%rip), %xmm0 # 0x1e1caa0 movaps %xmm0, -0x70(%rsp) movaps -0x70(%rsp), %xmm1 movaps -0x60(%rsp), %xmm0 movaps %xmm1, 0x350(%rsp) movaps %xmm0, 0x340(%rsp) movaps 
0x7d9d66(%rip), %xmm0 # 0x1e1cab0 movaps %xmm0, 0x330(%rsp) movaps 0x350(%rsp), %xmm1 movaps 0x340(%rsp), %xmm0 movaps %xmm1, 0x370(%rsp) movaps %xmm0, 0x360(%rsp) movaps 0x370(%rsp), %xmm1 movaps 0x360(%rsp), %xmm0 mulps %xmm0, %xmm1 movaps 0x330(%rsp), %xmm0 movaps %xmm1, 0x390(%rsp) movaps %xmm0, 0x380(%rsp) movaps 0x390(%rsp), %xmm0 movaps 0x380(%rsp), %xmm1 addps %xmm1, %xmm0 movaps %xmm0, -0x70(%rsp) movaps -0x70(%rsp), %xmm1 movaps -0x60(%rsp), %xmm0 movaps %xmm1, 0x2e0(%rsp) movaps %xmm0, 0x2d0(%rsp) movaps 0x7d9cea(%rip), %xmm0 # 0x1e1cac0 movaps %xmm0, 0x2c0(%rsp) movaps 0x2e0(%rsp), %xmm1 movaps 0x2d0(%rsp), %xmm0 movaps %xmm1, 0x300(%rsp) movaps %xmm0, 0x2f0(%rsp) movaps 0x300(%rsp), %xmm1 movaps 0x2f0(%rsp), %xmm0 mulps %xmm0, %xmm1 movaps 0x2c0(%rsp), %xmm0 movaps %xmm1, 0x320(%rsp) movaps %xmm0, 0x310(%rsp) movaps 0x320(%rsp), %xmm0 movaps 0x310(%rsp), %xmm1 addps %xmm1, %xmm0 movaps %xmm0, -0x70(%rsp) movaps -0x70(%rsp), %xmm1 movaps -0x60(%rsp), %xmm0 movaps %xmm1, 0x110(%rsp) movaps %xmm0, 0x100(%rsp) movaps 0x110(%rsp), %xmm0 movaps 0x100(%rsp), %xmm1 mulps %xmm1, %xmm0 movaps %xmm0, -0x70(%rsp) movaps -0x70(%rsp), %xmm1 movaps 0x40(%rsp), %xmm0 movaps %xmm1, 0x270(%rsp) movaps %xmm0, 0x260(%rsp) movaps %xmm0, 0x250(%rsp) movaps 0x270(%rsp), %xmm1 movaps 0x260(%rsp), %xmm0 movaps %xmm1, 0x290(%rsp) movaps %xmm0, 0x280(%rsp) movaps 0x290(%rsp), %xmm1 movaps 0x280(%rsp), %xmm0 mulps %xmm0, %xmm1 movaps 0x250(%rsp), %xmm0 movaps %xmm1, 0x2b0(%rsp) movaps %xmm0, 0x2a0(%rsp) movaps 0x2b0(%rsp), %xmm0 movaps 0x2a0(%rsp), %xmm1 addps %xmm1, %xmm0 movaps %xmm0, -0x70(%rsp) movaps -0x50(%rsp), %xmm0 movaps %xmm0, 0x10(%rsp) movaps 0x10(%rsp), %xmm1 movaps -0x70(%rsp), %xmm0 movaps %xmm1, 0x90(%rsp) movaps %xmm0, 0x80(%rsp) movaps 0x90(%rsp), %xmm0 movaps 0x80(%rsp), %xmm1 pand %xmm1, %xmm0 movaps %xmm0, -0x70(%rsp) movaps 0x10(%rsp), %xmm1 movaps -0x10(%rsp), %xmm0 movaps %xmm1, 0x60(%rsp) movaps %xmm0, 0x50(%rsp) movaps 0x60(%rsp), %xmm0 pcmpeqd %xmm1, %xmm1 pxor %xmm1, %xmm0 movaps 0x50(%rsp), %xmm1 pand %xmm1, %xmm0 movaps %xmm0, -0x10(%rsp) movaps -0x10(%rsp), %xmm1 movaps -0x70(%rsp), %xmm0 movaps %xmm1, 0x1b0(%rsp) movaps %xmm0, 0x1a0(%rsp) movaps 0x1b0(%rsp), %xmm0 addps 0x1a0(%rsp), %xmm0 movaps %xmm0, -0x10(%rsp) movaps -0x10(%rsp), %xmm1 movaps (%rsp), %xmm0 movaps %xmm1, 0x6d0(%rsp) movaps %xmm0, 0x6c0(%rsp) movaps 0x6d0(%rsp), %xmm0 movaps 0x6c0(%rsp), %xmm1 pxor %xmm1, %xmm0 movaps %xmm0, -0x10(%rsp) movaps -0x10(%rsp), %xmm0 addq $0x708, %rsp # imm = 0x708 retq nopw %cs:(%rax,%rax) nopl (%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/src/layer/x86/unaryop_x86.cpp
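sin_ps is the Cephes-style vector sine visible above: strip the sign, compute the octant index j from x * 4/pi, reduce x by j*pi/4 in three extended-precision parts, then evaluate a sine or cosine polynomial on [-pi/4, pi/4] depending on bit 1 of j, with the final sign taken from bit 2 of j XOR the original sign bit. (cos_ps, two records below, is the same machinery with shifted quadrant constants.) A scalar outline, valid for moderate |x|, using the Cephes sinf coefficients as an assumption:

#include <cmath>
#include <cstdio>

// Scalar outline of the sin_ps quadrant reduction and polynomial split.
static float sin_ref(float x)
{
    float sign = (x < 0.0f) ? -1.0f : 1.0f;
    x = std::fabs(x);
    int j = (int)(x * 1.27323954f);     // x / (pi/4), truncated
    j = (j + 1) & ~1;                   // force j even
    float y = (float)j;
    // Three-part pi/4 keeps the reduction accurate.
    x = x - y * 0.78515625f - y * 2.4187564849853515625e-4f
          - y * 3.77489497744594108e-8f;
    if (j & 4)
        sign = -sign;                   // quadrants 4..7 flip the sign
    float z = x * x;
    float p;
    if ((j & 2) == 0)                   // sine polynomial branch
        p = ((-1.9515295891e-4f * z + 8.3321608736e-3f) * z
             - 1.6666654611e-1f) * z * x + x;
    else                                // cosine polynomial branch
        p = ((2.443315711809948e-5f * z - 1.388731625493765e-3f) * z
             + 4.166664568298827e-2f) * z * z - 0.5f * z + 1.0f;
    return sign * p;
}

int main()
{
    for (float t : { 0.5f, 1.0f, 3.0f, -2.0f })
        printf("sin_ref(%g) = %g (std::sin: %g)\n", t, sin_ref(t), std::sin(t));
    return 0;
}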
2,113,217
ncnn::UnaryOp_x86_functor::unary_op_sin::func(float const&) const
float func(const float& x) const { return (float)sin(x); }
subq $0x18, %rsp movq %rdi, 0x10(%rsp) movq %rsi, 0x8(%rsp) movq 0x8(%rsp), %rax movss (%rax), %xmm0 callq 0x163a260 addq $0x18, %rsp retq nopw %cs:(%rax,%rax) nopl (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/src/layer/x86/unaryop_x86.cpp
2,113,218
ncnn::UnaryOp_x86_functor::unary_op_cos::func_pack4(float vector[4] const&) const
__m128 func_pack4(const __m128& x) const { return cos_ps(x); }
subq $0x6d8, %rsp # imm = 0x6D8 movq %rdi, -0x78(%rsp) movq %rsi, -0x80(%rsp) movq -0x80(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm0, 0x30(%rsp) xorps %xmm0, %xmm0 movaps %xmm0, 0x60(%rsp) movaps 0x60(%rsp), %xmm0 movaps %xmm0, 0x10(%rsp) movaps 0x30(%rsp), %xmm0 movaps %xmm0, 0xa0(%rsp) movaps 0x7d50cc(%rip), %xmm0 # 0x1e18120 movaps %xmm0, 0x90(%rsp) movaps 0xa0(%rsp), %xmm0 movaps 0x90(%rsp), %xmm1 pand %xmm1, %xmm0 movaps %xmm0, 0x30(%rsp) movaps 0x30(%rsp), %xmm0 movaps %xmm0, 0x160(%rsp) movaps 0x7d9967(%rip), %xmm0 # 0x1e1c9f0 movaps %xmm0, 0x150(%rsp) movaps 0x160(%rsp), %xmm0 movaps 0x150(%rsp), %xmm1 mulps %xmm1, %xmm0 movaps %xmm0, -0x10(%rsp) movaps -0x10(%rsp), %xmm0 movaps %xmm0, 0xc0(%rsp) cvttps2dq 0xc0(%rsp), %xmm0 movaps %xmm0, -0x30(%rsp) movaps -0x30(%rsp), %xmm0 movaps %xmm0, 0x5b0(%rsp) movaps 0x7d9928(%rip), %xmm0 # 0x1e1ca00 movaps %xmm0, 0x5a0(%rsp) movaps 0x5b0(%rsp), %xmm0 movaps 0x5a0(%rsp), %xmm1 paddd %xmm1, %xmm0 movaps %xmm0, -0x30(%rsp) movaps -0x30(%rsp), %xmm0 movaps %xmm0, 0x650(%rsp) movaps 0x7d9903(%rip), %xmm0 # 0x1e1ca10 movaps %xmm0, 0x640(%rsp) movaps 0x650(%rsp), %xmm0 movaps 0x640(%rsp), %xmm1 pand %xmm1, %xmm0 movaps %xmm0, -0x30(%rsp) movaps -0x30(%rsp), %xmm0 movaps %xmm0, 0xb0(%rsp) cvtdq2ps 0xb0(%rsp), %xmm0 movaps %xmm0, -0x10(%rsp) movaps -0x30(%rsp), %xmm0 movaps %xmm0, 0x610(%rsp) movaps 0x7d98d4(%rip), %xmm0 # 0x1e1ca30 movaps %xmm0, 0x600(%rsp) movaps 0x610(%rsp), %xmm1 movaps 0x600(%rsp), %xmm2 psubd %xmm2, %xmm1 movaps %xmm1, -0x30(%rsp) movaps -0x30(%rsp), %xmm1 movaps %xmm1, 0x6c0(%rsp) movaps 0x7d988f(%rip), %xmm1 # 0x1e1ca20 movaps %xmm1, 0x6b0(%rsp) movaps 0x6c0(%rsp), %xmm1 movaps 0x6b0(%rsp), %xmm2 pandn %xmm2, %xmm1 movaps %xmm1, -0x20(%rsp) movaps -0x20(%rsp), %xmm1 movaps %xmm1, 0x5d0(%rsp) movl $0x1d, 0x5cc(%rsp) movaps 0x5d0(%rsp), %xmm1 movd 0x5cc(%rsp), %xmm2 pslld %xmm2, %xmm1 movaps %xmm1, -0x20(%rsp) movaps -0x30(%rsp), %xmm1 movaps %xmm1, 0x630(%rsp) movaps %xmm0, 0x620(%rsp) movaps 0x630(%rsp), %xmm0 movaps 0x620(%rsp), %xmm1 pand %xmm1, %xmm0 movaps %xmm0, -0x30(%rsp) movaps -0x30(%rsp), %xmm1 xorps %xmm0, %xmm0 movaps %xmm0, 0x680(%rsp) movaps 0x680(%rsp), %xmm0 movaps %xmm1, 0x670(%rsp) movaps %xmm0, 0x660(%rsp) movaps 0x670(%rsp), %xmm0 movaps 0x660(%rsp), %xmm1 pcmpeqd %xmm1, %xmm0 movaps %xmm0, -0x30(%rsp) movaps -0x20(%rsp), %xmm0 movaps %xmm0, 0x5f0(%rsp) movaps 0x5f0(%rsp), %xmm0 movaps %xmm0, -0x40(%rsp) movaps -0x30(%rsp), %xmm0 movaps %xmm0, 0x5e0(%rsp) movaps 0x5e0(%rsp), %xmm0 movaps %xmm0, -0x50(%rsp) movaps 0x7d97b2(%rip), %xmm0 # 0x1e1ca40 movaps %xmm0, 0x20(%rsp) movaps 0x7d97b6(%rip), %xmm0 # 0x1e1ca50 movaps %xmm0, 0x10(%rsp) movaps 0x7d97ba(%rip), %xmm0 # 0x1e1ca60 movaps %xmm0, (%rsp) movaps -0x10(%rsp), %xmm2 movaps 0x20(%rsp), %xmm1 movaps 0x30(%rsp), %xmm0 movaps %xmm2, 0x550(%rsp) movaps %xmm1, 0x540(%rsp) movaps %xmm0, 0x530(%rsp) movaps 0x550(%rsp), %xmm1 movaps 0x540(%rsp), %xmm0 movaps %xmm1, 0x570(%rsp) movaps %xmm0, 0x560(%rsp) movaps 0x570(%rsp), %xmm1 movaps 0x560(%rsp), %xmm0 mulps %xmm0, %xmm1 movaps 0x530(%rsp), %xmm0 movaps %xmm1, 0x590(%rsp) movaps %xmm0, 0x580(%rsp) movaps 0x590(%rsp), %xmm0 movaps 0x580(%rsp), %xmm1 addps %xmm1, %xmm0 movaps %xmm0, 0x30(%rsp) movaps -0x10(%rsp), %xmm2 movaps 0x10(%rsp), %xmm1 movaps 0x30(%rsp), %xmm0 movaps %xmm2, 0x4e0(%rsp) movaps %xmm1, 0x4d0(%rsp) movaps %xmm0, 0x4c0(%rsp) movaps 0x4e0(%rsp), %xmm1 movaps 0x4d0(%rsp), %xmm0 movaps %xmm1, 0x500(%rsp) movaps %xmm0, 0x4f0(%rsp) movaps 0x500(%rsp), %xmm1 movaps 0x4f0(%rsp), %xmm0 mulps 
%xmm0, %xmm1 movaps 0x4c0(%rsp), %xmm0 movaps %xmm1, 0x520(%rsp) movaps %xmm0, 0x510(%rsp) movaps 0x520(%rsp), %xmm0 movaps 0x510(%rsp), %xmm1 addps %xmm1, %xmm0 movaps %xmm0, 0x30(%rsp) movaps -0x10(%rsp), %xmm2 movaps (%rsp), %xmm1 movaps 0x30(%rsp), %xmm0 movaps %xmm2, 0x470(%rsp) movaps %xmm1, 0x460(%rsp) movaps %xmm0, 0x450(%rsp) movaps 0x470(%rsp), %xmm1 movaps 0x460(%rsp), %xmm0 movaps %xmm1, 0x490(%rsp) movaps %xmm0, 0x480(%rsp) movaps 0x490(%rsp), %xmm1 movaps 0x480(%rsp), %xmm0 mulps %xmm0, %xmm1 movaps 0x450(%rsp), %xmm0 movaps %xmm1, 0x4b0(%rsp) movaps %xmm0, 0x4a0(%rsp) movaps 0x4b0(%rsp), %xmm0 movaps 0x4a0(%rsp), %xmm1 addps %xmm1, %xmm0 movaps %xmm0, 0x30(%rsp) movaps 0x7d9622(%rip), %xmm0 # 0x1e1ca70 movaps %xmm0, -0x10(%rsp) movaps 0x30(%rsp), %xmm0 movaps %xmm0, 0x140(%rsp) movaps %xmm0, 0x130(%rsp) movaps 0x140(%rsp), %xmm0 movaps 0x130(%rsp), %xmm1 mulps %xmm1, %xmm0 movaps %xmm0, -0x60(%rsp) movaps -0x10(%rsp), %xmm1 movaps -0x60(%rsp), %xmm0 movaps %xmm1, 0x400(%rsp) movaps %xmm0, 0x3f0(%rsp) movaps 0x7d95df(%rip), %xmm0 # 0x1e1ca80 movaps %xmm0, 0x3e0(%rsp) movaps 0x400(%rsp), %xmm1 movaps 0x3f0(%rsp), %xmm0 movaps %xmm1, 0x420(%rsp) movaps %xmm0, 0x410(%rsp) movaps 0x420(%rsp), %xmm1 movaps 0x410(%rsp), %xmm0 mulps %xmm0, %xmm1 movaps 0x3e0(%rsp), %xmm0 movaps %xmm1, 0x440(%rsp) movaps %xmm0, 0x430(%rsp) movaps 0x440(%rsp), %xmm0 movaps 0x430(%rsp), %xmm1 addps %xmm1, %xmm0 movaps %xmm0, -0x10(%rsp) movaps -0x10(%rsp), %xmm1 movaps -0x60(%rsp), %xmm0 movaps %xmm1, 0x390(%rsp) movaps %xmm0, 0x380(%rsp) movaps 0x7d9563(%rip), %xmm0 # 0x1e1ca90 movaps %xmm0, 0x370(%rsp) movaps 0x390(%rsp), %xmm1 movaps 0x380(%rsp), %xmm0 movaps %xmm1, 0x3b0(%rsp) movaps %xmm0, 0x3a0(%rsp) movaps 0x3b0(%rsp), %xmm1 movaps 0x3a0(%rsp), %xmm0 mulps %xmm0, %xmm1 movaps 0x370(%rsp), %xmm0 movaps %xmm1, 0x3d0(%rsp) movaps %xmm0, 0x3c0(%rsp) movaps 0x3d0(%rsp), %xmm0 movaps 0x3c0(%rsp), %xmm1 addps %xmm1, %xmm0 movaps %xmm0, -0x10(%rsp) movaps -0x10(%rsp), %xmm1 movaps -0x60(%rsp), %xmm0 movaps %xmm1, 0x120(%rsp) movaps %xmm0, 0x110(%rsp) movaps 0x120(%rsp), %xmm0 movaps 0x110(%rsp), %xmm1 mulps %xmm1, %xmm0 movaps %xmm0, -0x10(%rsp) movaps -0x10(%rsp), %xmm1 movaps -0x60(%rsp), %xmm0 movaps %xmm1, 0x100(%rsp) movaps %xmm0, 0xf0(%rsp) movaps 0x100(%rsp), %xmm0 movaps 0xf0(%rsp), %xmm1 mulps %xmm1, %xmm0 movaps %xmm0, -0x10(%rsp) movaps -0x60(%rsp), %xmm1 movaps -0x10(%rsp), %xmm0 movaps %xmm1, 0x1d0(%rsp) movaps 0x7c846b(%rip), %xmm1 # 0x1e0ba80 movaps %xmm1, 0x1c0(%rsp) movaps %xmm0, 0x1b0(%rsp) movaps 0x1b0(%rsp), %xmm1 movaps 0x1d0(%rsp), %xmm2 movaps 0x1c0(%rsp), %xmm0 movaps %xmm2, 0x210(%rsp) movaps %xmm0, 0x200(%rsp) movaps 0x210(%rsp), %xmm0 movaps 0x200(%rsp), %xmm2 mulps %xmm2, %xmm0 movaps %xmm1, 0x1f0(%rsp) movaps %xmm0, 0x1e0(%rsp) movaps 0x1f0(%rsp), %xmm0 movaps 0x1e0(%rsp), %xmm1 subps %xmm1, %xmm0 movaps %xmm0, -0x10(%rsp) movaps -0x10(%rsp), %xmm0 movaps %xmm0, 0x1a0(%rsp) movaps 0x7c83a4(%rip), %xmm0 # 0x1e0ba40 movaps %xmm0, 0x190(%rsp) movaps 0x1a0(%rsp), %xmm0 movaps 0x190(%rsp), %xmm1 addps %xmm1, %xmm0 movaps %xmm0, -0x10(%rsp) movaps 0x7d93dd(%rip), %xmm0 # 0x1e1caa0 movaps %xmm0, -0x70(%rsp) movaps -0x70(%rsp), %xmm1 movaps -0x60(%rsp), %xmm0 movaps %xmm1, 0x320(%rsp) movaps %xmm0, 0x310(%rsp) movaps 0x7d93c7(%rip), %xmm0 # 0x1e1cab0 movaps %xmm0, 0x300(%rsp) movaps 0x320(%rsp), %xmm1 movaps 0x310(%rsp), %xmm0 movaps %xmm1, 0x340(%rsp) movaps %xmm0, 0x330(%rsp) movaps 0x340(%rsp), %xmm1 movaps 0x330(%rsp), %xmm0 mulps %xmm0, %xmm1 movaps 0x300(%rsp), %xmm0 movaps 
%xmm1, 0x360(%rsp) movaps %xmm0, 0x350(%rsp) movaps 0x360(%rsp), %xmm0 movaps 0x350(%rsp), %xmm1 addps %xmm1, %xmm0 movaps %xmm0, -0x70(%rsp) movaps -0x70(%rsp), %xmm1 movaps -0x60(%rsp), %xmm0 movaps %xmm1, 0x2b0(%rsp) movaps %xmm0, 0x2a0(%rsp) movaps 0x7d934b(%rip), %xmm0 # 0x1e1cac0 movaps %xmm0, 0x290(%rsp) movaps 0x2b0(%rsp), %xmm1 movaps 0x2a0(%rsp), %xmm0 movaps %xmm1, 0x2d0(%rsp) movaps %xmm0, 0x2c0(%rsp) movaps 0x2d0(%rsp), %xmm1 movaps 0x2c0(%rsp), %xmm0 mulps %xmm0, %xmm1 movaps 0x290(%rsp), %xmm0 movaps %xmm1, 0x2f0(%rsp) movaps %xmm0, 0x2e0(%rsp) movaps 0x2f0(%rsp), %xmm0 movaps 0x2e0(%rsp), %xmm1 addps %xmm1, %xmm0 movaps %xmm0, -0x70(%rsp) movaps -0x70(%rsp), %xmm1 movaps -0x60(%rsp), %xmm0 movaps %xmm1, 0xe0(%rsp) movaps %xmm0, 0xd0(%rsp) movaps 0xe0(%rsp), %xmm0 movaps 0xd0(%rsp), %xmm1 mulps %xmm1, %xmm0 movaps %xmm0, -0x70(%rsp) movaps -0x70(%rsp), %xmm1 movaps 0x30(%rsp), %xmm0 movaps %xmm1, 0x240(%rsp) movaps %xmm0, 0x230(%rsp) movaps %xmm0, 0x220(%rsp) movaps 0x240(%rsp), %xmm1 movaps 0x230(%rsp), %xmm0 movaps %xmm1, 0x260(%rsp) movaps %xmm0, 0x250(%rsp) movaps 0x260(%rsp), %xmm1 movaps 0x250(%rsp), %xmm0 mulps %xmm0, %xmm1 movaps 0x220(%rsp), %xmm0 movaps %xmm1, 0x280(%rsp) movaps %xmm0, 0x270(%rsp) movaps 0x280(%rsp), %xmm0 movaps 0x270(%rsp), %xmm1 addps %xmm1, %xmm0 movaps %xmm0, -0x70(%rsp) movaps -0x50(%rsp), %xmm0 movaps %xmm0, (%rsp) movaps (%rsp), %xmm1 movaps -0x70(%rsp), %xmm0 movaps %xmm1, 0x80(%rsp) movaps %xmm0, 0x70(%rsp) movaps 0x80(%rsp), %xmm0 movaps 0x70(%rsp), %xmm1 pand %xmm1, %xmm0 movaps %xmm0, -0x70(%rsp) movaps (%rsp), %xmm1 movaps -0x10(%rsp), %xmm0 movaps %xmm1, 0x50(%rsp) movaps %xmm0, 0x40(%rsp) movaps 0x50(%rsp), %xmm0 pcmpeqd %xmm1, %xmm1 pxor %xmm1, %xmm0 movaps 0x40(%rsp), %xmm1 pand %xmm1, %xmm0 movaps %xmm0, -0x10(%rsp) movaps -0x10(%rsp), %xmm1 movaps -0x70(%rsp), %xmm0 movaps %xmm1, 0x180(%rsp) movaps %xmm0, 0x170(%rsp) movaps 0x180(%rsp), %xmm0 addps 0x170(%rsp), %xmm0 movaps %xmm0, -0x10(%rsp) movaps -0x10(%rsp), %xmm1 movaps -0x40(%rsp), %xmm0 movaps %xmm1, 0x6a0(%rsp) movaps %xmm0, 0x690(%rsp) movaps 0x6a0(%rsp), %xmm0 movaps 0x690(%rsp), %xmm1 pxor %xmm1, %xmm0 movaps %xmm0, -0x10(%rsp) movaps -0x10(%rsp), %xmm0 addq $0x6d8, %rsp # imm = 0x6D8 retq nopl (%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/src/layer/x86/unaryop_x86.cpp
2,113,219
ncnn::UnaryOp_x86_functor::unary_op_cos::func(float const&) const
float func(const float& x) const { return (float)cos(x); }
subq $0x18, %rsp movq %rdi, 0x10(%rsp) movq %rsi, 0x8(%rsp) movq 0x8(%rsp), %rax movss (%rax), %xmm0 callq 0x163a2b0 addq $0x18, %rsp retq nopw %cs:(%rax,%rax) nopl (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/src/layer/x86/unaryop_x86.cpp
2,113,220
ncnn::UnaryOp_x86_functor::unary_op_tan::func_pack4(float vector[4] const&) const
__m128 func_pack4(const __m128& x) const { return tan_ps(x); }
subq $0x968, %rsp # imm = 0x968 movq %rdi, -0x78(%rsp) movq %rsi, -0x80(%rsp) movq -0x80(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm0, -0x10(%rsp) movl $0x322bcc77, 0x1c(%rsp) # imm = 0x322BCC77 movss 0x1c(%rsp), %xmm0 shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0] movaps %xmm0, (%rsp) movaps (%rsp), %xmm0 movaps %xmm0, -0x40(%rsp) movaps -0x10(%rsp), %xmm0 movaps %xmm0, 0x170(%rsp) leaq -0x20(%rsp), %rax movq %rax, 0x168(%rsp) leaq -0x30(%rsp), %rax movq %rax, 0x160(%rsp) xorps %xmm0, %xmm0 movaps %xmm0, 0x1e0(%rsp) movaps 0x1e0(%rsp), %xmm1 movaps %xmm1, 0x130(%rsp) movaps 0x170(%rsp), %xmm1 movaps %xmm1, 0x120(%rsp) movaps 0x170(%rsp), %xmm1 movaps %xmm1, 0x240(%rsp) movaps 0x7d46da(%rip), %xmm1 # 0x1e18120 movaps %xmm1, 0x230(%rsp) movaps 0x240(%rsp), %xmm1 movaps 0x230(%rsp), %xmm2 pand %xmm2, %xmm1 movaps %xmm1, 0x170(%rsp) movaps 0x120(%rsp), %xmm1 movaps %xmm1, 0x220(%rsp) movaps 0x7cc0df(%rip), %xmm1 # 0x1e0fb60 movaps %xmm1, 0x210(%rsp) movaps 0x220(%rsp), %xmm1 movaps 0x210(%rsp), %xmm2 pand %xmm2, %xmm1 movaps %xmm1, 0x120(%rsp) movaps 0x170(%rsp), %xmm1 movaps %xmm1, 0x300(%rsp) movaps 0x7d8f34(%rip), %xmm1 # 0x1e1c9f0 movaps %xmm1, 0x2f0(%rsp) movaps 0x300(%rsp), %xmm1 movaps 0x2f0(%rsp), %xmm2 mulps %xmm2, %xmm1 movaps %xmm1, 0x110(%rsp) movaps 0x110(%rsp), %xmm1 movaps %xmm1, 0x260(%rsp) cvttps2dq 0x260(%rsp), %xmm1 movaps %xmm1, 0xf0(%rsp) movaps 0xf0(%rsp), %xmm1 movaps %xmm1, 0x770(%rsp) movaps 0x7d8ee9(%rip), %xmm1 # 0x1e1ca00 movaps %xmm1, 0x760(%rsp) movaps 0x770(%rsp), %xmm1 movaps 0x760(%rsp), %xmm2 paddd %xmm2, %xmm1 movaps %xmm1, 0xf0(%rsp) movaps 0xf0(%rsp), %xmm1 movaps %xmm1, 0x860(%rsp) movaps 0x7d8ebe(%rip), %xmm1 # 0x1e1ca10 movaps %xmm1, 0x850(%rsp) movaps 0x860(%rsp), %xmm1 movaps 0x850(%rsp), %xmm2 pand %xmm2, %xmm1 movaps %xmm1, 0xf0(%rsp) movaps 0xf0(%rsp), %xmm1 movaps %xmm1, 0x250(%rsp) cvtdq2ps 0x250(%rsp), %xmm1 movaps %xmm1, 0x110(%rsp) movaps 0xf0(%rsp), %xmm1 movaps %xmm1, 0xe0(%rsp) movaps 0xf0(%rsp), %xmm1 movaps %xmm1, 0x840(%rsp) movaps 0x7d8e63(%rip), %xmm1 # 0x1e1ca20 movaps %xmm1, 0x830(%rsp) movaps 0x840(%rsp), %xmm2 movaps 0x830(%rsp), %xmm3 pand %xmm3, %xmm2 movaps %xmm2, 0x100(%rsp) movaps 0x100(%rsp), %xmm2 movaps %xmm2, 0x7b0(%rsp) movl $0x1d, 0x7ac(%rsp) movaps 0x7b0(%rsp), %xmm2 movd 0x7ac(%rsp), %xmm3 pslld %xmm3, %xmm2 movaps %xmm2, 0x100(%rsp) movaps 0x100(%rsp), %xmm2 movaps %xmm2, 0x7e0(%rsp) movaps 0x7e0(%rsp), %xmm2 movaps %xmm2, 0xd0(%rsp) movaps 0xf0(%rsp), %xmm2 movaps %xmm2, 0x820(%rsp) movaps 0x7d8de0(%rip), %xmm2 # 0x1e1ca30 movaps %xmm2, 0x810(%rsp) movaps 0x820(%rsp), %xmm3 movaps 0x810(%rsp), %xmm4 pand %xmm4, %xmm3 movaps %xmm3, 0xf0(%rsp) movaps 0xf0(%rsp), %xmm4 xorps %xmm3, %xmm3 movaps %xmm3, 0x890(%rsp) movaps 0x890(%rsp), %xmm3 movaps %xmm4, 0x880(%rsp) movaps %xmm3, 0x870(%rsp) movaps 0x880(%rsp), %xmm3 movaps 0x870(%rsp), %xmm4 pcmpeqd %xmm4, %xmm3 movaps %xmm3, 0xf0(%rsp) movaps 0xf0(%rsp), %xmm3 movaps %xmm3, 0x7d0(%rsp) movaps 0x7d0(%rsp), %xmm3 movaps %xmm3, 0xc0(%rsp) movaps 0x7d8d5e(%rip), %xmm3 # 0x1e1ca40 movaps %xmm3, 0x150(%rsp) movaps 0x7d8d5f(%rip), %xmm3 # 0x1e1ca50 movaps %xmm3, 0x140(%rsp) movaps 0x7d8d60(%rip), %xmm3 # 0x1e1ca60 movaps %xmm3, 0x130(%rsp) movaps 0x110(%rsp), %xmm5 movaps 0x150(%rsp), %xmm4 movaps 0x170(%rsp), %xmm3 movaps %xmm5, 0x710(%rsp) movaps %xmm4, 0x700(%rsp) movaps %xmm3, 0x6f0(%rsp) movaps 0x710(%rsp), %xmm4 movaps 0x700(%rsp), %xmm3 movaps %xmm4, 0x730(%rsp) movaps %xmm3, 0x720(%rsp) movaps 0x730(%rsp), %xmm4 movaps 0x720(%rsp), %xmm3 mulps %xmm3, %xmm4 movaps 
0x6f0(%rsp), %xmm3 movaps %xmm4, 0x750(%rsp) movaps %xmm3, 0x740(%rsp) movaps 0x750(%rsp), %xmm3 movaps 0x740(%rsp), %xmm4 addps %xmm4, %xmm3 movaps %xmm3, 0x170(%rsp) movaps 0x110(%rsp), %xmm5 movaps 0x140(%rsp), %xmm4 movaps 0x170(%rsp), %xmm3 movaps %xmm5, 0x6a0(%rsp) movaps %xmm4, 0x690(%rsp) movaps %xmm3, 0x680(%rsp) movaps 0x6a0(%rsp), %xmm4 movaps 0x690(%rsp), %xmm3 movaps %xmm4, 0x6c0(%rsp) movaps %xmm3, 0x6b0(%rsp) movaps 0x6c0(%rsp), %xmm4 movaps 0x6b0(%rsp), %xmm3 mulps %xmm3, %xmm4 movaps 0x680(%rsp), %xmm3 movaps %xmm4, 0x6e0(%rsp) movaps %xmm3, 0x6d0(%rsp) movaps 0x6e0(%rsp), %xmm3 movaps 0x6d0(%rsp), %xmm4 addps %xmm4, %xmm3 movaps %xmm3, 0x170(%rsp) movaps 0x110(%rsp), %xmm5 movaps 0x130(%rsp), %xmm4 movaps 0x170(%rsp), %xmm3 movaps %xmm5, 0x630(%rsp) movaps %xmm4, 0x620(%rsp) movaps %xmm3, 0x610(%rsp) movaps 0x630(%rsp), %xmm4 movaps 0x620(%rsp), %xmm3 movaps %xmm4, 0x650(%rsp) movaps %xmm3, 0x640(%rsp) movaps 0x650(%rsp), %xmm4 movaps 0x640(%rsp), %xmm3 mulps %xmm3, %xmm4 movaps 0x610(%rsp), %xmm3 movaps %xmm4, 0x670(%rsp) movaps %xmm3, 0x660(%rsp) movaps 0x670(%rsp), %xmm3 movaps 0x660(%rsp), %xmm4 addps %xmm4, %xmm3 movaps %xmm3, 0x170(%rsp) movaps 0xe0(%rsp), %xmm3 movaps %xmm3, 0x800(%rsp) movaps %xmm2, 0x7f0(%rsp) movaps 0x800(%rsp), %xmm2 movaps 0x7f0(%rsp), %xmm3 psubd %xmm3, %xmm2 movaps %xmm2, 0xe0(%rsp) movaps 0xe0(%rsp), %xmm2 movaps %xmm2, 0x910(%rsp) movaps %xmm1, 0x900(%rsp) movaps 0x910(%rsp), %xmm1 movaps 0x900(%rsp), %xmm2 pandn %xmm2, %xmm1 movaps %xmm1, 0xe0(%rsp) movaps 0xe0(%rsp), %xmm1 movaps %xmm1, 0x790(%rsp) movl $0x1d, 0x78c(%rsp) movaps 0x790(%rsp), %xmm1 movd 0x78c(%rsp), %xmm2 pslld %xmm2, %xmm1 movaps %xmm1, 0xe0(%rsp) movaps 0xe0(%rsp), %xmm1 movaps %xmm1, 0x7c0(%rsp) movaps 0x7c0(%rsp), %xmm1 movaps %xmm1, 0xb0(%rsp) movaps 0x120(%rsp), %xmm2 movaps 0xd0(%rsp), %xmm1 movaps %xmm2, 0x8f0(%rsp) movaps %xmm1, 0x8e0(%rsp) movaps 0x8f0(%rsp), %xmm1 movaps 0x8e0(%rsp), %xmm2 pxor %xmm2, %xmm1 movaps %xmm1, 0x120(%rsp) movaps 0x170(%rsp), %xmm1 movaps %xmm1, 0x2e0(%rsp) movaps %xmm1, 0x2d0(%rsp) movaps 0x2e0(%rsp), %xmm1 movaps 0x2d0(%rsp), %xmm2 mulps %xmm2, %xmm1 movaps %xmm1, 0xa0(%rsp) movaps 0x7d8a70(%rip), %xmm1 # 0x1e1ca70 movaps %xmm1, 0x110(%rsp) movaps 0x110(%rsp), %xmm2 movaps 0xa0(%rsp), %xmm1 movaps %xmm2, 0x5c0(%rsp) movaps %xmm1, 0x5b0(%rsp) movaps 0x7d8a51(%rip), %xmm1 # 0x1e1ca80 movaps %xmm1, 0x5a0(%rsp) movaps 0x5c0(%rsp), %xmm2 movaps 0x5b0(%rsp), %xmm1 movaps %xmm2, 0x5e0(%rsp) movaps %xmm1, 0x5d0(%rsp) movaps 0x5e0(%rsp), %xmm2 movaps 0x5d0(%rsp), %xmm1 mulps %xmm1, %xmm2 movaps 0x5a0(%rsp), %xmm1 movaps %xmm2, 0x600(%rsp) movaps %xmm1, 0x5f0(%rsp) movaps 0x600(%rsp), %xmm1 movaps 0x5f0(%rsp), %xmm2 addps %xmm2, %xmm1 movaps %xmm1, 0x110(%rsp) movaps 0x110(%rsp), %xmm2 movaps 0xa0(%rsp), %xmm1 movaps %xmm2, 0x550(%rsp) movaps %xmm1, 0x540(%rsp) movaps 0x7d89cc(%rip), %xmm1 # 0x1e1ca90 movaps %xmm1, 0x530(%rsp) movaps 0x550(%rsp), %xmm2 movaps 0x540(%rsp), %xmm1 movaps %xmm2, 0x570(%rsp) movaps %xmm1, 0x560(%rsp) movaps 0x570(%rsp), %xmm2 movaps 0x560(%rsp), %xmm1 mulps %xmm1, %xmm2 movaps 0x530(%rsp), %xmm1 movaps %xmm2, 0x590(%rsp) movaps %xmm1, 0x580(%rsp) movaps 0x590(%rsp), %xmm1 movaps 0x580(%rsp), %xmm2 addps %xmm2, %xmm1 movaps %xmm1, 0x110(%rsp) movaps 0x110(%rsp), %xmm2 movaps 0xa0(%rsp), %xmm1 movaps %xmm2, 0x2c0(%rsp) movaps %xmm1, 0x2b0(%rsp) movaps 0x2c0(%rsp), %xmm1 movaps 0x2b0(%rsp), %xmm2 mulps %xmm2, %xmm1 movaps %xmm1, 0x110(%rsp) movaps 0x110(%rsp), %xmm2 movaps 0xa0(%rsp), %xmm1 movaps %xmm2, 0x2a0(%rsp) 
movaps %xmm1, 0x290(%rsp) movaps 0x2a0(%rsp), %xmm1 movaps 0x290(%rsp), %xmm2 mulps %xmm2, %xmm1 movaps %xmm1, 0x110(%rsp) movaps 0xa0(%rsp), %xmm2 movaps 0x110(%rsp), %xmm1 movaps %xmm2, 0x390(%rsp) movaps 0x7c78b9(%rip), %xmm2 # 0x1e0ba80 movaps %xmm2, 0x380(%rsp) movaps %xmm1, 0x370(%rsp) movaps 0x370(%rsp), %xmm2 movaps 0x390(%rsp), %xmm3 movaps 0x380(%rsp), %xmm1 movaps %xmm3, 0x3d0(%rsp) movaps %xmm1, 0x3c0(%rsp) movaps 0x3d0(%rsp), %xmm1 movaps 0x3c0(%rsp), %xmm3 mulps %xmm3, %xmm1 movaps %xmm2, 0x3b0(%rsp) movaps %xmm1, 0x3a0(%rsp) movaps 0x3b0(%rsp), %xmm1 movaps 0x3a0(%rsp), %xmm2 subps %xmm2, %xmm1 movaps %xmm1, 0x110(%rsp) movaps 0x110(%rsp), %xmm1 movaps %xmm1, 0x360(%rsp) movaps 0x7c77ec(%rip), %xmm1 # 0x1e0ba40 movaps %xmm1, 0x350(%rsp) movaps 0x360(%rsp), %xmm1 movaps 0x350(%rsp), %xmm2 addps %xmm2, %xmm1 movaps %xmm1, 0x110(%rsp) movaps 0x7d8822(%rip), %xmm1 # 0x1e1caa0 movaps %xmm1, 0x90(%rsp) movaps 0x90(%rsp), %xmm2 movaps 0xa0(%rsp), %xmm1 movaps %xmm2, 0x4e0(%rsp) movaps %xmm1, 0x4d0(%rsp) movaps 0x7d8803(%rip), %xmm1 # 0x1e1cab0 movaps %xmm1, 0x4c0(%rsp) movaps 0x4e0(%rsp), %xmm2 movaps 0x4d0(%rsp), %xmm1 movaps %xmm2, 0x500(%rsp) movaps %xmm1, 0x4f0(%rsp) movaps 0x500(%rsp), %xmm2 movaps 0x4f0(%rsp), %xmm1 mulps %xmm1, %xmm2 movaps 0x4c0(%rsp), %xmm1 movaps %xmm2, 0x520(%rsp) movaps %xmm1, 0x510(%rsp) movaps 0x520(%rsp), %xmm1 movaps 0x510(%rsp), %xmm2 addps %xmm2, %xmm1 movaps %xmm1, 0x90(%rsp) movaps 0x90(%rsp), %xmm2 movaps 0xa0(%rsp), %xmm1 movaps %xmm2, 0x470(%rsp) movaps %xmm1, 0x460(%rsp) movaps 0x7d877e(%rip), %xmm1 # 0x1e1cac0 movaps %xmm1, 0x450(%rsp) movaps 0x470(%rsp), %xmm2 movaps 0x460(%rsp), %xmm1 movaps %xmm2, 0x490(%rsp) movaps %xmm1, 0x480(%rsp) movaps 0x490(%rsp), %xmm2 movaps 0x480(%rsp), %xmm1 mulps %xmm1, %xmm2 movaps 0x450(%rsp), %xmm1 movaps %xmm2, 0x4b0(%rsp) movaps %xmm1, 0x4a0(%rsp) movaps 0x4b0(%rsp), %xmm1 movaps 0x4a0(%rsp), %xmm2 addps %xmm2, %xmm1 movaps %xmm1, 0x90(%rsp) movaps 0x90(%rsp), %xmm2 movaps 0xa0(%rsp), %xmm1 movaps %xmm2, 0x280(%rsp) movaps %xmm1, 0x270(%rsp) movaps 0x280(%rsp), %xmm1 movaps 0x270(%rsp), %xmm2 mulps %xmm2, %xmm1 movaps %xmm1, 0x90(%rsp) movaps 0x90(%rsp), %xmm2 movaps 0x170(%rsp), %xmm1 movaps %xmm2, 0x400(%rsp) movaps %xmm1, 0x3f0(%rsp) movaps %xmm1, 0x3e0(%rsp) movaps 0x400(%rsp), %xmm2 movaps 0x3f0(%rsp), %xmm1 movaps %xmm2, 0x420(%rsp) movaps %xmm1, 0x410(%rsp) movaps 0x420(%rsp), %xmm2 movaps 0x410(%rsp), %xmm1 mulps %xmm1, %xmm2 movaps 0x3e0(%rsp), %xmm1 movaps %xmm2, 0x440(%rsp) movaps %xmm1, 0x430(%rsp) movaps 0x440(%rsp), %xmm1 movaps 0x430(%rsp), %xmm2 addps %xmm2, %xmm1 movaps %xmm1, 0x90(%rsp) movaps 0xc0(%rsp), %xmm1 movaps %xmm1, 0x130(%rsp) movaps 0x130(%rsp), %xmm2 movaps 0x90(%rsp), %xmm1 movaps %xmm2, 0x200(%rsp) movaps %xmm1, 0x1f0(%rsp) movaps 0x200(%rsp), %xmm1 movaps 0x1f0(%rsp), %xmm2 pand %xmm2, %xmm1 movaps %xmm1, 0x80(%rsp) movaps 0x130(%rsp), %xmm2 movaps 0x110(%rsp), %xmm1 movaps %xmm2, 0x190(%rsp) movaps %xmm1, 0x180(%rsp) movaps 0x190(%rsp), %xmm1 movaps 0x180(%rsp), %xmm2 pandn %xmm2, %xmm1 movaps %xmm1, 0x70(%rsp) movaps 0x90(%rsp), %xmm2 movaps 0x80(%rsp), %xmm1 movaps %xmm2, 0x1d0(%rsp) movaps %xmm1, 0x1c0(%rsp) movaps 0x1d0(%rsp), %xmm1 movaps 0x1c0(%rsp), %xmm2 subps %xmm2, %xmm1 movaps %xmm1, 0x90(%rsp) movaps 0x110(%rsp), %xmm2 movaps 0x70(%rsp), %xmm1 movaps %xmm2, 0x1b0(%rsp) movaps %xmm1, 0x1a0(%rsp) movaps 0x1b0(%rsp), %xmm1 movaps 0x1a0(%rsp), %xmm2 subps %xmm2, %xmm1 movaps %xmm1, 0x110(%rsp) movaps 0x70(%rsp), %xmm2 movaps 0x80(%rsp), %xmm1 movaps %xmm2, 
0x340(%rsp) movaps %xmm1, 0x330(%rsp) movaps 0x340(%rsp), %xmm1 movaps 0x330(%rsp), %xmm2 addps %xmm2, %xmm1 movaps %xmm1, 0x150(%rsp) movaps 0x110(%rsp), %xmm2 movaps 0x90(%rsp), %xmm1 movaps %xmm2, 0x320(%rsp) movaps %xmm1, 0x310(%rsp) movaps 0x320(%rsp), %xmm1 movaps 0x310(%rsp), %xmm2 addps %xmm2, %xmm1 movaps %xmm1, 0x140(%rsp) movaps 0x150(%rsp), %xmm2 movaps 0x120(%rsp), %xmm1 movaps %xmm2, 0x8d0(%rsp) movaps %xmm1, 0x8c0(%rsp) movaps 0x8d0(%rsp), %xmm1 movaps 0x8c0(%rsp), %xmm2 pxor %xmm2, %xmm1 movq 0x168(%rsp), %rax movaps %xmm1, (%rax) movaps 0x140(%rsp), %xmm2 movaps 0xb0(%rsp), %xmm1 movaps %xmm2, 0x8b0(%rsp) movaps %xmm1, 0x8a0(%rsp) movaps 0x8b0(%rsp), %xmm1 movaps 0x8a0(%rsp), %xmm2 pxor %xmm2, %xmm1 movq 0x160(%rsp), %rax movaps %xmm1, (%rax) movaps -0x30(%rsp), %xmm1 movaps %xmm0, 0x20(%rsp) movaps 0x20(%rsp), %xmm0 movaps %xmm1, 0x930(%rsp) movaps %xmm0, 0x920(%rsp) movaps 0x930(%rsp), %xmm0 movaps 0x920(%rsp), %xmm1 cmpeqps %xmm1, %xmm0 movaps %xmm0, -0x50(%rsp) movaps -0x40(%rsp), %xmm1 movaps -0x50(%rsp), %xmm0 movaps %xmm1, 0x40(%rsp) movaps %xmm0, 0x30(%rsp) movaps 0x40(%rsp), %xmm0 movaps 0x30(%rsp), %xmm1 pand %xmm1, %xmm0 movaps %xmm0, -0x60(%rsp) movaps -0x30(%rsp), %xmm1 movaps -0x60(%rsp), %xmm0 movaps %xmm1, 0x60(%rsp) movaps %xmm0, 0x50(%rsp) movaps 0x60(%rsp), %xmm0 addps 0x50(%rsp), %xmm0 movaps %xmm0, -0x30(%rsp) movaps -0x20(%rsp), %xmm1 movaps -0x30(%rsp), %xmm0 movaps %xmm1, 0x950(%rsp) movaps %xmm0, 0x940(%rsp) movaps 0x950(%rsp), %xmm0 divps 0x940(%rsp), %xmm0 movaps %xmm0, -0x70(%rsp) movaps -0x70(%rsp), %xmm0 addq $0x968, %rsp # imm = 0x968 retq
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/src/layer/x86/unaryop_x86.cpp
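Note on the listing above: it is the tail of unary_op_tan::func_pack4. The paired stores through two saved pointers followed by the final divps suggest a sincos_ps-style evaluation with tan formed as sin/cos. A minimal sketch of that approach, assuming sse_mathfun's sincos_ps and not necessarily the exact routine compiled here:

#include <xmmintrin.h>
#include "sse_mathfun.h" // assumed available; provides sincos_ps (v4sf == __m128)

static inline __m128 tan_pack4_sketch(const __m128& x)
{
    __m128 s, c;
    sincos_ps(x, &s, &c);    // one shared polynomial pass yields both sin and cos
    return _mm_div_ps(s, c); // tan(x) = sin(x) / cos(x)
}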
2,113,221
ncnn::UnaryOp_x86_functor::unary_op_tan::func(float const&) const
float func(const float& x) const { return (float)tan(x); }
subq $0x18, %rsp movq %rdi, 0x10(%rsp) movq %rsi, 0x8(%rsp) movq 0x8(%rsp), %rax movss (%rax), %xmm0 callq 0x163a300 addq $0x18, %rsp retq nopw %cs:(%rax,%rax) nopl (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/src/layer/x86/unaryop_x86.cpp
2,113,222
ncnn::UnaryOp_x86_functor::unary_op_asin::func_pack4(float vector[4] const&) const
__m128 func_pack4(const __m128& x) const { /* TODO sse optimize */ float tmp[4]; _mm_storeu_ps(tmp, x); tmp[0] = asin(tmp[0]); tmp[1] = asin(tmp[1]); tmp[2] = asin(tmp[2]); tmp[3] = asin(tmp[3]); return _mm_loadu_ps(tmp); }
subq $0x48, %rsp movq %rdi, 0x18(%rsp) movq %rsi, 0x10(%rsp) movq %rsp, %rax movq 0x10(%rsp), %rcx movaps (%rcx), %xmm0 movq %rax, 0x38(%rsp) movaps %xmm0, 0x20(%rsp) movaps 0x20(%rsp), %xmm0 movq 0x38(%rsp), %rax movups %xmm0, (%rax) movss (%rsp), %xmm0 callq 0x163a350 movss %xmm0, (%rsp) movss 0x4(%rsp), %xmm0 callq 0x163a350 movss %xmm0, 0x4(%rsp) movss 0x8(%rsp), %xmm0 callq 0x163a350 movss %xmm0, 0x8(%rsp) movss 0xc(%rsp), %xmm0 callq 0x163a350 movss %xmm0, 0xc(%rsp) movq %rsp, %rax movq %rax, 0x40(%rsp) movq 0x40(%rsp), %rax movups (%rax), %xmm0 addq $0x48, %rsp retq nopw (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/src/layer/x86/unaryop_x86.cpp
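The func_pack4 above (and the acos/atan records that follow) uses the spill-to-scalar fallback flagged by its TODO comment: store the four lanes to memory, call the libm routine per lane, reload. A minimal standalone sketch of the pattern, with asinf standing in for any scalar function:

#include <math.h>
#include <xmmintrin.h>

// Spill-to-scalar fallback: correct for any scalar math routine,
// but unvectorized, so each lane pays the full libm call cost.
static inline __m128 asin_pack4_sketch(const __m128& x)
{
    float tmp[4];
    _mm_storeu_ps(tmp, x);      // spill the vector
    for (int i = 0; i < 4; i++)
        tmp[i] = asinf(tmp[i]); // per-lane libm call
    return _mm_loadu_ps(tmp);   // reload the results
}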
2,113,223
ncnn::UnaryOp_x86_functor::unary_op_asin::func(float const&) const
float func(const float& x) const { return (float)asin(x); }
subq $0x18, %rsp movq %rdi, 0x10(%rsp) movq %rsi, 0x8(%rsp) movq 0x8(%rsp), %rax movss (%rax), %xmm0 callq 0x163a350 addq $0x18, %rsp retq nopw %cs:(%rax,%rax) nopl (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/src/layer/x86/unaryop_x86.cpp
2,113,224
ncnn::UnaryOp_x86_functor::unary_op_acos::func_pack4(float vector[4] const&) const
__m128 func_pack4(const __m128& x) const { /* TODO sse optimize */ float tmp[4]; _mm_storeu_ps(tmp, x); tmp[0] = acos(tmp[0]); tmp[1] = acos(tmp[1]); tmp[2] = acos(tmp[2]); tmp[3] = acos(tmp[3]); return _mm_loadu_ps(tmp); }
subq $0x48, %rsp movq %rdi, 0x18(%rsp) movq %rsi, 0x10(%rsp) movq %rsp, %rax movq 0x10(%rsp), %rcx movaps (%rcx), %xmm0 movq %rax, 0x38(%rsp) movaps %xmm0, 0x20(%rsp) movaps 0x20(%rsp), %xmm0 movq 0x38(%rsp), %rax movups %xmm0, (%rax) movss (%rsp), %xmm0 callq 0x163a3a0 movss %xmm0, (%rsp) movss 0x4(%rsp), %xmm0 callq 0x163a3a0 movss %xmm0, 0x4(%rsp) movss 0x8(%rsp), %xmm0 callq 0x163a3a0 movss %xmm0, 0x8(%rsp) movss 0xc(%rsp), %xmm0 callq 0x163a3a0 movss %xmm0, 0xc(%rsp) movq %rsp, %rax movq %rax, 0x40(%rsp) movq 0x40(%rsp), %rax movups (%rax), %xmm0 addq $0x48, %rsp retq nopw (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/src/layer/x86/unaryop_x86.cpp
2,113,225
ncnn::UnaryOp_x86_functor::unary_op_acos::func(float const&) const
float func(const float& x) const { return (float)acos(x); }
subq $0x18, %rsp movq %rdi, 0x10(%rsp) movq %rsi, 0x8(%rsp) movq 0x8(%rsp), %rax movss (%rax), %xmm0 callq 0x163a3a0 addq $0x18, %rsp retq nopw %cs:(%rax,%rax) nopl (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/src/layer/x86/unaryop_x86.cpp
2,113,226
ncnn::UnaryOp_x86_functor::unary_op_atan::func_pack4(float vector[4] const&) const
__m128 func_pack4(const __m128& x) const { /* TODO sse optimize */ float tmp[4]; _mm_storeu_ps(tmp, x); tmp[0] = atan(tmp[0]); tmp[1] = atan(tmp[1]); tmp[2] = atan(tmp[2]); tmp[3] = atan(tmp[3]); return _mm_loadu_ps(tmp); }
subq $0x48, %rsp movq %rdi, 0x18(%rsp) movq %rsi, 0x10(%rsp) movq %rsp, %rax movq 0x10(%rsp), %rcx movaps (%rcx), %xmm0 movq %rax, 0x38(%rsp) movaps %xmm0, 0x20(%rsp) movaps 0x20(%rsp), %xmm0 movq 0x38(%rsp), %rax movups %xmm0, (%rax) movss (%rsp), %xmm0 callq 0x163a3f0 movss %xmm0, (%rsp) movss 0x4(%rsp), %xmm0 callq 0x163a3f0 movss %xmm0, 0x4(%rsp) movss 0x8(%rsp), %xmm0 callq 0x163a3f0 movss %xmm0, 0x8(%rsp) movss 0xc(%rsp), %xmm0 callq 0x163a3f0 movss %xmm0, 0xc(%rsp) movq %rsp, %rax movq %rax, 0x40(%rsp) movq 0x40(%rsp), %rax movups (%rax), %xmm0 addq $0x48, %rsp retq nopw (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/src/layer/x86/unaryop_x86.cpp
2,113,227
ncnn::UnaryOp_x86_functor::unary_op_atan::func(float const&) const
float func(const float& x) const { return (float)atan(x); }
subq $0x18, %rsp movq %rdi, 0x10(%rsp) movq %rsi, 0x8(%rsp) movq 0x8(%rsp), %rax movss (%rax), %xmm0 callq 0x163a3f0 addq $0x18, %rsp retq nopw %cs:(%rax,%rax) nopl (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/src/layer/x86/unaryop_x86.cpp
2,113,228
ncnn::UnaryOp_x86_functor::unary_op_reciprocal::func_pack4(float vector[4] const&) const
__m128 func_pack4(const __m128& x) const { return _mm_div_ps(*(__m128*)_ps_1, x); }
movq %rdi, -0x30(%rsp) movq %rsi, -0x38(%rsp) movaps 0x7d814f(%rip), %xmm1 # 0x1e1caf0 movq -0x38(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, -0x18(%rsp) movaps %xmm0, -0x28(%rsp) movaps -0x18(%rsp), %xmm0 divps -0x28(%rsp), %xmm0 retq nop
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/src/layer/x86/unaryop_x86.cpp
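func_pack4 above computes an exact reciprocal by dividing the _ps_1 vector of ones by x. Where a full divps is too slow and roughly 23-bit accuracy suffices, a common alternative (not what this code does) is _mm_rcp_ps refined with one Newton-Raphson step:

#include <xmmintrin.h>

// Alternative sketch: _mm_rcp_ps gives ~12-bit reciprocal estimates;
// one Newton-Raphson step, r' = r * (2 - x*r), brings that to ~23 bits
// without issuing a full-latency divide.
static inline __m128 reciprocal_pack4_approx(__m128 x)
{
    __m128 r = _mm_rcp_ps(x);
    __m128 two = _mm_set1_ps(2.0f);
    r = _mm_mul_ps(r, _mm_sub_ps(two, _mm_mul_ps(x, r)));
    return r;
}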
2,113,229
ncnn::UnaryOp_x86_functor::unary_op_reciprocal::func(float const&) const
float func(const float& x) const { return 1.f / x; }
movq %rdi, -0x8(%rsp) movq %rsi, -0x10(%rsp) movq -0x10(%rsp), %rax movss 0x7c462d(%rip), %xmm0 # 0x1e09004 divss (%rax), %xmm0 retq nopl (%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/src/layer/x86/unaryop_x86.cpp
2,113,230
ncnn::UnaryOp_x86_functor::unary_op_tanh::func_pack4(float vector[4] const&) const
__m128 func_pack4(const __m128& x) const { return tanh_sse(x); }
subq $0x6e8, %rsp # imm = 0x6E8 movq %rdi, -0x78(%rsp) movq %rsi, -0x80(%rsp) movq -0x80(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm0, -0x50(%rsp) movl $0x3f800000, -0x4(%rsp) # imm = 0x3F800000 movss -0x4(%rsp), %xmm0 shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0] movaps %xmm0, -0x20(%rsp) movaps -0x20(%rsp), %xmm0 movaps %xmm0, -0x60(%rsp) movl $0x40000000, -0x24(%rsp) # imm = 0x40000000 movss -0x24(%rsp), %xmm0 shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0] movaps %xmm0, -0x40(%rsp) movaps -0x40(%rsp), %xmm0 movaps %xmm0, -0x70(%rsp) movaps -0x50(%rsp), %xmm1 movaps -0x70(%rsp), %xmm0 movaps %xmm1, 0x50(%rsp) movaps %xmm0, 0x40(%rsp) movaps 0x50(%rsp), %xmm0 movaps 0x40(%rsp), %xmm1 mulps %xmm1, %xmm0 movaps %xmm0, 0x70(%rsp) movl $0x3f800000, 0x9c(%rsp) # imm = 0x3F800000 movss 0x9c(%rsp), %xmm0 shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0] movaps %xmm0, 0x80(%rsp) movaps 0x80(%rsp), %xmm0 movaps %xmm0, 0x60(%rsp) movaps 0x60(%rsp), %xmm2 movaps %xmm2, %xmm1 xorps %xmm0, %xmm0 movaps %xmm0, 0xc0(%rsp) movaps 0xc0(%rsp), %xmm4 movaps 0x70(%rsp), %xmm3 movaps %xmm4, 0xb0(%rsp) movaps %xmm3, 0xa0(%rsp) movaps 0xb0(%rsp), %xmm3 movaps 0xa0(%rsp), %xmm4 subps %xmm4, %xmm3 movaps %xmm3, 0x140(%rsp) movaps %xmm0, 0x170(%rsp) movaps 0x170(%rsp), %xmm0 movaps %xmm0, 0x130(%rsp) movaps 0x7c6f43(%rip), %xmm0 # 0x1e0ba40 movaps %xmm0, 0x100(%rsp) movaps 0x140(%rsp), %xmm0 movaps %xmm0, 0x260(%rsp) movaps 0x7c6f34(%rip), %xmm0 # 0x1e0ba50 movaps %xmm0, 0x250(%rsp) movaps 0x260(%rsp), %xmm0 movaps 0x250(%rsp), %xmm3 minps %xmm3, %xmm0 movaps %xmm0, 0x140(%rsp) movaps 0x140(%rsp), %xmm0 movaps %xmm0, 0x280(%rsp) movaps 0x7c6f0a(%rip), %xmm0 # 0x1e0ba60 movaps %xmm0, 0x270(%rsp) movaps 0x280(%rsp), %xmm0 movaps 0x270(%rsp), %xmm3 maxps %xmm3, %xmm0 movaps %xmm0, 0x140(%rsp) movaps 0x140(%rsp), %xmm0 movaps %xmm0, 0x240(%rsp) movaps 0x7c6ee0(%rip), %xmm0 # 0x1e0ba70 movaps %xmm0, 0x230(%rsp) movaps 0x240(%rsp), %xmm0 movaps 0x230(%rsp), %xmm3 mulps %xmm3, %xmm0 movaps %xmm0, 0x120(%rsp) movaps 0x120(%rsp), %xmm0 movaps %xmm0, 0x2c0(%rsp) movaps 0x7c6eb6(%rip), %xmm0 # 0x1e0ba80 movaps %xmm0, 0x2b0(%rsp) movaps 0x2c0(%rsp), %xmm3 movaps 0x2b0(%rsp), %xmm4 addps %xmm4, %xmm3 movaps %xmm3, 0x120(%rsp) movaps 0x120(%rsp), %xmm3 movaps %xmm3, 0x1c0(%rsp) cvttps2dq 0x1c0(%rsp), %xmm3 movaps %xmm3, 0x110(%rsp) movaps 0x110(%rsp), %xmm3 movaps %xmm3, 0x1a0(%rsp) cvtdq2ps 0x1a0(%rsp), %xmm3 movaps %xmm3, 0x130(%rsp) movaps 0x130(%rsp), %xmm4 movaps 0x120(%rsp), %xmm3 movaps %xmm4, 0x1e0(%rsp) movaps %xmm3, 0x1d0(%rsp) movaps 0x1d0(%rsp), %xmm3 movaps 0x1e0(%rsp), %xmm4 cmpltps %xmm4, %xmm3 movaps %xmm3, 0xf0(%rsp) movaps 0xf0(%rsp), %xmm4 movaps 0x100(%rsp), %xmm3 movaps %xmm4, 0x190(%rsp) movaps %xmm3, 0x180(%rsp) movaps 0x190(%rsp), %xmm3 movaps 0x180(%rsp), %xmm4 pand %xmm4, %xmm3 movaps %xmm3, 0xf0(%rsp) movaps 0x130(%rsp), %xmm4 movaps 0xf0(%rsp), %xmm3 movaps %xmm4, 0x160(%rsp) movaps %xmm3, 0x150(%rsp) movaps 0x160(%rsp), %xmm3 movaps 0x150(%rsp), %xmm4 subps %xmm4, %xmm3 movaps %xmm3, 0x120(%rsp) movaps 0x120(%rsp), %xmm4 movaps 0x140(%rsp), %xmm3 movaps %xmm4, 0x380(%rsp) movaps 0x7c6d90(%rip), %xmm4 # 0x1e0ba90 movaps %xmm4, 0x370(%rsp) movaps %xmm3, 0x360(%rsp) movaps 0x360(%rsp), %xmm4 movaps 0x380(%rsp), %xmm5 movaps 0x370(%rsp), %xmm3 movaps %xmm5, 0x3c0(%rsp) movaps %xmm3, 0x3b0(%rsp) movaps 0x3c0(%rsp), %xmm3 movaps 0x3b0(%rsp), %xmm5 mulps %xmm5, %xmm3 movaps %xmm4, 0x3a0(%rsp) movaps %xmm3, 0x390(%rsp) movaps 0x3a0(%rsp), %xmm3 movaps 0x390(%rsp), %xmm4 subps %xmm4, %xmm3 movaps %xmm3, 
0x140(%rsp) movaps 0x120(%rsp), %xmm4 movaps 0x140(%rsp), %xmm3 movaps %xmm4, 0x310(%rsp) movaps 0x7c6d0b(%rip), %xmm4 # 0x1e0baa0 movaps %xmm4, 0x300(%rsp) movaps %xmm3, 0x2f0(%rsp) movaps 0x2f0(%rsp), %xmm4 movaps 0x310(%rsp), %xmm5 movaps 0x300(%rsp), %xmm3 movaps %xmm5, 0x350(%rsp) movaps %xmm3, 0x340(%rsp) movaps 0x350(%rsp), %xmm3 movaps 0x340(%rsp), %xmm5 mulps %xmm5, %xmm3 movaps %xmm4, 0x330(%rsp) movaps %xmm3, 0x320(%rsp) movaps 0x330(%rsp), %xmm3 movaps 0x320(%rsp), %xmm4 subps %xmm4, %xmm3 movaps %xmm3, 0x140(%rsp) movaps 0x140(%rsp), %xmm3 movaps %xmm3, 0x220(%rsp) movaps %xmm3, 0x210(%rsp) movaps 0x220(%rsp), %xmm3 movaps 0x210(%rsp), %xmm4 mulps %xmm4, %xmm3 movaps %xmm3, 0x130(%rsp) movaps 0x7c6c6b(%rip), %xmm3 # 0x1e0bab0 movaps %xmm3, 0xe0(%rsp) movaps 0xe0(%rsp), %xmm4 movaps 0x140(%rsp), %xmm3 movaps %xmm4, 0x620(%rsp) movaps %xmm3, 0x610(%rsp) movaps 0x7c6c4c(%rip), %xmm3 # 0x1e0bac0 movaps %xmm3, 0x600(%rsp) movaps 0x620(%rsp), %xmm4 movaps 0x610(%rsp), %xmm3 movaps %xmm4, 0x640(%rsp) movaps %xmm3, 0x630(%rsp) movaps 0x640(%rsp), %xmm4 movaps 0x630(%rsp), %xmm3 mulps %xmm3, %xmm4 movaps 0x600(%rsp), %xmm3 movaps %xmm4, 0x660(%rsp) movaps %xmm3, 0x650(%rsp) movaps 0x660(%rsp), %xmm3 movaps 0x650(%rsp), %xmm4 addps %xmm4, %xmm3 movaps %xmm3, 0xe0(%rsp) movaps 0xe0(%rsp), %xmm4 movaps 0x140(%rsp), %xmm3 movaps %xmm4, 0x5b0(%rsp) movaps %xmm3, 0x5a0(%rsp) movaps 0x7c6bc7(%rip), %xmm3 # 0x1e0bad0 movaps %xmm3, 0x590(%rsp) movaps 0x5b0(%rsp), %xmm4 movaps 0x5a0(%rsp), %xmm3 movaps %xmm4, 0x5d0(%rsp) movaps %xmm3, 0x5c0(%rsp) movaps 0x5d0(%rsp), %xmm4 movaps 0x5c0(%rsp), %xmm3 mulps %xmm3, %xmm4 movaps 0x590(%rsp), %xmm3 movaps %xmm4, 0x5f0(%rsp) movaps %xmm3, 0x5e0(%rsp) movaps 0x5f0(%rsp), %xmm3 movaps 0x5e0(%rsp), %xmm4 addps %xmm4, %xmm3 movaps %xmm3, 0xe0(%rsp) movaps 0xe0(%rsp), %xmm4 movaps 0x140(%rsp), %xmm3 movaps %xmm4, 0x540(%rsp) movaps %xmm3, 0x530(%rsp) movaps 0x7c6b42(%rip), %xmm3 # 0x1e0bae0 movaps %xmm3, 0x520(%rsp) movaps 0x540(%rsp), %xmm4 movaps 0x530(%rsp), %xmm3 movaps %xmm4, 0x560(%rsp) movaps %xmm3, 0x550(%rsp) movaps 0x560(%rsp), %xmm4 movaps 0x550(%rsp), %xmm3 mulps %xmm3, %xmm4 movaps 0x520(%rsp), %xmm3 movaps %xmm4, 0x580(%rsp) movaps %xmm3, 0x570(%rsp) movaps 0x580(%rsp), %xmm3 movaps 0x570(%rsp), %xmm4 addps %xmm4, %xmm3 movaps %xmm3, 0xe0(%rsp) movaps 0xe0(%rsp), %xmm4 movaps 0x140(%rsp), %xmm3 movaps %xmm4, 0x4d0(%rsp) movaps %xmm3, 0x4c0(%rsp) movaps 0x7c6abd(%rip), %xmm3 # 0x1e0baf0 movaps %xmm3, 0x4b0(%rsp) movaps 0x4d0(%rsp), %xmm4 movaps 0x4c0(%rsp), %xmm3 movaps %xmm4, 0x4f0(%rsp) movaps %xmm3, 0x4e0(%rsp) movaps 0x4f0(%rsp), %xmm4 movaps 0x4e0(%rsp), %xmm3 mulps %xmm3, %xmm4 movaps 0x4b0(%rsp), %xmm3 movaps %xmm4, 0x510(%rsp) movaps %xmm3, 0x500(%rsp) movaps 0x510(%rsp), %xmm3 movaps 0x500(%rsp), %xmm4 addps %xmm4, %xmm3 movaps %xmm3, 0xe0(%rsp) movaps 0xe0(%rsp), %xmm4 movaps 0x140(%rsp), %xmm3 movaps %xmm4, 0x460(%rsp) movaps %xmm3, 0x450(%rsp) movaps %xmm0, 0x440(%rsp) movaps 0x460(%rsp), %xmm3 movaps 0x450(%rsp), %xmm0 movaps %xmm3, 0x480(%rsp) movaps %xmm0, 0x470(%rsp) movaps 0x480(%rsp), %xmm3 movaps 0x470(%rsp), %xmm0 mulps %xmm0, %xmm3 movaps 0x440(%rsp), %xmm0 movaps %xmm3, 0x4a0(%rsp) movaps %xmm0, 0x490(%rsp) movaps 0x4a0(%rsp), %xmm0 movaps 0x490(%rsp), %xmm3 addps %xmm3, %xmm0 movaps %xmm0, 0xe0(%rsp) movaps 0xe0(%rsp), %xmm4 movaps 0x130(%rsp), %xmm3 movaps 0x140(%rsp), %xmm0 movaps %xmm4, 0x3f0(%rsp) movaps %xmm3, 0x3e0(%rsp) movaps %xmm0, 0x3d0(%rsp) movaps 0x3f0(%rsp), %xmm3 movaps 0x3e0(%rsp), %xmm0 movaps %xmm3, 
0x410(%rsp) movaps %xmm0, 0x400(%rsp) movaps 0x410(%rsp), %xmm3 movaps 0x400(%rsp), %xmm0 mulps %xmm0, %xmm3 movaps 0x3d0(%rsp), %xmm0 movaps %xmm3, 0x430(%rsp) movaps %xmm0, 0x420(%rsp) movaps 0x430(%rsp), %xmm0 movaps 0x420(%rsp), %xmm3 addps %xmm3, %xmm0 movaps %xmm0, 0xe0(%rsp) movaps 0xe0(%rsp), %xmm3 movaps 0x100(%rsp), %xmm0 movaps %xmm3, 0x2a0(%rsp) movaps %xmm0, 0x290(%rsp) movaps 0x2a0(%rsp), %xmm0 movaps 0x290(%rsp), %xmm3 addps %xmm3, %xmm0 movaps %xmm0, 0xe0(%rsp) movaps 0x120(%rsp), %xmm0 movaps %xmm0, 0x1b0(%rsp) cvttps2dq 0x1b0(%rsp), %xmm0 movaps %xmm0, 0x110(%rsp) movaps 0x110(%rsp), %xmm0 movaps %xmm0, 0x680(%rsp) movaps 0x7c68c8(%rip), %xmm0 # 0x1e0bb00 movaps %xmm0, 0x670(%rsp) movdqa 0x680(%rsp), %xmm0 movdqa 0x670(%rsp), %xmm3 paddd %xmm3, %xmm0 movdqa %xmm0, 0x110(%rsp) movdqa 0x110(%rsp), %xmm0 movdqa %xmm0, 0x6a0(%rsp) movl $0x17, 0x69c(%rsp) movdqa 0x6a0(%rsp), %xmm0 movl 0x69c(%rsp), %eax movd %eax, %xmm3 pslld %xmm3, %xmm0 movdqa %xmm0, 0x110(%rsp) movdqa 0x110(%rsp), %xmm0 movdqa %xmm0, 0x6b0(%rsp) movdqa 0x6b0(%rsp), %xmm0 movaps %xmm0, 0xd0(%rsp) movaps 0xe0(%rsp), %xmm3 movaps 0xd0(%rsp), %xmm0 movaps %xmm3, 0x200(%rsp) movaps %xmm0, 0x1f0(%rsp) movaps 0x200(%rsp), %xmm0 mulps 0x1f0(%rsp), %xmm0 movaps %xmm0, 0xe0(%rsp) movaps 0xe0(%rsp), %xmm0 movaps %xmm2, 0x2e0(%rsp) movaps %xmm0, 0x2d0(%rsp) movaps 0x2e0(%rsp), %xmm0 addps 0x2d0(%rsp), %xmm0 movaps %xmm1, 0x6d0(%rsp) movaps %xmm0, 0x6c0(%rsp) movaps 0x6d0(%rsp), %xmm1 divps 0x6c0(%rsp), %xmm1 movaps -0x70(%rsp), %xmm0 movaps %xmm1, 0x30(%rsp) movaps %xmm0, 0x20(%rsp) movaps 0x30(%rsp), %xmm1 mulps 0x20(%rsp), %xmm1 movaps -0x60(%rsp), %xmm0 movaps %xmm1, 0x10(%rsp) movaps %xmm0, (%rsp) movaps 0x10(%rsp), %xmm0 subps (%rsp), %xmm0 addq $0x6e8, %rsp # imm = 0x6E8 retq nopl (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/src/layer/x86/unaryop_x86.cpp
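The tanh_sse assembly above starts by scaling x by 2.0f and negating, runs an exp_ps-style clamped polynomial, then divides two by one-plus-the-result and subtracts one, which matches the identity tanh(x) = 2/(1+exp(-2x)) - 1. A sketch of that identity, assuming sse_mathfun's exp_ps:

#include <xmmintrin.h>
#include "sse_mathfun.h" // assumed; provides the exp_ps seen inlined in the listing

static inline __m128 tanh_pack4_sketch(const __m128& x)
{
    const __m128 one = _mm_set1_ps(1.0f);
    const __m128 two = _mm_set1_ps(2.0f);
    // exp(-2x), computed as exp(0 - 2x) just as the assembly does
    __m128 e = exp_ps(_mm_sub_ps(_mm_setzero_ps(), _mm_mul_ps(x, two)));
    return _mm_sub_ps(_mm_div_ps(two, _mm_add_ps(one, e)), one);
}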
2,113,231
ncnn::UnaryOp_x86_functor::unary_op_tanh::func(float const&) const
float func(const float& x) const { return (float)tanh(x); }
subq $0x18, %rsp movq %rdi, 0x10(%rsp) movq %rsi, 0x8(%rsp) movq 0x8(%rsp), %rax movss (%rax), %xmm0 callq 0x140cc0 addq $0x18, %rsp retq nopw %cs:(%rax,%rax) nopl (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/src/layer/x86/unaryop_x86.cpp
2,113,232
ncnn::UnaryOp_x86_avx512::UnaryOp_x86_avx512()
UnaryOp_x86_avx512::UnaryOp_x86_avx512() { #if __SSE2__ support_packing = true; #endif // __SSE2__ }
movq %rdi, -0x8(%rsp) movq %rsi, -0x10(%rsp) movq -0x8(%rsp), %rax movq -0x10(%rsp), %rcx movq (%rcx), %rdx movq %rdx, (%rax) movq 0x8(%rcx), %rdx movq (%rax), %rcx movq -0x18(%rcx), %rcx movq %rdx, (%rax,%rcx) movq (%rax), %rcx movq -0x18(%rcx), %rcx movb $0x1, 0xb(%rax,%rcx) retq nopw %cs:(%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,233
ncnn::UnaryOp_x86_avx512::UnaryOp_x86_avx512()
UnaryOp_x86_avx512::UnaryOp_x86_avx512() { #if __SSE2__ support_packing = true; #endif // __SSE2__ }
subq $0x18, %rsp movq %rdi, 0x10(%rsp) movq 0x10(%rsp), %rdi movq %rdi, 0x8(%rsp) addq $0x8, %rdi callq 0x1639190 movq 0x8(%rsp), %rax leaq 0x88c760(%rip), %rcx # 0x1ed1b78 addq $0x18, %rcx movq %rcx, (%rax) leaq 0x88c752(%rip), %rcx # 0x1ed1b78 addq $0x88, %rcx movq %rcx, 0x8(%rax) movq (%rax), %rcx movq -0x18(%rcx), %rcx movb $0x1, 0xb(%rax,%rcx) addq $0x18, %rsp retq nopw %cs:(%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,234
ncnn::UnaryOp_x86_avx512::forward_inplace(ncnn::Mat&, ncnn::Option const&) const
int UnaryOp_x86_avx512::forward_inplace(Mat& bottom_top_blob, const Option& opt) const { using namespace UnaryOp_x86_avx512_functor; if (op_type == Operation_ABS) return unary_op_inplace<unary_op_abs>(bottom_top_blob, opt); if (op_type == Operation_NEG) return unary_op_inplace<unary_op_neg>(bottom_top_blob, opt); if (op_type == Operation_FLOOR) return unary_op_inplace<unary_op_floor>(bottom_top_blob, opt); if (op_type == Operation_CEIL) return unary_op_inplace<unary_op_ceil>(bottom_top_blob, opt); if (op_type == Operation_SQUARE) return unary_op_inplace<unary_op_square>(bottom_top_blob, opt); if (op_type == Operation_SQRT) return unary_op_inplace<unary_op_sqrt>(bottom_top_blob, opt); if (op_type == Operation_RSQRT) return unary_op_inplace<unary_op_rsqrt>(bottom_top_blob, opt); if (op_type == Operation_EXP) return unary_op_inplace<unary_op_exp>(bottom_top_blob, opt); if (op_type == Operation_LOG) return unary_op_inplace<unary_op_log>(bottom_top_blob, opt); if (op_type == Operation_SIN) return unary_op_inplace<unary_op_sin>(bottom_top_blob, opt); if (op_type == Operation_COS) return unary_op_inplace<unary_op_cos>(bottom_top_blob, opt); if (op_type == Operation_TAN) return unary_op_inplace<unary_op_tan>(bottom_top_blob, opt); if (op_type == Operation_ASIN) return unary_op_inplace<unary_op_asin>(bottom_top_blob, opt); if (op_type == Operation_ACOS) return unary_op_inplace<unary_op_acos>(bottom_top_blob, opt); if (op_type == Operation_ATAN) return unary_op_inplace<unary_op_atan>(bottom_top_blob, opt); if (op_type == Operation_RECIPROCAL) return unary_op_inplace<unary_op_reciprocal>(bottom_top_blob, opt); if (op_type == Operation_TANH) return unary_op_inplace<unary_op_tanh>(bottom_top_blob, opt); return 0; }
subq $0x28, %rsp movq %rdi, 0x18(%rsp) movq %rsi, 0x10(%rsp) movq %rdx, 0x8(%rsp) movq 0x18(%rsp), %rax movq %rax, (%rsp) movq (%rax), %rcx movq -0x18(%rcx), %rcx cmpl $0x0, 0xd0(%rax,%rcx) jne 0x1645495 movq 0x10(%rsp), %rdi movq 0x8(%rsp), %rsi callq 0x1645770 movl %eax, 0x24(%rsp) jmp 0x1645764 movq (%rsp), %rax movq (%rax), %rcx movq -0x18(%rcx), %rcx cmpl $0x1, 0xd0(%rax,%rcx) jne 0x16454c2 movq 0x10(%rsp), %rdi movq 0x8(%rsp), %rsi callq 0x1645f00 movl %eax, 0x24(%rsp) jmp 0x1645764 movq (%rsp), %rax movq (%rax), %rcx movq -0x18(%rcx), %rcx cmpl $0x2, 0xd0(%rax,%rcx) jne 0x16454ef movq 0x10(%rsp), %rdi movq 0x8(%rsp), %rsi callq 0x1646690 movl %eax, 0x24(%rsp) jmp 0x1645764 movq (%rsp), %rax movq (%rax), %rcx movq -0x18(%rcx), %rcx cmpl $0x3, 0xd0(%rax,%rcx) jne 0x164551c movq 0x10(%rsp), %rdi movq 0x8(%rsp), %rsi callq 0x1646e20 movl %eax, 0x24(%rsp) jmp 0x1645764 movq (%rsp), %rax movq (%rax), %rcx movq -0x18(%rcx), %rcx cmpl $0x4, 0xd0(%rax,%rcx) jne 0x1645549 movq 0x10(%rsp), %rdi movq 0x8(%rsp), %rsi callq 0x16475b0 movl %eax, 0x24(%rsp) jmp 0x1645764 movq (%rsp), %rax movq (%rax), %rcx movq -0x18(%rcx), %rcx cmpl $0x5, 0xd0(%rax,%rcx) jne 0x1645576 movq 0x10(%rsp), %rdi movq 0x8(%rsp), %rsi callq 0x1647d40 movl %eax, 0x24(%rsp) jmp 0x1645764 movq (%rsp), %rax movq (%rax), %rcx movq -0x18(%rcx), %rcx cmpl $0x6, 0xd0(%rax,%rcx) jne 0x16455a3 movq 0x10(%rsp), %rdi movq 0x8(%rsp), %rsi callq 0x16484d0 movl %eax, 0x24(%rsp) jmp 0x1645764 movq (%rsp), %rax movq (%rax), %rcx movq -0x18(%rcx), %rcx cmpl $0x7, 0xd0(%rax,%rcx) jne 0x16455d0 movq 0x10(%rsp), %rdi movq 0x8(%rsp), %rsi callq 0x1648c60 movl %eax, 0x24(%rsp) jmp 0x1645764 movq (%rsp), %rax movq (%rax), %rcx movq -0x18(%rcx), %rcx cmpl $0x8, 0xd0(%rax,%rcx) jne 0x16455fd movq 0x10(%rsp), %rdi movq 0x8(%rsp), %rsi callq 0x16493f0 movl %eax, 0x24(%rsp) jmp 0x1645764 movq (%rsp), %rax movq (%rax), %rcx movq -0x18(%rcx), %rcx cmpl $0x9, 0xd0(%rax,%rcx) jne 0x164562a movq 0x10(%rsp), %rdi movq 0x8(%rsp), %rsi callq 0x1649b80 movl %eax, 0x24(%rsp) jmp 0x1645764 movq (%rsp), %rax movq (%rax), %rcx movq -0x18(%rcx), %rcx cmpl $0xa, 0xd0(%rax,%rcx) jne 0x1645657 movq 0x10(%rsp), %rdi movq 0x8(%rsp), %rsi callq 0x164a310 movl %eax, 0x24(%rsp) jmp 0x1645764 movq (%rsp), %rax movq (%rax), %rcx movq -0x18(%rcx), %rcx cmpl $0xb, 0xd0(%rax,%rcx) jne 0x1645684 movq 0x10(%rsp), %rdi movq 0x8(%rsp), %rsi callq 0x164aaa0 movl %eax, 0x24(%rsp) jmp 0x1645764 movq (%rsp), %rax movq (%rax), %rcx movq -0x18(%rcx), %rcx cmpl $0xc, 0xd0(%rax,%rcx) jne 0x16456b1 movq 0x10(%rsp), %rdi movq 0x8(%rsp), %rsi callq 0x164b230 movl %eax, 0x24(%rsp) jmp 0x1645764 movq (%rsp), %rax movq (%rax), %rcx movq -0x18(%rcx), %rcx cmpl $0xd, 0xd0(%rax,%rcx) jne 0x16456de movq 0x10(%rsp), %rdi movq 0x8(%rsp), %rsi callq 0x164b9c0 movl %eax, 0x24(%rsp) jmp 0x1645764 movq (%rsp), %rax movq (%rax), %rcx movq -0x18(%rcx), %rcx cmpl $0xe, 0xd0(%rax,%rcx) jne 0x1645708 movq 0x10(%rsp), %rdi movq 0x8(%rsp), %rsi callq 0x164c150 movl %eax, 0x24(%rsp) jmp 0x1645764 movq (%rsp), %rax movq (%rax), %rcx movq -0x18(%rcx), %rcx cmpl $0xf, 0xd0(%rax,%rcx) jne 0x1645732 movq 0x10(%rsp), %rdi movq 0x8(%rsp), %rsi callq 0x164c8e0 movl %eax, 0x24(%rsp) jmp 0x1645764 movq (%rsp), %rax movq (%rax), %rcx movq -0x18(%rcx), %rcx cmpl $0x10, 0xd0(%rax,%rcx) jne 0x164575c movq 0x10(%rsp), %rdi movq 0x8(%rsp), %rsi callq 0x164d070 movl %eax, 0x24(%rsp) jmp 0x1645764 movl $0x0, 0x24(%rsp) movl 0x24(%rsp), %eax addq $0x28, %rsp retq nopl (%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
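forward_inplace is a straight dispatcher: the cmpl $0x0 through cmpl $0x10 chain in the assembly walks op_type over the seventeen Operation_* values and calls the matching unary_op_inplace instantiation. A hedged usage sketch driving it through ncnn's public layer API; create_layer, ParamDict, and forward_inplace are real ncnn entry points, while param id 0 == op_type for UnaryOp is assumed from the enum order above:

#include "layer.h"

// Apply one unary op in place, e.g. op_type 16 == Operation_TANH.
int apply_unary(ncnn::Mat& m, int op_type)
{
    ncnn::Layer* op = ncnn::create_layer("UnaryOp");
    ncnn::ParamDict pd;
    pd.set(0, op_type); // assumed: param 0 selects the operation
    op->load_param(pd);
    ncnn::Option opt;
    op->create_pipeline(opt);
    int ret = op->forward_inplace(m, opt);
    op->destroy_pipeline(opt);
    delete op;
    return ret;
}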
2,113,235
int ncnn::unary_op_inplace<ncnn::UnaryOp_x86_avx512_functor::unary_op_abs>(ncnn::Mat&, ncnn::Option const&)
static int unary_op_inplace(Mat& a, const Option& opt) { Op op; int w = a.w; int h = a.h; int d = a.d; int channels = a.c; int elempack = a.elempack; int size = w * h * d * elempack; #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < channels; q++) { float* ptr = a.channel(q); int i = 0; #if __SSE2__ #if __AVX__ #if __AVX512F__ for (; i + 15 < size; i += 16) { __m512 _p = _mm512_loadu_ps(ptr); _p = op.func_pack16(_p); _mm512_storeu_ps(ptr, _p); ptr += 16; } #endif // __AVX512F__ for (; i + 7 < size; i += 8) { __m256 _p = _mm256_loadu_ps(ptr); _p = op.func_pack8(_p); _mm256_storeu_ps(ptr, _p); ptr += 8; } #endif // __AVX__ for (; i + 3 < size; i += 4) { __m128 _p = _mm_load_ps(ptr); _p = op.func_pack4(_p); _mm_store_ps(ptr, _p); ptr += 4; } #endif // __SSE2__ for (; i < size; i++) { *ptr = op.func(*ptr); ptr++; } } return 0; }
pushq %rbp movq %rsp, %rbp andq $-0x40, %rsp subq $0x340, %rsp # imm = 0x340 movq %rdi, 0x178(%rsp) movq %rsi, 0x170(%rsp) movq 0x178(%rsp), %rax movl 0x2c(%rax), %eax movl %eax, 0x168(%rsp) movq 0x178(%rsp), %rax movl 0x30(%rax), %eax movl %eax, 0x164(%rsp) movq 0x178(%rsp), %rax movl 0x34(%rax), %eax movl %eax, 0x160(%rsp) movq 0x178(%rsp), %rax movl 0x38(%rax), %eax movl %eax, 0x15c(%rsp) movq 0x178(%rsp), %rax movl 0x18(%rax), %eax movl %eax, 0x158(%rsp) movl 0x168(%rsp), %eax imull 0x164(%rsp), %eax imull 0x160(%rsp), %eax imull 0x158(%rsp), %eax movl %eax, 0x154(%rsp) movl $0x0, 0x150(%rsp) movl 0x150(%rsp), %eax cmpl 0x15c(%rsp), %eax jge 0x1645ef0 movq 0x178(%rsp), %rcx movl 0x150(%rsp), %eax leaq 0x100(%rsp), %rdx movq %rdx, 0x190(%rsp) movq %rcx, 0x188(%rsp) movl %eax, 0x184(%rsp) movq 0x188(%rsp), %rax movq %rax, 0x48(%rsp) movb $0x0, 0x183(%rsp) movl 0x2c(%rax), %r9d movl 0x30(%rax), %r8d movl 0x34(%rax), %edi movq (%rax), %rsi movq 0x40(%rax), %rcx movslq 0x184(%rsp), %rdx imulq %rdx, %rcx imulq 0x10(%rax), %rcx addq %rcx, %rsi movq 0x10(%rax), %rdx movl 0x18(%rax), %ecx movq 0x20(%rax), %rax leaq 0x100(%rsp), %r10 movq %r10, 0x2d0(%rsp) movl %r9d, 0x2cc(%rsp) movl %r8d, 0x2c8(%rsp) movl %edi, 0x2c4(%rsp) movq %rsi, 0x2b8(%rsp) movq %rdx, 0x2b0(%rsp) movl %ecx, 0x2ac(%rsp) movq %rax, 0x2a0(%rsp) movq 0x2d0(%rsp), %rcx movq %rcx, 0x40(%rsp) movq 0x2b8(%rsp), %rax movq %rax, (%rcx) movq $0x0, 0x8(%rcx) movq 0x2b0(%rsp), %rax movq %rax, 0x10(%rcx) movl 0x2ac(%rsp), %eax movl %eax, 0x18(%rcx) movq 0x2a0(%rsp), %rax movq %rax, 0x20(%rcx) movl $0x3, 0x28(%rcx) movl 0x2cc(%rsp), %eax movl %eax, 0x2c(%rcx) movl 0x2c8(%rsp), %eax movl %eax, 0x30(%rcx) movl $0x1, 0x34(%rcx) movl 0x2c4(%rsp), %eax movl %eax, 0x38(%rcx) movslq 0x2c(%rcx), %rax movslq 0x30(%rcx), %rdx imulq %rdx, %rax imulq 0x10(%rcx), %rax movq %rax, 0x2e0(%rsp) movl $0x10, 0x2dc(%rsp) movq 0x2e0(%rsp), %rax movslq 0x2dc(%rsp), %rdx addq %rdx, %rax subq $0x1, %rax xorl %edx, %edx subl 0x2dc(%rsp), %edx movslq %edx, %rdx andq %rdx, %rax xorl %edx, %edx divq 0x10(%rcx) movq %rax, %rdx movq 0x48(%rsp), %rax movq %rdx, 0x40(%rcx) movl 0x28(%rax), %ecx subl $0x1, %ecx movl %ecx, 0x128(%rsp) cmpl $0x4, 0x28(%rax) jne 0x16459de movq 0x48(%rsp), %rcx movslq 0x2c(%rcx), %rax movslq 0x30(%rcx), %rcx imulq %rcx, %rax movq %rax, 0x140(%rsp) movb $0x1, 0x183(%rsp) testb $0x1, 0x183(%rsp) jne 0x1645b0d leaq 0x100(%rsp), %rax movq %rax, 0x1a0(%rsp) movq 0x1a0(%rsp), %rax movq %rax, 0x310(%rsp) movq 0x310(%rsp), %rax movq %rax, 0x38(%rsp) cmpq $0x0, 0x8(%rax) je 0x1645ab3 movq 0x38(%rsp), %rax movq 0x8(%rax), %rcx movl $0xffffffff, 0x30c(%rsp) # imm = 0xFFFFFFFF movl 0x30c(%rsp), %eax lock xaddl %eax, (%rcx) movl %eax, 0x308(%rsp) cmpl $0x1, 0x308(%rsp) jne 0x1645ab3 movq 0x38(%rsp), %rax cmpq $0x0, 0x20(%rax) je 0x1645a84 movq 0x38(%rsp), %rax movq (%rax), %rsi movq 0x20(%rax), %rdi movq (%rdi), %rax movq 0x18(%rax), %rax vzeroupper callq *%rax jmp 0x1645a82 jmp 0x1645ab1 movq 0x38(%rsp), %rax movq (%rax), %rax movq %rax, 0x318(%rsp) cmpq $0x0, 0x318(%rsp) je 0x1645aaf movq 0x318(%rsp), %rdi vzeroupper callq 0x5f480 jmp 0x1645ab1 jmp 0x1645ab3 movq 0x38(%rsp), %rax movq $0x0, (%rax) movq $0x0, 0x10(%rax) movl $0x0, 0x18(%rax) movl $0x0, 0x28(%rax) movl $0x0, 0x2c(%rax) movl $0x0, 0x30(%rax) movl $0x0, 0x34(%rax) movl $0x0, 0x38(%rax) movq $0x0, 0x40(%rax) movq $0x0, 0x8(%rax) jmp 0x1645b0b movq %rax, %rdi callq 0x678a0 jmp 0x1645b0d leaq 0x100(%rsp), %rax movq %rax, 0x198(%rsp) movq 0x198(%rsp), %rax movq (%rax), %rax movq %rax, 
0x30(%rsp) leaq 0x100(%rsp), %rax movq %rax, 0x1a8(%rsp) movq 0x1a8(%rsp), %rax movq %rax, 0x300(%rsp) movq 0x300(%rsp), %rax movq %rax, 0x28(%rsp) cmpq $0x0, 0x8(%rax) je 0x1645bec movq 0x28(%rsp), %rax movq 0x8(%rax), %rcx movl $0xffffffff, 0x2fc(%rsp) # imm = 0xFFFFFFFF movl 0x2fc(%rsp), %eax lock xaddl %eax, (%rcx) movl %eax, 0x2f8(%rsp) cmpl $0x1, 0x2f8(%rsp) jne 0x1645bec movq 0x28(%rsp), %rax cmpq $0x0, 0x20(%rax) je 0x1645bbd movq 0x28(%rsp), %rax movq (%rax), %rsi movq 0x20(%rax), %rdi movq (%rdi), %rax movq 0x18(%rax), %rax vzeroupper callq *%rax jmp 0x1645bbb jmp 0x1645bea movq 0x28(%rsp), %rax movq (%rax), %rax movq %rax, 0x320(%rsp) cmpq $0x0, 0x320(%rsp) je 0x1645be8 movq 0x320(%rsp), %rdi vzeroupper callq 0x5f480 jmp 0x1645bea jmp 0x1645bec movq 0x28(%rsp), %rax movq $0x0, (%rax) movq $0x0, 0x10(%rax) movl $0x0, 0x18(%rax) movl $0x0, 0x28(%rax) movl $0x0, 0x2c(%rax) movl $0x0, 0x30(%rax) movl $0x0, 0x34(%rax) movl $0x0, 0x38(%rax) movq $0x0, 0x40(%rax) movq $0x0, 0x8(%rax) jmp 0x1645c44 movq %rax, %rdi callq 0x678a0 movq 0x30(%rsp), %rax movq %rax, 0x148(%rsp) movl $0x0, 0xf0(%rsp) movl 0xf0(%rsp), %eax addl $0xf, %eax cmpl 0x154(%rsp), %eax jge 0x1645d16 movq 0x148(%rsp), %rax movq %rax, 0x1b8(%rsp) movq 0x1b8(%rsp), %rax vmovups (%rax), %zmm0 vmovaps %zmm0, 0x80(%rsp) leaq 0x16f(%rsp), %rdi leaq 0x80(%rsp), %rsi callq 0x164d830 vmovaps %zmm0, 0x80(%rsp) movq 0x148(%rsp), %rax vmovaps 0x80(%rsp), %zmm0 movq %rax, 0x230(%rsp) vmovaps %zmm0, 0x1c0(%rsp) vmovaps 0x1c0(%rsp), %zmm0 movq 0x230(%rsp), %rax vmovups %zmm0, (%rax) movq 0x148(%rsp), %rax addq $0x40, %rax movq %rax, 0x148(%rsp) movl 0xf0(%rsp), %eax addl $0x10, %eax movl %eax, 0xf0(%rsp) jmp 0x1645c5c jmp 0x1645d18 movl 0xf0(%rsp), %eax addl $0x7, %eax cmpl 0x154(%rsp), %eax jge 0x1645dc7 movq 0x148(%rsp), %rax movq %rax, 0x238(%rsp) movq 0x238(%rsp), %rax vmovups (%rax), %ymm0 vmovaps %ymm0, 0x60(%rsp) leaq 0x16f(%rsp), %rdi leaq 0x60(%rsp), %rsi callq 0x164d8e0 vmovaps %ymm0, 0x60(%rsp) movq 0x148(%rsp), %rax vmovaps 0x60(%rsp), %ymm0 movq %rax, 0x270(%rsp) vmovaps %ymm0, 0x240(%rsp) vmovaps 0x240(%rsp), %ymm0 movq 0x270(%rsp), %rax vmovups %ymm0, (%rax) movq 0x148(%rsp), %rax addq $0x20, %rax movq %rax, 0x148(%rsp) movl 0xf0(%rsp), %eax addl $0x8, %eax movl %eax, 0xf0(%rsp) jmp 0x1645d18 jmp 0x1645dc9 movl 0xf0(%rsp), %eax addl $0x3, %eax cmpl 0x154(%rsp), %eax jge 0x1645e7b movq 0x148(%rsp), %rax movq %rax, 0x278(%rsp) movq 0x278(%rsp), %rax vmovaps (%rax), %xmm0 vmovaps %xmm0, 0x50(%rsp) leaq 0x16f(%rsp), %rdi leaq 0x50(%rsp), %rsi vzeroupper callq 0x164d970 vmovaps %xmm0, 0x50(%rsp) movq 0x148(%rsp), %rax vmovaps 0x50(%rsp), %xmm0 movq %rax, 0x298(%rsp) vmovaps %xmm0, 0x280(%rsp) vmovaps 0x280(%rsp), %xmm0 movq 0x298(%rsp), %rax vmovaps %xmm0, (%rax) movq 0x148(%rsp), %rax addq $0x10, %rax movq %rax, 0x148(%rsp) movl 0xf0(%rsp), %eax addl $0x4, %eax movl %eax, 0xf0(%rsp) jmp 0x1645dc9 jmp 0x1645e7d movl 0xf0(%rsp), %eax cmpl 0x154(%rsp), %eax jge 0x1645ed8 movq 0x148(%rsp), %rsi leaq 0x16f(%rsp), %rdi vzeroupper callq 0x164d9f0 movq 0x148(%rsp), %rax vmovss %xmm0, (%rax) movq 0x148(%rsp), %rax addq $0x4, %rax movq %rax, 0x148(%rsp) movl 0xf0(%rsp), %eax addl $0x1, %eax movl %eax, 0xf0(%rsp) jmp 0x1645e7d jmp 0x1645eda movl 0x150(%rsp), %eax addl $0x1, %eax movl %eax, 0x150(%rsp) jmp 0x164581a xorl %eax, %eax movq %rbp, %rsp popq %rbp vzeroupper retq nopw (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
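unary_op_inplace above tiles every channel with progressively narrower loops: 16 floats (AVX-512), 8 (AVX), 4 (SSE), then a scalar tail, so any size is covered without padding. A minimal sketch of the same tiering reduced to a raw float buffer and just the SSE tier plus tail; the original uses aligned _mm_load_ps because Mat channels are aligned, while loadu is used here for an arbitrary buffer:

#include <xmmintrin.h>

// Op mirrors the functor interface above: func_pack4 for a 4-lane
// vector, func for a single float on the remainder.
template<typename Op>
static void apply_tiered(float* ptr, int size, const Op& op)
{
    int i = 0;
    for (; i + 3 < size; i += 4) // 4 floats per iteration
    {
        __m128 p = _mm_loadu_ps(ptr);
        p = op.func_pack4(p);
        _mm_storeu_ps(ptr, p);
        ptr += 4;
    }
    for (; i < size; i++) // scalar remainder, at most 3 elements
    {
        *ptr = op.func(*ptr);
        ptr++;
    }
}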
2,113,236
int ncnn::unary_op_inplace<ncnn::UnaryOp_x86_avx512_functor::unary_op_neg>(ncnn::Mat&, ncnn::Option const&)
static int unary_op_inplace(Mat& a, const Option& opt) { Op op; int w = a.w; int h = a.h; int d = a.d; int channels = a.c; int elempack = a.elempack; int size = w * h * d * elempack; #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < channels; q++) { float* ptr = a.channel(q); int i = 0; #if __SSE2__ #if __AVX__ #if __AVX512F__ for (; i + 15 < size; i += 16) { __m512 _p = _mm512_loadu_ps(ptr); _p = op.func_pack16(_p); _mm512_storeu_ps(ptr, _p); ptr += 16; } #endif // __AVX512F__ for (; i + 7 < size; i += 8) { __m256 _p = _mm256_loadu_ps(ptr); _p = op.func_pack8(_p); _mm256_storeu_ps(ptr, _p); ptr += 8; } #endif // __AVX__ for (; i + 3 < size; i += 4) { __m128 _p = _mm_load_ps(ptr); _p = op.func_pack4(_p); _mm_store_ps(ptr, _p); ptr += 4; } #endif // __SSE2__ for (; i < size; i++) { *ptr = op.func(*ptr); ptr++; } } return 0; }
pushq %rbp movq %rsp, %rbp andq $-0x40, %rsp subq $0x2c0, %rsp # imm = 0x2C0 movq %rdi, 0x140(%rsp) movq %rsi, 0x138(%rsp) movq 0x140(%rsp), %rax movl 0x2c(%rax), %eax movl %eax, 0x130(%rsp) movq 0x140(%rsp), %rax movl 0x30(%rax), %eax movl %eax, 0x12c(%rsp) movq 0x140(%rsp), %rax movl 0x34(%rax), %eax movl %eax, 0x128(%rsp) movq 0x140(%rsp), %rax movl 0x38(%rax), %eax movl %eax, 0x124(%rsp) movq 0x140(%rsp), %rax movl 0x18(%rax), %eax movl %eax, 0x120(%rsp) movl 0x130(%rsp), %eax imull 0x12c(%rsp), %eax imull 0x128(%rsp), %eax imull 0x120(%rsp), %eax movl %eax, 0x11c(%rsp) movl $0x0, 0x118(%rsp) movl 0x118(%rsp), %eax cmpl 0x124(%rsp), %eax jge 0x1646680 movq 0x140(%rsp), %rcx movl 0x118(%rsp), %eax leaq 0xc8(%rsp), %rdx movq %rdx, 0x158(%rsp) movq %rcx, 0x150(%rsp) movl %eax, 0x14c(%rsp) movq 0x150(%rsp), %rax movq %rax, 0x48(%rsp) movb $0x0, 0x14b(%rsp) movl 0x2c(%rax), %r9d movl 0x30(%rax), %r8d movl 0x34(%rax), %edi movq (%rax), %rsi movq 0x40(%rax), %rcx movslq 0x14c(%rsp), %rdx imulq %rdx, %rcx imulq 0x10(%rax), %rcx addq %rcx, %rsi movq 0x10(%rax), %rdx movl 0x18(%rax), %ecx movq 0x20(%rax), %rax leaq 0xc8(%rsp), %r10 movq %r10, 0x268(%rsp) movl %r9d, 0x264(%rsp) movl %r8d, 0x260(%rsp) movl %edi, 0x25c(%rsp) movq %rsi, 0x250(%rsp) movq %rdx, 0x248(%rsp) movl %ecx, 0x244(%rsp) movq %rax, 0x238(%rsp) movq 0x268(%rsp), %rcx movq %rcx, 0x40(%rsp) movq 0x250(%rsp), %rax movq %rax, (%rcx) movq $0x0, 0x8(%rcx) movq 0x248(%rsp), %rax movq %rax, 0x10(%rcx) movl 0x244(%rsp), %eax movl %eax, 0x18(%rcx) movq 0x238(%rsp), %rax movq %rax, 0x20(%rcx) movl $0x3, 0x28(%rcx) movl 0x264(%rsp), %eax movl %eax, 0x2c(%rcx) movl 0x260(%rsp), %eax movl %eax, 0x30(%rcx) movl $0x1, 0x34(%rcx) movl 0x25c(%rsp), %eax movl %eax, 0x38(%rcx) movslq 0x2c(%rcx), %rax movslq 0x30(%rcx), %rdx imulq %rdx, %rax imulq 0x10(%rcx), %rax movq %rax, 0x278(%rsp) movl $0x10, 0x274(%rsp) movq 0x278(%rsp), %rax movslq 0x274(%rsp), %rdx addq %rdx, %rax subq $0x1, %rax xorl %edx, %edx subl 0x274(%rsp), %edx movslq %edx, %rdx andq %rdx, %rax xorl %edx, %edx divq 0x10(%rcx) movq %rax, %rdx movq 0x48(%rsp), %rax movq %rdx, 0x40(%rcx) movl 0x28(%rax), %ecx subl $0x1, %ecx movl %ecx, 0xf0(%rsp) cmpl $0x4, 0x28(%rax) jne 0x164616e movq 0x48(%rsp), %rcx movslq 0x2c(%rcx), %rax movslq 0x30(%rcx), %rcx imulq %rcx, %rax movq %rax, 0x108(%rsp) movb $0x1, 0x14b(%rsp) testb $0x1, 0x14b(%rsp) jne 0x164629d leaq 0xc8(%rsp), %rax movq %rax, 0x168(%rsp) movq 0x168(%rsp), %rax movq %rax, 0x298(%rsp) movq 0x298(%rsp), %rax movq %rax, 0x38(%rsp) cmpq $0x0, 0x8(%rax) je 0x1646243 movq 0x38(%rsp), %rax movq 0x8(%rax), %rcx movl $0xffffffff, 0x294(%rsp) # imm = 0xFFFFFFFF movl 0x294(%rsp), %eax lock xaddl %eax, (%rcx) movl %eax, 0x290(%rsp) cmpl $0x1, 0x290(%rsp) jne 0x1646243 movq 0x38(%rsp), %rax cmpq $0x0, 0x20(%rax) je 0x1646214 movq 0x38(%rsp), %rax movq (%rax), %rsi movq 0x20(%rax), %rdi movq (%rdi), %rax movq 0x18(%rax), %rax vzeroupper callq *%rax jmp 0x1646212 jmp 0x1646241 movq 0x38(%rsp), %rax movq (%rax), %rax movq %rax, 0x2a0(%rsp) cmpq $0x0, 0x2a0(%rsp) je 0x164623f movq 0x2a0(%rsp), %rdi vzeroupper callq 0x5f480 jmp 0x1646241 jmp 0x1646243 movq 0x38(%rsp), %rax movq $0x0, (%rax) movq $0x0, 0x10(%rax) movl $0x0, 0x18(%rax) movl $0x0, 0x28(%rax) movl $0x0, 0x2c(%rax) movl $0x0, 0x30(%rax) movl $0x0, 0x34(%rax) movl $0x0, 0x38(%rax) movq $0x0, 0x40(%rax) movq $0x0, 0x8(%rax) jmp 0x164629b movq %rax, %rdi callq 0x678a0 jmp 0x164629d leaq 0xc8(%rsp), %rax movq %rax, 0x160(%rsp) movq 0x160(%rsp), %rax movq (%rax), %rax movq %rax, 0x28(%rsp) 
leaq 0xc8(%rsp), %rax movq %rax, 0x170(%rsp) movq 0x170(%rsp), %rax movq %rax, 0x288(%rsp) movq 0x288(%rsp), %rax movq %rax, 0x30(%rsp) cmpq $0x0, 0x8(%rax) je 0x164637c movq 0x30(%rsp), %rax movq 0x8(%rax), %rcx movl $0xffffffff, 0x284(%rsp) # imm = 0xFFFFFFFF movl 0x284(%rsp), %eax lock xaddl %eax, (%rcx) movl %eax, 0x280(%rsp) cmpl $0x1, 0x280(%rsp) jne 0x164637c movq 0x30(%rsp), %rax cmpq $0x0, 0x20(%rax) je 0x164634d movq 0x30(%rsp), %rax movq (%rax), %rsi movq 0x20(%rax), %rdi movq (%rdi), %rax movq 0x18(%rax), %rax vzeroupper callq *%rax jmp 0x164634b jmp 0x164637a movq 0x30(%rsp), %rax movq (%rax), %rax movq %rax, 0x2a8(%rsp) cmpq $0x0, 0x2a8(%rsp) je 0x1646378 movq 0x2a8(%rsp), %rdi vzeroupper callq 0x5f480 jmp 0x164637a jmp 0x164637c movq 0x30(%rsp), %rax movq $0x0, (%rax) movq $0x0, 0x10(%rax) movl $0x0, 0x18(%rax) movl $0x0, 0x28(%rax) movl $0x0, 0x2c(%rax) movl $0x0, 0x30(%rax) movl $0x0, 0x34(%rax) movl $0x0, 0x38(%rax) movq $0x0, 0x40(%rax) movq $0x0, 0x8(%rax) jmp 0x16463d4 movq %rax, %rdi callq 0x678a0 movq 0x28(%rsp), %rax movq %rax, 0x110(%rsp) movl $0x0, 0xc4(%rsp) movl 0xc4(%rsp), %eax addl $0xf, %eax cmpl 0x11c(%rsp), %eax jge 0x16464a6 movq 0x110(%rsp), %rax movq %rax, 0x178(%rsp) movq 0x178(%rsp), %rax vmovups (%rax), %zmm0 vmovaps %zmm0, 0x80(%rsp) leaq 0x137(%rsp), %rdi leaq 0x80(%rsp), %rsi callq 0x164da20 vmovaps %zmm0, 0x80(%rsp) movq 0x110(%rsp), %rax vmovaps 0x80(%rsp), %zmm0 movq %rax, 0x1d0(%rsp) vmovaps %zmm0, 0x180(%rsp) vmovaps 0x180(%rsp), %zmm0 movq 0x1d0(%rsp), %rax vmovups %zmm0, (%rax) movq 0x110(%rsp), %rax addq $0x40, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x10, %eax movl %eax, 0xc4(%rsp) jmp 0x16463ec jmp 0x16464a8 movl 0xc4(%rsp), %eax addl $0x7, %eax cmpl 0x11c(%rsp), %eax jge 0x1646557 movq 0x110(%rsp), %rax movq %rax, 0x1d8(%rsp) movq 0x1d8(%rsp), %rax vmovups (%rax), %ymm0 vmovaps %ymm0, 0x60(%rsp) leaq 0x137(%rsp), %rdi leaq 0x60(%rsp), %rsi callq 0x164da80 vmovaps %ymm0, 0x60(%rsp) movq 0x110(%rsp), %rax vmovaps 0x60(%rsp), %ymm0 movq %rax, 0x210(%rsp) vmovaps %ymm0, 0x1e0(%rsp) vmovaps 0x1e0(%rsp), %ymm0 movq 0x210(%rsp), %rax vmovups %ymm0, (%rax) movq 0x110(%rsp), %rax addq $0x20, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x8, %eax movl %eax, 0xc4(%rsp) jmp 0x16464a8 jmp 0x1646559 movl 0xc4(%rsp), %eax addl $0x3, %eax cmpl 0x11c(%rsp), %eax jge 0x164660b movq 0x110(%rsp), %rax movq %rax, 0x218(%rsp) movq 0x218(%rsp), %rax vmovaps (%rax), %xmm0 vmovaps %xmm0, 0x50(%rsp) leaq 0x137(%rsp), %rdi leaq 0x50(%rsp), %rsi vzeroupper callq 0x164dad0 vmovaps %xmm0, 0x50(%rsp) movq 0x110(%rsp), %rax vmovaps 0x50(%rsp), %xmm0 movq %rax, 0x230(%rsp) vmovaps %xmm0, 0x220(%rsp) vmovaps 0x220(%rsp), %xmm0 movq 0x230(%rsp), %rax vmovaps %xmm0, (%rax) movq 0x110(%rsp), %rax addq $0x10, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x4, %eax movl %eax, 0xc4(%rsp) jmp 0x1646559 jmp 0x164660d movl 0xc4(%rsp), %eax cmpl 0x11c(%rsp), %eax jge 0x1646668 movq 0x110(%rsp), %rsi leaq 0x137(%rsp), %rdi vzeroupper callq 0x164db10 movq 0x110(%rsp), %rax vmovss %xmm0, (%rax) movq 0x110(%rsp), %rax addq $0x4, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x1, %eax movl %eax, 0xc4(%rsp) jmp 0x164660d jmp 0x164666a movl 0x118(%rsp), %eax addl $0x1, %eax movl %eax, 0x118(%rsp) jmp 0x1645faa xorl %eax, %eax movq %rbp, %rsp popq %rbp vzeroupper retq nopw (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,237
int ncnn::unary_op_inplace<ncnn::UnaryOp_x86_avx512_functor::unary_op_floor>(ncnn::Mat&, ncnn::Option const&)
static int unary_op_inplace(Mat& a, const Option& opt) { Op op; int w = a.w; int h = a.h; int d = a.d; int channels = a.c; int elempack = a.elempack; int size = w * h * d * elempack; #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < channels; q++) { float* ptr = a.channel(q); int i = 0; #if __SSE2__ #if __AVX__ #if __AVX512F__ for (; i + 15 < size; i += 16) { __m512 _p = _mm512_loadu_ps(ptr); _p = op.func_pack16(_p); _mm512_storeu_ps(ptr, _p); ptr += 16; } #endif // __AVX512F__ for (; i + 7 < size; i += 8) { __m256 _p = _mm256_loadu_ps(ptr); _p = op.func_pack8(_p); _mm256_storeu_ps(ptr, _p); ptr += 8; } #endif // __AVX__ for (; i + 3 < size; i += 4) { __m128 _p = _mm_load_ps(ptr); _p = op.func_pack4(_p); _mm_store_ps(ptr, _p); ptr += 4; } #endif // __SSE2__ for (; i < size; i++) { *ptr = op.func(*ptr); ptr++; } } return 0; }
pushq %rbp movq %rsp, %rbp andq $-0x40, %rsp subq $0x2c0, %rsp # imm = 0x2C0 movq %rdi, 0x140(%rsp) movq %rsi, 0x138(%rsp) movq 0x140(%rsp), %rax movl 0x2c(%rax), %eax movl %eax, 0x130(%rsp) movq 0x140(%rsp), %rax movl 0x30(%rax), %eax movl %eax, 0x12c(%rsp) movq 0x140(%rsp), %rax movl 0x34(%rax), %eax movl %eax, 0x128(%rsp) movq 0x140(%rsp), %rax movl 0x38(%rax), %eax movl %eax, 0x124(%rsp) movq 0x140(%rsp), %rax movl 0x18(%rax), %eax movl %eax, 0x120(%rsp) movl 0x130(%rsp), %eax imull 0x12c(%rsp), %eax imull 0x128(%rsp), %eax imull 0x120(%rsp), %eax movl %eax, 0x11c(%rsp) movl $0x0, 0x118(%rsp) movl 0x118(%rsp), %eax cmpl 0x124(%rsp), %eax jge 0x1646e10 movq 0x140(%rsp), %rcx movl 0x118(%rsp), %eax leaq 0xc8(%rsp), %rdx movq %rdx, 0x158(%rsp) movq %rcx, 0x150(%rsp) movl %eax, 0x14c(%rsp) movq 0x150(%rsp), %rax movq %rax, 0x48(%rsp) movb $0x0, 0x14b(%rsp) movl 0x2c(%rax), %r9d movl 0x30(%rax), %r8d movl 0x34(%rax), %edi movq (%rax), %rsi movq 0x40(%rax), %rcx movslq 0x14c(%rsp), %rdx imulq %rdx, %rcx imulq 0x10(%rax), %rcx addq %rcx, %rsi movq 0x10(%rax), %rdx movl 0x18(%rax), %ecx movq 0x20(%rax), %rax leaq 0xc8(%rsp), %r10 movq %r10, 0x268(%rsp) movl %r9d, 0x264(%rsp) movl %r8d, 0x260(%rsp) movl %edi, 0x25c(%rsp) movq %rsi, 0x250(%rsp) movq %rdx, 0x248(%rsp) movl %ecx, 0x244(%rsp) movq %rax, 0x238(%rsp) movq 0x268(%rsp), %rcx movq %rcx, 0x40(%rsp) movq 0x250(%rsp), %rax movq %rax, (%rcx) movq $0x0, 0x8(%rcx) movq 0x248(%rsp), %rax movq %rax, 0x10(%rcx) movl 0x244(%rsp), %eax movl %eax, 0x18(%rcx) movq 0x238(%rsp), %rax movq %rax, 0x20(%rcx) movl $0x3, 0x28(%rcx) movl 0x264(%rsp), %eax movl %eax, 0x2c(%rcx) movl 0x260(%rsp), %eax movl %eax, 0x30(%rcx) movl $0x1, 0x34(%rcx) movl 0x25c(%rsp), %eax movl %eax, 0x38(%rcx) movslq 0x2c(%rcx), %rax movslq 0x30(%rcx), %rdx imulq %rdx, %rax imulq 0x10(%rcx), %rax movq %rax, 0x278(%rsp) movl $0x10, 0x274(%rsp) movq 0x278(%rsp), %rax movslq 0x274(%rsp), %rdx addq %rdx, %rax subq $0x1, %rax xorl %edx, %edx subl 0x274(%rsp), %edx movslq %edx, %rdx andq %rdx, %rax xorl %edx, %edx divq 0x10(%rcx) movq %rax, %rdx movq 0x48(%rsp), %rax movq %rdx, 0x40(%rcx) movl 0x28(%rax), %ecx subl $0x1, %ecx movl %ecx, 0xf0(%rsp) cmpl $0x4, 0x28(%rax) jne 0x16468fe movq 0x48(%rsp), %rcx movslq 0x2c(%rcx), %rax movslq 0x30(%rcx), %rcx imulq %rcx, %rax movq %rax, 0x108(%rsp) movb $0x1, 0x14b(%rsp) testb $0x1, 0x14b(%rsp) jne 0x1646a2d leaq 0xc8(%rsp), %rax movq %rax, 0x168(%rsp) movq 0x168(%rsp), %rax movq %rax, 0x298(%rsp) movq 0x298(%rsp), %rax movq %rax, 0x38(%rsp) cmpq $0x0, 0x8(%rax) je 0x16469d3 movq 0x38(%rsp), %rax movq 0x8(%rax), %rcx movl $0xffffffff, 0x294(%rsp) # imm = 0xFFFFFFFF movl 0x294(%rsp), %eax lock xaddl %eax, (%rcx) movl %eax, 0x290(%rsp) cmpl $0x1, 0x290(%rsp) jne 0x16469d3 movq 0x38(%rsp), %rax cmpq $0x0, 0x20(%rax) je 0x16469a4 movq 0x38(%rsp), %rax movq (%rax), %rsi movq 0x20(%rax), %rdi movq (%rdi), %rax movq 0x18(%rax), %rax vzeroupper callq *%rax jmp 0x16469a2 jmp 0x16469d1 movq 0x38(%rsp), %rax movq (%rax), %rax movq %rax, 0x2a0(%rsp) cmpq $0x0, 0x2a0(%rsp) je 0x16469cf movq 0x2a0(%rsp), %rdi vzeroupper callq 0x5f480 jmp 0x16469d1 jmp 0x16469d3 movq 0x38(%rsp), %rax movq $0x0, (%rax) movq $0x0, 0x10(%rax) movl $0x0, 0x18(%rax) movl $0x0, 0x28(%rax) movl $0x0, 0x2c(%rax) movl $0x0, 0x30(%rax) movl $0x0, 0x34(%rax) movl $0x0, 0x38(%rax) movq $0x0, 0x40(%rax) movq $0x0, 0x8(%rax) jmp 0x1646a2b movq %rax, %rdi callq 0x678a0 jmp 0x1646a2d leaq 0xc8(%rsp), %rax movq %rax, 0x160(%rsp) movq 0x160(%rsp), %rax movq (%rax), %rax movq %rax, 0x28(%rsp) 
leaq 0xc8(%rsp), %rax movq %rax, 0x170(%rsp) movq 0x170(%rsp), %rax movq %rax, 0x288(%rsp) movq 0x288(%rsp), %rax movq %rax, 0x30(%rsp) cmpq $0x0, 0x8(%rax) je 0x1646b0c movq 0x30(%rsp), %rax movq 0x8(%rax), %rcx movl $0xffffffff, 0x284(%rsp) # imm = 0xFFFFFFFF movl 0x284(%rsp), %eax lock xaddl %eax, (%rcx) movl %eax, 0x280(%rsp) cmpl $0x1, 0x280(%rsp) jne 0x1646b0c movq 0x30(%rsp), %rax cmpq $0x0, 0x20(%rax) je 0x1646add movq 0x30(%rsp), %rax movq (%rax), %rsi movq 0x20(%rax), %rdi movq (%rdi), %rax movq 0x18(%rax), %rax vzeroupper callq *%rax jmp 0x1646adb jmp 0x1646b0a movq 0x30(%rsp), %rax movq (%rax), %rax movq %rax, 0x2a8(%rsp) cmpq $0x0, 0x2a8(%rsp) je 0x1646b08 movq 0x2a8(%rsp), %rdi vzeroupper callq 0x5f480 jmp 0x1646b0a jmp 0x1646b0c movq 0x30(%rsp), %rax movq $0x0, (%rax) movq $0x0, 0x10(%rax) movl $0x0, 0x18(%rax) movl $0x0, 0x28(%rax) movl $0x0, 0x2c(%rax) movl $0x0, 0x30(%rax) movl $0x0, 0x34(%rax) movl $0x0, 0x38(%rax) movq $0x0, 0x40(%rax) movq $0x0, 0x8(%rax) jmp 0x1646b64 movq %rax, %rdi callq 0x678a0 movq 0x28(%rsp), %rax movq %rax, 0x110(%rsp) movl $0x0, 0xc4(%rsp) movl 0xc4(%rsp), %eax addl $0xf, %eax cmpl 0x11c(%rsp), %eax jge 0x1646c36 movq 0x110(%rsp), %rax movq %rax, 0x178(%rsp) movq 0x178(%rsp), %rax vmovups (%rax), %zmm0 vmovaps %zmm0, 0x80(%rsp) leaq 0x137(%rsp), %rdi leaq 0x80(%rsp), %rsi callq 0x164db40 vmovaps %zmm0, 0x80(%rsp) movq 0x110(%rsp), %rax vmovaps 0x80(%rsp), %zmm0 movq %rax, 0x1d0(%rsp) vmovaps %zmm0, 0x180(%rsp) vmovaps 0x180(%rsp), %zmm0 movq 0x1d0(%rsp), %rax vmovups %zmm0, (%rax) movq 0x110(%rsp), %rax addq $0x40, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x10, %eax movl %eax, 0xc4(%rsp) jmp 0x1646b7c jmp 0x1646c38 movl 0xc4(%rsp), %eax addl $0x7, %eax cmpl 0x11c(%rsp), %eax jge 0x1646ce7 movq 0x110(%rsp), %rax movq %rax, 0x1d8(%rsp) movq 0x1d8(%rsp), %rax vmovups (%rax), %ymm0 vmovaps %ymm0, 0x60(%rsp) leaq 0x137(%rsp), %rdi leaq 0x60(%rsp), %rsi callq 0x164db60 vmovaps %ymm0, 0x60(%rsp) movq 0x110(%rsp), %rax vmovaps 0x60(%rsp), %ymm0 movq %rax, 0x210(%rsp) vmovaps %ymm0, 0x1e0(%rsp) vmovaps 0x1e0(%rsp), %ymm0 movq 0x210(%rsp), %rax vmovups %ymm0, (%rax) movq 0x110(%rsp), %rax addq $0x20, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x8, %eax movl %eax, 0xc4(%rsp) jmp 0x1646c38 jmp 0x1646ce9 movl 0xc4(%rsp), %eax addl $0x3, %eax cmpl 0x11c(%rsp), %eax jge 0x1646d9b movq 0x110(%rsp), %rax movq %rax, 0x218(%rsp) movq 0x218(%rsp), %rax vmovaps (%rax), %xmm0 vmovaps %xmm0, 0x50(%rsp) leaq 0x137(%rsp), %rdi leaq 0x50(%rsp), %rsi vzeroupper callq 0x164db80 vmovaps %xmm0, 0x50(%rsp) movq 0x110(%rsp), %rax vmovaps 0x50(%rsp), %xmm0 movq %rax, 0x230(%rsp) vmovaps %xmm0, 0x220(%rsp) vmovaps 0x220(%rsp), %xmm0 movq 0x230(%rsp), %rax vmovaps %xmm0, (%rax) movq 0x110(%rsp), %rax addq $0x10, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x4, %eax movl %eax, 0xc4(%rsp) jmp 0x1646ce9 jmp 0x1646d9d movl 0xc4(%rsp), %eax cmpl 0x11c(%rsp), %eax jge 0x1646df8 movq 0x110(%rsp), %rsi leaq 0x137(%rsp), %rdi vzeroupper callq 0x164dbb0 movq 0x110(%rsp), %rax vmovss %xmm0, (%rax) movq 0x110(%rsp), %rax addq $0x4, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x1, %eax movl %eax, 0xc4(%rsp) jmp 0x1646d9d jmp 0x1646dfa movl 0x118(%rsp), %eax addl $0x1, %eax movl %eax, 0x118(%rsp) jmp 0x164673a xorl %eax, %eax movq %rbp, %rsp popq %rbp vzeroupper retq nopw (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,238
int ncnn::unary_op_inplace<ncnn::UnaryOp_x86_avx512_functor::unary_op_ceil>(ncnn::Mat&, ncnn::Option const&)
static int unary_op_inplace(Mat& a, const Option& opt) { Op op; int w = a.w; int h = a.h; int d = a.d; int channels = a.c; int elempack = a.elempack; int size = w * h * d * elempack; #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < channels; q++) { float* ptr = a.channel(q); int i = 0; #if __SSE2__ #if __AVX__ #if __AVX512F__ for (; i + 15 < size; i += 16) { __m512 _p = _mm512_loadu_ps(ptr); _p = op.func_pack16(_p); _mm512_storeu_ps(ptr, _p); ptr += 16; } #endif // __AVX512F__ for (; i + 7 < size; i += 8) { __m256 _p = _mm256_loadu_ps(ptr); _p = op.func_pack8(_p); _mm256_storeu_ps(ptr, _p); ptr += 8; } #endif // __AVX__ for (; i + 3 < size; i += 4) { __m128 _p = _mm_load_ps(ptr); _p = op.func_pack4(_p); _mm_store_ps(ptr, _p); ptr += 4; } #endif // __SSE2__ for (; i < size; i++) { *ptr = op.func(*ptr); ptr++; } } return 0; }
pushq %rbp movq %rsp, %rbp andq $-0x40, %rsp subq $0x2c0, %rsp # imm = 0x2C0 movq %rdi, 0x140(%rsp) movq %rsi, 0x138(%rsp) movq 0x140(%rsp), %rax movl 0x2c(%rax), %eax movl %eax, 0x130(%rsp) movq 0x140(%rsp), %rax movl 0x30(%rax), %eax movl %eax, 0x12c(%rsp) movq 0x140(%rsp), %rax movl 0x34(%rax), %eax movl %eax, 0x128(%rsp) movq 0x140(%rsp), %rax movl 0x38(%rax), %eax movl %eax, 0x124(%rsp) movq 0x140(%rsp), %rax movl 0x18(%rax), %eax movl %eax, 0x120(%rsp) movl 0x130(%rsp), %eax imull 0x12c(%rsp), %eax imull 0x128(%rsp), %eax imull 0x120(%rsp), %eax movl %eax, 0x11c(%rsp) movl $0x0, 0x118(%rsp) movl 0x118(%rsp), %eax cmpl 0x124(%rsp), %eax jge 0x16475a0 movq 0x140(%rsp), %rcx movl 0x118(%rsp), %eax leaq 0xc8(%rsp), %rdx movq %rdx, 0x158(%rsp) movq %rcx, 0x150(%rsp) movl %eax, 0x14c(%rsp) movq 0x150(%rsp), %rax movq %rax, 0x48(%rsp) movb $0x0, 0x14b(%rsp) movl 0x2c(%rax), %r9d movl 0x30(%rax), %r8d movl 0x34(%rax), %edi movq (%rax), %rsi movq 0x40(%rax), %rcx movslq 0x14c(%rsp), %rdx imulq %rdx, %rcx imulq 0x10(%rax), %rcx addq %rcx, %rsi movq 0x10(%rax), %rdx movl 0x18(%rax), %ecx movq 0x20(%rax), %rax leaq 0xc8(%rsp), %r10 movq %r10, 0x268(%rsp) movl %r9d, 0x264(%rsp) movl %r8d, 0x260(%rsp) movl %edi, 0x25c(%rsp) movq %rsi, 0x250(%rsp) movq %rdx, 0x248(%rsp) movl %ecx, 0x244(%rsp) movq %rax, 0x238(%rsp) movq 0x268(%rsp), %rcx movq %rcx, 0x40(%rsp) movq 0x250(%rsp), %rax movq %rax, (%rcx) movq $0x0, 0x8(%rcx) movq 0x248(%rsp), %rax movq %rax, 0x10(%rcx) movl 0x244(%rsp), %eax movl %eax, 0x18(%rcx) movq 0x238(%rsp), %rax movq %rax, 0x20(%rcx) movl $0x3, 0x28(%rcx) movl 0x264(%rsp), %eax movl %eax, 0x2c(%rcx) movl 0x260(%rsp), %eax movl %eax, 0x30(%rcx) movl $0x1, 0x34(%rcx) movl 0x25c(%rsp), %eax movl %eax, 0x38(%rcx) movslq 0x2c(%rcx), %rax movslq 0x30(%rcx), %rdx imulq %rdx, %rax imulq 0x10(%rcx), %rax movq %rax, 0x278(%rsp) movl $0x10, 0x274(%rsp) movq 0x278(%rsp), %rax movslq 0x274(%rsp), %rdx addq %rdx, %rax subq $0x1, %rax xorl %edx, %edx subl 0x274(%rsp), %edx movslq %edx, %rdx andq %rdx, %rax xorl %edx, %edx divq 0x10(%rcx) movq %rax, %rdx movq 0x48(%rsp), %rax movq %rdx, 0x40(%rcx) movl 0x28(%rax), %ecx subl $0x1, %ecx movl %ecx, 0xf0(%rsp) cmpl $0x4, 0x28(%rax) jne 0x164708e movq 0x48(%rsp), %rcx movslq 0x2c(%rcx), %rax movslq 0x30(%rcx), %rcx imulq %rcx, %rax movq %rax, 0x108(%rsp) movb $0x1, 0x14b(%rsp) testb $0x1, 0x14b(%rsp) jne 0x16471bd leaq 0xc8(%rsp), %rax movq %rax, 0x168(%rsp) movq 0x168(%rsp), %rax movq %rax, 0x298(%rsp) movq 0x298(%rsp), %rax movq %rax, 0x38(%rsp) cmpq $0x0, 0x8(%rax) je 0x1647163 movq 0x38(%rsp), %rax movq 0x8(%rax), %rcx movl $0xffffffff, 0x294(%rsp) # imm = 0xFFFFFFFF movl 0x294(%rsp), %eax lock xaddl %eax, (%rcx) movl %eax, 0x290(%rsp) cmpl $0x1, 0x290(%rsp) jne 0x1647163 movq 0x38(%rsp), %rax cmpq $0x0, 0x20(%rax) je 0x1647134 movq 0x38(%rsp), %rax movq (%rax), %rsi movq 0x20(%rax), %rdi movq (%rdi), %rax movq 0x18(%rax), %rax vzeroupper callq *%rax jmp 0x1647132 jmp 0x1647161 movq 0x38(%rsp), %rax movq (%rax), %rax movq %rax, 0x2a0(%rsp) cmpq $0x0, 0x2a0(%rsp) je 0x164715f movq 0x2a0(%rsp), %rdi vzeroupper callq 0x5f480 jmp 0x1647161 jmp 0x1647163 movq 0x38(%rsp), %rax movq $0x0, (%rax) movq $0x0, 0x10(%rax) movl $0x0, 0x18(%rax) movl $0x0, 0x28(%rax) movl $0x0, 0x2c(%rax) movl $0x0, 0x30(%rax) movl $0x0, 0x34(%rax) movl $0x0, 0x38(%rax) movq $0x0, 0x40(%rax) movq $0x0, 0x8(%rax) jmp 0x16471bb movq %rax, %rdi callq 0x678a0 jmp 0x16471bd leaq 0xc8(%rsp), %rax movq %rax, 0x160(%rsp) movq 0x160(%rsp), %rax movq (%rax), %rax movq %rax, 0x28(%rsp) 
leaq 0xc8(%rsp), %rax movq %rax, 0x170(%rsp) movq 0x170(%rsp), %rax movq %rax, 0x288(%rsp) movq 0x288(%rsp), %rax movq %rax, 0x30(%rsp) cmpq $0x0, 0x8(%rax) je 0x164729c movq 0x30(%rsp), %rax movq 0x8(%rax), %rcx movl $0xffffffff, 0x284(%rsp) # imm = 0xFFFFFFFF movl 0x284(%rsp), %eax lock xaddl %eax, (%rcx) movl %eax, 0x280(%rsp) cmpl $0x1, 0x280(%rsp) jne 0x164729c movq 0x30(%rsp), %rax cmpq $0x0, 0x20(%rax) je 0x164726d movq 0x30(%rsp), %rax movq (%rax), %rsi movq 0x20(%rax), %rdi movq (%rdi), %rax movq 0x18(%rax), %rax vzeroupper callq *%rax jmp 0x164726b jmp 0x164729a movq 0x30(%rsp), %rax movq (%rax), %rax movq %rax, 0x2a8(%rsp) cmpq $0x0, 0x2a8(%rsp) je 0x1647298 movq 0x2a8(%rsp), %rdi vzeroupper callq 0x5f480 jmp 0x164729a jmp 0x164729c movq 0x30(%rsp), %rax movq $0x0, (%rax) movq $0x0, 0x10(%rax) movl $0x0, 0x18(%rax) movl $0x0, 0x28(%rax) movl $0x0, 0x2c(%rax) movl $0x0, 0x30(%rax) movl $0x0, 0x34(%rax) movl $0x0, 0x38(%rax) movq $0x0, 0x40(%rax) movq $0x0, 0x8(%rax) jmp 0x16472f4 movq %rax, %rdi callq 0x678a0 movq 0x28(%rsp), %rax movq %rax, 0x110(%rsp) movl $0x0, 0xc4(%rsp) movl 0xc4(%rsp), %eax addl $0xf, %eax cmpl 0x11c(%rsp), %eax jge 0x16473c6 movq 0x110(%rsp), %rax movq %rax, 0x178(%rsp) movq 0x178(%rsp), %rax vmovups (%rax), %zmm0 vmovaps %zmm0, 0x80(%rsp) leaq 0x137(%rsp), %rdi leaq 0x80(%rsp), %rsi callq 0x164dbe0 vmovaps %zmm0, 0x80(%rsp) movq 0x110(%rsp), %rax vmovaps 0x80(%rsp), %zmm0 movq %rax, 0x1d0(%rsp) vmovaps %zmm0, 0x180(%rsp) vmovaps 0x180(%rsp), %zmm0 movq 0x1d0(%rsp), %rax vmovups %zmm0, (%rax) movq 0x110(%rsp), %rax addq $0x40, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x10, %eax movl %eax, 0xc4(%rsp) jmp 0x164730c jmp 0x16473c8 movl 0xc4(%rsp), %eax addl $0x7, %eax cmpl 0x11c(%rsp), %eax jge 0x1647477 movq 0x110(%rsp), %rax movq %rax, 0x1d8(%rsp) movq 0x1d8(%rsp), %rax vmovups (%rax), %ymm0 vmovaps %ymm0, 0x60(%rsp) leaq 0x137(%rsp), %rdi leaq 0x60(%rsp), %rsi callq 0x164dc00 vmovaps %ymm0, 0x60(%rsp) movq 0x110(%rsp), %rax vmovaps 0x60(%rsp), %ymm0 movq %rax, 0x210(%rsp) vmovaps %ymm0, 0x1e0(%rsp) vmovaps 0x1e0(%rsp), %ymm0 movq 0x210(%rsp), %rax vmovups %ymm0, (%rax) movq 0x110(%rsp), %rax addq $0x20, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x8, %eax movl %eax, 0xc4(%rsp) jmp 0x16473c8 jmp 0x1647479 movl 0xc4(%rsp), %eax addl $0x3, %eax cmpl 0x11c(%rsp), %eax jge 0x164752b movq 0x110(%rsp), %rax movq %rax, 0x218(%rsp) movq 0x218(%rsp), %rax vmovaps (%rax), %xmm0 vmovaps %xmm0, 0x50(%rsp) leaq 0x137(%rsp), %rdi leaq 0x50(%rsp), %rsi vzeroupper callq 0x164dc20 vmovaps %xmm0, 0x50(%rsp) movq 0x110(%rsp), %rax vmovaps 0x50(%rsp), %xmm0 movq %rax, 0x230(%rsp) vmovaps %xmm0, 0x220(%rsp) vmovaps 0x220(%rsp), %xmm0 movq 0x230(%rsp), %rax vmovaps %xmm0, (%rax) movq 0x110(%rsp), %rax addq $0x10, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x4, %eax movl %eax, 0xc4(%rsp) jmp 0x1647479 jmp 0x164752d movl 0xc4(%rsp), %eax cmpl 0x11c(%rsp), %eax jge 0x1647588 movq 0x110(%rsp), %rsi leaq 0x137(%rsp), %rdi vzeroupper callq 0x164dc50 movq 0x110(%rsp), %rax vmovss %xmm0, (%rax) movq 0x110(%rsp), %rax addq $0x4, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x1, %eax movl %eax, 0xc4(%rsp) jmp 0x164752d jmp 0x164758a movl 0x118(%rsp), %eax addl $0x1, %eax movl %eax, 0x118(%rsp) jmp 0x1646eca xorl %eax, %eax movq %rbp, %rsp popq %rbp vzeroupper retq nopw (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,239
int ncnn::unary_op_inplace<ncnn::UnaryOp_x86_avx512_functor::unary_op_square>(ncnn::Mat&, ncnn::Option const&)
static int unary_op_inplace(Mat& a, const Option& opt)
{
    Op op;

    int w = a.w;
    int h = a.h;
    int d = a.d;
    int channels = a.c;
    int elempack = a.elempack;
    int size = w * h * d * elempack;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < channels; q++)
    {
        float* ptr = a.channel(q);

        int i = 0;
#if __SSE2__
#if __AVX__
#if __AVX512F__
        for (; i + 15 < size; i += 16)
        {
            __m512 _p = _mm512_loadu_ps(ptr);
            _p = op.func_pack16(_p);
            _mm512_storeu_ps(ptr, _p);
            ptr += 16;
        }
#endif // __AVX512F__
        for (; i + 7 < size; i += 8)
        {
            __m256 _p = _mm256_loadu_ps(ptr);
            _p = op.func_pack8(_p);
            _mm256_storeu_ps(ptr, _p);
            ptr += 8;
        }
#endif // __AVX__
        for (; i + 3 < size; i += 4)
        {
            __m128 _p = _mm_load_ps(ptr);
            _p = op.func_pack4(_p);
            _mm_store_ps(ptr, _p);
            ptr += 4;
        }
#endif // __SSE2__
        for (; i < size; i++)
        {
            *ptr = op.func(*ptr);
            ptr++;
        }
    }

    return 0;
}
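The functor body itself is not captured in this record; only the driver loop above is. Judging from the pack4 functors earlier in this dump, a minimal sketch of what unary_op_square plausibly supplies for the four call sites the template dispatches to (the struct layout is an assumption, not read from this record):

#include <immintrin.h>

struct unary_op_square
{
    // scalar tail
    float func(const float& x) const { return x * x; }
    // 4-lane SSE2 path
    __m128 func_pack4(const __m128& x) const { return _mm_mul_ps(x, x); }
    // 8-lane AVX path
    __m256 func_pack8(const __m256& x) const { return _mm256_mul_ps(x, x); }
    // 16-lane AVX512F path
    __m512 func_pack16(const __m512& x) const { return _mm512_mul_ps(x, x); }
};

Because the driver is a template over Op, the instantiations in the following records differ only in the callq targets of these four hooks; the surrounding loop bytes are identical from record to record.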
pushq %rbp movq %rsp, %rbp andq $-0x40, %rsp subq $0x2c0, %rsp # imm = 0x2C0 movq %rdi, 0x140(%rsp) movq %rsi, 0x138(%rsp) movq 0x140(%rsp), %rax movl 0x2c(%rax), %eax movl %eax, 0x130(%rsp) movq 0x140(%rsp), %rax movl 0x30(%rax), %eax movl %eax, 0x12c(%rsp) movq 0x140(%rsp), %rax movl 0x34(%rax), %eax movl %eax, 0x128(%rsp) movq 0x140(%rsp), %rax movl 0x38(%rax), %eax movl %eax, 0x124(%rsp) movq 0x140(%rsp), %rax movl 0x18(%rax), %eax movl %eax, 0x120(%rsp) movl 0x130(%rsp), %eax imull 0x12c(%rsp), %eax imull 0x128(%rsp), %eax imull 0x120(%rsp), %eax movl %eax, 0x11c(%rsp) movl $0x0, 0x118(%rsp) movl 0x118(%rsp), %eax cmpl 0x124(%rsp), %eax jge 0x1647d30 movq 0x140(%rsp), %rcx movl 0x118(%rsp), %eax leaq 0xc8(%rsp), %rdx movq %rdx, 0x158(%rsp) movq %rcx, 0x150(%rsp) movl %eax, 0x14c(%rsp) movq 0x150(%rsp), %rax movq %rax, 0x48(%rsp) movb $0x0, 0x14b(%rsp) movl 0x2c(%rax), %r9d movl 0x30(%rax), %r8d movl 0x34(%rax), %edi movq (%rax), %rsi movq 0x40(%rax), %rcx movslq 0x14c(%rsp), %rdx imulq %rdx, %rcx imulq 0x10(%rax), %rcx addq %rcx, %rsi movq 0x10(%rax), %rdx movl 0x18(%rax), %ecx movq 0x20(%rax), %rax leaq 0xc8(%rsp), %r10 movq %r10, 0x268(%rsp) movl %r9d, 0x264(%rsp) movl %r8d, 0x260(%rsp) movl %edi, 0x25c(%rsp) movq %rsi, 0x250(%rsp) movq %rdx, 0x248(%rsp) movl %ecx, 0x244(%rsp) movq %rax, 0x238(%rsp) movq 0x268(%rsp), %rcx movq %rcx, 0x40(%rsp) movq 0x250(%rsp), %rax movq %rax, (%rcx) movq $0x0, 0x8(%rcx) movq 0x248(%rsp), %rax movq %rax, 0x10(%rcx) movl 0x244(%rsp), %eax movl %eax, 0x18(%rcx) movq 0x238(%rsp), %rax movq %rax, 0x20(%rcx) movl $0x3, 0x28(%rcx) movl 0x264(%rsp), %eax movl %eax, 0x2c(%rcx) movl 0x260(%rsp), %eax movl %eax, 0x30(%rcx) movl $0x1, 0x34(%rcx) movl 0x25c(%rsp), %eax movl %eax, 0x38(%rcx) movslq 0x2c(%rcx), %rax movslq 0x30(%rcx), %rdx imulq %rdx, %rax imulq 0x10(%rcx), %rax movq %rax, 0x278(%rsp) movl $0x10, 0x274(%rsp) movq 0x278(%rsp), %rax movslq 0x274(%rsp), %rdx addq %rdx, %rax subq $0x1, %rax xorl %edx, %edx subl 0x274(%rsp), %edx movslq %edx, %rdx andq %rdx, %rax xorl %edx, %edx divq 0x10(%rcx) movq %rax, %rdx movq 0x48(%rsp), %rax movq %rdx, 0x40(%rcx) movl 0x28(%rax), %ecx subl $0x1, %ecx movl %ecx, 0xf0(%rsp) cmpl $0x4, 0x28(%rax) jne 0x164781e movq 0x48(%rsp), %rcx movslq 0x2c(%rcx), %rax movslq 0x30(%rcx), %rcx imulq %rcx, %rax movq %rax, 0x108(%rsp) movb $0x1, 0x14b(%rsp) testb $0x1, 0x14b(%rsp) jne 0x164794d leaq 0xc8(%rsp), %rax movq %rax, 0x168(%rsp) movq 0x168(%rsp), %rax movq %rax, 0x298(%rsp) movq 0x298(%rsp), %rax movq %rax, 0x38(%rsp) cmpq $0x0, 0x8(%rax) je 0x16478f3 movq 0x38(%rsp), %rax movq 0x8(%rax), %rcx movl $0xffffffff, 0x294(%rsp) # imm = 0xFFFFFFFF movl 0x294(%rsp), %eax lock xaddl %eax, (%rcx) movl %eax, 0x290(%rsp) cmpl $0x1, 0x290(%rsp) jne 0x16478f3 movq 0x38(%rsp), %rax cmpq $0x0, 0x20(%rax) je 0x16478c4 movq 0x38(%rsp), %rax movq (%rax), %rsi movq 0x20(%rax), %rdi movq (%rdi), %rax movq 0x18(%rax), %rax vzeroupper callq *%rax jmp 0x16478c2 jmp 0x16478f1 movq 0x38(%rsp), %rax movq (%rax), %rax movq %rax, 0x2a0(%rsp) cmpq $0x0, 0x2a0(%rsp) je 0x16478ef movq 0x2a0(%rsp), %rdi vzeroupper callq 0x5f480 jmp 0x16478f1 jmp 0x16478f3 movq 0x38(%rsp), %rax movq $0x0, (%rax) movq $0x0, 0x10(%rax) movl $0x0, 0x18(%rax) movl $0x0, 0x28(%rax) movl $0x0, 0x2c(%rax) movl $0x0, 0x30(%rax) movl $0x0, 0x34(%rax) movl $0x0, 0x38(%rax) movq $0x0, 0x40(%rax) movq $0x0, 0x8(%rax) jmp 0x164794b movq %rax, %rdi callq 0x678a0 jmp 0x164794d leaq 0xc8(%rsp), %rax movq %rax, 0x160(%rsp) movq 0x160(%rsp), %rax movq (%rax), %rax movq %rax, 0x28(%rsp) 
leaq 0xc8(%rsp), %rax movq %rax, 0x170(%rsp) movq 0x170(%rsp), %rax movq %rax, 0x288(%rsp) movq 0x288(%rsp), %rax movq %rax, 0x30(%rsp) cmpq $0x0, 0x8(%rax) je 0x1647a2c movq 0x30(%rsp), %rax movq 0x8(%rax), %rcx movl $0xffffffff, 0x284(%rsp) # imm = 0xFFFFFFFF movl 0x284(%rsp), %eax lock xaddl %eax, (%rcx) movl %eax, 0x280(%rsp) cmpl $0x1, 0x280(%rsp) jne 0x1647a2c movq 0x30(%rsp), %rax cmpq $0x0, 0x20(%rax) je 0x16479fd movq 0x30(%rsp), %rax movq (%rax), %rsi movq 0x20(%rax), %rdi movq (%rdi), %rax movq 0x18(%rax), %rax vzeroupper callq *%rax jmp 0x16479fb jmp 0x1647a2a movq 0x30(%rsp), %rax movq (%rax), %rax movq %rax, 0x2a8(%rsp) cmpq $0x0, 0x2a8(%rsp) je 0x1647a28 movq 0x2a8(%rsp), %rdi vzeroupper callq 0x5f480 jmp 0x1647a2a jmp 0x1647a2c movq 0x30(%rsp), %rax movq $0x0, (%rax) movq $0x0, 0x10(%rax) movl $0x0, 0x18(%rax) movl $0x0, 0x28(%rax) movl $0x0, 0x2c(%rax) movl $0x0, 0x30(%rax) movl $0x0, 0x34(%rax) movl $0x0, 0x38(%rax) movq $0x0, 0x40(%rax) movq $0x0, 0x8(%rax) jmp 0x1647a84 movq %rax, %rdi callq 0x678a0 movq 0x28(%rsp), %rax movq %rax, 0x110(%rsp) movl $0x0, 0xc4(%rsp) movl 0xc4(%rsp), %eax addl $0xf, %eax cmpl 0x11c(%rsp), %eax jge 0x1647b56 movq 0x110(%rsp), %rax movq %rax, 0x178(%rsp) movq 0x178(%rsp), %rax vmovups (%rax), %zmm0 vmovaps %zmm0, 0x80(%rsp) leaq 0x137(%rsp), %rdi leaq 0x80(%rsp), %rsi callq 0x164dc80 vmovaps %zmm0, 0x80(%rsp) movq 0x110(%rsp), %rax vmovaps 0x80(%rsp), %zmm0 movq %rax, 0x1d0(%rsp) vmovaps %zmm0, 0x180(%rsp) vmovaps 0x180(%rsp), %zmm0 movq 0x1d0(%rsp), %rax vmovups %zmm0, (%rax) movq 0x110(%rsp), %rax addq $0x40, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x10, %eax movl %eax, 0xc4(%rsp) jmp 0x1647a9c jmp 0x1647b58 movl 0xc4(%rsp), %eax addl $0x7, %eax cmpl 0x11c(%rsp), %eax jge 0x1647c07 movq 0x110(%rsp), %rax movq %rax, 0x1d8(%rsp) movq 0x1d8(%rsp), %rax vmovups (%rax), %ymm0 vmovaps %ymm0, 0x60(%rsp) leaq 0x137(%rsp), %rdi leaq 0x60(%rsp), %rsi callq 0x164dce0 vmovaps %ymm0, 0x60(%rsp) movq 0x110(%rsp), %rax vmovaps 0x60(%rsp), %ymm0 movq %rax, 0x210(%rsp) vmovaps %ymm0, 0x1e0(%rsp) vmovaps 0x1e0(%rsp), %ymm0 movq 0x210(%rsp), %rax vmovups %ymm0, (%rax) movq 0x110(%rsp), %rax addq $0x20, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x8, %eax movl %eax, 0xc4(%rsp) jmp 0x1647b58 jmp 0x1647c09 movl 0xc4(%rsp), %eax addl $0x3, %eax cmpl 0x11c(%rsp), %eax jge 0x1647cbb movq 0x110(%rsp), %rax movq %rax, 0x218(%rsp) movq 0x218(%rsp), %rax vmovaps (%rax), %xmm0 vmovaps %xmm0, 0x50(%rsp) leaq 0x137(%rsp), %rdi leaq 0x50(%rsp), %rsi vzeroupper callq 0x164dd30 vmovaps %xmm0, 0x50(%rsp) movq 0x110(%rsp), %rax vmovaps 0x50(%rsp), %xmm0 movq %rax, 0x230(%rsp) vmovaps %xmm0, 0x220(%rsp) vmovaps 0x220(%rsp), %xmm0 movq 0x230(%rsp), %rax vmovaps %xmm0, (%rax) movq 0x110(%rsp), %rax addq $0x10, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x4, %eax movl %eax, 0xc4(%rsp) jmp 0x1647c09 jmp 0x1647cbd movl 0xc4(%rsp), %eax cmpl 0x11c(%rsp), %eax jge 0x1647d18 movq 0x110(%rsp), %rsi leaq 0x137(%rsp), %rdi vzeroupper callq 0x164dd70 movq 0x110(%rsp), %rax vmovss %xmm0, (%rax) movq 0x110(%rsp), %rax addq $0x4, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x1, %eax movl %eax, 0xc4(%rsp) jmp 0x1647cbd jmp 0x1647d1a movl 0x118(%rsp), %eax addl $0x1, %eax movl %eax, 0x118(%rsp) jmp 0x164765a xorl %eax, %eax movq %rbp, %rsp popq %rbp vzeroupper retq nopw (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,240
int ncnn::unary_op_inplace<ncnn::UnaryOp_x86_avx512_functor::unary_op_sqrt>(ncnn::Mat&, ncnn::Option const&)
static int unary_op_inplace(Mat& a, const Option& opt)
{
    Op op;

    int w = a.w;
    int h = a.h;
    int d = a.d;
    int channels = a.c;
    int elempack = a.elempack;
    int size = w * h * d * elempack;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < channels; q++)
    {
        float* ptr = a.channel(q);

        int i = 0;
#if __SSE2__
#if __AVX__
#if __AVX512F__
        for (; i + 15 < size; i += 16)
        {
            __m512 _p = _mm512_loadu_ps(ptr);
            _p = op.func_pack16(_p);
            _mm512_storeu_ps(ptr, _p);
            ptr += 16;
        }
#endif // __AVX512F__
        for (; i + 7 < size; i += 8)
        {
            __m256 _p = _mm256_loadu_ps(ptr);
            _p = op.func_pack8(_p);
            _mm256_storeu_ps(ptr, _p);
            ptr += 8;
        }
#endif // __AVX__
        for (; i + 3 < size; i += 4)
        {
            __m128 _p = _mm_load_ps(ptr);
            _p = op.func_pack4(_p);
            _mm_store_ps(ptr, _p);
            ptr += 4;
        }
#endif // __SSE2__
        for (; i < size; i++)
        {
            *ptr = op.func(*ptr);
            ptr++;
        }
    }

    return 0;
}
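Square root has direct hardware support at every vector width, so a plausible functor is nothing but thin wrappers over the sqrt intrinsics (vsqrtps/sqrtss underneath). The intrinsic names are real; the struct itself is an assumed sketch:

#include <immintrin.h>
#include <math.h>

struct unary_op_sqrt
{
    float func(const float& x) const { return sqrtf(x); }                   // scalar tail
    __m128 func_pack4(const __m128& x) const { return _mm_sqrt_ps(x); }
    __m256 func_pack8(const __m256& x) const { return _mm256_sqrt_ps(x); }
    __m512 func_pack16(const __m512& x) const { return _mm512_sqrt_ps(x); } // vsqrtps zmm
};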
pushq %rbp movq %rsp, %rbp andq $-0x40, %rsp subq $0x2c0, %rsp # imm = 0x2C0 movq %rdi, 0x140(%rsp) movq %rsi, 0x138(%rsp) movq 0x140(%rsp), %rax movl 0x2c(%rax), %eax movl %eax, 0x130(%rsp) movq 0x140(%rsp), %rax movl 0x30(%rax), %eax movl %eax, 0x12c(%rsp) movq 0x140(%rsp), %rax movl 0x34(%rax), %eax movl %eax, 0x128(%rsp) movq 0x140(%rsp), %rax movl 0x38(%rax), %eax movl %eax, 0x124(%rsp) movq 0x140(%rsp), %rax movl 0x18(%rax), %eax movl %eax, 0x120(%rsp) movl 0x130(%rsp), %eax imull 0x12c(%rsp), %eax imull 0x128(%rsp), %eax imull 0x120(%rsp), %eax movl %eax, 0x11c(%rsp) movl $0x0, 0x118(%rsp) movl 0x118(%rsp), %eax cmpl 0x124(%rsp), %eax jge 0x16484c0 movq 0x140(%rsp), %rcx movl 0x118(%rsp), %eax leaq 0xc8(%rsp), %rdx movq %rdx, 0x158(%rsp) movq %rcx, 0x150(%rsp) movl %eax, 0x14c(%rsp) movq 0x150(%rsp), %rax movq %rax, 0x48(%rsp) movb $0x0, 0x14b(%rsp) movl 0x2c(%rax), %r9d movl 0x30(%rax), %r8d movl 0x34(%rax), %edi movq (%rax), %rsi movq 0x40(%rax), %rcx movslq 0x14c(%rsp), %rdx imulq %rdx, %rcx imulq 0x10(%rax), %rcx addq %rcx, %rsi movq 0x10(%rax), %rdx movl 0x18(%rax), %ecx movq 0x20(%rax), %rax leaq 0xc8(%rsp), %r10 movq %r10, 0x268(%rsp) movl %r9d, 0x264(%rsp) movl %r8d, 0x260(%rsp) movl %edi, 0x25c(%rsp) movq %rsi, 0x250(%rsp) movq %rdx, 0x248(%rsp) movl %ecx, 0x244(%rsp) movq %rax, 0x238(%rsp) movq 0x268(%rsp), %rcx movq %rcx, 0x40(%rsp) movq 0x250(%rsp), %rax movq %rax, (%rcx) movq $0x0, 0x8(%rcx) movq 0x248(%rsp), %rax movq %rax, 0x10(%rcx) movl 0x244(%rsp), %eax movl %eax, 0x18(%rcx) movq 0x238(%rsp), %rax movq %rax, 0x20(%rcx) movl $0x3, 0x28(%rcx) movl 0x264(%rsp), %eax movl %eax, 0x2c(%rcx) movl 0x260(%rsp), %eax movl %eax, 0x30(%rcx) movl $0x1, 0x34(%rcx) movl 0x25c(%rsp), %eax movl %eax, 0x38(%rcx) movslq 0x2c(%rcx), %rax movslq 0x30(%rcx), %rdx imulq %rdx, %rax imulq 0x10(%rcx), %rax movq %rax, 0x278(%rsp) movl $0x10, 0x274(%rsp) movq 0x278(%rsp), %rax movslq 0x274(%rsp), %rdx addq %rdx, %rax subq $0x1, %rax xorl %edx, %edx subl 0x274(%rsp), %edx movslq %edx, %rdx andq %rdx, %rax xorl %edx, %edx divq 0x10(%rcx) movq %rax, %rdx movq 0x48(%rsp), %rax movq %rdx, 0x40(%rcx) movl 0x28(%rax), %ecx subl $0x1, %ecx movl %ecx, 0xf0(%rsp) cmpl $0x4, 0x28(%rax) jne 0x1647fae movq 0x48(%rsp), %rcx movslq 0x2c(%rcx), %rax movslq 0x30(%rcx), %rcx imulq %rcx, %rax movq %rax, 0x108(%rsp) movb $0x1, 0x14b(%rsp) testb $0x1, 0x14b(%rsp) jne 0x16480dd leaq 0xc8(%rsp), %rax movq %rax, 0x168(%rsp) movq 0x168(%rsp), %rax movq %rax, 0x298(%rsp) movq 0x298(%rsp), %rax movq %rax, 0x38(%rsp) cmpq $0x0, 0x8(%rax) je 0x1648083 movq 0x38(%rsp), %rax movq 0x8(%rax), %rcx movl $0xffffffff, 0x294(%rsp) # imm = 0xFFFFFFFF movl 0x294(%rsp), %eax lock xaddl %eax, (%rcx) movl %eax, 0x290(%rsp) cmpl $0x1, 0x290(%rsp) jne 0x1648083 movq 0x38(%rsp), %rax cmpq $0x0, 0x20(%rax) je 0x1648054 movq 0x38(%rsp), %rax movq (%rax), %rsi movq 0x20(%rax), %rdi movq (%rdi), %rax movq 0x18(%rax), %rax vzeroupper callq *%rax jmp 0x1648052 jmp 0x1648081 movq 0x38(%rsp), %rax movq (%rax), %rax movq %rax, 0x2a0(%rsp) cmpq $0x0, 0x2a0(%rsp) je 0x164807f movq 0x2a0(%rsp), %rdi vzeroupper callq 0x5f480 jmp 0x1648081 jmp 0x1648083 movq 0x38(%rsp), %rax movq $0x0, (%rax) movq $0x0, 0x10(%rax) movl $0x0, 0x18(%rax) movl $0x0, 0x28(%rax) movl $0x0, 0x2c(%rax) movl $0x0, 0x30(%rax) movl $0x0, 0x34(%rax) movl $0x0, 0x38(%rax) movq $0x0, 0x40(%rax) movq $0x0, 0x8(%rax) jmp 0x16480db movq %rax, %rdi callq 0x678a0 jmp 0x16480dd leaq 0xc8(%rsp), %rax movq %rax, 0x160(%rsp) movq 0x160(%rsp), %rax movq (%rax), %rax movq %rax, 0x28(%rsp) 
leaq 0xc8(%rsp), %rax movq %rax, 0x170(%rsp) movq 0x170(%rsp), %rax movq %rax, 0x288(%rsp) movq 0x288(%rsp), %rax movq %rax, 0x30(%rsp) cmpq $0x0, 0x8(%rax) je 0x16481bc movq 0x30(%rsp), %rax movq 0x8(%rax), %rcx movl $0xffffffff, 0x284(%rsp) # imm = 0xFFFFFFFF movl 0x284(%rsp), %eax lock xaddl %eax, (%rcx) movl %eax, 0x280(%rsp) cmpl $0x1, 0x280(%rsp) jne 0x16481bc movq 0x30(%rsp), %rax cmpq $0x0, 0x20(%rax) je 0x164818d movq 0x30(%rsp), %rax movq (%rax), %rsi movq 0x20(%rax), %rdi movq (%rdi), %rax movq 0x18(%rax), %rax vzeroupper callq *%rax jmp 0x164818b jmp 0x16481ba movq 0x30(%rsp), %rax movq (%rax), %rax movq %rax, 0x2a8(%rsp) cmpq $0x0, 0x2a8(%rsp) je 0x16481b8 movq 0x2a8(%rsp), %rdi vzeroupper callq 0x5f480 jmp 0x16481ba jmp 0x16481bc movq 0x30(%rsp), %rax movq $0x0, (%rax) movq $0x0, 0x10(%rax) movl $0x0, 0x18(%rax) movl $0x0, 0x28(%rax) movl $0x0, 0x2c(%rax) movl $0x0, 0x30(%rax) movl $0x0, 0x34(%rax) movl $0x0, 0x38(%rax) movq $0x0, 0x40(%rax) movq $0x0, 0x8(%rax) jmp 0x1648214 movq %rax, %rdi callq 0x678a0 movq 0x28(%rsp), %rax movq %rax, 0x110(%rsp) movl $0x0, 0xc4(%rsp) movl 0xc4(%rsp), %eax addl $0xf, %eax cmpl 0x11c(%rsp), %eax jge 0x16482e6 movq 0x110(%rsp), %rax movq %rax, 0x178(%rsp) movq 0x178(%rsp), %rax vmovups (%rax), %zmm0 vmovaps %zmm0, 0x80(%rsp) leaq 0x137(%rsp), %rdi leaq 0x80(%rsp), %rsi callq 0x164dd90 vmovaps %zmm0, 0x80(%rsp) movq 0x110(%rsp), %rax vmovaps 0x80(%rsp), %zmm0 movq %rax, 0x1d0(%rsp) vmovaps %zmm0, 0x180(%rsp) vmovaps 0x180(%rsp), %zmm0 movq 0x1d0(%rsp), %rax vmovups %zmm0, (%rax) movq 0x110(%rsp), %rax addq $0x40, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x10, %eax movl %eax, 0xc4(%rsp) jmp 0x164822c jmp 0x16482e8 movl 0xc4(%rsp), %eax addl $0x7, %eax cmpl 0x11c(%rsp), %eax jge 0x1648397 movq 0x110(%rsp), %rax movq %rax, 0x1d8(%rsp) movq 0x1d8(%rsp), %rax vmovups (%rax), %ymm0 vmovaps %ymm0, 0x60(%rsp) leaq 0x137(%rsp), %rdi leaq 0x60(%rsp), %rsi callq 0x164de20 vmovaps %ymm0, 0x60(%rsp) movq 0x110(%rsp), %rax vmovaps 0x60(%rsp), %ymm0 movq %rax, 0x210(%rsp) vmovaps %ymm0, 0x1e0(%rsp) vmovaps 0x1e0(%rsp), %ymm0 movq 0x210(%rsp), %rax vmovups %ymm0, (%rax) movq 0x110(%rsp), %rax addq $0x20, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x8, %eax movl %eax, 0xc4(%rsp) jmp 0x16482e8 jmp 0x1648399 movl 0xc4(%rsp), %eax addl $0x3, %eax cmpl 0x11c(%rsp), %eax jge 0x164844b movq 0x110(%rsp), %rax movq %rax, 0x218(%rsp) movq 0x218(%rsp), %rax vmovaps (%rax), %xmm0 vmovaps %xmm0, 0x50(%rsp) leaq 0x137(%rsp), %rdi leaq 0x50(%rsp), %rsi vzeroupper callq 0x164dea0 vmovaps %xmm0, 0x50(%rsp) movq 0x110(%rsp), %rax vmovaps 0x50(%rsp), %xmm0 movq %rax, 0x230(%rsp) vmovaps %xmm0, 0x220(%rsp) vmovaps 0x220(%rsp), %xmm0 movq 0x230(%rsp), %rax vmovaps %xmm0, (%rax) movq 0x110(%rsp), %rax addq $0x10, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x4, %eax movl %eax, 0xc4(%rsp) jmp 0x1648399 jmp 0x164844d movl 0xc4(%rsp), %eax cmpl 0x11c(%rsp), %eax jge 0x16484a8 movq 0x110(%rsp), %rsi leaq 0x137(%rsp), %rdi vzeroupper callq 0x164df10 movq 0x110(%rsp), %rax vmovss %xmm0, (%rax) movq 0x110(%rsp), %rax addq $0x4, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x1, %eax movl %eax, 0xc4(%rsp) jmp 0x164844d jmp 0x16484aa movl 0x118(%rsp), %eax addl $0x1, %eax movl %eax, 0x118(%rsp) jmp 0x1647dea xorl %eax, %eax movq %rbp, %rsp popq %rbp vzeroupper retq nopw (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,241
int ncnn::unary_op_inplace<ncnn::UnaryOp_x86_avx512_functor::unary_op_rsqrt>(ncnn::Mat&, ncnn::Option const&)
static int unary_op_inplace(Mat& a, const Option& opt)
{
    Op op;

    int w = a.w;
    int h = a.h;
    int d = a.d;
    int channels = a.c;
    int elempack = a.elempack;
    int size = w * h * d * elempack;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < channels; q++)
    {
        float* ptr = a.channel(q);

        int i = 0;
#if __SSE2__
#if __AVX__
#if __AVX512F__
        for (; i + 15 < size; i += 16)
        {
            __m512 _p = _mm512_loadu_ps(ptr);
            _p = op.func_pack16(_p);
            _mm512_storeu_ps(ptr, _p);
            ptr += 16;
        }
#endif // __AVX512F__
        for (; i + 7 < size; i += 8)
        {
            __m256 _p = _mm256_loadu_ps(ptr);
            _p = op.func_pack8(_p);
            _mm256_storeu_ps(ptr, _p);
            ptr += 8;
        }
#endif // __AVX__
        for (; i + 3 < size; i += 4)
        {
            __m128 _p = _mm_load_ps(ptr);
            _p = op.func_pack4(_p);
            _mm_store_ps(ptr, _p);
            ptr += 4;
        }
#endif // __SSE2__
        for (; i < size; i++)
        {
            *ptr = op.func(*ptr);
            ptr++;
        }
    }

    return 0;
}
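How the reciprocal square root is computed per lane is not visible in this record. One common formulation, and a reasonable guess at func_pack16, pairs the AVX512F 14-bit estimate with a single Newton-Raphson refinement; ncnn may equally well divide 1.0f by a full-precision sqrt, so treat this as a sketch:

#include <immintrin.h>
#include <math.h>

struct unary_op_rsqrt
{
    float func(const float& x) const { return 1.f / sqrtf(x); } // scalar tail
    __m512 func_pack16(const __m512& x) const
    {
        __m512 y = _mm512_rsqrt14_ps(x);                    // estimate, ~2^-14 relative error
        __m512 xyy = _mm512_mul_ps(_mm512_mul_ps(x, y), y); // x*y*y
        // one Newton-Raphson step: y' = 0.5 * y * (3 - x*y*y)
        return _mm512_mul_ps(_mm512_mul_ps(_mm512_set1_ps(0.5f), y),
                             _mm512_sub_ps(_mm512_set1_ps(3.f), xyy));
    }
};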
pushq %rbp movq %rsp, %rbp andq $-0x40, %rsp subq $0x2c0, %rsp # imm = 0x2C0 movq %rdi, 0x140(%rsp) movq %rsi, 0x138(%rsp) movq 0x140(%rsp), %rax movl 0x2c(%rax), %eax movl %eax, 0x130(%rsp) movq 0x140(%rsp), %rax movl 0x30(%rax), %eax movl %eax, 0x12c(%rsp) movq 0x140(%rsp), %rax movl 0x34(%rax), %eax movl %eax, 0x128(%rsp) movq 0x140(%rsp), %rax movl 0x38(%rax), %eax movl %eax, 0x124(%rsp) movq 0x140(%rsp), %rax movl 0x18(%rax), %eax movl %eax, 0x120(%rsp) movl 0x130(%rsp), %eax imull 0x12c(%rsp), %eax imull 0x128(%rsp), %eax imull 0x120(%rsp), %eax movl %eax, 0x11c(%rsp) movl $0x0, 0x118(%rsp) movl 0x118(%rsp), %eax cmpl 0x124(%rsp), %eax jge 0x1648c50 movq 0x140(%rsp), %rcx movl 0x118(%rsp), %eax leaq 0xc8(%rsp), %rdx movq %rdx, 0x158(%rsp) movq %rcx, 0x150(%rsp) movl %eax, 0x14c(%rsp) movq 0x150(%rsp), %rax movq %rax, 0x48(%rsp) movb $0x0, 0x14b(%rsp) movl 0x2c(%rax), %r9d movl 0x30(%rax), %r8d movl 0x34(%rax), %edi movq (%rax), %rsi movq 0x40(%rax), %rcx movslq 0x14c(%rsp), %rdx imulq %rdx, %rcx imulq 0x10(%rax), %rcx addq %rcx, %rsi movq 0x10(%rax), %rdx movl 0x18(%rax), %ecx movq 0x20(%rax), %rax leaq 0xc8(%rsp), %r10 movq %r10, 0x268(%rsp) movl %r9d, 0x264(%rsp) movl %r8d, 0x260(%rsp) movl %edi, 0x25c(%rsp) movq %rsi, 0x250(%rsp) movq %rdx, 0x248(%rsp) movl %ecx, 0x244(%rsp) movq %rax, 0x238(%rsp) movq 0x268(%rsp), %rcx movq %rcx, 0x40(%rsp) movq 0x250(%rsp), %rax movq %rax, (%rcx) movq $0x0, 0x8(%rcx) movq 0x248(%rsp), %rax movq %rax, 0x10(%rcx) movl 0x244(%rsp), %eax movl %eax, 0x18(%rcx) movq 0x238(%rsp), %rax movq %rax, 0x20(%rcx) movl $0x3, 0x28(%rcx) movl 0x264(%rsp), %eax movl %eax, 0x2c(%rcx) movl 0x260(%rsp), %eax movl %eax, 0x30(%rcx) movl $0x1, 0x34(%rcx) movl 0x25c(%rsp), %eax movl %eax, 0x38(%rcx) movslq 0x2c(%rcx), %rax movslq 0x30(%rcx), %rdx imulq %rdx, %rax imulq 0x10(%rcx), %rax movq %rax, 0x278(%rsp) movl $0x10, 0x274(%rsp) movq 0x278(%rsp), %rax movslq 0x274(%rsp), %rdx addq %rdx, %rax subq $0x1, %rax xorl %edx, %edx subl 0x274(%rsp), %edx movslq %edx, %rdx andq %rdx, %rax xorl %edx, %edx divq 0x10(%rcx) movq %rax, %rdx movq 0x48(%rsp), %rax movq %rdx, 0x40(%rcx) movl 0x28(%rax), %ecx subl $0x1, %ecx movl %ecx, 0xf0(%rsp) cmpl $0x4, 0x28(%rax) jne 0x164873e movq 0x48(%rsp), %rcx movslq 0x2c(%rcx), %rax movslq 0x30(%rcx), %rcx imulq %rcx, %rax movq %rax, 0x108(%rsp) movb $0x1, 0x14b(%rsp) testb $0x1, 0x14b(%rsp) jne 0x164886d leaq 0xc8(%rsp), %rax movq %rax, 0x168(%rsp) movq 0x168(%rsp), %rax movq %rax, 0x298(%rsp) movq 0x298(%rsp), %rax movq %rax, 0x38(%rsp) cmpq $0x0, 0x8(%rax) je 0x1648813 movq 0x38(%rsp), %rax movq 0x8(%rax), %rcx movl $0xffffffff, 0x294(%rsp) # imm = 0xFFFFFFFF movl 0x294(%rsp), %eax lock xaddl %eax, (%rcx) movl %eax, 0x290(%rsp) cmpl $0x1, 0x290(%rsp) jne 0x1648813 movq 0x38(%rsp), %rax cmpq $0x0, 0x20(%rax) je 0x16487e4 movq 0x38(%rsp), %rax movq (%rax), %rsi movq 0x20(%rax), %rdi movq (%rdi), %rax movq 0x18(%rax), %rax vzeroupper callq *%rax jmp 0x16487e2 jmp 0x1648811 movq 0x38(%rsp), %rax movq (%rax), %rax movq %rax, 0x2a0(%rsp) cmpq $0x0, 0x2a0(%rsp) je 0x164880f movq 0x2a0(%rsp), %rdi vzeroupper callq 0x5f480 jmp 0x1648811 jmp 0x1648813 movq 0x38(%rsp), %rax movq $0x0, (%rax) movq $0x0, 0x10(%rax) movl $0x0, 0x18(%rax) movl $0x0, 0x28(%rax) movl $0x0, 0x2c(%rax) movl $0x0, 0x30(%rax) movl $0x0, 0x34(%rax) movl $0x0, 0x38(%rax) movq $0x0, 0x40(%rax) movq $0x0, 0x8(%rax) jmp 0x164886b movq %rax, %rdi callq 0x678a0 jmp 0x164886d leaq 0xc8(%rsp), %rax movq %rax, 0x160(%rsp) movq 0x160(%rsp), %rax movq (%rax), %rax movq %rax, 0x28(%rsp) 
leaq 0xc8(%rsp), %rax movq %rax, 0x170(%rsp) movq 0x170(%rsp), %rax movq %rax, 0x288(%rsp) movq 0x288(%rsp), %rax movq %rax, 0x30(%rsp) cmpq $0x0, 0x8(%rax) je 0x164894c movq 0x30(%rsp), %rax movq 0x8(%rax), %rcx movl $0xffffffff, 0x284(%rsp) # imm = 0xFFFFFFFF movl 0x284(%rsp), %eax lock xaddl %eax, (%rcx) movl %eax, 0x280(%rsp) cmpl $0x1, 0x280(%rsp) jne 0x164894c movq 0x30(%rsp), %rax cmpq $0x0, 0x20(%rax) je 0x164891d movq 0x30(%rsp), %rax movq (%rax), %rsi movq 0x20(%rax), %rdi movq (%rdi), %rax movq 0x18(%rax), %rax vzeroupper callq *%rax jmp 0x164891b jmp 0x164894a movq 0x30(%rsp), %rax movq (%rax), %rax movq %rax, 0x2a8(%rsp) cmpq $0x0, 0x2a8(%rsp) je 0x1648948 movq 0x2a8(%rsp), %rdi vzeroupper callq 0x5f480 jmp 0x164894a jmp 0x164894c movq 0x30(%rsp), %rax movq $0x0, (%rax) movq $0x0, 0x10(%rax) movl $0x0, 0x18(%rax) movl $0x0, 0x28(%rax) movl $0x0, 0x2c(%rax) movl $0x0, 0x30(%rax) movl $0x0, 0x34(%rax) movl $0x0, 0x38(%rax) movq $0x0, 0x40(%rax) movq $0x0, 0x8(%rax) jmp 0x16489a4 movq %rax, %rdi callq 0x678a0 movq 0x28(%rsp), %rax movq %rax, 0x110(%rsp) movl $0x0, 0xc4(%rsp) movl 0xc4(%rsp), %eax addl $0xf, %eax cmpl 0x11c(%rsp), %eax jge 0x1648a76 movq 0x110(%rsp), %rax movq %rax, 0x178(%rsp) movq 0x178(%rsp), %rax vmovups (%rax), %zmm0 vmovaps %zmm0, 0x80(%rsp) leaq 0x137(%rsp), %rdi leaq 0x80(%rsp), %rsi callq 0x164df40 vmovaps %zmm0, 0x80(%rsp) movq 0x110(%rsp), %rax vmovaps 0x80(%rsp), %zmm0 movq %rax, 0x1d0(%rsp) vmovaps %zmm0, 0x180(%rsp) vmovaps 0x180(%rsp), %zmm0 movq 0x1d0(%rsp), %rax vmovups %zmm0, (%rax) movq 0x110(%rsp), %rax addq $0x40, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x10, %eax movl %eax, 0xc4(%rsp) jmp 0x16489bc jmp 0x1648a78 movl 0xc4(%rsp), %eax addl $0x7, %eax cmpl 0x11c(%rsp), %eax jge 0x1648b27 movq 0x110(%rsp), %rax movq %rax, 0x1d8(%rsp) movq 0x1d8(%rsp), %rax vmovups (%rax), %ymm0 vmovaps %ymm0, 0x60(%rsp) leaq 0x137(%rsp), %rdi leaq 0x60(%rsp), %rsi callq 0x164dfe0 vmovaps %ymm0, 0x60(%rsp) movq 0x110(%rsp), %rax vmovaps 0x60(%rsp), %ymm0 movq %rax, 0x210(%rsp) vmovaps %ymm0, 0x1e0(%rsp) vmovaps 0x1e0(%rsp), %ymm0 movq 0x210(%rsp), %rax vmovups %ymm0, (%rax) movq 0x110(%rsp), %rax addq $0x20, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x8, %eax movl %eax, 0xc4(%rsp) jmp 0x1648a78 jmp 0x1648b29 movl 0xc4(%rsp), %eax addl $0x3, %eax cmpl 0x11c(%rsp), %eax jge 0x1648bdb movq 0x110(%rsp), %rax movq %rax, 0x218(%rsp) movq 0x218(%rsp), %rax vmovaps (%rax), %xmm0 vmovaps %xmm0, 0x50(%rsp) leaq 0x137(%rsp), %rdi leaq 0x50(%rsp), %rsi vzeroupper callq 0x164e020 vmovaps %xmm0, 0x50(%rsp) movq 0x110(%rsp), %rax vmovaps 0x50(%rsp), %xmm0 movq %rax, 0x230(%rsp) vmovaps %xmm0, 0x220(%rsp) vmovaps 0x220(%rsp), %xmm0 movq 0x230(%rsp), %rax vmovaps %xmm0, (%rax) movq 0x110(%rsp), %rax addq $0x10, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x4, %eax movl %eax, 0xc4(%rsp) jmp 0x1648b29 jmp 0x1648bdd movl 0xc4(%rsp), %eax cmpl 0x11c(%rsp), %eax jge 0x1648c38 movq 0x110(%rsp), %rsi leaq 0x137(%rsp), %rdi vzeroupper callq 0x164e050 movq 0x110(%rsp), %rax vmovss %xmm0, (%rax) movq 0x110(%rsp), %rax addq $0x4, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x1, %eax movl %eax, 0xc4(%rsp) jmp 0x1648bdd jmp 0x1648c3a movl 0x118(%rsp), %eax addl $0x1, %eax movl %eax, 0x118(%rsp) jmp 0x164857a xorl %eax, %eax movq %rbp, %rsp popq %rbp vzeroupper retq nopw (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,242
int ncnn::unary_op_inplace<ncnn::UnaryOp_x86_avx512_functor::unary_op_exp>(ncnn::Mat&, ncnn::Option const&)
static int unary_op_inplace(Mat& a, const Option& opt)
{
    Op op;

    int w = a.w;
    int h = a.h;
    int d = a.d;
    int channels = a.c;
    int elempack = a.elempack;
    int size = w * h * d * elempack;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < channels; q++)
    {
        float* ptr = a.channel(q);

        int i = 0;
#if __SSE2__
#if __AVX__
#if __AVX512F__
        for (; i + 15 < size; i += 16)
        {
            __m512 _p = _mm512_loadu_ps(ptr);
            _p = op.func_pack16(_p);
            _mm512_storeu_ps(ptr, _p);
            ptr += 16;
        }
#endif // __AVX512F__
        for (; i + 7 < size; i += 8)
        {
            __m256 _p = _mm256_loadu_ps(ptr);
            _p = op.func_pack8(_p);
            _mm256_storeu_ps(ptr, _p);
            ptr += 8;
        }
#endif // __AVX__
        for (; i + 3 < size; i += 4)
        {
            __m128 _p = _mm_load_ps(ptr);
            _p = op.func_pack4(_p);
            _mm_store_ps(ptr, _p);
            ptr += 4;
        }
#endif // __SSE2__
        for (; i < size; i++)
        {
            *ptr = op.func(*ptr);
            ptr++;
        }
    }

    return 0;
}
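Plain AVX512F has no vector exp instruction, so func_pack16 here cannot be a one-liner intrinsic. ncnn's SSE and AVX unary ops lean on the Cephes-derived polynomial kernels exp_ps and exp256_ps, so the 512-bit path presumably wraps the matching helper; the header and the exp512_ps name below are assumptions, not read from this record:

#include <immintrin.h>
#include <math.h>
#include "avx512_mathfun.h" // assumption: ncnn math header declaring exp512_ps

struct unary_op_exp
{
    float func(const float& x) const { return expf(x); } // scalar tail
    __m512 func_pack16(const __m512& x) const
    {
        return exp512_ps(x); // assumed polynomial kernel, analogous to exp256_ps
    }
};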
pushq %rbp movq %rsp, %rbp andq $-0x40, %rsp subq $0x2c0, %rsp # imm = 0x2C0 movq %rdi, 0x140(%rsp) movq %rsi, 0x138(%rsp) movq 0x140(%rsp), %rax movl 0x2c(%rax), %eax movl %eax, 0x130(%rsp) movq 0x140(%rsp), %rax movl 0x30(%rax), %eax movl %eax, 0x12c(%rsp) movq 0x140(%rsp), %rax movl 0x34(%rax), %eax movl %eax, 0x128(%rsp) movq 0x140(%rsp), %rax movl 0x38(%rax), %eax movl %eax, 0x124(%rsp) movq 0x140(%rsp), %rax movl 0x18(%rax), %eax movl %eax, 0x120(%rsp) movl 0x130(%rsp), %eax imull 0x12c(%rsp), %eax imull 0x128(%rsp), %eax imull 0x120(%rsp), %eax movl %eax, 0x11c(%rsp) movl $0x0, 0x118(%rsp) movl 0x118(%rsp), %eax cmpl 0x124(%rsp), %eax jge 0x16493e0 movq 0x140(%rsp), %rcx movl 0x118(%rsp), %eax leaq 0xc8(%rsp), %rdx movq %rdx, 0x158(%rsp) movq %rcx, 0x150(%rsp) movl %eax, 0x14c(%rsp) movq 0x150(%rsp), %rax movq %rax, 0x48(%rsp) movb $0x0, 0x14b(%rsp) movl 0x2c(%rax), %r9d movl 0x30(%rax), %r8d movl 0x34(%rax), %edi movq (%rax), %rsi movq 0x40(%rax), %rcx movslq 0x14c(%rsp), %rdx imulq %rdx, %rcx imulq 0x10(%rax), %rcx addq %rcx, %rsi movq 0x10(%rax), %rdx movl 0x18(%rax), %ecx movq 0x20(%rax), %rax leaq 0xc8(%rsp), %r10 movq %r10, 0x268(%rsp) movl %r9d, 0x264(%rsp) movl %r8d, 0x260(%rsp) movl %edi, 0x25c(%rsp) movq %rsi, 0x250(%rsp) movq %rdx, 0x248(%rsp) movl %ecx, 0x244(%rsp) movq %rax, 0x238(%rsp) movq 0x268(%rsp), %rcx movq %rcx, 0x40(%rsp) movq 0x250(%rsp), %rax movq %rax, (%rcx) movq $0x0, 0x8(%rcx) movq 0x248(%rsp), %rax movq %rax, 0x10(%rcx) movl 0x244(%rsp), %eax movl %eax, 0x18(%rcx) movq 0x238(%rsp), %rax movq %rax, 0x20(%rcx) movl $0x3, 0x28(%rcx) movl 0x264(%rsp), %eax movl %eax, 0x2c(%rcx) movl 0x260(%rsp), %eax movl %eax, 0x30(%rcx) movl $0x1, 0x34(%rcx) movl 0x25c(%rsp), %eax movl %eax, 0x38(%rcx) movslq 0x2c(%rcx), %rax movslq 0x30(%rcx), %rdx imulq %rdx, %rax imulq 0x10(%rcx), %rax movq %rax, 0x278(%rsp) movl $0x10, 0x274(%rsp) movq 0x278(%rsp), %rax movslq 0x274(%rsp), %rdx addq %rdx, %rax subq $0x1, %rax xorl %edx, %edx subl 0x274(%rsp), %edx movslq %edx, %rdx andq %rdx, %rax xorl %edx, %edx divq 0x10(%rcx) movq %rax, %rdx movq 0x48(%rsp), %rax movq %rdx, 0x40(%rcx) movl 0x28(%rax), %ecx subl $0x1, %ecx movl %ecx, 0xf0(%rsp) cmpl $0x4, 0x28(%rax) jne 0x1648ece movq 0x48(%rsp), %rcx movslq 0x2c(%rcx), %rax movslq 0x30(%rcx), %rcx imulq %rcx, %rax movq %rax, 0x108(%rsp) movb $0x1, 0x14b(%rsp) testb $0x1, 0x14b(%rsp) jne 0x1648ffd leaq 0xc8(%rsp), %rax movq %rax, 0x168(%rsp) movq 0x168(%rsp), %rax movq %rax, 0x298(%rsp) movq 0x298(%rsp), %rax movq %rax, 0x38(%rsp) cmpq $0x0, 0x8(%rax) je 0x1648fa3 movq 0x38(%rsp), %rax movq 0x8(%rax), %rcx movl $0xffffffff, 0x294(%rsp) # imm = 0xFFFFFFFF movl 0x294(%rsp), %eax lock xaddl %eax, (%rcx) movl %eax, 0x290(%rsp) cmpl $0x1, 0x290(%rsp) jne 0x1648fa3 movq 0x38(%rsp), %rax cmpq $0x0, 0x20(%rax) je 0x1648f74 movq 0x38(%rsp), %rax movq (%rax), %rsi movq 0x20(%rax), %rdi movq (%rdi), %rax movq 0x18(%rax), %rax vzeroupper callq *%rax jmp 0x1648f72 jmp 0x1648fa1 movq 0x38(%rsp), %rax movq (%rax), %rax movq %rax, 0x2a0(%rsp) cmpq $0x0, 0x2a0(%rsp) je 0x1648f9f movq 0x2a0(%rsp), %rdi vzeroupper callq 0x5f480 jmp 0x1648fa1 jmp 0x1648fa3 movq 0x38(%rsp), %rax movq $0x0, (%rax) movq $0x0, 0x10(%rax) movl $0x0, 0x18(%rax) movl $0x0, 0x28(%rax) movl $0x0, 0x2c(%rax) movl $0x0, 0x30(%rax) movl $0x0, 0x34(%rax) movl $0x0, 0x38(%rax) movq $0x0, 0x40(%rax) movq $0x0, 0x8(%rax) jmp 0x1648ffb movq %rax, %rdi callq 0x678a0 jmp 0x1648ffd leaq 0xc8(%rsp), %rax movq %rax, 0x160(%rsp) movq 0x160(%rsp), %rax movq (%rax), %rax movq %rax, 0x28(%rsp) 
leaq 0xc8(%rsp), %rax movq %rax, 0x170(%rsp) movq 0x170(%rsp), %rax movq %rax, 0x288(%rsp) movq 0x288(%rsp), %rax movq %rax, 0x30(%rsp) cmpq $0x0, 0x8(%rax) je 0x16490dc movq 0x30(%rsp), %rax movq 0x8(%rax), %rcx movl $0xffffffff, 0x284(%rsp) # imm = 0xFFFFFFFF movl 0x284(%rsp), %eax lock xaddl %eax, (%rcx) movl %eax, 0x280(%rsp) cmpl $0x1, 0x280(%rsp) jne 0x16490dc movq 0x30(%rsp), %rax cmpq $0x0, 0x20(%rax) je 0x16490ad movq 0x30(%rsp), %rax movq (%rax), %rsi movq 0x20(%rax), %rdi movq (%rdi), %rax movq 0x18(%rax), %rax vzeroupper callq *%rax jmp 0x16490ab jmp 0x16490da movq 0x30(%rsp), %rax movq (%rax), %rax movq %rax, 0x2a8(%rsp) cmpq $0x0, 0x2a8(%rsp) je 0x16490d8 movq 0x2a8(%rsp), %rdi vzeroupper callq 0x5f480 jmp 0x16490da jmp 0x16490dc movq 0x30(%rsp), %rax movq $0x0, (%rax) movq $0x0, 0x10(%rax) movl $0x0, 0x18(%rax) movl $0x0, 0x28(%rax) movl $0x0, 0x2c(%rax) movl $0x0, 0x30(%rax) movl $0x0, 0x34(%rax) movl $0x0, 0x38(%rax) movq $0x0, 0x40(%rax) movq $0x0, 0x8(%rax) jmp 0x1649134 movq %rax, %rdi callq 0x678a0 movq 0x28(%rsp), %rax movq %rax, 0x110(%rsp) movl $0x0, 0xc4(%rsp) movl 0xc4(%rsp), %eax addl $0xf, %eax cmpl 0x11c(%rsp), %eax jge 0x1649206 movq 0x110(%rsp), %rax movq %rax, 0x178(%rsp) movq 0x178(%rsp), %rax vmovups (%rax), %zmm0 vmovaps %zmm0, 0x80(%rsp) leaq 0x137(%rsp), %rdi leaq 0x80(%rsp), %rsi callq 0x164e090 vmovaps %zmm0, 0x80(%rsp) movq 0x110(%rsp), %rax vmovaps 0x80(%rsp), %zmm0 movq %rax, 0x1d0(%rsp) vmovaps %zmm0, 0x180(%rsp) vmovaps 0x180(%rsp), %zmm0 movq 0x1d0(%rsp), %rax vmovups %zmm0, (%rax) movq 0x110(%rsp), %rax addq $0x40, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x10, %eax movl %eax, 0xc4(%rsp) jmp 0x164914c jmp 0x1649208 movl 0xc4(%rsp), %eax addl $0x7, %eax cmpl 0x11c(%rsp), %eax jge 0x16492b7 movq 0x110(%rsp), %rax movq %rax, 0x1d8(%rsp) movq 0x1d8(%rsp), %rax vmovups (%rax), %ymm0 vmovaps %ymm0, 0x60(%rsp) leaq 0x137(%rsp), %rdi leaq 0x60(%rsp), %rsi callq 0x164e6f0 vmovaps %ymm0, 0x60(%rsp) movq 0x110(%rsp), %rax vmovaps 0x60(%rsp), %ymm0 movq %rax, 0x210(%rsp) vmovaps %ymm0, 0x1e0(%rsp) vmovaps 0x1e0(%rsp), %ymm0 movq 0x210(%rsp), %rax vmovups %ymm0, (%rax) movq 0x110(%rsp), %rax addq $0x20, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x8, %eax movl %eax, 0xc4(%rsp) jmp 0x1649208 jmp 0x16492b9 movl 0xc4(%rsp), %eax addl $0x3, %eax cmpl 0x11c(%rsp), %eax jge 0x164936b movq 0x110(%rsp), %rax movq %rax, 0x218(%rsp) movq 0x218(%rsp), %rax vmovaps (%rax), %xmm0 vmovaps %xmm0, 0x50(%rsp) leaq 0x137(%rsp), %rdi leaq 0x50(%rsp), %rsi vzeroupper callq 0x164ef70 vmovaps %xmm0, 0x50(%rsp) movq 0x110(%rsp), %rax vmovaps 0x50(%rsp), %xmm0 movq %rax, 0x230(%rsp) vmovaps %xmm0, 0x220(%rsp) vmovaps 0x220(%rsp), %xmm0 movq 0x230(%rsp), %rax vmovaps %xmm0, (%rax) movq 0x110(%rsp), %rax addq $0x10, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x4, %eax movl %eax, 0xc4(%rsp) jmp 0x16492b9 jmp 0x164936d movl 0xc4(%rsp), %eax cmpl 0x11c(%rsp), %eax jge 0x16493c8 movq 0x110(%rsp), %rsi leaq 0x137(%rsp), %rdi vzeroupper callq 0x164f730 movq 0x110(%rsp), %rax vmovss %xmm0, (%rax) movq 0x110(%rsp), %rax addq $0x4, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x1, %eax movl %eax, 0xc4(%rsp) jmp 0x164936d jmp 0x16493ca movl 0x118(%rsp), %eax addl $0x1, %eax movl %eax, 0x118(%rsp) jmp 0x1648d0a xorl %eax, %eax movq %rbp, %rsp popq %rbp vzeroupper retq nopw (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,243
int ncnn::unary_op_inplace<ncnn::UnaryOp_x86_avx512_functor::unary_op_log>(ncnn::Mat&, ncnn::Option const&)
static int unary_op_inplace(Mat& a, const Option& opt)
{
    Op op;

    int w = a.w;
    int h = a.h;
    int d = a.d;
    int channels = a.c;
    int elempack = a.elempack;
    int size = w * h * d * elempack;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < channels; q++)
    {
        float* ptr = a.channel(q);

        int i = 0;
#if __SSE2__
#if __AVX__
#if __AVX512F__
        for (; i + 15 < size; i += 16)
        {
            __m512 _p = _mm512_loadu_ps(ptr);
            _p = op.func_pack16(_p);
            _mm512_storeu_ps(ptr, _p);
            ptr += 16;
        }
#endif // __AVX512F__
        for (; i + 7 < size; i += 8)
        {
            __m256 _p = _mm256_loadu_ps(ptr);
            _p = op.func_pack8(_p);
            _mm256_storeu_ps(ptr, _p);
            ptr += 8;
        }
#endif // __AVX__
        for (; i + 3 < size; i += 4)
        {
            __m128 _p = _mm_load_ps(ptr);
            _p = op.func_pack4(_p);
            _mm_store_ps(ptr, _p);
            ptr += 4;
        }
#endif // __SSE2__
        for (; i < size; i++)
        {
            *ptr = op.func(*ptr);
            ptr++;
        }
    }

    return 0;
}
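Same caveat as exp: a vector log has to come from a polynomial kernel, not a single instruction. A hedged sketch, assuming a log512_ps counterpart to ncnn's log_ps/log256_ps:

#include <immintrin.h>
#include <math.h>
#include "avx512_mathfun.h" // assumption: ncnn math header declaring log512_ps

struct unary_op_log
{
    float func(const float& x) const { return logf(x); } // scalar tail
    __m512 func_pack16(const __m512& x) const
    {
        return log512_ps(x); // assumed kernel: split mantissa/exponent, then polynomial
    }
};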
pushq %rbp movq %rsp, %rbp andq $-0x40, %rsp subq $0x2c0, %rsp # imm = 0x2C0 movq %rdi, 0x140(%rsp) movq %rsi, 0x138(%rsp) movq 0x140(%rsp), %rax movl 0x2c(%rax), %eax movl %eax, 0x130(%rsp) movq 0x140(%rsp), %rax movl 0x30(%rax), %eax movl %eax, 0x12c(%rsp) movq 0x140(%rsp), %rax movl 0x34(%rax), %eax movl %eax, 0x128(%rsp) movq 0x140(%rsp), %rax movl 0x38(%rax), %eax movl %eax, 0x124(%rsp) movq 0x140(%rsp), %rax movl 0x18(%rax), %eax movl %eax, 0x120(%rsp) movl 0x130(%rsp), %eax imull 0x12c(%rsp), %eax imull 0x128(%rsp), %eax imull 0x120(%rsp), %eax movl %eax, 0x11c(%rsp) movl $0x0, 0x118(%rsp) movl 0x118(%rsp), %eax cmpl 0x124(%rsp), %eax jge 0x1649b70 movq 0x140(%rsp), %rcx movl 0x118(%rsp), %eax leaq 0xc8(%rsp), %rdx movq %rdx, 0x158(%rsp) movq %rcx, 0x150(%rsp) movl %eax, 0x14c(%rsp) movq 0x150(%rsp), %rax movq %rax, 0x48(%rsp) movb $0x0, 0x14b(%rsp) movl 0x2c(%rax), %r9d movl 0x30(%rax), %r8d movl 0x34(%rax), %edi movq (%rax), %rsi movq 0x40(%rax), %rcx movslq 0x14c(%rsp), %rdx imulq %rdx, %rcx imulq 0x10(%rax), %rcx addq %rcx, %rsi movq 0x10(%rax), %rdx movl 0x18(%rax), %ecx movq 0x20(%rax), %rax leaq 0xc8(%rsp), %r10 movq %r10, 0x268(%rsp) movl %r9d, 0x264(%rsp) movl %r8d, 0x260(%rsp) movl %edi, 0x25c(%rsp) movq %rsi, 0x250(%rsp) movq %rdx, 0x248(%rsp) movl %ecx, 0x244(%rsp) movq %rax, 0x238(%rsp) movq 0x268(%rsp), %rcx movq %rcx, 0x40(%rsp) movq 0x250(%rsp), %rax movq %rax, (%rcx) movq $0x0, 0x8(%rcx) movq 0x248(%rsp), %rax movq %rax, 0x10(%rcx) movl 0x244(%rsp), %eax movl %eax, 0x18(%rcx) movq 0x238(%rsp), %rax movq %rax, 0x20(%rcx) movl $0x3, 0x28(%rcx) movl 0x264(%rsp), %eax movl %eax, 0x2c(%rcx) movl 0x260(%rsp), %eax movl %eax, 0x30(%rcx) movl $0x1, 0x34(%rcx) movl 0x25c(%rsp), %eax movl %eax, 0x38(%rcx) movslq 0x2c(%rcx), %rax movslq 0x30(%rcx), %rdx imulq %rdx, %rax imulq 0x10(%rcx), %rax movq %rax, 0x278(%rsp) movl $0x10, 0x274(%rsp) movq 0x278(%rsp), %rax movslq 0x274(%rsp), %rdx addq %rdx, %rax subq $0x1, %rax xorl %edx, %edx subl 0x274(%rsp), %edx movslq %edx, %rdx andq %rdx, %rax xorl %edx, %edx divq 0x10(%rcx) movq %rax, %rdx movq 0x48(%rsp), %rax movq %rdx, 0x40(%rcx) movl 0x28(%rax), %ecx subl $0x1, %ecx movl %ecx, 0xf0(%rsp) cmpl $0x4, 0x28(%rax) jne 0x164965e movq 0x48(%rsp), %rcx movslq 0x2c(%rcx), %rax movslq 0x30(%rcx), %rcx imulq %rcx, %rax movq %rax, 0x108(%rsp) movb $0x1, 0x14b(%rsp) testb $0x1, 0x14b(%rsp) jne 0x164978d leaq 0xc8(%rsp), %rax movq %rax, 0x168(%rsp) movq 0x168(%rsp), %rax movq %rax, 0x298(%rsp) movq 0x298(%rsp), %rax movq %rax, 0x38(%rsp) cmpq $0x0, 0x8(%rax) je 0x1649733 movq 0x38(%rsp), %rax movq 0x8(%rax), %rcx movl $0xffffffff, 0x294(%rsp) # imm = 0xFFFFFFFF movl 0x294(%rsp), %eax lock xaddl %eax, (%rcx) movl %eax, 0x290(%rsp) cmpl $0x1, 0x290(%rsp) jne 0x1649733 movq 0x38(%rsp), %rax cmpq $0x0, 0x20(%rax) je 0x1649704 movq 0x38(%rsp), %rax movq (%rax), %rsi movq 0x20(%rax), %rdi movq (%rdi), %rax movq 0x18(%rax), %rax vzeroupper callq *%rax jmp 0x1649702 jmp 0x1649731 movq 0x38(%rsp), %rax movq (%rax), %rax movq %rax, 0x2a0(%rsp) cmpq $0x0, 0x2a0(%rsp) je 0x164972f movq 0x2a0(%rsp), %rdi vzeroupper callq 0x5f480 jmp 0x1649731 jmp 0x1649733 movq 0x38(%rsp), %rax movq $0x0, (%rax) movq $0x0, 0x10(%rax) movl $0x0, 0x18(%rax) movl $0x0, 0x28(%rax) movl $0x0, 0x2c(%rax) movl $0x0, 0x30(%rax) movl $0x0, 0x34(%rax) movl $0x0, 0x38(%rax) movq $0x0, 0x40(%rax) movq $0x0, 0x8(%rax) jmp 0x164978b movq %rax, %rdi callq 0x678a0 jmp 0x164978d leaq 0xc8(%rsp), %rax movq %rax, 0x160(%rsp) movq 0x160(%rsp), %rax movq (%rax), %rax movq %rax, 0x28(%rsp) 
leaq 0xc8(%rsp), %rax movq %rax, 0x170(%rsp) movq 0x170(%rsp), %rax movq %rax, 0x288(%rsp) movq 0x288(%rsp), %rax movq %rax, 0x30(%rsp) cmpq $0x0, 0x8(%rax) je 0x164986c movq 0x30(%rsp), %rax movq 0x8(%rax), %rcx movl $0xffffffff, 0x284(%rsp) # imm = 0xFFFFFFFF movl 0x284(%rsp), %eax lock xaddl %eax, (%rcx) movl %eax, 0x280(%rsp) cmpl $0x1, 0x280(%rsp) jne 0x164986c movq 0x30(%rsp), %rax cmpq $0x0, 0x20(%rax) je 0x164983d movq 0x30(%rsp), %rax movq (%rax), %rsi movq 0x20(%rax), %rdi movq (%rdi), %rax movq 0x18(%rax), %rax vzeroupper callq *%rax jmp 0x164983b jmp 0x164986a movq 0x30(%rsp), %rax movq (%rax), %rax movq %rax, 0x2a8(%rsp) cmpq $0x0, 0x2a8(%rsp) je 0x1649868 movq 0x2a8(%rsp), %rdi vzeroupper callq 0x5f480 jmp 0x164986a jmp 0x164986c movq 0x30(%rsp), %rax movq $0x0, (%rax) movq $0x0, 0x10(%rax) movl $0x0, 0x18(%rax) movl $0x0, 0x28(%rax) movl $0x0, 0x2c(%rax) movl $0x0, 0x30(%rax) movl $0x0, 0x34(%rax) movl $0x0, 0x38(%rax) movq $0x0, 0x40(%rax) movq $0x0, 0x8(%rax) jmp 0x16498c4 movq %rax, %rdi callq 0x678a0 movq 0x28(%rsp), %rax movq %rax, 0x110(%rsp) movl $0x0, 0xc4(%rsp) movl 0xc4(%rsp), %eax addl $0xf, %eax cmpl 0x11c(%rsp), %eax jge 0x1649996 movq 0x110(%rsp), %rax movq %rax, 0x178(%rsp) movq 0x178(%rsp), %rax vmovups (%rax), %zmm0 vmovaps %zmm0, 0x80(%rsp) leaq 0x137(%rsp), %rdi leaq 0x80(%rsp), %rsi callq 0x164f760 vmovaps %zmm0, 0x80(%rsp) movq 0x110(%rsp), %rax vmovaps 0x80(%rsp), %zmm0 movq %rax, 0x1d0(%rsp) vmovaps %zmm0, 0x180(%rsp) vmovaps 0x180(%rsp), %zmm0 movq 0x1d0(%rsp), %rax vmovups %zmm0, (%rax) movq 0x110(%rsp), %rax addq $0x40, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x10, %eax movl %eax, 0xc4(%rsp) jmp 0x16498dc jmp 0x1649998 movl 0xc4(%rsp), %eax addl $0x7, %eax cmpl 0x11c(%rsp), %eax jge 0x1649a47 movq 0x110(%rsp), %rax movq %rax, 0x1d8(%rsp) movq 0x1d8(%rsp), %rax vmovups (%rax), %ymm0 vmovaps %ymm0, 0x60(%rsp) leaq 0x137(%rsp), %rdi leaq 0x60(%rsp), %rsi callq 0x1650030 vmovaps %ymm0, 0x60(%rsp) movq 0x110(%rsp), %rax vmovaps 0x60(%rsp), %ymm0 movq %rax, 0x210(%rsp) vmovaps %ymm0, 0x1e0(%rsp) vmovaps 0x1e0(%rsp), %ymm0 movq 0x210(%rsp), %rax vmovups %ymm0, (%rax) movq 0x110(%rsp), %rax addq $0x20, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x8, %eax movl %eax, 0xc4(%rsp) jmp 0x1649998 jmp 0x1649a49 movl 0xc4(%rsp), %eax addl $0x3, %eax cmpl 0x11c(%rsp), %eax jge 0x1649afb movq 0x110(%rsp), %rax movq %rax, 0x218(%rsp) movq 0x218(%rsp), %rax vmovaps (%rax), %xmm0 vmovaps %xmm0, 0x50(%rsp) leaq 0x137(%rsp), %rdi leaq 0x50(%rsp), %rsi vzeroupper callq 0x1650b60 vmovaps %xmm0, 0x50(%rsp) movq 0x110(%rsp), %rax vmovaps 0x50(%rsp), %xmm0 movq %rax, 0x230(%rsp) vmovaps %xmm0, 0x220(%rsp) vmovaps 0x220(%rsp), %xmm0 movq 0x230(%rsp), %rax vmovaps %xmm0, (%rax) movq 0x110(%rsp), %rax addq $0x10, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x4, %eax movl %eax, 0xc4(%rsp) jmp 0x1649a49 jmp 0x1649afd movl 0xc4(%rsp), %eax cmpl 0x11c(%rsp), %eax jge 0x1649b58 movq 0x110(%rsp), %rsi leaq 0x137(%rsp), %rdi vzeroupper callq 0x16515d0 movq 0x110(%rsp), %rax vmovss %xmm0, (%rax) movq 0x110(%rsp), %rax addq $0x4, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x1, %eax movl %eax, 0xc4(%rsp) jmp 0x1649afd jmp 0x1649b5a movl 0x118(%rsp), %eax addl $0x1, %eax movl %eax, 0x118(%rsp) jmp 0x164949a xorl %eax, %eax movq %rbp, %rsp popq %rbp vzeroupper retq nopw (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,244
int ncnn::unary_op_inplace<ncnn::UnaryOp_x86_avx512_functor::unary_op_sin>(ncnn::Mat&, ncnn::Option const&)
static int unary_op_inplace(Mat& a, const Option& opt)
{
    Op op;

    int w = a.w;
    int h = a.h;
    int d = a.d;
    int channels = a.c;
    int elempack = a.elempack;
    int size = w * h * d * elempack;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < channels; q++)
    {
        float* ptr = a.channel(q);

        int i = 0;
#if __SSE2__
#if __AVX__
#if __AVX512F__
        for (; i + 15 < size; i += 16)
        {
            __m512 _p = _mm512_loadu_ps(ptr);
            _p = op.func_pack16(_p);
            _mm512_storeu_ps(ptr, _p);
            ptr += 16;
        }
#endif // __AVX512F__
        for (; i + 7 < size; i += 8)
        {
            __m256 _p = _mm256_loadu_ps(ptr);
            _p = op.func_pack8(_p);
            _mm256_storeu_ps(ptr, _p);
            ptr += 8;
        }
#endif // __AVX__
        for (; i + 3 < size; i += 4)
        {
            __m128 _p = _mm_load_ps(ptr);
            _p = op.func_pack4(_p);
            _mm_store_ps(ptr, _p);
            ptr += 4;
        }
#endif // __SSE2__
        for (; i < size; i++)
        {
            *ptr = op.func(*ptr);
            ptr++;
        }
    }

    return 0;
}
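For sine the same pattern holds; a sketch assuming a sin512_ps kernel in the same Cephes-style family as ncnn's sin_ps/sin256_ps (range reduction by multiples of pi/4, then a minimax polynomial per octant):

#include <immintrin.h>
#include <math.h>
#include "avx512_mathfun.h" // assumption: ncnn math header declaring sin512_ps

struct unary_op_sin
{
    float func(const float& x) const { return sinf(x); } // scalar tail
    __m512 func_pack16(const __m512& x) const
    {
        return sin512_ps(x); // assumed range-reduce-then-polynomial kernel
    }
};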
pushq %rbp movq %rsp, %rbp andq $-0x40, %rsp subq $0x2c0, %rsp # imm = 0x2C0 movq %rdi, 0x140(%rsp) movq %rsi, 0x138(%rsp) movq 0x140(%rsp), %rax movl 0x2c(%rax), %eax movl %eax, 0x130(%rsp) movq 0x140(%rsp), %rax movl 0x30(%rax), %eax movl %eax, 0x12c(%rsp) movq 0x140(%rsp), %rax movl 0x34(%rax), %eax movl %eax, 0x128(%rsp) movq 0x140(%rsp), %rax movl 0x38(%rax), %eax movl %eax, 0x124(%rsp) movq 0x140(%rsp), %rax movl 0x18(%rax), %eax movl %eax, 0x120(%rsp) movl 0x130(%rsp), %eax imull 0x12c(%rsp), %eax imull 0x128(%rsp), %eax imull 0x120(%rsp), %eax movl %eax, 0x11c(%rsp) movl $0x0, 0x118(%rsp) movl 0x118(%rsp), %eax cmpl 0x124(%rsp), %eax jge 0x164a300 movq 0x140(%rsp), %rcx movl 0x118(%rsp), %eax leaq 0xc8(%rsp), %rdx movq %rdx, 0x158(%rsp) movq %rcx, 0x150(%rsp) movl %eax, 0x14c(%rsp) movq 0x150(%rsp), %rax movq %rax, 0x48(%rsp) movb $0x0, 0x14b(%rsp) movl 0x2c(%rax), %r9d movl 0x30(%rax), %r8d movl 0x34(%rax), %edi movq (%rax), %rsi movq 0x40(%rax), %rcx movslq 0x14c(%rsp), %rdx imulq %rdx, %rcx imulq 0x10(%rax), %rcx addq %rcx, %rsi movq 0x10(%rax), %rdx movl 0x18(%rax), %ecx movq 0x20(%rax), %rax leaq 0xc8(%rsp), %r10 movq %r10, 0x268(%rsp) movl %r9d, 0x264(%rsp) movl %r8d, 0x260(%rsp) movl %edi, 0x25c(%rsp) movq %rsi, 0x250(%rsp) movq %rdx, 0x248(%rsp) movl %ecx, 0x244(%rsp) movq %rax, 0x238(%rsp) movq 0x268(%rsp), %rcx movq %rcx, 0x40(%rsp) movq 0x250(%rsp), %rax movq %rax, (%rcx) movq $0x0, 0x8(%rcx) movq 0x248(%rsp), %rax movq %rax, 0x10(%rcx) movl 0x244(%rsp), %eax movl %eax, 0x18(%rcx) movq 0x238(%rsp), %rax movq %rax, 0x20(%rcx) movl $0x3, 0x28(%rcx) movl 0x264(%rsp), %eax movl %eax, 0x2c(%rcx) movl 0x260(%rsp), %eax movl %eax, 0x30(%rcx) movl $0x1, 0x34(%rcx) movl 0x25c(%rsp), %eax movl %eax, 0x38(%rcx) movslq 0x2c(%rcx), %rax movslq 0x30(%rcx), %rdx imulq %rdx, %rax imulq 0x10(%rcx), %rax movq %rax, 0x278(%rsp) movl $0x10, 0x274(%rsp) movq 0x278(%rsp), %rax movslq 0x274(%rsp), %rdx addq %rdx, %rax subq $0x1, %rax xorl %edx, %edx subl 0x274(%rsp), %edx movslq %edx, %rdx andq %rdx, %rax xorl %edx, %edx divq 0x10(%rcx) movq %rax, %rdx movq 0x48(%rsp), %rax movq %rdx, 0x40(%rcx) movl 0x28(%rax), %ecx subl $0x1, %ecx movl %ecx, 0xf0(%rsp) cmpl $0x4, 0x28(%rax) jne 0x1649dee movq 0x48(%rsp), %rcx movslq 0x2c(%rcx), %rax movslq 0x30(%rcx), %rcx imulq %rcx, %rax movq %rax, 0x108(%rsp) movb $0x1, 0x14b(%rsp) testb $0x1, 0x14b(%rsp) jne 0x1649f1d leaq 0xc8(%rsp), %rax movq %rax, 0x168(%rsp) movq 0x168(%rsp), %rax movq %rax, 0x298(%rsp) movq 0x298(%rsp), %rax movq %rax, 0x38(%rsp) cmpq $0x0, 0x8(%rax) je 0x1649ec3 movq 0x38(%rsp), %rax movq 0x8(%rax), %rcx movl $0xffffffff, 0x294(%rsp) # imm = 0xFFFFFFFF movl 0x294(%rsp), %eax lock xaddl %eax, (%rcx) movl %eax, 0x290(%rsp) cmpl $0x1, 0x290(%rsp) jne 0x1649ec3 movq 0x38(%rsp), %rax cmpq $0x0, 0x20(%rax) je 0x1649e94 movq 0x38(%rsp), %rax movq (%rax), %rsi movq 0x20(%rax), %rdi movq (%rdi), %rax movq 0x18(%rax), %rax vzeroupper callq *%rax jmp 0x1649e92 jmp 0x1649ec1 movq 0x38(%rsp), %rax movq (%rax), %rax movq %rax, 0x2a0(%rsp) cmpq $0x0, 0x2a0(%rsp) je 0x1649ebf movq 0x2a0(%rsp), %rdi vzeroupper callq 0x5f480 jmp 0x1649ec1 jmp 0x1649ec3 movq 0x38(%rsp), %rax movq $0x0, (%rax) movq $0x0, 0x10(%rax) movl $0x0, 0x18(%rax) movl $0x0, 0x28(%rax) movl $0x0, 0x2c(%rax) movl $0x0, 0x30(%rax) movl $0x0, 0x34(%rax) movl $0x0, 0x38(%rax) movq $0x0, 0x40(%rax) movq $0x0, 0x8(%rax) jmp 0x1649f1b movq %rax, %rdi callq 0x678a0 jmp 0x1649f1d leaq 0xc8(%rsp), %rax movq %rax, 0x160(%rsp) movq 0x160(%rsp), %rax movq (%rax), %rax movq %rax, 0x28(%rsp) 
leaq 0xc8(%rsp), %rax movq %rax, 0x170(%rsp) movq 0x170(%rsp), %rax movq %rax, 0x288(%rsp) movq 0x288(%rsp), %rax movq %rax, 0x30(%rsp) cmpq $0x0, 0x8(%rax) je 0x1649ffc movq 0x30(%rsp), %rax movq 0x8(%rax), %rcx movl $0xffffffff, 0x284(%rsp) # imm = 0xFFFFFFFF movl 0x284(%rsp), %eax lock xaddl %eax, (%rcx) movl %eax, 0x280(%rsp) cmpl $0x1, 0x280(%rsp) jne 0x1649ffc movq 0x30(%rsp), %rax cmpq $0x0, 0x20(%rax) je 0x1649fcd movq 0x30(%rsp), %rax movq (%rax), %rsi movq 0x20(%rax), %rdi movq (%rdi), %rax movq 0x18(%rax), %rax vzeroupper callq *%rax jmp 0x1649fcb jmp 0x1649ffa movq 0x30(%rsp), %rax movq (%rax), %rax movq %rax, 0x2a8(%rsp) cmpq $0x0, 0x2a8(%rsp) je 0x1649ff8 movq 0x2a8(%rsp), %rdi vzeroupper callq 0x5f480 jmp 0x1649ffa jmp 0x1649ffc movq 0x30(%rsp), %rax movq $0x0, (%rax) movq $0x0, 0x10(%rax) movl $0x0, 0x18(%rax) movl $0x0, 0x28(%rax) movl $0x0, 0x2c(%rax) movl $0x0, 0x30(%rax) movl $0x0, 0x34(%rax) movl $0x0, 0x38(%rax) movq $0x0, 0x40(%rax) movq $0x0, 0x8(%rax) jmp 0x164a054 movq %rax, %rdi callq 0x678a0 movq 0x28(%rsp), %rax movq %rax, 0x110(%rsp) movl $0x0, 0xc4(%rsp) movl 0xc4(%rsp), %eax addl $0xf, %eax cmpl 0x11c(%rsp), %eax jge 0x164a126 movq 0x110(%rsp), %rax movq %rax, 0x178(%rsp) movq 0x178(%rsp), %rax vmovups (%rax), %zmm0 vmovaps %zmm0, 0x80(%rsp) leaq 0x137(%rsp), %rdi leaq 0x80(%rsp), %rsi callq 0x1651600 vmovaps %zmm0, 0x80(%rsp) movq 0x110(%rsp), %rax vmovaps 0x80(%rsp), %zmm0 movq %rax, 0x1d0(%rsp) vmovaps %zmm0, 0x180(%rsp) vmovaps 0x180(%rsp), %zmm0 movq 0x1d0(%rsp), %rax vmovups %zmm0, (%rax) movq 0x110(%rsp), %rax addq $0x40, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x10, %eax movl %eax, 0xc4(%rsp) jmp 0x164a06c jmp 0x164a128 movl 0xc4(%rsp), %eax addl $0x7, %eax cmpl 0x11c(%rsp), %eax jge 0x164a1d7 movq 0x110(%rsp), %rax movq %rax, 0x1d8(%rsp) movq 0x1d8(%rsp), %rax vmovups (%rax), %ymm0 vmovaps %ymm0, 0x60(%rsp) leaq 0x137(%rsp), %rdi leaq 0x60(%rsp), %rsi callq 0x1651f20 vmovaps %ymm0, 0x60(%rsp) movq 0x110(%rsp), %rax vmovaps 0x60(%rsp), %ymm0 movq %rax, 0x210(%rsp) vmovaps %ymm0, 0x1e0(%rsp) vmovaps 0x1e0(%rsp), %ymm0 movq 0x210(%rsp), %rax vmovups %ymm0, (%rax) movq 0x110(%rsp), %rax addq $0x20, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x8, %eax movl %eax, 0xc4(%rsp) jmp 0x164a128 jmp 0x164a1d9 movl 0xc4(%rsp), %eax addl $0x3, %eax cmpl 0x11c(%rsp), %eax jge 0x164a28b movq 0x110(%rsp), %rax movq %rax, 0x218(%rsp) movq 0x218(%rsp), %rax vmovaps (%rax), %xmm0 vmovaps %xmm0, 0x50(%rsp) leaq 0x137(%rsp), %rdi leaq 0x50(%rsp), %rsi vzeroupper callq 0x1652aa0 vmovaps %xmm0, 0x50(%rsp) movq 0x110(%rsp), %rax vmovaps 0x50(%rsp), %xmm0 movq %rax, 0x230(%rsp) vmovaps %xmm0, 0x220(%rsp) vmovaps 0x220(%rsp), %xmm0 movq 0x230(%rsp), %rax vmovaps %xmm0, (%rax) movq 0x110(%rsp), %rax addq $0x10, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x4, %eax movl %eax, 0xc4(%rsp) jmp 0x164a1d9 jmp 0x164a28d movl 0xc4(%rsp), %eax cmpl 0x11c(%rsp), %eax jge 0x164a2e8 movq 0x110(%rsp), %rsi leaq 0x137(%rsp), %rdi vzeroupper callq 0x1653500 movq 0x110(%rsp), %rax vmovss %xmm0, (%rax) movq 0x110(%rsp), %rax addq $0x4, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x1, %eax movl %eax, 0xc4(%rsp) jmp 0x164a28d jmp 0x164a2ea movl 0x118(%rsp), %eax addl $0x1, %eax movl %eax, 0x118(%rsp) jmp 0x1649c2a xorl %eax, %eax movq %rbp, %rsp popq %rbp vzeroupper retq nopw (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,245
int ncnn::unary_op_inplace<ncnn::UnaryOp_x86_avx512_functor::unary_op_cos>(ncnn::Mat&, ncnn::Option const&)
static int unary_op_inplace(Mat& a, const Option& opt)
{
    Op op;

    int w = a.w;
    int h = a.h;
    int d = a.d;
    int channels = a.c;
    int elempack = a.elempack;
    int size = w * h * d * elempack;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < channels; q++)
    {
        float* ptr = a.channel(q);

        int i = 0;
#if __SSE2__
#if __AVX__
#if __AVX512F__
        for (; i + 15 < size; i += 16)
        {
            __m512 _p = _mm512_loadu_ps(ptr);
            _p = op.func_pack16(_p);
            _mm512_storeu_ps(ptr, _p);
            ptr += 16;
        }
#endif // __AVX512F__
        for (; i + 7 < size; i += 8)
        {
            __m256 _p = _mm256_loadu_ps(ptr);
            _p = op.func_pack8(_p);
            _mm256_storeu_ps(ptr, _p);
            ptr += 8;
        }
#endif // __AVX__
        for (; i + 3 < size; i += 4)
        {
            __m128 _p = _mm_load_ps(ptr);
            _p = op.func_pack4(_p);
            _mm_store_ps(ptr, _p);
            ptr += 4;
        }
#endif // __SSE2__
        for (; i < size; i++)
        {
            *ptr = op.func(*ptr);
            ptr++;
        }
    }

    return 0;
}
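Cosine mirrors the sine record one-for-one; only the call targets in the disassembly change. The corresponding sketch, with the cos512_ps name again an assumption:

#include <immintrin.h>
#include <math.h>
#include "avx512_mathfun.h" // assumption: ncnn math header declaring cos512_ps

struct unary_op_cos
{
    float func(const float& x) const { return cosf(x); } // scalar tail
    __m512 func_pack16(const __m512& x) const
    {
        return cos512_ps(x); // assumed kernel sharing sine's range reduction
    }
};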
pushq %rbp movq %rsp, %rbp andq $-0x40, %rsp subq $0x2c0, %rsp # imm = 0x2C0 movq %rdi, 0x140(%rsp) movq %rsi, 0x138(%rsp) movq 0x140(%rsp), %rax movl 0x2c(%rax), %eax movl %eax, 0x130(%rsp) movq 0x140(%rsp), %rax movl 0x30(%rax), %eax movl %eax, 0x12c(%rsp) movq 0x140(%rsp), %rax movl 0x34(%rax), %eax movl %eax, 0x128(%rsp) movq 0x140(%rsp), %rax movl 0x38(%rax), %eax movl %eax, 0x124(%rsp) movq 0x140(%rsp), %rax movl 0x18(%rax), %eax movl %eax, 0x120(%rsp) movl 0x130(%rsp), %eax imull 0x12c(%rsp), %eax imull 0x128(%rsp), %eax imull 0x120(%rsp), %eax movl %eax, 0x11c(%rsp) movl $0x0, 0x118(%rsp) movl 0x118(%rsp), %eax cmpl 0x124(%rsp), %eax jge 0x164aa90 movq 0x140(%rsp), %rcx movl 0x118(%rsp), %eax leaq 0xc8(%rsp), %rdx movq %rdx, 0x158(%rsp) movq %rcx, 0x150(%rsp) movl %eax, 0x14c(%rsp) movq 0x150(%rsp), %rax movq %rax, 0x48(%rsp) movb $0x0, 0x14b(%rsp) movl 0x2c(%rax), %r9d movl 0x30(%rax), %r8d movl 0x34(%rax), %edi movq (%rax), %rsi movq 0x40(%rax), %rcx movslq 0x14c(%rsp), %rdx imulq %rdx, %rcx imulq 0x10(%rax), %rcx addq %rcx, %rsi movq 0x10(%rax), %rdx movl 0x18(%rax), %ecx movq 0x20(%rax), %rax leaq 0xc8(%rsp), %r10 movq %r10, 0x268(%rsp) movl %r9d, 0x264(%rsp) movl %r8d, 0x260(%rsp) movl %edi, 0x25c(%rsp) movq %rsi, 0x250(%rsp) movq %rdx, 0x248(%rsp) movl %ecx, 0x244(%rsp) movq %rax, 0x238(%rsp) movq 0x268(%rsp), %rcx movq %rcx, 0x40(%rsp) movq 0x250(%rsp), %rax movq %rax, (%rcx) movq $0x0, 0x8(%rcx) movq 0x248(%rsp), %rax movq %rax, 0x10(%rcx) movl 0x244(%rsp), %eax movl %eax, 0x18(%rcx) movq 0x238(%rsp), %rax movq %rax, 0x20(%rcx) movl $0x3, 0x28(%rcx) movl 0x264(%rsp), %eax movl %eax, 0x2c(%rcx) movl 0x260(%rsp), %eax movl %eax, 0x30(%rcx) movl $0x1, 0x34(%rcx) movl 0x25c(%rsp), %eax movl %eax, 0x38(%rcx) movslq 0x2c(%rcx), %rax movslq 0x30(%rcx), %rdx imulq %rdx, %rax imulq 0x10(%rcx), %rax movq %rax, 0x278(%rsp) movl $0x10, 0x274(%rsp) movq 0x278(%rsp), %rax movslq 0x274(%rsp), %rdx addq %rdx, %rax subq $0x1, %rax xorl %edx, %edx subl 0x274(%rsp), %edx movslq %edx, %rdx andq %rdx, %rax xorl %edx, %edx divq 0x10(%rcx) movq %rax, %rdx movq 0x48(%rsp), %rax movq %rdx, 0x40(%rcx) movl 0x28(%rax), %ecx subl $0x1, %ecx movl %ecx, 0xf0(%rsp) cmpl $0x4, 0x28(%rax) jne 0x164a57e movq 0x48(%rsp), %rcx movslq 0x2c(%rcx), %rax movslq 0x30(%rcx), %rcx imulq %rcx, %rax movq %rax, 0x108(%rsp) movb $0x1, 0x14b(%rsp) testb $0x1, 0x14b(%rsp) jne 0x164a6ad leaq 0xc8(%rsp), %rax movq %rax, 0x168(%rsp) movq 0x168(%rsp), %rax movq %rax, 0x298(%rsp) movq 0x298(%rsp), %rax movq %rax, 0x38(%rsp) cmpq $0x0, 0x8(%rax) je 0x164a653 movq 0x38(%rsp), %rax movq 0x8(%rax), %rcx movl $0xffffffff, 0x294(%rsp) # imm = 0xFFFFFFFF movl 0x294(%rsp), %eax lock xaddl %eax, (%rcx) movl %eax, 0x290(%rsp) cmpl $0x1, 0x290(%rsp) jne 0x164a653 movq 0x38(%rsp), %rax cmpq $0x0, 0x20(%rax) je 0x164a624 movq 0x38(%rsp), %rax movq (%rax), %rsi movq 0x20(%rax), %rdi movq (%rdi), %rax movq 0x18(%rax), %rax vzeroupper callq *%rax jmp 0x164a622 jmp 0x164a651 movq 0x38(%rsp), %rax movq (%rax), %rax movq %rax, 0x2a0(%rsp) cmpq $0x0, 0x2a0(%rsp) je 0x164a64f movq 0x2a0(%rsp), %rdi vzeroupper callq 0x5f480 jmp 0x164a651 jmp 0x164a653 movq 0x38(%rsp), %rax movq $0x0, (%rax) movq $0x0, 0x10(%rax) movl $0x0, 0x18(%rax) movl $0x0, 0x28(%rax) movl $0x0, 0x2c(%rax) movl $0x0, 0x30(%rax) movl $0x0, 0x34(%rax) movl $0x0, 0x38(%rax) movq $0x0, 0x40(%rax) movq $0x0, 0x8(%rax) jmp 0x164a6ab movq %rax, %rdi callq 0x678a0 jmp 0x164a6ad leaq 0xc8(%rsp), %rax movq %rax, 0x160(%rsp) movq 0x160(%rsp), %rax movq (%rax), %rax movq %rax, 0x28(%rsp) 
leaq 0xc8(%rsp), %rax movq %rax, 0x170(%rsp) movq 0x170(%rsp), %rax movq %rax, 0x288(%rsp) movq 0x288(%rsp), %rax movq %rax, 0x30(%rsp) cmpq $0x0, 0x8(%rax) je 0x164a78c movq 0x30(%rsp), %rax movq 0x8(%rax), %rcx movl $0xffffffff, 0x284(%rsp) # imm = 0xFFFFFFFF movl 0x284(%rsp), %eax lock xaddl %eax, (%rcx) movl %eax, 0x280(%rsp) cmpl $0x1, 0x280(%rsp) jne 0x164a78c movq 0x30(%rsp), %rax cmpq $0x0, 0x20(%rax) je 0x164a75d movq 0x30(%rsp), %rax movq (%rax), %rsi movq 0x20(%rax), %rdi movq (%rdi), %rax movq 0x18(%rax), %rax vzeroupper callq *%rax jmp 0x164a75b jmp 0x164a78a movq 0x30(%rsp), %rax movq (%rax), %rax movq %rax, 0x2a8(%rsp) cmpq $0x0, 0x2a8(%rsp) je 0x164a788 movq 0x2a8(%rsp), %rdi vzeroupper callq 0x5f480 jmp 0x164a78a jmp 0x164a78c movq 0x30(%rsp), %rax movq $0x0, (%rax) movq $0x0, 0x10(%rax) movl $0x0, 0x18(%rax) movl $0x0, 0x28(%rax) movl $0x0, 0x2c(%rax) movl $0x0, 0x30(%rax) movl $0x0, 0x34(%rax) movl $0x0, 0x38(%rax) movq $0x0, 0x40(%rax) movq $0x0, 0x8(%rax) jmp 0x164a7e4 movq %rax, %rdi callq 0x678a0 movq 0x28(%rsp), %rax movq %rax, 0x110(%rsp) movl $0x0, 0xc4(%rsp) movl 0xc4(%rsp), %eax addl $0xf, %eax cmpl 0x11c(%rsp), %eax jge 0x164a8b6 movq 0x110(%rsp), %rax movq %rax, 0x178(%rsp) movq 0x178(%rsp), %rax vmovups (%rax), %zmm0 vmovaps %zmm0, 0x80(%rsp) leaq 0x137(%rsp), %rdi leaq 0x80(%rsp), %rsi callq 0x1653530 vmovaps %zmm0, 0x80(%rsp) movq 0x110(%rsp), %rax vmovaps 0x80(%rsp), %zmm0 movq %rax, 0x1d0(%rsp) vmovaps %zmm0, 0x180(%rsp) vmovaps 0x180(%rsp), %zmm0 movq 0x1d0(%rsp), %rax vmovups %zmm0, (%rax) movq 0x110(%rsp), %rax addq $0x40, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x10, %eax movl %eax, 0xc4(%rsp) jmp 0x164a7fc jmp 0x164a8b8 movl 0xc4(%rsp), %eax addl $0x7, %eax cmpl 0x11c(%rsp), %eax jge 0x164a967 movq 0x110(%rsp), %rax movq %rax, 0x1d8(%rsp) movq 0x1d8(%rsp), %rax vmovups (%rax), %ymm0 vmovaps %ymm0, 0x60(%rsp) leaq 0x137(%rsp), %rdi leaq 0x60(%rsp), %rsi callq 0x1653e70 vmovaps %ymm0, 0x60(%rsp) movq 0x110(%rsp), %rax vmovaps 0x60(%rsp), %ymm0 movq %rax, 0x210(%rsp) vmovaps %ymm0, 0x1e0(%rsp) vmovaps 0x1e0(%rsp), %ymm0 movq 0x210(%rsp), %rax vmovups %ymm0, (%rax) movq 0x110(%rsp), %rax addq $0x20, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x8, %eax movl %eax, 0xc4(%rsp) jmp 0x164a8b8 jmp 0x164a969 movl 0xc4(%rsp), %eax addl $0x3, %eax cmpl 0x11c(%rsp), %eax jge 0x164aa1b movq 0x110(%rsp), %rax movq %rax, 0x218(%rsp) movq 0x218(%rsp), %rax vmovaps (%rax), %xmm0 vmovaps %xmm0, 0x50(%rsp) leaq 0x137(%rsp), %rdi leaq 0x50(%rsp), %rsi vzeroupper callq 0x1654980 vmovaps %xmm0, 0x50(%rsp) movq 0x110(%rsp), %rax vmovaps 0x50(%rsp), %xmm0 movq %rax, 0x230(%rsp) vmovaps %xmm0, 0x220(%rsp) vmovaps 0x220(%rsp), %xmm0 movq 0x230(%rsp), %rax vmovaps %xmm0, (%rax) movq 0x110(%rsp), %rax addq $0x10, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x4, %eax movl %eax, 0xc4(%rsp) jmp 0x164a969 jmp 0x164aa1d movl 0xc4(%rsp), %eax cmpl 0x11c(%rsp), %eax jge 0x164aa78 movq 0x110(%rsp), %rsi leaq 0x137(%rsp), %rdi vzeroupper callq 0x1655390 movq 0x110(%rsp), %rax vmovss %xmm0, (%rax) movq 0x110(%rsp), %rax addq $0x4, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x1, %eax movl %eax, 0xc4(%rsp) jmp 0x164aa1d jmp 0x164aa7a movl 0x118(%rsp), %eax addl $0x1, %eax movl %eax, 0x118(%rsp) jmp 0x164a3ba xorl %eax, %eax movq %rbp, %rsp popq %rbp vzeroupper retq nopw (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,246
int ncnn::unary_op_inplace<ncnn::UnaryOp_x86_avx512_functor::unary_op_tan>(ncnn::Mat&, ncnn::Option const&)
static int unary_op_inplace(Mat& a, const Option& opt)
{
    Op op;

    int w = a.w;
    int h = a.h;
    int d = a.d;
    int channels = a.c;
    int elempack = a.elempack;
    int size = w * h * d * elempack;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < channels; q++)
    {
        float* ptr = a.channel(q);

        int i = 0;
#if __SSE2__
#if __AVX__
#if __AVX512F__
        for (; i + 15 < size; i += 16)
        {
            __m512 _p = _mm512_loadu_ps(ptr);
            _p = op.func_pack16(_p);
            _mm512_storeu_ps(ptr, _p);
            ptr += 16;
        }
#endif // __AVX512F__
        for (; i + 7 < size; i += 8)
        {
            __m256 _p = _mm256_loadu_ps(ptr);
            _p = op.func_pack8(_p);
            _mm256_storeu_ps(ptr, _p);
            ptr += 8;
        }
#endif // __AVX__
        for (; i + 3 < size; i += 4)
        {
            __m128 _p = _mm_load_ps(ptr);
            _p = op.func_pack4(_p);
            _mm_store_ps(ptr, _p);
            ptr += 4;
        }
#endif // __SSE2__
        for (; i < size; i++)
        {
            *ptr = op.func(*ptr);
            ptr++;
        }
    }

    return 0;
}
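There is no AVX512F tan instruction either. One plausible shape for func_pack16, assuming a paired sincos512_ps helper analogous to ncnn's sincos256_ps, divides sine by cosine; whether ncnn actually does this or calls a dedicated tan kernel is not visible here:

#include <immintrin.h>
#include <math.h>
#include "avx512_mathfun.h" // assumption: ncnn math header declaring sincos512_ps

struct unary_op_tan
{
    float func(const float& x) const { return tanf(x); } // scalar tail
    __m512 func_pack16(const __m512& x) const
    {
        __m512 s, c;
        sincos512_ps(x, &s, &c);    // assumed paired kernel computing sin and cos together
        return _mm512_div_ps(s, c); // tan = sin / cos
    }
};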
pushq %rbp movq %rsp, %rbp andq $-0x40, %rsp subq $0x2c0, %rsp # imm = 0x2C0 movq %rdi, 0x140(%rsp) movq %rsi, 0x138(%rsp) movq 0x140(%rsp), %rax movl 0x2c(%rax), %eax movl %eax, 0x130(%rsp) movq 0x140(%rsp), %rax movl 0x30(%rax), %eax movl %eax, 0x12c(%rsp) movq 0x140(%rsp), %rax movl 0x34(%rax), %eax movl %eax, 0x128(%rsp) movq 0x140(%rsp), %rax movl 0x38(%rax), %eax movl %eax, 0x124(%rsp) movq 0x140(%rsp), %rax movl 0x18(%rax), %eax movl %eax, 0x120(%rsp) movl 0x130(%rsp), %eax imull 0x12c(%rsp), %eax imull 0x128(%rsp), %eax imull 0x120(%rsp), %eax movl %eax, 0x11c(%rsp) movl $0x0, 0x118(%rsp) movl 0x118(%rsp), %eax cmpl 0x124(%rsp), %eax jge 0x164b220 movq 0x140(%rsp), %rcx movl 0x118(%rsp), %eax leaq 0xc8(%rsp), %rdx movq %rdx, 0x158(%rsp) movq %rcx, 0x150(%rsp) movl %eax, 0x14c(%rsp) movq 0x150(%rsp), %rax movq %rax, 0x48(%rsp) movb $0x0, 0x14b(%rsp) movl 0x2c(%rax), %r9d movl 0x30(%rax), %r8d movl 0x34(%rax), %edi movq (%rax), %rsi movq 0x40(%rax), %rcx movslq 0x14c(%rsp), %rdx imulq %rdx, %rcx imulq 0x10(%rax), %rcx addq %rcx, %rsi movq 0x10(%rax), %rdx movl 0x18(%rax), %ecx movq 0x20(%rax), %rax leaq 0xc8(%rsp), %r10 movq %r10, 0x268(%rsp) movl %r9d, 0x264(%rsp) movl %r8d, 0x260(%rsp) movl %edi, 0x25c(%rsp) movq %rsi, 0x250(%rsp) movq %rdx, 0x248(%rsp) movl %ecx, 0x244(%rsp) movq %rax, 0x238(%rsp) movq 0x268(%rsp), %rcx movq %rcx, 0x40(%rsp) movq 0x250(%rsp), %rax movq %rax, (%rcx) movq $0x0, 0x8(%rcx) movq 0x248(%rsp), %rax movq %rax, 0x10(%rcx) movl 0x244(%rsp), %eax movl %eax, 0x18(%rcx) movq 0x238(%rsp), %rax movq %rax, 0x20(%rcx) movl $0x3, 0x28(%rcx) movl 0x264(%rsp), %eax movl %eax, 0x2c(%rcx) movl 0x260(%rsp), %eax movl %eax, 0x30(%rcx) movl $0x1, 0x34(%rcx) movl 0x25c(%rsp), %eax movl %eax, 0x38(%rcx) movslq 0x2c(%rcx), %rax movslq 0x30(%rcx), %rdx imulq %rdx, %rax imulq 0x10(%rcx), %rax movq %rax, 0x278(%rsp) movl $0x10, 0x274(%rsp) movq 0x278(%rsp), %rax movslq 0x274(%rsp), %rdx addq %rdx, %rax subq $0x1, %rax xorl %edx, %edx subl 0x274(%rsp), %edx movslq %edx, %rdx andq %rdx, %rax xorl %edx, %edx divq 0x10(%rcx) movq %rax, %rdx movq 0x48(%rsp), %rax movq %rdx, 0x40(%rcx) movl 0x28(%rax), %ecx subl $0x1, %ecx movl %ecx, 0xf0(%rsp) cmpl $0x4, 0x28(%rax) jne 0x164ad0e movq 0x48(%rsp), %rcx movslq 0x2c(%rcx), %rax movslq 0x30(%rcx), %rcx imulq %rcx, %rax movq %rax, 0x108(%rsp) movb $0x1, 0x14b(%rsp) testb $0x1, 0x14b(%rsp) jne 0x164ae3d leaq 0xc8(%rsp), %rax movq %rax, 0x168(%rsp) movq 0x168(%rsp), %rax movq %rax, 0x298(%rsp) movq 0x298(%rsp), %rax movq %rax, 0x38(%rsp) cmpq $0x0, 0x8(%rax) je 0x164ade3 movq 0x38(%rsp), %rax movq 0x8(%rax), %rcx movl $0xffffffff, 0x294(%rsp) # imm = 0xFFFFFFFF movl 0x294(%rsp), %eax lock xaddl %eax, (%rcx) movl %eax, 0x290(%rsp) cmpl $0x1, 0x290(%rsp) jne 0x164ade3 movq 0x38(%rsp), %rax cmpq $0x0, 0x20(%rax) je 0x164adb4 movq 0x38(%rsp), %rax movq (%rax), %rsi movq 0x20(%rax), %rdi movq (%rdi), %rax movq 0x18(%rax), %rax vzeroupper callq *%rax jmp 0x164adb2 jmp 0x164ade1 movq 0x38(%rsp), %rax movq (%rax), %rax movq %rax, 0x2a0(%rsp) cmpq $0x0, 0x2a0(%rsp) je 0x164addf movq 0x2a0(%rsp), %rdi vzeroupper callq 0x5f480 jmp 0x164ade1 jmp 0x164ade3 movq 0x38(%rsp), %rax movq $0x0, (%rax) movq $0x0, 0x10(%rax) movl $0x0, 0x18(%rax) movl $0x0, 0x28(%rax) movl $0x0, 0x2c(%rax) movl $0x0, 0x30(%rax) movl $0x0, 0x34(%rax) movl $0x0, 0x38(%rax) movq $0x0, 0x40(%rax) movq $0x0, 0x8(%rax) jmp 0x164ae3b movq %rax, %rdi callq 0x678a0 jmp 0x164ae3d leaq 0xc8(%rsp), %rax movq %rax, 0x160(%rsp) movq 0x160(%rsp), %rax movq (%rax), %rax movq %rax, 0x28(%rsp) 
leaq 0xc8(%rsp), %rax movq %rax, 0x170(%rsp) movq 0x170(%rsp), %rax movq %rax, 0x288(%rsp) movq 0x288(%rsp), %rax movq %rax, 0x30(%rsp) cmpq $0x0, 0x8(%rax) je 0x164af1c movq 0x30(%rsp), %rax movq 0x8(%rax), %rcx movl $0xffffffff, 0x284(%rsp) # imm = 0xFFFFFFFF movl 0x284(%rsp), %eax lock xaddl %eax, (%rcx) movl %eax, 0x280(%rsp) cmpl $0x1, 0x280(%rsp) jne 0x164af1c movq 0x30(%rsp), %rax cmpq $0x0, 0x20(%rax) je 0x164aeed movq 0x30(%rsp), %rax movq (%rax), %rsi movq 0x20(%rax), %rdi movq (%rdi), %rax movq 0x18(%rax), %rax vzeroupper callq *%rax jmp 0x164aeeb jmp 0x164af1a movq 0x30(%rsp), %rax movq (%rax), %rax movq %rax, 0x2a8(%rsp) cmpq $0x0, 0x2a8(%rsp) je 0x164af18 movq 0x2a8(%rsp), %rdi vzeroupper callq 0x5f480 jmp 0x164af1a jmp 0x164af1c movq 0x30(%rsp), %rax movq $0x0, (%rax) movq $0x0, 0x10(%rax) movl $0x0, 0x18(%rax) movl $0x0, 0x28(%rax) movl $0x0, 0x2c(%rax) movl $0x0, 0x30(%rax) movl $0x0, 0x34(%rax) movl $0x0, 0x38(%rax) movq $0x0, 0x40(%rax) movq $0x0, 0x8(%rax) jmp 0x164af74 movq %rax, %rdi callq 0x678a0 movq 0x28(%rsp), %rax movq %rax, 0x110(%rsp) movl $0x0, 0xc4(%rsp) movl 0xc4(%rsp), %eax addl $0xf, %eax cmpl 0x11c(%rsp), %eax jge 0x164b046 movq 0x110(%rsp), %rax movq %rax, 0x178(%rsp) movq 0x178(%rsp), %rax vmovups (%rax), %zmm0 vmovaps %zmm0, 0x80(%rsp) leaq 0x137(%rsp), %rdi leaq 0x80(%rsp), %rsi callq 0x16553c0 vmovaps %zmm0, 0x80(%rsp) movq 0x110(%rsp), %rax vmovaps 0x80(%rsp), %zmm0 movq %rax, 0x1d0(%rsp) vmovaps %zmm0, 0x180(%rsp) vmovaps 0x180(%rsp), %zmm0 movq 0x1d0(%rsp), %rax vmovups %zmm0, (%rax) movq 0x110(%rsp), %rax addq $0x40, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x10, %eax movl %eax, 0xc4(%rsp) jmp 0x164af8c jmp 0x164b048 movl 0xc4(%rsp), %eax addl $0x7, %eax cmpl 0x11c(%rsp), %eax jge 0x164b0f7 movq 0x110(%rsp), %rax movq %rax, 0x1d8(%rsp) movq 0x1d8(%rsp), %rax vmovups (%rax), %ymm0 vmovaps %ymm0, 0x60(%rsp) leaq 0x137(%rsp), %rdi leaq 0x60(%rsp), %rsi callq 0x1656010 vmovaps %ymm0, 0x60(%rsp) movq 0x110(%rsp), %rax vmovaps 0x60(%rsp), %ymm0 movq %rax, 0x210(%rsp) vmovaps %ymm0, 0x1e0(%rsp) vmovaps 0x1e0(%rsp), %ymm0 movq 0x210(%rsp), %rax vmovups %ymm0, (%rax) movq 0x110(%rsp), %rax addq $0x20, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x8, %eax movl %eax, 0xc4(%rsp) jmp 0x164b048 jmp 0x164b0f9 movl 0xc4(%rsp), %eax addl $0x3, %eax cmpl 0x11c(%rsp), %eax jge 0x164b1ab movq 0x110(%rsp), %rax movq %rax, 0x218(%rsp) movq 0x218(%rsp), %rax vmovaps (%rax), %xmm0 vmovaps %xmm0, 0x50(%rsp) leaq 0x137(%rsp), %rdi leaq 0x50(%rsp), %rsi vzeroupper callq 0x1657010 vmovaps %xmm0, 0x50(%rsp) movq 0x110(%rsp), %rax vmovaps 0x50(%rsp), %xmm0 movq %rax, 0x230(%rsp) vmovaps %xmm0, 0x220(%rsp) vmovaps 0x220(%rsp), %xmm0 movq 0x230(%rsp), %rax vmovaps %xmm0, (%rax) movq 0x110(%rsp), %rax addq $0x10, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x4, %eax movl %eax, 0xc4(%rsp) jmp 0x164b0f9 jmp 0x164b1ad movl 0xc4(%rsp), %eax cmpl 0x11c(%rsp), %eax jge 0x164b208 movq 0x110(%rsp), %rsi leaq 0x137(%rsp), %rdi vzeroupper callq 0x1657eb0 movq 0x110(%rsp), %rax vmovss %xmm0, (%rax) movq 0x110(%rsp), %rax addq $0x4, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x1, %eax movl %eax, 0xc4(%rsp) jmp 0x164b1ad jmp 0x164b20a movl 0x118(%rsp), %eax addl $0x1, %eax movl %eax, 0x118(%rsp) jmp 0x164ab4a xorl %eax, %eax movq %rbp, %rsp popq %rbp vzeroupper retq nopw (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
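The unary_op_inplace template above is a width-dispatch loop: it consumes 16 floats per iteration while it can, then falls back to 8, then 4, then a scalar tail, so one instantiation covers the AVX-512, AVX, SSE2, and plain scalar paths in a single pass over each channel. A minimal standalone sketch of the same pattern, using negation as the op (illustration only, not ncnn API; it uses unaligned loads throughout, whereas the pack4 loop above can use aligned _mm_load_ps because ncnn::Mat storage is aligned):

// Sketch of the 16/8/4/1 tail-loop pattern from unary_op_inplace, applied to
// a plain float array. Hypothetical helper, not part of ncnn.
#include <immintrin.h>
#include <cstdio>

static void negate_inplace(float* ptr, int size)
{
    int i = 0;
#if defined(__AVX512F__)
    for (; i + 15 < size; i += 16)
    {
        __m512 p = _mm512_loadu_ps(ptr);
        _mm512_storeu_ps(ptr, _mm512_sub_ps(_mm512_setzero_ps(), p));
        ptr += 16;
    }
#endif
#if defined(__AVX__)
    for (; i + 7 < size; i += 8)
    {
        __m256 p = _mm256_loadu_ps(ptr);
        _mm256_storeu_ps(ptr, _mm256_sub_ps(_mm256_setzero_ps(), p));
        ptr += 8;
    }
#endif
#if defined(__SSE2__)
    for (; i + 3 < size; i += 4)
    {
        __m128 p = _mm_loadu_ps(ptr);
        _mm_storeu_ps(ptr, _mm_sub_ps(_mm_setzero_ps(), p));
        ptr += 4;
    }
#endif
    for (; i < size; i++) // scalar tail handles the leftover 0..3 elements
    {
        *ptr = -*ptr;
        ptr++;
    }
}

int main()
{
    float v[19];
    for (int i = 0; i < 19; i++) v[i] = (float)i;
    negate_inplace(v, 19); // 19 = 16 + extra, exercises wide loop plus tail
    printf("%f %f\n", v[0], v[18]); // -0.000000 -18.000000
}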
2,113,247
int ncnn::unary_op_inplace<ncnn::UnaryOp_x86_avx512_functor::unary_op_asin>(ncnn::Mat&, ncnn::Option const&)
static int unary_op_inplace(Mat& a, const Option& opt) { Op op; int w = a.w; int h = a.h; int d = a.d; int channels = a.c; int elempack = a.elempack; int size = w * h * d * elempack; #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < channels; q++) { float* ptr = a.channel(q); int i = 0; #if __SSE2__ #if __AVX__ #if __AVX512F__ for (; i + 15 < size; i += 16) { __m512 _p = _mm512_loadu_ps(ptr); _p = op.func_pack16(_p); _mm512_storeu_ps(ptr, _p); ptr += 16; } #endif // __AVX512F__ for (; i + 7 < size; i += 8) { __m256 _p = _mm256_loadu_ps(ptr); _p = op.func_pack8(_p); _mm256_storeu_ps(ptr, _p); ptr += 8; } #endif // __AVX__ for (; i + 3 < size; i += 4) { __m128 _p = _mm_load_ps(ptr); _p = op.func_pack4(_p); _mm_store_ps(ptr, _p); ptr += 4; } #endif // __SSE2__ for (; i < size; i++) { *ptr = op.func(*ptr); ptr++; } } return 0; }
pushq %rbp movq %rsp, %rbp andq $-0x40, %rsp subq $0x2c0, %rsp # imm = 0x2C0 movq %rdi, 0x140(%rsp) movq %rsi, 0x138(%rsp) movq 0x140(%rsp), %rax movl 0x2c(%rax), %eax movl %eax, 0x130(%rsp) movq 0x140(%rsp), %rax movl 0x30(%rax), %eax movl %eax, 0x12c(%rsp) movq 0x140(%rsp), %rax movl 0x34(%rax), %eax movl %eax, 0x128(%rsp) movq 0x140(%rsp), %rax movl 0x38(%rax), %eax movl %eax, 0x124(%rsp) movq 0x140(%rsp), %rax movl 0x18(%rax), %eax movl %eax, 0x120(%rsp) movl 0x130(%rsp), %eax imull 0x12c(%rsp), %eax imull 0x128(%rsp), %eax imull 0x120(%rsp), %eax movl %eax, 0x11c(%rsp) movl $0x0, 0x118(%rsp) movl 0x118(%rsp), %eax cmpl 0x124(%rsp), %eax jge 0x164b9b0 movq 0x140(%rsp), %rcx movl 0x118(%rsp), %eax leaq 0xc8(%rsp), %rdx movq %rdx, 0x158(%rsp) movq %rcx, 0x150(%rsp) movl %eax, 0x14c(%rsp) movq 0x150(%rsp), %rax movq %rax, 0x48(%rsp) movb $0x0, 0x14b(%rsp) movl 0x2c(%rax), %r9d movl 0x30(%rax), %r8d movl 0x34(%rax), %edi movq (%rax), %rsi movq 0x40(%rax), %rcx movslq 0x14c(%rsp), %rdx imulq %rdx, %rcx imulq 0x10(%rax), %rcx addq %rcx, %rsi movq 0x10(%rax), %rdx movl 0x18(%rax), %ecx movq 0x20(%rax), %rax leaq 0xc8(%rsp), %r10 movq %r10, 0x268(%rsp) movl %r9d, 0x264(%rsp) movl %r8d, 0x260(%rsp) movl %edi, 0x25c(%rsp) movq %rsi, 0x250(%rsp) movq %rdx, 0x248(%rsp) movl %ecx, 0x244(%rsp) movq %rax, 0x238(%rsp) movq 0x268(%rsp), %rcx movq %rcx, 0x40(%rsp) movq 0x250(%rsp), %rax movq %rax, (%rcx) movq $0x0, 0x8(%rcx) movq 0x248(%rsp), %rax movq %rax, 0x10(%rcx) movl 0x244(%rsp), %eax movl %eax, 0x18(%rcx) movq 0x238(%rsp), %rax movq %rax, 0x20(%rcx) movl $0x3, 0x28(%rcx) movl 0x264(%rsp), %eax movl %eax, 0x2c(%rcx) movl 0x260(%rsp), %eax movl %eax, 0x30(%rcx) movl $0x1, 0x34(%rcx) movl 0x25c(%rsp), %eax movl %eax, 0x38(%rcx) movslq 0x2c(%rcx), %rax movslq 0x30(%rcx), %rdx imulq %rdx, %rax imulq 0x10(%rcx), %rax movq %rax, 0x278(%rsp) movl $0x10, 0x274(%rsp) movq 0x278(%rsp), %rax movslq 0x274(%rsp), %rdx addq %rdx, %rax subq $0x1, %rax xorl %edx, %edx subl 0x274(%rsp), %edx movslq %edx, %rdx andq %rdx, %rax xorl %edx, %edx divq 0x10(%rcx) movq %rax, %rdx movq 0x48(%rsp), %rax movq %rdx, 0x40(%rcx) movl 0x28(%rax), %ecx subl $0x1, %ecx movl %ecx, 0xf0(%rsp) cmpl $0x4, 0x28(%rax) jne 0x164b49e movq 0x48(%rsp), %rcx movslq 0x2c(%rcx), %rax movslq 0x30(%rcx), %rcx imulq %rcx, %rax movq %rax, 0x108(%rsp) movb $0x1, 0x14b(%rsp) testb $0x1, 0x14b(%rsp) jne 0x164b5cd leaq 0xc8(%rsp), %rax movq %rax, 0x168(%rsp) movq 0x168(%rsp), %rax movq %rax, 0x298(%rsp) movq 0x298(%rsp), %rax movq %rax, 0x38(%rsp) cmpq $0x0, 0x8(%rax) je 0x164b573 movq 0x38(%rsp), %rax movq 0x8(%rax), %rcx movl $0xffffffff, 0x294(%rsp) # imm = 0xFFFFFFFF movl 0x294(%rsp), %eax lock xaddl %eax, (%rcx) movl %eax, 0x290(%rsp) cmpl $0x1, 0x290(%rsp) jne 0x164b573 movq 0x38(%rsp), %rax cmpq $0x0, 0x20(%rax) je 0x164b544 movq 0x38(%rsp), %rax movq (%rax), %rsi movq 0x20(%rax), %rdi movq (%rdi), %rax movq 0x18(%rax), %rax vzeroupper callq *%rax jmp 0x164b542 jmp 0x164b571 movq 0x38(%rsp), %rax movq (%rax), %rax movq %rax, 0x2a0(%rsp) cmpq $0x0, 0x2a0(%rsp) je 0x164b56f movq 0x2a0(%rsp), %rdi vzeroupper callq 0x5f480 jmp 0x164b571 jmp 0x164b573 movq 0x38(%rsp), %rax movq $0x0, (%rax) movq $0x0, 0x10(%rax) movl $0x0, 0x18(%rax) movl $0x0, 0x28(%rax) movl $0x0, 0x2c(%rax) movl $0x0, 0x30(%rax) movl $0x0, 0x34(%rax) movl $0x0, 0x38(%rax) movq $0x0, 0x40(%rax) movq $0x0, 0x8(%rax) jmp 0x164b5cb movq %rax, %rdi callq 0x678a0 jmp 0x164b5cd leaq 0xc8(%rsp), %rax movq %rax, 0x160(%rsp) movq 0x160(%rsp), %rax movq (%rax), %rax movq %rax, 0x28(%rsp) 
leaq 0xc8(%rsp), %rax movq %rax, 0x170(%rsp) movq 0x170(%rsp), %rax movq %rax, 0x288(%rsp) movq 0x288(%rsp), %rax movq %rax, 0x30(%rsp) cmpq $0x0, 0x8(%rax) je 0x164b6ac movq 0x30(%rsp), %rax movq 0x8(%rax), %rcx movl $0xffffffff, 0x284(%rsp) # imm = 0xFFFFFFFF movl 0x284(%rsp), %eax lock xaddl %eax, (%rcx) movl %eax, 0x280(%rsp) cmpl $0x1, 0x280(%rsp) jne 0x164b6ac movq 0x30(%rsp), %rax cmpq $0x0, 0x20(%rax) je 0x164b67d movq 0x30(%rsp), %rax movq (%rax), %rsi movq 0x20(%rax), %rdi movq (%rdi), %rax movq 0x18(%rax), %rax vzeroupper callq *%rax jmp 0x164b67b jmp 0x164b6aa movq 0x30(%rsp), %rax movq (%rax), %rax movq %rax, 0x2a8(%rsp) cmpq $0x0, 0x2a8(%rsp) je 0x164b6a8 movq 0x2a8(%rsp), %rdi vzeroupper callq 0x5f480 jmp 0x164b6aa jmp 0x164b6ac movq 0x30(%rsp), %rax movq $0x0, (%rax) movq $0x0, 0x10(%rax) movl $0x0, 0x18(%rax) movl $0x0, 0x28(%rax) movl $0x0, 0x2c(%rax) movl $0x0, 0x30(%rax) movl $0x0, 0x34(%rax) movl $0x0, 0x38(%rax) movq $0x0, 0x40(%rax) movq $0x0, 0x8(%rax) jmp 0x164b704 movq %rax, %rdi callq 0x678a0 movq 0x28(%rsp), %rax movq %rax, 0x110(%rsp) movl $0x0, 0xc4(%rsp) movl 0xc4(%rsp), %eax addl $0xf, %eax cmpl 0x11c(%rsp), %eax jge 0x164b7d6 movq 0x110(%rsp), %rax movq %rax, 0x178(%rsp) movq 0x178(%rsp), %rax vmovups (%rax), %zmm0 vmovaps %zmm0, 0x80(%rsp) leaq 0x137(%rsp), %rdi leaq 0x80(%rsp), %rsi callq 0x1657ee0 vmovaps %zmm0, 0x80(%rsp) movq 0x110(%rsp), %rax vmovaps 0x80(%rsp), %zmm0 movq %rax, 0x1d0(%rsp) vmovaps %zmm0, 0x180(%rsp) vmovaps 0x180(%rsp), %zmm0 movq 0x1d0(%rsp), %rax vmovups %zmm0, (%rax) movq 0x110(%rsp), %rax addq $0x40, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x10, %eax movl %eax, 0xc4(%rsp) jmp 0x164b71c jmp 0x164b7d8 movl 0xc4(%rsp), %eax addl $0x7, %eax cmpl 0x11c(%rsp), %eax jge 0x164b887 movq 0x110(%rsp), %rax movq %rax, 0x1d8(%rsp) movq 0x1d8(%rsp), %rax vmovups (%rax), %ymm0 vmovaps %ymm0, 0x60(%rsp) leaq 0x137(%rsp), %rdi leaq 0x60(%rsp), %rsi callq 0x1657f90 vmovaps %ymm0, 0x60(%rsp) movq 0x110(%rsp), %rax vmovaps 0x60(%rsp), %ymm0 movq %rax, 0x210(%rsp) vmovaps %ymm0, 0x1e0(%rsp) vmovaps 0x1e0(%rsp), %ymm0 movq 0x210(%rsp), %rax vmovups %ymm0, (%rax) movq 0x110(%rsp), %rax addq $0x20, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x8, %eax movl %eax, 0xc4(%rsp) jmp 0x164b7d8 jmp 0x164b889 movl 0xc4(%rsp), %eax addl $0x3, %eax cmpl 0x11c(%rsp), %eax jge 0x164b93b movq 0x110(%rsp), %rax movq %rax, 0x218(%rsp) movq 0x218(%rsp), %rax vmovaps (%rax), %xmm0 vmovaps %xmm0, 0x50(%rsp) leaq 0x137(%rsp), %rdi leaq 0x50(%rsp), %rsi vzeroupper callq 0x1658070 vmovaps %xmm0, 0x50(%rsp) movq 0x110(%rsp), %rax vmovaps 0x50(%rsp), %xmm0 movq %rax, 0x230(%rsp) vmovaps %xmm0, 0x220(%rsp) vmovaps 0x220(%rsp), %xmm0 movq 0x230(%rsp), %rax vmovaps %xmm0, (%rax) movq 0x110(%rsp), %rax addq $0x10, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x4, %eax movl %eax, 0xc4(%rsp) jmp 0x164b889 jmp 0x164b93d movl 0xc4(%rsp), %eax cmpl 0x11c(%rsp), %eax jge 0x164b998 movq 0x110(%rsp), %rsi leaq 0x137(%rsp), %rdi vzeroupper callq 0x1658100 movq 0x110(%rsp), %rax vmovss %xmm0, (%rax) movq 0x110(%rsp), %rax addq $0x4, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x1, %eax movl %eax, 0xc4(%rsp) jmp 0x164b93d jmp 0x164b99a movl 0x118(%rsp), %eax addl $0x1, %eax movl %eax, 0x118(%rsp) jmp 0x164b2da xorl %eax, %eax movq %rbp, %rsp popq %rbp vzeroupper retq nopw (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,248
int ncnn::unary_op_inplace<ncnn::UnaryOp_x86_avx512_functor::unary_op_acos>(ncnn::Mat&, ncnn::Option const&)
static int unary_op_inplace(Mat& a, const Option& opt) { Op op; int w = a.w; int h = a.h; int d = a.d; int channels = a.c; int elempack = a.elempack; int size = w * h * d * elempack; #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < channels; q++) { float* ptr = a.channel(q); int i = 0; #if __SSE2__ #if __AVX__ #if __AVX512F__ for (; i + 15 < size; i += 16) { __m512 _p = _mm512_loadu_ps(ptr); _p = op.func_pack16(_p); _mm512_storeu_ps(ptr, _p); ptr += 16; } #endif // __AVX512F__ for (; i + 7 < size; i += 8) { __m256 _p = _mm256_loadu_ps(ptr); _p = op.func_pack8(_p); _mm256_storeu_ps(ptr, _p); ptr += 8; } #endif // __AVX__ for (; i + 3 < size; i += 4) { __m128 _p = _mm_load_ps(ptr); _p = op.func_pack4(_p); _mm_store_ps(ptr, _p); ptr += 4; } #endif // __SSE2__ for (; i < size; i++) { *ptr = op.func(*ptr); ptr++; } } return 0; }
pushq %rbp movq %rsp, %rbp andq $-0x40, %rsp subq $0x2c0, %rsp # imm = 0x2C0 movq %rdi, 0x140(%rsp) movq %rsi, 0x138(%rsp) movq 0x140(%rsp), %rax movl 0x2c(%rax), %eax movl %eax, 0x130(%rsp) movq 0x140(%rsp), %rax movl 0x30(%rax), %eax movl %eax, 0x12c(%rsp) movq 0x140(%rsp), %rax movl 0x34(%rax), %eax movl %eax, 0x128(%rsp) movq 0x140(%rsp), %rax movl 0x38(%rax), %eax movl %eax, 0x124(%rsp) movq 0x140(%rsp), %rax movl 0x18(%rax), %eax movl %eax, 0x120(%rsp) movl 0x130(%rsp), %eax imull 0x12c(%rsp), %eax imull 0x128(%rsp), %eax imull 0x120(%rsp), %eax movl %eax, 0x11c(%rsp) movl $0x0, 0x118(%rsp) movl 0x118(%rsp), %eax cmpl 0x124(%rsp), %eax jge 0x164c140 movq 0x140(%rsp), %rcx movl 0x118(%rsp), %eax leaq 0xc8(%rsp), %rdx movq %rdx, 0x158(%rsp) movq %rcx, 0x150(%rsp) movl %eax, 0x14c(%rsp) movq 0x150(%rsp), %rax movq %rax, 0x48(%rsp) movb $0x0, 0x14b(%rsp) movl 0x2c(%rax), %r9d movl 0x30(%rax), %r8d movl 0x34(%rax), %edi movq (%rax), %rsi movq 0x40(%rax), %rcx movslq 0x14c(%rsp), %rdx imulq %rdx, %rcx imulq 0x10(%rax), %rcx addq %rcx, %rsi movq 0x10(%rax), %rdx movl 0x18(%rax), %ecx movq 0x20(%rax), %rax leaq 0xc8(%rsp), %r10 movq %r10, 0x268(%rsp) movl %r9d, 0x264(%rsp) movl %r8d, 0x260(%rsp) movl %edi, 0x25c(%rsp) movq %rsi, 0x250(%rsp) movq %rdx, 0x248(%rsp) movl %ecx, 0x244(%rsp) movq %rax, 0x238(%rsp) movq 0x268(%rsp), %rcx movq %rcx, 0x40(%rsp) movq 0x250(%rsp), %rax movq %rax, (%rcx) movq $0x0, 0x8(%rcx) movq 0x248(%rsp), %rax movq %rax, 0x10(%rcx) movl 0x244(%rsp), %eax movl %eax, 0x18(%rcx) movq 0x238(%rsp), %rax movq %rax, 0x20(%rcx) movl $0x3, 0x28(%rcx) movl 0x264(%rsp), %eax movl %eax, 0x2c(%rcx) movl 0x260(%rsp), %eax movl %eax, 0x30(%rcx) movl $0x1, 0x34(%rcx) movl 0x25c(%rsp), %eax movl %eax, 0x38(%rcx) movslq 0x2c(%rcx), %rax movslq 0x30(%rcx), %rdx imulq %rdx, %rax imulq 0x10(%rcx), %rax movq %rax, 0x278(%rsp) movl $0x10, 0x274(%rsp) movq 0x278(%rsp), %rax movslq 0x274(%rsp), %rdx addq %rdx, %rax subq $0x1, %rax xorl %edx, %edx subl 0x274(%rsp), %edx movslq %edx, %rdx andq %rdx, %rax xorl %edx, %edx divq 0x10(%rcx) movq %rax, %rdx movq 0x48(%rsp), %rax movq %rdx, 0x40(%rcx) movl 0x28(%rax), %ecx subl $0x1, %ecx movl %ecx, 0xf0(%rsp) cmpl $0x4, 0x28(%rax) jne 0x164bc2e movq 0x48(%rsp), %rcx movslq 0x2c(%rcx), %rax movslq 0x30(%rcx), %rcx imulq %rcx, %rax movq %rax, 0x108(%rsp) movb $0x1, 0x14b(%rsp) testb $0x1, 0x14b(%rsp) jne 0x164bd5d leaq 0xc8(%rsp), %rax movq %rax, 0x168(%rsp) movq 0x168(%rsp), %rax movq %rax, 0x298(%rsp) movq 0x298(%rsp), %rax movq %rax, 0x38(%rsp) cmpq $0x0, 0x8(%rax) je 0x164bd03 movq 0x38(%rsp), %rax movq 0x8(%rax), %rcx movl $0xffffffff, 0x294(%rsp) # imm = 0xFFFFFFFF movl 0x294(%rsp), %eax lock xaddl %eax, (%rcx) movl %eax, 0x290(%rsp) cmpl $0x1, 0x290(%rsp) jne 0x164bd03 movq 0x38(%rsp), %rax cmpq $0x0, 0x20(%rax) je 0x164bcd4 movq 0x38(%rsp), %rax movq (%rax), %rsi movq 0x20(%rax), %rdi movq (%rdi), %rax movq 0x18(%rax), %rax vzeroupper callq *%rax jmp 0x164bcd2 jmp 0x164bd01 movq 0x38(%rsp), %rax movq (%rax), %rax movq %rax, 0x2a0(%rsp) cmpq $0x0, 0x2a0(%rsp) je 0x164bcff movq 0x2a0(%rsp), %rdi vzeroupper callq 0x5f480 jmp 0x164bd01 jmp 0x164bd03 movq 0x38(%rsp), %rax movq $0x0, (%rax) movq $0x0, 0x10(%rax) movl $0x0, 0x18(%rax) movl $0x0, 0x28(%rax) movl $0x0, 0x2c(%rax) movl $0x0, 0x30(%rax) movl $0x0, 0x34(%rax) movl $0x0, 0x38(%rax) movq $0x0, 0x40(%rax) movq $0x0, 0x8(%rax) jmp 0x164bd5b movq %rax, %rdi callq 0x678a0 jmp 0x164bd5d leaq 0xc8(%rsp), %rax movq %rax, 0x160(%rsp) movq 0x160(%rsp), %rax movq (%rax), %rax movq %rax, 0x28(%rsp) 
leaq 0xc8(%rsp), %rax movq %rax, 0x170(%rsp) movq 0x170(%rsp), %rax movq %rax, 0x288(%rsp) movq 0x288(%rsp), %rax movq %rax, 0x30(%rsp) cmpq $0x0, 0x8(%rax) je 0x164be3c movq 0x30(%rsp), %rax movq 0x8(%rax), %rcx movl $0xffffffff, 0x284(%rsp) # imm = 0xFFFFFFFF movl 0x284(%rsp), %eax lock xaddl %eax, (%rcx) movl %eax, 0x280(%rsp) cmpl $0x1, 0x280(%rsp) jne 0x164be3c movq 0x30(%rsp), %rax cmpq $0x0, 0x20(%rax) je 0x164be0d movq 0x30(%rsp), %rax movq (%rax), %rsi movq 0x20(%rax), %rdi movq (%rdi), %rax movq 0x18(%rax), %rax vzeroupper callq *%rax jmp 0x164be0b jmp 0x164be3a movq 0x30(%rsp), %rax movq (%rax), %rax movq %rax, 0x2a8(%rsp) cmpq $0x0, 0x2a8(%rsp) je 0x164be38 movq 0x2a8(%rsp), %rdi vzeroupper callq 0x5f480 jmp 0x164be3a jmp 0x164be3c movq 0x30(%rsp), %rax movq $0x0, (%rax) movq $0x0, 0x10(%rax) movl $0x0, 0x18(%rax) movl $0x0, 0x28(%rax) movl $0x0, 0x2c(%rax) movl $0x0, 0x30(%rax) movl $0x0, 0x34(%rax) movl $0x0, 0x38(%rax) movq $0x0, 0x40(%rax) movq $0x0, 0x8(%rax) jmp 0x164be94 movq %rax, %rdi callq 0x678a0 movq 0x28(%rsp), %rax movq %rax, 0x110(%rsp) movl $0x0, 0xc4(%rsp) movl 0xc4(%rsp), %eax addl $0xf, %eax cmpl 0x11c(%rsp), %eax jge 0x164bf66 movq 0x110(%rsp), %rax movq %rax, 0x178(%rsp) movq 0x178(%rsp), %rax vmovups (%rax), %zmm0 vmovaps %zmm0, 0x80(%rsp) leaq 0x137(%rsp), %rdi leaq 0x80(%rsp), %rsi callq 0x1658130 vmovaps %zmm0, 0x80(%rsp) movq 0x110(%rsp), %rax vmovaps 0x80(%rsp), %zmm0 movq %rax, 0x1d0(%rsp) vmovaps %zmm0, 0x180(%rsp) vmovaps 0x180(%rsp), %zmm0 movq 0x1d0(%rsp), %rax vmovups %zmm0, (%rax) movq 0x110(%rsp), %rax addq $0x40, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x10, %eax movl %eax, 0xc4(%rsp) jmp 0x164beac jmp 0x164bf68 movl 0xc4(%rsp), %eax addl $0x7, %eax cmpl 0x11c(%rsp), %eax jge 0x164c017 movq 0x110(%rsp), %rax movq %rax, 0x1d8(%rsp) movq 0x1d8(%rsp), %rax vmovups (%rax), %ymm0 vmovaps %ymm0, 0x60(%rsp) leaq 0x137(%rsp), %rdi leaq 0x60(%rsp), %rsi callq 0x16581e0 vmovaps %ymm0, 0x60(%rsp) movq 0x110(%rsp), %rax vmovaps 0x60(%rsp), %ymm0 movq %rax, 0x210(%rsp) vmovaps %ymm0, 0x1e0(%rsp) vmovaps 0x1e0(%rsp), %ymm0 movq 0x210(%rsp), %rax vmovups %ymm0, (%rax) movq 0x110(%rsp), %rax addq $0x20, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x8, %eax movl %eax, 0xc4(%rsp) jmp 0x164bf68 jmp 0x164c019 movl 0xc4(%rsp), %eax addl $0x3, %eax cmpl 0x11c(%rsp), %eax jge 0x164c0cb movq 0x110(%rsp), %rax movq %rax, 0x218(%rsp) movq 0x218(%rsp), %rax vmovaps (%rax), %xmm0 vmovaps %xmm0, 0x50(%rsp) leaq 0x137(%rsp), %rdi leaq 0x50(%rsp), %rsi vzeroupper callq 0x16582c0 vmovaps %xmm0, 0x50(%rsp) movq 0x110(%rsp), %rax vmovaps 0x50(%rsp), %xmm0 movq %rax, 0x230(%rsp) vmovaps %xmm0, 0x220(%rsp) vmovaps 0x220(%rsp), %xmm0 movq 0x230(%rsp), %rax vmovaps %xmm0, (%rax) movq 0x110(%rsp), %rax addq $0x10, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x4, %eax movl %eax, 0xc4(%rsp) jmp 0x164c019 jmp 0x164c0cd movl 0xc4(%rsp), %eax cmpl 0x11c(%rsp), %eax jge 0x164c128 movq 0x110(%rsp), %rsi leaq 0x137(%rsp), %rdi vzeroupper callq 0x1658350 movq 0x110(%rsp), %rax vmovss %xmm0, (%rax) movq 0x110(%rsp), %rax addq $0x4, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x1, %eax movl %eax, 0xc4(%rsp) jmp 0x164c0cd jmp 0x164c12a movl 0x118(%rsp), %eax addl $0x1, %eax movl %eax, 0x118(%rsp) jmp 0x164ba6a xorl %eax, %eax movq %rbp, %rsp popq %rbp vzeroupper retq nopw (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,249
int ncnn::unary_op_inplace<ncnn::UnaryOp_x86_avx512_functor::unary_op_atan>(ncnn::Mat&, ncnn::Option const&)
static int unary_op_inplace(Mat& a, const Option& opt) { Op op; int w = a.w; int h = a.h; int d = a.d; int channels = a.c; int elempack = a.elempack; int size = w * h * d * elempack; #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < channels; q++) { float* ptr = a.channel(q); int i = 0; #if __SSE2__ #if __AVX__ #if __AVX512F__ for (; i + 15 < size; i += 16) { __m512 _p = _mm512_loadu_ps(ptr); _p = op.func_pack16(_p); _mm512_storeu_ps(ptr, _p); ptr += 16; } #endif // __AVX512F__ for (; i + 7 < size; i += 8) { __m256 _p = _mm256_loadu_ps(ptr); _p = op.func_pack8(_p); _mm256_storeu_ps(ptr, _p); ptr += 8; } #endif // __AVX__ for (; i + 3 < size; i += 4) { __m128 _p = _mm_load_ps(ptr); _p = op.func_pack4(_p); _mm_store_ps(ptr, _p); ptr += 4; } #endif // __SSE2__ for (; i < size; i++) { *ptr = op.func(*ptr); ptr++; } } return 0; }
pushq %rbp movq %rsp, %rbp andq $-0x40, %rsp subq $0x2c0, %rsp # imm = 0x2C0 movq %rdi, 0x140(%rsp) movq %rsi, 0x138(%rsp) movq 0x140(%rsp), %rax movl 0x2c(%rax), %eax movl %eax, 0x130(%rsp) movq 0x140(%rsp), %rax movl 0x30(%rax), %eax movl %eax, 0x12c(%rsp) movq 0x140(%rsp), %rax movl 0x34(%rax), %eax movl %eax, 0x128(%rsp) movq 0x140(%rsp), %rax movl 0x38(%rax), %eax movl %eax, 0x124(%rsp) movq 0x140(%rsp), %rax movl 0x18(%rax), %eax movl %eax, 0x120(%rsp) movl 0x130(%rsp), %eax imull 0x12c(%rsp), %eax imull 0x128(%rsp), %eax imull 0x120(%rsp), %eax movl %eax, 0x11c(%rsp) movl $0x0, 0x118(%rsp) movl 0x118(%rsp), %eax cmpl 0x124(%rsp), %eax jge 0x164c8d0 movq 0x140(%rsp), %rcx movl 0x118(%rsp), %eax leaq 0xc8(%rsp), %rdx movq %rdx, 0x158(%rsp) movq %rcx, 0x150(%rsp) movl %eax, 0x14c(%rsp) movq 0x150(%rsp), %rax movq %rax, 0x48(%rsp) movb $0x0, 0x14b(%rsp) movl 0x2c(%rax), %r9d movl 0x30(%rax), %r8d movl 0x34(%rax), %edi movq (%rax), %rsi movq 0x40(%rax), %rcx movslq 0x14c(%rsp), %rdx imulq %rdx, %rcx imulq 0x10(%rax), %rcx addq %rcx, %rsi movq 0x10(%rax), %rdx movl 0x18(%rax), %ecx movq 0x20(%rax), %rax leaq 0xc8(%rsp), %r10 movq %r10, 0x268(%rsp) movl %r9d, 0x264(%rsp) movl %r8d, 0x260(%rsp) movl %edi, 0x25c(%rsp) movq %rsi, 0x250(%rsp) movq %rdx, 0x248(%rsp) movl %ecx, 0x244(%rsp) movq %rax, 0x238(%rsp) movq 0x268(%rsp), %rcx movq %rcx, 0x40(%rsp) movq 0x250(%rsp), %rax movq %rax, (%rcx) movq $0x0, 0x8(%rcx) movq 0x248(%rsp), %rax movq %rax, 0x10(%rcx) movl 0x244(%rsp), %eax movl %eax, 0x18(%rcx) movq 0x238(%rsp), %rax movq %rax, 0x20(%rcx) movl $0x3, 0x28(%rcx) movl 0x264(%rsp), %eax movl %eax, 0x2c(%rcx) movl 0x260(%rsp), %eax movl %eax, 0x30(%rcx) movl $0x1, 0x34(%rcx) movl 0x25c(%rsp), %eax movl %eax, 0x38(%rcx) movslq 0x2c(%rcx), %rax movslq 0x30(%rcx), %rdx imulq %rdx, %rax imulq 0x10(%rcx), %rax movq %rax, 0x278(%rsp) movl $0x10, 0x274(%rsp) movq 0x278(%rsp), %rax movslq 0x274(%rsp), %rdx addq %rdx, %rax subq $0x1, %rax xorl %edx, %edx subl 0x274(%rsp), %edx movslq %edx, %rdx andq %rdx, %rax xorl %edx, %edx divq 0x10(%rcx) movq %rax, %rdx movq 0x48(%rsp), %rax movq %rdx, 0x40(%rcx) movl 0x28(%rax), %ecx subl $0x1, %ecx movl %ecx, 0xf0(%rsp) cmpl $0x4, 0x28(%rax) jne 0x164c3be movq 0x48(%rsp), %rcx movslq 0x2c(%rcx), %rax movslq 0x30(%rcx), %rcx imulq %rcx, %rax movq %rax, 0x108(%rsp) movb $0x1, 0x14b(%rsp) testb $0x1, 0x14b(%rsp) jne 0x164c4ed leaq 0xc8(%rsp), %rax movq %rax, 0x168(%rsp) movq 0x168(%rsp), %rax movq %rax, 0x298(%rsp) movq 0x298(%rsp), %rax movq %rax, 0x38(%rsp) cmpq $0x0, 0x8(%rax) je 0x164c493 movq 0x38(%rsp), %rax movq 0x8(%rax), %rcx movl $0xffffffff, 0x294(%rsp) # imm = 0xFFFFFFFF movl 0x294(%rsp), %eax lock xaddl %eax, (%rcx) movl %eax, 0x290(%rsp) cmpl $0x1, 0x290(%rsp) jne 0x164c493 movq 0x38(%rsp), %rax cmpq $0x0, 0x20(%rax) je 0x164c464 movq 0x38(%rsp), %rax movq (%rax), %rsi movq 0x20(%rax), %rdi movq (%rdi), %rax movq 0x18(%rax), %rax vzeroupper callq *%rax jmp 0x164c462 jmp 0x164c491 movq 0x38(%rsp), %rax movq (%rax), %rax movq %rax, 0x2a0(%rsp) cmpq $0x0, 0x2a0(%rsp) je 0x164c48f movq 0x2a0(%rsp), %rdi vzeroupper callq 0x5f480 jmp 0x164c491 jmp 0x164c493 movq 0x38(%rsp), %rax movq $0x0, (%rax) movq $0x0, 0x10(%rax) movl $0x0, 0x18(%rax) movl $0x0, 0x28(%rax) movl $0x0, 0x2c(%rax) movl $0x0, 0x30(%rax) movl $0x0, 0x34(%rax) movl $0x0, 0x38(%rax) movq $0x0, 0x40(%rax) movq $0x0, 0x8(%rax) jmp 0x164c4eb movq %rax, %rdi callq 0x678a0 jmp 0x164c4ed leaq 0xc8(%rsp), %rax movq %rax, 0x160(%rsp) movq 0x160(%rsp), %rax movq (%rax), %rax movq %rax, 0x28(%rsp) 
leaq 0xc8(%rsp), %rax movq %rax, 0x170(%rsp) movq 0x170(%rsp), %rax movq %rax, 0x288(%rsp) movq 0x288(%rsp), %rax movq %rax, 0x30(%rsp) cmpq $0x0, 0x8(%rax) je 0x164c5cc movq 0x30(%rsp), %rax movq 0x8(%rax), %rcx movl $0xffffffff, 0x284(%rsp) # imm = 0xFFFFFFFF movl 0x284(%rsp), %eax lock xaddl %eax, (%rcx) movl %eax, 0x280(%rsp) cmpl $0x1, 0x280(%rsp) jne 0x164c5cc movq 0x30(%rsp), %rax cmpq $0x0, 0x20(%rax) je 0x164c59d movq 0x30(%rsp), %rax movq (%rax), %rsi movq 0x20(%rax), %rdi movq (%rdi), %rax movq 0x18(%rax), %rax vzeroupper callq *%rax jmp 0x164c59b jmp 0x164c5ca movq 0x30(%rsp), %rax movq (%rax), %rax movq %rax, 0x2a8(%rsp) cmpq $0x0, 0x2a8(%rsp) je 0x164c5c8 movq 0x2a8(%rsp), %rdi vzeroupper callq 0x5f480 jmp 0x164c5ca jmp 0x164c5cc movq 0x30(%rsp), %rax movq $0x0, (%rax) movq $0x0, 0x10(%rax) movl $0x0, 0x18(%rax) movl $0x0, 0x28(%rax) movl $0x0, 0x2c(%rax) movl $0x0, 0x30(%rax) movl $0x0, 0x34(%rax) movl $0x0, 0x38(%rax) movq $0x0, 0x40(%rax) movq $0x0, 0x8(%rax) jmp 0x164c624 movq %rax, %rdi callq 0x678a0 movq 0x28(%rsp), %rax movq %rax, 0x110(%rsp) movl $0x0, 0xc4(%rsp) movl 0xc4(%rsp), %eax addl $0xf, %eax cmpl 0x11c(%rsp), %eax jge 0x164c6f6 movq 0x110(%rsp), %rax movq %rax, 0x178(%rsp) movq 0x178(%rsp), %rax vmovups (%rax), %zmm0 vmovaps %zmm0, 0x80(%rsp) leaq 0x137(%rsp), %rdi leaq 0x80(%rsp), %rsi callq 0x1658380 vmovaps %zmm0, 0x80(%rsp) movq 0x110(%rsp), %rax vmovaps 0x80(%rsp), %zmm0 movq %rax, 0x1d0(%rsp) vmovaps %zmm0, 0x180(%rsp) vmovaps 0x180(%rsp), %zmm0 movq 0x1d0(%rsp), %rax vmovups %zmm0, (%rax) movq 0x110(%rsp), %rax addq $0x40, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x10, %eax movl %eax, 0xc4(%rsp) jmp 0x164c63c jmp 0x164c6f8 movl 0xc4(%rsp), %eax addl $0x7, %eax cmpl 0x11c(%rsp), %eax jge 0x164c7a7 movq 0x110(%rsp), %rax movq %rax, 0x1d8(%rsp) movq 0x1d8(%rsp), %rax vmovups (%rax), %ymm0 vmovaps %ymm0, 0x60(%rsp) leaq 0x137(%rsp), %rdi leaq 0x60(%rsp), %rsi callq 0x1658430 vmovaps %ymm0, 0x60(%rsp) movq 0x110(%rsp), %rax vmovaps 0x60(%rsp), %ymm0 movq %rax, 0x210(%rsp) vmovaps %ymm0, 0x1e0(%rsp) vmovaps 0x1e0(%rsp), %ymm0 movq 0x210(%rsp), %rax vmovups %ymm0, (%rax) movq 0x110(%rsp), %rax addq $0x20, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x8, %eax movl %eax, 0xc4(%rsp) jmp 0x164c6f8 jmp 0x164c7a9 movl 0xc4(%rsp), %eax addl $0x3, %eax cmpl 0x11c(%rsp), %eax jge 0x164c85b movq 0x110(%rsp), %rax movq %rax, 0x218(%rsp) movq 0x218(%rsp), %rax vmovaps (%rax), %xmm0 vmovaps %xmm0, 0x50(%rsp) leaq 0x137(%rsp), %rdi leaq 0x50(%rsp), %rsi vzeroupper callq 0x1658510 vmovaps %xmm0, 0x50(%rsp) movq 0x110(%rsp), %rax vmovaps 0x50(%rsp), %xmm0 movq %rax, 0x230(%rsp) vmovaps %xmm0, 0x220(%rsp) vmovaps 0x220(%rsp), %xmm0 movq 0x230(%rsp), %rax vmovaps %xmm0, (%rax) movq 0x110(%rsp), %rax addq $0x10, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x4, %eax movl %eax, 0xc4(%rsp) jmp 0x164c7a9 jmp 0x164c85d movl 0xc4(%rsp), %eax cmpl 0x11c(%rsp), %eax jge 0x164c8b8 movq 0x110(%rsp), %rsi leaq 0x137(%rsp), %rdi vzeroupper callq 0x16585a0 movq 0x110(%rsp), %rax vmovss %xmm0, (%rax) movq 0x110(%rsp), %rax addq $0x4, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x1, %eax movl %eax, 0xc4(%rsp) jmp 0x164c85d jmp 0x164c8ba movl 0x118(%rsp), %eax addl $0x1, %eax movl %eax, 0x118(%rsp) jmp 0x164c1fa xorl %eax, %eax movq %rbp, %rsp popq %rbp vzeroupper retq nopw (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,250
int ncnn::unary_op_inplace<ncnn::UnaryOp_x86_avx512_functor::unary_op_reciprocal>(ncnn::Mat&, ncnn::Option const&)
static int unary_op_inplace(Mat& a, const Option& opt) { Op op; int w = a.w; int h = a.h; int d = a.d; int channels = a.c; int elempack = a.elempack; int size = w * h * d * elempack; #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < channels; q++) { float* ptr = a.channel(q); int i = 0; #if __SSE2__ #if __AVX__ #if __AVX512F__ for (; i + 15 < size; i += 16) { __m512 _p = _mm512_loadu_ps(ptr); _p = op.func_pack16(_p); _mm512_storeu_ps(ptr, _p); ptr += 16; } #endif // __AVX512F__ for (; i + 7 < size; i += 8) { __m256 _p = _mm256_loadu_ps(ptr); _p = op.func_pack8(_p); _mm256_storeu_ps(ptr, _p); ptr += 8; } #endif // __AVX__ for (; i + 3 < size; i += 4) { __m128 _p = _mm_load_ps(ptr); _p = op.func_pack4(_p); _mm_store_ps(ptr, _p); ptr += 4; } #endif // __SSE2__ for (; i < size; i++) { *ptr = op.func(*ptr); ptr++; } } return 0; }
pushq %rbp movq %rsp, %rbp andq $-0x40, %rsp subq $0x2c0, %rsp # imm = 0x2C0 movq %rdi, 0x140(%rsp) movq %rsi, 0x138(%rsp) movq 0x140(%rsp), %rax movl 0x2c(%rax), %eax movl %eax, 0x130(%rsp) movq 0x140(%rsp), %rax movl 0x30(%rax), %eax movl %eax, 0x12c(%rsp) movq 0x140(%rsp), %rax movl 0x34(%rax), %eax movl %eax, 0x128(%rsp) movq 0x140(%rsp), %rax movl 0x38(%rax), %eax movl %eax, 0x124(%rsp) movq 0x140(%rsp), %rax movl 0x18(%rax), %eax movl %eax, 0x120(%rsp) movl 0x130(%rsp), %eax imull 0x12c(%rsp), %eax imull 0x128(%rsp), %eax imull 0x120(%rsp), %eax movl %eax, 0x11c(%rsp) movl $0x0, 0x118(%rsp) movl 0x118(%rsp), %eax cmpl 0x124(%rsp), %eax jge 0x164d060 movq 0x140(%rsp), %rcx movl 0x118(%rsp), %eax leaq 0xc8(%rsp), %rdx movq %rdx, 0x158(%rsp) movq %rcx, 0x150(%rsp) movl %eax, 0x14c(%rsp) movq 0x150(%rsp), %rax movq %rax, 0x48(%rsp) movb $0x0, 0x14b(%rsp) movl 0x2c(%rax), %r9d movl 0x30(%rax), %r8d movl 0x34(%rax), %edi movq (%rax), %rsi movq 0x40(%rax), %rcx movslq 0x14c(%rsp), %rdx imulq %rdx, %rcx imulq 0x10(%rax), %rcx addq %rcx, %rsi movq 0x10(%rax), %rdx movl 0x18(%rax), %ecx movq 0x20(%rax), %rax leaq 0xc8(%rsp), %r10 movq %r10, 0x268(%rsp) movl %r9d, 0x264(%rsp) movl %r8d, 0x260(%rsp) movl %edi, 0x25c(%rsp) movq %rsi, 0x250(%rsp) movq %rdx, 0x248(%rsp) movl %ecx, 0x244(%rsp) movq %rax, 0x238(%rsp) movq 0x268(%rsp), %rcx movq %rcx, 0x40(%rsp) movq 0x250(%rsp), %rax movq %rax, (%rcx) movq $0x0, 0x8(%rcx) movq 0x248(%rsp), %rax movq %rax, 0x10(%rcx) movl 0x244(%rsp), %eax movl %eax, 0x18(%rcx) movq 0x238(%rsp), %rax movq %rax, 0x20(%rcx) movl $0x3, 0x28(%rcx) movl 0x264(%rsp), %eax movl %eax, 0x2c(%rcx) movl 0x260(%rsp), %eax movl %eax, 0x30(%rcx) movl $0x1, 0x34(%rcx) movl 0x25c(%rsp), %eax movl %eax, 0x38(%rcx) movslq 0x2c(%rcx), %rax movslq 0x30(%rcx), %rdx imulq %rdx, %rax imulq 0x10(%rcx), %rax movq %rax, 0x278(%rsp) movl $0x10, 0x274(%rsp) movq 0x278(%rsp), %rax movslq 0x274(%rsp), %rdx addq %rdx, %rax subq $0x1, %rax xorl %edx, %edx subl 0x274(%rsp), %edx movslq %edx, %rdx andq %rdx, %rax xorl %edx, %edx divq 0x10(%rcx) movq %rax, %rdx movq 0x48(%rsp), %rax movq %rdx, 0x40(%rcx) movl 0x28(%rax), %ecx subl $0x1, %ecx movl %ecx, 0xf0(%rsp) cmpl $0x4, 0x28(%rax) jne 0x164cb4e movq 0x48(%rsp), %rcx movslq 0x2c(%rcx), %rax movslq 0x30(%rcx), %rcx imulq %rcx, %rax movq %rax, 0x108(%rsp) movb $0x1, 0x14b(%rsp) testb $0x1, 0x14b(%rsp) jne 0x164cc7d leaq 0xc8(%rsp), %rax movq %rax, 0x168(%rsp) movq 0x168(%rsp), %rax movq %rax, 0x298(%rsp) movq 0x298(%rsp), %rax movq %rax, 0x38(%rsp) cmpq $0x0, 0x8(%rax) je 0x164cc23 movq 0x38(%rsp), %rax movq 0x8(%rax), %rcx movl $0xffffffff, 0x294(%rsp) # imm = 0xFFFFFFFF movl 0x294(%rsp), %eax lock xaddl %eax, (%rcx) movl %eax, 0x290(%rsp) cmpl $0x1, 0x290(%rsp) jne 0x164cc23 movq 0x38(%rsp), %rax cmpq $0x0, 0x20(%rax) je 0x164cbf4 movq 0x38(%rsp), %rax movq (%rax), %rsi movq 0x20(%rax), %rdi movq (%rdi), %rax movq 0x18(%rax), %rax vzeroupper callq *%rax jmp 0x164cbf2 jmp 0x164cc21 movq 0x38(%rsp), %rax movq (%rax), %rax movq %rax, 0x2a0(%rsp) cmpq $0x0, 0x2a0(%rsp) je 0x164cc1f movq 0x2a0(%rsp), %rdi vzeroupper callq 0x5f480 jmp 0x164cc21 jmp 0x164cc23 movq 0x38(%rsp), %rax movq $0x0, (%rax) movq $0x0, 0x10(%rax) movl $0x0, 0x18(%rax) movl $0x0, 0x28(%rax) movl $0x0, 0x2c(%rax) movl $0x0, 0x30(%rax) movl $0x0, 0x34(%rax) movl $0x0, 0x38(%rax) movq $0x0, 0x40(%rax) movq $0x0, 0x8(%rax) jmp 0x164cc7b movq %rax, %rdi callq 0x678a0 jmp 0x164cc7d leaq 0xc8(%rsp), %rax movq %rax, 0x160(%rsp) movq 0x160(%rsp), %rax movq (%rax), %rax movq %rax, 0x28(%rsp) 
leaq 0xc8(%rsp), %rax movq %rax, 0x170(%rsp) movq 0x170(%rsp), %rax movq %rax, 0x288(%rsp) movq 0x288(%rsp), %rax movq %rax, 0x30(%rsp) cmpq $0x0, 0x8(%rax) je 0x164cd5c movq 0x30(%rsp), %rax movq 0x8(%rax), %rcx movl $0xffffffff, 0x284(%rsp) # imm = 0xFFFFFFFF movl 0x284(%rsp), %eax lock xaddl %eax, (%rcx) movl %eax, 0x280(%rsp) cmpl $0x1, 0x280(%rsp) jne 0x164cd5c movq 0x30(%rsp), %rax cmpq $0x0, 0x20(%rax) je 0x164cd2d movq 0x30(%rsp), %rax movq (%rax), %rsi movq 0x20(%rax), %rdi movq (%rdi), %rax movq 0x18(%rax), %rax vzeroupper callq *%rax jmp 0x164cd2b jmp 0x164cd5a movq 0x30(%rsp), %rax movq (%rax), %rax movq %rax, 0x2a8(%rsp) cmpq $0x0, 0x2a8(%rsp) je 0x164cd58 movq 0x2a8(%rsp), %rdi vzeroupper callq 0x5f480 jmp 0x164cd5a jmp 0x164cd5c movq 0x30(%rsp), %rax movq $0x0, (%rax) movq $0x0, 0x10(%rax) movl $0x0, 0x18(%rax) movl $0x0, 0x28(%rax) movl $0x0, 0x2c(%rax) movl $0x0, 0x30(%rax) movl $0x0, 0x34(%rax) movl $0x0, 0x38(%rax) movq $0x0, 0x40(%rax) movq $0x0, 0x8(%rax) jmp 0x164cdb4 movq %rax, %rdi callq 0x678a0 movq 0x28(%rsp), %rax movq %rax, 0x110(%rsp) movl $0x0, 0xc4(%rsp) movl 0xc4(%rsp), %eax addl $0xf, %eax cmpl 0x11c(%rsp), %eax jge 0x164ce86 movq 0x110(%rsp), %rax movq %rax, 0x178(%rsp) movq 0x178(%rsp), %rax vmovups (%rax), %zmm0 vmovaps %zmm0, 0x80(%rsp) leaq 0x137(%rsp), %rdi leaq 0x80(%rsp), %rsi callq 0x16585d0 vmovaps %zmm0, 0x80(%rsp) movq 0x110(%rsp), %rax vmovaps 0x80(%rsp), %zmm0 movq %rax, 0x1d0(%rsp) vmovaps %zmm0, 0x180(%rsp) vmovaps 0x180(%rsp), %zmm0 movq 0x1d0(%rsp), %rax vmovups %zmm0, (%rax) movq 0x110(%rsp), %rax addq $0x40, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x10, %eax movl %eax, 0xc4(%rsp) jmp 0x164cdcc jmp 0x164ce88 movl 0xc4(%rsp), %eax addl $0x7, %eax cmpl 0x11c(%rsp), %eax jge 0x164cf37 movq 0x110(%rsp), %rax movq %rax, 0x1d8(%rsp) movq 0x1d8(%rsp), %rax vmovups (%rax), %ymm0 vmovaps %ymm0, 0x60(%rsp) leaq 0x137(%rsp), %rdi leaq 0x60(%rsp), %rsi callq 0x1658630 vmovaps %ymm0, 0x60(%rsp) movq 0x110(%rsp), %rax vmovaps 0x60(%rsp), %ymm0 movq %rax, 0x210(%rsp) vmovaps %ymm0, 0x1e0(%rsp) vmovaps 0x1e0(%rsp), %ymm0 movq 0x210(%rsp), %rax vmovups %ymm0, (%rax) movq 0x110(%rsp), %rax addq $0x20, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x8, %eax movl %eax, 0xc4(%rsp) jmp 0x164ce88 jmp 0x164cf39 movl 0xc4(%rsp), %eax addl $0x3, %eax cmpl 0x11c(%rsp), %eax jge 0x164cfeb movq 0x110(%rsp), %rax movq %rax, 0x218(%rsp) movq 0x218(%rsp), %rax vmovaps (%rax), %xmm0 vmovaps %xmm0, 0x50(%rsp) leaq 0x137(%rsp), %rdi leaq 0x50(%rsp), %rsi vzeroupper callq 0x1658680 vmovaps %xmm0, 0x50(%rsp) movq 0x110(%rsp), %rax vmovaps 0x50(%rsp), %xmm0 movq %rax, 0x230(%rsp) vmovaps %xmm0, 0x220(%rsp) vmovaps 0x220(%rsp), %xmm0 movq 0x230(%rsp), %rax vmovaps %xmm0, (%rax) movq 0x110(%rsp), %rax addq $0x10, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x4, %eax movl %eax, 0xc4(%rsp) jmp 0x164cf39 jmp 0x164cfed movl 0xc4(%rsp), %eax cmpl 0x11c(%rsp), %eax jge 0x164d048 movq 0x110(%rsp), %rsi leaq 0x137(%rsp), %rdi vzeroupper callq 0x16586c0 movq 0x110(%rsp), %rax vmovss %xmm0, (%rax) movq 0x110(%rsp), %rax addq $0x4, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x1, %eax movl %eax, 0xc4(%rsp) jmp 0x164cfed jmp 0x164d04a movl 0x118(%rsp), %eax addl $0x1, %eax movl %eax, 0x118(%rsp) jmp 0x164c98a xorl %eax, %eax movq %rbp, %rsp popq %rbp vzeroupper retq nopw (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,251
int ncnn::unary_op_inplace<ncnn::UnaryOp_x86_avx512_functor::unary_op_tanh>(ncnn::Mat&, ncnn::Option const&)
static int unary_op_inplace(Mat& a, const Option& opt) { Op op; int w = a.w; int h = a.h; int d = a.d; int channels = a.c; int elempack = a.elempack; int size = w * h * d * elempack; #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < channels; q++) { float* ptr = a.channel(q); int i = 0; #if __SSE2__ #if __AVX__ #if __AVX512F__ for (; i + 15 < size; i += 16) { __m512 _p = _mm512_loadu_ps(ptr); _p = op.func_pack16(_p); _mm512_storeu_ps(ptr, _p); ptr += 16; } #endif // __AVX512F__ for (; i + 7 < size; i += 8) { __m256 _p = _mm256_loadu_ps(ptr); _p = op.func_pack8(_p); _mm256_storeu_ps(ptr, _p); ptr += 8; } #endif // __AVX__ for (; i + 3 < size; i += 4) { __m128 _p = _mm_load_ps(ptr); _p = op.func_pack4(_p); _mm_store_ps(ptr, _p); ptr += 4; } #endif // __SSE2__ for (; i < size; i++) { *ptr = op.func(*ptr); ptr++; } } return 0; }
pushq %rbp movq %rsp, %rbp andq $-0x40, %rsp subq $0x2c0, %rsp # imm = 0x2C0 movq %rdi, 0x140(%rsp) movq %rsi, 0x138(%rsp) movq 0x140(%rsp), %rax movl 0x2c(%rax), %eax movl %eax, 0x130(%rsp) movq 0x140(%rsp), %rax movl 0x30(%rax), %eax movl %eax, 0x12c(%rsp) movq 0x140(%rsp), %rax movl 0x34(%rax), %eax movl %eax, 0x128(%rsp) movq 0x140(%rsp), %rax movl 0x38(%rax), %eax movl %eax, 0x124(%rsp) movq 0x140(%rsp), %rax movl 0x18(%rax), %eax movl %eax, 0x120(%rsp) movl 0x130(%rsp), %eax imull 0x12c(%rsp), %eax imull 0x128(%rsp), %eax imull 0x120(%rsp), %eax movl %eax, 0x11c(%rsp) movl $0x0, 0x118(%rsp) movl 0x118(%rsp), %eax cmpl 0x124(%rsp), %eax jge 0x164d7f0 movq 0x140(%rsp), %rcx movl 0x118(%rsp), %eax leaq 0xc8(%rsp), %rdx movq %rdx, 0x158(%rsp) movq %rcx, 0x150(%rsp) movl %eax, 0x14c(%rsp) movq 0x150(%rsp), %rax movq %rax, 0x48(%rsp) movb $0x0, 0x14b(%rsp) movl 0x2c(%rax), %r9d movl 0x30(%rax), %r8d movl 0x34(%rax), %edi movq (%rax), %rsi movq 0x40(%rax), %rcx movslq 0x14c(%rsp), %rdx imulq %rdx, %rcx imulq 0x10(%rax), %rcx addq %rcx, %rsi movq 0x10(%rax), %rdx movl 0x18(%rax), %ecx movq 0x20(%rax), %rax leaq 0xc8(%rsp), %r10 movq %r10, 0x268(%rsp) movl %r9d, 0x264(%rsp) movl %r8d, 0x260(%rsp) movl %edi, 0x25c(%rsp) movq %rsi, 0x250(%rsp) movq %rdx, 0x248(%rsp) movl %ecx, 0x244(%rsp) movq %rax, 0x238(%rsp) movq 0x268(%rsp), %rcx movq %rcx, 0x40(%rsp) movq 0x250(%rsp), %rax movq %rax, (%rcx) movq $0x0, 0x8(%rcx) movq 0x248(%rsp), %rax movq %rax, 0x10(%rcx) movl 0x244(%rsp), %eax movl %eax, 0x18(%rcx) movq 0x238(%rsp), %rax movq %rax, 0x20(%rcx) movl $0x3, 0x28(%rcx) movl 0x264(%rsp), %eax movl %eax, 0x2c(%rcx) movl 0x260(%rsp), %eax movl %eax, 0x30(%rcx) movl $0x1, 0x34(%rcx) movl 0x25c(%rsp), %eax movl %eax, 0x38(%rcx) movslq 0x2c(%rcx), %rax movslq 0x30(%rcx), %rdx imulq %rdx, %rax imulq 0x10(%rcx), %rax movq %rax, 0x278(%rsp) movl $0x10, 0x274(%rsp) movq 0x278(%rsp), %rax movslq 0x274(%rsp), %rdx addq %rdx, %rax subq $0x1, %rax xorl %edx, %edx subl 0x274(%rsp), %edx movslq %edx, %rdx andq %rdx, %rax xorl %edx, %edx divq 0x10(%rcx) movq %rax, %rdx movq 0x48(%rsp), %rax movq %rdx, 0x40(%rcx) movl 0x28(%rax), %ecx subl $0x1, %ecx movl %ecx, 0xf0(%rsp) cmpl $0x4, 0x28(%rax) jne 0x164d2de movq 0x48(%rsp), %rcx movslq 0x2c(%rcx), %rax movslq 0x30(%rcx), %rcx imulq %rcx, %rax movq %rax, 0x108(%rsp) movb $0x1, 0x14b(%rsp) testb $0x1, 0x14b(%rsp) jne 0x164d40d leaq 0xc8(%rsp), %rax movq %rax, 0x168(%rsp) movq 0x168(%rsp), %rax movq %rax, 0x298(%rsp) movq 0x298(%rsp), %rax movq %rax, 0x38(%rsp) cmpq $0x0, 0x8(%rax) je 0x164d3b3 movq 0x38(%rsp), %rax movq 0x8(%rax), %rcx movl $0xffffffff, 0x294(%rsp) # imm = 0xFFFFFFFF movl 0x294(%rsp), %eax lock xaddl %eax, (%rcx) movl %eax, 0x290(%rsp) cmpl $0x1, 0x290(%rsp) jne 0x164d3b3 movq 0x38(%rsp), %rax cmpq $0x0, 0x20(%rax) je 0x164d384 movq 0x38(%rsp), %rax movq (%rax), %rsi movq 0x20(%rax), %rdi movq (%rdi), %rax movq 0x18(%rax), %rax vzeroupper callq *%rax jmp 0x164d382 jmp 0x164d3b1 movq 0x38(%rsp), %rax movq (%rax), %rax movq %rax, 0x2a0(%rsp) cmpq $0x0, 0x2a0(%rsp) je 0x164d3af movq 0x2a0(%rsp), %rdi vzeroupper callq 0x5f480 jmp 0x164d3b1 jmp 0x164d3b3 movq 0x38(%rsp), %rax movq $0x0, (%rax) movq $0x0, 0x10(%rax) movl $0x0, 0x18(%rax) movl $0x0, 0x28(%rax) movl $0x0, 0x2c(%rax) movl $0x0, 0x30(%rax) movl $0x0, 0x34(%rax) movl $0x0, 0x38(%rax) movq $0x0, 0x40(%rax) movq $0x0, 0x8(%rax) jmp 0x164d40b movq %rax, %rdi callq 0x678a0 jmp 0x164d40d leaq 0xc8(%rsp), %rax movq %rax, 0x160(%rsp) movq 0x160(%rsp), %rax movq (%rax), %rax movq %rax, 0x28(%rsp) 
leaq 0xc8(%rsp), %rax movq %rax, 0x170(%rsp) movq 0x170(%rsp), %rax movq %rax, 0x288(%rsp) movq 0x288(%rsp), %rax movq %rax, 0x30(%rsp) cmpq $0x0, 0x8(%rax) je 0x164d4ec movq 0x30(%rsp), %rax movq 0x8(%rax), %rcx movl $0xffffffff, 0x284(%rsp) # imm = 0xFFFFFFFF movl 0x284(%rsp), %eax lock xaddl %eax, (%rcx) movl %eax, 0x280(%rsp) cmpl $0x1, 0x280(%rsp) jne 0x164d4ec movq 0x30(%rsp), %rax cmpq $0x0, 0x20(%rax) je 0x164d4bd movq 0x30(%rsp), %rax movq (%rax), %rsi movq 0x20(%rax), %rdi movq (%rdi), %rax movq 0x18(%rax), %rax vzeroupper callq *%rax jmp 0x164d4bb jmp 0x164d4ea movq 0x30(%rsp), %rax movq (%rax), %rax movq %rax, 0x2a8(%rsp) cmpq $0x0, 0x2a8(%rsp) je 0x164d4e8 movq 0x2a8(%rsp), %rdi vzeroupper callq 0x5f480 jmp 0x164d4ea jmp 0x164d4ec movq 0x30(%rsp), %rax movq $0x0, (%rax) movq $0x0, 0x10(%rax) movl $0x0, 0x18(%rax) movl $0x0, 0x28(%rax) movl $0x0, 0x2c(%rax) movl $0x0, 0x30(%rax) movl $0x0, 0x34(%rax) movl $0x0, 0x38(%rax) movq $0x0, 0x40(%rax) movq $0x0, 0x8(%rax) jmp 0x164d544 movq %rax, %rdi callq 0x678a0 movq 0x28(%rsp), %rax movq %rax, 0x110(%rsp) movl $0x0, 0xc4(%rsp) movl 0xc4(%rsp), %eax addl $0xf, %eax cmpl 0x11c(%rsp), %eax jge 0x164d616 movq 0x110(%rsp), %rax movq %rax, 0x178(%rsp) movq 0x178(%rsp), %rax vmovups (%rax), %zmm0 vmovaps %zmm0, 0x80(%rsp) leaq 0x137(%rsp), %rdi leaq 0x80(%rsp), %rsi callq 0x16586e0 vmovaps %zmm0, 0x80(%rsp) movq 0x110(%rsp), %rax vmovaps 0x80(%rsp), %zmm0 movq %rax, 0x1d0(%rsp) vmovaps %zmm0, 0x180(%rsp) vmovaps 0x180(%rsp), %zmm0 movq 0x1d0(%rsp), %rax vmovups %zmm0, (%rax) movq 0x110(%rsp), %rax addq $0x40, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x10, %eax movl %eax, 0xc4(%rsp) jmp 0x164d55c jmp 0x164d618 movl 0xc4(%rsp), %eax addl $0x7, %eax cmpl 0x11c(%rsp), %eax jge 0x164d6c7 movq 0x110(%rsp), %rax movq %rax, 0x1d8(%rsp) movq 0x1d8(%rsp), %rax vmovups (%rax), %ymm0 vmovaps %ymm0, 0x60(%rsp) leaq 0x137(%rsp), %rdi leaq 0x60(%rsp), %rsi callq 0x1658f00 vmovaps %ymm0, 0x60(%rsp) movq 0x110(%rsp), %rax vmovaps 0x60(%rsp), %ymm0 movq %rax, 0x210(%rsp) vmovaps %ymm0, 0x1e0(%rsp) vmovaps 0x1e0(%rsp), %ymm0 movq 0x210(%rsp), %rax vmovups %ymm0, (%rax) movq 0x110(%rsp), %rax addq $0x20, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x8, %eax movl %eax, 0xc4(%rsp) jmp 0x164d618 jmp 0x164d6c9 movl 0xc4(%rsp), %eax addl $0x3, %eax cmpl 0x11c(%rsp), %eax jge 0x164d77b movq 0x110(%rsp), %rax movq %rax, 0x218(%rsp) movq 0x218(%rsp), %rax vmovaps (%rax), %xmm0 vmovaps %xmm0, 0x50(%rsp) leaq 0x137(%rsp), %rdi leaq 0x50(%rsp), %rsi vzeroupper callq 0x1659bd0 vmovaps %xmm0, 0x50(%rsp) movq 0x110(%rsp), %rax vmovaps 0x50(%rsp), %xmm0 movq %rax, 0x230(%rsp) vmovaps %xmm0, 0x220(%rsp) vmovaps 0x220(%rsp), %xmm0 movq 0x230(%rsp), %rax vmovaps %xmm0, (%rax) movq 0x110(%rsp), %rax addq $0x10, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x4, %eax movl %eax, 0xc4(%rsp) jmp 0x164d6c9 jmp 0x164d77d movl 0xc4(%rsp), %eax cmpl 0x11c(%rsp), %eax jge 0x164d7d8 movq 0x110(%rsp), %rsi leaq 0x137(%rsp), %rdi vzeroupper callq 0x165a600 movq 0x110(%rsp), %rax vmovss %xmm0, (%rax) movq 0x110(%rsp), %rax addq $0x4, %rax movq %rax, 0x110(%rsp) movl 0xc4(%rsp), %eax addl $0x1, %eax movl %eax, 0xc4(%rsp) jmp 0x164d77d jmp 0x164d7da movl 0x118(%rsp), %eax addl $0x1, %eax movl %eax, 0x118(%rsp) jmp 0x164d11a xorl %eax, %eax movq %rbp, %rsp popq %rbp vzeroupper retq nopw (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,252
virtual thunk to ncnn::UnaryOp_x86_avx512::forward_inplace(ncnn::Mat&, ncnn::Option const&) const
int UnaryOp_x86_avx512::forward_inplace(Mat& bottom_top_blob, const Option& opt) const { using namespace UnaryOp_x86_avx512_functor; if (op_type == Operation_ABS) return unary_op_inplace<unary_op_abs>(bottom_top_blob, opt); if (op_type == Operation_NEG) return unary_op_inplace<unary_op_neg>(bottom_top_blob, opt); if (op_type == Operation_FLOOR) return unary_op_inplace<unary_op_floor>(bottom_top_blob, opt); if (op_type == Operation_CEIL) return unary_op_inplace<unary_op_ceil>(bottom_top_blob, opt); if (op_type == Operation_SQUARE) return unary_op_inplace<unary_op_square>(bottom_top_blob, opt); if (op_type == Operation_SQRT) return unary_op_inplace<unary_op_sqrt>(bottom_top_blob, opt); if (op_type == Operation_RSQRT) return unary_op_inplace<unary_op_rsqrt>(bottom_top_blob, opt); if (op_type == Operation_EXP) return unary_op_inplace<unary_op_exp>(bottom_top_blob, opt); if (op_type == Operation_LOG) return unary_op_inplace<unary_op_log>(bottom_top_blob, opt); if (op_type == Operation_SIN) return unary_op_inplace<unary_op_sin>(bottom_top_blob, opt); if (op_type == Operation_COS) return unary_op_inplace<unary_op_cos>(bottom_top_blob, opt); if (op_type == Operation_TAN) return unary_op_inplace<unary_op_tan>(bottom_top_blob, opt); if (op_type == Operation_ASIN) return unary_op_inplace<unary_op_asin>(bottom_top_blob, opt); if (op_type == Operation_ACOS) return unary_op_inplace<unary_op_acos>(bottom_top_blob, opt); if (op_type == Operation_ATAN) return unary_op_inplace<unary_op_atan>(bottom_top_blob, opt); if (op_type == Operation_RECIPROCAL) return unary_op_inplace<unary_op_reciprocal>(bottom_top_blob, opt); if (op_type == Operation_TANH) return unary_op_inplace<unary_op_tanh>(bottom_top_blob, opt); return 0; }
movq %rdi, -0x8(%rsp) movq %rsi, -0x10(%rsp) movq %rdx, -0x18(%rsp) movq -0x8(%rsp), %rdi movq (%rdi), %rax movq -0x58(%rax), %rax addq %rax, %rdi movq -0x10(%rsp), %rsi movq -0x18(%rsp), %rdx jmp 0x1645450 nopl (%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
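The short body above is not forward_inplace itself but its virtual thunk: it loads the vtable, fetches the this-adjustment offset stored at -0x58 in it, adds that offset to %rdi, and tail-jumps to the real implementation at 0x1645450. Under the Itanium C++ ABI such virtual thunks are emitted when a virtual function is overridden in a class that reaches its interface through a virtual base, which matches how ncnn layer classes inherit. A minimal sketch with hypothetical types (assuming the Itanium ABI; compilers emit a "virtual thunk to Derived::f()" symbol for this shape):

// The virtual base puts the Base subobject at a vtable-described offset, so
// calls through Base* must go via a thunk that loads the adjustment from the
// vtable before jumping to Derived::f — the movq -0xNN(%rax) pattern above.
#include <cstdio>

struct Base
{
    virtual ~Base() {}
    virtual void f() const = 0;
};

struct Mid : virtual Base
{
    int payload = 0;
};

struct Derived : Mid
{
    void f() const override { printf("Derived::f this=%p\n", (void*)this); }
};

int main()
{
    Derived d;
    const Base* b = &d; // points at the virtual Base subobject
    b->f();             // dispatches through the virtual thunk
}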
2,113,253
ncnn::UnaryOp_x86_avx512_functor::unary_op_abs::func_pack16(float vector[16] const&) const
__m512 func_pack16(const __m512& x) const { return abs_avx512(x); }
pushq %rbp movq %rsp, %rbp andq $-0x40, %rsp subq $0x200, %rsp # imm = 0x200 movq %rdi, 0x38(%rsp) movq %rsi, 0x30(%rsp) movq 0x30(%rsp), %rax vmovaps (%rax), %zmm0 vmovaps %zmm0, 0x40(%rsp) vmovaps 0x40(%rsp), %zmm0 vmovaps %zmm0, 0x140(%rsp) vmovdqa64 0x140(%rsp), %zmm1 movl $0x7fffffff, 0x1ec(%rsp) # imm = 0x7FFFFFFF vpbroadcastd 0x1ec(%rsp), %zmm0 vmovdqa64 %zmm0, 0x180(%rsp) vmovdqa64 0x180(%rsp), %zmm0 vmovdqa64 %zmm1, 0x100(%rsp) vmovdqa64 %zmm0, 0xc0(%rsp) vmovdqa64 0x100(%rsp), %zmm0 vmovdqa64 0xc0(%rsp), %zmm1 vpandd %zmm1, %zmm0, %zmm0 vmovdqa64 %zmm0, 0x80(%rsp) vmovdqa64 0x80(%rsp), %zmm0 movq %rbp, %rsp popq %rbp retq nopw %cs:(%rax,%rax) nopl (%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
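func_pack16 above implements abs by broadcasting 0x7fffffff and ANDing it into every lane (the vpbroadcastd + vpandd pair in the asm): clearing bit 31 of an IEEE-754 single yields its absolute value. A scalar sketch of the same bit trick (illustration only):

// Clearing the sign bit of an IEEE-754 float is fabs, no FP arithmetic needed.
#include <cstdint>
#include <cstring>
#include <cstdio>

static float abs_by_mask(float x)
{
    uint32_t bits;
    std::memcpy(&bits, &x, sizeof(bits)); // safe type pun
    bits &= 0x7fffffffu;                  // drop bit 31, the sign
    std::memcpy(&x, &bits, sizeof(x));
    return x;
}

int main()
{
    printf("%f %f\n", abs_by_mask(-3.5f), abs_by_mask(2.0f)); // 3.500000 2.000000
}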
2,113,254
ncnn::UnaryOp_x86_avx512_functor::unary_op_abs::func_pack8(float vector[8] const&) const
__m256 func_pack8(const __m256& x) const { return abs_avx(x); }
pushq %rbp movq %rsp, %rbp andq $-0x20, %rsp subq $0x100, %rsp # imm = 0x100 movq %rdi, 0x18(%rsp) movq %rsi, 0x10(%rsp) movq 0x10(%rsp), %rax vmovaps (%rax), %ymm0 vmovaps %ymm0, 0x20(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovaps %ymm0, 0xc0(%rsp) vmovaps 0xc0(%rsp), %ymm1 vmovaps 0x20(%rsp), %ymm0 vmovaps %ymm1, 0xa0(%rsp) vmovaps %ymm0, 0x80(%rsp) vmovaps 0xa0(%rsp), %ymm0 vsubps 0x80(%rsp), %ymm0, %ymm1 vmovaps 0x20(%rsp), %ymm0 vmovaps %ymm1, 0x60(%rsp) vmovaps %ymm0, 0x40(%rsp) vmovaps 0x60(%rsp), %ymm0 vmovaps 0x40(%rsp), %ymm1 vmaxps %ymm1, %ymm0, %ymm0 movq %rbp, %rsp popq %rbp retq nop
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
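The pack8 path takes a different route, as the vpxor/vsubps/vmaxps sequence shows: abs(x) = max(0.0f - x, x), needing no mask constant. A scalar sketch (illustration only; the exact result for signed zeros depends on maxps operand order, which returns the second source when both inputs compare equal):

// Scalar sketch of the AVX abs path above: subtract from zero, keep the max.
#include <algorithm>
#include <cstdio>

static float abs_by_max(float x)
{
    return std::max(0.0f - x, x);
}

int main()
{
    printf("%f %f\n", abs_by_max(-1.25f), abs_by_max(7.0f)); // 1.250000 7.000000
}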
2,113,255
ncnn::UnaryOp_x86_avx512_functor::unary_op_abs::func_pack4(float vector[4] const&) const
__m128 func_pack4(const __m128& x) const { return abs_sse(x); }
movq %rdi, -0x70(%rsp) movq %rsi, -0x78(%rsp) movq -0x78(%rsp), %rax vmovaps (%rax), %xmm0 vmovaps %xmm0, -0x58(%rsp) movl $0x80000000, -0x3c(%rsp) # imm = 0x80000000 vmovss -0x3c(%rsp), %xmm0 vmovss %xmm0, -0x4(%rsp) vbroadcastss -0x4(%rsp), %xmm0 vmovaps %xmm0, -0x18(%rsp) vmovaps -0x18(%rsp), %xmm0 vmovaps %xmm0, -0x68(%rsp) vmovaps -0x68(%rsp), %xmm1 vmovaps -0x58(%rsp), %xmm0 vmovaps %xmm1, -0x28(%rsp) vmovaps %xmm0, -0x38(%rsp) vmovdqa -0x28(%rsp), %xmm0 vpternlogq $0xf, %xmm0, %xmm0, %xmm0 vmovaps -0x38(%rsp), %xmm1 vpand %xmm1, %xmm0, %xmm0 retq nopw %cs:(%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
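The pack4 path is the classic andnot idiom: broadcast -0.0f (0x80000000 per lane) and compute (~mask) & x. One detail worth noting in the asm above: with AVX-512VL available, the compiler synthesizes the NOT via vpternlogq with truth table 0x0f (bitwise NOT of the first operand) followed by vpand, rather than a dedicated andnot instruction. A direct intrinsics sketch of the source-level idiom (illustration only, SSE2 is enough to run it):

// andnot with a broadcast sign bit clears bit 31 in every lane: lane-wise fabs.
#include <immintrin.h>
#include <cstdio>

static __m128 abs_andnot(__m128 x)
{
    const __m128 sign = _mm_set1_ps(-0.0f); // 0x80000000 in each lane
    return _mm_andnot_ps(sign, x);          // (~sign) & x
}

int main()
{
    float out[4];
    _mm_storeu_ps(out, abs_andnot(_mm_setr_ps(-1.0f, 2.0f, -0.0f, -8.0f)));
    printf("%f %f %f %f\n", out[0], out[1], out[2], out[3]); // 1 2 0 8
}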
2,113,256
ncnn::UnaryOp_x86_avx512_functor::unary_op_abs::func(float const&) const
float func(const float& x) const { return (float)fabs(x); }
subq $0x18, %rsp movq %rdi, 0x10(%rsp) movq %rsi, 0x8(%rsp) movq 0x8(%rsp), %rax vmovss (%rax), %xmm0 callq 0x102e2f0 addq $0x18, %rsp retq nopw %cs:(%rax,%rax) nopl (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,257
ncnn::UnaryOp_x86_avx512_functor::unary_op_neg::func_pack16(float vector[16] const&) const
__m512 func_pack16(const __m512& x) const { return _mm512_sub_ps(_mm512_setzero_ps(), x); }
pushq %rbp movq %rsp, %rbp andq $-0x40, %rsp subq $0x140, %rsp # imm = 0x140 movq %rdi, 0x38(%rsp) movq %rsi, 0x30(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovaps %zmm0, 0xc0(%rsp) vmovaps 0xc0(%rsp), %zmm1 movq 0x30(%rsp), %rax vmovaps (%rax), %zmm0 vmovaps %zmm1, 0x80(%rsp) vmovaps %zmm0, 0x40(%rsp) vmovaps 0x80(%rsp), %zmm0 vsubps 0x40(%rsp), %zmm0, %zmm0 movq %rbp, %rsp popq %rbp retq nopl (%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,258
ncnn::UnaryOp_x86_avx512_functor::unary_op_neg::func_pack8(float vector[8] const&) const
__m256 func_pack8(const __m256& x) const { return _mm256_sub_ps(_mm256_setzero_ps(), x); }
pushq %rbp movq %rsp, %rbp andq $-0x20, %rsp subq $0xa0, %rsp movq %rdi, 0x18(%rsp) movq %rsi, 0x10(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovaps %ymm0, 0x60(%rsp) vmovaps 0x60(%rsp), %ymm1 movq 0x10(%rsp), %rax vmovaps (%rax), %ymm0 vmovaps %ymm1, 0x40(%rsp) vmovaps %ymm0, 0x20(%rsp) vmovaps 0x40(%rsp), %ymm0 vsubps 0x20(%rsp), %ymm0, %ymm0 movq %rbp, %rsp popq %rbp retq nop
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,259
ncnn::UnaryOp_x86_avx512_functor::unary_op_neg::func_pack4(float vector[4] const&) const
__m128 func_pack4(const __m128& x) const { return _mm_sub_ps(_mm_setzero_ps(), x); }
movq %rdi, -0x40(%rsp) movq %rsi, -0x48(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovaps %xmm0, -0x18(%rsp) vmovaps -0x18(%rsp), %xmm1 movq -0x48(%rsp), %rax vmovaps (%rax), %xmm0 vmovaps %xmm1, -0x28(%rsp) vmovaps %xmm0, -0x38(%rsp) vmovaps -0x28(%rsp), %xmm0 vsubps -0x38(%rsp), %xmm0, %xmm0 retq nopl (%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,260
ncnn::UnaryOp_x86_avx512_functor::unary_op_neg::func(float const&) const
float func(const float& x) const { return -x; }
movq %rdi, -0x8(%rsp) movq %rsi, -0x10(%rsp) movq -0x10(%rsp), %rax vmovss (%rax), %xmm0 vmovd %xmm0, %eax xorl $0x80000000, %eax # imm = 0x80000000 vmovd %eax, %xmm0 retq nopw %cs:(%rax,%rax) nopl (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
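The scalar neg above compiles to integer moves plus xorl $0x80000000: flipping the sign bit is the entire operation, with no floating-point arithmetic involved (which also means -(-0.0f) correctly yields +0.0f). A scalar sketch of the same bit manipulation (illustration only):

// Negating an IEEE-754 float is a single XOR of bit 31, matching the asm above.
#include <cstdint>
#include <cstring>
#include <cstdio>

static float neg_by_xor(float x)
{
    uint32_t bits;
    std::memcpy(&bits, &x, sizeof(bits));
    bits ^= 0x80000000u; // flip the sign bit
    std::memcpy(&x, &bits, sizeof(x));
    return x;
}

int main()
{
    printf("%f %f\n", neg_by_xor(1.5f), neg_by_xor(-0.0f)); // -1.500000 0.000000
}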
2,113,261
ncnn::UnaryOp_x86_avx512_functor::unary_op_floor::func_pack16(float vector[16] const&) const
__m512 func_pack16(const __m512& x) const { return _mm512_roundscale_ps(x, _MM_FROUND_TO_NEG_INF); }
movq %rdi, -0x8(%rsp) movq %rsi, -0x10(%rsp) movq -0x10(%rsp), %rax vmovaps (%rax), %zmm0 vrndscaleps $0x1, %zmm0, %zmm0 retq nopl (%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
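At 512-bit width there is no separate floor or ceil instruction; vrndscaleps covers both, with the low bits of the immediate selecting the rounding mode: 1 = toward negative infinity (floor, this record) and 2 = toward positive infinity (ceil, the unary_op_ceil records below). A minimal sketch (requires an AVX-512F machine to actually run):

// _MM_FROUND_TO_NEG_INF (1) floors, _MM_FROUND_TO_POS_INF (2) ceils; the upper
// immediate bits of roundscale encode a scale factor, zero here.
#include <immintrin.h>
#include <cstdio>

int main()
{
    __m512 v = _mm512_set1_ps(-2.5f);
    float f[16], c[16];
    _mm512_storeu_ps(f, _mm512_roundscale_ps(v, _MM_FROUND_TO_NEG_INF));
    _mm512_storeu_ps(c, _mm512_roundscale_ps(v, _MM_FROUND_TO_POS_INF));
    printf("floor(-2.5)=%f ceil(-2.5)=%f\n", f[0], c[0]); // -3.000000 -2.000000
}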
2,113,262
ncnn::UnaryOp_x86_avx512_functor::unary_op_floor::func_pack8(float vector[8] const&) const
__m256 func_pack8(const __m256& x) const { return _mm256_floor_ps(x); }
movq %rdi, -0x8(%rsp) movq %rsi, -0x10(%rsp) movq -0x10(%rsp), %rax vmovaps (%rax), %ymm0 vroundps $0x1, %ymm0, %ymm0 retq nopw (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,263
ncnn::UnaryOp_x86_avx512_functor::unary_op_floor::func_pack4(float vector[4] const&) const
__m128 func_pack4(const __m128& x) const { #if __SSE4_1__ return _mm_floor_ps(x); #endif // __SSE4_1__ // Use negative zero as the sign bit mask. const __m128 magic_negative_zero = _mm_set_ps1(-0.0f); // The smallest float number that has no fractional part. (2^23) const __m128 magic_smallest_no_fraction = _mm_set_ps1(8388608.0f); // absolute = abs(x); __m128 absolute = _mm_andnot_ps(magic_negative_zero, x); // negative_mask = magic_negative_zero && x; __m128 negative_mask = _mm_and_ps(magic_negative_zero, x); // no_fraction = (magic_smallest_no_fraction < absolute); __m128 no_fraction = _mm_cmplt_ps(magic_smallest_no_fraction, absolute); // truncated = static_cast<float>(static_cast<uint32_t>(absolute)); __m128 truncated = _mm_cvtepi32_ps(_mm_cvttps_epi32(absolute)); // truncated_with_sign = (truncated || negative_mask); __m128 truncated_with_sign = _mm_or_ps(truncated, negative_mask); // negative_fix = ((x < truncated_with_sign) ? 1.0f : 0.0f); __m128 negative_fix = _mm_and_ps( _mm_cmplt_ps(x, truncated_with_sign), _mm_set_ps1(1.0f)); // fixed_result = truncated_with_sign - negative_fix; __m128 fixed_result = _mm_sub_ps(truncated_with_sign, negative_fix); // return ((x && no_fraction) || (!no_fraction && fixed_result)); return _mm_or_ps( _mm_and_ps(x, no_fraction), _mm_andnot_ps(no_fraction, fixed_result)); }
subq $0x28, %rsp movq %rdi, 0x20(%rsp) movq %rsi, 0x18(%rsp) movq 0x18(%rsp), %rax vmovaps (%rax), %xmm0 vroundps $0x1, %xmm0, %xmm0 addq $0x28, %rsp retq nopw %cs:(%rax,%rax) nopl (%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
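Note: in this AVX-512 build __SSE4_1__ is defined, so the compiled body above is a single vroundps $0x1 and the magic-number fallback is dead code. What that fallback computes is easier to see in scalar form; a sketch under the same 2^23 assumption, with illustrative names that are not ncnn code:

#include <cassert>
#include <cmath>
#include <cstdint>

// Scalar model of the SSE2 floor fallback: truncate toward zero, then
// subtract 1 when truncation rounded a negative input upward.
static float floor_model(float x)
{
    // At |x| >= 2^23 every representable float is already an integer.
    if (std::fabs(x) >= 8388608.0f)
        return x;

    float truncated = (float)(std::int32_t)std::fabs(x);
    float truncated_with_sign = std::copysign(truncated, x); // keeps -0.0f

    if (x < truncated_with_sign) // only possible for negative fractions
        truncated_with_sign -= 1.0f;

    return truncated_with_sign;
}

int main()
{
    assert(floor_model(2.5f) == 2.0f);
    assert(floor_model(-2.5f) == -3.0f);
    assert(floor_model(-2.0f) == -2.0f);
    return 0;
}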
2,113,264
ncnn::UnaryOp_x86_avx512_functor::unary_op_floor::func(float const&) const
float func(const float& x) const { return (float)floor(x); }
subq $0x18, %rsp movq %rdi, 0x10(%rsp) movq %rsi, 0x8(%rsp) movq 0x8(%rsp), %rax vmovss (%rax), %xmm0 callq 0x8e720 addq $0x18, %rsp retq nopw %cs:(%rax,%rax) nopl (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,265
ncnn::UnaryOp_x86_avx512_functor::unary_op_ceil::func_pack16(float vector[16] const&) const
__m512 func_pack16(const __m512& x) const { return _mm512_roundscale_ps(x, _MM_FROUND_TO_POS_INF); }
movq %rdi, -0x8(%rsp) movq %rsi, -0x10(%rsp) movq -0x10(%rsp), %rax vmovaps (%rax), %zmm0 vrndscaleps $0x2, %zmm0, %zmm0 retq nopl (%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,266
ncnn::UnaryOp_x86_avx512_functor::unary_op_ceil::func_pack8(float vector[8] const&) const
__m256 func_pack8(const __m256& x) const { return _mm256_ceil_ps(x); }
movq %rdi, -0x8(%rsp) movq %rsi, -0x10(%rsp) movq -0x10(%rsp), %rax vmovaps (%rax), %ymm0 vroundps $0x2, %ymm0, %ymm0 retq nopw (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,267
ncnn::UnaryOp_x86_avx512_functor::unary_op_ceil::func_pack4(float vector[4] const&) const
__m128 func_pack4(const __m128& x) const
{
#if __SSE4_1__
    return _mm_ceil_ps(x);
#endif // __SSE4_1__

    // Use negative zero as the sign bit mask.
    const __m128 magic_negative_zero = _mm_set_ps1(-0.0f);

    // The smallest float number that has no fractional part. (2^23)
    const __m128 magic_smallest_no_fraction = _mm_set_ps1(8388608.0f);

    // absolute = abs(x);
    __m128 absolute = _mm_andnot_ps(magic_negative_zero, x);

    // negative_mask = magic_negative_zero && x;
    __m128 negative_mask = _mm_and_ps(magic_negative_zero, x);

    // no_fraction = (magic_smallest_no_fraction < absolute);
    __m128 no_fraction = _mm_cmplt_ps(magic_smallest_no_fraction, absolute);

    // truncated = static_cast<float>(static_cast<int32_t>(absolute));
    __m128 truncated = _mm_cvtepi32_ps(_mm_cvttps_epi32(absolute));

    // truncated_with_sign = (truncated || negative_mask);
    __m128 truncated_with_sign = _mm_or_ps(truncated, negative_mask);

    // positive_fix = ((x > -0.0f) && (x > truncated_with_sign) ? -1.0f : 0.0f);
    __m128 positive_fix = _mm_and_ps(
        _mm_and_ps(
            _mm_cmpgt_ps(x, magic_negative_zero),
            _mm_cmpgt_ps(x, truncated_with_sign)),
        _mm_set_ps1(-1.0f));

    // fixed_result = truncated_with_sign - positive_fix;
    __m128 fixed_result = _mm_sub_ps(truncated_with_sign, positive_fix);

    // return ((x && no_fraction) || (!no_fraction && fixed_result));
    return _mm_or_ps(
        _mm_and_ps(x, no_fraction),
        _mm_andnot_ps(no_fraction, fixed_result));
}
subq $0x28, %rsp movq %rdi, 0x20(%rsp) movq %rsi, 0x18(%rsp) movq 0x18(%rsp), %rax vmovaps (%rax), %xmm0 vroundps $0x2, %xmm0, %xmm0 addq $0x28, %rsp retq nopw %cs:(%rax,%rax) nopl (%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
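Note: the ceil fallback mirrors the floor one; only the fix-up direction changes, and it is written as subtracting a -1.0f under a mask so the same sub instruction is reused. A scalar sketch with illustrative names, not ncnn code:

#include <cassert>
#include <cmath>
#include <cstdint>

// Scalar model of the SSE2 ceil fallback: truncate toward zero, then
// add 1 when truncation rounded a positive input downward.
static float ceil_model(float x)
{
    if (std::fabs(x) >= 8388608.0f) // already integral at this magnitude
        return x;

    float truncated_with_sign =
        std::copysign((float)(std::int32_t)std::fabs(x), x);

    // The vector code expresses this as truncated - (-1.0f) under a mask.
    if (x > 0.0f && x > truncated_with_sign)
        truncated_with_sign += 1.0f;

    return truncated_with_sign;
}

int main()
{
    assert(ceil_model(2.5f) == 3.0f);
    assert(ceil_model(-2.5f) == -2.0f);
    return 0;
}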
2,113,268
ncnn::UnaryOp_x86_avx512_functor::unary_op_ceil::func(float const&) const
float func(const float& x) const { return (float)ceil(x); }
subq $0x18, %rsp movq %rdi, 0x10(%rsp) movq %rsi, 0x8(%rsp) movq 0x8(%rsp), %rax vmovss (%rax), %xmm0 callq 0x1058980 addq $0x18, %rsp retq nopw %cs:(%rax,%rax) nopl (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,269
ncnn::UnaryOp_x86_avx512_functor::unary_op_square::func_pack16(float vector[16] const&) const
__m512 func_pack16(const __m512& x) const { return _mm512_mul_ps(x, x); }
pushq %rbp movq %rsp, %rbp andq $-0x40, %rsp subq $0x100, %rsp # imm = 0x100 movq %rdi, 0x38(%rsp) movq %rsi, 0x30(%rsp) movq 0x30(%rsp), %rax vmovaps (%rax), %zmm1 movq 0x30(%rsp), %rax vmovaps (%rax), %zmm0 vmovaps %zmm1, 0x80(%rsp) vmovaps %zmm0, 0x40(%rsp) vmovaps 0x80(%rsp), %zmm0 vmulps 0x40(%rsp), %zmm0, %zmm0 movq %rbp, %rsp popq %rbp retq nopw %cs:(%rax,%rax) nop
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,270
ncnn::UnaryOp_x86_avx512_functor::unary_op_square::func_pack8(float vector[8] const&) const
__m256 func_pack8(const __m256& x) const { return _mm256_mul_ps(x, x); }
pushq %rbp movq %rsp, %rbp andq $-0x20, %rsp subq $0x80, %rsp movq %rdi, 0x18(%rsp) movq %rsi, 0x10(%rsp) movq 0x10(%rsp), %rax vmovaps (%rax), %ymm1 movq 0x10(%rsp), %rax vmovaps (%rax), %ymm0 vmovaps %ymm1, 0x40(%rsp) vmovaps %ymm0, 0x20(%rsp) vmovaps 0x40(%rsp), %ymm0 vmulps 0x20(%rsp), %ymm0, %ymm0 movq %rbp, %rsp popq %rbp retq nopl (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,271
ncnn::UnaryOp_x86_avx512_functor::unary_op_square::func_pack4(float vector[4] const&) const
__m128 func_pack4(const __m128& x) const { return _mm_mul_ps(x, x); }
movq %rdi, -0x30(%rsp) movq %rsi, -0x38(%rsp) movq -0x38(%rsp), %rax vmovaps (%rax), %xmm1 movq -0x38(%rsp), %rax vmovaps (%rax), %xmm0 vmovaps %xmm1, -0x18(%rsp) vmovaps %xmm0, -0x28(%rsp) vmovaps -0x18(%rsp), %xmm0 vmulps -0x28(%rsp), %xmm0, %xmm0 retq nopw %cs:(%rax,%rax) nop
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,272
ncnn::UnaryOp_x86_avx512_functor::unary_op_square::func(float const&) const
float func(const float& x) const { return x * x; }
movq %rdi, -0x8(%rsp) movq %rsi, -0x10(%rsp) movq -0x10(%rsp), %rax vmovss (%rax), %xmm0 movq -0x10(%rsp), %rax vmulss (%rax), %xmm0, %xmm0 retq nopl (%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,273
ncnn::UnaryOp_x86_avx512_functor::unary_op_sqrt::func_pack16(float vector[16] const&) const
__m512 func_pack16(const __m512& x) const { return _mm512_sqrt_ps(x); }
pushq %rbp movq %rsp, %rbp andq $-0x40, %rsp subq $0xc0, %rsp movq %rdi, 0x38(%rsp) movq %rsi, 0x30(%rsp) movq 0x30(%rsp), %rax vmovaps (%rax), %zmm0 vmovaps %zmm0, 0x40(%rsp) vmovaps 0x40(%rsp), %zmm2 vrsqrt14ps %zmm2, %zmm1 vmulps %zmm1, %zmm2, %zmm0 vbroadcastss 0x7cefda(%rip), %zmm3 # 0x1e1cdb4 vfmadd213ps %zmm3, %zmm0, %zmm1 # zmm1 = (zmm0 * zmm1) + zmm3 vpbroadcastd 0x7cefc6(%rip), %zmm3 # 0x1e1cdb0 vpandd %zmm3, %zmm2, %zmm3 vbroadcastss 0x7bdfee(%rip), %zmm2 # 0x1e0bde8 vcmpleps %zmm3, %zmm2, %k1 vbroadcastss 0x7bb4a5(%rip), %zmm2 # 0x1e092b0 vmulps %zmm2, %zmm0, %zmm0 vmulps %zmm1, %zmm0, %zmm0 {%k1} {z} movq %rbp, %rsp popq %rbp retq nopl (%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,274
ncnn::UnaryOp_x86_avx512_functor::unary_op_sqrt::func_pack8(float vector[8] const&) const
__m256 func_pack8(const __m256& x) const { return _mm256_sqrt_ps(x); }
pushq %rbp movq %rsp, %rbp andq $-0x20, %rsp subq $0x60, %rsp movq %rdi, 0x18(%rsp) movq %rsi, 0x10(%rsp) movq 0x10(%rsp), %rax vmovaps (%rax), %ymm0 vmovaps %ymm0, 0x20(%rsp) vmovaps 0x20(%rsp), %ymm2 vrsqrtps %ymm2, %ymm1 vmulps %ymm1, %ymm2, %ymm0 vbroadcastss 0x7cef58(%rip), %ymm3 # 0x1e1cdb4 vfmadd213ps %ymm3, %ymm0, %ymm1 # ymm1 = (ymm0 * ymm1) + ymm3 vpbroadcastd 0x7cef46(%rip), %ymm3 # 0x1e1cdb0 vpand %ymm3, %ymm2, %ymm3 vbroadcastss 0x7bdf71(%rip), %ymm2 # 0x1e0bde8 vcmpleps %ymm3, %ymm2, %k1 vbroadcastss 0x7bb429(%rip), %ymm2 # 0x1e092b0 vmulps %ymm2, %ymm0, %ymm0 vmulps %ymm1, %ymm0, %ymm0 {%k1} {z} movq %rbp, %rsp popq %rbp retq nopw %cs:(%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,275
ncnn::UnaryOp_x86_avx512_functor::unary_op_sqrt::func_pack4(float vector[4] const&) const
__m128 func_pack4(const __m128& x) const { return _mm_sqrt_ps(x); }
movq %rdi, -0x20(%rsp) movq %rsi, -0x28(%rsp) movq -0x28(%rsp), %rax vmovaps (%rax), %xmm0 vmovaps %xmm0, -0x18(%rsp) vmovaps -0x18(%rsp), %xmm2 vrsqrtps %xmm2, %xmm1 vmulps %xmm1, %xmm2, %xmm0 vbroadcastss 0x7ceee4(%rip), %xmm3 # 0x1e1cdb4 vfmadd213ps %xmm3, %xmm0, %xmm1 # xmm1 = (xmm0 * xmm1) + xmm3 vpbroadcastd 0x7ceed2(%rip), %xmm3 # 0x1e1cdb0 vpand %xmm3, %xmm2, %xmm3 vbroadcastss 0x7bdefd(%rip), %xmm2 # 0x1e0bde8 vcmpleps %xmm3, %xmm2, %k1 vbroadcastss 0x7bb3b5(%rip), %xmm2 # 0x1e092b0 vmulps %xmm2, %xmm0, %xmm0 vmulps %xmm1, %xmm0, %xmm0 {%k1} {z} retq nopw %cs:(%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
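Note: none of the three vector sqrt bodies above uses a vsqrtps instruction. The compiler has applied a fast-math style expansion: a reciprocal-square-root estimate (vrsqrt14ps / vrsqrtps) refined by one Newton-Raphson step, plus a compare-and-mask that appears to zero the result when |x| falls below the smallest normal, which also fixes sqrt(0). A scalar sketch of that refinement scheme, with an illustrative bit hack standing in for the hardware estimate:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Crude initial guess for 1/sqrt(x), standing in for vrsqrtps.
static float rsqrt_estimate(float x)
{
    std::uint32_t i;
    std::memcpy(&i, &x, sizeof i);
    i = 0x5f3759dfu - (i >> 1); // classic exponent-halving guess
    float y;
    std::memcpy(&y, &i, sizeof y);
    return y;
}

// sqrt(x) = x * rsqrt(x), with one Newton step to refine the estimate.
// The disassembly fuses the same arithmetic into vmulps + vfmadd213ps.
static float sqrt_via_rsqrt(float x)
{
    if (x == 0.0f)
        return 0.0f; // the vector code handles this with a {z} merge mask

    float y = rsqrt_estimate(x);
    y = y * (1.5f - 0.5f * x * y * y); // Newton-Raphson on f(y) = 1/y^2 - x
    return x * y;
}

int main()
{
    std::printf("%f (exact 1.414214)\n", sqrt_via_rsqrt(2.0f));
    return 0;
}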
2,113,276
ncnn::UnaryOp_x86_avx512_functor::unary_op_sqrt::func(float const&) const
float func(const float& x) const { return (float)sqrt(x); }
subq $0x18, %rsp movq %rdi, 0x10(%rsp) movq %rsi, 0x8(%rsp) movq 0x8(%rsp), %rax vmovss (%rax), %xmm0 callq 0x671b0 addq $0x18, %rsp retq nopw %cs:(%rax,%rax) nopl (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,277
ncnn::UnaryOp_x86_avx512_functor::unary_op_rsqrt::func_pack16(float vector[16] const&) const
__m512 func_pack16(const __m512& x) const { __m256 _x0 = _mm512_extractf32x8_ps(x, 0); __m256 _x1 = _mm512_extractf32x8_ps(x, 1); _x0 = _mm256_rsqrt_ps(_x0); _x1 = _mm256_rsqrt_ps(_x1); return _mm512_insertf32x8(_mm512_castps256_ps512(_x0), _x1, 1); }
pushq %rbp movq %rsp, %rbp andq $-0x20, %rsp subq $0xe0, %rsp movq %rdi, 0x58(%rsp) movq %rsi, 0x50(%rsp) movq 0x50(%rsp), %rax vmovaps (%rax), %zmm0 vmovaps %ymm0, 0x20(%rsp) movq 0x50(%rsp), %rax vmovaps (%rax), %zmm0 vextractf64x4 $0x1, %zmm0, (%rsp) vmovaps 0x20(%rsp), %ymm0 vmovaps %ymm0, 0x80(%rsp) vrsqrtps 0x80(%rsp), %ymm0 vmovaps %ymm0, 0x20(%rsp) vmovaps (%rsp), %ymm0 vmovaps %ymm0, 0x60(%rsp) vrsqrtps 0x60(%rsp), %ymm0 vmovaps %ymm0, (%rsp) vmovaps 0x20(%rsp), %ymm0 vmovaps %ymm0, 0xa0(%rsp) vmovaps 0xa0(%rsp), %ymm2 vmovaps (%rsp), %ymm1 vmovaps %ymm2, %ymm0 vinsertf64x4 $0x1, %ymm1, %zmm0, %zmm0 movq %rbp, %rsp popq %rbp retq nop
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
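Note: the extract/insert pair above exists because AVX-512F has no 512-bit _mm512_rsqrt_ps; the source splits the vector and reuses the 256-bit estimate. AVX-512F does provide a native 16-lane estimate with different guaranteed accuracy (~14 bits for vrsqrt14ps versus ~11-12 bits for vrsqrtps), so a one-instruction alternative is possible if that accuracy difference is acceptable, which is the assumption here:

#include <immintrin.h>

// Single-instruction variant of the rsqrt func_pack16 above.
// Not bit-identical to the original: the low bits of the estimate differ.
__m512 rsqrt16_native(__m512 x)
{
    return _mm512_rsqrt14_ps(x);
}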
2,113,278
ncnn::UnaryOp_x86_avx512_functor::unary_op_rsqrt::func_pack8(float vector[8] const&) const
__m256 func_pack8(const __m256& x) const { return _mm256_rsqrt_ps(x); }
pushq %rbp movq %rsp, %rbp andq $-0x20, %rsp subq $0x60, %rsp movq %rdi, 0x18(%rsp) movq %rsi, 0x10(%rsp) movq 0x10(%rsp), %rax vmovaps (%rax), %ymm0 vmovaps %ymm0, 0x20(%rsp) vmovaps 0x20(%rsp), %ymm0 vrsqrtps %ymm0, %ymm0 movq %rbp, %rsp popq %rbp retq nopw %cs:(%rax,%rax) nop
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,279
ncnn::UnaryOp_x86_avx512_functor::unary_op_rsqrt::func_pack4(float vector[4] const&) const
__m128 func_pack4(const __m128& x) const { return _mm_rsqrt_ps(x); }
movq %rdi, -0x20(%rsp) movq %rsi, -0x28(%rsp) movq -0x28(%rsp), %rax vmovaps (%rax), %xmm0 vmovaps %xmm0, -0x18(%rsp) vmovaps -0x18(%rsp), %xmm0 vrsqrtps %xmm0, %xmm0 retq nopw %cs:(%rax,%rax) nop
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,280
ncnn::UnaryOp_x86_avx512_functor::unary_op_rsqrt::func(float const&) const
float func(const float& x) const { return (float)(1.f / sqrt(x)); }
subq $0x18, %rsp movq %rdi, 0x10(%rsp) movq %rsi, 0x8(%rsp) movq 0x8(%rsp), %rax vmovss (%rax), %xmm0 callq 0x671b0 vmovaps %xmm0, %xmm1 vmovss 0x7baf8c(%rip), %xmm0 # 0x1e09004 vdivss %xmm1, %xmm0, %xmm0 addq $0x18, %rsp retq nopw %cs:(%rax,%rax) nopl (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,281
ncnn::UnaryOp_x86_avx512_functor::unary_op_exp::func_pack16(float vector[16] const&) const
__m512 func_pack16(const __m512& x) const { return exp512_ps(x); }
pushq %rbp movq %rsp, %rbp andq $-0x40, %rsp subq $0xf40, %rsp # imm = 0xF40 movq %rdi, 0x38(%rsp) movq %rsi, 0x30(%rsp) movq 0x30(%rsp), %rax vmovaps (%rax), %zmm0 vmovaps %zmm0, 0x200(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovaps %zmm0, 0x280(%rsp) vmovaps 0x280(%rsp), %zmm0 vmovaps %zmm0, 0x1c0(%rsp) vmovaps 0x7ced5e(%rip), %zmm0 # 0x1e1ce40 vmovaps %zmm0, 0x100(%rsp) vmovaps 0x200(%rsp), %zmm1 vmovaps 0x7ced84(%rip), %zmm0 # 0x1e1ce80 vmovaps %zmm1, 0x400(%rsp) vmovaps %zmm0, 0x3c0(%rsp) vmovaps 0x400(%rsp), %zmm0 vmovaps 0x3c0(%rsp), %zmm1 vminps %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0x200(%rsp) vmovaps 0x200(%rsp), %zmm1 vmovaps 0x7ced84(%rip), %zmm0 # 0x1e1cec0 vmovaps %zmm1, 0x480(%rsp) vmovaps %zmm0, 0x440(%rsp) vmovaps 0x480(%rsp), %zmm0 vmovaps 0x440(%rsp), %zmm1 vmaxps %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0x200(%rsp) vmovaps 0x200(%rsp), %zmm2 vmovaps 0x7ced84(%rip), %zmm1 # 0x1e1cf00 vmovaps 0x7cedba(%rip), %zmm0 # 0x1e1cf40 vmovaps %zmm2, 0x9c0(%rsp) vmovaps %zmm1, 0x980(%rsp) vmovaps %zmm0, 0x940(%rsp) vmovaps 0x9c0(%rsp), %zmm1 vmovaps 0x980(%rsp), %zmm0 vmovaps 0x940(%rsp), %zmm2 vfmadd213ps %zmm2, %zmm1, %zmm0 # zmm0 = (zmm1 * zmm0) + zmm2 vmovaps %zmm0, 0x180(%rsp) vmovaps 0x180(%rsp), %zmm0 vrndscaleps $0x1, %zmm0, %zmm0 vmovaps %zmm0, 0x1c0(%rsp) vmovaps 0x1c0(%rsp), %zmm1 vmovaps 0x180(%rsp), %zmm0 vcmpltps %zmm1, %zmm0, %k0 kmovw %k0, 0xfe(%rsp) vmovaps 0x1c0(%rsp), %zmm1 movw 0xfe(%rsp), %ax vmovaps 0x100(%rsp), %zmm0 vmovaps %zmm1, 0xac0(%rsp) movw %ax, 0xabe(%rsp) vmovaps %zmm1, 0xa40(%rsp) vmovaps %zmm0, 0xa00(%rsp) vmovaps 0xa40(%rsp), %zmm1 vmovaps 0xa00(%rsp), %zmm0 kmovw 0xabe(%rsp), %k1 vmovaps %zmm1, 0xb40(%rsp) vmovaps %zmm0, 0xb00(%rsp) vmovaps 0xb40(%rsp), %zmm1 vmovaps 0xb00(%rsp), %zmm2 vmovaps 0xac0(%rsp), %zmm0 vsubps %zmm2, %zmm1, %zmm0 {%k1} vmovaps %zmm0, 0x180(%rsp) vmovaps 0x180(%rsp), %zmm2 vmovaps 0x7cecec(%rip), %zmm1 # 0x1e1cf80 vmovaps 0x200(%rsp), %zmm0 vmovaps %zmm2, 0xcc0(%rsp) vmovaps %zmm1, 0xc80(%rsp) vmovaps %zmm0, 0xc40(%rsp) vmovaps 0xcc0(%rsp), %zmm1 vmovaps 0xc80(%rsp), %zmm0 vmovaps 0xc40(%rsp), %zmm2 vfnmadd213ps %zmm2, %zmm1, %zmm0 # zmm0 = -(zmm1 * zmm0) + zmm2 vmovaps %zmm0, 0x200(%rsp) vmovaps 0x180(%rsp), %zmm2 vmovaps 0x7cecd4(%rip), %zmm1 # 0x1e1cfc0 vmovaps 0x200(%rsp), %zmm0 vmovaps %zmm2, 0xc00(%rsp) vmovaps %zmm1, 0xbc0(%rsp) vmovaps %zmm0, 0xb80(%rsp) vmovaps 0xc00(%rsp), %zmm1 vmovaps 0xbc0(%rsp), %zmm0 vmovaps 0xb80(%rsp), %zmm2 vfnmadd213ps %zmm2, %zmm1, %zmm0 # zmm0 = -(zmm1 * zmm0) + zmm2 vmovaps %zmm0, 0x200(%rsp) vmovaps 0x200(%rsp), %zmm0 vmovaps %zmm0, 0x380(%rsp) vmovaps %zmm0, 0x340(%rsp) vmovaps 0x380(%rsp), %zmm0 vmovaps 0x340(%rsp), %zmm1 vmulps %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0x1c0(%rsp) vmovaps 0x7cec8e(%rip), %zmm0 # 0x1e1d000 vmovaps %zmm0, 0x80(%rsp) vmovaps 0x80(%rsp), %zmm2 vmovaps 0x200(%rsp), %zmm1 vmovaps 0x7cecac(%rip), %zmm0 # 0x1e1d040 vmovaps %zmm2, 0x900(%rsp) vmovaps %zmm1, 0x8c0(%rsp) vmovaps %zmm0, 0x880(%rsp) vmovaps 0x900(%rsp), %zmm1 vmovaps 0x8c0(%rsp), %zmm0 vmovaps 0x880(%rsp), %zmm2 vfmadd213ps %zmm2, %zmm1, %zmm0 # zmm0 = (zmm1 * zmm0) + zmm2 vmovaps %zmm0, 0x80(%rsp) vmovaps 0x80(%rsp), %zmm2 vmovaps 0x200(%rsp), %zmm1 vmovaps 0x7cec94(%rip), %zmm0 # 0x1e1d080 vmovaps %zmm2, 0x840(%rsp) vmovaps %zmm1, 0x800(%rsp) vmovaps %zmm0, 0x7c0(%rsp) vmovaps 0x840(%rsp), %zmm1 vmovaps 0x800(%rsp), %zmm0 vmovaps 0x7c0(%rsp), %zmm2 vfmadd213ps %zmm2, %zmm1, %zmm0 # zmm0 = (zmm1 * zmm0) + zmm2 vmovaps %zmm0, 0x80(%rsp) vmovaps 0x80(%rsp), %zmm2 vmovaps 0x200(%rsp), %zmm1 vmovaps 0x7cec7c(%rip), 
%zmm0 # 0x1e1d0c0 vmovaps %zmm2, 0x780(%rsp) vmovaps %zmm1, 0x740(%rsp) vmovaps %zmm0, 0x700(%rsp) vmovaps 0x780(%rsp), %zmm1 vmovaps 0x740(%rsp), %zmm0 vmovaps 0x700(%rsp), %zmm2 vfmadd213ps %zmm2, %zmm1, %zmm0 # zmm0 = (zmm1 * zmm0) + zmm2 vmovaps %zmm0, 0x80(%rsp) vmovaps 0x80(%rsp), %zmm2 vmovaps 0x200(%rsp), %zmm1 vmovaps 0x7cec64(%rip), %zmm0 # 0x1e1d100 vmovaps %zmm2, 0x6c0(%rsp) vmovaps %zmm1, 0x680(%rsp) vmovaps %zmm0, 0x640(%rsp) vmovaps 0x6c0(%rsp), %zmm1 vmovaps 0x680(%rsp), %zmm0 vmovaps 0x640(%rsp), %zmm2 vfmadd213ps %zmm2, %zmm1, %zmm0 # zmm0 = (zmm1 * zmm0) + zmm2 vmovaps %zmm0, 0x80(%rsp) vmovaps 0x80(%rsp), %zmm2 vmovaps 0x200(%rsp), %zmm1 vmovaps 0x7cec4c(%rip), %zmm0 # 0x1e1d140 vmovaps %zmm2, 0x600(%rsp) vmovaps %zmm1, 0x5c0(%rsp) vmovaps %zmm0, 0x580(%rsp) vmovaps 0x600(%rsp), %zmm1 vmovaps 0x5c0(%rsp), %zmm0 vmovaps 0x580(%rsp), %zmm2 vfmadd213ps %zmm2, %zmm1, %zmm0 # zmm0 = (zmm1 * zmm0) + zmm2 vmovaps %zmm0, 0x80(%rsp) vmovaps 0x80(%rsp), %zmm2 vmovaps 0x1c0(%rsp), %zmm1 vmovaps 0x200(%rsp), %zmm0 vmovaps %zmm2, 0x540(%rsp) vmovaps %zmm1, 0x500(%rsp) vmovaps %zmm0, 0x4c0(%rsp) vmovaps 0x540(%rsp), %zmm1 vmovaps 0x500(%rsp), %zmm0 vmovaps 0x4c0(%rsp), %zmm2 vfmadd213ps %zmm2, %zmm1, %zmm0 # zmm0 = (zmm1 * zmm0) + zmm2 vmovaps %zmm0, 0x80(%rsp) vmovaps 0x80(%rsp), %zmm1 vmovaps 0x100(%rsp), %zmm0 vmovaps %zmm1, 0xd40(%rsp) vmovaps %zmm0, 0xd00(%rsp) vmovaps 0xd40(%rsp), %zmm0 vmovaps 0xd00(%rsp), %zmm1 vaddps %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0x80(%rsp) vmovaps 0x180(%rsp), %zmm0 vmovaps %zmm0, 0xd80(%rsp) vmovaps 0xd80(%rsp), %zmm0 vpxor %xmm1, %xmm1, %xmm1 vmovdqa64 %zmm1, 0xec0(%rsp) vmovdqa64 0xec0(%rsp), %zmm1 vcvttps2dq %zmm0, %zmm0 vmovdqa64 %zmm0, 0x140(%rsp) vmovdqa64 0x140(%rsp), %zmm1 vmovdqa64 0x7ceb6e(%rip), %zmm0 # 0x1e1d180 vmovdqa64 %zmm1, 0xe00(%rsp) vmovdqa64 %zmm0, 0xdc0(%rsp) vmovdqa64 0xe00(%rsp), %zmm0 vmovdqa64 0xdc0(%rsp), %zmm1 vpaddd %zmm1, %zmm0, %zmm0 vmovdqa64 %zmm0, 0x140(%rsp) vmovdqa64 0x140(%rsp), %zmm0 vmovdqa64 %zmm0, 0xe80(%rsp) movl $0x17, 0xe7c(%rsp) vmovdqa64 0xe80(%rsp), %zmm0 movl 0xe7c(%rsp), %eax vmovd %eax, %xmm1 vpslld %xmm1, %zmm0, %zmm0 vmovdqa64 %zmm0, 0x140(%rsp) vmovdqa64 0x140(%rsp), %zmm0 vmovdqa64 %zmm0, 0x240(%rsp) vmovdqa64 0x240(%rsp), %zmm0 vmovaps %zmm0, 0x40(%rsp) vmovaps 0x80(%rsp), %zmm1 vmovaps 0x40(%rsp), %zmm0 vmovaps %zmm1, 0x300(%rsp) vmovaps %zmm0, 0x2c0(%rsp) vmovaps 0x300(%rsp), %zmm0 vmulps 0x2c0(%rsp), %zmm0, %zmm0 vmovaps %zmm0, 0x80(%rsp) vmovaps 0x80(%rsp), %zmm0 movq %rbp, %rsp popq %rbp retq nopw %cs:(%rax,%rax) nopl (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
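Note: the exp512_ps body above (and the exp256_ps / exp_ps bodies that follow) is the classic Cephes-style expf: clamp x, split x = n*ln2 + r with a floor-and-fixup (vrndscaleps $0x1 plus a masked vsubps here), evaluate a degree-5 polynomial in r, then multiply by 2^n assembled directly in the exponent field (the vpaddd of the exponent bias followed by vpslld $0x17). The constants are rip-relative loads not visible in this dump; the scalar sketch below uses the standard sse_mathfun values, which is an assumption:

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Scalar model of exp512_ps / exp256_ps / exp_ps.
static float exp_model(float x)
{
    x = std::fmin(x, 88.3762626647949f);  // exp_hi clamp
    x = std::fmax(x, -88.3762626647949f); // exp_lo clamp

    // n = round(x / ln2), via floor(x * log2(e) + 0.5)
    float fx = std::floor(x * 1.44269504088896341f + 0.5f);

    x -= fx * 0.693359375f;    // ln2, high part (Cody-Waite split)
    x -= fx * -2.12194440e-4f; // ln2, low part

    float z = x * x;
    float y = 1.9875691500e-4f; // degree-5 Horner polynomial for e^r
    y = y * x + 1.3981999507e-3f;
    y = y * x + 8.3334519073e-3f;
    y = y * x + 4.1665795894e-2f;
    y = y * x + 1.6666665459e-1f;
    y = y * x + 5.0000001201e-1f;
    y = y * z + x + 1.0f;

    // 2^n assembled in the exponent field: (n + 127) << 23.
    std::uint32_t bits = (std::uint32_t)((std::int32_t)fx + 127) << 23;
    float pow2n;
    std::memcpy(&pow2n, &bits, sizeof bits);
    return y * pow2n;
}

int main()
{
    std::printf("%f vs %f\n", exp_model(1.0f), std::exp(1.0f));
    return 0;
}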
2,113,282
ncnn::UnaryOp_x86_avx512_functor::unary_op_exp::func_pack8(float vector[8] const&) const
__m256 func_pack8(const __m256& x) const { return exp256_ps(x); }
pushq %rbp movq %rsp, %rbp andq $-0x20, %rsp subq $0xb20, %rsp # imm = 0xB20 movq %rdi, 0x18(%rsp) movq %rsi, 0x10(%rsp) movq 0x10(%rsp), %rax vmovaps (%rax), %ymm0 vmovaps %ymm0, 0x100(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovaps %ymm0, 0x1a0(%rsp) vmovaps 0x1a0(%rsp), %ymm0 vmovaps %ymm0, 0xe0(%rsp) vbroadcastss 0x7ba8c1(%rip), %ymm0 # 0x1e09004 vmovaps %ymm0, 0x80(%rsp) vmovaps 0x100(%rsp), %ymm0 vmovaps %ymm0, 0x260(%rsp) vbroadcastss 0x7bd659(%rip), %ymm0 # 0x1e0bdc0 vmovaps %ymm0, 0x240(%rsp) vmovaps 0x260(%rsp), %ymm0 vmovaps 0x240(%rsp), %ymm1 vminps %ymm1, %ymm0, %ymm0 vmovaps %ymm0, 0x100(%rsp) vmovaps 0x100(%rsp), %ymm0 vmovaps %ymm0, 0x140(%rsp) vbroadcastss 0x7bd61a(%rip), %ymm0 # 0x1e0bdc4 vmovaps %ymm0, 0x120(%rsp) vmovaps 0x140(%rsp), %ymm0 vmovaps 0x120(%rsp), %ymm1 vmaxps %ymm1, %ymm0, %ymm0 vmovaps %ymm0, 0x100(%rsp) vmovaps 0x100(%rsp), %ymm0 vmovaps %ymm0, 0x500(%rsp) vbroadcastss 0x7bd5db(%rip), %ymm0 # 0x1e0bdc8 vmovaps %ymm0, 0x4e0(%rsp) vbroadcastss 0x7ba81d(%rip), %ymm0 # 0x1e0901c vmovaps %ymm0, 0x4c0(%rsp) vmovaps 0x500(%rsp), %ymm3 vmovaps 0x4e0(%rsp), %ymm2 vmovaps 0x4c0(%rsp), %ymm1 vmovaps %ymm3, 0x760(%rsp) vmovaps %ymm2, 0x740(%rsp) vmovaps %ymm1, 0x720(%rsp) vmovaps 0x760(%rsp), %ymm2 vmovaps 0x740(%rsp), %ymm1 vmovaps 0x720(%rsp), %ymm3 vfmadd213ps %ymm3, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm1) + ymm3 vmovaps %ymm1, 0xc0(%rsp) vmovaps 0xc0(%rsp), %ymm1 vroundps $0x1, %ymm1, %ymm1 vmovaps %ymm1, 0xe0(%rsp) vmovaps 0xe0(%rsp), %ymm2 vmovaps 0xc0(%rsp), %ymm1 vcmpltps %ymm2, %ymm1, %ymm1 vmovdqa %ymm1, 0x60(%rsp) vmovaps 0x60(%rsp), %ymm2 vmovaps 0x80(%rsp), %ymm1 vmovaps %ymm2, 0x540(%rsp) vmovaps %ymm1, 0x520(%rsp) vmovdqa 0x540(%rsp), %ymm1 vmovdqa 0x520(%rsp), %ymm2 vpand %ymm2, %ymm1, %ymm1 vmovdqa %ymm1, 0x60(%rsp) vmovaps 0xe0(%rsp), %ymm2 vmovaps 0x60(%rsp), %ymm1 vmovaps %ymm2, 0x180(%rsp) vmovaps %ymm1, 0x160(%rsp) vmovaps 0x180(%rsp), %ymm1 vmovaps 0x160(%rsp), %ymm2 vsubps %ymm2, %ymm1, %ymm1 vmovaps %ymm1, 0xc0(%rsp) vmovaps 0xc0(%rsp), %ymm2 vmovaps 0x100(%rsp), %ymm1 vmovaps %ymm2, 0x600(%rsp) vbroadcastss 0x7bd48f(%rip), %ymm2 # 0x1e0bdcc vmovaps %ymm2, 0x5e0(%rsp) vmovaps %ymm1, 0x5c0(%rsp) vmovaps 0x600(%rsp), %ymm3 vmovaps 0x5e0(%rsp), %ymm2 vmovaps 0x5c0(%rsp), %ymm1 vmovaps %ymm3, 0xa00(%rsp) vmovaps %ymm2, 0x9e0(%rsp) vmovaps %ymm1, 0x9c0(%rsp) vmovaps 0xa00(%rsp), %ymm2 vmovaps 0x9e0(%rsp), %ymm1 vmovaps 0x9c0(%rsp), %ymm3 vfnmadd213ps %ymm3, %ymm2, %ymm1 # ymm1 = -(ymm2 * ymm1) + ymm3 vmovaps %ymm1, 0x100(%rsp) vmovaps 0xc0(%rsp), %ymm2 vmovaps 0x100(%rsp), %ymm1 vmovaps %ymm2, 0x5a0(%rsp) vbroadcastss 0x7bd3fe(%rip), %ymm2 # 0x1e0bdd0 vmovaps %ymm2, 0x580(%rsp) vmovaps %ymm1, 0x560(%rsp) vmovaps 0x5a0(%rsp), %ymm3 vmovaps 0x580(%rsp), %ymm2 vmovaps 0x560(%rsp), %ymm1 vmovaps %ymm3, 0xa60(%rsp) vmovaps %ymm2, 0xa40(%rsp) vmovaps %ymm1, 0xa20(%rsp) vmovaps 0xa60(%rsp), %ymm2 vmovaps 0xa40(%rsp), %ymm1 vmovaps 0xa20(%rsp), %ymm3 vfnmadd213ps %ymm3, %ymm2, %ymm1 # ymm1 = -(ymm2 * ymm1) + ymm3 vmovaps %ymm1, 0x100(%rsp) vmovaps 0x100(%rsp), %ymm1 vmovaps %ymm1, 0x220(%rsp) vmovaps %ymm1, 0x200(%rsp) vmovaps 0x220(%rsp), %ymm1 vmovaps 0x200(%rsp), %ymm2 vmulps %ymm2, %ymm1, %ymm1 vmovaps %ymm1, 0xe0(%rsp) vbroadcastss 0x7bd34e(%rip), %ymm1 # 0x1e0bdd4 vmovaps %ymm1, 0x40(%rsp) vmovaps 0x40(%rsp), %ymm2 vmovaps 0x100(%rsp), %ymm1 vmovaps %ymm2, 0x4a0(%rsp) vmovaps %ymm1, 0x480(%rsp) vbroadcastss 0x7bd322(%rip), %ymm1 # 0x1e0bdd8 vmovaps %ymm1, 0x460(%rsp) vmovaps 0x4a0(%rsp), %ymm3 vmovaps 0x480(%rsp), %ymm2 vmovaps 0x460(%rsp), %ymm1 vmovaps 
%ymm3, 0x7c0(%rsp) vmovaps %ymm2, 0x7a0(%rsp) vmovaps %ymm1, 0x780(%rsp) vmovaps 0x7c0(%rsp), %ymm2 vmovaps 0x7a0(%rsp), %ymm1 vmovaps 0x780(%rsp), %ymm3 vfmadd213ps %ymm3, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm1) + ymm3 vmovaps %ymm1, 0x40(%rsp) vmovaps 0x40(%rsp), %ymm2 vmovaps 0x100(%rsp), %ymm1 vmovaps %ymm2, 0x440(%rsp) vmovaps %ymm1, 0x420(%rsp) vbroadcastss 0x7bd297(%rip), %ymm1 # 0x1e0bddc vmovaps %ymm1, 0x400(%rsp) vmovaps 0x440(%rsp), %ymm3 vmovaps 0x420(%rsp), %ymm2 vmovaps 0x400(%rsp), %ymm1 vmovaps %ymm3, 0x820(%rsp) vmovaps %ymm2, 0x800(%rsp) vmovaps %ymm1, 0x7e0(%rsp) vmovaps 0x820(%rsp), %ymm2 vmovaps 0x800(%rsp), %ymm1 vmovaps 0x7e0(%rsp), %ymm3 vfmadd213ps %ymm3, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm1) + ymm3 vmovaps %ymm1, 0x40(%rsp) vmovaps 0x40(%rsp), %ymm2 vmovaps 0x100(%rsp), %ymm1 vmovaps %ymm2, 0x3e0(%rsp) vmovaps %ymm1, 0x3c0(%rsp) vbroadcastss 0x7bd20c(%rip), %ymm1 # 0x1e0bde0 vmovaps %ymm1, 0x3a0(%rsp) vmovaps 0x3e0(%rsp), %ymm3 vmovaps 0x3c0(%rsp), %ymm2 vmovaps 0x3a0(%rsp), %ymm1 vmovaps %ymm3, 0x880(%rsp) vmovaps %ymm2, 0x860(%rsp) vmovaps %ymm1, 0x840(%rsp) vmovaps 0x880(%rsp), %ymm2 vmovaps 0x860(%rsp), %ymm1 vmovaps 0x840(%rsp), %ymm3 vfmadd213ps %ymm3, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm1) + ymm3 vmovaps %ymm1, 0x40(%rsp) vmovaps 0x40(%rsp), %ymm2 vmovaps 0x100(%rsp), %ymm1 vmovaps %ymm2, 0x380(%rsp) vmovaps %ymm1, 0x360(%rsp) vbroadcastss 0x7bd181(%rip), %ymm1 # 0x1e0bde4 vmovaps %ymm1, 0x340(%rsp) vmovaps 0x380(%rsp), %ymm3 vmovaps 0x360(%rsp), %ymm2 vmovaps 0x340(%rsp), %ymm1 vmovaps %ymm3, 0x8e0(%rsp) vmovaps %ymm2, 0x8c0(%rsp) vmovaps %ymm1, 0x8a0(%rsp) vmovaps 0x8e0(%rsp), %ymm2 vmovaps 0x8c0(%rsp), %ymm1 vmovaps 0x8a0(%rsp), %ymm3 vfmadd213ps %ymm3, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm1) + ymm3 vmovaps %ymm1, 0x40(%rsp) vmovaps 0x40(%rsp), %ymm2 vmovaps 0x100(%rsp), %ymm1 vmovaps %ymm2, 0x320(%rsp) vmovaps %ymm1, 0x300(%rsp) vmovaps %ymm0, 0x2e0(%rsp) vmovaps 0x320(%rsp), %ymm2 vmovaps 0x300(%rsp), %ymm1 vmovaps 0x2e0(%rsp), %ymm0 vmovaps %ymm2, 0x940(%rsp) vmovaps %ymm1, 0x920(%rsp) vmovaps %ymm0, 0x900(%rsp) vmovaps 0x940(%rsp), %ymm1 vmovaps 0x920(%rsp), %ymm0 vmovaps 0x900(%rsp), %ymm2 vfmadd213ps %ymm2, %ymm1, %ymm0 # ymm0 = (ymm1 * ymm0) + ymm2 vmovaps %ymm0, 0x40(%rsp) vmovaps 0x40(%rsp), %ymm2 vmovaps 0xe0(%rsp), %ymm1 vmovaps 0x100(%rsp), %ymm0 vmovaps %ymm2, 0x2c0(%rsp) vmovaps %ymm1, 0x2a0(%rsp) vmovaps %ymm0, 0x280(%rsp) vmovaps 0x2c0(%rsp), %ymm2 vmovaps 0x2a0(%rsp), %ymm1 vmovaps 0x280(%rsp), %ymm0 vmovaps %ymm2, 0x9a0(%rsp) vmovaps %ymm1, 0x980(%rsp) vmovaps %ymm0, 0x960(%rsp) vmovaps 0x9a0(%rsp), %ymm1 vmovaps 0x980(%rsp), %ymm0 vmovaps 0x960(%rsp), %ymm2 vfmadd213ps %ymm2, %ymm1, %ymm0 # ymm0 = (ymm1 * ymm0) + ymm2 vmovaps %ymm0, 0x40(%rsp) vmovaps 0x40(%rsp), %ymm1 vmovaps 0x80(%rsp), %ymm0 vmovaps %ymm1, 0x640(%rsp) vmovaps %ymm0, 0x620(%rsp) vmovaps 0x640(%rsp), %ymm0 vmovaps 0x620(%rsp), %ymm1 vaddps %ymm1, %ymm0, %ymm0 vmovaps %ymm0, 0x40(%rsp) vmovaps 0xc0(%rsp), %ymm0 vmovaps %ymm0, 0x660(%rsp) vcvttps2dq 0x660(%rsp), %ymm0 vmovdqa %ymm0, 0xa0(%rsp) vmovdqa 0xa0(%rsp), %ymm0 vmovdqa %ymm0, 0x6a0(%rsp) vpbroadcastq 0x7bcfbf(%rip), %ymm0 # 0x1e0be18 vmovdqa %ymm0, 0x680(%rsp) vmovdqa 0x6a0(%rsp), %ymm1 vmovdqa 0x680(%rsp), %ymm0 vmovdqa %ymm1, 0xaa0(%rsp) vmovdqa %ymm0, 0xa80(%rsp) vmovdqa 0xaa0(%rsp), %ymm0 vmovdqa 0xa80(%rsp), %ymm1 vpaddd %ymm1, %ymm0, %ymm0 vmovdqa %ymm0, 0xa0(%rsp) vmovdqa 0xa0(%rsp), %ymm0 vmovdqa %ymm0, 0x6e0(%rsp) movl $0x17, 0x6dc(%rsp) vmovdqa 0x6e0(%rsp), %ymm0 movl 0x6dc(%rsp), %eax vmovdqa %ymm0, 0xae0(%rsp) 
movl %eax, 0xadc(%rsp) vmovdqa 0xae0(%rsp), %ymm0 movl 0xadc(%rsp), %eax vmovd %eax, %xmm1 vpslld %xmm1, %ymm0, %ymm0 vmovdqa %ymm0, 0xa0(%rsp) vmovdqa 0xa0(%rsp), %ymm0 vmovdqa %ymm0, 0x700(%rsp) vmovdqa 0x700(%rsp), %ymm0 vmovaps %ymm0, 0x20(%rsp) vmovaps 0x40(%rsp), %ymm1 vmovaps 0x20(%rsp), %ymm0 vmovaps %ymm1, 0x1e0(%rsp) vmovaps %ymm0, 0x1c0(%rsp) vmovaps 0x1e0(%rsp), %ymm0 vmulps 0x1c0(%rsp), %ymm0, %ymm0 vmovaps %ymm0, 0x40(%rsp) vmovaps 0x40(%rsp), %ymm0 movq %rbp, %rsp popq %rbp retq nopw %cs:(%rax,%rax) nop
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,283
ncnn::UnaryOp_x86_avx512_functor::unary_op_exp::func_pack4(float vector[4] const&) const
__m128 func_pack4(const __m128& x) const { return exp_ps(x); }
subq $0x4e8, %rsp # imm = 0x4E8 movq %rdi, -0x78(%rsp) movq %rsi, -0x80(%rsp) movq -0x80(%rsp), %rax vmovaps (%rax), %xmm0 vmovaps %xmm0, (%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovaps %xmm0, 0x30(%rsp) vmovaps 0x30(%rsp), %xmm0 vmovaps %xmm0, -0x10(%rsp) vbroadcastss 0x7ba056(%rip), %xmm0 # 0x1e09004 vmovaps %xmm0, -0x40(%rsp) vmovaps (%rsp), %xmm0 vmovaps %xmm0, 0xb0(%rsp) vbroadcastss 0x7bcdf5(%rip), %xmm0 # 0x1e0bdc0 vmovaps %xmm0, 0xa0(%rsp) vmovaps 0xb0(%rsp), %xmm0 vmovaps 0xa0(%rsp), %xmm1 vminps %xmm1, %xmm0, %xmm0 vmovaps %xmm0, (%rsp) vmovaps (%rsp), %xmm0 vmovaps %xmm0, 0xd0(%rsp) vbroadcastss 0x7bcdbe(%rip), %xmm0 # 0x1e0bdc4 vmovaps %xmm0, 0xc0(%rsp) vmovaps 0xd0(%rsp), %xmm0 vmovaps 0xc0(%rsp), %xmm1 vmaxps %xmm1, %xmm0, %xmm0 vmovaps %xmm0, (%rsp) vmovaps (%rsp), %xmm0 vmovaps %xmm0, 0x90(%rsp) vbroadcastss 0x7bcd87(%rip), %xmm0 # 0x1e0bdc8 vmovaps %xmm0, 0x80(%rsp) vmovaps 0x90(%rsp), %xmm0 vmovaps 0x80(%rsp), %xmm1 vmulps %xmm1, %xmm0, %xmm0 vmovaps %xmm0, -0x20(%rsp) vmovaps -0x20(%rsp), %xmm0 vmovaps %xmm0, 0x110(%rsp) vbroadcastss 0x7b9f9e(%rip), %xmm0 # 0x1e0901c vmovaps %xmm0, 0x100(%rsp) vmovaps 0x110(%rsp), %xmm1 vmovaps 0x100(%rsp), %xmm2 vaddps %xmm2, %xmm1, %xmm1 vmovaps %xmm1, -0x20(%rsp) vmovaps -0x20(%rsp), %xmm1 vmovaps %xmm1, 0x130(%rsp) vcvttps2dq 0x130(%rsp), %xmm1 vmovdqa %xmm1, -0x30(%rsp) vmovdqa -0x30(%rsp), %xmm1 vmovdqa %xmm1, 0x140(%rsp) vcvtdq2ps 0x140(%rsp), %xmm1 vmovaps %xmm1, -0x10(%rsp) vmovaps -0x10(%rsp), %xmm2 vmovaps -0x20(%rsp), %xmm1 vmovaps %xmm2, 0x160(%rsp) vmovaps %xmm1, 0x150(%rsp) vmovaps 0x150(%rsp), %xmm1 vmovaps 0x160(%rsp), %xmm2 vcmpltps %xmm2, %xmm1, %xmm1 vmovaps %xmm1, -0x50(%rsp) vmovaps -0x50(%rsp), %xmm2 vmovaps -0x40(%rsp), %xmm1 vmovaps %xmm2, 0x180(%rsp) vmovaps %xmm1, 0x170(%rsp) vmovdqa 0x180(%rsp), %xmm1 vmovdqa 0x170(%rsp), %xmm2 vpand %xmm2, %xmm1, %xmm1 vmovdqa %xmm1, -0x50(%rsp) vmovaps -0x10(%rsp), %xmm2 vmovaps -0x50(%rsp), %xmm1 vmovaps %xmm2, 0x20(%rsp) vmovaps %xmm1, 0x10(%rsp) vmovaps 0x20(%rsp), %xmm1 vmovaps 0x10(%rsp), %xmm2 vsubps %xmm2, %xmm1, %xmm1 vmovaps %xmm1, -0x20(%rsp) vmovaps -0x20(%rsp), %xmm2 vmovaps (%rsp), %xmm1 vmovaps %xmm2, 0x1e0(%rsp) vbroadcastss 0x7bcc2d(%rip), %xmm2 # 0x1e0bdcc vmovaps %xmm2, 0x1d0(%rsp) vmovaps %xmm1, 0x1c0(%rsp) vmovaps 0x1e0(%rsp), %xmm3 vmovaps 0x1d0(%rsp), %xmm2 vmovaps 0x1c0(%rsp), %xmm1 vmovaps %xmm3, 0x380(%rsp) vmovaps %xmm2, 0x370(%rsp) vmovaps %xmm1, 0x360(%rsp) vmovaps 0x380(%rsp), %xmm2 vmovaps 0x370(%rsp), %xmm1 vmovaps 0x360(%rsp), %xmm3 vfnmadd213ps %xmm3, %xmm2, %xmm1 # xmm1 = -(xmm2 * xmm1) + xmm3 vmovaps %xmm1, (%rsp) vmovaps -0x20(%rsp), %xmm2 vmovaps (%rsp), %xmm1 vmovaps %xmm2, 0x1b0(%rsp) vbroadcastss 0x7bcba7(%rip), %xmm2 # 0x1e0bdd0 vmovaps %xmm2, 0x1a0(%rsp) vmovaps %xmm1, 0x190(%rsp) vmovaps 0x1b0(%rsp), %xmm3 vmovaps 0x1a0(%rsp), %xmm2 vmovaps 0x190(%rsp), %xmm1 vmovaps %xmm3, 0x3b0(%rsp) vmovaps %xmm2, 0x3a0(%rsp) vmovaps %xmm1, 0x390(%rsp) vmovaps 0x3b0(%rsp), %xmm2 vmovaps 0x3a0(%rsp), %xmm1 vmovaps 0x390(%rsp), %xmm3 vfnmadd213ps %xmm3, %xmm2, %xmm1 # xmm1 = -(xmm2 * xmm1) + xmm3 vmovaps %xmm1, (%rsp) vmovaps (%rsp), %xmm1 vmovaps %xmm1, 0x70(%rsp) vmovaps %xmm1, 0x60(%rsp) vmovaps 0x70(%rsp), %xmm1 vmovaps 0x60(%rsp), %xmm2 vmulps %xmm2, %xmm1, %xmm1 vmovaps %xmm1, -0x10(%rsp) vbroadcastss 0x7bcb0e(%rip), %xmm1 # 0x1e0bdd4 vmovaps %xmm1, -0x60(%rsp) vmovaps -0x60(%rsp), %xmm2 vmovaps (%rsp), %xmm1 vmovaps %xmm2, 0x300(%rsp) vmovaps %xmm1, 0x2f0(%rsp) vbroadcastss 0x7bcae6(%rip), %xmm1 # 0x1e0bdd8 vmovaps %xmm1, 0x2e0(%rsp) vmovaps 
0x300(%rsp), %xmm3 vmovaps 0x2f0(%rsp), %xmm2 vmovaps 0x2e0(%rsp), %xmm1 vmovaps %xmm3, 0x3e0(%rsp) vmovaps %xmm2, 0x3d0(%rsp) vmovaps %xmm1, 0x3c0(%rsp) vmovaps 0x3e0(%rsp), %xmm2 vmovaps 0x3d0(%rsp), %xmm1 vmovaps 0x3c0(%rsp), %xmm3 vfmadd213ps %xmm3, %xmm2, %xmm1 # xmm1 = (xmm2 * xmm1) + xmm3 vmovaps %xmm1, -0x60(%rsp) vmovaps -0x60(%rsp), %xmm2 vmovaps (%rsp), %xmm1 vmovaps %xmm2, 0x2d0(%rsp) vmovaps %xmm1, 0x2c0(%rsp) vbroadcastss 0x7bca5f(%rip), %xmm1 # 0x1e0bddc vmovaps %xmm1, 0x2b0(%rsp) vmovaps 0x2d0(%rsp), %xmm3 vmovaps 0x2c0(%rsp), %xmm2 vmovaps 0x2b0(%rsp), %xmm1 vmovaps %xmm3, 0x410(%rsp) vmovaps %xmm2, 0x400(%rsp) vmovaps %xmm1, 0x3f0(%rsp) vmovaps 0x410(%rsp), %xmm2 vmovaps 0x400(%rsp), %xmm1 vmovaps 0x3f0(%rsp), %xmm3 vfmadd213ps %xmm3, %xmm2, %xmm1 # xmm1 = (xmm2 * xmm1) + xmm3 vmovaps %xmm1, -0x60(%rsp) vmovaps -0x60(%rsp), %xmm2 vmovaps (%rsp), %xmm1 vmovaps %xmm2, 0x2a0(%rsp) vmovaps %xmm1, 0x290(%rsp) vbroadcastss 0x7bc9d8(%rip), %xmm1 # 0x1e0bde0 vmovaps %xmm1, 0x280(%rsp) vmovaps 0x2a0(%rsp), %xmm3 vmovaps 0x290(%rsp), %xmm2 vmovaps 0x280(%rsp), %xmm1 vmovaps %xmm3, 0x440(%rsp) vmovaps %xmm2, 0x430(%rsp) vmovaps %xmm1, 0x420(%rsp) vmovaps 0x440(%rsp), %xmm2 vmovaps 0x430(%rsp), %xmm1 vmovaps 0x420(%rsp), %xmm3 vfmadd213ps %xmm3, %xmm2, %xmm1 # xmm1 = (xmm2 * xmm1) + xmm3 vmovaps %xmm1, -0x60(%rsp) vmovaps -0x60(%rsp), %xmm2 vmovaps (%rsp), %xmm1 vmovaps %xmm2, 0x270(%rsp) vmovaps %xmm1, 0x260(%rsp) vbroadcastss 0x7bc951(%rip), %xmm1 # 0x1e0bde4 vmovaps %xmm1, 0x250(%rsp) vmovaps 0x270(%rsp), %xmm3 vmovaps 0x260(%rsp), %xmm2 vmovaps 0x250(%rsp), %xmm1 vmovaps %xmm3, 0x470(%rsp) vmovaps %xmm2, 0x460(%rsp) vmovaps %xmm1, 0x450(%rsp) vmovaps 0x470(%rsp), %xmm2 vmovaps 0x460(%rsp), %xmm1 vmovaps 0x450(%rsp), %xmm3 vfmadd213ps %xmm3, %xmm2, %xmm1 # xmm1 = (xmm2 * xmm1) + xmm3 vmovaps %xmm1, -0x60(%rsp) vmovaps -0x60(%rsp), %xmm2 vmovaps (%rsp), %xmm1 vmovaps %xmm2, 0x240(%rsp) vmovaps %xmm1, 0x230(%rsp) vmovaps %xmm0, 0x220(%rsp) vmovaps 0x240(%rsp), %xmm2 vmovaps 0x230(%rsp), %xmm1 vmovaps 0x220(%rsp), %xmm0 vmovaps %xmm2, 0x4a0(%rsp) vmovaps %xmm1, 0x490(%rsp) vmovaps %xmm0, 0x480(%rsp) vmovaps 0x4a0(%rsp), %xmm1 vmovaps 0x490(%rsp), %xmm0 vmovaps 0x480(%rsp), %xmm2 vfmadd213ps %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovaps %xmm0, -0x60(%rsp) vmovaps -0x60(%rsp), %xmm2 vmovaps -0x10(%rsp), %xmm1 vmovaps (%rsp), %xmm0 vmovaps %xmm2, 0x210(%rsp) vmovaps %xmm1, 0x200(%rsp) vmovaps %xmm0, 0x1f0(%rsp) vmovaps 0x210(%rsp), %xmm2 vmovaps 0x200(%rsp), %xmm1 vmovaps 0x1f0(%rsp), %xmm0 vmovaps %xmm2, 0x4d0(%rsp) vmovaps %xmm1, 0x4c0(%rsp) vmovaps %xmm0, 0x4b0(%rsp) vmovaps 0x4d0(%rsp), %xmm1 vmovaps 0x4c0(%rsp), %xmm0 vmovaps 0x4b0(%rsp), %xmm2 vfmadd213ps %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovaps %xmm0, -0x60(%rsp) vmovaps -0x60(%rsp), %xmm1 vmovaps -0x40(%rsp), %xmm0 vmovaps %xmm1, 0xf0(%rsp) vmovaps %xmm0, 0xe0(%rsp) vmovaps 0xf0(%rsp), %xmm0 vmovaps 0xe0(%rsp), %xmm1 vaddps %xmm1, %xmm0, %xmm0 vmovaps %xmm0, -0x60(%rsp) vmovaps -0x20(%rsp), %xmm0 vmovaps %xmm0, 0x120(%rsp) vcvttps2dq 0x120(%rsp), %xmm0 vmovdqa %xmm0, -0x30(%rsp) vmovdqa -0x30(%rsp), %xmm0 vmovdqa %xmm0, 0x320(%rsp) vpbroadcastq 0x7bc7a6(%rip), %xmm0 # 0x1e0be18 vmovdqa %xmm0, 0x310(%rsp) vmovdqa 0x320(%rsp), %xmm0 vmovdqa 0x310(%rsp), %xmm1 vpaddd %xmm1, %xmm0, %xmm0 vmovdqa %xmm0, -0x30(%rsp) vmovdqa -0x30(%rsp), %xmm0 vmovdqa %xmm0, 0x340(%rsp) movl $0x17, 0x33c(%rsp) vmovdqa 0x340(%rsp), %xmm0 movl 0x33c(%rsp), %eax vmovd %eax, %xmm1 vpslld %xmm1, %xmm0, %xmm0 vmovdqa %xmm0, 
-0x30(%rsp) vmovdqa -0x30(%rsp), %xmm0 vmovdqa %xmm0, 0x350(%rsp) vmovdqa 0x350(%rsp), %xmm0 vmovaps %xmm0, -0x70(%rsp) vmovaps -0x60(%rsp), %xmm1 vmovaps -0x70(%rsp), %xmm0 vmovaps %xmm1, 0x50(%rsp) vmovaps %xmm0, 0x40(%rsp) vmovaps 0x50(%rsp), %xmm0 vmulps 0x40(%rsp), %xmm0, %xmm0 vmovaps %xmm0, -0x60(%rsp) vmovaps -0x60(%rsp), %xmm0 addq $0x4e8, %rsp # imm = 0x4E8 retq nopw %cs:(%rax,%rax) nop
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,284
ncnn::UnaryOp_x86_avx512_functor::unary_op_exp::func(float const&) const
float func(const float& x) const { return (float)exp(x); }
subq $0x18, %rsp movq %rdi, 0x10(%rsp) movq %rsi, 0x8(%rsp) movq 0x8(%rsp), %rax vmovss (%rax), %xmm0 callq 0x100cf0 addq $0x18, %rsp retq nopw %cs:(%rax,%rax) nopl (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,285
ncnn::UnaryOp_x86_avx512_functor::unary_op_log::func_pack16(float vector[16] const&) const
__m512 func_pack16(const __m512& x) const { return log512_ps(x); }
pushq %rbp movq %rsp, %rbp andq $-0x40, %rsp subq $0x1500, %rsp # imm = 0x1500 movq %rdi, 0x38(%rsp) movq %rsi, 0x30(%rsp) movq 0x30(%rsp), %rax vmovaps (%rax), %zmm0 vmovaps %zmm0, 0x240(%rsp) vmovaps 0x7cd6aa(%rip), %zmm0 # 0x1e1ce40 vmovaps %zmm0, 0x1c0(%rsp) vmovaps 0x240(%rsp), %zmm0 vpxor %xmm1, %xmm1, %xmm1 vmovaps %zmm1, 0x380(%rsp) vmovaps 0x380(%rsp), %zmm1 vcmpleps %zmm1, %zmm0, %k0 kmovw %k0, 0x1be(%rsp) vmovaps 0x240(%rsp), %zmm1 vmovaps 0x7cdca4(%rip), %zmm0 # 0x1e1d480 vmovaps %zmm1, 0x580(%rsp) vmovaps %zmm0, 0x540(%rsp) vmovaps 0x580(%rsp), %zmm0 vmovaps 0x540(%rsp), %zmm1 vmaxps %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0x240(%rsp) vmovaps 0x240(%rsp), %zmm0 vmovaps %zmm0, 0x2c0(%rsp) vmovaps 0x2c0(%rsp), %zmm0 vmovaps %zmm0, 0x10c0(%rsp) movl $0x17, 0x10bc(%rsp) vmovdqa64 0x10c0(%rsp), %zmm0 vmovd 0x10bc(%rsp), %xmm1 vpsrld %xmm1, %zmm0, %zmm0 vmovdqa64 %zmm0, 0x200(%rsp) vmovaps 0x240(%rsp), %zmm1 vmovaps 0x7cdc5a(%rip), %zmm0 # 0x1e1d4c0 vmovaps %zmm1, 0x1140(%rsp) vmovaps %zmm0, 0x1100(%rsp) vmovdqa64 0x1140(%rsp), %zmm0 vmovdqa64 0x1100(%rsp), %zmm1 vpandd %zmm1, %zmm0, %zmm0 vmovdqa64 %zmm0, 0x240(%rsp) vmovaps 0x240(%rsp), %zmm1 vmovaps 0x7cd69a(%rip), %zmm0 # 0x1e1cf40 vmovaps %zmm1, 0x1240(%rsp) vmovaps %zmm0, 0x1200(%rsp) vmovdqa64 0x1240(%rsp), %zmm0 vmovdqa64 0x1200(%rsp), %zmm1 vpord %zmm1, %zmm0, %zmm0 vmovdqa64 %zmm0, 0x240(%rsp) vmovdqa64 0x200(%rsp), %zmm1 vmovdqa64 0x7cd89a(%rip), %zmm0 # 0x1e1d180 vmovdqa64 %zmm1, 0x12c0(%rsp) vmovdqa64 %zmm0, 0x1280(%rsp) vmovdqa64 0x12c0(%rsp), %zmm0 vmovdqa64 0x1280(%rsp), %zmm1 vpsubd %zmm1, %zmm0, %zmm0 vmovdqa64 %zmm0, 0x200(%rsp) vmovdqa64 0x200(%rsp), %zmm0 vmovdqa64 %zmm0, 0x1300(%rsp) vcvtdq2ps 0x1300(%rsp), %zmm0 vmovaps %zmm0, 0x140(%rsp) vmovaps 0x140(%rsp), %zmm1 vmovaps 0x1c0(%rsp), %zmm0 vmovaps %zmm1, 0x1040(%rsp) vmovaps %zmm0, 0x1000(%rsp) vmovaps 0x1040(%rsp), %zmm0 vmovaps 0x1000(%rsp), %zmm1 vaddps %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0x140(%rsp) vmovaps 0x240(%rsp), %zmm0 vmovaps 0x7cdb7c(%rip), %zmm1 # 0x1e1d500 vcmpltps %zmm1, %zmm0, %k0 kmovw %k0, 0x13e(%rsp) vmovaps 0x240(%rsp), %zmm1 vmovaps 0x1c0(%rsp), %zmm0 vmovaps %zmm1, 0x340(%rsp) vmovaps %zmm0, 0x300(%rsp) vmovaps 0x340(%rsp), %zmm0 vmovaps 0x300(%rsp), %zmm1 vsubps %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0xc0(%rsp) vmovaps 0x140(%rsp), %zmm1 movw 0x13e(%rsp), %ax vmovaps 0x1c0(%rsp), %zmm0 vmovaps %zmm1, 0xe00(%rsp) movw %ax, 0xdfe(%rsp) vmovaps %zmm1, 0xd80(%rsp) vmovaps %zmm0, 0xd40(%rsp) vmovaps 0xd80(%rsp), %zmm1 vmovaps 0xd40(%rsp), %zmm0 kmovw 0xdfe(%rsp), %k1 vmovaps %zmm1, 0xe80(%rsp) vmovaps %zmm0, 0xe40(%rsp) vmovaps 0xe80(%rsp), %zmm1 vmovaps 0xe40(%rsp), %zmm2 vmovaps 0xe00(%rsp), %zmm0 vsubps %zmm2, %zmm1, %zmm0 {%k1} vmovaps %zmm0, 0x140(%rsp) vmovaps 0xc0(%rsp), %zmm1 movw 0x13e(%rsp), %ax vmovaps 0x240(%rsp), %zmm0 vmovaps %zmm1, 0x1400(%rsp) movw %ax, 0x13fe(%rsp) vmovaps %zmm1, 0x1380(%rsp) vmovaps %zmm0, 0x1340(%rsp) vmovaps 0x1380(%rsp), %zmm1 vmovaps 0x1340(%rsp), %zmm0 kmovw 0x13fe(%rsp), %k1 vmovaps %zmm1, 0x1480(%rsp) vmovaps %zmm0, 0x1440(%rsp) vmovaps 0x1480(%rsp), %zmm1 vmovaps 0x1440(%rsp), %zmm2 vmovaps 0x1400(%rsp), %zmm0 vaddps %zmm2, %zmm1, %zmm0 {%k1} vmovaps %zmm0, 0x240(%rsp) vmovaps 0x240(%rsp), %zmm0 vmovaps %zmm0, 0x500(%rsp) vmovaps %zmm0, 0x4c0(%rsp) vmovaps 0x500(%rsp), %zmm0 vmovaps 0x4c0(%rsp), %zmm1 vmulps %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0x80(%rsp) vmovaps 0x7cda20(%rip), %zmm0 # 0x1e1d540 vmovaps %zmm0, 0x40(%rsp) vmovaps 0x40(%rsp), %zmm2 vmovaps 0x240(%rsp), %zmm1 vmovaps 0x7cda3e(%rip), %zmm0 # 
0x1e1d580 vmovaps %zmm2, 0xd00(%rsp) vmovaps %zmm1, 0xcc0(%rsp) vmovaps %zmm0, 0xc80(%rsp) vmovaps 0xd00(%rsp), %zmm1 vmovaps 0xcc0(%rsp), %zmm0 vmovaps 0xc80(%rsp), %zmm2 vfmadd213ps %zmm2, %zmm1, %zmm0 # zmm0 = (zmm1 * zmm0) + zmm2 vmovaps %zmm0, 0x40(%rsp) vmovaps 0x40(%rsp), %zmm2 vmovaps 0x240(%rsp), %zmm1 vmovaps 0x7cda26(%rip), %zmm0 # 0x1e1d5c0 vmovaps %zmm2, 0xc40(%rsp) vmovaps %zmm1, 0xc00(%rsp) vmovaps %zmm0, 0xbc0(%rsp) vmovaps 0xc40(%rsp), %zmm1 vmovaps 0xc00(%rsp), %zmm0 vmovaps 0xbc0(%rsp), %zmm2 vfmadd213ps %zmm2, %zmm1, %zmm0 # zmm0 = (zmm1 * zmm0) + zmm2 vmovaps %zmm0, 0x40(%rsp) vmovaps 0x40(%rsp), %zmm2 vmovaps 0x240(%rsp), %zmm1 vmovaps 0x7cda0e(%rip), %zmm0 # 0x1e1d600 vmovaps %zmm2, 0xb80(%rsp) vmovaps %zmm1, 0xb40(%rsp) vmovaps %zmm0, 0xb00(%rsp) vmovaps 0xb80(%rsp), %zmm1 vmovaps 0xb40(%rsp), %zmm0 vmovaps 0xb00(%rsp), %zmm2 vfmadd213ps %zmm2, %zmm1, %zmm0 # zmm0 = (zmm1 * zmm0) + zmm2 vmovaps %zmm0, 0x40(%rsp) vmovaps 0x40(%rsp), %zmm2 vmovaps 0x240(%rsp), %zmm1 vmovaps 0x7cd9f6(%rip), %zmm0 # 0x1e1d640 vmovaps %zmm2, 0xac0(%rsp) vmovaps %zmm1, 0xa80(%rsp) vmovaps %zmm0, 0xa40(%rsp) vmovaps 0xac0(%rsp), %zmm1 vmovaps 0xa80(%rsp), %zmm0 vmovaps 0xa40(%rsp), %zmm2 vfmadd213ps %zmm2, %zmm1, %zmm0 # zmm0 = (zmm1 * zmm0) + zmm2 vmovaps %zmm0, 0x40(%rsp) vmovaps 0x40(%rsp), %zmm2 vmovaps 0x240(%rsp), %zmm1 vmovaps 0x7cd9de(%rip), %zmm0 # 0x1e1d680 vmovaps %zmm2, 0xa00(%rsp) vmovaps %zmm1, 0x9c0(%rsp) vmovaps %zmm0, 0x980(%rsp) vmovaps 0xa00(%rsp), %zmm1 vmovaps 0x9c0(%rsp), %zmm0 vmovaps 0x980(%rsp), %zmm2 vfmadd213ps %zmm2, %zmm1, %zmm0 # zmm0 = (zmm1 * zmm0) + zmm2 vmovaps %zmm0, 0x40(%rsp) vmovaps 0x40(%rsp), %zmm2 vmovaps 0x240(%rsp), %zmm1 vmovaps 0x7cd9c6(%rip), %zmm0 # 0x1e1d6c0 vmovaps %zmm2, 0x940(%rsp) vmovaps %zmm1, 0x900(%rsp) vmovaps %zmm0, 0x8c0(%rsp) vmovaps 0x940(%rsp), %zmm1 vmovaps 0x900(%rsp), %zmm0 vmovaps 0x8c0(%rsp), %zmm2 vfmadd213ps %zmm2, %zmm1, %zmm0 # zmm0 = (zmm1 * zmm0) + zmm2 vmovaps %zmm0, 0x40(%rsp) vmovaps 0x40(%rsp), %zmm2 vmovaps 0x240(%rsp), %zmm1 vmovaps 0x7cd9ae(%rip), %zmm0 # 0x1e1d700 vmovaps %zmm2, 0x880(%rsp) vmovaps %zmm1, 0x840(%rsp) vmovaps %zmm0, 0x800(%rsp) vmovaps 0x880(%rsp), %zmm1 vmovaps 0x840(%rsp), %zmm0 vmovaps 0x800(%rsp), %zmm2 vfmadd213ps %zmm2, %zmm1, %zmm0 # zmm0 = (zmm1 * zmm0) + zmm2 vmovaps %zmm0, 0x40(%rsp) vmovaps 0x40(%rsp), %zmm2 vmovaps 0x240(%rsp), %zmm1 vmovaps 0x7cd996(%rip), %zmm0 # 0x1e1d740 vmovaps %zmm2, 0x7c0(%rsp) vmovaps %zmm1, 0x780(%rsp) vmovaps %zmm0, 0x740(%rsp) vmovaps 0x7c0(%rsp), %zmm1 vmovaps 0x780(%rsp), %zmm0 vmovaps 0x740(%rsp), %zmm2 vfmadd213ps %zmm2, %zmm1, %zmm0 # zmm0 = (zmm1 * zmm0) + zmm2 vmovaps %zmm0, 0x40(%rsp) vmovaps 0x40(%rsp), %zmm1 vmovaps 0x240(%rsp), %zmm0 vmovaps %zmm1, 0x480(%rsp) vmovaps %zmm0, 0x440(%rsp) vmovaps 0x480(%rsp), %zmm0 vmovaps 0x440(%rsp), %zmm1 vmulps %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0x40(%rsp) vmovaps 0x40(%rsp), %zmm1 vmovaps 0x80(%rsp), %zmm0 vmovaps %zmm1, 0x400(%rsp) vmovaps %zmm0, 0x3c0(%rsp) vmovaps 0x400(%rsp), %zmm0 vmovaps 0x3c0(%rsp), %zmm1 vmulps %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0x40(%rsp) vmovaps 0x140(%rsp), %zmm2 vmovaps 0x7cd90a(%rip), %zmm1 # 0x1e1d780 vmovaps 0x40(%rsp), %zmm0 vmovaps %zmm2, 0x700(%rsp) vmovaps %zmm1, 0x6c0(%rsp) vmovaps %zmm0, 0x680(%rsp) vmovaps 0x700(%rsp), %zmm1 vmovaps 0x6c0(%rsp), %zmm0 vmovaps 0x680(%rsp), %zmm2 vfmadd213ps %zmm2, %zmm1, %zmm0 # zmm0 = (zmm1 * zmm0) + zmm2 vmovaps %zmm0, 0x40(%rsp) vmovaps 0x80(%rsp), %zmm2 vmovaps 0x7cd072(%rip), %zmm1 # 0x1e1cf40 vmovaps 0x40(%rsp), %zmm0 vmovaps 
%zmm2, 0xf40(%rsp) vmovaps %zmm1, 0xf00(%rsp) vmovaps %zmm0, 0xec0(%rsp) vmovaps 0xf40(%rsp), %zmm1 vmovaps 0xf00(%rsp), %zmm0 vmovaps 0xec0(%rsp), %zmm2 vfnmadd213ps %zmm2, %zmm1, %zmm0 # zmm0 = -(zmm1 * zmm0) + zmm2 vmovaps %zmm0, 0x40(%rsp) vmovaps 0x240(%rsp), %zmm1 vmovaps 0x40(%rsp), %zmm0 vmovaps %zmm1, 0xfc0(%rsp) vmovaps %zmm0, 0xf80(%rsp) vmovaps 0xfc0(%rsp), %zmm0 vmovaps 0xf80(%rsp), %zmm1 vaddps %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0x240(%rsp) vmovaps 0x140(%rsp), %zmm2 vmovaps 0x7cd85c(%rip), %zmm1 # 0x1e1d7c0 vmovaps 0x240(%rsp), %zmm0 vmovaps %zmm2, 0x640(%rsp) vmovaps %zmm1, 0x600(%rsp) vmovaps %zmm0, 0x5c0(%rsp) vmovaps 0x640(%rsp), %zmm1 vmovaps 0x600(%rsp), %zmm0 vmovaps 0x5c0(%rsp), %zmm2 vfmadd213ps %zmm2, %zmm1, %zmm0 # zmm0 = (zmm1 * zmm0) + zmm2 vmovaps %zmm0, 0x240(%rsp) vmovaps 0x240(%rsp), %zmm1 movw 0x1be(%rsp), %ax movw %ax, 0x14ee(%rsp) movw 0x14ee(%rsp), %cx movw %cx, %ax kmovd %eax, %k0 vpmovm2d %k0, %zmm0 vmovdqa64 %zmm0, 0x280(%rsp) vmovdqa64 0x280(%rsp), %zmm0 vmovaps %zmm1, 0x11c0(%rsp) vmovaps %zmm0, 0x1180(%rsp) vmovaps 0x11c0(%rsp), %zmm0 vmovaps 0x1180(%rsp), %zmm1 vpord %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0x40(%rsp) vmovaps 0x40(%rsp), %zmm0 movq %rbp, %rsp popq %rbp retq nopw %cs:(%rax,%rax) nopl (%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,286
ncnn::UnaryOp_x86_avx512_functor::unary_op_log::func_pack8(float vector[8] const&) const
__m256 func_pack8(const __m256& x) const { return log256_ps(x); }
pushq %rbp movq %rsp, %rbp andq $-0x20, %rsp subq $0xe80, %rsp # imm = 0xE80 movq %rdi, 0x18(%rsp) movq %rsi, 0x10(%rsp) movq 0x10(%rsp), %rax vmovaps (%rax), %ymm0 vmovaps %ymm0, 0x120(%rsp) vbroadcastss 0x7b8fa0(%rip), %ymm0 # 0x1e09004 vmovaps %ymm0, 0xe0(%rsp) vmovaps 0x120(%rsp), %ymm0 vpxor %xmm1, %xmm1, %xmm1 vmovaps %ymm1, 0x200(%rsp) vmovaps 0x200(%rsp), %ymm1 vcmpleps %ymm1, %ymm0, %ymm0 vmovdqa %ymm0, 0xc0(%rsp) vmovaps 0x120(%rsp), %ymm0 vmovaps %ymm0, 0x160(%rsp) vbroadcastss 0x7bbd33(%rip), %ymm0 # 0x1e0bde8 vmovaps %ymm0, 0x140(%rsp) vmovaps 0x160(%rsp), %ymm0 vmovaps 0x140(%rsp), %ymm1 vmaxps %ymm1, %ymm0, %ymm0 vmovaps %ymm0, 0x120(%rsp) vmovaps 0x120(%rsp), %ymm0 vmovaps %ymm0, 0xce0(%rsp) vmovaps 0xce0(%rsp), %ymm0 vmovaps %ymm0, 0xcc0(%rsp) movl $0x17, 0xcbc(%rsp) vmovdqa 0xcc0(%rsp), %ymm0 movl 0xcbc(%rsp), %eax vmovdqa %ymm0, 0xe00(%rsp) movl %eax, 0xdfc(%rsp) vmovdqa 0xe00(%rsp), %ymm0 vmovd 0xdfc(%rsp), %xmm1 vpsrld %xmm1, %ymm0, %ymm0 vmovdqa %ymm0, 0x100(%rsp) vmovaps 0x120(%rsp), %ymm0 vmovaps %ymm0, 0x740(%rsp) vbroadcastss 0x7bbc86(%rip), %ymm0 # 0x1e0bdec vmovaps %ymm0, 0x720(%rsp) vmovdqa 0x740(%rsp), %ymm0 vmovdqa 0x720(%rsp), %ymm1 vpand %ymm1, %ymm0, %ymm0 vmovdqa %ymm0, 0x120(%rsp) vmovaps 0x120(%rsp), %ymm0 vmovaps %ymm0, 0xd60(%rsp) vbroadcastss 0x7b8e73(%rip), %ymm1 # 0x1e0901c vmovaps %ymm1, 0xd40(%rsp) vmovdqa 0xd60(%rsp), %ymm0 vmovdqa 0xd40(%rsp), %ymm2 vpor %ymm2, %ymm0, %ymm0 vmovdqa %ymm0, 0x120(%rsp) vmovdqa 0x100(%rsp), %ymm0 vmovdqa %ymm0, 0xda0(%rsp) vpbroadcastq 0x7bbc2c(%rip), %ymm0 # 0x1e0be18 vmovdqa %ymm0, 0xd80(%rsp) vmovdqa 0xda0(%rsp), %ymm2 vmovdqa 0xd80(%rsp), %ymm0 vmovdqa %ymm2, 0xe40(%rsp) vmovdqa %ymm0, 0xe20(%rsp) vmovdqa 0xe40(%rsp), %ymm0 vmovdqa 0xe20(%rsp), %ymm2 vpsubd %ymm2, %ymm0, %ymm0 vmovdqa %ymm0, 0x100(%rsp) vmovdqa 0x100(%rsp), %ymm0 vmovdqa %ymm0, 0xdc0(%rsp) vcvtdq2ps 0xdc0(%rsp), %ymm0 vmovaps %ymm0, 0xa0(%rsp) vmovaps 0xa0(%rsp), %ymm2 vmovaps 0xe0(%rsp), %ymm0 vmovaps %ymm2, 0x860(%rsp) vmovaps %ymm0, 0x840(%rsp) vmovaps 0x860(%rsp), %ymm0 vmovaps 0x840(%rsp), %ymm2 vaddps %ymm2, %ymm0, %ymm0 vmovaps %ymm0, 0xa0(%rsp) vmovaps 0x120(%rsp), %ymm0 vbroadcastss 0x7bbb3f(%rip), %ymm2 # 0x1e0bdf0 vcmpltps %ymm2, %ymm0, %ymm0 vmovdqa %ymm0, 0x80(%rsp) vmovaps 0x120(%rsp), %ymm2 vmovaps 0x80(%rsp), %ymm0 vmovaps %ymm2, 0x700(%rsp) vmovaps %ymm0, 0x6e0(%rsp) vmovdqa 0x700(%rsp), %ymm0 vmovdqa 0x6e0(%rsp), %ymm2 vpand %ymm2, %ymm0, %ymm0 vmovdqa %ymm0, 0x60(%rsp) vmovaps 0x120(%rsp), %ymm2 vmovaps 0xe0(%rsp), %ymm0 vmovaps %ymm2, 0x1e0(%rsp) vmovaps %ymm0, 0x1c0(%rsp) vmovaps 0x1e0(%rsp), %ymm0 vmovaps 0x1c0(%rsp), %ymm2 vsubps %ymm2, %ymm0, %ymm0 vmovaps %ymm0, 0x120(%rsp) vmovaps 0xa0(%rsp), %ymm2 vmovaps 0xe0(%rsp), %ymm3 vmovaps 0x80(%rsp), %ymm0 vmovaps %ymm3, 0x6c0(%rsp) vmovaps %ymm0, 0x6a0(%rsp) vmovdqa 0x6c0(%rsp), %ymm0 vmovdqa 0x6a0(%rsp), %ymm3 vpand %ymm3, %ymm0, %ymm0 vmovaps %ymm2, 0x1a0(%rsp) vmovdqa %ymm0, 0x180(%rsp) vmovaps 0x1a0(%rsp), %ymm0 vmovaps 0x180(%rsp), %ymm2 vsubps %ymm2, %ymm0, %ymm0 vmovaps %ymm0, 0xa0(%rsp) vmovaps 0x120(%rsp), %ymm2 vmovaps 0x60(%rsp), %ymm0 vmovaps %ymm2, 0x820(%rsp) vmovaps %ymm0, 0x800(%rsp) vmovaps 0x820(%rsp), %ymm0 vmovaps 0x800(%rsp), %ymm2 vaddps %ymm2, %ymm0, %ymm0 vmovaps %ymm0, 0x120(%rsp) vmovaps 0x120(%rsp), %ymm0 vmovaps %ymm0, 0x2c0(%rsp) vmovaps %ymm0, 0x2a0(%rsp) vmovaps 0x2c0(%rsp), %ymm0 vmovaps 0x2a0(%rsp), %ymm2 vmulps %ymm2, %ymm0, %ymm0 vmovaps %ymm0, 0x40(%rsp) vbroadcastss 0x7bb9be(%rip), %ymm0 # 0x1e0bdf4 vmovaps %ymm0, 0x20(%rsp) vmovaps 
0x20(%rsp), %ymm2 vmovaps 0x120(%rsp), %ymm0 vmovaps %ymm2, 0x680(%rsp) vmovaps %ymm0, 0x660(%rsp) vbroadcastss 0x7bb992(%rip), %ymm0 # 0x1e0bdf8 vmovaps %ymm0, 0x640(%rsp) vmovaps 0x680(%rsp), %ymm3 vmovaps 0x660(%rsp), %ymm2 vmovaps 0x640(%rsp), %ymm0 vmovaps %ymm3, 0x8c0(%rsp) vmovaps %ymm2, 0x8a0(%rsp) vmovaps %ymm0, 0x880(%rsp) vmovaps 0x8c0(%rsp), %ymm2 vmovaps 0x8a0(%rsp), %ymm0 vmovaps 0x880(%rsp), %ymm3 vfmadd213ps %ymm3, %ymm2, %ymm0 # ymm0 = (ymm2 * ymm0) + ymm3 vmovaps %ymm0, 0x20(%rsp) vmovaps 0x20(%rsp), %ymm2 vmovaps 0x120(%rsp), %ymm0 vmovaps %ymm2, 0x620(%rsp) vmovaps %ymm0, 0x600(%rsp) vbroadcastss 0x7bb907(%rip), %ymm0 # 0x1e0bdfc vmovaps %ymm0, 0x5e0(%rsp) vmovaps 0x620(%rsp), %ymm3 vmovaps 0x600(%rsp), %ymm2 vmovaps 0x5e0(%rsp), %ymm0 vmovaps %ymm3, 0x920(%rsp) vmovaps %ymm2, 0x900(%rsp) vmovaps %ymm0, 0x8e0(%rsp) vmovaps 0x920(%rsp), %ymm2 vmovaps 0x900(%rsp), %ymm0 vmovaps 0x8e0(%rsp), %ymm3 vfmadd213ps %ymm3, %ymm2, %ymm0 # ymm0 = (ymm2 * ymm0) + ymm3 vmovaps %ymm0, 0x20(%rsp) vmovaps 0x20(%rsp), %ymm2 vmovaps 0x120(%rsp), %ymm0 vmovaps %ymm2, 0x5c0(%rsp) vmovaps %ymm0, 0x5a0(%rsp) vbroadcastss 0x7bb87c(%rip), %ymm0 # 0x1e0be00 vmovaps %ymm0, 0x580(%rsp) vmovaps 0x5c0(%rsp), %ymm3 vmovaps 0x5a0(%rsp), %ymm2 vmovaps 0x580(%rsp), %ymm0 vmovaps %ymm3, 0x980(%rsp) vmovaps %ymm2, 0x960(%rsp) vmovaps %ymm0, 0x940(%rsp) vmovaps 0x980(%rsp), %ymm2 vmovaps 0x960(%rsp), %ymm0 vmovaps 0x940(%rsp), %ymm3 vfmadd213ps %ymm3, %ymm2, %ymm0 # ymm0 = (ymm2 * ymm0) + ymm3 vmovaps %ymm0, 0x20(%rsp) vmovaps 0x20(%rsp), %ymm2 vmovaps 0x120(%rsp), %ymm0 vmovaps %ymm2, 0x560(%rsp) vmovaps %ymm0, 0x540(%rsp) vbroadcastss 0x7bb7f1(%rip), %ymm0 # 0x1e0be04 vmovaps %ymm0, 0x520(%rsp) vmovaps 0x560(%rsp), %ymm3 vmovaps 0x540(%rsp), %ymm2 vmovaps 0x520(%rsp), %ymm0 vmovaps %ymm3, 0x9e0(%rsp) vmovaps %ymm2, 0x9c0(%rsp) vmovaps %ymm0, 0x9a0(%rsp) vmovaps 0x9e0(%rsp), %ymm2 vmovaps 0x9c0(%rsp), %ymm0 vmovaps 0x9a0(%rsp), %ymm3 vfmadd213ps %ymm3, %ymm2, %ymm0 # ymm0 = (ymm2 * ymm0) + ymm3 vmovaps %ymm0, 0x20(%rsp) vmovaps 0x20(%rsp), %ymm2 vmovaps 0x120(%rsp), %ymm0 vmovaps %ymm2, 0x500(%rsp) vmovaps %ymm0, 0x4e0(%rsp) vbroadcastss 0x7bb766(%rip), %ymm0 # 0x1e0be08 vmovaps %ymm0, 0x4c0(%rsp) vmovaps 0x500(%rsp), %ymm3 vmovaps 0x4e0(%rsp), %ymm2 vmovaps 0x4c0(%rsp), %ymm0 vmovaps %ymm3, 0xa40(%rsp) vmovaps %ymm2, 0xa20(%rsp) vmovaps %ymm0, 0xa00(%rsp) vmovaps 0xa40(%rsp), %ymm2 vmovaps 0xa20(%rsp), %ymm0 vmovaps 0xa00(%rsp), %ymm3 vfmadd213ps %ymm3, %ymm2, %ymm0 # ymm0 = (ymm2 * ymm0) + ymm3 vmovaps %ymm0, 0x20(%rsp) vmovaps 0x20(%rsp), %ymm2 vmovaps 0x120(%rsp), %ymm0 vmovaps %ymm2, 0x4a0(%rsp) vmovaps %ymm0, 0x480(%rsp) vbroadcastss 0x7bb6db(%rip), %ymm0 # 0x1e0be0c vmovaps %ymm0, 0x460(%rsp) vmovaps 0x4a0(%rsp), %ymm3 vmovaps 0x480(%rsp), %ymm2 vmovaps 0x460(%rsp), %ymm0 vmovaps %ymm3, 0xaa0(%rsp) vmovaps %ymm2, 0xa80(%rsp) vmovaps %ymm0, 0xa60(%rsp) vmovaps 0xaa0(%rsp), %ymm2 vmovaps 0xa80(%rsp), %ymm0 vmovaps 0xa60(%rsp), %ymm3 vfmadd213ps %ymm3, %ymm2, %ymm0 # ymm0 = (ymm2 * ymm0) + ymm3 vmovaps %ymm0, 0x20(%rsp) vmovaps 0x20(%rsp), %ymm2 vmovaps 0x120(%rsp), %ymm0 vmovaps %ymm2, 0x440(%rsp) vmovaps %ymm0, 0x420(%rsp) vbroadcastss 0x7bb650(%rip), %ymm0 # 0x1e0be10 vmovaps %ymm0, 0x400(%rsp) vmovaps 0x440(%rsp), %ymm3 vmovaps 0x420(%rsp), %ymm2 vmovaps 0x400(%rsp), %ymm0 vmovaps %ymm3, 0xb00(%rsp) vmovaps %ymm2, 0xae0(%rsp) vmovaps %ymm0, 0xac0(%rsp) vmovaps 0xb00(%rsp), %ymm2 vmovaps 0xae0(%rsp), %ymm0 vmovaps 0xac0(%rsp), %ymm3 vfmadd213ps %ymm3, %ymm2, %ymm0 # ymm0 = (ymm2 * ymm0) + ymm3 
vmovaps %ymm0, 0x20(%rsp) vmovaps 0x20(%rsp), %ymm2 vmovaps 0x120(%rsp), %ymm0 vmovaps %ymm2, 0x3e0(%rsp) vmovaps %ymm0, 0x3c0(%rsp) vbroadcastss 0x7bb5c5(%rip), %ymm0 # 0x1e0be14 vmovaps %ymm0, 0x3a0(%rsp) vmovaps 0x3e0(%rsp), %ymm3 vmovaps 0x3c0(%rsp), %ymm2 vmovaps 0x3a0(%rsp), %ymm0 vmovaps %ymm3, 0xb60(%rsp) vmovaps %ymm2, 0xb40(%rsp) vmovaps %ymm0, 0xb20(%rsp) vmovaps 0xb60(%rsp), %ymm2 vmovaps 0xb40(%rsp), %ymm0 vmovaps 0xb20(%rsp), %ymm3 vfmadd213ps %ymm3, %ymm2, %ymm0 # ymm0 = (ymm2 * ymm0) + ymm3 vmovaps %ymm0, 0x20(%rsp) vmovaps 0x20(%rsp), %ymm2 vmovaps 0x120(%rsp), %ymm0 vmovaps %ymm2, 0x280(%rsp) vmovaps %ymm0, 0x260(%rsp) vmovaps 0x280(%rsp), %ymm0 vmovaps 0x260(%rsp), %ymm2 vmulps %ymm2, %ymm0, %ymm0 vmovaps %ymm0, 0x20(%rsp) vmovaps 0x20(%rsp), %ymm2 vmovaps 0x40(%rsp), %ymm0 vmovaps %ymm2, 0x240(%rsp) vmovaps %ymm0, 0x220(%rsp) vmovaps 0x240(%rsp), %ymm0 vmovaps 0x220(%rsp), %ymm2 vmulps %ymm2, %ymm0, %ymm0 vmovaps %ymm0, 0x20(%rsp) vmovaps 0xa0(%rsp), %ymm2 vmovaps 0x20(%rsp), %ymm0 vmovaps %ymm2, 0x380(%rsp) vbroadcastss 0x7bb484(%rip), %ymm2 # 0x1e0bdd0 vmovaps %ymm2, 0x360(%rsp) vmovaps %ymm0, 0x340(%rsp) vmovaps 0x380(%rsp), %ymm3 vmovaps 0x360(%rsp), %ymm2 vmovaps 0x340(%rsp), %ymm0 vmovaps %ymm3, 0xbc0(%rsp) vmovaps %ymm2, 0xba0(%rsp) vmovaps %ymm0, 0xb80(%rsp) vmovaps 0xbc0(%rsp), %ymm2 vmovaps 0xba0(%rsp), %ymm0 vmovaps 0xb80(%rsp), %ymm3 vfmadd213ps %ymm3, %ymm2, %ymm0 # ymm0 = (ymm2 * ymm0) + ymm3 vmovaps %ymm0, 0x20(%rsp) vmovaps 0x40(%rsp), %ymm2 vmovaps 0x20(%rsp), %ymm0 vmovaps %ymm2, 0x7a0(%rsp) vmovaps %ymm1, 0x780(%rsp) vmovaps %ymm0, 0x760(%rsp) vmovaps 0x7a0(%rsp), %ymm2 vmovaps 0x780(%rsp), %ymm1 vmovaps 0x760(%rsp), %ymm0 vmovaps %ymm2, 0xc80(%rsp) vmovaps %ymm1, 0xc60(%rsp) vmovaps %ymm0, 0xc40(%rsp) vmovaps 0xc80(%rsp), %ymm1 vmovaps 0xc60(%rsp), %ymm0 vmovaps 0xc40(%rsp), %ymm2 vfnmadd213ps %ymm2, %ymm1, %ymm0 # ymm0 = -(ymm1 * ymm0) + ymm2 vmovaps %ymm0, 0x20(%rsp) vmovaps 0x120(%rsp), %ymm1 vmovaps 0x20(%rsp), %ymm0 vmovaps %ymm1, 0x7e0(%rsp) vmovaps %ymm0, 0x7c0(%rsp) vmovaps 0x7e0(%rsp), %ymm0 vmovaps 0x7c0(%rsp), %ymm1 vaddps %ymm1, %ymm0, %ymm0 vmovaps %ymm0, 0x120(%rsp) vmovaps 0xa0(%rsp), %ymm1 vmovaps 0x120(%rsp), %ymm0 vmovaps %ymm1, 0x320(%rsp) vbroadcastss 0x7bb32b(%rip), %ymm1 # 0x1e0bdcc vmovaps %ymm1, 0x300(%rsp) vmovaps %ymm0, 0x2e0(%rsp) vmovaps 0x320(%rsp), %ymm2 vmovaps 0x300(%rsp), %ymm1 vmovaps 0x2e0(%rsp), %ymm0 vmovaps %ymm2, 0xc20(%rsp) vmovaps %ymm1, 0xc00(%rsp) vmovaps %ymm0, 0xbe0(%rsp) vmovaps 0xc20(%rsp), %ymm1 vmovaps 0xc00(%rsp), %ymm0 vmovaps 0xbe0(%rsp), %ymm2 vfmadd213ps %ymm2, %ymm1, %ymm0 # ymm0 = (ymm1 * ymm0) + ymm2 vmovaps %ymm0, 0x120(%rsp) vmovaps 0x120(%rsp), %ymm1 vmovaps 0xc0(%rsp), %ymm0 vmovaps %ymm1, 0xd20(%rsp) vmovaps %ymm0, 0xd00(%rsp) vmovaps 0xd20(%rsp), %ymm0 vmovaps 0xd00(%rsp), %ymm1 vpor %ymm1, %ymm0, %ymm0 vmovaps %ymm0, 0x20(%rsp) vmovaps 0x20(%rsp), %ymm0 movq %rbp, %rsp popq %rbp retq nopl (%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,287
ncnn::UnaryOp_x86_avx512_functor::unary_op_log::func_pack4(float vector[4] const&) const
__m128 func_pack4(const __m128& x) const { return log_ps(x); }
subq $0x6b8, %rsp # imm = 0x6B8 movq %rdi, -0x78(%rsp) movq %rsi, -0x80(%rsp) movq -0x80(%rsp), %rax vmovaps (%rax), %xmm0 vmovaps %xmm0, 0x10(%rsp) vbroadcastss 0x7b847b(%rip), %xmm0 # 0x1e09004 vmovaps %xmm0, -0x10(%rsp) vmovaps 0x10(%rsp), %xmm1 vpxor %xmm0, %xmm0, %xmm0 vmovaps %xmm0, 0x60(%rsp) vmovaps 0x60(%rsp), %xmm0 vmovaps %xmm1, 0x5f0(%rsp) vmovaps %xmm0, 0x5e0(%rsp) vmovaps 0x5f0(%rsp), %xmm0 vmovaps 0x5e0(%rsp), %xmm1 vcmpleps %xmm1, %xmm0, %xmm0 vmovaps %xmm0, -0x20(%rsp) vmovaps 0x10(%rsp), %xmm0 vmovaps %xmm0, 0xe0(%rsp) vbroadcastss 0x7bb1fc(%rip), %xmm0 # 0x1e0bde8 vmovaps %xmm0, 0xd0(%rsp) vmovaps 0xe0(%rsp), %xmm0 vmovaps 0xd0(%rsp), %xmm1 vmaxps %xmm1, %xmm0, %xmm0 vmovaps %xmm0, 0x10(%rsp) vmovaps 0x10(%rsp), %xmm0 vmovaps %xmm0, 0x620(%rsp) vmovaps 0x620(%rsp), %xmm0 vmovaps %xmm0, 0x610(%rsp) movl $0x17, 0x60c(%rsp) vmovdqa 0x610(%rsp), %xmm0 vmovd 0x60c(%rsp), %xmm1 vpsrld %xmm1, %xmm0, %xmm0 vmovdqa %xmm0, (%rsp) vmovaps 0x10(%rsp), %xmm0 vmovaps %xmm0, 0x1b0(%rsp) vbroadcastss 0x7bb17c(%rip), %xmm0 # 0x1e0bdec vmovaps %xmm0, 0x1a0(%rsp) vmovdqa 0x1b0(%rsp), %xmm0 vmovdqa 0x1a0(%rsp), %xmm1 vpand %xmm1, %xmm0, %xmm0 vmovdqa %xmm0, 0x10(%rsp) vmovaps 0x10(%rsp), %xmm0 vmovaps %xmm0, 0x660(%rsp) vbroadcastss 0x7b836f(%rip), %xmm1 # 0x1e0901c vmovaps %xmm1, 0x650(%rsp) vmovdqa 0x660(%rsp), %xmm0 vmovdqa 0x650(%rsp), %xmm2 vpor %xmm2, %xmm0, %xmm0 vmovdqa %xmm0, 0x10(%rsp) vmovdqa (%rsp), %xmm0 vmovdqa %xmm0, 0x680(%rsp) vpbroadcastq 0x7bb12f(%rip), %xmm0 # 0x1e0be18 vmovdqa %xmm0, 0x670(%rsp) vmovdqa 0x680(%rsp), %xmm0 vmovdqa 0x670(%rsp), %xmm2 vpsubd %xmm2, %xmm0, %xmm0 vmovdqa %xmm0, (%rsp) vmovdqa (%rsp), %xmm0 vmovdqa %xmm0, 0x150(%rsp) vcvtdq2ps 0x150(%rsp), %xmm0 vmovaps %xmm0, -0x30(%rsp) vmovaps -0x30(%rsp), %xmm2 vmovaps -0x10(%rsp), %xmm0 vmovaps %xmm2, 0x140(%rsp) vmovaps %xmm0, 0x130(%rsp) vmovaps 0x140(%rsp), %xmm0 vmovaps 0x130(%rsp), %xmm2 vaddps %xmm2, %xmm0, %xmm0 vmovaps %xmm0, -0x30(%rsp) vmovaps 0x10(%rsp), %xmm0 vmovaps %xmm0, 0x6a0(%rsp) vbroadcastss 0x7bb074(%rip), %xmm0 # 0x1e0bdf0 vmovaps %xmm0, 0x690(%rsp) vmovaps 0x6a0(%rsp), %xmm0 vmovaps 0x690(%rsp), %xmm2 vcmpltps %xmm2, %xmm0, %xmm0 vmovaps %xmm0, -0x40(%rsp) vmovaps 0x10(%rsp), %xmm2 vmovaps -0x40(%rsp), %xmm0 vmovaps %xmm2, 0x190(%rsp) vmovaps %xmm0, 0x180(%rsp) vmovdqa 0x190(%rsp), %xmm0 vmovdqa 0x180(%rsp), %xmm2 vpand %xmm2, %xmm0, %xmm0 vmovdqa %xmm0, -0x50(%rsp) vmovaps 0x10(%rsp), %xmm2 vmovaps -0x10(%rsp), %xmm0 vmovaps %xmm2, 0x50(%rsp) vmovaps %xmm0, 0x40(%rsp) vmovaps 0x50(%rsp), %xmm0 vmovaps 0x40(%rsp), %xmm2 vsubps %xmm2, %xmm0, %xmm0 vmovaps %xmm0, 0x10(%rsp) vmovaps -0x30(%rsp), %xmm2 vmovaps -0x10(%rsp), %xmm3 vmovaps -0x40(%rsp), %xmm0 vmovaps %xmm3, 0x170(%rsp) vmovaps %xmm0, 0x160(%rsp) vmovdqa 0x170(%rsp), %xmm0 vmovdqa 0x160(%rsp), %xmm3 vpand %xmm3, %xmm0, %xmm0 vmovaps %xmm2, 0x30(%rsp) vmovdqa %xmm0, 0x20(%rsp) vmovaps 0x30(%rsp), %xmm0 vmovaps 0x20(%rsp), %xmm2 vsubps %xmm2, %xmm0, %xmm0 vmovaps %xmm0, -0x30(%rsp) vmovaps 0x10(%rsp), %xmm2 vmovaps -0x50(%rsp), %xmm0 vmovaps %xmm2, 0x120(%rsp) vmovaps %xmm0, 0x110(%rsp) vmovaps 0x120(%rsp), %xmm0 vmovaps 0x110(%rsp), %xmm2 vaddps %xmm2, %xmm0, %xmm0 vmovaps %xmm0, 0x10(%rsp) vmovaps 0x10(%rsp), %xmm0 vmovaps %xmm0, 0xc0(%rsp) vmovaps %xmm0, 0xb0(%rsp) vmovaps 0xc0(%rsp), %xmm0 vmovaps 0xb0(%rsp), %xmm2 vmulps %xmm2, %xmm0, %xmm0 vmovaps %xmm0, -0x60(%rsp) vbroadcastss 0x7baf17(%rip), %xmm0 # 0x1e0bdf4 vmovaps %xmm0, -0x70(%rsp) vmovaps -0x70(%rsp), %xmm2 vmovaps 0x10(%rsp), %xmm0 vmovaps %xmm2, 0x3c0(%rsp) 
vmovaps %xmm0, 0x3b0(%rsp) vbroadcastss 0x7baeee(%rip), %xmm0 # 0x1e0bdf8 vmovaps %xmm0, 0x3a0(%rsp) vmovaps 0x3c0(%rsp), %xmm3 vmovaps 0x3b0(%rsp), %xmm2 vmovaps 0x3a0(%rsp), %xmm0 vmovaps %xmm3, 0x420(%rsp) vmovaps %xmm2, 0x410(%rsp) vmovaps %xmm0, 0x400(%rsp) vmovaps 0x420(%rsp), %xmm2 vmovaps 0x410(%rsp), %xmm0 vmovaps 0x400(%rsp), %xmm3 vfmadd213ps %xmm3, %xmm2, %xmm0 # xmm0 = (xmm2 * xmm0) + xmm3 vmovaps %xmm0, -0x70(%rsp) vmovaps -0x70(%rsp), %xmm2 vmovaps 0x10(%rsp), %xmm0 vmovaps %xmm2, 0x390(%rsp) vmovaps %xmm0, 0x380(%rsp) vbroadcastss 0x7bae66(%rip), %xmm0 # 0x1e0bdfc vmovaps %xmm0, 0x370(%rsp) vmovaps 0x390(%rsp), %xmm3 vmovaps 0x380(%rsp), %xmm2 vmovaps 0x370(%rsp), %xmm0 vmovaps %xmm3, 0x450(%rsp) vmovaps %xmm2, 0x440(%rsp) vmovaps %xmm0, 0x430(%rsp) vmovaps 0x450(%rsp), %xmm2 vmovaps 0x440(%rsp), %xmm0 vmovaps 0x430(%rsp), %xmm3 vfmadd213ps %xmm3, %xmm2, %xmm0 # xmm0 = (xmm2 * xmm0) + xmm3 vmovaps %xmm0, -0x70(%rsp) vmovaps -0x70(%rsp), %xmm2 vmovaps 0x10(%rsp), %xmm0 vmovaps %xmm2, 0x360(%rsp) vmovaps %xmm0, 0x350(%rsp) vbroadcastss 0x7badde(%rip), %xmm0 # 0x1e0be00 vmovaps %xmm0, 0x340(%rsp) vmovaps 0x360(%rsp), %xmm3 vmovaps 0x350(%rsp), %xmm2 vmovaps 0x340(%rsp), %xmm0 vmovaps %xmm3, 0x480(%rsp) vmovaps %xmm2, 0x470(%rsp) vmovaps %xmm0, 0x460(%rsp) vmovaps 0x480(%rsp), %xmm2 vmovaps 0x470(%rsp), %xmm0 vmovaps 0x460(%rsp), %xmm3 vfmadd213ps %xmm3, %xmm2, %xmm0 # xmm0 = (xmm2 * xmm0) + xmm3 vmovaps %xmm0, -0x70(%rsp) vmovaps -0x70(%rsp), %xmm2 vmovaps 0x10(%rsp), %xmm0 vmovaps %xmm2, 0x330(%rsp) vmovaps %xmm0, 0x320(%rsp) vbroadcastss 0x7bad56(%rip), %xmm0 # 0x1e0be04 vmovaps %xmm0, 0x310(%rsp) vmovaps 0x330(%rsp), %xmm3 vmovaps 0x320(%rsp), %xmm2 vmovaps 0x310(%rsp), %xmm0 vmovaps %xmm3, 0x4b0(%rsp) vmovaps %xmm2, 0x4a0(%rsp) vmovaps %xmm0, 0x490(%rsp) vmovaps 0x4b0(%rsp), %xmm2 vmovaps 0x4a0(%rsp), %xmm0 vmovaps 0x490(%rsp), %xmm3 vfmadd213ps %xmm3, %xmm2, %xmm0 # xmm0 = (xmm2 * xmm0) + xmm3 vmovaps %xmm0, -0x70(%rsp) vmovaps -0x70(%rsp), %xmm2 vmovaps 0x10(%rsp), %xmm0 vmovaps %xmm2, 0x300(%rsp) vmovaps %xmm0, 0x2f0(%rsp) vbroadcastss 0x7bacce(%rip), %xmm0 # 0x1e0be08 vmovaps %xmm0, 0x2e0(%rsp) vmovaps 0x300(%rsp), %xmm3 vmovaps 0x2f0(%rsp), %xmm2 vmovaps 0x2e0(%rsp), %xmm0 vmovaps %xmm3, 0x4e0(%rsp) vmovaps %xmm2, 0x4d0(%rsp) vmovaps %xmm0, 0x4c0(%rsp) vmovaps 0x4e0(%rsp), %xmm2 vmovaps 0x4d0(%rsp), %xmm0 vmovaps 0x4c0(%rsp), %xmm3 vfmadd213ps %xmm3, %xmm2, %xmm0 # xmm0 = (xmm2 * xmm0) + xmm3 vmovaps %xmm0, -0x70(%rsp) vmovaps -0x70(%rsp), %xmm2 vmovaps 0x10(%rsp), %xmm0 vmovaps %xmm2, 0x2d0(%rsp) vmovaps %xmm0, 0x2c0(%rsp) vbroadcastss 0x7bac46(%rip), %xmm0 # 0x1e0be0c vmovaps %xmm0, 0x2b0(%rsp) vmovaps 0x2d0(%rsp), %xmm3 vmovaps 0x2c0(%rsp), %xmm2 vmovaps 0x2b0(%rsp), %xmm0 vmovaps %xmm3, 0x510(%rsp) vmovaps %xmm2, 0x500(%rsp) vmovaps %xmm0, 0x4f0(%rsp) vmovaps 0x510(%rsp), %xmm2 vmovaps 0x500(%rsp), %xmm0 vmovaps 0x4f0(%rsp), %xmm3 vfmadd213ps %xmm3, %xmm2, %xmm0 # xmm0 = (xmm2 * xmm0) + xmm3 vmovaps %xmm0, -0x70(%rsp) vmovaps -0x70(%rsp), %xmm2 vmovaps 0x10(%rsp), %xmm0 vmovaps %xmm2, 0x2a0(%rsp) vmovaps %xmm0, 0x290(%rsp) vbroadcastss 0x7babbe(%rip), %xmm0 # 0x1e0be10 vmovaps %xmm0, 0x280(%rsp) vmovaps 0x2a0(%rsp), %xmm3 vmovaps 0x290(%rsp), %xmm2 vmovaps 0x280(%rsp), %xmm0 vmovaps %xmm3, 0x540(%rsp) vmovaps %xmm2, 0x530(%rsp) vmovaps %xmm0, 0x520(%rsp) vmovaps 0x540(%rsp), %xmm2 vmovaps 0x530(%rsp), %xmm0 vmovaps 0x520(%rsp), %xmm3 vfmadd213ps %xmm3, %xmm2, %xmm0 # xmm0 = (xmm2 * xmm0) + xmm3 vmovaps %xmm0, -0x70(%rsp) vmovaps -0x70(%rsp), %xmm2 vmovaps 
0x10(%rsp), %xmm0 vmovaps %xmm2, 0x270(%rsp) vmovaps %xmm0, 0x260(%rsp) vbroadcastss 0x7bab36(%rip), %xmm0 # 0x1e0be14 vmovaps %xmm0, 0x250(%rsp) vmovaps 0x270(%rsp), %xmm3 vmovaps 0x260(%rsp), %xmm2 vmovaps 0x250(%rsp), %xmm0 vmovaps %xmm3, 0x570(%rsp) vmovaps %xmm2, 0x560(%rsp) vmovaps %xmm0, 0x550(%rsp) vmovaps 0x570(%rsp), %xmm2 vmovaps 0x560(%rsp), %xmm0 vmovaps 0x550(%rsp), %xmm3 vfmadd213ps %xmm3, %xmm2, %xmm0 # xmm0 = (xmm2 * xmm0) + xmm3 vmovaps %xmm0, -0x70(%rsp) vmovaps -0x70(%rsp), %xmm2 vmovaps 0x10(%rsp), %xmm0 vmovaps %xmm2, 0xa0(%rsp) vmovaps %xmm0, 0x90(%rsp) vmovaps 0xa0(%rsp), %xmm0 vmovaps 0x90(%rsp), %xmm2 vmulps %xmm2, %xmm0, %xmm0 vmovaps %xmm0, -0x70(%rsp) vmovaps -0x70(%rsp), %xmm2 vmovaps -0x60(%rsp), %xmm0 vmovaps %xmm2, 0x80(%rsp) vmovaps %xmm0, 0x70(%rsp) vmovaps 0x80(%rsp), %xmm0 vmovaps 0x70(%rsp), %xmm2 vmulps %xmm2, %xmm0, %xmm0 vmovaps %xmm0, -0x70(%rsp) vmovaps -0x30(%rsp), %xmm2 vmovaps -0x70(%rsp), %xmm0 vmovaps %xmm2, 0x240(%rsp) vbroadcastss 0x7baa01(%rip), %xmm2 # 0x1e0bdd0 vmovaps %xmm2, 0x230(%rsp) vmovaps %xmm0, 0x220(%rsp) vmovaps 0x240(%rsp), %xmm3 vmovaps 0x230(%rsp), %xmm2 vmovaps 0x220(%rsp), %xmm0 vmovaps %xmm3, 0x5a0(%rsp) vmovaps %xmm2, 0x590(%rsp) vmovaps %xmm0, 0x580(%rsp) vmovaps 0x5a0(%rsp), %xmm2 vmovaps 0x590(%rsp), %xmm0 vmovaps 0x580(%rsp), %xmm3 vfmadd213ps %xmm3, %xmm2, %xmm0 # xmm0 = (xmm2 * xmm0) + xmm3 vmovaps %xmm0, -0x70(%rsp) vmovaps -0x60(%rsp), %xmm2 vmovaps -0x70(%rsp), %xmm0 vmovaps %xmm2, 0x1e0(%rsp) vmovaps %xmm1, 0x1d0(%rsp) vmovaps %xmm0, 0x1c0(%rsp) vmovaps 0x1e0(%rsp), %xmm2 vmovaps 0x1d0(%rsp), %xmm1 vmovaps 0x1c0(%rsp), %xmm0 vmovaps %xmm2, 0x3f0(%rsp) vmovaps %xmm1, 0x3e0(%rsp) vmovaps %xmm0, 0x3d0(%rsp) vmovaps 0x3f0(%rsp), %xmm1 vmovaps 0x3e0(%rsp), %xmm0 vmovaps 0x3d0(%rsp), %xmm2 vfnmadd213ps %xmm2, %xmm1, %xmm0 # xmm0 = -(xmm1 * xmm0) + xmm2 vmovaps %xmm0, -0x70(%rsp) vmovaps 0x10(%rsp), %xmm1 vmovaps -0x70(%rsp), %xmm0 vmovaps %xmm1, 0x100(%rsp) vmovaps %xmm0, 0xf0(%rsp) vmovaps 0x100(%rsp), %xmm0 vmovaps 0xf0(%rsp), %xmm1 vaddps %xmm1, %xmm0, %xmm0 vmovaps %xmm0, 0x10(%rsp) vmovaps -0x30(%rsp), %xmm1 vmovaps 0x10(%rsp), %xmm0 vmovaps %xmm1, 0x210(%rsp) vbroadcastss 0x7ba8b4(%rip), %xmm1 # 0x1e0bdcc vmovaps %xmm1, 0x200(%rsp) vmovaps %xmm0, 0x1f0(%rsp) vmovaps 0x210(%rsp), %xmm2 vmovaps 0x200(%rsp), %xmm1 vmovaps 0x1f0(%rsp), %xmm0 vmovaps %xmm2, 0x5d0(%rsp) vmovaps %xmm1, 0x5c0(%rsp) vmovaps %xmm0, 0x5b0(%rsp) vmovaps 0x5d0(%rsp), %xmm1 vmovaps 0x5c0(%rsp), %xmm0 vmovaps 0x5b0(%rsp), %xmm2 vfmadd213ps %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovaps %xmm0, 0x10(%rsp) vmovaps 0x10(%rsp), %xmm1 vmovaps -0x20(%rsp), %xmm0 vmovaps %xmm1, 0x640(%rsp) vmovaps %xmm0, 0x630(%rsp) vmovaps 0x640(%rsp), %xmm0 vmovaps 0x630(%rsp), %xmm1 vpor %xmm1, %xmm0, %xmm0 vmovaps %xmm0, 0x10(%rsp) vmovaps 0x10(%rsp), %xmm0 addq $0x6b8, %rsp # imm = 0x6B8 retq nop
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
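The body above is the O0 expansion of log_ps (ncnn's avx_mathfun-style vector log): vpsrld $0x17 shifts the exponent field out, the vpand/vpor pair rewrites the mantissa into [0.5, 1), the vcmpltps against a sqrt(0.5) constant renormalizes into [sqrt(1/2), sqrt(2)), the long vfmadd213ps chain is a degree-9 Horner evaluation of log(1+m), and the last two fused ops re-attach e*ln(2) split across a high and a low constant; the initial vcmpleps against zero plus the final vpor turn x <= 0 into NaN. A scalar sketch of the same Cephes-style scheme, using the classic Cephes constants (assumed here for illustration, not read out of the rip-relative tables above):

#include <cmath>
#include <cstdio>

static float log_cephes(float x)
{
    if (x <= 0.0f)
        return std::nanf("");            // the vector code ORs a NaN mask in instead

    int e;
    float m = std::frexp(x, &e);         // m in [0.5, 1); the SIMD version gets here
                                         // with the >>23 shift and mantissa mask
    if (m < 0.707106781186547524f)       // below sqrt(0.5): renormalize
    {
        e -= 1;
        m = m + m - 1.0f;
    }
    else
    {
        m = m - 1.0f;
    }

    float z = m * m;
    float y = 7.0376836292e-2f;          // Cephes minimax coefficients for log(1+m)
    y = y * m + -1.1514610310e-1f;
    y = y * m + 1.1676998740e-1f;
    y = y * m + -1.2420140846e-1f;
    y = y * m + 1.4249322787e-1f;
    y = y * m + -1.6668057665e-1f;
    y = y * m + 2.0000714765e-1f;
    y = y * m + -2.4999993993e-1f;
    y = y * m + 3.3333331174e-1f;
    y = y * m * z;

    y += (float)e * -2.12194440e-4f;     // low half of ln(2)
    y -= 0.5f * z;
    m = m + y;
    m += (float)e * 0.693359375f;        // high half of ln(2)
    return m;
}

int main()
{
    for (float x : {0.1f, 1.0f, 2.7182818f, 100.0f})
        std::printf("log(%g): %g vs %g\n", x, log_cephes(x), std::log(x));
    return 0;
}

Splitting ln(2) is why two constants (0.693359375 and -2.12194440e-4) appear instead of one: 0.693359375 is exactly representable in float, so e times it rounds cleanly, and the small second term restores the remaining bits of ln(2).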
2,113,288
ncnn::UnaryOp_x86_avx512_functor::unary_op_log::func(float const&) const
float func(const float& x) const { return (float)log(x); }
subq $0x18, %rsp movq %rdi, 0x10(%rsp) movq %rsi, 0x8(%rsp) movq 0x8(%rsp), %rax vmovss (%rax), %xmm0 callq 0x100cd0 addq $0x18, %rsp retq nopw %cs:(%rax,%rax) nopl (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,289
ncnn::UnaryOp_x86_avx512_functor::unary_op_sin::func_pack16(float vector[16] const&) const
__m512 func_pack16(const __m512& x) const { return sin512_ps(x); }
pushq %rbp movq %rsp, %rbp andq $-0x40, %rsp subq $0x15c0, %rsp # imm = 0x15C0 movq %rdi, 0x38(%rsp) movq %rsi, 0x30(%rsp) movq 0x30(%rsp), %rax vmovaps (%rax), %zmm0 vmovaps %zmm0, 0x300(%rsp) vmovaps 0x300(%rsp), %zmm0 vmovaps %zmm0, 0x200(%rsp) vmovaps 0x300(%rsp), %zmm0 vmovaps %zmm0, 0x480(%rsp) vmovaps 0x480(%rsp), %zmm1 movl $0x7fffffff, 0x53c(%rsp) # imm = 0x7FFFFFFF vpbroadcastd 0x53c(%rsp), %zmm0 vmovdqa64 %zmm0, 0x4c0(%rsp) vmovdqa64 0x4c0(%rsp), %zmm0 vmovaps %zmm1, 0x440(%rsp) vmovdqa64 %zmm0, 0x400(%rsp) vmovdqa64 0x440(%rsp), %zmm0 vmovdqa64 0x400(%rsp), %zmm1 vpandq %zmm1, %zmm0, %zmm0 vmovdqa64 %zmm0, 0x3c0(%rsp) vmovdqa64 0x3c0(%rsp), %zmm0 vmovdqa64 %zmm0, 0x300(%rsp) vmovaps 0x200(%rsp), %zmm1 vmovaps 0x7cc3f6(%rip), %zmm0 # 0x1e1dac0 vmovaps %zmm1, 0x11c0(%rsp) vmovaps %zmm0, 0x1180(%rsp) vmovdqa64 0x11c0(%rsp), %zmm0 vmovdqa64 0x1180(%rsp), %zmm1 vpandd %zmm1, %zmm0, %zmm0 vmovdqa64 %zmm0, 0x200(%rsp) vmovaps 0x300(%rsp), %zmm1 vmovaps 0x7cc3f6(%rip), %zmm0 # 0x1e1db00 vmovaps %zmm1, 0x780(%rsp) vmovaps %zmm0, 0x740(%rsp) vmovaps 0x780(%rsp), %zmm0 vmovaps 0x740(%rsp), %zmm1 vmulps %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0x1c0(%rsp) vmovaps 0x1c0(%rsp), %zmm0 vmovaps %zmm0, 0xf80(%rsp) vpxor %xmm1, %xmm1, %xmm1 vcvttps2dq 0xf80(%rsp), %zmm0 vmovdqa64 %zmm1, 0x10c0(%rsp) vmovdqa64 %zmm0, 0x140(%rsp) vmovdqa64 0x140(%rsp), %zmm1 vmovdqa64 0x7cc3ca(%rip), %zmm0 # 0x1e1db40 vmovdqa64 %zmm1, 0x1000(%rsp) vmovdqa64 %zmm0, 0xfc0(%rsp) vmovdqa64 0x1000(%rsp), %zmm0 vmovdqa64 0xfc0(%rsp), %zmm1 vpaddd %zmm1, %zmm0, %zmm0 vmovdqa64 %zmm0, 0x140(%rsp) vmovdqa64 0x140(%rsp), %zmm1 vmovdqa64 0x7cc3ca(%rip), %zmm0 # 0x1e1db80 vmovdqa64 %zmm1, 0x13c0(%rsp) vmovdqa64 %zmm0, 0x1380(%rsp) vmovdqa64 0x13c0(%rsp), %zmm0 vmovdqa64 0x1380(%rsp), %zmm1 vpandq %zmm1, %zmm0, %zmm0 vmovdqa64 %zmm0, 0x140(%rsp) vmovdqa64 0x140(%rsp), %zmm0 vmovdqa64 %zmm0, 0x1200(%rsp) vcvtdq2ps 0x1200(%rsp), %zmm0 vmovaps %zmm0, 0x1c0(%rsp) vmovdqa64 0x140(%rsp), %zmm1 vmovdqa64 0x7cc3aa(%rip), %zmm0 # 0x1e1dbc0 vmovdqa64 %zmm1, 0x1340(%rsp) vmovdqa64 %zmm0, 0x1300(%rsp) vmovdqa64 0x1340(%rsp), %zmm0 vmovdqa64 0x1300(%rsp), %zmm1 vpandq %zmm1, %zmm0, %zmm0 vmovdqa64 %zmm0, 0x180(%rsp) vmovdqa64 0x180(%rsp), %zmm0 vmovdqa64 %zmm0, 0x1080(%rsp) movl $0x1d, 0x107c(%rsp) vmovdqa64 0x1080(%rsp), %zmm0 vmovd 0x107c(%rsp), %xmm1 vpslld %xmm1, %zmm0, %zmm0 vmovdqa64 %zmm0, 0x180(%rsp) vmovdqa64 0x140(%rsp), %zmm1 vmovdqa64 0x7cc370(%rip), %zmm0 # 0x1e1dc00 vmovdqa64 %zmm1, 0x12c0(%rsp) vmovdqa64 %zmm0, 0x1280(%rsp) vmovdqa64 0x12c0(%rsp), %zmm0 vmovdqa64 0x1280(%rsp), %zmm1 vpandq %zmm1, %zmm0, %zmm0 vmovdqa64 %zmm0, 0x140(%rsp) vmovdqa64 0x140(%rsp), %zmm0 vptestnmd %zmm0, %zmm0, %k0 kmovw %k0, 0x127e(%rsp) kmovw 0x127e(%rsp), %k0 vpmovm2d %k0, %zmm0 vmovdqa64 %zmm0, 0x140(%rsp) vmovdqa64 0x180(%rsp), %zmm0 vmovdqa64 %zmm0, 0x380(%rsp) vmovdqa64 0x380(%rsp), %zmm0 vmovdqa64 %zmm0, 0x100(%rsp) vmovdqa64 0x140(%rsp), %zmm0 vmovdqa64 %zmm0, 0x340(%rsp) vmovdqa64 0x340(%rsp), %zmm0 vmovdqa64 %zmm0, 0xc0(%rsp) vmovaps 0x200(%rsp), %zmm1 vmovaps 0x100(%rsp), %zmm0 vmovaps %zmm1, 0x14c0(%rsp) vmovaps %zmm0, 0x1480(%rsp) vmovdqa64 0x14c0(%rsp), %zmm0 vmovdqa64 0x1480(%rsp), %zmm1 vpxord %zmm1, %zmm0, %zmm0 vmovdqa64 %zmm0, 0x200(%rsp) vmovaps 0x7cc30c(%rip), %zmm0 # 0x1e1dc80 vmovaps %zmm0, 0x2c0(%rsp) vmovaps 0x7cc33a(%rip), %zmm0 # 0x1e1dcc0 vmovaps %zmm0, 0x280(%rsp) vmovaps 0x7cc368(%rip), %zmm0 # 0x1e1dd00 vmovaps %zmm0, 0x240(%rsp) vmovaps 0x1c0(%rsp), %zmm2 vmovaps 0x2c0(%rsp), %zmm1 vmovaps 0x300(%rsp), %zmm0 vmovaps 
%zmm2, 0xd80(%rsp) vmovaps %zmm1, 0xd40(%rsp) vmovaps %zmm0, 0xd00(%rsp) vmovaps 0xd80(%rsp), %zmm1 vmovaps 0xd40(%rsp), %zmm0 vmovaps 0xd00(%rsp), %zmm2 vfmadd213ps %zmm2, %zmm1, %zmm0 # zmm0 = (zmm1 * zmm0) + zmm2 vmovaps %zmm0, 0x300(%rsp) vmovaps 0x1c0(%rsp), %zmm2 vmovaps 0x280(%rsp), %zmm1 vmovaps 0x300(%rsp), %zmm0 vmovaps %zmm2, 0xcc0(%rsp) vmovaps %zmm1, 0xc80(%rsp) vmovaps %zmm0, 0xc40(%rsp) vmovaps 0xcc0(%rsp), %zmm1 vmovaps 0xc80(%rsp), %zmm0 vmovaps 0xc40(%rsp), %zmm2 vfmadd213ps %zmm2, %zmm1, %zmm0 # zmm0 = (zmm1 * zmm0) + zmm2 vmovaps %zmm0, 0x300(%rsp) vmovaps 0x1c0(%rsp), %zmm2 vmovaps 0x240(%rsp), %zmm1 vmovaps 0x300(%rsp), %zmm0 vmovaps %zmm2, 0xc00(%rsp) vmovaps %zmm1, 0xbc0(%rsp) vmovaps %zmm0, 0xb80(%rsp) vmovaps 0xc00(%rsp), %zmm1 vmovaps 0xbc0(%rsp), %zmm0 vmovaps 0xb80(%rsp), %zmm2 vfmadd213ps %zmm2, %zmm1, %zmm0 # zmm0 = (zmm1 * zmm0) + zmm2 vmovaps %zmm0, 0x300(%rsp) vmovaps 0x7cc294(%rip), %zmm0 # 0x1e1dd40 vmovaps %zmm0, 0x1c0(%rsp) vmovaps 0x300(%rsp), %zmm0 vmovaps %zmm0, 0x700(%rsp) vmovaps %zmm0, 0x6c0(%rsp) vmovaps 0x700(%rsp), %zmm0 vmovaps 0x6c0(%rsp), %zmm1 vmulps %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0x80(%rsp) vmovaps 0x1c0(%rsp), %zmm2 vmovaps 0x80(%rsp), %zmm1 vmovaps 0x7cc27c(%rip), %zmm0 # 0x1e1dd80 vmovaps %zmm2, 0xb40(%rsp) vmovaps %zmm1, 0xb00(%rsp) vmovaps %zmm0, 0xac0(%rsp) vmovaps 0xb40(%rsp), %zmm1 vmovaps 0xb00(%rsp), %zmm0 vmovaps 0xac0(%rsp), %zmm2 vfmadd213ps %zmm2, %zmm1, %zmm0 # zmm0 = (zmm1 * zmm0) + zmm2 vmovaps %zmm0, 0x1c0(%rsp) vmovaps 0x1c0(%rsp), %zmm2 vmovaps 0x80(%rsp), %zmm1 vmovaps 0x7cc264(%rip), %zmm0 # 0x1e1ddc0 vmovaps %zmm2, 0xa80(%rsp) vmovaps %zmm1, 0xa40(%rsp) vmovaps %zmm0, 0xa00(%rsp) vmovaps 0xa80(%rsp), %zmm1 vmovaps 0xa40(%rsp), %zmm0 vmovaps 0xa00(%rsp), %zmm2 vfmadd213ps %zmm2, %zmm1, %zmm0 # zmm0 = (zmm1 * zmm0) + zmm2 vmovaps %zmm0, 0x1c0(%rsp) vmovaps 0x1c0(%rsp), %zmm1 vmovaps 0x80(%rsp), %zmm0 vmovaps %zmm1, 0x680(%rsp) vmovaps %zmm0, 0x640(%rsp) vmovaps 0x680(%rsp), %zmm0 vmovaps 0x640(%rsp), %zmm1 vmulps %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0x1c0(%rsp) vmovaps 0x1c0(%rsp), %zmm1 vmovaps 0x80(%rsp), %zmm0 vmovaps %zmm1, 0x600(%rsp) vmovaps %zmm0, 0x5c0(%rsp) vmovaps 0x600(%rsp), %zmm0 vmovaps 0x5c0(%rsp), %zmm1 vmulps %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0x1c0(%rsp) vmovaps 0x80(%rsp), %zmm2 vmovaps 0x7cb318(%rip), %zmm1 # 0x1e1cf40 vmovaps 0x1c0(%rsp), %zmm0 vmovaps %zmm2, 0xe40(%rsp) vmovaps %zmm1, 0xe00(%rsp) vmovaps %zmm0, 0xdc0(%rsp) vmovaps 0xe40(%rsp), %zmm1 vmovaps 0xe00(%rsp), %zmm0 vmovaps 0xdc0(%rsp), %zmm2 vfnmadd213ps %zmm2, %zmm1, %zmm0 # zmm0 = -(zmm1 * zmm0) + zmm2 vmovaps %zmm0, 0x1c0(%rsp) vmovaps 0x1c0(%rsp), %zmm1 vmovaps 0x7cb1c0(%rip), %zmm0 # 0x1e1ce40 vmovaps %zmm1, 0xf40(%rsp) vmovaps %zmm0, 0xf00(%rsp) vmovaps 0xf40(%rsp), %zmm0 vmovaps 0xf00(%rsp), %zmm1 vaddps %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0x1c0(%rsp) vmovaps 0x7cc148(%rip), %zmm0 # 0x1e1de00 vmovaps %zmm0, 0x40(%rsp) vmovaps 0x40(%rsp), %zmm2 vmovaps 0x80(%rsp), %zmm1 vmovaps 0x7cc166(%rip), %zmm0 # 0x1e1de40 vmovaps %zmm2, 0x9c0(%rsp) vmovaps %zmm1, 0x980(%rsp) vmovaps %zmm0, 0x940(%rsp) vmovaps 0x9c0(%rsp), %zmm1 vmovaps 0x980(%rsp), %zmm0 vmovaps 0x940(%rsp), %zmm2 vfmadd213ps %zmm2, %zmm1, %zmm0 # zmm0 = (zmm1 * zmm0) + zmm2 vmovaps %zmm0, 0x40(%rsp) vmovaps 0x40(%rsp), %zmm2 vmovaps 0x80(%rsp), %zmm1 vmovaps 0x7cc14e(%rip), %zmm0 # 0x1e1de80 vmovaps %zmm2, 0x900(%rsp) vmovaps %zmm1, 0x8c0(%rsp) vmovaps %zmm0, 0x880(%rsp) vmovaps 0x900(%rsp), %zmm1 vmovaps 0x8c0(%rsp), %zmm0 vmovaps 0x880(%rsp), %zmm2 vfmadd213ps %zmm2, 
%zmm1, %zmm0 # zmm0 = (zmm1 * zmm0) + zmm2 vmovaps %zmm0, 0x40(%rsp) vmovaps 0x40(%rsp), %zmm1 vmovaps 0x80(%rsp), %zmm0 vmovaps %zmm1, 0x580(%rsp) vmovaps %zmm0, 0x540(%rsp) vmovaps 0x580(%rsp), %zmm0 vmovaps 0x540(%rsp), %zmm1 vmulps %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0x40(%rsp) vmovaps 0x40(%rsp), %zmm1 vmovaps 0x300(%rsp), %zmm0 vmovaps %zmm1, 0x840(%rsp) vmovaps %zmm0, 0x800(%rsp) vmovaps %zmm0, 0x7c0(%rsp) vmovaps 0x840(%rsp), %zmm1 vmovaps 0x800(%rsp), %zmm0 vmovaps 0x7c0(%rsp), %zmm2 vfmadd213ps %zmm2, %zmm1, %zmm0 # zmm0 = (zmm1 * zmm0) + zmm2 vmovaps %zmm0, 0x40(%rsp) vmovaps 0xc0(%rsp), %zmm0 vmovaps %zmm0, 0x240(%rsp) vmovaps 0x240(%rsp), %zmm1 vmovaps 0x40(%rsp), %zmm0 vmovaps %zmm1, 0x1140(%rsp) vmovaps %zmm0, 0x1100(%rsp) vmovdqa64 0x1140(%rsp), %zmm0 vmovdqa64 0x1100(%rsp), %zmm1 vpandd %zmm1, %zmm0, %zmm0 vmovdqa64 %zmm0, 0x40(%rsp) vmovaps 0x240(%rsp), %zmm1 vmovaps 0x1c0(%rsp), %zmm0 vmovaps %zmm1, 0x1540(%rsp) vmovaps %zmm0, 0x1500(%rsp) vmovdqa64 0x1540(%rsp), %zmm0 vpternlogq $0xf, %zmm0, %zmm0, %zmm0 vmovaps 0x1500(%rsp), %zmm1 vpandd %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0x1c0(%rsp) vmovaps 0x1c0(%rsp), %zmm1 vmovaps 0x40(%rsp), %zmm0 vmovaps %zmm1, 0xec0(%rsp) vmovaps %zmm0, 0xe80(%rsp) vmovaps 0xec0(%rsp), %zmm0 vaddps 0xe80(%rsp), %zmm0, %zmm0 vmovaps %zmm0, 0x1c0(%rsp) vmovaps 0x1c0(%rsp), %zmm1 vmovaps 0x200(%rsp), %zmm0 vmovaps %zmm1, 0x1440(%rsp) vmovaps %zmm0, 0x1400(%rsp) vmovaps 0x1440(%rsp), %zmm0 vmovaps 0x1400(%rsp), %zmm1 vpxord %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0x1c0(%rsp) vmovaps 0x1c0(%rsp), %zmm0 movq %rbp, %rsp popq %rbp retq nopw %cs:(%rax,%rax) nopl (%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
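sin512_ps follows the classic Cephes reduction: fold |x| into an octant index j (the vcvttps2dq / vpaddd 1 / vpandq ~1 sequence computes j = ((int)(x * 4/pi) + 1) & ~1), subtract j*pi/4 in three exactly representable pieces, then evaluate either the sin or the cos minimax polynomial depending on bit 1 of j, with bit 2 of j flipping the sign; the vpand/vpternlogq/vpand run at the end is the branch-free select, and the closing vpxord applies the sign. A scalar sketch with the standard Cephes constants (assumed, not transcribed from the broadcast tables above):

#include <cmath>
#include <cstdint>
#include <cstdio>

static float sin_cephes(float x)
{
    float sign = (x < 0.0f) ? -1.0f : 1.0f;   // save the sign, work on |x|
    x = std::fabs(x);

    int32_t j = (int32_t)(x * 1.27323954473516f); // x / (pi/4)
    j = (j + 1) & ~1;                             // force the octant index even
    float y = (float)j;

    if (j & 4) sign = -sign;           // lower half-circle: flip the sign
    bool use_cos_poly = (j & 2) != 0;  // quadrants where sin folds onto cos

    x = x - y * 0.78515625f;                  // pi/4 subtracted in three pieces
    x = x - y * 2.4187564849853515625e-4f;
    x = x - y * 3.77489497744594108e-8f;

    float z = x * x;
    float yc = 2.443315711809948e-5f;         // cos polynomial: 1 - z/2 + z^2*P(z)
    yc = yc * z - 1.388731625493765e-3f;
    yc = yc * z + 4.166664568298827e-2f;
    yc = yc * z * z - 0.5f * z + 1.0f;
    float ys = -1.9515295891e-4f;             // sin polynomial: x + x*z*Q(z)
    ys = ys * z + 8.3321608736e-3f;
    ys = ys * z - 1.6666654611e-1f;
    ys = ys * z * x + x;

    return sign * (use_cos_poly ? yc : ys);
}

int main()
{
    for (float x : {0.0f, 0.5f, 1.0f, 3.0f, -2.0f})
        std::printf("sin(%g): %g vs %g\n", x, sin_cephes(x), std::sin(x));
    return 0;
}

The AVX-512 body differs from the narrower ones only in how it materializes the masks: vptestnmd/kmovw/vpmovm2d round-trips the "is bit 1 of j zero" test through a k-register, where AVX2 uses a plain vpcmpeqd.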
2,113,290
ncnn::UnaryOp_x86_avx512_functor::unary_op_sin::func_pack8(float vector[8] const&) const
__m256 func_pack8(const __m256& x) const { return sin256_ps(x); }
pushq %rbp movq %rsp, %rbp andq $-0x20, %rsp subq $0xe60, %rsp # imm = 0xE60 movq %rdi, 0x18(%rsp) movq %rsi, 0x10(%rsp) movq 0x10(%rsp), %rax vmovaps (%rax), %ymm0 vmovaps %ymm0, 0x180(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovaps %ymm0, 0x1a0(%rsp) vmovaps 0x1a0(%rsp), %ymm0 vmovaps %ymm0, 0x140(%rsp) vmovaps 0x180(%rsp), %ymm0 vmovaps %ymm0, 0x100(%rsp) vmovaps 0x180(%rsp), %ymm0 vmovaps %ymm0, 0x6a0(%rsp) vbroadcastss 0x7cae19(%rip), %ymm0 # 0x1e1cdb0 vmovaps %ymm0, 0x680(%rsp) vmovdqa 0x6a0(%rsp), %ymm0 vmovdqa 0x680(%rsp), %ymm1 vpand %ymm1, %ymm0, %ymm0 vmovdqa %ymm0, 0x180(%rsp) vmovaps 0x100(%rsp), %ymm0 vmovaps %ymm0, 0x660(%rsp) vbroadcastss 0x7bbede(%rip), %ymm0 # 0x1e0deb8 vmovaps %ymm0, 0x640(%rsp) vmovdqa 0x660(%rsp), %ymm0 vmovdqa 0x640(%rsp), %ymm1 vpand %ymm1, %ymm0, %ymm0 vmovdqa %ymm0, 0x100(%rsp) vmovaps 0x180(%rsp), %ymm0 vmovaps %ymm0, 0x2e0(%rsp) vbroadcastss 0x7cad9b(%rip), %ymm0 # 0x1e1cdb8 vmovaps %ymm0, 0x2c0(%rsp) vmovaps 0x2e0(%rsp), %ymm0 vmovaps 0x2c0(%rsp), %ymm1 vmulps %ymm1, %ymm0, %ymm0 vmovaps %ymm0, 0xe0(%rsp) vmovaps 0xe0(%rsp), %ymm0 vmovaps %ymm0, 0x7a0(%rsp) vcvttps2dq 0x7a0(%rsp), %ymm0 vmovdqa %ymm0, 0xa0(%rsp) vmovdqa 0xa0(%rsp), %ymm0 vmovdqa %ymm0, 0x7e0(%rsp) vpbroadcastq 0x7cad5c(%rip), %ymm0 # 0x1e1cde0 vmovdqa %ymm0, 0x7c0(%rsp) vmovdqa 0x7e0(%rsp), %ymm1 vmovdqa 0x7c0(%rsp), %ymm0 vmovdqa %ymm1, 0xc00(%rsp) vmovdqa %ymm0, 0xbe0(%rsp) vmovdqa 0xc00(%rsp), %ymm0 vmovdqa 0xbe0(%rsp), %ymm1 vpaddd %ymm1, %ymm0, %ymm0 vmovdqa %ymm0, 0xa0(%rsp) vmovdqa 0xa0(%rsp), %ymm0 vmovdqa %ymm0, 0xd20(%rsp) vpbroadcastq 0x7cacfd(%rip), %ymm0 # 0x1e1cde8 vmovdqa %ymm0, 0xd00(%rsp) vmovdqa 0xd20(%rsp), %ymm0 vmovdqa 0xd00(%rsp), %ymm1 vpand %ymm1, %ymm0, %ymm0 vmovdqa %ymm0, 0xa0(%rsp) vmovdqa 0xa0(%rsp), %ymm0 vmovdqa %ymm0, 0xc60(%rsp) vcvtdq2ps 0xc60(%rsp), %ymm0 vmovaps %ymm0, 0xe0(%rsp) vmovdqa 0xa0(%rsp), %ymm0 vmovdqa %ymm0, 0xce0(%rsp) vpbroadcastq 0x7cac9e(%rip), %ymm0 # 0x1e1cdf0 vmovdqa %ymm0, 0xcc0(%rsp) vmovdqa 0xce0(%rsp), %ymm0 vmovdqa 0xcc0(%rsp), %ymm1 vpand %ymm1, %ymm0, %ymm0 vmovdqa %ymm0, 0xc0(%rsp) vmovdqa 0xc0(%rsp), %ymm0 vmovdqa %ymm0, 0x820(%rsp) movl $0x1d, 0x81c(%rsp) vmovdqa 0x820(%rsp), %ymm0 movl 0x81c(%rsp), %eax vmovdqa %ymm0, 0xc40(%rsp) movl %eax, 0xc3c(%rsp) vmovdqa 0xc40(%rsp), %ymm0 vmovd 0xc3c(%rsp), %xmm1 vpslld %xmm1, %ymm0, %ymm0 vmovdqa %ymm0, 0xc0(%rsp) vmovdqa 0xa0(%rsp), %ymm0 vmovdqa %ymm0, 0xca0(%rsp) vpbroadcastq 0x7cac07(%rip), %ymm0 # 0x1e1cdf8 vmovdqa %ymm0, 0xc80(%rsp) vmovdqa 0xca0(%rsp), %ymm0 vmovdqa 0xc80(%rsp), %ymm1 vpand %ymm1, %ymm0, %ymm0 vmovdqa %ymm0, 0xa0(%rsp) vmovdqa 0xa0(%rsp), %ymm0 vmovdqa %ymm0, 0xd60(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovdqa %ymm0, 0xd40(%rsp) vmovdqa 0xd60(%rsp), %ymm0 vmovdqa 0xd40(%rsp), %ymm1 vpcmpeqd %ymm1, %ymm0, %ymm0 vmovdqa %ymm0, 0xa0(%rsp) vmovdqa 0xc0(%rsp), %ymm0 vmovdqa %ymm0, 0x860(%rsp) vmovdqa 0x860(%rsp), %ymm0 vmovdqa %ymm0, 0x80(%rsp) vmovdqa 0xa0(%rsp), %ymm0 vmovdqa %ymm0, 0x840(%rsp) vmovdqa 0x840(%rsp), %ymm0 vmovdqa %ymm0, 0x60(%rsp) vmovaps 0x100(%rsp), %ymm1 vmovaps 0x80(%rsp), %ymm0 vmovaps %ymm1, 0xde0(%rsp) vmovaps %ymm0, 0xdc0(%rsp) vmovdqa 0xde0(%rsp), %ymm0 vmovdqa 0xdc0(%rsp), %ymm1 vpxor %ymm1, %ymm0, %ymm0 vmovdqa %ymm0, 0x100(%rsp) vbroadcastss 0x7caad4(%rip), %ymm0 # 0x1e1cdbc vmovaps %ymm0, 0x160(%rsp) vbroadcastss 0x7caac6(%rip), %ymm0 # 0x1e1cdc0 vmovaps %ymm0, 0x140(%rsp) vbroadcastss 0x7caab8(%rip), %ymm0 # 0x1e1cdc4 vmovaps %ymm0, 0x120(%rsp) vmovaps 0xe0(%rsp), %ymm2 vmovaps 0x160(%rsp), %ymm1 vmovaps 0x180(%rsp), %ymm0 vmovaps 
%ymm2, 0x5e0(%rsp) vmovaps %ymm1, 0x5c0(%rsp) vmovaps %ymm0, 0x5a0(%rsp) vmovaps 0x5e0(%rsp), %ymm2 vmovaps 0x5c0(%rsp), %ymm1 vmovaps 0x5a0(%rsp), %ymm0 vmovaps %ymm2, 0x8c0(%rsp) vmovaps %ymm1, 0x8a0(%rsp) vmovaps %ymm0, 0x880(%rsp) vmovaps 0x8c0(%rsp), %ymm1 vmovaps 0x8a0(%rsp), %ymm0 vmovaps 0x880(%rsp), %ymm2 vfmadd213ps %ymm2, %ymm1, %ymm0 # ymm0 = (ymm1 * ymm0) + ymm2 vmovaps %ymm0, 0x180(%rsp) vmovaps 0xe0(%rsp), %ymm2 vmovaps 0x140(%rsp), %ymm1 vmovaps 0x180(%rsp), %ymm0 vmovaps %ymm2, 0x580(%rsp) vmovaps %ymm1, 0x560(%rsp) vmovaps %ymm0, 0x540(%rsp) vmovaps 0x580(%rsp), %ymm2 vmovaps 0x560(%rsp), %ymm1 vmovaps 0x540(%rsp), %ymm0 vmovaps %ymm2, 0x920(%rsp) vmovaps %ymm1, 0x900(%rsp) vmovaps %ymm0, 0x8e0(%rsp) vmovaps 0x920(%rsp), %ymm1 vmovaps 0x900(%rsp), %ymm0 vmovaps 0x8e0(%rsp), %ymm2 vfmadd213ps %ymm2, %ymm1, %ymm0 # ymm0 = (ymm1 * ymm0) + ymm2 vmovaps %ymm0, 0x180(%rsp) vmovaps 0xe0(%rsp), %ymm2 vmovaps 0x120(%rsp), %ymm1 vmovaps 0x180(%rsp), %ymm0 vmovaps %ymm2, 0x520(%rsp) vmovaps %ymm1, 0x500(%rsp) vmovaps %ymm0, 0x4e0(%rsp) vmovaps 0x520(%rsp), %ymm2 vmovaps 0x500(%rsp), %ymm1 vmovaps 0x4e0(%rsp), %ymm0 vmovaps %ymm2, 0x980(%rsp) vmovaps %ymm1, 0x960(%rsp) vmovaps %ymm0, 0x940(%rsp) vmovaps 0x980(%rsp), %ymm1 vmovaps 0x960(%rsp), %ymm0 vmovaps 0x940(%rsp), %ymm2 vfmadd213ps %ymm2, %ymm1, %ymm0 # ymm0 = (ymm1 * ymm0) + ymm2 vmovaps %ymm0, 0x180(%rsp) vbroadcastss 0x7ca8eb(%rip), %ymm0 # 0x1e1cdc8 vmovaps %ymm0, 0xe0(%rsp) vmovaps 0x180(%rsp), %ymm0 vmovaps %ymm0, 0x2a0(%rsp) vmovaps %ymm0, 0x280(%rsp) vmovaps 0x2a0(%rsp), %ymm0 vmovaps 0x280(%rsp), %ymm1 vmulps %ymm1, %ymm0, %ymm0 vmovaps %ymm0, 0x40(%rsp) vmovaps 0xe0(%rsp), %ymm1 vmovaps 0x40(%rsp), %ymm0 vmovaps %ymm1, 0x4c0(%rsp) vmovaps %ymm0, 0x4a0(%rsp) vbroadcastss 0x7ca885(%rip), %ymm0 # 0x1e1cdcc vmovaps %ymm0, 0x480(%rsp) vmovaps 0x4c0(%rsp), %ymm2 vmovaps 0x4a0(%rsp), %ymm1 vmovaps 0x480(%rsp), %ymm0 vmovaps %ymm2, 0x9e0(%rsp) vmovaps %ymm1, 0x9c0(%rsp) vmovaps %ymm0, 0x9a0(%rsp) vmovaps 0x9e0(%rsp), %ymm1 vmovaps 0x9c0(%rsp), %ymm0 vmovaps 0x9a0(%rsp), %ymm2 vfmadd213ps %ymm2, %ymm1, %ymm0 # ymm0 = (ymm1 * ymm0) + ymm2 vmovaps %ymm0, 0xe0(%rsp) vmovaps 0xe0(%rsp), %ymm1 vmovaps 0x40(%rsp), %ymm0 vmovaps %ymm1, 0x460(%rsp) vmovaps %ymm0, 0x440(%rsp) vbroadcastss 0x7ca7f7(%rip), %ymm0 # 0x1e1cdd0 vmovaps %ymm0, 0x420(%rsp) vmovaps 0x460(%rsp), %ymm2 vmovaps 0x440(%rsp), %ymm1 vmovaps 0x420(%rsp), %ymm0 vmovaps %ymm2, 0xa40(%rsp) vmovaps %ymm1, 0xa20(%rsp) vmovaps %ymm0, 0xa00(%rsp) vmovaps 0xa40(%rsp), %ymm1 vmovaps 0xa20(%rsp), %ymm0 vmovaps 0xa00(%rsp), %ymm2 vfmadd213ps %ymm2, %ymm1, %ymm0 # ymm0 = (ymm1 * ymm0) + ymm2 vmovaps %ymm0, 0xe0(%rsp) vmovaps 0xe0(%rsp), %ymm1 vmovaps 0x40(%rsp), %ymm0 vmovaps %ymm1, 0x260(%rsp) vmovaps %ymm0, 0x240(%rsp) vmovaps 0x260(%rsp), %ymm0 vmovaps 0x240(%rsp), %ymm1 vmulps %ymm1, %ymm0, %ymm0 vmovaps %ymm0, 0xe0(%rsp) vmovaps 0xe0(%rsp), %ymm1 vmovaps 0x40(%rsp), %ymm0 vmovaps %ymm1, 0x220(%rsp) vmovaps %ymm0, 0x200(%rsp) vmovaps 0x220(%rsp), %ymm0 vmovaps 0x200(%rsp), %ymm1 vmulps %ymm1, %ymm0, %ymm0 vmovaps %ymm0, 0xe0(%rsp) vmovaps 0x40(%rsp), %ymm1 vmovaps 0xe0(%rsp), %ymm0 vmovaps %ymm1, 0x700(%rsp) vbroadcastss 0x7b693a(%rip), %ymm1 # 0x1e0901c vmovaps %ymm1, 0x6e0(%rsp) vmovaps %ymm0, 0x6c0(%rsp) vmovaps 0x700(%rsp), %ymm2 vmovaps 0x6e0(%rsp), %ymm1 vmovaps 0x6c0(%rsp), %ymm0 vmovaps %ymm2, 0xbc0(%rsp) vmovaps %ymm1, 0xba0(%rsp) vmovaps %ymm0, 0xb80(%rsp) vmovaps 0xbc0(%rsp), %ymm1 vmovaps 0xba0(%rsp), %ymm0 vmovaps 0xb80(%rsp), %ymm2 vfnmadd213ps %ymm2, %ymm1, 
%ymm0 # ymm0 = -(ymm1 * ymm0) + ymm2 vmovaps %ymm0, 0xe0(%rsp) vmovaps 0xe0(%rsp), %ymm0 vmovaps %ymm0, 0x780(%rsp) vbroadcastss 0x7b6896(%rip), %ymm0 # 0x1e09004 vmovaps %ymm0, 0x760(%rsp) vmovaps 0x780(%rsp), %ymm0 vmovaps 0x760(%rsp), %ymm1 vaddps %ymm1, %ymm0, %ymm0 vmovaps %ymm0, 0xe0(%rsp) vbroadcastss 0x7ca635(%rip), %ymm0 # 0x1e1cdd4 vmovaps %ymm0, 0x20(%rsp) vmovaps 0x20(%rsp), %ymm1 vmovaps 0x40(%rsp), %ymm0 vmovaps %ymm1, 0x400(%rsp) vmovaps %ymm0, 0x3e0(%rsp) vbroadcastss 0x7ca60c(%rip), %ymm0 # 0x1e1cdd8 vmovaps %ymm0, 0x3c0(%rsp) vmovaps 0x400(%rsp), %ymm2 vmovaps 0x3e0(%rsp), %ymm1 vmovaps 0x3c0(%rsp), %ymm0 vmovaps %ymm2, 0xaa0(%rsp) vmovaps %ymm1, 0xa80(%rsp) vmovaps %ymm0, 0xa60(%rsp) vmovaps 0xaa0(%rsp), %ymm1 vmovaps 0xa80(%rsp), %ymm0 vmovaps 0xa60(%rsp), %ymm2 vfmadd213ps %ymm2, %ymm1, %ymm0 # ymm0 = (ymm1 * ymm0) + ymm2 vmovaps %ymm0, 0x20(%rsp) vmovaps 0x20(%rsp), %ymm1 vmovaps 0x40(%rsp), %ymm0 vmovaps %ymm1, 0x3a0(%rsp) vmovaps %ymm0, 0x380(%rsp) vbroadcastss 0x7ca584(%rip), %ymm0 # 0x1e1cddc vmovaps %ymm0, 0x360(%rsp) vmovaps 0x3a0(%rsp), %ymm2 vmovaps 0x380(%rsp), %ymm1 vmovaps 0x360(%rsp), %ymm0 vmovaps %ymm2, 0xb00(%rsp) vmovaps %ymm1, 0xae0(%rsp) vmovaps %ymm0, 0xac0(%rsp) vmovaps 0xb00(%rsp), %ymm1 vmovaps 0xae0(%rsp), %ymm0 vmovaps 0xac0(%rsp), %ymm2 vfmadd213ps %ymm2, %ymm1, %ymm0 # ymm0 = (ymm1 * ymm0) + ymm2 vmovaps %ymm0, 0x20(%rsp) vmovaps 0x20(%rsp), %ymm1 vmovaps 0x40(%rsp), %ymm0 vmovaps %ymm1, 0x1e0(%rsp) vmovaps %ymm0, 0x1c0(%rsp) vmovaps 0x1e0(%rsp), %ymm0 vmovaps 0x1c0(%rsp), %ymm1 vmulps %ymm1, %ymm0, %ymm0 vmovaps %ymm0, 0x20(%rsp) vmovaps 0x20(%rsp), %ymm1 vmovaps 0x180(%rsp), %ymm0 vmovaps %ymm1, 0x340(%rsp) vmovaps %ymm0, 0x320(%rsp) vmovaps %ymm0, 0x300(%rsp) vmovaps 0x340(%rsp), %ymm2 vmovaps 0x320(%rsp), %ymm1 vmovaps 0x300(%rsp), %ymm0 vmovaps %ymm2, 0xb60(%rsp) vmovaps %ymm1, 0xb40(%rsp) vmovaps %ymm0, 0xb20(%rsp) vmovaps 0xb60(%rsp), %ymm1 vmovaps 0xb40(%rsp), %ymm0 vmovaps 0xb20(%rsp), %ymm2 vfmadd213ps %ymm2, %ymm1, %ymm0 # ymm0 = (ymm1 * ymm0) + ymm2 vmovaps %ymm0, 0x20(%rsp) vmovaps 0x60(%rsp), %ymm0 vmovaps %ymm0, 0x120(%rsp) vmovaps 0x120(%rsp), %ymm1 vmovaps 0x20(%rsp), %ymm0 vmovaps %ymm1, 0x620(%rsp) vmovaps %ymm0, 0x600(%rsp) vmovdqa 0x620(%rsp), %ymm0 vmovdqa 0x600(%rsp), %ymm1 vpand %ymm1, %ymm0, %ymm0 vmovdqa %ymm0, 0x20(%rsp) vmovaps 0x120(%rsp), %ymm1 vmovaps 0xe0(%rsp), %ymm0 vmovaps %ymm1, 0xe20(%rsp) vmovaps %ymm0, 0xe00(%rsp) vmovdqa 0xe20(%rsp), %ymm0 vpternlogq $0xf, %ymm0, %ymm0, %ymm0 vmovaps 0xe00(%rsp), %ymm1 vpand %ymm1, %ymm0, %ymm0 vmovaps %ymm0, 0xe0(%rsp) vmovaps 0xe0(%rsp), %ymm1 vmovaps 0x20(%rsp), %ymm0 vmovaps %ymm1, 0x740(%rsp) vmovaps %ymm0, 0x720(%rsp) vmovaps 0x740(%rsp), %ymm0 vaddps 0x720(%rsp), %ymm0, %ymm0 vmovaps %ymm0, 0xe0(%rsp) vmovaps 0xe0(%rsp), %ymm1 vmovaps 0x100(%rsp), %ymm0 vmovaps %ymm1, 0xda0(%rsp) vmovaps %ymm0, 0xd80(%rsp) vmovaps 0xda0(%rsp), %ymm0 vmovaps 0xd80(%rsp), %ymm1 vpxor %ymm1, %ymm0, %ymm0 vmovaps %ymm0, 0xe0(%rsp) vmovaps 0xe0(%rsp), %ymm0 movq %rbp, %rsp popq %rbp retq
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
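Both the 512-bit and this 256-bit body build the sign-flip mask with a left shift by 29 (the movl $0x1d ... vpslld sequence): bit 2 of the octant index, which marks the lower half-circle, is moved to bit 31, where it lines up with the IEEE-754 sign bit and can be XORed straight into the result. A minimal scalar illustration of the same trick:

#include <cstdint>
#include <cstdio>
#include <cstring>

int main()
{
    int32_t j = 4;                                // quadrant bits from the range reduction
    uint32_t sign_mask = (uint32_t)(j & 4) << 29; // bit 2 -> bit 31 (IEEE sign bit)
    float x = 0.5f;
    uint32_t bits;
    std::memcpy(&bits, &x, 4);
    bits ^= sign_mask;                            // branch-free conditional negation
    float y;
    std::memcpy(&y, &bits, 4);
    std::printf("%g -> %g\n", x, y);              // prints 0.5 -> -0.5
    return 0;
}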
2,113,291
ncnn::UnaryOp_x86_avx512_functor::unary_op_sin::func_pack4(float vector[4] const&) const
__m128 func_pack4(const __m128& x) const { return sin_ps(x); }
subq $0x678, %rsp # imm = 0x678 movq %rdi, -0x78(%rsp) movq %rsi, -0x80(%rsp) movq -0x80(%rsp), %rax vmovaps (%rax), %xmm0 vmovaps %xmm0, 0x40(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovaps %xmm0, 0x70(%rsp) vmovaps 0x70(%rsp), %xmm0 vmovaps %xmm0, 0x20(%rsp) vmovaps 0x40(%rsp), %xmm0 vmovaps %xmm0, (%rsp) vmovaps 0x40(%rsp), %xmm0 vmovaps %xmm0, 0x1d0(%rsp) vbroadcastss 0x7ca2b7(%rip), %xmm0 # 0x1e1cdb0 vmovaps %xmm0, 0x1c0(%rsp) vmovdqa 0x1d0(%rsp), %xmm0 vmovdqa 0x1c0(%rsp), %xmm1 vpand %xmm1, %xmm0, %xmm0 vmovdqa %xmm0, 0x40(%rsp) vmovaps (%rsp), %xmm0 vmovaps %xmm0, 0x1b0(%rsp) vbroadcastss 0x7bb383(%rip), %xmm0 # 0x1e0deb8 vmovaps %xmm0, 0x1a0(%rsp) vmovdqa 0x1b0(%rsp), %xmm0 vmovdqa 0x1a0(%rsp), %xmm1 vpand %xmm1, %xmm0, %xmm0 vmovdqa %xmm0, (%rsp) vmovaps 0x40(%rsp), %xmm0 vmovaps %xmm0, 0x110(%rsp) vbroadcastss 0x7ca247(%rip), %xmm0 # 0x1e1cdb8 vmovaps %xmm0, 0x100(%rsp) vmovaps 0x110(%rsp), %xmm0 vmovaps 0x100(%rsp), %xmm1 vmulps %xmm1, %xmm0, %xmm0 vmovaps %xmm0, -0x10(%rsp) vmovaps -0x10(%rsp), %xmm0 vmovaps %xmm0, 0x160(%rsp) vcvttps2dq 0x160(%rsp), %xmm0 vmovdqa %xmm0, -0x30(%rsp) vmovdqa -0x30(%rsp), %xmm0 vmovdqa %xmm0, 0x3a0(%rsp) vpbroadcastq 0x7ca214(%rip), %xmm0 # 0x1e1cde0 vmovdqa %xmm0, 0x390(%rsp) vmovdqa 0x3a0(%rsp), %xmm0 vmovdqa 0x390(%rsp), %xmm1 vpaddd %xmm1, %xmm0, %xmm0 vmovdqa %xmm0, -0x30(%rsp) vmovdqa -0x30(%rsp), %xmm0 vmovdqa %xmm0, 0x5f0(%rsp) vpbroadcastq 0x7ca1df(%rip), %xmm0 # 0x1e1cde8 vmovdqa %xmm0, 0x5e0(%rsp) vmovdqa 0x5f0(%rsp), %xmm0 vmovdqa 0x5e0(%rsp), %xmm1 vpand %xmm1, %xmm0, %xmm0 vmovdqa %xmm0, -0x30(%rsp) vmovdqa -0x30(%rsp), %xmm0 vmovdqa %xmm0, 0x170(%rsp) vcvtdq2ps 0x170(%rsp), %xmm0 vmovaps %xmm0, -0x10(%rsp) vmovdqa -0x30(%rsp), %xmm0 vmovdqa %xmm0, 0x5d0(%rsp) vpbroadcastq 0x7ca18c(%rip), %xmm0 # 0x1e1cdf0 vmovdqa %xmm0, 0x5c0(%rsp) vmovdqa 0x5d0(%rsp), %xmm0 vmovdqa 0x5c0(%rsp), %xmm1 vpand %xmm1, %xmm0, %xmm0 vmovdqa %xmm0, -0x20(%rsp) vmovdqa -0x20(%rsp), %xmm0 vmovdqa %xmm0, 0x3c0(%rsp) movl $0x1d, 0x3bc(%rsp) vmovdqa 0x3c0(%rsp), %xmm0 vmovd 0x3bc(%rsp), %xmm1 vpslld %xmm1, %xmm0, %xmm0 vmovdqa %xmm0, -0x20(%rsp) vmovdqa -0x30(%rsp), %xmm0 vmovdqa %xmm0, 0x5b0(%rsp) vpbroadcastq 0x7ca121(%rip), %xmm0 # 0x1e1cdf8 vmovdqa %xmm0, 0x5a0(%rsp) vmovdqa 0x5b0(%rsp), %xmm0 vmovdqa 0x5a0(%rsp), %xmm1 vpand %xmm1, %xmm0, %xmm0 vmovdqa %xmm0, -0x30(%rsp) vmovdqa -0x30(%rsp), %xmm1 vpxor %xmm0, %xmm0, %xmm0 vmovdqa %xmm0, 0x620(%rsp) vmovdqa 0x620(%rsp), %xmm0 vmovdqa %xmm1, 0x610(%rsp) vmovdqa %xmm0, 0x600(%rsp) vmovdqa 0x610(%rsp), %xmm0 vmovdqa 0x600(%rsp), %xmm1 vpcmpeqd %xmm1, %xmm0, %xmm0 vmovdqa %xmm0, -0x30(%rsp) vmovdqa -0x20(%rsp), %xmm0 vmovdqa %xmm0, 0x3e0(%rsp) vmovdqa 0x3e0(%rsp), %xmm0 vmovdqa %xmm0, -0x40(%rsp) vmovdqa -0x30(%rsp), %xmm0 vmovdqa %xmm0, 0x3d0(%rsp) vmovdqa 0x3d0(%rsp), %xmm0 vmovdqa %xmm0, -0x50(%rsp) vmovaps (%rsp), %xmm1 vmovaps -0x40(%rsp), %xmm0 vmovaps %xmm1, 0x660(%rsp) vmovaps %xmm0, 0x650(%rsp) vmovdqa 0x660(%rsp), %xmm0 vmovdqa 0x650(%rsp), %xmm1 vpxor %xmm1, %xmm0, %xmm0 vmovdqa %xmm0, (%rsp) vbroadcastss 0x7c9ff9(%rip), %xmm0 # 0x1e1cdbc vmovaps %xmm0, 0x30(%rsp) vbroadcastss 0x7c9fee(%rip), %xmm0 # 0x1e1cdc0 vmovaps %xmm0, 0x20(%rsp) vbroadcastss 0x7c9fe3(%rip), %xmm0 # 0x1e1cdc4 vmovaps %xmm0, 0x10(%rsp) vmovaps -0x10(%rsp), %xmm2 vmovaps 0x30(%rsp), %xmm1 vmovaps 0x40(%rsp), %xmm0 vmovaps %xmm2, 0x380(%rsp) vmovaps %xmm1, 0x370(%rsp) vmovaps %xmm0, 0x360(%rsp) vmovaps 0x380(%rsp), %xmm2 vmovaps 0x370(%rsp), %xmm1 vmovaps 0x360(%rsp), %xmm0 vmovaps %xmm2, 0x440(%rsp) vmovaps %xmm1, 0x430(%rsp) vmovaps 
%xmm0, 0x420(%rsp) vmovaps 0x440(%rsp), %xmm1 vmovaps 0x430(%rsp), %xmm0 vmovaps 0x420(%rsp), %xmm2 vfmadd213ps %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovaps %xmm0, 0x40(%rsp) vmovaps -0x10(%rsp), %xmm2 vmovaps 0x20(%rsp), %xmm1 vmovaps 0x40(%rsp), %xmm0 vmovaps %xmm2, 0x350(%rsp) vmovaps %xmm1, 0x340(%rsp) vmovaps %xmm0, 0x330(%rsp) vmovaps 0x350(%rsp), %xmm2 vmovaps 0x340(%rsp), %xmm1 vmovaps 0x330(%rsp), %xmm0 vmovaps %xmm2, 0x470(%rsp) vmovaps %xmm1, 0x460(%rsp) vmovaps %xmm0, 0x450(%rsp) vmovaps 0x470(%rsp), %xmm1 vmovaps 0x460(%rsp), %xmm0 vmovaps 0x450(%rsp), %xmm2 vfmadd213ps %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovaps %xmm0, 0x40(%rsp) vmovaps -0x10(%rsp), %xmm2 vmovaps 0x10(%rsp), %xmm1 vmovaps 0x40(%rsp), %xmm0 vmovaps %xmm2, 0x320(%rsp) vmovaps %xmm1, 0x310(%rsp) vmovaps %xmm0, 0x300(%rsp) vmovaps 0x320(%rsp), %xmm2 vmovaps 0x310(%rsp), %xmm1 vmovaps 0x300(%rsp), %xmm0 vmovaps %xmm2, 0x4a0(%rsp) vmovaps %xmm1, 0x490(%rsp) vmovaps %xmm0, 0x480(%rsp) vmovaps 0x4a0(%rsp), %xmm1 vmovaps 0x490(%rsp), %xmm0 vmovaps 0x480(%rsp), %xmm2 vfmadd213ps %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovaps %xmm0, 0x40(%rsp) vbroadcastss 0x7c9e3d(%rip), %xmm0 # 0x1e1cdc8 vmovaps %xmm0, -0x10(%rsp) vmovaps 0x40(%rsp), %xmm0 vmovaps %xmm0, 0xf0(%rsp) vmovaps %xmm0, 0xe0(%rsp) vmovaps 0xf0(%rsp), %xmm0 vmovaps 0xe0(%rsp), %xmm1 vmulps %xmm1, %xmm0, %xmm0 vmovaps %xmm0, -0x60(%rsp) vmovaps -0x10(%rsp), %xmm1 vmovaps -0x60(%rsp), %xmm0 vmovaps %xmm1, 0x2f0(%rsp) vmovaps %xmm0, 0x2e0(%rsp) vbroadcastss 0x7c9de0(%rip), %xmm0 # 0x1e1cdcc vmovaps %xmm0, 0x2d0(%rsp) vmovaps 0x2f0(%rsp), %xmm2 vmovaps 0x2e0(%rsp), %xmm1 vmovaps 0x2d0(%rsp), %xmm0 vmovaps %xmm2, 0x4d0(%rsp) vmovaps %xmm1, 0x4c0(%rsp) vmovaps %xmm0, 0x4b0(%rsp) vmovaps 0x4d0(%rsp), %xmm1 vmovaps 0x4c0(%rsp), %xmm0 vmovaps 0x4b0(%rsp), %xmm2 vfmadd213ps %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovaps %xmm0, -0x10(%rsp) vmovaps -0x10(%rsp), %xmm1 vmovaps -0x60(%rsp), %xmm0 vmovaps %xmm1, 0x2c0(%rsp) vmovaps %xmm0, 0x2b0(%rsp) vbroadcastss 0x7c9d58(%rip), %xmm0 # 0x1e1cdd0 vmovaps %xmm0, 0x2a0(%rsp) vmovaps 0x2c0(%rsp), %xmm2 vmovaps 0x2b0(%rsp), %xmm1 vmovaps 0x2a0(%rsp), %xmm0 vmovaps %xmm2, 0x500(%rsp) vmovaps %xmm1, 0x4f0(%rsp) vmovaps %xmm0, 0x4e0(%rsp) vmovaps 0x500(%rsp), %xmm1 vmovaps 0x4f0(%rsp), %xmm0 vmovaps 0x4e0(%rsp), %xmm2 vfmadd213ps %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovaps %xmm0, -0x10(%rsp) vmovaps -0x10(%rsp), %xmm1 vmovaps -0x60(%rsp), %xmm0 vmovaps %xmm1, 0xd0(%rsp) vmovaps %xmm0, 0xc0(%rsp) vmovaps 0xd0(%rsp), %xmm0 vmovaps 0xc0(%rsp), %xmm1 vmulps %xmm1, %xmm0, %xmm0 vmovaps %xmm0, -0x10(%rsp) vmovaps -0x10(%rsp), %xmm1 vmovaps -0x60(%rsp), %xmm0 vmovaps %xmm1, 0xb0(%rsp) vmovaps %xmm0, 0xa0(%rsp) vmovaps 0xb0(%rsp), %xmm0 vmovaps 0xa0(%rsp), %xmm1 vmulps %xmm1, %xmm0, %xmm0 vmovaps %xmm0, -0x10(%rsp) vmovaps -0x60(%rsp), %xmm1 vmovaps -0x10(%rsp), %xmm0 vmovaps %xmm1, 0x200(%rsp) vbroadcastss 0x7b5ead(%rip), %xmm1 # 0x1e0901c vmovaps %xmm1, 0x1f0(%rsp) vmovaps %xmm0, 0x1e0(%rsp) vmovaps 0x200(%rsp), %xmm2 vmovaps 0x1f0(%rsp), %xmm1 vmovaps 0x1e0(%rsp), %xmm0 vmovaps %xmm2, 0x410(%rsp) vmovaps %xmm1, 0x400(%rsp) vmovaps %xmm0, 0x3f0(%rsp) vmovaps 0x410(%rsp), %xmm1 vmovaps 0x400(%rsp), %xmm0 vmovaps 0x3f0(%rsp), %xmm2 vfnmadd213ps %xmm2, %xmm1, %xmm0 # xmm0 = -(xmm1 * xmm0) + xmm2 vmovaps %xmm0, -0x10(%rsp) vmovaps -0x10(%rsp), %xmm0 vmovaps %xmm0, 0x150(%rsp) vbroadcastss 0x7b5e0f(%rip), %xmm0 # 0x1e09004 vmovaps %xmm0, 0x140(%rsp) vmovaps 0x150(%rsp), %xmm0 
vmovaps 0x140(%rsp), %xmm1 vaddps %xmm1, %xmm0, %xmm0 vmovaps %xmm0, -0x10(%rsp) vbroadcastss 0x7c9bb1(%rip), %xmm0 # 0x1e1cdd4 vmovaps %xmm0, -0x70(%rsp) vmovaps -0x70(%rsp), %xmm1 vmovaps -0x60(%rsp), %xmm0 vmovaps %xmm1, 0x290(%rsp) vmovaps %xmm0, 0x280(%rsp) vbroadcastss 0x7c9b88(%rip), %xmm0 # 0x1e1cdd8 vmovaps %xmm0, 0x270(%rsp) vmovaps 0x290(%rsp), %xmm2 vmovaps 0x280(%rsp), %xmm1 vmovaps 0x270(%rsp), %xmm0 vmovaps %xmm2, 0x530(%rsp) vmovaps %xmm1, 0x520(%rsp) vmovaps %xmm0, 0x510(%rsp) vmovaps 0x530(%rsp), %xmm1 vmovaps 0x520(%rsp), %xmm0 vmovaps 0x510(%rsp), %xmm2 vfmadd213ps %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovaps %xmm0, -0x70(%rsp) vmovaps -0x70(%rsp), %xmm1 vmovaps -0x60(%rsp), %xmm0 vmovaps %xmm1, 0x260(%rsp) vmovaps %xmm0, 0x250(%rsp) vbroadcastss 0x7c9b00(%rip), %xmm0 # 0x1e1cddc vmovaps %xmm0, 0x240(%rsp) vmovaps 0x260(%rsp), %xmm2 vmovaps 0x250(%rsp), %xmm1 vmovaps 0x240(%rsp), %xmm0 vmovaps %xmm2, 0x560(%rsp) vmovaps %xmm1, 0x550(%rsp) vmovaps %xmm0, 0x540(%rsp) vmovaps 0x560(%rsp), %xmm1 vmovaps 0x550(%rsp), %xmm0 vmovaps 0x540(%rsp), %xmm2 vfmadd213ps %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovaps %xmm0, -0x70(%rsp) vmovaps -0x70(%rsp), %xmm1 vmovaps -0x60(%rsp), %xmm0 vmovaps %xmm1, 0x90(%rsp) vmovaps %xmm0, 0x80(%rsp) vmovaps 0x90(%rsp), %xmm0 vmovaps 0x80(%rsp), %xmm1 vmulps %xmm1, %xmm0, %xmm0 vmovaps %xmm0, -0x70(%rsp) vmovaps -0x70(%rsp), %xmm1 vmovaps 0x40(%rsp), %xmm0 vmovaps %xmm1, 0x230(%rsp) vmovaps %xmm0, 0x220(%rsp) vmovaps %xmm0, 0x210(%rsp) vmovaps 0x230(%rsp), %xmm2 vmovaps 0x220(%rsp), %xmm1 vmovaps 0x210(%rsp), %xmm0 vmovaps %xmm2, 0x590(%rsp) vmovaps %xmm1, 0x580(%rsp) vmovaps %xmm0, 0x570(%rsp) vmovaps 0x590(%rsp), %xmm1 vmovaps 0x580(%rsp), %xmm0 vmovaps 0x570(%rsp), %xmm2 vfmadd213ps %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovaps %xmm0, -0x70(%rsp) vmovaps -0x50(%rsp), %xmm0 vmovaps %xmm0, 0x10(%rsp) vmovaps 0x10(%rsp), %xmm1 vmovaps -0x70(%rsp), %xmm0 vmovaps %xmm1, 0x190(%rsp) vmovaps %xmm0, 0x180(%rsp) vmovdqa 0x190(%rsp), %xmm0 vmovdqa 0x180(%rsp), %xmm1 vpand %xmm1, %xmm0, %xmm0 vmovdqa %xmm0, -0x70(%rsp) vmovaps 0x10(%rsp), %xmm1 vmovaps -0x10(%rsp), %xmm0 vmovaps %xmm1, 0x60(%rsp) vmovaps %xmm0, 0x50(%rsp) vmovdqa 0x60(%rsp), %xmm0 vpternlogq $0xf, %xmm0, %xmm0, %xmm0 vmovaps 0x50(%rsp), %xmm1 vpand %xmm1, %xmm0, %xmm0 vmovaps %xmm0, -0x10(%rsp) vmovaps -0x10(%rsp), %xmm1 vmovaps -0x70(%rsp), %xmm0 vmovaps %xmm1, 0x130(%rsp) vmovaps %xmm0, 0x120(%rsp) vmovaps 0x130(%rsp), %xmm0 vaddps 0x120(%rsp), %xmm0, %xmm0 vmovaps %xmm0, -0x10(%rsp) vmovaps -0x10(%rsp), %xmm1 vmovaps (%rsp), %xmm0 vmovaps %xmm1, 0x640(%rsp) vmovaps %xmm0, 0x630(%rsp) vmovaps 0x640(%rsp), %xmm0 vmovaps 0x630(%rsp), %xmm1 vpxor %xmm1, %xmm0, %xmm0 vmovaps %xmm0, -0x10(%rsp) vmovaps -0x10(%rsp), %xmm0 addq $0x678, %rsp # imm = 0x678 retq nopw %cs:(%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
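All three widths subtract j*pi/4 in three steps: the three fused multiply-adds right after the octant computation apply 0.78515625, 2.4187564849853515625e-4 and 3.77489497744594108e-8 in turn. The first constant carries only a few mantissa bits, so y times it is exact in float, and the two tail terms restore the bits of pi/4 it drops; a single rounded pi/4 would contaminate the reduced argument. A small illustrative demonstration (x = 100, for which the kernel derives octant count 128):

#include <cstdio>

int main()
{
    float x = 100.0f;
    float y = 128.0f; // octant count the kernel derives for x = 100

    // one rounded pi/4: the constant already carries ~3e-8 of error, amplified by y
    float naive = x - y * 0.7853981633974483f;

    // split reduction, as in the kernel: y * 0.78515625 is exact,
    // the two tails restore the discarded bits of pi/4
    float split = x - y * 0.78515625f;
    split -= y * 2.4187564849853515625e-4f;
    split -= y * 3.77489497744594108e-8f;

    double exact = 100.0 - 128.0 * (3.14159265358979323846 / 4.0);
    std::printf("naive %.9g  split %.9g  exact %.9g\n", naive, split, exact);
    return 0;
}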
2,113,292
ncnn::UnaryOp_x86_avx512_functor::unary_op_sin::func(float const&) const
float func(const float& x) const { return (float)sin(x); }
subq $0x18, %rsp movq %rdi, 0x10(%rsp) movq %rsi, 0x8(%rsp) movq 0x8(%rsp), %rax vmovss (%rax), %xmm0 callq 0x163a260 addq $0x18, %rsp retq nopw %cs:(%rax,%rax) nopl (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
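The scalar func overload is the fallback for whatever tail the packed loops leave over; at O0 it compiles to a plain call into libm (the callq above). The four overloads exist so an element-wise kernel can walk a buffer at the widest width the build supports. A hypothetical, much-simplified dispatch loop (the name apply_unary and the toy op_square functor are illustrative, not ncnn API; ncnn's real operator also handles Mat packing, channels and OpenMP):

#include <immintrin.h>
#include <cstdio>

template<typename Op>
static void apply_unary(const Op& op, const float* src, float* dst, int size)
{
    int i = 0;
#if __AVX512F__
    for (; i + 16 <= size; i += 16) // widest lane count first
        _mm512_storeu_ps(dst + i, op.func_pack16(_mm512_loadu_ps(src + i)));
#endif
#if __AVX__
    for (; i + 8 <= size; i += 8)
        _mm256_storeu_ps(dst + i, op.func_pack8(_mm256_loadu_ps(src + i)));
#endif
    for (; i + 4 <= size; i += 4)
        _mm_storeu_ps(dst + i, op.func_pack4(_mm_loadu_ps(src + i)));
    for (; i < size; i++)           // scalar tail -> func(), i.e. libm
        dst[i] = op.func(src[i]);
}

struct op_square // toy functor, same shape as the ncnn ones above
{
#if __AVX512F__
    __m512 func_pack16(const __m512& x) const { return _mm512_mul_ps(x, x); }
#endif
#if __AVX__
    __m256 func_pack8(const __m256& x) const { return _mm256_mul_ps(x, x); }
#endif
    __m128 func_pack4(const __m128& x) const { return _mm_mul_ps(x, x); }
    float func(const float& x) const { return x * x; }
};

int main()
{
    float in[19], out[19];
    for (int i = 0; i < 19; i++) in[i] = (float)i;
    apply_unary(op_square(), in, out, 19);
    std::printf("%g %g\n", out[4], out[18]); // 16 324
    return 0;
}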
2,113,293
ncnn::UnaryOp_x86_avx512_functor::unary_op_cos::func_pack16(float vector[16] const&) const
__m512 func_pack16(const __m512& x) const { return cos512_ps(x); }
pushq %rbp movq %rsp, %rbp andq $-0x40, %rsp subq $0x15c0, %rsp # imm = 0x15C0 movq %rdi, 0x38(%rsp) movq %rsi, 0x30(%rsp) movq 0x30(%rsp), %rax vmovaps (%rax), %zmm0 vmovaps %zmm0, 0x2c0(%rsp) vmovaps 0x2c0(%rsp), %zmm0 vmovaps %zmm0, 0x440(%rsp) vmovaps 0x440(%rsp), %zmm1 movl $0x7fffffff, 0x4fc(%rsp) # imm = 0x7FFFFFFF vpbroadcastd 0x4fc(%rsp), %zmm0 vmovdqa64 %zmm0, 0x480(%rsp) vmovdqa64 0x480(%rsp), %zmm0 vmovaps %zmm1, 0x400(%rsp) vmovdqa64 %zmm0, 0x3c0(%rsp) vmovdqa64 0x400(%rsp), %zmm0 vmovdqa64 0x3c0(%rsp), %zmm1 vpandq %zmm1, %zmm0, %zmm0 vmovdqa64 %zmm0, 0x380(%rsp) vmovdqa64 0x380(%rsp), %zmm0 vmovdqa64 %zmm0, 0x2c0(%rsp) vmovaps 0x2c0(%rsp), %zmm1 vmovaps 0x7ca516(%rip), %zmm0 # 0x1e1db00 vmovaps %zmm1, 0x8c0(%rsp) vmovaps %zmm0, 0x880(%rsp) vmovaps 0x8c0(%rsp), %zmm0 vmovaps 0x880(%rsp), %zmm1 vmulps %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0x1c0(%rsp) vmovaps 0x1c0(%rsp), %zmm0 vmovaps %zmm0, 0x1000(%rsp) vpxor %xmm1, %xmm1, %xmm1 vcvttps2dq 0x1000(%rsp), %zmm0 vmovdqa64 %zmm1, 0x1140(%rsp) vmovdqa64 %zmm0, 0x140(%rsp) vmovdqa64 0x140(%rsp), %zmm1 vmovdqa64 0x7ca4ea(%rip), %zmm0 # 0x1e1db40 vmovdqa64 %zmm1, 0x1080(%rsp) vmovdqa64 %zmm0, 0x1040(%rsp) vmovdqa64 0x1080(%rsp), %zmm0 vmovdqa64 0x1040(%rsp), %zmm1 vpaddd %zmm1, %zmm0, %zmm0 vmovdqa64 %zmm0, 0x140(%rsp) vmovdqa64 0x140(%rsp), %zmm1 vmovdqa64 0x7ca4ea(%rip), %zmm0 # 0x1e1db80 vmovdqa64 %zmm1, 0x13c0(%rsp) vmovdqa64 %zmm0, 0x1380(%rsp) vmovdqa64 0x13c0(%rsp), %zmm0 vmovdqa64 0x1380(%rsp), %zmm1 vpandq %zmm1, %zmm0, %zmm0 vmovdqa64 %zmm0, 0x140(%rsp) vmovdqa64 0x140(%rsp), %zmm0 vmovdqa64 %zmm0, 0x1280(%rsp) vcvtdq2ps 0x1280(%rsp), %zmm0 vmovaps %zmm0, 0x1c0(%rsp) vmovdqa64 0x140(%rsp), %zmm1 vmovdqa64 0x7ca50a(%rip), %zmm0 # 0x1e1dc00 vmovdqa64 %zmm1, 0x1240(%rsp) vmovdqa64 %zmm0, 0x1200(%rsp) vmovdqa64 0x1240(%rsp), %zmm0 vmovdqa64 0x1200(%rsp), %zmm1 vpsubd %zmm1, %zmm0, %zmm0 vmovdqa64 %zmm0, 0x140(%rsp) vmovdqa64 0x140(%rsp), %zmm1 vmovdqa64 0x7ca48a(%rip), %zmm0 # 0x1e1dbc0 vmovdqa64 %zmm1, 0x1540(%rsp) vmovdqa64 %zmm0, 0x1500(%rsp) vmovdqa64 0x1540(%rsp), %zmm0 vmovdqa64 0x1500(%rsp), %zmm1 vpandnq %zmm1, %zmm0, %zmm0 vmovdqa64 %zmm0, 0x180(%rsp) vmovdqa64 0x180(%rsp), %zmm0 vmovdqa64 %zmm0, 0x1100(%rsp) movl $0x1d, 0x10fc(%rsp) vmovdqa64 0x1100(%rsp), %zmm0 vmovd 0x10fc(%rsp), %xmm1 vpslld %xmm1, %zmm0, %zmm0 vmovdqa64 %zmm0, 0x180(%rsp) vmovdqa64 0x140(%rsp), %zmm1 vmovdqa64 0x7ca450(%rip), %zmm0 # 0x1e1dc00 vmovdqa64 %zmm1, 0x1340(%rsp) vmovdqa64 %zmm0, 0x1300(%rsp) vmovdqa64 0x1340(%rsp), %zmm0 vmovdqa64 0x1300(%rsp), %zmm1 vpandq %zmm1, %zmm0, %zmm0 vmovdqa64 %zmm0, 0x140(%rsp) vmovdqa64 0x140(%rsp), %zmm0 vptestnmd %zmm0, %zmm0, %k0 kmovw %k0, 0x12fe(%rsp) kmovw 0x12fe(%rsp), %k0 vpmovm2d %k0, %zmm0 vmovdqa64 %zmm0, 0x140(%rsp) vmovdqa64 0x180(%rsp), %zmm0 vmovdqa64 %zmm0, 0x340(%rsp) vmovdqa64 0x340(%rsp), %zmm0 vmovdqa64 %zmm0, 0x100(%rsp) vmovdqa64 0x140(%rsp), %zmm0 vmovdqa64 %zmm0, 0x300(%rsp) vmovdqa64 0x300(%rsp), %zmm0 vmovdqa64 %zmm0, 0xc0(%rsp) vmovaps 0x7ca42a(%rip), %zmm0 # 0x1e1dc80 vmovaps %zmm0, 0x280(%rsp) vmovaps 0x7ca458(%rip), %zmm0 # 0x1e1dcc0 vmovaps %zmm0, 0x240(%rsp) vmovaps 0x7ca486(%rip), %zmm0 # 0x1e1dd00 vmovaps %zmm0, 0x200(%rsp) vmovaps 0x1c0(%rsp), %zmm2 vmovaps 0x280(%rsp), %zmm1 vmovaps 0x2c0(%rsp), %zmm0 vmovaps %zmm2, 0xc80(%rsp) vmovaps %zmm1, 0xc40(%rsp) vmovaps %zmm0, 0xc00(%rsp) vmovaps 0xc80(%rsp), %zmm1 vmovaps 0xc40(%rsp), %zmm0 vmovaps 0xc00(%rsp), %zmm2 vfmadd213ps %zmm2, %zmm1, %zmm0 # zmm0 = (zmm1 * zmm0) + zmm2 vmovaps %zmm0, 0x2c0(%rsp) vmovaps 0x1c0(%rsp), %zmm2 
vmovaps 0x240(%rsp), %zmm1 vmovaps 0x2c0(%rsp), %zmm0 vmovaps %zmm2, 0xbc0(%rsp) vmovaps %zmm1, 0xb80(%rsp) vmovaps %zmm0, 0xb40(%rsp) vmovaps 0xbc0(%rsp), %zmm1 vmovaps 0xb80(%rsp), %zmm0 vmovaps 0xb40(%rsp), %zmm2 vfmadd213ps %zmm2, %zmm1, %zmm0 # zmm0 = (zmm1 * zmm0) + zmm2 vmovaps %zmm0, 0x2c0(%rsp) vmovaps 0x1c0(%rsp), %zmm2 vmovaps 0x200(%rsp), %zmm1 vmovaps 0x2c0(%rsp), %zmm0 vmovaps %zmm2, 0xb00(%rsp) vmovaps %zmm1, 0xac0(%rsp) vmovaps %zmm0, 0xa80(%rsp) vmovaps 0xb00(%rsp), %zmm1 vmovaps 0xac0(%rsp), %zmm0 vmovaps 0xa80(%rsp), %zmm2 vfmadd213ps %zmm2, %zmm1, %zmm0 # zmm0 = (zmm1 * zmm0) + zmm2 vmovaps %zmm0, 0x2c0(%rsp) vmovaps 0x7ca3b2(%rip), %zmm0 # 0x1e1dd40 vmovaps %zmm0, 0x1c0(%rsp) vmovaps 0x2c0(%rsp), %zmm0 vmovaps %zmm0, 0x840(%rsp) vmovaps %zmm0, 0x800(%rsp) vmovaps 0x840(%rsp), %zmm0 vmovaps 0x800(%rsp), %zmm1 vmulps %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0x80(%rsp) vmovaps 0x1c0(%rsp), %zmm2 vmovaps 0x80(%rsp), %zmm1 vmovaps 0x7ca39a(%rip), %zmm0 # 0x1e1dd80 vmovaps %zmm2, 0xa40(%rsp) vmovaps %zmm1, 0xa00(%rsp) vmovaps %zmm0, 0x9c0(%rsp) vmovaps 0xa40(%rsp), %zmm1 vmovaps 0xa00(%rsp), %zmm0 vmovaps 0x9c0(%rsp), %zmm2 vfmadd213ps %zmm2, %zmm1, %zmm0 # zmm0 = (zmm1 * zmm0) + zmm2 vmovaps %zmm0, 0x1c0(%rsp) vmovaps 0x1c0(%rsp), %zmm2 vmovaps 0x80(%rsp), %zmm1 vmovaps 0x7ca382(%rip), %zmm0 # 0x1e1ddc0 vmovaps %zmm2, 0x980(%rsp) vmovaps %zmm1, 0x940(%rsp) vmovaps %zmm0, 0x900(%rsp) vmovaps 0x980(%rsp), %zmm1 vmovaps 0x940(%rsp), %zmm0 vmovaps 0x900(%rsp), %zmm2 vfmadd213ps %zmm2, %zmm1, %zmm0 # zmm0 = (zmm1 * zmm0) + zmm2 vmovaps %zmm0, 0x1c0(%rsp) vmovaps 0x1c0(%rsp), %zmm1 vmovaps 0x80(%rsp), %zmm0 vmovaps %zmm1, 0x7c0(%rsp) vmovaps %zmm0, 0x780(%rsp) vmovaps 0x7c0(%rsp), %zmm0 vmovaps 0x780(%rsp), %zmm1 vmulps %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0x1c0(%rsp) vmovaps 0x1c0(%rsp), %zmm1 vmovaps 0x80(%rsp), %zmm0 vmovaps %zmm1, 0x740(%rsp) vmovaps %zmm0, 0x700(%rsp) vmovaps 0x740(%rsp), %zmm0 vmovaps 0x700(%rsp), %zmm1 vmulps %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0x1c0(%rsp) vmovaps 0x80(%rsp), %zmm2 vmovaps 0x7c9436(%rip), %zmm1 # 0x1e1cf40 vmovaps 0x1c0(%rsp), %zmm0 vmovaps %zmm2, 0xd40(%rsp) vmovaps %zmm1, 0xd00(%rsp) vmovaps %zmm0, 0xcc0(%rsp) vmovaps 0xd40(%rsp), %zmm1 vmovaps 0xd00(%rsp), %zmm0 vmovaps 0xcc0(%rsp), %zmm2 vfnmadd213ps %zmm2, %zmm1, %zmm0 # zmm0 = -(zmm1 * zmm0) + zmm2 vmovaps %zmm0, 0x1c0(%rsp) vmovaps 0x1c0(%rsp), %zmm1 vmovaps 0x7c92de(%rip), %zmm0 # 0x1e1ce40 vmovaps %zmm1, 0xfc0(%rsp) vmovaps %zmm0, 0xf80(%rsp) vmovaps 0xfc0(%rsp), %zmm0 vmovaps 0xf80(%rsp), %zmm1 vaddps %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0x1c0(%rsp) vmovaps 0x7ca266(%rip), %zmm0 # 0x1e1de00 vmovaps %zmm0, 0x40(%rsp) vmovaps 0x40(%rsp), %zmm1 vmovaps 0x80(%rsp), %zmm0 vmovaps %zmm1, 0x6c0(%rsp) vmovaps %zmm0, 0x680(%rsp) vmovaps 0x6c0(%rsp), %zmm0 vmovaps 0x680(%rsp), %zmm1 vmulps %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0x40(%rsp) vmovaps 0x40(%rsp), %zmm1 vmovaps 0x7ca24e(%rip), %zmm0 # 0x1e1de40 vmovaps %zmm1, 0xf40(%rsp) vmovaps %zmm0, 0xf00(%rsp) vmovaps 0xf40(%rsp), %zmm0 vmovaps 0xf00(%rsp), %zmm1 vaddps %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0x40(%rsp) vmovaps 0x40(%rsp), %zmm1 vmovaps 0x80(%rsp), %zmm0 vmovaps %zmm1, 0x640(%rsp) vmovaps %zmm0, 0x600(%rsp) vmovaps 0x640(%rsp), %zmm0 vmovaps 0x600(%rsp), %zmm1 vmulps %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0x40(%rsp) vmovaps 0x40(%rsp), %zmm1 vmovaps 0x7ca210(%rip), %zmm0 # 0x1e1de80 vmovaps %zmm1, 0xec0(%rsp) vmovaps %zmm0, 0xe80(%rsp) vmovaps 0xec0(%rsp), %zmm0 vmovaps 0xe80(%rsp), %zmm1 vaddps %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0x40(%rsp) vmovaps 
0x40(%rsp), %zmm1 vmovaps 0x80(%rsp), %zmm0 vmovaps %zmm1, 0x5c0(%rsp) vmovaps %zmm0, 0x580(%rsp) vmovaps 0x5c0(%rsp), %zmm0 vmovaps 0x580(%rsp), %zmm1 vmulps %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0x40(%rsp) vmovaps 0x40(%rsp), %zmm1 vmovaps 0x2c0(%rsp), %zmm0 vmovaps %zmm1, 0x540(%rsp) vmovaps %zmm0, 0x500(%rsp) vmovaps 0x540(%rsp), %zmm0 vmovaps 0x500(%rsp), %zmm1 vmulps %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0x40(%rsp) vmovaps 0x40(%rsp), %zmm1 vmovaps 0x2c0(%rsp), %zmm0 vmovaps %zmm1, 0xe40(%rsp) vmovaps %zmm0, 0xe00(%rsp) vmovaps 0xe40(%rsp), %zmm0 vmovaps 0xe00(%rsp), %zmm1 vaddps %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0x40(%rsp) vmovaps 0xc0(%rsp), %zmm0 vmovaps %zmm0, 0x200(%rsp) vmovaps 0x200(%rsp), %zmm1 vmovaps 0x40(%rsp), %zmm0 vmovaps %zmm1, 0x11c0(%rsp) vmovaps %zmm0, 0x1180(%rsp) vmovdqa64 0x11c0(%rsp), %zmm0 vmovdqa64 0x1180(%rsp), %zmm1 vpandd %zmm1, %zmm0, %zmm0 vmovdqa64 %zmm0, 0x40(%rsp) vmovaps 0x200(%rsp), %zmm1 vmovaps 0x1c0(%rsp), %zmm0 vmovaps %zmm1, 0x14c0(%rsp) vmovaps %zmm0, 0x1480(%rsp) vmovdqa64 0x14c0(%rsp), %zmm0 vpternlogq $0xf, %zmm0, %zmm0, %zmm0 vmovaps 0x1480(%rsp), %zmm1 vpandd %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0x1c0(%rsp) vmovaps 0x1c0(%rsp), %zmm1 vmovaps 0x40(%rsp), %zmm0 vmovaps %zmm1, 0xdc0(%rsp) vmovaps %zmm0, 0xd80(%rsp) vmovaps 0xdc0(%rsp), %zmm0 vaddps 0xd80(%rsp), %zmm0, %zmm0 vmovaps %zmm0, 0x1c0(%rsp) vmovaps 0x1c0(%rsp), %zmm1 vmovaps 0x100(%rsp), %zmm0 vmovaps %zmm1, 0x1440(%rsp) vmovaps %zmm0, 0x1400(%rsp) vmovaps 0x1440(%rsp), %zmm0 vmovaps 0x1400(%rsp), %zmm1 vpxord %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0x1c0(%rsp) vmovaps 0x1c0(%rsp), %zmm0 movq %rbp, %rsp popq %rbp retq nop
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
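The cos bodies differ from sin only in the quadrant bookkeeping: after rounding the octant index up to even they subtract 2 (the extra vpsubd in the body above) and derive the sign from the complemented quadrant bit (vpandnq where sin uses vpandq), which realizes cos(x) = sin(x + pi/2) purely in index arithmetic; the input sign is dropped up front because cos is even. A scalar sketch with the same assumed Cephes constants as the sin sketch earlier:

#include <cmath>
#include <cstdint>
#include <cstdio>

static float cos_cephes(float x)
{
    x = std::fabs(x);                             // cos is even: drop the input sign
    int32_t j = (int32_t)(x * 1.27323954473516f); // x / (pi/4)
    j = (j + 1) & ~1;                             // force the octant index even
    float y = (float)j;
    j -= 2;                                       // quarter-turn shift toward cos

    float sign = (j & 4) ? 1.0f : -1.0f;          // inverted relative to the sin kernel
    bool use_cos_poly = (j & 2) != 0;

    x = x - y * 0.78515625f;                      // three-piece pi/4 reduction
    x = x - y * 2.4187564849853515625e-4f;
    x = x - y * 3.77489497744594108e-8f;

    float z = x * x;
    float yc = 2.443315711809948e-5f;             // cos polynomial: 1 - z/2 + z^2*P(z)
    yc = yc * z - 1.388731625493765e-3f;
    yc = yc * z + 4.166664568298827e-2f;
    yc = yc * z * z - 0.5f * z + 1.0f;
    float ys = -1.9515295891e-4f;                 // sin polynomial: x + x*z*Q(z)
    ys = ys * z + 8.3321608736e-3f;
    ys = ys * z - 1.6666654611e-1f;
    ys = ys * z * x + x;

    return sign * (use_cos_poly ? yc : ys);
}

int main()
{
    for (float x : {0.0f, 1.0f, 1.5707964f, 3.1415927f, -2.0f})
        std::printf("cos(%g): %g vs %g\n", x, cos_cephes(x), std::cos(x));
    return 0;
}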
2,113,294
ncnn::UnaryOp_x86_avx512_functor::unary_op_cos::func_pack8(float vector[8] const&) const
__m256 func_pack8(const __m256& x) const { return cos256_ps(x); }
pushq %rbp movq %rsp, %rbp andq $-0x20, %rsp subq $0xd80, %rsp # imm = 0xD80 movq %rdi, 0x18(%rsp) movq %rsi, 0x10(%rsp) movq 0x10(%rsp), %rax vmovaps (%rax), %ymm0 vmovaps %ymm0, 0x160(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovaps %ymm0, 0x180(%rsp) vmovaps 0x180(%rsp), %ymm0 vmovaps %ymm0, 0x120(%rsp) vmovaps 0x160(%rsp), %ymm0 vmovaps %ymm0, 0x5e0(%rsp) vbroadcastss 0x7c8edb(%rip), %ymm0 # 0x1e1cdb0 vmovaps %ymm0, 0x5c0(%rsp) vmovdqa 0x5e0(%rsp), %ymm0 vmovdqa 0x5c0(%rsp), %ymm1 vpand %ymm1, %ymm0, %ymm0 vmovdqa %ymm0, 0x160(%rsp) vmovaps 0x160(%rsp), %ymm0 vmovaps %ymm0, 0x380(%rsp) vbroadcastss 0x7c8ea0(%rip), %ymm0 # 0x1e1cdb8 vmovaps %ymm0, 0x360(%rsp) vmovaps 0x380(%rsp), %ymm0 vmovaps 0x360(%rsp), %ymm1 vmulps %ymm1, %ymm0, %ymm0 vmovaps %ymm0, 0xe0(%rsp) vmovaps 0xe0(%rsp), %ymm0 vmovaps %ymm0, 0x7a0(%rsp) vcvttps2dq 0x7a0(%rsp), %ymm0 vmovdqa %ymm0, 0xa0(%rsp) vmovdqa 0xa0(%rsp), %ymm0 vmovdqa %ymm0, 0x7e0(%rsp) vpbroadcastq 0x7c8e61(%rip), %ymm0 # 0x1e1cde0 vmovdqa %ymm0, 0x7c0(%rsp) vmovdqa 0x7e0(%rsp), %ymm1 vmovdqa 0x7c0(%rsp), %ymm0 vmovdqa %ymm1, 0xae0(%rsp) vmovdqa %ymm0, 0xac0(%rsp) vmovdqa 0xae0(%rsp), %ymm0 vmovdqa 0xac0(%rsp), %ymm1 vpaddd %ymm1, %ymm0, %ymm0 vmovdqa %ymm0, 0xa0(%rsp) vmovdqa 0xa0(%rsp), %ymm0 vmovdqa %ymm0, 0xc40(%rsp) vpbroadcastq 0x7c8e02(%rip), %ymm0 # 0x1e1cde8 vmovdqa %ymm0, 0xc20(%rsp) vmovdqa 0xc40(%rsp), %ymm0 vmovdqa 0xc20(%rsp), %ymm1 vpand %ymm1, %ymm0, %ymm0 vmovdqa %ymm0, 0xa0(%rsp) vmovdqa 0xa0(%rsp), %ymm0 vmovdqa %ymm0, 0xb80(%rsp) vcvtdq2ps 0xb80(%rsp), %ymm0 vmovaps %ymm0, 0xe0(%rsp) vmovdqa 0xa0(%rsp), %ymm0 vmovdqa %ymm0, 0xb60(%rsp) vpbroadcastq 0x7c8dab(%rip), %ymm0 # 0x1e1cdf8 vmovdqa %ymm0, 0xb40(%rsp) vmovdqa 0xb60(%rsp), %ymm2 vmovdqa 0xb40(%rsp), %ymm1 vmovdqa %ymm2, 0xbc0(%rsp) vmovdqa %ymm1, 0xba0(%rsp) vmovdqa 0xbc0(%rsp), %ymm1 vmovdqa 0xba0(%rsp), %ymm2 vpsubd %ymm2, %ymm1, %ymm1 vmovdqa %ymm1, 0xa0(%rsp) vmovdqa 0xa0(%rsp), %ymm1 vmovdqa %ymm1, 0xd40(%rsp) vpbroadcastq 0x7c8d3c(%rip), %ymm1 # 0x1e1cdf0 vmovdqa %ymm1, 0xd20(%rsp) vmovdqa 0xd40(%rsp), %ymm1 vmovdqa 0xd20(%rsp), %ymm2 vpandn %ymm2, %ymm1, %ymm1 vmovdqa %ymm1, 0xc0(%rsp) vmovdqa 0xc0(%rsp), %ymm1 vmovdqa %ymm1, 0x820(%rsp) movl $0x1d, 0x81c(%rsp) vmovdqa 0x820(%rsp), %ymm1 movl 0x81c(%rsp), %eax vmovdqa %ymm1, 0xb20(%rsp) movl %eax, 0xb1c(%rsp) vmovdqa 0xb20(%rsp), %ymm1 vmovd 0xb1c(%rsp), %xmm2 vpslld %xmm2, %ymm1, %ymm1 vmovdqa %ymm1, 0xc0(%rsp) vmovdqa 0xa0(%rsp), %ymm1 vmovdqa %ymm1, 0xc00(%rsp) vmovdqa %ymm0, 0xbe0(%rsp) vmovdqa 0xc00(%rsp), %ymm0 vmovdqa 0xbe0(%rsp), %ymm1 vpand %ymm1, %ymm0, %ymm0 vmovdqa %ymm0, 0xa0(%rsp) vmovdqa 0xa0(%rsp), %ymm0 vmovdqa %ymm0, 0xc80(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovdqa %ymm0, 0xc60(%rsp) vmovdqa 0xc80(%rsp), %ymm0 vmovdqa 0xc60(%rsp), %ymm1 vpcmpeqd %ymm1, %ymm0, %ymm0 vmovdqa %ymm0, 0xa0(%rsp) vmovdqa 0xc0(%rsp), %ymm0 vmovdqa %ymm0, 0x860(%rsp) vmovdqa 0x860(%rsp), %ymm0 vmovdqa %ymm0, 0x80(%rsp) vmovdqa 0xa0(%rsp), %ymm0 vmovdqa %ymm0, 0x840(%rsp) vmovdqa 0x840(%rsp), %ymm0 vmovdqa %ymm0, 0x60(%rsp) vbroadcastss 0x7c8bbe(%rip), %ymm0 # 0x1e1cdbc vmovaps %ymm0, 0x140(%rsp) vbroadcastss 0x7c8bb0(%rip), %ymm0 # 0x1e1cdc0 vmovaps %ymm0, 0x120(%rsp) vbroadcastss 0x7c8ba2(%rip), %ymm0 # 0x1e1cdc4 vmovaps %ymm0, 0x100(%rsp) vmovaps 0xe0(%rsp), %ymm2 vmovaps 0x140(%rsp), %ymm1 vmovaps 0x160(%rsp), %ymm0 vmovaps %ymm2, 0x560(%rsp) vmovaps %ymm1, 0x540(%rsp) vmovaps %ymm0, 0x520(%rsp) vmovaps 0x560(%rsp), %ymm2 vmovaps 0x540(%rsp), %ymm1 vmovaps 0x520(%rsp), %ymm0 vmovaps %ymm2, 0x8c0(%rsp) vmovaps %ymm1, 0x8a0(%rsp) 
vmovaps %ymm0, 0x880(%rsp) vmovaps 0x8c0(%rsp), %ymm1 vmovaps 0x8a0(%rsp), %ymm0 vmovaps 0x880(%rsp), %ymm2 vfmadd213ps %ymm2, %ymm1, %ymm0 # ymm0 = (ymm1 * ymm0) + ymm2 vmovaps %ymm0, 0x160(%rsp) vmovaps 0xe0(%rsp), %ymm2 vmovaps 0x120(%rsp), %ymm1 vmovaps 0x160(%rsp), %ymm0 vmovaps %ymm2, 0x500(%rsp) vmovaps %ymm1, 0x4e0(%rsp) vmovaps %ymm0, 0x4c0(%rsp) vmovaps 0x500(%rsp), %ymm2 vmovaps 0x4e0(%rsp), %ymm1 vmovaps 0x4c0(%rsp), %ymm0 vmovaps %ymm2, 0x920(%rsp) vmovaps %ymm1, 0x900(%rsp) vmovaps %ymm0, 0x8e0(%rsp) vmovaps 0x920(%rsp), %ymm1 vmovaps 0x900(%rsp), %ymm0 vmovaps 0x8e0(%rsp), %ymm2 vfmadd213ps %ymm2, %ymm1, %ymm0 # ymm0 = (ymm1 * ymm0) + ymm2 vmovaps %ymm0, 0x160(%rsp) vmovaps 0xe0(%rsp), %ymm2 vmovaps 0x100(%rsp), %ymm1 vmovaps 0x160(%rsp), %ymm0 vmovaps %ymm2, 0x4a0(%rsp) vmovaps %ymm1, 0x480(%rsp) vmovaps %ymm0, 0x460(%rsp) vmovaps 0x4a0(%rsp), %ymm2 vmovaps 0x480(%rsp), %ymm1 vmovaps 0x460(%rsp), %ymm0 vmovaps %ymm2, 0x980(%rsp) vmovaps %ymm1, 0x960(%rsp) vmovaps %ymm0, 0x940(%rsp) vmovaps 0x980(%rsp), %ymm1 vmovaps 0x960(%rsp), %ymm0 vmovaps 0x940(%rsp), %ymm2 vfmadd213ps %ymm2, %ymm1, %ymm0 # ymm0 = (ymm1 * ymm0) + ymm2 vmovaps %ymm0, 0x160(%rsp) vbroadcastss 0x7c89d5(%rip), %ymm0 # 0x1e1cdc8 vmovaps %ymm0, 0xe0(%rsp) vmovaps 0x160(%rsp), %ymm0 vmovaps %ymm0, 0x340(%rsp) vmovaps %ymm0, 0x320(%rsp) vmovaps 0x340(%rsp), %ymm0 vmovaps 0x320(%rsp), %ymm1 vmulps %ymm1, %ymm0, %ymm0 vmovaps %ymm0, 0x40(%rsp) vmovaps 0xe0(%rsp), %ymm1 vmovaps 0x40(%rsp), %ymm0 vmovaps %ymm1, 0x440(%rsp) vmovaps %ymm0, 0x420(%rsp) vbroadcastss 0x7c896f(%rip), %ymm0 # 0x1e1cdcc vmovaps %ymm0, 0x400(%rsp) vmovaps 0x440(%rsp), %ymm2 vmovaps 0x420(%rsp), %ymm1 vmovaps 0x400(%rsp), %ymm0 vmovaps %ymm2, 0x9e0(%rsp) vmovaps %ymm1, 0x9c0(%rsp) vmovaps %ymm0, 0x9a0(%rsp) vmovaps 0x9e0(%rsp), %ymm1 vmovaps 0x9c0(%rsp), %ymm0 vmovaps 0x9a0(%rsp), %ymm2 vfmadd213ps %ymm2, %ymm1, %ymm0 # ymm0 = (ymm1 * ymm0) + ymm2 vmovaps %ymm0, 0xe0(%rsp) vmovaps 0xe0(%rsp), %ymm1 vmovaps 0x40(%rsp), %ymm0 vmovaps %ymm1, 0x3e0(%rsp) vmovaps %ymm0, 0x3c0(%rsp) vbroadcastss 0x7c88e1(%rip), %ymm0 # 0x1e1cdd0 vmovaps %ymm0, 0x3a0(%rsp) vmovaps 0x3e0(%rsp), %ymm2 vmovaps 0x3c0(%rsp), %ymm1 vmovaps 0x3a0(%rsp), %ymm0 vmovaps %ymm2, 0xa40(%rsp) vmovaps %ymm1, 0xa20(%rsp) vmovaps %ymm0, 0xa00(%rsp) vmovaps 0xa40(%rsp), %ymm1 vmovaps 0xa20(%rsp), %ymm0 vmovaps 0xa00(%rsp), %ymm2 vfmadd213ps %ymm2, %ymm1, %ymm0 # ymm0 = (ymm1 * ymm0) + ymm2 vmovaps %ymm0, 0xe0(%rsp) vmovaps 0xe0(%rsp), %ymm1 vmovaps 0x40(%rsp), %ymm0 vmovaps %ymm1, 0x300(%rsp) vmovaps %ymm0, 0x2e0(%rsp) vmovaps 0x300(%rsp), %ymm0 vmovaps 0x2e0(%rsp), %ymm1 vmulps %ymm1, %ymm0, %ymm0 vmovaps %ymm0, 0xe0(%rsp) vmovaps 0xe0(%rsp), %ymm1 vmovaps 0x40(%rsp), %ymm0 vmovaps %ymm1, 0x2c0(%rsp) vmovaps %ymm0, 0x2a0(%rsp) vmovaps 0x2c0(%rsp), %ymm0 vmovaps 0x2a0(%rsp), %ymm1 vmulps %ymm1, %ymm0, %ymm0 vmovaps %ymm0, 0xe0(%rsp) vmovaps 0x40(%rsp), %ymm1 vmovaps 0xe0(%rsp), %ymm0 vmovaps %ymm1, 0x640(%rsp) vbroadcastss 0x7b4a24(%rip), %ymm1 # 0x1e0901c vmovaps %ymm1, 0x620(%rsp) vmovaps %ymm0, 0x600(%rsp) vmovaps 0x640(%rsp), %ymm2 vmovaps 0x620(%rsp), %ymm1 vmovaps 0x600(%rsp), %ymm0 vmovaps %ymm2, 0xaa0(%rsp) vmovaps %ymm1, 0xa80(%rsp) vmovaps %ymm0, 0xa60(%rsp) vmovaps 0xaa0(%rsp), %ymm1 vmovaps 0xa80(%rsp), %ymm0 vmovaps 0xa60(%rsp), %ymm2 vfnmadd213ps %ymm2, %ymm1, %ymm0 # ymm0 = -(ymm1 * ymm0) + ymm2 vmovaps %ymm0, 0xe0(%rsp) vmovaps 0xe0(%rsp), %ymm0 vmovaps %ymm0, 0x780(%rsp) vbroadcastss 0x7b4980(%rip), %ymm0 # 0x1e09004 vmovaps %ymm0, 0x760(%rsp) vmovaps 0x780(%rsp), 
%ymm0 vmovaps 0x760(%rsp), %ymm1 vaddps %ymm1, %ymm0, %ymm0 vmovaps %ymm0, 0xe0(%rsp) vbroadcastss 0x7c871f(%rip), %ymm0 # 0x1e1cdd4 vmovaps %ymm0, 0x20(%rsp) vmovaps 0x20(%rsp), %ymm1 vmovaps 0x40(%rsp), %ymm0 vmovaps %ymm1, 0x280(%rsp) vmovaps %ymm0, 0x260(%rsp) vmovaps 0x280(%rsp), %ymm0 vmovaps 0x260(%rsp), %ymm1 vmulps %ymm1, %ymm0, %ymm0 vmovaps %ymm0, 0x20(%rsp) vmovaps 0x20(%rsp), %ymm0 vmovaps %ymm0, 0x740(%rsp) vbroadcastss 0x7c86cb(%rip), %ymm0 # 0x1e1cdd8 vmovaps %ymm0, 0x720(%rsp) vmovaps 0x740(%rsp), %ymm0 vmovaps 0x720(%rsp), %ymm1 vaddps %ymm1, %ymm0, %ymm0 vmovaps %ymm0, 0x20(%rsp) vmovaps 0x20(%rsp), %ymm1 vmovaps 0x40(%rsp), %ymm0 vmovaps %ymm1, 0x240(%rsp) vmovaps %ymm0, 0x220(%rsp) vmovaps 0x240(%rsp), %ymm0 vmovaps 0x220(%rsp), %ymm1 vmulps %ymm1, %ymm0, %ymm0 vmovaps %ymm0, 0x20(%rsp) vmovaps 0x20(%rsp), %ymm0 vmovaps %ymm0, 0x700(%rsp) vbroadcastss 0x7c8658(%rip), %ymm0 # 0x1e1cddc vmovaps %ymm0, 0x6e0(%rsp) vmovaps 0x700(%rsp), %ymm0 vmovaps 0x6e0(%rsp), %ymm1 vaddps %ymm1, %ymm0, %ymm0 vmovaps %ymm0, 0x20(%rsp) vmovaps 0x20(%rsp), %ymm1 vmovaps 0x40(%rsp), %ymm0 vmovaps %ymm1, 0x200(%rsp) vmovaps %ymm0, 0x1e0(%rsp) vmovaps 0x200(%rsp), %ymm0 vmovaps 0x1e0(%rsp), %ymm1 vmulps %ymm1, %ymm0, %ymm0 vmovaps %ymm0, 0x20(%rsp) vmovaps 0x20(%rsp), %ymm1 vmovaps 0x160(%rsp), %ymm0 vmovaps %ymm1, 0x1c0(%rsp) vmovaps %ymm0, 0x1a0(%rsp) vmovaps 0x1c0(%rsp), %ymm0 vmovaps 0x1a0(%rsp), %ymm1 vmulps %ymm1, %ymm0, %ymm0 vmovaps %ymm0, 0x20(%rsp) vmovaps 0x20(%rsp), %ymm1 vmovaps 0x160(%rsp), %ymm0 vmovaps %ymm1, 0x6c0(%rsp) vmovaps %ymm0, 0x6a0(%rsp) vmovaps 0x6c0(%rsp), %ymm0 vmovaps 0x6a0(%rsp), %ymm1 vaddps %ymm1, %ymm0, %ymm0 vmovaps %ymm0, 0x20(%rsp) vmovaps 0x60(%rsp), %ymm0 vmovaps %ymm0, 0x100(%rsp) vmovaps 0x100(%rsp), %ymm1 vmovaps 0x20(%rsp), %ymm0 vmovaps %ymm1, 0x5a0(%rsp) vmovaps %ymm0, 0x580(%rsp) vmovdqa 0x5a0(%rsp), %ymm0 vmovdqa 0x580(%rsp), %ymm1 vpand %ymm1, %ymm0, %ymm0 vmovdqa %ymm0, 0x20(%rsp) vmovaps 0x100(%rsp), %ymm1 vmovaps 0xe0(%rsp), %ymm0 vmovaps %ymm1, 0xd00(%rsp) vmovaps %ymm0, 0xce0(%rsp) vmovdqa 0xd00(%rsp), %ymm0 vpternlogq $0xf, %ymm0, %ymm0, %ymm0 vmovaps 0xce0(%rsp), %ymm1 vpand %ymm1, %ymm0, %ymm0 vmovaps %ymm0, 0xe0(%rsp) vmovaps 0xe0(%rsp), %ymm1 vmovaps 0x20(%rsp), %ymm0 vmovaps %ymm1, 0x680(%rsp) vmovaps %ymm0, 0x660(%rsp) vmovaps 0x680(%rsp), %ymm0 vaddps 0x660(%rsp), %ymm0, %ymm0 vmovaps %ymm0, 0xe0(%rsp) vmovaps 0xe0(%rsp), %ymm1 vmovaps 0x80(%rsp), %ymm0 vmovaps %ymm1, 0xcc0(%rsp) vmovaps %ymm0, 0xca0(%rsp) vmovaps 0xcc0(%rsp), %ymm0 vmovaps 0xca0(%rsp), %ymm1 vpxor %ymm1, %ymm0, %ymm0 vmovaps %ymm0, 0xe0(%rsp) vmovaps 0xe0(%rsp), %ymm0 movq %rbp, %rsp popq %rbp retq
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
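One AVX-512 idiom recurs near the end of every packed body in this file: vpternlogq with immediate 0x0f and the same register in all three operand slots. Immediate 0x0f maps input bit a=0 to 1 and a=1 to 0, i.e. it is a bitwise NOT, which the compiler uses to invert the polynomial-select mask without loading an all-ones constant. A minimal demonstration (needs AVX-512VL for the 128-bit form, e.g. -mavx512vl):

#include <immintrin.h>
#include <cstdio>

int main()
{
    __m128i a = _mm_set1_epi32(0x0000ffff);
    __m128i n = _mm_ternarylogic_epi64(a, a, a, 0x0f); // vpternlogq $0x0f: NOT(a)
    std::printf("%08x\n", (unsigned)_mm_cvtsi128_si32(n)); // prints ffff0000
    return 0;
}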
2,113,295
ncnn::UnaryOp_x86_avx512_functor::unary_op_cos::func_pack4(float vector[4] const&) const
__m128 func_pack4(const __m128& x) const { return cos_ps(x); }
subq $0x648, %rsp # imm = 0x648 movq %rdi, -0x78(%rsp) movq %rsi, -0x80(%rsp) movq -0x80(%rsp), %rax vmovaps (%rax), %xmm0 vmovaps %xmm0, 0x30(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovaps %xmm0, 0x60(%rsp) vmovaps 0x60(%rsp), %xmm0 vmovaps %xmm0, 0x10(%rsp) vmovaps 0x30(%rsp), %xmm0 vmovaps %xmm0, 0x1a0(%rsp) vbroadcastss 0x7c83e2(%rip), %xmm0 # 0x1e1cdb0 vmovaps %xmm0, 0x190(%rsp) vmovdqa 0x1a0(%rsp), %xmm0 vmovdqa 0x190(%rsp), %xmm1 vpand %xmm1, %xmm0, %xmm0 vmovdqa %xmm0, 0x30(%rsp) vmovaps 0x30(%rsp), %xmm0 vmovaps %xmm0, 0x100(%rsp) vbroadcastss 0x7c83ad(%rip), %xmm0 # 0x1e1cdb8 vmovaps %xmm0, 0xf0(%rsp) vmovaps 0x100(%rsp), %xmm0 vmovaps 0xf0(%rsp), %xmm1 vmulps %xmm1, %xmm0, %xmm0 vmovaps %xmm0, -0x10(%rsp) vmovaps -0x10(%rsp), %xmm0 vmovaps %xmm0, 0x150(%rsp) vcvttps2dq 0x150(%rsp), %xmm0 vmovdqa %xmm0, -0x30(%rsp) vmovdqa -0x30(%rsp), %xmm0 vmovdqa %xmm0, 0x370(%rsp) vpbroadcastq 0x7c837a(%rip), %xmm0 # 0x1e1cde0 vmovdqa %xmm0, 0x360(%rsp) vmovdqa 0x370(%rsp), %xmm0 vmovdqa 0x360(%rsp), %xmm1 vpaddd %xmm1, %xmm0, %xmm0 vmovdqa %xmm0, -0x30(%rsp) vmovdqa -0x30(%rsp), %xmm0 vmovdqa %xmm0, 0x5c0(%rsp) vpbroadcastq 0x7c8345(%rip), %xmm0 # 0x1e1cde8 vmovdqa %xmm0, 0x5b0(%rsp) vmovdqa 0x5c0(%rsp), %xmm0 vmovdqa 0x5b0(%rsp), %xmm1 vpand %xmm1, %xmm0, %xmm0 vmovdqa %xmm0, -0x30(%rsp) vmovdqa -0x30(%rsp), %xmm0 vmovdqa %xmm0, 0x160(%rsp) vcvtdq2ps 0x160(%rsp), %xmm0 vmovaps %xmm0, -0x10(%rsp) vmovdqa -0x30(%rsp), %xmm0 vmovdqa %xmm0, 0x580(%rsp) vpbroadcastq 0x7c82fa(%rip), %xmm0 # 0x1e1cdf8 vmovdqa %xmm0, 0x570(%rsp) vmovdqa 0x580(%rsp), %xmm1 vmovdqa 0x570(%rsp), %xmm2 vpsubd %xmm2, %xmm1, %xmm1 vmovdqa %xmm1, -0x30(%rsp) vmovdqa -0x30(%rsp), %xmm1 vmovdqa %xmm1, 0x630(%rsp) vpbroadcastq 0x7c82b5(%rip), %xmm1 # 0x1e1cdf0 vmovdqa %xmm1, 0x620(%rsp) vmovdqa 0x630(%rsp), %xmm1 vmovdqa 0x620(%rsp), %xmm2 vpandn %xmm2, %xmm1, %xmm1 vmovdqa %xmm1, -0x20(%rsp) vmovdqa -0x20(%rsp), %xmm1 vmovdqa %xmm1, 0x390(%rsp) movl $0x1d, 0x38c(%rsp) vmovdqa 0x390(%rsp), %xmm1 vmovd 0x38c(%rsp), %xmm2 vpslld %xmm2, %xmm1, %xmm1 vmovdqa %xmm1, -0x20(%rsp) vmovdqa -0x30(%rsp), %xmm1 vmovdqa %xmm1, 0x5a0(%rsp) vmovdqa %xmm0, 0x590(%rsp) vmovdqa 0x5a0(%rsp), %xmm0 vmovdqa 0x590(%rsp), %xmm1 vpand %xmm1, %xmm0, %xmm0 vmovdqa %xmm0, -0x30(%rsp) vmovdqa -0x30(%rsp), %xmm1 vpxor %xmm0, %xmm0, %xmm0 vmovdqa %xmm0, 0x5f0(%rsp) vmovdqa 0x5f0(%rsp), %xmm0 vmovdqa %xmm1, 0x5e0(%rsp) vmovdqa %xmm0, 0x5d0(%rsp) vmovdqa 0x5e0(%rsp), %xmm0 vmovdqa 0x5d0(%rsp), %xmm1 vpcmpeqd %xmm1, %xmm0, %xmm0 vmovdqa %xmm0, -0x30(%rsp) vmovdqa -0x20(%rsp), %xmm0 vmovdqa %xmm0, 0x3b0(%rsp) vmovdqa 0x3b0(%rsp), %xmm0 vmovdqa %xmm0, -0x40(%rsp) vmovdqa -0x30(%rsp), %xmm0 vmovdqa %xmm0, 0x3a0(%rsp) vmovdqa 0x3a0(%rsp), %xmm0 vmovdqa %xmm0, -0x50(%rsp) vbroadcastss 0x7c8163(%rip), %xmm0 # 0x1e1cdbc vmovaps %xmm0, 0x20(%rsp) vbroadcastss 0x7c8158(%rip), %xmm0 # 0x1e1cdc0 vmovaps %xmm0, 0x10(%rsp) vbroadcastss 0x7c814d(%rip), %xmm0 # 0x1e1cdc4 vmovaps %xmm0, (%rsp) vmovaps -0x10(%rsp), %xmm2 vmovaps 0x20(%rsp), %xmm1 vmovaps 0x30(%rsp), %xmm0 vmovaps %xmm2, 0x350(%rsp) vmovaps %xmm1, 0x340(%rsp) vmovaps %xmm0, 0x330(%rsp) vmovaps 0x350(%rsp), %xmm2 vmovaps 0x340(%rsp), %xmm1 vmovaps 0x330(%rsp), %xmm0 vmovaps %xmm2, 0x410(%rsp) vmovaps %xmm1, 0x400(%rsp) vmovaps %xmm0, 0x3f0(%rsp) vmovaps 0x410(%rsp), %xmm1 vmovaps 0x400(%rsp), %xmm0 vmovaps 0x3f0(%rsp), %xmm2 vfmadd213ps %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovaps %xmm0, 0x30(%rsp) vmovaps -0x10(%rsp), %xmm2 vmovaps 0x10(%rsp), %xmm1 vmovaps 0x30(%rsp), %xmm0 vmovaps %xmm2, 0x320(%rsp) 
vmovaps %xmm1, 0x310(%rsp) vmovaps %xmm0, 0x300(%rsp) vmovaps 0x320(%rsp), %xmm2 vmovaps 0x310(%rsp), %xmm1 vmovaps 0x300(%rsp), %xmm0 vmovaps %xmm2, 0x440(%rsp) vmovaps %xmm1, 0x430(%rsp) vmovaps %xmm0, 0x420(%rsp) vmovaps 0x440(%rsp), %xmm1 vmovaps 0x430(%rsp), %xmm0 vmovaps 0x420(%rsp), %xmm2 vfmadd213ps %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovaps %xmm0, 0x30(%rsp) vmovaps -0x10(%rsp), %xmm2 vmovaps (%rsp), %xmm1 vmovaps 0x30(%rsp), %xmm0 vmovaps %xmm2, 0x2f0(%rsp) vmovaps %xmm1, 0x2e0(%rsp) vmovaps %xmm0, 0x2d0(%rsp) vmovaps 0x2f0(%rsp), %xmm2 vmovaps 0x2e0(%rsp), %xmm1 vmovaps 0x2d0(%rsp), %xmm0 vmovaps %xmm2, 0x470(%rsp) vmovaps %xmm1, 0x460(%rsp) vmovaps %xmm0, 0x450(%rsp) vmovaps 0x470(%rsp), %xmm1 vmovaps 0x460(%rsp), %xmm0 vmovaps 0x450(%rsp), %xmm2 vfmadd213ps %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovaps %xmm0, 0x30(%rsp) vbroadcastss 0x7c7fa9(%rip), %xmm0 # 0x1e1cdc8 vmovaps %xmm0, -0x10(%rsp) vmovaps 0x30(%rsp), %xmm0 vmovaps %xmm0, 0xe0(%rsp) vmovaps %xmm0, 0xd0(%rsp) vmovaps 0xe0(%rsp), %xmm0 vmovaps 0xd0(%rsp), %xmm1 vmulps %xmm1, %xmm0, %xmm0 vmovaps %xmm0, -0x60(%rsp) vmovaps -0x10(%rsp), %xmm1 vmovaps -0x60(%rsp), %xmm0 vmovaps %xmm1, 0x2c0(%rsp) vmovaps %xmm0, 0x2b0(%rsp) vbroadcastss 0x7c7f4c(%rip), %xmm0 # 0x1e1cdcc vmovaps %xmm0, 0x2a0(%rsp) vmovaps 0x2c0(%rsp), %xmm2 vmovaps 0x2b0(%rsp), %xmm1 vmovaps 0x2a0(%rsp), %xmm0 vmovaps %xmm2, 0x4a0(%rsp) vmovaps %xmm1, 0x490(%rsp) vmovaps %xmm0, 0x480(%rsp) vmovaps 0x4a0(%rsp), %xmm1 vmovaps 0x490(%rsp), %xmm0 vmovaps 0x480(%rsp), %xmm2 vfmadd213ps %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovaps %xmm0, -0x10(%rsp) vmovaps -0x10(%rsp), %xmm1 vmovaps -0x60(%rsp), %xmm0 vmovaps %xmm1, 0x290(%rsp) vmovaps %xmm0, 0x280(%rsp) vbroadcastss 0x7c7ec4(%rip), %xmm0 # 0x1e1cdd0 vmovaps %xmm0, 0x270(%rsp) vmovaps 0x290(%rsp), %xmm2 vmovaps 0x280(%rsp), %xmm1 vmovaps 0x270(%rsp), %xmm0 vmovaps %xmm2, 0x4d0(%rsp) vmovaps %xmm1, 0x4c0(%rsp) vmovaps %xmm0, 0x4b0(%rsp) vmovaps 0x4d0(%rsp), %xmm1 vmovaps 0x4c0(%rsp), %xmm0 vmovaps 0x4b0(%rsp), %xmm2 vfmadd213ps %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovaps %xmm0, -0x10(%rsp) vmovaps -0x10(%rsp), %xmm1 vmovaps -0x60(%rsp), %xmm0 vmovaps %xmm1, 0xc0(%rsp) vmovaps %xmm0, 0xb0(%rsp) vmovaps 0xc0(%rsp), %xmm0 vmovaps 0xb0(%rsp), %xmm1 vmulps %xmm1, %xmm0, %xmm0 vmovaps %xmm0, -0x10(%rsp) vmovaps -0x10(%rsp), %xmm1 vmovaps -0x60(%rsp), %xmm0 vmovaps %xmm1, 0xa0(%rsp) vmovaps %xmm0, 0x90(%rsp) vmovaps 0xa0(%rsp), %xmm0 vmovaps 0x90(%rsp), %xmm1 vmulps %xmm1, %xmm0, %xmm0 vmovaps %xmm0, -0x10(%rsp) vmovaps -0x60(%rsp), %xmm1 vmovaps -0x10(%rsp), %xmm0 vmovaps %xmm1, 0x1d0(%rsp) vbroadcastss 0x7b4019(%rip), %xmm1 # 0x1e0901c vmovaps %xmm1, 0x1c0(%rsp) vmovaps %xmm0, 0x1b0(%rsp) vmovaps 0x1d0(%rsp), %xmm2 vmovaps 0x1c0(%rsp), %xmm1 vmovaps 0x1b0(%rsp), %xmm0 vmovaps %xmm2, 0x3e0(%rsp) vmovaps %xmm1, 0x3d0(%rsp) vmovaps %xmm0, 0x3c0(%rsp) vmovaps 0x3e0(%rsp), %xmm1 vmovaps 0x3d0(%rsp), %xmm0 vmovaps 0x3c0(%rsp), %xmm2 vfnmadd213ps %xmm2, %xmm1, %xmm0 # xmm0 = -(xmm1 * xmm0) + xmm2 vmovaps %xmm0, -0x10(%rsp) vmovaps -0x10(%rsp), %xmm0 vmovaps %xmm0, 0x140(%rsp) vbroadcastss 0x7b3f7b(%rip), %xmm0 # 0x1e09004 vmovaps %xmm0, 0x130(%rsp) vmovaps 0x140(%rsp), %xmm0 vmovaps 0x130(%rsp), %xmm1 vaddps %xmm1, %xmm0, %xmm0 vmovaps %xmm0, -0x10(%rsp) vbroadcastss 0x7c7d1d(%rip), %xmm0 # 0x1e1cdd4 vmovaps %xmm0, -0x70(%rsp) vmovaps -0x70(%rsp), %xmm1 vmovaps -0x60(%rsp), %xmm0 vmovaps %xmm1, 0x260(%rsp) vmovaps %xmm0, 0x250(%rsp) vbroadcastss 0x7c7cf4(%rip), %xmm0 # 
0x1e1cdd8 vmovaps %xmm0, 0x240(%rsp) vmovaps 0x260(%rsp), %xmm2 vmovaps 0x250(%rsp), %xmm1 vmovaps 0x240(%rsp), %xmm0 vmovaps %xmm2, 0x500(%rsp) vmovaps %xmm1, 0x4f0(%rsp) vmovaps %xmm0, 0x4e0(%rsp) vmovaps 0x500(%rsp), %xmm1 vmovaps 0x4f0(%rsp), %xmm0 vmovaps 0x4e0(%rsp), %xmm2 vfmadd213ps %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovaps %xmm0, -0x70(%rsp) vmovaps -0x70(%rsp), %xmm1 vmovaps -0x60(%rsp), %xmm0 vmovaps %xmm1, 0x230(%rsp) vmovaps %xmm0, 0x220(%rsp) vbroadcastss 0x7c7c6c(%rip), %xmm0 # 0x1e1cddc vmovaps %xmm0, 0x210(%rsp) vmovaps 0x230(%rsp), %xmm2 vmovaps 0x220(%rsp), %xmm1 vmovaps 0x210(%rsp), %xmm0 vmovaps %xmm2, 0x530(%rsp) vmovaps %xmm1, 0x520(%rsp) vmovaps %xmm0, 0x510(%rsp) vmovaps 0x530(%rsp), %xmm1 vmovaps 0x520(%rsp), %xmm0 vmovaps 0x510(%rsp), %xmm2 vfmadd213ps %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovaps %xmm0, -0x70(%rsp) vmovaps -0x70(%rsp), %xmm1 vmovaps -0x60(%rsp), %xmm0 vmovaps %xmm1, 0x80(%rsp) vmovaps %xmm0, 0x70(%rsp) vmovaps 0x80(%rsp), %xmm0 vmovaps 0x70(%rsp), %xmm1 vmulps %xmm1, %xmm0, %xmm0 vmovaps %xmm0, -0x70(%rsp) vmovaps -0x70(%rsp), %xmm1 vmovaps 0x30(%rsp), %xmm0 vmovaps %xmm1, 0x200(%rsp) vmovaps %xmm0, 0x1f0(%rsp) vmovaps %xmm0, 0x1e0(%rsp) vmovaps 0x200(%rsp), %xmm2 vmovaps 0x1f0(%rsp), %xmm1 vmovaps 0x1e0(%rsp), %xmm0 vmovaps %xmm2, 0x560(%rsp) vmovaps %xmm1, 0x550(%rsp) vmovaps %xmm0, 0x540(%rsp) vmovaps 0x560(%rsp), %xmm1 vmovaps 0x550(%rsp), %xmm0 vmovaps 0x540(%rsp), %xmm2 vfmadd213ps %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovaps %xmm0, -0x70(%rsp) vmovaps -0x50(%rsp), %xmm0 vmovaps %xmm0, (%rsp) vmovaps (%rsp), %xmm1 vmovaps -0x70(%rsp), %xmm0 vmovaps %xmm1, 0x180(%rsp) vmovaps %xmm0, 0x170(%rsp) vmovdqa 0x180(%rsp), %xmm0 vmovdqa 0x170(%rsp), %xmm1 vpand %xmm1, %xmm0, %xmm0 vmovdqa %xmm0, -0x70(%rsp) vmovaps (%rsp), %xmm1 vmovaps -0x10(%rsp), %xmm0 vmovaps %xmm1, 0x50(%rsp) vmovaps %xmm0, 0x40(%rsp) vmovdqa 0x50(%rsp), %xmm0 vpternlogq $0xf, %xmm0, %xmm0, %xmm0 vmovaps 0x40(%rsp), %xmm1 vpand %xmm1, %xmm0, %xmm0 vmovaps %xmm0, -0x10(%rsp) vmovaps -0x10(%rsp), %xmm1 vmovaps -0x70(%rsp), %xmm0 vmovaps %xmm1, 0x120(%rsp) vmovaps %xmm0, 0x110(%rsp) vmovaps 0x120(%rsp), %xmm0 vaddps 0x110(%rsp), %xmm0, %xmm0 vmovaps %xmm0, -0x10(%rsp) vmovaps -0x10(%rsp), %xmm1 vmovaps -0x40(%rsp), %xmm0 vmovaps %xmm1, 0x610(%rsp) vmovaps %xmm0, 0x600(%rsp) vmovaps 0x610(%rsp), %xmm0 vmovaps 0x600(%rsp), %xmm1 vpxor %xmm1, %xmm0, %xmm0 vmovaps %xmm0, -0x10(%rsp) vmovaps -0x10(%rsp), %xmm0 addq $0x648, %rsp # imm = 0x648 retq nopw %cs:(%rax,%rax) nopl (%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
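The packed kernel whose tail appears above (its name and source line fall before this excerpt) follows the familiar cephes-style sin/cos pattern visible in its instruction sequence: clear the sign bit with vpand, scale by a broadcast constant, truncate with vcvttps2dq, adjust the integer octant index, rebuild the sign via a vpslld by 29 (bit 2 into the float sign bit), then run the Cody-Waite reduction and polynomials through vfmadd213ps. A hedged scalar sketch of that range reduction; the constants are the standard cephes single-precision ones (4/pi and the three-part pi/4 split), assumed rather than decoded from the RIP-relative loads:

#include <cmath>

// Scalar illustration of the range reduction the packed kernel encodes.
float reduce_to_octant(float x, int* octant)
{
    x = std::fabs(x);                        // clear the sign bit, as the vpand does
    int j = (int)(x * 1.27323954473516f);    // j = x * 4/pi, truncated (vcvttps2dq)
    j = (j + 1) & ~1;                        // round the octant index up to even
    float y = (float)j;
    // three-step Cody-Waite reduction: x - y*pi/4 in extended precision,
    // matching the chain of three vfmadd213ps instructions above
    x = x - y * 0.78515625f;
    x = x - y * 2.4187564849853515625e-4f;
    x = x - y * 3.77489497744594108e-8f;
    *octant = j;
    return x;                                // reduced argument; per-octant polynomials follow
}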
2,113,296
ncnn::UnaryOp_x86_avx512_functor::unary_op_cos::func(float const&) const
float func(const float& x) const { return (float)cos(x); }
subq $0x18, %rsp movq %rdi, 0x10(%rsp) movq %rsi, 0x8(%rsp) movq 0x8(%rsp), %rax vmovss (%rax), %xmm0 callq 0x163a2b0 addq $0x18, %rsp retq nopw %cs:(%rax,%rax) nopl (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
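At -O0 the scalar functor above compiles to a plain call: the asm just moves x into xmm0 and calls out, presumably to the C runtime's cos routine. Worth noting is that (float)cos(x) promotes the argument to double, computes in double, and truncates back, whereas cosf stays in single precision; the two are usually equivalent in practice but not guaranteed bit-identical. A minimal illustration:

#include <cmath>

float via_double(float x) { return (float)cos(x); } // promote, compute in double, truncate back
float via_float(float x)  { return cosf(x);       } // single-precision path throughout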
2,113,297
ncnn::UnaryOp_x86_avx512_functor::unary_op_tan::func_pack16(float vector[16] const&) const
__m512 func_pack16(const __m512& x) const { return tan512_ps(x); }
pushq %rbp movq %rsp, %rbp andq $-0x40, %rsp subq $0x1f00, %rsp # imm = 0x1F00 movq %rdi, 0x38(%rsp) movq %rsi, 0x30(%rsp) movq 0x30(%rsp), %rax vmovaps (%rax), %zmm0 vmovaps %zmm0, 0x180(%rsp) movl $0x322bcc77, 0x3fc(%rsp) # imm = 0x322BCC77 vbroadcastss 0x3fc(%rsp), %zmm0 vmovaps %zmm0, 0x380(%rsp) vmovaps 0x380(%rsp), %zmm0 vmovaps %zmm0, 0xc0(%rsp) vmovaps 0x180(%rsp), %zmm0 vmovaps %zmm0, 0x800(%rsp) leaq 0x140(%rsp), %rax movq %rax, 0x7f8(%rsp) leaq 0x100(%rsp), %rax movq %rax, 0x7f0(%rsp) vmovaps 0x800(%rsp), %zmm0 vmovaps %zmm0, 0x6c0(%rsp) vmovaps 0x800(%rsp), %zmm0 vmovaps %zmm0, 0x9c0(%rsp) vmovaps 0x9c0(%rsp), %zmm1 movl $0x7fffffff, 0xa7c(%rsp) # imm = 0x7FFFFFFF vpbroadcastd 0xa7c(%rsp), %zmm0 vmovdqa64 %zmm0, 0xa00(%rsp) vmovdqa64 0xa00(%rsp), %zmm0 vmovaps %zmm1, 0x980(%rsp) vmovdqa64 %zmm0, 0x940(%rsp) vmovdqa64 0x980(%rsp), %zmm0 vmovdqa64 0x940(%rsp), %zmm1 vpandq %zmm1, %zmm0, %zmm0 vmovdqa64 %zmm0, 0x900(%rsp) vmovdqa64 0x900(%rsp), %zmm0 vmovdqa64 %zmm0, 0x800(%rsp) vmovaps 0x6c0(%rsp), %zmm1 vmovaps 0x7c85d8(%rip), %zmm0 # 0x1e1dac0 vmovaps %zmm1, 0x1900(%rsp) vmovaps %zmm0, 0x18c0(%rsp) vmovdqa64 0x1900(%rsp), %zmm0 vmovdqa64 0x18c0(%rsp), %zmm1 vpandd %zmm1, %zmm0, %zmm0 vmovdqa64 %zmm0, 0x6c0(%rsp) vmovaps 0x800(%rsp), %zmm1 vmovaps 0x7c85d8(%rip), %zmm0 # 0x1e1db00 vmovaps %zmm1, 0xdc0(%rsp) vmovaps %zmm0, 0xd80(%rsp) vmovaps 0xdc0(%rsp), %zmm0 vmovaps 0xd80(%rsp), %zmm1 vmulps %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0x680(%rsp) vmovaps 0x680(%rsp), %zmm0 vmovaps %zmm0, 0x1640(%rsp) vpxor %xmm1, %xmm1, %xmm1 vcvttps2dq 0x1640(%rsp), %zmm0 vmovdqa64 %zmm1, 0x1800(%rsp) vmovdqa64 %zmm0, 0x600(%rsp) vmovdqa64 0x600(%rsp), %zmm1 vmovdqa64 0x7c85ac(%rip), %zmm0 # 0x1e1db40 vmovdqa64 %zmm1, 0x16c0(%rsp) vmovdqa64 %zmm0, 0x1680(%rsp) vmovdqa64 0x16c0(%rsp), %zmm0 vmovdqa64 0x1680(%rsp), %zmm1 vpaddd %zmm1, %zmm0, %zmm0 vmovdqa64 %zmm0, 0x600(%rsp) vmovdqa64 0x600(%rsp), %zmm1 vmovdqa64 0x7c85ac(%rip), %zmm0 # 0x1e1db80 vmovdqa64 %zmm1, 0x1b80(%rsp) vmovdqa64 %zmm0, 0x1b40(%rsp) vmovdqa64 0x1b80(%rsp), %zmm0 vmovdqa64 0x1b40(%rsp), %zmm1 vpandq %zmm1, %zmm0, %zmm0 vmovdqa64 %zmm0, 0x600(%rsp) vmovdqa64 0x600(%rsp), %zmm0 vmovdqa64 %zmm0, 0x19c0(%rsp) vcvtdq2ps 0x19c0(%rsp), %zmm0 vmovaps %zmm0, 0x680(%rsp) vmovdqa64 0x600(%rsp), %zmm0 vmovdqa64 %zmm0, 0x5c0(%rsp) vmovdqa64 0x600(%rsp), %zmm1 vmovdqa64 0x7c857c(%rip), %zmm0 # 0x1e1dbc0 vmovdqa64 %zmm1, 0x1b00(%rsp) vmovdqa64 %zmm0, 0x1ac0(%rsp) vmovdqa64 0x1b00(%rsp), %zmm0 vmovdqa64 0x1ac0(%rsp), %zmm1 vpandq %zmm1, %zmm0, %zmm0 vmovdqa64 %zmm0, 0x640(%rsp) vmovdqa64 0x640(%rsp), %zmm0 vmovdqa64 %zmm0, 0x17c0(%rsp) movl $0x1d, 0x17bc(%rsp) vmovdqa64 0x17c0(%rsp), %zmm0 vmovd 0x17bc(%rsp), %xmm1 vpslld %xmm1, %zmm0, %zmm0 vmovdqa64 %zmm0, 0x640(%rsp) vmovdqa64 0x600(%rsp), %zmm1 vmovdqa64 0x7c8542(%rip), %zmm0 # 0x1e1dc00 vmovdqa64 %zmm1, 0x1a80(%rsp) vmovdqa64 %zmm0, 0x1a40(%rsp) vmovdqa64 0x1a80(%rsp), %zmm0 vmovdqa64 0x1a40(%rsp), %zmm1 vpandq %zmm1, %zmm0, %zmm0 vmovdqa64 %zmm0, 0x600(%rsp) vmovdqa64 0x600(%rsp), %zmm0 vptestnmd %zmm0, %zmm0, %k0 kmovw %k0, 0x1a3e(%rsp) kmovw 0x1a3e(%rsp), %k0 vpmovm2d %k0, %zmm0 vmovdqa64 %zmm0, 0x600(%rsp) vmovdqa64 0x640(%rsp), %zmm0 vmovdqa64 %zmm0, 0x8c0(%rsp) vmovdqa64 0x8c0(%rsp), %zmm0 vmovdqa64 %zmm0, 0x580(%rsp) vmovdqa64 0x600(%rsp), %zmm0 vmovdqa64 %zmm0, 0x880(%rsp) vmovdqa64 0x880(%rsp), %zmm0 vmovdqa64 %zmm0, 0x540(%rsp) vmovaps 0x7c851c(%rip), %zmm0 # 0x1e1dc80 vmovaps %zmm0, 0x780(%rsp) vmovaps 0x7c854a(%rip), %zmm0 # 0x1e1dcc0 vmovaps %zmm0, 0x740(%rsp) vmovaps 
0x7c8578(%rip), %zmm0 # 0x1e1dd00 vmovaps %zmm0, 0x700(%rsp) vmovaps 0x680(%rsp), %zmm2 vmovaps 0x780(%rsp), %zmm1 vmovaps 0x800(%rsp), %zmm0 vmovaps %zmm2, 0x13c0(%rsp) vmovaps %zmm1, 0x1380(%rsp) vmovaps %zmm0, 0x1340(%rsp) vmovaps 0x13c0(%rsp), %zmm1 vmovaps 0x1380(%rsp), %zmm0 vmovaps 0x1340(%rsp), %zmm2 vfmadd213ps %zmm2, %zmm1, %zmm0 # zmm0 = (zmm1 * zmm0) + zmm2 vmovaps %zmm0, 0x800(%rsp) vmovaps 0x680(%rsp), %zmm2 vmovaps 0x740(%rsp), %zmm1 vmovaps 0x800(%rsp), %zmm0 vmovaps %zmm2, 0x1300(%rsp) vmovaps %zmm1, 0x12c0(%rsp) vmovaps %zmm0, 0x1280(%rsp) vmovaps 0x1300(%rsp), %zmm1 vmovaps 0x12c0(%rsp), %zmm0 vmovaps 0x1280(%rsp), %zmm2 vfmadd213ps %zmm2, %zmm1, %zmm0 # zmm0 = (zmm1 * zmm0) + zmm2 vmovaps %zmm0, 0x800(%rsp) vmovaps 0x680(%rsp), %zmm2 vmovaps 0x700(%rsp), %zmm1 vmovaps 0x800(%rsp), %zmm0 vmovaps %zmm2, 0x1240(%rsp) vmovaps %zmm1, 0x1200(%rsp) vmovaps %zmm0, 0x11c0(%rsp) vmovaps 0x1240(%rsp), %zmm1 vmovaps 0x1200(%rsp), %zmm0 vmovaps 0x11c0(%rsp), %zmm2 vfmadd213ps %zmm2, %zmm1, %zmm0 # zmm0 = (zmm1 * zmm0) + zmm2 vmovaps %zmm0, 0x800(%rsp) vmovdqa64 0x5c0(%rsp), %zmm1 vmovdqa64 0x7c835c(%rip), %zmm0 # 0x1e1dc00 vmovdqa64 %zmm1, 0x1980(%rsp) vmovdqa64 %zmm0, 0x1940(%rsp) vmovdqa64 0x1980(%rsp), %zmm0 vmovdqa64 0x1940(%rsp), %zmm1 vpsubd %zmm1, %zmm0, %zmm0 vmovdqa64 %zmm0, 0x5c0(%rsp) vmovdqa64 0x5c0(%rsp), %zmm1 vmovdqa64 0x7c82dc(%rip), %zmm0 # 0x1e1dbc0 vmovdqa64 %zmm1, 0x1e00(%rsp) vmovdqa64 %zmm0, 0x1dc0(%rsp) vmovdqa64 0x1e00(%rsp), %zmm0 vmovdqa64 0x1dc0(%rsp), %zmm1 vpandnq %zmm1, %zmm0, %zmm0 vmovdqa64 %zmm0, 0x5c0(%rsp) vmovdqa64 0x5c0(%rsp), %zmm0 vmovdqa64 %zmm0, 0x1740(%rsp) movl $0x1d, 0x173c(%rsp) vmovdqa64 0x1740(%rsp), %zmm0 vmovd 0x173c(%rsp), %xmm1 vpslld %xmm1, %zmm0, %zmm0 vmovdqa64 %zmm0, 0x5c0(%rsp) vmovdqa64 0x5c0(%rsp), %zmm0 vmovdqa64 %zmm0, 0x840(%rsp) vmovdqa64 0x840(%rsp), %zmm0 vmovdqa64 %zmm0, 0x500(%rsp) vmovaps 0x6c0(%rsp), %zmm1 vmovaps 0x580(%rsp), %zmm0 vmovaps %zmm1, 0x1d00(%rsp) vmovaps %zmm0, 0x1cc0(%rsp) vmovdqa64 0x1d00(%rsp), %zmm0 vmovdqa64 0x1cc0(%rsp), %zmm1 vpxord %zmm1, %zmm0, %zmm0 vmovdqa64 %zmm0, 0x6c0(%rsp) vmovaps 0x800(%rsp), %zmm0 vmovaps %zmm0, 0xd40(%rsp) vmovaps %zmm0, 0xd00(%rsp) vmovaps 0xd40(%rsp), %zmm0 vmovaps 0xd00(%rsp), %zmm1 vmulps %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0x4c0(%rsp) vmovaps 0x7c8356(%rip), %zmm0 # 0x1e1dd40 vmovaps %zmm0, 0x680(%rsp) vmovaps 0x680(%rsp), %zmm2 vmovaps 0x4c0(%rsp), %zmm1 vmovaps 0x7c8374(%rip), %zmm0 # 0x1e1dd80 vmovaps %zmm2, 0x1180(%rsp) vmovaps %zmm1, 0x1140(%rsp) vmovaps %zmm0, 0x1100(%rsp) vmovaps 0x1180(%rsp), %zmm1 vmovaps 0x1140(%rsp), %zmm0 vmovaps 0x1100(%rsp), %zmm2 vfmadd213ps %zmm2, %zmm1, %zmm0 # zmm0 = (zmm1 * zmm0) + zmm2 vmovaps %zmm0, 0x680(%rsp) vmovaps 0x680(%rsp), %zmm2 vmovaps 0x4c0(%rsp), %zmm1 vmovaps 0x7c835c(%rip), %zmm0 # 0x1e1ddc0 vmovaps %zmm2, 0x10c0(%rsp) vmovaps %zmm1, 0x1080(%rsp) vmovaps %zmm0, 0x1040(%rsp) vmovaps 0x10c0(%rsp), %zmm1 vmovaps 0x1080(%rsp), %zmm0 vmovaps 0x1040(%rsp), %zmm2 vfmadd213ps %zmm2, %zmm1, %zmm0 # zmm0 = (zmm1 * zmm0) + zmm2 vmovaps %zmm0, 0x680(%rsp) vmovaps 0x680(%rsp), %zmm1 vmovaps 0x4c0(%rsp), %zmm0 vmovaps %zmm1, 0xcc0(%rsp) vmovaps %zmm0, 0xc80(%rsp) vmovaps 0xcc0(%rsp), %zmm0 vmovaps 0xc80(%rsp), %zmm1 vmulps %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0x680(%rsp) vmovaps 0x680(%rsp), %zmm1 vmovaps 0x4c0(%rsp), %zmm0 vmovaps %zmm1, 0xc40(%rsp) vmovaps %zmm0, 0xc00(%rsp) vmovaps 0xc40(%rsp), %zmm0 vmovaps 0xc00(%rsp), %zmm1 vmulps %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0x680(%rsp) vmovaps 0x4c0(%rsp), %zmm2 vmovaps 
0x7c7410(%rip), %zmm1 # 0x1e1cf40 vmovaps 0x680(%rsp), %zmm0 vmovaps %zmm2, 0x1480(%rsp) vmovaps %zmm1, 0x1440(%rsp) vmovaps %zmm0, 0x1400(%rsp) vmovaps 0x1480(%rsp), %zmm1 vmovaps 0x1440(%rsp), %zmm0 vmovaps 0x1400(%rsp), %zmm2 vfnmadd213ps %zmm2, %zmm1, %zmm0 # zmm0 = -(zmm1 * zmm0) + zmm2 vmovaps %zmm0, 0x680(%rsp) vmovaps 0x680(%rsp), %zmm1 vmovaps 0x7c72b8(%rip), %zmm0 # 0x1e1ce40 vmovaps %zmm1, 0x1600(%rsp) vmovaps %zmm0, 0x15c0(%rsp) vmovaps 0x1600(%rsp), %zmm0 vmovaps 0x15c0(%rsp), %zmm1 vaddps %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0x680(%rsp) vmovaps 0x7c8240(%rip), %zmm0 # 0x1e1de00 vmovaps %zmm0, 0x480(%rsp) vmovaps 0x480(%rsp), %zmm2 vmovaps 0x4c0(%rsp), %zmm1 vmovaps 0x7c825e(%rip), %zmm0 # 0x1e1de40 vmovaps %zmm2, 0x1000(%rsp) vmovaps %zmm1, 0xfc0(%rsp) vmovaps %zmm0, 0xf80(%rsp) vmovaps 0x1000(%rsp), %zmm1 vmovaps 0xfc0(%rsp), %zmm0 vmovaps 0xf80(%rsp), %zmm2 vfmadd213ps %zmm2, %zmm1, %zmm0 # zmm0 = (zmm1 * zmm0) + zmm2 vmovaps %zmm0, 0x480(%rsp) vmovaps 0x480(%rsp), %zmm2 vmovaps 0x4c0(%rsp), %zmm1 vmovaps 0x7c8246(%rip), %zmm0 # 0x1e1de80 vmovaps %zmm2, 0xf40(%rsp) vmovaps %zmm1, 0xf00(%rsp) vmovaps %zmm0, 0xec0(%rsp) vmovaps 0xf40(%rsp), %zmm1 vmovaps 0xf00(%rsp), %zmm0 vmovaps 0xec0(%rsp), %zmm2 vfmadd213ps %zmm2, %zmm1, %zmm0 # zmm0 = (zmm1 * zmm0) + zmm2 vmovaps %zmm0, 0x480(%rsp) vmovaps 0x480(%rsp), %zmm1 vmovaps 0x4c0(%rsp), %zmm0 vmovaps %zmm1, 0xbc0(%rsp) vmovaps %zmm0, 0xb80(%rsp) vmovaps 0xbc0(%rsp), %zmm0 vmovaps 0xb80(%rsp), %zmm1 vmulps %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0x480(%rsp) vmovaps 0x480(%rsp), %zmm1 vmovaps 0x800(%rsp), %zmm0 vmovaps %zmm1, 0xe80(%rsp) vmovaps %zmm0, 0xe40(%rsp) vmovaps %zmm0, 0xe00(%rsp) vmovaps 0xe80(%rsp), %zmm1 vmovaps 0xe40(%rsp), %zmm0 vmovaps 0xe00(%rsp), %zmm2 vfmadd213ps %zmm2, %zmm1, %zmm0 # zmm0 = (zmm1 * zmm0) + zmm2 vmovaps %zmm0, 0x480(%rsp) vmovaps 0x540(%rsp), %zmm0 vmovaps %zmm0, 0x700(%rsp) vmovaps 0x700(%rsp), %zmm1 vmovaps 0x480(%rsp), %zmm0 vmovaps %zmm1, 0x1880(%rsp) vmovaps %zmm0, 0x1840(%rsp) vmovdqa64 0x1880(%rsp), %zmm0 vmovdqa64 0x1840(%rsp), %zmm1 vpandd %zmm1, %zmm0, %zmm0 vmovdqa64 %zmm0, 0x440(%rsp) vmovaps 0x700(%rsp), %zmm1 vmovaps 0x680(%rsp), %zmm0 vmovaps %zmm1, 0x1d80(%rsp) vmovaps %zmm0, 0x1d40(%rsp) vmovdqa64 0x1d80(%rsp), %zmm0 vmovdqa64 0x1d40(%rsp), %zmm1 vpandnd %zmm1, %zmm0, %zmm0 vmovdqa64 %zmm0, 0x400(%rsp) vmovaps 0x480(%rsp), %zmm1 vmovaps 0x440(%rsp), %zmm0 vmovaps %zmm1, 0xb40(%rsp) vmovaps %zmm0, 0xb00(%rsp) vmovaps 0xb40(%rsp), %zmm0 vmovaps 0xb00(%rsp), %zmm1 vsubps %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0x480(%rsp) vmovaps 0x680(%rsp), %zmm1 vmovaps 0x400(%rsp), %zmm0 vmovaps %zmm1, 0xac0(%rsp) vmovaps %zmm0, 0xa80(%rsp) vmovaps 0xac0(%rsp), %zmm0 vmovaps 0xa80(%rsp), %zmm1 vsubps %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0x680(%rsp) vmovaps 0x400(%rsp), %zmm1 vmovaps 0x440(%rsp), %zmm0 vmovaps %zmm1, 0x1580(%rsp) vmovaps %zmm0, 0x1540(%rsp) vmovaps 0x1580(%rsp), %zmm0 vmovaps 0x1540(%rsp), %zmm1 vaddps %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0x780(%rsp) vmovaps 0x680(%rsp), %zmm1 vmovaps 0x480(%rsp), %zmm0 vmovaps %zmm1, 0x1500(%rsp) vmovaps %zmm0, 0x14c0(%rsp) vmovaps 0x1500(%rsp), %zmm0 vmovaps 0x14c0(%rsp), %zmm1 vaddps %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0x740(%rsp) vmovaps 0x780(%rsp), %zmm1 vmovaps 0x6c0(%rsp), %zmm0 vmovaps %zmm1, 0x1c80(%rsp) vmovaps %zmm0, 0x1c40(%rsp) vmovdqa64 0x1c80(%rsp), %zmm0 vmovdqa64 0x1c40(%rsp), %zmm1 vpxord %zmm1, %zmm0, %zmm0 movq 0x7f8(%rsp), %rax vmovdqa64 %zmm0, (%rax) vmovaps 0x740(%rsp), %zmm1 vmovaps 0x500(%rsp), %zmm0 vmovaps %zmm1, 0x1c00(%rsp) vmovaps 
%zmm0, 0x1bc0(%rsp) vmovdqa64 0x1c00(%rsp), %zmm0 vmovdqa64 0x1bc0(%rsp), %zmm1 vpxord %zmm1, %zmm0, %zmm0 movq 0x7f0(%rsp), %rax vmovdqa64 %zmm0, (%rax) vmovaps 0x100(%rsp), %zmm0 vpxor %xmm1, %xmm1, %xmm1 vmovaps %zmm1, 0x1c0(%rsp) vmovaps 0x1c0(%rsp), %zmm1 vcmpeqps %zmm1, %zmm0, %k0 kmovw %k0, 0xbe(%rsp) vmovaps 0x100(%rsp), %zmm1 movw 0xbe(%rsp), %ax vmovaps 0xc0(%rsp), %zmm0 vmovaps %zmm1, 0x2c0(%rsp) movw %ax, 0x2be(%rsp) vmovaps %zmm1, 0x240(%rsp) vmovaps %zmm0, 0x200(%rsp) vmovaps 0x240(%rsp), %zmm1 vmovaps 0x200(%rsp), %zmm0 kmovw 0x2be(%rsp), %k1 vmovaps %zmm1, 0x340(%rsp) vmovaps %zmm0, 0x300(%rsp) vmovaps 0x340(%rsp), %zmm1 vmovaps 0x300(%rsp), %zmm2 vmovaps 0x2c0(%rsp), %zmm0 vaddps %zmm2, %zmm1, %zmm0 {%k1} vmovaps %zmm0, 0x100(%rsp) vmovaps 0x140(%rsp), %zmm1 vmovaps 0x100(%rsp), %zmm0 vmovaps %zmm1, 0x1e80(%rsp) vmovaps %zmm0, 0x1e40(%rsp) vmovaps 0x1e80(%rsp), %zmm0 vdivps 0x1e40(%rsp), %zmm0, %zmm0 vmovaps %zmm0, 0x40(%rsp) vmovaps 0x40(%rsp), %zmm0 movq %rbp, %rsp popq %rbp retq nopl (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
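The tail of func_pack16 above guards the final vdivps: cos(x) is compared against zero into a k-register (vcmpeqps ... %k0), and a masked vaddps nudges only the zero lanes before dividing, so tan = sin/cos never divides by zero. The constant 0x322bcc77 broadcast at the top of the kernel decodes to roughly 1e-8f (an assumed reading of the bit pattern). A hedged AVX-512F sketch of that guard:

#include <immintrin.h>

// Guarded divide matching the kernel's tail: nudge lanes where cos(x) == 0.
static inline __m512 tan_from_sincos512(__m512 ysin, __m512 ycos)
{
    __mmask16 zmask = _mm512_cmp_ps_mask(ycos, _mm512_setzero_ps(), _CMP_EQ_OQ);
    ycos = _mm512_mask_add_ps(ycos, zmask, ycos, _mm512_set1_ps(1e-8f)); // masked vaddps
    return _mm512_div_ps(ysin, ycos);                                    // tan = sin / cos
}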
2,113,298
ncnn::UnaryOp_x86_avx512_functor::unary_op_tan::func_pack8(float vector[8] const&) const
__m256 func_pack8(const __m256& x) const { return tan256_ps(x); }
pushq %rbp movq %rsp, %rbp andq $-0x20, %rsp subq $0x1380, %rsp # imm = 0x1380 movq %rdi, 0x18(%rsp) movq %rsi, 0x10(%rsp) movq 0x10(%rsp), %rax vmovaps (%rax), %ymm0 vmovaps %ymm0, 0xe0(%rsp) movl $0x322bcc77, 0x1bc(%rsp) # imm = 0x322BCC77 vmovss 0x1bc(%rsp), %xmm0 vmovss %xmm0, 0x136c(%rsp) vmovss %xmm0, 0x1368(%rsp) vmovss %xmm0, 0x1364(%rsp) vmovss %xmm0, 0x1360(%rsp) vmovss %xmm0, 0x135c(%rsp) vmovss %xmm0, 0x1358(%rsp) vmovss %xmm0, 0x1354(%rsp) vmovss %xmm0, 0x1350(%rsp) vmovss 0x1354(%rsp), %xmm1 vmovss 0x1350(%rsp), %xmm0 vinsertps $0x10, %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[2,3] vmovss 0x1358(%rsp), %xmm1 vinsertps $0x20, %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0,1],xmm1[0],xmm0[3] vmovss 0x135c(%rsp), %xmm1 vinsertps $0x30, %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0,1,2],xmm1[0] vmovss 0x1364(%rsp), %xmm2 vmovss 0x1360(%rsp), %xmm1 vinsertps $0x10, %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm2[0],xmm1[2,3] vmovss 0x1368(%rsp), %xmm2 vinsertps $0x20, %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0,1],xmm2[0],xmm1[3] vmovss 0x136c(%rsp), %xmm2 vinsertps $0x30, %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0,1,2],xmm2[0] vmovaps %xmm1, 0x1330(%rsp) vmovaps %xmm0, 0x1320(%rsp) vmovaps 0x1320(%rsp), %ymm0 vmovaps %ymm0, 0x80(%rsp) vmovaps 0xe0(%rsp), %ymm0 vmovaps %ymm0, 0x3c0(%rsp) leaq 0xc0(%rsp), %rax movq %rax, 0x3b8(%rsp) leaq 0xa0(%rsp), %rax movq %rax, 0x3b0(%rsp) vpxor %xmm1, %xmm1, %xmm1 vmovaps %ymm1, 0x460(%rsp) vmovaps 0x460(%rsp), %ymm0 vmovaps %ymm0, 0x340(%rsp) vmovaps 0x3c0(%rsp), %ymm0 vmovaps %ymm0, 0x320(%rsp) vmovaps 0x3c0(%rsp), %ymm0 vmovaps %ymm0, 0x960(%rsp) vbroadcastss 0x7c6c0b(%rip), %ymm0 # 0x1e1cdb0 vmovaps %ymm0, 0x940(%rsp) vmovdqa 0x960(%rsp), %ymm0 vmovdqa 0x940(%rsp), %ymm2 vpand %ymm2, %ymm0, %ymm0 vmovdqa %ymm0, 0x3c0(%rsp) vmovaps 0x320(%rsp), %ymm0 vmovaps %ymm0, 0x920(%rsp) vbroadcastss 0x7b7cd0(%rip), %ymm0 # 0x1e0deb8 vmovaps %ymm0, 0x900(%rsp) vmovdqa 0x920(%rsp), %ymm0 vmovdqa 0x900(%rsp), %ymm2 vpand %ymm2, %ymm0, %ymm0 vmovdqa %ymm0, 0x320(%rsp) vmovaps 0x3c0(%rsp), %ymm0 vmovaps %ymm0, 0x5a0(%rsp) vbroadcastss 0x7c6b8d(%rip), %ymm0 # 0x1e1cdb8 vmovaps %ymm0, 0x580(%rsp) vmovaps 0x5a0(%rsp), %ymm0 vmovaps 0x580(%rsp), %ymm2 vmulps %ymm2, %ymm0, %ymm0 vmovaps %ymm0, 0x300(%rsp) vmovaps 0x300(%rsp), %ymm0 vmovaps %ymm0, 0xaa0(%rsp) vcvttps2dq 0xaa0(%rsp), %ymm0 vmovdqa %ymm0, 0x2c0(%rsp) vmovdqa 0x2c0(%rsp), %ymm0 vmovdqa %ymm0, 0xae0(%rsp) vpbroadcastq 0x7c6b4e(%rip), %ymm0 # 0x1e1cde0 vmovdqa %ymm0, 0xac0(%rsp) vmovdqa 0xae0(%rsp), %ymm2 vmovdqa 0xac0(%rsp), %ymm0 vmovdqa %ymm2, 0xf60(%rsp) vmovdqa %ymm0, 0xf40(%rsp) vmovdqa 0xf60(%rsp), %ymm0 vmovdqa 0xf40(%rsp), %ymm2 vpaddd %ymm2, %ymm0, %ymm0 vmovdqa %ymm0, 0x2c0(%rsp) vmovdqa 0x2c0(%rsp), %ymm0 vmovdqa %ymm0, 0x1140(%rsp) vpbroadcastq 0x7c6aef(%rip), %ymm0 # 0x1e1cde8 vmovdqa %ymm0, 0x1120(%rsp) vmovdqa 0x1140(%rsp), %ymm0 vmovdqa 0x1120(%rsp), %ymm2 vpand %ymm2, %ymm0, %ymm0 vmovdqa %ymm0, 0x2c0(%rsp) vmovdqa 0x2c0(%rsp), %ymm0 vmovdqa %ymm0, 0x1040(%rsp) vcvtdq2ps 0x1040(%rsp), %ymm0 vmovaps %ymm0, 0x300(%rsp) vmovdqa 0x2c0(%rsp), %ymm0 vmovdqa %ymm0, 0x2a0(%rsp) vmovdqa 0x2c0(%rsp), %ymm0 vmovdqa %ymm0, 0x1100(%rsp) vpbroadcastq 0x7c6a7e(%rip), %ymm0 # 0x1e1cdf0 vmovdqa %ymm0, 0x10e0(%rsp) vmovdqa 0x1100(%rsp), %ymm2 vmovdqa 0x10e0(%rsp), %ymm3 vpand %ymm3, %ymm2, %ymm2 vmovdqa %ymm2, 0x2e0(%rsp) vmovdqa 0x2e0(%rsp), %ymm2 vmovdqa %ymm2, 0xb60(%rsp) movl $0x1d, 0xb5c(%rsp) vmovdqa 0xb60(%rsp), %ymm2 movl 0xb5c(%rsp), %eax vmovdqa %ymm2, 0xfa0(%rsp) movl %eax, 0xf9c(%rsp) vmovdqa 0xfa0(%rsp), %ymm2 vmovd 
0xf9c(%rsp), %xmm3 vpslld %xmm3, %ymm2, %ymm2 vmovdqa %ymm2, 0x2e0(%rsp) vmovdqa 0x2c0(%rsp), %ymm2 vmovdqa %ymm2, 0x10c0(%rsp) vpbroadcastq 0x7c69e7(%rip), %ymm2 # 0x1e1cdf8 vmovdqa %ymm2, 0x10a0(%rsp) vmovdqa 0x10c0(%rsp), %ymm3 vmovdqa 0x10a0(%rsp), %ymm4 vpand %ymm4, %ymm3, %ymm3 vmovdqa %ymm3, 0x2c0(%rsp) vmovdqa 0x2c0(%rsp), %ymm3 vmovdqa %ymm3, 0x1180(%rsp) vpxor %xmm3, %xmm3, %xmm3 vmovdqa %ymm3, 0x1160(%rsp) vmovdqa 0x1180(%rsp), %ymm3 vmovdqa 0x1160(%rsp), %ymm4 vpcmpeqd %ymm4, %ymm3, %ymm3 vmovdqa %ymm3, 0x2c0(%rsp) vmovdqa 0x2e0(%rsp), %ymm3 vmovdqa %ymm3, 0xbc0(%rsp) vmovdqa 0xbc0(%rsp), %ymm3 vmovdqa %ymm3, 0x280(%rsp) vmovdqa 0x2c0(%rsp), %ymm3 vmovdqa %ymm3, 0xba0(%rsp) vmovdqa 0xba0(%rsp), %ymm3 vmovdqa %ymm3, 0x260(%rsp) vbroadcastss 0x7c68f4(%rip), %ymm3 # 0x1e1cdbc vmovaps %ymm3, 0x380(%rsp) vbroadcastss 0x7c68e6(%rip), %ymm3 # 0x1e1cdc0 vmovaps %ymm3, 0x360(%rsp) vbroadcastss 0x7c68d8(%rip), %ymm3 # 0x1e1cdc4 vmovaps %ymm3, 0x340(%rsp) vmovaps 0x300(%rsp), %ymm5 vmovaps 0x380(%rsp), %ymm4 vmovaps 0x3c0(%rsp), %ymm3 vmovaps %ymm5, 0x8a0(%rsp) vmovaps %ymm4, 0x880(%rsp) vmovaps %ymm3, 0x860(%rsp) vmovaps 0x8a0(%rsp), %ymm5 vmovaps 0x880(%rsp), %ymm4 vmovaps 0x860(%rsp), %ymm3 vmovaps %ymm5, 0xc20(%rsp) vmovaps %ymm4, 0xc00(%rsp) vmovaps %ymm3, 0xbe0(%rsp) vmovaps 0xc20(%rsp), %ymm4 vmovaps 0xc00(%rsp), %ymm3 vmovaps 0xbe0(%rsp), %ymm5 vfmadd213ps %ymm5, %ymm4, %ymm3 # ymm3 = (ymm4 * ymm3) + ymm5 vmovaps %ymm3, 0x3c0(%rsp) vmovaps 0x300(%rsp), %ymm5 vmovaps 0x360(%rsp), %ymm4 vmovaps 0x3c0(%rsp), %ymm3 vmovaps %ymm5, 0x840(%rsp) vmovaps %ymm4, 0x820(%rsp) vmovaps %ymm3, 0x800(%rsp) vmovaps 0x840(%rsp), %ymm5 vmovaps 0x820(%rsp), %ymm4 vmovaps 0x800(%rsp), %ymm3 vmovaps %ymm5, 0xc80(%rsp) vmovaps %ymm4, 0xc60(%rsp) vmovaps %ymm3, 0xc40(%rsp) vmovaps 0xc80(%rsp), %ymm4 vmovaps 0xc60(%rsp), %ymm3 vmovaps 0xc40(%rsp), %ymm5 vfmadd213ps %ymm5, %ymm4, %ymm3 # ymm3 = (ymm4 * ymm3) + ymm5 vmovaps %ymm3, 0x3c0(%rsp) vmovaps 0x300(%rsp), %ymm5 vmovaps 0x340(%rsp), %ymm4 vmovaps 0x3c0(%rsp), %ymm3 vmovaps %ymm5, 0x7e0(%rsp) vmovaps %ymm4, 0x7c0(%rsp) vmovaps %ymm3, 0x7a0(%rsp) vmovaps 0x7e0(%rsp), %ymm5 vmovaps 0x7c0(%rsp), %ymm4 vmovaps 0x7a0(%rsp), %ymm3 vmovaps %ymm5, 0xce0(%rsp) vmovaps %ymm4, 0xcc0(%rsp) vmovaps %ymm3, 0xca0(%rsp) vmovaps 0xce0(%rsp), %ymm4 vmovaps 0xcc0(%rsp), %ymm3 vmovaps 0xca0(%rsp), %ymm5 vfmadd213ps %ymm5, %ymm4, %ymm3 # ymm3 = (ymm4 * ymm3) + ymm5 vmovaps %ymm3, 0x3c0(%rsp) vmovdqa 0x2a0(%rsp), %ymm3 vmovdqa %ymm3, 0x1020(%rsp) vmovdqa %ymm2, 0x1000(%rsp) vmovdqa 0x1020(%rsp), %ymm3 vmovdqa 0x1000(%rsp), %ymm2 vmovdqa %ymm3, 0x1080(%rsp) vmovdqa %ymm2, 0x1060(%rsp) vmovdqa 0x1080(%rsp), %ymm2 vmovdqa 0x1060(%rsp), %ymm3 vpsubd %ymm3, %ymm2, %ymm2 vmovdqa %ymm2, 0x2a0(%rsp) vmovdqa 0x2a0(%rsp), %ymm2 vmovdqa %ymm2, 0x12c0(%rsp) vmovdqa %ymm0, 0x12a0(%rsp) vmovdqa 0x12c0(%rsp), %ymm0 vmovdqa 0x12a0(%rsp), %ymm2 vpandn %ymm2, %ymm0, %ymm0 vmovdqa %ymm0, 0x2a0(%rsp) vmovdqa 0x2a0(%rsp), %ymm0 vmovdqa %ymm0, 0xb20(%rsp) movl $0x1d, 0xb1c(%rsp) vmovdqa 0xb20(%rsp), %ymm0 movl 0xb1c(%rsp), %eax vmovdqa %ymm0, 0xfe0(%rsp) movl %eax, 0xfdc(%rsp) vmovdqa 0xfe0(%rsp), %ymm0 vmovd 0xfdc(%rsp), %xmm2 vpslld %xmm2, %ymm0, %ymm0 vmovdqa %ymm0, 0x2a0(%rsp) vmovdqa 0x2a0(%rsp), %ymm0 vmovdqa %ymm0, 0xb80(%rsp) vmovdqa 0xb80(%rsp), %ymm0 vmovdqa %ymm0, 0x240(%rsp) vmovaps 0x320(%rsp), %ymm2 vmovaps 0x280(%rsp), %ymm0 vmovaps %ymm2, 0x1240(%rsp) vmovaps %ymm0, 0x1220(%rsp) vmovdqa 0x1240(%rsp), %ymm0 vmovdqa 0x1220(%rsp), %ymm2 vpxor %ymm2, %ymm0, %ymm0 vmovdqa %ymm0, 
0x320(%rsp) vmovaps 0x3c0(%rsp), %ymm0 vmovaps %ymm0, 0x560(%rsp) vmovaps %ymm0, 0x540(%rsp) vmovaps 0x560(%rsp), %ymm0 vmovaps 0x540(%rsp), %ymm2 vmulps %ymm2, %ymm0, %ymm0 vmovaps %ymm0, 0x220(%rsp) vbroadcastss 0x7c6576(%rip), %ymm0 # 0x1e1cdc8 vmovaps %ymm0, 0x300(%rsp) vmovaps 0x300(%rsp), %ymm2 vmovaps 0x220(%rsp), %ymm0 vmovaps %ymm2, 0x780(%rsp) vmovaps %ymm0, 0x760(%rsp) vbroadcastss 0x7c6544(%rip), %ymm0 # 0x1e1cdcc vmovaps %ymm0, 0x740(%rsp) vmovaps 0x780(%rsp), %ymm3 vmovaps 0x760(%rsp), %ymm2 vmovaps 0x740(%rsp), %ymm0 vmovaps %ymm3, 0xd40(%rsp) vmovaps %ymm2, 0xd20(%rsp) vmovaps %ymm0, 0xd00(%rsp) vmovaps 0xd40(%rsp), %ymm2 vmovaps 0xd20(%rsp), %ymm0 vmovaps 0xd00(%rsp), %ymm3 vfmadd213ps %ymm3, %ymm2, %ymm0 # ymm0 = (ymm2 * ymm0) + ymm3 vmovaps %ymm0, 0x300(%rsp) vmovaps 0x300(%rsp), %ymm2 vmovaps 0x220(%rsp), %ymm0 vmovaps %ymm2, 0x720(%rsp) vmovaps %ymm0, 0x700(%rsp) vbroadcastss 0x7c64b3(%rip), %ymm0 # 0x1e1cdd0 vmovaps %ymm0, 0x6e0(%rsp) vmovaps 0x720(%rsp), %ymm3 vmovaps 0x700(%rsp), %ymm2 vmovaps 0x6e0(%rsp), %ymm0 vmovaps %ymm3, 0xda0(%rsp) vmovaps %ymm2, 0xd80(%rsp) vmovaps %ymm0, 0xd60(%rsp) vmovaps 0xda0(%rsp), %ymm2 vmovaps 0xd80(%rsp), %ymm0 vmovaps 0xd60(%rsp), %ymm3 vfmadd213ps %ymm3, %ymm2, %ymm0 # ymm0 = (ymm2 * ymm0) + ymm3 vmovaps %ymm0, 0x300(%rsp) vmovaps 0x300(%rsp), %ymm2 vmovaps 0x220(%rsp), %ymm0 vmovaps %ymm2, 0x520(%rsp) vmovaps %ymm0, 0x500(%rsp) vmovaps 0x520(%rsp), %ymm0 vmovaps 0x500(%rsp), %ymm2 vmulps %ymm2, %ymm0, %ymm0 vmovaps %ymm0, 0x300(%rsp) vmovaps 0x300(%rsp), %ymm2 vmovaps 0x220(%rsp), %ymm0 vmovaps %ymm2, 0x4e0(%rsp) vmovaps %ymm0, 0x4c0(%rsp) vmovaps 0x4e0(%rsp), %ymm0 vmovaps 0x4c0(%rsp), %ymm2 vmulps %ymm2, %ymm0, %ymm0 vmovaps %ymm0, 0x300(%rsp) vmovaps 0x220(%rsp), %ymm2 vmovaps 0x300(%rsp), %ymm0 vmovaps %ymm2, 0x9c0(%rsp) vbroadcastss 0x7b25ed(%rip), %ymm2 # 0x1e0901c vmovaps %ymm2, 0x9a0(%rsp) vmovaps %ymm0, 0x980(%rsp) vmovaps 0x9c0(%rsp), %ymm3 vmovaps 0x9a0(%rsp), %ymm2 vmovaps 0x980(%rsp), %ymm0 vmovaps %ymm3, 0xf20(%rsp) vmovaps %ymm2, 0xf00(%rsp) vmovaps %ymm0, 0xee0(%rsp) vmovaps 0xf20(%rsp), %ymm2 vmovaps 0xf00(%rsp), %ymm0 vmovaps 0xee0(%rsp), %ymm3 vfnmadd213ps %ymm3, %ymm2, %ymm0 # ymm0 = -(ymm2 * ymm0) + ymm3 vmovaps %ymm0, 0x300(%rsp) vmovaps 0x300(%rsp), %ymm0 vmovaps %ymm0, 0xa80(%rsp) vbroadcastss 0x7b2549(%rip), %ymm0 # 0x1e09004 vmovaps %ymm0, 0xa60(%rsp) vmovaps 0xa80(%rsp), %ymm0 vmovaps 0xa60(%rsp), %ymm2 vaddps %ymm2, %ymm0, %ymm0 vmovaps %ymm0, 0x300(%rsp) vbroadcastss 0x7c62e8(%rip), %ymm0 # 0x1e1cdd4 vmovaps %ymm0, 0x200(%rsp) vmovaps 0x200(%rsp), %ymm2 vmovaps 0x220(%rsp), %ymm0 vmovaps %ymm2, 0x6c0(%rsp) vmovaps %ymm0, 0x6a0(%rsp) vbroadcastss 0x7c62b6(%rip), %ymm0 # 0x1e1cdd8 vmovaps %ymm0, 0x680(%rsp) vmovaps 0x6c0(%rsp), %ymm3 vmovaps 0x6a0(%rsp), %ymm2 vmovaps 0x680(%rsp), %ymm0 vmovaps %ymm3, 0xe00(%rsp) vmovaps %ymm2, 0xde0(%rsp) vmovaps %ymm0, 0xdc0(%rsp) vmovaps 0xe00(%rsp), %ymm2 vmovaps 0xde0(%rsp), %ymm0 vmovaps 0xdc0(%rsp), %ymm3 vfmadd213ps %ymm3, %ymm2, %ymm0 # ymm0 = (ymm2 * ymm0) + ymm3 vmovaps %ymm0, 0x200(%rsp) vmovaps 0x200(%rsp), %ymm2 vmovaps 0x220(%rsp), %ymm0 vmovaps %ymm2, 0x660(%rsp) vmovaps %ymm0, 0x640(%rsp) vbroadcastss 0x7c6225(%rip), %ymm0 # 0x1e1cddc vmovaps %ymm0, 0x620(%rsp) vmovaps 0x660(%rsp), %ymm3 vmovaps 0x640(%rsp), %ymm2 vmovaps 0x620(%rsp), %ymm0 vmovaps %ymm3, 0xe60(%rsp) vmovaps %ymm2, 0xe40(%rsp) vmovaps %ymm0, 0xe20(%rsp) vmovaps 0xe60(%rsp), %ymm2 vmovaps 0xe40(%rsp), %ymm0 vmovaps 0xe20(%rsp), %ymm3 vfmadd213ps %ymm3, %ymm2, %ymm0 # ymm0 = (ymm2 * ymm0) 
+ ymm3 vmovaps %ymm0, 0x200(%rsp) vmovaps 0x200(%rsp), %ymm2 vmovaps 0x220(%rsp), %ymm0 vmovaps %ymm2, 0x4a0(%rsp) vmovaps %ymm0, 0x480(%rsp) vmovaps 0x4a0(%rsp), %ymm0 vmovaps 0x480(%rsp), %ymm2 vmulps %ymm2, %ymm0, %ymm0 vmovaps %ymm0, 0x200(%rsp) vmovaps 0x200(%rsp), %ymm2 vmovaps 0x3c0(%rsp), %ymm0 vmovaps %ymm2, 0x600(%rsp) vmovaps %ymm0, 0x5e0(%rsp) vmovaps %ymm0, 0x5c0(%rsp) vmovaps 0x600(%rsp), %ymm3 vmovaps 0x5e0(%rsp), %ymm2 vmovaps 0x5c0(%rsp), %ymm0 vmovaps %ymm3, 0xec0(%rsp) vmovaps %ymm2, 0xea0(%rsp) vmovaps %ymm0, 0xe80(%rsp) vmovaps 0xec0(%rsp), %ymm2 vmovaps 0xea0(%rsp), %ymm0 vmovaps 0xe80(%rsp), %ymm3 vfmadd213ps %ymm3, %ymm2, %ymm0 # ymm0 = (ymm2 * ymm0) + ymm3 vmovaps %ymm0, 0x200(%rsp) vmovaps 0x260(%rsp), %ymm0 vmovaps %ymm0, 0x340(%rsp) vmovaps 0x340(%rsp), %ymm2 vmovaps 0x200(%rsp), %ymm0 vmovaps %ymm2, 0x8e0(%rsp) vmovaps %ymm0, 0x8c0(%rsp) vmovdqa 0x8e0(%rsp), %ymm0 vmovdqa 0x8c0(%rsp), %ymm2 vpand %ymm2, %ymm0, %ymm0 vmovdqa %ymm0, 0x1e0(%rsp) vmovaps 0x340(%rsp), %ymm2 vmovaps 0x300(%rsp), %ymm0 vmovaps %ymm2, 0x1280(%rsp) vmovaps %ymm0, 0x1260(%rsp) vmovdqa 0x1280(%rsp), %ymm0 vmovdqa 0x1260(%rsp), %ymm2 vpandn %ymm2, %ymm0, %ymm0 vmovdqa %ymm0, 0x1c0(%rsp) vmovaps 0x200(%rsp), %ymm2 vmovaps 0x1e0(%rsp), %ymm0 vmovaps %ymm2, 0x440(%rsp) vmovaps %ymm0, 0x420(%rsp) vmovaps 0x440(%rsp), %ymm0 vmovaps 0x420(%rsp), %ymm2 vsubps %ymm2, %ymm0, %ymm0 vmovaps %ymm0, 0x200(%rsp) vmovaps 0x300(%rsp), %ymm2 vmovaps 0x1c0(%rsp), %ymm0 vmovaps %ymm2, 0x400(%rsp) vmovaps %ymm0, 0x3e0(%rsp) vmovaps 0x400(%rsp), %ymm0 vmovaps 0x3e0(%rsp), %ymm2 vsubps %ymm2, %ymm0, %ymm0 vmovaps %ymm0, 0x300(%rsp) vmovaps 0x1c0(%rsp), %ymm2 vmovaps 0x1e0(%rsp), %ymm0 vmovaps %ymm2, 0xa40(%rsp) vmovaps %ymm0, 0xa20(%rsp) vmovaps 0xa40(%rsp), %ymm0 vmovaps 0xa20(%rsp), %ymm2 vaddps %ymm2, %ymm0, %ymm0 vmovaps %ymm0, 0x380(%rsp) vmovaps 0x300(%rsp), %ymm2 vmovaps 0x200(%rsp), %ymm0 vmovaps %ymm2, 0xa00(%rsp) vmovaps %ymm0, 0x9e0(%rsp) vmovaps 0xa00(%rsp), %ymm0 vmovaps 0x9e0(%rsp), %ymm2 vaddps %ymm2, %ymm0, %ymm0 vmovaps %ymm0, 0x360(%rsp) vmovaps 0x380(%rsp), %ymm2 vmovaps 0x320(%rsp), %ymm0 vmovaps %ymm2, 0x1200(%rsp) vmovaps %ymm0, 0x11e0(%rsp) vmovdqa 0x1200(%rsp), %ymm0 vmovdqa 0x11e0(%rsp), %ymm2 vpxor %ymm2, %ymm0, %ymm0 movq 0x3b8(%rsp), %rax vmovdqa %ymm0, (%rax) vmovaps 0x360(%rsp), %ymm2 vmovaps 0x240(%rsp), %ymm0 vmovaps %ymm2, 0x11c0(%rsp) vmovaps %ymm0, 0x11a0(%rsp) vmovdqa 0x11c0(%rsp), %ymm0 vmovdqa 0x11a0(%rsp), %ymm2 vpxor %ymm2, %ymm0, %ymm0 movq 0x3b0(%rsp), %rax vmovdqa %ymm0, (%rax) vmovaps 0xa0(%rsp), %ymm0 vmovaps %ymm1, 0x100(%rsp) vmovaps 0x100(%rsp), %ymm1 vcmpeqps %ymm1, %ymm0, %k0 vpmovm2d %k0, %ymm0 vmovaps %ymm0, 0x60(%rsp) vmovaps 0x80(%rsp), %ymm1 vmovaps 0x60(%rsp), %ymm0 vmovaps %ymm1, 0x140(%rsp) vmovaps %ymm0, 0x120(%rsp) vmovaps 0x140(%rsp), %ymm0 vmovaps 0x120(%rsp), %ymm1 vpand %ymm1, %ymm0, %ymm0 vmovaps %ymm0, 0x40(%rsp) vmovaps 0xa0(%rsp), %ymm1 vmovaps 0x40(%rsp), %ymm0 vmovaps %ymm1, 0x180(%rsp) vmovaps %ymm0, 0x160(%rsp) vmovaps 0x180(%rsp), %ymm0 vaddps 0x160(%rsp), %ymm0, %ymm0 vmovaps %ymm0, 0xa0(%rsp) vmovaps 0xc0(%rsp), %ymm1 vmovaps 0xa0(%rsp), %ymm0 vmovaps %ymm1, 0x1300(%rsp) vmovaps %ymm0, 0x12e0(%rsp) vmovaps 0x1300(%rsp), %ymm0 vdivps 0x12e0(%rsp), %ymm0, %ymm0 vmovaps %ymm0, 0x20(%rsp) vmovaps 0x20(%rsp), %ymm0 movq %rbp, %rsp popq %rbp retq nopl (%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
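Although func_pack8 works on 256-bit vectors, it is compiled in the AVX-512 build, so even its zero test routes through a mask register and converts back with vpmovm2d (visible near the end of the kernel: vcmpeqps into %k0, then vpmovm2d %k0 to a ymm). A minimal sketch of that idiom; it requires AVX512VL and AVX512DQ:

#include <immintrin.h>

// Produce an all-ones vector mask for lanes where v == 0, via a k-register.
static inline __m256 zero_lanes(__m256 v)
{
    __mmask8 k = _mm256_cmp_ps_mask(v, _mm256_setzero_ps(), _CMP_EQ_OQ); // vcmpeqps -> k0
    return _mm256_castsi256_ps(_mm256_movm_epi32(k));                    // vpmovm2d k0 -> ymm
}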
2,113,299
ncnn::UnaryOp_x86_avx512_functor::unary_op_tan::func_pack4(float vector[4] const&) const
__m128 func_pack4(const __m128& x) const { return tan_ps(x); }
subq $0x8d8, %rsp # imm = 0x8D8 movq %rdi, -0x78(%rsp) movq %rsi, -0x80(%rsp) movq -0x80(%rsp), %rax vmovaps (%rax), %xmm0 vmovaps %xmm0, -0x10(%rsp) movl $0x322bcc77, 0x1c(%rsp) # imm = 0x322BCC77 vbroadcastss 0x1c(%rsp), %xmm0 vmovaps %xmm0, (%rsp) vmovaps (%rsp), %xmm0 vmovaps %xmm0, -0x40(%rsp) vmovaps -0x10(%rsp), %xmm0 vmovaps %xmm0, 0x170(%rsp) leaq -0x20(%rsp), %rax movq %rax, 0x168(%rsp) leaq -0x30(%rsp), %rax movq %rax, 0x160(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovaps %xmm0, 0x1e0(%rsp) vmovaps 0x1e0(%rsp), %xmm1 vmovaps %xmm1, 0x130(%rsp) vmovaps 0x170(%rsp), %xmm1 vmovaps %xmm1, 0x120(%rsp) vmovaps 0x170(%rsp), %xmm1 vmovaps %xmm1, 0x360(%rsp) vbroadcastss 0x7c5cec(%rip), %xmm1 # 0x1e1cdb0 vmovaps %xmm1, 0x350(%rsp) vmovdqa 0x360(%rsp), %xmm1 vmovdqa 0x350(%rsp), %xmm2 vpand %xmm2, %xmm1, %xmm1 vmovdqa %xmm1, 0x170(%rsp) vmovaps 0x120(%rsp), %xmm1 vmovaps %xmm1, 0x340(%rsp) vbroadcastss 0x7b6db1(%rip), %xmm1 # 0x1e0deb8 vmovaps %xmm1, 0x330(%rsp) vmovdqa 0x340(%rsp), %xmm1 vmovdqa 0x330(%rsp), %xmm2 vpand %xmm2, %xmm1, %xmm1 vmovdqa %xmm1, 0x120(%rsp) vmovaps 0x170(%rsp), %xmm1 vmovaps %xmm1, 0x280(%rsp) vbroadcastss 0x7c5c6e(%rip), %xmm1 # 0x1e1cdb8 vmovaps %xmm1, 0x270(%rsp) vmovaps 0x280(%rsp), %xmm1 vmovaps 0x270(%rsp), %xmm2 vmulps %xmm2, %xmm1, %xmm1 vmovaps %xmm1, 0x110(%rsp) vmovaps 0x110(%rsp), %xmm1 vmovaps %xmm1, 0x2f0(%rsp) vcvttps2dq 0x2f0(%rsp), %xmm1 vmovdqa %xmm1, 0xf0(%rsp) vmovdqa 0xf0(%rsp), %xmm1 vmovdqa %xmm1, 0x530(%rsp) vpbroadcastq 0x7c5c2f(%rip), %xmm1 # 0x1e1cde0 vmovdqa %xmm1, 0x520(%rsp) vmovdqa 0x530(%rsp), %xmm1 vmovdqa 0x520(%rsp), %xmm2 vpaddd %xmm2, %xmm1, %xmm1 vmovdqa %xmm1, 0xf0(%rsp) vmovdqa 0xf0(%rsp), %xmm1 vmovdqa %xmm1, 0x7d0(%rsp) vpbroadcastq 0x7c5bf4(%rip), %xmm1 # 0x1e1cde8 vmovdqa %xmm1, 0x7c0(%rsp) vmovdqa 0x7d0(%rsp), %xmm1 vmovdqa 0x7c0(%rsp), %xmm2 vpand %xmm2, %xmm1, %xmm1 vmovdqa %xmm1, 0xf0(%rsp) vmovdqa 0xf0(%rsp), %xmm1 vmovdqa %xmm1, 0x300(%rsp) vcvtdq2ps 0x300(%rsp), %xmm1 vmovaps %xmm1, 0x110(%rsp) vmovdqa 0xf0(%rsp), %xmm1 vmovdqa %xmm1, 0xe0(%rsp) vmovdqa 0xf0(%rsp), %xmm1 vmovdqa %xmm1, 0x7b0(%rsp) vpbroadcastq 0x7c5b83(%rip), %xmm1 # 0x1e1cdf0 vmovdqa %xmm1, 0x7a0(%rsp) vmovdqa 0x7b0(%rsp), %xmm2 vmovdqa 0x7a0(%rsp), %xmm3 vpand %xmm3, %xmm2, %xmm2 vmovdqa %xmm2, 0x100(%rsp) vmovdqa 0x100(%rsp), %xmm2 vmovdqa %xmm2, 0x570(%rsp) movl $0x1d, 0x56c(%rsp) vmovdqa 0x570(%rsp), %xmm2 vmovd 0x56c(%rsp), %xmm3 vpslld %xmm3, %xmm2, %xmm2 vmovdqa %xmm2, 0x100(%rsp) vmovdqa 0x100(%rsp), %xmm2 vmovdqa %xmm2, 0x5a0(%rsp) vmovdqa 0x5a0(%rsp), %xmm2 vmovdqa %xmm2, 0xd0(%rsp) vmovdqa 0xf0(%rsp), %xmm2 vmovdqa %xmm2, 0x790(%rsp) vpbroadcastq 0x7c5ae8(%rip), %xmm2 # 0x1e1cdf8 vmovdqa %xmm2, 0x780(%rsp) vmovdqa 0x790(%rsp), %xmm3 vmovdqa 0x780(%rsp), %xmm4 vpand %xmm4, %xmm3, %xmm3 vmovdqa %xmm3, 0xf0(%rsp) vmovdqa 0xf0(%rsp), %xmm4 vpxor %xmm3, %xmm3, %xmm3 vmovdqa %xmm3, 0x800(%rsp) vmovdqa 0x800(%rsp), %xmm3 vmovdqa %xmm4, 0x7f0(%rsp) vmovdqa %xmm3, 0x7e0(%rsp) vmovdqa 0x7f0(%rsp), %xmm3 vmovdqa 0x7e0(%rsp), %xmm4 vpcmpeqd %xmm4, %xmm3, %xmm3 vmovdqa %xmm3, 0xf0(%rsp) vmovdqa 0xf0(%rsp), %xmm3 vmovdqa %xmm3, 0x590(%rsp) vmovdqa 0x590(%rsp), %xmm3 vmovdqa %xmm3, 0xc0(%rsp) vbroadcastss 0x7c5a07(%rip), %xmm3 # 0x1e1cdbc vmovaps %xmm3, 0x150(%rsp) vbroadcastss 0x7c59f9(%rip), %xmm3 # 0x1e1cdc0 vmovaps %xmm3, 0x140(%rsp) vbroadcastss 0x7c59eb(%rip), %xmm3 # 0x1e1cdc4 vmovaps %xmm3, 0x130(%rsp) vmovaps 0x110(%rsp), %xmm5 vmovaps 0x150(%rsp), %xmm4 vmovaps 0x170(%rsp), %xmm3 vmovaps %xmm5, 0x510(%rsp) vmovaps %xmm4, 0x500(%rsp) vmovaps 
%xmm3, 0x4f0(%rsp) vmovaps 0x510(%rsp), %xmm5 vmovaps 0x500(%rsp), %xmm4 vmovaps 0x4f0(%rsp), %xmm3 vmovaps %xmm5, 0x600(%rsp) vmovaps %xmm4, 0x5f0(%rsp) vmovaps %xmm3, 0x5e0(%rsp) vmovaps 0x600(%rsp), %xmm4 vmovaps 0x5f0(%rsp), %xmm3 vmovaps 0x5e0(%rsp), %xmm5 vfmadd213ps %xmm5, %xmm4, %xmm3 # xmm3 = (xmm4 * xmm3) + xmm5 vmovaps %xmm3, 0x170(%rsp) vmovaps 0x110(%rsp), %xmm5 vmovaps 0x140(%rsp), %xmm4 vmovaps 0x170(%rsp), %xmm3 vmovaps %xmm5, 0x4e0(%rsp) vmovaps %xmm4, 0x4d0(%rsp) vmovaps %xmm3, 0x4c0(%rsp) vmovaps 0x4e0(%rsp), %xmm5 vmovaps 0x4d0(%rsp), %xmm4 vmovaps 0x4c0(%rsp), %xmm3 vmovaps %xmm5, 0x630(%rsp) vmovaps %xmm4, 0x620(%rsp) vmovaps %xmm3, 0x610(%rsp) vmovaps 0x630(%rsp), %xmm4 vmovaps 0x620(%rsp), %xmm3 vmovaps 0x610(%rsp), %xmm5 vfmadd213ps %xmm5, %xmm4, %xmm3 # xmm3 = (xmm4 * xmm3) + xmm5 vmovaps %xmm3, 0x170(%rsp) vmovaps 0x110(%rsp), %xmm5 vmovaps 0x130(%rsp), %xmm4 vmovaps 0x170(%rsp), %xmm3 vmovaps %xmm5, 0x4b0(%rsp) vmovaps %xmm4, 0x4a0(%rsp) vmovaps %xmm3, 0x490(%rsp) vmovaps 0x4b0(%rsp), %xmm5 vmovaps 0x4a0(%rsp), %xmm4 vmovaps 0x490(%rsp), %xmm3 vmovaps %xmm5, 0x660(%rsp) vmovaps %xmm4, 0x650(%rsp) vmovaps %xmm3, 0x640(%rsp) vmovaps 0x660(%rsp), %xmm4 vmovaps 0x650(%rsp), %xmm3 vmovaps 0x640(%rsp), %xmm5 vfmadd213ps %xmm5, %xmm4, %xmm3 # xmm3 = (xmm4 * xmm3) + xmm5 vmovaps %xmm3, 0x170(%rsp) vmovdqa 0xe0(%rsp), %xmm3 vmovdqa %xmm3, 0x770(%rsp) vmovdqa %xmm2, 0x760(%rsp) vmovdqa 0x770(%rsp), %xmm2 vmovdqa 0x760(%rsp), %xmm3 vpsubd %xmm3, %xmm2, %xmm2 vmovdqa %xmm2, 0xe0(%rsp) vmovdqa 0xe0(%rsp), %xmm2 vmovdqa %xmm2, 0x880(%rsp) vmovdqa %xmm1, 0x870(%rsp) vmovdqa 0x880(%rsp), %xmm1 vmovdqa 0x870(%rsp), %xmm2 vpandn %xmm2, %xmm1, %xmm1 vmovdqa %xmm1, 0xe0(%rsp) vmovdqa 0xe0(%rsp), %xmm1 vmovdqa %xmm1, 0x550(%rsp) movl $0x1d, 0x54c(%rsp) vmovdqa 0x550(%rsp), %xmm1 vmovd 0x54c(%rsp), %xmm2 vpslld %xmm2, %xmm1, %xmm1 vmovdqa %xmm1, 0xe0(%rsp) vmovdqa 0xe0(%rsp), %xmm1 vmovdqa %xmm1, 0x580(%rsp) vmovdqa 0x580(%rsp), %xmm1 vmovdqa %xmm1, 0xb0(%rsp) vmovaps 0x120(%rsp), %xmm2 vmovaps 0xd0(%rsp), %xmm1 vmovaps %xmm2, 0x860(%rsp) vmovaps %xmm1, 0x850(%rsp) vmovdqa 0x860(%rsp), %xmm1 vmovdqa 0x850(%rsp), %xmm2 vpxor %xmm2, %xmm1, %xmm1 vmovdqa %xmm1, 0x120(%rsp) vmovaps 0x170(%rsp), %xmm1 vmovaps %xmm1, 0x260(%rsp) vmovaps %xmm1, 0x250(%rsp) vmovaps 0x260(%rsp), %xmm1 vmovaps 0x250(%rsp), %xmm2 vmulps %xmm2, %xmm1, %xmm1 vmovaps %xmm1, 0xa0(%rsp) vbroadcastss 0x7c56cd(%rip), %xmm1 # 0x1e1cdc8 vmovaps %xmm1, 0x110(%rsp) vmovaps 0x110(%rsp), %xmm2 vmovaps 0xa0(%rsp), %xmm1 vmovaps %xmm2, 0x480(%rsp) vmovaps %xmm1, 0x470(%rsp) vbroadcastss 0x7c569b(%rip), %xmm1 # 0x1e1cdcc vmovaps %xmm1, 0x460(%rsp) vmovaps 0x480(%rsp), %xmm3 vmovaps 0x470(%rsp), %xmm2 vmovaps 0x460(%rsp), %xmm1 vmovaps %xmm3, 0x690(%rsp) vmovaps %xmm2, 0x680(%rsp) vmovaps %xmm1, 0x670(%rsp) vmovaps 0x690(%rsp), %xmm2 vmovaps 0x680(%rsp), %xmm1 vmovaps 0x670(%rsp), %xmm3 vfmadd213ps %xmm3, %xmm2, %xmm1 # xmm1 = (xmm2 * xmm1) + xmm3 vmovaps %xmm1, 0x110(%rsp) vmovaps 0x110(%rsp), %xmm2 vmovaps 0xa0(%rsp), %xmm1 vmovaps %xmm2, 0x450(%rsp) vmovaps %xmm1, 0x440(%rsp) vbroadcastss 0x7c560a(%rip), %xmm1 # 0x1e1cdd0 vmovaps %xmm1, 0x430(%rsp) vmovaps 0x450(%rsp), %xmm3 vmovaps 0x440(%rsp), %xmm2 vmovaps 0x430(%rsp), %xmm1 vmovaps %xmm3, 0x6c0(%rsp) vmovaps %xmm2, 0x6b0(%rsp) vmovaps %xmm1, 0x6a0(%rsp) vmovaps 0x6c0(%rsp), %xmm2 vmovaps 0x6b0(%rsp), %xmm1 vmovaps 0x6a0(%rsp), %xmm3 vfmadd213ps %xmm3, %xmm2, %xmm1 # xmm1 = (xmm2 * xmm1) + xmm3 vmovaps %xmm1, 0x110(%rsp) vmovaps 0x110(%rsp), %xmm2 vmovaps 0xa0(%rsp), 
%xmm1 vmovaps %xmm2, 0x240(%rsp) vmovaps %xmm1, 0x230(%rsp) vmovaps 0x240(%rsp), %xmm1 vmovaps 0x230(%rsp), %xmm2 vmulps %xmm2, %xmm1, %xmm1 vmovaps %xmm1, 0x110(%rsp) vmovaps 0x110(%rsp), %xmm2 vmovaps 0xa0(%rsp), %xmm1 vmovaps %xmm2, 0x220(%rsp) vmovaps %xmm1, 0x210(%rsp) vmovaps 0x220(%rsp), %xmm1 vmovaps 0x210(%rsp), %xmm2 vmulps %xmm2, %xmm1, %xmm1 vmovaps %xmm1, 0x110(%rsp) vmovaps 0xa0(%rsp), %xmm2 vmovaps 0x110(%rsp), %xmm1 vmovaps %xmm2, 0x390(%rsp) vbroadcastss 0x7b1744(%rip), %xmm2 # 0x1e0901c vmovaps %xmm2, 0x380(%rsp) vmovaps %xmm1, 0x370(%rsp) vmovaps 0x390(%rsp), %xmm3 vmovaps 0x380(%rsp), %xmm2 vmovaps 0x370(%rsp), %xmm1 vmovaps %xmm3, 0x5d0(%rsp) vmovaps %xmm2, 0x5c0(%rsp) vmovaps %xmm1, 0x5b0(%rsp) vmovaps 0x5d0(%rsp), %xmm2 vmovaps 0x5c0(%rsp), %xmm1 vmovaps 0x5b0(%rsp), %xmm3 vfnmadd213ps %xmm3, %xmm2, %xmm1 # xmm1 = -(xmm2 * xmm1) + xmm3 vmovaps %xmm1, 0x110(%rsp) vmovaps 0x110(%rsp), %xmm1 vmovaps %xmm1, 0x2e0(%rsp) vbroadcastss 0x7b16a0(%rip), %xmm1 # 0x1e09004 vmovaps %xmm1, 0x2d0(%rsp) vmovaps 0x2e0(%rsp), %xmm1 vmovaps 0x2d0(%rsp), %xmm2 vaddps %xmm2, %xmm1, %xmm1 vmovaps %xmm1, 0x110(%rsp) vbroadcastss 0x7c543f(%rip), %xmm1 # 0x1e1cdd4 vmovaps %xmm1, 0x90(%rsp) vmovaps 0x90(%rsp), %xmm2 vmovaps 0xa0(%rsp), %xmm1 vmovaps %xmm2, 0x420(%rsp) vmovaps %xmm1, 0x410(%rsp) vbroadcastss 0x7c540d(%rip), %xmm1 # 0x1e1cdd8 vmovaps %xmm1, 0x400(%rsp) vmovaps 0x420(%rsp), %xmm3 vmovaps 0x410(%rsp), %xmm2 vmovaps 0x400(%rsp), %xmm1 vmovaps %xmm3, 0x6f0(%rsp) vmovaps %xmm2, 0x6e0(%rsp) vmovaps %xmm1, 0x6d0(%rsp) vmovaps 0x6f0(%rsp), %xmm2 vmovaps 0x6e0(%rsp), %xmm1 vmovaps 0x6d0(%rsp), %xmm3 vfmadd213ps %xmm3, %xmm2, %xmm1 # xmm1 = (xmm2 * xmm1) + xmm3 vmovaps %xmm1, 0x90(%rsp) vmovaps 0x90(%rsp), %xmm2 vmovaps 0xa0(%rsp), %xmm1 vmovaps %xmm2, 0x3f0(%rsp) vmovaps %xmm1, 0x3e0(%rsp) vbroadcastss 0x7c537c(%rip), %xmm1 # 0x1e1cddc vmovaps %xmm1, 0x3d0(%rsp) vmovaps 0x3f0(%rsp), %xmm3 vmovaps 0x3e0(%rsp), %xmm2 vmovaps 0x3d0(%rsp), %xmm1 vmovaps %xmm3, 0x720(%rsp) vmovaps %xmm2, 0x710(%rsp) vmovaps %xmm1, 0x700(%rsp) vmovaps 0x720(%rsp), %xmm2 vmovaps 0x710(%rsp), %xmm1 vmovaps 0x700(%rsp), %xmm3 vfmadd213ps %xmm3, %xmm2, %xmm1 # xmm1 = (xmm2 * xmm1) + xmm3 vmovaps %xmm1, 0x90(%rsp) vmovaps 0x90(%rsp), %xmm2 vmovaps 0xa0(%rsp), %xmm1 vmovaps %xmm2, 0x200(%rsp) vmovaps %xmm1, 0x1f0(%rsp) vmovaps 0x200(%rsp), %xmm1 vmovaps 0x1f0(%rsp), %xmm2 vmulps %xmm2, %xmm1, %xmm1 vmovaps %xmm1, 0x90(%rsp) vmovaps 0x90(%rsp), %xmm2 vmovaps 0x170(%rsp), %xmm1 vmovaps %xmm2, 0x3c0(%rsp) vmovaps %xmm1, 0x3b0(%rsp) vmovaps %xmm1, 0x3a0(%rsp) vmovaps 0x3c0(%rsp), %xmm3 vmovaps 0x3b0(%rsp), %xmm2 vmovaps 0x3a0(%rsp), %xmm1 vmovaps %xmm3, 0x750(%rsp) vmovaps %xmm2, 0x740(%rsp) vmovaps %xmm1, 0x730(%rsp) vmovaps 0x750(%rsp), %xmm2 vmovaps 0x740(%rsp), %xmm1 vmovaps 0x730(%rsp), %xmm3 vfmadd213ps %xmm3, %xmm2, %xmm1 # xmm1 = (xmm2 * xmm1) + xmm3 vmovaps %xmm1, 0x90(%rsp) vmovaps 0xc0(%rsp), %xmm1 vmovaps %xmm1, 0x130(%rsp) vmovaps 0x130(%rsp), %xmm2 vmovaps 0x90(%rsp), %xmm1 vmovaps %xmm2, 0x320(%rsp) vmovaps %xmm1, 0x310(%rsp) vmovdqa 0x320(%rsp), %xmm1 vmovdqa 0x310(%rsp), %xmm2 vpand %xmm2, %xmm1, %xmm1 vmovdqa %xmm1, 0x80(%rsp) vmovaps 0x130(%rsp), %xmm2 vmovaps 0x110(%rsp), %xmm1 vmovaps %xmm2, 0x190(%rsp) vmovaps %xmm1, 0x180(%rsp) vmovdqa 0x190(%rsp), %xmm1 vmovdqa 0x180(%rsp), %xmm2 vpandn %xmm2, %xmm1, %xmm1 vmovdqa %xmm1, 0x70(%rsp) vmovaps 0x90(%rsp), %xmm2 vmovaps 0x80(%rsp), %xmm1 vmovaps %xmm2, 0x1d0(%rsp) vmovaps %xmm1, 0x1c0(%rsp) vmovaps 0x1d0(%rsp), %xmm1 vmovaps 0x1c0(%rsp), %xmm2 vsubps 
%xmm2, %xmm1, %xmm1 vmovaps %xmm1, 0x90(%rsp) vmovaps 0x110(%rsp), %xmm2 vmovaps 0x70(%rsp), %xmm1 vmovaps %xmm2, 0x1b0(%rsp) vmovaps %xmm1, 0x1a0(%rsp) vmovaps 0x1b0(%rsp), %xmm1 vmovaps 0x1a0(%rsp), %xmm2 vsubps %xmm2, %xmm1, %xmm1 vmovaps %xmm1, 0x110(%rsp) vmovaps 0x70(%rsp), %xmm2 vmovaps 0x80(%rsp), %xmm1 vmovaps %xmm2, 0x2c0(%rsp) vmovaps %xmm1, 0x2b0(%rsp) vmovaps 0x2c0(%rsp), %xmm1 vmovaps 0x2b0(%rsp), %xmm2 vaddps %xmm2, %xmm1, %xmm1 vmovaps %xmm1, 0x150(%rsp) vmovaps 0x110(%rsp), %xmm2 vmovaps 0x90(%rsp), %xmm1 vmovaps %xmm2, 0x2a0(%rsp) vmovaps %xmm1, 0x290(%rsp) vmovaps 0x2a0(%rsp), %xmm1 vmovaps 0x290(%rsp), %xmm2 vaddps %xmm2, %xmm1, %xmm1 vmovaps %xmm1, 0x140(%rsp) vmovaps 0x150(%rsp), %xmm2 vmovaps 0x120(%rsp), %xmm1 vmovaps %xmm2, 0x840(%rsp) vmovaps %xmm1, 0x830(%rsp) vmovdqa 0x840(%rsp), %xmm1 vmovdqa 0x830(%rsp), %xmm2 vpxor %xmm2, %xmm1, %xmm1 movq 0x168(%rsp), %rax vmovdqa %xmm1, (%rax) vmovaps 0x140(%rsp), %xmm2 vmovaps 0xb0(%rsp), %xmm1 vmovaps %xmm2, 0x820(%rsp) vmovaps %xmm1, 0x810(%rsp) vmovdqa 0x820(%rsp), %xmm1 vmovdqa 0x810(%rsp), %xmm2 vpxor %xmm2, %xmm1, %xmm1 movq 0x160(%rsp), %rax vmovdqa %xmm1, (%rax) vmovaps -0x30(%rsp), %xmm1 vmovaps %xmm0, 0x20(%rsp) vmovaps 0x20(%rsp), %xmm0 vmovaps %xmm1, 0x8a0(%rsp) vmovaps %xmm0, 0x890(%rsp) vmovaps 0x8a0(%rsp), %xmm0 vmovaps 0x890(%rsp), %xmm1 vcmpeqps %xmm1, %xmm0, %k0 vpmovm2d %k0, %xmm0 vmovaps %xmm0, -0x50(%rsp) vmovaps -0x40(%rsp), %xmm1 vmovaps -0x50(%rsp), %xmm0 vmovaps %xmm1, 0x60(%rsp) vmovaps %xmm0, 0x50(%rsp) vmovaps 0x60(%rsp), %xmm0 vmovaps 0x50(%rsp), %xmm1 vpand %xmm1, %xmm0, %xmm0 vmovaps %xmm0, -0x60(%rsp) vmovaps -0x30(%rsp), %xmm1 vmovaps -0x60(%rsp), %xmm0 vmovaps %xmm1, 0x40(%rsp) vmovaps %xmm0, 0x30(%rsp) vmovaps 0x40(%rsp), %xmm0 vaddps 0x30(%rsp), %xmm0, %xmm0 vmovaps %xmm0, -0x30(%rsp) vmovaps -0x20(%rsp), %xmm1 vmovaps -0x30(%rsp), %xmm0 vmovaps %xmm1, 0x8c0(%rsp) vmovaps %xmm0, 0x8b0(%rsp) vmovaps 0x8c0(%rsp), %xmm0 vdivps 0x8b0(%rsp), %xmm0, %xmm0 vmovaps %xmm0, -0x70(%rsp) vmovaps -0x70(%rsp), %xmm0 addq $0x8d8, %rsp # imm = 0x8D8 retq nopw %cs:(%rax,%rax) nopl (%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
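Across all three widths the tan helpers share the same shape: one range reduction feeds both a sine and a cosine polynomial (the two result pointers stored early in each kernel), and the quotient is taken at the end with the same ~1e-8 zero guard. A hedged SSE sketch of that overall structure; sincos4 here stands in for the real avx_mathfun-style helper and is an assumption, not ncnn's exact API:

#include <immintrin.h>

// tan via a shared sincos evaluation plus a guarded divide.
static inline __m128 tan4(__m128 x, void (*sincos4)(__m128, __m128*, __m128*))
{
    __m128 ysin, ycos;
    sincos4(x, &ysin, &ycos);                                        // one reduction, two polynomials
    __m128 iszero = _mm_cmpeq_ps(ycos, _mm_setzero_ps());            // lanes where cos(x) == 0
    ycos = _mm_add_ps(ycos, _mm_and_ps(iszero, _mm_set1_ps(1e-8f))); // avoid a zero divisor
    return _mm_div_ps(ysin, ycos);                                   // tan = sin / cos
}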