name (string, 1..473k chars) | code (string, 7..647k chars) | asm (string, 4..3.39M chars) | file (string, 8..196 chars)
---|---|---|---|
virtual thunk to ncnn::DeconvolutionDepthWise_x86::create_pipeline(ncnn::Option const&) | int DeconvolutionDepthWise_x86::create_pipeline(const Option& opt)
{
const int maxk = kernel_w * kernel_h;
int channels = (weight_data_size / group) / maxk / (num_output / group) * group;
// depth-wise
if (channels == group && group == num_output)
{
int elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
#if __AVX512F__
elempack = channels % 16 == 0 ? 16 : channels % 8 == 0 ? 8 : channels % 4 == 0 ? 4 : 1;
#elif __AVX__
elempack = channels % 8 == 0 ? 8 : channels % 4 == 0 ? 4 : 1;
#else
elempack = channels % 4 == 0 ? 4 : 1;
#endif
}
#endif // __SSE2__
Mat weight_data_transposed(weight_data.w);
{
float* pt = weight_data_transposed;
const float* p = weight_data;
for (int i = 0; i < (channels / group) * (num_output / group) * group; i++)
{
for (int k = 0; k < maxk; k++)
{
pt[maxk - 1 - k] = p[k];
}
p += maxk;
pt += maxk;
}
}
#if __SSE2__
#if __AVX__
// pack16
#if __AVX512F__
if (elempack == 16)
{
Mat weight_data_r2 = weight_data_transposed.reshape(maxk, group);
convert_packing(weight_data_r2, weight_data_tm, 16, opt);
}
#endif // __AVX512F__
// pack8
if (elempack == 8)
{
Mat weight_data_r2 = weight_data_transposed.reshape(maxk, group);
convert_packing(weight_data_r2, weight_data_tm, 8, opt);
}
#endif // __AVX__
// pack4
if (elempack == 4)
{
Mat weight_data_r2 = weight_data_transposed.reshape(maxk, group);
convert_packing(weight_data_r2, weight_data_tm, 4, opt);
}
#endif // __SSE2__
if (elempack == 1)
{
weight_data_tm = weight_data_transposed;
}
return 0;
}
// group deconvolution
create_group_ops(opt);
if (opt.lightmode)
{
weight_data.release();
}
return 0;
} | pushq %rax
movq (%rdi), %rax
addq -0x30(%rax), %rdi
callq 0x2d4f38
xorl %eax, %eax
popq %rcx
retq
nop
| /csukuangfj[P]ncnn/src/layer/x86/deconvolutiondepthwise_x86.cpp |
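The weight transposition in this row reverses each maxk-long kernel window: deconvolution applies the convolution weights back-to-front, so `create_pipeline` flips them once up front and the forward pass can then accumulate in plain order. A minimal standalone sketch of that flip (hypothetical `flip_kernels` helper, with `std::vector` standing in for `ncnn::Mat`):

```cpp
// Sketch only: std::vector stands in for ncnn::Mat, and flip_kernels is a
// hypothetical name. Mirrors the pt[maxk - 1 - k] = p[k] loop above.
#include <vector>

std::vector<float> flip_kernels(const std::vector<float>& weight, int maxk)
{
    std::vector<float> flipped(weight.size());
    const int windows = (int)(weight.size() / maxk);
    for (int i = 0; i < windows; i++)
    {
        const float* p = &weight[i * maxk];
        float* pt = &flipped[i * maxk];
        for (int k = 0; k < maxk; k++)
            pt[maxk - 1 - k] = p[k]; // reverse the k-th tap within its window
    }
    return flipped;
}
```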
ncnn::DeconvolutionDepthWise_x86::destroy_pipeline(ncnn::Option const&) | int DeconvolutionDepthWise_x86::destroy_pipeline(const Option& opt)
{
for (int i = 0; i < (int)group_ops.size(); i++)
{
group_ops[i]->destroy_pipeline(opt);
delete group_ops[i];
}
group_ops.clear();
return 0;
} | pushq %r15
pushq %r14
pushq %rbx
movq %rsi, %r14
movq %rdi, %rbx
xorl %r15d, %r15d
movq 0x8(%rbx), %rax
movq 0x10(%rbx), %rcx
movq %rcx, %rdx
subq %rax, %rdx
shrq $0x3, %rdx
movslq %edx, %rdx
cmpq %rdx, %r15
jge 0x2d5c55
movq (%rax,%r15,8), %rdi
movq (%rdi), %rax
movq %r14, %rsi
callq *0x28(%rax)
movq 0x8(%rbx), %rax
movq (%rax,%r15,8), %rdi
testq %rdi, %rdi
je 0x2d5c50
movq (%rdi), %rax
callq *0x8(%rax)
incq %r15
jmp 0x2d5c16
cmpq %rax, %rcx
je 0x2d5c5e
movq %rax, 0x10(%rbx)
xorl %eax, %eax
popq %rbx
popq %r14
popq %r15
retq
| /csukuangfj[P]ncnn/src/layer/x86/deconvolutiondepthwise_x86.cpp |
ncnn::DeconvolutionDepthWise_x86::forward(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const | int DeconvolutionDepthWise_x86::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
// convolve with NxN kernel
// value = value + bias
int w = bottom_blob.w;
int h = bottom_blob.h;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;
int outw = (w - 1) * stride_w + kernel_extent_w + output_pad_right;
int outh = (h - 1) * stride_h + kernel_extent_h + output_pad_bottom;
int out_elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
#if __AVX512F__
out_elempack = num_output % 16 == 0 ? 16 : num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#elif __AVX__
out_elempack = num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#else
out_elempack = num_output % 4 == 0 ? 4 : 1;
#endif
}
#endif // __SSE2__
size_t out_elemsize = elemsize / elempack * out_elempack;
Mat top_blob_bordered;
if (pad_left > 0 || pad_right > 0 || pad_top > 0 || pad_bottom > 0 || (output_w > 0 && output_h > 0))
{
top_blob_bordered.create(outw, outh, num_output / out_elempack, out_elemsize, out_elempack, opt.workspace_allocator);
}
else
{
top_blob_bordered = top_blob;
top_blob_bordered.create(outw, outh, num_output / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
}
if (top_blob_bordered.empty())
return -100;
const int maxk = kernel_w * kernel_h;
// depth-wise
if (channels * elempack == group && group == num_output)
{
#if __SSE2__
#if __AVX__
#if __AVX512F__
if (elempack == 16)
{
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < channels; g++)
{
float* outptr = top_blob_bordered.channel(g);
const float* kptr = (const float*)weight_data_tm + maxk * g * 16;
const Mat m = bottom_blob.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__m512 _sum = _mm512_setzero_ps();
if (bias_term)
{
_sum = _mm512_loadu_ps((const float*)bias_data + g * 16);
}
for (int y = 0; y < kernel_h; y++)
{
int sys = (i + y * dilation_h - (kernel_extent_h - 1));
if (sys < 0 || sys % stride_h != 0)
continue;
int sy = sys / stride_h;
if (sy >= h)
continue;
for (int x = 0; x < kernel_w; x++)
{
int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
if (sxs < 0 || sxs % stride_w != 0)
continue;
int sx = sxs / stride_w;
if (sx >= w)
continue;
const float* sptr = m.row(sy) + sx * 16;
int k = y * kernel_w + x;
__m512 _val = _mm512_loadu_ps(sptr);
__m512 _w = _mm512_loadu_ps(kptr + k * 16);
_sum = _mm512_fmadd_ps(_val, _w, _sum);
}
}
_sum = activation_avx512(_sum, activation_type, activation_params);
_mm512_storeu_ps(outptr, _sum);
outptr += 16;
}
}
}
}
}
#endif // __AVX512F__
if (elempack == 8)
{
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < channels; g++)
{
float* outptr = top_blob_bordered.channel(g);
const float* kptr = (const float*)weight_data_tm + maxk * g * 8;
const Mat m = bottom_blob.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__m256 _sum = _mm256_setzero_ps();
if (bias_term)
{
_sum = _mm256_loadu_ps((const float*)bias_data + g * 8);
}
for (int y = 0; y < kernel_h; y++)
{
int sys = (i + y * dilation_h - (kernel_extent_h - 1));
if (sys < 0 || sys % stride_h != 0)
continue;
int sy = sys / stride_h;
if (sy >= h)
continue;
for (int x = 0; x < kernel_w; x++)
{
int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
if (sxs < 0 || sxs % stride_w != 0)
continue;
int sx = sxs / stride_w;
if (sx >= w)
continue;
const float* sptr = m.row(sy) + sx * 8;
int k = y * kernel_w + x;
__m256 _val = _mm256_loadu_ps(sptr);
__m256 _w = _mm256_loadu_ps(kptr + k * 8);
_sum = _mm256_comp_fmadd_ps(_val, _w, _sum);
}
}
_sum = activation_avx(_sum, activation_type, activation_params);
_mm256_storeu_ps(outptr, _sum);
outptr += 8;
}
}
}
}
}
#endif // __AVX__
if (elempack == 4)
{
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < channels; g++)
{
float* outptr = top_blob_bordered.channel(g);
const float* kptr = (const float*)weight_data_tm + maxk * g * 4;
const Mat m = bottom_blob.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__m128 _sum = _mm_setzero_ps();
if (bias_term)
{
_sum = _mm_loadu_ps((const float*)bias_data + g * 4);
}
for (int y = 0; y < kernel_h; y++)
{
int sys = (i + y * dilation_h - (kernel_extent_h - 1));
if (sys < 0 || sys % stride_h != 0)
continue;
int sy = sys / stride_h;
if (sy >= h)
continue;
for (int x = 0; x < kernel_w; x++)
{
int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
if (sxs < 0 || sxs % stride_w != 0)
continue;
int sx = sxs / stride_w;
if (sx >= w)
continue;
const float* sptr = m.row(sy) + sx * 4;
int k = y * kernel_w + x;
__m128 _val = _mm_loadu_ps(sptr);
__m128 _w = _mm_loadu_ps(kptr + k * 4);
_sum = _mm_comp_fmadd_ps(_val, _w, _sum);
}
}
_sum = activation_sse(_sum, activation_type, activation_params);
_mm_storeu_ps(outptr, _sum);
outptr += 4;
}
}
}
}
}
#endif // __SSE2__
if (elempack == 1)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < channels; g++)
{
float* outptr = top_blob_bordered.channel(g);
const float* kptr = (const float*)weight_data_tm + maxk * g;
const Mat m = bottom_blob.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
float sum = 0.f;
if (bias_term)
{
sum = bias_data[g];
}
for (int y = 0; y < kernel_h; y++)
{
int sys = (i + y * dilation_h - (kernel_extent_h - 1));
if (sys < 0 || sys % stride_h != 0)
continue;
int sy = sys / stride_h;
if (sy >= h)
continue;
const float* sptr = m.row(sy);
for (int x = 0; x < kernel_w; x++)
{
int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
if (sxs < 0 || sxs % stride_w != 0)
continue;
int sx = sxs / stride_w;
if (sx >= w)
continue;
float val = sptr[sx];
int k = y * kernel_w + x;
float w = kptr[k];
sum += val * w;
}
}
sum = activation_ss(sum, activation_type, activation_params);
outptr[0] = sum;
outptr++;
}
}
}
}
}
else
{
// group deconvolution
const int channels_g = channels * elempack / group;
const int num_output_g = num_output / group;
int g_elempack = 1;
int out_g_elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
#if __AVX512F__
g_elempack = channels_g % 16 == 0 ? 16 : channels_g % 8 == 0 ? 8 : channels_g % 4 == 0 ? 4 : 1;
out_g_elempack = num_output_g % 16 == 0 ? 16 : num_output_g % 8 == 0 ? 8 : num_output_g % 4 == 0 ? 4 : 1;
#elif __AVX__
g_elempack = channels_g % 8 == 0 ? 8 : channels_g % 4 == 0 ? 4 : 1;
out_g_elempack = num_output_g % 8 == 0 ? 8 : num_output_g % 4 == 0 ? 4 : 1;
#else
g_elempack = channels_g % 4 == 0 ? 4 : 1;
out_g_elempack = num_output_g % 4 == 0 ? 4 : 1;
#endif
}
#endif // __SSE2__
// unpacking
Mat bottom_blob_unpacked = bottom_blob;
if (elempack > g_elempack)
{
Option opt_p = opt;
opt_p.blob_allocator = opt.workspace_allocator;
convert_packing(bottom_blob, bottom_blob_unpacked, g_elempack, opt_p);
}
Mat top_blob_bordered_unpacked = top_blob_bordered;
if (out_g_elempack < out_elempack)
{
top_blob_bordered_unpacked.create(outw, outh, num_output / out_g_elempack, out_elemsize / out_elempack * out_g_elempack, out_g_elempack, opt.workspace_allocator);
if (top_blob_bordered_unpacked.empty())
return -100;
}
for (int g = 0; g < group; g++)
{
const Mat bottom_blob_g = bottom_blob_unpacked.channel_range(channels_g * g / g_elempack, channels_g / g_elempack);
Mat top_blob_bordered_g = top_blob_bordered_unpacked.channel_range(num_output_g * g / out_g_elempack, num_output_g / out_g_elempack);
const ncnn::Layer* op = group_ops[g];
Option opt_g = opt;
opt_g.blob_allocator = top_blob_bordered_unpacked.allocator;
// forward
op->forward(bottom_blob_g, top_blob_bordered_g, opt_g);
}
// packing
if (out_g_elempack < out_elempack)
{
convert_packing(top_blob_bordered_unpacked, top_blob_bordered, out_elempack, opt);
}
else
{
top_blob_bordered = top_blob_bordered_unpacked;
}
}
cut_padding(top_blob_bordered, top_blob, opt);
if (top_blob.empty())
return -100;
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x268, %rsp # imm = 0x268
movq %rdx, 0x110(%rsp)
movq %rdi, %r15
movl 0x2c(%rsi), %ebx
movl 0x30(%rsi), %r10d
movq 0x10(%rsi), %rax
movq %rsi, 0x50(%rsp)
movslq 0x18(%rsi), %r11
movq (%rdi), %rdi
movq -0x18(%rdi), %rdx
movl 0xd4(%r15,%rdx), %r8d
movl 0xd8(%r15,%rdx), %r9d
decl %r8d
movl %r8d, 0x2c(%rsp)
leal -0x1(%rbx), %r14d
decl %r9d
movl %r9d, 0x28(%rsp)
movq %r10, 0x38(%rsp)
leal -0x1(%r10), %r13d
movq %rcx, 0x48(%rsp)
cmpb $0x1, 0x27(%rcx)
jne 0x2d5cff
xorl %esi, %esi
testb $0x3, 0xd0(%r15,%rdx)
sete %sil
leal (%rsi,%rsi,2), %r10d
incl %r10d
jmp 0x2d5d03
pushq $0x1
popq %r10
movl 0x2c(%rsp), %ecx
imull 0xdc(%r15,%rdx), %ecx
movl %ecx, 0x2c(%rsp)
imull 0xe4(%r15,%rdx), %r14d
movl 0x28(%rsp), %ecx
imull 0xe0(%r15,%rdx), %ecx
movl %ecx, 0x28(%rsp)
imull 0xe8(%r15,%rdx), %r13d
movl 0xfc(%r15,%rdx), %r12d
movl 0x100(%r15,%rdx), %ebp
movq 0x50(%rsp), %rcx
movl 0x38(%rcx), %ecx
movq %rcx, 0x40(%rsp)
xorl %edx, %edx
divq %r11
movq %rax, %r9
andq $0x0, 0xb0(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, 0x70(%rsp)
movups %xmm0, 0x7c(%rsp)
movaps %xmm0, 0x90(%rsp)
movups %xmm0, 0x9c(%rsp)
movq -0x18(%rdi), %rdx
leaq (%r15,%rdx), %rax
pushq $0x10
popq %rcx
cmpl $0x0, 0xec(%r15,%rdx)
movq %r11, 0x18(%rsp)
jg 0x2d5eae
cmpl $0x0, 0xf0(%rax)
jg 0x2d5eae
cmpl $0x0, 0xf4(%rax)
jg 0x2d5eae
cmpl $0x0, 0xf8(%rax)
jg 0x2d5eae
cmpl $0x0, 0x104(%rax)
jle 0x2d5dde
cmpl $0x0, 0x108(%rax)
jg 0x2d5eae
leaq 0x70(%rsp), %rax
movq 0x110(%rsp), %rcx
cmpq %rcx, %rax
je 0x2d5ea5
movq %r9, 0x20(%rsp)
movq 0x8(%rcx), %rax
testq %rax, %rax
je 0x2d5e45
lock
incl (%rax)
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x2d5e45
lock
decl (%rax)
jne 0x2d5e45
movl %r10d, 0x14(%rsp)
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x2d5e38
movq (%rdi), %rax
callq *0x18(%rax)
movl 0x14(%rsp), %r10d
jmp 0x2d5e45
movq %rsi, %rdi
callq 0x5f3e0
movl 0x14(%rsp), %r10d
movq 0x110(%rsp), %rcx
movups (%rcx), %xmm0
movaps %xmm0, 0x70(%rsp)
movq 0x10(%rcx), %rax
movq %rax, 0x80(%rsp)
movl 0x18(%rcx), %eax
movl %eax, 0x88(%rsp)
movq 0x20(%rcx), %rax
movq %rax, 0x90(%rsp)
movups 0x28(%rcx), %xmm0
movups %xmm0, 0x98(%rsp)
movl 0x38(%rcx), %eax
movl %eax, 0xa8(%rsp)
movq 0x40(%rcx), %rax
movq %rax, 0xb0(%rsp)
movq (%r15), %rax
movq -0x18(%rax), %rdx
movq 0x20(%rsp), %r9
addq %r15, %rdx
pushq $0x8
popq %rcx
movq %rdx, %rax
addl 0x2c(%rsp), %r14d
addl 0x28(%rsp), %r13d
leal (%r12,%r14), %esi
incl %esi
leal 0x1(%rbp,%r13), %r8d
movl %r10d, %r12d
imulq %r12, %r9
movl 0xd0(%rax), %eax
cltd
idivl %r12d
movq 0x48(%rsp), %rdx
movq (%rdx,%rcx), %rcx
movq %rcx, (%rsp)
leaq 0x70(%rsp), %rdi
movl %esi, %ebp
movl %r8d, %r14d
movl %r8d, %edx
movl %eax, %ecx
movq %r9, %r13
movq %r9, %r8
movl %r12d, %r9d
callq 0x628f2
movq 0x70(%rsp), %rax
pushq $-0x64
popq %rcx
movq %rax, 0x118(%rsp)
testq %rax, %rax
movq 0x18(%rsp), %r8
je 0x2d6f8c
movq 0xb0(%rsp), %rdx
movslq 0xa8(%rsp), %rax
movq %rdx, 0x68(%rsp)
imulq %rdx, %rax
testq %rax, %rax
je 0x2d6f8c
movq (%r15), %rax
movq -0x18(%rax), %rdx
movl 0xd0(%r15,%rdx), %ecx
movl %r8d, %eax
imull 0x40(%rsp), %eax
movl 0x114(%r15,%rdx), %esi
cmpl %esi, %eax
jne 0x2d657b
cmpl %ecx, %eax
jne 0x2d657b
movl 0xd8(%r15,%rdx), %eax
imull 0xd4(%r15,%rdx), %eax
movl %eax, 0x60(%rsp)
cmpl $0x1, %r8d
je 0x2d6aa6
cmpl $0x4, %r8d
jne 0x2d6f4f
shll $0x2, 0x60(%rsp)
xorl %ecx, %ecx
testl %ebp, %ebp
cmovlel %ecx, %ebp
movl %ebp, 0x14(%rsp)
testl %r14d, %r14d
cmovlel %ecx, %r14d
movl %r14d, 0x5c(%rsp)
movq 0x40(%rsp), %rax
testl %eax, %eax
cmovlel %ecx, %eax
movq %rax, 0x40(%rsp)
xorps %xmm0, %xmm0
movaps 0x11b044(%rip), %xmm15 # 0x3f1010
movaps 0x11b04c(%rip), %xmm9 # 0x3f1020
movaps 0x11b054(%rip), %xmm8 # 0x3f1030
movaps 0x1180bd(%rip), %xmm5 # 0x3ee0a0
movaps 0x11b056(%rip), %xmm6 # 0x3f1040
xorl %eax, %eax
movaps 0x11b07c(%rip), %xmm11 # 0x3f1070
movaps 0x11b084(%rip), %xmm12 # 0x3f1080
movaps 0x11b08c(%rip), %xmm13 # 0x3f1090
cmpq 0x40(%rsp), %rax
je 0x2d6f4f
movq %rcx, 0x118(%rsp)
movslq %ecx, %r11
movq 0xb0(%rsp), %rdi
imulq %rax, %rdi
imulq 0x80(%rsp), %rdi
shlq $0x2, %r11
addq 0x70(%rsp), %rdi
movq 0x50(%rsp), %rdx
movq %rax, %rsi
movslq 0x2c(%rdx), %rcx
movq 0x40(%rdx), %r8
imulq %rax, %r8
movq 0x10(%rdx), %rax
imulq %rax, %r8
addq (%rdx), %r8
movq %r8, 0x20(%rsp)
imulq %rax, %rcx
movq %rsi, 0x68(%rsp)
shlq $0x4, %rsi
movq %rsi, 0xb8(%rsp)
addq 0x20(%r15), %r11
xorl %esi, %esi
cmpl 0x5c(%rsp), %esi
je 0x2d6562
movq (%r15), %rax
movq %rax, 0x120(%rsp)
movl %esi, 0x170(%rsp)
subl 0x28(%rsp), %esi
xorl %r13d, %r13d
cmpl 0x14(%rsp), %r13d
je 0x2d6554
movq 0x120(%rsp), %rax
movq -0x18(%rax), %r14
cmpl $0x0, 0x10c(%r15,%r14)
movq %rdi, 0x18(%rsp)
je 0x2d60d7
movq 0x1b0(%r15,%r14), %rax
movq 0xb8(%rsp), %rdx
movups (%rax,%rdx), %xmm1
jmp 0x2d60da
xorps %xmm1, %xmm1
movl 0xd8(%r15,%r14), %ebp
movl %r13d, 0x30(%rsp)
subl 0x2c(%rsp), %r13d
xorl %edi, %edi
testl %ebp, %ebp
cmovlel %edi, %ebp
cmpl %ebp, %edi
je 0x2d619c
movl 0xe0(%r15,%r14), %eax
imull %edi, %eax
addl %esi, %eax
js 0x2d6195
cltd
idivl 0xe8(%r15,%r14)
testl %edx, %edx
jne 0x2d6195
cmpl 0x38(%rsp), %eax
jge 0x2d6195
movl 0xd4(%r15,%r14), %edx
movslq %eax, %r12
imulq %rcx, %r12
addq 0x20(%rsp), %r12
testl %edx, %edx
movl $0x0, %r10d
cmovgl %edx, %r10d
imull %edi, %edx
movslq %edx, %r8
shlq $0x4, %r8
addq %r11, %r8
xorl %r9d, %r9d
cmpq %r9, %r10
je 0x2d6195
movl 0xdc(%r15,%r14), %eax
imull %r9d, %eax
addl %r13d, %eax
js 0x2d618c
cltd
idivl 0xe4(%r15,%r14)
testl %edx, %edx
jne 0x2d618c
cmpl %ebx, %eax
jge 0x2d618c
shll $0x2, %eax
cltq
movups (%r12,%rax,4), %xmm2
movups (%r8), %xmm3
mulps %xmm2, %xmm3
addps %xmm3, %xmm1
incq %r9
addq $0x10, %r8
jmp 0x2d6151
incl %edi
jmp 0x2d60f3
movl 0x118(%r15,%r14), %eax
decl %eax
cmpl $0x5, %eax
ja 0x2d6538
leaq 0x12288e(%rip), %rdx # 0x3f8a44
movslq (%rdx,%rax,4), %rax
addq %rdx, %rax
movaps %xmm1, %xmm7
maxps %xmm0, %xmm7
movq 0x18(%rsp), %rdi
movl 0x30(%rsp), %r13d
jmpq *%rax
movq 0x120(%r15,%r14), %rax
minps %xmm0, %xmm1
movss (%rax), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
mulps %xmm1, %xmm2
addps %xmm2, %xmm7
jmp 0x2d6545
movaps %xmm1, %xmm2
minps %xmm15, %xmm2
maxps %xmm9, %xmm2
movaps %xmm2, %xmm14
mulps %xmm8, %xmm14
addps %xmm5, %xmm14
cvttps2dq %xmm14, %xmm7
cvtdq2ps %xmm7, %xmm7
cmpltps %xmm7, %xmm14
andps %xmm6, %xmm14
subps %xmm14, %xmm7
movaps %xmm15, %xmm8
cvttps2dq %xmm7, %xmm15
movaps 0x11ae27(%rip), %xmm0 # 0x3f1050
mulps %xmm0, %xmm7
subps %xmm7, %xmm2
movaps %xmm2, %xmm7
mulps %xmm2, %xmm7
movaps %xmm2, %xmm14
movaps 0x11ae1f(%rip), %xmm10 # 0x3f1060
mulps %xmm10, %xmm14
addps %xmm11, %xmm14
mulps %xmm2, %xmm14
addps %xmm12, %xmm14
mulps %xmm2, %xmm14
addps %xmm13, %xmm14
mulps %xmm2, %xmm14
xorps %xmm4, %xmm4
movaps %xmm5, %xmm0
movaps 0x11ae36(%rip), %xmm5 # 0x3f10a0
addps %xmm5, %xmm14
mulps %xmm2, %xmm14
addps %xmm0, %xmm14
mulps %xmm7, %xmm14
addps %xmm6, %xmm2
addps %xmm14, %xmm2
pslld $0x17, %xmm15
paddd %xmm6, %xmm15
mulps %xmm2, %xmm15
addps %xmm6, %xmm15
movaps %xmm15, %xmm2
maxps 0x11ae10(%rip), %xmm15 # 0x3f10b0
movaps %xmm15, %xmm7
andps 0x11ae14(%rip), %xmm15 # 0x3f10c0
orps %xmm0, %xmm15
movaps %xmm15, %xmm3
cmpltps 0x11ae24(%rip), %xmm3 # 0x3f10e0
movaps %xmm3, %xmm14
andps %xmm15, %xmm14
movaps 0x11ae24(%rip), %xmm10 # 0x3f10f0
addps %xmm10, %xmm15
addps %xmm14, %xmm15
cmpleps %xmm4, %xmm2
psrld $0x17, %xmm7
paddd 0x11adeb(%rip), %xmm7 # 0x3f10d0
cvtdq2ps %xmm7, %xmm14
andps %xmm6, %xmm3
subps %xmm3, %xmm14
movaps %xmm15, %xmm7
mulps 0x11ae05(%rip), %xmm7 # 0x3f1100
addps 0x11ae0e(%rip), %xmm7 # 0x3f1110
mulps %xmm15, %xmm7
addps 0x11ae13(%rip), %xmm7 # 0x3f1120
mulps %xmm15, %xmm7
addps 0x11ae18(%rip), %xmm7 # 0x3f1130
mulps %xmm15, %xmm7
addps 0x11ae1d(%rip), %xmm7 # 0x3f1140
mulps %xmm15, %xmm7
addps 0x11ae22(%rip), %xmm7 # 0x3f1150
mulps %xmm15, %xmm7
addps 0x11ae27(%rip), %xmm7 # 0x3f1160
mulps %xmm15, %xmm7
addps 0x11ae2c(%rip), %xmm7 # 0x3f1170
mulps %xmm15, %xmm7
addps 0x11ae31(%rip), %xmm7 # 0x3f1180
mulps %xmm15, %xmm7
movaps 0x11acf6(%rip), %xmm4 # 0x3f1050
mulps %xmm4, %xmm14
addps %xmm15, %xmm14
mulps %xmm15, %xmm15
addps 0x11ae23(%rip), %xmm7 # 0x3f1190
mulps %xmm15, %xmm7
movaps %xmm8, %xmm15
movaps 0x11acb3(%rip), %xmm8 # 0x3f1030
addps %xmm7, %xmm14
mulps 0x11b937(%rip), %xmm14 # 0x3f1cc0
movaps %xmm2, %xmm3
andnps %xmm14, %xmm3
andps 0x11b939(%rip), %xmm2 # 0x3f1cd0
orps %xmm3, %xmm2
minps %xmm15, %xmm2
maxps %xmm9, %xmm2
movaps %xmm2, %xmm3
mulps %xmm8, %xmm3
addps %xmm0, %xmm3
cvttps2dq %xmm3, %xmm7
cvtdq2ps %xmm7, %xmm7
cmpltps %xmm7, %xmm3
andps %xmm6, %xmm3
subps %xmm3, %xmm7
cvttps2dq %xmm7, %xmm14
mulps %xmm4, %xmm7
subps %xmm7, %xmm2
movaps %xmm2, %xmm3
mulps %xmm2, %xmm3
movaps %xmm2, %xmm7
mulps 0x11ac88(%rip), %xmm7 # 0x3f1060
addps %xmm11, %xmm7
mulps %xmm2, %xmm7
addps %xmm12, %xmm7
mulps %xmm2, %xmm7
addps %xmm13, %xmm7
mulps %xmm2, %xmm7
addps %xmm5, %xmm7
movaps %xmm0, %xmm5
xorps %xmm0, %xmm0
mulps %xmm2, %xmm7
addps %xmm5, %xmm7
mulps %xmm3, %xmm7
addps %xmm6, %xmm2
addps %xmm7, %xmm2
pslld $0x17, %xmm14
paddd %xmm6, %xmm14
mulps %xmm2, %xmm14
addps %xmm6, %xmm14
rcpps %xmm14, %xmm2
movaps %xmm2, %xmm7
addps %xmm2, %xmm7
mulps %xmm7, %xmm14
movaps 0x11b8b3(%rip), %xmm3 # 0x3f1ce0
subps %xmm14, %xmm3
mulps %xmm2, %xmm3
addps %xmm10, %xmm7
addps %xmm3, %xmm7
jmp 0x2d6533
movq 0x120(%r15,%r14), %rax
movss (%rax), %xmm2
movss 0x4(%rax), %xmm7
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
maxps %xmm2, %xmm1
minps %xmm7, %xmm1
movaps %xmm1, %xmm7
jmp 0x2d6545
xorps 0x117c22(%rip), %xmm1 # 0x3ee090
minps %xmm15, %xmm1
maxps %xmm9, %xmm1
movaps %xmm1, %xmm7
mulps %xmm8, %xmm7
addps %xmm5, %xmm7
cvttps2dq %xmm7, %xmm2
cvtdq2ps %xmm2, %xmm2
cmpltps %xmm2, %xmm7
andps %xmm6, %xmm7
subps %xmm7, %xmm2
cvttps2dq %xmm2, %xmm14
mulps 0x11b853(%rip), %xmm2 # 0x3f1cf0
addps %xmm1, %xmm2
movaps %xmm2, %xmm1
mulps %xmm2, %xmm1
movaps %xmm2, %xmm7
mulps 0x11abb0(%rip), %xmm7 # 0x3f1060
addps 0x11abb9(%rip), %xmm7 # 0x3f1070
mulps %xmm2, %xmm7
addps 0x11abbf(%rip), %xmm7 # 0x3f1080
mulps %xmm2, %xmm7
addps 0x11abc5(%rip), %xmm7 # 0x3f1090
mulps %xmm2, %xmm7
addps 0x11abcb(%rip), %xmm7 # 0x3f10a0
mulps %xmm2, %xmm7
addps %xmm5, %xmm7
mulps %xmm1, %xmm7
addps %xmm6, %xmm2
addps %xmm7, %xmm2
pslld $0x17, %xmm14
paddd %xmm6, %xmm14
mulps %xmm2, %xmm14
addps %xmm6, %xmm14
rcpps %xmm14, %xmm1
mulps %xmm1, %xmm14
movaps %xmm6, %xmm7
subps %xmm14, %xmm7
mulps %xmm1, %xmm7
addps %xmm1, %xmm7
jmp 0x2d6545
movq 0x120(%r15,%r14), %rax
movss (%rax), %xmm7
movss 0x4(%rax), %xmm2
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
mulps %xmm1, %xmm7
addps %xmm2, %xmm7
maxps %xmm0, %xmm7
minps %xmm6, %xmm7
mulps %xmm1, %xmm7
jmp 0x2d6545
movaps %xmm1, %xmm7
movq 0x18(%rsp), %rdi
movl 0x30(%rsp), %r13d
movups %xmm7, (%rdi)
addq $0x10, %rdi
incl %r13d
jmp 0x2d609a
movl 0x170(%rsp), %esi
incl %esi
jmp 0x2d6077
movq 0x68(%rsp), %rax
incq %rax
movq 0x118(%rsp), %rcx
addl 0x60(%rsp), %ecx
jmp 0x2d6004
cltd
idivl %esi
movl %eax, %edi
movl %ecx, %eax
cltd
idivl %esi
movl %eax, %r9d
movq 0x48(%rsp), %rax
cmpb $0x1, 0x27(%rax)
jne 0x2d65b5
xorl %eax, %eax
testb $0x3, %dil
sete %al
leal (%rax,%rax,2), %eax
incl %eax
movl %eax, 0x20(%rsp)
xorl %eax, %eax
testb $0x3, %r9b
sete %al
leal (%rax,%rax,2), %eax
incl %eax
jmp 0x2d65bc
pushq $0x1
popq %rax
movl %eax, 0x20(%rsp)
movl %eax, 0x38(%rsp)
movq 0x50(%rsp), %rdx
movq (%rdx), %rax
movq %rax, 0x128(%rsp)
movq 0x8(%rdx), %rax
movq %rax, 0x130(%rsp)
movq 0x10(%rdx), %rcx
movq %rcx, 0x138(%rsp)
movl 0x18(%rdx), %ecx
movl %ecx, 0x140(%rsp)
movq 0x20(%rdx), %rcx
movq %rcx, 0x148(%rsp)
movups 0x28(%rdx), %xmm0
movups %xmm0, 0x150(%rsp)
movl 0x38(%rdx), %ecx
movl %ecx, 0x160(%rsp)
movq 0x40(%rdx), %rcx
movq %rcx, 0x168(%rsp)
testq %rax, %rax
je 0x2d6628
lock
incl (%rax)
cmpl 0x20(%rsp), %r8d
movl %r9d, 0x14(%rsp)
jle 0x2d6686
movl %edi, %ebx
movq 0x48(%rsp), %rax
movups (%rax), %xmm0
movups 0x10(%rax), %xmm1
movups 0x20(%rax), %xmm2
movups 0x30(%rax), %xmm3
leaq 0xc0(%rsp), %rcx
movaps %xmm3, 0x30(%rcx)
movaps %xmm2, 0x20(%rcx)
movaps %xmm1, 0x10(%rcx)
movaps %xmm0, (%rcx)
movq 0x10(%rax), %rax
movq %rax, 0x8(%rcx)
leaq 0x128(%rsp), %rsi
movq 0x50(%rsp), %rdi
movl 0x20(%rsp), %edx
callq 0x64e3b
movl %ebx, %edi
movl 0x14(%rsp), %r9d
movq 0x70(%rsp), %rcx
movq 0x78(%rsp), %rax
movq %rcx, 0xc0(%rsp)
movq %rax, 0xc8(%rsp)
movq 0x80(%rsp), %rcx
movq %rcx, 0xd0(%rsp)
movl 0x88(%rsp), %ecx
movl %ecx, 0xd8(%rsp)
movq 0x90(%rsp), %rcx
movq %rcx, 0xe0(%rsp)
movups 0x98(%rsp), %xmm0
movups %xmm0, 0xe8(%rsp)
movl 0xa8(%rsp), %ecx
movl %ecx, 0xf8(%rsp)
movq 0xb0(%rsp), %rcx
movq %rcx, 0x100(%rsp)
testq %rax, %rax
je 0x2d6704
lock
incl (%rax)
cmpl %r12d, 0x38(%rsp)
jae 0x2d6785
movl %edi, %ebx
movq (%r15), %rax
movq -0x18(%rax), %rax
movl 0xd0(%r15,%rax), %eax
cltd
movl 0x38(%rsp), %r9d
idivl %r9d
movl %eax, %ecx
movq %r13, %rax
xorl %edx, %edx
divq %r12
movl %r9d, %r8d
imulq %rax, %r8
movq 0x48(%rsp), %rax
movq 0x10(%rax), %rax
movq %rax, (%rsp)
leaq 0xc0(%rsp), %rdi
movl %ebp, %esi
movl %r14d, %edx
callq 0x628f2
cmpq $0x0, 0xc0(%rsp)
movl 0x14(%rsp), %r9d
je 0x2d6e2a
movl %ebx, %edi
movslq 0xf8(%rsp), %rax
imulq 0x100(%rsp), %rax
testq %rax, %rax
je 0x2d6e2a
movq %r12, 0x18(%rsp)
xorl %r12d, %r12d
xorl %ebp, %ebp
xorl %r14d, %r14d
movq (%r15), %rax
movq -0x18(%rax), %rax
movslq 0x114(%r15,%rax), %rax
cmpq %rax, %r14
jge 0x2d6a2f
movl %r12d, %eax
cltd
movl 0x20(%rsp), %esi
idivl %esi
movl %eax, %ecx
movl %edi, %ebx
movl %edi, %eax
cltd
idivl %esi
movl %eax, %edx
movslq 0x154(%rsp), %rdi
movslq 0x158(%rsp), %rsi
movslq 0x15c(%rsp), %rax
movslq %ecx, %r8
imulq 0x168(%rsp), %r8
movq 0x138(%rsp), %rcx
imulq %rcx, %r8
addq 0x128(%rsp), %r8
movq %r8, 0x1d8(%rsp)
movl 0x140(%rsp), %r8d
andq $0x0, 0x1e0(%rsp)
movq %rcx, 0x1e8(%rsp)
movl %r8d, 0x1f0(%rsp)
movq 0x148(%rsp), %r8
movq %r8, 0x1f8(%rsp)
movl %edi, 0x204(%rsp)
movl %esi, 0x208(%rsp)
movl %eax, 0x20c(%rsp)
movl %edx, 0x210(%rsp)
imulq %rdi, %rsi
imulq %rcx, %rax
imulq %rsi, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rcx
movq %rax, 0x218(%rsp)
movl 0x150(%rsp), %eax
movl %eax, 0x200(%rsp)
movl %ebp, %eax
cltd
movl 0x38(%rsp), %esi
idivl %esi
movl %eax, %ecx
movl %r9d, %r13d
movl %r9d, %eax
cltd
idivl %esi
movl %eax, %edx
movslq 0xec(%rsp), %r8
movslq 0xf0(%rsp), %rdi
movslq 0xf4(%rsp), %rax
movslq %ecx, %rcx
imulq 0x100(%rsp), %rcx
movq 0xd0(%rsp), %rsi
imulq %rsi, %rcx
addq 0xc0(%rsp), %rcx
movq %rcx, 0x190(%rsp)
movl 0xd8(%rsp), %ecx
andq $0x0, 0x198(%rsp)
movq %rsi, 0x1a0(%rsp)
movl %ecx, 0x1a8(%rsp)
movq 0xe0(%rsp), %rcx
movq %rcx, 0x1b0(%rsp)
movl %r8d, 0x1bc(%rsp)
movl %edi, 0x1c0(%rsp)
movl %eax, 0x1c4(%rsp)
movl %edx, 0x1c8(%rsp)
imulq %r8, %rdi
imulq %rsi, %rax
imulq %rdi, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rsi
movq %rax, 0x1d0(%rsp)
movl 0xe8(%rsp), %eax
movl %eax, 0x1b8(%rsp)
movq 0x8(%r15), %rax
movq (%rax,%r14,8), %rdi
movq 0x48(%rsp), %rax
movups (%rax), %xmm0
movups 0x10(%rax), %xmm1
movups 0x20(%rax), %xmm2
movups 0x30(%rax), %xmm3
movaps %xmm0, 0x220(%rsp)
movaps %xmm3, 0x250(%rsp)
movaps %xmm2, 0x240(%rsp)
movaps %xmm1, 0x230(%rsp)
movq %rcx, 0x228(%rsp)
movq (%rdi), %rax
leaq 0x1d8(%rsp), %rsi
leaq 0x190(%rsp), %rdx
leaq 0x220(%rsp), %rcx
callq *0x38(%rax)
movq 0x198(%rsp), %rax
testq %rax, %rax
je 0x2d69e5
lock
decl (%rax)
jne 0x2d69e5
movq 0x190(%rsp), %rsi
movq 0x1b0(%rsp), %rdi
testq %rdi, %rdi
je 0x2d69dd
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2d69e5
movq %rsi, %rdi
callq 0x5f3e0
movq 0x1e0(%rsp), %rax
testq %rax, %rax
je 0x2d6a1c
lock
decl (%rax)
jne 0x2d6a1c
movq 0x1d8(%rsp), %rsi
movq 0x1f8(%rsp), %rdi
testq %rdi, %rdi
je 0x2d6a14
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2d6a1c
movq %rsi, %rdi
callq 0x5f3e0
incq %r14
movl %r13d, %r9d
addl %r13d, %ebp
movl %ebx, %edi
addl %ebx, %r12d
jmp 0x2d6792
movq 0x18(%rsp), %rdx
cmpl %edx, 0x38(%rsp)
jae 0x2d6a5e
leaq 0xc0(%rsp), %rdi
leaq 0x70(%rsp), %rsi
movq 0x48(%rsp), %rcx
callq 0x64e3b
movq 0xc8(%rsp), %rax
jmp 0x2d6ee9
movq 0xc8(%rsp), %rax
testq %rax, %rax
je 0x2d6a6e
lock
incl (%rax)
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x2d6e73
lock
decl (%rax)
jne 0x2d6e73
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x2d6e6b
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2d6e73
movq 0x68(%rsp), %rax
imulq 0x80(%rsp), %rax
movq %rax, 0x68(%rsp)
movq 0x20(%r15), %rax
movq %rax, 0x20(%rsp)
movq 0x50(%rsp), %rcx
movslq 0x2c(%rcx), %r13
movq 0x10(%rcx), %rax
movq 0x40(%rcx), %rdx
imulq %rax, %rdx
movq %rdx, 0x188(%rsp)
imulq %rax, %r13
xorl %eax, %eax
testl %ebp, %ebp
cmovlel %eax, %ebp
movl %ebp, 0x14(%rsp)
xorl %edx, %edx
movq (%rcx), %rax
movq %rax, 0x180(%rsp)
testl %r14d, %r14d
cmovlel %edx, %r14d
movl %r14d, 0x5c(%rsp)
movslq 0x60(%rsp), %rcx
movq 0x40(%rsp), %rax
testl %eax, %eax
cmovlel %edx, %eax
movq %rax, 0x40(%rsp)
shlq $0x2, %rcx
movq %rcx, 0x60(%rsp)
cmpq 0x40(%rsp), %rdx
je 0x2d6f4f
movq 0x68(%rsp), %rcx
imulq %rdx, %rcx
addq 0x118(%rsp), %rcx
movq 0x188(%rsp), %rbp
movq %rdx, 0xb8(%rsp)
imulq %rdx, %rbp
addq 0x180(%rsp), %rbp
xorl %r14d, %r14d
cmpl 0x5c(%rsp), %r14d
je 0x2d6e10
movq (%r15), %rax
movq %rax, 0x120(%rsp)
movl %r14d, 0x50(%rsp)
subl 0x28(%rsp), %r14d
xorl %edi, %edi
cmpl 0x14(%rsp), %edi
je 0x2d6e03
movq %rcx, 0x30(%rsp)
movq 0x120(%rsp), %rax
movq -0x18(%rax), %rcx
cmpl $0x0, 0x10c(%r15,%rcx)
je 0x2d6bbe
movq 0x1b0(%r15,%rcx), %rax
movq 0xb8(%rsp), %rdx
movss (%rax,%rdx,4), %xmm4
jmp 0x2d6bc1
xorps %xmm4, %xmm4
movl 0xd8(%r15,%rcx), %esi
movl %edi, 0x18(%rsp)
subl 0x2c(%rsp), %edi
xorl %r8d, %r8d
testl %esi, %esi
cmovlel %r8d, %esi
cmpl %esi, %r8d
je 0x2d6c80
movl 0xe0(%r15,%rcx), %eax
imull %r8d, %eax
addl %r14d, %eax
js 0x2d6c78
cltd
idivl 0xe8(%r15,%rcx)
testl %edx, %edx
jne 0x2d6c78
cmpl 0x38(%rsp), %eax
jge 0x2d6c78
movslq %eax, %r9
imulq %r13, %r9
addq %rbp, %r9
movl 0xd4(%r15,%rcx), %eax
testl %eax, %eax
movl $0x0, %r10d
cmovgl %eax, %r10d
imull %r8d, %eax
cltq
movq 0x20(%rsp), %rdx
leaq (%rdx,%rax,4), %r11
xorl %r12d, %r12d
cmpq %r12, %r10
je 0x2d6c78
movl 0xdc(%r15,%rcx), %eax
imull %r12d, %eax
addl %edi, %eax
js 0x2d6c73
cltd
idivl 0xe4(%r15,%rcx)
testl %edx, %edx
jne 0x2d6c73
cmpl %ebx, %eax
jge 0x2d6c73
cltq
movss (%r11,%r12,4), %xmm0
mulss (%r9,%rax,4), %xmm0
addss %xmm0, %xmm4
incq %r12
jmp 0x2d6c3b
incl %r8d
jmp 0x2d6bda
movl 0x118(%r15,%rcx), %eax
decl %eax
cmpl $0x5, %eax
ja 0x2d6de4
leaq 0x121d92(%rip), %rdx # 0x3f8a2c
movslq (%rdx,%rax,4), %rax
addq %rdx, %rax
movl 0x18(%rsp), %edi
jmpq *%rax
maxss 0x117361(%rip), %xmm4 # 0x3ee010
movaps %xmm4, %xmm0
jmp 0x2d6d8a
movaps %xmm4, %xmm0
movaps %xmm4, 0x170(%rsp)
callq 0x5f410
addss 0x117fb9(%rip), %xmm0 # 0x3eec88
callq 0x5f200
callq 0x5f160
movl 0x18(%rsp), %edi
mulss 0x170(%rsp), %xmm0
jmp 0x2d6d8a
movq 0x120(%r15,%rcx), %rax
maxss (%rax), %xmm4
movss 0x4(%rax), %xmm1
ucomiss %xmm1, %xmm4
movaps %xmm4, %xmm0
movq 0x30(%rsp), %rcx
jbe 0x2d6d8f
jmp 0x2d6dfe
movss 0x11a49e(%rip), %xmm2 # 0x3f11b8
minss %xmm2, %xmm4
movaps %xmm4, %xmm0
xorps 0x117368(%rip), %xmm0 # 0x3ee090
cmpltss 0x11a48b(%rip), %xmm4 # 0x3f11bc
movaps %xmm4, %xmm1
andnps %xmm0, %xmm1
andps %xmm2, %xmm4
orps %xmm1, %xmm4
movaps %xmm4, %xmm0
callq 0x5f410
movl 0x18(%rsp), %edi
movaps %xmm0, %xmm1
movss 0x117f34(%rip), %xmm0 # 0x3eec88
addss %xmm0, %xmm1
divss %xmm1, %xmm0
jmp 0x2d6d8a
movq 0x120(%r15,%rcx), %rax
movss (%rax), %xmm1
xorps %xmm0, %xmm0
cmpltss %xmm4, %xmm0
movaps %xmm0, %xmm2
andnps %xmm1, %xmm2
movss 0x117f08(%rip), %xmm1 # 0x3eec88
andps %xmm1, %xmm0
orps %xmm2, %xmm0
mulss %xmm4, %xmm0
movq 0x30(%rsp), %rcx
movss %xmm0, (%rcx)
addq $0x4, %rcx
incl %edi
jmp 0x2d6b81
movq 0x120(%r15,%rcx), %rax
movss (%rax), %xmm1
movss 0x4(%rax), %xmm2
movaps %xmm2, %xmm3
xorps 0x1172d7(%rip), %xmm3 # 0x3ee090
divss %xmm1, %xmm3
xorps %xmm0, %xmm0
ucomiss %xmm3, %xmm4
movq 0x30(%rsp), %rcx
jb 0x2d6d8f
movss 0x117eb6(%rip), %xmm0 # 0x3eec88
divss %xmm1, %xmm0
addss %xmm0, %xmm3
ucomiss %xmm3, %xmm4
jbe 0x2d6df2
movaps %xmm4, %xmm0
jmp 0x2d6d8f
movaps %xmm4, %xmm0
movq 0x30(%rsp), %rcx
movl 0x18(%rsp), %edi
jmp 0x2d6d8f
mulss %xmm4, %xmm1
addss %xmm2, %xmm1
mulss %xmm4, %xmm1
movaps %xmm1, %xmm0
jmp 0x2d6d8f
movl 0x50(%rsp), %r14d
incl %r14d
jmp 0x2d6b5f
movq 0xb8(%rsp), %rdx
incq %rdx
movq 0x60(%rsp), %rax
addq %rax, 0x20(%rsp)
jmp 0x2d6b24
movq 0xc8(%rsp), %rax
testq %rax, %rax
pushq $-0x64
popq %rbx
je 0x2d6fdb
lock
decl (%rax)
jne 0x2d6fdb
movq 0xc0(%rsp), %rsi
movq 0xe0(%rsp), %rdi
testq %rdi, %rdi
je 0x2d6fd3
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2d6fdb
movq %rsi, %rdi
callq 0x5f3e0
movq 0xc0(%rsp), %rcx
movq 0xc8(%rsp), %rax
movq %rcx, 0x70(%rsp)
movq %rax, 0x78(%rsp)
movq 0xd0(%rsp), %rcx
movq %rcx, 0x80(%rsp)
movl 0xd8(%rsp), %ecx
movl %ecx, 0x88(%rsp)
movq 0xe0(%rsp), %rcx
movq %rcx, 0x90(%rsp)
movups 0xe8(%rsp), %xmm0
movups %xmm0, 0x98(%rsp)
movl 0xf8(%rsp), %ecx
movl %ecx, 0xa8(%rsp)
movq 0x100(%rsp), %rcx
movq %rcx, 0xb0(%rsp)
testq %rax, %rax
je 0x2d6f18
lock
decl (%rax)
jne 0x2d6f18
movq 0xc0(%rsp), %rsi
movq 0xe0(%rsp), %rdi
testq %rdi, %rdi
je 0x2d6f10
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2d6f18
movq %rsi, %rdi
callq 0x5f3e0
movq 0x130(%rsp), %rax
testq %rax, %rax
je 0x2d6f4f
lock
decl (%rax)
jne 0x2d6f4f
movq 0x128(%rsp), %rsi
movq 0x148(%rsp), %rdi
testq %rdi, %rdi
je 0x2d6f47
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2d6f4f
movq %rsi, %rdi
callq 0x5f3e0
movq (%r15), %rax
addq -0x18(%rax), %r15
leaq 0x70(%rsp), %rsi
movq %r15, %rdi
movq 0x110(%rsp), %rbx
movq %rbx, %rdx
movq 0x48(%rsp), %rcx
callq 0x2d4cc4
cmpq $0x0, (%rbx)
pushq $-0x64
popq %rcx
je 0x2d6f8c
movslq 0x38(%rbx), %rax
imulq 0x40(%rbx), %rax
xorl %ebx, %ebx
testq %rax, %rax
jne 0x2d6f8e
movl %ecx, %ebx
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x2d6fbf
lock
decl (%rax)
jne 0x2d6fbf
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x2d6fb7
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2d6fbf
movq %rsi, %rdi
callq 0x5f3e0
movl %ebx, %eax
addq $0x268, %rsp # imm = 0x268
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq %rsi, %rdi
callq 0x5f3e0
movq 0x130(%rsp), %rax
testq %rax, %rax
je 0x2d6f8e
lock
decl (%rax)
jne 0x2d6f8e
movq 0x128(%rsp), %rsi
movq 0x148(%rsp), %rdi
testq %rdi, %rdi
je 0x2d700a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2d6f8e
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2d6f8e
jmp 0x2d7174
jmp 0x2d7174
jmp 0x2d7174
jmp 0x2d7174
movq %rax, %rbx
jmp 0x2d70fc
movq %rax, %rbx
jmp 0x2d70c5
jmp 0x2d7174
jmp 0x2d704c
jmp 0x2d7174
jmp 0x2d7174
movq %rax, %rbx
jmp 0x2d7133
movq %rax, %rbx
movq 0x198(%rsp), %rax
testq %rax, %rax
je 0x2d708e
lock
decl (%rax)
jne 0x2d708e
movq 0x190(%rsp), %rsi
movq 0x1b0(%rsp), %rdi
testq %rdi, %rdi
jne 0x2d7088
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2d708e
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x1e0(%rsp), %rax
testq %rax, %rax
je 0x2d70c5
lock
decl (%rax)
jne 0x2d70c5
movq 0x1d8(%rsp), %rsi
movq 0x1f8(%rsp), %rdi
testq %rdi, %rdi
jne 0x2d70bf
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2d70c5
movq (%rdi), %rax
callq *0x18(%rax)
movq 0xc8(%rsp), %rax
testq %rax, %rax
je 0x2d70fc
lock
decl (%rax)
jne 0x2d70fc
movq 0xc0(%rsp), %rsi
movq 0xe0(%rsp), %rdi
testq %rdi, %rdi
jne 0x2d70f6
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2d70fc
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x130(%rsp), %rax
testq %rax, %rax
je 0x2d7133
lock
decl (%rax)
jne 0x2d7133
movq 0x128(%rsp), %rsi
movq 0x148(%rsp), %rdi
testq %rdi, %rdi
jne 0x2d712d
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2d7133
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x2d7164
lock
decl (%rax)
jne 0x2d7164
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
jne 0x2d715e
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2d7164
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
jmp 0x2d7174
jmp 0x2d7174
jmp 0x2d7174
jmp 0x2d7174
movq %rax, %rdi
callq 0x61d68
| /csukuangfj[P]ncnn/src/layer/x86/deconvolutiondepthwise_x86.cpp |
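Every branch in this row (pack16/pack8/pack4/scalar) uses the same output-to-input mapping: output pixel (i, j) gathers input sample (sy, sx) only when `i + y*dilation_h - (kernel_extent_h - 1)` is non-negative, divisible by `stride_h`, and lands inside the input (and likewise along x); everything the test rejects corresponds to a zero inserted by the transposed stride. A hedged scalar sketch of that gather for one pixel (single channel, no bias or activation; names are illustrative):

```cpp
// Sketch of the scalar (elempack == 1) gather above, one output pixel only.
// Assumptions: single channel, kernel already flipped, no bias/activation.
#include <vector>

float deconv_pixel(const std::vector<float>& in, int w, int h,
                   const std::vector<float>& kflip, // kernel_h x kernel_w, flipped
                   int i, int j,
                   int kernel_w, int kernel_h,
                   int dilation_w, int dilation_h,
                   int stride_w, int stride_h)
{
    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
    const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;
    float sum = 0.f;
    for (int y = 0; y < kernel_h; y++)
    {
        int sys = i + y * dilation_h - (kernel_extent_h - 1);
        if (sys < 0 || sys % stride_h != 0)
            continue; // falls between input samples (implicit zero)
        int sy = sys / stride_h;
        if (sy >= h)
            continue;
        for (int x = 0; x < kernel_w; x++)
        {
            int sxs = j + x * dilation_w - (kernel_extent_w - 1);
            if (sxs < 0 || sxs % stride_w != 0)
                continue;
            int sx = sxs / stride_w;
            if (sx >= w)
                continue;
            sum += in[sy * w + sx] * kflip[y * kernel_w + x];
        }
    }
    return sum;
}
```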
virtual thunk to ncnn::DeconvolutionDepthWise_x86::forward(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const | int DeconvolutionDepthWise_x86::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
// convolve with NxN kernel
// value = value + bias
int w = bottom_blob.w;
int h = bottom_blob.h;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;
int outw = (w - 1) * stride_w + kernel_extent_w + output_pad_right;
int outh = (h - 1) * stride_h + kernel_extent_h + output_pad_bottom;
int out_elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
#if __AVX512F__
out_elempack = num_output % 16 == 0 ? 16 : num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#elif __AVX__
out_elempack = num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#else
out_elempack = num_output % 4 == 0 ? 4 : 1;
#endif
}
#endif // __SSE2__
size_t out_elemsize = elemsize / elempack * out_elempack;
Mat top_blob_bordered;
if (pad_left > 0 || pad_right > 0 || pad_top > 0 || pad_bottom > 0 || (output_w > 0 && output_h > 0))
{
top_blob_bordered.create(outw, outh, num_output / out_elempack, out_elemsize, out_elempack, opt.workspace_allocator);
}
else
{
top_blob_bordered = top_blob;
top_blob_bordered.create(outw, outh, num_output / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
}
if (top_blob_bordered.empty())
return -100;
const int maxk = kernel_w * kernel_h;
// depth-wise
if (channels * elempack == group && group == num_output)
{
#if __SSE2__
#if __AVX__
#if __AVX512F__
if (elempack == 16)
{
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < channels; g++)
{
float* outptr = top_blob_bordered.channel(g);
const float* kptr = (const float*)weight_data_tm + maxk * g * 16;
const Mat m = bottom_blob.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__m512 _sum = _mm512_setzero_ps();
if (bias_term)
{
_sum = _mm512_loadu_ps((const float*)bias_data + g * 16);
}
for (int y = 0; y < kernel_h; y++)
{
int sys = (i + y * dilation_h - (kernel_extent_h - 1));
if (sys < 0 || sys % stride_h != 0)
continue;
int sy = sys / stride_h;
if (sy >= h)
continue;
for (int x = 0; x < kernel_w; x++)
{
int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
if (sxs < 0 || sxs % stride_w != 0)
continue;
int sx = sxs / stride_w;
if (sx >= w)
continue;
const float* sptr = m.row(sy) + sx * 16;
int k = y * kernel_w + x;
__m512 _val = _mm512_loadu_ps(sptr);
__m512 _w = _mm512_loadu_ps(kptr + k * 16);
_sum = _mm512_fmadd_ps(_val, _w, _sum);
}
}
_sum = activation_avx512(_sum, activation_type, activation_params);
_mm512_storeu_ps(outptr, _sum);
outptr += 16;
}
}
}
}
}
#endif // __AVX512F__
if (elempack == 8)
{
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < channels; g++)
{
float* outptr = top_blob_bordered.channel(g);
const float* kptr = (const float*)weight_data_tm + maxk * g * 8;
const Mat m = bottom_blob.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__m256 _sum = _mm256_setzero_ps();
if (bias_term)
{
_sum = _mm256_loadu_ps((const float*)bias_data + g * 8);
}
for (int y = 0; y < kernel_h; y++)
{
int sys = (i + y * dilation_h - (kernel_extent_h - 1));
if (sys < 0 || sys % stride_h != 0)
continue;
int sy = sys / stride_h;
if (sy >= h)
continue;
for (int x = 0; x < kernel_w; x++)
{
int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
if (sxs < 0 || sxs % stride_w != 0)
continue;
int sx = sxs / stride_w;
if (sx >= w)
continue;
const float* sptr = m.row(sy) + sx * 8;
int k = y * kernel_w + x;
__m256 _val = _mm256_loadu_ps(sptr);
__m256 _w = _mm256_loadu_ps(kptr + k * 8);
_sum = _mm256_comp_fmadd_ps(_val, _w, _sum);
}
}
_sum = activation_avx(_sum, activation_type, activation_params);
_mm256_storeu_ps(outptr, _sum);
outptr += 8;
}
}
}
}
}
#endif // __AVX__
if (elempack == 4)
{
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < channels; g++)
{
float* outptr = top_blob_bordered.channel(g);
const float* kptr = (const float*)weight_data_tm + maxk * g * 4;
const Mat m = bottom_blob.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__m128 _sum = _mm_setzero_ps();
if (bias_term)
{
_sum = _mm_loadu_ps((const float*)bias_data + g * 4);
}
for (int y = 0; y < kernel_h; y++)
{
int sys = (i + y * dilation_h - (kernel_extent_h - 1));
if (sys < 0 || sys % stride_h != 0)
continue;
int sy = sys / stride_h;
if (sy >= h)
continue;
for (int x = 0; x < kernel_w; x++)
{
int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
if (sxs < 0 || sxs % stride_w != 0)
continue;
int sx = sxs / stride_w;
if (sx >= w)
continue;
const float* sptr = m.row(sy) + sx * 4;
int k = y * kernel_w + x;
__m128 _val = _mm_loadu_ps(sptr);
__m128 _w = _mm_loadu_ps(kptr + k * 4);
_sum = _mm_comp_fmadd_ps(_val, _w, _sum);
}
}
_sum = activation_sse(_sum, activation_type, activation_params);
_mm_storeu_ps(outptr, _sum);
outptr += 4;
}
}
}
}
}
#endif // __SSE2__
if (elempack == 1)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < channels; g++)
{
float* outptr = top_blob_bordered.channel(g);
const float* kptr = (const float*)weight_data_tm + maxk * g;
const Mat m = bottom_blob.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
float sum = 0.f;
if (bias_term)
{
sum = bias_data[g];
}
for (int y = 0; y < kernel_h; y++)
{
int sys = (i + y * dilation_h - (kernel_extent_h - 1));
if (sys < 0 || sys % stride_h != 0)
continue;
int sy = sys / stride_h;
if (sy >= h)
continue;
const float* sptr = m.row(sy);
for (int x = 0; x < kernel_w; x++)
{
int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
if (sxs < 0 || sxs % stride_w != 0)
continue;
int sx = sxs / stride_w;
if (sx >= w)
continue;
float val = sptr[sx];
int k = y * kernel_w + x;
float w = kptr[k];
sum += val * w;
}
}
sum = activation_ss(sum, activation_type, activation_params);
outptr[0] = sum;
outptr++;
}
}
}
}
}
else
{
// group deconvolution
const int channels_g = channels * elempack / group;
const int num_output_g = num_output / group;
int g_elempack = 1;
int out_g_elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
#if __AVX512F__
g_elempack = channels_g % 16 == 0 ? 16 : channels_g % 8 == 0 ? 8 : channels_g % 4 == 0 ? 4 : 1;
out_g_elempack = num_output_g % 16 == 0 ? 16 : num_output_g % 8 == 0 ? 8 : num_output_g % 4 == 0 ? 4 : 1;
#elif __AVX__
g_elempack = channels_g % 8 == 0 ? 8 : channels_g % 4 == 0 ? 4 : 1;
out_g_elempack = num_output_g % 8 == 0 ? 8 : num_output_g % 4 == 0 ? 4 : 1;
#else
g_elempack = channels_g % 4 == 0 ? 4 : 1;
out_g_elempack = num_output_g % 4 == 0 ? 4 : 1;
#endif
}
#endif // __SSE2__
// unpacking
Mat bottom_blob_unpacked = bottom_blob;
if (elempack > g_elempack)
{
Option opt_p = opt;
opt_p.blob_allocator = opt.workspace_allocator;
convert_packing(bottom_blob, bottom_blob_unpacked, g_elempack, opt_p);
}
Mat top_blob_bordered_unpacked = top_blob_bordered;
if (out_g_elempack < out_elempack)
{
top_blob_bordered_unpacked.create(outw, outh, num_output / out_g_elempack, out_elemsize / out_elempack * out_g_elempack, out_g_elempack, opt.workspace_allocator);
if (top_blob_bordered_unpacked.empty())
return -100;
}
for (int g = 0; g < group; g++)
{
const Mat bottom_blob_g = bottom_blob_unpacked.channel_range(channels_g * g / g_elempack, channels_g / g_elempack);
Mat top_blob_bordered_g = top_blob_bordered_unpacked.channel_range(num_output_g * g / out_g_elempack, num_output_g / out_g_elempack);
const ncnn::Layer* op = group_ops[g];
Option opt_g = opt;
opt_g.blob_allocator = top_blob_bordered_unpacked.allocator;
// forward
op->forward(bottom_blob_g, top_blob_bordered_g, opt_g);
}
// packing
if (out_g_elempack < out_elempack)
{
convert_packing(top_blob_bordered_unpacked, top_blob_bordered, out_elempack, opt);
}
else
{
top_blob_bordered = top_blob_bordered_unpacked;
}
}
cut_padding(top_blob_bordered, top_blob, opt);
if (top_blob.empty())
return -100;
return 0;
} | movq (%rdi), %rax
addq -0x48(%rax), %rdi
jmp 0x2d5c78
| /csukuangfj[P]ncnn/src/layer/x86/deconvolutiondepthwise_x86.cpp |
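The `virtual thunk to ...` rows are not separate C++ functions: with virtual inheritance the compiler emits small adjustor stubs that shift `this` by an offset read from the vtable (`addq -0x48(%rax), %rdi` in the asm above) and then tail-jump to the real `forward`. A toy illustration of a hierarchy that makes GCC/Clang emit such thunk symbols (hypothetical types, not ncnn's actual class layout):

```cpp
// Illustration only: virtual inheritance forces a per-call `this` adjustment,
// which the compiler materializes as a "virtual thunk to Derived::f()" symbol
// like the row name above. Hypothetical types, not ncnn's hierarchy.
struct Base
{
    virtual ~Base() {}
    virtual int f() const { return 0; }
};

struct Derived : virtual Base
{
    int f() const override { return 1; } // reached through Base* via a thunk
};

int call_f(const Base* b) { return b->f(); }
```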
ncnn::DeconvolutionDepthWise_x86_avx512::create_pipeline(ncnn::Option const&) | int DeconvolutionDepthWise_x86_avx512::create_pipeline(const Option& opt)
{
const int maxk = kernel_w * kernel_h;
int channels = (weight_data_size / group) / maxk / (num_output / group) * group;
// depth-wise
if (channels == group && group == num_output)
{
int elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
#if __AVX512F__
elempack = channels % 16 == 0 ? 16 : channels % 8 == 0 ? 8 : channels % 4 == 0 ? 4 : 1;
#elif __AVX__
elempack = channels % 8 == 0 ? 8 : channels % 4 == 0 ? 4 : 1;
#else
elempack = channels % 4 == 0 ? 4 : 1;
#endif
}
#endif // __SSE2__
Mat weight_data_transposed(weight_data.w);
{
float* pt = weight_data_transposed;
const float* p = weight_data;
for (int i = 0; i < (channels / group) * (num_output / group) * group; i++)
{
for (int k = 0; k < maxk; k++)
{
pt[maxk - 1 - k] = p[k];
}
p += maxk;
pt += maxk;
}
}
#if __SSE2__
#if __AVX__
// pack16
#if __AVX512F__
if (elempack == 16)
{
Mat weight_data_r2 = weight_data_transposed.reshape(maxk, group);
convert_packing(weight_data_r2, weight_data_tm, 16, opt);
}
#endif // __AVX512F__
// pack8
if (elempack == 8)
{
Mat weight_data_r2 = weight_data_transposed.reshape(maxk, group);
convert_packing(weight_data_r2, weight_data_tm, 8, opt);
}
#endif // __AVX__
// pack4
if (elempack == 4)
{
Mat weight_data_r2 = weight_data_transposed.reshape(maxk, group);
convert_packing(weight_data_r2, weight_data_tm, 4, opt);
}
#endif // __SSE2__
if (elempack == 1)
{
weight_data_tm = weight_data_transposed;
}
return 0;
}
// group deconvolution
create_group_ops(opt);
if (opt.lightmode)
{
weight_data.release();
}
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xa8, %rsp
movq %rsi, %r14
movq %rdi, %rbx
movq (%rdi), %rax
movq -0x18(%rax), %r8
movslq 0xd4(%rdi,%r8), %rax
movslq 0xd8(%rdi,%r8), %r15
imulq %rax, %r15
movl 0xd0(%rdi,%r8), %ecx
movl 0x110(%rdi,%r8), %eax
movl 0x114(%rdi,%r8), %ebp
cltd
idivl %ebp
cltd
idivl %r15d
movl %eax, %esi
movl %ecx, %eax
cltd
idivl %ebp
movl %eax, %edi
movl %esi, %eax
cltd
idivl %edi
cmpl %ecx, %ebp
jne 0x2d72aa
imull %ebp, %eax
cmpl %ebp, %eax
jne 0x2d72aa
cmpb $0x1, 0x27(%r14)
movq %r14, 0x10(%rsp)
jne 0x2d730c
testb $0xf, %bpl
je 0x2d7310
testb $0x7, %bpl
je 0x2d7314
xorl %eax, %eax
testb $0x3, %bpl
sete %al
leal (%rax,%rax,2), %eax
incl %eax
jmp 0x2d7317
movq %rbx, %rdi
movq %r14, %rsi
callq 0x2d7732
cmpb $0x1, (%r14)
jne 0x2d7626
movq (%rbx), %rax
movq -0x18(%rax), %rax
leaq (%rbx,%rax), %r14
addq %rax, %rbx
addq $0x168, %rbx # imm = 0x168
movq 0x8(%rbx), %rax
testq %rax, %rax
je 0x2d7577
lock
decl (%rax)
jne 0x2d7577
movq 0x168(%r14), %rsi
movq 0x188(%r14), %rdi
testq %rdi, %rdi
je 0x2d756f
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2d7577
pushq $0x1
jmp 0x2d7316
pushq $0x10
jmp 0x2d7316
pushq $0x8
popq %rax
movl %eax, 0xc(%rsp)
movl 0x194(%rbx,%r8), %esi
leaq 0x60(%rsp), %r12
andq $0x0, 0x40(%r12)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, (%r12)
vmovups %xmm0, 0xc(%r12)
vmovaps %xmm0, 0x20(%r12)
vmovups %xmm0, 0x2c(%r12)
xorl %r14d, %r14d
pushq $0x4
popq %rdx
movq %r12, %rdi
xorl %ecx, %ecx
callq 0x635fa
movq (%r12), %rdi
movq (%rbx), %r8
movq -0x18(%r8), %rcx
movq 0x168(%rbx,%rcx), %r9
testl %r15d, %r15d
movl $0x0, %r10d
cmovgl %r15d, %r10d
leal -0x1(%r15), %r11d
leaq (,%r15,4), %r13
movl 0xd0(%rbx,%rcx), %eax
movl 0x114(%rbx,%rcx), %ecx
cltd
idivl %ecx
movl %eax, %esi
movl %ebp, %eax
cltd
idivl %ecx
movl %ebp, %eax
subl %edx, %eax
imull %esi, %eax
cmpl %eax, %r14d
jge 0x2d73db
movl %r11d, %eax
xorl %ecx, %ecx
cmpq %rcx, %r10
je 0x2d73cb
vmovss (%r9,%rcx,4), %xmm0
cltq
vmovss %xmm0, (%rdi,%rax,4)
incq %rcx
decl %eax
jmp 0x2d73b2
leaq (%rdi,%r15,4), %rdi
incl %r14d
movq -0x18(%r8), %rcx
addq %r13, %r9
jmp 0x2d7389
movl 0xc(%rsp), %eax
cmpl $0x1, %eax
je 0x2d7525
cmpl $0x4, %eax
je 0x2d7465
cmpl $0x8, %eax
movq 0x10(%rsp), %r14
je 0x2d74c8
cmpl $0x10, %eax
jne 0x2d75f5
leaq 0x18(%rsp), %rdi
leaq 0x60(%rsp), %rsi
movl %r15d, %edx
xorl %r8d, %r8d
callq 0x62e4e
addq $0x20, %rbx
leaq 0x18(%rsp), %rdi
pushq $0x10
popq %rdx
movq %rbx, %rsi
movq %r14, %rcx
callq 0x64e3b
movq 0x20(%rsp), %rax
testq %rax, %rax
je 0x2d75f5
lock
decl (%rax)
jne 0x2d75f5
movq 0x18(%rsp), %rsi
movq 0x38(%rsp), %rdi
testq %rdi, %rdi
je 0x2d7562
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2d75f5
leaq 0x18(%rsp), %rdi
leaq 0x60(%rsp), %rsi
movl %r15d, %edx
xorl %r8d, %r8d
callq 0x62e4e
movq 0x10(%rsp), %rcx
addq $0x20, %rbx
leaq 0x18(%rsp), %rdi
pushq $0x4
popq %rdx
movq %rbx, %rsi
callq 0x64e3b
movq 0x20(%rsp), %rax
testq %rax, %rax
je 0x2d75f5
lock
decl (%rax)
jne 0x2d75f5
movq 0x18(%rsp), %rsi
movq 0x38(%rsp), %rdi
testq %rdi, %rdi
je 0x2d7562
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2d75f5
leaq 0x18(%rsp), %rdi
leaq 0x60(%rsp), %rsi
movl %r15d, %edx
xorl %r8d, %r8d
callq 0x62e4e
addq $0x20, %rbx
leaq 0x18(%rsp), %rdi
pushq $0x8
popq %rdx
movq %rbx, %rsi
movq %r14, %rcx
callq 0x64e3b
movq 0x20(%rsp), %rax
testq %rax, %rax
je 0x2d75f5
lock
decl (%rax)
jne 0x2d75f5
movq 0x18(%rsp), %rsi
movq 0x38(%rsp), %rdi
testq %rdi, %rdi
je 0x2d7562
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2d75f5
leaq 0x20(%rbx), %rax
cmpq %r12, %rax
je 0x2d75f5
movq 0x68(%rsp), %rax
testq %rax, %rax
je 0x2d753f
lock
incl (%rax)
movq 0x28(%rbx), %rax
testq %rax, %rax
je 0x2d75aa
lock
decl (%rax)
jne 0x2d75aa
movq 0x20(%rbx), %rsi
movq 0x40(%rbx), %rdi
testq %rdi, %rdi
je 0x2d75a2
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2d75aa
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2d75f5
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x1a8(%r14)
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, 0xc(%rbx)
vmovups %xmm0, (%rbx)
vmovups %xmm0, 0x190(%r14)
andl $0x0, 0x1a0(%r14)
jmp 0x2d7626
movq %rsi, %rdi
callq 0x5f3e0
vmovaps 0x60(%rsp), %xmm0
vmovups %xmm0, 0x20(%rbx)
movq 0x70(%rsp), %rax
movq %rax, 0x30(%rbx)
movl 0x78(%rsp), %eax
movl %eax, 0x38(%rbx)
movq 0x80(%rsp), %rax
movq %rax, 0x40(%rbx)
vmovups 0x88(%rsp), %xmm0
vmovups %xmm0, 0x48(%rbx)
movl 0x98(%rsp), %eax
movl %eax, 0x58(%rbx)
movq 0xa0(%rsp), %rax
movq %rax, 0x60(%rbx)
movq 0x68(%rsp), %rax
testq %rax, %rax
je 0x2d7626
lock
decl (%rax)
jne 0x2d7626
movq 0x60(%rsp), %rsi
movq 0x80(%rsp), %rdi
testq %rdi, %rdi
je 0x2d761e
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2d7626
movq %rsi, %rdi
callq 0x5f3e0
xorl %eax, %eax
addq $0xa8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x2d76ed
jmp 0x2d7729
jmp 0x2d7729
jmp 0x2d7729
jmp 0x2d7729
movq %rax, %rbx
movq 0x20(%rsp), %rax
testq %rax, %rax
je 0x2d76f0
lock
decl (%rax)
jne 0x2d76f0
movq 0x18(%rsp), %rsi
movq 0x38(%rsp), %rdi
testq %rdi, %rdi
je 0x2d76d5
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2d76f0
jmp 0x2d7729
movq %rax, %rbx
movq 0x20(%rsp), %rax
testq %rax, %rax
je 0x2d76f0
lock
decl (%rax)
jne 0x2d76f0
movq 0x18(%rsp), %rsi
movq 0x38(%rsp), %rdi
testq %rdi, %rdi
je 0x2d76d5
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2d76f0
jmp 0x2d7729
movq %rax, %rbx
movq 0x20(%rsp), %rax
testq %rax, %rax
je 0x2d76f0
lock
decl (%rax)
jne 0x2d76f0
movq 0x18(%rsp), %rsi
movq 0x38(%rsp), %rdi
testq %rdi, %rdi
jne 0x2d76df
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2d76f0
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2d76f0
jmp 0x2d7729
jmp 0x2d76ed
jmp 0x2d76ed
movq %rax, %rbx
movq 0x68(%rsp), %rax
testq %rax, %rax
je 0x2d7721
lock
decl (%rax)
jne 0x2d7721
movq 0x60(%rsp), %rsi
movq 0x80(%rsp), %rdi
testq %rdi, %rdi
jne 0x2d771b
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2d7721
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/deconvolutiondepthwise_x86_avx512.cpp |
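Both `create_pipeline` variants above choose the packing width with the same ternary cascade: take the widest SIMD lane count that evenly divides the channel count. The cascade compresses to a small helper (hypothetical name; an AVX-512 build is assumed, narrower builds simply drop the leading cases):

```cpp
// Sketch of the elempack choice in the rows above (hypothetical helper).
// AVX-512 build shown; an AVX build starts at 8, a bare SSE2 build at 4.
static int pick_elempack(int channels)
{
    if (channels % 16 == 0) return 16;
    if (channels % 8 == 0) return 8;
    if (channels % 4 == 0) return 4;
    return 1;
}
```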
ncnn::DeconvolutionDepthWise_x86_avx512::create_group_ops(ncnn::Option const&) | int DeconvolutionDepthWise_x86_avx512::create_group_ops(const Option& opt)
{
// create Deconvolution op for each group
const int maxk = kernel_w * kernel_h;
int channels = (weight_data_size / group) / maxk / (num_output / group) * group;
for (int i = 0; i < (int)group_ops.size(); i++)
delete group_ops[i];
group_ops.clear();
const int channels_g = channels / group;
const int num_output_g = num_output / group;
group_ops.resize(group);
for (int g = 0; g < group; g++)
{
Mat weight_data_g = weight_data.range(maxk * channels_g * num_output_g * g, maxk * channels_g * num_output_g).clone();
Mat bias_data_g;
if (bias_term)
bias_data_g = bias_data.range(num_output_g * g, num_output_g);
ncnn::Layer* op = ncnn::create_layer(ncnn::LayerType::Deconvolution);
// set param
ncnn::ParamDict pd;
pd.set(0, num_output_g); // num_output
pd.set(1, kernel_w);
pd.set(11, kernel_h);
pd.set(2, dilation_w);
pd.set(12, dilation_h);
pd.set(3, stride_w);
pd.set(13, stride_h);
pd.set(4, 0); // pad_w
pd.set(14, 0); // pad_h
pd.set(18, output_pad_right);
pd.set(19, output_pad_bottom);
pd.set(5, bias_term);
pd.set(6, maxk * channels_g * num_output_g); // weight_data_size
pd.set(9, activation_type);
pd.set(10, activation_params);
op->load_param(pd);
// set weights
if (bias_term)
{
ncnn::Mat weights[2];
weights[0] = weight_data_g;
weights[1] = bias_data_g;
op->load_model(ModelBinFromMatArray(weights));
}
else
{
ncnn::Mat weights[1];
weights[0] = weight_data_g;
op->load_model(ModelBinFromMatArray(weights));
}
op->create_pipeline(opt);
group_ops[g] = op;
}
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x158, %rsp # imm = 0x158
movq %rsi, 0x120(%rsp)
movq %rdi, %r14
movq (%rdi), %rax
movq -0x18(%rax), %rdx
movl 0xd0(%rdi,%rdx), %ecx
movl 0xd8(%rdi,%rdx), %ebp
imull 0xd4(%rdi,%rdx), %ebp
movl 0x110(%rdi,%rdx), %eax
movl 0x114(%rdi,%rdx), %r15d
cltd
idivl %r15d
cltd
idivl %ebp
movl %eax, %esi
movl %ecx, %eax
cltd
idivl %r15d
movl %eax, %ecx
movl %esi, %eax
cltd
idivl %ecx
movl %eax, %ebx
leaq 0x8(%rdi), %rax
movq %rax, 0xa8(%rsp)
xorl %r12d, %r12d
movq 0x8(%r14), %rax
movq 0x10(%r14), %rcx
movq %rcx, %rdx
subq %rax, %rdx
shrq $0x3, %rdx
movslq %edx, %rdx
cmpq %rdx, %r12
jge 0x2d77cf
movq (%rax,%r12,8), %rdi
testq %rdi, %rdi
je 0x2d77ca
movq (%rdi), %rax
callq *0x8(%rax)
incq %r12
jmp 0x2d77a1
imull %r15d, %ebx
cmpq %rax, %rcx
je 0x2d77dc
movq %rax, 0x10(%r14)
movq (%r14), %rax
movq -0x18(%rax), %rcx
movslq 0x114(%r14,%rcx), %rsi
movl %ebx, %eax
cltd
idivl %esi
movl %eax, %ebx
movl 0xd0(%r14,%rcx), %eax
cltd
idivl %esi
movl %eax, %r15d
movq 0xa8(%rsp), %rdi
callq 0x6fbc2
leaq 0x120(%r14), %rax
movq %rax, 0x128(%rsp)
imull %ebp, %ebx
imull %r15d, %ebx
movl %ebx, 0xa4(%rsp)
movslq %ebx, %rax
movq %rax, 0x130(%rsp)
movl %r15d, 0xc(%rsp)
movslq %r15d, %rax
movq %rax, 0x118(%rsp)
xorl %r12d, %r12d
leaq 0x148(%rsp), %r15
movq (%r14), %rax
movq -0x18(%rax), %rax
movslq 0x114(%r14,%rax), %rcx
cmpq %rcx, %r12
jge 0x2d7ef2
movq %r12, %rcx
movq 0x130(%rsp), %rdi
imulq %rdi, %rcx
movq 0x178(%r14,%rax), %rdx
imulq %rdx, %rcx
addq 0x168(%r14,%rax), %rcx
movl 0x180(%r14,%rax), %esi
movq 0x188(%r14,%rax), %rax
movq %rcx, 0x10(%rsp)
andq $0x0, 0x18(%rsp)
movq %rdx, 0x20(%rsp)
movl %esi, 0x28(%rsp)
movq %rax, 0x30(%rsp)
pushq $0x1
popq %rax
movl %eax, 0x38(%rsp)
movl %edi, 0x3c(%rsp)
movabsq $0x100000001, %rcx # imm = 0x100000001
movq %rcx, 0x40(%rsp)
movl %eax, 0x48(%rsp)
movq %rdi, 0x50(%rsp)
leaq 0xd0(%rsp), %rdi
leaq 0x10(%rsp), %rsi
xorl %edx, %edx
callq 0x624f0
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x2d7919
lock
decl (%rax)
jne 0x2d7919
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x2d7911
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2d7919
movq %rsi, %rdi
callq 0x5f3e0
movq (%r14), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0x10c(%r14,%rax)
je 0x2d798b
movq %r12, %r13
movq 0x118(%rsp), %rcx
imulq %rcx, %r13
movq 0x1c0(%r14,%rax), %rsi
movq %rsi, 0xc0(%rsp)
imulq %rsi, %r13
addq 0x1b0(%r14,%rax), %r13
movl 0x1c8(%r14,%rax), %edx
movl %edx, 0x8(%rsp)
movq 0x1d0(%r14,%rax), %rax
movq %rax, 0xb8(%rsp)
movl 0xc(%rsp), %eax
movq %rax, 0xc8(%rsp)
pushq $0x1
popq %rbx
movq %rcx, 0xb0(%rsp)
jmp 0x2d79c8
movq $0x0, 0xc8(%rsp)
xorl %ebx, %ebx
movq $0x0, 0xb8(%rsp)
movl $0x0, 0x8(%rsp)
movq $0x0, 0xc0(%rsp)
xorl %r13d, %r13d
movq $0x0, 0xb0(%rsp)
pushq $0x8
popq %rdi
callq 0x782bf
movq %rax, %rbp
movq %r15, %rdi
callq 0x71548
movq %r15, %rdi
xorl %esi, %esi
movl 0xc(%rsp), %edx
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xd4(%r14,%rax), %edx
movq %r15, %rdi
pushq $0x1
popq %rsi
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xd8(%r14,%rax), %edx
movq %r15, %rdi
pushq $0xb
popq %rsi
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xdc(%r14,%rax), %edx
movq %r15, %rdi
pushq $0x2
popq %rsi
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xe0(%r14,%rax), %edx
movq %r15, %rdi
pushq $0xc
popq %rsi
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xe4(%r14,%rax), %edx
movq %r15, %rdi
pushq $0x3
popq %rsi
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xe8(%r14,%rax), %edx
movq %r15, %rdi
pushq $0xd
popq %rsi
callq 0x7193a
movq %r15, %rdi
pushq $0x4
popq %rsi
xorl %edx, %edx
callq 0x7193a
movq %r15, %rdi
pushq $0xe
popq %rsi
xorl %edx, %edx
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xfc(%r14,%rax), %edx
movq %r15, %rdi
pushq $0x12
popq %rsi
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0x100(%r14,%rax), %edx
movq %r15, %rdi
pushq $0x13
popq %rsi
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0x10c(%r14,%rax), %edx
movq %r15, %rdi
pushq $0x5
popq %rsi
callq 0x7193a
movq %r15, %rdi
pushq $0x6
popq %rsi
movl 0xa4(%rsp), %edx
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0x118(%r14,%rax), %edx
movq %r15, %rdi
pushq $0x9
popq %rsi
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rdx
addq 0x128(%rsp), %rdx
movq %r15, %rdi
pushq $0xa
popq %rsi
callq 0x7196c
movq (%rbp), %rax
movq %rbp, %rdi
movq %r15, %rsi
callq *0x10(%rax)
movq (%r14), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0x10c(%r14,%rax)
je 0x2d7bd1
andq $0x0, 0x50(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x10(%rsp)
vmovups %xmm0, 0x1c(%rsp)
leaq 0x30(%rsp), %rax
vmovups %xmm0, 0xc(%rax)
vmovaps %xmm0, (%rax)
andq $0x0, 0x98(%rsp)
vmovups %xmm0, 0x34(%rax)
vmovups %xmm0, 0x28(%rax)
vmovups %xmm0, 0x54(%rax)
vmovups %xmm0, 0x48(%rax)
movq 0xd8(%rsp), %rax
testq %rax, %rax
je 0x2d7ba3
lock
incl (%rax)
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x2d7c4e
lock
decl (%rax)
jne 0x2d7c4e
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x2d7c46
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2d7c4e
andq $0x0, 0x50(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x10(%rsp)
vmovups %xmm0, 0x1c(%rsp)
leaq 0x30(%rsp), %rax
vmovups %xmm0, 0xc(%rax)
vmovaps %xmm0, (%rax)
movq 0xd8(%rsp), %rax
testq %rax, %rax
leaq 0x138(%rsp), %rbx
je 0x2d7e37
lock
incl (%rax)
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x2d7e37
lock
decl (%rax)
jne 0x2d7e37
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x2d7e2f
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2d7e37
movq %rsi, %rdi
callq 0x5f3e0
vmovaps 0xd0(%rsp), %xmm0
vmovaps %xmm0, 0x10(%rsp)
movq 0xe0(%rsp), %rax
movq %rax, 0x20(%rsp)
movl 0xe8(%rsp), %eax
movl %eax, 0x28(%rsp)
movq 0xf0(%rsp), %rax
movq %rax, 0x30(%rsp)
vmovups 0xf8(%rsp), %xmm0
vmovups %xmm0, 0x38(%rsp)
movl 0x108(%rsp), %eax
movl %eax, 0x48(%rsp)
movq 0x110(%rsp), %rax
movq %rax, 0x50(%rsp)
movq 0x60(%rsp), %rax
testq %rax, %rax
je 0x2d7cd7
lock
decl (%rax)
jne 0x2d7cd7
movq 0x58(%rsp), %rsi
movq 0x78(%rsp), %rdi
testq %rdi, %rdi
je 0x2d7ccf
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2d7cd7
movq %rsi, %rdi
callq 0x5f3e0
movq %r13, 0x58(%rsp)
andq $0x0, 0x60(%rsp)
movq 0xc0(%rsp), %rax
movq %rax, 0x68(%rsp)
movl 0x8(%rsp), %eax
movl %eax, 0x70(%rsp)
movq 0xb8(%rsp), %rax
movq %rax, 0x78(%rsp)
movl %ebx, 0x80(%rsp)
movq 0xc8(%rsp), %rax
movl %eax, 0x84(%rsp)
movl %ebx, 0x88(%rsp)
movl %ebx, 0x8c(%rsp)
movl %ebx, 0x90(%rsp)
movq 0xb0(%rsp), %rax
movq %rax, 0x98(%rsp)
leaq 0x138(%rsp), %rbx
movq %rbx, %rdi
leaq 0x10(%rsp), %rsi
callq 0x6b00e
movq (%rbp), %rax
movq %rbp, %rdi
movq %rbx, %rsi
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x6b03a
pushq $0x48
popq %rbx
movq 0x18(%rsp,%rbx), %rax
testq %rax, %rax
je 0x2d7d9a
lock
decl (%rax)
jne 0x2d7d9a
movq 0x10(%rsp,%rbx), %rsi
movq 0x30(%rsp,%rbx), %rdi
testq %rdi, %rdi
je 0x2d7d92
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2d7d9a
movq %rsi, %rdi
callq 0x5f3e0
leaq (%rsp,%rbx), %rax
addq $0x10, %rax
andq $0x0, 0x40(%rax)
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, 0xc(%rax)
vmovups %xmm0, (%rax)
andl $0x0, 0x38(%rax)
vmovups %xmm0, 0x28(%rax)
addq $-0x48, %rbx
cmpq $-0x48, %rbx
jne 0x2d7d6c
movq (%rbp), %rax
movq %rbp, %rdi
movq 0x120(%rsp), %rsi
callq *0x20(%rax)
movq 0xa8(%rsp), %rax
movq (%rax), %rax
movq %rbp, (%rax,%r12,8)
movq %r15, %rdi
callq 0x71614
movq 0xd8(%rsp), %rax
testq %rax, %rax
je 0x2d7e27
lock
decl (%rax)
jne 0x2d7e27
movq 0xd0(%rsp), %rsi
movq 0xf0(%rsp), %rdi
testq %rdi, %rdi
je 0x2d7e1f
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2d7e27
movq %rsi, %rdi
callq 0x5f3e0
incq %r12
jmp 0x2d7850
movq %rsi, %rdi
callq 0x5f3e0
vmovaps 0xd0(%rsp), %xmm0
vmovaps %xmm0, 0x10(%rsp)
movq 0xe0(%rsp), %rax
movq %rax, 0x20(%rsp)
movl 0xe8(%rsp), %eax
movl %eax, 0x28(%rsp)
movq 0xf0(%rsp), %rax
movq %rax, 0x30(%rsp)
vmovups 0xf8(%rsp), %xmm0
vmovups %xmm0, 0x38(%rsp)
movl 0x108(%rsp), %eax
movl %eax, 0x48(%rsp)
movq 0x110(%rsp), %rax
movq %rax, 0x50(%rsp)
movq %rbx, %rdi
leaq 0x10(%rsp), %rsi
callq 0x6b00e
movq (%rbp), %rax
movq %rbp, %rdi
movq %rbx, %rsi
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x6b03a
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x2d7dc7
lock
decl (%rax)
jne 0x2d7dc7
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x2d7ee5
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2d7dc7
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2d7dc7
xorl %eax, %eax
addq $0x158, %rsp # imm = 0x158
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x2d7f19
jmp 0x2d8077
jmp 0x2d8077
jmp 0x2d8077
jmp 0x2d7f83
movq %rax, %rbx
jmp 0x2d7f2e
movq %rax, %rbx
leaq 0x138(%rsp), %rdi
callq 0x6b03a
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x2d802b
lock
decl (%rax)
jne 0x2d802b
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
jne 0x2d7f61
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2d802b
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2d802b
jmp 0x2d8077
movq %rax, %rbx
leaq 0x138(%rsp), %rdi
callq 0x6b03a
jmp 0x2d7f86
movq %rax, %rbx
pushq $0x48
popq %r14
vxorps %xmm0, %xmm0, %xmm0
movq 0x18(%rsp,%r14), %rax
testq %rax, %rax
je 0x2d7fc4
lock
decl (%rax)
jne 0x2d7fc4
movq 0x10(%rsp,%r14), %rsi
movq 0x30(%rsp,%r14), %rdi
testq %rdi, %rdi
je 0x2d7fb8
movq (%rdi), %rax
callq *0x18(%rax)
vxorps %xmm0, %xmm0, %xmm0
jmp 0x2d7fc4
movq %rsi, %rdi
callq 0x5f3e0
vxorps %xmm0, %xmm0, %xmm0
leaq (%rsp,%r14), %rax
addq $0x10, %rax
andq $0x0, 0x40(%rax)
vmovups %xmm0, 0xc(%rax)
vmovups %xmm0, (%rax)
andl $0x0, 0x38(%rax)
vmovups %xmm0, 0x28(%rax)
addq $-0x48, %r14
cmpq $-0x48, %r14
jne 0x2d7f8e
jmp 0x2d802b
jmp 0x2d8077
jmp 0x2d8021
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x2d806f
lock
decl (%rax)
jne 0x2d806f
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x2d805f
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2d806f
jmp 0x2d8077
movq %rax, %rbx
jmp 0x2d8038
jmp 0x2d8077
movq %rax, %rbx
leaq 0x148(%rsp), %rdi
callq 0x71614
movq 0xd8(%rsp), %rax
testq %rax, %rax
je 0x2d806f
lock
decl (%rax)
jne 0x2d806f
movq 0xd0(%rsp), %rsi
movq 0xf0(%rsp), %rdi
testq %rdi, %rdi
jne 0x2d8069
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2d806f
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/deconvolutiondepthwise_x86_avx512.cpp |
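The long listing above loops over group, slicing each group's weights out of the flat weight_data blob and standing up a per-group sub-op through a ParamDict. Below is a minimal C++ reconstruction of one iteration of that loop, a sketch only: the key numbers and member meanings are read off the assembly, and the slicing bounds and helper usage are inferred, not copied from the source.

// one iteration of the per-group construction loop (sketch; channels_g,
// num_output_g and maxk are the usual per-group sizes)
ncnn::Mat weight_data_g = weight_data.range(maxk * channels_g * num_output_g * g,
                                            maxk * channels_g * num_output_g);
ncnn::Layer* op = ncnn::create_layer(8); // 8 taken to be Deconvolution (assumption)
ncnn::ParamDict pd;
pd.set(0, num_output_g);   // output channels of the sub-op
pd.set(1, kernel_w);
pd.set(11, kernel_h);
pd.set(2, dilation_w);
pd.set(12, dilation_h);
pd.set(3, stride_w);
pd.set(13, stride_h);
pd.set(4, 0);              // no pad inside the sub-op; padding is cut afterwards
pd.set(14, 0);
pd.set(18, output_pad_right);
pd.set(19, output_pad_bottom);
pd.set(5, bias_term);
pd.set(6, maxk * channels_g * num_output_g); // per-group weight count
pd.set(9, activation_type);
pd.set(10, activation_params);
op->load_param(pd);
ncnn::Mat weights[2];
weights[0] = weight_data_g;
weights[1] = bias_term ? bias_data.range(num_output_g * g, num_output_g) : ncnn::Mat();
op->load_model(ncnn::ModelBinFromMatArray(weights));
op->create_pipeline(opt);
group_ops[g] = op;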
virtual thunk to ncnn::DeconvolutionDepthWise_x86_avx512::create_pipeline(ncnn::Option const&) | int DeconvolutionDepthWise_x86_avx512::create_pipeline(const Option& opt)
{
const int maxk = kernel_w * kernel_h;
int channels = (weight_data_size / group) / maxk / (num_output / group) * group;
// depth-wise
if (channels == group && group == num_output)
{
int elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
#if __AVX512F__
elempack = channels % 16 == 0 ? 16 : channels % 8 == 0 ? 8 : channels % 4 == 0 ? 4 : 1;
#elif __AVX__
elempack = channels % 8 == 0 ? 8 : channels % 4 == 0 ? 4 : 1;
#else
elempack = channels % 4 == 0 ? 4 : 1;
#endif
}
#endif // __SSE2__
Mat weight_data_transposed(weight_data.w);
{
float* pt = weight_data_transposed;
const float* p = weight_data;
for (int i = 0; i < (channels / group) * (num_output / group) * group; i++)
{
for (int k = 0; k < maxk; k++)
{
pt[maxk - 1 - k] = p[k];
}
p += maxk;
pt += maxk;
}
}
#if __SSE2__
#if __AVX__
// pack16
#if __AVX512F__
if (elempack == 16)
{
Mat weight_data_r2 = weight_data_transposed.reshape(maxk, group);
convert_packing(weight_data_r2, weight_data_tm, 16, opt);
}
#endif // __AVX512F__
// pack8
if (elempack == 8)
{
Mat weight_data_r2 = weight_data_transposed.reshape(maxk, group);
convert_packing(weight_data_r2, weight_data_tm, 8, opt);
}
#endif // __AVX__
// pack4
if (elempack == 4)
{
Mat weight_data_r2 = weight_data_transposed.reshape(maxk, group);
convert_packing(weight_data_r2, weight_data_tm, 4, opt);
}
#endif // __SSE2__
if (elempack == 1)
{
weight_data_tm = weight_data_transposed;
}
return 0;
}
// group deconvolution
create_group_ops(opt);
if (opt.lightmode)
{
weight_data.release();
}
return 0;
} | pushq %rax
movq (%rdi), %rax
addq -0x30(%rax), %rdi
callq 0x2d7218
xorl %eax, %eax
popq %rcx
retq
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/deconvolutiondepthwise_x86_avx512.cpp |
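The body of create_pipeline above is dominated by one transform: every maxk-long kernel window is written out reversed. A standalone sketch of just that tap flip follows; the helper name is illustrative, not ncnn API.

// 180-degree tap reversal performed by create_pipeline: each maxk-long
// kernel is written back to front into the transposed buffer
static void flip_kernels(const float* p, float* pt, int nkernels, int maxk)
{
    for (int i = 0; i < nkernels; i++)
    {
        for (int k = 0; k < maxk; k++)
            pt[maxk - 1 - k] = p[k];
        p += maxk;  // next source kernel
        pt += maxk; // next destination kernel
    }
}

Flipping here is what lets forward() walk the taps in plain ascending order while still computing a true deconvolution.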
ncnn::DeconvolutionDepthWise_x86_avx512::destroy_pipeline(ncnn::Option const&) | int DeconvolutionDepthWise_x86_avx512::destroy_pipeline(const Option& opt)
{
for (int i = 0; i < (int)group_ops.size(); i++)
{
group_ops[i]->destroy_pipeline(opt);
delete group_ops[i];
}
group_ops.clear();
return 0;
} | pushq %r15
pushq %r14
pushq %rbx
movq %rsi, %r14
movq %rdi, %rbx
xorl %r15d, %r15d
movq 0x8(%rbx), %rax
movq 0x10(%rbx), %rcx
movq %rcx, %rdx
subq %rax, %rdx
shrq $0x3, %rdx
movslq %edx, %rdx
cmpq %rdx, %r15
jge 0x2d80df
movq (%rax,%r15,8), %rdi
movq (%rdi), %rax
movq %r14, %rsi
callq *0x28(%rax)
movq 0x8(%rbx), %rax
movq (%rax,%r15,8), %rdi
testq %rdi, %rdi
je 0x2d80da
movq (%rdi), %rax
callq *0x8(%rax)
incq %r15
jmp 0x2d80a0
cmpq %rax, %rcx
je 0x2d80e8
movq %rax, 0x10(%rbx)
xorl %eax, %eax
popq %rbx
popq %r14
popq %r15
retq
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/deconvolutiondepthwise_x86_avx512.cpp |
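Worth noting about the teardown above: group_ops holds raw Layer pointers allocated during pipeline creation, so each element needs both a destroy_pipeline call and a delete before the vector is cleared; clear() alone would leak the sub-ops.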
virtual thunk to ncnn::DeconvolutionDepthWise_x86_avx512::forward(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const | int DeconvolutionDepthWise_x86_avx512::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
// convolve with NxN kernel
// value = value + bias
int w = bottom_blob.w;
int h = bottom_blob.h;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;
int outw = (w - 1) * stride_w + kernel_extent_w + output_pad_right;
int outh = (h - 1) * stride_h + kernel_extent_h + output_pad_bottom;
int out_elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
#if __AVX512F__
out_elempack = num_output % 16 == 0 ? 16 : num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#elif __AVX__
out_elempack = num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#else
out_elempack = num_output % 4 == 0 ? 4 : 1;
#endif
}
#endif // __SSE2__
size_t out_elemsize = elemsize / elempack * out_elempack;
Mat top_blob_bordered;
if (pad_left > 0 || pad_right > 0 || pad_top > 0 || pad_bottom > 0 || (output_w > 0 && output_h > 0))
{
top_blob_bordered.create(outw, outh, num_output / out_elempack, out_elemsize, out_elempack, opt.workspace_allocator);
}
else
{
top_blob_bordered = top_blob;
top_blob_bordered.create(outw, outh, num_output / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
}
if (top_blob_bordered.empty())
return -100;
const int maxk = kernel_w * kernel_h;
// depth-wise
if (channels * elempack == group && group == num_output)
{
#if __SSE2__
#if __AVX__
#if __AVX512F__
if (elempack == 16)
{
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < channels; g++)
{
float* outptr = top_blob_bordered.channel(g);
const float* kptr = (const float*)weight_data_tm + maxk * g * 16;
const Mat m = bottom_blob.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__m512 _sum = _mm512_setzero_ps();
if (bias_term)
{
_sum = _mm512_loadu_ps((const float*)bias_data + g * 16);
}
for (int y = 0; y < kernel_h; y++)
{
int sys = (i + y * dilation_h - (kernel_extent_h - 1));
if (sys < 0 || sys % stride_h != 0)
continue;
int sy = sys / stride_h;
if (sy >= h)
continue;
for (int x = 0; x < kernel_w; x++)
{
int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
if (sxs < 0 || sxs % stride_w != 0)
continue;
int sx = sxs / stride_w;
if (sx >= w)
continue;
const float* sptr = m.row(sy) + sx * 16;
int k = y * kernel_w + x;
__m512 _val = _mm512_loadu_ps(sptr);
__m512 _w = _mm512_loadu_ps(kptr + k * 16);
_sum = _mm512_fmadd_ps(_val, _w, _sum);
}
}
_sum = activation_avx512(_sum, activation_type, activation_params);
_mm512_storeu_ps(outptr, _sum);
outptr += 16;
}
}
}
}
}
#endif // __AVX512F__
if (elempack == 8)
{
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < channels; g++)
{
float* outptr = top_blob_bordered.channel(g);
const float* kptr = (const float*)weight_data_tm + maxk * g * 8;
const Mat m = bottom_blob.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__m256 _sum = _mm256_setzero_ps();
if (bias_term)
{
_sum = _mm256_loadu_ps((const float*)bias_data + g * 8);
}
for (int y = 0; y < kernel_h; y++)
{
int sys = (i + y * dilation_h - (kernel_extent_h - 1));
if (sys < 0 || sys % stride_h != 0)
continue;
int sy = sys / stride_h;
if (sy >= h)
continue;
for (int x = 0; x < kernel_w; x++)
{
int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
if (sxs < 0 || sxs % stride_w != 0)
continue;
int sx = sxs / stride_w;
if (sx >= w)
continue;
const float* sptr = m.row(sy) + sx * 8;
int k = y * kernel_w + x;
__m256 _val = _mm256_loadu_ps(sptr);
__m256 _w = _mm256_loadu_ps(kptr + k * 8);
_sum = _mm256_comp_fmadd_ps(_val, _w, _sum);
}
}
_sum = activation_avx(_sum, activation_type, activation_params);
_mm256_storeu_ps(outptr, _sum);
outptr += 8;
}
}
}
}
}
#endif // __AVX__
if (elempack == 4)
{
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < channels; g++)
{
float* outptr = top_blob_bordered.channel(g);
const float* kptr = (const float*)weight_data_tm + maxk * g * 4;
const Mat m = bottom_blob.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__m128 _sum = _mm_setzero_ps();
if (bias_term)
{
_sum = _mm_loadu_ps((const float*)bias_data + g * 4);
}
for (int y = 0; y < kernel_h; y++)
{
int sys = (i + y * dilation_h - (kernel_extent_h - 1));
if (sys < 0 || sys % stride_h != 0)
continue;
int sy = sys / stride_h;
if (sy >= h)
continue;
for (int x = 0; x < kernel_w; x++)
{
int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
if (sxs < 0 || sxs % stride_w != 0)
continue;
int sx = sxs / stride_w;
if (sx >= w)
continue;
const float* sptr = m.row(sy) + sx * 4;
int k = y * kernel_w + x;
__m128 _val = _mm_loadu_ps(sptr);
__m128 _w = _mm_loadu_ps(kptr + k * 4);
_sum = _mm_comp_fmadd_ps(_val, _w, _sum);
}
}
_sum = activation_sse(_sum, activation_type, activation_params);
_mm_storeu_ps(outptr, _sum);
outptr += 4;
}
}
}
}
}
#endif // __SSE2__
if (elempack == 1)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < channels; g++)
{
float* outptr = top_blob_bordered.channel(g);
const float* kptr = (const float*)weight_data_tm + maxk * g;
const Mat m = bottom_blob.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
float sum = 0.f;
if (bias_term)
{
sum = bias_data[g];
}
for (int y = 0; y < kernel_h; y++)
{
int sys = (i + y * dilation_h - (kernel_extent_h - 1));
if (sys < 0 || sys % stride_h != 0)
continue;
int sy = sys / stride_h;
if (sy >= h)
continue;
const float* sptr = m.row(sy);
for (int x = 0; x < kernel_w; x++)
{
int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
if (sxs < 0 || sxs % stride_w != 0)
continue;
int sx = sxs / stride_w;
if (sx >= w)
continue;
float val = sptr[sx];
int k = y * kernel_w + x;
float w = kptr[k];
sum += val * w;
}
}
sum = activation_ss(sum, activation_type, activation_params);
outptr[0] = sum;
outptr++;
}
}
}
}
}
else
{
// group deconvolution
const int channels_g = channels * elempack / group;
const int num_output_g = num_output / group;
int g_elempack = 1;
int out_g_elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
#if __AVX512F__
g_elempack = channels_g % 16 == 0 ? 16 : channels_g % 8 == 0 ? 8 : channels_g % 4 == 0 ? 4 : 1;
out_g_elempack = num_output_g % 16 == 0 ? 16 : num_output_g % 8 == 0 ? 8 : num_output_g % 4 == 0 ? 4 : 1;
#elif __AVX__
g_elempack = channels_g % 8 == 0 ? 8 : channels_g % 4 == 0 ? 4 : 1;
out_g_elempack = num_output_g % 8 == 0 ? 8 : num_output_g % 4 == 0 ? 4 : 1;
#else
g_elempack = channels_g % 4 == 0 ? 4 : 1;
out_g_elempack = num_output_g % 4 == 0 ? 4 : 1;
#endif
}
#endif // __SSE2__
// unpacking
Mat bottom_blob_unpacked = bottom_blob;
if (elempack > g_elempack)
{
Option opt_p = opt;
opt_p.blob_allocator = opt.workspace_allocator;
convert_packing(bottom_blob, bottom_blob_unpacked, g_elempack, opt_p);
}
Mat top_blob_bordered_unpacked = top_blob_bordered;
if (out_g_elempack < out_elempack)
{
top_blob_bordered_unpacked.create(outw, outh, num_output / out_g_elempack, out_elemsize / out_elempack * out_g_elempack, out_g_elempack, opt.workspace_allocator);
if (top_blob_bordered_unpacked.empty())
return -100;
}
for (int g = 0; g < group; g++)
{
const Mat bottom_blob_g = bottom_blob_unpacked.channel_range(channels_g * g / g_elempack, channels_g / g_elempack);
Mat top_blob_bordered_g = top_blob_bordered_unpacked.channel_range(num_output_g * g / out_g_elempack, num_output_g / out_g_elempack);
const ncnn::Layer* op = group_ops[g];
Option opt_g = opt;
opt_g.blob_allocator = top_blob_bordered_unpacked.allocator;
// forward
op->forward(bottom_blob_g, top_blob_bordered_g, opt_g);
}
// packing
if (out_g_elempack < out_elempack)
{
convert_packing(top_blob_bordered_unpacked, top_blob_bordered, out_elempack, opt);
}
else
{
top_blob_bordered = top_blob_bordered_unpacked;
}
}
cut_padding(top_blob_bordered, top_blob, opt);
if (top_blob.empty())
return -100;
return 0;
} | movq (%rdi), %rax
addq -0x48(%rax), %rdi
jmp 0x2d8102
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/deconvolutiondepthwise_x86_avx512.cpp |
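A quick worked form of the output-size arithmetic at the top of forward() above; the helper name is illustrative, not part of ncnn.

// deconvolution output extent along one axis, mirroring the outw/outh lines
static int deconv_out_size(int in, int kernel, int dilation, int stride, int output_pad)
{
    const int kernel_extent = dilation * (kernel - 1) + 1; // dilated kernel span
    return (in - 1) * stride + kernel_extent + output_pad;
}
// e.g. in = 4, kernel = 3, dilation = 1, stride = 2, output_pad = 0:
//      kernel_extent = 3, out = (4 - 1) * 2 + 3 + 0 = 9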
virtual thunk to ncnn::DeconvolutionDepthWise_x86_fma::create_pipeline(ncnn::Option const&) | int DeconvolutionDepthWise_x86_fma::create_pipeline(const Option& opt)
{
const int maxk = kernel_w * kernel_h;
int channels = (weight_data_size / group) / maxk / (num_output / group) * group;
// depth-wise
if (channels == group && group == num_output)
{
int elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
#if __AVX512F__
elempack = channels % 16 == 0 ? 16 : channels % 8 == 0 ? 8 : channels % 4 == 0 ? 4 : 1;
#elif __AVX__
elempack = channels % 8 == 0 ? 8 : channels % 4 == 0 ? 4 : 1;
#else
elempack = channels % 4 == 0 ? 4 : 1;
#endif
}
#endif // __SSE2__
Mat weight_data_transposed(weight_data.w);
{
float* pt = weight_data_transposed;
const float* p = weight_data;
for (int i = 0; i < (channels / group) * (num_output / group) * group; i++)
{
for (int k = 0; k < maxk; k++)
{
pt[maxk - 1 - k] = p[k];
}
p += maxk;
pt += maxk;
}
}
#if __SSE2__
#if __AVX__
// pack16
#if __AVX512F__
if (elempack == 16)
{
Mat weight_data_r2 = weight_data_transposed.reshape(maxk, group);
convert_packing(weight_data_r2, weight_data_tm, 16, opt);
}
#endif // __AVX512F__
// pack8
if (elempack == 8)
{
Mat weight_data_r2 = weight_data_transposed.reshape(maxk, group);
convert_packing(weight_data_r2, weight_data_tm, 8, opt);
}
#endif // __AVX__
// pack4
if (elempack == 4)
{
Mat weight_data_r2 = weight_data_transposed.reshape(maxk, group);
convert_packing(weight_data_r2, weight_data_tm, 4, opt);
}
#endif // __SSE2__
if (elempack == 1)
{
weight_data_tm = weight_data_transposed;
}
return 0;
}
// group deconvolution
create_group_ops(opt);
if (opt.lightmode)
{
weight_data.release();
}
return 0;
} | pushq %rax
movq (%rdi), %rax
addq -0x30(%rax), %rdi
callq 0x2da31c
xorl %eax, %eax
popq %rcx
retq
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/deconvolutiondepthwise_x86_fma.cpp |
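When elempack becomes 8, the reshaped (maxk x group) weight mat is run through convert_packing, which interleaves eight consecutive rows into one row of 8-float elements so a single AVX load fetches the same tap for eight channels. A scalar sketch of that layout change, assuming group % 8 == 0 and tight flat storage on both sides:

// scalar equivalent of convert_packing(src, dst, 8, opt) on a maxk x group mat
static void pack8_rows(const float* unpacked, float* packed, int maxk, int group)
{
    for (int g = 0; g < group / 8; g++)
        for (int k = 0; k < maxk; k++)
            for (int l = 0; l < 8; l++)
                packed[(g * maxk + k) * 8 + l] = unpacked[(g * 8 + l) * maxk + k];
}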
ncnn::DeconvolutionDepthWise_x86_fma::destroy_pipeline(ncnn::Option const&) | int DeconvolutionDepthWise_x86_fma::destroy_pipeline(const Option& opt)
{
for (int i = 0; i < (int)group_ops.size(); i++)
{
group_ops[i]->destroy_pipeline(opt);
delete group_ops[i];
}
group_ops.clear();
return 0;
} | pushq %r15
pushq %r14
pushq %rbx
movq %rsi, %r14
movq %rdi, %rbx
xorl %r15d, %r15d
movq 0x8(%rbx), %rax
movq 0x10(%rbx), %rcx
movq %rcx, %rdx
subq %rax, %rdx
shrq $0x3, %rdx
movslq %edx, %rdx
cmpq %rdx, %r15
jge 0x2db125
movq (%rax,%r15,8), %rdi
movq (%rdi), %rax
movq %r14, %rsi
callq *0x28(%rax)
movq 0x8(%rbx), %rax
movq (%rax,%r15,8), %rdi
testq %rdi, %rdi
je 0x2db120
movq (%rdi), %rax
callq *0x8(%rax)
incq %r15
jmp 0x2db0e6
cmpq %rax, %rcx
je 0x2db12e
movq %rax, 0x10(%rbx)
xorl %eax, %eax
popq %rbx
popq %r14
popq %r15
retq
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/deconvolutiondepthwise_x86_fma.cpp |
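The depthwise loops in the forward() listings run deconvolution as a gather: for each output pixel they invert the usual scatter relation to find which input pixel feeds kernel tap (y, x). A one-axis sketch of that sys/sy test (illustrative helper, matching the source logic above):

// output row i receives input row sy through tap y exactly when
//   i == sy * stride_h - y * dilation_h + (kernel_extent_h - 1)
// so the code inverts that and rejects off-stride or out-of-range hits
static bool tap_contributes(int i, int y, int stride_h, int dilation_h,
                            int kernel_extent_h, int h)
{
    int sys = i + y * dilation_h - (kernel_extent_h - 1);
    return sys >= 0 && sys % stride_h == 0 && (sys / stride_h) < h;
}

Because create_pipeline already reversed the taps, kptr can still be walked forward through the flipped kernel.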
ncnn::DeconvolutionDepthWise_x86_fma::forward(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const | int DeconvolutionDepthWise_x86_fma::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
// convolve with NxN kernel
// value = value + bias
int w = bottom_blob.w;
int h = bottom_blob.h;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;
int outw = (w - 1) * stride_w + kernel_extent_w + output_pad_right;
int outh = (h - 1) * stride_h + kernel_extent_h + output_pad_bottom;
int out_elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
#if __AVX512F__
out_elempack = num_output % 16 == 0 ? 16 : num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#elif __AVX__
out_elempack = num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#else
out_elempack = num_output % 4 == 0 ? 4 : 1;
#endif
}
#endif // __SSE2__
size_t out_elemsize = elemsize / elempack * out_elempack;
Mat top_blob_bordered;
if (pad_left > 0 || pad_right > 0 || pad_top > 0 || pad_bottom > 0 || (output_w > 0 && output_h > 0))
{
top_blob_bordered.create(outw, outh, num_output / out_elempack, out_elemsize, out_elempack, opt.workspace_allocator);
}
else
{
top_blob_bordered = top_blob;
top_blob_bordered.create(outw, outh, num_output / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
}
if (top_blob_bordered.empty())
return -100;
const int maxk = kernel_w * kernel_h;
// depth-wise
if (channels * elempack == group && group == num_output)
{
#if __SSE2__
#if __AVX__
#if __AVX512F__
if (elempack == 16)
{
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < channels; g++)
{
float* outptr = top_blob_bordered.channel(g);
const float* kptr = (const float*)weight_data_tm + maxk * g * 16;
const Mat m = bottom_blob.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__m512 _sum = _mm512_setzero_ps();
if (bias_term)
{
_sum = _mm512_loadu_ps((const float*)bias_data + g * 16);
}
for (int y = 0; y < kernel_h; y++)
{
int sys = (i + y * dilation_h - (kernel_extent_h - 1));
if (sys < 0 || sys % stride_h != 0)
continue;
int sy = sys / stride_h;
if (sy >= h)
continue;
for (int x = 0; x < kernel_w; x++)
{
int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
if (sxs < 0 || sxs % stride_w != 0)
continue;
int sx = sxs / stride_w;
if (sx >= w)
continue;
const float* sptr = m.row(sy) + sx * 16;
int k = y * kernel_w + x;
__m512 _val = _mm512_loadu_ps(sptr);
__m512 _w = _mm512_loadu_ps(kptr + k * 16);
_sum = _mm512_fmadd_ps(_val, _w, _sum);
}
}
_sum = activation_avx512(_sum, activation_type, activation_params);
_mm512_storeu_ps(outptr, _sum);
outptr += 16;
}
}
}
}
}
#endif // __AVX512F__
if (elempack == 8)
{
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < channels; g++)
{
float* outptr = top_blob_bordered.channel(g);
const float* kptr = (const float*)weight_data_tm + maxk * g * 8;
const Mat m = bottom_blob.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__m256 _sum = _mm256_setzero_ps();
if (bias_term)
{
_sum = _mm256_loadu_ps((const float*)bias_data + g * 8);
}
for (int y = 0; y < kernel_h; y++)
{
int sys = (i + y * dilation_h - (kernel_extent_h - 1));
if (sys < 0 || sys % stride_h != 0)
continue;
int sy = sys / stride_h;
if (sy >= h)
continue;
for (int x = 0; x < kernel_w; x++)
{
int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
if (sxs < 0 || sxs % stride_w != 0)
continue;
int sx = sxs / stride_w;
if (sx >= w)
continue;
const float* sptr = m.row(sy) + sx * 8;
int k = y * kernel_w + x;
__m256 _val = _mm256_loadu_ps(sptr);
__m256 _w = _mm256_loadu_ps(kptr + k * 8);
_sum = _mm256_comp_fmadd_ps(_val, _w, _sum);
}
}
_sum = activation_avx(_sum, activation_type, activation_params);
_mm256_storeu_ps(outptr, _sum);
outptr += 8;
}
}
}
}
}
#endif // __AVX__
if (elempack == 4)
{
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < channels; g++)
{
float* outptr = top_blob_bordered.channel(g);
const float* kptr = (const float*)weight_data_tm + maxk * g * 4;
const Mat m = bottom_blob.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__m128 _sum = _mm_setzero_ps();
if (bias_term)
{
_sum = _mm_loadu_ps((const float*)bias_data + g * 4);
}
for (int y = 0; y < kernel_h; y++)
{
int sys = (i + y * dilation_h - (kernel_extent_h - 1));
if (sys < 0 || sys % stride_h != 0)
continue;
int sy = sys / stride_h;
if (sy >= h)
continue;
for (int x = 0; x < kernel_w; x++)
{
int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
if (sxs < 0 || sxs % stride_w != 0)
continue;
int sx = sxs / stride_w;
if (sx >= w)
continue;
const float* sptr = m.row(sy) + sx * 4;
int k = y * kernel_w + x;
__m128 _val = _mm_loadu_ps(sptr);
__m128 _w = _mm_loadu_ps(kptr + k * 4);
_sum = _mm_comp_fmadd_ps(_val, _w, _sum);
}
}
_sum = activation_sse(_sum, activation_type, activation_params);
_mm_storeu_ps(outptr, _sum);
outptr += 4;
}
}
}
}
}
#endif // __SSE2__
if (elempack == 1)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < channels; g++)
{
float* outptr = top_blob_bordered.channel(g);
const float* kptr = (const float*)weight_data_tm + maxk * g;
const Mat m = bottom_blob.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
float sum = 0.f;
if (bias_term)
{
sum = bias_data[g];
}
for (int y = 0; y < kernel_h; y++)
{
int sys = (i + y * dilation_h - (kernel_extent_h - 1));
if (sys < 0 || sys % stride_h != 0)
continue;
int sy = sys / stride_h;
if (sy >= h)
continue;
const float* sptr = m.row(sy);
for (int x = 0; x < kernel_w; x++)
{
int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
if (sxs < 0 || sxs % stride_w != 0)
continue;
int sx = sxs / stride_w;
if (sx >= w)
continue;
float val = sptr[sx];
int k = y * kernel_w + x;
float w = kptr[k];
sum += val * w;
}
}
sum = activation_ss(sum, activation_type, activation_params);
outptr[0] = sum;
outptr++;
}
}
}
}
}
else
{
// group deconvolution
const int channels_g = channels * elempack / group;
const int num_output_g = num_output / group;
int g_elempack = 1;
int out_g_elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
#if __AVX512F__
g_elempack = channels_g % 16 == 0 ? 16 : channels_g % 8 == 0 ? 8 : channels_g % 4 == 0 ? 4 : 1;
out_g_elempack = num_output_g % 16 == 0 ? 16 : num_output_g % 8 == 0 ? 8 : num_output_g % 4 == 0 ? 4 : 1;
#elif __AVX__
g_elempack = channels_g % 8 == 0 ? 8 : channels_g % 4 == 0 ? 4 : 1;
out_g_elempack = num_output_g % 8 == 0 ? 8 : num_output_g % 4 == 0 ? 4 : 1;
#else
g_elempack = channels_g % 4 == 0 ? 4 : 1;
out_g_elempack = num_output_g % 4 == 0 ? 4 : 1;
#endif
}
#endif // __SSE2__
// unpacking
Mat bottom_blob_unpacked = bottom_blob;
if (elempack > g_elempack)
{
Option opt_p = opt;
opt_p.blob_allocator = opt.workspace_allocator;
convert_packing(bottom_blob, bottom_blob_unpacked, g_elempack, opt_p);
}
Mat top_blob_bordered_unpacked = top_blob_bordered;
if (out_g_elempack < out_elempack)
{
top_blob_bordered_unpacked.create(outw, outh, num_output / out_g_elempack, out_elemsize / out_elempack * out_g_elempack, out_g_elempack, opt.workspace_allocator);
if (top_blob_bordered_unpacked.empty())
return -100;
}
for (int g = 0; g < group; g++)
{
const Mat bottom_blob_g = bottom_blob_unpacked.channel_range(channels_g * g / g_elempack, channels_g / g_elempack);
Mat top_blob_bordered_g = top_blob_bordered_unpacked.channel_range(num_output_g * g / out_g_elempack, num_output_g / out_g_elempack);
const ncnn::Layer* op = group_ops[g];
Option opt_g = opt;
opt_g.blob_allocator = top_blob_bordered_unpacked.allocator;
// forward
op->forward(bottom_blob_g, top_blob_bordered_g, opt_g);
}
// packing
if (out_g_elempack < out_elempack)
{
convert_packing(top_blob_bordered_unpacked, top_blob_bordered, out_elempack, opt);
}
else
{
top_blob_bordered = top_blob_bordered_unpacked;
}
}
cut_padding(top_blob_bordered, top_blob, opt);
if (top_blob.empty())
return -100;
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x258, %rsp # imm = 0x258
movq %rdx, 0x118(%rsp)
movq %rdi, %r15
movl 0x2c(%rsi), %ebx
movl 0x30(%rsi), %r10d
movq 0x10(%rsi), %rax
movq %rsi, 0x48(%rsp)
movslq 0x18(%rsi), %r11
movq (%rdi), %r8
movq -0x18(%r8), %rdx
movl 0xd4(%rdi,%rdx), %edi
movl 0xd8(%r15,%rdx), %r9d
decl %edi
movl %edi, 0x38(%rsp)
leal -0x1(%rbx), %r14d
decl %r9d
movl %r9d, 0x24(%rsp)
movq %r10, 0x40(%rsp)
leal -0x1(%r10), %r13d
movq %rcx, 0x58(%rsp)
cmpb $0x1, 0x27(%rcx)
pushq $0x8
popq %rcx
jne 0x2db1d6
movl 0xd0(%r15,%rdx), %esi
xorl %edi, %edi
testb $0x3, %sil
sete %dil
testb $0x7, %sil
leal 0x1(%rdi,%rdi,2), %ebp
cmovel %ecx, %ebp
jmp 0x2db1d9
pushq $0x1
popq %rbp
movl 0x38(%rsp), %ecx
imull 0xdc(%r15,%rdx), %ecx
movl %ecx, 0x38(%rsp)
imull 0xe4(%r15,%rdx), %r14d
movl 0x24(%rsp), %ecx
imull 0xe0(%r15,%rdx), %ecx
movl %ecx, 0x24(%rsp)
imull 0xe8(%r15,%rdx), %r13d
movl 0xfc(%r15,%rdx), %r12d
movl 0x100(%r15,%rdx), %edi
movq 0x48(%rsp), %rcx
movl 0x38(%rcx), %ecx
movq %rcx, 0x30(%rsp)
xorl %edx, %edx
divq %r11
movq %rax, %r9
andq $0x0, 0xb0(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x70(%rsp)
vmovups %xmm0, 0x7c(%rsp)
vmovaps %xmm0, 0x90(%rsp)
vmovups %xmm0, 0x9c(%rsp)
movq -0x18(%r8), %rdx
leaq (%r15,%rdx), %rax
pushq $0x10
popq %rcx
cmpl $0x0, 0xec(%r15,%rdx)
movq %r11, 0x60(%rsp)
jg 0x2db38d
cmpl $0x0, 0xf0(%rax)
jg 0x2db38d
cmpl $0x0, 0xf4(%rax)
jg 0x2db38d
cmpl $0x0, 0xf8(%rax)
jg 0x2db38d
cmpl $0x0, 0x104(%rax)
jle 0x2db2b9
cmpl $0x0, 0x108(%rax)
jg 0x2db38d
leaq 0x70(%rsp), %rax
movq 0x118(%rsp), %rcx
cmpq %rcx, %rax
je 0x2db384
movq %r9, 0x18(%rsp)
movq 0x8(%rcx), %rax
testq %rax, %rax
je 0x2db320
lock
incl (%rax)
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x2db320
lock
decl (%rax)
jne 0x2db320
movq %rdi, 0x10(%rsp)
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x2db313
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x10(%rsp), %rdi
jmp 0x2db320
movq %rsi, %rdi
callq 0x5f3e0
movq 0x10(%rsp), %rdi
movq 0x118(%rsp), %rcx
vmovups (%rcx), %xmm0
vmovaps %xmm0, 0x70(%rsp)
movq 0x10(%rcx), %rax
movq %rax, 0x80(%rsp)
movl 0x18(%rcx), %eax
movl %eax, 0x88(%rsp)
movq 0x20(%rcx), %rax
movq %rax, 0x90(%rsp)
vmovups 0x28(%rcx), %xmm0
vmovups %xmm0, 0x98(%rsp)
movl 0x38(%rcx), %eax
movl %eax, 0xa8(%rsp)
movq 0x40(%rcx), %rax
movq %rax, 0xb0(%rsp)
movq (%r15), %rax
movq -0x18(%rax), %rdx
movq 0x18(%rsp), %r9
addq %r15, %rdx
pushq $0x8
popq %rcx
movq %rdx, %rax
addl 0x38(%rsp), %r14d
addl 0x24(%rsp), %r13d
leal (%r12,%r14), %esi
incl %esi
leal (%rdi,%r13), %r8d
incl %r8d
movl %ebp, %r14d
imulq %r14, %r9
movl 0xd0(%rax), %eax
cltd
idivl %r14d
movq 0x58(%rsp), %rdx
movq (%rdx,%rcx), %rcx
movq %rcx, (%rsp)
leaq 0x70(%rsp), %rdi
movl %esi, %ebp
movl %r8d, %r12d
movl %r8d, %edx
movl %eax, %ecx
movq %r9, %r13
movq %r9, %r8
movl %r14d, %r9d
callq 0x628f2
pushq $-0x64
popq %rcx
cmpq $0x0, 0x70(%rsp)
movq 0x60(%rsp), %r9
je 0x2dcb30
movslq 0xa8(%rsp), %rax
imulq 0xb0(%rsp), %rax
testq %rax, %rax
je 0x2dcb30
movq (%r15), %rax
movq -0x18(%rax), %rdx
movl 0xd0(%r15,%rdx), %ecx
movl %r9d, %eax
imull 0x30(%rsp), %eax
movl 0x114(%r15,%rdx), %esi
cmpl %esi, %eax
jne 0x2dbb1d
cmpl %ecx, %eax
jne 0x2dbb1d
movl 0xd8(%r15,%rdx), %eax
imull 0xd4(%r15,%rdx), %eax
movl %eax, 0x50(%rsp)
cmpl $0x1, %r9d
je 0x2dc695
cmpl $0x4, %r9d
je 0x2dc050
cmpl $0x8, %r9d
jne 0x2dcaf0
shll $0x3, 0x50(%rsp)
xorl %ecx, %ecx
testl %ebp, %ebp
cmovlel %ecx, %ebp
movl %ebp, 0x60(%rsp)
testl %r12d, %r12d
cmovlel %ecx, %r12d
movl %r12d, 0x28(%rsp)
movq 0x30(%rsp), %rax
testl %eax, %eax
cmovlel %ecx, %eax
movq %rax, 0x30(%rsp)
vbroadcastss 0x115d19(%rip), %ymm11 # 0x3f11bc
vbroadcastss 0x112b68(%rip), %ymm3 # 0x3ee014
vbroadcastss 0x115d0b(%rip), %ymm12 # 0x3f11c0
vbroadcastss 0x1137ca(%rip), %ymm4 # 0x3eec88
vbroadcastss 0x115cfd(%rip), %ymm6 # 0x3f11c4
vbroadcastss 0x115d00(%rip), %ymm13 # 0x3f11d0
vbroadcastss 0x115cf3(%rip), %ymm8 # 0x3f11cc
vbroadcastss 0x115cf2(%rip), %ymm7 # 0x3f11d4
vbroadcastss 0x115ced(%rip), %ymm15 # 0x3f11d8
vbroadcastss 0x115ce8(%rip), %ymm5 # 0x3f11dc
vxorps %xmm9, %xmm9, %xmm9
xorl %eax, %eax
cmpq 0x30(%rsp), %rax
je 0x2dcaf0
movq %rcx, 0xc0(%rsp)
movslq %ecx, %r11
movq 0xb0(%rsp), %rdi
imulq %rax, %rdi
imulq 0x80(%rsp), %rdi
shlq $0x2, %r11
addq 0x70(%rsp), %rdi
movq 0x48(%rsp), %rdx
movq %rax, %rsi
movslq 0x2c(%rdx), %rcx
movq 0x40(%rdx), %r8
imulq %rax, %r8
movq 0x10(%rdx), %rax
imulq %rax, %r8
addq (%rdx), %r8
movq %r8, 0x18(%rsp)
imulq %rax, %rcx
movq %rsi, 0xb8(%rsp)
shlq $0x5, %rsi
movq %rsi, 0x68(%rsp)
addq 0x20(%r15), %r11
xorl %esi, %esi
cmpl 0x28(%rsp), %esi
je 0x2dbb01
movq (%r15), %rax
movq %rax, 0xc8(%rsp)
movl %esi, 0x2c(%rsp)
subl 0x24(%rsp), %esi
xorl %r13d, %r13d
cmpl 0x60(%rsp), %r13d
je 0x2dbaf6
movq 0xc8(%rsp), %rax
movq -0x18(%rax), %r14
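# if (bias_term) seed the 8-lane _sum from bias_data + g*8, else zero it
# (0x10c/0x1b0 taken to be the bias_term/bias_data members; inferred from
# the source listed above, not annotated in the original)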
cmpl $0x0, 0x10c(%r15,%r14)
movq %rdi, 0x10(%rsp)
je 0x2db5c9
movq 0x1b0(%r15,%r14), %rax
movq 0x68(%rsp), %rdx
vmovups (%rax,%rdx), %ymm0
jmp 0x2db5cd
vxorps %xmm0, %xmm0, %xmm0
movl 0xd8(%r15,%r14), %ebp
movl %r13d, 0x8(%rsp)
subl 0x38(%rsp), %r13d
xorl %edi, %edi
testl %ebp, %ebp
cmovlel %edi, %ebp
cmpl %ebp, %edi
je 0x2db68b
movl 0xe0(%r15,%r14), %eax
imull %edi, %eax
addl %esi, %eax
js 0x2db684
cltd
idivl 0xe8(%r15,%r14)
testl %edx, %edx
jne 0x2db684
cmpl 0x40(%rsp), %eax
jge 0x2db684
movl 0xd4(%r15,%r14), %edx
movslq %eax, %r12
imulq %rcx, %r12
addq 0x18(%rsp), %r12
testl %edx, %edx
movl $0x0, %r10d
cmovgl %edx, %r10d
imull %edi, %edx
movslq %edx, %r8
shlq $0x5, %r8
addq %r11, %r8
xorl %r9d, %r9d
cmpq %r9, %r10
je 0x2db684
movl 0xdc(%r15,%r14), %eax
imull %r9d, %eax
addl %r13d, %eax
js 0x2db67b
cltd
idivl 0xe4(%r15,%r14)
testl %edx, %edx
jne 0x2db67b
cmpl %ebx, %eax
jge 0x2db67b
shll $0x3, %eax
cltq
vmovups (%r12,%rax,4), %ymm1
vfmadd231ps (%r8), %ymm1, %ymm0 # ymm0 = (ymm1 * mem) + ymm0
incq %r9
addq $0x20, %r8
jmp 0x2db644
incl %edi
jmp 0x2db5e6
movl 0x118(%r15,%r14), %eax
decl %eax
cmpl $0x5, %eax
ja 0x2dbad8
leaq 0x11db0b(%rip), %rdx # 0x3f91b0
movslq (%rdx,%rax,4), %rax
addq %rdx, %rax
vmaxps %ymm0, %ymm9, %ymm14
movq 0x10(%rsp), %rdi
movl 0x8(%rsp), %r13d
jmpq *%rax
movq 0x120(%r15,%r14), %rax
vminps %ymm0, %ymm9, %ymm0
vbroadcastss (%rax), %ymm1
vfmadd231ps %ymm1, %ymm0, %ymm14 # ymm14 = (ymm0 * ymm1) + ymm14
jmp 0x2dbae6
vbroadcastss 0x115ad8(%rip), %ymm10 # 0x3f11b8
vminps %ymm0, %ymm10, %ymm1
vmaxps %ymm1, %ymm11, %ymm1
vmovaps %ymm12, %ymm9
vfmadd213ps %ymm3, %ymm1, %ymm9 # ymm9 = (ymm1 * ymm9) + ymm3
vroundps $0x1, %ymm9, %ymm14
vcmpltps %ymm14, %ymm9, %ymm9
vandps %ymm4, %ymm9, %ymm9
vsubps %ymm9, %ymm14, %ymm9
vfmsub231ps %ymm6, %ymm9, %ymm1 # ymm1 = (ymm9 * ymm6) - ymm1
vbroadcastss 0x115ab3(%rip), %ymm2 # 0x3f11c8
vfnmsub231ps %ymm2, %ymm9, %ymm1 # ymm1 = -(ymm9 * ymm2) - ymm1
vmulps %ymm1, %ymm1, %ymm14
vmovaps %ymm8, %ymm2
vfmadd213ps %ymm13, %ymm1, %ymm2 # ymm2 = (ymm1 * ymm2) + ymm13
vfmadd213ps %ymm7, %ymm1, %ymm2 # ymm2 = (ymm1 * ymm2) + ymm7
vfmadd213ps %ymm15, %ymm1, %ymm2 # ymm2 = (ymm1 * ymm2) + ymm15
vfmadd213ps %ymm5, %ymm1, %ymm2 # ymm2 = (ymm1 * ymm2) + ymm5
vfmadd213ps %ymm3, %ymm1, %ymm2 # ymm2 = (ymm1 * ymm2) + ymm3
vfmadd213ps %ymm1, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm2) + ymm1
vaddps %ymm4, %ymm2, %ymm1
vcvttps2dq %ymm9, %ymm2
vpslld $0x17, %xmm2, %xmm9
vextractf128 $0x1, %ymm2, %xmm2
vpslld $0x17, %xmm2, %xmm2
vbroadcastss 0x113526(%rip), %xmm5 # 0x3eec88
vpaddd %xmm5, %xmm2, %xmm2
vpaddd %xmm5, %xmm9, %xmm9
vinsertf128 $0x1, %xmm2, %ymm9, %ymm14
vfmadd213ps %ymm4, %ymm1, %ymm14 # ymm14 = (ymm1 * ymm14) + ymm4
vbroadcastss 0x115a62(%rip), %ymm1 # 0x3f11e0
vmaxps %ymm1, %ymm14, %ymm1
vpsrld $0x17, %xmm1, %xmm2
vextractf128 $0x1, %ymm1, %xmm9
vpsrld $0x17, %xmm9, %xmm9
vbroadcastss 0x115a48(%rip), %ymm4 # 0x3f11e4
vandps %ymm4, %ymm1, %ymm1
vorps %ymm3, %ymm1, %ymm1
vbroadcastss 0x115a3f(%rip), %ymm4 # 0x3f11ec
vcmpleps %ymm1, %ymm4, %ymm4
vmovaps %ymm3, %ymm15
vmovaps %ymm7, %ymm3
vmovaps %ymm6, %ymm7
vmovaps %ymm8, %ymm6
vmovaps %ymm13, %ymm8
vmovaps %ymm12, %ymm13
vmovaps %ymm11, %ymm12
vmovaps %ymm10, %ymm11
vandnps %ymm1, %ymm4, %ymm10
vbroadcastss 0x115a0d(%rip), %ymm5 # 0x3f11f0
vaddps %ymm5, %ymm1, %ymm1
vaddps %ymm1, %ymm10, %ymm1
vextractf128 $0x1, %ymm4, %xmm10
vpsubd %xmm10, %xmm9, %xmm9
vbroadcastss 0x116105(%rip), %xmm10 # 0x3f1904
vpaddd %xmm10, %xmm9, %xmm9
vpsubd %xmm4, %xmm2, %xmm2
vpaddd %xmm2, %xmm10, %xmm2
vinsertf128 $0x1, %xmm9, %ymm2, %ymm2
vmulps %ymm1, %ymm1, %ymm4
vbroadcastss 0x1159d5(%rip), %ymm9 # 0x3f11f4
vbroadcastss 0x1159d0(%rip), %ymm5 # 0x3f11f8
vfmadd213ps %ymm5, %ymm1, %ymm9 # ymm9 = (ymm1 * ymm9) + ymm5
vbroadcastss 0x1159c6(%rip), %ymm5 # 0x3f11fc
vfmadd213ps %ymm5, %ymm1, %ymm9 # ymm9 = (ymm1 * ymm9) + ymm5
vbroadcastss 0x1159bc(%rip), %ymm5 # 0x3f1200
vfmadd213ps %ymm5, %ymm1, %ymm9 # ymm9 = (ymm1 * ymm9) + ymm5
vbroadcastss 0x1159b2(%rip), %ymm5 # 0x3f1204
vfmadd213ps %ymm5, %ymm1, %ymm9 # ymm9 = (ymm1 * ymm9) + ymm5
vbroadcastss 0x1159a8(%rip), %ymm5 # 0x3f1208
vfmadd213ps %ymm5, %ymm1, %ymm9 # ymm9 = (ymm1 * ymm9) + ymm5
vbroadcastss 0x11599e(%rip), %ymm5 # 0x3f120c
vfmadd213ps %ymm5, %ymm1, %ymm9 # ymm9 = (ymm1 * ymm9) + ymm5
vbroadcastss 0x115994(%rip), %ymm5 # 0x3f1210
vfmadd213ps %ymm5, %ymm1, %ymm9 # ymm9 = (ymm1 * ymm9) + ymm5
vbroadcastss 0x11598a(%rip), %ymm5 # 0x3f1214
vfmadd213ps %ymm5, %ymm1, %ymm9 # ymm9 = (ymm1 * ymm9) + ymm5
vmulps %ymm1, %ymm4, %ymm10
vmulps %ymm9, %ymm10, %ymm9
vmovaps %ymm11, %ymm5
vmovaps %ymm12, %ymm11
vmovaps %ymm13, %ymm12
vmovaps %ymm8, %ymm13
vmovaps %ymm6, %ymm8
vmovaps %ymm7, %ymm6
vmovaps %ymm3, %ymm7
vmovaps %ymm15, %ymm3
vbroadcastss 0x115914(%rip), %ymm15 # 0x3f11d8
vcvtdq2ps %ymm2, %ymm2
vbroadcastss 0x1158f7(%rip), %ymm10 # 0x3f11c8
vfmadd231ps %ymm10, %ymm2, %ymm9 # ymm9 = (ymm2 * ymm10) + ymm9
vfmsub231ps %ymm4, %ymm3, %ymm9 # ymm9 = (ymm3 * ymm4) - ymm9
vcmpleps 0x1170dc(%rip), %ymm14, %ymm4 # 0x3f29c0
vsubps %ymm1, %ymm9, %ymm1
vfmsub231ps %ymm2, %ymm6, %ymm1 # ymm1 = (ymm6 * ymm2) - ymm1
vbroadcastss 0x116e22(%rip), %ymm14 # 0x3f2718
vmulps %ymm1, %ymm14, %ymm1
vbroadcastss 0x115ffd(%rip), %ymm2 # 0x3f1900
vblendvps %ymm4, %ymm2, %ymm1, %ymm1
vminps %ymm5, %ymm1, %ymm1
vmaxps %ymm1, %ymm11, %ymm1
vmovaps %ymm12, %ymm2
vfmadd213ps %ymm3, %ymm1, %ymm2 # ymm2 = (ymm1 * ymm2) + ymm3
vroundps $0x1, %ymm2, %ymm4
vcmpltps %ymm4, %ymm2, %ymm2
vbroadcastss 0x11335a(%rip), %ymm5 # 0x3eec88
vandps %ymm5, %ymm2, %ymm2
vbroadcastss 0x1158a1(%rip), %ymm5 # 0x3f11dc
vsubps %ymm2, %ymm4, %ymm2
vfmsub231ps %ymm6, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm6) - ymm1
vfnmsub231ps %ymm10, %ymm2, %ymm1 # ymm1 = -(ymm2 * ymm10) - ymm1
vmulps %ymm1, %ymm1, %ymm4
vmovaps %ymm8, %ymm9
vfmadd213ps %ymm13, %ymm1, %ymm9 # ymm9 = (ymm1 * ymm9) + ymm13
vfmadd213ps %ymm7, %ymm1, %ymm9 # ymm9 = (ymm1 * ymm9) + ymm7
vfmadd213ps %ymm15, %ymm1, %ymm9 # ymm9 = (ymm1 * ymm9) + ymm15
vfmadd213ps %ymm5, %ymm1, %ymm9 # ymm9 = (ymm1 * ymm9) + ymm5
vfmadd213ps %ymm3, %ymm1, %ymm9 # ymm9 = (ymm1 * ymm9) + ymm3
vfmadd213ps %ymm1, %ymm4, %ymm9 # ymm9 = (ymm4 * ymm9) + ymm1
vbroadcastss 0x11330f(%rip), %ymm4 # 0x3eec88
vcvttps2dq %ymm2, %ymm1
vpslld $0x17, %xmm1, %xmm2
vextractf128 $0x1, %ymm1, %xmm1
vpslld $0x17, %xmm1, %xmm1
vbroadcastss 0x1132f2(%rip), %xmm10 # 0x3eec88
vpaddd %xmm1, %xmm10, %xmm1
vpaddd %xmm2, %xmm10, %xmm2
vinsertf128 $0x1, %xmm1, %ymm2, %ymm1
vaddps %ymm4, %ymm9, %ymm2
vxorps %xmm9, %xmm9, %xmm9
vfmadd213ps %ymm4, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm1) + ymm4
vrcpps %ymm1, %ymm2
vfmsub213ps %ymm4, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm1) - ymm4
vfnmadd132ps %ymm2, %ymm2, %ymm1 # ymm1 = -(ymm1 * ymm2) + ymm2
vbroadcastss 0x115827(%rip), %ymm2 # 0x3f11f0
vfnmadd213ps %ymm2, %ymm14, %ymm1 # ymm1 = -(ymm14 * ymm1) + ymm2
jmp 0x2dbad2
movq 0x120(%r15,%r14), %rax
vbroadcastss (%rax), %ymm1
vbroadcastss 0x4(%rax), %ymm14
vmaxps %ymm1, %ymm0, %ymm0
vminps %ymm0, %ymm14, %ymm14
jmp 0x2dbae6
vbroadcastss 0x1157b8(%rip), %ymm1 # 0x3f11b4
vxorps %ymm1, %ymm0, %ymm0
vbroadcastss 0x1157af(%rip), %ymm1 # 0x3f11b8
vminps %ymm1, %ymm0, %ymm0
vmaxps %ymm0, %ymm11, %ymm0
vmovaps %ymm12, %ymm1
vfmadd213ps %ymm3, %ymm0, %ymm1 # ymm1 = (ymm0 * ymm1) + ymm3
vroundps $0x1, %ymm1, %ymm14
vcmpltps %ymm14, %ymm1, %ymm1
vandps %ymm4, %ymm1, %ymm1
vsubps %ymm1, %ymm14, %ymm1
vfmsub231ps %ymm6, %ymm1, %ymm0 # ymm0 = (ymm1 * ymm6) - ymm0
vbroadcastss 0x116ce0(%rip), %ymm2 # 0x3f271c
vfmsub231ps %ymm2, %ymm1, %ymm0 # ymm0 = (ymm1 * ymm2) - ymm0
vmulps %ymm0, %ymm0, %ymm14
vmovaps %ymm8, %ymm9
vfmadd213ps %ymm13, %ymm0, %ymm9 # ymm9 = (ymm0 * ymm9) + ymm13
vfmadd213ps %ymm7, %ymm0, %ymm9 # ymm9 = (ymm0 * ymm9) + ymm7
vfmadd213ps %ymm15, %ymm0, %ymm9 # ymm9 = (ymm0 * ymm9) + ymm15
vfmadd213ps %ymm5, %ymm0, %ymm9 # ymm9 = (ymm0 * ymm9) + ymm5
vfmadd213ps %ymm3, %ymm0, %ymm9 # ymm9 = (ymm0 * ymm9) + ymm3
vfmadd213ps %ymm0, %ymm14, %ymm9 # ymm9 = (ymm14 * ymm9) + ymm0
vcvttps2dq %ymm1, %ymm0
vpslld $0x17, %xmm0, %xmm1
vextractf128 $0x1, %ymm0, %xmm0
vpslld $0x17, %xmm0, %xmm0
vbroadcastss 0x113203(%rip), %xmm2 # 0x3eec88
vpaddd %xmm2, %xmm0, %xmm0
vpaddd %xmm2, %xmm1, %xmm1
vinsertf128 $0x1, %xmm0, %ymm1, %ymm14
vaddps %ymm4, %ymm9, %ymm0
vxorps %xmm9, %xmm9, %xmm9
vfmadd213ps %ymm4, %ymm0, %ymm14 # ymm14 = (ymm0 * ymm14) + ymm4
vrcpps %ymm14, %ymm0
vfmsub213ps %ymm4, %ymm0, %ymm14 # ymm14 = (ymm0 * ymm14) - ymm4
vfnmadd132ps %ymm0, %ymm0, %ymm14 # ymm14 = -(ymm14 * ymm0) + ymm0
jmp 0x2dbae6
movq 0x120(%r15,%r14), %rax
vbroadcastss (%rax), %ymm1
vbroadcastss 0x4(%rax), %ymm2
vfmadd231ps %ymm1, %ymm0, %ymm2 # ymm2 = (ymm0 * ymm1) + ymm2
vmaxps %ymm2, %ymm9, %ymm1
vminps %ymm4, %ymm1, %ymm1
vmulps %ymm0, %ymm1, %ymm14
jmp 0x2dbae6
vmovaps %ymm0, %ymm14
movq 0x10(%rsp), %rdi
movl 0x8(%rsp), %r13d
vmovups %ymm14, (%rdi)
addq $0x20, %rdi
incl %r13d
jmp 0x2db58e
movl 0x2c(%rsp), %esi
incl %esi
jmp 0x2db56e
movq 0xb8(%rsp), %rax
incq %rax
movq 0xc0(%rsp), %rcx
addl 0x50(%rsp), %ecx
jmp 0x2db4fb
movq %r14, 0x38(%rsp)
cltd
idivl %esi
movl %eax, %edi
movl %ecx, %eax
cltd
idivl %esi
movl %eax, %r8d
movq 0x58(%rsp), %rax
cmpb $0x1, 0x27(%rax)
jne 0x2dbb6b
xorl %eax, %eax
testb $0x3, %dil
sete %al
testb $0x7, %dil
leal 0x1(%rax,%rax,2), %eax
pushq $0x8
popq %rcx
cmovel %ecx, %eax
movl %eax, 0x18(%rsp)
xorl %eax, %eax
testb $0x3, %r8b
sete %al
testb $0x7, %r8b
leal 0x1(%rax,%rax,2), %eax
cmovel %ecx, %eax
jmp 0x2dbb72
pushq $0x1
popq %rax
movl %eax, 0x18(%rsp)
movl %eax, 0x40(%rsp)
movq 0x48(%rsp), %rdx
movq 0x8(%rdx), %rax
vmovups (%rdx), %xmm0
vmovaps %xmm0, 0x120(%rsp)
movq 0x10(%rdx), %rcx
movq %rcx, 0x130(%rsp)
movl 0x18(%rdx), %ecx
movl %ecx, 0x138(%rsp)
movq 0x20(%rdx), %rcx
movq %rcx, 0x140(%rsp)
vmovups 0x28(%rdx), %xmm0
vmovups %xmm0, 0x148(%rsp)
movl 0x38(%rdx), %ecx
movl %ecx, 0x158(%rsp)
movq 0x40(%rdx), %rcx
movq %rcx, 0x160(%rsp)
testq %rax, %rax
je 0x2dbbda
lock
incl (%rax)
cmpl 0x18(%rsp), %r9d
jle 0x2dbc2b
movl %r8d, %r14d
movl %edi, %ebx
movq 0x58(%rsp), %rax
vmovups (%rax), %ymm0
vmovups 0x20(%rax), %ymm1
leaq 0xd0(%rsp), %rcx
vmovups %ymm1, 0x20(%rcx)
vmovups %ymm0, (%rcx)
movq 0x10(%rax), %rax
movq %rax, 0x8(%rcx)
leaq 0x120(%rsp), %rsi
movq 0x48(%rsp), %rdi
movl 0x18(%rsp), %edx
vzeroupper
callq 0x64e3b
movl %ebx, %edi
movl %r14d, %r8d
movq 0x78(%rsp), %rax
vmovaps 0x70(%rsp), %xmm0
vmovaps %xmm0, 0xd0(%rsp)
movq 0x80(%rsp), %rcx
movq %rcx, 0xe0(%rsp)
movl 0x88(%rsp), %ecx
movl %ecx, 0xe8(%rsp)
movq 0x90(%rsp), %rcx
movq %rcx, 0xf0(%rsp)
vmovups 0x98(%rsp), %xmm0
vmovups %xmm0, 0xf8(%rsp)
movl 0xa8(%rsp), %ecx
movl %ecx, 0x108(%rsp)
movq 0xb0(%rsp), %rcx
movq %rcx, 0x110(%rsp)
testq %rax, %rax
je 0x2dbca5
lock
incl (%rax)
movl 0x40(%rsp), %eax
cmpl 0x38(%rsp), %eax
jae 0x2dbd2c
movl %r8d, %r14d
movl %edi, %ebx
movq (%r15), %rax
movq -0x18(%rax), %rax
movl 0xd0(%r15,%rax), %eax
cltd
movl 0x40(%rsp), %r9d
idivl %r9d
movl %eax, %ecx
movq %r13, %rax
xorl %edx, %edx
divq 0x38(%rsp)
movl %r9d, %r8d
imulq %rax, %r8
movq 0x58(%rsp), %rax
movq 0x10(%rax), %rax
movq %rax, (%rsp)
leaq 0xd0(%rsp), %rdi
movl %ebp, %esi
movl %r12d, %edx
callq 0x628f2
cmpq $0x0, 0xd0(%rsp)
je 0x2dc00f
movl %ebx, %edi
movl %r14d, %r8d
movslq 0x108(%rsp), %rax
imulq 0x110(%rsp), %rax
testq %rax, %rax
je 0x2dc00f
xorl %r12d, %r12d
xorl %ebp, %ebp
xorl %ebx, %ebx
movq (%r15), %rax
movq -0x18(%rax), %rax
movslq 0x114(%r15,%rax), %rax
cmpq %rax, %rbx
jge 0x2dbf98
movl %r12d, %eax
cltd
movl 0x18(%rsp), %esi
idivl %esi
movl %eax, %ecx
movl %edi, %r14d
movl %edi, %eax
cltd
idivl %esi
movslq %ecx, %rdx
imulq 0x160(%rsp), %rdx
movq 0x130(%rsp), %rcx
imulq %rcx, %rdx
addq 0x120(%rsp), %rdx
movl 0x138(%rsp), %esi
movq 0x140(%rsp), %rdi
movq %rdx, 0x1c8(%rsp)
andq $0x0, 0x1d0(%rsp)
movq %rcx, 0x1d8(%rsp)
movl %esi, 0x1e0(%rsp)
movq %rdi, 0x1e8(%rsp)
movl %eax, 0x200(%rsp)
vmovups 0x148(%rsp), %xmm0
movslq 0x154(%rsp), %rax
movslq 0x14c(%rsp), %rdx
movslq 0x150(%rsp), %rsi
imulq %rdx, %rsi
imulq %rcx, %rax
imulq %rsi, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rcx
movq %rax, 0x208(%rsp)
vmovups %xmm0, 0x1f0(%rsp)
movl %ebp, %eax
cltd
movl 0x40(%rsp), %esi
idivl %esi
movl %eax, %ecx
movl %r8d, %r13d
movl %r8d, %eax
cltd
idivl %esi
movslq %ecx, %rdx
imulq 0x110(%rsp), %rdx
movq 0xe0(%rsp), %rsi
imulq %rsi, %rdx
addq 0xd0(%rsp), %rdx
movl 0xe8(%rsp), %edi
movq 0xf0(%rsp), %rcx
movq %rdx, 0x180(%rsp)
andq $0x0, 0x188(%rsp)
movq %rsi, 0x190(%rsp)
movl %edi, 0x198(%rsp)
movq %rcx, 0x1a0(%rsp)
movl %eax, 0x1b8(%rsp)
vmovups 0xf8(%rsp), %xmm0
movslq 0x104(%rsp), %rax
movslq 0xfc(%rsp), %rdx
movslq 0x100(%rsp), %rdi
imulq %rdx, %rdi
imulq %rsi, %rax
imulq %rdi, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rsi
movq %rax, 0x1c0(%rsp)
vmovups %xmm0, 0x1a8(%rsp)
movq 0x8(%r15), %rax
movq (%rax,%rbx,8), %rdi
movq 0x58(%rsp), %rax
vmovups (%rax), %ymm0
vmovups 0x20(%rax), %ymm1
vmovups %ymm0, 0x210(%rsp)
vmovups %ymm1, 0x230(%rsp)
movq %rcx, 0x218(%rsp)
movq (%rdi), %rax
leaq 0x1c8(%rsp), %rsi
leaq 0x180(%rsp), %rdx
leaq 0x210(%rsp), %rcx
vzeroupper
callq *0x38(%rax)
movq 0x188(%rsp), %rax
testq %rax, %rax
je 0x2dbf4d
lock
decl (%rax)
jne 0x2dbf4d
movq 0x180(%rsp), %rsi
movq 0x1a0(%rsp), %rdi
testq %rdi, %rdi
je 0x2dbf45
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2dbf4d
movq %rsi, %rdi
callq 0x5f3e0
movq 0x1d0(%rsp), %rax
testq %rax, %rax
je 0x2dbf84
lock
decl (%rax)
jne 0x2dbf84
movq 0x1c8(%rsp), %rsi
movq 0x1e8(%rsp), %rdi
testq %rdi, %rdi
je 0x2dbf7c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2dbf84
movq %rsi, %rdi
callq 0x5f3e0
incq %rbx
movl %r13d, %r8d
addl %r13d, %ebp
movl %r14d, %edi
addl %r14d, %r12d
jmp 0x2dbd33
movq 0x38(%rsp), %rdx
cmpl %edx, 0x40(%rsp)
jae 0x2dbfc7
leaq 0xd0(%rsp), %rdi
leaq 0x70(%rsp), %rsi
movq 0x58(%rsp), %rcx
callq 0x64e3b
movq 0xd8(%rsp), %rax
jmp 0x2dca8a
movq 0xd8(%rsp), %rax
testq %rax, %rax
je 0x2dbfd7
lock
incl (%rax)
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x2dca15
lock
decl (%rax)
jne 0x2dca15
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x2dca0d
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2dca15
movq 0xd8(%rsp), %rax
testq %rax, %rax
pushq $-0x64
popq %rbx
je 0x2dcb7f
lock
decl (%rax)
jne 0x2dcb7f
movq 0xd0(%rsp), %rsi
movq 0xf0(%rsp), %rdi
testq %rdi, %rdi
je 0x2dcb77
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2dcb7f
xorl %ecx, %ecx
testl %ebp, %ebp
cmovlel %ecx, %ebp
movl %ebp, 0x60(%rsp)
testl %r12d, %r12d
cmovlel %ecx, %r12d
movl %r12d, 0x28(%rsp)
movq 0x30(%rsp), %rax
testl %eax, %eax
cmovlel %ecx, %eax
movq %rax, 0x30(%rsp)
shll $0x2, 0x50(%rsp)
vbroadcastss 0x111f90(%rip), %xmm7 # 0x3ee014
vbroadcastss 0x112bfb(%rip), %xmm5 # 0x3eec88
xorl %eax, %eax
vbroadcastss 0x115124(%rip), %xmm12 # 0x3f11bc
vbroadcastss 0x11512f(%rip), %xmm14 # 0x3f11d0
cmpq 0x30(%rsp), %rax
je 0x2dcaf0
movq %rcx, 0xc0(%rsp)
movslq %ecx, %r10
movq 0xb0(%rsp), %rdi
imulq %rax, %rdi
imulq 0x80(%rsp), %rdi
shlq $0x2, %r10
addq 0x70(%rsp), %rdi
movq 0x48(%rsp), %rcx
movq %rax, %rdx
movslq 0x2c(%rcx), %r11
movq 0x40(%rcx), %rsi
imulq %rax, %rsi
movq 0x10(%rcx), %rax
imulq %rax, %rsi
addq (%rcx), %rsi
movq %rsi, 0x18(%rsp)
imulq %rax, %r11
movq %rdx, 0xb8(%rsp)
shlq $0x4, %rdx
movq %rdx, 0x68(%rsp)
addq 0x20(%r15), %r10
xorl %esi, %esi
cmpl 0x28(%rsp), %esi
je 0x2dc679
movq (%r15), %rax
movq %rax, 0xc8(%rsp)
movl %esi, 0x2c(%rsp)
subl 0x24(%rsp), %esi
xorl %r13d, %r13d
cmpl 0x60(%rsp), %r13d
je 0x2dc66e
movq 0xc8(%rsp), %rax
movq -0x18(%rax), %r14
cmpl $0x0, 0x10c(%r15,%r14)
movq %rdi, 0x10(%rsp)
je 0x2dc16f
movq 0x1b0(%r15,%r14), %rax
movq 0x68(%rsp), %rcx
vmovups (%rax,%rcx), %xmm15
jmp 0x2dc174
vxorps %xmm15, %xmm15, %xmm15
movl 0xd8(%r15,%r14), %ebp
movl %r13d, 0x8(%rsp)
subl 0x38(%rsp), %r13d
xorl %edi, %edi
testl %ebp, %ebp
cmovlel %edi, %ebp
cmpl %ebp, %edi
je 0x2dc231
movl 0xe0(%r15,%r14), %eax
imull %edi, %eax
addl %esi, %eax
js 0x2dc22a
cltd
idivl 0xe8(%r15,%r14)
testl %edx, %edx
jne 0x2dc22a
cmpl 0x40(%rsp), %eax
jge 0x2dc22a
movl 0xd4(%r15,%r14), %ecx
movslq %eax, %r8
imulq %r11, %r8
addq 0x18(%rsp), %r8
testl %ecx, %ecx
movl $0x0, %r9d
cmovgl %ecx, %r9d
imull %edi, %ecx
movslq %ecx, %r12
shlq $0x4, %r12
addq %r10, %r12
xorl %ecx, %ecx
cmpq %rcx, %r9
je 0x2dc22a
movl 0xdc(%r15,%r14), %eax
imull %ecx, %eax
addl %r13d, %eax
js 0x2dc221
cltd
idivl 0xe4(%r15,%r14)
testl %edx, %edx
jne 0x2dc221
cmpl %ebx, %eax
jge 0x2dc221
shll $0x2, %eax
cltq
vmovups (%r8,%rax,4), %xmm1
vfmadd231ps (%r12), %xmm1, %xmm15 # xmm15 = (xmm1 * mem) + xmm15
incq %rcx
addq $0x10, %r12
jmp 0x2dc1ea
incl %edi
jmp 0x2dc18d
movl 0x118(%r15,%r14), %eax
decl %eax
cmpl $0x5, %eax
ja 0x2dc650
leaq 0x11cf4d(%rip), %rcx # 0x3f9198
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
vmaxps 0x111e26(%rip), %xmm15, %xmm1 # 0x3ee080
movq 0x10(%rsp), %rdi
movl 0x8(%rsp), %r13d
jmpq *%rax
movq 0x120(%r15,%r14), %rax
vminps 0x111e0a(%rip), %xmm15, %xmm15 # 0x3ee080
vbroadcastss (%rax), %xmm8
vfmadd231ps %xmm8, %xmm15, %xmm1 # xmm1 = (xmm15 * xmm8) + xmm1
jmp 0x2dc65e
vbroadcastss 0x114f2a(%rip), %xmm11 # 0x3f11b8
vminps %xmm11, %xmm15, %xmm1
vmaxps %xmm1, %xmm12, %xmm1
vbroadcastss 0x114f20(%rip), %xmm13 # 0x3f11c0
vmovaps %xmm13, %xmm8
vfmadd213ps %xmm7, %xmm1, %xmm8 # xmm8 = (xmm1 * xmm8) + xmm7
vcvttps2dq %xmm8, %xmm10
vcvtdq2ps %xmm10, %xmm10
vcmpltps %xmm10, %xmm8, %xmm8
vandps %xmm5, %xmm8, %xmm8
vsubps %xmm8, %xmm10, %xmm8
vbroadcastss 0x114ef8(%rip), %xmm0 # 0x3f11c4
vfmsub231ps %xmm0, %xmm8, %xmm1 # xmm1 = (xmm8 * xmm0) - xmm1
vbroadcastss 0x114eee(%rip), %xmm2 # 0x3f11c8
vfnmsub231ps %xmm2, %xmm8, %xmm1 # xmm1 = -(xmm8 * xmm2) - xmm1
vmulps %xmm1, %xmm1, %xmm10
vbroadcastss 0x114ee0(%rip), %xmm11 # 0x3f11cc
vmovaps %xmm11, %xmm2
vfmadd213ps %xmm14, %xmm1, %xmm2 # xmm2 = (xmm1 * xmm2) + xmm14
vbroadcastss 0x114ed6(%rip), %xmm3 # 0x3f11d4
vfmadd213ps %xmm3, %xmm1, %xmm2 # xmm2 = (xmm1 * xmm2) + xmm3
vbroadcastss 0x114ecc(%rip), %xmm6 # 0x3f11d8
vfmadd213ps %xmm6, %xmm1, %xmm2 # xmm2 = (xmm1 * xmm2) + xmm6
vbroadcastss 0x114ec2(%rip), %xmm4 # 0x3f11dc
vfmadd213ps %xmm4, %xmm1, %xmm2 # xmm2 = (xmm1 * xmm2) + xmm4
vfmadd213ps %xmm7, %xmm1, %xmm2 # xmm2 = (xmm1 * xmm2) + xmm7
vfmadd213ps %xmm1, %xmm10, %xmm2 # xmm2 = (xmm10 * xmm2) + xmm1
vaddps %xmm5, %xmm2, %xmm2
vcvttps2dq %xmm8, %xmm1
vpslld $0x17, %xmm1, %xmm1
vpaddd %xmm5, %xmm1, %xmm1
vfmadd213ps %xmm5, %xmm2, %xmm1 # xmm1 = (xmm2 * xmm1) + xmm5
vbroadcastss 0x114e97(%rip), %xmm2 # 0x3f11e0
vmaxps %xmm2, %xmm1, %xmm2
vpsrld $0x17, %xmm2, %xmm8
vbroadcastss 0x114e8d(%rip), %xmm4 # 0x3f11e8
vpaddd %xmm4, %xmm8, %xmm8
vbroadcastss 0x114e7c(%rip), %xmm4 # 0x3f11e4
vandps %xmm4, %xmm2, %xmm2
vorps %xmm7, %xmm2, %xmm2
vcvtdq2ps %xmm8, %xmm8
vbroadcastss 0x114e6e(%rip), %xmm4 # 0x3f11ec
vcmpltps %xmm4, %xmm2, %xmm10
vandps %xmm2, %xmm10, %xmm4
vbroadcastss 0x114e60(%rip), %xmm9 # 0x3f11f0
vaddps %xmm2, %xmm9, %xmm2
vaddps %xmm4, %xmm2, %xmm2
vandps %xmm5, %xmm10, %xmm4
vsubps %xmm4, %xmm8, %xmm4
vmulps %xmm2, %xmm2, %xmm8
vbroadcastss 0x114e47(%rip), %xmm10 # 0x3f11f4
vbroadcastss 0x114e42(%rip), %xmm5 # 0x3f11f8
vfmadd213ps %xmm5, %xmm2, %xmm10 # xmm10 = (xmm2 * xmm10) + xmm5
vbroadcastss 0x114e38(%rip), %xmm5 # 0x3f11fc
vfmadd213ps %xmm5, %xmm2, %xmm10 # xmm10 = (xmm2 * xmm10) + xmm5
vbroadcastss 0x114e2e(%rip), %xmm5 # 0x3f1200
vfmadd213ps %xmm5, %xmm2, %xmm10 # xmm10 = (xmm2 * xmm10) + xmm5
vbroadcastss 0x114e24(%rip), %xmm5 # 0x3f1204
vfmadd213ps %xmm5, %xmm2, %xmm10 # xmm10 = (xmm2 * xmm10) + xmm5
vbroadcastss 0x114e1a(%rip), %xmm5 # 0x3f1208
vfmadd213ps %xmm5, %xmm2, %xmm10 # xmm10 = (xmm2 * xmm10) + xmm5
vbroadcastss 0x114e10(%rip), %xmm5 # 0x3f120c
vfmadd213ps %xmm5, %xmm2, %xmm10 # xmm10 = (xmm2 * xmm10) + xmm5
vbroadcastss 0x114e06(%rip), %xmm5 # 0x3f1210
vfmadd213ps %xmm5, %xmm2, %xmm10 # xmm10 = (xmm2 * xmm10) + xmm5
vbroadcastss 0x114dfc(%rip), %xmm5 # 0x3f1214
vfmadd213ps %xmm5, %xmm2, %xmm10 # xmm10 = (xmm2 * xmm10) + xmm5
vmovaps %xmm7, %xmm5
vmovaps %xmm11, %xmm7
vmulps %xmm2, %xmm8, %xmm9
vmulps %xmm10, %xmm9, %xmm9
vxorps %xmm11, %xmm11, %xmm11
vbroadcastss 0x114d8c(%rip), %xmm10 # 0x3f11c8
vfmadd231ps %xmm10, %xmm4, %xmm9 # xmm9 = (xmm4 * xmm10) + xmm9
vfmsub231ps %xmm8, %xmm5, %xmm9 # xmm9 = (xmm5 * xmm8) - xmm9
vcmpleps %xmm11, %xmm1, %xmm1
vsubps %xmm2, %xmm9, %xmm2
vbroadcastss 0x114d5f(%rip), %xmm11 # 0x3f11b8
vmovaps %xmm6, %xmm9
vbroadcastss 0x112822(%rip), %xmm6 # 0x3eec88
vfnmadd231ps %xmm4, %xmm0, %xmm2 # xmm2 = -(xmm0 * xmm4) + xmm2
vaddps %xmm2, %xmm2, %xmm2
vbroadcastss 0x115488(%rip), %xmm4 # 0x3f1900
vblendvps %xmm1, %xmm4, %xmm2, %xmm1
vminps %xmm1, %xmm11, %xmm1
vmaxps %xmm1, %xmm12, %xmm1
vfmadd213ps %xmm5, %xmm1, %xmm13 # xmm13 = (xmm1 * xmm13) + xmm5
vcvttps2dq %xmm13, %xmm4
vcvtdq2ps %xmm4, %xmm4
vcmpltps %xmm4, %xmm13, %xmm2
vandps %xmm6, %xmm2, %xmm2
vsubps %xmm2, %xmm4, %xmm2
vfmsub231ps %xmm0, %xmm2, %xmm1 # xmm1 = (xmm2 * xmm0) - xmm1
vbroadcastss 0x114d19(%rip), %xmm0 # 0x3f11c8
vfnmsub231ps %xmm0, %xmm2, %xmm1 # xmm1 = -(xmm2 * xmm0) - xmm1
vmulps %xmm1, %xmm1, %xmm4
vmovaps %xmm7, %xmm8
vfmadd213ps %xmm14, %xmm1, %xmm8 # xmm8 = (xmm1 * xmm8) + xmm14
vfmadd213ps %xmm3, %xmm1, %xmm8 # xmm8 = (xmm1 * xmm8) + xmm3
vfmadd213ps %xmm9, %xmm1, %xmm8 # xmm8 = (xmm1 * xmm8) + xmm9
vbroadcastss 0x114d08(%rip), %xmm0 # 0x3f11dc
vfmadd213ps %xmm0, %xmm1, %xmm8 # xmm8 = (xmm1 * xmm8) + xmm0
vmovaps %xmm5, %xmm7
vfmadd213ps %xmm5, %xmm1, %xmm8 # xmm8 = (xmm1 * xmm8) + xmm5
vfmadd213ps %xmm1, %xmm4, %xmm8 # xmm8 = (xmm4 * xmm8) + xmm1
vaddps %xmm6, %xmm8, %xmm4
vcvttps2dq %xmm2, %xmm1
vpslld $0x17, %xmm1, %xmm1
vpaddd %xmm6, %xmm1, %xmm1
vmovaps %xmm6, %xmm5
vfmadd213ps %xmm6, %xmm4, %xmm1 # xmm1 = (xmm4 * xmm1) + xmm6
vrcpps %xmm1, %xmm2
vaddps %xmm2, %xmm2, %xmm4
vbroadcastss 0x1161f6(%rip), %xmm3 # 0x3f2708
vfmsub213ps %xmm3, %xmm4, %xmm1 # xmm1 = (xmm4 * xmm1) - xmm3
vfnmadd213ps %xmm4, %xmm2, %xmm1 # xmm1 = -(xmm2 * xmm1) + xmm4
vfmsub213ps %xmm15, %xmm15, %xmm1 # xmm1 = (xmm15 * xmm1) - xmm15
jmp 0x2dc65e
movq 0x120(%r15,%r14), %rax
vbroadcastss (%rax), %xmm1
vbroadcastss 0x4(%rax), %xmm8
vmaxps %xmm1, %xmm15, %xmm1
vminps %xmm1, %xmm8, %xmm1
jmp 0x2dc65e
vbroadcastss 0x114c65(%rip), %xmm1 # 0x3f11b4
vxorps %xmm1, %xmm15, %xmm1
vbroadcastss 0x114c5c(%rip), %xmm2 # 0x3f11b8
vminps %xmm2, %xmm1, %xmm1
vbroadcastss 0x114c53(%rip), %xmm2 # 0x3f11bc
vmaxps %xmm2, %xmm1, %xmm1
vbroadcastss 0x114c4a(%rip), %xmm8 # 0x3f11c0
vfmadd213ps %xmm7, %xmm1, %xmm8 # xmm8 = (xmm1 * xmm8) + xmm7
vcvttps2dq %xmm8, %xmm15
vcvtdq2ps %xmm15, %xmm15
vcmpltps %xmm15, %xmm8, %xmm8
vandps %xmm5, %xmm8, %xmm8
vsubps %xmm8, %xmm15, %xmm8
vbroadcastss 0x114c27(%rip), %xmm2 # 0x3f11c4
vfmsub231ps %xmm2, %xmm8, %xmm1 # xmm1 = (xmm8 * xmm2) - xmm1
vbroadcastss 0x116171(%rip), %xmm2 # 0x3f271c
vfmsub231ps %xmm2, %xmm8, %xmm1 # xmm1 = (xmm8 * xmm2) - xmm1
vmulps %xmm1, %xmm1, %xmm15
vbroadcastss 0x114c0f(%rip), %xmm10 # 0x3f11cc
vbroadcastss 0x114c0a(%rip), %xmm2 # 0x3f11d0
vfmadd213ps %xmm2, %xmm1, %xmm10 # xmm10 = (xmm1 * xmm10) + xmm2
vbroadcastss 0x114c00(%rip), %xmm2 # 0x3f11d4
vfmadd213ps %xmm2, %xmm1, %xmm10 # xmm10 = (xmm1 * xmm10) + xmm2
vbroadcastss 0x114bf6(%rip), %xmm2 # 0x3f11d8
vfmadd213ps %xmm2, %xmm1, %xmm10 # xmm10 = (xmm1 * xmm10) + xmm2
vbroadcastss 0x114bec(%rip), %xmm2 # 0x3f11dc
vfmadd213ps %xmm2, %xmm1, %xmm10 # xmm10 = (xmm1 * xmm10) + xmm2
vfmadd213ps %xmm7, %xmm1, %xmm10 # xmm10 = (xmm1 * xmm10) + xmm7
vfmadd213ps %xmm1, %xmm15, %xmm10 # xmm10 = (xmm15 * xmm10) + xmm1
vaddps %xmm5, %xmm10, %xmm10
vcvttps2dq %xmm8, %xmm1
vpslld $0x17, %xmm1, %xmm1
vpaddd %xmm5, %xmm1, %xmm1
vfmadd213ps %xmm5, %xmm10, %xmm1 # xmm1 = (xmm10 * xmm1) + xmm5
vrcpps %xmm1, %xmm8
vfmsub213ps %xmm5, %xmm8, %xmm1 # xmm1 = (xmm8 * xmm1) - xmm5
vfnmadd132ps %xmm8, %xmm8, %xmm1 # xmm1 = -(xmm1 * xmm8) + xmm8
jmp 0x2dc65e
movq 0x120(%r15,%r14), %rax
vbroadcastss (%rax), %xmm1
vbroadcastss 0x4(%rax), %xmm2
vfmadd231ps %xmm1, %xmm15, %xmm2 # xmm2 = (xmm15 * xmm1) + xmm2
vmaxps 0x111a3a(%rip), %xmm2, %xmm1 # 0x3ee080
vminps %xmm5, %xmm1, %xmm1
vmulps %xmm1, %xmm15, %xmm1
jmp 0x2dc65e
vmovaps %xmm15, %xmm1
movq 0x10(%rsp), %rdi
movl 0x8(%rsp), %r13d
vmovups %xmm1, (%rdi)
addq $0x10, %rdi
incl %r13d
jmp 0x2dc134
movl 0x2c(%rsp), %esi
incl %esi
jmp 0x2dc114
movq 0xb8(%rsp), %rax
incq %rax
movq 0xc0(%rsp), %rcx
addl 0x50(%rsp), %ecx
jmp 0x2dc0a1
xorl %eax, %eax
testl %ebp, %ebp
cmovlel %eax, %ebp
movl %ebp, 0x60(%rsp)
xorl %edx, %edx
testl %r12d, %r12d
cmovlel %edx, %r12d
movl %r12d, 0x28(%rsp)
movq 0x30(%rsp), %rax
testl %eax, %eax
cmovlel %edx, %eax
movq %rax, 0x30(%rsp)
movq 0x70(%rsp), %rax
movq %rax, 0xc0(%rsp)
movq 0xb0(%rsp), %rax
imulq 0x80(%rsp), %rax
movq %rax, 0xb8(%rsp)
movq 0x20(%r15), %rax
movq %rax, 0x18(%rsp)
movq 0x48(%rsp), %rcx
movslq 0x2c(%rcx), %r13
movq (%rcx), %rax
movq %rax, 0x178(%rsp)
movq 0x10(%rcx), %rax
movq 0x40(%rcx), %rcx
imulq %rax, %rcx
movq %rcx, 0x170(%rsp)
imulq %rax, %r13
movslq 0x50(%rsp), %rax
shlq $0x2, %rax
movq %rax, 0x50(%rsp)
cmpq 0x30(%rsp), %rdx
je 0x2dcaf0
movq 0xb8(%rsp), %rcx
imulq %rdx, %rcx
addq 0xc0(%rsp), %rcx
movq 0x170(%rsp), %rbp
movq %rdx, 0x68(%rsp)
imulq %rdx, %rbp
addq 0x178(%rsp), %rbp
xorl %r14d, %r14d
cmpl 0x28(%rsp), %r14d
je 0x2dc9f6
movq (%r15), %rax
movq %rax, 0xc8(%rsp)
movl %r14d, 0x48(%rsp)
subl 0x24(%rsp), %r14d
xorl %edi, %edi
cmpl 0x60(%rsp), %edi
je 0x2dc9e9
movq %rcx, 0x8(%rsp)
movq 0xc8(%rsp), %rax
movq -0x18(%rax), %rcx
cmpl $0x0, 0x10c(%r15,%rcx)
je 0x2dc7bd
movq 0x1b0(%r15,%rcx), %rax
movq 0x68(%rsp), %rdx
vmovss (%rax,%rdx,4), %xmm4
jmp 0x2dc7c1
vxorps %xmm4, %xmm4, %xmm4
movl 0xd8(%r15,%rcx), %esi
movl %edi, 0x10(%rsp)
subl 0x38(%rsp), %edi
xorl %r8d, %r8d
testl %esi, %esi
cmovlel %r8d, %esi
cmpl %esi, %r8d
je 0x2dc878
movl 0xe0(%r15,%rcx), %eax
imull %r8d, %eax
addl %r14d, %eax
js 0x2dc870
cltd
idivl 0xe8(%r15,%rcx)
testl %edx, %edx
jne 0x2dc870
cmpl 0x40(%rsp), %eax
jge 0x2dc870
movslq %eax, %r9
imulq %r13, %r9
addq %rbp, %r9
movl 0xd4(%r15,%rcx), %eax
testl %eax, %eax
movl $0x0, %r10d
cmovgl %eax, %r10d
imull %r8d, %eax
cltq
movq 0x18(%rsp), %rdx
leaq (%rdx,%rax,4), %r11
xorl %r12d, %r12d
cmpq %r12, %r10
je 0x2dc870
movl 0xdc(%r15,%rcx), %eax
imull %r12d, %eax
addl %edi, %eax
js 0x2dc86b
cltd
idivl 0xe4(%r15,%rcx)
testl %edx, %edx
jne 0x2dc86b
cmpl %ebx, %eax
jge 0x2dc86b
cltq
vmovss (%r11,%r12,4), %xmm0
vfmadd231ss (%r9,%rax,4), %xmm0, %xmm4 # xmm4 = (xmm0 * mem) + xmm4
incq %r12
jmp 0x2dc837
incl %r8d
jmp 0x2dc7da
movl 0x118(%r15,%rcx), %eax
decl %eax
cmpl $0x5, %eax
ja 0x2dc9cf
leaq 0x11c8ee(%rip), %rdx # 0x3f9180
movslq (%rdx,%rax,4), %rax
addq %rdx, %rax
movl 0x10(%rsp), %edi
jmpq *%rax
vmaxss 0x111769(%rip), %xmm4, %xmm0 # 0x3ee010
jmp 0x2dc96e
vmovaps %xmm4, %xmm0
vmovss %xmm4, 0x2c(%rsp)
callq 0x5f410
vaddss 0x1123c5(%rip), %xmm0, %xmm0 # 0x3eec88
callq 0x5f200
callq 0x5f160
movl 0x10(%rsp), %edi
vmulss 0x2c(%rsp), %xmm0, %xmm0
jmp 0x2dc96e
movq 0x120(%r15,%rcx), %rax
vmovss 0x4(%rax), %xmm1
vmaxss (%rax), %xmm4, %xmm0
vucomiss %xmm1, %xmm0
movq 0x8(%rsp), %rcx
jbe 0x2dc973
vmovaps %xmm1, %xmm0
jmp 0x2dc973
vminss 0x1148b2(%rip), %xmm4, %xmm0 # 0x3f11b8
vbroadcastss 0x1148a5(%rip), %xmm1 # 0x3f11b4
vxorps %xmm1, %xmm0, %xmm1
vcmpltss 0x1148a0(%rip), %xmm0, %xmm0 # 0x3f11bc
vbroadcastss 0x114893(%rip), %xmm2 # 0x3f11b8
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
callq 0x5f410
movl 0x10(%rsp), %edi
vmovss 0x11234c(%rip), %xmm1 # 0x3eec88
vaddss %xmm1, %xmm0, %xmm0
vdivss %xmm0, %xmm1, %xmm0
jmp 0x2dc96e
movq 0x120(%r15,%rcx), %rax
vxorps %xmm0, %xmm0, %xmm0
vcmpltss %xmm4, %xmm0, %xmm0
vmovss (%rax), %xmm1
vbroadcastss 0x112324(%rip), %xmm2 # 0x3eec88
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmulss %xmm4, %xmm0, %xmm0
movq 0x8(%rsp), %rcx
vmovss %xmm0, (%rcx)
addq $0x4, %rcx
incl %edi
jmp 0x2dc783
movq 0x120(%r15,%rcx), %rax
vmovss (%rax), %xmm1
vmovss 0x4(%rax), %xmm2
vbroadcastss 0x114818(%rip), %xmm0 # 0x3f11b4
vxorps %xmm0, %xmm2, %xmm0
vdivss %xmm1, %xmm0, %xmm3
vxorps %xmm0, %xmm0, %xmm0
vucomiss %xmm3, %xmm4
movq 0x8(%rsp), %rcx
jb 0x2dc973
vmovss 0x1122cd(%rip), %xmm0 # 0x3eec88
vdivss %xmm1, %xmm0, %xmm0
vaddss %xmm0, %xmm3, %xmm0
vucomiss %xmm0, %xmm4
jbe 0x2dc9de
vmovaps %xmm4, %xmm0
jmp 0x2dc973
vmovaps %xmm4, %xmm0
movq 0x8(%rsp), %rcx
movl 0x10(%rsp), %edi
jmp 0x2dc973
vfmadd213ss %xmm2, %xmm4, %xmm1 # xmm1 = (xmm4 * xmm1) + xmm2
vmulss %xmm4, %xmm1, %xmm0
jmp 0x2dc973
movl 0x48(%rsp), %r14d
incl %r14d
jmp 0x2dc761
movq 0x68(%rsp), %rdx
incq %rdx
movq 0x50(%rsp), %rax
addq %rax, 0x18(%rsp)
jmp 0x2dc726
movq %rsi, %rdi
callq 0x5f3e0
movq 0xd8(%rsp), %rax
vmovaps 0xd0(%rsp), %xmm0
vmovaps %xmm0, 0x70(%rsp)
movq 0xe0(%rsp), %rcx
movq %rcx, 0x80(%rsp)
movl 0xe8(%rsp), %ecx
movl %ecx, 0x88(%rsp)
movq 0xf0(%rsp), %rcx
movq %rcx, 0x90(%rsp)
vmovups 0xf8(%rsp), %xmm0
vmovups %xmm0, 0x98(%rsp)
movl 0x108(%rsp), %ecx
movl %ecx, 0xa8(%rsp)
movq 0x110(%rsp), %rcx
movq %rcx, 0xb0(%rsp)
testq %rax, %rax
je 0x2dcab9
lock
decl (%rax)
jne 0x2dcab9
movq 0xd0(%rsp), %rsi
movq 0xf0(%rsp), %rdi
testq %rdi, %rdi
je 0x2dcab1
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2dcab9
movq %rsi, %rdi
callq 0x5f3e0
movq 0x128(%rsp), %rax
testq %rax, %rax
je 0x2dcaf0
lock
decl (%rax)
jne 0x2dcaf0
movq 0x120(%rsp), %rsi
movq 0x140(%rsp), %rdi
testq %rdi, %rdi
je 0x2dcae8
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2dcaf0
movq %rsi, %rdi
callq 0x5f3e0
movq (%r15), %rax
addq -0x18(%rax), %r15
leaq 0x70(%rsp), %rsi
movq %r15, %rdi
movq 0x118(%rsp), %rbx
movq %rbx, %rdx
movq 0x58(%rsp), %rcx
vzeroupper
callq 0x2d4cc4
cmpq $0x0, (%rbx)
pushq $-0x64
popq %rcx
je 0x2dcb30
movslq 0x38(%rbx), %rax
imulq 0x40(%rbx), %rax
xorl %ebx, %ebx
testq %rax, %rax
jne 0x2dcb32
movl %ecx, %ebx
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x2dcb63
lock
decl (%rax)
jne 0x2dcb63
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x2dcb5b
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2dcb63
movq %rsi, %rdi
callq 0x5f3e0
movl %ebx, %eax
addq $0x258, %rsp # imm = 0x258
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq %rsi, %rdi
callq 0x5f3e0
movq 0x128(%rsp), %rax
testq %rax, %rax
je 0x2dcb32
lock
decl (%rax)
jne 0x2dcb32
movq 0x120(%rsp), %rsi
movq 0x140(%rsp), %rdi
testq %rdi, %rdi
je 0x2dcbae
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2dcb32
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2dcb32
jmp 0x2dcd18
jmp 0x2dcd18
jmp 0x2dcd18
jmp 0x2dcd18
movq %rax, %rbx
jmp 0x2dcca0
movq %rax, %rbx
jmp 0x2dcc69
jmp 0x2dcd18
jmp 0x2dcbf0
jmp 0x2dcd18
jmp 0x2dcd18
movq %rax, %rbx
jmp 0x2dccd7
movq %rax, %rbx
movq 0x188(%rsp), %rax
testq %rax, %rax
je 0x2dcc32
lock
decl (%rax)
jne 0x2dcc32
movq 0x180(%rsp), %rsi
movq 0x1a0(%rsp), %rdi
testq %rdi, %rdi
jne 0x2dcc2c
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2dcc32
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x1d0(%rsp), %rax
testq %rax, %rax
je 0x2dcc69
lock
decl (%rax)
jne 0x2dcc69
movq 0x1c8(%rsp), %rsi
movq 0x1e8(%rsp), %rdi
testq %rdi, %rdi
jne 0x2dcc63
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2dcc69
movq (%rdi), %rax
callq *0x18(%rax)
movq 0xd8(%rsp), %rax
testq %rax, %rax
je 0x2dcca0
lock
decl (%rax)
jne 0x2dcca0
movq 0xd0(%rsp), %rsi
movq 0xf0(%rsp), %rdi
testq %rdi, %rdi
jne 0x2dcc9a
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2dcca0
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x128(%rsp), %rax
testq %rax, %rax
je 0x2dccd7
lock
decl (%rax)
jne 0x2dccd7
movq 0x120(%rsp), %rsi
movq 0x140(%rsp), %rdi
testq %rdi, %rdi
jne 0x2dccd1
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2dccd7
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x2dcd08
lock
decl (%rax)
jne 0x2dcd08
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
jne 0x2dcd02
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2dcd08
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
jmp 0x2dcd18
jmp 0x2dcd18
jmp 0x2dcd18
jmp 0x2dcd18
movq %rax, %rdi
callq 0x61d68
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/deconvolutiondepthwise_x86_fma.cpp |
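The unannotated stretches of assembly above are the tail of the activation switch in the fused depthwise-deconvolution kernel: a jump table selects between ReLU, leaky ReLU, clip, sigmoid, mish and hard-swish style branches, and the long vfmadd213ps chains that end in vpslld $0x17 / vpaddd appear to be a Cephes-style vectorized expf. Below is a scalar sketch of that approximation; the coefficients are the classic Cephes ones and are an assumption, since the binary only shows them as rip-relative constant loads. The sigmoid branch then finishes with a reciprocal (vrcpps plus one Newton step) to form 1/(1 + exp(-x)).

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Scalar model of the SIMD expf inlined by the activation code above.
static float expf_cephes(float x)
{
    // range clamp, matching the vminps/vmaxps pair
    x = std::min(x, 88.3762626647949f);
    x = std::max(x, -88.3762626647949f);

    // n = round(x / ln2): the fmadd with 0.5 followed by cvttps2dq
    float n = std::floor(x * 1.44269504088896341f + 0.5f);

    // r = x - n*ln2, with ln2 split into high/low parts for accuracy
    x -= n * 0.693359375f;
    x -= n * -2.12194440e-4f;

    // degree-5 polynomial for exp(r): the chain of vfmadd213ps above
    float y = 1.9875691500e-4f;
    y = y * x + 1.3981999507e-3f;
    y = y * x + 8.3334519073e-3f;
    y = y * x + 4.1665795894e-2f;
    y = y * x + 1.6666665459e-1f;
    y = y * x + 0.5f;
    y = y * x * x + x + 1.0f;

    // scale by 2^n through the float exponent field: vpslld $0x17 + vpaddd
    const uint32_t bits = (uint32_t)((int32_t)n + 127) << 23;
    float pow2n;
    std::memcpy(&pow2n, &bits, sizeof(bits));
    return y * pow2n;
}

int main()
{
    std::printf("%g vs %g\n", expf_cephes(1.0f), std::exp(1.0f)); // ~2.71828 both
    return 0;
}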
virtual thunk to ncnn::DeconvolutionDepthWise_x86_fma::forward(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const | int DeconvolutionDepthWise_x86_fma::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
    // convolve with NxN kernel
// value = value + bias
int w = bottom_blob.w;
int h = bottom_blob.h;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;
int outw = (w - 1) * stride_w + kernel_extent_w + output_pad_right;
int outh = (h - 1) * stride_h + kernel_extent_h + output_pad_bottom;
int out_elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
#if __AVX512F__
out_elempack = num_output % 16 == 0 ? 16 : num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#elif __AVX__
out_elempack = num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#else
out_elempack = num_output % 4 == 0 ? 4 : 1;
#endif
}
#endif // __SSE2__
size_t out_elemsize = elemsize / elempack * out_elempack;
Mat top_blob_bordered;
if (pad_left > 0 || pad_right > 0 || pad_top > 0 || pad_bottom > 0 || (output_w > 0 && output_h > 0))
{
top_blob_bordered.create(outw, outh, num_output / out_elempack, out_elemsize, out_elempack, opt.workspace_allocator);
}
else
{
top_blob_bordered = top_blob;
top_blob_bordered.create(outw, outh, num_output / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
}
if (top_blob_bordered.empty())
return -100;
const int maxk = kernel_w * kernel_h;
// depth-wise
if (channels * elempack == group && group == num_output)
{
#if __SSE2__
#if __AVX__
#if __AVX512F__
if (elempack == 16)
{
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < channels; g++)
{
float* outptr = top_blob_bordered.channel(g);
const float* kptr = (const float*)weight_data_tm + maxk * g * 16;
const Mat m = bottom_blob.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__m512 _sum = _mm512_setzero_ps();
if (bias_term)
{
_sum = _mm512_loadu_ps((const float*)bias_data + g * 16);
}
for (int y = 0; y < kernel_h; y++)
{
int sys = (i + y * dilation_h - (kernel_extent_h - 1));
if (sys < 0 || sys % stride_h != 0)
continue;
int sy = sys / stride_h;
if (sy >= h)
continue;
for (int x = 0; x < kernel_w; x++)
{
int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
if (sxs < 0 || sxs % stride_w != 0)
continue;
int sx = sxs / stride_w;
if (sx >= w)
continue;
const float* sptr = m.row(sy) + sx * 16;
int k = y * kernel_w + x;
__m512 _val = _mm512_loadu_ps(sptr);
__m512 _w = _mm512_loadu_ps(kptr + k * 16);
_sum = _mm512_fmadd_ps(_val, _w, _sum);
}
}
_sum = activation_avx512(_sum, activation_type, activation_params);
_mm512_storeu_ps(outptr, _sum);
outptr += 16;
}
}
}
}
}
#endif // __AVX512F__
if (elempack == 8)
{
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < channels; g++)
{
float* outptr = top_blob_bordered.channel(g);
const float* kptr = (const float*)weight_data_tm + maxk * g * 8;
const Mat m = bottom_blob.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__m256 _sum = _mm256_setzero_ps();
if (bias_term)
{
_sum = _mm256_loadu_ps((const float*)bias_data + g * 8);
}
for (int y = 0; y < kernel_h; y++)
{
int sys = (i + y * dilation_h - (kernel_extent_h - 1));
if (sys < 0 || sys % stride_h != 0)
continue;
int sy = sys / stride_h;
if (sy >= h)
continue;
for (int x = 0; x < kernel_w; x++)
{
int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
if (sxs < 0 || sxs % stride_w != 0)
continue;
int sx = sxs / stride_w;
if (sx >= w)
continue;
const float* sptr = m.row(sy) + sx * 8;
int k = y * kernel_w + x;
__m256 _val = _mm256_loadu_ps(sptr);
__m256 _w = _mm256_loadu_ps(kptr + k * 8);
_sum = _mm256_comp_fmadd_ps(_val, _w, _sum);
}
}
_sum = activation_avx(_sum, activation_type, activation_params);
_mm256_storeu_ps(outptr, _sum);
outptr += 8;
}
}
}
}
}
#endif // __AVX__
if (elempack == 4)
{
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < channels; g++)
{
float* outptr = top_blob_bordered.channel(g);
const float* kptr = (const float*)weight_data_tm + maxk * g * 4;
const Mat m = bottom_blob.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__m128 _sum = _mm_setzero_ps();
if (bias_term)
{
_sum = _mm_loadu_ps((const float*)bias_data + g * 4);
}
for (int y = 0; y < kernel_h; y++)
{
int sys = (i + y * dilation_h - (kernel_extent_h - 1));
if (sys < 0 || sys % stride_h != 0)
continue;
int sy = sys / stride_h;
if (sy >= h)
continue;
for (int x = 0; x < kernel_w; x++)
{
int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
if (sxs < 0 || sxs % stride_w != 0)
continue;
int sx = sxs / stride_w;
if (sx >= w)
continue;
const float* sptr = m.row(sy) + sx * 4;
int k = y * kernel_w + x;
__m128 _val = _mm_loadu_ps(sptr);
__m128 _w = _mm_loadu_ps(kptr + k * 4);
_sum = _mm_comp_fmadd_ps(_val, _w, _sum);
}
}
_sum = activation_sse(_sum, activation_type, activation_params);
_mm_storeu_ps(outptr, _sum);
outptr += 4;
}
}
}
}
}
#endif // __SSE2__
if (elempack == 1)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < channels; g++)
{
float* outptr = top_blob_bordered.channel(g);
const float* kptr = (const float*)weight_data_tm + maxk * g;
const Mat m = bottom_blob.channel(g);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
float sum = 0.f;
if (bias_term)
{
sum = bias_data[g];
}
for (int y = 0; y < kernel_h; y++)
{
int sys = (i + y * dilation_h - (kernel_extent_h - 1));
if (sys < 0 || sys % stride_h != 0)
continue;
int sy = sys / stride_h;
if (sy >= h)
continue;
const float* sptr = m.row(sy);
for (int x = 0; x < kernel_w; x++)
{
int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
if (sxs < 0 || sxs % stride_w != 0)
continue;
int sx = sxs / stride_w;
if (sx >= w)
continue;
float val = sptr[sx];
int k = y * kernel_w + x;
float w = kptr[k];
sum += val * w;
}
}
sum = activation_ss(sum, activation_type, activation_params);
outptr[0] = sum;
outptr++;
}
}
}
}
}
else
{
// group deconvolution
const int channels_g = channels * elempack / group;
const int num_output_g = num_output / group;
int g_elempack = 1;
int out_g_elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
#if __AVX512F__
g_elempack = channels_g % 16 == 0 ? 16 : channels_g % 8 == 0 ? 8 : channels_g % 4 == 0 ? 4 : 1;
out_g_elempack = num_output_g % 16 == 0 ? 16 : num_output_g % 8 == 0 ? 8 : num_output_g % 4 == 0 ? 4 : 1;
#elif __AVX__
g_elempack = channels_g % 8 == 0 ? 8 : channels_g % 4 == 0 ? 4 : 1;
out_g_elempack = num_output_g % 8 == 0 ? 8 : num_output_g % 4 == 0 ? 4 : 1;
#else
g_elempack = channels_g % 4 == 0 ? 4 : 1;
out_g_elempack = num_output_g % 4 == 0 ? 4 : 1;
#endif
}
#endif // __SSE2__
// unpacking
Mat bottom_blob_unpacked = bottom_blob;
if (elempack > g_elempack)
{
Option opt_p = opt;
opt_p.blob_allocator = opt.workspace_allocator;
convert_packing(bottom_blob, bottom_blob_unpacked, g_elempack, opt_p);
}
Mat top_blob_bordered_unpacked = top_blob_bordered;
if (out_g_elempack < out_elempack)
{
top_blob_bordered_unpacked.create(outw, outh, num_output / out_g_elempack, out_elemsize / out_elempack * out_g_elempack, out_g_elempack, opt.workspace_allocator);
if (top_blob_bordered_unpacked.empty())
return -100;
}
for (int g = 0; g < group; g++)
{
const Mat bottom_blob_g = bottom_blob_unpacked.channel_range(channels_g * g / g_elempack, channels_g / g_elempack);
Mat top_blob_bordered_g = top_blob_bordered_unpacked.channel_range(num_output_g * g / out_g_elempack, num_output_g / out_g_elempack);
const ncnn::Layer* op = group_ops[g];
Option opt_g = opt;
opt_g.blob_allocator = top_blob_bordered_unpacked.allocator;
// forward
op->forward(bottom_blob_g, top_blob_bordered_g, opt_g);
}
// packing
if (out_g_elempack < out_elempack)
{
convert_packing(top_blob_bordered_unpacked, top_blob_bordered, out_elempack, opt);
}
else
{
top_blob_bordered = top_blob_bordered_unpacked;
}
}
cut_padding(top_blob_bordered, top_blob, opt);
if (top_blob.empty())
return -100;
return 0;
} | movq (%rdi), %rax
addq -0x48(%rax), %rdi
jmp 0x2db148
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/deconvolutiondepthwise_x86_fma.cpp |
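The source column above repeats the deconvolution output-size arithmetic used throughout these kernels. A minimal worked check of that formula (a standalone sketch, not ncnn code):

#include <cassert>

// outw = (w - 1) * stride_w + dilation_w * (kernel_w - 1) + 1 + output_pad_right
static int deconv_outw(int w, int stride_w, int kernel_w, int dilation_w, int output_pad_right)
{
    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
    return (w - 1) * stride_w + kernel_extent_w + output_pad_right;
}

int main()
{
    // a 16-wide input, stride 2, 3-wide kernel, no dilation or extra padding
    assert(deconv_outw(16, 2, 3, 1, 0) == 33); // (16-1)*2 + 3 = 33
    return 0;
}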
virtual thunk to ncnn::DeconvolutionDepthWise_x86_avx::create_pipeline(ncnn::Option const&) | int DeconvolutionDepthWise_x86_avx::create_pipeline(const Option& opt)
{
const int maxk = kernel_w * kernel_h;
int channels = (weight_data_size / group) / maxk / (num_output / group) * group;
// depth-wise
if (channels == group && group == num_output)
{
int elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
#if __AVX512F__
elempack = channels % 16 == 0 ? 16 : channels % 8 == 0 ? 8 : channels % 4 == 0 ? 4 : 1;
#elif __AVX__
elempack = channels % 8 == 0 ? 8 : channels % 4 == 0 ? 4 : 1;
#else
elempack = channels % 4 == 0 ? 4 : 1;
#endif
}
#endif // __SSE2__
Mat weight_data_transposed(weight_data.w);
{
float* pt = weight_data_transposed;
const float* p = weight_data;
for (int i = 0; i < (channels / group) * (num_output / group) * group; i++)
{
for (int k = 0; k < maxk; k++)
{
pt[maxk - 1 - k] = p[k];
}
p += maxk;
pt += maxk;
}
}
#if __SSE2__
#if __AVX__
// pack16
#if __AVX512F__
if (elempack == 16)
{
Mat weight_data_r2 = weight_data_transposed.reshape(maxk, group);
convert_packing(weight_data_r2, weight_data_tm, 16, opt);
}
#endif // __AVX512F__
// pack8
if (elempack == 8)
{
Mat weight_data_r2 = weight_data_transposed.reshape(maxk, group);
convert_packing(weight_data_r2, weight_data_tm, 8, opt);
}
#endif // __AVX__
// pack4
if (elempack == 4)
{
Mat weight_data_r2 = weight_data_transposed.reshape(maxk, group);
convert_packing(weight_data_r2, weight_data_tm, 4, opt);
}
#endif // __SSE2__
if (elempack == 1)
{
weight_data_tm = weight_data_transposed;
}
return 0;
}
// group convolution
create_group_ops(opt);
if (opt.lightmode)
{
weight_data.release();
}
return 0;
} | pushq %rax
movq (%rdi), %rax
addq -0x30(%rax), %rdi
callq 0x2dcdbc
xorl %eax, %eax
popq %rcx
retq
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/deconvolutiondepthwise_x86_avx.cpp |
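The weight pre-pass in create_pipeline above writes pt[maxk - 1 - k] = p[k], i.e. it reverses every maxk-sized kernel window (a 180-degree rotation when the window is viewed as kh x kw), the usual trick for expressing a deconvolution as a plain correlation over the padded output. A minimal standalone sketch of that reversal:

#include <cassert>
#include <cstddef>
#include <vector>

static std::vector<float> flip_kernels(const std::vector<float>& w, int maxk)
{
    std::vector<float> out(w.size());
    for (std::size_t base = 0; base + maxk <= w.size(); base += maxk)
        for (int k = 0; k < maxk; k++)
            out[base + maxk - 1 - k] = w[base + k]; // same index math as the source above
    return out;
}

int main()
{
    // one 3x3 kernel stored row-major: the window is reversed end to end
    const std::vector<float> flipped = flip_kernels({1, 2, 3, 4, 5, 6, 7, 8, 9}, 9);
    assert(flipped == std::vector<float>({9, 8, 7, 6, 5, 4, 3, 2, 1}));
    return 0;
}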
ncnn::InstanceNorm::load_model(ncnn::ModelBin const&) | int InstanceNorm::load_model(const ModelBin& mb)
{
if (affine == 0)
return 0;
gamma_data = mb.load(channels, 1);
if (gamma_data.empty())
return -100;
beta_data = mb.load(channels, 1);
if (beta_data.empty())
return -100;
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x48, %rsp
xorl %ebx, %ebx
cmpl $0x0, 0xd8(%rdi)
je 0x2dfd3e
movq %rsi, %r15
movq %rdi, %r14
movl 0xd0(%rdi), %edx
movq (%rsi), %rax
movq %rsp, %r12
pushq $0x1
popq %rcx
movq %r12, %rdi
callq *0x10(%rax)
leaq 0xe0(%r14), %r13
movq 0x8(%r12), %rax
cmpq %r12, %r13
je 0x2dfbe7
testq %rax, %rax
je 0x2dfb55
lock
incl (%rax)
movq 0xe8(%r14), %rax
testq %rax, %rax
je 0x2dfb89
lock
decl (%rax)
jne 0x2dfb89
movq 0xe0(%r14), %rsi
movq 0x100(%r14), %rdi
testq %rdi, %rdi
je 0x2dfb81
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2dfb89
movq %rsi, %rdi
callq 0x5f3e0
movq (%rsp), %rax
movq %rax, 0xe0(%r14)
movq 0x8(%rsp), %rax
movq %rax, 0xe8(%r14)
movq 0x10(%rsp), %rcx
movq %rcx, 0xf0(%r14)
movl 0x18(%rsp), %ecx
movl %ecx, 0xf8(%r14)
movq 0x20(%rsp), %rcx
movq %rcx, 0x100(%r14)
movups 0x28(%rsp), %xmm0
movups %xmm0, 0x108(%r14)
movl 0x38(%rsp), %ecx
movl %ecx, 0x118(%r14)
movq 0x40(%rsp), %rcx
movq %rcx, 0x120(%r14)
testq %rax, %rax
je 0x2dfc0f
lock
decl (%rax)
jne 0x2dfc0f
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
je 0x2dfc07
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2dfc0f
movq %rsi, %rdi
callq 0x5f3e0
pushq $-0x64
popq %rbp
cmpq $0x0, (%r13)
je 0x2dfd3c
movslq 0x118(%r14), %rax
imulq 0x120(%r14), %rax
testq %rax, %rax
je 0x2dfd3c
movl 0xd0(%r14), %edx
movq (%r15), %rax
pushq $0x1
popq %rcx
movq %r12, %rdi
movq %r15, %rsi
callq *0x10(%rax)
leaq 0x128(%r14), %r15
movq 0x8(%rsp), %rax
cmpq %r12, %r15
je 0x2dfcfa
testq %rax, %rax
je 0x2dfc68
lock
incl (%rax)
movq 0x130(%r14), %rax
testq %rax, %rax
je 0x2dfc9c
lock
decl (%rax)
jne 0x2dfc9c
movq 0x128(%r14), %rsi
movq 0x148(%r14), %rdi
testq %rdi, %rdi
je 0x2dfc94
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2dfc9c
movq %rsi, %rdi
callq 0x5f3e0
movq (%rsp), %rax
movq %rax, 0x128(%r14)
movq 0x8(%rsp), %rax
movq %rax, 0x130(%r14)
movq 0x10(%rsp), %rcx
movq %rcx, 0x138(%r14)
movl 0x18(%rsp), %ecx
movl %ecx, 0x140(%r14)
movq 0x20(%rsp), %rcx
movq %rcx, 0x148(%r14)
movups 0x28(%rsp), %xmm0
movups %xmm0, 0x150(%r14)
movl 0x38(%rsp), %ecx
movl %ecx, 0x160(%r14)
movq 0x40(%rsp), %rcx
movq %rcx, 0x168(%r14)
testq %rax, %rax
je 0x2dfd22
lock
decl (%rax)
jne 0x2dfd22
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
je 0x2dfd1a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2dfd22
movq %rsi, %rdi
callq 0x5f3e0
cmpq $0x0, (%r15)
je 0x2dfd3c
movslq 0x160(%r14), %rax
imulq 0x168(%r14), %rax
testq %rax, %rax
jne 0x2dfd3e
movl %ebp, %ebx
movl %ebx, %eax
addq $0x48, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq %rax, %rbx
movq 0x8(%rsp), %rax
testq %rax, %rax
je 0x2dfdab
lock
decl (%rax)
jne 0x2dfdab
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
je 0x2dfd9b
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2dfdab
jmp 0x2dfdb5
jmp 0x2dfdb5
movq %rax, %rbx
movq 0x8(%rsp), %rax
testq %rax, %rax
je 0x2dfdab
lock
decl (%rax)
jne 0x2dfdab
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
jne 0x2dfda5
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2dfdab
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
jmp 0x2dfdb5
movq %rax, %rdi
callq 0x61d68
nop
| /csukuangfj[P]ncnn/src/layer/instancenorm.cpp |
ncnn::InstanceNorm::forward_inplace(ncnn::Mat&, ncnn::Option const&) const | int InstanceNorm::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
{
// x = (x - mean) / (sqrt(var + eps)) * gamma + beta
int w = bottom_top_blob.w;
int h = bottom_top_blob.h;
int c = bottom_top_blob.c;
int size = w * h;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < c; q++)
{
float* ptr = bottom_top_blob.channel(q);
// mean and var
float sum = 0.f;
float sqsum = 0.f;
for (int i = 0; i < size; i++)
{
sum += ptr[i];
//sqsum += ptr[i] * ptr[i];
}
float mean = sum / size;
float tmp = 0.f;
for (int i = 0; i < size; i++)
{
tmp = ptr[i] - mean;
sqsum += tmp * tmp;
}
float var = sqsum / size;
        // the var may be negative due to limited float accuracy
//float var = sqsum / size - mean * mean;
float a;
float b;
if (affine)
{
float gamma = gamma_data[q];
float beta = beta_data[q];
a = gamma / (sqrtf(var + eps));
b = -mean * a + beta;
}
else
{
a = 1.f / (sqrtf(var + eps));
b = -mean * a;
}
for (int i = 0; i < size; i++)
{
ptr[i] = ptr[i] * a + b;
}
}
return 0;
} | pushq %rbx
movl 0x30(%rsi), %eax
movl 0x38(%rsi), %ecx
imull 0x2c(%rsi), %eax
movq (%rsi), %rdx
movq 0x40(%rsi), %r8
cvtsi2ss %eax, %xmm1
movl 0xd8(%rdi), %r9d
xorl %r10d, %r10d
testl %eax, %eax
cmovlel %r10d, %eax
movq 0xe0(%rdi), %r11
testl %ecx, %ecx
cmovlel %r10d, %ecx
movq 0x128(%rdi), %rbx
movss 0x10ee88(%rip), %xmm0 # 0x3eec88
imulq 0x10(%rsi), %r8
divss %xmm1, %xmm0
movss 0x1159b3(%rip), %xmm1 # 0x3f57c4
movss 0x111b0b(%rip), %xmm2 # 0x3f1924
movaps 0x10e270(%rip), %xmm3 # 0x3ee090
cmpq %rcx, %r10
je 0x2dfef3
xorps %xmm4, %xmm4
xorl %esi, %esi
cmpq %rsi, %rax
je 0x2dfe3d
addss (%rdx,%rsi,4), %xmm4
incq %rsi
jmp 0x2dfe2e
mulss %xmm0, %xmm4
xorps %xmm5, %xmm5
xorl %esi, %esi
cmpq %rsi, %rax
je 0x2dfe61
movss (%rdx,%rsi,4), %xmm6
subss %xmm4, %xmm6
mulss %xmm6, %xmm6
addss %xmm6, %xmm5
incq %rsi
jmp 0x2dfe46
mulss %xmm0, %xmm5
testl %r9d, %r9d
je 0x2dfea0
movss (%rbx,%r10,4), %xmm7
addss 0xd4(%rdi), %xmm5
rsqrtss %xmm5, %xmm6
mulss %xmm6, %xmm5
mulss %xmm6, %xmm5
addss %xmm1, %xmm5
mulss %xmm2, %xmm6
mulss %xmm5, %xmm6
mulss (%r11,%r10,4), %xmm6
mulss %xmm6, %xmm4
subss %xmm4, %xmm7
jmp 0x2dfeca
addss 0xd4(%rdi), %xmm5
rsqrtss %xmm5, %xmm6
mulss %xmm6, %xmm5
mulss %xmm6, %xmm5
addss %xmm1, %xmm5
mulss %xmm2, %xmm6
mulss %xmm5, %xmm6
xorps %xmm3, %xmm4
mulss %xmm6, %xmm4
movaps %xmm4, %xmm7
xorl %esi, %esi
cmpq %rsi, %rax
je 0x2dfee8
movss (%rdx,%rsi,4), %xmm4
mulss %xmm6, %xmm4
addss %xmm7, %xmm4
movss %xmm4, (%rdx,%rsi,4)
incq %rsi
jmp 0x2dfecc
incq %r10
addq %r8, %rdx
jmp 0x2dfe20
xorl %eax, %eax
popq %rbx
retq
nop
| /csukuangfj[P]ncnn/src/layer/instancenorm.cpp |
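The loop above folds the whole normalization into a single a*x + b pass per channel, and the assembly replaces the divide by sqrt(var + eps) with rsqrtss plus what looks like one Newton-Raphson refinement step. A plain scalar sketch of one channel, mirroring the source:

#include <cmath>
#include <cstddef>

static void instance_norm_channel(float* ptr, std::size_t size,
                                  float gamma, float beta, float eps)
{
    float sum = 0.f;
    for (std::size_t i = 0; i < size; i++)
        sum += ptr[i];
    const float mean = sum / size;

    float sqsum = 0.f;
    for (std::size_t i = 0; i < size; i++)
    {
        const float t = ptr[i] - mean; // subtract mean first so var cannot drift negative
        sqsum += t * t;
    }
    const float var = sqsum / size;

    const float a = gamma / std::sqrt(var + eps);
    const float b = -mean * a + beta;

    for (std::size_t i = 0; i < size; i++)
        ptr[i] = ptr[i] * a + b;
}

int main()
{
    float x[4] = {1.f, 2.f, 3.f, 4.f};
    instance_norm_channel(x, 4, 1.f, 0.f, 1e-5f);
    // x is now roughly {-1.342, -0.447, 0.447, 1.342}
    return 0;
}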
virtual thunk to ncnn::Clip_x86::forward_inplace(ncnn::Mat&, ncnn::Option const&) const | int Clip_x86::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
{
int w = bottom_top_blob.w;
int h = bottom_top_blob.h;
int d = bottom_top_blob.d;
int channels = bottom_top_blob.c;
int elempack = bottom_top_blob.elempack;
int size = w * h * d * elempack;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
float* ptr = bottom_top_blob.channel(q);
int i = 0;
#if __SSE2__
#if __AVX__
#if __AVX512F__
__m512 _min_avx512 = _mm512_set1_ps(min);
__m512 _max_avx512 = _mm512_set1_ps(max);
for (; i + 15 < size; i += 16)
{
__m512 _p = _mm512_loadu_ps(ptr);
_p = _mm512_max_ps(_p, _min_avx512);
_p = _mm512_min_ps(_p, _max_avx512);
_mm512_storeu_ps(ptr, _p);
ptr += 16;
}
#endif // __AVX512F__
__m256 _min_avx = _mm256_set1_ps(min);
__m256 _max_avx = _mm256_set1_ps(max);
for (; i + 7 < size; i += 8)
{
__m256 _p = _mm256_loadu_ps(ptr);
_p = _mm256_max_ps(_p, _min_avx);
_p = _mm256_min_ps(_p, _max_avx);
_mm256_storeu_ps(ptr, _p);
ptr += 8;
}
#endif // __AVX__
__m128 _min = _mm_set1_ps(min);
__m128 _max = _mm_set1_ps(max);
for (; i + 3 < size; i += 4)
{
__m128 _p = _mm_load_ps(ptr);
_p = _mm_max_ps(_p, _min);
_p = _mm_min_ps(_p, _max);
_mm_store_ps(ptr, _p);
ptr += 4;
}
#endif // __SSE2__
for (; i < size; i++)
{
if (*ptr < min)
*ptr = min;
if (*ptr > max)
*ptr = max;
ptr++;
}
}
return 0;
} | pushq %rax
movq (%rdi), %rax
addq -0x58(%rax), %rdi
callq 0x2e004c
xorl %eax, %eax
popq %rcx
retq
nop
| /csukuangfj[P]ncnn/src/layer/x86/clip_x86.cpp |
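The Clip variants above and below differ only in the widest vector they peel (16, 8 or 4 lanes) before the scalar tail; the clamp itself is always the branch-free min(max(x, lo), hi). A minimal SSE sketch of the 4-wide step (a standalone illustration, not the ncnn entry point):

#include <xmmintrin.h>
#include <cstdio>

static inline __m128 clip4(__m128 x, float lo, float hi)
{
    x = _mm_max_ps(x, _mm_set1_ps(lo));    // lower bound first
    return _mm_min_ps(x, _mm_set1_ps(hi)); // then upper bound
}

int main()
{
    float v[4] = {-2.f, 0.5f, 3.f, 7.f};
    _mm_storeu_ps(v, clip4(_mm_loadu_ps(v), 0.f, 6.f)); // ReLU6-style clamp
    std::printf("%g %g %g %g\n", v[0], v[1], v[2], v[3]); // prints 0 0.5 3 6
    return 0;
}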
ncnn::Clip_x86_avx512::forward_inplace(ncnn::Mat&, ncnn::Option const&) const | int Clip_x86_avx512::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
{
int w = bottom_top_blob.w;
int h = bottom_top_blob.h;
int d = bottom_top_blob.d;
int channels = bottom_top_blob.c;
int elempack = bottom_top_blob.elempack;
int size = w * h * d * elempack;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
float* ptr = bottom_top_blob.channel(q);
int i = 0;
#if __SSE2__
#if __AVX__
#if __AVX512F__
__m512 _min_avx512 = _mm512_set1_ps(min);
__m512 _max_avx512 = _mm512_set1_ps(max);
for (; i + 15 < size; i += 16)
{
__m512 _p = _mm512_loadu_ps(ptr);
_p = _mm512_max_ps(_p, _min_avx512);
_p = _mm512_min_ps(_p, _max_avx512);
_mm512_storeu_ps(ptr, _p);
ptr += 16;
}
#endif // __AVX512F__
__m256 _min_avx = _mm256_set1_ps(min);
__m256 _max_avx = _mm256_set1_ps(max);
for (; i + 7 < size; i += 8)
{
__m256 _p = _mm256_loadu_ps(ptr);
_p = _mm256_max_ps(_p, _min_avx);
_p = _mm256_min_ps(_p, _max_avx);
_mm256_storeu_ps(ptr, _p);
ptr += 8;
}
#endif // __AVX__
__m128 _min = _mm_set1_ps(min);
__m128 _max = _mm_set1_ps(max);
for (; i + 3 < size; i += 4)
{
__m128 _p = _mm_load_ps(ptr);
_p = _mm_max_ps(_p, _min);
_p = _mm_min_ps(_p, _max);
_mm_store_ps(ptr, _p);
ptr += 4;
}
#endif // __SSE2__
for (; i < size; i++)
{
if (*ptr < min)
*ptr = min;
if (*ptr > max)
*ptr = max;
ptr++;
}
}
return 0;
} | movl 0x30(%rsi), %eax
imull 0x2c(%rsi), %eax
imull 0x34(%rsi), %eax
movl 0x38(%rsi), %ecx
imull 0x18(%rsi), %eax
movq (%rdi), %rdx
xorl %r8d, %r8d
testl %ecx, %ecx
cmovlel %r8d, %ecx
cmpq %rcx, %r8
je 0x2e02cc
movq 0x40(%rsi), %r9
imulq %r8, %r9
imulq 0x10(%rsi), %r9
addq (%rsi), %r9
movq -0x18(%rdx), %r10
vbroadcastss 0xd0(%rdi,%r10), %zmm0
vbroadcastss 0xd4(%rdi,%r10), %zmm1
xorl %r10d, %r10d
leal 0xf(%r10), %r11d
cmpl %eax, %r11d
jge 0x2e020b
vmaxps (%r9), %zmm0, %zmm2
vminps %zmm1, %zmm2, %zmm2
vmovups %zmm2, (%r9)
addq $0x40, %r9
addl $0x10, %r10d
jmp 0x2e01e6
movq -0x18(%rdx), %r11
vbroadcastss 0xd0(%rdi,%r11), %ymm0
vbroadcastss 0xd4(%rdi,%r11), %ymm1
leal 0x7(%r10), %r11d
cmpl %eax, %r11d
jge 0x2e0244
vmaxps (%r9), %ymm0, %ymm2
vminps %ymm1, %ymm2, %ymm2
vmovups %ymm2, (%r9)
addq $0x20, %r9
addl $0x8, %r10d
jmp 0x2e0223
movq -0x18(%rdx), %r11
vbroadcastss 0xd0(%rdi,%r11), %xmm0
vbroadcastss 0xd4(%rdi,%r11), %xmm1
leal 0x3(%r10), %r11d
cmpl %eax, %r11d
jge 0x2e02bf
vmaxps (%r9), %xmm0, %xmm2
vminps %xmm1, %xmm2, %xmm2
vmovaps %xmm2, (%r9)
addq $0x10, %r9
addl $0x4, %r10d
jmp 0x2e025c
vmovss (%r9), %xmm0
movq -0x18(%rdx), %r11
vmovss 0xd0(%rdi,%r11), %xmm1
vucomiss %xmm1, %xmm0
jae 0x2e02a3
vmovss %xmm1, (%r9)
movq -0x18(%rdx), %r11
vmovaps %xmm1, %xmm0
vmovss 0xd4(%rdi,%r11), %xmm1
vucomiss %xmm1, %xmm0
jbe 0x2e02b8
vmovss %xmm1, (%r9)
addq $0x4, %r9
incl %r10d
cmpl %eax, %r10d
jl 0x2e027d
incq %r8
jmp 0x2e01b6
xorl %eax, %eax
vzeroupper
retq
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/clip_x86_avx512.cpp |
virtual thunk to ncnn::Clip_x86_avx512::forward_inplace(ncnn::Mat&, ncnn::Option const&) const | int Clip_x86_avx512::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
{
int w = bottom_top_blob.w;
int h = bottom_top_blob.h;
int d = bottom_top_blob.d;
int channels = bottom_top_blob.c;
int elempack = bottom_top_blob.elempack;
int size = w * h * d * elempack;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
float* ptr = bottom_top_blob.channel(q);
int i = 0;
#if __SSE2__
#if __AVX__
#if __AVX512F__
__m512 _min_avx512 = _mm512_set1_ps(min);
__m512 _max_avx512 = _mm512_set1_ps(max);
for (; i + 15 < size; i += 16)
{
__m512 _p = _mm512_loadu_ps(ptr);
_p = _mm512_max_ps(_p, _min_avx512);
_p = _mm512_min_ps(_p, _max_avx512);
_mm512_storeu_ps(ptr, _p);
ptr += 16;
}
#endif // __AVX512F__
__m256 _min_avx = _mm256_set1_ps(min);
__m256 _max_avx = _mm256_set1_ps(max);
for (; i + 7 < size; i += 8)
{
__m256 _p = _mm256_loadu_ps(ptr);
_p = _mm256_max_ps(_p, _min_avx);
_p = _mm256_min_ps(_p, _max_avx);
_mm256_storeu_ps(ptr, _p);
ptr += 8;
}
#endif // __AVX__
__m128 _min = _mm_set1_ps(min);
__m128 _max = _mm_set1_ps(max);
for (; i + 3 < size; i += 4)
{
__m128 _p = _mm_load_ps(ptr);
_p = _mm_max_ps(_p, _min);
_p = _mm_min_ps(_p, _max);
_mm_store_ps(ptr, _p);
ptr += 4;
}
#endif // __SSE2__
for (; i < size; i++)
{
if (*ptr < min)
*ptr = min;
if (*ptr > max)
*ptr = max;
ptr++;
}
}
return 0;
} | pushq %rax
movq (%rdi), %rax
addq -0x58(%rax), %rdi
callq 0x2e0198
xorl %eax, %eax
popq %rcx
retq
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/clip_x86_avx512.cpp |
virtual thunk to ncnn::Clip_x86_fma::forward_inplace(ncnn::Mat&, ncnn::Option const&) const | int Clip_x86_fma::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
{
int w = bottom_top_blob.w;
int h = bottom_top_blob.h;
int d = bottom_top_blob.d;
int channels = bottom_top_blob.c;
int elempack = bottom_top_blob.elempack;
int size = w * h * d * elempack;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
float* ptr = bottom_top_blob.channel(q);
int i = 0;
#if __SSE2__
#if __AVX__
#if __AVX512F__
__m512 _min_avx512 = _mm512_set1_ps(min);
__m512 _max_avx512 = _mm512_set1_ps(max);
for (; i + 15 < size; i += 16)
{
__m512 _p = _mm512_loadu_ps(ptr);
_p = _mm512_max_ps(_p, _min_avx512);
_p = _mm512_min_ps(_p, _max_avx512);
_mm512_storeu_ps(ptr, _p);
ptr += 16;
}
#endif // __AVX512F__
__m256 _min_avx = _mm256_set1_ps(min);
__m256 _max_avx = _mm256_set1_ps(max);
for (; i + 7 < size; i += 8)
{
__m256 _p = _mm256_loadu_ps(ptr);
_p = _mm256_max_ps(_p, _min_avx);
_p = _mm256_min_ps(_p, _max_avx);
_mm256_storeu_ps(ptr, _p);
ptr += 8;
}
#endif // __AVX__
__m128 _min = _mm_set1_ps(min);
__m128 _max = _mm_set1_ps(max);
for (; i + 3 < size; i += 4)
{
__m128 _p = _mm_load_ps(ptr);
_p = _mm_max_ps(_p, _min);
_p = _mm_min_ps(_p, _max);
_mm_store_ps(ptr, _p);
ptr += 4;
}
#endif // __SSE2__
for (; i < size; i++)
{
if (*ptr < min)
*ptr = min;
if (*ptr > max)
*ptr = max;
ptr++;
}
}
return 0;
} | pushq %rax
movq (%rdi), %rax
addq -0x58(%rax), %rdi
callq 0x2e032c
xorl %eax, %eax
popq %rcx
retq
nopl (%rax)
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/clip_x86_fma.cpp |
ncnn::Clip_x86_avx::forward_inplace(ncnn::Mat&, ncnn::Option const&) const | int Clip_x86_avx::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
{
int w = bottom_top_blob.w;
int h = bottom_top_blob.h;
int d = bottom_top_blob.d;
int channels = bottom_top_blob.c;
int elempack = bottom_top_blob.elempack;
int size = w * h * d * elempack;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
float* ptr = bottom_top_blob.channel(q);
int i = 0;
#if __SSE2__
#if __AVX__
#if __AVX512F__
__m512 _min_avx512 = _mm512_set1_ps(min);
__m512 _max_avx512 = _mm512_set1_ps(max);
for (; i + 15 < size; i += 16)
{
__m512 _p = _mm512_loadu_ps(ptr);
_p = _mm512_max_ps(_p, _min_avx512);
_p = _mm512_min_ps(_p, _max_avx512);
_mm512_storeu_ps(ptr, _p);
ptr += 16;
}
#endif // __AVX512F__
__m256 _min_avx = _mm256_set1_ps(min);
__m256 _max_avx = _mm256_set1_ps(max);
for (; i + 7 < size; i += 8)
{
__m256 _p = _mm256_loadu_ps(ptr);
_p = _mm256_max_ps(_p, _min_avx);
_p = _mm256_min_ps(_p, _max_avx);
_mm256_storeu_ps(ptr, _p);
ptr += 8;
}
#endif // __AVX__
__m128 _min = _mm_set1_ps(min);
__m128 _max = _mm_set1_ps(max);
for (; i + 3 < size; i += 4)
{
__m128 _p = _mm_load_ps(ptr);
_p = _mm_max_ps(_p, _min);
_p = _mm_min_ps(_p, _max);
_mm_store_ps(ptr, _p);
ptr += 4;
}
#endif // __SSE2__
for (; i < size; i++)
{
if (*ptr < min)
*ptr = min;
if (*ptr > max)
*ptr = max;
ptr++;
}
}
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
movl 0x30(%rsi), %eax
imull 0x2c(%rsi), %eax
imull 0x34(%rsi), %eax
movl 0x38(%rsi), %ecx
imull 0x18(%rsi), %eax
movq (%rdi), %rdx
xorl %r8d, %r8d
testl %ecx, %ecx
cmovlel %r8d, %ecx
cmpq %rcx, %r8
je 0x2e05d9
movq (%rsi), %r9
movq 0x10(%rsi), %rbx
movq 0x40(%rsi), %r11
movq %r11, %r14
imulq %r8, %r14
imulq %rbx, %r14
addq %r9, %r14
movq -0x18(%rdx), %r10
vbroadcastss 0xd0(%rdi,%r10), %ymm0
vbroadcastss 0xd4(%rdi,%r10), %ymm1
xorl %r10d, %r10d
xorl %r15d, %r15d
leal 0x7(%r15), %ebp
cmpl %eax, %ebp
jge 0x2e0542
vmaxps (%r14), %ymm0, %ymm2
vminps %ymm1, %ymm2, %ymm2
vmovups %ymm2, (%r14)
addq $0x20, %r14
addl $0x8, %r15d
addq $0x8, %r10
jmp 0x2e051e
movq -0x18(%rdx), %r12
vbroadcastss 0xd0(%rdi,%r12), %xmm0
vbroadcastss 0xd4(%rdi,%r12), %xmm1
leal 0x3(%r15), %ebp
cmpl %eax, %ebp
jge 0x2e057e
vmaxps (%r14), %xmm0, %xmm2
vminps %xmm1, %xmm2, %xmm2
vmovaps %xmm2, (%r14)
addq $0x10, %r14
addl $0x4, %r15d
addq $0x4, %r10
jmp 0x2e055a
imulq %rbx, %r11
imulq %r8, %r11
addq %r11, %r9
cmpl %eax, %r10d
jge 0x2e05d1
vmovss (%r9,%r10,4), %xmm0
movq -0x18(%rdx), %r11
vmovss 0xd0(%rdi,%r11), %xmm1
vucomiss %xmm1, %xmm0
jae 0x2e05b6
vmovss %xmm1, (%r9,%r10,4)
movq -0x18(%rdx), %r11
vmovaps %xmm1, %xmm0
vmovss 0xd4(%rdi,%r11), %xmm1
vucomiss %xmm1, %xmm0
jbe 0x2e05cc
vmovss %xmm1, (%r9,%r10,4)
incq %r10
jmp 0x2e0589
incq %r8
jmp 0x2e04de
xorl %eax, %eax
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/clip_x86_avx.cpp |
ncnn::YoloDetectionOutput::load_param(ncnn::ParamDict const&) | int YoloDetectionOutput::load_param(const ParamDict& pd)
{
num_class = pd.get(0, 20);
num_box = pd.get(1, 5);
confidence_threshold = pd.get(2, 0.01f);
nms_threshold = pd.get(3, 0.45f);
biases = pd.get(4, Mat());
return 0;
} | pushq %r15
pushq %r14
pushq %rbx
subq $0xa0, %rsp
movq %rsi, %r14
movq %rdi, %rbx
pushq $0x14
popq %rdx
movq %rsi, %rdi
xorl %esi, %esi
callq 0x718a6
movl %eax, 0xd0(%rbx)
pushq $0x1
popq %rsi
pushq $0x5
popq %rdx
movq %r14, %rdi
callq 0x718a6
movl %eax, 0xd4(%rbx)
pushq $0x2
popq %rsi
movss 0x1189de(%rip), %xmm0 # 0x3f9304
movq %r14, %rdi
callq 0x718c0
movss %xmm0, 0xd8(%rbx)
pushq $0x3
popq %rsi
movss 0x10d6cb(%rip), %xmm0 # 0x3ee00c
movq %r14, %rdi
callq 0x718c0
movss %xmm0, 0xdc(%rbx)
leaq 0x50(%rsp), %rcx
andq $0x0, 0x40(%rcx)
xorps %xmm0, %xmm0
movaps %xmm0, (%rcx)
movups %xmm0, 0xc(%rcx)
movaps %xmm0, 0x20(%rcx)
movups %xmm0, 0x2c(%rcx)
movq %rsp, %r15
pushq $0x4
popq %rdx
movq %r15, %rdi
movq %r14, %rsi
callq 0x718da
leaq 0xe0(%rbx), %rcx
movq 0x8(%rsp), %rax
cmpq %r15, %rcx
je 0x2e0a2a
testq %rax, %rax
je 0x2e099b
lock
incl (%rax)
movq 0xe8(%rbx), %rax
testq %rax, %rax
je 0x2e09cf
lock
decl (%rax)
jne 0x2e09cf
movq 0xe0(%rbx), %rsi
movq 0x100(%rbx), %rdi
testq %rdi, %rdi
je 0x2e09c7
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2e09cf
movq %rsi, %rdi
callq 0x5f3e0
movq (%rsp), %rax
movq %rax, 0xe0(%rbx)
movq 0x8(%rsp), %rax
movq %rax, 0xe8(%rbx)
movq 0x10(%rsp), %rcx
movq %rcx, 0xf0(%rbx)
movl 0x18(%rsp), %ecx
movl %ecx, 0xf8(%rbx)
movq 0x20(%rsp), %rcx
movq %rcx, 0x100(%rbx)
movups 0x28(%rsp), %xmm0
movups %xmm0, 0x108(%rbx)
movl 0x38(%rsp), %ecx
movl %ecx, 0x118(%rbx)
movq 0x40(%rsp), %rcx
movq %rcx, 0x120(%rbx)
testq %rax, %rax
je 0x2e0a52
lock
decl (%rax)
jne 0x2e0a52
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
je 0x2e0a4a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2e0a52
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x40(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, (%rsp)
movups %xmm0, 0xc(%rsp)
andl $0x0, 0x38(%rsp)
movups %xmm0, 0x28(%rsp)
movq 0x58(%rsp), %rax
testq %rax, %rax
je 0x2e0a9c
lock
decl (%rax)
jne 0x2e0a9c
movq 0x50(%rsp), %rsi
movq 0x70(%rsp), %rdi
testq %rdi, %rdi
je 0x2e0a94
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2e0a9c
movq %rsi, %rdi
callq 0x5f3e0
xorl %eax, %eax
addq $0xa0, %rsp
popq %rbx
popq %r14
popq %r15
retq
movq %rax, %rbx
movq 0x8(%rsp), %rax
testq %rax, %rax
je 0x2e0adb
lock
decl (%rax)
jne 0x2e0adb
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
jne 0x2e0ad5
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2e0adb
movq (%rdi), %rax
callq *0x18(%rax)
andq $0x0, 0x40(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, (%rsp)
movups %xmm0, 0xc(%rsp)
movups %xmm0, 0x28(%rsp)
andl $0x0, 0x38(%rsp)
jmp 0x2e0b02
jmp 0x2e0b38
jmp 0x2e0b38
jmp 0x2e0b38
movq %rax, %rbx
movq 0x58(%rsp), %rax
testq %rax, %rax
je 0x2e0b30
lock
decl (%rax)
jne 0x2e0b30
movq 0x50(%rsp), %rsi
movq 0x70(%rsp), %rdi
testq %rdi, %rdi
jne 0x2e0b2a
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2e0b30
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
| /csukuangfj[P]ncnn/src/layer/yolodetectionoutput.cpp |
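For reference, the parameter ids read here correspond to a .param line roughly like the hypothetical one below. The defaults in the code are 0=20 (num_class), 1=5 (num_box), 2=0.01 (confidence_threshold) and 3=0.45 (nms_threshold), and ncnn encodes the Mat parameter with id 4 as -23304 with a leading element count; the anchor values shown are illustrative, not taken from this binary:

YoloDetectionOutput detection_out 1 1 conv_reg output 0=20 1=5 2=0.01 3=0.45 -23304=10,1.08,1.19,3.42,4.41,6.63,11.38,9.42,5.11,16.62,10.52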
ncnn::YoloDetectionOutput::create_pipeline(ncnn::Option const&) | int YoloDetectionOutput::create_pipeline(const Option& opt)
{
{
softmax = ncnn::create_layer(ncnn::LayerType::Softmax);
ncnn::ParamDict pd;
pd.set(0, 0); // axis
softmax->load_param(pd);
softmax->create_pipeline(opt);
}
return 0;
} | pushq %r15
pushq %r14
pushq %rbx
subq $0x10, %rsp
movq %rsi, %rbx
movq %rdi, %r14
pushq $0x20
popq %rdi
callq 0x782bf
movq %rax, 0x128(%r14)
movq %rsp, %r15
movq %r15, %rdi
callq 0x71548
movq %r15, %rdi
xorl %esi, %esi
xorl %edx, %edx
callq 0x7193a
movq 0x128(%r14), %rdi
movq (%rdi), %rax
movq %rsp, %rsi
callq *0x10(%rax)
movq 0x128(%r14), %rdi
movq (%rdi), %rax
movq %rbx, %rsi
callq *0x20(%rax)
movq %rsp, %rdi
callq 0x71614
xorl %eax, %eax
addq $0x10, %rsp
popq %rbx
popq %r14
popq %r15
retq
movq %rax, %rbx
movq %rsp, %rdi
callq 0x71614
movq %rbx, %rdi
callq 0x5f340
| /csukuangfj[P]ncnn/src/layer/yolodetectionoutput.cpp |
ncnn::YoloDetectionOutput::forward_inplace(std::vector<ncnn::Mat, std::allocator<ncnn::Mat>>&, ncnn::Option const&) const | int YoloDetectionOutput::forward_inplace(std::vector<Mat>& bottom_top_blobs, const Option& opt) const
{
    // gather all boxes
std::vector<BBoxRect> all_bbox_rects;
std::vector<float> all_bbox_scores;
for (size_t b = 0; b < bottom_top_blobs.size(); b++)
{
Mat& bottom_top_blob = bottom_top_blobs[b];
int w = bottom_top_blob.w;
int h = bottom_top_blob.h;
int channels = bottom_top_blob.c;
const int channels_per_box = channels / num_box;
// anchor coord + box score + num_class
if (channels_per_box != 4 + 1 + num_class)
return -1;
std::vector<std::vector<BBoxRect> > all_box_bbox_rects;
std::vector<std::vector<float> > all_box_bbox_scores;
all_box_bbox_rects.resize(num_box);
all_box_bbox_scores.resize(num_box);
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < num_box; pp++)
{
int p = pp * channels_per_box;
const float bias_w = biases[pp * 2];
const float bias_h = biases[pp * 2 + 1];
const float* xptr = bottom_top_blob.channel(p);
const float* yptr = bottom_top_blob.channel(p + 1);
const float* wptr = bottom_top_blob.channel(p + 2);
const float* hptr = bottom_top_blob.channel(p + 3);
const float* box_score_ptr = bottom_top_blob.channel(p + 4);
// softmax class scores
Mat scores = bottom_top_blob.channel_range(p + 5, num_class);
softmax->forward_inplace(scores, opt);
for (int i = 0; i < h; i++)
{
for (int j = 0; j < w; j++)
{
// region box
float bbox_cx = (j + sigmoid(xptr[0])) / w;
float bbox_cy = (i + sigmoid(yptr[0])) / h;
float bbox_w = expf(wptr[0]) * bias_w / w;
float bbox_h = expf(hptr[0]) * bias_h / h;
float bbox_xmin = bbox_cx - bbox_w * 0.5f;
float bbox_ymin = bbox_cy - bbox_h * 0.5f;
float bbox_xmax = bbox_cx + bbox_w * 0.5f;
float bbox_ymax = bbox_cy + bbox_h * 0.5f;
// box score
float box_score = sigmoid(box_score_ptr[0]);
// find class index with max class score
int class_index = 0;
float class_score = 0.f;
for (int q = 0; q < num_class; q++)
{
float score = scores.channel(q).row(i)[j];
if (score > class_score)
{
class_index = q;
class_score = score;
}
}
// NCNN_LOGE("%d %f %f", class_index, box_score, class_score);
float confidence = box_score * class_score;
if (confidence >= confidence_threshold)
{
BBoxRect c = {bbox_xmin, bbox_ymin, bbox_xmax, bbox_ymax, class_index};
all_box_bbox_rects[pp].push_back(c);
all_box_bbox_scores[pp].push_back(confidence);
}
xptr++;
yptr++;
wptr++;
hptr++;
box_score_ptr++;
}
}
}
for (int i = 0; i < num_box; i++)
{
const std::vector<BBoxRect>& box_bbox_rects = all_box_bbox_rects[i];
const std::vector<float>& box_bbox_scores = all_box_bbox_scores[i];
all_bbox_rects.insert(all_bbox_rects.end(), box_bbox_rects.begin(), box_bbox_rects.end());
all_bbox_scores.insert(all_bbox_scores.end(), box_bbox_scores.begin(), box_bbox_scores.end());
}
}
// global sort inplace
qsort_descent_inplace(all_bbox_rects, all_bbox_scores);
// apply nms
std::vector<size_t> picked;
nms_sorted_bboxes(all_bbox_rects, picked, nms_threshold);
// select
std::vector<BBoxRect> bbox_rects;
std::vector<float> bbox_scores;
for (size_t i = 0; i < picked.size(); i++)
{
size_t z = picked[i];
bbox_rects.push_back(all_bbox_rects[z]);
bbox_scores.push_back(all_bbox_scores[z]);
}
// fill result
int num_detected = static_cast<int>(bbox_rects.size());
if (num_detected == 0)
return 0;
Mat& top_blob = bottom_top_blobs[0];
top_blob.create(6, num_detected, 4u, opt.blob_allocator);
if (top_blob.empty())
return -100;
for (int i = 0; i < num_detected; i++)
{
const BBoxRect& r = bbox_rects[i];
float score = bbox_scores[i];
float* outptr = top_blob.row(i);
        outptr[0] = r.label + 1.0f; // +1 to prepend the background class
outptr[1] = score;
outptr[2] = r.xmin;
outptr[3] = r.ymin;
outptr[4] = r.xmax;
outptr[5] = r.ymax;
}
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x1c8, %rsp # imm = 0x1C8
movq %rdx, 0x108(%rsp)
movq %rsi, 0x100(%rsp)
movq %rdi, %rbp
xorps %xmm0, %xmm0
leaq 0x60(%rsp), %rax
andq $0x0, 0x10(%rax)
leaq 0xe0(%rsp), %rcx
andq $0x0, 0x10(%rcx)
movaps %xmm0, (%rax)
movaps %xmm0, (%rcx)
pushq $-0x1
popq %rax
movq %rax, 0x40(%rsp)
xorl %edi, %edi
movq %rbp, 0x48(%rsp)
movq 0x100(%rsp), %rax
movq (%rax), %r12
movq 0x8(%rax), %rax
subq %r12, %rax
cqto
pushq $0x48
popq %rcx
idivq %rcx
cmpq %rax, %rdi
jae 0x2e1248
imulq $0x48, %rdi, %rbx
movl 0x38(%r12,%rbx), %eax
movslq 0xd4(%rbp), %rsi
cltd
idivl %esi
movq %rbp, %rcx
movl %eax, %ebp
movl 0xd0(%rcx), %eax
addl $0x5, %eax
cmpl %eax, %ebp
jne 0x2e15a2
movq %rdi, 0x160(%rsp)
movsd 0x2c(%r12,%rbx), %xmm0
movaps %xmm0, 0x50(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, (%rsp)
andq $0x0, 0x10(%rsp)
movaps %xmm0, 0x20(%rsp)
andq $0x0, 0x30(%rsp)
movq %rsp, %rdi
callq 0x2c228a
movq 0x48(%rsp), %rax
movslq 0xd4(%rax), %rsi
leaq 0x20(%rsp), %rdi
callq 0x2c22c0
addq %rbx, %r12
movaps 0x50(%rsp), %xmm4
cvtdq2ps %xmm4, %xmm0
rcpps %xmm0, %xmm1
movaps %xmm0, %xmm2
mulps %xmm1, %xmm2
movaps 0x10d3bc(%rip), %xmm3 # 0x3ee0b0
subps %xmm2, %xmm3
mulps %xmm1, %xmm3
movd %xmm4, %ecx
testl %ecx, %ecx
movl $0x0, %eax
cmovlel %eax, %ecx
movq %rcx, 0x180(%rsp)
pshufd $0x55, %xmm4, %xmm2 # xmm2 = xmm4[1,1,1,1]
movd %xmm2, %ecx
testl %ecx, %ecx
cmovlel %eax, %ecx
movq %rcx, 0x178(%rsp)
addps %xmm1, %xmm3
movaps %xmm3, 0x1a0(%rsp)
movslq %ebp, %rax
movq %rax, 0x170(%rsp)
movss 0x10df44(%rip), %xmm1 # 0x3eec88
movaps %xmm1, %xmm2
divss %xmm0, %xmm2
movss %xmm2, 0x84(%rsp)
shufps $0x55, %xmm0, %xmm0 # xmm0 = xmm0[1,1,1,1]
divss %xmm0, %xmm1
movss %xmm1, 0x80(%rsp)
xorl %r15d, %r15d
movq 0x48(%rsp), %rbp
movq %r12, 0x168(%rsp)
movslq 0xd4(%rbp), %rax
cmpq %rax, %r15
jge 0x2e11cb
movq %r15, %r14
imulq 0x170(%rsp), %r14
movq 0xe0(%rbp), %rax
movsd (%rax,%r15,8), %xmm0
movaps %xmm0, 0x130(%rsp)
movq (%r12), %r13
movq %rbp, %r10
movq 0x10(%r12), %rbp
movq 0x40(%r12), %rbx
leaq 0x5(%r14), %rcx
movl 0xd0(%r10), %edx
movslq 0x2c(%r12), %rsi
movslq 0x30(%r12), %rdi
movslq 0x34(%r12), %rax
imulq %rbx, %rcx
imulq %rbp, %rcx
addq %r13, %rcx
movl 0x18(%r12), %r8d
movq 0x20(%r12), %r9
movq %rcx, 0x90(%rsp)
andq $0x0, 0x98(%rsp)
movq %rbp, 0xa0(%rsp)
movl %r8d, 0xa8(%rsp)
movq %r9, 0xb0(%rsp)
movl $0x4, 0xb8(%rsp)
movl %esi, 0xbc(%rsp)
movl %edi, 0xc0(%rsp)
movl %eax, 0xc4(%rsp)
movl %edx, 0xc8(%rsp)
imulq %rsi, %rdi
imulq %rbp, %rax
imulq %rdi, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rbp
movq %rax, 0xd0(%rsp)
movl 0x28(%r12), %eax
movl %eax, 0xb8(%rsp)
movq 0x128(%r10), %rdi
movq (%rdi), %rax
leaq 0x90(%rsp), %rsi
movq 0x108(%rsp), %rdx
callq *0x48(%rax)
movq %r15, 0x110(%rsp)
leaq 0x4(%r14), %rax
imulq %rbx, %rax
imulq %rbp, %rax
addq %r13, %rax
movq %rax, 0x128(%rsp)
leaq 0x3(%r14), %rax
imulq %rbx, %rax
imulq %rbp, %rax
addq %r13, %rax
movq %rax, 0x120(%rsp)
leaq 0x2(%r14), %r15
imulq %rbx, %r15
imulq %rbp, %r15
addq %r13, %r15
leaq 0x1(%r14), %r12
imulq %rbx, %r12
imulq %rbp, %r12
addq %r13, %r12
imulq %r14, %rbx
imulq %rbp, %rbx
addq %r13, %rbx
movaps 0x130(%rsp), %xmm0
mulps 0x10d542(%rip), %xmm0 # 0x3ee430
xorl %ecx, %ecx
movq 0x48(%rsp), %rbp
movq %rbx, %rax
movaps %xmm0, 0x130(%rsp)
cmpq 0x178(%rsp), %rcx
je 0x2e117c
movq %rcx, 0x118(%rsp)
cvtsi2ss %ecx, %xmm1
movss %xmm1, 0x88(%rsp)
xorl %r14d, %r14d
xorl %r13d, %r13d
cmpq 0x180(%rsp), %r13
je 0x2e116c
cvtsi2ss %r13d, %xmm1
movss %xmm1, 0x50(%rsp)
movq %rax, %rbx
movss (%rax), %xmm0
movaps 0x10d140(%rip), %xmm1 # 0x3ee090
xorps %xmm1, %xmm0
callq 0x5f410
movss 0x10dd28(%rip), %xmm1 # 0x3eec88
addss %xmm1, %xmm0
divss %xmm0, %xmm1
addss 0x50(%rsp), %xmm1
mulss 0x84(%rsp), %xmm1
movaps %xmm1, 0x50(%rsp)
movss (%r12), %xmm0
xorps 0x10d107(%rip), %xmm0 # 0x3ee090
callq 0x5f410
movss 0x10dcf2(%rip), %xmm1 # 0x3eec88
addss %xmm1, %xmm0
divss %xmm0, %xmm1
addss 0x88(%rsp), %xmm1
movaps %xmm1, 0x1b0(%rsp)
movss (%r15), %xmm0
movss %xmm0, 0x150(%rsp)
movq 0x120(%rsp), %rax
movss (%rax), %xmm0
callq 0x5f410
movaps %xmm0, 0x140(%rsp)
movss 0x150(%rsp), %xmm0
callq 0x5f410
unpcklps 0x140(%rsp), %xmm0 # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
mulps 0x130(%rsp), %xmm0
mulps 0x1a0(%rsp), %xmm0
movaps 0x50(%rsp), %xmm1
movaps %xmm0, 0x150(%rsp)
addss %xmm0, %xmm1
movaps %xmm1, 0x140(%rsp)
movq 0x128(%rsp), %rax
movss (%rax), %xmm0
xorps 0x10d068(%rip), %xmm0 # 0x3ee090
callq 0x5f410
movl 0xd0(%rbp), %ecx
movslq 0xbc(%rsp), %rax
movq 0xa0(%rsp), %rdx
xorl %esi, %esi
testl %ecx, %ecx
cmovlel %esi, %ecx
movq 0x118(%rsp), %rdi
imulq %rdx, %rdi
imulq %rax, %rdi
addq %r14, %rdi
addq 0x90(%rsp), %rdi
imulq 0xd0(%rsp), %rdx
xorps %xmm1, %xmm1
xorl %eax, %eax
cmpq %rsi, %rcx
je 0x2e108e
movss (%rdi), %xmm2
ucomiss %xmm2, %xmm1
maxss %xmm2, %xmm1
cmovbl %esi, %eax
incq %rsi
addq %rdx, %rdi
jmp 0x2e1073
addss 0x10dbf2(%rip), %xmm0 # 0x3eec88
divss %xmm0, %xmm1
movss %xmm1, 0x8c(%rsp)
ucomiss 0xd8(%rbp), %xmm1
jb 0x2e113f
movaps 0x1b0(%rsp), %xmm2
mulss 0x80(%rsp), %xmm2
movaps 0x150(%rsp), %xmm3
movaps %xmm3, %xmm0
shufps $0x55, %xmm3, %xmm0 # xmm0 = xmm0[1,1],xmm3[1,1]
addss %xmm2, %xmm0
movaps 0x50(%rsp), %xmm1
unpcklps %xmm2, %xmm1 # xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
subps %xmm3, %xmm1
movlps %xmm1, 0x188(%rsp)
movaps 0x140(%rsp), %xmm1
movss %xmm1, 0x190(%rsp)
movss %xmm0, 0x194(%rsp)
movl %eax, 0x198(%rsp)
imulq $0x18, 0x110(%rsp), %rbp
movq (%rsp), %rdi
addq %rbp, %rdi
leaq 0x188(%rsp), %rsi
callq 0x2c22f6
addq 0x20(%rsp), %rbp
movq %rbp, %rdi
leaq 0x8c(%rsp), %rsi
callq 0x1ea12c
movq 0x48(%rsp), %rbp
movq %rbx, %rax
addq $0x4, %rax
addq $0x4, %r12
addq $0x4, %r15
addq $0x4, 0x120(%rsp)
addq $0x4, 0x128(%rsp)
incq %r13
addq $0x4, %r14
jmp 0x2e0f29
movq 0x118(%rsp), %rcx
incq %rcx
jmp 0x2e0f00
movq 0x98(%rsp), %rax
testq %rax, %rax
movq 0x168(%rsp), %r12
movq 0x110(%rsp), %r15
je 0x2e11c3
lock
decl (%rax)
jne 0x2e11c3
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
je 0x2e11bb
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2e11c3
movq %rsi, %rdi
callq 0x5f3e0
incq %r15
jmp 0x2e0d75
pushq $0x8
popq %rbx
xorl %r14d, %r14d
cltq
cmpq %rax, %r14
jge 0x2e1226
movq (%rsp), %rax
movq 0x20(%rsp), %r15
movq 0x68(%rsp), %rsi
movq -0x8(%rax,%rbx), %rdx
movq (%rax,%rbx), %rcx
leaq 0x60(%rsp), %rdi
callq 0x2c2352
movq 0xe8(%rsp), %rsi
movq -0x8(%r15,%rbx), %rdx
movq (%r15,%rbx), %rcx
leaq 0xe0(%rsp), %rdi
callq 0x2c2372
incq %r14
movl 0xd4(%rbp), %eax
addq $0x18, %rbx
jmp 0x2e11d1
leaq 0x20(%rsp), %rdi
callq 0x2c2392
movq %rsp, %rdi
callq 0x2c23b4
movq 0x160(%rsp), %rdi
incq %rdi
jmp 0x2e0c43
movq 0x60(%rsp), %rax
cmpq 0x68(%rsp), %rax
je 0x2e1286
movq 0xe0(%rsp), %rax
movq 0xe8(%rsp), %rcx
cmpq %rcx, %rax
je 0x2e1286
subq %rax, %rcx
shrq $0x2, %rcx
decl %ecx
leaq 0x60(%rsp), %rdi
leaq 0xe0(%rsp), %rsi
xorl %edx, %edx
callq 0x2e168d
xorps %xmm0, %xmm0
movaps %xmm0, (%rsp)
andq $0x0, 0x10(%rsp)
movss 0xdc(%rbp), %xmm0
movss %xmm0, 0x50(%rsp)
movq 0x68(%rsp), %rax
subq 0x60(%rsp), %rax
pushq $0x14
popq %rcx
cqto
idivq %rcx
movq %rax, %r14
leaq 0x90(%rsp), %rdi
leaq 0x20(%rsp), %rdx
movq %rax, %rsi
callq 0x62126
movq 0x60(%rsp), %rax
movq 0x90(%rsp), %rcx
addq $0x8, %rax
xorl %edx, %edx
cmpq %rdx, %r14
je 0x2e1308
movsd -0x8(%rax), %xmm0
movsd (%rax), %xmm1
subps %xmm0, %xmm1
movaps %xmm1, %xmm0
shufps $0x55, %xmm1, %xmm0 # xmm0 = xmm0[1,1],xmm1[1,1]
mulss %xmm1, %xmm0
movss %xmm0, (%rcx,%rdx,4)
incq %rdx
addq $0x14, %rax
jmp 0x2e12de
xorl %r13d, %r13d
pushq $0x1
popq %rbp
movq %rsp, %r15
leaq 0x20(%rsp), %r12
xorl %eax, %eax
movq %rax, 0x20(%rsp)
cmpq %r14, %rax
jae 0x2e140c
movq 0x60(%rsp), %rcx
imulq $0x14, %rax, %rdx
addq %rcx, %rdx
movq (%rsp), %rsi
movq 0x8(%rsp), %rdi
subq %rsi, %rdi
shrq $0x3, %rdi
movq 0x90(%rsp), %r8
testl %edi, %edi
cmovlel %r13d, %edi
xorl %r10d, %r10d
movl %ebp, %r9d
cmpq %r10, %rdi
je 0x2e13ef
movq (%rsi,%r10,8), %r11
imulq $0x14, %r11, %rbx
movss (%rdx), %xmm2
movss 0x8(%rcx,%rbx), %xmm1
xorps %xmm0, %xmm0
ucomiss %xmm1, %xmm2
ja 0x2e13ca
addq %rcx, %rbx
movss 0x8(%rdx), %xmm6
movss (%rbx), %xmm4
ucomiss %xmm4, %xmm6
jb 0x2e13ca
movss 0x4(%rdx), %xmm5
movss 0xc(%rbx), %xmm3
ucomiss %xmm3, %xmm5
ja 0x2e13ca
movss 0xc(%rdx), %xmm8
movss 0x4(%rbx), %xmm7
ucomiss %xmm7, %xmm8
jb 0x2e13ca
minss %xmm6, %xmm1
maxss %xmm2, %xmm4
subss %xmm4, %xmm1
minss %xmm8, %xmm3
maxss %xmm5, %xmm7
subss %xmm7, %xmm3
mulss %xmm1, %xmm3
movaps %xmm3, %xmm0
movss (%r8,%rax,4), %xmm1
subss %xmm0, %xmm1
addss (%r8,%r11,4), %xmm1
divss %xmm1, %xmm0
ucomiss 0x50(%rsp), %xmm0
cmoval %r13d, %r9d
incq %r10
jmp 0x2e1356
testl %r9d, %r9d
je 0x2e1404
movq %r15, %rdi
movq %r12, %rsi
callq 0x1ea1cc
movq 0x20(%rsp), %rax
incq %rax
jmp 0x2e1318
leaq 0x90(%rsp), %r14
movq %r14, %rdi
callq 0x621c2
andq $0x0, 0x10(%r14)
xorps %xmm0, %xmm0
movaps %xmm0, (%r14)
leaq 0x20(%rsp), %r15
movaps %xmm0, (%r15)
andq $0x0, 0x10(%r15)
xorl %r13d, %r13d
movq (%rsp), %rax
movq 0x8(%rsp), %rcx
subq %rax, %rcx
sarq $0x3, %rcx
cmpq %rcx, %r13
jae 0x2e147f
movq (%rax,%r13,8), %r12
imulq $0x14, %r12, %rsi
addq 0x60(%rsp), %rsi
movq %r14, %rdi
callq 0x2c22f6
shlq $0x2, %r12
addq 0xe0(%rsp), %r12
movq %r15, %rdi
movq %r12, %rsi
callq 0x1ea12c
incq %r13
jmp 0x2e1439
movq 0x98(%rsp), %rax
subq 0x90(%rsp), %rax
cqto
pushq $0x14
popq %rcx
idivq %rcx
movq %rax, %r14
testl %r14d, %r14d
je 0x2e157a
movq 0x100(%rsp), %rax
movq (%rax), %r15
movq 0x108(%rsp), %rax
movq 0x8(%rax), %r8
pushq $0x6
popq %rsi
pushq $0x4
popq %rcx
movq %r15, %rdi
movl %r14d, %edx
callq 0x636fa
movq (%r15), %rax
pushq $-0x64
popq %rcx
movq %rcx, 0x40(%rsp)
testq %rax, %rax
je 0x2e1583
movslq 0x38(%r15), %rcx
imulq 0x40(%r15), %rcx
testq %rcx, %rcx
je 0x2e1583
movq 0x90(%rsp), %rcx
movq 0x20(%rsp), %rdx
xorl %edi, %edi
testl %r14d, %r14d
movl $0x0, %esi
movq %rsi, 0x40(%rsp)
cmovlel %edi, %r14d
addq $0x10, %rcx
xorl %esi, %esi
cmpq %rsi, %r14
je 0x2e1583
movss (%rdx,%rsi,4), %xmm0
movslq 0x2c(%r15), %rdi
imulq %rsi, %rdi
imulq 0x10(%r15), %rdi
cvtsi2ssl (%rcx), %xmm1
addss 0x10d74e(%rip), %xmm1 # 0x3eec88
movss %xmm1, (%rax,%rdi)
movss %xmm0, 0x4(%rax,%rdi)
movss -0x10(%rcx), %xmm0
movss %xmm0, 0x8(%rax,%rdi)
movss -0xc(%rcx), %xmm0
movss %xmm0, 0xc(%rax,%rdi)
movss -0x8(%rcx), %xmm0
movss %xmm0, 0x10(%rax,%rdi)
movss -0x4(%rcx), %xmm0
movss %xmm0, 0x14(%rax,%rdi)
incq %rsi
addq $0x14, %rcx
jmp 0x2e1517
movq $0x0, 0x40(%rsp)
leaq 0x20(%rsp), %rdi
callq 0x621c2
leaq 0x90(%rsp), %rdi
callq 0x2c2932
movq %rsp, %rdi
callq 0x1ea816
leaq 0xe0(%rsp), %rdi
callq 0x621c2
leaq 0x60(%rsp), %rdi
callq 0x2c2932
movq 0x40(%rsp), %rax
addq $0x1c8, %rsp # imm = 0x1C8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x2e15eb
movq %rax, %rbx
jmp 0x2e1605
movq %rax, %rbx
leaq 0x90(%rsp), %rdi
callq 0x621c2
jmp 0x2e1605
jmp 0x2e1613
movq %rax, %rbx
leaq 0x20(%rsp), %rdi
callq 0x621c2
leaq 0x90(%rsp), %rdi
callq 0x2c2932
movq %rsp, %rdi
callq 0x1ea816
jmp 0x2e1666
jmp 0x2e1685
jmp 0x2e1613
movq %rax, %rbx
jmp 0x2e1654
jmp 0x2e161a
movq %rax, %rbx
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0x2e1654
lock
decl (%rax)
jne 0x2e1654
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
jne 0x2e164e
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2e1654
movq (%rdi), %rax
callq *0x18(%rax)
leaq 0x20(%rsp), %rdi
callq 0x2c2392
movq %rsp, %rdi
callq 0x2c23b4
leaq 0xe0(%rsp), %rdi
callq 0x621c2
leaq 0x60(%rsp), %rdi
callq 0x2c2932
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
| /csukuangfj[P]ncnn/src/layer/yolodetectionoutput.cpp |
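nms_sorted_bboxes is not shown in the source column above; it is inlined into the assembly as the loop around 0x2e1356 that intersects box extents, divides by the union area, and compares against nms_threshold. A hedged reconstruction of that test in plain C++ (the areas are the precomputed (xmax - xmin) * (ymax - ymin) values filled in just before the loop):

#include <algorithm>
#include <cstdio>

struct BBoxRect { float xmin, ymin, xmax, ymax; int label; };

static float iou(const BBoxRect& a, const BBoxRect& b, float area_a, float area_b)
{
    const float iw = std::min(a.xmax, b.xmax) - std::max(a.xmin, b.xmin);
    const float ih = std::min(a.ymax, b.ymax) - std::max(a.ymin, b.ymin);
    if (iw <= 0.f || ih <= 0.f)
        return 0.f; // no overlap
    const float inter = iw * ih;
    return inter / (area_a + area_b - inter);
}

int main()
{
    const BBoxRect a = {0.f, 0.f, 2.f, 2.f, 0};
    const BBoxRect b = {1.f, 1.f, 3.f, 3.f, 0};
    // inter = 1, union = 4 + 4 - 1 = 7  ->  iou ~= 0.143, below the 0.45 default
    std::printf("iou = %f\n", iou(a, b, 4.f, 4.f));
    return 0;
}

A candidate box is kept only if its IoU against every previously kept box stays at or below nms_threshold, matching the cmoval on the threshold compare in the assembly.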
void ncnn::qsort_descent_inplace<ncnn::BBoxRect>(std::vector<ncnn::BBoxRect, std::allocator<ncnn::BBoxRect>>&, std::vector<float, std::allocator<float>>&, int, int) | static void qsort_descent_inplace(std::vector<T>& datas, std::vector<float>& scores, int left, int right)
{
int i = left;
int j = right;
float p = scores[(left + right) / 2];
while (i <= j)
{
while (scores[i] > p)
i++;
while (scores[j] < p)
j--;
if (i <= j)
{
// swap
std::swap(datas[i], datas[j]);
std::swap(scores[i], scores[j]);
i++;
j--;
}
}
if (left < j)
qsort_descent_inplace(datas, scores, left, j);
if (i < right)
qsort_descent_inplace(datas, scores, i, right);
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $0x20, %rsp
movl %ecx, %ebx
movl %edx, %ebp
movq %rsi, %r14
movq %rdi, %r15
movq (%rsi), %rsi
pushq $0x2
popq %r12
movl %ebp, %r8d
leal (%r8,%rbx), %eax
cltd
idivl %r12d
cltq
movss (%rsi,%rax,4), %xmm0
movl %ebx, %r9d
movl %ebp, %eax
movl %r9d, %ecx
movl %eax, %ebp
cmpl %ecx, %ebp
jg 0x2e1774
movslq %ebp, %rdx
leaq (%rdx,%rdx,4), %rdi
addq $-0x5, %rdi
decq %rdx
movl %ebp, %eax
addq $0x5, %rdi
incl %eax
ucomiss 0x4(%rsi,%rdx,4), %xmm0
leaq 0x1(%rdx), %rdx
jb 0x2e16de
movslq %ecx, %r11
leaq (%r11,%r11,4), %r10
addq $0x5, %r10
incq %r11
movl %ecx, %r9d
addq $-0x5, %r10
decl %r9d
ucomiss -0x4(%rsi,%r11,4), %xmm0
leaq -0x1(%r11), %r11
ja 0x2e1700
leal -0x1(%rax), %ebp
leal 0x1(%r9), %ecx
cmpq %r11, %rdx
jg 0x2e16c6
movq (%r15), %rcx
movl 0x10(%rcx,%rdi,4), %esi
movl %esi, 0x10(%rsp)
movups (%rcx,%rdi,4), %xmm1
movaps %xmm1, (%rsp)
movl 0x10(%rcx,%r10,4), %esi
movl %esi, 0x10(%rcx,%rdi,4)
movups (%rcx,%r10,4), %xmm1
movups %xmm1, (%rcx,%rdi,4)
movl 0x10(%rsp), %esi
movl %esi, 0x10(%rcx,%r10,4)
movaps (%rsp), %xmm1
movups %xmm1, (%rcx,%r10,4)
movq (%r14), %rsi
movss (%rsi,%rdx,4), %xmm1
movss (%rsi,%r11,4), %xmm2
movss %xmm2, (%rsi,%rdx,4)
movss %xmm1, (%rsi,%r11,4)
jmp 0x2e16c1
cmpl %r8d, %ecx
jle 0x2e178a
movq %r15, %rdi
movq %r14, %rsi
movl %r8d, %edx
callq 0x2e168d
movq (%r14), %rsi
cmpl %ebx, %ebp
jl 0x2e16aa
addq $0x20, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
nop
| /csukuangfj[P]ncnn/src/layer/yolodetectionoutput.cpp |
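The recursion above sorts scores descending while permuting datas with the same swaps, so the pairing between a box and its score survives the sort. A minimal usage sketch, assuming the qsort_descent_inplace template above is in scope (any payload type usable with std::swap works):

#include <vector>

int main()
{
    std::vector<int> datas = {10, 20, 30};
    std::vector<float> scores = {0.2f, 0.9f, 0.5f};
    qsort_descent_inplace(datas, scores, 0, (int)scores.size() - 1);
    // scores is now {0.9, 0.5, 0.2} and datas followed along: {20, 30, 10}
    return 0;
}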
ncnn::Quantize::forward(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const | int Quantize::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
int dims = bottom_blob.dims;
if (dims == 1)
{
int w = bottom_blob.w;
top_blob.create(w, (size_t)1u, opt.blob_allocator);
if (top_blob.empty())
return -100;
const float* ptr = bottom_blob;
signed char* outptr = top_blob;
if (scale_data_size == 1)
{
const float scale = scale_data[0];
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = 0; i < w; i++)
{
outptr[i] = float2int8(ptr[i] * scale);
}
}
else
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = 0; i < w; i++)
{
outptr[i] = float2int8(ptr[i] * scale_data[i]);
}
}
}
if (dims == 2)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
top_blob.create(w, h, (size_t)1u, opt.blob_allocator);
if (top_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = 0; i < h; i++)
{
const float* ptr0 = bottom_blob.row(i);
signed char* outptr0 = top_blob.row<signed char>(i);
const float scale = scale_data_size == 1 ? scale_data[0] : scale_data[i];
for (int j = 0; j < w; j++)
{
outptr0[j] = float2int8(ptr0[j] * scale);
}
}
}
if (dims == 3)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int channels = bottom_blob.c;
int size = w * h;
top_blob.create(w, h, channels, (size_t)1u, opt.blob_allocator);
if (top_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
const float* ptr = bottom_blob.channel(q);
signed char* outptr = top_blob.channel(q);
const float scale = scale_data_size == 1 ? scale_data[0] : scale_data[q];
for (int i = 0; i < size; i++)
{
outptr[i] = float2int8(ptr[i] * scale);
}
}
}
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x38, %rsp
movq %rdx, %rbx
movq %rsi, 0x10(%rsp)
movl 0x28(%rsi), %eax
cmpl $0x3, %eax
movq %rdi, 0x28(%rsp)
movq %rdx, 0x30(%rsp)
je 0x2e1b99
movq %rdi, %r14
cmpl $0x2, %eax
je 0x2e1a80
movq $0x0, 0x8(%rsp)
cmpl $0x1, %eax
jne 0x2e1d14
movq 0x10(%rsp), %rax
movl 0x2c(%rax), %r13d
movq 0x8(%rcx), %rcx
pushq $0x1
popq %rdx
movq %rbx, %rdi
movl %r13d, %esi
callq 0x635fa
movq (%rbx), %rbp
pushq $-0x64
popq %rax
movq %rax, 0x8(%rsp)
testq %rbp, %rbp
je 0x2e1d14
movslq 0x38(%rbx), %rax
imulq 0x40(%rbx), %rax
testq %rax, %rax
je 0x2e1d14
movq 0x10(%rsp), %rax
movq (%rax), %r15
cmpl $0x1, 0xd0(%r14)
jne 0x2e1cb8
movq 0xd8(%r14), %rax
xorl %edx, %edx
testl %r13d, %r13d
movl $0x0, %ecx
movq %rcx, 0x8(%rsp)
cmovlel %edx, %r13d
movss (%rax), %xmm0
movss %xmm0, 0x4(%rsp)
pushq $-0x7f
popq %rbx
pushq $0x7f
popq %r14
xorl %r12d, %r12d
cmpq %r12, %r13
je 0x2e1d14
movss (%r15,%r12,4), %xmm0
mulss 0x4(%rsp), %xmm0
callq 0x5f2d0
cvttss2si %xmm0, %eax
cmpl $-0x7e, %eax
jge 0x2e1a6e
movl %ebx, %eax
cmpl $0x7f, %eax
jl 0x2e1a76
movl %r14d, %eax
movb %al, (%rbp,%r12)
incq %r12
jmp 0x2e1a49
movq 0x10(%rsp), %rax
movl 0x2c(%rax), %r13d
movl 0x30(%rax), %edx
movq 0x8(%rcx), %r8
pushq $0x1
popq %rcx
movq %rbx, %rdi
movl %r13d, %esi
movq %rdx, %r15
callq 0x636fa
pushq $-0x64
popq %rax
movq %rax, 0x8(%rsp)
cmpq $0x0, (%rbx)
je 0x2e1d14
movslq 0x38(%rbx), %rax
imulq 0x40(%rbx), %rax
testq %rax, %rax
je 0x2e1d14
xorl %ecx, %ecx
testl %r13d, %r13d
cmovlel %ecx, %r13d
testl %r15d, %r15d
movl $0x0, %eax
movq %rax, 0x8(%rsp)
cmovlel %ecx, %r15d
movq %r15, 0x18(%rsp)
pushq $-0x7f
popq %rbx
pushq $0x7f
popq %rbp
xorl %r10d, %r10d
cmpq 0x18(%rsp), %r10
je 0x2e1d14
movq 0x10(%rsp), %r8
movslq 0x2c(%r8), %rax
movq 0x30(%rsp), %r9
movslq 0x2c(%r9), %rcx
cmpl $0x1, 0xd0(%r14)
movq 0xd8(%r14), %rdx
movl %r10d, %esi
movl $0x0, %edi
cmoveq %rdi, %rsi
movss (%rdx,%rsi,4), %xmm0
movss %xmm0, 0x4(%rsp)
movq 0x10(%r9), %r15
imulq %r10, %r15
imulq %rcx, %r15
addq (%r9), %r15
movq 0x10(%r8), %r12
movq %r10, 0x20(%rsp)
imulq %r10, %r12
imulq %rax, %r12
addq (%r8), %r12
xorl %r14d, %r14d
cmpq %r14, %r13
je 0x2e1b87
movss (%r12,%r14,4), %xmm0
mulss 0x4(%rsp), %xmm0
callq 0x5f2d0
cvttss2si %xmm0, %eax
cmpl $-0x7e, %eax
jge 0x2e1b77
movl %ebx, %eax
cmpl $0x7f, %eax
jl 0x2e1b7e
movl %ebp, %eax
movb %al, (%r15,%r14)
incq %r14
jmp 0x2e1b56
movq 0x20(%rsp), %r10
incq %r10
movq 0x28(%rsp), %r14
jmp 0x2e1aed
movq 0x10(%rsp), %rax
movl 0x2c(%rax), %ebp
movl 0x30(%rax), %r13d
movl 0x38(%rax), %eax
movq 0x8(%rcx), %r9
pushq $0x1
popq %r8
movq %rbx, %rdi
movl %ebp, %esi
movl %r13d, %edx
movq %rax, %r14
movl %eax, %ecx
callq 0x63810
pushq $-0x64
popq %rax
movq %rax, 0x8(%rsp)
cmpq $0x0, (%rbx)
je 0x2e1d14
movslq 0x38(%rbx), %rax
imulq 0x40(%rbx), %rax
testq %rax, %rax
je 0x2e1d14
imull %ebp, %r13d
xorl %ecx, %ecx
testl %r13d, %r13d
cmovlel %ecx, %r13d
testl %r14d, %r14d
movl $0x0, %eax
movq %rax, 0x8(%rsp)
cmovlel %ecx, %r14d
movq %r14, 0x18(%rsp)
pushq $-0x7f
popq %rbx
pushq $0x7f
popq %r14
xorl %r8d, %r8d
cmpq 0x18(%rsp), %r8
je 0x2e1d14
movq 0x10(%rsp), %rsi
movq 0x40(%rsi), %r15
movq 0x30(%rsp), %rdi
movq 0x40(%rdi), %r12
movq 0x28(%rsp), %rax
cmpl $0x1, 0xd0(%rax)
movq 0xd8(%rax), %rax
movl %r8d, %ecx
movl $0x0, %edx
cmoveq %rdx, %rcx
movss (%rax,%rcx,4), %xmm0
movss %xmm0, 0x4(%rsp)
imulq 0x10(%rdi), %r12
imulq %r8, %r12
addq (%rdi), %r12
imulq 0x10(%rsi), %r15
movq %r8, 0x20(%rsp)
imulq %r8, %r15
addq (%rsi), %r15
xorl %ebp, %ebp
cmpq %rbp, %r13
je 0x2e1cab
movss (%r15,%rbp,4), %xmm0
mulss 0x4(%rsp), %xmm0
callq 0x5f2d0
cvttss2si %xmm0, %eax
cmpl $-0x7e, %eax
jge 0x2e1c9a
movl %ebx, %eax
cmpl $0x7f, %eax
jl 0x2e1ca2
movl %r14d, %eax
movb %al, (%r12,%rbp)
incq %rbp
jmp 0x2e1c79
movq 0x20(%rsp), %r8
incq %r8
jmp 0x2e1c13
xorl %ecx, %ecx
testl %r13d, %r13d
movl $0x0, %eax
movq %rax, 0x8(%rsp)
cmovlel %ecx, %r13d
pushq $-0x7f
popq %rbx
pushq $0x7f
popq %r14
xorl %r12d, %r12d
cmpq %r12, %r13
je 0x2e1d14
movq 0x28(%rsp), %rax
movq 0xd8(%rax), %rax
movss (%rax,%r12,4), %xmm0
mulss (%r15,%r12,4), %xmm0
callq 0x5f2d0
cvttss2si %xmm0, %eax
cmpl $-0x7e, %eax
jge 0x2e1d02
movl %ebx, %eax
cmpl $0x7f, %eax
jl 0x2e1d0a
movl %r14d, %eax
movb %al, (%rbp,%r12)
incq %r12
jmp 0x2e1cd5
movq 0x8(%rsp), %rax
addq $0x38, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
| /csukuangfj[P]ncnn/src/layer/quantize.cpp |
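
Note: a scalar sketch of the float2int8 helper the loops above call. The saturation bounds are visible in the assembly (the -0x7f/0x7f constants clamp the result to [-127, 127]); the call at 0x5f2d0 before the truncating conversion is assumed to be round-to-nearest, matching mainline ncnn.

#include <cmath>
#include <cstdio>

// assumed semantics: round to nearest, then saturate to [-127, 127]
static inline signed char float2int8(float v)
{
    int int32 = (int)roundf(v);
    if (int32 > 127) return 127;
    if (int32 < -127) return -127;
    return (signed char)int32;
}

int main()
{
    const float scale = 127.f / 2.5f; // hypothetical absmax calibration for inputs bounded by 2.5
    const float in[4] = {0.01f, -1.2f, 2.5f, 9.f};
    for (int i = 0; i < 4; i++)
        printf("%g -> %d\n", in[i], float2int8(in[i] * scale)); // 9.f saturates to 127
    return 0;
}
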
ncnn::Dequantize::load_model(ncnn::ModelBin const&) | int Dequantize::load_model(const ModelBin& mb)
{
scale_data = mb.load(scale_data_size, 1);
if (scale_data.empty())
return -100;
if (bias_data_size)
{
bias_data = mb.load(bias_data_size, 1);
if (bias_data.empty())
return -100;
}
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $0x50, %rsp
movq %rsi, %r15
movq %rdi, %rbx
movl 0xd0(%rdi), %edx
movq (%rsi), %rax
leaq 0x8(%rsp), %r14
pushq $0x1
popq %rcx
movq %r14, %rdi
callq *0x10(%rax)
leaq 0xd8(%rbx), %r12
movq 0x8(%r14), %rax
cmpq %r14, %r12
je 0x2e8ef1
testq %rax, %rax
je 0x2e8e61
lock
incl (%rax)
movq 0xe0(%rbx), %rax
testq %rax, %rax
je 0x2e8e95
lock
decl (%rax)
jne 0x2e8e95
movq 0xd8(%rbx), %rsi
movq 0xf8(%rbx), %rdi
testq %rdi, %rdi
je 0x2e8e8d
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2e8e95
movq %rsi, %rdi
callq 0x5f3e0
movq 0x8(%rsp), %rax
movq %rax, 0xd8(%rbx)
movq 0x10(%rsp), %rax
movq %rax, 0xe0(%rbx)
movq 0x18(%rsp), %rcx
movq %rcx, 0xe8(%rbx)
movl 0x20(%rsp), %ecx
movl %ecx, 0xf0(%rbx)
movq 0x28(%rsp), %rcx
movq %rcx, 0xf8(%rbx)
movups 0x30(%rsp), %xmm0
movups %xmm0, 0x100(%rbx)
movl 0x40(%rsp), %ecx
movl %ecx, 0x110(%rbx)
movq 0x48(%rsp), %rcx
movq %rcx, 0x118(%rbx)
testq %rax, %rax
je 0x2e8f1a
lock
decl (%rax)
jne 0x2e8f1a
movq 0x8(%rsp), %rsi
movq 0x28(%rsp), %rdi
testq %rdi, %rdi
je 0x2e8f12
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2e8f1a
movq %rsi, %rdi
callq 0x5f3e0
pushq $-0x64
popq %rbp
cmpq $0x0, (%r12)
je 0x2e904f
movslq 0x110(%rbx), %rax
imulq 0x118(%rbx), %rax
testq %rax, %rax
je 0x2e904f
movl 0xd4(%rbx), %edx
testl %edx, %edx
je 0x2e904d
movq (%r15), %rax
pushq $0x1
popq %rcx
movq %r14, %rdi
movq %r15, %rsi
callq *0x10(%rax)
leaq 0x120(%rbx), %r15
movq 0x10(%rsp), %rax
cmpq %r14, %r15
je 0x2e900a
testq %rax, %rax
je 0x2e8f7a
lock
incl (%rax)
movq 0x128(%rbx), %rax
testq %rax, %rax
je 0x2e8fae
lock
decl (%rax)
jne 0x2e8fae
movq 0x120(%rbx), %rsi
movq 0x140(%rbx), %rdi
testq %rdi, %rdi
je 0x2e8fa6
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2e8fae
movq %rsi, %rdi
callq 0x5f3e0
movq 0x8(%rsp), %rax
movq %rax, 0x120(%rbx)
movq 0x10(%rsp), %rax
movq %rax, 0x128(%rbx)
movq 0x18(%rsp), %rcx
movq %rcx, 0x130(%rbx)
movl 0x20(%rsp), %ecx
movl %ecx, 0x138(%rbx)
movq 0x28(%rsp), %rcx
movq %rcx, 0x140(%rbx)
movups 0x30(%rsp), %xmm0
movups %xmm0, 0x148(%rbx)
movl 0x40(%rsp), %ecx
movl %ecx, 0x158(%rbx)
movq 0x48(%rsp), %rcx
movq %rcx, 0x160(%rbx)
testq %rax, %rax
je 0x2e9033
lock
decl (%rax)
jne 0x2e9033
movq 0x8(%rsp), %rsi
movq 0x28(%rsp), %rdi
testq %rdi, %rdi
je 0x2e902b
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2e9033
movq %rsi, %rdi
callq 0x5f3e0
cmpq $0x0, (%r15)
je 0x2e904f
movslq 0x158(%rbx), %rax
imulq 0x160(%rbx), %rax
testq %rax, %rax
je 0x2e904f
xorl %ebp, %ebp
movl %ebp, %eax
addq $0x50, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
movq %rax, %rbx
movq 0x10(%rsp), %rax
testq %rax, %rax
je 0x2e90bc
lock
decl (%rax)
jne 0x2e90bc
movq 0x8(%rsp), %rsi
movq 0x28(%rsp), %rdi
testq %rdi, %rdi
je 0x2e90ac
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2e90bc
jmp 0x2e90c6
jmp 0x2e90c6
movq %rax, %rbx
movq 0x10(%rsp), %rax
testq %rax, %rax
je 0x2e90bc
lock
decl (%rax)
jne 0x2e90bc
movq 0x8(%rsp), %rsi
movq 0x28(%rsp), %rdi
testq %rdi, %rdi
jne 0x2e90b6
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x2e90bc
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
jmp 0x2e90c6
movq %rax, %rdi
callq 0x61d68
| /csukuangfj[P]ncnn/src/layer/dequantize.cpp |
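
Note: almost all of the assembly above is the inlined ncnn::Mat copy-assignment for scale_data/bias_data: "lock incl" bumps the incoming buffer's refcount, and "lock decl" drops the old one, freeing it (through the indirect allocator call "callq *0x18(%rax)", or a plain deallocation otherwise) when the count reaches zero. A heavily simplified sketch of that pattern with a hypothetical type; the real Mat also tracks dims, elemsize and the allocator pointer.

#include <atomic>
#include <cstdlib>

struct RefBuf // hypothetical stand-in, not the real ncnn::Mat
{
    void* data = nullptr;
    std::atomic<int>* refcount = nullptr;

    void addref() // the "lock incl (%rax)" above
    {
        if (refcount) refcount->fetch_add(1);
    }
    void release() // the "lock decl (%rax); jne skip; ...free" sequence above
    {
        if (refcount && refcount->fetch_sub(1) == 1)
            free(data); // sketch only; ncnn frees via the attached allocator when present
        data = nullptr;
        refcount = nullptr;
    }
};
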
ncnn::Dequantize::forward(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const | int Dequantize::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
int dims = bottom_blob.dims;
if (dims == 1)
{
int w = bottom_blob.w;
top_blob.create(w, (size_t)4u, opt.blob_allocator);
if (top_blob.empty())
return -100;
const int* intptr = bottom_blob;
float* ptr = top_blob;
if (scale_data_size == 1)
{
const float scale = scale_data[0];
if (bias_data_size == 0)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = 0; i < w; i++)
{
ptr[i] = intptr[i] * scale;
}
}
else if (bias_data_size == 1)
{
const float bias = bias_data[0];
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = 0; i < w; i++)
{
ptr[i] = intptr[i] * scale + bias;
}
}
else
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = 0; i < w; i++)
{
ptr[i] = intptr[i] * scale + bias_data[i];
}
}
}
else
{
if (bias_data_size == 0)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = 0; i < w; i++)
{
ptr[i] = intptr[i] * scale_data[i];
}
}
else if (bias_data_size == 1)
{
const float bias = bias_data[0];
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = 0; i < w; i++)
{
ptr[i] = intptr[i] * scale_data[i] + bias;
}
}
else
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = 0; i < w; i++)
{
ptr[i] = intptr[i] * scale_data[i] + bias_data[i];
}
}
}
}
if (dims == 2)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
top_blob.create(w, h, (size_t)4u, opt.blob_allocator);
if (top_blob.empty())
return -100;
if (bias_data_size == 0)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = 0; i < h; i++)
{
const int* intptr = bottom_blob.row<const int>(i);
float* ptr = top_blob.row(i);
const float scale = scale_data_size == 1 ? scale_data[0] : scale_data[i];
for (int j = 0; j < w; j++)
{
ptr[j] = intptr[j] * scale;
}
}
}
else
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = 0; i < h; i++)
{
const int* intptr = bottom_blob.row<const int>(i);
float* ptr = top_blob.row(i);
const float scale = scale_data_size == 1 ? scale_data[0] : scale_data[i];
const float bias = bias_data_size == 1 ? bias_data[0] : bias_data[i];
for (int j = 0; j < w; j++)
{
ptr[j] = intptr[j] * scale + bias;
}
}
}
}
if (dims == 3)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int channels = bottom_blob.c;
int size = w * h;
top_blob.create(w, h, channels, (size_t)4u, opt.blob_allocator);
if (top_blob.empty())
return -100;
if (bias_data_size == 0)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
const int* intptr = bottom_blob.channel(q);
float* ptr = top_blob.channel(q);
const float scale = scale_data_size == 1 ? scale_data[0] : scale_data[q];
for (int i = 0; i < size; i++)
{
ptr[i] = intptr[i] * scale;
}
}
}
else
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
const int* intptr = bottom_blob.channel(q);
float* ptr = top_blob.channel(q);
const float scale = scale_data_size == 1 ? scale_data[0] : scale_data[q];
const float bias = bias_data_size == 1 ? bias_data[0] : bias_data[q];
for (int i = 0; i < size; i++)
{
ptr[i] = intptr[i] * scale + bias;
}
}
}
}
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
movq %rdx, %r13
movq %rsi, %r12
movq %rdi, %rbx
movl 0x28(%rsi), %edx
cmpl $0x3, %edx
je 0x2e927d
cmpl $0x2, %edx
je 0x2e9195
xorl %eax, %eax
cmpl $0x1, %edx
jne 0x2e9547
movl 0x2c(%r12), %r14d
movq 0x8(%rcx), %rcx
pushq $0x4
popq %rdx
movq %r13, %rdi
movl %r14d, %esi
callq 0x635fa
movq (%r13), %rcx
pushq $-0x64
popq %rax
testq %rcx, %rcx
je 0x2e9547
movslq 0x38(%r13), %rdx
imulq 0x40(%r13), %rdx
testq %rdx, %rdx
je 0x2e9547
movq (%r12), %rdx
cmpl $0x1, 0xd0(%rbx)
jne 0x2e9370
movq 0xd8(%rbx), %rax
movss (%rax), %xmm0
movl 0xd4(%rbx), %eax
cmpl $0x1, %eax
je 0x2e9468
testl %eax, %eax
jne 0x2e94dc
xorl %eax, %eax
testl %r14d, %r14d
cmovlel %eax, %r14d
xorl %esi, %esi
cmpq %rsi, %r14
je 0x2e9547
cvtsi2ssl (%rdx,%rsi,4), %xmm1
mulss %xmm0, %xmm1
movss %xmm1, (%rcx,%rsi,4)
incq %rsi
jmp 0x2e9179
movl 0x2c(%r12), %r14d
movl 0x30(%r12), %r15d
movq 0x8(%rcx), %r8
pushq $0x4
popq %rcx
movq %r13, %rdi
movl %r14d, %esi
movl %r15d, %edx
callq 0x636fa
movq (%r13), %rcx
pushq $-0x64
popq %rax
testq %rcx, %rcx
je 0x2e9547
movslq 0x38(%r13), %rdx
imulq 0x40(%r13), %rdx
testq %rdx, %rdx
je 0x2e9547
movl 0xd0(%rbx), %edx
movl 0xd4(%rbx), %r9d
movslq 0x2c(%r12), %rsi
movq (%r12), %rdi
imulq 0x10(%r12), %rsi
movslq 0x2c(%r13), %r8
imulq 0x10(%r13), %r8
xorl %eax, %eax
testl %r9d, %r9d
je 0x2e93b6
testl %r14d, %r14d
cmovlel %eax, %r14d
testl %r15d, %r15d
cmovlel %eax, %r15d
xorl %r10d, %r10d
cmpq %r15, %r10
je 0x2e9547
cmpl $0x1, %edx
movl %r10d, %r11d
movq %r11, %r12
cmoveq %rax, %r12
cmpl $0x1, %r9d
movq 0xd8(%rbx), %r13
movq 0x120(%rbx), %rbp
movss (%r13,%r12,4), %xmm0
cmoveq %rax, %r11
movss (%rbp,%r11,4), %xmm1
xorl %r11d, %r11d
cmpq %r11, %r14
je 0x2e9272
cvtsi2ssl (%rdi,%r11,4), %xmm2
mulss %xmm0, %xmm2
addss %xmm1, %xmm2
movss %xmm2, (%rcx,%r11,4)
incq %r11
jmp 0x2e9254
incq %r10
addq %r8, %rcx
addq %rsi, %rdi
jmp 0x2e9217
movl 0x2c(%r12), %ebp
movl 0x30(%r12), %r14d
movl 0x38(%r12), %r15d
movq 0x8(%rcx), %r9
pushq $0x4
popq %r8
movq %r13, %rdi
movl %ebp, %esi
movl %r14d, %edx
movl %r15d, %ecx
callq 0x63810
movq (%r13), %rcx
pushq $-0x64
popq %rax
testq %rcx, %rcx
je 0x2e9547
movq 0x40(%r13), %rdx
movslq 0x38(%r13), %rsi
imulq %rdx, %rsi
testq %rsi, %rsi
je 0x2e9547
imull %ebp, %r14d
movl 0xd0(%rbx), %esi
movl 0xd4(%rbx), %r9d
movq 0x10(%r12), %rdi
imulq 0x40(%r12), %rdi
movq (%r12), %r8
imulq 0x10(%r13), %rdx
xorl %eax, %eax
testl %r9d, %r9d
je 0x2e940f
testl %r14d, %r14d
cmovlel %eax, %r14d
testl %r15d, %r15d
cmovlel %eax, %r15d
xorl %r10d, %r10d
cmpq %r15, %r10
je 0x2e9547
cmpl $0x1, %esi
movl %r10d, %r11d
movq %r11, %r12
cmoveq %rax, %r12
cmpl $0x1, %r9d
movq 0xd8(%rbx), %r13
movq 0x120(%rbx), %rbp
movss (%r13,%r12,4), %xmm0
cmoveq %rax, %r11
movss (%rbp,%r11,4), %xmm1
xorl %r11d, %r11d
cmpq %r11, %r14
je 0x2e9365
cvtsi2ssl (%r8,%r11,4), %xmm2
mulss %xmm0, %xmm2
addss %xmm1, %xmm2
movss %xmm2, (%rcx,%r11,4)
incq %r11
jmp 0x2e9347
incq %r10
addq %rdx, %rcx
addq %rdi, %r8
jmp 0x2e930a
movl 0xd4(%rbx), %eax
cmpl $0x1, %eax
je 0x2e949e
testl %eax, %eax
jne 0x2e950b
movq 0xd8(%rbx), %rsi
xorl %eax, %eax
testl %r14d, %r14d
cmovlel %eax, %r14d
xorl %edi, %edi
cmpq %rdi, %r14
je 0x2e9547
cvtsi2ssl (%rdx,%rdi,4), %xmm0
mulss (%rsi,%rdi,4), %xmm0
movss %xmm0, (%rcx,%rdi,4)
incq %rdi
jmp 0x2e9399
testl %r14d, %r14d
cmovlel %eax, %r14d
testl %r15d, %r15d
cmovlel %eax, %r15d
xorl %r9d, %r9d
cmpq %r15, %r9
je 0x2e9547
cmpl $0x1, %edx
movq 0xd8(%rbx), %r10
movl %r9d, %r11d
cmoveq %rax, %r11
movss (%r10,%r11,4), %xmm0
xorl %r10d, %r10d
cmpq %r10, %r14
je 0x2e9404
cvtsi2ssl (%rdi,%r10,4), %xmm1
mulss %xmm0, %xmm1
movss %xmm1, (%rcx,%r10,4)
incq %r10
jmp 0x2e93ea
incq %r9
addq %r8, %rcx
addq %rsi, %rdi
jmp 0x2e93c7
testl %r14d, %r14d
cmovlel %eax, %r14d
testl %r15d, %r15d
cmovlel %eax, %r15d
xorl %r9d, %r9d
cmpq %r15, %r9
je 0x2e9547
cmpl $0x1, %esi
movq 0xd8(%rbx), %r10
movl %r9d, %r11d
cmoveq %rax, %r11
movss (%r10,%r11,4), %xmm0
xorl %r10d, %r10d
cmpq %r10, %r14
je 0x2e945d
cvtsi2ssl (%r8,%r10,4), %xmm1
mulss %xmm0, %xmm1
movss %xmm1, (%rcx,%r10,4)
incq %r10
jmp 0x2e9443
incq %r9
addq %rdx, %rcx
addq %rdi, %r8
jmp 0x2e9420
movq 0x120(%rbx), %rax
movss (%rax), %xmm1
xorl %eax, %eax
testl %r14d, %r14d
cmovlel %eax, %r14d
xorl %esi, %esi
cmpq %rsi, %r14
je 0x2e9547
cvtsi2ssl (%rdx,%rsi,4), %xmm2
mulss %xmm0, %xmm2
addss %xmm1, %xmm2
movss %xmm2, (%rcx,%rsi,4)
incq %rsi
jmp 0x2e947e
movq 0xd8(%rbx), %rsi
movq 0x120(%rbx), %rax
movss (%rax), %xmm0
xorl %eax, %eax
testl %r14d, %r14d
cmovlel %eax, %r14d
xorl %edi, %edi
cmpq %rdi, %r14
je 0x2e9547
cvtsi2ssl (%rdx,%rdi,4), %xmm1
mulss (%rsi,%rdi,4), %xmm1
addss %xmm0, %xmm1
movss %xmm1, (%rcx,%rdi,4)
incq %rdi
jmp 0x2e94bb
movq 0x120(%rbx), %rsi
xorl %eax, %eax
testl %r14d, %r14d
cmovlel %eax, %r14d
xorl %edi, %edi
cmpq %rdi, %r14
je 0x2e9547
cvtsi2ssl (%rdx,%rdi,4), %xmm1
mulss %xmm0, %xmm1
addss (%rsi,%rdi,4), %xmm1
movss %xmm1, (%rcx,%rdi,4)
incq %rdi
jmp 0x2e94ee
movq 0xd8(%rbx), %rsi
movq 0x120(%rbx), %rdi
xorl %eax, %eax
testl %r14d, %r14d
cmovlel %eax, %r14d
xorl %r8d, %r8d
cmpq %r8, %r14
je 0x2e9547
cvtsi2ssl (%rdx,%r8,4), %xmm0
mulss (%rsi,%r8,4), %xmm0
addss (%rdi,%r8,4), %xmm0
movss %xmm0, (%rcx,%r8,4)
incq %r8
jmp 0x2e9525
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
| /csukuangfj[P]ncnn/src/layer/dequantize.cpp |
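
Note: a scalar reference of the per-element dequantize above: out[i] = (float)in[i] * scale, plus bias when bias_data is present, with scale and bias chosen per row/channel via "scale_data_size == 1 ? scale_data[0] : scale_data[q]". The constants below are hypothetical and simply invert the quantize example earlier.

#include <cstdio>

int main()
{
    const int in[4] = {-127, -64, 0, 127}; // int32 values produced by a prior quantized op
    const float scale = 2.5f / 127.f;      // inverse of the hypothetical quantize scale
    const float bias = 0.1f;               // hypothetical per-channel bias
    for (int i = 0; i < 4; i++)
        printf("%d -> %f\n", in[i], in[i] * scale + bias);
    return 0;
}
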
ncnn::Yolov3DetectionOutput::~Yolov3DetectionOutput() | Yolov3DetectionOutput::~Yolov3DetectionOutput()
{
//delete softmax;
} | pushq %rbx
movq %rdi, %rbx
leaq 0x19ba15(%rip), %rax # 0x48a0a0
movq %rax, (%rdi)
movq 0x178(%rdi), %rax
testq %rax, %rax
je 0x2ee6c2
lock
decl (%rax)
jne 0x2ee6c2
movq 0x170(%rbx), %rsi
movq 0x190(%rbx), %rdi
testq %rdi, %rdi
je 0x2ee6ba
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2ee6c2
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x1b0(%rbx)
leaq 0x170(%rbx), %rax
xorps %xmm0, %xmm0
movups %xmm0, 0xc(%rax)
movups %xmm0, (%rax)
andl $0x0, 0x1a8(%rbx)
movups %xmm0, 0x198(%rbx)
movq 0x130(%rbx), %rax
testq %rax, %rax
je 0x2ee723
lock
decl (%rax)
jne 0x2ee723
movq 0x128(%rbx), %rsi
movq 0x148(%rbx), %rdi
testq %rdi, %rdi
je 0x2ee718
movq (%rdi), %rax
callq *0x18(%rax)
xorps %xmm0, %xmm0
jmp 0x2ee723
movq %rsi, %rdi
callq 0x5f3e0
xorps %xmm0, %xmm0
leaq 0x128(%rbx), %rax
andq $0x0, 0x168(%rbx)
movups %xmm0, 0xc(%rax)
movups %xmm0, (%rax)
andl $0x0, 0x160(%rbx)
movups %xmm0, 0x150(%rbx)
movq 0xe8(%rbx), %rax
testq %rax, %rax
je 0x2ee77b
lock
decl (%rax)
jne 0x2ee77b
movq 0xe0(%rbx), %rsi
movq 0x100(%rbx), %rdi
testq %rdi, %rdi
je 0x2ee773
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2ee77b
movq %rsi, %rdi
callq 0x5f3e0
leaq 0xe0(%rbx), %rax
andq $0x0, 0x120(%rbx)
xorps %xmm0, %xmm0
movups %xmm0, 0xc(%rax)
movups %xmm0, (%rax)
movups %xmm0, 0x108(%rbx)
andl $0x0, 0x118(%rbx)
movq %rbx, %rdi
popq %rbx
jmp 0x7833c
jmp 0x2ee7af
jmp 0x2ee7af
movq %rax, %rdi
callq 0x61d68
nop
| /csukuangfj[P]ncnn/src/layer/yolov3detectionoutput.cpp |
virtual thunk to ncnn::Swish_x86::forward_inplace(ncnn::Mat&, ncnn::Option const&) const | int Swish_x86::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
{
int w = bottom_top_blob.w;
int h = bottom_top_blob.h;
int d = bottom_top_blob.d;
int channels = bottom_top_blob.c;
int elempack = bottom_top_blob.elempack;
int size = w * h * d * elempack;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
float* ptr = bottom_top_blob.channel(q);
int i = 0;
#if __SSE2__
#if __AVX__
#if __AVX512F__
__m512 _one_avx512 = _mm512_set1_ps(1.f);
__m512 _zero_avx512 = _mm512_setzero_ps();
for (; i + 15 < size; i += 16)
{
__m512 _p = _mm512_loadu_ps(ptr);
_p = _mm512_div_ps(_p, _mm512_add_ps(_one_avx512, exp512_ps(_mm512_sub_ps(_zero_avx512, _p))));
_mm512_storeu_ps(ptr, _p);
ptr += 16;
}
#endif // __AVX512F__
__m256 _one_avx = _mm256_set1_ps(1.f);
__m256 _zero_avx = _mm256_setzero_ps();
for (; i + 7 < size; i += 8)
{
__m256 _p = _mm256_loadu_ps(ptr);
_p = _mm256_div_ps(_p, _mm256_add_ps(_one_avx, exp256_ps(_mm256_sub_ps(_zero_avx, _p))));
_mm256_storeu_ps(ptr, _p);
ptr += 8;
}
#endif // __AVX__
__m128 _one = _mm_set1_ps(1.f);
__m128 _zero = _mm_setzero_ps();
for (; i + 3 < size; i += 4)
{
__m128 _p = _mm_load_ps(ptr);
_p = _mm_div_ps(_p, _mm_add_ps(_one, exp_ps(_mm_sub_ps(_zero, _p))));
_mm_store_ps(ptr, _p);
ptr += 4;
}
#endif // __SSE2__
for (; i < size; i++)
{
*ptr = *ptr / (1.f + expf(-*ptr));
ptr++;
}
}
return 0;
} | pushq %rax
callq 0x34fb08
xorl %eax, %eax
popq %rcx
retq
nop
| /csukuangfj[P]ncnn/src/layer/x86/swish_x86.cpp |
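
Note: the assembly above is only the virtual thunk; it forwards to the actual Swish_x86::forward_inplace body at 0x34fb08 and returns 0, and the AVX512/AVX/SSE loops in the source live in that body. A scalar reference of the activation itself, matching the scalar tail of the kernel:

#include <cmath>
#include <cstdio>

int main()
{
    // y = x / (1 + exp(-x)), as in the "for (; i < size; i++)" tail above
    const float xs[5] = {-4.f, -1.f, 0.f, 1.f, 4.f};
    for (int i = 0; i < 5; i++)
        printf("swish(%.1f) = %f\n", xs[i], xs[i] / (1.f + expf(-xs[i])));
    return 0;
}
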
ncnn::transpose_pack_A_tile(ncnn::Mat const&, ncnn::Mat&, int, int, int, int) | static void transpose_pack_A_tile(const Mat& A, Mat& AT, int i, int max_ii, int k, int max_kk)
{
const int elempack = A.elempack;
const int A_hstep = A.dims == 3 ? (int)A.cstep : A.w;
float* pp = AT;
int ii = 0;
#if __SSE2__
#if __AVX__
#if __AVX512F__
for (; ii + 15 < max_ii; ii += 16)
{
if (elempack == 16)
{
const float* p0 = (const float*)A + k * A_hstep + (i + ii) * 16;
int kk = 0;
for (; kk + 15 < max_kk; kk += 16)
{
__m512 _r0 = _mm512_load_ps(p0);
__m512 _r1 = _mm512_load_ps(p0 + 16 * 1);
__m512 _r2 = _mm512_load_ps(p0 + 16 * 2);
__m512 _r3 = _mm512_load_ps(p0 + 16 * 3);
__m512 _r4 = _mm512_load_ps(p0 + 16 * 4);
__m512 _r5 = _mm512_load_ps(p0 + 16 * 5);
__m512 _r6 = _mm512_load_ps(p0 + 16 * 6);
__m512 _r7 = _mm512_load_ps(p0 + 16 * 7);
__m512 _r8 = _mm512_load_ps(p0 + 16 * 8);
__m512 _r9 = _mm512_load_ps(p0 + 16 * 9);
__m512 _ra = _mm512_load_ps(p0 + 16 * 10);
__m512 _rb = _mm512_load_ps(p0 + 16 * 11);
__m512 _rc = _mm512_load_ps(p0 + 16 * 12);
__m512 _rd = _mm512_load_ps(p0 + 16 * 13);
__m512 _re = _mm512_load_ps(p0 + 16 * 14);
__m512 _rf = _mm512_load_ps(p0 + 16 * 15);
transpose16x16_ps(_r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7, _r8, _r9, _ra, _rb, _rc, _rd, _re, _rf);
_mm512_store_ps(pp, _r0);
_mm512_store_ps(pp + 16 * 1, _r1);
_mm512_store_ps(pp + 16 * 2, _r2);
_mm512_store_ps(pp + 16 * 3, _r3);
_mm512_store_ps(pp + 16 * 4, _r4);
_mm512_store_ps(pp + 16 * 5, _r5);
_mm512_store_ps(pp + 16 * 6, _r6);
_mm512_store_ps(pp + 16 * 7, _r7);
_mm512_store_ps(pp + 16 * 8, _r8);
_mm512_store_ps(pp + 16 * 9, _r9);
_mm512_store_ps(pp + 16 * 10, _ra);
_mm512_store_ps(pp + 16 * 11, _rb);
_mm512_store_ps(pp + 16 * 12, _rc);
_mm512_store_ps(pp + 16 * 13, _rd);
_mm512_store_ps(pp + 16 * 14, _re);
_mm512_store_ps(pp + 16 * 15, _rf);
pp += 256;
p0 += A_hstep * 16;
}
}
if (elempack == 8)
{
const float* p0 = (const float*)A + k * A_hstep + (i + ii) * 8;
int kk = 0;
for (; kk + 7 < max_kk; kk += 8)
{
__m256 _r0 = _mm256_load_ps(p0);
__m256 _r1 = _mm256_load_ps(p0 + 8 * 1);
__m256 _r2 = _mm256_load_ps(p0 + 8 * 2);
__m256 _r3 = _mm256_load_ps(p0 + 8 * 3);
__m256 _r4 = _mm256_load_ps(p0 + 8 * 4);
__m256 _r5 = _mm256_load_ps(p0 + 8 * 5);
__m256 _r6 = _mm256_load_ps(p0 + 8 * 6);
__m256 _r7 = _mm256_load_ps(p0 + 8 * 7);
__m256 _r8 = _mm256_load_ps(p0 + 8 * 8);
__m256 _r9 = _mm256_load_ps(p0 + 8 * 9);
__m256 _ra = _mm256_load_ps(p0 + 8 * 10);
__m256 _rb = _mm256_load_ps(p0 + 8 * 11);
__m256 _rc = _mm256_load_ps(p0 + 8 * 12);
__m256 _rd = _mm256_load_ps(p0 + 8 * 13);
__m256 _re = _mm256_load_ps(p0 + 8 * 14);
__m256 _rf = _mm256_load_ps(p0 + 8 * 15);
transpose8x16_ps(_r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7, _r8, _r9, _ra, _rb, _rc, _rd, _re, _rf);
__m512 _rr0 = _mm512_insertf32x8(_mm512_castps256_ps512(_r0), _r1, 1);
__m512 _rr1 = _mm512_insertf32x8(_mm512_castps256_ps512(_r2), _r3, 1);
__m512 _rr2 = _mm512_insertf32x8(_mm512_castps256_ps512(_r4), _r5, 1);
__m512 _rr3 = _mm512_insertf32x8(_mm512_castps256_ps512(_r6), _r7, 1);
__m512 _rr4 = _mm512_insertf32x8(_mm512_castps256_ps512(_r8), _r9, 1);
__m512 _rr5 = _mm512_insertf32x8(_mm512_castps256_ps512(_ra), _rb, 1);
__m512 _rr6 = _mm512_insertf32x8(_mm512_castps256_ps512(_rc), _rd, 1);
__m512 _rr7 = _mm512_insertf32x8(_mm512_castps256_ps512(_re), _rf, 1);
_mm512_store_ps(pp, _rr0);
_mm512_store_ps(pp + 16 * 1, _rr1);
_mm512_store_ps(pp + 16 * 2, _rr2);
_mm512_store_ps(pp + 16 * 3, _rr3);
_mm512_store_ps(pp + 16 * 4, _rr4);
_mm512_store_ps(pp + 16 * 5, _rr5);
_mm512_store_ps(pp + 16 * 6, _rr6);
_mm512_store_ps(pp + 16 * 7, _rr7);
pp += 128;
p0 += A_hstep * 8;
}
}
if (elempack == 4)
{
const float* p0 = (const float*)A + k * A_hstep + (i + ii) * 4;
int kk = 0;
for (; kk + 3 < max_kk; kk += 4)
{
__m128 _r0 = _mm_load_ps(p0);
__m128 _r1 = _mm_load_ps(p0 + 4 * 1);
__m128 _r2 = _mm_load_ps(p0 + 4 * 2);
__m128 _r3 = _mm_load_ps(p0 + 4 * 3);
__m128 _r4 = _mm_load_ps(p0 + 4 * 4);
__m128 _r5 = _mm_load_ps(p0 + 4 * 5);
__m128 _r6 = _mm_load_ps(p0 + 4 * 6);
__m128 _r7 = _mm_load_ps(p0 + 4 * 7);
__m128 _r8 = _mm_load_ps(p0 + 4 * 8);
__m128 _r9 = _mm_load_ps(p0 + 4 * 9);
__m128 _ra = _mm_load_ps(p0 + 4 * 10);
__m128 _rb = _mm_load_ps(p0 + 4 * 11);
__m128 _rc = _mm_load_ps(p0 + 4 * 12);
__m128 _rd = _mm_load_ps(p0 + 4 * 13);
__m128 _re = _mm_load_ps(p0 + 4 * 14);
__m128 _rf = _mm_load_ps(p0 + 4 * 15);
_MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3);
_MM_TRANSPOSE4_PS(_r4, _r5, _r6, _r7);
_MM_TRANSPOSE4_PS(_r8, _r9, _ra, _rb);
_MM_TRANSPOSE4_PS(_rc, _rd, _re, _rf);
_mm_store_ps(pp, _r0);
_mm_store_ps(pp + 4 * 1, _r4);
_mm_store_ps(pp + 4 * 2, _r8);
_mm_store_ps(pp + 4 * 3, _rc);
_mm_store_ps(pp + 4 * 4, _r1);
_mm_store_ps(pp + 4 * 5, _r5);
_mm_store_ps(pp + 4 * 6, _r9);
_mm_store_ps(pp + 4 * 7, _rd);
_mm_store_ps(pp + 4 * 8, _r2);
_mm_store_ps(pp + 4 * 9, _r6);
_mm_store_ps(pp + 4 * 10, _ra);
_mm_store_ps(pp + 4 * 11, _re);
_mm_store_ps(pp + 4 * 12, _r3);
_mm_store_ps(pp + 4 * 13, _r7);
_mm_store_ps(pp + 4 * 14, _rb);
_mm_store_ps(pp + 4 * 15, _rf);
pp += 64;
p0 += A_hstep * 4;
}
}
if (elempack == 1)
{
const float* p0 = (const float*)A + k * A_hstep + (i + ii);
int kk = 0;
for (; kk < max_kk; kk++)
{
_mm512_store_ps(pp, _mm512_loadu_ps(p0));
pp += 16;
p0 += A_hstep;
}
}
}
#endif // __AVX512F__
for (; ii + 7 < max_ii; ii += 8)
{
#if __AVX512F__
if (elempack == 16)
{
const float* p0 = (const float*)A + k * A_hstep + (i + ii) * 16;
int kk = 0;
for (; kk + 15 < max_kk; kk += 16)
{
__m512 _r0 = _mm512_load_ps(p0);
__m512 _r1 = _mm512_load_ps(p0 + 16 * 1);
__m512 _r2 = _mm512_load_ps(p0 + 16 * 2);
__m512 _r3 = _mm512_load_ps(p0 + 16 * 3);
__m512 _r4 = _mm512_load_ps(p0 + 16 * 4);
__m512 _r5 = _mm512_load_ps(p0 + 16 * 5);
__m512 _r6 = _mm512_load_ps(p0 + 16 * 6);
__m512 _r7 = _mm512_load_ps(p0 + 16 * 7);
transpose16x8_ps(_r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7);
_mm512_store_ps(pp, _r0);
_mm512_store_ps(pp + 16 * 1, _r1);
_mm512_store_ps(pp + 16 * 2, _r2);
_mm512_store_ps(pp + 16 * 3, _r3);
_mm512_store_ps(pp + 16 * 4, _r4);
_mm512_store_ps(pp + 16 * 5, _r5);
_mm512_store_ps(pp + 16 * 6, _r6);
_mm512_store_ps(pp + 16 * 7, _r7);
pp += 128;
p0 += A_hstep * 16;
}
}
#endif // __AVX512F__
if (elempack == 8)
{
const float* p0 = (const float*)A + k * A_hstep + (i + ii) * 8;
int kk = 0;
for (; kk + 7 < max_kk; kk += 8)
{
__m256 _r0 = _mm256_load_ps(p0);
__m256 _r1 = _mm256_load_ps(p0 + 8 * 1);
__m256 _r2 = _mm256_load_ps(p0 + 8 * 2);
__m256 _r3 = _mm256_load_ps(p0 + 8 * 3);
__m256 _r4 = _mm256_load_ps(p0 + 8 * 4);
__m256 _r5 = _mm256_load_ps(p0 + 8 * 5);
__m256 _r6 = _mm256_load_ps(p0 + 8 * 6);
__m256 _r7 = _mm256_load_ps(p0 + 8 * 7);
transpose8x8_ps(_r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7);
_mm256_store_ps(pp, _r0);
_mm256_store_ps(pp + 8 * 1, _r1);
_mm256_store_ps(pp + 8 * 2, _r2);
_mm256_store_ps(pp + 8 * 3, _r3);
_mm256_store_ps(pp + 8 * 4, _r4);
_mm256_store_ps(pp + 8 * 5, _r5);
_mm256_store_ps(pp + 8 * 6, _r6);
_mm256_store_ps(pp + 8 * 7, _r7);
pp += 64;
p0 += A_hstep * 8;
}
}
if (elempack == 4)
{
const float* p0 = (const float*)A + k * A_hstep + (i + ii) * 4;
int kk = 0;
for (; kk + 3 < max_kk; kk += 4)
{
__m128 _r0 = _mm_load_ps(p0);
__m128 _r1 = _mm_load_ps(p0 + 4 * 1);
__m128 _r2 = _mm_load_ps(p0 + 4 * 2);
__m128 _r3 = _mm_load_ps(p0 + 4 * 3);
__m128 _r4 = _mm_load_ps(p0 + 4 * 4);
__m128 _r5 = _mm_load_ps(p0 + 4 * 5);
__m128 _r6 = _mm_load_ps(p0 + 4 * 6);
__m128 _r7 = _mm_load_ps(p0 + 4 * 7);
_MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3);
_MM_TRANSPOSE4_PS(_r4, _r5, _r6, _r7);
_mm_store_ps(pp, _r0);
_mm_store_ps(pp + 4 * 1, _r4);
_mm_store_ps(pp + 4 * 2, _r1);
_mm_store_ps(pp + 4 * 3, _r5);
_mm_store_ps(pp + 4 * 4, _r2);
_mm_store_ps(pp + 4 * 5, _r6);
_mm_store_ps(pp + 4 * 6, _r3);
_mm_store_ps(pp + 4 * 7, _r7);
pp += 32;
p0 += A_hstep * 4;
}
}
if (elempack == 1)
{
const float* p0 = (const float*)A + k * A_hstep + (i + ii);
int kk = 0;
for (; kk < max_kk; kk++)
{
_mm256_store_ps(pp, _mm256_loadu_ps(p0));
pp += 8;
p0 += A_hstep;
}
}
}
#endif // __AVX__
for (; ii + 3 < max_ii; ii += 4)
{
#if __AVX__
#if __AVX512F__
if (elempack == 16)
{
const float* p0 = (const float*)A + k * A_hstep + (i + ii) * 16;
int kk = 0;
for (; kk + 15 < max_kk; kk += 16)
{
__m512 _r0 = _mm512_load_ps(p0);
__m512 _r1 = _mm512_load_ps(p0 + 16 * 1);
__m512 _r2 = _mm512_load_ps(p0 + 16 * 2);
__m512 _r3 = _mm512_load_ps(p0 + 16 * 3);
transpose16x4_ps(_r0, _r1, _r2, _r3);
_mm512_store_ps(pp, _r0);
_mm512_store_ps(pp + 16 * 1, _r1);
_mm512_store_ps(pp + 16 * 2, _r2);
_mm512_store_ps(pp + 16 * 3, _r3);
pp += 64;
p0 += A_hstep * 16;
}
}
#endif // __AVX512F__
if (elempack == 8)
{
const float* p0 = (const float*)A + k * A_hstep + (i + ii) * 8;
int kk = 0;
for (; kk + 7 < max_kk; kk += 8)
{
__m256 _r0 = _mm256_load_ps(p0);
__m256 _r1 = _mm256_load_ps(p0 + 8 * 1);
__m256 _r2 = _mm256_load_ps(p0 + 8 * 2);
__m256 _r3 = _mm256_load_ps(p0 + 8 * 3);
transpose8x4_ps(_r0, _r1, _r2, _r3);
_mm256_store_ps(pp, _r0);
_mm256_store_ps(pp + 8 * 1, _r1);
_mm256_store_ps(pp + 8 * 2, _r2);
_mm256_store_ps(pp + 8 * 3, _r3);
pp += 32;
p0 += A_hstep * 8;
}
}
#endif // __AVX__
if (elempack == 4)
{
const float* p0 = (const float*)A + k * A_hstep + (i + ii) * 4;
int kk = 0;
for (; kk + 3 < max_kk; kk += 4)
{
__m128 _r0 = _mm_load_ps(p0);
__m128 _r1 = _mm_load_ps(p0 + 4 * 1);
__m128 _r2 = _mm_load_ps(p0 + 4 * 2);
__m128 _r3 = _mm_load_ps(p0 + 4 * 3);
_MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3);
_mm_store_ps(pp, _r0);
_mm_store_ps(pp + 4 * 1, _r1);
_mm_store_ps(pp + 4 * 2, _r2);
_mm_store_ps(pp + 4 * 3, _r3);
pp += 16;
p0 += A_hstep * 4;
}
}
if (elempack == 1)
{
const float* p0 = (const float*)A + k * A_hstep + (i + ii);
int kk = 0;
for (; kk < max_kk; kk++)
{
_mm_store_ps(pp, _mm_loadu_ps(p0));
pp += 4;
p0 += A_hstep;
}
}
}
#endif // __SSE2__
for (; ii + 1 < max_ii; ii += 2)
{
#if __SSE2__
#if __AVX__
#if __AVX512F__
if (elempack == 16)
{
const float* p0 = (const float*)A + k * A_hstep + (i + ii) * 16;
int kk = 0;
for (; kk + 15 < max_kk; kk += 16)
{
__m512 _r0 = _mm512_load_ps(p0);
__m512 _r1 = _mm512_load_ps(p0 + 16);
transpose16x2_ps(_r0, _r1);
_mm512_store_ps(pp, _r0);
_mm512_store_ps(pp + 16, _r1);
pp += 32;
p0 += A_hstep * 16;
}
}
#endif // __AVX512F__
if (elempack == 8)
{
const float* p0 = (const float*)A + k * A_hstep + (i + ii) * 8;
int kk = 0;
for (; kk + 7 < max_kk; kk += 8)
{
__m256 _r0 = _mm256_load_ps(p0);
__m256 _r1 = _mm256_load_ps(p0 + 8);
transpose8x2_ps(_r0, _r1);
_mm256_store_ps(pp, _r0);
_mm256_store_ps(pp + 8, _r1);
pp += 16;
p0 += A_hstep * 8;
}
}
#endif // __AVX__
if (elempack == 4)
{
const float* p0 = (const float*)A + k * A_hstep + (i + ii) * 4;
int kk = 0;
for (; kk + 3 < max_kk; kk += 4)
{
__m128 _r0 = _mm_load_ps(p0);
__m128 _r1 = _mm_load_ps(p0 + 4);
__m128 _tmp0 = _mm_unpacklo_ps(_r0, _r1);
__m128 _tmp1 = _mm_unpackhi_ps(_r0, _r1);
_mm_store_ps(pp, _tmp0);
_mm_store_ps(pp + 4, _tmp1);
pp += 8;
p0 += A_hstep * 4;
}
}
#endif // __SSE2__
if (elempack == 1)
{
const float* p0 = (const float*)A + k * A_hstep + (i + ii);
int kk = 0;
for (; kk < max_kk; kk++)
{
pp[0] = p0[0];
pp[1] = p0[1];
pp += 2;
p0 += A_hstep;
}
}
}
for (; ii < max_ii; ii += 1)
{
#if __SSE2__
#if __AVX__
#if __AVX512F__
if (elempack == 16)
{
const float* p0 = (const float*)A + k * A_hstep + (i + ii) * 16;
int kk = 0;
for (; kk + 15 < max_kk; kk += 16)
{
_mm512_store_ps(pp, _mm512_load_ps(p0));
pp += 16;
p0 += A_hstep * 16;
}
}
#endif // __AVX512F__
if (elempack == 8)
{
const float* p0 = (const float*)A + k * A_hstep + (i + ii) * 8;
int kk = 0;
for (; kk + 7 < max_kk; kk += 8)
{
_mm256_store_ps(pp, _mm256_load_ps(p0));
pp += 8;
p0 += A_hstep * 8;
}
}
#endif // __AVX__
if (elempack == 4)
{
const float* p0 = (const float*)A + k * A_hstep + (i + ii) * 4;
int kk = 0;
for (; kk + 3 < max_kk; kk += 4)
{
_mm_store_ps(pp, _mm_load_ps(p0));
pp += 4;
p0 += A_hstep * 4;
}
}
#endif // __SSE2__
if (elempack == 1)
{
const float* p0 = (const float*)A + k * A_hstep + (i + ii);
int kk = 0;
for (; kk < max_kk; kk++)
{
pp[0] = p0[0];
pp += 1;
p0 += A_hstep;
}
}
}
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
movl %ecx, -0x38(%rsp)
movq %rdi, %rbp
leaq 0x2c(%rdi), %rax
leaq 0x40(%rdi), %rcx
cmpl $0x3, 0x28(%rdi)
cmoveq %rcx, %rax
movslq (%rax), %rax
imull %eax, %r8d
movslq %r8d, %rcx
xorl %r8d, %r8d
testl %r9d, %r9d
movl $0x0, %edi
cmovgl %r9d, %edi
movl %edi, -0x2c(%rsp)
movslq %edx, %rdi
leaq (,%rcx,4), %r10
movq %rdi, %r14
leaq (%r10,%rdi,4), %rdx
leaq (%r10,%rdi,4), %r12
addq $0x4, %r12
shlq $0x5, %rdi
leaq (%rdi,%rcx,4), %r11
shlq $0x4, %r14
leaq (%rdi,%rcx,4), %r13
addq $0x20, %r13
leaq 0x60(%rdi,%rcx,4), %r10
movq %r10, -0x18(%rsp)
leaq 0xe0(%rdi,%rcx,4), %rdi
movq %rdi, -0x8(%rsp)
leaq (%r14,%rcx,4), %rbx
leaq 0x10(%r14,%rcx,4), %r10
leaq 0x30(%r14,%rcx,4), %rdi
movq %rdi, -0x20(%rsp)
leaq 0x70(%r14,%rcx,4), %rcx
movq %rcx, -0x10(%rsp)
movq (%rsi), %rsi
leal (,%rax,8), %ecx
movslq %ecx, %r14
leal (,%rax,4), %ecx
movslq %ecx, %r15
movslq -0x38(%rsp), %rcx
movq %rcx, -0x28(%rsp)
shlq $0x2, %r14
shlq $0x2, %r15
shlq $0x2, %rax
movq %rbp, -0x38(%rsp)
movl 0x18(%rbp), %ebp
movq %r8, %rcx
orq $0x7, %rcx
cmpq -0x28(%rsp), %rcx
jge 0x3690cc
cmpl $0x8, %ebp
jne 0x368e56
movq -0x38(%rsp), %rcx
movq (%rcx), %rdi
addq -0x8(%rsp), %rdi
movl $0x7, %ecx
cmpl %r9d, %ecx
jge 0x368e56
vmovaps -0xe0(%rdi), %ymm0
vmovaps -0xc0(%rdi), %ymm1
vmovaps -0xa0(%rdi), %ymm2
vmovaps -0x80(%rdi), %ymm3
vmovaps -0x60(%rdi), %ymm4
vmovaps -0x40(%rdi), %ymm5
vmovaps -0x20(%rdi), %ymm6
vmovaps (%rdi), %ymm7
vunpcklps %ymm1, %ymm0, %ymm8 # ymm8 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
vunpckhps %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
vunpcklps %ymm3, %ymm2, %ymm1 # ymm1 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
vunpckhps %ymm3, %ymm2, %ymm2 # ymm2 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
vunpcklps %ymm5, %ymm4, %ymm3 # ymm3 = ymm4[0],ymm5[0],ymm4[1],ymm5[1],ymm4[4],ymm5[4],ymm4[5],ymm5[5]
vunpckhps %ymm5, %ymm4, %ymm4 # ymm4 = ymm4[2],ymm5[2],ymm4[3],ymm5[3],ymm4[6],ymm5[6],ymm4[7],ymm5[7]
vunpcklps %ymm7, %ymm6, %ymm5 # ymm5 = ymm6[0],ymm7[0],ymm6[1],ymm7[1],ymm6[4],ymm7[4],ymm6[5],ymm7[5]
vunpckhps %ymm7, %ymm6, %ymm6 # ymm6 = ymm6[2],ymm7[2],ymm6[3],ymm7[3],ymm6[6],ymm7[6],ymm6[7],ymm7[7]
vunpcklpd %ymm1, %ymm8, %ymm7 # ymm7 = ymm8[0],ymm1[0],ymm8[2],ymm1[2]
vunpckhpd %ymm1, %ymm8, %ymm1 # ymm1 = ymm8[1],ymm1[1],ymm8[3],ymm1[3]
vunpcklpd %ymm2, %ymm0, %ymm8 # ymm8 = ymm0[0],ymm2[0],ymm0[2],ymm2[2]
vunpckhpd %ymm2, %ymm0, %ymm0 # ymm0 = ymm0[1],ymm2[1],ymm0[3],ymm2[3]
vunpcklpd %ymm5, %ymm3, %ymm2 # ymm2 = ymm3[0],ymm5[0],ymm3[2],ymm5[2]
vunpckhpd %ymm5, %ymm3, %ymm3 # ymm3 = ymm3[1],ymm5[1],ymm3[3],ymm5[3]
vunpcklpd %ymm6, %ymm4, %ymm5 # ymm5 = ymm4[0],ymm6[0],ymm4[2],ymm6[2]
vunpckhpd %ymm6, %ymm4, %ymm4 # ymm4 = ymm4[1],ymm6[1],ymm4[3],ymm6[3]
vinsertf128 $0x1, %xmm2, %ymm7, %ymm6
vinsertf128 $0x1, %xmm3, %ymm1, %ymm9
vinsertf128 $0x1, %xmm5, %ymm8, %ymm10
vinsertf128 $0x1, %xmm4, %ymm0, %ymm11
vperm2f128 $0x31, %ymm2, %ymm7, %ymm2 # ymm2 = ymm7[2,3],ymm2[2,3]
vperm2f128 $0x31, %ymm3, %ymm1, %ymm1 # ymm1 = ymm1[2,3],ymm3[2,3]
vperm2f128 $0x31, %ymm5, %ymm8, %ymm3 # ymm3 = ymm8[2,3],ymm5[2,3]
vperm2f128 $0x31, %ymm4, %ymm0, %ymm0 # ymm0 = ymm0[2,3],ymm4[2,3]
vmovaps %ymm6, (%rsi)
vmovaps %ymm9, 0x20(%rsi)
vmovaps %ymm10, 0x40(%rsi)
vmovaps %ymm11, 0x60(%rsi)
vmovaps %ymm2, 0x80(%rsi)
vmovaps %ymm1, 0xa0(%rsi)
vmovaps %ymm3, 0xc0(%rsi)
vmovaps %ymm0, 0xe0(%rsi)
addq $0x100, %rsi # imm = 0x100
addl $0x8, %ecx
addq %r14, %rdi
jmp 0x368d68
cmpl $0x4, %ebp
jne 0x368f1b
movq -0x38(%rsp), %rcx
movq (%rcx), %rcx
addq -0x10(%rsp), %rcx
movl $0x3, %edi
cmpl %r9d, %edi
jge 0x368f1b
vmovaps -0x70(%rcx), %xmm0
vmovaps -0x60(%rcx), %xmm1
vmovaps -0x50(%rcx), %xmm2
vmovaps -0x40(%rcx), %xmm3
vmovaps -0x30(%rcx), %xmm4
vmovaps -0x20(%rcx), %xmm5
vmovaps -0x10(%rcx), %xmm6
vmovaps (%rcx), %xmm7
vunpcklps %xmm1, %xmm0, %xmm8 # xmm8 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vunpcklps %xmm3, %xmm2, %xmm9 # xmm9 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
vunpckhps %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
vunpckhps %xmm3, %xmm2, %xmm1 # xmm1 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
vmovlhps %xmm9, %xmm8, %xmm2 # xmm2 = xmm8[0],xmm9[0]
vunpckhpd %xmm9, %xmm8, %xmm3 # xmm3 = xmm8[1],xmm9[1]
vmovlhps %xmm1, %xmm0, %xmm8 # xmm8 = xmm0[0],xmm1[0]
vunpckhpd %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[1],xmm1[1]
vunpcklps %xmm5, %xmm4, %xmm1 # xmm1 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
vunpcklps %xmm7, %xmm6, %xmm9 # xmm9 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
vunpckhps %xmm5, %xmm4, %xmm4 # xmm4 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
vunpckhps %xmm7, %xmm6, %xmm5 # xmm5 = xmm6[2],xmm7[2],xmm6[3],xmm7[3]
vmovlhps %xmm9, %xmm1, %xmm6 # xmm6 = xmm1[0],xmm9[0]
vunpckhpd %xmm9, %xmm1, %xmm1 # xmm1 = xmm1[1],xmm9[1]
vmovlhps %xmm5, %xmm4, %xmm7 # xmm7 = xmm4[0],xmm5[0]
vunpckhpd %xmm5, %xmm4, %xmm4 # xmm4 = xmm4[1],xmm5[1]
vmovaps %xmm2, (%rsi)
vmovaps %xmm6, 0x10(%rsi)
vmovaps %xmm3, 0x20(%rsi)
vmovaps %xmm1, 0x30(%rsi)
vmovaps %xmm8, 0x40(%rsi)
vmovaps %xmm7, 0x50(%rsi)
vmovaps %xmm0, 0x60(%rsi)
vmovaps %xmm4, 0x70(%rsi)
subq $-0x80, %rsi
addl $0x4, %edi
addq %r15, %rcx
jmp 0x368e71
cmpl $0x1, %ebp
jne 0x368f45
movq -0x38(%rsp), %rcx
movq (%rcx), %rcx
addq %rdx, %rcx
movl -0x2c(%rsp), %edi
subl $0x1, %edi
jb 0x368f45
vmovups (%rcx), %ymm0
vmovaps %ymm0, (%rsi)
addq $0x20, %rsi
addq %rax, %rcx
jmp 0x368f2f
addq $0x8, %r8
movl $0x100, %ecx # imm = 0x100
addq %rcx, %r11
movl $0x80, %edi
addq %rdi, %rbx
addq $0x20, %rdx
addq %rcx, %r13
addq %rdi, %r10
addq $0x20, %r12
addq %rcx, -0x18(%rsp)
addq %rdi, -0x20(%rsp)
addq %rcx, -0x8(%rsp)
addq %rdi, -0x10(%rsp)
jmp 0x368d3b
cmpl $0x8, %ebp
jne 0x36900a
movq -0x38(%rsp), %rcx
movq (%rcx), %rcx
addq -0x18(%rsp), %rcx
movl $0x7, %edi
cmpl %r9d, %edi
jge 0x36900a
vmovaps -0x60(%rcx), %ymm0
vmovaps -0x40(%rcx), %ymm1
vmovaps -0x20(%rcx), %ymm2
vmovaps (%rcx), %ymm3
vunpcklps %ymm1, %ymm0, %ymm4 # ymm4 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
vunpckhps %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
vunpcklps %ymm3, %ymm2, %ymm1 # ymm1 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
vunpckhps %ymm3, %ymm2, %ymm2 # ymm2 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
vunpcklpd %ymm1, %ymm4, %ymm3 # ymm3 = ymm4[0],ymm1[0],ymm4[2],ymm1[2]
vunpckhpd %ymm1, %ymm4, %ymm1 # ymm1 = ymm4[1],ymm1[1],ymm4[3],ymm1[3]
vunpcklpd %ymm2, %ymm0, %ymm4 # ymm4 = ymm0[0],ymm2[0],ymm0[2],ymm2[2]
vunpckhpd %ymm2, %ymm0, %ymm0 # ymm0 = ymm0[1],ymm2[1],ymm0[3],ymm2[3]
vinsertf128 $0x1, %xmm1, %ymm3, %ymm2
vinsertf128 $0x1, %xmm0, %ymm4, %ymm5
vperm2f128 $0x31, %ymm1, %ymm3, %ymm1 # ymm1 = ymm3[2,3],ymm1[2,3]
vperm2f128 $0x31, %ymm0, %ymm4, %ymm0 # ymm0 = ymm4[2,3],ymm0[2,3]
vmovaps %ymm2, (%rsi)
vmovaps %ymm5, 0x20(%rsi)
vmovaps %ymm1, 0x40(%rsi)
vmovaps %ymm0, 0x60(%rsi)
subq $-0x80, %rsi
addl $0x8, %edi
addq %r14, %rcx
jmp 0x368f9b
cmpl $0x4, %ebp
jne 0x369078
movq -0x38(%rsp), %rcx
movq (%rcx), %rcx
addq -0x20(%rsp), %rcx
movl $0x3, %edi
cmpl %r9d, %edi
jge 0x369078
vmovaps -0x30(%rcx), %xmm0
vmovaps -0x20(%rcx), %xmm1
vmovaps -0x10(%rcx), %xmm2
vmovaps (%rcx), %xmm3
vunpcklps %xmm1, %xmm0, %xmm4 # xmm4 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vunpcklps %xmm3, %xmm2, %xmm5 # xmm5 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
vunpckhps %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
vunpckhps %xmm3, %xmm2, %xmm1 # xmm1 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
vmovlhps %xmm5, %xmm4, %xmm2 # xmm2 = xmm4[0],xmm5[0]
vunpckhpd %xmm5, %xmm4, %xmm3 # xmm3 = xmm4[1],xmm5[1]
vmovlhps %xmm1, %xmm0, %xmm4 # xmm4 = xmm0[0],xmm1[0]
vunpckhpd %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[1],xmm1[1]
vmovaps %xmm2, (%rsi)
vmovaps %xmm3, 0x10(%rsi)
vmovaps %xmm4, 0x20(%rsi)
vmovaps %xmm0, 0x30(%rsi)
addq $0x40, %rsi
addl $0x4, %edi
addq %r15, %rcx
jmp 0x369021
cmpl $0x1, %ebp
jne 0x3690a2
movq -0x38(%rsp), %rcx
movq (%rcx), %rcx
addq %rdx, %rcx
movl -0x2c(%rsp), %edi
subl $0x1, %edi
jb 0x3690a2
vmovups (%rcx), %xmm0
vmovaps %xmm0, (%rsi)
addq $0x10, %rsi
addq %rax, %rcx
jmp 0x36908c
addq $0x4, %r8
movl $0x80, %ecx
addq %rcx, %r11
addq $0x40, %rbx
addq $0x10, %rdx
addq %rcx, %r13
addq $0x40, %r10
addq $0x10, %r12
addq %rcx, -0x18(%rsp)
addq $0x40, -0x20(%rsp)
movq %r8, %rcx
orq $0x3, %rcx
cmpq -0x28(%rsp), %rcx
jl 0x368f80
jmp 0x3691bf
cmpl $0x8, %ebp
jne 0x36912f
movq -0x38(%rsp), %rcx
movq (%rcx), %rcx
addq %r13, %rcx
movl $0x7, %edi
cmpl %r9d, %edi
jge 0x36912f
vmovaps -0x20(%rcx), %ymm0
vmovaps (%rcx), %ymm1
vunpcklps %ymm1, %ymm0, %ymm2 # ymm2 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
vunpckhps %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
vinsertf128 $0x1, %xmm0, %ymm2, %ymm1
vperm2f128 $0x31, %ymm0, %ymm2, %ymm0 # ymm0 = ymm2[2,3],ymm0[2,3]
vmovaps %ymm1, (%rsi)
vmovaps %ymm0, 0x20(%rsi)
addq $0x40, %rsi
addq %r14, %rcx
addl $0x8, %edi
jmp 0x3690f8
cmpl $0x4, %ebp
jne 0x36916f
movq -0x38(%rsp), %rcx
movq (%rcx), %rcx
addq %r10, %rcx
movl $0x3, %edi
cmpl %r9d, %edi
jge 0x36916f
vmovaps -0x10(%rcx), %xmm0
vmovaps (%rcx), %xmm1
vunpcklps %xmm1, %xmm0, %xmm2 # xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vunpckhps %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
vmovaps %xmm2, (%rsi)
vmovaps %xmm0, 0x10(%rsi)
addq $0x20, %rsi
addq %r15, %rcx
addl $0x4, %edi
jmp 0x369144
cmpl $0x1, %ebp
jne 0x3691a3
movq -0x38(%rsp), %rcx
movq (%rcx), %rcx
addq %r12, %rcx
movl -0x2c(%rsp), %edi
subl $0x1, %edi
jb 0x3691a3
vmovss -0x4(%rcx), %xmm0
vmovss %xmm0, (%rsi)
vmovss (%rcx), %xmm0
vmovss %xmm0, 0x4(%rsi)
addq $0x8, %rsi
addq %rax, %rcx
jmp 0x369183
addq $0x2, %r8
addq $0x40, %r11
addq $0x20, %rbx
addq $0x8, %rdx
addq $0x40, %r13
addq $0x20, %r10
addq $0x8, %r12
movq %r8, %rcx
orq $0x1, %rcx
cmpq -0x28(%rsp), %rcx
jl 0x3690e3
movl $0x7, %ecx
movl $0x3, %edi
cmpq -0x28(%rsp), %r8
jge 0x369283
cmpl $0x8, %ebp
jne 0x369214
movq -0x38(%rsp), %r10
movq (%r10), %r10
addq %r11, %r10
movl %ecx, %r12d
cmpl %r9d, %r12d
jge 0x369214
vmovaps (%r10), %ymm0
vmovaps %ymm0, (%rsi)
addq $0x20, %rsi
addq %r14, %r10
addl $0x8, %r12d
jmp 0x3691f9
cmpl $0x4, %ebp
jne 0x369242
movq -0x38(%rsp), %r10
movq (%r10), %r10
addq %rbx, %r10
movl %edi, %r12d
cmpl %r9d, %r12d
jge 0x369242
vmovaps (%r10), %xmm0
vmovaps %xmm0, (%rsi)
addq $0x10, %rsi
addq %r15, %r10
addl $0x4, %r12d
jmp 0x369227
cmpl $0x1, %ebp
jne 0x36926f
movq -0x38(%rsp), %r10
movq (%r10), %r10
addq %rdx, %r10
movl -0x2c(%rsp), %r12d
subl $0x1, %r12d
jb 0x36926f
vmovss (%r10), %xmm0
vmovss %xmm0, (%rsi)
addq $0x4, %rsi
addq %rax, %r10
jmp 0x369257
incq %r8
addq $0x20, %r11
addq $0x10, %rbx
addq $0x4, %rdx
jmp 0x3691db
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/gemm_x86_fma.cpp |
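
Note: a scalar reference of the elempack == 1 path of transpose_pack_A_tile above, handy when checking the intrinsics. In this AVX build the i-range is split into 8/4/2/1-wide column blocks and each block is emitted kk-major (the 16-wide AVX512 blocks are compiled out); the elempack 16/8/4 paths differ only in that they transpose whole registers instead of copying scalars. A sketch under those assumptions:

static void transpose_pack_A_tile_ref(const float* A, int A_hstep, float* AT,
                                      int i, int max_ii, int k, int max_kk)
{
    float* pp = AT;
    int ii = 0;
    while (ii < max_ii)
    {
        // block widths used by the AVX/SSE paths in this build
        const int bs = (max_ii - ii >= 8) ? 8 : (max_ii - ii >= 4) ? 4 : (max_ii - ii >= 2) ? 2 : 1;
        const float* p0 = A + k * A_hstep + (i + ii);
        for (int kk = 0; kk < max_kk; kk++)
        {
            for (int b = 0; b < bs; b++)
                pp[b] = p0[b]; // bs consecutive columns of row k+kk
            pp += bs;
            p0 += A_hstep;
        }
        ii += bs;
    }
}
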
ncnn::pack_A_tile(ncnn::Mat const&, ncnn::Mat&, int, int, int, int) | static void pack_A_tile(const Mat& A, Mat& AT, int i, int max_ii, int k, int max_kk)
{
const int elempack = A.elempack;
const int A_hstep = A.dims == 3 ? (int)A.cstep : A.w;
float* pp = AT;
int ii = 0;
#if __SSE2__
#if __AVX__
#if __AVX512F__
for (; ii + 15 < max_ii; ii += 16)
{
if (elempack == 16)
{
const float* p0 = (const float*)A + (i + ii) * A_hstep + k * 16;
for (int kk = 0; kk < max_kk; kk++)
{
_mm512_store_ps(pp, _mm512_load_ps(p0));
pp += 16;
p0 += 16;
}
}
if (elempack == 8)
{
const float* p0 = (const float*)A + (i + ii) * A_hstep + k * 8;
const float* p1 = (const float*)A + (i + ii + 8) * A_hstep + k * 8;
for (int kk = 0; kk < max_kk; kk++)
{
_mm256_store_ps(pp, _mm256_load_ps(p0));
_mm256_store_ps(pp + 8, _mm256_load_ps(p1));
pp += 16;
p0 += 8;
p1 += 8;
}
}
if (elempack == 4)
{
const float* p0 = (const float*)A + (i + ii) * A_hstep + k * 4;
const float* p1 = (const float*)A + (i + ii + 4) * A_hstep + k * 4;
const float* p2 = (const float*)A + (i + ii + 8) * A_hstep + k * 4;
const float* p3 = (const float*)A + (i + ii + 12) * A_hstep + k * 4;
for (int kk = 0; kk < max_kk; kk++)
{
_mm_store_ps(pp, _mm_load_ps(p0));
_mm_store_ps(pp + 4, _mm_load_ps(p1));
_mm_store_ps(pp + 8, _mm_load_ps(p2));
_mm_store_ps(pp + 12, _mm_load_ps(p3));
pp += 16;
p0 += 4;
p1 += 4;
p2 += 4;
p3 += 4;
}
}
if (elempack == 1)
{
const float* p0 = (const float*)A + (i + ii) * A_hstep + k;
const float* p1 = (const float*)A + (i + ii + 1) * A_hstep + k;
const float* p2 = (const float*)A + (i + ii + 2) * A_hstep + k;
const float* p3 = (const float*)A + (i + ii + 3) * A_hstep + k;
const float* p4 = (const float*)A + (i + ii + 4) * A_hstep + k;
const float* p5 = (const float*)A + (i + ii + 5) * A_hstep + k;
const float* p6 = (const float*)A + (i + ii + 6) * A_hstep + k;
const float* p7 = (const float*)A + (i + ii + 7) * A_hstep + k;
const float* p8 = (const float*)A + (i + ii + 8) * A_hstep + k;
const float* p9 = (const float*)A + (i + ii + 9) * A_hstep + k;
const float* pa = (const float*)A + (i + ii + 10) * A_hstep + k;
const float* pb = (const float*)A + (i + ii + 11) * A_hstep + k;
const float* pc = (const float*)A + (i + ii + 12) * A_hstep + k;
const float* pd = (const float*)A + (i + ii + 13) * A_hstep + k;
const float* pe = (const float*)A + (i + ii + 14) * A_hstep + k;
const float* pf = (const float*)A + (i + ii + 15) * A_hstep + k;
int kk = 0;
for (; kk + 15 < max_kk; kk += 16)
{
__m512 _r0 = _mm512_loadu_ps(p0);
__m512 _r1 = _mm512_loadu_ps(p1);
__m512 _r2 = _mm512_loadu_ps(p2);
__m512 _r3 = _mm512_loadu_ps(p3);
__m512 _r4 = _mm512_loadu_ps(p4);
__m512 _r5 = _mm512_loadu_ps(p5);
__m512 _r6 = _mm512_loadu_ps(p6);
__m512 _r7 = _mm512_loadu_ps(p7);
__m512 _r8 = _mm512_loadu_ps(p8);
__m512 _r9 = _mm512_loadu_ps(p9);
__m512 _ra = _mm512_loadu_ps(pa);
__m512 _rb = _mm512_loadu_ps(pb);
__m512 _rc = _mm512_loadu_ps(pc);
__m512 _rd = _mm512_loadu_ps(pd);
__m512 _re = _mm512_loadu_ps(pe);
__m512 _rf = _mm512_loadu_ps(pf);
transpose16x16_ps(_r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7, _r8, _r9, _ra, _rb, _rc, _rd, _re, _rf);
_mm512_store_ps(pp, _r0);
_mm512_store_ps(pp + 16, _r1);
_mm512_store_ps(pp + 16 * 2, _r2);
_mm512_store_ps(pp + 16 * 3, _r3);
_mm512_store_ps(pp + 16 * 4, _r4);
_mm512_store_ps(pp + 16 * 5, _r5);
_mm512_store_ps(pp + 16 * 6, _r6);
_mm512_store_ps(pp + 16 * 7, _r7);
_mm512_store_ps(pp + 16 * 8, _r8);
_mm512_store_ps(pp + 16 * 9, _r9);
_mm512_store_ps(pp + 16 * 10, _ra);
_mm512_store_ps(pp + 16 * 11, _rb);
_mm512_store_ps(pp + 16 * 12, _rc);
_mm512_store_ps(pp + 16 * 13, _rd);
_mm512_store_ps(pp + 16 * 14, _re);
_mm512_store_ps(pp + 16 * 15, _rf);
pp += 256;
p0 += 16;
p1 += 16;
p2 += 16;
p3 += 16;
p4 += 16;
p5 += 16;
p6 += 16;
p7 += 16;
p8 += 16;
p9 += 16;
pa += 16;
pb += 16;
pc += 16;
pd += 16;
pe += 16;
pf += 16;
}
for (; kk < max_kk; kk++)
{
pp[0] = p0[0];
pp[1] = p1[0];
pp[2] = p2[0];
pp[3] = p3[0];
pp[4] = p4[0];
pp[5] = p5[0];
pp[6] = p6[0];
pp[7] = p7[0];
pp[8] = p8[0];
pp[9] = p9[0];
pp[10] = pa[0];
pp[11] = pb[0];
pp[12] = pc[0];
pp[13] = pd[0];
pp[14] = pe[0];
pp[15] = pf[0];
pp += 16;
p0++;
p1++;
p2++;
p3++;
p4++;
p5++;
p6++;
p7++;
p8++;
p9++;
pa++;
pb++;
pc++;
pd++;
pe++;
pf++;
}
}
}
#endif // __AVX512F__
for (; ii + 7 < max_ii; ii += 8)
{
if (elempack == 8)
{
const float* p0 = (const float*)A + (i + ii) * A_hstep + k * 8;
for (int kk = 0; kk < max_kk; kk++)
{
_mm256_store_ps(pp, _mm256_load_ps(p0));
pp += 8;
p0 += 8;
}
}
if (elempack == 4)
{
const float* p0 = (const float*)A + (i + ii) * A_hstep + k * 4;
const float* p1 = (const float*)A + (i + ii + 4) * A_hstep + k * 4;
for (int kk = 0; kk < max_kk; kk++)
{
_mm_store_ps(pp, _mm_load_ps(p0));
_mm_store_ps(pp + 4, _mm_load_ps(p1));
pp += 8;
p0 += 4;
p1 += 4;
}
}
if (elempack == 1)
{
const float* p0 = (const float*)A + (i + ii) * A_hstep + k;
const float* p1 = (const float*)A + (i + ii + 1) * A_hstep + k;
const float* p2 = (const float*)A + (i + ii + 2) * A_hstep + k;
const float* p3 = (const float*)A + (i + ii + 3) * A_hstep + k;
const float* p4 = (const float*)A + (i + ii + 4) * A_hstep + k;
const float* p5 = (const float*)A + (i + ii + 5) * A_hstep + k;
const float* p6 = (const float*)A + (i + ii + 6) * A_hstep + k;
const float* p7 = (const float*)A + (i + ii + 7) * A_hstep + k;
int kk = 0;
for (; kk + 7 < max_kk; kk += 8)
{
__m256 _r0 = _mm256_loadu_ps(p0);
__m256 _r1 = _mm256_loadu_ps(p1);
__m256 _r2 = _mm256_loadu_ps(p2);
__m256 _r3 = _mm256_loadu_ps(p3);
__m256 _r4 = _mm256_loadu_ps(p4);
__m256 _r5 = _mm256_loadu_ps(p5);
__m256 _r6 = _mm256_loadu_ps(p6);
__m256 _r7 = _mm256_loadu_ps(p7);
transpose8x8_ps(_r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7);
_mm256_store_ps(pp, _r0);
_mm256_store_ps(pp + 8, _r1);
_mm256_store_ps(pp + 8 * 2, _r2);
_mm256_store_ps(pp + 8 * 3, _r3);
_mm256_store_ps(pp + 8 * 4, _r4);
_mm256_store_ps(pp + 8 * 5, _r5);
_mm256_store_ps(pp + 8 * 6, _r6);
_mm256_store_ps(pp + 8 * 7, _r7);
pp += 64;
p0 += 8;
p1 += 8;
p2 += 8;
p3 += 8;
p4 += 8;
p5 += 8;
p6 += 8;
p7 += 8;
}
for (; kk < max_kk; kk++)
{
pp[0] = p0[0];
pp[1] = p1[0];
pp[2] = p2[0];
pp[3] = p3[0];
pp[4] = p4[0];
pp[5] = p5[0];
pp[6] = p6[0];
pp[7] = p7[0];
pp += 8;
p0++;
p1++;
p2++;
p3++;
p4++;
p5++;
p6++;
p7++;
}
}
}
#endif // __AVX__
for (; ii + 3 < max_ii; ii += 4)
{
if (elempack == 4)
{
const float* p0 = (const float*)A + (i + ii) * A_hstep + k * 4;
for (int kk = 0; kk < max_kk; kk++)
{
_mm_store_ps(pp, _mm_load_ps(p0));
pp += 4;
p0 += 4;
}
}
if (elempack == 1)
{
const float* p0 = (const float*)A + (i + ii) * A_hstep + k;
const float* p1 = (const float*)A + (i + ii + 1) * A_hstep + k;
const float* p2 = (const float*)A + (i + ii + 2) * A_hstep + k;
const float* p3 = (const float*)A + (i + ii + 3) * A_hstep + k;
int kk = 0;
#if __AVX__
for (; kk + 7 < max_kk; kk += 8)
{
__m256 _r0 = _mm256_loadu_ps(p0);
__m256 _r1 = _mm256_loadu_ps(p1);
__m256 _r2 = _mm256_loadu_ps(p2);
__m256 _r3 = _mm256_loadu_ps(p3);
transpose8x4_ps(_r0, _r1, _r2, _r3);
_mm256_store_ps(pp, _r0);
_mm256_store_ps(pp + 8, _r1);
_mm256_store_ps(pp + 16, _r2);
_mm256_store_ps(pp + 24, _r3);
pp += 32;
p0 += 8;
p1 += 8;
p2 += 8;
p3 += 8;
}
#endif // __AVX__
for (; kk + 3 < max_kk; kk += 4)
{
__m128 _r0 = _mm_loadu_ps(p0);
__m128 _r1 = _mm_loadu_ps(p1);
__m128 _r2 = _mm_loadu_ps(p2);
__m128 _r3 = _mm_loadu_ps(p3);
_MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3);
_mm_store_ps(pp, _r0);
_mm_store_ps(pp + 4, _r1);
_mm_store_ps(pp + 8, _r2);
_mm_store_ps(pp + 12, _r3);
pp += 16;
p0 += 4;
p1 += 4;
p2 += 4;
p3 += 4;
}
for (; kk < max_kk; kk++)
{
pp[0] = p0[0];
pp[1] = p1[0];
pp[2] = p2[0];
pp[3] = p3[0];
pp += 4;
p0++;
p1++;
p2++;
p3++;
}
}
}
#endif // __SSE2__
for (; ii + 1 < max_ii; ii += 2)
{
// if (elempack == 1)
{
const float* p0 = (const float*)A + (i + ii) * A_hstep + k;
const float* p1 = (const float*)A + (i + ii + 1) * A_hstep + k;
int kk = 0;
#if __SSE2__
#if __AVX__
for (; kk + 7 < max_kk; kk += 8)
{
__m256 _r0 = _mm256_loadu_ps(p0);
__m256 _r1 = _mm256_loadu_ps(p1);
transpose8x2_ps(_r0, _r1);
_mm256_storeu_ps(pp, _r0);
_mm256_storeu_ps(pp + 8, _r1);
pp += 16;
p0 += 8;
p1 += 8;
}
#endif // __AVX__
for (; kk + 3 < max_kk; kk += 4)
{
__m128 _r0 = _mm_loadu_ps(p0);
__m128 _r1 = _mm_loadu_ps(p1);
__m128 _tmp0 = _mm_unpacklo_ps(_r0, _r1);
__m128 _tmp1 = _mm_unpackhi_ps(_r0, _r1);
_mm_store_ps(pp, _tmp0);
_mm_store_ps(pp + 4, _tmp1);
pp += 8;
p0 += 4;
p1 += 4;
}
#endif // __SSE2__
for (; kk < max_kk; kk++)
{
pp[0] = p0[0];
pp[1] = p1[0];
pp += 2;
p0++;
p1++;
}
}
}
for (; ii < max_ii; ii += 1)
{
// if (elempack == 1)
{
const float* p0 = (const float*)A + (i + ii) * A_hstep + k;
int kk = 0;
#if __SSE2__
#if __AVX__
for (; kk + 7 < max_kk; kk += 8)
{
_mm256_storeu_ps(pp, _mm256_loadu_ps(p0));
pp += 8;
p0 += 8;
}
#endif // __AVX__
for (; kk + 3 < max_kk; kk += 4)
{
_mm_storeu_ps(pp, _mm_loadu_ps(p0));
pp += 4;
p0 += 4;
}
#endif // __SSE2__
for (; kk < max_kk; kk++)
{
pp[0] = p0[0];
pp += 1;
p0++;
}
}
}
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x30, %rsp
movl %ecx, -0x70(%rsp)
movq %rsi, %r14
leaq 0x2c(%rdi), %rax
leaq 0x40(%rdi), %rcx
cmpl $0x3, 0x28(%rdi)
cmoveq %rcx, %rax
movslq (%rax), %r13
leal (,%r8,4), %eax
movslq %eax, %rbx
movslq %r8d, %rax
xorl %r12d, %r12d
testl %r9d, %r9d
movl $0x0, %ecx
cmovgl %r9d, %ecx
movl %ecx, -0x7c(%rsp)
movslq %edx, %rbp
movq %r13, %rcx
imulq %rbp, %rcx
movq %rax, 0x28(%rsp)
leaq (,%rax,4), %rax
leaq 0x1(%rbp), %rdx
imulq %r13, %rdx
leaq (%rax,%rdx,4), %r10
leaq 0x2(%rbp), %rdx
imulq %r13, %rdx
leaq (%rax,%rdx,4), %rdx
movq %rdx, -0x78(%rsp)
leaq 0x3(%rbp), %rdx
imulq %r13, %rdx
leaq (%rax,%rdx,4), %r15
leaq 0x4(%rbp), %rdx
imulq %r13, %rdx
leaq (%rax,%rcx,4), %r11
movq %rbx, -0x8(%rsp)
leaq (,%rbx,4), %rbx
leaq (%rbx,%rcx,4), %rsi
leaq (%rbx,%rdx,4), %rcx
movq %rcx, -0x60(%rsp)
leaq 0x5(%rbp), %rcx
imulq %r13, %rcx
leaq (%rax,%rcx,4), %rcx
movq %rcx, -0x20(%rsp)
leaq 0x6(%rbp), %rcx
imulq %r13, %rcx
leaq (%rax,%rcx,4), %rcx
movq %rcx, -0x28(%rsp)
leaq 0x7(%rbp), %rcx
imulq %r13, %rcx
leaq (%rax,%rcx,4), %rcx
movq %rcx, -0x30(%rsp)
leaq (%rax,%rdx,4), %rax
movq %rax, -0x38(%rsp)
movq %rsi, %rdx
movq (%r14), %r14
movq %r15, %rsi
leal (,%r8,8), %eax
movq -0x78(%rsp), %r8
cltq
movq %rax, -0x10(%rsp)
movslq -0x70(%rsp), %rax
movq %rax, -0x70(%rsp)
movq %r13, %rax
shlq $0x5, %rax
movq %rax, 0x20(%rsp)
movl 0x18(%rdi), %r15d
leaq (,%r13,4), %rax
movq %rax, -0x18(%rsp)
movq %r13, -0x40(%rsp)
movq %rbp, -0x48(%rsp)
movl %r15d, -0x64(%rsp)
movq %rdi, (%rsp)
movq %r12, %rax
orq $0x7, %rax
cmpq -0x70(%rsp), %rax
jge 0x3696bb
cmpl $0x8, %r15d
jne 0x36941f
leaq (%r12,%rbp), %rax
imulq %r13, %rax
shlq $0x2, %rax
addq (%rdi), %rax
movq -0x10(%rsp), %rcx
leaq (%rax,%rcx,4), %rax
movl -0x7c(%rsp), %ecx
subl $0x1, %ecx
jb 0x36941f
vmovaps (%rax), %ymm0
vmovaps %ymm0, (%r14)
addq $0x20, %r14
addq $0x20, %rax
jmp 0x369407
cmpl $0x4, %r15d
jne 0x369455
movq (%rdi), %rax
movl -0x7c(%rsp), %ecx
subl $0x1, %ecx
jb 0x369455
vmovaps (%rax,%rdx), %xmm0
vmovaps %xmm0, (%r14)
movq -0x60(%rsp), %rbx
vmovaps (%rax,%rbx), %xmm0
vmovaps %xmm0, 0x10(%r14)
addq $0x20, %r14
addq $0x10, %rax
jmp 0x36942c
cmpl $0x1, %r15d
jne 0x369685
movq %rdx, 0x8(%rsp)
movq %r12, -0x50(%rsp)
movq (%rdi), %rcx
movq -0x20(%rsp), %rax
leaq (%rcx,%rax), %rdi
movq -0x28(%rsp), %rax
leaq (%rcx,%rax), %r13
movq -0x30(%rsp), %rax
leaq (%rcx,%rax), %r15
movq -0x38(%rsp), %rax
leaq (%rcx,%rax), %rbx
movq %r11, %rax
movq %rsi, 0x10(%rsp)
leaq (%rcx,%rsi), %r11
movq %r8, -0x78(%rsp)
leaq (%rcx,%r8), %rbp
movq %r10, -0x58(%rsp)
addq %rcx, %r10
movq %rax, 0x18(%rsp)
addq %rax, %rcx
xorl %r12d, %r12d
xorl %edx, %edx
xorl %eax, %eax
movq %r14, %r8
leal 0x7(%rax), %esi
cmpl %r9d, %esi
jge 0x3695c3
vmovups (%rcx,%rdx), %ymm0
vmovups (%r10,%rdx), %ymm1
vmovups (%rbp,%rdx), %ymm2
vmovups (%r11,%rdx), %ymm3
vmovups (%rbx,%rdx), %ymm4
vmovups (%rdi,%rdx), %ymm5
vmovups (%r13,%rdx), %ymm6
vmovups (%r15,%rdx), %ymm7
vunpcklps %ymm1, %ymm0, %ymm8 # ymm8 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
vunpckhps %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
vunpcklps %ymm3, %ymm2, %ymm1 # ymm1 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
vunpckhps %ymm3, %ymm2, %ymm2 # ymm2 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
vunpcklps %ymm5, %ymm4, %ymm3 # ymm3 = ymm4[0],ymm5[0],ymm4[1],ymm5[1],ymm4[4],ymm5[4],ymm4[5],ymm5[5]
vunpckhps %ymm5, %ymm4, %ymm4 # ymm4 = ymm4[2],ymm5[2],ymm4[3],ymm5[3],ymm4[6],ymm5[6],ymm4[7],ymm5[7]
vunpcklps %ymm7, %ymm6, %ymm5 # ymm5 = ymm6[0],ymm7[0],ymm6[1],ymm7[1],ymm6[4],ymm7[4],ymm6[5],ymm7[5]
vunpckhps %ymm7, %ymm6, %ymm6 # ymm6 = ymm6[2],ymm7[2],ymm6[3],ymm7[3],ymm6[6],ymm7[6],ymm6[7],ymm7[7]
vunpcklpd %ymm1, %ymm8, %ymm7 # ymm7 = ymm8[0],ymm1[0],ymm8[2],ymm1[2]
vunpckhpd %ymm1, %ymm8, %ymm1 # ymm1 = ymm8[1],ymm1[1],ymm8[3],ymm1[3]
vunpcklpd %ymm2, %ymm0, %ymm8 # ymm8 = ymm0[0],ymm2[0],ymm0[2],ymm2[2]
vunpckhpd %ymm2, %ymm0, %ymm0 # ymm0 = ymm0[1],ymm2[1],ymm0[3],ymm2[3]
vunpcklpd %ymm5, %ymm3, %ymm2 # ymm2 = ymm3[0],ymm5[0],ymm3[2],ymm5[2]
vunpckhpd %ymm5, %ymm3, %ymm3 # ymm3 = ymm3[1],ymm5[1],ymm3[3],ymm5[3]
vunpcklpd %ymm6, %ymm4, %ymm5 # ymm5 = ymm4[0],ymm6[0],ymm4[2],ymm6[2]
vunpckhpd %ymm6, %ymm4, %ymm4 # ymm4 = ymm4[1],ymm6[1],ymm4[3],ymm6[3]
vinsertf128 $0x1, %xmm2, %ymm7, %ymm6
vinsertf128 $0x1, %xmm3, %ymm1, %ymm9
vinsertf128 $0x1, %xmm5, %ymm8, %ymm10
vinsertf128 $0x1, %xmm4, %ymm0, %ymm11
vperm2f128 $0x31, %ymm2, %ymm7, %ymm2 # ymm2 = ymm7[2,3],ymm2[2,3]
vperm2f128 $0x31, %ymm3, %ymm1, %ymm1 # ymm1 = ymm1[2,3],ymm3[2,3]
vperm2f128 $0x31, %ymm5, %ymm8, %ymm3 # ymm3 = ymm8[2,3],ymm5[2,3]
vperm2f128 $0x31, %ymm4, %ymm0, %ymm0 # ymm0 = ymm0[2,3],ymm4[2,3]
vmovaps %ymm6, (%r14,%rdx,8)
vmovaps %ymm9, 0x20(%r14,%rdx,8)
vmovaps %ymm10, 0x40(%r14,%rdx,8)
vmovaps %ymm11, 0x60(%r14,%rdx,8)
vmovaps %ymm2, 0x80(%r14,%rdx,8)
vmovaps %ymm1, 0xa0(%r14,%rdx,8)
vmovaps %ymm3, 0xc0(%r14,%rdx,8)
vmovaps %ymm0, 0xe0(%r14,%rdx,8)
addq $0x100, %r8 # imm = 0x100
addl $0x8, %eax
addq $0x20, %rdx
addq $0x8, %r12
jmp 0x3694bf
movq (%rsp), %rdi
movq -0x40(%rsp), %r13
movq -0x48(%rsp), %rbp
movq 0x18(%rsp), %r11
movq -0x18(%rsp), %rsi
movq -0x58(%rsp), %r10
movl -0x64(%rsp), %r15d
cmpl %r9d, %r12d
jge 0x36966e
leaq (%rcx,%r12,4), %rax
vmovss (%rax), %xmm0
vmovss %xmm0, (%r8)
leaq (%rax,%rsi), %rdx
vmovss (%rax,%r13,4), %xmm0
vmovss %xmm0, 0x4(%r8)
addq %rsi, %rdx
vmovss (%rax,%r13,8), %xmm0
vmovss %xmm0, 0x8(%r8)
vmovss (%rdx,%r13,4), %xmm0
vmovss %xmm0, 0xc(%r8)
leaq (%rsi,%rsi), %rax
addq %rdx, %rax
vmovss (%rdx,%r13,8), %xmm0
vmovss %xmm0, 0x10(%r8)
leaq (%rax,%r13,4), %rdx
vmovss (%rax,%r13,4), %xmm0
vmovss %xmm0, 0x14(%r8)
vmovss (%rsi,%rdx), %xmm0
addq %rsi, %rdx
vmovss %xmm0, 0x18(%r8)
vmovss (%rsi,%rdx), %xmm0
vmovss %xmm0, 0x1c(%r8)
addq $0x20, %r8
incq %r12
jmp 0x3695e5
movq %r8, %r14
movq -0x50(%rsp), %r12
movq -0x78(%rsp), %r8
movq 0x10(%rsp), %rsi
movq 0x8(%rsp), %rdx
addq $0x8, %r12
movq 0x20(%rsp), %rax
addq %rax, %r11
addq %rax, %r10
addq %rax, %r8
addq %rax, %rsi
addq %rax, -0x60(%rsp)
addq %rax, %rdx
addq %rax, -0x20(%rsp)
addq %rax, -0x28(%rsp)
addq %rax, -0x30(%rsp)
addq %rax, -0x38(%rsp)
jmp 0x3693d3
movq %r13, %rcx
shlq $0x4, %rcx
movq %r12, %rax
orq $0x3, %rax
cmpq -0x70(%rsp), %rax
jge 0x3698b9
cmpl $0x4, %r15d
jne 0x36970e
leaq (%r12,%rbp), %rax
imulq %r13, %rax
shlq $0x2, %rax
addq (%rdi), %rax
movq -0x8(%rsp), %rdx
leaq (%rax,%rdx,4), %rax
movl -0x7c(%rsp), %edx
subl $0x1, %edx
jb 0x36970e
vmovaps (%rax), %xmm0
vmovaps %xmm0, (%r14)
addq $0x10, %r14
addq $0x10, %rax
jmp 0x3696f6
cmpl $0x1, %r15d
jne 0x3698a4
movq %r12, -0x50(%rsp)
movq (%rdi), %rdx
movq %rsi, %rax
leaq (%rdx,%r11), %rsi
movq %r10, -0x58(%rsp)
addq %rdx, %r10
movq %r8, -0x78(%rsp)
leaq (%rdx,%r8), %rbp
movq %rax, %r15
addq %rax, %rdx
xorl %r13d, %r13d
xorl %r8d, %r8d
movq %r14, %r12
leal 0x7(%r8), %eax
cmpl %r9d, %eax
jge 0x369831
vmovups (%rsi,%r13), %ymm0
vmovups (%r10,%r13), %ymm1
vmovups (%rbp,%r13), %ymm2
vmovups (%rdx,%r13), %ymm3
vunpcklps %ymm1, %ymm0, %ymm4 # ymm4 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
vunpckhps %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
vunpcklps %ymm3, %ymm2, %ymm1 # ymm1 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
vunpckhps %ymm3, %ymm2, %ymm2 # ymm2 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
vunpcklpd %ymm1, %ymm4, %ymm3 # ymm3 = ymm4[0],ymm1[0],ymm4[2],ymm1[2]
vunpckhpd %ymm1, %ymm4, %ymm1 # ymm1 = ymm4[1],ymm1[1],ymm4[3],ymm1[3]
vunpcklpd %ymm2, %ymm0, %ymm4 # ymm4 = ymm0[0],ymm2[0],ymm0[2],ymm2[2]
vunpckhpd %ymm2, %ymm0, %ymm0 # ymm0 = ymm0[1],ymm2[1],ymm0[3],ymm2[3]
vinsertf128 $0x1, %xmm1, %ymm3, %ymm2
vinsertf128 $0x1, %xmm0, %ymm4, %ymm5
vperm2f128 $0x31, %ymm1, %ymm3, %ymm1 # ymm1 = ymm3[2,3],ymm1[2,3]
vperm2f128 $0x31, %ymm0, %ymm4, %ymm0 # ymm0 = ymm4[2,3],ymm0[2,3]
vmovaps %ymm2, (%r14,%r13,4)
vmovaps %ymm5, 0x20(%r14,%r13,4)
vmovaps %ymm1, 0x40(%r14,%r13,4)
vmovaps %ymm0, 0x60(%r14,%r13,4)
subq $-0x80, %r12
addl $0x8, %r8d
addq $0x20, %r13
jmp 0x369747
vmovups (%rsi,%r13), %xmm0
vmovups (%r10,%r13), %xmm1
vmovups (%rbp,%r13), %xmm2
vmovups (%rdx,%r13), %xmm3
vunpcklps %xmm1, %xmm0, %xmm4 # xmm4 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vunpcklps %xmm3, %xmm2, %xmm5 # xmm5 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
vunpckhps %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
vunpckhps %xmm3, %xmm2, %xmm1 # xmm1 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
vmovlhps %xmm5, %xmm4, %xmm2 # xmm2 = xmm4[0],xmm5[0]
vunpckhpd %xmm5, %xmm4, %xmm3 # xmm3 = xmm4[1],xmm5[1]
vmovlhps %xmm1, %xmm0, %xmm4 # xmm4 = xmm0[0],xmm1[0]
vunpckhpd %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[1],xmm1[1]
vmovaps %xmm2, (%r14,%r13,4)
vmovaps %xmm3, 0x10(%r14,%r13,4)
vmovaps %xmm4, 0x20(%r14,%r13,4)
vmovaps %xmm0, 0x30(%r14,%r13,4)
addq $0x40, %r12
addl $0x4, %r8d
addq $0x10, %r13
leal 0x3(%r8), %eax
cmpl %r9d, %eax
jl 0x3697d1
jmp 0x36987b
vmovss (%rsi,%r13), %xmm0
vmovss %xmm0, (%r14,%r13,4)
vmovss (%r10,%r13), %xmm0
vmovss %xmm0, 0x4(%r14,%r13,4)
vmovss (%rbp,%r13), %xmm0
vmovss %xmm0, 0x8(%r14,%r13,4)
vmovss (%rdx,%r13), %xmm0
vmovss %xmm0, 0xc(%r14,%r13,4)
addq $0x10, %r12
incl %r8d
addq $0x4, %r13
cmpl %r9d, %r8d
jl 0x36983c
movq %r12, %r14
movq -0x40(%rsp), %r13
movq -0x50(%rsp), %r12
movq -0x48(%rsp), %rbp
movq -0x58(%rsp), %r10
movq -0x78(%rsp), %r8
movq %r15, %rsi
movl -0x64(%rsp), %r15d
addq $0x4, %r12
addq %rcx, %r11
addq %rcx, %r10
addq %rcx, %r8
addq %rcx, %rsi
jmp 0x3696c2
leaq (,%r13,8), %rax
movq %rax, -0x60(%rsp)
movq %r12, %rax
orq $0x1, %rax
movq -0x70(%rsp), %rbx
cmpq %rbx, %rax
jge 0x369a46
movq %rbp, %r15
movq %rdi, %rcx
movq (%rdi), %rsi
movq %r11, %rdi
leaq (%rsi,%r11), %r8
movq %r10, %rbp
addq %r10, %rsi
xorl %r10d, %r10d
xorl %r11d, %r11d
xorl %ebx, %ebx
movq %r14, %rdx
leal 0x7(%rbx), %eax
cmpl %r9d, %eax
jge 0x369972
vmovups (%r8,%r11), %ymm0
vmovups (%rsi,%r11), %ymm1
vunpcklps %ymm1, %ymm0, %ymm2 # ymm2 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
vunpckhps %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
vinsertf128 $0x1, %xmm0, %ymm2, %ymm1
vperm2f128 $0x31, %ymm0, %ymm2, %ymm0 # ymm0 = ymm2[2,3],ymm0[2,3]
vmovups %ymm1, (%r14,%r11,2)
vmovups %ymm0, 0x20(%r14,%r11,2)
addq $0x40, %rdx
addl $0x8, %ebx
addq $0x20, %r11
addq $0x8, %r10
jmp 0x3698fc
vmovups (%r8,%r11), %xmm0
vmovups (%rsi,%r11), %xmm1
vunpcklps %xmm1, %xmm0, %xmm2 # xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vunpckhps %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
vmovaps %xmm2, (%r14,%r11,2)
vmovaps %xmm0, 0x10(%r14,%r11,2)
addq $0x20, %rdx
addl $0x4, %ebx
addq $0x10, %r11
addq $0x4, %r10
leal 0x3(%rbx), %eax
cmpl %r9d, %eax
jl 0x369942
movq %rdi, %r11
cmpl %r9d, %r10d
jge 0x3699a0
vmovss (%r8,%r10,4), %xmm0
vmovss %xmm0, (%rdx)
vmovss (%rsi,%r10,4), %xmm0
vmovss %xmm0, 0x4(%rdx)
addq $0x8, %rdx
incq %r10
jmp 0x36997d
addq $0x2, %r12
movq -0x60(%rsp), %rax
addq %rax, %r11
movq %rbp, %r10
addq %rax, %r10
movq %rdx, %r14
movq %r15, %rbp
movq %rcx, %rdi
jmp 0x3698c6
movq (%rdi), %rcx
leaq (%r12,%rbp), %rax
imulq %r13, %rax
leaq (%rcx,%rax,4), %rax
movq 0x28(%rsp), %rdx
leaq (%rax,%rdx,4), %rax
xorl %edx, %edx
xorl %r8d, %r8d
leal 0x7(%r8), %esi
cmpl %r9d, %esi
jge 0x369a1a
vmovups (%rax), %ymm0
vmovups %ymm0, (%r14)
addq $0x20, %r14
addq $0x20, %rax
addl $0x8, %r8d
addq $0x8, %rdx
jmp 0x3699dd
vmovups (%rax), %xmm0
vmovups %xmm0, (%r14)
addq $0x10, %r14
addq $0x10, %rax
addl $0x4, %r8d
addq $0x4, %rdx
leal 0x3(%r8), %esi
cmpl %r9d, %esi
jl 0x369a01
addq %r11, %rcx
cmpl %r9d, %edx
jge 0x369a3e
vmovss (%rcx,%rdx,4), %xmm0
vmovss %xmm0, (%r14)
addq $0x4, %r14
incq %rdx
jmp 0x369a26
incq %r12
addq -0x18(%rsp), %r11
cmpq %rbx, %r12
jl 0x3699c0
addq $0x30, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/gemm_x86_fma.cpp |
ncnn::transpose_pack_B_tile(ncnn::Mat const&, ncnn::Mat&, int, int, int, int) | static void transpose_pack_B_tile(const Mat& B, Mat& BT, int j, int max_jj, int k, int max_kk)
{
const int elempack = B.elempack;
const int B_hstep = B.dims == 3 ? (int)B.cstep : B.w;
float* pp = BT;
int jj = 0;
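    // The tile loops below emit BT k-major: each tile copies a TILE-wide slice
    // of B columns for kk = 0..max_kk-1 so the gemm microkernel can read BT
    // linearly. For elempack == 1 every tile is equivalent to this scalar
    // reference (an illustrative sketch, not part of the original source;
    // TILE stands for the tile width 12/8/4/2/1 and pp is taken at the start
    // of the current tile):
#if 0
    for (int kk = 0; kk < max_kk; kk++)
    {
        for (int jjj = 0; jjj < TILE; jjj++)
        {
            pp[kk * TILE + jjj] = ((const float*)B)[(k + kk) * B_hstep + (j + jj + jjj)];
        }
    }
#endif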
#if __SSE2__
for (; jj + 11 < max_jj; jj += 12)
{
#if __AVX__
#if __AVX512F__
if (elempack == 16)
{
const float* p0 = (const float*)B + k * B_hstep + (j + jj) * 16;
int kk = 0;
for (; kk + 15 < max_kk; kk += 16)
{
__m512 _r0 = _mm512_load_ps(p0);
__m512 _r1 = _mm512_load_ps(p0 + 16 * 1);
__m512 _r2 = _mm512_load_ps(p0 + 16 * 2);
__m512 _r3 = _mm512_load_ps(p0 + 16 * 3);
__m512 _r4 = _mm512_load_ps(p0 + 16 * 4);
__m512 _r5 = _mm512_load_ps(p0 + 16 * 5);
__m512 _r6 = _mm512_load_ps(p0 + 16 * 6);
__m512 _r7 = _mm512_load_ps(p0 + 16 * 7);
__m512 _r8 = _mm512_load_ps(p0 + 16 * 8);
__m512 _r9 = _mm512_load_ps(p0 + 16 * 9);
__m512 _ra = _mm512_load_ps(p0 + 16 * 10);
__m512 _rb = _mm512_load_ps(p0 + 16 * 11);
transpose16x12_ps(_r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7, _r8, _r9, _ra, _rb);
_mm512_store_ps(pp, _r0);
_mm512_store_ps(pp + 16 * 1, _r1);
_mm512_store_ps(pp + 16 * 2, _r2);
_mm512_store_ps(pp + 16 * 3, _r3);
_mm512_store_ps(pp + 16 * 4, _r4);
_mm512_store_ps(pp + 16 * 5, _r5);
_mm512_store_ps(pp + 16 * 6, _r6);
_mm512_store_ps(pp + 16 * 7, _r7);
_mm512_store_ps(pp + 16 * 8, _r8);
_mm512_store_ps(pp + 16 * 9, _r9);
_mm512_store_ps(pp + 16 * 10, _ra);
_mm512_store_ps(pp + 16 * 11, _rb);
pp += 192;
p0 += B_hstep * 16;
}
}
#endif // __AVX512F__
if (elempack == 8)
{
const float* p0 = (const float*)B + k * B_hstep + (j + jj) * 8;
int kk = 0;
for (; kk + 7 < max_kk; kk += 8)
{
__m256 _r0 = _mm256_load_ps(p0);
__m256 _r1 = _mm256_load_ps(p0 + 8 * 1);
__m256 _r2 = _mm256_load_ps(p0 + 8 * 2);
__m256 _r3 = _mm256_load_ps(p0 + 8 * 3);
__m256 _r4 = _mm256_load_ps(p0 + 8 * 4);
__m256 _r5 = _mm256_load_ps(p0 + 8 * 5);
__m256 _r6 = _mm256_load_ps(p0 + 8 * 6);
__m256 _r7 = _mm256_load_ps(p0 + 8 * 7);
__m256 _r8 = _mm256_load_ps(p0 + 8 * 8);
__m256 _r9 = _mm256_load_ps(p0 + 8 * 9);
__m256 _ra = _mm256_load_ps(p0 + 8 * 10);
__m256 _rb = _mm256_load_ps(p0 + 8 * 11);
transpose8x12_ps(_r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7, _r8, _r9, _ra, _rb);
_mm256_store_ps(pp, _r0);
_mm256_store_ps(pp + 8 * 1, _r1);
_mm256_store_ps(pp + 8 * 2, _r2);
_mm256_store_ps(pp + 8 * 3, _r3);
_mm256_store_ps(pp + 8 * 4, _r4);
_mm256_store_ps(pp + 8 * 5, _r5);
_mm256_store_ps(pp + 8 * 6, _r6);
_mm256_store_ps(pp + 8 * 7, _r7);
_mm256_store_ps(pp + 8 * 8, _r8);
_mm256_store_ps(pp + 8 * 9, _r9);
_mm256_store_ps(pp + 8 * 10, _ra);
_mm256_store_ps(pp + 8 * 11, _rb);
pp += 96;
p0 += B_hstep * 8;
}
}
#endif // __AVX__
if (elempack == 4)
{
const float* p0 = (const float*)B + k * B_hstep + (j + jj) * 4;
int kk = 0;
for (; kk + 3 < max_kk; kk += 4)
{
__m128 _r0 = _mm_load_ps(p0);
__m128 _r1 = _mm_load_ps(p0 + 4 * 1);
__m128 _r2 = _mm_load_ps(p0 + 4 * 2);
__m128 _r3 = _mm_load_ps(p0 + 4 * 3);
__m128 _r4 = _mm_load_ps(p0 + 4 * 4);
__m128 _r5 = _mm_load_ps(p0 + 4 * 5);
__m128 _r6 = _mm_load_ps(p0 + 4 * 6);
__m128 _r7 = _mm_load_ps(p0 + 4 * 7);
__m128 _r8 = _mm_load_ps(p0 + 4 * 8);
__m128 _r9 = _mm_load_ps(p0 + 4 * 9);
__m128 _ra = _mm_load_ps(p0 + 4 * 10);
__m128 _rb = _mm_load_ps(p0 + 4 * 11);
_MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3);
_MM_TRANSPOSE4_PS(_r4, _r5, _r6, _r7);
_MM_TRANSPOSE4_PS(_r8, _r9, _ra, _rb);
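                // After transposing each 4x4 group, row i of group g holds the
                // four consecutive j-values starting at j + jj + 4*g for
                // k-offset kk + i, so storing _r0,_r4,_r8 then _r1,_r5,_r9 ...
                // lays out 12 consecutive j-values per k step.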
_mm_store_ps(pp, _r0);
_mm_store_ps(pp + 4 * 1, _r4);
_mm_store_ps(pp + 4 * 2, _r8);
_mm_store_ps(pp + 4 * 3, _r1);
_mm_store_ps(pp + 4 * 4, _r5);
_mm_store_ps(pp + 4 * 5, _r9);
_mm_store_ps(pp + 4 * 6, _r2);
_mm_store_ps(pp + 4 * 7, _r6);
_mm_store_ps(pp + 4 * 8, _ra);
_mm_store_ps(pp + 4 * 9, _r3);
_mm_store_ps(pp + 4 * 10, _r7);
_mm_store_ps(pp + 4 * 11, _rb);
pp += 48;
p0 += B_hstep * 4;
}
}
if (elempack == 1)
{
const float* p0 = (const float*)B + k * B_hstep + (j + jj);
int kk = 0;
for (; kk < max_kk; kk++)
{
_mm_store_ps(pp, _mm_loadu_ps(p0));
_mm_store_ps(pp + 4, _mm_loadu_ps(p0 + 4));
_mm_store_ps(pp + 8, _mm_loadu_ps(p0 + 8));
pp += 12;
p0 += B_hstep;
}
}
}
for (; jj + 7 < max_jj; jj += 8)
{
#if __AVX__
#if __AVX512F__
if (elempack == 16)
{
const float* p0 = (const float*)B + k * B_hstep + (j + jj) * 16;
int kk = 0;
for (; kk + 15 < max_kk; kk += 16)
{
__m512 _r0 = _mm512_load_ps(p0);
__m512 _r1 = _mm512_load_ps(p0 + 16 * 1);
__m512 _r2 = _mm512_load_ps(p0 + 16 * 2);
__m512 _r3 = _mm512_load_ps(p0 + 16 * 3);
__m512 _r4 = _mm512_load_ps(p0 + 16 * 4);
__m512 _r5 = _mm512_load_ps(p0 + 16 * 5);
__m512 _r6 = _mm512_load_ps(p0 + 16 * 6);
__m512 _r7 = _mm512_load_ps(p0 + 16 * 7);
transpose16x8_ps(_r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7);
_mm512_store_ps(pp, _r0);
_mm512_store_ps(pp + 16 * 1, _r1);
_mm512_store_ps(pp + 16 * 2, _r2);
_mm512_store_ps(pp + 16 * 3, _r3);
_mm512_store_ps(pp + 16 * 4, _r4);
_mm512_store_ps(pp + 16 * 5, _r5);
_mm512_store_ps(pp + 16 * 6, _r6);
_mm512_store_ps(pp + 16 * 7, _r7);
pp += 128;
p0 += B_hstep * 16;
}
}
#endif // __AVX512F__
if (elempack == 8)
{
const float* p0 = (const float*)B + k * B_hstep + (j + jj) * 8;
int kk = 0;
for (; kk + 7 < max_kk; kk += 8)
{
__m256 _r0 = _mm256_load_ps(p0);
__m256 _r1 = _mm256_load_ps(p0 + 8 * 1);
__m256 _r2 = _mm256_load_ps(p0 + 8 * 2);
__m256 _r3 = _mm256_load_ps(p0 + 8 * 3);
__m256 _r4 = _mm256_load_ps(p0 + 8 * 4);
__m256 _r5 = _mm256_load_ps(p0 + 8 * 5);
__m256 _r6 = _mm256_load_ps(p0 + 8 * 6);
__m256 _r7 = _mm256_load_ps(p0 + 8 * 7);
transpose8x8_ps(_r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7);
_mm256_store_ps(pp, _r0);
_mm256_store_ps(pp + 8 * 1, _r1);
_mm256_store_ps(pp + 8 * 2, _r2);
_mm256_store_ps(pp + 8 * 3, _r3);
_mm256_store_ps(pp + 8 * 4, _r4);
_mm256_store_ps(pp + 8 * 5, _r5);
_mm256_store_ps(pp + 8 * 6, _r6);
_mm256_store_ps(pp + 8 * 7, _r7);
pp += 64;
p0 += B_hstep * 8;
}
}
#endif // __AVX__
if (elempack == 4)
{
const float* p0 = (const float*)B + k * B_hstep + (j + jj) * 4;
int kk = 0;
for (; kk + 3 < max_kk; kk += 4)
{
__m128 _r0 = _mm_load_ps(p0);
__m128 _r1 = _mm_load_ps(p0 + 4 * 1);
__m128 _r2 = _mm_load_ps(p0 + 4 * 2);
__m128 _r3 = _mm_load_ps(p0 + 4 * 3);
__m128 _r4 = _mm_load_ps(p0 + 4 * 4);
__m128 _r5 = _mm_load_ps(p0 + 4 * 5);
__m128 _r6 = _mm_load_ps(p0 + 4 * 6);
__m128 _r7 = _mm_load_ps(p0 + 4 * 7);
_MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3);
_MM_TRANSPOSE4_PS(_r4, _r5, _r6, _r7);
_mm_store_ps(pp, _r0);
_mm_store_ps(pp + 4 * 1, _r4);
_mm_store_ps(pp + 4 * 2, _r1);
_mm_store_ps(pp + 4 * 3, _r5);
_mm_store_ps(pp + 4 * 4, _r2);
_mm_store_ps(pp + 4 * 5, _r6);
_mm_store_ps(pp + 4 * 6, _r3);
_mm_store_ps(pp + 4 * 7, _r7);
pp += 32;
p0 += B_hstep * 4;
}
}
if (elempack == 1)
{
const float* p0 = (const float*)B + k * B_hstep + (j + jj);
int kk = 0;
for (; kk < max_kk; kk++)
{
_mm_store_ps(pp, _mm_loadu_ps(p0));
_mm_store_ps(pp + 4, _mm_loadu_ps(p0 + 4));
pp += 8;
p0 += B_hstep;
}
}
}
for (; jj + 3 < max_jj; jj += 4)
{
#if __AVX__
#if __AVX512F__
if (elempack == 16)
{
const float* p0 = (const float*)B + k * B_hstep + (j + jj) * 16;
int kk = 0;
for (; kk + 15 < max_kk; kk += 16)
{
__m512 _r0 = _mm512_load_ps(p0);
__m512 _r1 = _mm512_load_ps(p0 + 16 * 1);
__m512 _r2 = _mm512_load_ps(p0 + 16 * 2);
__m512 _r3 = _mm512_load_ps(p0 + 16 * 3);
transpose16x4_ps(_r0, _r1, _r2, _r3);
_mm512_store_ps(pp, _r0);
_mm512_store_ps(pp + 16 * 1, _r1);
_mm512_store_ps(pp + 16 * 2, _r2);
_mm512_store_ps(pp + 16 * 3, _r3);
pp += 64;
p0 += B_hstep * 16;
}
}
#endif // __AVX512F__
if (elempack == 8)
{
const float* p0 = (const float*)B + k * B_hstep + (j + jj) * 8;
int kk = 0;
for (; kk + 7 < max_kk; kk += 8)
{
__m256 _r0 = _mm256_load_ps(p0);
__m256 _r1 = _mm256_load_ps(p0 + 8 * 1);
__m256 _r2 = _mm256_load_ps(p0 + 8 * 2);
__m256 _r3 = _mm256_load_ps(p0 + 8 * 3);
transpose8x4_ps(_r0, _r1, _r2, _r3);
_mm256_store_ps(pp, _r0);
_mm256_store_ps(pp + 8 * 1, _r1);
_mm256_store_ps(pp + 8 * 2, _r2);
_mm256_store_ps(pp + 8 * 3, _r3);
pp += 32;
p0 += B_hstep * 8;
}
}
#endif // __AVX__
if (elempack == 4)
{
const float* p0 = (const float*)B + k * B_hstep + (j + jj) * 4;
int kk = 0;
for (; kk + 3 < max_kk; kk += 4)
{
__m128 _r0 = _mm_load_ps(p0);
__m128 _r1 = _mm_load_ps(p0 + 4 * 1);
__m128 _r2 = _mm_load_ps(p0 + 4 * 2);
__m128 _r3 = _mm_load_ps(p0 + 4 * 3);
_MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3);
_mm_store_ps(pp, _r0);
_mm_store_ps(pp + 4 * 1, _r1);
_mm_store_ps(pp + 4 * 2, _r2);
_mm_store_ps(pp + 4 * 3, _r3);
pp += 16;
p0 += B_hstep * 4;
}
}
if (elempack == 1)
{
const float* p0 = (const float*)B + k * B_hstep + (j + jj);
int kk = 0;
for (; kk < max_kk; kk++)
{
_mm_store_ps(pp, _mm_loadu_ps(p0));
pp += 4;
p0 += B_hstep;
}
}
}
#endif // __SSE2__
for (; jj + 1 < max_jj; jj += 2)
{
#if __SSE2__
#if __AVX__
#if __AVX512F__
if (elempack == 16)
{
const float* p0 = (const float*)B + k * B_hstep + (j + jj) * 16;
int kk = 0;
for (; kk + 15 < max_kk; kk += 16)
{
__m512 _r0 = _mm512_load_ps(p0);
__m512 _r1 = _mm512_load_ps(p0 + 16);
transpose16x2_ps(_r0, _r1);
_mm512_store_ps(pp, _r0);
_mm512_store_ps(pp + 16, _r1);
pp += 32;
p0 += B_hstep * 16;
}
}
#endif // __AVX512F__
if (elempack == 8)
{
const float* p0 = (const float*)B + k * B_hstep + (j + jj) * 8;
int kk = 0;
for (; kk + 7 < max_kk; kk += 8)
{
__m256 _r0 = _mm256_load_ps(p0);
__m256 _r1 = _mm256_load_ps(p0 + 8);
transpose8x2_ps(_r0, _r1);
_mm256_store_ps(pp, _r0);
_mm256_store_ps(pp + 8, _r1);
pp += 16;
p0 += B_hstep * 8;
}
}
#endif // __AVX__
if (elempack == 4)
{
const float* p0 = (const float*)B + k * B_hstep + (j + jj) * 4;
int kk = 0;
for (; kk + 3 < max_kk; kk += 4)
{
__m128 _r0 = _mm_load_ps(p0);
__m128 _r1 = _mm_load_ps(p0 + 4);
__m128 _tmp0 = _mm_unpacklo_ps(_r0, _r1);
__m128 _tmp1 = _mm_unpackhi_ps(_r0, _r1);
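                // unpacklo/unpackhi interleave the two packed columns into
                // (j, j+1) pairs ordered by k: _tmp0 carries kk+0 and kk+1,
                // _tmp1 carries kk+2 and kk+3, matching the 2-wide BT layout.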
_mm_store_ps(pp, _tmp0);
_mm_store_ps(pp + 4, _tmp1);
pp += 8;
p0 += B_hstep * 4;
}
}
#endif // __SSE2__
if (elempack == 1)
{
const float* p0 = (const float*)B + k * B_hstep + (j + jj);
int kk = 0;
for (; kk < max_kk; kk++)
{
pp[0] = p0[0];
pp[1] = p0[1];
pp += 2;
p0 += B_hstep;
}
}
}
for (; jj < max_jj; jj += 1)
{
#if __SSE2__
#if __AVX__
#if __AVX512F__
if (elempack == 16)
{
const float* p0 = (const float*)B + k * B_hstep + (j + jj) * 16;
int kk = 0;
for (; kk + 15 < max_kk; kk += 16)
{
_mm512_store_ps(pp, _mm512_load_ps(p0));
pp += 16;
p0 += B_hstep * 16;
}
}
#endif // __AVX512F__
if (elempack == 8)
{
const float* p0 = (const float*)B + k * B_hstep + (j + jj) * 8;
int kk = 0;
for (; kk + 7 < max_kk; kk += 8)
{
_mm256_store_ps(pp, _mm256_load_ps(p0));
pp += 8;
p0 += B_hstep * 8;
}
}
#endif // __AVX__
if (elempack == 4)
{
const float* p0 = (const float*)B + k * B_hstep + (j + jj) * 4;
int kk = 0;
for (; kk + 3 < max_kk; kk += 4)
{
_mm_store_ps(pp, _mm_load_ps(p0));
pp += 4;
p0 += B_hstep * 4;
}
}
#endif // __SSE2__
if (elempack == 1)
{
const float* p0 = (const float*)B + k * B_hstep + (j + jj);
int kk = 0;
for (; kk < max_kk; kk++)
{
pp[0] = p0[0];
pp += 1;
p0 += B_hstep;
}
}
}
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
movl %ecx, -0x60(%rsp)
movq %rdi, %rbp
leaq 0x2c(%rdi), %rax
leaq 0x40(%rdi), %rcx
cmpl $0x3, 0x28(%rdi)
cmoveq %rcx, %rax
movslq (%rax), %rax
imull %eax, %r8d
xorl %r10d, %r10d
testl %r9d, %r9d
movl $0x0, %ecx
cmovgl %r9d, %ecx
movl %ecx, -0x54(%rsp)
movslq %r8d, %rcx
movslq %edx, %r8
leaq (,%rcx,4), %r11
movq %r8, %rdi
leaq (%r11,%r8,4), %rdx
leaq 0x4(%r11,%r8,4), %r12
leaq 0x10(%r11,%r8,4), %rbx
movq %rbx, -0x20(%rsp)
leaq (%r11,%r8,4), %r11
addq $0x20, %r11
movq %r11, -0x50(%rsp)
movq %r8, %r11
shlq $0x5, %r11
leaq (%r11,%rcx,4), %r8
shlq $0x4, %rdi
leaq (%r11,%rcx,4), %r13
addq $0x20, %r13
leaq 0x60(%r11,%rcx,4), %rbx
movq %rbx, -0x38(%rsp)
leaq 0xe0(%r11,%rcx,4), %rbx
movq %rbx, -0x28(%rsp)
leaq 0x160(%r11,%rcx,4), %r11
movq %r11, -0x10(%rsp)
leaq (%rdi,%rcx,4), %rbx
leaq 0x10(%rdi,%rcx,4), %r11
leaq 0x30(%rdi,%rcx,4), %r14
movq %r14, -0x40(%rsp)
leaq 0x70(%rdi,%rcx,4), %r14
movq %r14, -0x30(%rsp)
leaq 0xb0(%rdi,%rcx,4), %rcx
movq %rcx, -0x18(%rsp)
movq (%rsi), %rsi
leal (,%rax,8), %ecx
movslq %ecx, %r14
leal (,%rax,4), %ecx
movslq %ecx, %r15
movslq -0x60(%rsp), %rcx
shlq $0x2, %r14
shlq $0x2, %r15
shlq $0x2, %rax
movq %rbp, -0x60(%rsp)
movl 0x18(%rbp), %ebp
movq %rcx, -0x48(%rsp)
addq $-0xb, %rcx
movq %rcx, -0x8(%rsp)
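# -0x8(%rsp) = max_jj - 11; the loop below handles the 12-wide column tiles (jj + 11 < max_jj)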
cmpq -0x8(%rsp), %r10
jge 0x36ae86
cmpl $0x8, %ebp
jne 0x36acc8
movq -0x60(%rsp), %rcx
movq (%rcx), %rdi
addq -0x10(%rsp), %rdi
movl $0x7, %ecx
cmpl %r9d, %ecx
jge 0x36acc8
vmovaps -0x160(%rdi), %ymm0
vmovaps -0x140(%rdi), %ymm1
vmovaps -0x120(%rdi), %ymm2
vmovaps -0x100(%rdi), %ymm3
vmovaps -0xe0(%rdi), %ymm4
vmovaps -0xc0(%rdi), %ymm5
vmovaps -0xa0(%rdi), %ymm6
vmovaps -0x80(%rdi), %ymm7
vmovaps -0x60(%rdi), %ymm8
vmovaps -0x40(%rdi), %ymm9
vmovaps -0x20(%rdi), %ymm10
vmovaps (%rdi), %ymm11
vunpcklps %ymm1, %ymm0, %ymm12 # ymm12 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
vunpckhps %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
vunpcklps %ymm3, %ymm2, %ymm1 # ymm1 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
vunpckhps %ymm3, %ymm2, %ymm2 # ymm2 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
vunpcklps %ymm5, %ymm4, %ymm3 # ymm3 = ymm4[0],ymm5[0],ymm4[1],ymm5[1],ymm4[4],ymm5[4],ymm4[5],ymm5[5]
vunpckhps %ymm5, %ymm4, %ymm4 # ymm4 = ymm4[2],ymm5[2],ymm4[3],ymm5[3],ymm4[6],ymm5[6],ymm4[7],ymm5[7]
vunpcklps %ymm7, %ymm6, %ymm5 # ymm5 = ymm6[0],ymm7[0],ymm6[1],ymm7[1],ymm6[4],ymm7[4],ymm6[5],ymm7[5]
vunpckhps %ymm7, %ymm6, %ymm6 # ymm6 = ymm6[2],ymm7[2],ymm6[3],ymm7[3],ymm6[6],ymm7[6],ymm6[7],ymm7[7]
vunpcklps %ymm9, %ymm8, %ymm7 # ymm7 = ymm8[0],ymm9[0],ymm8[1],ymm9[1],ymm8[4],ymm9[4],ymm8[5],ymm9[5]
vunpckhps %ymm9, %ymm8, %ymm8 # ymm8 = ymm8[2],ymm9[2],ymm8[3],ymm9[3],ymm8[6],ymm9[6],ymm8[7],ymm9[7]
vunpcklps %ymm11, %ymm10, %ymm9 # ymm9 = ymm10[0],ymm11[0],ymm10[1],ymm11[1],ymm10[4],ymm11[4],ymm10[5],ymm11[5]
vunpckhps %ymm11, %ymm10, %ymm10 # ymm10 = ymm10[2],ymm11[2],ymm10[3],ymm11[3],ymm10[6],ymm11[6],ymm10[7],ymm11[7]
vunpcklpd %ymm1, %ymm12, %ymm11 # ymm11 = ymm12[0],ymm1[0],ymm12[2],ymm1[2]
vunpckhpd %ymm1, %ymm12, %ymm1 # ymm1 = ymm12[1],ymm1[1],ymm12[3],ymm1[3]
vunpcklpd %ymm2, %ymm0, %ymm12 # ymm12 = ymm0[0],ymm2[0],ymm0[2],ymm2[2]
vunpckhpd %ymm2, %ymm0, %ymm0 # ymm0 = ymm0[1],ymm2[1],ymm0[3],ymm2[3]
vunpcklpd %ymm5, %ymm3, %ymm2 # ymm2 = ymm3[0],ymm5[0],ymm3[2],ymm5[2]
vunpckhpd %ymm5, %ymm3, %ymm3 # ymm3 = ymm3[1],ymm5[1],ymm3[3],ymm5[3]
vunpcklpd %ymm6, %ymm4, %ymm5 # ymm5 = ymm4[0],ymm6[0],ymm4[2],ymm6[2]
vunpckhpd %ymm6, %ymm4, %ymm4 # ymm4 = ymm4[1],ymm6[1],ymm4[3],ymm6[3]
vunpcklpd %ymm9, %ymm7, %ymm6 # ymm6 = ymm7[0],ymm9[0],ymm7[2],ymm9[2]
vunpckhpd %ymm9, %ymm7, %ymm7 # ymm7 = ymm7[1],ymm9[1],ymm7[3],ymm9[3]
vunpcklpd %ymm10, %ymm8, %ymm9 # ymm9 = ymm8[0],ymm10[0],ymm8[2],ymm10[2]
vunpckhpd %ymm10, %ymm8, %ymm8 # ymm8 = ymm8[1],ymm10[1],ymm8[3],ymm10[3]
vinsertf128 $0x1, %xmm2, %ymm11, %ymm10
vperm2f128 $0x31, %ymm2, %ymm11, %ymm2 # ymm2 = ymm11[2,3],ymm2[2,3]
vinsertf128 $0x1, %xmm1, %ymm6, %ymm11
vperm2f128 $0x31, %ymm1, %ymm6, %ymm1 # ymm1 = ymm6[2,3],ymm1[2,3]
vinsertf128 $0x1, %xmm7, %ymm3, %ymm6
vperm2f128 $0x31, %ymm7, %ymm3, %ymm3 # ymm3 = ymm3[2,3],ymm7[2,3]
vinsertf128 $0x1, %xmm5, %ymm12, %ymm7
vperm2f128 $0x31, %ymm5, %ymm12, %ymm5 # ymm5 = ymm12[2,3],ymm5[2,3]
vinsertf128 $0x1, %xmm0, %ymm9, %ymm12
vperm2f128 $0x31, %ymm0, %ymm9, %ymm0 # ymm0 = ymm9[2,3],ymm0[2,3]
vinsertf128 $0x1, %xmm8, %ymm4, %ymm9
vperm2f128 $0x31, %ymm8, %ymm4, %ymm4 # ymm4 = ymm4[2,3],ymm8[2,3]
vmovaps %ymm10, (%rsi)
vmovaps %ymm11, 0x20(%rsi)
vmovaps %ymm6, 0x40(%rsi)
vmovaps %ymm7, 0x60(%rsi)
vmovaps %ymm12, 0x80(%rsi)
vmovaps %ymm9, 0xa0(%rsi)
vmovaps %ymm2, 0xc0(%rsi)
vmovaps %ymm1, 0xe0(%rsi)
vmovaps %ymm3, 0x100(%rsi)
vmovaps %ymm5, 0x120(%rsi)
vmovaps %ymm0, 0x140(%rsi)
vmovaps %ymm4, 0x160(%rsi)
addq $0x180, %rsi # imm = 0x180
addl $0x8, %ecx
addq %r14, %rdi
jmp 0x36ab5a
cmpl $0x4, %ebp
jne 0x36adf5
movq -0x60(%rsp), %rcx
movq (%rcx), %rcx
addq -0x18(%rsp), %rcx
movl $0x3, %edi
cmpl %r9d, %edi
jge 0x36adf5
vmovaps -0xb0(%rcx), %xmm0
vmovaps -0xa0(%rcx), %xmm1
vmovaps -0x90(%rcx), %xmm2
vmovaps -0x80(%rcx), %xmm3
vmovaps -0x70(%rcx), %xmm4
vmovaps -0x60(%rcx), %xmm5
vmovaps -0x50(%rcx), %xmm6
vmovaps -0x40(%rcx), %xmm7
vmovaps -0x30(%rcx), %xmm8
vmovaps -0x20(%rcx), %xmm9
vmovaps -0x10(%rcx), %xmm10
vmovaps (%rcx), %xmm11
vunpcklps %xmm1, %xmm0, %xmm12 # xmm12 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vunpcklps %xmm3, %xmm2, %xmm13 # xmm13 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
vunpckhps %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
vunpckhps %xmm3, %xmm2, %xmm1 # xmm1 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
vmovlhps %xmm13, %xmm12, %xmm2 # xmm2 = xmm12[0],xmm13[0]
vunpckhpd %xmm13, %xmm12, %xmm3 # xmm3 = xmm12[1],xmm13[1]
vmovlhps %xmm1, %xmm0, %xmm12 # xmm12 = xmm0[0],xmm1[0]
vunpckhpd %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[1],xmm1[1]
vunpcklps %xmm5, %xmm4, %xmm1 # xmm1 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
vunpcklps %xmm7, %xmm6, %xmm13 # xmm13 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
vunpckhps %xmm5, %xmm4, %xmm4 # xmm4 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
vunpckhps %xmm7, %xmm6, %xmm5 # xmm5 = xmm6[2],xmm7[2],xmm6[3],xmm7[3]
vmovlhps %xmm13, %xmm1, %xmm6 # xmm6 = xmm1[0],xmm13[0]
vunpckhpd %xmm13, %xmm1, %xmm1 # xmm1 = xmm1[1],xmm13[1]
vmovlhps %xmm5, %xmm4, %xmm7 # xmm7 = xmm4[0],xmm5[0]
vunpckhpd %xmm5, %xmm4, %xmm4 # xmm4 = xmm4[1],xmm5[1]
vunpcklps %xmm9, %xmm8, %xmm5 # xmm5 = xmm8[0],xmm9[0],xmm8[1],xmm9[1]
vunpcklps %xmm11, %xmm10, %xmm13 # xmm13 = xmm10[0],xmm11[0],xmm10[1],xmm11[1]
vunpckhps %xmm9, %xmm8, %xmm8 # xmm8 = xmm8[2],xmm9[2],xmm8[3],xmm9[3]
vunpckhps %xmm11, %xmm10, %xmm9 # xmm9 = xmm10[2],xmm11[2],xmm10[3],xmm11[3]
vmovlhps %xmm13, %xmm5, %xmm10 # xmm10 = xmm5[0],xmm13[0]
vunpckhpd %xmm13, %xmm5, %xmm5 # xmm5 = xmm5[1],xmm13[1]
vmovlhps %xmm9, %xmm8, %xmm11 # xmm11 = xmm8[0],xmm9[0]
vunpckhpd %xmm9, %xmm8, %xmm8 # xmm8 = xmm8[1],xmm9[1]
vmovaps %xmm2, (%rsi)
vmovaps %xmm6, 0x10(%rsi)
vmovaps %xmm10, 0x20(%rsi)
vmovaps %xmm3, 0x30(%rsi)
vmovaps %xmm1, 0x40(%rsi)
vmovaps %xmm5, 0x50(%rsi)
vmovaps %xmm12, 0x60(%rsi)
vmovaps %xmm7, 0x70(%rsi)
vmovaps %xmm11, 0x80(%rsi)
vmovaps %xmm0, 0x90(%rsi)
vmovaps %xmm4, 0xa0(%rsi)
vmovaps %xmm8, 0xb0(%rsi)
addq $0xc0, %rsi
addl $0x4, %edi
addq %r15, %rcx
jmp 0x36ace3
cmpl $0x1, %ebp
jne 0x36ae35
movq -0x60(%rsp), %rcx
movq (%rcx), %rcx
addq -0x50(%rsp), %rcx
movl -0x54(%rsp), %edi
subl $0x1, %edi
jb 0x36ae35
vmovups -0x20(%rcx), %xmm0
vmovaps %xmm0, (%rsi)
vmovups -0x10(%rcx), %xmm0
vmovaps %xmm0, 0x10(%rsi)
vmovups (%rcx), %xmm0
vmovaps %xmm0, 0x20(%rsi)
addq $0x30, %rsi
addq %rax, %rcx
jmp 0x36ae0b
addq $0xc, %r10
movl $0x180, %ecx # imm = 0x180
addq %rcx, %r8
movl $0xc0, %edi
addq %rdi, %rbx
addq $0x30, %rdx
addq %rcx, %r13
addq %rdi, %r11
addq $0x30, %r12
addq %rcx, -0x38(%rsp)
addq %rdi, -0x40(%rsp)
addq %rcx, -0x28(%rsp)
addq %rdi, -0x30(%rsp)
addq $0x30, -0x20(%rsp)
addq %rcx, -0x10(%rsp)
addq %rdi, -0x18(%rsp)
addq $0x30, -0x50(%rsp)
jmp 0x36ab34
movq -0x48(%rsp), %rcx
addq $-0x7, %rcx
movq %rcx, -0x50(%rsp)
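# -0x50(%rsp) = max_jj - 7; 8-wide column tiles (jj + 7 < max_jj)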
cmpq -0x50(%rsp), %r10
jge 0x36b230
cmpl $0x8, %ebp
jne 0x36afa8
movq -0x60(%rsp), %rcx
movq (%rcx), %rcx
addq -0x28(%rsp), %rcx
movl $0x7, %edi
cmpl %r9d, %edi
jge 0x36afa8
vmovaps -0xe0(%rcx), %ymm0
vmovaps -0xc0(%rcx), %ymm1
vmovaps -0xa0(%rcx), %ymm2
vmovaps -0x80(%rcx), %ymm3
vmovaps -0x60(%rcx), %ymm4
vmovaps -0x40(%rcx), %ymm5
vmovaps -0x20(%rcx), %ymm6
vmovaps (%rcx), %ymm7
vunpcklps %ymm1, %ymm0, %ymm8 # ymm8 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
vunpckhps %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
vunpcklps %ymm3, %ymm2, %ymm1 # ymm1 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
vunpckhps %ymm3, %ymm2, %ymm2 # ymm2 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
vunpcklps %ymm5, %ymm4, %ymm3 # ymm3 = ymm4[0],ymm5[0],ymm4[1],ymm5[1],ymm4[4],ymm5[4],ymm4[5],ymm5[5]
vunpckhps %ymm5, %ymm4, %ymm4 # ymm4 = ymm4[2],ymm5[2],ymm4[3],ymm5[3],ymm4[6],ymm5[6],ymm4[7],ymm5[7]
vunpcklps %ymm7, %ymm6, %ymm5 # ymm5 = ymm6[0],ymm7[0],ymm6[1],ymm7[1],ymm6[4],ymm7[4],ymm6[5],ymm7[5]
vunpckhps %ymm7, %ymm6, %ymm6 # ymm6 = ymm6[2],ymm7[2],ymm6[3],ymm7[3],ymm6[6],ymm7[6],ymm6[7],ymm7[7]
vunpcklpd %ymm1, %ymm8, %ymm7 # ymm7 = ymm8[0],ymm1[0],ymm8[2],ymm1[2]
vunpckhpd %ymm1, %ymm8, %ymm1 # ymm1 = ymm8[1],ymm1[1],ymm8[3],ymm1[3]
vunpcklpd %ymm2, %ymm0, %ymm8 # ymm8 = ymm0[0],ymm2[0],ymm0[2],ymm2[2]
vunpckhpd %ymm2, %ymm0, %ymm0 # ymm0 = ymm0[1],ymm2[1],ymm0[3],ymm2[3]
vunpcklpd %ymm5, %ymm3, %ymm2 # ymm2 = ymm3[0],ymm5[0],ymm3[2],ymm5[2]
vunpckhpd %ymm5, %ymm3, %ymm3 # ymm3 = ymm3[1],ymm5[1],ymm3[3],ymm5[3]
vunpcklpd %ymm6, %ymm4, %ymm5 # ymm5 = ymm4[0],ymm6[0],ymm4[2],ymm6[2]
vunpckhpd %ymm6, %ymm4, %ymm4 # ymm4 = ymm4[1],ymm6[1],ymm4[3],ymm6[3]
vinsertf128 $0x1, %xmm2, %ymm7, %ymm6
vinsertf128 $0x1, %xmm3, %ymm1, %ymm9
vinsertf128 $0x1, %xmm5, %ymm8, %ymm10
vinsertf128 $0x1, %xmm4, %ymm0, %ymm11
vperm2f128 $0x31, %ymm2, %ymm7, %ymm2 # ymm2 = ymm7[2,3],ymm2[2,3]
vperm2f128 $0x31, %ymm3, %ymm1, %ymm1 # ymm1 = ymm1[2,3],ymm3[2,3]
vperm2f128 $0x31, %ymm5, %ymm8, %ymm3 # ymm3 = ymm8[2,3],ymm5[2,3]
vperm2f128 $0x31, %ymm4, %ymm0, %ymm0 # ymm0 = ymm0[2,3],ymm4[2,3]
vmovaps %ymm6, (%rsi)
vmovaps %ymm9, 0x20(%rsi)
vmovaps %ymm10, 0x40(%rsi)
vmovaps %ymm11, 0x60(%rsi)
vmovaps %ymm2, 0x80(%rsi)
vmovaps %ymm1, 0xa0(%rsi)
vmovaps %ymm3, 0xc0(%rsi)
vmovaps %ymm0, 0xe0(%rsi)
addq $0x100, %rsi # imm = 0x100
addl $0x8, %edi
addq %r14, %rcx
jmp 0x36aeba
cmpl $0x4, %ebp
jne 0x36b06d
movq -0x60(%rsp), %rcx
movq (%rcx), %rcx
addq -0x30(%rsp), %rcx
movl $0x3, %edi
cmpl %r9d, %edi
jge 0x36b06d
vmovaps -0x70(%rcx), %xmm0
vmovaps -0x60(%rcx), %xmm1
vmovaps -0x50(%rcx), %xmm2
vmovaps -0x40(%rcx), %xmm3
vmovaps -0x30(%rcx), %xmm4
vmovaps -0x20(%rcx), %xmm5
vmovaps -0x10(%rcx), %xmm6
vmovaps (%rcx), %xmm7
vunpcklps %xmm1, %xmm0, %xmm8 # xmm8 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vunpcklps %xmm3, %xmm2, %xmm9 # xmm9 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
vunpckhps %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
vunpckhps %xmm3, %xmm2, %xmm1 # xmm1 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
vmovlhps %xmm9, %xmm8, %xmm2 # xmm2 = xmm8[0],xmm9[0]
vunpckhpd %xmm9, %xmm8, %xmm3 # xmm3 = xmm8[1],xmm9[1]
vmovlhps %xmm1, %xmm0, %xmm8 # xmm8 = xmm0[0],xmm1[0]
vunpckhpd %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[1],xmm1[1]
vunpcklps %xmm5, %xmm4, %xmm1 # xmm1 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
vunpcklps %xmm7, %xmm6, %xmm9 # xmm9 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
vunpckhps %xmm5, %xmm4, %xmm4 # xmm4 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
vunpckhps %xmm7, %xmm6, %xmm5 # xmm5 = xmm6[2],xmm7[2],xmm6[3],xmm7[3]
vmovlhps %xmm9, %xmm1, %xmm6 # xmm6 = xmm1[0],xmm9[0]
vunpckhpd %xmm9, %xmm1, %xmm1 # xmm1 = xmm1[1],xmm9[1]
vmovlhps %xmm5, %xmm4, %xmm7 # xmm7 = xmm4[0],xmm5[0]
vunpckhpd %xmm5, %xmm4, %xmm4 # xmm4 = xmm4[1],xmm5[1]
vmovaps %xmm2, (%rsi)
vmovaps %xmm6, 0x10(%rsi)
vmovaps %xmm3, 0x20(%rsi)
vmovaps %xmm1, 0x30(%rsi)
vmovaps %xmm8, 0x40(%rsi)
vmovaps %xmm7, 0x50(%rsi)
vmovaps %xmm0, 0x60(%rsi)
vmovaps %xmm4, 0x70(%rsi)
subq $-0x80, %rsi
addl $0x4, %edi
addq %r15, %rcx
jmp 0x36afc3
cmpl $0x1, %ebp
jne 0x36b0a3
movq -0x60(%rsp), %rcx
movq (%rcx), %rcx
addq -0x20(%rsp), %rcx
movl -0x54(%rsp), %edi
subl $0x1, %edi
jb 0x36b0a3
vmovups -0x10(%rcx), %xmm0
vmovaps %xmm0, (%rsi)
vmovups (%rcx), %xmm0
vmovaps %xmm0, 0x10(%rsi)
addq $0x20, %rsi
addq %rax, %rcx
jmp 0x36b083
addq $0x8, %r10
movl $0x100, %ecx # imm = 0x100
addq %rcx, %r8
movl $0x80, %edi
addq %rdi, %rbx
addq $0x20, %rdx
addq %rcx, %r13
addq %rdi, %r11
addq $0x20, %r12
addq %rcx, -0x38(%rsp)
addq %rdi, -0x40(%rsp)
addq %rcx, -0x28(%rsp)
addq %rdi, -0x30(%rsp)
addq $0x20, -0x20(%rsp)
jmp 0x36ae94
cmpl $0x8, %ebp
jne 0x36b16e
movq -0x60(%rsp), %rcx
movq (%rcx), %rcx
addq -0x38(%rsp), %rcx
movl $0x7, %edi
cmpl %r9d, %edi
jge 0x36b16e
vmovaps -0x60(%rcx), %ymm0
vmovaps -0x40(%rcx), %ymm1
vmovaps -0x20(%rcx), %ymm2
vmovaps (%rcx), %ymm3
vunpcklps %ymm1, %ymm0, %ymm4 # ymm4 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
vunpckhps %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
vunpcklps %ymm3, %ymm2, %ymm1 # ymm1 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
vunpckhps %ymm3, %ymm2, %ymm2 # ymm2 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
vunpcklpd %ymm1, %ymm4, %ymm3 # ymm3 = ymm4[0],ymm1[0],ymm4[2],ymm1[2]
vunpckhpd %ymm1, %ymm4, %ymm1 # ymm1 = ymm4[1],ymm1[1],ymm4[3],ymm1[3]
vunpcklpd %ymm2, %ymm0, %ymm4 # ymm4 = ymm0[0],ymm2[0],ymm0[2],ymm2[2]
vunpckhpd %ymm2, %ymm0, %ymm0 # ymm0 = ymm0[1],ymm2[1],ymm0[3],ymm2[3]
vinsertf128 $0x1, %xmm1, %ymm3, %ymm2
vinsertf128 $0x1, %xmm0, %ymm4, %ymm5
vperm2f128 $0x31, %ymm1, %ymm3, %ymm1 # ymm1 = ymm3[2,3],ymm1[2,3]
vperm2f128 $0x31, %ymm0, %ymm4, %ymm0 # ymm0 = ymm4[2,3],ymm0[2,3]
vmovaps %ymm2, (%rsi)
vmovaps %ymm5, 0x20(%rsi)
vmovaps %ymm1, 0x40(%rsi)
vmovaps %ymm0, 0x60(%rsi)
subq $-0x80, %rsi
addq %r14, %rcx
addl $0x8, %edi
jmp 0x36b0ff
cmpl $0x4, %ebp
jne 0x36b1dc
movq -0x60(%rsp), %rcx
movq (%rcx), %rcx
addq -0x40(%rsp), %rcx
movl $0x3, %edi
cmpl %r9d, %edi
jge 0x36b1dc
vmovaps -0x30(%rcx), %xmm0
vmovaps -0x20(%rcx), %xmm1
vmovaps -0x10(%rcx), %xmm2
vmovaps (%rcx), %xmm3
vunpcklps %xmm1, %xmm0, %xmm4 # xmm4 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vunpcklps %xmm3, %xmm2, %xmm5 # xmm5 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
vunpckhps %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
vunpckhps %xmm3, %xmm2, %xmm1 # xmm1 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
vmovlhps %xmm5, %xmm4, %xmm2 # xmm2 = xmm4[0],xmm5[0]
vunpckhpd %xmm5, %xmm4, %xmm3 # xmm3 = xmm4[1],xmm5[1]
vmovlhps %xmm1, %xmm0, %xmm4 # xmm4 = xmm0[0],xmm1[0]
vunpckhpd %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[1],xmm1[1]
vmovaps %xmm2, (%rsi)
vmovaps %xmm3, 0x10(%rsi)
vmovaps %xmm4, 0x20(%rsi)
vmovaps %xmm0, 0x30(%rsi)
addq $0x40, %rsi
addq %r15, %rcx
addl $0x4, %edi
jmp 0x36b185
cmpl $0x1, %ebp
jne 0x36b206
movq -0x60(%rsp), %rcx
movq (%rcx), %rcx
addq %rdx, %rcx
movl -0x54(%rsp), %edi
subl $0x1, %edi
jb 0x36b206
vmovups (%rcx), %xmm0
vmovaps %xmm0, (%rsi)
addq $0x10, %rsi
addq %rax, %rcx
jmp 0x36b1f0
addq $0x4, %r10
movl $0x80, %ecx
addq %rcx, %r8
addq $0x40, %rbx
addq $0x10, %rdx
addq %rcx, %r13
addq $0x40, %r11
addq $0x10, %r12
addq %rcx, -0x38(%rsp)
addq $0x40, -0x40(%rsp)
movq %r10, %rcx
orq $0x3, %rcx
cmpq -0x48(%rsp), %rcx
jl 0x36b0e4
jmp 0x36b323
cmpl $0x8, %ebp
jne 0x36b293
movq -0x60(%rsp), %rcx
movq (%rcx), %rcx
addq %r13, %rcx
movl $0x7, %edi
cmpl %r9d, %edi
jge 0x36b293
vmovaps -0x20(%rcx), %ymm0
vmovaps (%rcx), %ymm1
vunpcklps %ymm1, %ymm0, %ymm2 # ymm2 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
vunpckhps %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
vinsertf128 $0x1, %xmm0, %ymm2, %ymm1
vperm2f128 $0x31, %ymm0, %ymm2, %ymm0 # ymm0 = ymm2[2,3],ymm0[2,3]
vmovaps %ymm1, (%rsi)
vmovaps %ymm0, 0x20(%rsi)
addq $0x40, %rsi
addq %r14, %rcx
addl $0x8, %edi
jmp 0x36b25c
cmpl $0x4, %ebp
jne 0x36b2d3
movq -0x60(%rsp), %rcx
movq (%rcx), %rcx
addq %r11, %rcx
movl $0x3, %edi
cmpl %r9d, %edi
jge 0x36b2d3
vmovaps -0x10(%rcx), %xmm0
vmovaps (%rcx), %xmm1
vunpcklps %xmm1, %xmm0, %xmm2 # xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vunpckhps %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
vmovaps %xmm2, (%rsi)
vmovaps %xmm0, 0x10(%rsi)
addq $0x20, %rsi
addq %r15, %rcx
addl $0x4, %edi
jmp 0x36b2a8
cmpl $0x1, %ebp
jne 0x36b307
movq -0x60(%rsp), %rcx
movq (%rcx), %rcx
addq %r12, %rcx
movl -0x54(%rsp), %edi
subl $0x1, %edi
jb 0x36b307
vmovss -0x4(%rcx), %xmm0
vmovss %xmm0, (%rsi)
vmovss (%rcx), %xmm0
vmovss %xmm0, 0x4(%rsi)
addq $0x8, %rsi
addq %rax, %rcx
jmp 0x36b2e7
addq $0x2, %r10
addq $0x40, %r8
addq $0x20, %rbx
addq $0x8, %rdx
addq $0x40, %r13
addq $0x20, %r11
addq $0x8, %r12
movq %r10, %rcx
orq $0x1, %rcx
cmpq -0x48(%rsp), %rcx
jl 0x36b247
movl $0x7, %ecx
movl $0x3, %edi
cmpq -0x48(%rsp), %r10
jge 0x36b3e7
cmpl $0x8, %ebp
jne 0x36b378
movq -0x60(%rsp), %r11
movq (%r11), %r11
addq %r8, %r11
movl %ecx, %r12d
cmpl %r9d, %r12d
jge 0x36b378
vmovaps (%r11), %ymm0
vmovaps %ymm0, (%rsi)
addq $0x20, %rsi
addq %r14, %r11
addl $0x8, %r12d
jmp 0x36b35d
cmpl $0x4, %ebp
jne 0x36b3a6
movq -0x60(%rsp), %r11
movq (%r11), %r11
addq %rbx, %r11
movl %edi, %r12d
cmpl %r9d, %r12d
jge 0x36b3a6
vmovaps (%r11), %xmm0
vmovaps %xmm0, (%rsi)
addq $0x10, %rsi
addq %r15, %r11
addl $0x4, %r12d
jmp 0x36b38b
cmpl $0x1, %ebp
jne 0x36b3d3
movq -0x60(%rsp), %r11
movq (%r11), %r11
addq %rdx, %r11
movl -0x54(%rsp), %r12d
subl $0x1, %r12d
jb 0x36b3d3
vmovss (%r11), %xmm0
vmovss %xmm0, (%rsi)
addq $0x4, %rsi
addq %rax, %r11
jmp 0x36b3bb
incq %r10
addq $0x20, %r8
addq $0x10, %rbx
addq $0x4, %rdx
jmp 0x36b33f
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/gemm_x86_fma.cpp |
ncnn::Gemm_x86_fma::forward(std::vector<ncnn::Mat, std::allocator<ncnn::Mat>> const&, std::vector<ncnn::Mat, std::allocator<ncnn::Mat>>&, ncnn::Option const&) const | int Gemm_x86_fma::forward(const std::vector<Mat>& bottom_blobs, std::vector<Mat>& top_blobs, const Option& opt) const
{
int M;
int N;
if (constantA && constantB)
{
M = constantM;
N = constantN;
}
else if (constantA)
{
const Mat& B = bottom_blobs[0];
M = constantM;
N = transB ? (B.dims == 3 ? B.c : B.h) * B.elempack : B.w;
}
else if (constantB)
{
const Mat& A = bottom_blobs[0];
M = transA ? A.w : (A.dims == 3 ? A.c : A.h) * A.elempack;
N = constantN;
}
else
{
const Mat& A = bottom_blobs[0];
const Mat& B = bottom_blobs[1];
M = transA ? A.w : (A.dims == 3 ? A.c : A.h) * A.elempack;
N = transB ? (B.dims == 3 ? B.c : B.h) * B.elempack : B.w;
}
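    // At this point M and N are known, taken either from the constant shape
    // hints or from the runtime A/B shapes (with elempack folded back in and
    // transA/transB deciding which axis counts as rows vs. columns).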
Mat C;
int broadcast_type_C = 0;
if (constantC)
{
C = CT_data;
broadcast_type_C = constant_broadcast_type_C;
}
else
{
if (constantA && constantB)
{
C = bottom_blobs.size() == 1 ? bottom_blobs[0] : Mat();
}
else if (constantA)
{
C = bottom_blobs.size() == 2 ? bottom_blobs[1] : Mat();
}
else if (constantB)
{
C = bottom_blobs.size() == 2 ? bottom_blobs[1] : Mat();
}
else
{
C = bottom_blobs.size() == 3 ? bottom_blobs[2] : Mat();
}
if (!C.empty())
{
if (C.dims == 1 && C.w == 1)
{
// scalar
broadcast_type_C = 0;
}
if (C.dims == 1 && C.w * C.elempack == M)
{
// M
// auto broadcast from h to w is the ncnn-style convention
broadcast_type_C = 1;
}
if (C.dims == 1 && C.w * C.elempack == N)
{
// N
broadcast_type_C = 4;
}
if (C.dims == 2 && C.w == 1 && C.h * C.elempack == M)
{
// Mx1
broadcast_type_C = 2;
}
if (C.dims == 2 && C.w == N && C.h * C.elempack == M)
{
// MxN
broadcast_type_C = 3;
}
if (C.dims == 2 && C.w == N && C.h * C.elempack == 1)
{
// 1xN
broadcast_type_C = 4;
}
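            // Summary of the cases above: broadcast_type_C 0 = scalar,
            // 1 = per-row vector of length M, 2 = Mx1 column, 3 = full MxN,
            // 4 = per-column vector of length N (or 1xN row).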
// pre-multiply C with beta
if (beta != 1.f)
{
Mat C2;
C2.create_like(C, opt.workspace_allocator);
const int size = C.total() * C.elempack;
for (int i = 0; i < size; i++)
{
C2[i] = C[i] * beta;
}
C = C2;
}
}
}
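    // beta has already been folded into C above; alpha is applied to the
    // finished top_blob after the kernel dispatch at the end of this function.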
int out_elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
int outh = output_transpose ? N : M;
#if __AVX512F__
out_elempack = outh % 16 == 0 ? 16 : outh % 8 == 0 ? 8 : outh % 4 == 0 ? 4 : 1;
#elif __AVX__
out_elempack = outh % 8 == 0 ? 8 : outh % 4 == 0 ? 4 : 1;
#else
out_elempack = outh % 4 == 0 ? 4 : 1;
#endif
}
#endif // __SSE2__
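    // Packing pick under AVX: out_elempack is 8 when the output height is a
    // multiple of 8, else 4 when a multiple of 4, else 1. E.g. outh = 24 -> 8,
    // outh = 20 -> 4, outh = 18 -> 1; output_elempack, if set, overrides this.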
if (output_elempack)
out_elempack = output_elempack;
size_t out_elemsize = 4u * out_elempack;
Mat& top_blob = top_blobs[0];
if (output_transpose)
{
if (output_N1M)
top_blob.create(M, 1, N / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
else
top_blob.create(M, N / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
}
else
{
if (output_N1M)
top_blob.create(N, 1, M / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
else
top_blob.create(N, M / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
}
if (top_blob.empty())
return -100;
int _nT = nT ? nT : opt.num_threads;
if (nT != 0 && opt.num_threads != nT)
{
// force num_threads the same as in create_pipeline
// so we could use pre-packed A/B from the same tile config
NCNN_LOGE("opt.num_threads %d changed, gemm will use load-time value %d", opt.num_threads, nT);
}
int ret = 0;
if (constantA && constantB)
{
ret = gemm_AT_BT_x86(AT_data, BT_data, C, top_blob, broadcast_type_C, constantM, constantN, constantK, output_transpose, constant_TILE_M, constant_TILE_N, constant_TILE_K, _nT, opt);
}
else if (constantA)
{
const Mat& B = bottom_blobs[0];
ret = gemm_AT_x86(AT_data, B, C, top_blob, broadcast_type_C, constantM, constantK, transB, output_transpose, constant_TILE_M, constant_TILE_N, constant_TILE_K, _nT, opt);
}
else if (constantB)
{
const Mat& A = bottom_blobs[0];
ret = gemm_BT_x86(A, BT_data, C, top_blob, broadcast_type_C, constantN, constantK, transA, output_transpose, constant_TILE_M, constant_TILE_N, constant_TILE_K, _nT, opt);
}
else
{
const Mat& A = bottom_blobs[0];
const Mat& B = bottom_blobs[1];
ret = gemm_x86(A, B, C, top_blob, broadcast_type_C, transA, transB, output_transpose, constant_TILE_M, constant_TILE_N, constant_TILE_K, _nT, opt);
}
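    // The dispatch above picks the kernel by which operands were pre-packed at
    // load time: both (gemm_AT_BT_x86), only A (gemm_AT_x86), only B
    // (gemm_BT_x86), or neither (plain gemm_x86).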
// multiply top_blob with alpha
if (alpha != 1.f)
{
const int size = top_blob.total() * out_elempack;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = 0; i < size; i++)
{
top_blob[i] *= alpha;
}
}
return ret;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x2d8, %rsp # imm = 0x2D8
movq %rcx, %rbp
movq %rdx, %rbx
movq %rdi, %r15
movq (%rdi), %rax
movq -0x18(%rax), %rcx
cmpl $0x0, 0xe0(%rdi,%rcx)
movl 0xe4(%rdi,%rcx), %r8d
je 0x36b444
testl %r8d, %r8d
je 0x36b45e
movl 0xec(%r15,%rcx), %r12d
jmp 0x36b4b2
movq (%rsi), %rdx
movl 0xd8(%r15,%rcx), %edi
testl %r8d, %r8d
je 0x36b48d
testl %edi, %edi
je 0x36b49e
movl 0x2c(%rdx), %r12d
jmp 0x36b4b2
movq (%rsi), %rdx
movl 0xec(%r15,%rcx), %r12d
cmpl $0x0, 0xdc(%r15,%rcx)
je 0x36b6cf
xorl %ecx, %ecx
cmpl $0x3, 0x28(%rdx)
movl 0x18(%rdx), %r13d
sete %cl
imull 0x30(%rdx,%rcx,8), %r13d
jmp 0x36b4ba
testl %edi, %edi
je 0x36b787
movl 0x2c(%rdx), %r12d
jmp 0x36b79b
xorl %edi, %edi
cmpl $0x3, 0x28(%rdx)
movl 0x18(%rdx), %r12d
sete %dil
imull 0x30(%rdx,%rdi,8), %r12d
movl 0xf0(%r15,%rcx), %r13d
andq $0x0, 0xd0(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x90(%rsp)
vmovups %xmm0, 0x9c(%rsp)
vmovaps %xmm0, 0xb0(%rsp)
vmovups %xmm0, 0xbc(%rsp)
movq -0x18(%rax), %rax
cmpl $0x0, 0xe8(%r15,%rax)
movq %r15, 0x60(%rsp)
movq %rsi, 0x28(%rsp)
je 0x36b56d
leaq 0xa0(%r15), %rcx
leaq 0x90(%rsp), %rdx
cmpq %rcx, %rdx
je 0x36b924
movq 0xa8(%r15), %rax
testq %rax, %rax
je 0x36b8b0
lock
incl (%rax)
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0x36b8b0
lock
decl (%rax)
jne 0x36b8b0
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
je 0x36b8a8
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x36b8b0
movq (%rsi), %rcx
movq 0x8(%rsi), %rdx
subq %rcx, %rdx
cmpl $0x0, 0xe0(%r15,%rax)
movl 0xe4(%r15,%rax), %eax
movq %rbp, 0x20(%rsp)
je 0x36b62d
testl %eax, %eax
je 0x36b6d8
movq %rbx, 0x50(%rsp)
movb $0x1, %al
cmpq $0x48, %rdx
jne 0x36b772
movq (%rcx), %rsi
movq 0x8(%rcx), %r14
movq 0x10(%rcx), %rdi
movl 0x18(%rcx), %edx
movq 0x20(%rcx), %rbp
vmovups 0x28(%rcx), %xmm0
movl 0x38(%rcx), %r15d
movq 0x40(%rcx), %rbx
testq %r14, %r14
je 0x36cd30
lock
incl (%r14)
lock
incl (%r14)
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0x36b8a1
lock
decl (%rax)
jne 0x36b8a1
movq %rdi, (%rsp)
movl %edx, 0x18(%rsp)
vmovaps %xmm0, 0x40(%rsp)
movq %rsi, 0x8(%rsp)
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
je 0x36d797
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x36d79f
testl %eax, %eax
je 0x36b7c2
movb $0x1, %al
cmpq $0x90, %rdx
jne 0x36b877
movq 0x48(%rcx), %rsi
movq 0x50(%rcx), %r14
movq 0x58(%rcx), %r15
movl 0x60(%rcx), %edi
movq 0x68(%rcx), %rbp
vmovups 0x70(%rcx), %xmm0
movl 0x80(%rcx), %edx
movq 0x88(%rcx), %rcx
testq %r14, %r14
je 0x36d5c8
lock
incl (%r14)
lock
incl (%r14)
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0x36d5b5
lock
decl (%rax)
jne 0x36d5b5
movl %edi, (%rsp)
movl %edx, 0x18(%rsp)
movq %rcx, 0x40(%rsp)
vmovaps %xmm0, 0x50(%rsp)
movq %rsi, 0x8(%rsp)
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
je 0x36d828
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x36d830
movl 0x2c(%rdx), %r13d
jmp 0x36b4ba
movb $0x1, %al
cmpq $0x90, %rdx
jne 0x36b88c
movq 0x48(%rcx), %rsi
movq 0x50(%rcx), %r14
movq 0x58(%rcx), %r15
movl 0x60(%rcx), %edi
movq 0x68(%rcx), %rbp
vmovups 0x70(%rcx), %xmm0
movl 0x80(%rcx), %edx
movq 0x88(%rcx), %rcx
testq %r14, %r14
je 0x36d5d0
lock
incl (%r14)
lock
incl (%r14)
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0x36d5bc
lock
decl (%rax)
jne 0x36d5bc
movl %edi, (%rsp)
movl %edx, 0x18(%rsp)
movq %rcx, 0x40(%rsp)
vmovaps %xmm0, 0x50(%rsp)
movq %rsi, 0x8(%rsp)
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
je 0x36d8b7
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x36d8bf
xorl %ebp, %ebp
xorl %edx, %edx
xorl %edi, %edi
xorl %r14d, %r14d
xorl %esi, %esi
xorl %r15d, %r15d
xorl %ebx, %ebx
jmp 0x36d7b4
xorl %edi, %edi
cmpl $0x3, 0x28(%rdx)
movl 0x18(%rdx), %r12d
sete %dil
imull 0x30(%rdx,%rdi,8), %r12d
cmpl $0x0, 0xdc(%r15,%rcx)
je 0x36b86e
xorl %ecx, %ecx
cmpl $0x3, 0x70(%rdx)
movl 0x60(%rdx), %r13d
sete %cl
imull 0x78(%rdx,%rcx,8), %r13d
jmp 0x36b4ba
movb $0x1, %al
cmpq $0xd8, %rdx
jne 0x36cd38
movq 0x90(%rcx), %rsi
movq 0x98(%rcx), %r14
movq 0xa0(%rcx), %r15
movl 0xa8(%rcx), %edi
movq 0xb0(%rcx), %rbp
vmovups 0xb8(%rcx), %xmm0
movl 0xc8(%rcx), %edx
movq 0xd0(%rcx), %rcx
testq %r14, %r14
je 0x36d66a
lock
incl (%r14)
lock
incl (%r14)
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0x36d5d8
lock
decl (%rax)
jne 0x36d5d8
movl %edi, (%rsp)
movl %edx, 0x18(%rsp)
movq %rcx, 0x40(%rsp)
vmovaps %xmm0, 0x50(%rsp)
movq %rsi, 0x8(%rsp)
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
je 0x36d946
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x36d94e
movl 0x74(%rdx), %r13d
jmp 0x36b4ba
xorl %ecx, %ecx
xorl %edx, %edx
xorl %ebp, %ebp
xorl %edi, %edi
xorl %r15d, %r15d
xorl %r14d, %r14d
xorl %esi, %esi
jmp 0x36d849
xorl %ecx, %ecx
xorl %edx, %edx
xorl %ebp, %ebp
xorl %edi, %edi
xorl %r15d, %r15d
xorl %r14d, %r14d
xorl %esi, %esi
jmp 0x36d8d8
xorl %eax, %eax
jmp 0x36d7b4
movq %rsi, %rdi
callq 0x5f3e0
vmovups 0xa0(%r15), %xmm0
vmovaps %xmm0, 0x90(%rsp)
movq 0xb0(%r15), %rax
movq %rax, 0xa0(%rsp)
movl 0xb8(%r15), %eax
movl %eax, 0xa8(%rsp)
movq 0xc0(%r15), %rax
movq %rax, 0xb0(%rsp)
vmovups 0xc8(%r15), %xmm0
vmovups %xmm0, 0xb8(%rsp)
movl 0xd8(%r15), %eax
movl %eax, 0xc8(%rsp)
movq 0xe0(%r15), %rax
movq %rax, 0xd0(%rsp)
movq (%r15), %rax
movq -0x18(%rax), %rax
movl 0xf8(%r15,%rax), %eax
movl %eax, 0x50(%rsp)
movq (%r15), %rax
movq -0x18(%rax), %rdx
movl 0x108(%r15,%rdx), %eax
cmpb $0x1, 0x27(%rbp)
jne 0x36b968
testl %eax, %eax
movl %r13d, %ecx
cmovel %r12d, %ecx
xorl %esi, %esi
testb $0x3, %cl
sete %sil
testb $0x7, %cl
leal 0x1(%rsi,%rsi,2), %ecx
pushq $0x8
popq %r9
cmovnel %ecx, %r9d
jmp 0x36b96c
pushq $0x1
popq %r9
movl 0xfc(%r15,%rdx), %ecx
movl 0x100(%r15,%rdx), %edx
testl %edx, %edx
cmovnel %edx, %r9d
leal (,%r9,4), %r8d
movq (%rbx), %rdx
movq %rdx, 0x8(%rsp)
movq 0x8(%rbp), %r10
testl %eax, %eax
movq %r9, 0x1e8(%rsp)
je 0x36b9ab
movl %r13d, %eax
cltd
idivl %r9d
jmp 0x36b9b5
movl %r12d, %eax
cltd
idivl %r9d
movl %r13d, %r12d
testl %ecx, %ecx
je 0x36b9d8
subq $0x8, %rsp
pushq $0x1
popq %rdx
movq 0x10(%rsp), %rbx
movq %rbx, %rdi
movl %r12d, %esi
movl %eax, %ecx
pushq %r10
callq 0x628f2
popq %rax
popq %rcx
jmp 0x36b9f3
movq 0x8(%rsp), %rbx
movq %rbx, %rdi
movl %r12d, %esi
movl %eax, %edx
movq %r8, %rcx
movl %r9d, %r8d
movq %r10, %r9
callq 0x627de
pushq $-0x64
popq %r14
cmpq $0x0, (%rbx)
je 0x36d74b
movslq 0x38(%rbx), %rax
imulq 0x40(%rbx), %rax
testq %rax, %rax
je 0x36d74b
movl 0x8(%r15), %ecx
testl %ecx, %ecx
movl 0x4(%rbp), %edx
movl %ecx, %r13d
cmovel %edx, %r13d
sete %al
cmpl %ecx, %edx
sete %sil
orb %al, %sil
je 0x36cd4d
movq (%r15), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0xe0(%r15,%rax)
movl 0xe4(%r15,%rax), %edx
movq 0x28(%rsp), %rsi
je 0x36bfb4
movl 0xec(%r15,%rax), %r12d
movl 0x108(%r15,%rax), %ecx
movl %ecx, 0x14(%rsp)
movl 0x10c(%r15,%rax), %ecx
movl 0x110(%r15,%rax), %r8d
movl 0x114(%r15,%rax), %r9d
testl %edx, %edx
movq %r12, 0xd8(%rsp)
movq %r13, (%rsp)
je 0x36c015
movl 0xf0(%r15,%rax), %esi
movl 0xf4(%r15,%rax), %ebx
leaq 0x3c(%rsp), %rax
leaq 0x290(%rsp), %r10
leaq 0x240(%rsp), %r11
movl %r12d, %edi
movl %esi, 0x70(%rsp)
movl %ebx, %edx
pushq %r13
pushq %rax
pushq %r10
pushq %r11
callq 0x368a58
addq $0x20, %rsp
movl 0x240(%rsp), %ecx
leal (%r12,%rcx), %eax
decl %eax
cltd
movq %rcx, 0x130(%rsp)
idivl %ecx
movl %eax, %r13d
andq $0x0, 0x180(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x140(%rsp)
vmovups %xmm0, 0x14c(%rsp)
vmovaps %xmm0, 0x160(%rsp)
vmovups %xmm0, 0x16c(%rsp)
movl 0x3c(%rsp), %eax
movl %ebx, 0x78(%rsp)
movq %rax, 0x138(%rsp)
cmpl %ebx, %eax
setl %al
cmpl $0x3, 0x50(%rsp)
sete %cl
orb %al, %cl
cmpl $0x0, 0x14(%rsp)
setne %al
orb %cl, %al
movl 0x290(%rsp), %ecx
movl %ecx, 0x28(%rsp)
movb %al, 0x238(%rsp)
cmpb $0x1, %al
jne 0x36bb89
movl 0x28(%rsp), %esi
imull 0x130(%rsp), %esi
movq 0x10(%rbp), %r9
leaq 0x140(%rsp), %rdi
pushq $0x1
popq %rdx
pushq $0x4
popq %r8
movq (%rsp), %rcx
callq 0x63810
cmpl $0x0, 0x14(%rsp)
sete 0x30(%rsp)
xorl %eax, %eax
testl %r13d, %r13d
cmovlel %eax, %r13d
movq %r13, 0x1e0(%rsp)
xorl %r13d, %r13d
movl 0x78(%rsp), %ebp
cmpq 0x1e0(%rsp), %r13
je 0x36bf6e
movq %r13, %rcx
movq 0x130(%rsp), %rax
imulq %rax, %rcx
movl %r12d, %edx
movq %rcx, (%rsp)
subl %ecx, %edx
cmpl %edx, %eax
cmovll %eax, %edx
movq %rdx, 0x68(%rsp)
andq $0x0, 0x120(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0xe0(%rsp)
vmovups %xmm0, 0xec(%rsp)
leaq 0x100(%rsp), %rax
vmovups %xmm0, 0xc(%rax)
vmovups %xmm0, (%rax)
cmpb $0x0, 0x238(%rsp)
je 0x36bced
callq 0x7357d
movl %eax, %ecx
movl 0x168(%rsp), %edi
movslq 0x170(%rsp), %rsi
movslq 0x16c(%rsp), %r8
movq %r8, %r10
imulq %rsi, %r10
movq 0x150(%rsp), %r9
movq %r10, %rax
imulq %r9, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %r9
cmpl $0x4, %edi
cmoveq %r10, %rax
decl %edi
movq 0x160(%rsp), %rdx
movl 0x158(%rsp), %r10d
movslq %ecx, %rcx
imulq %r9, %rcx
imulq 0x180(%rsp), %rcx
addq 0x140(%rsp), %rcx
movl 0x174(%rsp), %r11d
movq %rcx, 0xe0(%rsp)
andq $0x0, 0xe8(%rsp)
movq %r9, 0xf0(%rsp)
movl %r10d, 0xf8(%rsp)
movq %rdx, 0x100(%rsp)
movl %edi, 0x108(%rsp)
movl %r8d, 0x10c(%rsp)
movl %esi, 0x110(%rsp)
movl $0x1, 0x114(%rsp)
movl %r11d, 0x118(%rsp)
movq %rax, 0x120(%rsp)
xorl %r14d, %r14d
movq %r13, 0x80(%rsp)
movl 0x70(%rsp), %eax
movl %eax, %r15d
subl %r14d, %r15d
jle 0x36bf59
movl 0x28(%rsp), %eax
cmpl %r15d, %eax
cmovll %eax, %r15d
leaq 0x90(%rsp), %rax
movq %rax, 0x18(%rsp)
cmpl $0x3, 0x50(%rsp)
jne 0x36bd53
leaq 0x90(%rsp), %rdi
leaq 0xe0(%rsp), %rbx
movq %rbx, %rsi
movq (%rsp), %rdx
movq 0x68(%rsp), %rcx
movl %r14d, %r8d
movl %r15d, %r9d
callq 0x369291
movq %rbx, 0x18(%rsp)
movq %r15, 0x20(%rsp)
movq %r14, 0x40(%rsp)
xorl %r10d, %r10d
movl %ebp, %r11d
cmpl %ebp, %r10d
jge 0x36bf20
movq 0x138(%rsp), %r14
movl %r14d, %ebx
subl %r11d, %ebx
cmovll %r14d, %r11d
movq 0x60(%rsp), %rbp
movq 0x50(%rbp), %rsi
imulq %r13, %rsi
movq 0x20(%rbp), %rdi
imulq %rdi, %rsi
addq 0x10(%rbp), %rsi
movslq 0x3c(%rbp), %r8
movl 0x28(%rbp), %r9d
movq 0x30(%rbp), %r13
movl %r10d, %eax
cltd
idivl %r14d
movslq %eax, %rcx
movq %rdi, %rax
imulq %r8, %rax
imulq %rcx, %rax
addq %rsi, %rax
movq %rax, 0x190(%rsp)
andq $0x0, 0x198(%rsp)
movq %rdi, 0x1a0(%rsp)
movl %r9d, 0x1a8(%rsp)
movq %r13, 0x1b0(%rsp)
pushq $0x2
popq %r9
movl %r9d, 0x1b8(%rsp)
movl %r8d, 0x1bc(%rsp)
movabsq $0x100000001, %r13 # imm = 0x100000001
movq %r13, 0x1c0(%rsp)
pushq $0x1
popq %r12
movl %r12d, 0x1c8(%rsp)
movq %r8, 0x1d0(%rsp)
movq 0x40(%rsp), %r15
movl %r15d, %eax
cltd
idivl 0x28(%rsp)
movslq 0x84(%rbp), %rdx
cltq
imulq 0x98(%rbp), %rax
movq 0x68(%rbp), %rsi
imulq %rsi, %rax
addq 0x58(%rbp), %rax
movq %rdx, %rdi
imulq %rsi, %rdi
imulq %rcx, %rdi
movl 0x70(%rbp), %ecx
addq %rax, %rdi
movq 0x78(%rbp), %rax
movl 0x78(%rsp), %ebp
movq %rdi, 0x1f0(%rsp)
andq $0x0, 0x1f8(%rsp)
movq %rsi, 0x200(%rsp)
movl %ecx, 0x208(%rsp)
negl %ebx
movq %rax, 0x210(%rsp)
movl %r9d, 0x218(%rsp)
movl %edx, 0x21c(%rsp)
movq %r13, 0x220(%rsp)
movl %r12d, 0x228(%rsp)
movq %rdx, 0x230(%rsp)
leal (%r10,%r14), %r13d
cmpl %ebp, %r13d
setge %al
andb 0x30(%rsp), %al
movzbl %al, %eax
subq $0x8, %rsp
leaq 0x198(%rsp), %rdi
leaq 0x1f8(%rsp), %rsi
movq 0x20(%rsp), %rdx
leaq 0xe8(%rsp), %rcx
movq 0x10(%rsp), %r8
movl 0x58(%rsp), %r9d
pushq %rax
pushq %r11
pushq %r10
pushq 0x40(%rsp)
pushq %r15
pushq 0x98(%rsp)
pushq 0x38(%rsp)
callq 0x36e020
addq $0x40, %rsp
movl %ebx, %r11d
movl %r13d, %r10d
movq 0x80(%rsp), %r13
jmp 0x36bd63
cmpl $0x0, 0x14(%rsp)
movq 0x40(%rsp), %r14
je 0x36bf4f
leaq 0xe0(%rsp), %rdi
movq 0x8(%rsp), %rsi
movq (%rsp), %rdx
movq 0x68(%rsp), %rcx
movl %r14d, %r8d
movq 0x20(%rsp), %r9
callq 0x370761
addl 0x28(%rsp), %r14d
jmp 0x36bcf8
incq %r13
movq 0x60(%rsp), %r15
movq 0xd8(%rsp), %r12
jmp 0x36bbab
movq 0x148(%rsp), %rax
testq %rax, %rax
movq 0x1e8(%rsp), %rbx
je 0x36d6e8
lock
decl (%rax)
jne 0x36d6e8
movq 0x140(%rsp), %rsi
movq 0x160(%rsp), %rdi
testq %rdi, %rdi
je 0x36d5c3
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x36d6e8
movq (%rsi), %rcx
movq %rcx, 0x30(%rsp)
testl %edx, %edx
je 0x36c056
movl 0xd8(%r15,%rax), %edx
movl 0xf0(%r15,%rax), %ecx
movl %ecx, 0x70(%rsp)
movl 0xf4(%r15,%rax), %r14d
movl 0x10c(%r15,%rax), %ecx
movl 0x110(%r15,%rax), %r8d
movl 0x114(%r15,%rax), %r9d
movl %edx, 0x138(%rsp)
testl %edx, %edx
je 0x36c095
movq 0x30(%rsp), %rdx
movl 0x2c(%rdx), %r15d
jmp 0x36c0ad
movq (%rsi), %rdx
movl 0xdc(%r15,%rax), %esi
movl 0xf4(%r15,%rax), %eax
movq %rax, 0x28(%rsp)
movl %esi, %r13d
testl %esi, %esi
movq %rbp, 0x20(%rsp)
je 0x36c6ab
xorl %eax, %eax
cmpl $0x3, 0x28(%rdx)
movl 0x18(%rdx), %esi
sete %al
movq %rdx, %rbx
imull 0x30(%rdx,%rax,8), %esi
jmp 0x36c6b1
movl 0xd8(%r15,%rax), %ecx
movl 0xdc(%r15,%rax), %edx
movl %edx, 0x28(%rsp)
movl %ecx, 0x14(%rsp)
testl %ecx, %ecx
je 0x36cd75
movq 0x30(%rsp), %rdx
movl 0x2c(%rdx), %r14d
xorl %ecx, %ecx
cmpl $0x3, 0x28(%rdx)
movl 0x18(%rdx), %ebx
sete %cl
imull 0x30(%rdx,%rcx,8), %ebx
jmp 0x36cd90
xorl %edx, %edx
movq 0x30(%rsp), %rsi
cmpl $0x3, 0x28(%rsi)
movl 0x18(%rsi), %r15d
sete %dl
imull 0x30(%rsi,%rdx,8), %r15d
movq 0x60(%rsp), %rdx
movl 0x108(%rdx,%rax), %eax
movl %eax, 0x14(%rsp)
leaq 0x8c(%rsp), %rax
leaq 0x3c(%rsp), %r10
leaq 0x290(%rsp), %r11
movl %r15d, %edi
movl 0x70(%rsp), %esi
movl %r14d, %edx
pushq %r13
pushq %rax
pushq %r10
pushq %r11
callq 0x368a58
addq $0x20, %rsp
movl 0x290(%rsp), %ecx
leal (%r15,%rcx), %eax
decl %eax
cltd
idivl %ecx
movl %eax, %ebx
movl 0x8c(%rsp), %esi
leal (%r14,%rsi), %eax
decl %eax
cltd
idivl %esi
movq 0x10(%rbp), %r9
leaq 0x140(%rsp), %rdi
andq $0x0, 0x40(%rdi)
movq %rsi, 0x18(%rsp)
movq %rcx, 0x130(%rsp)
imull %ecx, %esi
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, (%rdi)
vmovups %xmm0, 0xc(%rdi)
vmovaps %xmm0, 0x20(%rdi)
vmovups %xmm0, 0x2c(%rdi)
pushq $0x4
popq %r8
movl %eax, %edx
movl %r13d, %ecx
callq 0x63810
andq $0x0, 0x120(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0xe0(%rsp)
vmovups %xmm0, 0xec(%rsp)
vmovaps %xmm0, 0x100(%rsp)
vmovups %xmm0, 0x10c(%rsp)
cmpl $0x0, 0x14(%rsp)
jne 0x36c19a
cmpl $0x3, 0x50(%rsp)
je 0x36c19a
cmpl %r14d, 0x18(%rsp)
jge 0x36c1c1
movl 0x3c(%rsp), %esi
imull 0x130(%rsp), %esi
movq 0x10(%rbp), %r9
leaq 0xe0(%rsp), %rdi
pushq $0x1
popq %rdx
pushq $0x4
popq %r8
movl %r13d, %ecx
callq 0x63810
cmpl $0x0, 0x14(%rsp)
sete 0x78(%rsp)
xorl %eax, %eax
testl %ebx, %ebx
cmovlel %eax, %ebx
movl %ebx, 0x1e0(%rsp)
movq 0x60(%rsp), %r15
movq 0x1e8(%rsp), %rbx
cmpl 0x1e0(%rsp), %eax
je 0x36c66d
movq %rax, 0xd8(%rsp)
cmpl $0x0, 0x138(%rsp)
je 0x36c222
movq 0x30(%rsp), %rcx
movl 0x2c(%rcx), %ebp
xorl %eax, %eax
cmpl $0x3, 0x28(%rcx)
movl 0x18(%rcx), %r12d
sete %al
imull 0x30(%rcx,%rax,8), %r12d
jmp 0x36c23c
movq 0x30(%rsp), %rcx
movl 0x18(%rcx), %ebp
xorl %eax, %eax
cmpl $0x3, 0x28(%rcx)
sete %al
imull 0x30(%rcx,%rax,8), %ebp
movl 0x2c(%rcx), %r12d
movq 0xd8(%rsp), %rax
movl %eax, %ecx
movq 0x130(%rsp), %rax
imull %eax, %ecx
movq %rcx, 0x20(%rsp)
subl %ecx, %ebp
cmpl %ebp, %eax
cmovll %eax, %ebp
andq $0x0, 0x1d0(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x190(%rsp)
vmovups %xmm0, 0x19c(%rsp)
leaq 0x1b0(%rsp), %rax
vmovups %xmm0, 0xc(%rax)
vmovups %xmm0, (%rax)
cmpl $0x0, 0x14(%rsp)
jne 0x36c2a6
cmpl $0x3, 0x50(%rsp)
je 0x36c2a6
cmpl 0x18(%rsp), %r12d
jle 0x36c37a
callq 0x7357d
movl %eax, %ecx
movl 0x108(%rsp), %edi
movslq 0x110(%rsp), %rsi
movslq 0x10c(%rsp), %r8
movq %r8, %r10
imulq %rsi, %r10
movq 0xf0(%rsp), %r9
movq %r10, %rax
imulq %r9, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %r9
cmpl $0x4, %edi
cmoveq %r10, %rax
decl %edi
movq 0x100(%rsp), %rdx
movl 0xf8(%rsp), %r10d
movslq %ecx, %rcx
imulq %r9, %rcx
imulq 0x120(%rsp), %rcx
addq 0xe0(%rsp), %rcx
movl 0x114(%rsp), %r11d
movq %rcx, 0x190(%rsp)
andq $0x0, 0x198(%rsp)
movq %r9, 0x1a0(%rsp)
movl %r10d, 0x1a8(%rsp)
movq %rdx, 0x1b0(%rsp)
movl %edi, 0x1b8(%rsp)
movl %r8d, 0x1bc(%rsp)
movl %esi, 0x1c0(%rsp)
movl $0x1, 0x1c4(%rsp)
movl %r11d, 0x1c8(%rsp)
movq %rax, 0x1d0(%rsp)
xorl %r15d, %r15d
movl 0x3c(%rsp), %eax
movl %eax, (%rsp)
movl 0x70(%rsp), %eax
movl %eax, %r13d
subl %r15d, %r13d
jle 0x36c659
movl (%rsp), %eax
cmpl %r13d, %eax
cmovll %eax, %r13d
leaq 0x90(%rsp), %rax
movq %rax, 0x80(%rsp)
cmpl $0x3, 0x50(%rsp)
jne 0x36c3e2
leaq 0x90(%rsp), %rdi
leaq 0x190(%rsp), %rbx
movq %rbx, %rsi
movq 0x20(%rsp), %rdx
movl %ebp, %ecx
movl %r15d, %r8d
movl %r13d, %r9d
callq 0x369291
movq %rbx, 0x80(%rsp)
movq %r13, 0x68(%rsp)
movq %r15, 0x40(%rsp)
xorl %r13d, %r13d
movl %r12d, %eax
cmpl %r12d, %r13d
movq 0x18(%rsp), %r15
jge 0x36c61b
cmpl %eax, %r15d
movl %eax, 0x28(%rsp)
movl %eax, %r14d
cmovll %r15d, %r14d
callq 0x7357d
movslq %eax, %rsi
imulq 0x180(%rsp), %rsi
movq 0x150(%rsp), %rdi
imulq %rdi, %rsi
addq 0x140(%rsp), %rsi
movslq 0x16c(%rsp), %r8
movl 0x158(%rsp), %r9d
movq 0x160(%rsp), %r10
movl %r13d, %eax
cltd
idivl %r15d
movslq %eax, %rcx
movq %rdi, %rax
imulq %r8, %rax
imulq %rcx, %rax
addq %rsi, %rax
movq %rax, 0x1f0(%rsp)
andq $0x0, 0x1f8(%rsp)
movq %rdi, 0x200(%rsp)
movl %r9d, 0x208(%rsp)
movq %r10, 0x210(%rsp)
pushq $0x2
popq %r10
movl %r10d, 0x218(%rsp)
movl %r8d, 0x21c(%rsp)
movabsq $0x100000001, %r11 # imm = 0x100000001
movq %r11, 0x220(%rsp)
pushq $0x1
popq %rbx
movl %ebx, 0x228(%rsp)
movq %r8, 0x230(%rsp)
movq 0x40(%rsp), %r10
movl %r10d, %eax
cltd
idivl (%rsp)
cltq
movq 0x60(%rsp), %r8
imulq 0x98(%r8), %rax
movq 0x68(%r8), %rdx
imulq %rdx, %rax
addq 0x58(%r8), %rax
movslq 0x84(%r8), %rsi
movl 0x70(%r8), %edi
movq 0x78(%r8), %r8
movq %rsi, %r9
imulq %rdx, %r9
andq $0x0, 0x248(%rsp)
imulq %rcx, %r9
addq %rax, %r9
movq %r9, 0x240(%rsp)
movq %rdx, 0x250(%rsp)
movl %edi, 0x258(%rsp)
movq %r8, 0x260(%rsp)
pushq $0x2
popq %rax
movl %eax, 0x268(%rsp)
movl %esi, 0x26c(%rsp)
movq %r11, 0x270(%rsp)
movl %ebx, 0x278(%rsp)
movq %rsi, 0x280(%rsp)
testl %r10d, %r10d
jne 0x36c5b1
cmpl $0x0, 0x138(%rsp)
je 0x36c588
movq 0x30(%rsp), %rdi
leaq 0x1f0(%rsp), %rsi
movq 0x20(%rsp), %rdx
movl %ebp, %ecx
movl %r13d, %r8d
movl %r14d, %r9d
callq 0x368c66
jmp 0x36c5a7
movq 0x30(%rsp), %rdi
leaq 0x1f0(%rsp), %rsi
movq 0x20(%rsp), %rdx
movl %ebp, %ecx
movl %r13d, %r8d
movl %r14d, %r9d
callq 0x369291
movq 0x18(%rsp), %r15
movq 0x40(%rsp), %r10
leal (%r15,%r13), %ebx
cmpl %r12d, %ebx
setge %al
andb 0x78(%rsp), %al
subq $0x8, %rsp
movzbl %al, %eax
leaq 0x1f8(%rsp), %rdi
leaq 0x248(%rsp), %rsi
movq 0x88(%rsp), %rdx
leaq 0x198(%rsp), %rcx
movq 0x10(%rsp), %r8
movl 0x58(%rsp), %r9d
pushq %rax
pushq %r14
pushq %r13
pushq 0x88(%rsp)
pushq %r10
pushq %rbp
pushq 0x58(%rsp)
callq 0x36e020
addq $0x40, %rsp
movl 0x28(%rsp), %eax
subl %r15d, %eax
movl %ebx, %r13d
jmp 0x36c3f2
cmpl $0x0, 0x14(%rsp)
movq 0x1e8(%rsp), %rbx
movq 0x40(%rsp), %r15
je 0x36c650
leaq 0x190(%rsp), %rdi
movq 0x8(%rsp), %rsi
movq 0x20(%rsp), %rdx
movl %ebp, %ecx
movl %r15d, %r8d
movq 0x68(%rsp), %r9
callq 0x370761
addl (%rsp), %r15d
jmp 0x36c384
movq 0xd8(%rsp), %rax
incl %eax
movq 0x60(%rsp), %r15
jmp 0x36c1e6
movq 0xe8(%rsp), %rax
testq %rax, %rax
je 0x36d62e
lock
decl (%rax)
jne 0x36d62e
movq 0xe0(%rsp), %rsi
movq 0x100(%rsp), %rdi
testq %rdi, %rdi
je 0x36d626
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x36d62e
movq %rdx, %rbx
movl 0x2c(%rdx), %esi
movq %rsi, 0x78(%rsp)
leaq 0x8c(%rsp), %rax
leaq 0x3c(%rsp), %r10
leaq 0x290(%rsp), %r11
movl %r12d, %edi
movq 0x28(%rsp), %r15
movl %r15d, %edx
pushq (%rsp)
pushq %rax
pushq %r10
pushq %r11
callq 0x368a58
addq $0x20, %rsp
movl 0x290(%rsp), %ecx
leal (%r12,%rcx), %eax
decl %eax
cltd
movq %rcx, 0x138(%rsp)
idivl %ecx
movq %rax, 0x30(%rsp)
movl 0x3c(%rsp), %ecx
movq 0x78(%rsp), %rax
addl %ecx, %eax
decl %eax
cltd
idivl %ecx
movl %eax, %ebp
movl 0x8c(%rsp), %r12d
leal (%r15,%r12), %eax
decl %eax
cltd
idivl %r12d
movl %eax, %r15d
movq 0x20(%rsp), %rax
movq 0x10(%rax), %r9
leaq 0x140(%rsp), %rdi
andq $0x0, 0x40(%rdi)
movl %r12d, %esi
movq %rcx, 0x40(%rsp)
imull %ecx, %esi
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, (%rdi)
vmovups %xmm0, 0xc(%rdi)
vmovaps %xmm0, 0x20(%rdi)
vmovups %xmm0, 0x2c(%rdi)
pushq $0x4
popq %r8
movl %r15d, %edx
movl %ebp, %ecx
callq 0x63810
imull %r15d, %ebp
xorl %r14d, %r14d
testl %ebp, %ebp
cmovlel %r14d, %ebp
cmpl %r14d, %ebp
je 0x36c88f
movl %r14d, %eax
cltd
idivl %r15d
movslq %eax, %rsi
movq 0x40(%rsp), %rdi
imull %edi, %eax
movl %edx, %r8d
imull %r12d, %r8d
movq 0x78(%rsp), %rcx
subl %eax, %ecx
cmpl %ecx, %edi
cmovll %edi, %ecx
movq 0x28(%rsp), %rdi
movl %edi, %r9d
subl %r8d, %r9d
cmpl %r9d, %r12d
cmovll %r12d, %r9d
movslq 0x16c(%rsp), %rdi
imulq 0x180(%rsp), %rsi
movq 0x150(%rsp), %r10
imulq %r10, %rsi
addq 0x140(%rsp), %rsi
imulq %rdi, %rdx
imulq %r10, %rdx
addq %rsi, %rdx
andq $0x0, 0xe8(%rsp)
movq %rdx, 0xe0(%rsp)
movq %r10, 0xf0(%rsp)
movl 0x158(%rsp), %edx
movl %edx, 0xf8(%rsp)
movq 0x160(%rsp), %rdx
movq %rdx, 0x100(%rsp)
movl $0x2, 0x108(%rsp)
movl %edi, 0x10c(%rsp)
movabsq $0x100000001, %rdx # imm = 0x100000001
movq %rdx, 0x110(%rsp)
movl $0x1, 0x118(%rsp)
movq %rdi, 0x120(%rsp)
testl %r13d, %r13d
je 0x36c875
movq %rbx, %rdi
leaq 0xe0(%rsp), %rsi
movl %eax, %edx
callq 0x369a61
jmp 0x36c887
movq %rbx, %rdi
leaq 0xe0(%rsp), %rsi
movl %eax, %edx
callq 0x36aa25
incl %r14d
jmp 0x36c77e
andq $0x0, 0x120(%rsp)
cmpl 0x28(%rsp), %r12d
setl %al
cmpl $0x3, 0x50(%rsp)
sete %cl
orb %al, %cl
cmpl $0x0, 0x14(%rsp)
setne %al
orb %cl, %al
movb %al, 0x70(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0xe0(%rsp)
vmovups %xmm0, 0xec(%rsp)
vmovaps %xmm0, 0x100(%rsp)
vmovups %xmm0, 0x10c(%rsp)
je 0x36c912
movq 0x40(%rsp), %rax
movl %eax, %esi
imull 0x138(%rsp), %esi
movq 0x20(%rsp), %rax
movq 0x10(%rax), %r9
leaq 0xe0(%rsp), %rdi
pushq $0x1
popq %rdx
pushq $0x4
popq %r8
movq (%rsp), %rcx
callq 0x63810
cmpl $0x0, 0x14(%rsp)
sete 0x80(%rsp)
xorl %r14d, %r14d
movq 0x30(%rsp), %rax
testl %eax, %eax
cmovlel %r14d, %eax
movq %rax, 0x30(%rsp)
movq 0x60(%rsp), %r15
movq 0x1e8(%rsp), %rbx
movq 0x28(%rsp), %r13
cmpq 0x30(%rsp), %r14
je 0x36ccf2
movq %r14, %rdx
movq 0x138(%rsp), %rcx
imulq %rcx, %rdx
movq 0xd8(%rsp), %rax
movq %rdx, 0x18(%rsp)
subl %edx, %eax
cmpl %eax, %ecx
cmovll %ecx, %eax
movq %rax, (%rsp)
andq $0x0, 0x1d0(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x190(%rsp)
vmovups %xmm0, 0x19c(%rsp)
leaq 0x1b0(%rsp), %rax
vmovups %xmm0, 0xc(%rax)
vmovups %xmm0, (%rax)
cmpb $0x0, 0x70(%rsp)
je 0x36ca85
callq 0x7357d
movl %eax, %ecx
movl 0x108(%rsp), %edi
movslq 0x110(%rsp), %rsi
movslq 0x10c(%rsp), %r8
movq %r8, %r10
imulq %rsi, %r10
movq 0xf0(%rsp), %r9
movq %r10, %rax
imulq %r9, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %r9
cmpl $0x4, %edi
cmoveq %r10, %rax
decl %edi
movq 0x100(%rsp), %rdx
movl 0xf8(%rsp), %r10d
movslq %ecx, %rcx
imulq %r9, %rcx
imulq 0x120(%rsp), %rcx
addq 0xe0(%rsp), %rcx
movl 0x114(%rsp), %r11d
movq %rcx, 0x190(%rsp)
andq $0x0, 0x198(%rsp)
movq %r9, 0x1a0(%rsp)
movl %r10d, 0x1a8(%rsp)
movq %rdx, 0x1b0(%rsp)
movl %edi, 0x1b8(%rsp)
movl %r8d, 0x1bc(%rsp)
movl %esi, 0x1c0(%rsp)
movl $0x1, 0x1c4(%rsp)
movl %r11d, 0x1c8(%rsp)
movq %rax, 0x1d0(%rsp)
movq %r14, 0x68(%rsp)
xorl %ebp, %ebp
movq 0x78(%rsp), %r14
subl %ebp, %r14d
jle 0x36ccdd
movq 0x40(%rsp), %rax
cmpl %r14d, %eax
cmovll %eax, %r14d
leaq 0x90(%rsp), %r15
movq %r15, 0x20(%rsp)
cmpl $0x3, 0x50(%rsp)
jne 0x36cae6
leaq 0x90(%rsp), %rdi
leaq 0x190(%rsp), %r15
movq %r15, %rsi
movq 0x18(%rsp), %rdx
movq (%rsp), %rcx
movl %ebp, %r8d
movl %r14d, %r9d
callq 0x369291
movq %r15, 0x20(%rsp)
xorl %r10d, %r10d
movl %r13d, %r11d
cmpl %r13d, %r10d
jge 0x36cca7
movl %r12d, %r13d
subl %r11d, %r13d
cmovll %r12d, %r11d
movq 0x60(%rsp), %rax
movq 0x50(%rax), %rsi
imulq 0x68(%rsp), %rsi
movq 0x20(%rax), %rdi
imulq %rdi, %rsi
addq 0x10(%rax), %rsi
movslq 0x3c(%rax), %r8
movl 0x28(%rax), %r9d
movq 0x30(%rax), %rbx
movl %r10d, %eax
cltd
idivl %r12d
movslq %eax, %rcx
movq %rdi, %rax
imulq %r8, %rax
imulq %rcx, %rax
addq %rsi, %rax
movq %rax, 0x1f0(%rsp)
andq $0x0, 0x1f8(%rsp)
movq %rdi, 0x200(%rsp)
movl %r9d, 0x208(%rsp)
movq %rbx, 0x210(%rsp)
pushq $0x2
popq %r9
movl %r9d, 0x218(%rsp)
movl %r8d, 0x21c(%rsp)
movabsq $0x100000001, %rbx # imm = 0x100000001
movq %rbx, 0x220(%rsp)
pushq $0x1
popq %r15
movl %r15d, 0x228(%rsp)
movq %r8, 0x230(%rsp)
movl %ebp, %eax
cltd
idivl 0x40(%rsp)
movslq 0x16c(%rsp), %rdx
movq 0x150(%rsp), %rsi
movq %rdx, %rdi
imulq %rsi, %rdi
imulq %rcx, %rdi
cltq
imulq 0x180(%rsp), %rax
imulq %rsi, %rax
addq 0x140(%rsp), %rax
addq %rax, %rdi
movq %rdi, 0x240(%rsp)
andq $0x0, 0x248(%rsp)
negl %r13d
movq %rsi, 0x250(%rsp)
movl 0x158(%rsp), %eax
movl %eax, 0x258(%rsp)
movq 0x160(%rsp), %rax
movq %rax, 0x260(%rsp)
movl %r9d, 0x268(%rsp)
movl %edx, 0x26c(%rsp)
movq %rbx, 0x270(%rsp)
movl %r15d, 0x278(%rsp)
movq %rdx, 0x280(%rsp)
leal (%r10,%r12), %ebx
cmpl 0x28(%rsp), %ebx
setge %al
andb 0x80(%rsp), %al
movzbl %al, %eax
subq $0x8, %rsp
leaq 0x1f8(%rsp), %rdi
leaq 0x248(%rsp), %rsi
movq 0x28(%rsp), %rdx
leaq 0x198(%rsp), %rcx
movq 0x10(%rsp), %r8
movl 0x58(%rsp), %r9d
pushq %rax
pushq %r11
pushq %r10
pushq %r14
pushq %rbp
pushq 0x30(%rsp)
pushq 0x50(%rsp)
callq 0x36e020
addq $0x40, %rsp
movl %r13d, %r11d
movq 0x28(%rsp), %r13
movl %ebx, %r10d
jmp 0x36caec
cmpl $0x0, 0x14(%rsp)
je 0x36cccf
leaq 0x190(%rsp), %rdi
movq 0x8(%rsp), %rsi
movq 0x18(%rsp), %rdx
movq (%rsp), %rcx
movl %ebp, %r8d
movl %r14d, %r9d
callq 0x370761
addl 0x40(%rsp), %ebp
movq 0x60(%rsp), %r15
jmp 0x36ca8c
movq 0x68(%rsp), %r14
incq %r14
movq 0x1e8(%rsp), %rbx
jmp 0x36c944
movq 0xe8(%rsp), %rax
testq %rax, %rax
je 0x36d5e7
lock
decl (%rax)
jne 0x36d5e7
movq 0xe0(%rsp), %rsi
movq 0x100(%rsp), %rdi
testq %rdi, %rdi
je 0x36d5df
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x36d5e7
xorl %r14d, %r14d
jmp 0x36d7b4
xorl %ecx, %ecx
xorl %edx, %edx
xorl %ebp, %ebp
xorl %edi, %edi
xorl %r15d, %r15d
xorl %r14d, %r14d
xorl %esi, %esi
jmp 0x36d967
movq 0x123284(%rip), %rbx # 0x48ffd8
movq (%rbx), %rdi
leaq 0x8e57a(%rip), %rsi # 0x3fb2d8
xorl %eax, %eax
callq 0x5f150
movq (%rbx), %rsi
pushq $0xa
popq %rdi
callq 0x5f1c0
jmp 0x36ba35
movq 0x30(%rsp), %rdx
movl 0x18(%rdx), %r14d
xorl %ecx, %ecx
cmpl $0x3, 0x28(%rdx)
sete %cl
imull 0x30(%rdx,%rcx,8), %r14d
movl 0x2c(%rdx), %ebx
movl 0x10c(%r15,%rax), %ecx
movl 0x110(%r15,%rax), %r8d
movl 0x114(%r15,%rax), %r9d
cmpl $0x0, 0x28(%rsp)
je 0x36cdc7
xorl %edx, %edx
movq 0x30(%rsp), %rdi
cmpl $0x3, 0x70(%rdi)
movl 0x60(%rdi), %esi
sete %dl
imull 0x78(%rdi,%rdx,8), %esi
jmp 0x36cdcf
movq 0x30(%rsp), %rdx
movl 0x74(%rdx), %esi
movl 0x108(%r15,%rax), %eax
movl %eax, 0xd8(%rsp)
leaq 0x28c(%rsp), %rax
leaq 0x8c(%rsp), %r10
leaq 0x3c(%rsp), %r11
movl %r14d, %edi
movq %rsi, 0x70(%rsp)
movq %rbx, %r15
movl %r15d, %edx
pushq %r13
pushq %rax
pushq %r10
pushq %r11
callq 0x368a58
addq $0x20, %rsp
movq %rbp, %r12
movq %r13, %rcx
movl 0x3c(%rsp), %r8d
leal (%r14,%r8), %eax
decl %eax
cltd
idivl %r8d
movl %eax, 0x130(%rsp)
movl 0x8c(%rsp), %r14d
movl 0x28c(%rsp), %r13d
leal (%r15,%r13), %eax
decl %eax
cltd
idivl %r13d
movl %eax, %ebp
movq 0x10(%r12), %r9
leaq 0x140(%rsp), %rdi
andq $0x0, 0x40(%rdi)
movl %r13d, %esi
movq %r8, 0x238(%rsp)
imull %r8d, %esi
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, (%rdi)
vmovups %xmm0, 0xc(%rdi)
vmovaps %xmm0, 0x20(%rdi)
vmovups %xmm0, 0x2c(%rdi)
pushq $0x4
popq %r8
movl %eax, %edx
movq %rcx, %r15
callq 0x63810
movq %r15, %rax
movq %rax, (%rsp)
movl %r13d, %esi
imull %r14d, %esi
movq 0x70(%rsp), %rax
addl %r14d, %eax
decl %eax
cltd
movq %r14, 0x80(%rsp)
idivl %r14d
movl %eax, %r15d
movq %r12, 0x20(%rsp)
movq 0x10(%r12), %r9
leaq 0xe0(%rsp), %rdi
andq $0x0, 0x40(%rdi)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, (%rdi)
vmovups %xmm0, 0xc(%rdi)
vmovaps %xmm0, 0x20(%rdi)
vmovups %xmm0, 0x2c(%rdi)
pushq $0x4
popq %r8
movl %ebp, %edx
movl %eax, %ecx
callq 0x63810
movq 0x30(%rsp), %rax
leaq 0x48(%rax), %r12
imull %ebp, %r15d
xorl %r14d, %r14d
testl %r15d, %r15d
cmovlel %r14d, %r15d
cmpl %r14d, %r15d
je 0x36d018
movl %r14d, %eax
cltd
idivl %ebp
movslq %eax, %rsi
movq 0x80(%rsp), %rdi
imull %edi, %eax
movl %edx, %r8d
imull %r13d, %r8d
movq 0x70(%rsp), %rcx
subl %eax, %ecx
cmpl %ecx, %edi
cmovll %edi, %ecx
movl %ebx, %r9d
subl %r8d, %r9d
cmpl %r9d, %r13d
cmovll %r13d, %r9d
movslq 0x10c(%rsp), %rdi
imulq 0x120(%rsp), %rsi
movq 0xf0(%rsp), %r10
imulq %r10, %rsi
addq 0xe0(%rsp), %rsi
imulq %rdi, %rdx
imulq %r10, %rdx
addq %rsi, %rdx
andq $0x0, 0x198(%rsp)
movq %rdx, 0x190(%rsp)
movq %r10, 0x1a0(%rsp)
movl 0xf8(%rsp), %edx
movl %edx, 0x1a8(%rsp)
movq 0x100(%rsp), %rdx
movq %rdx, 0x1b0(%rsp)
movl $0x2, 0x1b8(%rsp)
movl %edi, 0x1bc(%rsp)
movabsq $0x100000001, %rdx # imm = 0x100000001
movq %rdx, 0x1c0(%rsp)
movl $0x1, 0x1c8(%rsp)
movq %rdi, 0x1d0(%rsp)
cmpl $0x0, 0x28(%rsp)
je 0x36cffe
movq %r12, %rdi
leaq 0x190(%rsp), %rsi
movl %eax, %edx
callq 0x369a61
jmp 0x36d010
movq %r12, %rdi
leaq 0x190(%rsp), %rsi
movl %eax, %edx
callq 0x36aa25
incl %r14d
jmp 0x36cf08
andq $0x0, 0x1d0(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x190(%rsp)
vmovups %xmm0, 0x19c(%rsp)
vmovaps %xmm0, 0x1b0(%rsp)
vmovups %xmm0, 0x1bc(%rsp)
cmpl $0x0, 0xd8(%rsp)
movq 0x80(%rsp), %r12
jne 0x36d067
cmpl $0x3, 0x50(%rsp)
je 0x36d067
cmpl %r13d, %ebx
jle 0x36d093
movl %r12d, %esi
imull 0x238(%rsp), %esi
movq 0x20(%rsp), %rax
movq 0x10(%rax), %r9
leaq 0x190(%rsp), %rdi
pushq $0x1
popq %rdx
pushq $0x4
popq %r8
movq (%rsp), %rcx
callq 0x63810
cmpl $0x0, 0xd8(%rsp)
sete 0x138(%rsp)
xorl %ecx, %ecx
movl 0x130(%rsp), %eax
testl %eax, %eax
cmovlel %ecx, %eax
movl %eax, 0x130(%rsp)
movq 0x60(%rsp), %r15
movq 0x1e8(%rsp), %rbx
movq 0x70(%rsp), %r14
movq %r13, 0x78(%rsp)
movq %rcx, 0x1e0(%rsp)
cmpl 0x130(%rsp), %ecx
je 0x36d577
cmpl $0x0, 0x14(%rsp)
je 0x36d10a
movq 0x30(%rsp), %rcx
movl 0x2c(%rcx), %ebp
xorl %eax, %eax
cmpl $0x3, 0x28(%rcx)
movl 0x18(%rcx), %edx
sete %al
imull 0x30(%rcx,%rax,8), %edx
movl %edx, 0x68(%rsp)
jmp 0x36d127
movq 0x30(%rsp), %rcx
movl 0x18(%rcx), %ebp
xorl %eax, %eax
cmpl $0x3, 0x28(%rcx)
sete %al
imull 0x30(%rcx,%rax,8), %ebp
movl 0x2c(%rcx), %eax
movl %eax, 0x68(%rsp)
movq 0x1e0(%rsp), %rax
movl %eax, %ecx
movq 0x238(%rsp), %rax
imull %eax, %ecx
movq %rcx, 0x40(%rsp)
subl %ecx, %ebp
cmpl %ebp, %eax
cmovll %eax, %ebp
andq $0x0, 0x230(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x1f0(%rsp)
vmovups %xmm0, 0x1fc(%rsp)
leaq 0x210(%rsp), %rax
vmovups %xmm0, 0xc(%rax)
vmovups %xmm0, (%rax)
cmpl $0x0, 0xd8(%rsp)
jne 0x36d194
cmpl $0x3, 0x50(%rsp)
je 0x36d194
cmpl %r13d, 0x68(%rsp)
jle 0x36d268
callq 0x7357d
movl %eax, %ecx
movl 0x1b8(%rsp), %edi
movslq 0x1c0(%rsp), %rsi
movslq 0x1bc(%rsp), %r8
movq %r8, %r10
imulq %rsi, %r10
movq 0x1a0(%rsp), %r9
movq %r10, %rax
imulq %r9, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %r9
cmpl $0x4, %edi
cmoveq %r10, %rax
decl %edi
movq 0x1b0(%rsp), %rdx
movl 0x1a8(%rsp), %r10d
movslq %ecx, %rcx
imulq %r9, %rcx
imulq 0x1d0(%rsp), %rcx
addq 0x190(%rsp), %rcx
movl 0x1c4(%rsp), %r11d
movq %rcx, 0x1f0(%rsp)
andq $0x0, 0x1f8(%rsp)
movq %r9, 0x200(%rsp)
movl %r10d, 0x208(%rsp)
movq %rdx, 0x210(%rsp)
movl %edi, 0x218(%rsp)
movl %r8d, 0x21c(%rsp)
movl %esi, 0x220(%rsp)
movl $0x1, 0x224(%rsp)
movl %r11d, 0x228(%rsp)
movq %rax, 0x230(%rsp)
xorl %r15d, %r15d
movq %rbp, 0x20(%rsp)
movl %r14d, %eax
subl %r15d, %eax
jle 0x36d563
cmpl %eax, %r12d
cmovll %r12d, %eax
movq %rax, 0x18(%rsp)
leaq 0x90(%rsp), %rax
movq %rax, (%rsp)
cmpl $0x3, 0x50(%rsp)
jne 0x36d2c6
leaq 0x90(%rsp), %rdi
leaq 0x1f0(%rsp), %rbx
movq %rbx, %rsi
movq 0x40(%rsp), %rdx
movl %ebp, %ecx
movl %r15d, %r8d
movq 0x18(%rsp), %r9
callq 0x369291
movq %rbx, (%rsp)
movq %r15, 0x28(%rsp)
xorl %r14d, %r14d
movl 0x68(%rsp), %r13d
movl %r13d, %ebp
cmpl %r13d, %r14d
jge 0x36d50c
movq 0x78(%rsp), %r15
cmpl %ebp, %r15d
movl %ebp, %r12d
cmovll %r15d, %r12d
callq 0x7357d
movq 0x28(%rsp), %rbx
movslq %eax, %rsi
imulq 0x180(%rsp), %rsi
movq 0x150(%rsp), %rdi
imulq %rdi, %rsi
addq 0x140(%rsp), %rsi
movslq 0x16c(%rsp), %r8
movl 0x158(%rsp), %r9d
movq 0x160(%rsp), %r10
movl %r14d, %eax
cltd
idivl %r15d
movslq %eax, %rcx
movq %rdi, %rax
imulq %r8, %rax
imulq %rcx, %rax
addq %rsi, %rax
movq %rax, 0x240(%rsp)
andq $0x0, 0x248(%rsp)
movq %rdi, 0x250(%rsp)
movl %r9d, 0x258(%rsp)
movq %r10, 0x260(%rsp)
pushq $0x2
popq %r9
movl %r9d, 0x268(%rsp)
movl %r8d, 0x26c(%rsp)
movabsq $0x100000001, %r10 # imm = 0x100000001
movq %r10, 0x270(%rsp)
pushq $0x1
popq %r11
movl %r11d, 0x278(%rsp)
movq %r8, 0x280(%rsp)
movl %ebx, %eax
cltd
idivl 0x80(%rsp)
cltq
imulq 0x120(%rsp), %rax
movq 0xf0(%rsp), %rdx
imulq %rdx, %rax
addq 0xe0(%rsp), %rax
movslq 0x10c(%rsp), %rsi
movq %rsi, %rdi
imulq %rdx, %rdi
imulq %rcx, %rdi
addq %rax, %rdi
andq $0x0, 0x298(%rsp)
movl 0xf8(%rsp), %eax
movq 0x100(%rsp), %rcx
movq %rdi, 0x290(%rsp)
movq %rdx, 0x2a0(%rsp)
movl %eax, 0x2a8(%rsp)
movq %rcx, 0x2b0(%rsp)
movl %r9d, 0x2b8(%rsp)
movl %esi, 0x2bc(%rsp)
movq %r10, 0x2c0(%rsp)
movl %r11d, 0x2c8(%rsp)
movq %rsi, 0x2d0(%rsp)
testl %ebx, %ebx
movq %rbx, %r10
jne 0x36d4a1
cmpl $0x0, 0x14(%rsp)
je 0x36d47a
movq 0x30(%rsp), %rdi
leaq 0x240(%rsp), %rsi
movq 0x40(%rsp), %rdx
movq 0x20(%rsp), %rcx
movl %r14d, %r8d
movl %r12d, %r9d
callq 0x368c66
jmp 0x36d49c
movq 0x30(%rsp), %rdi
leaq 0x240(%rsp), %rsi
movq 0x40(%rsp), %rdx
movq 0x20(%rsp), %rcx
movl %r14d, %r8d
movl %r12d, %r9d
callq 0x369291
movq 0x28(%rsp), %r10
leal (%r14,%r15), %ebx
movl 0x68(%rsp), %r13d
cmpl %r13d, %ebx
setge %al
andb 0x138(%rsp), %al
subq $0x8, %rsp
movzbl %al, %eax
leaq 0x248(%rsp), %rdi
leaq 0x298(%rsp), %rsi
movq 0x8(%rsp), %rdx
leaq 0x1f8(%rsp), %rcx
movq 0x10(%rsp), %r8
movl 0x58(%rsp), %r9d
pushq %rax
pushq %r12
pushq %r14
pushq 0x38(%rsp)
pushq %r10
pushq 0x50(%rsp)
pushq 0x78(%rsp)
callq 0x36e020
addq $0x40, %rsp
subl %r15d, %ebp
movl %ebx, %r14d
jmp 0x36d2d6
cmpl $0x0, 0xd8(%rsp)
movq 0x20(%rsp), %rbp
movq 0x78(%rsp), %r13
movq 0x28(%rsp), %r15
je 0x36d546
leaq 0x1f0(%rsp), %rdi
movq 0x8(%rsp), %rsi
movq 0x40(%rsp), %rdx
movl %ebp, %ecx
movl %r15d, %r8d
movq 0x18(%rsp), %r9
callq 0x370761
movq 0x80(%rsp), %r12
addl %r12d, %r15d
movq 0x1e8(%rsp), %rbx
movq 0x70(%rsp), %r14
jmp 0x36d270
movq 0x1e0(%rsp), %rcx
incl %ecx
movq 0x60(%rsp), %r15
jmp 0x36d0cf
movq 0x198(%rsp), %rax
testq %rax, %rax
je 0x36d67a
lock
decl (%rax)
jne 0x36d67a
movq 0x190(%rsp), %rsi
movq 0x1b0(%rsp), %rdi
testq %rdi, %rdi
je 0x36d672
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x36d67a
xorl %eax, %eax
jmp 0x36d849
xorl %eax, %eax
jmp 0x36d8d8
jmp 0x36d6e0
xorl %r14d, %r14d
jmp 0x36d849
xorl %r14d, %r14d
jmp 0x36d8d8
xorl %eax, %eax
jmp 0x36d967
movq %rsi, %rdi
callq 0x5f3e0
movq 0x148(%rsp), %rax
testq %rax, %rax
je 0x36d6e8
lock
decl (%rax)
jne 0x36d6e8
movq 0x140(%rsp), %rsi
movq 0x160(%rsp), %rdi
testq %rdi, %rdi
je 0x36d621
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x36d6e8
jmp 0x36d6e0
movq %rsi, %rdi
callq 0x5f3e0
movq 0x148(%rsp), %rax
testq %rax, %rax
je 0x36d6e8
lock
decl (%rax)
jne 0x36d6e8
movq 0x140(%rsp), %rsi
movq 0x160(%rsp), %rdi
testq %rdi, %rdi
je 0x36d668
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x36d6e8
jmp 0x36d6e0
xorl %r14d, %r14d
jmp 0x36d967
movq %rsi, %rdi
callq 0x5f3e0
movq 0xe8(%rsp), %rax
testq %rax, %rax
je 0x36d6b1
lock
decl (%rax)
jne 0x36d6b1
movq 0xe0(%rsp), %rsi
movq 0x100(%rsp), %rdi
testq %rdi, %rdi
je 0x36d6a9
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x36d6b1
movq %rsi, %rdi
callq 0x5f3e0
movq 0x148(%rsp), %rax
testq %rax, %rax
je 0x36d6e8
lock
decl (%rax)
jne 0x36d6e8
movq 0x140(%rsp), %rsi
movq 0x160(%rsp), %rdi
testq %rdi, %rdi
je 0x36d6e0
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x36d6e8
movq %rsi, %rdi
callq 0x5f3e0
movq (%r15), %rax
movq -0x18(%rax), %rcx
vmovss 0x81591(%rip), %xmm0 # 0x3eec88
vucomiss 0xd0(%r15,%rcx), %xmm0
je 0x36d748
movq 0x8(%rsp), %rcx
imull 0x40(%rcx), %ebx
imull 0x38(%rcx), %ebx
movq %rbx, %rdi
xorl %r14d, %r14d
testl %edi, %edi
cmovlel %r14d, %edi
xorl %ecx, %ecx
cmpq %rcx, %rdi
je 0x36d74b
movq -0x18(%rax), %rdx
movq 0x8(%rsp), %rsi
movq (%rsi), %rsi
vmovss (%rsi,%rcx,4), %xmm0
vmulss 0xd0(%r15,%rdx), %xmm0, %xmm0
vmovss %xmm0, (%rsi,%rcx,4)
incq %rcx
jmp 0x36d71e
xorl %r14d, %r14d
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0x36d782
lock
decl (%rax)
jne 0x36d782
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
je 0x36d77a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x36d782
movq %rsi, %rdi
callq 0x5f3e0
movl %r14d, %eax
addq $0x2d8, %rsp # imm = 0x2D8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq %rsi, %rdi
callq 0x5f3e0
xorl %eax, %eax
movq 0x8(%rsp), %rsi
vmovaps 0x40(%rsp), %xmm0
movl 0x18(%rsp), %edx
movq (%rsp), %rdi
movq %rsi, 0x90(%rsp)
movq %r14, 0x98(%rsp)
movq %rdi, 0xa0(%rsp)
movl %edx, 0xa8(%rsp)
movq %rbp, 0xb0(%rsp)
vmovups %xmm0, 0xb8(%rsp)
movl %r15d, 0xc8(%rsp)
movq %rbx, 0xd0(%rsp)
testb %al, %al
movq 0x60(%rsp), %r15
movq 0x50(%rsp), %rbx
jne 0x36d9ce
lock
decl (%r14)
jne 0x36d9ce
testq %rbp, %rbp
je 0x36d9c6
movq (%rbp), %rax
movq %rbp, %rdi
callq *0x18(%rax)
jmp 0x36d9ce
movq %rsi, %rdi
callq 0x5f3e0
xorl %eax, %eax
movq 0x8(%rsp), %rsi
vmovaps 0x50(%rsp), %xmm0
movq 0x40(%rsp), %rcx
movl 0x18(%rsp), %edx
movl (%rsp), %edi
movq %rsi, 0x90(%rsp)
movq %r14, 0x98(%rsp)
movq %r15, 0xa0(%rsp)
movl %edi, 0xa8(%rsp)
movq %rbp, 0xb0(%rsp)
vmovups %xmm0, 0xb8(%rsp)
movl %edx, 0xc8(%rsp)
movq %rcx, 0xd0(%rsp)
testb %al, %al
movq 0x60(%rsp), %r15
jne 0x36d9ce
lock
decl (%r14)
jne 0x36d9ce
testq %rbp, %rbp
je 0x36d9c6
movq (%rbp), %rax
movq %rbp, %rdi
callq *0x18(%rax)
jmp 0x36d9ce
movq %rsi, %rdi
callq 0x5f3e0
xorl %eax, %eax
movq 0x8(%rsp), %rsi
vmovaps 0x50(%rsp), %xmm0
movq 0x40(%rsp), %rcx
movl 0x18(%rsp), %edx
movl (%rsp), %edi
movq %rsi, 0x90(%rsp)
movq %r14, 0x98(%rsp)
movq %r15, 0xa0(%rsp)
movl %edi, 0xa8(%rsp)
movq %rbp, 0xb0(%rsp)
vmovups %xmm0, 0xb8(%rsp)
movl %edx, 0xc8(%rsp)
movq %rcx, 0xd0(%rsp)
testb %al, %al
movq 0x60(%rsp), %r15
jne 0x36d9ce
lock
decl (%r14)
jne 0x36d9ce
testq %rbp, %rbp
je 0x36d9c6
movq (%rbp), %rax
movq %rbp, %rdi
callq *0x18(%rax)
jmp 0x36d9ce
movq %rsi, %rdi
callq 0x5f3e0
xorl %eax, %eax
movq 0x8(%rsp), %rsi
vmovaps 0x50(%rsp), %xmm0
movq 0x40(%rsp), %rcx
movl 0x18(%rsp), %edx
movl (%rsp), %edi
movq %rsi, 0x90(%rsp)
movq %r14, 0x98(%rsp)
movq %r15, 0xa0(%rsp)
movl %edi, 0xa8(%rsp)
movq %rbp, 0xb0(%rsp)
vmovups %xmm0, 0xb8(%rsp)
movl %edx, 0xc8(%rsp)
movq %rcx, 0xd0(%rsp)
testb %al, %al
movq 0x60(%rsp), %r15
jne 0x36d9ce
lock
decl (%r14)
jne 0x36d9ce
testq %rbp, %rbp
je 0x36d9c6
movq (%rbp), %rax
movq %rbp, %rdi
callq *0x18(%rax)
jmp 0x36d9ce
movq %rsi, %rdi
callq 0x5f3e0
movl $0x0, 0x50(%rsp)
cmpq $0x0, 0x90(%rsp)
je 0x36da32
movslq 0xc8(%rsp), %rax
imulq 0xd0(%rsp), %rax
testq %rax, %rax
movq 0x20(%rsp), %rbp
je 0x36b930
movl 0xb8(%rsp), %edx
movl 0xbc(%rsp), %ecx
pushq $0x4
popq %rax
cmpl $0x1, %edx
jne 0x36da3c
imull 0xa8(%rsp), %ecx
xorl %edx, %edx
cmpl %r12d, %ecx
sete %dl
cmpl %r13d, %ecx
cmovel %eax, %edx
movl %edx, 0x50(%rsp)
jmp 0x36daa9
movq 0x20(%rsp), %rbp
jmp 0x36b930
cmpl $0x1, %ecx
sete %sil
cmpl $0x2, %edx
sete %dl
andb %dl, %sil
movl $0x0, 0x50(%rsp)
cmpb $0x1, %sil
jne 0x36da7a
movl 0xa8(%rsp), %edx
imull 0xc0(%rsp), %edx
xorl %esi, %esi
cmpl %r12d, %edx
sete %sil
addl %esi, %esi
movl %esi, 0x50(%rsp)
movb $0x1, %dl
testb %dl, %dl
je 0x36daa9
cmpl %r13d, %ecx
jne 0x36daa9
movl 0xa8(%rsp), %ecx
imull 0xc0(%rsp), %ecx
cmpl %r12d, %ecx
pushq $0x3
popq %rdx
movl 0x50(%rsp), %esi
cmovel %edx, %esi
cmpl $0x1, %ecx
cmovel %eax, %esi
movl %esi, 0x50(%rsp)
movq (%r15), %rax
movq -0x18(%rax), %rax
vmovss 0x811d0(%rip), %xmm0 # 0x3eec88
vucomiss 0xd4(%r15,%rax), %xmm0
je 0x36b930
leaq 0x140(%rsp), %rdi
andq $0x0, 0x40(%rdi)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, (%rdi)
vmovups %xmm0, 0xc(%rdi)
vmovaps %xmm0, 0x20(%rdi)
vmovups %xmm0, 0x2c(%rdi)
movq 0x10(%rbp), %rdx
leaq 0x90(%rsp), %rsi
callq 0x63a98
movl 0xc8(%rsp), %eax
imull 0xd0(%rsp), %eax
imull 0xa8(%rsp), %eax
movq 0x90(%rsp), %rcx
movq (%r15), %rdx
movq 0x140(%rsp), %rsi
xorl %edi, %edi
testl %eax, %eax
cmovlel %edi, %eax
cmpq %rdi, %rax
je 0x36db50
movq -0x18(%rdx), %r8
vmovss 0xd4(%r15,%r8), %xmm0
vmulss (%rcx,%rdi,4), %xmm0, %xmm0
vmovss %xmm0, (%rsi,%rdi,4)
incq %rdi
jmp 0x36db2e
movq 0x148(%rsp), %rax
testq %rax, %rax
je 0x36db60
lock
incl (%rax)
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0x36db97
lock
decl (%rax)
jne 0x36db97
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
je 0x36db8f
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x36db97
movq %rsi, %rdi
callq 0x5f3e0
movq 0x148(%rsp), %rax
vmovaps 0x140(%rsp), %xmm0
vmovaps %xmm0, 0x90(%rsp)
movq 0x150(%rsp), %rcx
movq %rcx, 0xa0(%rsp)
movl 0x158(%rsp), %ecx
movl %ecx, 0xa8(%rsp)
movq 0x160(%rsp), %rcx
movq %rcx, 0xb0(%rsp)
vmovups 0x168(%rsp), %xmm0
vmovups %xmm0, 0xb8(%rsp)
movl 0x178(%rsp), %ecx
movl %ecx, 0xc8(%rsp)
movq 0x180(%rsp), %rcx
movq %rcx, 0xd0(%rsp)
testq %rax, %rax
je 0x36b930
lock
decl (%rax)
jne 0x36b930
movq 0x140(%rsp), %rsi
movq 0x160(%rsp), %rdi
testq %rdi, %rdi
je 0x36dc41
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x36b930
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x36b930
movq %rax, %rbx
lock
decl (%r14)
jne 0x36dfc8
testq %rbp, %rbp
jne 0x36dc65
jmp 0x36dcef
movq (%rbp), %rax
movq %rbp, %rdi
movq 0x8(%rsp), %rsi
callq *0x18(%rax)
jmp 0x36dfc8
jmp 0x36e00b
movq %rax, %rbx
lock
decl (%r14)
jne 0x36dfc8
testq %rbp, %rbp
jne 0x36dc92
jmp 0x36dcef
movq (%rbp), %rax
movq %rbp, %rdi
movq 0x8(%rsp), %rsi
callq *0x18(%rax)
jmp 0x36dfc8
jmp 0x36e00b
movq %rax, %rbx
lock
decl (%r14)
jne 0x36dfc8
testq %rbp, %rbp
jne 0x36dcbf
jmp 0x36dcef
movq (%rbp), %rax
movq %rbp, %rdi
movq 0x8(%rsp), %rsi
callq *0x18(%rax)
jmp 0x36dfc8
jmp 0x36e00b
jmp 0x36e00b
movq %rax, %rbx
lock
decl (%r14)
jne 0x36dfc8
testq %rbp, %rbp
jne 0x36dcf9
movq 0x8(%rsp), %rdi
jmp 0x36dfbb
movq (%rbp), %rax
movq %rbp, %rdi
movq 0x8(%rsp), %rsi
callq *0x18(%rax)
jmp 0x36dfc8
jmp 0x36e00b
jmp 0x36e00b
jmp 0x36e00b
jmp 0x36e00b
jmp 0x36e00b
jmp 0x36e00b
jmp 0x36e00b
jmp 0x36e00b
jmp 0x36e00b
jmp 0x36e00b
jmp 0x36e00b
jmp 0x36dd6b
jmp 0x36e00b
jmp 0x36de9b
jmp 0x36e00b
jmp 0x36ddd1
jmp 0x36df57
jmp 0x36ddc4
movq %rax, %rbx
jmp 0x36df0c
jmp 0x36de54
movq %rax, %rbx
movq 0x148(%rsp), %rax
testq %rax, %rax
je 0x36dfc8
lock
decl (%rax)
jne 0x36dfc8
movq 0x140(%rsp), %rsi
movq 0x160(%rsp), %rdi
testq %rdi, %rdi
jne 0x36dda2
jmp 0x36dfb8
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x36dfc8
jmp 0x36e00b
jmp 0x36ddc4
jmp 0x36ddc4
jmp 0x36ddc4
jmp 0x36ddc4
jmp 0x36e00b
jmp 0x36de9b
movq %rax, %rbx
jmp 0x36dfc8
jmp 0x36df57
movq %rax, %rbx
movq 0xe8(%rsp), %rax
testq %rax, %rax
je 0x36de0b
lock
decl (%rax)
jne 0x36de0b
movq 0xe0(%rsp), %rsi
movq 0x100(%rsp), %rdi
testq %rdi, %rdi
jne 0x36de05
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x36de0b
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x148(%rsp), %rax
testq %rax, %rax
je 0x36dfc8
lock
decl (%rax)
jne 0x36dfc8
movq 0x140(%rsp), %rsi
movq 0x160(%rsp), %rdi
testq %rdi, %rdi
jne 0x36de3f
jmp 0x36dfb8
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x36dfc8
jmp 0x36e00b
jmp 0x36e00b
movq %rax, %rbx
movq 0x148(%rsp), %rax
testq %rax, %rax
je 0x36dfc8
lock
decl (%rax)
jne 0x36dfc8
movq 0x140(%rsp), %rsi
movq 0x160(%rsp), %rdi
testq %rdi, %rdi
jne 0x36de8b
jmp 0x36dfb8
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x36dfc8
jmp 0x36e00b
movq %rax, %rbx
movq 0x198(%rsp), %rax
testq %rax, %rax
je 0x36ded5
lock
decl (%rax)
jne 0x36ded5
movq 0x190(%rsp), %rsi
movq 0x1b0(%rsp), %rdi
testq %rdi, %rdi
jne 0x36decf
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x36ded5
movq (%rdi), %rax
callq *0x18(%rax)
movq 0xe8(%rsp), %rax
testq %rax, %rax
je 0x36df0c
lock
decl (%rax)
jne 0x36df0c
movq 0xe0(%rsp), %rsi
movq 0x100(%rsp), %rdi
testq %rdi, %rdi
jne 0x36df06
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x36df0c
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x148(%rsp), %rax
testq %rax, %rax
je 0x36dfc8
lock
decl (%rax)
jne 0x36dfc8
movq 0x140(%rsp), %rsi
movq 0x160(%rsp), %rdi
testq %rdi, %rdi
jne 0x36df3d
jmp 0x36dfb8
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x36dfc8
jmp 0x36e00b
jmp 0x36e00b
jmp 0x36e00b
movq %rax, %rbx
movq 0xe8(%rsp), %rax
testq %rax, %rax
je 0x36df91
lock
decl (%rax)
jne 0x36df91
movq 0xe0(%rsp), %rsi
movq 0x100(%rsp), %rdi
testq %rdi, %rdi
jne 0x36df8b
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x36df91
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x148(%rsp), %rax
testq %rax, %rax
je 0x36dfc8
lock
decl (%rax)
jne 0x36dfc8
movq 0x140(%rsp), %rsi
movq 0x160(%rsp), %rdi
testq %rdi, %rdi
jne 0x36dfc2
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x36dfc8
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0x36dfff
lock
decl (%rax)
jne 0x36dfff
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
jne 0x36dff9
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x36dfff
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
jmp 0x36e00b
jmp 0x36e00b
movq %rax, %rdi
callq 0x61d68
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/gemm_x86_fma.cpp |
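The unwind and epilogue paths in the listing above repeat one idiom for every temporary Mat: an atomic `lock decl` on the shared refcount, then either an indirect `callq *0x18(%rax)` through an allocator vtable or a direct call into the deallocator. A minimal sketch of that release logic, assuming a simplified stand-in type (`MatLike`, its fields, and the vtable slot assignment are illustrative reconstructions, not ncnn's exact definitions):

#include <cstdlib>

struct Allocator
{
    virtual ~Allocator() {}
    virtual void* fastMalloc(size_t size) = 0;
    virtual void fastFree(void* ptr) = 0; // plausibly the `callq *0x18(%rax)` slot above
};

struct MatLike
{
    void* data;
    int* refcount;       // shared by all shallow copies of the blob
    Allocator* allocator;

    void release()
    {
        // `lock decl (%rax)`: atomically drop one reference; only the last
        // owner (previous count == 1) actually frees the payload
        if (refcount && __sync_fetch_and_add(refcount, -1) == 1)
        {
            if (allocator)
                allocator->fastFree(data); // virtual call through the vtable
            else
                free(data);                // the direct-call fallback path
        }
        data = 0;
        refcount = 0;
    }
};

Every early-return and exception edge in the assembly funnels through some copy of this sequence, which is why the same short block of instructions recurs so often above.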
virtual thunk to ncnn::Gemm_x86_fma::forward(std::vector<ncnn::Mat, std::allocator<ncnn::Mat>> const&, std::vector<ncnn::Mat, std::allocator<ncnn::Mat>>&, ncnn::Option const&) const | int Gemm_x86_fma::forward(const std::vector<Mat>& bottom_blobs, std::vector<Mat>& top_blobs, const Option& opt) const
{
int M;
int N;
if (constantA && constantB)
{
M = constantM;
N = constantN;
}
else if (constantA)
{
const Mat& B = bottom_blobs[0];
M = constantM;
N = transB ? (B.dims == 3 ? B.c : B.h) * B.elempack : B.w;
}
else if (constantB)
{
const Mat& A = bottom_blobs[0];
M = transA ? A.w : (A.dims == 3 ? A.c : A.h) * A.elempack;
N = constantN;
}
else
{
const Mat& A = bottom_blobs[0];
const Mat& B = bottom_blobs[1];
M = transA ? A.w : (A.dims == 3 ? A.c : A.h) * A.elempack;
N = transB ? (B.dims == 3 ? B.c : B.h) * B.elempack : B.w;
}
Mat C;
int broadcast_type_C = 0;
if (constantC)
{
C = CT_data;
broadcast_type_C = constant_broadcast_type_C;
}
else
{
if (constantA && constantB)
{
C = bottom_blobs.size() == 1 ? bottom_blobs[0] : Mat();
}
else if (constantA)
{
C = bottom_blobs.size() == 2 ? bottom_blobs[1] : Mat();
}
else if (constantB)
{
C = bottom_blobs.size() == 2 ? bottom_blobs[1] : Mat();
}
else
{
C = bottom_blobs.size() == 3 ? bottom_blobs[2] : Mat();
}
if (!C.empty())
{
if (C.dims == 1 && C.w == 1)
{
// scalar
broadcast_type_C = 0;
}
if (C.dims == 1 && C.w * C.elempack == M)
{
// M
// auto broadcast from h to w, following the usual ncnn convention
broadcast_type_C = 1;
}
if (C.dims == 1 && C.w * C.elempack == N)
{
// N
broadcast_type_C = 4;
}
if (C.dims == 2 && C.w == 1 && C.h * C.elempack == M)
{
// Mx1
broadcast_type_C = 2;
}
if (C.dims == 2 && C.w == N && C.h * C.elempack == M)
{
// MxN
broadcast_type_C = 3;
}
if (C.dims == 2 && C.w == N && C.h * C.elempack == 1)
{
// 1xN
broadcast_type_C = 4;
}
// pre-multiply C with beta
if (beta != 1.f)
{
Mat C2;
C2.create_like(C, opt.workspace_allocator);
const int size = C.total() * C.elempack;
for (int i = 0; i < size; i++)
{
C2[i] = C[i] * beta;
}
C = C2;
}
}
}
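    // Taken together: broadcast_type_C encodes how C folds into the MxN
    // result (0 scalar, 1 length-M vector, 2 Mx1 column, 3 full MxN,
    // 4 length-N / 1xN row), and C is pre-scaled by beta above so the
    // gemm kernels only ever have to add it.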
int out_elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
int outh = output_transpose ? N : M;
#if __AVX512F__
out_elempack = outh % 16 == 0 ? 16 : outh % 8 == 0 ? 8 : outh % 4 == 0 ? 4 : 1;
#elif __AVX__
out_elempack = outh % 8 == 0 ? 8 : outh % 4 == 0 ? 4 : 1;
#else
out_elempack = outh % 4 == 0 ? 4 : 1;
#endif
}
#endif // __SSE2__
if (output_elempack)
out_elempack = output_elempack;
size_t out_elemsize = 4u * out_elempack;
Mat& top_blob = top_blobs[0];
if (output_transpose)
{
if (output_N1M)
top_blob.create(M, 1, N / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
else
top_blob.create(M, N / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
}
else
{
if (output_N1M)
top_blob.create(N, 1, M / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
else
top_blob.create(N, M / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
}
if (top_blob.empty())
return -100;
int _nT = nT ? nT : opt.num_threads;
if (nT != 0 && opt.num_threads != nT)
{
// force num_threads the same as in create_pipeline
// so we could use pre-packed A/B from the same tile config
NCNN_LOGE("opt.num_threads %d changed, gemm will use load-time value %d", opt.num_threads, nT);
}
int ret = 0;
if (constantA && constantB)
{
ret = gemm_AT_BT_x86(AT_data, BT_data, C, top_blob, broadcast_type_C, constantM, constantN, constantK, output_transpose, constant_TILE_M, constant_TILE_N, constant_TILE_K, _nT, opt);
}
else if (constantA)
{
const Mat& B = bottom_blobs[0];
ret = gemm_AT_x86(AT_data, B, C, top_blob, broadcast_type_C, constantM, constantK, transB, output_transpose, constant_TILE_M, constant_TILE_N, constant_TILE_K, _nT, opt);
}
else if (constantB)
{
const Mat& A = bottom_blobs[0];
ret = gemm_BT_x86(A, BT_data, C, top_blob, broadcast_type_C, constantN, constantK, transA, output_transpose, constant_TILE_M, constant_TILE_N, constant_TILE_K, _nT, opt);
}
else
{
const Mat& A = bottom_blobs[0];
const Mat& B = bottom_blobs[1];
ret = gemm_x86(A, B, C, top_blob, broadcast_type_C, transA, transB, output_transpose, constant_TILE_M, constant_TILE_N, constant_TILE_K, _nT, opt);
}
// multiply top_blob with alpha
if (alpha != 1.f)
{
const int size = top_blob.total() * out_elempack;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = 0; i < size; i++)
{
top_blob[i] *= alpha;
}
}
return ret;
} | movq (%rdi), %rax
addq -0x40(%rax), %rdi
jmp 0x36b402
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/gemm_x86_fma.cpp |
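For reference, the output packing decision at the top of forward() reduces to a few modulus tests. A hedged standalone sketch of just that decision for this AVX (non-AVX512) build — pick_out_elempack is an illustrative name, not an ncnn function:

int pick_out_elempack(int M, int N, bool output_transpose, bool use_packing_layout)
{
    int out_elempack = 1;
    if (use_packing_layout)
    {
        // pack along the output height: N rows when transposed, else M rows
        int outh = output_transpose ? N : M;
        out_elempack = outh % 8 == 0 ? 8 : outh % 4 == 0 ? 4 : 1;
    }
    return out_elempack;
}

// e.g. M = 12, output_transpose = false: 12 % 8 != 0 but 12 % 4 == 0 -> pack4,
// so the top blob is created with out_elemsize 4u * 4 = 16u and 12 / 4 = 3 packed rows.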
ncnn::transpose_pack_B_tile(ncnn::Mat const&, ncnn::Mat&, int, int, int, int) | static void transpose_pack_B_tile(const Mat& B, Mat& BT, int j, int max_jj, int k, int max_kk)
{
const int elempack = B.elempack;
const int B_hstep = B.dims == 3 ? (int)B.cstep : B.w;
float* pp = BT;
int jj = 0;
#if __SSE2__
for (; jj + 11 < max_jj; jj += 12)
{
#if __AVX__
#if __AVX512F__
if (elempack == 16)
{
const float* p0 = (const float*)B + k * B_hstep + (j + jj) * 16;
int kk = 0;
for (; kk + 15 < max_kk; kk += 16)
{
__m512 _r0 = _mm512_load_ps(p0);
__m512 _r1 = _mm512_load_ps(p0 + 16 * 1);
__m512 _r2 = _mm512_load_ps(p0 + 16 * 2);
__m512 _r3 = _mm512_load_ps(p0 + 16 * 3);
__m512 _r4 = _mm512_load_ps(p0 + 16 * 4);
__m512 _r5 = _mm512_load_ps(p0 + 16 * 5);
__m512 _r6 = _mm512_load_ps(p0 + 16 * 6);
__m512 _r7 = _mm512_load_ps(p0 + 16 * 7);
__m512 _r8 = _mm512_load_ps(p0 + 16 * 8);
__m512 _r9 = _mm512_load_ps(p0 + 16 * 9);
__m512 _ra = _mm512_load_ps(p0 + 16 * 10);
__m512 _rb = _mm512_load_ps(p0 + 16 * 11);
transpose16x12_ps(_r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7, _r8, _r9, _ra, _rb);
_mm512_store_ps(pp, _r0);
_mm512_store_ps(pp + 16 * 1, _r1);
_mm512_store_ps(pp + 16 * 2, _r2);
_mm512_store_ps(pp + 16 * 3, _r3);
_mm512_store_ps(pp + 16 * 4, _r4);
_mm512_store_ps(pp + 16 * 5, _r5);
_mm512_store_ps(pp + 16 * 6, _r6);
_mm512_store_ps(pp + 16 * 7, _r7);
_mm512_store_ps(pp + 16 * 8, _r8);
_mm512_store_ps(pp + 16 * 9, _r9);
_mm512_store_ps(pp + 16 * 10, _ra);
_mm512_store_ps(pp + 16 * 11, _rb);
pp += 192;
p0 += B_hstep * 16;
}
}
#endif // __AVX512F__
if (elempack == 8)
{
const float* p0 = (const float*)B + k * B_hstep + (j + jj) * 8;
int kk = 0;
for (; kk + 7 < max_kk; kk += 8)
{
__m256 _r0 = _mm256_load_ps(p0);
__m256 _r1 = _mm256_load_ps(p0 + 8 * 1);
__m256 _r2 = _mm256_load_ps(p0 + 8 * 2);
__m256 _r3 = _mm256_load_ps(p0 + 8 * 3);
__m256 _r4 = _mm256_load_ps(p0 + 8 * 4);
__m256 _r5 = _mm256_load_ps(p0 + 8 * 5);
__m256 _r6 = _mm256_load_ps(p0 + 8 * 6);
__m256 _r7 = _mm256_load_ps(p0 + 8 * 7);
__m256 _r8 = _mm256_load_ps(p0 + 8 * 8);
__m256 _r9 = _mm256_load_ps(p0 + 8 * 9);
__m256 _ra = _mm256_load_ps(p0 + 8 * 10);
__m256 _rb = _mm256_load_ps(p0 + 8 * 11);
transpose8x12_ps(_r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7, _r8, _r9, _ra, _rb);
_mm256_store_ps(pp, _r0);
_mm256_store_ps(pp + 8 * 1, _r1);
_mm256_store_ps(pp + 8 * 2, _r2);
_mm256_store_ps(pp + 8 * 3, _r3);
_mm256_store_ps(pp + 8 * 4, _r4);
_mm256_store_ps(pp + 8 * 5, _r5);
_mm256_store_ps(pp + 8 * 6, _r6);
_mm256_store_ps(pp + 8 * 7, _r7);
_mm256_store_ps(pp + 8 * 8, _r8);
_mm256_store_ps(pp + 8 * 9, _r9);
_mm256_store_ps(pp + 8 * 10, _ra);
_mm256_store_ps(pp + 8 * 11, _rb);
pp += 96;
p0 += B_hstep * 8;
}
}
#endif // __AVX__
if (elempack == 4)
{
const float* p0 = (const float*)B + k * B_hstep + (j + jj) * 4;
int kk = 0;
for (; kk + 3 < max_kk; kk += 4)
{
__m128 _r0 = _mm_load_ps(p0);
__m128 _r1 = _mm_load_ps(p0 + 4 * 1);
__m128 _r2 = _mm_load_ps(p0 + 4 * 2);
__m128 _r3 = _mm_load_ps(p0 + 4 * 3);
__m128 _r4 = _mm_load_ps(p0 + 4 * 4);
__m128 _r5 = _mm_load_ps(p0 + 4 * 5);
__m128 _r6 = _mm_load_ps(p0 + 4 * 6);
__m128 _r7 = _mm_load_ps(p0 + 4 * 7);
__m128 _r8 = _mm_load_ps(p0 + 4 * 8);
__m128 _r9 = _mm_load_ps(p0 + 4 * 9);
__m128 _ra = _mm_load_ps(p0 + 4 * 10);
__m128 _rb = _mm_load_ps(p0 + 4 * 11);
_MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3);
_MM_TRANSPOSE4_PS(_r4, _r5, _r6, _r7);
_MM_TRANSPOSE4_PS(_r8, _r9, _ra, _rb);
_mm_store_ps(pp, _r0);
_mm_store_ps(pp + 4 * 1, _r4);
_mm_store_ps(pp + 4 * 2, _r8);
_mm_store_ps(pp + 4 * 3, _r1);
_mm_store_ps(pp + 4 * 4, _r5);
_mm_store_ps(pp + 4 * 5, _r9);
_mm_store_ps(pp + 4 * 6, _r2);
_mm_store_ps(pp + 4 * 7, _r6);
_mm_store_ps(pp + 4 * 8, _ra);
_mm_store_ps(pp + 4 * 9, _r3);
_mm_store_ps(pp + 4 * 10, _r7);
_mm_store_ps(pp + 4 * 11, _rb);
pp += 48;
p0 += B_hstep * 4;
}
}
if (elempack == 1)
{
const float* p0 = (const float*)B + k * B_hstep + (j + jj);
int kk = 0;
for (; kk < max_kk; kk++)
{
_mm_store_ps(pp, _mm_loadu_ps(p0));
_mm_store_ps(pp + 4, _mm_loadu_ps(p0 + 4));
_mm_store_ps(pp + 8, _mm_loadu_ps(p0 + 8));
pp += 12;
p0 += B_hstep;
}
}
}
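    // (The jj += 12 strip above is the widest B tile: for each k step it emits
    //  12 consecutive j values into BT, so the gemm microkernel can stream one
    //  contiguous 12-float row of packed B per multiply-accumulate. The 8/4/2/1
    //  strips below handle the max_jj remainder with the same layout.)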
for (; jj + 7 < max_jj; jj += 8)
{
#if __AVX__
#if __AVX512F__
if (elempack == 16)
{
const float* p0 = (const float*)B + k * B_hstep + (j + jj) * 16;
int kk = 0;
for (; kk + 15 < max_kk; kk += 16)
{
__m512 _r0 = _mm512_load_ps(p0);
__m512 _r1 = _mm512_load_ps(p0 + 16 * 1);
__m512 _r2 = _mm512_load_ps(p0 + 16 * 2);
__m512 _r3 = _mm512_load_ps(p0 + 16 * 3);
__m512 _r4 = _mm512_load_ps(p0 + 16 * 4);
__m512 _r5 = _mm512_load_ps(p0 + 16 * 5);
__m512 _r6 = _mm512_load_ps(p0 + 16 * 6);
__m512 _r7 = _mm512_load_ps(p0 + 16 * 7);
transpose16x8_ps(_r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7);
_mm512_store_ps(pp, _r0);
_mm512_store_ps(pp + 16 * 1, _r1);
_mm512_store_ps(pp + 16 * 2, _r2);
_mm512_store_ps(pp + 16 * 3, _r3);
_mm512_store_ps(pp + 16 * 4, _r4);
_mm512_store_ps(pp + 16 * 5, _r5);
_mm512_store_ps(pp + 16 * 6, _r6);
_mm512_store_ps(pp + 16 * 7, _r7);
pp += 128;
p0 += B_hstep * 16;
}
}
#endif // __AVX512F__
if (elempack == 8)
{
const float* p0 = (const float*)B + k * B_hstep + (j + jj) * 8;
int kk = 0;
for (; kk + 7 < max_kk; kk += 8)
{
__m256 _r0 = _mm256_load_ps(p0);
__m256 _r1 = _mm256_load_ps(p0 + 8 * 1);
__m256 _r2 = _mm256_load_ps(p0 + 8 * 2);
__m256 _r3 = _mm256_load_ps(p0 + 8 * 3);
__m256 _r4 = _mm256_load_ps(p0 + 8 * 4);
__m256 _r5 = _mm256_load_ps(p0 + 8 * 5);
__m256 _r6 = _mm256_load_ps(p0 + 8 * 6);
__m256 _r7 = _mm256_load_ps(p0 + 8 * 7);
transpose8x8_ps(_r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7);
_mm256_store_ps(pp, _r0);
_mm256_store_ps(pp + 8 * 1, _r1);
_mm256_store_ps(pp + 8 * 2, _r2);
_mm256_store_ps(pp + 8 * 3, _r3);
_mm256_store_ps(pp + 8 * 4, _r4);
_mm256_store_ps(pp + 8 * 5, _r5);
_mm256_store_ps(pp + 8 * 6, _r6);
_mm256_store_ps(pp + 8 * 7, _r7);
pp += 64;
p0 += B_hstep * 8;
}
}
#endif // __AVX__
if (elempack == 4)
{
const float* p0 = (const float*)B + k * B_hstep + (j + jj) * 4;
int kk = 0;
for (; kk + 3 < max_kk; kk += 4)
{
__m128 _r0 = _mm_load_ps(p0);
__m128 _r1 = _mm_load_ps(p0 + 4 * 1);
__m128 _r2 = _mm_load_ps(p0 + 4 * 2);
__m128 _r3 = _mm_load_ps(p0 + 4 * 3);
__m128 _r4 = _mm_load_ps(p0 + 4 * 4);
__m128 _r5 = _mm_load_ps(p0 + 4 * 5);
__m128 _r6 = _mm_load_ps(p0 + 4 * 6);
__m128 _r7 = _mm_load_ps(p0 + 4 * 7);
_MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3);
_MM_TRANSPOSE4_PS(_r4, _r5, _r6, _r7);
_mm_store_ps(pp, _r0);
_mm_store_ps(pp + 4 * 1, _r4);
_mm_store_ps(pp + 4 * 2, _r1);
_mm_store_ps(pp + 4 * 3, _r5);
_mm_store_ps(pp + 4 * 4, _r2);
_mm_store_ps(pp + 4 * 5, _r6);
_mm_store_ps(pp + 4 * 6, _r3);
_mm_store_ps(pp + 4 * 7, _r7);
pp += 32;
p0 += B_hstep * 4;
}
}
if (elempack == 1)
{
const float* p0 = (const float*)B + k * B_hstep + (j + jj);
int kk = 0;
for (; kk < max_kk; kk++)
{
_mm_store_ps(pp, _mm_loadu_ps(p0));
_mm_store_ps(pp + 4, _mm_loadu_ps(p0 + 4));
pp += 8;
p0 += B_hstep;
}
}
}
for (; jj + 3 < max_jj; jj += 4)
{
#if __AVX__
#if __AVX512F__
if (elempack == 16)
{
const float* p0 = (const float*)B + k * B_hstep + (j + jj) * 16;
int kk = 0;
for (; kk + 15 < max_kk; kk += 16)
{
__m512 _r0 = _mm512_load_ps(p0);
__m512 _r1 = _mm512_load_ps(p0 + 16 * 1);
__m512 _r2 = _mm512_load_ps(p0 + 16 * 2);
__m512 _r3 = _mm512_load_ps(p0 + 16 * 3);
transpose16x4_ps(_r0, _r1, _r2, _r3);
_mm512_store_ps(pp, _r0);
_mm512_store_ps(pp + 16 * 1, _r1);
_mm512_store_ps(pp + 16 * 2, _r2);
_mm512_store_ps(pp + 16 * 3, _r3);
pp += 64;
p0 += B_hstep * 16;
}
}
#endif // __AVX512F__
if (elempack == 8)
{
const float* p0 = (const float*)B + k * B_hstep + (j + jj) * 8;
int kk = 0;
for (; kk + 7 < max_kk; kk += 8)
{
__m256 _r0 = _mm256_load_ps(p0);
__m256 _r1 = _mm256_load_ps(p0 + 8 * 1);
__m256 _r2 = _mm256_load_ps(p0 + 8 * 2);
__m256 _r3 = _mm256_load_ps(p0 + 8 * 3);
transpose8x4_ps(_r0, _r1, _r2, _r3);
_mm256_store_ps(pp, _r0);
_mm256_store_ps(pp + 8 * 1, _r1);
_mm256_store_ps(pp + 8 * 2, _r2);
_mm256_store_ps(pp + 8 * 3, _r3);
pp += 32;
p0 += B_hstep * 8;
}
}
#endif // __AVX__
if (elempack == 4)
{
const float* p0 = (const float*)B + k * B_hstep + (j + jj) * 4;
int kk = 0;
for (; kk + 3 < max_kk; kk += 4)
{
__m128 _r0 = _mm_load_ps(p0);
__m128 _r1 = _mm_load_ps(p0 + 4 * 1);
__m128 _r2 = _mm_load_ps(p0 + 4 * 2);
__m128 _r3 = _mm_load_ps(p0 + 4 * 3);
_MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3);
_mm_store_ps(pp, _r0);
_mm_store_ps(pp + 4 * 1, _r1);
_mm_store_ps(pp + 4 * 2, _r2);
_mm_store_ps(pp + 4 * 3, _r3);
pp += 16;
p0 += B_hstep * 4;
}
}
if (elempack == 1)
{
const float* p0 = (const float*)B + k * B_hstep + (j + jj);
int kk = 0;
for (; kk < max_kk; kk++)
{
_mm_store_ps(pp, _mm_loadu_ps(p0));
pp += 4;
p0 += B_hstep;
}
}
}
#endif // __SSE2__
for (; jj + 1 < max_jj; jj += 2)
{
#if __SSE2__
#if __AVX__
#if __AVX512F__
if (elempack == 16)
{
const float* p0 = (const float*)B + k * B_hstep + (j + jj) * 16;
int kk = 0;
for (; kk + 15 < max_kk; kk += 16)
{
__m512 _r0 = _mm512_load_ps(p0);
__m512 _r1 = _mm512_load_ps(p0 + 16);
transpose16x2_ps(_r0, _r1);
_mm512_store_ps(pp, _r0);
_mm512_store_ps(pp + 16, _r1);
pp += 32;
p0 += B_hstep * 16;
}
}
#endif // __AVX512F__
if (elempack == 8)
{
const float* p0 = (const float*)B + k * B_hstep + (j + jj) * 8;
int kk = 0;
for (; kk + 7 < max_kk; kk += 8)
{
__m256 _r0 = _mm256_load_ps(p0);
__m256 _r1 = _mm256_load_ps(p0 + 8);
transpose8x2_ps(_r0, _r1);
_mm256_store_ps(pp, _r0);
_mm256_store_ps(pp + 8, _r1);
pp += 16;
p0 += B_hstep * 8;
}
}
#endif // __AVX__
if (elempack == 4)
{
const float* p0 = (const float*)B + k * B_hstep + (j + jj) * 4;
int kk = 0;
for (; kk + 3 < max_kk; kk += 4)
{
__m128 _r0 = _mm_load_ps(p0);
__m128 _r1 = _mm_load_ps(p0 + 4);
__m128 _tmp0 = _mm_unpacklo_ps(_r0, _r1);
__m128 _tmp1 = _mm_unpackhi_ps(_r0, _r1);
_mm_store_ps(pp, _tmp0);
_mm_store_ps(pp + 4, _tmp1);
pp += 8;
p0 += B_hstep * 4;
}
}
#endif // __SSE2__
if (elempack == 1)
{
const float* p0 = (const float*)B + k * B_hstep + (j + jj);
int kk = 0;
for (; kk < max_kk; kk++)
{
pp[0] = p0[0];
pp[1] = p0[1];
pp += 2;
p0 += B_hstep;
}
}
}
for (; jj < max_jj; jj += 1)
{
#if __SSE2__
#if __AVX__
#if __AVX512F__
if (elempack == 16)
{
const float* p0 = (const float*)B + k * B_hstep + (j + jj) * 16;
int kk = 0;
for (; kk + 15 < max_kk; kk += 16)
{
_mm512_store_ps(pp, _mm512_load_ps(p0));
pp += 16;
p0 += B_hstep * 16;
}
}
#endif // __AVX512F__
if (elempack == 8)
{
const float* p0 = (const float*)B + k * B_hstep + (j + jj) * 8;
int kk = 0;
for (; kk + 7 < max_kk; kk += 8)
{
_mm256_store_ps(pp, _mm256_load_ps(p0));
pp += 8;
p0 += B_hstep * 8;
}
}
#endif // __AVX__
if (elempack == 4)
{
const float* p0 = (const float*)B + k * B_hstep + (j + jj) * 4;
int kk = 0;
for (; kk + 3 < max_kk; kk += 4)
{
_mm_store_ps(pp, _mm_load_ps(p0));
pp += 4;
p0 += B_hstep * 4;
}
}
#endif // __SSE2__
if (elempack == 1)
{
const float* p0 = (const float*)B + k * B_hstep + (j + jj);
int kk = 0;
for (; kk < max_kk; kk++)
{
pp[0] = p0[0];
pp += 1;
p0 += B_hstep;
}
}
}
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
movl %ecx, -0x60(%rsp)
movq %rdi, %rbp
leaq 0x2c(%rdi), %rax
leaq 0x40(%rdi), %rcx
cmpl $0x3, 0x28(%rdi)
cmoveq %rcx, %rax
movslq (%rax), %rax
imull %eax, %r8d
xorl %r10d, %r10d
testl %r9d, %r9d
movl $0x0, %ecx
cmovgl %r9d, %ecx
movl %ecx, -0x54(%rsp)
movslq %r8d, %rcx
movslq %edx, %r8
leaq (,%rcx,4), %r11
movq %r8, %rdi
leaq (%r11,%r8,4), %rdx
leaq 0x4(%r11,%r8,4), %r12
leaq 0x10(%r11,%r8,4), %rbx
movq %rbx, -0x20(%rsp)
leaq (%r11,%r8,4), %r11
addq $0x20, %r11
movq %r11, -0x50(%rsp)
movq %r8, %r11
shlq $0x5, %r11
leaq (%r11,%rcx,4), %r8
shlq $0x4, %rdi
leaq (%r11,%rcx,4), %r13
addq $0x20, %r13
leaq 0x60(%r11,%rcx,4), %rbx
movq %rbx, -0x38(%rsp)
leaq 0xe0(%r11,%rcx,4), %rbx
movq %rbx, -0x28(%rsp)
leaq 0x160(%r11,%rcx,4), %r11
movq %r11, -0x10(%rsp)
leaq (%rdi,%rcx,4), %rbx
leaq 0x10(%rdi,%rcx,4), %r11
leaq 0x30(%rdi,%rcx,4), %r14
movq %r14, -0x40(%rsp)
leaq 0x70(%rdi,%rcx,4), %r14
movq %r14, -0x30(%rsp)
leaq 0xb0(%rdi,%rcx,4), %rcx
movq %rcx, -0x18(%rsp)
movq (%rsi), %rsi
leal (,%rax,8), %ecx
movslq %ecx, %r14
leal (,%rax,4), %ecx
movslq %ecx, %r15
movslq -0x60(%rsp), %rcx
shlq $0x2, %r14
shlq $0x2, %r15
shlq $0x2, %rax
movq %rbp, -0x60(%rsp)
movl 0x18(%rbp), %ebp
movq %rcx, -0x48(%rsp)
addq $-0xb, %rcx
movq %rcx, -0x8(%rsp)
cmpq -0x8(%rsp), %r10
jge 0x373fde
cmpl $0x8, %ebp
jne 0x373e20
movq -0x60(%rsp), %rcx
movq (%rcx), %rdi
addq -0x10(%rsp), %rdi
movl $0x7, %ecx
cmpl %r9d, %ecx
jge 0x373e20
vmovaps -0x160(%rdi), %ymm0
vmovaps -0x140(%rdi), %ymm1
vmovaps -0x120(%rdi), %ymm2
vmovaps -0x100(%rdi), %ymm3
vmovaps -0xe0(%rdi), %ymm4
vmovaps -0xc0(%rdi), %ymm5
vmovaps -0xa0(%rdi), %ymm6
vmovaps -0x80(%rdi), %ymm7
vmovaps -0x60(%rdi), %ymm8
vmovaps -0x40(%rdi), %ymm9
vmovaps -0x20(%rdi), %ymm10
vmovaps (%rdi), %ymm11
vunpcklps %ymm1, %ymm0, %ymm12 # ymm12 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
vunpckhps %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
vunpcklps %ymm3, %ymm2, %ymm1 # ymm1 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
vunpckhps %ymm3, %ymm2, %ymm2 # ymm2 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
vunpcklps %ymm5, %ymm4, %ymm3 # ymm3 = ymm4[0],ymm5[0],ymm4[1],ymm5[1],ymm4[4],ymm5[4],ymm4[5],ymm5[5]
vunpckhps %ymm5, %ymm4, %ymm4 # ymm4 = ymm4[2],ymm5[2],ymm4[3],ymm5[3],ymm4[6],ymm5[6],ymm4[7],ymm5[7]
vunpcklps %ymm7, %ymm6, %ymm5 # ymm5 = ymm6[0],ymm7[0],ymm6[1],ymm7[1],ymm6[4],ymm7[4],ymm6[5],ymm7[5]
vunpckhps %ymm7, %ymm6, %ymm6 # ymm6 = ymm6[2],ymm7[2],ymm6[3],ymm7[3],ymm6[6],ymm7[6],ymm6[7],ymm7[7]
vunpcklps %ymm9, %ymm8, %ymm7 # ymm7 = ymm8[0],ymm9[0],ymm8[1],ymm9[1],ymm8[4],ymm9[4],ymm8[5],ymm9[5]
vunpckhps %ymm9, %ymm8, %ymm8 # ymm8 = ymm8[2],ymm9[2],ymm8[3],ymm9[3],ymm8[6],ymm9[6],ymm8[7],ymm9[7]
vunpcklps %ymm11, %ymm10, %ymm9 # ymm9 = ymm10[0],ymm11[0],ymm10[1],ymm11[1],ymm10[4],ymm11[4],ymm10[5],ymm11[5]
vunpckhps %ymm11, %ymm10, %ymm10 # ymm10 = ymm10[2],ymm11[2],ymm10[3],ymm11[3],ymm10[6],ymm11[6],ymm10[7],ymm11[7]
vunpcklpd %ymm1, %ymm12, %ymm11 # ymm11 = ymm12[0],ymm1[0],ymm12[2],ymm1[2]
vunpckhpd %ymm1, %ymm12, %ymm1 # ymm1 = ymm12[1],ymm1[1],ymm12[3],ymm1[3]
vunpcklpd %ymm2, %ymm0, %ymm12 # ymm12 = ymm0[0],ymm2[0],ymm0[2],ymm2[2]
vunpckhpd %ymm2, %ymm0, %ymm0 # ymm0 = ymm0[1],ymm2[1],ymm0[3],ymm2[3]
vunpcklpd %ymm5, %ymm3, %ymm2 # ymm2 = ymm3[0],ymm5[0],ymm3[2],ymm5[2]
vunpckhpd %ymm5, %ymm3, %ymm3 # ymm3 = ymm3[1],ymm5[1],ymm3[3],ymm5[3]
vunpcklpd %ymm6, %ymm4, %ymm5 # ymm5 = ymm4[0],ymm6[0],ymm4[2],ymm6[2]
vunpckhpd %ymm6, %ymm4, %ymm4 # ymm4 = ymm4[1],ymm6[1],ymm4[3],ymm6[3]
vunpcklpd %ymm9, %ymm7, %ymm6 # ymm6 = ymm7[0],ymm9[0],ymm7[2],ymm9[2]
vunpckhpd %ymm9, %ymm7, %ymm7 # ymm7 = ymm7[1],ymm9[1],ymm7[3],ymm9[3]
vunpcklpd %ymm10, %ymm8, %ymm9 # ymm9 = ymm8[0],ymm10[0],ymm8[2],ymm10[2]
vunpckhpd %ymm10, %ymm8, %ymm8 # ymm8 = ymm8[1],ymm10[1],ymm8[3],ymm10[3]
vinsertf128 $0x1, %xmm2, %ymm11, %ymm10
vperm2f128 $0x31, %ymm2, %ymm11, %ymm2 # ymm2 = ymm11[2,3],ymm2[2,3]
vinsertf128 $0x1, %xmm1, %ymm6, %ymm11
vperm2f128 $0x31, %ymm1, %ymm6, %ymm1 # ymm1 = ymm6[2,3],ymm1[2,3]
vinsertf128 $0x1, %xmm7, %ymm3, %ymm6
vperm2f128 $0x31, %ymm7, %ymm3, %ymm3 # ymm3 = ymm3[2,3],ymm7[2,3]
vinsertf128 $0x1, %xmm5, %ymm12, %ymm7
vperm2f128 $0x31, %ymm5, %ymm12, %ymm5 # ymm5 = ymm12[2,3],ymm5[2,3]
vinsertf128 $0x1, %xmm0, %ymm9, %ymm12
vperm2f128 $0x31, %ymm0, %ymm9, %ymm0 # ymm0 = ymm9[2,3],ymm0[2,3]
vinsertf128 $0x1, %xmm8, %ymm4, %ymm9
vperm2f128 $0x31, %ymm8, %ymm4, %ymm4 # ymm4 = ymm4[2,3],ymm8[2,3]
vmovaps %ymm10, (%rsi)
vmovaps %ymm11, 0x20(%rsi)
vmovaps %ymm6, 0x40(%rsi)
vmovaps %ymm7, 0x60(%rsi)
vmovaps %ymm12, 0x80(%rsi)
vmovaps %ymm9, 0xa0(%rsi)
vmovaps %ymm2, 0xc0(%rsi)
vmovaps %ymm1, 0xe0(%rsi)
vmovaps %ymm3, 0x100(%rsi)
vmovaps %ymm5, 0x120(%rsi)
vmovaps %ymm0, 0x140(%rsi)
vmovaps %ymm4, 0x160(%rsi)
addq $0x180, %rsi # imm = 0x180
addl $0x8, %ecx
addq %r14, %rdi
jmp 0x373cb2
cmpl $0x4, %ebp
jne 0x373f4d
movq -0x60(%rsp), %rcx
movq (%rcx), %rcx
addq -0x18(%rsp), %rcx
movl $0x3, %edi
cmpl %r9d, %edi
jge 0x373f4d
vmovaps -0xb0(%rcx), %xmm0
vmovaps -0xa0(%rcx), %xmm1
vmovaps -0x90(%rcx), %xmm2
vmovaps -0x80(%rcx), %xmm3
vmovaps -0x70(%rcx), %xmm4
vmovaps -0x60(%rcx), %xmm5
vmovaps -0x50(%rcx), %xmm6
vmovaps -0x40(%rcx), %xmm7
vmovaps -0x30(%rcx), %xmm8
vmovaps -0x20(%rcx), %xmm9
vmovaps -0x10(%rcx), %xmm10
vmovaps (%rcx), %xmm11
vunpcklps %xmm1, %xmm0, %xmm12 # xmm12 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vunpcklps %xmm3, %xmm2, %xmm13 # xmm13 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
vunpckhps %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
vunpckhps %xmm3, %xmm2, %xmm1 # xmm1 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
vmovlhps %xmm13, %xmm12, %xmm2 # xmm2 = xmm12[0],xmm13[0]
vunpckhpd %xmm13, %xmm12, %xmm3 # xmm3 = xmm12[1],xmm13[1]
vmovlhps %xmm1, %xmm0, %xmm12 # xmm12 = xmm0[0],xmm1[0]
vunpckhpd %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[1],xmm1[1]
vunpcklps %xmm5, %xmm4, %xmm1 # xmm1 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
vunpcklps %xmm7, %xmm6, %xmm13 # xmm13 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
vunpckhps %xmm5, %xmm4, %xmm4 # xmm4 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
vunpckhps %xmm7, %xmm6, %xmm5 # xmm5 = xmm6[2],xmm7[2],xmm6[3],xmm7[3]
vmovlhps %xmm13, %xmm1, %xmm6 # xmm6 = xmm1[0],xmm13[0]
vunpckhpd %xmm13, %xmm1, %xmm1 # xmm1 = xmm1[1],xmm13[1]
vmovlhps %xmm5, %xmm4, %xmm7 # xmm7 = xmm4[0],xmm5[0]
vunpckhpd %xmm5, %xmm4, %xmm4 # xmm4 = xmm4[1],xmm5[1]
vunpcklps %xmm9, %xmm8, %xmm5 # xmm5 = xmm8[0],xmm9[0],xmm8[1],xmm9[1]
vunpcklps %xmm11, %xmm10, %xmm13 # xmm13 = xmm10[0],xmm11[0],xmm10[1],xmm11[1]
vunpckhps %xmm9, %xmm8, %xmm8 # xmm8 = xmm8[2],xmm9[2],xmm8[3],xmm9[3]
vunpckhps %xmm11, %xmm10, %xmm9 # xmm9 = xmm10[2],xmm11[2],xmm10[3],xmm11[3]
vmovlhps %xmm13, %xmm5, %xmm10 # xmm10 = xmm5[0],xmm13[0]
vunpckhpd %xmm13, %xmm5, %xmm5 # xmm5 = xmm5[1],xmm13[1]
vmovlhps %xmm9, %xmm8, %xmm11 # xmm11 = xmm8[0],xmm9[0]
vunpckhpd %xmm9, %xmm8, %xmm8 # xmm8 = xmm8[1],xmm9[1]
vmovaps %xmm2, (%rsi)
vmovaps %xmm6, 0x10(%rsi)
vmovaps %xmm10, 0x20(%rsi)
vmovaps %xmm3, 0x30(%rsi)
vmovaps %xmm1, 0x40(%rsi)
vmovaps %xmm5, 0x50(%rsi)
vmovaps %xmm12, 0x60(%rsi)
vmovaps %xmm7, 0x70(%rsi)
vmovaps %xmm11, 0x80(%rsi)
vmovaps %xmm0, 0x90(%rsi)
vmovaps %xmm4, 0xa0(%rsi)
vmovaps %xmm8, 0xb0(%rsi)
addq $0xc0, %rsi
addl $0x4, %edi
addq %r15, %rcx
jmp 0x373e3b
cmpl $0x1, %ebp
jne 0x373f8d
movq -0x60(%rsp), %rcx
movq (%rcx), %rcx
addq -0x50(%rsp), %rcx
movl -0x54(%rsp), %edi
subl $0x1, %edi
jb 0x373f8d
vmovups -0x20(%rcx), %xmm0
vmovaps %xmm0, (%rsi)
vmovups -0x10(%rcx), %xmm0
vmovaps %xmm0, 0x10(%rsi)
vmovups (%rcx), %xmm0
vmovaps %xmm0, 0x20(%rsi)
addq $0x30, %rsi
addq %rax, %rcx
jmp 0x373f63
addq $0xc, %r10
movl $0x180, %ecx # imm = 0x180
addq %rcx, %r8
movl $0xc0, %edi
addq %rdi, %rbx
addq $0x30, %rdx
addq %rcx, %r13
addq %rdi, %r11
addq $0x30, %r12
addq %rcx, -0x38(%rsp)
addq %rdi, -0x40(%rsp)
addq %rcx, -0x28(%rsp)
addq %rdi, -0x30(%rsp)
addq $0x30, -0x20(%rsp)
addq %rcx, -0x10(%rsp)
addq %rdi, -0x18(%rsp)
addq $0x30, -0x50(%rsp)
jmp 0x373c8c
movq -0x48(%rsp), %rcx
addq $-0x7, %rcx
movq %rcx, -0x50(%rsp)
cmpq -0x50(%rsp), %r10
jge 0x374388
cmpl $0x8, %ebp
jne 0x374100
movq -0x60(%rsp), %rcx
movq (%rcx), %rcx
addq -0x28(%rsp), %rcx
movl $0x7, %edi
cmpl %r9d, %edi
jge 0x374100
vmovaps -0xe0(%rcx), %ymm0
vmovaps -0xc0(%rcx), %ymm1
vmovaps -0xa0(%rcx), %ymm2
vmovaps -0x80(%rcx), %ymm3
vmovaps -0x60(%rcx), %ymm4
vmovaps -0x40(%rcx), %ymm5
vmovaps -0x20(%rcx), %ymm6
vmovaps (%rcx), %ymm7
vunpcklps %ymm1, %ymm0, %ymm8 # ymm8 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
vunpckhps %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
vunpcklps %ymm3, %ymm2, %ymm1 # ymm1 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
vunpckhps %ymm3, %ymm2, %ymm2 # ymm2 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
vunpcklps %ymm5, %ymm4, %ymm3 # ymm3 = ymm4[0],ymm5[0],ymm4[1],ymm5[1],ymm4[4],ymm5[4],ymm4[5],ymm5[5]
vunpckhps %ymm5, %ymm4, %ymm4 # ymm4 = ymm4[2],ymm5[2],ymm4[3],ymm5[3],ymm4[6],ymm5[6],ymm4[7],ymm5[7]
vunpcklps %ymm7, %ymm6, %ymm5 # ymm5 = ymm6[0],ymm7[0],ymm6[1],ymm7[1],ymm6[4],ymm7[4],ymm6[5],ymm7[5]
vunpckhps %ymm7, %ymm6, %ymm6 # ymm6 = ymm6[2],ymm7[2],ymm6[3],ymm7[3],ymm6[6],ymm7[6],ymm6[7],ymm7[7]
vunpcklpd %ymm1, %ymm8, %ymm7 # ymm7 = ymm8[0],ymm1[0],ymm8[2],ymm1[2]
vunpckhpd %ymm1, %ymm8, %ymm1 # ymm1 = ymm8[1],ymm1[1],ymm8[3],ymm1[3]
vunpcklpd %ymm2, %ymm0, %ymm8 # ymm8 = ymm0[0],ymm2[0],ymm0[2],ymm2[2]
vunpckhpd %ymm2, %ymm0, %ymm0 # ymm0 = ymm0[1],ymm2[1],ymm0[3],ymm2[3]
vunpcklpd %ymm5, %ymm3, %ymm2 # ymm2 = ymm3[0],ymm5[0],ymm3[2],ymm5[2]
vunpckhpd %ymm5, %ymm3, %ymm3 # ymm3 = ymm3[1],ymm5[1],ymm3[3],ymm5[3]
vunpcklpd %ymm6, %ymm4, %ymm5 # ymm5 = ymm4[0],ymm6[0],ymm4[2],ymm6[2]
vunpckhpd %ymm6, %ymm4, %ymm4 # ymm4 = ymm4[1],ymm6[1],ymm4[3],ymm6[3]
vinsertf128 $0x1, %xmm2, %ymm7, %ymm6
vinsertf128 $0x1, %xmm3, %ymm1, %ymm9
vinsertf128 $0x1, %xmm5, %ymm8, %ymm10
vinsertf128 $0x1, %xmm4, %ymm0, %ymm11
vperm2f128 $0x31, %ymm2, %ymm7, %ymm2 # ymm2 = ymm7[2,3],ymm2[2,3]
vperm2f128 $0x31, %ymm3, %ymm1, %ymm1 # ymm1 = ymm1[2,3],ymm3[2,3]
vperm2f128 $0x31, %ymm5, %ymm8, %ymm3 # ymm3 = ymm8[2,3],ymm5[2,3]
vperm2f128 $0x31, %ymm4, %ymm0, %ymm0 # ymm0 = ymm0[2,3],ymm4[2,3]
vmovaps %ymm6, (%rsi)
vmovaps %ymm9, 0x20(%rsi)
vmovaps %ymm10, 0x40(%rsi)
vmovaps %ymm11, 0x60(%rsi)
vmovaps %ymm2, 0x80(%rsi)
vmovaps %ymm1, 0xa0(%rsi)
vmovaps %ymm3, 0xc0(%rsi)
vmovaps %ymm0, 0xe0(%rsi)
addq $0x100, %rsi # imm = 0x100
addl $0x8, %edi
addq %r14, %rcx
jmp 0x374012
cmpl $0x4, %ebp
jne 0x3741c5
movq -0x60(%rsp), %rcx
movq (%rcx), %rcx
addq -0x30(%rsp), %rcx
movl $0x3, %edi
cmpl %r9d, %edi
jge 0x3741c5
vmovaps -0x70(%rcx), %xmm0
vmovaps -0x60(%rcx), %xmm1
vmovaps -0x50(%rcx), %xmm2
vmovaps -0x40(%rcx), %xmm3
vmovaps -0x30(%rcx), %xmm4
vmovaps -0x20(%rcx), %xmm5
vmovaps -0x10(%rcx), %xmm6
vmovaps (%rcx), %xmm7
vunpcklps %xmm1, %xmm0, %xmm8 # xmm8 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vunpcklps %xmm3, %xmm2, %xmm9 # xmm9 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
vunpckhps %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
vunpckhps %xmm3, %xmm2, %xmm1 # xmm1 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
vmovlhps %xmm9, %xmm8, %xmm2 # xmm2 = xmm8[0],xmm9[0]
vunpckhpd %xmm9, %xmm8, %xmm3 # xmm3 = xmm8[1],xmm9[1]
vmovlhps %xmm1, %xmm0, %xmm8 # xmm8 = xmm0[0],xmm1[0]
vunpckhpd %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[1],xmm1[1]
vunpcklps %xmm5, %xmm4, %xmm1 # xmm1 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
vunpcklps %xmm7, %xmm6, %xmm9 # xmm9 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
vunpckhps %xmm5, %xmm4, %xmm4 # xmm4 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
vunpckhps %xmm7, %xmm6, %xmm5 # xmm5 = xmm6[2],xmm7[2],xmm6[3],xmm7[3]
vmovlhps %xmm9, %xmm1, %xmm6 # xmm6 = xmm1[0],xmm9[0]
vunpckhpd %xmm9, %xmm1, %xmm1 # xmm1 = xmm1[1],xmm9[1]
vmovlhps %xmm5, %xmm4, %xmm7 # xmm7 = xmm4[0],xmm5[0]
vunpckhpd %xmm5, %xmm4, %xmm4 # xmm4 = xmm4[1],xmm5[1]
vmovaps %xmm2, (%rsi)
vmovaps %xmm6, 0x10(%rsi)
vmovaps %xmm3, 0x20(%rsi)
vmovaps %xmm1, 0x30(%rsi)
vmovaps %xmm8, 0x40(%rsi)
vmovaps %xmm7, 0x50(%rsi)
vmovaps %xmm0, 0x60(%rsi)
vmovaps %xmm4, 0x70(%rsi)
subq $-0x80, %rsi
addl $0x4, %edi
addq %r15, %rcx
jmp 0x37411b
cmpl $0x1, %ebp
jne 0x3741fb
movq -0x60(%rsp), %rcx
movq (%rcx), %rcx
addq -0x20(%rsp), %rcx
movl -0x54(%rsp), %edi
subl $0x1, %edi
jb 0x3741fb
vmovups -0x10(%rcx), %xmm0
vmovaps %xmm0, (%rsi)
vmovups (%rcx), %xmm0
vmovaps %xmm0, 0x10(%rsi)
addq $0x20, %rsi
addq %rax, %rcx
jmp 0x3741db
addq $0x8, %r10
movl $0x100, %ecx # imm = 0x100
addq %rcx, %r8
movl $0x80, %edi
addq %rdi, %rbx
addq $0x20, %rdx
addq %rcx, %r13
addq %rdi, %r11
addq $0x20, %r12
addq %rcx, -0x38(%rsp)
addq %rdi, -0x40(%rsp)
addq %rcx, -0x28(%rsp)
addq %rdi, -0x30(%rsp)
addq $0x20, -0x20(%rsp)
jmp 0x373fec
cmpl $0x8, %ebp
jne 0x3742c6
movq -0x60(%rsp), %rcx
movq (%rcx), %rcx
addq -0x38(%rsp), %rcx
movl $0x7, %edi
cmpl %r9d, %edi
jge 0x3742c6
vmovaps -0x60(%rcx), %ymm0
vmovaps -0x40(%rcx), %ymm1
vmovaps -0x20(%rcx), %ymm2
vmovaps (%rcx), %ymm3
vunpcklps %ymm1, %ymm0, %ymm4 # ymm4 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
vunpckhps %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
vunpcklps %ymm3, %ymm2, %ymm1 # ymm1 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
vunpckhps %ymm3, %ymm2, %ymm2 # ymm2 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
vunpcklpd %ymm1, %ymm4, %ymm3 # ymm3 = ymm4[0],ymm1[0],ymm4[2],ymm1[2]
vunpckhpd %ymm1, %ymm4, %ymm1 # ymm1 = ymm4[1],ymm1[1],ymm4[3],ymm1[3]
vunpcklpd %ymm2, %ymm0, %ymm4 # ymm4 = ymm0[0],ymm2[0],ymm0[2],ymm2[2]
vunpckhpd %ymm2, %ymm0, %ymm0 # ymm0 = ymm0[1],ymm2[1],ymm0[3],ymm2[3]
vinsertf128 $0x1, %xmm1, %ymm3, %ymm2
vinsertf128 $0x1, %xmm0, %ymm4, %ymm5
vperm2f128 $0x31, %ymm1, %ymm3, %ymm1 # ymm1 = ymm3[2,3],ymm1[2,3]
vperm2f128 $0x31, %ymm0, %ymm4, %ymm0 # ymm0 = ymm4[2,3],ymm0[2,3]
vmovaps %ymm2, (%rsi)
vmovaps %ymm5, 0x20(%rsi)
vmovaps %ymm1, 0x40(%rsi)
vmovaps %ymm0, 0x60(%rsi)
subq $-0x80, %rsi
addq %r14, %rcx
addl $0x8, %edi
jmp 0x374257
cmpl $0x4, %ebp
jne 0x374334
movq -0x60(%rsp), %rcx
movq (%rcx), %rcx
addq -0x40(%rsp), %rcx
movl $0x3, %edi
cmpl %r9d, %edi
jge 0x374334
vmovaps -0x30(%rcx), %xmm0
vmovaps -0x20(%rcx), %xmm1
vmovaps -0x10(%rcx), %xmm2
vmovaps (%rcx), %xmm3
vunpcklps %xmm1, %xmm0, %xmm4 # xmm4 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vunpcklps %xmm3, %xmm2, %xmm5 # xmm5 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
vunpckhps %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
vunpckhps %xmm3, %xmm2, %xmm1 # xmm1 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
vmovlhps %xmm5, %xmm4, %xmm2 # xmm2 = xmm4[0],xmm5[0]
vunpckhpd %xmm5, %xmm4, %xmm3 # xmm3 = xmm4[1],xmm5[1]
vmovlhps %xmm1, %xmm0, %xmm4 # xmm4 = xmm0[0],xmm1[0]
vunpckhpd %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[1],xmm1[1]
vmovaps %xmm2, (%rsi)
vmovaps %xmm3, 0x10(%rsi)
vmovaps %xmm4, 0x20(%rsi)
vmovaps %xmm0, 0x30(%rsi)
addq $0x40, %rsi
addq %r15, %rcx
addl $0x4, %edi
jmp 0x3742dd
cmpl $0x1, %ebp
jne 0x37435e
movq -0x60(%rsp), %rcx
movq (%rcx), %rcx
addq %rdx, %rcx
movl -0x54(%rsp), %edi
subl $0x1, %edi
jb 0x37435e
vmovups (%rcx), %xmm0
vmovaps %xmm0, (%rsi)
addq $0x10, %rsi
addq %rax, %rcx
jmp 0x374348
addq $0x4, %r10
movl $0x80, %ecx
addq %rcx, %r8
addq $0x40, %rbx
addq $0x10, %rdx
addq %rcx, %r13
addq $0x40, %r11
addq $0x10, %r12
addq %rcx, -0x38(%rsp)
addq $0x40, -0x40(%rsp)
movq %r10, %rcx
orq $0x3, %rcx
cmpq -0x48(%rsp), %rcx
jl 0x37423c
jmp 0x37447b
cmpl $0x8, %ebp
jne 0x3743eb
movq -0x60(%rsp), %rcx
movq (%rcx), %rcx
addq %r13, %rcx
movl $0x7, %edi
cmpl %r9d, %edi
jge 0x3743eb
vmovaps -0x20(%rcx), %ymm0
vmovaps (%rcx), %ymm1
vunpcklps %ymm1, %ymm0, %ymm2 # ymm2 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
vunpckhps %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
vinsertf128 $0x1, %xmm0, %ymm2, %ymm1
vperm2f128 $0x31, %ymm0, %ymm2, %ymm0 # ymm0 = ymm2[2,3],ymm0[2,3]
vmovaps %ymm1, (%rsi)
vmovaps %ymm0, 0x20(%rsi)
addq $0x40, %rsi
addq %r14, %rcx
addl $0x8, %edi
jmp 0x3743b4
cmpl $0x4, %ebp
jne 0x37442b
movq -0x60(%rsp), %rcx
movq (%rcx), %rcx
addq %r11, %rcx
movl $0x3, %edi
cmpl %r9d, %edi
jge 0x37442b
vmovaps -0x10(%rcx), %xmm0
vmovaps (%rcx), %xmm1
vunpcklps %xmm1, %xmm0, %xmm2 # xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vunpckhps %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
vmovaps %xmm2, (%rsi)
vmovaps %xmm0, 0x10(%rsi)
addq $0x20, %rsi
addq %r15, %rcx
addl $0x4, %edi
jmp 0x374400
cmpl $0x1, %ebp
jne 0x37445f
movq -0x60(%rsp), %rcx
movq (%rcx), %rcx
addq %r12, %rcx
movl -0x54(%rsp), %edi
subl $0x1, %edi
jb 0x37445f
vmovss -0x4(%rcx), %xmm0
vmovss %xmm0, (%rsi)
vmovss (%rcx), %xmm0
vmovss %xmm0, 0x4(%rsi)
addq $0x8, %rsi
addq %rax, %rcx
jmp 0x37443f
addq $0x2, %r10
addq $0x40, %r8
addq $0x20, %rbx
addq $0x8, %rdx
addq $0x40, %r13
addq $0x20, %r11
addq $0x8, %r12
movq %r10, %rcx
orq $0x1, %rcx
cmpq -0x48(%rsp), %rcx
jl 0x37439f
movl $0x7, %ecx
movl $0x3, %edi
cmpq -0x48(%rsp), %r10
jge 0x37453f
cmpl $0x8, %ebp
jne 0x3744d0
movq -0x60(%rsp), %r11
movq (%r11), %r11
addq %r8, %r11
movl %ecx, %r12d
cmpl %r9d, %r12d
jge 0x3744d0
vmovaps (%r11), %ymm0
vmovaps %ymm0, (%rsi)
addq $0x20, %rsi
addq %r14, %r11
addl $0x8, %r12d
jmp 0x3744b5
cmpl $0x4, %ebp
jne 0x3744fe
movq -0x60(%rsp), %r11
movq (%r11), %r11
addq %rbx, %r11
movl %edi, %r12d
cmpl %r9d, %r12d
jge 0x3744fe
vmovaps (%r11), %xmm0
vmovaps %xmm0, (%rsi)
addq $0x10, %rsi
addq %r15, %r11
addl $0x4, %r12d
jmp 0x3744e3
cmpl $0x1, %ebp
jne 0x37452b
movq -0x60(%rsp), %r11
movq (%r11), %r11
addq %rdx, %r11
movl -0x54(%rsp), %r12d
subl $0x1, %r12d
jb 0x37452b
vmovss (%r11), %xmm0
vmovss %xmm0, (%rsi)
addq $0x4, %rsi
addq %rax, %r11
jmp 0x374513
incq %r10
addq $0x20, %r8
addq $0x10, %rbx
addq $0x4, %rdx
jmp 0x374497
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/gemm_x86_avx.cpp |
virtual thunk to ncnn::Gemm_x86_avx::create_pipeline(ncnn::Option const&) | int Gemm_x86_avx::create_pipeline(const Option& opt)
{
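    // Pipeline creation pre-packs whichever of A/B/C are constant, so that
    // forward() can skip the per-call packing step for those inputs.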
if (constantA)
{
const int M = constantM;
const int K = constantK;
int TILE_M, TILE_N, TILE_K;
get_optimal_tile_mnk(M, 0, K, constant_TILE_M, constant_TILE_N, constant_TILE_K, TILE_M, TILE_N, TILE_K, opt.num_threads);
const int nn_M = (M + TILE_M - 1) / TILE_M;
AT_data.create(TILE_K * TILE_M, (K + TILE_K - 1) / TILE_K, (M + TILE_M - 1) / TILE_M, 4u, (Allocator*)0);
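        // AT_data layout: one channel per M tile, one row per K tile,
        // TILE_K * TILE_M floats per packed tile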
if (AT_data.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ppj = 0; ppj < nn_M; ppj++)
{
const int i = ppj * TILE_M;
for (int k = 0; k < K; k += TILE_K)
{
const int max_ii = std::min((M - i), TILE_M);
const int max_kk = std::min((K - k), TILE_K);
Mat AT_tile = AT_data.channel(i / TILE_M).row_range(k / TILE_K, 1);
if (transA)
{
transpose_pack_A_tile(A_data, AT_tile, i, max_ii, k, max_kk);
}
else
{
pack_A_tile(A_data, AT_tile, i, max_ii, k, max_kk);
}
}
}
if (opt.lightmode)
{
A_data.release();
}
}
if (constantB)
{
const int N = constantN;
const int K = constantK;
int TILE_M, TILE_N, TILE_K;
get_optimal_tile_mnk(0, N, K, constant_TILE_M, constant_TILE_N, constant_TILE_K, TILE_M, TILE_N, TILE_K, opt.num_threads);
const int nn_N = (N + TILE_N - 1) / TILE_N;
const int nn_K = (K + TILE_K - 1) / TILE_K;
BT_data.create(TILE_K * TILE_N, (K + TILE_K - 1) / TILE_K, (N + TILE_N - 1) / TILE_N, 4u, (Allocator*)0);
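        // BT_data mirrors AT_data: one channel per N tile, one row per K tile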
if (BT_data.empty())
return -100;
const int nn_NK = nn_N * nn_K;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ppjk = 0; ppjk < nn_NK; ppjk++)
{
const int ppj = ppjk / nn_K;
const int ppk = ppjk % nn_K;
const int j = ppj * TILE_N;
const int k = ppk * TILE_K;
const int max_jj = std::min((N - j), TILE_N);
const int max_kk = std::min((K - k), TILE_K);
Mat BT_tile = BT_data.channel(j / TILE_N).row_range(k / TILE_K, 1);
if (transB)
{
pack_B_tile(B_data, BT_tile, j, max_jj, k, max_kk);
}
else
{
transpose_pack_B_tile(B_data, BT_tile, j, max_jj, k, max_kk);
}
}
if (opt.lightmode)
{
B_data.release();
}
}
if (constantC && constant_broadcast_type_C != -1)
{
CT_data = C_data;
#if __SSE2__
if (constant_broadcast_type_C == 3 && opt.use_packing_layout)
{
#if __AVX512F__
int C_elempack = constantM % 16 == 0 ? 16 : constantM % 8 == 0 ? 8 : constantM % 4 == 0 ? 4 : 1;
#elif __AVX__
int C_elempack = constantM % 8 == 0 ? 8 : constantM % 4 == 0 ? 4 : 1;
#else
int C_elempack = constantM % 4 == 0 ? 4 : 1;
#endif
convert_packing(C_data, CT_data, C_elempack, opt);
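            // repack C so its elempack matches the packing chosen for the
            // output's M dimension (same divisibility rule used at runtime)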
}
#endif // __SSE2__
// pre-multiply C with beta
if (beta != 1.f)
{
Mat C2;
C2.create_like(CT_data);
const int size = CT_data.total() * CT_data.elempack;
for (int i = 0; i < size; i++)
{
C2[i] = CT_data[i] * beta;
}
CT_data = C2;
}
if (opt.lightmode)
{
C_data.release();
}
}
if (constantA || constantB || constantC)
{
nT = opt.num_threads;
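        // record the thread count that was used while packing the constants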
}
return 0;
} | movq (%rdi), %rax
addq -0x30(%rax), %rdi
jmp 0x371306
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/gemm_x86_avx.cpp |
ncnn::transpose_unpack_output_tile(ncnn::Mat const&, ncnn::Mat&, int, int, int, int) | static void transpose_unpack_output_tile(const Mat& topT, Mat& top_blob, int i, int max_ii, int j, int max_jj)
{
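    // topT holds a max_ii x max_jj result tile with the ii dimension packed
    // innermost; this scatters it transposed into top_blob, so rows are
    // indexed by j and the column offset by i, honoring out_elempack.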
const int out_elempack = top_blob.elempack;
const int out_hstep = top_blob.dims == 3 ? (int)top_blob.cstep : top_blob.w;
const float* pp = topT;
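    // out_hstep is the row stride of top_blob in floats (cstep for 3-d blobs);
    // pp walks the packed tile sequentially as it is consumed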
int ii = 0;
#if __SSE2__
#if __AVX__
#if __AVX512F__
for (; ii + 15 < max_ii; ii += 16)
{
if (out_elempack == 16)
{
float* p0 = (float*)top_blob + (j / 16 * 16) * out_hstep + (i + ii) * 16;
int jj = 0;
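            // j may start mid-way through a 16-wide output pack; the
            // j % 16 == 4/8/12 prologues below realign to the pack boundary.
            // The same pattern repeats for the 8-wide packs further down.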
if (j % 16 == 4)
{
__m512 _r0 = _mm512_load_ps(pp);
__m512 _r1 = _mm512_load_ps(pp + 16);
__m512 _r2 = _mm512_load_ps(pp + 16 * 2);
__m512 _r3 = _mm512_load_ps(pp + 16 * 3);
if (max_jj > 4)
{
// assert max_jj > 8
__m512 _r4 = _mm512_load_ps(pp + 16 * 4);
__m512 _r5 = _mm512_load_ps(pp + 16 * 5);
__m512 _r6 = _mm512_load_ps(pp + 16 * 6);
__m512 _r7 = _mm512_load_ps(pp + 16 * 7);
__m512 _r8 = _mm512_load_ps(pp + 16 * 8);
__m512 _r9 = _mm512_load_ps(pp + 16 * 9);
__m512 _ra = _mm512_load_ps(pp + 16 * 10);
__m512 _rb = _mm512_load_ps(pp + 16 * 11);
transpose16x12_ps(_r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7, _r8, _r9, _ra, _rb);
_mm256_storeu_ps(p0 + 4, _mm512_extractf32x8_ps(_r0, 0));
_mm_store_ps(p0 + 4 + 8, _mm512_extractf32x4_ps(_r0, 2));
_mm_store_ps(p0 + 16 + 4, _mm512_extractf32x4_ps(_r0, 3));
_mm256_store_ps(p0 + 16 + 4 + 4, _mm512_extractf32x8_ps(_r1, 0));
_mm256_storeu_ps(p0 + 16 * 2 + 4, _mm512_extractf32x8_ps(_r1, 1));
_mm_store_ps(p0 + 16 * 2 + 4 + 8, _mm512_extractf32x4_ps(_r2, 0));
_mm_store_ps(p0 + 16 * 3 + 4, _mm512_extractf32x4_ps(_r2, 1));
_mm256_store_ps(p0 + 16 * 3 + 4 + 4, _mm512_extractf32x8_ps(_r2, 1));
_mm256_storeu_ps(p0 + 16 * 4 + 4, _mm512_extractf32x8_ps(_r3, 0));
_mm_store_ps(p0 + 16 * 4 + 4 + 8, _mm512_extractf32x4_ps(_r3, 2));
_mm_store_ps(p0 + 16 * 5 + 4, _mm512_extractf32x4_ps(_r3, 3));
_mm256_store_ps(p0 + 16 * 5 + 4 + 4, _mm512_extractf32x8_ps(_r4, 0));
_mm256_storeu_ps(p0 + 16 * 6 + 4, _mm512_extractf32x8_ps(_r4, 1));
_mm_store_ps(p0 + 16 * 6 + 4 + 8, _mm512_extractf32x4_ps(_r5, 0));
_mm_store_ps(p0 + 16 * 7 + 4, _mm512_extractf32x4_ps(_r5, 1));
_mm256_store_ps(p0 + 16 * 7 + 4 + 4, _mm512_extractf32x8_ps(_r5, 1));
_mm256_storeu_ps(p0 + 16 * 8 + 4, _mm512_extractf32x8_ps(_r6, 0));
_mm_store_ps(p0 + 16 * 8 + 4 + 8, _mm512_extractf32x4_ps(_r6, 2));
_mm_store_ps(p0 + 16 * 9 + 4, _mm512_extractf32x4_ps(_r6, 3));
_mm256_store_ps(p0 + 16 * 9 + 4 + 4, _mm512_extractf32x8_ps(_r7, 0));
_mm256_storeu_ps(p0 + 16 * 10 + 4, _mm512_extractf32x8_ps(_r7, 1));
_mm_store_ps(p0 + 16 * 10 + 4 + 8, _mm512_extractf32x4_ps(_r8, 0));
_mm_store_ps(p0 + 16 * 11 + 4, _mm512_extractf32x4_ps(_r8, 1));
_mm256_store_ps(p0 + 16 * 11 + 4 + 4, _mm512_extractf32x8_ps(_r8, 1));
_mm256_storeu_ps(p0 + 16 * 12 + 4, _mm512_extractf32x8_ps(_r9, 0));
_mm_store_ps(p0 + 16 * 12 + 4 + 8, _mm512_extractf32x4_ps(_r9, 2));
_mm_store_ps(p0 + 16 * 13 + 4, _mm512_extractf32x4_ps(_r9, 3));
_mm256_store_ps(p0 + 16 * 13 + 4 + 4, _mm512_extractf32x8_ps(_ra, 0));
_mm256_storeu_ps(p0 + 16 * 14 + 4, _mm512_extractf32x8_ps(_ra, 1));
_mm_store_ps(p0 + 16 * 14 + 4 + 8, _mm512_extractf32x4_ps(_rb, 0));
_mm_store_ps(p0 + 16 * 15 + 4, _mm512_extractf32x4_ps(_rb, 1));
_mm256_store_ps(p0 + 16 * 15 + 4 + 4, _mm512_extractf32x8_ps(_rb, 1));
pp += 192;
jj += 12;
}
else
{
transpose16x4_ps(_r0, _r1, _r2, _r3);
_mm_store_ps(p0 + 4, _mm512_extractf32x4_ps(_r0, 0));
_mm_store_ps(p0 + 16 + 4, _mm512_extractf32x4_ps(_r0, 1));
_mm_store_ps(p0 + 16 * 2 + 4, _mm512_extractf32x4_ps(_r0, 2));
_mm_store_ps(p0 + 16 * 3 + 4, _mm512_extractf32x4_ps(_r0, 3));
_mm_store_ps(p0 + 16 * 4 + 4, _mm512_extractf32x4_ps(_r1, 0));
_mm_store_ps(p0 + 16 * 5 + 4, _mm512_extractf32x4_ps(_r1, 1));
_mm_store_ps(p0 + 16 * 6 + 4, _mm512_extractf32x4_ps(_r1, 2));
_mm_store_ps(p0 + 16 * 7 + 4, _mm512_extractf32x4_ps(_r1, 3));
_mm_store_ps(p0 + 16 * 8 + 4, _mm512_extractf32x4_ps(_r2, 0));
_mm_store_ps(p0 + 16 * 9 + 4, _mm512_extractf32x4_ps(_r2, 1));
_mm_store_ps(p0 + 16 * 10 + 4, _mm512_extractf32x4_ps(_r2, 2));
_mm_store_ps(p0 + 16 * 11 + 4, _mm512_extractf32x4_ps(_r2, 3));
_mm_store_ps(p0 + 16 * 12 + 4, _mm512_extractf32x4_ps(_r3, 0));
_mm_store_ps(p0 + 16 * 13 + 4, _mm512_extractf32x4_ps(_r3, 1));
_mm_store_ps(p0 + 16 * 14 + 4, _mm512_extractf32x4_ps(_r3, 2));
_mm_store_ps(p0 + 16 * 15 + 4, _mm512_extractf32x4_ps(_r3, 3));
pp += 64;
jj += 4;
}
p0 += out_hstep * 16;
}
if (j % 16 == 8)
{
__m512 _r0 = _mm512_load_ps(pp);
__m512 _r1 = _mm512_load_ps(pp + 16);
__m512 _r2 = _mm512_load_ps(pp + 16 * 2);
__m512 _r3 = _mm512_load_ps(pp + 16 * 3);
if (max_jj > 4)
{
__m512 _r4 = _mm512_load_ps(pp + 16 * 4);
__m512 _r5 = _mm512_load_ps(pp + 16 * 5);
__m512 _r6 = _mm512_load_ps(pp + 16 * 6);
__m512 _r7 = _mm512_load_ps(pp + 16 * 7);
transpose16x8_ps(_r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7);
_mm256_store_ps(p0 + 8, _mm512_extractf32x8_ps(_r0, 0));
_mm256_store_ps(p0 + 16 + 8, _mm512_extractf32x8_ps(_r0, 1));
_mm256_store_ps(p0 + 16 * 2 + 8, _mm512_extractf32x8_ps(_r1, 0));
_mm256_store_ps(p0 + 16 * 3 + 8, _mm512_extractf32x8_ps(_r1, 1));
_mm256_store_ps(p0 + 16 * 4 + 8, _mm512_extractf32x8_ps(_r2, 0));
_mm256_store_ps(p0 + 16 * 5 + 8, _mm512_extractf32x8_ps(_r2, 1));
_mm256_store_ps(p0 + 16 * 6 + 8, _mm512_extractf32x8_ps(_r3, 0));
_mm256_store_ps(p0 + 16 * 7 + 8, _mm512_extractf32x8_ps(_r3, 1));
_mm256_store_ps(p0 + 16 * 8 + 8, _mm512_extractf32x8_ps(_r4, 0));
_mm256_store_ps(p0 + 16 * 9 + 8, _mm512_extractf32x8_ps(_r4, 1));
_mm256_store_ps(p0 + 16 * 10 + 8, _mm512_extractf32x8_ps(_r5, 0));
_mm256_store_ps(p0 + 16 * 11 + 8, _mm512_extractf32x8_ps(_r5, 1));
_mm256_store_ps(p0 + 16 * 12 + 8, _mm512_extractf32x8_ps(_r6, 0));
_mm256_store_ps(p0 + 16 * 13 + 8, _mm512_extractf32x8_ps(_r6, 1));
_mm256_store_ps(p0 + 16 * 14 + 8, _mm512_extractf32x8_ps(_r7, 0));
_mm256_store_ps(p0 + 16 * 15 + 8, _mm512_extractf32x8_ps(_r7, 1));
pp += 128;
jj += 8;
}
else
{
transpose16x4_ps(_r0, _r1, _r2, _r3);
_mm_store_ps(p0 + 8, _mm512_extractf32x4_ps(_r0, 0));
_mm_store_ps(p0 + 16 + 8, _mm512_extractf32x4_ps(_r0, 1));
_mm_store_ps(p0 + 16 * 2 + 8, _mm512_extractf32x4_ps(_r0, 2));
_mm_store_ps(p0 + 16 * 3 + 8, _mm512_extractf32x4_ps(_r0, 3));
_mm_store_ps(p0 + 16 * 4 + 8, _mm512_extractf32x4_ps(_r1, 0));
_mm_store_ps(p0 + 16 * 5 + 8, _mm512_extractf32x4_ps(_r1, 1));
_mm_store_ps(p0 + 16 * 6 + 8, _mm512_extractf32x4_ps(_r1, 2));
_mm_store_ps(p0 + 16 * 7 + 8, _mm512_extractf32x4_ps(_r1, 3));
_mm_store_ps(p0 + 16 * 8 + 8, _mm512_extractf32x4_ps(_r2, 0));
_mm_store_ps(p0 + 16 * 9 + 8, _mm512_extractf32x4_ps(_r2, 1));
_mm_store_ps(p0 + 16 * 10 + 8, _mm512_extractf32x4_ps(_r2, 2));
_mm_store_ps(p0 + 16 * 11 + 8, _mm512_extractf32x4_ps(_r2, 3));
_mm_store_ps(p0 + 16 * 12 + 8, _mm512_extractf32x4_ps(_r3, 0));
_mm_store_ps(p0 + 16 * 13 + 8, _mm512_extractf32x4_ps(_r3, 1));
_mm_store_ps(p0 + 16 * 14 + 8, _mm512_extractf32x4_ps(_r3, 2));
_mm_store_ps(p0 + 16 * 15 + 8, _mm512_extractf32x4_ps(_r3, 3));
pp += 64;
jj += 4;
}
p0 += out_hstep * 16;
}
if (j % 16 == 12)
{
__m512 _r0 = _mm512_load_ps(pp);
__m512 _r1 = _mm512_load_ps(pp + 16);
__m512 _r2 = _mm512_load_ps(pp + 16 * 2);
__m512 _r3 = _mm512_load_ps(pp + 16 * 3);
transpose16x4_ps(_r0, _r1, _r2, _r3);
_mm_store_ps(p0 + 12, _mm512_extractf32x4_ps(_r0, 0));
_mm_store_ps(p0 + 16 + 12, _mm512_extractf32x4_ps(_r0, 1));
_mm_store_ps(p0 + 16 * 2 + 12, _mm512_extractf32x4_ps(_r0, 2));
_mm_store_ps(p0 + 16 * 3 + 12, _mm512_extractf32x4_ps(_r0, 3));
_mm_store_ps(p0 + 16 * 4 + 12, _mm512_extractf32x4_ps(_r1, 0));
_mm_store_ps(p0 + 16 * 5 + 12, _mm512_extractf32x4_ps(_r1, 1));
_mm_store_ps(p0 + 16 * 6 + 12, _mm512_extractf32x4_ps(_r1, 2));
_mm_store_ps(p0 + 16 * 7 + 12, _mm512_extractf32x4_ps(_r1, 3));
_mm_store_ps(p0 + 16 * 8 + 12, _mm512_extractf32x4_ps(_r2, 0));
_mm_store_ps(p0 + 16 * 9 + 12, _mm512_extractf32x4_ps(_r2, 1));
_mm_store_ps(p0 + 16 * 10 + 12, _mm512_extractf32x4_ps(_r2, 2));
_mm_store_ps(p0 + 16 * 11 + 12, _mm512_extractf32x4_ps(_r2, 3));
_mm_store_ps(p0 + 16 * 12 + 12, _mm512_extractf32x4_ps(_r3, 0));
_mm_store_ps(p0 + 16 * 13 + 12, _mm512_extractf32x4_ps(_r3, 1));
_mm_store_ps(p0 + 16 * 14 + 12, _mm512_extractf32x4_ps(_r3, 2));
_mm_store_ps(p0 + 16 * 15 + 12, _mm512_extractf32x4_ps(_r3, 3));
pp += 64;
p0 += out_hstep * 16;
jj += 4;
}
for (; jj + 15 < max_jj; jj += 16)
{
__m512 _r0 = _mm512_load_ps(pp);
__m512 _r1 = _mm512_load_ps(pp + 16);
__m512 _r2 = _mm512_load_ps(pp + 16 * 2);
__m512 _r3 = _mm512_load_ps(pp + 16 * 3);
__m512 _r4 = _mm512_load_ps(pp + 16 * 4);
__m512 _r5 = _mm512_load_ps(pp + 16 * 5);
__m512 _r6 = _mm512_load_ps(pp + 16 * 6);
__m512 _r7 = _mm512_load_ps(pp + 16 * 7);
__m512 _r8 = _mm512_load_ps(pp + 16 * 8);
__m512 _r9 = _mm512_load_ps(pp + 16 * 9);
__m512 _ra = _mm512_load_ps(pp + 16 * 10);
__m512 _rb = _mm512_load_ps(pp + 16 * 11);
__m512 _rc = _mm512_load_ps(pp + 16 * 12);
__m512 _rd = _mm512_load_ps(pp + 16 * 13);
__m512 _re = _mm512_load_ps(pp + 16 * 14);
__m512 _rf = _mm512_load_ps(pp + 16 * 15);
transpose16x16_ps(_r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7, _r8, _r9, _ra, _rb, _rc, _rd, _re, _rf);
_mm512_store_ps(p0, _r0);
_mm512_store_ps(p0 + 16, _r1);
_mm512_store_ps(p0 + 16 * 2, _r2);
_mm512_store_ps(p0 + 16 * 3, _r3);
_mm512_store_ps(p0 + 16 * 4, _r4);
_mm512_store_ps(p0 + 16 * 5, _r5);
_mm512_store_ps(p0 + 16 * 6, _r6);
_mm512_store_ps(p0 + 16 * 7, _r7);
_mm512_store_ps(p0 + 16 * 8, _r8);
_mm512_store_ps(p0 + 16 * 9, _r9);
_mm512_store_ps(p0 + 16 * 10, _ra);
_mm512_store_ps(p0 + 16 * 11, _rb);
_mm512_store_ps(p0 + 16 * 12, _rc);
_mm512_store_ps(p0 + 16 * 13, _rd);
_mm512_store_ps(p0 + 16 * 14, _re);
_mm512_store_ps(p0 + 16 * 15, _rf);
pp += 256;
p0 += out_hstep * 16;
}
for (; jj + 11 < max_jj; jj += 12)
{
__m512 _r0 = _mm512_load_ps(pp);
__m512 _r1 = _mm512_load_ps(pp + 16);
__m512 _r2 = _mm512_load_ps(pp + 16 * 2);
__m512 _r3 = _mm512_load_ps(pp + 16 * 3);
__m512 _r4 = _mm512_load_ps(pp + 16 * 4);
__m512 _r5 = _mm512_load_ps(pp + 16 * 5);
__m512 _r6 = _mm512_load_ps(pp + 16 * 6);
__m512 _r7 = _mm512_load_ps(pp + 16 * 7);
__m512 _r8 = _mm512_load_ps(pp + 16 * 8);
__m512 _r9 = _mm512_load_ps(pp + 16 * 9);
__m512 _ra = _mm512_load_ps(pp + 16 * 10);
__m512 _rb = _mm512_load_ps(pp + 16 * 11);
transpose16x12_ps(_r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7, _r8, _r9, _ra, _rb);
_mm256_store_ps(p0, _mm512_extractf32x8_ps(_r0, 0));
_mm_store_ps(p0 + 8, _mm512_extractf32x4_ps(_r0, 2));
_mm_store_ps(p0 + 16, _mm512_extractf32x4_ps(_r0, 3));
_mm256_storeu_ps(p0 + 16 + 4, _mm512_extractf32x8_ps(_r1, 0));
_mm256_store_ps(p0 + 16 * 2, _mm512_extractf32x8_ps(_r1, 1));
_mm_store_ps(p0 + 16 * 2 + 8, _mm512_extractf32x4_ps(_r2, 0));
_mm_store_ps(p0 + 16 * 3, _mm512_extractf32x4_ps(_r2, 1));
_mm256_storeu_ps(p0 + 16 * 3 + 4, _mm512_extractf32x8_ps(_r2, 1));
_mm256_store_ps(p0 + 16 * 4, _mm512_extractf32x8_ps(_r3, 0));
_mm_store_ps(p0 + 16 * 4 + 8, _mm512_extractf32x4_ps(_r3, 2));
_mm_store_ps(p0 + 16 * 5, _mm512_extractf32x4_ps(_r3, 3));
_mm256_storeu_ps(p0 + 16 * 5 + 4, _mm512_extractf32x8_ps(_r4, 0));
_mm256_store_ps(p0 + 16 * 6, _mm512_extractf32x8_ps(_r4, 1));
_mm_store_ps(p0 + 16 * 6 + 8, _mm512_extractf32x4_ps(_r5, 0));
_mm_store_ps(p0 + 16 * 7, _mm512_extractf32x4_ps(_r5, 1));
_mm256_storeu_ps(p0 + 16 * 7 + 4, _mm512_extractf32x8_ps(_r5, 1));
_mm256_store_ps(p0 + 16 * 8, _mm512_extractf32x8_ps(_r6, 0));
_mm_store_ps(p0 + 16 * 8 + 8, _mm512_extractf32x4_ps(_r6, 2));
_mm_store_ps(p0 + 16 * 9, _mm512_extractf32x4_ps(_r6, 3));
_mm256_storeu_ps(p0 + 16 * 9 + 4, _mm512_extractf32x8_ps(_r7, 0));
_mm256_store_ps(p0 + 16 * 10, _mm512_extractf32x8_ps(_r7, 1));
_mm_store_ps(p0 + 16 * 10 + 8, _mm512_extractf32x4_ps(_r8, 0));
_mm_store_ps(p0 + 16 * 11, _mm512_extractf32x4_ps(_r8, 1));
_mm256_storeu_ps(p0 + 16 * 11 + 4, _mm512_extractf32x8_ps(_r8, 1));
_mm256_store_ps(p0 + 16 * 12, _mm512_extractf32x8_ps(_r9, 0));
_mm_store_ps(p0 + 16 * 12 + 8, _mm512_extractf32x4_ps(_r9, 2));
_mm_store_ps(p0 + 16 * 13, _mm512_extractf32x4_ps(_r9, 3));
_mm256_storeu_ps(p0 + 16 * 13 + 4, _mm512_extractf32x8_ps(_ra, 0));
_mm256_store_ps(p0 + 16 * 14, _mm512_extractf32x8_ps(_ra, 1));
_mm_store_ps(p0 + 16 * 14 + 8, _mm512_extractf32x4_ps(_rb, 0));
_mm_store_ps(p0 + 16 * 15, _mm512_extractf32x4_ps(_rb, 1));
_mm256_storeu_ps(p0 + 16 * 15 + 4, _mm512_extractf32x8_ps(_rb, 1));
pp += 192;
p0 += out_hstep * 16;
}
for (; jj + 7 < max_jj; jj += 8)
{
__m512 _r0 = _mm512_load_ps(pp);
__m512 _r1 = _mm512_load_ps(pp + 16);
__m512 _r2 = _mm512_load_ps(pp + 16 * 2);
__m512 _r3 = _mm512_load_ps(pp + 16 * 3);
__m512 _r4 = _mm512_load_ps(pp + 16 * 4);
__m512 _r5 = _mm512_load_ps(pp + 16 * 5);
__m512 _r6 = _mm512_load_ps(pp + 16 * 6);
__m512 _r7 = _mm512_load_ps(pp + 16 * 7);
transpose16x8_ps(_r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7);
_mm256_store_ps(p0, _mm512_extractf32x8_ps(_r0, 0));
_mm256_store_ps(p0 + 16, _mm512_extractf32x8_ps(_r0, 1));
_mm256_store_ps(p0 + 16 * 2, _mm512_extractf32x8_ps(_r1, 0));
_mm256_store_ps(p0 + 16 * 3, _mm512_extractf32x8_ps(_r1, 1));
_mm256_store_ps(p0 + 16 * 4, _mm512_extractf32x8_ps(_r2, 0));
_mm256_store_ps(p0 + 16 * 5, _mm512_extractf32x8_ps(_r2, 1));
_mm256_store_ps(p0 + 16 * 6, _mm512_extractf32x8_ps(_r3, 0));
_mm256_store_ps(p0 + 16 * 7, _mm512_extractf32x8_ps(_r3, 1));
_mm256_store_ps(p0 + 16 * 8, _mm512_extractf32x8_ps(_r4, 0));
_mm256_store_ps(p0 + 16 * 9, _mm512_extractf32x8_ps(_r4, 1));
_mm256_store_ps(p0 + 16 * 10, _mm512_extractf32x8_ps(_r5, 0));
_mm256_store_ps(p0 + 16 * 11, _mm512_extractf32x8_ps(_r5, 1));
_mm256_store_ps(p0 + 16 * 12, _mm512_extractf32x8_ps(_r6, 0));
_mm256_store_ps(p0 + 16 * 13, _mm512_extractf32x8_ps(_r6, 1));
_mm256_store_ps(p0 + 16 * 14, _mm512_extractf32x8_ps(_r7, 0));
_mm256_store_ps(p0 + 16 * 15, _mm512_extractf32x8_ps(_r7, 1));
pp += 128;
p0 += out_hstep * 16;
}
for (; jj + 3 < max_jj; jj += 4)
{
__m512 _r0 = _mm512_load_ps(pp);
__m512 _r1 = _mm512_load_ps(pp + 16);
__m512 _r2 = _mm512_load_ps(pp + 16 * 2);
__m512 _r3 = _mm512_load_ps(pp + 16 * 3);
transpose16x4_ps(_r0, _r1, _r2, _r3);
_mm_store_ps(p0, _mm512_extractf32x4_ps(_r0, 0));
_mm_store_ps(p0 + 16, _mm512_extractf32x4_ps(_r0, 1));
_mm_store_ps(p0 + 16 * 2, _mm512_extractf32x4_ps(_r0, 2));
_mm_store_ps(p0 + 16 * 3, _mm512_extractf32x4_ps(_r0, 3));
_mm_store_ps(p0 + 16 * 4, _mm512_extractf32x4_ps(_r1, 0));
_mm_store_ps(p0 + 16 * 5, _mm512_extractf32x4_ps(_r1, 1));
_mm_store_ps(p0 + 16 * 6, _mm512_extractf32x4_ps(_r1, 2));
_mm_store_ps(p0 + 16 * 7, _mm512_extractf32x4_ps(_r1, 3));
_mm_store_ps(p0 + 16 * 8, _mm512_extractf32x4_ps(_r2, 0));
_mm_store_ps(p0 + 16 * 9, _mm512_extractf32x4_ps(_r2, 1));
_mm_store_ps(p0 + 16 * 10, _mm512_extractf32x4_ps(_r2, 2));
_mm_store_ps(p0 + 16 * 11, _mm512_extractf32x4_ps(_r2, 3));
_mm_store_ps(p0 + 16 * 12, _mm512_extractf32x4_ps(_r3, 0));
_mm_store_ps(p0 + 16 * 13, _mm512_extractf32x4_ps(_r3, 1));
_mm_store_ps(p0 + 16 * 14, _mm512_extractf32x4_ps(_r3, 2));
_mm_store_ps(p0 + 16 * 15, _mm512_extractf32x4_ps(_r3, 3));
pp += 64;
p0 += out_hstep * 16;
}
}
if (out_elempack == 8)
{
float* p0 = (float*)top_blob + (j / 8 * 8) * out_hstep + (i + ii) * 8;
int jj = 0;
if (j % 8 == 4)
{
__m512 _r0 = _mm512_load_ps(pp);
__m512 _r1 = _mm512_load_ps(pp + 16);
__m512 _r2 = _mm512_load_ps(pp + 16 * 2);
__m512 _r3 = _mm512_load_ps(pp + 16 * 3);
transpose16x4_ps(_r0, _r1, _r2, _r3);
_mm_store_ps(p0 + 4, _mm512_extractf32x4_ps(_r0, 0));
_mm_store_ps(p0 + 8 + 4, _mm512_extractf32x4_ps(_r0, 1));
_mm_store_ps(p0 + 8 * 2 + 4, _mm512_extractf32x4_ps(_r0, 2));
_mm_store_ps(p0 + 8 * 3 + 4, _mm512_extractf32x4_ps(_r0, 3));
_mm_store_ps(p0 + 8 * 4 + 4, _mm512_extractf32x4_ps(_r1, 0));
_mm_store_ps(p0 + 8 * 5 + 4, _mm512_extractf32x4_ps(_r1, 1));
_mm_store_ps(p0 + 8 * 6 + 4, _mm512_extractf32x4_ps(_r1, 2));
_mm_store_ps(p0 + 8 * 7 + 4, _mm512_extractf32x4_ps(_r1, 3));
_mm_store_ps(p0 + 8 * 8 + 4, _mm512_extractf32x4_ps(_r2, 0));
_mm_store_ps(p0 + 8 * 9 + 4, _mm512_extractf32x4_ps(_r2, 1));
_mm_store_ps(p0 + 8 * 10 + 4, _mm512_extractf32x4_ps(_r2, 2));
_mm_store_ps(p0 + 8 * 11 + 4, _mm512_extractf32x4_ps(_r2, 3));
_mm_store_ps(p0 + 8 * 12 + 4, _mm512_extractf32x4_ps(_r3, 0));
_mm_store_ps(p0 + 8 * 13 + 4, _mm512_extractf32x4_ps(_r3, 1));
_mm_store_ps(p0 + 8 * 14 + 4, _mm512_extractf32x4_ps(_r3, 2));
_mm_store_ps(p0 + 8 * 15 + 4, _mm512_extractf32x4_ps(_r3, 3));
pp += 64;
p0 += out_hstep * 8;
jj += 4;
}
for (; jj + 7 < max_jj; jj += 8)
{
__m512 _r0 = _mm512_load_ps(pp);
__m512 _r1 = _mm512_load_ps(pp + 16);
__m512 _r2 = _mm512_load_ps(pp + 16 * 2);
__m512 _r3 = _mm512_load_ps(pp + 16 * 3);
__m512 _r4 = _mm512_load_ps(pp + 16 * 4);
__m512 _r5 = _mm512_load_ps(pp + 16 * 5);
__m512 _r6 = _mm512_load_ps(pp + 16 * 6);
__m512 _r7 = _mm512_load_ps(pp + 16 * 7);
transpose16x8_ps(_r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7);
_mm512_storeu_ps(p0, _r0);
_mm512_storeu_ps(p0 + 16, _r1);
_mm512_storeu_ps(p0 + 16 * 2, _r2);
_mm512_storeu_ps(p0 + 16 * 3, _r3);
_mm512_storeu_ps(p0 + 16 * 4, _r4);
_mm512_storeu_ps(p0 + 16 * 5, _r5);
_mm512_storeu_ps(p0 + 16 * 6, _r6);
_mm512_storeu_ps(p0 + 16 * 7, _r7);
pp += 128;
p0 += out_hstep * 8;
}
for (; jj + 3 < max_jj; jj += 4)
{
__m512 _r0 = _mm512_load_ps(pp);
__m512 _r1 = _mm512_load_ps(pp + 16);
__m512 _r2 = _mm512_load_ps(pp + 16 * 2);
__m512 _r3 = _mm512_load_ps(pp + 16 * 3);
transpose16x4_ps(_r0, _r1, _r2, _r3);
_mm_store_ps(p0, _mm512_extractf32x4_ps(_r0, 0));
_mm_store_ps(p0 + 8, _mm512_extractf32x4_ps(_r0, 1));
_mm_store_ps(p0 + 8 * 2, _mm512_extractf32x4_ps(_r0, 2));
_mm_store_ps(p0 + 8 * 3, _mm512_extractf32x4_ps(_r0, 3));
_mm_store_ps(p0 + 8 * 4, _mm512_extractf32x4_ps(_r1, 0));
_mm_store_ps(p0 + 8 * 5, _mm512_extractf32x4_ps(_r1, 1));
_mm_store_ps(p0 + 8 * 6, _mm512_extractf32x4_ps(_r1, 2));
_mm_store_ps(p0 + 8 * 7, _mm512_extractf32x4_ps(_r1, 3));
_mm_store_ps(p0 + 8 * 8, _mm512_extractf32x4_ps(_r2, 0));
_mm_store_ps(p0 + 8 * 9, _mm512_extractf32x4_ps(_r2, 1));
_mm_store_ps(p0 + 8 * 10, _mm512_extractf32x4_ps(_r2, 2));
_mm_store_ps(p0 + 8 * 11, _mm512_extractf32x4_ps(_r2, 3));
_mm_store_ps(p0 + 8 * 12, _mm512_extractf32x4_ps(_r3, 0));
_mm_store_ps(p0 + 8 * 13, _mm512_extractf32x4_ps(_r3, 1));
_mm_store_ps(p0 + 8 * 14, _mm512_extractf32x4_ps(_r3, 2));
_mm_store_ps(p0 + 8 * 15, _mm512_extractf32x4_ps(_r3, 3));
pp += 64;
p0 += out_hstep * 8;
}
}
if (out_elempack == 4)
{
float* p0 = (float*)top_blob + j * out_hstep + (i + ii) * 4;
for (int jj = 0; jj + 3 < max_jj; jj += 4)
{
__m512 _r0 = _mm512_load_ps(pp);
__m512 _r1 = _mm512_load_ps(pp + 16);
__m512 _r2 = _mm512_load_ps(pp + 16 * 2);
__m512 _r3 = _mm512_load_ps(pp + 16 * 3);
transpose16x4_ps(_r0, _r1, _r2, _r3);
_mm512_storeu_ps(p0, _r0);
_mm512_storeu_ps(p0 + 16, _r1);
_mm512_storeu_ps(p0 + 16 * 2, _r2);
_mm512_storeu_ps(p0 + 16 * 3, _r3);
pp += 64;
p0 += out_hstep * 4;
}
}
if (out_elempack == 1)
{
float* p0 = (float*)top_blob + j * out_hstep + (i + ii);
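            // one output row per jj: 16 consecutive floats along the i
            // (column) direction of the transposed output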
for (int jj = 0; jj < max_jj; jj += 1)
{
__m512 _r0 = _mm512_load_ps(pp);
_mm512_storeu_ps(p0, _r0);
pp += 16;
p0 += out_hstep;
}
}
}
#endif // __AVX512F__
for (; ii + 7 < max_ii; ii += 8)
{
#if __AVX512F__
if (out_elempack == 16)
{
float* p0 = (float*)top_blob + (j / 16 * 16) * out_hstep + (i + ii) * 16;
int jj = 0;
if (j % 16 == 4)
{
__m256 _r0 = _mm256_load_ps(pp);
__m256 _r1 = _mm256_load_ps(pp + 8);
__m256 _r2 = _mm256_load_ps(pp + 8 * 2);
__m256 _r3 = _mm256_load_ps(pp + 8 * 3);
if (max_jj > 4)
{
// assert max_jj > 8
__m256 _r4 = _mm256_load_ps(pp + 8 * 4);
__m256 _r5 = _mm256_load_ps(pp + 8 * 5);
__m256 _r6 = _mm256_load_ps(pp + 8 * 6);
__m256 _r7 = _mm256_load_ps(pp + 8 * 7);
__m256 _r8 = _mm256_load_ps(pp + 8 * 8);
__m256 _r9 = _mm256_load_ps(pp + 8 * 9);
__m256 _ra = _mm256_load_ps(pp + 8 * 10);
__m256 _rb = _mm256_load_ps(pp + 8 * 11);
transpose8x12_ps(_r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7, _r8, _r9, _ra, _rb);
_mm256_storeu_ps(p0 + 4, _r0);
_mm_store_ps(p0 + 12, _mm256_extractf128_ps(_r1, 0));
_mm_store_ps(p0 + 16 + 4, _mm256_extractf128_ps(_r1, 1));
_mm256_store_ps(p0 + 16 + 8, _r2);
_mm256_storeu_ps(p0 + 16 * 2 + 4, _r3);
_mm_store_ps(p0 + 16 * 2 + 12, _mm256_extractf128_ps(_r4, 0));
_mm_store_ps(p0 + 16 * 3 + 4, _mm256_extractf128_ps(_r4, 1));
_mm256_store_ps(p0 + 16 * 3 + 8, _r5);
_mm256_storeu_ps(p0 + 16 * 4 + 4, _r6);
_mm_store_ps(p0 + 16 * 4 + 12, _mm256_extractf128_ps(_r7, 0));
_mm_store_ps(p0 + 16 * 5 + 4, _mm256_extractf128_ps(_r7, 1));
_mm256_store_ps(p0 + 16 * 5 + 8, _r8);
_mm256_storeu_ps(p0 + 16 * 6 + 4, _r9);
_mm_store_ps(p0 + 16 * 6 + 12, _mm256_extractf128_ps(_ra, 0));
_mm_store_ps(p0 + 16 * 7 + 4, _mm256_extractf128_ps(_ra, 1));
_mm256_store_ps(p0 + 16 * 7 + 8, _rb);
pp += 96;
jj += 12;
}
else
{
transpose8x4_ps(_r0, _r1, _r2, _r3);
_mm_store_ps(p0 + 4, _mm256_extractf128_ps(_r0, 0));
_mm_store_ps(p0 + 16 + 4, _mm256_extractf128_ps(_r0, 1));
_mm_store_ps(p0 + 16 * 2 + 4, _mm256_extractf128_ps(_r1, 0));
_mm_store_ps(p0 + 16 * 3 + 4, _mm256_extractf128_ps(_r1, 1));
_mm_store_ps(p0 + 16 * 4 + 4, _mm256_extractf128_ps(_r2, 0));
_mm_store_ps(p0 + 16 * 5 + 4, _mm256_extractf128_ps(_r2, 1));
_mm_store_ps(p0 + 16 * 6 + 4, _mm256_extractf128_ps(_r3, 0));
_mm_store_ps(p0 + 16 * 7 + 4, _mm256_extractf128_ps(_r3, 1));
pp += 32;
jj += 4;
}
p0 += out_hstep * 16;
}
if (j % 16 == 8)
{
__m256 _r0 = _mm256_load_ps(pp);
__m256 _r1 = _mm256_load_ps(pp + 8);
__m256 _r2 = _mm256_load_ps(pp + 8 * 2);
__m256 _r3 = _mm256_load_ps(pp + 8 * 3);
if (max_jj > 4)
{
__m256 _r4 = _mm256_load_ps(pp + 8 * 4);
__m256 _r5 = _mm256_load_ps(pp + 8 * 5);
__m256 _r6 = _mm256_load_ps(pp + 8 * 6);
__m256 _r7 = _mm256_load_ps(pp + 8 * 7);
transpose8x8_ps(_r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7);
_mm256_store_ps(p0 + 8, _r0);
_mm256_store_ps(p0 + 16 + 8, _r1);
_mm256_store_ps(p0 + 16 * 2 + 8, _r2);
_mm256_store_ps(p0 + 16 * 3 + 8, _r3);
_mm256_store_ps(p0 + 16 * 4 + 8, _r4);
_mm256_store_ps(p0 + 16 * 5 + 8, _r5);
_mm256_store_ps(p0 + 16 * 6 + 8, _r6);
_mm256_store_ps(p0 + 16 * 7 + 8, _r7);
pp += 64;
jj += 8;
}
else
{
transpose8x4_ps(_r0, _r1, _r2, _r3);
_mm_store_ps(p0 + 8, _mm256_extractf128_ps(_r0, 0));
_mm_store_ps(p0 + 16 + 8, _mm256_extractf128_ps(_r0, 1));
_mm_store_ps(p0 + 16 * 2 + 8, _mm256_extractf128_ps(_r1, 0));
_mm_store_ps(p0 + 16 * 3 + 8, _mm256_extractf128_ps(_r1, 1));
_mm_store_ps(p0 + 16 * 4 + 8, _mm256_extractf128_ps(_r2, 0));
_mm_store_ps(p0 + 16 * 5 + 8, _mm256_extractf128_ps(_r2, 1));
_mm_store_ps(p0 + 16 * 6 + 8, _mm256_extractf128_ps(_r3, 0));
_mm_store_ps(p0 + 16 * 7 + 8, _mm256_extractf128_ps(_r3, 1));
pp += 32;
jj += 4;
}
p0 += out_hstep * 16;
}
if (j % 16 == 12)
{
__m256 _r0 = _mm256_load_ps(pp);
__m256 _r1 = _mm256_load_ps(pp + 8);
__m256 _r2 = _mm256_load_ps(pp + 8 * 2);
__m256 _r3 = _mm256_load_ps(pp + 8 * 3);
transpose8x4_ps(_r0, _r1, _r2, _r3);
_mm_store_ps(p0 + 12, _mm256_extractf128_ps(_r0, 0));
_mm_store_ps(p0 + 16 + 12, _mm256_extractf128_ps(_r0, 1));
_mm_store_ps(p0 + 16 * 2 + 12, _mm256_extractf128_ps(_r1, 0));
_mm_store_ps(p0 + 16 * 3 + 12, _mm256_extractf128_ps(_r1, 1));
_mm_store_ps(p0 + 16 * 4 + 12, _mm256_extractf128_ps(_r2, 0));
_mm_store_ps(p0 + 16 * 5 + 12, _mm256_extractf128_ps(_r2, 1));
_mm_store_ps(p0 + 16 * 6 + 12, _mm256_extractf128_ps(_r3, 0));
_mm_store_ps(p0 + 16 * 7 + 12, _mm256_extractf128_ps(_r3, 1));
pp += 32;
p0 += out_hstep * 16;
jj += 4;
}
for (; jj + 15 < max_jj; jj += 16)
{
__m256 _r0 = _mm256_load_ps(pp);
__m256 _r1 = _mm256_load_ps(pp + 8);
__m256 _r2 = _mm256_load_ps(pp + 8 * 2);
__m256 _r3 = _mm256_load_ps(pp + 8 * 3);
__m256 _r4 = _mm256_load_ps(pp + 8 * 4);
__m256 _r5 = _mm256_load_ps(pp + 8 * 5);
__m256 _r6 = _mm256_load_ps(pp + 8 * 6);
__m256 _r7 = _mm256_load_ps(pp + 8 * 7);
__m256 _r8 = _mm256_load_ps(pp + 8 * 8);
__m256 _r9 = _mm256_load_ps(pp + 8 * 9);
__m256 _ra = _mm256_load_ps(pp + 8 * 10);
__m256 _rb = _mm256_load_ps(pp + 8 * 11);
__m256 _rc = _mm256_load_ps(pp + 8 * 12);
__m256 _rd = _mm256_load_ps(pp + 8 * 13);
__m256 _re = _mm256_load_ps(pp + 8 * 14);
__m256 _rf = _mm256_load_ps(pp + 8 * 15);
transpose8x16_ps(_r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7, _r8, _r9, _ra, _rb, _rc, _rd, _re, _rf);
_mm256_store_ps(p0, _r0);
_mm256_store_ps(p0 + 8, _r1);
_mm256_store_ps(p0 + 8 * 2, _r2);
_mm256_store_ps(p0 + 8 * 3, _r3);
_mm256_store_ps(p0 + 8 * 4, _r4);
_mm256_store_ps(p0 + 8 * 5, _r5);
_mm256_store_ps(p0 + 8 * 6, _r6);
_mm256_store_ps(p0 + 8 * 7, _r7);
_mm256_store_ps(p0 + 8 * 8, _r8);
_mm256_store_ps(p0 + 8 * 9, _r9);
_mm256_store_ps(p0 + 8 * 10, _ra);
_mm256_store_ps(p0 + 8 * 11, _rb);
_mm256_store_ps(p0 + 8 * 12, _rc);
_mm256_store_ps(p0 + 8 * 13, _rd);
_mm256_store_ps(p0 + 8 * 14, _re);
_mm256_store_ps(p0 + 8 * 15, _rf);
pp += 128;
p0 += out_hstep * 16;
}
for (; jj + 11 < max_jj; jj += 12)
{
__m256 _r0 = _mm256_load_ps(pp);
__m256 _r1 = _mm256_load_ps(pp + 8);
__m256 _r2 = _mm256_load_ps(pp + 8 * 2);
__m256 _r3 = _mm256_load_ps(pp + 8 * 3);
__m256 _r4 = _mm256_load_ps(pp + 8 * 4);
__m256 _r5 = _mm256_load_ps(pp + 8 * 5);
__m256 _r6 = _mm256_load_ps(pp + 8 * 6);
__m256 _r7 = _mm256_load_ps(pp + 8 * 7);
__m256 _r8 = _mm256_load_ps(pp + 8 * 8);
__m256 _r9 = _mm256_load_ps(pp + 8 * 9);
__m256 _ra = _mm256_load_ps(pp + 8 * 10);
__m256 _rb = _mm256_load_ps(pp + 8 * 11);
transpose8x12_ps(_r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7, _r8, _r9, _ra, _rb);
_mm256_store_ps(p0, _r0);
_mm_store_ps(p0 + 8, _mm256_extractf128_ps(_r1, 0));
_mm_store_ps(p0 + 16, _mm256_extractf128_ps(_r1, 1));
_mm256_storeu_ps(p0 + 16 + 4, _r2);
_mm256_store_ps(p0 + 16 * 2, _r3);
_mm_store_ps(p0 + 16 * 2 + 8, _mm256_extractf128_ps(_r4, 0));
_mm_store_ps(p0 + 16 * 3, _mm256_extractf128_ps(_r4, 1));
_mm256_storeu_ps(p0 + 16 * 3 + 4, _r5);
_mm256_store_ps(p0 + 16 * 4, _r6);
_mm_store_ps(p0 + 16 * 4 + 8, _mm256_extractf128_ps(_r7, 0));
_mm_store_ps(p0 + 16 * 5, _mm256_extractf128_ps(_r7, 1));
_mm256_storeu_ps(p0 + 16 * 5 + 4, _r8);
_mm256_store_ps(p0 + 16 * 6, _r9);
_mm_store_ps(p0 + 16 * 6 + 8, _mm256_extractf128_ps(_ra, 0));
_mm_store_ps(p0 + 16 * 7, _mm256_extractf128_ps(_ra, 1));
_mm256_storeu_ps(p0 + 16 * 7 + 4, _rb);
pp += 96;
p0 += out_hstep * 16;
}
for (; jj + 7 < max_jj; jj += 8)
{
__m256 _r0 = _mm256_load_ps(pp);
__m256 _r1 = _mm256_load_ps(pp + 8);
__m256 _r2 = _mm256_load_ps(pp + 8 * 2);
__m256 _r3 = _mm256_load_ps(pp + 8 * 3);
__m256 _r4 = _mm256_load_ps(pp + 8 * 4);
__m256 _r5 = _mm256_load_ps(pp + 8 * 5);
__m256 _r6 = _mm256_load_ps(pp + 8 * 6);
__m256 _r7 = _mm256_load_ps(pp + 8 * 7);
transpose8x8_ps(_r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7);
_mm256_store_ps(p0, _r0);
_mm256_store_ps(p0 + 16, _r1);
_mm256_store_ps(p0 + 16 * 2, _r2);
_mm256_store_ps(p0 + 16 * 3, _r3);
_mm256_store_ps(p0 + 16 * 4, _r4);
_mm256_store_ps(p0 + 16 * 5, _r5);
_mm256_store_ps(p0 + 16 * 6, _r6);
_mm256_store_ps(p0 + 16 * 7, _r7);
pp += 64;
p0 += out_hstep * 16;
}
for (; jj + 3 < max_jj; jj += 4)
{
__m256 _r0 = _mm256_load_ps(pp);
__m256 _r1 = _mm256_load_ps(pp + 8);
__m256 _r2 = _mm256_load_ps(pp + 8 * 2);
__m256 _r3 = _mm256_load_ps(pp + 8 * 3);
transpose8x4_ps(_r0, _r1, _r2, _r3);
_mm_store_ps(p0, _mm256_extractf128_ps(_r0, 0));
_mm_store_ps(p0 + 16, _mm256_extractf128_ps(_r0, 1));
_mm_store_ps(p0 + 16 * 2, _mm256_extractf128_ps(_r1, 0));
_mm_store_ps(p0 + 16 * 3, _mm256_extractf128_ps(_r1, 1));
_mm_store_ps(p0 + 16 * 4, _mm256_extractf128_ps(_r2, 0));
_mm_store_ps(p0 + 16 * 5, _mm256_extractf128_ps(_r2, 1));
_mm_store_ps(p0 + 16 * 6, _mm256_extractf128_ps(_r3, 0));
_mm_store_ps(p0 + 16 * 7, _mm256_extractf128_ps(_r3, 1));
pp += 32;
p0 += out_hstep * 16;
}
}
#endif // __AVX512F__
if (out_elempack == 8)
{
float* p0 = (float*)top_blob + (j / 8 * 8) * out_hstep + (i + ii) * 8;
int jj = 0;
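            // aligned case: whole 8x8 blocks are transposed in registers and
            // stored as full rows of the 8-wide output pack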
if (j % 8 == 4)
{
__m256 _r0 = _mm256_load_ps(pp);
__m256 _r1 = _mm256_load_ps(pp + 8);
__m256 _r2 = _mm256_load_ps(pp + 8 * 2);
__m256 _r3 = _mm256_load_ps(pp + 8 * 3);
transpose8x4_ps(_r0, _r1, _r2, _r3);
_mm_store_ps(p0 + 4, _mm256_extractf128_ps(_r0, 0));
_mm_store_ps(p0 + 8 + 4, _mm256_extractf128_ps(_r0, 1));
_mm_store_ps(p0 + 8 * 2 + 4, _mm256_extractf128_ps(_r1, 0));
_mm_store_ps(p0 + 8 * 3 + 4, _mm256_extractf128_ps(_r1, 1));
_mm_store_ps(p0 + 8 * 4 + 4, _mm256_extractf128_ps(_r2, 0));
_mm_store_ps(p0 + 8 * 5 + 4, _mm256_extractf128_ps(_r2, 1));
_mm_store_ps(p0 + 8 * 6 + 4, _mm256_extractf128_ps(_r3, 0));
_mm_store_ps(p0 + 8 * 7 + 4, _mm256_extractf128_ps(_r3, 1));
pp += 32;
p0 += out_hstep * 8;
jj += 4;
}
for (; jj + 7 < max_jj; jj += 8)
{
__m256 _r0 = _mm256_load_ps(pp);
__m256 _r1 = _mm256_load_ps(pp + 8);
__m256 _r2 = _mm256_load_ps(pp + 8 * 2);
__m256 _r3 = _mm256_load_ps(pp + 8 * 3);
__m256 _r4 = _mm256_load_ps(pp + 8 * 4);
__m256 _r5 = _mm256_load_ps(pp + 8 * 5);
__m256 _r6 = _mm256_load_ps(pp + 8 * 6);
__m256 _r7 = _mm256_load_ps(pp + 8 * 7);
transpose8x8_ps(_r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7);
_mm256_storeu_ps(p0, _r0);
_mm256_storeu_ps(p0 + 8, _r1);
_mm256_storeu_ps(p0 + 8 * 2, _r2);
_mm256_storeu_ps(p0 + 8 * 3, _r3);
_mm256_storeu_ps(p0 + 8 * 4, _r4);
_mm256_storeu_ps(p0 + 8 * 5, _r5);
_mm256_storeu_ps(p0 + 8 * 6, _r6);
_mm256_storeu_ps(p0 + 8 * 7, _r7);
pp += 64;
p0 += out_hstep * 8;
}
for (; jj + 3 < max_jj; jj += 4)
{
__m256 _r0 = _mm256_load_ps(pp);
__m256 _r1 = _mm256_load_ps(pp + 8);
__m256 _r2 = _mm256_load_ps(pp + 8 * 2);
__m256 _r3 = _mm256_load_ps(pp + 8 * 3);
transpose8x4_ps(_r0, _r1, _r2, _r3);
_mm_store_ps(p0, _mm256_extractf128_ps(_r0, 0));
_mm_store_ps(p0 + 8, _mm256_extractf128_ps(_r0, 1));
_mm_store_ps(p0 + 8 * 2, _mm256_extractf128_ps(_r1, 0));
_mm_store_ps(p0 + 8 * 3, _mm256_extractf128_ps(_r1, 1));
_mm_store_ps(p0 + 8 * 4, _mm256_extractf128_ps(_r2, 0));
_mm_store_ps(p0 + 8 * 5, _mm256_extractf128_ps(_r2, 1));
_mm_store_ps(p0 + 8 * 6, _mm256_extractf128_ps(_r3, 0));
_mm_store_ps(p0 + 8 * 7, _mm256_extractf128_ps(_r3, 1));
pp += 32;
p0 += out_hstep * 8;
}
}
if (out_elempack == 4)
{
float* p0 = (float*)top_blob + j * out_hstep + (i + ii) * 4;
for (int jj = 0; jj + 3 < max_jj; jj += 4)
{
__m256 _r0 = _mm256_load_ps(pp);
__m256 _r1 = _mm256_load_ps(pp + 8);
__m256 _r2 = _mm256_load_ps(pp + 8 * 2);
__m256 _r3 = _mm256_load_ps(pp + 8 * 3);
transpose8x4_ps(_r0, _r1, _r2, _r3);
_mm256_storeu_ps(p0, _r0);
_mm256_storeu_ps(p0 + 8, _r1);
_mm256_storeu_ps(p0 + 8 * 2, _r2);
_mm256_storeu_ps(p0 + 8 * 3, _r3);
pp += 32;
p0 += out_hstep * 4;
}
}
if (out_elempack == 1)
{
float* p0 = (float*)top_blob + j * out_hstep + (i + ii);
for (int jj = 0; jj < max_jj; jj += 1)
{
__m256 _r0 = _mm256_load_ps(pp);
_mm256_storeu_ps(p0, _r0);
pp += 8;
p0 += out_hstep;
}
}
}
#endif // __AVX__
for (; ii + 3 < max_ii; ii += 4)
{
#if __AVX__
#if __AVX512F__
if (out_elempack == 16)
{
float* p0 = (float*)top_blob + (j / 16 * 16) * out_hstep + (i + ii) * 16;
int jj = 0;
if (j % 16 == 4)
{
__m128 _r0 = _mm_load_ps(pp);
__m128 _r1 = _mm_load_ps(pp + 4);
__m128 _r2 = _mm_load_ps(pp + 4 * 2);
__m128 _r3 = _mm_load_ps(pp + 4 * 3);
if (max_jj > 4)
{
// assert max_jj > 8
__m128 _r4 = _mm_load_ps(pp + 4 * 4);
__m128 _r5 = _mm_load_ps(pp + 4 * 5);
__m128 _r6 = _mm_load_ps(pp + 4 * 6);
__m128 _r7 = _mm_load_ps(pp + 4 * 7);
__m128 _r8 = _mm_load_ps(pp + 4 * 8);
__m128 _r9 = _mm_load_ps(pp + 4 * 9);
__m128 _ra = _mm_load_ps(pp + 4 * 10);
__m128 _rb = _mm_load_ps(pp + 4 * 11);
_MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3);
_MM_TRANSPOSE4_PS(_r4, _r5, _r6, _r7);
_MM_TRANSPOSE4_PS(_r8, _r9, _ra, _rb);
_mm_store_ps(p0 + 4, _r0);
_mm_store_ps(p0 + 4 + 4, _r4);
_mm_store_ps(p0 + 4 + 8, _r8);
_mm_store_ps(p0 + 16 + 4, _r1);
_mm_store_ps(p0 + 16 + 4 + 4, _r5);
_mm_store_ps(p0 + 16 + 4 + 8, _r9);
_mm_store_ps(p0 + 16 * 2 + 4, _r2);
_mm_store_ps(p0 + 16 * 2 + 4 + 4, _r6);
_mm_store_ps(p0 + 16 * 2 + 4 + 8, _ra);
_mm_store_ps(p0 + 16 * 3 + 4, _r3);
_mm_store_ps(p0 + 16 * 3 + 4 + 4, _r7);
_mm_store_ps(p0 + 16 * 3 + 4 + 8, _rb);
pp += 48;
jj += 12;
}
else
{
_MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3);
_mm_store_ps(p0 + 4, _r0);
_mm_store_ps(p0 + 16 + 4, _r1);
_mm_store_ps(p0 + 16 * 2 + 4, _r2);
_mm_store_ps(p0 + 16 * 3 + 4, _r3);
pp += 16;
jj += 4;
}
p0 += out_hstep * 16;
}
if (j % 16 == 8)
{
__m128 _r0 = _mm_load_ps(pp);
__m128 _r1 = _mm_load_ps(pp + 4);
__m128 _r2 = _mm_load_ps(pp + 4 * 2);
__m128 _r3 = _mm_load_ps(pp + 4 * 3);
if (max_jj > 4)
{
__m128 _r4 = _mm_load_ps(pp + 4 * 4);
__m128 _r5 = _mm_load_ps(pp + 4 * 5);
__m128 _r6 = _mm_load_ps(pp + 4 * 6);
__m128 _r7 = _mm_load_ps(pp + 4 * 7);
_MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3);
_MM_TRANSPOSE4_PS(_r4, _r5, _r6, _r7);
_mm_store_ps(p0 + 8, _r0);
_mm_store_ps(p0 + 8 + 4, _r4);
_mm_store_ps(p0 + 16 + 8, _r1);
_mm_store_ps(p0 + 16 + 8 + 4, _r5);
_mm_store_ps(p0 + 16 * 2 + 8, _r2);
_mm_store_ps(p0 + 16 * 2 + 8 + 4, _r6);
_mm_store_ps(p0 + 16 * 3 + 8, _r3);
_mm_store_ps(p0 + 16 * 3 + 8 + 4, _r7);
pp += 32;
jj += 8;
}
else
{
_MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3);
_mm_store_ps(p0 + 8, _r0);
_mm_store_ps(p0 + 16 + 8, _r1);
_mm_store_ps(p0 + 16 * 2 + 8, _r2);
_mm_store_ps(p0 + 16 * 3 + 8, _r3);
pp += 16;
jj += 4;
}
p0 += out_hstep * 16;
}
if (j % 16 == 12)
{
__m128 _r0 = _mm_load_ps(pp);
__m128 _r1 = _mm_load_ps(pp + 4);
__m128 _r2 = _mm_load_ps(pp + 4 * 2);
__m128 _r3 = _mm_load_ps(pp + 4 * 3);
_MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3);
_mm_store_ps(p0 + 12, _r0);
_mm_store_ps(p0 + 16 + 12, _r1);
_mm_store_ps(p0 + 16 * 2 + 12, _r2);
_mm_store_ps(p0 + 16 * 3 + 12, _r3);
pp += 16;
p0 += out_hstep * 16;
jj += 4;
}
for (; jj + 15 < max_jj; jj += 16)
{
__m128 _r0 = _mm_load_ps(pp);
__m128 _r1 = _mm_load_ps(pp + 4);
__m128 _r2 = _mm_load_ps(pp + 4 * 2);
__m128 _r3 = _mm_load_ps(pp + 4 * 3);
__m128 _r4 = _mm_load_ps(pp + 4 * 4);
__m128 _r5 = _mm_load_ps(pp + 4 * 5);
__m128 _r6 = _mm_load_ps(pp + 4 * 6);
__m128 _r7 = _mm_load_ps(pp + 4 * 7);
__m128 _r8 = _mm_load_ps(pp + 4 * 8);
__m128 _r9 = _mm_load_ps(pp + 4 * 9);
__m128 _ra = _mm_load_ps(pp + 4 * 10);
__m128 _rb = _mm_load_ps(pp + 4 * 11);
__m128 _rc = _mm_load_ps(pp + 4 * 12);
__m128 _rd = _mm_load_ps(pp + 4 * 13);
__m128 _re = _mm_load_ps(pp + 4 * 14);
__m128 _rf = _mm_load_ps(pp + 4 * 15);
_MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3);
_MM_TRANSPOSE4_PS(_r4, _r5, _r6, _r7);
_MM_TRANSPOSE4_PS(_r8, _r9, _ra, _rb);
_MM_TRANSPOSE4_PS(_rc, _rd, _re, _rf);
_mm_store_ps(p0, _r0);
_mm_store_ps(p0 + 4, _r4);
_mm_store_ps(p0 + 4 * 2, _r8);
_mm_store_ps(p0 + 4 * 3, _rc);
_mm_store_ps(p0 + 4 * 4, _r1);
_mm_store_ps(p0 + 4 * 5, _r5);
_mm_store_ps(p0 + 4 * 6, _r9);
_mm_store_ps(p0 + 4 * 7, _rd);
_mm_store_ps(p0 + 4 * 8, _r2);
_mm_store_ps(p0 + 4 * 9, _r6);
_mm_store_ps(p0 + 4 * 10, _ra);
_mm_store_ps(p0 + 4 * 11, _re);
_mm_store_ps(p0 + 4 * 12, _r3);
_mm_store_ps(p0 + 4 * 13, _r7);
_mm_store_ps(p0 + 4 * 14, _rb);
_mm_store_ps(p0 + 4 * 15, _rf);
pp += 64;
p0 += out_hstep * 16;
}
for (; jj + 11 < max_jj; jj += 12)
{
__m128 _r0 = _mm_load_ps(pp);
__m128 _r1 = _mm_load_ps(pp + 4);
__m128 _r2 = _mm_load_ps(pp + 4 * 2);
__m128 _r3 = _mm_load_ps(pp + 4 * 3);
__m128 _r4 = _mm_load_ps(pp + 4 * 4);
__m128 _r5 = _mm_load_ps(pp + 4 * 5);
__m128 _r6 = _mm_load_ps(pp + 4 * 6);
__m128 _r7 = _mm_load_ps(pp + 4 * 7);
__m128 _r8 = _mm_load_ps(pp + 4 * 8);
__m128 _r9 = _mm_load_ps(pp + 4 * 9);
__m128 _ra = _mm_load_ps(pp + 4 * 10);
__m128 _rb = _mm_load_ps(pp + 4 * 11);
_MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3);
_MM_TRANSPOSE4_PS(_r4, _r5, _r6, _r7);
_MM_TRANSPOSE4_PS(_r8, _r9, _ra, _rb);
_mm_store_ps(p0, _r0);
_mm_store_ps(p0 + 4, _r4);
_mm_store_ps(p0 + 8, _r8);
_mm_store_ps(p0 + 16, _r1);
_mm_store_ps(p0 + 16 + 4, _r5);
_mm_store_ps(p0 + 16 + 8, _r9);
_mm_store_ps(p0 + 16 * 2, _r2);
_mm_store_ps(p0 + 16 * 2 + 4, _r6);
_mm_store_ps(p0 + 16 * 2 + 8, _ra);
_mm_store_ps(p0 + 16 * 3, _r3);
_mm_store_ps(p0 + 16 * 3 + 4, _r7);
_mm_store_ps(p0 + 16 * 3 + 8, _rb);
pp += 48;
p0 += out_hstep * 16;
}
for (; jj + 7 < max_jj; jj += 8)
{
__m128 _r0 = _mm_load_ps(pp);
__m128 _r1 = _mm_load_ps(pp + 4);
__m128 _r2 = _mm_load_ps(pp + 4 * 2);
__m128 _r3 = _mm_load_ps(pp + 4 * 3);
__m128 _r4 = _mm_load_ps(pp + 4 * 4);
__m128 _r5 = _mm_load_ps(pp + 4 * 5);
__m128 _r6 = _mm_load_ps(pp + 4 * 6);
__m128 _r7 = _mm_load_ps(pp + 4 * 7);
_MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3);
_MM_TRANSPOSE4_PS(_r4, _r5, _r6, _r7);
_mm_store_ps(p0, _r0);
_mm_store_ps(p0 + 4, _r4);
_mm_store_ps(p0 + 16, _r1);
_mm_store_ps(p0 + 16 + 4, _r5);
_mm_store_ps(p0 + 16 * 2, _r2);
_mm_store_ps(p0 + 16 * 2 + 4, _r6);
_mm_store_ps(p0 + 16 * 3, _r3);
_mm_store_ps(p0 + 16 * 3 + 4, _r7);
pp += 32;
p0 += out_hstep * 16;
}
for (; jj + 3 < max_jj; jj += 4)
{
__m128 _r0 = _mm_load_ps(pp);
__m128 _r1 = _mm_load_ps(pp + 4);
__m128 _r2 = _mm_load_ps(pp + 4 * 2);
__m128 _r3 = _mm_load_ps(pp + 4 * 3);
_MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3);
_mm_store_ps(p0, _r0);
_mm_store_ps(p0 + 16, _r1);
_mm_store_ps(p0 + 16 * 2, _r2);
_mm_store_ps(p0 + 16 * 3, _r3);
pp += 16;
p0 += out_hstep * 16;
}
}
#endif // __AVX512F__
if (out_elempack == 8)
{
float* p0 = (float*)top_blob + (j / 8 * 8) * out_hstep + (i + ii) * 8;
int jj = 0;
if (j % 8 == 4)
{
__m128 _r0 = _mm_load_ps(pp);
__m128 _r1 = _mm_load_ps(pp + 4);
__m128 _r2 = _mm_load_ps(pp + 4 * 2);
__m128 _r3 = _mm_load_ps(pp + 4 * 3);
_MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3);
_mm_store_ps(p0 + 4, _r0);
_mm_store_ps(p0 + 8 + 4, _r1);
_mm_store_ps(p0 + 8 * 2 + 4, _r2);
_mm_store_ps(p0 + 8 * 3 + 4, _r3);
pp += 16;
p0 += out_hstep * 8;
jj += 4;
}
for (; jj + 7 < max_jj; jj += 8)
{
__m128 _r0 = _mm_load_ps(pp);
__m128 _r1 = _mm_load_ps(pp + 4);
__m128 _r2 = _mm_load_ps(pp + 4 * 2);
__m128 _r3 = _mm_load_ps(pp + 4 * 3);
__m128 _r4 = _mm_load_ps(pp + 4 * 4);
__m128 _r5 = _mm_load_ps(pp + 4 * 5);
__m128 _r6 = _mm_load_ps(pp + 4 * 6);
__m128 _r7 = _mm_load_ps(pp + 4 * 7);
_MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3);
_MM_TRANSPOSE4_PS(_r4, _r5, _r6, _r7);
_mm_storeu_ps(p0, _r0);
_mm_storeu_ps(p0 + 4, _r4);
_mm_storeu_ps(p0 + 4 * 2, _r1);
_mm_storeu_ps(p0 + 4 * 3, _r5);
_mm_storeu_ps(p0 + 4 * 4, _r2);
_mm_storeu_ps(p0 + 4 * 5, _r6);
_mm_storeu_ps(p0 + 4 * 6, _r3);
_mm_storeu_ps(p0 + 4 * 7, _r7);
pp += 32;
p0 += out_hstep * 8;
}
for (; jj + 3 < max_jj; jj += 4)
{
__m128 _r0 = _mm_load_ps(pp);
__m128 _r1 = _mm_load_ps(pp + 4);
__m128 _r2 = _mm_load_ps(pp + 4 * 2);
__m128 _r3 = _mm_load_ps(pp + 4 * 3);
_MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3);
_mm_store_ps(p0, _r0);
_mm_store_ps(p0 + 8, _r1);
_mm_store_ps(p0 + 8 * 2, _r2);
_mm_store_ps(p0 + 8 * 3, _r3);
pp += 16;
p0 += out_hstep * 8;
}
}
#endif // __AVX__
if (out_elempack == 4)
{
float* p0 = (float*)top_blob + j * out_hstep + (i + ii) * 4;
for (int jj = 0; jj + 3 < max_jj; jj += 4)
{
__m128 _r0 = _mm_load_ps(pp);
__m128 _r1 = _mm_load_ps(pp + 4);
__m128 _r2 = _mm_load_ps(pp + 4 * 2);
__m128 _r3 = _mm_load_ps(pp + 4 * 3);
_MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3);
_mm_storeu_ps(p0, _r0);
_mm_storeu_ps(p0 + 4, _r1);
_mm_storeu_ps(p0 + 4 * 2, _r2);
_mm_storeu_ps(p0 + 4 * 3, _r3);
pp += 16;
p0 += out_hstep * 4;
}
}
if (out_elempack == 1)
{
float* p0 = (float*)top_blob + j * out_hstep + (i + ii);
for (int jj = 0; jj < max_jj; jj += 1)
{
__m128 _r0 = _mm_load_ps(pp);
_mm_storeu_ps(p0, _r0);
pp += 4;
p0 += out_hstep;
}
}
}
#endif // __SSE2__
for (; ii + 1 < max_ii; ii += 2)
{
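        // two-lane tail: pp interleaves the two result lanes (ii and ii + 1),
        // even indices belonging to ii and odd indices to ii + 1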
#if __SSE2__
#if __AVX__
#if __AVX512F__
if (out_elempack == 16)
{
float* p0 = (float*)top_blob + (j / 16 * 16) * out_hstep + (i + ii) * 16;
int jj = 0;
if (j % 16 == 4)
{
if (max_jj > 4)
{
// assert max_jj > 8
p0[0 + 4] = pp[0];
p0[1 + 4] = pp[2];
p0[2 + 4] = pp[4];
p0[3 + 4] = pp[6];
p0[4 + 4] = pp[8];
p0[5 + 4] = pp[10];
p0[6 + 4] = pp[12];
p0[7 + 4] = pp[14];
p0[8 + 4] = pp[16];
p0[9 + 4] = pp[18];
p0[10 + 4] = pp[20];
p0[11 + 4] = pp[22];
p0[16 + 4] = pp[1];
p0[17 + 4] = pp[3];
p0[18 + 4] = pp[5];
p0[19 + 4] = pp[7];
p0[20 + 4] = pp[9];
p0[21 + 4] = pp[11];
p0[22 + 4] = pp[13];
p0[23 + 4] = pp[15];
p0[24 + 4] = pp[17];
p0[25 + 4] = pp[19];
p0[26 + 4] = pp[21];
p0[27 + 4] = pp[23];
pp += 24;
jj += 12;
}
else
{
p0[0 + 4] = pp[0];
p0[1 + 4] = pp[2];
p0[2 + 4] = pp[4];
p0[3 + 4] = pp[6];
p0[16 + 4] = pp[1];
p0[17 + 4] = pp[3];
p0[18 + 4] = pp[5];
p0[19 + 4] = pp[7];
pp += 8;
jj += 4;
}
p0 += out_hstep * 16;
}
if (j % 16 == 8)
{
if (max_jj > 4)
{
p0[0 + 8] = pp[0];
p0[1 + 8] = pp[2];
p0[2 + 8] = pp[4];
p0[3 + 8] = pp[6];
p0[4 + 8] = pp[8];
p0[5 + 8] = pp[10];
p0[6 + 8] = pp[12];
p0[7 + 8] = pp[14];
p0[16 + 8] = pp[1];
p0[17 + 8] = pp[3];
p0[18 + 8] = pp[5];
p0[19 + 8] = pp[7];
p0[20 + 8] = pp[9];
p0[21 + 8] = pp[11];
p0[22 + 8] = pp[13];
p0[23 + 8] = pp[15];
pp += 16;
jj += 8;
}
else
{
p0[0 + 8] = pp[0];
p0[1 + 8] = pp[2];
p0[2 + 8] = pp[4];
p0[3 + 8] = pp[6];
p0[16 + 8] = pp[1];
p0[17 + 8] = pp[3];
p0[18 + 8] = pp[5];
p0[19 + 8] = pp[7];
pp += 8;
jj += 4;
}
p0 += out_hstep * 16;
}
if (j % 16 == 12)
{
p0[0 + 12] = pp[0];
p0[1 + 12] = pp[2];
p0[2 + 12] = pp[4];
p0[3 + 12] = pp[6];
p0[16 + 12] = pp[1];
p0[17 + 12] = pp[3];
p0[18 + 12] = pp[5];
p0[19 + 12] = pp[7];
pp += 8;
p0 += out_hstep * 16;
jj += 4;
}
for (; jj + 15 < max_jj; jj += 16)
{
p0[0] = pp[0];
p0[1] = pp[2];
p0[2] = pp[4];
p0[3] = pp[6];
p0[4] = pp[8];
p0[5] = pp[10];
p0[6] = pp[12];
p0[7] = pp[14];
p0[8] = pp[16];
p0[9] = pp[18];
p0[10] = pp[20];
p0[11] = pp[22];
p0[12] = pp[24];
p0[13] = pp[26];
p0[14] = pp[28];
p0[15] = pp[30];
p0[16] = pp[1];
p0[17] = pp[3];
p0[18] = pp[5];
p0[19] = pp[7];
p0[20] = pp[9];
p0[21] = pp[11];
p0[22] = pp[13];
p0[23] = pp[15];
p0[24] = pp[17];
p0[25] = pp[19];
p0[26] = pp[21];
p0[27] = pp[23];
p0[28] = pp[25];
p0[29] = pp[27];
p0[30] = pp[29];
p0[31] = pp[31];
pp += 32;
p0 += out_hstep * 16;
}
for (; jj + 11 < max_jj; jj += 12)
{
p0[0] = pp[0];
p0[1] = pp[2];
p0[2] = pp[4];
p0[3] = pp[6];
p0[4] = pp[8];
p0[5] = pp[10];
p0[6] = pp[12];
p0[7] = pp[14];
p0[8] = pp[16];
p0[9] = pp[18];
p0[10] = pp[20];
p0[11] = pp[22];
p0[16] = pp[1];
p0[17] = pp[3];
p0[18] = pp[5];
p0[19] = pp[7];
p0[20] = pp[9];
p0[21] = pp[11];
p0[22] = pp[13];
p0[23] = pp[15];
p0[24] = pp[17];
p0[25] = pp[19];
p0[26] = pp[21];
p0[27] = pp[23];
pp += 24;
p0 += out_hstep * 16;
}
for (; jj + 7 < max_jj; jj += 8)
{
p0[0] = pp[0];
p0[1] = pp[2];
p0[2] = pp[4];
p0[3] = pp[6];
p0[4] = pp[8];
p0[5] = pp[10];
p0[6] = pp[12];
p0[7] = pp[14];
p0[16] = pp[1];
p0[17] = pp[3];
p0[18] = pp[5];
p0[19] = pp[7];
p0[20] = pp[9];
p0[21] = pp[11];
p0[22] = pp[13];
p0[23] = pp[15];
pp += 16;
p0 += out_hstep * 16;
}
for (; jj + 3 < max_jj; jj += 4)
{
p0[0] = pp[0];
p0[1] = pp[2];
p0[2] = pp[4];
p0[3] = pp[6];
p0[16] = pp[1];
p0[17] = pp[3];
p0[18] = pp[5];
p0[19] = pp[7];
pp += 8;
p0 += out_hstep * 16;
}
}
#endif // __AVX512F__
if (out_elempack == 8)
{
float* p0 = (float*)top_blob + (j / 8 * 8) * out_hstep + (i + ii) * 8;
int jj = 0;
if (j % 8 == 4)
{
p0[0 + 4] = pp[0];
p0[1 + 4] = pp[2];
p0[2 + 4] = pp[4];
p0[3 + 4] = pp[6];
p0[8 + 4] = pp[1];
p0[9 + 4] = pp[3];
p0[10 + 4] = pp[5];
p0[11 + 4] = pp[7];
pp += 8;
p0 += out_hstep * 8;
jj += 4;
}
for (; jj + 7 < max_jj; jj += 8)
{
p0[0] = pp[0];
p0[1] = pp[2];
p0[2] = pp[4];
p0[3] = pp[6];
p0[4] = pp[8];
p0[5] = pp[10];
p0[6] = pp[12];
p0[7] = pp[14];
p0[8] = pp[1];
p0[9] = pp[3];
p0[10] = pp[5];
p0[11] = pp[7];
p0[12] = pp[9];
p0[13] = pp[11];
p0[14] = pp[13];
p0[15] = pp[15];
pp += 16;
p0 += out_hstep * 8;
}
for (; jj + 3 < max_jj; jj += 4)
{
p0[0] = pp[0];
p0[1] = pp[2];
p0[2] = pp[4];
p0[3] = pp[6];
p0[8] = pp[1];
p0[9] = pp[3];
p0[10] = pp[5];
p0[11] = pp[7];
pp += 8;
p0 += out_hstep * 8;
}
}
#endif // __AVX__
if (out_elempack == 4)
{
float* p0 = (float*)top_blob + j * out_hstep + (i + ii) * 4;
for (int jj = 0; jj + 3 < max_jj; jj += 4)
{
p0[0] = pp[0];
p0[1] = pp[2];
p0[2] = pp[4];
p0[3] = pp[6];
p0[4] = pp[1];
p0[5] = pp[3];
p0[6] = pp[5];
p0[7] = pp[7];
pp += 8;
p0 += out_hstep * 4;
}
}
#endif // __SSE2__
if (out_elempack == 1)
{
float* p0 = (float*)top_blob + j * out_hstep + (i + ii);
for (int jj = 0; jj < max_jj; jj += 1)
{
p0[0] = pp[0];
p0[1] = pp[1];
pp += 2;
p0 += out_hstep;
}
}
}
for (; ii < max_ii; ii += 1)
{
#if __SSE2__
#if __AVX__
#if __AVX512F__
if (out_elempack == 16)
{
float* p0 = (float*)top_blob + (j / 16 * 16) * out_hstep + (i + ii) * 16;
int jj = 0;
if (j % 16 == 4)
{
if (max_jj > 4)
{
// assert max_jj > 8
__m256 _r0 = _mm256_loadu_ps(pp);
__m128 _r1 = _mm_loadu_ps(pp + 8);
_mm256_storeu_ps(p0 + 4, _r0);
_mm_store_ps(p0 + 4 + 8, _r1);
pp += 12;
jj += 12;
}
else
{
__m128 _r0 = _mm_loadu_ps(pp);
_mm_storeu_ps(p0 + 4, _r0);
pp += 4;
jj += 4;
}
p0 += out_hstep * 16;
}
if (j % 16 == 8)
{
if (max_jj > 4)
{
__m256 _r0 = _mm256_loadu_ps(pp);
_mm256_store_ps(p0 + 8, _r0);
pp += 8;
jj += 8;
}
else
{
__m128 _r0 = _mm_loadu_ps(pp);
_mm_store_ps(p0 + 8, _r0);
pp += 4;
jj += 4;
}
p0 += out_hstep * 16;
}
if (j % 16 == 12)
{
__m128 _r0 = _mm_loadu_ps(pp);
_mm_store_ps(p0 + 12, _r0);
pp += 4;
p0 += out_hstep * 16;
jj += 4;
}
for (; jj + 15 < max_jj; jj += 16)
{
__m512 _r0 = _mm512_loadu_ps(pp);
_mm512_store_ps(p0, _r0);
pp += 16;
p0 += out_hstep * 16;
}
for (; jj + 11 < max_jj; jj += 12)
{
__m256 _r0 = _mm256_loadu_ps(pp);
__m128 _r1 = _mm_loadu_ps(pp + 8);
_mm256_store_ps(p0, _r0);
_mm_store_ps(p0 + 8, _r1);
pp += 12;
p0 += out_hstep * 16;
}
for (; jj + 7 < max_jj; jj += 8)
{
__m256 _r0 = _mm256_loadu_ps(pp);
_mm256_store_ps(p0, _r0);
pp += 8;
p0 += out_hstep * 16;
}
for (; jj + 3 < max_jj; jj += 4)
{
__m128 _r0 = _mm_loadu_ps(pp);
_mm_store_ps(p0, _r0);
pp += 4;
p0 += out_hstep * 16;
}
}
#endif // __AVX512F__
if (out_elempack == 8)
{
float* p0 = (float*)top_blob + (j / 8 * 8) * out_hstep + (i + ii) * 8;
int jj = 0;
if (j % 8 == 4)
{
p0[0 + 4] = pp[0];
p0[1 + 4] = pp[1];
p0[2 + 4] = pp[2];
p0[3 + 4] = pp[3];
pp += 4;
p0 += out_hstep * 8;
jj += 4;
}
for (; jj + 7 < max_jj; jj += 8)
{
__m256 _r0 = _mm256_loadu_ps(pp);
_mm256_store_ps(p0, _r0);
pp += 8;
p0 += out_hstep * 8;
}
for (; jj + 3 < max_jj; jj += 4)
{
p0[0] = pp[0];
p0[1] = pp[1];
p0[2] = pp[2];
p0[3] = pp[3];
pp += 4;
p0 += out_hstep * 8;
}
}
#endif // __AVX__
if (out_elempack == 4)
{
float* p0 = (float*)top_blob + j * out_hstep + (i + ii) * 4;
for (int jj = 0; jj + 3 < max_jj; jj += 4)
{
__m128 _r0 = _mm_loadu_ps(pp);
_mm_store_ps(p0, _r0);
pp += 4;
p0 += out_hstep * 4;
}
}
#endif // __SSE2__
if (out_elempack == 1)
{
float* p0 = (float*)top_blob + j * out_hstep + (i + ii);
for (int jj = 0; jj < max_jj; jj += 1)
{
p0[0] = pp[0];
pp += 1;
p0 += out_hstep;
}
}
}
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
movl %r8d, %ebp
movl %edx, %r10d
leaq 0x2c(%rsi), %rax
leaq 0x40(%rsi), %rdx
cmpl $0x3, 0x28(%rsi)
cmoveq %rdx, %rax
movl 0x18(%rsi), %r11d
movslq (%rax), %rbx
movq (%rdi), %rdi
leal (,%rbx,8), %r14d
movl $0x8, %r8d
movl %ebp, %eax
cltd
idivl %r8d
imull %r14d, %eax
cltq
movq %rax, -0x38(%rsp)
movslq %r14d, %r13
movl %ebx, %eax
imull %ebp, %eax
andl $0x80000007, %ebp # imm = 0x80000007
movl %ebp, -0x44(%rsp)
movslq %eax, %r8
leal (,%rbx,4), %eax
movslq %eax, %rdx
xorl %r14d, %r14d
testl %r9d, %r9d
movl $0x0, %eax
cmovgl %r9d, %eax
movl %eax, -0x48(%rsp)
movslq %r10d, %rax
movq %rsi, %r10
movslq %ecx, %rbp
movq %rax, %rcx
shlq $0x4, %rcx
leaq (%rcx,%r8,4), %rsi
movq %rsi, -0x8(%rsp)
leaq (%rcx,%r8,4), %rsi
addq $0x1c, %rsi
leaq 0x30(%rcx,%r8,4), %r12
leaq 0x60(%rcx,%r8,4), %rcx
movq %rcx, -0x40(%rsp)
leaq (,%r8,4), %rcx
leaq (%rcx,%rax,4), %r8
movq %rax, -0x18(%rsp)
leaq 0x4(%rcx,%rax,4), %r15
movq %r12, %rax
movq %r8, %r12
movq %rsi, %r8
shlq $0x2, %rdx
shlq $0x2, %rbx
movq %r13, -0x28(%rsp)
leaq (,%r13,4), %r13
movq %r10, -0x10(%rsp)
movq %rbp, -0x20(%rsp)
movq %r14, %rcx
orq $0x7, %rcx
cmpq %rbp, %rcx
jge 0x379d78
cmpl $0x8, %r11d
jne 0x379caa
movq %rax, -0x30(%rsp)
movq -0x38(%rsp), %rax
leaq (,%rax,4), %rsi
addq (%r10), %rsi
movq -0x18(%rsp), %rax
leaq (%r14,%rax), %rcx
shlq $0x5, %rcx
addq %rsi, %rcx
xorl %r10d, %r10d
cmpl $0x4, -0x44(%rsp)
jne 0x379b17
vmovaps (%rdi), %ymm0
vmovaps 0x20(%rdi), %ymm1
vmovaps 0x40(%rdi), %ymm2
vmovaps 0x60(%rdi), %ymm3
vunpcklps %ymm1, %ymm0, %ymm4 # ymm4 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
vunpckhps %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
vunpcklps %ymm3, %ymm2, %ymm1 # ymm1 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
vunpckhps %ymm3, %ymm2, %ymm2 # ymm2 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
vunpcklpd %ymm1, %ymm4, %ymm3 # ymm3 = ymm4[0],ymm1[0],ymm4[2],ymm1[2]
vunpckhpd %ymm1, %ymm4, %ymm1 # ymm1 = ymm4[1],ymm1[1],ymm4[3],ymm1[3]
vunpcklpd %ymm2, %ymm0, %ymm4 # ymm4 = ymm0[0],ymm2[0],ymm0[2],ymm2[2]
vunpckhpd %ymm2, %ymm0, %ymm0 # ymm0 = ymm0[1],ymm2[1],ymm0[3],ymm2[3]
vmovaps %xmm3, 0x10(%rcx)
vmovaps %xmm1, 0x30(%rcx)
vmovaps %xmm4, 0x50(%rcx)
vmovaps %xmm0, 0x70(%rcx)
vextractf128 $0x1, %ymm3, 0x90(%rcx)
vextractf128 $0x1, %ymm1, 0xb0(%rcx)
vextractf128 $0x1, %ymm4, 0xd0(%rcx)
vextractf128 $0x1, %ymm0, 0xf0(%rcx)
subq $-0x80, %rdi
movq -0x28(%rsp), %rax
leaq (%rcx,%rax,4), %rcx
movl $0x4, %eax
movl %eax, %r10d
leal 0x3(%r10), %esi
leaq 0xe0(%rcx), %rbp
leal 0x7(%r10), %eax
cmpl %r9d, %eax
jge 0x379c1e
vmovaps (%rdi), %ymm0
vmovaps 0x20(%rdi), %ymm1
vmovaps 0x40(%rdi), %ymm2
vmovaps 0x60(%rdi), %ymm3
vmovaps 0x80(%rdi), %ymm4
vmovaps 0xa0(%rdi), %ymm5
vmovaps 0xc0(%rdi), %ymm6
vmovaps 0xe0(%rdi), %ymm7
vunpcklps %ymm1, %ymm0, %ymm8 # ymm8 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
vunpckhps %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
vunpcklps %ymm3, %ymm2, %ymm1 # ymm1 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
vunpckhps %ymm3, %ymm2, %ymm2 # ymm2 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
vunpcklps %ymm5, %ymm4, %ymm3 # ymm3 = ymm4[0],ymm5[0],ymm4[1],ymm5[1],ymm4[4],ymm5[4],ymm4[5],ymm5[5]
vunpckhps %ymm5, %ymm4, %ymm4 # ymm4 = ymm4[2],ymm5[2],ymm4[3],ymm5[3],ymm4[6],ymm5[6],ymm4[7],ymm5[7]
vunpcklps %ymm7, %ymm6, %ymm5 # ymm5 = ymm6[0],ymm7[0],ymm6[1],ymm7[1],ymm6[4],ymm7[4],ymm6[5],ymm7[5]
vunpckhps %ymm7, %ymm6, %ymm6 # ymm6 = ymm6[2],ymm7[2],ymm6[3],ymm7[3],ymm6[6],ymm7[6],ymm6[7],ymm7[7]
vunpcklpd %ymm1, %ymm8, %ymm7 # ymm7 = ymm8[0],ymm1[0],ymm8[2],ymm1[2]
vunpckhpd %ymm1, %ymm8, %ymm1 # ymm1 = ymm8[1],ymm1[1],ymm8[3],ymm1[3]
vunpcklpd %ymm2, %ymm0, %ymm8 # ymm8 = ymm0[0],ymm2[0],ymm0[2],ymm2[2]
vunpckhpd %ymm2, %ymm0, %ymm0 # ymm0 = ymm0[1],ymm2[1],ymm0[3],ymm2[3]
vunpcklpd %ymm5, %ymm3, %ymm2 # ymm2 = ymm3[0],ymm5[0],ymm3[2],ymm5[2]
vunpckhpd %ymm5, %ymm3, %ymm3 # ymm3 = ymm3[1],ymm5[1],ymm3[3],ymm5[3]
vunpcklpd %ymm6, %ymm4, %ymm5 # ymm5 = ymm4[0],ymm6[0],ymm4[2],ymm6[2]
vunpckhpd %ymm6, %ymm4, %ymm4 # ymm4 = ymm4[1],ymm6[1],ymm4[3],ymm6[3]
vinsertf128 $0x1, %xmm2, %ymm7, %ymm6
vinsertf128 $0x1, %xmm3, %ymm1, %ymm9
vinsertf128 $0x1, %xmm5, %ymm8, %ymm10
vinsertf128 $0x1, %xmm4, %ymm0, %ymm11
vperm2f128 $0x31, %ymm2, %ymm7, %ymm2 # ymm2 = ymm7[2,3],ymm2[2,3]
vperm2f128 $0x31, %ymm3, %ymm1, %ymm1 # ymm1 = ymm1[2,3],ymm3[2,3]
vperm2f128 $0x31, %ymm5, %ymm8, %ymm3 # ymm3 = ymm8[2,3],ymm5[2,3]
vperm2f128 $0x31, %ymm4, %ymm0, %ymm0 # ymm0 = ymm0[2,3],ymm4[2,3]
vmovups %ymm6, (%rcx)
vmovups %ymm9, 0x20(%rcx)
vmovups %ymm10, 0x40(%rcx)
vmovups %ymm11, 0x60(%rcx)
vmovups %ymm2, 0x80(%rcx)
vmovups %ymm1, 0xa0(%rcx)
vmovups %ymm3, 0xc0(%rcx)
vmovups %ymm0, 0xe0(%rcx)
addq $0x100, %rdi # imm = 0x100
addl $0x8, %r10d
addq %r13, %rcx
addl $0x8, %esi
addq %r13, %rbp
jmp 0x379b22
movq -0x10(%rsp), %r10
movq -0x30(%rsp), %rax
cmpl %r9d, %esi
jge 0x379ca5
vmovaps (%rdi), %ymm0
vmovaps 0x20(%rdi), %ymm1
vmovaps 0x40(%rdi), %ymm2
vmovaps 0x60(%rdi), %ymm3
vunpcklps %ymm1, %ymm0, %ymm4 # ymm4 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
vunpckhps %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
vunpcklps %ymm3, %ymm2, %ymm1 # ymm1 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
vunpckhps %ymm3, %ymm2, %ymm2 # ymm2 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
vunpcklpd %ymm1, %ymm4, %ymm3 # ymm3 = ymm4[0],ymm1[0],ymm4[2],ymm1[2]
vunpckhpd %ymm1, %ymm4, %ymm1 # ymm1 = ymm4[1],ymm1[1],ymm4[3],ymm1[3]
vunpcklpd %ymm2, %ymm0, %ymm4 # ymm4 = ymm0[0],ymm2[0],ymm0[2],ymm2[2]
vunpckhpd %ymm2, %ymm0, %ymm0 # ymm0 = ymm0[1],ymm2[1],ymm0[3],ymm2[3]
vmovaps %xmm3, -0xe0(%rbp)
vmovaps %xmm1, -0xc0(%rbp)
vmovaps %xmm4, -0xa0(%rbp)
vmovaps %xmm0, -0x80(%rbp)
vextractf128 $0x1, %ymm3, -0x60(%rbp)
vextractf128 $0x1, %ymm1, -0x40(%rbp)
vextractf128 $0x1, %ymm4, -0x20(%rbp)
vextractf128 $0x1, %ymm0, (%rbp)
subq $-0x80, %rdi
addl $0x4, %esi
addq %r13, %rbp
jmp 0x379c28
movq -0x20(%rsp), %rbp
cmpl $0x4, %r11d
jne 0x379d2c
movq (%r10), %rcx
addq -0x40(%rsp), %rcx
movl $0x3, %esi
cmpl %r9d, %esi
jge 0x379d2c
vmovaps (%rdi), %ymm0
vmovaps 0x20(%rdi), %ymm1
vmovaps 0x40(%rdi), %ymm2
vmovaps 0x60(%rdi), %ymm3
vunpcklps %ymm1, %ymm0, %ymm4 # ymm4 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
vunpckhps %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
vunpcklps %ymm3, %ymm2, %ymm1 # ymm1 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
vunpckhps %ymm3, %ymm2, %ymm2 # ymm2 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
vunpcklpd %ymm1, %ymm4, %ymm3 # ymm3 = ymm4[0],ymm1[0],ymm4[2],ymm1[2]
vunpckhpd %ymm1, %ymm4, %ymm1 # ymm1 = ymm4[1],ymm1[1],ymm4[3],ymm1[3]
vunpcklpd %ymm2, %ymm0, %ymm4 # ymm4 = ymm0[0],ymm2[0],ymm0[2],ymm2[2]
vunpckhpd %ymm2, %ymm0, %ymm0 # ymm0 = ymm0[1],ymm2[1],ymm0[3],ymm2[3]
vinsertf128 $0x1, %xmm1, %ymm3, %ymm2
vinsertf128 $0x1, %xmm0, %ymm4, %ymm5
vperm2f128 $0x31, %ymm1, %ymm3, %ymm1 # ymm1 = ymm3[2,3],ymm1[2,3]
vperm2f128 $0x31, %ymm0, %ymm4, %ymm0 # ymm0 = ymm4[2,3],ymm0[2,3]
vmovups %ymm2, -0x60(%rcx)
vmovups %ymm5, -0x40(%rcx)
vmovups %ymm1, -0x20(%rcx)
vmovups %ymm0, (%rcx)
subq $-0x80, %rdi
addl $0x4, %esi
addq %rdx, %rcx
jmp 0x379cbd
cmpl $0x1, %r11d
jne 0x379d52
movq (%r10), %rcx
addq %r12, %rcx
movl -0x48(%rsp), %esi
subl $0x1, %esi
jb 0x379d52
vmovaps (%rdi), %ymm0
vmovups %ymm0, (%rcx)
addq $0x20, %rdi
addq %rbx, %rcx
jmp 0x379d3c
addq $0x8, %r14
movl $0x80, %ecx
addq %rcx, -0x8(%rsp)
addq $0x20, %r12
addq %rcx, %r8
addq $0x20, %r15
addq %rcx, %rax
addq %rcx, -0x40(%rsp)
jmp 0x379a46
movq %r12, -0x40(%rsp)
movq %rax, %r12
movq -0x38(%rsp), %rax
leaq (,%rax,4), %rax
movq %rax, -0x30(%rsp)
movq %r14, %rax
orq $0x3, %rax
cmpq %rbp, %rax
jge 0x379ff9
cmpl $0x8, %r11d
jne 0x379f48
movq (%r10), %rax
addq -0x30(%rsp), %rax
movq -0x18(%rsp), %rcx
leaq (%r14,%rcx), %rsi
shlq $0x5, %rsi
addq %rax, %rsi
xorl %r10d, %r10d
cmpl $0x4, -0x44(%rsp)
jne 0x379e2a
vmovaps (%rdi), %xmm0
vmovaps 0x10(%rdi), %xmm1
vmovaps 0x20(%rdi), %xmm2
vmovaps 0x30(%rdi), %xmm3
vunpcklps %xmm1, %xmm0, %xmm4 # xmm4 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vunpcklps %xmm3, %xmm2, %xmm5 # xmm5 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
vunpckhps %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
vunpckhps %xmm3, %xmm2, %xmm1 # xmm1 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
vmovlhps %xmm5, %xmm4, %xmm2 # xmm2 = xmm4[0],xmm5[0]
vunpckhpd %xmm5, %xmm4, %xmm3 # xmm3 = xmm4[1],xmm5[1]
vmovlhps %xmm1, %xmm0, %xmm4 # xmm4 = xmm0[0],xmm1[0]
vunpckhpd %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[1],xmm1[1]
vmovaps %xmm2, 0x10(%rsi)
vmovaps %xmm3, 0x30(%rsi)
vmovaps %xmm4, 0x50(%rsi)
vmovaps %xmm0, 0x70(%rsi)
addq $0x40, %rdi
movq -0x28(%rsp), %rax
leaq (%rsi,%rax,4), %rsi
movl $0x4, %eax
movl %eax, %r10d
leal 0x3(%r10), %ebp
leaq 0x60(%rsi), %rcx
leal 0x7(%r10), %eax
cmpl %r9d, %eax
jge 0x379ee7
vmovaps (%rdi), %xmm0
vmovaps 0x10(%rdi), %xmm1
vmovaps 0x20(%rdi), %xmm2
vmovaps 0x30(%rdi), %xmm3
vmovaps 0x40(%rdi), %xmm4
vmovaps 0x50(%rdi), %xmm5
vmovaps 0x60(%rdi), %xmm6
vmovaps 0x70(%rdi), %xmm7
vunpcklps %xmm1, %xmm0, %xmm8 # xmm8 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vunpcklps %xmm3, %xmm2, %xmm9 # xmm9 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
vunpckhps %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
vunpckhps %xmm3, %xmm2, %xmm1 # xmm1 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
vmovlhps %xmm9, %xmm8, %xmm2 # xmm2 = xmm8[0],xmm9[0]
vunpckhpd %xmm9, %xmm8, %xmm3 # xmm3 = xmm8[1],xmm9[1]
vmovlhps %xmm1, %xmm0, %xmm8 # xmm8 = xmm0[0],xmm1[0]
vunpckhpd %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[1],xmm1[1]
vunpcklps %xmm5, %xmm4, %xmm1 # xmm1 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
vunpcklps %xmm7, %xmm6, %xmm9 # xmm9 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
vunpckhps %xmm5, %xmm4, %xmm4 # xmm4 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
vunpckhps %xmm7, %xmm6, %xmm5 # xmm5 = xmm6[2],xmm7[2],xmm6[3],xmm7[3]
vmovlhps %xmm9, %xmm1, %xmm6 # xmm6 = xmm1[0],xmm9[0]
vunpckhpd %xmm9, %xmm1, %xmm1 # xmm1 = xmm1[1],xmm9[1]
vmovlhps %xmm5, %xmm4, %xmm7 # xmm7 = xmm4[0],xmm5[0]
vunpckhpd %xmm5, %xmm4, %xmm4 # xmm4 = xmm4[1],xmm5[1]
vmovups %xmm2, (%rsi)
vmovups %xmm6, 0x10(%rsi)
vmovups %xmm3, 0x20(%rsi)
vmovups %xmm1, 0x30(%rsi)
vmovups %xmm8, 0x40(%rsi)
vmovups %xmm7, 0x50(%rsi)
vmovups %xmm0, 0x60(%rsi)
vmovups %xmm4, 0x70(%rsi)
subq $-0x80, %rdi
addl $0x8, %r10d
addq %r13, %rsi
addl $0x8, %ebp
addq %r13, %rcx
jmp 0x379e32
movq -0x10(%rsp), %r10
cmpl %r9d, %ebp
jge 0x379f43
vmovaps (%rdi), %xmm0
vmovaps 0x10(%rdi), %xmm1
vmovaps 0x20(%rdi), %xmm2
vmovaps 0x30(%rdi), %xmm3
vunpcklps %xmm1, %xmm0, %xmm4 # xmm4 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vunpcklps %xmm3, %xmm2, %xmm5 # xmm5 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
vunpckhps %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
vunpckhps %xmm3, %xmm2, %xmm1 # xmm1 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
vmovlhps %xmm5, %xmm4, %xmm2 # xmm2 = xmm4[0],xmm5[0]
vunpckhpd %xmm5, %xmm4, %xmm3 # xmm3 = xmm4[1],xmm5[1]
vmovlhps %xmm1, %xmm0, %xmm4 # xmm4 = xmm0[0],xmm1[0]
vunpckhpd %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[1],xmm1[1]
vmovaps %xmm2, -0x60(%rcx)
vmovaps %xmm3, -0x40(%rcx)
vmovaps %xmm4, -0x20(%rcx)
vmovaps %xmm0, (%rcx)
addq $0x40, %rdi
addl $0x4, %ebp
addq %r13, %rcx
jmp 0x379eec
movq -0x20(%rsp), %rbp
cmpl $0x4, %r11d
jne 0x379fb0
movq (%r10), %rcx
addq %r12, %rcx
movl $0x3, %esi
cmpl %r9d, %esi
jge 0x379fb0
vmovaps (%rdi), %xmm0
vmovaps 0x10(%rdi), %xmm1
vmovaps 0x20(%rdi), %xmm2
vmovaps 0x30(%rdi), %xmm3
vunpcklps %xmm1, %xmm0, %xmm4 # xmm4 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vunpcklps %xmm3, %xmm2, %xmm5 # xmm5 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
vunpckhps %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
vunpckhps %xmm3, %xmm2, %xmm1 # xmm1 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
vmovlhps %xmm5, %xmm4, %xmm2 # xmm2 = xmm4[0],xmm5[0]
vunpckhpd %xmm5, %xmm4, %xmm3 # xmm3 = xmm4[1],xmm5[1]
vmovlhps %xmm1, %xmm0, %xmm4 # xmm4 = xmm0[0],xmm1[0]
vunpckhpd %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[1],xmm1[1]
vmovups %xmm2, -0x30(%rcx)
vmovups %xmm3, -0x20(%rcx)
vmovups %xmm4, -0x10(%rcx)
vmovups %xmm0, (%rcx)
addq $0x40, %rdi
addl $0x4, %esi
addq %rdx, %rcx
jmp 0x379f59
cmpl $0x1, %r11d
jne 0x379fd8
movq (%r10), %rcx
addq -0x40(%rsp), %rcx
movl -0x48(%rsp), %esi
subl $0x1, %esi
jb 0x379fd8
vmovaps (%rdi), %xmm0
vmovups %xmm0, (%rcx)
addq $0x10, %rdi
addq %rbx, %rcx
jmp 0x379fc2
addq $0x4, %r14
addq $0x40, -0x8(%rsp)
addq $0x10, -0x40(%rsp)
addq $0x40, %r8
addq $0x10, %r15
addq $0x40, %r12
jmp 0x379d92
movq (%r10), %rax
movq -0x38(%rsp), %rcx
leaq (%rax,%rcx,4), %rcx
movq %rcx, -0x38(%rsp)
addq %rax, %r8
addq %rax, %r15
movq -0x40(%rsp), %r12
movq %r14, %rax
orq $0x1, %rax
cmpq %rbp, %rax
jge 0x37a297
cmpl $0x8, %r11d
jne 0x37a1e2
movq -0x18(%rsp), %rax
leaq (%r14,%rax), %rsi
shlq $0x5, %rsi
addq -0x38(%rsp), %rsi
xorl %ecx, %ecx
cmpl $0x4, -0x44(%rsp)
jne 0x37a0ad
vmovss (%rdi), %xmm0
vmovss %xmm0, 0x10(%rsi)
vmovss 0x8(%rdi), %xmm0
vmovss %xmm0, 0x14(%rsi)
vmovss 0x10(%rdi), %xmm0
vmovss %xmm0, 0x18(%rsi)
vmovss 0x18(%rdi), %xmm0
vmovss %xmm0, 0x1c(%rsi)
vmovss 0x4(%rdi), %xmm0
vmovss %xmm0, 0x30(%rsi)
vmovss 0xc(%rdi), %xmm0
vmovss %xmm0, 0x34(%rsi)
vmovss 0x14(%rdi), %xmm0
vmovss %xmm0, 0x38(%rsi)
vmovss 0x1c(%rdi), %xmm0
vmovss %xmm0, 0x3c(%rsi)
addq $0x20, %rdi
movq -0x28(%rsp), %rax
leaq (%rsi,%rax,4), %rsi
movl $0x4, %eax
movl %eax, %ecx
leaq 0x2c(%rsi), %r10
leal 0x3(%rcx), %ebp
leal 0x7(%rcx), %eax
cmpl %r9d, %eax
jge 0x37a1d3
vmovss (%rdi), %xmm0
vmovss %xmm0, (%rsi)
vmovss 0x8(%rdi), %xmm0
vmovss %xmm0, 0x4(%rsi)
vmovss 0x10(%rdi), %xmm0
vmovss %xmm0, 0x8(%rsi)
vmovss 0x18(%rdi), %xmm0
vmovss %xmm0, 0xc(%rsi)
vmovss 0x20(%rdi), %xmm0
vmovss %xmm0, 0x10(%rsi)
vmovss 0x28(%rdi), %xmm0
vmovss %xmm0, 0x14(%rsi)
vmovss 0x30(%rdi), %xmm0
vmovss %xmm0, 0x18(%rsi)
vmovss 0x38(%rdi), %xmm0
vmovss %xmm0, 0x1c(%rsi)
vmovss 0x4(%rdi), %xmm0
vmovss %xmm0, 0x20(%rsi)
vmovss 0xc(%rdi), %xmm0
vmovss %xmm0, 0x24(%rsi)
vmovss 0x14(%rdi), %xmm0
vmovss %xmm0, 0x28(%rsi)
vmovss 0x1c(%rdi), %xmm0
vmovss %xmm0, 0x2c(%rsi)
vmovss 0x24(%rdi), %xmm0
vmovss %xmm0, 0x30(%rsi)
vmovss 0x2c(%rdi), %xmm0
vmovss %xmm0, 0x34(%rsi)
vmovss 0x34(%rdi), %xmm0
vmovss %xmm0, 0x38(%rsi)
vmovss 0x3c(%rdi), %xmm0
vmovss %xmm0, 0x3c(%rsi)
addq $0x40, %rdi
addl $0x8, %ecx
addq %r13, %rsi
addq %r13, %r10
addl $0x8, %ebp
jmp 0x37a0b4
vmovss (%rdi), %xmm0
vmovss %xmm0, -0x2c(%r10)
vmovss 0x8(%rdi), %xmm0
vmovss %xmm0, -0x28(%r10)
vmovss 0x10(%rdi), %xmm0
vmovss %xmm0, -0x24(%r10)
vmovss 0x18(%rdi), %xmm0
vmovss %xmm0, -0x20(%r10)
vmovss 0x4(%rdi), %xmm0
vmovss %xmm0, -0xc(%r10)
vmovss 0xc(%rdi), %xmm0
vmovss %xmm0, -0x8(%r10)
vmovss 0x14(%rdi), %xmm0
vmovss %xmm0, -0x4(%r10)
vmovss 0x1c(%rdi), %xmm0
vmovss %xmm0, (%r10)
addq $0x20, %rdi
addq %r13, %r10
addl $0x4, %ebp
cmpl %r9d, %ebp
jl 0x37a173
movq -0x10(%rsp), %r10
movq -0x20(%rsp), %rbp
cmpl $0x4, %r11d
jne 0x37a24f
movl $0x3, %ecx
movq %r8, %rsi
cmpl %r9d, %ecx
jge 0x37a24f
vmovss (%rdi), %xmm0
vmovss %xmm0, -0x1c(%rsi)
vmovss 0x8(%rdi), %xmm0
vmovss %xmm0, -0x18(%rsi)
vmovss 0x10(%rdi), %xmm0
vmovss %xmm0, -0x14(%rsi)
vmovss 0x18(%rdi), %xmm0
vmovss %xmm0, -0x10(%rsi)
vmovss 0x4(%rdi), %xmm0
vmovss %xmm0, -0xc(%rsi)
vmovss 0xc(%rdi), %xmm0
vmovss %xmm0, -0x8(%rsi)
vmovss 0x14(%rdi), %xmm0
vmovss %xmm0, -0x4(%rsi)
vmovss 0x1c(%rdi), %xmm0
vmovss %xmm0, (%rsi)
addq $0x20, %rdi
addq %rdx, %rsi
addl $0x4, %ecx
jmp 0x37a1f0
cmpl $0x1, %r11d
jne 0x37a27c
movl -0x48(%rsp), %ecx
movq %r15, %rsi
subl $0x1, %ecx
jb 0x37a27c
vmovss (%rdi), %xmm0
vmovss %xmm0, -0x4(%rsi)
vmovss 0x4(%rdi), %xmm0
vmovss %xmm0, (%rsi)
addq $0x8, %rdi
addq %rbx, %rsi
jmp 0x37a25c
addq $0x2, %r14
addq $0x20, -0x8(%rsp)
addq $0x8, %r12
addq $0x20, %r8
addq $0x8, %r15
jmp 0x37a015
movl $0x3, %ecx
movq -0x8(%rsp), %r15
cmpq %rbp, %r14
jge 0x37a3e2
cmpl $0x8, %r11d
jne 0x37a381
movq (%r10), %r8
addq -0x30(%rsp), %r8
movq -0x18(%rsp), %rax
leaq (%r14,%rax), %rsi
shlq $0x5, %rsi
addq %r8, %rsi
xorl %ebp, %ebp
cmpl $0x4, -0x44(%rsp)
jne 0x37a310
vmovss (%rdi), %xmm0
vmovss %xmm0, 0x10(%rsi)
vmovss 0x4(%rdi), %xmm0
vmovss %xmm0, 0x14(%rsi)
vmovss 0x8(%rdi), %xmm0
vmovss %xmm0, 0x18(%rsi)
vmovss 0xc(%rdi), %xmm0
vmovss %xmm0, 0x1c(%rsi)
addq $0x10, %rdi
movq -0x28(%rsp), %rax
leaq (%rsi,%rax,4), %rsi
movl $0x4, %eax
movl %eax, %ebp
leaq 0xc(%rsi), %r8
leal 0x3(%rbp), %r10d
leal 0x7(%rbp), %eax
cmpl %r9d, %eax
jge 0x37a33b
vmovups (%rdi), %ymm0
vmovaps %ymm0, (%rsi)
addq $0x20, %rdi
addl $0x8, %ebp
addq %r13, %rsi
addq %r13, %r8
addl $0x8, %r10d
jmp 0x37a318
movq -0x20(%rsp), %rbp
cmpl %r9d, %r10d
jge 0x37a37c
vmovss (%rdi), %xmm0
vmovss %xmm0, -0xc(%r8)
vmovss 0x4(%rdi), %xmm0
vmovss %xmm0, -0x8(%r8)
vmovss 0x8(%rdi), %xmm0
vmovss %xmm0, -0x4(%r8)
vmovss 0xc(%rdi), %xmm0
vmovss %xmm0, (%r8)
addq $0x10, %rdi
addq %r13, %r8
addl $0x4, %r10d
jmp 0x37a340
movq -0x10(%rsp), %r10
cmpl $0x4, %r11d
jne 0x37a3aa
movq (%r10), %rsi
addq %r15, %rsi
movl %ecx, %r8d
cmpl %r9d, %r8d
jge 0x37a3aa
vmovups (%rdi), %xmm0
vmovaps %xmm0, (%rsi)
addq $0x10, %rdi
addq %rdx, %rsi
addl $0x4, %r8d
jmp 0x37a390
cmpl $0x1, %r11d
jne 0x37a3d2
movq (%r10), %rsi
addq %r12, %rsi
movl -0x48(%rsp), %r8d
subl $0x1, %r8d
jb 0x37a3d2
vmovss (%rdi), %xmm0
vmovss %xmm0, (%rsi)
addq $0x4, %rdi
addq %rbx, %rsi
jmp 0x37a3bb
incq %r14
addq $0x10, %r15
addq $0x4, %r12
jmp 0x37a2a1
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/gemm_x86_avx.cpp |
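The vunpck{l,h}ps / vunpck{l,h}pd / vinsertf128 / vperm2f128 ladders above are the standard AVX 8x8 float transpose, applied here while packing tiles for the GEMM kernels (the narrower xmm and scalar tails handle 4-column and leftover cases). A minimal free-standing sketch of that shuffle network, written as my own helper rather than the library's code:

#include <immintrin.h>

// Transpose an 8x8 float block; src and dst are row-major, 8 floats per row.
static void transpose_8x8_avx(const float* src, float* dst)
{
    __m256 r0 = _mm256_loadu_ps(src + 0 * 8);
    __m256 r1 = _mm256_loadu_ps(src + 1 * 8);
    __m256 r2 = _mm256_loadu_ps(src + 2 * 8);
    __m256 r3 = _mm256_loadu_ps(src + 3 * 8);
    __m256 r4 = _mm256_loadu_ps(src + 4 * 8);
    __m256 r5 = _mm256_loadu_ps(src + 5 * 8);
    __m256 r6 = _mm256_loadu_ps(src + 6 * 8);
    __m256 r7 = _mm256_loadu_ps(src + 7 * 8);

    // stage 1: interleave 32-bit lanes within each 128-bit half (vunpck{l,h}ps)
    __m256 t0 = _mm256_unpacklo_ps(r0, r1);
    __m256 t1 = _mm256_unpackhi_ps(r0, r1);
    __m256 t2 = _mm256_unpacklo_ps(r2, r3);
    __m256 t3 = _mm256_unpackhi_ps(r2, r3);
    __m256 t4 = _mm256_unpacklo_ps(r4, r5);
    __m256 t5 = _mm256_unpackhi_ps(r4, r5);
    __m256 t6 = _mm256_unpacklo_ps(r6, r7);
    __m256 t7 = _mm256_unpackhi_ps(r6, r7);

    // stage 2: interleave 64-bit pairs (vunpck{l,h}pd)
    __m256 u0 = _mm256_castpd_ps(_mm256_unpacklo_pd(_mm256_castps_pd(t0), _mm256_castps_pd(t2)));
    __m256 u1 = _mm256_castpd_ps(_mm256_unpackhi_pd(_mm256_castps_pd(t0), _mm256_castps_pd(t2)));
    __m256 u2 = _mm256_castpd_ps(_mm256_unpacklo_pd(_mm256_castps_pd(t1), _mm256_castps_pd(t3)));
    __m256 u3 = _mm256_castpd_ps(_mm256_unpackhi_pd(_mm256_castps_pd(t1), _mm256_castps_pd(t3)));
    __m256 u4 = _mm256_castpd_ps(_mm256_unpacklo_pd(_mm256_castps_pd(t4), _mm256_castps_pd(t6)));
    __m256 u5 = _mm256_castpd_ps(_mm256_unpackhi_pd(_mm256_castps_pd(t4), _mm256_castps_pd(t6)));
    __m256 u6 = _mm256_castpd_ps(_mm256_unpacklo_pd(_mm256_castps_pd(t5), _mm256_castps_pd(t7)));
    __m256 u7 = _mm256_castpd_ps(_mm256_unpackhi_pd(_mm256_castps_pd(t5), _mm256_castps_pd(t7)));

    // stage 3: exchange 128-bit halves between row groups 0-3 and 4-7
    // (imm 0x20 corresponds to the vinsertf128 $0x1 forms, imm 0x31 to vperm2f128 $0x31)
    _mm256_storeu_ps(dst + 0 * 8, _mm256_permute2f128_ps(u0, u4, 0x20));
    _mm256_storeu_ps(dst + 1 * 8, _mm256_permute2f128_ps(u1, u5, 0x20));
    _mm256_storeu_ps(dst + 2 * 8, _mm256_permute2f128_ps(u2, u6, 0x20));
    _mm256_storeu_ps(dst + 3 * 8, _mm256_permute2f128_ps(u3, u7, 0x20));
    _mm256_storeu_ps(dst + 4 * 8, _mm256_permute2f128_ps(u0, u4, 0x31));
    _mm256_storeu_ps(dst + 5 * 8, _mm256_permute2f128_ps(u1, u5, 0x31));
    _mm256_storeu_ps(dst + 6 * 8, _mm256_permute2f128_ps(u2, u6, 0x31));
    _mm256_storeu_ps(dst + 7 * 8, _mm256_permute2f128_ps(u3, u7, 0x31));
}

With in[i * 8 + j] on input, out[j * 8 + i] holds the same value afterwards: three shuffle stages, no scalar moves.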
ncnn::GroupNorm_x86::forward_inplace(ncnn::Mat&, ncnn::Option const&) const | int GroupNorm_x86::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
{
const int dims = bottom_top_blob.dims;
const int channels_per_group = channels / group;
if (dims == 1)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat bottom_top_blob_g = bottom_top_blob.range(g * channels_per_group, channels_per_group);
const Mat gamma_data_g = gamma_data.range(g * channels_per_group, channels_per_group);
const Mat beta_data_g = beta_data.range(g * channels_per_group, channels_per_group);
float sum = 0.f;
float* ptr = bottom_top_blob_g;
{
int i = 0;
#if __SSE2__
#if __AVX__
#if __AVX512F__
__m512 _sum_avx512 = _mm512_setzero_ps();
for (; i + 15 < channels_per_group; i += 16)
{
_sum_avx512 = _mm512_add_ps(_sum_avx512, _mm512_loadu_ps(ptr));
ptr += 16;
}
sum += _mm512_comp_reduce_add_ps(_sum_avx512);
#endif // __AVX512F__
__m256 _sum_avx = _mm256_setzero_ps();
for (; i + 7 < channels_per_group; i += 8)
{
_sum_avx = _mm256_add_ps(_sum_avx, _mm256_loadu_ps(ptr));
ptr += 8;
}
sum += _mm256_reduce_add_ps(_sum_avx);
#endif // __AVX__
__m128 _sum = _mm_setzero_ps();
for (; i + 3 < channels_per_group; i += 4)
{
_sum = _mm_add_ps(_sum, _mm_loadu_ps(ptr));
ptr += 4;
}
sum += _mm_reduce_add_ps(_sum);
#endif // __SSE2__
for (; i < channels_per_group; i++)
{
sum += *ptr;
ptr++;
}
}
float mean = sum / channels_per_group;
float sqsum = 0.f;
ptr = bottom_top_blob_g;
{
int i = 0;
#if __SSE2__
#if __AVX__
#if __AVX512F__
__m512 _sqsum_avx512 = _mm512_setzero_ps();
__m512 _mean_avx512 = _mm512_set1_ps(mean);
for (; i + 15 < channels_per_group; i += 16)
{
__m512 _p = _mm512_loadu_ps(ptr);
_p = _mm512_sub_ps(_p, _mean_avx512);
_p = _mm512_mul_ps(_p, _p);
_sqsum_avx512 = _mm512_add_ps(_p, _sqsum_avx512);
ptr += 16;
}
sqsum += _mm512_comp_reduce_add_ps(_sqsum_avx512);
#endif // __AVX512F__
__m256 _sqsum_avx = _mm256_setzero_ps();
__m256 _mean_avx = _mm256_set1_ps(mean);
for (; i + 7 < channels_per_group; i += 8)
{
__m256 _p = _mm256_loadu_ps(ptr);
_p = _mm256_sub_ps(_p, _mean_avx);
_sqsum_avx = _mm256_comp_fmadd_ps(_p, _p, _sqsum_avx);
ptr += 8;
}
sqsum += _mm256_reduce_add_ps(_sqsum_avx);
#endif // __AVX__
__m128 _sqsum = _mm_setzero_ps();
__m128 _mean = _mm_set1_ps(mean);
for (; i + 3 < channels_per_group; i += 4)
{
__m128 _p = _mm_loadu_ps(ptr);
_p = _mm_sub_ps(_p, _mean);
_sqsum = _mm_comp_fmadd_ps(_p, _p, _sqsum);
ptr += 4;
}
sqsum += _mm_reduce_add_ps(_sqsum);
#endif // __SSE2__
for (; i < channels_per_group; i++)
{
float tmp = *ptr - mean;
sqsum += tmp * tmp;
ptr++;
}
}
float scale1 = 1.f / sqrtf(sqsum / channels_per_group + eps);
float scale2 = -mean * scale1;
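            // folding mean/var into scale1/scale2 turns every apply loop below
            // into one multiply-add: (x - mean) / sqrt(var + eps) == x * scale1 + scale2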
ptr = bottom_top_blob_g;
if (affine)
{
int i = 0;
const float* gamma = gamma_data_g;
const float* beta = beta_data_g;
#if __SSE2__
#if __AVX__
#if __AVX512F__
__m512 _scale1_avx512 = _mm512_set1_ps(scale1);
__m512 _scale2_avx512 = _mm512_set1_ps(scale2);
for (; i + 15 < channels_per_group; i += 16)
{
__m512 _gamma = _mm512_loadu_ps(gamma);
__m512 _beta = _mm512_loadu_ps(beta);
__m512 _p = _mm512_loadu_ps(ptr);
__m512 _a = _mm512_mul_ps(_gamma, _scale1_avx512);
__m512 _b = _mm512_add_ps(_mm512_mul_ps(_gamma, _scale2_avx512), _beta);
_p = _mm512_add_ps(_mm512_mul_ps(_p, _a), _b);
_mm512_storeu_ps(ptr, _p);
gamma += 16;
beta += 16;
ptr += 16;
}
#endif // __AVX512F__
__m256 _scale1_avx = _mm256_set1_ps(scale1);
__m256 _scale2_avx = _mm256_set1_ps(scale2);
for (; i + 7 < channels_per_group; i += 8)
{
__m256 _gamma = _mm256_loadu_ps(gamma);
__m256 _beta = _mm256_loadu_ps(beta);
__m256 _p = _mm256_loadu_ps(ptr);
__m256 _a = _mm256_mul_ps(_gamma, _scale1_avx);
__m256 _b = _mm256_comp_fmadd_ps(_gamma, _scale2_avx, _beta);
_p = _mm256_comp_fmadd_ps(_p, _a, _b);
_mm256_storeu_ps(ptr, _p);
gamma += 8;
beta += 8;
ptr += 8;
}
#endif // __AVX__
__m128 _scale1 = _mm_set1_ps(scale1);
__m128 _scale2 = _mm_set1_ps(scale2);
for (; i + 3 < channels_per_group; i += 4)
{
__m128 _gamma = _mm_loadu_ps(gamma);
__m128 _beta = _mm_loadu_ps(beta);
__m128 _p = _mm_loadu_ps(ptr);
__m128 _a = _mm_mul_ps(_gamma, _scale1);
__m128 _b = _mm_comp_fmadd_ps(_gamma, _scale2, _beta);
_p = _mm_comp_fmadd_ps(_p, _a, _b);
_mm_storeu_ps(ptr, _p);
gamma += 4;
beta += 4;
ptr += 4;
}
#endif // __SSE2__
for (; i < channels_per_group; i++)
{
float a = *gamma * scale1;
float b = *gamma * scale2 + *beta;
*ptr = *ptr * a + b;
gamma++;
beta++;
ptr++;
}
}
else
{
int i = 0;
#if __SSE2__
#if __AVX__
#if __AVX512F__
__m512 _scale1_avx512 = _mm512_set1_ps(scale1);
__m512 _scale2_avx512 = _mm512_set1_ps(scale2);
for (; i + 15 < channels_per_group; i += 16)
{
__m512 _p = _mm512_loadu_ps(ptr);
_p = _mm512_add_ps(_mm512_mul_ps(_p, _scale1_avx512), _scale2_avx512);
_mm512_storeu_ps(ptr, _p);
ptr += 16;
}
#endif // __AVX512F__
__m256 _scale1_avx = _mm256_set1_ps(scale1);
__m256 _scale2_avx = _mm256_set1_ps(scale2);
for (; i + 7 < channels_per_group; i += 8)
{
__m256 _p = _mm256_loadu_ps(ptr);
_p = _mm256_comp_fmadd_ps(_p, _scale1_avx, _scale2_avx);
_mm256_storeu_ps(ptr, _p);
ptr += 8;
}
#endif // __AVX__
__m128 _scale1 = _mm_set1_ps(scale1);
__m128 _scale2 = _mm_set1_ps(scale2);
for (; i + 3 < channels_per_group; i += 4)
{
__m128 _p = _mm_loadu_ps(ptr);
_p = _mm_comp_fmadd_ps(_p, _scale1, _scale2);
_mm_storeu_ps(ptr, _p);
ptr += 4;
}
#endif // __SSE2__
for (; i < channels_per_group; i++)
{
*ptr = *ptr * scale1 + scale2;
ptr++;
}
}
}
return 0;
}
if (dims == 2)
{
int w = bottom_top_blob.w;
int size = channels_per_group * w;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat bottom_top_blob_g = bottom_top_blob.row_range(g * channels_per_group, channels_per_group);
const Mat gamma_data_g = gamma_data.range(g * channels_per_group, channels_per_group);
const Mat beta_data_g = beta_data.range(g * channels_per_group, channels_per_group);
// mean and var
float sum = 0.f;
float* ptr = bottom_top_blob_g;
{
int i = 0;
#if __SSE2__
#if __AVX__
#if __AVX512F__
__m512 _sum_avx512 = _mm512_setzero_ps();
for (; i + 15 < size; i += 16)
{
_sum_avx512 = _mm512_add_ps(_sum_avx512, _mm512_loadu_ps(ptr));
ptr += 16;
}
sum += _mm512_comp_reduce_add_ps(_sum_avx512);
#endif // __AVX512F__
__m256 _sum_avx = _mm256_setzero_ps();
for (; i + 7 < size; i += 8)
{
_sum_avx = _mm256_add_ps(_sum_avx, _mm256_loadu_ps(ptr));
ptr += 8;
}
sum += _mm256_reduce_add_ps(_sum_avx);
#endif // __AVX__
__m128 _sum = _mm_setzero_ps();
for (; i + 3 < size; i += 4)
{
_sum = _mm_add_ps(_sum, _mm_loadu_ps(ptr));
ptr += 4;
}
sum += _mm_reduce_add_ps(_sum);
#endif // __SSE2__
for (; i < size; i++)
{
sum += *ptr;
ptr++;
}
}
float mean = sum / size;
float sqsum = 0.f;
ptr = bottom_top_blob_g;
{
int i = 0;
#if __SSE2__
#if __AVX__
#if __AVX512F__
__m512 _sqsum_avx512 = _mm512_setzero_ps();
__m512 _mean_avx512 = _mm512_set1_ps(mean);
for (; i + 15 < size; i += 16)
{
__m512 _p = _mm512_loadu_ps(ptr);
_p = _mm512_sub_ps(_p, _mean_avx512);
_p = _mm512_mul_ps(_p, _p);
_sqsum_avx512 = _mm512_add_ps(_p, _sqsum_avx512);
ptr += 16;
}
sqsum += _mm512_comp_reduce_add_ps(_sqsum_avx512);
#endif // __AVX512F__
__m256 _sqsum_avx = _mm256_setzero_ps();
__m256 _mean_avx = _mm256_set1_ps(mean);
for (; i + 7 < size; i += 8)
{
__m256 _p = _mm256_loadu_ps(ptr);
_p = _mm256_sub_ps(_p, _mean_avx);
_sqsum_avx = _mm256_comp_fmadd_ps(_p, _p, _sqsum_avx);
ptr += 8;
}
sqsum += _mm256_reduce_add_ps(_sqsum_avx);
#endif // __AVX__
__m128 _sqsum = _mm_setzero_ps();
__m128 _mean = _mm_set1_ps(mean);
for (; i + 3 < size; i += 4)
{
__m128 _p = _mm_loadu_ps(ptr);
_p = _mm_sub_ps(_p, _mean);
_sqsum = _mm_comp_fmadd_ps(_p, _p, _sqsum);
ptr += 4;
}
sqsum += _mm_reduce_add_ps(_sqsum);
#endif // __SSE2__
for (; i < size; i++)
{
float tmp = *ptr - mean;
sqsum += tmp * tmp;
ptr++;
}
}
float scale1 = 1.f / sqrtf(sqsum / size + eps);
float scale2 = -mean * scale1;
ptr = bottom_top_blob_g;
if (affine)
{
const float* gamma = gamma_data_g;
const float* beta = beta_data_g;
for (int q = 0; q < channels_per_group; q++)
{
float a = *gamma * scale1;
float b = *gamma * scale2 + *beta;
int i = 0;
#if __SSE2__
#if __AVX__
#if __AVX512F__
__m512 _a_avx512 = _mm512_set1_ps(a);
__m512 _b_avx512 = _mm512_set1_ps(b);
for (; i + 15 < w; i += 16)
{
__m512 _p = _mm512_loadu_ps(ptr);
_p = _mm512_add_ps(_mm512_mul_ps(_p, _a_avx512), _b_avx512);
_mm512_storeu_ps(ptr, _p);
ptr += 16;
}
#endif // __AVX512F__
__m256 _a_avx = _mm256_set1_ps(a);
__m256 _b_avx = _mm256_set1_ps(b);
for (; i + 7 < w; i += 8)
{
__m256 _p = _mm256_loadu_ps(ptr);
_p = _mm256_comp_fmadd_ps(_p, _a_avx, _b_avx);
_mm256_storeu_ps(ptr, _p);
ptr += 8;
}
#endif // __AVX__
__m128 _a = _mm_set1_ps(a);
__m128 _b = _mm_set1_ps(b);
for (; i + 3 < w; i += 4)
{
__m128 _p = _mm_loadu_ps(ptr);
_p = _mm_comp_fmadd_ps(_p, _a, _b);
_mm_storeu_ps(ptr, _p);
ptr += 4;
}
#endif // __SSE2__
for (; i < w; i++)
{
*ptr = *ptr * a + b;
ptr++;
}
gamma++;
beta++;
}
}
else
{
int i = 0;
#if __SSE2__
#if __AVX__
#if __AVX512F__
__m512 _scale1_avx512 = _mm512_set1_ps(scale1);
__m512 _scale2_avx512 = _mm512_set1_ps(scale2);
for (; i + 15 < size; i += 16)
{
__m512 _p = _mm512_loadu_ps(ptr);
_p = _mm512_add_ps(_mm512_mul_ps(_p, _scale1_avx512), _scale2_avx512);
_mm512_storeu_ps(ptr, _p);
ptr += 16;
}
#endif // __AVX512F__
__m256 _scale1_avx = _mm256_set1_ps(scale1);
__m256 _scale2_avx = _mm256_set1_ps(scale2);
for (; i + 7 < size; i += 8)
{
__m256 _p = _mm256_loadu_ps(ptr);
_p = _mm256_comp_fmadd_ps(_p, _scale1_avx, _scale2_avx);
_mm256_storeu_ps(ptr, _p);
ptr += 8;
}
#endif // __AVX__
__m128 _scale1 = _mm_set1_ps(scale1);
__m128 _scale2 = _mm_set1_ps(scale2);
for (; i + 3 < size; i += 4)
{
__m128 _p = _mm_loadu_ps(ptr);
_p = _mm_comp_fmadd_ps(_p, _scale1, _scale2);
_mm_storeu_ps(ptr, _p);
ptr += 4;
}
#endif // __SSE2__
for (; i < size; i++)
{
*ptr = *ptr * scale1 + scale2;
ptr++;
}
}
}
return 0;
}
if (dims == 3 || dims == 4)
{
int w = bottom_top_blob.w;
int h = bottom_top_blob.h;
int d = bottom_top_blob.d;
int size = w * h * d;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat bottom_top_blob_g = bottom_top_blob.channel_range(g * channels_per_group, channels_per_group);
const Mat gamma_data_g = gamma_data.range(g * channels_per_group, channels_per_group);
const Mat beta_data_g = beta_data.range(g * channels_per_group, channels_per_group);
// mean and var
float sum = 0.f;
for (int q = 0; q < channels_per_group; q++)
{
const float* ptr = bottom_top_blob_g.channel(q);
int i = 0;
#if __SSE2__
#if __AVX__
#if __AVX512F__
__m512 _sum_avx512 = _mm512_setzero_ps();
for (; i + 15 < size; i += 16)
{
_sum_avx512 = _mm512_add_ps(_sum_avx512, _mm512_loadu_ps(ptr));
ptr += 16;
}
sum += _mm512_comp_reduce_add_ps(_sum_avx512);
#endif // __AVX512F__
__m256 _sum_avx = _mm256_setzero_ps();
for (; i + 7 < size; i += 8)
{
_sum_avx = _mm256_add_ps(_sum_avx, _mm256_loadu_ps(ptr));
ptr += 8;
}
sum += _mm256_reduce_add_ps(_sum_avx);
#endif // __AVX__
__m128 _sum = _mm_setzero_ps();
for (; i + 3 < size; i += 4)
{
_sum = _mm_add_ps(_sum, _mm_loadu_ps(ptr));
ptr += 4;
}
sum += _mm_reduce_add_ps(_sum);
#endif // __SSE2__
for (; i < size; i++)
{
sum += *ptr;
ptr++;
}
}
float mean = sum / (channels_per_group * size);
float sqsum = 0.f;
for (int q = 0; q < channels_per_group; q++)
{
const float* ptr = bottom_top_blob_g.channel(q);
int i = 0;
#if __SSE2__
#if __AVX__
#if __AVX512F__
__m512 _sqsum_avx512 = _mm512_setzero_ps();
__m512 _mean_avx512 = _mm512_set1_ps(mean);
for (; i + 15 < size; i += 16)
{
__m512 _p = _mm512_loadu_ps(ptr);
_p = _mm512_sub_ps(_p, _mean_avx512);
_p = _mm512_mul_ps(_p, _p);
_sqsum_avx512 = _mm512_add_ps(_p, _sqsum_avx512);
ptr += 16;
}
sqsum += _mm512_comp_reduce_add_ps(_sqsum_avx512);
#endif // __AVX512F__
__m256 _sqsum_avx = _mm256_setzero_ps();
__m256 _mean_avx = _mm256_set1_ps(mean);
for (; i + 7 < size; i += 8)
{
__m256 _p = _mm256_loadu_ps(ptr);
_p = _mm256_sub_ps(_p, _mean_avx);
_sqsum_avx = _mm256_comp_fmadd_ps(_p, _p, _sqsum_avx);
ptr += 8;
}
sqsum += _mm256_reduce_add_ps(_sqsum_avx);
#endif // __AVX__
__m128 _sqsum = _mm_setzero_ps();
__m128 _mean = _mm_set1_ps(mean);
for (; i + 3 < size; i += 4)
{
__m128 _p = _mm_loadu_ps(ptr);
_p = _mm_sub_ps(_p, _mean);
_sqsum = _mm_comp_fmadd_ps(_p, _p, _sqsum);
ptr += 4;
}
sqsum += _mm_reduce_add_ps(_sqsum);
#endif // __SSE2__
for (; i < size; i++)
{
float tmp = *ptr - mean;
sqsum += tmp * tmp;
ptr++;
}
}
float scale1 = 1.f / sqrtf(sqsum / (channels_per_group * size) + eps);
float scale2 = -mean * scale1;
const float* gamma = gamma_data_g;
const float* beta = beta_data_g;
for (int q = 0; q < channels_per_group; q++)
{
float a = scale1;
float b = scale2;
if (affine)
{
a = *gamma * a;
b = *gamma * b + *beta;
}
float* ptr = bottom_top_blob_g.channel(q);
int i = 0;
#if __SSE2__
#if __AVX__
#if __AVX512F__
__m512 _a_avx512 = _mm512_set1_ps(a);
__m512 _b_avx512 = _mm512_set1_ps(b);
for (; i + 15 < size; i += 16)
{
__m512 _p = _mm512_loadu_ps(ptr);
_p = _mm512_add_ps(_mm512_mul_ps(_p, _a_avx512), _b_avx512);
_mm512_storeu_ps(ptr, _p);
ptr += 16;
}
#endif // __AVX512F__
__m256 _a_avx = _mm256_set1_ps(a);
__m256 _b_avx = _mm256_set1_ps(b);
for (; i + 7 < size; i += 8)
{
__m256 _p = _mm256_loadu_ps(ptr);
_p = _mm256_comp_fmadd_ps(_p, _a_avx, _b_avx);
_mm256_storeu_ps(ptr, _p);
ptr += 8;
}
#endif // __AVX__
__m128 _a = _mm_set1_ps(a);
__m128 _b = _mm_set1_ps(b);
for (; i + 3 < size; i += 4)
{
__m128 _p = _mm_loadu_ps(ptr);
_p = _mm_comp_fmadd_ps(_p, _a, _b);
_mm_storeu_ps(ptr, _p);
ptr += 4;
}
#endif // __SSE2__
for (; i < size; i++)
{
*ptr = *ptr * a + b;
ptr++;
}
gamma++;
beta++;
}
}
return 0;
}
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
movq %rsi, -0x38(%rsp)
movl 0x28(%rsi), %esi
movq (%rdi), %rax
movq %rax, -0x28(%rsp)
movq -0x18(%rax), %r10
movl 0xd0(%rdi,%r10), %ebp
movl 0xd4(%rdi,%r10), %eax
cltd
idivl %ebp
movl %eax, %ecx
movslq %eax, %r11
cmpl $0x2, %esi
movq %rdi, -0x20(%rsp)
movq %r11, -0x30(%rsp)
je 0x37b0fc
cmpl $0x1, %esi
jne 0x37b3c1
cvtsi2ss %ecx, %xmm1
movss 0x73e0a(%rip), %xmm0 # 0x3eec88
divss %xmm1, %xmm0
xorl %eax, %eax
movss 0x7a938(%rip), %xmm1 # 0x3f57c4
movss 0x76a90(%rip), %xmm2 # 0x3f1924
movaps 0x731f5(%rip), %xmm3 # 0x3ee090
xorl %r15d, %r15d
movslq %ebp, %rsi
cmpq %rsi, %r15
jge 0x37b66f
movq %r15, -0x40(%rsp)
imulq %r11, %r15
movq -0x38(%rsp), %rsi
movq (%rsi), %r8
movq 0x10(%rsi), %r9
imulq %r9, %r15
addq %r8, %r15
movq 0xe0(%rdi,%r10), %rbx
movq 0xf0(%rdi,%r10), %r12
movq 0x128(%rdi,%r10), %r14
movq %r10, %rdx
movq 0x138(%rdi,%r10), %r13
xorl %esi, %esi
xorps %xmm4, %xmm4
xorl %r10d, %r10d
movq %r15, %r11
leal 0x3(%r10), %ebp
cmpl %ecx, %ebp
jge 0x37af11
movups (%r11), %xmm5
addps %xmm5, %xmm4
addq $0x10, %r11
addl $0x4, %r10d
addq $0x4, %rsi
jmp 0x37aef4
movaps %xmm4, %xmm5
unpckhpd %xmm4, %xmm5 # xmm5 = xmm5[1],xmm4[1]
addps %xmm4, %xmm5
movaps %xmm5, %xmm4
shufps $0x55, %xmm5, %xmm4 # xmm4 = xmm4[1,1],xmm5[1,1]
addss %xmm5, %xmm4
imulq %rax, %r9
addq %r9, %r8
cmpl %ecx, %esi
jge 0x37af3c
addss (%r8,%rsi,4), %xmm4
incq %rsi
jmp 0x37af2d
mulss %xmm0, %xmm4
movaps %xmm4, %xmm6
shufps $0x0, %xmm4, %xmm6 # xmm6 = xmm6[0,0],xmm4[0,0]
xorl %r9d, %r9d
xorps %xmm5, %xmm5
xorl %esi, %esi
movq %r15, %r10
leal 0x3(%rsi), %r11d
cmpl %ecx, %r11d
jge 0x37af75
movups (%r10), %xmm7
subps %xmm6, %xmm7
mulps %xmm7, %xmm7
addps %xmm7, %xmm5
addq $0x10, %r10
addl $0x4, %esi
addq $0x4, %r9
jmp 0x37af52
movaps %xmm5, %xmm7
unpckhpd %xmm5, %xmm7 # xmm7 = xmm7[1],xmm5[1]
addps %xmm5, %xmm7
movaps %xmm7, %xmm6
shufps $0x55, %xmm7, %xmm6 # xmm6 = xmm6[1,1],xmm7[1,1]
addss %xmm7, %xmm6
movq -0x20(%rsp), %rdi
cmpl %ecx, %r9d
jge 0x37afab
movss (%r8,%r9,4), %xmm5
subss %xmm4, %xmm5
mulss %xmm5, %xmm5
addss %xmm5, %xmm6
incq %r9
jmp 0x37af8f
mulss %xmm0, %xmm6
addss 0xd8(%rdi,%rdx), %xmm6
rsqrtss %xmm6, %xmm5
mulss %xmm5, %xmm6
mulss %xmm5, %xmm6
addss %xmm1, %xmm6
mulss %xmm2, %xmm5
mulss %xmm6, %xmm5
xorps %xmm3, %xmm4
mulss %xmm5, %xmm4
cmpl $0x0, 0xdc(%rdi,%rdx)
je 0x37b07f
movaps %xmm5, %xmm6
shufps $0x0, %xmm5, %xmm6 # xmm6 = xmm6[0,0],xmm5[0,0]
movaps %xmm4, %xmm7
shufps $0x0, %xmm4, %xmm7 # xmm7 = xmm7[0,0],xmm4[0,0]
imulq %rax, %r13
addq %r13, %r14
imulq %rax, %r12
addq %r12, %rbx
xorl %esi, %esi
xorl %r9d, %r9d
xorl %r10d, %r10d
movq -0x40(%rsp), %r15
leal 0x3(%r10), %r11d
cmpl %ecx, %r11d
jge 0x37b049
movups (%rbx,%r9), %xmm8
movups (%r14,%r9), %xmm9
movups (%r8,%r9), %xmm10
mulps %xmm6, %xmm10
addps %xmm7, %xmm10
mulps %xmm8, %xmm10
addps %xmm9, %xmm10
movups %xmm10, (%r8,%r9)
addl $0x4, %r10d
addq $0x10, %r9
addq $0x4, %rsi
jmp 0x37b00e
movq -0x20(%rsp), %rdi
movq -0x30(%rsp), %r11
cmpl %ecx, %esi
jge 0x37b0e0
movss (%r8,%rsi,4), %xmm6
mulss %xmm5, %xmm6
addss %xmm4, %xmm6
mulss (%rbx,%rsi,4), %xmm6
addss (%r14,%rsi,4), %xmm6
movss %xmm6, (%r8,%rsi,4)
incq %rsi
jmp 0x37b053
movaps %xmm5, %xmm6
shufps $0x0, %xmm5, %xmm6 # xmm6 = xmm6[0,0],xmm5[0,0]
movaps %xmm4, %xmm7
shufps $0x0, %xmm4, %xmm7 # xmm7 = xmm7[0,0],xmm4[0,0]
xorl %esi, %esi
xorl %r9d, %r9d
movq -0x30(%rsp), %r11
leal 0x3(%r9), %r10d
cmpl %ecx, %r10d
jge 0x37b0be
movups (%r15), %xmm8
mulps %xmm6, %xmm8
addps %xmm7, %xmm8
movups %xmm8, (%r15)
addq $0x10, %r15
addl $0x4, %r9d
addq $0x4, %rsi
jmp 0x37b097
movq -0x40(%rsp), %r15
cmpl %ecx, %esi
jge 0x37b0e0
movss (%r8,%rsi,4), %xmm6
mulss %xmm5, %xmm6
addss %xmm4, %xmm6
movss %xmm6, (%r8,%rsi,4)
incq %rsi
jmp 0x37b0c3
incq %r15
movq -0x28(%rsp), %rsi
movq -0x18(%rsi), %r10
movl 0xd0(%rdi,%r10), %ebp
addq %r11, %rax
jmp 0x37ae9e
movq -0x38(%rsp), %rax
movl 0x2c(%rax), %eax
movl %eax, %edx
imull %ecx, %edx
cvtsi2ss %edx, %xmm1
xorl %ebx, %ebx
testl %ecx, %ecx
cmovlel %ebx, %ecx
movss 0x73b6c(%rip), %xmm0 # 0x3eec88
divss %xmm1, %xmm0
movss 0x7a69c(%rip), %xmm1 # 0x3f57c4
movss 0x767f4(%rip), %xmm2 # 0x3f1924
movaps 0x72f59(%rip), %xmm3 # 0x3ee090
xorl %r8d, %r8d
movslq %ebp, %rsi
cmpq %rsi, %r8
jge 0x37b66f
movq %rbx, -0x8(%rsp)
movq %r8, -0x10(%rsp)
movq %r8, %rbp
imulq %r11, %rbp
movq -0x38(%rsp), %rsi
movslq 0x2c(%rsi), %r9
movq (%rsi), %r13
movq 0x10(%rsi), %r8
movq %rbp, %r14
movq %r9, -0x18(%rsp)
imulq %r9, %r14
imulq %r8, %r14
addq %r13, %r14
movq 0xe0(%rdi,%r10), %r15
movq 0x128(%rdi,%r10), %r12
movq 0xf0(%rdi,%r10), %rsi
imulq %rbp, %rsi
movq %r10, -0x40(%rsp)
imulq 0x138(%rdi,%r10), %rbp
xorps %xmm4, %xmm4
xorl %r9d, %r9d
movq %r14, %rbx
xorl %r11d, %r11d
leal 0x3(%r11), %r10d
cmpl %edx, %r10d
jge 0x37b1cd
movups (%rbx), %xmm5
addps %xmm5, %xmm4
addq $0x10, %rbx
addl $0x4, %r11d
addq $0x4, %r9
jmp 0x37b1b0
addq %rsi, %r15
movaps %xmm4, %xmm5
unpckhpd %xmm4, %xmm5 # xmm5 = xmm5[1],xmm4[1]
addps %xmm4, %xmm5
movaps %xmm5, %xmm4
shufps $0x55, %xmm5, %xmm4 # xmm4 = xmm4[1,1],xmm5[1,1]
addss %xmm5, %xmm4
movq -0x8(%rsp), %rbx
imulq %rbx, %r8
imulq -0x18(%rsp), %r8
addq %r8, %r13
movq -0x20(%rsp), %rdi
movq -0x30(%rsp), %r11
cmpl %edx, %r9d
jge 0x37b212
addss (%r13,%r9,4), %xmm4
incq %r9
jmp 0x37b201
mulss %xmm0, %xmm4
movaps %xmm4, %xmm6
shufps $0x0, %xmm4, %xmm6 # xmm6 = xmm6[0,0],xmm4[0,0]
xorl %r9d, %r9d
xorps %xmm5, %xmm5
movq %r14, %rsi
xorl %r8d, %r8d
leal 0x3(%r8), %r10d
cmpl %edx, %r10d
jge 0x37b24c
movups (%rsi), %xmm7
subps %xmm6, %xmm7
mulps %xmm7, %xmm7
addps %xmm7, %xmm5
addq $0x10, %rsi
addl $0x4, %r8d
addq $0x4, %r9
jmp 0x37b229
movaps %xmm5, %xmm7
unpckhpd %xmm5, %xmm7 # xmm7 = xmm7[1],xmm5[1]
addps %xmm5, %xmm7
movaps %xmm7, %xmm6
shufps $0x55, %xmm7, %xmm6 # xmm6 = xmm6[1,1],xmm7[1,1]
addss %xmm7, %xmm6
cmpl %edx, %r9d
jge 0x37b27e
movss (%r13,%r9,4), %xmm5
subss %xmm4, %xmm5
mulss %xmm5, %xmm5
addss %xmm5, %xmm6
incq %r9
jmp 0x37b261
mulss %xmm0, %xmm6
movq -0x40(%rsp), %rsi
addss 0xd8(%rdi,%rsi), %xmm6
rsqrtss %xmm6, %xmm5
mulss %xmm5, %xmm6
mulss %xmm5, %xmm6
addss %xmm1, %xmm6
mulss %xmm2, %xmm5
mulss %xmm6, %xmm5
xorps %xmm3, %xmm4
mulss %xmm5, %xmm4
cmpl $0x0, 0xdc(%rdi,%rsi)
je 0x37b349
addq %rbp, %r12
xorl %r9d, %r9d
cmpl %ecx, %r9d
je 0x37b3a0
movss (%r15), %xmm6
movaps %xmm6, %xmm7
mulss %xmm5, %xmm7
mulss %xmm4, %xmm6
addss (%r12), %xmm6
movaps %xmm7, %xmm8
shufps $0x0, %xmm7, %xmm8 # xmm8 = xmm8[0,0],xmm7[0,0]
movaps %xmm6, %xmm9
shufps $0x0, %xmm6, %xmm9 # xmm9 = xmm9[0,0],xmm6[0,0]
xorl %r8d, %r8d
leal 0x3(%r8), %esi
cmpl %eax, %esi
jge 0x37b334
movups (%r14), %xmm10
mulps %xmm8, %xmm10
addps %xmm9, %xmm10
movups %xmm10, (%r14)
addq $0x10, %r14
addl $0x4, %r8d
jmp 0x37b2f7
movss (%r14), %xmm8
mulss %xmm7, %xmm8
addss %xmm6, %xmm8
movss %xmm8, (%r14)
addq $0x4, %r14
incl %r8d
cmpl %eax, %r8d
jl 0x37b319
addq $0x4, %r15
addq $0x4, %r12
incl %r9d
jmp 0x37b2c3
movaps %xmm5, %xmm6
shufps $0x0, %xmm5, %xmm6 # xmm6 = xmm6[0,0],xmm5[0,0]
movaps %xmm4, %xmm7
shufps $0x0, %xmm4, %xmm7 # xmm7 = xmm7[0,0],xmm4[0,0]
xorl %r8d, %r8d
xorl %esi, %esi
leal 0x3(%rsi), %r9d
cmpl %edx, %r9d
jge 0x37b39b
movups (%r14), %xmm8
mulps %xmm6, %xmm8
addps %xmm7, %xmm8
movups %xmm8, (%r14)
addq $0x10, %r14
addl $0x4, %esi
addq $0x4, %r8
jmp 0x37b35c
movss (%r13,%r8,4), %xmm6
mulss %xmm5, %xmm6
addss %xmm4, %xmm6
movss %xmm6, (%r13,%r8,4)
incq %r8
cmpl %edx, %r8d
jl 0x37b382
movq -0x10(%rsp), %r8
incq %r8
movq -0x28(%rsp), %rsi
movq -0x18(%rsi), %r10
movl 0xd0(%rdi,%r10), %ebp
addq %r11, %rbx
jmp 0x37b13a
addl $-0x3, %esi
cmpl $0x1, %esi
ja 0x37b66f
movq -0x38(%rsp), %rax
movl 0x30(%rax), %ebx
imull 0x2c(%rax), %ebx
imull 0x34(%rax), %ebx
movl %ebx, %eax
imull %ecx, %eax
cvtsi2ss %eax, %xmm1
xorl %r14d, %r14d
testl %ecx, %ecx
cmovlel %r14d, %ecx
movss 0x73891(%rip), %xmm0 # 0x3eec88
divss %xmm1, %xmm0
movss 0x7a3c1(%rip), %xmm1 # 0x3f57c4
movss 0x76519(%rip), %xmm2 # 0x3f1924
movaps 0x72c7e(%rip), %xmm3 # 0x3ee090
movslq %ebp, %rax
cmpq %rax, %r14
jge 0x37b66f
movq %r14, %rbp
imulq %r11, %rbp
movq -0x38(%rsp), %r9
movslq 0x2c(%r9), %rax
movslq 0x30(%r9), %rdx
imulq %rax, %rdx
movslq 0x34(%r9), %r8
movq 0x40(%r9), %r15
imulq %rbp, %r15
movq 0x10(%r9), %rsi
imulq %rsi, %r15
addq (%r9), %r15
imulq %rsi, %r8
imulq %rdx, %r8
addq $0xf, %r8
andq $-0x10, %r8
movq 0xe0(%rdi,%r10), %r12
movq 0xf0(%rdi,%r10), %r9
imulq %rbp, %r9
imulq 0x138(%rdi,%r10), %rbp
movq %r10, -0x40(%rsp)
movq 0x128(%rdi,%r10), %r13
movq %r8, %rax
xorl %edx, %edx
divq %rsi
subq %rdx, %r8
xorps %xmm4, %xmm4
xorl %eax, %eax
cmpq %rcx, %rax
je 0x37b4ee
movq %r8, %rdx
imulq %rax, %rdx
addq %r15, %rdx
xorps %xmm5, %xmm5
xorl %esi, %esi
leal 0x3(%rsi), %r10d
cmpl %ebx, %r10d
jge 0x37b4c3
movups (%rdx), %xmm6
addps %xmm6, %xmm5
addq $0x10, %rdx
addl $0x4, %esi
jmp 0x37b4ab
movaps %xmm5, %xmm6
unpckhpd %xmm5, %xmm6 # xmm6 = xmm6[1],xmm5[1]
addps %xmm5, %xmm6
addss %xmm6, %xmm4
shufps $0x55, %xmm6, %xmm6 # xmm6 = xmm6[1,1,1,1]
addss %xmm6, %xmm4
cmpl %ebx, %esi
jge 0x37b4e9
addss (%rdx), %xmm4
addq $0x4, %rdx
incl %esi
jmp 0x37b4d9
incq %rax
jmp 0x37b497
addq %r9, %r12
mulss %xmm0, %xmm4
movaps %xmm4, %xmm5
shufps $0x0, %xmm4, %xmm5 # xmm5 = xmm5[0,0],xmm4[0,0]
xorps %xmm6, %xmm6
xorl %eax, %eax
cmpq %rcx, %rax
je 0x37b577
movq %r8, %rdx
imulq %rax, %rdx
addq %r15, %rdx
xorl %r9d, %r9d
xorps %xmm7, %xmm7
leal 0x3(%r9), %esi
cmpl %ebx, %esi
jge 0x37b538
movups (%rdx), %xmm8
subps %xmm5, %xmm8
mulps %xmm8, %xmm8
addps %xmm8, %xmm7
addq $0x10, %rdx
addl $0x4, %r9d
jmp 0x37b516
movaps %xmm7, %xmm8
unpckhpd %xmm7, %xmm8 # xmm8 = xmm8[1],xmm7[1]
addps %xmm7, %xmm8
addss %xmm8, %xmm6
shufps $0x55, %xmm8, %xmm8 # xmm8 = xmm8[1,1,1,1]
addss %xmm8, %xmm6
cmpl %ebx, %r9d
jge 0x37b572
movss (%rdx), %xmm7
subss %xmm4, %xmm7
mulss %xmm7, %xmm7
addss %xmm7, %xmm6
addq $0x4, %rdx
incl %r9d
jmp 0x37b554
incq %rax
jmp 0x37b501
addq %rbp, %r13
mulss %xmm0, %xmm6
movq -0x40(%rsp), %rax
addss 0xd8(%rdi,%rax), %xmm6
rsqrtss %xmm6, %xmm5
mulss %xmm5, %xmm6
mulss %xmm5, %xmm6
addss %xmm1, %xmm6
mulss %xmm2, %xmm5
mulss %xmm6, %xmm5
xorps %xmm3, %xmm4
mulss %xmm5, %xmm4
movq (%rdi), %rax
xorl %edx, %edx
cmpq %rcx, %rdx
je 0x37b656
movq -0x18(%rax), %rsi
cmpl $0x0, 0xdc(%rdi,%rsi)
movaps %xmm5, %xmm6
movaps %xmm4, %xmm7
je 0x37b5e4
movss (%r12), %xmm7
movaps %xmm7, %xmm6
mulss %xmm5, %xmm6
mulss %xmm4, %xmm7
addss (%r13), %xmm7
movq %r8, %rsi
imulq %rdx, %rsi
addq %r15, %rsi
movaps %xmm6, %xmm8
shufps $0x0, %xmm6, %xmm8 # xmm8 = xmm8[0,0],xmm6[0,0]
movaps %xmm7, %xmm9
shufps $0x0, %xmm7, %xmm9 # xmm9 = xmm9[0,0],xmm7[0,0]
xorl %r9d, %r9d
leal 0x3(%r9), %r10d
cmpl %ebx, %r10d
jge 0x37b641
movups (%rsi), %xmm10
mulps %xmm8, %xmm10
addps %xmm9, %xmm10
movups %xmm10, (%rsi)
addq $0x10, %rsi
addl $0x4, %r9d
jmp 0x37b603
movss (%rsi), %xmm8
mulss %xmm6, %xmm8
addss %xmm7, %xmm8
movss %xmm8, (%rsi)
addq $0x4, %rsi
incl %r9d
cmpl %ebx, %r9d
jl 0x37b626
addq $0x4, %r12
addq $0x4, %r13
incq %rdx
jmp 0x37b5b0
incq %r14
movq -0x28(%rsp), %rax
movq -0x18(%rax), %r10
movl 0xd0(%rdi,%r10), %ebp
jmp 0x37b412
xorl %eax, %eax
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
| /csukuangfj[P]ncnn/src/layer/x86/groupnorm_x86.cpp |
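In the asm for every branch above, 1.f / sqrtf(...) compiles to no division at all: rsqrtss produces a coarse estimate and the mulss/mulss/addss/mulss/mulss tail that follows is one Newton-Raphson refinement (the two constants loaded into xmm1 and xmm2 are presumably -3.0f and -0.5f). A scalar sketch of that sequence, as an illustration rather than ncnn code:

#include <immintrin.h>

// One Newton-Raphson step on top of the ~12-bit rsqrtss estimate:
// y1 = 0.5 * y0 * (3 - x * y0^2), written the way the asm evaluates it,
// as -0.5f * y0 * (x * y0 * y0 - 3.0f).
static inline float rsqrt_refined(float x)
{
    float y0 = _mm_cvtss_f32(_mm_rsqrt_ss(_mm_set_ss(x))); // coarse estimate
    return -0.5f * y0 * (x * y0 * y0 - 3.0f);              // refined estimate
}

scale1 is then rsqrt_refined(var + eps), so the hot path never issues a divss or sqrtss.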
ncnn::LayerNorm::load_model(ncnn::ModelBin const&) | int LayerNorm::load_model(const ModelBin& mb)
{
if (affine == 0)
return 0;
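    // type 1 = raw float32 weights; type 0 would auto-detect the storage format from a leading flag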
gamma_data = mb.load(affine_size, 1);
if (gamma_data.empty())
return -100;
beta_data = mb.load(affine_size, 1);
if (beta_data.empty())
return -100;
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x48, %rsp
xorl %ebx, %ebx
cmpl $0x0, 0xd8(%rdi)
je 0x37dc1a
movq %rsi, %r15
movq %rdi, %r14
movl 0xd0(%rdi), %edx
movq (%rsi), %rax
movq %rsp, %r12
pushq $0x1
popq %rcx
movq %r12, %rdi
callq *0x10(%rax)
leaq 0xe0(%r14), %r13
movq 0x8(%r12), %rax
cmpq %r12, %r13
je 0x37dac3
testq %rax, %rax
je 0x37da31
lock
incl (%rax)
movq 0xe8(%r14), %rax
testq %rax, %rax
je 0x37da65
lock
decl (%rax)
jne 0x37da65
movq 0xe0(%r14), %rsi
movq 0x100(%r14), %rdi
testq %rdi, %rdi
je 0x37da5d
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x37da65
movq %rsi, %rdi
callq 0x5f3e0
movq (%rsp), %rax
movq %rax, 0xe0(%r14)
movq 0x8(%rsp), %rax
movq %rax, 0xe8(%r14)
movq 0x10(%rsp), %rcx
movq %rcx, 0xf0(%r14)
movl 0x18(%rsp), %ecx
movl %ecx, 0xf8(%r14)
movq 0x20(%rsp), %rcx
movq %rcx, 0x100(%r14)
movups 0x28(%rsp), %xmm0
movups %xmm0, 0x108(%r14)
movl 0x38(%rsp), %ecx
movl %ecx, 0x118(%r14)
movq 0x40(%rsp), %rcx
movq %rcx, 0x120(%r14)
testq %rax, %rax
je 0x37daeb
lock
decl (%rax)
jne 0x37daeb
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
je 0x37dae3
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x37daeb
movq %rsi, %rdi
callq 0x5f3e0
pushq $-0x64
popq %rbp
cmpq $0x0, (%r13)
je 0x37dc18
movslq 0x118(%r14), %rax
imulq 0x120(%r14), %rax
testq %rax, %rax
je 0x37dc18
movl 0xd0(%r14), %edx
movq (%r15), %rax
pushq $0x1
popq %rcx
movq %r12, %rdi
movq %r15, %rsi
callq *0x10(%rax)
leaq 0x128(%r14), %r15
movq 0x8(%rsp), %rax
cmpq %r12, %r15
je 0x37dbd6
testq %rax, %rax
je 0x37db44
lock
incl (%rax)
movq 0x130(%r14), %rax
testq %rax, %rax
je 0x37db78
lock
decl (%rax)
jne 0x37db78
movq 0x128(%r14), %rsi
movq 0x148(%r14), %rdi
testq %rdi, %rdi
je 0x37db70
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x37db78
movq %rsi, %rdi
callq 0x5f3e0
movq (%rsp), %rax
movq %rax, 0x128(%r14)
movq 0x8(%rsp), %rax
movq %rax, 0x130(%r14)
movq 0x10(%rsp), %rcx
movq %rcx, 0x138(%r14)
movl 0x18(%rsp), %ecx
movl %ecx, 0x140(%r14)
movq 0x20(%rsp), %rcx
movq %rcx, 0x148(%r14)
movups 0x28(%rsp), %xmm0
movups %xmm0, 0x150(%r14)
movl 0x38(%rsp), %ecx
movl %ecx, 0x160(%r14)
movq 0x40(%rsp), %rcx
movq %rcx, 0x168(%r14)
testq %rax, %rax
je 0x37dbfe
lock
decl (%rax)
jne 0x37dbfe
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
je 0x37dbf6
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x37dbfe
movq %rsi, %rdi
callq 0x5f3e0
cmpq $0x0, (%r15)
je 0x37dc18
movslq 0x160(%r14), %rax
imulq 0x168(%r14), %rax
testq %rax, %rax
jne 0x37dc1a
movl %ebp, %ebx
movl %ebx, %eax
addq $0x48, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq %rax, %rbx
movq 0x8(%rsp), %rax
testq %rax, %rax
je 0x37dc87
lock
decl (%rax)
jne 0x37dc87
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
je 0x37dc77
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x37dc87
jmp 0x37dc91
jmp 0x37dc91
movq %rax, %rbx
movq 0x8(%rsp), %rax
testq %rax, %rax
je 0x37dc87
lock
decl (%rax)
jne 0x37dc87
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
jne 0x37dc81
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x37dc87
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
jmp 0x37dc91
movq %rax, %rdi
callq 0x61d68
nop
| /csukuangfj[P]ncnn/src/layer/layernorm.cpp |
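Most of the asm above is the inlined Mat assignment for gamma_data and beta_data: bump the refcount of the freshly loaded Mat, release whatever the member previously held, then copy the fields over. A sketch of the release half with hypothetical member names (the lock decl on the refcount, the virtual deallocate reached through vtable slot 0x18, and the plain free fallback are all visible in the listing):

#include <cstddef>
#include <cstdlib>

// Hypothetical stand-ins for the real types in mat.h / allocator.h.
struct Allocator
{
    virtual ~Allocator() {}
    virtual void* fastMalloc(size_t size) = 0;
    virtual void fastFree(void* ptr) = 0;
};

struct MatRef
{
    void* data;
    int* refcount;
    Allocator* allocator;
};

// Atomically drop the refcount and free the payload only when the last
// owner lets go (GCC/Clang builtin used for the atomic decrement).
static void mat_release(MatRef& m)
{
    if (m.refcount && __sync_fetch_and_sub(m.refcount, 1) == 1)
    {
        if (m.allocator)
            m.allocator->fastFree(m.data); // the indirect `callq *0x18(%rax)`
        else
            free(m.data);                  // the direct `callq 0x5f3e0`
    }
    m.data = 0;
    m.refcount = 0;
}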
ncnn::LayerNorm::forward_inplace(ncnn::Mat&, ncnn::Option const&) const | int LayerNorm::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
{
// x = (x - mean) / sqrt(var + eps) * gamma + beta
int dims = bottom_top_blob.dims;
if (dims == 1)
{
int w = bottom_top_blob.w;
// assert affine_size == w
float* ptr = bottom_top_blob;
// mean and var
float sum = 0.f;
float sqsum = 0.f;
for (int i = 0; i < w; i++)
{
sum += ptr[i];
//sqsum += ptr[i] * ptr[i];
}
float mean = sum / w;
float tmp = 0.f;
for (int i = 0; i < w; i++)
{
tmp = ptr[i] - mean;
sqsum += tmp * tmp;
}
float var = sqsum / w;
        // the single-pass form below can make var negative due to floating-point accuracy
//float var = sqsum / w - mean * mean;
float a = 1.f / (sqrtf(var + eps));
float b = -mean * a;
if (affine)
{
for (int i = 0; i < w; i++)
{
ptr[i] = (ptr[i] * a + b) * gamma_data[i] + beta_data[i];
}
}
else
{
for (int i = 0; i < w; i++)
{
ptr[i] = ptr[i] * a + b;
}
}
}
if (dims == 2)
{
int w = bottom_top_blob.w;
int h = bottom_top_blob.h;
// assert affine_size == w
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = 0; i < h; i++)
{
float* ptr = bottom_top_blob.row(i);
// mean and var
float sum = 0.f;
float sqsum = 0.f;
for (int j = 0; j < w; j++)
{
sum += ptr[j];
//sqsum += ptr[j] * ptr[j];
}
float mean = sum / w;
float tmp = 0.f;
for (int j = 0; j < w; j++)
{
tmp = ptr[j] - mean;
sqsum += tmp * tmp;
}
float var = sqsum / w;
            // the single-pass form below can make var negative due to floating-point accuracy
//float var = sqsum / w - mean * mean;
float a = 1.f / (sqrtf(var + eps));
float b = -mean * a;
if (affine)
{
for (int j = 0; j < w; j++)
{
ptr[j] = (ptr[j] * a + b) * gamma_data[j] + beta_data[j];
}
}
else
{
for (int j = 0; j < w; j++)
{
ptr[j] = ptr[j] * a + b;
}
}
}
}
if (dims == 3)
{
int w = bottom_top_blob.w;
int h = bottom_top_blob.h;
int channels = bottom_top_blob.c;
int size = w * h;
if (affine_size == w)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
for (int i = 0; i < h; i++)
{
float* ptr = bottom_top_blob.channel(q).row(i);
// mean and var
float sum = 0.f;
float sqsum = 0.f;
for (int j = 0; j < w; j++)
{
sum += ptr[j];
//sqsum += ptr[j] * ptr[j];
}
float mean = sum / w;
float tmp = 0.f;
for (int j = 0; j < w; j++)
{
tmp = ptr[j] - mean;
sqsum += tmp * tmp;
}
float var = sqsum / w;
                // the single-pass form below can make var negative due to floating-point accuracy
//float var = sqsum / w - mean * mean;
float a = 1.f / (sqrtf(var + eps));
float b = -mean * a;
if (affine)
{
for (int j = 0; j < w; j++)
{
ptr[j] = (ptr[j] * a + b) * gamma_data[j] + beta_data[j];
}
}
else
{
for (int j = 0; j < w; j++)
{
ptr[j] = ptr[j] * a + b;
}
}
}
}
}
else // if (affine_size == size)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
float* ptr = bottom_top_blob.channel(q);
// mean and var
float sum = 0.f;
float sqsum = 0.f;
for (int i = 0; i < size; i++)
{
sum += ptr[i];
//sqsum += ptr[i] * ptr[i];
}
float mean = sum / size;
float tmp = 0.f;
for (int i = 0; i < size; i++)
{
tmp = ptr[i] - mean;
sqsum += tmp * tmp;
}
float var = sqsum / size;
            // the single-pass form below can make var negative due to floating-point accuracy
//float var = sqsum / size - mean * mean;
float a = 1.f / (sqrtf(var + eps));
float b = -mean * a;
if (affine)
{
for (int i = 0; i < size; i++)
{
ptr[i] = (ptr[i] * a + b) * gamma_data[i] + beta_data[i];
}
}
else
{
for (int i = 0; i < size; i++)
{
ptr[i] = ptr[i] * a + b;
}
}
}
}
}
return 0;
} | pushq %r15
pushq %r14
pushq %r12
pushq %rbx
movl 0x28(%rsi), %eax
cmpl $0x3, %eax
je 0x37de0b
cmpl $0x2, %eax
je 0x37dce3
cmpl $0x1, %eax
jne 0x37e14b
movl 0x2c(%rsi), %edx
movq (%rsi), %rax
xorl %esi, %esi
testl %edx, %edx
movl $0x0, %ecx
cmovgl %edx, %ecx
xorps %xmm0, %xmm0
cmpq %rsi, %rcx
je 0x37df6a
addss (%rax,%rsi,4), %xmm0
incq %rsi
jmp 0x37dcd0
movslq 0x2c(%rsi), %rax
cvtsi2ss %eax, %xmm1
xorl %ecx, %ecx
testl %eax, %eax
movl $0x0, %edx
cmovgl %eax, %edx
movl 0x30(%rsi), %r8d
movq (%rsi), %r9
testl %r8d, %r8d
cmovlel %ecx, %r8d
movl 0xd8(%rdi), %r10d
movss 0x70f74(%rip), %xmm0 # 0x3eec88
imulq 0x10(%rsi), %rax
divss %xmm1, %xmm0
movss 0x77a9f(%rip), %xmm1 # 0x3f57c4
movss 0x73bf7(%rip), %xmm2 # 0x3f1924
movaps 0x7035c(%rip), %xmm3 # 0x3ee090
cmpq %r8, %rcx
je 0x37e14b
xorps %xmm4, %xmm4
xorl %esi, %esi
cmpq %rsi, %rdx
je 0x37dd52
addss (%r9,%rsi,4), %xmm4
incq %rsi
jmp 0x37dd42
mulss %xmm0, %xmm4
xorps %xmm6, %xmm6
xorl %esi, %esi
cmpq %rsi, %rdx
je 0x37dd77
movss (%r9,%rsi,4), %xmm5
subss %xmm4, %xmm5
mulss %xmm5, %xmm5
addss %xmm5, %xmm6
incq %rsi
jmp 0x37dd5b
mulss %xmm0, %xmm6
addss 0xd4(%rdi), %xmm6
rsqrtss %xmm6, %xmm5
mulss %xmm5, %xmm6
mulss %xmm5, %xmm6
addss %xmm1, %xmm6
mulss %xmm2, %xmm5
mulss %xmm6, %xmm5
xorps %xmm3, %xmm4
mulss %xmm5, %xmm4
testl %r10d, %r10d
je 0x37dde0
movq 0xe0(%rdi), %rsi
movq 0x128(%rdi), %r11
xorl %ebx, %ebx
cmpq %rbx, %rdx
je 0x37de00
movss (%r9,%rbx,4), %xmm6
mulss %xmm5, %xmm6
addss %xmm4, %xmm6
mulss (%rsi,%rbx,4), %xmm6
addss (%r11,%rbx,4), %xmm6
movss %xmm6, (%r9,%rbx,4)
incq %rbx
jmp 0x37ddb7
xorl %esi, %esi
cmpq %rsi, %rdx
je 0x37de00
movss (%r9,%rsi,4), %xmm6
mulss %xmm5, %xmm6
addss %xmm4, %xmm6
movss %xmm6, (%r9,%rsi,4)
incq %rsi
jmp 0x37dde2
incq %rcx
addq %rax, %r9
jmp 0x37dd34
movl 0x2c(%rsi), %edx
movl 0x30(%rsi), %eax
movl 0x38(%rsi), %ecx
cmpl %edx, 0xd0(%rdi)
jne 0x37e010
cvtsi2ss %edx, %xmm1
xorl %r8d, %r8d
testl %edx, %edx
cmovlel %r8d, %edx
testl %eax, %eax
cmovlel %r8d, %eax
testl %ecx, %ecx
cmovlel %r8d, %ecx
movss 0x70e47(%rip), %xmm0 # 0x3eec88
divss %xmm1, %xmm0
movss 0x77977(%rip), %xmm1 # 0x3f57c4
movss 0x73acf(%rip), %xmm2 # 0x3f1924
movaps 0x70234(%rip), %xmm3 # 0x3ee090
cmpq %rcx, %r8
je 0x37e14b
movslq 0x2c(%rsi), %r9
movq 0x10(%rsi), %r10
imulq %r10, %r9
movl 0xd8(%rdi), %r11d
imulq 0x40(%rsi), %r10
imulq %r8, %r10
addq (%rsi), %r10
xorl %ebx, %ebx
cmpq %rax, %rbx
je 0x37df62
xorps %xmm4, %xmm4
xorl %r14d, %r14d
cmpq %r14, %rdx
je 0x37dea5
addss (%r10,%r14,4), %xmm4
incq %r14
jmp 0x37de95
mulss %xmm0, %xmm4
xorps %xmm6, %xmm6
xorl %r14d, %r14d
cmpq %r14, %rdx
je 0x37decb
movss (%r10,%r14,4), %xmm5
subss %xmm4, %xmm5
mulss %xmm5, %xmm5
addss %xmm5, %xmm6
incq %r14
jmp 0x37deaf
mulss %xmm0, %xmm6
addss 0xd4(%rdi), %xmm6
rsqrtss %xmm6, %xmm5
mulss %xmm5, %xmm6
mulss %xmm5, %xmm6
addss %xmm1, %xmm6
mulss %xmm2, %xmm5
mulss %xmm6, %xmm5
xorps %xmm3, %xmm4
mulss %xmm5, %xmm4
testl %r11d, %r11d
je 0x37df36
movq 0xe0(%rdi), %r14
movq 0x128(%rdi), %r15
xorl %r12d, %r12d
cmpq %r12, %rdx
je 0x37df57
movss (%r10,%r12,4), %xmm6
mulss %xmm5, %xmm6
addss %xmm4, %xmm6
mulss (%r14,%r12,4), %xmm6
addss (%r15,%r12,4), %xmm6
movss %xmm6, (%r10,%r12,4)
incq %r12
jmp 0x37df0c
xorl %r14d, %r14d
cmpq %r14, %rdx
je 0x37df57
movss (%r10,%r14,4), %xmm6
mulss %xmm5, %xmm6
addss %xmm4, %xmm6
movss %xmm6, (%r10,%r14,4)
incq %r14
jmp 0x37df39
incq %rbx
addq %r9, %r10
jmp 0x37de86
incq %r8
jmp 0x37de5c
cvtsi2ss %edx, %xmm1
divss %xmm1, %xmm0
xorps %xmm2, %xmm2
xorl %edx, %edx
cmpq %rdx, %rcx
je 0x37df92
movss (%rax,%rdx,4), %xmm3
subss %xmm0, %xmm3
mulss %xmm3, %xmm3
addss %xmm3, %xmm2
incq %rdx
jmp 0x37df77
divss %xmm1, %xmm2
addss 0xd4(%rdi), %xmm2
rsqrtss %xmm2, %xmm1
mulss %xmm1, %xmm2
mulss %xmm1, %xmm2
addss 0x77812(%rip), %xmm2 # 0x3f57c4
mulss 0x7396a(%rip), %xmm1 # 0x3f1924
xorps 0x700cf(%rip), %xmm0 # 0x3ee090
mulss %xmm2, %xmm1
mulss %xmm1, %xmm0
cmpl $0x0, 0xd8(%rdi)
je 0x37e12d
movq 0xe0(%rdi), %rdx
movq 0x128(%rdi), %rsi
xorl %edi, %edi
cmpq %rdi, %rcx
je 0x37e14b
movss (%rax,%rdi,4), %xmm2
mulss %xmm1, %xmm2
addss %xmm0, %xmm2
mulss (%rdx,%rdi,4), %xmm2
addss (%rsi,%rdi,4), %xmm2
movss %xmm2, (%rax,%rdi,4)
incq %rdi
jmp 0x37dfe6
imull %edx, %eax
movq (%rsi), %rdx
cvtsi2ss %eax, %xmm1
xorl %r8d, %r8d
testl %eax, %eax
cmovlel %r8d, %eax
movq 0x40(%rsi), %r9
testl %ecx, %ecx
cmovlel %r8d, %ecx
movl 0xd8(%rdi), %r10d
movss 0x70c4c(%rip), %xmm0 # 0x3eec88
imulq 0x10(%rsi), %r9
divss %xmm1, %xmm0
movss 0x77777(%rip), %xmm1 # 0x3f57c4
movss 0x738cf(%rip), %xmm2 # 0x3f1924
movaps 0x70034(%rip), %xmm3 # 0x3ee090
cmpq %rcx, %r8
je 0x37e14b
xorps %xmm4, %xmm4
xorl %esi, %esi
cmpq %rsi, %rax
je 0x37e079
addss (%rdx,%rsi,4), %xmm4
incq %rsi
jmp 0x37e06a
mulss %xmm0, %xmm4
xorps %xmm6, %xmm6
xorl %esi, %esi
cmpq %rsi, %rax
je 0x37e09d
movss (%rdx,%rsi,4), %xmm5
subss %xmm4, %xmm5
mulss %xmm5, %xmm5
addss %xmm5, %xmm6
incq %rsi
jmp 0x37e082
mulss %xmm0, %xmm6
addss 0xd4(%rdi), %xmm6
rsqrtss %xmm6, %xmm5
mulss %xmm5, %xmm6
mulss %xmm5, %xmm6
addss %xmm1, %xmm6
mulss %xmm2, %xmm5
mulss %xmm6, %xmm5
xorps %xmm3, %xmm4
mulss %xmm5, %xmm4
testl %r10d, %r10d
je 0x37e104
movq 0xe0(%rdi), %rsi
movq 0x128(%rdi), %r11
xorl %ebx, %ebx
cmpq %rbx, %rax
je 0x37e122
movss (%rdx,%rbx,4), %xmm6
mulss %xmm5, %xmm6
addss %xmm4, %xmm6
mulss (%rsi,%rbx,4), %xmm6
addss (%r11,%rbx,4), %xmm6
movss %xmm6, (%rdx,%rbx,4)
incq %rbx
jmp 0x37e0dd
xorl %esi, %esi
cmpq %rsi, %rax
je 0x37e122
movss (%rdx,%rsi,4), %xmm6
mulss %xmm5, %xmm6
addss %xmm4, %xmm6
movss %xmm6, (%rdx,%rsi,4)
incq %rsi
jmp 0x37e106
incq %r8
addq %r9, %rdx
jmp 0x37e05c
xorl %edx, %edx
cmpq %rdx, %rcx
je 0x37e14b
movss (%rax,%rdx,4), %xmm2
mulss %xmm1, %xmm2
addss %xmm0, %xmm2
movss %xmm2, (%rax,%rdx,4)
incq %rdx
jmp 0x37e12f
xorl %eax, %eax
popq %rbx
popq %r12
popq %r14
popq %r15
retq
nop
| /csukuangfj[P]ncnn/src/layer/layernorm.cpp |
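For reference, the algebra behind the a/b fold that all four branches share: with per-vector mean $\mu$ and variance $\sigma^2$,

$$a = \frac{1}{\sqrt{\sigma^2 + \varepsilon}}, \qquad b = -\mu a, \qquad (x_i a + b)\,\gamma_i + \beta_i = \frac{x_i - \mu}{\sqrt{\sigma^2 + \varepsilon}}\,\gamma_i + \beta_i,$$

so each inner loop reduces to one multiply-add per element, which is exactly the mulss/addss (and packed mulps/addps) pairs in the asm.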
virtual thunk to ncnn::LayerNorm_x86::forward_inplace(ncnn::Mat&, ncnn::Option const&) const | int LayerNorm_x86::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
{
int dims = bottom_top_blob.dims;
int elempack = bottom_top_blob.elempack;
int w = bottom_top_blob.w;
int h = bottom_top_blob.h;
int channels = bottom_top_blob.c;
const float* gamma = gamma_data;
const float* beta = beta_data;
if (dims == 1)
{
int elemcount = w * elempack;
float* ptr = bottom_top_blob;
        // 1D layer norm is special. Treat it as unpacked.
fast_1d_layer_norm(ptr, 1, elemcount, elemcount, gamma, beta, affine, eps);
}
if (dims == 2)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = 0; i < h; ++i)
{
float* ptr = bottom_top_blob.row(i);
fast_1d_layer_norm(ptr, elempack, w, w * elempack, gamma, beta, affine, eps);
}
}
if (dims == 3)
{
if (affine_size == w)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; ++q)
{
for (int i = 0; i < h; ++i)
{
float* ptr = bottom_top_blob.channel(q).row(i);
fast_1d_layer_norm(ptr, elempack, w, w * elempack, gamma, beta, affine, eps);
}
}
}
else // if (affine_size == w * h)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; ++q)
{
float* ptr = bottom_top_blob.channel(q);
fast_1d_layer_norm(ptr, elempack, w * h, w * h * elempack, gamma, beta, affine, eps);
}
}
}
return 0;
} | pushq %rax
movq (%rdi), %rax
addq -0x58(%rax), %rdi
callq 0x37e1b8
xorl %eax, %eax
popq %rcx
retq
nopl (%rax)
| /csukuangfj[P]ncnn/src/layer/x86/layernorm_x86.cpp |
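The addq -0x58(%rax), %rdi above reads a this-pointer delta out of the vtable: LayerNorm_x86 appears to inherit its base virtually, so a call arriving through the base subobject must slide this to the derived object before falling through to the real forward_inplace. A self-contained illustration of the mechanism, with hypothetical types rather than ncnn's hierarchy:

#include <cstdio>

struct Layer
{
    virtual ~Layer() {}
    virtual const char* name() const { return "Layer"; }
    long pad[8]; // give the base some size so the this-adjustment is nonzero
};

struct LayerX86 : virtual Layer
{
    const char* name() const override { return "LayerX86"; }
};

int main()
{
    LayerX86 obj;
    Layer* base = &obj;          // the base subobject sits at a different address
    printf("%s\n", base->name()); // dispatch goes through a virtual thunk
    return 0;
}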
ncnn::LayerNorm_x86_avx512::forward_inplace(ncnn::Mat&, ncnn::Option const&) const | int LayerNorm_x86_avx512::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
{
int dims = bottom_top_blob.dims;
int elempack = bottom_top_blob.elempack;
int w = bottom_top_blob.w;
int h = bottom_top_blob.h;
int channels = bottom_top_blob.c;
const float* gamma = gamma_data;
const float* beta = beta_data;
if (dims == 1)
{
int elemcount = w * elempack;
float* ptr = bottom_top_blob;
        // 1D layer norm is special. Treat it as unpacked.
fast_1d_layer_norm(ptr, 1, elemcount, elemcount, gamma, beta, affine, eps);
}
if (dims == 2)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = 0; i < h; ++i)
{
float* ptr = bottom_top_blob.row(i);
fast_1d_layer_norm(ptr, elempack, w, w * elempack, gamma, beta, affine, eps);
}
}
if (dims == 3)
{
if (affine_size == w)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; ++q)
{
for (int i = 0; i < h; ++i)
{
float* ptr = bottom_top_blob.channel(q).row(i);
fast_1d_layer_norm(ptr, elempack, w, w * elempack, gamma, beta, affine, eps);
}
}
}
else // if (affine_size == w * h)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; ++q)
{
float* ptr = bottom_top_blob.channel(q);
fast_1d_layer_norm(ptr, elempack, w * h, w * h * elempack, gamma, beta, affine, eps);
}
}
}
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
movq %rdi, %r8
movl 0x18(%rsi), %eax
movl %eax, -0x34(%rsp)
movl 0x28(%rsi), %eax
movl 0x2c(%rsi), %r9d
movq (%rdi), %rcx
movq %rcx, -0x18(%rsp)
movq -0x18(%rcx), %rcx
movq 0xe0(%rdi,%rcx), %rdi
movq %rdi, -0x28(%rsp)
movq %r8, -0x20(%rsp)
movq 0x128(%r8,%rcx), %rdi
movq %rdi, -0x30(%rsp)
cmpl $0x1, %eax
je 0x380366
movl 0x30(%rsi), %r8d
cmpl $0x3, %eax
movq %rsi, -0x10(%rsp)
je 0x37f941
cmpl $0x2, %eax
movl -0x34(%rsp), %edx
jne 0x3810c1
movl $0x10, %esi
vcvtsi2ss %r9d, %xmm0, %xmm0
imull %edx, %r9d
vmovss 0x6fd04(%rip), %xmm1 # 0x3eec88
vdivss %xmm0, %xmm1, %xmm0
xorl %r14d, %r14d
testl %r8d, %r8d
cmovlel %r14d, %r8d
vbroadcastss %xmm0, %zmm1
vmovss 0x76822(%rip), %xmm24 # 0x3f57c4
vbroadcastss 0x72209(%rip), %xmm3 # 0x3f11b4
vmovss 0x7296f(%rip), %xmm25 # 0x3f1924
vmovaps 0x7c481(%rip), %zmm5 # 0x3fb440
vbroadcastss 0x767fc(%rip), %xmm6 # 0x3f57c4
vbroadcastss 0x72953(%rip), %xmm7 # 0x3f1924
vbroadcastss 0x767ea(%rip), %ymm8 # 0x3f57c4
vbroadcastss 0x72941(%rip), %ymm9 # 0x3f1924
vbroadcastss 0x721c8(%rip), %ymm10 # 0x3f11b4
vbroadcastss 0x767ce(%rip), %zmm11 # 0x3f57c4
vbroadcastss 0x72924(%rip), %zmm12 # 0x3f1924
movb $-0x40, %bpl
vbroadcastss 0x721a7(%rip), %zmm13 # 0x3f11b4
vxorps %xmm14, %xmm14, %xmm14
cmpq %r8, %r14
je 0x3810c1
movq -0x10(%rsp), %rax
movslq 0x2c(%rax), %r15
imulq %r14, %r15
imulq 0x10(%rax), %r15
addq (%rax), %r15
movq -0x18(%rsp), %rax
movq -0x18(%rax), %rax
movq -0x20(%rsp), %rcx
movl 0xd8(%rcx,%rax), %r13d
vmovss 0xd4(%rcx,%rax), %xmm17
xorl %ecx, %ecx
vxorps %xmm15, %xmm15, %xmm15
movq %r15, %rax
leal 0x10(%rcx), %edi
cmpl %r9d, %edi
jg 0x37f06e
vaddps (%rax), %zmm15, %zmm15
addq $0x40, %rax
movl %edi, %ecx
jmp 0x37f058
vxorps %xmm16, %xmm16, %xmm16
leal 0x8(%rcx), %edi
cmpl %r9d, %edi
jg 0x37f08a
vaddps (%rax), %ymm16, %ymm16
addq $0x20, %rax
movl %edi, %ecx
jmp 0x37f074
vxorps %xmm18, %xmm18, %xmm18
leal 0x4(%rcx), %edi
cmpl %r9d, %edi
jg 0x37f0a6
vaddps (%rax), %xmm18, %xmm18
addq $0x10, %rax
movl %edi, %ecx
jmp 0x37f090
vxorps %xmm19, %xmm19, %xmm19
cmpl %r9d, %ecx
jge 0x37f0bf
vaddss (%rax), %xmm19, %xmm19
incl %ecx
addq $0x4, %rax
jmp 0x37f0ac
cmpl $0x10, %edx
je 0x37f1e4
vextractf64x4 $0x1, %zmm15, %ymm20
cmpl $0x4, %edx
je 0x37f179
cmpl $0x8, %edx
je 0x37f1b6
cmpl $0x1, %edx
jne 0x37f20e
vaddps %ymm20, %ymm15, %ymm15
vextractf32x4 $0x1, %ymm15, %xmm20
vaddps %xmm20, %xmm15, %xmm15
vshufpd $0x1, %xmm15, %xmm15, %xmm20 # xmm20 = xmm15[1,0]
vaddps %xmm15, %xmm20, %xmm15
vextractf32x4 $0x1, %ymm16, %xmm20
vaddps %xmm16, %xmm20, %xmm16
vshufps $0x11, %xmm16, %xmm18, %xmm20 # xmm20 = xmm18[1,0],xmm16[1,0]
vshufps $0xbb, %xmm16, %xmm18, %xmm16 # xmm16 = xmm18[3,2],xmm16[3,2]
vaddps %xmm20, %xmm16, %xmm16
vshufpd $0x1, %xmm16, %xmm16, %xmm18 # xmm18 = xmm16[1,0]
vaddps %xmm18, %xmm16, %xmm2
vhaddps %xmm2, %xmm2, %xmm2
vmovshdup %xmm15, %xmm16 # xmm16 = xmm15[1,1,3,3]
vaddss %xmm19, %xmm16, %xmm16
vaddss %xmm16, %xmm15, %xmm15
vaddss %xmm2, %xmm15, %xmm2
vmulss %xmm0, %xmm2, %xmm18
vbroadcastss %xmm18, %ymm16
vmovss %xmm16, %xmm14, %xmm2 # xmm2 = xmm16[0],xmm14[1,2,3]
vmovaps %ymm2, %ymm15
movb $0x1, %r12b
xorl %r11d, %r11d
vmovaps %xmm16, %xmm21
jmp 0x37f22b
vaddps %ymm16, %ymm15, %ymm2
vaddps %ymm20, %ymm2, %ymm2
vextractf128 $0x1, %ymm2, %xmm15
vaddps %xmm18, %xmm2, %xmm2
vaddps %xmm2, %xmm15, %xmm2
vmulps %xmm1, %xmm2, %xmm18
vinsertf32x4 $0x1, %xmm18, %ymm18, %ymm16
movb $0x1, %r11b
xorl %r12d, %r12d
vmovaps %zmm18, %zmm15
vmovaps %xmm18, %xmm21
jmp 0x37f22b
vaddps %ymm16, %ymm15, %ymm2
vaddps %ymm20, %ymm2, %ymm2
vmulps %ymm1, %ymm2, %ymm16
vbroadcastss %xmm16, %xmm21
movb $0x1, %bl
xorl %r11d, %r11d
vmovaps %xmm16, %xmm18
vmovaps %zmm16, %zmm15
xorl %r12d, %r12d
jmp 0x37f22d
vmulps %zmm1, %zmm15, %zmm18
vbroadcastss %xmm18, %ymm16
movb $0x1, %al
xorl %r11d, %r11d
vmovaps %zmm18, %zmm15
xorl %r12d, %r12d
vmovaps %xmm16, %xmm21
xorl %ebx, %ebx
vmovaps %zmm18, %zmm20
jmp 0x37f236
vxorps %xmm21, %xmm21, %xmm21
vxorps %xmm15, %xmm15, %xmm15
vxorps %xmm18, %xmm18, %xmm18
vxorps %xmm16, %xmm16, %xmm16
xorl %r11d, %r11d
xorl %r12d, %r12d
xorl %ebx, %ebx
vinsertf64x4 $0x1, %ymm16, %zmm16, %zmm20
xorl %eax, %eax
xorl %edi, %edi
vxorps %xmm19, %xmm19, %xmm19
movq %r15, %rcx
leal 0x10(%rdi), %r10d
cmpl %r9d, %r10d
jg 0x37f265
vmovups (%rcx), %zmm2
vsubps %zmm20, %zmm2, %zmm2
vfmadd231ps %zmm2, %zmm2, %zmm19 # zmm19 = (zmm2 * zmm2) + zmm19
addq $0x40, %rcx
movl %r10d, %edi
jmp 0x37f241
vxorps %xmm20, %xmm20, %xmm20
leal 0x8(%rdi), %r10d
cmpl %r9d, %r10d
jg 0x37f28d
vmovups (%rcx), %ymm2
vsubps %ymm16, %ymm2, %ymm2
vfmadd231ps %ymm2, %ymm2, %ymm20 # ymm20 = (ymm2 * ymm2) + ymm20
addq $0x20, %rcx
movl %r10d, %edi
jmp 0x37f26b
vxorps %xmm22, %xmm22, %xmm22
leal 0x4(%rdi), %r10d
cmpl %r9d, %r10d
jg 0x37f2b5
vmovups (%rcx), %xmm2
vsubps %xmm21, %xmm2, %xmm2
vfmadd231ps %xmm2, %xmm2, %xmm22 # xmm22 = (xmm2 * xmm2) + xmm22
addq $0x10, %rcx
movl %r10d, %edi
jmp 0x37f293
vxorps %xmm21, %xmm21, %xmm21
cmpl %r9d, %edi
jge 0x37f2d8
vmovss (%rcx), %xmm2
vsubss %xmm18, %xmm2, %xmm2
vfmadd231ss %xmm2, %xmm2, %xmm21 # xmm21 = (xmm2 * xmm2) + xmm21
incl %edi
addq $0x4, %rcx
jmp 0x37f2bb
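# annotation (editor note, not compiler output): second statistics pass -- the loops
# above accumulate sum((x - mean)^2) at the same four strides; the branches below
# merge the partial sums and scale by 1/w into the per-lane variance.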
testb %al, %al
jne 0x37f2e4
vxorps %xmm16, %xmm16, %xmm16
jmp 0x37f2ea
vmulps %zmm1, %zmm19, %zmm16
vextractf64x4 $0x1, %zmm19, %ymm23
testb %bl, %bl
je 0x37f30e
vaddps %ymm20, %ymm19, %ymm2
vaddps %ymm23, %ymm2, %ymm20
vmulps %ymm1, %ymm20, %ymm2
vinsertf64x4 $0x0, %ymm2, %zmm16, %zmm16
testb %r11b, %r11b
je 0x37f33f
vaddps %ymm20, %ymm19, %ymm2
vaddps %ymm23, %ymm2, %ymm20
vextractf32x4 $0x1, %ymm20, %xmm2
vaddps %xmm22, %xmm20, %xmm22
vaddps %xmm2, %xmm22, %xmm22
vmulps %xmm1, %xmm22, %xmm2
vinsertf32x4 $0x0, %xmm2, %zmm16, %zmm16
testb %r12b, %r12b
je 0x37f3bd
vaddps %ymm23, %ymm19, %ymm2
vextractf32x4 $0x1, %ymm2, %xmm19
vaddps %xmm19, %xmm2, %xmm2
vshufpd $0x1, %xmm2, %xmm2, %xmm19 # xmm19 = xmm2[1,0]
vextractf32x4 $0x1, %ymm20, %xmm23
vaddps %xmm20, %xmm23, %xmm20
vshufps $0x11, %xmm22, %xmm20, %xmm23 # xmm23 = xmm20[1,0],xmm22[1,0]
vshufps $0xbb, %xmm22, %xmm20, %xmm20 # xmm20 = xmm20[3,2],xmm22[3,2]
vaddps %xmm23, %xmm20, %xmm20
vshufpd $0x1, %xmm20, %xmm20, %xmm22 # xmm22 = xmm20[1,0]
vaddps %xmm22, %xmm20, %xmm4
vhaddps %xmm4, %xmm4, %xmm4
vaddps %xmm2, %xmm19, %xmm2
vmovshdup %xmm2, %xmm19 # xmm19 = xmm2[1,1,3,3]
vaddss %xmm21, %xmm19, %xmm19
vaddss %xmm19, %xmm2, %xmm2
vaddss %xmm4, %xmm2, %xmm2
vmulss %xmm0, %xmm2, %xmm2
vmovss %xmm2, %xmm16, %xmm2 # xmm2 = xmm2[0],xmm16[1,2,3]
vinsertf32x4 $0x0, %xmm2, %zmm16, %zmm16
cmpl $0x1, %edx
je 0x37f487
cmpl $0x4, %edx
je 0x37f414
cmpl $0x8, %edx
je 0x37f44d
cmpl $0x10, %edx
jne 0x37f4c7
vbroadcastss %xmm17, %zmm2
vaddps %zmm2, %zmm16, %zmm2
vrsqrt14ps %zmm2, %zmm4
vmulps %zmm4, %zmm2, %zmm2
vfmadd213ps %zmm11, %zmm4, %zmm2 # zmm2 = (zmm4 * zmm2) + zmm11
vmulps %zmm12, %zmm4, %zmm4
vmulps %zmm2, %zmm4, %zmm16
vxorps %zmm13, %zmm15, %zmm2
vmulps %zmm2, %zmm16, %zmm15
jmp 0x37f4c7
vbroadcastss %xmm17, %xmm2
vaddps %xmm2, %xmm16, %xmm2
vrsqrtps %xmm2, %xmm4
vmulps %xmm4, %xmm2, %xmm2
vfmadd213ps %xmm6, %xmm4, %xmm2 # xmm2 = (xmm4 * xmm2) + xmm6
vmulps %xmm7, %xmm4, %xmm4
vmulps %xmm2, %xmm4, %xmm2
vxorps %xmm3, %xmm15, %xmm4
vmulps %xmm4, %xmm2, %xmm4
vinsertf32x4 $0x0, %xmm2, %zmm16, %zmm16
vinsertf32x4 $0x0, %xmm4, %zmm15, %zmm15
jmp 0x37f4c7
vbroadcastss %xmm17, %ymm2
vaddps %ymm2, %ymm16, %ymm2
vrsqrtps %ymm2, %ymm4
vmulps %ymm4, %ymm2, %ymm2
vfmadd213ps %ymm8, %ymm4, %ymm2 # ymm2 = (ymm4 * ymm2) + ymm8
vmulps %ymm4, %ymm9, %ymm4
vmulps %ymm2, %ymm4, %ymm2
vxorps %ymm10, %ymm15, %ymm4
vmulps %ymm4, %ymm2, %ymm4
vinsertf64x4 $0x0, %ymm2, %zmm16, %zmm16
vinsertf64x4 $0x0, %ymm4, %zmm15, %zmm15
jmp 0x37f4c7
vaddss %xmm17, %xmm16, %xmm2
vrsqrtss %xmm2, %xmm2, %xmm4
vmulss %xmm4, %xmm2, %xmm2
vfmadd213ss %xmm24, %xmm4, %xmm2 # xmm2 = (xmm4 * xmm2) + xmm24
vmulss %xmm25, %xmm4, %xmm4
vmulss %xmm2, %xmm4, %xmm2
vmovss %xmm2, %xmm16, %xmm4 # xmm4 = xmm2[0],xmm16[1,2,3]
vinsertf32x4 $0x0, %xmm4, %zmm16, %zmm16
vxorps %xmm3, %xmm18, %xmm4
vmulss %xmm4, %xmm2, %xmm2
vmovss %xmm2, %xmm15, %xmm2 # xmm2 = xmm2[0],xmm15[1,2,3]
vinsertf32x4 $0x0, %xmm2, %zmm15, %zmm15
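# annotation (editor note, not compiler output): for each elempack width the block
# above forms a = rsqrt(var + eps) with one Newton-Raphson refinement (xmm17 = eps,
# loaded from 0xd4(this)) and b = -mean * a; the affine flag from 0xd8(this) is
# tested next, selecting between the gamma/beta paths and the plain a*x + b stores.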
testl %r13d, %r13d
je 0x37f511
testb %al, %al
je 0x37f531
movl %esi, %ecx
movq -0x30(%rsp), %r13
movq -0x28(%rsp), %rax
cmpl %r9d, %ecx
jg 0x37f53b
vbroadcastss (%rax), %zmm2
vmovups (%r15), %zmm4
vfmadd132ps %zmm16, %zmm15, %zmm4 # zmm4 = (zmm4 * zmm16) + zmm15
vfmadd213ps (%r13){1to16}, %zmm2, %zmm4 # zmm4 = (zmm2 * zmm4) + mem
vmovups %zmm4, (%r15)
addq $0x40, %r15
addq $0x4, %rax
addq $0x4, %r13
addl $0x10, %ecx
jmp 0x37f4dc
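# annotation (editor note, not compiler output): elempack == 16 affine loop -- gamma[i]
# and beta[i] are broadcast per position and fused as (x * a + b) * gamma + beta by
# the two vfmadd instructions above.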
vmovaps %xmm16, %xmm17
testb %r11b, %r11b
jne 0x37f876
vbroadcastss %xmm17, %xmm18
vbroadcastss %xmm15, %xmm19
jmp 0x37f882
movq -0x30(%rsp), %r13
movq -0x28(%rsp), %rax
testb %bl, %bl
je 0x37f5ee
vinsertf64x4 $0x1, %ymm16, %zmm16, %zmm17
vinsertf64x4 $0x1, %ymm15, %zmm15, %zmm18
xorl %ebx, %ebx
movl $0x8, %edi
xorl %r10d, %r10d
movq %r15, %rcx
addl $0x10, %r10d
cmpl %r9d, %r10d
jg 0x37f5b5
vmovss (%rax,%rbx), %xmm2
vmovss 0x4(%rax,%rbx), %xmm4
vmovss (%r13,%rbx), %xmm19
vmovss 0x4(%r13,%rbx), %xmm20
vpermt2ps %zmm4, %zmm5, %zmm2
vpermt2ps %zmm20, %zmm5, %zmm19
vmovups (%r15,%rbx,8), %zmm4
vfmadd132ps %zmm17, %zmm18, %zmm4 # zmm4 = (zmm4 * zmm17) + zmm18
vfmadd213ps %zmm19, %zmm2, %zmm4 # zmm4 = (zmm2 * zmm4) + zmm19
vmovups %zmm4, (%r15,%rbx,8)
addq $0x40, %rcx
addq $0x8, %rbx
addl $0x10, %edi
jmp 0x37f55e
addq %rbx, %r13
addq %rbx, %rax
cmpl %r9d, %edi
jg 0x37f5eb
vbroadcastss (%rax), %ymm2
vmovups (%rcx), %ymm4
vfmadd132ps %ymm16, %ymm15, %ymm4 # ymm4 = (ymm4 * ymm16) + ymm15
vfmadd213ps (%r13){1to8}, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm4) + mem
vmovups %ymm4, (%rcx)
addq $0x20, %rcx
addq $0x4, %rax
addq $0x4, %r13
addl $0x8, %edi
jmp 0x37f5bb
movq %rcx, %r15
testb %r11b, %r11b
je 0x37f76c
vpermpd $0x44, %ymm16, %ymm17 # ymm17 = ymm16[0,1,0,1]
vpermpd $0x44, %ymm15, %ymm18 # ymm18 = ymm15[0,1,0,1]
vinsertf64x4 $0x1, %ymm16, %zmm16, %zmm2
vpermpd $0x44, %zmm2, %zmm19 # zmm19 = zmm2[0,1,0,1,4,5,4,5]
vinsertf64x4 $0x1, %ymm15, %zmm15, %zmm2
vpermpd $0x44, %zmm2, %zmm20 # zmm20 = zmm2[0,1,0,1,4,5,4,5]
xorl %r11d, %r11d
movq %r15, %rcx
leal 0x10(%r11), %edi
cmpl %r9d, %edi
jg 0x37f728
vbroadcastss 0x8(%rax,%r11), %zmm2
vbroadcastss 0xc(%rax,%r11), %zmm4
vmovss (%rax,%r11), %xmm21
vmovss 0x4(%rax,%r11), %xmm22
vshufps $0x0, %xmm22, %xmm21, %xmm21 # xmm21 = xmm21[0,0],xmm22[0,0]
vpermpd $0x50, %ymm21, %ymm21 # ymm21 = ymm21[0,0,1,1]
vinsertf64x4 $0x0, %ymm21, %zmm2, %zmm2
kmovd %ebp, %k1
vmovapd %zmm4, %zmm2 {%k1}
vbroadcastss 0x8(%r13,%r11), %zmm4
vbroadcastss 0xc(%r13,%r11), %zmm21
vmovss (%r13,%r11), %xmm22
vmovss 0x4(%r13,%r11), %xmm23
vshufps $0x0, %xmm23, %xmm22, %xmm22 # xmm22 = xmm22[0,0],xmm23[0,0]
vpermpd $0x50, %ymm22, %ymm22 # ymm22 = ymm22[0,0,1,1]
vinsertf64x4 $0x0, %ymm22, %zmm4, %zmm4
vmovapd %zmm21, %zmm4 {%k1}
vmovups (%rcx), %zmm21
vfmadd132ps %zmm19, %zmm20, %zmm21 # zmm21 = (zmm21 * zmm19) + zmm20
vfmadd213ps %zmm4, %zmm2, %zmm21 # zmm21 = (zmm2 * zmm21) + zmm4
vmovups %zmm21, (%rcx)
addq $0x40, %rcx
addq $0x10, %r11
jmp 0x37f627
vmovss (%rax,%r11), %xmm2
vmovss 0x4(%rax,%r11), %xmm4
vshufps $0x0, %xmm4, %xmm2, %xmm2 # xmm2 = xmm2[0,0],xmm4[0,0]
vmovss (%r13,%r11), %xmm4
vmovss 0x4(%r13,%r11), %xmm19
vshufps $0x0, %xmm19, %xmm4, %xmm4 # xmm4 = xmm4[0,0],xmm19[0,0]
vpermpd $0x50, %ymm2, %ymm2 # ymm2 = ymm2[0,0,1,1]
vpermpd $0x50, %ymm4, %ymm4 # ymm4 = ymm4[0,0,1,1]
vmovups (%r15,%r11,4), %ymm19
vfmadd132ps %ymm17, %ymm18, %ymm19 # ymm19 = (ymm19 * ymm17) + ymm18
vfmadd213ps %ymm4, %ymm2, %ymm19 # ymm19 = (ymm2 * ymm19) + ymm4
vmovups %ymm19, (%r15,%r11,4)
addq $0x20, %rcx
addq $0x8, %r11
leal 0x8(%r11), %edi
cmpl %r9d, %edi
jle 0x37f6d2
jmp 0x37f75b
vbroadcastss (%rax,%r11), %xmm2
vmovups (%r15,%r11,4), %xmm4
vfmadd132ps %xmm16, %xmm15, %xmm4 # xmm4 = (xmm4 * xmm16) + xmm15
vfmadd213ps (%r13,%r11){1to4}, %xmm2, %xmm4 # xmm4 = (xmm2 * xmm4) + mem
vmovups %xmm4, (%r15,%r11,4)
addq $0x10, %rcx
addq $0x4, %r11
leal 0x4(%r11), %edi
cmpl %r9d, %edi
jle 0x37f733
addq %r11, %r13
addq %r11, %rax
jmp 0x37f76f
movq %r15, %rcx
testb %r12b, %r12b
je 0x37f939
vbroadcastss %xmm16, %zmm17
vbroadcastss %xmm15, %zmm18
xorl %r11d, %r11d
xorl %edi, %edi
movq %r13, %r10
movq %rax, %rbx
movq %rcx, %r15
leal 0x10(%rdi), %r12d
cmpl %r9d, %r12d
jg 0x37f7ce
vmovups (%rbx), %zmm2
vmovups (%r15), %zmm4
vfmadd132ps %zmm17, %zmm18, %zmm4 # zmm4 = (zmm4 * zmm17) + zmm18
vfmadd213ps (%r10), %zmm2, %zmm4 # zmm4 = (zmm2 * zmm4) + mem
vmovups %zmm4, (%r15)
addq $0x40, %r15
addq $0x40, %rbx
addq $0x40, %r10
addq $0x40, %r11
movl %r12d, %edi
jmp 0x37f792
vbroadcastss %xmm15, %ymm18
leal 0x8(%rdi), %r10d
cmpl %r9d, %r10d
jg 0x37f805
vmovups (%rax,%r11), %ymm2
vmovups (%rcx,%r11), %ymm4
vfmadd132ps %ymm17, %ymm18, %ymm4 # ymm4 = (ymm4 * ymm17) + ymm18
vfmadd213ps (%r13,%r11), %ymm2, %ymm4 # ymm4 = (ymm2 * ymm4) + mem
vmovups %ymm4, (%rcx,%r11)
addq $0x20, %r11
movl %r10d, %edi
jmp 0x37f7d4
vbroadcastss %xmm15, %xmm18
leal 0x4(%rdi), %r10d
cmpl %r9d, %r10d
jg 0x37f83c
vmovups (%rax,%r11), %xmm2
vmovups (%rcx,%r11), %xmm4
vfmadd132ps %xmm17, %xmm18, %xmm4 # xmm4 = (xmm4 * xmm17) + xmm18
vfmadd213ps (%r13,%r11), %xmm2, %xmm4 # xmm4 = (xmm2 * xmm4) + mem
vmovups %xmm4, (%rcx,%r11)
addq $0x10, %r11
movl %r10d, %edi
jmp 0x37f80b
addq %r11, %r13
addq %r11, %rax
addq %r11, %rcx
cmpl %r9d, %edi
jge 0x37f939
vmovss (%rcx), %xmm2
vfmadd132ss %xmm16, %xmm15, %xmm2 # xmm2 = (xmm2 * xmm16) + xmm15
vmovss (%rax), %xmm4
vfmadd213ss (%r13), %xmm2, %xmm4 # xmm4 = (xmm2 * xmm4) + mem
vmovss %xmm4, (%rcx)
incl %edi
addq $0x4, %rcx
addq $0x4, %rax
addq $0x4, %r13
jmp 0x37f845
vmovaps %xmm17, %xmm18
vmovaps %xmm15, %xmm19
testb %bl, %bl
jne 0x37f896
vinsertf32x4 $0x1, %xmm18, %ymm18, %ymm20
vinsertf32x4 $0x1, %xmm19, %ymm19, %ymm21
jmp 0x37f8a2
vmovaps %ymm16, %ymm20
vmovaps %ymm15, %ymm21
vmovaps %zmm15, %zmm22
testb %al, %al
jne 0x37f8ba
vinsertf64x4 $0x1, %ymm20, %zmm20, %zmm16
vinsertf64x4 $0x1, %ymm21, %zmm21, %zmm22
xorl %eax, %eax
leal 0x10(%rax), %ecx
cmpl %r9d, %ecx
jg 0x37f8f4
vmovups (%r15), %zmm2
vfmadd132ps %zmm16, %zmm22, %zmm2 # zmm2 = (zmm2 * zmm16) + zmm22
vmovups %zmm2, (%r15)
addq $0x40, %r15
movl %ecx, %eax
jmp 0x37f8bc
vmovups (%r15), %ymm2
vfmadd132ps %ymm20, %ymm21, %ymm2 # ymm2 = (ymm2 * ymm20) + ymm21
vmovups %ymm2, (%r15)
addq $0x20, %r15
movl %ecx, %eax
leal 0x8(%rax), %ecx
cmpl %r9d, %ecx
jle 0x37f8de
jmp 0x37f914
vmovups (%r15), %xmm2
vfmadd132ps %xmm18, %xmm19, %xmm2 # xmm2 = (xmm2 * xmm18) + xmm19
vmovups %xmm2, (%r15)
addq $0x10, %r15
movl %ecx, %eax
leal 0x4(%rax), %ecx
cmpl %r9d, %ecx
jle 0x37f8fe
jmp 0x37f934
vmovss (%r15), %xmm2
vfmadd132ss %xmm17, %xmm15, %xmm2 # xmm2 = (xmm2 * xmm17) + xmm15
vmovss %xmm2, (%r15)
incl %eax
addq $0x4, %r15
cmpl %r9d, %eax
jl 0x37f91e
incq %r14
jmp 0x37f012
movl 0x38(%rsi), %eax
xorl %edi, %edi
testl %eax, %eax
cmovlel %edi, %eax
movq %rax, -0x8(%rsp)
movq -0x20(%rsp), %rax
cmpl %r9d, 0xd0(%rax,%rcx)
jne 0x380669
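# annotation (editor note, not compiler output): dims == 3 entry -- affine_size
# (0xd0(this)) is compared with w; if equal the per-row loops fall through here,
# otherwise 0x380669 handles the affine_size == w * h per-channel variant.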
vcvtsi2ss %r9d, %xmm0, %xmm0
imull -0x34(%rsp), %r9d
vmovss 0x6f312(%rip), %xmm1 # 0x3eec88
vdivss %xmm0, %xmm1, %xmm0
vbroadcastss %xmm0, %zmm1
testl %r8d, %r8d
cmovlel %edi, %r8d
vmovss 0x75e33(%rip), %xmm24 # 0x3f57c4
vmovss 0x71f89(%rip), %xmm25 # 0x3f1924
vbroadcastss 0x71810(%rip), %xmm4 # 0x3f11b4
vmovaps 0x7ba92(%rip), %zmm5 # 0x3fb440
vbroadcastss 0x75e0d(%rip), %xmm6 # 0x3f57c4
vbroadcastss 0x71f64(%rip), %xmm7 # 0x3f1924
vbroadcastss 0x75dfb(%rip), %ymm8 # 0x3f57c4
vbroadcastss 0x71f52(%rip), %ymm9 # 0x3f1924
vbroadcastss 0x717d9(%rip), %ymm10 # 0x3f11b4
vbroadcastss 0x75ddf(%rip), %zmm11 # 0x3f57c4
vbroadcastss 0x71f35(%rip), %zmm12 # 0x3f1924
movb $-0x40, %r14b
vbroadcastss 0x717b8(%rip), %zmm13 # 0x3f11b4
vxorps %xmm14, %xmm14, %xmm14
cmpq -0x8(%rsp), %rdi
je 0x3810c1
movq -0x20(%rsp), %rax
movq (%rax), %rax
movq %rax, -0x18(%rsp)
xorl %r12d, %r12d
cmpq %r8, %r12
je 0x38035e
movq -0x10(%rsp), %rsi
movslq 0x2c(%rsi), %r15
movq 0x40(%rsi), %rax
movq %rdi, %rdx
imulq %rdi, %rax
movq 0x10(%rsi), %rcx
imulq %rcx, %rax
addq (%rsi), %rax
imulq %r12, %r15
imulq %rcx, %r15
addq %rax, %r15
movq -0x18(%rsp), %rax
movq -0x18(%rax), %rax
movq -0x20(%rsp), %rcx
movl 0xd8(%rcx,%rax), %r10d
vmovss 0xd4(%rcx,%rax), %xmm17
xorl %ecx, %ecx
vxorps %xmm15, %xmm15, %xmm15
movq %r15, %rax
leal 0x10(%rcx), %edi
cmpl %r9d, %edi
jg 0x37fa8d
vaddps (%rax), %zmm15, %zmm15
addq $0x40, %rax
movl %edi, %ecx
jmp 0x37fa77
vxorps %xmm16, %xmm16, %xmm16
movl -0x34(%rsp), %esi
leal 0x8(%rcx), %edi
cmpl %r9d, %edi
jg 0x37faad
vaddps (%rax), %ymm16, %ymm16
addq $0x20, %rax
movl %edi, %ecx
jmp 0x37fa97
vxorps %xmm18, %xmm18, %xmm18
leal 0x4(%rcx), %edi
cmpl %r9d, %edi
jg 0x37fac9
vaddps (%rax), %xmm18, %xmm18
addq $0x10, %rax
movl %edi, %ecx
jmp 0x37fab3
vxorps %xmm19, %xmm19, %xmm19
cmpl %r9d, %ecx
jge 0x37fae2
vaddss (%rax), %xmm19, %xmm19
incl %ecx
addq $0x4, %rax
jmp 0x37facf
cmpl $0x10, %esi
je 0x37fc05
vextractf64x4 $0x1, %zmm15, %ymm20
cmpl $0x4, %esi
je 0x37fb9b
cmpl $0x8, %esi
je 0x37fbd8
cmpl $0x1, %esi
jne 0x37fc2e
vaddps %ymm20, %ymm15, %ymm15
vextractf32x4 $0x1, %ymm15, %xmm20
vaddps %xmm20, %xmm15, %xmm15
vshufpd $0x1, %xmm15, %xmm15, %xmm20 # xmm20 = xmm15[1,0]
vaddps %xmm15, %xmm20, %xmm15
vextractf32x4 $0x1, %ymm16, %xmm20
vaddps %xmm16, %xmm20, %xmm16
vshufps $0x11, %xmm16, %xmm18, %xmm20 # xmm20 = xmm18[1,0],xmm16[1,0]
vshufps $0xbb, %xmm16, %xmm18, %xmm16 # xmm16 = xmm18[3,2],xmm16[3,2]
vaddps %xmm20, %xmm16, %xmm16
vshufpd $0x1, %xmm16, %xmm16, %xmm18 # xmm18 = xmm16[1,0]
vaddps %xmm18, %xmm16, %xmm2
vhaddps %xmm2, %xmm2, %xmm2
vmovshdup %xmm15, %xmm16 # xmm16 = xmm15[1,1,3,3]
vaddss %xmm19, %xmm16, %xmm16
vaddss %xmm16, %xmm15, %xmm15
vaddss %xmm2, %xmm15, %xmm2
vmulss %xmm0, %xmm2, %xmm18
vbroadcastss %xmm18, %ymm16
vmovss %xmm16, %xmm14, %xmm2 # xmm2 = xmm16[0],xmm14[1,2,3]
vmovaps %ymm2, %ymm15
movb $0x1, %r11b
xorl %ebp, %ebp
vmovaps %xmm16, %xmm21
jmp 0x37fc4a
vaddps %ymm16, %ymm15, %ymm2
vaddps %ymm20, %ymm2, %ymm2
vextractf128 $0x1, %ymm2, %xmm15
vaddps %xmm18, %xmm2, %xmm2
vaddps %xmm2, %xmm15, %xmm2
vmulps %xmm1, %xmm2, %xmm18
vinsertf32x4 $0x1, %xmm18, %ymm18, %ymm16
movb $0x1, %bpl
xorl %r11d, %r11d
vmovaps %zmm18, %zmm15
vmovaps %xmm18, %xmm21
jmp 0x37fc4a
vaddps %ymm16, %ymm15, %ymm2
vaddps %ymm20, %ymm2, %ymm2
vmulps %ymm1, %ymm2, %ymm16
vbroadcastss %xmm16, %xmm21
movb $0x1, %cl
xorl %ebp, %ebp
vmovaps %xmm16, %xmm18
vmovaps %zmm16, %zmm15
xorl %r11d, %r11d
jmp 0x37fc4c
vmulps %zmm1, %zmm15, %zmm18
vbroadcastss %xmm18, %ymm16
movb $0x1, %al
xorl %ebp, %ebp
vmovaps %zmm18, %zmm15
xorl %r11d, %r11d
vmovaps %xmm16, %xmm21
xorl %ecx, %ecx
vmovaps %zmm18, %zmm20
jmp 0x37fc55
vxorps %xmm21, %xmm21, %xmm21
vxorps %xmm15, %xmm15, %xmm15
vxorps %xmm18, %xmm18, %xmm18
vxorps %xmm16, %xmm16, %xmm16
xorl %ebp, %ebp
xorl %r11d, %r11d
xorl %ecx, %ecx
vinsertf64x4 $0x1, %ymm16, %zmm16, %zmm20
xorl %eax, %eax
xorl %edi, %edi
vxorps %xmm19, %xmm19, %xmm19
movq %r15, %r13
leal 0x10(%rdi), %ebx
cmpl %r9d, %ebx
jg 0x37fc83
vmovups (%r13), %zmm2
vsubps %zmm20, %zmm2, %zmm2
vfmadd231ps %zmm2, %zmm2, %zmm19 # zmm19 = (zmm2 * zmm2) + zmm19
addq $0x40, %r13
movl %ebx, %edi
jmp 0x37fc60
vxorps %xmm20, %xmm20, %xmm20
leal 0x8(%rdi), %ebx
cmpl %r9d, %ebx
jg 0x37fcab
vmovups (%r13), %ymm2
vsubps %ymm16, %ymm2, %ymm2
vfmadd231ps %ymm2, %ymm2, %ymm20 # ymm20 = (ymm2 * ymm2) + ymm20
addq $0x20, %r13
movl %ebx, %edi
jmp 0x37fc89
vxorps %xmm22, %xmm22, %xmm22
leal 0x4(%rdi), %ebx
cmpl %r9d, %ebx
jg 0x37fcd3
vmovups (%r13), %xmm2
vsubps %xmm21, %xmm2, %xmm2
vfmadd231ps %xmm2, %xmm2, %xmm22 # xmm22 = (xmm2 * xmm2) + xmm22
addq $0x10, %r13
movl %ebx, %edi
jmp 0x37fcb1
vxorps %xmm21, %xmm21, %xmm21
cmpl %r9d, %edi
jge 0x37fcf8
vmovss (%r13), %xmm2
vsubss %xmm18, %xmm2, %xmm2
vfmadd231ss %xmm2, %xmm2, %xmm21 # xmm21 = (xmm2 * xmm2) + xmm21
incl %edi
addq $0x4, %r13
jmp 0x37fcd9
testb %al, %al
jne 0x37fd04
vxorps %xmm16, %xmm16, %xmm16
jmp 0x37fd0a
vmulps %zmm1, %zmm19, %zmm16
vextractf64x4 $0x1, %zmm19, %ymm23
testb %cl, %cl
je 0x37fd2e
vaddps %ymm20, %ymm19, %ymm2
vaddps %ymm23, %ymm2, %ymm20
vmulps %ymm1, %ymm20, %ymm2
vinsertf64x4 $0x0, %ymm2, %zmm16, %zmm16
testb %bpl, %bpl
je 0x37fd5f
vaddps %ymm20, %ymm19, %ymm2
vaddps %ymm23, %ymm2, %ymm20
vextractf32x4 $0x1, %ymm20, %xmm2
vaddps %xmm22, %xmm20, %xmm22
vaddps %xmm2, %xmm22, %xmm22
vmulps %xmm1, %xmm22, %xmm2
vinsertf32x4 $0x0, %xmm2, %zmm16, %zmm16
testb %r11b, %r11b
je 0x37fddd
vaddps %ymm23, %ymm19, %ymm2
vextractf32x4 $0x1, %ymm2, %xmm19
vaddps %xmm19, %xmm2, %xmm2
vshufpd $0x1, %xmm2, %xmm2, %xmm19 # xmm19 = xmm2[1,0]
vextractf32x4 $0x1, %ymm20, %xmm23
vaddps %xmm20, %xmm23, %xmm20
vshufps $0x11, %xmm22, %xmm20, %xmm23 # xmm23 = xmm20[1,0],xmm22[1,0]
vshufps $0xbb, %xmm22, %xmm20, %xmm20 # xmm20 = xmm20[3,2],xmm22[3,2]
vaddps %xmm23, %xmm20, %xmm20
vshufpd $0x1, %xmm20, %xmm20, %xmm22 # xmm22 = xmm20[1,0]
vaddps %xmm22, %xmm20, %xmm3
vhaddps %xmm3, %xmm3, %xmm3
vaddps %xmm2, %xmm19, %xmm2
vmovshdup %xmm2, %xmm19 # xmm19 = xmm2[1,1,3,3]
vaddss %xmm21, %xmm19, %xmm19
vaddss %xmm19, %xmm2, %xmm2
vaddss %xmm3, %xmm2, %xmm2
vmulss %xmm0, %xmm2, %xmm2
vmovss %xmm2, %xmm16, %xmm2 # xmm2 = xmm2[0],xmm16[1,2,3]
vinsertf32x4 $0x0, %xmm2, %zmm16, %zmm16
cmpl $0x1, %esi
je 0x37fea7
cmpl $0x4, %esi
je 0x37fe34
cmpl $0x8, %esi
je 0x37fe6d
cmpl $0x10, %esi
jne 0x37fee7
vbroadcastss %xmm17, %zmm2
vaddps %zmm2, %zmm16, %zmm2
vrsqrt14ps %zmm2, %zmm3
vmulps %zmm3, %zmm2, %zmm2
vfmadd213ps %zmm11, %zmm3, %zmm2 # zmm2 = (zmm3 * zmm2) + zmm11
vmulps %zmm12, %zmm3, %zmm3
vmulps %zmm2, %zmm3, %zmm16
vxorps %zmm13, %zmm15, %zmm2
vmulps %zmm2, %zmm16, %zmm15
jmp 0x37fee7
vbroadcastss %xmm17, %xmm2
vaddps %xmm2, %xmm16, %xmm2
vrsqrtps %xmm2, %xmm3
vmulps %xmm3, %xmm2, %xmm2
vfmadd213ps %xmm6, %xmm3, %xmm2 # xmm2 = (xmm3 * xmm2) + xmm6
vmulps %xmm7, %xmm3, %xmm3
vmulps %xmm2, %xmm3, %xmm2
vxorps %xmm4, %xmm15, %xmm3
vmulps %xmm3, %xmm2, %xmm3
vinsertf32x4 $0x0, %xmm2, %zmm16, %zmm16
vinsertf32x4 $0x0, %xmm3, %zmm15, %zmm15
jmp 0x37fee7
vbroadcastss %xmm17, %ymm2
vaddps %ymm2, %ymm16, %ymm2
vrsqrtps %ymm2, %ymm3
vmulps %ymm3, %ymm2, %ymm2
vfmadd213ps %ymm8, %ymm3, %ymm2 # ymm2 = (ymm3 * ymm2) + ymm8
vmulps %ymm3, %ymm9, %ymm3
vmulps %ymm2, %ymm3, %ymm2
vxorps %ymm10, %ymm15, %ymm3
vmulps %ymm3, %ymm2, %ymm3
vinsertf64x4 $0x0, %ymm2, %zmm16, %zmm16
vinsertf64x4 $0x0, %ymm3, %zmm15, %zmm15
jmp 0x37fee7
vaddss %xmm17, %xmm16, %xmm2
vrsqrtss %xmm2, %xmm2, %xmm3
vmulss %xmm3, %xmm2, %xmm2
vfmadd213ss %xmm24, %xmm3, %xmm2 # xmm2 = (xmm3 * xmm2) + xmm24
vmulss %xmm25, %xmm3, %xmm3
vmulss %xmm2, %xmm3, %xmm2
vmovss %xmm2, %xmm16, %xmm3 # xmm3 = xmm2[0],xmm16[1,2,3]
vinsertf32x4 $0x0, %xmm3, %zmm16, %zmm16
vxorps %xmm4, %xmm18, %xmm3
vmulss %xmm3, %xmm2, %xmm2
vmovss %xmm2, %xmm15, %xmm2 # xmm2 = xmm2[0],xmm15[1,2,3]
vinsertf32x4 $0x0, %xmm2, %zmm15, %zmm15
testl %r10d, %r10d
je 0x37ff33
testb %al, %al
je 0x37ff53
movl $0x10, %edi
movq -0x30(%rsp), %r10
movq -0x28(%rsp), %rax
cmpl %r9d, %edi
jg 0x37ff5d
vbroadcastss (%rax), %zmm2
vmovups (%r15), %zmm3
vfmadd132ps %zmm16, %zmm15, %zmm3 # zmm3 = (zmm3 * zmm16) + zmm15
vfmadd213ps (%r10){1to16}, %zmm2, %zmm3 # zmm3 = (zmm2 * zmm3) + mem
vmovups %zmm3, (%r15)
addq $0x40, %r15
addq $0x4, %rax
addq $0x4, %r10
addl $0x10, %edi
jmp 0x37feff
vmovaps %xmm16, %xmm17
testb %bpl, %bpl
jne 0x380290
vbroadcastss %xmm17, %xmm18
vbroadcastss %xmm15, %xmm19
jmp 0x38029c
movq -0x30(%rsp), %r10
movq -0x28(%rsp), %rax
testb %cl, %cl
je 0x38000f
vinsertf64x4 $0x1, %ymm16, %zmm16, %zmm17
vinsertf64x4 $0x1, %ymm15, %zmm15, %zmm18
xorl %edi, %edi
movl $0x8, %r13d
xorl %ebx, %ebx
movq %r15, %rcx
addl $0x10, %ebx
cmpl %r9d, %ebx
jg 0x37ffd6
vmovss (%rax,%rdi), %xmm2
vmovss 0x4(%rax,%rdi), %xmm3
vmovss (%r10,%rdi), %xmm19
vmovss 0x4(%r10,%rdi), %xmm20
vpermt2ps %zmm3, %zmm5, %zmm2
vpermt2ps %zmm20, %zmm5, %zmm19
vmovups (%r15,%rdi,8), %zmm3
vfmadd132ps %zmm17, %zmm18, %zmm3 # zmm3 = (zmm3 * zmm17) + zmm18
vfmadd213ps %zmm19, %zmm2, %zmm3 # zmm3 = (zmm2 * zmm3) + zmm19
vmovups %zmm3, (%r15,%rdi,8)
addq $0x40, %rcx
addq $0x8, %rdi
addl $0x10, %r13d
jmp 0x37ff80
addq %rdi, %r10
addq %rdi, %rax
cmpl %r9d, %r13d
jg 0x38000c
vbroadcastss (%rax), %ymm2
vmovups (%rcx), %ymm3
vfmadd132ps %ymm16, %ymm15, %ymm3 # ymm3 = (ymm3 * ymm16) + ymm15
vfmadd213ps (%r10){1to8}, %ymm2, %ymm3 # ymm3 = (ymm2 * ymm3) + mem
vmovups %ymm3, (%rcx)
addq $0x20, %rcx
addq $0x4, %rax
addq $0x4, %r10
addl $0x8, %r13d
jmp 0x37ffdc
movq %rcx, %r15
testb %bpl, %bpl
je 0x380185
vpermpd $0x44, %ymm16, %ymm17 # ymm17 = ymm16[0,1,0,1]
vpermpd $0x44, %ymm15, %ymm18 # ymm18 = ymm15[0,1,0,1]
vinsertf64x4 $0x1, %ymm16, %zmm16, %zmm2
vpermpd $0x44, %zmm2, %zmm19 # zmm19 = zmm2[0,1,0,1,4,5,4,5]
vinsertf64x4 $0x1, %ymm15, %zmm15, %zmm2
vpermpd $0x44, %zmm2, %zmm20 # zmm20 = zmm2[0,1,0,1,4,5,4,5]
xorl %ebp, %ebp
movq %r15, %rcx
leal 0x10(%rbp), %edi
cmpl %r9d, %edi
jg 0x380144
vbroadcastss 0x8(%rax,%rbp), %zmm2
vbroadcastss 0xc(%rax,%rbp), %zmm3
vmovss (%rax,%rbp), %xmm21
vmovss 0x4(%rax,%rbp), %xmm22
vshufps $0x0, %xmm22, %xmm21, %xmm21 # xmm21 = xmm21[0,0],xmm22[0,0]
vpermpd $0x50, %ymm21, %ymm21 # ymm21 = ymm21[0,0,1,1]
vinsertf64x4 $0x0, %ymm21, %zmm2, %zmm2
kmovd %r14d, %k1
vmovapd %zmm3, %zmm2 {%k1}
vbroadcastss 0x8(%r10,%rbp), %zmm3
vbroadcastss 0xc(%r10,%rbp), %zmm21
vmovss (%r10,%rbp), %xmm22
vmovss 0x4(%r10,%rbp), %xmm23
vshufps $0x0, %xmm23, %xmm22, %xmm22 # xmm22 = xmm22[0,0],xmm23[0,0]
vpermpd $0x50, %ymm22, %ymm22 # ymm22 = ymm22[0,0,1,1]
vinsertf64x4 $0x0, %ymm22, %zmm3, %zmm3
vmovapd %zmm21, %zmm3 {%k1}
vmovups (%rcx), %zmm21
vfmadd132ps %zmm19, %zmm20, %zmm21 # zmm21 = (zmm21 * zmm19) + zmm20
vfmadd213ps %zmm3, %zmm2, %zmm21 # zmm21 = (zmm2 * zmm21) + zmm3
vmovups %zmm21, (%rcx)
addq $0x40, %rcx
addq $0x10, %rbp
jmp 0x380047
vmovss (%rax,%rbp), %xmm2
vmovss 0x4(%rax,%rbp), %xmm3
vshufps $0x0, %xmm3, %xmm2, %xmm2 # xmm2 = xmm2[0,0],xmm3[0,0]
vmovss (%r10,%rbp), %xmm3
vmovss 0x4(%r10,%rbp), %xmm19
vshufps $0x0, %xmm19, %xmm3, %xmm3 # xmm3 = xmm3[0,0],xmm19[0,0]
vpermpd $0x50, %ymm2, %ymm2 # ymm2 = ymm2[0,0,1,1]
vpermpd $0x50, %ymm3, %ymm3 # ymm3 = ymm3[0,0,1,1]
vmovups (%r15,%rbp,4), %ymm19
vfmadd132ps %ymm17, %ymm18, %ymm19 # ymm19 = (ymm19 * ymm17) + ymm18
vfmadd213ps %ymm3, %ymm2, %ymm19 # ymm19 = (ymm2 * ymm19) + ymm3
vmovups %ymm19, (%r15,%rbp,4)
addq $0x20, %rcx
addq $0x8, %rbp
leal 0x8(%rbp), %edi
cmpl %r9d, %edi
jle 0x3800f1
jmp 0x380175
vbroadcastss (%rax,%rbp), %xmm2
vmovups (%r15,%rbp,4), %xmm3
vfmadd132ps %xmm16, %xmm15, %xmm3 # xmm3 = (xmm3 * xmm16) + xmm15
vfmadd213ps (%r10,%rbp){1to4}, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + mem
vmovups %xmm3, (%r15,%rbp,4)
addq $0x10, %rcx
addq $0x4, %rbp
leal 0x4(%rbp), %edi
cmpl %r9d, %edi
jle 0x38014e
addq %rbp, %r10
addq %rbp, %rax
jmp 0x380188
movq %r15, %rcx
testb %r11b, %r11b
je 0x380353
vbroadcastss %xmm16, %zmm17
vbroadcastss %xmm15, %zmm18
xorl %r11d, %r11d
xorl %r15d, %r15d
movq %r10, %rdi
movq %rax, %rbx
movq %rcx, %r13
leal 0x10(%r15), %ebp
cmpl %r9d, %ebp
jg 0x3801ea
vmovups (%rbx), %zmm2
vmovups (%r13), %zmm3
vfmadd132ps %zmm17, %zmm18, %zmm3 # zmm3 = (zmm3 * zmm17) + zmm18
vfmadd213ps (%rdi), %zmm2, %zmm3 # zmm3 = (zmm2 * zmm3) + mem
vmovups %zmm3, (%r13)
addq $0x40, %r13
addq $0x40, %rbx
addq $0x40, %rdi
addq $0x40, %r11
movl %ebp, %r15d
jmp 0x3801ac
vbroadcastss %xmm15, %ymm18
leal 0x8(%r15), %edi
cmpl %r9d, %edi
jg 0x380220
vmovups (%rax,%r11), %ymm2
vmovups (%rcx,%r11), %ymm3
vfmadd132ps %ymm17, %ymm18, %ymm3 # ymm3 = (ymm3 * ymm17) + ymm18
vfmadd213ps (%r10,%r11), %ymm2, %ymm3 # ymm3 = (ymm2 * ymm3) + mem
vmovups %ymm3, (%rcx,%r11)
addq $0x20, %r11
movl %edi, %r15d
jmp 0x3801f0
vbroadcastss %xmm15, %xmm18
leal 0x4(%r15), %edi
cmpl %r9d, %edi
jg 0x380256
vmovups (%rax,%r11), %xmm2
vmovups (%rcx,%r11), %xmm3
vfmadd132ps %xmm17, %xmm18, %xmm3 # xmm3 = (xmm3 * xmm17) + xmm18
vfmadd213ps (%r10,%r11), %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + mem
vmovups %xmm3, (%rcx,%r11)
addq $0x10, %r11
movl %edi, %r15d
jmp 0x380226
addq %r11, %r10
addq %r11, %rax
addq %r11, %rcx
cmpl %r9d, %r15d
jge 0x380353
vmovss (%rcx), %xmm2
vfmadd132ss %xmm16, %xmm15, %xmm2 # xmm2 = (xmm2 * xmm16) + xmm15
vmovss (%rax), %xmm3
vfmadd213ss (%r10), %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + mem
vmovss %xmm3, (%rcx)
incl %r15d
addq $0x4, %rcx
addq $0x4, %rax
addq $0x4, %r10
jmp 0x38025f
vmovaps %xmm17, %xmm18
vmovaps %xmm15, %xmm19
testb %cl, %cl
jne 0x3802b0
vinsertf32x4 $0x1, %xmm18, %ymm18, %ymm20
vinsertf32x4 $0x1, %xmm19, %ymm19, %ymm21
jmp 0x3802bc
vmovaps %ymm16, %ymm20
vmovaps %ymm15, %ymm21
vmovaps %zmm15, %zmm22
testb %al, %al
jne 0x3802d4
vinsertf64x4 $0x1, %ymm20, %zmm20, %zmm16
vinsertf64x4 $0x1, %ymm21, %zmm21, %zmm22
xorl %eax, %eax
leal 0x10(%rax), %ecx
cmpl %r9d, %ecx
jg 0x38030e
vmovups (%r15), %zmm2
vfmadd132ps %zmm16, %zmm22, %zmm2 # zmm2 = (zmm2 * zmm16) + zmm22
vmovups %zmm2, (%r15)
addq $0x40, %r15
movl %ecx, %eax
jmp 0x3802d6
vmovups (%r15), %ymm2
vfmadd132ps %ymm20, %ymm21, %ymm2 # ymm2 = (ymm2 * ymm20) + ymm21
vmovups %ymm2, (%r15)
addq $0x20, %r15
movl %ecx, %eax
leal 0x8(%rax), %ecx
cmpl %r9d, %ecx
jle 0x3802f8
jmp 0x38032e
vmovups (%r15), %xmm2
vfmadd132ps %xmm18, %xmm19, %xmm2 # xmm2 = (xmm2 * xmm18) + xmm19
vmovups %xmm2, (%r15)
addq $0x10, %r15
movl %ecx, %eax
leal 0x4(%rax), %ecx
cmpl %r9d, %ecx
jle 0x380318
jmp 0x38034e
vmovss (%r15), %xmm2
vfmadd132ss %xmm17, %xmm15, %xmm2 # xmm2 = (xmm2 * xmm17) + xmm15
vmovss %xmm2, (%r15)
incl %eax
addq $0x4, %r15
cmpl %r9d, %eax
jl 0x380338
incq %r12
movq %rdx, %rdi
jmp 0x37fa1c
incq %rdi
jmp 0x37fa01
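# annotation (editor note, not compiler output): dims == 1 path (target of the je at
# the top of the function) -- the blob is treated as unpacked, so w * elempack values
# form a single group with one mean/variance pair and a single scalar rsqrt.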
imull -0x34(%rsp), %r9d
movq (%rsi), %rdx
movq -0x20(%rsp), %rsi
movl 0xd8(%rsi,%rcx), %eax
vmovss 0xd4(%rsi,%rcx), %xmm0
xorl %esi, %esi
vxorps %xmm1, %xmm1, %xmm1
movq %rdx, %rcx
leal 0x10(%rsi), %edi
cmpl %r9d, %edi
jg 0x3803a3
vaddps (%rcx), %zmm1, %zmm1
addq $0x40, %rcx
movl %edi, %esi
jmp 0x38038d
vxorps %xmm2, %xmm2, %xmm2
leal 0x8(%rsi), %edi
cmpl %r9d, %edi
jg 0x3803bb
vaddps (%rcx), %ymm2, %ymm2
addq $0x20, %rcx
movl %edi, %esi
jmp 0x3803a7
vxorps %xmm3, %xmm3, %xmm3
leal 0x4(%rsi), %edi
cmpl %r9d, %edi
jg 0x3803d3
vaddps (%rcx), %xmm3, %xmm3
addq $0x10, %rcx
movl %edi, %esi
jmp 0x3803bf
vxorps %xmm4, %xmm4, %xmm4
cmpl %r9d, %esi
jge 0x3803e8
vaddss (%rcx), %xmm4, %xmm4
incl %esi
addq $0x4, %rcx
jmp 0x3803d7
vextractf64x4 $0x1, %zmm1, %ymm5
vaddps %ymm5, %ymm1, %ymm1
vextractf128 $0x1, %ymm1, %xmm5
vaddps %xmm5, %xmm1, %xmm1
vshufpd $0x1, %xmm1, %xmm1, %xmm5 # xmm5 = xmm1[1,0]
vaddps %xmm1, %xmm5, %xmm1
vextractf128 $0x1, %ymm2, %xmm5
vaddps %xmm2, %xmm5, %xmm2
vshufps $0x11, %xmm2, %xmm3, %xmm5 # xmm5 = xmm3[1,0],xmm2[1,0]
vshufps $0xbb, %xmm2, %xmm3, %xmm2 # xmm2 = xmm3[3,2],xmm2[3,2]
vaddps %xmm5, %xmm2, %xmm2
vshufpd $0x1, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,0]
vaddps %xmm3, %xmm2, %xmm2
vhaddps %xmm2, %xmm2, %xmm2
vmovshdup %xmm1, %xmm3 # xmm3 = xmm1[1,1,3,3]
vaddss %xmm4, %xmm3, %xmm3
vaddss %xmm3, %xmm1, %xmm1
vaddss %xmm2, %xmm1, %xmm1
vcvtsi2ss %r9d, %xmm6, %xmm2
vdivss %xmm2, %xmm1, %xmm1
vbroadcastss %xmm1, %zmm5
xorl %esi, %esi
vxorps %xmm3, %xmm3, %xmm3
movq %rdx, %rcx
leal 0x10(%rsi), %edi
cmpl %r9d, %edi
jg 0x380475
vmovups (%rcx), %zmm4
vsubps %zmm5, %zmm4, %zmm4
vfmadd231ps %zmm4, %zmm4, %zmm3 # zmm3 = (zmm4 * zmm4) + zmm3
addq $0x40, %rcx
movl %edi, %esi
jmp 0x380453
vbroadcastss %xmm5, %ymm6
vxorps %xmm4, %xmm4, %xmm4
leal 0x8(%rsi), %edi
cmpl %r9d, %edi
jg 0x38049b
vmovups (%rcx), %ymm7
vsubps %ymm6, %ymm7, %ymm7
vfmadd231ps %ymm7, %ymm7, %ymm4 # ymm4 = (ymm7 * ymm7) + ymm4
addq $0x20, %rcx
movl %edi, %esi
jmp 0x38047e
vxorps %xmm6, %xmm6, %xmm6
leal 0x4(%rsi), %edi
cmpl %r9d, %edi
jg 0x3804bc
vmovups (%rcx), %xmm7
vsubps %xmm5, %xmm7, %xmm7
vfmadd231ps %xmm7, %xmm7, %xmm6 # xmm6 = (xmm7 * xmm7) + xmm6
addq $0x10, %rcx
movl %edi, %esi
jmp 0x38049f
vxorps %xmm5, %xmm5, %xmm5
cmpl %r9d, %esi
jge 0x3804da
vmovss (%rcx), %xmm7
vsubss %xmm1, %xmm7, %xmm7
vfmadd231ss %xmm7, %xmm7, %xmm5 # xmm5 = (xmm7 * xmm7) + xmm5
incl %esi
addq $0x4, %rcx
jmp 0x3804c0
vextractf64x4 $0x1, %zmm3, %ymm7
vaddps %ymm7, %ymm3, %ymm3
vextractf128 $0x1, %ymm3, %xmm7
vaddps %xmm7, %xmm3, %xmm3
vshufpd $0x1, %xmm3, %xmm3, %xmm7 # xmm7 = xmm3[1,0]
vaddps %xmm3, %xmm7, %xmm3
vextractf128 $0x1, %ymm4, %xmm7
vaddps %xmm4, %xmm7, %xmm4
vshufps $0x11, %xmm4, %xmm6, %xmm7 # xmm7 = xmm6[1,0],xmm4[1,0]
vshufps $0xbb, %xmm4, %xmm6, %xmm4 # xmm4 = xmm6[3,2],xmm4[3,2]
vaddps %xmm7, %xmm4, %xmm4
vshufpd $0x1, %xmm4, %xmm4, %xmm6 # xmm6 = xmm4[1,0]
vaddps %xmm6, %xmm4, %xmm4
vhaddps %xmm4, %xmm4, %xmm4
vmovshdup %xmm3, %xmm6 # xmm6 = xmm3[1,1,3,3]
vaddss %xmm5, %xmm6, %xmm5
vaddss %xmm5, %xmm3, %xmm3
vaddss %xmm4, %xmm3, %xmm3
vdivss %xmm2, %xmm3, %xmm2
vaddss %xmm0, %xmm2, %xmm0
vrsqrtss %xmm0, %xmm0, %xmm2
vmulss %xmm2, %xmm0, %xmm0
vfmadd213ss 0x7527e(%rip), %xmm2, %xmm0 # xmm0 = (xmm2 * xmm0) + mem
vmulss 0x713d6(%rip), %xmm2, %xmm2 # 0x3f1924
vmulss %xmm0, %xmm2, %xmm0
vxorps 0x70c58(%rip){1to4}, %xmm1, %xmm1 # 0x3f11b4
vmulss %xmm1, %xmm0, %xmm1
vbroadcastss %xmm0, %zmm3
vbroadcastss %xmm1, %zmm2
testl %eax, %eax
je 0x38103c
xorl %eax, %eax
leal 0x10(%rax), %ecx
cmpl %r9d, %ecx
jg 0x3805b0
movq -0x28(%rsp), %rcx
vmovups (%rcx,%rax,4), %zmm4
vmovups (%rdx,%rax,4), %zmm5
vfmadd132ps %zmm3, %zmm2, %zmm5 # zmm5 = (zmm5 * zmm3) + zmm2
movq -0x30(%rsp), %rcx
vfmadd213ps (%rcx,%rax,4), %zmm4, %zmm5 # zmm5 = (zmm4 * zmm5) + mem
vmovups %zmm5, (%rdx,%rax,4)
addq $0x10, %rax
jmp 0x380576
vbroadcastss %xmm3, %xmm4
vbroadcastss %xmm2, %ymm5
leal 0x8(%rax), %ecx
cmpl %r9d, %ecx
jg 0x3805ec
movq -0x28(%rsp), %rcx
vmovups (%rcx,%rax,4), %ymm6
vmovups (%rdx,%rax,4), %ymm7
vfmadd132ps %ymm3, %ymm5, %ymm7 # ymm7 = (ymm7 * ymm3) + ymm5
movq -0x30(%rsp), %rcx
vfmadd213ps (%rcx,%rax,4), %ymm6, %ymm7 # ymm7 = (ymm6 * ymm7) + mem
vmovups %ymm7, (%rdx,%rax,4)
addq $0x8, %rax
jmp 0x3805ba
vbroadcastss %xmm2, %xmm2
leal 0x4(%rax), %ecx
cmpl %r9d, %ecx
jg 0x380623
movq -0x28(%rsp), %rcx
vmovups (%rcx,%rax,4), %xmm3
vmovups (%rdx,%rax,4), %xmm5
vfmadd132ps %xmm4, %xmm2, %xmm5 # xmm5 = (xmm5 * xmm4) + xmm2
movq -0x30(%rsp), %rcx
vfmadd213ps (%rcx,%rax,4), %xmm3, %xmm5 # xmm5 = (xmm3 * xmm5) + mem
vmovups %xmm5, (%rdx,%rax,4)
addq $0x4, %rax
jmp 0x3805f1
xorl %ecx, %ecx
movl %eax, %esi
cmpl %r9d, %esi
jge 0x3810c1
movq -0x30(%rsp), %rdi
addq %rcx, %rdi
movq -0x28(%rsp), %r8
addq %rcx, %r8
leaq (%rdx,%rcx), %r10
vmovss (%r10,%rax,4), %xmm2
vfmadd132ss %xmm0, %xmm1, %xmm2 # xmm2 = (xmm2 * xmm0) + xmm1
vmovss (%r8,%rax,4), %xmm3
vfmadd213ss (%rdi,%rax,4), %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + mem
vmovss %xmm3, (%r10,%rax,4)
incl %esi
addq $0x4, %rcx
jmp 0x380627
imull %r9d, %r8d
vcvtsi2ss %r8d, %xmm0, %xmm0
movl -0x34(%rsp), %edx
imull %edx, %r8d
vmovss 0x6e606(%rip), %xmm1 # 0x3eec88
vdivss %xmm0, %xmm1, %xmm0
vbroadcastss %xmm0, %zmm1
xorl %r9d, %r9d
vmovss 0x7512b(%rip), %xmm24 # 0x3f57c4
vmovss 0x71281(%rip), %xmm25 # 0x3f1924
vbroadcastss 0x70b08(%rip), %xmm4 # 0x3f11b4
movl $0x10, %esi
movl $0x8, %ebp
vmovaps 0x7ad80(%rip), %zmm5 # 0x3fb440
vbroadcastss 0x750fb(%rip), %xmm6 # 0x3f57c4
vbroadcastss 0x71252(%rip), %xmm7 # 0x3f1924
vbroadcastss 0x750e9(%rip), %ymm8 # 0x3f57c4
vbroadcastss 0x71240(%rip), %ymm9 # 0x3f1924
vbroadcastss 0x70ac7(%rip), %ymm10 # 0x3f11b4
vbroadcastss 0x750cd(%rip), %zmm11 # 0x3f57c4
vbroadcastss 0x71223(%rip), %zmm12 # 0x3f1924
movb $-0x40, %r14b
vbroadcastss 0x70aa6(%rip), %zmm13 # 0x3f11b4
vxorps %xmm14, %xmm14, %xmm14
cmpq -0x8(%rsp), %r9
je 0x3810c1
movq -0x10(%rsp), %rax
movq 0x40(%rax), %r15
imulq %r9, %r15
imulq 0x10(%rax), %r15
addq (%rax), %r15
movq -0x18(%rsp), %rax
movq -0x18(%rax), %rax
movq -0x20(%rsp), %rcx
movl 0xd8(%rcx,%rax), %r12d
vmovss 0xd4(%rcx,%rax), %xmm17
xorl %ecx, %ecx
vxorps %xmm15, %xmm15, %xmm15
movq %r15, %rax
leal 0x10(%rcx), %edi
cmpl %r8d, %edi
jg 0x380771
vaddps (%rax), %zmm15, %zmm15
addq $0x40, %rax
movl %edi, %ecx
jmp 0x38075b
vxorps %xmm16, %xmm16, %xmm16
leal 0x8(%rcx), %edi
cmpl %r8d, %edi
jg 0x38078d
vaddps (%rax), %ymm16, %ymm16
addq $0x20, %rax
movl %edi, %ecx
jmp 0x380777
vxorps %xmm18, %xmm18, %xmm18
leal 0x4(%rcx), %edi
cmpl %r8d, %edi
jg 0x3807a9
vaddps (%rax), %xmm18, %xmm18
addq $0x10, %rax
movl %edi, %ecx
jmp 0x380793
vxorps %xmm19, %xmm19, %xmm19
cmpl %r8d, %ecx
jge 0x3807c2
vaddss (%rax), %xmm19, %xmm19
incl %ecx
addq $0x4, %rax
jmp 0x3807af
cmpl $0x10, %edx
je 0x3808e7
vextractf64x4 $0x1, %zmm15, %ymm20
cmpl $0x4, %edx
je 0x38087c
cmpl $0x8, %edx
je 0x3808b9
cmpl $0x1, %edx
jne 0x380911
vaddps %ymm20, %ymm15, %ymm15
vextractf32x4 $0x1, %ymm15, %xmm20
vaddps %xmm20, %xmm15, %xmm15
vshufpd $0x1, %xmm15, %xmm15, %xmm20 # xmm20 = xmm15[1,0]
vaddps %xmm15, %xmm20, %xmm15
vextractf32x4 $0x1, %ymm16, %xmm20
vaddps %xmm16, %xmm20, %xmm16
vshufps $0x11, %xmm16, %xmm18, %xmm20 # xmm20 = xmm18[1,0],xmm16[1,0]
vshufps $0xbb, %xmm16, %xmm18, %xmm16 # xmm16 = xmm18[3,2],xmm16[3,2]
vaddps %xmm20, %xmm16, %xmm16
vshufpd $0x1, %xmm16, %xmm16, %xmm18 # xmm18 = xmm16[1,0]
vaddps %xmm18, %xmm16, %xmm2
vhaddps %xmm2, %xmm2, %xmm2
vmovshdup %xmm15, %xmm16 # xmm16 = xmm15[1,1,3,3]
vaddss %xmm19, %xmm16, %xmm16
vaddss %xmm16, %xmm15, %xmm15
vaddss %xmm2, %xmm15, %xmm2
vmulss %xmm0, %xmm2, %xmm18
vbroadcastss %xmm18, %ymm16
vmovss %xmm16, %xmm14, %xmm2 # xmm2 = xmm16[0],xmm14[1,2,3]
vmovaps %ymm2, %ymm15
movb $0x1, %r11b
xorl %r13d, %r13d
vmovaps %xmm16, %xmm21
jmp 0x38092e
vaddps %ymm16, %ymm15, %ymm2
vaddps %ymm20, %ymm2, %ymm2
vextractf128 $0x1, %ymm2, %xmm15
vaddps %xmm18, %xmm2, %xmm2
vaddps %xmm2, %xmm15, %xmm2
vmulps %xmm1, %xmm2, %xmm18
vinsertf32x4 $0x1, %xmm18, %ymm18, %ymm16
movb $0x1, %r13b
xorl %r11d, %r11d
vmovaps %zmm18, %zmm15
vmovaps %xmm18, %xmm21
jmp 0x38092e
vaddps %ymm16, %ymm15, %ymm2
vaddps %ymm20, %ymm2, %ymm2
vmulps %ymm1, %ymm2, %ymm16
vbroadcastss %xmm16, %xmm21
movb $0x1, %cl
xorl %r13d, %r13d
vmovaps %xmm16, %xmm18
vmovaps %zmm16, %zmm15
xorl %r11d, %r11d
jmp 0x380930
vmulps %zmm1, %zmm15, %zmm18
vbroadcastss %xmm18, %ymm16
movb $0x1, %al
xorl %r13d, %r13d
vmovaps %zmm18, %zmm15
xorl %r11d, %r11d
vmovaps %xmm16, %xmm21
xorl %ecx, %ecx
vmovaps %zmm18, %zmm20
jmp 0x380939
vxorps %xmm21, %xmm21, %xmm21
vxorps %xmm15, %xmm15, %xmm15
vxorps %xmm18, %xmm18, %xmm18
vxorps %xmm16, %xmm16, %xmm16
xorl %r13d, %r13d
xorl %r11d, %r11d
xorl %ecx, %ecx
vinsertf64x4 $0x1, %ymm16, %zmm16, %zmm20
xorl %eax, %eax
xorl %ebx, %ebx
vxorps %xmm19, %xmm19, %xmm19
movq %r15, %rdi
leal 0x10(%rbx), %r10d
cmpl %r8d, %r10d
jg 0x380968
vmovups (%rdi), %zmm2
vsubps %zmm20, %zmm2, %zmm2
vfmadd231ps %zmm2, %zmm2, %zmm19 # zmm19 = (zmm2 * zmm2) + zmm19
addq $0x40, %rdi
movl %r10d, %ebx
jmp 0x380944
vxorps %xmm20, %xmm20, %xmm20
leal 0x8(%rbx), %r10d
cmpl %r8d, %r10d
jg 0x380990
vmovups (%rdi), %ymm2
vsubps %ymm16, %ymm2, %ymm2
vfmadd231ps %ymm2, %ymm2, %ymm20 # ymm20 = (ymm2 * ymm2) + ymm20
addq $0x20, %rdi
movl %r10d, %ebx
jmp 0x38096e
vxorps %xmm22, %xmm22, %xmm22
leal 0x4(%rbx), %r10d
cmpl %r8d, %r10d
jg 0x3809b8
vmovups (%rdi), %xmm2
vsubps %xmm21, %xmm2, %xmm2
vfmadd231ps %xmm2, %xmm2, %xmm22 # xmm22 = (xmm2 * xmm2) + xmm22
addq $0x10, %rdi
movl %r10d, %ebx
jmp 0x380996
vxorps %xmm21, %xmm21, %xmm21
cmpl %r8d, %ebx
jge 0x3809db
vmovss (%rdi), %xmm2
vsubss %xmm18, %xmm2, %xmm2
vfmadd231ss %xmm2, %xmm2, %xmm21 # xmm21 = (xmm2 * xmm2) + xmm21
incl %ebx
addq $0x4, %rdi
jmp 0x3809be
testb %al, %al
jne 0x3809e7
vxorps %xmm16, %xmm16, %xmm16
jmp 0x3809ed
vmulps %zmm1, %zmm19, %zmm16
vextractf64x4 $0x1, %zmm19, %ymm23
testb %cl, %cl
je 0x380a11
vaddps %ymm20, %ymm19, %ymm2
vaddps %ymm23, %ymm2, %ymm20
vmulps %ymm1, %ymm20, %ymm2
vinsertf64x4 $0x0, %ymm2, %zmm16, %zmm16
testb %r13b, %r13b
je 0x380a42
vaddps %ymm20, %ymm19, %ymm2
vaddps %ymm23, %ymm2, %ymm20
vextractf32x4 $0x1, %ymm20, %xmm2
vaddps %xmm22, %xmm20, %xmm22
vaddps %xmm2, %xmm22, %xmm22
vmulps %xmm1, %xmm22, %xmm2
vinsertf32x4 $0x0, %xmm2, %zmm16, %zmm16
testb %r11b, %r11b
je 0x380ac0
vaddps %ymm23, %ymm19, %ymm2
vextractf32x4 $0x1, %ymm2, %xmm19
vaddps %xmm19, %xmm2, %xmm2
vshufpd $0x1, %xmm2, %xmm2, %xmm19 # xmm19 = xmm2[1,0]
vextractf32x4 $0x1, %ymm20, %xmm23
vaddps %xmm20, %xmm23, %xmm20
vshufps $0x11, %xmm22, %xmm20, %xmm23 # xmm23 = xmm20[1,0],xmm22[1,0]
vshufps $0xbb, %xmm22, %xmm20, %xmm20 # xmm20 = xmm20[3,2],xmm22[3,2]
vaddps %xmm23, %xmm20, %xmm20
vshufpd $0x1, %xmm20, %xmm20, %xmm22 # xmm22 = xmm20[1,0]
vaddps %xmm22, %xmm20, %xmm3
vhaddps %xmm3, %xmm3, %xmm3
vaddps %xmm2, %xmm19, %xmm2
vmovshdup %xmm2, %xmm19 # xmm19 = xmm2[1,1,3,3]
vaddss %xmm21, %xmm19, %xmm19
vaddss %xmm19, %xmm2, %xmm2
vaddss %xmm3, %xmm2, %xmm2
vmulss %xmm0, %xmm2, %xmm2
vmovss %xmm2, %xmm16, %xmm2 # xmm2 = xmm2[0],xmm16[1,2,3]
vinsertf32x4 $0x0, %xmm2, %zmm16, %zmm16
cmpl $0x1, %edx
je 0x380b8a
cmpl $0x4, %edx
je 0x380b17
cmpl $0x8, %edx
je 0x380b50
cmpl $0x10, %edx
jne 0x380bca
vbroadcastss %xmm17, %zmm2
vaddps %zmm2, %zmm16, %zmm2
vrsqrt14ps %zmm2, %zmm3
vmulps %zmm3, %zmm2, %zmm2
vfmadd213ps %zmm11, %zmm3, %zmm2 # zmm2 = (zmm3 * zmm2) + zmm11
vmulps %zmm12, %zmm3, %zmm3
vmulps %zmm2, %zmm3, %zmm16
vxorps %zmm13, %zmm15, %zmm2
vmulps %zmm2, %zmm16, %zmm15
jmp 0x380bca
vbroadcastss %xmm17, %xmm2
vaddps %xmm2, %xmm16, %xmm2
vrsqrtps %xmm2, %xmm3
vmulps %xmm3, %xmm2, %xmm2
vfmadd213ps %xmm6, %xmm3, %xmm2 # xmm2 = (xmm3 * xmm2) + xmm6
vmulps %xmm7, %xmm3, %xmm3
vmulps %xmm2, %xmm3, %xmm2
vxorps %xmm4, %xmm15, %xmm3
vmulps %xmm3, %xmm2, %xmm3
vinsertf32x4 $0x0, %xmm2, %zmm16, %zmm16
vinsertf32x4 $0x0, %xmm3, %zmm15, %zmm15
jmp 0x380bca
vbroadcastss %xmm17, %ymm2
vaddps %ymm2, %ymm16, %ymm2
vrsqrtps %ymm2, %ymm3
vmulps %ymm3, %ymm2, %ymm2
vfmadd213ps %ymm8, %ymm3, %ymm2 # ymm2 = (ymm3 * ymm2) + ymm8
vmulps %ymm3, %ymm9, %ymm3
vmulps %ymm2, %ymm3, %ymm2
vxorps %ymm10, %ymm15, %ymm3
vmulps %ymm3, %ymm2, %ymm3
vinsertf64x4 $0x0, %ymm2, %zmm16, %zmm16
vinsertf64x4 $0x0, %ymm3, %zmm15, %zmm15
jmp 0x380bca
vaddss %xmm17, %xmm16, %xmm2
vrsqrtss %xmm2, %xmm2, %xmm3
vmulss %xmm3, %xmm2, %xmm2
vfmadd213ss %xmm24, %xmm3, %xmm2 # xmm2 = (xmm3 * xmm2) + xmm24
vmulss %xmm25, %xmm3, %xmm3
vmulss %xmm2, %xmm3, %xmm2
vmovss %xmm2, %xmm16, %xmm3 # xmm3 = xmm2[0],xmm16[1,2,3]
vinsertf32x4 $0x0, %xmm3, %zmm16, %zmm16
vxorps %xmm4, %xmm18, %xmm3
vmulss %xmm3, %xmm2, %xmm2
vmovss %xmm2, %xmm15, %xmm2 # xmm2 = xmm2[0],xmm15[1,2,3]
vinsertf32x4 $0x0, %xmm2, %zmm15, %zmm15
testl %r12d, %r12d
je 0x380c14
testb %al, %al
je 0x380c34
movl %esi, %edi
movq -0x30(%rsp), %r12
movq -0x28(%rsp), %rax
cmpl %r8d, %edi
jg 0x380c3e
vbroadcastss (%rax), %zmm2
vmovups (%r15), %zmm3
vfmadd132ps %zmm16, %zmm15, %zmm3 # zmm3 = (zmm3 * zmm16) + zmm15
vfmadd213ps (%r12){1to16}, %zmm2, %zmm3 # zmm3 = (zmm2 * zmm3) + mem
vmovups %zmm3, (%r15)
addq $0x40, %r15
addq $0x4, %rax
addq $0x4, %r12
addl $0x10, %edi
jmp 0x380bdf
vmovaps %xmm16, %xmm17
testb %r13b, %r13b
jne 0x380f71
vbroadcastss %xmm17, %xmm18
vbroadcastss %xmm15, %xmm19
jmp 0x380f7d
movq -0x30(%rsp), %r12
movq -0x28(%rsp), %rax
testb %cl, %cl
je 0x380ced
vinsertf64x4 $0x1, %ymm16, %zmm16, %zmm17
vinsertf64x4 $0x1, %ymm15, %zmm15, %zmm18
xorl %ebx, %ebx
movl %ebp, %edi
xorl %r10d, %r10d
movq %r15, %rcx
addl $0x10, %r10d
cmpl %r8d, %r10d
jg 0x380cb4
vmovss (%rax,%rbx), %xmm2
vmovss 0x4(%rax,%rbx), %xmm3
vmovss (%r12,%rbx), %xmm19
vmovss 0x4(%r12,%rbx), %xmm20
vpermt2ps %zmm3, %zmm5, %zmm2
vpermt2ps %zmm20, %zmm5, %zmm19
vmovups (%r15,%rbx,8), %zmm3
vfmadd132ps %zmm17, %zmm18, %zmm3 # zmm3 = (zmm3 * zmm17) + zmm18
vfmadd213ps %zmm19, %zmm2, %zmm3 # zmm3 = (zmm2 * zmm3) + zmm19
vmovups %zmm3, (%r15,%rbx,8)
addq $0x40, %rcx
addq $0x8, %rbx
addl $0x10, %edi
jmp 0x380c5e
addq %rbx, %r12
addq %rbx, %rax
cmpl %r8d, %edi
jg 0x380cea
vbroadcastss (%rax), %ymm2
vmovups (%rcx), %ymm3
vfmadd132ps %ymm16, %ymm15, %ymm3 # ymm3 = (ymm3 * ymm16) + ymm15
vfmadd213ps (%r12){1to8}, %ymm2, %ymm3 # ymm3 = (ymm2 * ymm3) + mem
vmovups %ymm3, (%rcx)
addq $0x20, %rcx
addq $0x4, %rax
addq $0x4, %r12
addl $0x8, %edi
jmp 0x380cba
movq %rcx, %r15
testb %r13b, %r13b
je 0x380e69
vpermpd $0x44, %ymm16, %ymm17 # ymm17 = ymm16[0,1,0,1]
vpermpd $0x44, %ymm15, %ymm18 # ymm18 = ymm15[0,1,0,1]
vinsertf64x4 $0x1, %ymm16, %zmm16, %zmm2
vpermpd $0x44, %zmm2, %zmm19 # zmm19 = zmm2[0,1,0,1,4,5,4,5]
vinsertf64x4 $0x1, %ymm15, %zmm15, %zmm2
vpermpd $0x44, %zmm2, %zmm20 # zmm20 = zmm2[0,1,0,1,4,5,4,5]
xorl %r13d, %r13d
movq %r15, %rcx
leal 0x10(%r13), %edi
cmpl %r8d, %edi
jg 0x380e26
vbroadcastss 0x8(%rax,%r13), %zmm2
vbroadcastss 0xc(%rax,%r13), %zmm3
vmovss (%rax,%r13), %xmm21
vmovss 0x4(%rax,%r13), %xmm22
vshufps $0x0, %xmm22, %xmm21, %xmm21 # xmm21 = xmm21[0,0],xmm22[0,0]
vpermpd $0x50, %ymm21, %ymm21 # ymm21 = ymm21[0,0,1,1]
vinsertf64x4 $0x0, %ymm21, %zmm2, %zmm2
kmovd %r14d, %k1
vmovapd %zmm3, %zmm2 {%k1}
vbroadcastss 0x8(%r12,%r13), %zmm3
vbroadcastss 0xc(%r12,%r13), %zmm21
vmovss (%r12,%r13), %xmm22
vmovss 0x4(%r12,%r13), %xmm23
vshufps $0x0, %xmm23, %xmm22, %xmm22 # xmm22 = xmm22[0,0],xmm23[0,0]
vpermpd $0x50, %ymm22, %ymm22 # ymm22 = ymm22[0,0,1,1]
vinsertf64x4 $0x0, %ymm22, %zmm3, %zmm3
vmovapd %zmm21, %zmm3 {%k1}
vmovups (%rcx), %zmm21
vfmadd132ps %zmm19, %zmm20, %zmm21 # zmm21 = (zmm21 * zmm19) + zmm20
vfmadd213ps %zmm3, %zmm2, %zmm21 # zmm21 = (zmm2 * zmm21) + zmm3
vmovups %zmm21, (%rcx)
addq $0x40, %rcx
addq $0x10, %r13
jmp 0x380d26
vmovss (%rax,%r13), %xmm2
vmovss 0x4(%rax,%r13), %xmm3
vshufps $0x0, %xmm3, %xmm2, %xmm2 # xmm2 = xmm2[0,0],xmm3[0,0]
vmovss (%r12,%r13), %xmm3
vmovss 0x4(%r12,%r13), %xmm19
vshufps $0x0, %xmm19, %xmm3, %xmm3 # xmm3 = xmm3[0,0],xmm19[0,0]
vpermpd $0x50, %ymm2, %ymm2 # ymm2 = ymm2[0,0,1,1]
vpermpd $0x50, %ymm3, %ymm3 # ymm3 = ymm3[0,0,1,1]
vmovups (%r15,%r13,4), %ymm19
vfmadd132ps %ymm17, %ymm18, %ymm19 # ymm19 = (ymm19 * ymm17) + ymm18
vfmadd213ps %ymm3, %ymm2, %ymm19 # ymm19 = (ymm2 * ymm19) + ymm3
vmovups %ymm19, (%r15,%r13,4)
addq $0x20, %rcx
addq $0x8, %r13
leal 0x8(%r13), %edi
cmpl %r8d, %edi
jle 0x380dd1
jmp 0x380e58
vbroadcastss (%rax,%r13), %xmm2
vmovups (%r15,%r13,4), %xmm3
vfmadd132ps %xmm16, %xmm15, %xmm3 # xmm3 = (xmm3 * xmm16) + xmm15
vfmadd213ps (%r12,%r13){1to4}, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + mem
vmovups %xmm3, (%r15,%r13,4)
addq $0x10, %rcx
addq $0x4, %r13
leal 0x4(%r13), %edi
cmpl %r8d, %edi
jle 0x380e31
addq %r13, %r12
addq %r13, %rax
jmp 0x380e6c
movq %r15, %rcx
testb %r11b, %r11b
je 0x381034
vbroadcastss %xmm16, %zmm17
vbroadcastss %xmm15, %zmm18
xorl %r11d, %r11d
xorl %edi, %edi
movq %r12, %r10
movq %rax, %rbx
movq %rcx, %r15
leal 0x10(%rdi), %r13d
cmpl %r8d, %r13d
jg 0x380ecb
vmovups (%rbx), %zmm2
vmovups (%r15), %zmm3
vfmadd132ps %zmm17, %zmm18, %zmm3 # zmm3 = (zmm3 * zmm17) + zmm18
vfmadd213ps (%r10), %zmm2, %zmm3 # zmm3 = (zmm2 * zmm3) + mem
vmovups %zmm3, (%r15)
addq $0x40, %r15
addq $0x40, %rbx
addq $0x40, %r10
addq $0x40, %r11
movl %r13d, %edi
jmp 0x380e8f
vbroadcastss %xmm15, %ymm18
leal 0x8(%rdi), %r10d
cmpl %r8d, %r10d
jg 0x380f01
vmovups (%rax,%r11), %ymm2
vmovups (%rcx,%r11), %ymm3
vfmadd132ps %ymm17, %ymm18, %ymm3 # ymm3 = (ymm3 * ymm17) + ymm18
vfmadd213ps (%r12,%r11), %ymm2, %ymm3 # ymm3 = (ymm2 * ymm3) + mem
vmovups %ymm3, (%rcx,%r11)
addq $0x20, %r11
movl %r10d, %edi
jmp 0x380ed1
vbroadcastss %xmm15, %xmm18
leal 0x4(%rdi), %r10d
cmpl %r8d, %r10d
jg 0x380f37
vmovups (%rax,%r11), %xmm2
vmovups (%rcx,%r11), %xmm3
vfmadd132ps %xmm17, %xmm18, %xmm3 # xmm3 = (xmm3 * xmm17) + xmm18
vfmadd213ps (%r12,%r11), %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + mem
vmovups %xmm3, (%rcx,%r11)
addq $0x10, %r11
movl %r10d, %edi
jmp 0x380f07
addq %r11, %r12
addq %r11, %rax
addq %r11, %rcx
cmpl %r8d, %edi
jge 0x381034
vmovss (%rcx), %xmm2
vfmadd132ss %xmm16, %xmm15, %xmm2 # xmm2 = (xmm2 * xmm16) + xmm15
vmovss (%rax), %xmm3
vfmadd213ss (%r12), %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + mem
vmovss %xmm3, (%rcx)
incl %edi
addq $0x4, %rcx
addq $0x4, %rax
addq $0x4, %r12
jmp 0x380f40
vmovaps %xmm17, %xmm18
vmovaps %xmm15, %xmm19
testb %cl, %cl
jne 0x380f91
vinsertf32x4 $0x1, %xmm18, %ymm18, %ymm20
vinsertf32x4 $0x1, %xmm19, %ymm19, %ymm21
jmp 0x380f9d
vmovaps %ymm16, %ymm20
vmovaps %ymm15, %ymm21
vmovaps %zmm15, %zmm22
testb %al, %al
jne 0x380fb5
vinsertf64x4 $0x1, %ymm20, %zmm20, %zmm16
vinsertf64x4 $0x1, %ymm21, %zmm21, %zmm22
xorl %eax, %eax
leal 0x10(%rax), %ecx
cmpl %r8d, %ecx
jg 0x380fef
vmovups (%r15), %zmm2
vfmadd132ps %zmm16, %zmm22, %zmm2 # zmm2 = (zmm2 * zmm16) + zmm22
vmovups %zmm2, (%r15)
addq $0x40, %r15
movl %ecx, %eax
jmp 0x380fb7
vmovups (%r15), %ymm2
vfmadd132ps %ymm20, %ymm21, %ymm2 # ymm2 = (ymm2 * ymm20) + ymm21
vmovups %ymm2, (%r15)
addq $0x20, %r15
movl %ecx, %eax
leal 0x8(%rax), %ecx
cmpl %r8d, %ecx
jle 0x380fd9
jmp 0x38100f
vmovups (%r15), %xmm2
vfmadd132ps %xmm18, %xmm19, %xmm2 # xmm2 = (xmm2 * xmm18) + xmm19
vmovups %xmm2, (%r15)
addq $0x10, %r15
movl %ecx, %eax
leal 0x4(%rax), %ecx
cmpl %r8d, %ecx
jle 0x380ff9
jmp 0x38102f
vmovss (%r15), %xmm2
vfmadd132ss %xmm17, %xmm15, %xmm2 # xmm2 = (xmm2 * xmm17) + xmm15
vmovss %xmm2, (%r15)
incl %eax
addq $0x4, %r15
cmpl %r8d, %eax
jl 0x381019
incq %r9
jmp 0x380713
xorl %eax, %eax
leal 0x10(%rax), %ecx
cmpl %r9d, %ecx
jg 0x381060
vmovups (%rdx), %zmm4
vfmadd132ps %zmm3, %zmm2, %zmm4 # zmm4 = (zmm4 * zmm3) + zmm2
vmovups %zmm4, (%rdx)
addq $0x40, %rdx
movl %ecx, %eax
jmp 0x38103e
vbroadcastss %xmm3, %xmm4
vbroadcastss %xmm2, %ymm5
leal 0x8(%rax), %ecx
cmpl %r9d, %ecx
jg 0x381087
vmovups (%rdx), %ymm6
vfmadd132ps %ymm3, %ymm5, %ymm6 # ymm6 = (ymm6 * ymm3) + ymm5
vmovups %ymm6, (%rdx)
addq $0x20, %rdx
movl %ecx, %eax
jmp 0x38106a
vbroadcastss %xmm2, %xmm2
leal 0x4(%rax), %ecx
cmpl %r9d, %ecx
jg 0x3810bc
vmovups (%rdx), %xmm3
vfmadd132ps %xmm4, %xmm2, %xmm3 # xmm3 = (xmm3 * xmm4) + xmm2
vmovups %xmm3, (%rdx)
addq $0x10, %rdx
movl %ecx, %eax
jmp 0x38108c
vmovss (%rdx), %xmm2
vfmadd132ss %xmm0, %xmm1, %xmm2 # xmm2 = (xmm2 * xmm0) + xmm1
vmovss %xmm2, (%rdx)
incl %eax
addq $0x4, %rdx
cmpl %r9d, %eax
jl 0x3810a9
xorl %eax, %eax
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/layernorm_x86_avx512.cpp |
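Every forward_inplace body in this listing funnels into fast_1d_layer_norm, whose definition lies outside the compiled unit shown here. As a reading aid for the vector code above, the following is a minimal scalar sketch (the elempack == 1 case) of the computation it performs; the _ref name, the parameter order, and the plain sqrtf call are illustrative assumptions, not the verified ncnn source.

#include <math.h>

// Hedged scalar reference for the vectorized kernels in this file:
// normalize elemcount values in place, then optionally apply gamma/beta.
static void fast_1d_layer_norm_ref(float* ptr, int elemcount,
                                   const float* gamma, const float* beta,
                                   int affine, float eps)
{
    // pass 1: mean
    float sum = 0.f;
    for (int i = 0; i < elemcount; i++)
        sum += ptr[i];
    const float mean = sum / elemcount;

    // pass 2: variance
    float sqsum = 0.f;
    for (int i = 0; i < elemcount; i++)
    {
        const float d = ptr[i] - mean;
        sqsum += d * d;
    }
    const float var = sqsum / elemcount;

    // fold (x - mean) / sqrt(var + eps) into the fma-shaped a * x + b
    // that appears as vfmadd132ps/vfmadd132ss in the listings above
    const float a = 1.f / sqrtf(var + eps);
    const float b = -mean * a;
    for (int i = 0; i < elemcount; i++)
    {
        float v = ptr[i] * a + b;
        if (affine)
            v = v * gamma[i] + beta[i];
        ptr[i] = v;
    }
}

For elempack > 1 the packed paths keep one (a, b) pair per SIMD lane group, which is why the assembly fans out into 16/8/4/1 reduction cases instead of performing a single horizontal sum.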
virtual thunk to ncnn::LayerNorm_x86_avx512::forward_inplace(ncnn::Mat&, ncnn::Option const&) const | int LayerNorm_x86_avx512::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
{
int dims = bottom_top_blob.dims;
int elempack = bottom_top_blob.elempack;
int w = bottom_top_blob.w;
int h = bottom_top_blob.h;
int channels = bottom_top_blob.c;
const float* gamma = gamma_data;
const float* beta = beta_data;
if (dims == 1)
{
int elemcount = w * elempack;
float* ptr = bottom_top_blob;
        // 1D layer norm is special. Treat it as unpacked.
fast_1d_layer_norm(ptr, 1, elemcount, elemcount, gamma, beta, affine, eps);
}
if (dims == 2)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = 0; i < h; ++i)
{
float* ptr = bottom_top_blob.row(i);
fast_1d_layer_norm(ptr, elempack, w, w * elempack, gamma, beta, affine, eps);
}
}
if (dims == 3)
{
if (affine_size == w)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; ++q)
{
for (int i = 0; i < h; ++i)
{
float* ptr = bottom_top_blob.channel(q).row(i);
fast_1d_layer_norm(ptr, elempack, w, w * elempack, gamma, beta, affine, eps);
}
}
}
else // if (affine_size == w * h)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; ++q)
{
float* ptr = bottom_top_blob.channel(q);
fast_1d_layer_norm(ptr, elempack, w * h, w * h * elempack, gamma, beta, affine, eps);
}
}
}
return 0;
} | pushq %rax
movq (%rdi), %rax
addq -0x58(%rax), %rdi
callq 0x37ef00
xorl %eax, %eax
popq %rcx
retq
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/layernorm_x86_avx512.cpp |
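The virtual thunk above is compiler-generated glue rather than hand-written ncnn code: movq (%rdi), %rax loads the vtable pointer and addq -0x58(%rax), %rdi slides `this` by an offset stored alongside the vtable before calling the shared body at 0x37ef00. Below is a minimal sketch of the C++ shape that makes a compiler emit such a thunk; the class names are illustrative, not ncnn's actual hierarchy, which presumably reaches the same effect through virtual inheritance between the generic layer and its arch-specific overrides.

struct Layer
{
    virtual int forward_inplace(float* data) = 0;
    virtual ~Layer() {}
};

// With *virtual* inheritance the offset from the Layer subobject to the
// complete object is only known at run time via the vtable, so calls made
// through a Layer* land on a "virtual thunk" that adjusts `this` exactly
// like the addq above, then falls into the real override.
struct LayerNormish : virtual Layer
{
    int forward_inplace(float* data) override { (void)data; return 0; }
};

Calling forward_inplace directly on a LayerNormish* skips the thunk; only dispatch through the virtual base pointer needs the this-adjustment.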
ncnn::LayerNorm_x86_fma::forward_inplace(ncnn::Mat&, ncnn::Option const&) const | int LayerNorm_x86_fma::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
{
int dims = bottom_top_blob.dims;
int elempack = bottom_top_blob.elempack;
int w = bottom_top_blob.w;
int h = bottom_top_blob.h;
int channels = bottom_top_blob.c;
const float* gamma = gamma_data;
const float* beta = beta_data;
if (dims == 1)
{
int elemcount = w * elempack;
float* ptr = bottom_top_blob;
        // 1D layer norm is special. Treat it as unpacked.
fast_1d_layer_norm(ptr, 1, elemcount, elemcount, gamma, beta, affine, eps);
}
if (dims == 2)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = 0; i < h; ++i)
{
float* ptr = bottom_top_blob.row(i);
fast_1d_layer_norm(ptr, elempack, w, w * elempack, gamma, beta, affine, eps);
}
}
if (dims == 3)
{
if (affine_size == w)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; ++q)
{
for (int i = 0; i < h; ++i)
{
float* ptr = bottom_top_blob.channel(q).row(i);
fast_1d_layer_norm(ptr, elempack, w, w * elempack, gamma, beta, affine, eps);
}
}
}
else // if (affine_size == w * h)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; ++q)
{
float* ptr = bottom_top_blob.channel(q);
fast_1d_layer_norm(ptr, elempack, w * h, w * h * elempack, gamma, beta, affine, eps);
}
}
}
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
movq %rdi, %r8
movl 0x18(%rsi), %eax
movl %eax, -0x34(%rsp)
movl 0x28(%rsi), %eax
movl 0x2c(%rsi), %r9d
movq (%rdi), %rcx
movq %rcx, -0x18(%rsp)
movq -0x18(%rcx), %rcx
movq 0xe0(%rdi,%rcx), %rdi
movq %rdi, -0x28(%rsp)
movq %r8, -0x20(%rsp)
movq 0x128(%r8,%rcx), %rdi
movq %rdi, -0x30(%rsp)
cmpl $0x1, %eax
je 0x381d33
movl 0x30(%rsi), %r8d
cmpl $0x3, %eax
je 0x38172f
cmpl $0x2, %eax
jne 0x382552
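# annotation (editor note, not compiler output): same dims dispatch as the _avx512
# build above; this _fma variant works in 256-bit ymm registers, so its widest
# stride is 8 floats and the elempack cases reduce to 8/4/1.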
movl $0x8, %edx
vcvtsi2ss %r9d, %xmm0, %xmm0
imull -0x34(%rsp), %r9d
vmovss 0x6dadf(%rip), %xmm1 # 0x3eec88
vdivss %xmm0, %xmm1, %xmm0
vshufps $0x0, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm2
xorl %ebx, %ebx
testl %r8d, %r8d
cmovlel %ebx, %r8d
vbroadcastss 0x6ffea(%rip), %xmm5 # 0x3f11b4
vbroadcastss 0x70751(%rip), %xmm9 # 0x3f1924
vbroadcastss 0x6ffd8(%rip), %ymm10 # 0x3f11b4
vxorps %xmm11, %xmm11, %xmm11
cmpq %r8, %rbx
je 0x382552
movq (%rsi), %r15
movq 0x10(%rsi), %rcx
movslq 0x2c(%rsi), %r10
movq %rbx, %r14
imulq %r10, %r14
imulq %rcx, %r14
addq %r15, %r14
movq -0x18(%rsp), %rax
movq -0x18(%rax), %rax
movq -0x20(%rsp), %rdi
movl 0xd8(%rdi,%rax), %r12d
vmovss 0xd4(%rdi,%rax), %xmm14
xorl %eax, %eax
vxorps %xmm3, %xmm3, %xmm3
xorl %r13d, %r13d
movq %r14, %rdi
leal 0x8(%r13), %r11d
cmpl %r9d, %r11d
jg 0x381248
vaddps (%rdi), %ymm3, %ymm3
addq $0x20, %rdi
addq $0x8, %rax
movl %r11d, %r13d
jmp 0x38122e
vxorps %xmm4, %xmm4, %xmm4
addl $0x4, %r13d
cmpl %r9d, %r13d
jg 0x381263
vaddps (%rdi), %xmm4, %xmm4
addq $0x10, %rdi
addq $0x4, %rax
jmp 0x38124c
imulq %rbx, %rcx
imulq %r10, %rcx
addq %rcx, %r15
vxorps %xmm6, %xmm6, %xmm6
cmpl %r9d, %eax
jge 0x381282
vaddss (%r15,%rax,4), %xmm6, %xmm6
incq %rax
jmp 0x381272
movl -0x34(%rsp), %eax
cmpl $0x8, %eax
je 0x3812f1
vextractf128 $0x1, %ymm3, %xmm7
cmpl $0x4, %eax
je 0x3812d4
cmpl $0x1, %eax
jne 0x38130d
vshufpd $0x1, %xmm4, %xmm4, %xmm12 # xmm12 = xmm4[1,0]
vhaddps %xmm3, %xmm7, %xmm3
vaddps %xmm4, %xmm12, %xmm4
vhaddps %xmm3, %xmm3, %xmm3
vmovshdup %xmm4, %xmm7 # xmm7 = xmm4[1,1,3,3]
vhaddps %xmm3, %xmm3, %xmm3
vaddss %xmm7, %xmm3, %xmm3
vaddss %xmm4, %xmm3, %xmm3
vaddss %xmm6, %xmm3, %xmm3
vmulss %xmm0, %xmm3, %xmm15
vmovss %xmm15, %xmm11, %xmm12 # xmm12 = xmm15[0],xmm11[1,2,3]
vshufps $0x0, %xmm15, %xmm15, %xmm13 # xmm13 = xmm15[0,0,0,0]
movb $0x1, %bpl
jmp 0x38131e
vaddps %xmm4, %xmm3, %xmm3
vaddps %xmm3, %xmm7, %xmm3
vmulps %xmm1, %xmm3, %xmm13
movb $0x1, %r13b
xorl %ebp, %ebp
vmovaps %ymm13, %ymm12
vmovaps %xmm13, %xmm15
jmp 0x381321
vmulps %ymm2, %ymm3, %ymm12
vshufps $0x0, %xmm12, %xmm12, %xmm13 # xmm13 = xmm12[0,0,0,0]
movb $0x1, %al
xorl %ebp, %ebp
vmovaps %xmm12, %xmm15
xorl %r13d, %r13d
vmovaps %ymm12, %ymm4
jmp 0x381329
vxorps %xmm15, %xmm15, %xmm15
vxorps %xmm12, %xmm12, %xmm12
vxorps %xmm13, %xmm13, %xmm13
xorl %ebp, %ebp
xorl %r13d, %r13d
vinsertf128 $0x1, %xmm13, %ymm13, %ymm4
xorl %eax, %eax
xorl %r10d, %r10d
vxorps %xmm3, %xmm3, %xmm3
xorl %edi, %edi
movq %r14, %rcx
leal 0x8(%rdi), %r11d
cmpl %r9d, %r11d
jg 0x381358
vmovups (%rcx), %ymm6
vsubps %ymm4, %ymm6, %ymm6
vfmadd231ps %ymm6, %ymm6, %ymm3 # ymm3 = (ymm6 * ymm6) + ymm3
addq $0x20, %rcx
addq $0x8, %r10
movl %r11d, %edi
jmp 0x381335
vxorps %xmm4, %xmm4, %xmm4
addl $0x4, %edi
cmpl %r9d, %edi
jg 0x38137c
vmovups (%rcx), %xmm6
vsubps %xmm13, %xmm6, %xmm6
vfmadd231ps %xmm6, %xmm6, %xmm4 # xmm4 = (xmm6 * xmm6) + xmm4
addq $0x10, %rcx
addq $0x4, %r10
jmp 0x38135c
vxorps %xmm6, %xmm6, %xmm6
cmpl %r9d, %r10d
jge 0x38139a
vmovss (%r15,%r10,4), %xmm7
vsubss %xmm15, %xmm7, %xmm7
vfmadd231ss %xmm7, %xmm7, %xmm6 # xmm6 = (xmm7 * xmm7) + xmm6
incq %r10
jmp 0x381380
testb %al, %al
jne 0x3813a5
vxorps %xmm13, %xmm13, %xmm13
jmp 0x3813a9
vmulps %ymm2, %ymm3, %ymm13
vextractf128 $0x1, %ymm3, %xmm7
testb %r13b, %r13b
je 0x3813c6
vaddps %xmm4, %xmm3, %xmm4
vaddps %xmm4, %xmm7, %xmm4
vmulps %xmm1, %xmm4, %xmm8
vblendps $0xf, %ymm8, %ymm13, %ymm13 # ymm13 = ymm8[0,1,2,3],ymm13[4,5,6,7]
testb %bpl, %bpl
je 0x3813fa
vshufpd $0x1, %xmm4, %xmm4, %xmm8 # xmm8 = xmm4[1,0]
vaddps %xmm4, %xmm8, %xmm4
vhaddps %xmm3, %xmm7, %xmm3
vmovshdup %xmm4, %xmm7 # xmm7 = xmm4[1,1,3,3]
vhaddps %xmm3, %xmm3, %xmm3
vhaddps %xmm3, %xmm3, %xmm3
vaddss %xmm7, %xmm6, %xmm6
vaddss %xmm4, %xmm6, %xmm4
vaddss %xmm4, %xmm3, %xmm3
vmulss %xmm0, %xmm3, %xmm3
vblendps $0x1, %ymm3, %ymm13, %ymm13 # ymm13 = ymm3[0],ymm13[1,2,3,4,5,6,7]
movl -0x34(%rsp), %ecx
cmpl $0x1, %ecx
je 0x381495
cmpl $0x4, %ecx
je 0x381457
cmpl $0x8, %ecx
jne 0x3814cb
vshufps $0x0, %xmm14, %xmm14, %xmm3 # xmm3 = xmm14[0,0,0,0]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm3
vaddps %ymm3, %ymm13, %ymm3
vrsqrtps %ymm3, %ymm4
vmulps %ymm4, %ymm3, %ymm3
vbroadcastss 0x7438e(%rip), %ymm6 # 0x3f57c4
vfmadd213ps %ymm6, %ymm4, %ymm3 # ymm3 = (ymm4 * ymm3) + ymm6
vbroadcastss 0x704e0(%rip), %ymm6 # 0x3f1924
vmulps %ymm6, %ymm4, %ymm4
vmulps %ymm3, %ymm4, %ymm13
vxorps %ymm10, %ymm12, %ymm3
vmulps %ymm3, %ymm13, %ymm12
jmp 0x3814cb
vshufps $0x0, %xmm14, %xmm14, %xmm3 # xmm3 = xmm14[0,0,0,0]
vaddps %xmm3, %xmm13, %xmm3
vrsqrtps %xmm3, %xmm4
vmulps %xmm4, %xmm3, %xmm3
vbroadcastss 0x74352(%rip), %xmm6 # 0x3f57c4
vfmadd213ps %xmm6, %xmm4, %xmm3 # xmm3 = (xmm4 * xmm3) + xmm6
vmulps %xmm4, %xmm9, %xmm4
vmulps %xmm3, %xmm4, %xmm3
vxorps %xmm5, %xmm12, %xmm4
vmulps %xmm4, %xmm3, %xmm4
vblendps $0xf, %ymm3, %ymm13, %ymm13 # ymm13 = ymm3[0,1,2,3],ymm13[4,5,6,7]
vblendps $0xf, %ymm4, %ymm12, %ymm12 # ymm12 = ymm4[0,1,2,3],ymm12[4,5,6,7]
jmp 0x3814cb
vaddss %xmm14, %xmm13, %xmm3
vrsqrtss %xmm3, %xmm3, %xmm4
vmulss %xmm4, %xmm3, %xmm3
vfmadd213ss 0x74319(%rip), %xmm4, %xmm3 # xmm3 = (xmm4 * xmm3) + mem
vmulss 0x70471(%rip), %xmm4, %xmm4 # 0x3f1924
vmulss %xmm3, %xmm4, %xmm3
vblendps $0x1, %ymm3, %ymm13, %ymm13 # ymm13 = ymm3[0],ymm13[1,2,3,4,5,6,7]
vxorps %xmm5, %xmm15, %xmm4
vmulss %xmm4, %xmm3, %xmm3
vblendps $0x1, %ymm3, %ymm12, %ymm12 # ymm12 = ymm3[0],ymm12[1,2,3,4,5,6,7]
testl %r12d, %r12d
je 0x381514
testb %al, %al
je 0x381531
movl %edx, %ecx
movq -0x30(%rsp), %rax
movq -0x28(%rsp), %r15
cmpl %r9d, %ecx
jg 0x38153b
vbroadcastss (%r15), %ymm3
vbroadcastss (%rax), %ymm4
vmovups (%r14), %ymm6
vfmadd132ps %ymm13, %ymm12, %ymm6 # ymm6 = (ymm6 * ymm13) + ymm12
vfmadd213ps %ymm4, %ymm3, %ymm6 # ymm6 = (ymm3 * ymm6) + ymm4
vmovups %ymm6, (%r14)
addq $0x20, %r14
addq $0x4, %r15
addq $0x4, %rax
addl $0x8, %ecx
jmp 0x3814e0
vmovaps %xmm13, %xmm3
testb %r13b, %r13b
jne 0x3816a9
vshufps $0x0, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[0,0,0,0]
vshufps $0x0, %xmm12, %xmm12, %xmm14 # xmm14 = xmm12[0,0,0,0]
jmp 0x3816b2
movq -0x30(%rsp), %rax
movq -0x28(%rsp), %r15
testb %r13b, %r13b
je 0x3815dd
vinsertf128 $0x1, %xmm13, %ymm13, %ymm3
vinsertf128 $0x1, %xmm12, %ymm12, %ymm4
xorl %ecx, %ecx
movq %r14, %r12
leal 0x8(%rcx), %edi
cmpl %r9d, %edi
jg 0x3815cd
vbroadcastss 0x4(%r15,%rcx), %xmm6
vbroadcastss (%r15,%rcx), %xmm7
vinsertf128 $0x1, %xmm6, %ymm7, %ymm6
vbroadcastss 0x4(%rax,%rcx), %xmm7
vbroadcastss (%rax,%rcx), %xmm8
vinsertf128 $0x1, %xmm7, %ymm8, %ymm7
vmovups (%r12), %ymm8
vfmadd132ps %ymm3, %ymm4, %ymm8 # ymm8 = (ymm8 * ymm3) + ymm4
vfmadd213ps %ymm7, %ymm6, %ymm8 # ymm8 = (ymm6 * ymm8) + ymm7
vmovups %ymm8, (%r12)
addq $0x20, %r12
addq $0x8, %rcx
jmp 0x381555
vbroadcastss (%r15,%rcx), %xmm3
vbroadcastss (%rax,%rcx), %xmm4
vmovups (%r14,%rcx,4), %xmm6
vfmadd132ps %xmm13, %xmm12, %xmm6 # xmm6 = (xmm6 * xmm13) + xmm12
vfmadd213ps %xmm4, %xmm3, %xmm6 # xmm6 = (xmm3 * xmm6) + xmm4
vmovups %xmm6, (%r14,%rcx,4)
addq $0x10, %r12
addq $0x4, %rcx
leal 0x4(%rcx), %edi
cmpl %r9d, %edi
jle 0x3815a3
addq %rcx, %rax
addq %rcx, %r15
jmp 0x3815e0
movq %r14, %r12
testb %bpl, %bpl
je 0x381727
vshufps $0x0, %xmm13, %xmm13, %xmm3 # xmm3 = xmm13[0,0,0,0]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm4
vshufps $0x0, %xmm12, %xmm12, %xmm6 # xmm6 = xmm12[0,0,0,0]
vinsertf128 $0x1, %xmm6, %ymm6, %ymm6
xorl %r14d, %r14d
xorl %ecx, %ecx
movq %rax, %r10
xorl %edi, %edi
movq %r15, %r13
movq %r12, %rbp
leal 0x8(%rdi), %r11d
cmpl %r9d, %r11d
jg 0x38164d
vmovups (%r13), %ymm7
vmovups (%rbp), %ymm8
vfmadd132ps %ymm4, %ymm6, %ymm8 # ymm8 = (ymm8 * ymm4) + ymm6
vfmadd213ps (%r10), %ymm7, %ymm8 # ymm8 = (ymm7 * ymm8) + mem
vmovups %ymm8, (%rbp)
addq $0x20, %rbp
addq $0x20, %r13
addq $0x20, %r10
addq $0x20, %rcx
addq $0x8, %r14
movl %r11d, %edi
jmp 0x381611
vshufps $0x0, %xmm12, %xmm12, %xmm4 # xmm4 = xmm12[0,0,0,0]
addl $0x4, %edi
cmpl %r9d, %edi
jg 0x3816a2
vmovups (%r15,%rcx), %xmm6
vmovups (%r12,%rcx), %xmm7
vfmadd132ps %xmm3, %xmm4, %xmm7 # xmm7 = (xmm7 * xmm3) + xmm4
vfmadd213ps (%rax,%rcx), %xmm6, %xmm7 # xmm7 = (xmm6 * xmm7) + mem
vmovups %xmm7, (%r12,%rcx)
addq $0x10, %rcx
addq $0x4, %r14
jmp 0x381653
vmovss (%r12,%r14,4), %xmm3
vfmadd132ss %xmm13, %xmm12, %xmm3 # xmm3 = (xmm3 * xmm13) + xmm12
vmovss (%r15,%r14,4), %xmm4
vfmadd213ss (%rax,%r14,4), %xmm3, %xmm4 # xmm4 = (xmm3 * xmm4) + mem
vmovss %xmm4, (%r12,%r14,4)
incq %r14
cmpl %r9d, %r14d
jl 0x381682
jmp 0x381727
vmovaps %xmm3, %xmm4
vmovaps %xmm12, %xmm14
vmovaps %ymm12, %ymm6
testb %al, %al
jne 0x3816c6
vinsertf128 $0x1, %xmm4, %ymm4, %ymm13
vinsertf128 $0x1, %xmm14, %ymm14, %ymm6
xorl %eax, %eax
xorl %ecx, %ecx
leal 0x8(%rcx), %edi
cmpl %r9d, %edi
jg 0x381704
vmovups (%r14), %ymm7
vfmadd132ps %ymm13, %ymm6, %ymm7 # ymm7 = (ymm7 * ymm13) + ymm6
vmovups %ymm7, (%r14)
addq $0x20, %r14
addq $0x8, %rax
movl %edi, %ecx
jmp 0x3816ca
vmovups (%r14), %xmm6
vfmadd132ps %xmm4, %xmm14, %xmm6 # xmm6 = (xmm6 * xmm4) + xmm14
vmovups %xmm6, (%r14)
addq $0x10, %r14
addq $0x4, %rax
addl $0x4, %ecx
cmpl %r9d, %ecx
jle 0x3816ed
jmp 0x381722
vmovss (%r15,%rax,4), %xmm4
vfmadd132ss %xmm3, %xmm12, %xmm4 # xmm4 = (xmm4 * xmm3) + xmm12
vmovss %xmm4, (%r15,%rax,4)
incq %rax
cmpl %r9d, %eax
jl 0x38170e
incq %rbx
jmp 0x3811e1
movl 0x38(%rsi), %eax
xorl %edi, %edi
testl %eax, %eax
cmovlel %edi, %eax
movq %rax, -0x10(%rsp)
movq -0x20(%rsp), %rax
cmpl %r9d, 0xd0(%rax,%rcx)
jne 0x381f56
vcvtsi2ss %r9d, %xmm0, %xmm0
imull -0x34(%rsp), %r9d
vmovss 0x6d524(%rip), %xmm1 # 0x3eec88
vdivss %xmm0, %xmm1, %xmm0
vshufps $0x0, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm2
testl %r8d, %r8d
cmovlel %edi, %r8d
vbroadcastss 0x6fa31(%rip), %xmm5 # 0x3f11b4
vbroadcastss 0x70198(%rip), %xmm9 # 0x3f1924
vbroadcastss 0x6fa1f(%rip), %ymm10 # 0x3f11b4
vxorps %xmm11, %xmm11, %xmm11
movq %rsi, -0x8(%rsp)
cmpq -0x10(%rsp), %rdi
je 0x382552
movq -0x20(%rsp), %rax
movq (%rax), %rax
movq %rax, -0x18(%rsp)
xorl %r15d, %r15d
cmpq %r8, %r15
je 0x381d2b
movslq 0x2c(%rsi), %r11
movq (%rsi), %r13
movq 0x10(%rsi), %rcx
movq 0x40(%rsi), %rbp
movq %rdi, %rdx
imulq %rdi, %rbp
movq %rbp, %rax
imulq %rcx, %rax
addq %r13, %rax
imulq %r15, %r11
movq %r11, %r14
imulq %rcx, %r14
addq %rax, %r14
movq -0x18(%rsp), %rax
movq -0x18(%rax), %rax
movq -0x20(%rsp), %rdi
movl 0xd8(%rdi,%rax), %r10d
vmovss 0xd4(%rdi,%rax), %xmm14
xorl %eax, %eax
vxorps %xmm3, %xmm3, %xmm3
xorl %edi, %edi
movq %r14, %r12
movl -0x34(%rsp), %esi
leal 0x8(%rdi), %ebx
cmpl %r9d, %ebx
jg 0x381839
vaddps (%r12), %ymm3, %ymm3
addq $0x20, %r12
addq $0x8, %rax
movl %ebx, %edi
jmp 0x38181f
vxorps %xmm4, %xmm4, %xmm4
addl $0x4, %edi
cmpl %r9d, %edi
jg 0x381855
vaddps (%r12), %xmm4, %xmm4
addq $0x10, %r12
addq $0x4, %rax
jmp 0x38183d
addq %r11, %rbp
imulq %rbp, %rcx
addq %rcx, %r13
vxorps %xmm6, %xmm6, %xmm6
cmpl %r9d, %eax
jge 0x381874
vaddss (%r13,%rax,4), %xmm6, %xmm6
incq %rax
jmp 0x381863
cmpl $0x8, %esi
je 0x3818df
vextractf128 $0x1, %ymm3, %xmm7
cmpl $0x4, %esi
je 0x3818c2
cmpl $0x1, %esi
jne 0x3818fb
vshufpd $0x1, %xmm4, %xmm4, %xmm12 # xmm12 = xmm4[1,0]
vhaddps %xmm3, %xmm7, %xmm3
vaddps %xmm4, %xmm12, %xmm4
vhaddps %xmm3, %xmm3, %xmm3
vmovshdup %xmm4, %xmm7 # xmm7 = xmm4[1,1,3,3]
vhaddps %xmm3, %xmm3, %xmm3
vaddss %xmm7, %xmm3, %xmm3
vaddss %xmm4, %xmm3, %xmm3
vaddss %xmm6, %xmm3, %xmm3
vmulss %xmm0, %xmm3, %xmm15
vmovss %xmm15, %xmm11, %xmm12 # xmm12 = xmm15[0],xmm11[1,2,3]
vshufps $0x0, %xmm15, %xmm15, %xmm13 # xmm13 = xmm15[0,0,0,0]
movb $0x1, %bpl
jmp 0x38190c
vaddps %xmm4, %xmm3, %xmm3
vaddps %xmm3, %xmm7, %xmm3
vmulps %xmm1, %xmm3, %xmm13
movb $0x1, %r11b
xorl %ebp, %ebp
vmovaps %ymm13, %ymm12
vmovaps %xmm13, %xmm15
jmp 0x38190f
vmulps %ymm2, %ymm3, %ymm12
vshufps $0x0, %xmm12, %xmm12, %xmm13 # xmm13 = xmm12[0,0,0,0]
movb $0x1, %al
xorl %ebp, %ebp
vmovaps %xmm12, %xmm15
xorl %r11d, %r11d
vmovaps %ymm12, %ymm4
jmp 0x381917
vxorps %xmm15, %xmm15, %xmm15
vxorps %xmm12, %xmm12, %xmm12
vxorps %xmm13, %xmm13, %xmm13
xorl %ebp, %ebp
xorl %r11d, %r11d
vinsertf128 $0x1, %xmm13, %ymm13, %ymm4
xorl %eax, %eax
xorl %ecx, %ecx
vxorps %xmm3, %xmm3, %xmm3
xorl %edi, %edi
movq %r14, %r12
leal 0x8(%rdi), %ebx
cmpl %r9d, %ebx
jg 0x381945
vmovups (%r12), %ymm6
vsubps %ymm4, %ymm6, %ymm6
vfmadd231ps %ymm6, %ymm6, %ymm3 # ymm3 = (ymm6 * ymm6) + ymm3
addq $0x20, %r12
addq $0x8, %rcx
movl %ebx, %edi
jmp 0x381922
vxorps %xmm4, %xmm4, %xmm4
addl $0x4, %edi
cmpl %r9d, %edi
jg 0x38196b
vmovups (%r12), %xmm6
vsubps %xmm13, %xmm6, %xmm6
vfmadd231ps %xmm6, %xmm6, %xmm4 # xmm4 = (xmm6 * xmm6) + xmm4
addq $0x10, %r12
addq $0x4, %rcx
jmp 0x381949
vxorps %xmm6, %xmm6, %xmm6
cmpl %r9d, %ecx
jge 0x38198a
vmovss (%r13,%rcx,4), %xmm7
vsubss %xmm15, %xmm7, %xmm7
vfmadd231ss %xmm7, %xmm7, %xmm6 # xmm6 = (xmm7 * xmm7) + xmm6
incq %rcx
jmp 0x38196f
testb %al, %al
jne 0x381995
vxorps %xmm13, %xmm13, %xmm13
jmp 0x381999
vmulps %ymm2, %ymm3, %ymm13
vextractf128 $0x1, %ymm3, %xmm7
testb %r11b, %r11b
je 0x3819b6
vaddps %xmm4, %xmm3, %xmm4
vaddps %xmm4, %xmm7, %xmm4
vmulps %xmm1, %xmm4, %xmm8
vblendps $0xf, %ymm8, %ymm13, %ymm13 # ymm13 = ymm8[0,1,2,3],ymm13[4,5,6,7]
testb %bpl, %bpl
je 0x3819ea
vshufpd $0x1, %xmm4, %xmm4, %xmm8 # xmm8 = xmm4[1,0]
vaddps %xmm4, %xmm8, %xmm4
vhaddps %xmm3, %xmm7, %xmm3
vmovshdup %xmm4, %xmm7 # xmm7 = xmm4[1,1,3,3]
vhaddps %xmm3, %xmm3, %xmm3
vhaddps %xmm3, %xmm3, %xmm3
vaddss %xmm7, %xmm6, %xmm6
vaddss %xmm4, %xmm6, %xmm4
vaddss %xmm4, %xmm3, %xmm3
vmulss %xmm0, %xmm3, %xmm3
vblendps $0x1, %ymm3, %ymm13, %ymm13 # ymm13 = ymm3[0],ymm13[1,2,3,4,5,6,7]
cmpl $0x1, %esi
je 0x381a81
cmpl $0x4, %esi
je 0x381a43
cmpl $0x8, %esi
jne 0x381ab7
vshufps $0x0, %xmm14, %xmm14, %xmm3 # xmm3 = xmm14[0,0,0,0]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm3
vaddps %ymm3, %ymm13, %ymm3
vrsqrtps %ymm3, %ymm4
vmulps %ymm4, %ymm3, %ymm3
vbroadcastss 0x73da2(%rip), %ymm6 # 0x3f57c4
vfmadd213ps %ymm6, %ymm4, %ymm3 # ymm3 = (ymm4 * ymm3) + ymm6
vbroadcastss 0x6fef4(%rip), %ymm6 # 0x3f1924
vmulps %ymm6, %ymm4, %ymm4
vmulps %ymm3, %ymm4, %ymm13
vxorps %ymm10, %ymm12, %ymm3
vmulps %ymm3, %ymm13, %ymm12
jmp 0x381ab7
vshufps $0x0, %xmm14, %xmm14, %xmm3 # xmm3 = xmm14[0,0,0,0]
vaddps %xmm3, %xmm13, %xmm3
vrsqrtps %xmm3, %xmm4
vmulps %xmm4, %xmm3, %xmm3
vbroadcastss 0x73d66(%rip), %xmm6 # 0x3f57c4
vfmadd213ps %xmm6, %xmm4, %xmm3 # xmm3 = (xmm4 * xmm3) + xmm6
vmulps %xmm4, %xmm9, %xmm4
vmulps %xmm3, %xmm4, %xmm3
vxorps %xmm5, %xmm12, %xmm4
vmulps %xmm4, %xmm3, %xmm4
vblendps $0xf, %ymm3, %ymm13, %ymm13 # ymm13 = ymm3[0,1,2,3],ymm13[4,5,6,7]
vblendps $0xf, %ymm4, %ymm12, %ymm12 # ymm12 = ymm4[0,1,2,3],ymm12[4,5,6,7]
jmp 0x381ab7
vaddss %xmm14, %xmm13, %xmm3
vrsqrtss %xmm3, %xmm3, %xmm4
vmulss %xmm4, %xmm3, %xmm3
vfmadd213ss 0x73d2d(%rip), %xmm4, %xmm3 # xmm3 = (xmm4 * xmm3) + mem
vmulss 0x6fe85(%rip), %xmm4, %xmm4 # 0x3f1924
vmulss %xmm3, %xmm4, %xmm3
vblendps $0x1, %ymm3, %ymm13, %ymm13 # ymm13 = ymm3[0],ymm13[1,2,3,4,5,6,7]
vxorps %xmm5, %xmm15, %xmm4
vmulss %xmm4, %xmm3, %xmm3
vblendps $0x1, %ymm3, %ymm12, %ymm12 # ymm12 = ymm3[0],ymm12[1,2,3,4,5,6,7]
testl %r10d, %r10d
je 0x381b03
testb %al, %al
je 0x381b20
movl $0x8, %ecx
movq -0x30(%rsp), %rax
movq -0x28(%rsp), %r10
cmpl %r9d, %ecx
jg 0x381b2a
vbroadcastss (%r10), %ymm3
vbroadcastss (%rax), %ymm4
vmovups (%r14), %ymm6
vfmadd132ps %ymm13, %ymm12, %ymm6 # ymm6 = (ymm6 * ymm13) + ymm12
vfmadd213ps %ymm4, %ymm3, %ymm6 # ymm6 = (ymm3 * ymm6) + ymm4
vmovups %ymm6, (%r14)
addq $0x20, %r14
addq $0x4, %r10
addq $0x4, %rax
addl $0x8, %ecx
jmp 0x381acf
vmovaps %xmm13, %xmm3
testb %r11b, %r11b
jne 0x381c9b
vshufps $0x0, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[0,0,0,0]
vshufps $0x0, %xmm12, %xmm12, %xmm14 # xmm14 = xmm12[0,0,0,0]
jmp 0x381ca4
movq -0x30(%rsp), %rax
movq -0x28(%rsp), %r10
testb %r11b, %r11b
je 0x381bca
vinsertf128 $0x1, %xmm13, %ymm13, %ymm3
vinsertf128 $0x1, %xmm12, %ymm12, %ymm4
xorl %ecx, %ecx
movq %r14, %r11
leal 0x8(%rcx), %edi
cmpl %r9d, %edi
jg 0x381bba
vbroadcastss 0x4(%r10,%rcx), %xmm6
vbroadcastss (%r10,%rcx), %xmm7
vinsertf128 $0x1, %xmm6, %ymm7, %ymm6
vbroadcastss 0x4(%rax,%rcx), %xmm7
vbroadcastss (%rax,%rcx), %xmm8
vinsertf128 $0x1, %xmm7, %ymm8, %ymm7
vmovups (%r11), %ymm8
vfmadd132ps %ymm3, %ymm4, %ymm8 # ymm8 = (ymm8 * ymm3) + ymm4
vfmadd213ps %ymm7, %ymm6, %ymm8 # ymm8 = (ymm6 * ymm8) + ymm7
vmovups %ymm8, (%r11)
addq $0x20, %r11
addq $0x8, %rcx
jmp 0x381b44
vbroadcastss (%r10,%rcx), %xmm3
vbroadcastss (%rax,%rcx), %xmm4
vmovups (%r14,%rcx,4), %xmm6
vfmadd132ps %xmm13, %xmm12, %xmm6 # xmm6 = (xmm6 * xmm13) + xmm12
vfmadd213ps %xmm4, %xmm3, %xmm6 # xmm6 = (xmm3 * xmm6) + xmm4
vmovups %xmm6, (%r14,%rcx,4)
addq $0x10, %r11
addq $0x4, %rcx
leal 0x4(%rcx), %edi
cmpl %r9d, %edi
jle 0x381b90
addq %rcx, %rax
addq %rcx, %r10
jmp 0x381bcd
movq %r14, %r11
testb %bpl, %bpl
je 0x381d1b
vshufps $0x0, %xmm13, %xmm13, %xmm3 # xmm3 = xmm13[0,0,0,0]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm4
vshufps $0x0, %xmm12, %xmm12, %xmm6 # xmm6 = xmm12[0,0,0,0]
vinsertf128 $0x1, %xmm6, %ymm6, %ymm6
xorl %r12d, %r12d
xorl %ecx, %ecx
movq %rax, %rdi
xorl %r14d, %r14d
movq %r10, %r13
movq %r11, %rbp
leal 0x8(%r14), %ebx
cmpl %r9d, %ebx
jg 0x381c3b
vmovups (%r13), %ymm7
vmovups (%rbp), %ymm8
vfmadd132ps %ymm4, %ymm6, %ymm8 # ymm8 = (ymm8 * ymm4) + ymm6
vfmadd213ps (%rdi), %ymm7, %ymm8 # ymm8 = (ymm7 * ymm8) + mem
vmovups %ymm8, (%rbp)
addq $0x20, %rbp
addq $0x20, %r13
addq $0x20, %rdi
addq $0x20, %rcx
addq $0x8, %r12
movl %ebx, %r14d
jmp 0x381bff
vshufps $0x0, %xmm12, %xmm12, %xmm4 # xmm4 = xmm12[0,0,0,0]
addl $0x4, %r14d
cmpl %r9d, %r14d
jg 0x381c91
vmovups (%r10,%rcx), %xmm6
vmovups (%r11,%rcx), %xmm7
vfmadd132ps %xmm3, %xmm4, %xmm7 # xmm7 = (xmm7 * xmm3) + xmm4
vfmadd213ps (%rax,%rcx), %xmm6, %xmm7 # xmm7 = (xmm6 * xmm7) + mem
vmovups %xmm7, (%r11,%rcx)
addq $0x10, %rcx
addq $0x4, %r12
jmp 0x381c41
vmovss (%r11,%r12,4), %xmm3
vfmadd132ss %xmm13, %xmm12, %xmm3 # xmm3 = (xmm3 * xmm13) + xmm12
vmovss (%r10,%r12,4), %xmm4
vfmadd213ss (%rax,%r12,4), %xmm3, %xmm4 # xmm4 = (xmm3 * xmm4) + mem
vmovss %xmm4, (%r11,%r12,4)
incq %r12
cmpl %r9d, %r12d
jl 0x381c71
jmp 0x381d1b
vmovaps %xmm3, %xmm4
vmovaps %xmm12, %xmm14
vmovaps %ymm12, %ymm6
testb %al, %al
jne 0x381cb8
vinsertf128 $0x1, %xmm4, %ymm4, %ymm13
vinsertf128 $0x1, %xmm14, %ymm14, %ymm6
xorl %eax, %eax
xorl %ecx, %ecx
leal 0x8(%rcx), %edi
cmpl %r9d, %edi
jg 0x381cf6
vmovups (%r14), %ymm7
vfmadd132ps %ymm13, %ymm6, %ymm7 # ymm7 = (ymm7 * ymm13) + ymm6
vmovups %ymm7, (%r14)
addq $0x20, %r14
addq $0x8, %rax
movl %edi, %ecx
jmp 0x381cbc
vmovups (%r14), %xmm6
vfmadd132ps %xmm4, %xmm14, %xmm6 # xmm6 = (xmm6 * xmm4) + xmm14
vmovups %xmm6, (%r14)
addq $0x10, %r14
addq $0x4, %rax
addl $0x4, %ecx
cmpl %r9d, %ecx
jle 0x381cdf
jmp 0x381d16
vmovss (%r13,%rax,4), %xmm4
vfmadd132ss %xmm3, %xmm12, %xmm4 # xmm4 = (xmm4 * xmm3) + xmm12
vmovss %xmm4, (%r13,%rax,4)
incq %rax
cmpl %r9d, %eax
jl 0x381d00
incq %r15
movq %rdx, %rdi
movq -0x8(%rsp), %rsi
jmp 0x3817ba
incq %rdi
jmp 0x38179f
imull -0x34(%rsp), %r9d
movq (%rsi), %rdx
movq -0x20(%rsp), %rsi
movl 0xd8(%rsi,%rcx), %eax
vmovss 0xd4(%rsi,%rcx), %xmm0
vxorps %xmm1, %xmm1, %xmm1
xorl %ecx, %ecx
leal 0x8(%rcx), %esi
cmpl %r9d, %esi
jg 0x381d6a
vaddps (%rdx,%rcx,4), %ymm1, %ymm1
addq $0x8, %rcx
jmp 0x381d57
vxorps %xmm2, %xmm2, %xmm2
leal 0x4(%rcx), %esi
cmpl %r9d, %esi
jg 0x381d81
vaddps (%rdx,%rcx,4), %xmm2, %xmm2
addq $0x4, %rcx
jmp 0x381d6e
vxorps %xmm3, %xmm3, %xmm3
cmpl %r9d, %ecx
jge 0x381d94
vaddss (%rdx,%rcx,4), %xmm3, %xmm3
incq %rcx
jmp 0x381d85
vextractf128 $0x1, %ymm1, %xmm4
vhaddps %xmm1, %xmm4, %xmm1
vshufpd $0x1, %xmm2, %xmm2, %xmm4 # xmm4 = xmm2[1,0]
vhaddps %xmm1, %xmm1, %xmm1
vaddps %xmm2, %xmm4, %xmm2
vhaddps %xmm1, %xmm1, %xmm1
vmovshdup %xmm2, %xmm4 # xmm4 = xmm2[1,1,3,3]
vaddss %xmm4, %xmm1, %xmm1
vaddss %xmm2, %xmm1, %xmm1
vaddss %xmm3, %xmm1, %xmm1
vcvtsi2ss %r9d, %xmm5, %xmm2
vdivss %xmm2, %xmm1, %xmm1
vshufps $0x0, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[0,0,0,0]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm4
vxorps %xmm3, %xmm3, %xmm3
xorl %ecx, %ecx
leal 0x8(%rcx), %esi
cmpl %r9d, %esi
jg 0x381df5
vmovups (%rdx,%rcx,4), %ymm5
vsubps %ymm4, %ymm5, %ymm5
vfmadd231ps %ymm5, %ymm5, %ymm3 # ymm3 = (ymm5 * ymm5) + ymm3
addq $0x8, %rcx
jmp 0x381dd9
vshufps $0x0, %xmm1, %xmm1, %xmm5 # xmm5 = xmm1[0,0,0,0]
vxorps %xmm4, %xmm4, %xmm4
leal 0x4(%rcx), %esi
cmpl %r9d, %esi
jg 0x381e1a
vmovups (%rdx,%rcx,4), %xmm6
vsubps %xmm5, %xmm6, %xmm6
vfmadd231ps %xmm6, %xmm6, %xmm4 # xmm4 = (xmm6 * xmm6) + xmm4
addq $0x4, %rcx
jmp 0x381dfe
vxorps %xmm5, %xmm5, %xmm5
cmpl %r9d, %ecx
jge 0x381e36
vmovss (%rdx,%rcx,4), %xmm6
vsubss %xmm1, %xmm6, %xmm6
vfmadd231ss %xmm6, %xmm6, %xmm5 # xmm5 = (xmm6 * xmm6) + xmm5
incq %rcx
jmp 0x381e1e
vextractf128 $0x1, %ymm3, %xmm6
vshufpd $0x1, %xmm4, %xmm4, %xmm7 # xmm7 = xmm4[1,0]
vaddps %xmm4, %xmm7, %xmm4
vhaddps %xmm3, %xmm6, %xmm3
vmovshdup %xmm4, %xmm6 # xmm6 = xmm4[1,1,3,3]
vhaddps %xmm3, %xmm3, %xmm3
vhaddps %xmm3, %xmm3, %xmm3
vaddss %xmm6, %xmm3, %xmm3
vaddss %xmm4, %xmm3, %xmm3
vaddss %xmm5, %xmm3, %xmm3
vdivss %xmm2, %xmm3, %xmm2
vaddss %xmm0, %xmm2, %xmm0
vrsqrtss %xmm0, %xmm0, %xmm2
vmulss %xmm2, %xmm0, %xmm0
vfmadd213ss 0x7394a(%rip), %xmm2, %xmm0 # xmm0 = (xmm2 * xmm0) + mem
vmulss 0x6faa2(%rip), %xmm2, %xmm2 # 0x3f1924
vmulss %xmm0, %xmm2, %xmm0
vbroadcastss 0x6f325(%rip), %xmm2 # 0x3f11b4
vxorps %xmm2, %xmm1, %xmm1
vmulss %xmm1, %xmm0, %xmm1
vshufps $0x0, %xmm0, %xmm0, %xmm2 # xmm2 = xmm0[0,0,0,0]
vinsertf128 $0x1, %xmm2, %ymm2, %ymm2
vshufps $0x0, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[0,0,0,0]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm3
testl %eax, %eax
je 0x3824f5
xorl %eax, %eax
leal 0x8(%rax), %ecx
cmpl %r9d, %ecx
jg 0x381ee9
movq -0x28(%rsp), %rcx
vmovups (%rcx,%rax,4), %ymm4
vmovups (%rdx,%rax,4), %ymm5
vfmadd132ps %ymm2, %ymm3, %ymm5 # ymm5 = (ymm5 * ymm2) + ymm3
movq -0x30(%rsp), %rcx
vfmadd213ps (%rcx,%rax,4), %ymm4, %ymm5 # ymm5 = (ymm4 * ymm5) + mem
vmovups %ymm5, (%rdx,%rax,4)
addq $0x8, %rax
jmp 0x381eb7
vshufps $0x0, %xmm0, %xmm0, %xmm2 # xmm2 = xmm0[0,0,0,0]
vshufps $0x0, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[0,0,0,0]
leal 0x4(%rax), %ecx
cmpl %r9d, %ecx
jg 0x381f4c
movq -0x28(%rsp), %rcx
vmovups (%rcx,%rax,4), %xmm4
vmovups (%rdx,%rax,4), %xmm5
vfmadd132ps %xmm2, %xmm3, %xmm5 # xmm5 = (xmm5 * xmm2) + xmm3
movq -0x30(%rsp), %rcx
vfmadd213ps (%rcx,%rax,4), %xmm4, %xmm5 # xmm5 = (xmm4 * xmm5) + mem
vmovups %xmm5, (%rdx,%rax,4)
addq $0x4, %rax
jmp 0x381ef3
vmovss (%rdx,%rax,4), %xmm2
vfmadd132ss %xmm0, %xmm1, %xmm2 # xmm2 = (xmm2 * xmm0) + xmm1
movq -0x28(%rsp), %rcx
vmovss (%rcx,%rax,4), %xmm3
movq -0x30(%rsp), %rcx
vfmadd213ss (%rcx,%rax,4), %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + mem
vmovss %xmm3, (%rdx,%rax,4)
incq %rax
cmpl %r9d, %eax
jl 0x381f25
jmp 0x382552
imull %r9d, %r8d
vcvtsi2ss %r8d, %xmm0, %xmm0
imull -0x34(%rsp), %r8d
vmovss 0x6cd1b(%rip), %xmm1 # 0x3eec88
vdivss %xmm0, %xmm1, %xmm0
vshufps $0x0, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm2
xorl %r9d, %r9d
vbroadcastss 0x6f22c(%rip), %xmm5 # 0x3f11b4
movl $0x8, %edx
vbroadcastss 0x6f98e(%rip), %xmm9 # 0x3f1924
vbroadcastss 0x6f215(%rip), %ymm10 # 0x3f11b4
vxorps %xmm11, %xmm11, %xmm11
cmpq -0x10(%rsp), %r9
je 0x382552
movq (%rsi), %r15
movq 0x10(%rsi), %r12
movq 0x40(%rsi), %rcx
movq %rcx, %r14
imulq %r9, %r14
imulq %r12, %r14
addq %r15, %r14
movq -0x18(%rsp), %rax
movq -0x18(%rax), %rax
movq -0x20(%rsp), %rdi
movl 0xd8(%rdi,%rax), %r10d
vmovss 0xd4(%rdi,%rax), %xmm14
xorl %eax, %eax
vxorps %xmm3, %xmm3, %xmm3
xorl %r13d, %r13d
movq %r14, %rdi
leal 0x8(%r13), %r11d
cmpl %r8d, %r11d
jg 0x38200d
vaddps (%rdi), %ymm3, %ymm3
addq $0x20, %rdi
addq $0x8, %rax
movl %r11d, %r13d
jmp 0x381ff3
vxorps %xmm4, %xmm4, %xmm4
addl $0x4, %r13d
cmpl %r8d, %r13d
jg 0x382028
vaddps (%rdi), %xmm4, %xmm4
addq $0x10, %rdi
addq $0x4, %rax
jmp 0x382011
imulq %r12, %rcx
imulq %r9, %rcx
addq %rcx, %r15
vxorps %xmm6, %xmm6, %xmm6
cmpl %r8d, %eax
jge 0x382047
vaddss (%r15,%rax,4), %xmm6, %xmm6
incq %rax
jmp 0x382037
movl -0x34(%rsp), %eax
cmpl $0x8, %eax
je 0x3820b6
vextractf128 $0x1, %ymm3, %xmm7
cmpl $0x4, %eax
je 0x382099
cmpl $0x1, %eax
jne 0x3820d3
vshufpd $0x1, %xmm4, %xmm4, %xmm12 # xmm12 = xmm4[1,0]
vhaddps %xmm3, %xmm7, %xmm3
vaddps %xmm4, %xmm12, %xmm4
vhaddps %xmm3, %xmm3, %xmm3
vmovshdup %xmm4, %xmm7 # xmm7 = xmm4[1,1,3,3]
vhaddps %xmm3, %xmm3, %xmm3
vaddss %xmm7, %xmm3, %xmm3
vaddss %xmm4, %xmm3, %xmm3
vaddss %xmm6, %xmm3, %xmm3
vmulss %xmm0, %xmm3, %xmm15
vmovss %xmm15, %xmm11, %xmm12 # xmm12 = xmm15[0],xmm11[1,2,3]
vshufps $0x0, %xmm15, %xmm15, %xmm13 # xmm13 = xmm15[0,0,0,0]
movb $0x1, %bpl
jmp 0x3820e4
vaddps %xmm4, %xmm3, %xmm3
vaddps %xmm3, %xmm7, %xmm3
vmulps %xmm1, %xmm3, %xmm13
movb $0x1, %r12b
xorl %ebp, %ebp
vmovaps %ymm13, %ymm12
vmovaps %xmm13, %xmm15
jmp 0x3820e7
vmulps %ymm2, %ymm3, %ymm12
vshufps $0x0, %xmm12, %xmm12, %xmm13 # xmm13 = xmm12[0,0,0,0]
movb $0x1, %r13b
xorl %ebp, %ebp
vmovaps %xmm12, %xmm15
xorl %r12d, %r12d
vmovaps %ymm12, %ymm4
jmp 0x3820f0
vxorps %xmm15, %xmm15, %xmm15
vxorps %xmm12, %xmm12, %xmm12
vxorps %xmm13, %xmm13, %xmm13
xorl %ebp, %ebp
xorl %r12d, %r12d
vinsertf128 $0x1, %xmm13, %ymm13, %ymm4
xorl %r13d, %r13d
xorl %eax, %eax
vxorps %xmm3, %xmm3, %xmm3
xorl %edi, %edi
movq %r14, %rcx
leal 0x8(%rdi), %r11d
cmpl %r8d, %r11d
jg 0x38211e
vmovups (%rcx), %ymm6
vsubps %ymm4, %ymm6, %ymm6
vfmadd231ps %ymm6, %ymm6, %ymm3 # ymm3 = (ymm6 * ymm6) + ymm3
addq $0x20, %rcx
addq $0x8, %rax
movl %r11d, %edi
jmp 0x3820fb
vxorps %xmm4, %xmm4, %xmm4
addl $0x4, %edi
cmpl %r8d, %edi
jg 0x382142
vmovups (%rcx), %xmm6
vsubps %xmm13, %xmm6, %xmm6
vfmadd231ps %xmm6, %xmm6, %xmm4 # xmm4 = (xmm6 * xmm6) + xmm4
addq $0x10, %rcx
addq $0x4, %rax
jmp 0x382122
vxorps %xmm6, %xmm6, %xmm6
cmpl %r8d, %eax
jge 0x382160
vmovss (%r15,%rax,4), %xmm7
vsubss %xmm15, %xmm7, %xmm7
vfmadd231ss %xmm7, %xmm7, %xmm6 # xmm6 = (xmm7 * xmm7) + xmm6
incq %rax
jmp 0x382146
testb %r13b, %r13b
jne 0x38216c
vxorps %xmm13, %xmm13, %xmm13
jmp 0x382170
vmulps %ymm2, %ymm3, %ymm13
vextractf128 $0x1, %ymm3, %xmm7
testb %r12b, %r12b
je 0x38218d
vaddps %xmm4, %xmm3, %xmm4
vaddps %xmm4, %xmm7, %xmm4
vmulps %xmm1, %xmm4, %xmm8
vblendps $0xf, %ymm8, %ymm13, %ymm13 # ymm13 = ymm8[0,1,2,3],ymm13[4,5,6,7]
testb %bpl, %bpl
je 0x3821c1
vshufpd $0x1, %xmm4, %xmm4, %xmm8 # xmm8 = xmm4[1,0]
vaddps %xmm4, %xmm8, %xmm4
vhaddps %xmm3, %xmm7, %xmm3
vmovshdup %xmm4, %xmm7 # xmm7 = xmm4[1,1,3,3]
vhaddps %xmm3, %xmm3, %xmm3
vhaddps %xmm3, %xmm3, %xmm3
vaddss %xmm7, %xmm6, %xmm6
vaddss %xmm4, %xmm6, %xmm4
vaddss %xmm4, %xmm3, %xmm3
vmulss %xmm0, %xmm3, %xmm3
vblendps $0x1, %ymm3, %ymm13, %ymm13 # ymm13 = ymm3[0],ymm13[1,2,3,4,5,6,7]
movl -0x34(%rsp), %eax
cmpl $0x1, %eax
je 0x38225c
cmpl $0x4, %eax
je 0x38221e
cmpl $0x8, %eax
jne 0x382292
vshufps $0x0, %xmm14, %xmm14, %xmm3 # xmm3 = xmm14[0,0,0,0]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm3
vaddps %ymm3, %ymm13, %ymm3
vrsqrtps %ymm3, %ymm4
vmulps %ymm4, %ymm3, %ymm3
vbroadcastss 0x735c7(%rip), %ymm6 # 0x3f57c4
vfmadd213ps %ymm6, %ymm4, %ymm3 # ymm3 = (ymm4 * ymm3) + ymm6
vbroadcastss 0x6f719(%rip), %ymm6 # 0x3f1924
vmulps %ymm6, %ymm4, %ymm4
vmulps %ymm3, %ymm4, %ymm13
vxorps %ymm10, %ymm12, %ymm3
vmulps %ymm3, %ymm13, %ymm12
jmp 0x382292
vshufps $0x0, %xmm14, %xmm14, %xmm3 # xmm3 = xmm14[0,0,0,0]
vaddps %xmm3, %xmm13, %xmm3
vrsqrtps %xmm3, %xmm4
vmulps %xmm4, %xmm3, %xmm3
vbroadcastss 0x7358b(%rip), %xmm6 # 0x3f57c4
vfmadd213ps %xmm6, %xmm4, %xmm3 # xmm3 = (xmm4 * xmm3) + xmm6
vmulps %xmm4, %xmm9, %xmm4
vmulps %xmm3, %xmm4, %xmm3
vxorps %xmm5, %xmm12, %xmm4
vmulps %xmm4, %xmm3, %xmm4
vblendps $0xf, %ymm3, %ymm13, %ymm13 # ymm13 = ymm3[0,1,2,3],ymm13[4,5,6,7]
vblendps $0xf, %ymm4, %ymm12, %ymm12 # ymm12 = ymm4[0,1,2,3],ymm12[4,5,6,7]
jmp 0x382292
vaddss %xmm14, %xmm13, %xmm3
vrsqrtss %xmm3, %xmm3, %xmm4
vmulss %xmm4, %xmm3, %xmm3
vfmadd213ss 0x73552(%rip), %xmm4, %xmm3 # xmm3 = (xmm4 * xmm3) + mem
vmulss 0x6f6aa(%rip), %xmm4, %xmm4 # 0x3f1924
vmulss %xmm3, %xmm4, %xmm3
vblendps $0x1, %ymm3, %ymm13, %ymm13 # ymm13 = ymm3[0],ymm13[1,2,3,4,5,6,7]
vxorps %xmm5, %xmm15, %xmm4
vmulss %xmm4, %xmm3, %xmm3
vblendps $0x1, %ymm3, %ymm12, %ymm12 # ymm12 = ymm3[0],ymm12[1,2,3,4,5,6,7]
testl %r10d, %r10d
je 0x3822dc
testb %r13b, %r13b
je 0x3822f9
movl %edx, %ecx
movq -0x30(%rsp), %rax
movq -0x28(%rsp), %r10
cmpl %r8d, %ecx
jg 0x382303
vbroadcastss (%r10), %ymm3
vbroadcastss (%rax), %ymm4
vmovups (%r14), %ymm6
vfmadd132ps %ymm13, %ymm12, %ymm6 # ymm6 = (ymm6 * ymm13) + ymm12
vfmadd213ps %ymm4, %ymm3, %ymm6 # ymm6 = (ymm3 * ymm6) + ymm4
vmovups %ymm6, (%r14)
addq $0x20, %r14
addq $0x4, %r10
addq $0x4, %rax
addl $0x8, %ecx
jmp 0x3822a8
vmovaps %xmm13, %xmm3
testb %r12b, %r12b
jne 0x38246e
vshufps $0x0, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[0,0,0,0]
vshufps $0x0, %xmm12, %xmm12, %xmm14 # xmm14 = xmm12[0,0,0,0]
jmp 0x382477
movq -0x30(%rsp), %rax
movq -0x28(%rsp), %r10
testb %r12b, %r12b
je 0x3823a3
vinsertf128 $0x1, %xmm13, %ymm13, %ymm3
vinsertf128 $0x1, %xmm12, %ymm12, %ymm4
xorl %ecx, %ecx
movq %r14, %r15
leal 0x8(%rcx), %edi
cmpl %r8d, %edi
jg 0x382393
vbroadcastss 0x4(%r10,%rcx), %xmm6
vbroadcastss (%r10,%rcx), %xmm7
vinsertf128 $0x1, %xmm6, %ymm7, %ymm6
vbroadcastss 0x4(%rax,%rcx), %xmm7
vbroadcastss (%rax,%rcx), %xmm8
vinsertf128 $0x1, %xmm7, %ymm8, %ymm7
vmovups (%r15), %ymm8
vfmadd132ps %ymm3, %ymm4, %ymm8 # ymm8 = (ymm8 * ymm3) + ymm4
vfmadd213ps %ymm7, %ymm6, %ymm8 # ymm8 = (ymm6 * ymm8) + ymm7
vmovups %ymm8, (%r15)
addq $0x20, %r15
addq $0x8, %rcx
jmp 0x38231d
vbroadcastss (%r10,%rcx), %xmm3
vbroadcastss (%rax,%rcx), %xmm4
vmovups (%r14,%rcx,4), %xmm6
vfmadd132ps %xmm13, %xmm12, %xmm6 # xmm6 = (xmm6 * xmm13) + xmm12
vfmadd213ps %xmm4, %xmm3, %xmm6 # xmm6 = (xmm3 * xmm6) + xmm4
vmovups %xmm6, (%r14,%rcx,4)
addq $0x10, %r15
addq $0x4, %rcx
leal 0x4(%rcx), %edi
cmpl %r8d, %edi
jle 0x382369
addq %rcx, %rax
addq %rcx, %r10
jmp 0x3823a6
movq %r14, %r15
testb %bpl, %bpl
je 0x3824ed
vshufps $0x0, %xmm13, %xmm13, %xmm3 # xmm3 = xmm13[0,0,0,0]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm4
vshufps $0x0, %xmm12, %xmm12, %xmm6 # xmm6 = xmm12[0,0,0,0]
vinsertf128 $0x1, %xmm6, %ymm6, %ymm6
xorl %r14d, %r14d
xorl %ecx, %ecx
movq %rax, %r12
xorl %edi, %edi
movq %r10, %r13
movq %r15, %rbp
leal 0x8(%rdi), %ebx
cmpl %r8d, %ebx
jg 0x382412
vmovups (%r13), %ymm7
vmovups (%rbp), %ymm8
vfmadd132ps %ymm4, %ymm6, %ymm8 # ymm8 = (ymm8 * ymm4) + ymm6
vfmadd213ps (%r12), %ymm7, %ymm8 # ymm8 = (ymm7 * ymm8) + mem
vmovups %ymm8, (%rbp)
addq $0x20, %rbp
addq $0x20, %r13
addq $0x20, %r12
addq $0x20, %rcx
addq $0x8, %r14
movl %ebx, %edi
jmp 0x3823d7
vshufps $0x0, %xmm12, %xmm12, %xmm4 # xmm4 = xmm12[0,0,0,0]
addl $0x4, %edi
cmpl %r8d, %edi
jg 0x382467
vmovups (%r10,%rcx), %xmm6
vmovups (%r15,%rcx), %xmm7
vfmadd132ps %xmm3, %xmm4, %xmm7 # xmm7 = (xmm7 * xmm3) + xmm4
vfmadd213ps (%rax,%rcx), %xmm6, %xmm7 # xmm7 = (xmm6 * xmm7) + mem
vmovups %xmm7, (%r15,%rcx)
addq $0x10, %rcx
addq $0x4, %r14
jmp 0x382418
vmovss (%r15,%r14,4), %xmm3
vfmadd132ss %xmm13, %xmm12, %xmm3 # xmm3 = (xmm3 * xmm13) + xmm12
vmovss (%r10,%r14,4), %xmm4
vfmadd213ss (%rax,%r14,4), %xmm3, %xmm4 # xmm4 = (xmm3 * xmm4) + mem
vmovss %xmm4, (%r15,%r14,4)
incq %r14
cmpl %r8d, %r14d
jl 0x382447
jmp 0x3824ed
vmovaps %xmm3, %xmm4
vmovaps %xmm12, %xmm14
vmovaps %ymm12, %ymm6
testb %r13b, %r13b
jne 0x38248c
vinsertf128 $0x1, %xmm4, %ymm4, %ymm13
vinsertf128 $0x1, %xmm14, %ymm14, %ymm6
xorl %eax, %eax
xorl %ecx, %ecx
leal 0x8(%rcx), %edi
cmpl %r8d, %edi
jg 0x3824ca
vmovups (%r14), %ymm7
vfmadd132ps %ymm13, %ymm6, %ymm7 # ymm7 = (ymm7 * ymm13) + ymm6
vmovups %ymm7, (%r14)
addq $0x20, %r14
addq $0x8, %rax
movl %edi, %ecx
jmp 0x382490
vmovups (%r14), %xmm6
vfmadd132ps %xmm4, %xmm14, %xmm6 # xmm6 = (xmm6 * xmm4) + xmm14
vmovups %xmm6, (%r14)
addq $0x10, %r14
addq $0x4, %rax
addl $0x4, %ecx
cmpl %r8d, %ecx
jle 0x3824b3
jmp 0x3824e8
vmovss (%r15,%rax,4), %xmm4
vfmadd132ss %xmm3, %xmm12, %xmm4 # xmm4 = (xmm4 * xmm3) + xmm12
vmovss %xmm4, (%r15,%rax,4)
incq %rax
cmpl %r8d, %eax
jl 0x3824d4
incq %r9
jmp 0x381fa4
xorl %eax, %eax
leal 0x8(%rax), %ecx
cmpl %r9d, %ecx
jg 0x382514
vmovups (%rdx,%rax,4), %ymm4
vfmadd132ps %ymm2, %ymm3, %ymm4 # ymm4 = (ymm4 * ymm2) + ymm3
vmovups %ymm4, (%rdx,%rax,4)
addq $0x8, %rax
jmp 0x3824f7
vshufps $0x0, %xmm0, %xmm0, %xmm2 # xmm2 = xmm0[0,0,0,0]
vshufps $0x0, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[0,0,0,0]
leal 0x4(%rax), %ecx
cmpl %r9d, %ecx
jg 0x38254d
vmovups (%rdx,%rax,4), %xmm4
vfmadd132ps %xmm2, %xmm3, %xmm4 # xmm4 = (xmm4 * xmm2) + xmm3
vmovups %xmm4, (%rdx,%rax,4)
addq $0x4, %rax
jmp 0x38251e
vmovss (%rdx,%rax,4), %xmm2
vfmadd132ss %xmm0, %xmm1, %xmm2 # xmm2 = (xmm2 * xmm0) + xmm1
vmovss %xmm2, (%rdx,%rax,4)
incq %rax
cmpl %r9d, %eax
jl 0x38253b
xorl %eax, %eax
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/layernorm_x86_fma.cpp |
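The listing above is the tail of the FMA build's fast_1d_layer_norm body: per row it accumulates the sum and the sum of squared deviations, turns the variance into a scale with vrsqrtps plus one Newton-Raphson refinement step, and negates the mean (the vxorps with a broadcast sign-bit constant) so the affine tail can apply the whole normalization as fused multiply-adds. A minimal intrinsics sketch of that refinement step, assuming the broadcast constants at 0x3f57c4 and 0x3f1924 hold -3.0f and -0.5f:

#include <immintrin.h>

// One Newton-Raphson step on top of the ~12-bit vrsqrtps estimate:
// y' = 0.5f * y * (3 - x*y*y), computed as -0.5f * y * (x*y*y - 3)
// to match the instruction order in the listing above.
static inline __m256 rsqrt_nr_ps(__m256 x)
{
    __m256 y = _mm256_rsqrt_ps(x);                    // rough 1/sqrt(x)
    __m256 t = _mm256_mul_ps(x, y);                   // x*y
    t = _mm256_fmadd_ps(t, y, _mm256_set1_ps(-3.0f)); // x*y*y - 3
    y = _mm256_mul_ps(y, _mm256_set1_ps(-0.5f));      // -0.5*y
    return _mm256_mul_ps(y, t);                       // 0.5*y*(3 - x*y*y)
}

With scale = rsqrt_nr_ps(var + eps) and bias = -mean * scale, each output element then needs only a single x * scale + bias fused multiply-add, which is the vfmadd132ps pattern visible in the store loops above.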
virtual thunk to ncnn::LayerNorm_x86_fma::forward_inplace(ncnn::Mat&, ncnn::Option const&) const | int LayerNorm_x86_fma::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
{
int dims = bottom_top_blob.dims;
int elempack = bottom_top_blob.elempack;
int w = bottom_top_blob.w;
int h = bottom_top_blob.h;
int channels = bottom_top_blob.c;
const float* gamma = gamma_data;
const float* beta = beta_data;
if (dims == 1)
{
int elemcount = w * elempack;
float* ptr = bottom_top_blob;
        // 1D layer norm is special. Treat the packed elements as unpacked.
fast_1d_layer_norm(ptr, 1, elemcount, elemcount, gamma, beta, affine, eps);
}
if (dims == 2)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = 0; i < h; ++i)
{
float* ptr = bottom_top_blob.row(i);
fast_1d_layer_norm(ptr, elempack, w, w * elempack, gamma, beta, affine, eps);
}
}
if (dims == 3)
{
if (affine_size == w)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; ++q)
{
for (int i = 0; i < h; ++i)
{
float* ptr = bottom_top_blob.channel(q).row(i);
fast_1d_layer_norm(ptr, elempack, w, w * elempack, gamma, beta, affine, eps);
}
}
}
else // if (affine_size == w * h)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; ++q)
{
float* ptr = bottom_top_blob.channel(q);
fast_1d_layer_norm(ptr, elempack, w * h, w * h * elempack, gamma, beta, affine, eps);
}
}
}
return 0;
} | pushq %rax
movq (%rdi), %rax
addq -0x58(%rax), %rdi
callq 0x38112c
xorl %eax, %eax
popq %rcx
retq
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/layernorm_x86_fma.cpp |
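The thunk body above adjusts `this` by a vcall offset read from the vtable (addq -0x58(%rax), %rdi) before entering the real forward_inplace, which is the standard pattern when an override is reached through a virtual base. A hypothetical, simplified hierarchy (not ncnn's exact class layout or signatures) showing why the compiler emits such a thunk:

struct Layer
{
    virtual ~Layer() = default;
    virtual int forward_inplace(float* data, int n) const { return -1; }
};

struct LayerNorm : virtual Layer
{
    float eps = 1e-5f;
};

struct LayerNorm_x86 : LayerNorm
{
    // A call dispatched through the Layer vtable lands on a compiler-generated
    // thunk that first shifts `this` from the virtual-base subobject to the
    // full object, then falls through into this body.
    int forward_inplace(float* data, int n) const override
    {
        for (int i = 0; i < n; i++)
            data[i] += eps; // placeholder body for illustration only
        return 0;
    }
};

Note the generated thunk also rematerializes the return value with xorl %eax, %eax, which is consistent with the source's only return statement being `return 0`.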
ncnn::LayerNorm_x86_avx::forward_inplace(ncnn::Mat&, ncnn::Option const&) const | int LayerNorm_x86_avx::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
{
int dims = bottom_top_blob.dims;
int elempack = bottom_top_blob.elempack;
int w = bottom_top_blob.w;
int h = bottom_top_blob.h;
int channels = bottom_top_blob.c;
const float* gamma = gamma_data;
const float* beta = beta_data;
if (dims == 1)
{
int elemcount = w * elempack;
float* ptr = bottom_top_blob;
        // 1D layer norm is special. Treat the packed elements as unpacked.
fast_1d_layer_norm(ptr, 1, elemcount, elemcount, gamma, beta, affine, eps);
}
if (dims == 2)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = 0; i < h; ++i)
{
float* ptr = bottom_top_blob.row(i);
fast_1d_layer_norm(ptr, elempack, w, w * elempack, gamma, beta, affine, eps);
}
}
if (dims == 3)
{
if (affine_size == w)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; ++q)
{
for (int i = 0; i < h; ++i)
{
float* ptr = bottom_top_blob.channel(q).row(i);
fast_1d_layer_norm(ptr, elempack, w, w * elempack, gamma, beta, affine, eps);
}
}
}
else // if (affine_size == w * h)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; ++q)
{
float* ptr = bottom_top_blob.channel(q);
fast_1d_layer_norm(ptr, elempack, w * h, w * h * elempack, gamma, beta, affine, eps);
}
}
}
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
movq %rdi, %r8
movl 0x18(%rsi), %eax
movl %eax, -0x34(%rsp)
movl 0x28(%rsi), %eax
movl 0x2c(%rsi), %r9d
movq (%rdi), %rcx
movq %rcx, -0x18(%rsp)
movq -0x18(%rcx), %rcx
movq 0xe0(%rdi,%rcx), %rdi
movq %rdi, -0x28(%rsp)
movq %r8, -0x20(%rsp)
movq 0x128(%r8,%rcx), %rdi
movq %rdi, -0x30(%rsp)
cmpl $0x1, %eax
je 0x3831e1
movl 0x30(%rsi), %r8d
cmpl $0x3, %eax
je 0x382bd0
cmpl $0x2, %eax
jne 0x383a14
movl $0x8, %edx
vcvtsi2ss %r9d, %xmm0, %xmm0
imull -0x34(%rsp), %r9d
vmovss 0x6c64f(%rip), %xmm1 # 0x3eec88
vdivss %xmm0, %xmm1, %xmm0
vshufps $0x0, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm2
xorl %ebx, %ebx
testl %r8d, %r8d
cmovlel %ebx, %r8d
vbroadcastss 0x6eb5a(%rip), %xmm5 # 0x3f11b4
vbroadcastss 0x6f2c1(%rip), %xmm9 # 0x3f1924
vbroadcastss 0x6eb48(%rip), %ymm10 # 0x3f11b4
vxorps %xmm11, %xmm11, %xmm11
cmpq %r8, %rbx
je 0x383a14
movq (%rsi), %r15
movq 0x10(%rsi), %rcx
movslq 0x2c(%rsi), %r10
movq %rbx, %r14
imulq %r10, %r14
imulq %rcx, %r14
addq %r15, %r14
movq -0x18(%rsp), %rax
movq -0x18(%rax), %rax
movq -0x20(%rsp), %rdi
movl 0xd8(%rdi,%rax), %r12d
vmovss 0xd4(%rdi,%rax), %xmm14
xorl %eax, %eax
vxorps %xmm3, %xmm3, %xmm3
xorl %r13d, %r13d
movq %r14, %rdi
leal 0x8(%r13), %r11d
cmpl %r9d, %r11d
jg 0x3826d8
vaddps (%rdi), %ymm3, %ymm3
addq $0x20, %rdi
addq $0x8, %rax
movl %r11d, %r13d
jmp 0x3826be
vxorps %xmm4, %xmm4, %xmm4
addl $0x4, %r13d
cmpl %r9d, %r13d
jg 0x3826f3
vaddps (%rdi), %xmm4, %xmm4
addq $0x10, %rdi
addq $0x4, %rax
jmp 0x3826dc
imulq %rbx, %rcx
imulq %r10, %rcx
addq %rcx, %r15
vxorps %xmm6, %xmm6, %xmm6
cmpl %r9d, %eax
jge 0x382712
vaddss (%r15,%rax,4), %xmm6, %xmm6
incq %rax
jmp 0x382702
movl -0x34(%rsp), %eax
cmpl $0x8, %eax
je 0x382781
vextractf128 $0x1, %ymm3, %xmm7
cmpl $0x4, %eax
je 0x382764
cmpl $0x1, %eax
jne 0x38279d
vshufpd $0x1, %xmm4, %xmm4, %xmm12 # xmm12 = xmm4[1,0]
vhaddps %xmm3, %xmm7, %xmm3
vaddps %xmm4, %xmm12, %xmm4
vhaddps %xmm3, %xmm3, %xmm3
vmovshdup %xmm4, %xmm7 # xmm7 = xmm4[1,1,3,3]
vhaddps %xmm3, %xmm3, %xmm3
vaddss %xmm7, %xmm3, %xmm3
vaddss %xmm4, %xmm3, %xmm3
vaddss %xmm6, %xmm3, %xmm3
vmulss %xmm0, %xmm3, %xmm15
vmovss %xmm15, %xmm11, %xmm12 # xmm12 = xmm15[0],xmm11[1,2,3]
vshufps $0x0, %xmm15, %xmm15, %xmm13 # xmm13 = xmm15[0,0,0,0]
movb $0x1, %bpl
jmp 0x3827ae
vaddps %xmm4, %xmm3, %xmm3
vaddps %xmm3, %xmm7, %xmm3
vmulps %xmm1, %xmm3, %xmm13
movb $0x1, %r13b
xorl %ebp, %ebp
vmovaps %ymm13, %ymm12
vmovaps %xmm13, %xmm15
jmp 0x3827b1
vmulps %ymm2, %ymm3, %ymm12
vshufps $0x0, %xmm12, %xmm12, %xmm13 # xmm13 = xmm12[0,0,0,0]
movb $0x1, %al
xorl %ebp, %ebp
vmovaps %xmm12, %xmm15
xorl %r13d, %r13d
vmovaps %ymm12, %ymm4
jmp 0x3827b9
vxorps %xmm15, %xmm15, %xmm15
vxorps %xmm12, %xmm12, %xmm12
vxorps %xmm13, %xmm13, %xmm13
xorl %ebp, %ebp
xorl %r13d, %r13d
vinsertf128 $0x1, %xmm13, %ymm13, %ymm4
xorl %eax, %eax
xorl %r10d, %r10d
vxorps %xmm3, %xmm3, %xmm3
xorl %edi, %edi
movq %r14, %rcx
leal 0x8(%rdi), %r11d
cmpl %r9d, %r11d
jg 0x3827eb
vmovups (%rcx), %ymm6
vsubps %ymm4, %ymm6, %ymm6
vmulps %ymm6, %ymm6, %ymm6
vaddps %ymm3, %ymm6, %ymm3
addq $0x20, %rcx
addq $0x8, %r10
movl %r11d, %edi
jmp 0x3827c5
vxorps %xmm4, %xmm4, %xmm4
addl $0x4, %edi
cmpl %r9d, %edi
jg 0x382812
vmovups (%rcx), %xmm6
vsubps %xmm13, %xmm6, %xmm6
vmulps %xmm6, %xmm6, %xmm6
vaddps %xmm4, %xmm6, %xmm4
addq $0x10, %rcx
addq $0x4, %r10
jmp 0x3827ef
vxorps %xmm6, %xmm6, %xmm6
cmpl %r9d, %r10d
jge 0x382833
vmovss (%r15,%r10,4), %xmm7
vsubss %xmm15, %xmm7, %xmm7
vmulss %xmm7, %xmm7, %xmm7
vaddss %xmm6, %xmm7, %xmm6
incq %r10
jmp 0x382816
testb %al, %al
jne 0x38283e
vxorps %xmm13, %xmm13, %xmm13
jmp 0x382842
vmulps %ymm2, %ymm3, %ymm13
vextractf128 $0x1, %ymm3, %xmm7
testb %r13b, %r13b
je 0x38285f
vaddps %xmm4, %xmm3, %xmm4
vaddps %xmm4, %xmm7, %xmm4
vmulps %xmm1, %xmm4, %xmm8
vblendps $0xf, %ymm8, %ymm13, %ymm13 # ymm13 = ymm8[0,1,2,3],ymm13[4,5,6,7]
testb %bpl, %bpl
je 0x382893
vshufpd $0x1, %xmm4, %xmm4, %xmm8 # xmm8 = xmm4[1,0]
vaddps %xmm4, %xmm8, %xmm4
vhaddps %xmm3, %xmm7, %xmm3
vmovshdup %xmm4, %xmm7 # xmm7 = xmm4[1,1,3,3]
vhaddps %xmm3, %xmm3, %xmm3
vhaddps %xmm3, %xmm3, %xmm3
vaddss %xmm7, %xmm6, %xmm6
vaddss %xmm4, %xmm6, %xmm4
vaddss %xmm4, %xmm3, %xmm3
vmulss %xmm0, %xmm3, %xmm3
vblendps $0x1, %ymm3, %ymm13, %ymm13 # ymm13 = ymm3[0],ymm13[1,2,3,4,5,6,7]
movl -0x34(%rsp), %ecx
cmpl $0x1, %ecx
je 0x382934
cmpl $0x4, %ecx
je 0x3828f3
cmpl $0x8, %ecx
jne 0x38296d
vshufps $0x0, %xmm14, %xmm14, %xmm3 # xmm3 = xmm14[0,0,0,0]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm3
vaddps %ymm3, %ymm13, %ymm3
vrsqrtps %ymm3, %ymm4
vmulps %ymm4, %ymm3, %ymm3
vmulps %ymm4, %ymm3, %ymm3
vbroadcastss 0x72ef1(%rip), %ymm6 # 0x3f57c4
vaddps %ymm6, %ymm3, %ymm3
vbroadcastss 0x6f044(%rip), %ymm6 # 0x3f1924
vmulps %ymm6, %ymm4, %ymm4
vmulps %ymm3, %ymm4, %ymm13
vxorps %ymm10, %ymm12, %ymm3
vmulps %ymm3, %ymm13, %ymm12
jmp 0x38296d
vshufps $0x0, %xmm14, %xmm14, %xmm3 # xmm3 = xmm14[0,0,0,0]
vaddps %xmm3, %xmm13, %xmm3
vrsqrtps %xmm3, %xmm4
vmulps %xmm4, %xmm3, %xmm3
vmulps %xmm4, %xmm3, %xmm3
vbroadcastss 0x72eb2(%rip), %xmm6 # 0x3f57c4
vaddps %xmm6, %xmm3, %xmm3
vmulps %xmm4, %xmm9, %xmm4
vmulps %xmm3, %xmm4, %xmm3
vxorps %xmm5, %xmm12, %xmm4
vmulps %xmm4, %xmm3, %xmm4
vblendps $0xf, %ymm3, %ymm13, %ymm13 # ymm13 = ymm3[0,1,2,3],ymm13[4,5,6,7]
vblendps $0xf, %ymm4, %ymm12, %ymm12 # ymm12 = ymm4[0,1,2,3],ymm12[4,5,6,7]
jmp 0x38296d
vaddss %xmm14, %xmm13, %xmm3
vrsqrtss %xmm3, %xmm3, %xmm4
vmulss %xmm4, %xmm3, %xmm3
vmulss %xmm4, %xmm3, %xmm3
vaddss 0x72e77(%rip), %xmm3, %xmm3 # 0x3f57c4
vmulss 0x6efcf(%rip), %xmm4, %xmm4 # 0x3f1924
vmulss %xmm3, %xmm4, %xmm3
vblendps $0x1, %ymm3, %ymm13, %ymm13 # ymm13 = ymm3[0],ymm13[1,2,3,4,5,6,7]
vxorps %xmm5, %xmm15, %xmm4
vmulss %xmm4, %xmm3, %xmm3
vblendps $0x1, %ymm3, %ymm12, %ymm12 # ymm12 = ymm3[0],ymm12[1,2,3,4,5,6,7]
testl %r12d, %r12d
je 0x3829b8
testb %al, %al
je 0x3829d5
movl %edx, %ecx
movq -0x30(%rsp), %rax
movq -0x28(%rsp), %r15
cmpl %r9d, %ecx
jg 0x3829df
vbroadcastss (%r15), %ymm3
vbroadcastss (%rax), %ymm4
vmulps (%r14), %ymm13, %ymm6
vaddps %ymm6, %ymm12, %ymm6
vmulps %ymm6, %ymm3, %ymm3
vaddps %ymm4, %ymm3, %ymm3
vmovups %ymm3, (%r14)
addq $0x20, %r14
addq $0x4, %r15
addq $0x4, %rax
addl $0x8, %ecx
jmp 0x382982
vmovaps %xmm13, %xmm3
testb %r13b, %r13b
jne 0x382b4d
vshufps $0x0, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[0,0,0,0]
vshufps $0x0, %xmm12, %xmm12, %xmm14 # xmm14 = xmm12[0,0,0,0]
jmp 0x382b56
movq -0x30(%rsp), %rax
movq -0x28(%rsp), %r15
testb %r13b, %r13b
je 0x382a85
vinsertf128 $0x1, %xmm13, %ymm13, %ymm3
vinsertf128 $0x1, %xmm12, %ymm12, %ymm4
xorl %ecx, %ecx
movq %r14, %r12
leal 0x8(%rcx), %edi
cmpl %r9d, %edi
jg 0x382a75
vbroadcastss 0x4(%r15,%rcx), %xmm6
vbroadcastss (%r15,%rcx), %xmm7
vinsertf128 $0x1, %xmm6, %ymm7, %ymm6
vbroadcastss 0x4(%rax,%rcx), %xmm7
vbroadcastss (%rax,%rcx), %xmm8
vinsertf128 $0x1, %xmm7, %ymm8, %ymm7
vmulps (%r12), %ymm3, %ymm8
vaddps %ymm4, %ymm8, %ymm8
vmulps %ymm6, %ymm8, %ymm6
vaddps %ymm6, %ymm7, %ymm6
vmovups %ymm6, (%r12)
addq $0x20, %r12
addq $0x8, %rcx
jmp 0x3829f9
vbroadcastss (%r15,%rcx), %xmm3
vbroadcastss (%rax,%rcx), %xmm4
vmulps (%r14,%rcx,4), %xmm13, %xmm6
vaddps %xmm6, %xmm12, %xmm6
vmulps %xmm6, %xmm3, %xmm3
vaddps %xmm4, %xmm3, %xmm3
vmovups %xmm3, (%r14,%rcx,4)
addq $0x10, %r12
addq $0x4, %rcx
leal 0x4(%rcx), %edi
cmpl %r9d, %edi
jle 0x382a49
addq %rcx, %rax
addq %rcx, %r15
jmp 0x382a88
movq %r14, %r12
testb %bpl, %bpl
je 0x382bc8
vshufps $0x0, %xmm13, %xmm13, %xmm3 # xmm3 = xmm13[0,0,0,0]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm4
vshufps $0x0, %xmm12, %xmm12, %xmm6 # xmm6 = xmm12[0,0,0,0]
vinsertf128 $0x1, %xmm6, %ymm6, %ymm6
xorl %r14d, %r14d
xorl %ecx, %ecx
movq %rax, %r10
xorl %edi, %edi
movq %r15, %r13
movq %r12, %rbp
leal 0x8(%rdi), %r11d
cmpl %r9d, %r11d
jg 0x382af4
vmulps (%rbp), %ymm4, %ymm7
vaddps %ymm6, %ymm7, %ymm7
vmulps (%r13), %ymm7, %ymm7
vaddps (%r10), %ymm7, %ymm7
vmovups %ymm7, (%rbp)
addq $0x20, %rbp
addq $0x20, %r13
addq $0x20, %r10
addq $0x20, %rcx
addq $0x8, %r14
movl %r11d, %edi
jmp 0x382ab9
vshufps $0x0, %xmm12, %xmm12, %xmm4 # xmm4 = xmm12[0,0,0,0]
addl $0x4, %edi
cmpl %r9d, %edi
jg 0x382b46
vmulps (%r12,%rcx), %xmm3, %xmm6
vaddps %xmm4, %xmm6, %xmm6
vmulps (%r15,%rcx), %xmm6, %xmm6
vaddps (%rax,%rcx), %xmm6, %xmm6
vmovups %xmm6, (%r12,%rcx)
addq $0x10, %rcx
addq $0x4, %r14
jmp 0x382afa
vmulss (%r12,%r14,4), %xmm13, %xmm3
vaddss %xmm3, %xmm12, %xmm3
vmulss (%r15,%r14,4), %xmm3, %xmm3
vaddss (%rax,%r14,4), %xmm3, %xmm3
vmovss %xmm3, (%r12,%r14,4)
incq %r14
cmpl %r9d, %r14d
jl 0x382b27
jmp 0x382bc8
vmovaps %xmm3, %xmm4
vmovaps %xmm12, %xmm14
vmovaps %ymm12, %ymm6
testb %al, %al
jne 0x382b6a
vinsertf128 $0x1, %xmm4, %ymm4, %ymm13
vinsertf128 $0x1, %xmm14, %ymm14, %ymm6
xorl %eax, %eax
xorl %ecx, %ecx
leal 0x8(%rcx), %edi
cmpl %r9d, %edi
jg 0x382ba6
vmulps (%r14), %ymm13, %ymm7
vaddps %ymm6, %ymm7, %ymm7
vmovups %ymm7, (%r14)
addq $0x20, %r14
addq $0x8, %rax
movl %edi, %ecx
jmp 0x382b6e
vmulps (%r14), %xmm4, %xmm6
vaddps %xmm6, %xmm14, %xmm6
vmovups %xmm6, (%r14)
addq $0x10, %r14
addq $0x4, %rax
addl $0x4, %ecx
cmpl %r9d, %ecx
jle 0x382b90
jmp 0x382bc3
vmulss (%r15,%rax,4), %xmm3, %xmm4
vaddss %xmm4, %xmm12, %xmm4
vmovss %xmm4, (%r15,%rax,4)
incq %rax
cmpl %r9d, %eax
jl 0x382bb0
incq %rbx
jmp 0x382671
movl 0x38(%rsi), %eax
xorl %edi, %edi
testl %eax, %eax
cmovlel %edi, %eax
movq %rax, -0x10(%rsp)
movq -0x20(%rsp), %rax
cmpl %r9d, 0xd0(%rax,%rcx)
jne 0x38340a
vcvtsi2ss %r9d, %xmm0, %xmm0
imull -0x34(%rsp), %r9d
vmovss 0x6c083(%rip), %xmm1 # 0x3eec88
vdivss %xmm0, %xmm1, %xmm0
vshufps $0x0, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm2
testl %r8d, %r8d
cmovlel %edi, %r8d
vbroadcastss 0x6e590(%rip), %xmm5 # 0x3f11b4
vbroadcastss 0x6ecf7(%rip), %xmm9 # 0x3f1924
vbroadcastss 0x6e57e(%rip), %ymm10 # 0x3f11b4
vxorps %xmm11, %xmm11, %xmm11
movq %rsi, -0x8(%rsp)
cmpq -0x10(%rsp), %rdi
je 0x383a14
movq -0x20(%rsp), %rax
movq (%rax), %rax
movq %rax, -0x18(%rsp)
xorl %r15d, %r15d
cmpq %r8, %r15
je 0x3831d9
movslq 0x2c(%rsi), %r11
movq (%rsi), %r13
movq 0x10(%rsi), %rcx
movq 0x40(%rsi), %rbp
movq %rdi, %rdx
imulq %rdi, %rbp
movq %rbp, %rax
imulq %rcx, %rax
addq %r13, %rax
imulq %r15, %r11
movq %r11, %r14
imulq %rcx, %r14
addq %rax, %r14
movq -0x18(%rsp), %rax
movq -0x18(%rax), %rax
movq -0x20(%rsp), %rdi
movl 0xd8(%rdi,%rax), %r10d
vmovss 0xd4(%rdi,%rax), %xmm14
xorl %eax, %eax
vxorps %xmm3, %xmm3, %xmm3
xorl %edi, %edi
movq %r14, %r12
movl -0x34(%rsp), %esi
leal 0x8(%rdi), %ebx
cmpl %r9d, %ebx
jg 0x382cda
vaddps (%r12), %ymm3, %ymm3
addq $0x20, %r12
addq $0x8, %rax
movl %ebx, %edi
jmp 0x382cc0
vxorps %xmm4, %xmm4, %xmm4
addl $0x4, %edi
cmpl %r9d, %edi
jg 0x382cf6
vaddps (%r12), %xmm4, %xmm4
addq $0x10, %r12
addq $0x4, %rax
jmp 0x382cde
addq %r11, %rbp
imulq %rbp, %rcx
addq %rcx, %r13
vxorps %xmm6, %xmm6, %xmm6
cmpl %r9d, %eax
jge 0x382d15
vaddss (%r13,%rax,4), %xmm6, %xmm6
incq %rax
jmp 0x382d04
cmpl $0x8, %esi
je 0x382d80
vextractf128 $0x1, %ymm3, %xmm7
cmpl $0x4, %esi
je 0x382d63
cmpl $0x1, %esi
jne 0x382d9c
vshufpd $0x1, %xmm4, %xmm4, %xmm12 # xmm12 = xmm4[1,0]
vhaddps %xmm3, %xmm7, %xmm3
vaddps %xmm4, %xmm12, %xmm4
vhaddps %xmm3, %xmm3, %xmm3
vmovshdup %xmm4, %xmm7 # xmm7 = xmm4[1,1,3,3]
vhaddps %xmm3, %xmm3, %xmm3
vaddss %xmm7, %xmm3, %xmm3
vaddss %xmm4, %xmm3, %xmm3
vaddss %xmm6, %xmm3, %xmm3
vmulss %xmm0, %xmm3, %xmm15
vmovss %xmm15, %xmm11, %xmm12 # xmm12 = xmm15[0],xmm11[1,2,3]
vshufps $0x0, %xmm15, %xmm15, %xmm13 # xmm13 = xmm15[0,0,0,0]
movb $0x1, %bpl
jmp 0x382dad
vaddps %xmm4, %xmm3, %xmm3
vaddps %xmm3, %xmm7, %xmm3
vmulps %xmm1, %xmm3, %xmm13
movb $0x1, %r11b
xorl %ebp, %ebp
vmovaps %ymm13, %ymm12
vmovaps %xmm13, %xmm15
jmp 0x382db0
vmulps %ymm2, %ymm3, %ymm12
vshufps $0x0, %xmm12, %xmm12, %xmm13 # xmm13 = xmm12[0,0,0,0]
movb $0x1, %al
xorl %ebp, %ebp
vmovaps %xmm12, %xmm15
xorl %r11d, %r11d
vmovaps %ymm12, %ymm4
jmp 0x382db8
vxorps %xmm15, %xmm15, %xmm15
vxorps %xmm12, %xmm12, %xmm12
vxorps %xmm13, %xmm13, %xmm13
xorl %ebp, %ebp
xorl %r11d, %r11d
vinsertf128 $0x1, %xmm13, %ymm13, %ymm4
xorl %eax, %eax
xorl %ecx, %ecx
vxorps %xmm3, %xmm3, %xmm3
xorl %edi, %edi
movq %r14, %r12
leal 0x8(%rdi), %ebx
cmpl %r9d, %ebx
jg 0x382de9
vmovups (%r12), %ymm6
vsubps %ymm4, %ymm6, %ymm6
vmulps %ymm6, %ymm6, %ymm6
vaddps %ymm3, %ymm6, %ymm3
addq $0x20, %r12
addq $0x8, %rcx
movl %ebx, %edi
jmp 0x382dc3
vxorps %xmm4, %xmm4, %xmm4
addl $0x4, %edi
cmpl %r9d, %edi
jg 0x382e12
vmovups (%r12), %xmm6
vsubps %xmm13, %xmm6, %xmm6
vmulps %xmm6, %xmm6, %xmm6
vaddps %xmm4, %xmm6, %xmm4
addq $0x10, %r12
addq $0x4, %rcx
jmp 0x382ded
vxorps %xmm6, %xmm6, %xmm6
cmpl %r9d, %ecx
jge 0x382e34
vmovss (%r13,%rcx,4), %xmm7
vsubss %xmm15, %xmm7, %xmm7
vmulss %xmm7, %xmm7, %xmm7
vaddss %xmm6, %xmm7, %xmm6
incq %rcx
jmp 0x382e16
testb %al, %al
jne 0x382e3f
vxorps %xmm13, %xmm13, %xmm13
jmp 0x382e43
vmulps %ymm2, %ymm3, %ymm13
vextractf128 $0x1, %ymm3, %xmm7
testb %r11b, %r11b
je 0x382e60
vaddps %xmm4, %xmm3, %xmm4
vaddps %xmm4, %xmm7, %xmm4
vmulps %xmm1, %xmm4, %xmm8
vblendps $0xf, %ymm8, %ymm13, %ymm13 # ymm13 = ymm8[0,1,2,3],ymm13[4,5,6,7]
testb %bpl, %bpl
je 0x382e94
vshufpd $0x1, %xmm4, %xmm4, %xmm8 # xmm8 = xmm4[1,0]
vaddps %xmm4, %xmm8, %xmm4
vhaddps %xmm3, %xmm7, %xmm3
vmovshdup %xmm4, %xmm7 # xmm7 = xmm4[1,1,3,3]
vhaddps %xmm3, %xmm3, %xmm3
vhaddps %xmm3, %xmm3, %xmm3
vaddss %xmm7, %xmm6, %xmm6
vaddss %xmm4, %xmm6, %xmm4
vaddss %xmm4, %xmm3, %xmm3
vmulss %xmm0, %xmm3, %xmm3
vblendps $0x1, %ymm3, %ymm13, %ymm13 # ymm13 = ymm3[0],ymm13[1,2,3,4,5,6,7]
cmpl $0x1, %esi
je 0x382f31
cmpl $0x4, %esi
je 0x382ef0
cmpl $0x8, %esi
jne 0x382f6a
vshufps $0x0, %xmm14, %xmm14, %xmm3 # xmm3 = xmm14[0,0,0,0]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm3
vaddps %ymm3, %ymm13, %ymm3
vrsqrtps %ymm3, %ymm4
vmulps %ymm4, %ymm3, %ymm3
vmulps %ymm4, %ymm3, %ymm3
vbroadcastss 0x728f4(%rip), %ymm6 # 0x3f57c4
vaddps %ymm6, %ymm3, %ymm3
vbroadcastss 0x6ea47(%rip), %ymm6 # 0x3f1924
vmulps %ymm6, %ymm4, %ymm4
vmulps %ymm3, %ymm4, %ymm13
vxorps %ymm10, %ymm12, %ymm3
vmulps %ymm3, %ymm13, %ymm12
jmp 0x382f6a
vshufps $0x0, %xmm14, %xmm14, %xmm3 # xmm3 = xmm14[0,0,0,0]
vaddps %xmm3, %xmm13, %xmm3
vrsqrtps %xmm3, %xmm4
vmulps %xmm4, %xmm3, %xmm3
vmulps %xmm4, %xmm3, %xmm3
vbroadcastss 0x728b5(%rip), %xmm6 # 0x3f57c4
vaddps %xmm6, %xmm3, %xmm3
vmulps %xmm4, %xmm9, %xmm4
vmulps %xmm3, %xmm4, %xmm3
vxorps %xmm5, %xmm12, %xmm4
vmulps %xmm4, %xmm3, %xmm4
vblendps $0xf, %ymm3, %ymm13, %ymm13 # ymm13 = ymm3[0,1,2,3],ymm13[4,5,6,7]
vblendps $0xf, %ymm4, %ymm12, %ymm12 # ymm12 = ymm4[0,1,2,3],ymm12[4,5,6,7]
jmp 0x382f6a
vaddss %xmm14, %xmm13, %xmm3
vrsqrtss %xmm3, %xmm3, %xmm4
vmulss %xmm4, %xmm3, %xmm3
vmulss %xmm4, %xmm3, %xmm3
vaddss 0x7287a(%rip), %xmm3, %xmm3 # 0x3f57c4
vmulss 0x6e9d2(%rip), %xmm4, %xmm4 # 0x3f1924
vmulss %xmm3, %xmm4, %xmm3
vblendps $0x1, %ymm3, %ymm13, %ymm13 # ymm13 = ymm3[0],ymm13[1,2,3,4,5,6,7]
vxorps %xmm5, %xmm15, %xmm4
vmulss %xmm4, %xmm3, %xmm3
vblendps $0x1, %ymm3, %ymm12, %ymm12 # ymm12 = ymm3[0],ymm12[1,2,3,4,5,6,7]
testl %r10d, %r10d
je 0x382fb8
testb %al, %al
je 0x382fd5
movl $0x8, %ecx
movq -0x30(%rsp), %rax
movq -0x28(%rsp), %r10
cmpl %r9d, %ecx
jg 0x382fdf
vbroadcastss (%r10), %ymm3
vbroadcastss (%rax), %ymm4
vmulps (%r14), %ymm13, %ymm6
vaddps %ymm6, %ymm12, %ymm6
vmulps %ymm6, %ymm3, %ymm3
vaddps %ymm4, %ymm3, %ymm3
vmovups %ymm3, (%r14)
addq $0x20, %r14
addq $0x4, %r10
addq $0x4, %rax
addl $0x8, %ecx
jmp 0x382f82
vmovaps %xmm13, %xmm3
testb %r11b, %r11b
jne 0x38314c
vshufps $0x0, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[0,0,0,0]
vshufps $0x0, %xmm12, %xmm12, %xmm14 # xmm14 = xmm12[0,0,0,0]
jmp 0x383155
movq -0x30(%rsp), %rax
movq -0x28(%rsp), %r10
testb %r11b, %r11b
je 0x383083
vinsertf128 $0x1, %xmm13, %ymm13, %ymm3
vinsertf128 $0x1, %xmm12, %ymm12, %ymm4
xorl %ecx, %ecx
movq %r14, %r11
leal 0x8(%rcx), %edi
cmpl %r9d, %edi
jg 0x383073
vbroadcastss 0x4(%r10,%rcx), %xmm6
vbroadcastss (%r10,%rcx), %xmm7
vinsertf128 $0x1, %xmm6, %ymm7, %ymm6
vbroadcastss 0x4(%rax,%rcx), %xmm7
vbroadcastss (%rax,%rcx), %xmm8
vinsertf128 $0x1, %xmm7, %ymm8, %ymm7
vmulps (%r11), %ymm3, %ymm8
vaddps %ymm4, %ymm8, %ymm8
vmulps %ymm6, %ymm8, %ymm6
vaddps %ymm6, %ymm7, %ymm6
vmovups %ymm6, (%r11)
addq $0x20, %r11
addq $0x8, %rcx
jmp 0x382ff9
vbroadcastss (%r10,%rcx), %xmm3
vbroadcastss (%rax,%rcx), %xmm4
vmulps (%r14,%rcx,4), %xmm13, %xmm6
vaddps %xmm6, %xmm12, %xmm6
vmulps %xmm6, %xmm3, %xmm3
vaddps %xmm4, %xmm3, %xmm3
vmovups %xmm3, (%r14,%rcx,4)
addq $0x10, %r11
addq $0x4, %rcx
leal 0x4(%rcx), %edi
cmpl %r9d, %edi
jle 0x383047
addq %rcx, %rax
addq %rcx, %r10
jmp 0x383086
movq %r14, %r11
testb %bpl, %bpl
je 0x3831c9
vshufps $0x0, %xmm13, %xmm13, %xmm3 # xmm3 = xmm13[0,0,0,0]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm4
vshufps $0x0, %xmm12, %xmm12, %xmm6 # xmm6 = xmm12[0,0,0,0]
vinsertf128 $0x1, %xmm6, %ymm6, %ymm6
xorl %r12d, %r12d
xorl %ecx, %ecx
movq %rax, %rdi
xorl %r14d, %r14d
movq %r10, %r13
movq %r11, %rbp
leal 0x8(%r14), %ebx
cmpl %r9d, %ebx
jg 0x3830f2
vmulps (%rbp), %ymm4, %ymm7
vaddps %ymm6, %ymm7, %ymm7
vmulps (%r13), %ymm7, %ymm7
vaddps (%rdi), %ymm7, %ymm7
vmovups %ymm7, (%rbp)
addq $0x20, %rbp
addq $0x20, %r13
addq $0x20, %rdi
addq $0x20, %rcx
addq $0x8, %r12
movl %ebx, %r14d
jmp 0x3830b8
vshufps $0x0, %xmm12, %xmm12, %xmm4 # xmm4 = xmm12[0,0,0,0]
addl $0x4, %r14d
cmpl %r9d, %r14d
jg 0x383145
vmulps (%r11,%rcx), %xmm3, %xmm6
vaddps %xmm4, %xmm6, %xmm6
vmulps (%r10,%rcx), %xmm6, %xmm6
vaddps (%rax,%rcx), %xmm6, %xmm6
vmovups %xmm6, (%r11,%rcx)
addq $0x10, %rcx
addq $0x4, %r12
jmp 0x3830f8
vmulss (%r11,%r12,4), %xmm13, %xmm3
vaddss %xmm3, %xmm12, %xmm3
vmulss (%r10,%r12,4), %xmm3, %xmm3
vaddss (%rax,%r12,4), %xmm3, %xmm3
vmovss %xmm3, (%r11,%r12,4)
incq %r12
cmpl %r9d, %r12d
jl 0x383126
jmp 0x3831c9
vmovaps %xmm3, %xmm4
vmovaps %xmm12, %xmm14
vmovaps %ymm12, %ymm6
testb %al, %al
jne 0x383169
vinsertf128 $0x1, %xmm4, %ymm4, %ymm13
vinsertf128 $0x1, %xmm14, %ymm14, %ymm6
xorl %eax, %eax
xorl %ecx, %ecx
leal 0x8(%rcx), %edi
cmpl %r9d, %edi
jg 0x3831a5
vmulps (%r14), %ymm13, %ymm7
vaddps %ymm6, %ymm7, %ymm7
vmovups %ymm7, (%r14)
addq $0x20, %r14
addq $0x8, %rax
movl %edi, %ecx
jmp 0x38316d
vmulps (%r14), %xmm4, %xmm6
vaddps %xmm6, %xmm14, %xmm6
vmovups %xmm6, (%r14)
addq $0x10, %r14
addq $0x4, %rax
addl $0x4, %ecx
cmpl %r9d, %ecx
jle 0x38318f
jmp 0x3831c4
vmulss (%r13,%rax,4), %xmm3, %xmm4
vaddss %xmm4, %xmm12, %xmm4
vmovss %xmm4, (%r13,%rax,4)
incq %rax
cmpl %r9d, %eax
jl 0x3831af
incq %r15
movq %rdx, %rdi
movq -0x8(%rsp), %rsi
jmp 0x382c5b
incq %rdi
jmp 0x382c40
imull -0x34(%rsp), %r9d
movq (%rsi), %rdx
movq -0x20(%rsp), %rsi
movl 0xd8(%rsi,%rcx), %eax
vmovss 0xd4(%rsi,%rcx), %xmm0
vxorps %xmm1, %xmm1, %xmm1
xorl %ecx, %ecx
leal 0x8(%rcx), %esi
cmpl %r9d, %esi
jg 0x383218
vaddps (%rdx,%rcx,4), %ymm1, %ymm1
addq $0x8, %rcx
jmp 0x383205
vxorps %xmm2, %xmm2, %xmm2
leal 0x4(%rcx), %esi
cmpl %r9d, %esi
jg 0x38322f
vaddps (%rdx,%rcx,4), %xmm2, %xmm2
addq $0x4, %rcx
jmp 0x38321c
vxorps %xmm3, %xmm3, %xmm3
cmpl %r9d, %ecx
jge 0x383242
vaddss (%rdx,%rcx,4), %xmm3, %xmm3
incq %rcx
jmp 0x383233
vextractf128 $0x1, %ymm1, %xmm4
vhaddps %xmm1, %xmm4, %xmm1
vshufpd $0x1, %xmm2, %xmm2, %xmm4 # xmm4 = xmm2[1,0]
vhaddps %xmm1, %xmm1, %xmm1
vaddps %xmm2, %xmm4, %xmm2
vhaddps %xmm1, %xmm1, %xmm1
vmovshdup %xmm2, %xmm4 # xmm4 = xmm2[1,1,3,3]
vaddss %xmm4, %xmm1, %xmm1
vaddss %xmm2, %xmm1, %xmm1
vaddss %xmm3, %xmm1, %xmm1
vcvtsi2ss %r9d, %xmm5, %xmm2
vdivss %xmm2, %xmm1, %xmm1
vshufps $0x0, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[0,0,0,0]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm4
vxorps %xmm3, %xmm3, %xmm3
xorl %ecx, %ecx
leal 0x8(%rcx), %esi
cmpl %r9d, %esi
jg 0x3832a6
vmovups (%rdx,%rcx,4), %ymm5
vsubps %ymm4, %ymm5, %ymm5
vmulps %ymm5, %ymm5, %ymm5
vaddps %ymm3, %ymm5, %ymm3
addq $0x8, %rcx
jmp 0x383287
vshufps $0x0, %xmm1, %xmm1, %xmm5 # xmm5 = xmm1[0,0,0,0]
vxorps %xmm4, %xmm4, %xmm4
leal 0x4(%rcx), %esi
cmpl %r9d, %esi
jg 0x3832ce
vmovups (%rdx,%rcx,4), %xmm6
vsubps %xmm5, %xmm6, %xmm6
vmulps %xmm6, %xmm6, %xmm6
vaddps %xmm4, %xmm6, %xmm4
addq $0x4, %rcx
jmp 0x3832af
vxorps %xmm5, %xmm5, %xmm5
cmpl %r9d, %ecx
jge 0x3832ed
vmovss (%rdx,%rcx,4), %xmm6
vsubss %xmm1, %xmm6, %xmm6
vmulss %xmm6, %xmm6, %xmm6
vaddss %xmm5, %xmm6, %xmm5
incq %rcx
jmp 0x3832d2
vextractf128 $0x1, %ymm3, %xmm6
vhaddps %xmm3, %xmm6, %xmm3
vhaddps %xmm3, %xmm3, %xmm3
vshufpd $0x1, %xmm4, %xmm4, %xmm6 # xmm6 = xmm4[1,0]
vhaddps %xmm3, %xmm3, %xmm3
vaddps %xmm4, %xmm6, %xmm4
vmovshdup %xmm4, %xmm6 # xmm6 = xmm4[1,1,3,3]
vaddss %xmm6, %xmm3, %xmm3
vaddss %xmm4, %xmm3, %xmm3
vaddss %xmm5, %xmm3, %xmm3
vdivss %xmm2, %xmm3, %xmm2
vaddss %xmm0, %xmm2, %xmm0
vrsqrtss %xmm0, %xmm0, %xmm2
vmulss %xmm2, %xmm0, %xmm0
vmulss %xmm2, %xmm0, %xmm0
vaddss 0x72490(%rip), %xmm0, %xmm0 # 0x3f57c4
vmulss 0x6e5e8(%rip), %xmm2, %xmm2 # 0x3f1924
vmulss %xmm0, %xmm2, %xmm0
vbroadcastss 0x6de6b(%rip), %xmm2 # 0x3f11b4
vxorps %xmm2, %xmm1, %xmm1
vmulss %xmm1, %xmm0, %xmm1
vshufps $0x0, %xmm0, %xmm0, %xmm2 # xmm2 = xmm0[0,0,0,0]
vinsertf128 $0x1, %xmm2, %ymm2, %ymm2
vshufps $0x0, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[0,0,0,0]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm3
testl %eax, %eax
je 0x3839ba
xorl %eax, %eax
leal 0x8(%rax), %ecx
cmpl %r9d, %ecx
jg 0x3833a1
vmulps (%rdx,%rax,4), %ymm2, %ymm4
vaddps %ymm3, %ymm4, %ymm4
movq -0x28(%rsp), %rcx
vmulps (%rcx,%rax,4), %ymm4, %ymm4
movq -0x30(%rsp), %rcx
vaddps (%rcx,%rax,4), %ymm4, %ymm4
vmovups %ymm4, (%rdx,%rax,4)
addq $0x8, %rax
jmp 0x383371
vshufps $0x0, %xmm0, %xmm0, %xmm2 # xmm2 = xmm0[0,0,0,0]
vshufps $0x0, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[0,0,0,0]
leal 0x4(%rax), %ecx
cmpl %r9d, %ecx
jg 0x383400
vmulps (%rdx,%rax,4), %xmm2, %xmm4
vaddps %xmm3, %xmm4, %xmm4
movq -0x28(%rsp), %rcx
vmulps (%rcx,%rax,4), %xmm4, %xmm4
movq -0x30(%rsp), %rcx
vaddps (%rcx,%rax,4), %xmm4, %xmm4
vmovups %xmm4, (%rdx,%rax,4)
addq $0x4, %rax
jmp 0x3833ab
vmulss (%rdx,%rax,4), %xmm0, %xmm2
vaddss %xmm1, %xmm2, %xmm2
movq -0x28(%rsp), %rcx
vmulss (%rcx,%rax,4), %xmm2, %xmm2
movq -0x30(%rsp), %rcx
vaddss (%rcx,%rax,4), %xmm2, %xmm2
vmovss %xmm2, (%rdx,%rax,4)
incq %rax
cmpl %r9d, %eax
jl 0x3833db
jmp 0x383a14
imull %r9d, %r8d
vcvtsi2ss %r8d, %xmm0, %xmm0
imull -0x34(%rsp), %r8d
vmovss 0x6b867(%rip), %xmm1 # 0x3eec88
vdivss %xmm0, %xmm1, %xmm0
vshufps $0x0, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm2
xorl %r9d, %r9d
vbroadcastss 0x6dd78(%rip), %xmm5 # 0x3f11b4
movl $0x8, %edx
vbroadcastss 0x6e4da(%rip), %xmm9 # 0x3f1924
vbroadcastss 0x6dd61(%rip), %ymm10 # 0x3f11b4
vxorps %xmm11, %xmm11, %xmm11
cmpq -0x10(%rsp), %r9
je 0x383a14
movq (%rsi), %r15
movq 0x10(%rsi), %r12
movq 0x40(%rsi), %rcx
movq %rcx, %r14
imulq %r9, %r14
imulq %r12, %r14
addq %r15, %r14
movq -0x18(%rsp), %rax
movq -0x18(%rax), %rax
movq -0x20(%rsp), %rdi
movl 0xd8(%rdi,%rax), %r10d
vmovss 0xd4(%rdi,%rax), %xmm14
xorl %eax, %eax
vxorps %xmm3, %xmm3, %xmm3
xorl %r13d, %r13d
movq %r14, %rdi
leal 0x8(%r13), %r11d
cmpl %r8d, %r11d
jg 0x3834c1
vaddps (%rdi), %ymm3, %ymm3
addq $0x20, %rdi
addq $0x8, %rax
movl %r11d, %r13d
jmp 0x3834a7
vxorps %xmm4, %xmm4, %xmm4
addl $0x4, %r13d
cmpl %r8d, %r13d
jg 0x3834dc
vaddps (%rdi), %xmm4, %xmm4
addq $0x10, %rdi
addq $0x4, %rax
jmp 0x3834c5
imulq %r12, %rcx
imulq %r9, %rcx
addq %rcx, %r15
vxorps %xmm6, %xmm6, %xmm6
cmpl %r8d, %eax
jge 0x3834fb
vaddss (%r15,%rax,4), %xmm6, %xmm6
incq %rax
jmp 0x3834eb
movl -0x34(%rsp), %eax
cmpl $0x8, %eax
je 0x38356a
vextractf128 $0x1, %ymm3, %xmm7
cmpl $0x4, %eax
je 0x38354d
cmpl $0x1, %eax
jne 0x383587
vshufpd $0x1, %xmm4, %xmm4, %xmm12 # xmm12 = xmm4[1,0]
vhaddps %xmm3, %xmm7, %xmm3
vaddps %xmm4, %xmm12, %xmm4
vhaddps %xmm3, %xmm3, %xmm3
vmovshdup %xmm4, %xmm7 # xmm7 = xmm4[1,1,3,3]
vhaddps %xmm3, %xmm3, %xmm3
vaddss %xmm7, %xmm3, %xmm3
vaddss %xmm4, %xmm3, %xmm3
vaddss %xmm6, %xmm3, %xmm3
vmulss %xmm0, %xmm3, %xmm15
vmovss %xmm15, %xmm11, %xmm12 # xmm12 = xmm15[0],xmm11[1,2,3]
vshufps $0x0, %xmm15, %xmm15, %xmm13 # xmm13 = xmm15[0,0,0,0]
movb $0x1, %bpl
jmp 0x383598
vaddps %xmm4, %xmm3, %xmm3
vaddps %xmm3, %xmm7, %xmm3
vmulps %xmm1, %xmm3, %xmm13
movb $0x1, %r12b
xorl %ebp, %ebp
vmovaps %ymm13, %ymm12
vmovaps %xmm13, %xmm15
jmp 0x38359b
vmulps %ymm2, %ymm3, %ymm12
vshufps $0x0, %xmm12, %xmm12, %xmm13 # xmm13 = xmm12[0,0,0,0]
movb $0x1, %r13b
xorl %ebp, %ebp
vmovaps %xmm12, %xmm15
xorl %r12d, %r12d
vmovaps %ymm12, %ymm4
jmp 0x3835a4
vxorps %xmm15, %xmm15, %xmm15
vxorps %xmm12, %xmm12, %xmm12
vxorps %xmm13, %xmm13, %xmm13
xorl %ebp, %ebp
xorl %r12d, %r12d
vinsertf128 $0x1, %xmm13, %ymm13, %ymm4
xorl %r13d, %r13d
xorl %eax, %eax
vxorps %xmm3, %xmm3, %xmm3
xorl %edi, %edi
movq %r14, %rcx
leal 0x8(%rdi), %r11d
cmpl %r8d, %r11d
jg 0x3835d5
vmovups (%rcx), %ymm6
vsubps %ymm4, %ymm6, %ymm6
vmulps %ymm6, %ymm6, %ymm6
vaddps %ymm3, %ymm6, %ymm3
addq $0x20, %rcx
addq $0x8, %rax
movl %r11d, %edi
jmp 0x3835af
vxorps %xmm4, %xmm4, %xmm4
addl $0x4, %edi
cmpl %r8d, %edi
jg 0x3835fc
vmovups (%rcx), %xmm6
vsubps %xmm13, %xmm6, %xmm6
vmulps %xmm6, %xmm6, %xmm6
vaddps %xmm4, %xmm6, %xmm4
addq $0x10, %rcx
addq $0x4, %rax
jmp 0x3835d9
vxorps %xmm6, %xmm6, %xmm6
cmpl %r8d, %eax
jge 0x38361d
vmovss (%r15,%rax,4), %xmm7
vsubss %xmm15, %xmm7, %xmm7
vmulss %xmm7, %xmm7, %xmm7
vaddss %xmm6, %xmm7, %xmm6
incq %rax
jmp 0x383600
testb %r13b, %r13b
jne 0x383629
vxorps %xmm13, %xmm13, %xmm13
jmp 0x38362d
vmulps %ymm2, %ymm3, %ymm13
vextractf128 $0x1, %ymm3, %xmm7
testb %r12b, %r12b
je 0x38364a
vaddps %xmm4, %xmm3, %xmm4
vaddps %xmm4, %xmm7, %xmm4
vmulps %xmm1, %xmm4, %xmm8
vblendps $0xf, %ymm8, %ymm13, %ymm13 # ymm13 = ymm8[0,1,2,3],ymm13[4,5,6,7]
testb %bpl, %bpl
je 0x38367e
vshufpd $0x1, %xmm4, %xmm4, %xmm8 # xmm8 = xmm4[1,0]
vaddps %xmm4, %xmm8, %xmm4
vhaddps %xmm3, %xmm7, %xmm3
vmovshdup %xmm4, %xmm7 # xmm7 = xmm4[1,1,3,3]
vhaddps %xmm3, %xmm3, %xmm3
vhaddps %xmm3, %xmm3, %xmm3
vaddss %xmm7, %xmm6, %xmm6
vaddss %xmm4, %xmm6, %xmm4
vaddss %xmm4, %xmm3, %xmm3
vmulss %xmm0, %xmm3, %xmm3
vblendps $0x1, %ymm3, %ymm13, %ymm13 # ymm13 = ymm3[0],ymm13[1,2,3,4,5,6,7]
movl -0x34(%rsp), %eax
cmpl $0x1, %eax
je 0x38371f
cmpl $0x4, %eax
je 0x3836de
cmpl $0x8, %eax
jne 0x383758
vshufps $0x0, %xmm14, %xmm14, %xmm3 # xmm3 = xmm14[0,0,0,0]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm3
vaddps %ymm3, %ymm13, %ymm3
vrsqrtps %ymm3, %ymm4
vmulps %ymm4, %ymm3, %ymm3
vmulps %ymm4, %ymm3, %ymm3
vbroadcastss 0x72106(%rip), %ymm6 # 0x3f57c4
vaddps %ymm6, %ymm3, %ymm3
vbroadcastss 0x6e259(%rip), %ymm6 # 0x3f1924
vmulps %ymm6, %ymm4, %ymm4
vmulps %ymm3, %ymm4, %ymm13
vxorps %ymm10, %ymm12, %ymm3
vmulps %ymm3, %ymm13, %ymm12
jmp 0x383758
vshufps $0x0, %xmm14, %xmm14, %xmm3 # xmm3 = xmm14[0,0,0,0]
vaddps %xmm3, %xmm13, %xmm3
vrsqrtps %xmm3, %xmm4
vmulps %xmm4, %xmm3, %xmm3
vmulps %xmm4, %xmm3, %xmm3
vbroadcastss 0x720c7(%rip), %xmm6 # 0x3f57c4
vaddps %xmm6, %xmm3, %xmm3
vmulps %xmm4, %xmm9, %xmm4
vmulps %xmm3, %xmm4, %xmm3
vxorps %xmm5, %xmm12, %xmm4
vmulps %xmm4, %xmm3, %xmm4
vblendps $0xf, %ymm3, %ymm13, %ymm13 # ymm13 = ymm3[0,1,2,3],ymm13[4,5,6,7]
vblendps $0xf, %ymm4, %ymm12, %ymm12 # ymm12 = ymm4[0,1,2,3],ymm12[4,5,6,7]
jmp 0x383758
vaddss %xmm14, %xmm13, %xmm3
vrsqrtss %xmm3, %xmm3, %xmm4
vmulss %xmm4, %xmm3, %xmm3
vmulss %xmm4, %xmm3, %xmm3
vaddss 0x7208c(%rip), %xmm3, %xmm3 # 0x3f57c4
vmulss 0x6e1e4(%rip), %xmm4, %xmm4 # 0x3f1924
vmulss %xmm3, %xmm4, %xmm3
vblendps $0x1, %ymm3, %ymm13, %ymm13 # ymm13 = ymm3[0],ymm13[1,2,3,4,5,6,7]
vxorps %xmm5, %xmm15, %xmm4
vmulss %xmm4, %xmm3, %xmm3
vblendps $0x1, %ymm3, %ymm12, %ymm12 # ymm12 = ymm3[0],ymm12[1,2,3,4,5,6,7]
testl %r10d, %r10d
je 0x3837a4
testb %r13b, %r13b
je 0x3837c1
movl %edx, %ecx
movq -0x30(%rsp), %rax
movq -0x28(%rsp), %r10
cmpl %r8d, %ecx
jg 0x3837cb
vbroadcastss (%r10), %ymm3
vbroadcastss (%rax), %ymm4
vmulps (%r14), %ymm13, %ymm6
vaddps %ymm6, %ymm12, %ymm6
vmulps %ymm6, %ymm3, %ymm3
vaddps %ymm4, %ymm3, %ymm3
vmovups %ymm3, (%r14)
addq $0x20, %r14
addq $0x4, %r10
addq $0x4, %rax
addl $0x8, %ecx
jmp 0x38376e
vmovaps %xmm13, %xmm3
testb %r12b, %r12b
jne 0x383936
vshufps $0x0, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[0,0,0,0]
vshufps $0x0, %xmm12, %xmm12, %xmm14 # xmm14 = xmm12[0,0,0,0]
jmp 0x38393f
movq -0x30(%rsp), %rax
movq -0x28(%rsp), %r10
testb %r12b, %r12b
je 0x38386f
vinsertf128 $0x1, %xmm13, %ymm13, %ymm3
vinsertf128 $0x1, %xmm12, %ymm12, %ymm4
xorl %ecx, %ecx
movq %r14, %r15
leal 0x8(%rcx), %edi
cmpl %r8d, %edi
jg 0x38385f
vbroadcastss 0x4(%r10,%rcx), %xmm6
vbroadcastss (%r10,%rcx), %xmm7
vinsertf128 $0x1, %xmm6, %ymm7, %ymm6
vbroadcastss 0x4(%rax,%rcx), %xmm7
vbroadcastss (%rax,%rcx), %xmm8
vinsertf128 $0x1, %xmm7, %ymm8, %ymm7
vmulps (%r15), %ymm3, %ymm8
vaddps %ymm4, %ymm8, %ymm8
vmulps %ymm6, %ymm8, %ymm6
vaddps %ymm6, %ymm7, %ymm6
vmovups %ymm6, (%r15)
addq $0x20, %r15
addq $0x8, %rcx
jmp 0x3837e5
vbroadcastss (%r10,%rcx), %xmm3
vbroadcastss (%rax,%rcx), %xmm4
vmulps (%r14,%rcx,4), %xmm13, %xmm6
vaddps %xmm6, %xmm12, %xmm6
vmulps %xmm6, %xmm3, %xmm3
vaddps %xmm4, %xmm3, %xmm3
vmovups %xmm3, (%r14,%rcx,4)
addq $0x10, %r15
addq $0x4, %rcx
leal 0x4(%rcx), %edi
cmpl %r8d, %edi
jle 0x383833
addq %rcx, %rax
addq %rcx, %r10
jmp 0x383872
movq %r14, %r15
testb %bpl, %bpl
je 0x3839b2
vshufps $0x0, %xmm13, %xmm13, %xmm3 # xmm3 = xmm13[0,0,0,0]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm4
vshufps $0x0, %xmm12, %xmm12, %xmm6 # xmm6 = xmm12[0,0,0,0]
vinsertf128 $0x1, %xmm6, %ymm6, %ymm6
xorl %r14d, %r14d
xorl %ecx, %ecx
movq %rax, %r12
xorl %edi, %edi
movq %r10, %r13
movq %r15, %rbp
leal 0x8(%rdi), %ebx
cmpl %r8d, %ebx
jg 0x3838dd
vmulps (%rbp), %ymm4, %ymm7
vaddps %ymm6, %ymm7, %ymm7
vmulps (%r13), %ymm7, %ymm7
vaddps (%r12), %ymm7, %ymm7
vmovups %ymm7, (%rbp)
addq $0x20, %rbp
addq $0x20, %r13
addq $0x20, %r12
addq $0x20, %rcx
addq $0x8, %r14
movl %ebx, %edi
jmp 0x3838a3
vshufps $0x0, %xmm12, %xmm12, %xmm4 # xmm4 = xmm12[0,0,0,0]
addl $0x4, %edi
cmpl %r8d, %edi
jg 0x38392f
vmulps (%r15,%rcx), %xmm3, %xmm6
vaddps %xmm4, %xmm6, %xmm6
vmulps (%r10,%rcx), %xmm6, %xmm6
vaddps (%rax,%rcx), %xmm6, %xmm6
vmovups %xmm6, (%r15,%rcx)
addq $0x10, %rcx
addq $0x4, %r14
jmp 0x3838e3
vmulss (%r15,%r14,4), %xmm13, %xmm3
vaddss %xmm3, %xmm12, %xmm3
vmulss (%r10,%r14,4), %xmm3, %xmm3
vaddss (%rax,%r14,4), %xmm3, %xmm3
vmovss %xmm3, (%r15,%r14,4)
incq %r14
cmpl %r8d, %r14d
jl 0x383910
jmp 0x3839b2
vmovaps %xmm3, %xmm4
vmovaps %xmm12, %xmm14
vmovaps %ymm12, %ymm6
testb %r13b, %r13b
jne 0x383954
vinsertf128 $0x1, %xmm4, %ymm4, %ymm13
vinsertf128 $0x1, %xmm14, %ymm14, %ymm6
xorl %eax, %eax
xorl %ecx, %ecx
leal 0x8(%rcx), %edi
cmpl %r8d, %edi
jg 0x383990
vmulps (%r14), %ymm13, %ymm7
vaddps %ymm6, %ymm7, %ymm7
vmovups %ymm7, (%r14)
addq $0x20, %r14
addq $0x8, %rax
movl %edi, %ecx
jmp 0x383958
vmulps (%r14), %xmm4, %xmm6
vaddps %xmm6, %xmm14, %xmm6
vmovups %xmm6, (%r14)
addq $0x10, %r14
addq $0x4, %rax
addl $0x4, %ecx
cmpl %r8d, %ecx
jle 0x38397a
jmp 0x3839ad
vmulss (%r15,%rax,4), %xmm3, %xmm4
vaddss %xmm4, %xmm12, %xmm4
vmovss %xmm4, (%r15,%rax,4)
incq %rax
cmpl %r8d, %eax
jl 0x38399a
incq %r9
jmp 0x383458
xorl %eax, %eax
leal 0x8(%rax), %ecx
cmpl %r9d, %ecx
jg 0x3839d8
vmulps (%rdx,%rax,4), %ymm2, %ymm4
vaddps %ymm3, %ymm4, %ymm4
vmovups %ymm4, (%rdx,%rax,4)
addq $0x8, %rax
jmp 0x3839bc
vshufps $0x0, %xmm0, %xmm0, %xmm2 # xmm2 = xmm0[0,0,0,0]
vshufps $0x0, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[0,0,0,0]
leal 0x4(%rax), %ecx
cmpl %r9d, %ecx
jg 0x383a0f
vmulps (%rdx,%rax,4), %xmm2, %xmm4
vaddps %xmm3, %xmm4, %xmm4
vmovups %xmm4, (%rdx,%rax,4)
addq $0x4, %rax
jmp 0x3839e2
vmulss (%rdx,%rax,4), %xmm0, %xmm2
vaddss %xmm1, %xmm2, %xmm2
vmovss %xmm2, (%rdx,%rax,4)
incq %rax
cmpl %r9d, %eax
jl 0x3839fe
xorl %eax, %eax
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/layernorm_x86_avx.cpp |
virtual thunk to ncnn::LayerNorm_x86_avx::forward_inplace(ncnn::Mat&, ncnn::Option const&) const | int LayerNorm_x86_avx::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
{
int dims = bottom_top_blob.dims;
int elempack = bottom_top_blob.elempack;
int w = bottom_top_blob.w;
int h = bottom_top_blob.h;
int channels = bottom_top_blob.c;
const float* gamma = gamma_data;
const float* beta = beta_data;
if (dims == 1)
{
int elemcount = w * elempack;
float* ptr = bottom_top_blob;
// 1D layer norm is special. Treat it as unpacked.
fast_1d_layer_norm(ptr, 1, elemcount, elemcount, gamma, beta, affine, eps);
}
if (dims == 2)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = 0; i < h; ++i)
{
float* ptr = bottom_top_blob.row(i);
fast_1d_layer_norm(ptr, elempack, w, w * elempack, gamma, beta, affine, eps);
}
}
if (dims == 3)
{
if (affine_size == w)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; ++q)
{
for (int i = 0; i < h; ++i)
{
float* ptr = bottom_top_blob.channel(q).row(i);
fast_1d_layer_norm(ptr, elempack, w, w * elempack, gamma, beta, affine, eps);
}
}
}
else // if (affine_size == w * h)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; ++q)
{
float* ptr = bottom_top_blob.channel(q);
fast_1d_layer_norm(ptr, elempack, w * h, w * h * elempack, gamma, beta, affine, eps);
}
}
}
return 0;
} | pushq %rax
movq (%rdi), %rax
addq -0x58(%rax), %rdi
callq 0x3825bc
xorl %eax, %eax
popq %rcx
retq
nopl (%rax)
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/layernorm_x86_avx.cpp |
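Every dims branch above funnels into fast_1d_layer_norm over one contiguous run of elements. A minimal scalar sketch of what that helper computes (the function name and scalar form here are illustrative reconstructions; the shipped routine is the vectorized AVX code listed earlier in this entry):

#include <cmath>

// Scalar reference: normalize `size` floats in place, then optionally
// apply the per-element affine transform. `eps` stabilizes the inverse
// square root; the AVX build instead uses vrsqrtps plus one Newton
// refinement step, visible in the assembly above.
static void layer_norm_1d_ref(float* ptr, int size, const float* gamma,
                              const float* beta, bool affine, float eps)
{
    float mean = 0.f;
    for (int i = 0; i < size; i++)
        mean += ptr[i];
    mean /= size;

    float var = 0.f;
    for (int i = 0; i < size; i++)
    {
        float d = ptr[i] - mean;
        var += d * d;
    }
    var /= size;

    const float a = 1.f / sqrtf(var + eps);
    const float b = -mean * a;
    for (int i = 0; i < size; i++)
    {
        float v = ptr[i] * a + b;
        ptr[i] = affine ? v * gamma[i] + beta[i] : v;
    }
}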
ncnn::gru(ncnn::Mat const&, ncnn::Mat&, int, ncnn::Mat const&, ncnn::Mat const&, ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) | static int gru(const Mat& bottom_blob, Mat& top_blob, int reverse, const Mat& weight_xc, const Mat& bias_c, const Mat& weight_hc, Mat& hidden_state, const Option& opt)
{
int size = bottom_blob.w;
int T = bottom_blob.h;
int num_output = top_blob.w;
// 2 x num_output
Mat gates(2, num_output, 4u, opt.workspace_allocator);
if (gates.empty())
return -100;
// unroll
for (int t = 0; t < T; t++)
{
int ti = reverse ? T - 1 - t : t;
const float* x = bottom_blob.row(ti);
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < num_output; q++)
{
float* gates_data = gates.row(q);
// reset and update gates
const float* bias_c_R = bias_c.row(0);
const float* bias_c_U = bias_c.row(1);
const float* weight_xc_R = weight_xc.row(num_output * 0 + q);
const float* weight_xc_U = weight_xc.row(num_output * 1 + q);
const float* weight_hc_R = weight_hc.row(num_output * 0 + q);
const float* weight_hc_U = weight_hc.row(num_output * 1 + q);
float R = bias_c_R[q];
float U = bias_c_U[q];
for (int i = 0; i < size; i++)
{
float xi = x[i];
R += weight_xc_R[i] * xi;
U += weight_xc_U[i] * xi;
}
for (int i = 0; i < num_output; i++)
{
float h_cont = hidden_state[i];
R += weight_hc_R[i] * h_cont;
U += weight_hc_U[i] * h_cont;
}
// sigmoid(R)
// sigmoid(U)
R = 1.f / (1.f + expf(-R));
U = 1.f / (1.f + expf(-U));
// new gate
const float* bias_c_WN = bias_c.row(2);
const float* bias_c_BN = bias_c.row(3);
const float* weight_xc_N = weight_xc.row(num_output * 2 + q);
const float* weight_hc_N = weight_hc.row(num_output * 2 + q);
float N = bias_c_BN[q];
for (int i = 0; i < num_output; i++)
{
float h_cont = hidden_state[i];
N += weight_hc_N[i] * h_cont;
}
N = bias_c_WN[q] + R * N;
for (int i = 0; i < size; i++)
{
float xi = x[i];
N += weight_xc_N[i] * xi;
}
// tanh(N)
N = tanhf(N);
gates_data[0] = U;
gates_data[1] = N;
}
// h_t := (1 - update) .* new + update .* h_{t-1}
float* output_data = top_blob.row(ti);
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < num_output; q++)
{
const float* gates_data = gates.row(q);
float U = gates_data[0];
float N = gates_data[1];
float H = (1 - U) * N + U * hidden_state[q];
hidden_state[q] = H;
output_data[q] = H;
}
}
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x158, %rsp # imm = 0x158
movq %r9, 0x78(%rsp)
movq %r8, 0x70(%rsp)
movq %rcx, 0x68(%rsp)
movl %edx, 0x10(%rsp)
movq %rsi, %rbx
movq %rdi, %r14
movq 0x198(%rsp), %rax
movl 0x2c(%rdi), %ebp
movl 0x30(%rdi), %ecx
movl %ecx, 0x8(%rsp)
movl 0x2c(%rsi), %r13d
movq 0x10(%rax), %r8
leaq 0xf0(%rsp), %r12
andq $0x0, 0x40(%r12)
xorps %xmm0, %xmm0
movaps %xmm0, (%r12)
movups %xmm0, 0xc(%r12)
movaps %xmm0, 0x20(%r12)
movups %xmm0, 0x2c(%r12)
pushq $0x2
popq %rsi
pushq $0x4
popq %rcx
movq %r12, %rdi
movl %r13d, %edx
callq 0x636fa
movq (%r12), %rcx
pushq $-0x64
popq %rax
movq %rax, 0x18(%rsp)
movq %rcx, 0x20(%rsp)
testq %rcx, %rcx
je 0x384f34
movslq 0x128(%rsp), %rax
imulq 0x130(%rsp), %rax
testq %rax, %rax
je 0x384f34
movslq %r13d, %rax
movq %rax, 0x60(%rsp)
movq (%r14), %rax
movq %rax, 0x58(%rsp)
movslq 0x2c(%r14), %rax
imulq 0x10(%r14), %rax
movq %rax, 0x50(%rsp)
leal (,%r13,2), %eax
movq (%rbx), %rcx
movq %rcx, 0x48(%rsp)
movslq 0x2c(%rbx), %rcx
imulq 0x10(%rbx), %rcx
movq %rcx, 0x40(%rsp)
xorl %ecx, %ecx
testl %ebp, %ebp
cmovlel %ecx, %ebp
cltq
movq %rax, 0x38(%rsp)
testl %r13d, %r13d
movl %r13d, %edi
movl $0x0, %eax
movq %rax, 0x18(%rsp)
cmovlel %ecx, %edi
movl 0x8(%rsp), %eax
testl %eax, %eax
movl $0x0, %ecx
cmovgl %eax, %ecx
movl %ecx, 0xc(%rsp)
movq 0x20(%rsp), %rax
addq $0x4, %rax
movq %rax, 0x30(%rsp)
xorl %ecx, %ecx
movq %rdi, 0x88(%rsp)
cmpl 0xc(%rsp), %ecx
je 0x384f34
movl %ecx, %eax
notl %eax
addl 0x8(%rsp), %eax
cmpl $0x0, 0x10(%rsp)
movl %ecx, 0x14(%rsp)
cmovel %ecx, %eax
cltq
movq 0x50(%rsp), %rbx
movq %rax, 0x80(%rsp)
imulq %rax, %rbx
addq 0x58(%rsp), %rbx
movslq 0x11c(%rsp), %rax
imulq 0x100(%rsp), %rax
movq %rax, 0xc0(%rsp)
movq 0x70(%rsp), %rcx
movq (%rcx), %rsi
movslq 0x2c(%rcx), %rax
imulq 0x10(%rcx), %rax
leaq (%rsi,%rax), %rcx
movq %rcx, 0xb0(%rsp)
movq 0x68(%rsp), %rdx
movq (%rdx), %r8
movslq 0x2c(%rdx), %r12
movq 0x78(%rsp), %rcx
movq (%rcx), %r9
movslq 0x2c(%rcx), %r14
leaq (%rsi,%rax,2), %r10
movq %r10, 0x98(%rsp)
leaq (%rax,%rax,2), %rax
imulq 0x10(%rdx), %r12
movq %rsi, 0xb8(%rsp)
addq %rsi, %rax
movq %rax, 0x90(%rsp)
movq %r12, %rdx
movq 0x60(%rsp), %rax
imulq %rax, %rdx
addq %r8, %rdx
imulq 0x10(%rcx), %r14
movq %r14, %rsi
imulq %rax, %rsi
addq %r9, %rsi
movq %r14, 0xa0(%rsp)
movq 0x38(%rsp), %rax
imulq %rax, %r14
addq %r9, %r14
movq %r12, 0xa8(%rsp)
imulq %rax, %r12
addq %r8, %r12
xorl %r10d, %r10d
cmpq %rdi, %r10
je 0x384ec5
movq 0xc0(%rsp), %rax
imulq %r10, %rax
movq %rax, 0xc8(%rsp)
movq 0xb8(%rsp), %rax
movss (%rax,%r10,4), %xmm0
movq 0xb0(%rsp), %rax
movss (%rax,%r10,4), %xmm2
unpcklps %xmm0, %xmm2 # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
xorl %eax, %eax
cmpq %rax, %rbp
je 0x384d3c
movss (%r8,%rax,4), %xmm0
movss (%rdx,%rax,4), %xmm1
unpcklps %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
movss (%rbx,%rax,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
mulps %xmm1, %xmm0
addps %xmm0, %xmm2
incq %rax
jmp 0x384d15
movq 0x190(%rsp), %rax
movq (%rax), %r15
xorl %eax, %eax
cmpq %rax, %r13
je 0x384d71
movss (%r9,%rax,4), %xmm0
movss (%rsi,%rax,4), %xmm1
unpcklps %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
movss (%r15,%rax,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
mulps %xmm1, %xmm0
addps %xmm0, %xmm2
incq %rax
jmp 0x384d49
movq %rsi, 0xd0(%rsp)
movq %rdx, 0xd8(%rsp)
movq %r9, 0xe0(%rsp)
movq %r8, 0xe8(%rsp)
movaps %xmm2, %xmm0
shufps $0x55, %xmm2, %xmm0 # xmm0 = xmm0[1,1],xmm2[1,1]
movaps 0x692f1(%rip), %xmm1 # 0x3ee090
xorps %xmm1, %xmm0
movq %r10, 0x28(%rsp)
movaps %xmm2, 0x140(%rsp)
callq 0x5f410
movss %xmm0, 0x4(%rsp)
movaps 0x140(%rsp), %xmm0
xorps 0x692c7(%rip), %xmm0 # 0x3ee090
callq 0x5f410
movq 0x28(%rsp), %rcx
movss 0x69ead(%rip), %xmm3 # 0x3eec88
addss %xmm3, %xmm0
movq 0x90(%rsp), %rax
movss (%rax,%rcx,4), %xmm1
xorl %eax, %eax
cmpq %rax, %r13
je 0x384e08
movss (%r14,%rax,4), %xmm2
mulss (%r15,%rax,4), %xmm2
addss %xmm2, %xmm1
incq %rax
jmp 0x384dee
movss 0x4(%rsp), %xmm2
addss %xmm3, %xmm2
divss %xmm2, %xmm1
movq 0x98(%rsp), %rax
addss (%rax,%rcx,4), %xmm1
xorl %eax, %eax
cmpq %rax, %rbp
je 0x384e3e
movss (%r12,%rax,4), %xmm2
mulss (%rbx,%rax,4), %xmm2
addss %xmm2, %xmm1
incq %rax
jmp 0x384e25
divss %xmm0, %xmm3
movss %xmm3, 0x4(%rsp)
movaps %xmm1, %xmm0
callq 0x5f160
movq 0x28(%rsp), %r10
movq 0x20(%rsp), %rax
movq 0xc8(%rsp), %rcx
movss 0x4(%rsp), %xmm1
movss %xmm1, (%rax,%rcx)
movss %xmm0, 0x4(%rax,%rcx)
incq %r10
movq 0xa8(%rsp), %rax
movq 0xd8(%rsp), %rdx
addq %rax, %rdx
movq 0xe8(%rsp), %r8
addq %rax, %r8
movq 0xa0(%rsp), %rcx
movq 0xd0(%rsp), %rsi
addq %rcx, %rsi
movq 0xe0(%rsp), %r9
addq %rcx, %r9
addq %rcx, %r14
addq %rax, %r12
movq 0x88(%rsp), %rdi
jmp 0x384cd7
movq 0x80(%rsp), %r8
imulq 0x40(%rsp), %r8
movslq 0x11c(%rsp), %rax
imulq 0x100(%rsp), %rax
addq 0x48(%rsp), %r8
movq 0x190(%rsp), %rcx
movq (%rcx), %rcx
movq 0x30(%rsp), %rdx
xorl %esi, %esi
cmpq %rsi, %rdi
je 0x384f29
movss (%rdx), %xmm0
movss (%rcx,%rsi,4), %xmm1
subss %xmm0, %xmm1
mulss -0x4(%rdx), %xmm1
addss %xmm0, %xmm1
movss %xmm1, (%rcx,%rsi,4)
movss %xmm1, (%r8,%rsi,4)
incq %rsi
addq %rax, %rdx
jmp 0x384efb
movl 0x14(%rsp), %ecx
incl %ecx
jmp 0x384be7
movq 0xf8(%rsp), %rax
testq %rax, %rax
je 0x384f6b
lock
decl (%rax)
jne 0x384f6b
movq 0xf0(%rsp), %rsi
movq 0x110(%rsp), %rdi
testq %rdi, %rdi
je 0x384f63
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x384f6b
movq %rsi, %rdi
callq 0x5f3e0
movq 0x18(%rsp), %rax
addq $0x158, %rsp # imm = 0x158
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq %rax, %rdi
callq 0x61d68
| /csukuangfj[P]ncnn/src/layer/gru.cpp |
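In equation form, the per-output-element loop above implements the standard GRU cell. A LaTeX transcription (notation mine; \sigma(x) = 1/(1 + e^{-x}) matches the two expf calls, and \odot is elementwise):

\begin{aligned}
R_t &= \sigma(W_{xR} x_t + W_{hR} h_{t-1} + b_R) \\
U_t &= \sigma(W_{xU} x_t + W_{hU} h_{t-1} + b_U) \\
N_t &= \tanh\bigl(W_{xN} x_t + b_{WN} + R_t \odot (W_{hN} h_{t-1} + b_{BN})\bigr) \\
h_t &= (1 - U_t) \odot N_t + U_t \odot h_{t-1}
\end{aligned}

Note the split candidate bias: b_{BN} (bias_c row 3) sits inside the reset-gated hidden term and b_{WN} (row 2) outside it, exactly as the source accumulates N before and after the R * N multiply.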
virtual thunk to ncnn::MultiHeadAttention_x86::forward(std::vector<ncnn::Mat, std::allocator<ncnn::Mat>> const&, std::vector<ncnn::Mat, std::allocator<ncnn::Mat>>&, ncnn::Option const&) const | int MultiHeadAttention_x86::forward(const std::vector<Mat>& bottom_blobs, std::vector<Mat>& top_blobs, const Option& opt) const
{
const Mat& q_blob = bottom_blobs[0];
const Mat& k_blob = (bottom_blobs.size() == 1 || (bottom_blobs.size() == 2 && attn_mask)) ? q_blob : bottom_blobs[1];
const Mat& v_blob = (bottom_blobs.size() == 1 || (bottom_blobs.size() == 2 && attn_mask)) ? q_blob : (bottom_blobs.size() == 2 || (bottom_blobs.size() == 3 && attn_mask)) ? k_blob : bottom_blobs[2];
const Mat& attn_mask_blob = attn_mask ? bottom_blobs[bottom_blobs.size() - 1] : Mat();
Mat attn_mask_blob_unpacked;
if (attn_mask_blob.elempack != 1)
{
convert_packing(attn_mask_blob, attn_mask_blob_unpacked, 1, opt);
}
else
{
attn_mask_blob_unpacked = attn_mask_blob;
}
const int embed_dim_per_head = embed_dim / num_heads;
const int src_seqlen = q_blob.h * q_blob.elempack;
const int dst_seqlen = k_blob.h * k_blob.elempack;
Mat q_affine;
q_gemm->forward(q_blob, q_affine, opt);
Mat k_affine;
k_gemm->forward(k_blob, k_affine, opt);
Mat qk_cross(dst_seqlen, src_seqlen * num_heads, 4u, opt.blob_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = 0; i < num_heads; i++)
{
std::vector<Mat> qk_bottom_blobs(2);
qk_bottom_blobs[0] = q_affine.row_range(i * embed_dim_per_head, embed_dim_per_head);
qk_bottom_blobs[1] = k_affine.row_range(i * embed_dim_per_head, embed_dim_per_head);
if (attn_mask)
{
const Mat& maskm = attn_mask_blob_unpacked.dims == 3 ? attn_mask_blob_unpacked.channel(i) : attn_mask_blob_unpacked;
qk_bottom_blobs.push_back(maskm);
}
std::vector<Mat> qk_top_blobs(1);
qk_top_blobs[0] = qk_cross.row_range(i * src_seqlen, src_seqlen);
Option opt1 = opt;
opt1.num_threads = 1;
qk_gemm->forward(qk_bottom_blobs, qk_top_blobs, opt1);
}
q_affine.release();
k_affine.release();
qk_softmax->forward_inplace(qk_cross, opt);
Mat v_affine;
v_gemm->forward(v_blob, v_affine, opt);
Mat qkv_cross(src_seqlen, embed_dim_per_head * num_heads, 4u, opt.blob_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = 0; i < num_heads; i++)
{
std::vector<Mat> qkv_bottom_blobs(2);
qkv_bottom_blobs[0] = qk_cross.row_range(i * src_seqlen, src_seqlen);
qkv_bottom_blobs[1] = v_affine.row_range(i * embed_dim_per_head, embed_dim_per_head);
std::vector<Mat> qkv_top_blobs(1);
qkv_top_blobs[0] = qkv_cross.row_range(i * embed_dim_per_head, embed_dim_per_head);
Option opt1 = opt;
opt1.num_threads = 1;
qkv_gemm->forward(qkv_bottom_blobs, qkv_top_blobs, opt1);
}
v_affine.release();
o_gemm->forward(qkv_cross, top_blobs[0], opt);
return 0;
} | pushq %rax
movq (%rdi), %rax
addq -0x40(%rax), %rdi
callq 0x388fac
xorl %eax, %eax
popq %rcx
retq
nop
| /csukuangfj[P]ncnn/src/layer/x86/multiheadattention_x86.cpp |
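The forward above assembles scaled dot-product attention out of generic Gemm layers. Per head it computes

\mathrm{Attention}(Q, K, V) = \mathrm{softmax}(Q K^\top)\, V

where q_gemm/k_gemm/v_gemm are the input projections, qk_gemm forms the Q K^\top block (with the optional attention mask appended as a third bottom blob), qk_softmax normalizes each row, qkv_gemm applies V, and o_gemm is the output projection. The conventional 1/\sqrt{d} scale does not appear in this function, so it is presumably folded into the projection weights prepared in create_pipeline.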
ncnn::MultiHeadAttention_x86_avx512::MultiHeadAttention_x86_avx512() | MultiHeadAttention_x86_avx512::MultiHeadAttention_x86_avx512()
{
#if __SSE2__
support_packing = true;
#endif // __SSE2__
q_gemm = 0;
k_gemm = 0;
v_gemm = 0;
qk_gemm = 0;
qkv_gemm = 0;
qk_softmax = 0;
o_gemm = 0;
} | movq (%rsi), %rax
movq %rax, (%rdi)
movq 0x8(%rsi), %rcx
movq -0x18(%rax), %rax
movq %rcx, (%rdi,%rax)
movq (%rdi), %rax
movq -0x18(%rax), %rax
movb $0x1, 0xb(%rdi,%rax)
vxorps %xmm0, %xmm0, %xmm0
vmovups %ymm0, 0x20(%rdi)
vmovups %ymm0, 0x8(%rdi)
vzeroupper
retq
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/multiheadattention_x86_avx512.cpp |
ncnn::MultiHeadAttention_x86_avx512::MultiHeadAttention_x86_avx512() | MultiHeadAttention_x86_avx512::MultiHeadAttention_x86_avx512()
{
#if __SSE2__
support_packing = true;
#endif // __SSE2__
q_gemm = 0;
k_gemm = 0;
v_gemm = 0;
qk_gemm = 0;
qkv_gemm = 0;
qk_softmax = 0;
o_gemm = 0;
} | pushq %rbx
movq %rdi, %rbx
addq $0x40, %rdi
callq 0x385df0
leaq 0x103c20(%rip), %rax # 0x48dec0
movq %rax, (%rbx)
leaq 0x103c96(%rip), %rax # 0x48df40
movq %rax, 0x40(%rbx)
movb $0x1, 0x4b(%rbx)
vxorps %xmm0, %xmm0, %xmm0
vmovups %ymm0, 0x8(%rbx)
vmovups %ymm0, 0x20(%rbx)
popq %rbx
vzeroupper
retq
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/multiheadattention_x86_avx512.cpp |
virtual thunk to ncnn::MultiHeadAttention_x86_avx512::destroy_pipeline(ncnn::Option const&) | int MultiHeadAttention_x86_avx512::destroy_pipeline(const Option& opt)
{
if (q_gemm)
{
q_gemm->destroy_pipeline(opt);
delete q_gemm;
q_gemm = 0;
}
if (k_gemm)
{
k_gemm->destroy_pipeline(opt);
delete k_gemm;
k_gemm = 0;
}
if (v_gemm)
{
v_gemm->destroy_pipeline(opt);
delete v_gemm;
v_gemm = 0;
}
if (qk_gemm)
{
qk_gemm->destroy_pipeline(opt);
delete qk_gemm;
qk_gemm = 0;
}
if (qkv_gemm)
{
qkv_gemm->destroy_pipeline(opt);
delete qkv_gemm;
qkv_gemm = 0;
}
if (qk_softmax)
{
qk_softmax->destroy_pipeline(opt);
delete qk_softmax;
qk_softmax = 0;
}
if (o_gemm)
{
o_gemm->destroy_pipeline(opt);
delete o_gemm;
o_gemm = 0;
}
return 0;
} | pushq %rax
movq (%rdi), %rax
addq -0x38(%rax), %rdi
callq 0x38b9c4
xorl %eax, %eax
popq %rcx
retq
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/multiheadattention_x86_avx512.cpp |
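Each branch of destroy_pipeline follows the same teardown sequence, generalized here as a sketch (`layer` stands in for any of the seven helper-layer pointers; this is not a new API):

if (layer)
{
    layer->destroy_pipeline(opt); // release pipeline-scoped state first
    delete layer;                 // then free the layer object itself
    layer = 0;                    // null it so a repeated call is a no-op
}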
ncnn::MultiHeadAttention_x86_avx512::forward(std::vector<ncnn::Mat, std::allocator<ncnn::Mat>> const&, std::vector<ncnn::Mat, std::allocator<ncnn::Mat>>&, ncnn::Option const&) const | int MultiHeadAttention_x86_avx512::forward(const std::vector<Mat>& bottom_blobs, std::vector<Mat>& top_blobs, const Option& opt) const
{
const Mat& q_blob = bottom_blobs[0];
const Mat& k_blob = (bottom_blobs.size() == 1 || (bottom_blobs.size() == 2 && attn_mask)) ? q_blob : bottom_blobs[1];
const Mat& v_blob = (bottom_blobs.size() == 1 || (bottom_blobs.size() == 2 && attn_mask)) ? q_blob : (bottom_blobs.size() == 2 || (bottom_blobs.size() == 3 && attn_mask)) ? k_blob : bottom_blobs[2];
const Mat& attn_mask_blob = attn_mask ? bottom_blobs[bottom_blobs.size() - 1] : Mat();
Mat attn_mask_blob_unpacked;
if (attn_mask_blob.elempack != 1)
{
convert_packing(attn_mask_blob, attn_mask_blob_unpacked, 1, opt);
}
else
{
attn_mask_blob_unpacked = attn_mask_blob;
}
const int embed_dim_per_head = embed_dim / num_heads;
const int src_seqlen = q_blob.h * q_blob.elempack;
const int dst_seqlen = k_blob.h * k_blob.elempack;
Mat q_affine;
q_gemm->forward(q_blob, q_affine, opt);
Mat k_affine;
k_gemm->forward(k_blob, k_affine, opt);
Mat qk_cross(dst_seqlen, src_seqlen * num_heads, 4u, opt.blob_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = 0; i < num_heads; i++)
{
std::vector<Mat> qk_bottom_blobs(2);
qk_bottom_blobs[0] = q_affine.row_range(i * embed_dim_per_head, embed_dim_per_head);
qk_bottom_blobs[1] = k_affine.row_range(i * embed_dim_per_head, embed_dim_per_head);
if (attn_mask)
{
const Mat& maskm = attn_mask_blob_unpacked.dims == 3 ? attn_mask_blob_unpacked.channel(i) : attn_mask_blob_unpacked;
qk_bottom_blobs.push_back(maskm);
}
std::vector<Mat> qk_top_blobs(1);
qk_top_blobs[0] = qk_cross.row_range(i * src_seqlen, src_seqlen);
Option opt1 = opt;
opt1.num_threads = 1;
qk_gemm->forward(qk_bottom_blobs, qk_top_blobs, opt1);
}
q_affine.release();
k_affine.release();
qk_softmax->forward_inplace(qk_cross, opt);
Mat v_affine;
v_gemm->forward(v_blob, v_affine, opt);
Mat qkv_cross(src_seqlen, embed_dim_per_head * num_heads, 4u, opt.blob_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = 0; i < num_heads; i++)
{
std::vector<Mat> qkv_bottom_blobs(2);
qkv_bottom_blobs[0] = qk_cross.row_range(i * src_seqlen, src_seqlen);
qkv_bottom_blobs[1] = v_affine.row_range(i * embed_dim_per_head, embed_dim_per_head);
std::vector<Mat> qkv_top_blobs(1);
qkv_top_blobs[0] = qkv_cross.row_range(i * embed_dim_per_head, embed_dim_per_head);
Option opt1 = opt;
opt1.num_threads = 1;
qkv_gemm->forward(qkv_bottom_blobs, qkv_top_blobs, opt1);
}
v_affine.release();
o_gemm->forward(qkv_cross, top_blobs[0], opt);
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x2f8, %rsp # imm = 0x2F8
movq %rcx, %r15
movq %rdx, 0x270(%rsp)
movq %rdi, %r12
movq (%rsi), %rbx
movq 0x8(%rsi), %rcx
subq %rbx, %rcx
pushq $0x48
popq %rsi
movq %rcx, %rax
cqto
idivq %rsi
movq %rbx, %r14
movq %rbx, %r13
cmpq $0x1, %rax
je 0x38bb88
cmpq $0x2, %rax
jne 0x38bb5c
movq (%r12), %rax
movq -0x18(%rax), %rax
leaq 0x48(%rbx), %r14
cmpl $0x0, 0xe4(%r12,%rax)
cmovneq %rbx, %r14
movq %rbx, %r13
cmoveq %r14, %r13
jmp 0x38bb88
leaq 0x48(%rbx), %r14
cmpq $0xd8, %rcx
jne 0x38bb81
movq (%r12), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0xe4(%r12,%rax)
je 0x38bb81
movq %r14, %r13
jmp 0x38bb88
leaq 0x90(%rbx), %r13
movq (%r12), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0xe4(%r12,%rax)
je 0x38bc0c
movq -0x40(%rbx,%rcx), %rdx
vmovups -0x48(%rbx,%rcx), %xmm0
vmovaps %xmm0, 0xe0(%rsp)
movq -0x38(%rbx,%rcx), %rax
movq %rax, 0xf0(%rsp)
movl -0x30(%rbx,%rcx), %eax
movl %eax, 0xf8(%rsp)
movq -0x28(%rbx,%rcx), %rsi
movq %rsi, 0x100(%rsp)
vmovups -0x20(%rbx,%rcx), %xmm0
vmovups %xmm0, 0x108(%rsp)
movl -0x10(%rbx,%rcx), %esi
movl %esi, 0x118(%rsp)
movq -0x8(%rbx,%rcx), %rcx
movq %rcx, 0x120(%rsp)
testq %rdx, %rdx
je 0x38bc3f
lock
incl (%rdx)
movl 0xf8(%rsp), %eax
jmp 0x38bc3f
andq $0x0, 0x120(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0xe0(%rsp)
vmovups %xmm0, 0xec(%rsp)
vmovaps %xmm0, 0x100(%rsp)
vmovups %xmm0, 0x10c(%rsp)
xorl %eax, %eax
andq $0x0, 0xc0(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x80(%rsp)
vmovups %xmm0, 0x8c(%rsp)
vmovaps %xmm0, 0xa0(%rsp)
vmovups %xmm0, 0xac(%rsp)
cmpl $0x1, %eax
jne 0x38bcb4
movq 0xe8(%rsp), %rax
testq %rax, %rax
je 0x38bcd9
lock
incl (%rax)
movq 0x88(%rsp), %rax
testq %rax, %rax
je 0x38bcd9
lock
decl (%rax)
jne 0x38bcd9
movq 0x80(%rsp), %rsi
movq 0xa0(%rsp), %rdi
testq %rdi, %rdi
je 0x38bcd1
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x38bcd9
leaq 0xe0(%rsp), %rdi
leaq 0x80(%rsp), %rsi
pushq $0x1
popq %rdx
movq %r15, %rcx
callq 0x64e3b
jmp 0x38bd49
movq %rsi, %rdi
callq 0x5f3e0
vmovaps 0xe0(%rsp), %xmm0
vmovaps %xmm0, 0x80(%rsp)
movq 0xf0(%rsp), %rax
movq %rax, 0x90(%rsp)
movl 0xf8(%rsp), %eax
movl %eax, 0x98(%rsp)
movq 0x100(%rsp), %rax
movq %rax, 0xa0(%rsp)
vmovups 0x108(%rsp), %xmm0
vmovups %xmm0, 0xa8(%rsp)
movl 0x118(%rsp), %eax
movl %eax, 0xb8(%rsp)
movq 0x120(%rsp), %rax
movq %rax, 0xc0(%rsp)
movq %r13, 0x278(%rsp)
movq (%r12), %rax
movq -0x18(%rax), %rcx
movl 0xd0(%r12,%rcx), %eax
cltd
idivl 0xd4(%r12,%rcx)
movl %eax, 0x4(%rsp)
movl 0x18(%rbx), %ebp
imull 0x30(%rbx), %ebp
movl 0x18(%r14), %r13d
imull 0x30(%r14), %r13d
leaq 0x1d0(%rsp), %rdx
andq $0x0, 0x40(%rdx)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, (%rdx)
vmovups %xmm0, 0xc(%rdx)
vmovaps %xmm0, 0x20(%rdx)
vmovups %xmm0, 0x2c(%rdx)
movq 0x8(%r12), %rdi
movq (%rdi), %rax
movq %rbx, %rsi
movq %r15, %rcx
callq *0x38(%rax)
leaq 0x180(%rsp), %rdx
andq $0x0, 0x40(%rdx)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, (%rdx)
vmovups %xmm0, 0xc(%rdx)
vmovaps %xmm0, 0x20(%rdx)
vmovups %xmm0, 0x2c(%rdx)
movq 0x10(%r12), %rdi
movq (%rdi), %rax
movq %r14, %rsi
movq %r15, %rcx
callq *0x38(%rax)
movq (%r12), %rax
movq -0x18(%rax), %rax
movl 0xd4(%r12,%rax), %edx
imull %ebp, %edx
movq 0x8(%r15), %r8
leaq 0x220(%rsp), %rdi
andq $0x0, 0x40(%rdi)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, (%rdi)
vmovups %xmm0, 0xc(%rdi)
vmovaps %xmm0, 0x20(%rdi)
vmovups %xmm0, 0x2c(%rdi)
pushq $0x4
popq %rcx
movl %r13d, %esi
callq 0x636fa
movslq 0x4(%rsp), %rax
movq %rax, 0x10(%rsp)
movslq %ebp, %rax
movq %rax, 0xd0(%rsp)
movq $0x0, 0x70(%rsp)
leaq 0x130(%rsp), %rbx
xorl %r13d, %r13d
xorl %ecx, %ecx
movq %r15, 0x128(%rsp)
movq %r12, 0xd8(%rsp)
movl %ebp, 0x1c(%rsp)
movq (%r12), %rax
movq -0x18(%rax), %rax
movslq 0xd4(%r12,%rax), %rax
cmpq %rax, %rcx
jge 0x38c2c3
movq %rcx, 0x8(%rsp)
movq %rbx, %rdi
pushq $0x2
popq %rsi
leaq 0x20(%rsp), %rdx
callq 0x6fa72
movslq 0x1fc(%rsp), %rbp
movq %r13, 0x78(%rsp)
movq %r13, %r12
imulq %rbp, %r12
movq 0x1e0(%rsp), %rbx
imulq %rbx, %r12
addq 0x1d0(%rsp), %r12
movl 0x1e8(%rsp), %r13d
movq 0x1f0(%rsp), %r14
movq 0x130(%rsp), %r15
movq 0x8(%r15), %rax
testq %rax, %rax
je 0x38bf05
lock
decl (%rax)
jne 0x38bf05
movq (%r15), %rsi
movq 0x20(%r15), %rdi
testq %rdi, %rdi
je 0x38befd
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x38bf05
movq %rsi, %rdi
callq 0x5f3e0
movq %rbp, %rax
imulq 0x10(%rsp), %rax
movq %r12, (%r15)
andq $0x0, 0x8(%r15)
movq %rbx, 0x10(%r15)
movl %r13d, 0x18(%r15)
movq %r14, 0x20(%r15)
movl $0x2, 0x28(%r15)
movl %ebp, 0x2c(%r15)
movl 0x4(%rsp), %ecx
movl %ecx, 0x30(%r15)
movabsq $0x100000001, %rcx # imm = 0x100000001
movq %rcx, 0x34(%r15)
movq %rax, 0x40(%r15)
movslq 0x1ac(%rsp), %rbp
movq 0x190(%rsp), %rbx
movl 0x198(%rsp), %r15d
movq 0x1a0(%rsp), %r13
movq 0x130(%rsp), %r12
movq 0x50(%r12), %rax
movq 0x78(%rsp), %r14
imulq %rbp, %r14
imulq %rbx, %r14
addq 0x180(%rsp), %r14
testq %rax, %rax
je 0x38bfb3
lock
decl (%rax)
jne 0x38bfb3
movq 0x48(%r12), %rsi
movq 0x68(%r12), %rdi
testq %rdi, %rdi
je 0x38bfab
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x38bfb3
movq %rsi, %rdi
callq 0x5f3e0
movq %rbp, %rax
imulq 0x10(%rsp), %rax
movq %r14, 0x48(%r12)
andq $0x0, 0x50(%r12)
movq %rbx, 0x58(%r12)
movl %r15d, 0x60(%r12)
movq %r13, 0x68(%r12)
movl $0x2, 0x70(%r12)
movl %ebp, 0x74(%r12)
movl 0x4(%rsp), %ecx
movl %ecx, 0x78(%r12)
movabsq $0x100000001, %rcx # imm = 0x100000001
movq %rcx, 0x7c(%r12)
movq %rax, 0x88(%r12)
movq 0xd8(%rsp), %rcx
movq (%rcx), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0xe4(%rcx,%rax)
leaq 0x130(%rsp), %rdi
leaq 0x2b0(%rsp), %rbx
movq 0x8(%rsp), %rsi
je 0x38c17d
movl 0xa8(%rsp), %ecx
cmpl $0x3, %ecx
jne 0x38c0e1
movslq 0xac(%rsp), %rcx
movslq 0xb0(%rsp), %rax
movq 0xc0(%rsp), %rdx
imulq %rsi, %rdx
movq 0x90(%rsp), %rsi
imulq %rsi, %rdx
addq 0x80(%rsp), %rdx
movl 0xb4(%rsp), %r10d
movl 0x98(%rsp), %r8d
movq 0xa0(%rsp), %r9
movq %rdx, 0x20(%rsp)
andq $0x0, 0x28(%rsp)
movq %rsi, 0x30(%rsp)
movl %r8d, 0x38(%rsp)
movq %r9, 0x40(%rsp)
movl %ecx, 0x4c(%rsp)
movl %eax, 0x50(%rsp)
movl $0x1, 0x54(%rsp)
movl %r10d, 0x58(%rsp)
imulq %rcx, %rax
imulq %rsi, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rsi
movq %rax, 0x60(%rsp)
movl $0x2, 0x48(%rsp)
jmp 0x38c145
movq 0x88(%rsp), %rax
vmovaps 0x80(%rsp), %xmm0
vmovaps %xmm0, 0x20(%rsp)
movq 0x90(%rsp), %rdx
movq %rdx, 0x30(%rsp)
movl 0x98(%rsp), %edx
movl %edx, 0x38(%rsp)
movq 0xa0(%rsp), %rdx
movq %rdx, 0x40(%rsp)
movl %ecx, 0x48(%rsp)
vmovups 0xac(%rsp), %xmm0
vmovups %xmm0, 0x4c(%rsp)
movq 0xc0(%rsp), %rcx
movq %rcx, 0x60(%rsp)
testq %rax, %rax
je 0x38c145
lock
incl (%rax)
leaq 0x20(%rsp), %rsi
callq 0x38a072
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x38c17d
lock
decl (%rax)
jne 0x38c17d
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x38c175
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x38c17d
movq %rsi, %rdi
callq 0x5f3e0
movq %rbx, %rdi
pushq $0x1
popq %rsi
leaq 0x20(%rsp), %rdx
callq 0x6fa72
movslq 0x24c(%rsp), %rbp
movq 0x70(%rsp), %r12
imulq %rbp, %r12
movq 0x230(%rsp), %rbx
imulq %rbx, %r12
addq 0x220(%rsp), %r12
movl 0x238(%rsp), %ecx
movq 0x240(%rsp), %r13
movq 0x2b0(%rsp), %r15
movq 0x8(%r15), %rax
testq %rax, %rax
je 0x38c1fc
lock
decl (%rax)
jne 0x38c1fc
movl %ecx, %r14d
movq (%r15), %rsi
movq 0x20(%r15), %rdi
testq %rdi, %rdi
je 0x38c1f1
movq (%rdi), %rax
callq *0x18(%rax)
movl %r14d, %ecx
jmp 0x38c1fc
movq %rsi, %rdi
callq 0x5f3e0
movl %r14d, %ecx
movq %rbp, %rax
movq 0xd0(%rsp), %r14
imulq %r14, %rax
movq %r12, (%r15)
andq $0x0, 0x8(%r15)
movq %rbx, 0x10(%r15)
movl %ecx, 0x18(%r15)
movq %r13, 0x20(%r15)
movl $0x2, 0x28(%r15)
movl %ebp, 0x2c(%r15)
movl 0x1c(%rsp), %ebp
movl %ebp, 0x30(%r15)
movabsq $0x100000001, %rcx # imm = 0x100000001
movq %rcx, 0x34(%r15)
movq %rax, 0x40(%r15)
movq 0xd8(%rsp), %r12
movq 0x28(%r12), %rdi
movq 0x128(%rsp), %r15
vmovups (%r15), %zmm0
vmovups %zmm0, 0x20(%rsp)
movl $0x1, 0x24(%rsp)
movq (%rdi), %rax
leaq 0x130(%rsp), %rbx
movq %rbx, %rsi
leaq 0x2b0(%rsp), %r13
movq %r13, %rdx
leaq 0x20(%rsp), %rcx
vzeroupper
callq *0x30(%rax)
movq %r13, %rdi
callq 0x6fac4
movq %rbx, %rdi
callq 0x6fac4
movq 0x8(%rsp), %rcx
incq %rcx
movq 0x78(%rsp), %r13
addq 0x10(%rsp), %r13
addq %r14, 0x70(%rsp)
jmp 0x38be6d
movq 0x1d8(%rsp), %rax
testq %rax, %rax
je 0x38c2fa
lock
decl (%rax)
jne 0x38c2fa
movq 0x1d0(%rsp), %rsi
movq 0x1f0(%rsp), %rdi
testq %rdi, %rdi
je 0x38c2f2
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x38c2fa
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x210(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x1d0(%rsp)
vmovups %xmm0, 0x1dc(%rsp)
leaq 0x1f8(%rsp), %rax
andl $0x0, 0x10(%rax)
vmovups %xmm0, (%rax)
movq 0x188(%rsp), %rax
testq %rax, %rax
je 0x38c368
lock
decl (%rax)
jne 0x38c368
movq 0x180(%rsp), %rsi
movq 0x1a0(%rsp), %rdi
testq %rdi, %rdi
je 0x38c35c
movq (%rdi), %rax
callq *0x18(%rax)
vxorps %xmm0, %xmm0, %xmm0
jmp 0x38c368
movq %rsi, %rdi
callq 0x5f3e0
vxorps %xmm0, %xmm0, %xmm0
andq $0x0, 0x1c0(%rsp)
vmovaps %xmm0, 0x180(%rsp)
vmovups %xmm0, 0x18c(%rsp)
leaq 0x1a8(%rsp), %rax
andl $0x0, 0x10(%rax)
vmovups %xmm0, (%rax)
movq 0x38(%r12), %rdi
movq (%rdi), %rax
leaq 0x220(%rsp), %rsi
movq %r15, %rdx
callq *0x48(%rax)
leaq 0x20(%rsp), %rdx
andq $0x0, 0x40(%rdx)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, (%rdx)
vmovups %xmm0, 0xc(%rdx)
vmovaps %xmm0, 0x20(%rdx)
vmovups %xmm0, 0x2c(%rdx)
movq 0x18(%r12), %rdi
movq (%rdi), %rax
movq 0x278(%rsp), %rsi
movq %r15, %rcx
callq *0x38(%rax)
movq (%r12), %rax
movq -0x18(%rax), %rax
movl 0xd4(%r12,%rax), %edx
imull 0x4(%rsp), %edx
movq 0x8(%r15), %r8
leaq 0x130(%rsp), %rdi
andq $0x0, 0x40(%rdi)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, (%rdi)
vmovups %xmm0, 0xc(%rdi)
vmovaps %xmm0, 0x20(%rdi)
vmovups %xmm0, 0x2c(%rdi)
pushq $0x4
popq %rcx
movl %ebp, %esi
callq 0x636fa
movq $0x0, 0x8(%rsp)
leaq 0x280(%rsp), %rbx
xorl %ebp, %ebp
xorl %ecx, %ecx
movq (%r12), %rax
movq -0x18(%rax), %rax
movslq 0xd4(%r12,%rax), %rax
cmpq %rax, %rcx
jge 0x38c71d
movq %rcx, 0x78(%rsp)
movq %rbx, %rdi
pushq $0x2
popq %rsi
leaq 0x2b0(%rsp), %rdx
callq 0x6fa72
movslq 0x24c(%rsp), %r14
movq %rbp, 0x70(%rsp)
movq %rbp, %r12
imulq %r14, %r12
movq 0x230(%rsp), %rbx
imulq %rbx, %r12
addq 0x220(%rsp), %r12
movl 0x238(%rsp), %r13d
movq 0x240(%rsp), %rbp
movq 0x280(%rsp), %r15
movq 0x8(%r15), %rax
testq %rax, %rax
je 0x38c4d7
lock
decl (%rax)
jne 0x38c4d7
movq (%r15), %rsi
movq 0x20(%r15), %rdi
testq %rdi, %rdi
je 0x38c4cf
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x38c4d7
movq %rsi, %rdi
callq 0x5f3e0
movq %r14, %rax
imulq 0xd0(%rsp), %rax
movq %r12, (%r15)
andq $0x0, 0x8(%r15)
movq %rbx, 0x10(%r15)
movl %r13d, 0x18(%r15)
movq %rbp, 0x20(%r15)
movl $0x2, 0x28(%r15)
movl %r14d, 0x2c(%r15)
movl 0x1c(%rsp), %ecx
movl %ecx, 0x30(%r15)
movabsq $0x100000001, %rcx # imm = 0x100000001
movq %rcx, 0x34(%r15)
movq %rax, 0x40(%r15)
movslq 0x4c(%rsp), %r14
movq 0x30(%rsp), %r15
movl 0x38(%rsp), %ebx
movq 0x40(%rsp), %r13
movq 0x280(%rsp), %r12
movq 0x50(%r12), %rax
movq 0x8(%rsp), %rbp
imulq %r14, %rbp
imulq %r15, %rbp
addq 0x20(%rsp), %rbp
testq %rax, %rax
je 0x38c578
lock
decl (%rax)
jne 0x38c578
movq 0x48(%r12), %rsi
movq 0x68(%r12), %rdi
testq %rdi, %rdi
je 0x38c570
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x38c578
movq %rsi, %rdi
callq 0x5f3e0
movq %r14, %rax
imulq 0x10(%rsp), %rax
movq %rbp, 0x48(%r12)
andq $0x0, 0x50(%r12)
movq %r15, 0x58(%r12)
movl %ebx, 0x60(%r12)
movq %r13, 0x68(%r12)
movl $0x2, 0x70(%r12)
movl %r14d, 0x74(%r12)
movl 0x4(%rsp), %ecx
movl %ecx, 0x78(%r12)
movabsq $0x100000001, %rcx # imm = 0x100000001
movq %rcx, 0x7c(%r12)
movq %rax, 0x88(%r12)
leaq 0x298(%rsp), %rdi
pushq $0x1
popq %rsi
leaq 0x2b0(%rsp), %rdx
callq 0x6fa72
movslq 0x15c(%rsp), %r14
movq 0x8(%rsp), %r12
imulq %r14, %r12
movq 0x140(%rsp), %rbx
imulq %rbx, %r12
addq 0x130(%rsp), %r12
movl 0x148(%rsp), %ebp
movq 0x150(%rsp), %rcx
movq 0x298(%rsp), %r15
movq 0x8(%r15), %rax
testq %rax, %rax
je 0x38c650
lock
decl (%rax)
jne 0x38c650
movq %rcx, %r13
movq (%r15), %rsi
movq 0x20(%r15), %rdi
testq %rdi, %rdi
je 0x38c645
movq (%rdi), %rax
callq *0x18(%rax)
movq %r13, %rcx
jmp 0x38c650
movq %rsi, %rdi
callq 0x5f3e0
movq %r13, %rcx
movq %r14, %rax
movq 0x10(%rsp), %r13
imulq %r13, %rax
movq %r12, (%r15)
andq $0x0, 0x8(%r15)
movq %rbx, 0x10(%r15)
movl %ebp, 0x18(%r15)
movq %rcx, 0x20(%r15)
movl $0x2, 0x28(%r15)
movl %r14d, 0x2c(%r15)
movl 0x4(%rsp), %ecx
movl %ecx, 0x30(%r15)
movabsq $0x100000001, %rcx # imm = 0x100000001
movq %rcx, 0x34(%r15)
movq %rax, 0x40(%r15)
movq 0xd8(%rsp), %r12
movq 0x30(%r12), %rdi
movq 0x128(%rsp), %r15
vmovups (%r15), %zmm0
vmovups %zmm0, 0x2b0(%rsp)
movl $0x1, 0x2b4(%rsp)
movq (%rdi), %rax
leaq 0x280(%rsp), %rbx
movq %rbx, %rsi
leaq 0x298(%rsp), %r14
movq %r14, %rdx
leaq 0x2b0(%rsp), %rcx
vzeroupper
callq *0x30(%rax)
movq %r14, %rdi
callq 0x6fac4
movq %rbx, %rdi
callq 0x6fac4
movq 0x78(%rsp), %rcx
incq %rcx
movq 0x70(%rsp), %rbp
addq 0xd0(%rsp), %rbp
addq %r13, 0x8(%rsp)
jmp 0x38c43c
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x38c74b
lock
decl (%rax)
jne 0x38c74b
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x38c743
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x38c74b
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x60(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
vmovups %xmm0, 0x2c(%rsp)
leaq 0x48(%rsp), %rax
andl $0x0, 0x10(%rax)
vmovups %xmm0, (%rax)
movq 0x20(%r12), %rdi
movq 0x270(%rsp), %rax
movq (%rax), %rdx
movq (%rdi), %rax
leaq 0x130(%rsp), %rsi
movq %r15, %rcx
callq *0x38(%rax)
movq 0x138(%rsp), %rax
testq %rax, %rax
je 0x38c7c6
lock
decl (%rax)
jne 0x38c7c6
movq 0x130(%rsp), %rsi
movq 0x150(%rsp), %rdi
testq %rdi, %rdi
je 0x38c7be
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x38c7c6
movq %rsi, %rdi
callq 0x5f3e0
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x38c7f4
lock
decl (%rax)
jne 0x38c7f4
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x38c7ec
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x38c7f4
movq %rsi, %rdi
callq 0x5f3e0
movq 0x228(%rsp), %rax
testq %rax, %rax
je 0x38c82b
lock
decl (%rax)
jne 0x38c82b
movq 0x220(%rsp), %rsi
movq 0x240(%rsp), %rdi
testq %rdi, %rdi
je 0x38c823
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x38c82b
movq %rsi, %rdi
callq 0x5f3e0
movq 0x188(%rsp), %rax
testq %rax, %rax
je 0x38c862
lock
decl (%rax)
jne 0x38c862
movq 0x180(%rsp), %rsi
movq 0x1a0(%rsp), %rdi
testq %rdi, %rdi
je 0x38c85a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x38c862
movq %rsi, %rdi
callq 0x5f3e0
movq 0x1d8(%rsp), %rax
testq %rax, %rax
je 0x38c899
lock
decl (%rax)
jne 0x38c899
movq 0x1d0(%rsp), %rsi
movq 0x1f0(%rsp), %rdi
testq %rdi, %rdi
je 0x38c891
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x38c899
movq %rsi, %rdi
callq 0x5f3e0
movq 0x88(%rsp), %rax
testq %rax, %rax
je 0x38c8d0
lock
decl (%rax)
jne 0x38c8d0
movq 0x80(%rsp), %rsi
movq 0xa0(%rsp), %rdi
testq %rdi, %rdi
je 0x38c8c8
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x38c8d0
movq %rsi, %rdi
callq 0x5f3e0
movq 0xe8(%rsp), %rax
testq %rax, %rax
je 0x38c907
lock
decl (%rax)
jne 0x38c907
movq 0xe0(%rsp), %rsi
movq 0x100(%rsp), %rdi
testq %rdi, %rdi
je 0x38c8ff
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x38c907
movq %rsi, %rdi
callq 0x5f3e0
xorl %eax, %eax
addq $0x2f8, %rsp # imm = 0x2F8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x38cbab
jmp 0x38cbab
jmp 0x38cbab
jmp 0x38cbab
jmp 0x38cbab
jmp 0x38cbab
jmp 0x38cbab
movq %rax, %rbx
jmp 0x38cb2d
jmp 0x38c948
movq %rax, %rbx
jmp 0x38ca27
jmp 0x38c952
movq %rax, %rbx
jmp 0x38cabf
movq %rax, %rbx
jmp 0x38caf6
jmp 0x38c9c9
jmp 0x38ca85
jmp 0x38cbab
jmp 0x38c9e0
jmp 0x38c9e0
jmp 0x38c9ce
jmp 0x38ca73
jmp 0x38ca61
jmp 0x38ca73
movq %rax, %rbx
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x38ca76
lock
decl (%rax)
jne 0x38ca76
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0x38c9b9
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x38ca76
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x38ca76
jmp 0x38cbab
movq %rax, %rbx
jmp 0x38c9f0
movq %rax, %rbx
leaq 0x298(%rsp), %rdi
callq 0x6fac4
jmp 0x38c9e3
movq %rax, %rbx
leaq 0x280(%rsp), %rdi
callq 0x6fac4
movq 0x138(%rsp), %rax
testq %rax, %rax
je 0x38ca27
lock
decl (%rax)
jne 0x38ca27
movq 0x130(%rsp), %rsi
movq 0x150(%rsp), %rdi
testq %rdi, %rdi
jne 0x38ca21
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x38ca27
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x38ca88
lock
decl (%rax)
jne 0x38ca88
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0x38ca4f
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x38ca88
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x38ca88
jmp 0x38cbab
jmp 0x38cbab
movq %rax, %rbx
leaq 0x2b0(%rsp), %rdi
callq 0x6fac4
jmp 0x38ca76
movq %rax, %rbx
leaq 0x130(%rsp), %rdi
callq 0x6fac4
jmp 0x38ca88
movq %rax, %rbx
movq 0x228(%rsp), %rax
testq %rax, %rax
je 0x38cabf
lock
decl (%rax)
jne 0x38cabf
movq 0x220(%rsp), %rsi
movq 0x240(%rsp), %rdi
testq %rdi, %rdi
jne 0x38cab9
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x38cabf
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x188(%rsp), %rax
testq %rax, %rax
je 0x38caf6
lock
decl (%rax)
jne 0x38caf6
movq 0x180(%rsp), %rsi
movq 0x1a0(%rsp), %rdi
testq %rdi, %rdi
jne 0x38caf0
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x38caf6
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x1d8(%rsp), %rax
testq %rax, %rax
je 0x38cb2d
lock
decl (%rax)
jne 0x38cb2d
movq 0x1d0(%rsp), %rsi
movq 0x1f0(%rsp), %rdi
testq %rdi, %rdi
jne 0x38cb27
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x38cb2d
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x88(%rsp), %rax
testq %rax, %rax
je 0x38cb64
lock
decl (%rax)
jne 0x38cb64
movq 0x80(%rsp), %rsi
movq 0xa0(%rsp), %rdi
testq %rdi, %rdi
jne 0x38cb5e
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x38cb64
movq (%rdi), %rax
callq *0x18(%rax)
movq 0xe8(%rsp), %rax
testq %rax, %rax
je 0x38cb9b
lock
decl (%rax)
jne 0x38cb9b
movq 0xe0(%rsp), %rsi
movq 0x100(%rsp), %rdi
testq %rdi, %rdi
jne 0x38cb95
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x38cb9b
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
jmp 0x38cbab
jmp 0x38cbab
jmp 0x38cbab
jmp 0x38cbab
movq %rax, %rdi
callq 0x61d68
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/multiheadattention_x86_avx512.cpp |
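All per-head slicing in the two loops above is plain row_range arithmetic: qk_cross stacks num_heads blocks of src_seqlen rows, and qkv_cross stacks blocks of embed_dim_per_head rows. A compact restatement, taken directly from the index expressions in the source:

// Head i's views into the stacked buffers (illustrative):
Mat qk_head  = qk_cross.row_range(i * src_seqlen, src_seqlen);
Mat qkv_head = qkv_cross.row_range(i * embed_dim_per_head, embed_dim_per_head);

Setting opt1.num_threads = 1 inside each loop body keeps the inner gemms single-threaded; the head index i is the dimension that the surrounding #pragma omp parallel for distributes across threads.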
virtual thunk to ncnn::MatMul_x86::create_pipeline(ncnn::Option const&) | int MatMul_x86::create_pipeline(const Option& opt)
{
gemm = ncnn::create_layer(ncnn::LayerType::Gemm);
ncnn::ParamDict pd;
pd.set(2, 0); // transA
pd.set(3, transB); // transB
pd.set(4, 0); // constantA
pd.set(5, 0); // constantB
pd.set(6, 1); // constantC
pd.set(7, 0); // M = outch
pd.set(8, 0); // N = size
pd.set(9, 0); // K = maxk*inch
pd.set(10, -1); // constant_broadcast_type_C = null
pd.set(11, 0); // output_N1M
pd.set(12, 1); // output_elempack
gemm->load_param(pd);
gemm->load_model(ModelBinFromMatArray(0));
gemm->create_pipeline(opt);
return 0;
} | pushq %rax
movq (%rdi), %rax
addq -0x30(%rax), %rdi
callq 0x3b034a
xorl %eax, %eax
popq %rcx
retq
nop
| /csukuangfj[P]ncnn/src/layer/x86/matmul_x86.cpp |
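The ParamDict above configures the wrapped Gemm with both operands non-constant (params 4 and 5), an effectively absent C term (constantC = 1 with constant_broadcast_type_C = -1), and M/N/K left at 0 to be inferred per call. A worked shape trace for the simplest runtime case, the 1-D dot product handled first in MatMul_x86::forward below (illustrative comments; the shapes follow the reshape calls in that function):

// A (w=K) -> A.reshape(K, 1)            : one row of K values
// B (w=K) -> transB ? B.reshape(K, 1)   : same one-row layout, transposed by gemm
//          :          B.reshape(1, K)   : one column of K values
// gemm    -> a 1x1 result; top_blob.reshape(1) turns it back into a 1-D scalar blob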
ncnn::MatMul_x86::forward(std::vector<ncnn::Mat, std::allocator<ncnn::Mat>> const&, std::vector<ncnn::Mat, std::allocator<ncnn::Mat>>&, ncnn::Option const&) const | int MatMul_x86::forward(const std::vector<Mat>& bottom_blobs, std::vector<Mat>& top_blobs, const Option& opt) const
{
const Mat& A = bottom_blobs[0];
const Mat& B = bottom_blobs[1];
Mat& top_blob = top_blobs[0];
const int Adims = A.dims;
const int Bdims = B.dims;
const int max_ABdims = std::max(Adims, Bdims);
const size_t elemsize = A.elemsize;
if (Adims == 1 && Bdims == 1)
{
// dot product
std::vector<Mat> _bottom_blobs(2);
_bottom_blobs[0] = A.reshape(A.w, 1);
_bottom_blobs[1] = transB ? B.reshape(B.w, 1) : B.reshape(1, B.w);
gemm->forward(_bottom_blobs, top_blobs, opt);
top_blob = top_blob.reshape(1, opt.blob_allocator);
}
else if (Adims == 2 && Bdims == 2)
{
// matrix multiply
gemm->forward(bottom_blobs, top_blobs, opt);
}
else if (Adims == 1 && Bdims == 2)
{
// matrix multiply
std::vector<Mat> _bottom_blobs(2);
_bottom_blobs[0] = A.reshape(A.w, 1);
_bottom_blobs[1] = B;
gemm->forward(_bottom_blobs, top_blobs, opt);
top_blob = top_blob.reshape(top_blob.w, opt.blob_allocator);
}
else if (Adims == 2 && Bdims == 1)
{
// matrix multiply
std::vector<Mat> _bottom_blobs(2);
_bottom_blobs[0] = A;
_bottom_blobs[1] = transB ? B.reshape(B.w, 1) : B.reshape(1, B.w);
gemm->forward(_bottom_blobs, top_blobs, opt);
top_blob = top_blob.reshape(top_blob.h, opt.blob_allocator);
}
else if (Adims == 1 && Bdims > 2)
{
// batched matrix multiply
const int N = transB == 0 ? B.w : B.h;
const int batch_size = B.d * B.c;
Mat top_blob1(N, 1, batch_size, elemsize, opt.blob_allocator);
if (top_blob1.empty())
return -100;
Mat A1 = A.reshape(A.w, 1);
Mat B1 = B.reshape(B.w, B.h, batch_size);
for (int p = 0; p < batch_size; p++)
{
std::vector<Mat> _bottom_blobs(2);
_bottom_blobs[0] = A1;
_bottom_blobs[1] = B1.channel(p);
std::vector<Mat> _top_blobs(1);
_top_blobs[0] = top_blob1.channel(p);
gemm->forward(_bottom_blobs, _top_blobs, opt);
}
if (Bdims == 3)
top_blob = top_blob1.reshape(N, B.d * B.c, opt.blob_allocator);
else
top_blob = top_blob1.reshape(N, B.d, B.c, opt.blob_allocator);
}
else if (Adims > 2 && Bdims == 1)
{
// batched matrix multiply
const int M = A.h;
const int batch_size = A.d * A.c;
Mat top_blob1(1, M, batch_size, elemsize, opt.blob_allocator);
if (top_blob1.empty())
return -100;
Mat A1 = A.reshape(A.w, A.h, batch_size);
Mat BT = transB ? B.reshape(B.w, 1) : B.reshape(1, B.w);
for (int p = 0; p < batch_size; p++)
{
std::vector<Mat> _bottom_blobs(2);
_bottom_blobs[0] = A1.channel(p);
_bottom_blobs[1] = BT;
std::vector<Mat> _top_blobs(1);
_top_blobs[0] = top_blob1.channel(p);
gemm->forward(_bottom_blobs, _top_blobs, opt);
}
if (Adims == 3)
top_blob = top_blob1.reshape(M, A.d * A.c, opt.blob_allocator);
else
top_blob = top_blob1.reshape(M, A.d, A.c, opt.blob_allocator);
}
else if (max_ABdims == 3)
{
Mat A1 = Adims == 2 ? A.reshape(A.w, A.h, 1) : A;
Mat B1 = Bdims == 2 ? B.reshape(B.w, B.h, 1) : B;
const int M = A1.h;
const int N = transB == 0 ? B1.w : B1.h;
const int batch_size = std::max(A1.c, B1.c);
top_blob.create(N, M, batch_size, elemsize, opt.blob_allocator);
if (top_blob.empty())
return -100;
for (int p = 0; p < batch_size; p++)
{
int Ap = A1.c == 1 ? 0 : p;
int Bp = B1.c == 1 ? 0 : p;
std::vector<Mat> _bottom_blobs(2);
_bottom_blobs[0] = A1.channel(Ap);
_bottom_blobs[1] = B1.channel(Bp);
std::vector<Mat> _top_blobs(1);
_top_blobs[0] = top_blob.channel(p);
gemm->forward(_bottom_blobs, _top_blobs, opt);
}
}
else if (max_ABdims == 4)
{
Mat A1 = Adims == 3 ? A.reshape(A.w, A.h, A.c, 1) : A;
Mat B1 = Bdims == 3 ? B.reshape(B.w, B.h, B.c, 1) : B;
const int M = A1.h;
const int N = transB == 0 ? B1.w : B1.h;
const int batch_size_d = std::max(A1.d, B1.d);
const int batch_size_c = std::max(A1.c, B1.c);
top_blob.create(N, M, batch_size_d, batch_size_c, elemsize, opt.blob_allocator);
if (top_blob.empty())
return -100;
for (int p = 0; p < batch_size_c; p++)
{
int Ap = A1.c == 1 ? 0 : p;
int Bp = B1.c == 1 ? 0 : p;
for (int q = 0; q < batch_size_d; q++)
{
int Ad = A1.d == 1 ? 0 : q;
int Bd = B1.d == 1 ? 0 : q;
std::vector<Mat> _bottom_blobs(2);
_bottom_blobs[0] = A1.channel(Ap).depth(Ad);
_bottom_blobs[1] = B1.channel(Bp).depth(Bd);
std::vector<Mat> _top_blobs(1);
_top_blobs[0] = top_blob.channel(p).depth(q);
gemm->forward(_bottom_blobs, _top_blobs, opt);
}
}
}
else
{
NCNN_LOGE("impossible matmul %d %d", Adims, Bdims);
return -1;
}
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x1b8, %rsp # imm = 0x1B8
movq %rcx, %r15
movq %rdi, 0x98(%rsp)
movq (%rsi), %rbx
leaq 0x48(%rbx), %rax
movq %rax, 0x88(%rsp)
movq %rdx, 0x70(%rsp)
movq (%rdx), %r12
movl 0x28(%rbx), %ebp
movl 0x70(%rbx), %r14d
cmpl %r14d, %ebp
movl %r14d, %eax
cmovgl %ebp, %eax
movl %ebp, %edx
xorl $0x1, %edx
movl %r14d, %ecx
xorl $0x1, %ecx
movl %edx, %edi
orl %ecx, %edi
jne 0x3b05b7
movq %r15, %r13
leaq 0xa0(%rsp), %rdi
pushq $0x2
popq %rsi
leaq 0x10(%rsp), %rdx
callq 0x6fa72
movl 0x2c(%rbx), %edx
leaq 0x10(%rsp), %r14
pushq $0x1
popq %rbp
movq %r14, %rdi
movq %rbx, %r15
movq %rbx, %rsi
movl %ebp, %ecx
xorl %r8d, %r8d
callq 0x62e4e
movq 0xa0(%rsp), %rbx
movq 0x18(%rsp), %rax
cmpq %r14, %rbx
je 0x3b0bcc
testq %rax, %rax
je 0x3b0586
lock
incl (%rax)
movq 0x8(%rbx), %rax
testq %rax, %rax
je 0x3b0b72
lock
decl (%rax)
jne 0x3b0b72
movq (%rbx), %rsi
movq 0x20(%rbx), %rdi
testq %rdi, %rdi
je 0x3b0b6a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b0b72
movl %ebp, %edi
xorl $0x2, %edi
movl %r14d, %r8d
xorl $0x2, %r8d
movl %edi, %r9d
orl %r8d, %r9d
jne 0x3b05ea
movq 0x98(%rsp), %rax
movq 0x8(%rax), %rdi
movq (%rdi), %rax
movq 0x70(%rsp), %rdx
movq %r15, %rcx
callq *0x30(%rax)
jmp 0x3b195f
orl %r8d, %edx
jne 0x3b0673
leaq 0xa0(%rsp), %rdi
pushq $0x2
popq %rsi
leaq 0x10(%rsp), %rdx
callq 0x6fa72
movl 0x2c(%rbx), %edx
leaq 0x10(%rsp), %r14
pushq $0x1
popq %rcx
movq %r14, %rdi
movq %rbx, %rbp
movq %rbx, %rsi
xorl %r8d, %r8d
callq 0x62e4e
movq 0xa0(%rsp), %rbx
movq 0x18(%rsp), %rax
cmpq %r14, %rbx
je 0x3b1409
testq %rax, %rax
je 0x3b0642
lock
incl (%rax)
movq 0x8(%rbx), %rax
testq %rax, %rax
je 0x3b13af
lock
decl (%rax)
jne 0x3b13af
movq (%rbx), %rsi
movq 0x20(%rbx), %rdi
testq %rdi, %rdi
je 0x3b13a7
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b13af
orl %ecx, %edi
jne 0x3b06e0
movq %rbx, %r14
leaq 0xa0(%rsp), %rdi
pushq $0x2
popq %rsi
leaq 0x10(%rsp), %rdx
callq 0x6fa72
movq 0xa0(%rsp), %rbx
movq %r14, %rsi
cmpq %r14, %rbx
je 0x3b1764
movq 0x8(%rsi), %rax
testq %rax, %rax
je 0x3b06af
lock
incl (%rax)
movq 0x8(%rbx), %rax
testq %rax, %rax
je 0x3b1714
lock
decl (%rax)
jne 0x3b1714
movq (%rbx), %rsi
movq 0x20(%rbx), %rdi
testq %rdi, %rdi
je 0x3b170c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b1714
movq 0x10(%rbx), %r8
cmpl $0x1, %ebp
sete %cl
cmpl $0x3, %r14d
setge %dl
andb %cl, %dl
cmpb $0x1, %dl
movq %r12, 0x148(%rsp)
movq %r15, 0x140(%rsp)
jne 0x3b0de8
movq 0x98(%rsp), %rdx
movq (%rdx), %rax
movq -0x18(%rax), %rax
xorl %ecx, %ecx
cmpl $0x0, 0xd0(%rdx,%rax)
setne %cl
movl 0x74(%rbx,%rcx,4), %esi
movl 0x80(%rbx), %r13d
imull 0x7c(%rbx), %r13d
movq 0x8(%r15), %r9
leaq 0x10(%rsp), %r15
andq $0x0, 0x40(%r15)
xorps %xmm0, %xmm0
movaps %xmm0, (%r15)
movups %xmm0, 0xc(%r15)
movaps %xmm0, 0x20(%r15)
movups %xmm0, 0x2c(%r15)
pushq $0x1
popq %rdx
movq %r15, %rdi
movl %esi, 0xe8(%rsp)
movl %r13d, %ecx
callq 0x63810
cmpq $0x0, (%r15)
je 0x3b1274
movslq 0x48(%rsp), %rax
imulq 0x50(%rsp), %rax
testq %rax, %rax
je 0x3b1274
movl 0x2c(%rbx), %edx
leaq 0xa0(%rsp), %rdi
pushq $0x1
popq %rcx
movq %rbx, %rsi
xorl %r8d, %r8d
callq 0x62e4e
movl 0x74(%rbx), %edx
movq %rbx, 0x138(%rsp)
movl 0x78(%rbx), %ecx
xorl %r15d, %r15d
leaq 0x150(%rsp), %rdi
movq 0x88(%rsp), %rsi
movl %r13d, %r8d
xorl %r9d, %r9d
callq 0x63020
movq 0x140(%rsp), %rbx
testl %r13d, %r13d
cmovlel %r15d, %r13d
leaq 0xf0(%rsp), %rbp
movl %r14d, 0x80(%rsp)
movq %r13, 0x88(%rsp)
cmpq %r15, %r13
je 0x3b12e9
movq %rbp, %rdi
pushq $0x2
popq %rsi
leaq 0x1a0(%rsp), %rdx
callq 0x6fa72
movq 0xf0(%rsp), %rbx
leaq 0xa0(%rsp), %rax
cmpq %rax, %rbx
je 0x3b08d0
movq 0xa8(%rsp), %rax
testq %rax, %rax
je 0x3b0840
lock
incl (%rax)
movq 0x8(%rbx), %rax
testq %rax, %rax
je 0x3b086a
lock
decl (%rax)
jne 0x3b086a
movq (%rbx), %rsi
movq 0x20(%rbx), %rdi
testq %rdi, %rdi
je 0x3b0862
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b086a
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x40(%rbx)
xorps %xmm0, %xmm0
movups %xmm0, (%rbx)
movups %xmm0, 0xc(%rbx)
andl $0x0, 0x38(%rbx)
movups %xmm0, 0x28(%rbx)
movaps 0xa0(%rsp), %xmm0
movups %xmm0, (%rbx)
movq 0xb0(%rsp), %rax
movq %rax, 0x10(%rbx)
movl 0xb8(%rsp), %eax
movl %eax, 0x18(%rbx)
movq 0xc0(%rsp), %rax
movq %rax, 0x20(%rbx)
movups 0xc8(%rsp), %xmm0
movups %xmm0, 0x28(%rbx)
movl 0xd8(%rsp), %eax
movl %eax, 0x38(%rbx)
movq 0xe0(%rsp), %rax
movq %rax, 0x40(%rbx)
movslq 0x17c(%rsp), %rdi
movslq 0x180(%rsp), %r12
movl 0x184(%rsp), %esi
movq 0x190(%rsp), %rbp
movq %r15, 0x70(%rsp)
imulq %r15, %rbp
movq 0x160(%rsp), %rbx
imulq %rbx, %rbp
addq 0x150(%rsp), %rbp
movl 0x168(%rsp), %r8d
movq 0x170(%rsp), %r9
movq %r12, %rcx
imulq %rdi, %rcx
movq %rbx, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rbx
movq %rax, %r13
movl 0x178(%rsp), %r14d
cmpl $0x4, %r14d
cmoveq %rcx, %r13
movq 0xf0(%rsp), %r15
movq 0x50(%r15), %rax
testq %rax, %rax
je 0x3b09b6
lock
decl (%rax)
jne 0x3b09b6
movq %r9, 0x78(%rsp)
movl %r8d, 0x8(%rsp)
movq %rdi, 0x60(%rsp)
movl %esi, 0x68(%rsp)
movq 0x48(%r15), %rsi
movq 0x68(%r15), %rdi
testq %rdi, %rdi
je 0x3b099b
movq (%rdi), %rax
callq *0x18(%rax)
movl 0x68(%rsp), %esi
movq 0x60(%rsp), %rdi
movl 0x8(%rsp), %r8d
movq 0x78(%rsp), %r9
jmp 0x3b09b6
movq %rsi, %rdi
callq 0x5f3e0
movl 0x68(%rsp), %esi
movq 0x60(%rsp), %rdi
movl 0x8(%rsp), %r8d
movq 0x78(%rsp), %r9
decl %r14d
movq %rbp, 0x48(%r15)
andq $0x0, 0x50(%r15)
movq %rbx, 0x58(%r15)
movl %r8d, 0x60(%r15)
movq %r9, 0x68(%r15)
movl %r14d, 0x70(%r15)
movl %edi, 0x74(%r15)
movl %r12d, 0x78(%r15)
movl $0x1, 0x7c(%r15)
movl %esi, 0x80(%r15)
movq %r13, 0x88(%r15)
leaq 0x1a0(%rsp), %rdi
pushq $0x1
popq %rsi
leaq 0x97(%rsp), %rdx
callq 0x6fa72
movslq 0x3c(%rsp), %rdi
movslq 0x40(%rsp), %r12
movl 0x44(%rsp), %esi
movq 0x50(%rsp), %rbx
imulq 0x70(%rsp), %rbx
movq 0x20(%rsp), %rbp
imulq %rbp, %rbx
addq 0x10(%rsp), %rbx
movl 0x28(%rsp), %r8d
movq 0x30(%rsp), %r9
movq %r12, %rcx
imulq %rdi, %rcx
movq %rbp, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rbp
movq %rax, %r13
movl 0x38(%rsp), %r14d
cmpl $0x4, %r14d
cmoveq %rcx, %r13
movq 0x1a0(%rsp), %r15
movq 0x8(%r15), %rax
testq %rax, %rax
je 0x3b0acf
lock
decl (%rax)
jne 0x3b0acf
movq %r9, 0x78(%rsp)
movl %r8d, 0x8(%rsp)
movq %rdi, 0x60(%rsp)
movl %esi, 0x68(%rsp)
movq (%r15), %rsi
movq 0x20(%r15), %rdi
testq %rdi, %rdi
je 0x3b0ab4
movq (%rdi), %rax
callq *0x18(%rax)
movl 0x68(%rsp), %esi
movq 0x60(%rsp), %rdi
movl 0x8(%rsp), %r8d
movq 0x78(%rsp), %r9
jmp 0x3b0acf
movq %rsi, %rdi
callq 0x5f3e0
movl 0x68(%rsp), %esi
movq 0x60(%rsp), %rdi
movl 0x8(%rsp), %r8d
movq 0x78(%rsp), %r9
movq %rbx, (%r15)
andq $0x0, 0x8(%r15)
decl %r14d
movq %rbp, 0x10(%r15)
movl %r8d, 0x18(%r15)
movq %r9, 0x20(%r15)
movl %r14d, 0x28(%r15)
movl %edi, 0x2c(%r15)
movl %r12d, 0x30(%r15)
movl $0x1, 0x34(%r15)
movl %esi, 0x38(%r15)
movq %r13, 0x40(%r15)
movq 0x98(%rsp), %rax
movq 0x8(%rax), %rdi
movq (%rdi), %rax
leaq 0xf0(%rsp), %rbp
movq %rbp, %rsi
leaq 0x1a0(%rsp), %r14
movq %r14, %rdx
movq 0x140(%rsp), %rbx
movq %rbx, %rcx
callq *0x30(%rax)
movq %r14, %rdi
callq 0x6fac4
movq %rbp, %rdi
callq 0x6fac4
movq 0x70(%rsp), %r15
incq %r15
movq 0x148(%rsp), %r12
movl 0x80(%rsp), %r14d
movq 0x88(%rsp), %r13
jmp 0x3b07fb
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x40(%rbx)
xorps %xmm0, %xmm0
movups %xmm0, (%rbx)
movups %xmm0, 0xc(%rbx)
movups %xmm0, 0x28(%rbx)
andl $0x0, 0x38(%rbx)
movq 0x10(%rsp), %rax
movq %rax, (%rbx)
movq 0x18(%rsp), %rax
movq %rax, 0x8(%rbx)
movq 0x20(%rsp), %rcx
movq %rcx, 0x10(%rbx)
movl 0x28(%rsp), %ecx
movl %ecx, 0x18(%rbx)
movq 0x30(%rsp), %rcx
movq %rcx, 0x20(%rbx)
movups 0x38(%rsp), %xmm0
movups %xmm0, 0x28(%rbx)
movl 0x48(%rsp), %ecx
movl %ecx, 0x38(%rbx)
movq 0x50(%rsp), %rcx
movq %rcx, 0x40(%rbx)
testq %rax, %rax
je 0x3b0bf5
lock
decl (%rax)
jne 0x3b0bf5
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b0bed
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b0bf5
movq %rsi, %rdi
callq 0x5f3e0
movq 0x98(%rsp), %rcx
movq (%rcx), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0xd0(%rcx,%rax)
movl 0x74(%r15), %eax
movl %eax, %edx
cmovel %ebp, %edx
cmovel %eax, %ebp
leaq 0x10(%rsp), %r14
movq %r14, %rdi
movq 0x88(%rsp), %rsi
movl %ebp, %ecx
xorl %r8d, %r8d
callq 0x62e4e
movq 0xa0(%rsp), %rbx
leaq 0x48(%rbx), %rcx
movq 0x18(%rsp), %rax
cmpq %r14, %rcx
je 0x3b0cc5
testq %rax, %rax
je 0x3b0c50
lock
incl (%rax)
movq 0x50(%rbx), %rax
testq %rax, %rax
je 0x3b0c7b
lock
decl (%rax)
jne 0x3b0c7b
movq 0x48(%rbx), %rsi
movq 0x68(%rbx), %rdi
testq %rdi, %rdi
je 0x3b0c73
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b0c7b
movq %rsi, %rdi
callq 0x5f3e0
movq 0x10(%rsp), %rax
movq %rax, 0x48(%rbx)
movq 0x18(%rsp), %rax
movq %rax, 0x50(%rbx)
movq 0x20(%rsp), %rcx
movq %rcx, 0x58(%rbx)
movl 0x28(%rsp), %ecx
movl %ecx, 0x60(%rbx)
movq 0x30(%rsp), %rcx
movq %rcx, 0x68(%rbx)
movups 0x38(%rsp), %xmm0
movups %xmm0, 0x70(%rbx)
movl 0x48(%rsp), %ecx
movl %ecx, 0x80(%rbx)
movq 0x50(%rsp), %rcx
movq %rcx, 0x88(%rbx)
testq %rax, %rax
je 0x3b0cee
lock
decl (%rax)
jne 0x3b0cee
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b0ce6
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b0cee
movq %rsi, %rdi
callq 0x5f3e0
movq 0x98(%rsp), %rax
movq 0x8(%rax), %rdi
movq (%rdi), %rax
leaq 0xa0(%rsp), %rsi
movq 0x70(%rsp), %rdx
movq %r13, %rcx
callq *0x30(%rax)
movq 0x8(%r13), %rcx
leaq 0x10(%rsp), %r14
pushq $0x1
popq %rdx
movq %r14, %rdi
movq %r12, %rsi
callq 0x62c8a
movq 0x18(%rsp), %rax
cmpq %r14, %r12
je 0x3b0db8
testq %rax, %rax
je 0x3b0d3d
lock
incl (%rax)
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x3b0d6a
lock
decl (%rax)
jne 0x3b0d6a
movq (%r12), %rsi
movq 0x20(%r12), %rdi
testq %rdi, %rdi
je 0x3b0d62
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b0d6a
movq %rsi, %rdi
callq 0x5f3e0
movq 0x10(%rsp), %rax
movq %rax, (%r12)
movq 0x18(%rsp), %rax
movq %rax, 0x8(%r12)
movq 0x20(%rsp), %rcx
movq %rcx, 0x10(%r12)
movl 0x28(%rsp), %ecx
movl %ecx, 0x18(%r12)
movq 0x30(%rsp), %rcx
movq %rcx, 0x20(%r12)
movups 0x38(%rsp), %xmm0
movups %xmm0, 0x28(%r12)
movl 0x48(%rsp), %ecx
movl %ecx, 0x38(%r12)
movq 0x50(%rsp), %rcx
movq %rcx, 0x40(%r12)
testq %rax, %rax
je 0x3b1952
lock
decl (%rax)
jne 0x3b1952
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b194a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b1952
cmpl $0x1, %r14d
sete %cl
cmpl $0x3, %ebp
setge %dl
andb %cl, %dl
cmpb $0x1, %dl
jne 0x3b12a9
movl 0x30(%rbx), %edx
movl 0x38(%rbx), %ecx
imull 0x34(%rbx), %ecx
movq 0x8(%r15), %r9
leaq 0x10(%rsp), %r15
andq $0x0, 0x40(%r15)
xorps %xmm0, %xmm0
movaps %xmm0, (%r15)
movups %xmm0, 0xc(%r15)
movaps %xmm0, 0x20(%r15)
movups %xmm0, 0x2c(%r15)
pushq $0x1
popq %r14
movq %r15, %rdi
movl %r14d, %esi
movl %edx, 0xe8(%rsp)
movq %rcx, %r13
callq 0x63810
cmpq $0x0, (%r15)
je 0x3b1372
movslq 0x48(%rsp), %rax
imulq 0x50(%rsp), %rax
testq %rax, %rax
je 0x3b1372
movl 0x2c(%rbx), %edx
movl 0x30(%rbx), %ecx
leaq 0xa0(%rsp), %rdi
movq %rbx, %rsi
movl %r13d, %r8d
xorl %r9d, %r9d
callq 0x63020
movq 0x98(%rsp), %rcx
movq (%rcx), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0xd0(%rcx,%rax)
movq %rbx, 0x138(%rsp)
movl 0x74(%rbx), %eax
movl %eax, %edx
cmovel %r14d, %edx
cmovel %eax, %r14d
xorl %r15d, %r15d
leaq 0x150(%rsp), %rdi
movq 0x88(%rsp), %rsi
movl %r14d, %ecx
xorl %r8d, %r8d
callq 0x62e4e
movq 0x140(%rsp), %rbx
testl %r13d, %r13d
cmovlel %r15d, %r13d
movq %r13, 0x88(%rsp)
leaq 0xf0(%rsp), %r14
leaq 0x1a0(%rsp), %r13
cmpq %r15, 0x88(%rsp)
je 0x3b1608
movq %r14, %rdi
pushq $0x2
popq %rsi
movq %r13, %rdx
callq 0x6fa72
movslq 0xcc(%rsp), %rdi
movslq 0xd0(%rsp), %r8
movl 0xd4(%rsp), %esi
movq 0xe0(%rsp), %r12
movq %r15, 0x70(%rsp)
imulq %r15, %r12
movq 0xb0(%rsp), %rbx
imulq %rbx, %r12
addq 0xa0(%rsp), %r12
movl 0xb8(%rsp), %r9d
movq 0xc0(%rsp), %r10
movq %r8, %rcx
imulq %rdi, %rcx
movq %rbx, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rbx
movq %rax, %r13
movl 0xc8(%rsp), %r14d
cmpl $0x4, %r14d
cmoveq %rcx, %r13
movq 0xf0(%rsp), %r15
movq 0x8(%r15), %rax
testq %rax, %rax
je 0x3b100b
lock
decl (%rax)
jne 0x3b100b
movq %r10, 0x78(%rsp)
movl %r9d, 0x8(%rsp)
movq %r8, 0x60(%rsp)
movq %rdi, 0x68(%rsp)
movl %esi, 0x80(%rsp)
movq (%r15), %rsi
movq 0x20(%r15), %rdi
testq %rdi, %rdi
je 0x3b0fe8
movq (%rdi), %rax
callq *0x18(%rax)
movl 0x80(%rsp), %esi
movq 0x68(%rsp), %rdi
movq 0x60(%rsp), %r8
movl 0x8(%rsp), %r9d
movq 0x78(%rsp), %r10
jmp 0x3b100b
movq %rsi, %rdi
callq 0x5f3e0
movl 0x80(%rsp), %esi
movq 0x68(%rsp), %rdi
movq 0x60(%rsp), %r8
movl 0x8(%rsp), %r9d
movq 0x78(%rsp), %r10
decl %r14d
movq %r12, (%r15)
andq $0x0, 0x8(%r15)
movq %rbx, 0x10(%r15)
movl %r9d, 0x18(%r15)
movq %r10, 0x20(%r15)
movl %r14d, 0x28(%r15)
movl %edi, 0x2c(%r15)
movl %r8d, 0x30(%r15)
movl $0x1, 0x34(%r15)
movl %esi, 0x38(%r15)
movq %r13, 0x40(%r15)
movq 0xf0(%rsp), %rbx
leaq 0x48(%rbx), %rax
leaq 0x150(%rsp), %rcx
cmpq %rcx, %rax
movq 0x70(%rsp), %r14
leaq 0x1a0(%rsp), %r15
je 0x3b10f9
movq 0x158(%rsp), %rax
testq %rax, %rax
je 0x3b1078
lock
incl (%rax)
movq 0x50(%rbx), %rax
testq %rax, %rax
je 0x3b10a3
lock
decl (%rax)
jne 0x3b10a3
movq 0x48(%rbx), %rsi
movq 0x68(%rbx), %rdi
testq %rdi, %rdi
je 0x3b109b
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b10a3
movq %rsi, %rdi
callq 0x5f3e0
movaps 0x150(%rsp), %xmm0
movups %xmm0, 0x48(%rbx)
movq 0x160(%rsp), %rax
movq %rax, 0x58(%rbx)
movl 0x168(%rsp), %eax
movl %eax, 0x60(%rbx)
movq 0x170(%rsp), %rax
movq %rax, 0x68(%rbx)
movups 0x178(%rsp), %xmm0
movups %xmm0, 0x70(%rbx)
movl 0x188(%rsp), %eax
movl %eax, 0x80(%rbx)
movq 0x190(%rsp), %rax
movq %rax, 0x88(%rbx)
movq %r15, %rdi
pushq $0x1
popq %rsi
leaq 0x97(%rsp), %rdx
callq 0x6fa72
movslq 0x3c(%rsp), %rdi
movslq 0x40(%rsp), %r8
movl 0x44(%rsp), %esi
movq 0x50(%rsp), %rbx
imulq %r14, %rbx
movq 0x20(%rsp), %r12
imulq %r12, %rbx
addq 0x10(%rsp), %rbx
movl 0x28(%rsp), %r9d
movq 0x30(%rsp), %r10
movq %r8, %rcx
imulq %rdi, %rcx
movq %r12, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %r12
movq %rax, %r13
movl 0x38(%rsp), %r14d
cmpl $0x4, %r14d
cmoveq %rcx, %r13
movq 0x1a0(%rsp), %r15
movq 0x8(%r15), %rax
testq %rax, %rax
je 0x3b11e9
lock
decl (%rax)
jne 0x3b11e9
movq %r10, 0x78(%rsp)
movl %r9d, 0x8(%rsp)
movq %r8, 0x60(%rsp)
movq %rdi, 0x68(%rsp)
movl %esi, 0x80(%rsp)
movq (%r15), %rsi
movq 0x20(%r15), %rdi
testq %rdi, %rdi
je 0x3b11c6
movq (%rdi), %rax
callq *0x18(%rax)
movl 0x80(%rsp), %esi
movq 0x68(%rsp), %rdi
movq 0x60(%rsp), %r8
movl 0x8(%rsp), %r9d
movq 0x78(%rsp), %r10
jmp 0x3b11e9
movq %rsi, %rdi
callq 0x5f3e0
movl 0x80(%rsp), %esi
movq 0x68(%rsp), %rdi
movq 0x60(%rsp), %r8
movl 0x8(%rsp), %r9d
movq 0x78(%rsp), %r10
movq %rbx, (%r15)
andq $0x0, 0x8(%r15)
decl %r14d
movq %r12, 0x10(%r15)
movl %r9d, 0x18(%r15)
movq %r10, 0x20(%r15)
movl %r14d, 0x28(%r15)
movl %edi, 0x2c(%r15)
movl %r8d, 0x30(%r15)
movl $0x1, 0x34(%r15)
movl %esi, 0x38(%r15)
movq %r13, 0x40(%r15)
movq 0x98(%rsp), %rax
movq 0x8(%rax), %rdi
movq (%rdi), %rax
leaq 0xf0(%rsp), %r14
movq %r14, %rsi
leaq 0x1a0(%rsp), %r13
movq %r13, %rdx
movq 0x140(%rsp), %rbx
movq %rbx, %rcx
callq *0x30(%rax)
movq %r13, %rdi
callq 0x6fac4
movq %r14, %rdi
callq 0x6fac4
movq 0x70(%rsp), %r15
incq %r15
movq 0x148(%rsp), %r12
jmp 0x3b0ef2
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3b25a8
lock
decl (%rax)
jne 0x3b25a8
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b25a0
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b25a8
cmpl $0x4, %eax
je 0x3b15d7
cmpl $0x3, %eax
jne 0x3b25b0
cmpl $0x2, %ebp
jne 0x3b1973
movl 0x2c(%rbx), %edx
movl 0x30(%rbx), %ecx
leaq 0x10(%rsp), %rdi
movq %r8, %r13
pushq $0x1
popq %r8
movq %rbx, %rsi
xorl %r9d, %r9d
callq 0x63020
movq %r13, %r8
jmp 0x3b19b7
cmpl $0x3, %r14d
jne 0x3b168d
movq 0x138(%rsp), %rax
movl 0x80(%rax), %ecx
imull 0x7c(%rax), %ecx
movq 0x8(%rbx), %r8
leaq 0xf0(%rsp), %r14
leaq 0x10(%rsp), %rsi
movq %r14, %rdi
movl 0xe8(%rsp), %edx
callq 0x62e4e
movq 0xf8(%rsp), %rax
cmpq %r14, %r12
je 0x3b264a
testq %rax, %rax
je 0x3b133e
lock
incl (%rax)
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x3b25e4
lock
decl (%rax)
jne 0x3b25e4
movq (%r12), %rsi
movq 0x20(%r12), %rdi
testq %rdi, %rdi
je 0x3b25dc
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b25e4
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3b25a8
lock
decl (%rax)
jne 0x3b25a8
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b25a0
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b25a8
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x40(%rbx)
xorps %xmm0, %xmm0
movups %xmm0, (%rbx)
movups %xmm0, 0xc(%rbx)
movups %xmm0, 0x28(%rbx)
andl $0x0, 0x38(%rbx)
movq 0x10(%rsp), %rax
movq %rax, (%rbx)
movq 0x18(%rsp), %rax
movq %rax, 0x8(%rbx)
movq 0x20(%rsp), %rcx
movq %rcx, 0x10(%rbx)
movl 0x28(%rsp), %ecx
movl %ecx, 0x18(%rbx)
movq 0x30(%rsp), %rcx
movq %rcx, 0x20(%rbx)
movups 0x38(%rsp), %xmm0
movups %xmm0, 0x28(%rbx)
movl 0x48(%rsp), %ecx
movl %ecx, 0x38(%rbx)
movq 0x50(%rsp), %rcx
movq %rcx, 0x40(%rbx)
testq %rax, %rax
je 0x3b1432
lock
decl (%rax)
jne 0x3b1432
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b142a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b1432
movq %rsi, %rdi
callq 0x5f3e0
movq 0xa0(%rsp), %rbx
cmpq %rbp, %rbx
je 0x3b14db
movq 0x50(%rbp), %rax
testq %rax, %rax
je 0x3b144f
lock
incl (%rax)
movq 0x50(%rbx), %rax
testq %rax, %rax
je 0x3b147a
lock
decl (%rax)
jne 0x3b147a
movq 0x48(%rbx), %rsi
movq 0x68(%rbx), %rdi
testq %rdi, %rdi
je 0x3b1472
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b147a
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x88(%rbx)
leaq 0x48(%rbx), %rax
xorps %xmm0, %xmm0
movups %xmm0, 0xc(%rax)
movups %xmm0, (%rax)
andl $0x0, 0x80(%rbx)
movups %xmm0, 0x70(%rbx)
movups 0x48(%rbp), %xmm0
movups %xmm0, 0x48(%rbx)
movq 0x58(%rbp), %rax
movq %rax, 0x58(%rbx)
movl 0x60(%rbp), %eax
movl %eax, 0x60(%rbx)
movq 0x68(%rbp), %rax
movq %rax, 0x68(%rbx)
movups 0x70(%rbp), %xmm0
movups %xmm0, 0x70(%rbx)
movl 0x80(%rbp), %eax
movl %eax, 0x80(%rbx)
movq 0x88(%rbp), %rax
movq %rax, 0x88(%rbx)
movq 0x98(%rsp), %rax
movq 0x8(%rax), %rdi
movq (%rdi), %rax
leaq 0xa0(%rsp), %rsi
movq 0x70(%rsp), %rdx
movq %r15, %rcx
callq *0x30(%rax)
movl 0x2c(%r12), %edx
movq 0x8(%r15), %rcx
leaq 0x10(%rsp), %r14
movq %r14, %rdi
movq %r12, %rsi
callq 0x62c8a
movq 0x18(%rsp), %rax
cmpq %r14, %r12
je 0x3b15a7
testq %rax, %rax
je 0x3b152c
lock
incl (%rax)
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x3b1559
lock
decl (%rax)
jne 0x3b1559
movq (%r12), %rsi
movq 0x20(%r12), %rdi
testq %rdi, %rdi
je 0x3b1551
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b1559
movq %rsi, %rdi
callq 0x5f3e0
movq 0x10(%rsp), %rax
movq %rax, (%r12)
movq 0x18(%rsp), %rax
movq %rax, 0x8(%r12)
movq 0x20(%rsp), %rcx
movq %rcx, 0x10(%r12)
movl 0x28(%rsp), %ecx
movl %ecx, 0x18(%r12)
movq 0x30(%rsp), %rcx
movq %rcx, 0x20(%r12)
movups 0x38(%rsp), %xmm0
movups %xmm0, 0x28(%r12)
movl 0x48(%rsp), %ecx
movl %ecx, 0x38(%r12)
movq 0x50(%rsp), %rcx
movq %rcx, 0x40(%r12)
testq %rax, %rax
je 0x3b1952
lock
decl (%rax)
jne 0x3b1952
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b194a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b1952
cmpl $0x3, %ebp
jne 0x3b1eec
movq %r8, %r13
movl 0x2c(%rbx), %edx
movl 0x30(%rbx), %ecx
movl 0x38(%rbx), %r8d
andq $0x0, (%rsp)
leaq 0x10(%rsp), %rdi
pushq $0x1
popq %r9
movq %rbx, %rsi
callq 0x632f0
jmp 0x3b1f33
cmpl $0x3, %ebp
jne 0x3b244c
movq 0x138(%rsp), %rax
movl 0x38(%rax), %ecx
imull 0x34(%rax), %ecx
movq 0x8(%rbx), %r8
leaq 0xf0(%rsp), %r14
leaq 0x10(%rsp), %rsi
movq %r14, %rdi
movl 0xe8(%rsp), %edx
callq 0x62e4e
movq 0xf8(%rsp), %rax
cmpq %r14, %r12
je 0x3b28a8
testq %rax, %rax
je 0x3b1659
lock
incl (%rax)
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x3b2842
lock
decl (%rax)
jne 0x3b2842
movq (%r12), %rsi
movq 0x20(%r12), %rdi
testq %rdi, %rdi
je 0x3b283a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b2842
movq 0x138(%rsp), %rax
movl 0x7c(%rax), %ecx
movl 0x80(%rax), %r8d
movq 0x8(%rbx), %r9
leaq 0xf0(%rsp), %r14
leaq 0x10(%rsp), %rsi
movq %r14, %rdi
movl 0xe8(%rsp), %edx
callq 0x63020
movq 0xf8(%rsp), %rax
cmpq %r14, %r12
je 0x3b26ee
testq %rax, %rax
je 0x3b16d8
lock
incl (%rax)
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x3b2688
lock
decl (%rax)
jne 0x3b2688
movq (%r12), %rsi
movq 0x20(%r12), %rdi
testq %rdi, %rdi
je 0x3b2680
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b2688
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x40(%rbx)
xorps %xmm0, %xmm0
movups %xmm0, (%rbx)
movups %xmm0, 0xc(%rbx)
andl $0x0, 0x38(%rbx)
movups %xmm0, 0x28(%rbx)
movq %r14, %rsi
movups (%r14), %xmm0
movups %xmm0, (%rbx)
movq 0x10(%r14), %rax
movq %rax, 0x10(%rbx)
movl 0x18(%r14), %eax
movl %eax, 0x18(%rbx)
movq 0x20(%r14), %rax
movq %rax, 0x20(%rbx)
movups 0x28(%r14), %xmm0
movups %xmm0, 0x28(%rbx)
movl 0x38(%r14), %eax
movl %eax, 0x38(%rbx)
movq 0x40(%r14), %rax
movq %rax, 0x40(%rbx)
pushq $0x1
popq %rcx
movq 0x98(%rsp), %rdx
movq (%rdx), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0xd0(%rdx,%rax)
movl 0x74(%rsi), %eax
movl %eax, %edx
cmovel %ecx, %edx
cmovel %eax, %ecx
leaq 0x10(%rsp), %r14
movq %r14, %rdi
movq 0x88(%rsp), %rsi
xorl %r8d, %r8d
callq 0x62e4e
movq 0xa0(%rsp), %rbx
leaq 0x48(%rbx), %rcx
movq 0x18(%rsp), %rax
cmpq %r14, %rcx
je 0x3b1834
testq %rax, %rax
je 0x3b17bf
lock
incl (%rax)
movq 0x50(%rbx), %rax
testq %rax, %rax
je 0x3b17ea
lock
decl (%rax)
jne 0x3b17ea
movq 0x48(%rbx), %rsi
movq 0x68(%rbx), %rdi
testq %rdi, %rdi
je 0x3b17e2
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b17ea
movq %rsi, %rdi
callq 0x5f3e0
movq 0x10(%rsp), %rax
movq %rax, 0x48(%rbx)
movq 0x18(%rsp), %rax
movq %rax, 0x50(%rbx)
movq 0x20(%rsp), %rcx
movq %rcx, 0x58(%rbx)
movl 0x28(%rsp), %ecx
movl %ecx, 0x60(%rbx)
movq 0x30(%rsp), %rcx
movq %rcx, 0x68(%rbx)
movups 0x38(%rsp), %xmm0
movups %xmm0, 0x70(%rbx)
movl 0x48(%rsp), %ecx
movl %ecx, 0x80(%rbx)
movq 0x50(%rsp), %rcx
movq %rcx, 0x88(%rbx)
testq %rax, %rax
je 0x3b185d
lock
decl (%rax)
jne 0x3b185d
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b1855
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b185d
movq %rsi, %rdi
callq 0x5f3e0
movq 0x98(%rsp), %rax
movq 0x8(%rax), %rdi
movq (%rdi), %rax
leaq 0xa0(%rsp), %rsi
movq 0x70(%rsp), %rdx
movq %r15, %rcx
callq *0x30(%rax)
movl 0x30(%r12), %edx
movq 0x8(%r15), %rcx
leaq 0x10(%rsp), %r14
movq %r14, %rdi
movq %r12, %rsi
callq 0x62c8a
movq 0x18(%rsp), %rax
cmpq %r14, %r12
je 0x3b1929
testq %rax, %rax
je 0x3b18ae
lock
incl (%rax)
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x3b18db
lock
decl (%rax)
jne 0x3b18db
movq (%r12), %rsi
movq 0x20(%r12), %rdi
testq %rdi, %rdi
je 0x3b18d3
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b18db
movq %rsi, %rdi
callq 0x5f3e0
movq 0x10(%rsp), %rax
movq %rax, (%r12)
movq 0x18(%rsp), %rax
movq %rax, 0x8(%r12)
movq 0x20(%rsp), %rcx
movq %rcx, 0x10(%r12)
movl 0x28(%rsp), %ecx
movl %ecx, 0x18(%r12)
movq 0x30(%rsp), %rcx
movq %rcx, 0x20(%r12)
movups 0x38(%rsp), %xmm0
movups %xmm0, 0x28(%r12)
movl 0x48(%rsp), %ecx
movl %ecx, 0x38(%r12)
movq 0x50(%rsp), %rcx
movq %rcx, 0x40(%r12)
testq %rax, %rax
je 0x3b1952
lock
decl (%rax)
jne 0x3b1952
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b194a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b1952
movq %rsi, %rdi
callq 0x5f3e0
leaq 0xa0(%rsp), %rdi
callq 0x6fac4
xorl %eax, %eax
addq $0x1b8, %rsp # imm = 0x1B8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq (%rbx), %rax
movq %rax, 0x10(%rsp)
movq 0x8(%rbx), %rax
movq %rax, 0x18(%rsp)
movq %r8, 0x20(%rsp)
movl 0x18(%rbx), %ecx
movl %ecx, 0x28(%rsp)
movq 0x20(%rbx), %rcx
movq %rcx, 0x30(%rsp)
movl %ebp, 0x38(%rsp)
movups 0x2c(%rbx), %xmm0
movups %xmm0, 0x3c(%rsp)
movq 0x40(%rbx), %rcx
movq %rcx, 0x50(%rsp)
testq %rax, %rax
je 0x3b19b7
lock
incl (%rax)
cmpl $0x2, %r14d
jne 0x3b19e7
movl 0x74(%rbx), %edx
movl 0x78(%rbx), %ecx
leaq 0xa0(%rsp), %rdi
movq %r8, %rbx
pushq $0x1
popq %r8
movq 0x88(%rsp), %rsi
xorl %r9d, %r9d
callq 0x63020
movq %rbx, %r8
jmp 0x3b1a51
movq 0x48(%rbx), %rax
movq %rax, 0xa0(%rsp)
movq 0x50(%rbx), %rax
movq %rax, 0xa8(%rsp)
movq 0x58(%rbx), %rcx
movq %rcx, 0xb0(%rsp)
movl 0x60(%rbx), %ecx
movl %ecx, 0xb8(%rsp)
movq 0x68(%rbx), %rcx
movq %rcx, 0xc0(%rsp)
movups 0x70(%rbx), %xmm0
movups %xmm0, 0xc8(%rsp)
movl 0x80(%rbx), %ecx
movl %ecx, 0xd8(%rsp)
movq 0x88(%rbx), %rcx
movq %rcx, 0xe0(%rsp)
testq %rax, %rax
je 0x3b1a51
lock
incl (%rax)
movq 0x98(%rsp), %rdx
movq (%rdx), %rax
movq -0x18(%rax), %rax
xorl %ecx, %ecx
cmpl $0x0, 0xd0(%rdx,%rax)
setne %cl
movl 0xcc(%rsp,%rcx,4), %esi
movl 0x40(%rsp), %edx
movl 0x48(%rsp), %eax
movl 0xd8(%rsp), %ecx
cmpl %ecx, %eax
cmovgl %eax, %ecx
movq 0x8(%r15), %r9
movq %r12, %rdi
movq %rcx, %rbx
callq 0x63810
cmpq $0x0, (%r12)
je 0x3b1eae
movslq 0x38(%r12), %rax
imulq 0x40(%r12), %rax
testq %rax, %rax
je 0x3b1eae
xorl %eax, %eax
testl %ebx, %ebx
cmovlel %eax, %ebx
movq %rbx, 0x88(%rsp)
leaq 0x150(%rsp), %r14
leaq 0xf0(%rsp), %r15
xorl %r13d, %r13d
cmpq %r13, 0x88(%rsp)
je 0x3b24c8
movl 0x48(%rsp), %ebp
movl 0xd8(%rsp), %ebx
movq %r14, %rdi
pushq $0x2
popq %rsi
movq %r15, %rdx
callq 0x6fa72
cmpl $0x1, %ebp
movslq 0x3c(%rsp), %rdi
movslq 0x40(%rsp), %r8
movl 0x44(%rsp), %esi
movq %r13, 0x70(%rsp)
movl $0x0, %eax
cmoveq %rax, %r13
imulq 0x50(%rsp), %r13
movq 0x20(%rsp), %r14
imulq %r14, %r13
addq 0x10(%rsp), %r13
movl 0x28(%rsp), %r9d
movq 0x30(%rsp), %r10
movq %r8, %rcx
imulq %rdi, %rcx
movq %r14, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %r14
movq %rax, %r15
movl 0x38(%rsp), %r12d
cmpl $0x4, %r12d
cmoveq %rcx, %r15
movq 0x150(%rsp), %rbp
movq 0x8(%rbp), %rax
testq %rax, %rax
je 0x3b1beb
lock
decl (%rax)
jne 0x3b1beb
movq %r10, 0x78(%rsp)
movl %r9d, 0x8(%rsp)
movq %r8, 0x60(%rsp)
movq %rdi, 0x68(%rsp)
movl %esi, 0x80(%rsp)
movq (%rbp), %rsi
movq 0x20(%rbp), %rdi
testq %rdi, %rdi
je 0x3b1bc8
movq (%rdi), %rax
callq *0x18(%rax)
movl 0x80(%rsp), %esi
movq 0x68(%rsp), %rdi
movq 0x60(%rsp), %r8
movl 0x8(%rsp), %r9d
movq 0x78(%rsp), %r10
jmp 0x3b1beb
movq %rsi, %rdi
callq 0x5f3e0
movl 0x80(%rsp), %esi
movq 0x68(%rsp), %rdi
movq 0x60(%rsp), %r8
movl 0x8(%rsp), %r9d
movq 0x78(%rsp), %r10
andq $0x0, 0x8(%rbp)
decl %r12d
cmpl $0x1, %ebx
movq %r13, (%rbp)
movq %r14, 0x10(%rbp)
movl %r9d, 0x18(%rbp)
movq %r10, 0x20(%rbp)
movl %r12d, 0x28(%rbp)
movl %edi, 0x2c(%rbp)
movl %r8d, 0x30(%rbp)
movl $0x1, 0x34(%rbp)
movl %esi, 0x38(%rbp)
movq %r15, 0x40(%rbp)
movslq 0xcc(%rsp), %rdi
movslq 0xd0(%rsp), %r12
movl 0xd4(%rsp), %esi
movq 0xb0(%rsp), %rbp
movl 0xb8(%rsp), %r8d
movq 0xc0(%rsp), %r9
movl 0xc8(%rsp), %ebx
movq 0x70(%rsp), %r13
movl $0x0, %eax
cmoveq %rax, %r13
imulq 0xe0(%rsp), %r13
imulq %rbp, %r13
addq 0xa0(%rsp), %r13
movq %r12, %rcx
imulq %rdi, %rcx
movq %rbp, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rbp
movq %rax, %r15
cmpl $0x4, %ebx
cmoveq %rcx, %r15
movq 0x150(%rsp), %r14
movq 0x50(%r14), %rax
testq %rax, %rax
je 0x3b1d12
lock
decl (%rax)
jne 0x3b1d12
movq %r9, 0x8(%rsp)
movl %r8d, 0x60(%rsp)
movq %rdi, 0x68(%rsp)
movl %esi, 0x80(%rsp)
movq 0x48(%r14), %rsi
movq 0x68(%r14), %rdi
testq %rdi, %rdi
je 0x3b1cf4
movq (%rdi), %rax
callq *0x18(%rax)
movl 0x80(%rsp), %esi
movq 0x68(%rsp), %rdi
movl 0x60(%rsp), %r8d
movq 0x8(%rsp), %r9
jmp 0x3b1d12
movq %rsi, %rdi
callq 0x5f3e0
movl 0x80(%rsp), %esi
movq 0x68(%rsp), %rdi
movl 0x60(%rsp), %r8d
movq 0x8(%rsp), %r9
decl %ebx
movq %r13, 0x48(%r14)
andq $0x0, 0x50(%r14)
movq %rbp, 0x58(%r14)
movl %r8d, 0x60(%r14)
movq %r9, 0x68(%r14)
movl %ebx, 0x70(%r14)
movl %edi, 0x74(%r14)
movl %r12d, 0x78(%r14)
movl $0x1, 0x7c(%r14)
movl %esi, 0x80(%r14)
movq %r15, 0x88(%r14)
leaq 0xf0(%rsp), %rdi
pushq $0x1
popq %rsi
leaq 0x1a0(%rsp), %rdx
callq 0x6fa72
movq 0x148(%rsp), %rsi
movslq 0x2c(%rsi), %r8
movslq 0x30(%rsi), %r12
movl 0x34(%rsi), %edi
movq 0x40(%rsi), %rbp
imulq 0x70(%rsp), %rbp
movq 0x10(%rsi), %r13
imulq %r13, %rbp
addq (%rsi), %rbp
movl 0x18(%rsi), %r9d
movq 0x20(%rsi), %r10
movq %r12, %rcx
imulq %r8, %rcx
movq %rcx, %rax
imulq %r13, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %r13
movq %rax, %r15
movl 0x28(%rsi), %ebx
cmpl $0x4, %ebx
cmoveq %rcx, %r15
movq 0xf0(%rsp), %r14
movq 0x8(%r14), %rax
testq %rax, %rax
je 0x3b1e2f
lock
decl (%rax)
jne 0x3b1e2f
movq %r10, 0x8(%rsp)
movl %r9d, 0x60(%rsp)
movq %r8, 0x68(%rsp)
movl %edi, 0x80(%rsp)
movq (%r14), %rsi
movq 0x20(%r14), %rdi
testq %rdi, %rdi
je 0x3b1e11
movq (%rdi), %rax
callq *0x18(%rax)
movl 0x80(%rsp), %edi
movq 0x68(%rsp), %r8
movl 0x60(%rsp), %r9d
movq 0x8(%rsp), %r10
jmp 0x3b1e2f
movq %rsi, %rdi
callq 0x5f3e0
movl 0x80(%rsp), %edi
movq 0x68(%rsp), %r8
movl 0x60(%rsp), %r9d
movq 0x8(%rsp), %r10
movq %rbp, (%r14)
andq $0x0, 0x8(%r14)
decl %ebx
movq %r13, 0x10(%r14)
movl %r9d, 0x18(%r14)
movq %r10, 0x20(%r14)
movl %ebx, 0x28(%r14)
movl %r8d, 0x2c(%r14)
movl %r12d, 0x30(%r14)
movl $0x1, 0x34(%r14)
movl %edi, 0x38(%r14)
movq %r15, 0x40(%r14)
movq 0x98(%rsp), %rax
movq 0x8(%rax), %rdi
movq (%rdi), %rax
leaq 0x150(%rsp), %r14
movq %r14, %rsi
leaq 0xf0(%rsp), %r15
movq %r15, %rdx
movq 0x140(%rsp), %rcx
callq *0x30(%rax)
movq %r15, %rdi
callq 0x6fac4
movq %r14, %rdi
callq 0x6fac4
movq 0x70(%rsp), %r13
incq %r13
jmp 0x3b1ad8
movq 0xa8(%rsp), %rax
testq %rax, %rax
je 0x3b254c
lock
decl (%rax)
jne 0x3b254c
movq 0xa0(%rsp), %rsi
movq 0xc0(%rsp), %rdi
testq %rdi, %rdi
je 0x3b2544
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b254c
movq (%rbx), %rax
movq %rax, 0x10(%rsp)
movq 0x8(%rbx), %rax
movq %rax, 0x18(%rsp)
movq %r8, %r13
movq %r8, 0x20(%rsp)
movl 0x18(%rbx), %ecx
movl %ecx, 0x28(%rsp)
movq 0x20(%rbx), %rcx
movq %rcx, 0x30(%rsp)
movl %ebp, 0x38(%rsp)
movups 0x2c(%rbx), %xmm0
movups %xmm0, 0x3c(%rsp)
movq 0x40(%rbx), %rcx
movq %rcx, 0x50(%rsp)
testq %rax, %rax
je 0x3b1f33
lock
incl (%rax)
cmpl $0x3, %r14d
jne 0x3b1f66
movl 0x74(%rbx), %edx
movl 0x78(%rbx), %ecx
movl 0x80(%rbx), %r8d
andq $0x0, (%rsp)
leaq 0xa0(%rsp), %rdi
pushq $0x1
popq %r9
movq 0x88(%rsp), %rsi
callq 0x632f0
jmp 0x3b1fd0
movq 0x48(%rbx), %rax
movq %rax, 0xa0(%rsp)
movq 0x50(%rbx), %rax
movq %rax, 0xa8(%rsp)
movq 0x58(%rbx), %rcx
movq %rcx, 0xb0(%rsp)
movl 0x60(%rbx), %ecx
movl %ecx, 0xb8(%rsp)
movq 0x68(%rbx), %rcx
movq %rcx, 0xc0(%rsp)
movups 0x70(%rbx), %xmm0
movups %xmm0, 0xc8(%rsp)
movl 0x80(%rbx), %ecx
movl %ecx, 0xd8(%rsp)
movq 0x88(%rbx), %rcx
movq %rcx, 0xe0(%rsp)
testq %rax, %rax
je 0x3b1fd0
lock
incl (%rax)
movq 0x98(%rsp), %rdx
movq (%rdx), %rax
movq -0x18(%rax), %rax
xorl %ecx, %ecx
cmpl $0x0, 0xd0(%rdx,%rax)
setne %cl
movl 0xcc(%rsp,%rcx,4), %esi
movl 0x40(%rsp), %edx
movl 0x44(%rsp), %eax
movl 0xd4(%rsp), %ecx
movl 0xd8(%rsp), %r8d
cmpl %ecx, %eax
cmovgl %eax, %ecx
movl 0x48(%rsp), %eax
cmpl %r8d, %eax
cmovgl %eax, %r8d
movq 0x8(%r15), %rax
movq %rax, (%rsp)
movq %r12, %rdi
movq %rcx, %rbx
movq %r8, %r14
movq %r13, %r9
callq 0x6393c
cmpq $0x0, (%r12)
je 0x3b240e
movslq 0x38(%r12), %rax
imulq 0x40(%r12), %rax
testq %rax, %rax
je 0x3b240e
xorl %eax, %eax
testl %ebx, %ebx
cmovlel %eax, %ebx
movq %rbx, 0x80(%rsp)
testl %r14d, %r14d
cmovlel %eax, %r14d
movq %r14, 0x138(%rsp)
leaq 0x150(%rsp), %r15
leaq 0xf0(%rsp), %r12
xorl %eax, %eax
cmpq 0x138(%rsp), %rax
je 0x3b2506
cmpl $0x1, 0x48(%rsp)
movq %rax, 0x88(%rsp)
movl %eax, %ecx
movq %rcx, %rdx
movl $0x0, %eax
cmoveq %rax, %rdx
movq %rdx, 0x68(%rsp)
cmpl $0x1, 0xd8(%rsp)
cmoveq %rax, %rcx
movq %rcx, 0x60(%rsp)
xorl %ebx, %ebx
cmpq %rbx, 0x80(%rsp)
je 0x3b23fe
movq %rbx, 0x70(%rsp)
movl 0x44(%rsp), %r14d
movl 0xd4(%rsp), %ebx
movq %r15, %rdi
pushq $0x2
popq %rsi
movq %r12, %rdx
callq 0x6fa72
movslq 0x3c(%rsp), %rcx
movslq 0x40(%rsp), %rbp
movq 0x50(%rsp), %r13
imulq 0x68(%rsp), %r13
movq 0x20(%rsp), %r12
imulq %r12, %r13
addq 0x10(%rsp), %r13
cmpl $0x1, %r14d
movl 0x28(%rsp), %esi
movq 0x70(%rsp), %rdx
movq %rdx, %r15
movl $0x0, %eax
cmoveq %rax, %r15
movq 0x30(%rsp), %rdi
movq 0x150(%rsp), %r14
movq 0x8(%r14), %rax
testq %rax, %rax
je 0x3b21a0
lock
decl (%rax)
jne 0x3b21a0
movq %rdi, 0xe8(%rsp)
movl %esi, 0x78(%rsp)
movq %rcx, 0x8(%rsp)
movq (%r14), %rsi
movq 0x20(%r14), %rdi
testq %rdi, %rdi
je 0x3b2182
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x70(%rsp), %rdx
movq 0x8(%rsp), %rcx
movl 0x78(%rsp), %esi
movq 0xe8(%rsp), %rdi
jmp 0x3b21a0
movq %rsi, %rdi
callq 0x5f3e0
movq 0x70(%rsp), %rdx
movq 0x8(%rsp), %rcx
movl 0x78(%rsp), %esi
movq 0xe8(%rsp), %rdi
movq %rbp, %rax
imulq %rcx, %rax
imulq %rax, %r15
imulq %r12, %r15
addq %r15, %r13
movq %r13, (%r14)
andq $0x0, 0x8(%r14)
movq %r12, 0x10(%r14)
movl %esi, 0x18(%r14)
movq %rdi, 0x20(%r14)
movl $0x2, 0x28(%r14)
movl %ecx, 0x2c(%r14)
movl %ebp, 0x30(%r14)
movabsq $0x100000001, %rcx # imm = 0x100000001
movq %rcx, 0x34(%r14)
movq %rax, 0x40(%r14)
movslq 0xcc(%rsp), %r15
movslq 0xd0(%rsp), %rbp
movq 0xb0(%rsp), %r13
movq 0xe0(%rsp), %r12
imulq 0x60(%rsp), %r12
imulq %r13, %r12
addq 0xa0(%rsp), %r12
cmpl $0x1, %ebx
movl 0xb8(%rsp), %esi
movq 0xc0(%rsp), %rdi
movq 0x150(%rsp), %rbx
movq 0x50(%rbx), %rax
movq %rdx, %r14
movl $0x0, %ecx
cmoveq %rcx, %r14
testq %rax, %rax
je 0x3b2286
lock
decl (%rax)
jne 0x3b2286
movq %rdi, 0x78(%rsp)
movl %esi, 0x8(%rsp)
movq 0x48(%rbx), %rsi
movq 0x68(%rbx), %rdi
testq %rdi, %rdi
je 0x3b2275
movq (%rdi), %rax
callq *0x18(%rax)
movl 0x8(%rsp), %esi
movq 0x78(%rsp), %rdi
jmp 0x3b2286
movq %rsi, %rdi
callq 0x5f3e0
movl 0x8(%rsp), %esi
movq 0x78(%rsp), %rdi
movq %rbp, %rax
imulq %r15, %rax
imulq %rax, %r14
imulq %r13, %r14
addq %r14, %r12
movq %r12, 0x48(%rbx)
andq $0x0, 0x50(%rbx)
movq %r13, 0x58(%rbx)
movl %esi, 0x60(%rbx)
movq %rdi, 0x68(%rbx)
movl $0x2, 0x70(%rbx)
movl %r15d, 0x74(%rbx)
movl %ebp, 0x78(%rbx)
movabsq $0x100000001, %rcx # imm = 0x100000001
movq %rcx, 0x7c(%rbx)
movq %rax, 0x88(%rbx)
leaq 0xf0(%rsp), %rdi
pushq $0x1
popq %rsi
leaq 0x1a0(%rsp), %rdx
callq 0x6fa72
movq 0x148(%rsp), %rax
movslq 0x2c(%rax), %rbx
movslq 0x30(%rax), %r15
movq 0x40(%rax), %r12
imulq 0x88(%rsp), %r12
movq 0x10(%rax), %r14
imulq %r14, %r12
addq (%rax), %r12
movl 0x18(%rax), %ebp
movq 0x20(%rax), %rsi
movq 0xf0(%rsp), %r13
movq 0x8(%r13), %rax
testq %rax, %rax
movq 0x70(%rsp), %rdx
je 0x3b2367
lock
decl (%rax)
jne 0x3b2367
movq %rsi, 0x8(%rsp)
movq (%r13), %rsi
movq 0x20(%r13), %rdi
testq %rdi, %rdi
je 0x3b2355
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x70(%rsp), %rdx
movq 0x8(%rsp), %rsi
jmp 0x3b2367
movq %rsi, %rdi
callq 0x5f3e0
movq 0x70(%rsp), %rdx
movq 0x8(%rsp), %rsi
movq %r15, %rax
imulq %rbx, %rax
movq %rdx, %rcx
imulq %r14, %rcx
imulq %rax, %rcx
addq %rcx, %r12
movq %r12, (%r13)
andq $0x0, 0x8(%r13)
movq %r14, 0x10(%r13)
movl %ebp, 0x18(%r13)
movq %rsi, 0x20(%r13)
movl $0x2, 0x28(%r13)
movl %ebx, 0x2c(%r13)
movl %r15d, 0x30(%r13)
movabsq $0x100000001, %rcx # imm = 0x100000001
movq %rcx, 0x34(%r13)
movq %rax, 0x40(%r13)
movq 0x98(%rsp), %rax
movq 0x8(%rax), %rdi
movq (%rdi), %rax
leaq 0x150(%rsp), %r15
movq %r15, %rsi
leaq 0xf0(%rsp), %r12
movq %rdx, %rbx
movq %r12, %rdx
movq 0x140(%rsp), %rcx
callq *0x30(%rax)
movq %r12, %rdi
callq 0x6fac4
movq %r15, %rdi
callq 0x6fac4
incq %rbx
jmp 0x3b20c3
movq 0x88(%rsp), %rax
incq %rax
jmp 0x3b2082
movq 0xa8(%rsp), %rax
testq %rax, %rax
je 0x3b257a
lock
decl (%rax)
jne 0x3b257a
movq 0xa0(%rsp), %rsi
movq 0xc0(%rsp), %rdi
testq %rdi, %rdi
je 0x3b2572
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b257a
movq 0x138(%rsp), %rax
movl 0x34(%rax), %ecx
movl 0x38(%rax), %r8d
movq 0x8(%rbx), %r9
leaq 0xf0(%rsp), %r14
leaq 0x10(%rsp), %rsi
movq %r14, %rdi
movl 0xe8(%rsp), %edx
callq 0x63020
movq 0xf8(%rsp), %rax
cmpq %r14, %r12
je 0x3b294c
testq %rax, %rax
je 0x3b2494
lock
incl (%rax)
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x3b28e6
lock
decl (%rax)
jne 0x3b28e6
movq (%r12), %rsi
movq 0x20(%r12), %rdi
testq %rdi, %rdi
je 0x3b28de
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b28e6
movq 0xa8(%rsp), %rax
testq %rax, %rax
je 0x3b27c8
lock
decl (%rax)
jne 0x3b27c8
movq 0xa0(%rsp), %rsi
movq 0xc0(%rsp), %rdi
testq %rdi, %rdi
je 0x3b27c0
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b27c8
movq 0xa8(%rsp), %rax
testq %rax, %rax
je 0x3b2805
lock
decl (%rax)
jne 0x3b2805
movq 0xa0(%rsp), %rsi
movq 0xc0(%rsp), %rdi
testq %rdi, %rdi
je 0x3b27fd
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b2805
movq %rsi, %rdi
callq 0x5f3e0
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3b25a8
lock
decl (%rax)
jne 0x3b25a8
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b25a0
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b25a8
movq %rsi, %rdi
callq 0x5f3e0
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3b25a8
lock
decl (%rax)
jne 0x3b25a8
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b25a0
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b25a8
movq %rsi, %rdi
callq 0x5f3e0
pushq $-0x64
popq %rax
jmp 0x3b1961
movq 0xdda21(%rip), %rbx # 0x48ffd8
movq (%rbx), %rdi
leaq 0x49f15(%rip), %rsi # 0x3fc4d6
movl %ebp, %edx
movl %r14d, %ecx
xorl %eax, %eax
callq 0x5f150
movq (%rbx), %rsi
pushq $0xa
popq %rdi
callq 0x5f1c0
pushq $-0x1
jmp 0x3b25aa
movq %rsi, %rdi
callq 0x5f3e0
movq 0xf0(%rsp), %rax
movq %rax, (%r12)
movq 0xf8(%rsp), %rax
movq %rax, 0x8(%r12)
movq 0x100(%rsp), %rcx
movq %rcx, 0x10(%r12)
movl 0x108(%rsp), %ecx
movl %ecx, 0x18(%r12)
movq 0x110(%rsp), %rcx
movq %rcx, 0x20(%r12)
movups 0x118(%rsp), %xmm0
movups %xmm0, 0x28(%r12)
movl 0x128(%rsp), %ecx
movl %ecx, 0x38(%r12)
movq 0x130(%rsp), %rcx
movq %rcx, 0x40(%r12)
testq %rax, %rax
je 0x3b271d
lock
decl (%rax)
jne 0x3b271d
movq 0xf0(%rsp), %rsi
movq 0x110(%rsp), %rdi
testq %rdi, %rdi
je 0x3b2715
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b271d
movq %rsi, %rdi
callq 0x5f3e0
movq 0xf0(%rsp), %rax
movq %rax, (%r12)
movq 0xf8(%rsp), %rax
movq %rax, 0x8(%r12)
movq 0x100(%rsp), %rcx
movq %rcx, 0x10(%r12)
movl 0x108(%rsp), %ecx
movl %ecx, 0x18(%r12)
movq 0x110(%rsp), %rcx
movq %rcx, 0x20(%r12)
movups 0x118(%rsp), %xmm0
movups %xmm0, 0x28(%r12)
movl 0x128(%rsp), %ecx
movl %ecx, 0x38(%r12)
movq 0x130(%rsp), %rcx
movq %rcx, 0x40(%r12)
testq %rax, %rax
je 0x3b271d
lock
decl (%rax)
jne 0x3b271d
movq 0xf0(%rsp), %rsi
movq 0x110(%rsp), %rdi
testq %rdi, %rdi
je 0x3b2715
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b271d
movq %rsi, %rdi
callq 0x5f3e0
movq 0x158(%rsp), %rax
testq %rax, %rax
je 0x3b2754
lock
decl (%rax)
jne 0x3b2754
movq 0x150(%rsp), %rsi
movq 0x170(%rsp), %rdi
testq %rdi, %rdi
je 0x3b274c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b2754
movq %rsi, %rdi
callq 0x5f3e0
movq 0xa8(%rsp), %rax
testq %rax, %rax
je 0x3b278b
lock
decl (%rax)
jne 0x3b278b
movq 0xa0(%rsp), %rsi
movq 0xc0(%rsp), %rdi
testq %rdi, %rdi
je 0x3b2783
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b278b
movq %rsi, %rdi
callq 0x5f3e0
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3b195f
lock
decl (%rax)
jne 0x3b195f
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b2a1a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b195f
movq %rsi, %rdi
callq 0x5f3e0
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3b195f
lock
decl (%rax)
jne 0x3b195f
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b2a1a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b195f
movq %rsi, %rdi
callq 0x5f3e0
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3b195f
lock
decl (%rax)
jne 0x3b195f
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b2a1a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b195f
movq %rsi, %rdi
callq 0x5f3e0
movq 0xf0(%rsp), %rax
movq %rax, (%r12)
movq 0xf8(%rsp), %rax
movq %rax, 0x8(%r12)
movq 0x100(%rsp), %rcx
movq %rcx, 0x10(%r12)
movl 0x108(%rsp), %ecx
movl %ecx, 0x18(%r12)
movq 0x110(%rsp), %rcx
movq %rcx, 0x20(%r12)
movups 0x118(%rsp), %xmm0
movups %xmm0, 0x28(%r12)
movl 0x128(%rsp), %ecx
movl %ecx, 0x38(%r12)
movq 0x130(%rsp), %rcx
movq %rcx, 0x40(%r12)
testq %rax, %rax
je 0x3b297b
lock
decl (%rax)
jne 0x3b297b
movq 0xf0(%rsp), %rsi
movq 0x110(%rsp), %rdi
testq %rdi, %rdi
je 0x3b2973
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b297b
movq %rsi, %rdi
callq 0x5f3e0
movq 0xf0(%rsp), %rax
movq %rax, (%r12)
movq 0xf8(%rsp), %rax
movq %rax, 0x8(%r12)
movq 0x100(%rsp), %rcx
movq %rcx, 0x10(%r12)
movl 0x108(%rsp), %ecx
movl %ecx, 0x18(%r12)
movq 0x110(%rsp), %rcx
movq %rcx, 0x20(%r12)
movups 0x118(%rsp), %xmm0
movups %xmm0, 0x28(%r12)
movl 0x128(%rsp), %ecx
movl %ecx, 0x38(%r12)
movq 0x130(%rsp), %rcx
movq %rcx, 0x40(%r12)
testq %rax, %rax
je 0x3b297b
lock
decl (%rax)
jne 0x3b297b
movq 0xf0(%rsp), %rsi
movq 0x110(%rsp), %rdi
testq %rdi, %rdi
je 0x3b2973
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b297b
movq %rsi, %rdi
callq 0x5f3e0
movq 0x158(%rsp), %rax
testq %rax, %rax
je 0x3b29b2
lock
decl (%rax)
jne 0x3b29b2
movq 0x150(%rsp), %rsi
movq 0x170(%rsp), %rdi
testq %rdi, %rdi
je 0x3b29aa
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b29b2
movq %rsi, %rdi
callq 0x5f3e0
movq 0xa8(%rsp), %rax
testq %rax, %rax
je 0x3b29e9
lock
decl (%rax)
jne 0x3b29e9
movq 0xa0(%rsp), %rsi
movq 0xc0(%rsp), %rdi
testq %rdi, %rdi
je 0x3b29e1
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b29e9
movq %rsi, %rdi
callq 0x5f3e0
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3b195f
lock
decl (%rax)
jne 0x3b195f
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b2a1a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b195f
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x3b195f
movq %rax, %rbx
movq 0xf8(%rsp), %rax
testq %rax, %rax
je 0x3b2ee3
lock
decl (%rax)
jne 0x3b2ee3
movq 0xf0(%rsp), %rsi
movq 0x110(%rsp), %rdi
testq %rdi, %rdi
je 0x3b2a9b
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b2ee3
jmp 0x3b312d
movq %rax, %rbx
movq 0xf8(%rsp), %rax
testq %rax, %rax
je 0x3b2ee3
lock
decl (%rax)
jne 0x3b2ee3
movq 0xf0(%rsp), %rsi
movq 0x110(%rsp), %rdi
testq %rdi, %rdi
jne 0x3b2aa8
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x3b2ee3
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b2ee3
jmp 0x3b312d
jmp 0x3b312d
jmp 0x3b312d
jmp 0x3b312d
jmp 0x3b312d
jmp 0x3b312d
jmp 0x3b312d
movq %rax, %rbx
movq 0xf8(%rsp), %rax
testq %rax, %rax
je 0x3b2fbc
lock
decl (%rax)
jne 0x3b2fbc
movq 0xf0(%rsp), %rsi
movq 0x110(%rsp), %rdi
testq %rdi, %rdi
je 0x3b2b4a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b2fbc
jmp 0x3b312d
movq %rax, %rbx
movq 0xf8(%rsp), %rax
testq %rax, %rax
je 0x3b2fbc
lock
decl (%rax)
jne 0x3b2fbc
movq 0xf0(%rsp), %rsi
movq 0x110(%rsp), %rdi
testq %rdi, %rdi
jne 0x3b2b57
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x3b2fbc
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b2fbc
jmp 0x3b312d
jmp 0x3b312d
jmp 0x3b312d
jmp 0x3b312d
jmp 0x3b312d
jmp 0x3b312d
jmp 0x3b312d
jmp 0x3b312d
jmp 0x3b312d
jmp 0x3b312d
jmp 0x3b312d
jmp 0x3b312d
jmp 0x3b312d
jmp 0x3b312d
jmp 0x3b2ebc
jmp 0x3b2ebc
jmp 0x3b312d
movq %rax, %rbx
jmp 0x3b30f5
movq %rax, %rbx
jmp 0x3b2e5a
jmp 0x3b2f95
jmp 0x3b2f95
movq %rax, %rbx
jmp 0x3b2f1a
movq %rax, %rbx
jmp 0x3b2f51
jmp 0x3b3097
jmp 0x3b2dfc
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3b3077
lock
decl (%rax)
jne 0x3b3077
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b2dc1
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b3077
jmp 0x3b312d
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3b3077
lock
decl (%rax)
jne 0x3b3077
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b2dc1
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b3077
jmp 0x3b312d
jmp 0x3b312d
jmp 0x3b312d
movq %rax, %rbx
jmp 0x3b2ff3
movq %rax, %rbx
jmp 0x3b302a
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3b3077
lock
decl (%rax)
jne 0x3b3077
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b2dc1
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b3077
jmp 0x3b312d
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3b3077
lock
decl (%rax)
jne 0x3b3077
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b2dc1
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b3077
jmp 0x3b312d
jmp 0x3b2e13
jmp 0x3b2e13
jmp 0x3b2e01
jmp 0x3b2ed3
jmp 0x3b312d
jmp 0x3b312d
jmp 0x3b2ed3
jmp 0x3b2ec1
jmp 0x3b2fac
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3b3077
lock
decl (%rax)
jne 0x3b3077
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b2dc1
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b3077
jmp 0x3b312d
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3b3077
lock
decl (%rax)
jne 0x3b3077
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b2dc1
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b3077
jmp 0x3b312d
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3b3077
lock
decl (%rax)
jne 0x3b3077
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
jne 0x3b2dce
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x3b3077
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b3077
jmp 0x3b312d
jmp 0x3b3074
jmp 0x3b3074
jmp 0x3b3089
jmp 0x3b3074
jmp 0x3b2fac
jmp 0x3b2f9a
movq %rax, %rbx
jmp 0x3b2e23
movq %rax, %rbx
leaq 0xf0(%rsp), %rdi
callq 0x6fac4
jmp 0x3b2e16
movq %rax, %rbx
leaq 0x150(%rsp), %rdi
callq 0x6fac4
movq 0xa8(%rsp), %rax
testq %rax, %rax
je 0x3b2e5a
lock
decl (%rax)
jne 0x3b2e5a
movq 0xa0(%rsp), %rsi
movq 0xc0(%rsp), %rdi
testq %rdi, %rdi
jne 0x3b2e54
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x3b2e5a
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3b3123
lock
decl (%rax)
jne 0x3b3123
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b3113
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b3123
jmp 0x3b312d
jmp 0x3b312d
jmp 0x3b312d
jmp 0x3b312d
jmp 0x3b312d
jmp 0x3b3074
jmp 0x3b3074
jmp 0x3b3089
jmp 0x3b3074
movq %rax, %rbx
jmp 0x3b2ee3
movq %rax, %rbx
leaq 0x1a0(%rsp), %rdi
callq 0x6fac4
jmp 0x3b2ed6
movq %rax, %rbx
leaq 0xf0(%rsp), %rdi
callq 0x6fac4
movq 0x158(%rsp), %rax
testq %rax, %rax
je 0x3b2f1a
lock
decl (%rax)
jne 0x3b2f1a
movq 0x150(%rsp), %rsi
movq 0x170(%rsp), %rdi
testq %rdi, %rdi
jne 0x3b2f14
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x3b2f1a
movq (%rdi), %rax
callq *0x18(%rax)
movq 0xa8(%rsp), %rax
testq %rax, %rax
je 0x3b2f51
lock
decl (%rax)
jne 0x3b2f51
movq 0xa0(%rsp), %rsi
movq 0xc0(%rsp), %rdi
testq %rdi, %rdi
jne 0x3b2f4b
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x3b2f51
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3b3123
lock
decl (%rax)
jne 0x3b3123
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b3113
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b3123
jmp 0x3b312d
jmp 0x3b312d
jmp 0x3b312d
movq %rax, %rbx
jmp 0x3b2fbc
movq %rax, %rbx
leaq 0x1a0(%rsp), %rdi
callq 0x6fac4
jmp 0x3b2faf
movq %rax, %rbx
leaq 0xf0(%rsp), %rdi
callq 0x6fac4
movq 0x158(%rsp), %rax
testq %rax, %rax
je 0x3b2ff3
lock
decl (%rax)
jne 0x3b2ff3
movq 0x150(%rsp), %rsi
movq 0x170(%rsp), %rdi
testq %rdi, %rdi
jne 0x3b2fed
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x3b2ff3
movq (%rdi), %rax
callq *0x18(%rax)
movq 0xa8(%rsp), %rax
testq %rax, %rax
je 0x3b302a
lock
decl (%rax)
jne 0x3b302a
movq 0xa0(%rsp), %rsi
movq 0xc0(%rsp), %rdi
testq %rdi, %rdi
jne 0x3b3024
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x3b302a
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3b3123
lock
decl (%rax)
jne 0x3b3123
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b3113
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b3123
jmp 0x3b312d
jmp 0x3b312d
jmp 0x3b312d
jmp 0x3b3074
jmp 0x3b3074
jmp 0x3b3074
movq %rax, %rbx
leaq 0xa0(%rsp), %rdi
callq 0x6fac4
jmp 0x3b3123
movq %rax, %rbx
jmp 0x3b3123
jmp 0x3b30ae
jmp 0x3b30ae
jmp 0x3b309c
movq %rax, %rbx
jmp 0x3b30be
movq %rax, %rbx
leaq 0xf0(%rsp), %rdi
callq 0x6fac4
jmp 0x3b30b1
movq %rax, %rbx
leaq 0x150(%rsp), %rdi
callq 0x6fac4
movq 0xa8(%rsp), %rax
testq %rax, %rax
je 0x3b30f5
lock
decl (%rax)
jne 0x3b30f5
movq 0xa0(%rsp), %rsi
movq 0xc0(%rsp), %rdi
testq %rdi, %rdi
jne 0x3b30ef
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x3b30f5
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3b3123
lock
decl (%rax)
jne 0x3b3123
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
jne 0x3b311d
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x3b3123
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
jmp 0x3b312d
movq %rax, %rdi
callq 0x61d68
nop
| /csukuangfj[P]ncnn/src/layer/x86/matmul_x86.cpp |
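The disassembly above leans on two recurring idioms that make it much easier to read: an atomic reference-count release guarding every Mat reassignment (the repeated "lock; decl (%rax); jne ...; call deleter" sequences), and a 16-byte alignment computation before each channel() access (the "addq $0xf; andq $-0x10; divq" runs). A minimal C++ sketch of both follows; MiniMat, MiniAllocator, and aligned_cstep are illustrative stand-ins modeled on ncnn::Mat's public layout, not the library's actual code.

#include <atomic>
#include <cstdlib>
#include <cstddef>

// Stand-in for ncnn's allocator interface; only the free hook matters here.
struct MiniAllocator { virtual ~MiniAllocator() {} virtual void fastFree(void* ptr) = 0; };

struct MiniMat {
    void* data = nullptr;
    std::atomic<int>* refcount = nullptr; // shared across shallow copies
    size_t elemsize = 4;
    MiniAllocator* allocator = nullptr;

    // The "lock decl; jne skip; call free/fastFree" idiom: drop one shared
    // reference and deallocate only when this was the last owner.
    void release() {
        if (refcount && refcount->fetch_sub(1, std::memory_order_acq_rel) == 1) {
            if (allocator) allocator->fastFree(data);
            else std::free(data);
        }
        data = nullptr;
        refcount = nullptr;
    }
};

// The "addq $0xf, %rax; andq $-0x10, %rax; divq elemsize" sequence seen
// before channel() accesses: round one channel's byte footprint up to a
// 16-byte boundary, then express it back in elements (ncnn's cstep).
static size_t aligned_cstep(size_t w, size_t h, size_t elemsize) {
    size_t bytes = w * h * elemsize;
    return ((bytes + 15) & ~(size_t)15) / elemsize;
}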
virtual thunk to ncnn::MatMul_x86::forward(std::vector<ncnn::Mat, std::allocator<ncnn::Mat>> const&, std::vector<ncnn::Mat, std::allocator<ncnn::Mat>>&, ncnn::Option const&) const | int MatMul_x86::forward(const std::vector<Mat>& bottom_blobs, std::vector<Mat>& top_blobs, const Option& opt) const
{
const Mat& A = bottom_blobs[0];
const Mat& B = bottom_blobs[1];
Mat& top_blob = top_blobs[0];
const int Adims = A.dims;
const int Bdims = B.dims;
const int max_ABdims = std::max(Adims, Bdims);
const size_t elemsize = A.elemsize;
if (Adims == 1 && Bdims == 1)
{
// dot product
std::vector<Mat> _bottom_blobs(2);
_bottom_blobs[0] = A.reshape(A.w, 1);
_bottom_blobs[1] = transB ? B.reshape(B.w, 1) : B.reshape(1, B.w);
gemm->forward(_bottom_blobs, top_blobs, opt);
top_blob = top_blob.reshape(1, opt.blob_allocator);
}
else if (Adims == 2 && Bdims == 2)
{
// matrix multiply
gemm->forward(bottom_blobs, top_blobs, opt);
}
else if (Adims == 1 && Bdims == 2)
{
// matrix multiply
std::vector<Mat> _bottom_blobs(2);
_bottom_blobs[0] = A.reshape(A.w, 1);
_bottom_blobs[1] = B;
gemm->forward(_bottom_blobs, top_blobs, opt);
top_blob = top_blob.reshape(top_blob.w, opt.blob_allocator);
}
else if (Adims == 2 && Bdims == 1)
{
// matrix multiply
std::vector<Mat> _bottom_blobs(2);
_bottom_blobs[0] = A;
_bottom_blobs[1] = transB ? B.reshape(B.w, 1) : B.reshape(1, B.w);
gemm->forward(_bottom_blobs, top_blobs, opt);
top_blob = top_blob.reshape(top_blob.h, opt.blob_allocator);
}
else if (Adims == 1 && Bdims > 2)
{
// batched matrix multiply
const int N = transB == 0 ? B.w : B.h;
const int batch_size = B.d * B.c;
Mat top_blob1(N, 1, batch_size, elemsize, opt.blob_allocator);
if (top_blob1.empty())
return -100;
Mat A1 = A.reshape(A.w, 1);
Mat B1 = B.reshape(B.w, B.h, batch_size);
for (int p = 0; p < batch_size; p++)
{
std::vector<Mat> _bottom_blobs(2);
_bottom_blobs[0] = A1;
_bottom_blobs[1] = B1.channel(p);
std::vector<Mat> _top_blobs(1);
_top_blobs[0] = top_blob1.channel(p);
gemm->forward(_bottom_blobs, _top_blobs, opt);
}
if (Bdims == 3)
top_blob = top_blob1.reshape(N, B.d * B.c, opt.blob_allocator);
else
top_blob = top_blob1.reshape(N, B.d, B.c, opt.blob_allocator);
}
else if (Adims > 2 && Bdims == 1)
{
// batched matrix multiply
const int M = A.h;
const int batch_size = A.d * A.c;
Mat top_blob1(1, M, batch_size, elemsize, opt.blob_allocator);
if (top_blob1.empty())
return -100;
Mat A1 = A.reshape(A.w, A.h, batch_size);
Mat BT = transB ? B.reshape(B.w, 1) : B.reshape(1, B.w);
for (int p = 0; p < batch_size; p++)
{
std::vector<Mat> _bottom_blobs(2);
_bottom_blobs[0] = A1.channel(p);
_bottom_blobs[1] = BT;
std::vector<Mat> _top_blobs(1);
_top_blobs[0] = top_blob1.channel(p);
gemm->forward(_bottom_blobs, _top_blobs, opt);
}
if (Adims == 3)
top_blob = top_blob1.reshape(M, A.d * A.c, opt.blob_allocator);
else
top_blob = top_blob1.reshape(M, A.d, A.c, opt.blob_allocator);
}
else if (max_ABdims == 3)
{
Mat A1 = Adims == 2 ? A.reshape(A.w, A.h, 1) : A;
Mat B1 = Bdims == 2 ? B.reshape(B.w, B.h, 1) : B;
const int M = A1.h;
const int N = transB == 0 ? B1.w : B1.h;
const int batch_size = std::max(A1.c, B1.c);
top_blob.create(N, M, batch_size, elemsize, opt.blob_allocator);
if (top_blob.empty())
return -100;
for (int p = 0; p < batch_size; p++)
{
int Ap = A1.c == 1 ? 0 : p;
int Bp = B1.c == 1 ? 0 : p;
std::vector<Mat> _bottom_blobs(2);
_bottom_blobs[0] = A1.channel(Ap);
_bottom_blobs[1] = B1.channel(Bp);
std::vector<Mat> _top_blobs(1);
_top_blobs[0] = top_blob.channel(p);
gemm->forward(_bottom_blobs, _top_blobs, opt);
}
}
else if (max_ABdims == 4)
{
Mat A1 = Adims == 3 ? A.reshape(A.w, A.h, A.c, 1) : A;
Mat B1 = Bdims == 3 ? B.reshape(B.w, B.h, B.c, 1) : B;
const int M = A1.h;
const int N = transB == 0 ? B1.w : B1.h;
const int batch_size_d = std::max(A1.d, B1.d);
const int batch_size_c = std::max(A1.c, B1.c);
top_blob.create(N, M, batch_size_d, batch_size_c, elemsize, opt.blob_allocator);
if (top_blob.empty())
return -100;
for (int p = 0; p < batch_size_c; p++)
{
int Ap = A1.c == 1 ? 0 : p;
int Bp = B1.c == 1 ? 0 : p;
for (int q = 0; q < batch_size_d; q++)
{
int Ad = A1.d == 1 ? 0 : q;
int Bd = B1.d == 1 ? 0 : q;
std::vector<Mat> _bottom_blobs(2);
_bottom_blobs[0] = A1.channel(Ap).depth(Ad);
_bottom_blobs[1] = B1.channel(Bp).depth(Bd);
std::vector<Mat> _top_blobs(1);
_top_blobs[0] = top_blob.channel(p).depth(q);
gemm->forward(_bottom_blobs, _top_blobs, opt);
}
}
}
else
{
NCNN_LOGE("impossible matmul %d %d", Adims, Bdims);
return -1;
}
return 0;
} | movq (%rdi), %rax
addq -0x40(%rax), %rdi
jmp 0x3b04da
nop
| /csukuangfj[P]ncnn/src/layer/x86/matmul_x86.cpp |
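In the forward() source above, the max_ABdims == 3 branch runs one GEMM per output channel and broadcasts whichever operand has a single channel (Ap and Bp are forced to 0 when A1.c or B1.c is 1). Below is a minimal scalar reference for just that branch, assuming transB == 0, float data, and densely packed rows (no elempack or cstep padding); batched_matmul_3d is a hypothetical helper with plain loops standing in for the gemm layer, not ncnn's implementation.

#include <vector>
#include <algorithm>
#include <cstddef>

// A: Ac x M x K, B: Bc x K x N, C: max(Ac, Bc) x M x N, all row-major.
void batched_matmul_3d(const float* A, int Ac,
                       const float* B, int Bc,
                       float* C,
                       int M, int K, int N)
{
    const int batch = std::max(Ac, Bc);
    for (int p = 0; p < batch; p++)
    {
        // same broadcast rule as "Ap = A1.c == 1 ? 0 : p" in the source
        const float* a = A + (size_t)(Ac == 1 ? 0 : p) * M * K;
        const float* b = B + (size_t)(Bc == 1 ? 0 : p) * K * N;
        float* c = C + (size_t)p * M * N;
        for (int i = 0; i < M; i++)
            for (int j = 0; j < N; j++)
            {
                float sum = 0.f;
                for (int k = 0; k < K; k++)
                    sum += a[i * K + k] * b[k * N + j];
                c[i * N + j] = sum;
            }
    }
}

The real layer instead builds per-channel Mat views and routes each pair through gemm->forward(), which is what lets the SSE/AVX/AVX-512 gemm kernels do the heavy lifting; the loop above only fixes the shape and broadcast semantics.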
ncnn::MatMul_x86_avx512::forward(std::vector<ncnn::Mat, std::allocator<ncnn::Mat>> const&, std::vector<ncnn::Mat, std::allocator<ncnn::Mat>>&, ncnn::Option const&) const | int MatMul_x86_avx512::forward(const std::vector<Mat>& bottom_blobs, std::vector<Mat>& top_blobs, const Option& opt) const
{
const Mat& A = bottom_blobs[0];
const Mat& B = bottom_blobs[1];
Mat& top_blob = top_blobs[0];
const int Adims = A.dims;
const int Bdims = B.dims;
const int max_ABdims = std::max(Adims, Bdims);
const size_t elemsize = A.elemsize;
if (Adims == 1 && Bdims == 1)
{
// dot product
std::vector<Mat> _bottom_blobs(2);
_bottom_blobs[0] = A.reshape(A.w, 1);
_bottom_blobs[1] = transB ? B.reshape(B.w, 1) : B.reshape(1, B.w);
gemm->forward(_bottom_blobs, top_blobs, opt);
top_blob = top_blob.reshape(1, opt.blob_allocator);
}
else if (Adims == 2 && Bdims == 2)
{
// matrix multiply
gemm->forward(bottom_blobs, top_blobs, opt);
}
else if (Adims == 1 && Bdims == 2)
{
// matrix multiply
std::vector<Mat> _bottom_blobs(2);
_bottom_blobs[0] = A.reshape(A.w, 1);
_bottom_blobs[1] = B;
gemm->forward(_bottom_blobs, top_blobs, opt);
top_blob = top_blob.reshape(top_blob.w, opt.blob_allocator);
}
else if (Adims == 2 && Bdims == 1)
{
// matrix multiply
std::vector<Mat> _bottom_blobs(2);
_bottom_blobs[0] = A;
_bottom_blobs[1] = transB ? B.reshape(B.w, 1) : B.reshape(1, B.w);
gemm->forward(_bottom_blobs, top_blobs, opt);
top_blob = top_blob.reshape(top_blob.h, opt.blob_allocator);
}
else if (Adims == 1 && Bdims > 2)
{
// batched matrix multiply
const int N = transB == 0 ? B.w : B.h;
const int batch_size = B.d * B.c;
Mat top_blob1(N, 1, batch_size, elemsize, opt.blob_allocator);
if (top_blob1.empty())
return -100;
Mat A1 = A.reshape(A.w, 1);
Mat B1 = B.reshape(B.w, B.h, batch_size);
for (int p = 0; p < batch_size; p++)
{
std::vector<Mat> _bottom_blobs(2);
_bottom_blobs[0] = A1;
_bottom_blobs[1] = B1.channel(p);
std::vector<Mat> _top_blobs(1);
_top_blobs[0] = top_blob1.channel(p);
gemm->forward(_bottom_blobs, _top_blobs, opt);
}
if (Bdims == 3)
top_blob = top_blob1.reshape(N, B.d * B.c, opt.blob_allocator);
else
top_blob = top_blob1.reshape(N, B.d, B.c, opt.blob_allocator);
}
else if (Adims > 2 && Bdims == 1)
{
// batched matrix multiply
const int M = A.h;
const int batch_size = A.d * A.c;
Mat top_blob1(1, M, batch_size, elemsize, opt.blob_allocator);
if (top_blob1.empty())
return -100;
Mat A1 = A.reshape(A.w, A.h, batch_size);
Mat BT = transB ? B.reshape(B.w, 1) : B.reshape(1, B.w);
for (int p = 0; p < batch_size; p++)
{
std::vector<Mat> _bottom_blobs(2);
_bottom_blobs[0] = A1.channel(p);
_bottom_blobs[1] = BT;
std::vector<Mat> _top_blobs(1);
_top_blobs[0] = top_blob1.channel(p);
gemm->forward(_bottom_blobs, _top_blobs, opt);
}
if (Adims == 3)
top_blob = top_blob1.reshape(M, A.d * A.c, opt.blob_allocator);
else
top_blob = top_blob1.reshape(M, A.d, A.c, opt.blob_allocator);
}
else if (max_ABdims == 3)
{
Mat A1 = Adims == 2 ? A.reshape(A.w, A.h, 1) : A;
Mat B1 = Bdims == 2 ? B.reshape(B.w, B.h, 1) : B;
const int M = A1.h;
const int N = transB == 0 ? B1.w : B1.h;
const int batch_size = std::max(A1.c, B1.c);
top_blob.create(N, M, batch_size, elemsize, opt.blob_allocator);
if (top_blob.empty())
return -100;
for (int p = 0; p < batch_size; p++)
{
int Ap = A1.c == 1 ? 0 : p;
int Bp = B1.c == 1 ? 0 : p;
std::vector<Mat> _bottom_blobs(2);
_bottom_blobs[0] = A1.channel(Ap);
_bottom_blobs[1] = B1.channel(Bp);
std::vector<Mat> _top_blobs(1);
_top_blobs[0] = top_blob.channel(p);
gemm->forward(_bottom_blobs, _top_blobs, opt);
}
}
else if (max_ABdims == 4)
{
Mat A1 = Adims == 3 ? A.reshape(A.w, A.h, A.c, 1) : A;
Mat B1 = Bdims == 3 ? B.reshape(B.w, B.h, B.c, 1) : B;
const int M = A1.h;
const int N = transB == 0 ? B1.w : B1.h;
const int batch_size_d = std::max(A1.d, B1.d);
const int batch_size_c = std::max(A1.c, B1.c);
top_blob.create(N, M, batch_size_d, batch_size_c, elemsize, opt.blob_allocator);
if (top_blob.empty())
return -100;
for (int p = 0; p < batch_size_c; p++)
{
int Ap = A1.c == 1 ? 0 : p;
int Bp = B1.c == 1 ? 0 : p;
for (int q = 0; q < batch_size_d; q++)
{
int Ad = A1.d == 1 ? 0 : q;
int Bd = B1.d == 1 ? 0 : q;
std::vector<Mat> _bottom_blobs(2);
_bottom_blobs[0] = A1.channel(Ap).depth(Ad);
_bottom_blobs[1] = B1.channel(Bp).depth(Bd);
std::vector<Mat> _top_blobs(1);
_top_blobs[0] = top_blob.channel(p).depth(q);
gemm->forward(_bottom_blobs, _top_blobs, opt);
}
}
}
else
{
NCNN_LOGE("impossible matmul %d %d", Adims, Bdims);
return -1;
}
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x1b8, %rsp # imm = 0x1B8
movq %rcx, %r15
movq %rdi, 0x98(%rsp)
movq (%rsi), %rbx
leaq 0x48(%rbx), %rax
movq %rax, 0x88(%rsp)
movq %rdx, 0x70(%rsp)
movq (%rdx), %r12
movl 0x28(%rbx), %ebp
movl 0x70(%rbx), %r14d
cmpl %r14d, %ebp
movl %r14d, %eax
cmovgl %ebp, %eax
movl %ebp, %edx
xorl $0x1, %edx
movl %r14d, %ecx
xorl $0x1, %ecx
movl %edx, %edi
orl %ecx, %edi
jne 0x3b33f3
movq %r15, %r13
leaq 0xa0(%rsp), %rdi
pushq $0x2
popq %rsi
leaq 0x10(%rsp), %rdx
callq 0x6fa72
movl 0x2c(%rbx), %edx
leaq 0x10(%rsp), %r14
pushq $0x1
popq %rbp
movq %r14, %rdi
movq %rbx, %r15
movq %rbx, %rsi
movl %ebp, %ecx
xorl %r8d, %r8d
callq 0x62e4e
movq 0xa0(%rsp), %rbx
movq 0x18(%rsp), %rax
cmpq %r14, %rbx
je 0x3b3a19
testq %rax, %rax
je 0x3b33c2
lock
incl (%rax)
movq 0x8(%rbx), %rax
testq %rax, %rax
je 0x3b39bb
lock
decl (%rax)
jne 0x3b39bb
movq (%rbx), %rsi
movq 0x20(%rbx), %rdi
testq %rdi, %rdi
je 0x3b39b3
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b39bb
movl %ebp, %edi
xorl $0x2, %edi
movl %r14d, %r8d
xorl $0x2, %r8d
movl %edi, %r9d
orl %r8d, %r9d
jne 0x3b3426
movq 0x98(%rsp), %rax
movq 0x8(%rax), %rdi
movq (%rdi), %rax
movq 0x70(%rsp), %rdx
movq %r15, %rcx
callq *0x30(%rax)
jmp 0x3b47c9
orl %r8d, %edx
jne 0x3b34af
leaq 0xa0(%rsp), %rdi
pushq $0x2
popq %rsi
leaq 0x10(%rsp), %rdx
callq 0x6fa72
movl 0x2c(%rbx), %edx
leaq 0x10(%rsp), %r14
pushq $0x1
popq %rcx
movq %r14, %rdi
movq %rbx, %rbp
movq %rbx, %rsi
xorl %r8d, %r8d
callq 0x62e4e
movq 0xa0(%rsp), %rbx
movq 0x18(%rsp), %rax
cmpq %r14, %rbx
je 0x3b4263
testq %rax, %rax
je 0x3b347e
lock
incl (%rax)
movq 0x8(%rbx), %rax
testq %rax, %rax
je 0x3b4205
lock
decl (%rax)
jne 0x3b4205
movq (%rbx), %rsi
movq 0x20(%rbx), %rdi
testq %rdi, %rdi
je 0x3b41fd
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b4205
orl %ecx, %edi
jne 0x3b351c
movq %rbx, %r14
leaq 0xa0(%rsp), %rdi
pushq $0x2
popq %rsi
leaq 0x10(%rsp), %rdx
callq 0x6fa72
movq 0xa0(%rsp), %rbx
movq %r14, %rsi
cmpq %r14, %rbx
je 0x3b45ce
movq 0x8(%rsi), %rax
testq %rax, %rax
je 0x3b34eb
lock
incl (%rax)
movq 0x8(%rbx), %rax
testq %rax, %rax
je 0x3b4576
lock
decl (%rax)
jne 0x3b4576
movq (%rbx), %rsi
movq 0x20(%rbx), %rdi
testq %rdi, %rdi
je 0x3b456e
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b4576
movq 0x10(%rbx), %r8
cmpl $0x1, %ebp
sete %cl
cmpl $0x3, %r14d
setge %dl
andb %cl, %dl
cmpb $0x1, %dl
movq %r12, 0x148(%rsp)
movq %r15, 0x140(%rsp)
jne 0x3b3c35
movq 0x98(%rsp), %rdx
movq (%rdx), %rax
movq -0x18(%rax), %rax
xorl %ecx, %ecx
cmpl $0x0, 0xd0(%rdx,%rax)
setne %cl
movl 0x74(%rbx,%rcx,4), %esi
movl 0x80(%rbx), %r13d
imull 0x7c(%rbx), %r13d
movq 0x8(%r15), %r9
leaq 0x10(%rsp), %r15
andq $0x0, 0x40(%r15)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, (%r15)
vmovups %xmm0, 0xc(%r15)
vmovaps %xmm0, 0x20(%r15)
vmovups %xmm0, 0x2c(%r15)
pushq $0x1
popq %rdx
movq %r15, %rdi
movl %esi, 0xe8(%rsp)
movl %r13d, %ecx
callq 0x63810
cmpq $0x0, (%r15)
je 0x3b40ca
movslq 0x48(%rsp), %rax
imulq 0x50(%rsp), %rax
testq %rax, %rax
je 0x3b40ca
movl 0x2c(%rbx), %edx
leaq 0xa0(%rsp), %rdi
pushq $0x1
popq %rcx
movq %rbx, %rsi
xorl %r8d, %r8d
callq 0x62e4e
movl 0x74(%rbx), %edx
movq %rbx, 0x138(%rsp)
movl 0x78(%rbx), %ecx
xorl %r15d, %r15d
leaq 0x150(%rsp), %rdi
movq 0x88(%rsp), %rsi
movl %r13d, %r8d
xorl %r9d, %r9d
callq 0x63020
movq 0x140(%rsp), %rbx
testl %r13d, %r13d
cmovlel %r15d, %r13d
leaq 0xf0(%rsp), %rbp
movl %r14d, 0x80(%rsp)
movq %r13, 0x88(%rsp)
cmpq %r15, %r13
je 0x3b413f
movq %rbp, %rdi
pushq $0x2
popq %rsi
leaq 0x1a0(%rsp), %rdx
callq 0x6fa72
movq 0xf0(%rsp), %rbx
leaq 0xa0(%rsp), %rax
cmpq %rax, %rbx
je 0x3b3719
movq 0xa8(%rsp), %rax
testq %rax, %rax
je 0x3b3681
lock
incl (%rax)
movq 0x8(%rbx), %rax
testq %rax, %rax
je 0x3b36ab
lock
decl (%rax)
jne 0x3b36ab
movq (%rbx), %rsi
movq 0x20(%rbx), %rdi
testq %rdi, %rdi
je 0x3b36a3
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b36ab
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x40(%rbx)
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, (%rbx)
vmovups %xmm0, 0xc(%rbx)
andl $0x0, 0x38(%rbx)
vmovups %xmm0, 0x28(%rbx)
vmovaps 0xa0(%rsp), %xmm0
vmovups %xmm0, (%rbx)
movq 0xb0(%rsp), %rax
movq %rax, 0x10(%rbx)
movl 0xb8(%rsp), %eax
movl %eax, 0x18(%rbx)
movq 0xc0(%rsp), %rax
movq %rax, 0x20(%rbx)
vmovups 0xc8(%rsp), %xmm0
vmovups %xmm0, 0x28(%rbx)
movl 0xd8(%rsp), %eax
movl %eax, 0x38(%rbx)
movq 0xe0(%rsp), %rax
movq %rax, 0x40(%rbx)
movslq 0x17c(%rsp), %rdi
movslq 0x180(%rsp), %r12
movl 0x184(%rsp), %esi
movq 0x190(%rsp), %rbp
movq %r15, 0x70(%rsp)
imulq %r15, %rbp
movq 0x160(%rsp), %rbx
imulq %rbx, %rbp
addq 0x150(%rsp), %rbp
movl 0x168(%rsp), %r8d
movq 0x170(%rsp), %r9
movq %r12, %rcx
imulq %rdi, %rcx
movq %rbx, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rbx
movq %rax, %r13
movl 0x178(%rsp), %r14d
cmpl $0x4, %r14d
cmoveq %rcx, %r13
movq 0xf0(%rsp), %r15
movq 0x50(%r15), %rax
testq %rax, %rax
je 0x3b37ff
lock
decl (%rax)
jne 0x3b37ff
movq %r9, 0x78(%rsp)
movl %r8d, 0x8(%rsp)
movq %rdi, 0x60(%rsp)
movl %esi, 0x68(%rsp)
movq 0x48(%r15), %rsi
movq 0x68(%r15), %rdi
testq %rdi, %rdi
je 0x3b37e4
movq (%rdi), %rax
callq *0x18(%rax)
movl 0x68(%rsp), %esi
movq 0x60(%rsp), %rdi
movl 0x8(%rsp), %r8d
movq 0x78(%rsp), %r9
jmp 0x3b37ff
movq %rsi, %rdi
callq 0x5f3e0
movl 0x68(%rsp), %esi
movq 0x60(%rsp), %rdi
movl 0x8(%rsp), %r8d
movq 0x78(%rsp), %r9
decl %r14d
movq %rbp, 0x48(%r15)
andq $0x0, 0x50(%r15)
movq %rbx, 0x58(%r15)
movl %r8d, 0x60(%r15)
movq %r9, 0x68(%r15)
movl %r14d, 0x70(%r15)
movl %edi, 0x74(%r15)
movl %r12d, 0x78(%r15)
movl $0x1, 0x7c(%r15)
movl %esi, 0x80(%r15)
movq %r13, 0x88(%r15)
leaq 0x1a0(%rsp), %rdi
pushq $0x1
popq %rsi
leaq 0x97(%rsp), %rdx
callq 0x6fa72
movslq 0x3c(%rsp), %rdi
movslq 0x40(%rsp), %r12
movl 0x44(%rsp), %esi
movq 0x50(%rsp), %rbx
imulq 0x70(%rsp), %rbx
movq 0x20(%rsp), %rbp
imulq %rbp, %rbx
addq 0x10(%rsp), %rbx
movl 0x28(%rsp), %r8d
movq 0x30(%rsp), %r9
movq %r12, %rcx
imulq %rdi, %rcx
movq %rbp, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rbp
movq %rax, %r13
movl 0x38(%rsp), %r14d
cmpl $0x4, %r14d
cmoveq %rcx, %r13
movq 0x1a0(%rsp), %r15
movq 0x8(%r15), %rax
testq %rax, %rax
je 0x3b3918
lock
decl (%rax)
jne 0x3b3918
movq %r9, 0x78(%rsp)
movl %r8d, 0x8(%rsp)
movq %rdi, 0x60(%rsp)
movl %esi, 0x68(%rsp)
movq (%r15), %rsi
movq 0x20(%r15), %rdi
testq %rdi, %rdi
je 0x3b38fd
movq (%rdi), %rax
callq *0x18(%rax)
movl 0x68(%rsp), %esi
movq 0x60(%rsp), %rdi
movl 0x8(%rsp), %r8d
movq 0x78(%rsp), %r9
jmp 0x3b3918
movq %rsi, %rdi
callq 0x5f3e0
movl 0x68(%rsp), %esi
movq 0x60(%rsp), %rdi
movl 0x8(%rsp), %r8d
movq 0x78(%rsp), %r9
movq %rbx, (%r15)
andq $0x0, 0x8(%r15)
decl %r14d
movq %rbp, 0x10(%r15)
movl %r8d, 0x18(%r15)
movq %r9, 0x20(%r15)
movl %r14d, 0x28(%r15)
movl %edi, 0x2c(%r15)
movl %r12d, 0x30(%r15)
movl $0x1, 0x34(%r15)
movl %esi, 0x38(%r15)
movq %r13, 0x40(%r15)
movq 0x98(%rsp), %rax
movq 0x8(%rax), %rdi
movq (%rdi), %rax
leaq 0xf0(%rsp), %rbp
movq %rbp, %rsi
leaq 0x1a0(%rsp), %r14
movq %r14, %rdx
movq 0x140(%rsp), %rbx
movq %rbx, %rcx
callq *0x30(%rax)
movq %r14, %rdi
callq 0x6fac4
movq %rbp, %rdi
callq 0x6fac4
movq 0x70(%rsp), %r15
incq %r15
movq 0x148(%rsp), %r12
movl 0x80(%rsp), %r14d
movq 0x88(%rsp), %r13
jmp 0x3b363c
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x40(%rbx)
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, (%rbx)
vmovups %xmm0, 0xc(%rbx)
andl $0x0, 0x38(%rbx)
vmovups %xmm0, 0x28(%rbx)
movq 0x18(%rsp), %rax
vmovaps 0x10(%rsp), %xmm0
vmovups %xmm0, (%rbx)
movq 0x20(%rsp), %rcx
movq %rcx, 0x10(%rbx)
movl 0x28(%rsp), %ecx
movl %ecx, 0x18(%rbx)
movq 0x30(%rsp), %rcx
movq %rcx, 0x20(%rbx)
vmovups 0x38(%rsp), %xmm0
vmovups %xmm0, 0x28(%rbx)
movl 0x48(%rsp), %ecx
movl %ecx, 0x38(%rbx)
movq 0x50(%rsp), %rcx
movq %rcx, 0x40(%rbx)
testq %rax, %rax
je 0x3b3a42
lock
decl (%rax)
jne 0x3b3a42
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b3a3a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b3a42
movq %rsi, %rdi
callq 0x5f3e0
movq 0x98(%rsp), %rcx
movq (%rcx), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0xd0(%rcx,%rax)
movl 0x74(%r15), %eax
movl %eax, %edx
cmovel %ebp, %edx
cmovel %eax, %ebp
leaq 0x10(%rsp), %r14
movq %r14, %rdi
movq 0x88(%rsp), %rsi
movl %ebp, %ecx
xorl %r8d, %r8d
callq 0x62e4e
movq 0xa0(%rsp), %rbx
leaq 0x48(%rbx), %rcx
movq 0x18(%rsp), %rax
cmpq %r14, %rcx
je 0x3b3b12
testq %rax, %rax
je 0x3b3a9d
lock
incl (%rax)
movq 0x50(%rbx), %rax
testq %rax, %rax
je 0x3b3ac8
lock
decl (%rax)
jne 0x3b3ac8
movq 0x48(%rbx), %rsi
movq 0x68(%rbx), %rdi
testq %rdi, %rdi
je 0x3b3ac0
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b3ac8
movq %rsi, %rdi
callq 0x5f3e0
movq 0x18(%rsp), %rax
vmovaps 0x10(%rsp), %xmm0
vmovups %xmm0, 0x48(%rbx)
movq 0x20(%rsp), %rcx
movq %rcx, 0x58(%rbx)
movl 0x28(%rsp), %ecx
movl %ecx, 0x60(%rbx)
movq 0x30(%rsp), %rcx
movq %rcx, 0x68(%rbx)
vmovups 0x38(%rsp), %xmm0
vmovups %xmm0, 0x70(%rbx)
movl 0x48(%rsp), %ecx
movl %ecx, 0x80(%rbx)
movq 0x50(%rsp), %rcx
movq %rcx, 0x88(%rbx)
testq %rax, %rax
je 0x3b3b3b
lock
decl (%rax)
jne 0x3b3b3b
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b3b33
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b3b3b
movq %rsi, %rdi
callq 0x5f3e0
movq 0x98(%rsp), %rax
movq 0x8(%rax), %rdi
movq (%rdi), %rax
leaq 0xa0(%rsp), %rsi
movq 0x70(%rsp), %rdx
movq %r13, %rcx
callq *0x30(%rax)
movq 0x8(%r13), %rcx
leaq 0x10(%rsp), %r14
pushq $0x1
popq %rdx
movq %r14, %rdi
movq %r12, %rsi
callq 0x62c8a
movq 0x18(%rsp), %rax
cmpq %r14, %r12
je 0x3b3c05
testq %rax, %rax
je 0x3b3b8a
lock
incl (%rax)
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x3b3bb7
lock
decl (%rax)
jne 0x3b3bb7
movq (%r12), %rsi
movq 0x20(%r12), %rdi
testq %rdi, %rdi
je 0x3b3baf
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b3bb7
movq %rsi, %rdi
callq 0x5f3e0
movq 0x18(%rsp), %rax
vmovaps 0x10(%rsp), %xmm0
vmovups %xmm0, (%r12)
movq 0x20(%rsp), %rcx
movq %rcx, 0x10(%r12)
movl 0x28(%rsp), %ecx
movl %ecx, 0x18(%r12)
movq 0x30(%rsp), %rcx
movq %rcx, 0x20(%r12)
vmovups 0x38(%rsp), %xmm0
vmovups %xmm0, 0x28(%r12)
movl 0x48(%rsp), %ecx
movl %ecx, 0x38(%r12)
movq 0x50(%rsp), %rcx
movq %rcx, 0x40(%r12)
testq %rax, %rax
je 0x3b47bc
lock
decl (%rax)
jne 0x3b47bc
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b47b4
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b47bc
cmpl $0x1, %r14d
sete %cl
cmpl $0x3, %ebp
setge %dl
andb %cl, %dl
cmpb $0x1, %dl
jne 0x3b40ff
movl 0x30(%rbx), %edx
movl 0x38(%rbx), %ecx
imull 0x34(%rbx), %ecx
movq 0x8(%r15), %r9
leaq 0x10(%rsp), %r15
andq $0x0, 0x40(%r15)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, (%r15)
vmovups %xmm0, 0xc(%r15)
vmovaps %xmm0, 0x20(%r15)
vmovups %xmm0, 0x2c(%r15)
pushq $0x1
popq %r14
movq %r15, %rdi
movl %r14d, %esi
movl %edx, 0xe8(%rsp)
movq %rcx, %r13
callq 0x63810
cmpq $0x0, (%r15)
je 0x3b41c8
movslq 0x48(%rsp), %rax
imulq 0x50(%rsp), %rax
testq %rax, %rax
je 0x3b41c8
movl 0x2c(%rbx), %edx
movl 0x30(%rbx), %ecx
leaq 0xa0(%rsp), %rdi
movq %rbx, %rsi
movl %r13d, %r8d
xorl %r9d, %r9d
callq 0x63020
movq 0x98(%rsp), %rcx
movq (%rcx), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0xd0(%rcx,%rax)
movq %rbx, 0x138(%rsp)
movl 0x74(%rbx), %eax
movl %eax, %edx
cmovel %r14d, %edx
cmovel %eax, %r14d
xorl %r15d, %r15d
leaq 0x150(%rsp), %rdi
movq 0x88(%rsp), %rsi
movl %r14d, %ecx
xorl %r8d, %r8d
callq 0x62e4e
movq 0x140(%rsp), %rbx
testl %r13d, %r13d
cmovlel %r15d, %r13d
movq %r13, 0x88(%rsp)
leaq 0xf0(%rsp), %r14
leaq 0x1a0(%rsp), %r13
cmpq %r15, 0x88(%rsp)
je 0x3b446a
movq %r14, %rdi
pushq $0x2
popq %rsi
movq %r13, %rdx
callq 0x6fa72
movslq 0xcc(%rsp), %rdi
movslq 0xd0(%rsp), %r8
movl 0xd4(%rsp), %esi
movq 0xe0(%rsp), %r12
movq %r15, 0x70(%rsp)
imulq %r15, %r12
movq 0xb0(%rsp), %rbx
imulq %rbx, %r12
addq 0xa0(%rsp), %r12
movl 0xb8(%rsp), %r9d
movq 0xc0(%rsp), %r10
movq %r8, %rcx
imulq %rdi, %rcx
movq %rbx, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rbx
movq %rax, %r13
movl 0xc8(%rsp), %r14d
cmpl $0x4, %r14d
cmoveq %rcx, %r13
movq 0xf0(%rsp), %r15
movq 0x8(%r15), %rax
testq %rax, %rax
je 0x3b3e5d
lock
decl (%rax)
jne 0x3b3e5d
movq %r10, 0x78(%rsp)
movl %r9d, 0x8(%rsp)
movq %r8, 0x60(%rsp)
movq %rdi, 0x68(%rsp)
movl %esi, 0x80(%rsp)
movq (%r15), %rsi
movq 0x20(%r15), %rdi
testq %rdi, %rdi
je 0x3b3e3a
movq (%rdi), %rax
callq *0x18(%rax)
movl 0x80(%rsp), %esi
movq 0x68(%rsp), %rdi
movq 0x60(%rsp), %r8
movl 0x8(%rsp), %r9d
movq 0x78(%rsp), %r10
jmp 0x3b3e5d
movq %rsi, %rdi
callq 0x5f3e0
movl 0x80(%rsp), %esi
movq 0x68(%rsp), %rdi
movq 0x60(%rsp), %r8
movl 0x8(%rsp), %r9d
movq 0x78(%rsp), %r10
decl %r14d
movq %r12, (%r15)
andq $0x0, 0x8(%r15)
movq %rbx, 0x10(%r15)
movl %r9d, 0x18(%r15)
movq %r10, 0x20(%r15)
movl %r14d, 0x28(%r15)
movl %edi, 0x2c(%r15)
movl %r8d, 0x30(%r15)
movl $0x1, 0x34(%r15)
movl %esi, 0x38(%r15)
movq %r13, 0x40(%r15)
movq 0xf0(%rsp), %rbx
leaq 0x48(%rbx), %rax
leaq 0x150(%rsp), %rcx
cmpq %rcx, %rax
movq 0x70(%rsp), %r14
leaq 0x1a0(%rsp), %r15
je 0x3b3f4f
movq 0x158(%rsp), %rax
testq %rax, %rax
je 0x3b3eca
lock
incl (%rax)
movq 0x50(%rbx), %rax
testq %rax, %rax
je 0x3b3ef5
lock
decl (%rax)
jne 0x3b3ef5
movq 0x48(%rbx), %rsi
movq 0x68(%rbx), %rdi
testq %rdi, %rdi
je 0x3b3eed
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b3ef5
movq %rsi, %rdi
callq 0x5f3e0
vmovaps 0x150(%rsp), %xmm0
vmovups %xmm0, 0x48(%rbx)
movq 0x160(%rsp), %rax
movq %rax, 0x58(%rbx)
movl 0x168(%rsp), %eax
movl %eax, 0x60(%rbx)
movq 0x170(%rsp), %rax
movq %rax, 0x68(%rbx)
vmovups 0x178(%rsp), %xmm0
vmovups %xmm0, 0x70(%rbx)
movl 0x188(%rsp), %eax
movl %eax, 0x80(%rbx)
movq 0x190(%rsp), %rax
movq %rax, 0x88(%rbx)
movq %r15, %rdi
pushq $0x1
popq %rsi
leaq 0x97(%rsp), %rdx
callq 0x6fa72
movslq 0x3c(%rsp), %rdi
movslq 0x40(%rsp), %r8
movl 0x44(%rsp), %esi
movq 0x50(%rsp), %rbx
imulq %r14, %rbx
movq 0x20(%rsp), %r12
imulq %r12, %rbx
addq 0x10(%rsp), %rbx
movl 0x28(%rsp), %r9d
movq 0x30(%rsp), %r10
movq %r8, %rcx
imulq %rdi, %rcx
movq %r12, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %r12
movq %rax, %r13
movl 0x38(%rsp), %r14d
cmpl $0x4, %r14d
cmoveq %rcx, %r13
movq 0x1a0(%rsp), %r15
movq 0x8(%r15), %rax
testq %rax, %rax
je 0x3b403f
lock
decl (%rax)
jne 0x3b403f
movq %r10, 0x78(%rsp)
movl %r9d, 0x8(%rsp)
movq %r8, 0x60(%rsp)
movq %rdi, 0x68(%rsp)
movl %esi, 0x80(%rsp)
movq (%r15), %rsi
movq 0x20(%r15), %rdi
testq %rdi, %rdi
je 0x3b401c
movq (%rdi), %rax
callq *0x18(%rax)
movl 0x80(%rsp), %esi
movq 0x68(%rsp), %rdi
movq 0x60(%rsp), %r8
movl 0x8(%rsp), %r9d
movq 0x78(%rsp), %r10
jmp 0x3b403f
movq %rsi, %rdi
callq 0x5f3e0
movl 0x80(%rsp), %esi
movq 0x68(%rsp), %rdi
movq 0x60(%rsp), %r8
movl 0x8(%rsp), %r9d
movq 0x78(%rsp), %r10
movq %rbx, (%r15)
andq $0x0, 0x8(%r15)
decl %r14d
movq %r12, 0x10(%r15)
movl %r9d, 0x18(%r15)
movq %r10, 0x20(%r15)
movl %r14d, 0x28(%r15)
movl %edi, 0x2c(%r15)
movl %r8d, 0x30(%r15)
movl $0x1, 0x34(%r15)
movl %esi, 0x38(%r15)
movq %r13, 0x40(%r15)
movq 0x98(%rsp), %rax
movq 0x8(%rax), %rdi
movq (%rdi), %rax
leaq 0xf0(%rsp), %r14
movq %r14, %rsi
leaq 0x1a0(%rsp), %r13
movq %r13, %rdx
movq 0x140(%rsp), %rbx
movq %rbx, %rcx
callq *0x30(%rax)
movq %r13, %rdi
callq 0x6fac4
movq %r14, %rdi
callq 0x6fac4
movq 0x70(%rsp), %r15
incq %r15
movq 0x148(%rsp), %r12
jmp 0x3b3d44
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3b5408
lock
decl (%rax)
jne 0x3b5408
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b5400
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b5408
cmpl $0x4, %eax
je 0x3b4439
cmpl $0x3, %eax
jne 0x3b5410
cmpl $0x2, %ebp
jne 0x3b47dd
movl 0x2c(%rbx), %edx
movl 0x30(%rbx), %ecx
leaq 0x10(%rsp), %rdi
movq %r8, %r13
pushq $0x1
popq %r8
movq %rbx, %rsi
xorl %r9d, %r9d
callq 0x63020
movq %r13, %r8
jmp 0x3b4820
cmpl $0x3, %r14d
jne 0x3b44ef
movq 0x138(%rsp), %rax
movl 0x80(%rax), %ecx
imull 0x7c(%rax), %ecx
movq 0x8(%rbx), %r8
leaq 0xf0(%rsp), %r14
leaq 0x10(%rsp), %rsi
movq %r14, %rdi
movl 0xe8(%rsp), %edx
callq 0x62e4e
movq 0xf8(%rsp), %rax
cmpq %r14, %r12
je 0x3b54aa
testq %rax, %rax
je 0x3b4194
lock
incl (%rax)
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x3b5444
lock
decl (%rax)
jne 0x3b5444
movq (%r12), %rsi
movq 0x20(%r12), %rdi
testq %rdi, %rdi
je 0x3b543c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b5444
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3b5408
lock
decl (%rax)
jne 0x3b5408
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b5400
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b5408
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x40(%rbx)
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, (%rbx)
vmovups %xmm0, 0xc(%rbx)
andl $0x0, 0x38(%rbx)
vmovups %xmm0, 0x28(%rbx)
movq 0x18(%rsp), %rax
vmovaps 0x10(%rsp), %xmm0
vmovups %xmm0, (%rbx)
movq 0x20(%rsp), %rcx
movq %rcx, 0x10(%rbx)
movl 0x28(%rsp), %ecx
movl %ecx, 0x18(%rbx)
movq 0x30(%rsp), %rcx
movq %rcx, 0x20(%rbx)
vmovups 0x38(%rsp), %xmm0
vmovups %xmm0, 0x28(%rbx)
movl 0x48(%rsp), %ecx
movl %ecx, 0x38(%rbx)
movq 0x50(%rsp), %rcx
movq %rcx, 0x40(%rbx)
testq %rax, %rax
je 0x3b428c
lock
decl (%rax)
jne 0x3b428c
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b4284
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b428c
movq %rsi, %rdi
callq 0x5f3e0
movq 0xa0(%rsp), %rbx
cmpq %rbp, %rbx
je 0x3b433d
movq 0x50(%rbp), %rax
testq %rax, %rax
je 0x3b42a9
lock
incl (%rax)
movq 0x50(%rbx), %rax
testq %rax, %rax
je 0x3b42d4
lock
decl (%rax)
jne 0x3b42d4
movq 0x48(%rbx), %rsi
movq 0x68(%rbx), %rdi
testq %rdi, %rdi
je 0x3b42cc
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b42d4
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x88(%rbx)
leaq 0x48(%rbx), %rax
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, 0xc(%rax)
vmovups %xmm0, (%rax)
andl $0x0, 0x80(%rbx)
vmovups %xmm0, 0x70(%rbx)
vmovups 0x48(%rbp), %xmm0
vmovups %xmm0, 0x48(%rbx)
movq 0x58(%rbp), %rax
movq %rax, 0x58(%rbx)
movl 0x60(%rbp), %eax
movl %eax, 0x60(%rbx)
movq 0x68(%rbp), %rax
movq %rax, 0x68(%rbx)
vmovups 0x70(%rbp), %xmm0
vmovups %xmm0, 0x70(%rbx)
movl 0x80(%rbp), %eax
movl %eax, 0x80(%rbx)
movq 0x88(%rbp), %rax
movq %rax, 0x88(%rbx)
movq 0x98(%rsp), %rax
movq 0x8(%rax), %rdi
movq (%rdi), %rax
leaq 0xa0(%rsp), %rsi
movq 0x70(%rsp), %rdx
movq %r15, %rcx
callq *0x30(%rax)
movl 0x2c(%r12), %edx
movq 0x8(%r15), %rcx
leaq 0x10(%rsp), %r14
movq %r14, %rdi
movq %r12, %rsi
callq 0x62c8a
movq 0x18(%rsp), %rax
cmpq %r14, %r12
je 0x3b4409
testq %rax, %rax
je 0x3b438e
lock
incl (%rax)
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x3b43bb
lock
decl (%rax)
jne 0x3b43bb
movq (%r12), %rsi
movq 0x20(%r12), %rdi
testq %rdi, %rdi
je 0x3b43b3
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b43bb
movq %rsi, %rdi
callq 0x5f3e0
movq 0x18(%rsp), %rax
vmovaps 0x10(%rsp), %xmm0
vmovups %xmm0, (%r12)
movq 0x20(%rsp), %rcx
movq %rcx, 0x10(%r12)
movl 0x28(%rsp), %ecx
movl %ecx, 0x18(%r12)
movq 0x30(%rsp), %rcx
movq %rcx, 0x20(%r12)
vmovups 0x38(%rsp), %xmm0
vmovups %xmm0, 0x28(%r12)
movl 0x48(%rsp), %ecx
movl %ecx, 0x38(%r12)
movq 0x50(%rsp), %rcx
movq %rcx, 0x40(%r12)
testq %rax, %rax
je 0x3b47bc
lock
decl (%rax)
jne 0x3b47bc
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b47b4
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b47bc
cmpl $0x3, %ebp
jne 0x3b4d51
movq %r8, %r13
movl 0x2c(%rbx), %edx
movl 0x30(%rbx), %ecx
movl 0x38(%rbx), %r8d
andq $0x0, (%rsp)
leaq 0x10(%rsp), %rdi
pushq $0x1
popq %r9
movq %rbx, %rsi
callq 0x632f0
jmp 0x3b4d97
cmpl $0x3, %ebp
jne 0x3b52ac
movq 0x138(%rsp), %rax
movl 0x38(%rax), %ecx
imull 0x34(%rax), %ecx
movq 0x8(%rbx), %r8
leaq 0xf0(%rsp), %r14
leaq 0x10(%rsp), %rsi
movq %r14, %rdi
movl 0xe8(%rsp), %edx
callq 0x62e4e
movq 0xf8(%rsp), %rax
cmpq %r14, %r12
je 0x3b5708
testq %rax, %rax
je 0x3b44bb
lock
incl (%rax)
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x3b56a2
lock
decl (%rax)
jne 0x3b56a2
movq (%r12), %rsi
movq 0x20(%r12), %rdi
testq %rdi, %rdi
je 0x3b569a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b56a2
movq 0x138(%rsp), %rax
movl 0x7c(%rax), %ecx
movl 0x80(%rax), %r8d
movq 0x8(%rbx), %r9
leaq 0xf0(%rsp), %r14
leaq 0x10(%rsp), %rsi
movq %r14, %rdi
movl 0xe8(%rsp), %edx
callq 0x63020
movq 0xf8(%rsp), %rax
cmpq %r14, %r12
je 0x3b554e
testq %rax, %rax
je 0x3b453a
lock
incl (%rax)
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x3b54e8
lock
decl (%rax)
jne 0x3b54e8
movq (%r12), %rsi
movq 0x20(%r12), %rdi
testq %rdi, %rdi
je 0x3b54e0
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b54e8
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x40(%rbx)
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, (%rbx)
vmovups %xmm0, 0xc(%rbx)
andl $0x0, 0x38(%rbx)
vmovups %xmm0, 0x28(%rbx)
movq %r14, %rsi
vmovups (%r14), %xmm0
vmovups %xmm0, (%rbx)
movq 0x10(%r14), %rax
movq %rax, 0x10(%rbx)
movl 0x18(%r14), %eax
movl %eax, 0x18(%rbx)
movq 0x20(%r14), %rax
movq %rax, 0x20(%rbx)
vmovups 0x28(%r14), %xmm0
vmovups %xmm0, 0x28(%rbx)
movl 0x38(%r14), %eax
movl %eax, 0x38(%rbx)
movq 0x40(%r14), %rax
movq %rax, 0x40(%rbx)
pushq $0x1
popq %rcx
movq 0x98(%rsp), %rdx
movq (%rdx), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0xd0(%rdx,%rax)
movl 0x74(%rsi), %eax
movl %eax, %edx
cmovel %ecx, %edx
cmovel %eax, %ecx
leaq 0x10(%rsp), %r14
movq %r14, %rdi
movq 0x88(%rsp), %rsi
xorl %r8d, %r8d
callq 0x62e4e
movq 0xa0(%rsp), %rbx
leaq 0x48(%rbx), %rcx
movq 0x18(%rsp), %rax
cmpq %r14, %rcx
je 0x3b469e
testq %rax, %rax
je 0x3b4629
lock
incl (%rax)
movq 0x50(%rbx), %rax
testq %rax, %rax
je 0x3b4654
lock
decl (%rax)
jne 0x3b4654
movq 0x48(%rbx), %rsi
movq 0x68(%rbx), %rdi
testq %rdi, %rdi
je 0x3b464c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b4654
movq %rsi, %rdi
callq 0x5f3e0
movq 0x18(%rsp), %rax
vmovaps 0x10(%rsp), %xmm0
vmovups %xmm0, 0x48(%rbx)
movq 0x20(%rsp), %rcx
movq %rcx, 0x58(%rbx)
movl 0x28(%rsp), %ecx
movl %ecx, 0x60(%rbx)
movq 0x30(%rsp), %rcx
movq %rcx, 0x68(%rbx)
vmovups 0x38(%rsp), %xmm0
vmovups %xmm0, 0x70(%rbx)
movl 0x48(%rsp), %ecx
movl %ecx, 0x80(%rbx)
movq 0x50(%rsp), %rcx
movq %rcx, 0x88(%rbx)
testq %rax, %rax
je 0x3b46c7
lock
decl (%rax)
jne 0x3b46c7
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b46bf
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b46c7
movq %rsi, %rdi
callq 0x5f3e0
movq 0x98(%rsp), %rax
movq 0x8(%rax), %rdi
movq (%rdi), %rax
leaq 0xa0(%rsp), %rsi
movq 0x70(%rsp), %rdx
movq %r15, %rcx
callq *0x30(%rax)
movl 0x30(%r12), %edx
movq 0x8(%r15), %rcx
leaq 0x10(%rsp), %r14
movq %r14, %rdi
movq %r12, %rsi
callq 0x62c8a
movq 0x18(%rsp), %rax
cmpq %r14, %r12
je 0x3b4793
testq %rax, %rax
je 0x3b4718
lock
incl (%rax)
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x3b4745
lock
decl (%rax)
jne 0x3b4745
movq (%r12), %rsi
movq 0x20(%r12), %rdi
testq %rdi, %rdi
je 0x3b473d
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b4745
movq %rsi, %rdi
callq 0x5f3e0
movq 0x18(%rsp), %rax
vmovaps 0x10(%rsp), %xmm0
vmovups %xmm0, (%r12)
movq 0x20(%rsp), %rcx
movq %rcx, 0x10(%r12)
movl 0x28(%rsp), %ecx
movl %ecx, 0x18(%r12)
movq 0x30(%rsp), %rcx
movq %rcx, 0x20(%r12)
vmovups 0x38(%rsp), %xmm0
vmovups %xmm0, 0x28(%r12)
movl 0x48(%rsp), %ecx
movl %ecx, 0x38(%r12)
movq 0x50(%rsp), %rcx
movq %rcx, 0x40(%r12)
testq %rax, %rax
je 0x3b47bc
lock
decl (%rax)
jne 0x3b47bc
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b47b4
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b47bc
movq %rsi, %rdi
callq 0x5f3e0
leaq 0xa0(%rsp), %rdi
callq 0x6fac4
xorl %eax, %eax
addq $0x1b8, %rsp # imm = 0x1B8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq 0x8(%rbx), %rax
vmovups (%rbx), %xmm0
vmovaps %xmm0, 0x10(%rsp)
movq %r8, 0x20(%rsp)
movl 0x18(%rbx), %ecx
movl %ecx, 0x28(%rsp)
movq 0x20(%rbx), %rcx
movq %rcx, 0x30(%rsp)
movl %ebp, 0x38(%rsp)
vmovups 0x2c(%rbx), %xmm0
vmovups %xmm0, 0x3c(%rsp)
movq 0x40(%rbx), %rcx
movq %rcx, 0x50(%rsp)
testq %rax, %rax
je 0x3b4820
lock
incl (%rax)
cmpl $0x2, %r14d
jne 0x3b4850
movl 0x74(%rbx), %edx
movl 0x78(%rbx), %ecx
leaq 0xa0(%rsp), %rdi
movq %r8, %rbx
pushq $0x1
popq %r8
movq 0x88(%rsp), %rsi
xorl %r9d, %r9d
callq 0x63020
movq %rbx, %r8
jmp 0x3b48b6
movq 0x50(%rbx), %rax
vmovups 0x48(%rbx), %xmm0
vmovaps %xmm0, 0xa0(%rsp)
movq 0x58(%rbx), %rcx
movq %rcx, 0xb0(%rsp)
movl 0x60(%rbx), %ecx
movl %ecx, 0xb8(%rsp)
movq 0x68(%rbx), %rcx
movq %rcx, 0xc0(%rsp)
vmovups 0x70(%rbx), %xmm0
vmovups %xmm0, 0xc8(%rsp)
movl 0x80(%rbx), %ecx
movl %ecx, 0xd8(%rsp)
movq 0x88(%rbx), %rcx
movq %rcx, 0xe0(%rsp)
testq %rax, %rax
je 0x3b48b6
lock
incl (%rax)
movq 0x98(%rsp), %rdx
movq (%rdx), %rax
movq -0x18(%rax), %rax
xorl %ecx, %ecx
cmpl $0x0, 0xd0(%rdx,%rax)
setne %cl
movl 0xcc(%rsp,%rcx,4), %esi
movl 0x40(%rsp), %edx
movl 0x48(%rsp), %eax
movl 0xd8(%rsp), %ecx
cmpl %ecx, %eax
cmovgl %eax, %ecx
movq 0x8(%r15), %r9
movq %r12, %rdi
movq %rcx, %rbx
callq 0x63810
cmpq $0x0, (%r12)
je 0x3b4d13
movslq 0x38(%r12), %rax
imulq 0x40(%r12), %rax
testq %rax, %rax
je 0x3b4d13
xorl %eax, %eax
testl %ebx, %ebx
cmovlel %eax, %ebx
movq %rbx, 0x88(%rsp)
leaq 0x150(%rsp), %r14
leaq 0xf0(%rsp), %r15
xorl %r13d, %r13d
cmpq %r13, 0x88(%rsp)
je 0x3b5328
movl 0x48(%rsp), %ebp
movl 0xd8(%rsp), %ebx
movq %r14, %rdi
pushq $0x2
popq %rsi
movq %r15, %rdx
callq 0x6fa72
cmpl $0x1, %ebp
movslq 0x3c(%rsp), %rdi
movslq 0x40(%rsp), %r8
movl 0x44(%rsp), %esi
movq %r13, 0x70(%rsp)
movl $0x0, %eax
cmoveq %rax, %r13
imulq 0x50(%rsp), %r13
movq 0x20(%rsp), %r14
imulq %r14, %r13
addq 0x10(%rsp), %r13
movl 0x28(%rsp), %r9d
movq 0x30(%rsp), %r10
movq %r8, %rcx
imulq %rdi, %rcx
movq %r14, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %r14
movq %rax, %r15
movl 0x38(%rsp), %r12d
cmpl $0x4, %r12d
cmoveq %rcx, %r15
movq 0x150(%rsp), %rbp
movq 0x8(%rbp), %rax
testq %rax, %rax
je 0x3b4a50
lock
decl (%rax)
jne 0x3b4a50
movq %r10, 0x78(%rsp)
movl %r9d, 0x8(%rsp)
movq %r8, 0x60(%rsp)
movq %rdi, 0x68(%rsp)
movl %esi, 0x80(%rsp)
movq (%rbp), %rsi
movq 0x20(%rbp), %rdi
testq %rdi, %rdi
je 0x3b4a2d
movq (%rdi), %rax
callq *0x18(%rax)
movl 0x80(%rsp), %esi
movq 0x68(%rsp), %rdi
movq 0x60(%rsp), %r8
movl 0x8(%rsp), %r9d
movq 0x78(%rsp), %r10
jmp 0x3b4a50
movq %rsi, %rdi
callq 0x5f3e0
movl 0x80(%rsp), %esi
movq 0x68(%rsp), %rdi
movq 0x60(%rsp), %r8
movl 0x8(%rsp), %r9d
movq 0x78(%rsp), %r10
andq $0x0, 0x8(%rbp)
decl %r12d
cmpl $0x1, %ebx
movq %r13, (%rbp)
movq %r14, 0x10(%rbp)
movl %r9d, 0x18(%rbp)
movq %r10, 0x20(%rbp)
movl %r12d, 0x28(%rbp)
movl %edi, 0x2c(%rbp)
movl %r8d, 0x30(%rbp)
movl $0x1, 0x34(%rbp)
movl %esi, 0x38(%rbp)
movq %r15, 0x40(%rbp)
movslq 0xcc(%rsp), %rdi
movslq 0xd0(%rsp), %r12
movl 0xd4(%rsp), %esi
movq 0xb0(%rsp), %rbp
movl 0xb8(%rsp), %r8d
movq 0xc0(%rsp), %r9
movl 0xc8(%rsp), %ebx
movq 0x70(%rsp), %r13
movl $0x0, %eax
cmoveq %rax, %r13
imulq 0xe0(%rsp), %r13
imulq %rbp, %r13
addq 0xa0(%rsp), %r13
movq %r12, %rcx
imulq %rdi, %rcx
movq %rbp, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rbp
movq %rax, %r15
cmpl $0x4, %ebx
cmoveq %rcx, %r15
movq 0x150(%rsp), %r14
movq 0x50(%r14), %rax
testq %rax, %rax
je 0x3b4b77
lock
decl (%rax)
jne 0x3b4b77
movq %r9, 0x8(%rsp)
movl %r8d, 0x60(%rsp)
movq %rdi, 0x68(%rsp)
movl %esi, 0x80(%rsp)
movq 0x48(%r14), %rsi
movq 0x68(%r14), %rdi
testq %rdi, %rdi
je 0x3b4b59
movq (%rdi), %rax
callq *0x18(%rax)
movl 0x80(%rsp), %esi
movq 0x68(%rsp), %rdi
movl 0x60(%rsp), %r8d
movq 0x8(%rsp), %r9
jmp 0x3b4b77
movq %rsi, %rdi
callq 0x5f3e0
movl 0x80(%rsp), %esi
movq 0x68(%rsp), %rdi
movl 0x60(%rsp), %r8d
movq 0x8(%rsp), %r9
decl %ebx
movq %r13, 0x48(%r14)
andq $0x0, 0x50(%r14)
movq %rbp, 0x58(%r14)
movl %r8d, 0x60(%r14)
movq %r9, 0x68(%r14)
movl %ebx, 0x70(%r14)
movl %edi, 0x74(%r14)
movl %r12d, 0x78(%r14)
movl $0x1, 0x7c(%r14)
movl %esi, 0x80(%r14)
movq %r15, 0x88(%r14)
leaq 0xf0(%rsp), %rdi
pushq $0x1
popq %rsi
leaq 0x1a0(%rsp), %rdx
callq 0x6fa72
movq 0x148(%rsp), %rsi
movslq 0x2c(%rsi), %r8
movslq 0x30(%rsi), %r12
movl 0x34(%rsi), %edi
movq 0x40(%rsi), %rbp
imulq 0x70(%rsp), %rbp
movq 0x10(%rsi), %r13
imulq %r13, %rbp
addq (%rsi), %rbp
movl 0x18(%rsi), %r9d
movq 0x20(%rsi), %r10
movq %r12, %rcx
imulq %r8, %rcx
movq %rcx, %rax
imulq %r13, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %r13
movq %rax, %r15
movl 0x28(%rsi), %ebx
cmpl $0x4, %ebx
cmoveq %rcx, %r15
movq 0xf0(%rsp), %r14
movq 0x8(%r14), %rax
testq %rax, %rax
je 0x3b4c94
lock
decl (%rax)
jne 0x3b4c94
movq %r10, 0x8(%rsp)
movl %r9d, 0x60(%rsp)
movq %r8, 0x68(%rsp)
movl %edi, 0x80(%rsp)
movq (%r14), %rsi
movq 0x20(%r14), %rdi
testq %rdi, %rdi
je 0x3b4c76
movq (%rdi), %rax
callq *0x18(%rax)
movl 0x80(%rsp), %edi
movq 0x68(%rsp), %r8
movl 0x60(%rsp), %r9d
movq 0x8(%rsp), %r10
jmp 0x3b4c94
movq %rsi, %rdi
callq 0x5f3e0
movl 0x80(%rsp), %edi
movq 0x68(%rsp), %r8
movl 0x60(%rsp), %r9d
movq 0x8(%rsp), %r10
movq %rbp, (%r14)
andq $0x0, 0x8(%r14)
decl %ebx
movq %r13, 0x10(%r14)
movl %r9d, 0x18(%r14)
movq %r10, 0x20(%r14)
movl %ebx, 0x28(%r14)
movl %r8d, 0x2c(%r14)
movl %r12d, 0x30(%r14)
movl $0x1, 0x34(%r14)
movl %edi, 0x38(%r14)
movq %r15, 0x40(%r14)
movq 0x98(%rsp), %rax
movq 0x8(%rax), %rdi
movq (%rdi), %rax
leaq 0x150(%rsp), %r14
movq %r14, %rsi
leaq 0xf0(%rsp), %r15
movq %r15, %rdx
movq 0x140(%rsp), %rcx
callq *0x30(%rax)
movq %r15, %rdi
callq 0x6fac4
movq %r14, %rdi
callq 0x6fac4
movq 0x70(%rsp), %r13
incq %r13
jmp 0x3b493d
movq 0xa8(%rsp), %rax
testq %rax, %rax
je 0x3b53ac
lock
decl (%rax)
jne 0x3b53ac
movq 0xa0(%rsp), %rsi
movq 0xc0(%rsp), %rdi
testq %rdi, %rdi
je 0x3b53a4
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b53ac
movq 0x8(%rbx), %rax
vmovups (%rbx), %xmm0
vmovaps %xmm0, 0x10(%rsp)
movq %r8, %r13
movq %r8, 0x20(%rsp)
movl 0x18(%rbx), %ecx
movl %ecx, 0x28(%rsp)
movq 0x20(%rbx), %rcx
movq %rcx, 0x30(%rsp)
movl %ebp, 0x38(%rsp)
vmovups 0x2c(%rbx), %xmm0
vmovups %xmm0, 0x3c(%rsp)
movq 0x40(%rbx), %rcx
movq %rcx, 0x50(%rsp)
testq %rax, %rax
je 0x3b4d97
lock
incl (%rax)
cmpl $0x3, %r14d
jne 0x3b4dca
movl 0x74(%rbx), %edx
movl 0x78(%rbx), %ecx
movl 0x80(%rbx), %r8d
andq $0x0, (%rsp)
leaq 0xa0(%rsp), %rdi
pushq $0x1
popq %r9
movq 0x88(%rsp), %rsi
callq 0x632f0
jmp 0x3b4e30
movq 0x50(%rbx), %rax
vmovups 0x48(%rbx), %xmm0
vmovaps %xmm0, 0xa0(%rsp)
movq 0x58(%rbx), %rcx
movq %rcx, 0xb0(%rsp)
movl 0x60(%rbx), %ecx
movl %ecx, 0xb8(%rsp)
movq 0x68(%rbx), %rcx
movq %rcx, 0xc0(%rsp)
vmovups 0x70(%rbx), %xmm0
vmovups %xmm0, 0xc8(%rsp)
movl 0x80(%rbx), %ecx
movl %ecx, 0xd8(%rsp)
movq 0x88(%rbx), %rcx
movq %rcx, 0xe0(%rsp)
testq %rax, %rax
je 0x3b4e30
lock
incl (%rax)
movq 0x98(%rsp), %rdx
movq (%rdx), %rax
movq -0x18(%rax), %rax
xorl %ecx, %ecx
cmpl $0x0, 0xd0(%rdx,%rax)
setne %cl
movl 0xcc(%rsp,%rcx,4), %esi
movl 0x40(%rsp), %edx
movl 0x44(%rsp), %eax
movl 0xd4(%rsp), %ecx
movl 0xd8(%rsp), %r8d
cmpl %ecx, %eax
cmovgl %eax, %ecx
movl 0x48(%rsp), %eax
cmpl %r8d, %eax
cmovgl %eax, %r8d
movq 0x8(%r15), %rax
movq %rax, (%rsp)
movq %r12, %rdi
movq %rcx, %rbx
movq %r8, %r14
movq %r13, %r9
callq 0x6393c
cmpq $0x0, (%r12)
je 0x3b526e
movslq 0x38(%r12), %rax
imulq 0x40(%r12), %rax
testq %rax, %rax
je 0x3b526e
xorl %eax, %eax
testl %ebx, %ebx
cmovlel %eax, %ebx
movq %rbx, 0x80(%rsp)
testl %r14d, %r14d
cmovlel %eax, %r14d
movq %r14, 0x138(%rsp)
leaq 0x150(%rsp), %r15
leaq 0xf0(%rsp), %r12
xorl %eax, %eax
cmpq 0x138(%rsp), %rax
je 0x3b5366
cmpl $0x1, 0x48(%rsp)
movq %rax, 0x88(%rsp)
movl %eax, %ecx
movq %rcx, %rdx
movl $0x0, %eax
cmoveq %rax, %rdx
movq %rdx, 0x68(%rsp)
cmpl $0x1, 0xd8(%rsp)
cmoveq %rax, %rcx
movq %rcx, 0x60(%rsp)
xorl %ebx, %ebx
cmpq %rbx, 0x80(%rsp)
je 0x3b525e
movq %rbx, 0x70(%rsp)
movl 0x44(%rsp), %r14d
movl 0xd4(%rsp), %ebx
movq %r15, %rdi
pushq $0x2
popq %rsi
movq %r12, %rdx
callq 0x6fa72
movslq 0x3c(%rsp), %rcx
movslq 0x40(%rsp), %rbp
movq 0x50(%rsp), %r13
imulq 0x68(%rsp), %r13
movq 0x20(%rsp), %r12
imulq %r12, %r13
addq 0x10(%rsp), %r13
cmpl $0x1, %r14d
movl 0x28(%rsp), %esi
movq 0x70(%rsp), %rdx
movq %rdx, %r15
movl $0x0, %eax
cmoveq %rax, %r15
movq 0x30(%rsp), %rdi
movq 0x150(%rsp), %r14
movq 0x8(%r14), %rax
testq %rax, %rax
je 0x3b5000
lock
decl (%rax)
jne 0x3b5000
movq %rdi, 0xe8(%rsp)
movl %esi, 0x78(%rsp)
movq %rcx, 0x8(%rsp)
movq (%r14), %rsi
movq 0x20(%r14), %rdi
testq %rdi, %rdi
je 0x3b4fe2
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x70(%rsp), %rdx
movq 0x8(%rsp), %rcx
movl 0x78(%rsp), %esi
movq 0xe8(%rsp), %rdi
jmp 0x3b5000
movq %rsi, %rdi
callq 0x5f3e0
movq 0x70(%rsp), %rdx
movq 0x8(%rsp), %rcx
movl 0x78(%rsp), %esi
movq 0xe8(%rsp), %rdi
movq %rbp, %rax
imulq %rcx, %rax
imulq %rax, %r15
imulq %r12, %r15
addq %r15, %r13
movq %r13, (%r14)
andq $0x0, 0x8(%r14)
movq %r12, 0x10(%r14)
movl %esi, 0x18(%r14)
movq %rdi, 0x20(%r14)
movl $0x2, 0x28(%r14)
movl %ecx, 0x2c(%r14)
movl %ebp, 0x30(%r14)
movabsq $0x100000001, %rcx # imm = 0x100000001
movq %rcx, 0x34(%r14)
movq %rax, 0x40(%r14)
movslq 0xcc(%rsp), %r15
movslq 0xd0(%rsp), %rbp
movq 0xb0(%rsp), %r13
movq 0xe0(%rsp), %r12
imulq 0x60(%rsp), %r12
imulq %r13, %r12
addq 0xa0(%rsp), %r12
cmpl $0x1, %ebx
movl 0xb8(%rsp), %esi
movq 0xc0(%rsp), %rdi
movq 0x150(%rsp), %rbx
movq 0x50(%rbx), %rax
movq %rdx, %r14
movl $0x0, %ecx
cmoveq %rcx, %r14
testq %rax, %rax
je 0x3b50e6
lock
decl (%rax)
jne 0x3b50e6
movq %rdi, 0x78(%rsp)
movl %esi, 0x8(%rsp)
movq 0x48(%rbx), %rsi
movq 0x68(%rbx), %rdi
testq %rdi, %rdi
je 0x3b50d5
movq (%rdi), %rax
callq *0x18(%rax)
movl 0x8(%rsp), %esi
movq 0x78(%rsp), %rdi
jmp 0x3b50e6
movq %rsi, %rdi
callq 0x5f3e0
movl 0x8(%rsp), %esi
movq 0x78(%rsp), %rdi
movq %rbp, %rax
imulq %r15, %rax
imulq %rax, %r14
imulq %r13, %r14
addq %r14, %r12
movq %r12, 0x48(%rbx)
andq $0x0, 0x50(%rbx)
movq %r13, 0x58(%rbx)
movl %esi, 0x60(%rbx)
movq %rdi, 0x68(%rbx)
movl $0x2, 0x70(%rbx)
movl %r15d, 0x74(%rbx)
movl %ebp, 0x78(%rbx)
movabsq $0x100000001, %rcx # imm = 0x100000001
movq %rcx, 0x7c(%rbx)
movq %rax, 0x88(%rbx)
leaq 0xf0(%rsp), %rdi
pushq $0x1
popq %rsi
leaq 0x1a0(%rsp), %rdx
callq 0x6fa72
movq 0x148(%rsp), %rax
movslq 0x2c(%rax), %rbx
movslq 0x30(%rax), %r15
movq 0x40(%rax), %r12
imulq 0x88(%rsp), %r12
movq 0x10(%rax), %r14
imulq %r14, %r12
addq (%rax), %r12
movl 0x18(%rax), %ebp
movq 0x20(%rax), %rsi
movq 0xf0(%rsp), %r13
movq 0x8(%r13), %rax
testq %rax, %rax
movq 0x70(%rsp), %rdx
je 0x3b51c7
lock
decl (%rax)
jne 0x3b51c7
movq %rsi, 0x8(%rsp)
movq (%r13), %rsi
movq 0x20(%r13), %rdi
testq %rdi, %rdi
je 0x3b51b5
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x70(%rsp), %rdx
movq 0x8(%rsp), %rsi
jmp 0x3b51c7
movq %rsi, %rdi
callq 0x5f3e0
movq 0x70(%rsp), %rdx
movq 0x8(%rsp), %rsi
movq %r15, %rax
imulq %rbx, %rax
movq %rdx, %rcx
imulq %r14, %rcx
imulq %rax, %rcx
addq %rcx, %r12
movq %r12, (%r13)
andq $0x0, 0x8(%r13)
movq %r14, 0x10(%r13)
movl %ebp, 0x18(%r13)
movq %rsi, 0x20(%r13)
movl $0x2, 0x28(%r13)
movl %ebx, 0x2c(%r13)
movl %r15d, 0x30(%r13)
movabsq $0x100000001, %rcx # imm = 0x100000001
movq %rcx, 0x34(%r13)
movq %rax, 0x40(%r13)
movq 0x98(%rsp), %rax
movq 0x8(%rax), %rdi
movq (%rdi), %rax
leaq 0x150(%rsp), %r15
movq %r15, %rsi
leaq 0xf0(%rsp), %r12
movq %rdx, %rbx
movq %r12, %rdx
movq 0x140(%rsp), %rcx
callq *0x30(%rax)
movq %r12, %rdi
callq 0x6fac4
movq %r15, %rdi
callq 0x6fac4
incq %rbx
jmp 0x3b4f23
movq 0x88(%rsp), %rax
incq %rax
jmp 0x3b4ee2
movq 0xa8(%rsp), %rax
testq %rax, %rax
je 0x3b53da
lock
decl (%rax)
jne 0x3b53da
movq 0xa0(%rsp), %rsi
movq 0xc0(%rsp), %rdi
testq %rdi, %rdi
je 0x3b53d2
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b53da
movq 0x138(%rsp), %rax
movl 0x34(%rax), %ecx
movl 0x38(%rax), %r8d
movq 0x8(%rbx), %r9
leaq 0xf0(%rsp), %r14
leaq 0x10(%rsp), %rsi
movq %r14, %rdi
movl 0xe8(%rsp), %edx
callq 0x63020
movq 0xf8(%rsp), %rax
cmpq %r14, %r12
je 0x3b57ac
testq %rax, %rax
je 0x3b52f4
lock
incl (%rax)
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x3b5746
lock
decl (%rax)
jne 0x3b5746
movq (%r12), %rsi
movq 0x20(%r12), %rdi
testq %rdi, %rdi
je 0x3b573e
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b5746
movq 0xa8(%rsp), %rax
testq %rax, %rax
je 0x3b5628
lock
decl (%rax)
jne 0x3b5628
movq 0xa0(%rsp), %rsi
movq 0xc0(%rsp), %rdi
testq %rdi, %rdi
je 0x3b5620
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b5628
movq 0xa8(%rsp), %rax
testq %rax, %rax
je 0x3b5665
lock
decl (%rax)
jne 0x3b5665
movq 0xa0(%rsp), %rsi
movq 0xc0(%rsp), %rdi
testq %rdi, %rdi
je 0x3b565d
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b5665
movq %rsi, %rdi
callq 0x5f3e0
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3b5408
lock
decl (%rax)
jne 0x3b5408
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b5400
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b5408
movq %rsi, %rdi
callq 0x5f3e0
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3b5408
lock
decl (%rax)
jne 0x3b5408
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b5400
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b5408
movq %rsi, %rdi
callq 0x5f3e0
pushq $-0x64
popq %rax
jmp 0x3b47cb
movq 0xdabc1(%rip), %rbx # 0x48ffd8
movq (%rbx), %rdi
leaq 0x470b5(%rip), %rsi # 0x3fc4d6
movl %ebp, %edx
movl %r14d, %ecx
xorl %eax, %eax
callq 0x5f150
movq (%rbx), %rsi
pushq $0xa
popq %rdi
callq 0x5f1c0
pushq $-0x1
jmp 0x3b540a
movq %rsi, %rdi
callq 0x5f3e0
movq 0xf8(%rsp), %rax
vmovaps 0xf0(%rsp), %xmm0
vmovups %xmm0, (%r12)
movq 0x100(%rsp), %rcx
movq %rcx, 0x10(%r12)
movl 0x108(%rsp), %ecx
movl %ecx, 0x18(%r12)
movq 0x110(%rsp), %rcx
movq %rcx, 0x20(%r12)
vmovups 0x118(%rsp), %xmm0
vmovups %xmm0, 0x28(%r12)
movl 0x128(%rsp), %ecx
movl %ecx, 0x38(%r12)
movq 0x130(%rsp), %rcx
movq %rcx, 0x40(%r12)
testq %rax, %rax
je 0x3b557d
lock
decl (%rax)
jne 0x3b557d
movq 0xf0(%rsp), %rsi
movq 0x110(%rsp), %rdi
testq %rdi, %rdi
je 0x3b5575
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b557d
movq %rsi, %rdi
callq 0x5f3e0
movq 0xf8(%rsp), %rax
vmovaps 0xf0(%rsp), %xmm0
vmovups %xmm0, (%r12)
movq 0x100(%rsp), %rcx
movq %rcx, 0x10(%r12)
movl 0x108(%rsp), %ecx
movl %ecx, 0x18(%r12)
movq 0x110(%rsp), %rcx
movq %rcx, 0x20(%r12)
vmovups 0x118(%rsp), %xmm0
vmovups %xmm0, 0x28(%r12)
movl 0x128(%rsp), %ecx
movl %ecx, 0x38(%r12)
movq 0x130(%rsp), %rcx
movq %rcx, 0x40(%r12)
testq %rax, %rax
je 0x3b557d
lock
decl (%rax)
jne 0x3b557d
movq 0xf0(%rsp), %rsi
movq 0x110(%rsp), %rdi
testq %rdi, %rdi
je 0x3b5575
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b557d
movq %rsi, %rdi
callq 0x5f3e0
movq 0x158(%rsp), %rax
testq %rax, %rax
je 0x3b55b4
lock
decl (%rax)
jne 0x3b55b4
movq 0x150(%rsp), %rsi
movq 0x170(%rsp), %rdi
testq %rdi, %rdi
je 0x3b55ac
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b55b4
movq %rsi, %rdi
callq 0x5f3e0
movq 0xa8(%rsp), %rax
testq %rax, %rax
je 0x3b55eb
lock
decl (%rax)
jne 0x3b55eb
movq 0xa0(%rsp), %rsi
movq 0xc0(%rsp), %rdi
testq %rdi, %rdi
je 0x3b55e3
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b55eb
movq %rsi, %rdi
callq 0x5f3e0
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3b47c9
lock
decl (%rax)
jne 0x3b47c9
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b587a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b47c9
movq %rsi, %rdi
callq 0x5f3e0
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3b47c9
lock
decl (%rax)
jne 0x3b47c9
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b587a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b47c9
movq %rsi, %rdi
callq 0x5f3e0
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3b47c9
lock
decl (%rax)
jne 0x3b47c9
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b587a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b47c9
movq %rsi, %rdi
callq 0x5f3e0
movq 0xf8(%rsp), %rax
vmovaps 0xf0(%rsp), %xmm0
vmovups %xmm0, (%r12)
movq 0x100(%rsp), %rcx
movq %rcx, 0x10(%r12)
movl 0x108(%rsp), %ecx
movl %ecx, 0x18(%r12)
movq 0x110(%rsp), %rcx
movq %rcx, 0x20(%r12)
vmovups 0x118(%rsp), %xmm0
vmovups %xmm0, 0x28(%r12)
movl 0x128(%rsp), %ecx
movl %ecx, 0x38(%r12)
movq 0x130(%rsp), %rcx
movq %rcx, 0x40(%r12)
testq %rax, %rax
je 0x3b57db
lock
decl (%rax)
jne 0x3b57db
movq 0xf0(%rsp), %rsi
movq 0x110(%rsp), %rdi
testq %rdi, %rdi
je 0x3b57d3
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b57db
movq %rsi, %rdi
callq 0x5f3e0
movq 0xf8(%rsp), %rax
vmovaps 0xf0(%rsp), %xmm0
vmovups %xmm0, (%r12)
movq 0x100(%rsp), %rcx
movq %rcx, 0x10(%r12)
movl 0x108(%rsp), %ecx
movl %ecx, 0x18(%r12)
movq 0x110(%rsp), %rcx
movq %rcx, 0x20(%r12)
vmovups 0x118(%rsp), %xmm0
vmovups %xmm0, 0x28(%r12)
movl 0x128(%rsp), %ecx
movl %ecx, 0x38(%r12)
movq 0x130(%rsp), %rcx
movq %rcx, 0x40(%r12)
testq %rax, %rax
je 0x3b57db
lock
decl (%rax)
jne 0x3b57db
movq 0xf0(%rsp), %rsi
movq 0x110(%rsp), %rdi
testq %rdi, %rdi
je 0x3b57d3
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b57db
movq %rsi, %rdi
callq 0x5f3e0
movq 0x158(%rsp), %rax
testq %rax, %rax
je 0x3b5812
lock
decl (%rax)
jne 0x3b5812
movq 0x150(%rsp), %rsi
movq 0x170(%rsp), %rdi
testq %rdi, %rdi
je 0x3b580a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b5812
movq %rsi, %rdi
callq 0x5f3e0
movq 0xa8(%rsp), %rax
testq %rax, %rax
je 0x3b5849
lock
decl (%rax)
jne 0x3b5849
movq 0xa0(%rsp), %rsi
movq 0xc0(%rsp), %rdi
testq %rdi, %rdi
je 0x3b5841
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b5849
movq %rsi, %rdi
callq 0x5f3e0
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3b47c9
lock
decl (%rax)
jne 0x3b47c9
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b587a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b47c9
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x3b47c9
movq %rax, %rbx
movq 0xf8(%rsp), %rax
testq %rax, %rax
je 0x3b5d43
lock
decl (%rax)
jne 0x3b5d43
movq 0xf0(%rsp), %rsi
movq 0x110(%rsp), %rdi
testq %rdi, %rdi
je 0x3b58fb
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b5d43
jmp 0x3b5f8d
movq %rax, %rbx
movq 0xf8(%rsp), %rax
testq %rax, %rax
je 0x3b5d43
lock
decl (%rax)
jne 0x3b5d43
movq 0xf0(%rsp), %rsi
movq 0x110(%rsp), %rdi
testq %rdi, %rdi
jne 0x3b5908
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x3b5d43
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b5d43
jmp 0x3b5f8d
jmp 0x3b5f8d
jmp 0x3b5f8d
jmp 0x3b5f8d
jmp 0x3b5f8d
jmp 0x3b5f8d
jmp 0x3b5f8d
movq %rax, %rbx
movq 0xf8(%rsp), %rax
testq %rax, %rax
je 0x3b5e1c
lock
decl (%rax)
jne 0x3b5e1c
movq 0xf0(%rsp), %rsi
movq 0x110(%rsp), %rdi
testq %rdi, %rdi
je 0x3b59aa
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b5e1c
jmp 0x3b5f8d
movq %rax, %rbx
movq 0xf8(%rsp), %rax
testq %rax, %rax
je 0x3b5e1c
lock
decl (%rax)
jne 0x3b5e1c
movq 0xf0(%rsp), %rsi
movq 0x110(%rsp), %rdi
testq %rdi, %rdi
jne 0x3b59b7
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x3b5e1c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b5e1c
jmp 0x3b5f8d
jmp 0x3b5f8d
jmp 0x3b5f8d
jmp 0x3b5f8d
jmp 0x3b5f8d
jmp 0x3b5f8d
jmp 0x3b5f8d
jmp 0x3b5f8d
jmp 0x3b5f8d
jmp 0x3b5f8d
jmp 0x3b5f8d
jmp 0x3b5f8d
jmp 0x3b5f8d
jmp 0x3b5f8d
jmp 0x3b5d1c
jmp 0x3b5d1c
jmp 0x3b5f8d
movq %rax, %rbx
jmp 0x3b5f55
movq %rax, %rbx
jmp 0x3b5cba
jmp 0x3b5df5
jmp 0x3b5df5
movq %rax, %rbx
jmp 0x3b5d7a
movq %rax, %rbx
jmp 0x3b5db1
jmp 0x3b5ef7
jmp 0x3b5c5c
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3b5ed7
lock
decl (%rax)
jne 0x3b5ed7
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b5c21
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b5ed7
jmp 0x3b5f8d
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3b5ed7
lock
decl (%rax)
jne 0x3b5ed7
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b5c21
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b5ed7
jmp 0x3b5f8d
jmp 0x3b5f8d
jmp 0x3b5f8d
movq %rax, %rbx
jmp 0x3b5e53
movq %rax, %rbx
jmp 0x3b5e8a
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3b5ed7
lock
decl (%rax)
jne 0x3b5ed7
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b5c21
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b5ed7
jmp 0x3b5f8d
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3b5ed7
lock
decl (%rax)
jne 0x3b5ed7
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b5c21
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b5ed7
jmp 0x3b5f8d
jmp 0x3b5c73
jmp 0x3b5c73
jmp 0x3b5c61
jmp 0x3b5d33
jmp 0x3b5f8d
jmp 0x3b5f8d
jmp 0x3b5d33
jmp 0x3b5d21
jmp 0x3b5e0c
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3b5ed7
lock
decl (%rax)
jne 0x3b5ed7
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b5c21
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b5ed7
jmp 0x3b5f8d
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3b5ed7
lock
decl (%rax)
jne 0x3b5ed7
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b5c21
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b5ed7
jmp 0x3b5f8d
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3b5ed7
lock
decl (%rax)
jne 0x3b5ed7
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
jne 0x3b5c2e
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x3b5ed7
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b5ed7
jmp 0x3b5f8d
jmp 0x3b5ed4
jmp 0x3b5ed4
jmp 0x3b5ee9
jmp 0x3b5ed4
jmp 0x3b5e0c
jmp 0x3b5dfa
movq %rax, %rbx
jmp 0x3b5c83
movq %rax, %rbx
leaq 0xf0(%rsp), %rdi
callq 0x6fac4
jmp 0x3b5c76
movq %rax, %rbx
leaq 0x150(%rsp), %rdi
callq 0x6fac4
movq 0xa8(%rsp), %rax
testq %rax, %rax
je 0x3b5cba
lock
decl (%rax)
jne 0x3b5cba
movq 0xa0(%rsp), %rsi
movq 0xc0(%rsp), %rdi
testq %rdi, %rdi
jne 0x3b5cb4
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x3b5cba
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3b5f83
lock
decl (%rax)
jne 0x3b5f83
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b5f73
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b5f83
jmp 0x3b5f8d
jmp 0x3b5f8d
jmp 0x3b5f8d
jmp 0x3b5f8d
jmp 0x3b5f8d
jmp 0x3b5ed4
jmp 0x3b5ed4
jmp 0x3b5ee9
jmp 0x3b5ed4
movq %rax, %rbx
jmp 0x3b5d43
movq %rax, %rbx
leaq 0x1a0(%rsp), %rdi
callq 0x6fac4
jmp 0x3b5d36
movq %rax, %rbx
leaq 0xf0(%rsp), %rdi
callq 0x6fac4
movq 0x158(%rsp), %rax
testq %rax, %rax
je 0x3b5d7a
lock
decl (%rax)
jne 0x3b5d7a
movq 0x150(%rsp), %rsi
movq 0x170(%rsp), %rdi
testq %rdi, %rdi
jne 0x3b5d74
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x3b5d7a
movq (%rdi), %rax
callq *0x18(%rax)
movq 0xa8(%rsp), %rax
testq %rax, %rax
je 0x3b5db1
lock
decl (%rax)
jne 0x3b5db1
movq 0xa0(%rsp), %rsi
movq 0xc0(%rsp), %rdi
testq %rdi, %rdi
jne 0x3b5dab
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x3b5db1
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3b5f83
lock
decl (%rax)
jne 0x3b5f83
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b5f73
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b5f83
jmp 0x3b5f8d
jmp 0x3b5f8d
jmp 0x3b5f8d
movq %rax, %rbx
jmp 0x3b5e1c
movq %rax, %rbx
leaq 0x1a0(%rsp), %rdi
callq 0x6fac4
jmp 0x3b5e0f
movq %rax, %rbx
leaq 0xf0(%rsp), %rdi
callq 0x6fac4
movq 0x158(%rsp), %rax
testq %rax, %rax
je 0x3b5e53
lock
decl (%rax)
jne 0x3b5e53
movq 0x150(%rsp), %rsi
movq 0x170(%rsp), %rdi
testq %rdi, %rdi
jne 0x3b5e4d
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x3b5e53
movq (%rdi), %rax
callq *0x18(%rax)
movq 0xa8(%rsp), %rax
testq %rax, %rax
je 0x3b5e8a
lock
decl (%rax)
jne 0x3b5e8a
movq 0xa0(%rsp), %rsi
movq 0xc0(%rsp), %rdi
testq %rdi, %rdi
jne 0x3b5e84
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x3b5e8a
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3b5f83
lock
decl (%rax)
jne 0x3b5f83
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b5f73
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b5f83
jmp 0x3b5f8d
jmp 0x3b5f8d
jmp 0x3b5f8d
jmp 0x3b5ed4
jmp 0x3b5ed4
jmp 0x3b5ed4
movq %rax, %rbx
leaq 0xa0(%rsp), %rdi
callq 0x6fac4
jmp 0x3b5f83
movq %rax, %rbx
jmp 0x3b5f83
jmp 0x3b5f0e
jmp 0x3b5f0e
jmp 0x3b5efc
movq %rax, %rbx
jmp 0x3b5f1e
movq %rax, %rbx
leaq 0xf0(%rsp), %rdi
callq 0x6fac4
jmp 0x3b5f11
movq %rax, %rbx
leaq 0x150(%rsp), %rdi
callq 0x6fac4
movq 0xa8(%rsp), %rax
testq %rax, %rax
je 0x3b5f55
lock
decl (%rax)
jne 0x3b5f55
movq 0xa0(%rsp), %rsi
movq 0xc0(%rsp), %rdi
testq %rdi, %rdi
jne 0x3b5f4f
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x3b5f55
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3b5f83
lock
decl (%rax)
jne 0x3b5f83
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
jne 0x3b5f7d
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x3b5f83
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
jmp 0x3b5f8d
movq %rax, %rdi
callq 0x61d68
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/matmul_x86_avx512.cpp |
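Note on the listing above: the recurring `lock incl (%rax)` / `lock decl (%rax)` pairs followed by a conditional call through `*0x18(%rax)` are the compiler inlining ncnn's Mat reference counting at every Mat copy and destruction, which is why the asm dwarfs the C++ source of forward(). A minimal stand-in sketch of that pattern, with simplified names (this is not ncnn's exact code):

#include <atomic>
#include <cstdlib>

struct MiniMat
{
    void* data = nullptr;
    std::atomic<int>* refcount = nullptr; // shared counter; null for externally owned data

    void addref()
    {
        if (refcount)
            refcount->fetch_add(1); // shows up above as: lock incl (%rax)
    }

    void release()
    {
        // shows up above as: lock decl (%rax); jne <skip>
        // free only when this was the last reference
        if (refcount && refcount->fetch_sub(1) == 1)
            free(data);
        data = nullptr;
        refcount = nullptr;
    }
};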
virtual thunk to ncnn::MatMul_x86_avx512::forward(std::vector<ncnn::Mat, std::allocator<ncnn::Mat>> const&, std::vector<ncnn::Mat, std::allocator<ncnn::Mat>>&, ncnn::Option const&) const | int MatMul_x86_avx512::forward(const std::vector<Mat>& bottom_blobs, std::vector<Mat>& top_blobs, const Option& opt) const
{
const Mat& A = bottom_blobs[0];
const Mat& B = bottom_blobs[1];
Mat& top_blob = top_blobs[0];
const int Adims = A.dims;
const int Bdims = B.dims;
const int max_ABdims = std::max(Adims, Bdims);
const size_t elemsize = A.elemsize;
if (Adims == 1 && Bdims == 1)
{
// dot product
std::vector<Mat> _bottom_blobs(2);
_bottom_blobs[0] = A.reshape(A.w, 1);
_bottom_blobs[1] = transB ? B.reshape(B.w, 1) : B.reshape(1, B.w);
gemm->forward(_bottom_blobs, top_blobs, opt);
top_blob = top_blob.reshape(1, opt.blob_allocator);
}
else if (Adims == 2 && Bdims == 2)
{
// matrix multiply
gemm->forward(bottom_blobs, top_blobs, opt);
}
else if (Adims == 1 && Bdims == 2)
{
// matrix multiply
std::vector<Mat> _bottom_blobs(2);
_bottom_blobs[0] = A.reshape(A.w, 1);
_bottom_blobs[1] = B;
gemm->forward(_bottom_blobs, top_blobs, opt);
top_blob = top_blob.reshape(top_blob.w, opt.blob_allocator);
}
else if (Adims == 2 && Bdims == 1)
{
// matrix multiply
std::vector<Mat> _bottom_blobs(2);
_bottom_blobs[0] = A;
_bottom_blobs[1] = transB ? B.reshape(B.w, 1) : B.reshape(1, B.w);
gemm->forward(_bottom_blobs, top_blobs, opt);
top_blob = top_blob.reshape(top_blob.h, opt.blob_allocator);
}
else if (Adims == 1 && Bdims > 2)
{
// batched matrix multiply
const int N = transB == 0 ? B.w : B.h;
const int batch_size = B.d * B.c;
Mat top_blob1(N, 1, batch_size, elemsize, opt.blob_allocator);
if (top_blob1.empty())
return -100;
Mat A1 = A.reshape(A.w, 1);
Mat B1 = B.reshape(B.w, B.h, batch_size);
for (int p = 0; p < batch_size; p++)
{
std::vector<Mat> _bottom_blobs(2);
_bottom_blobs[0] = A1;
_bottom_blobs[1] = B1.channel(p);
std::vector<Mat> _top_blobs(1);
_top_blobs[0] = top_blob1.channel(p);
gemm->forward(_bottom_blobs, _top_blobs, opt);
}
if (Bdims == 3)
top_blob = top_blob1.reshape(N, B.d * B.c, opt.blob_allocator);
else
top_blob = top_blob1.reshape(N, B.d, B.c, opt.blob_allocator);
}
else if (Adims > 2 && Bdims == 1)
{
// batched matrix multiply
const int M = A.h;
const int batch_size = A.d * A.c;
Mat top_blob1(1, M, batch_size, elemsize, opt.blob_allocator);
if (top_blob1.empty())
return -100;
Mat A1 = A.reshape(A.w, A.h, batch_size);
Mat BT = transB ? B.reshape(B.w, 1) : B.reshape(1, B.w);
for (int p = 0; p < batch_size; p++)
{
std::vector<Mat> _bottom_blobs(2);
_bottom_blobs[0] = A1.channel(p);
_bottom_blobs[1] = BT;
std::vector<Mat> _top_blobs(1);
_top_blobs[0] = top_blob1.channel(p);
gemm->forward(_bottom_blobs, _top_blobs, opt);
}
if (Adims == 3)
top_blob = top_blob1.reshape(M, A.d * A.c, opt.blob_allocator);
else
top_blob = top_blob1.reshape(M, A.d, A.c, opt.blob_allocator);
}
else if (max_ABdims == 3)
{
Mat A1 = Adims == 2 ? A.reshape(A.w, A.h, 1) : A;
Mat B1 = Bdims == 2 ? B.reshape(B.w, B.h, 1) : B;
const int M = A1.h;
const int N = transB == 0 ? B1.w : B1.h;
const int batch_size = std::max(A1.c, B1.c);
top_blob.create(N, M, batch_size, elemsize, opt.blob_allocator);
if (top_blob.empty())
return -100;
for (int p = 0; p < batch_size; p++)
{
int Ap = A1.c == 1 ? 0 : p;
int Bp = B1.c == 1 ? 0 : p;
std::vector<Mat> _bottom_blobs(2);
_bottom_blobs[0] = A1.channel(Ap);
_bottom_blobs[1] = B1.channel(Bp);
std::vector<Mat> _top_blobs(1);
_top_blobs[0] = top_blob.channel(p);
gemm->forward(_bottom_blobs, _top_blobs, opt);
}
}
else if (max_ABdims == 4)
{
Mat A1 = Adims == 3 ? A.reshape(A.w, A.h, A.c, 1) : A;
Mat B1 = Bdims == 3 ? B.reshape(B.w, B.h, B.c, 1) : B;
const int M = A1.h;
const int N = transB == 0 ? B1.w : B1.h;
const int batch_size_d = std::max(A1.d, B1.d);
const int batch_size_c = std::max(A1.c, B1.c);
top_blob.create(N, M, batch_size_d, batch_size_c, elemsize, opt.blob_allocator);
if (top_blob.empty())
return -100;
for (int p = 0; p < batch_size_c; p++)
{
int Ap = A1.c == 1 ? 0 : p;
int Bp = B1.c == 1 ? 0 : p;
for (int q = 0; q < batch_size_d; q++)
{
int Ad = A1.d == 1 ? 0 : q;
int Bd = B1.d == 1 ? 0 : q;
std::vector<Mat> _bottom_blobs(2);
_bottom_blobs[0] = A1.channel(Ap).depth(Ad);
_bottom_blobs[1] = B1.channel(Bp).depth(Bd);
std::vector<Mat> _top_blobs(1);
_top_blobs[0] = top_blob.channel(p).depth(q);
gemm->forward(_bottom_blobs, _top_blobs, opt);
}
}
}
else
{
NCNN_LOGE("impossible matmul %d %d", Adims, Bdims);
return -1;
}
return 0;
} | movq (%rdi), %rax
addq -0x40(%rax), %rdi
jmp 0x3b3316
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/matmul_x86_avx512.cpp |
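The four-instruction body above is the entire thunk: load the vtable pointer, read the this-adjustment offset stored at -0x40 in it, rebase %rdi, and tail-jump to the real forward at 0x3b3316. Such "virtual thunks" are the usual artifact of virtual inheritance in a class hierarchy; a hypothetical minimal reproduction (not ncnn's actual class layout):

struct Layer
{
    virtual ~Layer() {}
    virtual int forward() const { return 0; }
};
struct MatMul : virtual Layer {};
struct MatMul_impl : virtual MatMul
{
    int forward() const override { return 1; }
};

// A call through the virtual-base subobject dispatches via a thunk that
// adjusts `this` before entering MatMul_impl::forward, exactly as above.
int call(const Layer* l) { return l->forward(); }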
ncnn::MatMul_x86_avx::create_pipeline(ncnn::Option const&) | int MatMul_x86_avx::create_pipeline(const Option& opt)
{
gemm = ncnn::create_layer(ncnn::LayerType::Gemm);
ncnn::ParamDict pd;
pd.set(2, 0); // transA
pd.set(3, transB); // transB
pd.set(4, 0); // constantA
pd.set(5, 0); // constantB
pd.set(6, 1); // constantC
pd.set(7, 0); // M = outch
pd.set(8, 0); // N = size
pd.set(9, 0); // K = maxk*inch
pd.set(10, -1); // constant_broadcast_type_C = null
pd.set(11, 0); // output_N1M
pd.set(12, 1); // output_elempack
gemm->load_param(pd);
gemm->load_model(ModelBinFromMatArray(0));
gemm->create_pipeline(opt);
return 0;
} | pushq %r15
pushq %r14
pushq %rbx
subq $0x20, %rsp
movq %rsi, %rbx
movq %rdi, %r14
pushq $0x4a
popq %rdi
callq 0x782bf
movq %rax, 0x8(%r14)
movq %rsp, %r15
movq %r15, %rdi
callq 0x71548
pushq $0x2
popq %rsi
movq %r15, %rdi
xorl %edx, %edx
callq 0x7193a
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xd0(%r14,%rax), %edx
movq %rsp, %rdi
pushq $0x3
popq %rsi
callq 0x7193a
movq %rsp, %rdi
pushq $0x4
popq %rsi
xorl %edx, %edx
callq 0x7193a
movq %rsp, %rdi
pushq $0x5
popq %rsi
xorl %edx, %edx
callq 0x7193a
movq %rsp, %rdi
pushq $0x6
popq %rsi
pushq $0x1
popq %rdx
callq 0x7193a
movq %rsp, %rdi
pushq $0x7
popq %rsi
xorl %edx, %edx
callq 0x7193a
movq %rsp, %rdi
pushq $0x8
popq %rsi
xorl %edx, %edx
callq 0x7193a
movq %rsp, %rdi
pushq $0x9
popq %rsi
xorl %edx, %edx
callq 0x7193a
movq %rsp, %rdi
pushq $0xa
popq %rsi
pushq $-0x1
popq %rdx
callq 0x7193a
movq %rsp, %rdi
pushq $0xb
popq %rsi
xorl %edx, %edx
callq 0x7193a
movq %rsp, %rdi
pushq $0xc
popq %rsi
pushq $0x1
popq %rdx
callq 0x7193a
movq 0x8(%r14), %rdi
movq (%rdi), %rax
movq %rsp, %rsi
callq *0x10(%rax)
movq 0x8(%r14), %r15
leaq 0x10(%rsp), %rdi
xorl %esi, %esi
callq 0x6b00e
movq (%r15), %rax
leaq 0x10(%rsp), %rsi
movq %r15, %rdi
callq *0x18(%rax)
leaq 0x10(%rsp), %rdi
callq 0x6b03a
movq 0x8(%r14), %rdi
movq (%rdi), %rax
movq %rbx, %rsi
callq *0x20(%rax)
movq %rsp, %rdi
callq 0x71614
xorl %eax, %eax
addq $0x20, %rsp
popq %rbx
popq %r14
popq %r15
retq
movq %rax, %rbx
leaq 0x10(%rsp), %rdi
callq 0x6b03a
jmp 0x3b8f75
jmp 0x3b8f72
movq %rax, %rbx
movq %rsp, %rdi
callq 0x71614
movq %rbx, %rdi
callq 0x5f340
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/matmul_x86_avx.cpp |
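For context, here is what create_pipeline() above assembles, restated as a free function against ncnn's public API. The parameter ids and their meanings (2=transA, 3=transB, 6=constantC, ...) are taken from the source comments; the wrapper function itself is illustrative, not part of ncnn:

#include "layer.h"       // ncnn::create_layer, ncnn::Layer
#include "layer_type.h"  // ncnn::LayerType::Gemm
#include "modelbin.h"    // ncnn::ModelBinFromMatArray
#include "paramdict.h"   // ncnn::ParamDict

ncnn::Layer* make_matmul_gemm(int transB, const ncnn::Option& opt)
{
    ncnn::Layer* gemm = ncnn::create_layer(ncnn::LayerType::Gemm);

    ncnn::ParamDict pd;
    pd.set(2, 0);      // transA
    pd.set(3, transB); // transB
    pd.set(4, 0);      // constantA: A arrives at runtime
    pd.set(5, 0);      // constantB: B arrives at runtime
    pd.set(6, 1);      // constantC: no C input
    pd.set(10, -1);    // constant_broadcast_type_C = null
    pd.set(12, 1);     // output_elempack
    gemm->load_param(pd);

    // no constant weights to load, mirroring load_model(ModelBinFromMatArray(0))
    gemm->load_model(ncnn::ModelBinFromMatArray(0));
    gemm->create_pipeline(opt);
    return gemm;
}

The long asm sequence above is exactly this: twelve calls to ParamDict::set (the repeated `callq 0x7193a` with pushed id/value pairs), then the virtual load_param, load_model, and create_pipeline dispatches through the Gemm layer's vtable.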
ncnn::MatMul_x86_avx::forward(std::vector<ncnn::Mat, std::allocator<ncnn::Mat>> const&, std::vector<ncnn::Mat, std::allocator<ncnn::Mat>>&, ncnn::Option const&) const | int MatMul_x86_avx::forward(const std::vector<Mat>& bottom_blobs, std::vector<Mat>& top_blobs, const Option& opt) const
{
const Mat& A = bottom_blobs[0];
const Mat& B = bottom_blobs[1];
Mat& top_blob = top_blobs[0];
const int Adims = A.dims;
const int Bdims = B.dims;
const int max_ABdims = std::max(Adims, Bdims);
const size_t elemsize = A.elemsize;
if (Adims == 1 && Bdims == 1)
{
// dot product
std::vector<Mat> _bottom_blobs(2);
_bottom_blobs[0] = A.reshape(A.w, 1);
_bottom_blobs[1] = transB ? B.reshape(B.w, 1) : B.reshape(1, B.w);
gemm->forward(_bottom_blobs, top_blobs, opt);
top_blob = top_blob.reshape(1, opt.blob_allocator);
}
else if (Adims == 2 && Bdims == 2)
{
// matrix multiply
gemm->forward(bottom_blobs, top_blobs, opt);
}
else if (Adims == 1 && Bdims == 2)
{
// matrix multiply
std::vector<Mat> _bottom_blobs(2);
_bottom_blobs[0] = A.reshape(A.w, 1);
_bottom_blobs[1] = B;
gemm->forward(_bottom_blobs, top_blobs, opt);
top_blob = top_blob.reshape(top_blob.w, opt.blob_allocator);
}
else if (Adims == 2 && Bdims == 1)
{
// matrix multiply
std::vector<Mat> _bottom_blobs(2);
_bottom_blobs[0] = A;
_bottom_blobs[1] = transB ? B.reshape(B.w, 1) : B.reshape(1, B.w);
gemm->forward(_bottom_blobs, top_blobs, opt);
top_blob = top_blob.reshape(top_blob.h, opt.blob_allocator);
}
else if (Adims == 1 && Bdims > 2)
{
// batched matrix multiply
const int N = transB == 0 ? B.w : B.h;
const int batch_size = B.d * B.c;
Mat top_blob1(N, 1, batch_size, elemsize, opt.blob_allocator);
if (top_blob1.empty())
return -100;
Mat A1 = A.reshape(A.w, 1);
Mat B1 = B.reshape(B.w, B.h, batch_size);
for (int p = 0; p < batch_size; p++)
{
std::vector<Mat> _bottom_blobs(2);
_bottom_blobs[0] = A1;
_bottom_blobs[1] = B1.channel(p);
std::vector<Mat> _top_blobs(1);
_top_blobs[0] = top_blob1.channel(p);
gemm->forward(_bottom_blobs, _top_blobs, opt);
}
if (Bdims == 3)
top_blob = top_blob1.reshape(N, B.d * B.c, opt.blob_allocator);
else
top_blob = top_blob1.reshape(N, B.d, B.c, opt.blob_allocator);
}
else if (Adims > 2 && Bdims == 1)
{
// batched matrix multiply
const int M = A.h;
const int batch_size = A.d * A.c;
Mat top_blob1(1, M, batch_size, elemsize, opt.blob_allocator);
if (top_blob1.empty())
return -100;
Mat A1 = A.reshape(A.w, A.h, batch_size);
Mat BT = transB ? B.reshape(B.w, 1) : B.reshape(1, B.w);
for (int p = 0; p < batch_size; p++)
{
std::vector<Mat> _bottom_blobs(2);
_bottom_blobs[0] = A1.channel(p);
_bottom_blobs[1] = BT;
std::vector<Mat> _top_blobs(1);
_top_blobs[0] = top_blob1.channel(p);
gemm->forward(_bottom_blobs, _top_blobs, opt);
}
if (Adims == 3)
top_blob = top_blob1.reshape(M, A.d * A.c, opt.blob_allocator);
else
top_blob = top_blob1.reshape(M, A.d, A.c, opt.blob_allocator);
}
else if (max_ABdims == 3)
{
Mat A1 = Adims == 2 ? A.reshape(A.w, A.h, 1) : A;
Mat B1 = Bdims == 2 ? B.reshape(B.w, B.h, 1) : B;
const int M = A1.h;
const int N = transB == 0 ? B1.w : B1.h;
const int batch_size = std::max(A1.c, B1.c);
top_blob.create(N, M, batch_size, elemsize, opt.blob_allocator);
if (top_blob.empty())
return -100;
for (int p = 0; p < batch_size; p++)
{
int Ap = A1.c == 1 ? 0 : p;
int Bp = B1.c == 1 ? 0 : p;
std::vector<Mat> _bottom_blobs(2);
_bottom_blobs[0] = A1.channel(Ap);
_bottom_blobs[1] = B1.channel(Bp);
std::vector<Mat> _top_blobs(1);
_top_blobs[0] = top_blob.channel(p);
gemm->forward(_bottom_blobs, _top_blobs, opt);
}
}
else if (max_ABdims == 4)
{
Mat A1 = Adims == 3 ? A.reshape(A.w, A.h, A.c, 1) : A;
Mat B1 = Bdims == 3 ? B.reshape(B.w, B.h, B.c, 1) : B;
const int M = A1.h;
const int N = transB == 0 ? B1.w : B1.h;
const int batch_size_d = std::max(A1.d, B1.d);
const int batch_size_c = std::max(A1.c, B1.c);
top_blob.create(N, M, batch_size_d, batch_size_c, elemsize, opt.blob_allocator);
if (top_blob.empty())
return -100;
for (int p = 0; p < batch_size_c; p++)
{
int Ap = A1.c == 1 ? 0 : p;
int Bp = B1.c == 1 ? 0 : p;
for (int q = 0; q < batch_size_d; q++)
{
int Ad = A1.d == 1 ? 0 : q;
int Bd = B1.d == 1 ? 0 : q;
std::vector<Mat> _bottom_blobs(2);
_bottom_blobs[0] = A1.channel(Ap).depth(Ad);
_bottom_blobs[1] = B1.channel(Bp).depth(Bd);
std::vector<Mat> _top_blobs(1);
_top_blobs[0] = top_blob.channel(p).depth(q);
gemm->forward(_bottom_blobs, _top_blobs, opt);
}
}
}
else
{
NCNN_LOGE("impossible matmul %d %d", Adims, Bdims);
return -1;
}
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x1b8, %rsp # imm = 0x1B8
movq %rcx, %r15
movq %rdi, 0x98(%rsp)
movq (%rsi), %rbx
leaq 0x48(%rbx), %rax
movq %rax, 0x88(%rsp)
movq %rdx, 0x70(%rsp)
movq (%rdx), %r12
movl 0x28(%rbx), %ebp
movl 0x70(%rbx), %r14d
cmpl %r14d, %ebp
movl %r14d, %eax
cmovgl %ebp, %eax
movl %ebp, %edx
xorl $0x1, %edx
movl %r14d, %ecx
xorl $0x1, %ecx
movl %edx, %edi
orl %ecx, %edi
jne 0x3b90b3
movq %r15, %r13
leaq 0xa0(%rsp), %rdi
pushq $0x2
popq %rsi
leaq 0x10(%rsp), %rdx
callq 0x6fa72
movl 0x2c(%rbx), %edx
leaq 0x10(%rsp), %r14
pushq $0x1
popq %rbp
movq %r14, %rdi
movq %rbx, %r15
movq %rbx, %rsi
movl %ebp, %ecx
xorl %r8d, %r8d
callq 0x62e4e
movq 0xa0(%rsp), %rbx
movq 0x18(%rsp), %rax
cmpq %r14, %rbx
je 0x3b96d9
testq %rax, %rax
je 0x3b9082
lock
incl (%rax)
movq 0x8(%rbx), %rax
testq %rax, %rax
je 0x3b967b
lock
decl (%rax)
jne 0x3b967b
movq (%rbx), %rsi
movq 0x20(%rbx), %rdi
testq %rdi, %rdi
je 0x3b9673
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b967b
movl %ebp, %edi
xorl $0x2, %edi
movl %r14d, %r8d
xorl $0x2, %r8d
movl %edi, %r9d
orl %r8d, %r9d
jne 0x3b90e6
movq 0x98(%rsp), %rax
movq 0x8(%rax), %rdi
movq (%rdi), %rax
movq 0x70(%rsp), %rdx
movq %r15, %rcx
callq *0x30(%rax)
jmp 0x3ba489
orl %r8d, %edx
jne 0x3b916f
leaq 0xa0(%rsp), %rdi
pushq $0x2
popq %rsi
leaq 0x10(%rsp), %rdx
callq 0x6fa72
movl 0x2c(%rbx), %edx
leaq 0x10(%rsp), %r14
pushq $0x1
popq %rcx
movq %r14, %rdi
movq %rbx, %rbp
movq %rbx, %rsi
xorl %r8d, %r8d
callq 0x62e4e
movq 0xa0(%rsp), %rbx
movq 0x18(%rsp), %rax
cmpq %r14, %rbx
je 0x3b9f23
testq %rax, %rax
je 0x3b913e
lock
incl (%rax)
movq 0x8(%rbx), %rax
testq %rax, %rax
je 0x3b9ec5
lock
decl (%rax)
jne 0x3b9ec5
movq (%rbx), %rsi
movq 0x20(%rbx), %rdi
testq %rdi, %rdi
je 0x3b9ebd
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b9ec5
orl %ecx, %edi
jne 0x3b91dc
movq %rbx, %r14
leaq 0xa0(%rsp), %rdi
pushq $0x2
popq %rsi
leaq 0x10(%rsp), %rdx
callq 0x6fa72
movq 0xa0(%rsp), %rbx
movq %r14, %rsi
cmpq %r14, %rbx
je 0x3ba28e
movq 0x8(%rsi), %rax
testq %rax, %rax
je 0x3b91ab
lock
incl (%rax)
movq 0x8(%rbx), %rax
testq %rax, %rax
je 0x3ba236
lock
decl (%rax)
jne 0x3ba236
movq (%rbx), %rsi
movq 0x20(%rbx), %rdi
testq %rdi, %rdi
je 0x3ba22e
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3ba236
movq 0x10(%rbx), %r8
cmpl $0x1, %ebp
sete %cl
cmpl $0x3, %r14d
setge %dl
andb %cl, %dl
cmpb $0x1, %dl
movq %r12, 0x148(%rsp)
movq %r15, 0x140(%rsp)
jne 0x3b98f5
movq 0x98(%rsp), %rdx
movq (%rdx), %rax
movq -0x18(%rax), %rax
xorl %ecx, %ecx
cmpl $0x0, 0xd0(%rdx,%rax)
setne %cl
movl 0x74(%rbx,%rcx,4), %esi
movl 0x80(%rbx), %r13d
imull 0x7c(%rbx), %r13d
movq 0x8(%r15), %r9
leaq 0x10(%rsp), %r15
andq $0x0, 0x40(%r15)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, (%r15)
vmovups %xmm0, 0xc(%r15)
vmovaps %xmm0, 0x20(%r15)
vmovups %xmm0, 0x2c(%r15)
pushq $0x1
popq %rdx
movq %r15, %rdi
movl %esi, 0xe8(%rsp)
movl %r13d, %ecx
callq 0x63810
cmpq $0x0, (%r15)
je 0x3b9d8a
movslq 0x48(%rsp), %rax
imulq 0x50(%rsp), %rax
testq %rax, %rax
je 0x3b9d8a
movl 0x2c(%rbx), %edx
leaq 0xa0(%rsp), %rdi
pushq $0x1
popq %rcx
movq %rbx, %rsi
xorl %r8d, %r8d
callq 0x62e4e
movl 0x74(%rbx), %edx
movq %rbx, 0x138(%rsp)
movl 0x78(%rbx), %ecx
xorl %r15d, %r15d
leaq 0x150(%rsp), %rdi
movq 0x88(%rsp), %rsi
movl %r13d, %r8d
xorl %r9d, %r9d
callq 0x63020
movq 0x140(%rsp), %rbx
testl %r13d, %r13d
cmovlel %r15d, %r13d
leaq 0xf0(%rsp), %rbp
movl %r14d, 0x80(%rsp)
movq %r13, 0x88(%rsp)
cmpq %r15, %r13
je 0x3b9dff
movq %rbp, %rdi
pushq $0x2
popq %rsi
leaq 0x1a0(%rsp), %rdx
callq 0x6fa72
movq 0xf0(%rsp), %rbx
leaq 0xa0(%rsp), %rax
cmpq %rax, %rbx
je 0x3b93d9
movq 0xa8(%rsp), %rax
testq %rax, %rax
je 0x3b9341
lock
incl (%rax)
movq 0x8(%rbx), %rax
testq %rax, %rax
je 0x3b936b
lock
decl (%rax)
jne 0x3b936b
movq (%rbx), %rsi
movq 0x20(%rbx), %rdi
testq %rdi, %rdi
je 0x3b9363
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b936b
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x40(%rbx)
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, (%rbx)
vmovups %xmm0, 0xc(%rbx)
andl $0x0, 0x38(%rbx)
vmovups %xmm0, 0x28(%rbx)
vmovaps 0xa0(%rsp), %xmm0
vmovups %xmm0, (%rbx)
movq 0xb0(%rsp), %rax
movq %rax, 0x10(%rbx)
movl 0xb8(%rsp), %eax
movl %eax, 0x18(%rbx)
movq 0xc0(%rsp), %rax
movq %rax, 0x20(%rbx)
vmovups 0xc8(%rsp), %xmm0
vmovups %xmm0, 0x28(%rbx)
movl 0xd8(%rsp), %eax
movl %eax, 0x38(%rbx)
movq 0xe0(%rsp), %rax
movq %rax, 0x40(%rbx)
movslq 0x17c(%rsp), %rdi
movslq 0x180(%rsp), %r12
movl 0x184(%rsp), %esi
movq 0x190(%rsp), %rbp
movq %r15, 0x70(%rsp)
imulq %r15, %rbp
movq 0x160(%rsp), %rbx
imulq %rbx, %rbp
addq 0x150(%rsp), %rbp
movl 0x168(%rsp), %r8d
movq 0x170(%rsp), %r9
movq %r12, %rcx
imulq %rdi, %rcx
movq %rbx, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rbx
movq %rax, %r13
movl 0x178(%rsp), %r14d
cmpl $0x4, %r14d
cmoveq %rcx, %r13
movq 0xf0(%rsp), %r15
movq 0x50(%r15), %rax
testq %rax, %rax
je 0x3b94bf
lock
decl (%rax)
jne 0x3b94bf
movq %r9, 0x78(%rsp)
movl %r8d, 0x8(%rsp)
movq %rdi, 0x60(%rsp)
movl %esi, 0x68(%rsp)
movq 0x48(%r15), %rsi
movq 0x68(%r15), %rdi
testq %rdi, %rdi
je 0x3b94a4
movq (%rdi), %rax
callq *0x18(%rax)
movl 0x68(%rsp), %esi
movq 0x60(%rsp), %rdi
movl 0x8(%rsp), %r8d
movq 0x78(%rsp), %r9
jmp 0x3b94bf
movq %rsi, %rdi
callq 0x5f3e0
movl 0x68(%rsp), %esi
movq 0x60(%rsp), %rdi
movl 0x8(%rsp), %r8d
movq 0x78(%rsp), %r9
decl %r14d
movq %rbp, 0x48(%r15)
andq $0x0, 0x50(%r15)
movq %rbx, 0x58(%r15)
movl %r8d, 0x60(%r15)
movq %r9, 0x68(%r15)
movl %r14d, 0x70(%r15)
movl %edi, 0x74(%r15)
movl %r12d, 0x78(%r15)
movl $0x1, 0x7c(%r15)
movl %esi, 0x80(%r15)
movq %r13, 0x88(%r15)
leaq 0x1a0(%rsp), %rdi
pushq $0x1
popq %rsi
leaq 0x97(%rsp), %rdx
callq 0x6fa72
movslq 0x3c(%rsp), %rdi
movslq 0x40(%rsp), %r12
movl 0x44(%rsp), %esi
movq 0x50(%rsp), %rbx
imulq 0x70(%rsp), %rbx
movq 0x20(%rsp), %rbp
imulq %rbp, %rbx
addq 0x10(%rsp), %rbx
movl 0x28(%rsp), %r8d
movq 0x30(%rsp), %r9
movq %r12, %rcx
imulq %rdi, %rcx
movq %rbp, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rbp
movq %rax, %r13
movl 0x38(%rsp), %r14d
cmpl $0x4, %r14d
cmoveq %rcx, %r13
movq 0x1a0(%rsp), %r15
movq 0x8(%r15), %rax
testq %rax, %rax
je 0x3b95d8
lock
decl (%rax)
jne 0x3b95d8
movq %r9, 0x78(%rsp)
movl %r8d, 0x8(%rsp)
movq %rdi, 0x60(%rsp)
movl %esi, 0x68(%rsp)
movq (%r15), %rsi
movq 0x20(%r15), %rdi
testq %rdi, %rdi
je 0x3b95bd
movq (%rdi), %rax
callq *0x18(%rax)
movl 0x68(%rsp), %esi
movq 0x60(%rsp), %rdi
movl 0x8(%rsp), %r8d
movq 0x78(%rsp), %r9
jmp 0x3b95d8
movq %rsi, %rdi
callq 0x5f3e0
movl 0x68(%rsp), %esi
movq 0x60(%rsp), %rdi
movl 0x8(%rsp), %r8d
movq 0x78(%rsp), %r9
movq %rbx, (%r15)
andq $0x0, 0x8(%r15)
decl %r14d
movq %rbp, 0x10(%r15)
movl %r8d, 0x18(%r15)
movq %r9, 0x20(%r15)
movl %r14d, 0x28(%r15)
movl %edi, 0x2c(%r15)
movl %r12d, 0x30(%r15)
movl $0x1, 0x34(%r15)
movl %esi, 0x38(%r15)
movq %r13, 0x40(%r15)
movq 0x98(%rsp), %rax
movq 0x8(%rax), %rdi
movq (%rdi), %rax
leaq 0xf0(%rsp), %rbp
movq %rbp, %rsi
leaq 0x1a0(%rsp), %r14
movq %r14, %rdx
movq 0x140(%rsp), %rbx
movq %rbx, %rcx
callq *0x30(%rax)
movq %r14, %rdi
callq 0x6fac4
movq %rbp, %rdi
callq 0x6fac4
movq 0x70(%rsp), %r15
incq %r15
movq 0x148(%rsp), %r12
movl 0x80(%rsp), %r14d
movq 0x88(%rsp), %r13
jmp 0x3b92fc
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x40(%rbx)
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, (%rbx)
vmovups %xmm0, 0xc(%rbx)
andl $0x0, 0x38(%rbx)
vmovups %xmm0, 0x28(%rbx)
movq 0x18(%rsp), %rax
vmovaps 0x10(%rsp), %xmm0
vmovups %xmm0, (%rbx)
movq 0x20(%rsp), %rcx
movq %rcx, 0x10(%rbx)
movl 0x28(%rsp), %ecx
movl %ecx, 0x18(%rbx)
movq 0x30(%rsp), %rcx
movq %rcx, 0x20(%rbx)
vmovups 0x38(%rsp), %xmm0
vmovups %xmm0, 0x28(%rbx)
movl 0x48(%rsp), %ecx
movl %ecx, 0x38(%rbx)
movq 0x50(%rsp), %rcx
movq %rcx, 0x40(%rbx)
testq %rax, %rax
je 0x3b9702
lock
decl (%rax)
jne 0x3b9702
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b96fa
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b9702
movq %rsi, %rdi
callq 0x5f3e0
movq 0x98(%rsp), %rcx
movq (%rcx), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0xd0(%rcx,%rax)
movl 0x74(%r15), %eax
movl %eax, %edx
cmovel %ebp, %edx
cmovel %eax, %ebp
leaq 0x10(%rsp), %r14
movq %r14, %rdi
movq 0x88(%rsp), %rsi
movl %ebp, %ecx
xorl %r8d, %r8d
callq 0x62e4e
movq 0xa0(%rsp), %rbx
leaq 0x48(%rbx), %rcx
movq 0x18(%rsp), %rax
cmpq %r14, %rcx
je 0x3b97d2
testq %rax, %rax
je 0x3b975d
lock
incl (%rax)
movq 0x50(%rbx), %rax
testq %rax, %rax
je 0x3b9788
lock
decl (%rax)
jne 0x3b9788
movq 0x48(%rbx), %rsi
movq 0x68(%rbx), %rdi
testq %rdi, %rdi
je 0x3b9780
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b9788
movq %rsi, %rdi
callq 0x5f3e0
movq 0x18(%rsp), %rax
vmovaps 0x10(%rsp), %xmm0
vmovups %xmm0, 0x48(%rbx)
movq 0x20(%rsp), %rcx
movq %rcx, 0x58(%rbx)
movl 0x28(%rsp), %ecx
movl %ecx, 0x60(%rbx)
movq 0x30(%rsp), %rcx
movq %rcx, 0x68(%rbx)
vmovups 0x38(%rsp), %xmm0
vmovups %xmm0, 0x70(%rbx)
movl 0x48(%rsp), %ecx
movl %ecx, 0x80(%rbx)
movq 0x50(%rsp), %rcx
movq %rcx, 0x88(%rbx)
testq %rax, %rax
je 0x3b97fb
lock
decl (%rax)
jne 0x3b97fb
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b97f3
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b97fb
movq %rsi, %rdi
callq 0x5f3e0
movq 0x98(%rsp), %rax
movq 0x8(%rax), %rdi
movq (%rdi), %rax
leaq 0xa0(%rsp), %rsi
movq 0x70(%rsp), %rdx
movq %r13, %rcx
callq *0x30(%rax)
movq 0x8(%r13), %rcx
leaq 0x10(%rsp), %r14
pushq $0x1
popq %rdx
movq %r14, %rdi
movq %r12, %rsi
callq 0x62c8a
movq 0x18(%rsp), %rax
cmpq %r14, %r12
je 0x3b98c5
testq %rax, %rax
je 0x3b984a
lock
incl (%rax)
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x3b9877
lock
decl (%rax)
jne 0x3b9877
movq (%r12), %rsi
movq 0x20(%r12), %rdi
testq %rdi, %rdi
je 0x3b986f
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b9877
movq %rsi, %rdi
callq 0x5f3e0
movq 0x18(%rsp), %rax
vmovaps 0x10(%rsp), %xmm0
vmovups %xmm0, (%r12)
movq 0x20(%rsp), %rcx
movq %rcx, 0x10(%r12)
movl 0x28(%rsp), %ecx
movl %ecx, 0x18(%r12)
movq 0x30(%rsp), %rcx
movq %rcx, 0x20(%r12)
vmovups 0x38(%rsp), %xmm0
vmovups %xmm0, 0x28(%r12)
movl 0x48(%rsp), %ecx
movl %ecx, 0x38(%r12)
movq 0x50(%rsp), %rcx
movq %rcx, 0x40(%r12)
testq %rax, %rax
je 0x3ba47c
lock
decl (%rax)
jne 0x3ba47c
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3ba474
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3ba47c
cmpl $0x1, %r14d
sete %cl
cmpl $0x3, %ebp
setge %dl
andb %cl, %dl
cmpb $0x1, %dl
jne 0x3b9dbf
movl 0x30(%rbx), %edx
movl 0x38(%rbx), %ecx
imull 0x34(%rbx), %ecx
movq 0x8(%r15), %r9
leaq 0x10(%rsp), %r15
andq $0x0, 0x40(%r15)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, (%r15)
vmovups %xmm0, 0xc(%r15)
vmovaps %xmm0, 0x20(%r15)
vmovups %xmm0, 0x2c(%r15)
pushq $0x1
popq %r14
movq %r15, %rdi
movl %r14d, %esi
movl %edx, 0xe8(%rsp)
movq %rcx, %r13
callq 0x63810
cmpq $0x0, (%r15)
je 0x3b9e88
movslq 0x48(%rsp), %rax
imulq 0x50(%rsp), %rax
testq %rax, %rax
je 0x3b9e88
movl 0x2c(%rbx), %edx
movl 0x30(%rbx), %ecx
leaq 0xa0(%rsp), %rdi
movq %rbx, %rsi
movl %r13d, %r8d
xorl %r9d, %r9d
callq 0x63020
movq 0x98(%rsp), %rcx
movq (%rcx), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0xd0(%rcx,%rax)
movq %rbx, 0x138(%rsp)
movl 0x74(%rbx), %eax
movl %eax, %edx
cmovel %r14d, %edx
cmovel %eax, %r14d
xorl %r15d, %r15d
leaq 0x150(%rsp), %rdi
movq 0x88(%rsp), %rsi
movl %r14d, %ecx
xorl %r8d, %r8d
callq 0x62e4e
movq 0x140(%rsp), %rbx
testl %r13d, %r13d
cmovlel %r15d, %r13d
movq %r13, 0x88(%rsp)
leaq 0xf0(%rsp), %r14
leaq 0x1a0(%rsp), %r13
cmpq %r15, 0x88(%rsp)
je 0x3ba12a
movq %r14, %rdi
pushq $0x2
popq %rsi
movq %r13, %rdx
callq 0x6fa72
movslq 0xcc(%rsp), %rdi
movslq 0xd0(%rsp), %r8
movl 0xd4(%rsp), %esi
movq 0xe0(%rsp), %r12
movq %r15, 0x70(%rsp)
imulq %r15, %r12
movq 0xb0(%rsp), %rbx
imulq %rbx, %r12
addq 0xa0(%rsp), %r12
movl 0xb8(%rsp), %r9d
movq 0xc0(%rsp), %r10
movq %r8, %rcx
imulq %rdi, %rcx
movq %rbx, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rbx
movq %rax, %r13
movl 0xc8(%rsp), %r14d
cmpl $0x4, %r14d
cmoveq %rcx, %r13
movq 0xf0(%rsp), %r15
movq 0x8(%r15), %rax
testq %rax, %rax
je 0x3b9b1d
lock
decl (%rax)
jne 0x3b9b1d
movq %r10, 0x78(%rsp)
movl %r9d, 0x8(%rsp)
movq %r8, 0x60(%rsp)
movq %rdi, 0x68(%rsp)
movl %esi, 0x80(%rsp)
movq (%r15), %rsi
movq 0x20(%r15), %rdi
testq %rdi, %rdi
je 0x3b9afa
movq (%rdi), %rax
callq *0x18(%rax)
movl 0x80(%rsp), %esi
movq 0x68(%rsp), %rdi
movq 0x60(%rsp), %r8
movl 0x8(%rsp), %r9d
movq 0x78(%rsp), %r10
jmp 0x3b9b1d
movq %rsi, %rdi
callq 0x5f3e0
movl 0x80(%rsp), %esi
movq 0x68(%rsp), %rdi
movq 0x60(%rsp), %r8
movl 0x8(%rsp), %r9d
movq 0x78(%rsp), %r10
decl %r14d
movq %r12, (%r15)
andq $0x0, 0x8(%r15)
movq %rbx, 0x10(%r15)
movl %r9d, 0x18(%r15)
movq %r10, 0x20(%r15)
movl %r14d, 0x28(%r15)
movl %edi, 0x2c(%r15)
movl %r8d, 0x30(%r15)
movl $0x1, 0x34(%r15)
movl %esi, 0x38(%r15)
movq %r13, 0x40(%r15)
movq 0xf0(%rsp), %rbx
leaq 0x48(%rbx), %rax
leaq 0x150(%rsp), %rcx
cmpq %rcx, %rax
movq 0x70(%rsp), %r14
leaq 0x1a0(%rsp), %r15
je 0x3b9c0f
movq 0x158(%rsp), %rax
testq %rax, %rax
je 0x3b9b8a
lock
incl (%rax)
movq 0x50(%rbx), %rax
testq %rax, %rax
je 0x3b9bb5
lock
decl (%rax)
jne 0x3b9bb5
movq 0x48(%rbx), %rsi
movq 0x68(%rbx), %rdi
testq %rdi, %rdi
je 0x3b9bad
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b9bb5
movq %rsi, %rdi
callq 0x5f3e0
vmovaps 0x150(%rsp), %xmm0
vmovups %xmm0, 0x48(%rbx)
movq 0x160(%rsp), %rax
movq %rax, 0x58(%rbx)
movl 0x168(%rsp), %eax
movl %eax, 0x60(%rbx)
movq 0x170(%rsp), %rax
movq %rax, 0x68(%rbx)
vmovups 0x178(%rsp), %xmm0
vmovups %xmm0, 0x70(%rbx)
movl 0x188(%rsp), %eax
movl %eax, 0x80(%rbx)
movq 0x190(%rsp), %rax
movq %rax, 0x88(%rbx)
movq %r15, %rdi
pushq $0x1
popq %rsi
leaq 0x97(%rsp), %rdx
callq 0x6fa72
movslq 0x3c(%rsp), %rdi
movslq 0x40(%rsp), %r8
movl 0x44(%rsp), %esi
movq 0x50(%rsp), %rbx
imulq %r14, %rbx
movq 0x20(%rsp), %r12
imulq %r12, %rbx
addq 0x10(%rsp), %rbx
movl 0x28(%rsp), %r9d
movq 0x30(%rsp), %r10
movq %r8, %rcx
imulq %rdi, %rcx
movq %r12, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %r12
movq %rax, %r13
movl 0x38(%rsp), %r14d
cmpl $0x4, %r14d
cmoveq %rcx, %r13
movq 0x1a0(%rsp), %r15
movq 0x8(%r15), %rax
testq %rax, %rax
je 0x3b9cff
lock
decl (%rax)
jne 0x3b9cff
movq %r10, 0x78(%rsp)
movl %r9d, 0x8(%rsp)
movq %r8, 0x60(%rsp)
movq %rdi, 0x68(%rsp)
movl %esi, 0x80(%rsp)
movq (%r15), %rsi
movq 0x20(%r15), %rdi
testq %rdi, %rdi
je 0x3b9cdc
movq (%rdi), %rax
callq *0x18(%rax)
movl 0x80(%rsp), %esi
movq 0x68(%rsp), %rdi
movq 0x60(%rsp), %r8
movl 0x8(%rsp), %r9d
movq 0x78(%rsp), %r10
jmp 0x3b9cff
movq %rsi, %rdi
callq 0x5f3e0
movl 0x80(%rsp), %esi
movq 0x68(%rsp), %rdi
movq 0x60(%rsp), %r8
movl 0x8(%rsp), %r9d
movq 0x78(%rsp), %r10
movq %rbx, (%r15)
andq $0x0, 0x8(%r15)
decl %r14d
movq %r12, 0x10(%r15)
movl %r9d, 0x18(%r15)
movq %r10, 0x20(%r15)
movl %r14d, 0x28(%r15)
movl %edi, 0x2c(%r15)
movl %r8d, 0x30(%r15)
movl $0x1, 0x34(%r15)
movl %esi, 0x38(%r15)
movq %r13, 0x40(%r15)
movq 0x98(%rsp), %rax
movq 0x8(%rax), %rdi
movq (%rdi), %rax
leaq 0xf0(%rsp), %r14
movq %r14, %rsi
leaq 0x1a0(%rsp), %r13
movq %r13, %rdx
movq 0x140(%rsp), %rbx
movq %rbx, %rcx
callq *0x30(%rax)
movq %r13, %rdi
callq 0x6fac4
movq %r14, %rdi
callq 0x6fac4
movq 0x70(%rsp), %r15
incq %r15
movq 0x148(%rsp), %r12
jmp 0x3b9a04
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3bb0c8
lock
decl (%rax)
jne 0x3bb0c8
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3bb0c0
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bb0c8
cmpl $0x4, %eax
je 0x3ba0f9
cmpl $0x3, %eax
jne 0x3bb0d0
cmpl $0x2, %ebp
jne 0x3ba49d
movl 0x2c(%rbx), %edx
movl 0x30(%rbx), %ecx
leaq 0x10(%rsp), %rdi
movq %r8, %r13
pushq $0x1
popq %r8
movq %rbx, %rsi
xorl %r9d, %r9d
callq 0x63020
movq %r13, %r8
jmp 0x3ba4e0
cmpl $0x3, %r14d
jne 0x3ba1af
movq 0x138(%rsp), %rax
movl 0x80(%rax), %ecx
imull 0x7c(%rax), %ecx
movq 0x8(%rbx), %r8
leaq 0xf0(%rsp), %r14
leaq 0x10(%rsp), %rsi
movq %r14, %rdi
movl 0xe8(%rsp), %edx
callq 0x62e4e
movq 0xf8(%rsp), %rax
cmpq %r14, %r12
je 0x3bb16a
testq %rax, %rax
je 0x3b9e54
lock
incl (%rax)
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x3bb104
lock
decl (%rax)
jne 0x3bb104
movq (%r12), %rsi
movq 0x20(%r12), %rdi
testq %rdi, %rdi
je 0x3bb0fc
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bb104
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3bb0c8
lock
decl (%rax)
jne 0x3bb0c8
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3bb0c0
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bb0c8
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x40(%rbx)
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, (%rbx)
vmovups %xmm0, 0xc(%rbx)
andl $0x0, 0x38(%rbx)
vmovups %xmm0, 0x28(%rbx)
movq 0x18(%rsp), %rax
vmovaps 0x10(%rsp), %xmm0
vmovups %xmm0, (%rbx)
movq 0x20(%rsp), %rcx
movq %rcx, 0x10(%rbx)
movl 0x28(%rsp), %ecx
movl %ecx, 0x18(%rbx)
movq 0x30(%rsp), %rcx
movq %rcx, 0x20(%rbx)
vmovups 0x38(%rsp), %xmm0
vmovups %xmm0, 0x28(%rbx)
movl 0x48(%rsp), %ecx
movl %ecx, 0x38(%rbx)
movq 0x50(%rsp), %rcx
movq %rcx, 0x40(%rbx)
testq %rax, %rax
je 0x3b9f4c
lock
decl (%rax)
jne 0x3b9f4c
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3b9f44
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b9f4c
movq %rsi, %rdi
callq 0x5f3e0
movq 0xa0(%rsp), %rbx
cmpq %rbp, %rbx
je 0x3b9ffd
movq 0x50(%rbp), %rax
testq %rax, %rax
je 0x3b9f69
lock
incl (%rax)
movq 0x50(%rbx), %rax
testq %rax, %rax
je 0x3b9f94
lock
decl (%rax)
jne 0x3b9f94
movq 0x48(%rbx), %rsi
movq 0x68(%rbx), %rdi
testq %rdi, %rdi
je 0x3b9f8c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3b9f94
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x88(%rbx)
leaq 0x48(%rbx), %rax
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, 0xc(%rax)
vmovups %xmm0, (%rax)
andl $0x0, 0x80(%rbx)
vmovups %xmm0, 0x70(%rbx)
vmovups 0x48(%rbp), %xmm0
vmovups %xmm0, 0x48(%rbx)
movq 0x58(%rbp), %rax
movq %rax, 0x58(%rbx)
movl 0x60(%rbp), %eax
movl %eax, 0x60(%rbx)
movq 0x68(%rbp), %rax
movq %rax, 0x68(%rbx)
vmovups 0x70(%rbp), %xmm0
vmovups %xmm0, 0x70(%rbx)
movl 0x80(%rbp), %eax
movl %eax, 0x80(%rbx)
movq 0x88(%rbp), %rax
movq %rax, 0x88(%rbx)
movq 0x98(%rsp), %rax
movq 0x8(%rax), %rdi
movq (%rdi), %rax
leaq 0xa0(%rsp), %rsi
movq 0x70(%rsp), %rdx
movq %r15, %rcx
callq *0x30(%rax)
movl 0x2c(%r12), %edx
movq 0x8(%r15), %rcx
leaq 0x10(%rsp), %r14
movq %r14, %rdi
movq %r12, %rsi
callq 0x62c8a
movq 0x18(%rsp), %rax
cmpq %r14, %r12
je 0x3ba0c9
testq %rax, %rax
je 0x3ba04e
lock
incl (%rax)
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x3ba07b
lock
decl (%rax)
jne 0x3ba07b
movq (%r12), %rsi
movq 0x20(%r12), %rdi
testq %rdi, %rdi
je 0x3ba073
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3ba07b
movq %rsi, %rdi
callq 0x5f3e0
movq 0x18(%rsp), %rax
vmovaps 0x10(%rsp), %xmm0
vmovups %xmm0, (%r12)
movq 0x20(%rsp), %rcx
movq %rcx, 0x10(%r12)
movl 0x28(%rsp), %ecx
movl %ecx, 0x18(%r12)
movq 0x30(%rsp), %rcx
movq %rcx, 0x20(%r12)
vmovups 0x38(%rsp), %xmm0
vmovups %xmm0, 0x28(%r12)
movl 0x48(%rsp), %ecx
movl %ecx, 0x38(%r12)
movq 0x50(%rsp), %rcx
movq %rcx, 0x40(%r12)
testq %rax, %rax
je 0x3ba47c
lock
decl (%rax)
jne 0x3ba47c
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3ba474
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3ba47c
cmpl $0x3, %ebp
jne 0x3baa11
movq %r8, %r13
movl 0x2c(%rbx), %edx
movl 0x30(%rbx), %ecx
movl 0x38(%rbx), %r8d
andq $0x0, (%rsp)
leaq 0x10(%rsp), %rdi
pushq $0x1
popq %r9
movq %rbx, %rsi
callq 0x632f0
jmp 0x3baa57
cmpl $0x3, %ebp
jne 0x3baf6c
movq 0x138(%rsp), %rax
movl 0x38(%rax), %ecx
imull 0x34(%rax), %ecx
movq 0x8(%rbx), %r8
leaq 0xf0(%rsp), %r14
leaq 0x10(%rsp), %rsi
movq %r14, %rdi
movl 0xe8(%rsp), %edx
callq 0x62e4e
movq 0xf8(%rsp), %rax
cmpq %r14, %r12
je 0x3bb3c8
testq %rax, %rax
je 0x3ba17b
lock
incl (%rax)
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x3bb362
lock
decl (%rax)
jne 0x3bb362
movq (%r12), %rsi
movq 0x20(%r12), %rdi
testq %rdi, %rdi
je 0x3bb35a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bb362
movq 0x138(%rsp), %rax
movl 0x7c(%rax), %ecx
movl 0x80(%rax), %r8d
movq 0x8(%rbx), %r9
leaq 0xf0(%rsp), %r14
leaq 0x10(%rsp), %rsi
movq %r14, %rdi
movl 0xe8(%rsp), %edx
callq 0x63020
movq 0xf8(%rsp), %rax
cmpq %r14, %r12
je 0x3bb20e
testq %rax, %rax
je 0x3ba1fa
lock
incl (%rax)
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x3bb1a8
lock
decl (%rax)
jne 0x3bb1a8
movq (%r12), %rsi
movq 0x20(%r12), %rdi
testq %rdi, %rdi
je 0x3bb1a0
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bb1a8
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x40(%rbx)
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, (%rbx)
vmovups %xmm0, 0xc(%rbx)
andl $0x0, 0x38(%rbx)
vmovups %xmm0, 0x28(%rbx)
movq %r14, %rsi
vmovups (%r14), %xmm0
vmovups %xmm0, (%rbx)
movq 0x10(%r14), %rax
movq %rax, 0x10(%rbx)
movl 0x18(%r14), %eax
movl %eax, 0x18(%rbx)
movq 0x20(%r14), %rax
movq %rax, 0x20(%rbx)
vmovups 0x28(%r14), %xmm0
vmovups %xmm0, 0x28(%rbx)
movl 0x38(%r14), %eax
movl %eax, 0x38(%rbx)
movq 0x40(%r14), %rax
movq %rax, 0x40(%rbx)
pushq $0x1
popq %rcx
movq 0x98(%rsp), %rdx
movq (%rdx), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0xd0(%rdx,%rax)
movl 0x74(%rsi), %eax
movl %eax, %edx
cmovel %ecx, %edx
cmovel %eax, %ecx
leaq 0x10(%rsp), %r14
movq %r14, %rdi
movq 0x88(%rsp), %rsi
xorl %r8d, %r8d
callq 0x62e4e
movq 0xa0(%rsp), %rbx
leaq 0x48(%rbx), %rcx
movq 0x18(%rsp), %rax
cmpq %r14, %rcx
je 0x3ba35e
testq %rax, %rax
je 0x3ba2e9
lock
incl (%rax)
movq 0x50(%rbx), %rax
testq %rax, %rax
je 0x3ba314
lock
decl (%rax)
jne 0x3ba314
movq 0x48(%rbx), %rsi
movq 0x68(%rbx), %rdi
testq %rdi, %rdi
je 0x3ba30c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3ba314
movq %rsi, %rdi
callq 0x5f3e0
movq 0x18(%rsp), %rax
vmovaps 0x10(%rsp), %xmm0
vmovups %xmm0, 0x48(%rbx)
movq 0x20(%rsp), %rcx
movq %rcx, 0x58(%rbx)
movl 0x28(%rsp), %ecx
movl %ecx, 0x60(%rbx)
movq 0x30(%rsp), %rcx
movq %rcx, 0x68(%rbx)
vmovups 0x38(%rsp), %xmm0
vmovups %xmm0, 0x70(%rbx)
movl 0x48(%rsp), %ecx
movl %ecx, 0x80(%rbx)
movq 0x50(%rsp), %rcx
movq %rcx, 0x88(%rbx)
testq %rax, %rax
je 0x3ba387
lock
decl (%rax)
jne 0x3ba387
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3ba37f
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3ba387
movq %rsi, %rdi
callq 0x5f3e0
movq 0x98(%rsp), %rax
movq 0x8(%rax), %rdi
movq (%rdi), %rax
leaq 0xa0(%rsp), %rsi
movq 0x70(%rsp), %rdx
movq %r15, %rcx
callq *0x30(%rax)
movl 0x30(%r12), %edx
movq 0x8(%r15), %rcx
leaq 0x10(%rsp), %r14
movq %r14, %rdi
movq %r12, %rsi
callq 0x62c8a
movq 0x18(%rsp), %rax
cmpq %r14, %r12
je 0x3ba453
testq %rax, %rax
je 0x3ba3d8
lock
incl (%rax)
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x3ba405
lock
decl (%rax)
jne 0x3ba405
movq (%r12), %rsi
movq 0x20(%r12), %rdi
testq %rdi, %rdi
je 0x3ba3fd
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3ba405
movq %rsi, %rdi
callq 0x5f3e0
movq 0x18(%rsp), %rax
vmovaps 0x10(%rsp), %xmm0
vmovups %xmm0, (%r12)
movq 0x20(%rsp), %rcx
movq %rcx, 0x10(%r12)
movl 0x28(%rsp), %ecx
movl %ecx, 0x18(%r12)
movq 0x30(%rsp), %rcx
movq %rcx, 0x20(%r12)
vmovups 0x38(%rsp), %xmm0
vmovups %xmm0, 0x28(%r12)
movl 0x48(%rsp), %ecx
movl %ecx, 0x38(%r12)
movq 0x50(%rsp), %rcx
movq %rcx, 0x40(%r12)
testq %rax, %rax
je 0x3ba47c
lock
decl (%rax)
jne 0x3ba47c
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3ba474
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3ba47c
movq %rsi, %rdi
callq 0x5f3e0
leaq 0xa0(%rsp), %rdi
callq 0x6fac4
xorl %eax, %eax
addq $0x1b8, %rsp # imm = 0x1B8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq 0x8(%rbx), %rax
vmovups (%rbx), %xmm0
vmovaps %xmm0, 0x10(%rsp)
movq %r8, 0x20(%rsp)
movl 0x18(%rbx), %ecx
movl %ecx, 0x28(%rsp)
movq 0x20(%rbx), %rcx
movq %rcx, 0x30(%rsp)
movl %ebp, 0x38(%rsp)
vmovups 0x2c(%rbx), %xmm0
vmovups %xmm0, 0x3c(%rsp)
movq 0x40(%rbx), %rcx
movq %rcx, 0x50(%rsp)
testq %rax, %rax
je 0x3ba4e0
lock
incl (%rax)
cmpl $0x2, %r14d
jne 0x3ba510
movl 0x74(%rbx), %edx
movl 0x78(%rbx), %ecx
leaq 0xa0(%rsp), %rdi
movq %r8, %rbx
pushq $0x1
popq %r8
movq 0x88(%rsp), %rsi
xorl %r9d, %r9d
callq 0x63020
movq %rbx, %r8
jmp 0x3ba576
movq 0x50(%rbx), %rax
vmovups 0x48(%rbx), %xmm0
vmovaps %xmm0, 0xa0(%rsp)
movq 0x58(%rbx), %rcx
movq %rcx, 0xb0(%rsp)
movl 0x60(%rbx), %ecx
movl %ecx, 0xb8(%rsp)
movq 0x68(%rbx), %rcx
movq %rcx, 0xc0(%rsp)
vmovups 0x70(%rbx), %xmm0
vmovups %xmm0, 0xc8(%rsp)
movl 0x80(%rbx), %ecx
movl %ecx, 0xd8(%rsp)
movq 0x88(%rbx), %rcx
movq %rcx, 0xe0(%rsp)
testq %rax, %rax
je 0x3ba576
lock
incl (%rax)
movq 0x98(%rsp), %rdx
movq (%rdx), %rax
movq -0x18(%rax), %rax
xorl %ecx, %ecx
cmpl $0x0, 0xd0(%rdx,%rax)
setne %cl
movl 0xcc(%rsp,%rcx,4), %esi
movl 0x40(%rsp), %edx
movl 0x48(%rsp), %eax
movl 0xd8(%rsp), %ecx
cmpl %ecx, %eax
cmovgl %eax, %ecx
movq 0x8(%r15), %r9
movq %r12, %rdi
movq %rcx, %rbx
callq 0x63810
cmpq $0x0, (%r12)
je 0x3ba9d3
movslq 0x38(%r12), %rax
imulq 0x40(%r12), %rax
testq %rax, %rax
je 0x3ba9d3
xorl %eax, %eax
testl %ebx, %ebx
cmovlel %eax, %ebx
movq %rbx, 0x88(%rsp)
leaq 0x150(%rsp), %r14
leaq 0xf0(%rsp), %r15
xorl %r13d, %r13d
cmpq %r13, 0x88(%rsp)
je 0x3bafe8
movl 0x48(%rsp), %ebp
movl 0xd8(%rsp), %ebx
movq %r14, %rdi
pushq $0x2
popq %rsi
movq %r15, %rdx
callq 0x6fa72
cmpl $0x1, %ebp
movslq 0x3c(%rsp), %rdi
movslq 0x40(%rsp), %r8
movl 0x44(%rsp), %esi
movq %r13, 0x70(%rsp)
movl $0x0, %eax
cmoveq %rax, %r13
imulq 0x50(%rsp), %r13
movq 0x20(%rsp), %r14
imulq %r14, %r13
addq 0x10(%rsp), %r13
movl 0x28(%rsp), %r9d
movq 0x30(%rsp), %r10
movq %r8, %rcx
imulq %rdi, %rcx
movq %r14, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %r14
movq %rax, %r15
movl 0x38(%rsp), %r12d
cmpl $0x4, %r12d
cmoveq %rcx, %r15
movq 0x150(%rsp), %rbp
movq 0x8(%rbp), %rax
testq %rax, %rax
je 0x3ba710
lock
decl (%rax)
jne 0x3ba710
movq %r10, 0x78(%rsp)
movl %r9d, 0x8(%rsp)
movq %r8, 0x60(%rsp)
movq %rdi, 0x68(%rsp)
movl %esi, 0x80(%rsp)
movq (%rbp), %rsi
movq 0x20(%rbp), %rdi
testq %rdi, %rdi
je 0x3ba6ed
movq (%rdi), %rax
callq *0x18(%rax)
movl 0x80(%rsp), %esi
movq 0x68(%rsp), %rdi
movq 0x60(%rsp), %r8
movl 0x8(%rsp), %r9d
movq 0x78(%rsp), %r10
jmp 0x3ba710
movq %rsi, %rdi
callq 0x5f3e0
movl 0x80(%rsp), %esi
movq 0x68(%rsp), %rdi
movq 0x60(%rsp), %r8
movl 0x8(%rsp), %r9d
movq 0x78(%rsp), %r10
andq $0x0, 0x8(%rbp)
decl %r12d
cmpl $0x1, %ebx
movq %r13, (%rbp)
movq %r14, 0x10(%rbp)
movl %r9d, 0x18(%rbp)
movq %r10, 0x20(%rbp)
movl %r12d, 0x28(%rbp)
movl %edi, 0x2c(%rbp)
movl %r8d, 0x30(%rbp)
movl $0x1, 0x34(%rbp)
movl %esi, 0x38(%rbp)
movq %r15, 0x40(%rbp)
movslq 0xcc(%rsp), %rdi
movslq 0xd0(%rsp), %r12
movl 0xd4(%rsp), %esi
movq 0xb0(%rsp), %rbp
movl 0xb8(%rsp), %r8d
movq 0xc0(%rsp), %r9
movl 0xc8(%rsp), %ebx
movq 0x70(%rsp), %r13
movl $0x0, %eax
cmoveq %rax, %r13
imulq 0xe0(%rsp), %r13
imulq %rbp, %r13
addq 0xa0(%rsp), %r13
movq %r12, %rcx
imulq %rdi, %rcx
movq %rbp, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rbp
movq %rax, %r15
cmpl $0x4, %ebx
cmoveq %rcx, %r15
movq 0x150(%rsp), %r14
movq 0x50(%r14), %rax
testq %rax, %rax
je 0x3ba837
lock
decl (%rax)
jne 0x3ba837
movq %r9, 0x8(%rsp)
movl %r8d, 0x60(%rsp)
movq %rdi, 0x68(%rsp)
movl %esi, 0x80(%rsp)
movq 0x48(%r14), %rsi
movq 0x68(%r14), %rdi
testq %rdi, %rdi
je 0x3ba819
movq (%rdi), %rax
callq *0x18(%rax)
movl 0x80(%rsp), %esi
movq 0x68(%rsp), %rdi
movl 0x60(%rsp), %r8d
movq 0x8(%rsp), %r9
jmp 0x3ba837
movq %rsi, %rdi
callq 0x5f3e0
movl 0x80(%rsp), %esi
movq 0x68(%rsp), %rdi
movl 0x60(%rsp), %r8d
movq 0x8(%rsp), %r9
decl %ebx
movq %r13, 0x48(%r14)
andq $0x0, 0x50(%r14)
movq %rbp, 0x58(%r14)
movl %r8d, 0x60(%r14)
movq %r9, 0x68(%r14)
movl %ebx, 0x70(%r14)
movl %edi, 0x74(%r14)
movl %r12d, 0x78(%r14)
movl $0x1, 0x7c(%r14)
movl %esi, 0x80(%r14)
movq %r15, 0x88(%r14)
leaq 0xf0(%rsp), %rdi
pushq $0x1
popq %rsi
leaq 0x1a0(%rsp), %rdx
callq 0x6fa72
movq 0x148(%rsp), %rsi
movslq 0x2c(%rsi), %r8
movslq 0x30(%rsi), %r12
movl 0x34(%rsi), %edi
movq 0x40(%rsi), %rbp
imulq 0x70(%rsp), %rbp
movq 0x10(%rsi), %r13
imulq %r13, %rbp
addq (%rsi), %rbp
movl 0x18(%rsi), %r9d
movq 0x20(%rsi), %r10
movq %r12, %rcx
imulq %r8, %rcx
movq %rcx, %rax
imulq %r13, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %r13
movq %rax, %r15
movl 0x28(%rsi), %ebx
cmpl $0x4, %ebx
cmoveq %rcx, %r15
movq 0xf0(%rsp), %r14
movq 0x8(%r14), %rax
testq %rax, %rax
je 0x3ba954
lock
decl (%rax)
jne 0x3ba954
movq %r10, 0x8(%rsp)
movl %r9d, 0x60(%rsp)
movq %r8, 0x68(%rsp)
movl %edi, 0x80(%rsp)
movq (%r14), %rsi
movq 0x20(%r14), %rdi
testq %rdi, %rdi
je 0x3ba936
movq (%rdi), %rax
callq *0x18(%rax)
movl 0x80(%rsp), %edi
movq 0x68(%rsp), %r8
movl 0x60(%rsp), %r9d
movq 0x8(%rsp), %r10
jmp 0x3ba954
movq %rsi, %rdi
callq 0x5f3e0
movl 0x80(%rsp), %edi
movq 0x68(%rsp), %r8
movl 0x60(%rsp), %r9d
movq 0x8(%rsp), %r10
movq %rbp, (%r14)
andq $0x0, 0x8(%r14)
decl %ebx
movq %r13, 0x10(%r14)
movl %r9d, 0x18(%r14)
movq %r10, 0x20(%r14)
movl %ebx, 0x28(%r14)
movl %r8d, 0x2c(%r14)
movl %r12d, 0x30(%r14)
movl $0x1, 0x34(%r14)
movl %edi, 0x38(%r14)
movq %r15, 0x40(%r14)
movq 0x98(%rsp), %rax
movq 0x8(%rax), %rdi
movq (%rdi), %rax
leaq 0x150(%rsp), %r14
movq %r14, %rsi
leaq 0xf0(%rsp), %r15
movq %r15, %rdx
movq 0x140(%rsp), %rcx
callq *0x30(%rax)
movq %r15, %rdi
callq 0x6fac4
movq %r14, %rdi
callq 0x6fac4
movq 0x70(%rsp), %r13
incq %r13
jmp 0x3ba5fd
movq 0xa8(%rsp), %rax
testq %rax, %rax
je 0x3bb06c
lock
decl (%rax)
jne 0x3bb06c
movq 0xa0(%rsp), %rsi
movq 0xc0(%rsp), %rdi
testq %rdi, %rdi
je 0x3bb064
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bb06c
movq 0x8(%rbx), %rax
vmovups (%rbx), %xmm0
vmovaps %xmm0, 0x10(%rsp)
movq %r8, %r13
movq %r8, 0x20(%rsp)
movl 0x18(%rbx), %ecx
movl %ecx, 0x28(%rsp)
movq 0x20(%rbx), %rcx
movq %rcx, 0x30(%rsp)
movl %ebp, 0x38(%rsp)
vmovups 0x2c(%rbx), %xmm0
vmovups %xmm0, 0x3c(%rsp)
movq 0x40(%rbx), %rcx
movq %rcx, 0x50(%rsp)
testq %rax, %rax
je 0x3baa57
lock
incl (%rax)
cmpl $0x3, %r14d
jne 0x3baa8a
movl 0x74(%rbx), %edx
movl 0x78(%rbx), %ecx
movl 0x80(%rbx), %r8d
andq $0x0, (%rsp)
leaq 0xa0(%rsp), %rdi
pushq $0x1
popq %r9
movq 0x88(%rsp), %rsi
callq 0x632f0
jmp 0x3baaf0
movq 0x50(%rbx), %rax
vmovups 0x48(%rbx), %xmm0
vmovaps %xmm0, 0xa0(%rsp)
movq 0x58(%rbx), %rcx
movq %rcx, 0xb0(%rsp)
movl 0x60(%rbx), %ecx
movl %ecx, 0xb8(%rsp)
movq 0x68(%rbx), %rcx
movq %rcx, 0xc0(%rsp)
vmovups 0x70(%rbx), %xmm0
vmovups %xmm0, 0xc8(%rsp)
movl 0x80(%rbx), %ecx
movl %ecx, 0xd8(%rsp)
movq 0x88(%rbx), %rcx
movq %rcx, 0xe0(%rsp)
testq %rax, %rax
je 0x3baaf0
lock
incl (%rax)
movq 0x98(%rsp), %rdx
movq (%rdx), %rax
movq -0x18(%rax), %rax
xorl %ecx, %ecx
cmpl $0x0, 0xd0(%rdx,%rax)
setne %cl
movl 0xcc(%rsp,%rcx,4), %esi
movl 0x40(%rsp), %edx
movl 0x44(%rsp), %eax
movl 0xd4(%rsp), %ecx
movl 0xd8(%rsp), %r8d
cmpl %ecx, %eax
cmovgl %eax, %ecx
movl 0x48(%rsp), %eax
cmpl %r8d, %eax
cmovgl %eax, %r8d
movq 0x8(%r15), %rax
movq %rax, (%rsp)
movq %r12, %rdi
movq %rcx, %rbx
movq %r8, %r14
movq %r13, %r9
callq 0x6393c
cmpq $0x0, (%r12)
je 0x3baf2e
movslq 0x38(%r12), %rax
imulq 0x40(%r12), %rax
testq %rax, %rax
je 0x3baf2e
xorl %eax, %eax
testl %ebx, %ebx
cmovlel %eax, %ebx
movq %rbx, 0x80(%rsp)
testl %r14d, %r14d
cmovlel %eax, %r14d
movq %r14, 0x138(%rsp)
leaq 0x150(%rsp), %r15
leaq 0xf0(%rsp), %r12
xorl %eax, %eax
cmpq 0x138(%rsp), %rax
je 0x3bb026
cmpl $0x1, 0x48(%rsp)
movq %rax, 0x88(%rsp)
movl %eax, %ecx
movq %rcx, %rdx
movl $0x0, %eax
cmoveq %rax, %rdx
movq %rdx, 0x68(%rsp)
cmpl $0x1, 0xd8(%rsp)
cmoveq %rax, %rcx
movq %rcx, 0x60(%rsp)
xorl %ebx, %ebx
cmpq %rbx, 0x80(%rsp)
je 0x3baf1e
movq %rbx, 0x70(%rsp)
movl 0x44(%rsp), %r14d
movl 0xd4(%rsp), %ebx
movq %r15, %rdi
pushq $0x2
popq %rsi
movq %r12, %rdx
callq 0x6fa72
movslq 0x3c(%rsp), %rcx
movslq 0x40(%rsp), %rbp
movq 0x50(%rsp), %r13
imulq 0x68(%rsp), %r13
movq 0x20(%rsp), %r12
imulq %r12, %r13
addq 0x10(%rsp), %r13
cmpl $0x1, %r14d
movl 0x28(%rsp), %esi
movq 0x70(%rsp), %rdx
movq %rdx, %r15
movl $0x0, %eax
cmoveq %rax, %r15
movq 0x30(%rsp), %rdi
movq 0x150(%rsp), %r14
movq 0x8(%r14), %rax
testq %rax, %rax
je 0x3bacc0
lock
decl (%rax)
jne 0x3bacc0
movq %rdi, 0xe8(%rsp)
movl %esi, 0x78(%rsp)
movq %rcx, 0x8(%rsp)
movq (%r14), %rsi
movq 0x20(%r14), %rdi
testq %rdi, %rdi
je 0x3baca2
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x70(%rsp), %rdx
movq 0x8(%rsp), %rcx
movl 0x78(%rsp), %esi
movq 0xe8(%rsp), %rdi
jmp 0x3bacc0
movq %rsi, %rdi
callq 0x5f3e0
movq 0x70(%rsp), %rdx
movq 0x8(%rsp), %rcx
movl 0x78(%rsp), %esi
movq 0xe8(%rsp), %rdi
movq %rbp, %rax
imulq %rcx, %rax
imulq %rax, %r15
imulq %r12, %r15
addq %r15, %r13
movq %r13, (%r14)
andq $0x0, 0x8(%r14)
movq %r12, 0x10(%r14)
movl %esi, 0x18(%r14)
movq %rdi, 0x20(%r14)
movl $0x2, 0x28(%r14)
movl %ecx, 0x2c(%r14)
movl %ebp, 0x30(%r14)
movabsq $0x100000001, %rcx # imm = 0x100000001
movq %rcx, 0x34(%r14)
movq %rax, 0x40(%r14)
movslq 0xcc(%rsp), %r15
movslq 0xd0(%rsp), %rbp
movq 0xb0(%rsp), %r13
movq 0xe0(%rsp), %r12
imulq 0x60(%rsp), %r12
imulq %r13, %r12
addq 0xa0(%rsp), %r12
cmpl $0x1, %ebx
movl 0xb8(%rsp), %esi
movq 0xc0(%rsp), %rdi
movq 0x150(%rsp), %rbx
movq 0x50(%rbx), %rax
movq %rdx, %r14
movl $0x0, %ecx
cmoveq %rcx, %r14
testq %rax, %rax
je 0x3bada6
lock
decl (%rax)
jne 0x3bada6
movq %rdi, 0x78(%rsp)
movl %esi, 0x8(%rsp)
movq 0x48(%rbx), %rsi
movq 0x68(%rbx), %rdi
testq %rdi, %rdi
je 0x3bad95
movq (%rdi), %rax
callq *0x18(%rax)
movl 0x8(%rsp), %esi
movq 0x78(%rsp), %rdi
jmp 0x3bada6
movq %rsi, %rdi
callq 0x5f3e0
movl 0x8(%rsp), %esi
movq 0x78(%rsp), %rdi
movq %rbp, %rax
imulq %r15, %rax
imulq %rax, %r14
imulq %r13, %r14
addq %r14, %r12
movq %r12, 0x48(%rbx)
andq $0x0, 0x50(%rbx)
movq %r13, 0x58(%rbx)
movl %esi, 0x60(%rbx)
movq %rdi, 0x68(%rbx)
movl $0x2, 0x70(%rbx)
movl %r15d, 0x74(%rbx)
movl %ebp, 0x78(%rbx)
movabsq $0x100000001, %rcx # imm = 0x100000001
movq %rcx, 0x7c(%rbx)
movq %rax, 0x88(%rbx)
leaq 0xf0(%rsp), %rdi
pushq $0x1
popq %rsi
leaq 0x1a0(%rsp), %rdx
callq 0x6fa72
movq 0x148(%rsp), %rax
movslq 0x2c(%rax), %rbx
movslq 0x30(%rax), %r15
movq 0x40(%rax), %r12
imulq 0x88(%rsp), %r12
movq 0x10(%rax), %r14
imulq %r14, %r12
addq (%rax), %r12
movl 0x18(%rax), %ebp
movq 0x20(%rax), %rsi
movq 0xf0(%rsp), %r13
movq 0x8(%r13), %rax
testq %rax, %rax
movq 0x70(%rsp), %rdx
je 0x3bae87
lock
decl (%rax)
jne 0x3bae87
movq %rsi, 0x8(%rsp)
movq (%r13), %rsi
movq 0x20(%r13), %rdi
testq %rdi, %rdi
je 0x3bae75
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x70(%rsp), %rdx
movq 0x8(%rsp), %rsi
jmp 0x3bae87
movq %rsi, %rdi
callq 0x5f3e0
movq 0x70(%rsp), %rdx
movq 0x8(%rsp), %rsi
movq %r15, %rax
imulq %rbx, %rax
movq %rdx, %rcx
imulq %r14, %rcx
imulq %rax, %rcx
addq %rcx, %r12
movq %r12, (%r13)
andq $0x0, 0x8(%r13)
movq %r14, 0x10(%r13)
movl %ebp, 0x18(%r13)
movq %rsi, 0x20(%r13)
movl $0x2, 0x28(%r13)
movl %ebx, 0x2c(%r13)
movl %r15d, 0x30(%r13)
movabsq $0x100000001, %rcx # imm = 0x100000001
movq %rcx, 0x34(%r13)
movq %rax, 0x40(%r13)
movq 0x98(%rsp), %rax
movq 0x8(%rax), %rdi
movq (%rdi), %rax
leaq 0x150(%rsp), %r15
movq %r15, %rsi
leaq 0xf0(%rsp), %r12
movq %rdx, %rbx
movq %r12, %rdx
movq 0x140(%rsp), %rcx
callq *0x30(%rax)
movq %r12, %rdi
callq 0x6fac4
movq %r15, %rdi
callq 0x6fac4
incq %rbx
jmp 0x3babe3
movq 0x88(%rsp), %rax
incq %rax
jmp 0x3baba2
movq 0xa8(%rsp), %rax
testq %rax, %rax
je 0x3bb09a
lock
decl (%rax)
jne 0x3bb09a
movq 0xa0(%rsp), %rsi
movq 0xc0(%rsp), %rdi
testq %rdi, %rdi
je 0x3bb092
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bb09a
movq 0x138(%rsp), %rax
movl 0x34(%rax), %ecx
movl 0x38(%rax), %r8d
movq 0x8(%rbx), %r9
leaq 0xf0(%rsp), %r14
leaq 0x10(%rsp), %rsi
movq %r14, %rdi
movl 0xe8(%rsp), %edx
callq 0x63020
movq 0xf8(%rsp), %rax
cmpq %r14, %r12
je 0x3bb46c
testq %rax, %rax
je 0x3bafb4
lock
incl (%rax)
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x3bb406
lock
decl (%rax)
jne 0x3bb406
movq (%r12), %rsi
movq 0x20(%r12), %rdi
testq %rdi, %rdi
je 0x3bb3fe
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bb406
movq 0xa8(%rsp), %rax
testq %rax, %rax
je 0x3bb2e8
lock
decl (%rax)
jne 0x3bb2e8
movq 0xa0(%rsp), %rsi
movq 0xc0(%rsp), %rdi
testq %rdi, %rdi
je 0x3bb2e0
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bb2e8
movq 0xa8(%rsp), %rax
testq %rax, %rax
je 0x3bb325
lock
decl (%rax)
jne 0x3bb325
movq 0xa0(%rsp), %rsi
movq 0xc0(%rsp), %rdi
testq %rdi, %rdi
je 0x3bb31d
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bb325
movq %rsi, %rdi
callq 0x5f3e0
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3bb0c8
lock
decl (%rax)
jne 0x3bb0c8
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3bb0c0
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bb0c8
movq %rsi, %rdi
callq 0x5f3e0
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3bb0c8
lock
decl (%rax)
jne 0x3bb0c8
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3bb0c0
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bb0c8
movq %rsi, %rdi
callq 0x5f3e0
pushq $-0x64
popq %rax
jmp 0x3ba48b
movq 0xd4f01(%rip), %rbx # 0x48ffd8
movq (%rbx), %rdi
leaq 0x413f5(%rip), %rsi # 0x3fc4d6
movl %ebp, %edx
movl %r14d, %ecx
xorl %eax, %eax
callq 0x5f150
movq (%rbx), %rsi
pushq $0xa
popq %rdi
callq 0x5f1c0
pushq $-0x1
jmp 0x3bb0ca
movq %rsi, %rdi
callq 0x5f3e0
movq 0xf8(%rsp), %rax
vmovaps 0xf0(%rsp), %xmm0
vmovups %xmm0, (%r12)
movq 0x100(%rsp), %rcx
movq %rcx, 0x10(%r12)
movl 0x108(%rsp), %ecx
movl %ecx, 0x18(%r12)
movq 0x110(%rsp), %rcx
movq %rcx, 0x20(%r12)
vmovups 0x118(%rsp), %xmm0
vmovups %xmm0, 0x28(%r12)
movl 0x128(%rsp), %ecx
movl %ecx, 0x38(%r12)
movq 0x130(%rsp), %rcx
movq %rcx, 0x40(%r12)
testq %rax, %rax
je 0x3bb23d
lock
decl (%rax)
jne 0x3bb23d
movq 0xf0(%rsp), %rsi
movq 0x110(%rsp), %rdi
testq %rdi, %rdi
je 0x3bb235
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bb23d
movq %rsi, %rdi
callq 0x5f3e0
movq 0xf8(%rsp), %rax
vmovaps 0xf0(%rsp), %xmm0
vmovups %xmm0, (%r12)
movq 0x100(%rsp), %rcx
movq %rcx, 0x10(%r12)
movl 0x108(%rsp), %ecx
movl %ecx, 0x18(%r12)
movq 0x110(%rsp), %rcx
movq %rcx, 0x20(%r12)
vmovups 0x118(%rsp), %xmm0
vmovups %xmm0, 0x28(%r12)
movl 0x128(%rsp), %ecx
movl %ecx, 0x38(%r12)
movq 0x130(%rsp), %rcx
movq %rcx, 0x40(%r12)
testq %rax, %rax
je 0x3bb23d
lock
decl (%rax)
jne 0x3bb23d
movq 0xf0(%rsp), %rsi
movq 0x110(%rsp), %rdi
testq %rdi, %rdi
je 0x3bb235
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bb23d
movq %rsi, %rdi
callq 0x5f3e0
movq 0x158(%rsp), %rax
testq %rax, %rax
je 0x3bb274
lock
decl (%rax)
jne 0x3bb274
movq 0x150(%rsp), %rsi
movq 0x170(%rsp), %rdi
testq %rdi, %rdi
je 0x3bb26c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bb274
movq %rsi, %rdi
callq 0x5f3e0
movq 0xa8(%rsp), %rax
testq %rax, %rax
je 0x3bb2ab
lock
decl (%rax)
jne 0x3bb2ab
movq 0xa0(%rsp), %rsi
movq 0xc0(%rsp), %rdi
testq %rdi, %rdi
je 0x3bb2a3
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bb2ab
movq %rsi, %rdi
callq 0x5f3e0
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3ba489
lock
decl (%rax)
jne 0x3ba489
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3bb53a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3ba489
movq %rsi, %rdi
callq 0x5f3e0
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3ba489
lock
decl (%rax)
jne 0x3ba489
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3bb53a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3ba489
movq %rsi, %rdi
callq 0x5f3e0
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3ba489
lock
decl (%rax)
jne 0x3ba489
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3bb53a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3ba489
movq %rsi, %rdi
callq 0x5f3e0
movq 0xf8(%rsp), %rax
vmovaps 0xf0(%rsp), %xmm0
vmovups %xmm0, (%r12)
movq 0x100(%rsp), %rcx
movq %rcx, 0x10(%r12)
movl 0x108(%rsp), %ecx
movl %ecx, 0x18(%r12)
movq 0x110(%rsp), %rcx
movq %rcx, 0x20(%r12)
vmovups 0x118(%rsp), %xmm0
vmovups %xmm0, 0x28(%r12)
movl 0x128(%rsp), %ecx
movl %ecx, 0x38(%r12)
movq 0x130(%rsp), %rcx
movq %rcx, 0x40(%r12)
testq %rax, %rax
je 0x3bb49b
lock
decl (%rax)
jne 0x3bb49b
movq 0xf0(%rsp), %rsi
movq 0x110(%rsp), %rdi
testq %rdi, %rdi
je 0x3bb493
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bb49b
movq %rsi, %rdi
callq 0x5f3e0
movq 0xf8(%rsp), %rax
vmovaps 0xf0(%rsp), %xmm0
vmovups %xmm0, (%r12)
movq 0x100(%rsp), %rcx
movq %rcx, 0x10(%r12)
movl 0x108(%rsp), %ecx
movl %ecx, 0x18(%r12)
movq 0x110(%rsp), %rcx
movq %rcx, 0x20(%r12)
vmovups 0x118(%rsp), %xmm0
vmovups %xmm0, 0x28(%r12)
movl 0x128(%rsp), %ecx
movl %ecx, 0x38(%r12)
movq 0x130(%rsp), %rcx
movq %rcx, 0x40(%r12)
testq %rax, %rax
je 0x3bb49b
lock
decl (%rax)
jne 0x3bb49b
movq 0xf0(%rsp), %rsi
movq 0x110(%rsp), %rdi
testq %rdi, %rdi
je 0x3bb493
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bb49b
movq %rsi, %rdi
callq 0x5f3e0
movq 0x158(%rsp), %rax
testq %rax, %rax
je 0x3bb4d2
lock
decl (%rax)
jne 0x3bb4d2
movq 0x150(%rsp), %rsi
movq 0x170(%rsp), %rdi
testq %rdi, %rdi
je 0x3bb4ca
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bb4d2
movq %rsi, %rdi
callq 0x5f3e0
movq 0xa8(%rsp), %rax
testq %rax, %rax
je 0x3bb509
lock
decl (%rax)
jne 0x3bb509
movq 0xa0(%rsp), %rsi
movq 0xc0(%rsp), %rdi
testq %rdi, %rdi
je 0x3bb501
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bb509
movq %rsi, %rdi
callq 0x5f3e0
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3ba489
lock
decl (%rax)
jne 0x3ba489
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3bb53a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3ba489
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x3ba489
movq %rax, %rbx
movq 0xf8(%rsp), %rax
testq %rax, %rax
je 0x3bba03
lock
decl (%rax)
jne 0x3bba03
movq 0xf0(%rsp), %rsi
movq 0x110(%rsp), %rdi
testq %rdi, %rdi
je 0x3bb5bb
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bba03
jmp 0x3bbc4d
movq %rax, %rbx
movq 0xf8(%rsp), %rax
testq %rax, %rax
je 0x3bba03
lock
decl (%rax)
jne 0x3bba03
movq 0xf0(%rsp), %rsi
movq 0x110(%rsp), %rdi
testq %rdi, %rdi
jne 0x3bb5c8
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x3bba03
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bba03
jmp 0x3bbc4d
jmp 0x3bbc4d
jmp 0x3bbc4d
jmp 0x3bbc4d
jmp 0x3bbc4d
jmp 0x3bbc4d
jmp 0x3bbc4d
movq %rax, %rbx
movq 0xf8(%rsp), %rax
testq %rax, %rax
je 0x3bbadc
lock
decl (%rax)
jne 0x3bbadc
movq 0xf0(%rsp), %rsi
movq 0x110(%rsp), %rdi
testq %rdi, %rdi
je 0x3bb66a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bbadc
jmp 0x3bbc4d
movq %rax, %rbx
movq 0xf8(%rsp), %rax
testq %rax, %rax
je 0x3bbadc
lock
decl (%rax)
jne 0x3bbadc
movq 0xf0(%rsp), %rsi
movq 0x110(%rsp), %rdi
testq %rdi, %rdi
jne 0x3bb677
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x3bbadc
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bbadc
jmp 0x3bbc4d
jmp 0x3bbc4d
jmp 0x3bbc4d
jmp 0x3bbc4d
jmp 0x3bbc4d
jmp 0x3bbc4d
jmp 0x3bbc4d
jmp 0x3bbc4d
jmp 0x3bbc4d
jmp 0x3bbc4d
jmp 0x3bbc4d
jmp 0x3bbc4d
jmp 0x3bbc4d
jmp 0x3bbc4d
jmp 0x3bb9dc
jmp 0x3bb9dc
jmp 0x3bbc4d
movq %rax, %rbx
jmp 0x3bbc15
movq %rax, %rbx
jmp 0x3bb97a
jmp 0x3bbab5
jmp 0x3bbab5
movq %rax, %rbx
jmp 0x3bba3a
movq %rax, %rbx
jmp 0x3bba71
jmp 0x3bbbb7
jmp 0x3bb91c
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3bbb97
lock
decl (%rax)
jne 0x3bbb97
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3bb8e1
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bbb97
jmp 0x3bbc4d
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3bbb97
lock
decl (%rax)
jne 0x3bbb97
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3bb8e1
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bbb97
jmp 0x3bbc4d
jmp 0x3bbc4d
jmp 0x3bbc4d
movq %rax, %rbx
jmp 0x3bbb13
movq %rax, %rbx
jmp 0x3bbb4a
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3bbb97
lock
decl (%rax)
jne 0x3bbb97
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3bb8e1
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bbb97
jmp 0x3bbc4d
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3bbb97
lock
decl (%rax)
jne 0x3bbb97
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3bb8e1
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bbb97
jmp 0x3bbc4d
jmp 0x3bb933
jmp 0x3bb933
jmp 0x3bb921
jmp 0x3bb9f3
jmp 0x3bbc4d
jmp 0x3bbc4d
jmp 0x3bb9f3
jmp 0x3bb9e1
jmp 0x3bbacc
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3bbb97
lock
decl (%rax)
jne 0x3bbb97
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3bb8e1
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bbb97
jmp 0x3bbc4d
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3bbb97
lock
decl (%rax)
jne 0x3bbb97
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3bb8e1
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bbb97
jmp 0x3bbc4d
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3bbb97
lock
decl (%rax)
jne 0x3bbb97
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
jne 0x3bb8ee
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x3bbb97
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bbb97
jmp 0x3bbc4d
jmp 0x3bbb94
jmp 0x3bbb94
jmp 0x3bbba9
jmp 0x3bbb94
jmp 0x3bbacc
jmp 0x3bbaba
movq %rax, %rbx
jmp 0x3bb943
movq %rax, %rbx
leaq 0xf0(%rsp), %rdi
callq 0x6fac4
jmp 0x3bb936
movq %rax, %rbx
leaq 0x150(%rsp), %rdi
callq 0x6fac4
movq 0xa8(%rsp), %rax
testq %rax, %rax
je 0x3bb97a
lock
decl (%rax)
jne 0x3bb97a
movq 0xa0(%rsp), %rsi
movq 0xc0(%rsp), %rdi
testq %rdi, %rdi
jne 0x3bb974
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x3bb97a
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3bbc43
lock
decl (%rax)
jne 0x3bbc43
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3bbc33
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bbc43
jmp 0x3bbc4d
jmp 0x3bbc4d
jmp 0x3bbc4d
jmp 0x3bbc4d
jmp 0x3bbc4d
jmp 0x3bbb94
jmp 0x3bbb94
jmp 0x3bbba9
jmp 0x3bbb94
movq %rax, %rbx
jmp 0x3bba03
movq %rax, %rbx
leaq 0x1a0(%rsp), %rdi
callq 0x6fac4
jmp 0x3bb9f6
movq %rax, %rbx
leaq 0xf0(%rsp), %rdi
callq 0x6fac4
movq 0x158(%rsp), %rax
testq %rax, %rax
je 0x3bba3a
lock
decl (%rax)
jne 0x3bba3a
movq 0x150(%rsp), %rsi
movq 0x170(%rsp), %rdi
testq %rdi, %rdi
jne 0x3bba34
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x3bba3a
movq (%rdi), %rax
callq *0x18(%rax)
movq 0xa8(%rsp), %rax
testq %rax, %rax
je 0x3bba71
lock
decl (%rax)
jne 0x3bba71
movq 0xa0(%rsp), %rsi
movq 0xc0(%rsp), %rdi
testq %rdi, %rdi
jne 0x3bba6b
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x3bba71
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3bbc43
lock
decl (%rax)
jne 0x3bbc43
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3bbc33
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bbc43
jmp 0x3bbc4d
jmp 0x3bbc4d
jmp 0x3bbc4d
movq %rax, %rbx
jmp 0x3bbadc
movq %rax, %rbx
leaq 0x1a0(%rsp), %rdi
callq 0x6fac4
jmp 0x3bbacf
movq %rax, %rbx
leaq 0xf0(%rsp), %rdi
callq 0x6fac4
movq 0x158(%rsp), %rax
testq %rax, %rax
je 0x3bbb13
lock
decl (%rax)
jne 0x3bbb13
movq 0x150(%rsp), %rsi
movq 0x170(%rsp), %rdi
testq %rdi, %rdi
jne 0x3bbb0d
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x3bbb13
movq (%rdi), %rax
callq *0x18(%rax)
movq 0xa8(%rsp), %rax
testq %rax, %rax
je 0x3bbb4a
lock
decl (%rax)
jne 0x3bbb4a
movq 0xa0(%rsp), %rsi
movq 0xc0(%rsp), %rdi
testq %rdi, %rdi
jne 0x3bbb44
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x3bbb4a
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3bbc43
lock
decl (%rax)
jne 0x3bbc43
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x3bbc33
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bbc43
jmp 0x3bbc4d
jmp 0x3bbc4d
jmp 0x3bbc4d
jmp 0x3bbb94
jmp 0x3bbb94
jmp 0x3bbb94
movq %rax, %rbx
leaq 0xa0(%rsp), %rdi
callq 0x6fac4
jmp 0x3bbc43
movq %rax, %rbx
jmp 0x3bbc43
jmp 0x3bbbce
jmp 0x3bbbce
jmp 0x3bbbbc
movq %rax, %rbx
jmp 0x3bbbde
movq %rax, %rbx
leaq 0xf0(%rsp), %rdi
callq 0x6fac4
jmp 0x3bbbd1
movq %rax, %rbx
leaq 0x150(%rsp), %rdi
callq 0x6fac4
movq 0xa8(%rsp), %rax
testq %rax, %rax
je 0x3bbc15
lock
decl (%rax)
jne 0x3bbc15
movq 0xa0(%rsp), %rsi
movq 0xc0(%rsp), %rdi
testq %rdi, %rdi
jne 0x3bbc0f
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x3bbc15
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x3bbc43
lock
decl (%rax)
jne 0x3bbc43
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
jne 0x3bbc3d
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x3bbc43
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
jmp 0x3bbc4d
movq %rax, %rdi
callq 0x61d68
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/matmul_x86_avx.cpp |
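The long run of near-identical cleanup blocks above — `lock incl (%rax)` to take a reference, `lock decl (%rax)` followed by either an indirect `callq *0x18(%rax)` or a call to a plain deallocation helper to drop one — is the inlined ncnn::Mat reference-counting release path, duplicated once per exit and unwind edge of the function. The sketch below is an interpretation of that assembly, not the ncnn source: std::atomic stands in for ncnn's own NCNN_XADD primitive on a plain int*, and both type names are invented. The field order mirrors the offsets the assembly reads (data at +0x0, refcount at +0x8, allocator at +0x20).

#include <atomic>
#include <cstddef>
#include <cstdlib>

// Illustrative stand-ins, not the real ncnn types.
struct AllocatorSketch
{
    virtual ~AllocatorSketch() {}
    virtual void* fastMalloc(std::size_t size) = 0;
    virtual void fastFree(void* ptr) = 0; // the "callq *0x18(%rax)" target
};

struct MatSketch
{
    void* data;
    std::atomic<int>* refcount; // shared by every shallow copy of the blob
    AllocatorSketch* allocator;

    void addref() // "lock incl (%rax)"
    {
        if (refcount) refcount->fetch_add(1);
    }
    void release() // "lock decl (%rax); jne skip"
    {
        if (refcount && refcount->fetch_sub(1) == 1)
        {
            // Only the copy that drops the count to zero frees the buffer.
            if (allocator)
                allocator->fastFree(data);
            else
                std::free(data); // stands in for ncnn's aligned deallocator
        }
        data = nullptr;
        refcount = nullptr;
    }
};

The duplication is the compiler's doing: every early return and every exception edge gets its own copy of this release sequence, which is why the same handful of instructions repeats so many times above.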
virtual thunk to ncnn::MatMul_x86_avx::forward(std::vector<ncnn::Mat, std::allocator<ncnn::Mat>> const&, std::vector<ncnn::Mat, std::allocator<ncnn::Mat>>&, ncnn::Option const&) const | int MatMul_x86_avx::forward(const std::vector<Mat>& bottom_blobs, std::vector<Mat>& top_blobs, const Option& opt) const
{
const Mat& A = bottom_blobs[0];
const Mat& B = bottom_blobs[1];
Mat& top_blob = top_blobs[0];
const int Adims = A.dims;
const int Bdims = B.dims;
const int max_ABdims = std::max(Adims, Bdims);
const size_t elemsize = A.elemsize;
if (Adims == 1 && Bdims == 1)
{
// dot product
std::vector<Mat> _bottom_blobs(2);
_bottom_blobs[0] = A.reshape(A.w, 1);
_bottom_blobs[1] = transB ? B.reshape(B.w, 1) : B.reshape(1, B.w);
gemm->forward(_bottom_blobs, top_blobs, opt);
top_blob = top_blob.reshape(1, opt.blob_allocator);
}
else if (Adims == 2 && Bdims == 2)
{
// matrix multiply
gemm->forward(bottom_blobs, top_blobs, opt);
}
else if (Adims == 1 && Bdims == 2)
{
// matrix multiply
std::vector<Mat> _bottom_blobs(2);
_bottom_blobs[0] = A.reshape(A.w, 1);
_bottom_blobs[1] = B;
gemm->forward(_bottom_blobs, top_blobs, opt);
top_blob = top_blob.reshape(top_blob.w, opt.blob_allocator);
}
else if (Adims == 2 && Bdims == 1)
{
// matrix multiply
std::vector<Mat> _bottom_blobs(2);
_bottom_blobs[0] = A;
_bottom_blobs[1] = transB ? B.reshape(B.w, 1) : B.reshape(1, B.w);
gemm->forward(_bottom_blobs, top_blobs, opt);
top_blob = top_blob.reshape(top_blob.h, opt.blob_allocator);
}
else if (Adims == 1 && Bdims > 2)
{
// batched matrix multiply
const int N = transB == 0 ? B.w : B.h;
const int batch_size = B.d * B.c;
Mat top_blob1(N, 1, batch_size, elemsize, opt.blob_allocator);
if (top_blob1.empty())
return -100;
Mat A1 = A.reshape(A.w, 1);
Mat B1 = B.reshape(B.w, B.h, batch_size);
for (int p = 0; p < batch_size; p++)
{
std::vector<Mat> _bottom_blobs(2);
_bottom_blobs[0] = A1;
_bottom_blobs[1] = B1.channel(p);
std::vector<Mat> _top_blobs(1);
_top_blobs[0] = top_blob1.channel(p);
gemm->forward(_bottom_blobs, _top_blobs, opt);
}
if (Bdims == 3)
top_blob = top_blob1.reshape(N, B.d * B.c, opt.blob_allocator);
else
top_blob = top_blob1.reshape(N, B.d, B.c, opt.blob_allocator);
}
else if (Adims > 2 && Bdims == 1)
{
// batched matrix multiply
const int M = A.h;
const int batch_size = A.d * A.c;
Mat top_blob1(1, M, batch_size, elemsize, opt.blob_allocator);
if (top_blob1.empty())
return -100;
Mat A1 = A.reshape(A.w, A.h, batch_size);
Mat BT = transB ? B.reshape(B.w, 1) : B.reshape(1, B.w);
for (int p = 0; p < batch_size; p++)
{
std::vector<Mat> _bottom_blobs(2);
_bottom_blobs[0] = A1.channel(p);
_bottom_blobs[1] = BT;
std::vector<Mat> _top_blobs(1);
_top_blobs[0] = top_blob1.channel(p);
gemm->forward(_bottom_blobs, _top_blobs, opt);
}
if (Adims == 3)
top_blob = top_blob1.reshape(M, A.d * A.c, opt.blob_allocator);
else
top_blob = top_blob1.reshape(M, A.d, A.c, opt.blob_allocator);
}
else if (max_ABdims == 3)
{
Mat A1 = Adims == 2 ? A.reshape(A.w, A.h, 1) : A;
Mat B1 = Bdims == 2 ? B.reshape(B.w, B.h, 1) : B;
const int M = A1.h;
const int N = transB == 0 ? B1.w : B1.h;
const int batch_size = std::max(A1.c, B1.c);
top_blob.create(N, M, batch_size, elemsize, opt.blob_allocator);
if (top_blob.empty())
return -100;
for (int p = 0; p < batch_size; p++)
{
int Ap = A1.c == 1 ? 0 : p;
int Bp = B1.c == 1 ? 0 : p;
std::vector<Mat> _bottom_blobs(2);
_bottom_blobs[0] = A1.channel(Ap);
_bottom_blobs[1] = B1.channel(Bp);
std::vector<Mat> _top_blobs(1);
_top_blobs[0] = top_blob.channel(p);
gemm->forward(_bottom_blobs, _top_blobs, opt);
}
}
else if (max_ABdims == 4)
{
Mat A1 = Adims == 3 ? A.reshape(A.w, A.h, A.c, 1) : A;
Mat B1 = Bdims == 3 ? B.reshape(B.w, B.h, B.c, 1) : B;
const int M = A1.h;
const int N = transB == 0 ? B1.w : B1.h;
const int batch_size_d = std::max(A1.d, B1.d);
const int batch_size_c = std::max(A1.c, B1.c);
top_blob.create(N, M, batch_size_d, batch_size_c, elemsize, opt.blob_allocator);
if (top_blob.empty())
return -100;
for (int p = 0; p < batch_size_c; p++)
{
int Ap = A1.c == 1 ? 0 : p;
int Bp = B1.c == 1 ? 0 : p;
for (int q = 0; q < batch_size_d; q++)
{
int Ad = A1.d == 1 ? 0 : q;
int Bd = B1.d == 1 ? 0 : q;
std::vector<Mat> _bottom_blobs(2);
_bottom_blobs[0] = A1.channel(Ap).depth(Ad);
_bottom_blobs[1] = B1.channel(Bp).depth(Bd);
std::vector<Mat> _top_blobs(1);
_top_blobs[0] = top_blob.channel(p).depth(q);
gemm->forward(_bottom_blobs, _top_blobs, opt);
}
}
}
else
{
NCNN_LOGE("impossible matmul %d %d", Adims, Bdims);
return -1;
}
return 0;
} | movq (%rdi), %rax
addq -0x40(%rax), %rdi
jmp 0x3b8fd6
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/matmul_x86_avx.cpp |
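A worked view of the shape dispatch in the forward() body above may help. The helper below is hypothetical (not part of ncnn) and covers only the 1-D/2-D branches; it keeps the source's conventions that Mat::w is the inner (column) dimension and that N = transB == 0 ? B.w : B.h.

#include <cstdio>

// Hypothetical helper: reproduces the output-shape dispatch of the
// 1-D/2-D branches in MatMul_x86_avx::forward(). *outh == 0 marks a
// 1-D (vector or scalar) result.
static void matmul_out_shape(int Adims, int Ah,
                             int Bdims, int Bw, int Bh, int transB,
                             int* outw, int* outh)
{
    const int N = transB == 0 ? Bw : Bh; // columns of the product
    if (Adims == 1 && Bdims == 1)      { *outw = 1;  *outh = 0; }  // dot product -> scalar
    else if (Adims == 2 && Bdims == 2) { *outw = N;  *outh = Ah; } // M x N matrix
    else if (Adims == 1 && Bdims == 2) { *outw = N;  *outh = 0; }  // (1xK)(KxN) -> length-N vector
    else if (Adims == 2 && Bdims == 1) { *outw = Ah; *outh = 0; }  // (MxK)(Kx1) -> length-M vector
}

int main()
{
    int w = 0, h = 0;
    matmul_out_shape(/*Adims=*/2, /*A.h=*/3, /*Bdims=*/2, /*B.w=*/5, /*B.h=*/4, /*transB=*/0, &w, &h);
    std::printf("2D x 2D -> %d x %d\n", h, w); // prints "2D x 2D -> 3 x 5"
    return 0;
}

The batched (dims > 2) branches follow the same rule per channel/depth slice, with the reshape calls in the source normalizing A and B to a common batch layout first.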
ncnn::Deconvolution1D::load_model(ncnn::ModelBin const&) | int Deconvolution1D::load_model(const ModelBin& mb)
{
weight_data = mb.load(weight_data_size, 0);
if (weight_data.empty())
return -100;
if (bias_term)
{
bias_data = mb.load(num_output, 1);
if (bias_data.empty())
return -100;
}
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $0x50, %rsp
movq %rsi, %r15
movq %rdi, %rbx
movl 0xf4(%rdi), %edx
movq (%rsi), %rax
leaq 0x8(%rsp), %r14
movq %r14, %rdi
xorl %ecx, %ecx
callq *0x10(%rax)
leaq 0x148(%rbx), %r12
movq 0x8(%r14), %rax
cmpq %r14, %r12
je 0x3bc096
testq %rax, %rax
je 0x3bc006
lock
incl (%rax)
movq 0x150(%rbx), %rax
testq %rax, %rax
je 0x3bc03a
lock
decl (%rax)
jne 0x3bc03a
movq 0x148(%rbx), %rsi
movq 0x168(%rbx), %rdi
testq %rdi, %rdi
je 0x3bc032
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bc03a
movq %rsi, %rdi
callq 0x5f3e0
movq 0x8(%rsp), %rax
movq %rax, 0x148(%rbx)
movq 0x10(%rsp), %rax
movq %rax, 0x150(%rbx)
movq 0x18(%rsp), %rcx
movq %rcx, 0x158(%rbx)
movl 0x20(%rsp), %ecx
movl %ecx, 0x160(%rbx)
movq 0x28(%rsp), %rcx
movq %rcx, 0x168(%rbx)
movups 0x30(%rsp), %xmm0
movups %xmm0, 0x170(%rbx)
movl 0x40(%rsp), %ecx
movl %ecx, 0x180(%rbx)
movq 0x48(%rsp), %rcx
movq %rcx, 0x188(%rbx)
testq %rax, %rax
je 0x3bc0bf
lock
decl (%rax)
jne 0x3bc0bf
movq 0x8(%rsp), %rsi
movq 0x28(%rsp), %rdi
testq %rdi, %rdi
je 0x3bc0b7
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bc0bf
movq %rsi, %rdi
callq 0x5f3e0
pushq $-0x64
popq %rbp
cmpq $0x0, (%r12)
je 0x3bc1f9
movslq 0x180(%rbx), %rax
imulq 0x188(%rbx), %rax
testq %rax, %rax
je 0x3bc1f9
cmpl $0x0, 0xf0(%rbx)
je 0x3bc1f7
movl 0xd0(%rbx), %edx
movq (%r15), %rax
pushq $0x1
popq %rcx
movq %r14, %rdi
movq %r15, %rsi
callq *0x10(%rax)
leaq 0x190(%rbx), %r15
movq 0x10(%rsp), %rax
cmpq %r14, %r15
je 0x3bc1b4
testq %rax, %rax
je 0x3bc124
lock
incl (%rax)
movq 0x198(%rbx), %rax
testq %rax, %rax
je 0x3bc158
lock
decl (%rax)
jne 0x3bc158
movq 0x190(%rbx), %rsi
movq 0x1b0(%rbx), %rdi
testq %rdi, %rdi
je 0x3bc150
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bc158
movq %rsi, %rdi
callq 0x5f3e0
movq 0x8(%rsp), %rax
movq %rax, 0x190(%rbx)
movq 0x10(%rsp), %rax
movq %rax, 0x198(%rbx)
movq 0x18(%rsp), %rcx
movq %rcx, 0x1a0(%rbx)
movl 0x20(%rsp), %ecx
movl %ecx, 0x1a8(%rbx)
movq 0x28(%rsp), %rcx
movq %rcx, 0x1b0(%rbx)
movups 0x30(%rsp), %xmm0
movups %xmm0, 0x1b8(%rbx)
movl 0x40(%rsp), %ecx
movl %ecx, 0x1c8(%rbx)
movq 0x48(%rsp), %rcx
movq %rcx, 0x1d0(%rbx)
testq %rax, %rax
je 0x3bc1dd
lock
decl (%rax)
jne 0x3bc1dd
movq 0x8(%rsp), %rsi
movq 0x28(%rsp), %rdi
testq %rdi, %rdi
je 0x3bc1d5
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bc1dd
movq %rsi, %rdi
callq 0x5f3e0
cmpq $0x0, (%r15)
je 0x3bc1f9
movslq 0x1c8(%rbx), %rax
imulq 0x1d0(%rbx), %rax
testq %rax, %rax
je 0x3bc1f9
xorl %ebp, %ebp
movl %ebp, %eax
addq $0x50, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
movq %rax, %rbx
movq 0x10(%rsp), %rax
testq %rax, %rax
je 0x3bc266
lock
decl (%rax)
jne 0x3bc266
movq 0x8(%rsp), %rsi
movq 0x28(%rsp), %rdi
testq %rdi, %rdi
je 0x3bc256
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bc266
jmp 0x3bc270
jmp 0x3bc270
movq %rax, %rbx
movq 0x10(%rsp), %rax
testq %rax, %rax
je 0x3bc266
lock
decl (%rax)
jne 0x3bc266
movq 0x8(%rsp), %rsi
movq 0x28(%rsp), %rdi
testq %rdi, %rdi
jne 0x3bc260
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x3bc266
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
jmp 0x3bc270
movq %rax, %rdi
callq 0x61d68
| /csukuangfj[P]ncnn/src/layer/deconvolution1d.cpp |
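The load-and-validate idiom in load_model() above recurs across ncnn layers. Below is a minimal sketch of the same convention written as a free function for illustration — the function name and parameter list are made up, and the type-argument semantics (type 0 lets the stored tag pick the element type, type 1 forces raw float) follow the ModelBin::load(w, type) usage in the source.

#include "mat.h"      // ncnn::Mat
#include "modelbin.h" // ncnn::ModelBin

// Sketch only; the real code lives in the layer's load_model().
static int load_deconv1d_weights(const ncnn::ModelBin& mb,
                                 int weight_data_size, int bias_term, int num_output,
                                 ncnn::Mat& weight_data, ncnn::Mat& bias_data)
{
    weight_data = mb.load(weight_data_size, 0);
    if (weight_data.empty())
        return -100; // ncnn convention: -100 signals a failed load/allocation

    if (bias_term)
    {
        bias_data = mb.load(num_output, 1);
        if (bias_data.empty())
            return -100;
    }
    return 0;
}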
ncnn::Deconvolution1D::forward(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const | int Deconvolution1D::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
int w = bottom_blob.w;
size_t elemsize = bottom_blob.elemsize;
const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
int outw = (w - 1) * stride_w + kernel_extent_w + output_pad_right;
Mat top_blob_bordered;
if (pad_left > 0 || pad_right > 0 || output_w > 0)
{
top_blob_bordered.create(outw, num_output, elemsize, opt.workspace_allocator);
}
else
{
top_blob_bordered = top_blob;
top_blob_bordered.create(outw, num_output, elemsize, opt.blob_allocator);
}
if (top_blob_bordered.empty())
return -100;
int ret = deconvolution1d(bottom_blob, top_blob_bordered, weight_data, bias_data, kernel_w, stride_w, dilation_w, activation_type, activation_params, opt);
if (ret != 0)
return ret;
cut_padding(top_blob_bordered, top_blob, opt);
if (top_blob.empty())
return -100;
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xe8, %rsp
movq %rcx, 0x80(%rsp)
movq %rdx, %r12
movl 0x2c(%rsi), %ebp
movl 0xd4(%rdi), %ebx
decl %ebx
imull 0xd8(%rdi), %ebx
movq %rsi, 0x90(%rsp)
movq 0x10(%rsi), %r13
decl %ebp
imull 0xdc(%rdi), %ebp
movl 0xe8(%rdi), %r14d
andq $0x0, 0x70(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, 0x30(%rsp)
movups %xmm0, 0x3c(%rsp)
movaps %xmm0, 0x50(%rsp)
movups %xmm0, 0x5c(%rsp)
cmpl $0x0, 0xe0(%rdi)
pushq $0x10
popq %r15
movq %rdi, 0x8(%rsp)
jg 0x3bc3a1
cmpl $0x0, 0xe4(%rdi)
jg 0x3bc3a1
cmpl $0x0, 0xec(%rdi)
jg 0x3bc3a1
pushq $0x8
popq %r15
leaq 0x30(%rsp), %rax
cmpq %r12, %rax
je 0x3bc3a1
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x3bc35c
lock
incl (%rax)
movq 0x38(%rsp), %rax
testq %rax, %rax
je 0x3bc35c
lock
decl (%rax)
jne 0x3bc35c
movq 0x30(%rsp), %rsi
movq 0x50(%rsp), %rdi
testq %rdi, %rdi
je 0x3bc354
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bc35c
movq %rsi, %rdi
callq 0x5f3e0
movups (%r12), %xmm0
movaps %xmm0, 0x30(%rsp)
movq 0x10(%r12), %rax
movq %rax, 0x40(%rsp)
movl 0x18(%r12), %eax
movl %eax, 0x48(%rsp)
movq 0x20(%r12), %rax
movq %rax, 0x50(%rsp)
movups 0x28(%r12), %xmm0
movups %xmm0, 0x58(%rsp)
movl 0x38(%r12), %eax
movl %eax, 0x68(%rsp)
movq 0x40(%r12), %rax
movq %rax, 0x70(%rsp)
addl %ebp, %ebx
leal (%r14,%rbx), %esi
incl %esi
movq 0x8(%rsp), %rax
movl 0xd0(%rax), %edx
movq 0x80(%rsp), %rax
movq (%rax,%r15), %r8
leaq 0x30(%rsp), %rdi
movq %r13, %rcx
callq 0x636fa
movq 0x30(%rsp), %r13
pushq $-0x64
popq %rbx
testq %r13, %r13
je 0x3bc7a8
movslq 0x68(%rsp), %rax
imulq 0x70(%rsp), %rax
testq %rax, %rax
je 0x3bc7a8
movq 0x8(%rsp), %rdi
movslq 0xd4(%rdi), %rbp
movslq 0xdc(%rdi), %rax
movq %rax, 0x88(%rsp)
movslq 0xd8(%rdi), %rbx
movl 0xf8(%rdi), %ecx
movq 0x90(%rsp), %rax
movl 0x2c(%rax), %edx
movl 0x30(%rax), %esi
movslq 0x5c(%rsp), %r8
movl 0x60(%rsp), %r9d
movq 0x190(%rdi), %rax
movq %rax, 0xa0(%rsp)
testq %rax, %rax
movq %r12, 0x98(%rsp)
je 0x3bc467
movslq 0x1c8(%rdi), %rax
imulq 0x1d0(%rdi), %rax
testq %rax, %rax
sete 0x7(%rsp)
jmp 0x3bc46c
movb $0x1, 0x7(%rsp)
xorl %r10d, %r10d
testl %r8d, %r8d
movl $0x0, %eax
cmovgl %r8d, %eax
testl %ebp, %ebp
movl $0x0, %r15d
cmovgl %ebp, %r15d
testl %esi, %esi
movl $0x0, %r12d
cmovgl %esi, %r12d
testl %edx, %edx
cmovlel %r10d, %edx
movq %rdx, 0xc8(%rsp)
movq %rax, %rdx
testl %r9d, %r9d
cmovlel %r10d, %r9d
movq %r9, 0xb0(%rsp)
imulq 0x40(%rsp), %r8
movq %r8, 0xa8(%rsp)
shlq $0x2, 0x88(%rsp)
shlq $0x2, %rbx
imull %ebp, %esi
movl %esi, 0x2c(%rsp)
shlq $0x2, %rbp
decl %ecx
xorl %esi, %esi
movq %rcx, 0x18(%rsp)
movq %rax, 0x10(%rsp)
cmpq 0xb0(%rsp), %rsi
je 0x3bc772
xorps %xmm0, %xmm0
cmpb $0x0, 0x7(%rsp)
jne 0x3bc50c
movq 0xa0(%rsp), %rax
movss (%rax,%rsi,4), %xmm0
movslq %r10d, %rax
shlq $0x2, %rax
xorl %ecx, %ecx
cmpl %ecx, %edx
je 0x3bc525
movss %xmm0, (%r13,%rcx,4)
incq %rcx
jmp 0x3bc515
movq %rsi, 0xb8(%rsp)
movq %r10, 0xc0(%rsp)
addq 0x148(%rdi), %rax
movq %r13, %rcx
xorl %edx, %edx
cmpq 0xc8(%rsp), %rdx
je 0x3bc5bb
movq 0x90(%rsp), %r8
movslq 0x2c(%r8), %rsi
imulq 0x10(%r8), %rsi
leaq (,%rdx,4), %r14
addq (%r8), %r14
movq %rax, %r8
xorl %r9d, %r9d
cmpq %r12, %r9
je 0x3bc5ae
movq %rsi, %r10
imulq %r9, %r10
movss (%r14,%r10), %xmm0
movq %rcx, %r10
xorl %r11d, %r11d
cmpq %r11, %r15
je 0x3bc5a6
movss (%r8,%r11,4), %xmm1
mulss %xmm0, %xmm1
addss (%r10), %xmm1
movss %xmm1, (%r10)
incq %r11
addq %rbx, %r10
jmp 0x3bc585
incq %r9
addq %rbp, %r8
jmp 0x3bc56d
incq %rdx
addq 0x88(%rsp), %rcx
jmp 0x3bc541
movq 0x100(%rdi), %rax
movq %rax, 0x20(%rsp)
xorl %r14d, %r14d
movq 0x18(%rsp), %rcx
movq 0x10(%rsp), %rdx
leaq 0x3ff81(%rip), %rsi # 0x3fc55c
cmpq %r14, %rdx
je 0x3bc748
movss (%r13,%r14,4), %xmm4
cmpl $0x5, %ecx
ja 0x3bc601
movslq (%rsi,%rcx,4), %rax
addq %rsi, %rax
jmpq *%rax
maxss 0x31a0f(%rip), %xmm4 # 0x3ee010
movaps %xmm4, %xmm0
jmp 0x3bc739
movaps %xmm4, %xmm0
movaps %xmm4, 0xd0(%rsp)
callq 0x5f410
addss 0x32667(%rip), %xmm0 # 0x3eec88
callq 0x5f200
callq 0x5f160
leaq 0x3ff2a(%rip), %rsi # 0x3fc55c
movq 0x10(%rsp), %rdx
movq 0x18(%rsp), %rcx
mulss 0xd0(%rsp), %xmm0
jmp 0x3bc739
movq 0x20(%rsp), %rax
maxss (%rax), %xmm4
movss 0x4(%rax), %xmm1
ucomiss %xmm1, %xmm4
movaps %xmm4, %xmm0
ja 0x3bc736
jmp 0x3bc739
movss 0x34b47(%rip), %xmm2 # 0x3f11b8
minss %xmm2, %xmm4
movaps %xmm4, %xmm0
xorps 0x31a11(%rip), %xmm0 # 0x3ee090
cmpltss 0x34b34(%rip), %xmm4 # 0x3f11bc
movaps %xmm4, %xmm1
andnps %xmm0, %xmm1
andps %xmm2, %xmm4
orps %xmm1, %xmm4
movaps %xmm4, %xmm0
callq 0x5f410
leaq 0x3feb9(%rip), %rsi # 0x3fc55c
movq 0x10(%rsp), %rdx
movq 0x18(%rsp), %rcx
movaps %xmm0, %xmm1
movss 0x325d0(%rip), %xmm0 # 0x3eec88
addss %xmm0, %xmm1
divss %xmm1, %xmm0
jmp 0x3bc739
xorps %xmm0, %xmm0
cmpltss %xmm4, %xmm0
movaps %xmm0, %xmm1
movss 0x325b3(%rip), %xmm2 # 0x3eec88
andps %xmm2, %xmm1
movq 0x20(%rsp), %rax
movss (%rax), %xmm2
andnps %xmm2, %xmm0
orps %xmm1, %xmm0
mulss %xmm4, %xmm0
jmp 0x3bc739
movq 0x20(%rsp), %rax
movss (%rax), %xmm1
movss 0x4(%rax), %xmm2
movaps %xmm2, %xmm3
xorps 0x3198b(%rip), %xmm3 # 0x3ee090
divss %xmm1, %xmm3
xorps %xmm0, %xmm0
ucomiss %xmm3, %xmm4
jb 0x3bc739
movss 0x3256f(%rip), %xmm0 # 0x3eec88
divss %xmm1, %xmm0
addss %xmm0, %xmm3
ucomiss %xmm3, %xmm4
ja 0x3bc601
mulss %xmm4, %xmm1
addss %xmm2, %xmm1
mulss %xmm4, %xmm1
movaps %xmm1, %xmm0
movss %xmm0, (%r13,%r14,4)
incq %r14
jmp 0x3bc5db
movq 0xb8(%rsp), %rsi
incq %rsi
addq 0xa8(%rsp), %r13
movq 0xc0(%rsp), %r10
addl 0x2c(%rsp), %r10d
movq 0x8(%rsp), %rdi
jmp 0x3bc4e7
leaq 0x30(%rsp), %rsi
movq 0x98(%rsp), %rbx
movq %rbx, %rdx
movq 0x80(%rsp), %rcx
callq 0x3bc830
cmpq $0x0, (%rbx)
je 0x3bc7a5
movslq 0x38(%rbx), %rax
imulq 0x40(%rbx), %rax
xorl %ebx, %ebx
testq %rax, %rax
jne 0x3bc7a8
pushq $-0x64
popq %rbx
movq 0x38(%rsp), %rax
testq %rax, %rax
je 0x3bc7d6
lock
decl (%rax)
jne 0x3bc7d6
movq 0x30(%rsp), %rsi
movq 0x50(%rsp), %rdi
testq %rdi, %rdi
je 0x3bc7ce
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bc7d6
movq %rsi, %rdi
callq 0x5f3e0
movl %ebx, %eax
addq $0xe8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x3bc827
jmp 0x3bc7ee
movq %rax, %rbx
movq 0x38(%rsp), %rax
testq %rax, %rax
je 0x3bc81f
lock
decl (%rax)
jne 0x3bc81f
movq 0x30(%rsp), %rsi
movq 0x50(%rsp), %rdi
testq %rdi, %rdi
jne 0x3bc819
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x3bc81f
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
nop
| /csukuangfj[P]ncnn/src/layer/deconvolution1d.cpp |
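The output width computed at the top of forward() is the standard transposed-convolution size formula. A short self-contained check of the arithmetic (the input values are made up):

#include <cstdio>

// Recomputes the bordered output width exactly as forward() does,
// before cut_padding() trims it.
static int deconv1d_outw(int w, int kernel_w, int dilation_w, int stride_w,
                         int output_pad_right)
{
    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
    return (w - 1) * stride_w + kernel_extent_w + output_pad_right;
}

int main()
{
    // w=5, kernel_w=3, dilation_w=2 -> effective kernel width 2*(3-1)+1 = 5;
    // stride_w=2 -> outw = (5-1)*2 + 5 + 0 = 13
    std::printf("%d\n", deconv1d_outw(5, 3, 2, 2, 0)); // prints 13
    return 0;
}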
ncnn::Deconvolution1D::cut_padding(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const | void Deconvolution1D::cut_padding(const Mat& top_blob_bordered, Mat& top_blob, const Option& opt) const
{
if (pad_left > 0 || pad_right > 0)
{
copy_cut_border(top_blob_bordered, top_blob, 0, 0, pad_left, pad_right, opt);
}
else if (output_w > 0)
{
int wcut = top_blob_bordered.w - output_w;
if (pad_left == -233 || pad_right == -233)
{
// onnx padding=SAME_UPPER
copy_cut_border(top_blob_bordered, top_blob, 0, 0, wcut / 2, wcut - wcut / 2, opt);
}
else if (pad_left == -234 || pad_right == -234)
{
// onnx padding=SAME_LOWER
copy_cut_border(top_blob_bordered, top_blob, 0, 0, wcut - wcut / 2, wcut / 2, opt);
}
}
else
{
top_blob = top_blob_bordered;
}
} | pushq %r14
pushq %rbx
pushq %rax
movq %rdx, %rbx
movq %rsi, %r14
movl 0xe0(%rdi), %r8d
movl 0xe4(%rdi), %r9d
testl %r8d, %r8d
setg %al
testl %r9d, %r9d
setg %dl
orb %al, %dl
cmpb $0x1, %dl
jne 0x3bc86e
movq %rcx, (%rsp)
movq %r14, %rdi
movq %rbx, %rsi
xorl %edx, %edx
xorl %ecx, %ecx
jmp 0x3bc91e
movl 0xec(%rdi), %eax
testl %eax, %eax
jle 0x3bc8b6
movl 0x2c(%r14), %r10d
subl %eax, %r10d
movl $0xffffff17, %eax # imm = 0xFFFFFF17
cmpl %eax, %r8d
setne %dl
cmpl %eax, %r9d
setne %al
testb %al, %dl
jne 0x3bc8e9
pushq $0x2
popq %rsi
movl %r10d, %eax
cltd
idivl %esi
subl %eax, %r10d
movq %rcx, (%rsp)
movq %r14, %rdi
movq %rbx, %rsi
xorl %edx, %edx
xorl %ecx, %ecx
movl %eax, %r8d
movl %r10d, %r9d
jmp 0x3bc91e
cmpq %r14, %rbx
je 0x3bc923
movq 0x8(%r14), %rax
testq %rax, %rax
je 0x3bc8c7
lock
incl (%rax)
movq 0x8(%rbx), %rax
testq %rax, %rax
je 0x3bc933
lock
decl (%rax)
jne 0x3bc933
movq (%rbx), %rsi
movq 0x20(%rbx), %rdi
testq %rdi, %rdi
je 0x3bc92b
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bc933
movl $0xffffff16, %eax # imm = 0xFFFFFF16
cmpl %eax, %r8d
setne %dl
cmpl %eax, %r9d
setne %al
testb %al, %dl
jne 0x3bc923
pushq $0x2
popq %rsi
movl %r10d, %eax
cltd
idivl %esi
subl %eax, %r10d
movq %rcx, (%rsp)
movq %r14, %rdi
movq %rbx, %rsi
xorl %edx, %edx
xorl %ecx, %ecx
movl %r10d, %r8d
movl %eax, %r9d
callq 0x6489a
addq $0x8, %rsp
popq %rbx
popq %r14
retq
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x40(%rbx)
xorps %xmm0, %xmm0
movups %xmm0, (%rbx)
movups %xmm0, 0xc(%rbx)
andl $0x0, 0x38(%rbx)
movups %xmm0, 0x28(%rbx)
movups (%r14), %xmm0
movups %xmm0, (%rbx)
movq 0x10(%r14), %rax
movq %rax, 0x10(%rbx)
movl 0x18(%r14), %eax
movl %eax, 0x18(%rbx)
movq 0x20(%r14), %rax
movq %rax, 0x20(%rbx)
movups 0x28(%r14), %xmm0
movups %xmm0, 0x28(%rbx)
movl 0x38(%r14), %eax
movl %eax, 0x38(%rbx)
movq 0x40(%r14), %rax
movq %rax, 0x40(%rbx)
jmp 0x3bc923
| /csukuangfj[P]ncnn/src/layer/deconvolution1d.cpp |
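The two ONNX sentinels handled in cut_padding() differ only in which side receives the larger half of the cut; the same logic reappears verbatim in DeconvolutionDepthWise1D::cut_padding below. A tiny worked example with made-up widths:

#include <cstdio>

int main()
{
    // A bordered blob of width 13 cut down to output_w = 8.
    const int bordered_w = 13, output_w = 8;
    const int wcut = bordered_w - output_w; // 5

    // pad == -233 (SAME_UPPER): smaller cut on the left, larger on the right.
    std::printf("SAME_UPPER: cut %d left, %d right\n", wcut / 2, wcut - wcut / 2); // 2, 3
    // pad == -234 (SAME_LOWER): the split is mirrored.
    std::printf("SAME_LOWER: cut %d left, %d right\n", wcut - wcut / 2, wcut / 2); // 3, 2
    return 0;
}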
ncnn::DeconvolutionDepthWise1D::cut_padding(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const | void DeconvolutionDepthWise1D::cut_padding(const Mat& top_blob_bordered, Mat& top_blob, const Option& opt) const
{
if (pad_left > 0 || pad_right > 0)
{
copy_cut_border(top_blob_bordered, top_blob, 0, 0, pad_left, pad_right, opt);
}
else if (output_w > 0)
{
int wcut = top_blob_bordered.w - output_w;
if (pad_left == -233 || pad_right == -233)
{
// onnx padding=SAME_UPPER
copy_cut_border(top_blob_bordered, top_blob, 0, 0, wcut / 2, wcut - wcut / 2, opt);
}
else if (pad_left == -234 || pad_right == -234)
{
// onnx padding=SAME_LOWER
copy_cut_border(top_blob_bordered, top_blob, 0, 0, wcut - wcut / 2, wcut / 2, opt);
}
}
else
{
top_blob = top_blob_bordered;
}
} | pushq %r14
pushq %rbx
pushq %rax
movq %rdx, %rbx
movq %rsi, %r14
movl 0xe0(%rdi), %r8d
movl 0xe4(%rdi), %r9d
testl %r8d, %r8d
setg %al
testl %r9d, %r9d
setg %dl
orb %al, %dl
cmpb $0x1, %dl
jne 0x3bd9ac
movq %rcx, (%rsp)
movq %r14, %rdi
movq %rbx, %rsi
xorl %edx, %edx
xorl %ecx, %ecx
jmp 0x3bda5c
movl 0xec(%rdi), %eax
testl %eax, %eax
jle 0x3bd9f4
movl 0x2c(%r14), %r10d
subl %eax, %r10d
movl $0xffffff17, %eax # imm = 0xFFFFFF17
cmpl %eax, %r8d
setne %dl
cmpl %eax, %r9d
setne %al
testb %al, %dl
jne 0x3bda27
pushq $0x2
popq %rsi
movl %r10d, %eax
cltd
idivl %esi
subl %eax, %r10d
movq %rcx, (%rsp)
movq %r14, %rdi
movq %rbx, %rsi
xorl %edx, %edx
xorl %ecx, %ecx
movl %eax, %r8d
movl %r10d, %r9d
jmp 0x3bda5c
cmpq %r14, %rbx
je 0x3bda61
movq 0x8(%r14), %rax
testq %rax, %rax
je 0x3bda05
lock
incl (%rax)
movq 0x8(%rbx), %rax
testq %rax, %rax
je 0x3bda71
lock
decl (%rax)
jne 0x3bda71
movq (%rbx), %rsi
movq 0x20(%rbx), %rdi
testq %rdi, %rdi
je 0x3bda69
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bda71
movl $0xffffff16, %eax # imm = 0xFFFFFF16
cmpl %eax, %r8d
setne %dl
cmpl %eax, %r9d
setne %al
testb %al, %dl
jne 0x3bda61
pushq $0x2
popq %rsi
movl %r10d, %eax
cltd
idivl %esi
subl %eax, %r10d
movq %rcx, (%rsp)
movq %r14, %rdi
movq %rbx, %rsi
xorl %edx, %edx
xorl %ecx, %ecx
movl %r10d, %r8d
movl %eax, %r9d
callq 0x6489a
addq $0x8, %rsp
popq %rbx
popq %r14
retq
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x40(%rbx)
xorps %xmm0, %xmm0
movups %xmm0, (%rbx)
movups %xmm0, 0xc(%rbx)
andl $0x0, 0x38(%rbx)
movups %xmm0, 0x28(%rbx)
movups (%r14), %xmm0
movups %xmm0, (%rbx)
movq 0x10(%r14), %rax
movq %rax, 0x10(%rbx)
movl 0x18(%r14), %eax
movl %eax, 0x18(%rbx)
movq 0x20(%r14), %rax
movq %rax, 0x20(%rbx)
movups 0x28(%r14), %xmm0
movups %xmm0, 0x28(%rbx)
movl 0x38(%r14), %eax
movl %eax, 0x38(%rbx)
movq 0x40(%r14), %rax
movq %rax, 0x40(%rbx)
jmp 0x3bda61
| /csukuangfj[P]ncnn/src/layer/deconvolutiondepthwise1d.cpp |
ncnn::Deconvolution3D::load_param(ncnn::ParamDict const&) | int Deconvolution3D::load_param(const ParamDict& pd)
{
num_output = pd.get(0, 0);
kernel_w = pd.get(1, 0);
kernel_h = pd.get(11, kernel_w);
kernel_d = pd.get(21, kernel_w);
dilation_w = pd.get(2, 1);
dilation_h = pd.get(12, dilation_w);
dilation_d = pd.get(22, dilation_w);
stride_w = pd.get(3, 1);
stride_h = pd.get(13, stride_w);
stride_d = pd.get(23, stride_w);
pad_left = pd.get(4, 0);
pad_right = pd.get(15, pad_left);
pad_top = pd.get(14, pad_left);
pad_bottom = pd.get(16, pad_top);
pad_front = pd.get(24, pad_left);
pad_behind = pd.get(17, pad_front);
output_pad_right = pd.get(18, 0);
output_pad_bottom = pd.get(19, output_pad_right);
output_pad_behind = pd.get(20, output_pad_right);
output_w = pd.get(25, 0);
output_h = pd.get(26, output_w);
output_d = pd.get(27, output_w);
bias_term = pd.get(5, 0);
weight_data_size = pd.get(6, 0);
activation_type = pd.get(9, 0);
activation_params = pd.get(10, Mat());
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %rbx
subq $0x98, %rsp
movq %rsi, %r14
movq %rdi, %rbx
movq %rsi, %rdi
xorl %esi, %esi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0xd0(%rbx)
pushq $0x1
popq %rbp
movq %r14, %rdi
movl %ebp, %esi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0xd4(%rbx)
pushq $0xb
popq %rsi
movq %r14, %rdi
movl %eax, %edx
callq 0x718a6
movl %eax, 0xd8(%rbx)
movl 0xd4(%rbx), %edx
pushq $0x15
popq %rsi
movq %r14, %rdi
callq 0x718a6
movl %eax, 0xdc(%rbx)
pushq $0x2
popq %rsi
movq %r14, %rdi
movl %ebp, %edx
callq 0x718a6
movl %eax, 0xe0(%rbx)
pushq $0xc
popq %rsi
movq %r14, %rdi
movl %eax, %edx
callq 0x718a6
movl %eax, 0xe4(%rbx)
movl 0xe0(%rbx), %edx
pushq $0x16
popq %rsi
movq %r14, %rdi
callq 0x718a6
movl %eax, 0xe8(%rbx)
pushq $0x3
popq %rsi
movq %r14, %rdi
movl %ebp, %edx
callq 0x718a6
movl %eax, 0xec(%rbx)
pushq $0xd
popq %rsi
movq %r14, %rdi
movl %eax, %edx
callq 0x718a6
movl %eax, 0xf0(%rbx)
movl 0xec(%rbx), %edx
pushq $0x17
popq %rsi
movq %r14, %rdi
callq 0x718a6
movl %eax, 0xf4(%rbx)
pushq $0x4
popq %rsi
movq %r14, %rdi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0xf8(%rbx)
pushq $0xf
popq %rsi
movq %r14, %rdi
movl %eax, %edx
callq 0x718a6
movl %eax, 0xfc(%rbx)
movl 0xf8(%rbx), %edx
pushq $0xe
popq %rsi
movq %r14, %rdi
callq 0x718a6
movl %eax, 0x100(%rbx)
pushq $0x10
popq %rsi
movq %r14, %rdi
movl %eax, %edx
callq 0x718a6
movl %eax, 0x104(%rbx)
movl 0xf8(%rbx), %edx
pushq $0x18
popq %rsi
movq %r14, %rdi
callq 0x718a6
movl %eax, 0x108(%rbx)
pushq $0x11
popq %rsi
movq %r14, %rdi
movl %eax, %edx
callq 0x718a6
movl %eax, 0x10c(%rbx)
pushq $0x12
popq %rsi
movq %r14, %rdi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0x110(%rbx)
pushq $0x13
popq %rsi
movq %r14, %rdi
movl %eax, %edx
callq 0x718a6
movl %eax, 0x114(%rbx)
movl 0x110(%rbx), %edx
pushq $0x14
popq %rsi
movq %r14, %rdi
callq 0x718a6
movl %eax, 0x118(%rbx)
pushq $0x19
popq %rsi
movq %r14, %rdi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0x11c(%rbx)
pushq $0x1a
popq %rsi
movq %r14, %rdi
movl %eax, %edx
callq 0x718a6
movl %eax, 0x120(%rbx)
movl 0x11c(%rbx), %edx
pushq $0x1b
popq %rsi
movq %r14, %rdi
callq 0x718a6
movl %eax, 0x124(%rbx)
pushq $0x5
popq %rsi
movq %r14, %rdi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0x128(%rbx)
pushq $0x6
popq %rsi
movq %r14, %rdi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0x12c(%rbx)
pushq $0x9
popq %rsi
movq %r14, %rdi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0x130(%rbx)
leaq 0x50(%rsp), %rcx
andq $0x0, 0x40(%rcx)
xorps %xmm0, %xmm0
movaps %xmm0, (%rcx)
movups %xmm0, 0xc(%rcx)
movaps %xmm0, 0x20(%rcx)
movups %xmm0, 0x2c(%rcx)
movq %rsp, %r15
pushq $0xa
popq %rdx
movq %r15, %rdi
movq %r14, %rsi
callq 0x718da
leaq 0x138(%rbx), %rcx
movq 0x8(%rsp), %rax
cmpq %r15, %rcx
je 0x3bde44
testq %rax, %rax
je 0x3bddb5
lock
incl (%rax)
movq 0x140(%rbx), %rax
testq %rax, %rax
je 0x3bdde9
lock
decl (%rax)
jne 0x3bdde9
movq 0x138(%rbx), %rsi
movq 0x158(%rbx), %rdi
testq %rdi, %rdi
je 0x3bdde1
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bdde9
movq %rsi, %rdi
callq 0x5f3e0
movq (%rsp), %rax
movq %rax, 0x138(%rbx)
movq 0x8(%rsp), %rax
movq %rax, 0x140(%rbx)
movq 0x10(%rsp), %rcx
movq %rcx, 0x148(%rbx)
movl 0x18(%rsp), %ecx
movl %ecx, 0x150(%rbx)
movq 0x20(%rsp), %rcx
movq %rcx, 0x158(%rbx)
movups 0x28(%rsp), %xmm0
movups %xmm0, 0x160(%rbx)
movl 0x38(%rsp), %ecx
movl %ecx, 0x170(%rbx)
movq 0x40(%rsp), %rcx
movq %rcx, 0x178(%rbx)
testq %rax, %rax
je 0x3bde6c
lock
decl (%rax)
jne 0x3bde6c
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
je 0x3bde64
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bde6c
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x40(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, (%rsp)
movups %xmm0, 0xc(%rsp)
andl $0x0, 0x38(%rsp)
movups %xmm0, 0x28(%rsp)
movq 0x58(%rsp), %rax
testq %rax, %rax
je 0x3bdeb6
lock
decl (%rax)
jne 0x3bdeb6
movq 0x50(%rsp), %rsi
movq 0x70(%rsp), %rdi
testq %rdi, %rdi
je 0x3bdeae
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bdeb6
movq %rsi, %rdi
callq 0x5f3e0
xorl %eax, %eax
addq $0x98, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
movq %rax, %rbx
movq 0x8(%rsp), %rax
testq %rax, %rax
je 0x3bdef6
lock
decl (%rax)
jne 0x3bdef6
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
jne 0x3bdef0
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x3bdef6
movq (%rdi), %rax
callq *0x18(%rax)
andq $0x0, 0x40(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, (%rsp)
movups %xmm0, 0xc(%rsp)
movups %xmm0, 0x28(%rsp)
andl $0x0, 0x38(%rsp)
jmp 0x3bdf1d
jmp 0x3bdf53
jmp 0x3bdf53
jmp 0x3bdf53
movq %rax, %rbx
movq 0x58(%rsp), %rax
testq %rax, %rax
je 0x3bdf4b
lock
decl (%rax)
jne 0x3bdf4b
movq 0x50(%rsp), %rsi
movq 0x70(%rsp), %rdi
testq %rdi, %rdi
jne 0x3bdf45
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x3bdf4b
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
movq %rax, %rdi
callq 0x61d68
nop
| /csukuangfj[P]ncnn/src/layer/deconvolution3d.cpp |
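load_param above reads every hyperparameter through pd.get(id, default), chaining each previous result in as the next default so that unspecified ids inherit sensible values (kernel_h falls back to kernel_w, stride_d to stride_w, and so on). A minimal sketch of that defaulting pattern with a stand-in dictionary; TinyParamDict is illustrative, not the real ncnn::ParamDict:

#include <map>

// get(id, def) returns the stored value if `id` is present, else `def`.
// Chaining the previous result as the next default is what makes ids like
// 11 (kernel_h) optional in ncnn param files.
struct TinyParamDict
{
    std::map<int, int> values;
    int get(int id, int def) const
    {
        auto it = values.find(id);
        return it == values.end() ? def : it->second;
    }
};

int main()
{
    TinyParamDict pd;
    pd.values[1] = 3;                    // only kernel_w (id 1) is set
    int kernel_w = pd.get(1, 0);         // 3
    int kernel_h = pd.get(11, kernel_w); // falls back to kernel_w -> 3
    int kernel_d = pd.get(21, kernel_w); // falls back to kernel_w -> 3
    return (kernel_w == 3 && kernel_h == 3 && kernel_d == 3) ? 0 : 1;
}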
ncnn::DeconvolutionDepthWise3D::load_model(ncnn::ModelBin const&) | int DeconvolutionDepthWise3D::load_model(const ModelBin& mb)
{
weight_data = mb.load(weight_data_size, 0);
if (weight_data.empty())
return -100;
if (bias_term)
{
bias_data = mb.load(num_output, 1);
if (bias_data.empty())
return -100;
}
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $0x50, %rsp
movq %rsi, %r15
movq %rdi, %rbx
movl 0x12c(%rdi), %edx
movq (%rsi), %rax
leaq 0x8(%rsp), %r14
movq %r14, %rdi
xorl %ecx, %ecx
callq *0x10(%rax)
leaq 0x180(%rbx), %r12
movq 0x8(%r14), %rax
cmpq %r14, %r12
je 0x3bf392
testq %rax, %rax
je 0x3bf302
lock
incl (%rax)
movq 0x188(%rbx), %rax
testq %rax, %rax
je 0x3bf336
lock
decl (%rax)
jne 0x3bf336
movq 0x180(%rbx), %rsi
movq 0x1a0(%rbx), %rdi
testq %rdi, %rdi
je 0x3bf32e
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bf336
movq %rsi, %rdi
callq 0x5f3e0
movq 0x8(%rsp), %rax
movq %rax, 0x180(%rbx)
movq 0x10(%rsp), %rax
movq %rax, 0x188(%rbx)
movq 0x18(%rsp), %rcx
movq %rcx, 0x190(%rbx)
movl 0x20(%rsp), %ecx
movl %ecx, 0x198(%rbx)
movq 0x28(%rsp), %rcx
movq %rcx, 0x1a0(%rbx)
movups 0x30(%rsp), %xmm0
movups %xmm0, 0x1a8(%rbx)
movl 0x40(%rsp), %ecx
movl %ecx, 0x1b8(%rbx)
movq 0x48(%rsp), %rcx
movq %rcx, 0x1c0(%rbx)
testq %rax, %rax
je 0x3bf3bb
lock
decl (%rax)
jne 0x3bf3bb
movq 0x8(%rsp), %rsi
movq 0x28(%rsp), %rdi
testq %rdi, %rdi
je 0x3bf3b3
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bf3bb
movq %rsi, %rdi
callq 0x5f3e0
pushq $-0x64
popq %rbp
cmpq $0x0, (%r12)
je 0x3bf4f5
movslq 0x1b8(%rbx), %rax
imulq 0x1c0(%rbx), %rax
testq %rax, %rax
je 0x3bf4f5
cmpl $0x0, 0x128(%rbx)
je 0x3bf4f3
movl 0xd0(%rbx), %edx
movq (%r15), %rax
pushq $0x1
popq %rcx
movq %r14, %rdi
movq %r15, %rsi
callq *0x10(%rax)
leaq 0x1c8(%rbx), %r15
movq 0x10(%rsp), %rax
cmpq %r14, %r15
je 0x3bf4b0
testq %rax, %rax
je 0x3bf420
lock
incl (%rax)
movq 0x1d0(%rbx), %rax
testq %rax, %rax
je 0x3bf454
lock
decl (%rax)
jne 0x3bf454
movq 0x1c8(%rbx), %rsi
movq 0x1e8(%rbx), %rdi
testq %rdi, %rdi
je 0x3bf44c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bf454
movq %rsi, %rdi
callq 0x5f3e0
movq 0x8(%rsp), %rax
movq %rax, 0x1c8(%rbx)
movq 0x10(%rsp), %rax
movq %rax, 0x1d0(%rbx)
movq 0x18(%rsp), %rcx
movq %rcx, 0x1d8(%rbx)
movl 0x20(%rsp), %ecx
movl %ecx, 0x1e0(%rbx)
movq 0x28(%rsp), %rcx
movq %rcx, 0x1e8(%rbx)
movups 0x30(%rsp), %xmm0
movups %xmm0, 0x1f0(%rbx)
movl 0x40(%rsp), %ecx
movl %ecx, 0x200(%rbx)
movq 0x48(%rsp), %rcx
movq %rcx, 0x208(%rbx)
testq %rax, %rax
je 0x3bf4d9
lock
decl (%rax)
jne 0x3bf4d9
movq 0x8(%rsp), %rsi
movq 0x28(%rsp), %rdi
testq %rdi, %rdi
je 0x3bf4d1
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bf4d9
movq %rsi, %rdi
callq 0x5f3e0
cmpq $0x0, (%r15)
je 0x3bf4f5
movslq 0x200(%rbx), %rax
imulq 0x208(%rbx), %rax
testq %rax, %rax
je 0x3bf4f5
xorl %ebp, %ebp
movl %ebp, %eax
addq $0x50, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
movq %rax, %rbx
movq 0x10(%rsp), %rax
testq %rax, %rax
je 0x3bf562
lock
decl (%rax)
jne 0x3bf562
movq 0x8(%rsp), %rsi
movq 0x28(%rsp), %rdi
testq %rdi, %rdi
je 0x3bf552
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3bf562
jmp 0x3bf56c
jmp 0x3bf56c
movq %rax, %rbx
movq 0x10(%rsp), %rax
testq %rax, %rax
je 0x3bf562
lock
decl (%rax)
jne 0x3bf562
movq 0x8(%rsp), %rsi
movq 0x28(%rsp), %rdi
testq %rdi, %rdi
jne 0x3bf55c
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x3bf562
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x5f340
jmp 0x3bf56c
movq %rax, %rdi
callq 0x61d68
| /csukuangfj[P]ncnn/src/layer/deconvolutiondepthwise3d.cpp |
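load_model returns -100 whenever a freshly loaded Mat is empty. In the assembly this shows up as a null test on the data pointer followed by the product c * cstep, which is what Mat::empty() (data == 0 || total() == 0) compiles down to. A minimal sketch of that emptiness test; TinyMat is a stand-in for the few fields involved, not the real ncnn::Mat:

#include <cstddef>

// The two conditions the compiled check at the end of load_model performs:
// a null data pointer, or a zero total element count (c * cstep).
struct TinyMat
{
    void* data = nullptr;
    int c = 0;
    size_t cstep = 0;

    bool empty() const { return data == nullptr || (size_t)c * cstep == 0; }
};

int load_like(const TinyMat& weight)
{
    if (weight.empty())
        return -100; // same sentinel the layer returns on a failed load
    return 0;
}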
virtual thunk to ncnn::DeformableConv2D_x86_avx512::create_pipeline(ncnn::Option const&) | int DeformableConv2D_x86_avx512::create_pipeline(const Option& opt)
{
activation = create_activation_layer(activation_type, activation_params, opt);
int kernel_size = kernel_w * kernel_h;
int num_input = weight_data_size / kernel_size / num_output;
int elempack = 1;
int out_elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
#if __AVX512F__
elempack = num_input % 16 == 0 ? 16 : num_input % 8 == 0 ? 8 : num_input % 4 == 0 ? 4 : 1;
out_elempack = num_output % 16 == 0 ? 16 : num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#elif __AVX__
elempack = num_input % 8 == 0 ? 8 : num_input % 4 == 0 ? 4 : 1;
out_elempack = num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#else
elempack = num_input % 4 == 0 ? 4 : 1;
out_elempack = num_output % 4 == 0 ? 4 : 1;
#endif
}
#endif // __SSE2__
if (opt.use_sgemm_convolution)
{
const int maxk = kernel_w * kernel_h;
gemm = ncnn::create_layer(ncnn::LayerType::Gemm);
ncnn::ParamDict pd;
pd.set(2, 0); // transA
pd.set(3, 0); // transB
pd.set(4, 1); // constantA
pd.set(5, 0); // constantB
pd.set(6, 1); // constantC
pd.set(7, num_output); // M = outch
pd.set(8, 0); // N = size
pd.set(9, maxk * num_input); // K = maxk*inch
pd.set(10, bias_term ? 1 : -1); // constant_broadcast_type_C = (M)
pd.set(11, 1); // output_N1M
gemm->load_param(pd);
// maxk-inch-outch to pa-maxk-inch/pa-outch
Mat tmp;
{
Mat weight_data_r2 = weight_data.reshape(maxk, num_input, num_output);
tmp.create(maxk * num_input, num_output);
for (int q = 0; q < num_output; q += 1)
{
float* g00 = tmp.row(q);
for (int p = 0; p + (elempack - 1) < num_input; p += elempack)
{
for (int k = 0; k < maxk; k++)
{
for (int i = 0; i < elempack; i++)
{
const float* k00 = weight_data_r2.channel(q).row(p + i);
g00[0] = k00[k];
g00++;
}
}
}
}
}
if (bias_term)
{
ncnn::Mat weights[2];
weights[0] = tmp;
weights[1] = bias_data;
gemm->load_model(ModelBinFromMatArray(weights));
}
else
{
ncnn::Mat weights[1];
weights[0] = tmp;
gemm->load_model(ModelBinFromMatArray(weights));
}
gemm->create_pipeline(opt);
}
else if (elempack == 1 && out_elempack == 1)
{
weight_data_tm = weight_data;
}
else
{
deformableconv2d_transform_kernel_packed_sse(weight_data, weight_data_tm, num_input, num_output, kernel_w, kernel_h, elempack, out_elempack);
}
if (opt.lightmode)
{
weight_data.release();
}
return 0;
} | pushq %rax
movq (%rdi), %rax
addq -0x30(%rax), %rdi
callq 0x3c7244
xorl %eax, %eax
popq %rcx
retq
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/deformableconv2d_x86_avx512.cpp |
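The three-instruction body above is a vcall-offset thunk: it loads the vptr, adds the this-adjustment stored at a negative vtable offset (-0x30 here), and forwards to the real create_pipeline. Such thunks appear because the layer class uses virtual inheritance, so the offset from the base subobject back to the full object is only known at runtime. A small sketch of a hierarchy that forces the compiler to emit one; the class names are simplified stand-ins:

// With virtual inheritance the this-adjustment cannot be a compile-time
// constant, so the vtable stores it and a thunk applies it before the call,
// matching the `addq -0x30(%rax), %rdi` pattern above.
struct Option { };

struct Layer
{
    virtual ~Layer() { }
    virtual int create_pipeline(const Option&) { return 0; }
};

struct DeformableConv2D : Layer { };

struct DeformableConv2D_x86 : virtual DeformableConv2D
{
    int create_pipeline(const Option&) override { return 1; }
};

int main()
{
    DeformableConv2D_x86 obj;
    Layer* base = &obj;                // crosses the virtual base
    Option opt;
    return base->create_pipeline(opt); // dispatched through the thunk
}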
ncnn::DeformableConv2D_x86_fma::destroy_pipeline(ncnn::Option const&) | int DeformableConv2D_x86_fma::destroy_pipeline(const Option& opt)
{
if (activation)
{
activation->destroy_pipeline(opt);
delete activation;
activation = 0;
}
if (gemm)
{
gemm->destroy_pipeline(opt);
delete gemm;
gemm = 0;
}
return 0;
} | pushq %r14
pushq %rbx
pushq %rax
movq %rsi, %r14
movq %rdi, %rbx
movq 0x8(%rdi), %rdi
testq %rdi, %rdi
je 0x3d8350
movq (%rdi), %rax
movq %r14, %rsi
callq *0x28(%rax)
movq 0x8(%rbx), %rdi
testq %rdi, %rdi
je 0x3d834b
movq (%rdi), %rax
callq *0x8(%rax)
andq $0x0, 0x8(%rbx)
movq 0x58(%rbx), %rdi
testq %rdi, %rdi
je 0x3d8376
movq (%rdi), %rax
movq %r14, %rsi
callq *0x28(%rax)
movq 0x58(%rbx), %rdi
testq %rdi, %rdi
je 0x3d8371
movq (%rdi), %rax
callq *0x8(%rax)
andq $0x0, 0x58(%rbx)
xorl %eax, %eax
addq $0x8, %rsp
popq %rbx
popq %r14
retq
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/deformableconv2d_x86_fma.cpp |
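destroy_pipeline applies the usual teardown idiom for owned sub-layers: recurse into the child's destroy_pipeline, delete it, then null the pointer so repeated calls are harmless; the `andq $0x0, ...` stores in the assembly are those nulling writes. A compressed sketch of the idiom with stand-in types; destroy_owned is an illustrative helper, not ncnn API:

struct Option { };

struct Layer
{
    virtual ~Layer() { }
    virtual int destroy_pipeline(const Option&) { return 0; }
};

// Destroy, free, and null an owned layer; nulling keeps the whole
// teardown idempotent.
static void destroy_owned(Layer*& layer, const Option& opt)
{
    if (layer)
    {
        layer->destroy_pipeline(opt);
        delete layer;
        layer = nullptr;
    }
}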
virtual thunk to ncnn::DeformableConv2D_x86_fma::destroy_pipeline(ncnn::Option const&) | int DeformableConv2D_x86_fma::destroy_pipeline(const Option& opt)
{
if (activation)
{
activation->destroy_pipeline(opt);
delete activation;
activation = 0;
}
if (gemm)
{
gemm->destroy_pipeline(opt);
delete gemm;
gemm = 0;
}
return 0;
} | pushq %rax
movq (%rdi), %rax
addq -0x38(%rax), %rdi
callq 0x3d8320
xorl %eax, %eax
popq %rcx
retq
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/deformableconv2d_x86_fma.cpp |
ncnn::DeformableConv2D_x86_avx::create_pipeline(ncnn::Option const&) | int DeformableConv2D_x86_avx::create_pipeline(const Option& opt)
{
activation = create_activation_layer(activation_type, activation_params, opt);
int kernel_size = kernel_w * kernel_h;
int num_input = weight_data_size / kernel_size / num_output;
int elempack = 1;
int out_elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
#if __AVX512F__
elempack = num_input % 16 == 0 ? 16 : num_input % 8 == 0 ? 8 : num_input % 4 == 0 ? 4 : 1;
out_elempack = num_output % 16 == 0 ? 16 : num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#elif __AVX__
elempack = num_input % 8 == 0 ? 8 : num_input % 4 == 0 ? 4 : 1;
out_elempack = num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#else
elempack = num_input % 4 == 0 ? 4 : 1;
out_elempack = num_output % 4 == 0 ? 4 : 1;
#endif
}
#endif // __SSE2__
if (opt.use_sgemm_convolution)
{
const int maxk = kernel_w * kernel_h;
gemm = ncnn::create_layer(ncnn::LayerType::Gemm);
ncnn::ParamDict pd;
pd.set(2, 0); // transA
pd.set(3, 0); // transB
pd.set(4, 1); // constantA
pd.set(5, 0); // constantB
pd.set(6, 1); // constantC
pd.set(7, num_output); // M = outch
pd.set(8, 0); // N = size
pd.set(9, maxk * num_input); // K = maxk*inch
pd.set(10, bias_term ? 1 : -1); // constant_broadcast_type_C = (M)
pd.set(11, 1); // output_N1M
gemm->load_param(pd);
// maxk-inch-outch to pa-maxk-inch/pa-outch
Mat tmp;
{
Mat weight_data_r2 = weight_data.reshape(maxk, num_input, num_output);
tmp.create(maxk * num_input, num_output);
for (int q = 0; q < num_output; q += 1)
{
float* g00 = tmp.row(q);
for (int p = 0; p + (elempack - 1) < num_input; p += elempack)
{
for (int k = 0; k < maxk; k++)
{
for (int i = 0; i < elempack; i++)
{
const float* k00 = weight_data_r2.channel(q).row(p + i);
g00[0] = k00[k];
g00++;
}
}
}
}
}
if (bias_term)
{
ncnn::Mat weights[2];
weights[0] = tmp;
weights[1] = bias_data;
gemm->load_model(ModelBinFromMatArray(weights));
}
else
{
ncnn::Mat weights[1];
weights[0] = tmp;
gemm->load_model(ModelBinFromMatArray(weights));
}
gemm->create_pipeline(opt);
}
else if (elempack == 1 && out_elempack == 1)
{
weight_data_tm = weight_data;
}
else
{
deformableconv2d_transform_kernel_packed_sse(weight_data, weight_data_tm, num_input, num_output, kernel_w, kernel_h, elempack, out_elempack);
}
if (opt.lightmode)
{
weight_data.release();
}
return 0;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x188, %rsp # imm = 0x188
movq %rsi, (%rsp)
movq %rdi, %r12
movq (%rdi), %rax
movq -0x18(%rax), %rbx
movl 0x104(%rdi,%rbx), %ecx
decl %ecx
cmpl $0x5, %ecx
ja 0x3e107a
leaq 0x1c878(%rip), %rax # 0x3fd154
movslq (%rax,%rcx,4), %rcx
addq %rax, %rcx
jmpq *%rcx
pushq $0x1a
popq %rdi
callq 0x782bf
movq %rax, %r15
leaq 0x30(%rsp), %r14
movq %r14, %rdi
callq 0x71548
movq (%r15), %rax
movq %r15, %rdi
movq %r14, %rsi
callq *0x10(%rax)
movq (%rsp), %rbx
jmp 0x3e09c2
pushq $0x47
popq %rdi
callq 0x782bf
movq %rax, %r15
leaq 0x30(%rsp), %r14
movq %r14, %rdi
callq 0x71548
movq (%r15), %rax
movq %r15, %rdi
movq %r14, %rsi
callq *0x10(%rax)
movq (%rsp), %rbx
jmp 0x3e09c2
pushq $0x36
popq %rdi
callq 0x782bf
movq %rax, %r15
leaq 0x30(%rsp), %r14
movq %r14, %rdi
callq 0x71548
movq 0x108(%r12,%rbx), %rax
vmovss (%rax), %xmm0
movq %r14, %rdi
xorl %esi, %esi
callq 0x71952
movq 0x108(%r12,%rbx), %rax
vmovss 0x4(%rax), %xmm0
leaq 0x30(%rsp), %rdi
pushq $0x1
popq %rsi
callq 0x71952
movq (%r15), %rax
leaq 0x30(%rsp), %rsi
movq %r15, %rdi
callq *0x10(%rax)
jmp 0x3e0a65
pushq $0x1e
popq %rdi
callq 0x782bf
movq %rax, %r15
leaq 0x30(%rsp), %r14
movq %r14, %rdi
callq 0x71548
movq (%r15), %rax
movq %r15, %rdi
movq %r14, %rsi
callq *0x10(%rax)
movq (%rsp), %rbx
leaq 0x30(%rsp), %rdi
callq 0x71614
jmp 0x3e0a73
pushq $0x1a
popq %rdi
callq 0x782bf
movq %rax, %r15
leaq 0x30(%rsp), %r14
movq %r14, %rdi
callq 0x71548
movq 0x108(%r12,%rbx), %rax
vmovss (%rax), %xmm0
movq %r14, %rdi
xorl %esi, %esi
callq 0x71952
movq (%r15), %rax
leaq 0x30(%rsp), %rsi
movq %r15, %rdi
callq *0x10(%rax)
jmp 0x3e0a65
pushq $0x43
popq %rdi
callq 0x782bf
movq %rax, %r15
leaq 0x30(%rsp), %r14
movq %r14, %rdi
callq 0x71548
movq 0x108(%r12,%rbx), %rax
vmovss (%rax), %xmm0
movq %r14, %rdi
xorl %esi, %esi
callq 0x71952
movq 0x108(%r12,%rbx), %rax
vmovss 0x4(%rax), %xmm0
leaq 0x30(%rsp), %rdi
pushq $0x1
popq %rsi
callq 0x71952
movq (%r15), %rax
leaq 0x30(%rsp), %rsi
movq %r15, %rdi
callq *0x10(%rax)
leaq 0x30(%rsp), %rdi
callq 0x71614
movq (%rsp), %rbx
movq (%r15), %rax
movq %r15, %rdi
movq %rbx, %rsi
callq *0x20(%rax)
movq (%r12), %rax
movq %r15, 0x8(%r12)
movq -0x18(%rax), %r14
movslq 0xd4(%r12,%r14), %rax
movslq 0xd8(%r12,%r14), %rcx
movq %rcx, 0x138(%rsp)
movq %rax, 0x140(%rsp)
imull %eax, %ecx
movl 0xd0(%r12,%r14), %r9d
movq %r12, 0x18(%rsp)
movl 0x100(%r12,%r14), %eax
cltd
movq %rcx, 0x20(%rsp)
idivl %ecx
cltd
idivl %r9d
movl %eax, %r15d
cmpb $0x1, 0x27(%rbx)
jne 0x3e0b0a
pushq $0x8
popq %rax
xorl %ecx, %ecx
testb $0x3, %r15b
sete %cl
testb $0x7, %r15b
leal 0x1(%rcx,%rcx,2), %r12d
cmovel %eax, %r12d
xorl %ecx, %ecx
testb $0x3, %r9b
sete %cl
testb $0x7, %r9b
leal 0x1(%rcx,%rcx,2), %r13d
cmovel %eax, %r13d
jmp 0x3e0b11
pushq $0x1
popq %r13
movl %r13d, %r12d
cmpb $0x1, 0x1d(%rbx)
movslq %r15d, %rbp
jne 0x3e0d56
pushq $0x4a
popq %rdi
callq 0x782bf
movq 0x18(%rsp), %r13
movq %rax, 0x58(%r13)
leaq 0x8(%rsp), %r14
movq %r14, %rdi
callq 0x71548
pushq $0x2
popq %rsi
movq %r14, %rdi
xorl %edx, %edx
callq 0x7193a
leaq 0x8(%rsp), %rdi
pushq $0x3
popq %rsi
xorl %edx, %edx
callq 0x7193a
leaq 0x8(%rsp), %rdi
pushq $0x4
popq %rsi
pushq $0x1
popq %rdx
callq 0x7193a
leaq 0x8(%rsp), %rdi
pushq $0x5
popq %rsi
xorl %edx, %edx
callq 0x7193a
leaq 0x8(%rsp), %rdi
pushq $0x6
popq %rsi
pushq $0x1
popq %rdx
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0xd0(%r13,%rax), %edx
leaq 0x8(%rsp), %rdi
pushq $0x7
popq %rsi
callq 0x7193a
leaq 0x8(%rsp), %rdi
pushq $0x8
popq %rsi
xorl %edx, %edx
callq 0x7193a
movl %r15d, %r14d
imull 0x20(%rsp), %r14d
leaq 0x8(%rsp), %rdi
pushq $0x9
popq %rsi
movl %r14d, %edx
callq 0x7193a
movq (%r13), %rax
movq -0x18(%rax), %rax
xorl %edx, %edx
cmpl $0x1, 0xfc(%r13,%rax)
sbbl %edx, %edx
orl $0x1, %edx
leaq 0x8(%rsp), %rdi
pushq $0xa
popq %rsi
callq 0x7193a
leaq 0x8(%rsp), %rdi
pushq $0xb
popq %rsi
pushq $0x1
popq %rdx
callq 0x7193a
movq 0x58(%r13), %rdi
movq (%rdi), %rax
leaq 0x8(%rsp), %rsi
callq *0x10(%rax)
andq $0x0, 0x130(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0xf0(%rsp)
vmovups %xmm0, 0xfc(%rsp)
vmovaps %xmm0, 0x110(%rsp)
vmovups %xmm0, 0x11c(%rsp)
movq (%r13), %rax
movq -0x18(%rax), %rax
leaq 0x150(%r13,%rax), %rsi
movl -0x80(%rsi), %r8d
leaq 0x30(%rsp), %rdi
movq 0x20(%rsp), %rdx
movl %r15d, %ecx
xorl %r9d, %r9d
callq 0x63020
movq (%r13), %rax
movq -0x18(%rax), %rax
movl 0xd0(%r13,%rax), %edx
xorl %ebx, %ebx
leaq 0xf0(%rsp), %rdi
pushq $0x4
popq %rcx
movl %r14d, %esi
xorl %r8d, %r8d
callq 0x636fa
movq (%r13), %rax
movq 0xf0(%rsp), %rcx
movslq 0x11c(%rsp), %rdx
imulq 0x100(%rsp), %rdx
leal -0x1(%r12), %edi
movq 0x20(%rsp), %r8
testl %r8d, %r8d
cmovlel %ebx, %r8d
movl %r12d, %esi
movq %r8, %r12
subq %rdi, %rbp
movq -0x18(%rax), %rdi
movslq 0xd0(%r13,%rdi), %rdi
cmpq %rdi, %rbx
jge 0x3e0dc3
movq %rdx, %rdi
imulq %rbx, %rdi
addq %rcx, %rdi
xorl %r8d, %r8d
cmpq %rbp, %r8
jge 0x3e0d4e
movslq 0x5c(%rsp), %r9
movq 0x40(%rsp), %r11
movq %r11, %r10
imulq %r9, %r10
movq 0x70(%rsp), %r14
imulq %rbx, %r14
imulq %r8, %r9
addq %r14, %r9
imulq %r11, %r9
addq 0x30(%rsp), %r9
xorl %r11d, %r11d
cmpq %r12, %r11
je 0x3e0d49
movq %r9, %r14
movq %rsi, %r15
subq $0x1, %r15
jb 0x3e0d40
vmovss (%r14), %xmm0
vmovss %xmm0, (%rdi)
addq $0x4, %rdi
addq %r10, %r14
jmp 0x3e0d28
incq %r11
addq $0x4, %r9
jmp 0x3e0d1d
addq %rsi, %r8
jmp 0x3e0ceb
incq %rbx
jmp 0x3e0cc9
cmpl $0x1, %r13d
jne 0x3e0dfc
cmpl $0x1, %r12d
jne 0x3e0dfc
addq $0x150, %r14 # imm = 0x150
cmpq $0x10, %r14
movq 0x18(%rsp), %r13
je 0x3e14b5
movq 0x8(%r13,%r14), %rax
testq %rax, %rax
je 0x3e0d8d
lock
incl (%rax)
leaq 0x10(%r13), %rbx
movq 0x18(%r13), %rax
testq %rax, %rax
je 0x3e144d
lock
decl (%rax)
jne 0x3e144d
movq 0x10(%r13), %rsi
movq 0x30(%r13), %rdi
testq %rdi, %rdi
je 0x3e1445
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3e144d
movq 0x38(%rsp), %rax
testq %rax, %rax
movq (%rsp), %rbx
je 0x3e109c
lock
decl (%rax)
jne 0x3e109c
movq 0x30(%rsp), %rsi
movq 0x50(%rsp), %rdi
testq %rdi, %rdi
je 0x3e1094
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3e109c
movq 0x18(%rsp), %rbx
leaq 0x10(%rbx), %rdi
movq 0x150(%rbx,%r14), %rax
movq %rax, 0xc8(%rsp)
movl %r15d, %eax
imull 0x20(%rsp), %eax
imull %r9d, %eax
movl %r12d, %ecx
imull %r13d, %ecx
cltd
idivl %ecx
leal (,%rcx,4), %edx
movq %rdi, 0x148(%rsp)
movl %eax, %esi
xorl %r8d, %r8d
movl %r9d, %r14d
callq 0x626da
xorl %r10d, %r10d
movq 0x10(%rbx), %rax
movq %rax, 0x180(%rsp)
movq 0xc8(%rsp), %rcx
testl %r15d, %r15d
movl %r15d, 0x2c(%rsp)
movl %r15d, %r11d
cmovlel %r10d, %r11d
movq 0x140(%rsp), %rax
testl %eax, %eax
movl $0x0, %edx
cmovgl %eax, %edx
movq %rdx, 0x170(%rsp)
movq 0x138(%rsp), %r15
testl %r15d, %r15d
movl $0x0, %edx
cmovgl %r15d, %edx
movq %rdx, 0x168(%rsp)
testl %r14d, %r14d
movl $0x0, %edx
movl %r14d, 0xec(%rsp)
cmovgl %r14d, %edx
movq %rdx, 0x150(%rsp)
imulq %rax, %r15
imulq %r15, %rbp
shlq $0x2, %rbp
leaq (,%rax,4), %rax
movq %rax, 0x158(%rsp)
shlq $0x2, %r15
movq %rbp, 0x160(%rsp)
cmpq 0x150(%rsp), %r10
je 0x3e0ff4
movq %rcx, 0xc8(%rsp)
movq %rcx, 0xd0(%rsp)
xorl %ebx, %ebx
cmpq 0x168(%rsp), %rbx
je 0x3e0fe1
movq 0xd0(%rsp), %r14
xorl %ebp, %ebp
cmpq 0x170(%rsp), %rbp
je 0x3e0fb9
movq %r14, 0x178(%rsp)
xorl %ecx, %ecx
cmpq %rcx, %r11
je 0x3e0fa5
vmovss (%r14), %xmm0
movl %ecx, %eax
cltd
idivl %r12d
movl %edx, %esi
movl %eax, %edi
movl %r10d, %eax
cltd
idivl %r13d
movl %edx, %r8d
movl %eax, %r9d
movl 0x2c(%rsp), %eax
cltd
idivl %r12d
imull 0x138(%rsp), %r9d
addl %ebx, %r9d
imull 0x140(%rsp), %r9d
addl %ebp, %r9d
imull %eax, %r9d
addl %edi, %r9d
imull %r12d, %r9d
addl %esi, %r9d
imull %r13d, %r9d
addl %r8d, %r9d
movslq %r9d, %rax
movq 0x180(%rsp), %rdx
vmovss %xmm0, (%rdx,%rax,4)
incq %rcx
addq %r15, %r14
jmp 0x3e0f37
incq %rbp
movq 0x178(%rsp), %r14
addq $0x4, %r14
jmp 0x3e0f1f
incq %rbx
movq 0xd0(%rsp), %rax
addq 0x158(%rsp), %rax
movq %rax, 0xd0(%rsp)
movq 0x160(%rsp), %rbp
jmp 0x3e0f07
incq %r10
movq 0xc8(%rsp), %rcx
addq %rbp, %rcx
jmp 0x3e0ee7
movl 0x2c(%rsp), %eax
cltd
idivl %r12d
movl %eax, %ecx
movl 0xec(%rsp), %eax
cltd
idivl %r13d
leaq 0x30(%rsp), %r14
movq %r14, %rdi
movq 0x148(%rsp), %rbx
movq %rbx, %rsi
movl %ecx, %edx
movq 0x20(%rsp), %rcx
movl %eax, %r8d
xorl %r9d, %r9d
callq 0x63020
movq 0x8(%r14), %rax
cmpq %rbx, %r14
je 0x3e1086
testq %rax, %rax
movq 0x18(%rsp), %r13
movq (%rsp), %rbx
je 0x3e1048
lock
incl (%rax)
movq 0x18(%r13), %rax
testq %rax, %rax
je 0x3e13ca
lock
decl (%rax)
jne 0x3e13ca
movq 0x10(%r13), %rsi
movq 0x30(%r13), %rdi
testq %rdi, %rdi
je 0x3e13c2
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3e13ca
xorl %r15d, %r15d
movq (%rsp), %rbx
jmp 0x3e0a83
movq 0x18(%rsp), %r13
movq (%rsp), %rbx
jmp 0x3e1412
movq %rsi, %rdi
callq 0x5f3e0
movq (%r13), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0xfc(%r13,%rax)
je 0x3e113d
andq $0x0, 0x70(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x30(%rsp)
vmovups %xmm0, 0x3c(%rsp)
vmovaps %xmm0, 0x50(%rsp)
vmovups %xmm0, 0x5c(%rsp)
andq $0x0, 0xb8(%rsp)
vmovups %xmm0, 0x78(%rsp)
vmovups %xmm0, 0x84(%rsp)
vmovups %xmm0, 0x98(%rsp)
vmovups %xmm0, 0xa4(%rsp)
movq 0xf8(%rsp), %rax
testq %rax, %rax
je 0x3e110f
lock
incl (%rax)
movq 0x38(%rsp), %rax
testq %rax, %rax
je 0x3e11b0
lock
decl (%rax)
jne 0x3e11b0
movq 0x30(%rsp), %rsi
movq 0x50(%rsp), %rdi
testq %rdi, %rdi
je 0x3e11a8
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3e11b0
andq $0x0, 0x70(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x30(%rsp)
vmovups %xmm0, 0x3c(%rsp)
vmovaps %xmm0, 0x50(%rsp)
vmovups %xmm0, 0x5c(%rsp)
movq 0xf8(%rsp), %rax
testq %rax, %rax
je 0x3e1544
lock
incl (%rax)
movq 0x38(%rsp), %rax
testq %rax, %rax
je 0x3e1544
lock
decl (%rax)
jne 0x3e1544
movq 0x30(%rsp), %rsi
movq 0x50(%rsp), %rdi
testq %rdi, %rdi
je 0x3e153c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3e1544
movq %rsi, %rdi
callq 0x5f3e0
vmovaps 0xf0(%rsp), %xmm0
leaq 0x78(%rsp), %rax
vmovaps %xmm0, -0x48(%rax)
movq 0x100(%rsp), %rcx
movq %rcx, -0x38(%rax)
movl 0x108(%rsp), %ecx
movl %ecx, -0x30(%rax)
movq 0x110(%rsp), %rcx
movq %rcx, -0x28(%rax)
vmovups 0x118(%rsp), %xmm0
vmovups %xmm0, -0x20(%rax)
movl 0x128(%rsp), %ecx
movl %ecx, -0x10(%rax)
movq 0x130(%rsp), %rcx
movq %rcx, -0x8(%rax)
movq (%r13), %rcx
movq -0x18(%rcx), %rbx
leaq (%rbx,%r13), %rcx
addq $0x198, %rcx # imm = 0x198
cmpq %rcx, %rax
je 0x3e12d1
addq %r13, %rbx
movq 0x1a0(%rbx), %rax
testq %rax, %rax
je 0x3e1237
lock
incl (%rax)
movq 0x80(%rsp), %rax
testq %rax, %rax
je 0x3e126b
lock
decl (%rax)
jne 0x3e126b
movq 0x78(%rsp), %rsi
movq 0x98(%rsp), %rdi
testq %rdi, %rdi
je 0x3e1263
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3e126b
movq %rsi, %rdi
callq 0x5f3e0
vmovups 0x198(%rbx), %xmm0
vmovups %xmm0, 0x78(%rsp)
movq 0x1a8(%rbx), %rax
movq %rax, 0x88(%rsp)
movl 0x1b0(%rbx), %eax
movl %eax, 0x90(%rsp)
movq 0x1b8(%rbx), %rax
movq %rax, 0x98(%rsp)
vmovups 0x1c0(%rbx), %xmm0
vmovaps %xmm0, 0xa0(%rsp)
movl 0x1d0(%rbx), %eax
movl %eax, 0xb0(%rsp)
movq 0x1d8(%rbx), %rax
movq %rax, 0xb8(%rsp)
movq 0x58(%r13), %r14
leaq 0xd8(%rsp), %rdi
leaq 0x30(%rsp), %rsi
callq 0x6b00e
movq (%r14), %rax
leaq 0xd8(%rsp), %rsi
movq %r14, %rdi
callq *0x18(%rax)
leaq 0xd8(%rsp), %rdi
callq 0x6b03a
pushq $0x48
popq %rbx
vxorps %xmm0, %xmm0, %xmm0
movq 0x38(%rsp,%rbx), %rax
testq %rax, %rax
je 0x3e1342
lock
decl (%rax)
jne 0x3e1342
movq 0x30(%rsp,%rbx), %rsi
movq 0x50(%rsp,%rbx), %rdi
testq %rdi, %rdi
je 0x3e1336
movq (%rdi), %rax
callq *0x18(%rax)
vxorps %xmm0, %xmm0, %xmm0
jmp 0x3e1342
movq %rsi, %rdi
callq 0x5f3e0
vxorps %xmm0, %xmm0, %xmm0
leaq (%rsp,%rbx), %rax
addq $0x30, %rax
andq $0x0, 0x40(%rax)
vmovups %xmm0, 0xc(%rax)
vmovups %xmm0, (%rax)
andl $0x0, 0x38(%rax)
vmovups %xmm0, 0x28(%rax)
addq $-0x48, %rbx
cmpq $-0x48, %rbx
jne 0x3e130c
movq (%rsp), %rbx
movq 0x58(%r13), %rdi
movq (%rdi), %rax
movq %rbx, %rsi
callq *0x20(%rax)
movq 0xf8(%rsp), %rax
testq %rax, %rax
je 0x3e13b3
lock
decl (%rax)
jne 0x3e13b3
movq 0xf0(%rsp), %rsi
movq 0x110(%rsp), %rdi
testq %rdi, %rdi
je 0x3e13ab
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3e13b3
movq %rsi, %rdi
callq 0x5f3e0
leaq 0x8(%rsp), %rdi
callq 0x71614
jmp 0x3e14b5
movq %rsi, %rdi
callq 0x5f3e0
movq 0x38(%rsp), %rax
vmovaps 0x30(%rsp), %xmm0
vmovups %xmm0, 0x10(%r13)
movq 0x40(%rsp), %rcx
movq %rcx, 0x20(%r13)
movl 0x48(%rsp), %ecx
movl %ecx, 0x28(%r13)
movq 0x50(%rsp), %rcx
movq %rcx, 0x30(%r13)
vmovups 0x58(%rsp), %xmm0
vmovups %xmm0, 0x38(%r13)
movl 0x68(%rsp), %ecx
movl %ecx, 0x48(%r13)
movq 0x70(%rsp), %rcx
movq %rcx, 0x50(%r13)
testq %rax, %rax
je 0x3e14b5
lock
decl (%rax)
jne 0x3e14b5
movq 0x30(%rsp), %rsi
movq 0x50(%rsp), %rdi
testq %rdi, %rdi
je 0x3e143b
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3e14b5
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x3e14b5
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x50(%r13)
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, 0xc(%rbx)
vmovups %xmm0, (%rbx)
andl $0x0, 0x48(%r13)
vmovups %xmm0, 0x38(%r13)
vmovups (%r13,%r14), %xmm0
vmovups %xmm0, 0x10(%r13)
movq 0x10(%r13,%r14), %rax
movq %rax, 0x20(%r13)
movl 0x18(%r13,%r14), %eax
movl %eax, 0x28(%r13)
movq 0x20(%r13,%r14), %rax
movq %rax, 0x30(%r13)
vmovups 0x28(%r13,%r14), %xmm0
vmovups %xmm0, 0x38(%r13)
movl 0x38(%r13,%r14), %eax
movl %eax, 0x48(%r13)
movq 0x40(%r13,%r14), %rax
movq %rax, 0x50(%r13)
movq (%rsp), %rbx
cmpb $0x1, (%rbx)
jne 0x3e1528
movq (%r13), %rax
movq -0x18(%rax), %rax
leaq (%rax,%r13), %rbx
leaq (%rax,%r13), %r14
addq $0x150, %r14 # imm = 0x150
movq 0x8(%r14), %rax
testq %rax, %rax
je 0x3e1502
lock
decl (%rax)
jne 0x3e1502
movq 0x150(%rbx), %rsi
movq 0x170(%rbx), %rdi
testq %rdi, %rdi
je 0x3e14fa
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3e1502
movq %rsi, %rdi
callq 0x5f3e0
andq $0x0, 0x190(%rbx)
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, 0xc(%r14)
vmovups %xmm0, (%r14)
vmovups %xmm0, 0x178(%rbx)
andl $0x0, 0x188(%rbx)
xorl %eax, %eax
addq $0x188, %rsp # imm = 0x188
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq %rsi, %rdi
callq 0x5f3e0
vmovaps 0xf0(%rsp), %xmm0
leaq 0x30(%rsp), %rsi
vmovaps %xmm0, (%rsi)
movq 0x100(%rsp), %rax
movq %rax, 0x10(%rsi)
movl 0x108(%rsp), %eax
movl %eax, 0x18(%rsi)
movq 0x110(%rsp), %rax
movq %rax, 0x20(%rsi)
vmovups 0x118(%rsp), %xmm0
vmovups %xmm0, 0x28(%rsi)
movl 0x128(%rsp), %eax
movl %eax, 0x38(%rsi)
movq 0x130(%rsp), %rax
movq %rax, 0x40(%rsi)
movq 0x58(%r13), %r14
leaq 0xd8(%rsp), %rdi
callq 0x6b00e
movq (%r14), %rax
leaq 0xd8(%rsp), %rsi
movq %r14, %rdi
callq *0x18(%rax)
leaq 0xd8(%rsp), %rdi
callq 0x6b03a
movq 0x38(%rsp), %rax
testq %rax, %rax
je 0x3e136f
lock
decl (%rax)
jne 0x3e136f
movq 0x30(%rsp), %rsi
movq 0x50(%rsp), %rdi
testq %rdi, %rdi
je 0x3e15fc
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3e136f
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x3e136f
jmp 0x3e1688
movq %rax, %rbx
movq 0x38(%rsp), %rax
testq %rax, %rax
je 0x3e17e6
lock
decl (%rax)
jne 0x3e17e6
movq 0x30(%rsp), %rsi
movq 0x50(%rsp), %rdi
testq %rdi, %rdi
jne 0x3e1641
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x3e17e6
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3e17e6
jmp 0x3e17d1
jmp 0x3e17d1
jmp 0x3e17d1
jmp 0x3e17d1
jmp 0x3e17d1
jmp 0x3e16dc
jmp 0x3e174f
jmp 0x3e174f
jmp 0x3e174f
movq %rax, %rbx
leaq 0xd8(%rsp), %rdi
callq 0x6b03a
jmp 0x3e168b
movq %rax, %rbx
movq 0x38(%rsp), %rax
testq %rax, %rax
je 0x3e1796
lock
decl (%rax)
jne 0x3e1796
movq 0x30(%rsp), %rsi
movq 0x50(%rsp), %rdi
testq %rdi, %rdi
je 0x3e177f
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3e1796
jmp 0x3e17d1
jmp 0x3e174f
movq %rax, %rbx
leaq 0xd8(%rsp), %rdi
callq 0x6b03a
jmp 0x3e16df
movq %rax, %rbx
pushq $0x48
popq %r14
vxorps %xmm0, %xmm0, %xmm0
movq 0x38(%rsp,%r14), %rax
testq %rax, %rax
je 0x3e171d
lock
decl (%rax)
jne 0x3e171d
movq 0x30(%rsp,%r14), %rsi
movq 0x50(%rsp,%r14), %rdi
testq %rdi, %rdi
je 0x3e1711
movq (%rdi), %rax
callq *0x18(%rax)
vxorps %xmm0, %xmm0, %xmm0
jmp 0x3e171d
movq %rsi, %rdi
callq 0x5f3e0
vxorps %xmm0, %xmm0, %xmm0
leaq (%rsp,%r14), %rax
addq $0x30, %rax
andq $0x0, 0x40(%rax)
vmovups %xmm0, 0xc(%rax)
vmovups %xmm0, (%rax)
andl $0x0, 0x38(%rax)
vmovups %xmm0, 0x28(%rax)
addq $-0x48, %r14
cmpq $-0x48, %r14
jne 0x3e16e7
jmp 0x3e1796
jmp 0x3e17d1
jmp 0x3e174f
movq %rax, %rbx
leaq 0x30(%rsp), %rdi
jmp 0x3e17e1
jmp 0x3e1793
movq %rax, %rbx
movq 0x38(%rsp), %rax
testq %rax, %rax
je 0x3e1796
lock
decl (%rax)
jne 0x3e1796
movq 0x30(%rsp), %rsi
movq 0x50(%rsp), %rdi
testq %rdi, %rdi
jne 0x3e1789
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x3e1796
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3e1796
jmp 0x3e17d1
movq %rax, %rbx
movq 0xf8(%rsp), %rax
testq %rax, %rax
je 0x3e17dc
lock
decl (%rax)
jne 0x3e17dc
movq 0xf0(%rsp), %rsi
movq 0x110(%rsp), %rdi
testq %rdi, %rdi
jne 0x3e17c7
movq %rsi, %rdi
callq 0x5f3e0
jmp 0x3e17dc
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3e17dc
jmp 0x3e17d1
movq %rax, %rdi
callq 0x61d68
movq %rax, %rbx
leaq 0x8(%rsp), %rdi
callq 0x71614
movq %rbx, %rdi
callq 0x5f340
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/deformableconv2d_x86_avx.cpp |
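In the sgemm branch above, create_pipeline repacks the maxk-inch-outch weight tensor so that, per output channel, elempack consecutive input channels are interleaved tap by tap before being handed to the Gemm layer. A standalone sketch of that repack loop over plain arrays; repack and its layout comments paraphrase the g00/k00 loop nest and are not ncnn API:

#include <vector>

// From [outch][inch][maxk] to, per output channel q, blocks of `elempack`
// input channels interleaved per kernel tap:
//   tmp(q) = w[q][p+0][0], w[q][p+1][0], ..., w[q][p+0][1], w[q][p+1][1], ...
std::vector<float> repack(const std::vector<float>& w,
                          int inch, int outch, int maxk, int elempack)
{
    std::vector<float> tmp;
    tmp.reserve((size_t)outch * inch * maxk);
    for (int q = 0; q < outch; q++)
        for (int p = 0; p + (elempack - 1) < inch; p += elempack)
            for (int k = 0; k < maxk; k++)
                for (int i = 0; i < elempack; i++)
                    tmp.push_back(w[((size_t)q * inch + (p + i)) * maxk + k]);
    return tmp;
}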
ncnn::DeformableConv2D_x86_avx::destroy_pipeline(ncnn::Option const&) | int DeformableConv2D_x86_avx::destroy_pipeline(const Option& opt)
{
if (activation)
{
activation->destroy_pipeline(opt);
delete activation;
activation = 0;
}
if (gemm)
{
gemm->destroy_pipeline(opt);
delete gemm;
gemm = 0;
}
return 0;
} | pushq %r14
pushq %rbx
pushq %rax
movq %rsi, %r14
movq %rdi, %rbx
movq 0x8(%rdi), %rdi
testq %rdi, %rdi
je 0x3e1830
movq (%rdi), %rax
movq %r14, %rsi
callq *0x28(%rax)
movq 0x8(%rbx), %rdi
testq %rdi, %rdi
je 0x3e182b
movq (%rdi), %rax
callq *0x8(%rax)
andq $0x0, 0x8(%rbx)
movq 0x58(%rbx), %rdi
testq %rdi, %rdi
je 0x3e1856
movq (%rdi), %rax
movq %r14, %rsi
callq *0x28(%rax)
movq 0x58(%rbx), %rdi
testq %rdi, %rdi
je 0x3e1851
movq (%rdi), %rax
callq *0x8(%rax)
andq $0x0, 0x58(%rbx)
xorl %eax, %eax
addq $0x8, %rsp
popq %rbx
popq %r14
retq
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/deformableconv2d_x86_avx.cpp |
virtual thunk to ncnn::DeformableConv2D_x86_avx::destroy_pipeline(ncnn::Option const&) | int DeformableConv2D_x86_avx::destroy_pipeline(const Option& opt)
{
if (activation)
{
activation->destroy_pipeline(opt);
delete activation;
activation = 0;
}
if (gemm)
{
gemm->destroy_pipeline(opt);
delete gemm;
gemm = 0;
}
return 0;
} | pushq %rax
movq (%rdi), %rax
addq -0x38(%rax), %rdi
callq 0x3e1800
xorl %eax, %eax
popq %rcx
retq
nop
| /csukuangfj[P]ncnn/build_O2/src/layer/x86/deformableconv2d_x86_avx.cpp |
ncnn::GLU::forward(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const | int GLU::forward(const Mat& bottom_blob, Mat& top_blob,
const Option& opt) const
{
int dims = bottom_blob.dims;
int positive_axis = axis < 0 ? dims + axis : axis;
if (dims == 1)
{ // ignore axis
int w = bottom_blob.w;
int out_w = w / 2;
top_blob.create(out_w, sizeof(float), opt.blob_allocator);
const float* in_ptr = bottom_blob;
float* out_ptr = top_blob;
#pragma omp parallel for num_threads(opt.num_threads)
for (int x = 0; x < out_w; ++x)
{
float sigmoid = 1.f / (1.f + expf(-in_ptr[x + out_w]));
out_ptr[x] = in_ptr[x] * sigmoid;
}
return 0;
} // if (dims == 1)
if (dims == 2 && positive_axis == 0)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int out_w = w;
int out_h = h / 2;
top_blob.create(out_w, out_h, sizeof(float), opt.blob_allocator);
int offset = out_w * out_h;
#if 0
// this one is equivalent to the else branch. It is more readable
// but less efficient
#pragma omp parallel for num_threads(opt.num_threads)
for (int y = 0; y < out_h; ++y) {
const float *in_ptr = bottom_blob.row(y);
float *out_ptr = top_blob.row(y);
for (int x = 0; x < w; ++x) {
float sigmoid =
1.f / (1.f + expf(-in_ptr[x + offset]));
out_ptr[x] = in_ptr[x] * sigmoid;
}
}
#else
int size = offset;
const float* in_ptr = bottom_blob;
float* out_ptr = top_blob;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = 0; i < size; ++i)
{
float sigmoid = 1.f / (1.f + expf(-in_ptr[i + offset]));
out_ptr[i] = in_ptr[i] * sigmoid;
}
#endif
return 0;
} // if (dims == 2 && positive_axis == 0)
if (dims == 2 && positive_axis == 1)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int out_w = w / 2;
int out_h = h;
top_blob.create(out_w, out_h, sizeof(float), opt.blob_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int y = 0; y < h; ++y)
{
const float* in_ptr = bottom_blob.row(y);
float* out_ptr = top_blob.row(y);
for (int x = 0; x < out_w; ++x)
{
float sigmoid = 1.f / (1.f + expf(-in_ptr[x + out_w]));
out_ptr[x] = in_ptr[x] * sigmoid;
}
}
return 0;
} // if (dims == 2 && positive_axis == 1)
if (dims == 3 && positive_axis == 0)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int c = bottom_blob.c;
int out_w = w;
int out_h = h;
int out_c = c / 2;
top_blob.create(out_w, out_h, out_c, sizeof(float), opt.blob_allocator);
int offset = out_c * bottom_blob.cstep;
int size = w * h;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < out_c; ++q)
{
const float* in_ptr = bottom_blob.channel(q);
float* out_ptr = top_blob.channel(q);
for (int i = 0; i < size; ++i)
{
float sigmoid = 1.f / (1.f + expf(-in_ptr[i + offset]));
out_ptr[i] = in_ptr[i] * sigmoid;
}
}
return 0;
} // if (dims == 3 && positive_axis == 0) {
if (dims == 3 && positive_axis == 1)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int c = bottom_blob.c;
int out_w = w;
int out_h = h / 2;
int out_c = c;
top_blob.create(out_w, out_h, out_c, sizeof(float), opt.blob_allocator);
int offset = out_h * out_w;
int size = offset;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < c; ++q)
{
const float* in_ptr = bottom_blob.channel(q);
float* out_ptr = top_blob.channel(q);
for (int i = 0; i < size; ++i)
{
float sigmoid = 1.f / (1.f + expf(-in_ptr[i + offset]));
out_ptr[i] = in_ptr[i] * sigmoid;
}
}
return 0;
} // if (dims == 3 && positive_axis == 1)
if (dims == 3 && positive_axis == 2)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int c = bottom_blob.c;
int out_w = w / 2;
int out_h = h;
int out_c = c;
top_blob.create(out_w, out_h, out_c, sizeof(float), opt.blob_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < c; ++q)
{
const float* in_ptr = bottom_blob.channel(q);
float* out_ptr = top_blob.channel(q);
for (int y = 0; y < h; ++y)
{
for (int x = 0; x < out_w; ++x)
{
float sigmoid = 1.f / (1.f + expf(-in_ptr[x + out_w]));
out_ptr[x] = in_ptr[x] * sigmoid;
}
in_ptr += w;
out_ptr += out_w;
}
}
return 0;
} // if (dims == 3 && positive_axis == 2)
return -100;
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x58, %rsp
movq %rdx, %r15
movq %rsi, %r12
movl 0x28(%rsi), %eax
cmpl $0x1, %eax
jne 0x3ea1d4
movl 0x2c(%r12), %eax
pushq $0x2
popq %rsi
cltd
idivl %esi
movl %eax, %ebx
movq 0x8(%rcx), %rcx
pushq $0x4
popq %rdx
movq %r15, %rdi
movl %eax, %esi
callq 0x635fa
movq (%r12), %r14
movq (%r15), %r15
movslq %ebx, %rax
movq $0x0, 0x10(%rsp)
testl %eax, %eax
movl $0x0, %ebx
cmovgl %eax, %ebx
leaq (%r14,%rax,4), %r12
xorl %r13d, %r13d
cmpq %r13, %rbx
je 0x3ea68f
movss (%r12,%r13,4), %xmm0
xorps 0x3ede(%rip), %xmm0 # 0x3ee090
callq 0x5f410
addss 0x4ac9(%rip), %xmm0 # 0x3eec88
movss (%r14,%r13,4), %xmm1
divss %xmm0, %xmm1
movss %xmm1, (%r15,%r13,4)
incq %r13
jmp 0x3ea19c
movl 0xd0(%rdi), %esi
movl %esi, %edx
sarl $0x1f, %edx
andl %eax, %edx
addl %esi, %edx
cmpl $0x2, %eax
jne 0x3ea27a
testl %edx, %edx
jne 0x3ea27a
movl 0x2c(%r12), %ebx
movl 0x30(%r12), %eax
pushq $0x2
popq %rsi
cltd
idivl %esi
movl %eax, %ebp
movq 0x8(%rcx), %r8
pushq $0x4
popq %rcx
movq %r15, %rdi
movl %ebx, %esi
movl %eax, %edx
callq 0x636fa
imull %ebx, %ebp
movq (%r12), %rbx
movq (%r15), %r14
movslq %ebp, %rax
movq $0x0, 0x10(%rsp)
testl %eax, %eax
movl $0x0, %r15d
cmovgl %eax, %r15d
leaq (%rbx,%rax,4), %r12
xorl %r13d, %r13d
cmpq %r13, %r15
je 0x3ea68f
movss (%r12,%r13,4), %xmm0
xorps 0x3e38(%rip), %xmm0 # 0x3ee090
callq 0x5f410
addss 0x4a23(%rip), %xmm0 # 0x3eec88
movss (%rbx,%r13,4), %xmm1
divss %xmm0, %xmm1
movss %xmm1, (%r14,%r13,4)
incq %r13
jmp 0x3ea242
cmpl $0x2, %eax
jne 0x3ea360
cmpl $0x1, %edx
jne 0x3ea360
movl 0x2c(%r12), %eax
movl 0x30(%r12), %ebp
pushq $0x2
popq %rsi
cltd
idivl %esi
movl %eax, %r14d
movq 0x8(%rcx), %r8
pushq $0x4
popq %rcx
movq %r15, %rdi
movl %eax, %esi
movl %ebp, %edx
callq 0x636fa
movq (%r12), %r13
movslq 0x2c(%r12), %rsi
movq (%r15), %rbx
movslq %r14d, %rax
xorl %edx, %edx
testl %eax, %eax
movl $0x0, %r14d
cmovgl %eax, %r14d
movslq 0x2c(%r15), %rdi
testl %ebp, %ebp
movl $0x0, %ecx
movq %rcx, 0x10(%rsp)
cmovlel %edx, %ebp
movq %rbp, 0x20(%rsp)
imulq 0x10(%r12), %rsi
movq %rsi, 0x8(%rsp)
imulq 0x10(%r15), %rdi
movq %rdi, 0x18(%rsp)
leaq (,%rax,4), %r12
addq %r13, %r12
xorl %r15d, %r15d
cmpq 0x20(%rsp), %r15
je 0x3ea68f
xorl %ebp, %ebp
cmpq %rbp, %r14
je 0x3ea34b
movss (%r12,%rbp,4), %xmm0
xorps 0x3d67(%rip), %xmm0 # 0x3ee090
callq 0x5f410
addss 0x4952(%rip), %xmm0 # 0x3eec88
movss (%r13,%rbp,4), %xmm1
divss %xmm0, %xmm1
movss %xmm1, (%rbx,%rbp,4)
incq %rbp
jmp 0x3ea317
incq %r15
movq 0x8(%rsp), %rax
addq %rax, %r12
addq 0x18(%rsp), %rbx
addq %rax, %r13
jmp 0x3ea30a
cmpl $0x3, %eax
jne 0x3ea454
testl %edx, %edx
jne 0x3ea454
movl 0x2c(%r12), %r14d
movq %r12, %rbx
movl 0x30(%r12), %r12d
movl 0x38(%rbx), %eax
pushq $0x2
popq %rsi
cltd
idivl %esi
movl %eax, %r13d
movq 0x8(%rcx), %r9
pushq $0x4
popq %r8
movq %r15, %rdi
movl %r14d, %esi
movl %r12d, %edx
movl %eax, %ecx
callq 0x63810
movq (%rbx), %rbp
movq 0x40(%rbx), %rsi
movl %esi, %eax
imull %r13d, %eax
imull %r14d, %r12d
movq (%r15), %r14
movq 0x40(%r15), %rdi
xorl %edx, %edx
testl %r12d, %r12d
cmovlel %edx, %r12d
cltq
testl %r13d, %r13d
movl $0x0, %ecx
movq %rcx, 0x10(%rsp)
cmovlel %edx, %r13d
movq %r13, 0x20(%rsp)
imulq 0x10(%rbx), %rsi
movq %rsi, 0x8(%rsp)
imulq 0x10(%r15), %rdi
movq %rdi, 0x18(%rsp)
leaq (,%rax,4), %r13
addq %rbp, %r13
xorl %r15d, %r15d
cmpq 0x20(%rsp), %r15
je 0x3ea68f
xorl %ebx, %ebx
cmpq %rbx, %r12
je 0x3ea43f
movss (%r13,%rbx,4), %xmm0
xorps 0x3c73(%rip), %xmm0 # 0x3ee090
callq 0x5f410
addss 0x485e(%rip), %xmm0 # 0x3eec88
movss (%rbp,%rbx,4), %xmm1
divss %xmm0, %xmm1
movss %xmm1, (%r14,%rbx,4)
incq %rbx
jmp 0x3ea40a
incq %r15
movq 0x8(%rsp), %rax
addq %rax, %r13
addq 0x18(%rsp), %r14
addq %rax, %rbp
jmp 0x3ea3fd
cmpl $0x3, %eax
jne 0x3ea544
cmpl $0x1, %edx
jne 0x3ea544
movl 0x2c(%r12), %ebx
movl 0x30(%r12), %eax
movl 0x38(%r12), %r13d
pushq $0x2
popq %rsi
cltd
idivl %esi
movl %eax, %r14d
movq 0x8(%rcx), %r9
pushq $0x4
popq %r8
movq %r15, %rdi
movl %ebx, %esi
movl %eax, %edx
movl %r13d, %ecx
callq 0x63810
imull %ebx, %r14d
movq (%r12), %rbx
movq 0x40(%r12), %rsi
movq (%r15), %rbp
movslq %r14d, %rax
xorl %edx, %edx
testl %eax, %eax
movl $0x0, %r14d
cmovgl %eax, %r14d
movq 0x40(%r15), %rdi
testl %r13d, %r13d
movl $0x0, %ecx
movq %rcx, 0x10(%rsp)
cmovlel %edx, %r13d
movq %r13, 0x20(%rsp)
imulq 0x10(%r12), %rsi
movq %rsi, 0x8(%rsp)
imulq 0x10(%r15), %rdi
movq %rdi, 0x18(%rsp)
leaq (%rbx,%rax,4), %r12
xorl %r15d, %r15d
cmpq 0x20(%rsp), %r15
je 0x3ea68f
xorl %r13d, %r13d
cmpq %r13, %r14
je 0x3ea52f
movss (%r12,%r13,4), %xmm0
xorps 0x3b84(%rip), %xmm0 # 0x3ee090
callq 0x5f410
addss 0x476f(%rip), %xmm0 # 0x3eec88
movss (%rbx,%r13,4), %xmm1
divss %xmm0, %xmm1
movss %xmm1, (%rbp,%r13,4)
incq %r13
jmp 0x3ea4fa
incq %r15
movq 0x8(%rsp), %rax
addq %rax, %r12
addq 0x18(%rsp), %rbp
addq %rax, %rbx
jmp 0x3ea4ec
pushq $-0x64
popq %rsi
movq %rsi, 0x10(%rsp)
cmpl $0x3, %eax
jne 0x3ea68f
cmpl $0x2, %edx
jne 0x3ea68f
movslq 0x2c(%r12), %rax
movq %rax, 0x8(%rsp)
movl 0x30(%r12), %ebp
movl 0x38(%r12), %r14d
pushq $0x2
popq %rsi
cltd
idivl %esi
movl %eax, %ebx
movq 0x8(%rcx), %r9
pushq $0x4
popq %r8
movq %r15, %rdi
movl %eax, %esi
movl %ebp, %edx
movl %r14d, %ecx
callq 0x63810
movq (%r12), %rdx
movq 0x40(%r12), %rsi
movq 0x40(%r15), %rdi
movslq %ebx, %r8
xorl %ecx, %ecx
testl %r8d, %r8d
movl $0x0, %r13d
cmovgl %r8d, %r13d
testl %ebp, %ebp
cmovlel %ecx, %ebp
movl %ebp, 0x20(%rsp)
testl %r14d, %r14d
movl $0x0, %eax
movq %rax, 0x10(%rsp)
cmovlel %ecx, %r14d
movq %r14, 0x38(%rsp)
imulq 0x10(%r15), %rdi
movq %rdi, 0x28(%rsp)
movq (%r15), %rax
movq %rdx, %r15
shlq $0x2, %r8
imulq 0x10(%r12), %rsi
movq %rax, %r12
movq %rsi, 0x30(%rsp)
shlq $0x2, 0x8(%rsp)
xorl %eax, %eax
movq %r8, 0x18(%rsp)
cmpq 0x38(%rsp), %rax
je 0x3ea68f
movq %rax, 0x40(%rsp)
xorl %r14d, %r14d
movq %r15, 0x50(%rsp)
movq %r12, 0x48(%rsp)
cmpl 0x20(%rsp), %r14d
je 0x3ea66e
leaq (%r15,%r8), %rbp
xorl %ebx, %ebx
cmpq %rbx, %r13
je 0x3ea65c
movss (%rbp,%rbx,4), %xmm0
xorps 0x3a56(%rip), %xmm0 # 0x3ee090
callq 0x5f410
addss 0x4641(%rip), %xmm0 # 0x3eec88
movss (%r15,%rbx,4), %xmm1
divss %xmm0, %xmm1
movss %xmm1, (%r12,%rbx,4)
incq %rbx
jmp 0x3ea628
incl %r14d
movq 0x18(%rsp), %r8
addq %r8, %r12
addq 0x8(%rsp), %r15
jmp 0x3ea61b
movq 0x40(%rsp), %rax
incq %rax
movq 0x48(%rsp), %r12
addq 0x28(%rsp), %r12
movq 0x50(%rsp), %r15
addq 0x30(%rsp), %r15
jmp 0x3ea5fe
movq 0x10(%rsp), %rax
addq $0x58, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
| /csukuangfj[P]ncnn/src/layer/glu.cpp |
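Every branch of GLU::forward evaluates the same gating formula, out[i] = a[i] * sigmoid(b[i]) with sigmoid(x) = 1/(1 + exp(-x)), where a is the first half and b the second half of the input along the chosen axis; the six branches only differ in how the two halves are addressed. A scalar sketch of the dims == 1 case over plain vectors; glu_1d is an illustrative name, not the ncnn API:

#include <cmath>
#include <vector>

// 1-D GLU: the first w/2 entries are gated by the sigmoid of the second
// w/2 entries, matching the dims == 1 branch above.
std::vector<float> glu_1d(const std::vector<float>& in)
{
    const int out_w = (int)in.size() / 2;
    std::vector<float> out(out_w);
    for (int x = 0; x < out_w; x++)
    {
        float sigmoid = 1.f / (1.f + std::exp(-in[x + out_w]));
        out[x] = in[x] * sigmoid;
    }
    return out;
}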
ncnn::Unfold::load_param(ncnn::ParamDict const&) | int Unfold::load_param(const ParamDict& pd)
{
kernel_w = pd.get(1, 0);
kernel_h = pd.get(11, kernel_w);
dilation_w = pd.get(2, 1);
dilation_h = pd.get(12, dilation_w);
stride_w = pd.get(3, 1);
stride_h = pd.get(13, stride_w);
pad_left = pd.get(4, 0);
pad_right = pd.get(15, pad_left);
pad_top = pd.get(14, pad_left);
pad_bottom = pd.get(16, pad_top);
pad_value = pd.get(18, 0.f);
return 0;
} | pushq %rbp
pushq %r14
pushq %rbx
movq %rsi, %r14
movq %rdi, %rbx
pushq $0x1
popq %rbp
movq %rsi, %rdi
movl %ebp, %esi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0xd0(%rbx)
pushq $0xb
popq %rsi
movq %r14, %rdi
movl %eax, %edx
callq 0x718a6
movl %eax, 0xd4(%rbx)
pushq $0x2
popq %rsi
movq %r14, %rdi
movl %ebp, %edx
callq 0x718a6
movl %eax, 0xd8(%rbx)
pushq $0xc
popq %rsi
movq %r14, %rdi
movl %eax, %edx
callq 0x718a6
movl %eax, 0xdc(%rbx)
pushq $0x3
popq %rsi
movq %r14, %rdi
movl %ebp, %edx
callq 0x718a6
movl %eax, 0xe0(%rbx)
pushq $0xd
popq %rsi
movq %r14, %rdi
movl %eax, %edx
callq 0x718a6
movl %eax, 0xe4(%rbx)
pushq $0x4
popq %rsi
movq %r14, %rdi
xorl %edx, %edx
callq 0x718a6
movl %eax, 0xe8(%rbx)
pushq $0xf
popq %rsi
movq %r14, %rdi
movl %eax, %edx
callq 0x718a6
movl %eax, 0xec(%rbx)
movl 0xe8(%rbx), %edx
pushq $0xe
popq %rsi
movq %r14, %rdi
callq 0x718a6
movl %eax, 0xf0(%rbx)
pushq $0x10
popq %rsi
movq %r14, %rdi
movl %eax, %edx
callq 0x718a6
movl %eax, 0xf4(%rbx)
pushq $0x12
popq %rsi
xorps %xmm0, %xmm0
movq %r14, %rdi
callq 0x718c0
movss %xmm0, 0xf8(%rbx)
xorl %eax, %eax
popq %rbx
popq %r14
popq %rbp
retq
nop
| /csukuangfj[P]ncnn/src/layer/unfold.cpp |
void ncnn::copy_to_image<float>(ncnn::Mat const&, ncnn::Mat&, int, int) | static void copy_to_image(const Mat& src, Mat& self, int top, int left)
{
int w = src.w;
int h = src.h;
const T* ptr = src;
T* outptr = self.row<T>(top) + left;
for (int y = 0; y < h; y++)
{
memcpy(outptr, ptr, w * sizeof(T));
ptr += w;
outptr += self.w;
}
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
movq %rsi, %rbx
movslq 0x2c(%rdi), %r14
movl 0x30(%rdi), %eax
movslq 0x2c(%rsi), %rsi
movslq %edx, %rdx
imulq %rsi, %rdx
imulq 0x10(%rbx), %rdx
addq (%rbx), %rdx
movq (%rdi), %r15
movslq %ecx, %rcx
leaq (%rdx,%rcx,4), %r12
shlq $0x2, %r14
xorl %ebp, %ebp
testl %eax, %eax
cmovgl %eax, %ebp
subl $0x1, %ebp
jb 0x3ede5d
movq %r12, %rdi
movq %r15, %rsi
movq %r14, %rdx
callq 0x5f3c0
movslq 0x2c(%rbx), %rax
leaq (%r12,%rax,4), %r12
addq %r14, %r15
jmp 0x3ede3d
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
| /csukuangfj[P]ncnn/src/layer/copyto.cpp |
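copy_to_image copies a dense h x w source tile into a larger destination image row by row: the source pointer advances w elements per row while the destination pointer advances self.w, which is why the loop ends with two different pointer bumps. A standalone sketch over raw buffers; paste_tile is an illustrative name:

#include <cstddef>
#include <cstring>

// Paste a contiguous h x w float tile into dst (width dst_w) with its
// top-left corner at (top, left); destination rows are dst_w floats apart.
void paste_tile(const float* src, int w, int h,
                float* dst, int dst_w, int top, int left)
{
    const float* ptr = src;
    float* outptr = dst + (size_t)top * dst_w + left;
    for (int y = 0; y < h; y++)
    {
        memcpy(outptr, ptr, w * sizeof(float));
        ptr += w;        // dense source rows
        outptr += dst_w; // strided destination rows
    }
}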
nlohmann::json_abi_v3_11_3::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_3::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>, void>::basic_json(std::initializer_list<nlohmann::json_abi_v3_11_3::detail::json_ref<nlohmann::json_abi_v3_11_3::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_3::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>, void>>>, bool, nlohmann::json_abi_v3_11_3::detail::value_t) | basic_json(initializer_list_t init,
bool type_deduction = true,
value_t manual_type = value_t::array)
{
// check if each element is an array with two elements whose first
// element is a string
bool is_an_object = std::all_of(init.begin(), init.end(),
[](const detail::json_ref<basic_json>& element_ref)
{
// The cast is to ensure op[size_type] is called, bearing in mind size_type may not be int;
// (many string types can be constructed from 0 via its null-pointer guise, so we get a
// broken call to op[key_type], the wrong semantics and a 4804 warning on Windows)
return element_ref->is_array() && element_ref->size() == 2 && (*element_ref)[static_cast<size_type>(0)].is_string();
});
// adjust type if type deduction is not wanted
if (!type_deduction)
{
// if array is wanted, do not create an object though possible
if (manual_type == value_t::array)
{
is_an_object = false;
}
// if object is wanted but impossible, throw an exception
if (JSON_HEDLEY_UNLIKELY(manual_type == value_t::object && !is_an_object))
{
JSON_THROW(type_error::create(301, "cannot create object from initializer list", nullptr));
}
}
if (is_an_object)
{
// the initializer list is a list of pairs -> create object
m_data.m_type = value_t::object;
m_data.m_value = value_t::object;
for (auto& element_ref : init)
{
auto element = element_ref.moved_or_copied();
m_data.m_value.object->emplace(
std::move(*((*element.m_data.m_value.array)[0].m_data.m_value.string)),
std::move((*element.m_data.m_value.array)[1]));
}
}
else
{
// the initializer list describes an array -> create array
m_data.m_type = value_t::array;
m_data.m_value.array = create<array_t>(init.begin(), init.end());
}
set_parents();
assert_invariant();
} | pushq %rbp
movq %rsp, %rbp
subq $0x120, %rsp # imm = 0x120
movb %r8b, %al
movq %rsi, -0x10(%rbp)
movq %rdx, -0x8(%rbp)
movq %rdi, -0x18(%rbp)
andb $0x1, %cl
movb %cl, -0x19(%rbp)
movb %al, -0x1a(%rbp)
movq -0x18(%rbp), %rdi
movq %rdi, -0xd8(%rbp)
movq %rdi, %rax
movq %rax, -0xd0(%rbp)
xorps %xmm0, %xmm0
movups %xmm0, (%rdi)
callq 0x3f870
leaq -0x10(%rbp), %rdi
movq %rdi, -0xc8(%rbp)
callq 0x42ec0
movq -0xc8(%rbp), %rdi
movq %rax, -0xc0(%rbp)
callq 0x42ee0
movq -0xc0(%rbp), %rdi
movq %rax, %rsi
callq 0x42e80
movb %al, -0xb1(%rbp)
jmp 0x42b7d
movb -0xb1(%rbp), %al
andb $0x1, %al
movb %al, -0x1b(%rbp)
testb $0x1, -0x19(%rbp)
jne 0x42c9e
cmpb $0x2, -0x1a(%rbp)
jne 0x42baf
movb $0x0, -0x1b(%rbp)
jmp 0x42baf
movq %rax, %rcx
movl %edx, %eax
movq %rcx, -0x28(%rbp)
movl %eax, -0x2c(%rbp)
jmp 0x42e5c
xorl %eax, %eax
cmpb $0x1, -0x1a(%rbp)
movb %al, -0xd9(%rbp)
jne 0x42bc8
movb -0x1b(%rbp), %al
xorb $-0x1, %al
movb %al, -0xd9(%rbp)
movb -0xd9(%rbp), %al
xorb $-0x1, %al
xorb $-0x1, %al
testb $0x1, %al
jne 0x42bdb
jmp 0x42c9c
movb $0x1, -0x52(%rbp)
movl $0x20, %edi
callq 0x30190
movq %rax, -0xf0(%rbp)
leaq -0x51(%rbp), %rdi
movq %rdi, -0xe8(%rbp)
callq 0x305f0
movq -0xe8(%rbp), %rdx
leaq 0x20795(%rip), %rsi # 0x633a3
leaq -0x50(%rbp), %rdi
callq 0x3cb40
jmp 0x42c19
movq -0xf0(%rbp), %rdi
xorl %eax, %eax
movl %eax, %ecx
movl $0x12d, %esi # imm = 0x12D
leaq -0x50(%rbp), %rdx
callq 0x42f20
jmp 0x42c34
movq -0xf0(%rbp), %rdi
movb $0x0, -0x52(%rbp)
leaq 0x39f32(%rip), %rsi # 0x7cb78
leaq -0xd5d(%rip), %rdx # 0x41ef0
callq 0x305c0
jmp 0x42e71
movq %rax, %rcx
movl %edx, %eax
movq %rcx, -0x28(%rbp)
movl %eax, -0x2c(%rbp)
jmp 0x42c7a
movq %rax, %rcx
movl %edx, %eax
movq %rcx, -0x28(%rbp)
movl %eax, -0x2c(%rbp)
leaq -0x50(%rbp), %rdi
callq 0x30210
leaq -0x51(%rbp), %rdi
callq 0x303d0
testb $0x1, -0x52(%rbp)
jne 0x42c8b
jmp 0x42c97
movq -0xf0(%rbp), %rdi
callq 0x30290
jmp 0x42e5c
jmp 0x42c9e
testb $0x1, -0x1b(%rbp)
je 0x42dd4
movq -0xd0(%rbp), %rax
movb $0x1, (%rax)
leaq -0x60(%rbp), %rdi
movl $0x1, %esi
callq 0x3c460
jmp 0x42cc2
movq -0xd0(%rbp), %rax
movq -0x60(%rbp), %rcx
movq %rcx, 0x8(%rax)
leaq -0x10(%rbp), %rax
movq %rax, -0x68(%rbp)
movq -0x68(%rbp), %rdi
callq 0x42ec0
movq %rax, -0x70(%rbp)
movq -0x68(%rbp), %rdi
callq 0x42ee0
movq %rax, -0x78(%rbp)
movq -0x70(%rbp), %rax
cmpq -0x78(%rbp), %rax
je 0x42dd2
movq -0x70(%rbp), %rax
movq %rax, -0x80(%rbp)
movq -0x80(%rbp), %rsi
leaq -0x90(%rbp), %rdi
callq 0x430d0
jmp 0x42d1b
movq -0xd0(%rbp), %rax
movq 0x8(%rax), %rax
movq %rax, -0x110(%rbp)
movq -0x88(%rbp), %rdi
xorl %eax, %eax
movl %eax, %esi
callq 0x42ae0
movq 0x8(%rax), %rax
movq %rax, -0x108(%rbp)
movq -0x88(%rbp), %rdi
movl $0x1, %esi
callq 0x42ae0
movq -0x110(%rbp), %rdi
movq -0x108(%rbp), %rsi
movq %rax, %rdx
callq 0x43130
movb %dl, -0xf9(%rbp)
movq %rax, -0xf8(%rbp)
jmp 0x42d7e
movb -0xf9(%rbp), %al
movq -0xf8(%rbp), %rcx
movq %rcx, -0xa0(%rbp)
movb %al, -0x98(%rbp)
leaq -0x90(%rbp), %rdi
callq 0x3b520
movq -0x70(%rbp), %rax
addq $0x18, %rax
movq %rax, -0x70(%rbp)
jmp 0x42cf3
movq %rax, %rcx
movl %edx, %eax
movq %rcx, -0x28(%rbp)
movl %eax, -0x2c(%rbp)
leaq -0x90(%rbp), %rdi
callq 0x3b520
jmp 0x42e5c
jmp 0x42e36
movq -0xd0(%rbp), %rax
movb $0x2, (%rax)
leaq -0x10(%rbp), %rdi
movq %rdi, -0x120(%rbp)
callq 0x42ec0
movq -0x120(%rbp), %rdi
movq %rax, -0xa8(%rbp)
callq 0x42ee0
movq %rax, -0xb0(%rbp)
leaq -0xa8(%rbp), %rdi
leaq -0xb0(%rbp), %rsi
callq 0x43180
movq %rax, -0x118(%rbp)
jmp 0x42e24
movq -0xd0(%rbp), %rax
movq -0x118(%rbp), %rcx
movq %rcx, 0x8(%rax)
movq -0xd0(%rbp), %rdi
callq 0x3f8d0
movq -0xd0(%rbp), %rdi
movl $0x1, %esi
callq 0x3c2f0
addq $0x120, %rsp # imm = 0x120
popq %rbp
retq
movq -0xd8(%rbp), %rdi
callq 0x42980
movq -0x28(%rbp), %rdi
callq 0x305e0
nopw %cs:(%rax,%rax)
nopl (%rax,%rax)
| /pantor[P]ruckig/third_party/nlohmann/json.hpp |
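The all_of predicate above is what makes brace-initialization of basic_json deduce object vs array: an object only when every element is a two-element array whose first entry is a string, and an array otherwise (with json::array(...) as the explicit escape hatch). A short call-site sketch, assuming the real nlohmann/json.hpp header is available; the values are illustrative:

#include <nlohmann/json.hpp>

using nlohmann::json;

int main()
{
    // Every element is a {string, value} pair: deduced as an object.
    json obj = {{"pi", 3.14}, {"happy", true}};

    // Mixed elements: deduced as an array.
    json arr = {1, "two", 3.0};

    // type_deduction=false path: force array semantics for object-shaped input.
    json forced = json::array({{"pi", 3.14}});

    return (obj.is_object() && arr.is_array() && forced.is_array()) ? 0 : 1;
}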
nlohmann::json_abi_v3_11_3::detail::type_error nlohmann::json_abi_v3_11_3::detail::type_error::create<std::nullptr_t, 0>(int, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> const&, std::nullptr_t) | static type_error create(int id_, const std::string& what_arg, BasicJsonContext context)
{
const std::string w = concat(exception::name("type_error", id_), exception::diagnostics(context), what_arg);
return {id_, w.c_str()};
} | pushq %rbp
movq %rsp, %rbp
subq $0xe0, %rsp
movq %rdi, -0xd0(%rbp)
movq %rdi, %rax
movq %rax, -0xc8(%rbp)
movq %rdi, -0x8(%rbp)
movl %esi, -0xc(%rbp)
movq %rdx, -0x18(%rbp)
movq %rcx, -0x20(%rbp)
leaq -0x81(%rbp), %rdi
movq %rdi, -0xc0(%rbp)
callq 0x305f0
movq -0xc0(%rbp), %rdx
leaq 0x203f3(%rip), %rsi # 0x6335f
leaq -0x80(%rbp), %rdi
callq 0x3cb40
jmp 0x42f77
movl -0xc(%rbp), %edx
leaq -0x60(%rbp), %rdi
leaq -0x80(%rbp), %rsi
callq 0x3eb80
jmp 0x42f89
xorl %eax, %eax
movl %eax, %esi
leaq -0xb8(%rbp), %rdi
callq 0x3ec10
jmp 0x42f9b
movq -0x18(%rbp), %rcx
leaq -0x40(%rbp), %rdi
leaq -0x60(%rbp), %rsi
leaq -0xb8(%rbp), %rdx
callq 0x3ead0
jmp 0x42fb5
leaq -0xb8(%rbp), %rdi
callq 0x30210
leaq -0x60(%rbp), %rdi
callq 0x30210
leaq -0x80(%rbp), %rdi
callq 0x30210
leaq -0x81(%rbp), %rdi
callq 0x303d0
movl -0xc(%rbp), %eax
movl %eax, -0xd4(%rbp)
leaq -0x40(%rbp), %rdi
callq 0x30110
movq -0xd0(%rbp), %rdi
movl -0xd4(%rbp), %esi
movq %rax, %rdx
callq 0x427f0
jmp 0x43008
leaq -0x40(%rbp), %rdi
callq 0x30210
movq -0xc8(%rbp), %rax
addq $0xe0, %rsp
popq %rbp
retq
movq %rax, %rcx
movl %edx, %eax
movq %rcx, -0x90(%rbp)
movl %eax, -0x94(%rbp)
jmp 0x4308d
movq %rax, %rcx
movl %edx, %eax
movq %rcx, -0x90(%rbp)
movl %eax, -0x94(%rbp)
jmp 0x43084
movq %rax, %rcx
movl %edx, %eax
movq %rcx, -0x90(%rbp)
movl %eax, -0x94(%rbp)
jmp 0x4307b
movq %rax, %rcx
movl %edx, %eax
movq %rcx, -0x90(%rbp)
movl %eax, -0x94(%rbp)
leaq -0xb8(%rbp), %rdi
callq 0x30210
leaq -0x60(%rbp), %rdi
callq 0x30210
leaq -0x80(%rbp), %rdi
callq 0x30210
leaq -0x81(%rbp), %rdi
callq 0x303d0
jmp 0x430b6
movq %rax, %rcx
movl %edx, %eax
movq %rcx, -0x90(%rbp)
movl %eax, -0x94(%rbp)
leaq -0x40(%rbp), %rdi
callq 0x30210
movq -0x90(%rbp), %rdi
callq 0x305e0
nopw %cs:(%rax,%rax)
nopl (%rax)
| /pantor[P]ruckig/third_party/nlohmann/json.hpp |
nlohmann::json_abi_v3_11_3::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_3::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>, void>::basic_json(nlohmann::json_abi_v3_11_3::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_3::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>, void> const&) | basic_json(const basic_json& other)
: json_base_class_t(other)
{
m_data.m_type = other.m_data.m_type;
// check of passed value is valid
other.assert_invariant();
switch (m_data.m_type)
{
case value_t::object:
{
m_data.m_value = *other.m_data.m_value.object;
break;
}
case value_t::array:
{
m_data.m_value = *other.m_data.m_value.array;
break;
}
case value_t::string:
{
m_data.m_value = *other.m_data.m_value.string;
break;
}
case value_t::boolean:
{
m_data.m_value = other.m_data.m_value.boolean;
break;
}
case value_t::number_integer:
{
m_data.m_value = other.m_data.m_value.number_integer;
break;
}
case value_t::number_unsigned:
{
m_data.m_value = other.m_data.m_value.number_unsigned;
break;
}
case value_t::number_float:
{
m_data.m_value = other.m_data.m_value.number_float;
break;
}
case value_t::binary:
{
m_data.m_value = *other.m_data.m_value.binary;
break;
}
case value_t::null:
case value_t::discarded:
default:
break;
}
set_parents();
assert_invariant();
} | pushq %rbp
movq %rsp, %rbp
subq $0x80, %rsp
movq %rdi, -0x8(%rbp)
movq %rsi, -0x10(%rbp)
movq -0x8(%rbp), %rdi
movq %rdi, -0x70(%rbp)
movq %rdi, %rax
movq %rax, -0x78(%rbp)
xorps %xmm0, %xmm0
movups %xmm0, (%rdi)
callq 0x3f870
movq -0x70(%rbp), %rax
movq -0x10(%rbp), %rcx
movb (%rcx), %cl
movb %cl, (%rax)
movq -0x10(%rbp), %rdi
movl $0x1, %esi
callq 0x3c2f0
movq -0x70(%rbp), %rax
movzbl (%rax), %eax
movq %rax, -0x68(%rbp)
subq $0x9, %rax
ja 0x4383d
movq -0x68(%rbp), %rax
leaq 0x1ea39(%rip), %rcx # 0x62140
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rsi
leaq -0x18(%rbp), %rdi
callq 0x43870
jmp 0x43723
movq -0x78(%rbp), %rax
movq -0x18(%rbp), %rcx
movq %rcx, 0x8(%rax)
jmp 0x4383f
movq -0x70(%rbp), %rdi
movq %rax, %rcx
movl %edx, %eax
movq %rcx, -0x20(%rbp)
movl %eax, -0x24(%rbp)
callq 0x42980
jmp 0x4385f
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rsi
leaq -0x30(%rbp), %rdi
callq 0x438b0
jmp 0x43761
movq -0x78(%rbp), %rax
movq -0x30(%rbp), %rcx
movq %rcx, 0x8(%rax)
jmp 0x4383f
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rsi
leaq -0x38(%rbp), %rdi
callq 0x438f0
jmp 0x43785
movq -0x78(%rbp), %rax
movq -0x38(%rbp), %rcx
movq %rcx, 0x8(%rax)
jmp 0x4383f
movq -0x10(%rbp), %rax
movb 0x8(%rax), %al
leaq -0x40(%rbp), %rdi
andb $0x1, %al
movzbl %al, %esi
callq 0x43930
movq -0x78(%rbp), %rax
movq -0x40(%rbp), %rcx
movq %rcx, 0x8(%rax)
jmp 0x4383f
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rsi
leaq -0x48(%rbp), %rdi
callq 0x43950
movq -0x78(%rbp), %rax
movq -0x48(%rbp), %rcx
movq %rcx, 0x8(%rax)
jmp 0x4383f
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rsi
leaq -0x50(%rbp), %rdi
callq 0x3ff30
movq -0x78(%rbp), %rax
movq -0x50(%rbp), %rcx
movq %rcx, 0x8(%rax)
jmp 0x4383f
movq -0x10(%rbp), %rax
movsd 0x8(%rax), %xmm0
leaq -0x58(%rbp), %rdi
callq 0x43970
movq -0x78(%rbp), %rax
movq -0x58(%rbp), %rcx
movq %rcx, 0x8(%rax)
jmp 0x4383f
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rsi
leaq -0x60(%rbp), %rdi
callq 0x43990
jmp 0x4382d
movq -0x78(%rbp), %rax
movq -0x60(%rbp), %rcx
movq %rcx, 0x8(%rax)
jmp 0x4383f
jmp 0x4383d
jmp 0x4383f
movq -0x78(%rbp), %rdi
callq 0x3f8d0
movq -0x78(%rbp), %rdi
movl $0x1, %esi
callq 0x3c2f0
addq $0x80, %rsp
popq %rbp
retq
movq -0x20(%rbp), %rdi
callq 0x305e0
nopl (%rax,%rax)
| /pantor[P]ruckig/third_party/nlohmann/json.hpp |
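A minimal usage sketch (illustrative only, not part of the listing) of the deep-copy semantics the tagged-union copy constructor above implements: each switch branch clones the heap-allocated object/array/string/binary payload, so a copy never aliases the original.

#include <cassert>
#include <nlohmann/json.hpp>

int main()
{
    nlohmann::json a = {{"k", {1, 2, 3}}};
    nlohmann::json b = a;   // runs the copy constructor above
    b["k"][0] = 99;         // deep copy: mutating b...
    assert(a["k"][0] == 1); // ...leaves a untouched
}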
decltype(from_json_array_impl(fp, fp0, nlohmann::json_abi_v3_11_3::detail::priority_tag<3u>{}), fp.get<std::vector<double, std::allocator<double>>::value_type>(), (void)()) nlohmann::json_abi_v3_11_3::detail::from_json<nlohmann::json_abi_v3_11_3::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_3::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>, void>, std::vector<double, std::allocator<double>>, 0>(nlohmann::json_abi_v3_11_3::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_3::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>, void> const&, std::vector<double, std::allocator<double>>&) | auto from_json(const BasicJsonType& j, ConstructibleArrayType& arr)
-> decltype(from_json_array_impl(j, arr, priority_tag<3> {}),
j.template get<typename ConstructibleArrayType::value_type>(),
void())
{
if (JSON_HEDLEY_UNLIKELY(!j.is_array()))
{
JSON_THROW(type_error::create(302, concat("type must be array, but is ", j.type_name()), &j));
}
from_json_array_impl(j, arr, priority_tag<3> {});
} | pushq %rbp
movq %rsp, %rbp
subq $0x50, %rsp
movq %rdi, -0x8(%rbp)
movq %rsi, -0x10(%rbp)
movq -0x8(%rbp), %rdi
callq 0x403d0
xorb $-0x1, %al
xorb $-0x1, %al
xorb $-0x1, %al
testb $0x1, %al
jne 0x48828
jmp 0x488c8
movb $0x1, -0x45(%rbp)
movl $0x20, %edi
callq 0x30190
movq %rax, -0x50(%rbp)
movq -0x8(%rbp), %rdi
callq 0x41e50
movq %rax, -0x38(%rbp)
leaq 0x1b7d1(%rip), %rsi # 0x6401f
leaq -0x30(%rbp), %rdi
leaq -0x38(%rbp), %rdx
callq 0x488f0
jmp 0x4885d
movq -0x50(%rbp), %rdi
movq -0x8(%rbp), %rcx
movl $0x12e, %esi # imm = 0x12E
leaq -0x30(%rbp), %rdx
callq 0x47f50
jmp 0x48875
movq -0x50(%rbp), %rdi
movb $0x0, -0x45(%rbp)
leaq 0x342f4(%rip), %rsi # 0x7cb78
leaq -0x699b(%rip), %rdx # 0x41ef0
callq 0x305c0
jmp 0x488e4
movq %rax, %rcx
movl %edx, %eax
movq %rcx, -0x40(%rbp)
movl %eax, -0x44(%rbp)
jmp 0x488b5
movq %rax, %rcx
movl %edx, %eax
movq %rcx, -0x40(%rbp)
movl %eax, -0x44(%rbp)
leaq -0x30(%rbp), %rdi
callq 0x30210
testb $0x1, -0x45(%rbp)
jne 0x488bd
jmp 0x488c6
movq -0x50(%rbp), %rdi
callq 0x30290
jmp 0x488db
movq -0x8(%rbp), %rdi
movq -0x10(%rbp), %rsi
callq 0x48990
addq $0x50, %rsp
popq %rbp
retq
movq -0x40(%rbp), %rdi
callq 0x305e0
nopw %cs:(%rax,%rax)
nop
| /pantor[P]ruckig/third_party/nlohmann/json.hpp |
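A hedged usage sketch: the from_json overload above is what ultimately runs when get<std::vector<double>>() is called on a JSON value, and the non-array branch raises type_error 302 with the message built in the listing. The exact what() text in the comment is the library's usual format and shown only as an assumption.

#include <iostream>
#include <vector>
#include <nlohmann/json.hpp>

int main()
{
    nlohmann::json j = {0.5, 1.5, 2.5};
    auto v = j.get<std::vector<double>>(); // dispatches to from_json above
    std::cout << v.size() << " elements\n";

    nlohmann::json not_array = 42;
    try {
        not_array.get<std::vector<double>>();
    } catch (const nlohmann::json::type_error& e) {
        // e.g. "[json.exception.type_error.302] type must be array, but is number"
        std::cout << e.what() << '\n';
    }
}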
decltype(fp0.reserve(std::declval<std::vector<double, std::allocator<double>>::size_type>()), fp.get<std::vector<double, std::allocator<double>>::value_type>(), (void)()) nlohmann::json_abi_v3_11_3::detail::from_json_array_impl<nlohmann::json_abi_v3_11_3::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_3::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>, void>, std::vector<double, std::allocator<double>>, 0>(nlohmann::json_abi_v3_11_3::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_3::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>, void> const&, std::vector<double, std::allocator<double>>&, nlohmann::json_abi_v3_11_3::detail::priority_tag<1u>) | auto from_json_array_impl(const BasicJsonType& j, ConstructibleArrayType& arr, priority_tag<1> /*unused*/)
-> decltype(
arr.reserve(std::declval<typename ConstructibleArrayType::size_type>()),
j.template get<typename ConstructibleArrayType::value_type>(),
void())
{
using std::end;
ConstructibleArrayType ret;
ret.reserve(j.size());
std::transform(j.begin(), j.end(),
std::inserter(ret, end(ret)), [](const BasicJsonType & i)
{
// get<BasicJsonType>() returns *this, so this won't call a from_json
// method when value_type is BasicJsonType
return i.template get<typename ConstructibleArrayType::value_type>();
});
arr = std::move(ret);
} | pushq %rbp
movq %rsp, %rbp
subq $0xe0, %rsp
movq %rdi, -0x10(%rbp)
movq %rsi, -0x18(%rbp)
leaq -0x30(%rbp), %rdi
movq %rdi, -0xb8(%rbp)
callq 0x33120
movq -0x10(%rbp), %rdi
callq 0x3bdb0
movq -0xb8(%rbp), %rdi
movq %rax, %rsi
callq 0x48b10
jmp 0x489cd
movq -0x10(%rbp), %rsi
leaq -0x60(%rbp), %rdi
callq 0x48ca0
movq -0x10(%rbp), %rsi
leaq -0x80(%rbp), %rdi
callq 0x48cd0
leaq -0x30(%rbp), %rdi
callq 0x48d40
movq %rax, -0xc0(%rbp)
jmp 0x489f9
movq -0xc0(%rbp), %rax
movq %rax, -0x98(%rbp)
movq -0x98(%rbp), %rsi
leaq -0x30(%rbp), %rdi
callq 0x48d00
movq %rdx, -0xd0(%rbp)
movq %rax, -0xc8(%rbp)
jmp 0x48a27
movq -0xd0(%rbp), %rax
movq -0xc8(%rbp), %rcx
movq %rcx, -0x90(%rbp)
movq %rax, -0x88(%rbp)
movq -0x90(%rbp), %rdx
movq -0x88(%rbp), %rcx
leaq -0x60(%rbp), %rdi
leaq -0x80(%rbp), %rsi
callq 0x48c10
movq %rdx, -0xe0(%rbp)
movq %rax, -0xd8(%rbp)
jmp 0x48a6e
movq -0xe0(%rbp), %rax
movq -0xd8(%rbp), %rcx
movq %rcx, -0xb0(%rbp)
movq %rax, -0xa8(%rbp)
movq -0x18(%rbp), %rdi
leaq -0x30(%rbp), %rsi
callq 0x3be80
leaq -0x30(%rbp), %rdi
callq 0x31650
addq $0xe0, %rsp
popq %rbp
retq
movq %rax, %rcx
movl %edx, %eax
movq %rcx, -0x38(%rbp)
movl %eax, -0x3c(%rbp)
leaq -0x30(%rbp), %rdi
callq 0x31650
movq -0x38(%rbp), %rdi
callq 0x305e0
nopw (%rax,%rax)
| /pantor[P]ruckig/third_party/nlohmann/json.hpp |
nlohmann::json_abi_v3_11_3::detail::iter_impl<nlohmann::json_abi_v3_11_3::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_3::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>, void> const>::operator++() | iter_impl& operator++()
{
JSON_ASSERT(m_object != nullptr);
switch (m_object->m_data.m_type)
{
case value_t::object:
{
std::advance(m_it.object_iterator, 1);
break;
}
case value_t::array:
{
std::advance(m_it.array_iterator, 1);
break;
}
case value_t::null:
case value_t::string:
case value_t::boolean:
case value_t::number_integer:
case value_t::number_unsigned:
case value_t::number_float:
case value_t::binary:
case value_t::discarded:
default:
{
++m_it.primitive_iterator;
break;
}
}
return *this;
} | pushq %rbp
movq %rsp, %rbp
subq $0x20, %rsp
movq %rdi, -0x8(%rbp)
movq -0x8(%rbp), %rax
movq %rax, -0x10(%rbp)
cmpq $0x0, (%rax)
je 0x491ac
jmp 0x491cb
leaq 0x1aeb9(%rip), %rdi # 0x6406c
leaq 0x196ec(%rip), %rsi # 0x628a6
movl $0x33bf, %edx # imm = 0x33BF
leaq 0x1b0b6(%rip), %rcx # 0x6427c
callq 0x30240
movq -0x10(%rbp), %rax
movq (%rax), %rax
movzbl (%rax), %eax
movq %rax, -0x18(%rbp)
subq $0x9, %rax
ja 0x49221
movq -0x18(%rbp), %rax
leaq 0x19006(%rip), %rcx # 0x621f0
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
movq -0x10(%rbp), %rdi
addq $0x8, %rdi
movl $0x1, %esi
callq 0x49ec0
jmp 0x49232
movq -0x10(%rbp), %rdi
addq $0x8, %rdi
addq $0x8, %rdi
movl $0x1, %esi
callq 0x49f10
jmp 0x49232
jmp 0x49221
movq -0x10(%rbp), %rdi
addq $0x8, %rdi
addq $0x10, %rdi
callq 0x49f60
movq -0x10(%rbp), %rax
addq $0x20, %rsp
popq %rbp
retq
nopl (%rax)
| /pantor[P]ruckig/third_party/nlohmann/json.hpp |
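A minimal sketch of what drives operator++ above: the iterator holds a union and advances whichever member is active for the container's value_t — a map iterator for objects, a vector iterator for arrays, and a one-element primitive counter for everything else. Illustrative code, not from the repository.

#include <iostream>
#include <nlohmann/json.hpp>

int main()
{
    nlohmann::json arr = {1, 2, 3};
    // each loop step runs the operator++ above, advancing
    // m_it.array_iterator because the value is an array
    for (const auto& v : arr) {
        std::cout << v << '\n';
    }

    nlohmann::json num = 7;
    // for a primitive, begin()/end() span a single element and
    // operator++ bumps m_it.primitive_iterator instead
    for (const auto& v : num) {
        std::cout << v << '\n';
    }
}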
bool nlohmann::json_abi_v3_11_3::detail::iter_impl<nlohmann::json_abi_v3_11_3::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_3::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>, void> const>::operator==<nlohmann::json_abi_v3_11_3::detail::iter_impl<nlohmann::json_abi_v3_11_3::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_3::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>, void> const>, nullptr>(nlohmann::json_abi_v3_11_3::detail::iter_impl<nlohmann::json_abi_v3_11_3::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_3::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>, void> const> const&) const | bool operator==(const IterImpl& other) const
{
// if objects are not the same, the comparison is undefined
if (JSON_HEDLEY_UNLIKELY(m_object != other.m_object))
{
JSON_THROW(invalid_iterator::create(212, "cannot compare iterators of different containers", m_object));
}
JSON_ASSERT(m_object != nullptr);
switch (m_object->m_data.m_type)
{
case value_t::object:
return (m_it.object_iterator == other.m_it.object_iterator);
case value_t::array:
return (m_it.array_iterator == other.m_it.array_iterator);
case value_t::null:
case value_t::string:
case value_t::boolean:
case value_t::number_integer:
case value_t::number_unsigned:
case value_t::number_float:
case value_t::binary:
case value_t::discarded:
default:
return (m_it.primitive_iterator == other.m_it.primitive_iterator);
}
} | pushq %rbp
movq %rsp, %rbp
subq $0x80, %rsp
movq %rdi, -0x10(%rbp)
movq %rsi, -0x18(%rbp)
movq -0x10(%rbp), %rax
movq %rax, -0x68(%rbp)
movq (%rax), %rax
movq -0x18(%rbp), %rcx
cmpq (%rcx), %rax
setne %al
xorb $-0x1, %al
xorb $-0x1, %al
testb $0x1, %al
jne 0x49285
jmp 0x49337
movb $0x1, -0x4d(%rbp)
movl $0x20, %edi
callq 0x30190
movq %rax, -0x78(%rbp)
leaq -0x39(%rbp), %rdi
movq %rdi, -0x70(%rbp)
callq 0x305f0
movq -0x70(%rbp), %rdx
leaq 0x1ad8c(%rip), %rsi # 0x6403b
leaq -0x38(%rbp), %rdi
callq 0x3cb40
jmp 0x492ba
movq -0x78(%rbp), %rdi
movq -0x68(%rbp), %rax
movq (%rax), %rcx
movl $0xd4, %esi
leaq -0x38(%rbp), %rdx
callq 0x49410
jmp 0x492d5
movq -0x78(%rbp), %rdi
movb $0x0, -0x4d(%rbp)
leaq 0x338d4(%rip), %rsi # 0x7cbb8
leaq 0x2d5(%rip), %rdx # 0x495c0
callq 0x305c0
jmp 0x4940d
movq %rax, %rcx
movl %edx, %eax
movq %rcx, -0x48(%rbp)
movl %eax, -0x4c(%rbp)
jmp 0x49318
movq %rax, %rcx
movl %edx, %eax
movq %rcx, -0x48(%rbp)
movl %eax, -0x4c(%rbp)
leaq -0x38(%rbp), %rdi
callq 0x30210
leaq -0x39(%rbp), %rdi
callq 0x303d0
testb $0x1, -0x4d(%rbp)
jne 0x49329
jmp 0x49332
movq -0x78(%rbp), %rdi
callq 0x30290
jmp 0x49404
movq -0x68(%rbp), %rax
cmpq $0x0, (%rax)
je 0x49343
jmp 0x49362
leaq 0x1ad22(%rip), %rdi # 0x6406c
leaq 0x19555(%rip), %rsi # 0x628a6
movl $0x3421, %edx # imm = 0x3421
leaq 0x1ad23(%rip), %rcx # 0x64080
callq 0x30240
movq -0x68(%rbp), %rax
movq (%rax), %rax
movzbl (%rax), %eax
movq %rax, -0x80(%rbp)
subq $0x9, %rax
ja 0x493cc
movq -0x80(%rbp), %rax
leaq 0x18e97(%rip), %rcx # 0x62218
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
movq -0x68(%rbp), %rdi
addq $0x8, %rdi
movq -0x18(%rbp), %rsi
addq $0x8, %rsi
callq 0x425c0
andb $0x1, %al
movb %al, -0x1(%rbp)
jmp 0x493f6
movq -0x68(%rbp), %rdi
addq $0x8, %rdi
addq $0x8, %rdi
movq -0x18(%rbp), %rsi
addq $0x8, %rsi
addq $0x8, %rsi
callq 0x495e0
andb $0x1, %al
movb %al, -0x1(%rbp)
jmp 0x493f6
jmp 0x493cc
movq -0x68(%rbp), %rax
movq 0x18(%rax), %rax
movq %rax, -0x58(%rbp)
movq -0x18(%rbp), %rax
movq 0x18(%rax), %rax
movq %rax, -0x60(%rbp)
movq -0x58(%rbp), %rdi
movq -0x60(%rbp), %rsi
callq 0x49620
andb $0x1, %al
movb %al, -0x1(%rbp)
movb -0x1(%rbp), %al
andb $0x1, %al
addq $0x80, %rsp
popq %rbp
retq
movq -0x48(%rbp), %rdi
callq 0x305e0
nopl (%rax)
| /pantor[P]ruckig/third_party/nlohmann/json.hpp |
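A short sketch of the guard clause at the top of operator== above: comparing iterators that belong to different basic_json instances takes the invalid_iterator::create(212, ...) branch rather than returning false. Illustrative code, not from the repository.

#include <iostream>
#include <nlohmann/json.hpp>

int main()
{
    nlohmann::json a = {1, 2};
    nlohmann::json b = {1, 2};
    try {
        // m_object differs between the two iterators, so this throws
        bool same = (a.begin() == b.begin());
        (void)same;
    } catch (const nlohmann::json::invalid_iterator& e) {
        std::cout << e.what() << '\n'; // error 212: different containers
    }
}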
nlohmann::json_abi_v3_11_3::detail::iter_impl<nlohmann::json_abi_v3_11_3::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_3::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>, void> const>::iter_impl(nlohmann::json_abi_v3_11_3::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_3::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>, void> const*) | explicit iter_impl(pointer object) noexcept : m_object(object)
{
JSON_ASSERT(m_object != nullptr);
switch (m_object->m_data.m_type)
{
case value_t::object:
{
m_it.object_iterator = typename object_t::iterator();
break;
}
case value_t::array:
{
m_it.array_iterator = typename array_t::iterator();
break;
}
case value_t::null:
case value_t::string:
case value_t::boolean:
case value_t::number_integer:
case value_t::number_unsigned:
case value_t::number_float:
case value_t::binary:
case value_t::discarded:
default:
{
m_it.primitive_iterator = primitive_iterator_t();
break;
}
}
} | pushq %rbp
movq %rsp, %rbp
subq $0x50, %rsp
movq %rdi, -0x8(%rbp)
movq %rsi, -0x10(%rbp)
movq -0x8(%rbp), %rdi
movq %rdi, -0x30(%rbp)
movq -0x10(%rbp), %rax
movq %rax, (%rdi)
addq $0x8, %rdi
movq %rdi, -0x40(%rbp)
callq 0x4a2d0
movq -0x40(%rbp), %rdi
addq $0x8, %rdi
callq 0x4a2f0
movq -0x40(%rbp), %rdi
addq $0x10, %rdi
movq %rdi, -0x38(%rbp)
xorl %esi, %esi
movl $0x8, %edx
callq 0x301d0
movq -0x38(%rbp), %rdi
callq 0x4a310
movq -0x30(%rbp), %rax
cmpq $0x0, (%rax)
je 0x4a146
jmp 0x4a165
leaq 0x19f1f(%rip), %rdi # 0x6406c
leaq 0x18752(%rip), %rsi # 0x628a6
movl $0x32aa, %edx # imm = 0x32AA
leaq 0x1a1ad(%rip), %rcx # 0x6430d
callq 0x30240
movq -0x30(%rbp), %rax
movq (%rax), %rax
movzbl (%rax), %eax
movq %rax, -0x48(%rbp)
subq $0x9, %rax
ja 0x4a1bd
movq -0x48(%rbp), %rax
leaq 0x180bc(%rip), %rcx # 0x62240
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
leaq -0x18(%rbp), %rdi
callq 0x4a2d0
movq -0x30(%rbp), %rax
movq -0x18(%rbp), %rcx
movq %rcx, 0x8(%rax)
jmp 0x4a1e2
leaq -0x20(%rbp), %rdi
callq 0x4a2f0
movq -0x30(%rbp), %rax
movq -0x20(%rbp), %rcx
movq %rcx, 0x10(%rax)
jmp 0x4a1e2
jmp 0x4a1bd
leaq -0x28(%rbp), %rdi
xorl %esi, %esi
movl $0x8, %edx
callq 0x301d0
leaq -0x28(%rbp), %rdi
callq 0x4a310
movq -0x30(%rbp), %rax
movq -0x28(%rbp), %rcx
movq %rcx, 0x18(%rax)
addq $0x50, %rsp
popq %rbp
retq
nopl (%rax,%rax)
| /pantor[P]ruckig/third_party/nlohmann/json.hpp |
nlohmann::json_abi_v3_11_3::detail::serializer<nlohmann::json_abi_v3_11_3::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_3::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>, void>>::dump(nlohmann::json_abi_v3_11_3::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_3::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>, void> const&, bool, bool, unsigned int, unsigned int) | void dump(const BasicJsonType& val,
const bool pretty_print,
const bool ensure_ascii,
const unsigned int indent_step,
const unsigned int current_indent = 0)
{
switch (val.m_data.m_type)
{
case value_t::object:
{
if (val.m_data.m_value.object->empty())
{
o->write_characters("{}", 2);
return;
}
if (pretty_print)
{
o->write_characters("{\n", 2);
// variable to hold indentation for recursive calls
const auto new_indent = current_indent + indent_step;
if (JSON_HEDLEY_UNLIKELY(indent_string.size() < new_indent))
{
indent_string.resize(indent_string.size() * 2, ' ');
}
// first n-1 elements
auto i = val.m_data.m_value.object->cbegin();
for (std::size_t cnt = 0; cnt < val.m_data.m_value.object->size() - 1; ++cnt, ++i)
{
o->write_characters(indent_string.c_str(), new_indent);
o->write_character('\"');
dump_escaped(i->first, ensure_ascii);
o->write_characters("\": ", 3);
dump(i->second, true, ensure_ascii, indent_step, new_indent);
o->write_characters(",\n", 2);
}
// last element
JSON_ASSERT(i != val.m_data.m_value.object->cend());
JSON_ASSERT(std::next(i) == val.m_data.m_value.object->cend());
o->write_characters(indent_string.c_str(), new_indent);
o->write_character('\"');
dump_escaped(i->first, ensure_ascii);
o->write_characters("\": ", 3);
dump(i->second, true, ensure_ascii, indent_step, new_indent);
o->write_character('\n');
o->write_characters(indent_string.c_str(), current_indent);
o->write_character('}');
}
else
{
o->write_character('{');
// first n-1 elements
auto i = val.m_data.m_value.object->cbegin();
for (std::size_t cnt = 0; cnt < val.m_data.m_value.object->size() - 1; ++cnt, ++i)
{
o->write_character('\"');
dump_escaped(i->first, ensure_ascii);
o->write_characters("\":", 2);
dump(i->second, false, ensure_ascii, indent_step, current_indent);
o->write_character(',');
}
// last element
JSON_ASSERT(i != val.m_data.m_value.object->cend());
JSON_ASSERT(std::next(i) == val.m_data.m_value.object->cend());
o->write_character('\"');
dump_escaped(i->first, ensure_ascii);
o->write_characters("\":", 2);
dump(i->second, false, ensure_ascii, indent_step, current_indent);
o->write_character('}');
}
return;
}
case value_t::array:
{
if (val.m_data.m_value.array->empty())
{
o->write_characters("[]", 2);
return;
}
if (pretty_print)
{
o->write_characters("[\n", 2);
// variable to hold indentation for recursive calls
const auto new_indent = current_indent + indent_step;
if (JSON_HEDLEY_UNLIKELY(indent_string.size() < new_indent))
{
indent_string.resize(indent_string.size() * 2, ' ');
}
// first n-1 elements
for (auto i = val.m_data.m_value.array->cbegin();
i != val.m_data.m_value.array->cend() - 1; ++i)
{
o->write_characters(indent_string.c_str(), new_indent);
dump(*i, true, ensure_ascii, indent_step, new_indent);
o->write_characters(",\n", 2);
}
// last element
JSON_ASSERT(!val.m_data.m_value.array->empty());
o->write_characters(indent_string.c_str(), new_indent);
dump(val.m_data.m_value.array->back(), true, ensure_ascii, indent_step, new_indent);
o->write_character('\n');
o->write_characters(indent_string.c_str(), current_indent);
o->write_character(']');
}
else
{
o->write_character('[');
// first n-1 elements
for (auto i = val.m_data.m_value.array->cbegin();
i != val.m_data.m_value.array->cend() - 1; ++i)
{
dump(*i, false, ensure_ascii, indent_step, current_indent);
o->write_character(',');
}
// last element
JSON_ASSERT(!val.m_data.m_value.array->empty());
dump(val.m_data.m_value.array->back(), false, ensure_ascii, indent_step, current_indent);
o->write_character(']');
}
return;
}
case value_t::string:
{
o->write_character('\"');
dump_escaped(*val.m_data.m_value.string, ensure_ascii);
o->write_character('\"');
return;
}
case value_t::binary:
{
if (pretty_print)
{
o->write_characters("{\n", 2);
// variable to hold indentation for recursive calls
const auto new_indent = current_indent + indent_step;
if (JSON_HEDLEY_UNLIKELY(indent_string.size() < new_indent))
{
indent_string.resize(indent_string.size() * 2, ' ');
}
o->write_characters(indent_string.c_str(), new_indent);
o->write_characters("\"bytes\": [", 10);
if (!val.m_data.m_value.binary->empty())
{
for (auto i = val.m_data.m_value.binary->cbegin();
i != val.m_data.m_value.binary->cend() - 1; ++i)
{
dump_integer(*i);
o->write_characters(", ", 2);
}
dump_integer(val.m_data.m_value.binary->back());
}
o->write_characters("],\n", 3);
o->write_characters(indent_string.c_str(), new_indent);
o->write_characters("\"subtype\": ", 11);
if (val.m_data.m_value.binary->has_subtype())
{
dump_integer(val.m_data.m_value.binary->subtype());
}
else
{
o->write_characters("null", 4);
}
o->write_character('\n');
o->write_characters(indent_string.c_str(), current_indent);
o->write_character('}');
}
else
{
o->write_characters("{\"bytes\":[", 10);
if (!val.m_data.m_value.binary->empty())
{
for (auto i = val.m_data.m_value.binary->cbegin();
i != val.m_data.m_value.binary->cend() - 1; ++i)
{
dump_integer(*i);
o->write_character(',');
}
dump_integer(val.m_data.m_value.binary->back());
}
o->write_characters("],\"subtype\":", 12);
if (val.m_data.m_value.binary->has_subtype())
{
dump_integer(val.m_data.m_value.binary->subtype());
o->write_character('}');
}
else
{
o->write_characters("null}", 5);
}
}
return;
}
case value_t::boolean:
{
if (val.m_data.m_value.boolean)
{
o->write_characters("true", 4);
}
else
{
o->write_characters("false", 5);
}
return;
}
case value_t::number_integer:
{
dump_integer(val.m_data.m_value.number_integer);
return;
}
case value_t::number_unsigned:
{
dump_integer(val.m_data.m_value.number_unsigned);
return;
}
case value_t::number_float:
{
dump_float(val.m_data.m_value.number_float);
return;
}
case value_t::discarded:
{
o->write_characters("<discarded>", 11);
return;
}
case value_t::null:
{
o->write_characters("null", 4);
return;
}
default: // LCOV_EXCL_LINE
JSON_ASSERT(false); // NOLINT(cert-dcl03-c,hicpp-static-assert,misc-static-assert) LCOV_EXCL_LINE
}
} | pushq %rbp
movq %rsp, %rbp
subq $0x170, %rsp # imm = 0x170
movb %cl, %al
movb %dl, %cl
movq %rdi, -0x8(%rbp)
movq %rsi, -0x10(%rbp)
andb $0x1, %cl
movb %cl, -0x11(%rbp)
andb $0x1, %al
movb %al, -0x12(%rbp)
movl %r8d, -0x18(%rbp)
movl %r9d, -0x1c(%rbp)
movq -0x8(%rbp), %rax
movq %rax, -0x100(%rbp)
movq -0x10(%rbp), %rax
movzbl (%rax), %eax
movq %rax, -0xf8(%rbp)
subq $0x9, %rax
ja 0x4bb2c
movq -0xf8(%rbp), %rax
leaq 0x177ed(%rip), %rcx # 0x622b8
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rdi
callq 0x4a7a0
testb $0x1, %al
jne 0x4aae7
jmp 0x4ab0d
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
leaq 0x1997e(%rip), %rsi # 0x6447e
movl $0x2, %edx
callq *0x8(%rax)
jmp 0x4bb4b
testb $0x1, -0x11(%rbp)
je 0x4aec6
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
leaq 0x19951(%rip), %rsi # 0x64481
movl $0x2, %edx
callq *0x8(%rax)
movq -0x100(%rbp), %rdi
movl -0x1c(%rbp), %eax
addl -0x18(%rbp), %eax
movl %eax, -0x20(%rbp)
addq $0x260, %rdi # imm = 0x260
callq 0x30270
movl -0x20(%rbp), %ecx
cmpq %rcx, %rax
setb %al
xorb $-0x1, %al
xorb $-0x1, %al
testb $0x1, %al
jne 0x4ab67
jmp 0x4aba1
movq -0x100(%rbp), %rdi
movq %rdi, %rax
addq $0x260, %rax # imm = 0x260
movq %rax, -0x108(%rbp)
addq $0x260, %rdi # imm = 0x260
callq 0x30270
movq -0x108(%rbp), %rdi
movq %rax, %rsi
shlq %rsi
movl $0x20, %edx
callq 0x300b0
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rdi
callq 0x4caa0
movq %rax, -0x28(%rbp)
movq $0x0, -0x30(%rbp)
movq -0x30(%rbp), %rax
movq %rax, -0x110(%rbp)
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rdi
callq 0x40170
movq %rax, %rcx
movq -0x110(%rbp), %rax
subq $0x1, %rcx
cmpq %rcx, %rax
jae 0x4acea
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq -0x100(%rbp), %rdi
movq %rax, -0x118(%rbp)
addq $0x260, %rdi # imm = 0x260
callq 0x30110
movq -0x118(%rbp), %rdi
movq %rax, %rsi
movl -0x20(%rbp), %eax
movl %eax, %edx
movq (%rdi), %rax
callq *0x8(%rax)
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
movl $0x22, %esi
callq *(%rax)
leaq -0x28(%rbp), %rdi
callq 0x4d600
movq -0x100(%rbp), %rdi
movq %rax, %rsi
movb -0x12(%rbp), %al
andb $0x1, %al
movzbl %al, %edx
callq 0x4cad0
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
leaq 0x1996a(%rip), %rsi # 0x645e0
movl $0x3, %edx
callq *0x8(%rax)
leaq -0x28(%rbp), %rdi
callq 0x4d600
movq -0x100(%rbp), %rdi
movq %rax, %rsi
addq $0x20, %rsi
movb -0x12(%rbp), %al
movl -0x18(%rbp), %r8d
movl -0x20(%rbp), %r9d
movl $0x1, %edx
andb $0x1, %al
movzbl %al, %ecx
callq 0x4aa70
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
leaq 0x1990d(%rip), %rsi # 0x645d5
movl $0x2, %edx
callq *0x8(%rax)
movq -0x30(%rbp), %rax
addq $0x1, %rax
movq %rax, -0x30(%rbp)
leaq -0x28(%rbp), %rdi
callq 0x4d620
jmp 0x4abba
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rdi
callq 0x4d680
movq %rax, -0x38(%rbp)
leaq -0x28(%rbp), %rdi
leaq -0x38(%rbp), %rsi
callq 0x4d650
testb $0x1, %al
jne 0x4ad0e
jmp 0x4ad10
jmp 0x4ad2f
leaq 0x1976d(%rip), %rdi # 0x64484
leaq 0x17b88(%rip), %rsi # 0x628a6
movl $0x46eb, %edx # imm = 0x46EB
leaq 0x19781(%rip), %rcx # 0x644ab
callq 0x30240
movq -0x28(%rbp), %rax
movq %rax, -0x48(%rbp)
movq -0x48(%rbp), %rdi
movl $0x1, %esi
callq 0x4d6e0
movq %rax, -0x40(%rbp)
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rdi
callq 0x4d680
movq %rax, -0x50(%rbp)
leaq -0x40(%rbp), %rdi
leaq -0x50(%rbp), %rsi
callq 0x4d6b0
testb $0x1, %al
jne 0x4ad6d
jmp 0x4ad6f
jmp 0x4ad8e
leaq 0x197f5(%rip), %rdi # 0x6456b
leaq 0x17b29(%rip), %rsi # 0x628a6
movl $0x46ec, %edx # imm = 0x46EC
leaq 0x19722(%rip), %rcx # 0x644ab
callq 0x30240
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq -0x100(%rbp), %rdi
movq %rax, -0x128(%rbp)
addq $0x260, %rdi # imm = 0x260
callq 0x30110
movq -0x128(%rbp), %rdi
movq %rax, %rsi
movl -0x20(%rbp), %eax
movl %eax, %edx
movq (%rdi), %rax
callq *0x8(%rax)
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
movl $0x22, %esi
callq *(%rax)
leaq -0x28(%rbp), %rdi
callq 0x4d600
movq -0x100(%rbp), %rdi
movq %rax, %rsi
movb -0x12(%rbp), %al
andb $0x1, %al
movzbl %al, %edx
callq 0x4cad0
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
leaq 0x197c5(%rip), %rsi # 0x645e0
movl $0x3, %edx
callq *0x8(%rax)
leaq -0x28(%rbp), %rdi
callq 0x4d600
movq -0x100(%rbp), %rdi
movq %rax, %rsi
addq $0x20, %rsi
movb -0x12(%rbp), %al
movl -0x18(%rbp), %r8d
movl -0x20(%rbp), %r9d
movl $0x1, %edx
andb $0x1, %al
movzbl %al, %ecx
callq 0x4aa70
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
movl $0xa, %esi
callq *(%rax)
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq -0x100(%rbp), %rdi
movq %rax, -0x120(%rbp)
addq $0x260, %rdi # imm = 0x260
callq 0x30110
movq -0x120(%rbp), %rdi
movq %rax, %rsi
movl -0x1c(%rbp), %eax
movl %eax, %edx
movq (%rdi), %rax
callq *0x8(%rax)
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
movl $0x7d, %esi
callq *(%rax)
jmp 0x4b127
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
movl $0x7b, %esi
callq *(%rax)
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rdi
callq 0x4caa0
movq %rax, -0x58(%rbp)
movq $0x0, -0x60(%rbp)
movq -0x60(%rbp), %rax
movq %rax, -0x130(%rbp)
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rdi
callq 0x40170
movq %rax, %rcx
movq -0x130(%rbp), %rax
subq $0x1, %rcx
cmpq %rcx, %rax
jae 0x4afe2
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
movl $0x22, %esi
callq *(%rax)
leaq -0x58(%rbp), %rdi
callq 0x4d600
movq -0x100(%rbp), %rdi
movq %rax, %rsi
movb -0x12(%rbp), %al
andb $0x1, %al
movzbl %al, %edx
callq 0x4cad0
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
leaq 0x19680(%rip), %rsi # 0x645f9
movl $0x2, %edx
callq *0x8(%rax)
leaq -0x58(%rbp), %rdi
callq 0x4d600
movq -0x100(%rbp), %rdi
movq %rax, %rsi
addq $0x20, %rsi
movb -0x12(%rbp), %al
movl -0x18(%rbp), %r8d
movl -0x1c(%rbp), %r9d
xorl %edx, %edx
andb $0x1, %al
movzbl %al, %ecx
callq 0x4aa70
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
movl $0x2c, %esi
callq *(%rax)
movq -0x60(%rbp), %rax
addq $0x1, %rax
movq %rax, -0x60(%rbp)
leaq -0x58(%rbp), %rdi
callq 0x4d620
jmp 0x4aef8
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rdi
callq 0x4d680
movq %rax, -0x68(%rbp)
leaq -0x58(%rbp), %rdi
leaq -0x68(%rbp), %rsi
callq 0x4d650
testb $0x1, %al
jne 0x4b006
jmp 0x4b008
jmp 0x4b027
leaq 0x19475(%rip), %rdi # 0x64484
leaq 0x17890(%rip), %rsi # 0x628a6
movl $0x4707, %edx # imm = 0x4707
leaq 0x19489(%rip), %rcx # 0x644ab
callq 0x30240
movq -0x58(%rbp), %rax
movq %rax, -0x78(%rbp)
movq -0x78(%rbp), %rdi
movl $0x1, %esi
callq 0x4d6e0
movq %rax, -0x70(%rbp)
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rdi
callq 0x4d680
movq %rax, -0x80(%rbp)
leaq -0x70(%rbp), %rdi
leaq -0x80(%rbp), %rsi
callq 0x4d6b0
testb $0x1, %al
jne 0x4b065
jmp 0x4b067
jmp 0x4b086
leaq 0x194fd(%rip), %rdi # 0x6456b
leaq 0x17831(%rip), %rsi # 0x628a6
movl $0x4708, %edx # imm = 0x4708
leaq 0x1942a(%rip), %rcx # 0x644ab
callq 0x30240
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
movl $0x22, %esi
callq *(%rax)
leaq -0x58(%rbp), %rdi
callq 0x4d600
movq -0x100(%rbp), %rdi
movq %rax, %rsi
movb -0x12(%rbp), %al
andb $0x1, %al
movzbl %al, %edx
callq 0x4cad0
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
leaq 0x19521(%rip), %rsi # 0x645f9
movl $0x2, %edx
callq *0x8(%rax)
leaq -0x58(%rbp), %rdi
callq 0x4d600
movq -0x100(%rbp), %rdi
movq %rax, %rsi
addq $0x20, %rsi
movb -0x12(%rbp), %al
movl -0x18(%rbp), %r8d
movl -0x1c(%rbp), %r9d
xorl %edx, %edx
andb $0x1, %al
movzbl %al, %ecx
callq 0x4aa70
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
movl $0x7d, %esi
callq *(%rax)
jmp 0x4bb4b
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rdi
callq 0x402b0
testb $0x1, %al
jne 0x4b13f
jmp 0x4b165
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
leaq 0x19445(%rip), %rsi # 0x6459d
movl $0x2, %edx
callq *0x8(%rax)
jmp 0x4bb4b
testb $0x1, -0x11(%rbp)
je 0x4b418
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
leaq 0x19418(%rip), %rsi # 0x645a0
movl $0x2, %edx
callq *0x8(%rax)
movq -0x100(%rbp), %rdi
movl -0x1c(%rbp), %eax
addl -0x18(%rbp), %eax
movl %eax, -0x84(%rbp)
addq $0x260, %rdi # imm = 0x260
callq 0x30270
movl -0x84(%rbp), %ecx
cmpq %rcx, %rax
setb %al
xorb $-0x1, %al
xorb $-0x1, %al
testb $0x1, %al
jne 0x4b1c5
jmp 0x4b1ff
movq -0x100(%rbp), %rdi
movq %rdi, %rax
addq $0x260, %rax # imm = 0x260
movq %rax, -0x138(%rbp)
addq $0x260, %rdi # imm = 0x260
callq 0x30270
movq -0x138(%rbp), %rdi
movq %rax, %rsi
shlq %rsi
movl $0x20, %edx
callq 0x300b0
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rdi
callq 0x4d710
movq %rax, -0x90(%rbp)
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rdi
callq 0x4d740
movq %rax, -0xa0(%rbp)
leaq -0xa0(%rbp), %rdi
movl $0x1, %esi
callq 0x4d770
movq %rax, -0x98(%rbp)
leaq -0x90(%rbp), %rdi
leaq -0x98(%rbp), %rsi
callq 0x44f90
testb $0x1, %al
jne 0x4b25b
jmp 0x4b2fe
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq -0x100(%rbp), %rdi
movq %rax, -0x140(%rbp)
addq $0x260, %rdi # imm = 0x260
callq 0x30110
movq -0x140(%rbp), %rdi
movq %rax, %rsi
movl -0x84(%rbp), %eax
movl %eax, %edx
movq (%rdi), %rax
callq *0x8(%rax)
leaq -0x90(%rbp), %rdi
callq 0x45000
movq -0x100(%rbp), %rdi
movq %rax, %rsi
movb -0x12(%rbp), %al
movl -0x18(%rbp), %r8d
movl -0x84(%rbp), %r9d
movl $0x1, %edx
andb $0x1, %al
movzbl %al, %ecx
callq 0x4aa70
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
leaq 0x192f0(%rip), %rsi # 0x645d5
movl $0x2, %edx
callq *0x8(%rax)
leaq -0x90(%rbp), %rdi
callq 0x45020
jmp 0x4b213
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rdi
callq 0x402b0
xorb $-0x1, %al
testb $0x1, %al
jne 0x4b313
jmp 0x4b315
jmp 0x4b334
leaq 0x19287(%rip), %rdi # 0x645a3
leaq 0x17583(%rip), %rsi # 0x628a6
movl $0x4731, %edx # imm = 0x4731
leaq 0x1917c(%rip), %rcx # 0x644ab
callq 0x30240
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq -0x100(%rbp), %rdi
movq %rax, -0x150(%rbp)
addq $0x260, %rdi # imm = 0x260
callq 0x30110
movq -0x150(%rbp), %rdi
movq %rax, %rsi
movl -0x84(%rbp), %eax
movl %eax, %edx
movq (%rdi), %rax
callq *0x8(%rax)
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rdi
callq 0x402f0
movq -0x100(%rbp), %rdi
movq %rax, %rsi
movb -0x12(%rbp), %al
movl -0x18(%rbp), %r8d
movl -0x84(%rbp), %r9d
movl $0x1, %edx
andb $0x1, %al
movzbl %al, %ecx
callq 0x4aa70
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
movl $0xa, %esi
callq *(%rax)
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq -0x100(%rbp), %rdi
movq %rax, -0x148(%rbp)
addq $0x260, %rdi # imm = 0x260
callq 0x30110
movq -0x148(%rbp), %rdi
movq %rax, %rsi
movl -0x1c(%rbp), %eax
movl %eax, %edx
movq (%rdi), %rax
callq *0x8(%rax)
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
movl $0x5d, %esi
callq *(%rax)
jmp 0x4b55e
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
movl $0x5b, %esi
callq *(%rax)
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rdi
callq 0x4d710
movq %rax, -0xa8(%rbp)
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rdi
callq 0x4d740
movq %rax, -0xb8(%rbp)
leaq -0xb8(%rbp), %rdi
movl $0x1, %esi
callq 0x4d770
movq %rax, -0xb0(%rbp)
leaq -0xa8(%rbp), %rdi
leaq -0xb0(%rbp), %rsi
callq 0x44f90
testb $0x1, %al
jne 0x4b48a
jmp 0x4b4e1
leaq -0xa8(%rbp), %rdi
callq 0x45000
movq -0x100(%rbp), %rdi
movq %rax, %rsi
movb -0x12(%rbp), %al
movl -0x18(%rbp), %r8d
movl -0x1c(%rbp), %r9d
xorl %edx, %edx
andb $0x1, %al
movzbl %al, %ecx
callq 0x4aa70
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
movl $0x2c, %esi
callq *(%rax)
leaq -0xa8(%rbp), %rdi
callq 0x45020
jmp 0x4b445
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rdi
callq 0x402b0
xorb $-0x1, %al
testb $0x1, %al
jne 0x4b4f6
jmp 0x4b4f8
jmp 0x4b517
leaq 0x190a4(%rip), %rdi # 0x645a3
leaq 0x173a0(%rip), %rsi # 0x628a6
movl $0x4746, %edx # imm = 0x4746
leaq 0x18f99(%rip), %rcx # 0x644ab
callq 0x30240
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rdi
callq 0x402f0
movq -0x100(%rbp), %rdi
movq %rax, %rsi
movb -0x12(%rbp), %al
movl -0x18(%rbp), %r8d
movl -0x1c(%rbp), %r9d
xorl %edx, %edx
andb $0x1, %al
movzbl %al, %ecx
callq 0x4aa70
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
movl $0x5d, %esi
callq *(%rax)
jmp 0x4bb4b
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
movl $0x22, %esi
callq *(%rax)
movq -0x100(%rbp), %rdi
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rsi
movb -0x12(%rbp), %al
andb $0x1, %al
movzbl %al, %edx
callq 0x4cad0
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
movl $0x22, %esi
callq *(%rax)
jmp 0x4bb4b
testb $0x1, -0x11(%rbp)
je 0x4b8ca
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
leaq 0x18ea8(%rip), %rsi # 0x64481
movl $0x2, %edx
callq *0x8(%rax)
movq -0x100(%rbp), %rdi
movl -0x1c(%rbp), %eax
addl -0x18(%rbp), %eax
movl %eax, -0xbc(%rbp)
addq $0x260, %rdi # imm = 0x260
callq 0x30270
movl -0xbc(%rbp), %ecx
cmpq %rcx, %rax
setb %al
xorb $-0x1, %al
xorb $-0x1, %al
testb $0x1, %al
jne 0x4b616
jmp 0x4b650
movq -0x100(%rbp), %rdi
movq %rdi, %rax
addq $0x260, %rax # imm = 0x260
movq %rax, -0x158(%rbp)
addq $0x260, %rdi # imm = 0x260
callq 0x30270
movq -0x158(%rbp), %rdi
movq %rax, %rsi
shlq %rsi
movl $0x20, %edx
callq 0x300b0
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq -0x100(%rbp), %rdi
movq %rax, -0x160(%rbp)
addq $0x260, %rdi # imm = 0x260
callq 0x30110
movq -0x160(%rbp), %rdi
movq %rax, %rsi
movl -0xbc(%rbp), %eax
movl %eax, %edx
movq (%rdi), %rax
callq *0x8(%rax)
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
leaq 0x18f1f(%rip), %rsi # 0x645c6
movl $0xa, %edx
callq *0x8(%rax)
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rdi
callq 0x4d7b0
testb $0x1, %al
jne 0x4b786
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rdi
callq 0x4d7f0
movq %rax, -0xc8(%rbp)
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rdi
callq 0x4d860
movq %rax, -0xd8(%rbp)
leaq -0xd8(%rbp), %rdi
movl $0x1, %esi
callq 0x4d890
movq %rax, -0xd0(%rbp)
leaq -0xc8(%rbp), %rdi
leaq -0xd0(%rbp), %rsi
callq 0x4d820
testb $0x1, %al
jne 0x4b71d
jmp 0x4b76a
leaq -0xc8(%rbp), %rdi
callq 0x4db30
movq -0x100(%rbp), %rdi
movzbl (%rax), %esi
callq 0x4d8d0
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
leaq 0x18e80(%rip), %rsi # 0x645d1
movl $0x2, %edx
callq *0x8(%rax)
leaq -0xc8(%rbp), %rdi
callq 0x4db50
jmp 0x4b6d8
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rdi
callq 0x4db70
movq -0x100(%rbp), %rdi
movzbl (%rax), %esi
callq 0x4d8d0
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
leaq 0x18e35(%rip), %rsi # 0x645d4
movl $0x3, %edx
callq *0x8(%rax)
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq -0x100(%rbp), %rdi
movq %rax, -0x168(%rbp)
addq $0x260, %rdi # imm = 0x260
callq 0x30110
movq -0x168(%rbp), %rdi
movq %rax, %rsi
movl -0xbc(%rbp), %eax
movl %eax, %edx
movq (%rdi), %rax
callq *0x8(%rax)
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
leaq 0x18dda(%rip), %rsi # 0x645d8
movl $0xb, %edx
callq *0x8(%rax)
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rdi
callq 0x4dbb0
testb $0x1, %al
jne 0x4b819
jmp 0x4b837
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rdi
callq 0x4de30
movq -0x100(%rbp), %rdi
movq %rax, %rsi
callq 0x4dbd0
jmp 0x4b858
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
leaq 0x17b1a(%rip), %rsi # 0x6336a
movl $0x4, %edx
callq *0x8(%rax)
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
movl $0xa, %esi
callq *(%rax)
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq -0x100(%rbp), %rdi
movq %rax, -0x170(%rbp)
addq $0x260, %rdi # imm = 0x260
callq 0x30110
movq -0x170(%rbp), %rdi
movq %rax, %rsi
movl -0x1c(%rbp), %eax
movl %eax, %edx
movq (%rdi), %rax
callq *0x8(%rax)
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
movl $0x7d, %esi
callq *(%rax)
jmp 0x4ba48
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
leaq 0x18d01(%rip), %rsi # 0x645e4
movl $0xa, %edx
callq *0x8(%rax)
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rdi
callq 0x4d7b0
testb $0x1, %al
jne 0x4b9ba
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rdi
callq 0x4d7f0
movq %rax, -0xe0(%rbp)
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rdi
callq 0x4d860
movq %rax, -0xf0(%rbp)
leaq -0xf0(%rbp), %rdi
movl $0x1, %esi
callq 0x4d890
movq %rax, -0xe8(%rbp)
leaq -0xe0(%rbp), %rdi
leaq -0xe8(%rbp), %rsi
callq 0x4d820
testb $0x1, %al
jne 0x4b959
jmp 0x4b99e
leaq -0xe0(%rbp), %rdi
callq 0x4db30
movq -0x100(%rbp), %rdi
movzbl (%rax), %esi
callq 0x4d8d0
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
movl $0x2c, %esi
callq *(%rax)
leaq -0xe0(%rbp), %rdi
callq 0x4db50
jmp 0x4b914
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rdi
callq 0x4db70
movq -0x100(%rbp), %rdi
movzbl (%rax), %esi
callq 0x4d8d0
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
leaq 0x18c1c(%rip), %rsi # 0x645ef
movl $0xc, %edx
callq *0x8(%rax)
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rdi
callq 0x4dbb0
testb $0x1, %al
jne 0x4b9ee
jmp 0x4ba25
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rdi
callq 0x4de30
movq -0x100(%rbp), %rdi
movq %rax, %rsi
callq 0x4dbd0
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
movl $0x7d, %esi
callq *(%rax)
jmp 0x4ba46
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
leaq 0x18bbe(%rip), %rsi # 0x645fc
movl $0x5, %edx
callq *0x8(%rax)
jmp 0x4ba48
jmp 0x4bb4b
movq -0x10(%rbp), %rax
testb $0x1, 0x8(%rax)
je 0x4ba7a
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
leaq 0x18b92(%rip), %rsi # 0x64602
movl $0x4, %edx
callq *0x8(%rax)
jmp 0x4ba9b
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
leaq 0x18b74(%rip), %rsi # 0x64607
movl $0x5, %edx
callq *0x8(%rax)
jmp 0x4bb4b
movq -0x100(%rbp), %rdi
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rsi
callq 0x4de70
jmp 0x4bb4b
movq -0x100(%rbp), %rdi
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rsi
callq 0x4dbd0
jmp 0x4bb4b
movq -0x100(%rbp), %rdi
movq -0x10(%rbp), %rax
movsd 0x8(%rax), %xmm0
callq 0x4e0d0
jmp 0x4bb4b
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
leaq 0x18b0e(%rip), %rsi # 0x6460d
movl $0xb, %edx
callq *0x8(%rax)
jmp 0x4bb4b
movq -0x100(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
leaq 0x17848(%rip), %rsi # 0x6336a
movl $0x4, %edx
callq *0x8(%rax)
jmp 0x4bb4b
leaq 0x18ad4(%rip), %rdi # 0x64607
leaq 0x16d6c(%rip), %rsi # 0x628a6
movl $0x47cc, %edx # imm = 0x47CC
leaq 0x18965(%rip), %rcx # 0x644ab
callq 0x30240
addq $0x170, %rsp # imm = 0x170
popq %rbp
retq
nopw %cs:(%rax,%rax)
nop
| /pantor[P]ruckig/third_party/nlohmann/json.hpp |
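A brief usage sketch: serializer::dump above is the recursive workhorse behind basic_json::dump(); passing an indent selects the pretty_print branches (including the expanded {"bytes": [...], "subtype": ...} form for binary values), while the default produces the compact one-line output. Illustrative code, not from the repository.

#include <iostream>
#include <nlohmann/json.hpp>

int main()
{
    nlohmann::json j = {{"blob", nlohmann::json::binary({0xCA, 0xFE})},
                        {"list", {1, 2}}};
    std::cout << j.dump() << '\n';  // compact: pretty_print == false path
    std::cout << j.dump(4) << '\n'; // indent_step = 4: pretty_print path
}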
void nlohmann::json_abi_v3_11_3::detail::serializer<nlohmann::json_abi_v3_11_3::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_3::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>, void>>::dump_integer<unsigned long, 0>(unsigned long) | void dump_integer(NumberType x)
{
static constexpr std::array<std::array<char, 2>, 100> digits_to_99
{
{
{{'0', '0'}}, {{'0', '1'}}, {{'0', '2'}}, {{'0', '3'}}, {{'0', '4'}}, {{'0', '5'}}, {{'0', '6'}}, {{'0', '7'}}, {{'0', '8'}}, {{'0', '9'}},
{{'1', '0'}}, {{'1', '1'}}, {{'1', '2'}}, {{'1', '3'}}, {{'1', '4'}}, {{'1', '5'}}, {{'1', '6'}}, {{'1', '7'}}, {{'1', '8'}}, {{'1', '9'}},
{{'2', '0'}}, {{'2', '1'}}, {{'2', '2'}}, {{'2', '3'}}, {{'2', '4'}}, {{'2', '5'}}, {{'2', '6'}}, {{'2', '7'}}, {{'2', '8'}}, {{'2', '9'}},
{{'3', '0'}}, {{'3', '1'}}, {{'3', '2'}}, {{'3', '3'}}, {{'3', '4'}}, {{'3', '5'}}, {{'3', '6'}}, {{'3', '7'}}, {{'3', '8'}}, {{'3', '9'}},
{{'4', '0'}}, {{'4', '1'}}, {{'4', '2'}}, {{'4', '3'}}, {{'4', '4'}}, {{'4', '5'}}, {{'4', '6'}}, {{'4', '7'}}, {{'4', '8'}}, {{'4', '9'}},
{{'5', '0'}}, {{'5', '1'}}, {{'5', '2'}}, {{'5', '3'}}, {{'5', '4'}}, {{'5', '5'}}, {{'5', '6'}}, {{'5', '7'}}, {{'5', '8'}}, {{'5', '9'}},
{{'6', '0'}}, {{'6', '1'}}, {{'6', '2'}}, {{'6', '3'}}, {{'6', '4'}}, {{'6', '5'}}, {{'6', '6'}}, {{'6', '7'}}, {{'6', '8'}}, {{'6', '9'}},
{{'7', '0'}}, {{'7', '1'}}, {{'7', '2'}}, {{'7', '3'}}, {{'7', '4'}}, {{'7', '5'}}, {{'7', '6'}}, {{'7', '7'}}, {{'7', '8'}}, {{'7', '9'}},
{{'8', '0'}}, {{'8', '1'}}, {{'8', '2'}}, {{'8', '3'}}, {{'8', '4'}}, {{'8', '5'}}, {{'8', '6'}}, {{'8', '7'}}, {{'8', '8'}}, {{'8', '9'}},
{{'9', '0'}}, {{'9', '1'}}, {{'9', '2'}}, {{'9', '3'}}, {{'9', '4'}}, {{'9', '5'}}, {{'9', '6'}}, {{'9', '7'}}, {{'9', '8'}}, {{'9', '9'}},
}
};
// special case for "0"
if (x == 0)
{
o->write_character('0');
return;
}
// use a pointer to fill the buffer
auto buffer_ptr = number_buffer.begin(); // NOLINT(llvm-qualified-auto,readability-qualified-auto,cppcoreguidelines-pro-type-vararg,hicpp-vararg)
number_unsigned_t abs_value;
unsigned int n_chars{};
if (is_negative_number(x))
{
*buffer_ptr = '-';
abs_value = remove_sign(static_cast<number_integer_t>(x));
// reserve one more byte for the minus sign
n_chars = 1 + count_digits(abs_value);
}
else
{
abs_value = static_cast<number_unsigned_t>(x);
n_chars = count_digits(abs_value);
}
// spare 1 byte for '\0'
JSON_ASSERT(n_chars < number_buffer.size() - 1);
// jump to the end and generate the string backwards,
// so we avoid having to reverse the result later
buffer_ptr += n_chars;
// Fast int2ascii implementation inspired by "Fastware" talk by Andrei Alexandrescu
// See: https://www.youtube.com/watch?v=o4-CwDo2zpg
while (abs_value >= 100)
{
const auto digits_index = static_cast<unsigned>((abs_value % 100));
abs_value /= 100;
*(--buffer_ptr) = digits_to_99[digits_index][1];
*(--buffer_ptr) = digits_to_99[digits_index][0];
}
if (abs_value >= 10)
{
const auto digits_index = static_cast<unsigned>(abs_value);
*(--buffer_ptr) = digits_to_99[digits_index][1];
*(--buffer_ptr) = digits_to_99[digits_index][0];
}
else
{
*(--buffer_ptr) = static_cast<char>('0' + abs_value);
}
o->write_characters(number_buffer.data(), n_chars);
} | pushq %rbp
movq %rsp, %rbp
subq $0x50, %rsp
movq %rdi, -0x8(%rbp)
movq %rsi, -0x10(%rbp)
movq -0x8(%rbp), %rax
movq %rax, -0x38(%rbp)
cmpq $0x0, -0x10(%rbp)
jne 0x4dc0a
movq -0x38(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
movl $0x30, %esi
callq *(%rax)
jmp 0x4de22
movq -0x38(%rbp), %rdi
addq $0x10, %rdi
callq 0x4ec50
movq -0x38(%rbp), %rdi
movq %rax, -0x18(%rbp)
movl $0x0, -0x24(%rbp)
movq -0x10(%rbp), %rsi
callq 0x4ef30
testb $0x1, %al
jne 0x4dc35
jmp 0x4dc62
movq -0x38(%rbp), %rdi
movq -0x18(%rbp), %rax
movb $0x2d, (%rax)
movq -0x10(%rbp), %rsi
callq 0x4ec90
movq -0x38(%rbp), %rdi
movq %rax, -0x20(%rbp)
movq -0x20(%rbp), %rsi
callq 0x4ed10
addl $0x1, %eax
movl %eax, -0x24(%rbp)
jmp 0x4dc7a
movq -0x38(%rbp), %rdi
movq -0x10(%rbp), %rax
movq %rax, -0x20(%rbp)
movq -0x20(%rbp), %rsi
callq 0x4ed10
movl %eax, -0x24(%rbp)
movq -0x38(%rbp), %rdi
movl -0x24(%rbp), %eax
movq %rax, -0x40(%rbp)
addq $0x10, %rdi
callq 0x4ed90
movq %rax, %rcx
movq -0x40(%rbp), %rax
subq $0x1, %rcx
cmpq %rcx, %rax
jae 0x4dca0
jmp 0x4dcbf
leaq 0x16b49(%rip), %rdi # 0x647f0
leaq 0x14bf8(%rip), %rsi # 0x628a6
movl $0x4951, %edx # imm = 0x4951
leaq 0x16cbd(%rip), %rcx # 0x64977
callq 0x30240
movl -0x24(%rbp), %ecx
movq -0x18(%rbp), %rax
movl %ecx, %ecx
addq %rcx, %rax
movq %rax, -0x18(%rbp)
cmpq $0x64, -0x20(%rbp)
jb 0x4dd67
movq -0x20(%rbp), %rax
movl $0x64, %ecx
xorl %edx, %edx
divq %rcx
movl %edx, %eax
movl %eax, -0x28(%rbp)
movq -0x20(%rbp), %rax
movl $0x64, %ecx
xorl %edx, %edx
divq %rcx
movq %rax, -0x20(%rbp)
movl -0x28(%rbp), %eax
movl %eax, %esi
leaq 0x17dd6(%rip), %rdi # 0x65ae1
callq 0x4eda0
movq %rax, %rdi
movl $0x1, %esi
callq 0x4edd0
movb (%rax), %cl
movq -0x18(%rbp), %rax
movq %rax, %rdx
addq $-0x1, %rdx
movq %rdx, -0x18(%rbp)
movb %cl, -0x1(%rax)
movl -0x28(%rbp), %eax
movl %eax, %esi
leaq 0x17da4(%rip), %rdi # 0x65ae1
callq 0x4eda0
movq %rax, %rdi
xorl %eax, %eax
movl %eax, %esi
callq 0x4edd0
movb (%rax), %cl
movq -0x18(%rbp), %rax
movq %rax, %rdx
addq $-0x1, %rdx
movq %rdx, -0x18(%rbp)
movb %cl, -0x1(%rax)
jmp 0x4dccf
cmpq $0xa, -0x20(%rbp)
jb 0x4ddda
movq -0x20(%rbp), %rax
movl %eax, -0x2c(%rbp)
movl -0x2c(%rbp), %eax
movl %eax, %esi
leaq 0x17d60(%rip), %rdi # 0x65ae1
callq 0x4eda0
movq %rax, %rdi
movl $0x1, %esi
callq 0x4edd0
movb (%rax), %cl
movq -0x18(%rbp), %rax
movq %rax, %rdx
addq $-0x1, %rdx
movq %rdx, -0x18(%rbp)
movb %cl, -0x1(%rax)
movl -0x2c(%rbp), %eax
movl %eax, %esi
leaq 0x17d2e(%rip), %rdi # 0x65ae1
callq 0x4eda0
movq %rax, %rdi
xorl %eax, %eax
movl %eax, %esi
callq 0x4edd0
movb (%rax), %cl
movq -0x18(%rbp), %rax
movq %rax, %rdx
addq $-0x1, %rdx
movq %rdx, -0x18(%rbp)
movb %cl, -0x1(%rax)
jmp 0x4ddf6
movq -0x20(%rbp), %rax
addq $0x30, %rax
movb %al, %cl
movq -0x18(%rbp), %rax
movq %rax, %rdx
addq $-0x1, %rdx
movq %rdx, -0x18(%rbp)
movb %cl, -0x1(%rax)
movq -0x38(%rbp), %rdi
callq 0x4ca80
movq -0x38(%rbp), %rdi
movq %rax, -0x48(%rbp)
addq $0x10, %rdi
callq 0x4ee00
movq -0x48(%rbp), %rdi
movq %rax, %rsi
movl -0x24(%rbp), %eax
movl %eax, %edx
movq (%rdi), %rax
callq *0x8(%rax)
addq $0x50, %rsp
popq %rbp
retq
nopl (%rax,%rax)
| /pantor[P]ruckig/third_party/nlohmann/json.hpp |
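A self-contained sketch of the same technique dump_integer uses above — fill the buffer from the back, peeling two digits per division through a 00..99 lookup table, so no final reversal is needed. "format_u64" is a hypothetical helper name, not part of nlohmann/json.

#include <cstdio>
#include <string>

// minimal standalone sketch, assuming only the standard library
static std::string format_u64(unsigned long long x)
{
    // 100 digit pairs "00".."99", flattened into one 200-char table
    static constexpr char digits[201] =
        "00010203040506070809101112131415161718192021222324"
        "25262728293031323334353637383940414243444546474849"
        "50515253545556575859606162636465666768697071727374"
        "75767778798081828384858687888990919293949596979899";
    char buf[21];                  // 20 digits max for a 64-bit value
    char* p = buf + sizeof(buf);
    while (x >= 100) {             // peel two digits per division
        const unsigned i = static_cast<unsigned>(x % 100) * 2;
        x /= 100;
        *--p = digits[i + 1];
        *--p = digits[i];
    }
    if (x >= 10) {                 // two digits left
        const unsigned i = static_cast<unsigned>(x) * 2;
        *--p = digits[i + 1];
        *--p = digits[i];
    } else {                       // single digit left
        *--p = static_cast<char>('0' + x);
    }
    return std::string(p, buf + sizeof(buf));
}

int main()
{
    std::printf("%s\n", format_u64(9007199254740993ULL).c_str());
}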
void nlohmann::json_abi_v3_11_3::detail::serializer<nlohmann::json_abi_v3_11_3::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_3::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>, void>>::dump_integer<long, 0>(long) | void dump_integer(NumberType x)
{
static constexpr std::array<std::array<char, 2>, 100> digits_to_99
{
{
{{'0', '0'}}, {{'0', '1'}}, {{'0', '2'}}, {{'0', '3'}}, {{'0', '4'}}, {{'0', '5'}}, {{'0', '6'}}, {{'0', '7'}}, {{'0', '8'}}, {{'0', '9'}},
{{'1', '0'}}, {{'1', '1'}}, {{'1', '2'}}, {{'1', '3'}}, {{'1', '4'}}, {{'1', '5'}}, {{'1', '6'}}, {{'1', '7'}}, {{'1', '8'}}, {{'1', '9'}},
{{'2', '0'}}, {{'2', '1'}}, {{'2', '2'}}, {{'2', '3'}}, {{'2', '4'}}, {{'2', '5'}}, {{'2', '6'}}, {{'2', '7'}}, {{'2', '8'}}, {{'2', '9'}},
{{'3', '0'}}, {{'3', '1'}}, {{'3', '2'}}, {{'3', '3'}}, {{'3', '4'}}, {{'3', '5'}}, {{'3', '6'}}, {{'3', '7'}}, {{'3', '8'}}, {{'3', '9'}},
{{'4', '0'}}, {{'4', '1'}}, {{'4', '2'}}, {{'4', '3'}}, {{'4', '4'}}, {{'4', '5'}}, {{'4', '6'}}, {{'4', '7'}}, {{'4', '8'}}, {{'4', '9'}},
{{'5', '0'}}, {{'5', '1'}}, {{'5', '2'}}, {{'5', '3'}}, {{'5', '4'}}, {{'5', '5'}}, {{'5', '6'}}, {{'5', '7'}}, {{'5', '8'}}, {{'5', '9'}},
{{'6', '0'}}, {{'6', '1'}}, {{'6', '2'}}, {{'6', '3'}}, {{'6', '4'}}, {{'6', '5'}}, {{'6', '6'}}, {{'6', '7'}}, {{'6', '8'}}, {{'6', '9'}},
{{'7', '0'}}, {{'7', '1'}}, {{'7', '2'}}, {{'7', '3'}}, {{'7', '4'}}, {{'7', '5'}}, {{'7', '6'}}, {{'7', '7'}}, {{'7', '8'}}, {{'7', '9'}},
{{'8', '0'}}, {{'8', '1'}}, {{'8', '2'}}, {{'8', '3'}}, {{'8', '4'}}, {{'8', '5'}}, {{'8', '6'}}, {{'8', '7'}}, {{'8', '8'}}, {{'8', '9'}},
{{'9', '0'}}, {{'9', '1'}}, {{'9', '2'}}, {{'9', '3'}}, {{'9', '4'}}, {{'9', '5'}}, {{'9', '6'}}, {{'9', '7'}}, {{'9', '8'}}, {{'9', '9'}},
}
};
// special case for "0"
if (x == 0)
{
o->write_character('0');
return;
}
// use a pointer to fill the buffer
auto buffer_ptr = number_buffer.begin(); // NOLINT(llvm-qualified-auto,readability-qualified-auto,cppcoreguidelines-pro-type-vararg,hicpp-vararg)
number_unsigned_t abs_value;
unsigned int n_chars{};
if (is_negative_number(x))
{
*buffer_ptr = '-';
abs_value = remove_sign(static_cast<number_integer_t>(x));
// reserve one more byte for the minus sign
n_chars = 1 + count_digits(abs_value);
}
else
{
abs_value = static_cast<number_unsigned_t>(x);
n_chars = count_digits(abs_value);
}
// spare 1 byte for '\0'
JSON_ASSERT(n_chars < number_buffer.size() - 1);
// jump to the end and generate the string backwards,
// so we avoid having to reverse the result later
buffer_ptr += n_chars;
// Fast int2ascii implementation inspired by "Fastware" talk by Andrei Alexandrescu
// See: https://www.youtube.com/watch?v=o4-CwDo2zpg
while (abs_value >= 100)
{
const auto digits_index = static_cast<unsigned>((abs_value % 100));
abs_value /= 100;
*(--buffer_ptr) = digits_to_99[digits_index][1];
*(--buffer_ptr) = digits_to_99[digits_index][0];
}
if (abs_value >= 10)
{
const auto digits_index = static_cast<unsigned>(abs_value);
*(--buffer_ptr) = digits_to_99[digits_index][1];
*(--buffer_ptr) = digits_to_99[digits_index][0];
}
else
{
*(--buffer_ptr) = static_cast<char>('0' + abs_value);
}
o->write_characters(number_buffer.data(), n_chars);
} | pushq %rbp
movq %rsp, %rbp
subq $0x50, %rsp
movq %rdi, -0x8(%rbp)
movq %rsi, -0x10(%rbp)
movq -0x8(%rbp), %rax
movq %rax, -0x38(%rbp)
cmpq $0x0, -0x10(%rbp)
jne 0x4deaa
movq -0x38(%rbp), %rdi
callq 0x4ca80
movq %rax, %rdi
movq (%rdi), %rax
movl $0x30, %esi
callq *(%rax)
jmp 0x4e0c2
movq -0x38(%rbp), %rdi
addq $0x10, %rdi
callq 0x4ec50
movq -0x38(%rbp), %rdi
movq %rax, -0x18(%rbp)
movl $0x0, -0x24(%rbp)
movq -0x10(%rbp), %rsi
callq 0x4ef50
testb $0x1, %al
jne 0x4ded5
jmp 0x4df02
movq -0x38(%rbp), %rdi
movq -0x18(%rbp), %rax
movb $0x2d, (%rax)
movq -0x10(%rbp), %rsi
callq 0x4ec90
movq -0x38(%rbp), %rdi
movq %rax, -0x20(%rbp)
movq -0x20(%rbp), %rsi
callq 0x4ed10
addl $0x1, %eax
movl %eax, -0x24(%rbp)
jmp 0x4df1a
movq -0x38(%rbp), %rdi
movq -0x10(%rbp), %rax
movq %rax, -0x20(%rbp)
movq -0x20(%rbp), %rsi
callq 0x4ed10
movl %eax, -0x24(%rbp)
movq -0x38(%rbp), %rdi
movl -0x24(%rbp), %eax
movq %rax, -0x40(%rbp)
addq $0x10, %rdi
callq 0x4ed90
movq %rax, %rcx
movq -0x40(%rbp), %rax
subq $0x1, %rcx
cmpq %rcx, %rax
jae 0x4df40
jmp 0x4df5f
leaq 0x168a9(%rip), %rdi # 0x647f0
leaq 0x14958(%rip), %rsi # 0x628a6
movl $0x4951, %edx # imm = 0x4951
leaq 0x16ab6(%rip), %rcx # 0x64a10
callq 0x30240
movl -0x24(%rbp), %ecx
movq -0x18(%rbp), %rax
movl %ecx, %ecx
addq %rcx, %rax
movq %rax, -0x18(%rbp)
cmpq $0x64, -0x20(%rbp)
jb 0x4e007
movq -0x20(%rbp), %rax
movl $0x64, %ecx
xorl %edx, %edx
divq %rcx
movl %edx, %eax
movl %eax, -0x28(%rbp)
movq -0x20(%rbp), %rax
movl $0x64, %ecx
xorl %edx, %edx
divq %rcx
movq %rax, -0x20(%rbp)
movl -0x28(%rbp), %eax
movl %eax, %esi
leaq 0x17bfe(%rip), %rdi # 0x65ba9
callq 0x4eda0
movq %rax, %rdi
movl $0x1, %esi
callq 0x4edd0
movb (%rax), %cl
movq -0x18(%rbp), %rax
movq %rax, %rdx
addq $-0x1, %rdx
movq %rdx, -0x18(%rbp)
movb %cl, -0x1(%rax)
movl -0x28(%rbp), %eax
movl %eax, %esi
leaq 0x17bcc(%rip), %rdi # 0x65ba9
callq 0x4eda0
movq %rax, %rdi
xorl %eax, %eax
movl %eax, %esi
callq 0x4edd0
movb (%rax), %cl
movq -0x18(%rbp), %rax
movq %rax, %rdx
addq $-0x1, %rdx
movq %rdx, -0x18(%rbp)
movb %cl, -0x1(%rax)
jmp 0x4df6f
cmpq $0xa, -0x20(%rbp)
jb 0x4e07a
movq -0x20(%rbp), %rax
movl %eax, -0x2c(%rbp)
movl -0x2c(%rbp), %eax
movl %eax, %esi
leaq 0x17b88(%rip), %rdi # 0x65ba9
callq 0x4eda0
movq %rax, %rdi
movl $0x1, %esi
callq 0x4edd0
movb (%rax), %cl
movq -0x18(%rbp), %rax
movq %rax, %rdx
addq $-0x1, %rdx
movq %rdx, -0x18(%rbp)
movb %cl, -0x1(%rax)
movl -0x2c(%rbp), %eax
movl %eax, %esi
leaq 0x17b56(%rip), %rdi # 0x65ba9
callq 0x4eda0
movq %rax, %rdi
xorl %eax, %eax
movl %eax, %esi
callq 0x4edd0
movb (%rax), %cl
movq -0x18(%rbp), %rax
movq %rax, %rdx
addq $-0x1, %rdx
movq %rdx, -0x18(%rbp)
movb %cl, -0x1(%rax)
jmp 0x4e096
movq -0x20(%rbp), %rax
addq $0x30, %rax
movb %al, %cl
movq -0x18(%rbp), %rax
movq %rax, %rdx
addq $-0x1, %rdx
movq %rdx, -0x18(%rbp)
movb %cl, -0x1(%rax)
movq -0x38(%rbp), %rdi
callq 0x4ca80
movq -0x38(%rbp), %rdi
movq %rax, -0x48(%rbp)
addq $0x10, %rdi
callq 0x4ee00
movq -0x48(%rbp), %rdi
movq %rax, %rsi
movl -0x24(%rbp), %eax
movl %eax, %edx
movq (%rdi), %rax
callq *0x8(%rax)
addq $0x50, %rsp
popq %rbp
retq
nopl (%rax,%rax)
| /pantor[P]ruckig/third_party/nlohmann/json.hpp |
nlohmann::json_abi_v3_11_3::detail::serializer<nlohmann::json_abi_v3_11_3::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_3::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>, void>>::decode(unsigned char&, unsigned int&, unsigned char) | static std::uint8_t decode(std::uint8_t& state, std::uint32_t& codep, const std::uint8_t byte) noexcept
{
static const std::array<std::uint8_t, 400> utf8d =
{
{
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 00..1F
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 20..3F
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 40..5F
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 60..7F
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, // 80..9F
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, // A0..BF
8, 8, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // C0..DF
0xA, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x4, 0x3, 0x3, // E0..EF
0xB, 0x6, 0x6, 0x6, 0x5, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, // F0..FF
0x0, 0x1, 0x2, 0x3, 0x5, 0x8, 0x7, 0x1, 0x1, 0x1, 0x4, 0x6, 0x1, 0x1, 0x1, 0x1, // s0..s0
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, // s1..s2
1, 2, 1, 1, 1, 1, 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, // s3..s4
1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 3, 1, 1, 1, 1, 1, 1, // s5..s6
1, 3, 1, 1, 1, 1, 1, 3, 1, 3, 1, 1, 1, 1, 1, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 // s7..s8
}
};
JSON_ASSERT(byte < utf8d.size());
const std::uint8_t type = utf8d[byte];
codep = (state != UTF8_ACCEPT)
? (byte & 0x3fu) | (codep << 6u)
: (0xFFu >> type) & (byte);
const std::size_t index = 256u + static_cast<size_t>(state) * 16u + static_cast<size_t>(type);
JSON_ASSERT(index < utf8d.size());
state = utf8d[index];
return state;
} | pushq %rbp
movq %rsp, %rbp
subq $0x40, %rsp
movb %dl, %al
movq %rdi, -0x8(%rbp)
movq %rsi, -0x10(%rbp)
movb %al, -0x11(%rbp)
movzbl -0x11(%rbp), %eax
movq %rax, -0x28(%rbp)
leaq 0x175dc(%rip), %rdi # 0x657c0
callq 0x4e6b0
movq %rax, %rcx
movq -0x28(%rbp), %rax
cmpq %rcx, %rax
jae 0x4e1f7
jmp 0x4e216
leaq 0x16506(%rip), %rdi # 0x64704
leaq 0x146a1(%rip), %rsi # 0x628a6
movl $0x49f2, %edx # imm = 0x49F2
leaq 0x16507(%rip), %rcx # 0x64718
callq 0x30240
movzbl -0x11(%rbp), %eax
movl %eax, %esi
leaq 0x1759d(%rip), %rdi # 0x657c0
callq 0x4e6c0
movb (%rax), %al
movb %al, -0x12(%rbp)
movq -0x8(%rbp), %rax
movzbl (%rax), %eax
cmpl $0x0, %eax
je 0x4e250
movzbl -0x11(%rbp), %eax
andl $0x3f, %eax
movq -0x10(%rbp), %rcx
movl (%rcx), %ecx
shll $0x6, %ecx
orl %ecx, %eax
movl %eax, -0x2c(%rbp)
jmp 0x4e264
movzbl -0x12(%rbp), %ecx
movl $0xff, %eax
shrl %cl, %eax
movzbl -0x11(%rbp), %ecx
andl %ecx, %eax
movl %eax, -0x2c(%rbp)
movl -0x2c(%rbp), %ecx
movq -0x10(%rbp), %rax
movl %ecx, (%rax)
movq -0x8(%rbp), %rax
movzbl (%rax), %eax
shlq $0x4, %rax
addq $0x100, %rax # imm = 0x100
movzbl -0x12(%rbp), %ecx
addq %rcx, %rax
movq %rax, -0x20(%rbp)
movq -0x20(%rbp), %rax
movq %rax, -0x38(%rbp)
leaq 0x17528(%rip), %rdi # 0x657c0
callq 0x4e6b0
movq %rax, %rcx
movq -0x38(%rbp), %rax
cmpq %rcx, %rax
jae 0x4e2ab
jmp 0x4e2ca
leaq 0x16515(%rip), %rdi # 0x647c7
leaq 0x145ed(%rip), %rsi # 0x628a6
movl $0x49fa, %edx # imm = 0x49FA
leaq 0x16453(%rip), %rcx # 0x64718
callq 0x30240
movq -0x20(%rbp), %rsi
leaq 0x174eb(%rip), %rdi # 0x657c0
callq 0x4e6c0
movb (%rax), %cl
movq -0x8(%rbp), %rax
movb %cl, (%rax)
movq -0x8(%rbp), %rax
movb (%rax), %al
addq $0x40, %rsp
popq %rbp
retq
nop
| /pantor[P]ruckig/third_party/nlohmann/json.hpp |
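Note: decode above is Björn Höhrmann's table-driven UTF-8 DFA; state 0 is UTF8_ACCEPT and state 1 is UTF8_REJECT. A minimal driver loop, sketched under the assumption that decode is reachable with the signature shown (validate_utf8 is an illustrative name, not part of the library):

#include <cstdint>
#include <string>

// Sketch: a byte string is valid UTF-8 iff the DFA ends back in state 0.
bool validate_utf8(const std::string& s)
{
    std::uint8_t state = 0;   // UTF8_ACCEPT
    std::uint32_t codep = 0;  // receives each decoded code point
    for (unsigned char byte : s)
    {
        if (decode(state, codep, byte) == 1) // UTF8_REJECT: unrecoverable
        {
            return false;
        }
    }
    return state == 0; // a trailing partial sequence leaves state != 0
}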
nlohmann::json_abi_v3_11_3::detail::serializer<nlohmann::json_abi_v3_11_3::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_3::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>, void>>::hex_bytes(unsigned char) | static std::string hex_bytes(std::uint8_t byte)
{
std::string result = "FF";
constexpr const char* nibble_to_hex = "0123456789ABCDEF";
result[0] = nibble_to_hex[byte / 16];
result[1] = nibble_to_hex[byte % 16];
return result;
} | pushq %rbp
movq %rsp, %rbp
subq $0x60, %rsp
movq %rdi, -0x38(%rbp)
movb %sil, %al
movq %rdi, %rcx
movq %rcx, -0x40(%rbp)
movq %rdi, -0x8(%rbp)
movb %al, -0x9(%rbp)
movb $0x0, -0xa(%rbp)
leaq -0xb(%rbp), %rdi
movq %rdi, -0x30(%rbp)
callq 0x305f0
movq -0x38(%rbp), %rdi
movq -0x30(%rbp), %rdx
leaq 0x162af(%rip), %rsi # 0x647dc
callq 0x3cb40
jmp 0x4e534
leaq -0xb(%rbp), %rdi
callq 0x303d0
movq -0x38(%rbp), %rdi
leaq 0x16297(%rip), %rcx # 0x647df
movq %rcx, -0x28(%rbp)
movzbl -0x9(%rbp), %eax
shrl $0x4, %eax
movb (%rax,%rcx), %al
movb %al, -0x49(%rbp)
xorl %eax, %eax
movl %eax, %esi
callq 0x306b0
movq %rax, -0x48(%rbp)
jmp 0x4e568
movq -0x38(%rbp), %rdi
movq -0x48(%rbp), %rax
movb -0x49(%rbp), %cl
movb %cl, (%rax)
movzbl -0x9(%rbp), %eax
andl $0xf, %eax
leaq 0x1625c(%rip), %rcx # 0x647df
movb (%rax,%rcx), %al
movb %al, -0x59(%rbp)
movl $0x1, %esi
callq 0x306b0
movq %rax, -0x58(%rbp)
jmp 0x4e599
movq -0x58(%rbp), %rax
movb -0x59(%rbp), %cl
movb %cl, (%rax)
movb $0x1, -0xa(%rbp)
testb $0x1, -0xa(%rbp)
jne 0x4e5e5
jmp 0x4e5dc
movq %rax, %rcx
movl %edx, %eax
movq %rcx, -0x18(%rbp)
movl %eax, -0x1c(%rbp)
leaq -0xb(%rbp), %rdi
callq 0x303d0
jmp 0x4e5ef
movq -0x38(%rbp), %rdi
movq %rax, %rcx
movl %edx, %eax
movq %rcx, -0x18(%rbp)
movl %eax, -0x1c(%rbp)
callq 0x30210
jmp 0x4e5ef
movq -0x38(%rbp), %rdi
callq 0x30210
movq -0x40(%rbp), %rax
addq $0x60, %rsp
popq %rbp
retq
movq -0x18(%rbp), %rdi
callq 0x305e0
nopl (%rax,%rax)
| /pantor[P]ruckig/third_party/nlohmann/json.hpp |
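The division and modulo in hex_bytes compile to the shrl $0x4 / andl $0xf pair visible in the listing; the function is equivalent to formatting with "%02X". A small usage sketch, assuming hex_bytes is reachable from the call site (print_invalid_byte is an illustrative name):

#include <cstdint>
#include <cstdio>
#include <string>

// Sketch: hex_bytes(0xAB) == "AB"; the serializer uses this to report
// invalid UTF-8 bytes in error messages.
void print_invalid_byte(std::uint8_t byte)
{
    std::string hex = hex_bytes(byte);        // e.g. "C3" for byte 0xC3
    std::printf("invalid UTF-8 byte: 0x%s\n", hex.c_str());
}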
nlohmann::json_abi_v3_11_3::detail::dtoa_impl::boundaries nlohmann::json_abi_v3_11_3::detail::dtoa_impl::compute_boundaries<double>(double) | boundaries compute_boundaries(FloatType value)
{
JSON_ASSERT(std::isfinite(value));
JSON_ASSERT(value > 0);
// Convert the IEEE representation into a diyfp.
//
// If v is denormal:
// value = 0.F * 2^(1 - bias) = ( F) * 2^(1 - bias - (p-1))
// If v is normalized:
// value = 1.F * 2^(E - bias) = (2^(p-1) + F) * 2^(E - bias - (p-1))
static_assert(std::numeric_limits<FloatType>::is_iec559,
"internal error: dtoa_short requires an IEEE-754 floating-point implementation");
constexpr int kPrecision = std::numeric_limits<FloatType>::digits; // = p (includes the hidden bit)
constexpr int kBias = std::numeric_limits<FloatType>::max_exponent - 1 + (kPrecision - 1);
constexpr int kMinExp = 1 - kBias;
constexpr std::uint64_t kHiddenBit = std::uint64_t{1} << (kPrecision - 1); // = 2^(p-1)
using bits_type = typename std::conditional<kPrecision == 24, std::uint32_t, std::uint64_t >::type;
const auto bits = static_cast<std::uint64_t>(reinterpret_bits<bits_type>(value));
const std::uint64_t E = bits >> (kPrecision - 1);
const std::uint64_t F = bits & (kHiddenBit - 1);
const bool is_denormal = E == 0;
const diyfp v = is_denormal
? diyfp(F, kMinExp)
: diyfp(F + kHiddenBit, static_cast<int>(E) - kBias);
// Compute the boundaries m- and m+ of the floating-point value
// v = f * 2^e.
//
// Determine v- and v+, the floating-point predecessor and successor of v,
// respectively.
//
// v- = v - 2^e if f != 2^(p-1) or e == e_min (A)
// = v - 2^(e-1) if f == 2^(p-1) and e > e_min (B)
//
// v+ = v + 2^e
//
// Let m- = (v- + v) / 2 and m+ = (v + v+) / 2. All real numbers _strictly_
// between m- and m+ round to v, regardless of how the input rounding
// algorithm breaks ties.
//
// ---+-------------+-------------+-------------+-------------+--- (A)
// v- m- v m+ v+
//
// -----------------+------+------+-------------+-------------+--- (B)
// v- m- v m+ v+
const bool lower_boundary_is_closer = F == 0 && E > 1;
const diyfp m_plus = diyfp(2 * v.f + 1, v.e - 1);
const diyfp m_minus = lower_boundary_is_closer
? diyfp(4 * v.f - 1, v.e - 2) // (B)
: diyfp(2 * v.f - 1, v.e - 1); // (A)
// Determine the normalized w+ = m+.
const diyfp w_plus = diyfp::normalize(m_plus);
// Determine w- = m- such that e_(w-) = e_(w+).
const diyfp w_minus = diyfp::normalize_to(m_minus, w_plus.e);
return {diyfp::normalize(v), w_minus, w_plus};
} | pushq %rbp
movq %rsp, %rbp
subq $0x110, %rsp # imm = 0x110
movq %rdi, -0x100(%rbp)
movq %rdi, -0xf8(%rbp)
movsd %xmm0, -0x8(%rbp)
movsd -0x8(%rbp), %xmm0
callq 0x4ef70
testb $0x1, %al
jne 0x4f62e
jmp 0x4f630
jmp 0x4f64f
leaq 0x15469(%rip), %rdi # 0x64aa0
leaq 0x13268(%rip), %rsi # 0x628a6
movl $0x42ca, %edx # imm = 0x42CA
leaq 0x1564b(%rip), %rcx # 0x64c95
callq 0x30240
movsd -0x8(%rbp), %xmm0
xorps %xmm1, %xmm1
ucomisd %xmm1, %xmm0
jbe 0x4f65f
jmp 0x4f67e
leaq 0x15625(%rip), %rdi # 0x64c8b
leaq 0x13239(%rip), %rsi # 0x628a6
movl $0x42cb, %edx # imm = 0x42CB
leaq 0x1561c(%rip), %rcx # 0x64c95
callq 0x30240
movl $0x35, -0xc(%rbp)
movl $0x433, -0x10(%rbp) # imm = 0x433
movl $0xfffffbce, -0x14(%rbp) # imm = 0xFFFFFBCE
movabsq $0x10000000000000, %rax # imm = 0x10000000000000
movq %rax, -0x20(%rbp)
movsd -0x8(%rbp), %xmm0
callq 0x4fae0
movq %rax, -0x28(%rbp)
movq -0x28(%rbp), %rax
shrq $0x34, %rax
movq %rax, -0x30(%rbp)
movabsq $0xfffffffffffff, %rax # imm = 0xFFFFFFFFFFFFF
andq -0x28(%rbp), %rax
movq %rax, -0x38(%rbp)
cmpq $0x0, -0x30(%rbp)
sete %al
andb $0x1, %al
movb %al, -0x39(%rbp)
testb $0x1, -0x39(%rbp)
je 0x4f6f4
movq -0x38(%rbp), %rsi
leaq -0x50(%rbp), %rdi
movl $0xfffffbce, %edx # imm = 0xFFFFFBCE
callq 0x4fb00
jmp 0x4f717
movabsq $0x10000000000000, %rsi # imm = 0x10000000000000
addq -0x38(%rbp), %rsi
movq -0x30(%rbp), %rax
movl %eax, %edx
subl $0x433, %edx # imm = 0x433
leaq -0x50(%rbp), %rdi
callq 0x4fb00
xorl %eax, %eax
cmpq $0x0, -0x38(%rbp)
movb %al, -0x101(%rbp)
jne 0x4f734
cmpq $0x1, -0x30(%rbp)
seta %al
movb %al, -0x101(%rbp)
movb -0x101(%rbp), %al
andb $0x1, %al
movb %al, -0x51(%rbp)
movq -0x50(%rbp), %rsi
shlq %rsi
addq $0x1, %rsi
movl -0x48(%rbp), %edx
subl $0x1, %edx
leaq -0x68(%rbp), %rdi
callq 0x4fb00
testb $0x1, -0x51(%rbp)
je 0x4f77c
movq -0x50(%rbp), %rsi
shlq $0x2, %rsi
subq $0x1, %rsi
movl -0x48(%rbp), %edx
subl $0x2, %edx
leaq -0x78(%rbp), %rdi
callq 0x4fb00
jmp 0x4f796
movq -0x50(%rbp), %rsi
shlq %rsi
subq $0x1, %rsi
movl -0x48(%rbp), %edx
subl $0x1, %edx
leaq -0x78(%rbp), %rdi
callq 0x4fb00
movups -0x68(%rbp), %xmm0
movaps %xmm0, -0xa0(%rbp)
movq -0xa0(%rbp), %rdi
movl -0x98(%rbp), %esi
callq 0x4fb30
movl %edx, -0xa8(%rbp)
movq %rax, -0xb0(%rbp)
movl -0xa8(%rbp), %eax
movl %eax, -0x80(%rbp)
movq -0xb0(%rbp), %rax
movq %rax, -0x88(%rbp)
movl -0x80(%rbp), %esi
leaq -0x78(%rbp), %rdi
callq 0x4fbb0
movl %edx, -0xc8(%rbp)
movq %rax, -0xd0(%rbp)
movl -0xc8(%rbp), %eax
movl %eax, -0xb8(%rbp)
movq -0xd0(%rbp), %rax
movq %rax, -0xc0(%rbp)
movups -0x50(%rbp), %xmm0
movaps %xmm0, -0xe0(%rbp)
movq -0xe0(%rbp), %rdi
movl -0xd8(%rbp), %esi
callq 0x4fb30
movq -0x100(%rbp), %rcx
movq %rax, %rsi
movq -0xf8(%rbp), %rax
movq %rsi, -0x110(%rbp)
movl %edx, %esi
movq -0x110(%rbp), %rdx
movl %esi, -0xe8(%rbp)
movq %rdx, -0xf0(%rbp)
movq -0xf0(%rbp), %rdx
movq %rdx, (%rcx)
movl -0xe8(%rbp), %edx
movl %edx, 0x8(%rcx)
movq -0xc0(%rbp), %rdx
movq %rdx, 0x10(%rcx)
movl -0xb8(%rbp), %edx
movl %edx, 0x18(%rcx)
movq -0x88(%rbp), %rdx
movq %rdx, 0x20(%rcx)
movl -0x80(%rbp), %edx
movl %edx, 0x28(%rcx)
addq $0x110, %rsp # imm = 0x110
popq %rbp
retq
nopw %cs:(%rax,%rax)
| /pantor[P]ruckig/third_party/nlohmann/json.hpp |
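A worked check of the bit split above, using the kPrecision = 53 / kBias = 1075 constants visible in the listing (0x35 and 0x433): for value = 1.0 the bits are 0x3FF0000000000000, so E = 1023 and F = 0, giving v = (2^52, -52); because F == 0 and E > 1, the lower boundary m- is the closer one (case B). Sketch:

#include <cstdint>
#include <cstdio>
#include <cstring>

int main()
{
    double value = 1.0;
    std::uint64_t bits;
    std::memcpy(&bits, &value, sizeof bits);            // reinterpret_bits
    const std::uint64_t E = bits >> 52;                 // biased exponent = 0x3FF
    const std::uint64_t F = bits & ((1ull << 52) - 1);  // fraction = 0
    const std::uint64_t f = F + (1ull << 52);           // restore hidden bit
    const int e = static_cast<int>(E) - 1075;           // unbias: -52
    std::printf("v = %llu * 2^%d, case %c\n",
                (unsigned long long)f, e,
                (F == 0 && E > 1) ? 'B' : 'A');
}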
nlohmann::json_abi_v3_11_3::detail::dtoa_impl::diyfp::mul(nlohmann::json_abi_v3_11_3::detail::dtoa_impl::diyfp const&, nlohmann::json_abi_v3_11_3::detail::dtoa_impl::diyfp const&) | static diyfp mul(const diyfp& x, const diyfp& y) noexcept
{
static_assert(kPrecision == 64, "internal error");
// Computes:
// f = round((x.f * y.f) / 2^q)
// e = x.e + y.e + q
// Emulate the 64-bit * 64-bit multiplication:
//
// p = u * v
// = (u_lo + 2^32 u_hi) (v_lo + 2^32 v_hi)
// = (u_lo v_lo ) + 2^32 ((u_lo v_hi ) + (u_hi v_lo )) + 2^64 (u_hi v_hi )
// = (p0 ) + 2^32 ((p1 ) + (p2 )) + 2^64 (p3 )
// = (p0_lo + 2^32 p0_hi) + 2^32 ((p1_lo + 2^32 p1_hi) + (p2_lo + 2^32 p2_hi)) + 2^64 (p3 )
// = (p0_lo ) + 2^32 (p0_hi + p1_lo + p2_lo ) + 2^64 (p1_hi + p2_hi + p3)
// = (p0_lo ) + 2^32 (Q ) + 2^64 (H )
// = (p0_lo ) + 2^32 (Q_lo + 2^32 Q_hi ) + 2^64 (H )
//
// (Since Q might be larger than 2^32 - 1)
//
// = (p0_lo + 2^32 Q_lo) + 2^64 (Q_hi + H)
//
// (Q_hi + H does not overflow a 64-bit int)
//
// = p_lo + 2^64 p_hi
const std::uint64_t u_lo = x.f & 0xFFFFFFFFu;
const std::uint64_t u_hi = x.f >> 32u;
const std::uint64_t v_lo = y.f & 0xFFFFFFFFu;
const std::uint64_t v_hi = y.f >> 32u;
const std::uint64_t p0 = u_lo * v_lo;
const std::uint64_t p1 = u_lo * v_hi;
const std::uint64_t p2 = u_hi * v_lo;
const std::uint64_t p3 = u_hi * v_hi;
const std::uint64_t p0_hi = p0 >> 32u;
const std::uint64_t p1_lo = p1 & 0xFFFFFFFFu;
const std::uint64_t p1_hi = p1 >> 32u;
const std::uint64_t p2_lo = p2 & 0xFFFFFFFFu;
const std::uint64_t p2_hi = p2 >> 32u;
std::uint64_t Q = p0_hi + p1_lo + p2_lo;
// The full product might now be computed as
//
// p_hi = p3 + p2_hi + p1_hi + (Q >> 32)
// p_lo = p0_lo + (Q << 32)
//
// But in this particular case here, the full p_lo is not required.
// Effectively we only need to add the highest bit in p_lo to p_hi (and
// Q_hi + 1 does not overflow).
Q += std::uint64_t{1} << (64u - 32u - 1u); // round, ties up
const std::uint64_t h = p3 + p2_hi + p1_hi + (Q >> 32u);
return {h, x.e + y.e + 64};
} | pushq %rbp
movq %rsp, %rbp
subq $0xa0, %rsp
movq %rdi, -0x18(%rbp)
movq %rsi, -0x20(%rbp)
movq -0x18(%rbp), %rax
movl (%rax), %eax
movq %rax, -0x28(%rbp)
movq -0x18(%rbp), %rax
movl 0x4(%rax), %eax
movq %rax, -0x30(%rbp)
movq -0x20(%rbp), %rax
movl (%rax), %eax
movq %rax, -0x38(%rbp)
movq -0x20(%rbp), %rax
movl 0x4(%rax), %eax
movq %rax, -0x40(%rbp)
movq -0x28(%rbp), %rax
movq -0x38(%rbp), %rcx
imulq %rcx, %rax
movq %rax, -0x48(%rbp)
movq -0x28(%rbp), %rax
movq -0x40(%rbp), %rcx
imulq %rcx, %rax
movq %rax, -0x50(%rbp)
movq -0x30(%rbp), %rax
movq -0x38(%rbp), %rcx
imulq %rcx, %rax
movq %rax, -0x58(%rbp)
movq -0x30(%rbp), %rax
movq -0x40(%rbp), %rcx
imulq %rcx, %rax
movq %rax, -0x60(%rbp)
movl -0x44(%rbp), %eax
movq %rax, -0x68(%rbp)
movl -0x50(%rbp), %eax
movq %rax, -0x70(%rbp)
movl -0x4c(%rbp), %eax
movq %rax, -0x78(%rbp)
movl -0x58(%rbp), %eax
movq %rax, -0x80(%rbp)
movl -0x54(%rbp), %eax
movq %rax, -0x88(%rbp)
movq -0x68(%rbp), %rax
movq -0x70(%rbp), %rcx
addq %rcx, %rax
movq -0x80(%rbp), %rcx
addq %rcx, %rax
movq %rax, -0x90(%rbp)
movq -0x90(%rbp), %rax
subq $-0x80000000, %rax # imm = 0x80000000
movq %rax, -0x90(%rbp)
movq -0x60(%rbp), %rax
movq -0x88(%rbp), %rcx
addq %rcx, %rax
movq -0x78(%rbp), %rcx
addq %rcx, %rax
movl -0x8c(%rbp), %ecx
addq %rcx, %rax
movq %rax, -0x98(%rbp)
movq -0x98(%rbp), %rsi
movq -0x18(%rbp), %rax
movl 0x8(%rax), %edx
movq -0x20(%rbp), %rax
movl 0x8(%rax), %eax
movl %eax, %ecx
movl %edx, %eax
leal 0x40(%rax,%rcx), %edx
leaq -0x10(%rbp), %rdi
callq 0x4fb00
movq -0x10(%rbp), %rax
movl -0x8(%rbp), %edx
addq $0xa0, %rsp
popq %rbp
retq
nopl (%rax,%rax)
| /pantor[P]ruckig/third_party/nlohmann/json.hpp |
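The limb arithmetic above emulates a 64x64 -> 128-bit multiply and returns the high word rounded to nearest, ties up; adding 2^31 to Q is the same as adding 2^63 to the full 128-bit product. On GCC/Clang this can be cross-checked with the unsigned __int128 extension (mul_hi_rounded is an illustrative name):

#include <cstdint>

// Sketch: the same rounded high word via a real 128-bit product.
std::uint64_t mul_hi_rounded(std::uint64_t x, std::uint64_t y)
{
    unsigned __int128 p = (unsigned __int128)x * y;
    p += (unsigned __int128)1 << 63;      // round, ties up
    return (std::uint64_t)(p >> 64);
}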
decltype(from_json_array_impl(fp, fp0, nlohmann::json_abi_v3_11_3::detail::priority_tag<3u>{}), fp.get<std::array<double, 7ul>::value_type>(), (void)()) nlohmann::json_abi_v3_11_3::detail::from_json<nlohmann::json_abi_v3_11_3::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_3::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>, void>, std::array<double, 7ul>, 0>(nlohmann::json_abi_v3_11_3::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_3::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>, void> const&, std::array<double, 7ul>&) | auto from_json(const BasicJsonType& j, ConstructibleArrayType& arr)
-> decltype(from_json_array_impl(j, arr, priority_tag<3> {}),
j.template get<typename ConstructibleArrayType::value_type>(),
void())
{
if (JSON_HEDLEY_UNLIKELY(!j.is_array()))
{
JSON_THROW(type_error::create(302, concat("type must be array, but is ", j.type_name()), &j));
}
from_json_array_impl(j, arr, priority_tag<3> {});
} | pushq %rbp
movq %rsp, %rbp
subq $0x50, %rsp
movq %rdi, -0x8(%rbp)
movq %rsi, -0x10(%rbp)
movq -0x8(%rbp), %rdi
callq 0x403d0
xorb $-0x1, %al
xorb $-0x1, %al
xorb $-0x1, %al
testb $0x1, %al
jne 0x53688
jmp 0x53728
movb $0x1, -0x45(%rbp)
movl $0x20, %edi
callq 0x30190
movq %rax, -0x50(%rbp)
movq -0x8(%rbp), %rdi
callq 0x41e50
movq %rax, -0x38(%rbp)
leaq 0x10971(%rip), %rsi # 0x6401f
leaq -0x30(%rbp), %rdi
leaq -0x38(%rbp), %rdx
callq 0x488f0
jmp 0x536bd
movq -0x50(%rbp), %rdi
movq -0x8(%rbp), %rcx
movl $0x12e, %esi # imm = 0x12E
leaq -0x30(%rbp), %rdx
callq 0x47f50
jmp 0x536d5
movq -0x50(%rbp), %rdi
movb $0x0, -0x45(%rbp)
leaq 0x29494(%rip), %rsi # 0x7cb78
leaq -0x117fb(%rip), %rdx # 0x41ef0
callq 0x305c0
jmp 0x53744
movq %rax, %rcx
movl %edx, %eax
movq %rcx, -0x40(%rbp)
movl %eax, -0x44(%rbp)
jmp 0x53715
movq %rax, %rcx
movl %edx, %eax
movq %rcx, -0x40(%rbp)
movl %eax, -0x44(%rbp)
leaq -0x30(%rbp), %rdi
callq 0x30210
testb $0x1, -0x45(%rbp)
jne 0x5371d
jmp 0x53726
movq -0x50(%rbp), %rdi
callq 0x30290
jmp 0x5373b
movq -0x8(%rbp), %rdi
movq -0x10(%rbp), %rsi
callq 0x53750
addq $0x50, %rsp
popq %rbp
retq
movq -0x40(%rbp), %rdi
callq 0x305e0
nopw %cs:(%rax,%rax)
nop
| /pantor[P]ruckig/third_party/nlohmann/json.hpp |
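This overload is what get<std::array<double, 7>>() dispatches to; a non-array input is rejected up front with type_error 302 (the concat call in the listing builds the "type must be array, but is ..." message). Usage sketch:

#include <array>
#include <nlohmann/json.hpp>

int main()
{
    nlohmann::json j = {0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0};
    auto arr = j.get<std::array<double, 7>>(); // calls from_json above
    // nlohmann::json(42).get<std::array<double, 7>>() would throw
    // [json.exception.type_error.302] type must be array, but is number
    (void)arr;
}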
decltype(from_json_array_impl(fp, fp0, nlohmann::json_abi_v3_11_3::detail::priority_tag<3u>{}), fp.get<std::array<double, 8ul>::value_type>(), (void)()) nlohmann::json_abi_v3_11_3::detail::from_json<nlohmann::json_abi_v3_11_3::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_3::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>, void>, std::array<double, 8ul>, 0>(nlohmann::json_abi_v3_11_3::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_3::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>, void> const&, std::array<double, 8ul>&) | auto from_json(const BasicJsonType& j, ConstructibleArrayType& arr)
-> decltype(from_json_array_impl(j, arr, priority_tag<3> {}),
j.template get<typename ConstructibleArrayType::value_type>(),
void())
{
if (JSON_HEDLEY_UNLIKELY(!j.is_array()))
{
JSON_THROW(type_error::create(302, concat("type must be array, but is ", j.type_name()), &j));
}
from_json_array_impl(j, arr, priority_tag<3> {});
} | pushq %rbp
movq %rsp, %rbp
subq $0x50, %rsp
movq %rdi, -0x8(%rbp)
movq %rsi, -0x10(%rbp)
movq -0x8(%rbp), %rdi
callq 0x403d0
xorb $-0x1, %al
xorb $-0x1, %al
xorb $-0x1, %al
testb $0x1, %al
jne 0x53fb8
jmp 0x54058
movb $0x1, -0x45(%rbp)
movl $0x20, %edi
callq 0x30190
movq %rax, -0x50(%rbp)
movq -0x8(%rbp), %rdi
callq 0x41e50
movq %rax, -0x38(%rbp)
leaq 0x10041(%rip), %rsi # 0x6401f
leaq -0x30(%rbp), %rdi
leaq -0x38(%rbp), %rdx
callq 0x488f0
jmp 0x53fed
movq -0x50(%rbp), %rdi
movq -0x8(%rbp), %rcx
movl $0x12e, %esi # imm = 0x12E
leaq -0x30(%rbp), %rdx
callq 0x47f50
jmp 0x54005
movq -0x50(%rbp), %rdi
movb $0x0, -0x45(%rbp)
leaq 0x28b64(%rip), %rsi # 0x7cb78
leaq -0x1212b(%rip), %rdx # 0x41ef0
callq 0x305c0
jmp 0x54074
movq %rax, %rcx
movl %edx, %eax
movq %rcx, -0x40(%rbp)
movl %eax, -0x44(%rbp)
jmp 0x54045
movq %rax, %rcx
movl %edx, %eax
movq %rcx, -0x40(%rbp)
movl %eax, -0x44(%rbp)
leaq -0x30(%rbp), %rdi
callq 0x30210
testb $0x1, -0x45(%rbp)
jne 0x5404d
jmp 0x54056
movq -0x50(%rbp), %rdi
callq 0x30290
jmp 0x5406b
movq -0x8(%rbp), %rdi
movq -0x10(%rbp), %rsi
callq 0x54080
addq $0x50, %rsp
popq %rbp
retq
movq -0x40(%rbp), %rdi
callq 0x305e0
nopw %cs:(%rax,%rax)
nop
| /pantor[P]ruckig/third_party/nlohmann/json.hpp |
ruckig::BrakeProfile::finalize(double&, double&, double&) | void finalize(double& ps, double& vs, double& as) {
if (t[0] <= 0.0 && t[1] <= 0.0) {
duration = 0.0;
return;
}
duration = t[0];
p[0] = ps;
v[0] = vs;
a[0] = as;
std::tie(ps, vs, as) = integrate(t[0], ps, vs, as, j[0]);
if (t[1] > 0.0) {
duration += t[1];
p[1] = ps;
v[1] = vs;
a[1] = as;
std::tie(ps, vs, as) = integrate(t[1], ps, vs, as, j[1]);
}
} | pushq %rbp
movq %rsp, %rbp
subq $0x100, %rsp # imm = 0x100
movq %rdi, -0x8(%rbp)
movq %rsi, -0x10(%rbp)
movq %rdx, -0x18(%rbp)
movq %rcx, -0x20(%rbp)
movq -0x8(%rbp), %rdi
movq %rdi, -0x88(%rbp)
addq $0x8, %rdi
xorl %eax, %eax
movl %eax, %esi
callq 0x54ba0
xorps %xmm0, %xmm0
ucomisd (%rax), %xmm0
jb 0x54f2d
movq -0x88(%rbp), %rdi
addq $0x8, %rdi
movl $0x1, %esi
callq 0x54ba0
xorps %xmm0, %xmm0
ucomisd (%rax), %xmm0
jb 0x54f2d
movq -0x88(%rbp), %rax
xorps %xmm0, %xmm0
movsd %xmm0, (%rax)
jmp 0x5521b
movq -0x88(%rbp), %rdi
addq $0x8, %rdi
xorl %eax, %eax
movl %eax, %esi
callq 0x54ba0
movq -0x88(%rbp), %rdi
movsd (%rax), %xmm0
movsd %xmm0, (%rdi)
movq -0x10(%rbp), %rax
movsd (%rax), %xmm0
movsd %xmm0, -0xc0(%rbp)
addq $0x48, %rdi
xorl %eax, %eax
movl %eax, %esi
callq 0x54ba0
movsd -0xc0(%rbp), %xmm0
movq -0x88(%rbp), %rdi
movsd %xmm0, (%rax)
movq -0x18(%rbp), %rax
movsd (%rax), %xmm0
movsd %xmm0, -0xb8(%rbp)
addq $0x38, %rdi
xorl %eax, %eax
movl %eax, %esi
callq 0x54ba0
movsd -0xb8(%rbp), %xmm0
movq -0x88(%rbp), %rdi
movsd %xmm0, (%rax)
movq -0x20(%rbp), %rax
movsd (%rax), %xmm0
movsd %xmm0, -0xb0(%rbp)
addq $0x28, %rdi
xorl %eax, %eax
movl %eax, %esi
callq 0x54ba0
movsd -0xb0(%rbp), %xmm0
movq -0x88(%rbp), %rdi
movsd %xmm0, (%rax)
addq $0x8, %rdi
xorl %eax, %eax
movl %eax, %esi
callq 0x54ba0
movq -0x88(%rbp), %rdi
movsd (%rax), %xmm0
movsd %xmm0, -0xa8(%rbp)
movq -0x10(%rbp), %rax
movsd (%rax), %xmm0
movsd %xmm0, -0xa0(%rbp)
movq -0x18(%rbp), %rax
movsd (%rax), %xmm0
movsd %xmm0, -0x98(%rbp)
movq -0x20(%rbp), %rax
movsd (%rax), %xmm0
movsd %xmm0, -0x90(%rbp)
addq $0x18, %rdi
xorl %eax, %eax
movl %eax, %esi
callq 0x54ba0
movsd -0xa8(%rbp), %xmm0
movsd -0xa0(%rbp), %xmm1
movsd -0x98(%rbp), %xmm2
movsd -0x90(%rbp), %xmm3
movsd (%rax), %xmm4
leaq -0x38(%rbp), %rdi
callq 0x56e70
movq -0x10(%rbp), %rsi
movq -0x18(%rbp), %rdx
movq -0x20(%rbp), %rcx
leaq -0x50(%rbp), %rdi
callq 0x56f60
leaq -0x50(%rbp), %rdi
leaq -0x38(%rbp), %rsi
callq 0x56fa0
movq -0x88(%rbp), %rdi
addq $0x8, %rdi
movl $0x1, %esi
callq 0x54ba0
movsd (%rax), %xmm0
xorps %xmm1, %xmm1
ucomisd %xmm1, %xmm0
jbe 0x5521b
movq -0x88(%rbp), %rdi
addq $0x8, %rdi
movl $0x1, %esi
callq 0x54ba0
movq -0x88(%rbp), %rdi
movsd (%rax), %xmm0
addsd (%rdi), %xmm0
movsd %xmm0, (%rdi)
movq -0x10(%rbp), %rax
movsd (%rax), %xmm0
movsd %xmm0, -0xf8(%rbp)
addq $0x48, %rdi
movl $0x1, %esi
callq 0x54ba0
movsd -0xf8(%rbp), %xmm0
movq -0x88(%rbp), %rdi
movsd %xmm0, (%rax)
movq -0x18(%rbp), %rax
movsd (%rax), %xmm0
movsd %xmm0, -0xf0(%rbp)
addq $0x38, %rdi
movl $0x1, %esi
callq 0x54ba0
movsd -0xf0(%rbp), %xmm0
movq -0x88(%rbp), %rdi
movsd %xmm0, (%rax)
movq -0x20(%rbp), %rax
movsd (%rax), %xmm0
movsd %xmm0, -0xe8(%rbp)
addq $0x28, %rdi
movl $0x1, %esi
callq 0x54ba0
movsd -0xe8(%rbp), %xmm0
movq -0x88(%rbp), %rdi
movsd %xmm0, (%rax)
addq $0x8, %rdi
movl $0x1, %esi
callq 0x54ba0
movq -0x88(%rbp), %rdi
movsd (%rax), %xmm0
movsd %xmm0, -0xe0(%rbp)
movq -0x10(%rbp), %rax
movsd (%rax), %xmm0
movsd %xmm0, -0xd8(%rbp)
movq -0x18(%rbp), %rax
movsd (%rax), %xmm0
movsd %xmm0, -0xd0(%rbp)
movq -0x20(%rbp), %rax
movsd (%rax), %xmm0
movsd %xmm0, -0xc8(%rbp)
addq $0x18, %rdi
movl $0x1, %esi
callq 0x54ba0
movsd -0xe0(%rbp), %xmm0
movsd -0xd8(%rbp), %xmm1
movsd -0xd0(%rbp), %xmm2
movsd -0xc8(%rbp), %xmm3
movsd (%rax), %xmm4
leaq -0x68(%rbp), %rdi
callq 0x56e70
movq -0x10(%rbp), %rsi
movq -0x18(%rbp), %rdx
movq -0x20(%rbp), %rcx
leaq -0x80(%rbp), %rdi
callq 0x56f60
leaq -0x80(%rbp), %rdi
leaq -0x68(%rbp), %rsi
callq 0x56fa0
addq $0x100, %rsp # imm = 0x100
popq %rbp
retq
nopw %cs:(%rax,%rax)
nop
| /pantor[P]ruckig/include/ruckig/brake.hpp |
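finalize repeatedly unpacks std::tie(ps, vs, as) = integrate(...); integrate itself is not in this excerpt, but to be consistent with this use it must be the closed-form constant-jerk step. A sketch of that contract (the actual ruckig implementation may differ in details):

#include <tuple>

// Sketch: propagate (p, v, a) over duration t under constant jerk j.
inline std::tuple<double, double, double>
integrate(double t, double p0, double v0, double a0, double j)
{
    return std::make_tuple(
        p0 + t * (v0 + t * (a0 / 2 + t * j / 6)), // position
        v0 + t * (a0 + t * j / 2),                // velocity
        a0 + t * j);                              // acceleration
}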
ruckig::BrakeProfile::finalize_second_order(double&, double&, double&) | void finalize_second_order(double& ps, double& vs, double& as) {
if (t[0] <= 0.0) {
duration = 0.0;
return;
}
duration = t[0];
p[0] = ps;
v[0] = vs;
std::tie(ps, vs, as) = integrate(t[0], ps, vs, a[0], 0.0);
} | pushq %rbp
movq %rsp, %rbp
subq $0x80, %rsp
movq %rdi, -0x8(%rbp)
movq %rsi, -0x10(%rbp)
movq %rdx, -0x18(%rbp)
movq %rcx, -0x20(%rbp)
movq -0x8(%rbp), %rdi
movq %rdi, -0x58(%rbp)
addq $0x8, %rdi
xorl %eax, %eax
movl %eax, %esi
callq 0x54ba0
xorps %xmm0, %xmm0
ucomisd (%rax), %xmm0
jb 0x55279
movq -0x58(%rbp), %rax
xorps %xmm0, %xmm0
movsd %xmm0, (%rax)
jmp 0x55366
movq -0x58(%rbp), %rdi
addq $0x8, %rdi
xorl %eax, %eax
movl %eax, %esi
callq 0x54ba0
movq -0x58(%rbp), %rdi
movsd (%rax), %xmm0
movsd %xmm0, (%rdi)
movq -0x10(%rbp), %rax
movsd (%rax), %xmm0
movsd %xmm0, -0x80(%rbp)
addq $0x48, %rdi
xorl %eax, %eax
movl %eax, %esi
callq 0x54ba0
movsd -0x80(%rbp), %xmm0
movq -0x58(%rbp), %rdi
movsd %xmm0, (%rax)
movq -0x18(%rbp), %rax
movsd (%rax), %xmm0
movsd %xmm0, -0x78(%rbp)
addq $0x38, %rdi
xorl %eax, %eax
movl %eax, %esi
callq 0x54ba0
movsd -0x78(%rbp), %xmm0
movq -0x58(%rbp), %rdi
movsd %xmm0, (%rax)
addq $0x8, %rdi
xorl %eax, %eax
movl %eax, %esi
callq 0x54ba0
movq -0x58(%rbp), %rdi
movsd (%rax), %xmm0
movsd %xmm0, -0x70(%rbp)
movq -0x10(%rbp), %rax
movsd (%rax), %xmm0
movsd %xmm0, -0x68(%rbp)
movq -0x18(%rbp), %rax
movsd (%rax), %xmm0
movsd %xmm0, -0x60(%rbp)
addq $0x28, %rdi
xorl %eax, %eax
movl %eax, %esi
callq 0x54ba0
movsd -0x70(%rbp), %xmm0
movsd -0x68(%rbp), %xmm1
movsd -0x60(%rbp), %xmm2
movsd (%rax), %xmm3
leaq -0x38(%rbp), %rdi
xorps %xmm4, %xmm4
callq 0x56e70
movq -0x10(%rbp), %rsi
movq -0x18(%rbp), %rdx
movq -0x20(%rbp), %rcx
leaq -0x50(%rbp), %rdi
callq 0x56f60
leaq -0x50(%rbp), %rdi
leaq -0x38(%rbp), %rsi
callq 0x56fa0
addq $0x80, %rsp
popq %rbp
retq
nop
| /pantor[P]ruckig/include/ruckig/brake.hpp |
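For the second-order variant above, the jerk argument is the constant 0.0 (the xorps %xmm4, %xmm4 just before the integrate call in the listing), so the step collapses to a = a[0], v = v0 + t*a0, and p = p0 + t*(v0 + t*a0/2): the same formulas as the integrate sketch above with j = 0.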
bool ruckig::Profile::check_for_velocity<(ruckig::Profile::ControlSigns)0, (ruckig::Profile::ReachedLimits)7>(double, double, double) | bool check_for_velocity(double jf, double aMax, double aMin) {
if (t[0] < 0) {
return false;
}
t_sum[0] = t[0];
for (size_t i = 0; i < 6; ++i) {
if (t[i+1] < 0) {
return false;
}
t_sum[i+1] = t_sum[i] + t[i+1];
}
if constexpr (limits == ReachedLimits::ACC0) {
if (t[1] < std::numeric_limits<double>::epsilon()) {
return false;
}
}
if (t_sum.back() > t_max) { // For numerical reasons, is that needed?
return false;
}
if constexpr (control_signs == ControlSigns::UDDU) {
j = {(t[0] > 0 ? jf : 0), 0, (t[2] > 0 ? -jf : 0), 0, (t[4] > 0 ? -jf : 0), 0, (t[6] > 0 ? jf : 0)};
} else {
j = {(t[0] > 0 ? jf : 0), 0, (t[2] > 0 ? -jf : 0), 0, (t[4] > 0 ? jf : 0), 0, (t[6] > 0 ? -jf : 0)};
}
for (size_t i = 0; i < 7; ++i) {
a[i+1] = a[i] + t[i] * j[i];
v[i+1] = v[i] + t[i] * (a[i] + t[i] * j[i] / 2);
p[i+1] = p[i] + t[i] * (v[i] + t[i] * (a[i] / 2 + t[i] * j[i] / 6));
}
this->control_signs = control_signs;
this->limits = limits;
direction = (aMax > 0) ? Profile::Direction::UP : Profile::Direction::DOWN;
const double aUppLim = (direction == Profile::Direction::UP ? aMax : aMin) + a_eps;
const double aLowLim = (direction == Profile::Direction::UP ? aMin : aMax) - a_eps;
// Velocity limit can be broken in the beginning if both initial velocity and acceleration are too high
// std::cout << std::setprecision(15) << "target: " << std::abs(p.back() - pf) << " " << std::abs(v.back() - vf) << " " << std::abs(a.back() - af) << " T: " << t_sum.back() << " " << to_string() << std::endl;
return std::abs(v.back() - vf) < v_precision && std::abs(a.back() - af) < a_precision
&& a[1] >= aLowLim && a[3] >= aLowLim && a[5] >= aLowLim
&& a[1] <= aUppLim && a[3] <= aUppLim && a[5] <= aUppLim;
} | pushq %rbp
movq %rsp, %rbp
subq $0x170, %rsp # imm = 0x170
movq %rdi, -0x10(%rbp)
movsd %xmm0, -0x18(%rbp)
movsd %xmm1, -0x20(%rbp)
movsd %xmm2, -0x28(%rbp)
movq -0x10(%rbp), %rdi
movq %rdi, -0x88(%rbp)
xorl %eax, %eax
movl %eax, %esi
callq 0x53a00
xorps %xmm0, %xmm0
ucomisd (%rax), %xmm0
jbe 0x5c3e4
movb $0x0, -0x1(%rbp)
jmp 0x5cc60
movq -0x88(%rbp), %rdi
xorl %eax, %eax
movl %eax, %esi
callq 0x53a00
movq -0x88(%rbp), %rdi
movsd (%rax), %xmm0
movsd %xmm0, -0x90(%rbp)
addq $0x38, %rdi
xorl %eax, %eax
movl %eax, %esi
callq 0x53a00
movsd -0x90(%rbp), %xmm0
movsd %xmm0, (%rax)
movq $0x0, -0x30(%rbp)
cmpq $0x6, -0x30(%rbp)
jae 0x5c4d6
movq -0x88(%rbp), %rdi
movq -0x30(%rbp), %rsi
addq $0x1, %rsi
callq 0x53a00
xorps %xmm0, %xmm0
ucomisd (%rax), %xmm0
jbe 0x5c459
movb $0x0, -0x1(%rbp)
jmp 0x5cc60
movq -0x88(%rbp), %rdi
addq $0x38, %rdi
movq -0x30(%rbp), %rsi
callq 0x53a00
movq -0x88(%rbp), %rdi
movsd (%rax), %xmm0
movsd %xmm0, -0xa0(%rbp)
movq -0x30(%rbp), %rsi
addq $0x1, %rsi
callq 0x53a00
movsd -0xa0(%rbp), %xmm0
movq -0x88(%rbp), %rdi
addsd (%rax), %xmm0
movsd %xmm0, -0x98(%rbp)
addq $0x38, %rdi
movq -0x30(%rbp), %rsi
addq $0x1, %rsi
callq 0x53a00
movsd -0x98(%rbp), %xmm0
movsd %xmm0, (%rax)
movq -0x30(%rbp), %rax
addq $0x1, %rax
movq %rax, -0x30(%rbp)
jmp 0x5c428
movq -0x88(%rbp), %rdi
addq $0x38, %rdi
callq 0x54c90
movsd (%rax), %xmm0
movsd 0x5b5e(%rip), %xmm1 # 0x62050
ucomisd %xmm1, %xmm0
jbe 0x5c501
movb $0x0, -0x1(%rbp)
jmp 0x5cc60
movq -0x88(%rbp), %rdi
leaq -0x68(%rbp), %rax
movq %rax, -0xa8(%rbp)
xorl %eax, %eax
movl %eax, %esi
callq 0x53a00
movsd (%rax), %xmm0
xorps %xmm1, %xmm1
ucomisd %xmm1, %xmm0
jbe 0x5c538
movsd -0x18(%rbp), %xmm0
movsd %xmm0, -0xb0(%rbp)
jmp 0x5c545
xorps %xmm0, %xmm0
movsd %xmm0, -0xb0(%rbp)
jmp 0x5c545
movq -0x88(%rbp), %rdi
movq -0xa8(%rbp), %rax
movsd -0xb0(%rbp), %xmm0
movsd %xmm0, (%rax)
xorps %xmm0, %xmm0
movsd %xmm0, 0x8(%rax)
addq $0x10, %rax
movq %rax, -0xb8(%rbp)
movl $0x2, %esi
callq 0x53a00
movsd (%rax), %xmm0
xorps %xmm1, %xmm1
ucomisd %xmm1, %xmm0
jbe 0x5c5af
movsd -0x18(%rbp), %xmm0
movq %xmm0, %rax
movabsq $-0x8000000000000000, %rcx # imm = 0x8000000000000000
xorq %rcx, %rax
movq %rax, %xmm0
movsd %xmm0, -0xc0(%rbp)
jmp 0x5c5bc
xorps %xmm0, %xmm0
movsd %xmm0, -0xc0(%rbp)
jmp 0x5c5bc
movq -0x88(%rbp), %rdi
movq -0xa8(%rbp), %rax
movq -0xb8(%rbp), %rcx
movsd -0xc0(%rbp), %xmm0
movsd %xmm0, (%rcx)
xorps %xmm0, %xmm0
movsd %xmm0, 0x18(%rax)
addq $0x20, %rax
movq %rax, -0xc8(%rbp)
movl $0x4, %esi
callq 0x53a00
movsd (%rax), %xmm0
xorps %xmm1, %xmm1
ucomisd %xmm1, %xmm0
jbe 0x5c62d
movsd -0x18(%rbp), %xmm0
movq %xmm0, %rax
movabsq $-0x8000000000000000, %rcx # imm = 0x8000000000000000
xorq %rcx, %rax
movq %rax, %xmm0
movsd %xmm0, -0xd0(%rbp)
jmp 0x5c63a
xorps %xmm0, %xmm0
movsd %xmm0, -0xd0(%rbp)
jmp 0x5c63a
movq -0x88(%rbp), %rdi
movq -0xa8(%rbp), %rax
movq -0xc8(%rbp), %rcx
movsd -0xd0(%rbp), %xmm0
movsd %xmm0, (%rcx)
xorps %xmm0, %xmm0
movsd %xmm0, 0x28(%rax)
addq $0x30, %rax
movq %rax, -0xd8(%rbp)
movl $0x6, %esi
callq 0x53a00
movsd (%rax), %xmm0
xorps %xmm1, %xmm1
ucomisd %xmm1, %xmm0
jbe 0x5c694
movsd -0x18(%rbp), %xmm0
movsd %xmm0, -0xe0(%rbp)
jmp 0x5c6a1
xorps %xmm0, %xmm0
movsd %xmm0, -0xe0(%rbp)
jmp 0x5c6a1
movq -0x88(%rbp), %rdi
movq -0xd8(%rbp), %rax
movsd -0xe0(%rbp), %xmm0
movsd %xmm0, (%rax)
addq $0x70, %rdi
leaq -0x68(%rbp), %rsi
movl $0x38, %edx
callq 0x302d0
movq $0x0, -0x70(%rbp)
cmpq $0x7, -0x70(%rbp)
jae 0x5c9ec
movq -0x88(%rbp), %rdi
addq $0xa8, %rdi
movq -0x70(%rbp), %rsi
callq 0x540f0
movq -0x88(%rbp), %rdi
movsd (%rax), %xmm0
movsd %xmm0, -0x150(%rbp)
movq -0x70(%rbp), %rsi
callq 0x53a00
movq -0x88(%rbp), %rdi
movsd (%rax), %xmm0
movsd %xmm0, -0x158(%rbp)
addq $0x70, %rdi
movq -0x70(%rbp), %rsi
callq 0x53a00
movsd -0x158(%rbp), %xmm0
movsd -0x150(%rbp), %xmm1
movq -0x88(%rbp), %rdi
movsd (%rax), %xmm2
mulsd %xmm2, %xmm0
addsd %xmm1, %xmm0
movsd %xmm0, -0x148(%rbp)
addq $0xa8, %rdi
movq -0x70(%rbp), %rsi
addq $0x1, %rsi
callq 0x540f0
movsd -0x148(%rbp), %xmm0
movq -0x88(%rbp), %rdi
movsd %xmm0, (%rax)
addq $0xe8, %rdi
movq -0x70(%rbp), %rsi
callq 0x540f0
movq -0x88(%rbp), %rdi
movsd (%rax), %xmm0
movsd %xmm0, -0x128(%rbp)
movq -0x70(%rbp), %rsi
callq 0x53a00
movq -0x88(%rbp), %rdi
movsd (%rax), %xmm0
movsd %xmm0, -0x130(%rbp)
addq $0xa8, %rdi
movq -0x70(%rbp), %rsi
callq 0x540f0
movq -0x88(%rbp), %rdi
movsd (%rax), %xmm0
movsd %xmm0, -0x138(%rbp)
movq -0x70(%rbp), %rsi
callq 0x53a00
movq -0x88(%rbp), %rdi
movsd (%rax), %xmm0
movsd %xmm0, -0x140(%rbp)
addq $0x70, %rdi
movq -0x70(%rbp), %rsi
callq 0x53a00
movsd -0x140(%rbp), %xmm3
movsd -0x138(%rbp), %xmm2
movsd -0x130(%rbp), %xmm0
movsd -0x128(%rbp), %xmm1
movq -0x88(%rbp), %rdi
mulsd (%rax), %xmm3
movsd 0x57dd(%rip), %xmm4 # 0x62020
divsd %xmm4, %xmm3
addsd %xmm3, %xmm2
mulsd %xmm2, %xmm0
addsd %xmm1, %xmm0
movsd %xmm0, -0x120(%rbp)
addq $0xe8, %rdi
movq -0x70(%rbp), %rsi
addq $0x1, %rsi
callq 0x540f0
movsd -0x120(%rbp), %xmm0
movq -0x88(%rbp), %rdi
movsd %xmm0, (%rax)
addq $0x128, %rdi # imm = 0x128
movq -0x70(%rbp), %rsi
callq 0x540f0
movq -0x88(%rbp), %rdi
movsd (%rax), %xmm0
movsd %xmm0, -0xf0(%rbp)
movq -0x70(%rbp), %rsi
callq 0x53a00
movq -0x88(%rbp), %rdi
movsd (%rax), %xmm0
movsd %xmm0, -0xf8(%rbp)
addq $0xe8, %rdi
movq -0x70(%rbp), %rsi
callq 0x540f0
movq -0x88(%rbp), %rdi
movsd (%rax), %xmm0
movsd %xmm0, -0x100(%rbp)
movq -0x70(%rbp), %rsi
callq 0x53a00
movq -0x88(%rbp), %rdi
movsd (%rax), %xmm0
movsd %xmm0, -0x108(%rbp)
addq $0xa8, %rdi
movq -0x70(%rbp), %rsi
callq 0x540f0
movq -0x88(%rbp), %rdi
movsd (%rax), %xmm0
movsd 0x56fd(%rip), %xmm1 # 0x62020
divsd %xmm1, %xmm0
movsd %xmm0, -0x110(%rbp)
movq -0x70(%rbp), %rsi
callq 0x53a00
movq -0x88(%rbp), %rdi
movsd (%rax), %xmm0
movsd %xmm0, -0x118(%rbp)
addq $0x70, %rdi
movq -0x70(%rbp), %rsi
callq 0x53a00
movsd -0x118(%rbp), %xmm5
movsd -0x110(%rbp), %xmm4
movsd -0x108(%rbp), %xmm2
movsd -0x100(%rbp), %xmm3
movsd -0xf8(%rbp), %xmm0
movsd -0xf0(%rbp), %xmm1
movq -0x88(%rbp), %rdi
mulsd (%rax), %xmm5
movsd 0x56ad(%rip), %xmm6 # 0x62048
divsd %xmm6, %xmm5
addsd %xmm5, %xmm4
mulsd %xmm4, %xmm2
addsd %xmm3, %xmm2
mulsd %xmm2, %xmm0
addsd %xmm1, %xmm0
movsd %xmm0, -0xe8(%rbp)
addq $0x128, %rdi # imm = 0x128
movq -0x70(%rbp), %rsi
addq $0x1, %rsi
callq 0x540f0
movsd -0xe8(%rbp), %xmm0
movsd %xmm0, (%rax)
movq -0x70(%rbp), %rax
addq $0x1, %rax
movq %rax, -0x70(%rbp)
jmp 0x5c6d5
movq -0x88(%rbp), %rax
movl $0x0, 0x238(%rax)
movl $0x7, 0x230(%rax)
movsd -0x20(%rbp), %xmm0
xorps %xmm1, %xmm1
movl $0x1, %ecx
xorl %edx, %edx
ucomisd %xmm1, %xmm0
cmoval %edx, %ecx
movl %ecx, 0x234(%rax)
cmpl $0x0, 0x234(%rax)
jne 0x5ca3b
movsd -0x20(%rbp), %xmm0
movsd %xmm0, -0x160(%rbp)
jmp 0x5ca48
movsd -0x28(%rbp), %xmm0
movsd %xmm0, -0x160(%rbp)
movq -0x88(%rbp), %rax
movsd -0x160(%rbp), %xmm0
movsd 0x55e1(%rip), %xmm1 # 0x62040
addsd %xmm1, %xmm0
movsd %xmm0, -0x78(%rbp)
cmpl $0x0, 0x234(%rax)
jne 0x5ca80
movsd -0x28(%rbp), %xmm0
movsd %xmm0, -0x168(%rbp)
jmp 0x5ca8d
movsd -0x20(%rbp), %xmm0
movsd %xmm0, -0x168(%rbp)
movq -0x88(%rbp), %rdi
movsd -0x168(%rbp), %xmm0
movsd 0x559c(%rip), %xmm1 # 0x62040
subsd %xmm1, %xmm0
movsd %xmm0, -0x80(%rbp)
addq $0xe8, %rdi
callq 0x54c70
movq %rax, %rcx
movq -0x88(%rbp), %rax
movsd (%rcx), %xmm0
subsd 0x220(%rax), %xmm0
callq 0x56ce0
movaps %xmm0, %xmm1
xorl %eax, %eax
movsd 0x5577(%rip), %xmm0 # 0x62058
ucomisd %xmm1, %xmm0
movb %al, -0x169(%rbp)
jbe 0x5cc55
movq -0x88(%rbp), %rdi
addq $0xa8, %rdi
callq 0x54c70
movq %rax, %rcx
movq -0x88(%rbp), %rax
movsd (%rcx), %xmm0
subsd 0x228(%rax), %xmm0
callq 0x56ce0
movaps %xmm0, %xmm1
xorl %eax, %eax
movsd 0x5534(%rip), %xmm0 # 0x62060
ucomisd %xmm1, %xmm0
movb %al, -0x169(%rbp)
jbe 0x5cc55
movq -0x88(%rbp), %rdi
addq $0xa8, %rdi
movl $0x1, %esi
callq 0x540f0
movsd (%rax), %xmm0
xorl %eax, %eax
ucomisd -0x80(%rbp), %xmm0
movb %al, -0x169(%rbp)
jb 0x5cc55
movq -0x88(%rbp), %rdi
addq $0xa8, %rdi
movl $0x3, %esi
callq 0x540f0
movsd (%rax), %xmm0
xorl %eax, %eax
ucomisd -0x80(%rbp), %xmm0
movb %al, -0x169(%rbp)
jb 0x5cc55
movq -0x88(%rbp), %rdi
addq $0xa8, %rdi
movl $0x5, %esi
callq 0x540f0
movsd (%rax), %xmm0
xorl %eax, %eax
ucomisd -0x80(%rbp), %xmm0
movb %al, -0x169(%rbp)
jb 0x5cc55
movq -0x88(%rbp), %rdi
addq $0xa8, %rdi
movl $0x1, %esi
callq 0x540f0
movsd (%rax), %xmm1
movsd -0x78(%rbp), %xmm0
xorl %eax, %eax
ucomisd %xmm1, %xmm0
movb %al, -0x169(%rbp)
jb 0x5cc55
movq -0x88(%rbp), %rdi
addq $0xa8, %rdi
movl $0x3, %esi
callq 0x540f0
movsd (%rax), %xmm1
movsd -0x78(%rbp), %xmm0
xorl %eax, %eax
ucomisd %xmm1, %xmm0
movb %al, -0x169(%rbp)
jb 0x5cc55
movq -0x88(%rbp), %rdi
addq $0xa8, %rdi
movl $0x5, %esi
callq 0x540f0
movsd (%rax), %xmm1
movsd -0x78(%rbp), %xmm0
ucomisd %xmm1, %xmm0
setae %al
movb %al, -0x169(%rbp)
movb -0x169(%rbp), %al
andb $0x1, %al
movb %al, -0x1(%rbp)
movb -0x1(%rbp), %al
andb $0x1, %al
addq $0x170, %rsp # imm = 0x170
popq %rbp
retq
nop
| /pantor[P]ruckig/include/ruckig/profile.hpp |
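The duration bookkeeping at the top of check_for_velocity is a guarded inclusive prefix sum over the seven phase times; any negative duration rejects the candidate profile immediately. Standalone sketch of that step (accumulate_phase_times is an illustrative name):

#include <array>
#include <numeric>

bool accumulate_phase_times(const std::array<double, 7>& t,
                            std::array<double, 7>& t_sum)
{
    for (double ti : t)
    {
        if (ti < 0.0) return false;  // negative phase duration: invalid
    }
    std::partial_sum(t.begin(), t.end(), t_sum.begin()); // t_sum[i] = t[0] + ... + t[i]
    return true;
}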
bool ruckig::Profile::check_for_second_order_velocity<(ruckig::Profile::ControlSigns)1, (ruckig::Profile::ReachedLimits)7>(double) | bool check_for_second_order_velocity(double aUp) {
// ReachedLimits::ACC0
if (t[1] < 0.0) {
return false;
}
t_sum = {0, t[1], t[1], t[1], t[1], t[1], t[1]};
if (t_sum.back() > t_max) { // For numerical reasons, is that needed?
return false;
}
j = {0, 0, 0, 0, 0, 0, 0};
a = {0, (t[1] > 0) ? aUp : 0, 0, 0, 0, 0, 0, af};
for (size_t i = 0; i < 7; ++i) {
v[i+1] = v[i] + t[i] * a[i];
p[i+1] = p[i] + t[i] * (v[i] + t[i] * a[i] / 2);
}
this->control_signs = control_signs;
this->limits = limits;
direction = (aUp > 0) ? Profile::Direction::UP : Profile::Direction::DOWN;
// Velocity limit can be broken in the beginning if both initial velocity and acceleration are too high
// std::cout << std::setprecision(15) << "target: " << std::abs(p.back() - pf) << " " << std::abs(v.back() - vf) << " " << std::abs(a.back() - af) << " T: " << t_sum.back() << " " << to_string() << std::endl;
return std::abs(v.back() - vf) < v_precision;
} | pushq %rbp
movq %rsp, %rbp
subq $0x130, %rsp # imm = 0x130
movq %rdi, -0x10(%rbp)
movsd %xmm0, -0x18(%rbp)
movq -0x10(%rbp), %rdi
movq %rdi, -0xd8(%rbp)
movl $0x1, %esi
callq 0x53a00
xorps %xmm0, %xmm0
ucomisd (%rax), %xmm0
jbe 0x5daab
movb $0x0, -0x1(%rbp)
jmp 0x5deea
movq -0xd8(%rbp), %rdi
xorps %xmm0, %xmm0
movsd %xmm0, -0x50(%rbp)
movl $0x1, %esi
callq 0x53a00
movq -0xd8(%rbp), %rdi
movsd (%rax), %xmm0
movsd %xmm0, -0x48(%rbp)
movl $0x1, %esi
callq 0x53a00
movq -0xd8(%rbp), %rdi
movsd (%rax), %xmm0
movsd %xmm0, -0x40(%rbp)
movl $0x1, %esi
callq 0x53a00
movq -0xd8(%rbp), %rdi
movsd (%rax), %xmm0
movsd %xmm0, -0x38(%rbp)
movl $0x1, %esi
callq 0x53a00
movq -0xd8(%rbp), %rdi
movsd (%rax), %xmm0
movsd %xmm0, -0x30(%rbp)
movl $0x1, %esi
callq 0x53a00
movq -0xd8(%rbp), %rdi
movsd (%rax), %xmm0
movsd %xmm0, -0x28(%rbp)
movl $0x1, %esi
callq 0x53a00
movq -0xd8(%rbp), %rdi
movsd (%rax), %xmm0
movsd %xmm0, -0x20(%rbp)
addq $0x38, %rdi
leaq -0x50(%rbp), %rsi
movl $0x38, %edx
callq 0x302d0
movq -0xd8(%rbp), %rdi
addq $0x38, %rdi
callq 0x54c90
movsd (%rax), %xmm0
movsd 0x44cc(%rip), %xmm1 # 0x62050
ucomisd %xmm1, %xmm0
jbe 0x5db93
movb $0x0, -0x1(%rbp)
jmp 0x5deea
leaq -0x88(%rbp), %rdi
xorl %esi, %esi
movl $0x38, %edx
callq 0x301d0
leaq -0x88(%rbp), %rdi
leaq 0x499c(%rip), %rsi # 0x62550
movl $0x38, %edx
callq 0x302d0
movq -0xd8(%rbp), %rdi
addq $0x70, %rdi
leaq -0x88(%rbp), %rsi
movl $0x38, %edx
callq 0x302d0
leaq -0xc8(%rbp), %rdi
xorl %esi, %esi
movl $0x40, %edx
callq 0x301d0
movq -0xd8(%rbp), %rdi
leaq -0xc8(%rbp), %rax
movq %rax, -0xe8(%rbp)
addq $0x8, %rax
movq %rax, -0xe0(%rbp)
movl $0x1, %esi
callq 0x53a00
movsd (%rax), %xmm0
xorps %xmm1, %xmm1
ucomisd %xmm1, %xmm0
jbe 0x5dc33
movsd -0x18(%rbp), %xmm0
movsd %xmm0, -0xf0(%rbp)
jmp 0x5dc40
xorps %xmm0, %xmm0
movsd %xmm0, -0xf0(%rbp)
jmp 0x5dc40
movq -0xd8(%rbp), %rdi
movq -0xe8(%rbp), %rax
movq -0xe0(%rbp), %rcx
movsd -0xf0(%rbp), %xmm0
movsd %xmm0, (%rcx)
movsd 0x228(%rdi), %xmm0
movsd %xmm0, 0x38(%rax)
addq $0xa8, %rdi
leaq -0xc8(%rbp), %rsi
movl $0x40, %edx
callq 0x302d0
movq $0x0, -0xd0(%rbp)
cmpq $0x7, -0xd0(%rbp)
jae 0x5de75
movq -0xd8(%rbp), %rdi
addq $0xe8, %rdi
movq -0xd0(%rbp), %rsi
callq 0x540f0
movq -0xd8(%rbp), %rdi
movsd (%rax), %xmm0
movsd %xmm0, -0x128(%rbp)
movq -0xd0(%rbp), %rsi
callq 0x53a00
movq -0xd8(%rbp), %rdi
movsd (%rax), %xmm0
movsd %xmm0, -0x130(%rbp)
addq $0xa8, %rdi
movq -0xd0(%rbp), %rsi
callq 0x540f0
movsd -0x130(%rbp), %xmm0
movsd -0x128(%rbp), %xmm1
movq -0xd8(%rbp), %rdi
movsd (%rax), %xmm2
mulsd %xmm2, %xmm0
addsd %xmm1, %xmm0
movsd %xmm0, -0x120(%rbp)
addq $0xe8, %rdi
movq -0xd0(%rbp), %rsi
addq $0x1, %rsi
callq 0x540f0
movsd -0x120(%rbp), %xmm0
movq -0xd8(%rbp), %rdi
movsd %xmm0, (%rax)
addq $0x128, %rdi # imm = 0x128
movq -0xd0(%rbp), %rsi
callq 0x540f0
movq -0xd8(%rbp), %rdi
movsd (%rax), %xmm0
movsd %xmm0, -0x100(%rbp)
movq -0xd0(%rbp), %rsi
callq 0x53a00
movq -0xd8(%rbp), %rdi
movsd (%rax), %xmm0
movsd %xmm0, -0x108(%rbp)
addq $0xe8, %rdi
movq -0xd0(%rbp), %rsi
callq 0x540f0
movq -0xd8(%rbp), %rdi
movsd (%rax), %xmm0
movsd %xmm0, -0x110(%rbp)
movq -0xd0(%rbp), %rsi
callq 0x53a00
movq -0xd8(%rbp), %rdi
movsd (%rax), %xmm0
movsd %xmm0, -0x118(%rbp)
addq $0xa8, %rdi
movq -0xd0(%rbp), %rsi
callq 0x540f0
movsd -0x118(%rbp), %xmm3
movsd -0x110(%rbp), %xmm2
movsd -0x108(%rbp), %xmm0
movsd -0x100(%rbp), %xmm1
movq -0xd8(%rbp), %rdi
mulsd (%rax), %xmm3
movsd 0x41fd(%rip), %xmm4 # 0x62020
divsd %xmm4, %xmm3
addsd %xmm3, %xmm2
mulsd %xmm2, %xmm0
addsd %xmm1, %xmm0
movsd %xmm0, -0xf8(%rbp)
addq $0x128, %rdi # imm = 0x128
movq -0xd0(%rbp), %rsi
addq $0x1, %rsi
callq 0x540f0
movsd -0xf8(%rbp), %xmm0
movsd %xmm0, (%rax)
movq -0xd0(%rbp), %rax
addq $0x1, %rax
movq %rax, -0xd0(%rbp)
jmp 0x5dc91
movq -0xd8(%rbp), %rdi
movl $0x1, 0x238(%rdi)
movl $0x7, 0x230(%rdi)
movsd -0x18(%rbp), %xmm0
xorps %xmm1, %xmm1
movl $0x1, %eax
xorl %ecx, %ecx
ucomisd %xmm1, %xmm0
cmoval %ecx, %eax
movl %eax, 0x234(%rdi)
addq $0xe8, %rdi
callq 0x54c70
movq %rax, %rcx
movq -0xd8(%rbp), %rax
movsd (%rcx), %xmm0
subsd 0x220(%rax), %xmm0
callq 0x56ce0
movaps %xmm0, %xmm1
movsd 0x417a(%rip), %xmm0 # 0x62058
ucomisd %xmm1, %xmm0
seta %al
andb $0x1, %al
movb %al, -0x1(%rbp)
movb -0x1(%rbp), %al
andb $0x1, %al
addq $0x130, %rsp # imm = 0x130
popq %rbp
retq
nopl (%rax,%rax)
| /pantor[P]ruckig/include/ruckig/profile.hpp |
flatbuffers::FlatBufferBuilder::~FlatBufferBuilder() | explicit FlatBufferBuilder(uoffset_t initial_size = 1024)
: buf_(initial_size), minalign_(1), force_defaults_(false) {
offsetbuf_.reserve(16); // Avoid first few reallocs.
vtables_.reserve(16);
EndianCheck();
flatbuffer_version_string =
"FlatBuffers "
FLATBUFFERS_STRING(FLATBUFFERS_VERSION_MAJOR) "."
FLATBUFFERS_STRING(FLATBUFFERS_VERSION_MINOR) "."
FLATBUFFERS_STRING(FLATBUFFERS_VERSION_REVISION);
} | pushq %rbx
movq %rdi, %rbx
movq 0x30(%rdi), %rdi
testq %rdi, %rdi
je 0x2758
callq 0x20a0
movq 0x18(%rbx), %rdi
testq %rdi, %rdi
je 0x2766
callq 0x20a0
movq 0x8(%rbx), %rdi
testq %rdi, %rdi
je 0x2775
popq %rbx
jmp 0x20d0
popq %rbx
retq
nop
| /AlexStocks[P]flatbuffers/include/flatbuffers/flatbuffers.h |
void flatbuffers::FlatBufferBuilder::AddElement<unsigned int>(unsigned short, unsigned int, unsigned int) | void AddElement(voffset_t field, T e, T def) {
// We don't serialize values equal to the default.
if (e == def && !force_defaults_) return;
auto off = PushElement(e);
TrackField(field, off);
} | pushq %rbp
pushq %r14
pushq %rbx
movl %edx, %r14d
movl %esi, %ebp
movq %rdi, %rbx
cmpl %ecx, %edx
jne 0x2b64
cmpb $0x1, 0x50(%rbx)
jne 0x2b98
movl $0x4, %esi
movq %rbx, %rdi
callq 0x2ad6
movl $0x4, %esi
movq %rbx, %rdi
callq 0x2a0a
movl %r14d, (%rax)
movl (%rbx), %edx
subl 0x10(%rbx), %edx
addl 0x8(%rbx), %edx
movzwl %bp, %esi
movq %rbx, %rdi
popq %rbx
popq %r14
popq %rbp
jmp 0x2bea
popq %rbx
popq %r14
popq %rbp
retq
nop
| /AlexStocks[P]flatbuffers/include/flatbuffers/flatbuffers.h |
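AddElement implements FlatBuffers' default-value elision: a scalar equal to its schema default is neither pushed nor recorded in the vtable unless force_defaults_ is set, which is why the listing can return before reaching PushElement/TrackField. A sketch of how generated table-building code drives it (field offsets 4 and 6 are illustrative; the EndTable(start, numfields) signature matches older FlatBuffers releases such as this one):

#include "flatbuffers/flatbuffers.h"

int main()
{
    flatbuffers::FlatBufferBuilder fbb;
    auto start = fbb.StartTable();
    fbb.AddElement<uint32_t>(4, 0u, 0u); // e == def: nothing written
    fbb.AddElement<uint32_t>(6, 7u, 0u); // pushed and tracked for the vtable
    fbb.EndTable(start, 2);              // numfields = 2
    return 0;
}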