Dataset schema: name (string, 1 to 473k chars), code (string, 7 to 647k chars), asm (string, 4 to 3.39M chars), file (string, 8 to 196 chars).
char* fmt::v7::detail::write<char, char*, unsigned long long, 0>(char*, unsigned long long)
inline int count_digits(uint64_t n) {
  // https://github.com/fmtlib/format-benchmark/blob/master/digits10
  auto t = bsr2log10(FMT_BUILTIN_CLZLL(n | 1) ^ 63);
  return t - (n < data::zero_or_powers_of_10_64_new[t]);
}
movq %rsi, %rcx orq $0x1, %rcx leaq 0x1282c(%rip), %rdx # 0x209a0 movq %rdi, %rax lzcntq %rcx, %rcx xorl $0x3f, %ecx movzwl (%rdx,%rcx,2), %ecx leaq 0x12996(%rip), %rdx # 0x20b20 cmpq (%rdx,%rcx,8), %rsi sbbq $0x0, %rax addq %rcx, %rax leaq 0x128b4(%rip), %rcx # 0x20a50 cmpq $0x64, %rsi jb 0xe1e8 movabsq $0x28f5c28f5c28f5c3, %r8 # imm = 0x28F5C28F5C28F5C3 movq %rax, %r9 movq %rsi, %rdx shrq $0x2, %rdx movq %rsi, %r11 leaq -0x2(%r9), %rdi mulxq %r8, %rdx, %rdx shrq $0x2, %rdx imulq $0x64, %rdx, %r10 subq %r10, %r11 movzwl (%rcx,%r11,2), %r10d movw %r10w, -0x2(%r9) movq %rdi, %r9 cmpq $0x270f, %rsi # imm = 0x270F movq %rdx, %rsi ja 0xe1af jmp 0xe1ee movq %rax, %rdi movq %rsi, %rdx cmpq $0x9, %rdx ja 0xe1fb orb $0x30, %dl movb %dl, -0x1(%rdi) retq movzwl (%rcx,%rdx,2), %ecx movw %cx, -0x2(%rdi) retq
/quesnel[P]baryonyx/external/fmt/include/fmt/format.h
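A minimal self-contained sketch of the branchless idea above, assuming GCC/Clang's __builtin_clzll; the shift-multiply estimate and the table below stand in for fmt's bsr2log10 / data::zero_or_powers_of_10_64_new and are not its exact data.

#include <cassert>
#include <cstdint>

inline int count_digits_sketch(uint64_t n) {
  // Index t holds 10^(t-1); the two leading zeros make t == 1 accept n == 0.
  static const uint64_t zero_or_powers_of_10[] = {
      0ULL, 0ULL, 10ULL, 100ULL, 1000ULL, 10000ULL, 100000ULL, 1000000ULL,
      10000000ULL, 100000000ULL, 1000000000ULL, 10000000000ULL,
      100000000000ULL, 1000000000000ULL, 10000000000000ULL, 100000000000000ULL,
      1000000000000000ULL, 10000000000000000ULL, 100000000000000000ULL,
      1000000000000000000ULL, 10000000000000000000ULL};
  int bits = 63 - __builtin_clzll(n | 1);    // n | 1 keeps clz well-defined
  int t = ((bits + 1) * 1233 >> 12) + 1;     // ~ bit width * log10(2), may be one high
  return t - (n < zero_or_powers_of_10[t]);  // correct the one-high estimate
}

int main() {
  assert(count_digits_sketch(0) == 1);
  assert(count_digits_sketch(9) == 1);
  assert(count_digits_sketch(10) == 2);
  assert(count_digits_sketch(18446744073709551615ULL) == 20);
}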
std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> fmt::v7::to_string<__int128, 0>(__int128)
inline std::string to_string(T value) {
  // The buffer should be large enough to store the number including the sign
  // or "false" for bool.
  constexpr int max_size = detail::digits10<T>() + 2;
  char buffer[max_size > 5 ? static_cast<unsigned>(max_size) : 5];
  char* begin = buffer;
  return std::string(begin, detail::write<char>(begin, value));
}
pushq %r14 pushq %rbx subq $0x28, %rsp movq %rdi, %rbx movq %rsp, %r14 movq %r14, %rdi callq 0xe239 leaq 0x10(%rbx), %rcx movq %rbx, %rdi movq %r14, %rsi movq %rax, %rdx movq %rcx, (%rbx) callq 0xdeb0 movq %rbx, %rax addq $0x28, %rsp popq %rbx popq %r14 retq
/quesnel[P]baryonyx/external/fmt/include/fmt/format.h
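A sketch of the buffer-sizing rule above, assuming detail::digits10<T>() matches std::numeric_limits<T>::digits10: any T fits in digits10 + 1 decimal digits, and one more slot covers a leading '-'.

#include <cstdio>
#include <limits>

template <typename T> constexpr int max_decimal_size() {
  return std::numeric_limits<T>::digits10 + 2;
}

int main() {
  // For 32-bit int (digits10 == 9): "-2147483648" is exactly 9 + 2 chars.
  static_assert(max_decimal_size<int>() == 11, "");
  // +1 only for snprintf's terminating NUL, which fmt's writer never emits.
  char buffer[max_decimal_size<long long>() + 1];
  std::snprintf(buffer, sizeof buffer, "%lld",
                std::numeric_limits<long long>::min());
  std::printf("%s\n", buffer);  // -9223372036854775808 (20 chars)
}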
char* fmt::v7::detail::write<char, char*, __int128, 0>(char*, __int128)
OutputIt write(OutputIt out, T value) {
  auto abs_value = static_cast<uint32_or_64_or_128_t<T>>(value);
  bool negative = is_negative(value);
  // Don't do -abs_value since it trips unsigned-integer-overflow sanitizer.
  if (negative) abs_value = ~abs_value + 1;
  int num_digits = count_digits(abs_value);
  auto size = (negative ? 1 : 0) + static_cast<size_t>(num_digits);
  auto it = reserve(out, size);
  if (auto ptr = to_pointer<Char>(it, size)) {
    if (negative) *ptr++ = static_cast<Char>('-');
    format_decimal<Char>(ptr, abs_value, num_digits);
    return out;
  }
  if (negative) *it++ = static_cast<Char>('-');
  it = format_decimal<Char>(it, abs_value, num_digits).end;
  return base_iterator(out, it);
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx pushq %rax movq %rdx, %r12 movq %rsi, %r14 testq %rdx, %rdx js 0xe254 movq %r12, %r15 jmp 0xe25d xorl %r15d, %r15d negq %r14 sbbq %r12, %r15 cmpq $0xa, %r14 movq %r15, %rax movl $0x1, %ebx sbbq $0x0, %rax jb 0xe2f3 movl $0x4, %ebx movq %rdi, (%rsp) movq %r14, %r13 movq %r15, %rbp movl $0x63, %eax cmpq %r13, %rax movl $0x0, %eax sbbq %rbp, %rax jae 0xe2e8 movl $0x3e7, %eax # imm = 0x3E7 cmpq %r13, %rax movl $0x0, %eax sbbq %rbp, %rax jae 0xe2ed cmpq $0x2710, %r13 # imm = 0x2710 movq %rbp, %rax sbbq $0x0, %rax jb 0xe2ef movl $0x2710, %edx # imm = 0x2710 movq %r13, %rdi movq %rbp, %rsi xorl %ecx, %ecx callq 0x3080 movl $0x1869f, %ecx # imm = 0x1869F addl $0x4, %ebx cmpq %r13, %rcx movl $0x0, %ecx movq %rax, %r13 sbbq %rbp, %rcx movq %rdx, %rbp jb 0xe282 addl $-0x3, %ebx jmp 0xe2ef addl $-0x2, %ebx jmp 0xe2ef decl %ebx movq (%rsp), %rdi testq %r12, %r12 jns 0xe2fe movb $0x2d, (%rdi) incq %rdi cmpq $0xa, %r14 movq %r15, %rax movl $0x1, %ebp sbbq $0x0, %rax jb 0xe394 movl $0x4, %ebp movq %rdi, (%rsp) movq %r14, %r12 movq %r15, %r13 movl $0x63, %eax cmpq %r12, %rax movl $0x0, %eax sbbq %r13, %rax jae 0xe389 movl $0x3e7, %eax # imm = 0x3E7 cmpq %r12, %rax movl $0x0, %eax sbbq %r13, %rax jae 0xe38e cmpq $0x2710, %r12 # imm = 0x2710 movq %r13, %rax sbbq $0x0, %rax jb 0xe390 movl $0x2710, %edx # imm = 0x2710 movq %r12, %rdi movq %r13, %rsi xorl %ecx, %ecx callq 0x3080 movl $0x1869f, %ecx # imm = 0x1869F addl $0x4, %ebp cmpq %r12, %rcx movl $0x0, %ecx movq %rax, %r12 sbbq %r13, %rcx movq %rdx, %r13 jb 0xe323 addl $-0x3, %ebp jmp 0xe390 addl $-0x2, %ebp jmp 0xe390 decl %ebp movq (%rsp), %rdi cmpl %ebx, %ebp jg 0xe441 movslq %ebx, %rax leaq 0x126aa(%rip), %rbx # 0x20a50 addq %rax, %rdi cmpq $0x64, %r14 movq %r15, %rax sbbq $0x0, %rax movq %rdi, (%rsp) jb 0xe403 movl $0x270f, %r13d # imm = 0x270F movq %rdi, %rbp movl $0x64, %edx leaq -0x2(%rbp), %r12 movq %r14, %rdi movq %r15, %rsi xorl %ecx, %ecx callq 0x3080 imulq $0x64, %rax, %rcx movq %r14, %rsi subq %rcx, %rsi cmpq %r14, %r13 movq %rax, %r14 movzwl (%rbx,%rsi,2), %ecx movw %cx, -0x2(%rbp) movl $0x0, %ecx movq %r12, %rbp sbbq %r15, %rcx movq %rdx, %r15 jb 0xe3c3 jmp 0xe40c movq %r14, %rax movq %r15, %rdx movq %rdi, %r12 movl $0x9, %esi xorl %ecx, %ecx cmpq %rax, %rsi sbbq %rdx, %rcx jb 0xe424 orb $0x30, %al movb %al, -0x1(%r12) jmp 0xe42e movzwl (%rbx,%rax,2), %eax movw %ax, -0x2(%r12) movq (%rsp), %rax addq $0x8, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq leaq 0x11c49(%rip), %rdi # 0x20091 leaq 0x11ca9(%rip), %rdx # 0x200f8 movl $0x41b, %esi # imm = 0x41B callq 0xde89
/quesnel[P]baryonyx/external/fmt/include/fmt/format.h
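A sketch of the sign handling above: converting to unsigned first and negating via ~x + 1 yields |value| even for the minimum value, with no signed overflow and nothing for -fsanitize=unsigned-integer-overflow to flag (unlike a plain unsigned subtraction from zero, which that sanitizer reports).

#include <cassert>
#include <cstdint>
#include <limits>

inline uint32_t abs_value_sketch(int32_t value) {
  auto abs_value = static_cast<uint32_t>(value);
  if (value < 0) abs_value = ~abs_value + 1;  // two's-complement negation
  return abs_value;
}

int main() {
  assert(abs_value_sketch(-42) == 42u);
  // INT32_MIN has no positive counterpart in int32_t, but does in uint32_t.
  assert(abs_value_sketch(std::numeric_limits<int32_t>::min()) == 2147483648u);
}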
std::back_insert_iterator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>> fmt::v7::detail::write<char, std::back_insert_iterator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>>, float, 0>(std::back_insert_iterator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>>, float)
OutputIt write(OutputIt out, T value) {
  if (const_check(!is_supported_floating_point(value))) return out;
  using floaty = conditional_t<std::is_same<T, long double>::value, double, T>;
  using uint = typename dragonbox::float_info<floaty>::carrier_uint;
  auto bits = bit_cast<uint>(value);
  auto fspecs = float_specs();
  auto sign_bit = bits & (uint(1) << (num_bits<uint>() - 1));
  if (sign_bit != 0) {
    fspecs.sign = sign::minus;
    value = -value;
  }
  static const auto specs = basic_format_specs<Char>();
  uint mask = exponent_mask<floaty>();
  if ((bits & mask) == mask)
    return write_nonfinite(out, std::isinf(value), specs, fspecs);
  auto dec = dragonbox::to_decimal(static_cast<floaty>(value));
  return write_float(out, dec, specs, fspecs, static_cast<Char>('.'));
}
pushq %rbx subq $0x10, %rsp vmovd %xmm0, %eax movq %rdi, %rbx movq $0x0, (%rsp) testl %eax, %eax jns 0xe6ff vpxord 0x111b9(%rip){1to4}, %xmm0, %xmm0 # 0x1f8b0 movl $0x100, 0x4(%rsp) # imm = 0x100 notl %eax testl $0x7f800000, %eax # imm = 0x7F800000 jne 0xe730 vmovd %xmm0, %eax xorl %esi, %esi leaq 0x124b3(%rip), %rdx # 0x20bc8 movq %rsp, %rcx movq %rbx, %rdi andl $0x7fffffff, %eax # imm = 0x7FFFFFFF cmpl $0x7f800000, %eax # imm = 0x7F800000 sete %sil callq 0xe75c jmp 0xe756 callq 0xe874 movq (%rsp), %rcx leaq 0x8(%rsp), %rsi leaq 0x12483(%rip), %rdx # 0x20bc8 movl $0x2e, %r8d movq %rbx, %rdi movq %rax, (%rsi) callq 0xebf4 addq $0x10, %rsp popq %rbx retq
/quesnel[P]baryonyx/external/fmt/include/fmt/format.h
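A sketch of the bit-level classification above for IEEE-754 binary32, with memcpy standing in for fmt's bit_cast: the top bit is the sign, and a value is inf or NaN exactly when all exponent bits (0x7f800000) are set.

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

inline uint32_t bits_of(float f) {
  uint32_t u;
  std::memcpy(&u, &f, sizeof u);
  return u;
}

int main() {
  const uint32_t sign_mask = uint32_t(1) << 31;
  const uint32_t exp_mask = 0x7f800000u;
  assert((bits_of(-1.5f) & sign_mask) != 0);
  assert((bits_of(1.5f) & exp_mask) != exp_mask);           // finite
  assert((bits_of(INFINITY) & exp_mask) == exp_mask);       // infinity
  assert((bits_of(std::nanf("")) & exp_mask) == exp_mask);  // NaN
}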
std::back_insert_iterator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>> fmt::v7::detail::write_nonfinite<char, std::back_insert_iterator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>>>(std::back_insert_iterator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>>, bool, fmt::v7::basic_format_specs<char> const&, fmt::v7::detail::float_specs const&)
OutputIt write_nonfinite(OutputIt out, bool isinf,
                         const basic_format_specs<Char>& specs,
                         const float_specs& fspecs) {
  auto str =
      isinf ? (fspecs.upper ? "INF" : "inf") : (fspecs.upper ? "NAN" : "nan");
  constexpr size_t str_size = 3;
  auto sign = fspecs.sign;
  auto size = str_size + (sign ? 1 : 0);
  using iterator = remove_reference_t<decltype(reserve(out, 0))>;
  return write_padded(out, specs, size, [=](iterator it) {
    if (sign) *it++ = static_cast<Char>(data::signs[sign]);
    return copy_str<Char>(str, str + str_size, it);
  });
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx pushq %rax movslq (%rdx), %rax movl %esi, 0x4(%rsp) testq %rax, %rax js 0xe85c movl 0x4(%rcx), %ecx movq %rdx, %r14 movq 0x8(%rdi), %r13 movq %rdi, %rbx movzbl %ch, %ebp movl %ecx, (%rsp) xorl %ecx, %ecx testl %ebp, %ebp setne %cl xorl %r15d, %r15d leaq 0x3(%rcx), %rdx leaq 0x3(%r13,%rcx), %rsi subq %rdx, %rax leaq 0x12431(%rip), %rdx # 0x20bd8 cmovaeq %rax, %r15 movzbl 0x9(%r14), %eax andl $0xf, %eax movb (%rax,%rdx), %al xorl %edx, %edx shrxq %rax, %r15, %r12 movzbl 0xe(%r14), %eax addq $0xa, %r14 imulq %r15, %rax addq %rax, %rsi callq 0xefee addq (%rbx), %r13 movq %r12, %rsi movq %r14, %rdx movq %r13, %rdi callq 0xef86 testl %ebp, %ebp je 0xe7f7 leaq 0x123ef(%rip), %rcx # 0x20bdd movb (%rbp,%rcx), %cl movb %cl, (%rax) incq %rax btl $0x10, (%rsp) leaq 0x11976(%rip), %rcx # 0x20179 leaq 0x1196b(%rip), %rdx # 0x20175 leaq 0x1196c(%rip), %rsi # 0x2017d cmovaeq %rcx, %rdx leaq 0x11965(%rip), %rcx # 0x20181 cmovaeq %rcx, %rsi cmpb $0x0, 0x4(%rsp) cmovneq %rdx, %rsi subq %r12, %r15 movb 0x2(%rsi), %cl movzwl (%rsi), %edx movq %r15, %rsi movw %dx, (%rax) movb %cl, 0x2(%rax) addq $0x3, %rax movq %r14, %rdx movq %rax, %rdi callq 0xef86 movq %rbx, %rax addq $0x8, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq leaq 0x11922(%rip), %rdi # 0x20185 leaq 0x11980(%rip), %rdx # 0x201ea movl $0x146, %esi # imm = 0x146 callq 0xde89
/quesnel[P]baryonyx/external/fmt/include/fmt/format.h
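A sketch of the selection logic above: two flags pick one of four fixed 3-character strings, and the sign (if any) is prepended separately.

#include <cstdio>

const char* nonfinite_str(bool isinf, bool upper) {
  return isinf ? (upper ? "INF" : "inf") : (upper ? "NAN" : "nan");
}

int main() {
  std::printf("%s %s %s %s\n", nonfinite_str(true, false),
              nonfinite_str(true, true), nonfinite_str(false, false),
              nonfinite_str(false, true));  // inf INF nan NAN
}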
std::back_insert_iterator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>> fmt::v7::detail::write_float<std::back_insert_iterator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>>, fmt::v7::detail::dragonbox::decimal_fp<float>, char>(std::back_insert_iterator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>>, fmt::v7::detail::dragonbox::decimal_fp<float> const&, fmt::v7::basic_format_specs<char> const&, fmt::v7::detail::float_specs, char)
OutputIt write_float(OutputIt out, const DecimalFP& fp,
                     const basic_format_specs<Char>& specs, float_specs fspecs,
                     Char decimal_point) {
  auto significand = fp.significand;
  int significand_size = get_significand_size(fp);
  static const Char zero = static_cast<Char>('0');
  auto sign = fspecs.sign;
  size_t size = to_unsigned(significand_size) + (sign ? 1 : 0);
  using iterator = remove_reference_t<decltype(reserve(out, 0))>;

  int output_exp = fp.exponent + significand_size - 1;
  auto use_exp_format = [=]() {
    if (fspecs.format == float_format::exp) return true;
    if (fspecs.format != float_format::general) return false;
    // Use the fixed notation if the exponent is in [exp_lower, exp_upper),
    // e.g. 0.0001 instead of 1e-04. Otherwise use the exponent notation.
    const int exp_lower = -4, exp_upper = 16;
    return output_exp < exp_lower ||
           output_exp >= (fspecs.precision > 0 ? fspecs.precision : exp_upper);
  };
  if (use_exp_format()) {
    int num_zeros = 0;
    if (fspecs.showpoint) {
      num_zeros = (std::max)(fspecs.precision - significand_size, 0);
      size += to_unsigned(num_zeros);
    } else if (significand_size == 1) {
      decimal_point = Char();
    }
    auto abs_output_exp = output_exp >= 0 ? output_exp : -output_exp;
    int exp_digits = 2;
    if (abs_output_exp >= 100) exp_digits = abs_output_exp >= 1000 ? 4 : 3;
    size += to_unsigned((decimal_point ? 1 : 0) + 2 + exp_digits);
    char exp_char = fspecs.upper ? 'E' : 'e';
    auto write = [=](iterator it) {
      if (sign) *it++ = static_cast<Char>(data::signs[sign]);
      // Insert a decimal point after the first digit and add an exponent.
      it = write_significand(it, significand, significand_size, 1,
                             decimal_point);
      if (num_zeros > 0) it = std::fill_n(it, num_zeros, zero);
      *it++ = static_cast<Char>(exp_char);
      return write_exponent<Char>(output_exp, it);
    };
    return specs.width > 0
               ? write_padded<align::right>(out, specs, size, write)
               : base_iterator(out, write(reserve(out, size)));
  }

  int exp = fp.exponent + significand_size;
  if (fp.exponent >= 0) {
    // 1234e5 -> 123400000[.0+]
    size += to_unsigned(fp.exponent);
    int num_zeros = fspecs.precision - exp;
#ifdef FMT_FUZZ
    if (num_zeros > 5000)
      throw std::runtime_error("fuzz mode - avoiding excessive cpu use");
#endif
    if (fspecs.showpoint) {
      if (num_zeros <= 0 && fspecs.format != float_format::fixed) num_zeros = 1;
      if (num_zeros > 0) size += to_unsigned(num_zeros);
    }
    return write_padded<align::right>(out, specs, size, [&](iterator it) {
      if (sign) *it++ = static_cast<Char>(data::signs[sign]);
      it = write_significand<Char>(it, significand, significand_size);
      it = std::fill_n(it, fp.exponent, zero);
      if (!fspecs.showpoint) return it;
      *it++ = decimal_point;
      return num_zeros > 0 ? std::fill_n(it, num_zeros, zero) : it;
    });
  } else if (exp > 0) {
    // 1234e-2 -> 12.34[0+]
    int num_zeros = fspecs.showpoint ? fspecs.precision - significand_size : 0;
    size += 1 + to_unsigned(num_zeros > 0 ? num_zeros : 0);
    return write_padded<align::right>(out, specs, size, [&](iterator it) {
      if (sign) *it++ = static_cast<Char>(data::signs[sign]);
      it = write_significand(it, significand, significand_size, exp,
                             decimal_point);
      return num_zeros > 0 ? std::fill_n(it, num_zeros, zero) : it;
    });
  }
  // 1234e-6 -> 0.001234
  int num_zeros = -exp;
  if (significand_size == 0 && fspecs.precision >= 0 &&
      fspecs.precision < num_zeros) {
    num_zeros = fspecs.precision;
  }
  size += 2 + to_unsigned(num_zeros);
  return write_padded<align::right>(out, specs, size, [&](iterator it) {
    if (sign) *it++ = static_cast<Char>(data::signs[sign]);
    *it++ = zero;
    if (num_zeros == 0 && significand_size == 0 && !fspecs.showpoint) return it;
    *it++ = decimal_point;
    it = std::fill_n(it, num_zeros, zero);
    return write_significand<Char>(it, significand, significand_size);
  });
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x78, %rsp movq %rcx, 0x38(%rsp) movb %r8b, 0xf(%rsp) leaq 0x11d8d(%rip), %r9 # 0x209a0 movl (%rsi), %r15d movl %r15d, %eax orl $0x1, %eax movl %r15d, 0x1c(%rsp) lzcntl %eax, %eax xorl $0x1f, %eax movzwl (%r9,%rax,2), %r14d leaq 0x11dec(%rip), %rax # 0x20a20 xorl %r9d, %r9d cmpl %r15d, (%rax,%r14,4) movq %rcx, %rax seta %r9b shrq $0x28, %rax subl %r9d, %r14d movzbl %al, %r12d movl %r14d, 0x18(%rsp) movl %r12d, 0x14(%rsp) js 0xef6e movl 0x4(%rsi), %eax movq %rcx, %rbx shrq $0x20, %rbx cmpl $0x1, %r12d movl %r14d, %r11d sbbl $-0x1, %r11d leal (%rax,%r14), %r9d cmpb $0x1, %bl je 0xecdf movzbl %bl, %r10d testl %r10d, %r10d jne 0xec9b testl %ecx, %ecx movl $0x10, %r10d cmovgl %ecx, %r10d cmpl $-0x3, %r9d jl 0xecdf cmpl %r10d, %r9d jg 0xecdf movl %r9d, 0x2c(%rsp) testl %eax, %eax js 0xee26 subl %r9d, %ecx addl %r11d, %eax btl $0x14, %ebx movl %ecx, 0x10(%rsp) jae 0xee9b cmpb $0x2, %bl je 0xee92 testl %ecx, %ecx jg 0xee92 movl $0x1, 0x10(%rsp) movl $0x1, %ecx jmp 0xee96 leal -0x1(%rax,%r14), %esi btl $0x14, %ebx jb 0xecfb xorl %r10d, %r10d cmpl $0x1, %r14d movzbl %r8b, %r8d cmovel %r10d, %r8d jmp 0xed0b subl %r14d, %ecx movl %ecx, %eax sarl $0x1f, %eax andnl %ecx, %eax, %r10d addl %r10d, %r11d movl $0x1, %eax movl %r12d, 0x40(%rsp) movl %r15d, 0x44(%rsp) movl %r14d, 0x48(%rsp) movb %r8b, 0x4c(%rsp) movl %r10d, 0x50(%rsp) subl %r9d, %eax testl %r9d, %r9d cmovgl %esi, %eax xorl %ecx, %ecx cmpl $0x3e8, %eax # imm = 0x3E8 setge %cl addq $0x3, %rcx cmpl $0x64, %eax movl $0x2, %eax cmovgeq %rcx, %rax cmpb $0x1, %r8b movl $0x3, %ecx sbbq $0x0, %rcx addq %r11, %rcx addq %rax, %rcx btl $0x10, %ebx setae %bl shlb $0x5, %bl orb $0x45, %bl movb %bl, 0x54(%rsp) movl %esi, 0x58(%rsp) cmpl $0x0, (%rdx) jle 0xed8e leaq 0x40(%rsp), %r8 movq %rdx, %rsi movq %rcx, %rdx callq 0xf339 jmp 0xeee7 movq 0x8(%rdi), %r13 movl %r10d, 0x20(%rsp) movl %esi, 0x24(%rsp) movl %r8d, 0x28(%rsp) movq %rdi, %rbp xorl %edx, %edx addq %r13, %rcx movq %rcx, %rsi callq 0xefee addq (%rbp), %r13 movq %rbp, 0x30(%rsp) testl %r12d, %r12d je 0xedd0 leaq 0x11e18(%rip), %rax # 0x20bdd movb (%r12,%rax), %al movb %al, (%r13) incq %r13 movl 0x28(%rsp), %eax movl 0x20(%rsp), %ebp movl $0x1, %ecx movq %r13, %rdi movl %r15d, %esi movl %r14d, %edx movsbl %al, %r8d callq 0xf451 testl %ebp, %ebp je 0xee0b movl $0x30, %esi movl %ebp, %edx movq %rax, %r14 movq %rax, %rdi addq %rdx, %r14 callq 0x3110 movq %r14, %rax movl 0x24(%rsp), %edi movq 0x30(%rsp), %r14 movb %bl, (%rax) incq %rax movq %rax, %rsi callq 0xf5e0 jmp 0xeeea testl %r9d, %r9d jle 0xeefc shll $0xb, %ebx subl %r14d, %ecx leaq 0x10(%rsp), %rax leaq 0x14(%rsp), %r9 leaq 0x40(%rsp), %r8 leaq 0x1c(%rsp), %r10 sarl $0x1f, %ebx movq %r9, (%r8) leaq 0x18(%rsp), %r9 movq %r10, 0x8(%r8) leaq 0x2c(%rsp), %r10 andl %ebx, %ecx movq %r9, 0x10(%r8) leaq 0xf(%rsp), %r9 movq %r10, 0x18(%r8) movl %ecx, %esi sarl $0x1f, %esi movl %ecx, (%rax) movq %r9, 0x20(%r8) movq %rax, 0x28(%r8) andnl %ecx, %esi, %ecx movq %rdx, %rsi leaq 0x1(%rcx,%r11), %rcx movq %rcx, %rdx callq 0xf847 jmp 0xeee7 testl %ecx, %ecx jle 0xee9b movl %ecx, %ecx addq %rcx, %rax leaq 0x14(%rsp), %rcx leaq 0x40(%rsp), %r8 leaq 0x1c(%rsp), %r10 leaq 0x18(%rsp), %r9 movq %rcx, (%r8) movq %r10, 0x8(%r8) movq %r9, 0x10(%r8) movq %rsi, 0x18(%r8) leaq 0x38(%rsp), %rsi leaq 0xf(%rsp), %r9 movq %rax, %rcx movq %rsi, 0x20(%r8) leaq 0x10(%rsp), %rsi movq %r9, 0x28(%r8) movq %rsi, 0x30(%r8) movq %rdx, %rsi movq %rax, %rdx callq 0xf67d movq %rax, %r14 movq %r14, %rax addq $0x78, %rsp 
popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq negl %r9d cmpl %r9d, %ecx movl %r9d, %eax cmovll %ecx, %eax testl %ecx, %ecx cmovsl %r9d, %eax testl %r14d, %r14d cmovnel %r9d, %eax movl %eax, 0x10(%rsp) testl %eax, %eax js 0xef6e movl %eax, %eax leaq 0x2(%rax,%r11), %rcx leaq 0x14(%rsp), %rax leaq 0x40(%rsp), %r8 leaq 0x10(%rsp), %rsi leaq 0x18(%rsp), %r9 movq %rax, (%r8) movq %rsi, 0x8(%r8) leaq 0x38(%rsp), %rsi movq %r9, 0x10(%r8) leaq 0xf(%rsp), %r9 movq %rsi, 0x18(%r8) leaq 0x1c(%rsp), %rsi movq %r9, 0x20(%r8) movq %rsi, 0x28(%r8) movq %rdx, %rsi movq %rcx, %rdx callq 0xf953 jmp 0xeee7 leaq 0x11210(%rip), %rdi # 0x20185 leaq 0x1126e(%rip), %rdx # 0x201ea movl $0x146, %esi # imm = 0x146 callq 0xde89
/quesnel[P]baryonyx/external/fmt/include/fmt/format.h
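A sketch of the notation choice above for the 'general' format, taking the decimal exponent of the value (the exponent of its first digit) as input: fixed notation is kept while that exponent lies in [-4, precision or 16), matching the use_exp_format lambda in the source.

#include <cassert>

bool use_exp_format_sketch(int output_exp, int precision) {
  const int exp_lower = -4, exp_upper = 16;
  return output_exp < exp_lower ||
         output_exp >= (precision > 0 ? precision : exp_upper);
}

int main() {
  assert(!use_exp_format_sketch(-4, 0));  // 0.0001 stays fixed
  assert(use_exp_format_sketch(-5, 0));   // 0.00001 -> 1e-05
  assert(!use_exp_format_sketch(15, 0));  // 1e15 printed as digits
  assert(use_exp_format_sketch(16, 0));   // 1e16 -> exponent form
}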
char* fmt::v7::detail::fill<char*, char>(char*, unsigned long, fmt::v7::detail::fill_t<char> const&)
FMT_NOINLINE OutputIt fill(OutputIt it, size_t n, const fill_t<Char>& fill) {
  auto fill_size = fill.size();
  if (fill_size == 1) return std::fill_n(it, n, fill[0]);
  for (size_t i = 0; i < n; ++i) it = std::copy_n(fill.data(), fill_size, it);
  return it;
}
pushq %r15 pushq %r14 pushq %r12 pushq %rbx pushq %rax movzbl 0x4(%rdx), %r12d movq %rdx, %r15 movq %rsi, %r14 movq %rdi, %rbx cmpq $0x1, %r12 jne 0xefbf testq %r14, %r14 je 0xefdf movzbl (%r15), %esi leaq (%rbx,%r14), %r12 movq %rbx, %rdi movq %r14, %rdx callq 0x3110 movq %r12, %rbx jmp 0xefdf testq %r14, %r14 je 0xefdf testb %r12b, %r12b je 0xefd7 movq %rbx, %rdi movq %r15, %rsi movq %r12, %rdx callq 0x3330 addq %r12, %rbx decq %r14 jne 0xefc4 movq %rbx, %rax addq $0x8, %rsp popq %rbx popq %r12 popq %r14 popq %r15 retq
/quesnel[P]baryonyx/external/fmt/include/fmt/format.h
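A sketch of the two fill paths above: a single-char fill degenerates to fill_n (memset in the asm), while a multi-byte fill, e.g. a UTF-8 code point, must be copied whole n times. std::string stands in for fmt's fill_t here.

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <string>

char* fill_sketch(char* it, std::size_t n, const std::string& fill) {
  if (fill.size() == 1) return std::fill_n(it, n, fill[0]);
  for (std::size_t i = 0; i < n; ++i)
    it = std::copy_n(fill.data(), fill.size(), it);
  return it;
}

int main() {
  char buf[32];
  char* end = fill_sketch(buf, 3, "\xc2\xb7");  // U+00B7, 2 bytes in UTF-8
  assert(std::string(buf, end) == "\xc2\xb7\xc2\xb7\xc2\xb7");
}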
bool fmt::v7::detail::dragonbox::is_center_integer<float>(fmt::v7::detail::dragonbox::float_info<float>::carrier_uint, int, int)
bool is_center_integer(typename float_info<T>::carrier_uint two_f, int exponent,
                       int minus_k) FMT_NOEXCEPT {
  // Exponent for 5 is negative.
  if (exponent > float_info<T>::divisibility_check_by_5_threshold) return false;
  if (exponent > float_info<T>::case_fc_upper_threshold)
    return divisible_by_power_of_5(two_f, minus_k);
  // Both exponents are nonnegative.
  if (exponent >= float_info<T>::case_fc_lower_threshold) return true;
  // Exponent for 2 is negative.
  return divisible_by_power_of_2(two_f, minus_k - exponent + 1);
}
pushq %rax cmpl $0x27, %esi jle 0xf2b4 xorl %eax, %eax jmp 0xf2ef cmpl $0x7, %esi jl 0xf2d5 cmpl $0xb, %edx jge 0xf2f1 movslq %edx, %rax leaq 0x11b98(%rip), %rcx # 0x20e60 imull (%rcx,%rax,8), %edi cmpl 0x4(%rcx,%rax,8), %edi setbe %al jmp 0xf2ef movb $0x1, %al cmpl $-0x3, %esi jg 0xf2ef subl %esi, %edx js 0xf309 testl %edi, %edi je 0xf321 tzcntl %edi, %eax incl %edx cmpl %edx, %eax setae %al popq %rcx retq leaq 0x10f1e(%rip), %rdi # 0x20216 leaq 0x10f82(%rip), %rdx # 0x20281 movl $0x6ed, %esi # imm = 0x6ED callq 0xde89 leaq 0x10f06(%rip), %rdi # 0x20216 leaq 0x115e6(%rip), %rdx # 0x208fd movl $0x6d9, %esi # imm = 0x6D9 callq 0xde89 leaq 0x10eee(%rip), %rdi # 0x20216 leaq 0x115ce(%rip), %rdx # 0x208fd movl $0x6da, %esi # imm = 0x6DA callq 0xde89
/quesnel[P]baryonyx/external/fmt/include/fmt/format-inl.h
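A sketch of the final branch above: two_f is divisible by 2^k exactly when it has at least k trailing zero bits, which is what the tzcnt in the asm computes. Assumes GCC/Clang's __builtin_ctz; the preconditions mirror the assertion calls visible in the asm.

#include <cassert>
#include <cstdint>

bool divisible_by_power_of_2_sketch(uint32_t x, int exp) {
  assert(exp > 0 && x != 0);        // ctz is undefined for 0
  return __builtin_ctz(x) >= exp;   // at least exp trailing zero bits
}

int main() {
  assert(divisible_by_power_of_2_sketch(8, 3));    // 8 = 2^3
  assert(!divisible_by_power_of_2_sketch(12, 3));  // 12 = 4 * 3
}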
char* fmt::v7::detail::write_significand<char, unsigned int, 0>(char*, unsigned int, int, int, char)
inline Char* write_significand(Char* out, UInt significand,
                               int significand_size, int integral_size,
                               Char decimal_point) {
  if (!decimal_point)
    return format_decimal(out, significand, significand_size).end;
  auto end = format_decimal(out + 1, significand, significand_size).end;
  if (integral_size == 1)
    out[0] = out[1];
  else
    std::copy_n(out + 1, integral_size, out);
  out[integral_size] = decimal_point;
  return end;
}
pushq %rbp pushq %r15 pushq %r14 pushq %rbx pushq %rax movl %r8d, %ebp movq %rdi, %rbx testb %bpl, %bpl je 0xf4e0 movl %esi, %eax orl $0x1, %eax movl %ecx, %r14d leaq 0x1152e(%rip), %rcx # 0x209a0 lzcntl %eax, %eax xorl $0x1f, %eax movzwl (%rcx,%rax,2), %eax leaq 0x1159c(%rip), %rcx # 0x20a20 cmpl (%rcx,%rax,4), %esi sbbl $0x0, %eax cmpl %edx, %eax jg 0xf5c8 leaq 0x1(%rbx), %rax movslq %edx, %r15 leaq 0x115b0(%rip), %rcx # 0x20a50 addq %rax, %r15 movq %r15, %rdx cmpl $0x64, %esi jb 0xf550 movl %esi, %edi imulq $0x51eb851f, %rdi, %rdi # imm = 0x51EB851F movl %esi, %r9d shrq $0x25, %rdi imull $0x64, %edi, %r8d subl %r8d, %r9d movzwl (%rcx,%r9,2), %r8d movw %r8w, -0x2(%rdx) addq $-0x2, %rdx cmpl $0x270f, %esi # imm = 0x270F movl %edi, %esi ja 0xf4af jmp 0xf552 movl %esi, %eax orl $0x1, %eax leaq 0x114b4(%rip), %rcx # 0x209a0 lzcntl %eax, %eax xorl $0x1f, %eax movzwl (%rcx,%rax,2), %eax leaq 0x11522(%rip), %rcx # 0x20a20 cmpl (%rcx,%rax,4), %esi sbbl $0x0, %eax cmpl %edx, %eax jg 0xf5c8 movslq %edx, %rax addq %rax, %rbx cmpl $0x64, %esi jb 0xf591 leaq 0x11532(%rip), %rdx # 0x20a50 movq %rbx, %rax movl %esi, %ecx imulq $0x51eb851f, %rcx, %rcx # imm = 0x51EB851F movl %esi, %r8d shrq $0x25, %rcx imull $0x64, %ecx, %edi subl %edi, %r8d movzwl (%rdx,%r8,2), %edi movw %di, -0x2(%rax) addq $-0x2, %rax cmpl $0x270f, %esi # imm = 0x270F movl %ecx, %esi ja 0xf521 jmp 0xf596 movl %esi, %edi cmpl $0x9, %edi ja 0xf561 orb $0x30, %dil movb %dil, -0x1(%rdx) jmp 0xf56b movl %edi, %esi movzwl (%rcx,%rsi,2), %ecx movw %cx, -0x2(%rdx) cmpl $0x1, %r14d jne 0xf578 movb 0x1(%rbx), %al movb %al, (%rbx) jmp 0xf588 jl 0xf588 movl %r14d, %edx movq %rbx, %rdi movq %rax, %rsi callq 0x3330 movslq %r14d, %rax movb %bpl, (%rbx,%rax) jmp 0xf5ba movq %rbx, %rax movl %esi, %ecx cmpl $0x9, %ecx ja 0xf5a6 orb $0x30, %cl movq %rbx, %r15 movb %cl, -0x1(%rax) jmp 0xf5ba leaq 0x114a3(%rip), %rdx # 0x20a50 movl %ecx, %ecx movq %rbx, %r15 movzwl (%rdx,%rcx,2), %ecx movw %cx, -0x2(%rax) movq %r15, %rax addq $0x8, %rsp popq %rbx popq %r14 popq %r15 popq %rbp retq leaq 0x10ac2(%rip), %rdi # 0x20091 leaq 0x10b22(%rip), %rdx # 0x200f8 movl $0x41b, %esi # imm = 0x41B callq 0xde89
/quesnel[P]baryonyx/external/fmt/include/fmt/format.h
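A sketch of the shift-and-insert trick above: write the digits one slot to the right, move the integral part left by one, and drop the decimal point into the gap. snprintf stands in for fmt's format_decimal.

#include <cassert>
#include <cstdio>
#include <cstring>
#include <string>

char* write_significand_sketch(char* out, unsigned significand,
                               int significand_size, int integral_size,
                               char decimal_point) {
  char digits[16];
  std::snprintf(digits, sizeof digits, "%u", significand);
  std::memcpy(out + 1, digits, significand_size);  // digits land at out[1..n]
  std::memmove(out, out + 1, integral_size);       // integral part to out[0..]
  out[integral_size] = decimal_point;              // fill the freed slot
  return out + significand_size + 1;
}

int main() {
  char buf[16];
  char* end = write_significand_sketch(buf, 1234, 4, 2, '.');
  assert(std::string(buf, end) == "12.34");
}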
fmt::v7::detail::dragonbox::decimal_fp<float> fmt::v7::detail::write_padded<(fmt::v7::align::type)2, std::back_insert_iterator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>>, char, std::back_insert_iterator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>> fmt::v7::detail::write_float<std::back_insert_iterator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>>, fmt::v7::detail::dragonbox::decimal_fp<float>, char>(std::back_insert_iterator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>>, fmt::v7::detail::dragonbox::decimal_fp<float> const&, fmt::v7::basic_format_specs<char> const&, fmt::v7::detail::float_specs, char)::'lambda1'(char*)&>(fmt::v7::detail::dragonbox::decimal_fp<float>, fmt::v7::basic_format_specs<char> const&, unsigned long, unsigned long, std::back_insert_iterator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>> fmt::v7::detail::write_float<std::back_insert_iterator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>>, fmt::v7::detail::dragonbox::decimal_fp<float>, char>(std::back_insert_iterator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>>, fmt::v7::detail::dragonbox::decimal_fp<float> const&, fmt::v7::basic_format_specs<char> const&, fmt::v7::detail::float_specs, char)::'lambda1'(char*)&)
inline OutputIt write_padded(OutputIt out, const basic_format_specs<Char>& specs,
                             size_t size, size_t width, F&& f) {
  static_assert(align == align::left || align == align::right, "");
  unsigned spec_width = to_unsigned(specs.width);
  size_t padding = spec_width > width ? spec_width - width : 0;
  auto* shifts = align == align::left ? data::left_padding_shifts
                                      : data::right_padding_shifts;
  size_t left_padding = padding >> shifts[specs.align];
  auto it = reserve(out, size + padding * specs.fill.size());
  it = fill(it, left_padding, specs.fill);
  it = f(it);
  it = fill(it, padding - left_padding, specs.fill);
  return base_iterator(out, it);
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx pushq %rax movslq (%rsi), %rax testq %rax, %rax js 0xf93b xorl %r15d, %r15d subq %rcx, %rax leaq 0x1164d(%rip), %rcx # 0x20eb8 movq 0x8(%rdi), %rbp movq %r8, %r12 movq %rsi, %r14 movq %rdi, %rbx cmovaeq %rax, %r15 movzbl 0x9(%rsi), %eax addq $0xa, %r14 addq %rbp, %rdx andl $0xf, %eax movb (%rax,%rcx), %al shrxq %rax, %r15, %r13 movzbl 0xe(%rsi), %eax imulq %r15, %rax addq %rax, %rdx movq %rdx, %rsi xorl %edx, %edx callq 0xefee addq (%rbx), %rbp movq %r13, %rsi movq %r14, %rdx movq %rbp, %rdi callq 0xef86 movq (%r12), %rcx movl (%rcx), %ecx testq %rcx, %rcx je 0xf8d2 leaq 0x11313(%rip), %rdx # 0x20bdd movb (%rcx,%rdx), %cl movb %cl, (%rax) incq %rax movq 0x8(%r12), %rcx movq 0x10(%r12), %rdx movq 0x20(%r12), %rdi movl (%rcx), %esi movq 0x18(%r12), %rcx movsbl (%rdi), %r8d movl (%rdx), %edx movq %rax, %rdi movl (%rcx), %ecx callq 0xf451 movq 0x28(%r12), %rcx movq %rax, %rdi movslq (%rcx), %rdx testq %rdx, %rdx jle 0xf91b movl $0x30, %esi movq %rdi, %r12 addq %rdx, %r12 callq 0x3110 movq %r12, %rdi subq %r13, %r15 movq %r14, %rdx movq %r15, %rsi callq 0xef86 movq %rbx, %rax addq $0x8, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq leaq 0x10843(%rip), %rdi # 0x20185 leaq 0x108a1(%rip), %rdx # 0x201ea movl $0x146, %esi # imm = 0x146 callq 0xde89
/quesnel[P]baryonyx/external/fmt/include/fmt/format.h
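A sketch of the branch-free padding split above; fmt's real shift tables (data::left_padding_shifts / data::right_padding_shifts) are indexed by its align enum, so this 3-entry layout is purely illustrative of the idea.

#include <cassert>
#include <cstddef>

std::size_t left_padding_sketch(std::size_t padding, int align) {
  // align: 0 = left, 1 = right, 2 = center (illustrative layout).
  static const unsigned shifts[] = {31, 0, 1};
  // >> 31 leaves none (for any practical width), >> 0 all, >> 1 half.
  return padding >> shifts[align];
}

int main() {
  assert(left_padding_sketch(6, 0) == 0);  // left-align: pad only on the right
  assert(left_padding_sketch(6, 1) == 6);  // right-align: pad only on the left
  assert(left_padding_sketch(6, 2) == 3);  // center: split evenly
}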
std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> fmt::v7::to_string<double, 0>(double const&)
inline std::string to_string(const T& value) {
  std::string result;
  detail::write<char>(std::back_inserter(result), value);
  return result;
}
pushq %r15 pushq %r14 pushq %rbx leaq 0x10(%rdi), %r15 movq %rdi, %rbx movq %r15, (%rdi) movq $0x0, 0x8(%rdi) movb $0x0, 0x10(%rdi) vmovsd (%rsi), %xmm0 callq 0xfb63 movq %rbx, %rax popq %rbx popq %r14 popq %r15 retq movq (%rbx), %rdi movq %rax, %r14 cmpq %r15, %rdi je 0xfb5b movq (%r15), %rsi incq %rsi callq 0x3230 movq %r14, %rdi callq 0x3390
/quesnel[P]baryonyx/external/fmt/include/fmt/format.h
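A sketch of the appending pattern above, using only the standard library: each *out++ = c through a std::back_insert_iterator becomes push_back on the string, so detail::write can grow the result without a stack buffer or size estimate.

#include <cassert>
#include <initializer_list>
#include <iterator>
#include <string>

int main() {
  std::string result;
  auto out = std::back_inserter(result);
  for (char c : {'2', '.', '5'}) *out++ = c;
  assert(result == "2.5");
}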
std::back_insert_iterator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>> fmt::v7::detail::write<char, std::back_insert_iterator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>>, double, 0>(std::back_insert_iterator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>>, double)
OutputIt write(OutputIt out, T value) {
  if (const_check(!is_supported_floating_point(value))) return out;
  using floaty = conditional_t<std::is_same<T, long double>::value, double, T>;
  using uint = typename dragonbox::float_info<floaty>::carrier_uint;
  auto bits = bit_cast<uint>(value);
  auto fspecs = float_specs();
  auto sign_bit = bits & (uint(1) << (num_bits<uint>() - 1));
  if (sign_bit != 0) {
    fspecs.sign = sign::minus;
    value = -value;
  }
  static const auto specs = basic_format_specs<Char>();
  uint mask = exponent_mask<floaty>();
  if ((bits & mask) == mask)
    return write_nonfinite(out, std::isinf(value), specs, fspecs);
  auto dec = dragonbox::to_decimal(static_cast<floaty>(value));
  return write_float(out, dec, specs, fspecs, static_cast<Char>('.'));
}
pushq %rbx subq $0x20, %rsp vmovq %xmm0, %rax movq %rdi, %rbx movq $0x0, 0x8(%rsp) testq %rax, %rax jns 0xfb90 vpxorq 0xfc48(%rip){1to2}, %xmm0, %xmm0 # 0x1f7d0 movl $0x100, 0xc(%rsp) # imm = 0x100 movabsq $0x7ff0000000000000, %rcx # imm = 0x7FF0000000000000 andnq %rcx, %rax, %rax jne 0xfbca vmovq %xmm0, %rax xorl %esi, %esi leaq 0x11311(%rip), %rdx # 0x20ec0 movq %rbx, %rdi btrq $0x3f, %rax cmpq %rcx, %rax leaq 0x8(%rsp), %rcx sete %sil callq 0xe75c jmp 0xfbf4 callq 0xfbfa movq 0x8(%rsp), %rcx leaq 0x10(%rsp), %rsi movl $0x2e, %r8d movq %rbx, %rdi movq %rax, (%rsi) movl %edx, 0x8(%rsi) leaq 0x112d1(%rip), %rdx # 0x20ec0 callq 0x101fe addq $0x20, %rsp popq %rbx retq
/quesnel[P]baryonyx/external/fmt/include/fmt/format.h
std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> fmt::v7::to_string<long double, 0>(long double const&)
inline std::string to_string(const T& value) {
  std::string result;
  detail::write<char>(std::back_inserter(result), value);
  return result;
}
pushq %r15 pushq %r14 pushq %rbx subq $0x10, %rsp leaq 0x10(%rdi), %r15 movq %rdi, %rbx movq %r15, (%rdi) movq $0x0, 0x8(%rdi) movb $0x0, 0x10(%rdi) fldt (%rsi) movabsq $-0x100000000, %rsi # imm = 0xFFFFFFFF00000000 movabsq $0x1000000200000, %rdx # imm = 0x1000000200000 xorl %ecx, %ecx fstpt (%rsp) callq 0x11057 movq %rbx, %rax addq $0x10, %rsp popq %rbx popq %r14 popq %r15 retq movq (%rbx), %rdi movq %rax, %r14 cmpq %r15, %rdi je 0x1104f movq (%r15), %rsi incq %rsi callq 0x3230 movq %r14, %rdi callq 0x3390
/quesnel[P]baryonyx/external/fmt/include/fmt/format.h
fmt::v7::detail::float_specs fmt::v7::detail::parse_float_type_spec<fmt::v7::detail::error_handler, char>(fmt::v7::basic_format_specs<char> const&, fmt::v7::detail::error_handler&&)
FMT_CONSTEXPR float_specs parse_float_type_spec(
    const basic_format_specs<Char>& specs, ErrorHandler&& eh = {}) {
  auto result = float_specs();
  result.showpoint = specs.alt;
  switch (specs.type) {
  case 0:
    result.format = float_format::general;
    result.showpoint |= specs.precision > 0;
    break;
  case 'G':
    result.upper = true;
    FMT_FALLTHROUGH;
  case 'g':
    result.format = float_format::general;
    break;
  case 'E':
    result.upper = true;
    FMT_FALLTHROUGH;
  case 'e':
    result.format = float_format::exp;
    result.showpoint |= specs.precision != 0;
    break;
  case 'F':
    result.upper = true;
    FMT_FALLTHROUGH;
  case 'f':
    result.format = float_format::fixed;
    result.showpoint |= specs.precision != 0;
    break;
  case 'A':
    result.upper = true;
    FMT_FALLTHROUGH;
  case 'a':
    result.format = float_format::hex;
    break;
#ifdef FMT_DEPRECATED_N_SPECIFIER
  case 'n':
#endif
  case 'L':
    result.locale = true;
    break;
  default:
    eh.on_error("invalid type specifier");
    break;
  }
  return result;
}
movzbl 0x9(%rdi), %ecx movzbl 0x8(%rdi), %edx shrl $0x7, %ecx leal -0x41(%rdx), %r8d movl %ecx, %eax shll $0x14, %eax cmpl $0xb, %r8d jbe 0x1139d leal -0x61(%rdx), %r8d cmpl $0x6, %r8d ja 0x113b8 leaq 0xe5d5(%rip), %rdx # 0x1f968 movslq (%rdx,%r8,4), %r8 addq %rdx, %r8 jmpq *%r8 leaq 0xe594(%rip), %rdx # 0x1f938 movslq (%rdx,%r8,4), %r8 addq %rdx, %r8 jmpq *%r8 orl $0x10000, %eax # imm = 0x10000 orl $0x3, %eax jmp 0x11417 testl %edx, %edx jne 0x1141c cmpl $0x0, 0x4(%rdi) setg %al orb %cl, %al movzbl %al, %eax shll $0x14, %eax jmp 0x11417 orl $0x10000, %eax # imm = 0x10000 cmpl $0x0, 0x4(%rdi) setne %dl andl $0xffffe, %eax # imm = 0xFFFFE orb %cl, %dl movzbl %dl, %ecx shll $0x14, %ecx leal 0x1(%rcx,%rax), %eax jmp 0x11417 orl $0x10000, %eax # imm = 0x10000 cmpl $0x0, 0x4(%rdi) setne %dl andl $0xffffd, %eax # imm = 0xFFFFD orb %cl, %dl movzbl %dl, %ecx shll $0x14, %ecx leal 0x2(%rcx,%rax), %eax jmp 0x11417 orl $0x10000, %eax # imm = 0x10000 jmp 0x11417 orl $0x20000, %eax # imm = 0x20000 shlq $0x20, %rax retq pushq %rax leaq 0xeed1(%rip), %rax # 0x202f5 movq %rsi, %rdi movq %rax, %rsi callq 0xd936
/quesnel[P]baryonyx/external/fmt/include/fmt/format.h
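A data-shaped sketch of the switch above (locale handling and error reporting omitted): an uppercase presentation type picks the same format as its lowercase twin and additionally sets the upper flag.

#include <cassert>
#include <cctype>

enum class float_format { general, exp, fixed, hex };
struct parsed_spec { float_format format; bool upper; };

parsed_spec parse_sketch(char type) {
  bool upper = std::isupper(static_cast<unsigned char>(type)) != 0;
  char t = static_cast<char>(std::tolower(static_cast<unsigned char>(type)));
  float_format f = t == 'e'   ? float_format::exp
                   : t == 'f' ? float_format::fixed
                   : t == 'a' ? float_format::hex
                              : float_format::general;  // 'g' and type == 0
  return {f, upper};
}

int main() {
  assert(parse_sketch('E').format == float_format::exp && parse_sketch('E').upper);
  assert(parse_sketch('f').format == float_format::fixed && !parse_sketch('f').upper);
}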
fmt::v7::detail::fixed_handler::on_start(unsigned long, unsigned long, unsigned long, int&)
digits::result on_start(uint64_t divisor, uint64_t remainder, uint64_t error,
                        int& exp) {
  // Non-fixed formats require at least one digit and no precision adjustment.
  if (!fixed) return digits::more;
  // Adjust fixed precision by exponent because it is relative to decimal
  // point.
  precision += exp + exp10;
  // Check if precision is satisfied just by leading zeros, e.g.
  // format("{:.2f}", 0.001) gives "0.00" without generating any digits.
  if (precision > 0) return digits::more;
  if (precision < 0) return digits::done;
  auto dir = get_round_direction(divisor, remainder, error);
  if (dir == round_direction::unknown) return digits::error;
  buf[size++] = dir == round_direction::up ? '1' : '0';
  return digits::done;
}
pushq %rax xorl %eax, %eax cmpb $0x1, 0x14(%rdi) jne 0x120d6 movl 0x10(%rdi), %r9d addl (%r8), %r9d addl 0xc(%rdi), %r9d movl %r9d, 0xc(%rdi) jle 0x120d8 popq %rcx retq movl $0x1, %eax js 0x120d6 movq %rsi, %rax subq %rdx, %rax jbe 0x1213b movq %rsi, %r8 subq %rcx, %r8 jbe 0x12153 cmpq %rcx, %r8 jbe 0x1216b cmpq %rdx, %rax jb 0x1210e movq %rsi, %r9 leaq (%rcx,%rcx), %rax subq %rdx, %r9 subq %rdx, %r9 movb $0x30, %r8b cmpq %r9, %rax jbe 0x12123 movl $0x2, %eax subq %rcx, %rdx jb 0x120d6 subq %rdx, %rsi movb $0x31, %r8b cmpq %rsi, %rdx jb 0x120d6 movslq 0x8(%rdi), %rcx movq (%rdi), %rax leal 0x1(%rcx), %edx movl %edx, 0x8(%rdi) movb %r8b, (%rax,%rcx) movl $0x1, %eax jmp 0x120d6 leaq 0xe0d4(%rip), %rdi # 0x20216 leaq 0xe7b4(%rip), %rdx # 0x208fd movl $0x5cb, %esi # imm = 0x5CB callq 0xde89 leaq 0xe0bc(%rip), %rdi # 0x20216 leaq 0xe79c(%rip), %rdx # 0x208fd movl $0x5cc, %esi # imm = 0x5CC callq 0xde89 leaq 0xe0a4(%rip), %rdi # 0x20216 leaq 0xe784(%rip), %rdx # 0x208fd movl $0x5cd, %esi # imm = 0x5CD callq 0xde89 nop
/quesnel[P]baryonyx/external/fmt/include/fmt/format-inl.h
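A sketch, not fmt's exact code, of the decision that last step makes: with remainder the value still to be represented and error the uncertainty band, round down only if even remainder + error stays below half the divisor, round up only if even remainder - error stays above it, and report unknown otherwise (the sanity checks mirror the assertion calls in the asm).

#include <cassert>
#include <cstdint>

enum class round_direction { unknown, up, down };

round_direction get_round_direction_sketch(uint64_t divisor, uint64_t remainder,
                                           uint64_t error) {
  assert(remainder < divisor && error < divisor);
  if (remainder + error < divisor - (remainder + error))
    return round_direction::down;  // (remainder + error) * 2 < divisor
  if (remainder >= error && remainder - error > divisor - (remainder - error))
    return round_direction::up;    // (remainder - error) * 2 > divisor
  return round_direction::unknown;
}

int main() {
  assert(get_round_direction_sketch(100, 10, 5) == round_direction::down);
  assert(get_round_direction_sketch(100, 90, 5) == round_direction::up);
  assert(get_round_direction_sketch(100, 50, 5) == round_direction::unknown);
}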
fmt::v7::detail::bigint::assign(unsigned long)
void assign(uint64_t n) {
  size_t num_bigits = 0;
  do {
    bigits_[num_bigits++] = n & ~bigit(0);
    n >>= bigit_bits;
  } while (n != 0);
  bigits_.resize(num_bigits);
  exp_ = 0;
}
pushq %r14 pushq %rbx pushq %rax movq 0x8(%rdi), %rax movq %rdi, %rbx xorl %r14d, %r14d movl %esi, (%rax,%r14,4) incq %r14 shrq $0x20, %rsi jne 0x12304 movq 0x18(%rbx), %rax leaq -0x1(%r14), %rcx cmpq %rcx, %rax ja 0x1232d movq (%rbx), %rax movq %rbx, %rdi movq %r14, %rsi callq *(%rax) movq 0x18(%rbx), %rax cmpq %r14, %rax cmovbq %rax, %r14 movq %r14, 0x10(%rbx) movl $0x0, 0xa8(%rbx) addq $0x8, %rsp popq %rbx popq %r14 retq
/quesnel[P]baryonyx/external/fmt/include/fmt/format-inl.h
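A sketch of the limb split above, with std::vector standing in for fmt's inline-storage buffer: the 64-bit value is chopped into 32-bit "bigits", least significant first.

#include <cassert>
#include <cstdint>
#include <vector>

std::vector<uint32_t> assign_sketch(uint64_t n) {
  std::vector<uint32_t> bigits;
  do {
    bigits.push_back(static_cast<uint32_t>(n));  // low 32 bits (n & ~bigit(0))
    n >>= 32;                                    // bigit_bits
  } while (n != 0);
  return bigits;
}

int main() {
  auto b = assign_sketch(0x123456789abcdef0ULL);
  assert(b.size() == 2 && b[0] == 0x9abcdef0u && b[1] == 0x12345678u);
}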
fmt::v7::detail::bigint::divmod_assign(fmt::v7::detail::bigint const&)
int divmod_assign(const bigint& divisor) {
  FMT_ASSERT(this != &divisor, "");
  if (compare(*this, divisor) < 0) return 0;
  FMT_ASSERT(divisor.bigits_[divisor.bigits_.size() - 1u] != 0, "");
  align(divisor);
  int quotient = 0;
  do {
    subtract_aligned(divisor);
    ++quotient;
  } while (compare(*this, divisor) >= 0);
  return quotient;
}
pushq %rbp pushq %r14 pushq %rbx cmpq %rsi, %rdi je 0x12573 movq %rsi, %rbx movq %rdi, %r14 callq 0x125a3 testl %eax, %eax js 0x1256a movq 0x8(%rbx), %rax movq 0x10(%rbx), %rcx cmpl $0x0, -0x4(%rax,%rcx,4) je 0x1258b movq %r14, %rdi movq %rbx, %rsi callq 0x12f60 xorl %ebp, %ebp movq %r14, %rdi movq %rbx, %rsi callq 0x13028 incl %ebp movq %r14, %rdi movq %rbx, %rsi callq 0x125a3 testl %eax, %eax jns 0x1254c jmp 0x1256c xorl %ebp, %ebp movl %ebp, %eax popq %rbx popq %r14 popq %rbp retq leaq 0xdc9c(%rip), %rdi # 0x20216 leaq 0xe37c(%rip), %rdx # 0x208fd movl $0x5b6, %esi # imm = 0x5B6 callq 0xde89 leaq 0xdc84(%rip), %rdi # 0x20216 leaq 0xe364(%rip), %rdx # 0x208fd movl $0x5b8, %esi # imm = 0x5B8 callq 0xde89
/quesnel[P]baryonyx/external/fmt/include/fmt/format-inl.h
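A sketch of the quotient loop above, with plain unsigned integers standing in for aligned bigints: the quotient is accumulated by repeatedly subtracting the divisor until the remainder drops below it. This is viable because in fmt's digit generation the quotient is a single decimal digit, so the loop stays short.

#include <cassert>

int divmod_assign_sketch(unsigned long long& value, unsigned long long divisor) {
  if (value < divisor) return 0;  // compare(*this, divisor) < 0
  int quotient = 0;
  do {
    value -= divisor;  // subtract_aligned
    ++quotient;
  } while (value >= divisor);
  return quotient;
}

int main() {
  unsigned long long v = 47;
  assert(divmod_assign_sketch(v, 10) == 4 && v == 7);  // 47 = 4 * 10 + 7
}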
fmt::v7::detail::bigint::multiply(unsigned int)
size_t size() const FMT_NOEXCEPT { return size_; }
movq 0x10(%rdi), %rax testq %rax, %rax je 0x1318f pushq %r14 pushq %rbx pushq %rax movq 0x8(%rdi), %rdx movl %esi, %ecx movq %rdi, %rbx xorl %esi, %esi xorl %r14d, %r14d movl (%rdx,%rsi,4), %edi imulq %rcx, %rdi addq %rdi, %r14 movl %r14d, (%rdx,%rsi,4) shrq $0x20, %r14 incq %rsi cmpq %rsi, %rax jne 0x13143 testq %r14, %r14 je 0x13188 leaq 0x1(%rax), %rsi cmpq %rsi, 0x18(%rbx) jae 0x1317c movq (%rbx), %rax movq %rbx, %rdi callq *(%rax) movq 0x10(%rbx), %rax leaq 0x1(%rax), %rsi movq 0x8(%rbx), %rcx movq %rsi, 0x10(%rbx) movl %r14d, (%rcx,%rax,4) addq $0x8, %rsp popq %rbx popq %r14 retq
/quesnel[P]baryonyx/external/fmt/include/fmt/core.h
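The code field of this record shows size(), but the symbol and the asm correspond to bigint::multiply. A sketch of that multiply-by-word loop: each 32-bit bigit is multiplied in 64-bit arithmetic and the high half is carried into the next position, with a final limb appended for any leftover carry.

#include <cassert>
#include <cstdint>
#include <vector>

void multiply_sketch(std::vector<uint32_t>& bigits, uint32_t value) {
  uint64_t carry = 0;
  for (uint32_t& bigit : bigits) {
    uint64_t result = bigit * uint64_t(value) + carry;
    bigit = static_cast<uint32_t>(result);  // low 32 bits stay in place
    carry = result >> 32;                   // high 32 bits move up
  }
  if (carry != 0) bigits.push_back(static_cast<uint32_t>(carry));
}

int main() {
  std::vector<uint32_t> b = {0xffffffffu};  // 2^32 - 1
  multiply_sketch(b, 2);                    // (2^32 - 1) * 2 = 2^33 - 2
  assert(b.size() == 2 && b[0] == 0xfffffffeu && b[1] == 1u);
}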
fmt::v7::detail::buffer_appender<char> fmt::v7::detail::write<char, fmt::v7::detail::buffer_appender<char>, __int128, 0>(fmt::v7::detail::buffer_appender<char>, __int128)
OutputIt write(OutputIt out, T value) {
  auto abs_value = static_cast<uint32_or_64_or_128_t<T>>(value);
  bool negative = is_negative(value);
  // Don't do -abs_value since it trips unsigned-integer-overflow sanitizer.
  if (negative) abs_value = ~abs_value + 1;
  int num_digits = count_digits(abs_value);
  auto size = (negative ? 1 : 0) + static_cast<size_t>(num_digits);
  auto it = reserve(out, size);
  if (auto ptr = to_pointer<Char>(it, size)) {
    if (negative) *ptr++ = static_cast<Char>('-');
    format_decimal<Char>(ptr, abs_value, num_digits);
    return out;
  }
  if (negative) *it++ = static_cast<Char>('-');
  it = format_decimal<Char>(it, abs_value, num_digits).end;
  return base_iterator(out, it);
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x48, %rsp movq %rsi, %r14 movq %rdi, %r12 testq %rdx, %rdx js 0x13f99 movq %rdx, %r15 jmp 0x13fa2 xorl %r15d, %r15d negq %r14 sbbq %rdx, %r15 cmpq $0xa, %r14 movq %r15, %rax movl $0x1, %r13d movq %r12, 0x18(%rsp) sbbq $0x0, %rax jb 0x1405c movl $0x4, %ebx movl $0x63, %r12d movq %rdx, 0x10(%rsp) movq %r14, %r13 movq %r15, %rbp cmpq %r13, %r12 movl $0x0, %eax sbbq %rbp, %rax jae 0x14039 movl $0x3e7, %eax # imm = 0x3E7 cmpq %r13, %rax movl $0x0, %eax sbbq %rbp, %rax jae 0x14042 cmpq $0x2710, %r13 # imm = 0x2710 movq %rbp, %rax sbbq $0x0, %rax jb 0x1404f movl $0x2710, %edx # imm = 0x2710 movq %r13, %rdi movq %rbp, %rsi xorl %ecx, %ecx callq 0x3080 movl $0x1869f, %ecx # imm = 0x1869F addl $0x4, %ebx cmpq %r13, %rcx movl $0x0, %ecx movq %rax, %r13 sbbq %rbp, %rcx movq %rdx, %rbp jb 0x13fd4 movl %ebx, %r13d addl $-0x3, %r13d jmp 0x14048 movl %ebx, %r13d addl $-0x2, %r13d jmp 0x14048 movl %ebx, %r13d decl %r13d movq 0x18(%rsp), %r12 jmp 0x14057 movq 0x18(%rsp), %r12 movl %ebx, %r13d movq 0x10(%rsp), %rdx movq %rdx, %rbx movslq %r13d, %rcx shrq $0x3f, %rbx movq 0x10(%r12), %rax movq %rdx, %rbp addq %rcx, %rbx movq %rcx, 0x10(%rsp) movq 0x18(%r12), %rcx leaq (%rbx,%rax), %rsi cmpq %rsi, %rcx jae 0x1409d movq (%r12), %rax movq %r12, %rdi callq *(%rax) movq 0x10(%r12), %rax movq 0x18(%r12), %rcx addq %rax, %rbx movq %rbx, %rsi movq %rbp, %rdx cmpq %rsi, %rcx jae 0x140ad movq %rax, %rsi jmp 0x1416a movq %rsi, 0x10(%r12) movq 0x8(%r12), %rbp testq %rbp, %rbp je 0x1416a addq %rax, %rbp testq %rdx, %rdx jns 0x140cf movb $0x2d, (%rbp) incq %rbp cmpq $0xa, %r14 movq %r15, %rax movl $0x1, %ebx sbbq $0x0, %rax jb 0x142fd movl $0x4, %ebx movl %r13d, 0xc(%rsp) movq %r14, %r12 movq %r15, %r13 movl $0x63, %eax cmpq %r12, %rax movl $0x0, %eax sbbq %r13, %rax jae 0x142f1 movl $0x3e7, %eax # imm = 0x3E7 cmpq %r12, %rax movl $0x0, %eax sbbq %r13, %rax jae 0x142f6 cmpq $0x2710, %r12 # imm = 0x2710 movq %r13, %rax sbbq $0x0, %rax jb 0x142f8 movl $0x2710, %edx # imm = 0x2710 movq %r12, %rdi movq %r13, %rsi xorl %ecx, %ecx callq 0x3080 movl $0x1869f, %ecx # imm = 0x1869F addl $0x4, %ebx cmpq %r12, %rcx movl $0x0, %ecx movq %rax, %r12 sbbq %r13, %rcx movq %rdx, %r13 jb 0x140f5 addl $-0x3, %ebx jmp 0x142f8 testq %rdx, %rdx jns 0x1419b leaq 0x1(%rsi), %rax cmpq %rax, %rcx jae 0x1418d movq (%r12), %rcx movq %r12, %rdi movq %rax, %rsi callq *(%rcx) movq 0x10(%r12), %rsi leaq 0x1(%rsi), %rax movq 0x8(%r12), %rcx movq %rax, 0x10(%r12) movb $0x2d, (%rcx,%rsi) cmpq $0xa, %r14 movq %r15, %rax movl $0x1, %ebx sbbq $0x0, %rax jb 0x14233 movl $0x4, %ebx movl $0x63, %ebp movl %r13d, 0xc(%rsp) movq %r14, %r12 movq %r15, %r13 cmpq %r12, %rbp movl $0x0, %eax sbbq %r13, %rax jae 0x14227 movl $0x3e7, %eax # imm = 0x3E7 cmpq %r12, %rax movl $0x0, %eax sbbq %r13, %rax jae 0x1422c cmpq $0x2710, %r12 # imm = 0x2710 movq %r13, %rax sbbq $0x0, %rax jb 0x1422e movl $0x2710, %edx # imm = 0x2710 movq %r12, %rdi movq %r13, %rsi xorl %ecx, %ecx callq 0x3080 movl $0x1869f, %ecx # imm = 0x1869F addl $0x4, %ebx cmpq %r12, %rcx movl $0x0, %ecx movq %rax, %r12 sbbq %r13, %rcx movq %rdx, %r13 jb 0x141c6 addl $-0x3, %ebx jmp 0x1422e addl $-0x2, %ebx jmp 0x1422e decl %ebx movl 0xc(%rsp), %r13d cmpl %r13d, %ebx jg 0x143a8 movq 0x10(%rsp), %rax cmpq $0x64, %r14 leaq 0x20(%rsp,%rax), %r12 movq %r15, %rax sbbq $0x0, %rax movq %r12, 0x10(%rsp) jb 0x142a9 leaq 0xc7f1(%rip), %r13 # 0x20a50 movl $0x270f, %ebp # imm = 0x270F movq %r12, %rbx movl $0x64, %edx leaq -0x2(%rbx), %r12 movq %r14, %rdi 
movq %r15, %rsi xorl %ecx, %ecx callq 0x3080 imulq $0x64, %rax, %rcx movq %r14, %rsi subq %rcx, %rsi cmpq %r14, %rbp movq %rax, %r14 movzwl (%r13,%rsi,2), %ecx movw %cx, -0x2(%rbx) movl $0x0, %ecx movq %r12, %rbx sbbq %r15, %rcx movq %rdx, %r15 jb 0x14267 jmp 0x142af movq %r14, %rax movq %r15, %rdx movl $0x9, %esi xorl %ecx, %ecx cmpq %rax, %rsi sbbq %rdx, %rcx jb 0x142c7 orb $0x30, %al movb %al, -0x1(%r12) jmp 0x142d8 leaq 0xc782(%rip), %rcx # 0x20a50 movzwl (%rcx,%rax,2), %eax movw %ax, -0x2(%r12) movq 0x10(%rsp), %rsi movq 0x18(%rsp), %rdx leaq 0x20(%rsp), %rdi callq 0x12062 jmp 0x14399 addl $-0x2, %ebx jmp 0x142f8 decl %ebx movl 0xc(%rsp), %r13d cmpl %r13d, %ebx jg 0x143a8 addq 0x10(%rsp), %rbp cmpq $0x64, %r14 movq %r15, %rax sbbq $0x0, %rax jb 0x14366 leaq 0xc731(%rip), %r12 # 0x20a50 movl $0x270f, %r13d # imm = 0x270F movl $0x64, %edx leaq -0x2(%rbp), %rbx movq %r14, %rdi movq %r15, %rsi xorl %ecx, %ecx callq 0x3080 imulq $0x64, %rax, %rcx movq %r14, %rsi subq %rcx, %rsi cmpq %r14, %r13 movq %rax, %r14 movzwl (%r12,%rsi,2), %ecx movw %cx, -0x2(%rbp) movl $0x0, %ecx movq %rbx, %rbp sbbq %r15, %rcx movq %rdx, %r15 jb 0x14325 jmp 0x1436f movq %r14, %rax movq %r15, %rdx movq %rbp, %rbx movl $0x9, %esi xorl %ecx, %ecx cmpq %rax, %rsi sbbq %rdx, %rcx jb 0x14385 orb $0x30, %al movb %al, -0x1(%rbx) jmp 0x14394 leaq 0xc6c4(%rip), %rcx # 0x20a50 movzwl (%rcx,%rax,2), %eax movw %ax, -0x2(%rbx) movq 0x18(%rsp), %rax addq $0x48, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq leaq 0xbce2(%rip), %rdi # 0x20091 leaq 0xbd42(%rip), %rdx # 0x200f8 movl $0x41b, %esi # imm = 0x41B callq 0xde89
/quesnel[P]baryonyx/external/fmt/include/fmt/format.h
fmt::v7::detail::buffer_appender<char> fmt::v7::detail::write<char, fmt::v7::detail::buffer_appender<char>, double, 0>(fmt::v7::detail::buffer_appender<char>, double)
OutputIt write(OutputIt out, T value) {
  if (const_check(!is_supported_floating_point(value))) return out;
  using floaty = conditional_t<std::is_same<T, long double>::value, double, T>;
  using uint = typename dragonbox::float_info<floaty>::carrier_uint;
  auto bits = bit_cast<uint>(value);
  auto fspecs = float_specs();
  auto sign_bit = bits & (uint(1) << (num_bits<uint>() - 1));
  if (sign_bit != 0) {
    fspecs.sign = sign::minus;
    value = -value;
  }
  static const auto specs = basic_format_specs<Char>();
  uint mask = exponent_mask<floaty>();
  if ((bits & mask) == mask)
    return write_nonfinite(out, std::isinf(value), specs, fspecs);
  auto dec = dragonbox::to_decimal(static_cast<floaty>(value));
  return write_float(out, dec, specs, fspecs, static_cast<Char>('.'));
}
pushq %r14 pushq %rbx subq $0x18, %rsp vmovq %xmm0, %r14 movabsq $0x7ff0000000000000, %rax # imm = 0x7FF0000000000000 movq %rdi, %rbx andnq %rax, %r14, %rcx jne 0x15746 movq %r14, %rcx btrq $0x3f, %rcx leaq 0xaa74(%rip), %rdx # 0x20181 leaq 0x8(%rsp), %r8 leaq 0xbf83(%rip), %rsi # 0x2169c movq %rbx, %rdi cmpq %rax, %rcx leaq 0xaa53(%rip), %rcx # 0x20179 cmoveq %rcx, %rdx shrq $0x3f, %r14 movq %r14, %rcx addq $0x3, %rcx movl %r14d, (%r8) movq %rdx, 0x8(%r8) movq %rcx, %rdx callq 0x14d47 jmp 0x15786 vpandq 0xa088(%rip){1to2}, %xmm0, %xmm0 # 0x1f7d8 callq 0xfbfa leaq 0x8(%rsp), %rsi shrq $0x17, %r14 movabsq $0x10000000000, %rcx # imm = 0x10000000000 movl $0x2e, %r8d movq %rbx, %rdi movq %rax, (%rsi) movl %edx, 0x8(%rsi) andq %r14, %rcx leaq 0xbf1b(%rip), %rdx # 0x2169c callq 0x1578e addq $0x18, %rsp popq %rbx popq %r14 retq
/quesnel[P]baryonyx/external/fmt/include/fmt/format.h
fmt::v7::detail::buffer_appender<char> fmt::v7::detail::write_float<fmt::v7::detail::buffer_appender<char>, fmt::v7::detail::dragonbox::decimal_fp<double>, char>(fmt::v7::detail::buffer_appender<char>, fmt::v7::detail::dragonbox::decimal_fp<double> const&, fmt::v7::basic_format_specs<char> const&, fmt::v7::detail::float_specs, char)
OutputIt write_float(OutputIt out, const DecimalFP& fp,
                     const basic_format_specs<Char>& specs, float_specs fspecs,
                     Char decimal_point) {
  auto significand = fp.significand;
  int significand_size = get_significand_size(fp);
  static const Char zero = static_cast<Char>('0');
  auto sign = fspecs.sign;
  size_t size = to_unsigned(significand_size) + (sign ? 1 : 0);
  using iterator = remove_reference_t<decltype(reserve(out, 0))>;

  int output_exp = fp.exponent + significand_size - 1;
  auto use_exp_format = [=]() {
    if (fspecs.format == float_format::exp) return true;
    if (fspecs.format != float_format::general) return false;
    // Use the fixed notation if the exponent is in [exp_lower, exp_upper),
    // e.g. 0.0001 instead of 1e-04. Otherwise use the exponent notation.
    const int exp_lower = -4, exp_upper = 16;
    return output_exp < exp_lower ||
           output_exp >= (fspecs.precision > 0 ? fspecs.precision : exp_upper);
  };
  if (use_exp_format()) {
    int num_zeros = 0;
    if (fspecs.showpoint) {
      num_zeros = (std::max)(fspecs.precision - significand_size, 0);
      size += to_unsigned(num_zeros);
    } else if (significand_size == 1) {
      decimal_point = Char();
    }
    auto abs_output_exp = output_exp >= 0 ? output_exp : -output_exp;
    int exp_digits = 2;
    if (abs_output_exp >= 100) exp_digits = abs_output_exp >= 1000 ? 4 : 3;
    size += to_unsigned((decimal_point ? 1 : 0) + 2 + exp_digits);
    char exp_char = fspecs.upper ? 'E' : 'e';
    auto write = [=](iterator it) {
      if (sign) *it++ = static_cast<Char>(data::signs[sign]);
      // Insert a decimal point after the first digit and add an exponent.
      it = write_significand(it, significand, significand_size, 1,
                             decimal_point);
      if (num_zeros > 0) it = std::fill_n(it, num_zeros, zero);
      *it++ = static_cast<Char>(exp_char);
      return write_exponent<Char>(output_exp, it);
    };
    return specs.width > 0
               ? write_padded<align::right>(out, specs, size, write)
               : base_iterator(out, write(reserve(out, size)));
  }

  int exp = fp.exponent + significand_size;
  if (fp.exponent >= 0) {
    // 1234e5 -> 123400000[.0+]
    size += to_unsigned(fp.exponent);
    int num_zeros = fspecs.precision - exp;
#ifdef FMT_FUZZ
    if (num_zeros > 5000)
      throw std::runtime_error("fuzz mode - avoiding excessive cpu use");
#endif
    if (fspecs.showpoint) {
      if (num_zeros <= 0 && fspecs.format != float_format::fixed) num_zeros = 1;
      if (num_zeros > 0) size += to_unsigned(num_zeros);
    }
    return write_padded<align::right>(out, specs, size, [&](iterator it) {
      if (sign) *it++ = static_cast<Char>(data::signs[sign]);
      it = write_significand<Char>(it, significand, significand_size);
      it = std::fill_n(it, fp.exponent, zero);
      if (!fspecs.showpoint) return it;
      *it++ = decimal_point;
      return num_zeros > 0 ? std::fill_n(it, num_zeros, zero) : it;
    });
  } else if (exp > 0) {
    // 1234e-2 -> 12.34[0+]
    int num_zeros = fspecs.showpoint ? fspecs.precision - significand_size : 0;
    size += 1 + to_unsigned(num_zeros > 0 ? num_zeros : 0);
    return write_padded<align::right>(out, specs, size, [&](iterator it) {
      if (sign) *it++ = static_cast<Char>(data::signs[sign]);
      it = write_significand(it, significand, significand_size, exp,
                             decimal_point);
      return num_zeros > 0 ? std::fill_n(it, num_zeros, zero) : it;
    });
  }
  // 1234e-6 -> 0.001234
  int num_zeros = -exp;
  if (significand_size == 0 && fspecs.precision >= 0 &&
      fspecs.precision < num_zeros) {
    num_zeros = fspecs.precision;
  }
  size += 2 + to_unsigned(num_zeros);
  return write_padded<align::right>(out, specs, size, [&](iterator it) {
    if (sign) *it++ = static_cast<Char>(data::signs[sign]);
    *it++ = zero;
    if (num_zeros == 0 && significand_size == 0 && !fspecs.showpoint) return it;
    *it++ = decimal_point;
    it = std::fill_n(it, num_zeros, zero);
    return write_significand<Char>(it, significand, significand_size);
  });
}
pushq %rbp pushq %r15 pushq %r14 pushq %r12 pushq %rbx subq $0x60, %rsp movq %rcx, 0x58(%rsp) movb %r8b, 0x3(%rsp) movq %rdx, %rbx movq %rsi, %rax movq %rdi, %r14 movq (%rsi), %r9 leaq 0xb1e9(%rip), %rsi # 0x209a0 movq %r9, %rdx orq $0x1, %rdx movq %r9, 0x10(%rsp) lzcntq %rdx, %rdx xorl $0x3f, %edx movzwl (%rsi,%rdx,2), %edi leaq 0xb34a(%rip), %rdx # 0x20b20 xorl %esi, %esi cmpq %r9, (%rdx,%rdi,8) movq %rcx, %rdx seta %sil shrq $0x28, %rdx subl %esi, %edi movzbl %dl, %r11d movl %edi, 0xc(%rsp) movl %r11d, 0x8(%rsp) js 0x15bad movl 0x8(%rax), %esi movq %rcx, %r10 shrq $0x20, %r10 cmpl $0x1, %r11d movl %edi, %edx sbbl $-0x1, %edx leal (%rsi,%rdi), %ebp cmpb $0x1, %r10b je 0x1587a movzbl %r10b, %r15d testl %r15d, %r15d jne 0x15837 testl %ecx, %ecx movl $0x10, %r15d cmovgl %ecx, %r15d cmpl $-0x3, %ebp jl 0x1587a cmpl %r15d, %ebp jg 0x1587a movl %ebp, 0x1c(%rsp) testl %esi, %esi js 0x1599b subl %ebp, %ecx addl %edx, %esi btl $0x14, %r10d movl %ecx, 0x4(%rsp) jae 0x15a17 cmpb $0x2, %r10b je 0x15a0e testl %ecx, %ecx jg 0x15a0e movl $0x1, 0x4(%rsp) movl $0x1, %ecx jmp 0x15a12 leal -0x1(%rsi,%rdi), %eax btl $0x14, %r10d jb 0x15894 xorl %ecx, %ecx cmpl $0x1, %edi movzbl %r8b, %r8d cmovel %ecx, %r8d jmp 0x158a2 subl %edi, %ecx movl %ecx, %esi sarl $0x1f, %esi andnl %ecx, %esi, %ecx addl %ecx, %edx movl $0x1, %esi movl $0x2, %r12d movl %r11d, 0x20(%rsp) movq %r9, 0x28(%rsp) movl %edi, 0x30(%rsp) movb %r8b, 0x34(%rsp) movl %ecx, 0x38(%rsp) subl %ebp, %esi testl %ebp, %ebp cmovgl %eax, %esi xorl %r15d, %r15d cmpl $0x3e8, %esi # imm = 0x3E8 setge %r15b addq $0x3, %r15 cmpl $0x64, %esi movl $0x3, %esi cmovgeq %r15, %r12 cmpb $0x1, %r8b sbbq $0x0, %rsi addq %rdx, %rsi addq %r12, %rsi btl $0x10, %r10d setae %dl shlb $0x5, %dl orb $0x45, %dl movb %dl, 0x3c(%rsp) movl %eax, 0x40(%rsp) movslq (%rbx), %rax testq %rax, %rax jle 0x15977 xorl %r15d, %r15d subq %rsi, %rax leaq 0xb597(%rip), %rcx # 0x20eb8 cmovaeq %rax, %r15 movzbl 0x9(%rbx), %eax addq 0x10(%r14), %rsi andl $0xf, %eax movsbq (%rax,%rcx), %r12 movzbl 0xe(%rbx), %eax imulq %r15, %rax addq %rax, %rsi cmpq %rsi, 0x18(%r14) jae 0x1594e movq (%r14), %rax movq %r14, %rdi callq *(%rax) addq $0xa, %rbx shrxq %r12, %r15, %r12 movq %r14, %rdi movq %r12, %rsi movq %rbx, %rdx callq 0x14e42 leaq 0x20(%rsp), %rdi movq %rax, %rsi callq 0x15bc6 jmp 0x15ac2 addq 0x10(%r14), %rsi cmpq %rsi, 0x18(%r14) jae 0x15989 movq (%r14), %rax movq %r14, %rdi callq *(%rax) leaq 0x20(%rsp), %rdi movq %r14, %rsi callq 0x15bc6 jmp 0x15ad3 testl %ebp, %ebp jle 0x15ae0 shll $0xb, %r10d subl %edi, %ecx leaq 0x4(%rsp), %rax leaq 0x20(%rsp), %r8 leaq 0x10(%rsp), %rdi sarl $0x1f, %r10d andl %r10d, %ecx movl %ecx, %esi sarl $0x1f, %esi movl %ecx, (%rax) andnl %ecx, %esi, %ecx leaq 0x8(%rsp), %rsi leaq 0x1(%rcx,%rdx), %rcx movq %rsi, (%r8) leaq 0xc(%rsp), %rsi movq %rdi, 0x8(%r8) leaq 0x1c(%rsp), %rdi movq %rsi, 0x10(%r8) leaq 0x3(%rsp), %rsi movq %rdi, 0x18(%r8) movq %r14, %rdi movq %rsi, 0x20(%r8) movq %rax, 0x28(%r8) movq %rbx, %rsi movq %rcx, %rdx callq 0x15ead jmp 0x15ad3 testl %ecx, %ecx jle 0x15a17 movl %ecx, %ecx addq %rcx, %rsi leaq 0x8(%rsp), %rcx leaq 0x10(%rsp), %rdi leaq 0xc(%rsp), %rdx movq %rcx, 0x20(%rsp) leaq 0x58(%rsp), %rcx movq %rdi, 0x28(%rsp) movq %rdx, 0x30(%rsp) movq %rax, 0x38(%rsp) leaq 0x3(%rsp), %rdx movq %rcx, 0x40(%rsp) leaq 0x4(%rsp), %rcx movq %rdx, 0x48(%rsp) movq %rcx, 0x50(%rsp) movslq (%rbx), %rax testq %rax, %rax js 0x15bad xorl %r15d, %r15d subq %rsi, %rax leaq 0xb447(%rip), %rcx # 0x20eb8 cmovaeq %rax, %r15 movzbl 0x9(%rbx), %eax addq 0x10(%r14), %rsi 
andl $0xf, %eax movsbq (%rax,%rcx), %r12 movzbl 0xe(%rbx), %eax imulq %r15, %rax addq %rax, %rsi cmpq %rsi, 0x18(%r14) jae 0x15a9e movq (%r14), %rax movq %r14, %rdi callq *(%rax) addq $0xa, %rbx shrxq %r12, %r15, %r12 movq %r14, %rdi movq %r12, %rsi movq %rbx, %rdx callq 0x14e42 leaq 0x20(%rsp), %rdi movq %rax, %rsi callq 0x15cbe subq %r12, %r15 movq %rax, %rdi movq %rbx, %rdx movq %r15, %rsi callq 0x14e42 addq $0x60, %rsp popq %rbx popq %r12 popq %r14 popq %r15 popq %rbp retq negl %ebp cmpl %ebp, %ecx movl %ebp, %eax cmovll %ecx, %eax testl %ecx, %ecx cmovsl %ebp, %eax testl %edi, %edi cmovnel %ebp, %eax movl %eax, 0x4(%rsp) testl %eax, %eax js 0x15bad leaq 0x8(%rsp), %rcx leaq 0x4(%rsp), %rsi leaq 0xc(%rsp), %rdi movq %rcx, 0x20(%rsp) movq %rsi, 0x28(%rsp) leaq 0x58(%rsp), %rsi movq %rdi, 0x30(%rsp) leaq 0x3(%rsp), %rdi movq %rsi, 0x38(%rsp) leaq 0x10(%rsp), %rsi movq %rdi, 0x40(%rsp) movq %rsi, 0x48(%rsp) movslq (%rbx), %rcx testq %rcx, %rcx js 0x15bad movl %eax, %eax leaq 0x2(%rax,%rdx), %rsi movzbl 0x9(%rbx), %eax xorl %r15d, %r15d subq %rsi, %rcx cmovaeq %rcx, %r15 leaq 0xb359(%rip), %rcx # 0x20eb8 addq 0x10(%r14), %rsi andl $0xf, %eax movsbq (%rax,%rcx), %r12 movzbl 0xe(%rbx), %eax imulq %r15, %rax addq %rax, %rsi cmpq %rsi, 0x18(%r14) jae 0x15b84 movq (%r14), %rax movq %r14, %rdi callq *(%rax) addq $0xa, %rbx shrxq %r12, %r15, %r12 movq %r14, %rdi movq %r12, %rsi movq %rbx, %rdx callq 0x14e42 leaq 0x20(%rsp), %rdi movq %rax, %rsi callq 0x1601a jmp 0x15ac2 leaq 0xa5d1(%rip), %rdi # 0x20185 leaq 0xa62f(%rip), %rdx # 0x201ea movl $0x146, %esi # imm = 0x146 callq 0xde89 nop
/quesnel[P]baryonyx/external/fmt/include/fmt/format.h
fmt::v7::detail::dragonbox::decimal_fp<double> fmt::v7::detail::write_padded<(fmt::v7::align::type)2, fmt::v7::detail::buffer_appender<char>, char, fmt::v7::detail::buffer_appender<char> fmt::v7::detail::write_float<fmt::v7::detail::buffer_appender<char>, fmt::v7::detail::dragonbox::decimal_fp<double>, char>(fmt::v7::detail::buffer_appender<char>, fmt::v7::detail::dragonbox::decimal_fp<double> const&, fmt::v7::basic_format_specs<char> const&, fmt::v7::detail::float_specs, char)::'lambda1'(fmt::v7::detail::buffer_appender<char>)&>(fmt::v7::detail::dragonbox::decimal_fp<double>, fmt::v7::basic_format_specs<char> const&, unsigned long, unsigned long, fmt::v7::detail::buffer_appender<char> fmt::v7::detail::write_float<fmt::v7::detail::buffer_appender<char>, fmt::v7::detail::dragonbox::decimal_fp<double>, char>(fmt::v7::detail::buffer_appender<char>, fmt::v7::detail::dragonbox::decimal_fp<double> const&, fmt::v7::basic_format_specs<char> const&, fmt::v7::detail::float_specs, char)::'lambda1'(fmt::v7::detail::buffer_appender<char>)&)
inline OutputIt write_padded(OutputIt out, const basic_format_specs<Char>& specs,
                             size_t size, size_t width, F&& f) {
  static_assert(align == align::left || align == align::right, "");
  unsigned spec_width = to_unsigned(specs.width);
  size_t padding = spec_width > width ? spec_width - width : 0;
  auto* shifts = align == align::left ? data::left_padding_shifts
                                      : data::right_padding_shifts;
  size_t left_padding = padding >> shifts[specs.align];
  auto it = reserve(out, size + padding * specs.fill.size());
  it = fill(it, left_padding, specs.fill);
  it = f(it);
  it = fill(it, padding - left_padding, specs.fill);
  return base_iterator(out, it);
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x18, %rsp movslq (%rsi), %rax testq %rax, %rax js 0x16002 xorl %r14d, %r14d subq %rcx, %rax leaq 0xafe4(%rip), %rcx # 0x20eb8 movq %rsi, %rbx movq %r8, %r12 movq %rdi, %r13 cmovaeq %rax, %r14 movzbl 0x9(%rsi), %eax addq 0x10(%rdi), %rdx addq $0xa, %rbx andl $0xf, %eax movb (%rax,%rcx), %al shrxq %rax, %r14, %r15 movzbl 0xe(%rsi), %eax imulq %r14, %rax addq %rax, %rdx cmpq %rdx, 0x18(%rdi) jae 0x15f15 movq (%r13), %rax movq %r13, %rdi movq %rdx, %rsi callq *(%rax) movq %r13, %rdi movq %r15, %rsi movq %rbx, %rdx callq 0x14e42 movq (%r12), %rcx movq %rax, %r13 movl (%rcx), %eax testq %rax, %rax je 0x15f67 leaq 0xaca5(%rip), %rcx # 0x20bdd movb (%rax,%rcx), %bpl movq 0x10(%r13), %rax leaq 0x1(%rax), %rsi cmpq %rsi, 0x18(%r13) jae 0x15f5b movq (%r13), %rax movq %r13, %rdi callq *(%rax) movq 0x10(%r13), %rax leaq 0x1(%rax), %rsi movq 0x8(%r13), %rcx movq %rsi, 0x10(%r13) movb %bpl, (%rcx,%rax) movq 0x8(%r12), %rax movq 0x10(%r12), %rcx movq 0x20(%r12), %rdi movq 0x18(%r12), %r9 movq %rsp, %rbp movl (%rcx), %edx movsbl (%rdi), %r8d movq (%rax), %rsi movl (%r9), %ecx movq %rbp, %rdi callq 0x10954 movq %rbp, %rdi movq %rax, %rsi movq %r13, %rdx callq 0x12062 movq 0x28(%r12), %rcx movq %rax, %r13 movl (%rcx), %ebp testl %ebp, %ebp jle 0x15fe2 incl %ebp movq 0x10(%r13), %rax leaq 0x1(%rax), %rsi cmpq %rsi, 0x18(%r13) jae 0x15fcf movq (%r13), %rax movq %r13, %rdi callq *(%rax) movq 0x10(%r13), %rax leaq 0x1(%rax), %rsi movq 0x8(%r13), %rcx decl %ebp movq %rsi, 0x10(%r13) movb $0x30, (%rcx,%rax) cmpl $0x1, %ebp jg 0x15fb0 subq %r15, %r14 movq %r13, %rdi movq %rbx, %rdx movq %r14, %rsi callq 0x14e42 addq $0x18, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq leaq 0xa17c(%rip), %rdi # 0x20185 leaq 0xa1da(%rip), %rdx # 0x201ea movl $0x146, %esi # imm = 0x146 callq 0xde89
/quesnel[P]baryonyx/external/fmt/include/fmt/format.h
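A minimal standalone sketch of the padding split performed above. The shift values below are illustrative stand-ins for fmt's data::left_padding_shifts / data::right_padding_shifts tables; deriving the left-hand fill count from the total padding with a single table-driven shift avoids a per-alignment branch.

#include <cstddef>
#include <cstdio>
#include <string>

enum align_type { align_left = 0, align_right = 1, align_center = 2 };

// Illustrative shifts: >>0 puts all fill before the value (right-aligned),
// >>1 splits it evenly (centered), >>31 yields zero left fill for any
// realistic width (left-aligned).
constexpr unsigned char shifts[3] = {31, 0, 1};

std::string pad(const std::string& s, std::size_t width, align_type a,
                char fill) {
    std::size_t padding = width > s.size() ? width - s.size() : 0;
    std::size_t left_padding = padding >> shifts[a];
    return std::string(left_padding, fill) + s +
           std::string(padding - left_padding, fill);
}

int main() {
    std::printf("[%s]\n", pad("42", 8, align_right, '*').c_str());  // [******42]
    std::printf("[%s]\n", pad("42", 8, align_center, '*').c_str()); // [***42***]
    std::printf("[%s]\n", pad("42", 8, align_left, '*').c_str());   // [42******]
}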
int fmt::v7::detail::get_dynamic_spec<fmt::v7::detail::width_checker, fmt::v7::basic_format_arg<fmt::v7::basic_format_context<fmt::v7::detail::buffer_appender<char>, char>>, fmt::v7::detail::error_handler>(fmt::v7::basic_format_arg<fmt::v7::basic_format_context<fmt::v7::detail::buffer_appender<char>, char>>, fmt::v7::detail::error_handler)
FMT_CONSTEXPR int get_dynamic_spec(FormatArg arg, ErrorHandler eh) { unsigned long long value = visit_format_arg(Handler<ErrorHandler>(eh), arg); if (value > to_unsigned(max_value<int>())) eh.on_error("number is too big"); return static_cast<int>(value); }
pushq %rax movl 0x20(%rsp), %ecx decl %ecx cmpl $0xe, %ecx ja 0x183c9 leaq 0x76b1(%rip), %rdx # 0x1fa38 leaq 0x10(%rsp), %rax movslq (%rdx,%rcx,4), %rcx addq %rdx, %rcx jmpq *%rcx movslq (%rax), %rax testq %rax, %rax jns 0x183c7 jmp 0x183b3 cmpq $0x0, 0x8(%rax) js 0x183b3 movq (%rax), %rax jmp 0x183be movq (%rax), %rax testq %rax, %rax jns 0x183be leaq 0x8250(%rip), %rsi # 0x2060a jmp 0x183d9 movl (%rax), %eax movq %rax, %rcx shrq $0x1f, %rcx jne 0x183d2 popq %rcx retq leaq 0x8249(%rip), %rsi # 0x20619 jmp 0x183d9 leaq 0x7f0a(%rip), %rsi # 0x202e3 leaq 0x7(%rsp), %rdi callq 0xd936 nop
/quesnel[P]baryonyx/external/fmt/include/fmt/format.h
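In isolation, the range check above amounts to the following hedged sketch (to_width is a made-up name): a dynamically supplied width must be non-negative and fit in an int, otherwise the error handler fires with "number is too big".

#include <climits>
#include <cstdio>
#include <stdexcept>

int to_width(long long value) {
    if (value < 0) throw std::invalid_argument("negative width");
    if (static_cast<unsigned long long>(value) >
        static_cast<unsigned long long>(INT_MAX))
        throw std::invalid_argument("number is too big");
    return static_cast<int>(value);
}

int main() {
    std::printf("%d\n", to_width(12)); // 12
    try {
        to_width(1LL << 40);
    } catch (const std::invalid_argument& e) {
        std::printf("%s\n", e.what()); // number is too big
    }
}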
fmt::v7::detail::arg_formatter_base<fmt::v7::detail::buffer_appender<char>, char, fmt::v7::detail::error_handler>::operator()(void const*)
iterator operator()(const void* value) { if (specs_) check_pointer_type_spec(specs_->type, error_handler()); write_pointer(value); return out_; }
pushq %rbx subq $0x10, %rsp movq 0x10(%rdi), %rdx movq %rdi, %rbx testq %rdx, %rdx je 0x18812 movzbl 0x8(%rdx), %eax testl %eax, %eax je 0x18812 cmpl $0x70, %eax jne 0x18823 movq (%rbx), %rdi callq 0x16fe8 movq %rax, (%rbx) addq $0x10, %rsp popq %rbx retq leaq 0x7acb(%rip), %rsi # 0x202f5 leaq 0xf(%rsp), %rdi callq 0xd936
/quesnel[P]baryonyx/external/fmt/include/fmt/format.h
fmt::v7::detail::int_writer<fmt::v7::detail::buffer_appender<char>, char, unsigned int>::on_bin()
void on_bin() { if (specs.alt) { prefix[prefix_size++] = '0'; prefix[prefix_size++] = static_cast<char>(specs.type); } int num_digits = count_digits<1>(abs_value); out = write_int(out, num_digits, get_prefix(), specs, [this, num_digits](iterator it) { return format_uint<1, Char>(it, abs_value, num_digits); }); }
pushq %rbx subq $0x20, %rsp movq 0x10(%rdi), %r8 movq %rdi, %rbx cmpb $0x0, 0x9(%r8) jns 0x18a84 movl 0x20(%rbx), %eax leal 0x1(%rax), %ecx movl %ecx, 0x20(%rbx) movb $0x30, 0x1c(%rbx,%rax) movl 0x20(%rbx), %ecx movb 0x8(%r8), %al leal 0x1(%rcx), %edx movl %edx, 0x20(%rbx) movb %al, 0x1c(%rbx,%rcx) movl 0x18(%rbx), %eax xorl %esi, %esi movl %eax, %ecx incl %esi shrl %ecx cmpl $0x1, %eax movl %ecx, %eax ja 0x18a8b movq (%rbx), %rdi movl 0x20(%rbx), %ecx movq %rbx, 0x10(%rsp) movl %esi, 0x18(%rsp) leaq 0x1c(%rbx), %rdx vmovups 0x10(%rsp), %xmm0 vmovups %xmm0, (%rsp) callq 0x19453 movq %rax, (%rbx) addq $0x20, %rsp popq %rbx retq
/quesnel[P]baryonyx/external/fmt/include/fmt/format.h
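count_digits<1> above counts how many binary digits abs_value occupies. A sketch of the generic shift-and-test loop, mirroring the shrl/cmp sequence visible in the assembly (BITS is 1 there):

#include <cassert>

template <int BITS, typename UInt>
int count_digits(UInt n) {
    int num_digits = 0;
    do {
        ++num_digits; // every value, including 0, has at least one digit
    } while ((n >>= BITS) != 0);
    return num_digits;
}

int main() {
    assert(count_digits<1>(0b1011u) == 4); // four binary digits
    assert(count_digits<1>(0u) == 1);
    assert(count_digits<4>(0x1FFu) == 3);  // three hex digits
}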
fmt::v7::detail::buffer_appender<char> fmt::v7::detail::write_int<fmt::v7::detail::buffer_appender<char>, char, fmt::v7::detail::int_writer<fmt::v7::detail::buffer_appender<char>, char, unsigned int>::on_bin()::'lambda'(fmt::v7::detail::buffer_appender<char>)>(fmt::v7::detail::buffer_appender<char>, int, fmt::v7::basic_string_view<char>, fmt::v7::basic_format_specs<char> const&, fmt::v7::detail::int_writer<fmt::v7::detail::buffer_appender<char>, char, unsigned int>::on_bin()::'lambda'(fmt::v7::detail::buffer_appender<char>))
OutputIt write_int(OutputIt out, int num_digits, string_view prefix, const basic_format_specs<Char>& specs, F f) { auto data = write_int_data<Char>(num_digits, prefix, specs); using iterator = remove_reference_t<decltype(reserve(out, 0))>; return write_padded<align::right>(out, specs, data.size, [=](iterator it) { if (prefix.size() != 0) it = copy_str<Char>(prefix.begin(), prefix.end(), it); it = std::fill_n(it, data.padding, static_cast<Char>('0')); return f(it); }); }
pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x40, %rsp leaq 0x30(%rsp), %r13 movq %rdi, %r14 movq %r8, %rbx movq %rcx, %r15 movq %rdx, %r12 movq %r13, %rdi callq 0x19098 vmovups (%r13), %xmm0 movl 0x78(%rsp), %eax movq 0x70(%rsp), %rcx movq %r12, (%rsp) movq %r15, 0x8(%rsp) movq (%r13), %rsi vmovups %xmm0, 0x10(%rsp) movl %eax, 0x28(%rsp) movq %rcx, 0x20(%rsp) movslq (%rbx), %rax testq %rax, %rax js 0x19527 xorl %r15d, %r15d subq %rsi, %rax leaq 0x79ff(%rip), %rcx # 0x20eb8 cmovaeq %rax, %r15 movzbl 0x9(%rbx), %eax addq 0x10(%r14), %rsi andl $0xf, %eax movsbq (%rax,%rcx), %r12 movzbl 0xe(%rbx), %eax imulq %r15, %rax addq %rax, %rsi cmpq %rsi, 0x18(%r14) jae 0x194e6 movq (%r14), %rax movq %r14, %rdi callq *(%rax) addq $0xa, %rbx shrxq %r12, %r15, %r12 movq %r14, %rdi movq %r12, %rsi movq %rbx, %rdx callq 0x14e42 movq %rsp, %rdi movq %rax, %rsi callq 0x19540 subq %r12, %r15 movq %rax, %rdi movq %rbx, %rdx movq %r15, %rsi callq 0x14e42 addq $0x40, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 retq leaq 0x6c57(%rip), %rdi # 0x20185 leaq 0x6cb5(%rip), %rdx # 0x201ea movl $0x146, %esi # imm = 0x146 callq 0xde89 nop
/quesnel[P]baryonyx/external/fmt/include/fmt/format.h
void fmt::v7::detail::arg_formatter_base<fmt::v7::detail::buffer_appender<char>, char, fmt::v7::detail::error_handler>::write_int<unsigned int>(unsigned int, fmt::v7::basic_format_specs<char> const&)
void write_int(T value, const format_specs& spec) { using uint_type = uint32_or_64_or_128_t<T>; int_writer<iterator, Char, uint_type> w(out_, locale_, value, spec); handle_int_type_spec(spec.type, w); out_ = w.out; }
pushq %r14 pushq %rbx subq $0x28, %rsp vmovups (%rdi), %xmm0 movq %rdi, %rbx vmovaps %xmm0, (%rsp) movq %rdx, 0x10(%rsp) movl %esi, 0x18(%rsp) movl $0x0, 0x20(%rsp) movb 0x9(%rdx), %al shrb $0x4, %al andb $0x7, %al cmpb $0x2, %al jb 0x1988b movl $0x2b, %eax movl $0x20, %ecx cmovel %eax, %ecx movb %cl, 0x1c(%rsp) movl $0x1, 0x20(%rsp) movsbl 0x8(%rdx), %edi movq %rsp, %r14 movq %r14, %rsi callq 0x18907 movq (%r14), %rax movq %rax, (%rbx) addq $0x28, %rsp popq %rbx popq %r14 retq
/quesnel[P]baryonyx/external/fmt/include/fmt/format.h
void fmt::v7::detail::handle_int_type_spec<fmt::v7::detail::int_writer<fmt::v7::detail::buffer_appender<char>, char, unsigned long>&>(char, fmt::v7::detail::int_writer<fmt::v7::detail::buffer_appender<char>, char, unsigned long>&)
FMT_CONSTEXPR void handle_int_type_spec(char spec, Handler&& handler) { switch (spec) { case 0: case 'd': handler.on_dec(); break; case 'x': case 'X': handler.on_hex(); break; case 'b': case 'B': handler.on_bin(); break; case 'o': handler.on_oct(); break; #ifdef FMT_DEPRECATED_N_SPECIFIER case 'n': #endif case 'L': handler.on_num(); break; case 'c': handler.on_chr(); break; default: handler.on_error(); } }
cmpl $0x61, %edi jle 0x19946 cmpl $0x63, %edi jle 0x19968 cmpl $0x78, %edi je 0x19960 cmpl $0x6f, %edi je 0x19992 cmpl $0x64, %edi je 0x19982 jmp 0x1999a cmpl $0x4b, %edi jg 0x19956 testl %edi, %edi je 0x19982 cmpl $0x42, %edi je 0x1997a jmp 0x1999a cmpl $0x4c, %edi je 0x1998a cmpl $0x58, %edi jne 0x1999a movq %rsi, %rdi jmp 0x19a08 cmpl $0x62, %edi je 0x1997a cmpl $0x63, %edi jne 0x1999a movq %rsi, %rdi jmp 0x19f7e movq %rsi, %rdi jmp 0x19a7e movq %rsi, %rdi jmp 0x199a4 movq %rsi, %rdi jmp 0x19b6e movq %rsi, %rdi jmp 0x19af4 pushq %rax movq %rsi, %rdi callq 0x19fbc nop
/quesnel[P]baryonyx/external/fmt/include/fmt/format.h
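A usage sketch for this dispatcher: toy_handler below is invented for illustration (fmt's real handler is the int_writer shown around it). Each presentation character selects exactly one callback, and unknown characters land in on_error().

#include <cstdio>

struct toy_handler {
    unsigned value;
    void on_dec() { std::printf("%u\n", value); }
    void on_hex() { std::printf("%x\n", value); }
    void on_oct() { std::printf("%o\n", value); }
    void on_chr() { std::printf("%c\n", static_cast<char>(value)); }
    void on_error() { std::printf("invalid type specifier\n"); }
};

template <typename Handler>
void dispatch(char spec, Handler&& handler) {
    switch (spec) {
    case 0:
    case 'd': handler.on_dec(); break;
    case 'x':
    case 'X': handler.on_hex(); break;
    case 'o': handler.on_oct(); break;
    case 'c': handler.on_chr(); break;
    default: handler.on_error();
    }
}

int main() {
    toy_handler h{65};
    dispatch('d', h); // 65
    dispatch('x', h); // 41
    dispatch('c', h); // A
}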
fmt::v7::detail::int_writer<fmt::v7::detail::buffer_appender<char>, char, unsigned long>::on_bin()
void on_bin() { if (specs.alt) { prefix[prefix_size++] = '0'; prefix[prefix_size++] = static_cast<char>(specs.type); } int num_digits = count_digits<1>(abs_value); out = write_int(out, num_digits, get_prefix(), specs, [this, num_digits](iterator it) { return format_uint<1, Char>(it, abs_value, num_digits); }); }
pushq %rbx subq $0x20, %rsp movq 0x10(%rdi), %r8 movq %rdi, %rbx cmpb $0x0, 0x9(%r8) jns 0x19ab0 movl 0x24(%rbx), %eax leal 0x1(%rax), %ecx movl %ecx, 0x24(%rbx) movb $0x30, 0x20(%rbx,%rax) movl 0x24(%rbx), %ecx movb 0x8(%r8), %al leal 0x1(%rcx), %edx movl %edx, 0x24(%rbx) movb %al, 0x20(%rbx,%rcx) movq 0x18(%rbx), %rax xorl %esi, %esi movq %rax, %rcx incl %esi shrq %rcx cmpq $0x1, %rax movq %rcx, %rax ja 0x19ab9 movq (%rbx), %rdi movl 0x24(%rbx), %ecx movq %rbx, 0x10(%rsp) movl %esi, 0x18(%rsp) leaq 0x20(%rbx), %rdx vmovups 0x10(%rsp), %xmm0 vmovups %xmm0, (%rsp) callq 0x1a461 movq %rax, (%rbx) addq $0x20, %rsp popq %rbx retq nop
/quesnel[P]baryonyx/external/fmt/include/fmt/format.h
fmt::v7::detail::buffer_appender<char> fmt::v7::detail::write_int<fmt::v7::detail::buffer_appender<char>, char, fmt::v7::detail::int_writer<fmt::v7::detail::buffer_appender<char>, char, unsigned long>::on_dec()::'lambda'(fmt::v7::detail::buffer_appender<char>)>(fmt::v7::detail::buffer_appender<char>, int, fmt::v7::basic_string_view<char>, fmt::v7::basic_format_specs<char> const&, fmt::v7::detail::int_writer<fmt::v7::detail::buffer_appender<char>, char, unsigned long>::on_dec()::'lambda'(fmt::v7::detail::buffer_appender<char>))
OutputIt write_int(OutputIt out, int num_digits, string_view prefix, const basic_format_specs<Char>& specs, F f) { auto data = write_int_data<Char>(num_digits, prefix, specs); using iterator = remove_reference_t<decltype(reserve(out, 0))>; return write_padded<align::right>(out, specs, data.size, [=](iterator it) { if (prefix.size() != 0) it = copy_str<Char>(prefix.begin(), prefix.end(), it); it = std::fill_n(it, data.padding, static_cast<Char>('0')); return f(it); }); }
pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x40, %rsp leaq 0x30(%rsp), %r13 movq %rdi, %r14 movq %r8, %rbx movq %rcx, %r15 movq %rdx, %r12 movq %r13, %rdi callq 0x19098 vmovups (%r13), %xmm0 movl 0x78(%rsp), %eax movq 0x70(%rsp), %rcx movq %r12, (%rsp) movq %r15, 0x8(%rsp) movq (%r13), %rsi vmovups %xmm0, 0x10(%rsp) movl %eax, 0x28(%rsp) movq %rcx, 0x20(%rsp) movslq (%rbx), %rax testq %rax, %rax js 0x1a0d9 xorl %r15d, %r15d subq %rsi, %rax leaq 0x6e4d(%rip), %rcx # 0x20eb8 cmovaeq %rax, %r15 movzbl 0x9(%rbx), %eax addq 0x10(%r14), %rsi andl $0xf, %eax movsbq (%rax,%rcx), %r12 movzbl 0xe(%rbx), %eax imulq %r15, %rax addq %rax, %rsi cmpq %rsi, 0x18(%r14) jae 0x1a098 movq (%r14), %rax movq %r14, %rdi callq *(%rax) addq $0xa, %rbx shrxq %r12, %r15, %r12 movq %r14, %rdi movq %r12, %rsi movq %rbx, %rdx callq 0x14e42 movq %rsp, %rdi movq %rax, %rsi callq 0x1a0f2 subq %r12, %r15 movq %rax, %rdi movq %rbx, %rdx movq %r15, %rsi callq 0x14e42 addq $0x40, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 retq leaq 0x60a5(%rip), %rdi # 0x20185 leaq 0x6103(%rip), %rdx # 0x201ea movl $0x146, %esi # imm = 0x146 callq 0xde89 nop
/quesnel[P]baryonyx/external/fmt/include/fmt/format.h
fmt::v7::detail::int_writer<fmt::v7::detail::buffer_appender<char>, char, unsigned __int128>::on_hex()
void on_hex() { if (specs.alt) { prefix[prefix_size++] = '0'; prefix[prefix_size++] = specs.type; } int num_digits = count_digits<4>(abs_value); out = write_int(out, num_digits, get_prefix(), specs, [this, num_digits](iterator it) { return format_uint<4, Char>(it, abs_value, num_digits, specs.type != 'x'); }); }
pushq %rbx subq $0x20, %rsp movq 0x10(%rdi), %r8 movq %rdi, %rbx cmpb $0x0, 0x9(%r8) jns 0x1aade movl 0x34(%rbx), %eax leal 0x1(%rax), %ecx movl %ecx, 0x34(%rbx) movb $0x30, 0x30(%rbx,%rax) movl 0x34(%rbx), %ecx movb 0x8(%r8), %al leal 0x1(%rcx), %edx movl %edx, 0x34(%rbx) movb %al, 0x30(%rbx,%rcx) movq 0x28(%rbx), %rcx movq 0x20(%rbx), %rdx movl $0xf, %eax xorl %esi, %esi movq %rcx, %rdi movq %rcx, %r9 incl %esi shldq $0x3c, %rdx, %r9 shrq $0x4, %rdi cmpq %rdx, %rax movl $0x0, %edx sbbq %rcx, %rdx movq %rdi, %rcx movq %r9, %rdx jb 0x1aaf0 movq (%rbx), %rdi movl 0x34(%rbx), %ecx movq %rbx, 0x10(%rsp) movl %esi, 0x18(%rsp) leaq 0x30(%rbx), %rdx vmovups 0x10(%rsp), %xmm0 vmovups %xmm0, (%rsp) callq 0x1b513 movq %rax, (%rbx) addq $0x20, %rsp popq %rbx retq nop
/quesnel[P]baryonyx/external/fmt/include/fmt/format.h
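count_digits<4> on unsigned __int128 shows up in the assembly as a two-register loop (shldq $0x3c / shrq $0x4 shift the 128-bit value right by one nibble per iteration). The same loop in C++, assuming a compiler with the unsigned __int128 extension (GCC/Clang):

#include <cassert>

int count_hex_digits(unsigned __int128 n) {
    int num_digits = 0;
    do {
        ++num_digits;
    } while ((n >>= 4) != 0); // one hex digit per 4-bit shift
    return num_digits;
}

int main() {
    unsigned __int128 v = (static_cast<unsigned __int128>(1) << 64) | 0xff;
    assert(count_hex_digits(v) == 17); // 1 followed by 16 hex digits
}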
fmt::v7::detail::int_writer<fmt::v7::detail::buffer_appender<char>, char, unsigned __int128>::on_num()
void on_num() { std::string groups = grouping<Char>(locale); if (groups.empty()) return on_dec(); auto sep = thousands_sep<Char>(locale); if (!sep) return on_dec(); int num_digits = count_digits(abs_value); int size = num_digits, n = num_digits; std::string::const_iterator group = groups.cbegin(); while (group != groups.cend() && n > *group && *group > 0 && *group != max_value<char>()) { size += sep_size; n -= *group; ++group; } if (group == groups.cend()) size += sep_size * ((n - 1) / groups.back()); char digits[40]; format_decimal(digits, abs_value, num_digits); basic_memory_buffer<Char> buffer; size += static_cast<int>(prefix_size); const auto usize = to_unsigned(size); buffer.resize(usize); basic_string_view<Char> s(&sep, sep_size); // Index of a decimal digit with the least significant digit having index 0. int digit_index = 0; group = groups.cbegin(); auto p = buffer.data() + size - 1; for (int i = num_digits - 1; i > 0; --i) { *p-- = static_cast<Char>(digits[i]); if (*group <= 0 || ++digit_index % *group != 0 || *group == max_value<char>()) continue; if (group + 1 != groups.cend()) { digit_index = 0; ++group; } std::uninitialized_copy(s.data(), s.data() + s.size(), make_checked(p, s.size())); p -= s.size(); } *p-- = static_cast<Char>(*digits); if (prefix_size != 0) *p = static_cast<Char>('-'); auto data = buffer.data(); out = write_padded<align::right>( out, specs, usize, usize, [=](iterator it) { return copy_str<Char>(data, data + size, it); }); }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x288, %rsp # imm = 0x288 movq 0x8(%rdi), %rsi movq %rdi, %rbx leaq 0x70(%rsp), %rdi testq %rsi, %rsi je 0x1ac8f callq 0x3120 jmp 0x1ac94 callq 0x33d0 leaq 0x70(%rsp), %rdi callq 0x30d0 movq (%rax), %rcx leaq 0x20(%rsp), %rdi movq %rax, %rsi callq *0x20(%rcx) leaq 0x70(%rsp), %rdi callq 0x32c0 cmpq $0x0, 0x28(%rsp) je 0x1acd3 movq 0x8(%rbx), %rsi leaq 0x70(%rsp), %rdi testq %rsi, %rsi je 0x1ace0 callq 0x3120 jmp 0x1ace5 movq %rbx, %rdi callq 0x1a9d2 jmp 0x1b0f4 callq 0x33d0 leaq 0x70(%rsp), %rdi callq 0x30d0 movq (%rax), %rcx movq %rax, %rdi callq *0x18(%rcx) leaq 0x70(%rsp), %rdi movl %eax, %ebp callq 0x32c0 testb %bpl, %bpl je 0x1adaa movq 0x20(%rbx), %r15 movb %bpl, 0xf(%rsp) movq 0x28(%rbx), %rbp movl $0x1, %r14d movq %rbx, 0x18(%rsp) cmpq $0xa, %r15 movq %rbp, %rax sbbq $0x0, %rax jb 0x1adc0 movl $0x4, %r14d movl $0x63, %ebx movq %r15, %r12 movq %rbp, %r13 cmpq %r12, %rbx movl $0x0, %eax sbbq %r13, %rax jae 0x1adb7 movl $0x3e7, %eax # imm = 0x3E7 cmpq %r12, %rax movl $0x0, %eax sbbq %r13, %rax jae 0x1adbd cmpq $0x2710, %r12 # imm = 0x2710 movq %r13, %rax sbbq $0x0, %rax jb 0x1adc0 movl $0x2710, %edx # imm = 0x2710 movq %r12, %rdi movq %r13, %rsi xorl %ecx, %ecx callq 0x3080 movl $0x1869f, %ecx # imm = 0x1869F addl $0x4, %r14d cmpq %r12, %rcx movl $0x0, %ecx movq %rax, %r12 sbbq %r13, %rcx movq %rdx, %r13 jb 0x1ad47 addl $-0x3, %r14d jmp 0x1adc0 movq %rbx, %rdi callq 0x1a9d2 jmp 0x1b0f4 addl $-0x2, %r14d jmp 0x1adc0 decl %r14d movq 0x20(%rsp), %rcx movq 0x28(%rsp), %rdx movl %r14d, %eax movl %r14d, %esi testq %rdx, %rdx je 0x1adfa leal (%r14,%rdx), %esi xorl %r12d, %r12d movl %r14d, %eax movsbl (%rcx,%r12), %edi subl %edi, %eax jle 0x1ae0c addb $-0x7f, %dil cmpb $-0x7e, %dil jb 0x1ae0c incq %r12 cmpq %r12, %rdx jne 0x1addf movsbl -0x1(%rcx,%rdx), %ecx decl %eax cltd idivl %ecx movl %eax, %r12d addl %esi, %r12d jmp 0x1ae0f addl %r14d, %r12d cmpq $0xa, %r15 movq %rbp, %rax movl $0x1, %ebx movq %r14, 0x10(%rsp) sbbq $0x0, %rax jb 0x1aea3 movl $0x4, %ebx movq %r15, %r13 movq %rbp, %r14 movl $0x63, %eax cmpq %r13, %rax movl $0x0, %eax sbbq %r14, %rax jae 0x1ae97 movl $0x3e7, %eax # imm = 0x3E7 cmpq %r13, %rax movl $0x0, %eax sbbq %r14, %rax jae 0x1ae9c cmpq $0x2710, %r13 # imm = 0x2710 movq %r14, %rax sbbq $0x0, %rax jb 0x1ae9e movl $0x2710, %edx # imm = 0x2710 movq %r13, %rdi movq %r14, %rsi xorl %ecx, %ecx callq 0x3080 movl $0x1869f, %ecx # imm = 0x1869F addl $0x4, %ebx cmpq %r13, %rcx movl $0x0, %ecx movq %rax, %r13 sbbq %r14, %rcx movq %rdx, %r14 jb 0x1ae31 addl $-0x3, %ebx jmp 0x1ae9e addl $-0x2, %ebx jmp 0x1ae9e decl %ebx movq 0x10(%rsp), %r14 cmpl %r14d, %ebx jg 0x1b139 movslq %r14d, %rax cmpq $0x64, %r15 leaq 0x5b96(%rip), %rbx # 0x20a50 leaq 0x40(%rsp,%rax), %r13 movq %rbp, %rax sbbq $0x0, %rax jb 0x1af0e movl $0x64, %edx leaq -0x2(%r13), %r14 movq %r15, %rdi movq %rbp, %rsi xorl %ecx, %ecx callq 0x3080 imulq $0x64, %rax, %rcx movq %r15, %rsi subq %rcx, %rsi movzwl (%rbx,%rsi,2), %ecx movw %cx, -0x2(%r13) movl $0x270f, %ecx # imm = 0x270F movq %r14, %r13 cmpq %r15, %rcx movl $0x0, %ecx movq %rax, %r15 sbbq %rbp, %rcx movq %rdx, %rbp jb 0x1aec8 jmp 0x1af17 movq %r15, %rax movq %rbp, %rdx movq %r13, %r14 movl $0x9, %esi xorl %ecx, %ecx cmpq %rax, %rsi sbbq %rdx, %rcx jb 0x1af2e orb $0x30, %al movb %al, -0x1(%r14) jmp 0x1af37 movzwl (%rbx,%rax,2), %eax movw %ax, -0x2(%r14) movq 0x18(%rsp), %rbx leaq 0x90(%rsp), %rdx leaq 0xee0d(%rip), %rax # 0x29d58 movq 0x10(%rsp), %r15 movq $0x0, -0x10(%rdx) movq %rax, 
-0x20(%rdx) movq %rdx, -0x18(%rdx) movq $0x1f4, -0x8(%rdx) # imm = 0x1F4 addl 0x34(%rbx), %r12d js 0x1b121 movb 0xf(%rsp), %bpl movl $0x1f4, %ecx # imm = 0x1F4 movl %r12d, %r14d movq %rdx, %rax cmpl $0x1f5, %r12d # imm = 0x1F5 jb 0x1afa5 leaq 0x70(%rsp), %rdi movq %r14, %rsi callq 0x139dc movq 0x78(%rsp), %rax movq 0x88(%rsp), %rcx cmpq %r14, %rcx leaq -0x1(%rax,%r14), %r9 cmovaeq %r14, %rcx movq %rcx, 0x80(%rsp) cmpl $0x2, %r15d jl 0x1b027 movq 0x20(%rsp), %rsi movl %r15d, %edi incq %rdi xorl %r8d, %r8d xorl %ecx, %ecx movb 0x3e(%rsp,%rdi), %al leaq -0x1(%r9), %r10 movb %al, (%r9) movsbl (%rsi), %r11d testl %r11d, %r11d jle 0x1b019 incl %ecx movl %ecx, %eax cltd idivl %r11d cmpb $0x7f, %r11b je 0x1b019 testl %edx, %edx jne 0x1b019 movq 0x20(%rsp), %rdx leaq 0x1(%rsi), %rax addq 0x28(%rsp), %rdx movb %bpl, -0x1(%r9) cmpq %rdx, %rax cmovneq %rax, %rsi cmovnel %r8d, %ecx addq $-0x2, %r9 movq %r9, %r10 decq %rdi movq %r10, %r9 cmpq $0x2, %rdi ja 0x1afcf jmp 0x1b02a movq %r9, %r10 movb 0x40(%rsp), %al movb %al, (%r10) cmpl $0x0, 0x34(%rbx) je 0x1b03c movb $0x2d, -0x1(%r10) movq 0x10(%rbx), %r15 movslq (%r15), %rax testq %rax, %rax js 0x1b121 xorl %r12d, %r12d subq %r14, %rax movq (%rbx), %rbp leaq 0x5e5c(%rip), %rcx # 0x20eb8 movq 0x78(%rsp), %r13 cmovaeq %rax, %r12 movzbl 0x9(%r15), %eax movq 0x10(%rbp), %rsi andl $0xf, %eax movsbq (%rax,%rcx), %rbx movzbl 0xe(%r15), %eax addq %r14, %rsi imulq %r12, %rax addq %rax, %rsi cmpq %rsi, 0x18(%rbp) jae 0x1b094 movq (%rbp), %rax movq %rbp, %rdi callq *(%rax) addq $0xa, %r15 shrxq %rbx, %r12, %rbx movq %rbp, %rdi movq %rbx, %rsi movq %r15, %rdx callq 0x14e42 addq %r13, %r14 movq %r13, %rdi movq %r14, %rsi movq %rax, %rdx callq 0x12062 subq %rbx, %r12 movq %rax, %rdi movq %r12, %rsi movq %r15, %rdx callq 0x14e42 movq 0x18(%rsp), %rcx movq %rax, (%rcx) leaq 0x90(%rsp), %rax movq 0x78(%rsp), %rdi cmpq %rax, %rdi je 0x1b0f4 movq 0x88(%rsp), %rsi callq 0x3230 leaq 0x30(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x1b10f movq 0x30(%rsp), %rsi incq %rsi callq 0x3230 addq $0x288, %rsp # imm = 0x288 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq leaq 0x505d(%rip), %rdi # 0x20185 leaq 0x50bb(%rip), %rdx # 0x201ea movl $0x146, %esi # imm = 0x146 callq 0xde89 leaq 0x4f51(%rip), %rdi # 0x20091 leaq 0x4fb1(%rip), %rdx # 0x200f8 movl $0x41b, %esi # imm = 0x41B callq 0xde89 jmp 0x1b169 jmp 0x1b155 movq %rax, %rbx jmp 0x1b18b leaq 0x70(%rsp), %rdi movq %rax, %rbx callq 0x32c0 jmp 0x1b18b movq 0x78(%rsp), %rdi movq %rax, %rbx leaq 0x90(%rsp), %rax cmpq %rax, %rdi je 0x1b18b movq 0x88(%rsp), %rsi callq 0x3230 leaq 0x30(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x1b1b5 movq 0x30(%rsp), %rsi incq %rsi callq 0x3230 jmp 0x1b1b5 leaq 0x70(%rsp), %rdi movq %rax, %rbx callq 0x32c0 movq %rbx, %rdi callq 0x3390 nop
/quesnel[P]baryonyx/external/fmt/include/fmt/format.h
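The heart of on_num() is the right-to-left walk that interleaves separators according to the locale's grouping string. group_digits below is a simplified stand-in (no sign prefix, single-character separator, no CHAR_MAX sentinel handling): each byte of groups gives the size of the next group, and the last byte repeats.

#include <cassert>
#include <string>

std::string group_digits(std::string digits, const std::string& groups,
                         char sep) {
    std::string out;
    std::size_t group_idx = 0; // current position in the grouping spec
    int in_group = 0;          // digits emitted in the current group
    for (std::size_t i = digits.size(); i-- > 0;) {
        out.insert(out.begin(), digits[i]);
        if (++in_group == groups[group_idx] && i != 0) {
            out.insert(out.begin(), sep);
            in_group = 0;
            if (group_idx + 1 < groups.size()) ++group_idx;
        }
    }
    return out;
}

int main() {
    assert(group_digits("1234567", "\3", ',') == "1,234,567");
    assert(group_digits("1234567", "\3\2", ',') == "12,34,567"); // e.g. en_IN
}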
fmt::v7::detail::arg_formatter_base<fmt::v7::detail::buffer_appender<char>, char, fmt::v7::detail::error_handler>::write(bool)
void write(bool value) { if (specs_) write(string_view(value ? "true" : "false"), *specs_); else out_ = detail::write<Char>(out_, value); }
pushq %r14 pushq %rbx pushq %rax movq 0x10(%rdi), %rcx movq %rdi, %rbx testq %rcx, %rcx je 0x1bc73 movq (%rbx), %rdi leaq 0x451a(%rip), %rdx # 0x2016a leaq 0x4518(%rip), %rax # 0x2016f testb %sil, %sil cmovneq %rdx, %rax movzbl %sil, %edx xorq $0x5, %rdx movq %rax, %rsi callq 0x1bcac movq %rax, %r14 jmp 0x1bca1 movq (%rbx), %r14 leaq 0x44ed(%rip), %rcx # 0x2016a leaq 0x44eb(%rip), %rax # 0x2016f testb %sil, %sil movzbl %sil, %edx cmovneq %rcx, %rax xorq $0x5, %rdx addq %rax, %rdx movq %rax, %rsi movq %r14, %rdi callq 0x1477a movq %r14, (%rbx) addq $0x8, %rsp popq %rbx popq %r14 retq
/quesnel[P]baryonyx/external/fmt/include/fmt/format.h
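The compiled code above avoids a length computation entirely: note the xorq $0x5. Since "true" has length 4 and "false" length 5, the length is value ^ 5 (1 ^ 5 == 4, 0 ^ 5 == 5) and the pointer is picked with cmovne. The same trick in C++:

#include <cstdio>
#include <initializer_list>

int main() {
    for (int value : {0, 1}) {
        const char* s = value ? "true" : "false";
        std::printf("%.*s\n", value ^ 5, s); // prints false, then true
    }
}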
int fmt::v7::detail::snprintf_float<double>(double, int, fmt::v7::detail::float_specs, fmt::v7::detail::buffer<char>&)
int snprintf_float(T value, int precision, float_specs specs, buffer<char>& buf) { // Buffer capacity must be non-zero, otherwise MSVC's vsnprintf_s will fail. FMT_ASSERT(buf.capacity() > buf.size(), "empty buffer"); static_assert(!std::is_same<T, float>::value, ""); // Subtract 1 to account for the difference in precision since we use %e for // both general and exponent format. if (specs.format == float_format::general || specs.format == float_format::exp) precision = (precision >= 0 ? precision : 6) - 1; // Build the format string. enum { max_format_size = 7 }; // The longest format is "%#.*Le". char format[max_format_size]; char* format_ptr = format; *format_ptr++ = '%'; if (specs.showpoint && specs.format == float_format::hex) *format_ptr++ = '#'; if (precision >= 0) { *format_ptr++ = '.'; *format_ptr++ = '*'; } if (std::is_same<T, long double>()) *format_ptr++ = 'L'; *format_ptr++ = specs.format != float_format::hex ? (specs.format == float_format::fixed ? 'f' : 'e') : (specs.upper ? 'A' : 'a'); *format_ptr = '\0'; // Format using snprintf. auto offset = buf.size(); for (;;) { auto begin = buf.data() + offset; auto capacity = buf.capacity() - offset; #ifdef FMT_FUZZ if (precision > 100000) throw std::runtime_error( "fuzz mode - avoid large allocation inside snprintf"); #endif // Suppress the warning about a nonliteral format string. // Cannot use auto because of a bug in MinGW (#1532). int (*snprintf_ptr)(char*, size_t, const char*, ...) = FMT_SNPRINTF; int result = precision >= 0 ? snprintf_ptr(begin, capacity, format, precision, value) : snprintf_ptr(begin, capacity, format, value); if (result < 0) { // The buffer will grow exponentially. buf.try_reserve(buf.capacity() + 1); continue; } auto size = to_unsigned(result); // Size equal to capacity means that the last character was truncated. if (size >= capacity) { buf.try_reserve(size + offset + 1); // Add 1 for the terminating '\0'. continue; } auto is_digit = [](char c) { return c >= '0' && c <= '9'; }; if (specs.format == float_format::fixed) { if (precision == 0) { buf.try_resize(size); return 0; } // Find and remove the decimal point. auto end = begin + size, p = end; do { --p; } while (is_digit(*p)); int fraction_size = static_cast<int>(end - p - 1); std::memmove(p, p + 1, to_unsigned(fraction_size)); buf.try_resize(size - 1); return -fraction_size; } if (specs.format == float_format::hex) { buf.try_resize(size + offset); return 0; } // Find and parse the exponent. auto end = begin + size, exp_pos = end; do { --exp_pos; } while (*exp_pos != 'e'); char sign = exp_pos[1]; assert(sign == '+' || sign == '-'); int exp = 0; auto p = exp_pos + 2; // Skip 'e' and sign. do { assert(is_digit(*p)); exp = exp * 10 + (*p++ - '0'); } while (p != end); if (sign == '-') exp = -exp; int fraction_size = 0; if (exp_pos != begin + 1) { // Remove trailing zeros. auto fraction_end = exp_pos - 1; while (*fraction_end == '0') --fraction_end; // Move the fractional part left to get rid of the decimal point. fraction_size = static_cast<int>(fraction_end - begin - 1); std::memmove(begin + 1, begin + 2, to_unsigned(fraction_size)); } buf.try_resize(to_unsigned(fraction_size) + offset + 1); return exp - fraction_size; } }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x28, %rsp movq 0x18(%rdx), %rax vmovsd %xmm0, 0x10(%rsp) cmpq 0x10(%rdx), %rax jbe 0x1c666 shrq $0x20, %rsi leal -0x1(%rdi), %eax testl %edi, %edi movl $0x5, %ebp movq %rdx, %r12 movb $0x25, 0x1(%rsp) cmovnsl %eax, %ebp cmpb $0x2, %sil cmovael %edi, %ebp btl $0x14, %esi setae %al cmpb $0x3, %sil setne %cl orb %al, %cl jne 0x1c3bf leaq 0x3(%rsp), %rax movb $0x23, -0x1(%rax) jmp 0x1c3c4 leaq 0x2(%rsp), %rax testl %ebp, %ebp js 0x1c3d1 movw $0x2a2e, (%rax) # imm = 0x2A2E addq $0x2, %rax movq %rsi, 0x20(%rsp) movq %rdi, 0x18(%rsp) cmpb $0x3, %sil jne 0x1c3f0 btl $0x10, %esi setae %cl shlb $0x5, %cl orb $0x41, %cl jmp 0x1c3fa cmpb $0x2, %sil sete %cl addb $0x65, %cl movb %cl, (%rax) movb $0x0, 0x1(%rax) movq 0x10(%r12), %r13 leaq 0x1(%r13), %rax movq %rax, 0x8(%rsp) movq 0x18(%r12), %rbx movq 0x8(%r12), %r15 leaq 0x1(%rsp), %rdx addq %r13, %r15 subq %r13, %rbx movq %r15, %rdi movq %rbx, %rsi testl %ebp, %ebp js 0x1c43e vmovsd 0x10(%rsp), %xmm0 movb $0x1, %al movl %ebp, %ecx callq 0x3320 jmp 0x1c44b vmovsd 0x10(%rsp), %xmm0 movb $0x1, %al callq 0x3320 movl %eax, %r14d testl %eax, %eax js 0x1c475 movl %ebp, %ecx movq %r12, %rbp movl %r14d, %r12d cmpq %r12, %rbx ja 0x1c48e movq 0x8(%rsp), %rax leaq (%rax,%r12), %rsi movq %rbp, %r12 cmpq %rsi, 0x18(%rbp) movl %ecx, %ebp jae 0x1c40e jmp 0x1c483 movq 0x18(%r12), %rsi cmpq $-0x1, %rsi je 0x1c40e incq %rsi movq (%r12), %rax movq %r12, %rdi callq *(%rax) jmp 0x1c40e movq 0x20(%rsp), %rax cmpb $0x2, %al je 0x1c4d2 movzbl %al, %eax cmpl $0x3, %eax jne 0x1c544 movq 0x18(%rbp), %rax addq %r12, %r13 cmpq %r13, %rax jae 0x1c4bf movq (%rbp), %rax movq %rbp, %rdi movq %r13, %rsi callq *(%rax) movq 0x18(%rbp), %rax cmpq %r13, %rax cmovbq %rax, %r13 xorl %r12d, %r12d movq %r13, 0x10(%rbp) jmp 0x1c635 cmpl $0x0, 0x18(%rsp) je 0x1c5d9 addq %r12, %r15 movl $0x1, %r12d movb -0x2(%r15,%r12), %al decq %r12 addb $-0x30, %al cmpb $0xa, %al jb 0x1c4e6 movq %r12, %rdx negq %rdx testl %edx, %edx js 0x1c67e leaq -0x1(%r15,%r12), %rdi addq %r12, %r15 andl $0x7fffffff, %edx # imm = 0x7FFFFFFF movq %r15, %rsi callq 0x3330 movq 0x18(%rbp), %rax decl %r14d cmpq %r14, %rax jae 0x1c534 movq (%rbp), %rax movq %rbp, %rdi movq %r14, %rsi callq *(%rax) movq 0x18(%rbp), %rax cmpq %r14, %rax cmovbq %rax, %r14 movq %r14, 0x10(%rbp) jmp 0x1c635 movq %r12, %rbx decq %rbx movq $-0x2, %rax incq %rax cmpb $0x65, (%r15,%rbx) leaq -0x1(%rbx), %rbx jne 0x1c551 movzbl 0x2(%rbx,%r15), %ecx cmpl $0x2b, %ecx je 0x1c573 cmpl $0x2d, %ecx jne 0x1c696 leaq (%r15,%r12), %rdx negq %rax xorl %esi, %esi movzbl (%rdx,%rax), %edi leal -0x30(%rdi), %r8d cmpb $0xa, %r8b jae 0x1c647 leal (%rsi,%rsi,4), %esi incq %rax leal -0x30(%rdi,%rsi,2), %esi jne 0x1c57c movl %esi, %r12d negl %r12d cmpb $0x2d, %cl cmovnel %esi, %r12d testq %rbx, %rbx je 0x1c602 leaq 0x1(%r15), %rdi cmpb $0x30, (%r15,%rbx) leaq -0x1(%rbx), %rbx je 0x1c5b0 testl %ebx, %ebx js 0x1c67e movl %ebx, %edx addq $0x2, %r15 andl $0x7fffffff, %edx # imm = 0x7FFFFFFF movq %r15, %rsi callq 0x3330 jmp 0x1c604 movq 0x18(%rbp), %rax cmpq %r12, %rax jae 0x1c5f2 movq (%rbp), %rax movq %rbp, %rdi movq %r12, %rsi callq *(%rax) movq 0x18(%rbp), %rax cmpq %r12, %rax cmovaeq %r12, %rax xorl %r12d, %r12d movq %rax, 0x10(%rbp) jmp 0x1c635 xorl %ebx, %ebx movq 0x8(%rsp), %r15 movl %ebx, %eax addq %rax, %r15 movq 0x18(%rbp), %rax cmpq %r15, %rax jae 0x1c627 movq (%rbp), %rax movq %rbp, %rdi movq %r15, %rsi callq *(%rax) movq 0x18(%rbp), %rax cmpq %r15, %rax cmovbq %rax, %r15 subl %ebx, %r12d movq 
%r15, 0x10(%rbp) movl %r12d, %eax addq $0x28, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq leaq 0x3d3d(%rip), %rdi # 0x2038b leaq 0x3bc1(%rip), %rsi # 0x20216 leaq 0x4069(%rip), %rcx # 0x206c5 movl $0xa12, %edx # imm = 0xA12 callq 0x3150 leaq 0x3ba9(%rip), %rdi # 0x20216 leaq 0x3c98(%rip), %rdx # 0x2030c movl $0x9c0, %esi # imm = 0x9C0 callq 0xde89 leaq 0x3b00(%rip), %rdi # 0x20185 leaq 0x3b5e(%rip), %rdx # 0x201ea movl $0x146, %esi # imm = 0x146 callq 0xde89 leaq 0x3c7c(%rip), %rdi # 0x20319 leaq 0x3b72(%rip), %rsi # 0x20216 leaq 0x401a(%rip), %rcx # 0x206c5 movl $0xa0e, %edx # imm = 0xA0E callq 0x3150
/quesnel[P]baryonyx/external/fmt/include/fmt/format-inl.h
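The format string is assembled one byte at a time into a 7-byte buffer ("%#.*Le" being the longest case). A simplified sketch restricted to double and the 'e' presentation (format_exp is a made-up name; the real code also emits '#', 'f', hex 'a'/'A', and grows the buffer and retries on truncation):

#include <cstddef>
#include <cstdio>

int format_exp(char* out, std::size_t size, double value, int precision) {
    char format[7]; // enough for the longest case, "%#.*Le"
    char* p = format;
    *p++ = '%';
    if (precision >= 0) { *p++ = '.'; *p++ = '*'; }
    *p++ = 'e';
    *p = '\0';
    return precision >= 0
               ? std::snprintf(out, size, format, precision, value)
               : std::snprintf(out, size, format, value);
}

int main() {
    char buf[32];
    format_exp(buf, sizeof buf, 1234.5, 2);
    std::puts(buf); // 1.23e+03
}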
int fmt::v7::detail::format_float<double>(double, int, fmt::v7::detail::float_specs, fmt::v7::detail::buffer<char>&)
int format_float(T value, int precision, float_specs specs, buffer<char>& buf) { static_assert(!std::is_same<T, float>::value, ""); FMT_ASSERT(value >= 0, "value is negative"); const bool fixed = specs.format == float_format::fixed; if (value <= 0) { // <= instead of == to silence a warning. if (precision <= 0 || !fixed) { buf.push_back('0'); return 0; } buf.try_resize(to_unsigned(precision)); std::uninitialized_fill_n(buf.data(), precision, '0'); return -precision; } if (!specs.use_grisu) return snprintf_float(value, precision, specs, buf); if (precision < 0) { // Use Dragonbox for the shortest format. if (specs.binary32) { auto dec = dragonbox::to_decimal(static_cast<float>(value)); write<char>(buffer_appender<char>(buf), dec.significand); return dec.exponent; } auto dec = dragonbox::to_decimal(static_cast<double>(value)); write<char>(buffer_appender<char>(buf), dec.significand); return dec.exponent; } // Use Grisu + Dragon4 for the given precision: // https://www.cs.tufts.edu/~nr/cs257/archive/florian-loitsch/printf.pdf. int exp = 0; const int min_exp = -60; // alpha in Grisu. int cached_exp10 = 0; // K in Grisu. fp normalized = normalize(fp(value)); const auto cached_pow = get_cached_power( min_exp - (normalized.e + fp::significand_size), cached_exp10); normalized = normalized * cached_pow; // Limit precision to the maximum possible number of significant digits in an // IEEE754 double because we don't need to generate zeros. const int max_double_digits = 767; if (precision > max_double_digits) precision = max_double_digits; fixed_handler handler{buf.data(), 0, precision, -cached_exp10, fixed}; if (grisu_gen_digits(normalized, 1, exp, handler) == digits::error) { exp += handler.size - cached_exp10 - 1; fallback_format(value, handler.precision, specs.binary32, buf, exp); } else { exp += handler.exp10; buf.try_resize(to_unsigned(handler.size)); } if (!fixed && !specs.showpoint) { // Remove trailing zeros. auto num_digits = buf.size(); while (num_digits > 0 && buf[num_digits - 1] == '0') { --num_digits; ++exp; } buf.try_resize(num_digits); } return exp; }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x58, %rsp vxorpd %xmm1, %xmm1, %xmm1 vucomisd %xmm1, %xmm0 jb 0x1cc56 movabsq $0xff00000000, %rbp # imm = 0xFF00000000 movabsq $0x200000000, %rcx # imm = 0x200000000 movq %rdx, %r12 movl %edi, %r15d andq %rsi, %rbp cmpq %rcx, %rbp sete %al vucomisd %xmm0, %xmm1 jae 0x1c720 movq %rsi, %rdi shrq $0x20, %rdi btl $0x13, %edi jb 0x1c778 movl %r15d, %edi movq %r12, %rdx addq $0x58, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp jmp 0x1c360 cmpq %rcx, %rbp sete %al testl %r15d, %r15d setg %cl testb %al, %cl je 0x1c7b8 movq 0x18(%r12), %rax movl %r15d, %r14d cmpq %r14, %rax jae 0x1c752 movq (%r12), %rax movq %r12, %rdi movq %r14, %rsi callq *(%rax) movq 0x18(%r12), %rax cmpq %r14, %rax movl $0x30, %esi movq %r14, %rdx cmovaeq %r14, %rax movq %rax, 0x10(%r12) movq 0x8(%r12), %rdi callq 0x3110 negl %r15d jmp 0x1cc27 testl %r15d, %r15d js 0x1c7f0 movabsq $0x10000000000000, %rdx # imm = 0x10000000000000 vmovq %xmm0, %rcx movq %rdi, 0x38(%rsp) decq %rdx andq %rcx, %rdx shrq $0x34, %rcx jne 0x1c812 lzcntq %rdx, %rsi movl $0xfffffbd9, %ecx # imm = 0xFFFFFBD9 subl %esi, %ecx xorb $-0x40, %sil addb $0x35, %sil shlxq %rsi, %rdx, %rdx jmp 0x1c81d movq 0x10(%r12), %rax leaq 0x1(%rax), %rsi cmpq %rsi, 0x18(%r12) jae 0x1c7da movq (%r12), %rax movq %r12, %rdi callq *(%rax) movq 0x10(%r12), %rax leaq 0x1(%rax), %rsi movq 0x8(%r12), %rcx movq %rsi, 0x10(%r12) xorl %r15d, %r15d movb $0x30, (%rcx,%rax) jmp 0x1cc27 btl $0x12, %edi jb 0x1cc0d callq 0xfbfa movl %edx, %r15d movq %r12, %rdi movq %rax, %rsi callq 0x11eda jmp 0x1cc27 btsq $0x34, %rdx addl $0xfffffbcd, %ecx # imm = 0xFFFFFBCD movl $0xffffffce, %esi # imm = 0xFFFFFFCE movl $0xffffffff, %r9d # imm = 0xFFFFFFFF shlq $0xb, %rdx leaq 0x499d(%rip), %rdi # 0x211d0 leaq 0x4c56(%rip), %r8 # 0x21490 movl $0xffffffcb, %ebx # imm = 0xFFFFFFCB subl %ecx, %esi movslq %esi, %rsi imulq $0x4d104d42, %rsi, %rsi # imm = 0x4D104D42 addq %rsi, %r9 shrq $0x20, %r9 leal 0x15b(%r9), %esi addl $0x162, %r9d # imm = 0x162 testl %esi, %esi cmovnsl %esi, %r9d movl %r9d, %esi sarl $0x3, %esi andl $-0x8, %r9d movslq %esi, %rsi movq %r9, 0x40(%rsp) mulxq 0x8(%rdi,%rsi,8), %r14, %rdx movswl 0x2(%r8,%rsi,2), %r8d addl %ecx, %r8d movl $0x2ff, %ecx # imm = 0x2FF subl %r8d, %ebx shrq $0x3f, %r14 addq %rdx, %r14 movq 0x8(%r12), %rdx cmpl %ecx, %r15d cmovbl %r15d, %ecx shrxq %rbx, %r14, %r13 movq %rdx, 0x18(%rsp) movl $0x154, %edx # imm = 0x154 movl $0x0, 0x20(%rsp) movl %ecx, 0x24(%rsp) subl %r9d, %edx movl %edx, 0x28(%rsp) movb %al, 0x2c(%rsp) testl %r13d, %r13d je 0x1cc6e movq %r13, %rax shrq $0x20, %rax vmovsd %xmm0, 0x48(%rsp) jne 0x1cc86 movl %r13d, %eax orl $0x1, %eax leaq 0x40ae(%rip), %rcx # 0x209a0 leaq 0x14(%rsp), %r8 movq %r14, %rdx leaq 0x18(%rsp), %rdi lzcntl %eax, %eax xorl $0x1f, %eax movzwl (%rcx,%rax,2), %eax leaq 0x410f(%rip), %rcx # 0x20a20 cmpl (%rcx,%rax,4), %r13d leaq 0x4c24(%rip), %rcx # 0x21540 sbbq $0x0, %rax shlxq %rbx, -0x8(%rcx,%rax,8), %rsi movl %eax, (%r8) movabsq $-0x3333333333333333, %rax # imm = 0xCCCCCCCCCCCCCCCD movl $0xa, %ecx mulxq %rax, %rdx, %rdx shrq $0x3, %rdx callq 0x120bc testl %eax, %eax jne 0x1cb15 movl 0x14(%rsp), %r9d movl $0x1, %eax movq %r12, 0x30(%rsp) leaq 0x314b(%rip), %r12 # 0x1fab0 shlxq %rbx, %rax, %rax leaq -0x1(%rax), %r15 movq %rax, 0x50(%rsp) andq %r15, %r14 decl %r9d cmpl $0x9, %r9d ja 0x1cc9e movslq (%r12,%r9,4), %rax addq %r12, %rax jmpq *%rax movl %r13d, %eax xorl %r13d, %r13d jmp 0x1ca6e movl $0xd1b71759, %ecx # imm = 0xD1B71759 movl %r13d, 
%eax imulq %rcx, %rax shrq $0x2d, %rax imull $0x2710, %eax, %ecx # imm = 0x2710 jmp 0x1ca6b movl %r13d, %eax shrl $0x9, %eax imulq $0x44b83, %rax, %rax # imm = 0x44B83 shrq $0x27, %rax imull $0x3b9aca00, %eax, %ecx # imm = 0x3B9ACA00 jmp 0x1ca6b movl %r13d, %eax imulq $0x51eb851f, %rax, %rax # imm = 0x51EB851F shrq $0x25, %rax imull $0x64, %eax, %ecx jmp 0x1ca6b movl %r13d, %eax imulq $0x10624dd3, %rax, %rax # imm = 0x10624DD3 shrq $0x26, %rax imull $0x3e8, %eax, %ecx # imm = 0x3E8 jmp 0x1ca6b movl %r13d, %eax imulq $0x6b5fca6b, %rax, %rax # imm = 0x6B5FCA6B shrq $0x36, %rax imull $0x989680, %eax, %ecx # imm = 0x989680 jmp 0x1ca6b movl $0xcccccccd, %ecx # imm = 0xCCCCCCCD movl %r13d, %eax imulq %rcx, %rax shrq $0x23, %rax leal (%rax,%rax), %ecx leal (%rcx,%rcx,4), %ecx jmp 0x1ca6b movl %r13d, %eax shrl $0x5, %eax imulq $0xa7c5ac5, %rax, %rax # imm = 0xA7C5AC5 shrq $0x27, %rax imull $0x186a0, %eax, %ecx # imm = 0x186A0 jmp 0x1ca6b movl %r13d, %eax imulq $0x431bde83, %rax, %rax # imm = 0x431BDE83 shrq $0x32, %rax imull $0xf4240, %eax, %ecx # imm = 0xF4240 jmp 0x1ca6b movl %r13d, %eax imulq $0x55e63b89, %rax, %rax # imm = 0x55E63B89 shrq $0x39, %rax imull $0x5f5e100, %eax, %ecx # imm = 0x5F5E100 subl %ecx, %r13d leaq 0x4acb(%rip), %rdx # 0x21540 movl %r13d, %ecx shlxq %rbx, %rcx, %rcx addb $0x30, %al movl $0x1, %r8d leaq 0x18(%rsp), %rdi movl %r9d, 0x14(%rsp) shlxq %rbx, (%rdx,%r9,8), %rdx addq %r14, %rcx movsbl %al, %esi movl $0x1, (%rsp) callq 0x12184 testl %eax, %eax jne 0x1cb10 movl 0x14(%rsp), %r9d testl %r9d, %r9d jg 0x1c976 movl $0x1, %r12d leaq 0x18(%rsp), %r13 movq %r15, %rcx movl 0x14(%rsp), %r9d addq %r14, %r14 addq %r12, %r12 movq %r13, %rdi leaq (%r14,%r14,4), %r14 leaq (%r12,%r12,4), %r12 shrxq %rbx, %r14, %rax andq %rcx, %r14 movq %r12, %r8 addb $0x30, %al movq %r14, %rcx decl %r9d movsbl %al, %esi movl %r9d, 0x14(%rsp) movl $0x0, (%rsp) movq 0x50(%rsp), %rdx callq 0x12184 movq %r15, %rcx testl %eax, %eax je 0x1cac7 movq 0x30(%rsp), %r12 vmovsd 0x48(%rsp), %xmm0 movl 0x14(%rsp), %ecx cmpl $0x2, %eax jne 0x1cb67 movq 0x40(%rsp), %rdx movl 0x20(%rsp), %eax movq 0x38(%rsp), %rbx movl 0x24(%rsp), %edi addl $0x8, %edx subl %edx, %eax movl $0x112, %edx # imm = 0x112 leal 0x15b(%rax,%rcx), %eax leaq 0x14(%rsp), %rcx bextrl %edx, %ebx, %esi movq %r12, %rdx movl %eax, (%rcx) callq 0x1ccce movabsq $0x200000000, %rax # imm = 0x200000000 jmp 0x1cbb3 addl 0x28(%rsp), %ecx movslq 0x20(%rsp), %r14 movl %ecx, 0x14(%rsp) testq %r14, %r14 js 0x1ccb6 movq 0x18(%r12), %rax cmpq %r14, %rax jae 0x1cb98 movq (%r12), %rax movq %r12, %rdi movq %r14, %rsi callq *(%rax) movq 0x18(%r12), %rax movq 0x38(%rsp), %rbx cmpq %r14, %rax cmovbq %rax, %r14 movabsq $0x200000000, %rax # imm = 0x200000000 movq %r14, 0x10(%r12) cmpq %rax, %rbp movl $0x114, %ecx # imm = 0x114 sete %al bextrl %ecx, %ebx, %ecx orb %al, %cl jne 0x1cc06 movq 0x10(%r12), %r14 testq %r14, %r14 je 0x1cbf2 movl 0x14(%rsp), %eax movq 0x8(%r12), %rcx incl %eax cmpb $0x30, -0x1(%rcx,%r14) jne 0x1cc39 movl %eax, 0x14(%rsp) decq %r14 incl %eax testq %r14, %r14 jne 0x1cbdc movq 0x18(%r12), %rax xorl %r14d, %r14d cmpq %r14, %rax cmovbq %rax, %r14 movq %r14, 0x10(%r12) movl 0x14(%rsp), %r15d jmp 0x1cc27 vcvtsd2ss %xmm0, %xmm0, %xmm0 callq 0xe874 movq %rax, %r15 shrq $0x20, %r15 movq %r12, %rdi movl %eax, %esi callq 0x11d86 movl %r15d, %eax addq $0x58, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq movq 0x18(%r12), %rax cmpq %r14, %rax jae 0x1cbfa movq (%r12), %rax movq %r12, %rdi movq %r14, %rsi callq *(%rax) movq 0x18(%r12), 
%rax jmp 0x1cbfa leaq 0x35b9(%rip), %rdi # 0x20216 leaq 0x3734(%rip), %rdx # 0x20398 movl $0x980, %esi # imm = 0x980 callq 0xde89 leaq 0x35a1(%rip), %rdi # 0x20216 leaq 0x3c81(%rip), %rdx # 0x208fd movl $0x5ec, %esi # imm = 0x5EC callq 0xde89 leaq 0x3589(%rip), %rdi # 0x20216 leaq 0x3c69(%rip), %rdx # 0x208fd movl $0x5ed, %esi # imm = 0x5ED callq 0xde89 leaq 0x3571(%rip), %rdi # 0x20216 leaq 0x36fe(%rip), %rdx # 0x203aa movl $0x61f, %esi # imm = 0x61F callq 0xde89 leaq 0x34c8(%rip), %rdi # 0x20185 leaq 0x3526(%rip), %rdx # 0x201ea movl $0x146, %esi # imm = 0x146 callq 0xde89
/quesnel[P]baryonyx/external/fmt/include/fmt/format-inl.h
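The last step above, isolated: when neither fixed notation nor showpoint is in effect, trailing zeros migrate from the digit buffer into the decimal exponent, so the (digits, exp10) pair keeps denoting the same value. A minimal sketch:

#include <cassert>
#include <string>

int trim_zeros(std::string& digits, int exp10) {
    while (!digits.empty() && digits.back() == '0') {
        digits.pop_back(); // dropping a trailing zero divides by 10...
        ++exp10;           // ...so the exponent rises by one to compensate
    }
    return exp10;
}

int main() {
    std::string d = "2500";      // 2500 * 10^-2 == 25.00
    int e = trim_zeros(d, -2);
    assert(d == "25" && e == 0); // 25 * 10^0 == 25
}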
void fmt::v7::detail::fallback_format<double>(double, int, bool, fmt::v7::detail::buffer<char>&, int&)
void fallback_format(Double d, int num_digits, bool binary32, buffer<char>& buf, int& exp10) { bigint numerator; // 2 * R in (FPP)^2. bigint denominator; // 2 * S in (FPP)^2. // lower and upper are differences between value and corresponding boundaries. bigint lower; // (M^- in (FPP)^2). bigint upper_store; // upper's value if different from lower. bigint* upper = nullptr; // (M^+ in (FPP)^2). fp value; // Shift numerator and denominator by an extra bit or two (if lower boundary // is closer) to make lower and upper integers. This eliminates multiplication // by 2 during later computations. const bool is_predecessor_closer = binary32 ? value.assign(static_cast<float>(d)) : value.assign(d); int shift = is_predecessor_closer ? 2 : 1; uint64_t significand = value.f << shift; if (value.e >= 0) { numerator.assign(significand); numerator <<= value.e; lower.assign(1); lower <<= value.e; if (shift != 1) { upper_store.assign(1); upper_store <<= value.e + 1; upper = &upper_store; } denominator.assign_pow10(exp10); denominator <<= shift; } else if (exp10 < 0) { numerator.assign_pow10(-exp10); lower.assign(numerator); if (shift != 1) { upper_store.assign(numerator); upper_store <<= 1; upper = &upper_store; } numerator *= significand; denominator.assign(1); denominator <<= shift - value.e; } else { numerator.assign(significand); denominator.assign_pow10(exp10); denominator <<= shift - value.e; lower.assign(1); if (shift != 1) { upper_store.assign(1ULL << 1); upper = &upper_store; } } // Invariant: value == (numerator / denominator) * pow(10, exp10). if (num_digits < 0) { // Generate the shortest representation. if (!upper) upper = &lower; bool even = (value.f & 1) == 0; num_digits = 0; char* data = buf.data(); for (;;) { int digit = numerator.divmod_assign(denominator); bool low = compare(numerator, lower) - even < 0; // numerator <[=] lower. // numerator + upper >[=] pow10: bool high = add_compare(numerator, *upper, denominator) + even > 0; data[num_digits++] = static_cast<char>('0' + digit); if (low || high) { if (!low) { ++data[num_digits - 1]; } else if (high) { int result = add_compare(numerator, numerator, denominator); // Round half to even. if (result > 0 || (result == 0 && (digit % 2) != 0)) ++data[num_digits - 1]; } buf.try_resize(to_unsigned(num_digits)); exp10 -= num_digits - 1; return; } numerator *= 10; lower *= 10; if (upper != &lower) *upper *= 10; } } // Generate the given number of digits. exp10 -= num_digits - 1; if (num_digits == 0) { buf.try_resize(1); denominator *= 10; buf[0] = add_compare(numerator, numerator, denominator) > 0 ? '1' : '0'; return; } buf.try_resize(to_unsigned(num_digits)); for (int i = 0; i < num_digits - 1; ++i) { int digit = numerator.divmod_assign(denominator); buf[i] = static_cast<char>('0' + digit); numerator *= 10; } int digit = numerator.divmod_assign(denominator); auto result = add_compare(numerator, numerator, denominator); if (result > 0 || (result == 0 && (digit % 2) != 0)) { if (digit == 9) { const auto overflow = '0' + 10; buf[num_digits - 1] = overflow; // Propagate the carry. for (int i = num_digits - 1; i > 0 && buf[i] == overflow; --i) { buf[i] = '0'; ++buf[i - 1]; } if (buf[0] == overflow) { buf[0] = '1'; ++exp10; } return; } ++digit; } buf[num_digits - 1] = static_cast<char>('0' + digit); }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x2f8, %rsp # imm = 0x2F8 movq %rcx, 0x18(%rsp) movl %edi, %r15d leaq 0xe8(%rsp), %rcx movl $0x20, %edi movq %rdx, 0x8(%rsp) leaq 0xd018(%rip), %rdx # 0x29d18 leaq 0x38(%rsp), %r9 xorl %eax, %eax leaq 0x198(%rsp), %r10 leaq (%rcx,%rdi), %r8 movq %rax, 0x10(%rcx) movq %rdx, (%rcx) movq %rax, 0x10(%r9) movq %rdx, (%r9) movq %rax, 0x10(%r10) movq %rdx, (%r10) movq %r8, 0x8(%rcx) leaq (%r9,%rdi), %r8 movq %rdi, 0x18(%rcx) movl %eax, 0xa8(%rcx) leaq (%r10,%rdi), %rcx movq %r8, 0x8(%r9) movq %rdi, 0x18(%r9) movl %eax, 0xa8(%r9) leaq 0x248(%rsp), %r9 movq %rcx, 0x8(%r10) movq %rdi, 0x18(%r10) movl %eax, 0xa8(%r10) leaq (%r9,%rdi), %rcx movq %rax, 0x10(%r9) movq %rdx, (%r9) movq %r9, 0x10(%rsp) movq %rcx, 0x8(%r9) movq %rdi, 0x18(%r9) movl %eax, 0xa8(%r9) testl %esi, %esi je 0x1cdcb vcvtsd2ss %xmm0, %xmm0, %xmm0 movl $0xffffff6b, %ebp # imm = 0xFFFFFF6B vmovd %xmm0, %eax movl %eax, %r13d andl $0x7fffff, %r13d # imm = 0x7FFFFF sete %cl cmpl $0x1000000, %eax # imm = 0x1000000 setae %r12b andb %cl, %r12b cmpl $0x800000, %eax # imm = 0x800000 jb 0x1ce9d shrl $0x17, %eax orq $0x800000, %r13 # imm = 0x800000 addl $0xffffff6a, %eax # imm = 0xFFFFFF6A movl %eax, %ebp jmp 0x1ce0e vmovq %xmm0, %rbp movb $0x34, %al bzhiq %rax, %rbp, %r13 shrq $0x34, %rbp andl $0x7ff, %ebp # imm = 0x7FF testq %r13, %r13 sete %al cmpl $0x2, %ebp setae %r12b andb %al, %r12b testl %ebp, %ebp je 0x1ce98 movabsq $0xfffffffffffff, %rax # imm = 0xFFFFFFFFFFFFF addl $0xfffffbcd, %ebp # imm = 0xFFFFFBCD leaq 0x1(%r13,%rax), %r13 movzbl %r12b, %r14d incl %r14d shlxq %r14, %r13, %rbx testl %ebp, %ebp js 0x1cea9 leaq 0xe8(%rsp), %rdi movq %rbx, %rsi callq 0x122f6 leaq 0xe8(%rsp), %rdi movl %ebp, %esi callq 0x1234a leaq 0x198(%rsp), %rdi movl $0x1, %esi callq 0x122f6 leaq 0x198(%rsp), %rdi movl %ebp, %esi callq 0x1234a testb %r12b, %r12b je 0x1cf75 leaq 0x248(%rsp), %rdi movl $0x1, %esi movq %rdi, 0x10(%rsp) callq 0x122f6 incl %ebp leaq 0x248(%rsp), %rdi movl %ebp, %esi callq 0x1234a jmp 0x1cf7e movl $0xfffffbce, %ebp # imm = 0xFFFFFBCE movzbl %r12b, %r14d incl %r14d shlxq %r14, %r13, %rbx movq 0x18(%rsp), %rax movl (%rax), %esi testl %esi, %esi js 0x1cf13 leaq 0xe8(%rsp), %rdi movq %rbx, %rsi callq 0x122f6 movq 0x18(%rsp), %rax movl (%rax), %esi leaq 0x38(%rsp), %rdi callq 0x123ec subl %ebp, %r14d leaq 0x38(%rsp), %rdi movl %r14d, %esi callq 0x1234a leaq 0x198(%rsp), %rdi movl $0x1, %esi callq 0x122f6 testb %r12b, %r12b je 0x1cf6a movl $0x2, %esi leaq 0x248(%rsp), %rdi callq 0x122f6 jmp 0x1cfd6 negl %esi leaq 0xe8(%rsp), %rdi callq 0x123ec leaq 0x198(%rsp), %rdi leaq 0xe8(%rsp), %rsi callq 0x1248a testb %r12b, %r12b je 0x1cf9e leaq 0x248(%rsp), %rdi leaq 0xe8(%rsp), %rsi movq %rdi, 0x10(%rsp) callq 0x1248a leaq 0x248(%rsp), %rdi movl $0x1, %esi callq 0x1234a jmp 0x1cfa7 movq $0x0, 0x10(%rsp) jmp 0x1cfd6 movq $0x0, 0x10(%rsp) movq 0x18(%rsp), %rax movl (%rax), %esi leaq 0x38(%rsp), %rdi callq 0x123ec leaq 0x38(%rsp), %rdi movl %r14d, %esi callq 0x1234a jmp 0x1cfd6 movq $0x0, 0x10(%rsp) leaq 0xe8(%rsp), %rdi movq %rbx, %rsi callq 0x124ec leaq 0x38(%rsp), %rdi movl $0x1, %esi callq 0x122f6 subl %ebp, %r14d leaq 0x38(%rsp), %rdi movl %r14d, %esi callq 0x1234a testl %r15d, %r15d js 0x1d05f movq 0x18(%rsp), %rcx leal -0x1(%r15), %eax movl %r15d, %ebp subl %eax, (%rcx) subl $0x1, %ebp jae 0x1d17f movq 0x8(%rsp), %rdi movl $0x1, %eax cmpq $0x0, 0x18(%rdi) jne 0x1d020 movq (%rdi), %rax movl $0x1, %esi callq *(%rax) movq 0x8(%rsp), %rdi xorl %eax, %eax cmpq $0x0, 0x18(%rdi) 
setne %al movq %rax, 0x10(%rdi) leaq 0x38(%rsp), %rdi movl $0xa, %esi callq 0x13128 leaq 0xe8(%rsp), %rdi leaq 0x38(%rsp), %rdx movq %rdi, %rsi callq 0x1263b movq 0x8(%rsp), %rcx testl %eax, %eax setg %al orb $0x30, %al movq 0x8(%rcx), %rcx movb %al, (%rcx) jmp 0x1d2e5 movq 0x10(%rsp), %rax movq 0x8(%rsp), %rdx leaq 0x198(%rsp), %rsi notl %r13d leaq 0xe8(%rsp), %rbx leaq 0x38(%rsp), %r15 testq %rax, %rax movq %rax, %rcx movq 0x8(%rdx), %rax cmoveq %rsi, %rcx andl $0x1, %r13d movq %rcx, 0x30(%rsp) movl %r13d, %ecx negl %ecx movq %r13, %r14 xorl %r13d, %r13d movl %ecx, 0x24(%rsp) movq %rax, 0x28(%rsp) movq %rbx, %rdi movq %r15, %rsi callq 0x12518 leaq 0x198(%rsp), %rsi movq %rbx, %rdi movl %eax, %ebp callq 0x125a3 movq 0x30(%rsp), %rsi movl %eax, %r12d movq %rbx, %rdi movq %r15, %rdx callq 0x1263b movq 0x28(%rsp), %rdx leal 0x30(%rbp), %ecx movb %cl, (%rdx,%r13) cmpl %r14d, %r12d jl 0x1d131 cmpl 0x24(%rsp), %eax jg 0x1d131 movl $0xa, %esi movq %rbx, %rdi callq 0x13128 leaq 0x198(%rsp), %rdi movl $0xa, %esi callq 0x13128 cmpq $0x0, 0x10(%rsp) je 0x1d129 movq 0x10(%rsp), %rdi movl $0xa, %esi callq 0x13128 incq %r13 jmp 0x1d0ac cmpl %r14d, %r12d jge 0x1d28d cmpl 0x24(%rsp), %eax jle 0x1d299 leaq 0xe8(%rsp), %rdi leaq 0x38(%rsp), %rdx movq %rdi, %rsi callq 0x1263b testl %eax, %eax jg 0x1d16f setne %al testb $0x1, %bpl sete %cl orb %al, %cl jne 0x1d299 movq 0x28(%rsp), %rax movb (%rax,%r13), %al incb %al jmp 0x1d290 movq 0x8(%rsp), %rdi movl %r15d, %ebx movq 0x18(%rdi), %rax cmpq %rbx, %rax jae 0x1d1a1 movq (%rdi), %rax movq %rbx, %rsi callq *(%rax) movq 0x8(%rsp), %rdi movq 0x18(%rdi), %rax cmpq %rbx, %rax cmovbq %rax, %rbx movq %rbx, 0x10(%rdi) cmpl $0x1, %r15d je 0x1d1f4 leaq 0xe8(%rsp), %rbx leaq 0x38(%rsp), %r14 movl %ebp, %r12d xorl %r13d, %r13d movq %rbx, %rdi movq %r14, %rsi callq 0x12518 movq 0x8(%rsp), %rcx addb $0x30, %al movq 0x8(%rcx), %rcx movb %al, (%rcx,%r13) movl $0xa, %esi movq %rbx, %rdi callq 0x13128 incq %r13 cmpq %r13, %r12 jne 0x1d1c5 leaq 0xe8(%rsp), %rdi leaq 0x38(%rsp), %rsi callq 0x12518 leaq 0xe8(%rsp), %rdi leaq 0x38(%rsp), %rdx movl %eax, %ebx movq %rdi, %rsi callq 0x1263b testl %eax, %eax jg 0x1d232 setne %al testb $0x1, %bl sete %cl orb %al, %cl jne 0x1d2d3 cmpl $0x9, %ebx jne 0x1d2d1 movq 0x8(%rsp), %rdx movslq %ebp, %rcx movq 0x8(%rdx), %rax movb $0x3a, (%rax,%rcx) cmpl $0x1, %r15d je 0x1d278 leal -0x2(%r15), %eax movq 0x8(%rdx), %rcx decl %r15d cmpb $0x3a, (%rcx,%r15) jne 0x1d278 movb $0x30, (%rcx,%r15) movq 0x8(%rdx), %rcx incb (%rcx,%rax) decq %rax cmpl $0x1, %r15d jg 0x1d255 movq 0x8(%rdx), %rax cmpb $0x3a, (%rax) jne 0x1d2e5 movq 0x18(%rsp), %rcx movb $0x31, (%rax) incl (%rcx) jmp 0x1d2e5 leal 0x31(%rbp), %eax movq 0x28(%rsp), %rcx movb %al, (%rcx,%r13) movq 0x8(%rsp), %r14 leaq 0x1(%r13), %rax movl %eax, %ebx movq 0x18(%r14), %rax cmpq %rbx, %rax jae 0x1d2bc movq (%r14), %rax movq %r14, %rdi movq %rbx, %rsi callq *(%rax) movq 0x18(%r14), %rax movq 0x18(%rsp), %rcx cmpq %rbx, %rax cmovbq %rax, %rbx movq %rbx, 0x10(%r14) subl %r13d, (%rcx) jmp 0x1d2e5 incl %ebx movq 0x8(%rsp), %rax addb $0x30, %bl movslq %ebp, %rcx movq 0x8(%rax), %rax movb %bl, (%rax,%rcx) leaq 0x248(%rsp), %rdi callq 0x127ce leaq 0x198(%rsp), %rdi callq 0x127ce leaq 0x38(%rsp), %rdi callq 0x127ce leaq 0xe8(%rsp), %rdi callq 0x127ce addq $0x2f8, %rsp # imm = 0x2F8 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq jmp 0x1d338 jmp 0x1d338 jmp 0x1d338 jmp 0x1d338 jmp 0x1d338 jmp 0x1d338 jmp 0x1d338 jmp 0x1d338 leaq 0x248(%rsp), %rdi movq %rax, %rbx callq 0x127ce leaq 0x198(%rsp), 
%rdi callq 0x127ce leaq 0x38(%rsp), %rdi callq 0x127ce leaq 0xe8(%rsp), %rdi callq 0x127ce movq %rbx, %rdi callq 0x3390
/quesnel[P]baryonyx/external/fmt/include/fmt/format-inl.h
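fallback_format's inner loop is classic fixed-precision positional printing: divmod_assign yields the next digit and the remainder is scaled by 10. A small-scale analog on 64-bit integers (the real code uses bigints, and its stop condition comes from the lower/upper boundary comparisons rather than a fixed digit count):

#include <cassert>
#include <cstdint>
#include <string>

std::string gen_digits(std::uint64_t num, std::uint64_t den, int count) {
    std::string out;
    for (int i = 0; i < count; ++i) {
        out += static_cast<char>('0' + num / den); // divmod_assign analog
        num = num % den * 10;                      // numerator *= 10
    }
    return out;
}

int main() {
    // First seven digits of 355/113, a familiar approximation of pi.
    assert(gen_digits(355, 113, 7) == "3141592");
}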
void boost::ext::ut::v1_1_8::reporter<boost::ext::ut::v1_1_8::printer>::on<boost::ext::ut::v1_1_8::detail::eq_<std::basic_string_view<char, std::char_traits<char>>, std::basic_string_view<char, std::char_traits<char>>>>(boost::ext::ut::v1_1_8::events::assertion_fail<boost::ext::ut::v1_1_8::detail::eq_<std::basic_string_view<char, std::char_traits<char>>, std::basic_string_view<char, std::char_traits<char>>>>)
auto on(events::assertion_fail<TExpr> assertion) -> void { constexpr auto short_name = [](std::string_view name) { return name.rfind('/') != std::string_view::npos ? name.substr(name.rfind('/') + 1) : name; }; printer_ << "\n " << short_name(assertion.location.file_name()) << ':' << assertion.location.line() << ':' << printer_.colors().fail << "FAILED" << printer_.colors().none << " [" << std::boolalpha << assertion.expr << printer_.colors().none << ']'; ++asserts_.fail; }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx pushq %rax leaq 0x70(%rdi), %r15 leaq 0x22fd(%rip), %rsi # 0x1fc55 movl $0x3, %edx movq %rdi, %rbx leaq 0x40(%rsp), %r14 movq %r15, %rdi callq 0x3270 movq 0x28(%r14), %r13 movq %r13, %rdi callq 0x30c0 testq %rax, %rax je 0x1d9bc movq %rax, %rdx movq %rax, %rcx subq $0x1, %rcx jb 0x1d9be cmpb $0x2f, -0x1(%r13,%rdx) movq %rcx, %rdx jne 0x1d984 addq %rax, %r13 negq %rax xorl %edx, %edx movq %rdx, %rcx cmpq %rdx, %rax je 0x1d9b1 cmpb $0x2f, -0x1(%r13,%rcx) leaq -0x1(%rcx), %rdx jne 0x1d99d addq %rcx, %r13 negq %rcx movq %rcx, %rax jmp 0x1d9be xorl %eax, %eax leaq 0x30(%rbx), %r12 movq %r15, %rdi movq %r13, %rsi movq %rax, %rdx callq 0x3270 movb $0x3a, %bpl leaq 0x5(%rsp), %rsi movl $0x1, %edx movq %r15, %rdi movb %bpl, (%rsi) callq 0x3270 movl 0x30(%r14), %esi movq %r15, %rdi callq 0x3380 leaq 0x6(%rsp), %rsi movl $0x1, %edx movq %r15, %rdi movb %bpl, (%rsi) callq 0x3270 movq 0x50(%rbx), %rdx movq 0x58(%rbx), %rsi movq %r15, %rdi callq 0x3270 leaq 0x2239(%rip), %rsi # 0x1fc59 movl $0x6, %edx movq %r15, %rdi callq 0x3270 movq 0x30(%rbx), %rdx movq 0x38(%rbx), %rsi movq %r15, %rdi callq 0x3270 leaq 0x221c(%rip), %rsi # 0x1fc60 movl $0x2, %edx movq %r15, %rdi callq 0x3270 movq 0x70(%rbx), %rax movq %r12, %rdi movq %r14, %rsi movq -0x18(%rax), %rax orl $0x1, 0x88(%rbx,%rax) callq 0x1daa8 movq 0x30(%rbx), %rdx movq 0x38(%rbx), %rsi leaq 0x40(%rax), %r14 movq %r14, %rdi callq 0x3270 leaq 0x7(%rsp), %rsi movl $0x1, %edx movq %r14, %rdi movb $0x5d, (%rsi) callq 0x3270 incq 0x20(%rbx) addq $0x8, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq
/quesnel[P]baryonyx/external/ut/include/boost/ut.hpp
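The short_name lambda above, written out as a free function: keep only the path component after the last '/', or the whole name when there is no slash.

#include <cassert>
#include <string_view>

std::string_view short_name(std::string_view name) {
    auto pos = name.rfind('/');
    return pos != std::string_view::npos ? name.substr(pos + 1) : name;
}

int main() {
    assert(short_name("external/ut/include/boost/ut.hpp") == "ut.hpp");
    assert(short_name("ut.hpp") == "ut.hpp"); // no slash: unchanged
}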
nlohmann::json_abi_v3_11_2::detail::lexer<nlohmann::json_abi_v3_11_2::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_2::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>>, nlohmann::json_abi_v3_11_2::detail::iterator_input_adapter<char const*>>::get_codepoint()
int get_codepoint() { // this function only makes sense after reading `\u` JSON_ASSERT(current == 'u'); int codepoint = 0; const auto factors = { 12u, 8u, 4u, 0u }; for (const auto factor : factors) { get(); if (current >= '0' && current <= '9') { codepoint += static_cast<int>((static_cast<unsigned int>(current) - 0x30u) << factor); } else if (current >= 'A' && current <= 'F') { codepoint += static_cast<int>((static_cast<unsigned int>(current) - 0x37u) << factor); } else if (current >= 'a' && current <= 'f') { codepoint += static_cast<int>((static_cast<unsigned int>(current) - 0x57u) << factor); } else { return -1; } } JSON_ASSERT(0x0000 <= codepoint && codepoint <= 0xFFFF); return codepoint; }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x18, %rsp movq %rdi, %rbx movaps 0x3751a(%rip), %xmm0 # 0x3f0c0 movaps %xmm0, (%rsp) xorl %r14d, %r14d pushq $-0x30 popq %r12 pushq $-0x37 popq %r13 xorl %r15d, %r15d movl (%rsp,%r15), %ebp movq %rbx, %rdi callq 0x7ae8 movl 0x14(%rbx), %eax leal -0x30(%rax), %edx movl %r12d, %ecx cmpl $0xa, %edx jb 0x7be8 leal -0x41(%rax), %edx movl %r13d, %ecx cmpl $0x6, %edx jb 0x7be8 leal -0x61(%rax), %edx pushq $-0x57 popq %rcx cmpl $0x5, %edx ja 0x7c06 addl %ecx, %eax movl %ebp, %ecx shll %cl, %eax addl %r14d, %eax movb $0x1, %cl movl %eax, %r14d testb %cl, %cl je 0x7c0a addq $0x4, %r15 cmpq $0x10, %r15 jne 0x7bb8 jmp 0x7c10 xorl %ecx, %ecx jmp 0x7bf6 pushq $-0x1 popq %rax movl %eax, %r14d movl %r14d, %eax addq $0x18, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq
/pboettch[P]json-schema-validator/build_O1/_deps/nlohmann_json-src/include/nlohmann/detail/input/lexer.hpp
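The same \uXXXX parsing as a standalone function (parse_codepoint is a made-up name; error handling is collapsed to returning -1, exactly as above): four hex digits are shifted into place at factors 12, 8, 4, and 0.

#include <cassert>
#include <initializer_list>

int parse_codepoint(const char* p) {
    int codepoint = 0;
    for (const int factor : {12, 8, 4, 0}) {
        const char c = *p++;
        if (c >= '0' && c <= '9')      codepoint += (c - 0x30) << factor;
        else if (c >= 'A' && c <= 'F') codepoint += (c - 0x37) << factor;
        else if (c >= 'a' && c <= 'f') codepoint += (c - 0x57) << factor;
        else return -1; // any non-hex character aborts the escape
    }
    return codepoint;
}

int main() {
    assert(parse_codepoint("00e9") == 0xE9); // U+00E9
    assert(parse_codepoint("00g9") == -1);
}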
nlohmann::json_abi_v3_11_2::detail::parser<nlohmann::json_abi_v3_11_2::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_2::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>>, nlohmann::json_abi_v3_11_2::detail::iterator_input_adapter<char const*>>::parser(nlohmann::json_abi_v3_11_2::detail::iterator_input_adapter<char const*>&&, std::function<bool (int, nlohmann::json_abi_v3_11_2::detail::parse_event_t, nlohmann::json_abi_v3_11_2::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_2::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>>&)>, bool, bool)
explicit parser(InputAdapterType&& adapter, const parser_callback_t<BasicJsonType> cb = nullptr, const bool allow_exceptions_ = true, const bool skip_comments = false) : callback(cb) , m_lexer(std::move(adapter), skip_comments) , allow_exceptions(allow_exceptions_) { // read first token get_token(); }
pushq %rbp pushq %r15 pushq %r14 pushq %r12 pushq %rbx movl %r8d, %ebp movl %ecx, %r14d movq %rsi, %r12 movq %rdi, %rbx movq %rdx, %rsi callq 0x92be movl $0x0, 0x20(%rbx) leaq 0x28(%rbx), %r15 movq %r15, %rdi movq %r12, %rsi movl %ebp, %edx callq 0x9d70 movb %r14b, 0xc0(%rbx) movq %r15, %rdi callq 0x9e10 movl %eax, 0x20(%rbx) popq %rbx popq %r12 popq %r14 popq %r15 popq %rbp retq movq %rax, %r14 movq %r15, %rdi callq 0x9ddc movq 0x10(%rbx), %rax testq %rax, %rax je 0x9d60 movq %rbx, %rdi movq %rbx, %rsi movl $0x3, %edx callq *%rax movq %r14, %rdi callq 0x75d0 movq %rax, %rdi callq 0x9104
/pboettch[P]json-schema-validator/build_O1/_deps/nlohmann_json-src/include/nlohmann/detail/input/parser.hpp
nlohmann::json_abi_v3_11_2::detail::lexer<nlohmann::json_abi_v3_11_2::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_2::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>>, nlohmann::json_abi_v3_11_2::detail::iterator_input_adapter<char const*>>::scan_number()
token_type scan_number()  // lgtm [cpp/use-of-goto]
{
    // reset token_buffer to store the number's bytes
    reset();

    // the type of the parsed number; initially set to unsigned; will be
    // changed if minus sign, decimal point or exponent is read
    token_type number_type = token_type::value_unsigned;

    // state (init): we just found out we need to scan a number
    switch (current)
    {
        case '-':
        {
            add(current);
            goto scan_number_minus;
        }
        case '0':
        {
            add(current);
            goto scan_number_zero;
        }
        case '1': case '2': case '3': case '4': case '5':
        case '6': case '7': case '8': case '9':
        {
            add(current);
            goto scan_number_any1;
        }
        // all other characters are rejected outside scan_number()
        default:                // LCOV_EXCL_LINE
            JSON_ASSERT(false); // NOLINT(cert-dcl03-c,hicpp-static-assert,misc-static-assert) LCOV_EXCL_LINE
    }

scan_number_minus:
    // state: we just parsed a leading minus sign
    number_type = token_type::value_integer;
    switch (get())
    {
        case '0':
        {
            add(current);
            goto scan_number_zero;
        }
        case '1': case '2': case '3': case '4': case '5':
        case '6': case '7': case '8': case '9':
        {
            add(current);
            goto scan_number_any1;
        }
        default:
        {
            error_message = "invalid number; expected digit after '-'";
            return token_type::parse_error;
        }
    }

scan_number_zero:
    // state: we just parsed a zero (maybe with a leading minus sign)
    switch (get())
    {
        case '.':
        {
            add(decimal_point_char);
            goto scan_number_decimal1;
        }
        case 'e':
        case 'E':
        {
            add(current);
            goto scan_number_exponent;
        }
        default:
            goto scan_number_done;
    }

scan_number_any1:
    // state: we just parsed a number 0-9 (maybe with a leading minus sign)
    switch (get())
    {
        case '0': case '1': case '2': case '3': case '4':
        case '5': case '6': case '7': case '8': case '9':
        {
            add(current);
            goto scan_number_any1;
        }
        case '.':
        {
            add(decimal_point_char);
            goto scan_number_decimal1;
        }
        case 'e':
        case 'E':
        {
            add(current);
            goto scan_number_exponent;
        }
        default:
            goto scan_number_done;
    }

scan_number_decimal1:
    // state: we just parsed a decimal point
    number_type = token_type::value_float;
    switch (get())
    {
        case '0': case '1': case '2': case '3': case '4':
        case '5': case '6': case '7': case '8': case '9':
        {
            add(current);
            goto scan_number_decimal2;
        }
        default:
        {
            error_message = "invalid number; expected digit after '.'";
            return token_type::parse_error;
        }
    }

scan_number_decimal2:
    // we just parsed at least one number after a decimal point
    switch (get())
    {
        case '0': case '1': case '2': case '3': case '4':
        case '5': case '6': case '7': case '8': case '9':
        {
            add(current);
            goto scan_number_decimal2;
        }
        case 'e':
        case 'E':
        {
            add(current);
            goto scan_number_exponent;
        }
        default:
            goto scan_number_done;
    }

scan_number_exponent:
    // we just parsed an exponent
    number_type = token_type::value_float;
    switch (get())
    {
        case '+':
        case '-':
        {
            add(current);
            goto scan_number_sign;
        }
        case '0': case '1': case '2': case '3': case '4':
        case '5': case '6': case '7': case '8': case '9':
        {
            add(current);
            goto scan_number_any2;
        }
        default:
        {
            error_message = "invalid number; expected '+', '-', or digit after exponent";
            return token_type::parse_error;
        }
    }

scan_number_sign:
    // we just parsed an exponent sign
    switch (get())
    {
        case '0': case '1': case '2': case '3': case '4':
        case '5': case '6': case '7': case '8': case '9':
        {
            add(current);
            goto scan_number_any2;
        }
        default:
        {
            error_message = "invalid number; expected digit after exponent sign";
            return token_type::parse_error;
        }
    }

scan_number_any2:
    // we just parsed a number after the exponent or exponent sign
    switch (get())
    {
        case '0': case '1': case '2': case '3': case '4':
        case '5': case '6': case '7': case '8': case '9':
        {
            add(current);
            goto scan_number_any2;
        }
        default:
            goto scan_number_done;
    }

scan_number_done:
    // unget the character after the number (we only read it to know that
    // we are done scanning a number)
    unget();

    char* endptr = nullptr; // NOLINT(cppcoreguidelines-pro-type-vararg,hicpp-vararg)
    errno = 0;

    // try to parse integers first and fall back to floats
    if (number_type == token_type::value_unsigned)
    {
        const auto x = std::strtoull(token_buffer.data(), &endptr, 10);

        // we checked the number format before
        JSON_ASSERT(endptr == token_buffer.data() + token_buffer.size());

        if (errno == 0)
        {
            value_unsigned = static_cast<number_unsigned_t>(x);
            if (value_unsigned == x)
            {
                return token_type::value_unsigned;
            }
        }
    }
    else if (number_type == token_type::value_integer)
    {
        const auto x = std::strtoll(token_buffer.data(), &endptr, 10);

        // we checked the number format before
        JSON_ASSERT(endptr == token_buffer.data() + token_buffer.size());

        if (errno == 0)
        {
            value_integer = static_cast<number_integer_t>(x);
            if (value_integer == x)
            {
                return token_type::value_integer;
            }
        }
    }

    // this code is reached if we parse a floating-point number or if an
    // integer conversion above failed
    strtof(value_float, token_buffer.data(), &endptr);

    // we checked the number format before
    JSON_ASSERT(endptr == token_buffer.data() + token_buffer.size());

    return token_type::value_float;
}
pushq %rbp pushq %r14 pushq %rbx subq $0x10, %rsp movq %rdi, %rbx callq 0xa4a4 movl 0x14(%rbx), %eax movl $0x5, %ebp leal -0x31(%rax), %ecx cmpl $0x9, %ecx jae 0xa168 leaq 0x50(%rbx), %r14 movsbl %al, %esi movq %r14, %rdi callq 0x72e0 movq %rbx, %rdi callq 0x7ae8 leal -0x30(%rax), %ecx cmpl $0xa, %ecx jae 0xa117 movsbl 0x14(%rbx), %esi jmp 0xa0f9 cmpl $0x2e, %eax je 0xa22e cmpl $0x45, %eax je 0xa12e cmpl $0x65, %eax jne 0xa299 movsbl 0x14(%rbx), %esi movq %r14, %rdi callq 0x72e0 movq %rbx, %rdi callq 0x7ae8 leal -0x30(%rax), %ecx cmpl $0xa, %ecx jb 0xa1ad cmpl $0x2d, %eax je 0xa33e cmpl $0x2b, %eax je 0xa33e leaq 0x36108(%rip), %rax # 0x4026b jmp 0xa258 cmpl $0x30, %eax je 0xa1fb cmpl $0x2d, %eax jne 0xa184 leaq 0x50(%rbx), %rdi movl $0x2d, %esi callq 0x72e0 movq %rbx, %rdi callq 0x7ae8 leal -0x31(%rax), %ecx cmpl $0x9, %ecx jb 0xa326 cmpl $0x30, %eax je 0xa36e leaq 0x36071(%rip), %rax # 0x40219 jmp 0xa258 leaq 0x50(%rbx), %r14 movsbl 0x14(%rbx), %esi movq %r14, %rdi callq 0x72e0 movq %rbx, %rdi callq 0x7ae8 addl $-0x30, %eax movl $0x7, %ebp cmpl $0x9, %eax ja 0xa299 leaq 0x50(%rbx), %r14 movsbl 0x14(%rbx), %esi movq %r14, %rdi callq 0x72e0 movq %rbx, %rdi callq 0x7ae8 addl $-0x30, %eax cmpl $0xa, %eax jb 0xa1da jmp 0xa299 leaq 0x50(%rbx), %rdi movl $0x30, %esi callq 0x72e0 movl $0x5, %ebp movq %rbx, %rdi callq 0x7ae8 cmpl $0x2e, %eax je 0xa22e cmpl $0x65, %eax je 0xa225 cmpl $0x45, %eax jne 0xa299 leaq 0x50(%rbx), %r14 jmp 0xa12e movsbl 0x90(%rbx), %esi leaq 0x50(%rbx), %r14 movq %r14, %rdi callq 0x72e0 movq %rbx, %rdi callq 0x7ae8 addl $-0x30, %eax cmpl $0x9, %eax jbe 0xa266 leaq 0x35fea(%rip), %rax # 0x40242 movq %rax, 0x70(%rbx) movl $0xe, %eax jmp 0xa31d movsbl 0x14(%rbx), %esi movq %r14, %rdi callq 0x72e0 movq %rbx, %rdi callq 0x7ae8 leal -0x30(%rax), %ecx cmpl $0xa, %ecx jb 0xa266 cmpl $0x65, %eax je 0xa12e cmpl $0x45, %eax je 0xa12e movl $0x7, %ebp movq %rbx, %rdi callq 0x7b58 movq $0x0, 0x8(%rsp) callq 0x7060 movq %rax, %r14 movl $0x0, (%rax) cmpl $0x6, %ebp je 0xa2e9 cmpl $0x5, %ebp jne 0xa302 movq 0x50(%rbx), %rdi leaq 0x8(%rsp), %rsi movl $0xa, %edx callq 0x7480 cmpl $0x0, (%r14) jne 0xa302 movq %rax, 0x80(%rbx) movl $0x5, %eax jmp 0xa31d movq 0x50(%rbx), %rdi leaq 0x8(%rsp), %rsi movl $0xa, %edx callq 0x7080 cmpl $0x0, (%r14) je 0xa333 movq 0x50(%rbx), %rdi leaq 0x8(%rsp), %rsi callq 0x72a0 movsd %xmm0, 0x88(%rbx) movl $0x7, %eax addq $0x10, %rsp popq %rbx popq %r14 popq %rbp retq movl 0x14(%rbx), %eax movl $0x6, %ebp jmp 0xa0f2 movq %rax, 0x78(%rbx) movl $0x6, %eax jmp 0xa31d movsbl 0x14(%rbx), %esi leaq 0x50(%rbx), %r14 movq %r14, %rdi callq 0x72e0 movq %rbx, %rdi callq 0x7ae8 addl $-0x30, %eax cmpl $0xa, %eax jb 0xa1b1 leaq 0x35f3d(%rip), %rax # 0x402a6 jmp 0xa258 movsbl 0x14(%rbx), %esi leaq 0x50(%rbx), %rdi callq 0x72e0 movl $0x6, %ebp jmp 0xa20e nop
/pboettch[P]json-schema-validator/build_O1/_deps/nlohmann_json-src/include/nlohmann/detail/input/lexer.hpp
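The scan_number state machine above ends in scan_number_done by attempting an exact integer parse and only falling back to a floating-point conversion when errno reports overflow. A minimal standalone sketch of that strategy, assuming nothing from the lexer (the token literal is an arbitrary example):

#include <cerrno>
#include <cstdio>
#include <cstdlib>

int main()
{
    const char* token = "18446744073709551616"; // 2^64, one past ULLONG_MAX
    char* endptr = nullptr;

    errno = 0;
    unsigned long long x = std::strtoull(token, &endptr, 10);
    if (errno == 0 && *endptr == '\0')
    {
        std::printf("unsigned: %llu\n", x);
    }
    else
    {
        // overflow (ERANGE): reparse the same token as a double
        double d = std::strtod(token, &endptr);
        std::printf("float fallback: %.17g\n", d);
    }
    return 0;
}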
nlohmann::json_abi_v3_11_2::detail::json_sax_dom_callback_parser<nlohmann::json_abi_v3_11_2::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_2::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>>>::end_object()
bool end_object()
{
    if (ref_stack.back())
    {
        if (!callback(static_cast<int>(ref_stack.size()) - 1, parse_event_t::object_end, *ref_stack.back()))
        {
            // discard object
            *ref_stack.back() = discarded;
        }
        else
        {
            ref_stack.back()->set_parents();
        }
    }

    JSON_ASSERT(!ref_stack.empty());
    JSON_ASSERT(!keep_stack.empty());
    ref_stack.pop_back();
    keep_stack.pop_back();

    if (!ref_stack.empty() && ref_stack.back() && ref_stack.back()->is_structured())
    {
        // remove discarded value
        for (auto it = ref_stack.back()->begin(); it != ref_stack.back()->end(); ++it)
        {
            if (it->is_discarded())
            {
                ref_stack.back()->erase(it);
                break;
            }
        }
    }

    return true;
}
pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x70, %rsp movq %rdi, %rbx movq 0x10(%rdi), %rax movq -0x8(%rax), %rcx testq %rcx, %rcx je 0xcbe0 subq 0x8(%rbx), %rax shrq $0x3, %rax decl %eax movl %eax, 0x20(%rsp) movb $0x1, (%rsp) cmpq $0x0, 0x90(%rbx) je 0xcd4d leaq 0x80(%rbx), %rdi leaq 0x20(%rsp), %rsi movq %rsp, %rdx callq *0x98(%rbx) testb %al, %al jne 0xcbe0 leaq 0xa8(%rbx), %rsi leaq 0x40(%rsp), %r14 movq %r14, %rdi callq 0xdc84 movq 0x10(%rbx), %rax movq -0x8(%rax), %rax movb (%rax), %cl movb (%r14), %dl movb %dl, (%rax) movb %cl, (%r14) movq 0x8(%rax), %rcx movq 0x8(%r14), %rdx movq %rdx, 0x8(%rax) movq %rcx, 0x8(%r14) movq %r14, %rdi callq 0x8f12 movq 0x10(%rbx), %rax leaq -0x8(%rax), %rcx movq %rcx, 0x10(%rbx) subl $0x1, 0x38(%rbx) jae 0xcbfe movl $0x3f, 0x38(%rbx) addq $-0x8, 0x30(%rbx) cmpq %rcx, 0x8(%rbx) je 0xcd3d movq -0x10(%rax), %rcx testq %rcx, %rcx je 0xcd3d movb (%rcx), %cl decb %cl cmpb $0x1, %cl ja 0xcd3d movabsq $-0x8000000000000000, %r12 # imm = 0x8000000000000000 movq -0x10(%rax), %rsi leaq 0x20(%rsp), %r14 movq %r14, %rdi callq 0xdd02 leaq 0x8(%rsp), %r13 movq %rsp, %r15 movq 0x10(%rbx), %rax movq -0x8(%rax), %rax movq %rax, (%rsp) xorps %xmm0, %xmm0 movups %xmm0, (%r13) movq %r12, 0x18(%rsp) movzbl (%rax), %ecx cmpl $0x2, %ecx je 0xcc76 cmpl $0x1, %ecx jne 0xcc81 movq $0x0, 0x8(%rsp) jmp 0xcc86 movq $0x0, 0x10(%rsp) jmp 0xcc86 movq %r12, 0x18(%rsp) cmpl $0x2, %ecx je 0xcc9f cmpl $0x1, %ecx jne 0xccae movq 0x8(%rax), %rax addq $0x8, %rax movq %rax, 0x8(%rsp) jmp 0xccb7 movq 0x8(%rax), %rax movq 0x8(%rax), %rax movq %rax, 0x10(%rsp) jmp 0xccb7 movq $0x1, 0x18(%rsp) movq %r14, %rdi movq %r15, %rsi callq 0xe756 testb %al, %al jne 0xcd3d movq %r14, %rdi callq 0xddc4 cmpb $0x9, (%rax) je 0xcd0e movq 0x20(%rsp), %rax movzbl (%rax), %eax cmpl $0x2, %eax je 0xccf9 cmpl $0x1, %eax jne 0xcd04 movq 0x28(%rsp), %rdi callq 0x7240 movq %rax, 0x28(%rsp) jmp 0xcc45 addq $0x10, 0x30(%rsp) jmp 0xcc45 incq 0x38(%rsp) jmp 0xcc45 movq 0x10(%rbx), %rax movq -0x8(%rax), %rsi movq 0x20(%rsp), %rax leaq 0x50(%rsp), %rdx movq %rax, (%rdx) movups 0x28(%rsp), %xmm0 movups %xmm0, 0x8(%rdx) movq 0x38(%rsp), %rax movq %rax, 0x18(%rdx) movq %rsp, %rdi callq 0xde98 movb $0x1, %al addq $0x70, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 retq callq 0x7130
/pboettch[P]json-schema-validator/build_O1/_deps/nlohmann_json-src/include/nlohmann/detail/input/json_sax.hpp
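The discard path above is driven by a user-supplied parser callback: returning false at an event makes the SAX-DOM parser mark the element as discarded, and end_object (and the handle_value instantiations below) later erase it. A hedged usage sketch against the public nlohmann::json callback-parsing API; the filtering rule itself is invented for illustration:

#include <iostream>
#include <nlohmann/json.hpp>

using json = nlohmann::json;

int main()
{
    json::parser_callback_t drop_secrets =
        [](int /*depth*/, json::parse_event_t event, json& parsed)
    {
        // discard any value equal to "secret" (an invented rule)
        return !(event == json::parse_event_t::value && parsed == "secret");
    };

    json j = json::parse(R"({"user":"alice","password":"secret"})", drop_secrets);
    std::cout << j.dump() << '\n'; // expected: {"user":"alice"}
    return 0;
}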
nlohmann::json_abi_v3_11_2::detail::invalid_iterator nlohmann::json_abi_v3_11_2::detail::invalid_iterator::create<nlohmann::json_abi_v3_11_2::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_2::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>>*, 0>(int, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> const&, nlohmann::json_abi_v3_11_2::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_2::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>>*)
static invalid_iterator create(int id_, const std::string& what_arg, BasicJsonContext context)
{
    std::string w = concat(exception::name("invalid_iterator", id_), exception::diagnostics(context), what_arg);
    return {id_, w.c_str()};
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x98, %rsp movq %rdx, %r15 movl %esi, %ebp movq %rdi, %rbx leaq 0x38(%rsp), %r13 movq %r13, -0x10(%r13) leaq 0x31b20(%rip), %rsi # 0x40381 leaq 0x31b29(%rip), %rdx # 0x40391 leaq 0x28(%rsp), %rdi callq 0x9c60 leaq 0x48(%rsp), %rdi leaq 0x28(%rsp), %rsi movl %ebp, %edx callq 0x9984 leaq 0x78(%rsp), %r14 movq %r14, -0x10(%r14) xorl %eax, %eax movq %rax, -0x8(%r14) movb %al, (%r14) leaq 0x18(%rsp), %r12 movq %r12, -0x10(%r12) movq %rax, -0x8(%r12) movb %al, (%r12) movq 0x8(%r15), %rsi addq 0x50(%rsp), %rsi leaq 0x8(%rsp), %rdi callq 0x7530 movq 0x48(%rsp), %rsi movq 0x50(%rsp), %rdx leaq 0x8(%rsp), %rdi callq 0x70d0 movq 0x68(%rsp), %rsi movq 0x70(%rsp), %rdx leaq 0x8(%rsp), %rdi callq 0x70d0 movq (%r15), %rsi movq 0x8(%r15), %rdx leaq 0x8(%rsp), %rdi callq 0x70d0 movq 0x68(%rsp), %rdi cmpq %r14, %rdi je 0xe90b movq 0x78(%rsp), %rsi incq %rsi callq 0x7350 leaq 0x58(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0xe926 movq 0x58(%rsp), %rsi incq %rsi callq 0x7350 movq 0x28(%rsp), %rdi cmpq %r13, %rdi je 0xe93d movq 0x38(%rsp), %rsi incq %rsi callq 0x7350 movq 0x8(%rsp), %rdx movq %rbx, %rdi movl %ebp, %esi callq 0x9bec leaq 0x484c5(%rip), %rax # 0x56e18 movq %rax, (%rbx) movq 0x8(%rsp), %rdi cmpq %r12, %rdi je 0xe96d movq 0x18(%rsp), %rsi incq %rsi callq 0x7350 movq %rbx, %rax addq $0x98, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq movq %rax, %rbx movq 0x8(%rsp), %rdi cmpq %r12, %rdi je 0xe9fb movq 0x18(%rsp), %rsi jmp 0xe9f3 movq %rax, %rbx jmp 0xe9e4 movq %rax, %rbx jmp 0xe9fb movq %rdx, %rbx movq %rax, %r15 movq 0x8(%rsp), %rdi cmpq %r12, %rdi je 0xe9bd movq 0x18(%rsp), %rsi incq %rsi callq 0x7350 leaq 0x68(%rsp), %rdi leaq 0x48(%rsp), %rdx leaq 0x88(%rsp), %r12 movq %r14, %rsi movq %r15, %rcx movl %ebx, %r8d movq %r12, %r9 callq 0x7d06 movq (%r12), %rbx movq 0x28(%rsp), %rdi cmpq %r13, %rdi je 0xe9fb movq 0x38(%rsp), %rsi incq %rsi callq 0x7350 movq %rbx, %rdi callq 0x75d0 nop
/pboettch[P]json-schema-validator/build_O1/_deps/nlohmann_json-src/include/nlohmann/detail/exceptions.hpp
std::pair<bool, nlohmann::json_abi_v3_11_2::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_2::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>>*> nlohmann::json_abi_v3_11_2::detail::json_sax_dom_callback_parser<nlohmann::json_abi_v3_11_2::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_2::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>>>::handle_value<double&>(double&, bool)
std::pair<bool, BasicJsonType*> handle_value(Value&& v, const bool skip_callback = false)
{
    JSON_ASSERT(!keep_stack.empty());

    // do not handle this value if we know it would be added to a discarded
    // container
    if (!keep_stack.back())
    {
        return {false, nullptr};
    }

    // create value
    auto value = BasicJsonType(std::forward<Value>(v));

    // check callback
    const bool keep = skip_callback || callback(static_cast<int>(ref_stack.size()), parse_event_t::value, value);

    // do not handle this value if we just learnt it shall be discarded
    if (!keep)
    {
        return {false, nullptr};
    }

    if (ref_stack.empty())
    {
        root = std::move(value);
        return {true, &root};
    }

    // skip this value if we already decided to skip the parent
    // (https://github.com/nlohmann/json/issues/971#issuecomment-413678360)
    if (!ref_stack.back())
    {
        return {false, nullptr};
    }

    // we now only expect arrays and objects
    JSON_ASSERT(ref_stack.back()->is_array() || ref_stack.back()->is_object());

    // array
    if (ref_stack.back()->is_array())
    {
        ref_stack.back()->m_value.array->emplace_back(std::move(value));
        return {true, &(ref_stack.back()->m_value.array->back())};
    }

    // object
    JSON_ASSERT(ref_stack.back()->is_object());

    // check if we should store an element for the current key
    JSON_ASSERT(!key_keep_stack.empty());
    const bool store_element = key_keep_stack.back();
    key_keep_stack.pop_back();

    if (!store_element)
    {
        return {false, nullptr};
    }

    JSON_ASSERT(object_element);
    *object_element = std::move(value);
    return {true, object_element};
}
pushq %rbp pushq %r14 pushq %rbx subq $0x40, %rsp movl %edx, %ebp movq %rdi, %rbx movabsq $-0x8000000000000000, %r14 # imm = 0x8000000000000000 movl 0x38(%rdi), %eax leaq -0x1(%rax), %rcx addq $0x3e, %rax testq %rcx, %rcx cmovnsq %rcx, %rax sarq $0x6, %rax shlq $0x3, %rax addq 0x30(%rdi), %rax leaq 0x3f(%r14), %rdx andq %rcx, %rdx xorl %edi, %edi cmpq %r14, %rdx setbe %dil movq -0x8(%rax,%rdi,8), %rax btq %rcx, %rax jae 0xf776 leaq 0x8(%rsp), %rdi movb $0x0, (%rdi) movq $0x0, 0x8(%rdi) movsd (%rsi), %xmm0 callq 0xf8a4 testb %bpl, %bpl jne 0xf733 movq 0x10(%rbx), %rax subq 0x8(%rbx), %rax shrq $0x3, %rax movl %eax, 0x1c(%rsp) movb $0x5, 0x7(%rsp) cmpq $0x0, 0x90(%rbx) je 0xf889 leaq 0x80(%rbx), %rdi leaq 0x1c(%rsp), %rsi leaq 0x7(%rsp), %rdx leaq 0x8(%rsp), %rcx callq *0x98(%rbx) testb %al, %al je 0xf86c movq 0x10(%rbx), %rax cmpq %rax, 0x8(%rbx) je 0xf780 movq -0x8(%rax), %rax testq %rax, %rax je 0xf86c cmpb $0x2, (%rax) jne 0xf7c8 movq 0x8(%rax), %rdi leaq 0x8(%rsp), %rsi callq 0xd8c2 movq 0x10(%rbx), %rax movq -0x8(%rax), %rax movq 0x8(%rax), %rax movq 0x8(%rax), %r14 addq $-0x10, %r14 jmp 0xf868 xorl %ebx, %ebx xorl %r14d, %r14d jmp 0xf87b movb 0x8(%rsp), %al leaq 0x30(%rsp), %rdi movb %al, (%rdi) movq 0x10(%rsp), %rcx movq %rcx, 0x8(%rdi) movb $0x0, 0x8(%rsp) movq $0x0, 0x10(%rsp) movq (%rbx), %rcx movb (%rcx), %dl movb %al, (%rcx) movb %dl, (%rdi) movq 0x8(%rcx), %rax movq 0x8(%rdi), %rdx movq %rdx, 0x8(%rcx) movq %rax, 0x8(%rdi) callq 0x8f12 movq (%rbx), %r14 jmp 0xf868 movq 0x58(%rbx), %rax movl 0x60(%rbx), %esi leaq -0x1(%rsi), %rcx movq %rsi, %rdx addq $0x3e, %rdx testq %rcx, %rcx cmovnsq %rcx, %rdx sarq $0x6, %rdx leaq (%rax,%rdx,8), %rdi leaq 0x3f(%r14), %rdx andq %rcx, %rdx xorl %r8d, %r8d cmpq %r14, %rdx setbe %r8b movl $0x1, %edx shlq %cl, %rdx andq -0x8(%rdi,%r8,8), %rdx subl $0x1, %esi movl %esi, 0x60(%rbx) jae 0xf81e movl $0x3f, 0x60(%rbx) addq $-0x8, %rax movq %rax, 0x58(%rbx) testq %rdx, %rdx je 0xf86c movb 0x8(%rsp), %al leaq 0x20(%rsp), %rdi movb %al, (%rdi) movq 0x10(%rsp), %rcx movq %rcx, 0x8(%rdi) movb $0x0, 0x8(%rsp) movq $0x0, 0x10(%rsp) movq 0x70(%rbx), %rcx movb (%rcx), %dl movb %al, (%rcx) movb %dl, (%rdi) movq 0x8(%rcx), %rax movq 0x8(%rdi), %rdx movq %rdx, 0x8(%rcx) movq %rax, 0x8(%rdi) callq 0x8f12 movq 0x70(%rbx), %r14 movb $0x1, %bl jmp 0xf871 xorl %ebx, %ebx xorl %r14d, %r14d leaq 0x8(%rsp), %rdi callq 0x8f12 movl %ebx, %eax movq %r14, %rdx addq $0x40, %rsp popq %rbx popq %r14 popq %rbp retq callq 0x7130 movq %rax, %rbx leaq 0x8(%rsp), %rdi callq 0x8f12 movq %rbx, %rdi callq 0x75d0 nop
/pboettch[P]json-schema-validator/build_O1/_deps/nlohmann_json-src/include/nlohmann/detail/input/json_sax.hpp
std::pair<bool, nlohmann::json_abi_v3_11_2::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_2::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>>*> nlohmann::json_abi_v3_11_2::detail::json_sax_dom_callback_parser<nlohmann::json_abi_v3_11_2::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_2::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>>>::handle_value<unsigned long&>(unsigned long&, bool)
std::pair<bool, BasicJsonType*> handle_value(Value&& v, const bool skip_callback = false)
{
    JSON_ASSERT(!keep_stack.empty());

    // do not handle this value if we know it would be added to a discarded
    // container
    if (!keep_stack.back())
    {
        return {false, nullptr};
    }

    // create value
    auto value = BasicJsonType(std::forward<Value>(v));

    // check callback
    const bool keep = skip_callback || callback(static_cast<int>(ref_stack.size()), parse_event_t::value, value);

    // do not handle this value if we just learnt it shall be discarded
    if (!keep)
    {
        return {false, nullptr};
    }

    if (ref_stack.empty())
    {
        root = std::move(value);
        return {true, &root};
    }

    // skip this value if we already decided to skip the parent
    // (https://github.com/nlohmann/json/issues/971#issuecomment-413678360)
    if (!ref_stack.back())
    {
        return {false, nullptr};
    }

    // we now only expect arrays and objects
    JSON_ASSERT(ref_stack.back()->is_array() || ref_stack.back()->is_object());

    // array
    if (ref_stack.back()->is_array())
    {
        ref_stack.back()->m_value.array->emplace_back(std::move(value));
        return {true, &(ref_stack.back()->m_value.array->back())};
    }

    // object
    JSON_ASSERT(ref_stack.back()->is_object());

    // check if we should store an element for the current key
    JSON_ASSERT(!key_keep_stack.empty());
    const bool store_element = key_keep_stack.back();
    key_keep_stack.pop_back();

    if (!store_element)
    {
        return {false, nullptr};
    }

    JSON_ASSERT(object_element);
    *object_element = std::move(value);
    return {true, object_element};
}
pushq %rbp pushq %r14 pushq %rbx subq $0x40, %rsp movl %edx, %ebp movq %rdi, %rbx movabsq $-0x8000000000000000, %r14 # imm = 0x8000000000000000 movl 0x38(%rdi), %eax leaq -0x1(%rax), %rcx addq $0x3e, %rax testq %rcx, %rcx cmovnsq %rcx, %rax sarq $0x6, %rax shlq $0x3, %rax addq 0x30(%rdi), %rax leaq 0x3f(%r14), %rdx andq %rcx, %rdx xorl %edi, %edi cmpq %r14, %rdx setbe %dil movq -0x8(%rax,%rdi,8), %rax btq %rcx, %rax jae 0x102d1 leaq 0x8(%rsp), %rdi movb $0x0, (%rdi) movq $0x0, 0x8(%rdi) movq (%rsi), %rsi callq 0x103fe testb %bpl, %bpl jne 0x1028e movq 0x10(%rbx), %rax subq 0x8(%rbx), %rax shrq $0x3, %rax movl %eax, 0x1c(%rsp) movb $0x5, 0x7(%rsp) cmpq $0x0, 0x90(%rbx) je 0x103e4 leaq 0x80(%rbx), %rdi leaq 0x1c(%rsp), %rsi leaq 0x7(%rsp), %rdx leaq 0x8(%rsp), %rcx callq *0x98(%rbx) testb %al, %al je 0x103c7 movq 0x10(%rbx), %rax cmpq %rax, 0x8(%rbx) je 0x102db movq -0x8(%rax), %rax testq %rax, %rax je 0x103c7 cmpb $0x2, (%rax) jne 0x10323 movq 0x8(%rax), %rdi leaq 0x8(%rsp), %rsi callq 0xd8c2 movq 0x10(%rbx), %rax movq -0x8(%rax), %rax movq 0x8(%rax), %rax movq 0x8(%rax), %r14 addq $-0x10, %r14 jmp 0x103c3 xorl %ebx, %ebx xorl %r14d, %r14d jmp 0x103d6 movb 0x8(%rsp), %al leaq 0x30(%rsp), %rdi movb %al, (%rdi) movq 0x10(%rsp), %rcx movq %rcx, 0x8(%rdi) movb $0x0, 0x8(%rsp) movq $0x0, 0x10(%rsp) movq (%rbx), %rcx movb (%rcx), %dl movb %al, (%rcx) movb %dl, (%rdi) movq 0x8(%rcx), %rax movq 0x8(%rdi), %rdx movq %rdx, 0x8(%rcx) movq %rax, 0x8(%rdi) callq 0x8f12 movq (%rbx), %r14 jmp 0x103c3 movq 0x58(%rbx), %rax movl 0x60(%rbx), %esi leaq -0x1(%rsi), %rcx movq %rsi, %rdx addq $0x3e, %rdx testq %rcx, %rcx cmovnsq %rcx, %rdx sarq $0x6, %rdx leaq (%rax,%rdx,8), %rdi leaq 0x3f(%r14), %rdx andq %rcx, %rdx xorl %r8d, %r8d cmpq %r14, %rdx setbe %r8b movl $0x1, %edx shlq %cl, %rdx andq -0x8(%rdi,%r8,8), %rdx subl $0x1, %esi movl %esi, 0x60(%rbx) jae 0x10379 movl $0x3f, 0x60(%rbx) addq $-0x8, %rax movq %rax, 0x58(%rbx) testq %rdx, %rdx je 0x103c7 movb 0x8(%rsp), %al leaq 0x20(%rsp), %rdi movb %al, (%rdi) movq 0x10(%rsp), %rcx movq %rcx, 0x8(%rdi) movb $0x0, 0x8(%rsp) movq $0x0, 0x10(%rsp) movq 0x70(%rbx), %rcx movb (%rcx), %dl movb %al, (%rcx) movb %dl, (%rdi) movq 0x8(%rcx), %rax movq 0x8(%rdi), %rdx movq %rdx, 0x8(%rcx) movq %rax, 0x8(%rdi) callq 0x8f12 movq 0x70(%rbx), %r14 movb $0x1, %bl jmp 0x103cc xorl %ebx, %ebx xorl %r14d, %r14d leaq 0x8(%rsp), %rdi callq 0x8f12 movl %ebx, %eax movq %r14, %rdx addq $0x40, %rsp popq %rbx popq %r14 popq %rbp retq callq 0x7130 movq %rax, %rbx leaq 0x8(%rsp), %rdi callq 0x8f12 movq %rbx, %rdi callq 0x75d0
/pboettch[P]json-schema-validator/build_O1/_deps/nlohmann_json-src/include/nlohmann/detail/input/json_sax.hpp
std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> nlohmann::json_abi_v3_11_2::detail::concat<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, char const (&) [10], std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, char const (&) [10], std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>>(char const (&) [10], std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>&&, char const (&) [10], std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>&&)
inline OutStringType concat(Args && ... args)
{
    OutStringType str;
    str.reserve(concat_length(std::forward<Args>(args)...));
    concat_into(str, std::forward<Args>(args)...);
    return str;
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx pushq %rax movq %r8, %r14 movq %rcx, %r15 movq %rdx, %r12 movq %rsi, %r13 movq %rdi, %rbx leaq 0x10(%rdi), %rax movq %rax, (%rsp) movq %rax, (%rdi) movq $0x0, 0x8(%rdi) movb $0x0, 0x10(%rdi) movq %rsi, %rdi callq 0x7180 movq %rax, %rbp addq 0x8(%r12), %rbp movq %r15, %rdi callq 0x7180 addq %rax, %rbp addq 0x8(%r14), %rbp movq %rbx, %rdi movq %rbp, %rsi callq 0x7530 movq %rbx, %rdi movq %r13, %rsi callq 0x7650 movq (%r12), %rsi movq 0x8(%r12), %rdx movq %rbx, %rdi callq 0x70d0 movq %rbx, %rdi movq %r15, %rsi callq 0x7650 movq (%r14), %rsi movq 0x8(%r14), %rdx movq %rbx, %rdi callq 0x70d0 movq %rbx, %rax addq $0x8, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq movq %rax, %r14 movq (%rbx), %rdi cmpq (%rsp), %rdi je 0x1082a movq (%rsp), %rax movq (%rax), %rsi incq %rsi callq 0x7350 movq %r14, %rdi callq 0x75d0
/pboettch[P]json-schema-validator/build_O1/_deps/nlohmann_json-src/include/nlohmann/detail/string_concat.hpp
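The helper above sizes the result up front (concat_length) so the string allocates once before concat_into appends the pieces. A freestanding sketch of the same reserve-then-append pattern using C++17 fold expressions; concat_once is a name chosen here, not the library's:

#include <string>
#include <string_view>

template <typename... Args>
std::string concat_once(Args&&... args)
{
    std::string out;
    // sum all piece lengths first, then append without reallocating
    out.reserve((std::string_view(args).size() + ...));
    (out.append(args), ...);
    return out;
}

// e.g. concat_once("key '", name, "' not found") performs one allocation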
nlohmann::json_abi_v3_11_2::detail::type_error nlohmann::json_abi_v3_11_2::detail::type_error::create<std::nullptr_t, 0>(int, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> const&, std::nullptr_t)
static type_error create(int id_, const std::string& what_arg, BasicJsonContext context)
{
    std::string w = concat(exception::name("type_error", id_), exception::diagnostics(context), what_arg);
    return {id_, w.c_str()};
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x98, %rsp movq %rdx, %r15 movl %esi, %ebp movq %rdi, %rbx leaq 0x38(%rsp), %r13 movq %r13, -0x10(%r13) leaq 0x2e7c7(%rip), %rsi # 0x403f6 leaq 0x2e7ca(%rip), %rdx # 0x40400 leaq 0x28(%rsp), %rdi callq 0x9c60 leaq 0x48(%rsp), %rdi leaq 0x28(%rsp), %rsi movl %ebp, %edx callq 0x9984 leaq 0x78(%rsp), %r14 movq %r14, -0x10(%r14) xorl %eax, %eax movq %rax, -0x8(%r14) movb %al, (%r14) leaq 0x18(%rsp), %r12 movq %r12, -0x10(%r12) movq %rax, -0x8(%r12) movb %al, (%r12) movq 0x8(%r15), %rsi addq 0x50(%rsp), %rsi leaq 0x8(%rsp), %rdi callq 0x7530 movq 0x48(%rsp), %rsi movq 0x50(%rsp), %rdx leaq 0x8(%rsp), %rdi callq 0x70d0 movq 0x68(%rsp), %rsi movq 0x70(%rsp), %rdx leaq 0x8(%rsp), %rdi callq 0x70d0 movq (%r15), %rsi movq 0x8(%r15), %rdx leaq 0x8(%rsp), %rdi callq 0x70d0 movq 0x68(%rsp), %rdi cmpq %r14, %rdi je 0x11cd9 movq 0x78(%rsp), %rsi incq %rsi callq 0x7350 leaq 0x58(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x11cf4 movq 0x58(%rsp), %rsi incq %rsi callq 0x7350 movq 0x28(%rsp), %rdi cmpq %r13, %rdi je 0x11d0b movq 0x38(%rsp), %rsi incq %rsi callq 0x7350 movq 0x8(%rsp), %rdx movq %rbx, %rdi movl %ebp, %esi callq 0x9bec leaq 0x45137(%rip), %rax # 0x56e58 movq %rax, (%rbx) movq 0x8(%rsp), %rdi cmpq %r12, %rdi je 0x11d3b movq 0x18(%rsp), %rsi incq %rsi callq 0x7350 movq %rbx, %rax addq $0x98, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq movq %rax, %rbx movq 0x8(%rsp), %rdi cmpq %r12, %rdi je 0x11dc9 movq 0x18(%rsp), %rsi jmp 0x11dc1 movq %rax, %rbx jmp 0x11db2 movq %rax, %rbx jmp 0x11dc9 movq %rdx, %rbx movq %rax, %r15 movq 0x8(%rsp), %rdi cmpq %r12, %rdi je 0x11d8b movq 0x18(%rsp), %rsi incq %rsi callq 0x7350 leaq 0x68(%rsp), %rdi leaq 0x48(%rsp), %rdx leaq 0x88(%rsp), %r12 movq %r14, %rsi movq %r15, %rcx movl %ebx, %r8d movq %r12, %r9 callq 0x7da2 movq (%r12), %rbx movq 0x28(%rsp), %rdi cmpq %r13, %rdi je 0x11dc9 movq 0x38(%rsp), %rsi incq %rsi callq 0x7350 movq %rbx, %rdi callq 0x75d0 nop
/pboettch[P]json-schema-validator/build_O1/_deps/nlohmann_json-src/include/nlohmann/detail/exceptions.hpp
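name() above builds the "[json.exception.type_error.NNN]" prefix, and the numeric id is retained on the exception object, so callers can dispatch on it. A small usage sketch with the public API:

#include <iostream>
#include <nlohmann/json.hpp>

using json = nlohmann::json;

int main()
{
    json j = "not a number";
    try
    {
        double d = j.get<double>(); // string -> number is a type error
        (void)d;
    }
    catch (const json::type_error& e)
    {
        std::cout << e.id << ": " << e.what() << '\n'; // id is 302
    }
    return 0;
}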
nlohmann::json_abi_v3_11_2::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_2::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>>::operator[](unsigned long) const
const_reference operator[](size_type idx) const
{
    // const operator[] only works for arrays
    if (JSON_HEDLEY_LIKELY(is_array()))
    {
        return m_value.array->operator[](idx);
    }

    JSON_THROW(type_error::create(305, detail::concat("cannot use operator[] with a numeric argument with ", type_name()), this));
}
pushq %rbp pushq %r14 pushq %rbx subq $0x30, %rsp movq %rdi, %r14 cmpb $0x2, (%rdi) jne 0x11f97 movq 0x8(%r14), %rax shlq $0x4, %rsi addq (%rax), %rsi movq %rsi, %rax addq $0x30, %rsp popq %rbx popq %r14 popq %rbp retq movl $0x20, %edi callq 0x7190 movq %rax, %rbx movq %r14, %rdi callq 0xec86 leaq 0x8(%rsp), %rdx movq %rax, (%rdx) leaq 0x2e626(%rip), %rsi # 0x405e1 leaq 0x10(%rsp), %rdi callq 0x121fb movb $0x1, %bpl leaq 0x10(%rsp), %rdx movq %rbx, %rdi movl $0x131, %esi # imm = 0x131 movq %r14, %rcx callq 0x12030 xorl %ebp, %ebp leaq 0x44e4a(%rip), %rsi # 0x56e30 leaq -0x675f(%rip), %rdx # 0xb88e movq %rbx, %rdi callq 0x75a0 movq %rax, %r14 leaq 0x20(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x1201b movq 0x20(%rsp), %rsi incq %rsi callq 0x7350 jmp 0x1201b movq %rax, %r14 movb $0x1, %bpl testb %bpl, %bpl je 0x12028 movq %rbx, %rdi callq 0x7270 movq %r14, %rdi callq 0x75d0
/pboettch[P]json-schema-validator/build_O1/_deps/nlohmann_json-src/include/nlohmann/json.hpp
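Unlike the non-const overload, the const operator[] cannot grow the container, so anything but an array raises type_error 305. A short usage sketch:

#include <iostream>
#include <nlohmann/json.hpp>

using json = nlohmann::json;

int main()
{
    const json arr = json::array({10, 20, 30});
    std::cout << arr[1] << '\n'; // 20

    const json num = 42;
    try
    {
        std::cout << num[0] << '\n'; // a number is not an array
    }
    catch (const json::type_error& e)
    {
        std::cout << e.id << '\n'; // 305
    }
    return 0;
}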
nlohmann::json_abi_v3_11_2::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_2::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>>::basic_json<nlohmann::json_abi_v3_11_2::detail::json_ref<nlohmann::json_abi_v3_11_2::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_2::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>>>, 0>(nlohmann::json_abi_v3_11_2::detail::json_ref<nlohmann::json_abi_v3_11_2::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_2::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>>> const&)
basic_json(const JsonRef& ref) : basic_json(ref.moved_or_copied()) {}
pushq %rbx subq $0x10, %rsp movq %rdi, %rbx movq 0x10(%rsi), %rax testq %rax, %rax je 0x124f4 movq %rsp, %rdi movq %rax, %rsi callq 0xdc84 jmp 0x1250d movb (%rsi), %al movb %al, (%rsp) movq 0x8(%rsi), %rax movq %rax, 0x8(%rsp) movb $0x0, (%rsi) movq $0x0, 0x8(%rsi) movq %rsp, %rdi movb (%rdi), %al movb %al, (%rbx) movq 0x8(%rdi), %rax movq %rax, 0x8(%rbx) movb $0x0, (%rdi) movq $0x0, 0x8(%rdi) callq 0x8f12 addq $0x10, %rsp popq %rbx retq
/pboettch[P]json-schema-validator/build_O1/_deps/nlohmann_json-src/include/nlohmann/json.hpp
(anonymous namespace)::logical_combination<((anonymous namespace)::logical_combination_types)0>::validate(nlohmann::json_abi_v3_11_2::json_pointer<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>> const&, nlohmann::json_abi_v3_11_2::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_2::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>> const&, nlohmann::json_patch&, nlohmann::json_schema::error_handler&) const
void validate(const json::json_pointer &ptr, const json &instance, json_patch &patch, error_handler &e) const final
{
    size_t count = 0;
    logical_combination_error_handler error_summary;

    for (std::size_t index = 0; index < subschemata_.size(); ++index) {
        const std::shared_ptr<schema> &s = subschemata_[index];
        logical_combination_error_handler esub;
        auto oldPatchSize = patch.get_json().size();
        s->validate(ptr, instance, patch, esub);
        if (!esub)
            count++;
        else {
            patch.get_json().get_ref<nlohmann::json::array_t &>().resize(oldPatchSize);
            esub.propagate(error_summary, "case#" + std::to_string(index) + "] ");
        }
        if (is_validate_complete(instance, ptr, e, esub, count, index))
            return;
    }

    if (count == 0) {
        e.error(ptr, instance, "no subschema has succeeded, but one of them is required to validate. Type: " + key + ", number of failed subschemas: " + std::to_string(subschemata_.size()));
        error_summary.propagate(e, "[combination: " + key + " / ");
    }
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0xd8, %rsp movq %r8, 0x90(%rsp) movq %rcx, %r12 movq %rdx, 0xb0(%rsp) movq %rsi, 0xa8(%rsp) leaq 0x366ab(%rip), %r13 # 0x57798 movq %r13, 0xb8(%rsp) xorps %xmm0, %xmm0 movups %xmm0, 0xc0(%rsp) movq $0x0, 0xd0(%rsp) movq 0x20(%rdi), %rax movq %rdi, 0xa0(%rsp) cmpq %rax, 0x28(%rdi) sete %cl je 0x21648 movb %cl, 0x7(%rsp) leaq 0x80(%rsp), %rbx leaq 0x40(%rsp), %r15 leaq 0x18(%rsp), %rbp movq $0x0, 0x98(%rsp) movq $0x0, 0x28(%rsp) movq %r13, 0x50(%rsp) xorps %xmm0, %xmm0 leaq 0x58(%rsp), %rcx movups %xmm0, (%rcx) movq $0x0, 0x10(%rcx) movzbl (%r12), %r14d testl %r14d, %r14d je 0x211a1 cmpl $0x1, %r14d je 0x21190 cmpl $0x2, %r14d jne 0x2119b movq 0x8(%r12), %rcx movq 0x8(%rcx), %r14 subq (%rcx), %r14 sarq $0x4, %r14 jmp 0x211a1 movq 0x8(%r12), %rcx movq 0x28(%rcx), %r14 jmp 0x211a1 movl $0x1, %r14d movq 0x28(%rsp), %rcx shlq $0x4, %rcx addq %rcx, %rax movq (%rax), %rdi movq (%rdi), %rax movq 0xa8(%rsp), %rsi movq 0xb0(%rsp), %rdx movq %r12, %rcx leaq 0x50(%rsp), %r8 callq *0x18(%rax) movq 0x58(%rsp), %rax cmpq 0x60(%rsp), %rax je 0x21249 movq %r12, %rdi callq 0x3a024 movq %rax, %rdi movq %r14, %rsi callq 0x39fb8 movl $0x1, %esi cmpq $0xa, 0x28(%rsp) leaq 0x70(%rsp), %r14 jb 0x2125d movl $0x4, %esi movq 0x28(%rsp), %rcx movabsq $0x346dc5d63886594b, %rdi # imm = 0x346DC5D63886594B cmpq $0x63, %rcx jbe 0x21256 cmpq $0x3e7, %rcx # imm = 0x3E7 jbe 0x2125b cmpq $0x2710, %rcx # imm = 0x2710 jb 0x2125d movq %rcx, %rax mulq %rdi shrq $0xb, %rdx addl $0x4, %esi cmpq $0x1869f, %rcx # imm = 0x1869F movq %rdx, %rcx ja 0x21213 addl $-0x3, %esi jmp 0x2125d incq 0x98(%rsp) jmp 0x2138e addl $-0x2, %esi jmp 0x2125d decl %esi movl %esi, %esi movq %rbx, 0x70(%rsp) movq %r14, %rdi xorl %edx, %edx callq 0x7410 movq 0x70(%rsp), %rdi movl 0x78(%rsp), %esi movq 0x28(%rsp), %rdx callq 0xdc09 movl $0x5, %r8d movq %r14, %rdi xorl %esi, %esi xorl %edx, %edx leaq 0x21578(%rip), %rcx # 0x4280d callq 0x73c0 movq %r15, 0x30(%rsp) movq (%rax), %rdx movq %rax, %rcx addq $0x10, %rcx cmpq %rcx, %rdx je 0x212bd movq %rdx, 0x30(%rsp) movq (%rcx), %rdx movq %rdx, 0x40(%rsp) jmp 0x212c4 movups (%rcx), %xmm0 movups %xmm0, (%r15) movq 0x8(%rax), %rdx movq %rdx, 0x38(%rsp) movq %rcx, (%rax) movq $0x0, 0x8(%rax) movb $0x0, 0x10(%rax) leaq 0x30(%rsp), %rdi leaq 0x1e3e0(%rip), %rsi # 0x3f6c8 callq 0x7650 movq %rbp, 0x8(%rsp) movq (%rax), %rdx movq %rax, %rcx addq $0x10, %rcx cmpq %rcx, %rdx je 0x21310 movq %rdx, 0x8(%rsp) movq (%rcx), %rdx movq %rdx, 0x18(%rsp) jmp 0x21317 movups (%rcx), %xmm0 movups %xmm0, (%rbp) movq 0x8(%rax), %rdx movq %rdx, 0x10(%rsp) movq %rcx, (%rax) movq $0x0, 0x8(%rax) movb $0x0, 0x10(%rax) leaq 0x50(%rsp), %rdi leaq 0xb8(%rsp), %rsi leaq 0x8(%rsp), %rdx callq 0x21b2e movq 0x8(%rsp), %rdi cmpq %rbp, %rdi je 0x2135d movq 0x18(%rsp), %rsi incq %rsi callq 0x7350 movq 0x30(%rsp), %rdi cmpq %r15, %rdi je 0x21374 movq 0x40(%rsp), %rsi incq %rsi callq 0x7350 movq 0x70(%rsp), %rdi cmpq %rbx, %rdi je 0x2138e movq 0x80(%rsp), %rsi incq %rsi callq 0x7350 movq 0x58(%rsp), %r14 cmpq 0x60(%rsp), %r14 je 0x21577 leaq 0x28(%r14), %rdx leaq 0x8(%rsp), %rdi leaq 0x213fa(%rip), %rsi # 0x427a8 callq 0x11ad1 leaq 0x18(%r14), %rdx movq 0x90(%rsp), %rdi movq (%rdi), %rax movq %r14, %rsi leaq 0x8(%rsp), %rcx callq *0x10(%rax) movq 0x8(%rsp), %rdi cmpq %rbp, %rdi je 0x213e4 movq 0x18(%rsp), %rsi incq %rsi callq 0x7350 movl $0x1, %esi cmpq $0xa, 0x28(%rsp) leaq 0x70(%rsp), %rdi jb 0x21447 movl $0x4, %esi movq 0x28(%rsp), %rcx movabsq $0x346dc5d63886594b, %r8 # imm = 
0x346DC5D63886594B cmpq $0x63, %rcx jbe 0x21440 cmpq $0x3e7, %rcx # imm = 0x3E7 jbe 0x21445 cmpq $0x2710, %rcx # imm = 0x2710 jb 0x21447 movq %rcx, %rax mulq %r8 shrq $0xb, %rdx addl $0x4, %esi cmpq $0x1869f, %rcx # imm = 0x1869F movq %rdx, %rcx ja 0x2140a addl $-0x3, %esi jmp 0x21447 addl $-0x2, %esi jmp 0x21447 decl %esi movl %esi, %esi movq %rbx, 0x70(%rsp) xorl %edx, %edx callq 0x7410 movq 0x70(%rsp), %rdi movl 0x78(%rsp), %esi movq 0x28(%rsp), %rdx callq 0xdc09 movl $0x1b, %r8d leaq 0x70(%rsp), %rdi xorl %esi, %esi xorl %edx, %edx leaq 0x21379(%rip), %rcx # 0x427f7 callq 0x73c0 movq %r15, 0x30(%rsp) movq (%rax), %rdx movq %rax, %rcx addq $0x10, %rcx cmpq %rcx, %rdx je 0x214a6 movq %rdx, 0x30(%rsp) movq (%rcx), %rdx movq %rdx, 0x40(%rsp) jmp 0x214ad movups (%rcx), %xmm0 movups %xmm0, (%r15) movq 0x8(%rax), %rdx movq %rdx, 0x38(%rsp) movq %rcx, (%rax) movq $0x0, 0x8(%rax) movb $0x0, 0x10(%rax) leaq 0x30(%rsp), %rdi leaq 0x1e1f7(%rip), %rsi # 0x3f6c8 callq 0x7650 movq %rbp, 0x8(%rsp) movq (%rax), %rdx movq %rax, %rcx addq $0x10, %rcx cmpq %rcx, %rdx je 0x214f9 movq %rdx, 0x8(%rsp) movq (%rcx), %rdx movq %rdx, 0x18(%rsp) jmp 0x21500 movups (%rcx), %xmm0 movups %xmm0, (%rbp) movq 0x8(%rax), %rdx movq %rdx, 0x10(%rsp) movq %rcx, (%rax) movq $0x0, 0x8(%rax) movb $0x0, 0x10(%rax) leaq 0x50(%rsp), %rdi movq 0x90(%rsp), %rsi leaq 0x8(%rsp), %rdx callq 0x21b2e movq 0x8(%rsp), %rdi cmpq %rbp, %rdi je 0x21546 movq 0x18(%rsp), %rsi incq %rsi callq 0x7350 movq 0x30(%rsp), %rdi cmpq %r15, %rdi je 0x2155d movq 0x40(%rsp), %rsi incq %rsi callq 0x7350 movq 0x70(%rsp), %rdi cmpq %rbx, %rdi je 0x21577 movq 0x80(%rsp), %rsi incq %rsi callq 0x7350 movq %r12, %rbp movq 0x58(%rsp), %r12 movq 0x60(%rsp), %rbx movq %r13, %r15 movq %r13, 0x50(%rsp) cmpq %rbx, %r12 je 0x215d0 leaq 0x38(%r12), %r13 movq -0x10(%r13), %rdi cmpq %rdi, %r13 je 0x215ab movq (%r13), %rsi incq %rsi callq 0x7350 leaq -0x38(%r13), %r14 leaq -0x20(%r13), %rdi callq 0x8f12 movq %r14, %rdi callq 0x24912 leaq 0x48(%r13), %rax addq $0x10, %r13 cmpq %rbx, %r13 movq %rax, %r13 jne 0x21596 movq 0x58(%rsp), %rdi testq %rdi, %rdi je 0x215e7 movq 0x68(%rsp), %rsi subq %rdi, %rsi callq 0x7350 cmpq %rbx, %r12 movq %r15, %r13 leaq 0x80(%rsp), %rbx leaq 0x40(%rsp), %r15 movq %rbp, %r12 leaq 0x18(%rsp), %rbp jne 0x21636 movq 0x28(%rsp), %rsi incq %rsi movq 0xa0(%rsp), %rcx movq 0x20(%rcx), %rax movq 0x28(%rcx), %rcx subq %rax, %rcx sarq $0x4, %rcx movq %rsi, 0x28(%rsp) cmpq %rcx, %rsi setae 0x7(%rsp) jb 0x21150 cmpq $0x0, 0x98(%rsp) sete %al movb 0x7(%rsp), %cl jmp 0x2164a movb $0x1, %al andb %al, %cl cmpb $0x1, %cl jne 0x2193d leaq 0x2107b(%rip), %rsi # 0x426d7 leaq 0x3715d(%rip), %rdx # 0x587c0 leaq 0x70(%rsp), %rdi callq 0x11ad1 leaq 0x210af(%rip), %rsi # 0x42723 leaq 0x70(%rsp), %rdi callq 0x7650 leaq 0x40(%rsp), %rbx movq %rbx, -0x10(%rbx) movq (%rax), %rdx movq %rax, %rcx addq $0x10, %rcx cmpq %rcx, %rdx je 0x216a5 movq %rdx, 0x30(%rsp) movq (%rcx), %rdx movq %rdx, 0x40(%rsp) jmp 0x216ab movups (%rcx), %xmm0 movups %xmm0, (%rbx) movq 0x8(%rax), %rdx movq %rdx, 0x38(%rsp) movq %rcx, (%rax) movq $0x0, 0x8(%rax) movb $0x0, 0x10(%rax) movq 0xa0(%rsp), %rax movq 0x28(%rax), %r14 subq 0x20(%rax), %r14 sarq $0x4, %r14 movl $0x1, %esi cmpq $0xa, %r14 jb 0x21731 movl $0x4, %esi movabsq $0x346dc5d63886594b, %rdi # imm = 0x346DC5D63886594B movq %r14, %rcx cmpq $0x63, %rcx jbe 0x2172a cmpq $0x3e7, %rcx # imm = 0x3E7 jbe 0x2172f cmpq $0x2710, %rcx # imm = 0x2710 jb 0x21731 movq %rcx, %rax mulq %rdi shrq $0xb, %rdx addl $0x4, %esi cmpq $0x1869f, %rcx # imm = 
0x1869F movq %rdx, %rcx ja 0x216f4 addl $-0x3, %esi jmp 0x21731 addl $-0x2, %esi jmp 0x21731 decl %esi movl %esi, %esi leaq 0x60(%rsp), %r15 movq %r15, -0x10(%r15) leaq 0x50(%rsp), %rdi xorl %edx, %edx callq 0x7410 movq 0x50(%rsp), %rdi movl 0x58(%rsp), %esi movq %r14, %rdx callq 0xdc09 movq 0x30(%rsp), %rcx movq 0x38(%rsp), %r8 movq 0x58(%rsp), %rdx leaq (%rdx,%r8), %rax movl $0xf, %esi cmpq %rbx, %rcx je 0x2177b movq 0x40(%rsp), %rsi cmpq %rsi, %rax jbe 0x21796 movl $0xf, %esi cmpq %r15, 0x50(%rsp) je 0x21791 movq 0x60(%rsp), %rsi cmpq %rsi, %rax jbe 0x217a7 movq 0x50(%rsp), %rsi leaq 0x30(%rsp), %rdi callq 0x70d0 jmp 0x217b5 leaq 0x50(%rsp), %rdi xorl %esi, %esi xorl %edx, %edx callq 0x73c0 leaq 0x18(%rsp), %rdx movq %rdx, -0x10(%rdx) movq (%rax), %rsi leaq 0x10(%rax), %rcx cmpq %rcx, %rsi je 0x217d9 movq %rsi, 0x8(%rsp) movq (%rcx), %rdx movq %rdx, 0x18(%rsp) jmp 0x217df movups (%rcx), %xmm0 movups %xmm0, (%rdx) movq %rax, %rdx addq $0x8, %rdx movq 0x8(%rax), %rsi movq %rsi, 0x10(%rsp) movq %rcx, (%rax) movq $0x0, (%rdx) movb $0x0, (%rcx) movq 0x90(%rsp), %rdi movq (%rdi), %rax leaq 0x8(%rsp), %rcx movq 0xa8(%rsp), %rsi movq 0xb0(%rsp), %rdx callq *0x10(%rax) leaq 0x18(%rsp), %r12 movq -0x10(%r12), %rdi cmpq %r12, %rdi je 0x2183b movq 0x18(%rsp), %rsi incq %rsi callq 0x7350 movq 0x50(%rsp), %rdi cmpq %r15, %rdi je 0x21852 movq 0x60(%rsp), %rsi incq %rsi callq 0x7350 movq 0x30(%rsp), %rdi cmpq %rbx, %rdi je 0x21869 movq 0x40(%rsp), %rsi incq %rsi callq 0x7350 leaq 0x80(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x2188a movq 0x80(%rsp), %rsi incq %rsi callq 0x7350 leaq 0x20eb2(%rip), %rsi # 0x42743 leaq 0x36f28(%rip), %rdx # 0x587c0 leaq 0x30(%rsp), %rdi callq 0x11ad1 leaq 0x20ea9(%rip), %rsi # 0x42752 leaq 0x30(%rsp), %rdi callq 0x7650 movq %r12, 0x8(%rsp) movq (%rax), %rdx movq %rax, %rcx addq $0x10, %rcx cmpq %rcx, %rdx je 0x218d6 movq %rdx, 0x8(%rsp) movq (%rcx), %rdx movq %rdx, 0x18(%rsp) jmp 0x218de movups (%rcx), %xmm0 movups %xmm0, (%r12) movq 0x8(%rax), %rsi leaq 0x8(%rsp), %rdx movq %rsi, 0x8(%rdx) movq %rcx, (%rax) movq $0x0, 0x8(%rax) movb $0x0, 0x10(%rax) leaq 0xb8(%rsp), %rdi movq 0x90(%rsp), %rsi callq 0x21b2e movq 0x8(%rsp), %rdi cmpq %r12, %rdi je 0x21926 movq 0x18(%rsp), %rsi incq %rsi callq 0x7350 movq 0x30(%rsp), %rdi cmpq %rbx, %rdi je 0x2193d movq 0x40(%rsp), %rsi incq %rsi callq 0x7350 movq %r13, 0xb8(%rsp) movq 0xc0(%rsp), %r15 movq 0xc8(%rsp), %r14 cmpq %r14, %r15 je 0x21997 addq $0x38, %r15 movq -0x10(%r15), %rdi cmpq %rdi, %r15 je 0x21972 movq (%r15), %rsi incq %rsi callq 0x7350 leaq -0x38(%r15), %rbx leaq -0x20(%r15), %rdi callq 0x8f12 movq %rbx, %rdi callq 0x24912 leaq 0x48(%r15), %rax addq $0x10, %r15 cmpq %r14, %r15 movq %rax, %r15 jne 0x2195e movq 0xc0(%rsp), %rdi testq %rdi, %rdi je 0x219b4 movq 0xd0(%rsp), %rsi subq %rdi, %rsi callq 0x7350 addq $0xd8, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq movq %rax, %r14 movq 0x8(%rsp), %rdi cmpq %r12, %rdi je 0x219e5 movq 0x18(%rsp), %rsi incq %rsi callq 0x7350 jmp 0x219e5 movq %rax, %r14 movq 0x30(%rsp), %rdi cmpq %rbx, %rdi je 0x21b19 movq 0x40(%rsp), %rsi jmp 0x21a77 jmp 0x21a84 movq %rax, %r14 leaq 0x18(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x21a22 movq 0x18(%rsp), %rsi incq %rsi callq 0x7350 jmp 0x21a22 movq %rax, %r14 movq 0x50(%rsp), %rdi cmpq %r15, %rdi je 0x21a3e movq 0x60(%rsp), %rsi incq %rsi callq 0x7350 jmp 0x21a3e movq %rax, %r14 movq 0x30(%rsp), %rdi cmpq %rbx, %rdi je 0x21a5a movq 0x40(%rsp), %rsi incq %rsi callq 0x7350 jmp 0x21a5a movq %rax, 
%r14 leaq 0x80(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x21b19 movq 0x80(%rsp), %rsi incq %rsi callq 0x7350 jmp 0x21b19 movq %rax, %r14 jmp 0x21b19 movq %rax, %r14 movq 0x8(%rsp), %rdi cmpq %rbp, %rdi je 0x21b0f movq 0x18(%rsp), %rsi jmp 0x21b00 jmp 0x21aa2 movq %rax, %r14 movq 0x8(%rsp), %rdi cmpq %rbp, %rdi je 0x21ac3 movq 0x18(%rsp), %rsi incq %rsi callq 0x7350 jmp 0x21ac3 jmp 0x21ac0 movq %rax, %r14 movq 0x30(%rsp), %rdi leaq 0x40(%rsp), %rax cmpq %rax, %rdi je 0x21ae6 movq 0x40(%rsp), %rsi incq %rsi callq 0x7350 jmp 0x21ae6 jmp 0x21ae3 movq %rax, %r14 movq 0x70(%rsp), %rdi leaq 0x80(%rsp), %rax cmpq %rax, %rdi je 0x21b0f movq 0x80(%rsp), %rsi incq %rsi callq 0x7350 jmp 0x21b0f jmp 0x21b0c movq %rax, %r14 leaq 0x50(%rsp), %rdi callq 0x21bd4 leaq 0xb8(%rsp), %rdi callq 0x21bd4 movq %r14, %rdi callq 0x75d0
/pboettch[P]json-schema-validator/src/json-validator.cpp
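This is the validator for the schema combination keywords (allOf/anyOf/oneOf): each subschema runs against the instance, failed branches roll back their patch entries, and a combined error is reported when no branch succeeds. A hedged end-to-end sketch against the library's documented json_validator entry points; the exact failure text and exception type may differ by version:

#include <iostream>
#include <nlohmann/json-schema.hpp>

using nlohmann::json;
using nlohmann::json_schema::json_validator;

int main()
{
    // "anyOf" compiles into one of the logical_combination instances
    json schema = json::parse(R"({"anyOf": [{"type": "string"}, {"type": "integer"}]})");

    json_validator validator;
    validator.set_root_schema(schema);

    try
    {
        validator.validate(json(3.14)); // neither a string nor an integer
    }
    catch (const std::exception& e)
    {
        std::cout << "validation failed: " << e.what() << '\n';
    }
    return 0;
}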
nlohmann::json_abi_v3_11_2::detail::iter_impl<nlohmann::json_abi_v3_11_2::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_2::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>>> nlohmann::json_abi_v3_11_2::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_2::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>>::find<char const (&) [4], 0>(char const (&) [4])
iterator find(KeyType && key)
{
    auto result = end();

    if (is_object())
    {
        result.m_it.object_iterator = m_value.object->find(std::forward<KeyType>(key));
    }

    return result;
}
pushq %r15 pushq %r14 pushq %rbx movq %rdx, %r14 movq %rsi, %r15 movq %rdi, %rbx callq 0xdd54 cmpb $0x1, (%r15) jne 0x24b93 movq 0x8(%r15), %rdi movq %r14, %rsi callq 0x25c42 movq %rax, 0x8(%rbx) movq %rbx, %rax popq %rbx popq %r14 popq %r15 retq
/pboettch[P]json-schema-validator/build_O1/_deps/nlohmann_json-src/include/nlohmann/json.hpp
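find() deliberately never throws: on a non-object it just returns end(), so membership tests stay cheap. A short usage sketch:

#include <iostream>
#include <nlohmann/json.hpp>

using json = nlohmann::json;

int main()
{
    json j = {{"pi", 3.141}, {"e", 2.718}};

    auto it = j.find("pi");
    if (it != j.end())
    {
        std::cout << it.key() << " = " << it.value() << '\n';
    }

    // on a non-object, find() returns end() instead of throwing
    json arr = json::array({1, 2, 3});
    std::cout << (arr.find("pi") == arr.end()) << '\n'; // 1
    return 0;
}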
nlohmann::json_abi_v3_11_2::detail::iter_impl<nlohmann::json_abi_v3_11_2::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_2::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>>> nlohmann::json_abi_v3_11_2::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_2::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>>::find<char const (&) [8], 0>(char const (&) [8])
iterator find(KeyType && key)
{
    auto result = end();

    if (is_object())
    {
        result.m_it.object_iterator = m_value.object->find(std::forward<KeyType>(key));
    }

    return result;
}
pushq %r15 pushq %r14 pushq %rbx movq %rdx, %r14 movq %rsi, %r15 movq %rdi, %rbx callq 0xdd54 cmpb $0x1, (%r15) jne 0x24c2f movq 0x8(%r15), %rdi movq %r14, %rsi callq 0x28e38 movq %rax, 0x8(%rbx) movq %rbx, %rax popq %rbx popq %r14 popq %r15 retq
/pboettch[P]json-schema-validator/build_O1/_deps/nlohmann_json-src/include/nlohmann/json.hpp
nlohmann::json_abi_v3_11_2::detail::iteration_proxy_value<nlohmann::json_abi_v3_11_2::detail::iter_impl<nlohmann::json_abi_v3_11_2::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_2::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>>>>::key() const
const string_type& key() const
{
    JSON_ASSERT(anchor.m_object != nullptr);

    switch (anchor.m_object->type())
    {
        // use integer array index as key
        case value_t::array:
        {
            if (array_index != array_index_last)
            {
                int_to_string(array_index_str, array_index);
                array_index_last = array_index;
            }
            return array_index_str;
        }

        // use key from the object
        case value_t::object:
            return anchor.key();

        // use an empty key for all primitive types
        case value_t::null:
        case value_t::string:
        case value_t::boolean:
        case value_t::number_integer:
        case value_t::number_unsigned:
        case value_t::number_float:
        case value_t::binary:
        case value_t::discarded:
        default:
            return empty_str;
    }
}
pushq %rbx movq %rdi, %rbx movq (%rdi), %rax movzbl (%rax), %eax cmpl $0x1, %eax je 0x2541f cmpl $0x2, %eax jne 0x25428 movq 0x20(%rbx), %rsi cmpq 0x28(%rbx), %rsi je 0x25419 leaq 0x30(%rbx), %rdi callq 0x39509 movq 0x20(%rbx), %rax movq %rax, 0x28(%rbx) addq $0x30, %rbx jmp 0x2542c movq %rbx, %rdi popq %rbx jmp 0x3b58c addq $0x50, %rbx movq %rbx, %rax popq %rbx retq nop
/pboettch[P]json-schema-validator/build_O1/_deps/nlohmann_json-src/include/nlohmann/detail/iterators/iteration_proxy.hpp
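key() backs the items() range adaptor: array elements get their index stringified (cached via array_index_last), object members get their name, and primitives get an empty key. A short usage sketch:

#include <iostream>
#include <nlohmann/json.hpp>

using json = nlohmann::json;

int main()
{
    json arr = json::array({"a", "b"});
    for (const auto& el : arr.items())
    {
        std::cout << el.key() << ": " << el.value() << '\n'; // 0: "a", 1: "b"
    }

    json obj = {{"one", 1}, {"two", 2}};
    for (const auto& el : obj.items())
    {
        std::cout << el.key() << ": " << el.value() << '\n'; // one: 1, two: 2
    }
    return 0;
}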
void nlohmann::json_abi_v3_11_2::detail::from_json<nlohmann::json_abi_v3_11_2::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_2::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>>>(nlohmann::json_abi_v3_11_2::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_2::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>> const&, nlohmann::json_abi_v3_11_2::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_2::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>>::boolean_t&)
inline void from_json(const BasicJsonType& j, typename BasicJsonType::boolean_t& b)
{
    if (JSON_HEDLEY_UNLIKELY(!j.is_boolean()))
    {
        JSON_THROW(type_error::create(302, concat("type must be boolean, but is ", j.type_name()), &j));
    }
    b = *j.template get_ptr<const typename BasicJsonType::boolean_t*>();
}
pushq %rbp pushq %r14 pushq %rbx subq $0x30, %rsp movq %rdi, %r14 cmpb $0x4, (%rdi) jne 0x25b21 movb 0x8(%r14), %al movb %al, (%rsi) addq $0x30, %rsp popq %rbx popq %r14 popq %rbp retq movl $0x20, %edi callq 0x7190 movq %rax, %rbx movq %r14, %rdi callq 0xec86 leaq 0x8(%rsp), %rdx movq %rax, (%rdx) leaq 0x1bab9(%rip), %rsi # 0x415fe leaq 0x10(%rsp), %rdi callq 0x25bba movb $0x1, %bpl leaq 0x10(%rsp), %rdx movq %rbx, %rdi movl $0x12e, %esi # imm = 0x12E movq %r14, %rcx callq 0x12030 xorl %ebp, %ebp leaq 0x312c0(%rip), %rsi # 0x56e30 leaq -0x1a2e9(%rip), %rdx # 0xb88e movq %rbx, %rdi callq 0x75a0 movq %rax, %r14 leaq 0x20(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x25ba5 movq 0x20(%rsp), %rsi incq %rsi callq 0x7350 jmp 0x25ba5 movq %rax, %r14 movb $0x1, %bpl testb %bpl, %bpl je 0x25bb2 movq %rbx, %rdi callq 0x7270 movq %r14, %rdi callq 0x75d0
/pboettch[P]json-schema-validator/build_O1/_deps/nlohmann_json-src/include/nlohmann/detail/conversions/from_json.hpp
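The conversion is strict: there is no truthiness coercion here, so only an actual boolean passes. A short usage sketch:

#include <iostream>
#include <nlohmann/json.hpp>

using json = nlohmann::json;

int main()
{
    json j = true;
    std::cout << std::boolalpha << j.get<bool>() << '\n'; // true

    try
    {
        json("yes").get<bool>(); // strings are not coerced to bool
    }
    catch (const json::type_error& e)
    {
        std::cout << e.what() << '\n'; // "... type must be boolean, but is string"
    }
    return 0;
}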
nlohmann::json_abi_v3_11_2::json_pointer<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>>::to_string() const
string_t to_string() const
{
    return std::accumulate(reference_tokens.begin(), reference_tokens.end(), string_t{},
                           [](const string_t& a, const string_t& b)
    {
        return detail::concat(a, '/', detail::escape(b));
    });
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x58, %rsp movq %rdi, 0x30(%rsp) movq (%rsi), %r14 movq 0x8(%rsi), %rbp leaq 0x20(%rsp), %rax movq %rax, -0x10(%rax) movq $0x0, -0x8(%rax) movb $0x0, (%rax) cmpq %rbp, %r14 je 0x262df leaq 0x48(%rsp), %rbx leaq 0x38(%rsp), %r15 leaq 0xf(%rsp), %r12 leaq 0x10(%rsp), %r13 movq %r15, %rdi movq %r12, %rsi movq %r13, %rdx movq %r14, %rcx callq 0x2659e movq %r13, %rdi movq %r15, %rsi callq 0x7420 movq 0x38(%rsp), %rdi cmpq %rbx, %rdi je 0x262d6 movq 0x48(%rsp), %rsi incq %rsi callq 0x7350 addq $0x20, %r14 cmpq %rbp, %r14 jne 0x262a3 movq 0x30(%rsp), %rax leaq 0x10(%rax), %rdx movq %rdx, (%rax) movq 0x10(%rsp), %rcx leaq 0x20(%rsp), %rsi cmpq %rsi, %rcx je 0x26308 movq %rcx, (%rax) movq 0x20(%rsp), %rcx movq %rcx, 0x10(%rax) jmp 0x2630e movups (%rsi), %xmm0 movups %xmm0, (%rdx) movq 0x18(%rsp), %rcx movq %rcx, 0x8(%rax) movq %rsi, 0x10(%rsp) movq $0x0, 0x18(%rsp) movb $0x0, 0x20(%rsp) addq $0x58, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq movq %rax, %rbx movq 0x10(%rsp), %rdi leaq 0x20(%rsp), %rax cmpq %rax, %rdi je 0x26358 movq 0x20(%rsp), %rsi incq %rsi callq 0x7350 movq %rbx, %rdi callq 0x75d0
/pboettch[P]json-schema-validator/build_O1/_deps/nlohmann_json-src/include/nlohmann/detail/json_pointer.hpp
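to_string() re-applies RFC 6901 escaping per token ('~' becomes "~0", '/' becomes "~1"), so a pointer round-trips through its text form. A short usage sketch:

#include <iostream>
#include <nlohmann/json.hpp>

using json = nlohmann::json;

int main()
{
    json j = {{"a/b", {{"m~n", 1}}}};

    json::json_pointer p("/a~1b/m~0n"); // addresses j["a/b"]["m~n"]
    std::cout << j[p] << '\n';          // 1
    std::cout << p.to_string() << '\n'; // /a~1b/m~0n
    return 0;
}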
std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> nlohmann::json_abi_v3_11_2::detail::concat<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, char const (&) [29], char const*>(char const (&) [29], char const*&&)
inline OutStringType concat(Args && ... args)
{
    OutStringType str;
    str.reserve(concat_length(std::forward<Args>(args)...));
    concat_into(str, std::forward<Args>(args)...);
    return str;
}
pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx movq %rdx, %r14 movq %rsi, %r15 movq %rdi, %rbx leaq 0x10(%rdi), %r13 movq %r13, (%rdi) movq $0x0, 0x8(%rdi) movb $0x0, 0x10(%rdi) movq %rsi, %rdi callq 0x7180 movq %rax, %r12 movq (%r14), %rdi callq 0x7180 addq %rax, %r12 movq %rbx, %rdi movq %r12, %rsi callq 0x7530 movq %rbx, %rdi movq %r15, %rsi callq 0x7650 movq (%r14), %rsi movq %rbx, %rdi callq 0x7650 movq %rbx, %rax popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 retq movq %rax, %r14 movq (%rbx), %rdi cmpq %r13, %rdi je 0x26bbf movq (%r13), %rsi incq %rsi callq 0x7350 movq %r14, %rdi callq 0x75d0 nop
/pboettch[P]json-schema-validator/build_O1/_deps/nlohmann_json-src/include/nlohmann/detail/string_concat.hpp
nlohmann::json_abi_v3_11_2::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_2::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>>::at(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> const&)
reference at(const typename object_t::key_type& key)
{
    // at only works for objects
    if (JSON_HEDLEY_UNLIKELY(!is_object()))
    {
        JSON_THROW(type_error::create(304, detail::concat("cannot use at() with ", type_name()), this));
    }

    auto it = m_value.object->find(key);
    if (it == m_value.object->end())
    {
        JSON_THROW(out_of_range::create(403, detail::concat("key '", key, "' not found"), this));
    }
    return set_parent(it->second);
}
pushq %rbp pushq %r15 pushq %r14 pushq %rbx subq $0x28, %rsp movq %rdi, %r14 cmpb $0x1, (%rdi) jne 0x28678 movq %rsi, %r15 movq 0x8(%r14), %rdi callq 0x27048 movq 0x8(%r14), %rcx addq $0x8, %rcx cmpq %rcx, %rax je 0x286d4 addq $0x40, %rax addq $0x28, %rsp popq %rbx popq %r14 popq %r15 popq %rbp retq movl $0x20, %edi callq 0x7190 movq %rax, %rbx movq %r14, %rdi callq 0xec86 leaq 0x20(%rsp), %rdx movq %rax, (%rdx) leaq 0x190a4(%rip), %rsi # 0x41740 movq %rsp, %rdi callq 0x289d5 movb $0x1, %bpl movq %rsp, %rdx movq %rbx, %rdi movl $0x130, %esi # imm = 0x130 movq %r14, %rcx callq 0xea32 xorl %ebp, %ebp leaq 0x2e76d(%rip), %rsi # 0x56e30 leaq -0x1ce3c(%rip), %rdx # 0xb88e movq %rbx, %rdi callq 0x75a0 jmp 0x28728 movl $0x20, %edi callq 0x7190 movq %rax, %rbx leaq 0x1906e(%rip), %rsi # 0x41756 leaq 0x1906d(%rip), %rcx # 0x4175c movq %rsp, %rdi movq %r15, %rdx callq 0x28a5d movb $0x1, %bpl movq %rsp, %rdx movq %rbx, %rdi movl $0x193, %esi # imm = 0x193 movq %r14, %rcx callq 0xd5e4 xorl %ebp, %ebp leaq 0x2e697(%rip), %rsi # 0x56db0 leaq -0x1ce92(%rip), %rdx # 0xb88e movq %rbx, %rdi callq 0x75a0 jmp 0x2872c jmp 0x2874c movq %rax, %r14 leaq 0x10(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x28752 movq 0x10(%rsp), %rsi incq %rsi callq 0x7350 jmp 0x28752 movq %rax, %r14 movb $0x1, %bpl testb %bpl, %bpl je 0x2875f movq %rbx, %rdi callq 0x7270 movq %r14, %rdi callq 0x75d0
/pboettch[P]json-schema-validator/build_O1/_deps/nlohmann_json-src/include/nlohmann/json.hpp
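at() is the checked accessor: id 304 for a non-object, id 403 for a missing key. A short usage sketch:

#include <iostream>
#include <nlohmann/json.hpp>

using json = nlohmann::json;

int main()
{
    json j = {{"name", "alice"}};
    std::cout << j.at("name") << '\n'; // "alice"

    try
    {
        j.at("age"); // missing key
    }
    catch (const json::out_of_range& e)
    {
        std::cout << e.id << ": " << e.what() << '\n'; // id is 403
    }
    return 0;
}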
nlohmann::json_abi_v3_11_2::detail::iter_impl<nlohmann::json_abi_v3_11_2::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_2::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>> const> nlohmann::json_abi_v3_11_2::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_2::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>>::find<char const (&) [8], 0>(char const (&) [8]) const
const_iterator find(KeyType && key) const
{
    auto result = cend();

    if (is_object())
    {
        result.m_it.object_iterator = m_value.object->find(std::forward<KeyType>(key));
    }

    return result;
}
pushq %r15 pushq %r14 pushq %rbx movq %rdx, %r14 movq %rsi, %r15 movq %rdi, %rbx callq 0x292e4 cmpb $0x1, (%r15) jne 0x29277 movq 0x8(%r15), %rdi movq %r14, %rsi callq 0x28e38 movq %rax, 0x8(%rbx) movq %rbx, %rax popq %rbx popq %r14 popq %r15 retq
/pboettch[P]json-schema-validator/build_O1/_deps/nlohmann_json-src/include/nlohmann/json.hpp
nlohmann::json_abi_v3_11_2::detail::iter_impl<nlohmann::json_abi_v3_11_2::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_2::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>> const> nlohmann::json_abi_v3_11_2::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_2::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>>::find<char const (&) [11], 0>(char const (&) [11]) const
const_iterator find(KeyType && key) const
{
    auto result = cend();

    if (is_object())
    {
        result.m_it.object_iterator = m_value.object->find(std::forward<KeyType>(key));
    }

    return result;
}
pushq %r15 pushq %r14 pushq %rbx movq %rdx, %r14 movq %rsi, %r15 movq %rdi, %rbx callq 0x292e4 cmpb $0x1, (%r15) jne 0x292db movq 0x8(%r15), %rdi movq %r14, %rsi callq 0x29a0a movq %rax, 0x8(%rbx) movq %rbx, %rax popq %rbx popq %r14 popq %r15 retq
/pboettch[P]json-schema-validator/build_O1/_deps/nlohmann_json-src/include/nlohmann/json.hpp
bool nlohmann::json_abi_v3_11_2::detail::iter_impl<nlohmann::json_abi_v3_11_2::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_2::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>> const>::operator==<nlohmann::json_abi_v3_11_2::detail::iter_impl<nlohmann::json_abi_v3_11_2::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_2::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>> const>, nullptr>(nlohmann::json_abi_v3_11_2::detail::iter_impl<nlohmann::json_abi_v3_11_2::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_2::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>> const> const&) const
bool operator==(const IterImpl& other) const
{
    // if objects are not the same, the comparison is undefined
    if (JSON_HEDLEY_UNLIKELY(m_object != other.m_object))
    {
        JSON_THROW(invalid_iterator::create(212, "cannot compare iterators of different containers", m_object));
    }

    JSON_ASSERT(m_object != nullptr);

    switch (m_object->m_type)
    {
        case value_t::object:
            return (m_it.object_iterator == other.m_it.object_iterator);

        case value_t::array:
            return (m_it.array_iterator == other.m_it.array_iterator);

        case value_t::null:
        case value_t::string:
        case value_t::boolean:
        case value_t::number_integer:
        case value_t::number_unsigned:
        case value_t::number_float:
        case value_t::binary:
        case value_t::discarded:
        default:
            return (m_it.primitive_iterator == other.m_it.primitive_iterator);
    }
}
pushq %rbp pushq %r15 pushq %r14 pushq %rbx subq $0x28, %rsp movq %rdi, %r14 movq (%rdi), %rax cmpq (%rsi), %rax jne 0x293a0 movzbl (%rax), %eax cmpl $0x2, %eax je 0x29380 cmpl $0x1, %eax jne 0x2938a movq 0x8(%r14), %rax cmpq 0x8(%rsi), %rax jmp 0x29392 movq 0x10(%r14), %rax cmpq 0x10(%rsi), %rax jmp 0x29392 movq 0x18(%r14), %rax cmpq 0x18(%rsi), %rax sete %al addq $0x28, %rsp popq %rbx popq %r14 popq %r15 popq %rbp retq movl $0x20, %edi callq 0x7190 movq %rax, %rbx leaq 0x18(%rsp), %r15 movq %r15, -0x10(%r15) leaq 0x16f93(%rip), %rsi # 0x40350 leaq 0x16fbc(%rip), %rdx # 0x40380 leaq 0x8(%rsp), %rdi callq 0x9c60 movq (%r14), %rcx movb $0x1, %bpl leaq 0x8(%rsp), %rdx movq %rbx, %rdi movl $0xd4, %esi callq 0x29436 xorl %ebp, %ebp leaq 0x2da01(%rip), %rsi # 0x56df0 leaq -0x1db68(%rip), %rdx # 0xb88e movq %rbx, %rdi callq 0x75a0 movq %rax, %r14 movq 0x8(%rsp), %rdi cmpq %r15, %rdi je 0x29420 movq 0x18(%rsp), %rsi incq %rsi callq 0x7350 jmp 0x29420 movq %rax, %r14 movb $0x1, %bpl testb %bpl, %bpl je 0x2942d movq %rbx, %rdi callq 0x7270 movq %r14, %rdi callq 0x75d0 nop
/pboettch[P]json-schema-validator/build_O1/_deps/nlohmann_json-src/include/nlohmann/detail/iterators/iter_impl.hpp
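The switch above is what ordinary find/end comparisons on a basic_json compile down to. A minimal usage sketch (values are illustrative; only nlohmann/json.hpp is assumed):

#include <nlohmann/json.hpp>
#include <iostream>

int main() {
    nlohmann::json j = {{"name", "demo"}, {"items", {1, 2, 3}}};
    auto it = j.find("name");
    if (it != j.end())              // != is defined in terms of the operator== above
        std::cout << *it << '\n';   // prints "demo"
    // Comparing iterators of different containers throws invalid_iterator (error 212).
}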
void nlohmann::json_abi_v3_11_2::detail::get_arithmetic_value<nlohmann::json_abi_v3_11_2::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_2::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>>, long, 0>(nlohmann::json_abi_v3_11_2::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_2::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>> const&, long&)
void get_arithmetic_value(const BasicJsonType& j, ArithmeticType& val) { switch (static_cast<value_t>(j)) { case value_t::number_unsigned: { val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::number_unsigned_t*>()); break; } case value_t::number_integer: { val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::number_integer_t*>()); break; } case value_t::number_float: { val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::number_float_t*>()); break; } case value_t::null: case value_t::object: case value_t::array: case value_t::string: case value_t::boolean: case value_t::binary: case value_t::discarded: default: JSON_THROW(type_error::create(302, concat("type must be number, but is ", j.type_name()), &j)); } }
pushq %rbp pushq %r14 pushq %rbx subq $0x30, %rsp movq %rdi, %r14 movzbl (%rdi), %eax cmpl $0x5, %eax je 0x2975d cmpl $0x7, %eax je 0x29763 cmpl $0x6, %eax jne 0x29775 movq 0x8(%r14), %rax jmp 0x29769 cvttsd2si 0x8(%r14), %rax movq %rax, (%rsi) addq $0x30, %rsp popq %rbx popq %r14 popq %rbp retq movl $0x20, %edi callq 0x7190 movq %rax, %rbx movq %r14, %rdi callq 0xec86 leaq 0x8(%rsp), %rdx movq %rax, (%rdx) leaq 0x180a9(%rip), %rsi # 0x41842 leaq 0x10(%rsp), %rdi callq 0x26b3f movb $0x1, %bpl leaq 0x10(%rsp), %rdx movq %rbx, %rdi movl $0x12e, %esi # imm = 0x12E movq %r14, %rcx callq 0x12030 xorl %ebp, %ebp leaq 0x2d66c(%rip), %rsi # 0x56e30 leaq -0x1df3d(%rip), %rdx # 0xb88e movq %rbx, %rdi callq 0x75a0 movq %rax, %r14 leaq 0x20(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x297f9 movq 0x20(%rsp), %rsi incq %rsi callq 0x7350 jmp 0x297f9 movq %rax, %r14 movb $0x1, %bpl testb %bpl, %bpl je 0x29806 movq %rbx, %rdi callq 0x7270 movq %r14, %rdi callq 0x75d0
/pboettch[P]json-schema-validator/build_O1/_deps/nlohmann_json-src/include/nlohmann/detail/conversions/from_json.hpp
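A short sketch of how this dispatch behaves from the caller's side (hypothetical values; the truncation in the float branch matches the cvttsd2si in the assembly above):

#include <nlohmann/json.hpp>
#include <iostream>

int main() {
    nlohmann::json u = 42u, i = -7, f = 3.9, s = "no";
    std::cout << u.get<long>() << ' '    // number_unsigned branch
              << i.get<long>() << ' '    // number_integer branch
              << f.get<long>() << '\n';  // number_float branch, truncates to 3
    try {
        s.get<long>();                   // not a number: throws type_error 302
    } catch (const nlohmann::json::type_error& e) {
        std::cout << e.what() << '\n';
    }
}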
void nlohmann::json_abi_v3_11_2::detail::get_arithmetic_value<nlohmann::json_abi_v3_11_2::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_2::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>>, double, 0>(nlohmann::json_abi_v3_11_2::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_2::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>> const&, double&)
void get_arithmetic_value(const BasicJsonType& j, ArithmeticType& val) { switch (static_cast<value_t>(j)) { case value_t::number_unsigned: { val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::number_unsigned_t*>()); break; } case value_t::number_integer: { val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::number_integer_t*>()); break; } case value_t::number_float: { val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::number_float_t*>()); break; } case value_t::null: case value_t::object: case value_t::array: case value_t::string: case value_t::boolean: case value_t::binary: case value_t::discarded: default: JSON_THROW(type_error::create(302, concat("type must be number, but is ", j.type_name()), &j)); } }
pushq %rbp pushq %r14 pushq %rbx subq $0x30, %rsp movq %rdi, %r14 movzbl (%rdi), %eax cmpl $0x5, %eax je 0x29abb cmpl $0x7, %eax je 0x29ab3 cmpl $0x6, %eax jne 0x29ace movsd 0x8(%r14), %xmm1 unpcklps 0x16fb3(%rip), %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] subpd 0x16fbb(%rip), %xmm1 # 0x40a60 movapd %xmm1, %xmm0 unpckhpd %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1] addsd %xmm1, %xmm0 jmp 0x29ac1 movsd 0x8(%r14), %xmm0 jmp 0x29ac1 cvtsi2sdq 0x8(%r14), %xmm0 movsd %xmm0, (%rsi) addq $0x30, %rsp popq %rbx popq %r14 popq %rbp retq movl $0x20, %edi callq 0x7190 movq %rax, %rbx movq %r14, %rdi callq 0xec86 leaq 0x8(%rsp), %rdx movq %rax, (%rdx) leaq 0x17d50(%rip), %rsi # 0x41842 leaq 0x10(%rsp), %rdi callq 0x26b3f movb $0x1, %bpl leaq 0x10(%rsp), %rdx movq %rbx, %rdi movl $0x12e, %esi # imm = 0x12E movq %r14, %rcx callq 0x12030 xorl %ebp, %ebp leaq 0x2d313(%rip), %rsi # 0x56e30 leaq -0x1e296(%rip), %rdx # 0xb88e movq %rbx, %rdi callq 0x75a0 movq %rax, %r14 leaq 0x20(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x29b52 movq 0x20(%rsp), %rsi incq %rsi callq 0x7350 jmp 0x29b52 movq %rax, %r14 movb $0x1, %bpl testb %bpl, %bpl je 0x29b5f movq %rbx, %rdi callq 0x7270 movq %r14, %rdi callq 0x75d0
/pboettch[P]json-schema-validator/build_O1/_deps/nlohmann_json-src/include/nlohmann/detail/conversions/from_json.hpp
void nlohmann::json_abi_v3_11_2::detail::serializer<nlohmann::json_abi_v3_11_2::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_2::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>>>::dump_integer<unsigned long, 0>(unsigned long)
void dump_integer(NumberType x) { static constexpr std::array<std::array<char, 2>, 100> digits_to_99 { { {{'0', '0'}}, {{'0', '1'}}, {{'0', '2'}}, {{'0', '3'}}, {{'0', '4'}}, {{'0', '5'}}, {{'0', '6'}}, {{'0', '7'}}, {{'0', '8'}}, {{'0', '9'}}, {{'1', '0'}}, {{'1', '1'}}, {{'1', '2'}}, {{'1', '3'}}, {{'1', '4'}}, {{'1', '5'}}, {{'1', '6'}}, {{'1', '7'}}, {{'1', '8'}}, {{'1', '9'}}, {{'2', '0'}}, {{'2', '1'}}, {{'2', '2'}}, {{'2', '3'}}, {{'2', '4'}}, {{'2', '5'}}, {{'2', '6'}}, {{'2', '7'}}, {{'2', '8'}}, {{'2', '9'}}, {{'3', '0'}}, {{'3', '1'}}, {{'3', '2'}}, {{'3', '3'}}, {{'3', '4'}}, {{'3', '5'}}, {{'3', '6'}}, {{'3', '7'}}, {{'3', '8'}}, {{'3', '9'}}, {{'4', '0'}}, {{'4', '1'}}, {{'4', '2'}}, {{'4', '3'}}, {{'4', '4'}}, {{'4', '5'}}, {{'4', '6'}}, {{'4', '7'}}, {{'4', '8'}}, {{'4', '9'}}, {{'5', '0'}}, {{'5', '1'}}, {{'5', '2'}}, {{'5', '3'}}, {{'5', '4'}}, {{'5', '5'}}, {{'5', '6'}}, {{'5', '7'}}, {{'5', '8'}}, {{'5', '9'}}, {{'6', '0'}}, {{'6', '1'}}, {{'6', '2'}}, {{'6', '3'}}, {{'6', '4'}}, {{'6', '5'}}, {{'6', '6'}}, {{'6', '7'}}, {{'6', '8'}}, {{'6', '9'}}, {{'7', '0'}}, {{'7', '1'}}, {{'7', '2'}}, {{'7', '3'}}, {{'7', '4'}}, {{'7', '5'}}, {{'7', '6'}}, {{'7', '7'}}, {{'7', '8'}}, {{'7', '9'}}, {{'8', '0'}}, {{'8', '1'}}, {{'8', '2'}}, {{'8', '3'}}, {{'8', '4'}}, {{'8', '5'}}, {{'8', '6'}}, {{'8', '7'}}, {{'8', '8'}}, {{'8', '9'}}, {{'9', '0'}}, {{'9', '1'}}, {{'9', '2'}}, {{'9', '3'}}, {{'9', '4'}}, {{'9', '5'}}, {{'9', '6'}}, {{'9', '7'}}, {{'9', '8'}}, {{'9', '9'}}, } }; // special case for "0" if (x == 0) { o->write_character('0'); return; } // use a pointer to fill the buffer auto buffer_ptr = number_buffer.begin(); // NOLINT(llvm-qualified-auto,readability-qualified-auto,cppcoreguidelines-pro-type-vararg,hicpp-vararg) number_unsigned_t abs_value; unsigned int n_chars{}; if (is_negative_number(x)) { *buffer_ptr = '-'; abs_value = remove_sign(static_cast<number_integer_t>(x)); // account one more byte for the minus sign n_chars = 1 + count_digits(abs_value); } else { abs_value = static_cast<number_unsigned_t>(x); n_chars = count_digits(abs_value); } // spare 1 byte for '\0' JSON_ASSERT(n_chars < number_buffer.size() - 1); // jump to the end to generate the string from backward, // so we later avoid reversing the result buffer_ptr += n_chars; // Fast int2ascii implementation inspired by "Fastware" talk by Andrei Alexandrescu // See: https://www.youtube.com/watch?v=o4-CwDo2zpg while (abs_value >= 100) { const auto digits_index = static_cast<unsigned>((abs_value % 100)); abs_value /= 100; *(--buffer_ptr) = digits_to_99[digits_index][1]; *(--buffer_ptr) = digits_to_99[digits_index][0]; } if (abs_value >= 10) { const auto digits_index = static_cast<unsigned>(abs_value); *(--buffer_ptr) = digits_to_99[digits_index][1]; *(--buffer_ptr) = digits_to_99[digits_index][0]; } else { *(--buffer_ptr) = static_cast<char>('0' + abs_value); } o->write_characters(number_buffer.data(), n_chars); }
testq %rsi, %rsi je 0x2abd7 movq %rsi, %rcx leaq 0x10(%rdi), %rsi movl $0x1, %r9d cmpq $0xa, %rcx jb 0x2abf0 movl $0x4, %r9d movabsq $0x346dc5d63886594b, %r10 # imm = 0x346DC5D63886594B movq %rcx, %r8 cmpq $0x63, %r8 jbe 0x2abe7 cmpq $0x3e7, %r8 # imm = 0x3E7 jbe 0x2abed cmpq $0x2710, %r8 # imm = 0x2710 jb 0x2abf0 movq %r8, %rax mulq %r10 shrq $0xb, %rdx addl $0x4, %r9d cmpq $0x1869f, %r8 # imm = 0x1869F movq %rdx, %r8 ja 0x2ab9f addl $-0x3, %r9d jmp 0x2abf0 movq (%rdi), %rdi movq (%rdi), %rax movq (%rax), %rax movl $0x30, %esi jmpq *%rax addl $-0x2, %r9d jmp 0x2abf0 decl %r9d pushq %rbx movl %r9d, %r8d leaq (%rsi,%r8), %r9 leaq 0x181d9(%rip), %r10 # 0x42dd8 cmpq $0x64, %rcx jb 0x2ac40 movabsq $0x28f5c28f5c28f5c3, %r11 # imm = 0x28F5C28F5C28F5C3 movq %rcx, %rax shrq $0x2, %rax mulq %r11 shrq $0x2, %rdx imull $0x64, %edx, %eax movl %ecx, %ebx subl %eax, %ebx movzwl (%r10,%rbx,2), %eax movw %ax, -0x2(%r9) addq $-0x2, %r9 cmpq $0x270f, %rcx # imm = 0x270F movq %rdx, %rcx ja 0x2ac0f jmp 0x2ac43 movq %rcx, %rdx cmpq $0xa, %rdx jb 0x2ac5f movb (%r10,%rdx,2), %al movb 0x1(%r10,%rdx,2), %cl movb %cl, -0x1(%r9) movq $-0x2, %rcx jmp 0x2ac6b orb $0x30, %dl movq $-0x1, %rcx movl %edx, %eax movb %al, (%r9,%rcx) movq (%rdi), %rdi movq (%rdi), %rax movq 0x8(%rax), %rax movq %r8, %rdx popq %rbx jmpq *%rax nop
/pboettch[P]json-schema-validator/build_O1/_deps/nlohmann_json-src/include/nlohmann/detail/output/serializer.hpp
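The hot loop above is the classic two-digits-per-division trick: one divide by 100 yields two characters from a 200-byte table, and the buffer is filled backwards to avoid a reverse pass. A standalone sketch of the same technique (not the serializer itself):

#include <cstdint>
#include <cstring>
#include <cstdio>

static const char kDigits[201] =
    "0001020304050607080910111213141516171819"
    "2021222324252627282930313233343536373839"
    "4041424344454647484950515253545556575859"
    "6061626364656667686970717273747576777879"
    "8081828384858687888990919293949596979899";

// Writes the decimal form of x into out, returns the length.
static int format_u64(char* out, std::uint64_t x) {
    char buf[20];
    char* p = buf + sizeof buf;              // fill backwards
    while (x >= 100) {
        unsigned idx = static_cast<unsigned>(x % 100) * 2;
        x /= 100;
        *--p = kDigits[idx + 1];
        *--p = kDigits[idx];
    }
    if (x >= 10) {
        unsigned idx = static_cast<unsigned>(x) * 2;
        *--p = kDigits[idx + 1];
        *--p = kDigits[idx];
    } else {
        *--p = static_cast<char>('0' + x);
    }
    int n = static_cast<int>(buf + sizeof buf - p);
    std::memcpy(out, p, static_cast<std::size_t>(n));
    return n;
}

int main() {
    char s[21];
    s[format_u64(s, 1234567890u)] = '\0';
    std::puts(s);                            // prints 1234567890
}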
nlohmann::json_abi_v3_11_2::detail::dtoa_impl::boundaries nlohmann::json_abi_v3_11_2::detail::dtoa_impl::compute_boundaries<double>(double)
boundaries compute_boundaries(FloatType value) { JSON_ASSERT(std::isfinite(value)); JSON_ASSERT(value > 0); // Convert the IEEE representation into a diyfp. // // If v is denormal: // value = 0.F * 2^(1 - bias) = ( F) * 2^(1 - bias - (p-1)) // If v is normalized: // value = 1.F * 2^(E - bias) = (2^(p-1) + F) * 2^(E - bias - (p-1)) static_assert(std::numeric_limits<FloatType>::is_iec559, "internal error: dtoa_short requires an IEEE-754 floating-point implementation"); constexpr int kPrecision = std::numeric_limits<FloatType>::digits; // = p (includes the hidden bit) constexpr int kBias = std::numeric_limits<FloatType>::max_exponent - 1 + (kPrecision - 1); constexpr int kMinExp = 1 - kBias; constexpr std::uint64_t kHiddenBit = std::uint64_t{1} << (kPrecision - 1); // = 2^(p-1) using bits_type = typename std::conditional<kPrecision == 24, std::uint32_t, std::uint64_t >::type; const auto bits = static_cast<std::uint64_t>(reinterpret_bits<bits_type>(value)); const std::uint64_t E = bits >> (kPrecision - 1); const std::uint64_t F = bits & (kHiddenBit - 1); const bool is_denormal = E == 0; const diyfp v = is_denormal ? diyfp(F, kMinExp) : diyfp(F + kHiddenBit, static_cast<int>(E) - kBias); // Compute the boundaries m- and m+ of the floating-point value // v = f * 2^e. // // Determine v- and v+, the floating-point predecessor and successor if v, // respectively. // // v- = v - 2^e if f != 2^(p-1) or e == e_min (A) // = v - 2^(e-1) if f == 2^(p-1) and e > e_min (B) // // v+ = v + 2^e // // Let m- = (v- + v) / 2 and m+ = (v + v+) / 2. All real numbers _strictly_ // between m- and m+ round to v, regardless of how the input rounding // algorithm breaks ties. // // ---+-------------+-------------+-------------+-------------+--- (A) // v- m- v m+ v+ // // -----------------+------+------+-------------+-------------+--- (B) // v- m- v m+ v+ const bool lower_boundary_is_closer = F == 0 && E > 1; const diyfp m_plus = diyfp(2 * v.f + 1, v.e - 1); const diyfp m_minus = lower_boundary_is_closer ? diyfp(4 * v.f - 1, v.e - 2) // (B) : diyfp(2 * v.f - 1, v.e - 1); // (A) // Determine the normalized w+ = m+. const diyfp w_plus = diyfp::normalize(m_plus); // Determine w- = m- such that e_(w-) = e_(w+). const diyfp w_minus = diyfp::normalize_to(m_minus, w_plus.e); return {diyfp::normalize(v), w_minus, w_plus}; }
movq %rdi, %rax movq %xmm0, %rdi movabsq $0x10000000000000, %r10 # imm = 0x10000000000000 decq %r10 andq %rdi, %r10 movq %rdi, %rcx shrq $0x34, %rcx movq %r10, %r8 btsq $0x34, %r8 testq %rcx, %rcx cmoveq %r10, %r8 leal -0x433(%rcx), %ecx movl $0xfffffbce, %r11d # imm = 0xFFFFFBCE cmovnel %ecx, %r11d leaq (%r8,%r8), %rsi leaq 0x1(,%r8,2), %rdx leal -0x1(%r11), %ecx movl %ecx, %r9d addq %rdx, %rdx decl %r9d testq %rdx, %rdx jns 0x2b387 pushq %rbp pushq %rbx movq %r8, %rbx movl %r11d, %ebp addq %rbx, %rbx decl %ebp testq %rbx, %rbx jns 0x2b39a testq %r10, %r10 sete %r10b shrq $0x35, %rdi setne %dil shlq $0x2, %r8 addl $-0x2, %r11d testb %r10b, %dil cmovneq %r8, %rsi cmovnel %r11d, %ecx decq %rsi subl %r9d, %ecx shlq %cl, %rsi movq %rbx, (%rax) movl %ebp, 0x8(%rax) movq %rsi, 0x10(%rax) movl %r9d, 0x18(%rax) movq %rdx, 0x20(%rax) movl %r9d, 0x28(%rax) popq %rbx popq %rbp retq
/pboettch[P]json-schema-validator/build_O1/_deps/nlohmann_json-src/include/nlohmann/detail/conversions/to_chars.hpp
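The entry point of the function is the raw IEEE-754 decomposition; a small sketch of that step alone, using the same constants the source derives for double (p = 53, kBias = 1075):

#include <cstdint>
#include <cstring>
#include <cstdio>

int main() {
    double value = 0.1;                        // positive and finite, as asserted above
    std::uint64_t bits;
    std::memcpy(&bits, &value, sizeof bits);   // reinterpret_bits

    const std::uint64_t kHiddenBit = std::uint64_t{1} << 52;  // 2^(p-1)
    const std::uint64_t E = bits >> 52;        // biased exponent (sign bit is 0 here)
    const std::uint64_t F = bits & (kHiddenBit - 1);

    // Denormal: f = F, e = 1 - 1075; normalized: f = F + 2^52, e = E - 1075.
    const std::uint64_t f = (E == 0) ? F : F + kHiddenBit;
    const int e = (E == 0) ? -1074 : static_cast<int>(E) - 1075;
    std::printf("0.1 = %llu * 2^%d\n", static_cast<unsigned long long>(f), e);
}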
decltype(fp0.reserve(std::declval<std::vector<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, std::allocator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>>>::size_type>()), fp.get<std::vector<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, std::allocator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>>>::value_type>(), (void)()) nlohmann::json_abi_v3_11_2::detail::from_json_array_impl<nlohmann::json_abi_v3_11_2::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_2::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>>, std::vector<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, std::allocator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>>>, 0>(nlohmann::json_abi_v3_11_2::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_2::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>> const&, std::vector<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, std::allocator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>>>&, nlohmann::json_abi_v3_11_2::detail::priority_tag<1u>)
auto from_json_array_impl(const BasicJsonType& j, ConstructibleArrayType& arr, priority_tag<1> /*unused*/) -> decltype( arr.reserve(std::declval<typename ConstructibleArrayType::size_type>()), j.template get<typename ConstructibleArrayType::value_type>(), void()) { using std::end; ConstructibleArrayType ret; ret.reserve(j.size()); std::transform(j.begin(), j.end(), std::inserter(ret, end(ret)), [](const BasicJsonType & i) { // get<BasicJsonType>() returns *this, this won't call a from_json // method when value_type is BasicJsonType return i.template get<typename ConstructibleArrayType::value_type>(); }); arr = std::move(ret); }
pushq %r15 pushq %r14 pushq %rbx subq $0x80, %rsp movq %rsi, %rbx movq %rdi, %r14 xorps %xmm0, %xmm0 movaps %xmm0, 0x20(%rsp) movq $0x0, 0x30(%rsp) movzbl (%rdi), %esi testl %esi, %esi je 0x38cb1 cmpl $0x1, %esi je 0x38ca2 cmpl $0x2, %esi jne 0x38cac movq 0x8(%r14), %rax movq 0x8(%rax), %rsi subq (%rax), %rsi sarq $0x4, %rsi jmp 0x38cb1 movq 0x8(%r14), %rax movq 0x28(%rax), %rsi jmp 0x38cb1 movl $0x1, %esi leaq 0x20(%rsp), %rdi callq 0x38dc6 movabsq $-0x8000000000000000, %r15 # imm = 0x8000000000000000 leaq 0x40(%rsp), %rdi movq %r14, %rsi callq 0x390cc movq %r14, (%rsp) xorps %xmm0, %xmm0 movups %xmm0, 0x8(%rsp) movq %r15, 0x18(%rsp) movzbl (%r14), %eax cmpl $0x2, %eax je 0x38cfc cmpl $0x1, %eax jne 0x38d07 movq $0x0, 0x8(%rsp) jmp 0x38d0c movq $0x0, 0x10(%rsp) jmp 0x38d0c movq %r15, 0x18(%rsp) cmpl $0x2, %eax je 0x38d25 cmpl $0x1, %eax jne 0x38d34 movq 0x8(%r14), %rax addq $0x8, %rax movq %rax, 0x8(%rsp) jmp 0x38d3d movq 0x8(%r14), %rax movq 0x8(%rax), %rax movq %rax, 0x10(%rsp) jmp 0x38d3d movq $0x1, 0x18(%rsp) leaq 0x20(%rsp), %rdx movq 0x8(%rdx), %rcx leaq 0x40(%rsp), %rdi movq %rsp, %rsi callq 0x38ead xorps %xmm0, %xmm0 leaq 0x60(%rsp), %rdi movaps %xmm0, (%rdi) movq $0x0, 0x10(%rdi) movups (%rbx), %xmm0 movq 0x10(%rbx), %rax leaq 0x20(%rsp), %r14 movaps (%r14), %xmm1 movups %xmm1, (%rbx) movq 0x10(%r14), %rcx movq %rcx, 0x10(%rbx) movq 0x10(%rdi), %rcx movq %rax, 0x10(%rdi) movaps (%rdi), %xmm1 movaps %xmm0, (%rdi) movaps %xmm1, (%r14) movq %rcx, 0x10(%r14) callq 0x24912 movq %r14, %rdi callq 0x24912 addq $0x80, %rsp popq %rbx popq %r14 popq %r15 retq movq %rax, %rbx leaq 0x20(%rsp), %rdi callq 0x24912 movq %rbx, %rdi callq 0x75d0
/pboettch[P]json-schema-validator/build_O1/_deps/nlohmann_json-src/include/nlohmann/detail/conversions/from_json.hpp
nlohmann::json_abi_v3_11_2::detail::iter_impl<nlohmann::json_abi_v3_11_2::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_2::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>> const>::operator+=(long)
iter_impl& operator+=(difference_type i) { JSON_ASSERT(m_object != nullptr); switch (m_object->m_type) { case value_t::object: JSON_THROW(invalid_iterator::create(209, "cannot use offsets with object iterators", m_object)); case value_t::array: { std::advance(m_it.array_iterator, i); break; } case value_t::null: case value_t::string: case value_t::boolean: case value_t::number_integer: case value_t::number_unsigned: case value_t::number_float: case value_t::binary: case value_t::discarded: default: { m_it.primitive_iterator += i; break; } } return *this; }
pushq %rbp pushq %r15 pushq %r14 pushq %rbx subq $0x28, %rsp movq %rdi, %r14 movq (%rdi), %rax movzbl (%rax), %eax cmpl $0x2, %eax je 0x39dbb cmpl $0x1, %eax je 0x39dd1 addq %rsi, 0x18(%r14) jmp 0x39dc3 shlq $0x4, %rsi addq %rsi, 0x10(%r14) movq %r14, %rax addq $0x28, %rsp popq %rbx popq %r14 popq %r15 popq %rbp retq movl $0x20, %edi callq 0x7190 movq %rax, %rbx leaq 0x18(%rsp), %r15 movq %r15, -0x10(%r15) leaq 0x8880(%rip), %rsi # 0x4266e leaq 0x88a1(%rip), %rdx # 0x42696 leaq 0x8(%rsp), %rdi callq 0x9c60 movq (%r14), %rcx movb $0x1, %bpl leaq 0x8(%rsp), %rdx movq %rbx, %rdi movl $0xd1, %esi callq 0x29436 xorl %ebp, %ebp leaq 0x1cfd0(%rip), %rsi # 0x56df0 leaq -0x2e599(%rip), %rdx # 0xb88e movq %rbx, %rdi callq 0x75a0 movq %rax, %r14 movq 0x8(%rsp), %rdi cmpq %r15, %rdi je 0x39e51 movq 0x18(%rsp), %rsi incq %rsi callq 0x7350 jmp 0x39e51 movq %rax, %r14 movb $0x1, %bpl testb %bpl, %bpl je 0x39e5e movq %rbx, %rdi callq 0x7270 movq %r14, %rdi callq 0x75d0
/pboettch[P]json-schema-validator/build_O1/_deps/nlohmann_json-src/include/nlohmann/detail/iterators/iter_impl.hpp
nlohmann::json_abi_v3_11_2::json_pointer<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>>::operator/=(unsigned long)
json_pointer& operator/=(std::size_t array_idx) { return *this /= std::to_string(array_idx); }
pushq %r15 pushq %r14 pushq %r12 pushq %rbx subq $0x28, %rsp movq %rsi, %r14 movq %rdi, %rbx movl $0x1, %esi cmpq $0xa, %r14 jb 0x39ed1 movl $0x4, %esi movabsq $0x346dc5d63886594b, %rdi # imm = 0x346DC5D63886594B movq %r14, %rcx cmpq $0x63, %rcx jbe 0x39eca cmpq $0x3e7, %rcx # imm = 0x3E7 jbe 0x39ecf cmpq $0x2710, %rcx # imm = 0x2710 jb 0x39ed1 movq %rcx, %rax mulq %rdi shrq $0xb, %rdx addl $0x4, %esi cmpq $0x1869f, %rcx # imm = 0x1869F movq %rdx, %rcx ja 0x39e94 addl $-0x3, %esi jmp 0x39ed1 addl $-0x2, %esi jmp 0x39ed1 decl %esi movl %esi, %esi leaq 0x18(%rsp), %r12 movq %r12, -0x10(%r12) leaq 0x8(%rsp), %r15 movq %r15, %rdi xorl %edx, %edx callq 0x7410 movq (%r15), %rdi movl 0x8(%r15), %esi movq %r14, %rdx callq 0xdc09 movq %rbx, %rdi movq %r15, %rsi callq 0x2584e movq 0x8(%rsp), %rdi cmpq %r12, %rdi je 0x39f1d movq 0x18(%rsp), %rsi incq %rsi callq 0x7350 movq %rbx, %rax addq $0x28, %rsp popq %rbx popq %r12 popq %r14 popq %r15 retq movq %rax, %rbx movq 0x8(%rsp), %rdi cmpq %r12, %rdi je 0x39f46 movq 0x18(%rsp), %rsi incq %rsi callq 0x7350 movq %rbx, %rdi callq 0x75d0
/pboettch[P]json-schema-validator/build_O1/_deps/nlohmann_json-src/include/nlohmann/detail/json_pointer.hpp
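Usage sketch: the integer overload converts via std::to_string (the digit-count loop visible in the assembly) and then defers to the string overload:

#include <nlohmann/json.hpp>
#include <iostream>

int main() {
    nlohmann::json j = {{"list", {10, 20, 30}}};
    nlohmann::json::json_pointer p("/list");
    p /= 2;                                     // appends the token "2"
    std::cout << p.to_string() << " -> " << j[p] << '\n';  // /list/2 -> 30
}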
nlohmann::json_uri::location[abi:cxx11]() const
std::string json_uri::location() const { if (urn_.size()) return urn_; std::stringstream s; if (scheme_.size() > 0) s << scheme_ << "://"; s << authority_ << path_; return s.str(); }
pushq %r14 pushq %rbx subq $0x188, %rsp # imm = 0x188 movq %rsi, %r14 movq %rdi, %rbx movq 0x8(%rsi), %rdx testq %rdx, %rdx je 0x3ddcc leaq 0x10(%rbx), %rax movq %rax, (%rbx) movq (%r14), %rsi addq %rsi, %rdx movq %rbx, %rdi callq 0xe3e6 jmp 0x3de4a movq %rsp, %rdi callq 0x7250 movq 0x28(%r14), %rdx testq %rdx, %rdx je 0x3ddff leaq 0x10(%rsp), %rdi movq 0x20(%r14), %rsi callq 0x73e0 leaq 0x2781(%rip), %rsi # 0x40573 movl $0x3, %edx movq %rax, %rdi callq 0x73e0 leaq 0x10(%rsp), %rdi movq 0x40(%r14), %rsi movq 0x48(%r14), %rdx callq 0x73e0 movq 0x60(%r14), %rsi movq 0x68(%r14), %rdx movq %rax, %rdi callq 0x73e0 leaq 0x18(%rsp), %rsi movq %rbx, %rdi callq 0x7540 movq 0x1a11b(%rip), %rsi # 0x57f50 movq %rsp, %rdi callq 0x7290 leaq 0x80(%rsp), %rdi callq 0x7100 movq %rbx, %rax addq $0x188, %rsp # imm = 0x188 popq %rbx popq %r14 retq movq %rax, %rbx movq 0x1a0ee(%rip), %rsi # 0x57f50 movq %rsp, %rdi callq 0x7290 leaq 0x80(%rsp), %rdi callq 0x7100 movq %rbx, %rdi callq 0x75d0 nop
/pboettch[P]json-schema-validator/src/json-uri.cpp
nlohmann::json_uri::escape(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> const&)
std::string json_uri::escape(const std::string &src) { std::vector<std::pair<std::string, std::string>> chars = { {"~", "~0"}, {"/", "~1"}}; std::string l = src; for (const auto &c : chars) { std::size_t pos = 0; do { pos = l.find(c.first, pos); if (pos == std::string::npos) break; l.replace(pos, 1, c.second); pos += c.second.size(); } while (1); } return l; }
pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0xa0, %rsp movq %rsi, %r14 movq %rdi, %rbx leaq 0x20(%rsp), %r12 leaq 0x35bd(%rip), %rsi # 0x41641 leaq 0x35b8(%rip), %rdx # 0x41643 movq %r12, %r15 movq %r12, %rdi callq 0x3e21a leaq 0x60(%rsp), %r15 leaq 0x24d3(%rip), %rsi # 0x40575 leaq 0x359d(%rip), %rdx # 0x41646 movq %r15, %rdi callq 0x3e21a leaq 0x8(%rsp), %rdi leaq 0x20(%rsp), %rsi leaq 0x7(%rsp), %rcx movl $0x2, %edx callq 0x3e298 movq $-0x80, %r15 leaq 0x90(%rsp), %r12 movq -0x10(%r12), %rdi cmpq %rdi, %r12 je 0x3e0ef movq (%r12), %rsi incq %rsi callq 0x7350 movq -0x30(%r12), %rdi leaq -0x20(%r12), %rax cmpq %rdi, %rax je 0x3e109 movq (%rax), %rsi incq %rsi callq 0x7350 addq $-0x40, %r12 addq $0x40, %r15 jne 0x3e0d9 leaq 0x10(%rbx), %r15 movq %r15, (%rbx) movq (%r14), %rsi movq 0x8(%r14), %rdx addq %rsi, %rdx movq %rbx, %rdi callq 0xe3e6 movq 0x8(%rsp), %r12 movq 0x10(%rsp), %r13 cmpq %r13, %r12 je 0x3e18d movq (%r12), %rsi movq 0x8(%r12), %rcx movq %rbx, %rdi xorl %edx, %edx callq 0x75c0 movq %rax, %r14 cmpq $-0x1, %rax je 0x3e187 movq 0x20(%r12), %rcx movq 0x28(%r12), %r8 movl $0x1, %edx movq %rbx, %rdi movq %r14, %rsi callq 0x73c0 addq 0x28(%r12), %r14 movq (%r12), %rsi movq 0x8(%r12), %rcx movq %rbx, %rdi movq %r14, %rdx jmp 0x3e149 addq $0x40, %r12 jmp 0x3e136 leaq 0x8(%rsp), %rdi callq 0x318fc movq %rbx, %rax addq $0xa0, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 retq movq %rax, %r14 jmp 0x3e207 movq %rax, %r14 movl $0x40, %ebx leaq (%rsp,%rbx), %rdi addq $0x20, %rdi callq 0x31984 addq $-0x40, %rbx cmpq $-0x40, %rbx jne 0x3e1b8 jmp 0x3e211 movq %rax, %r14 cmpq %r15, %r12 je 0x3e211 leaq 0x20(%rsp), %rbx addq $-0x40, %r15 movq %r15, %rdi callq 0x31984 cmpq %rbx, %r15 jne 0x3e1de jmp 0x3e211 movq %rax, %r14 movq (%rbx), %rdi cmpq %r15, %rdi je 0x3e207 movq (%r15), %rsi incq %rsi callq 0x7350 leaq 0x8(%rsp), %rdi callq 0x318fc movq %r14, %rdi callq 0x75d0 nop
/pboettch[P]json-schema-validator/src/json-uri.cpp
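A standalone sketch of the same find/replace loop (RFC 6901 token escaping: '~' must become "~0" before '/' becomes "~1", which is the order the vector above encodes):

#include <string>
#include <utility>
#include <iostream>

static std::string escape_token(std::string l) {
    const std::pair<std::string, std::string> chars[] = {{"~", "~0"}, {"/", "~1"}};
    for (const auto& c : chars) {
        std::size_t pos = 0;
        while ((pos = l.find(c.first, pos)) != std::string::npos) {
            l.replace(pos, 1, c.second);
            pos += c.second.size();  // skip the replacement so "~1" is not rescanned
        }
    }
    return l;
}

int main() {
    std::cout << escape_token("a/b~c") << '\n';  // prints a~1b~0c
}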
printf
int printf( const char * _PDCLIB_restrict format, ... ) { int rc; va_list ap; va_start( ap, format ); rc = vfprintf( stdout, format, ap ); va_end( ap ); return rc; }
pushq %rbp movq %rsp, %rbp subq $0xe0, %rsp testb %al, %al je 0x2268 movaps %xmm0, -0xb0(%rbp) movaps %xmm1, -0xa0(%rbp) movaps %xmm2, -0x90(%rbp) movaps %xmm3, -0x80(%rbp) movaps %xmm4, -0x70(%rbp) movaps %xmm5, -0x60(%rbp) movaps %xmm6, -0x50(%rbp) movaps %xmm7, -0x40(%rbp) movq %r9, -0xb8(%rbp) movq %r8, -0xc0(%rbp) movq %rcx, -0xc8(%rbp) movq %rdx, -0xd0(%rbp) movq %rsi, -0xd8(%rbp) movq %rdi, -0x8(%rbp) leaq -0xe0(%rbp), %rax movq %rax, -0x20(%rbp) leaq 0x10(%rbp), %rax movq %rax, -0x28(%rbp) movl $0x30, -0x2c(%rbp) movl $0x8, -0x30(%rbp) leaq 0x6da1(%rip), %rax # 0x9058 movq (%rax), %rdi movq -0x8(%rbp), %rsi leaq -0x30(%rbp), %rdx callq 0x22e0 movl %eax, -0xc(%rbp) movl -0xc(%rbp), %eax addq $0xe0, %rsp popq %rbp retq nopw %cs:(%rax,%rax)
/DevSolar[P]pdclib/functions/stdio/printf.c
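The same capture-and-forward pattern works for any printf-style wrapper; a sketch (log_to is a hypothetical name, not part of pdclib):

#include <cstdarg>
#include <cstdio>

int log_to(std::FILE* stream, const char* format, ...) {
    std::va_list ap;
    va_start(ap, format);                        // capture the variadic arguments
    int rc = std::vfprintf(stream, format, ap);  // forward to the v* variant
    va_end(ap);
    return rc;
}

int main() {
    log_to(stdout, "%s %d\n", "answer", 42);
}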
fputc
int fputc( int c, struct _PDCLIB_file_t * stream ) { _PDCLIB_LOCK( stream->mtx ); if ( _PDCLIB_prepwrite( stream ) == EOF ) { _PDCLIB_UNLOCK( stream->mtx ); return EOF; } stream->buffer[stream->bufidx++] = ( char )c; if ( ( stream->bufidx == stream->bufsize ) /* _IOFBF */ || ( ( stream->status & _IOLBF ) && ( ( char )c == '\n' ) ) /* _IOLBF */ || ( stream->status & _IONBF ) /* _IONBF */ ) { /* buffer filled, unbuffered stream, or end-of-line. */ c = ( _PDCLIB_flushbuffer( stream ) == 0 ) ? c : EOF; } _PDCLIB_UNLOCK( stream->mtx ); return c; }
pushq %rbp movq %rsp, %rbp subq $0x20, %rsp movl %edi, -0x8(%rbp) movq %rsi, -0x10(%rbp) movq -0x10(%rbp), %rdi addq $0x48, %rdi callq 0x5080 movq -0x10(%rbp), %rdi callq 0x24a0 cmpl $-0x1, %eax jne 0x5953 movq -0x10(%rbp), %rdi addq $0x48, %rdi callq 0x50c0 movl $0xffffffff, -0x4(%rbp) # imm = 0xFFFFFFFF jmp 0x59ea movl -0x8(%rbp), %eax movb %al, %dl movq -0x10(%rbp), %rax movq 0x8(%rax), %rax movq -0x10(%rbp), %rsi movq 0x18(%rsi), %rcx movq %rcx, %rdi addq $0x1, %rdi movq %rdi, 0x18(%rsi) movb %dl, (%rax,%rcx) movq -0x10(%rbp), %rax movq 0x18(%rax), %rax movq -0x10(%rbp), %rcx cmpq 0x10(%rcx), %rax je 0x59b1 movq -0x10(%rbp), %rax movl 0x44(%rax), %eax andl $0x2, %eax cmpl $0x0, %eax je 0x59a2 movl -0x8(%rbp), %eax movsbl %al, %eax cmpl $0xa, %eax je 0x59b1 movq -0x10(%rbp), %rax movl 0x44(%rax), %eax andl $0x4, %eax cmpl $0x0, %eax je 0x59d7 movq -0x10(%rbp), %rdi callq 0x4f00 cmpl $0x0, %eax jne 0x59c7 movl -0x8(%rbp), %eax movl %eax, -0x14(%rbp) jmp 0x59d1 movl $0xffffffff, %eax # imm = 0xFFFFFFFF movl %eax, -0x14(%rbp) jmp 0x59d1 movl -0x14(%rbp), %eax movl %eax, -0x8(%rbp) movq -0x10(%rbp), %rdi addq $0x48, %rdi callq 0x50c0 movl -0x8(%rbp), %eax movl %eax, -0x4(%rbp) movl -0x4(%rbp), %eax addq $0x20, %rsp popq %rbp retq nopw %cs:(%rax,%rax) nopl (%rax)
/DevSolar[P]pdclib/functions/stdio/fputc.c
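A sketch of the three-way flush decision in isolation (the struct, its names, and the 8-byte buffer are illustrative, not pdclib's internals):

#include <cstddef>
#include <cstdio>

struct tiny_stream {
    char buffer[8];
    std::size_t bufidx = 0, bufsize = sizeof(buffer);
    bool line_buffered = true, unbuffered = false;
};

static void flush(tiny_stream& s) {
    std::fwrite(s.buffer, 1, s.bufidx, stdout);
    s.bufidx = 0;
}

static int tiny_fputc(int c, tiny_stream& s) {
    s.buffer[s.bufidx++] = static_cast<char>(c);
    if (s.bufidx == s.bufsize ||                              // _IOFBF: buffer full
        (s.line_buffered && static_cast<char>(c) == '\n') ||  // _IOLBF: newline
        s.unbuffered)                                         // _IONBF
        flush(s);
    return c;
}

int main() {
    tiny_stream s;
    for (const char* p = "hi\nthere\n"; *p; ++p)
        tiny_fputc(*p, s);
    flush(s);   // drain whatever is left
}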
rtosc::canonicalize_arg_vals(rtosc_arg_val_t*, unsigned long, char const*, rtosc::Port::MetaContainer)
int rtosc::canonicalize_arg_vals(rtosc_arg_val_t* av, size_t n, const char* port_args, Port::MetaContainer meta) { const char* first0 = port_args; int errors_found = 0; // skip "[]:" for( ; *first0 && (*first0 == ':' || *first0 == '[' || *first0 == ']'); ++first0) ; size_t arr_size; size_t max; bool is_array; rtosc_arg_val_t* start = av; if(av->type == 'a') { arr_size = rtosc_av_arr_len(av); ++av; max = 1; // only one element per bundle element // TODO: multiple may be possible is_array = true; } else { arr_size = 1; max = n; is_array = false; } for(size_t a = 0; a < arr_size; ++a) { const char* first = first0; for(size_t i = 0; i < max; ++i, ++first, ++av) { // skip "[]" for( ; *first && (*first == '[' || *first == ']'); ++first) ; assert(!strchr(first0, '#')); // if(is_array) // TODO: currently, only one element per bundle element // assert(first[1] == 0); if(!*first || *first == ':') { // (n-i) arguments left, but we have no recipe to convert them return n-i; } if(av->type == 'S' && *first == 'i') { int val = enum_key(meta, av->val.s); if(val == std::numeric_limits<int>::min()) ++errors_found; else { av->type = 'i'; av->val.i = val; } } } } if(is_array && arr_size) rtosc_av_arr_type_set(start, av[-1].type); return errors_found; }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x28, %rsp movq %rcx, 0x20(%rsp) movq %rdx, %r12 movq %rsi, %r14 movq %rdi, %r15 movabsq $0xa00000001, %rax # imm = 0xA00000001 movzbl (%r12), %ecx addl $-0x3a, %ecx cmpl $0x23, %ecx ja 0x4ffa btq %rcx, %rax jae 0x4ffa incq %r12 jmp 0x4fe2 movb (%r15), %bpl movl $0x1, %edx movq %r15, %rbx movq %r14, %rdi cmpb $0x61, %bpl jne 0x502a movq %r15, %rdi callq 0xf0b9 testl %eax, %eax je 0x510e movslq %eax, %rdx leaq 0x18(%r15), %rbx movl $0x1, %edi movb %bpl, 0x3(%rsp) movq %r15, 0x18(%rsp) xorl %ebp, %ebp xorl %eax, %eax testq %rdi, %rdi je 0x50d2 xorl %r15d, %r15d movq %r12, %r13 incq %r13 movb -0x1(%r13), %sil movzbl %sil, %ecx cmpl $0x5c, %ecx jg 0x506f cmpl $0x5b, %ecx je 0x5047 testl %ecx, %ecx je 0x50f9 cmpl $0x3a, %ecx je 0x50f9 jmp 0x5074 cmpl $0x5d, %ecx je 0x5047 cmpb $0x69, %sil jne 0x50c2 cmpb $0x53, (%rbx) jne 0x50c2 movq %rdi, 0x8(%rsp) movl %eax, 0x4(%rsp) movq %rdx, 0x10(%rsp) movq 0x8(%rbx), %rsi movq 0x20(%rsp), %rdi callq 0x5112 movl %eax, %ecx negl %ecx jno 0x50ae movl 0x4(%rsp), %eax incl %eax movq 0x10(%rsp), %rdx jmp 0x50bd movb $0x69, (%rbx) movl %eax, 0x8(%rbx) movq 0x10(%rsp), %rdx movl 0x4(%rsp), %eax movq 0x8(%rsp), %rdi incq %r15 addq $0x18, %rbx cmpq %rdi, %r15 jne 0x5047 incq %rbp cmpq %rdx, %rbp jne 0x5038 cmpb $0x61, 0x3(%rsp) jne 0x50ff movl %eax, %ebp movsbl -0x18(%rbx), %esi movq 0x18(%rsp), %rdi callq 0xf0bd movl %ebp, %eax jmp 0x50ff subl %r15d, %r14d movl %r14d, %eax addq $0x28, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq xorl %eax, %eax jmp 0x50ff
/fundamental[P]rtosc/src/cpp/ports.cpp
rtosc::Ports::collapsePath(char*)
char *Ports::collapsePath(char *p) { //obtain the pointer to the last non-null char char *p_end = p; while(*p_end) p_end++; p_end--; //number of subpaths to consume int consuming = 0; char *write_pos = p_end; char *read_pos = p_end; while(read_pos >= p) { //per path chunk either //(1) find a parent ref and inc consuming //(2) find a normal ref and consume //(3) find a normal ref and write through bool ppath = parent_path_p(read_pos, p); if(ppath) { read_path(read_pos, p); consuming++; } else if(consuming) { read_path(read_pos, p); consuming--; } else move_path(read_pos, write_pos, p); } //return last written location, not next to write return write_pos+1; }
leaq -0x2(%rdi), %rcx cmpb $0x0, 0x2(%rcx) leaq 0x1(%rcx), %rcx jne 0x5398 cmpq %rdi, %rcx jae 0x53ae movq %rcx, %rax incq %rax retq xorl %edx, %edx movq %rcx, %rax movq %rcx, %rsi subq %rdi, %rsi cmpq $0x2, %rsi jl 0x53e3 cmpb $0x2e, (%rcx) jne 0x53e3 cmpb $0x2e, -0x1(%rcx) jne 0x53e3 cmpb $0x2f, -0x2(%rcx) jne 0x53e3 cmpq %rdi, %rcx jb 0x5433 leaq -0x1(%rcx), %rsi cmpb $0x2f, (%rcx) movq %rsi, %rcx jne 0x53d0 jmp 0x5436 testl %edx, %edx je 0x53fa cmpq %rdi, %rcx jb 0x541c leaq -0x1(%rcx), %rsi cmpb $0x2f, (%rcx) movq %rsi, %rcx jne 0x53e7 jmp 0x541f movq %rax, %rsi xorl %edx, %edx cmpq %rdi, %rcx jb 0x5426 movb (%rcx), %r8b decq %rcx leaq -0x1(%rsi), %rax movb %r8b, (%rsi) movq %rax, %rsi cmpb $0x2f, %r8b jne 0x53fd jmp 0x5429 movq %rcx, %rsi decl %edx movq %rsi, %rcx jmp 0x5429 movq %rsi, %rax cmpq %rdi, %rcx jae 0x53b3 jmp 0x53aa movq %rcx, %rsi incl %edx jmp 0x5421
/fundamental[P]rtosc/src/cpp/ports.cpp
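Usage sketch, assuming collapsePath is publicly reachable as declared in rtosc/ports.h; it rewrites the buffer in place and returns a pointer into it, so it must be given writable storage:

#include <cstdio>
#include <rtosc/ports.h>   // assumed header for the Ports declaration

int main() {
    char buf[] = "/a/b/../c/../../d";   // every "name/.." pair cancels out
    std::printf("%s\n", rtosc::Ports::collapsePath(buf));  // expected: /d
}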
rtosc::path_search(rtosc::Ports const&, char const*, unsigned long, char*, unsigned long, rtosc::path_search_opts, bool)
std::size_t rtosc::path_search(const Ports &root, const char *m, std::size_t max_ports, char *msgbuf, std::size_t bufsize, path_search_opts opts, bool reply_with_query) { const char *str = rtosc_argument(m,0).s; const char *needle = rtosc_argument(m,1).s; size_t max_args = max_ports << 1; size_t max_types = max_args + 1; STACKALLOC(char, types, max_types); STACKALLOC(rtosc_arg_t, args, max_args); path_search(root, str, needle, types, max_types, args, max_args, opts, reply_with_query); size_t length = rtosc_amessage(msgbuf, bufsize, "/paths", types, args); return length; }
pushq %rbp movq %rsp, %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x18, %rsp movq %r9, -0x30(%rbp) movq %r8, -0x40(%rbp) movq %rcx, -0x38(%rbp) movq %rdx, %rbx movq %rsi, %r13 movq %rdi, %r12 movq %rsi, %rdi xorl %esi, %esi callq 0x109e0 movq %rax, %r14 movq %r13, %rdi movl $0x1, %esi callq 0x109e0 leaq (%rbx,%rbx), %r10 leaq 0x1(,%rbx,2), %r8 movq %rsp, %r13 leaq 0x10(,%rbx,2), %rcx andq $-0x10, %rcx subq %rcx, %r13 movq %r13, %rsp movq %rsp, %r15 shlq $0x5, %rbx subq %rbx, %r15 movq %r15, %rsp subq $0x8, %rsp movzbl 0x10(%rbp), %r11d movq %r12, %rdi movq %r14, %rsi movq %rax, %rdx movq %r13, %rcx movq %r15, %r9 pushq %r11 pushq -0x30(%rbp) pushq %r10 callq 0x5c68 addq $0x20, %rsp leaq 0xbfc4(%rip), %rdx # 0x121ac movq -0x38(%rbp), %rdi movq -0x40(%rbp), %rsi movq %r13, %rcx movq %r15, %r8 callq 0x103a4 leaq -0x28(%rbp), %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq
/fundamental[P]rtosc/src/cpp/ports.cpp
do_hash(std::vector<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, std::allocator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>>> const&, std::vector<int, std::allocator<int>> const&, std::vector<int, std::allocator<int>> const&)
static ivec_t do_hash(const words_t &strs, const ivec_t &pos, const ivec_t &assoc) { ivec_t ivec; ivec.reserve(strs.size()); for(auto &s:strs) { int t = s.length(); for(auto p:pos) if(p < (int)s.size()) t += assoc[s[p]]; ivec.push_back(t); } return ivec; }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx pushq %rax movq %rcx, %r14 movq %rdx, %r15 movq %rsi, %r12 movq %rdi, %rbx xorps %xmm0, %xmm0 movups %xmm0, (%rdi) movq $0x0, 0x10(%rdi) movq 0x8(%rsi), %rsi subq (%r12), %rsi sarq $0x5, %rsi callq 0xa118 movq (%r12), %r13 movq 0x8(%r12), %rbp cmpq %rbp, %r13 je 0x77be leaq 0x4(%rsp), %r12 movl 0x8(%r13), %eax movl %eax, 0x4(%rsp) movq (%r15), %rdx movq 0x8(%r15), %rsi cmpq %rsi, %rdx je 0x7792 movq (%r14), %rdi movl %eax, %ecx movslq (%rdx), %r8 cmpl %eax, %r8d jge 0x7787 movq (%r13), %r9 movsbq (%r9,%r8), %r8 addl (%rdi,%r8,4), %ecx movl %ecx, 0x4(%rsp) addq $0x4, %rdx cmpq %rsi, %rdx jne 0x776e jmp 0x7794 movl %eax, %ecx movq 0x8(%rbx), %rsi cmpq 0x10(%rbx), %rsi je 0x77aa movl %ecx, (%rsi) addq $0x4, %rsi movq %rsi, 0x8(%rbx) jmp 0x77b5 movq %rbx, %rdi movq %r12, %rdx callq 0x9e56 addq $0x20, %r13 cmpq %rbp, %r13 jne 0x7755 addq $0x8, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq jmp 0x77cf movq %rax, %r14 movq (%rbx), %rdi testq %rdi, %rdi je 0x77e6 movq 0x10(%rbx), %rsi subq %rdi, %rsi callq 0x3250 movq %r14, %rdi callq 0x33b0
/fundamental[P]rtosc/src/cpp/ports.cpp
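The hash is just string length plus per-position character weights, the shape gperf-style perfect hashing uses. A self-contained restatement (the unsigned char cast guards against negative char values, a hardening tweak over the original):

#include <string>
#include <vector>
#include <iostream>

static std::vector<int> do_hash_sketch(const std::vector<std::string>& strs,
                                       const std::vector<int>& pos,
                                       const std::vector<int>& assoc) {
    std::vector<int> ivec;
    ivec.reserve(strs.size());
    for (const auto& s : strs) {
        int t = static_cast<int>(s.length());
        for (int p : pos)
            if (p < static_cast<int>(s.size()))
                t += assoc[static_cast<unsigned char>(s[p])];
        ivec.push_back(t);
    }
    return ivec;
}

int main() {
    std::vector<int> assoc(256, 0);
    assoc['a'] = 5;
    for (int h : do_hash_sketch({"ab", "ba"}, {0}, assoc))
        std::cout << h << ' ';   // prints 7 2 (length 2 plus weight of first char)
    std::cout << '\n';
}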
add_options(std::ostream&, rtosc::Port::MetaContainer)
static ostream &add_options(ostream &o, Port::MetaContainer meta) { string sym_names = "xyzabcdefghijklmnopqrstuvw"; int sym_idx = 0; bool has_options = false; for(auto m:meta) if(strstr(m.title, "map ")) has_options = true; for(auto m:meta) if(strcmp(m.title, "documentation") && strcmp(m.title, "parameter") && strcmp(m.title, "max") && strcmp(m.title, "min")) printf("m.title = <%s>\n", m.title); if(!has_options) return o; o << " <hints>\n"; for(auto m:meta) { if(strstr(m.title, "map ")) { o << " <point symbol=\"" << sym_names[sym_idx++] << "\" value=\""; o << m.title+4 << "\">" << m.value << "</point>\n"; } } o << " </hints>\n"; return o; }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x48, %rsp movq %rsi, %r13 movq %rdi, %rbx leaq 0x30(%rsp), %rax movq %rax, -0x10(%rax) leaq 0xa971(%rip), %rsi # 0x125f6 leaq 0xa984(%rip), %rdx # 0x12610 leaq 0x20(%rsp), %rdi callq 0x9684 testq %r13, %r13 je 0x7cb0 cmpb $0x3a, (%r13) jne 0x7cb0 leaq 0x1(%r13), %rsi movq %rsp, %rdi callq 0x3ca0 jmp 0x7cbb movq %rsp, %rdi movq %r13, %rsi callq 0x3ca0 movq %rsp, %rdi movaps (%rdi), %xmm0 movaps %xmm0, 0x10(%rsp) xorl %esi, %esi callq 0x3ca0 movq (%rsp), %r12 movq 0x10(%rsp), %rdi xorl %ebp, %ebp cmpq %r12, %rdi je 0x7d0c leaq 0xa4cf(%rip), %r14 # 0x121b3 leaq 0x10(%rsp), %r15 movq %r14, %rsi callq 0x3090 testq %rax, %rax setne %al orb %al, %bpl movq %r15, %rdi callq 0x3cdc movq 0x10(%rsp), %rdi cmpq %r12, %rdi jne 0x7ce9 testq %r13, %r13 je 0x7d26 cmpb $0x3a, (%r13) jne 0x7d26 leaq 0x1(%r13), %rsi movq %rsp, %rdi callq 0x3ca0 jmp 0x7d31 movq %rsp, %rdi movq %r13, %rsi callq 0x3ca0 movq %r13, 0x40(%rsp) movq %rsp, %rdi movaps (%rdi), %xmm0 movaps %xmm0, 0x10(%rsp) xorl %esi, %esi callq 0x3ca0 movq (%rsp), %r13 movq 0x10(%rsp), %r14 cmpq %r13, %r14 je 0x7dcd leaq 0xa5e3(%rip), %r15 # 0x12340 leaq 0x10(%rsp), %r12 movq %r14, %rdi movq %r15, %rsi callq 0x3310 testl %eax, %eax je 0x7dbb movq %r14, %rdi leaq 0xa5bb(%rip), %rsi # 0x12336 callq 0x3310 testl %eax, %eax je 0x7dbb movq %r14, %rdi leaq 0xa652(%rip), %rsi # 0x123e0 callq 0x3310 testl %eax, %eax je 0x7dbb movq %r14, %rdi leaq 0xa63b(%rip), %rsi # 0x123dc callq 0x3310 testl %eax, %eax je 0x7dbb leaq 0xa860(%rip), %rdi # 0x12611 movq %r14, %rsi xorl %eax, %eax callq 0x3050 movq %r12, %rdi callq 0x3cdc movq 0x10(%rsp), %r14 cmpq %r13, %r14 jne 0x7d62 testb $0x1, %bpl je 0x7f45 leaq 0xa843(%rip), %rsi # 0x12621 movl $0xc, %edx movq %rbx, %rdi callq 0x32c0 movq 0x40(%rsp), %rsi testq %rsi, %rsi je 0x7e07 cmpb $0x3a, (%rsi) jne 0x7e07 incq %rsi movq %rsp, %rdi callq 0x3ca0 jmp 0x7e0f movq %rsp, %rdi callq 0x3ca0 movq %rsp, %rdi movq (%rdi), %rbp movq 0x8(%rdi), %rax movq %rbp, 0x10(%rsp) movq %rax, 0x18(%rsp) xorl %esi, %esi callq 0x3ca0 movq (%rsp), %r13 cmpq %r13, %rbp je 0x7f31 xorl %r14d, %r14d leaq 0x10(%rsp), %r15 movq 0x18(%rsp), %r12 movq %rbp, %rdi leaq 0xa365(%rip), %rsi # 0x121b3 callq 0x3090 testq %rax, %rax je 0x7f1b movl $0x15, %edx movq %rbx, %rdi leaq 0xa7c3(%rip), %rsi # 0x1262e callq 0x32c0 movslq %r14d, %rax movq 0x20(%rsp), %rcx movb (%rcx,%rax), %al movb %al, (%rsp) movl $0x1, %edx movq %rbx, %rdi movq %rsp, %rsi callq 0x32c0 movl $0x9, %edx movq %rax, %rdi leaq 0xa7a7(%rip), %rsi # 0x12644 callq 0x32c0 addq $0x4, %rbp movq %rbp, %rdi callq 0x3100 movq %rbx, %rdi movq %rbp, %rsi movq %rax, %rdx callq 0x32c0 movl $0x2, %edx movq %rbx, %rdi leaq 0xa783(%rip), %rsi # 0x1264e callq 0x32c0 testq %r12, %r12 je 0x7eed movq %r12, %rdi callq 0x3100 movq %rbx, %rdi movq %r12, %rsi movq %rax, %rdx callq 0x32c0 jmp 0x7f04 movq (%rbx), %rax movq -0x18(%rax), %rax leaq (%rbx,%rax), %rdi movl 0x20(%rbx,%rax), %esi orl $0x1, %esi callq 0x3390 movl $0x9, %edx movq %rbx, %rdi leaq 0xa73e(%rip), %rsi # 0x12651 callq 0x32c0 incl %r14d movq %r15, %rdi callq 0x3cdc movq 0x10(%rsp), %rbp cmpq %r13, %rbp jne 0x7e3f leaq 0xa723(%rip), %rsi # 0x1265b movl $0xd, %edx movq %rbx, %rdi callq 0x32c0 movq 0x20(%rsp), %rdi leaq 0x30(%rsp), %rax cmpq %rax, %rdi je 0x7f61 movq 0x30(%rsp), %rsi incq %rsi callq 0x3250 movq %rbx, %rax addq $0x48, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq jmp 0x7f81 jmp 0x7f81 jmp 0x7f81 jmp 0x7f81 jmp 0x7f81 jmp 0x7f81 jmp 0x7f81 movq %rax, %rbx movq 0x20(%rsp), %rdi leaq 0x30(%rsp), %rax cmpq %rax, %rdi je 0x7fa0 movq 0x30(%rsp), %rsi incq %rsi callq 0x3250 movq %rbx, %rdi callq 0x33b0
/fundamental[P]rtosc/src/cpp/ports.cpp
rtosc_type
char rtosc_type(const char *msg, unsigned nargument) { assert(nargument < rtosc_narguments(msg)); const char *arg = rtosc_argument_string(msg); while(1) { if(*arg == '[' || *arg == ']') ++arg; else if(!nargument || !*arg) return *arg; else ++arg, --nargument; } }
incq %rdi cmpb $0x0, (%rdi) leaq 0x1(%rdi), %rdi jne 0xffa9 cmpb $0x0, (%rdi) leaq 0x1(%rdi), %rdi je 0xffb2 jmp 0xffc6 testb %al, %al je 0xffd7 decl %esi incq %rdi movzbl (%rdi), %eax cmpl $0x5b, %eax je 0xffc3 cmpl $0x5d, %eax je 0xffc3 testl %esi, %esi jne 0xffbd retq
/fundamental[P]rtosc/src/rtosc.c
rtosc_vmessage
size_t rtosc_vmessage(char *buffer, size_t len, const char *address, const char *arguments, va_list ap) { const unsigned nargs = nreserved(arguments); if(!nargs) return rtosc_amessage(buffer,len,address,arguments,NULL); STACKALLOC(rtosc_arg_t, args, nargs); rtosc_va_list_t ap2; va_copy(ap2.a, ap); rtosc_v2args(args, nargs, arguments, &ap2); return rtosc_amessage(buffer,len,address,arguments,args); }
pushq %rbp movq %rsp, %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x28, %rsp movq %r8, %r13 movq %rcx, %rbx movq %rdx, %r14 movq %rsi, %r15 movq %rdi, %r12 movq %rcx, %rdi callq 0x10364 testl %eax, %eax je 0x100e1 movq %rsp, -0x38(%rbp) movl %eax, %esi movq %r14, -0x30(%rbp) movq %rsp, %r14 movq %rsi, %rax shlq $0x4, %rax subq %rax, %r14 movq %r14, %rsp movq 0x10(%r13), %rax leaq -0x50(%rbp), %rcx movq %rax, 0x10(%rcx) movups (%r13), %xmm0 movaps %xmm0, (%rcx) movq %r14, %rdi movq %rbx, %rdx callq 0x10160 movq %r12, %rdi movq %r15, %rsi movq -0x30(%rbp), %rdx movq %rbx, %rcx movq %r14, %r8 callq 0x103a4 movq -0x38(%rbp), %rsp leaq -0x28(%rbp), %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq movq %r12, %rdi movq %r15, %rsi movq %r14, %rdx movq %rbx, %rcx xorl %r8d, %r8d leaq -0x28(%rbp), %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp jmp 0x103a4
/fundamental[P]rtosc/src/rtosc.c
rtosc_v2args
static void rtosc_v2args(rtosc_arg_t* args, size_t nargs, const char* arg_str, rtosc_va_list_t* ap) { unsigned arg_pos = 0; uint8_t *midi_tmp; while(arg_pos < nargs) { switch(*arg_str++) { case 'h': case 't': args[arg_pos++].h = va_arg(ap->a, int64_t); break; case 'd': args[arg_pos++].d = va_arg(ap->a, double); break; case 'c': case 'i': case 'r': args[arg_pos++].i = va_arg(ap->a, int); break; case 'm': midi_tmp = va_arg(ap->a, uint8_t *); args[arg_pos].m[0] = midi_tmp[0]; args[arg_pos].m[1] = midi_tmp[1]; args[arg_pos].m[2] = midi_tmp[2]; args[arg_pos++].m[3] = midi_tmp[3]; break; case 'S': case 's': args[arg_pos++].s = va_arg(ap->a, const char *); break; case 'b': args[arg_pos].b.len = va_arg(ap->a, int); args[arg_pos].b.data = va_arg(ap->a, unsigned char *); arg_pos++; break; case 'f': args[arg_pos++].f = va_arg(ap->a, double); break; default: ; } } }
testq %rsi, %rsi je 0x10363 xorl %r9d, %r9d leaq 0x2ad5(%rip), %rax # 0x12c48 xorl %r8d, %r8d movzbl (%rdx), %r10d leal -0x62(%r10), %r11d cmpl $0x12, %r11d ja 0x101c6 movslq (%rax,%r11,4), %r10 addq %rax, %r10 jmpq *%r10 movl (%rcx), %r11d cmpq $0x28, %r11 ja 0x101a7 movq %r11, %r10 addq 0x10(%rcx), %r10 addl $0x8, %r11d movl %r11d, (%rcx) jmp 0x101b3 movq 0x8(%rcx), %r10 leaq 0x8(%r10), %r11 movq %r11, 0x8(%rcx) movl (%r10), %r10d incl %r8d shlq $0x4, %r9 movl %r10d, (%rdi,%r9) jmp 0x10354 cmpl $0x53, %r10d jne 0x10354 movl (%rcx), %r11d cmpq $0x28, %r11 ja 0x101e9 movq %r11, %r10 addq 0x10(%rcx), %r10 addl $0x8, %r11d movl %r11d, (%rcx) jmp 0x101f5 movq 0x8(%rcx), %r10 leaq 0x8(%r10), %r11 movq %r11, 0x8(%rcx) movq (%r10), %r10 incl %r8d shlq $0x4, %r9 movq %r10, (%rdi,%r9) jmp 0x10354 movl (%rcx), %r11d cmpq $0x28, %r11 ja 0x1028b movq %r11, %r10 addq 0x10(%rcx), %r10 addl $0x8, %r11d movl %r11d, (%rcx) jmp 0x10297 movl (%rcx), %r11d cmpq $0x28, %r11 ja 0x102c8 movq %r11, %r10 addq 0x10(%rcx), %r10 addl $0x8, %r11d movl %r11d, (%rcx) jmp 0x102d4 movl 0x4(%rcx), %r11d cmpq $0xa0, %r11 ja 0x10313 movq %r11, %r10 addq 0x10(%rcx), %r10 addl $0x10, %r11d movl %r11d, 0x4(%rcx) jmp 0x1031f movl 0x4(%rcx), %r11d cmpq $0xa0, %r11 ja 0x10333 movq %r11, %r10 addq 0x10(%rcx), %r10 addl $0x10, %r11d movl %r11d, 0x4(%rcx) jmp 0x1033f movq 0x8(%rcx), %r10 leaq 0x8(%r10), %r11 movq %r11, 0x8(%rcx) movq (%r10), %r10 movb (%r10), %r11b shlq $0x4, %r9 movb %r11b, (%rdi,%r9) movb 0x1(%r10), %r11b movb %r11b, 0x1(%rdi,%r9) movb 0x2(%r10), %r11b movb %r11b, 0x2(%rdi,%r9) movb 0x3(%r10), %r10b incl %r8d movb %r10b, 0x3(%rdi,%r9) jmp 0x10354 movq 0x8(%rcx), %r10 leaq 0x8(%r10), %r11 movq %r11, 0x8(%rcx) movl (%r10), %r10d shlq $0x4, %r9 movl %r10d, (%rdi,%r9) movl (%rcx), %r11d cmpq $0x28, %r11 ja 0x102f8 movq %r11, %r10 addq 0x10(%rcx), %r10 addl $0x8, %r11d movl %r11d, (%rcx) jmp 0x10304 movq 0x8(%rcx), %r10 leaq 0x8(%r10), %r11 movq %r11, 0x8(%rcx) addq %rdi, %r9 movq (%r10), %r10 movq %r10, 0x8(%r9) incl %r8d jmp 0x10354 movq 0x8(%rcx), %r10 leaq 0x8(%r10), %r11 movq %r11, 0x8(%rcx) movsd (%r10), %xmm0 incl %r8d shlq $0x4, %r9 movsd %xmm0, (%rdi,%r9) jmp 0x10354 movq 0x8(%rcx), %r10 leaq 0x8(%r10), %r11 movq %r11, 0x8(%rcx) xorps %xmm0, %xmm0 cvtsd2ss (%r10), %xmm0 incl %r8d shlq $0x4, %r9 movss %xmm0, (%rdi,%r9) incq %rdx movl %r8d, %r9d cmpq %rsi, %r9 jb 0x10176 retq
/fundamental[P]rtosc/src/rtosc.c
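Note that the 'f' case reads va_arg(ap->a, double), not float: default argument promotions pass float varargs as double (hence the cvtsd2ss in the assembly) and char/short as int. A minimal demonstration of the rule:

#include <cstdarg>
#include <cstdio>

static void take(const char* types, ...) {
    std::va_list ap;
    va_start(ap, types);
    for (const char* t = types; *t; ++t) {
        if (*t == 'f')       // never va_arg(ap, float): the caller already promoted it
            std::printf("f=%g\n", static_cast<float>(va_arg(ap, double)));
        else if (*t == 'i')
            std::printf("i=%d\n", va_arg(ap, int));
    }
    va_end(ap);
}

int main() {
    take("fi", 1.5f, 7);   // the 1.5f travels as a double
}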
rtosc_amessage
size_t rtosc_amessage(char *buffer, size_t len, const char *address, const char *arguments, const rtosc_arg_t *args) { const size_t total_len = vsosc_null(address, arguments, args); if(!buffer) return total_len; //Abort if the message cannot fit if(total_len>len) { memset(buffer, 0, len); return 0; } memset(buffer, 0, total_len); unsigned pos = 0; while(*address) buffer[pos++] = *address++; //get 32 bit alignment pos += 4-pos%4; buffer[pos++] = ','; const char *arg_str = arguments; while(*arg_str) buffer[pos++] = *arg_str++; pos += 4-pos%4; unsigned toparse = nreserved(arguments); unsigned arg_pos = 0; while(toparse) { char arg = *arguments++; assert(arg); int32_t i; int64_t d; const uint8_t *m; const char *s; const unsigned char *u; rtosc_blob_t b; switch(arg) { case 'h': case 't': case 'd': d = args[arg_pos++].t; buffer[pos++] = ((d>>56) & 0xff); buffer[pos++] = ((d>>48) & 0xff); buffer[pos++] = ((d>>40) & 0xff); buffer[pos++] = ((d>>32) & 0xff); buffer[pos++] = ((d>>24) & 0xff); buffer[pos++] = ((d>>16) & 0xff); buffer[pos++] = ((d>>8) & 0xff); buffer[pos++] = (d & 0xff); --toparse; break; case 'r': case 'f': case 'c': case 'i': i = args[arg_pos++].i; buffer[pos++] = ((i>>24) & 0xff); buffer[pos++] = ((i>>16) & 0xff); buffer[pos++] = ((i>>8) & 0xff); buffer[pos++] = (i & 0xff); --toparse; break; case 'm': //TODO verify ordering of spec m = args[arg_pos++].m; buffer[pos++] = m[0]; buffer[pos++] = m[1]; buffer[pos++] = m[2]; buffer[pos++] = m[3]; --toparse; break; case 'S': case 's': s = args[arg_pos++].s; while(*s) buffer[pos++] = *s++; pos += 4-pos%4; --toparse; break; case 'b': b = args[arg_pos++].b; i = b.len; buffer[pos++] = ((i>>24) & 0xff); buffer[pos++] = ((i>>16) & 0xff); buffer[pos++] = ((i>>8) & 0xff); buffer[pos++] = (i & 0xff); u = b.data; if(u) { while(i--) buffer[pos++] = *u++; } else pos += i; if(pos%4) pos += 4-pos%4; --toparse; break; default: ; } } return pos; }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x28, %rsp movq %r8, 0x8(%rsp) movq %rcx, %r14 movq %rsi, 0x20(%rsp) movq %rdi, %rbp movq %rdx, 0x10(%rsp) movq %rdx, %rdi callq 0x3100 movq %rax, %r13 addl $0x4, %r13d movq %r14, %rdi callq 0x3100 andl $-0x4, %r13d leal (%rax,%r13), %r12d incl %r12d andl $-0x4, %r12d addl $0x4, %r12d movq %r14, %rdi callq 0x10364 testl %eax, %eax movq %r14, 0x18(%rsp) je 0x1047f movl %eax, %r13d xorl %ebx, %ebx leaq 0x2886(%rip), %r15 # 0x12c94 movq 0x8(%rsp), %rdx movzbl (%r14), %eax leal -0x62(%rax), %ecx cmpl $0x12, %ecx ja 0x10434 movslq (%r15,%rcx,4), %rax addq %r15, %rax jmpq *%rax incl %ebx jmp 0x10456 incl %ebx addl $0x8, %r12d jmp 0x1045a cmpl $0x53, %eax jne 0x1045d movl %ebx, %eax incl %ebx shlq $0x4, %rax movq (%rdx,%rax), %rdi callq 0x3100 movq 0x8(%rsp), %rdx addl %eax, %r12d andl $-0x4, %r12d addl $0x4, %r12d decl %r13d incq %r14 testl %r13d, %r13d jne 0x10413 jmp 0x1047f movl %ebx, %eax incl %ebx shlq $0x4, %rax movl (%rdx,%rax), %eax addl %eax, %r12d addl $0x7, %r12d andl $-0x4, %r12d jmp 0x1045a movl %r12d, %r13d testq %rbp, %rbp je 0x10728 movq 0x20(%rsp), %rdx cmpq %rdx, %r13 jbe 0x104a7 xorl %r13d, %r13d movq %rbp, %rdi xorl %esi, %esi callq 0x3150 jmp 0x10728 xorl %r12d, %r12d movq %rbp, %rdi xorl %esi, %esi movq %r13, %rdx callq 0x3150 movq 0x10(%rsp), %rdx movb (%rdx), %al testb %al, %al movq 0x18(%rsp), %r14 je 0x104e1 xorl %r12d, %r12d movl %r12d, %ecx movb %al, (%rbp,%rcx) movb 0x1(%rdx,%r12), %al incq %r12 testb %al, %al jne 0x104ca andl $-0x4, %r12d leal 0x4(%r12), %eax addl $0x5, %r12d movb $0x2c, (%rbp,%rax) movb (%r14), %al testb %al, %al je 0x1050d leaq 0x1(%r14), %rcx movl %r12d, %edx incl %r12d movb %al, (%rbp,%rdx) movb (%rcx), %al incq %rcx testb %al, %al jne 0x104fa andl $-0x4, %r12d addl $0x4, %r12d movq %r14, %rdi callq 0x10364 testl %eax, %eax je 0x10725 xorl %ecx, %ecx leaq 0x27b2(%rip), %r9 # 0x12ce0 movq 0x8(%rsp), %r11 movzbl (%r14), %esi leal -0x62(%rsi), %edi cmpl $0x12, %edi ja 0x10614 movslq (%r9,%rdi,4), %rsi addq %r9, %rsi jmpq *%rsi movl %ecx, %edx incl %ecx shlq $0x4, %rdx movl (%r11,%rdx), %ebx movl %ebx, %edx shrl $0x18, %edx leal 0x1(%r12), %esi movl %r12d, %edi movb %dl, (%rbp,%rdi) movl %ebx, %edx shrl $0x10, %edx leal 0x2(%r12), %edi movb %dl, (%rbp,%rsi) leal 0x3(%r12), %edx movb %bh, (%rbp,%rdi) addl $0x4, %r12d movb %bl, (%rbp,%rdx) jmp 0x10718 movl %ecx, %esi incl %ecx shlq $0x4, %rsi movq (%r11,%rsi), %rbx movq %rbx, %rsi shrq $0x38, %rsi leal 0x1(%r12), %edi movl %r12d, %r8d movb %sil, (%rbp,%r8) movq %rbx, %rsi shrq $0x30, %rsi leal 0x2(%r12), %r8d movb %sil, (%rbp,%rdi) movq %rbx, %rsi shrq $0x28, %rsi leal 0x3(%r12), %edi movb %sil, (%rbp,%r8) movq %rbx, %rsi shrq $0x20, %rsi leal 0x4(%r12), %r8d movb %sil, (%rbp,%rdi) movl %ebx, %esi shrl $0x18, %esi leal 0x5(%r12), %edi movb %sil, (%rbp,%r8) movl %ebx, %esi shrl $0x10, %esi leal 0x6(%r12), %edx movb %sil, (%rbp,%rdi) leal 0x7(%r12), %esi movb %bh, (%rbp,%rdx) addl $0x8, %r12d movb %bl, (%rbp,%rsi) jmp 0x10718 cmpl $0x53, %esi jne 0x1071a movl %ecx, %edx shlq $0x4, %rdx movq (%r11,%rdx), %rsi movb (%rsi), %dil testb %dil, %dil je 0x10648 incq %rsi movl %r12d, %edx incl %r12d movb %dil, (%rbp,%rdx) movb (%rsi), %dil incq %rsi testb %dil, %dil jne 0x10632 incl %ecx andl $-0x4, %r12d addl $0x4, %r12d jmp 0x10718 movl %ecx, %edx incl %ecx shlq $0x4, %rdx movb (%r11,%rdx), %sil leal 0x1(%r12), %edi movl %r12d, %r8d movb %sil, (%rbp,%r8) movb 0x1(%r11,%rdx), %sil leal 0x2(%r12), %r8d movb %sil, (%rbp,%rdi) movb 0x2(%r11,%rdx), %sil leal 0x3(%r12), %edi movb %sil, (%rbp,%r8) movb 0x3(%r11,%rdx), %dl addl $0x4, %r12d movb %dl, (%rbp,%rdi) jmp 0x10718 movl %ecx, %edx shlq $0x4, %rdx movl (%r11,%rdx), %ebx movq 0x8(%r11,%rdx), %rsi movl %ebx, %edx shrl $0x18, %edx leal 0x1(%r12), %r10d movl %r12d, %r8d movb %dl, (%rbp,%r8) movl %ebx, %edx shrl $0x10, %edx leal 0x2(%r12), %edi movb %dl, (%rbp,%r10) leal 0x3(%r12), %edx movb %bh, (%rbp,%rdi) addl $0x4, %r12d movb %bl, (%rbp,%rdx) testq %rsi, %rsi je 0x10708 testq %rbx, %rbx je 0x1070e movl %r12d, %edi xorl %r8d, %r8d leal (%rdi,%r8), %edx movb (%rsi,%r8), %r10b movb %r10b, (%rbp,%rdx) incq %r8 cmpl %r8d, %ebx jne 0x106ee addl %r8d, %r12d jmp 0x1070e addl %r12d, %ebx movl %ebx, %r12d incl %ecx addl $0x3, %r12d andl $-0x4, %r12d decl %eax incq %r14 testl %eax, %eax jne 0x10533 movl %r12d, %r13d movq %r13, %rax addq $0x28, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq
/fundamental[P]rtosc/src/rtosc.c
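The repeated shift/mask stores implement OSC's big-endian wire order; factored out as a helper, the 'i'/'f'/'r'/'c' case looks like this:

#include <cstdint>
#include <cstdio>

// Most significant byte first, as rtosc_amessage emits for 32-bit arguments.
static unsigned store_be32(char* buffer, unsigned pos, std::int32_t i) {
    buffer[pos++] = static_cast<char>((i >> 24) & 0xff);
    buffer[pos++] = static_cast<char>((i >> 16) & 0xff);
    buffer[pos++] = static_cast<char>((i >> 8) & 0xff);
    buffer[pos++] = static_cast<char>(i & 0xff);
    return pos;
}

int main() {
    char buf[4];
    store_be32(buf, 0, 0x01020304);
    std::printf("%02x %02x %02x %02x\n",
                (unsigned char)buf[0], (unsigned char)buf[1],
                (unsigned char)buf[2], (unsigned char)buf[3]);  // 01 02 03 04
}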
rtosc_itr_next
rtosc_arg_val_t rtosc_itr_next(rtosc_arg_itr_t *itr) { //current position provides the value rtosc_arg_val_t result = {0,{0}}; result.type = *itr->type_pos; if(result.type) result.val = extract_arg(itr->value_pos, result.type); //advance itr->type_pos = advance_past_dummy_args(itr->type_pos+1); char type = result.type; int size = arg_size(itr->value_pos, type); itr->value_pos += size; return result; }
pushq %rbp pushq %r15 pushq %r14 pushq %rbx pushq %rax movq %rsi, %rbx movq %rdi, %r14 xorps %xmm0, %xmm0 movups %xmm0, (%rdi) movq $0x0, 0x10(%rdi) movq (%rsi), %r15 movb (%r15), %al movb %al, (%rdi) movsbl %al, %ebp testb %al, %al je 0x107e4 movq 0x8(%rbx), %rdi movl %ebp, %esi callq 0x1081d movq %rax, 0x8(%r14) movq %rdx, 0x10(%r14) incq %r15 movzbl (%r15), %eax cmpl $0x5d, %eax je 0x107e4 cmpl $0x5b, %eax je 0x107e4 movq %r15, (%rbx) movq 0x8(%rbx), %r15 movq %r15, %rdi movl %ebp, %esi callq 0x10933 cltq addq %r15, %rax movq %rax, 0x8(%rbx) movq %r14, %rax addq $0x8, %rsp popq %rbx popq %r14 popq %r15 popq %rbp retq
/fundamental[P]rtosc/src/rtosc.c
arg_size
static unsigned arg_size(const uint8_t *arg_mem, char type) { if(!has_reserved(type)) return 0; const uint8_t *arg_pos=arg_mem; uint32_t blob_length = 0; switch(type) { case 'h': case 't': case 'd': return 8; case 'm': case 'r': case 'f': case 'c': case 'i': return 4; case 'S': case 's': while(*++arg_pos); arg_pos += 4-(arg_pos-arg_mem)%4; return arg_pos-arg_mem; case 'b': blob_length |= (*arg_pos++ << 24); blob_length |= (*arg_pos++ << 16); blob_length |= (*arg_pos++ << 8); blob_length |= (*arg_pos++); if(blob_length%4) blob_length += 4-blob_length%4; arg_pos += blob_length; return arg_pos-arg_mem; default: assert("Invalid Type"); } return -1; }
xorl %eax, %eax leal -0x53(%rsi), %ecx cmpl $0x21, %ecx ja 0x1099a movabsq $0x3846b8001, %rdx # imm = 0x3846B8001 btq %rcx, %rdx jae 0x1099a movsbl %sil, %ecx leal -0x62(%rcx), %eax cmpl $0x12, %eax ja 0x10975 leaq 0x2418(%rip), %rcx # 0x12d78 movslq (%rcx,%rax,4), %rax addq %rcx, %rax jmpq *%rax movl $0x4, %eax retq movl $0x8, %eax retq cmpl $0x53, %ecx jne 0x10995 xorl %ecx, %ecx cmpb $0x0, 0x1(%rdi,%rcx) leaq 0x1(%rcx), %rcx jne 0x1097c leal 0x3(%rcx), %eax testq %rcx, %rcx cmovnsl %ecx, %eax andl $-0x4, %eax jmp 0x109c8 movl $0xffffffff, %eax # imm = 0xFFFFFFFF retq movzbl (%rdi), %eax shll $0x18, %eax movzbl 0x1(%rdi), %ecx shll $0x10, %ecx orl %eax, %ecx movzbl 0x2(%rdi), %edx shll $0x8, %edx orl %ecx, %edx movzbl 0x3(%rdi), %ecx orl %ecx, %edx andl $0x3, %ecx movl %edx, %eax subl %ecx, %eax addl $0x4, %eax testl %ecx, %ecx cmovel %edx, %eax addl $0x4, %eax retq
/fundamental[P]rtosc/src/rtosc.c
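The string and blob cases both round up to OSC's 4-byte field alignment; for strings the terminating NUL is counted first, so the padded size is always strictly larger than strlen. The blob rounding in isolation:

#include <cstdio>

static unsigned round_up4(unsigned n) {
    return (n % 4) ? n + (4 - n % 4) : n;   // next multiple of four, if not already one
}

int main() {
    for (unsigned n = 1; n <= 8; ++n)
        std::printf("%u -> %u\n", n, round_up4(n));  // 1..4 -> 4, 5..8 -> 8
}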
rtosc_argument
rtosc_arg_t rtosc_argument(const char *msg, unsigned idx) { char type = rtosc_type(msg, idx); uint8_t *arg_mem = (uint8_t*)msg + arg_off(msg, idx); return extract_arg(arg_mem, type); }
pushq %rbp pushq %r15 pushq %r14 pushq %r12 pushq %rbx movl %esi, %ebp movq %rdi, %rbx leaq 0x1(%rdi), %rax cmpb $0x0, (%rax) leaq 0x1(%rax), %rax jne 0x109f1 cmpb $0x0, (%rax) leaq 0x1(%rax), %rax je 0x109fa movl %ebp, %ecx movzbl (%rax), %r15d cmpl $0x5b, %r15d je 0x10a20 cmpl $0x5d, %r15d je 0x10a20 testl %ecx, %ecx je 0x10a25 testb %r15b, %r15b je 0x10a25 decl %ecx incq %rax jmp 0x10a05 leaq 0x1(%rbx), %rax cmpb $0x0, (%rax) leaq 0x1(%rax), %rax jne 0x10a29 cmpb $0x0, (%rax) leaq 0x1(%rax), %rax je 0x10a32 movl %ebp, %ecx movzbl (%rax), %edx cmpl $0x5b, %edx je 0x10a54 cmpl $0x5d, %edx je 0x10a54 testl %ecx, %ecx je 0x10a59 testb %dl, %dl je 0x10a59 decl %ecx incq %rax jmp 0x10a3d xorl %r14d, %r14d addl $-0x53, %edx cmpl $0x21, %edx ja 0x10b0d movabsq $0x3846b8001, %rax # imm = 0x3846B8001 btq %rdx, %rax jae 0x10b0d leaq 0x2(%rbx), %r12 cmpb $0x0, -0x1(%r12) leaq 0x1(%r12), %r12 jne 0x10a80 cmpb $0x0, -0x1(%r12) leaq 0x1(%r12), %r12 je 0x10a8d leaq 0x3(%r12), %r14 movl $0x1, %eax incq %rax cmpb $0x0, -0x3(%r14) leaq 0x1(%r14), %r14 jne 0x10aa4 leaq 0x3(%rax), %rcx testq %rax, %rax cmovnsq %rax, %rcx andq $-0x4, %rcx subq %rcx, %rax movzbl -0x1(%r12), %ecx cmpl $0x5b, %ecx je 0x10ad4 cmpl $0x5d, %ecx jne 0x10ad9 incq %r12 jmp 0x10ac4 subq %rax, %r14 testl %ebp, %ebp je 0x10b0a cmpb $0x5b, %cl je 0x10b01 movzbl %cl, %eax cmpl $0x5d, %eax je 0x10b01 movsbl %cl, %esi movq %r14, %rdi callq 0x10933 movl %eax, %eax addq %rax, %r14 decl %ebp je 0x10b0a movb (%r12), %cl incq %r12 jmp 0x10ae0 subl %ebx, %r14d addq %r14, %rbx movsbl %r15b, %esi movq %rbx, %rdi popq %rbx popq %r12 popq %r14 popq %r15 popq %rbp jmp 0x1081d
/fundamental[P]rtosc/src/rtosc.c
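A hedged usage sketch for rtosc_argument, assuming the public rtosc.h API (rtosc_message() serializing a message, and rtosc_arg_t being a union with .i and .f members):

/* sketch: build a "/freq" message with an int and a float, then read
   the arguments back by index with rtosc_argument(). Assumes the
   rtosc.h declarations; not taken from this file. */
#include <rtosc/rtosc.h>
#include <stdio.h>

int main(void)
{
    char buffer[256];
    size_t len = rtosc_message(buffer, sizeof(buffer), "/freq", "if", 42, 440.0f);
    if(!len)
        return 1;                            /* serialization failed */
    printf("i=%d f=%f\n", rtosc_argument(buffer, 0).i,
                          (double)rtosc_argument(buffer, 1).f);
    return 0;
}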
rtosc_message_ring_length
size_t rtosc_message_ring_length(ring_t *ring)
{
    //Check if the message is a bundle
    if(deref(0,ring) == '#' &&
            deref(1,ring) == 'b' &&
            deref(2,ring) == 'u' &&
            deref(3,ring) == 'n' &&
            deref(4,ring) == 'd' &&
            deref(5,ring) == 'l' &&
            deref(6,ring) == 'e' &&
            deref(7,ring) == '\0')
        return bundle_ring_length(ring);

    //Proceed for normal messages
    //Consume path
    unsigned pos = 0;
    while(deref(pos++,ring));
    pos--;

    //Travel through the null word end [1..4] bytes
    for(int i=0; i<4; ++i)
        if(deref(++pos, ring))
            break;

    if(deref(pos, ring) != ',')
        return 0;
    unsigned aligned_pos = pos;
    int arguments = pos+1;
    while(deref(++pos,ring));
    pos += 4-(pos-aligned_pos)%4;

    unsigned toparse = 0;
    {
        int arg = arguments-1;
        while(deref(++arg,ring))
            toparse += has_reserved(deref(arg,ring));
    }

    //Take care of varargs
    while(toparse)
    {
        char arg = deref(arguments++,ring);
        assert(arg);
        uint32_t i;
        switch(arg) {
            case 'h':
            case 't':
            case 'd':
                pos += 8;
                --toparse;
                break;
            case 'm':
            case 'r':
            case 'c':
            case 'f':
            case 'i':
                pos += 4;
                --toparse;
                break;
            case 'S':
            case 's':
                while(deref(++pos,ring));
                pos += 4-(pos-aligned_pos)%4;
                --toparse;
                break;
            case 'b':
                i = 0;
                i |= (deref(pos++,ring) << 24);
                i |= (deref(pos++,ring) << 16);
                i |= (deref(pos++,ring) << 8);
                i |= (deref(pos++,ring));
                pos += i;
                if((pos-aligned_pos)%4)
                    pos += 4-(pos-aligned_pos)%4;
                --toparse;
                break;
            default:
                ;
        }
    }

    return pos <= (ring[0].len+ring[1].len) ? pos : 0;
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx movq 0x8(%rdi), %rcx testq %rcx, %rcx je 0x10b54 movq (%rdi), %rdx cmpb $0x23, (%rdx) jne 0x10c81 movl $0x1, %eax cmpq $0x1, %rcx jne 0x10b83 movq 0x18(%rdi), %rdx jmp 0x10b6e movq 0x18(%rdi), %rdx testq %rdx, %rdx je 0x10c81 movq 0x10(%rdi), %rax cmpb $0x23, (%rax) jne 0x10c81 movl $0x1, %eax subq %rcx, %rax cmpq %rdx, %rax jae 0x10c81 movq 0x10(%rdi), %rdx cmpb $0x62, (%rdx,%rax) jne 0x10c81 movl $0x2, %eax movq %rdi, %rdx cmpq $0x2, %rcx ja 0x10bac subq %rcx, %rax cmpq 0x18(%rdi), %rax jae 0x10c81 leaq 0x10(%rdi), %rdx movq (%rdx), %rdx cmpb $0x75, (%rdx,%rax) jne 0x10c81 movl $0x3, %eax movq %rdi, %rdx cmpq $0x3, %rcx ja 0x10bd8 subq %rcx, %rax cmpq 0x18(%rdi), %rax jae 0x10c81 leaq 0x10(%rdi), %rdx movq (%rdx), %rdx cmpb $0x6e, (%rdx,%rax) jne 0x10c81 movl $0x4, %eax movq %rdi, %rdx cmpq $0x4, %rcx ja 0x10c04 subq %rcx, %rax cmpq 0x18(%rdi), %rax jae 0x10c81 leaq 0x10(%rdi), %rdx movq (%rdx), %rdx cmpb $0x64, (%rdx,%rax) jne 0x10c81 movl $0x5, %eax movq %rdi, %rdx cmpq $0x5, %rcx ja 0x10c28 subq %rcx, %rax cmpq 0x18(%rdi), %rax jae 0x10c81 leaq 0x10(%rdi), %rdx movq (%rdx), %rdx cmpb $0x6c, (%rdx,%rax) jne 0x10c81 movl $0x6, %eax movq %rdi, %rdx cmpq $0x6, %rcx ja 0x10c4c subq %rcx, %rax cmpq 0x18(%rdi), %rax jae 0x10c81 leaq 0x10(%rdi), %rdx movq (%rdx), %rdx cmpb $0x65, (%rdx,%rax) jne 0x10c81 movl $0x7, %eax movq %rdi, %rdx cmpq $0x7, %rcx ja 0x10c74 subq %rcx, %rax cmpq 0x18(%rdi), %rax jae 0x10fb1 leaq 0x10(%rdi), %rdx movq (%rdx), %rdx cmpb $0x0, (%rdx,%rax) je 0x10fb1 leaq 0x10(%rdi), %rax movl $0xfffffffd, %r10d # imm = 0xFFFFFFFD xorl %r9d, %r9d movl %r9d, %edx movl %r10d, %esi leal 0x3(%rsi), %r8d movq %r8, %r10 movq %rdi, %r9 subq %rcx, %r10 jb 0x10caf movq %rax, %r9 movq %r10, %r8 cmpq 0x18(%rdi), %r10 jae 0x10cc1 movq (%r9), %r11 leal 0x3(%rdx), %r9d leal 0x1(%rsi), %r10d cmpb $0x0, (%r11,%r8) jne 0x10c8e xorl %r8d, %r8d leal 0x4(%rsi), %r10d movq %r10, %r9 subq %rcx, %r9 jae 0x10ce2 movq (%rdi), %r9 cmpb $0x0, (%r9,%r10) jne 0x10d0a cmpl $0x3, %r8d jb 0x10d00 jmp 0x10d0a cmpq 0x18(%rdi), %r9 jae 0x10cfa movq (%rax), %r10 cmpb $0x0, (%r10,%r9) jne 0x10d12 cmpl $0x3, %r8d jb 0x10d00 jmp 0x10d12 cmpl $0x2, %r8d ja 0x10d12 incl %r8d addb $0x3, %dl incl %esi jmp 0x10cc4 movq %rdi, %r8 movq %r10, %r9 jmp 0x10d1f movq %rax, %r8 cmpq 0x18(%rdi), %r9 jae 0x10ddc movq (%r8), %r8 cmpb $0x2c, (%r8,%r9) jne 0x10ddc leal 0x9(%rsi), %ebx leal 0x5(%rsi), %r10d movb $0x1, %bpl movl %r10d, %r9d movl %ebx, %r8d movl %ebp, %r11d movl %r9d, %r9d movq %r9, %r15 movq %rdi, %rbx movq %r9, %r14 subq %rcx, %r15 jb 0x10d5d movq %rax, %rbx movq %r15, %r14 cmpq 0x18(%rdi), %r15 jae 0x10d72 movq (%rbx), %r15 leal 0x1(%r8), %ebx leal 0x1(%r11), %ebp incl %r9d cmpb $0x0, (%r15,%r14) jne 0x10d3a movzbl %r11b, %r9d andl $0x3, %r9d subl %r9d, %r8d xorl %r9d, %r9d movabsq $0x708d70002000, %r11 # imm = 0x708D70002000 movl $0xa04109, %ebx # imm = 0xA04109 movl %r10d, %r10d movq %r10, %r14 subq %rcx, %r14 jae 0x10da3 movq (%rdi), %r14 movb (%r14,%r10), %bpl jmp 0x10db0 cmpq 0x18(%rdi), %r14 jae 0x10de3 movq (%rax), %r15 movb (%r15,%r14), %bpl testb %bpl, %bpl je 0x10de3 movzbl %bpl, %r14d addl $-0x46, %r14d cmpl $0x2e, %r14d ja 0x10dd2 movl $0x1, %ebp btq %r14, %r11 jb 0x10dd4 btq %r14, %rbx xorl %ebp, %ebp addl %ebp, %r9d incl %r10d jmp 0x10d8f xorl %eax, %eax jmp 0x10fa6 testl %r9d, %r9d je 0x10f96 leal 0x5(%rsi), %r10d negl %esi leaq 0x1fcb(%rip), %r13 # 0x12dc4 movl %r10d, %r10d movq %r10, %rbx movq %rdi, %r11 movq %r10, %r14 subq %rcx, 
%rbx jb 0x10e16 movq %rax, %r11 movq %rbx, %r14 cmpq 0x18(%rdi), %rbx jae 0x10e3f movq (%r11), %r11 movzbl (%r11,%r14), %r11d leal -0x62(%r11), %ebx cmpl $0x12, %ebx ja 0x10e4c movslq (%r13,%rbx,4), %r11 addq %r13, %r11 jmpq *%r11 addl $0x4, %r8d jmp 0x10e3c addl $0x8, %r8d decl %r9d incl %r10d testl %r9d, %r9d jne 0x10df9 jmp 0x10f96 cmpl $0x53, %r11d jne 0x10e3f leal 0x5(%r8), %ebp leal (%rdx,%r8), %r12d incl %r8d movl %ebp, %ebx movl %r12d, %r14d movl %r8d, %r8d movq %r8, %r11 movq %rdi, %r12 movq %r8, %r15 subq %rcx, %r11 jb 0x10e7f movq %rax, %r12 movq %r11, %r15 cmpq 0x18(%rdi), %r11 jae 0x10e94 movq (%r12), %r11 leal 0x1(%rbx), %ebp leal 0x1(%r14), %r12d incl %r8d cmpb $0x0, (%r11,%r15) jne 0x10e5d movzbl %r14b, %r8d andl $0x3, %r8d subl %r8d, %ebx decl %r9d movl %ebx, %r8d jmp 0x10e3f movl %r8d, %r14d movq %r14, %r12 movq %rdi, %r15 subq %rcx, %r12 jb 0x10ec3 xorl %ebx, %ebx movq %rax, %r15 movq %r12, %r14 cmpq 0x18(%rdi), %r12 jae 0x10ece movq (%r15), %r11 movzbl (%r11,%r14), %ebx shll $0x18, %ebx leal 0x1(%r8), %r11d movl %r11d, %r14d movq %r14, %r12 movq %rdi, %r15 subq %rcx, %r12 jb 0x10eee xorl %ebp, %ebp movq %rax, %r15 movq %r12, %r14 cmpq 0x18(%rdi), %r12 jae 0x10ef9 movq (%r15), %r11 movzbl (%r11,%r14), %ebp shll $0x10, %ebp leal 0x2(%r8), %r11d movl %r11d, %r15d movq %r15, %r13 movq %rdi, %r12 subq %rcx, %r13 jb 0x10f1a xorl %r14d, %r14d movq %rax, %r12 movq %r13, %r15 cmpq 0x18(%rdi), %r13 jae 0x10f27 movq (%r12), %r11 movzbl (%r11,%r15), %r14d shll $0x8, %r14d leal 0x3(%r8), %r11d movl %r11d, %r11d movq %r11, %r12 movq %rdi, %r13 subq %rcx, %r12 jb 0x10f48 xorl %r15d, %r15d movq %rax, %r13 movq %r12, %r11 cmpq 0x18(%rdi), %r12 jae 0x10f51 movq (%r13), %r15 movzbl (%r15,%r11), %r15d movl %ebp, %r11d orl %ebx, %r11d orl %r14d, %r11d leal 0x4(%r8), %r12d orl %r15d, %r11d addl %r12d, %r11d addl %r15d, %r8d addl %ebx, %ebp addl %r8d, %ebp addl %esi, %r14d addl %ebp, %r14d andl $0x3, %r14d movl %r11d, %r8d subl %r14d, %r8d addl $0x4, %r8d testl %r14d, %r14d cmovel %r11d, %r8d decl %r9d leaq 0x1e33(%rip), %r13 # 0x12dc4 jmp 0x10e3f movl %r8d, %edx addq 0x18(%rdi), %rcx xorl %eax, %eax cmpq %rdx, %rcx cmovaeq %rdx, %rax popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp jmp 0x10fbd
/fundamental[P]rtosc/src/rtosc.c
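The deref() helper the ring functions lean on is not shown in this section. From the two-segment bounds checks in the code (and the matching subq/cmpq against ring[0].len and ring[1].len in the assembly) it behaves like the following sketch; the struct layout and field names are assumptions, not quoted from this file:

/* sketch of the deref() helper assumed by rtosc_message_ring_length:
   a two-segment ring where reads past segment 0 continue in segment 1,
   and reads past both segments yield 0 */
#include <stddef.h>

typedef struct { char *data; size_t len; } ring_t;  /* assumed layout */

static char deref(unsigned pos, ring_t *ring)
{
    if(pos < ring[0].len)
        return ring[0].data[pos];
    pos -= ring[0].len;
    return pos < ring[1].len ? ring[1].data[pos] : 0;
}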
bundle_ring_length
static size_t bundle_ring_length(ring_t *ring)
{
    unsigned pos = 8+8;//goto first length field
    uint32_t advance = 0;
    do {
        advance = deref(pos+0, ring) << (8*3) |
                  deref(pos+1, ring) << (8*2) |
                  deref(pos+2, ring) << (8*1) |
                  deref(pos+3, ring) << (8*0);
        if(advance)
            pos += 4+advance;
    } while(advance);

    return pos <= (ring[0].len+ring[1].len) ? pos : 0;
}
pushq %r14 pushq %rbx movq 0x8(%rdi), %rcx leaq 0x10(%rdi), %rax movl $0x10, %esi movl %esi, %edx movq %rdx, %r10 movq %rdi, %r8 movq %rdx, %r9 subq %rcx, %r10 jb 0x10feb xorl %esi, %esi movq %rax, %r8 movq %r10, %r9 cmpq 0x18(%rdi), %r10 jae 0x10ff6 movq (%r8), %rsi movzbl (%rsi,%r9), %esi shll $0x18, %esi leal 0x1(%rdx), %r9d movq %r9, %r11 movq %rdi, %r10 subq %rcx, %r11 jb 0x11014 xorl %r8d, %r8d movq %rax, %r10 movq %r11, %r9 cmpq 0x18(%rdi), %r11 jae 0x11020 movq (%r10), %r8 movzbl (%r8,%r9), %r8d shll $0x10, %r8d leal 0x2(%rdx), %r10d movq %r10, %rbx movq %rdi, %r11 subq %rcx, %rbx jb 0x1103e xorl %r9d, %r9d movq %rax, %r11 movq %rbx, %r10 cmpq 0x18(%rdi), %rbx jae 0x1104a movq (%r11), %r9 movzbl (%r9,%r10), %r9d shll $0x8, %r9d leal 0x3(%rdx), %r10d movq %r10, %r14 movq %rdi, %rbx subq %rcx, %r14 jb 0x11068 xorl %r11d, %r11d movq %rax, %rbx movq %r14, %r10 cmpq 0x18(%rdi), %r14 jae 0x11070 movq (%rbx), %r11 movzbl (%r11,%r10), %r11d orl %esi, %r8d orl %r9d, %r8d orl %r11d, %r8d leal 0x4(%rdx,%r8), %esi jne 0x10fcd addq 0x18(%rdi), %rcx xorl %eax, %eax cmpq %rdx, %rcx cmovaeq %rdx, %rax popq %rbx popq %r14 retq
/fundamental[P]rtosc/src/rtosc.c
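The loop above walks the OSC bundle layout: "#bundle\0" (8 bytes), an 8-byte timetag, then repeated [4-byte big-endian size][message] records, which is why pos starts at 16 and hops size+4 per record. The big-endian decode it performs per record, isolated as a self-contained helper on a flat buffer:

/* sketch: the big-endian 32-bit size field bundle_ring_length decodes,
   shown on a flat buffer instead of a ring */
#include <stdint.h>

static uint32_t read_be32(const uint8_t *p)
{
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
           ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}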
rtosc_valid_message_p
bool rtosc_valid_message_p(const char *msg, size_t len)
{
    //Validate Path Characters (assumes printable characters are sufficient)
    if(*msg != '/')
        return false;
    const char *tmp = msg;
    for(unsigned i=0; i<len; ++i) {
        if(*tmp == 0)
            break;
        if(!isprint(*tmp))
            return false;
        tmp++;
    }

    //tmp is now either pointing to a null or the end of the string
    const size_t offset1 = tmp-msg;
    size_t offset2 = tmp-msg;
    for(; offset2<len; offset2++) {
        if(*tmp == ',')
            break;
        tmp++;
    }

    //Too many NULL bytes
    if(offset2-offset1 > 4)
        return false;

    if((offset2 % 4) != 0)
        return false;

    size_t observed_length = rtosc_message_length(msg, len);
    return observed_length == len;
}
cmpb $0x2f, (%rdi) jne 0x11161 pushq %r14 pushq %rbx subq $0x28, %rsp movq %rsi, %rbx movq %rdi, %r14 movq %rdi, %rax testq %rsi, %rsi je 0x11108 callq 0x3290 movq (%rax), %rcx movb $0x2f, %dl movl $0x1, %esi movsbq %dl, %rdx testb $0x40, 0x1(%rcx,%rdx,2) je 0x11164 movq %rsi, %rax movl %eax, %edx cmpq %rbx, %rdx jae 0x11105 movb (%r14,%rax), %dl leaq 0x1(%rax), %rsi testb %dl, %dl jne 0x110e4 addq %r14, %rax movq %rax, %rcx subq %r14, %rcx movq %rcx, %rdx cmpq %rbx, %rcx jae 0x1112c movq %rcx, %rdx cmpb $0x2c, (%rax) je 0x1112c incq %rax incq %rdx cmpq %rbx, %rdx jb 0x11119 movq %rbx, %rdx movq %rdx, %rax subq %rcx, %rax cmpq $0x5, %rax setae %al testb $0x3, %dl setne %cl orb %al, %cl jne 0x11164 movq %rsp, %rdi movq %r14, (%rdi) movq %rbx, 0x8(%rdi) xorps %xmm0, %xmm0 movaps %xmm0, 0x10(%rdi) callq 0x10b24 cmpq %rbx, %rax sete %al jmp 0x11166 xorl %eax, %eax retq xorl %eax, %eax addq $0x28, %rsp popq %rbx popq %r14 retq
/fundamental[P]rtosc/src/rtosc.c
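A short sketch of the intended use: validate a received buffer before trusting rtosc_argument() on it. It assumes both functions are declared in rtosc.h as above; the checked_freq wrapper itself is hypothetical:

/* sketch: guard argument extraction with rtosc_valid_message_p() */
#include <rtosc/rtosc.h>

float checked_freq(const char *buf, size_t len)
{
    if(!rtosc_valid_message_p(buf, len))
        return 0.0f;                    /* reject malformed packets */
    return rtosc_argument(buf, 0).f;    /* safe: message is well-formed */
}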
ncnn::copy_cut_border(ncnn::Mat const&, ncnn::Mat&, int, int, int, int, ncnn::Option const&)
void copy_cut_border(const Mat& src, Mat& dst, int top, int bottom, int left, int right, const Option& opt)
{
    if (left + right > src.w || top + bottom > src.h)
    {
        NCNN_LOGE("copy_cut_border parameter error, top: %d, bottom: %d, left: %d, right: %d, src.w: %d, src.h: %d", top, bottom, left, right, src.w, src.h);
        return;
    }

    Layer* crop = create_layer(LayerType::Crop);

    ParamDict pd;
    pd.set(0, left);
    pd.set(1, top);
    pd.set(2, 0);
    pd.set(3, src.w - left - right);
    pd.set(4, src.h - top - bottom);
    pd.set(5, -233);

    crop->load_param(pd);

    crop->create_pipeline(opt);

    crop->forward(src, dst, opt);

    crop->destroy_pipeline(opt);

    delete crop;
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x28, %rsp movl %r8d, %ebp movl %edx, %r13d movq %rdi, %r14 leal (%r9,%rbp), %edx movl 0x2c(%rdi), %r10d cmpl %r10d, %edx jg 0x2c4fb leal (%rcx,%r13), %eax movl 0x30(%r14), %r11d cmpl %r11d, %eax jg 0x2c4ff movl %edx, 0x1c(%rsp) movl %eax, 0x18(%rsp) movq %rsi, 0x20(%rsp) movq 0x60(%rsp), %rbx movl $0x7, %edi callq 0x42721 movq %rax, %r15 leaq 0x8(%rsp), %r12 movq %r12, %rdi callq 0x39dcc movq %r12, %rdi xorl %esi, %esi movl %ebp, %edx callq 0x3a32e leaq 0x8(%rsp), %rdi movl $0x1, %esi movl %r13d, %edx callq 0x3a32e leaq 0x8(%rsp), %rdi movl $0x2, %esi xorl %edx, %edx callq 0x3a32e movl 0x2c(%r14), %edx subl 0x1c(%rsp), %edx leaq 0x8(%rsp), %rdi movl $0x3, %esi callq 0x3a32e movl 0x30(%r14), %edx subl 0x18(%rsp), %edx leaq 0x8(%rsp), %rdi movl $0x4, %esi callq 0x3a32e leaq 0x8(%rsp), %rdi movl $0x5, %esi movl $0xffffff17, %edx # imm = 0xFFFFFF17 callq 0x3a32e movq (%r15), %rax leaq 0x8(%rsp), %rsi movq %r15, %rdi callq *0x10(%rax) movq (%r15), %rax movq %r15, %rdi movq %rbx, %rsi callq *0x20(%rax) movq (%r15), %rax movq %r15, %rdi movq %r14, %rsi movq 0x20(%rsp), %rdx movq %rbx, %rcx callq *0x38(%rax) movq (%r15), %rax movq %r15, %rdi movq %rbx, %rsi callq *0x28(%rax) movq (%r15), %rax movq %r15, %rdi callq *0x8(%rax) leaq 0x8(%rsp), %rdi callq 0x39ed4 addq $0x28, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq movl 0x30(%r14), %r11d movq 0x4deada(%rip), %rbx # 0x50afe0 movq (%rbx), %rdi leaq 0x478e44(%rip), %rsi # 0x4a5354 movl %r13d, %edx movl %ebp, %r8d xorl %eax, %eax pushq %r11 pushq %r10 callq 0x24180 addq $0x10, %rsp movq (%rbx), %rsi movl $0xa, %edi addq $0x28, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp jmp 0x24260 movq %rax, %rbx leaq 0x8(%rsp), %rdi callq 0x39ed4 movq %rbx, %rdi callq 0x243e0
/Tencent[P]ncnn/src/mat.cpp
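copy_cut_border builds a one-off Crop layer under the hood, so callers only see a plain function. A hedged usage sketch, assuming the declaration in ncnn's mat.h matches the definition above (include path may differ per install):

// sketch: crop 1 pixel off each edge with ncnn::copy_cut_border;
// dst comes back as (w-2) x (h-2) per the pd.set(3)/pd.set(4) math above
#include "mat.h"

ncnn::Mat crop_border_by_one(const ncnn::Mat& src)
{
    ncnn::Option opt;   // default options
    ncnn::Mat dst;
    // top, bottom, left, right = 1
    ncnn::copy_cut_border(src, dst, 1, 1, 1, 1, opt);
    return dst;
}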
ncnn::Convolution_x86_fma::forward_int8_x86(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const
int Convolution_x86_fma::forward_int8_x86(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
    int elembits = bottom_blob.elembits();

    Mat bottom_blob_int8 = bottom_blob;
    if (elembits != 8)
    {
        Option opt_q = opt;
        opt_q.blob_allocator = opt.workspace_allocator;
        quantize_to_int8(bottom_blob, bottom_blob_int8, bottom_blob_int8_scales, opt_q);
        if (bottom_blob_int8.empty())
            return -100;
    }

    // NCNN_LOGE("Convolution_x86_fma input %d x %d ksize=%d %d stride=%d %d", w, h, kernel_w, kernel_h, stride_w, stride_h);

    Mat bottom_blob_bordered;
    make_padding(bottom_blob_int8, bottom_blob_bordered, opt);
    if (bottom_blob_bordered.empty())
        return -100;

    int w = bottom_blob_bordered.w;
    int h = bottom_blob_bordered.h;
    int channels = bottom_blob_bordered.c;
    int elempack = bottom_blob_bordered.elempack;

    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
    const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;

    int outw = (w - kernel_extent_w) / stride_w + 1;
    int outh = (h - kernel_extent_h) / stride_h + 1;

    bool use_int8_requantize = int8_scale_term > 100;

    int out_elempack = 1;
#if __SSE2__
    if (opt.use_packing_layout)
    {
        if (use_int8_requantize)
            out_elempack = num_output % 8 == 0 ? 8 : 1;
        else
        {
#if __AVX512F__
            out_elempack = num_output % 16 == 0 ? 16 : num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#elif __AVX__
            out_elempack = num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#else
            out_elempack = num_output % 4 == 0 ? 4 : 1;
#endif
        }
    }
#endif // __SSE2__
    size_t out_elemsize = use_int8_requantize ? 1u * out_elempack : 4u * out_elempack;

    // NCNN_LOGE("forward_int8_x86 %d %d %d %d %d", w, h, bottom_blob_bordered.c, elempack, out_elempack);

    top_blob.create(outw, outh, num_output / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
    if (top_blob.empty())
        return -100;

    const int num_input = channels * elempack;

    int out_elempack_int32 = 1;
#if __SSE2__
    if (opt.use_packing_layout)
    {
        if (use_int8_requantize)
        {
#if __AVX__
            out_elempack_int32 = num_output % 8 == 0 ? 8 : 1;
#else
            out_elempack_int32 = num_output % 4 == 0 ? 4 : 1;
#endif
        }
        else
        {
#if __AVX512F__
            out_elempack_int32 = num_output % 16 == 0 ? 16 : num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#elif __AVX__
            out_elempack_int32 = num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#else
            out_elempack_int32 = num_output % 4 == 0 ? 4 : 1;
#endif
        }
    }
#endif // __SSE2__

    bool prefer_winograd = (opt.use_winograd23_convolution || opt.use_winograd43_convolution) && (num_input > 8 || num_output > 8);

#if __SSE2__
    if (opt.use_packing_layout)
    {
        if ((opt.use_winograd_convolution && prefer_winograd && kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1) || (!opt.use_sgemm_convolution))
        {
            // TODO implement winograd and packed int8 avx pack8 output
            out_elempack_int32 = num_output % 4 == 0 ? 4 : 1;
        }
    }
#endif // __SSE2__

    Mat top_blob_int32;
    top_blob_int32.create(outw, outh, num_output / out_elempack_int32, (size_t)(4u * out_elempack_int32), out_elempack_int32, opt.workspace_allocator);
    if (top_blob_int32.empty())
        return -100;

    int _nT = nT ? nT : opt.num_threads;
    if (nT != 0 && opt.num_threads != nT)
    {
        // force num_threads the same as in create_pipeline
        // so we could use pre-packed A/B from the same tile config
        NCNN_LOGE("opt.num_threads %d changed, convolution gemm will use load-time value %d", opt.num_threads, nT);
    }

    int ret = 0;
    if (opt.use_winograd_convolution && prefer_winograd && kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1)
    {
        if (opt.use_winograd43_convolution && !weight_winograd43_data.empty())
            ret = conv3x3s1_winograd43_int8(bottom_blob_bordered, top_blob_int32, weight_winograd43_data, _nT, opt);
        else
            ret = conv3x3s1_winograd23_int8(bottom_blob_bordered, top_blob_int32, weight_winograd23_data, _nT, opt);
    }
    else if (opt.use_sgemm_convolution)
    {
        ret = convolution_im2col_gemm_int8(bottom_blob_bordered, top_blob_int32, weight_sgemm_data, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, _nT, opt);
    }
    else
    {
        convolution_packed_int8(bottom_blob_bordered, top_blob_int32, weight_data_tm, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, opt);
    }
    if (ret != 0)
        return ret;

#if __SSE2__
    if (opt.use_packing_layout)
    {
        // NCNN_LOGE("top_blob_int32 %d %d", top_blob_int32.c, top_blob_int32.elempack);

        if (use_int8_requantize)
        {
            // TODO implement winograd and packed int8 pack1 output
            if (top_blob_int32.elempack == 4 && top_blob_int32.c % 2 == 1)
            {
                Mat tmp;
                convert_packing(top_blob_int32, tmp, 1, opt);
                top_blob_int32 = tmp;
            }
            if (top_blob_int32.elempack == 4 && top_blob_int32.c % 2 == 0)
            {
                Mat tmp;
                convert_packing(top_blob_int32, tmp, 8, opt);
                top_blob_int32 = tmp;
            }
        }
        else
        {
#if __AVX__
            // TODO implement winograd and packed int8 avx pack8 output
            if (top_blob_int32.elempack == 4 && top_blob_int32.c % 2 == 0)
            {
                Mat tmp;
                convert_packing(top_blob_int32, tmp, 8, opt);
                top_blob_int32 = tmp;
            }
#endif // __AVX__
        }
    }
#endif

    if (use_int8_requantize)
    {
        requantize_from_int32_to_int8(top_blob_int32, top_blob, scale_in_data, top_blob_int8_scales, bias_data, activation_type, activation_params, opt);
    }
    else
    {
        dequantize_from_int32(top_blob_int32, top_blob, scale_in_data, bias_data, opt);

        if (activation)
        {
            activation->forward_inplace(top_blob, opt);
        }
    }

    return 0;
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x488, %rsp # imm = 0x488 movq %rcx, %rbx movq %rdx, %r13 movq %rdi, %r14 movl 0x18(%rsi), %ecx movq 0x10(%rsi), %rdi testl %ecx, %ecx je 0xd31c5 leal (,%rdi,8), %eax cltd idivl %ecx cmpl $0x8, %eax sete %al jmp 0xd31c7 xorl %eax, %eax movq 0x8(%rsi), %rdx vmovups (%rsi), %xmm0 vmovaps %xmm0, 0x410(%rsp) movq %rdi, 0x420(%rsp) movl %ecx, 0x428(%rsp) movq 0x20(%rsi), %rcx movq %rcx, 0x430(%rsp) vmovdqu 0x28(%rsi), %xmm0 vmovdqu %xmm0, 0x438(%rsp) movl 0x38(%rsi), %ecx movl %ecx, 0x448(%rsp) movq 0x40(%rsi), %rcx movq %rcx, 0x450(%rsp) testq %rdx, %rdx je 0xd321f lock incl (%rdx) testb %al, %al jne 0xd328b vmovdqu (%rbx), %ymm0 vmovdqu 0x20(%rbx), %ymm1 leaq 0x200(%rsp), %rcx vmovdqu %ymm1, 0x20(%rcx) vmovdqu %ymm0, (%rcx) movq 0x10(%rbx), %rax movq %rax, 0x8(%rcx) leaq 0x238(%r14), %rdx leaq 0x410(%rsp), %rax movq %rsi, %rdi movq %rax, %rsi vzeroupper callq 0x2cdc4 cmpq $0x0, 0x410(%rsp) je 0xd3391 movslq 0x448(%rsp), %rax imulq 0x450(%rsp), %rax testq %rax, %rax je 0xd3391 leaq 0xf0(%rsp), %rdx movq $0x0, 0x40(%rdx) vpxor %xmm0, %xmm0, %xmm0 vmovdqa %xmm0, (%rdx) vmovdqu %xmm0, 0xc(%rdx) vmovdqa %xmm0, 0x20(%rdx) vmovdqu %xmm0, 0x2c(%rdx) leaq 0x410(%rsp), %rsi movq %r14, %rdi movq %rbx, 0x178(%rsp) movq %rbx, %rcx callq 0x504a0 movl $0xffffff9c, %ebx # imm = 0xFFFFFF9C cmpq $0x0, 0xf0(%rsp) je 0xdb477 movq %r14, 0xd0(%rsp) movslq 0x128(%rsp), %r15 movq 0x130(%rsp), %rax imulq %r15, %rax testq %rax, %rax je 0xdb477 movq 0xd0(%rsp), %rdi movl 0xd0(%rdi), %ecx movl 0xd4(%rdi), %eax decl %eax imull 0xdc(%rdi), %eax notl %eax movl 0xd8(%rdi), %esi decl %esi imull 0xe0(%rdi), %esi notl %esi addl 0x11c(%rsp), %eax cltd idivl 0xe4(%rdi) movl %eax, %ebp incl %ebp addl 0x120(%rsp), %esi movl %esi, %eax cltd idivl 0xe8(%rdi) movl %eax, %r14d incl %r14d movl 0x108(%rdi), %edx movl $0x1, %r9d movq 0x178(%rsp), %rax cmpb $0x1, 0x27(%rax) jne 0xd33b4 cmpl $0x65, %edx jl 0xd339b testb $0x7, %cl movl $0x8, %eax movl $0x1, %r9d cmovel %eax, %r9d jmp 0xd33b4 movl $0xffffff9c, %ebx # imm = 0xFFFFFF9C jmp 0xdb4b9 xorl %eax, %eax testb $0x3, %cl sete %al testb $0x7, %cl leal 0x1(%rax,%rax,2), %eax movl $0x8, %r9d cmovnel %eax, %r9d movl 0x108(%rsp), %r12d leal (,%r9,4), %r8d movl %edx, 0x28c(%rsp) cmpl $0x65, %edx cmovgel %r9d, %r8d movl %ecx, %eax cltd idivl %r9d subq $0x8, %rsp movq %r13, %rdi movl %ebp, %esi movl %r14d, %edx movl %eax, %ecx movq 0x180(%rsp), %rax pushq 0x8(%rax) callq 0x2a094 addq $0x10, %rsp cmpq $0x0, (%r13) je 0xdb477 movslq 0x38(%r13), %rax imulq 0x40(%r13), %rax testq %rax, %rax je 0xdb477 movq 0x178(%rsp), %r10 movb 0x27(%r10), %al movl $0x1, %r9d cmpb $0x1, %al jne 0xd3472 movq 0xd0(%rsp), %rcx movl 0xd0(%rcx), %ecx cmpl $0x65, 0x28c(%rsp) jl 0xd3459 testb $0x7, %cl movl $0x8, %ecx movl $0x1, %r9d cmovel %ecx, %r9d jmp 0xd3472 xorl %edx, %edx testb $0x3, %cl sete %dl testb $0x7, %cl leal 0x1(%rdx,%rdx,2), %ecx movl $0x8, %r9d cmovnel %ecx, %r9d cmpb $0x0, 0x37(%r10) jne 0xd3480 cmpb $0x1, 0x38(%r10) jne 0xd34a2 imull %r15d, %r12d movb $0x1, %r15b cmpl $0x8, %r12d jg 0xd34a5 movq 0xd0(%rsp), %rcx cmpl $0x9, 0xd0(%rcx) setge %r15b jmp 0xd34a5 xorl %r15d, %r15d testb %al, %al je 0xd3506 movq 0xd0(%rsp), %rsi vmovdqu 0xd4(%rsi), %xmm0 cmpl $0x1, 0xe4(%rsi) sete %al cmpl $0x1, 0xe8(%rsi) sete %cl vpxor 0x3d406b(%rip), %xmm0, %xmm0 # 0x4a7540 vptest %xmm0, %xmm0 sete %dl andb 0x1c(%r10), %al andb %cl, %al andb %dl, %al testb %r15b, %al jne 0xd34f1 cmpb $0x0, 0x1d(%r10) jne 0xd350e xorl %eax, %eax testb $0x3, 0xd0(%rsi) 
sete %al leal (%rax,%rax,2), %r9d incl %r9d jmp 0xd350e movq 0xd0(%rsp), %rsi movq %r13, 0x338(%rsp) leaq 0x1a0(%rsp), %rdi movq $0x0, 0x40(%rdi) vpxor %xmm0, %xmm0, %xmm0 vmovdqa %xmm0, (%rdi) vmovdqu %xmm0, 0xc(%rdi) vmovdqa %xmm0, 0x20(%rdi) vmovdqu %xmm0, 0x2c(%rdi) movl 0xd0(%rsi), %eax cltd idivl %r9d leal (,%r9,4), %r8d subq $0x8, %rsp movl %ebp, %esi movl %r14d, %edx movl %eax, %ecx pushq 0x10(%r10) callq 0x2a094 addq $0x10, %rsp movl $0xffffff9c, %ebx # imm = 0xFFFFFF9C cmpq $0x0, 0x1a0(%rsp) je 0xdb435 movslq 0x1d8(%rsp), %rax imulq 0x1e0(%rsp), %rax testq %rax, %rax je 0xdb435 movq 0xd0(%rsp), %r14 movl 0x2d0(%r14), %esi testl %esi, %esi movq 0x178(%rsp), %rbx movl 0x4(%rbx), %edi movl %esi, %r12d cmovel %edi, %r12d sete %al cmpl %esi, %edi sete %cl orb %al, %cl je 0xd6cf7 vmovdqu 0xd4(%r14), %xmm1 movl 0xe4(%r14), %eax movq %rax, 0x58(%rsp) cmpl $0x1, %eax sete %al movl 0xe8(%r14), %ecx movq %rcx, 0x28(%rsp) cmpl $0x1, %ecx sete %cl vpxor 0x3d3f43(%rip), %xmm1, %xmm0 # 0x4a7540 vptest %xmm0, %xmm0 sete %dl andb 0x1c(%rbx), %al andb %cl, %al andb %r15b, %al andb %dl, %al cmpb $0x1, %al jne 0xd3672 cmpb $0x1, 0x38(%rbx) movq 0xd0(%rsp), %rcx jne 0xd57f0 cmpq $0x0, 0x3b0(%rcx) je 0xd57f0 movslq 0x3e8(%rcx), %rax imulq 0x3f0(%rcx), %rax testq %rax, %rax je 0xd57f0 leaq 0x3b0(%rcx), %rdx leaq 0xf0(%rsp), %rdi leaq 0x1a0(%rsp), %rsi movl %r12d, %ecx movq %rbx, %r8 callq 0xec1d4 jmp 0xd6d31 vmovd %xmm1, %ebp cmpb $0x1, 0x1d(%rbx) vmovdqa %xmm1, 0xc0(%rsp) jne 0xd5837 vpextrd $0x1, %xmm1, %ecx movq %r12, %rax movq %rcx, 0x40(%rsp) movl %ecx, %r13d imull %ebp, %r13d movl 0x1b8(%rsp), %ebx movl 0x1d0(%rsp), %r12d imull 0x1d8(%rsp), %ebx imull 0x1cc(%rsp), %r12d movl 0x128(%rsp), %r14d imull %r13d, %r14d imull 0x108(%rsp), %r14d subq $0x8, %rsp leaq 0x18c(%rsp), %rcx leaq 0x154(%rsp), %r8 leaq 0x204(%rsp), %r9 movl %ebx, %edi movl %r12d, %esi movl %r14d, %edx movq %rax, 0x148(%rsp) pushq %rax callq 0xf4d09 addq $0x10, %rsp movl 0x184(%rsp), %ecx leal (%rbx,%rcx), %eax decl %eax cltd movq %rcx, (%rsp) idivl %ecx movl %eax, 0xe0(%rsp) movl 0x14c(%rsp), %r15d leal (%r12,%r15), %eax decl %eax cltd idivl %r15d movl %eax, %ecx movl 0x1fc(%rsp), %esi leal (%r14,%rsi), %eax decl %eax cltd idivl %esi movl %eax, %edx imull %r15d, %esi movq 0x178(%rsp), %rax movq 0x10(%rax), %r9 leaq 0x200(%rsp), %rdi movq $0x0, 0x40(%rdi) vpxor %xmm0, %xmm0, %xmm0 vmovdqa %xmm0, (%rdi) vmovdqu %xmm0, 0xc(%rdi) vmovdqa %xmm0, 0x20(%rdi) vmovdqu %xmm0, 0x2c(%rdi) movl $0x1, %r8d movl %edx, 0x8(%rsp) movl %ecx, 0xa0(%rsp) callq 0x2b0d2 movq %r14, 0xb8(%rsp) movq %r12, 0xb0(%rsp) movq %rbx, 0xe8(%rsp) movl $0xffffff9c, %ebx # imm = 0xFFFFFF9C cmpq $0x0, 0x200(%rsp) je 0xdae62 movslq 0x238(%rsp), %rax imulq 0x240(%rsp), %rax testq %rax, %rax je 0xdae62 movl 0xa0(%rsp), %eax imull 0x8(%rsp), %eax movl %eax, 0xa0(%rsp) testl %eax, %eax movq (%rsp), %rsi jle 0xd5968 cmpl $0x1, 0x28(%rsp) sete %al movq 0x58(%rsp), %r9 cmpl $0x1, %r9d sete %cl vmovdqa 0xc0(%rsp), %xmm1 vpcmpeqd 0x3d3d20(%rip), %xmm1, %xmm0 # 0x4a7540 vmovmskps %xmm0, %edx movl %edx, %esi shrb $0x3, %sil shrb $0x2, %dl movl %ebp, %edi xorl $0x1, %edi movq 0x40(%rsp), %r8 xorl $0x1, %r8d orl %edi, %r8d sete %dil andb %sil, %dl andb %al, %dil andb %cl, %dil andb %dl, %dil movb %dil, 0xd8(%rsp) leal -0x1(%rbp), %eax vpextrd $0x2, %xmm1, %ebx imull %ebx, %eax notl %eax movq %rax, 0x1e8(%rsp) vmovd %r9d, %xmm0 vpshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0] vmovdqa %xmm0, 0x150(%rsp) vpextrd $0x3, %xmm1, %eax movq %rax, 0x38(%rsp) xorl %eax, %eax movq 
0xb0(%rsp), %rdi movq 0xb8(%rsp), %r8 movq %rbp, 0x60(%rsp) movl %r13d, 0x68(%rsp) movq %rbx, 0x70(%rsp) movq %rax, 0x48(%rsp) cltd idivl 0x8(%rsp) movl 0x14c(%rsp), %ecx movl %ecx, %r9d imull %eax, %r9d movl 0x1fc(%rsp), %esi movl %esi, %r14d imull %edx, %r14d movl %edi, %r12d movq %r9, 0x30(%rsp) subl %r9d, %r12d cmpl %r12d, %ecx cmovll %ecx, %r12d subl %r14d, %r8d cmpl %r8d, %esi cmovll %esi, %r8d movq %r8, (%rsp) cltq imulq 0x240(%rsp), %rax movq 0x210(%rsp), %rcx imulq %rcx, %rax addq 0x200(%rsp), %rax movslq 0x22c(%rsp), %rsi movl 0x218(%rsp), %edi movq 0x220(%rsp), %r8 imulq %rsi, %rdx imulq %rcx, %rdx addq %rax, %rdx movq %rdx, 0x290(%rsp) movq $0x0, 0x298(%rsp) movq %rcx, 0x2a0(%rsp) movl %edi, 0x2a8(%rsp) movq %r8, 0x2b0(%rsp) movl $0x2, 0x2b8(%rsp) movl %esi, 0x2bc(%rsp) movabsq $0x100000001, %rax # imm = 0x100000001 movq %rax, 0x2c0(%rsp) movl $0x1, 0x2c8(%rsp) movq %rsi, 0x2d0(%rsp) callq 0x3c859 testl %eax, %eax je 0xd39dc leaq 0xf0(%rsp), %rdi leaq 0x290(%rsp), %rsi movq 0x30(%rsp), %rdx movl %r12d, %ecx movl %r14d, %r8d movq (%rsp), %r9 pushq 0x28(%rsp) pushq 0x60(%rsp) pushq 0x48(%rsp) pushq %rbx pushq 0x60(%rsp) pushq %rbp callq 0x151518 addq $0x30, %rsp jmp 0xd3a68 callq 0x3c84b testl %eax, %eax je 0xd3a21 leaq 0xf0(%rsp), %rdi leaq 0x290(%rsp), %rsi movq 0x30(%rsp), %rdx movl %r12d, %ecx movl %r14d, %r8d movq (%rsp), %r9 pushq 0x28(%rsp) pushq 0x60(%rsp) pushq 0x48(%rsp) pushq %rbx pushq 0x60(%rsp) pushq %rbp callq 0x14720e addq $0x30, %rsp jmp 0xd3a68 callq 0x3c83d testl %eax, %eax je 0xd3acd leaq 0xf0(%rsp), %rdi leaq 0x290(%rsp), %rsi movq 0x30(%rsp), %rdx movl %r12d, %ecx movl %r14d, %r8d movq (%rsp), %r9 pushq 0x28(%rsp) pushq 0x60(%rsp) pushq 0x48(%rsp) pushq %rbx pushq 0x60(%rsp) pushq %rbp callq 0x159e94 addq $0x30, %rsp movq 0x298(%rsp), %rax testq %rax, %rax je 0xd3aa4 lock decl (%rax) jne 0xd3aa4 movq 0x290(%rsp), %rsi movq 0x2b0(%rsp), %rdi testq %rdi, %rdi je 0xd3a97 movq (%rdi), %rax callq *0x18(%rax) jmp 0xd3aa4 testq %rsi, %rsi je 0xd3aa4 movq %rsi, %rdi callq 0x244a0 movq 0x48(%rsp), %rax incl %eax cmpl 0xa0(%rsp), %eax movq 0xb0(%rsp), %rdi movq 0xb8(%rsp), %r8 jne 0xd38ab jmp 0xd5959 movq %r14, 0xc0(%rsp) movq 0x290(%rsp), %r11 movq 0x130(%rsp), %r10 cmpb $0x0, 0xd8(%rsp) movq %r10, 0x10(%rsp) movl %r12d, 0x20(%rsp) je 0xd434f movl 0x108(%rsp), %r15d xorl %edi, %edi movq 0xc0(%rsp), %rcx movslq %ecx, %rax movq %rax, 0x78(%rsp) movslq 0x30(%rsp), %rsi cmpl $0x8, %r12d movl %r15d, 0x18(%rsp) movq %rsi, 0x170(%rsp) jl 0xd3d2c leal 0x7(%rcx), %eax testl %ecx, %ecx cmovnsl %ecx, %eax sarl $0x3, %eax cltq movq %rax, 0x90(%rsp) movq (%rsp), %rcx leal 0x7(%rcx), %eax testl %ecx, %ecx cmovnsl %ecx, %eax sarl $0x3, %eax movl %eax, 0x88(%rsp) leal (,%r10,8), %eax movslq %eax, %rdx movslq %r10d, %rdi leal (%r10,%r10), %eax movslq %eax, %r8 movl %ecx, %eax andl $-0x2, %eax movl %eax, 0x98(%rsp) movl %r12d, %eax movq %rax, 0x30(%rsp) leaq (%rsi,%rdi), %r14 movq %rsi, %r15 movq %rdi, %rsi xorl %edi, %edi movq %rsi, 0x80(%rsp) movq %rdi, %rax cmpl $0x8, 0x18(%rsp) jne 0xd3c5b cmpl $0x8, (%rsp) jl 0xd3d0c movq 0x130(%rsp), %rcx imulq 0x90(%rsp), %rcx imulq 0x100(%rsp), %rcx addq 0xf0(%rsp), %rcx movq 0x170(%rsp), %rsi leaq (%rax,%rsi), %rdi movq 0x80(%rsp), %rsi leaq (%rcx,%rdi,8), %rdi movl 0x88(%rsp), %r9d vmovdqa (%rdi), %xmm0 vmovdqa 0x10(%rdi), %xmm1 vmovdqa 0x20(%rdi), %xmm2 vmovdqa 0x30(%rdi), %xmm3 vpunpcklwd %xmm1, %xmm0, %xmm4 # xmm4 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] vpunpckhwd %xmm1, %xmm0, %xmm0 # xmm0 = 
xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] vpunpcklwd %xmm3, %xmm2, %xmm1 # xmm1 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3] vpunpckhwd %xmm3, %xmm2, %xmm2 # xmm2 = xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7] vpunpcklwd %xmm0, %xmm4, %xmm3 # xmm3 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3] vpunpckhwd %xmm0, %xmm4, %xmm0 # xmm0 = xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7] vpunpcklwd %xmm2, %xmm1, %xmm4 # xmm4 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] vpunpckhwd %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] vpunpcklqdq %xmm4, %xmm3, %xmm2 # xmm2 = xmm3[0],xmm4[0] vpunpckhqdq %xmm4, %xmm3, %xmm3 # xmm3 = xmm3[1],xmm4[1] vpunpcklqdq %xmm1, %xmm0, %xmm4 # xmm4 = xmm0[0],xmm1[0] vpunpckhqdq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[1],xmm1[1] vmovdqa %xmm2, (%r11) vmovdqa %xmm3, 0x10(%r11) vmovdqa %xmm4, 0x20(%r11) vmovdqa %xmm0, 0x30(%r11) addq $0x40, %r11 addq %rdx, %rdi decl %r9d jne 0xd3bf5 cmpl $0x1, 0x18(%rsp) jne 0xd3d0c movq 0xf0(%rsp), %rdi movq 0x130(%rsp), %r9 imulq 0x78(%rsp), %r9 imulq 0x100(%rsp), %r9 cmpl $0x2, (%rsp) jl 0xd3ce1 leaq (%rdi,%r15), %rcx addq %r14, %rdi movl $0x1, %r10d movq (%rsp), %rsi vmovq (%rcx,%r9), %xmm0 vmovq (%rdi,%r9), %xmm1 vpunpcklbw %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] vmovdqu %xmm0, (%r11) addq $0x10, %r11 addq %r8, %rcx addq %r8, %rdi addl $0x2, %r10d cmpl %esi, %r10d jl 0xd3c9c addq %r9, %rcx movl 0x98(%rsp), %r9d movq %rcx, %rdi movq 0x10(%rsp), %r10 movq 0x80(%rsp), %rsi jmp 0xd3cf2 addq %rax, %rdi addq 0x170(%rsp), %rdi addq %r9, %rdi xorl %r9d, %r9d movq (%rsp), %rcx subl %r9d, %ecx jle 0xd3d0c movq (%rdi), %r9 movq %r9, (%r11) addq $0x8, %r11 addq %rsi, %rdi decl %ecx jne 0xd3cfb leaq 0x8(%rax), %rdi addq $0xf, %rax addq $0x8, %r15 addq $0x8, %r14 cmpq 0x30(%rsp), %rax jb 0xd3b9b movl 0x18(%rsp), %r15d movl %edi, %eax orl $0x3, %eax movslq %r12d, %r8 cmpl %r12d, %eax movq %r8, 0x90(%rsp) jge 0xd403f movq 0xc0(%rsp), %rcx leal 0x7(%rcx), %eax testl %ecx, %ecx cmovnsl %ecx, %eax sarl $0x3, %eax movq (%rsp), %r14 leal 0x7(%r14), %ecx testl %r14d, %r14d cmovnsl %r14d, %ecx cltq movq %rax, 0x188(%rsp) sarl $0x3, %ecx movl %ecx, 0x168(%rsp) leal (,%r10,8), %eax movslq %eax, %r9 movq %r10, %rdx shlq $0x20, %rdx movslq %r10d, %rax movq %rax, 0x198(%rsp) movabsq $0x100000000, %rax # imm = 0x100000000 leaq (%rdx,%rax), %r12 sarq $0x20, %r12 movabsq $0x200000000, %rax # imm = 0x200000000 leaq (%rdx,%rax), %rsi sarq $0x20, %rsi movabsq $0x300000000, %rax # imm = 0x300000000 addq %rax, %rdx sarq $0x20, %rdx leal (%r10,%r10), %eax movslq %eax, %rcx movl %r14d, %eax andl $-0x2, %eax movl %eax, 0xdc(%rsp) movl %edi, %edi movq 0x170(%rsp), %rax addq %rdi, %rax leaq 0x10(,%rax,8), %r14 movq %r14, 0x80(%rsp) addq %rax, %rdx movq 0x198(%rsp), %r14 addq %rax, %r14 movq %r14, 0x50(%rsp) addq %rax, %rsi movq %rsi, 0x88(%rsp) movq %rax, 0x98(%rsp) addq %rax, %r12 movq %r9, 0x1f0(%rsp) movq %rdi, %r14 cmpl $0x8, %r15d movq %rdx, 0x30(%rsp) jne 0xd3ea9 cmpl $0x8, (%rsp) jl 0xd3ffb movq 0x130(%rsp), %rax imulq 0x188(%rsp), %rax imulq 0x100(%rsp), %rax movq 0xf0(%rsp), %rsi addq 0x80(%rsp), %rsi movl 0x168(%rsp), %edi vmovdqa -0x10(%rsi,%rax), %xmm0 vmovdqa (%rsi,%rax), %xmm1 vpunpcklwd %xmm1, %xmm0, %xmm2 # xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] vpunpckhwd 
%xmm1, %xmm0, %xmm0 # xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] vpunpcklwd %xmm0, %xmm2, %xmm1 # xmm1 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] vpunpckhwd %xmm0, %xmm2, %xmm0 # xmm0 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] vmovdqa %xmm1, (%r11) vmovdqa %xmm0, 0x10(%r11) addq $0x20, %r11 addq %r9, %rsi decl %edi jne 0xd3e78 cmpl $0x1, %r15d jne 0xd3ffb movq %r14, 0xa8(%rsp) movq 0xf0(%rsp), %rdi movq 0x130(%rsp), %r14 imulq 0x78(%rsp), %r14 imulq 0x100(%rsp), %r14 cmpl $0x2, (%rsp) jl 0xd3f9b movq 0x98(%rsp), %rax leaq (%rdi,%rax), %r15 leaq (%rdi,%rdx), %r8 movq 0x50(%rsp), %rax leaq (%rdi,%rax), %r10 movq 0x88(%rsp), %rax leaq (%rdi,%rax), %rsi addq %r12, %rdi movl $0x1, %eax movq (%rsp), %r9 movb (%r15,%r14), %dl movb %dl, (%r11) movb (%r10,%r14), %dl movb %dl, 0x1(%r11) movb 0x1(%r15,%r14), %dl movb %dl, 0x2(%r11) movb (%rdi,%r14), %dl movb %dl, 0x3(%r11) movb 0x2(%r15,%r14), %dl movb %dl, 0x4(%r11) movb (%rsi,%r14), %dl movb %dl, 0x5(%r11) movb 0x3(%r15,%r14), %dl movb %dl, 0x6(%r11) movb (%r8,%r14), %dl movb %dl, 0x7(%r11) addq $0x8, %r11 addl $0x2, %eax addq %rcx, %r15 addq %rcx, %r8 addq %rcx, %r10 addq %rcx, %rsi addq %rcx, %rdi cmpl %r9d, %eax jl 0xd3f15 addq %r14, %r15 movq %r15, %rdi movl 0xdc(%rsp), %esi movq 0x10(%rsp), %r10 movl 0x18(%rsp), %r15d movq 0x90(%rsp), %r8 movq 0x1f0(%rsp), %r9 jmp 0xd3fb0 addq 0xa8(%rsp), %rdi addq 0x170(%rsp), %rdi addq %r14, %rdi xorl %esi, %esi movq (%rsp), %rax subl %esi, %eax jle 0xd3ff3 addq $0x3, %rdi movq 0x198(%rsp), %rsi movq 0xa8(%rsp), %r14 movb -0x3(%rdi), %dl movb %dl, (%r11) movb -0x2(%rdi), %dl movb %dl, 0x1(%r11) movb -0x1(%rdi), %dl movb %dl, 0x2(%r11) movb (%rdi), %dl movb %dl, 0x3(%r11) addq $0x4, %r11 addq %rsi, %rdi decl %eax jne 0xd3fcc jmp 0xd3ffb movq 0xa8(%rsp), %r14 leaq 0x4(%r14), %rdi addq $0x7, %r14 addq $0x20, 0x80(%rsp) addq $0x4, 0x98(%rsp) movq 0x30(%rsp), %rdx addq $0x4, %rdx addq $0x4, 0x50(%rsp) addq $0x4, 0x88(%rsp) addq $0x4, %r12 cmpq %r8, %r14 jl 0xd3e2f movl 0x20(%rsp), %r12d movl %edi, %eax orl $0x1, %eax cmpl %r12d, %eax jge 0xd425f movq 0xc0(%rsp), %rcx leal 0x7(%rcx), %eax testl %ecx, %ecx cmovnsl %ecx, %eax sarl $0x3, %eax cltq movq %rax, 0x50(%rsp) movq (%rsp), %rcx leal 0x7(%rcx), %eax testl %ecx, %ecx cmovnsl %ecx, %eax sarl $0x3, %eax movl %eax, 0xa8(%rsp) leal (,%r10,8), %eax movslq %eax, %rdx movq %r10, %r14 shlq $0x20, %r14 movslq %r10d, %rsi movq %rsi, 0x30(%rsp) movabsq $0x100000000, %rax # imm = 0x100000000 addq %rax, %r14 sarq $0x20, %r14 leal (%r10,%r10), %eax movslq %eax, %r9 movl %ecx, %eax andl $-0x2, %eax movl %eax, 0x168(%rsp) movl %edi, %edi movq 0x170(%rsp), %rax leaq (%rax,%rdi), %rcx leaq 0x8(,%rcx,8), %rax movq %rax, 0x88(%rsp) addq %rcx, %r14 movq %rcx, %rax leaq (%rcx,%rsi), %r12 movq %rdi, 0x80(%rsp) cmpl $0x8, %r15d jne 0xd4148 cmpl $0x8, (%rsp) jl 0xd422c movq 0x130(%rsp), %rcx imulq 0x50(%rsp), %rcx imulq 0x100(%rsp), %rcx movq 0xf0(%rsp), %rsi addq 0x88(%rsp), %rsi movl 0xa8(%rsp), %edi vmovq -0x8(%rsi,%rcx), %xmm0 vmovq (%rsi,%rcx), %xmm1 vpunpcklwd %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] vmovdqu %xmm0, (%r11) addq $0x10, %r11 addq %rdx, %rsi decl %edi jne 0xd4129 cmpl $0x1, %r15d jne 0xd422c movq 0xf0(%rsp), %rdi movq 0x130(%rsp), %rcx imulq 0x78(%rsp), %rcx imulq 0x100(%rsp), %rcx cmpl $0x2, (%rsp) jl 0xd41f1 leaq (%rdi,%rax), %r8 movq %r14, 0x98(%rsp) leaq (%rdi,%r14), %r10 addq %r12, %rdi movl $0x1, %esi movq (%rsp), %r14 movb (%r8,%rcx), 
%r15b movb %r15b, (%r11) movb (%rdi,%rcx), %r15b movb %r15b, 0x1(%r11) movb 0x1(%r8,%rcx), %r15b movb %r15b, 0x2(%r11) movb (%r10,%rcx), %r15b movb %r15b, 0x3(%r11) addq $0x4, %r11 addl $0x2, %esi addq %r9, %r8 addq %r9, %r10 addq %r9, %rdi cmpl %r14d, %esi jl 0xd4193 addq %rcx, %r8 movq %r8, %rdi movl 0x168(%rsp), %esi movq 0x10(%rsp), %r10 movl 0x18(%rsp), %r15d movq 0x90(%rsp), %r8 movq 0x98(%rsp), %r14 jmp 0xd4206 addq 0x80(%rsp), %rdi addq 0x170(%rsp), %rdi addq %rcx, %rdi xorl %esi, %esi movq (%rsp), %rcx subl %esi, %ecx jle 0xd422c incq %rdi movb -0x1(%rdi), %sil movb %sil, (%r11) movb (%rdi), %sil movb %sil, 0x1(%r11) addq $0x2, %r11 addq 0x30(%rsp), %rdi decl %ecx jne 0xd4211 movq 0x80(%rsp), %rcx leaq 0x2(%rcx), %rdi addq $0x3, %rcx addq $0x10, 0x88(%rsp) addq $0x2, %rax addq $0x2, %r14 addq $0x2, %r12 cmpq %r8, %rcx jl 0xd40e3 movl 0x20(%rsp), %r12d cmpl %r12d, %edi movq 0xc0(%rsp), %rcx movq 0x170(%rsp), %r12 jge 0xd3a68 leal 0x7(%rcx), %eax testl %ecx, %ecx cmovnsl %ecx, %eax sarl $0x3, %eax cltq movq (%rsp), %rdx leal 0x7(%rdx), %ecx testl %edx, %edx cmovnsl %edx, %ecx setle %r9b sarl $0x3, %ecx leal (,%r10,8), %edx movslq %edx, %rdx cmpl $0x1, %r15d setne %sil movslq %r10d, %r8 movslq %edi, %rdi orb %r9b, %sil cmpl $0x8, %r15d jne 0xd42fa cmpl $0x8, (%rsp) jl 0xd4339 movq 0x130(%rsp), %r9 imulq %rax, %r9 imulq 0x100(%rsp), %r9 addq 0xf0(%rsp), %r9 leaq (%rdi,%r12), %r10 leaq (%r9,%r10,8), %r9 movl %ecx, %r10d movq (%r9), %r14 movq %r14, (%r11) addq $0x8, %r11 addq %rdx, %r9 decl %r10d jne 0xd42e8 testb %sil, %sil jne 0xd4339 movq 0x130(%rsp), %r9 imulq 0x78(%rsp), %r9 imulq 0x100(%rsp), %r9 addq 0xf0(%rsp), %r9 addq %rdi, %r9 addq %r12, %r9 movq (%rsp), %r10 movb (%r9), %r14b movb %r14b, (%r11) incq %r11 addq %r8, %r9 decl %r10d jne 0xd4328 incq %rdi cmpq 0x90(%rsp), %rdi jne 0xd42b4 jmp 0xd3a68 movl 0x108(%rsp), %eax movl %eax, 0x18(%rsp) movl 0x11c(%rsp), %ecx movq 0x1e8(%rsp), %rax movq %rcx, 0x88(%rsp) addl %ecx, %eax cltd idivl 0x58(%rsp) leal 0x1(%rax), %r8d testl %eax, %eax je 0xd43b0 bsrl %eax, %esi movl %esi, %edx xorl $0x1f, %edx movl %edx, %ecx negb %cl movl $0x1, %eax shll %cl, %eax movl $0x1, %ecx testl %edx, %edx cmovel %edx, %eax subl %r8d, %eax shlq $0x20, %rax movl %r8d, %edi xorl %edx, %edx divq %rdi incl %eax jmp 0xd43b9 movl $0x1, %eax xorl %esi, %esi xorl %ecx, %ecx vmovd %eax, %xmm0 vpshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0] vmovd %ecx, %xmm2 vmovd %esi, %xmm1 vmovd %r8d, %xmm3 vpmovzxdq %xmm2, %xmm2 # xmm2 = xmm2[0],zero,xmm2[1],zero cmpl $0x8, %r12d movl %r8d, 0x80(%rsp) jl 0xd4b82 vpshufd $0x0, %xmm3, %xmm4 # xmm4 = xmm3[0,0,0,0] movq 0x88(%rsp), %rcx movl %ecx, %eax imull 0x28(%rsp), %eax movl %ecx, %r9d imull 0x38(%rsp), %r9d cmpl $0x8, 0x18(%rsp) setne %cl movq (%rsp), %rsi leal 0x7(%rsi), %edx testl %esi, %esi cmovnsl %esi, %edx vmovd %eax, %xmm5 sarl $0x3, %edx movl %edx, 0x90(%rsp) cmpl $0x8, %esi setl %r15b movq 0xc0(%rsp), %rax leal 0x7(%rax), %edx testl %eax, %eax cmovnsl %eax, %edx vpshufd $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0] sarl $0x3, %edx movl %edx, 0x98(%rsp) orb %cl, %r15b xorl %edx, %edx movb %r15b, 0x50(%rsp) movq 0x30(%rsp), %rax addl %edx, %eax vmovd %eax, %xmm6 vpshufd $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0] vpaddd 0x3d31e3(%rip), %xmm6, %xmm9 # 0x4a7650 vpmuludq %xmm0, %xmm9, %xmm7 vpsrlq $0x20, %xmm7, %xmm7 vpsrlq $0x20, %xmm9, %xmm8 vpmuludq %xmm0, %xmm8, %xmm8 vpblendw $0xcc, %xmm8, %xmm7, %xmm8 # xmm8 = xmm7[0,1],xmm8[2,3],xmm7[4,5],xmm8[6,7] vpsubd %xmm8, %xmm9, %xmm7 vpsrld %xmm2, %xmm7, %xmm10 vpaddd 
0x3d31c9(%rip), %xmm6, %xmm7 # 0x4a7660 vpaddd %xmm8, %xmm10, %xmm6 vpsrld %xmm1, %xmm6, %xmm6 vpmuludq %xmm0, %xmm7, %xmm8 vpsrlq $0x20, %xmm8, %xmm8 vpsrlq $0x20, %xmm7, %xmm10 vpmuludq %xmm0, %xmm10, %xmm10 vpblendw $0xcc, %xmm10, %xmm8, %xmm8 # xmm8 = xmm8[0,1],xmm10[2,3],xmm8[4,5],xmm10[6,7] vpsubd %xmm8, %xmm7, %xmm10 vpsrld %xmm2, %xmm10, %xmm10 vpaddd %xmm8, %xmm10, %xmm8 vpsrld %xmm1, %xmm8, %xmm8 vpmulld %xmm4, %xmm6, %xmm10 vpsubd %xmm10, %xmm9, %xmm9 vpmulld 0x150(%rsp), %xmm9, %xmm11 vpmulld %xmm5, %xmm6, %xmm10 vpmulld %xmm5, %xmm8, %xmm9 vpaddd %xmm10, %xmm11, %xmm6 cmpl $0x1, 0x58(%rsp) movq %rdx, 0x78(%rsp) jne 0xd4645 vmovd %xmm10, %eax vpextrd $0x3, %xmm9, %ecx cmpl %ecx, %eax jne 0xd4645 vmovd %xmm6, %r10d cmpl $0x1, 0x18(%rsp) jne 0xd4aa9 xorl %ecx, %ecx cmpl $0x2, (%rsp) jl 0xd45d2 xorl %r14d, %r14d movq 0xc0(%rsp), %rbp movq 0x60(%rsp), %r13 movl 0x68(%rsp), %ebx movq 0x70(%rsp), %r12 movq 0x10(%rsp), %r15 leal (%r14,%rbp), %eax cltd idivl %ebx movl %eax, %edi movl %edx, %esi leal (%r14,%rbp), %eax incl %eax cltd idivl %ebx movl %edx, %r8d movl %eax, %ecx movl %esi, %eax cltd idivl %r13d movl %edx, %esi imull %r15d, %edi addl %r10d, %edi imull %r12d, %esi addl %edi, %esi imull %r9d, %eax addl %eax, %esi movl %r8d, %eax cltd idivl %r13d imull %r15d, %ecx addl %r10d, %ecx imull %r12d, %edx addl %ecx, %edx imull %r9d, %eax addl %eax, %edx movq 0xf0(%rsp), %rax movslq %esi, %rcx vmovq (%rax,%rcx), %xmm6 movslq %edx, %rcx vmovq (%rax,%rcx), %xmm7 vpunpcklbw %xmm7, %xmm6, %xmm6 # xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7] vmovdqu %xmm6, (%r11) addq $0x10, %r11 leal 0x2(%r14), %ecx addl $0x3, %r14d cmpl (%rsp), %r14d movl %ecx, %r14d jl 0xd454a movq (%rsp), %rax movl %eax, %edi subl %ecx, %edi movl 0x20(%rsp), %r12d jle 0xd4a95 addl 0xc0(%rsp), %ecx movq 0x60(%rsp), %rbp movl 0x68(%rsp), %r13d movq 0x70(%rsp), %rbx movq 0x10(%rsp), %r8 movb 0x50(%rsp), %r15b movl %ecx, %eax cltd idivl %r13d movl %eax, %esi movl %edx, %eax cltd idivl %ebp imull %r8d, %esi addl %r10d, %esi imull %ebx, %edx addl %esi, %edx imull %r9d, %eax addl %eax, %edx movq 0xf0(%rsp), %rax movslq %edx, %rdx movq (%rax,%rdx), %rax movq %rax, (%r11) addq $0x8, %r11 incl %ecx decl %edi jne 0xd4605 jmp 0xd4aa9 vpmulld %xmm4, %xmm8, %xmm8 vpsubd %xmm8, %xmm7, %xmm7 vpmulld 0x150(%rsp), %xmm7, %xmm7 vpaddd %xmm7, %xmm9, %xmm7 cmpl $0x1, 0x18(%rsp) jne 0xd4965 xorl %ecx, %ecx cmpl $0x2, (%rsp) jl 0xd4844 xorl %r10d, %r10d movq 0xf0(%rsp), %r14 movq 0xc0(%rsp), %r15 movq 0x10(%rsp), %r12 leal (%r15,%r10), %eax cltd idivl %r13d movl %eax, %edi movl %edx, %esi leal (%r15,%r10), %eax incl %eax cltd idivl %r13d movl %edx, %r8d movl %eax, %ecx movl %esi, %eax cltd idivl %ebp movl %edx, %esi imull %r12d, %edi imull %ebx, %esi addl %edi, %esi imull %r9d, %eax addl %eax, %esi movl %r8d, %eax cltd idivl %ebp imull %r12d, %ecx imull %ebx, %edx addl %ecx, %edx imull %r9d, %eax addl %eax, %edx vmovd %esi, %xmm8 vpshufd $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0] vpaddd %xmm6, %xmm8, %xmm11 vpaddd %xmm7, %xmm8, %xmm8 vmovd %edx, %xmm9 vpshufd $0x0, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0] vpaddd %xmm6, %xmm9, %xmm10 vpaddd %xmm7, %xmm9, %xmm9 vmovd %xmm11, %eax cltq movb (%r14,%rax), %al movb %al, (%r11) movq 0xf0(%rsp), %rax vmovd %xmm10, %ecx movslq %ecx, %rcx movb (%rax,%rcx), %al movb %al, 0x1(%r11) movq 0xf0(%rsp), %rax vpextrd $0x1, %xmm11, %ecx movslq %ecx, %rcx movb (%rax,%rcx), %al movb %al, 0x2(%r11) vpextrd $0x1, %xmm10, %eax movq 
0xf0(%rsp), %rcx cltq movb (%rcx,%rax), %al movb %al, 0x3(%r11) movq 0xf0(%rsp), %rax vpextrd $0x2, %xmm11, %ecx movslq %ecx, %rcx movb (%rax,%rcx), %al movb %al, 0x4(%r11) vpextrd $0x2, %xmm10, %eax movq 0xf0(%rsp), %rcx cltq movb (%rcx,%rax), %al movb %al, 0x5(%r11) movq 0xf0(%rsp), %rax vpextrd $0x3, %xmm11, %ecx movslq %ecx, %rcx movb (%rax,%rcx), %al movb %al, 0x6(%r11) vpextrd $0x3, %xmm10, %eax movq 0xf0(%rsp), %rcx cltq movb (%rcx,%rax), %al movb %al, 0x7(%r11) movq 0xf0(%rsp), %r14 vmovd %xmm8, %eax cltq movb (%r14,%rax), %al movb %al, 0x8(%r11) vmovd %xmm9, %eax cltq movb (%r14,%rax), %al movb %al, 0x9(%r11) vpextrd $0x1, %xmm8, %eax cltq movb (%r14,%rax), %al movb %al, 0xa(%r11) vpextrd $0x1, %xmm9, %eax cltq movb (%r14,%rax), %al vpextrd $0x2, %xmm8, %ecx movb %al, 0xb(%r11) movslq %ecx, %rax movb (%r14,%rax), %al movb %al, 0xc(%r11) vpextrd $0x2, %xmm9, %eax cltq movb (%r14,%rax), %al movb %al, 0xd(%r11) vpextrd $0x3, %xmm8, %eax cltq movb (%r14,%rax), %al movb %al, 0xe(%r11) vpextrd $0x3, %xmm9, %eax cltq movb (%r14,%rax), %al movb %al, 0xf(%r11) addq $0x10, %r11 leal 0x2(%r10), %ecx addl $0x3, %r10d cmpl (%rsp), %r10d movl %ecx, %r10d jl 0xd468c movq (%rsp), %rax movl %eax, %edi subl %ecx, %edi movl 0x20(%rsp), %r12d jle 0xd4960 addl 0xc0(%rsp), %ecx movq 0x10(%rsp), %r8 movb 0x50(%rsp), %r15b movl %ecx, %eax cltd idivl %r13d movl %eax, %esi movl %edx, %eax cltd idivl %ebp imull %r8d, %esi imull %r9d, %eax imull %ebx, %edx addl %esi, %edx addl %eax, %edx vmovd %edx, %xmm8 vpshufd $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0] vpaddd %xmm6, %xmm8, %xmm9 vpaddd %xmm7, %xmm8, %xmm8 movq 0xf0(%rsp), %rax vmovd %xmm9, %edx movslq %edx, %rdx movb (%rax,%rdx), %al movb %al, (%r11) movq 0xf0(%rsp), %rax vpextrd $0x1, %xmm9, %edx movslq %edx, %rdx movb (%rax,%rdx), %al movb %al, 0x1(%r11) vpextrd $0x2, %xmm9, %eax movq 0xf0(%rsp), %rdx cltq movb (%rdx,%rax), %al movb %al, 0x2(%r11) movq 0xf0(%rsp), %rax vpextrd $0x3, %xmm9, %edx movslq %edx, %rdx movb (%rax,%rdx), %al movb %al, 0x3(%r11) movq 0xf0(%rsp), %rax vmovd %xmm8, %edx movslq %edx, %rdx movb (%rax,%rdx), %al movb %al, 0x4(%r11) movq 0xf0(%rsp), %rax vpextrd $0x1, %xmm8, %edx movslq %edx, %rdx movb (%rax,%rdx), %al movb %al, 0x5(%r11) movq 0xf0(%rsp), %rax vpextrd $0x2, %xmm8, %edx movslq %edx, %rdx movb (%rax,%rdx), %al movb %al, 0x6(%r11) movq 0xf0(%rsp), %rax vpextrd $0x3, %xmm8, %edx movslq %edx, %rdx movb (%rax,%rdx), %al movb %al, 0x7(%r11) addq $0x8, %r11 incl %ecx decl %edi jne 0xd4868 jmp 0xd4965 movb 0x50(%rsp), %r15b testb %r15b, %r15b jne 0xd4aae movl 0x98(%rsp), %ecx movl 0x90(%rsp), %edi movq 0x10(%rsp), %r8 movl %ecx, %eax cltd idivl %r13d movl %eax, %esi movl %edx, %eax cltd idivl %ebp imull %r8d, %esi imull %r9d, %eax imull %ebx, %edx addl %esi, %edx addl %eax, %edx vmovd %edx, %xmm8 vpshufd $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0] vpaddd %xmm6, %xmm8, %xmm9 vpaddd %xmm7, %xmm8, %xmm8 vpslld $0x3, %xmm9, %xmm9 vpslld $0x3, %xmm8, %xmm8 movq 0xf0(%rsp), %rax vmovd %xmm9, %edx movslq %edx, %rdx vpextrd $0x1, %xmm9, %esi vmovq (%rax,%rdx), %xmm10 movslq %esi, %rdx vmovd %xmm8, %esi movslq %esi, %rsi vmovq (%rax,%rsi), %xmm11 vpextrd $0x2, %xmm9, %esi movslq %esi, %rsi vmovq (%rax,%rdx), %xmm12 vpextrd $0x3, %xmm9, %edx movslq %edx, %rdx vmovq (%rax,%rsi), %xmm9 vpextrd $0x1, %xmm8, %esi movslq %esi, %rsi vmovq (%rax,%rdx), %xmm13 vmovq (%rax,%rsi), %xmm14 vpextrd $0x2, %xmm8, %edx vpunpcklwd %xmm12, %xmm10, %xmm10 # xmm10 = xmm10[0],xmm12[0],xmm10[1],xmm12[1],xmm10[2],xmm12[2],xmm10[3],xmm12[3] movslq %edx, %rdx 
vpextrd $0x3, %xmm8, %esi vpunpcklwd %xmm13, %xmm9, %xmm8 # xmm8 = xmm9[0],xmm13[0],xmm9[1],xmm13[1],xmm9[2],xmm13[2],xmm9[3],xmm13[3] vmovq (%rax,%rdx), %xmm9 movslq %esi, %rdx vmovq (%rax,%rdx), %xmm12 vpunpcklwd %xmm14, %xmm11, %xmm11 # xmm11 = xmm11[0],xmm14[0],xmm11[1],xmm14[1],xmm11[2],xmm14[2],xmm11[3],xmm14[3] vpunpcklwd %xmm12, %xmm9, %xmm9 # xmm9 = xmm9[0],xmm12[0],xmm9[1],xmm12[1],xmm9[2],xmm12[2],xmm9[3],xmm12[3] vpunpckldq %xmm8, %xmm10, %xmm12 # xmm12 = xmm10[0],xmm8[0],xmm10[1],xmm8[1] vpunpckhdq %xmm8, %xmm10, %xmm8 # xmm8 = xmm10[2],xmm8[2],xmm10[3],xmm8[3] vpunpckldq %xmm9, %xmm11, %xmm10 # xmm10 = xmm11[0],xmm9[0],xmm11[1],xmm9[1] vpunpckhdq %xmm9, %xmm11, %xmm9 # xmm9 = xmm11[2],xmm9[2],xmm11[3],xmm9[3] vpunpcklqdq %xmm10, %xmm12, %xmm11 # xmm11 = xmm12[0],xmm10[0] vpunpckhqdq %xmm10, %xmm12, %xmm10 # xmm10 = xmm12[1],xmm10[1] vpunpcklqdq %xmm9, %xmm8, %xmm12 # xmm12 = xmm8[0],xmm9[0] vpunpckhqdq %xmm9, %xmm8, %xmm8 # xmm8 = xmm8[1],xmm9[1] vmovdqu %xmm11, (%r11) vmovdqu %xmm10, 0x10(%r11) vmovdqu %xmm12, 0x20(%r11) vmovdqu %xmm8, 0x30(%r11) addq $0x40, %r11 incl %ecx decl %edi jne 0xd4981 jmp 0xd4b67 movq 0x60(%rsp), %rbp movl 0x68(%rsp), %r13d movq 0x70(%rsp), %rbx movb 0x50(%rsp), %r15b testb %r15b, %r15b je 0xd4ab8 movq 0x10(%rsp), %r8 jmp 0xd4b67 movl 0x98(%rsp), %ecx movl 0x90(%rsp), %edi movq 0x10(%rsp), %r8 movl %ecx, %eax cltd idivl %r13d movl %eax, %esi movl %edx, %eax cltd idivl %ebp imull %r8d, %esi addl %r10d, %esi imull %r9d, %eax imull %ebx, %edx addl %esi, %edx addl %eax, %edx movslq %edx, %rax movq 0xf0(%rsp), %rdx vmovdqu (%rdx,%rax,8), %xmm6 vmovdqu 0x10(%rdx,%rax,8), %xmm7 vmovdqu 0x20(%rdx,%rax,8), %xmm8 vmovdqu 0x30(%rdx,%rax,8), %xmm9 vpunpcklwd %xmm7, %xmm6, %xmm10 # xmm10 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3] vpunpckhwd %xmm7, %xmm6, %xmm6 # xmm6 = xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7] vpunpcklwd %xmm9, %xmm8, %xmm7 # xmm7 = xmm8[0],xmm9[0],xmm8[1],xmm9[1],xmm8[2],xmm9[2],xmm8[3],xmm9[3] vpunpckhwd %xmm9, %xmm8, %xmm8 # xmm8 = xmm8[4],xmm9[4],xmm8[5],xmm9[5],xmm8[6],xmm9[6],xmm8[7],xmm9[7] vpunpcklwd %xmm6, %xmm10, %xmm9 # xmm9 = xmm10[0],xmm6[0],xmm10[1],xmm6[1],xmm10[2],xmm6[2],xmm10[3],xmm6[3] vpunpcklwd %xmm8, %xmm7, %xmm11 # xmm11 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3] vpunpckhwd %xmm6, %xmm10, %xmm6 # xmm6 = xmm10[4],xmm6[4],xmm10[5],xmm6[5],xmm10[6],xmm6[6],xmm10[7],xmm6[7] vpunpckhwd %xmm8, %xmm7, %xmm7 # xmm7 = xmm7[4],xmm8[4],xmm7[5],xmm8[5],xmm7[6],xmm8[6],xmm7[7],xmm8[7] vpunpcklqdq %xmm11, %xmm9, %xmm8 # xmm8 = xmm9[0],xmm11[0] vpunpckhqdq %xmm11, %xmm9, %xmm9 # xmm9 = xmm9[1],xmm11[1] vpunpcklqdq %xmm7, %xmm6, %xmm10 # xmm10 = xmm6[0],xmm7[0] vpunpckhqdq %xmm7, %xmm6, %xmm6 # xmm6 = xmm6[1],xmm7[1] vmovdqu %xmm8, (%r11) vmovdqu %xmm9, 0x10(%r11) vmovdqu %xmm10, 0x20(%r11) vmovdqu %xmm6, 0x30(%r11) addq $0x40, %r11 incl %ecx decl %edi jne 0xd4acb movq 0x78(%rsp), %rax leal 0x8(%rax), %esi addl $0xf, %eax cmpl %r12d, %eax movl %esi, %edx movq %r8, %r10 jl 0xd4455 jmp 0xd4b84 xorl %esi, %esi movl %esi, %eax orl $0x3, %eax cmpl %r12d, %eax jge 0xd5143 vpshufd $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0] movq 0x88(%rsp), %rcx movl %ecx, %eax imull 0x28(%rsp), %eax vmovd %eax, %xmm4 vpshufd $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0] movl %ecx, %r15d imull 0x38(%rsp), %r15d cmpl $0x8, 0x18(%rsp) setne %al movq (%rsp), %rdx leal 0x7(%rdx), %ecx testl %edx, %edx cmovnsl %edx, %ecx sarl $0x3, %ecx movl %ecx, 0x98(%rsp) cmpl $0x8, %edx setl %dl movq 0xc0(%rsp), %rcx 
leal 0x7(%rcx), %edi testl %ecx, %ecx cmovnsl %ecx, %edi sarl $0x3, %edi movl %edi, 0x50(%rsp) orb %al, %dl movb %dl, 0x90(%rsp) movq 0x30(%rsp), %rax addl %esi, %eax vmovd %eax, %xmm5 vpshufd $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0] vpaddd 0x3d2a3c(%rip), %xmm5, %xmm5 # 0x4a7650 vpmuludq %xmm0, %xmm5, %xmm6 vpsrlq $0x20, %xmm6, %xmm6 vpsrlq $0x20, %xmm5, %xmm7 vpmuludq %xmm0, %xmm7, %xmm7 vpblendw $0xcc, %xmm7, %xmm6, %xmm6 # xmm6 = xmm6[0,1],xmm7[2,3],xmm6[4,5],xmm7[6,7] vpsubd %xmm6, %xmm5, %xmm7 vpsrld %xmm2, %xmm7, %xmm7 vpaddd %xmm6, %xmm7, %xmm6 vpsrld %xmm1, %xmm6, %xmm6 vpmulld %xmm3, %xmm6, %xmm7 vpsubd %xmm7, %xmm5, %xmm5 vpmulld 0x150(%rsp), %xmm5, %xmm5 vpmulld %xmm4, %xmm6, %xmm6 vpaddd %xmm6, %xmm5, %xmm5 cmpl $0x1, 0x58(%rsp) movq %rsi, 0x78(%rsp) jne 0xd4dd9 vmovd %xmm6, %eax vpextrd $0x3, %xmm6, %ecx cmpl %ecx, %eax jne 0xd4dd9 vmovd %xmm5, %r9d cmpl $0x1, 0x18(%rsp) jne 0xd50ba xorl %ecx, %ecx cmpl $0x2, (%rsp) jl 0xd4d3c xorl %r14d, %r14d movq 0xc0(%rsp), %rbp movq 0x60(%rsp), %r13 movl 0x68(%rsp), %ebx movq 0x70(%rsp), %r12 movq 0x10(%rsp), %r10 leal (%r14,%rbp), %eax cltd idivl %ebx movl %eax, %edi movl %edx, %esi leal (%r14,%rbp), %eax incl %eax cltd idivl %ebx movl %edx, %r8d movl %eax, %ecx movl %esi, %eax cltd idivl %r13d movl %edx, %esi imull %r10d, %edi addl %r9d, %edi imull %r12d, %esi addl %edi, %esi imull %r15d, %eax addl %eax, %esi movl %r8d, %eax cltd idivl %r13d imull %r10d, %ecx addl %r9d, %ecx imull %r12d, %edx addl %ecx, %edx imull %r15d, %eax addl %eax, %edx movq 0xf0(%rsp), %rax movslq %esi, %rcx vmovq (%rax,%rcx), %xmm5 movslq %edx, %rcx vmovq (%rax,%rcx), %xmm6 vpunpcklbw %xmm6, %xmm5, %xmm5 # xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3],xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7] vmovq %xmm5, (%r11) addq $0x8, %r11 leal 0x2(%r14), %ecx addl $0x3, %r14d cmpl (%rsp), %r14d movl %ecx, %r14d jl 0xd4cb4 movq (%rsp), %rax movl %eax, %edi subl %ecx, %edi movl 0x20(%rsp), %r12d jle 0xd50a6 addl 0xc0(%rsp), %ecx movq 0x60(%rsp), %rbp movl 0x68(%rsp), %r13d movq 0x70(%rsp), %rbx movq 0x10(%rsp), %r10 movl %ecx, %eax cltd idivl %r13d movl %eax, %esi movl %edx, %eax cltd idivl %ebp imull %r10d, %esi addl %r9d, %esi imull %r15d, %eax imull %ebx, %edx addl %esi, %edx addl %eax, %edx movq 0xf0(%rsp), %rax movslq %edx, %rdx movb (%rax,%rdx), %al movb %al, (%r11) movq 0xf0(%rsp), %rax movb 0x1(%rax,%rdx), %al movb %al, 0x1(%r11) movq 0xf0(%rsp), %rax movb 0x2(%rax,%rdx), %al movb %al, 0x2(%r11) movq 0xf0(%rsp), %rax movb 0x3(%rax,%rdx), %al movb %al, 0x3(%r11) addq $0x4, %r11 incl %ecx decl %edi jne 0xd4d6a jmp 0xd50ba cmpl $0x1, 0x18(%rsp) jne 0xd4ff2 xorl %ecx, %ecx cmpl $0x2, (%rsp) jl 0xd4f38 xorl %r14d, %r14d movq 0xc0(%rsp), %r10 movq 0x10(%rsp), %r12 leal (%r10,%r14), %eax cltd idivl %r13d movl %eax, %esi movl %edx, %edi leal (%r10,%r14), %eax incl %eax cltd idivl %r13d movl %edx, %r8d movl %eax, %ecx movl %edi, %eax cltd idivl %ebp movl %edx, %edi movl %eax, %r9d movl %r8d, %eax cltd idivl %ebp imull %r12d, %esi imull %r15d, %r9d imull %ebx, %edi addl %esi, %edi addl %r9d, %edi imull %r12d, %ecx imull %r15d, %eax imull %ebx, %edx addl %ecx, %edx addl %eax, %edx vmovd %edi, %xmm6 vpshufd $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0] vpaddd %xmm5, %xmm6, %xmm7 vmovd %edx, %xmm6 vpshufd $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0] vpaddd %xmm5, %xmm6, %xmm6 movq 0xf0(%rsp), %rax vmovd %xmm7, %ecx movslq %ecx, %rcx movb (%rax,%rcx), %al movb %al, (%r11) movq 0xf0(%rsp), %rax vmovd %xmm6, %ecx movslq %ecx, %rcx movb 
(%rax,%rcx), %al movb %al, 0x1(%r11) movq 0xf0(%rsp), %rax vpextrd $0x1, %xmm7, %ecx movslq %ecx, %rcx movb (%rax,%rcx), %al movb %al, 0x2(%r11) movq 0xf0(%rsp), %rax vpextrd $0x1, %xmm6, %ecx movslq %ecx, %rcx movb (%rax,%rcx), %al movb %al, 0x3(%r11) movq 0xf0(%rsp), %rax vpextrd $0x2, %xmm7, %ecx movslq %ecx, %rcx movb (%rax,%rcx), %al movb %al, 0x4(%r11) movq 0xf0(%rsp), %rax vpextrd $0x2, %xmm6, %ecx movslq %ecx, %rcx movb (%rax,%rcx), %al movb %al, 0x5(%r11) movq 0xf0(%rsp), %rax vpextrd $0x3, %xmm7, %ecx movslq %ecx, %rcx movb (%rax,%rcx), %al movb %al, 0x6(%r11) movq 0xf0(%rsp), %rax vpextrd $0x3, %xmm6, %ecx movslq %ecx, %rcx movb (%rax,%rcx), %al movb %al, 0x7(%r11) addq $0x8, %r11 leal 0x2(%r14), %ecx addl $0x3, %r14d cmpl (%rsp), %r14d movl %ecx, %r14d jl 0xd4e00 movq (%rsp), %rax movl %eax, %edi subl %ecx, %edi movl 0x20(%rsp), %r12d jle 0xd4fed addl 0xc0(%rsp), %ecx movq 0x10(%rsp), %r10 movl %ecx, %eax cltd idivl %r13d movl %eax, %esi movl %edx, %eax cltd idivl %ebp imull %r10d, %esi imull %r15d, %eax imull %ebx, %edx addl %esi, %edx addl %eax, %edx vmovd %edx, %xmm6 vpshufd $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0] vpaddd %xmm5, %xmm6, %xmm6 movq 0xf0(%rsp), %rax vmovd %xmm6, %edx movslq %edx, %rdx movb (%rax,%rdx), %al movb %al, (%r11) movq 0xf0(%rsp), %rax vpextrd $0x1, %xmm6, %edx movslq %edx, %rdx movb (%rax,%rdx), %al movb %al, 0x1(%r11) movq 0xf0(%rsp), %rax vpextrd $0x2, %xmm6, %edx movslq %edx, %rdx movb (%rax,%rdx), %al movb %al, 0x2(%r11) movq 0xf0(%rsp), %rax vpextrd $0x3, %xmm6, %edx movslq %edx, %rdx movb (%rax,%rdx), %al movb %al, 0x3(%r11) addq $0x4, %r11 incl %ecx decl %edi jne 0xd4f57 jmp 0xd4ff2 movq 0x10(%rsp), %r10 cmpb $0x0, 0x90(%rsp) jne 0xd5129 movl 0x50(%rsp), %ecx movl 0x98(%rsp), %edi movl %ecx, %eax cltd idivl %r13d movl %eax, %esi movl %edx, %eax cltd idivl %ebp imull %r10d, %esi imull %r15d, %eax imull %ebx, %edx addl %esi, %edx addl %eax, %edx vmovd %edx, %xmm6 vpshufd $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0] vpaddd %xmm5, %xmm6, %xmm6 movq 0xf0(%rsp), %rax vpslld $0x3, %xmm6, %xmm6 vmovd %xmm6, %edx vpextrd $0x1, %xmm6, %esi movslq %edx, %rdx vmovq (%rax,%rdx), %xmm7 vpextrd $0x2, %xmm6, %edx movslq %esi, %rsi movslq %edx, %rdx vpextrd $0x3, %xmm6, %r8d vmovq (%rax,%rsi), %xmm6 vmovq (%rax,%rdx), %xmm8 movslq %r8d, %rdx vmovq (%rax,%rdx), %xmm9 vpunpcklwd %xmm6, %xmm7, %xmm6 # xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3] vpunpcklwd %xmm9, %xmm8, %xmm7 # xmm7 = xmm8[0],xmm9[0],xmm8[1],xmm9[1],xmm8[2],xmm9[2],xmm8[3],xmm9[3] vpunpckldq %xmm7, %xmm6, %xmm8 # xmm8 = xmm6[0],xmm7[0],xmm6[1],xmm7[1] vpunpckhdq %xmm7, %xmm6, %xmm6 # xmm6 = xmm6[2],xmm7[2],xmm6[3],xmm7[3] vmovdqu %xmm8, (%r11) vmovdqu %xmm6, 0x10(%r11) addq $0x20, %r11 incl %ecx decl %edi jne 0xd500b jmp 0xd5129 movq 0x60(%rsp), %rbp movl 0x68(%rsp), %r13d movq 0x70(%rsp), %rbx movq 0x10(%rsp), %r10 cmpb $0x0, 0x90(%rsp) jne 0xd5129 movl 0x50(%rsp), %ecx movl 0x98(%rsp), %edi movl %ecx, %eax cltd idivl %r13d movl %eax, %esi movl %edx, %eax cltd idivl %ebp imull %r10d, %esi addl %r9d, %esi imull %r15d, %eax imull %ebx, %edx addl %esi, %edx addl %eax, %edx movslq %edx, %rax movq 0xf0(%rsp), %rdx vmovdqu (%rdx,%rax,8), %xmm5 vmovdqu 0x10(%rdx,%rax,8), %xmm6 vpunpcklwd %xmm6, %xmm5, %xmm7 # xmm7 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3] vpunpckhwd %xmm6, %xmm5, %xmm5 # xmm5 = xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7] vpunpcklwd %xmm5, %xmm7, %xmm6 # xmm6 = 
xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3] vpunpckhwd %xmm5, %xmm7, %xmm5 # xmm5 = xmm7[4],xmm5[4],xmm7[5],xmm5[5],xmm7[6],xmm5[6],xmm7[7],xmm5[7] vmovdqu %xmm6, (%r11) vmovdqu %xmm5, 0x10(%r11) addq $0x20, %r11 incl %ecx decl %edi jne 0xd50cf movq 0x78(%rsp), %rax leal 0x4(%rax), %r9d addl $0x7, %eax cmpl %r12d, %eax movl %r9d, %esi jl 0xd4bfc jmp 0xd5146 movl %esi, %r9d movl %r9d, %eax orl $0x1, %eax cmpl %r12d, %eax jge 0xd55f6 movq 0x88(%rsp), %r14 movl %r14d, %eax imull 0x28(%rsp), %eax movl %eax, 0x90(%rsp) movl %r14d, %r15d imull 0x38(%rsp), %r15d movq (%rsp), %rsi leal 0x7(%rsi), %eax testl %esi, %esi cmovnsl %esi, %eax sarl $0x3, %eax movl %eax, 0x98(%rsp) movq 0xc0(%rsp), %rax leal 0x7(%rax), %edx testl %eax, %eax cmovnsl %eax, %edx sarl $0x3, %edx movl %edx, 0x170(%rsp) cmpl $0x8, 0x18(%rsp) setne %al cmpl $0x8, %esi setl %cl orb %al, %cl movb %cl, 0xa8(%rsp) movl %r15d, 0x50(%rsp) movq 0x30(%rsp), %rsi leal (%r9,%rsi), %eax cltd movl 0x80(%rsp), %r8d idivl %r8d movl %eax, %edi movl %edx, %ecx leal (%r9,%rsi), %eax incl %eax cltd idivl %r8d movl %edx, %esi movq 0x58(%rsp), %rdx imull %edx, %ecx movl 0x90(%rsp), %r8d imull %r8d, %edi imull %r8d, %eax addl %edi, %ecx cmpl $0x1, %edx movq %r9, 0x78(%rsp) jne 0xd5366 cmpl %eax, %edi jne 0xd5366 cmpl $0x1, 0x18(%rsp) movq (%rsp), %r15 jne 0xd5578 xorl %esi, %esi cmpl $0x2, %r15d jl 0xd52e6 xorl %r10d, %r10d movq 0x60(%rsp), %r13 movl 0x68(%rsp), %ebx movq 0x70(%rsp), %r12 movq 0x10(%rsp), %r14 movl 0x50(%rsp), %ebp movq 0xc0(%rsp), %rsi leal (%rsi,%r10), %eax cltd idivl %ebx movl %eax, %r8d movl %edx, %edi leal (%rsi,%r10), %eax incl %eax cltd idivl %ebx movl %edx, %r9d movl %eax, %esi movl %edi, %eax cltd idivl %r13d movl %edx, %edi imull %r14d, %r8d addl %ecx, %r8d imull %r12d, %edi addl %r8d, %edi imull %ebp, %eax addl %eax, %edi movl %r9d, %eax cltd idivl %r13d imull %r14d, %esi addl %ecx, %esi imull %r12d, %edx addl %esi, %edx imull %ebp, %eax addl %eax, %edx movq 0xf0(%rsp), %rax movslq %edi, %rsi movslq %edx, %rdx movb (%rax,%rsi), %dil movb %dil, (%r11) movb (%rax,%rdx), %dil movb %dil, 0x1(%r11) movb 0x1(%rax,%rsi), %sil movb %sil, 0x2(%r11) movb 0x1(%rax,%rdx), %al movb %al, 0x3(%r11) addq $0x4, %r11 leal 0x2(%r10), %esi addl $0x3, %r10d cmpl %r15d, %r10d movl %esi, %r10d jl 0xd524b movl %r15d, %r8d subl %esi, %r8d movl 0x20(%rsp), %r12d jle 0xd555c addl 0xc0(%rsp), %esi movq 0x60(%rsp), %rbp movl 0x68(%rsp), %r13d movq 0x70(%rsp), %rbx movq 0x10(%rsp), %r10 movq 0x88(%rsp), %r14 movl 0x50(%rsp), %r15d movl %esi, %eax cltd idivl %r13d movl %eax, %edi movl %edx, %eax cltd idivl %ebp imull %r10d, %edi addl %ecx, %edi imull %r15d, %eax imull %ebx, %edx addl %edi, %edx movq 0xf0(%rsp), %rdi addl %eax, %edx movslq %edx, %rax movb (%rdi,%rax), %dl movb %dl, (%r11) movb 0x1(%rdi,%rax), %al movb %al, 0x1(%r11) addq $0x2, %r11 incl %esi decl %r8d jne 0xd531f jmp 0xd557d imull 0x58(%rsp), %esi addl %eax, %esi xorl %edi, %edi cmpl $0x1, 0x18(%rsp) movq (%rsp), %rax jne 0xd54e4 xorl %edi, %edi cmpl $0x2, %eax jl 0xd5445 xorl %r14d, %r14d movq 0x60(%rsp), %r13 movl 0x68(%rsp), %ebx movq 0x70(%rsp), %r12 movq %r10, %rbp movq 0xc0(%rsp), %r8 leal (%r8,%r14), %eax cltd idivl %ebx movl %eax, %r9d movl %edx, %edi leal (%r8,%r14), %eax incl %eax cltd idivl %ebx movl %edx, %r10d movl %eax, %r8d movl %edi, %eax cltd idivl %r13d movl %edx, %edi imull %ebp, %r9d imull %r12d, %edi addl %r9d, %edi imull %r15d, %eax addl %eax, %edi movl %r10d, %eax cltd idivl %r13d imull %ebp, %r8d imull %r15d, %eax imull %r12d, %edx addl %r8d, 
%edx addl %eax, %edx leal (%rdi,%rcx), %eax movq 0xf0(%rsp), %r8 cltq movb (%r8,%rax), %al movb %al, (%r11) leal (%rdx,%rcx), %eax cltq movb (%r8,%rax), %al movb %al, 0x1(%r11) addl %esi, %edi movslq %edi, %rax movb (%r8,%rax), %al movb %al, 0x2(%r11) addl %esi, %edx movslq %edx, %rax movb (%r8,%rax), %al movb %al, 0x3(%r11) addq $0x4, %r11 leal 0x2(%r14), %edi addl $0x3, %r14d cmpl (%rsp), %r14d movl %edi, %r14d jl 0xd539d movq (%rsp), %r10 movl %r10d, %r9d subl %edi, %r9d jle 0xd54c3 addl 0xc0(%rsp), %edi movq 0x60(%rsp), %rbp movl 0x68(%rsp), %r13d movq 0x70(%rsp), %rbx movl 0x20(%rsp), %r12d movq 0x10(%rsp), %r14 movl %edi, %eax cltd idivl %r13d movl %eax, %r8d movl %edx, %eax cltd idivl %ebp imull %r14d, %r8d imull %r15d, %eax imull %ebx, %edx addl %r8d, %edx addl %eax, %edx leal (%rdx,%rcx), %eax movq 0xf0(%rsp), %r8 cltq addl %esi, %edx movslq %edx, %rdx movb (%r8,%rax), %al movb %al, (%r11) movb (%r8,%rdx), %al movb %al, 0x1(%r11) addq $0x2, %r11 incl %edi decl %r9d jne 0xd5471 movl %r10d, %edi movq %r14, %r10 jmp 0xd54dc movq 0x60(%rsp), %rbp movl 0x68(%rsp), %r13d movq 0x70(%rsp), %rbx movl 0x20(%rsp), %r12d movq 0x10(%rsp), %r10 movq 0x88(%rsp), %r14 cmpl $0x8, 0x18(%rsp) jne 0xd55dd movl 0x98(%rsp), %r9d subl %edi, %r9d jle 0xd55dd addl 0x170(%rsp), %edi movl %edi, %eax cltd idivl %r13d movl %eax, %r8d movl %edx, %eax cltd idivl %ebp imull %r10d, %r8d imull %r15d, %eax imull %ebx, %edx addl %r8d, %edx addl %eax, %edx leal (%rdx,%rcx), %eax cltq addl %esi, %edx movslq %edx, %rdx movq 0xf0(%rsp), %r8 vmovq (%r8,%rax,8), %xmm0 vmovq (%r8,%rdx,8), %xmm1 vpunpcklwd %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] vmovdqu %xmm0, (%r11) addq $0x10, %r11 incl %edi decl %r9d jne 0xd5507 jmp 0xd55dd movq 0x60(%rsp), %rbp movl 0x68(%rsp), %r13d movq 0x70(%rsp), %rbx movq 0x10(%rsp), %r10 movq 0x88(%rsp), %r14 movl 0x50(%rsp), %r15d cmpb $0x0, 0xa8(%rsp) jne 0xd55dd movl 0x170(%rsp), %esi movl 0x98(%rsp), %r8d movl %esi, %eax cltd idivl %r13d movl %eax, %edi movl %edx, %eax cltd idivl %ebp imull %r10d, %edi addl %ecx, %edi imull %r15d, %eax imull %ebx, %edx addl %edi, %edx addl %eax, %edx movslq %edx, %rax movq 0xf0(%rsp), %rdx vmovdqu (%rdx,%rax,8), %xmm0 vpshufb 0x3d2583(%rip), %xmm0, %xmm0 # 0x4a7b50 vmovdqu %xmm0, (%r11) addq $0x10, %r11 incl %esi decl %r8d jne 0xd5596 movq 0x78(%rsp), %rax leal 0x2(%rax), %edi addl $0x3, %eax cmpl %r12d, %eax movl %edi, %r9d jl 0xd51c1 jmp 0xd5601 movl %r9d, %edi movq 0x88(%rsp), %r14 cmpl %r12d, %edi movq 0xc0(%rsp), %r9 jge 0xd3a68 movl %r14d, %eax imull 0x28(%rsp), %eax movl %eax, 0x90(%rsp) imull 0x38(%rsp), %r14d cmpl $0x8, 0x18(%rsp) setne %al movq (%rsp), %rcx leal 0x7(%rcx), %edx testl %ecx, %ecx cmovnsl %ecx, %edx sarl $0x3, %edx movl %edx, 0x98(%rsp) cmpl $0x8, %ecx setl %cl leal 0x7(%r9), %edx testl %r9d, %r9d cmovnsl %r9d, %edx sarl $0x3, %edx movl %edx, 0x50(%rsp) orb %al, %cl movb %cl, 0x88(%rsp) movl %r9d, %r15d movq 0x30(%rsp), %rax addl %edi, %eax cltd idivl 0x80(%rsp) movl %eax, %ecx imull 0x58(%rsp), %edx imull 0x90(%rsp), %ecx addl %edx, %ecx cmpl $0x1, 0x18(%rsp) movq %rdi, 0x78(%rsp) jne 0xd578a xorl %esi, %esi cmpl $0x2, (%rsp) jl 0xd5742 xorl %esi, %esi movq (%rsp), %r12 leal (%r15,%rsi), %eax cltd idivl %r13d movl %eax, %r9d movl %edx, %r8d leal (%r15,%rsi), %eax incl %eax cltd idivl %r13d movl %edx, %r10d movl %eax, %edi movl %r8d, %eax cltd idivl %ebp movl %edx, %r8d imull 0x10(%rsp), %r9d addl %ecx, %r9d imull %ebx, %r8d addl %r9d, %r8d imull %r14d, %eax addl %eax, %r8d movl 
%r10d, %eax movq 0x10(%rsp), %r10 cltd idivl %ebp imull %r10d, %edi addl %ecx, %edi imull %ebx, %edx addl %edi, %edx imull %r14d, %eax addl %eax, %edx movq 0xf0(%rsp), %rax movslq %r8d, %rdi movb (%rax,%rdi), %dil movb %dil, (%r11,%rsi) movslq %edx, %rdx movb (%rax,%rdx), %al movb %al, 0x1(%r11,%rsi) addq $0x2, %rsi leal 0x1(%rsi), %eax cmpl %r12d, %eax jl 0xd56ab addq %rsi, %r11 movq 0xc0(%rsp), %r9 movl 0x20(%rsp), %r12d movq (%rsp), %rax movl %eax, %r8d subl %esi, %r8d jle 0xd578a addl %r9d, %esi movl %esi, %eax cltd idivl %r13d movl %eax, %edi movl %edx, %eax cltd idivl %ebp imull %r10d, %edi addl %ecx, %edi imull %ebx, %edx addl %edi, %edx imull %r14d, %eax addl %eax, %edx movq 0xf0(%rsp), %rax movslq %edx, %rdx movb (%rax,%rdx), %al movb %al, (%r11) incq %r11 incl %esi decl %r8d jne 0xd5751 cmpb $0x0, 0x88(%rsp) jne 0xd57db movl 0x50(%rsp), %esi movl 0x98(%rsp), %r8d movl %esi, %eax cltd idivl %r13d movl %eax, %edi movl %edx, %eax cltd idivl %ebp imull %r10d, %edi addl %ecx, %edi imull %ebx, %edx addl %edi, %edx imull %r14d, %eax addl %eax, %edx movslq %edx, %rax movq 0xf0(%rsp), %rdx movq (%rdx,%rax,8), %rax movq %rax, (%r11) addq $0x8, %r11 incl %esi decl %r8d jne 0xd57a0 movq 0x78(%rsp), %rdi incl %edi cmpl %r12d, %edi jne 0xd5669 jmp 0xd3a68 movq %r12, %r15 callq 0x3c84b movq 0xd0(%rsp), %rcx leaq 0x368(%rcx), %rbx testl %eax, %eax je 0xd58b5 leaq 0xf0(%rsp), %rdi leaq 0x1a0(%rsp), %rsi movq %rbx, %rdx movl %r15d, %ecx movq 0x178(%rsp), %r8 callq 0x14a240 jmp 0xd6d31 callq 0x3c83d movq 0xd0(%rsp), %rcx leaq 0x2d8(%rcx), %rbx testl %eax, %eax je 0xd58ea vmovdqa 0xc0(%rsp), %xmm0 vpextrd $0x1, %xmm0, %r8d vpextrd $0x2, %xmm0, %r9d subq $0x20, %rsp movq 0x198(%rsp), %rax movq %rax, 0x18(%rsp) movq 0x48(%rsp), %rax movl %eax, 0x10(%rsp) movq 0x78(%rsp), %rax movl %eax, 0x8(%rsp) vpextrd $0x3, %xmm0, (%rsp) leaq 0x110(%rsp), %rdi leaq 0x1c0(%rsp), %rsi movq %rbx, %rdx movl %ebp, %ecx callq 0x155196 addq $0x20, %rsp jmp 0xd6d3b callq 0x3c83d testl %eax, %eax je 0xd6d01 leaq 0xf0(%rsp), %rdi leaq 0x1a0(%rsp), %rsi movq %rbx, %rdx movl %r15d, %ecx movq 0x178(%rsp), %r8 callq 0x15c972 jmp 0xd6d31 callq 0x3c821 testl %eax, %eax je 0xd6f2e vmovdqa 0xc0(%rsp), %xmm0 vpextrd $0x1, %xmm0, %r8d vpextrd $0x2, %xmm0, %r9d subq $0x20, %rsp movq 0x198(%rsp), %rax movq %rax, 0x18(%rsp) movq 0x48(%rsp), %rax movl %eax, 0x10(%rsp) movq 0x78(%rsp), %rax movl %eax, 0x8(%rsp) vpextrd $0x3, %xmm0, (%rsp) leaq 0x110(%rsp), %rdi leaq 0x1c0(%rsp), %rsi movq %rbx, %rdx movl %ebp, %ecx callq 0x1649e8 addq $0x20, %rsp jmp 0xd6d3b movl 0x14c(%rsp), %r15d movl 0x184(%rsp), %esi imull %r15d, %esi movq 0x178(%rsp), %rax movq 0x10(%rax), %r9 leaq 0x290(%rsp), %rdi movq $0x0, 0x40(%rdi) vpxor %xmm0, %xmm0, %xmm0 vmovdqa %xmm0, (%rdi) vmovdqu %xmm0, 0xc(%rdi) vmovdqa %xmm0, 0x20(%rdi) vmovdqu %xmm0, 0x2c(%rdi) movl $0x4, %r8d movl $0x1, %edx movq 0x140(%rsp), %rcx callq 0x2b0d2 movl $0xffffff9c, %ebx # imm = 0xFFFFFF9C cmpq $0x0, 0x290(%rsp) movq 0xe8(%rsp), %rcx movq 0xb0(%rsp), %r12 movq 0xb8(%rsp), %r13 je 0xd6cb6 movslq 0x2c8(%rsp), %rax imulq 0x2d0(%rsp), %rax testq %rax, %rax je 0xd6cb6 cmpl $0x0, 0xe0(%rsp) jle 0xd6cb4 xorl %edx, %edx movl 0x184(%rsp), %eax movl %eax, %esi movl %edx, 0x98(%rsp) imull %edx, %esi movl %esi, 0x20(%rsp) subl %esi, %ecx cmpl %ecx, %eax cmovll %eax, %ecx movl %ecx, 0x80(%rsp) vzeroupper callq 0x3cbe9 movslq 0x2bc(%rsp), %rdx movslq 0x2c0(%rsp), %rcx movl 0x2c4(%rsp), %esi cltq imulq 0x2d0(%rsp), %rax movq 0x2a0(%rsp), %rdi imulq %rdi, %rax addq 0x290(%rsp), %rax movl 0x2a8(%rsp), 
%r8d movq 0x2b0(%rsp), %r9 movq %rax, 0x2e0(%rsp) movq $0x0, 0x2e8(%rsp) movq %rdi, 0x2f0(%rsp) movl %r8d, 0x2f8(%rsp) movq %r9, 0x300(%rsp) movl %edx, 0x30c(%rsp) movl %ecx, 0x310(%rsp) movl $0x1, 0x314(%rsp) movl %esi, 0x318(%rsp) imulq %rdx, %rcx movq %rdi, %rax imulq %rcx, %rax addq $0xf, %rax andq $-0x10, %rax xorl %edx, %edx divq %rdi movq %rax, 0x320(%rsp) movl 0x2b8(%rsp), %eax leal -0x1(%rax), %edx movl %edx, 0x308(%rsp) cmpl $0x4, %eax jne 0xd5b0a movq %rcx, 0x320(%rsp) testl %r12d, %r12d jle 0xd6c96 movl 0x80(%rsp), %eax movl %eax, %ecx movq %rcx, 0x70(%rsp) movslq 0x20(%rsp), %rcx movq %rcx, 0x78(%rsp) cltq movq %rax, 0x28(%rsp) movl 0x14c(%rsp), %eax xorl %ecx, %ecx movl %ecx, 0x38(%rsp) subl %ecx, %r12d cmpl %r12d, %eax cmovll %eax, %r12d testl %r13d, %r13d jle 0xd5d9a xorl %ebp, %ebp movl 0x1fc(%rsp), %ecx movq %r13, %r15 movl %r13d, %r10d subl %ebp, %r10d cmpl %r10d, %ecx cmovll %ecx, %r10d movl 0x20(%rsp), %r13d movl %r13d, %eax cltd idivl 0x184(%rsp) movq 0xd0(%rsp), %rdx movslq 0x34c(%rdx), %rsi movslq %eax, %rdi imulq 0x360(%rdx), %rdi movq 0x330(%rdx), %r8 imulq %r8, %rdi addq 0x320(%rdx), %rdi movl 0x338(%rdx), %r9d movq 0x340(%rdx), %r11 movl %ebp, %eax cltd idivl %ecx movslq %eax, %rcx movq %r8, %rax imulq %rsi, %rax imulq %rcx, %rax addq %rdi, %rax movq %rax, 0x358(%rsp) xorl %r14d, %r14d movq %r14, 0x360(%rsp) movq %r8, 0x368(%rsp) movl %r9d, 0x370(%rsp) movq %r11, 0x378(%rsp) movl $0x2, %r8d movl %r8d, 0x380(%rsp) movl %esi, 0x384(%rsp) movabsq $0x100000001, %rbx # imm = 0x100000001 movq %rbx, 0x388(%rsp) movl $0x1, %r11d movl %r11d, 0x390(%rsp) movq %rsi, 0x398(%rsp) movl 0x38(%rsp), %r9d movl %r9d, %eax cltd idivl 0x14c(%rsp) movslq 0x22c(%rsp), %rdx movq 0x210(%rsp), %rsi movq %rdx, %rdi imulq %rsi, %rdi imulq %rcx, %rdi cltq imulq 0x240(%rsp), %rax imulq %rsi, %rax addq 0x200(%rsp), %rax addq %rax, %rdi movq %rdi, 0x3c8(%rsp) movq %r14, 0x3d0(%rsp) movq %rsi, 0x3d8(%rsp) movl 0x218(%rsp), %eax movl %eax, 0x3e0(%rsp) movq 0x220(%rsp), %rax movq %rax, 0x3e8(%rsp) movl %r8d, 0x3f0(%rsp) movl %edx, 0x3f4(%rsp) movq %rbx, 0x3f8(%rsp) movl %r11d, 0x400(%rsp) movq %rdx, 0x408(%rsp) subq $0x8, %rsp leaq 0x360(%rsp), %rdi leaq 0x3d0(%rsp), %rsi leaq 0x2e8(%rsp), %rdx movl %r13d, %ecx movl 0x88(%rsp), %r8d pushq %r10 pushq %rbp pushq %r12 vzeroupper callq 0x3c19a1 addq $0x20, %rsp movq 0x3d0(%rsp), %rax testq %rax, %rax movq %r15, %r13 je 0xd5d4c lock decl (%rax) jne 0xd5d4c movq 0x3c8(%rsp), %rsi movq 0x3e8(%rsp), %rdi testq %rdi, %rdi je 0xd5d3f movq (%rdi), %rax callq *0x18(%rax) jmp 0xd5d4c testq %rsi, %rsi je 0xd5d4c movq %rsi, %rdi callq 0x244a0 movq 0x360(%rsp), %rax testq %rax, %rax je 0xd5d88 lock decl (%rax) jne 0xd5d88 movq 0x358(%rsp), %rsi movq 0x378(%rsp), %rdi testq %rdi, %rdi je 0xd5d7b movq (%rdi), %rax callq *0x18(%rax) jmp 0xd5d88 testq %rsi, %rsi je 0xd5d88 movq %rsi, %rdi callq 0x244a0 movl 0x1fc(%rsp), %ecx addl %ecx, %ebp cmpl %r13d, %ebp jl 0xd5b5b vzeroupper callq 0x3c83d testl %eax, %eax je 0xd5dd3 leaq 0x2e0(%rsp), %rdi leaq 0x1a0(%rsp), %rsi movl 0x20(%rsp), %edx movl 0x80(%rsp), %ecx movl 0x38(%rsp), %r8d movl %r12d, %r9d callq 0x15b78e jmp 0xd6c2e xorl %r15d, %r15d movl 0x1b8(%rsp), %edx movq 0x1e0(%rsp), %rbx movq 0x2e0(%rsp), %rcx cmpl $0x8, 0x80(%rsp) movq %rbx, 0x58(%rsp) jl 0xd6534 leal (,%r12,4), %eax movslq %eax, %rdi movl %edx, %eax imull 0x38(%rsp), %eax cltq movq %rax, 0x60(%rsp) movq %rbx, %r8 shlq $0x20, %r8 leal (%rbx,%rbx), %eax movslq %eax, %r9 movq %rbx, %rbp movabsq $0x300000000, %rax # imm = 0x300000000 imulq %rax, %rbp 
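// Editor's note: the stack writes above fill a 0x48-byte view header (data,
// refcount, elemsize, elempack, allocator, dims, w/h/d/c, cstep) and compute
// the per-channel step by rounding the slice's byte size up to 16 and
// dividing back by elemsize, with a dense w*h step when dims == 4. A minimal
// sketch of that step computation, assuming ncnn-like Mat semantics (the
// names align_size, elemsize, cstep follow that convention and are not
// confirmed by this dump):

#include <cstddef>

// Round `bytes` up to a multiple of `align` (a power of two).
// Mirrors the `addq $0xf; andq $-0x10` pair in the assembly above.
static inline size_t align_size(size_t bytes, size_t align) {
    return (bytes + align - 1) & ~(align - 1);
}

// Per-channel step in elements: each channel is padded so the next one
// starts on a 16-byte boundary; 4-D blobs keep depth slices dense.
static inline size_t channel_step(int w, int h, size_t elemsize, int dims) {
    if (dims == 4)
        return (size_t)w * h;
    return align_size((size_t)w * h * elemsize, 16) / elemsize;
}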
sarq $0x20, %rbp leal (,%rbx,4), %eax movslq %eax, %r11 leaq (%r8,%r8,4), %rax sarq $0x20, %rax movq %rax, 0x10(%rsp) movq %rbx, %rax movabsq $0x600000000, %rsi # imm = 0x600000000 imulq %rsi, %rax sarq $0x20, %rax movq %rbx, %r10 movabsq $0x700000000, %rsi # imm = 0x700000000 imulq %rsi, %r10 sarq $0x20, %r10 movq %r10, (%rsp) movabsq $0x100000000, %rsi # imm = 0x100000000 addq %rsi, %r8 sarq $0x20, %r8 movq %r8, 0x68(%rsp) leal 0x1(,%rbx,2), %esi movslq %esi, %rsi movq %rsi, 0x30(%rsp) leal (%rbx,%rbx,2), %esi movslq %esi, %rsi movq %rsi, 0x48(%rsp) leal 0x1(%rbx,%rbx,2), %esi movslq %esi, %rsi movq %rsi, 0x18(%rsp) leal (%rbx,%rbx,4), %esi movslq %esi, %rsi movq %rsi, 0xa0(%rsp) leal 0x1(%rbx,%rbx,4), %esi movslq %esi, %rsi movq %rsi, 0x8(%rsp) leal (,%rbx,8), %esi subl %ebx, %esi movslq %esi, %r8 movq %r8, 0x40(%rsp) leal 0x1(%r8), %esi movslq %esi, %rsi movq %rsi, 0x150(%rsp) movl %r12d, %esi andl $-0x8, %esi movl %esi, 0x88(%rsp) movslq %ebx, %r8 movq %rdi, 0x90(%rsp) leaq (,%rdi,4), %rbx xorl %r13d, %r13d movq %rbp, 0xc0(%rsp) movq 0x78(%rsp), %rsi addq %r13, %rsi imulq %r8, %rsi shlq $0x2, %rsi addq 0x1a0(%rsp), %rsi movq 0x60(%rsp), %rdi leaq (%rsi,%rdi,4), %r14 cmpl $0x8, %r12d jl 0xd61c4 movl $0x7, %edi movq 0x10(%rsp), %rsi movq %rcx, %r15 vmovups (%rcx), %ymm0 vmovups 0x20(%rcx), %ymm1 vmovups 0x40(%rcx), %ymm2 vmovups 0x60(%rcx), %ymm3 vmovups (%rcx,%rbx), %ymm4 vmovups 0x20(%rcx,%rbx), %ymm5 vmovups 0x40(%rcx,%rbx), %ymm6 vmovups 0x60(%rcx,%rbx), %ymm7 vinsertf128 $0x1, %xmm4, %ymm0, %ymm8 vinsertf128 $0x1, %xmm5, %ymm1, %ymm9 vinsertf128 $0x1, %xmm6, %ymm2, %ymm10 vshufps $0x93, %ymm10, %ymm10, %ymm10 # ymm10 = ymm10[3,0,1,2,7,4,5,6] vinsertf128 $0x1, %xmm7, %ymm3, %ymm11 vshufps $0x93, %ymm11, %ymm11, %ymm11 # ymm11 = ymm11[3,0,1,2,7,4,5,6] vperm2f128 $0x31, %ymm4, %ymm0, %ymm0 # ymm0 = ymm0[2,3],ymm4[2,3] vperm2f128 $0x31, %ymm5, %ymm1, %ymm1 # ymm1 = ymm1[2,3],ymm5[2,3] vperm2f128 $0x31, %ymm6, %ymm2, %ymm2 # ymm2 = ymm2[2,3],ymm6[2,3] vshufps $0x93, %ymm2, %ymm2, %ymm2 # ymm2 = ymm2[3,0,1,2,7,4,5,6] vperm2f128 $0x31, %ymm7, %ymm3, %ymm3 # ymm3 = ymm3[2,3],ymm7[2,3] vshufps $0x93, %ymm3, %ymm3, %ymm3 # ymm3 = ymm3[3,0,1,2,7,4,5,6] vunpcklps %ymm11, %ymm8, %ymm4 # ymm4 = ymm8[0],ymm11[0],ymm8[1],ymm11[1],ymm8[4],ymm11[4],ymm8[5],ymm11[5] vunpcklps %ymm10, %ymm9, %ymm6 # ymm6 = ymm9[0],ymm10[0],ymm9[1],ymm10[1],ymm9[4],ymm10[4],ymm9[5],ymm10[5] vunpckhps %ymm10, %ymm9, %ymm9 # ymm9 = ymm9[2],ymm10[2],ymm9[3],ymm10[3],ymm9[6],ymm10[6],ymm9[7],ymm10[7] vunpckhps %ymm11, %ymm8, %ymm8 # ymm8 = ymm8[2],ymm11[2],ymm8[3],ymm11[3],ymm8[6],ymm11[6],ymm8[7],ymm11[7] vunpcklps %ymm3, %ymm0, %ymm10 # ymm10 = ymm0[0],ymm3[0],ymm0[1],ymm3[1],ymm0[4],ymm3[4],ymm0[5],ymm3[5] vunpcklps %ymm2, %ymm1, %ymm11 # ymm11 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[4],ymm2[4],ymm1[5],ymm2[5] vunpckhps %ymm2, %ymm1, %ymm2 # ymm2 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7] vunpckhps %ymm3, %ymm0, %ymm12 # ymm12 = ymm0[2],ymm3[2],ymm0[3],ymm3[3],ymm0[6],ymm3[6],ymm0[7],ymm3[7] vunpcklpd %ymm6, %ymm4, %ymm5 # ymm5 = ymm4[0],ymm6[0],ymm4[2],ymm6[2] vunpcklpd %ymm8, %ymm9, %ymm3 # ymm3 = ymm9[0],ymm8[0],ymm9[2],ymm8[2] vunpcklpd %ymm11, %ymm10, %ymm1 # ymm1 = ymm10[0],ymm11[0],ymm10[2],ymm11[2] vunpcklpd %ymm12, %ymm2, %ymm0 # ymm0 = ymm2[0],ymm12[0],ymm2[2],ymm12[2] vshufps $0xbe, %ymm6, %ymm4, %ymm4 # ymm4 = ymm4[2,3],ymm6[3,2],ymm4[6,7],ymm6[7,6] vshufps $0xd2, %ymm4, %ymm4, %ymm7 # ymm7 = ymm4[2,0,1,3,6,4,5,7] vshufps $0xbe, %ymm8, %ymm9, %ymm4 # ymm4 = 
ymm9[2,3],ymm8[3,2],ymm9[6,7],ymm8[7,6] vshufps $0xd2, %ymm4, %ymm4, %ymm6 # ymm6 = ymm4[2,0,1,3,6,4,5,7] vshufps $0xbe, %ymm11, %ymm10, %ymm4 # ymm4 = ymm10[2,3],ymm11[3,2],ymm10[6,7],ymm11[7,6] vshufps $0xd2, %ymm4, %ymm4, %ymm4 # ymm4 = ymm4[2,0,1,3,6,4,5,7] vshufps $0xbe, %ymm12, %ymm2, %ymm2 # ymm2 = ymm2[2,3],ymm12[3,2],ymm2[6,7],ymm12[7,6] vshufps $0xd2, %ymm2, %ymm2, %ymm2 # ymm2 = ymm2[2,0,1,3,6,4,5,7] cmpl $0x1, %edx je 0xd60f4 cmpl $0x4, %edx je 0xd6089 cmpl $0x8, %edx jne 0xd619b vmovaps %ymm5, (%r14) vmovaps %ymm7, 0x20(%r14) vmovaps %ymm3, 0x40(%r14) vmovaps %ymm6, 0x60(%r14) vmovaps %ymm1, 0x80(%r14) vmovaps %ymm4, 0xa0(%r14) vmovaps %ymm0, 0xc0(%r14) vmovaps %ymm2, 0xe0(%r14) addq $0x100, %r14 # imm = 0x100 jmp 0xd619b vinsertf128 $0x1, %xmm7, %ymm5, %ymm8 vinsertf128 $0x1, %xmm6, %ymm3, %ymm9 vinsertf128 $0x1, %xmm4, %ymm1, %ymm10 vinsertf128 $0x1, %xmm2, %ymm0, %ymm11 vperm2f128 $0x31, %ymm7, %ymm5, %ymm5 # ymm5 = ymm5[2,3],ymm7[2,3] vperm2f128 $0x31, %ymm6, %ymm3, %ymm3 # ymm3 = ymm3[2,3],ymm6[2,3] vperm2f128 $0x31, %ymm4, %ymm1, %ymm1 # ymm1 = ymm1[2,3],ymm4[2,3] vperm2f128 $0x31, %ymm2, %ymm0, %ymm0 # ymm0 = ymm0[2,3],ymm2[2,3] vmovups %ymm8, (%r14) vmovups %ymm9, 0x20(%r14) vmovups %ymm10, 0x40(%r14) vmovups %ymm11, 0x60(%r14) vmovups %ymm5, (%r14,%r11,4) vmovups %ymm3, 0x20(%r14,%r11,4) vmovups %ymm1, 0x40(%r14,%r11,4) vmovups %ymm0, 0x60(%r14,%r11,4) subq $-0x80, %r14 jmp 0xd619b vunpcklps %ymm7, %ymm5, %ymm8 # ymm8 = ymm5[0],ymm7[0],ymm5[1],ymm7[1],ymm5[4],ymm7[4],ymm5[5],ymm7[5] vunpckhps %ymm7, %ymm5, %ymm5 # ymm5 = ymm5[2],ymm7[2],ymm5[3],ymm7[3],ymm5[6],ymm7[6],ymm5[7],ymm7[7] vunpcklps %ymm6, %ymm3, %ymm7 # ymm7 = ymm3[0],ymm6[0],ymm3[1],ymm6[1],ymm3[4],ymm6[4],ymm3[5],ymm6[5] vunpckhps %ymm6, %ymm3, %ymm3 # ymm3 = ymm3[2],ymm6[2],ymm3[3],ymm6[3],ymm3[6],ymm6[6],ymm3[7],ymm6[7] vunpcklps %ymm4, %ymm1, %ymm6 # ymm6 = ymm1[0],ymm4[0],ymm1[1],ymm4[1],ymm1[4],ymm4[4],ymm1[5],ymm4[5] vunpckhps %ymm4, %ymm1, %ymm1 # ymm1 = ymm1[2],ymm4[2],ymm1[3],ymm4[3],ymm1[6],ymm4[6],ymm1[7],ymm4[7] vunpcklps %ymm2, %ymm0, %ymm4 # ymm4 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[4],ymm2[4],ymm0[5],ymm2[5] vunpckhps %ymm2, %ymm0, %ymm0 # ymm0 = ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[6],ymm2[6],ymm0[7],ymm2[7] vunpcklpd %ymm7, %ymm8, %ymm2 # ymm2 = ymm8[0],ymm7[0],ymm8[2],ymm7[2] vunpckhpd %ymm7, %ymm8, %ymm7 # ymm7 = ymm8[1],ymm7[1],ymm8[3],ymm7[3] vunpcklpd %ymm3, %ymm5, %ymm8 # ymm8 = ymm5[0],ymm3[0],ymm5[2],ymm3[2] vunpckhpd %ymm3, %ymm5, %ymm3 # ymm3 = ymm5[1],ymm3[1],ymm5[3],ymm3[3] vunpcklpd %ymm4, %ymm6, %ymm5 # ymm5 = ymm6[0],ymm4[0],ymm6[2],ymm4[2] vunpckhpd %ymm4, %ymm6, %ymm4 # ymm4 = ymm6[1],ymm4[1],ymm6[3],ymm4[3] vunpcklpd %ymm0, %ymm1, %ymm6 # ymm6 = ymm1[0],ymm0[0],ymm1[2],ymm0[2] vunpckhpd %ymm0, %ymm1, %ymm0 # ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3] vinsertf128 $0x1, %xmm5, %ymm2, %ymm1 vinsertf128 $0x1, %xmm4, %ymm7, %ymm9 vinsertf128 $0x1, %xmm6, %ymm8, %ymm10 vinsertf128 $0x1, %xmm0, %ymm3, %ymm11 vperm2f128 $0x31, %ymm5, %ymm2, %ymm2 # ymm2 = ymm2[2,3],ymm5[2,3] vperm2f128 $0x31, %ymm4, %ymm7, %ymm4 # ymm4 = ymm7[2,3],ymm4[2,3] vperm2f128 $0x31, %ymm6, %ymm8, %ymm5 # ymm5 = ymm8[2,3],ymm6[2,3] vperm2f128 $0x31, %ymm0, %ymm3, %ymm0 # ymm0 = ymm3[2,3],ymm0[2,3] vmovups %ymm1, (%r14) vmovups %ymm9, (%r14,%r8,4) vmovups %ymm10, (%r14,%r9,4) vmovups %ymm11, (%r14,%rbp,4) vmovups %ymm2, (%r14,%r11,4) vmovups %ymm4, (%r14,%rsi,4) vmovups %ymm5, (%r14,%rax,4) movq (%rsp), %rcx vmovups %ymm0, (%r14,%rcx,4) addq $0x20, %r14 addl $0x8, %edi leaq 0x80(%r15), %rcx cmpl %r12d, 
%edi jl 0xd5f56 subq $-0x80, %r15 leaq (%r15,%rbx), %rbp movl 0x88(%rsp), %ecx movl %ecx, %edi movq %r15, %rcx jmp 0xd61d2 xorl %edi, %edi movq 0x90(%rsp), %rsi leaq (%rcx,%rsi,4), %rbp movl %edi, %esi orl $0x3, %esi cmpl %r12d, %esi jge 0xd6342 movl %edi, %r10d movq %rbp, %rsi movq %rcx, %r15 vmovups (%rcx), %ymm0 vmovups 0x20(%rcx), %ymm1 vmovups (%rbp), %ymm2 vmovups 0x20(%rbp), %ymm3 vinsertf128 $0x1, %xmm2, %ymm0, %ymm4 vinsertf128 $0x1, %xmm3, %ymm1, %ymm5 vperm2f128 $0x31, %ymm2, %ymm0, %ymm0 # ymm0 = ymm0[2,3],ymm2[2,3] vshufps $0x93, %ymm0, %ymm0, %ymm0 # ymm0 = ymm0[3,0,1,2,7,4,5,6] vperm2f128 $0x31, %ymm3, %ymm1, %ymm1 # ymm1 = ymm1[2,3],ymm3[2,3] vshufps $0x93, %ymm1, %ymm1, %ymm1 # ymm1 = ymm1[3,0,1,2,7,4,5,6] vunpcklps %ymm1, %ymm4, %ymm3 # ymm3 = ymm4[0],ymm1[0],ymm4[1],ymm1[1],ymm4[4],ymm1[4],ymm4[5],ymm1[5] vunpckhps %ymm1, %ymm4, %ymm1 # ymm1 = ymm4[2],ymm1[2],ymm4[3],ymm1[3],ymm4[6],ymm1[6],ymm4[7],ymm1[7] vunpcklps %ymm0, %ymm5, %ymm4 # ymm4 = ymm5[0],ymm0[0],ymm5[1],ymm0[1],ymm5[4],ymm0[4],ymm5[5],ymm0[5] vunpckhps %ymm0, %ymm5, %ymm5 # ymm5 = ymm5[2],ymm0[2],ymm5[3],ymm0[3],ymm5[6],ymm0[6],ymm5[7],ymm0[7] vunpcklpd %ymm4, %ymm3, %ymm2 # ymm2 = ymm3[0],ymm4[0],ymm3[2],ymm4[2] vunpcklpd %ymm1, %ymm5, %ymm0 # ymm0 = ymm5[0],ymm1[0],ymm5[2],ymm1[2] vshufps $0xbe, %ymm4, %ymm3, %ymm3 # ymm3 = ymm3[2,3],ymm4[3,2],ymm3[6,7],ymm4[7,6] vshufps $0xd2, %ymm3, %ymm3, %ymm3 # ymm3 = ymm3[2,0,1,3,6,4,5,7] vshufps $0xbe, %ymm1, %ymm5, %ymm1 # ymm1 = ymm5[2,3],ymm1[3,2],ymm5[6,7],ymm1[7,6] vshufps $0xd2, %ymm1, %ymm1, %ymm1 # ymm1 = ymm1[2,0,1,3,6,4,5,7] cmpl $0x1, %edx je 0xd62b3 cmpl $0x4, %edx je 0xd627d cmpl $0x8, %edx jne 0xd631b vmovaps %ymm2, (%r14) vmovaps %ymm3, 0x20(%r14) vmovaps %ymm0, 0x40(%r14) vmovaps %ymm1, 0x60(%r14) subq $-0x80, %r14 jmp 0xd631b vinsertf128 $0x1, %xmm3, %ymm2, %ymm4 vinsertf128 $0x1, %xmm1, %ymm0, %ymm5 vperm2f128 $0x31, %ymm3, %ymm2, %ymm2 # ymm2 = ymm2[2,3],ymm3[2,3] vperm2f128 $0x31, %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[2,3],ymm1[2,3] vmovups %ymm4, (%r14) vmovups %ymm5, 0x20(%r14) vmovups %ymm2, (%r14,%r11,4) vmovups %ymm0, 0x20(%r14,%r11,4) addq $0x40, %r14 jmp 0xd631b vunpcklps %ymm3, %ymm2, %ymm4 # ymm4 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5] vunpckhps %ymm3, %ymm2, %ymm2 # ymm2 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7] vunpcklps %ymm1, %ymm0, %ymm3 # ymm3 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] vunpckhps %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] vunpcklpd %ymm3, %ymm4, %ymm1 # ymm1 = ymm4[0],ymm3[0],ymm4[2],ymm3[2] vunpckhpd %ymm3, %ymm4, %ymm3 # ymm3 = ymm4[1],ymm3[1],ymm4[3],ymm3[3] vunpcklpd %ymm0, %ymm2, %ymm4 # ymm4 = ymm2[0],ymm0[0],ymm2[2],ymm0[2] vunpckhpd %ymm0, %ymm2, %ymm0 # ymm0 = ymm2[1],ymm0[1],ymm2[3],ymm0[3] vmovups %xmm1, (%r14) vmovups %xmm3, (%r14,%r8,4) vmovups %xmm4, (%r14,%r9,4) movq 0xc0(%rsp), %rcx vmovups %xmm0, (%r14,%rcx,4) vextractf128 $0x1, %ymm1, (%r14,%r11,4) movq 0x10(%rsp), %rcx vextractf128 $0x1, %ymm3, (%r14,%rcx,4) vextractf128 $0x1, %ymm4, (%r14,%rax,4) movq (%rsp), %rcx vextractf128 $0x1, %ymm0, (%r14,%rcx,4) addq $0x10, %r14 leal 0x4(%r10), %edi addl $0x7, %r10d leaq 0x40(%r15), %rcx leaq 0x40(%rsi), %rbp cmpl %r12d, %r10d jl 0xd61e0 addq $0x40, %r15 addq $0x40, %rsi movq %rsi, %rbp movq %r15, %rcx movq %rcx, %r15 movl %edi, %esi orl $0x1, %esi movq %rbp, %rcx cmpl %r12d, %esi movq 0x68(%rsp), %r10 movq 0xc0(%rsp), %rbp jge 0xd6480 vmovups (%r15), %ymm0 vmovups (%rcx), %ymm1 
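// Editor's note: the long vunpcklps/vunpckhps/vunpcklpd/vperm2f128 chains in
// the eight-wide loop above are the standard 8x8 single-precision transpose.
// A self-contained intrinsics sketch of that idiom follows; it is
// illustrative only -- the real code fuses the pack-1/4/8 store variants and
// extra lane rotations into the same sequence.

#include <immintrin.h>

// Transpose an 8x8 tile of floats held in r0..r7, one row per register.
static inline void transpose8x8(__m256& r0, __m256& r1, __m256& r2, __m256& r3,
                                __m256& r4, __m256& r5, __m256& r6, __m256& r7) {
    __m256 t0 = _mm256_unpacklo_ps(r0, r1), t1 = _mm256_unpackhi_ps(r0, r1);
    __m256 t2 = _mm256_unpacklo_ps(r2, r3), t3 = _mm256_unpackhi_ps(r2, r3);
    __m256 t4 = _mm256_unpacklo_ps(r4, r5), t5 = _mm256_unpackhi_ps(r4, r5);
    __m256 t6 = _mm256_unpacklo_ps(r6, r7), t7 = _mm256_unpackhi_ps(r6, r7);
    __m256 u0 = _mm256_shuffle_ps(t0, t2, _MM_SHUFFLE(1, 0, 1, 0));
    __m256 u1 = _mm256_shuffle_ps(t0, t2, _MM_SHUFFLE(3, 2, 3, 2));
    __m256 u2 = _mm256_shuffle_ps(t1, t3, _MM_SHUFFLE(1, 0, 1, 0));
    __m256 u3 = _mm256_shuffle_ps(t1, t3, _MM_SHUFFLE(3, 2, 3, 2));
    __m256 u4 = _mm256_shuffle_ps(t4, t6, _MM_SHUFFLE(1, 0, 1, 0));
    __m256 u5 = _mm256_shuffle_ps(t4, t6, _MM_SHUFFLE(3, 2, 3, 2));
    __m256 u6 = _mm256_shuffle_ps(t5, t7, _MM_SHUFFLE(1, 0, 1, 0));
    __m256 u7 = _mm256_shuffle_ps(t5, t7, _MM_SHUFFLE(3, 2, 3, 2));
    // Swap 128-bit halves to finish: row i takes its low half from one
    // intermediate and its high half from another (vperm2f128 in the asm).
    r0 = _mm256_permute2f128_ps(u0, u4, 0x20);
    r1 = _mm256_permute2f128_ps(u1, u5, 0x20);
    r2 = _mm256_permute2f128_ps(u2, u6, 0x20);
    r3 = _mm256_permute2f128_ps(u3, u7, 0x20);
    r4 = _mm256_permute2f128_ps(u0, u4, 0x31);
    r5 = _mm256_permute2f128_ps(u1, u5, 0x31);
    r6 = _mm256_permute2f128_ps(u2, u6, 0x31);
    r7 = _mm256_permute2f128_ps(u3, u7, 0x31);
}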
vinsertf128 $0x1, %xmm1, %ymm0, %ymm2 vperm2f128 $0x31, %ymm1, %ymm0, %ymm1 # ymm1 = ymm0[2,3],ymm1[2,3] vblendps $0xaa, %ymm1, %ymm2, %ymm0 # ymm0 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7] vblendps $0xaa, %ymm2, %ymm1, %ymm1 # ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7] cmpl $0x1, %edx je 0xd63cb cmpl $0x4, %edx je 0xd63ab cmpl $0x8, %edx jne 0xd6467 vmovups %ymm0, (%r14) vmovups %ymm1, 0x20(%r14) addq $0x40, %r14 jmp 0xd6467 vinsertf128 $0x1, %xmm1, %ymm0, %ymm2 vperm2f128 $0x31, %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[2,3],ymm1[2,3] vmovups %ymm2, (%r14) vmovups %ymm0, (%r14,%r11,4) addq $0x20, %r14 jmp 0xd6467 vunpcklps %xmm1, %xmm0, %xmm2 # xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] vmovlps %xmm2, (%r14) vextractps $0x1, %xmm0, (%r14,%r8,4) vextractps $0x1, %xmm1, (%r14,%r10,4) vextractps $0x2, %xmm0, (%r14,%r9,4) movq 0x30(%rsp), %rsi vextractps $0x2, %xmm1, (%r14,%rsi,4) movq 0x48(%rsp), %rsi vextractps $0x3, %xmm0, (%r14,%rsi,4) movq 0x18(%rsp), %rsi vextractps $0x3, %xmm1, (%r14,%rsi,4) vextractf128 $0x1, %ymm1, %xmm1 vextractf128 $0x1, %ymm0, %xmm0 vunpcklps %xmm1, %xmm0, %xmm2 # xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] vmovlps %xmm2, (%r14,%r11,4) movq 0xa0(%rsp), %rsi vextractps $0x1, %xmm0, (%r14,%rsi,4) movq 0x8(%rsp), %rsi vextractps $0x1, %xmm1, (%r14,%rsi,4) vunpckhps %xmm1, %xmm0, %xmm2 # xmm2 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] vmovlps %xmm2, (%r14,%rax,4) movq 0x40(%rsp), %rsi vextractps $0x3, %xmm0, (%r14,%rsi,4) movq 0x150(%rsp), %rsi vextractps $0x3, %xmm1, (%r14,%rsi,4) addq $0x8, %r14 addq $0x20, %r15 addq $0x20, %rcx leal 0x2(%rdi), %esi addl $0x3, %edi cmpl %r12d, %edi movl %esi, %edi jl 0xd6363 movl %r12d, %r10d subl %edi, %r10d movq 0x10(%rsp), %rsi jle 0xd6519 vmovaps (%r15), %xmm1 vmovaps (%rcx), %xmm0 cmpl $0x1, %edx je 0xd64cb cmpl $0x4, %edx je 0xd64ba cmpl $0x8, %edx jne 0xd6508 vmovups %xmm0, 0x10(%r14) vmovups %xmm1, (%r14) addq $0x20, %r14 jmp 0xd6508 vmovaps %xmm1, (%r14) vmovaps %xmm0, (%r14,%r11,4) addq $0x10, %r14 jmp 0xd6508 vmovss %xmm1, (%r14) vextractps $0x1, %xmm1, (%r14,%r8,4) vextractps $0x2, %xmm1, (%r14,%r9,4) vextractps $0x3, %xmm1, (%r14,%rbp,4) vmovss %xmm0, (%r14,%r11,4) vextractps $0x1, %xmm0, (%r14,%rsi,4) vextractps $0x2, %xmm0, (%r14,%rax,4) movq (%rsp), %rdi vextractps $0x3, %xmm0, (%r14,%rdi,4) addq $0x4, %r14 addq $0x10, %r15 addq $0x10, %rcx decl %r10d jne 0xd6491 leaq 0x8(%r13), %r15 addq $0xf, %r13 cmpq 0x70(%rsp), %r13 movq %r15, %r13 jb 0xd5f21 movq 0x58(%rsp), %rbx movl %r15d, %eax orl $0x3, %eax cmpl 0x80(%rsp), %eax jge 0xd68d0 movl %edx, %eax imull 0x38(%rsp), %eax cltq movq %rbx, %rsi shlq $0x20, %rsi movslq %ebx, %rdi leal (%rbx,%rbx), %r8d movslq %r8d, %r8 movq %rbx, %r9 movabsq $0x300000000, %r10 # imm = 0x300000000 imulq %r10, %r9 sarq $0x20, %r9 movabsq $0x100000000, %r10 # imm = 0x100000000 addq %r10, %rsi sarq $0x20, %rsi leal (%rbx,%rbx,2), %r10d movslq %r10d, %r10 leal (%rbx,%rbx,2), %r11d incl %r11d movslq %r11d, %r11 movl %r12d, %ebx andl $-0x8, %ebx movl %r15d, %r14d movq 0x78(%rsp), %r15 addq %r14, %r15 imulq %rdi, %r15 shlq $0x2, %r15 addq 0x1a0(%rsp), %r15 xorl %r13d, %r13d leaq (%r15,%rax,4), %r15 cmpl $0x8, %r12d jl 0xd6722 movl $0x7, %ebp vmovaps (%rcx), %xmm0 vmovaps 0x10(%rcx), %xmm1 vmovaps 0x20(%rcx), %xmm2 vpermilps $0x93, 0x40(%rcx), %xmm3 # xmm3 = mem[3,0,1,2] vpermilps $0x93, 0x50(%rcx), %xmm4 # xmm4 = mem[3,0,1,2] vpermilps $0x93, 0x60(%rcx), %xmm5 # xmm5 = mem[3,0,1,2] vmovaps 0x30(%rcx), %xmm6 vpermilps $0x93, 0x70(%rcx), %xmm7 # xmm7 = mem[3,0,1,2] 
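// Editor's note: the nest of 8-/4-/2-/1-wide loops around this point all
// perform one logical job -- converting a tensor between "packed" element
// layouts (1, 4, or 8 channel values stored per element, selected by the
// cmpl $0x1/$0x4/$0x8 ladders). A scalar reference for that conversion,
// under the assumptions that channels divides both pack factors and that the
// 16-byte cstep padding is ignored; convert_packing_ref is a hypothetical
// name, not one taken from this dump:

#include <cstddef>

void convert_packing_ref(const float* src, float* dst,
                         int w, int h, int channels,
                         int src_pack, int dst_pack) {
    for (int c = 0; c < channels; c++) {
        for (int i = 0; i < w * h; i++) {
            // channel-major coordinate (c, i) -> offset in each packed layout
            size_t s = (size_t)(c / src_pack) * w * h * src_pack
                     + (size_t)i * src_pack + (c % src_pack);
            size_t d = (size_t)(c / dst_pack) * w * h * dst_pack
                     + (size_t)i * dst_pack + (c % dst_pack);
            dst[d] = src[s];
        }
    }
}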
vunpcklps %xmm5, %xmm0, %xmm8 # xmm8 = xmm0[0],xmm5[0],xmm0[1],xmm5[1] vunpckhps %xmm5, %xmm0, %xmm9 # xmm9 = xmm0[2],xmm5[2],xmm0[3],xmm5[3] vunpcklps %xmm7, %xmm1, %xmm10 # xmm10 = xmm1[0],xmm7[0],xmm1[1],xmm7[1] vunpckhps %xmm7, %xmm1, %xmm11 # xmm11 = xmm1[2],xmm7[2],xmm1[3],xmm7[3] vunpcklps %xmm3, %xmm2, %xmm7 # xmm7 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] vunpckhps %xmm3, %xmm2, %xmm2 # xmm2 = xmm2[2],xmm3[2],xmm2[3],xmm3[3] vunpcklps %xmm4, %xmm6, %xmm3 # xmm3 = xmm6[0],xmm4[0],xmm6[1],xmm4[1] vunpckhps %xmm4, %xmm6, %xmm12 # xmm12 = xmm6[2],xmm4[2],xmm6[3],xmm4[3] vmovlhps %xmm7, %xmm8, %xmm5 # xmm5 = xmm8[0],xmm7[0] vmovlhps %xmm9, %xmm2, %xmm4 # xmm4 = xmm2[0],xmm9[0] vmovlhps %xmm3, %xmm10, %xmm1 # xmm1 = xmm10[0],xmm3[0] vmovlhps %xmm11, %xmm12, %xmm0 # xmm0 = xmm12[0],xmm11[0] vunpckhps %xmm7, %xmm8, %xmm6 # xmm6 = xmm8[2],xmm7[2],xmm8[3],xmm7[3] vshufps $0x63, %xmm6, %xmm6, %xmm7 # xmm7 = xmm6[3,0,2,1] vunpckhps %xmm9, %xmm2, %xmm2 # xmm2 = xmm2[2],xmm9[2],xmm2[3],xmm9[3] vshufps $0x63, %xmm2, %xmm2, %xmm6 # xmm6 = xmm2[3,0,2,1] vunpckhps %xmm3, %xmm10, %xmm2 # xmm2 = xmm10[2],xmm3[2],xmm10[3],xmm3[3] vshufps $0x63, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[3,0,2,1] vunpckhps %xmm11, %xmm12, %xmm2 # xmm2 = xmm12[2],xmm11[2],xmm12[3],xmm11[3] vshufps $0x63, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[3,0,2,1] cmpl $0x1, %edx je 0xd6699 cmpl $0x4, %edx jne 0xd670f vmovaps %xmm5, (%r15) vmovaps %xmm7, 0x10(%r15) vmovaps %xmm4, 0x20(%r15) vmovaps %xmm6, 0x30(%r15) vmovaps %xmm1, 0x40(%r15) vmovaps %xmm3, 0x50(%r15) vmovaps %xmm0, 0x60(%r15) vmovaps %xmm2, 0x70(%r15) subq $-0x80, %r15 jmp 0xd670f vunpcklps %xmm7, %xmm5, %xmm8 # xmm8 = xmm5[0],xmm7[0],xmm5[1],xmm7[1] vunpckhps %xmm7, %xmm5, %xmm5 # xmm5 = xmm5[2],xmm7[2],xmm5[3],xmm7[3] vunpcklps %xmm6, %xmm4, %xmm7 # xmm7 = xmm4[0],xmm6[0],xmm4[1],xmm6[1] vunpckhps %xmm6, %xmm4, %xmm4 # xmm4 = xmm4[2],xmm6[2],xmm4[3],xmm6[3] vmovlhps %xmm7, %xmm8, %xmm6 # xmm6 = xmm8[0],xmm7[0] vunpckhpd %xmm7, %xmm8, %xmm7 # xmm7 = xmm8[1],xmm7[1] vmovlhps %xmm4, %xmm5, %xmm8 # xmm8 = xmm5[0],xmm4[0] vunpckhpd %xmm4, %xmm5, %xmm4 # xmm4 = xmm5[1],xmm4[1] vunpcklps %xmm3, %xmm1, %xmm5 # xmm5 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] vunpckhps %xmm3, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3] vunpcklps %xmm2, %xmm0, %xmm3 # xmm3 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] vunpckhps %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3] vmovlhps %xmm3, %xmm5, %xmm2 # xmm2 = xmm5[0],xmm3[0] vunpckhpd %xmm3, %xmm5, %xmm3 # xmm3 = xmm5[1],xmm3[1] vmovlhps %xmm0, %xmm1, %xmm5 # xmm5 = xmm1[0],xmm0[0] vunpckhpd %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[1],xmm0[1] vmovups %xmm6, (%r15) vmovups %xmm2, 0x10(%r15) vmovups %xmm7, (%r15,%rdi,4) vmovups %xmm3, 0x10(%r15,%rdi,4) vmovups %xmm8, (%r15,%r8,4) vmovups %xmm5, 0x10(%r15,%r8,4) vmovups %xmm4, (%r15,%r9,4) vmovups %xmm0, 0x10(%r15,%r9,4) addq $0x20, %r15 subq $-0x80, %rcx addl $0x8, %ebp cmpl %r12d, %ebp jl 0xd65cf movl %ebx, %r13d movl %r13d, %ebp orl $0x3, %ebp cmpl %r12d, %ebp jge 0xd67ec vmovaps (%rcx), %xmm0 vpermilps $0x93, 0x10(%rcx), %xmm1 # xmm1 = mem[3,0,1,2] vpermilps $0x93, 0x30(%rcx), %xmm2 # xmm2 = mem[3,0,1,2] vmovaps 0x20(%rcx), %xmm3 vunpcklps %xmm2, %xmm0, %xmm4 # xmm4 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] vunpckhps %xmm2, %xmm0, %xmm2 # xmm2 = xmm0[2],xmm2[2],xmm0[3],xmm2[3] vunpcklps %xmm1, %xmm3, %xmm5 # xmm5 = xmm3[0],xmm1[0],xmm3[1],xmm1[1] vunpckhps %xmm1, %xmm3, %xmm6 # xmm6 = xmm3[2],xmm1[2],xmm3[3],xmm1[3] vmovlhps %xmm5, %xmm4, %xmm1 # xmm1 = xmm4[0],xmm5[0] vmovlhps %xmm2, %xmm6, %xmm0 # xmm0 = xmm6[0],xmm2[0] 
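// Editor's note: the xmm sequences above handle the four-wide tail tiles
// with the classic 4x4 float transpose (unpacklo/unpackhi feeding
// movelh/movehl). A minimal sketch of that building block; the real code
// additionally rotates some inputs with vpermilps $0x93 so the rotated tile
// lands directly in the destination's packing order.

#include <immintrin.h>

static inline void transpose4x4(__m128& r0, __m128& r1, __m128& r2, __m128& r3) {
    __m128 t0 = _mm_unpacklo_ps(r0, r1); // a0 b0 a1 b1
    __m128 t1 = _mm_unpackhi_ps(r0, r1); // a2 b2 a3 b3
    __m128 t2 = _mm_unpacklo_ps(r2, r3); // c0 d0 c1 d1
    __m128 t3 = _mm_unpackhi_ps(r2, r3); // c2 d2 c3 d3
    r0 = _mm_movelh_ps(t0, t2);          // a0 b0 c0 d0
    r1 = _mm_movehl_ps(t2, t0);          // a1 b1 c1 d1
    r2 = _mm_movelh_ps(t1, t3);          // a2 b2 c2 d2
    r3 = _mm_movehl_ps(t3, t1);          // a3 b3 c3 d3
}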
vunpckhps %xmm5, %xmm4, %xmm3 # xmm3 = xmm4[2],xmm5[2],xmm4[3],xmm5[3] vshufps $0x63, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[3,0,2,1] vunpckhps %xmm2, %xmm6, %xmm2 # xmm2 = xmm6[2],xmm2[2],xmm6[3],xmm2[3] vshufps $0x63, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[3,0,2,1] cmpl $0x1, %edx je 0xd6799 cmpl $0x4, %edx jne 0xd67d4 vmovaps %xmm1, (%r15) vmovaps %xmm3, 0x10(%r15) vmovaps %xmm0, 0x20(%r15) vmovaps %xmm2, 0x30(%r15) addq $0x40, %r15 jmp 0xd67d4 vunpcklps %xmm3, %xmm1, %xmm4 # xmm4 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] vunpckhps %xmm3, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3] vunpcklps %xmm2, %xmm0, %xmm3 # xmm3 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] vunpckhps %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3] vmovlhps %xmm3, %xmm4, %xmm2 # xmm2 = xmm4[0],xmm3[0] vunpckhpd %xmm3, %xmm4, %xmm3 # xmm3 = xmm4[1],xmm3[1] vmovlhps %xmm0, %xmm1, %xmm4 # xmm4 = xmm1[0],xmm0[0] vunpckhpd %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[1],xmm0[1] vmovups %xmm2, (%r15) vmovups %xmm3, (%r15,%rdi,4) vmovups %xmm4, (%r15,%r8,4) vmovups %xmm0, (%r15,%r9,4) addq $0x10, %r15 addq $0x40, %rcx leal 0x4(%r13), %ebp addl $0x7, %r13d cmpl %r12d, %r13d movl %ebp, %r13d jl 0xd6731 movl %r13d, %ebp orl $0x1, %ebp cmpl %r12d, %ebp jge 0xd686e vmovaps (%rcx), %xmm0 vmovaps 0x10(%rcx), %xmm1 cmpl $0x1, %edx je 0xd6827 cmpl $0x4, %edx jne 0xd685a vblendps $0xa, %xmm0, %xmm1, %xmm2 # xmm2 = xmm1[0],xmm0[1],xmm1[2],xmm0[3] vblendps $0xa, %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3] vmovaps %xmm0, (%r15) vmovaps %xmm2, 0x10(%r15) addq $0x20, %r15 jmp 0xd685a vunpcklps %xmm1, %xmm0, %xmm2 # xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] vmovlps %xmm2, (%r15) vextractps $0x1, %xmm1, (%r15,%rdi,4) vextractps $0x1, %xmm0, (%r15,%rsi,4) vunpckhps %xmm1, %xmm0, %xmm2 # xmm2 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] vmovlps %xmm2, (%r15,%r8,4) vextractps $0x3, %xmm1, (%r15,%r10,4) vextractps $0x3, %xmm0, (%r15,%r11,4) addq $0x8, %r15 addq $0x20, %rcx leal 0x2(%r13), %ebp addl $0x3, %r13d cmpl %r12d, %r13d movl %ebp, %r13d jl 0xd67f7 movl %r12d, %ebp subl %r13d, %ebp jle 0xd68b5 vmovaps (%rcx), %xmm0 cmpl $0x1, %edx je 0xd688f cmpl $0x4, %edx jne 0xd68ad vmovaps %xmm0, (%r15) addq $0x10, %r15 jmp 0xd68ad vmovss %xmm0, (%r15) vextractps $0x1, %xmm0, (%r15,%rdi,4) vextractps $0x2, %xmm0, (%r15,%r8,4) vextractps $0x3, %xmm0, (%r15,%r9,4) addq $0x4, %r15 addq $0x10, %rcx decl %ebp jne 0xd6876 leaq 0x4(%r14), %r15 addq $0x7, %r14 cmpq 0x28(%rsp), %r14 movq %r15, %r14 jl 0xd65a1 movq 0x58(%rsp), %rbx movl %r15d, %eax orl $0x1, %eax cmpl 0x80(%rsp), %eax jge 0xd6b44 movslq 0x38(%rsp), %rdi movq %rbx, %r9 shlq $0x20, %r9 movslq %ebx, %rsi movabsq $0x100000000, %rax # imm = 0x100000000 addq %rax, %r9 sarq $0x20, %r9 movl %r12d, %eax andl $-0x8, %eax movl %eax, 0x18(%rsp) movl %r15d, %r8d movq 0x78(%rsp), %rax addq %r8, %rax leaq (,%rax,4), %rdx imulq %rsi, %rdx leaq (%rdx,%rdi,4), %r11 leaq (,%rsi,8), %rbx leaq 0x4(,%rax,4), %rax imulq %rsi, %rax movq %rdi, 0x30(%rsp) leaq (%rax,%rdi,4), %r13 addq $0x10, %r13 shlq $0x2, %r9 movq %r9, 0x48(%rsp) leaq (,%rsi,4), %rax movq %rax, (%rsp) movq %rbx, 0xa0(%rsp) movq 0x1a0(%rsp), %r14 cmpl $0x8, %r12d jl 0xd6a0e movq %r13, %rdi leaq (%r14,%r13), %r15 addq %r11, %r14 movl $0x7, %eax xorl %r13d, %r13d movq %rcx, %rdx movq %rdx, %rbp leaq (%rcx,%r13,2), %rdx addq $0x40, %rdx vmovaps -0x40(%rdx), %xmm0 vpermilps $0xb1, -0x20(%rdx), %xmm1 # xmm1 = mem[1,0,3,2] vpermilps $0xb1, -0x10(%rdx), %xmm2 # xmm2 = mem[1,0,3,2] vmovaps -0x30(%rdx), %xmm3 vunpcklps %xmm1, %xmm0, %xmm4 # xmm4 = 
xmm0[0],xmm1[0],xmm0[1],xmm1[1] vunpckhps %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] vunpcklps %xmm2, %xmm3, %xmm1 # xmm1 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] vunpckhps %xmm2, %xmm3, %xmm2 # xmm2 = xmm3[2],xmm2[2],xmm3[3],xmm2[3] vmovlhps %xmm0, %xmm4, %xmm3 # xmm3 = xmm4[0],xmm0[0] vmovlhps %xmm2, %xmm1, %xmm5 # xmm5 = xmm1[0],xmm2[0] vshufps $0xbb, %xmm0, %xmm4, %xmm0 # xmm0 = xmm4[3,2],xmm0[3,2] vshufps $0xbb, %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[3,2],xmm2[3,2] vmovups %xmm3, (%r14,%r13) vmovups %xmm5, 0x10(%r14,%r13) vmovups %xmm0, -0x10(%r15,%r13) vmovups %xmm1, (%r15,%r13) addq $0x20, %r13 addl $0x8, %eax cmpl %r12d, %eax jl 0xd698d addq $0x40, %rbp addq %r13, %r14 movl 0x18(%rsp), %eax movl %eax, %r15d movq %rbp, %rcx movq %rdi, %r13 jmp 0xd6a2a movq 0x78(%rsp), %rax addq %r8, %rax imulq %rsi, %rax xorl %r15d, %r15d leaq (%r14,%rax,4), %rax movq 0x30(%rsp), %rdx leaq (%rax,%rdx,4), %r14 movl %r15d, %eax orl $0x3, %eax cmpl %r12d, %eax jge 0xd6a76 vmovaps (%rcx), %xmm0 vmovaps 0x10(%rcx), %xmm1 vunpcklps %xmm1, %xmm0, %xmm2 # xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] vunpckhps %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] vmovlhps %xmm0, %xmm2, %xmm1 # xmm1 = xmm2[0],xmm0[0] vunpckhps %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3] vshufps $0x36, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[2,1,3,0] vmovups %xmm1, (%r14) vmovups %xmm0, (%r14,%rsi,4) addq $0x10, %r14 addq $0x20, %rcx leal 0x4(%r15), %eax addl $0x7, %r15d cmpl %r12d, %r15d movl %eax, %r15d jl 0xd6a35 movl %r15d, %eax orl $0x1, %eax cmpl %r12d, %eax jge 0xd6af8 movq %r13, 0xc0(%rsp) movq %r11, 0x10(%rsp) movq 0x48(%rsp), %rax addq %r14, %rax movq (%rsp), %rdx leaq (%r14,%rdx), %r13 movl %r15d, %edi xorl %edx, %edx xorl %ebx, %ebx xorl %ebp, %ebp movl 0x8(%rcx,%rbp,8), %r10d movl 0xc(%rcx,%rbp,8), %r9d movq (%rcx,%rbp,8), %r11 movq %r11, (%r14,%rbp,4) movl %r10d, (%r13,%rbp,4) movl %r9d, (%rax,%rbp,4) addq $0x2, %rbp addq $0x10, %rbx addq $0x8, %rdx leal (%rdi,%rbp), %r9d incl %r9d cmpl %r12d, %r9d jl 0xd6aa7 addq %rbx, %rcx addq %rdx, %r14 addl %ebp, %r15d movq 0x10(%rsp), %r11 movq 0xa0(%rsp), %rbx movq 0xc0(%rsp), %r13 movl %r12d, %eax subl %r15d, %eax jle 0xd6b23 movq (%rsp), %rdx addq %r14, %rdx xorl %edi, %edi movl (%rcx), %r9d movl 0x4(%rcx), %r10d movl %r9d, (%r14,%rdi,4) movl %r10d, (%rdx,%rdi,4) addq $0x8, %rcx incq %rdi cmpl %edi, %eax jne 0xd6b09 leaq 0x2(%r8), %r15 addq $0x3, %r8 addq %rbx, %r11 addq %rbx, %r13 cmpq 0x28(%rsp), %r8 movq %r15, %r8 jl 0xd6966 movq 0x58(%rsp), %rbx cmpl 0x80(%rsp), %r15d jge 0xd6c2e movslq 0x38(%rsp), %rax movl %r12d, %edx andl $-0x8, %edx movslq %r15d, %rsi movslq %ebx, %rdi movq 0x78(%rsp), %r8 addq %rsi, %r8 imulq %rdi, %r8 shlq $0x2, %r8 addq 0x1a0(%rsp), %r8 xorl %r9d, %r9d leaq (%r8,%rax,4), %r8 cmpl $0x8, %r12d jl 0xd6bab movl $0x7, %r9d vmovups (%rcx), %ymm0 vmovups %ymm0, (%r8) addq $0x20, %r8 addq $0x20, %rcx addl $0x8, %r9d cmpl %r12d, %r9d jl 0xd6b8e movl %edx, %r9d movl %r9d, %r10d orl $0x3, %r10d cmpl %r12d, %r10d jge 0xd6bd8 vmovups (%rcx), %xmm0 vmovups %xmm0, (%r8) addq $0x10, %r8 addq $0x10, %rcx leal 0x4(%r9), %r10d addl $0x7, %r9d cmpl %r12d, %r9d movl %r10d, %r9d jl 0xd6bb7 movl %r9d, %r10d orl $0x1, %r10d cmpl %r12d, %r10d jge 0xd6c02 movq (%rcx), %r10 movq %r10, (%r8) addq $0x8, %r8 addq $0x8, %rcx leal 0x2(%r9), %r10d addl $0x3, %r9d cmpl %r12d, %r9d movl %r10d, %r9d jl 0xd6be4 movl %r12d, %r10d subl %r9d, %r10d jle 0xd6c20 xorl %r9d, %r9d movl (%rcx), %r11d movl %r11d, (%r8,%r9,4) addq $0x4, %rcx incq %r9 cmpl %r9d, %r10d jne 
0xd6c0d incq %rsi cmpq 0x28(%rsp), %rsi jne 0xd6b63 movl 0x14c(%rsp), %eax movl 0x38(%rsp), %ecx addl %eax, %ecx movq 0xb0(%rsp), %r12 cmpl %r12d, %ecx movq 0xb8(%rsp), %r13 jl 0xd5b3b movq 0x2e8(%rsp), %rax testq %rax, %rax je 0xd6c96 lock decl (%rax) jne 0xd6c96 movq 0x2e0(%rsp), %rsi movq 0x300(%rsp), %rdi testq %rdi, %rdi je 0xd6c86 movq (%rdi), %rax vzeroupper callq *0x18(%rax) jmp 0xd6c96 testq %rsi, %rsi je 0xd6c96 movq %rsi, %rdi vzeroupper callq 0x244a0 movl 0x98(%rsp), %edx incl %edx cmpl 0xe0(%rsp), %edx movq 0xe8(%rsp), %rcx jne 0xd5a0d xorl %ebx, %ebx movq 0x298(%rsp), %rax testq %rax, %rax je 0xdae62 lock decl (%rax) jne 0xdae62 movq 0x290(%rsp), %rsi movq 0x2b0(%rsp), %rdi testq %rdi, %rdi je 0xdae52 movq (%rdi), %rax vzeroupper callq *0x18(%rax) jmp 0xdae62 callq 0x25adc jmp 0xd35c8 callq 0x3c821 testl %eax, %eax je 0xdaada leaq 0xf0(%rsp), %rdi leaq 0x1a0(%rsp), %rsi movq %rbx, %rdx movl %r15d, %ecx movq 0x178(%rsp), %r8 callq 0x168407 movl %eax, %ebx testl %ebx, %ebx jne 0xdb435 movq 0x178(%rsp), %rbx cmpb $0x1, 0x27(%rbx) jne 0xd6e1d movl 0x1b8(%rsp), %eax cmpl $0x65, 0x28c(%rsp) movq 0xd0(%rsp), %r15 jl 0xd6e82 cmpl $0x4, %eax movq 0x338(%rsp), %r14 jne 0xd6e3b movl $0x80000001, %eax # imm = 0x80000001 andl 0x1d8(%rsp), %eax cmpl $0x1, %eax jne 0xdb33d leaq 0x200(%rsp), %rsi movq $0x0, 0x40(%rsi) vpxor %xmm0, %xmm0, %xmm0 vmovdqa %xmm0, (%rsi) vmovdqu %xmm0, 0xc(%rsi) vmovdqa %xmm0, 0x20(%rsi) vmovdqu %xmm0, 0x2c(%rsi) leaq 0x1a0(%rsp), %rdi movl $0x1, %edx movq %rbx, %rcx vzeroupper callq 0x2c9a4 movq 0x208(%rsp), %rax testq %rax, %rax je 0xd6ddf lock incl (%rax) movq 0x1a8(%rsp), %rax testq %rax, %rax je 0xdb1b9 lock decl (%rax) jne 0xdb1b9 movq 0x1a0(%rsp), %rsi movq 0x1c0(%rsp), %rdi testq %rdi, %rdi je 0xdb1ac movq (%rdi), %rax callq *0x18(%rax) jmp 0xdb1b9 cmpl $0x65, 0x28c(%rsp) movq 0x338(%rsp), %r14 movq 0xd0(%rsp), %r15 jl 0xdb3e5 leaq 0x448(%r15), %rdx leaq 0x280(%r15), %rcx leaq 0x1a8(%r15), %r8 movl 0x10c(%r15), %r9d addq $0x110, %r15 # imm = 0x110 movq %rbx, %rax xorl %ebx, %ebx leaq 0x1a0(%rsp), %rdi movq %r14, %rsi pushq %rax pushq %r15 vzeroupper callq 0x2d305 addq $0x10, %rsp jmp 0xdb435 cmpl $0x4, %eax movq 0x338(%rsp), %r14 jne 0xdb3e5 testb $0x1, 0x1d8(%rsp) jne 0xdb3e5 leaq 0x200(%rsp), %rsi movq $0x0, 0x40(%rsi) vpxor %xmm0, %xmm0, %xmm0 vmovdqa %xmm0, (%rsi) vmovdqu %xmm0, 0xc(%rsi) vmovdqa %xmm0, 0x20(%rsi) vmovdqu %xmm0, 0x2c(%rsi) leaq 0x1a0(%rsp), %rdi movl $0x8, %edx movq %rbx, %rcx vzeroupper callq 0x2c9a4 movq 0x208(%rsp), %rax testq %rax, %rax je 0xd6ef0 lock incl (%rax) movq 0x1a8(%rsp), %rax testq %rax, %rax je 0xdb274 lock decl (%rax) jne 0xdb274 movq 0x1a0(%rsp), %rsi movq 0x1c0(%rsp), %rdi testq %rdi, %rdi je 0xdb267 movq (%rdi), %rax callq *0x18(%rax) jmp 0xdb274 movl 0x108(%rsp), %eax movl 0x11c(%rsp), %ebx movl 0x128(%rsp), %ecx imull %eax, %ecx movl %ecx, 0x8(%rsp) movl 0x130(%rsp), %r13d movl %eax, 0x10(%rsp) imull %eax, %r13d movl 0x1b8(%rsp), %ecx movl 0x1cc(%rsp), %eax movl %eax, 0x170(%rsp) movl 0x1d0(%rsp), %eax movl %eax, 0x98(%rsp) movl 0x1d8(%rsp), %r12d vmovdqa 0xc0(%rsp), %xmm0 vpextrd $0x1, %xmm0, %r14d movl %ecx, 0x50(%rsp) imull %ecx, %r12d movl %r14d, %eax imull %ebp, %eax movq %rax, 0x40(%rsp) movslq %eax, %rsi leaq 0x200(%rsp), %rdi leaq 0x290(%rsp), %rdx callq 0x3d47e movq 0x200(%rsp), %rax movq %rax, (%rsp) testl %r14d, %r14d jle 0xd7038 vmovdqa 0xc0(%rsp), %xmm0 vpextrd $0x3, %xmm0, %eax imull %eax, %ebx vpextrd $0x2, %xmm0, %eax movl %eax, %ecx imull %ebp, %ecx subl %ecx, %ebx movl %eax, %ecx imull 
0x10(%rsp), %ecx movl %ebp, %edx xorl %esi, %esi xorl %edi, %edi xorl %r8d, %r8d testl %ebp, %ebp jle 0xd702e movslq %esi, %r9 movq (%rsp), %r10 leaq (%r10,%r9,4), %r9 movl 0x10(%rsp), %r10d imull %edi, %r10d xorl %r15d, %r15d movl %r10d, (%r9,%r15,4) addl %eax, %edi incq %r15 addl %ecx, %r10d cmpl %r15d, %edx jne 0xd701a addl %r15d, %esi addl %ebx, %edi incl %r8d cmpl %r14d, %r8d jne 0xd6fff movl %r13d, 0x3b4(%rsp) leal 0x3(%r12), %eax testl %r12d, %r12d cmovnsl %r12d, %eax sarl $0x2, %eax movq %rax, 0x328(%rsp) movl 0x40(%rsp), %r13d movq %r12, 0x3b8(%rsp) cmpl $0x4, %r12d movq %r13, 0xc0(%rsp) movl 0x50(%rsp), %r10d jl 0xd868e movl 0x10(%rsp), %ecx imull 0x58(%rsp), %ecx movl %ecx, 0x190(%rsp) movl 0x8(%rsp), %ecx andl $-0x8, %ecx movl %ecx, 0x280(%rsp) movl 0x328(%rsp), %ecx movq %rcx, 0x458(%rsp) movq %r13, %rcx shlq $0x5, %rcx movq %rcx, 0x80(%rsp) leaq (,%r13,8), %rcx movq %rcx, 0x78(%rsp) leaq (,%r13,4), %rcx movq %rcx, 0x30(%rsp) xorl %edx, %edx vpxor %xmm0, %xmm0, %xmm0 movl 0x1cc(%rsp), %edi movl 0x130(%rsp), %r9d imull 0x10(%rsp), %r9d movq 0x1e0(%rsp), %rcx movl %r10d, %r8d movq %rdx, 0xe8(%rsp) leal (,%rdx,4), %eax cltd idivl %r10d movq %r8, %rdx cltq imulq %rcx, %rax imulq 0x1b0(%rsp), %rax addq 0x1a0(%rsp), %rax movq %rax, 0xb0(%rsp) imull %ecx, %edx movl 0x1d0(%rsp), %eax movl %edi, 0xe0(%rsp) imull %edi, %eax movl %eax, 0xb8(%rsp) cmpl $0x4, %eax movq %rdx, 0x3c0(%rsp) movl %r9d, 0x330(%rsp) jl 0xd7bd6 movslq %r9d, %r15 leaq (%r15,%r15,2), %rax movq %rax, 0x18(%rsp) leaq (%r15,%r15,4), %rax movq %rax, 0xa0(%rsp) leaq (%r15,%r15), %rax leaq (%rax,%rax,2), %rax movq %rax, 0x150(%rsp) leaq (,%r15,8), %rax subq %r15, %rax movq %rax, 0x38(%rsp) movslq %edx, %rcx leal 0x1(%rcx), %eax cltq movq %rax, 0x348(%rsp) leal 0x2(%rcx), %eax cltq movq %rax, 0x3a8(%rsp) leal 0x3(%rcx), %eax cltq movq %rax, 0x3a0(%rsp) leal (%rcx,%rcx), %eax cltq movq %rax, 0x340(%rsp) leal 0x2(%rcx,%rcx), %eax cltq movq %rax, 0x480(%rsp) leal (%rcx,%rcx,2), %eax cltq movq %rax, 0x478(%rsp) leal 0x1(%rcx,%rcx,2), %eax cltq movq %rax, 0x470(%rsp) leal 0x2(%rcx,%rcx,2), %eax cltq movq %rax, 0x468(%rsp) movq %rcx, 0x350(%rsp) leal (%rcx,%rcx,2), %eax addl $0x3, %eax cltq movq %rax, 0x460(%rsp) xorl %esi, %esi movl %esi, %eax cltd movl 0xe0(%rsp), %ecx idivl %ecx movl %eax, %r11d movl %edx, %r9d movl %esi, %eax orl $0x1, %eax cltd idivl %ecx movl %eax, %r10d movl %edx, %r8d movl %esi, %eax orl $0x2, %eax cltd idivl %ecx movl %eax, %r12d movl %edx, %edi movq %rsi, 0x140(%rsp) movl %esi, %eax orl $0x3, %eax cltd idivl %ecx movq 0xd0(%rsp), %rcx movq 0x318(%rcx), %rsi imulq 0xe8(%rsp), %rsi imulq 0x2e8(%rcx), %rsi addq 0x2d8(%rcx), %rsi vpxor %xmm1, %xmm1, %xmm1 cmpl $0x8, 0x8(%rsp) movl %r12d, 0x1f0(%rsp) movl %r10d, 0xd8(%rsp) movl %edx, 0xa8(%rsp) movl %edi, 0x168(%rsp) movl %eax, 0x188(%rsp) movl %r8d, 0x198(%rsp) movl %r9d, 0xdc(%rsp) movl %r11d, 0x1e8(%rsp) jl 0xd76ce movl %r10d, %ebp movl 0x190(%rsp), %ebx imull %ebx, %edi imull %ebx, %r8d imull %ebx, %r9d movslq 0x11c(%rsp), %rdx movq 0xf0(%rsp), %r14 movq 0x100(%rsp), %r10 movq 0x130(%rsp), %rcx imulq %r10, %rcx movq %rcx, 0x60(%rsp) imulq %r10, %rdx movq 0x28(%rsp), %rcx imull %ecx, %r11d movslq %r11d, %r10 imulq %rdx, %r10 movslq %r9d, %r9 addq %r14, %r9 addq %r10, %r9 movq %r9, 0x68(%rsp) imull %ecx, %ebp movslq %ebp, %r9 imulq %rdx, %r9 movslq %r8d, %r8 addq %r14, %r8 addq %r9, %r8 movq %r8, 0x70(%rsp) movl %r12d, %r8d imull %ecx, %r8d movslq %r8d, %r8 imulq %rdx, %r8 movslq %edi, %rdi addq %r14, %rdi addq %r8, %rdi movq %rdi, 0x90(%rsp) movl %eax, 
%edi imull %ecx, %edi movslq %edi, %rdi imulq %rdx, %rdi movl 0xa8(%rsp), %edx imull %ebx, %edx movslq %edx, %rax addq %r14, %rax addq %rdi, %rax movq %rax, 0x88(%rsp) xorl %ecx, %ecx vpxor %xmm4, %xmm4, %xmm4 vpxor %xmm3, %xmm3, %xmm3 vpxor %xmm2, %xmm2, %xmm2 movl %ecx, %eax cltd idivl 0x10(%rsp) cmpl $0x0, 0x40(%rsp) jle 0xd7683 movq %rcx, 0x20(%rsp) movslq %eax, %r11 imulq 0x60(%rsp), %r11 movq 0x68(%rsp), %rcx leaq (%rcx,%r11), %r12 movq 0x70(%rsp), %rcx leaq (%rcx,%r11), %rbx movq 0x90(%rsp), %rcx addq %r11, %rcx addq 0x88(%rsp), %r11 xorl %edi, %edi movq %r11, 0x48(%rsp) movq (%rsp), %rax movslq (%rax,%rdi), %r13 leaq (%r12,%r13), %r9 leaq (%rbx,%r13), %r8 leaq (%rcx,%r13), %r10 addq %r11, %r13 cmpl $0x8, 0x10(%rsp) jne 0xd7420 vmovq (%r9), %xmm5 vmovq (%r8), %xmm6 vmovq (%r10), %xmm9 vmovq (%r13), %xmm10 jmp 0xd753d movq %rcx, %rax movzbl (%r9), %ecx vmovd %ecx, %xmm5 vpinsrb $0x1, (%r9,%r15), %xmm5, %xmm5 vpinsrb $0x2, (%r9,%r15,2), %xmm5, %xmm5 movq 0x18(%rsp), %rbp vpinsrb $0x3, (%r9,%rbp), %xmm5, %xmm5 vpinsrb $0x4, (%r9,%r15,4), %xmm5, %xmm5 movq 0xa0(%rsp), %r14 vpinsrb $0x5, (%r9,%r14), %xmm5, %xmm5 movq 0x150(%rsp), %rdx vpinsrb $0x6, (%r9,%rdx), %xmm5, %xmm5 movq %rsi, %r11 movq %rbx, %rsi movq 0x38(%rsp), %rbx vpinsrb $0x7, (%r9,%rbx), %xmm5, %xmm5 movzbl (%r8), %ecx vmovd %ecx, %xmm6 vpinsrb $0x1, (%r8,%r15), %xmm6, %xmm6 vpinsrb $0x2, (%r8,%r15,2), %xmm6, %xmm6 vpinsrb $0x3, (%r8,%rbp), %xmm6, %xmm6 vpinsrb $0x4, (%r8,%r15,4), %xmm6, %xmm6 vpinsrb $0x5, (%r8,%r14), %xmm6, %xmm6 vpinsrb $0x6, (%r8,%rdx), %xmm6, %xmm6 vpinsrb $0x7, (%r8,%rbx), %xmm6, %xmm6 movzbl (%r10), %ecx vmovd %ecx, %xmm7 vpinsrb $0x1, (%r10,%r15), %xmm7, %xmm7 vpinsrb $0x2, (%r10,%r15,2), %xmm7, %xmm7 vpinsrb $0x3, (%r10,%rbp), %xmm7, %xmm7 vpinsrb $0x4, (%r10,%r15,4), %xmm7, %xmm7 vpinsrb $0x5, (%r10,%r14), %xmm7, %xmm7 vpinsrb $0x6, (%r10,%rdx), %xmm7, %xmm7 vpinsrb $0x7, (%r10,%rbx), %xmm7, %xmm9 movzbl (%r13), %ecx vmovd %ecx, %xmm7 movq %rax, %rcx vpinsrb $0x1, (%r13,%r15), %xmm7, %xmm7 vpinsrb $0x2, (%r13,%r15,2), %xmm7, %xmm7 vpinsrb $0x3, (%r13,%rbp), %xmm7, %xmm7 vpinsrb $0x4, (%r13,%r15,4), %xmm7, %xmm7 vpinsrb $0x5, (%r13,%r14), %xmm7, %xmm7 vpinsrb $0x6, (%r13,%rdx), %xmm7, %xmm7 vpinsrb $0x7, (%r13,%rbx), %xmm7, %xmm10 movq %rsi, %rbx movq %r11, %rsi movq 0x48(%rsp), %r11 vpmovsxbw %xmm5, %xmm8 vpmovsxbw %xmm6, %xmm7 vpmovsxbw %xmm9, %xmm6 vpmovsxbw %xmm10, %xmm5 vmovdqa (%rsi,%rdi,8), %xmm9 vmovdqa 0x10(%rsi,%rdi,8), %xmm11 vpcmpgtb %xmm9, %xmm0, %xmm10 vpcmpgtb %xmm11, %xmm0, %xmm12 vpunpcklbw %xmm10, %xmm9, %xmm13 # xmm13 = xmm9[0],xmm10[0],xmm9[1],xmm10[1],xmm9[2],xmm10[2],xmm9[3],xmm10[3],xmm9[4],xmm10[4],xmm9[5],xmm10[5],xmm9[6],xmm10[6],xmm9[7],xmm10[7] vpunpckhbw %xmm10, %xmm9, %xmm14 # xmm14 = xmm9[8],xmm10[8],xmm9[9],xmm10[9],xmm9[10],xmm10[10],xmm9[11],xmm10[11],xmm9[12],xmm10[12],xmm9[13],xmm10[13],xmm9[14],xmm10[14],xmm9[15],xmm10[15] vpunpcklbw %xmm12, %xmm11, %xmm10 # xmm10 = xmm11[0],xmm12[0],xmm11[1],xmm12[1],xmm11[2],xmm12[2],xmm11[3],xmm12[3],xmm11[4],xmm12[4],xmm11[5],xmm12[5],xmm11[6],xmm12[6],xmm11[7],xmm12[7] vpunpckhbw %xmm12, %xmm11, %xmm9 # xmm9 = xmm11[8],xmm12[8],xmm11[9],xmm12[9],xmm11[10],xmm12[10],xmm11[11],xmm12[11],xmm11[12],xmm12[12],xmm11[13],xmm12[13],xmm11[14],xmm12[14],xmm11[15],xmm12[15] vpshufd $0x0, %xmm8, %xmm11 # xmm11 = xmm8[0,0,0,0] vpmaddwd %xmm13, %xmm11, %xmm11 vpaddd %xmm4, %xmm11, %xmm4 vpshufd $0x0, %xmm7, %xmm11 # xmm11 = xmm7[0,0,0,0] vpmaddwd %xmm13, %xmm11, %xmm11 vpaddd %xmm3, %xmm11, %xmm3 vpshufd $0x0, %xmm6, %xmm11 # xmm11 = 
xmm6[0,0,0,0] vpmaddwd %xmm13, %xmm11, %xmm11 vpaddd %xmm2, %xmm11, %xmm2 vpshufd $0x0, %xmm5, %xmm11 # xmm11 = xmm5[0,0,0,0] vpmaddwd %xmm13, %xmm11, %xmm11 vpaddd %xmm1, %xmm11, %xmm1 vpshufd $0x55, %xmm8, %xmm11 # xmm11 = xmm8[1,1,1,1] vpmaddwd %xmm14, %xmm11, %xmm11 vpshufd $0x55, %xmm7, %xmm12 # xmm12 = xmm7[1,1,1,1] vpmaddwd %xmm14, %xmm12, %xmm12 vpshufd $0x55, %xmm6, %xmm13 # xmm13 = xmm6[1,1,1,1] vpmaddwd %xmm14, %xmm13, %xmm13 vpshufd $0x55, %xmm5, %xmm15 # xmm15 = xmm5[1,1,1,1] vpmaddwd %xmm14, %xmm15, %xmm14 vpshufd $0xaa, %xmm8, %xmm15 # xmm15 = xmm8[2,2,2,2] vpmaddwd %xmm10, %xmm15, %xmm15 vpaddd %xmm15, %xmm11, %xmm11 vpaddd %xmm4, %xmm11, %xmm4 vpshufd $0xaa, %xmm7, %xmm11 # xmm11 = xmm7[2,2,2,2] vpmaddwd %xmm10, %xmm11, %xmm11 vpaddd %xmm11, %xmm12, %xmm11 vpaddd %xmm3, %xmm11, %xmm3 vpshufd $0xaa, %xmm6, %xmm11 # xmm11 = xmm6[2,2,2,2] vpmaddwd %xmm10, %xmm11, %xmm11 vpaddd %xmm11, %xmm13, %xmm11 vpaddd %xmm2, %xmm11, %xmm2 vpshufd $0xaa, %xmm5, %xmm11 # xmm11 = xmm5[2,2,2,2] vpmaddwd %xmm10, %xmm11, %xmm10 vpaddd %xmm10, %xmm14, %xmm10 vpaddd %xmm1, %xmm10, %xmm1 vpshufd $0xff, %xmm8, %xmm8 # xmm8 = xmm8[3,3,3,3] vpmaddwd %xmm9, %xmm8, %xmm8 vpaddd %xmm4, %xmm8, %xmm4 vpshufd $0xff, %xmm7, %xmm7 # xmm7 = xmm7[3,3,3,3] vpmaddwd %xmm7, %xmm9, %xmm7 vpaddd %xmm7, %xmm3, %xmm3 vpshufd $0xff, %xmm6, %xmm6 # xmm6 = xmm6[3,3,3,3] vpmaddwd %xmm6, %xmm9, %xmm6 vpaddd %xmm6, %xmm2, %xmm2 vpshufd $0xff, %xmm5, %xmm5 # xmm5 = xmm5[3,3,3,3] vpmaddwd %xmm5, %xmm9, %xmm5 vpaddd %xmm5, %xmm1, %xmm1 addq $0x4, %rdi cmpq %rdi, 0x30(%rsp) jne 0xd73e8 addq 0x80(%rsp), %rsi movq 0xc0(%rsp), %r13 movq 0x20(%rsp), %rcx leal 0x8(%rcx), %eax addl $0xf, %ecx cmpl 0x8(%rsp), %ecx movl %eax, %ecx jl 0xd739c movl 0x280(%rsp), %eax movl %eax, %r10d movl 0xa8(%rsp), %ebp movl 0x168(%rsp), %edi movl 0x188(%rsp), %eax movl 0x198(%rsp), %r8d movl 0xdc(%rsp), %r9d movl 0x1e8(%rsp), %r11d jmp 0xd76df movl %edx, %ebp xorl %r10d, %r10d vpxor %xmm2, %xmm2, %xmm2 vpxor %xmm3, %xmm3, %xmm3 vpxor %xmm4, %xmm4, %xmm4 movl %r10d, %edx orl $0x1, %edx cmpl 0x8(%rsp), %edx jge 0xd78d7 movq %r10, 0x70(%rsp) movl %ebp, %r10d movl %edi, %ebx movslq 0x11c(%rsp), %rdi movq 0xf0(%rsp), %rdx movl %r8d, %r14d movq 0x100(%rsp), %r8 movq 0x130(%rsp), %r12 imulq %r8, %r12 movq %r12, 0x48(%rsp) movl %r9d, %r12d movl %eax, %ebp movq 0x28(%rsp), %rcx imull %ecx, %r11d movslq %r11d, %r9 imulq %r8, %rdi imulq %rdi, %r9 movl %r12d, %r8d movq 0x58(%rsp), %rax imull %eax, %r8d movslq %r8d, %r8 addq %rdx, %r8 addq %r9, %r8 movq %r8, 0x20(%rsp) movl 0xd8(%rsp), %r8d imull %ecx, %r8d movslq %r8d, %r8 imulq %rdi, %r8 movl %r14d, %r9d imull %eax, %r9d movslq %r9d, %r9 addq %rdx, %r9 addq %r8, %r9 movq %r9, 0x60(%rsp) movl 0x1f0(%rsp), %r8d imull %ecx, %r8d movslq %r8d, %r8 imulq %rdi, %r8 movl %ebx, %r9d imull %eax, %r9d movslq %r9d, %r11 addq %rdx, %r11 addq %r8, %r11 movl %ebp, %r8d imull %ecx, %r8d movslq %r8d, %r8 imulq %rdi, %r8 movl %r10d, %edi movq 0x70(%rsp), %r10 imull %eax, %edi movslq %edi, %rax addq %rdx, %rax addq %r8, %rax movq %rax, 0x68(%rsp) movl %r10d, %r10d cmpl $0x0, 0x40(%rsp) jle 0xd78c3 movq 0x48(%rsp), %rdx movq %r10, %rbp imulq %r10, %rdx movq 0x20(%rsp), %rax leaq (%rax,%rdx), %rbx movq 0x60(%rsp), %rax leaq (%rax,%rdx), %r14 leaq (%r11,%rdx), %rdi addq 0x68(%rsp), %rdx xorl %r12d, %r12d movq (%rsp), %rax movslq (%rax,%r12,4), %rcx leaq (%rbx,%rcx), %r8 leaq (%r14,%rcx), %r9 leaq (%rdi,%rcx), %r10 leaq (%rdx,%rcx), %r13 movzbl (%rbx,%rcx), %eax vmovd %eax, %xmm5 vpinsrb $0x1, (%r15,%r8), %xmm5, %xmm5 vpmovsxbw 
%xmm5, %xmm5 vpshufd $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0] movzbl (%r14,%rcx), %eax vmovd %eax, %xmm6 vpinsrb $0x1, (%r15,%r9), %xmm6, %xmm6 vpmovsxbw %xmm6, %xmm6 vpshufd $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0] movzbl (%rdi,%rcx), %eax vmovd %eax, %xmm7 vpinsrb $0x1, (%r15,%r10), %xmm7, %xmm7 vpmovsxbw %xmm7, %xmm7 vpshufd $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0] movzbl (%rdx,%rcx), %eax vmovd %eax, %xmm8 vpinsrb $0x1, (%r15,%r13), %xmm8, %xmm8 movq 0xc0(%rsp), %r13 vpmovsxbw %xmm8, %xmm8 vpshufd $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0] vpmovsxbw (%rsi,%r12,8), %xmm9 vpmaddwd %xmm5, %xmm9, %xmm5 vpaddd %xmm4, %xmm5, %xmm4 vpmaddwd %xmm6, %xmm9, %xmm5 vpaddd %xmm3, %xmm5, %xmm3 vpmaddwd %xmm7, %xmm9, %xmm5 vpaddd %xmm2, %xmm5, %xmm2 vpmaddwd %xmm9, %xmm8, %xmm5 vpaddd %xmm1, %xmm5, %xmm1 incq %r12 cmpq %r12, %r13 jne 0xd7802 addq 0x78(%rsp), %rsi movq %rbp, %r10 addq $0x2, %r10 movl %r10d, %ecx orl $0x1, %ecx cmpl 0x8(%rsp), %ecx jl 0xd77cd cmpl 0x8(%rsp), %r10d jge 0xd7a9e movslq 0x11c(%rsp), %rdi movq 0xf0(%rsp), %rdx movq 0x100(%rsp), %rcx movq 0x130(%rsp), %rax imulq %rcx, %rax movq %rax, 0x48(%rsp) movq 0x28(%rsp), %r9 movl 0x1e8(%rsp), %eax imull %r9d, %eax movslq %eax, %r8 imulq %rcx, %rdi imulq %rdi, %r8 movq 0x58(%rsp), %rax movl 0xdc(%rsp), %ecx imull %eax, %ecx movslq %ecx, %r14 addq %rdx, %r14 addq %r8, %r14 movl 0xd8(%rsp), %ecx imull %r9d, %ecx movslq %ecx, %rcx imulq %rdi, %rcx movl 0x198(%rsp), %r8d imull %eax, %r8d movslq %r8d, %r11 addq %rdx, %r11 addq %rcx, %r11 movl 0x1f0(%rsp), %ecx imull %r9d, %ecx movslq %ecx, %rcx imulq %rdi, %rcx movl 0x168(%rsp), %r8d imull %eax, %r8d movslq %r8d, %r8 addq %rdx, %r8 addq %rcx, %r8 movl 0x188(%rsp), %ecx imull %r9d, %ecx movslq %ecx, %rcx imulq %rdi, %rcx movl 0xa8(%rsp), %ebp imull %eax, %ebp movslq %ebp, %r9 movq %r8, %rbp addq %rdx, %r9 addq %rcx, %r9 movl %r10d, %eax movq %r14, %r10 cmpl $0x0, 0x40(%rsp) jle 0xd7a91 movq 0x48(%rsp), %rdx imulq %rax, %rdx leaq (%r10,%rdx), %rbx leaq (%r11,%rdx), %r14 leaq (%rdx,%rbp), %rdi addq %r9, %rdx xorl %r12d, %r12d movq (%rsp), %rcx movslq (%rcx,%r12,4), %rcx movsbl (%rbx,%rcx), %r8d vmovd %r8d, %xmm5 vpshuflw $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0,4,5,6,7] vpshufd $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0] movsbl (%r14,%rcx), %r8d vmovd %r8d, %xmm6 vpshuflw $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0,4,5,6,7] vpshufd $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0] movsbl (%rdi,%rcx), %r8d vmovd %r8d, %xmm7 vpshuflw $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0,4,5,6,7] vpshufd $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0] movsbl (%rdx,%rcx), %ecx vmovd %ecx, %xmm8 vpshuflw $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0,4,5,6,7] vpshufd $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0] vpmovsxbw (%rsi,%r12,4), %xmm9 vpmullw %xmm5, %xmm9, %xmm10 vpmulhw %xmm5, %xmm9, %xmm5 vpunpcklwd %xmm5, %xmm10, %xmm5 # xmm5 = xmm10[0],xmm5[0],xmm10[1],xmm5[1],xmm10[2],xmm5[2],xmm10[3],xmm5[3] vpaddd %xmm5, %xmm4, %xmm4 vpmullw %xmm6, %xmm9, %xmm5 vpmulhw %xmm6, %xmm9, %xmm6 vpunpcklwd %xmm6, %xmm5, %xmm5 # xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3] vpaddd %xmm5, %xmm3, %xmm3 vpmullw %xmm7, %xmm9, %xmm5 vpmulhw %xmm7, %xmm9, %xmm6 vpunpcklwd %xmm6, %xmm5, %xmm5 # xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3] vpaddd %xmm5, %xmm2, %xmm2 vpmullw %xmm9, %xmm8, %xmm5 vpmulhw %xmm9, %xmm8, %xmm6 vpunpcklwd %xmm6, %xmm5, %xmm5 # xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3] vpaddd %xmm5, %xmm1, %xmm1 incq %r12 cmpq %r12, %r13 jne 0xd79e0 addq 0x30(%rsp), 
%rsi incq %rax cmpl %eax, 0x8(%rsp) jg 0xd79ba movl 0x50(%rsp), %eax cmpl $0x1, %eax je 0xd7aeb cmpl $0x4, %eax movl 0x330(%rsp), %ecx movq 0x140(%rsp), %rdx jne 0xd7bbf movq 0xb0(%rsp), %rax vmovdqa %xmm4, (%rax) vmovdqa %xmm3, 0x10(%rax) vmovdqa %xmm2, 0x20(%rax) vmovdqa %xmm1, 0x30(%rax) addq $0x40, %rax movq %rax, 0xb0(%rsp) jmp 0xd7bbf vpunpckldq %xmm3, %xmm4, %xmm5 # xmm5 = xmm4[0],xmm3[0],xmm4[1],xmm3[1] vpunpcklqdq %xmm2, %xmm5, %xmm5 # xmm5 = xmm5[0],xmm2[0] vpshufd $0x0, %xmm1, %xmm6 # xmm6 = xmm1[0,0,0,0] vpblendw $0xc0, %xmm6, %xmm5, %xmm5 # xmm5 = xmm5[0,1,2,3,4,5],xmm6[6,7] movq 0xb0(%rsp), %rax vmovdqu %xmm5, (%rax) movq 0x350(%rsp), %rcx vpextrd $0x1, %xmm4, (%rax,%rcx,4) movq 0x348(%rsp), %rcx vpextrd $0x1, %xmm3, (%rax,%rcx,4) movq 0x3a8(%rsp), %rcx vpextrd $0x1, %xmm2, (%rax,%rcx,4) movq 0x3a0(%rsp), %rcx vpextrd $0x1, %xmm1, (%rax,%rcx,4) vpunpckhdq %xmm3, %xmm4, %xmm5 # xmm5 = xmm4[2],xmm3[2],xmm4[3],xmm3[3] movq 0x340(%rsp), %rcx vmovq %xmm5, (%rax,%rcx,4) vpunpckhdq %xmm1, %xmm2, %xmm5 # xmm5 = xmm2[2],xmm1[2],xmm2[3],xmm1[3] movq 0x480(%rsp), %rcx vmovq %xmm5, (%rax,%rcx,4) movq 0x478(%rsp), %rcx vpextrd $0x3, %xmm4, (%rax,%rcx,4) movq 0x470(%rsp), %rcx vpextrd $0x3, %xmm3, (%rax,%rcx,4) movq 0x468(%rsp), %rcx vpextrd $0x3, %xmm2, (%rax,%rcx,4) movq 0x460(%rsp), %rcx vpextrd $0x3, %xmm1, (%rax,%rcx,4) addq $0x10, %rax movq %rax, 0xb0(%rsp) movl 0x330(%rsp), %ecx movq 0x140(%rsp), %rdx leal 0x4(%rdx), %eax addl $0x7, %edx cmpl 0xb8(%rsp), %edx movl %eax, %esi jl 0xd7220 jmp 0xd7bdb xorl %eax, %eax movl %r9d, %ecx movl %eax, %r8d orl $0x1, %r8d cmpl 0xb8(%rsp), %r8d jge 0xd8251 movslq %ecx, %r9 leaq (%r9,%r9,2), %rbp leaq (%r9,%r9,4), %r14 leaq (%r9,%r9), %rcx leaq (%rcx,%rcx,2), %rcx leaq (,%r9,8), %rsi subq %r9, %rsi movq 0x3c0(%rsp), %rdx movslq %edx, %rdi movq %rdi, 0x90(%rsp) leal 0x1(%rdx), %edi movslq %edi, %rdi movq %rdi, 0x88(%rsp) leal (%rdx,%rdx), %edi movslq %edi, %rdi movq %rdi, 0xa8(%rsp) leal (%rdx,%rdx,2), %edi movslq %edi, %rdi movq %rdi, 0x140(%rsp) leal 0x1(%rdx,%rdx,2), %edx movslq %edx, %rdx movq %rdx, 0x168(%rsp) movq 0xd0(%rsp), %r11 movq %rbp, 0x48(%rsp) movq %r14, 0x70(%rsp) movq %rax, 0x150(%rsp) cltd movl 0xe0(%rsp), %edi idivl %edi movl %edx, %r10d movl %eax, %r12d movl %r8d, %eax cltd idivl %edi movq 0x318(%r11), %r8 imulq 0xe8(%rsp), %r8 imulq 0x2e8(%r11), %r8 addq 0x2d8(%r11), %r8 cmpl $0x8, 0x8(%rsp) movl %edx, 0x38(%rsp) movl %r10d, 0x20(%rsp) movl %eax, 0x60(%rsp) movl %r12d, 0x68(%rsp) jl 0xd7f0b movl %edx, %r14d movl 0x190(%rsp), %edi imull %edi, %r14d movl %r10d, %edx imull %edi, %edx movslq 0x11c(%rsp), %rdi movq 0xf0(%rsp), %r11 movq 0x100(%rsp), %rbx movq 0x130(%rsp), %r15 imulq %rbx, %r15 movq 0x28(%rsp), %r10 imull %r10d, %r12d movslq %r12d, %r12 imulq %rbx, %rdi imulq %rdi, %r12 movslq %edx, %rdx movq %r11, %rbx addq %r11, %rdx addq %r12, %rdx movq %rdx, 0x18(%rsp) movl %eax, %edx imull %r10d, %edx movslq %edx, %rdx imulq %rdi, %rdx movslq %r14d, %r11 addq %rbx, %r11 addq %rdx, %r11 xorl %r10d, %r10d vpxor %xmm2, %xmm2, %xmm2 vpxor %xmm4, %xmm4, %xmm4 vpxor %xmm3, %xmm3, %xmm3 vpxor %xmm1, %xmm1, %xmm1 movq 0x70(%rsp), %r14 movl %r10d, %eax cltd idivl 0x10(%rsp) cmpl $0x0, 0x40(%rsp) jle 0xd7ed3 cltq imulq %r15, %rax movq 0x18(%rsp), %rdx addq %rax, %rdx addq %r11, %rax xorl %edi, %edi movq (%rsp), %rbx movslq (%rbx,%rdi), %r13 leaq (%rdx,%r13), %r12 addq %rax, %r13 cmpl $0x8, 0x10(%rsp) jne 0xd7d9b vmovq (%r12), %xmm5 vmovq (%r13), %xmm6 jmp 0xd7e16 movzbl (%r12), %ebx vmovd %ebx, %xmm5 vpinsrb $0x1, (%r12,%r9), %xmm5, %xmm5 
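// Editor's note: the inner loops around this point are an int8
// multiply-accumulate kernel: bytes are widened to int16 (vpmovsxbw, or the
// equivalent vpcmpgtb + vpunpcklbw pair), multiplied pairwise into int32
// with vpmaddwd, and accumulated with vpaddd. A minimal SSE4.1 sketch of
// that pattern for a single dot product; the real kernel instead keeps four
// accumulators live and broadcasts activation pairs with vpshufd/vpshuflw.

#include <immintrin.h>
#include <cstdint>

// Sum of a[i]*b[i] over n int8 pairs, n assumed to be a multiple of 16.
int dot_s8(const int8_t* a, const int8_t* b, int n) {
    __m128i acc = _mm_setzero_si128();
    for (int i = 0; i < n; i += 16) {
        __m128i va = _mm_loadu_si128((const __m128i*)(a + i));
        __m128i vb = _mm_loadu_si128((const __m128i*)(b + i));
        // widen int8 -> int16, low and high halves
        __m128i a_lo = _mm_cvtepi8_epi16(va);
        __m128i a_hi = _mm_cvtepi8_epi16(_mm_srli_si128(va, 8));
        __m128i b_lo = _mm_cvtepi8_epi16(vb);
        __m128i b_hi = _mm_cvtepi8_epi16(_mm_srli_si128(vb, 8));
        // 8 int16 products pairwise-summed into 4 int32 lanes (pmaddwd)
        acc = _mm_add_epi32(acc, _mm_madd_epi16(a_lo, b_lo));
        acc = _mm_add_epi32(acc, _mm_madd_epi16(a_hi, b_hi));
    }
    // horizontal reduction of the 4 int32 lanes
    acc = _mm_add_epi32(acc, _mm_shuffle_epi32(acc, _MM_SHUFFLE(1, 0, 3, 2)));
    acc = _mm_add_epi32(acc, _mm_shuffle_epi32(acc, _MM_SHUFFLE(2, 3, 0, 1)));
    return _mm_cvtsi128_si32(acc);
}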
vpinsrb $0x2, (%r12,%r9,2), %xmm5, %xmm5 vpinsrb $0x3, (%r12,%rbp), %xmm5, %xmm5 vpinsrb $0x4, (%r12,%r9,4), %xmm5, %xmm5 vpinsrb $0x5, (%r12,%r14), %xmm5, %xmm5 vpinsrb $0x6, (%r12,%rcx), %xmm5, %xmm5 vpinsrb $0x7, (%r12,%rsi), %xmm5, %xmm5 movzbl (%r13), %ebx vmovd %ebx, %xmm6 vpinsrb $0x1, (%r13,%r9), %xmm6, %xmm6 vpinsrb $0x2, (%r13,%r9,2), %xmm6, %xmm6 vpinsrb $0x3, (%r13,%rbp), %xmm6, %xmm6 vpinsrb $0x4, (%r13,%r9,4), %xmm6, %xmm6 vpinsrb $0x5, (%r13,%r14), %xmm6, %xmm6 vpinsrb $0x6, (%r13,%rcx), %xmm6, %xmm6 vpinsrb $0x7, (%r13,%rsi), %xmm6, %xmm6 vpmovsxbw %xmm5, %xmm5 vpmovsxbw %xmm6, %xmm6 vmovdqa (%r8,%rdi,8), %xmm7 vmovdqa 0x10(%r8,%rdi,8), %xmm8 vpcmpgtb %xmm7, %xmm0, %xmm9 vpcmpgtb %xmm8, %xmm0, %xmm10 vpunpcklbw %xmm9, %xmm7, %xmm11 # xmm11 = xmm7[0],xmm9[0],xmm7[1],xmm9[1],xmm7[2],xmm9[2],xmm7[3],xmm9[3],xmm7[4],xmm9[4],xmm7[5],xmm9[5],xmm7[6],xmm9[6],xmm7[7],xmm9[7] vpunpckhbw %xmm9, %xmm7, %xmm7 # xmm7 = xmm7[8],xmm9[8],xmm7[9],xmm9[9],xmm7[10],xmm9[10],xmm7[11],xmm9[11],xmm7[12],xmm9[12],xmm7[13],xmm9[13],xmm7[14],xmm9[14],xmm7[15],xmm9[15] vpunpcklbw %xmm10, %xmm8, %xmm9 # xmm9 = xmm8[0],xmm10[0],xmm8[1],xmm10[1],xmm8[2],xmm10[2],xmm8[3],xmm10[3],xmm8[4],xmm10[4],xmm8[5],xmm10[5],xmm8[6],xmm10[6],xmm8[7],xmm10[7] vpunpckhbw %xmm10, %xmm8, %xmm8 # xmm8 = xmm8[8],xmm10[8],xmm8[9],xmm10[9],xmm8[10],xmm10[10],xmm8[11],xmm10[11],xmm8[12],xmm10[12],xmm8[13],xmm10[13],xmm8[14],xmm10[14],xmm8[15],xmm10[15] vpshufd $0x0, %xmm5, %xmm10 # xmm10 = xmm5[0,0,0,0] vpmaddwd %xmm11, %xmm10, %xmm10 vpaddd %xmm2, %xmm10, %xmm2 vpshufd $0x0, %xmm6, %xmm10 # xmm10 = xmm6[0,0,0,0] vpmaddwd %xmm11, %xmm10, %xmm10 vpaddd %xmm4, %xmm10, %xmm4 vpshufd $0x55, %xmm5, %xmm10 # xmm10 = xmm5[1,1,1,1] vpmaddwd %xmm7, %xmm10, %xmm10 vpaddd %xmm3, %xmm10, %xmm3 vpshufd $0x55, %xmm6, %xmm10 # xmm10 = xmm6[1,1,1,1] vpmaddwd %xmm7, %xmm10, %xmm7 vpaddd %xmm1, %xmm7, %xmm1 vpshufd $0xaa, %xmm5, %xmm7 # xmm7 = xmm5[2,2,2,2] vpmaddwd %xmm7, %xmm9, %xmm7 vpaddd %xmm7, %xmm2, %xmm2 vpshufd $0xaa, %xmm6, %xmm7 # xmm7 = xmm6[2,2,2,2] vpmaddwd %xmm7, %xmm9, %xmm7 vpaddd %xmm7, %xmm4, %xmm4 vpshufd $0xff, %xmm5, %xmm5 # xmm5 = xmm5[3,3,3,3] vpmaddwd %xmm5, %xmm8, %xmm5 vpaddd %xmm5, %xmm3, %xmm3 vpshufd $0xff, %xmm6, %xmm5 # xmm5 = xmm6[3,3,3,3] vpmaddwd %xmm5, %xmm8, %xmm5 vpaddd %xmm5, %xmm1, %xmm1 addq $0x4, %rdi cmpq %rdi, 0x30(%rsp) jne 0xd7d77 addq 0x80(%rsp), %r8 movq 0xc0(%rsp), %r13 leal 0x8(%r10), %eax addl $0xf, %r10d cmpl 0x8(%rsp), %r10d movl %eax, %r10d jl 0xd7d51 movl 0x280(%rsp), %eax movl %eax, %r14d movl 0x20(%rsp), %r10d movl 0x60(%rsp), %eax movl 0x68(%rsp), %r12d movq 0xd0(%rsp), %r11 jmp 0xd7f1e xorl %r14d, %r14d vpxor %xmm1, %xmm1, %xmm1 vpxor %xmm3, %xmm3, %xmm3 vpxor %xmm4, %xmm4, %xmm4 vpxor %xmm2, %xmm2, %xmm2 movl %r14d, %edx orl $0x1, %edx cmpl 0x8(%rsp), %edx jge 0xd805e movl %r10d, %edx movslq 0x11c(%rsp), %r10 movq 0xf0(%rsp), %r15 movq 0x100(%rsp), %rdi movl %eax, %ebx movq 0x130(%rsp), %rax imulq %rdi, %rax movq %rax, 0xa0(%rsp) movq 0x28(%rsp), %rax imull %eax, %r12d movslq %r12d, %r12 imulq %rdi, %r10 imulq %r10, %r12 movl %edx, %edi movq 0x58(%rsp), %rdx imull %edx, %edi movslq %edi, %rdi addq %r15, %rdi addq %r12, %rdi movq %rdi, 0x18(%rsp) imull %eax, %ebx movslq %ebx, %r12 imulq %r10, %r12 movl 0x38(%rsp), %r10d imull %edx, %r10d movslq %r10d, %rax addq %r15, %rax addq %r12, %rax movl %r14d, %r14d cmpl $0x0, 0x40(%rsp) jle 0xd804a movq 0xa0(%rsp), %rbx imulq %r14, %rbx movq 0x18(%rsp), %rdx leaq (%rdx,%rbx), %r12 addq %rax, %rbx movq %r13, %rbp xorl %r13d, %r13d movq (%rsp), 
%r11 movslq (%r11,%r13,4), %rdx leaq (%r12,%rdx), %rdi leaq (%rbx,%rdx), %r10 movzbl (%r12,%rdx), %r15d vmovd %r15d, %xmm5 vpinsrb $0x1, (%r9,%rdi), %xmm5, %xmm5 vpmovsxbw %xmm5, %xmm5 vpshufd $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0] movzbl (%rbx,%rdx), %edx vmovd %edx, %xmm6 vpinsrb $0x1, (%r9,%r10), %xmm6, %xmm6 vpmovsxbw %xmm6, %xmm6 vpshufd $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0] vpmovsxbw (%r8,%r13,8), %xmm7 vpmaddwd %xmm7, %xmm5, %xmm5 vpaddd %xmm2, %xmm5, %xmm2 vpmaddwd %xmm7, %xmm6, %xmm5 vpaddd %xmm4, %xmm5, %xmm4 incq %r13 cmpq %r13, %rbp jne 0xd7fd7 addq 0x78(%rsp), %r8 movq 0xd0(%rsp), %r11 movq %rbp, %r13 movq 0x48(%rsp), %rbp addq $0x2, %r14 movl %r14d, %ebx orl $0x1, %ebx cmpl 0x8(%rsp), %ebx jl 0xd7faa cmpl 0x8(%rsp), %r14d jge 0xd8182 movslq 0x11c(%rsp), %r10 movq 0xf0(%rsp), %rbx movq 0x100(%rsp), %rdi movq 0x130(%rsp), %rax imulq %rdi, %rax movq %rax, 0x18(%rsp) movq 0x28(%rsp), %r15 movl 0x68(%rsp), %eax imull %r15d, %eax movslq %eax, %r12 imulq %rdi, %r10 imulq %r10, %r12 movq 0x58(%rsp), %rdx movl 0x20(%rsp), %edi imull %edx, %edi movslq %edi, %rdi addq %rbx, %rdi addq %r12, %rdi movl 0x60(%rsp), %eax imull %r15d, %eax movslq %eax, %r12 imulq %r10, %r12 movl 0x38(%rsp), %r10d imull %edx, %r10d movslq %r10d, %r10 addq %rbx, %r10 addq %r12, %r10 movl %r14d, %eax cmpl $0x0, 0x40(%rsp) jle 0xd8175 movq 0x18(%rsp), %rbx imulq %rax, %rbx leaq (%rdi,%rbx), %r12 addq %r10, %rbx movq %r13, %rbp xorl %r13d, %r13d movq (%rsp), %r11 movslq (%r11,%r13,4), %r15 movsbl (%r12,%r15), %edx vmovd %edx, %xmm5 vpshuflw $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0,4,5,6,7] vpshufd $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0] movsbl (%rbx,%r15), %edx vmovd %edx, %xmm6 vpshuflw $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0,4,5,6,7] vpshufd $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0] vpmovsxbw (%r8,%r13,4), %xmm7 vpmullw %xmm7, %xmm5, %xmm8 vpmulhw %xmm7, %xmm5, %xmm5 vpunpcklwd %xmm5, %xmm8, %xmm5 # xmm5 = xmm8[0],xmm5[0],xmm8[1],xmm5[1],xmm8[2],xmm5[2],xmm8[3],xmm5[3] vpaddd %xmm5, %xmm2, %xmm2 vpmullw %xmm7, %xmm6, %xmm5 vpmulhw %xmm7, %xmm6, %xmm6 vpunpcklwd %xmm6, %xmm5, %xmm5 # xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3] vpaddd %xmm5, %xmm4, %xmm4 incq %r13 cmpq %r13, %rbp jne 0xd8108 addq 0x30(%rsp), %r8 movq 0xd0(%rsp), %r11 movq %rbp, %r13 movq 0x48(%rsp), %rbp incq %rax cmpl %eax, 0x8(%rsp) jg 0xd80e3 vpaddd %xmm1, %xmm4, %xmm1 vpaddd %xmm3, %xmm2, %xmm2 movl 0x50(%rsp), %r10d cmpl $0x1, %r10d je 0xd81c6 cmpl $0x4, %r10d movq 0x150(%rsp), %rax jne 0xd8237 movq 0xb0(%rsp), %rdx vmovdqa %xmm2, (%rdx) vmovdqa %xmm1, 0x10(%rdx) addq $0x20, %rdx movq %rdx, 0xb0(%rsp) jmp 0xd8237 vpunpckldq %xmm1, %xmm2, %xmm3 # xmm3 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] movq 0xb0(%rsp), %rdx vmovq %xmm3, (%rdx) movq 0x90(%rsp), %rax vpextrd $0x1, %xmm2, (%rdx,%rax,4) movq 0x88(%rsp), %rax vpextrd $0x1, %xmm1, (%rdx,%rax,4) vpunpckhdq %xmm1, %xmm2, %xmm3 # xmm3 = xmm2[2],xmm1[2],xmm2[3],xmm1[3] movq 0xa8(%rsp), %rax vmovq %xmm3, (%rdx,%rax,4) movq 0x140(%rsp), %rax vpextrd $0x3, %xmm2, (%rdx,%rax,4) movq 0x168(%rsp), %rax vpextrd $0x3, %xmm1, (%rdx,%rax,4) addq $0x8, %rdx movq %rdx, 0xb0(%rsp) movq 0x150(%rsp), %rax leal 0x2(%rax), %edx addl $0x3, %eax movl %eax, %r8d cmpl 0xb8(%rsp), %eax movl %edx, %eax jl 0xd7c6c jmp 0xd8258 movl %eax, %edx movl 0x50(%rsp), %r10d cmpl 0xb8(%rsp), %edx jge 0xd8675 movl %edx, %ebp movslq 0x330(%rsp), %r9 leaq (%r9,%r9,2), %r14 leaq (%r9,%r9,4), %r15 leaq (%r9,%r9), %rax leaq (%rax,%rax,2), %rsi leaq (,%r9,8), %rcx subq %r9, %rcx movq 0x3c0(%rsp), %rdx movslq 
%edx, %rax movq %rax, 0x150(%rsp) leal (%rdx,%rdx), %eax cltq movq %rax, 0x38(%rsp) leal (%rdx,%rdx,2), %eax cltq movq %rax, 0x20(%rsp) movl %ebp, 0xa0(%rsp) movl %ebp, %eax cltd idivl 0xe0(%rsp) movq 0xd0(%rsp), %rdi movq 0x318(%rdi), %r10 imulq 0xe8(%rsp), %r10 imulq 0x2e8(%rdi), %r10 addq 0x2d8(%rdi), %r10 cmpl $0x8, 0x8(%rsp) movl %edx, 0x48(%rsp) movl %eax, 0x18(%rsp) jl 0xd847b movl %edx, %ebp imull 0x190(%rsp), %ebp movslq 0x11c(%rsp), %rdx movq 0x100(%rsp), %r8 movq 0x130(%rsp), %rdi movl %eax, %ebx imull 0x28(%rsp), %ebx movslq %ebx, %rbx imulq %r8, %rdx imulq %rbx, %rdx addq 0xf0(%rsp), %rdx imulq %r8, %rdi movslq %ebp, %rbx addq %rdx, %rbx xorl %r8d, %r8d vpxor %xmm1, %xmm1, %xmm1 vpxor %xmm2, %xmm2, %xmm2 vpxor %xmm3, %xmm3, %xmm3 vpxor %xmm4, %xmm4, %xmm4 movl %r8d, %eax cltd idivl 0x10(%rsp) cmpl $0x0, 0x40(%rsp) jle 0xd844a cltq imulq %rdi, %rax addq %rbx, %rax xorl %edx, %edx movq (%rsp), %r11 movslq (%r11,%rdx), %r12 addq %rax, %r12 cmpl $0x8, 0x10(%rsp) jne 0xd838d vmovq (%r12), %xmm5 jmp 0xd83d0 movzbl (%r12), %r13d vmovd %r13d, %xmm5 movq 0xc0(%rsp), %r13 vpinsrb $0x1, (%r12,%r9), %xmm5, %xmm5 vpinsrb $0x2, (%r12,%r9,2), %xmm5, %xmm5 vpinsrb $0x3, (%r12,%r14), %xmm5, %xmm5 vpinsrb $0x4, (%r12,%r9,4), %xmm5, %xmm5 vpinsrb $0x5, (%r12,%r15), %xmm5, %xmm5 vpinsrb $0x6, (%r12,%rsi), %xmm5, %xmm5 vpinsrb $0x7, (%r12,%rcx), %xmm5, %xmm5 vpmovsxbw %xmm5, %xmm5 vmovdqa (%r10,%rdx,8), %xmm6 vmovdqa 0x10(%r10,%rdx,8), %xmm7 vpcmpgtb %xmm6, %xmm0, %xmm8 vpcmpgtb %xmm7, %xmm0, %xmm9 vpunpcklbw %xmm8, %xmm6, %xmm10 # xmm10 = xmm6[0],xmm8[0],xmm6[1],xmm8[1],xmm6[2],xmm8[2],xmm6[3],xmm8[3],xmm6[4],xmm8[4],xmm6[5],xmm8[5],xmm6[6],xmm8[6],xmm6[7],xmm8[7] vpunpckhbw %xmm8, %xmm6, %xmm6 # xmm6 = xmm6[8],xmm8[8],xmm6[9],xmm8[9],xmm6[10],xmm8[10],xmm6[11],xmm8[11],xmm6[12],xmm8[12],xmm6[13],xmm8[13],xmm6[14],xmm8[14],xmm6[15],xmm8[15] vpunpcklbw %xmm9, %xmm7, %xmm8 # xmm8 = xmm7[0],xmm9[0],xmm7[1],xmm9[1],xmm7[2],xmm9[2],xmm7[3],xmm9[3],xmm7[4],xmm9[4],xmm7[5],xmm9[5],xmm7[6],xmm9[6],xmm7[7],xmm9[7] vpunpckhbw %xmm9, %xmm7, %xmm7 # xmm7 = xmm7[8],xmm9[8],xmm7[9],xmm9[9],xmm7[10],xmm9[10],xmm7[11],xmm9[11],xmm7[12],xmm9[12],xmm7[13],xmm9[13],xmm7[14],xmm9[14],xmm7[15],xmm9[15] vpshufd $0x0, %xmm5, %xmm9 # xmm9 = xmm5[0,0,0,0] vpmaddwd %xmm10, %xmm9, %xmm9 vpaddd %xmm1, %xmm9, %xmm1 vpshufd $0x55, %xmm5, %xmm9 # xmm9 = xmm5[1,1,1,1] vpmaddwd %xmm6, %xmm9, %xmm6 vpaddd %xmm2, %xmm6, %xmm2 vpshufd $0xaa, %xmm5, %xmm6 # xmm6 = xmm5[2,2,2,2] vpmaddwd %xmm6, %xmm8, %xmm6 vpaddd %xmm3, %xmm6, %xmm3 vpshufd $0xff, %xmm5, %xmm5 # xmm5 = xmm5[3,3,3,3] vpmaddwd %xmm7, %xmm5, %xmm5 vpaddd %xmm4, %xmm5, %xmm4 addq $0x4, %rdx cmpq %rdx, 0x30(%rsp) jne 0xd8373 addq 0x80(%rsp), %r10 leal 0x8(%r8), %eax addl $0xf, %r8d cmpl 0x8(%rsp), %r8d movl %eax, %r8d jl 0xd8355 vpaddd %xmm4, %xmm3, %xmm3 vpaddd %xmm1, %xmm2, %xmm1 vpaddd %xmm1, %xmm3, %xmm1 movl 0x280(%rsp), %eax movl %eax, %ebp movl 0x18(%rsp), %eax jmp 0xd8481 xorl %ebp, %ebp vpxor %xmm1, %xmm1, %xmm1 movl %ebp, %edx orl $0x1, %edx cmpl 0x8(%rsp), %edx jge 0xd8546 movslq 0x11c(%rsp), %r8 movq 0x100(%rsp), %rdi movq 0x130(%rsp), %rdx imulq %rdi, %rdx movl %eax, %ebx imull 0x28(%rsp), %ebx movslq %ebx, %rbx imulq %rdi, %r8 imulq %rbx, %r8 addq 0xf0(%rsp), %r8 movl 0x48(%rsp), %edi imull 0x58(%rsp), %edi movslq %edi, %rdi addq %r8, %rdi movl %ebp, %ebp cmpl $0x0, 0x40(%rsp) jle 0xd8534 movq %rdx, %r8 imulq %rbp, %r8 addq %rdi, %r8 xorl %ebx, %ebx movq (%rsp), %rax movslq (%rax,%rbx,4), %r12 leaq (%r8,%r12), %r13 movzbl (%r8,%r12), %r12d vmovd %r12d, 
%xmm2 vpinsrb $0x1, (%r9,%r13), %xmm2, %xmm2 movq 0xc0(%rsp), %r13 vpmovsxbw %xmm2, %xmm2 vpshufd $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0] vpmovsxbw (%r10,%rbx,8), %xmm3 vpmaddwd %xmm3, %xmm2, %xmm2 vpaddd %xmm1, %xmm2, %xmm1 incq %rbx cmpq %rbx, %r13 jne 0xd84ea addq 0x78(%rsp), %r10 addq $0x2, %rbp movl %ebp, %r8d orl $0x1, %r8d cmpl 0x8(%rsp), %r8d jl 0xd84d7 cmpl 0x8(%rsp), %ebp jge 0xd85fd movslq 0x11c(%rsp), %rdi movq 0x100(%rsp), %r8 movq 0x130(%rsp), %rdx imulq %r8, %rdx movl 0x18(%rsp), %eax imull 0x28(%rsp), %eax movslq %eax, %rbx imulq %rdi, %rbx imulq %r8, %rbx addq 0xf0(%rsp), %rbx movl 0x48(%rsp), %edi imull 0x58(%rsp), %edi movslq %edi, %rdi addq %rbx, %rdi movl %ebp, %eax movl 0xa0(%rsp), %ebp cmpl $0x0, 0x40(%rsp) jle 0xd85f2 movq %rdx, %r8 imulq %rax, %r8 addq %rdi, %r8 xorl %ebx, %ebx movq (%rsp), %r11 movslq (%r11,%rbx,4), %r12 movsbl (%r8,%r12), %r12d vmovd %r12d, %xmm2 vpshuflw $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0,4,5,6,7] vpshufd $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0] vpmovsxbw (%r10,%rbx,4), %xmm3 vpmullw %xmm3, %xmm2, %xmm4 vpmulhw %xmm3, %xmm2, %xmm2 vpunpcklwd %xmm2, %xmm4, %xmm2 # xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3] vpaddd %xmm2, %xmm1, %xmm1 incq %rbx cmpq %rbx, %r13 jne 0xd85b3 addq 0x30(%rsp), %r10 incq %rax cmpl %eax, 0x8(%rsp) jg 0xd85a0 jmp 0xd8604 movl 0xa0(%rsp), %ebp movl 0x50(%rsp), %r10d cmpl $0x1, %r10d je 0xd8627 cmpl $0x4, %r10d jne 0xd8666 movq 0xb0(%rsp), %rdx vmovdqa %xmm1, (%rdx) addq $0x10, %rdx jmp 0xd865e movq 0xb0(%rsp), %rdx vmovd %xmm1, (%rdx) movq 0x150(%rsp), %rax vpextrd $0x1, %xmm1, (%rdx,%rax,4) movq 0x38(%rsp), %rax vpextrd $0x2, %xmm1, (%rdx,%rax,4) movq 0x20(%rsp), %rax vpextrd $0x3, %xmm1, (%rdx,%rax,4) addq $0x4, %rdx movq %rdx, 0xb0(%rsp) incl %ebp cmpl 0xb8(%rsp), %ebp jne 0xd82b1 movq 0xe8(%rsp), %rdx incq %rdx cmpq 0x458(%rsp), %rdx jne 0xd70d7 movq 0x328(%rsp), %rcx shll $0x2, %ecx movq 0x3b8(%rsp), %rax movq %rcx, 0x328(%rsp) subl %ecx, %eax movl %eax, %ecx shrl $0x1f, %ecx addl %eax, %ecx sarl %ecx movq %rcx, 0x340(%rsp) cmpl $0x2, %eax jl 0xd9a0f movl 0x10(%rsp), %eax imull 0x58(%rsp), %eax movl %eax, 0x1e8(%rsp) movl 0x8(%rsp), %eax andl $-0x8, %eax movl %eax, 0x190(%rsp) movslq 0x328(%rsp), %rax movq %rax, 0x3a8(%rsp) movl 0x340(%rsp), %eax movq %rax, 0x3a0(%rsp) movq %r13, %rax shlq $0x4, %rax movq %rax, 0x80(%rsp) leaq (,%r13,4), %rax movq %rax, 0x30(%rsp) leaq (,%r13,2), %rax movq %rax, 0x78(%rsp) xorl %esi, %esi vpxor %xmm0, %xmm0, %xmm0 movq 0x3a8(%rsp), %rdx leaq (%rdx,%rsi,2), %r9 movl 0x1cc(%rsp), %edi movl 0x130(%rsp), %r8d imull 0x10(%rsp), %r8d movq 0x1a0(%rsp), %rax movq 0x1b0(%rsp), %rcx imulq 0x1e0(%rsp), %rcx movq %rsi, 0x330(%rsp) leaq (%rdx,%rsi,2), %rdx incq %rdx imulq %rcx, %rdx imulq %r9, %rcx addq %rax, %rcx movq %rcx, 0xb8(%rsp) addq %rax, %rdx movq %rdx, 0xb0(%rsp) movl 0x1d0(%rsp), %eax movl %edi, 0xe0(%rsp) imull %edi, %eax movl %eax, 0xe8(%rsp) cmpl $0x4, %eax movl %r8d, 0x350(%rsp) movq %r9, 0x348(%rsp) jl 0xd9056 leal 0x3(%r9), %eax testl %r9d, %r9d cmovnsl %r9d, %eax movl %eax, %ecx sarl $0x2, %ecx andl $-0x4, %eax movl %r9d, %edx subl %eax, %edx sarl %edx addl %ecx, %edx movslq %edx, %rax movq %rax, 0x280(%rsp) movslq %r8d, %r15 leaq (%r15,%r15,2), %rax movq %rax, 0x18(%rsp) leaq (%r15,%r15,4), %rax movq %rax, 0xa0(%rsp) leaq (%r15,%r15), %rax leaq (%rax,%rax,2), %rax movq %rax, 0x150(%rsp) leaq (,%r15,8), %rax subq %r15, %rax movq %rax, 0x38(%rsp) xorl %esi, %esi movl %esi, %eax cltd movl 0xe0(%rsp), %ecx idivl %ecx movl %eax, %r14d movl %edx, 
%r9d movl %esi, %eax orl $0x1, %eax cltd idivl %ecx movl %eax, %r10d movl %edx, %r8d movl %esi, %eax orl $0x2, %eax cltd idivl %ecx movl %eax, %edi movl %edx, %ebx movq %rsi, 0xa8(%rsp) movl %esi, %eax orl $0x3, %eax cltd idivl %ecx movq 0xd0(%rsp), %rcx movq 0x318(%rcx), %rsi imulq 0x280(%rsp), %rsi imulq 0x2e8(%rcx), %rsi addq 0x2d8(%rcx), %rsi vpxor %xmm1, %xmm1, %xmm1 cmpl $0x8, 0x8(%rsp) movl %r10d, 0x50(%rsp) movl %edx, 0x140(%rsp) movl %ebx, 0x168(%rsp) movl %eax, 0x188(%rsp) movl %r8d, 0x198(%rsp) movl %edi, 0x1f0(%rsp) movl %r9d, 0xdc(%rsp) movl %r14d, 0xd8(%rsp) jl 0xd8c32 movl %eax, %r11d movl %edi, %r12d movl %ebx, %edi movl 0x1e8(%rsp), %ebx imull %ebx, %edi imull %ebx, %r8d imull %ebx, %r9d movl %edx, %ebp movslq 0x11c(%rsp), %rdx movq 0xf0(%rsp), %rax movq 0x100(%rsp), %r10 movq 0x130(%rsp), %rcx imulq %r10, %rcx movq %rcx, 0x60(%rsp) imulq %r10, %rdx movq 0x28(%rsp), %rcx imull %ecx, %r14d movslq %r14d, %r10 imulq %rdx, %r10 movslq %r9d, %r9 addq %rax, %r9 addq %r10, %r9 movq %r9, 0x68(%rsp) movl 0x50(%rsp), %r9d imull %ecx, %r9d movslq %r9d, %r9 imulq %rdx, %r9 movslq %r8d, %r8 addq %rax, %r8 addq %r9, %r8 movq %r8, 0x70(%rsp) movl %r12d, %r8d imull %ecx, %r8d movslq %r8d, %r8 imulq %rdx, %r8 movslq %edi, %rdi addq %rax, %rdi addq %r8, %rdi movq %rdi, 0x90(%rsp) movl %r11d, %edi imull %ecx, %edi movslq %edi, %rdi imulq %rdx, %rdi movl %ebp, %edx imull %ebx, %edx movslq %edx, %rcx addq %rax, %rcx addq %rdi, %rcx movq %rcx, 0x88(%rsp) vpxor %xmm3, %xmm3, %xmm3 xorl %ecx, %ecx vpxor %xmm4, %xmm4, %xmm4 vpxor %xmm8, %xmm8, %xmm8 vpxor %xmm5, %xmm5, %xmm5 vpxor %xmm6, %xmm6, %xmm6 vpxor %xmm2, %xmm2, %xmm2 vpxor %xmm7, %xmm7, %xmm7 movl %ecx, %eax cltd idivl 0x10(%rsp) cmpl $0x0, 0x40(%rsp) jle 0xd8bf7 movq %rcx, 0x20(%rsp) cltq imulq 0x60(%rsp), %rax movq 0x68(%rsp), %rcx leaq (%rcx,%rax), %r11 movq 0x70(%rsp), %rcx leaq (%rcx,%rax), %rdx movq 0x90(%rsp), %rcx leaq (%rcx,%rax), %rbx addq 0x88(%rsp), %rax xorl %edi, %edi movq %rsi, 0x48(%rsp) movq (%rsp), %rcx movslq (%rcx,%rdi), %r13 leaq (%r11,%r13), %r9 leaq (%rdx,%r13), %r8 leaq (%rbx,%r13), %r10 addq %rax, %r13 cmpl $0x8, 0x10(%rsp) jne 0xd8a3f vmovq (%r9), %xmm9 vmovq (%r8), %xmm10 vmovq (%r10), %xmm11 vmovq (%r13), %xmm12 jmp 0xd8b62 movzbl (%r9), %ecx vmovd %ecx, %xmm9 vpinsrb $0x1, (%r9,%r15), %xmm9, %xmm9 vpinsrb $0x2, (%r9,%r15,2), %xmm9, %xmm9 movq 0x18(%rsp), %rbp vpinsrb $0x3, (%r9,%rbp), %xmm9, %xmm9 vpinsrb $0x4, (%r9,%r15,4), %xmm9, %xmm9 movq 0xa0(%rsp), %r14 vpinsrb $0x5, (%r9,%r14), %xmm9, %xmm9 movq %rax, %rsi movq %rbx, %rax movq %r11, %rbx movq %rdx, %r11 movq 0x150(%rsp), %rdx vpinsrb $0x6, (%r9,%rdx), %xmm9, %xmm9 movq 0x38(%rsp), %r12 vpinsrb $0x7, (%r9,%r12), %xmm9, %xmm9 movzbl (%r8), %ecx vmovd %ecx, %xmm10 vpinsrb $0x1, (%r8,%r15), %xmm10, %xmm10 vpinsrb $0x2, (%r8,%r15,2), %xmm10, %xmm10 vpinsrb $0x3, (%r8,%rbp), %xmm10, %xmm10 vpinsrb $0x4, (%r8,%r15,4), %xmm10, %xmm10 vpinsrb $0x5, (%r8,%r14), %xmm10, %xmm10 vpinsrb $0x6, (%r8,%rdx), %xmm10, %xmm10 vpinsrb $0x7, (%r8,%r12), %xmm10, %xmm10 movzbl (%r10), %ecx vmovd %ecx, %xmm11 vpinsrb $0x1, (%r10,%r15), %xmm11, %xmm11 vpinsrb $0x2, (%r10,%r15,2), %xmm11, %xmm11 vpinsrb $0x3, (%r10,%rbp), %xmm11, %xmm11 vpinsrb $0x4, (%r10,%r15,4), %xmm11, %xmm11 vpinsrb $0x5, (%r10,%r14), %xmm11, %xmm11 vpinsrb $0x6, (%r10,%rdx), %xmm11, %xmm11 vpinsrb $0x7, (%r10,%r12), %xmm11, %xmm11 movzbl (%r13), %ecx vmovd %ecx, %xmm12 vpinsrb $0x1, (%r13,%r15), %xmm12, %xmm12 vpinsrb $0x2, (%r13,%r15,2), %xmm12, %xmm12 vpinsrb $0x3, (%r13,%rbp), %xmm12, %xmm12 vpinsrb 
$0x4, (%r13,%r15,4), %xmm12, %xmm12 vpinsrb $0x5, (%r13,%r14), %xmm12, %xmm12 vpinsrb $0x6, (%r13,%rdx), %xmm12, %xmm12 movq %r11, %rdx movq %rbx, %r11 movq %rax, %rbx movq %rsi, %rax movq 0x48(%rsp), %rsi vpinsrb $0x7, (%r13,%r12), %xmm12, %xmm12 vpmovsxbw %xmm9, %xmm9 vpmovsxbw %xmm10, %xmm10 vpmovsxbw %xmm11, %xmm11 vpmovsxbw %xmm12, %xmm12 vmovdqa (%rsi,%rdi,4), %xmm13 vpcmpgtb %xmm13, %xmm0, %xmm14 vpunpcklbw %xmm14, %xmm13, %xmm15 # xmm15 = xmm13[0],xmm14[0],xmm13[1],xmm14[1],xmm13[2],xmm14[2],xmm13[3],xmm14[3],xmm13[4],xmm14[4],xmm13[5],xmm14[5],xmm13[6],xmm14[6],xmm13[7],xmm14[7] vpunpckhbw %xmm14, %xmm13, %xmm13 # xmm13 = xmm13[8],xmm14[8],xmm13[9],xmm14[9],xmm13[10],xmm14[10],xmm13[11],xmm14[11],xmm13[12],xmm14[12],xmm13[13],xmm14[13],xmm13[14],xmm14[14],xmm13[15],xmm14[15] vpmaddwd %xmm15, %xmm9, %xmm14 vpaddd %xmm3, %xmm14, %xmm3 vpmaddwd %xmm13, %xmm9, %xmm9 vpaddd %xmm4, %xmm9, %xmm4 vpmaddwd %xmm15, %xmm10, %xmm9 vpaddd %xmm8, %xmm9, %xmm8 vpmaddwd %xmm13, %xmm10, %xmm9 vpaddd %xmm5, %xmm9, %xmm5 vpmaddwd %xmm15, %xmm11, %xmm9 vpaddd %xmm6, %xmm9, %xmm6 vpmaddwd %xmm13, %xmm11, %xmm9 vpaddd %xmm2, %xmm9, %xmm2 vpmaddwd %xmm15, %xmm12, %xmm9 vpaddd %xmm7, %xmm9, %xmm7 vpmaddwd %xmm13, %xmm12, %xmm9 vpaddd %xmm1, %xmm9, %xmm1 addq $0x4, %rdi cmpq %rdi, 0x30(%rsp) jne 0xd8a07 addq 0x80(%rsp), %rsi movq 0xc0(%rsp), %r13 movq 0x20(%rsp), %rcx leal 0x8(%rcx), %eax addl $0xf, %ecx cmpl 0x8(%rsp), %ecx movl %eax, %ecx jl 0xd89bb movl 0x190(%rsp), %eax movl %eax, %ebp movl 0x188(%rsp), %eax movl 0x1f0(%rsp), %edi movl 0xdc(%rsp), %r9d movl 0xd8(%rsp), %r8d jmp 0xd8c54 vpxor %xmm7, %xmm7, %xmm7 vpxor %xmm2, %xmm2, %xmm2 vpxor %xmm6, %xmm6, %xmm6 vpxor %xmm5, %xmm5, %xmm5 vpxor %xmm8, %xmm8, %xmm8 vpxor %xmm4, %xmm4, %xmm4 vpxor %xmm3, %xmm3, %xmm3 xorl %ebp, %ebp movl %r14d, %r8d vpunpckldq %xmm8, %xmm3, %xmm9 # xmm9 = xmm3[0],xmm8[0],xmm3[1],xmm8[1] vpunpckldq %xmm7, %xmm6, %xmm10 # xmm10 = xmm6[0],xmm7[0],xmm6[1],xmm7[1] vpunpckhdq %xmm8, %xmm3, %xmm3 # xmm3 = xmm3[2],xmm8[2],xmm3[3],xmm8[3] vpunpckhdq %xmm7, %xmm6, %xmm6 # xmm6 = xmm6[2],xmm7[2],xmm6[3],xmm7[3] vpunpckldq %xmm5, %xmm4, %xmm7 # xmm7 = xmm4[0],xmm5[0],xmm4[1],xmm5[1] vpunpckldq %xmm1, %xmm2, %xmm8 # xmm8 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] vpunpckhdq %xmm5, %xmm4, %xmm4 # xmm4 = xmm4[2],xmm5[2],xmm4[3],xmm5[3] vpunpckhdq %xmm1, %xmm2, %xmm2 # xmm2 = xmm2[2],xmm1[2],xmm2[3],xmm1[3] vpunpcklqdq %xmm10, %xmm9, %xmm1 # xmm1 = xmm9[0],xmm10[0] vpunpckhqdq %xmm10, %xmm9, %xmm5 # xmm5 = xmm9[1],xmm10[1] vpaddd %xmm5, %xmm1, %xmm1 vpunpcklqdq %xmm6, %xmm3, %xmm5 # xmm5 = xmm3[0],xmm6[0] vpunpckhqdq %xmm6, %xmm3, %xmm3 # xmm3 = xmm3[1],xmm6[1] vpaddd %xmm5, %xmm3, %xmm3 vpaddd %xmm3, %xmm1, %xmm1 vpunpcklqdq %xmm8, %xmm7, %xmm3 # xmm3 = xmm7[0],xmm8[0] vpunpckhqdq %xmm8, %xmm7, %xmm5 # xmm5 = xmm7[1],xmm8[1] vpaddd %xmm5, %xmm3, %xmm3 vpunpcklqdq %xmm2, %xmm4, %xmm5 # xmm5 = xmm4[0],xmm2[0] vpunpckhqdq %xmm2, %xmm4, %xmm2 # xmm2 = xmm4[1],xmm2[1] vpaddd %xmm5, %xmm2, %xmm2 vpaddd %xmm2, %xmm3, %xmm2 movl %ebp, %ecx orl $0x1, %ecx cmpl 0x8(%rsp), %ecx jge 0xd8e84 movl %edi, %r14d movslq 0x11c(%rsp), %rdi movq 0xf0(%rsp), %rbx movq 0x100(%rsp), %rcx movq 0x130(%rsp), %r10 imulq %rcx, %r10 movq 0x28(%rsp), %rdx imull %edx, %r8d movslq %r8d, %r8 imulq %rcx, %rdi imulq %rdi, %r8 movl %r9d, %ecx movq 0x58(%rsp), %r9 imull %r9d, %ecx movslq %ecx, %rcx addq %rbx, %rcx addq %r8, %rcx movq %rcx, 0x48(%rsp) movl 0x50(%rsp), %ecx imull %edx, %ecx movslq %ecx, %rcx imulq %rdi, %rcx movl 0x198(%rsp), %r8d imull %r9d, %r8d movslq %r8d, %r8 
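# Note on the block above (descriptive reading of the listing, not taken from the source):
# each vpinsrb chain gathers eight int8 samples at a fixed stride (%r15 / %r9 and its
# small multiples) into one xmm register lane-by-lane, vpmovsxbw sign-extends them to
# int16, and vpmaddwd then accumulates pairwise int16*int16 products into int32 lanes.
# This is the standard SSE/AVX idiom for an int8 multiply-accumulate inner loop; the
# surrounding pointer arithmetic suggests a quantized convolution/GEMM kernel.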
addq %rbx, %r8 addq %rcx, %r8 movq %r8, 0x20(%rsp) movl %r14d, %ecx imull %edx, %ecx movslq %ecx, %rcx imulq %rdi, %rcx movl 0x168(%rsp), %r8d imull %r9d, %r8d movslq %r8d, %r11 addq %rbx, %r11 addq %rcx, %r11 movl %eax, %ecx imull %edx, %ecx movslq %ecx, %rcx imulq %rdi, %rcx movl 0x140(%rsp), %edi imull %r9d, %edi movslq %edi, %rax addq %rbx, %rax addq %rcx, %rax movl %ebp, %ebp cmpl $0x0, 0x40(%rsp) jle 0xd8e71 movq %r10, %rdx imulq %rbp, %rdx movq 0x48(%rsp), %rcx leaq (%rcx,%rdx), %rbx movq 0x20(%rsp), %rcx leaq (%rcx,%rdx), %r14 leaq (%r11,%rdx), %r12 addq %rax, %rdx xorl %edi, %edi movq (%rsp), %rcx movslq (%rcx,%rdi,4), %rcx leaq (%rbx,%rcx), %r8 movsbl (%r15,%r8), %r8d movsbl (%rbx,%rcx), %r9d vmovd %r9d, %xmm3 vpinsrw $0x1, %r8d, %xmm3, %xmm3 movsbl (%r14,%rcx), %r8d vpinsrw $0x2, %r8d, %xmm3, %xmm3 leaq (%r14,%rcx), %r8 movsbl (%r15,%r8), %r8d vpinsrw $0x3, %r8d, %xmm3, %xmm3 leaq (%r12,%rcx), %r8 movsbl (%r12,%rcx), %r9d vpinsrw $0x4, %r9d, %xmm3, %xmm3 leaq (%rdx,%rcx), %r9 movsbl (%r15,%r8), %r8d vpinsrw $0x5, %r8d, %xmm3, %xmm3 movsbl (%rdx,%rcx), %ecx vpinsrw $0x6, %ecx, %xmm3, %xmm3 movsbl (%r15,%r9), %ecx vpinsrw $0x7, %ecx, %xmm3, %xmm3 movzbl (%rsi,%rdi,4), %ecx vmovd %ecx, %xmm4 vpinsrb $0x1, 0x2(%rsi,%rdi,4), %xmm4, %xmm4 vpmovsxbw %xmm4, %xmm4 vpshufd $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0] movzbl 0x1(%rsi,%rdi,4), %ecx vmovd %ecx, %xmm5 vpinsrb $0x1, 0x3(%rsi,%rdi,4), %xmm5, %xmm5 vpmovsxbw %xmm5, %xmm5 vpshufd $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0] vpmaddwd %xmm4, %xmm3, %xmm4 vpaddd %xmm1, %xmm4, %xmm1 vpmaddwd %xmm5, %xmm3, %xmm3 vpaddd %xmm2, %xmm3, %xmm2 incq %rdi cmpq %rdi, %r13 jne 0xd8daf addq 0x30(%rsp), %rsi addq $0x2, %rbp movl %ebp, %ecx orl $0x1, %ecx cmpl 0x8(%rsp), %ecx jl 0xd8d82 cmpl 0x8(%rsp), %ebp jge 0xd9007 movslq 0x11c(%rsp), %r12 movq 0xf0(%rsp), %rbx movq 0x100(%rsp), %rcx movq 0x130(%rsp), %rax imulq %rcx, %rax movq %rax, 0x48(%rsp) movq 0x28(%rsp), %r8 movl 0xd8(%rsp), %eax imull %r8d, %eax movslq %eax, %rdx imulq %rcx, %r12 imulq %r12, %rdx movq 0x58(%rsp), %rdi movl 0xdc(%rsp), %eax imull %edi, %eax movslq %eax, %r9 addq %rbx, %r9 addq %rdx, %r9 movl 0x50(%rsp), %eax imull %r8d, %eax movslq %eax, %rcx imulq %r12, %rcx movl 0x198(%rsp), %eax imull %edi, %eax movslq %eax, %r11 addq %rbx, %r11 addq %rcx, %r11 movl 0x1f0(%rsp), %eax imull %r8d, %eax movslq %eax, %rcx imulq %r12, %rcx movl 0x168(%rsp), %eax imull %edi, %eax movslq %eax, %r14 addq %rbx, %r14 addq %rcx, %r14 movl 0x188(%rsp), %eax imull %r8d, %eax movslq %eax, %rcx imulq %r12, %rcx movl 0x140(%rsp), %r10d imull %edi, %r10d movslq %r10d, %r12 addq %rbx, %r12 addq %rcx, %r12 movl %ebp, %eax movq %r9, %rbp cmpl $0x0, 0x40(%rsp) jle 0xd8ffa movq %r13, %r9 movq 0x48(%rsp), %r13 imulq %rax, %r13 movq %rbp, %rdx addq %r13, %rdx leaq (%r11,%r13), %rbx leaq (%r14,%r13), %rdi addq %r12, %r13 xorl %r10d, %r10d movq (%rsp), %rcx movslq (%rcx,%r10,4), %rcx movzbl (%rdx,%rcx), %r8d vmovd %r8d, %xmm3 vpinsrb $0x1, (%rbx,%rcx), %xmm3, %xmm3 vpinsrb $0x2, (%rdi,%rcx), %xmm3, %xmm3 vpinsrb $0x3, (%r13,%rcx), %xmm3, %xmm3 vpmovsxbw %xmm3, %xmm3 vpshufd $0x44, %xmm3, %xmm3 # xmm3 = xmm3[0,1,0,1] movzwl (%rsi,%r10,2), %ecx vmovd %ecx, %xmm4 vpmovsxbw %xmm4, %xmm4 vpshuflw $0x50, %xmm4, %xmm4 # xmm4 = xmm4[0,0,1,1,4,5,6,7] vpshufd $0x50, %xmm4, %xmm4 # xmm4 = xmm4[0,0,1,1] vpmullw %xmm3, %xmm4, %xmm5 vpmulhw %xmm4, %xmm3, %xmm3 vpunpcklwd %xmm3, %xmm5, %xmm4 # xmm4 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3] vpaddd %xmm4, %xmm1, %xmm1 vpunpckhwd %xmm3, %xmm5, %xmm3 # xmm3 = 
xmm5[4],xmm3[4],xmm5[5],xmm3[5],xmm5[6],xmm3[6],xmm5[7],xmm3[7] vpaddd %xmm3, %xmm2, %xmm2 incq %r10 cmpq %r10, %r9 jne 0xd8f88 addq 0x78(%rsp), %rsi movq %r9, %r13 incq %rax cmpl %eax, 0x8(%rsp) jg 0xd8f5d movq 0xb8(%rsp), %rcx vmovdqa %xmm1, (%rcx) movq 0xb0(%rsp), %rax vmovdqa %xmm2, (%rax) addq $0x10, %rcx movq %rcx, 0xb8(%rsp) addq $0x10, %rax movq %rax, 0xb0(%rsp) movq 0xa8(%rsp), %rcx leal 0x4(%rcx), %eax addl $0x7, %ecx cmpl 0xe8(%rsp), %ecx movl %eax, %esi jl 0xd882a jmp 0xd9058 xorl %eax, %eax movl %eax, %r10d orl $0x1, %r10d cmpl 0xe8(%rsp), %r10d jge 0xd9633 movq 0x348(%rsp), %rdx leal 0x3(%rdx), %esi testl %edx, %edx cmovnsl %edx, %esi movl %esi, %ecx sarl $0x2, %ecx andl $-0x4, %esi subl %esi, %edx sarl %edx addl %ecx, %edx movslq %edx, %rdx movq 0xd0(%rsp), %rcx imulq 0x318(%rcx), %rdx imulq 0x2e8(%rcx), %rdx addq 0x2d8(%rcx), %rdx movq %rdx, 0x188(%rsp) movq 0xf0(%rsp), %rcx movq %rcx, 0x140(%rsp) movq 0x100(%rsp), %rcx movq 0x130(%rsp), %rdx movq %rcx, 0x168(%rsp) imulq %rcx, %rdx movq %rdx, 0xa0(%rsp) movslq 0x350(%rsp), %r15 leaq (%r15,%r15,2), %rbp leaq (%r15,%r15,4), %rcx movq %rcx, 0x68(%rsp) leaq (%r15,%r15), %rcx leaq (%rcx,%rcx,2), %rcx movq %rcx, 0x48(%rsp) leaq (,%r15,8), %rcx subq %r15, %rcx movq %rcx, 0x18(%rsp) movq %rbp, 0x150(%rsp) movq %rax, 0x70(%rsp) cltd movl 0xe0(%rsp), %ecx idivl %ecx movl %edx, %edi movl %eax, %esi movl %r10d, %eax cltd idivl %ecx cmpl $0x8, 0x8(%rsp) movl %edx, 0x90(%rsp) movl %edi, 0x88(%rsp) movl %eax, 0x50(%rsp) movl %esi, 0xa8(%rsp) jl 0xd932b movl %edx, %r9d movl 0x1e8(%rsp), %edx imull %edx, %r9d movl %edi, %ecx imull %edx, %ecx movslq 0x11c(%rsp), %rdx movq 0x28(%rsp), %rdi imull %edi, %esi movslq %esi, %rsi imulq 0x168(%rsp), %rdx imulq %rdx, %rsi movslq %ecx, %rcx movq 0x140(%rsp), %r8 addq %r8, %rcx addq %rsi, %rcx movq %rcx, 0x38(%rsp) movl %eax, %ecx imull %edi, %ecx movslq %ecx, %rcx imulq %rdx, %rcx movslq %r9d, %r12 addq %r8, %r12 addq %rcx, %r12 vpxor %xmm3, %xmm3, %xmm3 xorl %r10d, %r10d movq 0x188(%rsp), %r9 vpxor %xmm4, %xmm4, %xmm4 vpxor %xmm2, %xmm2, %xmm2 vpxor %xmm1, %xmm1, %xmm1 movl %r10d, %eax cltd idivl 0x10(%rsp) cmpl $0x0, 0x40(%rsp) jle 0xd92fb cltq imulq 0xa0(%rsp), %rax movq 0x38(%rsp), %rcx leaq (%rcx,%rax), %rdx addq %r12, %rax xorl %edi, %edi movq 0x68(%rsp), %r8 movq (%rsp), %rcx movslq (%rcx,%rdi), %rcx leaq (%rdx,%rcx), %rsi addq %rax, %rcx cmpl $0x8, 0x10(%rsp) jne 0xd922a vmovq (%rsi), %xmm5 vmovq (%rcx), %xmm6 jmp 0xd92a6 movzbl (%rsi), %r14d vmovd %r14d, %xmm5 vpinsrb $0x1, (%rsi,%r15), %xmm5, %xmm5 vpinsrb $0x2, (%rsi,%r15,2), %xmm5, %xmm5 vpinsrb $0x3, (%rsi,%rbp), %xmm5, %xmm5 vpinsrb $0x4, (%rsi,%r15,4), %xmm5, %xmm5 vpinsrb $0x5, (%rsi,%r8), %xmm5, %xmm5 movq 0x48(%rsp), %r14 vpinsrb $0x6, (%rsi,%r14), %xmm5, %xmm5 movq 0x18(%rsp), %rbx vpinsrb $0x7, (%rsi,%rbx), %xmm5, %xmm5 movzbl (%rcx), %esi vmovd %esi, %xmm6 vpinsrb $0x1, (%rcx,%r15), %xmm6, %xmm6 vpinsrb $0x2, (%rcx,%r15,2), %xmm6, %xmm6 vpinsrb $0x3, (%rcx,%rbp), %xmm6, %xmm6 vpinsrb $0x4, (%rcx,%r15,4), %xmm6, %xmm6 vpinsrb $0x5, (%rcx,%r8), %xmm6, %xmm6 vpinsrb $0x6, (%rcx,%r14), %xmm6, %xmm6 vpinsrb $0x7, (%rcx,%rbx), %xmm6, %xmm6 vpmovsxbw %xmm5, %xmm5 vpmovsxbw %xmm6, %xmm6 vmovdqa (%r9,%rdi,4), %xmm7 vpcmpgtb %xmm7, %xmm0, %xmm8 vpunpcklbw %xmm8, %xmm7, %xmm9 # xmm9 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3],xmm7[4],xmm8[4],xmm7[5],xmm8[5],xmm7[6],xmm8[6],xmm7[7],xmm8[7] vpunpckhbw %xmm8, %xmm7, %xmm7 # xmm7 = 
xmm7[8],xmm8[8],xmm7[9],xmm8[9],xmm7[10],xmm8[10],xmm7[11],xmm8[11],xmm7[12],xmm8[12],xmm7[13],xmm8[13],xmm7[14],xmm8[14],xmm7[15],xmm8[15] vpmaddwd %xmm5, %xmm9, %xmm8 vpaddd %xmm3, %xmm8, %xmm3 vpmaddwd %xmm7, %xmm5, %xmm5 vpaddd %xmm4, %xmm5, %xmm4 vpmaddwd %xmm6, %xmm9, %xmm5 vpaddd %xmm2, %xmm5, %xmm2 vpmaddwd %xmm7, %xmm6, %xmm5 vpaddd %xmm1, %xmm5, %xmm1 addq $0x4, %rdi cmpq %rdi, 0x30(%rsp) jne 0xd920a addq 0x80(%rsp), %r9 leal 0x8(%r10), %eax addl $0xf, %r10d cmpl 0x8(%rsp), %r10d movl %eax, %r10d jl 0xd91d9 movl 0x190(%rsp), %eax movl %eax, %r12d movl 0x88(%rsp), %edi movl 0xa8(%rsp), %esi jmp 0xd9346 xorl %r12d, %r12d vpxor %xmm1, %xmm1, %xmm1 vpxor %xmm2, %xmm2, %xmm2 vpxor %xmm4, %xmm4, %xmm4 vpxor %xmm3, %xmm3, %xmm3 movq 0x188(%rsp), %r9 vphaddd %xmm3, %xmm3, %xmm3 vphaddd %xmm3, %xmm3, %xmm3 vphaddd %xmm4, %xmm4, %xmm4 vphaddd %xmm4, %xmm4, %xmm4 vphaddd %xmm2, %xmm2, %xmm2 vmovd %xmm3, %ebx vphaddd %xmm2, %xmm2, %xmm2 vmovd %xmm4, %eax vphaddd %xmm1, %xmm1, %xmm1 vmovd %xmm2, %r10d vphaddd %xmm1, %xmm1, %xmm1 vmovd %xmm1, %edx movl %r12d, %ecx orl $0x1, %ecx cmpl 0x8(%rsp), %ecx jge 0xd94d6 movslq 0x11c(%rsp), %rcx movq 0x28(%rsp), %r11 imull %r11d, %esi movslq %esi, %rsi imulq 0x168(%rsp), %rcx imulq %rcx, %rsi movq 0x58(%rsp), %r8 imull %r8d, %edi movslq %edi, %r14 movq 0x140(%rsp), %rdi addq %rdi, %r14 addq %rsi, %r14 movq %r14, 0x20(%rsp) movl 0x50(%rsp), %esi imull %r11d, %esi movslq %esi, %rsi imulq %rcx, %rsi movl 0x90(%rsp), %ecx imull %r8d, %ecx movslq %ecx, %rcx addq %rdi, %rcx addq %rsi, %rcx movq %rcx, 0x60(%rsp) movl %r12d, %r12d cmpl $0x0, 0x40(%rsp) jle 0xd94c2 movq %r12, 0x38(%rsp) movq %r12, %rdi imulq 0xa0(%rsp), %rdi movq 0x20(%rsp), %rcx leaq (%rcx,%rdi), %r12 addq 0x60(%rsp), %rdi xorl %r14d, %r14d movq (%rsp), %rcx movslq (%rcx,%r14,4), %rcx leaq (%r12,%rcx), %r8 movsbl (%r12,%rcx), %esi movsbl (%r9,%r14,4), %ebp movl %ebp, %r13d imull %esi, %r13d addl %ebx, %r13d movsbl 0x1(%r9,%r14,4), %r11d imull %r11d, %esi addl %eax, %esi movsbl (%r15,%r8), %eax movsbl (%rdi,%rcx), %r8d imull %r8d, %ebp addl %r10d, %ebp movsbl 0x2(%r9,%r14,4), %r10d movl %r10d, %ebx imull %eax, %ebx addl %r13d, %ebx movsbl 0x3(%r9,%r14,4), %r13d imull %r13d, %eax addl %esi, %eax imull %r11d, %r8d addl %edx, %r8d addq %rdi, %rcx movsbl (%r15,%rcx), %edx imull %edx, %r10d addl %ebp, %r10d imull %r13d, %edx movq 0xc0(%rsp), %r13 addl %r8d, %edx incq %r14 cmpq %r14, %r13 jne 0xd9427 addq 0x30(%rsp), %r9 movq 0x150(%rsp), %rbp movq 0x38(%rsp), %r12 addq $0x2, %r12 movl %r12d, %ecx orl $0x1, %ecx cmpl 0x8(%rsp), %ecx jl 0xd93fa cmpl 0x8(%rsp), %r12d jge 0xd95e1 movslq 0x11c(%rsp), %rcx movq 0x28(%rsp), %r8 movl 0xa8(%rsp), %esi imull %r8d, %esi movslq %esi, %rsi imulq 0x168(%rsp), %rcx imulq %rcx, %rsi movq 0x58(%rsp), %rdi movl 0x88(%rsp), %r14d imull %edi, %r14d movslq %r14d, %r11 movq 0x140(%rsp), %r14 addq %r14, %r11 addq %rsi, %r11 movq %r11, 0x38(%rsp) movl 0x50(%rsp), %esi imull %r8d, %esi movslq %esi, %rsi imulq %rcx, %rsi movl 0x90(%rsp), %ecx imull %edi, %ecx movslq %ecx, %rcx addq %r14, %rcx addq %rsi, %rcx movq %rcx, 0x20(%rsp) movl %r12d, %r12d cmpl $0x0, 0x40(%rsp) jle 0xd95d3 movq %r12, %r14 imulq 0xa0(%rsp), %r14 movq %r13, %rbp movq 0x38(%rsp), %rcx leaq (%rcx,%r14), %r13 addq 0x20(%rsp), %r14 xorl %edi, %edi movq (%rsp), %rcx movslq (%rcx,%rdi,4), %rcx movsbl (%r13,%rcx), %esi movsbl (%r9,%rdi,2), %r8d movl %r8d, %r11d imull %esi, %r11d addl %r11d, %ebx movsbl 0x1(%r9,%rdi,2), %r11d imull %r11d, %esi addl %esi, %eax movsbl (%r14,%rcx), %ecx imull %ecx, %r8d addl %r8d, 
%r10d imull %r11d, %ecx addl %ecx, %edx incq %rdi cmpq %rdi, %rbp jne 0xd9580 addq 0x78(%rsp), %r9 movq %rbp, %r13 movq 0x150(%rsp), %rbp incq %r12 cmpl %r12d, 0x8(%rsp) jg 0xd955a movq 0xb8(%rsp), %rsi movl %ebx, (%rsi) movl %r10d, 0x4(%rsi) movq 0xb0(%rsp), %rcx movl %eax, (%rcx) movl %edx, 0x4(%rcx) addq $0x8, %rsi movq %rsi, 0xb8(%rsp) addq $0x8, %rcx movq %rcx, 0xb0(%rsp) movq 0x70(%rsp), %rax leal 0x2(%rax), %edi addl $0x3, %eax movl %eax, %r10d cmpl 0xe8(%rsp), %eax movl %edi, %eax jl 0xd9123 jmp 0xd9635 movl %eax, %edi cmpl 0xe8(%rsp), %edi jge 0xd99f6 movq 0x348(%rsp), %rdx leal 0x3(%rdx), %eax testl %edx, %edx cmovnsl %edx, %eax movl %eax, %ecx sarl $0x2, %ecx andl $-0x4, %eax subl %eax, %edx sarl %edx addl %ecx, %edx movslq %edx, %rcx movq 0xd0(%rsp), %rax imulq 0x318(%rax), %rcx imulq 0x2e8(%rax), %rcx addq 0x2d8(%rax), %rcx movq %rcx, 0x70(%rsp) movq 0xf0(%rsp), %rax movq %rax, 0x60(%rsp) movq 0x100(%rsp), %rax movq 0x130(%rsp), %rcx movq %rax, 0x68(%rsp) imulq %rax, %rcx movq %rcx, 0x48(%rsp) movslq 0x350(%rsp), %r9 leaq (%r9,%r9,2), %r10 leaq (%r9,%r9,4), %r14 leaq (%r9,%r9), %rax leaq (%rax,%rax,2), %rsi leaq (,%r9,8), %rcx subq %r9, %rcx movq %r10, 0x18(%rsp) movl %edi, 0x150(%rsp) movl %edi, %eax cltd idivl 0xe0(%rsp) cmpl $0x8, 0x8(%rsp) movl %edx, 0x38(%rsp) movl %eax, 0x20(%rsp) jl 0xd9813 movl %edx, %r8d imull 0x1e8(%rsp), %r8d movslq 0x11c(%rsp), %rdx movl %eax, %edi imull 0x28(%rsp), %edi movslq %edi, %rdi imulq 0x68(%rsp), %rdx imulq %rdi, %rdx movslq %r8d, %rdi addq 0x60(%rsp), %rdi addq %rdx, %rdi xorl %r8d, %r8d vpxor %xmm1, %xmm1, %xmm1 vpxor %xmm2, %xmm2, %xmm2 movq 0x70(%rsp), %r15 movl %r8d, %eax cltd idivl 0x10(%rsp) cmpl $0x0, 0x40(%rsp) jle 0xd97f1 cltq imulq 0x48(%rsp), %rax addq %rdi, %rax xorl %edx, %edx movq (%rsp), %r11 movslq (%r11,%rdx), %rbx addq %rax, %rbx cmpl $0x8, 0x10(%rsp) jne 0xd9779 vmovq (%rbx), %xmm3 jmp 0xd97b3 movzbl (%rbx), %r11d vmovd %r11d, %xmm3 vpinsrb $0x1, (%rbx,%r9), %xmm3, %xmm3 vpinsrb $0x2, (%rbx,%r9,2), %xmm3, %xmm3 vpinsrb $0x3, (%rbx,%r10), %xmm3, %xmm3 vpinsrb $0x4, (%rbx,%r9,4), %xmm3, %xmm3 vpinsrb $0x5, (%rbx,%r14), %xmm3, %xmm3 vpinsrb $0x6, (%rbx,%rsi), %xmm3, %xmm3 vpinsrb $0x7, (%rbx,%rcx), %xmm3, %xmm3 vpmovsxbw %xmm3, %xmm3 vmovdqa (%r15,%rdx,4), %xmm4 vpcmpgtb %xmm4, %xmm0, %xmm5 vpunpcklbw %xmm5, %xmm4, %xmm6 # xmm6 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7] vpunpckhbw %xmm5, %xmm4, %xmm4 # xmm4 = xmm4[8],xmm5[8],xmm4[9],xmm5[9],xmm4[10],xmm5[10],xmm4[11],xmm5[11],xmm4[12],xmm5[12],xmm4[13],xmm5[13],xmm4[14],xmm5[14],xmm4[15],xmm5[15] vpmaddwd %xmm6, %xmm3, %xmm5 vpaddd %xmm2, %xmm5, %xmm2 vpmaddwd %xmm4, %xmm3, %xmm3 vpaddd %xmm1, %xmm3, %xmm1 addq $0x4, %rdx cmpq %rdx, 0x30(%rsp) jne 0xd9761 addq 0x80(%rsp), %r15 leal 0x8(%r8), %eax addl $0xf, %r8d cmpl 0x8(%rsp), %r8d movl %eax, %r8d jl 0xd9741 movl 0x190(%rsp), %eax movl %eax, %r8d jmp 0xd9823 xorl %r8d, %r8d vpxor %xmm2, %xmm2, %xmm2 movq 0x70(%rsp), %r15 vpxor %xmm1, %xmm1, %xmm1 vphaddd %xmm2, %xmm2, %xmm2 vphaddd %xmm2, %xmm2, %xmm2 vphaddd %xmm1, %xmm1, %xmm1 vmovd %xmm2, %eax vphaddd %xmm1, %xmm1, %xmm1 vmovd %xmm1, %edx movl %r8d, %edi orl $0x1, %edi cmpl 0x8(%rsp), %edi jge 0xd991c movslq 0x11c(%rsp), %r11 movl 0x20(%rsp), %edi imull 0x28(%rsp), %edi movslq %edi, %rdi imulq 0x68(%rsp), %r11 imulq %rdi, %r11 movl 0x38(%rsp), %edi imull 0x58(%rsp), %edi movslq %edi, %rdi addq 0x60(%rsp), %rdi addq %r11, %rdi movq %rdi, 0xa0(%rsp) movl %r8d, %r8d cmpl $0x0, 
0x40(%rsp) jle 0xd9906 movq %r8, %rbx imulq 0x48(%rsp), %rbx addq 0xa0(%rsp), %rbx xorl %r12d, %r12d movq (%rsp), %r10 movslq (%r10,%r12,4), %r11 leaq (%rbx,%r11), %r13 movsbl (%rbx,%r11), %r11d movsbl (%r15,%r12,4), %ebp imull %r11d, %ebp addl %eax, %ebp movsbl 0x1(%r15,%r12,4), %edi imull %r11d, %edi addl %edx, %edi movsbl (%r9,%r13), %r11d movq 0xc0(%rsp), %r13 movsbl 0x2(%r15,%r12,4), %eax imull %r11d, %eax addl %ebp, %eax movsbl 0x3(%r15,%r12,4), %edx imull %r11d, %edx addl %edi, %edx incq %r12 cmpq %r12, %r13 jne 0xd98ab addq 0x30(%rsp), %r15 movq 0x18(%rsp), %r10 addq $0x2, %r8 movl %r8d, %r11d orl $0x1, %r11d cmpl 0x8(%rsp), %r11d jl 0xd988c cmpl 0x8(%rsp), %r8d jge 0xd99b4 movslq 0x11c(%rsp), %rdi movl 0x20(%rsp), %r11d imull 0x28(%rsp), %r11d movslq %r11d, %r11 imulq 0x68(%rsp), %r11 imulq %rdi, %r11 movl 0x38(%rsp), %edi imull 0x58(%rsp), %edi movslq %edi, %rdi addq 0x60(%rsp), %rdi addq %r11, %rdi movl %r8d, %r8d cmpl $0x0, 0x40(%rsp) jle 0xd99aa movq %r8, %rbx imulq 0x48(%rsp), %rbx addq %rdi, %rbx xorl %r12d, %r12d movq (%rsp), %r10 movslq (%r10,%r12,4), %r11 movsbl (%rbx,%r11), %r11d movsbl (%r15,%r12,2), %ebp imull %r11d, %ebp addl %ebp, %eax movsbl 0x1(%r15,%r12,2), %ebp imull %r11d, %ebp addl %ebp, %edx incq %r12 cmpq %r12, %r13 jne 0xd9974 addq 0x78(%rsp), %r15 movq 0x18(%rsp), %r10 incq %r8 cmpl %r8d, 0x8(%rsp) jg 0xd995e movq 0xb8(%rsp), %rdi movl %eax, (%rdi) movq 0xb0(%rsp), %rax movl %edx, (%rax) addq $0x4, %rdi movq %rdi, 0xb8(%rsp) addq $0x4, %rax movq %rax, 0xb0(%rsp) movl 0x150(%rsp), %edi incl %edi cmpl 0xe8(%rsp), %edi jne 0xd96da movq 0x330(%rsp), %rsi incq %rsi cmpq 0x3a0(%rsp), %rsi jne 0xd8731 movq 0x328(%rsp), %rax movq 0x340(%rsp), %rcx leal (%rax,%rcx,2), %eax movq 0x3b8(%rsp), %rdx cmpl %edx, %eax jge 0xdaab4 movl 0x98(%rsp), %ecx imull 0x170(%rsp), %ecx movl %ecx, 0x98(%rsp) movl 0x10(%rsp), %ecx imull 0x58(%rsp), %ecx movl %ecx, 0xdc(%rsp) movslq 0x3b4(%rsp), %r15 leaq (%r15,%r15,2), %rcx movq %rcx, 0x30(%rsp) leaq (%r15,%r15,4), %rcx movq %rcx, 0xa0(%rsp) leaq (%r15,%r15), %rcx leaq (%rcx,%rcx,2), %rcx movq %rcx, 0x48(%rsp) leaq (,%r15,8), %rcx subq %r15, %rcx movq %rcx, 0x18(%rsp) movl 0x8(%rsp), %ecx andl $-0x8, %ecx movl %ecx, 0xd8(%rsp) movq 0x40(%rsp), %rcx decl %ecx leaq 0x1(%rcx), %rdi movq %rdi, 0x38(%rsp) leaq 0x8(,%rcx,8), %rdi movq %rdi, 0x80(%rsp) leaq 0x2(%rcx,%rcx), %rcx movq %rcx, 0x78(%rsp) cltq movslq %edx, %rcx movq %rcx, 0x280(%rsp) leaq (,%r13,8), %rcx movq %rcx, 0x88(%rsp) leaq (,%r13,2), %rcx movq %rcx, 0xb0(%rsp) movq 0x1e0(%rsp), %rcx imulq %rax, %rcx imulq 0x1b0(%rsp), %rcx addq 0x1a0(%rsp), %rcx movq %rcx, 0xb8(%rsp) cmpl $0x4, 0x98(%rsp) movq %rax, 0x190(%rsp) jl 0xda29d movq %rax, %rdi addl $0x3, %eax testl %edi, %edi cmovnsl %edi, %eax movl %eax, %ecx sarl $0x2, %ecx andl $-0x4, %eax movl %edi, %edx subl %eax, %edx movl %edx, %eax shrb $0x7, %al addb %dl, %al sarb %al movsbl %al, %eax movl %edi, %edx shrl $0x1f, %edx addl %edi, %edx andl $-0x2, %edx subl %edx, %edi addl %ecx, %edi addl %eax, %edi movslq %edi, %rax movq %rax, 0x1e8(%rsp) xorl %edi, %edi movl %edi, %eax cltd movl 0x170(%rsp), %ecx idivl %ecx movl %eax, %r11d movl %edx, %r10d movl %edi, %eax orl $0x1, %eax cltd idivl %ecx movl %eax, %ebx movl %edx, %ebp movl %edi, %eax orl $0x2, %eax cltd idivl %ecx movl %eax, %r12d movl %edx, %r9d movq %rdi, 0xe0(%rsp) movl %edi, %eax orl $0x3, %eax cltd idivl %ecx movl %eax, %r14d movq 0xd0(%rsp), %rax movq 0x318(%rax), %r8 imulq 0x1e8(%rsp), %r8 imulq 0x2e8(%rax), %r8 addq 0x2d8(%rax), %r8 vpxor %xmm0, %xmm0, %xmm0 cmpl 
$0x8, 0x8(%rsp) movl %r14d, 0xe8(%rsp) movl %ebp, 0x140(%rsp) movl %r12d, 0x168(%rsp) movl %r10d, 0x188(%rsp) movl %ebx, 0x198(%rsp) movl %edx, 0x50(%rsp) movl %r9d, 0xa8(%rsp) movl %r11d, 0x1f0(%rsp) jl 0xd9f12 movl %r10d, %edi movl %r9d, %r10d movl %ebx, %r14d movl 0xdc(%rsp), %ebx imull %ebx, %r10d movl %ebp, %ecx imull %ebx, %ecx imull %ebx, %edi movl %edx, %ebp movslq 0x11c(%rsp), %rdx movq 0xf0(%rsp), %rax movq 0x100(%rsp), %r9 movq 0x130(%rsp), %rsi imulq %r9, %rsi movq %rsi, 0x20(%rsp) imulq %r9, %rdx movl %r11d, %r9d movq 0x28(%rsp), %r11 imull %r11d, %r9d movslq %r9d, %r9 imulq %rdx, %r9 movslq %edi, %rsi addq %rax, %rsi addq %r9, %rsi movq %rsi, 0x60(%rsp) imull %r11d, %r14d movslq %r14d, %rdi imulq %rdx, %rdi movslq %ecx, %rcx addq %rax, %rcx addq %rdi, %rcx movq %rcx, 0x68(%rsp) movl %r12d, %ecx imull %r11d, %ecx movslq %ecx, %rcx imulq %rdx, %rcx movslq %r10d, %rsi addq %rax, %rsi addq %rcx, %rsi movq %rsi, 0x70(%rsp) movl 0xe8(%rsp), %ecx imull %r11d, %ecx movslq %ecx, %rcx imulq %rdx, %rcx movl %ebp, %edx imull %ebx, %edx movslq %edx, %rdx addq %rax, %rdx addq %rcx, %rdx movq %rdx, 0x90(%rsp) xorl %ecx, %ecx vpxor %xmm1, %xmm1, %xmm1 vpxor %xmm3, %xmm3, %xmm3 vpxor %xmm2, %xmm2, %xmm2 movl %ecx, %eax cltd idivl 0x10(%rsp) cmpl $0x0, 0x40(%rsp) jle 0xd9ee3 movq %rcx, 0x150(%rsp) cltq imulq 0x20(%rsp), %rax movq 0x60(%rsp), %rcx leaq (%rcx,%rax), %rdx movq 0x68(%rsp), %rcx leaq (%rcx,%rax), %r10 movq 0x70(%rsp), %rcx leaq (%rcx,%rax), %rbx addq 0x90(%rsp), %rax xorl %r12d, %r12d movq (%rsp), %rcx movslq (%rcx,%r12,4), %r13 leaq (%rdx,%r13), %rcx leaq (%r10,%r13), %rdi leaq (%rbx,%r13), %r9 addq %rax, %r13 cmpl $0x8, 0x10(%rsp) jne 0xd9d76 vmovq (%rcx), %xmm4 vmovq (%rdi), %xmm5 vmovq (%r9), %xmm6 vmovq (%r13), %xmm7 jmp 0xd9e85 movzbl (%rcx), %r11d vmovd %r11d, %xmm4 vpinsrb $0x1, (%rcx,%r15), %xmm4, %xmm4 vpinsrb $0x2, (%rcx,%r15,2), %xmm4, %xmm4 movq 0xa0(%rsp), %rsi movq 0x30(%rsp), %rbp vpinsrb $0x3, (%rcx,%rbp), %xmm4, %xmm4 vpinsrb $0x4, (%rcx,%r15,4), %xmm4, %xmm4 vpinsrb $0x5, (%rcx,%rsi), %xmm4, %xmm4 movq %r8, %r14 movq 0x48(%rsp), %r8 vpinsrb $0x6, (%rcx,%r8), %xmm4, %xmm4 movq %rdx, %r11 movq 0x18(%rsp), %rdx vpinsrb $0x7, (%rcx,%rdx), %xmm4, %xmm4 movzbl (%rdi), %ecx vmovd %ecx, %xmm5 vpinsrb $0x1, (%rdi,%r15), %xmm5, %xmm5 vpinsrb $0x2, (%rdi,%r15,2), %xmm5, %xmm5 vpinsrb $0x3, (%rdi,%rbp), %xmm5, %xmm5 vpinsrb $0x4, (%rdi,%r15,4), %xmm5, %xmm5 vpinsrb $0x5, (%rdi,%rsi), %xmm5, %xmm5 vpinsrb $0x6, (%rdi,%r8), %xmm5, %xmm5 vpinsrb $0x7, (%rdi,%rdx), %xmm5, %xmm5 movzbl (%r9), %ecx vmovd %ecx, %xmm6 vpinsrb $0x1, (%r9,%r15), %xmm6, %xmm6 vpinsrb $0x2, (%r9,%r15,2), %xmm6, %xmm6 vpinsrb $0x3, (%r9,%rbp), %xmm6, %xmm6 vpinsrb $0x4, (%r9,%r15,4), %xmm6, %xmm6 vpinsrb $0x5, (%r9,%rsi), %xmm6, %xmm6 vpinsrb $0x6, (%r9,%r8), %xmm6, %xmm6 vpinsrb $0x7, (%r9,%rdx), %xmm6, %xmm6 movzbl (%r13), %ecx vmovd %ecx, %xmm7 vpinsrb $0x1, (%r13,%r15), %xmm7, %xmm7 vpinsrb $0x2, (%r13,%r15,2), %xmm7, %xmm7 vpinsrb $0x3, (%r13,%rbp), %xmm7, %xmm7 vpinsrb $0x4, (%r13,%r15,4), %xmm7, %xmm7 vpinsrb $0x5, (%r13,%rsi), %xmm7, %xmm7 vpinsrb $0x6, (%r13,%r8), %xmm7, %xmm7 movq %r14, %r8 vpinsrb $0x7, (%r13,%rdx), %xmm7, %xmm7 movq %r11, %rdx vpmovsxbw %xmm4, %xmm4 vpmovsxbw %xmm5, %xmm5 vpmovsxbw %xmm6, %xmm6 vpmovsxbw %xmm7, %xmm7 vpmovsxbw (%r8,%r12,8), %xmm8 vpmaddwd %xmm4, %xmm8, %xmm4 vpaddd %xmm0, %xmm4, %xmm0 vpmaddwd %xmm5, %xmm8, %xmm4 vpaddd %xmm2, %xmm4, %xmm2 vpmaddwd %xmm6, %xmm8, %xmm4 vpaddd %xmm3, %xmm4, %xmm3 vpmaddwd %xmm7, %xmm8, %xmm4 vpaddd %xmm1, %xmm4, %xmm1 incq %r12 
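# End of the unrolled multiply-accumulate body (again a best-effort reading): four
# independent int32 partial sums are kept in xmm accumulators via vpaddd after each
# vpmaddwd, %r12 walks the offset table up to the count cached at 0xc0(%rsp), and the
# outer counter steps by 8 while testing +0xf against 0x8(%rsp), i.e. the main loop
# consumes taps in 8-wide groups. The vpmullw/vpmulhw/vpunpcklwd sequences elsewhere
# in the listing appear to be the matching 2-wide and 1-wide tails of this width ladder,
# with vphaddd used afterwards to reduce the accumulators horizontally.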
movq 0xc0(%rsp), %r13 cmpq %r12, %r13 jne 0xd9d40 addq 0x88(%rsp), %r8 movq 0x150(%rsp), %rcx leal 0x8(%rcx), %eax addl $0xf, %ecx cmpl 0x8(%rsp), %ecx movl %eax, %ecx jl 0xd9cf8 movl 0xd8(%rsp), %eax movl 0x50(%rsp), %edx movl 0xa8(%rsp), %r9d movl 0x1f0(%rsp), %r11d jmp 0xd9f20 xorl %eax, %eax vpxor %xmm2, %xmm2, %xmm2 vpxor %xmm3, %xmm3, %xmm3 vpxor %xmm1, %xmm1, %xmm1 vphaddd %xmm1, %xmm3, %xmm1 vphaddd %xmm2, %xmm0, %xmm0 vphaddd %xmm1, %xmm0, %xmm0 movl %eax, %ecx orl $0x1, %ecx cmpl 0x8(%rsp), %ecx jge 0xda0e7 movl %edx, %ebx movslq 0x11c(%rsp), %rdx movq 0xf0(%rsp), %r14 movq 0x100(%rsp), %rcx movq 0x130(%rsp), %rsi imulq %rcx, %rsi movq %rsi, 0x150(%rsp) movq 0x28(%rsp), %r10 imull %r10d, %r11d movslq %r11d, %rdi imulq %rcx, %rdx imulq %rdx, %rdi movl 0x188(%rsp), %ecx movl %r9d, %ebp movq 0x58(%rsp), %r9 imull %r9d, %ecx movslq %ecx, %rcx addq %r14, %rcx addq %rdi, %rcx movq %rcx, 0x20(%rsp) movl 0x198(%rsp), %ecx imull %r10d, %ecx movslq %ecx, %rcx imulq %rdx, %rcx movl 0x140(%rsp), %edi imull %r9d, %edi movslq %edi, %rsi addq %r14, %rsi addq %rcx, %rsi movl 0x168(%rsp), %ecx imull %r10d, %ecx movslq %ecx, %rcx imulq %rdx, %rcx movl %ebp, %edi imull %r9d, %edi movslq %edi, %r11 addq %r14, %r11 addq %rcx, %r11 movl 0xe8(%rsp), %ecx imull %r10d, %ecx movslq %ecx, %rcx imulq %rdx, %rcx movl %ebx, %edx imull %r9d, %edx movslq %edx, %r12 addq %r14, %r12 addq %rcx, %r12 movl %eax, %eax cmpl $0x0, 0x40(%rsp) jle 0xda0d4 movq 0x150(%rsp), %rdx imulq %rax, %rdx movq %r13, %rbp movq 0x20(%rsp), %rcx leaq (%rcx,%rdx), %r13 leaq (%rsi,%rdx), %r10 leaq (%r11,%rdx), %rbx addq %r12, %rdx xorl %r14d, %r14d movq (%rsp), %rcx movslq (%rcx,%r14,4), %rcx leaq (%rcx,%r13), %rdi movsbl (%r15,%rdi), %edi movsbl (%r13,%rcx), %r9d vmovd %r9d, %xmm1 vpinsrw $0x1, %edi, %xmm1, %xmm1 movsbl (%r10,%rcx), %edi vpinsrw $0x2, %edi, %xmm1, %xmm1 leaq (%r10,%rcx), %rdi movsbl (%r15,%rdi), %edi vpinsrw $0x3, %edi, %xmm1, %xmm1 movsbl (%rbx,%rcx), %edi vpinsrw $0x4, %edi, %xmm1, %xmm1 leaq (%rbx,%rcx), %rdi movsbl (%r15,%rdi), %edi vpinsrw $0x5, %edi, %xmm1, %xmm1 movsbl (%rdx,%rcx), %edi vpinsrw $0x6, %edi, %xmm1, %xmm1 addq %rdx, %rcx movsbl (%r15,%rcx), %ecx vpinsrw $0x7, %ecx, %xmm1, %xmm1 movzwl (%r8,%r14,2), %ecx vmovd %ecx, %xmm2 vpmovsxbw %xmm2, %xmm2 vpshufd $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0] vpmaddwd %xmm2, %xmm1, %xmm1 vpaddd %xmm0, %xmm1, %xmm0 incq %r14 cmpq %r14, %rbp jne 0xda03c addq 0xb0(%rsp), %r8 movq %rbp, %r13 addq $0x2, %rax movl %eax, %ecx orl $0x1, %ecx cmpl 0x8(%rsp), %ecx jl 0xda00b cmpl 0x8(%rsp), %eax jge 0xda266 movslq 0x11c(%rsp), %r12 movq 0xf0(%rsp), %r10 movq 0x100(%rsp), %rcx movq 0x130(%rsp), %rdx imulq %rcx, %rdx movq %rdx, 0x150(%rsp) movq 0x28(%rsp), %r9 movl 0x1f0(%rsp), %edx imull %r9d, %edx movslq %edx, %rdx imulq %rcx, %r12 imulq %r12, %rdx movq 0x58(%rsp), %rdi movl 0x188(%rsp), %ecx imull %edi, %ecx movslq %ecx, %rcx addq %r10, %rcx addq %rdx, %rcx movq %rcx, 0x20(%rsp) movl 0x198(%rsp), %ecx imull %r9d, %ecx movslq %ecx, %rcx imulq %r12, %rcx movl 0x140(%rsp), %edx imull %edi, %edx movslq %edx, %rsi addq %r10, %rsi addq %rcx, %rsi movl 0x168(%rsp), %ecx imull %r9d, %ecx movslq %ecx, %rcx imulq %r12, %rcx movl 0xa8(%rsp), %edx imull %edi, %edx movslq %edx, %r14 addq %r10, %r14 addq %rcx, %r14 movl 0xe8(%rsp), %ecx imull %r9d, %ecx movslq %ecx, %rcx imulq %r12, %rcx movl 0x50(%rsp), %edx imull %edi, %edx movslq %edx, %r12 addq %r10, %r12 addq %rcx, %r12 movl %eax, %eax cmpl $0x0, 0x40(%rsp) jle 0xda259 movq %r13, %rbp movq 0x150(%rsp), %r13 imulq %rax, %r13 movq 
0x20(%rsp), %rcx leaq (%rcx,%r13), %rdx leaq (%rsi,%r13), %r10 leaq (%r14,%r13), %r9 addq %r12, %r13 xorl %ebx, %ebx movq (%rsp), %rcx movslq (%rcx,%rbx,4), %rcx movsbl (%r8,%rbx), %edi vmovd %edi, %xmm1 vpshuflw $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0,4,5,6,7] vpshufd $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0] movsbl (%rdx,%rcx), %edi movsbl (%r10,%rcx), %r11d movzwl %di, %edi vmovd %edi, %xmm2 vpinsrw $0x1, %r11d, %xmm2, %xmm2 movsbl (%r9,%rcx), %edi vpinsrw $0x2, %edi, %xmm2, %xmm2 movsbl (%r13,%rcx), %ecx vpinsrw $0x3, %ecx, %xmm2, %xmm2 vpmullw %xmm1, %xmm2, %xmm3 vpmulhw %xmm1, %xmm2, %xmm1 vpunpcklwd %xmm1, %xmm3, %xmm1 # xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3] vpaddd %xmm1, %xmm0, %xmm0 incq %rbx cmpq %rbx, %rbp jne 0xda1f3 addq 0x38(%rsp), %r8 movq %rbp, %r13 incq %rax cmpl %eax, 0x8(%rsp) jg 0xda1c3 movq 0xb8(%rsp), %rax vmovdqa %xmm0, (%rax) addq $0x10, %rax movq %rax, 0xb8(%rsp) movq 0xe0(%rsp), %rax leal 0x4(%rax), %ecx addl $0x7, %eax cmpl 0x98(%rsp), %eax movl %ecx, %edi jl 0xd9b72 jmp 0xda29f xorl %ecx, %ecx movl %ecx, %r8d orl $0x1, %r8d cmpl 0x98(%rsp), %r8d jge 0xda782 movq 0x190(%rsp), %rdi leal 0x3(%rdi), %eax testl %edi, %edi cmovnsl %edi, %eax movq %rcx, %r9 movl %eax, %ecx sarl $0x2, %ecx andl $-0x4, %eax movl %edi, %edx subl %eax, %edx movl %edx, %eax shrb $0x7, %al addb %dl, %al sarb %al movsbl %al, %r10d movl %edi, %edx shrl $0x1f, %edx addl %edi, %edx andl $-0x2, %edx subl %edx, %edi addl %ecx, %edi movq %r9, %rax addl %r10d, %edi movslq %edi, %rdx movq 0xd0(%rsp), %rcx imulq 0x318(%rcx), %rdx imulq 0x2e8(%rcx), %rdx addq 0x2d8(%rcx), %rdx movq %rdx, 0x140(%rsp) movq 0xf0(%rsp), %rcx movq %rcx, 0x50(%rsp) movq 0x100(%rsp), %rcx movq 0x130(%rsp), %rdx movq %rcx, 0xa8(%rsp) imulq %rcx, %rdx movq %rdx, 0x150(%rsp) movq %rax, 0x70(%rsp) cltd movl 0x170(%rsp), %ecx idivl %ecx movl %edx, %r9d movl %eax, %edi movl %r8d, %eax cltd idivl %ecx movl %eax, %r10d cmpl $0x8, 0x8(%rsp) movl %eax, 0x20(%rsp) movl %edx, 0x90(%rsp) movl %r9d, 0xe8(%rsp) movl %edi, 0xe0(%rsp) jl 0xda535 movl %edx, %esi movl 0xdc(%rsp), %edx imull %edx, %esi imull %edx, %r9d movslq 0x11c(%rsp), %rdx movq 0x28(%rsp), %r8 imull %r8d, %edi movslq %edi, %rdi imulq 0xa8(%rsp), %rdx imulq %rdx, %rdi movslq %r9d, %r11 movq 0x50(%rsp), %r9 addq %r9, %r11 addq %rdi, %r11 imull %r8d, %r10d movslq %r10d, %rcx imulq %rdx, %rcx movslq %esi, %rbp addq %r9, %rbp addq %rcx, %rbp xorl %r10d, %r10d vpxor %xmm0, %xmm0, %xmm0 vpxor %xmm1, %xmm1, %xmm1 movq 0x140(%rsp), %rsi movl %r10d, %eax cltd idivl 0x10(%rsp) cmpl $0x0, 0x40(%rsp) jle 0xda4ff cltq imulq 0x150(%rsp), %rax leaq (%r11,%rax), %rdx addq %rbp, %rax xorl %r14d, %r14d movq (%rsp), %rcx movslq (%rcx,%r14,4), %rdi leaq (%rdx,%rdi), %rcx addq %rax, %rdi cmpl $0x8, 0x10(%rsp) jne 0xda442 vmovq (%rcx), %xmm2 vmovq (%rdi), %xmm3 jmp 0xda4cb movzbl (%rcx), %r9d vmovd %r9d, %xmm2 vpinsrb $0x1, (%rcx,%r15), %xmm2, %xmm2 vpinsrb $0x2, (%rcx,%r15,2), %xmm2, %xmm2 movq 0x30(%rsp), %r9 vpinsrb $0x3, (%rcx,%r9), %xmm2, %xmm2 vpinsrb $0x4, (%rcx,%r15,4), %xmm2, %xmm2 movq 0xa0(%rsp), %r8 vpinsrb $0x5, (%rcx,%r8), %xmm2, %xmm2 movq 0x48(%rsp), %rbx vpinsrb $0x6, (%rcx,%rbx), %xmm2, %xmm2 movq 0x18(%rsp), %r12 vpinsrb $0x7, (%rcx,%r12), %xmm2, %xmm2 movzbl (%rdi), %ecx vmovd %ecx, %xmm3 vpinsrb $0x1, (%rdi,%r15), %xmm3, %xmm3 vpinsrb $0x2, (%rdi,%r15,2), %xmm3, %xmm3 vpinsrb $0x3, (%rdi,%r9), %xmm3, %xmm3 vpinsrb $0x4, (%rdi,%r15,4), %xmm3, %xmm3 vpinsrb $0x5, (%rdi,%r8), %xmm3, %xmm3 vpinsrb $0x6, (%rdi,%rbx), %xmm3, %xmm3 vpinsrb $0x7, 
(%rdi,%r12), %xmm3, %xmm3 vpmovsxbw %xmm2, %xmm2 vpmovsxbw %xmm3, %xmm3 vpmovsxbw (%rsi,%r14,8), %xmm4 vpmaddwd %xmm4, %xmm2, %xmm2 vpaddd %xmm1, %xmm2, %xmm1 vpmaddwd %xmm4, %xmm3, %xmm2 vpaddd %xmm0, %xmm2, %xmm0 incq %r14 cmpq %r14, %r13 jne 0xda41f addq 0x80(%rsp), %rsi leal 0x8(%r10), %eax addl $0xf, %r10d cmpl 0x8(%rsp), %r10d movl %eax, %r10d jl 0xda3f7 movl 0xd8(%rsp), %eax movl %eax, %r14d movl 0xe8(%rsp), %r9d movl 0x20(%rsp), %r10d movl 0xe0(%rsp), %edi jmp 0xda548 xorl %r14d, %r14d vpxor %xmm1, %xmm1, %xmm1 movq 0x140(%rsp), %rsi vpxor %xmm0, %xmm0, %xmm0 vphaddd %xmm1, %xmm1, %xmm1 vphaddd %xmm1, %xmm1, %xmm1 vphaddd %xmm0, %xmm0, %xmm0 vmovd %xmm1, %r8d vphaddd %xmm0, %xmm0, %xmm0 vmovd %xmm0, %edx movl %r14d, %ecx orl $0x1, %ecx cmpl 0x8(%rsp), %ecx jge 0xda672 movslq 0x11c(%rsp), %rcx movq 0x28(%rsp), %rbx imull %ebx, %edi movslq %edi, %rdi imulq 0xa8(%rsp), %rcx imulq %rcx, %rdi movq 0x58(%rsp), %r11 imull %r11d, %r9d movslq %r9d, %rax movq 0x50(%rsp), %r9 addq %r9, %rax addq %rdi, %rax movq %rax, 0x60(%rsp) movl %r10d, %edi imull %ebx, %edi movslq %edi, %rdi imulq %rcx, %rdi movl 0x90(%rsp), %ecx imull %r11d, %ecx movslq %ecx, %rax addq %r9, %rax addq %rdi, %rax movq %rax, 0x68(%rsp) movl %r14d, %r14d cmpl $0x0, 0x40(%rsp) jle 0xda65e movq %r14, %rax imulq 0x150(%rsp), %r14 movq %r13, %r10 movq 0x60(%rsp), %rcx leaq (%rcx,%r14), %r13 addq 0x68(%rsp), %r14 xorl %ebx, %ebx movq (%rsp), %r12 movslq (%r12,%rbx,4), %rcx leaq (%rcx,%r13), %rdi leaq (%r14,%rcx), %r9 movsbl (%r13,%rcx), %r11d movsbl (%rsi,%rbx,2), %ebp imull %ebp, %r11d addl %r8d, %r11d movsbl (%r15,%rdi), %r8d movsbl 0x1(%rsi,%rbx,2), %edi imull %edi, %r8d addl %r11d, %r8d movsbl (%r14,%rcx), %ecx imull %ebp, %ecx addl %edx, %ecx movsbl (%r15,%r9), %edx imull %edi, %edx addl %ecx, %edx incq %rbx cmpq %rbx, %r10 jne 0xda604 addq 0x78(%rsp), %rsi movq %r10, %r13 movl 0x20(%rsp), %r10d movq %rax, %r14 addq $0x2, %r14 movl %r14d, %ecx orl $0x1, %ecx cmpl 0x8(%rsp), %ecx jl 0xda5da cmpl 0x8(%rsp), %r14d jge 0xda747 movslq 0x11c(%rsp), %rcx movq 0x28(%rsp), %r11 movl 0xe0(%rsp), %edi imull %r11d, %edi movslq %edi, %rdi imulq 0xa8(%rsp), %rcx imulq %rcx, %rdi movq 0x58(%rsp), %r9 movl %r10d, %eax movl 0xe8(%rsp), %r10d imull %r9d, %r10d movslq %r10d, %rbx movq 0x50(%rsp), %r10 addq %r10, %rbx addq %rdi, %rbx imull %r11d, %eax movslq %eax, %rdi imulq %rcx, %rdi movl 0x90(%rsp), %ecx imull %r9d, %ecx movslq %ecx, %r12 addq %r10, %r12 addq %rdi, %r12 movl %r14d, %r10d cmpl $0x0, 0x40(%rsp) jle 0xda73d movq %r10, %r9 imulq 0x150(%rsp), %r9 leaq (%rbx,%r9), %r14 addq %r12, %r9 movq %r13, %rbp xorl %r13d, %r13d movq (%rsp), %rax movslq (%rax,%r13,4), %rcx movsbl (%r14,%rcx), %edi movsbl (%rsi,%r13), %r11d imull %r11d, %edi addl %edi, %r8d movsbl (%r9,%rcx), %ecx imull %r11d, %ecx addl %ecx, %edx incq %r13 cmpq %r13, %rbp jne 0xda709 addq 0x38(%rsp), %rsi movq %rbp, %r13 incq %r10 cmpl %r10d, 0x8(%rsp) jg 0xda6e9 movq 0xb8(%rsp), %rcx movl %r8d, (%rcx) movl %edx, 0x4(%rcx) addq $0x8, %rcx movq %rcx, 0xb8(%rsp) movq 0x70(%rsp), %rax leal 0x2(%rax), %r10d addl $0x3, %eax movl %eax, %r8d cmpl 0x98(%rsp), %eax movl %r10d, %eax jl 0xda34f jmp 0xda785 movl %ecx, %r10d cmpl 0x98(%rsp), %r10d jge 0xdaa9b movq 0x190(%rsp), %rdi leal 0x3(%rdi), %eax testl %edi, %edi cmovnsl %edi, %eax movl %eax, %ecx sarl $0x2, %ecx andl $-0x4, %eax movl %edi, %edx subl %eax, %edx movl %edx, %eax shrb $0x7, %al addb %dl, %al sarb %al movsbl %al, %eax movl %edi, %edx shrl $0x1f, %edx addl %edi, %edx andl $-0x2, %edx subl %edx, %edi addl %ecx, %edi addl 
%eax, %edi movslq %edi, %rcx movq 0xd0(%rsp), %rax imulq 0x318(%rax), %rcx imulq 0x2e8(%rax), %rcx addq 0x2d8(%rax), %rcx movq %rcx, 0x60(%rsp) movq 0xf0(%rsp), %rax movq %rax, 0x20(%rsp) movq 0x100(%rsp), %r12 movq 0x130(%rsp), %r14 imulq %r12, %r14 movl %r10d, %eax cltd idivl 0x170(%rsp) movl %eax, %edi cmpl $0x8, 0x8(%rsp) movl %edx, 0x150(%rsp) jl 0xda944 movl %edx, %eax imull 0xdc(%rsp), %eax movslq 0x11c(%rsp), %rcx movl %edi, %ebp movl %edi, %edx imull 0x28(%rsp), %edx movslq %edx, %rdx movq %r12, %r9 imulq %r12, %rcx imulq %rdx, %rcx movslq %eax, %r12 addq 0x20(%rsp), %r12 addq %rcx, %r12 xorl %r8d, %r8d vpxor %xmm0, %xmm0, %xmm0 movq 0x60(%rsp), %rbx movl %r8d, %eax cltd idivl 0x10(%rsp) cmpl $0x0, 0x40(%rsp) jle 0xda91e cltq imulq %r14, %rax addq %r12, %rax xorl %edx, %edx movq (%rsp), %rcx movslq (%rcx,%rdx,4), %rcx addq %rax, %rcx cmpl $0x8, 0x10(%rsp) jne 0xda8a8 vmovq (%rcx), %xmm1 jmp 0xda8f7 movzbl (%rcx), %edi vmovd %edi, %xmm1 vpinsrb $0x1, (%rcx,%r15), %xmm1, %xmm1 vpinsrb $0x2, (%rcx,%r15,2), %xmm1, %xmm1 movq 0x30(%rsp), %rdi vpinsrb $0x3, (%rcx,%rdi), %xmm1, %xmm1 vpinsrb $0x4, (%rcx,%r15,4), %xmm1, %xmm1 movq 0xa0(%rsp), %rsi vpinsrb $0x5, (%rcx,%rsi), %xmm1, %xmm1 movq 0x48(%rsp), %rdi vpinsrb $0x6, (%rcx,%rdi), %xmm1, %xmm1 movq 0x18(%rsp), %rdi vpinsrb $0x7, (%rcx,%rdi), %xmm1, %xmm1 vpmovsxbw %xmm1, %xmm1 vpmovsxbw (%rbx,%rdx,8), %xmm2 vpmaddwd %xmm2, %xmm1, %xmm1 vpaddd %xmm0, %xmm1, %xmm0 incq %rdx cmpq %rdx, %r13 jne 0xda890 addq 0x80(%rsp), %rbx leal 0x8(%r8), %eax addl $0xf, %r8d cmpl 0x8(%rsp), %r8d movl %eax, %r8d jl 0xda872 movl 0xd8(%rsp), %eax movl %eax, %edx movq %r9, %r12 movl %ebp, %edi jmp 0xda94f xorl %edx, %edx vpxor %xmm0, %xmm0, %xmm0 movq 0x60(%rsp), %rbx vphaddd %xmm0, %xmm0, %xmm0 vphaddd %xmm0, %xmm0, %xmm0 vmovd %xmm0, %eax movl %edx, %ecx orl $0x1, %ecx cmpl 0x8(%rsp), %ecx jge 0xda9f9 movslq 0x11c(%rsp), %rcx movl %edi, %esi imull 0x28(%rsp), %edi movslq %edi, %rdi imulq %r12, %rcx imulq %rdi, %rcx movl 0x150(%rsp), %edi imull 0x58(%rsp), %edi movslq %edi, %r8 addq 0x20(%rsp), %r8 addq %rcx, %r8 movl %edx, %edx cmpl $0x0, 0x40(%rsp) jle 0xda9e8 movq %rdx, %r9 imulq %r14, %r9 addq %r8, %r9 xorl %edi, %edi movq (%rsp), %rcx movslq (%rcx,%rdi,4), %rcx leaq (%r9,%rcx), %r11 movsbl (%r9,%rcx), %ecx movsbl (%rbx,%rdi,2), %ebp imull %ecx, %ebp addl %eax, %ebp movsbl (%r15,%r11), %ecx movsbl 0x1(%rbx,%rdi,2), %eax imull %ecx, %eax addl %ebp, %eax incq %rdi cmpq %rdi, %r13 jne 0xda9b2 addq 0x78(%rsp), %rbx addq $0x2, %rdx movl %edx, %ecx orl $0x1, %ecx cmpl 0x8(%rsp), %ecx jl 0xda99f jmp 0xda9fb movl %edi, %esi cmpl 0x8(%rsp), %edx jge 0xdaa74 movslq 0x11c(%rsp), %rcx imull 0x28(%rsp), %esi movslq %esi, %rdi imulq %r12, %rdi imulq %rcx, %rdi movl 0x150(%rsp), %ecx imull 0x58(%rsp), %ecx movslq %ecx, %r8 addq 0x20(%rsp), %r8 addq %rdi, %r8 movl %edx, %edx cmpl $0x0, 0x40(%rsp) jle 0xdaa6b movq %rdx, %rdi imulq %r14, %rdi addq %r8, %rdi xorl %r9d, %r9d movq (%rsp), %rcx movslq (%rcx,%r9,4), %rcx movsbl (%rdi,%rcx), %ecx movsbl (%rbx,%r9), %r11d imull %ecx, %r11d addl %r11d, %eax incq %r9 cmpq %r9, %r13 jne 0xdaa46 addq 0x38(%rsp), %rbx incq %rdx cmpl %edx, 0x8(%rsp) jg 0xdaa32 movq 0xb8(%rsp), %rcx movl %eax, (%rcx) addq $0x4, %rcx movq %rcx, 0xb8(%rsp) incl %r10d cmpl 0x98(%rsp), %r10d jne 0xda813 movq 0x190(%rsp), %rax incq %rax cmpq 0x280(%rsp), %rax jne 0xd9af7 movq 0x200(%rsp), %rdi testq %rdi, %rdi je 0xd6d3b movq 0x210(%rsp), %rsi subq %rdi, %rsi callq 0x24330 jmp 0xd6d3b vmovq 0x1cc(%rsp), %xmm0 vpcmpeqd %xmm1, %xmm1, %xmm1 vpsubd %xmm1, 
%xmm0, %xmm0 vpsrld $0x1f, %xmm0, %xmm1 vpaddd %xmm1, %xmm0, %xmm0 vpsrad $0x1, %xmm0, %xmm0 vpextrd $0x1, %xmm0, %r14d vmovd %xmm0, %eax movl 0x1b8(%rsp), %r13d imull 0x1d8(%rsp), %r13d imull %eax, %r14d movl 0x108(%rsp), %ebx imull 0x128(%rsp), %ebx subq $0x8, %rsp leaq 0x3d0(%rsp), %rcx leaq 0x18c(%rsp), %r8 leaq 0x154(%rsp), %r9 movl %r13d, %edi movl %r14d, %esi movl %ebx, %edx pushq %r15 callq 0xf4741 addq $0x10, %rsp movl 0x3c8(%rsp), %ecx leal (%rcx,%r13), %eax decl %eax cltd idivl %ecx movl %eax, 0x170(%rsp) movl 0x184(%rsp), %ecx leal (%r14,%rcx), %eax decl %eax cltd idivl %ecx movl %eax, %r12d movl 0x14c(%rsp), %esi leal (%rbx,%rsi), %eax decl %eax cltd idivl %esi movl %eax, %r8d movq %rsi, 0xc0(%rsp) movl %esi, %ebp movq %rcx, 0x10(%rsp) imull %ecx, %ebp movq 0x178(%rsp), %rax movq 0x10(%rax), %rax leaq 0x290(%rsp), %rdi movq $0x0, 0x40(%rdi) vpxor %xmm0, %xmm0, %xmm0 vmovdqa %xmm0, (%rdi) vmovdqu %xmm0, 0xc(%rdi) vmovdqa %xmm0, 0x20(%rdi) vmovdqu %xmm0, 0x2c(%rdi) subq $0x8, %rsp movl $0x2, %r9d movl %ebp, %esi movl $0x10, %edx movl %r8d, 0x8(%rsp) movl %r8d, %ecx movl %r12d, %r8d pushq %rax callq 0x2b208 addq $0x10, %rsp movq %rbx, 0x70(%rsp) movl $0xffffff9c, %ebx # imm = 0xFFFFFF9C cmpq $0x0, 0x290(%rsp) je 0xdc15a movslq 0x2c8(%rsp), %rax imulq 0x2d0(%rsp), %rax testq %rax, %rax je 0xdc15a imull (%rsp), %r12d shll $0x4, %ebp movq %r15, %rcx cmpl $0x2, %ecx jl 0xdaeb8 cmpl %ecx, %r12d jge 0xdaeb8 movq 0x178(%rsp), %rax movq 0x10(%rax), %rcx leaq 0x200(%rsp), %rdi movq $0x0, 0x40(%rdi) vpxor %xmm0, %xmm0, %xmm0 vmovdqa %xmm0, (%rdi) vmovdqu %xmm0, 0xc(%rdi) vmovdqa %xmm0, 0x20(%rdi) vmovdqu %xmm0, 0x2c(%rdi) movl $0x2, %edx movl %ebp, %esi callq 0x2aeaa movq %r14, 0xe0(%rsp) cmpq $0x0, 0x200(%rsp) je 0xdb619 movslq 0x238(%rsp), %rax imulq 0x240(%rsp), %rax testq %rax, %rax je 0xdb619 movq %r13, 0xa8(%rsp) testl %r12d, %r12d movl %r12d, 0x30(%rsp) jle 0xdae14 xorl %ebx, %ebx movl %ebx, %eax cltd idivl (%rsp) movl %edx, %r13d movl %eax, %r12d movl %eax, %edx movq 0x10(%rsp), %rcx imull %ecx, %edx movl %r13d, %r8d movq 0xc0(%rsp), %rsi imull %esi, %r8d movq 0xe0(%rsp), %rax movl %eax, %r14d subl %edx, %r14d cmpl %r14d, %ecx cmovll %ecx, %r14d movq 0x70(%rsp), %rax movl %eax, %ebp subl %r8d, %ebp cmpl %ebp, %esi cmovll %esi, %ebp leaq 0xf0(%rsp), %rdi leaq 0x200(%rsp), %rsi movl %r14d, %ecx movl %ebp, %r9d callq 0xf7557 movslq 0x2bc(%rsp), %rax movslq 0x2c0(%rsp), %rcx movslq %r12d, %rdx imulq 0x2d0(%rsp), %rdx movq 0x2a0(%rsp), %rsi imulq %rsi, %rdx addq 0x290(%rsp), %rdx movl 0x2a8(%rsp), %edi movq 0x2b0(%rsp), %r8 movq %rcx, %r9 imulq %rax, %r9 imulq %rsi, %r13 imulq %r9, %r13 addq %rdx, %r13 movq %r13, 0x2e0(%rsp) movq $0x0, 0x2e8(%rsp) movq %rsi, 0x2f0(%rsp) movl %edi, 0x2f8(%rsp) movq %r8, 0x300(%rsp) movl $0x2, 0x308(%rsp) movl %eax, 0x30c(%rsp) movl %ecx, 0x310(%rsp) movabsq $0x100000001, %rax # imm = 0x100000001 movq %rax, 0x314(%rsp) movq %r9, 0x320(%rsp) leaq 0x200(%rsp), %rdi leaq 0x2e0(%rsp), %rsi movl $0x10, %edx movl %r14d, %ecx movl %ebp, %r8d callq 0xf600e incl %ebx cmpl %ebx, 0x30(%rsp) jne 0xdacd5 movq 0x208(%rsp), %rax testq %rax, %rax je 0xdb683 lock decl (%rax) jne 0xdb683 movq 0x200(%rsp), %rsi movq 0x220(%rsp), %rdi testq %rdi, %rdi je 0xdb66f movq (%rdi), %rax callq *0x18(%rax) jmp 0xdb683 testq %rsi, %rsi je 0xdae62 movq %rsi, %rdi vzeroupper callq 0x244a0 movq 0x208(%rsp), %rax testq %rax, %rax je 0xd6d33 lock decl (%rax) jne 0xd6d33 movq 0x200(%rsp), %rsi movq 0x220(%rsp), %rdi testq %rdi, %rdi je 0xdae9f movq (%rdi), %rax vzeroupper callq *0x18(%rax) 
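# Teardown pattern in the epilogue below (interpretation only; symbol names are not
# visible in this listing): each "lock decl (%rax)" is an atomic reference-count
# decrement, and when it reaches zero the backing buffer is released either through an
# allocator vtable slot ("callq *0x18(%rax)") or a plain deallocation call on the raw
# pointer. The same release sequence is repeated once per temporary buffer created above,
# which is the usual shared-ownership cleanup for scratch matrices in this kind of kernel.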
jmp 0xd6d33 testq %rsi, %rsi je 0xd6d33 movq %rsi, %rdi vzeroupper callq 0x244a0 jmp 0xd6d33 movq 0x178(%rsp), %rax movq 0x10(%rax), %r9 leaq 0x200(%rsp), %rdi movq $0x0, 0x40(%rdi) vpxor %xmm0, %xmm0, %xmm0 vmovdqa %xmm0, (%rdi) vmovdqu %xmm0, 0xc(%rdi) vmovdqa %xmm0, 0x20(%rdi) vmovdqu %xmm0, 0x2c(%rdi) movl $0x2, %r8d movl %ebp, %esi movl $0x1, %edx callq 0x2b0d2 cmpq $0x0, 0x200(%rsp) je 0xdb5df movslq 0x238(%rsp), %rax imulq 0x240(%rsp), %rax testq %rax, %rax je 0xdb5df movq %r13, 0xa8(%rsp) movq %r14, 0xe0(%rsp) movl %r12d, 0x30(%rsp) testl %r12d, %r12d jle 0xdb16e xorl %eax, %eax movq %rax, 0x48(%rsp) cltd idivl (%rsp) movl %edx, %ebx movl %eax, %ebp movl %eax, %r12d movq 0x10(%rsp), %rcx imull %ecx, %ebp movl %edx, 0x18(%rsp) movq 0xc0(%rsp), %rdx imull %edx, %ebx movq 0xe0(%rsp), %rax movl %eax, %r13d subl %ebp, %r13d cmpl %r13d, %ecx cmovll %ecx, %r13d movq 0x70(%rsp), %rax movl %eax, %r14d subl %ebx, %r14d cmpl %r14d, %edx cmovll %edx, %r14d callq 0x3cbe9 movslq 0x22c(%rsp), %rdx movslq 0x230(%rsp), %rcx movl 0x234(%rsp), %esi cltq imulq 0x240(%rsp), %rax movq 0x210(%rsp), %rdi imulq %rdi, %rax addq 0x200(%rsp), %rax movl 0x218(%rsp), %r8d movq 0x220(%rsp), %r9 movq %rax, 0x2e0(%rsp) movq $0x0, 0x2e8(%rsp) movq %rdi, 0x2f0(%rsp) movl %r8d, 0x2f8(%rsp) movq %r9, 0x300(%rsp) movl %edx, 0x30c(%rsp) movl %ecx, 0x310(%rsp) movl $0x1, 0x314(%rsp) movl %esi, 0x318(%rsp) imulq %rdx, %rcx movq %rdi, %rax imulq %rcx, %rax addq $0xf, %rax andq $-0x10, %rax xorl %edx, %edx divq %rdi movq %rax, 0x320(%rsp) movl 0x228(%rsp), %eax leal -0x1(%rax), %edx movl %edx, 0x308(%rsp) cmpl $0x4, %eax jne 0xdb069 movq %rcx, 0x320(%rsp) leaq 0xf0(%rsp), %rdi leaq 0x2e0(%rsp), %rsi movl %ebp, %edx movl %r13d, %ecx movl %ebx, %r8d movl %r14d, %r9d callq 0xf7557 movslq 0x2bc(%rsp), %rax movslq 0x2c0(%rsp), %rcx movslq %r12d, %rdx imulq 0x2d0(%rsp), %rdx movq 0x2a0(%rsp), %rsi imulq %rsi, %rdx addq 0x290(%rsp), %rdx movl 0x2a8(%rsp), %edi movq 0x2b0(%rsp), %r8 movq %rcx, %r9 imulq %rax, %r9 movl 0x18(%rsp), %r10d imulq %rsi, %r10 imulq %r9, %r10 addq %rdx, %r10 movq %r10, 0x358(%rsp) movq $0x0, 0x360(%rsp) movq %rsi, 0x368(%rsp) movl %edi, 0x370(%rsp) movq %r8, 0x378(%rsp) movl $0x2, 0x380(%rsp) movl %eax, 0x384(%rsp) movl %ecx, 0x388(%rsp) movabsq $0x100000001, %rax # imm = 0x100000001 movq %rax, 0x38c(%rsp) movq %r9, 0x398(%rsp) leaq 0x2e0(%rsp), %rdi leaq 0x358(%rsp), %rsi movl $0x10, %edx movl %r13d, %ecx movl %r14d, %r8d callq 0xf600e movq 0x48(%rsp), %rax incl %eax cmpl %eax, 0x30(%rsp) jne 0xdaf46 movq 0x208(%rsp), %rax testq %rax, %rax je 0xdb683 lock decl (%rax) jne 0xdb683 movq 0x200(%rsp), %rsi movq 0x220(%rsp), %rdi testq %rdi, %rdi je 0xdb676 movq (%rdi), %rax callq *0x18(%rax) jmp 0xdb683 testq %rsi, %rsi je 0xdb1b9 movq %rsi, %rdi callq 0x244a0 movq 0x208(%rsp), %rax vmovaps 0x200(%rsp), %xmm0 vmovaps %xmm0, 0x1a0(%rsp) movq 0x210(%rsp), %rcx movq %rcx, 0x1b0(%rsp) movl 0x218(%rsp), %ecx movl %ecx, 0x1b8(%rsp) movq 0x220(%rsp), %rcx movq %rcx, 0x1c0(%rsp) vmovdqu 0x228(%rsp), %xmm0 vmovdqu %xmm0, 0x1c8(%rsp) movl 0x238(%rsp), %ecx movl %ecx, 0x1d8(%rsp) movq 0x240(%rsp), %rcx movq %rcx, 0x1e0(%rsp) testq %rax, %rax je 0xdb32f lock decl (%rax) jne 0xdb32f movq 0x200(%rsp), %rsi movq 0x220(%rsp), %rdi testq %rdi, %rdi je 0xdb322 movq (%rdi), %rax callq *0x18(%rax) jmp 0xdb32f testq %rsi, %rsi je 0xdb274 movq %rsi, %rdi callq 0x244a0 movq 0x208(%rsp), %rax vmovaps 0x200(%rsp), %xmm0 vmovaps %xmm0, 0x1a0(%rsp) movq 0x210(%rsp), %rcx movq %rcx, 0x1b0(%rsp) movl 0x218(%rsp), %ecx movl %ecx, 
0x1b8(%rsp) movq 0x220(%rsp), %rcx movq %rcx, 0x1c0(%rsp) vmovdqu 0x228(%rsp), %xmm0 vmovdqu %xmm0, 0x1c8(%rsp) movl 0x238(%rsp), %ecx movl %ecx, 0x1d8(%rsp) movq 0x240(%rsp), %rcx movq %rcx, 0x1e0(%rsp) testq %rax, %rax je 0xdb3e5 lock decl (%rax) jne 0xdb3e5 movq 0x200(%rsp), %rsi movq 0x220(%rsp), %rdi testq %rdi, %rdi je 0xdb3d8 movq (%rdi), %rax callq *0x18(%rax) jmp 0xdb3e5 testq %rsi, %rsi je 0xdb32f movq %rsi, %rdi callq 0x244a0 cmpl $0x4, 0x1b8(%rsp) jne 0xd6e3b testb $0x1, 0x1d8(%rsp) jne 0xd6e3b leaq 0x200(%rsp), %rsi movq $0x0, 0x40(%rsi) vpxor %xmm0, %xmm0, %xmm0 vmovdqa %xmm0, (%rsi) vmovdqu %xmm0, 0xc(%rsi) vmovdqa %xmm0, 0x20(%rsi) vmovdqu %xmm0, 0x2c(%rsi) leaq 0x1a0(%rsp), %rdi movl $0x8, %edx movq %rbx, %rcx vzeroupper callq 0x2c9a4 movq 0x208(%rsp), %rax testq %rax, %rax je 0xdb39a lock incl (%rax) movq 0x1a8(%rsp), %rax testq %rax, %rax je 0xdb51f lock decl (%rax) jne 0xdb51f movq 0x1a0(%rsp), %rsi movq 0x1c0(%rsp), %rdi testq %rdi, %rdi je 0xdb512 movq (%rdi), %rax callq *0x18(%rax) jmp 0xdb51f testq %rsi, %rsi je 0xdb3e5 movq %rsi, %rdi callq 0x244a0 leaq 0x448(%r15), %rdx leaq 0x1a8(%r15), %rcx leaq 0x1a0(%rsp), %rdi movq %r14, %rsi movq %rbx, %r8 vzeroupper callq 0x2cfbc movq 0xd0(%rsp), %rax movq 0x2c8(%rax), %rdi xorl %ebx, %ebx testq %rdi, %rdi je 0xdb435 movq (%rdi), %rax movq 0x338(%rsp), %rsi movq 0x178(%rsp), %rdx callq *0x48(%rax) movq 0x1a8(%rsp), %rax testq %rax, %rax je 0xdb477 lock decl (%rax) jne 0xdb477 movq 0x1a0(%rsp), %rsi movq 0x1c0(%rsp), %rdi testq %rdi, %rdi je 0xdb467 movq (%rdi), %rax vzeroupper callq *0x18(%rax) jmp 0xdb477 testq %rsi, %rsi je 0xdb477 movq %rsi, %rdi vzeroupper callq 0x244a0 movq 0xf8(%rsp), %rax testq %rax, %rax je 0xdb4b9 lock decl (%rax) jne 0xdb4b9 movq 0xf0(%rsp), %rsi movq 0x110(%rsp), %rdi testq %rdi, %rdi je 0xdb4a9 movq (%rdi), %rax vzeroupper callq *0x18(%rax) jmp 0xdb4b9 testq %rsi, %rsi je 0xdb4b9 movq %rsi, %rdi vzeroupper callq 0x244a0 movq 0x418(%rsp), %rax testq %rax, %rax je 0xdb4fb lock decl (%rax) jne 0xdb4fb movq 0x410(%rsp), %rsi movq 0x430(%rsp), %rdi testq %rdi, %rdi je 0xdb4eb movq (%rdi), %rax vzeroupper callq *0x18(%rax) jmp 0xdb4fb testq %rsi, %rsi je 0xdb4fb movq %rsi, %rdi vzeroupper callq 0x244a0 movl %ebx, %eax addq $0x488, %rsp # imm = 0x488 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp vzeroupper retq testq %rsi, %rsi je 0xdb51f movq %rsi, %rdi callq 0x244a0 movq 0x208(%rsp), %rax vmovaps 0x200(%rsp), %xmm0 vmovaps %xmm0, 0x1a0(%rsp) movq 0x210(%rsp), %rcx movq %rcx, 0x1b0(%rsp) movl 0x218(%rsp), %ecx movl %ecx, 0x1b8(%rsp) movq 0x220(%rsp), %rcx movq %rcx, 0x1c0(%rsp) vmovdqu 0x228(%rsp), %xmm0 vmovdqu %xmm0, 0x1c8(%rsp) movl 0x238(%rsp), %ecx movl %ecx, 0x1d8(%rsp) movq 0x240(%rsp), %rcx movq %rcx, 0x1e0(%rsp) testq %rax, %rax je 0xd6e3b lock decl (%rax) jne 0xd6e3b movq 0x200(%rsp), %rsi movq 0x220(%rsp), %rdi testq %rdi, %rdi je 0xdb5c9 movq (%rdi), %rax callq *0x18(%rax) jmp 0xd6e3b testq %rsi, %rsi je 0xd6e3b movq %rsi, %rdi callq 0x244a0 jmp 0xd6e3b movq 0x208(%rsp), %rax testq %rax, %rax je 0xdc15a lock decl (%rax) jne 0xdc15a movq 0x200(%rsp), %rsi movq 0x220(%rsp), %rdi testq %rdi, %rdi je 0xdb653 movq (%rdi), %rax callq *0x18(%rax) jmp 0xdc15a movq 0x208(%rsp), %rax testq %rax, %rax je 0xdc15a lock decl (%rax) jne 0xdc15a movq 0x200(%rsp), %rsi movq 0x220(%rsp), %rdi testq %rdi, %rdi je 0xdb661 movq (%rdi), %rax callq *0x18(%rax) jmp 0xdc15a testq %rsi, %rsi je 0xdc15a jmp 0xdc152 testq %rsi, %rsi je 0xdc15a jmp 0xdc152 testq %rsi, %rsi je 0xdb683 jmp 0xdb67b testq 
%rsi, %rsi je 0xdb683 movq %rsi, %rdi callq 0x244a0 movq 0x178(%rsp), %rax movl 0x184(%rsp), %ebp movl 0x3c8(%rsp), %esi imull %ebp, %esi shll $0x4, %esi movq 0x10(%rax), %r9 leaq 0x2e0(%rsp), %rdi movq $0x0, 0x40(%rdi) vpxor %xmm0, %xmm0, %xmm0 vmovdqa %xmm0, (%rdi) vmovdqu %xmm0, 0xc(%rdi) vmovdqa %xmm0, 0x20(%rdi) vmovdqu %xmm0, 0x2c(%rdi) movl $0x4, %r8d movl $0x1, %edx movl %r15d, %ecx callq 0x2b0d2 movl $0xffffff9c, %ebx # imm = 0xFFFFFF9C cmpq $0x0, 0x2e0(%rsp) je 0xdc11e movslq 0x318(%rsp), %rax imulq 0x320(%rsp), %rax testq %rax, %rax je 0xdc11e cmpl $0x0, 0x170(%rsp) jle 0xdc11c xorl %r15d, %r15d movq 0xe0(%rsp), %r14 movq 0x70(%rsp), %r12 movq %r14, 0xe0(%rsp) movl 0x3c8(%rsp), %ebx callq 0x3cbe9 movq 0xa8(%rsp), %rcx movl %ebx, %edx movl %r15d, 0x50(%rsp) imull %r15d, %edx subl %edx, %ecx cmpl %ecx, %ebx cmovll %ebx, %ecx testl %r14d, %r14d jle 0xdc106 cltq imulq 0x320(%rsp), %rax imulq 0x2f0(%rsp), %rax addq 0x2e0(%rsp), %rax movq %rax, 0x30(%rsp) movl %ecx, %eax movq %rax, 0x60(%rsp) movl %ecx, 0x68(%rsp) movslq %ecx, %rax movq %rax, 0x58(%rsp) movq %rdx, 0xb8(%rsp) movslq %edx, %rax movq %rax, 0x90(%rsp) xorl %eax, %eax movl %r14d, %r15d movq %rax, 0x18(%rsp) subl %eax, %r15d cmpl %r15d, %ebp cmovll %ebp, %r15d testl %r12d, %r12d movq 0xd0(%rsp), %r14 jle 0xdb9b9 movl 0x14c(%rsp), %ebx movq 0xb8(%rsp), %rax cltd idivl 0x3c8(%rsp) cltq movq %rax, (%rsp) movq 0x18(%rsp), %rax cltd idivl %ebp cltq movq %rax, 0x10(%rsp) xorl %r10d, %r10d movl %r12d, %r11d movl %ebx, %ebp subl %r11d, %ebp cmovll %ebx, %r11d movslq 0x394(%r14), %rcx movslq 0x398(%r14), %rsi movq 0x3a8(%r14), %rdi imulq (%rsp), %rdi movq 0x378(%r14), %r8 imulq %r8, %rdi addq 0x368(%r14), %rdi movl 0x380(%r14), %r9d movq 0x388(%r14), %r12 movq %rsi, %r13 imulq %rcx, %r13 movl %r10d, %eax cltd idivl %ebx cltq movq %r8, %rdx imulq %r13, %rdx imulq %rax, %rdx addq %rdi, %rdx movq %rdx, 0x200(%rsp) xorl %edx, %edx movq %rdx, 0x208(%rsp) movq %r8, 0x210(%rsp) movl %r9d, 0x218(%rsp) movq %r12, 0x220(%rsp) movl $0x2, %r9d movl %r9d, 0x228(%rsp) movl %ecx, 0x22c(%rsp) movl %esi, 0x230(%rsp) movabsq $0x100000001, %r12 # imm = 0x100000001 movq %r12, 0x234(%rsp) movq %r13, 0x240(%rsp) movq 0x70(%rsp), %r13 movslq 0x2bc(%rsp), %rcx movslq 0x2c0(%rsp), %rdx movq 0x2a0(%rsp), %rsi movq %rdx, %rdi imulq %rcx, %rdi movq %rdi, %r8 imulq %rsi, %r8 imulq %rax, %r8 movq 0x2d0(%rsp), %rax imulq 0x10(%rsp), %rax imulq %rsi, %rax addq 0x290(%rsp), %rax addq %rax, %r8 movq %r8, 0x358(%rsp) xorl %eax, %eax movq %rax, 0x360(%rsp) movq %rsi, 0x368(%rsp) movl 0x2a8(%rsp), %eax movl %eax, 0x370(%rsp) movq 0x2b0(%rsp), %rax movq %rax, 0x378(%rsp) movl %r9d, 0x380(%rsp) movl %ecx, 0x384(%rsp) negl %ebp movl %edx, 0x388(%rsp) movq %r12, 0x38c(%rsp) movq %rdi, 0x398(%rsp) leal (%r10,%rbx), %r12d xorl %eax, %eax cmpl %r13d, %r12d setge %al subq $0x8, %rsp leaq 0x208(%rsp), %rdi leaq 0x360(%rsp), %rsi movq 0x38(%rsp), %rdx movl $0x10, %ecx movl 0x70(%rsp), %r8d movl %r15d, %r9d pushq %rax pushq %r11 pushq %r10 callq 0xf6615 addq $0x20, %rsp movl %ebp, %r11d movl %r12d, %r10d cmpl %r13d, %r12d jl 0xdb7fe movslq 0x1cc(%rsp), %rdx movl 0x1b8(%rsp), %r11d movl 0x1d0(%rsp), %eax movl %eax, 0x10(%rsp) movl 0x1e0(%rsp), %esi imull %r11d, %esi leal 0x1(%rdx), %eax shrl $0x1f, %eax movq %rdx, (%rsp) addl %edx, %eax incl %eax sarl %eax movl %eax, 0xa0(%rsp) movl %r15d, %eax shll $0x4, %eax movl %eax, 0x88(%rsp) xorl %r14d, %r14d movl 0x68(%rsp), %ecx cmpl $0x4, %ecx movq %r15, %rdx movq %r15, 0x20(%rsp) jl 0xdbca5 leal (,%rdx,4), %eax movslq %eax, %r10 leal 
(,%rdx,8), %eax movslq %eax, %r8 leal (%r10,%r10,2), %eax movslq %eax, %rbx movslq 0x88(%rsp), %r13 movl %esi, 0x28(%rsp) movslq %esi, %rcx leal (%rcx,%rcx), %eax cltq movq %rax, 0xc0(%rsp) movq %rcx, %rsi leal (%rcx,%rcx,2), %eax cltq movq %rax, 0x48(%rsp) movl %r11d, %eax imull (%rsp), %eax movslq %eax, %r15 movl %edx, %eax movq %rax, 0x150(%rsp) movl %edx, %eax shll $0x6, %eax movl %eax, 0x78(%rsp) shlq $0x2, %r13 movq $0x0, 0x38(%rsp) movq $0x0, 0x80(%rsp) testl %edx, %edx jle 0xdbc67 movslq 0x38(%rsp), %rax movq 0x30(%rsp), %rcx leaq (%rcx,%rax,4), %rdi movq 0xb8(%rsp), %rax movq 0x80(%rsp), %rcx addl %ecx, %eax cltd idivl %r11d cltq movq %rax, 0x40(%rsp) xorl %ebp, %ebp movl $0x40, %eax movq %rdi, 0x8(%rsp) movq %rdi, %rdx vmovdqa (%rdx,%r10,4), %xmm0 vmovdqa (%rdx,%r8,4), %xmm1 vpaddd (%rdx), %xmm0, %xmm2 vpaddd %xmm1, %xmm2, %xmm2 vpsubd %xmm1, %xmm0, %xmm0 vpaddd (%rdx,%rbx,4), %xmm0, %xmm0 vmovdqa %xmm2, 0x1c0(%rsp,%rax) vmovdqa %xmm0, 0x200(%rsp,%rax) addq $0x10, %rax addq %r13, %rdx cmpq $0x80, %rax jne 0xdbad7 movq 0x18(%rsp), %rax addl %ebp, %eax cltd idivl 0xa0(%rsp) movq 0x1e0(%rsp), %rcx imulq 0x40(%rsp), %rcx movq 0x1b0(%rsp), %rdi imulq %rdi, %rcx addq 0x1a0(%rsp), %rcx movslq 0x1cc(%rsp), %r9 addl %eax, %eax movslq %eax, %r14 imulq %rdi, %r9 imulq %r14, %r9 addq %rcx, %r9 leal (%rdx,%rdx), %ecx imull %r11d, %ecx movslq %ecx, %rcx leaq (%r9,%rcx,4), %r9 leal 0x1(,%rdx,2), %edx xorl %r12d, %r12d movb $0x1, %r14b leaq 0x200(%rsp), %rdi movl 0x10(%rsp), %ecx orl %eax, %r12d cmpl %ecx, %r12d jge 0xdbc2f vmovdqu 0x10(%rdi), %xmm0 vpaddd (%rdi), %xmm0, %xmm1 vmovdqu 0x20(%rdi), %xmm2 vpsubd %xmm2, %xmm0, %xmm0 vpaddd 0x30(%rdi), %xmm0, %xmm0 vpaddd %xmm2, %xmm1, %xmm1 vpsrad $0x2, %xmm1, %xmm1 vpsrad $0x2, %xmm0, %xmm0 cmpl $0x1, %r11d je 0xdbbd4 cmpl $0x4, %r11d jne 0xdbc2b vmovdqa %xmm1, (%r9) cmpl (%rsp), %edx jge 0xdbc2b vmovdqa %xmm0, 0x10(%r9) jmp 0xdbc2b vmovd %xmm1, (%r9) vpextrd $0x1, %xmm1, (%r9,%rsi,4) movq 0xc0(%rsp), %rdi vpextrd $0x2, %xmm1, (%r9,%rdi,4) movq 0x48(%rsp), %rdi vpextrd $0x3, %xmm1, (%r9,%rdi,4) cmpl (%rsp), %edx jge 0xdbc2b vmovd %xmm0, 0x4(%r9) vpextrd $0x1, %xmm0, 0x4(%r9,%rsi,4) movq 0xc0(%rsp), %rdi vpextrd $0x2, %xmm0, 0x4(%r9,%rdi,4) movq 0x48(%rsp), %rdi vpextrd $0x3, %xmm0, 0x4(%r9,%rdi,4) leaq (%r9,%r15,4), %r9 movl $0x1, %r12d leaq 0x240(%rsp), %rdi testb $0x1, %r14b movl $0x0, %r14d jne 0xdbb85 incq %rbp movq 0x8(%rsp), %rdi addq $0x10, %rdi cmpq 0x150(%rsp), %rbp jne 0xdbaca movq 0x80(%rsp), %rcx leaq 0x4(%rcx), %r14 addq $0x7, %rcx movq 0x38(%rsp), %rax addl 0x78(%rsp), %eax movq %rax, 0x38(%rsp) cmpq 0x60(%rsp), %rcx movq %r14, 0x80(%rsp) movq 0x20(%rsp), %rdx jb 0xdba95 movl 0x68(%rsp), %ecx movl 0x28(%rsp), %esi movl %r14d, %eax orl $0x1, %eax cmpl %ecx, %eax jge 0xdbf1e leal (%rdx,%rdx), %eax movslq %eax, %rcx leal (,%rdx,4), %eax movslq %eax, %rdi leal (%rcx,%rcx,2), %eax movslq %eax, %r9 leal (,%rdx,8), %eax movslq %eax, %r10 movq 0x1a0(%rsp), %rax movq %rax, 0x98(%rsp) movq 0x1b0(%rsp), %rax movq 0x1e0(%rsp), %r8 movq %rax, 0x38(%rsp) imulq %rax, %r8 movq %r8, 0xe8(%rsp) movslq %esi, %rax movq %rax, 0xc0(%rsp) movl %r14d, %eax movq %rax, 0x28(%rsp) movl %edx, %eax movq %rax, 0x80(%rsp) imull %edx, %r14d shll $0x4, %r14d movl %edx, %eax shll $0x5, %eax movl %eax, 0xb0(%rsp) shlq $0x2, %r10 movq 0x30(%rsp), %rsi movq %rcx, 0x150(%rsp) movq %r14, 0x78(%rsp) testl %edx, %edx jle 0xdbee8 movslq 0x78(%rsp), %rax leaq (%rsi,%rax,4), %r8 movq 0x90(%rsp), %rax movq 0x28(%rsp), %rdx addq %rdx, %rax imulq 0xe8(%rsp), %rax addq 0x98(%rsp), 
%rax movq %rax, 0x40(%rsp) xorl %r11d, %r11d movl $0x4, %eax movq %r8, 0x48(%rsp) movq %r8, %rdx leaq 0x200(%rsp), %r13 vmovq (%rdx), %xmm0 vmovq (%rdx,%rcx,4), %xmm1 vpaddd %xmm0, %xmm1, %xmm0 vmovq (%rdx,%rdi,4), %xmm2 vpaddd %xmm2, %xmm0, %xmm0 vmovq %xmm0, 0x1e0(%rsp,%rax,8) vpsubd %xmm2, %xmm1, %xmm0 vmovq (%rdx,%r9,4), %xmm1 vpaddd %xmm1, %xmm0, %xmm0 vmovq %xmm0, 0x200(%rsp,%rax,8) incq %rax addq %r10, %rdx cmpq $0x8, %rax jne 0xdbd9a movq 0x18(%rsp), %rax movq %r11, 0x8(%rsp) addl %r11d, %eax cltd idivl 0xa0(%rsp) movslq 0x1cc(%rsp), %rcx addl %eax, %eax movslq %eax, %r11 imulq 0x38(%rsp), %rcx imulq %r11, %rcx addq 0x40(%rsp), %rcx leal (%rdx,%rdx), %r11d movslq %r11d, %r11 leaq (%rcx,%r11,4), %r11 leal 0x1(,%rdx,2), %edx xorl %ebp, %ebp movb $0x1, %bl movl 0x10(%rsp), %ecx orl %eax, %ebp cmpl %ecx, %ebp jge 0xdbea6 movl 0x8(%r13), %r12d movl 0xc(%r13), %ebp movl (%r13), %r8d addl %r12d, %r8d movl 0x10(%r13), %r14d addl %r14d, %r8d movl 0x4(%r13), %esi addl %ebp, %esi movl 0x14(%r13), %ecx addl %ecx, %esi sarl $0x2, %r8d sarl $0x2, %esi movl 0x18(%r13), %r15d movl 0x1c(%r13), %r13d movl %r8d, (%r11) movq 0xc0(%rsp), %r8 movl %esi, (%r11,%r8,4) cmpl (%rsp), %edx jge 0xdbe95 subl %ecx, %ebp addl %r13d, %ebp sarl $0x2, %ebp subl %r14d, %r12d addl %r15d, %r12d sarl $0x2, %r12d movl %r12d, 0x4(%r11) movq 0xc0(%rsp), %rcx movl %ebp, 0x4(%r11,%rcx,4) movq (%rsp), %rcx leaq (%r11,%rcx,4), %r11 movq 0x30(%rsp), %rsi movl 0x10(%rsp), %ecx movl $0x1, %ebp leaq 0x220(%rsp), %r13 testb $0x1, %bl movl $0x0, %ebx jne 0xdbe27 movq 0x8(%rsp), %r11 incq %r11 movq 0x48(%rsp), %r8 addq $0x8, %r8 cmpq 0x80(%rsp), %r11 movq 0x150(%rsp), %rcx jne 0xdbd85 movq 0x28(%rsp), %rdx leaq 0x2(%rdx), %rax addq $0x3, %rdx movq 0x78(%rsp), %r14 addl 0xb0(%rsp), %r14d cmpq 0x58(%rsp), %rdx movq %rax, 0x28(%rsp) movq 0x20(%rsp), %rdx jl 0xdbd46 movl %eax, %r14d movl 0x68(%rsp), %ecx cmpl %ecx, %r14d jge 0xdc0e2 movslq %edx, %rdi leal (%rdx,%rdx), %eax movslq %eax, %r8 leal (%rdx,%rdx,2), %eax movslq %eax, %r9 leal (,%rdx,4), %eax movslq %eax, %r10 movq 0x1a0(%rsp), %rax movq %rax, 0x40(%rsp) movq 0x1b0(%rsp), %rax movq 0x1e0(%rsp), %rcx movq %rax, 0xc0(%rsp) imulq %rax, %rcx movq %rcx, 0x150(%rsp) movl %r14d, %eax movq %rax, 0x8(%rsp) movl %edx, %eax movq %rax, 0x48(%rsp) imull %edx, %r14d shll $0x4, %r14d shlq $0x2, %r10 movq %r14, 0x78(%rsp) testl %edx, %edx jle 0xdc0b8 movslq 0x78(%rsp), %rax movq 0x30(%rsp), %rcx leaq (%rcx,%rax,4), %r12 movq 0x90(%rsp), %rax movq 0x8(%rsp), %rcx leaq (%rcx,%rax), %rbp imulq 0x150(%rsp), %rbp addq 0x40(%rsp), %rbp xorl %r11d, %r11d movl $0x4, %eax movq %r12, %rdx movl (%rdx,%rdi,4), %ecx movl (%rdx), %esi addl %ecx, %esi movl (%rdx,%r8,4), %ebx addl %ebx, %esi movl %esi, 0x1f0(%rsp,%rax,4) subl %ebx, %ecx addl (%rdx,%r9,4), %ecx movl %ecx, 0x200(%rsp,%rax,4) incq %rax addq %r10, %rdx cmpq $0x8, %rax jne 0xdbfd1 movq 0x18(%rsp), %rax addl %r11d, %eax cltd idivl 0xa0(%rsp) movslq 0x1cc(%rsp), %rcx addl %eax, %eax movslq %eax, %rsi imulq 0xc0(%rsp), %rcx imulq %rsi, %rcx addq %rbp, %rcx leal (%rdx,%rdx), %esi movslq %esi, %rsi leaq (%rcx,%rsi,4), %r14 leal 0x1(,%rdx,2), %edx xorl %r13d, %r13d movb $0x1, %r15b leaq 0x200(%rsp), %rbx movl 0x10(%rsp), %ecx orl %eax, %r13d cmpl %ecx, %r13d jge 0xdc08c movl 0x4(%rbx), %r13d movl 0x8(%rbx), %ecx movl (%rbx), %esi addl %r13d, %esi addl %ecx, %esi sarl $0x2, %esi movl 0xc(%rbx), %ebx movl %esi, (%r14) cmpl (%rsp), %edx jge 0xdc080 subl %ecx, %r13d addl %ebx, %r13d sarl $0x2, %r13d movl %r13d, 0x4(%r14) movq (%rsp), %rcx leaq (%r14,%rcx,4), 
%r14 movl 0x10(%rsp), %ecx movl $0x1, %r13d leaq 0x210(%rsp), %rbx testb $0x1, %r15b movl $0x0, %r15d jne 0xdc04e incq %r11 addq $0x4, %r12 cmpq 0x48(%rsp), %r11 jne 0xdbfc9 movq 0x8(%rsp), %rcx incq %rcx movq 0x78(%rsp), %r14 addl 0x88(%rsp), %r14d movq %rcx, 0x8(%rsp) cmpq 0x60(%rsp), %rcx movq 0x20(%rsp), %rdx jne 0xdbf8c movl 0x184(%rsp), %ebp movq 0x18(%rsp), %rax addl %ebp, %eax movq 0xe0(%rsp), %r14 cmpl %r14d, %eax movq 0x70(%rsp), %r12 jl 0xdb7a9 movl 0x50(%rsp), %r15d incl %r15d cmpl 0x170(%rsp), %r15d jne 0xdb731 xorl %ebx, %ebx movq 0x2e8(%rsp), %rax testq %rax, %rax je 0xdc15a lock decl (%rax) jne 0xdc15a movq 0x2e0(%rsp), %rsi movq 0x300(%rsp), %rdi testq %rdi, %rdi je 0xdc14d movq (%rdi), %rax callq *0x18(%rax) jmp 0xdc15a testq %rsi, %rsi je 0xdc15a movq %rsi, %rdi callq 0x244a0 movq 0x298(%rsp), %rax testq %rax, %rax je 0xd6d33 lock decl (%rax) jne 0xd6d33 movq 0x290(%rsp), %rsi movq 0x2b0(%rsp), %rdi testq %rdi, %rdi je 0xdc194 movq (%rdi), %rax callq *0x18(%rax) jmp 0xd6d33 testq %rsi, %rsi je 0xd6d33 jmp 0xdaea8 jmp 0xdc656 jmp 0xdc656 jmp 0xdc656 jmp 0xdc656 jmp 0xdc656 jmp 0xdc1c4 jmp 0xdc656 jmp 0xdc1c4 movq %rax, %rbx jmp 0xdc2bf jmp 0xdc656 jmp 0xdc656 jmp 0xdc656 jmp 0xdc656 movq %rax, %rbx movq 0x2e8(%rsp), %rax testq %rax, %rax je 0xdc2bf lock decl (%rax) jne 0xdc2bf movq 0x2e0(%rsp), %rsi movq 0x300(%rsp), %rdi testq %rdi, %rdi jne 0xdc220 testq %rsi, %rsi je 0xdc2bf jmp 0xdc2af movq (%rdi), %rax callq *0x18(%rax) jmp 0xdc2bf jmp 0xdc656 movq %rax, %rbx movq 0x208(%rsp), %rax testq %rax, %rax je 0xdc58c lock decl (%rax) jne 0xdc58c movq 0x200(%rsp), %rsi movq 0x220(%rsp), %rdi testq %rdi, %rdi jne 0xdc270 testq %rsi, %rsi je 0xdc58c jmp 0xdc57c movq (%rdi), %rax callq *0x18(%rax) jmp 0xdc58c jmp 0xdc656 movq %rax, %rbx movq 0x208(%rsp), %rax testq %rax, %rax je 0xdc2bf lock decl (%rax) jne 0xdc2bf movq 0x200(%rsp), %rsi movq 0x220(%rsp), %rdi testq %rdi, %rdi jne 0xdc2b9 testq %rsi, %rsi je 0xdc2bf movq %rsi, %rdi callq 0x244a0 jmp 0xdc2bf movq (%rdi), %rax callq *0x18(%rax) movq 0x298(%rsp), %rax testq %rax, %rax je 0xdc58c lock decl (%rax) jne 0xdc58c movq 0x290(%rsp), %rsi movq 0x2b0(%rsp), %rdi testq %rdi, %rdi jne 0xdc2fc testq %rsi, %rsi je 0xdc58c jmp 0xdc57c movq (%rdi), %rax callq *0x18(%rax) jmp 0xdc58c jmp 0xdc656 jmp 0xdc656 movq %rax, %rbx movq 0x208(%rsp), %rax testq %rax, %rax je 0xdc58c lock decl (%rax) jne 0xdc58c movq 0x200(%rsp), %rsi movq 0x220(%rsp), %rdi testq %rdi, %rdi jne 0xdc351 testq %rsi, %rsi je 0xdc58c jmp 0xdc57c movq (%rdi), %rax callq *0x18(%rax) jmp 0xdc58c jmp 0xdc656 movq %rax, %rbx movq 0x208(%rsp), %rax testq %rax, %rax je 0xdc58c lock decl (%rax) jne 0xdc58c movq 0x200(%rsp), %rsi movq 0x220(%rsp), %rdi testq %rdi, %rdi jne 0xdc3a1 testq %rsi, %rsi je 0xdc58c jmp 0xdc57c movq (%rdi), %rax callq *0x18(%rax) jmp 0xdc58c jmp 0xdc656 jmp 0xdc656 jmp 0xdc656 movq %rax, %rbx jmp 0xdc550 jmp 0xdc656 jmp 0xdc656 movq %rax, %rbx jmp 0xdc514 jmp 0xdc3d7 movq %rax, %rbx jmp 0xdc58c jmp 0xdc656 jmp 0xdc656 movq %rax, %rbx movq 0x298(%rsp), %rax testq %rax, %rax je 0xdc550 lock decl (%rax) jne 0xdc550 movq 0x290(%rsp), %rsi movq 0x2b0(%rsp), %rdi testq %rdi, %rdi jne 0xdc429 testq %rsi, %rsi je 0xdc550 jmp 0xdc540 movq (%rdi), %rax callq *0x18(%rax) jmp 0xdc550 jmp 0xdc656 jmp 0xdc443 movq %rax, %rbx jmp 0xdc604 movq %rax, %rbx jmp 0xdc5c8 movq %rax, %rbx jmp 0xdc4d8 jmp 0xdc656 jmp 0xdc656 movq %rax, %rbx movq 0x3d0(%rsp), %rax testq %rax, %rax je 0xdc49c lock decl (%rax) jne 0xdc49c movq 0x3c8(%rsp), %rsi movq 0x3e8(%rsp), %rdi testq 
%rdi, %rdi jne 0xdc496 testq %rsi, %rsi je 0xdc49c movq %rsi, %rdi callq 0x244a0 jmp 0xdc49c movq (%rdi), %rax callq *0x18(%rax) movq 0x360(%rsp), %rax testq %rax, %rax je 0xdc4d8 lock decl (%rax) jne 0xdc4d8 movq 0x358(%rsp), %rsi movq 0x378(%rsp), %rdi testq %rdi, %rdi jne 0xdc4d2 testq %rsi, %rsi je 0xdc4d8 movq %rsi, %rdi callq 0x244a0 jmp 0xdc4d8 movq (%rdi), %rax callq *0x18(%rax) movq 0x2e8(%rsp), %rax testq %rax, %rax je 0xdc514 lock decl (%rax) jne 0xdc514 movq 0x2e0(%rsp), %rsi movq 0x300(%rsp), %rdi testq %rdi, %rdi jne 0xdc50e testq %rsi, %rsi je 0xdc514 movq %rsi, %rdi callq 0x244a0 jmp 0xdc514 movq (%rdi), %rax callq *0x18(%rax) movq 0x298(%rsp), %rax testq %rax, %rax je 0xdc550 lock decl (%rax) jne 0xdc550 movq 0x290(%rsp), %rsi movq 0x2b0(%rsp), %rdi testq %rdi, %rdi jne 0xdc54a testq %rsi, %rsi je 0xdc550 movq %rsi, %rdi callq 0x244a0 jmp 0xdc550 movq (%rdi), %rax callq *0x18(%rax) movq 0x208(%rsp), %rax testq %rax, %rax je 0xdc58c lock decl (%rax) jne 0xdc58c movq 0x200(%rsp), %rsi movq 0x220(%rsp), %rdi testq %rdi, %rdi jne 0xdc586 testq %rsi, %rsi je 0xdc58c movq %rsi, %rdi callq 0x244a0 jmp 0xdc58c movq (%rdi), %rax callq *0x18(%rax) movq 0x1a8(%rsp), %rax testq %rax, %rax je 0xdc5c8 lock decl (%rax) jne 0xdc5c8 movq 0x1a0(%rsp), %rsi movq 0x1c0(%rsp), %rdi testq %rdi, %rdi jne 0xdc5c2 testq %rsi, %rsi je 0xdc5c8 movq %rsi, %rdi callq 0x244a0 jmp 0xdc5c8 movq (%rdi), %rax callq *0x18(%rax) movq 0xf8(%rsp), %rax testq %rax, %rax je 0xdc604 lock decl (%rax) jne 0xdc604 movq 0xf0(%rsp), %rsi movq 0x110(%rsp), %rdi testq %rdi, %rdi jne 0xdc5fe testq %rsi, %rsi je 0xdc604 movq %rsi, %rdi callq 0x244a0 jmp 0xdc604 movq (%rdi), %rax callq *0x18(%rax) movq 0x418(%rsp), %rax testq %rax, %rax je 0xdc640 lock decl (%rax) jne 0xdc640 movq 0x410(%rsp), %rsi movq 0x430(%rsp), %rdi testq %rdi, %rdi jne 0xdc63a testq %rsi, %rsi je 0xdc640 movq %rsi, %rdi callq 0x244a0 jmp 0xdc640 movq (%rdi), %rax callq *0x18(%rax) movq %rbx, %rdi callq 0x243e0 jmp 0xdc656 jmp 0xdc656 jmp 0xdc656 jmp 0xdc656 jmp 0xdc656 jmp 0xdc656 jmp 0xdc656 movq %rax, %rdi callq 0x2953f
/Tencent[P]ncnn/build_O3/src/layer/x86/convolution_x86_fma.cpp
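A note on the listing above: nearly all of its exit and unwind paths are instances of a single idiom, the reference-counted release of an ncnn::Mat. Each block tests the refcount pointer, performs a `lock decl` on it, and only the thread that drops the count to zero frees the payload — through the allocator's virtual deleter slot (`callq *0x18(%rax)`) when an allocator is attached, otherwise through plain free (the direct `callq 0x244a0`). Below is a minimal sketch of that idiom with hypothetical type names; the real Mat tracks the count through a raw int* and an atomic-add macro rather than std::atomic, but the control flow is the same.

#include <atomic>
#include <cstdlib>

struct AllocatorLike
{
    virtual ~AllocatorLike() = default;
    virtual void fastFree(void* ptr) = 0; // the virtual slot invoked indirectly
};

struct MatLike
{
    void* data = nullptr;
    std::atomic<int>* refcount = nullptr; // target of `lock decl (%rax)`
    AllocatorLike* allocator = nullptr;

    void release()
    {
        // fetch_sub returns the old value; old == 1 means we just hit zero,
        // mirroring the `lock decl` + fall-through-on-zero pattern above.
        if (refcount && refcount->fetch_sub(1) == 1)
        {
            if (allocator)
                allocator->fastFree(data); // `callq *0x18(%rax)`
            else if (data)
                free(data);                // `callq 0x244a0`
        }
        data = nullptr;
        refcount = nullptr;
    }
};

Reading the long chains of `jmp 0xdc656`-style cleanup blocks as this one release, inlined once per live Mat on each unwind path, makes the tail of the disassembly much less opaque.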
ncnn::transpose_pack_B_tile_int8(ncnn::Mat const&, ncnn::Mat&, int, int, int, int)
static void transpose_pack_B_tile_int8(const Mat& B, Mat& BT, int batch, int max_jj, int max_kk, int nT) { #pragma omp parallel for num_threads(nT) for (int b = 0; b < batch; b++) { short* pp = BT.row<short>(b); int jj = 0; #if __SSE2__ #if defined(__x86_64__) || defined(_M_X64) #if __AVX512F__ for (; jj + 15 < max_jj; jj += 16) { const short* p0 = B; int kk = 0; p0 += (b * max_jj + jj) * 16; for (; kk + 15 < max_kk; kk += 16) { __m512i _r0 = _mm512_loadu_si512((const __m512i*)p0); __m512i _r1 = _mm512_loadu_si512((const __m512i*)(p0 + 32)); __m512i _r2 = _mm512_loadu_si512((const __m512i*)(p0 + 64)); __m512i _r3 = _mm512_loadu_si512((const __m512i*)(p0 + 96)); __m512i _r4 = _mm512_loadu_si512((const __m512i*)(p0 + 128)); __m512i _r5 = _mm512_loadu_si512((const __m512i*)(p0 + 160)); __m512i _r6 = _mm512_loadu_si512((const __m512i*)(p0 + 192)); __m512i _r7 = _mm512_loadu_si512((const __m512i*)(p0 + 224)); __m512i _tmp0 = _mm512_shuffle_i32x4(_r0, _r2, _MM_SHUFFLE(1, 0, 1, 0)); __m512i _tmp1 = _mm512_shuffle_i32x4(_r0, _r2, _MM_SHUFFLE(3, 2, 3, 2)); __m512i _tmp2 = _mm512_shuffle_i32x4(_r1, _r3, _MM_SHUFFLE(1, 0, 1, 0)); __m512i _tmp3 = _mm512_shuffle_i32x4(_r1, _r3, _MM_SHUFFLE(3, 2, 3, 2)); __m512i _tmp4 = _mm512_shuffle_i32x4(_r4, _r6, _MM_SHUFFLE(1, 0, 1, 0)); __m512i _tmp5 = _mm512_shuffle_i32x4(_r4, _r6, _MM_SHUFFLE(3, 2, 3, 2)); __m512i _tmp6 = _mm512_shuffle_i32x4(_r5, _r7, _MM_SHUFFLE(1, 0, 1, 0)); __m512i _tmp7 = _mm512_shuffle_i32x4(_r5, _r7, _MM_SHUFFLE(3, 2, 3, 2)); _r0 = _mm512_unpacklo_epi32(_tmp0, _tmp1); _r1 = _mm512_unpackhi_epi32(_tmp0, _tmp1); _r2 = _mm512_unpacklo_epi32(_tmp2, _tmp3); _r3 = _mm512_unpackhi_epi32(_tmp2, _tmp3); _r4 = _mm512_unpacklo_epi32(_tmp4, _tmp5); _r5 = _mm512_unpackhi_epi32(_tmp4, _tmp5); _r6 = _mm512_unpacklo_epi32(_tmp6, _tmp7); _r7 = _mm512_unpackhi_epi32(_tmp6, _tmp7); _tmp0 = _mm512_unpacklo_epi64(_r0, _r2); _tmp1 = _mm512_unpackhi_epi64(_r0, _r2); _tmp2 = _mm512_unpacklo_epi64(_r1, _r3); _tmp3 = _mm512_unpackhi_epi64(_r1, _r3); _tmp4 = _mm512_unpacklo_epi64(_r4, _r6); _tmp5 = _mm512_unpackhi_epi64(_r4, _r6); _tmp6 = _mm512_unpacklo_epi64(_r5, _r7); _tmp7 = _mm512_unpackhi_epi64(_r5, _r7); _r0 = _mm512_shuffle_i32x4(_tmp0, _tmp4, _MM_SHUFFLE(2, 0, 2, 0)); _r1 = _mm512_shuffle_i32x4(_tmp1, _tmp5, _MM_SHUFFLE(2, 0, 2, 0)); _r2 = _mm512_shuffle_i32x4(_tmp2, _tmp6, _MM_SHUFFLE(2, 0, 2, 0)); _r3 = _mm512_shuffle_i32x4(_tmp3, _tmp7, _MM_SHUFFLE(2, 0, 2, 0)); _r4 = _mm512_shuffle_i32x4(_tmp0, _tmp4, _MM_SHUFFLE(3, 1, 3, 1)); _r5 = _mm512_shuffle_i32x4(_tmp1, _tmp5, _MM_SHUFFLE(3, 1, 3, 1)); _r6 = _mm512_shuffle_i32x4(_tmp2, _tmp6, _MM_SHUFFLE(3, 1, 3, 1)); _r7 = _mm512_shuffle_i32x4(_tmp3, _tmp7, _MM_SHUFFLE(3, 1, 3, 1)); _mm512_storeu_si512((__m512i*)pp, _r0); _mm512_storeu_si512((__m512i*)(pp + 32), _r1); _mm512_storeu_si512((__m512i*)(pp + 64), _r2); _mm512_storeu_si512((__m512i*)(pp + 96), _r3); _mm512_storeu_si512((__m512i*)(pp + 128), _r4); _mm512_storeu_si512((__m512i*)(pp + 160), _r5); _mm512_storeu_si512((__m512i*)(pp + 192), _r6); _mm512_storeu_si512((__m512i*)(pp + 224), _r7); p0 += max_jj * batch * 16; pp += 256; } p0 -= (b * max_jj + jj) * 16; p0 += (b * max_jj + jj) * 8; for (; kk + 7 < max_kk; kk += 8) { __m512i _r0 = _mm512_loadu_si512((const __m512i*)p0); __m512i _r1 = _mm512_loadu_si512((const __m512i*)(p0 + 32)); __m512i _r2 = _mm512_loadu_si512((const __m512i*)(p0 + 64)); __m512i _r3 = _mm512_loadu_si512((const __m512i*)(p0 + 96)); __m512i _tmp0 = _mm512_shuffle_i32x4(_r0, _r1, _MM_SHUFFLE(2, 0, 2, 0)); __m512i _tmp1 = 
_mm512_shuffle_i32x4(_r0, _r1, _MM_SHUFFLE(3, 1, 3, 1)); __m512i _tmp2 = _mm512_shuffle_i32x4(_r2, _r3, _MM_SHUFFLE(2, 0, 2, 0)); __m512i _tmp3 = _mm512_shuffle_i32x4(_r2, _r3, _MM_SHUFFLE(3, 1, 3, 1)); _r0 = _mm512_unpacklo_epi32(_tmp0, _tmp1); _r1 = _mm512_unpackhi_epi32(_tmp0, _tmp1); _r2 = _mm512_unpacklo_epi32(_tmp2, _tmp3); _r3 = _mm512_unpackhi_epi32(_tmp2, _tmp3); _tmp0 = _mm512_permutex_epi64(_r0, _MM_SHUFFLE(3, 1, 2, 0)); _tmp1 = _mm512_permutex_epi64(_r1, _MM_SHUFFLE(3, 1, 2, 0)); _tmp2 = _mm512_permutex_epi64(_r2, _MM_SHUFFLE(3, 1, 2, 0)); _tmp3 = _mm512_permutex_epi64(_r3, _MM_SHUFFLE(3, 1, 2, 0)); _r0 = _mm512_shuffle_i32x4(_tmp0, _tmp2, _MM_SHUFFLE(2, 0, 2, 0)); _r1 = _mm512_shuffle_i32x4(_tmp0, _tmp2, _MM_SHUFFLE(3, 1, 3, 1)); _r2 = _mm512_shuffle_i32x4(_tmp1, _tmp3, _MM_SHUFFLE(2, 0, 2, 0)); _r3 = _mm512_shuffle_i32x4(_tmp1, _tmp3, _MM_SHUFFLE(3, 1, 3, 1)); _mm512_storeu_si512((__m512i*)pp, _r0); _mm512_storeu_si512((__m512i*)(pp + 32), _r1); _mm512_storeu_si512((__m512i*)(pp + 64), _r2); _mm512_storeu_si512((__m512i*)(pp + 96), _r3); p0 += max_jj * batch * 8; pp += 128; } p0 -= (b * max_jj + jj) * 8; p0 += (b * max_jj + jj) * 2; for (; kk + 1 < max_kk; kk += 2) { __m512i _r0 = _mm512_loadu_si512((const __m512i*)p0); _mm512_storeu_si512((__m512i*)pp, _r0); p0 += max_jj * batch * 2; pp += 32; } p0 -= (b * max_jj + jj) * 2; p0 += (b * max_jj + jj); for (; kk < max_kk; kk++) { __m256i _r0 = _mm256_loadu_si256((const __m256i*)p0); _mm256_store_si256((__m256i*)pp, _r0); p0 += max_jj * batch; pp += 16; } } #endif // __AVX512F__ for (; jj + 7 < max_jj; jj += 8) { const short* p0 = B; int kk = 0; #if __AVX512F__ p0 += (b * max_jj + jj) * 16; for (; kk + 15 < max_kk; kk += 16) { __m512i _r0 = _mm512_loadu_si512((const __m512i*)p0); __m512i _r1 = _mm512_loadu_si512((const __m512i*)(p0 + 32)); __m512i _r2 = _mm512_loadu_si512((const __m512i*)(p0 + 64)); __m512i _r3 = _mm512_loadu_si512((const __m512i*)(p0 + 96)); __m512i _tmp0 = _mm512_shuffle_i32x4(_r0, _r2, _MM_SHUFFLE(1, 0, 1, 0)); __m512i _tmp1 = _mm512_shuffle_i32x4(_r0, _r2, _MM_SHUFFLE(3, 2, 3, 2)); __m512i _tmp2 = _mm512_shuffle_i32x4(_r1, _r3, _MM_SHUFFLE(1, 0, 1, 0)); __m512i _tmp3 = _mm512_shuffle_i32x4(_r1, _r3, _MM_SHUFFLE(3, 2, 3, 2)); _r0 = _mm512_unpacklo_epi32(_tmp0, _tmp1); _r1 = _mm512_unpackhi_epi32(_tmp0, _tmp1); _r2 = _mm512_unpacklo_epi32(_tmp2, _tmp3); _r3 = _mm512_unpackhi_epi32(_tmp2, _tmp3); _tmp0 = _mm512_unpacklo_epi64(_r0, _r2); _tmp1 = _mm512_unpackhi_epi64(_r0, _r2); _tmp2 = _mm512_unpacklo_epi64(_r1, _r3); _tmp3 = _mm512_unpackhi_epi64(_r1, _r3); _r0 = _mm512_shuffle_i32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0)); _r1 = _mm512_shuffle_i32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0)); _r2 = _mm512_shuffle_i32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1)); _r3 = _mm512_shuffle_i32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1)); _mm512_storeu_si512((__m512i*)pp, _r0); _mm512_storeu_si512((__m512i*)(pp + 32), _r1); _mm512_storeu_si512((__m512i*)(pp + 64), _r2); _mm512_storeu_si512((__m512i*)(pp + 96), _r3); p0 += max_jj * batch * 16; pp += 128; } p0 -= (b * max_jj + jj) * 16; #endif // __AVX512F__ p0 += (b * max_jj + jj) * 8; for (; kk + 7 < max_kk; kk += 8) { #if __AVX__ __m256 _r0 = _mm256_loadu_ps((const float*)p0); __m256 _r1 = _mm256_loadu_ps((const float*)(p0 + 16)); __m256 _r2 = _mm256_loadu_ps((const float*)(p0 + 32)); __m256 _r3 = _mm256_loadu_ps((const float*)(p0 + 48)); __m256 _tmp0 = _mm256_permute2f128_ps(_r0, _r2, _MM_SHUFFLE(0, 2, 0, 0)); __m256 _tmp1 = _mm256_permute2f128_ps(_r0, _r2, _MM_SHUFFLE(0, 3, 0, 
1)); __m256 _tmp2 = _mm256_permute2f128_ps(_r1, _r3, _MM_SHUFFLE(0, 2, 0, 0)); __m256 _tmp3 = _mm256_permute2f128_ps(_r1, _r3, _MM_SHUFFLE(0, 3, 0, 1)); _r0 = _mm256_unpacklo_ps(_tmp0, _tmp1); _r1 = _mm256_unpackhi_ps(_tmp0, _tmp1); _r2 = _mm256_unpacklo_ps(_tmp2, _tmp3); _r3 = _mm256_unpackhi_ps(_tmp2, _tmp3); _tmp0 = _mm256_castpd_ps(_mm256_unpacklo_pd(_mm256_castps_pd(_r0), _mm256_castps_pd(_r2))); _tmp1 = _mm256_castpd_ps(_mm256_unpackhi_pd(_mm256_castps_pd(_r0), _mm256_castps_pd(_r2))); _tmp2 = _mm256_castpd_ps(_mm256_unpacklo_pd(_mm256_castps_pd(_r1), _mm256_castps_pd(_r3))); _tmp3 = _mm256_castpd_ps(_mm256_unpackhi_pd(_mm256_castps_pd(_r1), _mm256_castps_pd(_r3))); _mm256_storeu_ps((float*)pp, _tmp0); _mm256_storeu_ps((float*)(pp + 16), _tmp1); _mm256_storeu_ps((float*)(pp + 32), _tmp2); _mm256_storeu_ps((float*)(pp + 48), _tmp3); #else __m128i _r0 = _mm_load_si128((const __m128i*)p0); __m128i _r1 = _mm_load_si128((const __m128i*)(p0 + 8)); __m128i _r2 = _mm_load_si128((const __m128i*)(p0 + 8 * 2)); __m128i _r3 = _mm_load_si128((const __m128i*)(p0 + 8 * 3)); __m128i _r4 = _mm_load_si128((const __m128i*)(p0 + 8 * 4)); __m128i _r5 = _mm_load_si128((const __m128i*)(p0 + 8 * 5)); __m128i _r6 = _mm_load_si128((const __m128i*)(p0 + 8 * 6)); __m128i _r7 = _mm_load_si128((const __m128i*)(p0 + 8 * 7)); transpose4x8_epi32(_r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7); _mm_store_si128((__m128i*)pp, _r0); _mm_store_si128((__m128i*)(pp + 8), _r1); _mm_store_si128((__m128i*)(pp + 8 * 2), _r2); _mm_store_si128((__m128i*)(pp + 8 * 3), _r3); _mm_store_si128((__m128i*)(pp + 8 * 4), _r4); _mm_store_si128((__m128i*)(pp + 8 * 5), _r5); _mm_store_si128((__m128i*)(pp + 8 * 6), _r6); _mm_store_si128((__m128i*)(pp + 8 * 7), _r7); #endif // __AVX__ p0 += max_jj * batch * 8; pp += 64; } p0 -= (b * max_jj + jj) * 8; p0 += (b * max_jj + jj) * 2; for (; kk + 1 < max_kk; kk += 2) { #if __AVX__ __m256 _r0 = _mm256_loadu_ps((const float*)p0); _mm256_storeu_ps((float*)pp, _r0); #else __m128i _r0 = _mm_loadu_si128((const __m128i*)p0); __m128i _r1 = _mm_loadu_si128((const __m128i*)(p0 + 8)); _mm_store_si128((__m128i*)pp, _r0); _mm_store_si128((__m128i*)(pp + 8), _r1); #endif // __AVX__ p0 += max_jj * batch * 2; pp += 16; } p0 -= (b * max_jj + jj) * 2; p0 += (b * max_jj + jj); for (; kk < max_kk; kk++) { __m128i _r0 = _mm_loadu_si128((const __m128i*)p0); _mm_store_si128((__m128i*)pp, _r0); p0 += max_jj * batch; pp += 8; } } #endif // defined(__x86_64__) || defined(_M_X64) for (; jj + 3 < max_jj; jj += 4) { const short* p0 = B; int kk = 0; #if __AVX512F__ p0 += (b * max_jj + jj) * 16; for (; kk + 15 < max_kk; kk += 16) { __m512i _r0 = _mm512_loadu_si512((const __m512i*)p0); __m512i _r1 = _mm512_loadu_si512((const __m512i*)(p0 + 32)); __m512i _tmp0 = _mm512_shuffle_i32x4(_r0, _r1, _MM_SHUFFLE(1, 0, 1, 0)); __m512i _tmp1 = _mm512_shuffle_i32x4(_r0, _r1, _MM_SHUFFLE(3, 2, 3, 2)); _r0 = _mm512_unpacklo_epi32(_tmp0, _tmp1); _r1 = _mm512_unpackhi_epi32(_tmp0, _tmp1); _r0 = _mm512_permutex_epi64(_r0, _MM_SHUFFLE(3, 1, 2, 0)); _r1 = _mm512_permutex_epi64(_r1, _MM_SHUFFLE(3, 1, 2, 0)); _tmp0 = _mm512_shuffle_i32x4(_r0, _r1, _MM_SHUFFLE(1, 0, 1, 0)); _tmp1 = _mm512_shuffle_i32x4(_r0, _r1, _MM_SHUFFLE(3, 2, 3, 2)); _r0 = _mm512_unpacklo_epi64(_tmp0, _tmp1); _r1 = _mm512_unpackhi_epi64(_tmp0, _tmp1); _mm512_storeu_si512((__m512i*)pp, _r0); _mm512_storeu_si512((__m512i*)(pp + 32), _r1); p0 += max_jj * batch * 16; pp += 64; } p0 -= (b * max_jj + jj) * 16; #endif // __AVX512F__ p0 += (b * max_jj + jj) * 8; for (; kk + 7 < max_kk; kk += 8) { 
__m128i _r0 = _mm_load_si128((const __m128i*)p0); __m128i _r1 = _mm_load_si128((const __m128i*)(p0 + 8)); __m128i _r2 = _mm_load_si128((const __m128i*)(p0 + 8 * 2)); __m128i _r3 = _mm_load_si128((const __m128i*)(p0 + 8 * 3)); transpose4x4_epi32(_r0, _r1, _r2, _r3); _mm_storeu_si128((__m128i*)pp, _r0); _mm_storeu_si128((__m128i*)(pp + 8), _r1); _mm_storeu_si128((__m128i*)(pp + 8 * 2), _r2); _mm_storeu_si128((__m128i*)(pp + 8 * 3), _r3); p0 += max_jj * batch * 8; pp += 32; } p0 -= (b * max_jj + jj) * 8; p0 += (b * max_jj + jj) * 2; for (; kk + 1 < max_kk; kk += 2) { __m128i _r0 = _mm_loadu_si128((const __m128i*)p0); _mm_storeu_si128((__m128i*)pp, _r0); p0 += max_jj * batch * 2; pp += 8; } p0 -= (b * max_jj + jj) * 2; p0 += (b * max_jj + jj); for (; kk < max_kk; kk++) { pp[0] = p0[0]; pp[1] = p0[1]; pp[2] = p0[2]; pp[3] = p0[3]; p0 += max_jj * batch; pp += 4; } } #endif // __SSE2__ for (; jj + 1 < max_jj; jj += 2) { const short* p0 = B; int kk = 0; #if __SSE2__ #if __AVX512F__ p0 += (b * max_jj + jj) * 16; for (; kk + 15 < max_kk; kk += 16) { __m256i _r0 = _mm256_load_si256((const __m256i*)p0); __m256i _r1 = _mm256_load_si256((const __m256i*)(p0 + 16)); transpose8x2_epi32(_r0, _r1); _mm256_storeu_si256((__m256i*)pp, _r0); _mm256_storeu_si256((__m256i*)(pp + 16), _r1); p0 += max_jj * batch * 16; pp += 32; } p0 -= (b * max_jj + jj) * 16; #endif // __AVX512F__ p0 += (b * max_jj + jj) * 8; for (; kk + 7 < max_kk; kk += 8) { __m128i _r0 = _mm_load_si128((const __m128i*)p0); __m128i _r1 = _mm_load_si128((const __m128i*)(p0 + 8)); __m128i _tmp0 = _mm_unpacklo_epi32(_r0, _r1); __m128i _tmp1 = _mm_unpackhi_epi32(_r0, _r1); _mm_storeu_si128((__m128i*)pp, _tmp0); _mm_storeu_si128((__m128i*)(pp + 8), _tmp1); p0 += max_jj * batch * 8; pp += 16; } p0 -= (b * max_jj + jj) * 8; #endif // __SSE2__ p0 += (b * max_jj + jj) * 2; for (; kk + 1 < max_kk; kk += 2) { pp[0] = p0[0]; pp[1] = p0[1]; pp[2] = p0[2]; pp[3] = p0[3]; p0 += max_jj * batch * 2; pp += 4; } p0 -= (b * max_jj + jj) * 2; p0 += (b * max_jj + jj); for (; kk < max_kk; kk++) { pp[0] = p0[0]; pp[1] = p0[1]; p0 += max_jj * batch; pp += 2; } } for (; jj < max_jj; jj++) { const short* p0 = B; int kk = 0; #if __SSE2__ #if __AVX512F__ p0 += (b * max_jj + jj) * 16; for (; kk + 15 < max_kk; kk += 16) { __m256i _r0 = _mm256_load_si256((const __m256i*)p0); _mm256_storeu_si256((__m256i*)pp, _r0); p0 += max_jj * batch * 16; pp += 16; } p0 -= (b * max_jj + jj) * 16; #endif // __AVX512F__ p0 += (b * max_jj + jj) * 8; for (; kk + 7 < max_kk; kk += 8) { __m128i _r0 = _mm_load_si128((const __m128i*)p0); _mm_storeu_si128((__m128i*)pp, _r0); p0 += max_jj * batch * 8; pp += 8; } p0 -= (b * max_jj + jj) * 8; #endif // __SSE2__ p0 += (b * max_jj + jj) * 2; for (; kk + 1 < max_kk; kk += 2) { pp[0] = p0[0]; pp[1] = p0[1]; p0 += max_jj * batch * 2; pp += 2; } p0 -= (b * max_jj + jj) * 2; p0 += (b * max_jj + jj); for (; kk < max_kk; kk++) { pp[0] = p0[0]; p0 += max_jj * batch; pp += 1; } } } }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx movq %rsi, -0x10(%rsp) testl %edx, %edx jle 0x12b23f movq %rdi, %r15 movl %ecx, %edi imull %edx, %edi leal (,%rdi,8), %eax leal (%rdi,%rdi), %esi cltq movslq %esi, %r9 movslq %edi, %r10 movl %r8d, %esi andl $-0x8, %esi movl %esi, -0x44(%rsp) movslq %ecx, %rsi movq %rsi, -0x40(%rsp) movl %edx, %edx movq %rdx, -0x18(%rsp) addq %rax, %rax addq %r9, %r9 addq %r10, %r10 movq $0x0, -0x38(%rsp) xorl %esi, %esi xorl %edi, %edi movl %ecx, -0x48(%rsp) movq %r15, -0x20(%rsp) movq -0x10(%rsp), %rdx movslq 0x2c(%rdx), %r12 imulq %rdi, %r12 imulq 0x10(%rdx), %r12 addq (%rdx), %r12 cmpl $0x8, %ecx movq %rdi, -0x30(%rsp) movl %esi, -0x24(%rsp) jl 0x12ae3b movl %esi, %r13d leaq (,%r13,2), %rdx negq %rdx shlq $0x2, %r13 imull %ecx, %edi movq %rdi, -0x8(%rsp) xorl %ecx, %ecx movl %esi, %r15d andl $0x1fffffff, %r15d # imm = 0x1FFFFFFF shlq $0x4, %r15 movq -0x8(%rsp), %rdi leaq (%rcx,%rdi), %r11 leal (,%r11,8), %ebx movq -0x20(%rsp), %rdi movq (%rdi), %r14 cmpl $0x8, %r8d jl 0x12ad9e addq %r15, %r14 movl $0x7, %edi vmovups (%r14), %ymm0 vmovups 0x20(%r14), %ymm1 vinsertf128 $0x1, 0x40(%r14), %ymm0, %ymm2 vperm2f128 $0x31, 0x40(%r14), %ymm0, %ymm0 # ymm0 = ymm0[2,3],mem[2,3] vinsertf128 $0x1, 0x60(%r14), %ymm1, %ymm3 vperm2f128 $0x31, 0x60(%r14), %ymm1, %ymm1 # ymm1 = ymm1[2,3],mem[2,3] vunpcklps %ymm0, %ymm2, %ymm4 # ymm4 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[4],ymm0[4],ymm2[5],ymm0[5] vunpckhps %ymm0, %ymm2, %ymm0 # ymm0 = ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[6],ymm0[6],ymm2[7],ymm0[7] vunpcklps %ymm1, %ymm3, %ymm2 # ymm2 = ymm3[0],ymm1[0],ymm3[1],ymm1[1],ymm3[4],ymm1[4],ymm3[5],ymm1[5] vunpckhps %ymm1, %ymm3, %ymm1 # ymm1 = ymm3[2],ymm1[2],ymm3[3],ymm1[3],ymm3[6],ymm1[6],ymm3[7],ymm1[7] vunpcklpd %ymm2, %ymm4, %ymm3 # ymm3 = ymm4[0],ymm2[0],ymm4[2],ymm2[2] vunpckhpd %ymm2, %ymm4, %ymm2 # ymm2 = ymm4[1],ymm2[1],ymm4[3],ymm2[3] vunpcklpd %ymm1, %ymm0, %ymm4 # ymm4 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] vunpckhpd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] vmovups %ymm3, (%r12) vmovups %ymm2, 0x20(%r12) vmovups %ymm4, 0x40(%r12) vmovups %ymm0, 0x60(%r12) subq $-0x80, %r12 addq %rax, %r14 addl $0x8, %edi cmpl %r8d, %edi jl 0x12ad27 movl -0x44(%rsp), %ebp jmp 0x12ada4 xorl %ebp, %ebp leaq (%r14,%rbx,2), %r14 movl %ebp, %edi orl $0x1, %edi cmpl %r8d, %edi jge 0x12adde movq %r13, %rdi subq %r15, %rdi addq %rdi, %r14 movl %ebp, %r11d vmovups (%r14), %ymm0 vmovups %ymm0, (%r12) addq $0x20, %r12 leal 0x2(%r11), %ebp addq %r9, %r14 addl $0x3, %r11d cmpl %r8d, %r11d movl %ebp, %r11d jl 0x12adba jmp 0x12ade8 addq %rbx, %rbx subq %rbx, %r14 leaq (%r14,%r11,4), %r14 movl %r8d, %r11d subl %ebp, %r11d jle 0x12ae0a addq %rdx, %r14 vmovups (%r14), %xmm0 vmovaps %xmm0, (%r12) addq $0x10, %r12 addq %r10, %r14 decl %r11d jne 0x12adf3 leaq 0x8(%rcx), %r14 addq $0xf, %rcx addl $0x8, %esi addq $0x20, %r13 addq $-0x10, %rdx cmpq -0x40(%rsp), %rcx movq %r14, %rcx jl 0x12acf2 movl -0x48(%rsp), %ecx movq -0x20(%rsp), %r15 movq -0x30(%rsp), %rdi jmp 0x12ae3e xorl %r14d, %r14d movl %r14d, %edx orl $0x3, %edx cmpl %ecx, %edx jge 0x12afbe movq %rdi, %r13 imulq -0x40(%rsp), %r13 movl %r14d, %ebp movq -0x38(%rsp), %rcx leaq (%rcx,%rbp), %rdi movq %rdi, %rdx shlq $0x4, %rdx leaq (,%rdi,4), %rcx leaq (%rcx,%rcx,2), %rsi negq %rsi addq %rdi, %rdi movl $0x6, %ecx subq %rdi, %rcx movq (%r15), %r14 movq %rbp, %r11 addq %r13, %r11 cmpl $0x8, %r8d jl 0x12af00 addq %rdx, %r14 movl $0x7, %edi vmovaps (%r14), %xmm0 vmovaps 0x10(%r14), %xmm1 vmovaps 0x20(%r14), %xmm2 vmovaps 
0x30(%r14), %xmm3 vunpcklps %xmm1, %xmm0, %xmm4 # xmm4 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] vunpckhps %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] vunpcklps %xmm3, %xmm2, %xmm1 # xmm1 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] vunpckhps %xmm3, %xmm2, %xmm2 # xmm2 = xmm2[2],xmm3[2],xmm2[3],xmm3[3] vmovlhps %xmm1, %xmm4, %xmm3 # xmm3 = xmm4[0],xmm1[0] vunpckhpd %xmm1, %xmm4, %xmm1 # xmm1 = xmm4[1],xmm1[1] vmovlhps %xmm2, %xmm0, %xmm4 # xmm4 = xmm0[0],xmm2[0] vunpckhpd %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[1],xmm2[1] vmovups %xmm3, (%r12) vmovups %xmm1, 0x10(%r12) vmovups %xmm4, 0x20(%r12) vmovups %xmm0, 0x30(%r12) addq $0x40, %r12 addq %rax, %r14 addl $0x8, %edi cmpl %r8d, %edi jl 0x12ae99 movl -0x44(%rsp), %edi jmp 0x12af0c movq %r11, %rdi shlq $0x4, %rdi addq %rdi, %r14 xorl %edi, %edi movl %edi, %ebx orl $0x1, %ebx cmpl %r8d, %ebx jge 0x12af40 addq %rsi, %r14 movl %edi, %r11d vmovups (%r14), %xmm0 vmovups %xmm0, (%r12) addq $0x10, %r12 leal 0x2(%r11), %edi addq %r9, %r14 addl $0x3, %r11d cmpl %r8d, %r11d movl %edi, %r11d jl 0x12af1c jmp 0x12af52 leaq (,%r11,8), %rbx addq %rbx, %rbx subq %rbx, %r14 leaq (%r14,%r11,4), %r14 movl %r8d, %r11d subl %edi, %r11d jle 0x12af93 addq %rcx, %r14 movzwl -0x6(%r14), %edi movw %di, (%r12) movzwl -0x4(%r14), %edi movw %di, 0x2(%r12) movzwl -0x2(%r14), %edi movw %di, 0x4(%r12) movzwl (%r14), %edi movw %di, 0x6(%r12) addq $0x8, %r12 addq %r10, %r14 decl %r11d jne 0x12af5d leaq 0x4(%rbp), %r14 addq $0x7, %rbp addq $0x40, %rdx addq $-0x30, %rsi addq $-0x8, %rcx cmpq -0x40(%rsp), %rbp movq %r14, %rbp jl 0x12ae82 movl -0x48(%rsp), %ecx movq -0x30(%rsp), %rdi movl %r14d, %edx orl $0x1, %edx cmpl %ecx, %edx jge 0x12b10e movq %rdi, %rdx imulq -0x40(%rsp), %rdx movl %r14d, %esi movq -0x38(%rsp), %rcx leaq (%rcx,%rsi), %rdi movq %rdi, %rcx shlq $0x4, %rcx leaq (,%rdi,4), %r11 leaq (%r11,%r11,2), %r13 negq %r13 addq %rdi, %rdi movl $0x2, %ebp subq %rdi, %rbp movq (%r15), %r14 leaq (%rsi,%rdx), %r11 cmpl $0x8, %r8d jl 0x12b04c addq %rcx, %r14 movl $0x7, %edi vmovaps (%r14), %xmm0 vmovaps 0x10(%r14), %xmm1 vunpcklps %xmm1, %xmm0, %xmm2 # xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] vunpckhps %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] vmovups %xmm2, (%r12) vmovups %xmm0, 0x10(%r12) addq $0x20, %r12 addl $0x8, %edi addq %rax, %r14 cmpl %r8d, %edi jl 0x12b017 movl -0x44(%rsp), %edi jmp 0x12b058 movq %r11, %rdi shlq $0x4, %rdi addq %rdi, %r14 xorl %edi, %edi movl %edi, %ebx orl $0x1, %ebx cmpl %r8d, %ebx jge 0x12b0ab addq %r13, %r14 movl %edi, %r11d movzwl (%r14), %edi movw %di, (%r12) movzwl 0x2(%r14), %edi movw %di, 0x2(%r12) movzwl 0x4(%r14), %edi movw %di, 0x4(%r12) movzwl 0x6(%r14), %edi movw %di, 0x6(%r12) addq $0x8, %r12 leal 0x2(%r11), %edi addl $0x3, %r11d addq %r9, %r14 cmpl %r8d, %r11d movl %edi, %r11d jl 0x12b068 jmp 0x12b0bd leaq (,%r11,8), %rbx addq %rbx, %rbx subq %rbx, %r14 leaq (%r14,%r11,4), %r14 movl %r8d, %r11d subl %edi, %r11d jle 0x12b0e8 addq %rbp, %r14 movzwl -0x2(%r14), %edi movw %di, (%r12) movzwl (%r14), %edi movw %di, 0x2(%r12) addq $0x4, %r12 addq %r10, %r14 decl %r11d jne 0x12b0c8 leaq 0x2(%rsi), %r14 addq $0x3, %rsi addq $0x20, %rcx addq $-0x18, %r13 addq $-0x4, %rbp cmpq -0x40(%rsp), %rsi movq %r14, %rsi jl 0x12b002 movl -0x48(%rsp), %ecx cmpl %ecx, %r14d jge 0x12b213 movq -0x30(%rsp), %rcx imulq -0x40(%rsp), %rcx movslq %r14d, %rdx movq -0x38(%rsp), %rsi addq %rdx, %rsi movq %rsi, %r13 shlq $0x4, %r13 leaq (,%rsi,4), %rdi leaq (%rdi,%rdi,2), %rbp negq %rbp addq %rsi, %rsi negq %rsi movq (%r15), %r14 leaq (%rdx,%rcx), %r11 cmpl 
$0x8, %r8d jl 0x12b17e addq %r13, %r14 movl $0x7, %edi vmovaps (%r14), %xmm0 vmovups %xmm0, (%r12) addq $0x10, %r12 addl $0x8, %edi addq %rax, %r14 cmpl %r8d, %edi jl 0x12b15e movl -0x44(%rsp), %edi jmp 0x12b18a movq %r11, %rdi shlq $0x4, %rdi addq %rdi, %r14 xorl %edi, %edi movl %edi, %ebx orl $0x1, %ebx cmpl %r8d, %ebx jge 0x12b1c7 addq %rbp, %r14 movl %edi, %r11d movzwl (%r14), %edi movw %di, (%r12) movzwl 0x2(%r14), %edi movw %di, 0x2(%r12) addq $0x4, %r12 leal 0x2(%r11), %edi addl $0x3, %r11d addq %r9, %r14 cmpl %r8d, %r11d movl %edi, %r11d jl 0x12b19a jmp 0x12b1d9 leaq (,%r11,8), %rbx addq %rbx, %rbx subq %rbx, %r14 leaq (%r14,%r11,4), %r14 movl %r8d, %r11d subl %edi, %r11d jle 0x12b1f9 addq %rsi, %r14 movzwl (%r14), %edi movw %di, (%r12) addq $0x2, %r12 addq %r10, %r14 decl %r11d jne 0x12b1e4 incq %rdx addq $0x10, %r13 addq $-0xc, %rbp addq $-0x2, %rsi cmpq -0x40(%rsp), %rdx jne 0x12b149 movq -0x30(%rsp), %rdi incq %rdi movl -0x48(%rsp), %ecx movl -0x24(%rsp), %esi addl %ecx, %esi movq -0x38(%rsp), %rdx addq -0x40(%rsp), %rdx movq %rdx, -0x38(%rsp) cmpq -0x18(%rsp), %rdi jne 0x12acaf popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp vzeroupper retq
/Tencent[P]ncnn/src/layer/x86/convolution_3x3_winograd_int8.h
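The `kk + 7 < max_kk` path of the 4-wide jj loop above calls transpose4x4_epi32, a helper defined elsewhere in ncnn (src/layer/x86/x86_usability.h). For reference, here is a sketch of the standard SSE2 idiom that name implies — one round of 32-bit unpacks followed by one round of 64-bit unpacks — which is consistent with the vunpcklps/vunpckhps and vmovlhps/vunpckhpd sequence visible in the disassembly above (the compiler simply chose the float-domain encodings of the same shuffles):

#include <emmintrin.h>

// Transpose a 4x4 matrix of 32-bit lanes spread across four XMM registers.
// In the int8 packing loop each 32-bit lane is really a pair of int16
// values, so this moves short-pairs between rows, which is exactly the
// reordering the 4-wide tile needs.
static inline void transpose4x4_epi32(__m128i& r0, __m128i& r1, __m128i& r2, __m128i& r3)
{
    __m128i t0 = _mm_unpacklo_epi32(r0, r1); // a0 b0 a1 b1
    __m128i t1 = _mm_unpacklo_epi32(r2, r3); // c0 d0 c1 d1
    __m128i t2 = _mm_unpackhi_epi32(r0, r1); // a2 b2 a3 b3
    __m128i t3 = _mm_unpackhi_epi32(r2, r3); // c2 d2 c3 d3
    r0 = _mm_unpacklo_epi64(t0, t1);         // a0 b0 c0 d0
    r1 = _mm_unpackhi_epi64(t0, t1);         // a1 b1 c1 d1
    r2 = _mm_unpacklo_epi64(t2, t3);         // a2 b2 c2 d2
    r3 = _mm_unpackhi_epi64(t2, t3);         // a3 b3 c3 d3
}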
ncnn::get_optimal_tile_mnk_int8(int, int, int, int&, int&, int&, int)
static void get_optimal_tile_mnk_int8(int M, int N, int K, int& TILE_M, int& TILE_N, int& TILE_K, int nT) { // resolve optimal tile size from cache size const size_t l2_cache_size_int8 = (int)(get_cpu_level2_cache_size() / sizeof(short)); if (nT == 0) nT = get_physical_big_cpu_count(); // solve M { int tile_size = (int)sqrt((float)l2_cache_size_int8 / 3); #if __AVX512F__ TILE_M = std::max(16, tile_size / 16 * 16); #elif __AVX2__ TILE_M = std::max(8, tile_size / 8 * 8); #elif __SSE2__ TILE_M = std::max(4, tile_size / 4 * 4); #else TILE_M = std::max(2, tile_size / 2 * 2); #endif TILE_M *= std::min(nT, get_physical_cpu_count()); int nn_M = (M + TILE_M - 1) / TILE_M; #if __AVX512F__ TILE_M = std::min(TILE_M, ((M + nn_M - 1) / nn_M + 15) / 16 * 16); #elif __AVX2__ TILE_M = std::min(TILE_M, ((M + nn_M - 1) / nn_M + 7) / 8 * 8); #elif __SSE2__ TILE_M = std::min(TILE_M, ((M + nn_M - 1) / nn_M + 3) / 4 * 4); #else TILE_M = std::min(TILE_M, ((M + nn_M - 1) / nn_M + 1) / 2 * 2); #endif if (nT > 1) { #if __AVX512F__ TILE_M = std::min(TILE_M, (std::max(1, TILE_M / nT) + 15) / 16 * 16); #elif __AVX2__ TILE_M = std::min(TILE_M, (std::max(1, TILE_M / nT) + 7) / 8 * 8); #elif __SSE2__ TILE_M = std::min(TILE_M, (std::max(1, TILE_M / nT) + 3) / 4 * 4); #else TILE_M = std::min(TILE_M, (std::max(1, TILE_M / nT) + 1) / 2 * 2); #endif } } // solve K { int tile_size = (int)(sqrt((float)l2_cache_size_int8) - TILE_M); #if __AVX512F__ TILE_K = std::max(16, tile_size / 16 * 16); #elif __AVX2__ TILE_K = std::max(8, tile_size / 8 * 8); #elif __SSE2__ TILE_K = std::max(4, tile_size / 4 * 4); #else TILE_K = std::max(2, tile_size / 2 * 2); #endif int nn_K = (K + TILE_K - 1) / TILE_K; #if __AVX512F__ TILE_K = std::min(TILE_K, ((K + nn_K - 1) / nn_K + 15) / 16 * 16); #elif __AVX2__ TILE_K = std::min(TILE_K, ((K + nn_K - 1) / nn_K + 7) / 8 * 8); #elif __SSE2__ TILE_K = std::min(TILE_K, ((K + nn_K - 1) / nn_K + 3) / 4 * 4); #else TILE_K = std::min(TILE_K, ((K + nn_K - 1) / nn_K + 1) / 2 * 2); #endif } if (N > 0) { int tile_size = (int)((l2_cache_size_int8 - TILE_M * TILE_K) / (TILE_M * 2 + TILE_K)); #if __SSE2__ TILE_N = std::max(4, tile_size / 4 * 4); #else TILE_N = std::max(1, tile_size); #endif int nn_N = (N + TILE_N - 1) / TILE_N; #if __SSE2__ TILE_N = std::min(TILE_N, ((N + nn_N - 1) / nn_N + 3) / 4 * 4); #else TILE_N = std::min(TILE_N, (N + nn_N - 1) / nn_N); #endif } }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x28, %rsp movq %r9, 0x20(%rsp) movq %r8, 0x18(%rsp) movq %rcx, %r12 movl %edx, %ebp movq %rsi, 0x10(%rsp) movl %edi, %r13d movl 0x60(%rsp), %ebx callq 0x3cb1f sarl %eax movslq %eax, %r15 testl %ebx, %ebx jne 0x13caf2 callq 0x3cb03 movl %eax, %ebx vcvtusi2ss %r15, %xmm0, %xmm0 vmovss %xmm0, 0xc(%rsp) vmulss 0x36aea6(%rip), %xmm0, %xmm0 # 0x4a79ac vsqrtss %xmm0, %xmm0, %xmm0 vcvttss2si %xmm0, %eax leal 0xf(%rax), %ecx testl %eax, %eax cmovnsl %eax, %ecx andl $-0x10, %ecx cmpl $0x11, %ecx movl $0x10, %r14d cmovll %r14d, %ecx movl %ecx, (%r12) callq 0x3cad3 movl %eax, %esi cmpl %ebx, %eax cmovgel %ebx, %esi imull (%r12), %esi leal (%rsi,%r13), %eax decl %eax cltd idivl %esi movl %eax, %ecx leal (%rcx,%r13), %eax decl %eax cltd idivl %ecx movl %eax, %ecx leal 0xf(%rcx), %eax addl $0x1e, %ecx testl %eax, %eax cmovnsl %eax, %ecx andl $-0x10, %ecx cmpl %esi, %ecx cmovgel %esi, %ecx cmpl $0x2, %ebx jl 0x13cb89 movl %ecx, %eax cltd idivl %ebx cmpl $0x2, %eax movl $0x1, %edx cmovgel %eax, %edx addl $0xf, %edx andl $0x7ffffff0, %edx # imm = 0x7FFFFFF0 cmpl %ecx, %edx cmovgel %ecx, %edx movl %edx, %ecx movq 0x10(%rsp), %rdi movl %ecx, (%r12) vsqrtss 0xc(%rsp), %xmm1, %xmm0 vcvtsi2ss %ecx, %xmm1, %xmm1 vsubss %xmm1, %xmm0, %xmm0 vcvttss2si %xmm0, %eax leal 0xf(%rax), %ecx testl %eax, %eax cmovnsl %eax, %ecx andl $-0x10, %ecx cmpl $0x11, %ecx cmovgel %ecx, %r14d leal (%r14,%rbp), %eax decl %eax cltd idivl %r14d movl %eax, %ecx leal (%rcx,%rbp), %eax decl %eax cltd idivl %ecx leal 0xf(%rax), %ecx addl $0x1e, %eax testl %ecx, %ecx cmovnsl %ecx, %eax andl $-0x10, %eax cmpl %r14d, %eax cmovgel %r14d, %eax movq 0x20(%rsp), %rcx movl %eax, (%rcx) testl %edi, %edi jle 0x13cc41 movl (%r12), %ecx leal (%rax,%rcx,2), %edx imull %eax, %ecx movslq %ecx, %rax subq %rax, %r15 movslq %edx, %rcx movq %r15, %rax xorl %edx, %edx divq %rcx leal 0x3(%rax), %ecx testl %eax, %eax cmovnsl %eax, %ecx andl $-0x4, %ecx cmpl $0x5, %ecx movl $0x4, %esi cmovgel %ecx, %esi leal (%rdi,%rsi), %eax decl %eax xorl %edx, %edx divl %esi movl %eax, %ecx leal (%rdi,%rcx), %eax decl %eax xorl %edx, %edx divl %ecx addl $0x3, %eax andl $-0x4, %eax cmpl %esi, %eax cmovael %esi, %eax movq 0x18(%rsp), %rcx movl %eax, (%rcx) addq $0x28, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq
/Tencent[P]ncnn/src/layer/x86/convolution_3x3_winograd_int8.h
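The tile-size arithmetic in get_optimal_tile_mnk_int8 is easier to follow with concrete numbers. The self-contained walk-through below traces the __AVX512F__ branch that the listing above was compiled for (note the vcvtusi2ss and the `andl $-0x10` 16-lane rounding in the disassembly). The 1 MiB L2 size, the thread and core counts, and the M/N/K extents are hypothetical inputs chosen for the example, not values the source fixes:

#include <algorithm>
#include <cmath>
#include <cstdio>

int main()
{
    // Hypothetical machine: 1 MiB L2, 8 physical cores, nT = 8.
    const size_t l2_cache_size_int8 = 1048576 / sizeof(short); // 524288 int16 slots
    const int nT = 8, physical_cores = 8;
    const int M = 512, N = 4096, K = 256; // hypothetical GEMM extents

    // solve M: sqrt(L2/3), rounded down to the 16-wide AVX-512 granule
    int tile_size = (int)std::sqrt((float)l2_cache_size_int8 / 3);        // 418
    int TILE_M = std::max(16, tile_size / 16 * 16);                       // 416
    TILE_M *= std::min(nT, physical_cores);                               // 3328
    int nn_M = (M + TILE_M - 1) / TILE_M;                                 // 1
    TILE_M = std::min(TILE_M, ((M + nn_M - 1) / nn_M + 15) / 16 * 16);    // 512
    if (nT > 1)
        TILE_M = std::min(TILE_M, (std::max(1, TILE_M / nT) + 15) / 16 * 16); // 64

    // solve K: whatever is left of sqrt(L2) after TILE_M
    tile_size = (int)(std::sqrt((float)l2_cache_size_int8) - TILE_M);     // 660
    int TILE_K = std::max(16, tile_size / 16 * 16);                       // 656
    int nn_K = (K + TILE_K - 1) / TILE_K;                                 // 1
    TILE_K = std::min(TILE_K, ((K + nn_K - 1) / nn_K + 15) / 16 * 16);    // 256

    // solve N: remaining L2 budget divided over the other tiles
    int TILE_N = 0;
    if (N > 0)
    {
        tile_size = (int)((l2_cache_size_int8 - TILE_M * TILE_K) / (TILE_M * 2 + TILE_K)); // 1322
        TILE_N = std::max(4, tile_size / 4 * 4);                          // 1320
        int nn_N = (N + TILE_N - 1) / TILE_N;                             // 4
        TILE_N = std::min(TILE_N, ((N + nn_N - 1) / nn_N + 3) / 4 * 4);   // 1024
    }

    printf("TILE_M=%d TILE_K=%d TILE_N=%d\n", TILE_M, TILE_K, TILE_N);    // 64 256 1024
}

The solve order matters: TILE_M is settled first (64 after the per-thread split), TILE_K second against the square-root budget that remains (256), and TILE_N last from whatever L2 capacity is left over (1024).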
ncnn::transpose_pack_B_tile_int8(ncnn::Mat const&, ncnn::Mat&, int, int, int, int)
static void transpose_pack_B_tile_int8(const Mat& B, Mat& BT, int batch, int max_jj, int max_kk, int nT) { #pragma omp parallel for num_threads(nT) for (int b = 0; b < batch; b++) { short* pp = BT.row<short>(b); int jj = 0; #if __SSE2__ #if defined(__x86_64__) || defined(_M_X64) #if __AVX512F__ for (; jj + 15 < max_jj; jj += 16) { const short* p0 = B; int kk = 0; p0 += (b * max_jj + jj) * 16; for (; kk + 15 < max_kk; kk += 16) { __m512i _r0 = _mm512_loadu_si512((const __m512i*)p0); __m512i _r1 = _mm512_loadu_si512((const __m512i*)(p0 + 32)); __m512i _r2 = _mm512_loadu_si512((const __m512i*)(p0 + 64)); __m512i _r3 = _mm512_loadu_si512((const __m512i*)(p0 + 96)); __m512i _r4 = _mm512_loadu_si512((const __m512i*)(p0 + 128)); __m512i _r5 = _mm512_loadu_si512((const __m512i*)(p0 + 160)); __m512i _r6 = _mm512_loadu_si512((const __m512i*)(p0 + 192)); __m512i _r7 = _mm512_loadu_si512((const __m512i*)(p0 + 224)); __m512i _tmp0 = _mm512_shuffle_i32x4(_r0, _r2, _MM_SHUFFLE(1, 0, 1, 0)); __m512i _tmp1 = _mm512_shuffle_i32x4(_r0, _r2, _MM_SHUFFLE(3, 2, 3, 2)); __m512i _tmp2 = _mm512_shuffle_i32x4(_r1, _r3, _MM_SHUFFLE(1, 0, 1, 0)); __m512i _tmp3 = _mm512_shuffle_i32x4(_r1, _r3, _MM_SHUFFLE(3, 2, 3, 2)); __m512i _tmp4 = _mm512_shuffle_i32x4(_r4, _r6, _MM_SHUFFLE(1, 0, 1, 0)); __m512i _tmp5 = _mm512_shuffle_i32x4(_r4, _r6, _MM_SHUFFLE(3, 2, 3, 2)); __m512i _tmp6 = _mm512_shuffle_i32x4(_r5, _r7, _MM_SHUFFLE(1, 0, 1, 0)); __m512i _tmp7 = _mm512_shuffle_i32x4(_r5, _r7, _MM_SHUFFLE(3, 2, 3, 2)); _r0 = _mm512_unpacklo_epi32(_tmp0, _tmp1); _r1 = _mm512_unpackhi_epi32(_tmp0, _tmp1); _r2 = _mm512_unpacklo_epi32(_tmp2, _tmp3); _r3 = _mm512_unpackhi_epi32(_tmp2, _tmp3); _r4 = _mm512_unpacklo_epi32(_tmp4, _tmp5); _r5 = _mm512_unpackhi_epi32(_tmp4, _tmp5); _r6 = _mm512_unpacklo_epi32(_tmp6, _tmp7); _r7 = _mm512_unpackhi_epi32(_tmp6, _tmp7); _tmp0 = _mm512_unpacklo_epi64(_r0, _r2); _tmp1 = _mm512_unpackhi_epi64(_r0, _r2); _tmp2 = _mm512_unpacklo_epi64(_r1, _r3); _tmp3 = _mm512_unpackhi_epi64(_r1, _r3); _tmp4 = _mm512_unpacklo_epi64(_r4, _r6); _tmp5 = _mm512_unpackhi_epi64(_r4, _r6); _tmp6 = _mm512_unpacklo_epi64(_r5, _r7); _tmp7 = _mm512_unpackhi_epi64(_r5, _r7); _r0 = _mm512_shuffle_i32x4(_tmp0, _tmp4, _MM_SHUFFLE(2, 0, 2, 0)); _r1 = _mm512_shuffle_i32x4(_tmp1, _tmp5, _MM_SHUFFLE(2, 0, 2, 0)); _r2 = _mm512_shuffle_i32x4(_tmp2, _tmp6, _MM_SHUFFLE(2, 0, 2, 0)); _r3 = _mm512_shuffle_i32x4(_tmp3, _tmp7, _MM_SHUFFLE(2, 0, 2, 0)); _r4 = _mm512_shuffle_i32x4(_tmp0, _tmp4, _MM_SHUFFLE(3, 1, 3, 1)); _r5 = _mm512_shuffle_i32x4(_tmp1, _tmp5, _MM_SHUFFLE(3, 1, 3, 1)); _r6 = _mm512_shuffle_i32x4(_tmp2, _tmp6, _MM_SHUFFLE(3, 1, 3, 1)); _r7 = _mm512_shuffle_i32x4(_tmp3, _tmp7, _MM_SHUFFLE(3, 1, 3, 1)); _mm512_storeu_si512((__m512i*)pp, _r0); _mm512_storeu_si512((__m512i*)(pp + 32), _r1); _mm512_storeu_si512((__m512i*)(pp + 64), _r2); _mm512_storeu_si512((__m512i*)(pp + 96), _r3); _mm512_storeu_si512((__m512i*)(pp + 128), _r4); _mm512_storeu_si512((__m512i*)(pp + 160), _r5); _mm512_storeu_si512((__m512i*)(pp + 192), _r6); _mm512_storeu_si512((__m512i*)(pp + 224), _r7); p0 += max_jj * batch * 16; pp += 256; } p0 -= (b * max_jj + jj) * 16; p0 += (b * max_jj + jj) * 8; for (; kk + 7 < max_kk; kk += 8) { __m512i _r0 = _mm512_loadu_si512((const __m512i*)p0); __m512i _r1 = _mm512_loadu_si512((const __m512i*)(p0 + 32)); __m512i _r2 = _mm512_loadu_si512((const __m512i*)(p0 + 64)); __m512i _r3 = _mm512_loadu_si512((const __m512i*)(p0 + 96)); __m512i _tmp0 = _mm512_shuffle_i32x4(_r0, _r1, _MM_SHUFFLE(2, 0, 2, 0)); __m512i _tmp1 = 
_mm512_shuffle_i32x4(_r0, _r1, _MM_SHUFFLE(3, 1, 3, 1));
                __m512i _tmp2 = _mm512_shuffle_i32x4(_r2, _r3, _MM_SHUFFLE(2, 0, 2, 0));
                __m512i _tmp3 = _mm512_shuffle_i32x4(_r2, _r3, _MM_SHUFFLE(3, 1, 3, 1));
                _r0 = _mm512_unpacklo_epi32(_tmp0, _tmp1);
                _r1 = _mm512_unpackhi_epi32(_tmp0, _tmp1);
                _r2 = _mm512_unpacklo_epi32(_tmp2, _tmp3);
                _r3 = _mm512_unpackhi_epi32(_tmp2, _tmp3);
                _tmp0 = _mm512_permutex_epi64(_r0, _MM_SHUFFLE(3, 1, 2, 0));
                _tmp1 = _mm512_permutex_epi64(_r1, _MM_SHUFFLE(3, 1, 2, 0));
                _tmp2 = _mm512_permutex_epi64(_r2, _MM_SHUFFLE(3, 1, 2, 0));
                _tmp3 = _mm512_permutex_epi64(_r3, _MM_SHUFFLE(3, 1, 2, 0));
                _r0 = _mm512_shuffle_i32x4(_tmp0, _tmp2, _MM_SHUFFLE(2, 0, 2, 0));
                _r1 = _mm512_shuffle_i32x4(_tmp0, _tmp2, _MM_SHUFFLE(3, 1, 3, 1));
                _r2 = _mm512_shuffle_i32x4(_tmp1, _tmp3, _MM_SHUFFLE(2, 0, 2, 0));
                _r3 = _mm512_shuffle_i32x4(_tmp1, _tmp3, _MM_SHUFFLE(3, 1, 3, 1));
                _mm512_storeu_si512((__m512i*)pp, _r0);
                _mm512_storeu_si512((__m512i*)(pp + 32), _r1);
                _mm512_storeu_si512((__m512i*)(pp + 64), _r2);
                _mm512_storeu_si512((__m512i*)(pp + 96), _r3);
                p0 += max_jj * batch * 8;
                pp += 128;
            }
            p0 -= (b * max_jj + jj) * 8;
            p0 += (b * max_jj + jj) * 2;
            for (; kk + 1 < max_kk; kk += 2)
            {
                __m512i _r0 = _mm512_loadu_si512((const __m512i*)p0);
                _mm512_storeu_si512((__m512i*)pp, _r0);
                p0 += max_jj * batch * 2;
                pp += 32;
            }
            p0 -= (b * max_jj + jj) * 2;
            p0 += (b * max_jj + jj);
            for (; kk < max_kk; kk++)
            {
                __m256i _r0 = _mm256_loadu_si256((const __m256i*)p0);
                _mm256_store_si256((__m256i*)pp, _r0);
                p0 += max_jj * batch;
                pp += 16;
            }
        }
#endif // __AVX512F__
        for (; jj + 7 < max_jj; jj += 8)
        {
            const short* p0 = B;

            int kk = 0;
#if __AVX512F__
            p0 += (b * max_jj + jj) * 16;
            for (; kk + 15 < max_kk; kk += 16)
            {
                __m512i _r0 = _mm512_loadu_si512((const __m512i*)p0);
                __m512i _r1 = _mm512_loadu_si512((const __m512i*)(p0 + 32));
                __m512i _r2 = _mm512_loadu_si512((const __m512i*)(p0 + 64));
                __m512i _r3 = _mm512_loadu_si512((const __m512i*)(p0 + 96));
                __m512i _tmp0 = _mm512_shuffle_i32x4(_r0, _r2, _MM_SHUFFLE(1, 0, 1, 0));
                __m512i _tmp1 = _mm512_shuffle_i32x4(_r0, _r2, _MM_SHUFFLE(3, 2, 3, 2));
                __m512i _tmp2 = _mm512_shuffle_i32x4(_r1, _r3, _MM_SHUFFLE(1, 0, 1, 0));
                __m512i _tmp3 = _mm512_shuffle_i32x4(_r1, _r3, _MM_SHUFFLE(3, 2, 3, 2));
                _r0 = _mm512_unpacklo_epi32(_tmp0, _tmp1);
                _r1 = _mm512_unpackhi_epi32(_tmp0, _tmp1);
                _r2 = _mm512_unpacklo_epi32(_tmp2, _tmp3);
                _r3 = _mm512_unpackhi_epi32(_tmp2, _tmp3);
                _tmp0 = _mm512_unpacklo_epi64(_r0, _r2);
                _tmp1 = _mm512_unpackhi_epi64(_r0, _r2);
                _tmp2 = _mm512_unpacklo_epi64(_r1, _r3);
                _tmp3 = _mm512_unpackhi_epi64(_r1, _r3);
                _r0 = _mm512_shuffle_i32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0));
                _r1 = _mm512_shuffle_i32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0));
                _r2 = _mm512_shuffle_i32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1));
                _r3 = _mm512_shuffle_i32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1));
                _mm512_storeu_si512((__m512i*)pp, _r0);
                _mm512_storeu_si512((__m512i*)(pp + 32), _r1);
                _mm512_storeu_si512((__m512i*)(pp + 64), _r2);
                _mm512_storeu_si512((__m512i*)(pp + 96), _r3);
                p0 += max_jj * batch * 16;
                pp += 128;
            }
            p0 -= (b * max_jj + jj) * 16;
#endif // __AVX512F__
            p0 += (b * max_jj + jj) * 8;
            for (; kk + 7 < max_kk; kk += 8)
            {
#if __AVX__
                __m256 _r0 = _mm256_loadu_ps((const float*)p0);
                __m256 _r1 = _mm256_loadu_ps((const float*)(p0 + 16));
                __m256 _r2 = _mm256_loadu_ps((const float*)(p0 + 32));
                __m256 _r3 = _mm256_loadu_ps((const float*)(p0 + 48));
                __m256 _tmp0 = _mm256_permute2f128_ps(_r0, _r2, _MM_SHUFFLE(0, 2, 0, 0));
                __m256 _tmp1 = _mm256_permute2f128_ps(_r0, _r2, _MM_SHUFFLE(0, 3, 0, 1));
                __m256 _tmp2 = _mm256_permute2f128_ps(_r1, _r3, _MM_SHUFFLE(0, 2, 0, 0));
                __m256 _tmp3 = _mm256_permute2f128_ps(_r1, _r3, _MM_SHUFFLE(0, 3, 0, 1));
                _r0 = _mm256_unpacklo_ps(_tmp0, _tmp1);
                _r1 = _mm256_unpackhi_ps(_tmp0, _tmp1);
                _r2 = _mm256_unpacklo_ps(_tmp2, _tmp3);
                _r3 = _mm256_unpackhi_ps(_tmp2, _tmp3);
                _tmp0 = _mm256_castpd_ps(_mm256_unpacklo_pd(_mm256_castps_pd(_r0), _mm256_castps_pd(_r2)));
                _tmp1 = _mm256_castpd_ps(_mm256_unpackhi_pd(_mm256_castps_pd(_r0), _mm256_castps_pd(_r2)));
                _tmp2 = _mm256_castpd_ps(_mm256_unpacklo_pd(_mm256_castps_pd(_r1), _mm256_castps_pd(_r3)));
                _tmp3 = _mm256_castpd_ps(_mm256_unpackhi_pd(_mm256_castps_pd(_r1), _mm256_castps_pd(_r3)));
                _mm256_storeu_ps((float*)pp, _tmp0);
                _mm256_storeu_ps((float*)(pp + 16), _tmp1);
                _mm256_storeu_ps((float*)(pp + 32), _tmp2);
                _mm256_storeu_ps((float*)(pp + 48), _tmp3);
#else
                __m128i _r0 = _mm_load_si128((const __m128i*)p0);
                __m128i _r1 = _mm_load_si128((const __m128i*)(p0 + 8));
                __m128i _r2 = _mm_load_si128((const __m128i*)(p0 + 8 * 2));
                __m128i _r3 = _mm_load_si128((const __m128i*)(p0 + 8 * 3));
                __m128i _r4 = _mm_load_si128((const __m128i*)(p0 + 8 * 4));
                __m128i _r5 = _mm_load_si128((const __m128i*)(p0 + 8 * 5));
                __m128i _r6 = _mm_load_si128((const __m128i*)(p0 + 8 * 6));
                __m128i _r7 = _mm_load_si128((const __m128i*)(p0 + 8 * 7));
                transpose4x8_epi32(_r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7);
                _mm_store_si128((__m128i*)pp, _r0);
                _mm_store_si128((__m128i*)(pp + 8), _r1);
                _mm_store_si128((__m128i*)(pp + 8 * 2), _r2);
                _mm_store_si128((__m128i*)(pp + 8 * 3), _r3);
                _mm_store_si128((__m128i*)(pp + 8 * 4), _r4);
                _mm_store_si128((__m128i*)(pp + 8 * 5), _r5);
                _mm_store_si128((__m128i*)(pp + 8 * 6), _r6);
                _mm_store_si128((__m128i*)(pp + 8 * 7), _r7);
#endif // __AVX__
                p0 += max_jj * batch * 8;
                pp += 64;
            }
            p0 -= (b * max_jj + jj) * 8;
            p0 += (b * max_jj + jj) * 2;
            for (; kk + 1 < max_kk; kk += 2)
            {
#if __AVX__
                __m256 _r0 = _mm256_loadu_ps((const float*)p0);
                _mm256_storeu_ps((float*)pp, _r0);
#else
                __m128i _r0 = _mm_loadu_si128((const __m128i*)p0);
                __m128i _r1 = _mm_loadu_si128((const __m128i*)(p0 + 8));
                _mm_store_si128((__m128i*)pp, _r0);
                _mm_store_si128((__m128i*)(pp + 8), _r1);
#endif // __AVX__
                p0 += max_jj * batch * 2;
                pp += 16;
            }
            p0 -= (b * max_jj + jj) * 2;
            p0 += (b * max_jj + jj);
            for (; kk < max_kk; kk++)
            {
                __m128i _r0 = _mm_loadu_si128((const __m128i*)p0);
                _mm_store_si128((__m128i*)pp, _r0);
                p0 += max_jj * batch;
                pp += 8;
            }
        }
#endif // defined(__x86_64__) || defined(_M_X64)
        for (; jj + 3 < max_jj; jj += 4)
        {
            const short* p0 = B;

            int kk = 0;
#if __AVX512F__
            p0 += (b * max_jj + jj) * 16;
            for (; kk + 15 < max_kk; kk += 16)
            {
                __m512i _r0 = _mm512_loadu_si512((const __m512i*)p0);
                __m512i _r1 = _mm512_loadu_si512((const __m512i*)(p0 + 32));
                __m512i _tmp0 = _mm512_shuffle_i32x4(_r0, _r1, _MM_SHUFFLE(1, 0, 1, 0));
                __m512i _tmp1 = _mm512_shuffle_i32x4(_r0, _r1, _MM_SHUFFLE(3, 2, 3, 2));
                _r0 = _mm512_unpacklo_epi32(_tmp0, _tmp1);
                _r1 = _mm512_unpackhi_epi32(_tmp0, _tmp1);
                _r0 = _mm512_permutex_epi64(_r0, _MM_SHUFFLE(3, 1, 2, 0));
                _r1 = _mm512_permutex_epi64(_r1, _MM_SHUFFLE(3, 1, 2, 0));
                _tmp0 = _mm512_shuffle_i32x4(_r0, _r1, _MM_SHUFFLE(1, 0, 1, 0));
                _tmp1 = _mm512_shuffle_i32x4(_r0, _r1, _MM_SHUFFLE(3, 2, 3, 2));
                _r0 = _mm512_unpacklo_epi64(_tmp0, _tmp1);
                _r1 = _mm512_unpackhi_epi64(_tmp0, _tmp1);
                _mm512_storeu_si512((__m512i*)pp, _r0);
                _mm512_storeu_si512((__m512i*)(pp + 32), _r1);
                p0 += max_jj * batch * 16;
                pp += 64;
            }
            p0 -= (b * max_jj + jj) * 16;
#endif // __AVX512F__
            p0 += (b * max_jj + jj) * 8;
            for (; kk + 7 < max_kk; kk += 8)
            {
                __m128i _r0 = _mm_load_si128((const __m128i*)p0);
                __m128i _r1 = _mm_load_si128((const __m128i*)(p0 + 8));
                __m128i _r2 = _mm_load_si128((const __m128i*)(p0 + 8 * 2));
                __m128i _r3 = _mm_load_si128((const __m128i*)(p0 + 8 * 3));
                transpose4x4_epi32(_r0, _r1, _r2, _r3);
                _mm_storeu_si128((__m128i*)pp, _r0);
                _mm_storeu_si128((__m128i*)(pp + 8), _r1);
                _mm_storeu_si128((__m128i*)(pp + 8 * 2), _r2);
                _mm_storeu_si128((__m128i*)(pp + 8 * 3), _r3);
                p0 += max_jj * batch * 8;
                pp += 32;
            }
            p0 -= (b * max_jj + jj) * 8;
            p0 += (b * max_jj + jj) * 2;
            for (; kk + 1 < max_kk; kk += 2)
            {
                __m128i _r0 = _mm_loadu_si128((const __m128i*)p0);
                _mm_storeu_si128((__m128i*)pp, _r0);
                p0 += max_jj * batch * 2;
                pp += 8;
            }
            p0 -= (b * max_jj + jj) * 2;
            p0 += (b * max_jj + jj);
            for (; kk < max_kk; kk++)
            {
                pp[0] = p0[0];
                pp[1] = p0[1];
                pp[2] = p0[2];
                pp[3] = p0[3];
                p0 += max_jj * batch;
                pp += 4;
            }
        }
#endif // __SSE2__
        for (; jj + 1 < max_jj; jj += 2)
        {
            const short* p0 = B;

            int kk = 0;
#if __SSE2__
#if __AVX512F__
            p0 += (b * max_jj + jj) * 16;
            for (; kk + 15 < max_kk; kk += 16)
            {
                __m256i _r0 = _mm256_load_si256((const __m256i*)p0);
                __m256i _r1 = _mm256_load_si256((const __m256i*)(p0 + 16));
                transpose8x2_epi32(_r0, _r1);
                _mm256_storeu_si256((__m256i*)pp, _r0);
                _mm256_storeu_si256((__m256i*)(pp + 16), _r1);
                p0 += max_jj * batch * 16;
                pp += 32;
            }
            p0 -= (b * max_jj + jj) * 16;
#endif // __AVX512F__
            p0 += (b * max_jj + jj) * 8;
            for (; kk + 7 < max_kk; kk += 8)
            {
                __m128i _r0 = _mm_load_si128((const __m128i*)p0);
                __m128i _r1 = _mm_load_si128((const __m128i*)(p0 + 8));
                __m128i _tmp0 = _mm_unpacklo_epi32(_r0, _r1);
                __m128i _tmp1 = _mm_unpackhi_epi32(_r0, _r1);
                _mm_storeu_si128((__m128i*)pp, _tmp0);
                _mm_storeu_si128((__m128i*)(pp + 8), _tmp1);
                p0 += max_jj * batch * 8;
                pp += 16;
            }
            p0 -= (b * max_jj + jj) * 8;
#endif // __SSE2__
            p0 += (b * max_jj + jj) * 2;
            for (; kk + 1 < max_kk; kk += 2)
            {
                pp[0] = p0[0];
                pp[1] = p0[1];
                pp[2] = p0[2];
                pp[3] = p0[3];
                p0 += max_jj * batch * 2;
                pp += 4;
            }
            p0 -= (b * max_jj + jj) * 2;
            p0 += (b * max_jj + jj);
            for (; kk < max_kk; kk++)
            {
                pp[0] = p0[0];
                pp[1] = p0[1];
                p0 += max_jj * batch;
                pp += 2;
            }
        }
        for (; jj < max_jj; jj++)
        {
            const short* p0 = B;

            int kk = 0;
#if __SSE2__
#if __AVX512F__
            p0 += (b * max_jj + jj) * 16;
            for (; kk + 15 < max_kk; kk += 16)
            {
                __m256i _r0 = _mm256_load_si256((const __m256i*)p0);
                _mm256_storeu_si256((__m256i*)pp, _r0);
                p0 += max_jj * batch * 16;
                pp += 16;
            }
            p0 -= (b * max_jj + jj) * 16;
#endif // __AVX512F__
            p0 += (b * max_jj + jj) * 8;
            for (; kk + 7 < max_kk; kk += 8)
            {
                __m128i _r0 = _mm_load_si128((const __m128i*)p0);
                _mm_storeu_si128((__m128i*)pp, _r0);
                p0 += max_jj * batch * 8;
                pp += 8;
            }
            p0 -= (b * max_jj + jj) * 8;
#endif // __SSE2__
            p0 += (b * max_jj + jj) * 2;
            for (; kk + 1 < max_kk; kk += 2)
            {
                pp[0] = p0[0];
                pp[1] = p0[1];
                p0 += max_jj * batch * 2;
                pp += 2;
            }
            p0 -= (b * max_jj + jj) * 2;
            p0 += (b * max_jj + jj);
            for (; kk < max_kk; kk++)
            {
                pp[0] = p0[0];
                p0 += max_jj * batch;
                pp += 1;
            }
        }
    }
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx movq %rsi, -0x10(%rsp) movq %rdi, -0x28(%rsp) testl %edx, %edx jle 0x13e675 movl %ecx, %r9d imull %edx, %r9d leal (,%r9,8), %esi leal (%r9,%r9), %edi movslq %r9d, %rax shll $0x4, %r9d movslq %r9d, %r9 movslq %esi, %r10 movslq %edi, %r11 movl %r8d, %esi andl $-0x10, %esi movl %esi, -0x54(%rsp) movslq %ecx, %rsi movq %rsi, -0x48(%rsp) movl %edx, %edx movq %rdx, -0x18(%rsp) addq %r9, %r9 addq %r10, %r10 addq %r11, %r11 addq %rax, %rax movq $0x0, -0x40(%rsp) vpmovsxbd 0x36a0d1(%rip), %zmm0 # 0x4a7bf0 vpmovsxbd 0x36a0d7(%rip), %zmm1 # 0x4a7c00 xorl %esi, %esi xorl %edi, %edi movl %ecx, -0x58(%rsp) movq -0x10(%rsp), %rdx movslq 0x2c(%rdx), %r13 imulq %rdi, %r13 imulq 0x10(%rdx), %r13 addq (%rdx), %r13 cmpl $0x10, %ecx movl %esi, -0x1c(%rsp) movq %rdi, -0x38(%rsp) jl 0x13de90 movl %esi, %r14d leaq (%r14,%r14), %rdx negq %rdx shlq $0x2, %r14 imull %ecx, %edi movq %rdi, -0x8(%rsp) movl %esi, %ecx xorl %r15d, %r15d movq %rdx, -0x50(%rsp) movq %r14, -0x60(%rsp) movl %ecx, %r14d andl $0xfffffff, %r14d # imm = 0xFFFFFFF shlq $0x5, %r14 movq -0x8(%rsp), %rdx leaq (%r15,%rdx), %rbp movl %ebp, %ebx shll $0x4, %ebx movq -0x28(%rsp), %rdx movq (%rdx), %rsi cmpl $0x10, %r8d jl 0x13dcfa addq %r14, %rsi movl $0xf, %edi vmovdqu64 (%rsi), %zmm2 vmovdqu64 0x40(%rsi), %zmm3 vmovdqu64 0x100(%rsi), %zmm4 vmovdqu64 0x140(%rsi), %zmm5 vinserti64x4 $0x1, 0x80(%rsi), %zmm2, %zmm6 vshufi64x2 $0xee, 0x80(%rsi), %zmm2, %zmm2 # zmm2 = zmm2[4,5,6,7],mem[4,5,6,7] vinserti64x4 $0x1, 0xc0(%rsi), %zmm3, %zmm7 vshufi64x2 $0xee, 0xc0(%rsi), %zmm3, %zmm3 # zmm3 = zmm3[4,5,6,7],mem[4,5,6,7] vinserti64x4 $0x1, 0x180(%rsi), %zmm4, %zmm8 vshufi64x2 $0xee, 0x180(%rsi), %zmm4, %zmm4 # zmm4 = zmm4[4,5,6,7],mem[4,5,6,7] vinserti64x4 $0x1, 0x1c0(%rsi), %zmm5, %zmm9 vshufi64x2 $0xee, 0x1c0(%rsi), %zmm5, %zmm5 # zmm5 = zmm5[4,5,6,7],mem[4,5,6,7] vpunpckldq %zmm2, %zmm6, %zmm10 # zmm10 = zmm6[0],zmm2[0],zmm6[1],zmm2[1],zmm6[4],zmm2[4],zmm6[5],zmm2[5],zmm6[8],zmm2[8],zmm6[9],zmm2[9],zmm6[12],zmm2[12],zmm6[13],zmm2[13] vpunpckhdq %zmm2, %zmm6, %zmm2 # zmm2 = zmm6[2],zmm2[2],zmm6[3],zmm2[3],zmm6[6],zmm2[6],zmm6[7],zmm2[7],zmm6[10],zmm2[10],zmm6[11],zmm2[11],zmm6[14],zmm2[14],zmm6[15],zmm2[15] vpunpckldq %zmm3, %zmm7, %zmm6 # zmm6 = zmm7[0],zmm3[0],zmm7[1],zmm3[1],zmm7[4],zmm3[4],zmm7[5],zmm3[5],zmm7[8],zmm3[8],zmm7[9],zmm3[9],zmm7[12],zmm3[12],zmm7[13],zmm3[13] vpunpckhdq %zmm3, %zmm7, %zmm3 # zmm3 = zmm7[2],zmm3[2],zmm7[3],zmm3[3],zmm7[6],zmm3[6],zmm7[7],zmm3[7],zmm7[10],zmm3[10],zmm7[11],zmm3[11],zmm7[14],zmm3[14],zmm7[15],zmm3[15] vpunpckldq %zmm4, %zmm8, %zmm7 # zmm7 = zmm8[0],zmm4[0],zmm8[1],zmm4[1],zmm8[4],zmm4[4],zmm8[5],zmm4[5],zmm8[8],zmm4[8],zmm8[9],zmm4[9],zmm8[12],zmm4[12],zmm8[13],zmm4[13] vpunpckhdq %zmm4, %zmm8, %zmm4 # zmm4 = zmm8[2],zmm4[2],zmm8[3],zmm4[3],zmm8[6],zmm4[6],zmm8[7],zmm4[7],zmm8[10],zmm4[10],zmm8[11],zmm4[11],zmm8[14],zmm4[14],zmm8[15],zmm4[15] vpunpckldq %zmm5, %zmm9, %zmm8 # zmm8 = zmm9[0],zmm5[0],zmm9[1],zmm5[1],zmm9[4],zmm5[4],zmm9[5],zmm5[5],zmm9[8],zmm5[8],zmm9[9],zmm5[9],zmm9[12],zmm5[12],zmm9[13],zmm5[13] vpunpckhdq %zmm5, %zmm9, %zmm5 # zmm5 = zmm9[2],zmm5[2],zmm9[3],zmm5[3],zmm9[6],zmm5[6],zmm9[7],zmm5[7],zmm9[10],zmm5[10],zmm9[11],zmm5[11],zmm9[14],zmm5[14],zmm9[15],zmm5[15] vpunpcklqdq %zmm6, %zmm10, %zmm9 # zmm9 = zmm10[0],zmm6[0],zmm10[2],zmm6[2],zmm10[4],zmm6[4],zmm10[6],zmm6[6] vpunpckhqdq %zmm6, %zmm10, %zmm6 # zmm6 = zmm10[1],zmm6[1],zmm10[3],zmm6[3],zmm10[5],zmm6[5],zmm10[7],zmm6[7] vpunpcklqdq %zmm3, %zmm2, %zmm10 # zmm10 = 
zmm2[0],zmm3[0],zmm2[2],zmm3[2],zmm2[4],zmm3[4],zmm2[6],zmm3[6] vpunpckhqdq %zmm3, %zmm2, %zmm2 # zmm2 = zmm2[1],zmm3[1],zmm2[3],zmm3[3],zmm2[5],zmm3[5],zmm2[7],zmm3[7] vpunpcklqdq %zmm8, %zmm7, %zmm3 # zmm3 = zmm7[0],zmm8[0],zmm7[2],zmm8[2],zmm7[4],zmm8[4],zmm7[6],zmm8[6] vpunpckhqdq %zmm8, %zmm7, %zmm7 # zmm7 = zmm7[1],zmm8[1],zmm7[3],zmm8[3],zmm7[5],zmm8[5],zmm7[7],zmm8[7] vpunpcklqdq %zmm5, %zmm4, %zmm8 # zmm8 = zmm4[0],zmm5[0],zmm4[2],zmm5[2],zmm4[4],zmm5[4],zmm4[6],zmm5[6] vpunpckhqdq %zmm5, %zmm4, %zmm4 # zmm4 = zmm4[1],zmm5[1],zmm4[3],zmm5[3],zmm4[5],zmm5[5],zmm4[7],zmm5[7] vshufi64x2 $0x88, %zmm3, %zmm9, %zmm5 # zmm5 = zmm9[0,1,4,5],zmm3[0,1,4,5] vshufi64x2 $0x88, %zmm7, %zmm6, %zmm11 # zmm11 = zmm6[0,1,4,5],zmm7[0,1,4,5] vshufi64x2 $0x88, %zmm8, %zmm10, %zmm12 # zmm12 = zmm10[0,1,4,5],zmm8[0,1,4,5] vshufi64x2 $0x88, %zmm4, %zmm2, %zmm13 # zmm13 = zmm2[0,1,4,5],zmm4[0,1,4,5] vshufi64x2 $0xdd, %zmm3, %zmm9, %zmm3 # zmm3 = zmm9[2,3,6,7],zmm3[2,3,6,7] vshufi64x2 $0xdd, %zmm7, %zmm6, %zmm6 # zmm6 = zmm6[2,3,6,7],zmm7[2,3,6,7] vshufi64x2 $0xdd, %zmm8, %zmm10, %zmm7 # zmm7 = zmm10[2,3,6,7],zmm8[2,3,6,7] vshufi64x2 $0xdd, %zmm4, %zmm2, %zmm2 # zmm2 = zmm2[2,3,6,7],zmm4[2,3,6,7] vmovdqu64 %zmm5, (%r13) vmovdqu64 %zmm11, 0x40(%r13) vmovdqu64 %zmm12, 0x80(%r13) vmovdqu64 %zmm13, 0xc0(%r13) vmovdqu64 %zmm3, 0x100(%r13) vmovdqu64 %zmm6, 0x140(%r13) vmovdqu64 %zmm7, 0x180(%r13) vmovdqu64 %zmm2, 0x1c0(%r13) addq $0x200, %r13 # imm = 0x200 addq %r9, %rsi addl $0x10, %edi cmpl %r8d, %edi jl 0x13dbb3 movl -0x54(%rsp), %edx jmp 0x13dd00 xorl %edx, %edx leaq (%rsi,%rbx,2), %rsi movl %ecx, %r12d andl $0x1fffffff, %r12d # imm = 0x1FFFFFFF shlq $0x4, %r12 movq %rbp, -0x30(%rsp) leal (,%rbp,8), %ebp movl %edx, %edi orl $0x7, %edi cmpl %r8d, %edi jge 0x13dde8 movq %r12, %rdi subq %r14, %rdi addq %rdi, %rsi movl %edx, %ebx movq -0x60(%rsp), %r14 vmovdqu64 (%rsi), %zmm2 vmovdqu64 0x40(%rsi), %zmm3 vmovdqu64 0x80(%rsi), %zmm4 vmovdqu64 0xc0(%rsi), %zmm5 vshufi64x2 $0x88, %zmm3, %zmm2, %zmm6 # zmm6 = zmm2[0,1,4,5],zmm3[0,1,4,5] vshufi64x2 $0xdd, %zmm3, %zmm2, %zmm2 # zmm2 = zmm2[2,3,6,7],zmm3[2,3,6,7] vshufi64x2 $0x88, %zmm5, %zmm4, %zmm3 # zmm3 = zmm4[0,1,4,5],zmm5[0,1,4,5] vshufi64x2 $0xdd, %zmm5, %zmm4, %zmm4 # zmm4 = zmm4[2,3,6,7],zmm5[2,3,6,7] vmovdqa64 %zmm6, %zmm5 vpermt2d %zmm2, %zmm0, %zmm5 vmovdqa64 %zmm3, %zmm7 vpermt2d %zmm4, %zmm0, %zmm7 vshufi64x2 $0x88, %zmm7, %zmm5, %zmm8 # zmm8 = zmm5[0,1,4,5],zmm7[0,1,4,5] vshufi64x2 $0xdd, %zmm7, %zmm5, %zmm5 # zmm5 = zmm5[2,3,6,7],zmm7[2,3,6,7] vpermt2d %zmm2, %zmm1, %zmm6 vpermt2d %zmm4, %zmm1, %zmm3 vshufi64x2 $0x88, %zmm3, %zmm6, %zmm2 # zmm2 = zmm6[0,1,4,5],zmm3[0,1,4,5] vshufi64x2 $0xdd, %zmm3, %zmm6, %zmm3 # zmm3 = zmm6[2,3,6,7],zmm3[2,3,6,7] vmovdqu64 %zmm8, (%r13) vmovdqu64 %zmm5, 0x40(%r13) vmovdqu64 %zmm2, 0x80(%r13) vmovdqu64 %zmm3, 0xc0(%r13) addq $0x100, %r13 # imm = 0x100 leal 0x8(%rbx), %edx addq %r10, %rsi addl $0xf, %ebx cmpl %r8d, %ebx movl %edx, %ebx jl 0x13dd38 jmp 0x13ddf7 addq %rbx, %rbx subq %rbx, %rsi leaq (%rsi,%rbp,2), %rsi movq -0x60(%rsp), %r14 movl %edx, %edi orl $0x1, %edi cmpl %r8d, %edi jge 0x13de2f movq %r14, %rdi subq %r12, %rdi addq %rdi, %rsi movl %edx, %ebx vmovdqu64 (%rsi), %zmm2 vmovdqu64 %zmm2, (%r13) addq $0x40, %r13 leal 0x2(%rbx), %edx addq %r11, %rsi addl $0x3, %ebx cmpl %r8d, %ebx movl %edx, %ebx jl 0x13de0c jmp 0x13de3e addq %rbp, %rbp subq %rbp, %rsi movq -0x30(%rsp), %rdi leaq (%rsi,%rdi,4), %rsi movl %r8d, %ebx subl %edx, %ebx jle 0x13de64 movq -0x50(%rsp), %rdx addq %rdx, %rsi vmovdqu (%rsi), %ymm2 vmovdqa 
%ymm2, (%r13) addq $0x20, %r13 addq %rax, %rsi decl %ebx jne 0x13de4d jmp 0x13de69 movq -0x50(%rsp), %rdx leaq 0x10(%r15), %rbx addq $0x1f, %r15 addl $0x10, %ecx addq $0x40, %r14 addq $-0x20, %rdx cmpq -0x48(%rsp), %r15 movq %rbx, %r15 jl 0x13db73 movl -0x58(%rsp), %ecx jmp 0x13de92 xorl %ebx, %ebx movl %ebx, %edx orl $0x7, %edx cmpl %ecx, %edx jge 0x13e0fd movq -0x38(%rsp), %rcx imulq -0x48(%rsp), %rcx movq %rcx, -0x50(%rsp) movl %ebx, %ebp movq -0x40(%rsp), %rcx leaq (%rcx,%rbp), %rdx movq %rdx, %r15 shlq $0x5, %r15 movq %rdx, %r12 shlq $0x4, %r12 negq %r12 leaq (,%rdx,4), %rcx leaq (%rcx,%rcx,2), %rsi negq %rsi addq %rdx, %rdx negq %rdx movq -0x28(%rsp), %rcx movq (%rcx), %rbx movq -0x50(%rsp), %rcx addq %rbp, %rcx cmpl $0x10, %r8d movq %rcx, -0x60(%rsp) jl 0x13dfb8 addq %r15, %rbx movl $0xf, %edi vmovdqu64 (%rbx), %zmm2 vmovdqu64 0x40(%rbx), %zmm3 vinserti64x4 $0x1, 0x80(%rbx), %zmm2, %zmm4 vshufi64x2 $0xee, 0x80(%rbx), %zmm2, %zmm2 # zmm2 = zmm2[4,5,6,7],mem[4,5,6,7] vinserti64x4 $0x1, 0xc0(%rbx), %zmm3, %zmm5 vshufi64x2 $0xee, 0xc0(%rbx), %zmm3, %zmm3 # zmm3 = zmm3[4,5,6,7],mem[4,5,6,7] vpunpckldq %zmm2, %zmm4, %zmm6 # zmm6 = zmm4[0],zmm2[0],zmm4[1],zmm2[1],zmm4[4],zmm2[4],zmm4[5],zmm2[5],zmm4[8],zmm2[8],zmm4[9],zmm2[9],zmm4[12],zmm2[12],zmm4[13],zmm2[13] vpunpckhdq %zmm2, %zmm4, %zmm2 # zmm2 = zmm4[2],zmm2[2],zmm4[3],zmm2[3],zmm4[6],zmm2[6],zmm4[7],zmm2[7],zmm4[10],zmm2[10],zmm4[11],zmm2[11],zmm4[14],zmm2[14],zmm4[15],zmm2[15] vpunpckldq %zmm3, %zmm5, %zmm4 # zmm4 = zmm5[0],zmm3[0],zmm5[1],zmm3[1],zmm5[4],zmm3[4],zmm5[5],zmm3[5],zmm5[8],zmm3[8],zmm5[9],zmm3[9],zmm5[12],zmm3[12],zmm5[13],zmm3[13] vpunpckhdq %zmm3, %zmm5, %zmm3 # zmm3 = zmm5[2],zmm3[2],zmm5[3],zmm3[3],zmm5[6],zmm3[6],zmm5[7],zmm3[7],zmm5[10],zmm3[10],zmm5[11],zmm3[11],zmm5[14],zmm3[14],zmm5[15],zmm3[15] vpunpcklqdq %zmm4, %zmm6, %zmm5 # zmm5 = zmm6[0],zmm4[0],zmm6[2],zmm4[2],zmm6[4],zmm4[4],zmm6[6],zmm4[6] vpunpckhqdq %zmm4, %zmm6, %zmm4 # zmm4 = zmm6[1],zmm4[1],zmm6[3],zmm4[3],zmm6[5],zmm4[5],zmm6[7],zmm4[7] vpunpcklqdq %zmm3, %zmm2, %zmm6 # zmm6 = zmm2[0],zmm3[0],zmm2[2],zmm3[2],zmm2[4],zmm3[4],zmm2[6],zmm3[6] vpunpckhqdq %zmm3, %zmm2, %zmm2 # zmm2 = zmm2[1],zmm3[1],zmm2[3],zmm3[3],zmm2[5],zmm3[5],zmm2[7],zmm3[7] vshufi64x2 $0x88, %zmm4, %zmm5, %zmm3 # zmm3 = zmm5[0,1,4,5],zmm4[0,1,4,5] vshufi64x2 $0x88, %zmm2, %zmm6, %zmm7 # zmm7 = zmm6[0,1,4,5],zmm2[0,1,4,5] vshufi64x2 $0xdd, %zmm4, %zmm5, %zmm4 # zmm4 = zmm5[2,3,6,7],zmm4[2,3,6,7] vshufi64x2 $0xdd, %zmm2, %zmm6, %zmm2 # zmm2 = zmm6[2,3,6,7],zmm2[2,3,6,7] vmovdqu64 %zmm3, (%r13) vmovdqu64 %zmm7, 0x40(%r13) vmovdqu64 %zmm4, 0x80(%r13) vmovdqu64 %zmm2, 0xc0(%r13) addq $0x100, %r13 # imm = 0x100 addq %r9, %rbx addl $0x10, %edi cmpl %r8d, %edi jl 0x13df07 movl -0x54(%rsp), %edi jmp 0x13dfc1 shlq $0x5, %rcx addq %rcx, %rbx xorl %edi, %edi movl %edi, %ecx orl $0x7, %ecx cmpl %r8d, %ecx jge 0x13e049 addq %r12, %rbx movl %edi, %ecx vmovupd (%rbx), %ymm2 vmovupd 0x20(%rbx), %ymm3 vmovupd 0x40(%rbx), %ymm4 vmovupd 0x60(%rbx), %ymm5 vperm2f128 $0x20, %ymm4, %ymm2, %ymm6 # ymm6 = ymm2[0,1],ymm4[0,1] vperm2f128 $0x31, %ymm4, %ymm2, %ymm2 # ymm2 = ymm2[2,3],ymm4[2,3] vperm2f128 $0x20, %ymm5, %ymm3, %ymm4 # ymm4 = ymm3[0,1],ymm5[0,1] vperm2f128 $0x31, %ymm5, %ymm3, %ymm3 # ymm3 = ymm3[2,3],ymm5[2,3] vunpcklps %ymm2, %ymm6, %ymm5 # ymm5 = ymm6[0],ymm2[0],ymm6[1],ymm2[1],ymm6[4],ymm2[4],ymm6[5],ymm2[5] vunpckhps %ymm2, %ymm6, %ymm2 # ymm2 = ymm6[2],ymm2[2],ymm6[3],ymm2[3],ymm6[6],ymm2[6],ymm6[7],ymm2[7] vunpcklps %ymm3, %ymm4, %ymm6 # ymm6 = 
ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[4],ymm3[4],ymm4[5],ymm3[5] vunpckhps %ymm3, %ymm4, %ymm3 # ymm3 = ymm4[2],ymm3[2],ymm4[3],ymm3[3],ymm4[6],ymm3[6],ymm4[7],ymm3[7] vunpcklpd %ymm6, %ymm5, %ymm4 # ymm4 = ymm5[0],ymm6[0],ymm5[2],ymm6[2] vunpckhpd %ymm6, %ymm5, %ymm5 # ymm5 = ymm5[1],ymm6[1],ymm5[3],ymm6[3] vunpcklpd %ymm3, %ymm2, %ymm6 # ymm6 = ymm2[0],ymm3[0],ymm2[2],ymm3[2] vunpckhpd %ymm3, %ymm2, %ymm2 # ymm2 = ymm2[1],ymm3[1],ymm2[3],ymm3[3] vmovupd %ymm4, (%r13) vmovupd %ymm5, 0x20(%r13) vmovupd %ymm6, 0x40(%r13) vmovupd %ymm2, 0x60(%r13) subq $-0x80, %r13 leal 0x8(%rcx), %edi addq %r10, %rbx addl $0xf, %ecx cmpl %r8d, %ecx movl %edi, %ecx jl 0x13dfd0 jmp 0x13e06b movq -0x60(%rsp), %rcx shlq $0x4, %rcx movq %rdx, -0x30(%rsp) movq %r12, %rdx subq %rcx, %rbx subq %rcx, %rbx movq %rdx, %r12 movq -0x30(%rsp), %rdx addq %rcx, %rbx movl %edi, %ecx orl $0x1, %ecx cmpl %r8d, %ecx jge 0x13e09a addq %rsi, %rbx movl %edi, %ecx vmovupd (%rbx), %ymm2 vmovupd %ymm2, (%r13) addq $0x20, %r13 leal 0x2(%rcx), %edi addq %r11, %rbx addl $0x3, %ecx cmpl %r8d, %ecx movl %edi, %ecx jl 0x13e07a jmp 0x13e0b1 movq -0x60(%rsp), %r14 leaq (,%r14,8), %rcx addq %rcx, %rcx subq %rcx, %rbx leaq (%rbx,%r14,4), %rbx movl %r8d, %ecx subl %edi, %ecx jle 0x13e0d0 addq %rdx, %rbx vmovupd (%rbx), %xmm2 vmovapd %xmm2, (%r13) addq $0x10, %r13 addq %rax, %rbx decl %ecx jne 0x13e0bb leaq 0x8(%rbp), %rbx addq $0xf, %rbp addq $0x100, %r15 # imm = 0x100 addq $-0x80, %r12 addq $-0x60, %rsi addq $-0x10, %rdx cmpq -0x48(%rsp), %rbp movq %rbx, %rbp jl 0x13dee0 movl -0x58(%rsp), %ecx movl %ebx, %edx orl $0x3, %edx cmpl %ecx, %edx jge 0x13e31b movq -0x38(%rsp), %rcx imulq -0x48(%rsp), %rcx movq %rcx, -0x50(%rsp) movl %ebx, %r15d movq -0x40(%rsp), %rcx addq %r15, %rcx movq %rcx, %rdx shlq $0x5, %rdx movq %rcx, %rsi shlq $0x4, %rsi negq %rsi leaq (,%rcx,4), %rdi leaq (%rdi,%rdi,2), %r12 negq %r12 addq %rcx, %rcx movl $0x6, %ebp subq %rcx, %rbp movq -0x28(%rsp), %rcx movq (%rcx), %rbx movq -0x50(%rsp), %rcx addq %r15, %rcx cmpl $0x10, %r8d movq %rcx, -0x60(%rsp) jl 0x13e1d8 addq %rdx, %rbx movl $0xf, %ecx vmovdqu64 (%rbx), %zmm2 vinserti64x4 $0x1, 0x40(%rbx), %zmm2, %zmm3 vshufi64x2 $0xee, 0x40(%rbx), %zmm2, %zmm2 # zmm2 = zmm2[4,5,6,7],mem[4,5,6,7] vmovdqa64 %zmm3, %zmm4 vpermt2d %zmm2, %zmm0, %zmm4 vpermt2d %zmm2, %zmm1, %zmm3 vinserti64x4 $0x1, %ymm3, %zmm4, %zmm2 vshufi64x2 $0xee, %zmm3, %zmm4, %zmm3 # zmm3 = zmm4[4,5,6,7],zmm3[4,5,6,7] vpunpcklqdq %zmm3, %zmm2, %zmm4 # zmm4 = zmm2[0],zmm3[0],zmm2[2],zmm3[2],zmm2[4],zmm3[4],zmm2[6],zmm3[6] vpunpckhqdq %zmm3, %zmm2, %zmm2 # zmm2 = zmm2[1],zmm3[1],zmm2[3],zmm3[3],zmm2[5],zmm3[5],zmm2[7],zmm3[7] vmovdqu64 %zmm4, (%r13) vmovdqu64 %zmm2, 0x40(%r13) subq $-0x80, %r13 addq %r9, %rbx addl $0x10, %ecx cmpl %r8d, %ecx jl 0x13e173 movl -0x54(%rsp), %edi jmp 0x13e1e1 shlq $0x5, %rcx addq %rcx, %rbx xorl %edi, %edi movl %edi, %ecx orl $0x7, %ecx cmpl %r8d, %ecx jge 0x13e251 addq %rsi, %rbx movl %edi, %ecx vmovapd (%rbx), %xmm2 vmovapd 0x10(%rbx), %xmm3 vmovapd 0x20(%rbx), %xmm4 vmovapd 0x30(%rbx), %xmm5 vunpcklps %xmm3, %xmm2, %xmm6 # xmm6 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] vunpckhps %xmm3, %xmm2, %xmm2 # xmm2 = xmm2[2],xmm3[2],xmm2[3],xmm3[3] vunpcklps %xmm5, %xmm4, %xmm3 # xmm3 = xmm4[0],xmm5[0],xmm4[1],xmm5[1] vunpckhps %xmm5, %xmm4, %xmm4 # xmm4 = xmm4[2],xmm5[2],xmm4[3],xmm5[3] vunpcklpd %xmm3, %xmm6, %xmm5 # xmm5 = xmm6[0],xmm3[0] vunpckhpd %xmm3, %xmm6, %xmm3 # xmm3 = xmm6[1],xmm3[1] vunpcklpd %xmm4, %xmm2, %xmm6 # xmm6 = xmm2[0],xmm4[0] vunpckhpd %xmm4, %xmm2, %xmm2 # xmm2 = 
xmm2[1],xmm4[1] vmovupd %xmm5, (%r13) vmovupd %xmm3, 0x10(%r13) vmovupd %xmm6, 0x20(%r13) vmovupd %xmm2, 0x30(%r13) addq $0x40, %r13 leal 0x8(%rcx), %edi addq %r10, %rbx addl $0xf, %ecx cmpl %r8d, %ecx movl %edi, %ecx jl 0x13e1f0 jmp 0x13e273 movq -0x60(%rsp), %rcx shlq $0x4, %rcx movq %rbp, -0x30(%rsp) movq %rsi, %rbp subq %rcx, %rbx subq %rcx, %rbx movq %rbp, %rsi movq -0x30(%rsp), %rbp addq %rcx, %rbx movl %edi, %ecx orl $0x1, %ecx cmpl %r8d, %ecx jge 0x13e2a2 addq %r12, %rbx movl %edi, %ecx vmovupd (%rbx), %xmm2 vmovupd %xmm2, (%r13) addq $0x10, %r13 leal 0x2(%rcx), %edi addq %r11, %rbx addl $0x3, %ecx cmpl %r8d, %ecx movl %edi, %ecx jl 0x13e282 jmp 0x13e2b9 movq -0x60(%rsp), %r14 leaq (,%r14,8), %rcx addq %rcx, %rcx subq %rcx, %rbx leaq (%rbx,%r14,4), %rbx movl %r8d, %ecx subl %edi, %ecx jle 0x13e2f1 addq %rbp, %rbx movzwl -0x6(%rbx), %edi movw %di, (%r13) movzwl -0x4(%rbx), %edi movw %di, 0x2(%r13) movzwl -0x2(%rbx), %edi movw %di, 0x4(%r13) movzwl (%rbx), %edi movw %di, 0x6(%r13) addq $0x8, %r13 addq %rax, %rbx decl %ecx jne 0x13e2c3 leaq 0x4(%r15), %rbx addq $0x7, %r15 subq $-0x80, %rdx addq $-0x40, %rsi addq $-0x30, %r12 addq $-0x8, %rbp cmpq -0x48(%rsp), %r15 movq %rbx, %r15 jl 0x13e150 movl -0x58(%rsp), %ecx movl %ebx, %edx orl $0x1, %edx cmpl %ecx, %edx jge 0x13e4e3 movq -0x38(%rsp), %rcx imulq -0x48(%rsp), %rcx movq %rcx, -0x50(%rsp) movslq %ebx, %r15 movq -0x40(%rsp), %rcx addq %r15, %rcx movq %rcx, %rdx shlq $0x5, %rdx movq %rcx, %rsi shlq $0x4, %rsi negq %rsi leaq (,%rcx,4), %rdi leaq (%rdi,%rdi,2), %r12 negq %r12 addq %rcx, %rcx movl $0x2, %ebp subq %rcx, %rbp movq -0x28(%rsp), %rcx movq (%rcx), %rbx movq -0x50(%rsp), %rcx leaq (%r15,%rcx), %r14 cmpl $0x10, %r8d jl 0x13e3cb addq %rdx, %rbx movl $0xf, %edi vmovdqa (%rbx), %ymm2 vmovdqa 0x20(%rbx), %ymm3 vpunpckldq %ymm3, %ymm2, %ymm4 # ymm4 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5] vpunpckhdq %ymm3, %ymm2, %ymm2 # ymm2 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7] vinserti128 $0x1, %xmm2, %ymm4, %ymm3 vperm2i128 $0x31, %ymm2, %ymm4, %ymm2 # ymm2 = ymm4[2,3],ymm2[2,3] vmovdqu %ymm3, (%r13) vmovdqu %ymm2, 0x20(%r13) addq $0x40, %r13 addl $0x10, %edi addq %r9, %rbx cmpl %r8d, %edi jl 0x13e38d movl -0x54(%rsp), %edi jmp 0x13e3d7 movq %r14, %rcx shlq $0x5, %rcx addq %rcx, %rbx xorl %edi, %edi movl %edi, %ecx orl $0x7, %ecx cmpl %r8d, %ecx movq %r14, -0x60(%rsp) jge 0x13e41e addq %rsi, %rbx movl %edi, %ecx vmovapd (%rbx), %xmm2 vmovapd 0x10(%rbx), %xmm3 vunpcklps %xmm3, %xmm2, %xmm4 # xmm4 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] vunpckhps %xmm3, %xmm2, %xmm2 # xmm2 = xmm2[2],xmm3[2],xmm2[3],xmm3[3] vmovupd %xmm4, (%r13) vmovupd %xmm2, 0x10(%r13) addq $0x20, %r13 leal 0x8(%rcx), %edi addl $0xf, %ecx addq %r10, %rbx cmpl %r8d, %ecx movl %edi, %ecx jl 0x13e3eb jmp 0x13e42e movq %r14, %rcx shlq $0x4, %rcx subq %rcx, %rbx subq %rcx, %rbx addq %rcx, %rbx movl %edi, %ecx orl $0x1, %ecx cmpl %r8d, %ecx jge 0x13e47a addq %r12, %rbx movl %edi, %r14d movzwl (%rbx), %ecx movw %cx, (%r13) movzwl 0x2(%rbx), %ecx movw %cx, 0x2(%r13) movzwl 0x4(%rbx), %ecx movw %cx, 0x4(%r13) movzwl 0x6(%rbx), %ecx movw %cx, 0x6(%r13) addq $0x8, %r13 leal 0x2(%r14), %edi addl $0x3, %r14d addq %r11, %rbx cmpl %r8d, %r14d movl %edi, %r14d jl 0x13e43e jmp 0x13e491 movq -0x60(%rsp), %r14 leaq (,%r14,8), %rcx addq %rcx, %rcx subq %rcx, %rbx leaq (%rbx,%r14,4), %rbx movl %r8d, %r14d subl %edi, %r14d jle 0x13e4b9 addq %rbp, %rbx movzwl -0x2(%rbx), %ecx movw %cx, (%r13) movzwl (%rbx), %ecx movw %cx, 0x2(%r13) addq $0x4, %r13 addq 
%rax, %rbx decl %r14d jne 0x13e49c leaq 0x2(%r15), %rbx addq $0x3, %r15 addq $0x40, %rdx addq $-0x20, %rsi addq $-0x18, %r12 addq $-0x4, %rbp cmpq -0x48(%rsp), %r15 movq %rbx, %r15 jl 0x13e36e movl -0x58(%rsp), %ecx cmpl %ecx, %ebx jge 0x13e649 movq -0x38(%rsp), %rcx imulq -0x48(%rsp), %rcx movq %rcx, -0x50(%rsp) movslq %ebx, %rdx movq -0x40(%rsp), %rcx leaq (%rcx,%rdx), %rsi movq %rsi, %r15 shlq $0x5, %r15 movq %rsi, %r12 shlq $0x4, %r12 negq %r12 leaq (,%rsi,4), %rcx leaq (%rcx,%rcx,2), %rbp negq %rbp addq %rsi, %rsi negq %rsi movq -0x28(%rsp), %rcx movq (%rcx), %rbx movq -0x50(%rsp), %rcx leaq (%rdx,%rcx), %r14 cmpl $0x10, %r8d jl 0x13e56b addq %r15, %rbx movl $0xf, %edi vmovapd (%rbx), %ymm2 vmovupd %ymm2, (%r13) addq $0x20, %r13 addl $0x10, %edi addq %r9, %rbx cmpl %r8d, %edi jl 0x13e54c movl -0x54(%rsp), %edi jmp 0x13e577 movq %r14, %rcx shlq $0x5, %rcx addq %rcx, %rbx xorl %edi, %edi movl %edi, %ecx orl $0x7, %ecx cmpl %r8d, %ecx movq %r14, -0x60(%rsp) jge 0x13e5ab addq %r12, %rbx movl %edi, %ecx vmovapd (%rbx), %xmm2 vmovupd %xmm2, (%r13) addq $0x10, %r13 leal 0x8(%rcx), %edi addl $0xf, %ecx addq %r10, %rbx cmpl %r8d, %ecx movl %edi, %ecx jl 0x13e58b jmp 0x13e5bb movq %r14, %rcx shlq $0x4, %rcx subq %rcx, %rbx subq %rcx, %rbx addq %rcx, %rbx movl %edi, %ecx orl $0x1, %ecx cmpl %r8d, %ecx jge 0x13e5f5 addq %rbp, %rbx movl %edi, %r14d movzwl (%rbx), %ecx movw %cx, (%r13) movzwl 0x2(%rbx), %ecx movw %cx, 0x2(%r13) addq $0x4, %r13 leal 0x2(%r14), %edi addl $0x3, %r14d addq %r11, %rbx cmpl %r8d, %r14d movl %edi, %r14d jl 0x13e5cb jmp 0x13e60c movq -0x60(%rsp), %r14 leaq (,%r14,8), %rcx addq %rcx, %rcx subq %rcx, %rbx leaq (%rbx,%r14,4), %rbx movl %r8d, %r14d subl %edi, %r14d jle 0x13e62b addq %rsi, %rbx movzwl (%rbx), %ecx movw %cx, (%r13) addq $0x2, %r13 addq %rax, %rbx decl %r14d jne 0x13e617 incq %rdx addq $0x20, %r15 addq $-0x10, %r12 addq $-0xc, %rbp addq $-0x2, %rsi cmpq -0x48(%rsp), %rdx jne 0x13e52d movq -0x38(%rsp), %rdi incq %rdi movl -0x58(%rsp), %ecx movl -0x1c(%rsp), %esi addl %ecx, %esi movq -0x40(%rsp), %rdx addq -0x48(%rsp), %rdx movq %rdx, -0x40(%rsp) cmpq -0x18(%rsp), %rdi jne 0x13db31 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp vzeroupper retq
/Tencent[P]ncnn/src/layer/x86/convolution_3x3_winograd_int8.h
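For orientation: the unrolled loops above are the tail of the Winograd B-tile repack. The source buffer B appears to be laid out as [max_kk][batch][max_jj] int16, so consecutive kk rows are max_jj * batch shorts apart; each jj-block path gathers a 16/8/4/2/1-wide column block per kk and writes it contiguously to pp, while the kk-blocked paths additionally transpose so that kk pairs sit adjacent for the 16-bit multiply-add units. The per-kk tail paths reduce to the following scalar sketch (not from the source; the wrapper name and TILE_W parameter are hypothetical):

// A minimal scalar sketch (not from the source) of the per-kk tail paths above.
// Consecutive kk rows of B are max_jj * batch shorts apart, so each step copies
// one TILE_W-wide block of jj columns (16/8/4/2/1 in the SIMD paths) to pp.
static void pack_B_tail_scalar(const short* B, short*& pp, int b, int batch,
                               int max_jj, int max_kk, int jj, int TILE_W)
{
    const short* p0 = B + (b * max_jj + jj);
    for (int kk = 0; kk < max_kk; kk++)
    {
        for (int j = 0; j < TILE_W; j++)
            *pp++ = p0[j];
        p0 += max_jj * batch; // stride between consecutive kk rows
    }
}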
ncnn::get_optimal_tile_mnk_int8(int, int, int, int&, int&, int&, int)
static void get_optimal_tile_mnk_int8(int M, int N, int K, int& TILE_M, int& TILE_N, int& TILE_K, int nT)
{
    // resolve optimal tile size from cache size
    const size_t l2_cache_size_int8 = (int)(get_cpu_level2_cache_size() / sizeof(short));

    if (nT == 0)
        nT = get_physical_big_cpu_count();

    // solve M
    {
        int tile_size = (int)sqrt((float)l2_cache_size_int8 / 3);

#if __AVX512F__
        TILE_M = std::max(16, tile_size / 16 * 16);
#elif __AVX2__
        TILE_M = std::max(8, tile_size / 8 * 8);
#elif __SSE2__
        TILE_M = std::max(4, tile_size / 4 * 4);
#else
        TILE_M = std::max(2, tile_size / 2 * 2);
#endif

        TILE_M *= std::min(nT, get_physical_cpu_count());

        int nn_M = (M + TILE_M - 1) / TILE_M;
#if __AVX512F__
        TILE_M = std::min(TILE_M, ((M + nn_M - 1) / nn_M + 15) / 16 * 16);
#elif __AVX2__
        TILE_M = std::min(TILE_M, ((M + nn_M - 1) / nn_M + 7) / 8 * 8);
#elif __SSE2__
        TILE_M = std::min(TILE_M, ((M + nn_M - 1) / nn_M + 3) / 4 * 4);
#else
        TILE_M = std::min(TILE_M, ((M + nn_M - 1) / nn_M + 1) / 2 * 2);
#endif

        if (nT > 1)
        {
#if __AVX512F__
            TILE_M = std::min(TILE_M, (std::max(1, TILE_M / nT) + 15) / 16 * 16);
#elif __AVX2__
            TILE_M = std::min(TILE_M, (std::max(1, TILE_M / nT) + 7) / 8 * 8);
#elif __SSE2__
            TILE_M = std::min(TILE_M, (std::max(1, TILE_M / nT) + 3) / 4 * 4);
#else
            TILE_M = std::min(TILE_M, (std::max(1, TILE_M / nT) + 1) / 2 * 2);
#endif
        }
    }

    // solve K
    {
        int tile_size = (int)(sqrt((float)l2_cache_size_int8) - TILE_M);

#if __AVX512F__
        TILE_K = std::max(16, tile_size / 16 * 16);
#elif __AVX2__
        TILE_K = std::max(8, tile_size / 8 * 8);
#elif __SSE2__
        TILE_K = std::max(4, tile_size / 4 * 4);
#else
        TILE_K = std::max(2, tile_size / 2 * 2);
#endif

        int nn_K = (K + TILE_K - 1) / TILE_K;
#if __AVX512F__
        TILE_K = std::min(TILE_K, ((K + nn_K - 1) / nn_K + 15) / 16 * 16);
#elif __AVX2__
        TILE_K = std::min(TILE_K, ((K + nn_K - 1) / nn_K + 7) / 8 * 8);
#elif __SSE2__
        TILE_K = std::min(TILE_K, ((K + nn_K - 1) / nn_K + 3) / 4 * 4);
#else
        TILE_K = std::min(TILE_K, ((K + nn_K - 1) / nn_K + 1) / 2 * 2);
#endif
    }

    if (N > 0)
    {
        int tile_size = (int)((l2_cache_size_int8 - TILE_M * TILE_K) / (TILE_M * 2 + TILE_K));

#if __SSE2__
        TILE_N = std::max(4, tile_size / 4 * 4);
#else
        TILE_N = std::max(1, tile_size);
#endif

        int nn_N = (N + TILE_N - 1) / TILE_N;
#if __SSE2__
        TILE_N = std::min(TILE_N, ((N + nn_N - 1) / nn_N + 3) / 4 * 4);
#else
        TILE_N = std::min(TILE_N, (N + nn_N - 1) / nn_N);
#endif
    }
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x28, %rsp movq %r9, 0x20(%rsp) movq %r8, 0x18(%rsp) movq %rcx, %r12 movl %edx, %ebp movq %rsi, 0x10(%rsp) movl %edi, %r13d movl 0x60(%rsp), %ebx callq 0x3cb1f movl %eax, %r14d sarl %r14d movslq %r14d, %r15 testl %ebx, %ebx jne 0x14dc15 callq 0x3cb03 movl %eax, %ebx testq %r15, %r15 js 0x14dc21 vcvtsi2ss %r14d, %xmm0, %xmm0 jmp 0x14dc37 movq %r15, %rax shrq %rax andl $0x1, %r14d orq %rax, %r14 vcvtsi2ss %r14, %xmm0, %xmm0 vaddss %xmm0, %xmm0, %xmm0 vmovss %xmm0, 0xc(%rsp) vmulss 0x359d67(%rip), %xmm0, %xmm0 # 0x4a79ac vsqrtss %xmm0, %xmm0, %xmm0 vcvttss2si %xmm0, %eax leal 0x7(%rax), %ecx testl %eax, %eax cmovnsl %eax, %ecx andl $-0x8, %ecx cmpl $0x9, %ecx movl $0x8, %r14d cmovll %r14d, %ecx movl %ecx, (%r12) callq 0x3cad3 movl %eax, %esi cmpl %ebx, %eax cmovgel %ebx, %esi imull (%r12), %esi leal (%rsi,%r13), %eax decl %eax cltd idivl %esi movl %eax, %ecx leal (%rcx,%r13), %eax decl %eax cltd idivl %ecx movl %eax, %ecx leal 0x7(%rcx), %eax addl $0xe, %ecx testl %eax, %eax cmovnsl %eax, %ecx andl $-0x8, %ecx cmpl %esi, %ecx cmovgel %esi, %ecx cmpl $0x2, %ebx jl 0x14dcc8 movl %ecx, %eax cltd idivl %ebx cmpl $0x2, %eax movl $0x1, %edx cmovgel %eax, %edx addl $0x7, %edx andl $0x7ffffff8, %edx # imm = 0x7FFFFFF8 cmpl %ecx, %edx cmovgel %ecx, %edx movl %edx, %ecx movq 0x10(%rsp), %rdi movl %ecx, (%r12) vsqrtss 0xc(%rsp), %xmm1, %xmm0 vcvtsi2ss %ecx, %xmm1, %xmm1 vsubss %xmm1, %xmm0, %xmm0 vcvttss2si %xmm0, %eax leal 0x7(%rax), %ecx testl %eax, %eax cmovnsl %eax, %ecx andl $-0x8, %ecx cmpl $0x9, %ecx cmovgel %ecx, %r14d leal (%r14,%rbp), %eax decl %eax cltd idivl %r14d movl %eax, %ecx leal (%rcx,%rbp), %eax decl %eax cltd idivl %ecx leal 0x7(%rax), %ecx addl $0xe, %eax testl %ecx, %ecx cmovnsl %ecx, %eax andl $-0x8, %eax cmpl %r14d, %eax cmovgel %r14d, %eax movq 0x20(%rsp), %rcx movl %eax, (%rcx) testl %edi, %edi jle 0x14dd80 movl (%r12), %ecx leal (%rax,%rcx,2), %edx imull %eax, %ecx movslq %ecx, %rax subq %rax, %r15 movslq %edx, %rcx movq %r15, %rax xorl %edx, %edx divq %rcx leal 0x3(%rax), %ecx testl %eax, %eax cmovnsl %eax, %ecx andl $-0x4, %ecx cmpl $0x5, %ecx movl $0x4, %esi cmovgel %ecx, %esi leal (%rdi,%rsi), %eax decl %eax xorl %edx, %edx divl %esi movl %eax, %ecx leal (%rdi,%rcx), %eax decl %eax xorl %edx, %edx divl %ecx addl $0x3, %eax andl $-0x4, %eax cmpl %esi, %eax cmovael %esi, %eax movq 0x18(%rsp), %rcx movl %eax, (%rcx) addq $0x28, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq
/Tencent[P]ncnn/src/layer/x86/convolution_3x3_winograd_int8.h
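A worked example of the heuristic above, assuming a hypothetical 1 MiB L2, nT = 1, an AVX-512 build, and M = N = K = 256:

l2_cache_size_int8 = 1048576 / sizeof(short) = 524288
solve M: tile_size = (int)sqrt(524288 / 3.f) = 418, so TILE_M = 416; nn_M = (256 + 415) / 416 = 1, so TILE_M = min(416, (256 + 15) / 16 * 16) = 256
solve K: tile_size = (int)(sqrt(524288.f) - 256) = 468, so TILE_K = 464; nn_K = 1, so TILE_K = min(464, (256 + 15) / 16 * 16) = 256
solve N: tile_size = (524288 - 256 * 256) / (256 * 2 + 256) = 597, so TILE_N = 596; nn_N = 1, so TILE_N = min(596, (256 + 3) / 4 * 4) = 256

The / 16 * 16 (or / 8 * 8, / 4 * 4) rounding keeps every tile edge a multiple of the SIMD pack width, and the nn_M / nn_K / nn_N re-clamp shrinks a tile to the smallest such multiple that still covers the matrix in the same number of passes.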
ncnn::Reduction::load_param(ncnn::ParamDict const&)
int Reduction::load_param(const ParamDict& pd)
{
    operation = pd.get(0, 0);
    reduce_all = pd.get(1, 1);
    coeff = pd.get(2, 1.f);
    axes = pd.get(3, Mat());
    keepdims = pd.get(4, 0);

    // the original reduction handled axes as a blob with a batch dimension
    // ask the user to regenerate the param instead of producing a wrong result
    int fixbug0 = pd.get(5, 0);
    if (fixbug0 == 0 && !axes.empty())
    {
        NCNN_LOGE("param is too old, please regenerate!");
        return -1;
    }

    return 0;
}
pushq %rbp pushq %r15 pushq %r14 pushq %rbx subq $0x98, %rsp movq %rsi, %r14 movq %rdi, %rbx movq %rsi, %rdi xorl %esi, %esi xorl %edx, %edx callq 0x3a260 movl %eax, 0xd0(%rbx) movq %r14, %rdi movl $0x1, %esi movl $0x1, %edx callq 0x3a260 movl %eax, 0xd4(%rbx) movss 0x27fe15(%rip), %xmm0 # 0x4a50d0 movq %r14, %rdi movl $0x2, %esi callq 0x3a27a movss %xmm0, 0xd8(%rbx) leaq 0x50(%rsp), %rcx movq $0x0, 0x40(%rcx) xorps %xmm0, %xmm0 movaps %xmm0, (%rcx) movups %xmm0, 0xc(%rcx) movaps %xmm0, 0x20(%rcx) movups %xmm0, 0x2c(%rcx) movq %rsp, %r15 movq %r15, %rdi movq %r14, %rsi movl $0x3, %edx callq 0x3a294 leaq 0xe0(%rbx), %rcx movq 0x8(%rsp), %rax cmpq %r15, %rcx je 0x2253b3 testq %rax, %rax je 0x22531f lock incl (%rax) movq 0xe8(%rbx), %rax testq %rax, %rax je 0x225358 lock decl (%rax) jne 0x225358 movq 0xe0(%rbx), %rsi movq 0x100(%rbx), %rdi testq %rdi, %rdi je 0x22534b movq (%rdi), %rax callq *0x18(%rax) jmp 0x225358 testq %rsi, %rsi je 0x225358 movq %rsi, %rdi callq 0x244a0 movq (%rsp), %rax movq %rax, 0xe0(%rbx) movq 0x8(%rsp), %rax movq %rax, 0xe8(%rbx) movq 0x10(%rsp), %rcx movq %rcx, 0xf0(%rbx) movl 0x18(%rsp), %ecx movl %ecx, 0xf8(%rbx) movq 0x20(%rsp), %rcx movq %rcx, 0x100(%rbx) movups 0x28(%rsp), %xmm0 movups %xmm0, 0x108(%rbx) movl 0x38(%rsp), %ecx movl %ecx, 0x118(%rbx) movq 0x40(%rsp), %rcx movq %rcx, 0x120(%rbx) testq %rax, %rax je 0x2253e0 lock decl (%rax) jne 0x2253e0 movq (%rsp), %rsi movq 0x20(%rsp), %rdi testq %rdi, %rdi je 0x2253d3 movq (%rdi), %rax callq *0x18(%rax) jmp 0x2253e0 testq %rsi, %rsi je 0x2253e0 movq %rsi, %rdi callq 0x244a0 movq $0x0, 0x40(%rsp) xorps %xmm0, %xmm0 movaps %xmm0, (%rsp) movups %xmm0, 0xc(%rsp) movups %xmm0, 0x28(%rsp) movl $0x0, 0x38(%rsp) movq 0x58(%rsp), %rax testq %rax, %rax je 0x225435 lock decl (%rax) jne 0x225435 movq 0x50(%rsp), %rsi movq 0x70(%rsp), %rdi testq %rdi, %rdi je 0x225428 movq (%rdi), %rax callq *0x18(%rax) jmp 0x225435 testq %rsi, %rsi je 0x225435 movq %rsi, %rdi callq 0x244a0 xorl %ebp, %ebp movq %r14, %rdi movl $0x4, %esi xorl %edx, %edx callq 0x3a260 movl %eax, 0x128(%rbx) movq %r14, %rdi movl $0x5, %esi xorl %edx, %edx callq 0x3a260 testl %eax, %eax jne 0x22547d cmpq $0x0, 0xe0(%rbx) je 0x22547d movslq 0x118(%rbx), %rax imulq 0x120(%rbx), %rax testq %rax, %rax jne 0x22548d movl %ebp, %eax addq $0x98, %rsp popq %rbx popq %r14 popq %r15 popq %rbp retq callq 0x25d5e movl $0xffffffff, %ebp # imm = 0xFFFFFFFF jmp 0x22547d movq %rax, %rbx movq 0x8(%rsp), %rax testq %rax, %rax je 0x2254ce lock decl (%rax) jne 0x2254ce movq (%rsp), %rsi movq 0x20(%rsp), %rdi testq %rdi, %rdi jne 0x2254c8 testq %rsi, %rsi je 0x2254ce movq %rsi, %rdi callq 0x244a0 jmp 0x2254ce movq (%rdi), %rax callq *0x18(%rax) movq $0x0, 0x40(%rsp) xorps %xmm0, %xmm0 movaps %xmm0, (%rsp) movups %xmm0, 0xc(%rsp) movups %xmm0, 0x28(%rsp) movl $0x0, 0x38(%rsp) jmp 0x2254fb jmp 0x225536 jmp 0x225536 jmp 0x225536 movq %rax, %rbx movq 0x58(%rsp), %rax testq %rax, %rax je 0x22552e lock decl (%rax) jne 0x22552e movq 0x50(%rsp), %rsi movq 0x70(%rsp), %rdi testq %rdi, %rdi jne 0x225528 testq %rsi, %rsi je 0x22552e movq %rsi, %rdi callq 0x244a0 jmp 0x22552e movq (%rdi), %rax callq *0x18(%rax) movq %rbx, %rdi callq 0x243e0 movq %rax, %rdi callq 0x2953f
/Tencent[P]ncnn/src/layer/reduction.cpp
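A hedged usage sketch (not from the source): constructing a ParamDict that passes the fixbug0 guard above. It assumes 0 == ReductionOp_SUM, consistent with the pd.get(0, 0) default, and uses ncnn's public ParamDict setters; the axes variable is illustrative.

// A param dict that load_param accepts: a model that still carries axes (id 3)
// without 5=1 is rejected as too old.
ncnn::ParamDict pd;
pd.set(0, 0);        // operation, assumed ReductionOp_SUM
pd.set(1, 0);        // reduce_all = false
pd.set(3, axes_mat); // axes as a 1-d ncnn::Mat (illustrative variable)
pd.set(4, 1);        // keepdims
pd.set(5, 1);        // fixbug0 = 1: axes are stored without the batch dimension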
ncnn::reduction(float, float const*, int, int, int, int)
static float reduction(float v0, const float* ptr, int size0, int size1, int stride1, int op_type)
{
    if (op_type == Reduction::ReductionOp_SUM) return reduction<reduction_op_add>(v0, ptr, size0, size1, stride1);
    if (op_type == Reduction::ReductionOp_ASUM) return reduction<reduction_op_asum>(v0, ptr, size0, size1, stride1);
    if (op_type == Reduction::ReductionOp_SUMSQ) return reduction<reduction_op_sumsq>(v0, ptr, size0, size1, stride1);
    if (op_type == Reduction::ReductionOp_PROD) return reduction<reduction_op_mul>(v0, ptr, size0, size1, stride1);
    if (op_type == Reduction::ReductionOp_MAX) return reduction<reduction_op_max>(v0, ptr, size0, size1, stride1);
    if (op_type == Reduction::ReductionOp_MIN) return reduction<reduction_op_min>(v0, ptr, size0, size1, stride1);
    if (op_type == Reduction::ReductionOp_LogSumExp) return reduction<reduction_op_sumexp>(v0, ptr, size0, size1, stride1);

    // should never reach here
    return v0;
}
movaps %xmm0, %xmm2 cmpl $0xa, %r8d ja 0x228124 pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x18, %rsp movl %edx, %ebx movl %esi, %ebp movq %rdi, %r14 movl %r8d, %eax leaq 0x284a12(%rip), %rdx # 0x4ac988 movslq (%rdx,%rax,4), %rax addq %rdx, %rax jmpq *%rax testl %ebx, %ebx jle 0x228116 movslq %ecx, %rax movl %ebp, %ecx shlq $0x2, %rax xorl %edx, %edx testl %ebp, %ebp jle 0x227fa6 xorl %esi, %esi addss (%r14,%rsi,4), %xmm2 incq %rsi cmpq %rsi, %rcx jne 0x227f98 incl %edx addq %rax, %r14 cmpl %ebx, %edx jne 0x227f92 jmp 0x228116 testl %ebx, %ebx jle 0x228116 movslq %ecx, %rax movl %ebp, %ecx shlq $0x2, %rax xorl %edx, %edx testl %ebp, %ebp jle 0x227fdb xorl %esi, %esi mulss (%r14,%rsi,4), %xmm2 incq %rsi cmpq %rsi, %rcx jne 0x227fcd incl %edx addq %rax, %r14 cmpl %ebx, %edx jne 0x227fc7 jmp 0x228116 testl %ebx, %ebx jle 0x228116 movslq %ecx, %rax movl %ebp, %ecx shlq $0x2, %rax xorl %edx, %edx movaps 0x27f77d(%rip), %xmm0 # 0x4a7780 testl %ebp, %ebp jle 0x22801e xorl %esi, %esi movss (%r14,%rsi,4), %xmm1 andps %xmm0, %xmm1 addss %xmm1, %xmm2 incq %rsi cmpq %rsi, %rcx jne 0x228009 incl %edx addq %rax, %r14 cmpl %ebx, %edx jne 0x228003 jmp 0x228116 testl %ebx, %ebx jle 0x228116 movslq %ecx, %rax movl %ebp, %r12d shlq $0x2, %rax movq %rax, 0x10(%rsp) xorl %r13d, %r13d testl %ebp, %ebp jle 0x228070 xorl %r15d, %r15d movss %xmm2, 0xc(%rsp) movss (%r14,%r15,4), %xmm0 callq 0x244d0 movss 0xc(%rsp), %xmm2 addss %xmm0, %xmm2 incq %r15 cmpq %r15, %r12 jne 0x22804d incl %r13d addq 0x10(%rsp), %r14 cmpl %ebx, %r13d jne 0x228046 jmp 0x228116 testl %ebx, %ebx jle 0x228116 movslq %ecx, %rax movl %ebp, %ecx shlq $0x2, %rax xorl %edx, %edx testl %ebp, %ebp jle 0x2280b1 xorl %esi, %esi movss (%r14,%rsi,4), %xmm0 mulss %xmm0, %xmm0 addss %xmm0, %xmm2 incq %rsi cmpq %rsi, %rcx jne 0x22809b incl %edx addq %rax, %r14 cmpl %ebx, %edx jne 0x228095 jmp 0x228116 testl %ebx, %ebx jle 0x228116 movslq %ecx, %rax movl %ebp, %ecx shlq $0x2, %rax xorl %edx, %edx testl %ebp, %ebp jle 0x2280df xorl %esi, %esi maxss (%r14,%rsi,4), %xmm2 incq %rsi cmpq %rsi, %rcx jne 0x2280d1 incl %edx addq %rax, %r14 cmpl %ebx, %edx jne 0x2280cb jmp 0x228116 testl %ebx, %ebx jle 0x228116 movslq %ecx, %rax movl %ebp, %ecx shlq $0x2, %rax xorl %edx, %edx testl %ebp, %ebp jle 0x22810d xorl %esi, %esi minss (%r14,%rsi,4), %xmm2 incq %rsi cmpq %rsi, %rcx jne 0x2280ff incl %edx addq %rax, %r14 cmpl %ebx, %edx jne 0x2280f9 addq $0x18, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp movaps %xmm2, %xmm0 retq
/Tencent[P]ncnn/src/layer/reduction.cpp
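Each branch above instantiates one copy of a templated inner kernel, and the indirect jump through the table at 0x4ac988 in the assembly is the compiled dispatch. A sketch of the kernel shape (assumed; the real operator signature in reduction.cpp may differ) that matches the loop nest visible in the assembly: inner loop over size0, outer loop over size1, pointer advanced by stride1 floats per row.

// Sketch under assumptions: a function-pointer template parameter stands in
// for ncnn's reduction_op_* operators, which the compiler inlines per branch.
static float reduction_op_add(float x, float y)
{
    return x + y;
}

template<float (*op)(float, float)>
static float reduction(float v0, const float* ptr, int size0, int size1, int stride1)
{
    float sum = v0;
    for (int i = 0; i < size1; i++)
    {
        for (int j = 0; j < size0; j++)
        {
            sum = op(sum, ptr[j]);
        }
        ptr += stride1; // rows are stride1 floats apart
    }
    return sum;
}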
ncnn::softmax(float*, int, int, int, int, float*, float*)
static void softmax(float* _ptr, int elemcount, int elempack, int stride, int size1, float* _maxptr, float* _sumptr)
{
    // reduce max
    {
        float* maxptr = _maxptr;

        int j = 0;
#if __SSE2__
#if __AVX__
#if __AVX512F__
        __m512 _negmax_avx512 = _mm512_set1_ps(-FLT_MAX);
        for (; j + 15 < size1; j += 16)
        {
            _mm512_storeu_ps(maxptr, _negmax_avx512);
            maxptr += 16;
        }
#endif // __AVX512F__
        __m256 _negmax_avx = _mm256_set1_ps(-FLT_MAX);
        for (; j + 7 < size1; j += 8)
        {
            _mm256_storeu_ps(maxptr, _negmax_avx);
            maxptr += 8;
        }
#endif // __AVX__
        __m128 _negmax = _mm_set1_ps(-FLT_MAX);
        for (; j + 3 < size1; j += 4)
        {
            _mm_storeu_ps(maxptr, _negmax);
            maxptr += 4;
        }
#endif // __SSE2__
        for (; j < size1; j++)
        {
            *maxptr++ = -FLT_MAX;
        }
    }

    // reduce exp(x - max)
    {
        float* sumptr = _sumptr;

        int j = 0;
#if __SSE2__
#if __AVX__
#if __AVX512F__
        __m512 _zero_avx512 = _mm512_set1_ps(0.f);
        for (; j + 15 < size1; j += 16)
        {
            _mm512_storeu_ps(sumptr, _zero_avx512);
            sumptr += 16;
        }
#endif // __AVX512F__
        __m256 _zero_avx = _mm256_set1_ps(0.f);
        for (; j + 7 < size1; j += 8)
        {
            _mm256_storeu_ps(sumptr, _zero_avx);
            sumptr += 8;
        }
#endif // __AVX__
        __m128 _zero = _mm_set1_ps(0.f);
        for (; j + 3 < size1; j += 4)
        {
            _mm_storeu_ps(sumptr, _zero);
            sumptr += 4;
        }
#endif // __SSE2__
        for (; j < size1; j++)
        {
            *sumptr++ = 0.f;
        }
    }

#if __SSE2__
#if __AVX__
#if __AVX512F__
    if (elempack == 16)
    {
        softmax_pack16(_ptr, elemcount, stride, size1, _maxptr, _sumptr);
    }
#endif // __AVX512F__
    if (elempack == 8)
    {
        softmax_pack8(_ptr, elemcount, stride, size1, _maxptr, _sumptr);
    }
#endif // __AVX__
    if (elempack == 4)
    {
        softmax_pack4(_ptr, elemcount, stride, size1, _maxptr, _sumptr);
    }
#endif // __SSE2__
    if (elempack == 1)
    {
        softmax_pack1(_ptr, elemcount, stride, size1, _maxptr, _sumptr);
    }
}
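The two initialization passes above seed the row-wise reductions with their identities (-FLT_MAX for max, 0.f for the sum) so the pack-specific kernels can accumulate across elemcount in place. The max subtraction is the standard numerical-stability rewrite: softmax(x)_i = exp(x_i - max(x)) / sum_j exp(x_j - max(x)), which is algebraically equal to the naive form but keeps every exp argument <= 0, so the partial sums cannot overflow.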
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x4a8, %rsp # imm = 0x4A8 movq %r9, %r13 movl %r8d, %ebp movl %ecx, 0x20(%rsp) movl %edx, 0x60(%rsp) movq %rdi, %r12 xorl %ecx, %ecx movq %r9, %rax cmpl $0x10, %r8d jl 0x240a13 movl %ebp, %ecx andl $0x7ffffff0, %ecx # imm = 0x7FFFFFF0 movl $0xf, %edx vpbroadcastd 0x26cac9(%rip), %zmm0 # 0x4ad4c8 movq %r13, %rax vmovdqu64 %zmm0, (%rax) addq $0x40, %rax addl $0x10, %edx cmpl %ebp, %edx jl 0x240a02 movl %ecx, %edx orl $0x7, %edx cmpl %ebp, %edx jge 0x240a3b vpbroadcastd 0x26caa3(%rip), %ymm0 # 0x4ad4c8 vmovdqu %ymm0, (%rax) addq $0x20, %rax leal 0x8(%rcx), %edx addl $0xf, %ecx cmpl %ebp, %ecx movl %edx, %ecx jl 0x240a25 jmp 0x240a3d movl %ecx, %edx movl %edx, %ecx orl $0x3, %ecx cmpl %ebp, %ecx jge 0x240a65 vpbroadcastd 0x26ca79(%rip), %xmm0 # 0x4ad4c8 vmovdqu %xmm0, (%rax) addq $0x10, %rax leal 0x4(%rdx), %ecx addl $0x7, %edx cmpl %ebp, %edx movl %ecx, %edx jl 0x240a4f jmp 0x240a67 movl %edx, %ecx movl %esi, 0xc(%rsp) cmpl %ebp, %ecx jge 0x240ad5 notl %ecx addl %ebp, %ecx leaq 0x10(%rcx), %rdx andq $-0x10, %rdx vpbroadcastq %rcx, %zmm0 xorl %ecx, %ecx vpmovsxbq 0x265e91(%rip), %zmm1 # 0x4a691e vpmovsxbq 0x265e8f(%rip), %zmm2 # 0x4a6926 vbroadcastss 0x26ca27(%rip), %zmm3 # 0x4ad4c8 vpbroadcastq %rcx, %zmm4 vporq %zmm1, %zmm4, %zmm5 vporq %zmm2, %zmm4, %zmm4 vpcmpleuq %zmm0, %zmm4, %k0 vpcmpleuq %zmm0, %zmm5, %k1 kunpckbw %k0, %k1, %k1 vmovups %zmm3, (%rax,%rcx,4) {%k1} addq $0x10, %rcx cmpq %rcx, %rdx jne 0x240aa1 movq 0x4e0(%rsp), %r15 xorl %r14d, %r14d cmpl $0x10, %ebp jl 0x240b13 leal -0x10(%rbp), %r14d movl %r14d, %ebx shrl $0x4, %ebx shlq $0x6, %rbx leaq 0x40(%rbx), %rdx movq %r15, %rdi xorl %esi, %esi vzeroupper callq 0x24070 andl $-0x10, %r14d addq %rbx, %r15 addq $0x40, %r15 addl $0x10, %r14d movq %r13, 0x10(%rsp) movl %r14d, %eax orl $0x7, %eax movq %r12, %r13 cmpl %ebp, %eax jge 0x240b5b movl %ebp, %ebx subl %r14d, %ebx addl $-0x8, %ebx movl %ebx, %r12d shrl $0x3, %r12d shlq $0x5, %r12 leaq 0x20(%r12), %rdx movq %r15, %rdi xorl %esi, %esi vzeroupper callq 0x24070 andl $-0x8, %ebx addq %r12, %r15 addq $0x20, %r15 addl %ebx, %r14d addl $0x8, %r14d movl %r14d, %eax orl $0x3, %eax cmpl %ebp, %eax jge 0x240ba2 leal 0x7(%r14), %ebx cmpl %ebx, %ebp cmovgl %ebp, %ebx subl %r14d, %ebx addl $-0x4, %ebx movl %ebx, %r12d shrl $0x2, %r12d shlq $0x4, %r12 leaq 0x10(%r12), %rdx movq %r15, %rdi xorl %esi, %esi vzeroupper callq 0x24070 andl $-0x4, %ebx addq %r12, %r15 addq $0x10, %r15 addl %ebx, %r14d addl $0x4, %r14d movq %r13, %rbx movl 0x60(%rsp), %r12d cmpl %ebp, %r14d movq 0x10(%rsp), %r13 jge 0x240bcf notl %r14d addl %ebp, %r14d leaq 0x4(,%r14,4), %rdx movq %r15, %rdi xorl %esi, %esi vzeroupper callq 0x24070 cmpl $0x7, %r12d jg 0x241a87 cmpl $0x1, %r12d movq 0x4e0(%rsp), %r14 movl 0xc(%rsp), %r15d je 0x242c7a cmpl $0x4, %r12d jne 0x244ddb testl %r15d, %r15d jle 0x241a19 movl %ebp, %eax andl $-0x10, %eax movslq 0x20(%rsp), %rcx movl %r15d, %edx xorl %esi, %esi vmovaps 0x26ca24(%rip), %zmm0 # 0x4ad640 movq %rsi, %rdi imulq %rcx, %rdi leaq (%rbx,%rdi,4), %rdi cmpl $0x10, %ebp jl 0x240cb5 movl $0xf, %r9d movq %r13, %r8 vmovups (%rdi), %zmm1 vmovups 0x40(%rdi), %zmm2 vmovups 0x80(%rdi), %zmm3 vmovups 0xc0(%rdi), %zmm4 vunpcklps %zmm2, %zmm1, %zmm5 # zmm5 = zmm1[0],zmm2[0],zmm1[1],zmm2[1],zmm1[4],zmm2[4],zmm1[5],zmm2[5],zmm1[8],zmm2[8],zmm1[9],zmm2[9],zmm1[12],zmm2[12],zmm1[13],zmm2[13] vunpckhps %zmm2, %zmm1, %zmm1 # zmm1 = 
zmm1[2],zmm2[2],zmm1[3],zmm2[3],zmm1[6],zmm2[6],zmm1[7],zmm2[7],zmm1[10],zmm2[10],zmm1[11],zmm2[11],zmm1[14],zmm2[14],zmm1[15],zmm2[15] vmaxps %zmm1, %zmm5, %zmm1 vunpcklps %zmm4, %zmm3, %zmm2 # zmm2 = zmm3[0],zmm4[0],zmm3[1],zmm4[1],zmm3[4],zmm4[4],zmm3[5],zmm4[5],zmm3[8],zmm4[8],zmm3[9],zmm4[9],zmm3[12],zmm4[12],zmm3[13],zmm4[13] vunpckhps %zmm4, %zmm3, %zmm3 # zmm3 = zmm3[2],zmm4[2],zmm3[3],zmm4[3],zmm3[6],zmm4[6],zmm3[7],zmm4[7],zmm3[10],zmm4[10],zmm3[11],zmm4[11],zmm3[14],zmm4[14],zmm3[15],zmm4[15] vmaxps %zmm3, %zmm2, %zmm2 vunpcklps %zmm2, %zmm1, %zmm3 # zmm3 = zmm1[0],zmm2[0],zmm1[1],zmm2[1],zmm1[4],zmm2[4],zmm1[5],zmm2[5],zmm1[8],zmm2[8],zmm1[9],zmm2[9],zmm1[12],zmm2[12],zmm1[13],zmm2[13] vunpckhps %zmm2, %zmm1, %zmm1 # zmm1 = zmm1[2],zmm2[2],zmm1[3],zmm2[3],zmm1[6],zmm2[6],zmm1[7],zmm2[7],zmm1[10],zmm2[10],zmm1[11],zmm2[11],zmm1[14],zmm2[14],zmm1[15],zmm2[15] vmaxps %zmm1, %zmm3, %zmm1 vpermps %zmm1, %zmm0, %zmm1 vmaxps (%r8), %zmm1, %zmm1 vmovups %zmm1, (%r8) addq $0x100, %rdi # imm = 0x100 addq $0x40, %r8 addl $0x10, %r9d cmpl %ebp, %r9d jl 0x240c39 movl %eax, %r9d jmp 0x240cbb xorl %r9d, %r9d movq %r13, %r8 movl %r9d, %r10d orl $0x7, %r10d cmpl %ebp, %r10d jge 0x240d2c vmovups (%rdi), %ymm1 vmovups 0x20(%rdi), %ymm2 vmovups 0x40(%rdi), %ymm3 vmovups 0x60(%rdi), %ymm4 vunpcklps %ymm2, %ymm1, %ymm5 # ymm5 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[4],ymm2[4],ymm1[5],ymm2[5] vunpckhps %ymm2, %ymm1, %ymm1 # ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7] vmaxps %ymm1, %ymm5, %ymm1 vunpcklps %ymm4, %ymm3, %ymm2 # ymm2 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[4],ymm4[4],ymm3[5],ymm4[5] vunpckhps %ymm4, %ymm3, %ymm3 # ymm3 = ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[6],ymm4[6],ymm3[7],ymm4[7] vmaxps %ymm3, %ymm2, %ymm2 vinsertf128 $0x1, %xmm2, %ymm1, %ymm3 vperm2f128 $0x31, %ymm2, %ymm1, %ymm1 # ymm1 = ymm1[2,3],ymm2[2,3] vunpcklps %ymm1, %ymm3, %ymm2 # ymm2 = ymm3[0],ymm1[0],ymm3[1],ymm1[1],ymm3[4],ymm1[4],ymm3[5],ymm1[5] vunpckhps %ymm1, %ymm3, %ymm1 # ymm1 = ymm3[2],ymm1[2],ymm3[3],ymm1[3],ymm3[6],ymm1[6],ymm3[7],ymm1[7] vmaxps %ymm1, %ymm2, %ymm1 vmaxps (%r8), %ymm1, %ymm1 vmovups %ymm1, (%r8) subq $-0x80, %rdi addq $0x20, %r8 leal 0x8(%r9), %r10d addl $0xf, %r9d cmpl %ebp, %r9d movl %r10d, %r9d jl 0x240cc7 movl %r9d, %r10d orl $0x3, %r10d cmpl %ebp, %r10d jge 0x240d99 vmovups (%rdi), %xmm1 vmovups 0x10(%rdi), %xmm2 vmovups 0x20(%rdi), %xmm3 vmovups 0x30(%rdi), %xmm4 vunpcklps %xmm2, %xmm1, %xmm5 # xmm5 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] vunpcklps %xmm4, %xmm3, %xmm6 # xmm6 = xmm3[0],xmm4[0],xmm3[1],xmm4[1] vunpckhps %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3] vunpckhps %xmm4, %xmm3, %xmm2 # xmm2 = xmm3[2],xmm4[2],xmm3[3],xmm4[3] vmovlhps %xmm6, %xmm5, %xmm3 # xmm3 = xmm5[0],xmm6[0] vunpckhpd %xmm6, %xmm5, %xmm4 # xmm4 = xmm5[1],xmm6[1] vmaxps %xmm4, %xmm3, %xmm3 vmovlhps %xmm2, %xmm1, %xmm4 # xmm4 = xmm1[0],xmm2[0] vunpckhpd %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[1],xmm2[1] vmaxps %xmm1, %xmm4, %xmm1 vmaxps %xmm1, %xmm3, %xmm1 vmaxps (%r8), %xmm1, %xmm1 vmovups %xmm1, (%r8) addq $0x40, %rdi addq $0x10, %r8 leal 0x4(%r9), %r10d addl $0x7, %r9d cmpl %ebp, %r9d movl %r10d, %r9d jl 0x240d38 movl %ebp, %r10d subl %r9d, %r10d jle 0x240dcd xorl %r9d, %r9d vmovddup 0x8(%rdi), %xmm1 # xmm1 = mem[0,0] vmaxps (%rdi), %xmm1, %xmm1 vmovshdup %xmm1, %xmm2 # xmm2 = xmm1[1,1,3,3] vmaxss %xmm2, %xmm1, %xmm1 vmaxss (%r8,%r9,4), %xmm1, %xmm1 vmovss %xmm1, (%r8,%r9,4) addq $0x10, %rdi incq %r9 cmpl %r9d, %r10d jne 0x240da4 incq %rsi cmpq %rdx, %rsi jne 0x240c1c vmovaps 
0x26cb1d(%rip), %zmm0 # 0x4ad900 vmovaps 0x26cb53(%rip), %zmm26 # 0x4ad940 vmovaps 0x26cb89(%rip), %zmm28 # 0x4ad980 vmovaps 0x26cbbf(%rip), %zmm29 # 0x4ad9c0 vbroadcastss 0x265e79(%rip), %zmm1 # 0x4a6c84 vxorps 0x26cbeb(%rip), %zmm1, %zmm4 # 0x4ada00 vmovaps 0x26cda1(%rip), %zmm5 # 0x4adbc0 vxorps 0x26cc17(%rip), %zmm1, %zmm6 # 0x4ada40 vmovaps 0x26cc4d(%rip), %zmm22 # 0x4ada80 vmovaps 0x26cc83(%rip), %zmm30 # 0x4adac0 vmovaps 0x26ccb9(%rip), %zmm31 # 0x4adb00 vmovaps 0x26ccef(%rip), %zmm27 # 0x4adb40 vmovaps 0x26cd25(%rip), %zmm17 # 0x4adb80 vmovdqa64 0x26cd9b(%rip), %zmm1 # 0x4adc00 xorl %esi, %esi vmovaps 0x26c80f(%rip), %zmm13 # 0x4ad680 vmovaps 0x26c845(%rip), %zmm14 # 0x4ad6c0 vmovaps 0x26c87b(%rip), %zmm18 # 0x4ad700 vmovaps 0x26c8b1(%rip), %zmm2 # 0x4ad740 vmovaps 0x26c8e7(%rip), %zmm8 # 0x4ad780 vmovaps 0x26c91d(%rip), %zmm15 # 0x4ad7c0 vbroadcastss 0x264227(%rip), %ymm21 # 0x4a50d4 vbroadcastss 0x26421d(%rip), %xmm16 # 0x4a50d4 vbroadcastss 0x265ddc(%rip), %xmm7 # 0x4a6c9c vbroadcastss 0x265ddf(%rip), %xmm3 # 0x4a6ca8 vbroadcastss 0x265dda(%rip), %xmm9 # 0x4a6cac vpbroadcastd 0x2641f5(%rip), %xmm10 # 0x4a50d0 movq %rsi, %rdi imulq %rcx, %rdi leaq (%rbx,%rdi,4), %rdi cmpl $0x10, %ebp jl 0x2411ec movl $0xf, %r10d movq %r13, %r9 movq %r14, %r8 vmovups (%rdi), %zmm11 vmovups 0x40(%rdi), %zmm12 vmovups 0x80(%rdi), %zmm19 vmovups 0xc0(%rdi), %zmm20 vmovups (%r9), %zmm23 vpermps %zmm23, %zmm13, %zmm24 vsubps %zmm24, %zmm11, %zmm11 vpermps %zmm23, %zmm14, %zmm24 vsubps %zmm24, %zmm12, %zmm24 vpermps %zmm23, %zmm18, %zmm12 vsubps %zmm12, %zmm19, %zmm25 vpermps %zmm23, %zmm2, %zmm12 vsubps %zmm12, %zmm20, %zmm12 vminps %zmm26, %zmm11, %zmm11 vmaxps %zmm28, %zmm11, %zmm11 vmovaps %zmm29, %zmm19 vfmadd213ps %zmm5, %zmm11, %zmm19 # zmm19 = (zmm11 * zmm19) + zmm5 vrndscaleps $0x1, %zmm19, %zmm20 vcmpltps %zmm20, %zmm19, %k1 vsubps %zmm0, %zmm20, %zmm20 {%k1} vfmadd231ps %zmm4, %zmm20, %zmm11 # zmm11 = (zmm20 * zmm4) + zmm11 vfmadd231ps %zmm6, %zmm20, %zmm11 # zmm11 = (zmm20 * zmm6) + zmm11 vmulps %zmm11, %zmm11, %zmm19 vmovaps %zmm11, %zmm23 vfmadd213ps %zmm30, %zmm22, %zmm23 # zmm23 = (zmm22 * zmm23) + zmm30 vfmadd213ps %zmm31, %zmm11, %zmm23 # zmm23 = (zmm11 * zmm23) + zmm31 vfmadd213ps %zmm27, %zmm11, %zmm23 # zmm23 = (zmm11 * zmm23) + zmm27 vfmadd213ps %zmm17, %zmm11, %zmm23 # zmm23 = (zmm11 * zmm23) + zmm17 vfmadd213ps %zmm5, %zmm11, %zmm23 # zmm23 = (zmm11 * zmm23) + zmm5 vfmadd213ps %zmm11, %zmm19, %zmm23 # zmm23 = (zmm19 * zmm23) + zmm11 vaddps %zmm0, %zmm23, %zmm11 vcvttps2dq %zmm20, %zmm19 vpaddd %zmm1, %zmm19, %zmm19 vpslld $0x17, %zmm19, %zmm19 vmulps %zmm19, %zmm11, %zmm11 vminps %zmm26, %zmm24, %zmm19 vmaxps %zmm28, %zmm19, %zmm19 vmovaps %zmm29, %zmm20 vfmadd213ps %zmm5, %zmm19, %zmm20 # zmm20 = (zmm19 * zmm20) + zmm5 vrndscaleps $0x1, %zmm20, %zmm23 vcmpltps %zmm23, %zmm20, %k1 vsubps %zmm0, %zmm23, %zmm23 {%k1} vfmadd231ps %zmm4, %zmm23, %zmm19 # zmm19 = (zmm23 * zmm4) + zmm19 vfmadd231ps %zmm6, %zmm23, %zmm19 # zmm19 = (zmm23 * zmm6) + zmm19 vmulps %zmm19, %zmm19, %zmm20 vmovaps %zmm19, %zmm24 vfmadd213ps %zmm30, %zmm22, %zmm24 # zmm24 = (zmm22 * zmm24) + zmm30 vfmadd213ps %zmm31, %zmm19, %zmm24 # zmm24 = (zmm19 * zmm24) + zmm31 vfmadd213ps %zmm27, %zmm19, %zmm24 # zmm24 = (zmm19 * zmm24) + zmm27 vfmadd213ps %zmm17, %zmm19, %zmm24 # zmm24 = (zmm19 * zmm24) + zmm17 vfmadd213ps %zmm5, %zmm19, %zmm24 # zmm24 = (zmm19 * zmm24) + zmm5 vfmadd213ps %zmm19, %zmm20, %zmm24 # zmm24 = (zmm20 * zmm24) + zmm19 vaddps %zmm0, %zmm24, %zmm19 vcvttps2dq %zmm23, %zmm20 vpaddd %zmm1, %zmm20, 
%zmm20 vpslld $0x17, %zmm20, %zmm20 vmulps %zmm20, %zmm19, %zmm19 vminps %zmm26, %zmm25, %zmm20 vmaxps %zmm28, %zmm20, %zmm20 vmovaps %zmm29, %zmm23 vfmadd213ps %zmm5, %zmm20, %zmm23 # zmm23 = (zmm20 * zmm23) + zmm5 vrndscaleps $0x1, %zmm23, %zmm24 vcmpltps %zmm24, %zmm23, %k1 vsubps %zmm0, %zmm24, %zmm24 {%k1} vfmadd231ps %zmm4, %zmm24, %zmm20 # zmm20 = (zmm24 * zmm4) + zmm20 vfmadd231ps %zmm6, %zmm24, %zmm20 # zmm20 = (zmm24 * zmm6) + zmm20 vmulps %zmm20, %zmm20, %zmm23 vmovaps %zmm20, %zmm25 vfmadd213ps %zmm30, %zmm22, %zmm25 # zmm25 = (zmm22 * zmm25) + zmm30 vfmadd213ps %zmm31, %zmm20, %zmm25 # zmm25 = (zmm20 * zmm25) + zmm31 vfmadd213ps %zmm27, %zmm20, %zmm25 # zmm25 = (zmm20 * zmm25) + zmm27 vfmadd213ps %zmm17, %zmm20, %zmm25 # zmm25 = (zmm20 * zmm25) + zmm17 vfmadd213ps %zmm5, %zmm20, %zmm25 # zmm25 = (zmm20 * zmm25) + zmm5 vfmadd213ps %zmm20, %zmm23, %zmm25 # zmm25 = (zmm23 * zmm25) + zmm20 vaddps %zmm0, %zmm25, %zmm20 vcvttps2dq %zmm24, %zmm23 vpaddd %zmm1, %zmm23, %zmm23 vpslld $0x17, %zmm23, %zmm23 vmulps %zmm23, %zmm20, %zmm20 vminps %zmm26, %zmm12, %zmm12 vmaxps %zmm28, %zmm12, %zmm12 vmovaps %zmm29, %zmm23 vfmadd213ps %zmm5, %zmm12, %zmm23 # zmm23 = (zmm12 * zmm23) + zmm5 vrndscaleps $0x1, %zmm23, %zmm24 vcmpltps %zmm24, %zmm23, %k1 vsubps %zmm0, %zmm24, %zmm24 {%k1} vfmadd231ps %zmm4, %zmm24, %zmm12 # zmm12 = (zmm24 * zmm4) + zmm12 vfmadd231ps %zmm6, %zmm24, %zmm12 # zmm12 = (zmm24 * zmm6) + zmm12 vmulps %zmm12, %zmm12, %zmm23 vmovaps %zmm12, %zmm25 vfmadd213ps %zmm30, %zmm22, %zmm25 # zmm25 = (zmm22 * zmm25) + zmm30 vfmadd213ps %zmm31, %zmm12, %zmm25 # zmm25 = (zmm12 * zmm25) + zmm31 vfmadd213ps %zmm27, %zmm12, %zmm25 # zmm25 = (zmm12 * zmm25) + zmm27 vfmadd213ps %zmm17, %zmm12, %zmm25 # zmm25 = (zmm12 * zmm25) + zmm17 vfmadd213ps %zmm5, %zmm12, %zmm25 # zmm25 = (zmm12 * zmm25) + zmm5 vfmadd213ps %zmm12, %zmm23, %zmm25 # zmm25 = (zmm23 * zmm25) + zmm12 vaddps %zmm0, %zmm25, %zmm12 vcvttps2dq %zmm24, %zmm23 vpaddd %zmm1, %zmm23, %zmm23 vpslld $0x17, %zmm23, %zmm23 vmulps %zmm23, %zmm12, %zmm12 vmovups %zmm11, (%rdi) vmovups %zmm19, 0x40(%rdi) vmovups %zmm20, 0x80(%rdi) vmovups %zmm12, 0xc0(%rdi) vunpcklps %zmm19, %zmm11, %zmm23 # zmm23 = zmm11[0],zmm19[0],zmm11[1],zmm19[1],zmm11[4],zmm19[4],zmm11[5],zmm19[5],zmm11[8],zmm19[8],zmm11[9],zmm19[9],zmm11[12],zmm19[12],zmm11[13],zmm19[13] vunpckhps %zmm19, %zmm11, %zmm11 # zmm11 = zmm11[2],zmm19[2],zmm11[3],zmm19[3],zmm11[6],zmm19[6],zmm11[7],zmm19[7],zmm11[10],zmm19[10],zmm11[11],zmm19[11],zmm11[14],zmm19[14],zmm11[15],zmm19[15] vaddps %zmm11, %zmm23, %zmm11 vunpcklps %zmm12, %zmm20, %zmm19 # zmm19 = zmm20[0],zmm12[0],zmm20[1],zmm12[1],zmm20[4],zmm12[4],zmm20[5],zmm12[5],zmm20[8],zmm12[8],zmm20[9],zmm12[9],zmm20[12],zmm12[12],zmm20[13],zmm12[13] vunpckhps %zmm12, %zmm20, %zmm12 # zmm12 = zmm20[2],zmm12[2],zmm20[3],zmm12[3],zmm20[6],zmm12[6],zmm20[7],zmm12[7],zmm20[10],zmm12[10],zmm20[11],zmm12[11],zmm20[14],zmm12[14],zmm20[15],zmm12[15] vaddps %zmm12, %zmm19, %zmm12 vmovaps %zmm11, %zmm19 vpermt2ps %zmm12, %zmm8, %zmm19 vpermt2ps %zmm12, %zmm15, %zmm11 vaddps %zmm19, %zmm11, %zmm11 vaddps (%r8), %zmm11, %zmm11 vmovups %zmm11, (%r8) addq $0x100, %rdi # imm = 0x100 addq $0x40, %r9 addq $0x40, %r8 addl $0x10, %r10d cmpl %ebp, %r10d jl 0x240efb movl %eax, %r11d jmp 0x2411f5 xorl %r11d, %r11d movq %r14, %r8 movq %r13, %r9 movl %r11d, %r10d orl $0x7, %r10d cmpl %ebp, %r10d vbroadcastss 0x265a88(%rip), %xmm1 # 0x4a6c90 vbroadcastss 0x265a77(%rip), %ymm2 # 0x4a6c88 vbroadcastss 0x265a72(%rip), %ymm8 # 0x4a6c8c vbroadcastss 
0x265a6d(%rip), %ymm13 # 0x4a6c90 vbroadcastss 0x265a70(%rip), %ymm14 # 0x4a6c9c vbroadcastss 0x263e9b(%rip), %ymm15 # 0x4a50d0 vbroadcastss 0x265a55(%rip), %ymm17 # 0x4a6c94 vbroadcastss 0x265a4f(%rip), %ymm18 # 0x4a6c98 vbroadcastss 0x265a4d(%rip), %ymm22 # 0x4a6ca0 vbroadcastss 0x265a4f(%rip), %ymm26 # 0x4a6cac vpbroadcastd 0x263e69(%rip), %ymm27 # 0x4a50d0 vbroadcastss 0x265a33(%rip), %ymm28 # 0x4a6ca4 vbroadcastss 0x265a2d(%rip), %ymm29 # 0x4a6ca8 jge 0x241582 movl %r11d, %r10d vmovups (%rdi), %ymm11 vmovups 0x20(%rdi), %ymm19 vmovups 0x40(%rdi), %ymm20 vmovups 0x60(%rdi), %ymm12 vbroadcastss (%r9), %xmm23 vbroadcastss 0x4(%r9), %xmm24 vinsertf32x4 $0x1, %xmm24, %ymm23, %ymm23 vsubps %ymm23, %ymm11, %ymm11 vminps %ymm2, %ymm11, %ymm11 vmaxps %ymm8, %ymm11, %ymm11 vmovaps %ymm13, %ymm23 vfmadd213ps %ymm21, %ymm11, %ymm23 # ymm23 = (ymm11 * ymm23) + ymm21 vrndscaleps $0x1, %ymm23, %ymm24 vcmpltps %ymm24, %ymm23, %k1 vsubps %ymm15, %ymm24, %ymm24 {%k1} vfmsub231ps %ymm17, %ymm24, %ymm11 # ymm11 = (ymm24 * ymm17) - ymm11 vfnmsub231ps %ymm18, %ymm24, %ymm11 # ymm11 = -(ymm24 * ymm18) - ymm11 vmulps %ymm11, %ymm11, %ymm23 vmovaps %ymm14, %ymm25 vfmadd213ps %ymm22, %ymm11, %ymm25 # ymm25 = (ymm11 * ymm25) + ymm22 vfmadd213ps %ymm28, %ymm11, %ymm25 # ymm25 = (ymm11 * ymm25) + ymm28 vfmadd213ps %ymm29, %ymm11, %ymm25 # ymm25 = (ymm11 * ymm25) + ymm29 vfmadd213ps %ymm26, %ymm11, %ymm25 # ymm25 = (ymm11 * ymm25) + ymm26 vfmadd213ps %ymm21, %ymm11, %ymm25 # ymm25 = (ymm11 * ymm25) + ymm21 vfmadd213ps %ymm11, %ymm23, %ymm25 # ymm25 = (ymm23 * ymm25) + ymm11 vcvttps2dq %ymm24, %ymm11 vpslld $0x17, %ymm11, %ymm11 vpaddd %ymm27, %ymm11, %ymm11 vbroadcastss 0x8(%r9), %xmm23 vfmadd213ps %ymm11, %ymm25, %ymm11 # ymm11 = (ymm25 * ymm11) + ymm11 vbroadcastss 0xc(%r9), %xmm24 vinsertf32x4 $0x1, %xmm24, %ymm23, %ymm23 vsubps %ymm23, %ymm19, %ymm19 vminps %ymm2, %ymm19, %ymm19 vmaxps %ymm8, %ymm19, %ymm19 vmovaps %ymm13, %ymm23 vfmadd213ps %ymm21, %ymm19, %ymm23 # ymm23 = (ymm19 * ymm23) + ymm21 vrndscaleps $0x1, %ymm23, %ymm24 vcmpltps %ymm24, %ymm23, %k1 vsubps %ymm15, %ymm24, %ymm24 {%k1} vfmsub231ps %ymm17, %ymm24, %ymm19 # ymm19 = (ymm24 * ymm17) - ymm19 vfnmsub231ps %ymm18, %ymm24, %ymm19 # ymm19 = -(ymm24 * ymm18) - ymm19 vmulps %ymm19, %ymm19, %ymm23 vmovaps %ymm14, %ymm25 vfmadd213ps %ymm22, %ymm19, %ymm25 # ymm25 = (ymm19 * ymm25) + ymm22 vfmadd213ps %ymm28, %ymm19, %ymm25 # ymm25 = (ymm19 * ymm25) + ymm28 vfmadd213ps %ymm29, %ymm19, %ymm25 # ymm25 = (ymm19 * ymm25) + ymm29 vfmadd213ps %ymm26, %ymm19, %ymm25 # ymm25 = (ymm19 * ymm25) + ymm26 vfmadd213ps %ymm21, %ymm19, %ymm25 # ymm25 = (ymm19 * ymm25) + ymm21 vfmadd213ps %ymm19, %ymm23, %ymm25 # ymm25 = (ymm23 * ymm25) + ymm19 vcvttps2dq %ymm24, %ymm19 vpslld $0x17, %ymm19, %ymm19 vpaddd %ymm27, %ymm19, %ymm19 vfmadd213ps %ymm19, %ymm25, %ymm19 # ymm19 = (ymm25 * ymm19) + ymm19 vbroadcastss 0x10(%r9), %xmm23 vbroadcastss 0x14(%r9), %xmm24 vinsertf32x4 $0x1, %xmm24, %ymm23, %ymm23 vsubps %ymm23, %ymm20, %ymm20 vminps %ymm2, %ymm20, %ymm20 vmaxps %ymm8, %ymm20, %ymm20 vmovaps %ymm13, %ymm23 vfmadd213ps %ymm21, %ymm20, %ymm23 # ymm23 = (ymm20 * ymm23) + ymm21 vrndscaleps $0x1, %ymm23, %ymm24 vcmpltps %ymm24, %ymm23, %k1 vsubps %ymm15, %ymm24, %ymm24 {%k1} vfmsub231ps %ymm17, %ymm24, %ymm20 # ymm20 = (ymm24 * ymm17) - ymm20 vfnmsub231ps %ymm18, %ymm24, %ymm20 # ymm20 = -(ymm24 * ymm18) - ymm20 vmulps %ymm20, %ymm20, %ymm23 vmovaps %ymm14, %ymm25 vfmadd213ps %ymm22, %ymm20, %ymm25 # ymm25 = (ymm20 * ymm25) + ymm22 vfmadd213ps %ymm28, %ymm20, %ymm25 # 
ymm25 = (ymm20 * ymm25) + ymm28 vfmadd213ps %ymm29, %ymm20, %ymm25 # ymm25 = (ymm20 * ymm25) + ymm29 vfmadd213ps %ymm26, %ymm20, %ymm25 # ymm25 = (ymm20 * ymm25) + ymm26 vfmadd213ps %ymm21, %ymm20, %ymm25 # ymm25 = (ymm20 * ymm25) + ymm21 vfmadd213ps %ymm20, %ymm23, %ymm25 # ymm25 = (ymm23 * ymm25) + ymm20 vcvttps2dq %ymm24, %ymm20 vpslld $0x17, %ymm20, %ymm20 vpaddd %ymm27, %ymm20, %ymm20 vbroadcastss 0x18(%r9), %xmm23 vbroadcastss 0x1c(%r9), %xmm24 vfmadd213ps %ymm20, %ymm25, %ymm20 # ymm20 = (ymm25 * ymm20) + ymm20 vinsertf32x4 $0x1, %xmm24, %ymm23, %ymm23 vsubps %ymm23, %ymm12, %ymm12 vminps %ymm2, %ymm12, %ymm12 vmaxps %ymm8, %ymm12, %ymm12 vmovaps %ymm13, %ymm23 vfmadd213ps %ymm21, %ymm12, %ymm23 # ymm23 = (ymm12 * ymm23) + ymm21 vrndscaleps $0x1, %ymm23, %ymm24 vcmpltps %ymm24, %ymm23, %k1 vsubps %ymm15, %ymm24, %ymm24 {%k1} vfmsub231ps %ymm17, %ymm24, %ymm12 # ymm12 = (ymm24 * ymm17) - ymm12 vfnmsub231ps %ymm18, %ymm24, %ymm12 # ymm12 = -(ymm24 * ymm18) - ymm12 vmulps %ymm12, %ymm12, %ymm23 vmovaps %ymm14, %ymm25 vfmadd213ps %ymm22, %ymm12, %ymm25 # ymm25 = (ymm12 * ymm25) + ymm22 vfmadd213ps %ymm28, %ymm12, %ymm25 # ymm25 = (ymm12 * ymm25) + ymm28 vfmadd213ps %ymm29, %ymm12, %ymm25 # ymm25 = (ymm12 * ymm25) + ymm29 vfmadd213ps %ymm26, %ymm12, %ymm25 # ymm25 = (ymm12 * ymm25) + ymm26 vfmadd213ps %ymm21, %ymm12, %ymm25 # ymm25 = (ymm12 * ymm25) + ymm21 vfmadd213ps %ymm12, %ymm23, %ymm25 # ymm25 = (ymm23 * ymm25) + ymm12 vcvttps2dq %ymm24, %ymm12 vpslld $0x17, %ymm12, %ymm12 vpaddd %ymm27, %ymm12, %ymm12 vfmadd213ps %ymm12, %ymm25, %ymm12 # ymm12 = (ymm25 * ymm12) + ymm12 vmovups %ymm11, (%rdi) vmovups %ymm19, 0x20(%rdi) vmovups %ymm20, 0x40(%rdi) vmovups %ymm12, 0x60(%rdi) vunpcklps %ymm19, %ymm11, %ymm23 # ymm23 = ymm11[0],ymm19[0],ymm11[1],ymm19[1],ymm11[4],ymm19[4],ymm11[5],ymm19[5] vunpckhps %ymm19, %ymm11, %ymm11 # ymm11 = ymm11[2],ymm19[2],ymm11[3],ymm19[3],ymm11[6],ymm19[6],ymm11[7],ymm19[7] vaddps %ymm11, %ymm23, %ymm11 vunpcklps %ymm12, %ymm20, %ymm19 # ymm19 = ymm20[0],ymm12[0],ymm20[1],ymm12[1],ymm20[4],ymm12[4],ymm20[5],ymm12[5] vunpckhps %ymm12, %ymm20, %ymm12 # ymm12 = ymm20[2],ymm12[2],ymm20[3],ymm12[3],ymm20[6],ymm12[6],ymm20[7],ymm12[7] vaddps %ymm12, %ymm19, %ymm12 vinsertf32x4 $0x1, %xmm12, %ymm11, %ymm19 vperm2f128 $0x31, %ymm12, %ymm11, %ymm11 # ymm11 = ymm11[2,3],ymm12[2,3] vunpcklps %ymm11, %ymm19, %ymm12 # ymm12 = ymm19[0],ymm11[0],ymm19[1],ymm11[1],ymm19[4],ymm11[4],ymm19[5],ymm11[5] vunpckhps %ymm11, %ymm19, %ymm11 # ymm11 = ymm19[2],ymm11[2],ymm19[3],ymm11[3],ymm19[6],ymm11[6],ymm19[7],ymm11[7] vaddps (%r8), %ymm11, %ymm11 vaddps %ymm12, %ymm11, %ymm11 vmovups %ymm11, (%r8) subq $-0x80, %rdi addq $0x20, %r9 addq $0x20, %r8 leal 0x8(%r10), %r11d addl $0xf, %r10d cmpl %ebp, %r10d jl 0x241281 movl %r11d, %r10d orl $0x3, %r10d cmpl %ebp, %r10d vbroadcastss 0x2656f7(%rip), %xmm2 # 0x4a6c8c vbroadcastss 0x2656ea(%rip), %xmm8 # 0x4a6c88 vbroadcastss 0x2656ed(%rip), %xmm13 # 0x4a6c94 vbroadcastss 0x2656f0(%rip), %xmm14 # 0x4a6ca0 vbroadcastss 0x2656eb(%rip), %xmm15 # 0x4a6ca4 vbroadcastss 0x263b0d(%rip), %xmm17 # 0x4a50d0 vbroadcastss 0x2656cb(%rip), %xmm18 # 0x4a6c98 jge 0x241895 movl %r11d, %r10d vmovups (%rdi), %xmm11 vmovups 0x10(%rdi), %xmm19 vmovups 0x20(%rdi), %xmm20 vmovups 0x30(%rdi), %xmm12 vsubps (%r9){1to4}, %xmm11, %xmm11 vminps %xmm8, %xmm11, %xmm11 vmaxps %xmm2, %xmm11, %xmm11 vmovaps %xmm1, %xmm23 vfmadd213ps %xmm16, %xmm11, %xmm23 # xmm23 = (xmm11 * xmm23) + xmm16 vcvttps2dq %xmm23, %xmm24 vcvtdq2ps %xmm24, %xmm24 vcmpltps %xmm24, %xmm23, %k1 
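# NOTE (annotation; a reading of the code, not from the source): in this
# SSE-width tail the vcvttps2dq/vcvtdq2ps pair above together with the masked
# vsubps below emulate floor(), where the wider paths use vrndscaleps:
#   n = trunc(t);  if (t < n) n -= 1;
# The two fused multiply-subtracts that follow then perform the usual
# two-part range reduction by multiples of ln 2 before the polynomial.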
vsubps %xmm17, %xmm24, %xmm24 {%k1} vfmsub231ps %xmm13, %xmm24, %xmm11 # xmm11 = (xmm24 * xmm13) - xmm11 vfnmsub231ps %xmm18, %xmm24, %xmm11 # xmm11 = -(xmm24 * xmm18) - xmm11 vmulps %xmm11, %xmm11, %xmm23 vmovaps %xmm7, %xmm25 vfmadd213ps %xmm14, %xmm11, %xmm25 # xmm25 = (xmm11 * xmm25) + xmm14 vfmadd213ps %xmm15, %xmm11, %xmm25 # xmm25 = (xmm11 * xmm25) + xmm15 vfmadd213ps %xmm3, %xmm11, %xmm25 # xmm25 = (xmm11 * xmm25) + xmm3 vfmadd213ps %xmm9, %xmm11, %xmm25 # xmm25 = (xmm11 * xmm25) + xmm9 vfmadd213ps %xmm16, %xmm11, %xmm25 # xmm25 = (xmm11 * xmm25) + xmm16 vfmadd213ps %xmm11, %xmm23, %xmm25 # xmm25 = (xmm23 * xmm25) + xmm11 vcvttps2dq %xmm24, %xmm11 vpslld $0x17, %xmm11, %xmm11 vpaddd %xmm10, %xmm11, %xmm11 vfmadd213ps %xmm11, %xmm25, %xmm11 # xmm11 = (xmm25 * xmm11) + xmm11 vsubps 0x4(%r9){1to4}, %xmm19, %xmm19 vminps %xmm8, %xmm19, %xmm19 vmaxps %xmm2, %xmm19, %xmm19 vmovaps %xmm1, %xmm23 vfmadd213ps %xmm16, %xmm19, %xmm23 # xmm23 = (xmm19 * xmm23) + xmm16 vcvttps2dq %xmm23, %xmm24 vcvtdq2ps %xmm24, %xmm24 vcmpltps %xmm24, %xmm23, %k1 vsubps %xmm17, %xmm24, %xmm24 {%k1} vfmsub231ps %xmm13, %xmm24, %xmm19 # xmm19 = (xmm24 * xmm13) - xmm19 vfnmsub231ps %xmm18, %xmm24, %xmm19 # xmm19 = -(xmm24 * xmm18) - xmm19 vmulps %xmm19, %xmm19, %xmm23 vmovaps %xmm7, %xmm25 vfmadd213ps %xmm14, %xmm19, %xmm25 # xmm25 = (xmm19 * xmm25) + xmm14 vfmadd213ps %xmm15, %xmm19, %xmm25 # xmm25 = (xmm19 * xmm25) + xmm15 vfmadd213ps %xmm3, %xmm19, %xmm25 # xmm25 = (xmm19 * xmm25) + xmm3 vfmadd213ps %xmm9, %xmm19, %xmm25 # xmm25 = (xmm19 * xmm25) + xmm9 vfmadd213ps %xmm16, %xmm19, %xmm25 # xmm25 = (xmm19 * xmm25) + xmm16 vfmadd213ps %xmm19, %xmm23, %xmm25 # xmm25 = (xmm23 * xmm25) + xmm19 vcvttps2dq %xmm24, %xmm19 vpslld $0x17, %xmm19, %xmm19 vpaddd %xmm10, %xmm19, %xmm19 vfmadd213ps %xmm19, %xmm25, %xmm19 # xmm19 = (xmm25 * xmm19) + xmm19 vsubps 0x8(%r9){1to4}, %xmm20, %xmm20 vminps %xmm8, %xmm20, %xmm20 vmaxps %xmm2, %xmm20, %xmm20 vmovaps %xmm1, %xmm23 vfmadd213ps %xmm16, %xmm20, %xmm23 # xmm23 = (xmm20 * xmm23) + xmm16 vcvttps2dq %xmm23, %xmm24 vcvtdq2ps %xmm24, %xmm24 vcmpltps %xmm24, %xmm23, %k1 vsubps %xmm17, %xmm24, %xmm24 {%k1} vfmsub231ps %xmm13, %xmm24, %xmm20 # xmm20 = (xmm24 * xmm13) - xmm20 vfnmsub231ps %xmm18, %xmm24, %xmm20 # xmm20 = -(xmm24 * xmm18) - xmm20 vmulps %xmm20, %xmm20, %xmm23 vmovaps %xmm7, %xmm25 vfmadd213ps %xmm14, %xmm20, %xmm25 # xmm25 = (xmm20 * xmm25) + xmm14 vfmadd213ps %xmm15, %xmm20, %xmm25 # xmm25 = (xmm20 * xmm25) + xmm15 vfmadd213ps %xmm3, %xmm20, %xmm25 # xmm25 = (xmm20 * xmm25) + xmm3 vfmadd213ps %xmm9, %xmm20, %xmm25 # xmm25 = (xmm20 * xmm25) + xmm9 vfmadd213ps %xmm16, %xmm20, %xmm25 # xmm25 = (xmm20 * xmm25) + xmm16 vfmadd213ps %xmm20, %xmm23, %xmm25 # xmm25 = (xmm23 * xmm25) + xmm20 vcvttps2dq %xmm24, %xmm20 vpslld $0x17, %xmm20, %xmm20 vpaddd %xmm10, %xmm20, %xmm20 vfmadd213ps %xmm20, %xmm25, %xmm20 # xmm20 = (xmm25 * xmm20) + xmm20 vsubps 0xc(%r9){1to4}, %xmm12, %xmm12 vminps %xmm8, %xmm12, %xmm12 vmaxps %xmm2, %xmm12, %xmm12 vmovaps %xmm1, %xmm23 vfmadd213ps %xmm16, %xmm12, %xmm23 # xmm23 = (xmm12 * xmm23) + xmm16 vcvttps2dq %xmm23, %xmm24 vcvtdq2ps %xmm24, %xmm24 vcmpltps %xmm24, %xmm23, %k1 vsubps %xmm17, %xmm24, %xmm24 {%k1} vfmsub231ps %xmm13, %xmm24, %xmm12 # xmm12 = (xmm24 * xmm13) - xmm12 vfnmsub231ps %xmm18, %xmm24, %xmm12 # xmm12 = -(xmm24 * xmm18) - xmm12 vmulps %xmm12, %xmm12, %xmm23 vmovaps %xmm7, %xmm25 vfmadd213ps %xmm14, %xmm12, %xmm25 # xmm25 = (xmm12 * xmm25) + xmm14 vfmadd213ps %xmm15, %xmm12, %xmm25 # xmm25 = (xmm12 * xmm25) + xmm15 vfmadd213ps 
%xmm3, %xmm12, %xmm25 # xmm25 = (xmm12 * xmm25) + xmm3 vfmadd213ps %xmm9, %xmm12, %xmm25 # xmm25 = (xmm12 * xmm25) + xmm9 vfmadd213ps %xmm16, %xmm12, %xmm25 # xmm25 = (xmm12 * xmm25) + xmm16 vfmadd213ps %xmm12, %xmm23, %xmm25 # xmm25 = (xmm23 * xmm25) + xmm12 vcvttps2dq %xmm24, %xmm12 vpslld $0x17, %xmm12, %xmm12 vpaddd %xmm10, %xmm12, %xmm12 vfmadd213ps %xmm12, %xmm25, %xmm12 # xmm12 = (xmm25 * xmm12) + xmm12 vmovups %xmm11, (%rdi) vmovups %xmm19, 0x10(%rdi) vmovups %xmm20, 0x20(%rdi) vmovups %xmm12, 0x30(%rdi) vunpcklps %xmm19, %xmm11, %xmm23 # xmm23 = xmm11[0],xmm19[0],xmm11[1],xmm19[1] vunpcklps %xmm12, %xmm20, %xmm24 # xmm24 = xmm20[0],xmm12[0],xmm20[1],xmm12[1] vunpckhps %xmm19, %xmm11, %xmm11 # xmm11 = xmm11[2],xmm19[2],xmm11[3],xmm19[3] vunpckhps %xmm12, %xmm20, %xmm12 # xmm12 = xmm20[2],xmm12[2],xmm20[3],xmm12[3] vmovlhps %xmm24, %xmm23, %xmm19 # xmm19 = xmm23[0],xmm24[0] vunpckhpd %xmm24, %xmm23, %xmm20 # xmm20 = xmm23[1],xmm24[1] vmovlhps %xmm12, %xmm11, %xmm23 # xmm23 = xmm11[0],xmm12[0] vunpckhpd %xmm12, %xmm11, %xmm11 # xmm11 = xmm11[1],xmm12[1] vaddps %xmm11, %xmm19, %xmm11 vaddps %xmm23, %xmm11, %xmm11 vaddps (%r8), %xmm20, %xmm12 vaddps %xmm11, %xmm12, %xmm11 vmovups %xmm11, (%r8) addq $0x40, %rdi addq $0x10, %r9 addq $0x10, %r8 leal 0x4(%r10), %r11d addl $0x7, %r10d cmpl %ebp, %r10d jl 0x2415d3 movl %ebp, %r10d subl %r11d, %r10d vbroadcastss 0x26541b(%rip), %xmm17 # 0x4a6cc0 vbroadcastss 0x2661f1(%rip), %xmm18 # 0x4a7aa0 jle 0x241977 xorl %r11d, %r11d vmovups (%rdi), %xmm11 vsubps (%r9,%r11,4){1to4}, %xmm11, %xmm11 vminps %xmm8, %xmm11, %xmm11 vmaxps %xmm2, %xmm11, %xmm11 vmovaps %xmm16, %xmm12 vfmadd231ps %xmm1, %xmm11, %xmm12 # xmm12 = (xmm11 * xmm1) + xmm12 vcvttps2dq %xmm12, %xmm19 vcvtdq2ps %xmm19, %xmm19 vcmpltps %xmm19, %xmm12, %k1 vaddps %xmm17, %xmm19, %xmm19 {%k1} vfmsub231ps %xmm13, %xmm19, %xmm11 # xmm11 = (xmm19 * xmm13) - xmm11 vfmsub231ps %xmm18, %xmm19, %xmm11 # xmm11 = (xmm19 * xmm18) - xmm11 vmulps %xmm11, %xmm11, %xmm12 vmovaps %xmm7, %xmm20 vfmadd213ps %xmm14, %xmm11, %xmm20 # xmm20 = (xmm11 * xmm20) + xmm14 vfmadd213ps %xmm15, %xmm11, %xmm20 # xmm20 = (xmm11 * xmm20) + xmm15 vfmadd213ps %xmm3, %xmm11, %xmm20 # xmm20 = (xmm11 * xmm20) + xmm3 vfmadd213ps %xmm9, %xmm11, %xmm20 # xmm20 = (xmm11 * xmm20) + xmm9 vfmadd213ps %xmm16, %xmm11, %xmm20 # xmm20 = (xmm11 * xmm20) + xmm16 vfmadd213ps %xmm11, %xmm12, %xmm20 # xmm20 = (xmm12 * xmm20) + xmm11 vcvttps2dq %xmm19, %xmm11 vpslld $0x17, %xmm11, %xmm11 vpaddd %xmm10, %xmm11, %xmm11 vfmadd213ps %xmm11, %xmm20, %xmm11 # xmm11 = (xmm20 * xmm11) + xmm11 vmovups %xmm11, (%rdi) vshufpd $0x1, %xmm11, %xmm11, %xmm12 # xmm12 = xmm11[1,0] vaddps %xmm11, %xmm12, %xmm11 vmovshdup %xmm11, %xmm12 # xmm12 = xmm11[1,1,3,3] vaddss (%r8,%r11,4), %xmm12, %xmm12 vaddss %xmm11, %xmm12, %xmm11 vmovss %xmm11, (%r8,%r11,4) addq $0x10, %rdi incq %r11 cmpl %r11d, %r10d jne 0x2418b8 incq %rsi cmpq %rdx, %rsi vmovaps 0x26bfb9(%rip), %zmm26 # 0x4ad940 vmovaps 0x26bfef(%rip), %zmm28 # 0x4ad980 vmovaps 0x26c025(%rip), %zmm29 # 0x4ad9c0 vmovaps 0x26c0db(%rip), %zmm22 # 0x4ada80 vmovaps 0x26c111(%rip), %zmm30 # 0x4adac0 vmovaps 0x26c147(%rip), %zmm31 # 0x4adb00 vmovaps 0x26c17d(%rip), %zmm27 # 0x4adb40 vmovaps 0x26c1b3(%rip), %zmm17 # 0x4adb80 vmovdqa64 0x26c229(%rip), %zmm1 # 0x4adc00 vmovaps 0x26bc9f(%rip), %zmm13 # 0x4ad680 vmovaps 0x26bcd5(%rip), %zmm14 # 0x4ad6c0 vmovaps 0x26bd0b(%rip), %zmm18 # 0x4ad700 vmovaps 0x26bd41(%rip), %zmm2 # 0x4ad740 vmovaps 0x26bd77(%rip), %zmm8 # 0x4ad780 vmovaps 0x26bdad(%rip), %zmm15 # 0x4ad7c0 jne 
0x240edb xorl %edx, %edx movq %r14, %rax cmpl $0x10, %ebp jl 0x241a54 movl $0xf, %ecx vbroadcastss 0x26369e(%rip), %zmm0 # 0x4a50d0 movq %r14, %rax vdivps (%rax), %zmm0, %zmm1 vmovups %zmm1, (%rax) addq $0x40, %rax addl $0x10, %ecx cmpl %ebp, %ecx jl 0x241a35 movl %ebp, %edx andl $0x7ffffff0, %edx # imm = 0x7FFFFFF0 movl %edx, %ecx orl $0x7, %ecx cmpl %ebp, %ecx jge 0x24457d vbroadcastss 0x263666(%rip), %ymm0 # 0x4a50d0 vdivps (%rax), %ymm0, %ymm1 vmovups %ymm1, (%rax) addq $0x20, %rax leal 0x8(%rdx), %ecx addl $0xf, %edx cmpl %ebp, %edx movl %ecx, %edx jl 0x241a6a jmp 0x24457f cmpl $0x8, %r12d movq 0x4e0(%rsp), %r14 movl 0xc(%rsp), %r15d je 0x243357 cmpl $0x10, %r12d jne 0x244ddb movslq 0x20(%rsp), %rax movl %r15d, %ecx testl %r15d, %r15d jle 0x242c0c movl %ebp, %edx andl $-0x10, %edx xorl %esi, %esi movq %rsi, %rdi imulq %rax, %rdi leaq (%rbx,%rdi,4), %rdi cmpl $0x10, %ebp jl 0x241c89 movl $0xf, %r9d movq %r13, %r8 vmovups (%rdi), %zmm0 vmovups 0x40(%rdi), %zmm1 vmovups 0x80(%rdi), %zmm2 vmovups 0xc0(%rdi), %zmm3 vmovups 0x100(%rdi), %zmm4 vmovups 0x140(%rdi), %zmm5 vmovups 0x180(%rdi), %zmm6 vmovups 0x1c0(%rdi), %zmm7 vmovups 0x200(%rdi), %zmm8 vmovups 0x240(%rdi), %zmm9 vmovups 0x280(%rdi), %zmm10 vmovups 0x2c0(%rdi), %zmm11 vmovups 0x300(%rdi), %zmm12 vmovups 0x340(%rdi), %zmm13 vmovups 0x380(%rdi), %zmm14 vmovups 0x3c0(%rdi), %zmm15 vunpcklps %zmm1, %zmm0, %zmm16 # zmm16 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13] vunpckhps %zmm1, %zmm0, %zmm0 # zmm0 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15] vmaxps %zmm0, %zmm16, %zmm0 vunpcklps %zmm3, %zmm2, %zmm1 # zmm1 = zmm2[0],zmm3[0],zmm2[1],zmm3[1],zmm2[4],zmm3[4],zmm2[5],zmm3[5],zmm2[8],zmm3[8],zmm2[9],zmm3[9],zmm2[12],zmm3[12],zmm2[13],zmm3[13] vunpckhps %zmm3, %zmm2, %zmm2 # zmm2 = zmm2[2],zmm3[2],zmm2[3],zmm3[3],zmm2[6],zmm3[6],zmm2[7],zmm3[7],zmm2[10],zmm3[10],zmm2[11],zmm3[11],zmm2[14],zmm3[14],zmm2[15],zmm3[15] vmaxps %zmm2, %zmm1, %zmm1 vunpcklps %zmm5, %zmm4, %zmm2 # zmm2 = zmm4[0],zmm5[0],zmm4[1],zmm5[1],zmm4[4],zmm5[4],zmm4[5],zmm5[5],zmm4[8],zmm5[8],zmm4[9],zmm5[9],zmm4[12],zmm5[12],zmm4[13],zmm5[13] vunpckhps %zmm5, %zmm4, %zmm3 # zmm3 = zmm4[2],zmm5[2],zmm4[3],zmm5[3],zmm4[6],zmm5[6],zmm4[7],zmm5[7],zmm4[10],zmm5[10],zmm4[11],zmm5[11],zmm4[14],zmm5[14],zmm4[15],zmm5[15] vmaxps %zmm3, %zmm2, %zmm2 vunpcklps %zmm7, %zmm6, %zmm3 # zmm3 = zmm6[0],zmm7[0],zmm6[1],zmm7[1],zmm6[4],zmm7[4],zmm6[5],zmm7[5],zmm6[8],zmm7[8],zmm6[9],zmm7[9],zmm6[12],zmm7[12],zmm6[13],zmm7[13] vunpckhps %zmm7, %zmm6, %zmm4 # zmm4 = zmm6[2],zmm7[2],zmm6[3],zmm7[3],zmm6[6],zmm7[6],zmm6[7],zmm7[7],zmm6[10],zmm7[10],zmm6[11],zmm7[11],zmm6[14],zmm7[14],zmm6[15],zmm7[15] vmaxps %zmm4, %zmm3, %zmm3 vunpcklps %zmm9, %zmm8, %zmm4 # zmm4 = zmm8[0],zmm9[0],zmm8[1],zmm9[1],zmm8[4],zmm9[4],zmm8[5],zmm9[5],zmm8[8],zmm9[8],zmm8[9],zmm9[9],zmm8[12],zmm9[12],zmm8[13],zmm9[13] vunpckhps %zmm9, %zmm8, %zmm5 # zmm5 = zmm8[2],zmm9[2],zmm8[3],zmm9[3],zmm8[6],zmm9[6],zmm8[7],zmm9[7],zmm8[10],zmm9[10],zmm8[11],zmm9[11],zmm8[14],zmm9[14],zmm8[15],zmm9[15] vmaxps %zmm5, %zmm4, %zmm4 vunpcklps %zmm11, %zmm10, %zmm5 # zmm5 = zmm10[0],zmm11[0],zmm10[1],zmm11[1],zmm10[4],zmm11[4],zmm10[5],zmm11[5],zmm10[8],zmm11[8],zmm10[9],zmm11[9],zmm10[12],zmm11[12],zmm10[13],zmm11[13] vunpckhps %zmm11, %zmm10, %zmm6 # zmm6 = 
zmm10[2],zmm11[2],zmm10[3],zmm11[3],zmm10[6],zmm11[6],zmm10[7],zmm11[7],zmm10[10],zmm11[10],zmm10[11],zmm11[11],zmm10[14],zmm11[14],zmm10[15],zmm11[15] vmaxps %zmm6, %zmm5, %zmm5 vunpcklps %zmm13, %zmm12, %zmm6 # zmm6 = zmm12[0],zmm13[0],zmm12[1],zmm13[1],zmm12[4],zmm13[4],zmm12[5],zmm13[5],zmm12[8],zmm13[8],zmm12[9],zmm13[9],zmm12[12],zmm13[12],zmm12[13],zmm13[13] vunpckhps %zmm13, %zmm12, %zmm7 # zmm7 = zmm12[2],zmm13[2],zmm12[3],zmm13[3],zmm12[6],zmm13[6],zmm12[7],zmm13[7],zmm12[10],zmm13[10],zmm12[11],zmm13[11],zmm12[14],zmm13[14],zmm12[15],zmm13[15] vmaxps %zmm7, %zmm6, %zmm6 vunpcklps %zmm15, %zmm14, %zmm7 # zmm7 = zmm14[0],zmm15[0],zmm14[1],zmm15[1],zmm14[4],zmm15[4],zmm14[5],zmm15[5],zmm14[8],zmm15[8],zmm14[9],zmm15[9],zmm14[12],zmm15[12],zmm14[13],zmm15[13] vunpckhps %zmm15, %zmm14, %zmm8 # zmm8 = zmm14[2],zmm15[2],zmm14[3],zmm15[3],zmm14[6],zmm15[6],zmm14[7],zmm15[7],zmm14[10],zmm15[10],zmm14[11],zmm15[11],zmm14[14],zmm15[14],zmm14[15],zmm15[15] vmaxps %zmm8, %zmm7, %zmm7 vunpcklpd %zmm1, %zmm0, %zmm8 # zmm8 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6] vunpckhpd %zmm1, %zmm0, %zmm0 # zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7] vmaxps %zmm0, %zmm8, %zmm0 vunpcklpd %zmm3, %zmm2, %zmm1 # zmm1 = zmm2[0],zmm3[0],zmm2[2],zmm3[2],zmm2[4],zmm3[4],zmm2[6],zmm3[6] vunpckhpd %zmm3, %zmm2, %zmm2 # zmm2 = zmm2[1],zmm3[1],zmm2[3],zmm3[3],zmm2[5],zmm3[5],zmm2[7],zmm3[7] vmaxps %zmm2, %zmm1, %zmm1 vunpcklpd %zmm5, %zmm4, %zmm2 # zmm2 = zmm4[0],zmm5[0],zmm4[2],zmm5[2],zmm4[4],zmm5[4],zmm4[6],zmm5[6] vunpckhpd %zmm5, %zmm4, %zmm3 # zmm3 = zmm4[1],zmm5[1],zmm4[3],zmm5[3],zmm4[5],zmm5[5],zmm4[7],zmm5[7] vmaxps %zmm3, %zmm2, %zmm2 vunpcklpd %zmm7, %zmm6, %zmm3 # zmm3 = zmm6[0],zmm7[0],zmm6[2],zmm7[2],zmm6[4],zmm7[4],zmm6[6],zmm7[6] vunpckhpd %zmm7, %zmm6, %zmm4 # zmm4 = zmm6[1],zmm7[1],zmm6[3],zmm7[3],zmm6[5],zmm7[5],zmm6[7],zmm7[7] vmaxps %zmm4, %zmm3, %zmm3 vinsertf64x4 $0x1, %ymm1, %zmm0, %zmm4 vshuff64x2 $0xee, %zmm1, %zmm0, %zmm0 # zmm0 = zmm0[4,5,6,7],zmm1[4,5,6,7] vmaxps %zmm0, %zmm4, %zmm0 vinsertf64x4 $0x1, %ymm3, %zmm2, %zmm1 vshuff64x2 $0xee, %zmm3, %zmm2, %zmm2 # zmm2 = zmm2[4,5,6,7],zmm3[4,5,6,7] vmaxps %zmm2, %zmm1, %zmm1 vshuff64x2 $0x88, %zmm1, %zmm0, %zmm2 # zmm2 = zmm0[0,1,4,5],zmm1[0,1,4,5] vshuff64x2 $0xdd, %zmm1, %zmm0, %zmm0 # zmm0 = zmm0[2,3,6,7],zmm1[2,3,6,7] vmaxps %zmm0, %zmm2, %zmm0 vmaxps (%r8), %zmm0, %zmm0 vmovups %zmm0, (%r8) addq $0x400, %rdi # imm = 0x400 addq $0x40, %r8 addl $0x10, %r9d cmpl %ebp, %r9d jl 0x241add movl %edx, %r10d jmp 0x241c8f xorl %r10d, %r10d movq %r13, %r8 movl %ebp, %r9d subl %r10d, %r9d jle 0x241cd6 xorl %r10d, %r10d vmovups (%rdi), %ymm0 vmaxps 0x20(%rdi), %ymm0, %ymm0 vextractf128 $0x1, %ymm0, %xmm1 vmaxps %xmm1, %xmm0, %xmm0 vshufpd $0x3, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[1,1] vmaxps %xmm1, %xmm0, %xmm0 vmovshdup %xmm0, %xmm1 # xmm1 = xmm0[1,1,3,3] vmaxss %xmm1, %xmm0, %xmm0 vmaxss (%r8,%r10,4), %xmm0, %xmm0 vmovss %xmm0, (%r8,%r10,4) addq $0x40, %rdi incq %r10 cmpl %r10d, %r9d jne 0x241c9a incq %rsi cmpq %rcx, %rsi jne 0x241ac0 vmovaps 0x26bc14(%rip), %zmm28 # 0x4ad900 vmovaps 0x26bc4a(%rip), %zmm20 # 0x4ad940 vmovaps 0x26bc80(%rip), %zmm17 # 0x4ad980 vmovaps 0x26bcb6(%rip), %zmm5 # 0x4ad9c0 vbroadcastss 0x264f70(%rip), %zmm0 # 0x4a6c84 vxorps 0x26bce2(%rip), %zmm0, %zmm30 # 0x4ada00 vxorps 0x26bd18(%rip), %zmm0, %zmm6 # 0x4ada40 vmovaps 0x26be8e(%rip), %zmm23 # 0x4adbc0 vmovaps 0x26bd44(%rip), %zmm7 # 0x4ada80 vmovaps 0x26bd7a(%rip), %zmm8 # 0x4adac0 vmovaps 0x26bdb0(%rip), %zmm9 # 0x4adb00 
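# NOTE (annotation): the vmovaps runs around this point reload what appear to
# be the exp() polynomial coefficients into zmm registers; the loop that
# follows is a 16-row AVX-512 pass that subtracts the per-column value
# broadcast from (%r9) ({1to16}), clamps to the valid exp() input range
# (vminps/vmaxps), and evaluates the polynomial with FMAs interleaved across
# all sixteen row vectors to hide FMA latency. Scalar sketch of what each
# lane appears to compute (names and constant roles below are descriptive
# assumptions read off the constant pool, not symbols from the binary):
#   t = x - col_max;                      // then clamp to exp() domain
#   n = floor(t * log2(e) + 0.5);
#   r = t - n*ln2_hi - n*ln2_lo;          // two-part range reduction
#   p = poly5(r);  p = r*r*p + r;         // Cephes-style minimax FMA chain
#   e = reinterpret((n + bias) << 23);    // 2^n via the float exponent field
#   exp(t) = p*e + e;                     // folds the final "+1" into an FMA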
vmovaps 0x26bde6(%rip), %zmm10 # 0x4adb40 vmovaps 0x26be1c(%rip), %zmm11 # 0x4adb80 vmovdqa64 0x26be92(%rip), %zmm12 # 0x4adc00 xorl %esi, %esi movq %rsi, %rdi imulq %rax, %rdi leaq (%rbx,%rdi,4), %rdi cmpl $0x10, %ebp jl 0x242b0c movl $0xf, %r10d movq %r13, %r9 movq %r14, %r8 vmovups (%rdi), %zmm7 vmovups 0x40(%rdi), %zmm8 vmovups 0x80(%rdi), %zmm3 vmovups 0xc0(%rdi), %zmm0 vmovups 0x100(%rdi), %zmm2 vmovups 0x140(%rdi), %zmm9 vmovups 0x180(%rdi), %zmm29 vmovups 0x1c0(%rdi), %zmm14 vmovups 0x200(%rdi), %zmm1 vmovups %zmm1, 0x1e0(%rsp) vmovups 0x240(%rdi), %zmm1 vmovups %zmm1, 0x220(%rsp) vmovups 0x280(%rdi), %zmm1 vmovups %zmm1, 0x260(%rsp) vmovups 0x2c0(%rdi), %zmm1 vmovups %zmm1, 0x2a0(%rsp) vmovups 0x300(%rdi), %zmm10 vmovups %zmm10, 0x2e0(%rsp) vmovups 0x340(%rdi), %zmm10 vmovups %zmm10, 0x120(%rsp) vmovups 0x380(%rdi), %zmm10 vmovups %zmm10, 0x1a0(%rsp) vsubps (%r9){1to16}, %zmm7, %zmm7 vminps %zmm20, %zmm7, %zmm7 vmaxps %zmm17, %zmm7, %zmm7 vmovaps %zmm5, %zmm10 vfmadd213ps %zmm23, %zmm7, %zmm10 # zmm10 = (zmm7 * zmm10) + zmm23 vrndscaleps $0x1, %zmm10, %zmm11 vsubps 0x4(%r9){1to16}, %zmm8, %zmm8 vcmpltps %zmm11, %zmm10, %k1 vmovaps %zmm28, %zmm4 vsubps %zmm28, %zmm11, %zmm11 {%k1} vmovups %zmm11, 0x60(%rsp) vminps %zmm20, %zmm8, %zmm8 vmaxps %zmm17, %zmm8, %zmm8 vmovaps %zmm30, %zmm18 vfmadd231ps %zmm30, %zmm11, %zmm7 # zmm7 = (zmm11 * zmm30) + zmm7 vmovaps %zmm5, %zmm10 vfmadd213ps %zmm23, %zmm8, %zmm10 # zmm10 = (zmm8 * zmm10) + zmm23 vrndscaleps $0x1, %zmm10, %zmm13 vmovaps %zmm6, %zmm19 vfmadd231ps %zmm6, %zmm11, %zmm7 # zmm7 = (zmm11 * zmm6) + zmm7 vmovaps %zmm13, %zmm6 vcmpltps %zmm13, %zmm10, %k1 vsubps %zmm28, %zmm13, %zmm6 {%k1} vmovups %zmm6, 0xa0(%rsp) vfmadd231ps %zmm30, %zmm6, %zmm8 # zmm8 = (zmm6 * zmm30) + zmm8 vmulps %zmm7, %zmm7, %zmm13 vfmadd231ps %zmm19, %zmm6, %zmm8 # zmm8 = (zmm6 * zmm19) + zmm8 vsubps 0x8(%r9){1to16}, %zmm3, %zmm3 vmovaps %zmm7, %zmm12 vminps %zmm20, %zmm3, %zmm3 vmaxps %zmm17, %zmm3, %zmm25 vmovaps %zmm5, %zmm3 vmulps %zmm8, %zmm8, %zmm11 vfmadd213ps %zmm23, %zmm25, %zmm3 # zmm3 = (zmm25 * zmm3) + zmm23 vrndscaleps $0x1, %zmm3, %zmm6 vcmpltps %zmm6, %zmm3, %k1 vmovaps %zmm8, %zmm30 vsubps %zmm28, %zmm6, %zmm6 {%k1} vmovups %zmm6, 0x160(%rsp) vfmadd231ps %zmm18, %zmm6, %zmm25 # zmm25 = (zmm6 * zmm18) + zmm25 vfmadd231ps %zmm19, %zmm6, %zmm25 # zmm25 = (zmm6 * zmm19) + zmm25 vmulps %zmm25, %zmm25, %zmm3 vmovaps %zmm25, %zmm1 vsubps 0xc(%r9){1to16}, %zmm0, %zmm0 vmovaps 0x26bb11(%rip), %zmm16 # 0x4ada80 vmovaps 0x26bb47(%rip), %zmm21 # 0x4adac0 vfmadd213ps %zmm21, %zmm16, %zmm12 # zmm12 = (zmm16 * zmm12) + zmm21 vminps %zmm20, %zmm0, %zmm0 vmaxps %zmm17, %zmm0, %zmm10 vmovaps %zmm5, %zmm0 vfmadd213ps %zmm21, %zmm16, %zmm30 # zmm30 = (zmm16 * zmm30) + zmm21 vfmadd213ps %zmm23, %zmm10, %zmm0 # zmm0 = (zmm10 * zmm0) + zmm23 vrndscaleps $0x1, %zmm0, %zmm6 vcmpltps %zmm6, %zmm0, %k1 vfmadd213ps %zmm21, %zmm16, %zmm1 # zmm1 = (zmm16 * zmm1) + zmm21 vsubps %zmm28, %zmm6, %zmm6 {%k1} vmovups %zmm6, 0xe0(%rsp) vfmadd231ps %zmm18, %zmm6, %zmm10 # zmm10 = (zmm6 * zmm18) + zmm10 vfmadd231ps %zmm19, %zmm6, %zmm10 # zmm10 = (zmm6 * zmm19) + zmm10 vmovaps 0x26bb28(%rip), %zmm15 # 0x4adb00 vfmadd213ps %zmm15, %zmm7, %zmm12 # zmm12 = (zmm7 * zmm12) + zmm15 vmulps %zmm10, %zmm10, %zmm27 vmovaps %zmm10, %zmm22 vfmadd213ps %zmm21, %zmm16, %zmm22 # zmm22 = (zmm16 * zmm22) + zmm21 vfmadd213ps %zmm15, %zmm8, %zmm30 # zmm30 = (zmm8 * zmm30) + zmm15 vsubps 0x10(%r9){1to16}, %zmm2, %zmm0 vminps %zmm20, %zmm0, %zmm0 vfmadd213ps %zmm15, %zmm25, %zmm1 # zmm1 = (zmm25 * 
zmm1) + zmm15 vmaxps %zmm17, %zmm0, %zmm31 vmovaps %zmm5, %zmm0 vfmadd213ps %zmm23, %zmm31, %zmm0 # zmm0 = (zmm31 * zmm0) + zmm23 vfmadd213ps %zmm15, %zmm10, %zmm22 # zmm22 = (zmm10 * zmm22) + zmm15 vrndscaleps $0x1, %zmm0, %zmm2 vcmpltps %zmm2, %zmm0, %k1 vsubps %zmm28, %zmm2, %zmm2 {%k1} vmovups %zmm2, 0x320(%rsp) vmovaps 0x26baf6(%rip), %zmm6 # 0x4adb40 vfmadd213ps %zmm6, %zmm7, %zmm12 # zmm12 = (zmm7 * zmm12) + zmm6 vfmadd231ps %zmm18, %zmm2, %zmm31 # zmm31 = (zmm2 * zmm18) + zmm31 vfmadd231ps %zmm19, %zmm2, %zmm31 # zmm31 = (zmm2 * zmm19) + zmm31 vmulps %zmm31, %zmm31, %zmm2 vfmadd213ps %zmm6, %zmm8, %zmm30 # zmm30 = (zmm8 * zmm30) + zmm6 vmovaps %zmm31, %zmm24 vfmadd213ps %zmm21, %zmm16, %zmm24 # zmm24 = (zmm16 * zmm24) + zmm21 vfmadd213ps %zmm15, %zmm31, %zmm24 # zmm24 = (zmm31 * zmm24) + zmm15 vfmadd213ps %zmm6, %zmm25, %zmm1 # zmm1 = (zmm25 * zmm1) + zmm6 vsubps 0x14(%r9){1to16}, %zmm9, %zmm0 vminps %zmm20, %zmm0, %zmm0 vfmadd213ps %zmm6, %zmm10, %zmm22 # zmm22 = (zmm10 * zmm22) + zmm6 vmaxps %zmm17, %zmm0, %zmm9 vmovaps %zmm5, %zmm0 vfmadd213ps %zmm23, %zmm9, %zmm0 # zmm0 = (zmm9 * zmm0) + zmm23 vfmadd213ps %zmm6, %zmm31, %zmm24 # zmm24 = (zmm31 * zmm24) + zmm6 vrndscaleps $0x1, %zmm0, %zmm26 vcmpltps %zmm26, %zmm0, %k1 vsubps %zmm28, %zmm26, %zmm26 {%k1} vmovups %zmm26, 0x20(%rsp) vmovaps 0x26baac(%rip), %zmm28 # 0x4adb80 vmovaps %zmm12, %zmm0 vfmadd213ps %zmm28, %zmm7, %zmm0 # zmm0 = (zmm7 * zmm0) + zmm28 vfmadd231ps %zmm18, %zmm26, %zmm9 # zmm9 = (zmm26 * zmm18) + zmm9 vfmadd231ps %zmm19, %zmm26, %zmm9 # zmm9 = (zmm26 * zmm19) + zmm9 vmulps %zmm9, %zmm9, %zmm12 vfmadd213ps %zmm23, %zmm7, %zmm0 # zmm0 = (zmm7 * zmm0) + zmm23 vmovaps %zmm0, %zmm26 vsubps 0x18(%r9){1to16}, %zmm29, %zmm0 vminps %zmm20, %zmm0, %zmm0 vfmadd213ps %zmm7, %zmm13, %zmm26 # zmm26 = (zmm13 * zmm26) + zmm7 vmovups %zmm26, 0x420(%rsp) vmaxps %zmm17, %zmm0, %zmm7 vmovaps %zmm5, %zmm0 vfmadd213ps %zmm23, %zmm7, %zmm0 # zmm0 = (zmm7 * zmm0) + zmm23 vfmadd213ps %zmm28, %zmm8, %zmm30 # zmm30 = (zmm8 * zmm30) + zmm28 vrndscaleps $0x1, %zmm0, %zmm13 vcmpltps %zmm13, %zmm0, %k1 vmovaps %zmm9, %zmm29 vfmadd213ps %zmm28, %zmm25, %zmm1 # zmm1 = (zmm25 * zmm1) + zmm28 vfmadd213ps %zmm21, %zmm16, %zmm29 # zmm29 = (zmm16 * zmm29) + zmm21 vfmadd213ps %zmm15, %zmm9, %zmm29 # zmm29 = (zmm9 * zmm29) + zmm15 vfmadd213ps %zmm6, %zmm9, %zmm29 # zmm29 = (zmm9 * zmm29) + zmm6 vfmadd213ps %zmm23, %zmm8, %zmm30 # zmm30 = (zmm8 * zmm30) + zmm23 vsubps %zmm4, %zmm13, %zmm13 {%k1} vmovups %zmm13, 0x460(%rsp) vfmadd231ps %zmm18, %zmm13, %zmm7 # zmm7 = (zmm13 * zmm18) + zmm7 vfmadd231ps %zmm19, %zmm13, %zmm7 # zmm7 = (zmm13 * zmm19) + zmm7 vfmadd213ps %zmm8, %zmm11, %zmm30 # zmm30 = (zmm11 * zmm30) + zmm8 vmovups %zmm30, 0x3e0(%rsp) vmulps %zmm7, %zmm7, %zmm8 vmovaps %zmm7, %zmm30 vfmadd213ps %zmm21, %zmm16, %zmm30 # zmm30 = (zmm16 * zmm30) + zmm21 vfmadd213ps %zmm23, %zmm25, %zmm1 # zmm1 = (zmm25 * zmm1) + zmm23 vfmadd213ps %zmm15, %zmm7, %zmm30 # zmm30 = (zmm7 * zmm30) + zmm15 vsubps 0x1c(%r9){1to16}, %zmm14, %zmm0 vfmadd213ps %zmm25, %zmm3, %zmm1 # zmm1 = (zmm3 * zmm1) + zmm25 vmovups %zmm1, 0x360(%rsp) vminps %zmm20, %zmm0, %zmm0 vmaxps %zmm17, %zmm0, %zmm3 vmovaps %zmm5, %zmm0 vfmadd213ps %zmm6, %zmm7, %zmm30 # zmm30 = (zmm7 * zmm30) + zmm6 vfmadd213ps %zmm23, %zmm3, %zmm0 # zmm0 = (zmm3 * zmm0) + zmm23 vrndscaleps $0x1, %zmm0, %zmm1 vcmpltps %zmm1, %zmm0, %k1 vfmadd213ps %zmm28, %zmm10, %zmm22 # zmm22 = (zmm10 * zmm22) + zmm28 vsubps %zmm4, %zmm1, %zmm1 {%k1} vmovups %zmm1, 0x3a0(%rsp) vfmadd231ps %zmm18, %zmm1, %zmm3 # zmm3 = 
(zmm1 * zmm18) + zmm3 vfmadd231ps %zmm19, %zmm1, %zmm3 # zmm3 = (zmm1 * zmm19) + zmm3 vfmadd213ps %zmm23, %zmm10, %zmm22 # zmm22 = (zmm10 * zmm22) + zmm23 vmulps %zmm3, %zmm3, %zmm11 vmovups 0x1e0(%rsp), %zmm0 vsubps 0x20(%r9){1to16}, %zmm0, %zmm0 vfmadd213ps %zmm10, %zmm27, %zmm22 # zmm22 = (zmm27 * zmm22) + zmm10 vminps %zmm20, %zmm0, %zmm0 vmaxps %zmm17, %zmm0, %zmm0 vmovaps %zmm5, %zmm1 vfmadd213ps %zmm28, %zmm31, %zmm24 # zmm24 = (zmm31 * zmm24) + zmm28 vfmadd213ps %zmm23, %zmm0, %zmm1 # zmm1 = (zmm0 * zmm1) + zmm23 vrndscaleps $0x1, %zmm1, %zmm10 vcmpltps %zmm10, %zmm1, %k1 vfmadd213ps %zmm28, %zmm9, %zmm29 # zmm29 = (zmm9 * zmm29) + zmm28 vmovaps %zmm3, %zmm25 vfmadd213ps %zmm21, %zmm16, %zmm25 # zmm25 = (zmm16 * zmm25) + zmm21 vfmadd213ps %zmm15, %zmm3, %zmm25 # zmm25 = (zmm3 * zmm25) + zmm15 vfmadd213ps %zmm23, %zmm31, %zmm24 # zmm24 = (zmm31 * zmm24) + zmm23 vsubps %zmm4, %zmm10, %zmm10 {%k1} vmovups %zmm10, 0x1e0(%rsp) vfmadd231ps %zmm18, %zmm10, %zmm0 # zmm0 = (zmm10 * zmm18) + zmm0 vfmadd231ps %zmm19, %zmm10, %zmm0 # zmm0 = (zmm10 * zmm19) + zmm0 vfmadd213ps %zmm31, %zmm2, %zmm24 # zmm24 = (zmm2 * zmm24) + zmm31 vmulps %zmm0, %zmm0, %zmm10 vmovaps %zmm0, %zmm27 vfmadd213ps %zmm21, %zmm16, %zmm27 # zmm27 = (zmm16 * zmm27) + zmm21 vfmadd213ps %zmm23, %zmm9, %zmm29 # zmm29 = (zmm9 * zmm29) + zmm23 vfmadd213ps %zmm15, %zmm0, %zmm27 # zmm27 = (zmm0 * zmm27) + zmm15 vmovups 0x220(%rsp), %zmm1 vsubps 0x24(%r9){1to16}, %zmm1, %zmm1 vfmadd213ps %zmm9, %zmm12, %zmm29 # zmm29 = (zmm12 * zmm29) + zmm9 vminps %zmm20, %zmm1, %zmm1 vmaxps %zmm17, %zmm1, %zmm9 vmovaps %zmm5, %zmm1 vfmadd213ps %zmm6, %zmm3, %zmm25 # zmm25 = (zmm3 * zmm25) + zmm6 vfmadd213ps %zmm23, %zmm9, %zmm1 # zmm1 = (zmm9 * zmm1) + zmm23 vrndscaleps $0x1, %zmm1, %zmm2 vcmpltps %zmm2, %zmm1, %k1 vfmadd213ps %zmm28, %zmm7, %zmm30 # zmm30 = (zmm7 * zmm30) + zmm28 vsubps %zmm4, %zmm2, %zmm2 {%k1} vmovups %zmm2, 0x220(%rsp) vfmadd231ps %zmm18, %zmm2, %zmm9 # zmm9 = (zmm2 * zmm18) + zmm9 vfmadd231ps %zmm19, %zmm2, %zmm9 # zmm9 = (zmm2 * zmm19) + zmm9 vfmadd213ps %zmm23, %zmm7, %zmm30 # zmm30 = (zmm7 * zmm30) + zmm23 vmulps %zmm9, %zmm9, %zmm12 vmovups 0x260(%rsp), %zmm1 vsubps 0x28(%r9){1to16}, %zmm1, %zmm1 vfmadd213ps %zmm7, %zmm8, %zmm30 # zmm30 = (zmm8 * zmm30) + zmm7 vminps %zmm20, %zmm1, %zmm1 vmaxps %zmm17, %zmm1, %zmm1 vmovaps %zmm5, %zmm2 vmovaps %zmm5, %zmm13 vfmadd213ps %zmm6, %zmm0, %zmm27 # zmm27 = (zmm0 * zmm27) + zmm6 vfmadd213ps %zmm23, %zmm1, %zmm2 # zmm2 = (zmm1 * zmm2) + zmm23 vrndscaleps $0x1, %zmm2, %zmm7 vcmpltps %zmm7, %zmm2, %k1 vfmadd213ps %zmm28, %zmm3, %zmm25 # zmm25 = (zmm3 * zmm25) + zmm28 vmovaps %zmm9, %zmm5 vfmadd213ps %zmm21, %zmm16, %zmm5 # zmm5 = (zmm16 * zmm5) + zmm21 vfmadd213ps %zmm15, %zmm9, %zmm5 # zmm5 = (zmm9 * zmm5) + zmm15 vfmadd213ps %zmm23, %zmm3, %zmm25 # zmm25 = (zmm3 * zmm25) + zmm23 vfmadd213ps %zmm6, %zmm9, %zmm5 # zmm5 = (zmm9 * zmm5) + zmm6 vsubps %zmm4, %zmm7, %zmm7 {%k1} vmovups %zmm7, 0x260(%rsp) vfmadd231ps %zmm18, %zmm7, %zmm1 # zmm1 = (zmm7 * zmm18) + zmm1 vfmadd213ps %zmm3, %zmm11, %zmm25 # zmm25 = (zmm11 * zmm25) + zmm3 vfmadd231ps %zmm19, %zmm7, %zmm1 # zmm1 = (zmm7 * zmm19) + zmm1 vmulps %zmm1, %zmm1, %zmm3 vmovaps %zmm1, %zmm7 vfmadd213ps %zmm28, %zmm0, %zmm27 # zmm27 = (zmm0 * zmm27) + zmm28 vfmadd213ps %zmm21, %zmm16, %zmm7 # zmm7 = (zmm16 * zmm7) + zmm21 vmovups 0x2a0(%rsp), %zmm2 vsubps 0x2c(%r9){1to16}, %zmm2, %zmm8 vfmadd213ps %zmm23, %zmm0, %zmm27 # zmm27 = (zmm0 * zmm27) + zmm23 vfmadd213ps %zmm15, %zmm1, %zmm7 # zmm7 = (zmm1 * zmm7) + zmm15 vminps %zmm20, 
%zmm8, %zmm8 vmaxps %zmm17, %zmm8, %zmm11 vfmadd213ps %zmm0, %zmm10, %zmm27 # zmm27 = (zmm10 * zmm27) + zmm0 vmovaps %zmm13, %zmm2 vmovaps %zmm13, %zmm0 vfmadd213ps %zmm23, %zmm11, %zmm0 # zmm0 = (zmm11 * zmm0) + zmm23 vrndscaleps $0x1, %zmm0, %zmm8 vfmadd213ps %zmm6, %zmm1, %zmm7 # zmm7 = (zmm1 * zmm7) + zmm6 vcmpltps %zmm8, %zmm0, %k1 vsubps %zmm4, %zmm8, %zmm8 {%k1} vmovups %zmm8, 0x2a0(%rsp) vfmadd231ps %zmm18, %zmm8, %zmm11 # zmm11 = (zmm8 * zmm18) + zmm11 vfmadd213ps %zmm28, %zmm9, %zmm5 # zmm5 = (zmm9 * zmm5) + zmm28 vfmadd231ps %zmm19, %zmm8, %zmm11 # zmm11 = (zmm8 * zmm19) + zmm11 vmovups 0x2e0(%rsp), %zmm0 vsubps 0x30(%r9){1to16}, %zmm0, %zmm0 vfmadd213ps %zmm23, %zmm9, %zmm5 # zmm5 = (zmm9 * zmm5) + zmm23 vmulps %zmm11, %zmm11, %zmm13 vminps %zmm20, %zmm0, %zmm0 vmaxps %zmm17, %zmm0, %zmm0 vfmadd213ps %zmm9, %zmm12, %zmm5 # zmm5 = (zmm12 * zmm5) + zmm9 vmovaps %zmm2, %zmm10 vmovaps %zmm2, %zmm8 vfmadd213ps %zmm23, %zmm0, %zmm10 # zmm10 = (zmm0 * zmm10) + zmm23 vrndscaleps $0x1, %zmm10, %zmm31 vfmadd213ps %zmm28, %zmm1, %zmm7 # zmm7 = (zmm1 * zmm7) + zmm28 vcmpltps %zmm31, %zmm10, %k1 vmovaps %zmm11, %zmm10 vfmadd213ps %zmm21, %zmm16, %zmm10 # zmm10 = (zmm16 * zmm10) + zmm21 vfmadd213ps %zmm23, %zmm1, %zmm7 # zmm7 = (zmm1 * zmm7) + zmm23 vfmadd213ps %zmm15, %zmm11, %zmm10 # zmm10 = (zmm11 * zmm10) + zmm15 vfmadd213ps %zmm6, %zmm11, %zmm10 # zmm10 = (zmm11 * zmm10) + zmm6 vsubps %zmm4, %zmm31, %zmm31 {%k1} vfmadd213ps %zmm1, %zmm3, %zmm7 # zmm7 = (zmm3 * zmm7) + zmm1 vfmadd231ps %zmm18, %zmm31, %zmm0 # zmm0 = (zmm31 * zmm18) + zmm0 vfmadd231ps %zmm19, %zmm31, %zmm0 # zmm0 = (zmm31 * zmm19) + zmm0 vmulps %zmm0, %zmm0, %zmm1 vfmadd213ps %zmm28, %zmm11, %zmm10 # zmm10 = (zmm11 * zmm10) + zmm28 vmovaps %zmm0, %zmm3 vfmadd213ps %zmm21, %zmm16, %zmm3 # zmm3 = (zmm16 * zmm3) + zmm21 vfmadd213ps %zmm15, %zmm0, %zmm3 # zmm3 = (zmm0 * zmm3) + zmm15 vfmadd213ps %zmm23, %zmm11, %zmm10 # zmm10 = (zmm11 * zmm10) + zmm23 vmovups 0x120(%rsp), %zmm2 vsubps 0x34(%r9){1to16}, %zmm2, %zmm12 vminps %zmm20, %zmm12, %zmm12 vfmadd213ps %zmm11, %zmm13, %zmm10 # zmm10 = (zmm13 * zmm10) + zmm11 vmaxps %zmm17, %zmm12, %zmm13 vmovaps %zmm8, %zmm12 vfmadd213ps %zmm23, %zmm13, %zmm12 # zmm12 = (zmm13 * zmm12) + zmm23 vfmadd213ps %zmm6, %zmm0, %zmm3 # zmm3 = (zmm0 * zmm3) + zmm6 vrndscaleps $0x1, %zmm12, %zmm11 vcmpltps %zmm11, %zmm12, %k1 vsubps %zmm4, %zmm11, %zmm11 {%k1} vfmadd213ps %zmm28, %zmm0, %zmm3 # zmm3 = (zmm0 * zmm3) + zmm28 vfmadd231ps %zmm18, %zmm11, %zmm13 # zmm13 = (zmm11 * zmm18) + zmm13 vmovups 0x1a0(%rsp), %zmm2 vsubps 0x38(%r9){1to16}, %zmm2, %zmm12 vfmadd213ps %zmm23, %zmm0, %zmm3 # zmm3 = (zmm0 * zmm3) + zmm23 vfmadd231ps %zmm19, %zmm11, %zmm13 # zmm13 = (zmm11 * zmm19) + zmm13 vminps %zmm20, %zmm12, %zmm12 vmaxps %zmm17, %zmm12, %zmm14 vfmadd213ps %zmm0, %zmm1, %zmm3 # zmm3 = (zmm1 * zmm3) + zmm0 vmovaps %zmm8, %zmm0 vfmadd213ps %zmm23, %zmm14, %zmm0 # zmm0 = (zmm14 * zmm0) + zmm23 vrndscaleps $0x1, %zmm0, %zmm12 vmulps %zmm13, %zmm13, %zmm1 vcmpltps %zmm12, %zmm0, %k1 vmovaps %zmm13, %zmm2 vfmadd213ps %zmm21, %zmm16, %zmm2 # zmm2 = (zmm16 * zmm2) + zmm21 vfmadd213ps %zmm15, %zmm13, %zmm2 # zmm2 = (zmm13 * zmm2) + zmm15 vfmadd213ps %zmm6, %zmm13, %zmm2 # zmm2 = (zmm13 * zmm2) + zmm6 vfmadd213ps %zmm28, %zmm13, %zmm2 # zmm2 = (zmm13 * zmm2) + zmm28 vfmadd213ps %zmm23, %zmm13, %zmm2 # zmm2 = (zmm13 * zmm2) + zmm23 vfmadd213ps %zmm13, %zmm1, %zmm2 # zmm2 = (zmm1 * zmm2) + zmm13 vsubps %zmm4, %zmm12, %zmm12 {%k1} vfmadd231ps %zmm18, %zmm12, %zmm14 # zmm14 = (zmm12 * zmm18) + zmm14 vfmadd231ps 
%zmm19, %zmm12, %zmm14 # zmm14 = (zmm12 * zmm19) + zmm14 vmulps %zmm14, %zmm14, %zmm1 vmovaps %zmm14, %zmm13 vfmadd213ps %zmm21, %zmm16, %zmm13 # zmm13 = (zmm16 * zmm13) + zmm21 vfmadd213ps %zmm15, %zmm14, %zmm13 # zmm13 = (zmm14 * zmm13) + zmm15 vfmadd213ps %zmm6, %zmm14, %zmm13 # zmm13 = (zmm14 * zmm13) + zmm6 vfmadd213ps %zmm28, %zmm14, %zmm13 # zmm13 = (zmm14 * zmm13) + zmm28 vfmadd213ps %zmm23, %zmm14, %zmm13 # zmm13 = (zmm14 * zmm13) + zmm23 vfmadd213ps %zmm14, %zmm1, %zmm13 # zmm13 = (zmm1 * zmm13) + zmm14 vmovups 0x3c0(%rdi), %zmm1 vsubps 0x3c(%r9){1to16}, %zmm1, %zmm1 vminps %zmm20, %zmm1, %zmm1 vmaxps %zmm17, %zmm1, %zmm14 vmovaps %zmm8, %zmm20 vfmadd213ps %zmm23, %zmm14, %zmm20 # zmm20 = (zmm14 * zmm20) + zmm23 vrndscaleps $0x1, %zmm20, %zmm1 vcmpltps %zmm1, %zmm20, %k1 vsubps %zmm4, %zmm1, %zmm1 {%k1} vfmadd231ps %zmm18, %zmm1, %zmm14 # zmm14 = (zmm1 * zmm18) + zmm14 vmovaps %zmm18, %zmm8 vfmadd231ps %zmm19, %zmm1, %zmm14 # zmm14 = (zmm1 * zmm19) + zmm14 vmovaps %zmm19, %zmm9 vmovaps %zmm14, %zmm20 vfmadd213ps %zmm21, %zmm16, %zmm20 # zmm20 = (zmm16 * zmm20) + zmm21 vfmadd213ps %zmm15, %zmm14, %zmm20 # zmm20 = (zmm14 * zmm20) + zmm15 vfmadd213ps %zmm6, %zmm14, %zmm20 # zmm20 = (zmm14 * zmm20) + zmm6 vfmadd213ps %zmm28, %zmm14, %zmm20 # zmm20 = (zmm14 * zmm20) + zmm28 vfmadd213ps %zmm23, %zmm14, %zmm20 # zmm20 = (zmm14 * zmm20) + zmm23 vmulps %zmm14, %zmm14, %zmm26 vfmadd213ps %zmm14, %zmm26, %zmm20 # zmm20 = (zmm26 * zmm20) + zmm14 vaddps 0x420(%rsp), %zmm4, %zmm14 vcvttps2dq 0x60(%rsp), %zmm17 vmovdqa64 0x26b53f(%rip), %zmm0 # 0x4adc00 vpaddd %zmm0, %zmm17, %zmm17 vpslld $0x17, %zmm17, %zmm17 vaddps 0x3e0(%rsp), %zmm4, %zmm18 vcvttps2dq 0xa0(%rsp), %zmm26 vpaddd %zmm0, %zmm26, %zmm26 vpslld $0x17, %zmm26, %zmm26 vaddps 0x360(%rsp), %zmm4, %zmm19 vcvttps2dq 0x160(%rsp), %zmm28 vpaddd %zmm0, %zmm28, %zmm28 vpslld $0x17, %zmm28, %zmm28 vaddps %zmm4, %zmm22, %zmm22 vcvttps2dq 0xe0(%rsp), %zmm15 vpaddd %zmm0, %zmm15, %zmm15 vpslld $0x17, %zmm15, %zmm15 vaddps %zmm4, %zmm24, %zmm24 vcvttps2dq 0x320(%rsp), %zmm23 vpaddd %zmm0, %zmm23, %zmm23 vpslld $0x17, %zmm23, %zmm23 vmulps %zmm17, %zmm14, %zmm17 vaddps %zmm4, %zmm29, %zmm14 vcvttps2dq 0x20(%rsp), %zmm29 vpaddd %zmm0, %zmm29, %zmm29 vmulps %zmm26, %zmm18, %zmm18 vpslld $0x17, %zmm29, %zmm26 vaddps %zmm4, %zmm30, %zmm29 vcvttps2dq 0x460(%rsp), %zmm30 vmulps %zmm28, %zmm19, %zmm19 vpaddd %zmm0, %zmm30, %zmm28 vpslld $0x17, %zmm28, %zmm28 vaddps %zmm4, %zmm25, %zmm25 vmulps %zmm15, %zmm22, %zmm22 vcvttps2dq 0x3a0(%rsp), %zmm15 vpaddd %zmm0, %zmm15, %zmm15 vpslld $0x17, %zmm15, %zmm30 vmulps %zmm23, %zmm24, %zmm15 vaddps %zmm4, %zmm27, %zmm23 vcvttps2dq 0x1e0(%rsp), %zmm16 vpaddd %zmm0, %zmm16, %zmm16 vmulps %zmm26, %zmm14, %zmm14 vpslld $0x17, %zmm16, %zmm16 vaddps %zmm4, %zmm5, %zmm27 vcvttps2dq 0x220(%rsp), %zmm21 vmulps %zmm28, %zmm29, %zmm24 vpaddd %zmm0, %zmm21, %zmm21 vpslld $0x17, %zmm21, %zmm21 vaddps %zmm4, %zmm7, %zmm7 vmulps %zmm30, %zmm25, %zmm25 vcvttps2dq 0x260(%rsp), %zmm26 vpaddd %zmm0, %zmm26, %zmm26 vpslld $0x17, %zmm26, %zmm26 vmulps %zmm16, %zmm23, %zmm16 vmovaps 0x26b17c(%rip), %zmm5 # 0x4ad9c0 vmovaps %zmm8, %zmm30 vmovaps %zmm9, %zmm6 vaddps %zmm4, %zmm10, %zmm10 vcvttps2dq 0x2a0(%rsp), %zmm8 vpaddd %zmm0, %zmm8, %zmm8 vmulps %zmm21, %zmm27, %zmm21 vpslld $0x17, %zmm8, %zmm8 vaddps %zmm4, %zmm3, %zmm3 vcvttps2dq %zmm31, %zmm9 vmulps %zmm26, %zmm7, %zmm7 vmovaps 0x26b330(%rip), %zmm23 # 0x4adbc0 vpaddd %zmm0, %zmm9, %zmm9 vpslld $0x17, %zmm9, %zmm9 vaddps %zmm4, %zmm2, %zmm2 vmulps %zmm8, %zmm10, %zmm8 
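# NOTE (annotation): the vcvttps2dq / vpaddd / vpslld $0x17 / vmulps groups
# below finish each row's exp(): the floor()ed exponent n is converted to
# int32, biased (the constant at 0x4adc00 appears to hold the exponent bias),
# shifted into the IEEE-754 exponent field at bit 23, and multiplied into the
# polynomial value -- a branch-free ldexp(p, n) applied to all sixteen rows.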
vcvttps2dq %zmm11, %zmm10 vpaddd %zmm0, %zmm10, %zmm10 vpslld $0x17, %zmm10, %zmm10 vmulps %zmm9, %zmm3, %zmm3 vaddps %zmm4, %zmm13, %zmm9 vcvttps2dq %zmm12, %zmm11 vpaddd %zmm0, %zmm11, %zmm11 vmulps %zmm10, %zmm2, %zmm27 vpslld $0x17, %zmm11, %zmm10 vaddps %zmm4, %zmm20, %zmm11 vmovaps %zmm4, %zmm28 vmovaps 0x26b049(%rip), %zmm20 # 0x4ad940 vcvttps2dq %zmm1, %zmm1 vmulps %zmm10, %zmm9, %zmm9 vpaddd %zmm0, %zmm1, %zmm1 vpslld $0x17, %zmm1, %zmm1 vmulps %zmm1, %zmm11, %zmm1 vmovups %zmm17, (%rdi) vmovups %zmm18, 0x40(%rdi) vmovups %zmm19, 0x80(%rdi) vmovups %zmm22, 0xc0(%rdi) vmovups %zmm15, 0x100(%rdi) vmovups %zmm14, 0x140(%rdi) vmovups %zmm24, 0x180(%rdi) vmovups %zmm25, 0x1c0(%rdi) vmovups %zmm16, 0x200(%rdi) vmovups %zmm21, 0x240(%rdi) vmovups %zmm7, 0x280(%rdi) vmovups %zmm8, 0x2c0(%rdi) vmovups %zmm3, 0x300(%rdi) vmovups %zmm27, 0x340(%rdi) vmovups %zmm9, 0x380(%rdi) vmovups %zmm1, 0x3c0(%rdi) vunpcklps %zmm18, %zmm17, %zmm10 # zmm10 = zmm17[0],zmm18[0],zmm17[1],zmm18[1],zmm17[4],zmm18[4],zmm17[5],zmm18[5],zmm17[8],zmm18[8],zmm17[9],zmm18[9],zmm17[12],zmm18[12],zmm17[13],zmm18[13] vunpckhps %zmm18, %zmm17, %zmm11 # zmm11 = zmm17[2],zmm18[2],zmm17[3],zmm18[3],zmm17[6],zmm18[6],zmm17[7],zmm18[7],zmm17[10],zmm18[10],zmm17[11],zmm18[11],zmm17[14],zmm18[14],zmm17[15],zmm18[15] vunpcklps %zmm22, %zmm19, %zmm12 # zmm12 = zmm19[0],zmm22[0],zmm19[1],zmm22[1],zmm19[4],zmm22[4],zmm19[5],zmm22[5],zmm19[8],zmm22[8],zmm19[9],zmm22[9],zmm19[12],zmm22[12],zmm19[13],zmm22[13] vunpckhps %zmm22, %zmm19, %zmm13 # zmm13 = zmm19[2],zmm22[2],zmm19[3],zmm22[3],zmm19[6],zmm22[6],zmm19[7],zmm22[7],zmm19[10],zmm22[10],zmm19[11],zmm22[11],zmm19[14],zmm22[14],zmm19[15],zmm22[15] vaddps %zmm11, %zmm10, %zmm10 vaddps %zmm13, %zmm12, %zmm11 vunpcklps %zmm14, %zmm15, %zmm12 # zmm12 = zmm15[0],zmm14[0],zmm15[1],zmm14[1],zmm15[4],zmm14[4],zmm15[5],zmm14[5],zmm15[8],zmm14[8],zmm15[9],zmm14[9],zmm15[12],zmm14[12],zmm15[13],zmm14[13] vunpckhps %zmm14, %zmm15, %zmm13 # zmm13 = zmm15[2],zmm14[2],zmm15[3],zmm14[3],zmm15[6],zmm14[6],zmm15[7],zmm14[7],zmm15[10],zmm14[10],zmm15[11],zmm14[11],zmm15[14],zmm14[14],zmm15[15],zmm14[15] vaddps %zmm13, %zmm12, %zmm12 vunpcklps %zmm25, %zmm24, %zmm13 # zmm13 = zmm24[0],zmm25[0],zmm24[1],zmm25[1],zmm24[4],zmm25[4],zmm24[5],zmm25[5],zmm24[8],zmm25[8],zmm24[9],zmm25[9],zmm24[12],zmm25[12],zmm24[13],zmm25[13] vunpckhps %zmm25, %zmm24, %zmm14 # zmm14 = zmm24[2],zmm25[2],zmm24[3],zmm25[3],zmm24[6],zmm25[6],zmm24[7],zmm25[7],zmm24[10],zmm25[10],zmm24[11],zmm25[11],zmm24[14],zmm25[14],zmm24[15],zmm25[15] vunpcklps %zmm21, %zmm16, %zmm15 # zmm15 = zmm16[0],zmm21[0],zmm16[1],zmm21[1],zmm16[4],zmm21[4],zmm16[5],zmm21[5],zmm16[8],zmm21[8],zmm16[9],zmm21[9],zmm16[12],zmm21[12],zmm16[13],zmm21[13] vaddps %zmm14, %zmm13, %zmm13 vunpckhps %zmm21, %zmm16, %zmm2 # zmm2 = zmm16[2],zmm21[2],zmm16[3],zmm21[3],zmm16[6],zmm21[6],zmm16[7],zmm21[7],zmm16[10],zmm21[10],zmm16[11],zmm21[11],zmm16[14],zmm21[14],zmm16[15],zmm21[15] vunpcklps %zmm8, %zmm7, %zmm14 # zmm14 = zmm7[0],zmm8[0],zmm7[1],zmm8[1],zmm7[4],zmm8[4],zmm7[5],zmm8[5],zmm7[8],zmm8[8],zmm7[9],zmm8[9],zmm7[12],zmm8[12],zmm7[13],zmm8[13] vunpckhps %zmm8, %zmm7, %zmm7 # zmm7 = zmm7[2],zmm8[2],zmm7[3],zmm8[3],zmm7[6],zmm8[6],zmm7[7],zmm8[7],zmm7[10],zmm8[10],zmm7[11],zmm8[11],zmm7[14],zmm8[14],zmm7[15],zmm8[15] vaddps %zmm2, %zmm15, %zmm2 vmovaps 0x26af8b(%rip), %zmm17 # 0x4ad980 vaddps %zmm7, %zmm14, %zmm7 vunpcklps %zmm27, %zmm3, %zmm8 # zmm8 = 
zmm3[0],zmm27[0],zmm3[1],zmm27[1],zmm3[4],zmm27[4],zmm3[5],zmm27[5],zmm3[8],zmm27[8],zmm3[9],zmm27[9],zmm3[12],zmm27[12],zmm3[13],zmm27[13] vunpckhps %zmm27, %zmm3, %zmm0 # zmm0 = zmm3[2],zmm27[2],zmm3[3],zmm27[3],zmm3[6],zmm27[6],zmm3[7],zmm27[7],zmm3[10],zmm27[10],zmm3[11],zmm27[11],zmm3[14],zmm27[14],zmm3[15],zmm27[15] vaddps %zmm0, %zmm8, %zmm0 vunpcklps %zmm1, %zmm9, %zmm3 # zmm3 = zmm9[0],zmm1[0],zmm9[1],zmm1[1],zmm9[4],zmm1[4],zmm9[5],zmm1[5],zmm9[8],zmm1[8],zmm9[9],zmm1[9],zmm9[12],zmm1[12],zmm9[13],zmm1[13] vunpckhps %zmm1, %zmm9, %zmm1 # zmm1 = zmm9[2],zmm1[2],zmm9[3],zmm1[3],zmm9[6],zmm1[6],zmm9[7],zmm1[7],zmm9[10],zmm1[10],zmm9[11],zmm1[11],zmm9[14],zmm1[14],zmm9[15],zmm1[15] vaddps %zmm1, %zmm3, %zmm1 vunpcklpd %zmm11, %zmm10, %zmm3 # zmm3 = zmm10[0],zmm11[0],zmm10[2],zmm11[2],zmm10[4],zmm11[4],zmm10[6],zmm11[6] vunpckhpd %zmm11, %zmm10, %zmm8 # zmm8 = zmm10[1],zmm11[1],zmm10[3],zmm11[3],zmm10[5],zmm11[5],zmm10[7],zmm11[7] vunpcklpd %zmm13, %zmm12, %zmm9 # zmm9 = zmm12[0],zmm13[0],zmm12[2],zmm13[2],zmm12[4],zmm13[4],zmm12[6],zmm13[6] vunpckhpd %zmm13, %zmm12, %zmm10 # zmm10 = zmm12[1],zmm13[1],zmm12[3],zmm13[3],zmm12[5],zmm13[5],zmm12[7],zmm13[7] vaddps %zmm8, %zmm3, %zmm3 vaddps %zmm10, %zmm9, %zmm8 vunpcklpd %zmm7, %zmm2, %zmm9 # zmm9 = zmm2[0],zmm7[0],zmm2[2],zmm7[2],zmm2[4],zmm7[4],zmm2[6],zmm7[6] vunpckhpd %zmm7, %zmm2, %zmm2 # zmm2 = zmm2[1],zmm7[1],zmm2[3],zmm7[3],zmm2[5],zmm7[5],zmm2[7],zmm7[7] vaddps %zmm2, %zmm9, %zmm2 vunpcklpd %zmm1, %zmm0, %zmm7 # zmm7 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6] vunpckhpd %zmm1, %zmm0, %zmm0 # zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7] vaddps %zmm0, %zmm7, %zmm0 vinsertf64x4 $0x1, %ymm8, %zmm3, %zmm1 vshuff64x2 $0xee, %zmm8, %zmm3, %zmm3 # zmm3 = zmm3[4,5,6,7],zmm8[4,5,6,7] vinsertf64x4 $0x1, %ymm0, %zmm2, %zmm7 vshuff64x2 $0xee, %zmm0, %zmm2, %zmm0 # zmm0 = zmm2[4,5,6,7],zmm0[4,5,6,7] vaddps %zmm3, %zmm1, %zmm1 vaddps %zmm0, %zmm7, %zmm0 vshuff64x2 $0xdd, %zmm0, %zmm1, %zmm2 # zmm2 = zmm1[2,3,6,7],zmm0[2,3,6,7] vaddps (%r8), %zmm2, %zmm2 vshuff64x2 $0x88, %zmm0, %zmm1, %zmm0 # zmm0 = zmm1[0,1,4,5],zmm0[0,1,4,5] vaddps %zmm0, %zmm2, %zmm0 addq $0x400, %rdi # imm = 0x400 vmovups %zmm0, (%r8) addq $0x40, %r9 addq $0x40, %r8 addl $0x10, %r10d cmpl %ebp, %r10d jl 0x241d90 movl %edx, %r11d vmovaps 0x26afa8(%rip), %zmm7 # 0x4ada80 vmovaps 0x26afde(%rip), %zmm8 # 0x4adac0 vmovaps 0x26b014(%rip), %zmm9 # 0x4adb00 vmovaps 0x26b04a(%rip), %zmm10 # 0x4adb40 vmovaps 0x26b080(%rip), %zmm11 # 0x4adb80 vmovdqa64 0x26b0f6(%rip), %zmm12 # 0x4adc00 jmp 0x242b15 xorl %r11d, %r11d movq %r14, %r8 movq %r13, %r9 movl %ebp, %r10d subl %r11d, %r10d jle 0x242c00 xorl %r11d, %r11d vmovups (%rdi), %zmm0 vsubps (%r9,%r11,4){1to16}, %zmm0, %zmm0 vminps %zmm20, %zmm0, %zmm0 vmaxps %zmm17, %zmm0, %zmm0 vmovaps %zmm5, %zmm1 vfmadd213ps %zmm23, %zmm0, %zmm1 # zmm1 = (zmm0 * zmm1) + zmm23 vrndscaleps $0x1, %zmm1, %zmm2 vcmpltps %zmm2, %zmm1, %k1 vsubps %zmm28, %zmm2, %zmm2 {%k1} vfmadd231ps %zmm30, %zmm2, %zmm0 # zmm0 = (zmm2 * zmm30) + zmm0 vfmadd231ps %zmm6, %zmm2, %zmm0 # zmm0 = (zmm2 * zmm6) + zmm0 vmulps %zmm0, %zmm0, %zmm1 vmovaps %zmm0, %zmm3 vfmadd213ps %zmm8, %zmm7, %zmm3 # zmm3 = (zmm7 * zmm3) + zmm8 vfmadd213ps %zmm9, %zmm0, %zmm3 # zmm3 = (zmm0 * zmm3) + zmm9 vfmadd213ps %zmm10, %zmm0, %zmm3 # zmm3 = (zmm0 * zmm3) + zmm10 vfmadd213ps %zmm11, %zmm0, %zmm3 # zmm3 = (zmm0 * zmm3) + zmm11 vfmadd213ps %zmm23, %zmm0, %zmm3 # zmm3 = (zmm0 * zmm3) + zmm23 vfmadd213ps %zmm0, %zmm1, %zmm3 # zmm3 = (zmm1 * zmm3) + 
zmm0 vaddps %zmm28, %zmm3, %zmm0 vcvttps2dq %zmm2, %zmm1 vpaddd %zmm12, %zmm1, %zmm1 vpslld $0x17, %zmm1, %zmm1 vmulps %zmm1, %zmm0, %zmm0 vmovups %zmm0, (%rdi) vextractf64x4 $0x1, %zmm0, %ymm1 vaddps %ymm1, %ymm0, %ymm0 vextractf128 $0x1, %ymm0, %xmm1 vaddps %xmm1, %xmm0, %xmm0 vshufpd $0x1, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[1,0] vaddps %xmm0, %xmm1, %xmm0 vmovshdup %xmm0, %xmm1 # xmm1 = xmm0[1,1,3,3] vaddss (%r8,%r11,4), %xmm1, %xmm1 vaddss %xmm0, %xmm1, %xmm0 vmovss %xmm0, (%r8,%r11,4) addq $0x40, %rdi incq %r11 cmpl %r11d, %r10d jne 0x242b24 incq %rsi cmpq %rcx, %rsi jne 0x241d70 xorl %edi, %edi movq %r14, %rdx cmpl $0x10, %ebp jl 0x242c47 movl $0xf, %esi vbroadcastss 0x2624ab(%rip), %zmm0 # 0x4a50d0 movq %r14, %rdx vdivps (%rdx), %zmm0, %zmm1 vmovups %zmm1, (%rdx) addq $0x40, %rdx addl $0x10, %esi cmpl %ebp, %esi jl 0x242c28 movl %ebp, %edi andl $0x7ffffff0, %edi # imm = 0x7FFFFFF0 movl %edi, %esi orl $0x7, %esi cmpl %ebp, %esi jge 0x2445ab vbroadcastss 0x262473(%rip), %ymm0 # 0x4a50d0 vdivps (%rdx), %ymm0, %ymm1 vmovups %ymm1, (%rdx) addq $0x20, %rdx leal 0x8(%rdi), %esi addl $0xf, %edi cmpl %ebp, %edi movl %esi, %edi jl 0x242c5d jmp 0x2445ad testl %r15d, %r15d jle 0x2432e5 movl %ebp, %eax andl $-0x10, %eax movl %eax, 0xa0(%rsp) movslq 0x20(%rsp), %r8 movl 0xc(%rsp), %r9d xorl %eax, %eax movq %rax, %rcx imulq %r8, %rcx leaq (%rbx,%rcx,4), %rcx cmpl $0x10, %ebp jl 0x242cdd movl $0xf, %esi movq %r13, %rdx vmovups (%rdx), %zmm0 vmaxps (%rcx), %zmm0, %zmm0 vmovups %zmm0, (%rdx) addq $0x40, %rcx addq $0x40, %rdx addl $0x10, %esi cmpl %ebp, %esi jl 0x242cb3 movl 0xa0(%rsp), %esi jmp 0x242ce2 xorl %esi, %esi movq %r13, %rdx movl %esi, %edi orl $0x7, %edi cmpl %ebp, %edi jge 0x242d0b vmovups (%rdx), %ymm0 vmaxps (%rcx), %ymm0, %ymm0 vmovups %ymm0, (%rdx) addq $0x20, %rcx addq $0x20, %rdx leal 0x8(%rsi), %edi addl $0xf, %esi cmpl %ebp, %esi movl %edi, %esi jl 0x242ceb movl %esi, %edi orl $0x3, %edi cmpl %ebp, %edi jge 0x242d34 vmovups (%rdx), %xmm0 vmaxps (%rcx), %xmm0, %xmm0 vmovups %xmm0, (%rdx) addq $0x10, %rcx addq $0x10, %rdx leal 0x4(%rsi), %edi addl $0x7, %esi cmpl %ebp, %esi movl %edi, %esi jl 0x242d14 movl %ebp, %edi subl %esi, %edi jle 0x242d52 xorl %esi, %esi vmovss (%rcx,%rsi,4), %xmm0 vmaxss (%rdx,%rsi,4), %xmm0, %xmm0 vmovss %xmm0, (%rdx,%rsi,4) incq %rsi cmpl %esi, %edi jne 0x242d3c incq %rax cmpq %r9, %rax jne 0x242c9b vmovaps 0x26ab98(%rip), %zmm4 # 0x4ad900 vmovaps 0x26abce(%rip), %zmm5 # 0x4ad940 vmovaps 0x26ac04(%rip), %zmm6 # 0x4ad980 vmovaps 0x26ac3a(%rip), %zmm7 # 0x4ad9c0 vbroadcastss 0x263ef4(%rip), %zmm0 # 0x4a6c84 vxorps 0x26ac66(%rip), %zmm0, %zmm8 # 0x4ada00 vxorps 0x26ac9c(%rip), %zmm0, %zmm9 # 0x4ada40 vmovaps 0x26ae12(%rip), %zmm10 # 0x4adbc0 vmovaps 0x26acc8(%rip), %zmm11 # 0x4ada80 vmovaps 0x26acfe(%rip), %zmm12 # 0x4adac0 vmovaps 0x26ad34(%rip), %zmm13 # 0x4adb00 vmovaps 0x26ad6a(%rip), %zmm14 # 0x4adb40 vmovaps 0x26ada0(%rip), %zmm15 # 0x4adb80 vmovdqa64 0x26ae16(%rip), %zmm16 # 0x4adc00 xorl %ecx, %ecx vbroadcastss 0x263e92(%rip), %ymm17 # 0x4a6c88 vbroadcastss 0x263e8c(%rip), %ymm18 # 0x4a6c8c vbroadcastss 0x2622ca(%rip), %ymm19 # 0x4a50d4 vbroadcastss 0x263e7c(%rip), %ymm20 # 0x4a6c90 vbroadcastss 0x263ea2(%rip), %ymm21 # 0x4a6cc0 vbroadcastss 0x263e6c(%rip), %ymm22 # 0x4a6c94 vbroadcastss 0x264c6e(%rip), %ymm23 # 0x4a7aa0 vbroadcastss 0x263e60(%rip), %ymm24 # 0x4a6c9c vbroadcastss 0x263e5a(%rip), %ymm25 # 0x4a6ca0 vbroadcastss 0x263e54(%rip), %ymm26 # 0x4a6ca4 vbroadcastss 0x263e4e(%rip), %ymm27 # 0x4a6ca8 vbroadcastss 0x263e48(%rip), %ymm28 # 0x4a6cac 
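# NOTE (annotation): the pass set up below looks like the exp-and-accumulate
# stage for this layout: each row vector has the stored per-column max at
# (%r13) subtracted, goes through the same clamped polynomial exp(), is
# written back in place at (%r15), and is summed into the running totals at
# (%r14); the later vdivps loops with a broadcast 1.0f (the constant at
# 0x4a50d0) then take the reciprocal of those totals for normalization.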
vpbroadcastd 0x262262(%rip), %ymm29 # 0x4a50d0 vbroadcastss 0x263e10(%rip), %xmm30 # 0x4a6c88 vbroadcastss 0x263e0a(%rip), %xmm31 # 0x4a6c8c movq %rbx, 0x18(%rsp) movq %r8, 0x1a0(%rsp) movq %r9, 0x160(%rsp) vmovups %zmm8, 0x120(%rsp) vmovups %zmm9, 0xe0(%rsp) movq %rcx, %rax imulq %r8, %rax leaq (%rbx,%rax,4), %r15 cmpl $0x10, %ebp movq %rcx, 0x60(%rsp) jl 0x242f91 movl $0xf, %eax vmovups (%r15), %zmm0 vsubps (%r13), %zmm0, %zmm0 vminps %zmm5, %zmm0, %zmm0 vmaxps %zmm6, %zmm0, %zmm0 vmovaps %zmm7, %zmm1 vfmadd213ps %zmm10, %zmm0, %zmm1 # zmm1 = (zmm0 * zmm1) + zmm10 vrndscaleps $0x1, %zmm1, %zmm2 vcmpltps %zmm2, %zmm1, %k1 vsubps %zmm4, %zmm2, %zmm2 {%k1} vfmadd231ps %zmm8, %zmm2, %zmm0 # zmm0 = (zmm2 * zmm8) + zmm0 vfmadd231ps %zmm9, %zmm2, %zmm0 # zmm0 = (zmm2 * zmm9) + zmm0 vmulps %zmm0, %zmm0, %zmm1 vmovaps %zmm0, %zmm3 vfmadd213ps %zmm12, %zmm11, %zmm3 # zmm3 = (zmm11 * zmm3) + zmm12 vfmadd213ps %zmm13, %zmm0, %zmm3 # zmm3 = (zmm0 * zmm3) + zmm13 vfmadd213ps %zmm14, %zmm0, %zmm3 # zmm3 = (zmm0 * zmm3) + zmm14 vfmadd213ps %zmm15, %zmm0, %zmm3 # zmm3 = (zmm0 * zmm3) + zmm15 vfmadd213ps %zmm10, %zmm0, %zmm3 # zmm3 = (zmm0 * zmm3) + zmm10 vfmadd213ps %zmm0, %zmm1, %zmm3 # zmm3 = (zmm1 * zmm3) + zmm0 vaddps %zmm4, %zmm3, %zmm0 vcvttps2dq %zmm2, %zmm1 vpaddd %zmm16, %zmm1, %zmm1 vpslld $0x17, %zmm1, %zmm1 vmulps %zmm1, %zmm0, %zmm0 vaddps (%r14), %zmm0, %zmm1 vmovups %zmm0, (%r15) vmovups %zmm1, (%r14) addq $0x40, %r15 addq $0x40, %r13 addq $0x40, %r14 addl $0x10, %eax cmpl %ebp, %eax jl 0x242ecb movl 0xa0(%rsp), %eax jmp 0x242f93 xorl %eax, %eax movl %eax, %ecx orl $0x7, %ecx cmpl %ebp, %ecx vbroadcastss 0x262131(%rip), %xmm4 # 0x4a50d4 vbroadcastss 0x263ce4(%rip), %xmm5 # 0x4a6c90 vbroadcastss 0x263d0b(%rip), %xmm6 # 0x4a6cc0 vbroadcastss 0x263cd6(%rip), %xmm7 # 0x4a6c94 vbroadcastss 0x263cd9(%rip), %xmm10 # 0x4a6ca0 vbroadcastss 0x263cd4(%rip), %xmm11 # 0x4a6ca4 vbroadcastss 0x263ccf(%rip), %xmm12 # 0x4a6ca8 vbroadcastss 0x263cca(%rip), %xmm13 # 0x4a6cac vpbroadcastd 0x2620e5(%rip), %xmm14 # 0x4a50d0 jge 0x24309f vmovups (%r15), %ymm0 vsubps (%r13), %ymm0, %ymm0 vminps %ymm17, %ymm0, %ymm0 vmaxps %ymm18, %ymm0, %ymm0 vmovaps %ymm19, %ymm1 vfmadd231ps %ymm20, %ymm0, %ymm1 # ymm1 = (ymm0 * ymm20) + ymm1 vroundps $0x1, %ymm1, %ymm2 vcmpltps %ymm2, %ymm1, %k1 vaddps %ymm21, %ymm2, %ymm2 {%k1} vfmsub231ps %ymm22, %ymm2, %ymm0 # ymm0 = (ymm2 * ymm22) - ymm0 vfmsub231ps %ymm23, %ymm2, %ymm0 # ymm0 = (ymm2 * ymm23) - ymm0 vmulps %ymm0, %ymm0, %ymm1 vmovaps %ymm24, %ymm3 vfmadd213ps %ymm25, %ymm0, %ymm3 # ymm3 = (ymm0 * ymm3) + ymm25 vfmadd213ps %ymm26, %ymm0, %ymm3 # ymm3 = (ymm0 * ymm3) + ymm26 vfmadd213ps %ymm27, %ymm0, %ymm3 # ymm3 = (ymm0 * ymm3) + ymm27 vfmadd213ps %ymm28, %ymm0, %ymm3 # ymm3 = (ymm0 * ymm3) + ymm28 vfmadd213ps %ymm19, %ymm0, %ymm3 # ymm3 = (ymm0 * ymm3) + ymm19 vfmadd213ps %ymm0, %ymm1, %ymm3 # ymm3 = (ymm1 * ymm3) + ymm0 vcvttps2dq %ymm2, %ymm0 vpslld $0x17, %ymm0, %ymm0 vpaddd %ymm29, %ymm0, %ymm0 vfmadd213ps %ymm0, %ymm3, %ymm0 # ymm0 = (ymm3 * ymm0) + ymm0 vaddps (%r14), %ymm0, %ymm1 vmovups %ymm0, (%r15) vmovups %ymm1, (%r14) addq $0x20, %r15 addq $0x20, %r13 addq $0x20, %r14 leal 0x8(%rax), %ecx addl $0xf, %eax cmpl %ebp, %eax movl %ecx, %eax jl 0x242ff1 movl %eax, %ecx orl $0x3, %ecx cmpl %ebp, %ecx vbroadcastss 0x2649f1(%rip), %xmm8 # 0x4a7aa0 vbroadcastss 0x263be4(%rip), %xmm9 # 0x4a6c9c jge 0x243160 vmovups (%r15), %xmm0 vsubps (%r13), %xmm0, %xmm0 vminps %xmm30, %xmm0, %xmm0 vmaxps %xmm31, %xmm0, %xmm0 vmovaps %xmm4, %xmm1 vfmadd231ps %xmm5, %xmm0, %xmm1 # xmm1 
= (xmm0 * xmm5) + xmm1 vcvttps2dq %xmm1, %xmm2 vcvtdq2ps %xmm2, %xmm2 vcmpltps %xmm2, %xmm1, %k1 vaddps %xmm6, %xmm2, %xmm2 {%k1} vfmsub231ps %xmm7, %xmm2, %xmm0 # xmm0 = (xmm2 * xmm7) - xmm0 vfmsub231ps %xmm8, %xmm2, %xmm0 # xmm0 = (xmm2 * xmm8) - xmm0 vmulps %xmm0, %xmm0, %xmm1 vmovaps %xmm9, %xmm3 vfmadd213ps %xmm10, %xmm0, %xmm3 # xmm3 = (xmm0 * xmm3) + xmm10 vfmadd213ps %xmm11, %xmm0, %xmm3 # xmm3 = (xmm0 * xmm3) + xmm11 vfmadd213ps %xmm12, %xmm0, %xmm3 # xmm3 = (xmm0 * xmm3) + xmm12 vfmadd213ps %xmm13, %xmm0, %xmm3 # xmm3 = (xmm0 * xmm3) + xmm13 vfmadd213ps %xmm4, %xmm0, %xmm3 # xmm3 = (xmm0 * xmm3) + xmm4 vfmadd213ps %xmm0, %xmm1, %xmm3 # xmm3 = (xmm1 * xmm3) + xmm0 vcvttps2dq %xmm2, %xmm0 vpslld $0x17, %xmm0, %xmm0 vpaddd %xmm0, %xmm14, %xmm0 vfmadd213ps %xmm0, %xmm3, %xmm0 # xmm0 = (xmm3 * xmm0) + xmm0 vaddps (%r14), %xmm0, %xmm1 vmovups %xmm0, (%r15) vmovups %xmm1, (%r14) addq $0x10, %r15 addq $0x10, %r13 addq $0x10, %r14 leal 0x4(%rax), %ecx addl $0x7, %eax cmpl %ebp, %eax movl %ecx, %eax jl 0x2430be movl %ebp, %ebx subl %eax, %ebx jle 0x243198 xorl %r12d, %r12d vmovss (%r15,%r12,4), %xmm0 vsubss (%r13,%r12,4), %xmm0, %xmm0 vzeroupper callq 0x244d0 vmovss %xmm0, (%r15,%r12,4) vaddss (%r14,%r12,4), %xmm0, %xmm0 vmovss %xmm0, (%r14,%r12,4) incq %r12 cmpl %r12d, %ebx jne 0x243169 movq 0x60(%rsp), %rcx incq %rcx movq 0x160(%rsp), %r9 cmpq %r9, %rcx movq 0x18(%rsp), %rbx movq 0x4e0(%rsp), %r14 movq 0x10(%rsp), %r13 movq 0x1a0(%rsp), %r8 vmovaps 0x26a731(%rip), %zmm4 # 0x4ad900 vmovaps 0x26a767(%rip), %zmm5 # 0x4ad940 vmovaps 0x26a79d(%rip), %zmm6 # 0x4ad980 vmovaps 0x26a7d3(%rip), %zmm7 # 0x4ad9c0 vmovups 0x120(%rsp), %zmm8 vmovups 0xe0(%rsp), %zmm9 vmovaps 0x26a9b3(%rip), %zmm10 # 0x4adbc0 vmovaps 0x26a869(%rip), %zmm11 # 0x4ada80 vmovaps 0x26a89f(%rip), %zmm12 # 0x4adac0 vmovaps 0x26a8d5(%rip), %zmm13 # 0x4adb00 vmovaps 0x26a90b(%rip), %zmm14 # 0x4adb40 vmovaps 0x26a941(%rip), %zmm15 # 0x4adb80 vmovdqa64 0x26a9b7(%rip), %zmm16 # 0x4adc00 vbroadcastss 0x263a35(%rip), %ymm17 # 0x4a6c88 vbroadcastss 0x263a2f(%rip), %ymm18 # 0x4a6c8c vbroadcastss 0x261e6d(%rip), %ymm19 # 0x4a50d4 vbroadcastss 0x263a1f(%rip), %ymm20 # 0x4a6c90 vbroadcastss 0x263a45(%rip), %ymm21 # 0x4a6cc0 vbroadcastss 0x263a0f(%rip), %ymm22 # 0x4a6c94 vbroadcastss 0x264811(%rip), %ymm23 # 0x4a7aa0 vbroadcastss 0x263a03(%rip), %ymm24 # 0x4a6c9c vbroadcastss 0x2639fd(%rip), %ymm25 # 0x4a6ca0 vbroadcastss 0x2639f7(%rip), %ymm26 # 0x4a6ca4 vbroadcastss 0x2639f1(%rip), %ymm27 # 0x4a6ca8 vbroadcastss 0x2639eb(%rip), %ymm28 # 0x4a6cac vpbroadcastd 0x261e05(%rip), %ymm29 # 0x4a50d0 vbroadcastss 0x2639b3(%rip), %xmm30 # 0x4a6c88 vbroadcastss 0x2639ad(%rip), %xmm31 # 0x4a6c8c jne 0x242ead xorl %edx, %edx movq %r14, %rax cmpl $0x10, %ebp movl 0xc(%rsp), %esi jl 0x243324 movl $0xf, %ecx vbroadcastss 0x261dce(%rip), %zmm0 # 0x4a50d0 movq %r14, %rax vdivps (%rax), %zmm0, %zmm1 vmovups %zmm1, (%rax) addq $0x40, %rax addl $0x10, %ecx cmpl %ebp, %ecx jl 0x243305 movl %ebp, %edx andl $0x7ffffff0, %edx # imm = 0x7FFFFFF0 movl %edx, %ecx orl $0x7, %ecx cmpl %ebp, %ecx jge 0x2449a2 vbroadcastss 0x261d96(%rip), %ymm0 # 0x4a50d0 vdivps (%rax), %ymm0, %ymm1 vmovups %ymm1, (%rax) addq $0x20, %rax leal 0x8(%rdx), %ecx addl $0xf, %edx cmpl %ebp, %edx movl %ecx, %edx jl 0x24333a jmp 0x2449a4 testl %r15d, %r15d jle 0x24450f movl %ebp, %eax andl $-0x10, %eax movslq 0x20(%rsp), %rcx movl %r15d, %edx xorl %esi, %esi vmovaps 0x26a487(%rip), %zmm0 # 0x4ad800 movq %rsi, %rdi imulq %rcx, %rdi leaq (%rbx,%rdi,4), %rdi cmpl $0x10, %ebp jl 0x24347c movl 
$0xf, %r9d movq %r13, %r8 vmovups (%rdi), %zmm1 vmovups 0x40(%rdi), %zmm2 vmovups 0x80(%rdi), %zmm3 vmovups 0xc0(%rdi), %zmm4 vmovups 0x100(%rdi), %zmm5 vmovups 0x140(%rdi), %zmm6 vmovups 0x180(%rdi), %zmm7 vmovups 0x1c0(%rdi), %zmm8 vunpcklps %zmm2, %zmm1, %zmm9 # zmm9 = zmm1[0],zmm2[0],zmm1[1],zmm2[1],zmm1[4],zmm2[4],zmm1[5],zmm2[5],zmm1[8],zmm2[8],zmm1[9],zmm2[9],zmm1[12],zmm2[12],zmm1[13],zmm2[13] vunpckhps %zmm2, %zmm1, %zmm1 # zmm1 = zmm1[2],zmm2[2],zmm1[3],zmm2[3],zmm1[6],zmm2[6],zmm1[7],zmm2[7],zmm1[10],zmm2[10],zmm1[11],zmm2[11],zmm1[14],zmm2[14],zmm1[15],zmm2[15] vmaxps %zmm1, %zmm9, %zmm1 vunpcklps %zmm4, %zmm3, %zmm2 # zmm2 = zmm3[0],zmm4[0],zmm3[1],zmm4[1],zmm3[4],zmm4[4],zmm3[5],zmm4[5],zmm3[8],zmm4[8],zmm3[9],zmm4[9],zmm3[12],zmm4[12],zmm3[13],zmm4[13] vunpckhps %zmm4, %zmm3, %zmm3 # zmm3 = zmm3[2],zmm4[2],zmm3[3],zmm4[3],zmm3[6],zmm4[6],zmm3[7],zmm4[7],zmm3[10],zmm4[10],zmm3[11],zmm4[11],zmm3[14],zmm4[14],zmm3[15],zmm4[15] vmaxps %zmm3, %zmm2, %zmm2 vunpcklps %zmm6, %zmm5, %zmm3 # zmm3 = zmm5[0],zmm6[0],zmm5[1],zmm6[1],zmm5[4],zmm6[4],zmm5[5],zmm6[5],zmm5[8],zmm6[8],zmm5[9],zmm6[9],zmm5[12],zmm6[12],zmm5[13],zmm6[13] vunpckhps %zmm6, %zmm5, %zmm4 # zmm4 = zmm5[2],zmm6[2],zmm5[3],zmm6[3],zmm5[6],zmm6[6],zmm5[7],zmm6[7],zmm5[10],zmm6[10],zmm5[11],zmm6[11],zmm5[14],zmm6[14],zmm5[15],zmm6[15] vmaxps %zmm4, %zmm3, %zmm3 vunpcklps %zmm8, %zmm7, %zmm4 # zmm4 = zmm7[0],zmm8[0],zmm7[1],zmm8[1],zmm7[4],zmm8[4],zmm7[5],zmm8[5],zmm7[8],zmm8[8],zmm7[9],zmm8[9],zmm7[12],zmm8[12],zmm7[13],zmm8[13] vunpckhps %zmm8, %zmm7, %zmm5 # zmm5 = zmm7[2],zmm8[2],zmm7[3],zmm8[3],zmm7[6],zmm8[6],zmm7[7],zmm8[7],zmm7[10],zmm8[10],zmm7[11],zmm8[11],zmm7[14],zmm8[14],zmm7[15],zmm8[15] vmaxps %zmm5, %zmm4, %zmm4 vunpcklps %zmm2, %zmm1, %zmm5 # zmm5 = zmm1[0],zmm2[0],zmm1[1],zmm2[1],zmm1[4],zmm2[4],zmm1[5],zmm2[5],zmm1[8],zmm2[8],zmm1[9],zmm2[9],zmm1[12],zmm2[12],zmm1[13],zmm2[13] vunpckhps %zmm2, %zmm1, %zmm1 # zmm1 = zmm1[2],zmm2[2],zmm1[3],zmm2[3],zmm1[6],zmm2[6],zmm1[7],zmm2[7],zmm1[10],zmm2[10],zmm1[11],zmm2[11],zmm1[14],zmm2[14],zmm1[15],zmm2[15] vmaxps %zmm1, %zmm5, %zmm1 vunpcklps %zmm4, %zmm3, %zmm2 # zmm2 = zmm3[0],zmm4[0],zmm3[1],zmm4[1],zmm3[4],zmm4[4],zmm3[5],zmm4[5],zmm3[8],zmm4[8],zmm3[9],zmm4[9],zmm3[12],zmm4[12],zmm3[13],zmm4[13] vunpckhps %zmm4, %zmm3, %zmm3 # zmm3 = zmm3[2],zmm4[2],zmm3[3],zmm4[3],zmm3[6],zmm4[6],zmm3[7],zmm4[7],zmm3[10],zmm4[10],zmm3[11],zmm4[11],zmm3[14],zmm4[14],zmm3[15],zmm4[15] vmaxps %zmm3, %zmm2, %zmm2 vshuff64x2 $0x88, %zmm2, %zmm1, %zmm3 # zmm3 = zmm1[0,1,4,5],zmm2[0,1,4,5] vshuff64x2 $0xdd, %zmm2, %zmm1, %zmm1 # zmm1 = zmm1[2,3,6,7],zmm2[2,3,6,7] vmaxps %zmm1, %zmm3, %zmm1 vpermps %zmm1, %zmm0, %zmm1 vmaxps (%r8), %zmm1, %zmm1 vmovups %zmm1, (%r8) addq $0x200, %rdi # imm = 0x200 addq $0x40, %r8 addl $0x10, %r9d cmpl %ebp, %r9d jl 0x243396 movl %eax, %r9d jmp 0x243482 xorl %r9d, %r9d movq %r13, %r8 movl %r9d, %r10d orl $0x7, %r10d cmpl %ebp, %r10d jge 0x243550 vmovups (%rdi), %ymm1 vmovups 0x20(%rdi), %ymm2 vmovups 0x40(%rdi), %ymm3 vmovups 0x60(%rdi), %ymm4 vmovups 0x80(%rdi), %ymm5 vmovups 0xa0(%rdi), %ymm6 vmovups 0xc0(%rdi), %ymm7 vmovups 0xe0(%rdi), %ymm8 vunpcklps %ymm5, %ymm1, %ymm9 # ymm9 = ymm1[0],ymm5[0],ymm1[1],ymm5[1],ymm1[4],ymm5[4],ymm1[5],ymm5[5] vunpckhps %ymm5, %ymm1, %ymm1 # ymm1 = ymm1[2],ymm5[2],ymm1[3],ymm5[3],ymm1[6],ymm5[6],ymm1[7],ymm5[7] vmaxps %ymm1, %ymm9, %ymm1 vunpcklps %ymm7, %ymm3, %ymm5 # ymm5 = ymm3[0],ymm7[0],ymm3[1],ymm7[1],ymm3[4],ymm7[4],ymm3[5],ymm7[5] vunpckhps %ymm7, %ymm3, %ymm3 # ymm3 = 
ymm3[2],ymm7[2],ymm3[3],ymm7[3],ymm3[6],ymm7[6],ymm3[7],ymm7[7] vmaxps %ymm3, %ymm5, %ymm3 vunpcklps %ymm6, %ymm2, %ymm5 # ymm5 = ymm2[0],ymm6[0],ymm2[1],ymm6[1],ymm2[4],ymm6[4],ymm2[5],ymm6[5] vunpckhps %ymm6, %ymm2, %ymm2 # ymm2 = ymm2[2],ymm6[2],ymm2[3],ymm6[3],ymm2[6],ymm6[6],ymm2[7],ymm6[7] vmaxps %ymm2, %ymm5, %ymm2 vunpcklps %ymm8, %ymm4, %ymm5 # ymm5 = ymm4[0],ymm8[0],ymm4[1],ymm8[1],ymm4[4],ymm8[4],ymm4[5],ymm8[5] vunpckhps %ymm8, %ymm4, %ymm4 # ymm4 = ymm4[2],ymm8[2],ymm4[3],ymm8[3],ymm4[6],ymm8[6],ymm4[7],ymm8[7] vmaxps %ymm4, %ymm5, %ymm4 vunpcklps %ymm3, %ymm1, %ymm5 # ymm5 = ymm1[0],ymm3[0],ymm1[1],ymm3[1],ymm1[4],ymm3[4],ymm1[5],ymm3[5] vunpckhps %ymm3, %ymm1, %ymm1 # ymm1 = ymm1[2],ymm3[2],ymm1[3],ymm3[3],ymm1[6],ymm3[6],ymm1[7],ymm3[7] vmaxps %ymm1, %ymm5, %ymm1 vunpcklps %ymm4, %ymm2, %ymm3 # ymm3 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[4],ymm4[4],ymm2[5],ymm4[5] vunpckhps %ymm4, %ymm2, %ymm2 # ymm2 = ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[6],ymm4[6],ymm2[7],ymm4[7] vmaxps %ymm2, %ymm3, %ymm2 vunpcklps %ymm2, %ymm1, %ymm3 # ymm3 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[4],ymm2[4],ymm1[5],ymm2[5] vunpckhps %ymm2, %ymm1, %ymm1 # ymm1 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7] vinsertf128 $0x1, %xmm1, %ymm3, %ymm2 vperm2f128 $0x31, %ymm1, %ymm3, %ymm1 # ymm1 = ymm3[2,3],ymm1[2,3] vmaxps %ymm1, %ymm2, %ymm1 vmaxps (%r8), %ymm1, %ymm1 vmovups %ymm1, (%r8) addq $0x100, %rdi # imm = 0x100 addq $0x20, %r8 leal 0x8(%r9), %r10d addl $0xf, %r9d cmpl %ebp, %r9d movl %r10d, %r9d jl 0x243492 movl %ebp, %r10d subl %r9d, %r10d jle 0x24358d xorl %r9d, %r9d vmovups 0x10(%rdi), %xmm1 vmaxps (%rdi), %xmm1, %xmm1 vshufpd $0x3, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,1] vmaxps %xmm2, %xmm1, %xmm1 vmovshdup %xmm1, %xmm2 # xmm2 = xmm1[1,1,3,3] vmaxss %xmm2, %xmm1, %xmm1 vmaxss (%r8,%r9,4), %xmm1, %xmm1 vmovss %xmm1, (%r8,%r9,4) addq $0x20, %rdi incq %r9 cmpl %r9d, %r10d jne 0x24355b incq %rsi cmpq %rdx, %rsi jne 0x243379 vmovaps 0x26a35d(%rip), %zmm23 # 0x4ad900 vbroadcastss 0x2636d7(%rip), %zmm1 # 0x4a6c84 vxorps 0x26a449(%rip), %zmm1, %zmm0 # 0x4ada00 vmovups %zmm0, 0x2e0(%rsp) vxorps 0x26a474(%rip), %zmm1, %zmm0 # 0x4ada40 vmovaps 0x26a5ea(%rip), %zmm17 # 0x4adbc0 xorl %esi, %esi vbroadcastss 0x2636aa(%rip), %ymm28 # 0x4a6c8c vbroadcastss 0x261ae9(%rip), %ymm15 # 0x4a50d4 vbroadcastss 0x261adb(%rip), %ymm21 # 0x4a50d0 vbroadcastss 0x263695(%rip), %ymm22 # 0x4a6c94 vbroadcastss 0x26368f(%rip), %ymm24 # 0x4a6c98 vbroadcastss 0x26368d(%rip), %ymm18 # 0x4a6ca0 vbroadcastss 0x26367f(%rip), %ymm26 # 0x4a6c9c vbroadcastss 0x26367d(%rip), %ymm27 # 0x4a6ca4 vbroadcastss 0x263677(%rip), %ymm25 # 0x4a6ca8 vpbroadcastd 0x261a95(%rip), %ymm31 # 0x4a50d0 vmovups %zmm0, 0x320(%rsp) movq %rsi, %rdi imulq %rcx, %rdi leaq (%rbx,%rdi,4), %rdi cmpl $0x10, %ebp jl 0x243e0d movl $0xf, %r10d movq %r13, %r9 movq %r14, %r8 vmovups (%rdi), %zmm1 vmovups 0x40(%rdi), %zmm2 vmovups 0x80(%rdi), %zmm3 vmovups 0xc0(%rdi), %zmm7 vmovups 0x100(%rdi), %zmm8 vmovups 0x140(%rdi), %zmm9 vmovups 0x180(%rdi), %zmm11 vmovups 0x1c0(%rdi), %zmm13 vbroadcastss (%r9), %ymm10 vbroadcastss 0x4(%r9), %ymm12 vbroadcastss 0x8(%r9), %ymm14 vbroadcastss 0xc(%r9), %ymm15 vbroadcastss 0x10(%r9), %ymm16 vbroadcastss 0x14(%r9), %ymm18 vbroadcastss 0x18(%r9), %ymm19 vbroadcastss 0x1c(%r9), %ymm20 vbroadcastss 0x20(%r9), %ymm21 vinsertf64x4 $0x1, %ymm12, %zmm10, %zmm10 vbroadcastss 0x24(%r9), %ymm12 vsubps %zmm10, %zmm1, %zmm22 vinsertf64x4 $0x1, %ymm15, %zmm14, %zmm1 vbroadcastss 0x28(%r9), %ymm14 vsubps %zmm1, %zmm2, %zmm2 
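# NOTE (annotation, less certain): in this variant two 8-wide rows appear to
# share one zmm register, so each row's scalar max is first broadcast to a
# ymm half (vbroadcastss from (%r9)) and the halves are stitched together
# with vinsertf64x4 before the packed subtraction -- the same subtract-max /
# clamp / polynomial-exp recipe as above, applied to a pack-of-8 layout.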
vinsertf64x4 $0x1, %ymm18, %zmm16, %zmm1 vbroadcastss 0x2c(%r9), %ymm15 vsubps %zmm1, %zmm3, %zmm1 vinsertf64x4 $0x1, %ymm20, %zmm19, %zmm3 vbroadcastss 0x30(%r9), %ymm16 vsubps %zmm3, %zmm7, %zmm10 vinsertf64x4 $0x1, %ymm12, %zmm21, %zmm3 vbroadcastss 0x34(%r9), %ymm7 vsubps %zmm3, %zmm8, %zmm8 vinsertf64x4 $0x1, %ymm15, %zmm14, %zmm3 vbroadcastss 0x38(%r9), %ymm14 vsubps %zmm3, %zmm9, %zmm12 vinsertf64x4 $0x1, %ymm7, %zmm16, %zmm3 vbroadcastss 0x3c(%r9), %ymm7 vsubps %zmm3, %zmm11, %zmm31 vinsertf64x4 $0x1, %ymm7, %zmm14, %zmm3 vsubps %zmm3, %zmm13, %zmm0 vmovups %zmm0, 0x60(%rsp) vmovaps 0x26a1c1(%rip), %zmm21 # 0x4ad940 vminps %zmm21, %zmm22, %zmm3 vmovaps 0x26a1f1(%rip), %zmm22 # 0x4ad980 vmaxps %zmm22, %zmm3, %zmm14 vmovaps 0x26a221(%rip), %zmm28 # 0x4ad9c0 vmovaps %zmm28, %zmm3 vfmadd213ps %zmm17, %zmm14, %zmm3 # zmm3 = (zmm14 * zmm3) + zmm17 vrndscaleps $0x1, %zmm3, %zmm30 vcmpltps %zmm30, %zmm3, %k1 vsubps %zmm23, %zmm30, %zmm30 {%k1} vmovups 0x2e0(%rsp), %zmm4 vfmadd231ps %zmm4, %zmm30, %zmm14 # zmm14 = (zmm30 * zmm4) + zmm14 vminps %zmm21, %zmm2, %zmm2 vmovups 0x320(%rsp), %zmm29 vfmadd231ps %zmm29, %zmm30, %zmm14 # zmm14 = (zmm30 * zmm29) + zmm14 vmaxps %zmm22, %zmm2, %zmm2 vmovaps %zmm28, %zmm3 vfmadd213ps %zmm17, %zmm2, %zmm3 # zmm3 = (zmm2 * zmm3) + zmm17 vmulps %zmm14, %zmm14, %zmm0 vmovups %zmm0, 0xa0(%rsp) vrndscaleps $0x1, %zmm3, %zmm13 vcmpltps %zmm13, %zmm3, %k1 vsubps %zmm23, %zmm13, %zmm13 {%k1} vmovaps %zmm14, %zmm23 vfmadd231ps %zmm4, %zmm13, %zmm2 # zmm2 = (zmm13 * zmm4) + zmm2 vfmadd231ps %zmm29, %zmm13, %zmm2 # zmm2 = (zmm13 * zmm29) + zmm2 vminps %zmm21, %zmm1, %zmm1 vmovaps %zmm21, %zmm5 vmulps %zmm2, %zmm2, %zmm0 vmovups %zmm0, 0x1a0(%rsp) vmaxps %zmm22, %zmm1, %zmm3 vmovaps %zmm28, %zmm9 vfmadd213ps %zmm17, %zmm3, %zmm9 # zmm9 = (zmm3 * zmm9) + zmm17 vmovaps %zmm2, %zmm7 vrndscaleps $0x1, %zmm9, %zmm1 vcmpltps %zmm1, %zmm9, %k1 vsubps 0x26a083(%rip), %zmm1, %zmm1 {%k1} # 0x4ad900 vmovaps 0x26a239(%rip), %zmm21 # 0x4adac0 vmovaps 0x26a1ef(%rip), %zmm11 # 0x4ada80 vfmadd213ps %zmm21, %zmm11, %zmm23 # zmm23 = (zmm11 * zmm23) + zmm21 vfmadd231ps %zmm4, %zmm1, %zmm3 # zmm3 = (zmm1 * zmm4) + zmm3 vfmadd231ps %zmm29, %zmm1, %zmm3 # zmm3 = (zmm1 * zmm29) + zmm3 vmulps %zmm3, %zmm3, %zmm0 vmovups %zmm0, 0x160(%rsp) vfmadd213ps %zmm21, %zmm11, %zmm7 # zmm7 = (zmm11 * zmm7) + zmm21 vmovaps %zmm3, %zmm9 vfmadd213ps %zmm21, %zmm11, %zmm9 # zmm9 = (zmm11 * zmm9) + zmm21 vminps %zmm5, %zmm10, %zmm10 vmovaps 0x26a22a(%rip), %zmm6 # 0x4adb00 vfmadd213ps %zmm6, %zmm14, %zmm23 # zmm23 = (zmm14 * zmm23) + zmm6 vmaxps %zmm22, %zmm10, %zmm16 vmovaps %zmm28, %zmm15 vfmadd213ps %zmm17, %zmm16, %zmm15 # zmm15 = (zmm16 * zmm15) + zmm17 vfmadd213ps %zmm6, %zmm2, %zmm7 # zmm7 = (zmm2 * zmm7) + zmm6 vrndscaleps $0x1, %zmm15, %zmm10 vcmpltps %zmm10, %zmm15, %k1 vsubps 0x269ff4(%rip), %zmm10, %zmm10 {%k1} # 0x4ad900 vfmadd213ps %zmm6, %zmm3, %zmm9 # zmm9 = (zmm3 * zmm9) + zmm6 vfmadd231ps %zmm4, %zmm10, %zmm16 # zmm16 = (zmm10 * zmm4) + zmm16 vfmadd231ps %zmm29, %zmm10, %zmm16 # zmm16 = (zmm10 * zmm29) + zmm16 vmulps %zmm16, %zmm16, %zmm0 vmovups %zmm0, 0x120(%rsp) vmovaps 0x26a207(%rip), %zmm26 # 0x4adb40 vfmadd213ps %zmm26, %zmm14, %zmm23 # zmm23 = (zmm14 * zmm23) + zmm26 vmovaps %zmm16, %zmm19 vfmadd213ps %zmm21, %zmm11, %zmm19 # zmm19 = (zmm11 * zmm19) + zmm21 vfmadd213ps %zmm6, %zmm16, %zmm19 # zmm19 = (zmm16 * zmm19) + zmm6 vfmadd213ps %zmm26, %zmm2, %zmm7 # zmm7 = (zmm2 * zmm7) + zmm26 vminps %zmm5, %zmm8, %zmm8 vmovaps %zmm5, %zmm20 vmaxps %zmm22, %zmm8, %zmm8 vmovaps %zmm28, 
%zmm18 vfmadd213ps %zmm26, %zmm3, %zmm9 # zmm9 = (zmm3 * zmm9) + zmm26 vfmadd213ps %zmm17, %zmm8, %zmm18 # zmm18 = (zmm8 * zmm18) + zmm17 vrndscaleps $0x1, %zmm18, %zmm15 vcmpltps %zmm15, %zmm18, %k1 vfmadd213ps %zmm26, %zmm16, %zmm19 # zmm19 = (zmm16 * zmm19) + zmm26 vsubps 0x269f67(%rip), %zmm15, %zmm15 {%k1} # 0x4ad900 vfmadd231ps %zmm4, %zmm15, %zmm8 # zmm8 = (zmm15 * zmm4) + zmm8 vfmadd231ps %zmm29, %zmm15, %zmm8 # zmm8 = (zmm15 * zmm29) + zmm8 vmovaps 0x26a1d1(%rip), %zmm5 # 0x4adb80 vfmadd213ps %zmm5, %zmm14, %zmm23 # zmm23 = (zmm14 * zmm23) + zmm5 vmulps %zmm8, %zmm8, %zmm0 vmovups %zmm0, 0xe0(%rsp) vmovaps %zmm8, %zmm24 vfmadd213ps %zmm21, %zmm11, %zmm24 # zmm24 = (zmm11 * zmm24) + zmm21 vfmadd213ps %zmm5, %zmm2, %zmm7 # zmm7 = (zmm2 * zmm7) + zmm5 vfmadd213ps %zmm6, %zmm8, %zmm24 # zmm24 = (zmm8 * zmm24) + zmm6 vfmadd213ps %zmm26, %zmm8, %zmm24 # zmm24 = (zmm8 * zmm24) + zmm26 vmovaps %zmm20, %zmm0 vminps %zmm20, %zmm12, %zmm12 vfmadd213ps %zmm5, %zmm3, %zmm9 # zmm9 = (zmm3 * zmm9) + zmm5 vmaxps %zmm22, %zmm12, %zmm20 vmovaps %zmm28, %zmm18 vfmadd213ps %zmm17, %zmm20, %zmm18 # zmm18 = (zmm20 * zmm18) + zmm17 vfmadd213ps %zmm5, %zmm16, %zmm19 # zmm19 = (zmm16 * zmm19) + zmm5 vrndscaleps $0x1, %zmm18, %zmm12 vcmpltps %zmm12, %zmm18, %k1 vsubps 0x269eda(%rip), %zmm12, %zmm12 {%k1} # 0x4ad900 vfmadd213ps %zmm5, %zmm8, %zmm24 # zmm24 = (zmm8 * zmm24) + zmm5 vfmadd231ps %zmm4, %zmm12, %zmm20 # zmm20 = (zmm12 * zmm4) + zmm20 vfmadd231ps %zmm29, %zmm12, %zmm20 # zmm20 = (zmm12 * zmm29) + zmm20 vmulps %zmm20, %zmm20, %zmm18 vfmadd213ps %zmm17, %zmm14, %zmm23 # zmm23 = (zmm14 * zmm23) + zmm17 vmovaps %zmm20, %zmm27 vfmadd213ps %zmm21, %zmm11, %zmm27 # zmm27 = (zmm11 * zmm27) + zmm21 vfmadd213ps %zmm6, %zmm20, %zmm27 # zmm27 = (zmm20 * zmm27) + zmm6 vfmadd213ps %zmm17, %zmm2, %zmm7 # zmm7 = (zmm2 * zmm7) + zmm17 vfmadd213ps %zmm26, %zmm20, %zmm27 # zmm27 = (zmm20 * zmm27) + zmm26 vfmadd213ps %zmm5, %zmm20, %zmm27 # zmm27 = (zmm20 * zmm27) + zmm5 vminps %zmm0, %zmm31, %zmm25 vfmadd213ps %zmm17, %zmm3, %zmm9 # zmm9 = (zmm3 * zmm9) + zmm17 vmaxps %zmm22, %zmm25, %zmm25 vmovaps %zmm28, %zmm0 vfmadd213ps %zmm17, %zmm25, %zmm0 # zmm0 = (zmm25 * zmm0) + zmm17 vfmadd213ps %zmm17, %zmm16, %zmm19 # zmm19 = (zmm16 * zmm19) + zmm17 vrndscaleps $0x1, %zmm0, %zmm31 vcmpltps %zmm31, %zmm0, %k1 vsubps 0x269e5c(%rip), %zmm31, %zmm31 {%k1} # 0x4ad900 vfmadd213ps %zmm17, %zmm8, %zmm24 # zmm24 = (zmm8 * zmm24) + zmm17 vfmadd231ps %zmm4, %zmm31, %zmm25 # zmm25 = (zmm31 * zmm4) + zmm25 vfmadd231ps %zmm29, %zmm31, %zmm25 # zmm25 = (zmm31 * zmm29) + zmm25 vmovaps 0x269e80(%rip), %zmm0 # 0x4ad940 vminps 0x60(%rsp), %zmm0, %zmm0 vfmadd213ps %zmm17, %zmm20, %zmm27 # zmm27 = (zmm20 * zmm27) + zmm17 vmaxps %zmm22, %zmm0, %zmm0 vfmadd213ps %zmm17, %zmm0, %zmm28 # zmm28 = (zmm0 * zmm28) + zmm17 vfmadd132ps 0xa0(%rsp), %zmm14, %zmm23 # zmm23 = (zmm23 * mem) + zmm14 vrndscaleps $0x1, %zmm28, %zmm14 vcmpltps %zmm14, %zmm28, %k1 vmulps %zmm25, %zmm25, %zmm28 vfmadd132ps 0x1a0(%rsp), %zmm2, %zmm7 # zmm7 = (zmm7 * mem) + zmm2 vmovaps %zmm25, %zmm2 vfmadd213ps %zmm21, %zmm11, %zmm2 # zmm2 = (zmm11 * zmm2) + zmm21 vfmadd213ps %zmm6, %zmm25, %zmm2 # zmm2 = (zmm25 * zmm2) + zmm6 vfmadd132ps 0x160(%rsp), %zmm3, %zmm9 # zmm9 = (zmm9 * mem) + zmm3 vfmadd213ps %zmm26, %zmm25, %zmm2 # zmm2 = (zmm25 * zmm2) + zmm26 vfmadd213ps %zmm5, %zmm25, %zmm2 # zmm2 = (zmm25 * zmm2) + zmm5 vfmadd213ps %zmm17, %zmm25, %zmm2 # zmm2 = (zmm25 * zmm2) + zmm17 vfmadd132ps 0x120(%rsp), %zmm16, %zmm19 # zmm19 = (zmm19 * mem) + zmm16 vmovdqa64 
0x26a0b5(%rip), %zmm16 # 0x4adc00 vsubps 0x269dab(%rip), %zmm14, %zmm14 {%k1} # 0x4ad900 vfmadd231ps %zmm4, %zmm14, %zmm0 # zmm0 = (zmm14 * zmm4) + zmm0 vfmadd231ps %zmm29, %zmm14, %zmm0 # zmm0 = (zmm14 * zmm29) + zmm0 vfmadd132ps 0xe0(%rsp), %zmm8, %zmm24 # zmm24 = (zmm24 * mem) + zmm8 vmulps %zmm0, %zmm0, %zmm3 vmovaps %zmm0, %zmm8 vfmadd213ps %zmm21, %zmm11, %zmm8 # zmm8 = (zmm11 * zmm8) + zmm21 vfmadd213ps %zmm20, %zmm18, %zmm27 # zmm27 = (zmm18 * zmm27) + zmm20 vfmadd213ps %zmm6, %zmm0, %zmm8 # zmm8 = (zmm0 * zmm8) + zmm6 vfmadd213ps %zmm26, %zmm0, %zmm8 # zmm8 = (zmm0 * zmm8) + zmm26 vfmadd213ps %zmm5, %zmm0, %zmm8 # zmm8 = (zmm0 * zmm8) + zmm5 vfmadd213ps %zmm25, %zmm28, %zmm2 # zmm2 = (zmm28 * zmm2) + zmm25 vfmadd213ps %zmm17, %zmm0, %zmm8 # zmm8 = (zmm0 * zmm8) + zmm17 vfmadd213ps %zmm0, %zmm3, %zmm8 # zmm8 = (zmm3 * zmm8) + zmm0 vcvttps2dq %zmm30, %zmm0 vaddps 0x269d48(%rip), %zmm23, %zmm3 # 0x4ad900 vmovaps 0x269d3e(%rip), %zmm23 # 0x4ad900 vpaddd %zmm16, %zmm0, %zmm0 vpslld $0x17, %zmm0, %zmm0 vcvttps2dq %zmm13, %zmm11 vaddps %zmm23, %zmm7, %zmm7 vpaddd %zmm16, %zmm11, %zmm11 vpslld $0x17, %zmm11, %zmm11 vcvttps2dq %zmm1, %zmm1 vaddps %zmm23, %zmm9, %zmm9 vpaddd %zmm16, %zmm1, %zmm1 vpslld $0x17, %zmm1, %zmm1 vaddps %zmm23, %zmm19, %zmm13 vmulps %zmm0, %zmm3, %zmm0 vcvttps2dq %zmm10, %zmm3 vpaddd %zmm16, %zmm3, %zmm3 vpslld $0x17, %zmm3, %zmm3 vmulps %zmm11, %zmm7, %zmm7 vaddps %zmm23, %zmm24, %zmm10 vcvttps2dq %zmm15, %zmm11 vpaddd %zmm16, %zmm11, %zmm11 vmulps %zmm1, %zmm9, %zmm1 vpslld $0x17, %zmm11, %zmm9 vaddps %zmm23, %zmm27, %zmm11 vcvttps2dq %zmm12, %zmm12 vmulps %zmm3, %zmm13, %zmm3 vpaddd %zmm16, %zmm12, %zmm12 vpslld $0x17, %zmm12, %zmm12 vaddps %zmm23, %zmm2, %zmm2 vmulps %zmm9, %zmm10, %zmm9 vcvttps2dq %zmm31, %zmm10 vpaddd %zmm16, %zmm10, %zmm10 vpslld $0x17, %zmm10, %zmm10 vmulps %zmm12, %zmm11, %zmm11 vaddps %zmm23, %zmm8, %zmm8 vcvttps2dq %zmm14, %zmm12 vpaddd %zmm16, %zmm12, %zmm12 vmulps %zmm10, %zmm2, %zmm2 vpslld $0x17, %zmm12, %zmm10 vmulps %zmm10, %zmm8, %zmm8 vmovups %zmm0, (%rdi) vmovups %zmm7, 0x40(%rdi) vmovups %zmm1, 0x80(%rdi) vmovups %zmm3, 0xc0(%rdi) vmovups %zmm9, 0x100(%rdi) vmovups %zmm11, 0x140(%rdi) vmovups %zmm2, 0x180(%rdi) vunpcklps %zmm7, %zmm0, %zmm10 # zmm10 = zmm0[0],zmm7[0],zmm0[1],zmm7[1],zmm0[4],zmm7[4],zmm0[5],zmm7[5],zmm0[8],zmm7[8],zmm0[9],zmm7[9],zmm0[12],zmm7[12],zmm0[13],zmm7[13] vunpckhps %zmm7, %zmm0, %zmm0 # zmm0 = zmm0[2],zmm7[2],zmm0[3],zmm7[3],zmm0[6],zmm7[6],zmm0[7],zmm7[7],zmm0[10],zmm7[10],zmm0[11],zmm7[11],zmm0[14],zmm7[14],zmm0[15],zmm7[15] vaddps %zmm0, %zmm10, %zmm0 vunpcklps %zmm3, %zmm1, %zmm7 # zmm7 = zmm1[0],zmm3[0],zmm1[1],zmm3[1],zmm1[4],zmm3[4],zmm1[5],zmm3[5],zmm1[8],zmm3[8],zmm1[9],zmm3[9],zmm1[12],zmm3[12],zmm1[13],zmm3[13] vunpckhps %zmm3, %zmm1, %zmm1 # zmm1 = zmm1[2],zmm3[2],zmm1[3],zmm3[3],zmm1[6],zmm3[6],zmm1[7],zmm3[7],zmm1[10],zmm3[10],zmm1[11],zmm3[11],zmm1[14],zmm3[14],zmm1[15],zmm3[15] vunpcklps %zmm11, %zmm9, %zmm3 # zmm3 = zmm9[0],zmm11[0],zmm9[1],zmm11[1],zmm9[4],zmm11[4],zmm9[5],zmm11[5],zmm9[8],zmm11[8],zmm9[9],zmm11[9],zmm9[12],zmm11[12],zmm9[13],zmm11[13] vaddps %zmm1, %zmm7, %zmm1 vunpckhps %zmm11, %zmm9, %zmm7 # zmm7 = zmm9[2],zmm11[2],zmm9[3],zmm11[3],zmm9[6],zmm11[6],zmm9[7],zmm11[7],zmm9[10],zmm11[10],zmm9[11],zmm11[11],zmm9[14],zmm11[14],zmm9[15],zmm11[15] vunpcklps %zmm8, %zmm2, %zmm9 # zmm9 = zmm2[0],zmm8[0],zmm2[1],zmm8[1],zmm2[4],zmm8[4],zmm2[5],zmm8[5],zmm2[8],zmm8[8],zmm2[9],zmm8[9],zmm2[12],zmm8[12],zmm2[13],zmm8[13] vunpckhps %zmm8, %zmm2, %zmm2 # zmm2 = 
zmm2[2],zmm8[2],zmm2[3],zmm8[3],zmm2[6],zmm8[6],zmm2[7],zmm8[7],zmm2[10],zmm8[10],zmm2[11],zmm8[11],zmm2[14],zmm8[14],zmm2[15],zmm8[15] vaddps %zmm7, %zmm3, %zmm3 vaddps %zmm2, %zmm9, %zmm2 vunpcklps %zmm1, %zmm0, %zmm7 # zmm7 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13] vunpckhps %zmm1, %zmm0, %zmm0 # zmm0 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15] vaddps %zmm0, %zmm7, %zmm0 vunpcklps %zmm2, %zmm3, %zmm1 # zmm1 = zmm3[0],zmm2[0],zmm3[1],zmm2[1],zmm3[4],zmm2[4],zmm3[5],zmm2[5],zmm3[8],zmm2[8],zmm3[9],zmm2[9],zmm3[12],zmm2[12],zmm3[13],zmm2[13] vunpckhps %zmm2, %zmm3, %zmm2 # zmm2 = zmm3[2],zmm2[2],zmm3[3],zmm2[3],zmm3[6],zmm2[6],zmm3[7],zmm2[7],zmm3[10],zmm2[10],zmm3[11],zmm2[11],zmm3[14],zmm2[14],zmm3[15],zmm2[15] vaddps %zmm2, %zmm1, %zmm1 vmovups %zmm8, 0x1c0(%rdi) vmovaps %zmm0, %zmm2 vmovaps 0x269adf(%rip), %zmm3 # 0x4ad840 vpermt2ps %zmm1, %zmm3, %zmm2 vmovaps 0x269b0f(%rip), %zmm3 # 0x4ad880 vpermt2ps %zmm1, %zmm3, %zmm0 vaddps %zmm2, %zmm0, %zmm0 vaddps (%r8), %zmm0, %zmm0 addq $0x200, %rdi # imm = 0x200 vmovups %zmm0, (%r8) addq $0x40, %r9 addq $0x40, %r8 addl $0x10, %r10d cmpl %ebp, %r10d jl 0x243666 movl %eax, %r10d vbroadcastss 0x262eda(%rip), %ymm28 # 0x4a6c8c vbroadcastss 0x261319(%rip), %ymm15 # 0x4a50d4 vbroadcastss 0x26130b(%rip), %ymm21 # 0x4a50d0 vbroadcastss 0x262ec5(%rip), %ymm22 # 0x4a6c94 vbroadcastss 0x262ebf(%rip), %ymm24 # 0x4a6c98 vbroadcastss 0x262ebd(%rip), %ymm18 # 0x4a6ca0 vbroadcastss 0x262eaf(%rip), %ymm26 # 0x4a6c9c vbroadcastss 0x262ead(%rip), %ymm27 # 0x4a6ca4 vbroadcastss 0x262ea7(%rip), %ymm25 # 0x4a6ca8 vpbroadcastd 0x2612c5(%rip), %ymm31 # 0x4a50d0 jmp 0x243e16 xorl %r10d, %r10d movq %r14, %r8 movq %r13, %r9 movl %r10d, %r11d orl $0x7, %r11d cmpl %ebp, %r11d vbroadcastss 0x262e5f(%rip), %ymm4 # 0x4a6c88 vbroadcastss 0x262e5e(%rip), %ymm6 # 0x4a6c90 vbroadcastss 0x262e71(%rip), %ymm5 # 0x4a6cac jge 0x244422 vmovups (%rdi), %ymm0 vmovups 0x20(%rdi), %ymm1 vmovups 0x40(%rdi), %ymm2 vmovups 0x60(%rdi), %ymm10 vmovups 0x80(%rdi), %ymm8 vmovups 0xa0(%rdi), %ymm19 vmovups 0xc0(%rdi), %ymm7 vsubps (%r9){1to8}, %ymm0, %ymm0 vmovups 0xe0(%rdi), %ymm14 vminps %ymm4, %ymm0, %ymm0 vmovaps %ymm6, %ymm3 vmaxps 0x262e01(%rip){1to8}, %ymm0, %ymm28 # 0x4a6c8c vfmadd213ps %ymm15, %ymm28, %ymm3 # ymm3 = (ymm28 * ymm3) + ymm15 vroundps $0x1, %ymm3, %ymm0 vcmpltps %ymm0, %ymm3, %k1 vsubps %ymm21, %ymm0, %ymm0 {%k1} vmovaps %ymm26, %ymm30 vsubps 0x4(%r9){1to8}, %ymm1, %ymm1 vfmsub231ps %ymm22, %ymm0, %ymm28 # ymm28 = (ymm0 * ymm22) - ymm28 vminps %ymm4, %ymm1, %ymm1 vmaxps 0x262dc7(%rip){1to8}, %ymm1, %ymm1 # 0x4a6c8c vmovaps %ymm6, %ymm3 vcvttps2dq %ymm0, %ymm31 vfmadd213ps %ymm15, %ymm1, %ymm3 # ymm3 = (ymm1 * ymm3) + ymm15 vroundps $0x1, %ymm3, %ymm13 vcmpltps %ymm13, %ymm3, %k1 vfnmsub231ps %ymm24, %ymm0, %ymm28 # ymm28 = -(ymm0 * ymm24) - ymm28 vsubps %ymm21, %ymm13, %ymm13 {%k1} vfmsub231ps %ymm22, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm22) - ymm1 vfnmsub231ps %ymm24, %ymm13, %ymm1 # ymm1 = -(ymm13 * ymm24) - ymm1 vmulps %ymm28, %ymm28, %ymm9 vmovaps %ymm26, %ymm23 vsubps 0x8(%r9){1to8}, %ymm2, %ymm0 vfmadd213ps %ymm18, %ymm28, %ymm30 # ymm30 = (ymm28 * ymm30) + ymm18 vminps %ymm4, %ymm0, %ymm0 vmaxps 0x262d6c(%rip){1to8}, %ymm0, %ymm3 # 0x4a6c8c vmovaps %ymm6, %ymm0 vmulps %ymm1, %ymm1, %ymm12 vfmadd213ps %ymm15, %ymm3, %ymm0 # ymm0 = (ymm3 * ymm0) + ymm15 vroundps $0x1, %ymm0, %ymm2 
vcmpltps %ymm2, %ymm0, %k1 vfmadd213ps %ymm18, %ymm1, %ymm23 # ymm23 = (ymm1 * ymm23) + ymm18 vsubps %ymm21, %ymm2, %ymm2 {%k1} vfmsub231ps %ymm22, %ymm2, %ymm3 # ymm3 = (ymm2 * ymm22) - ymm3 vfnmsub231ps %ymm24, %ymm2, %ymm3 # ymm3 = -(ymm2 * ymm24) - ymm3 vmulps %ymm3, %ymm3, %ymm21 vmovaps %ymm26, %ymm29 vsubps 0xc(%r9){1to8}, %ymm10, %ymm0 vfmadd213ps %ymm18, %ymm3, %ymm29 # ymm29 = (ymm3 * ymm29) + ymm18 vminps %ymm4, %ymm0, %ymm0 vmaxps 0x262d13(%rip){1to8}, %ymm0, %ymm15 # 0x4a6c8c vmovaps %ymm6, %ymm0 vfmadd213ps %ymm27, %ymm28, %ymm30 # ymm30 = (ymm28 * ymm30) + ymm27 vfmadd213ps 0x261147(%rip){1to8}, %ymm15, %ymm0 # ymm0 = (ymm15 * ymm0) + mem vroundps $0x1, %ymm0, %ymm10 vcmpltps %ymm10, %ymm0, %k1 vfmadd213ps %ymm27, %ymm1, %ymm23 # ymm23 = (ymm1 * ymm23) + ymm27 vsubps 0x261126(%rip){1to8}, %ymm10, %ymm10 {%k1} # 0x4a50d0 vfmsub231ps %ymm22, %ymm10, %ymm15 # ymm15 = (ymm10 * ymm22) - ymm15 vfnmsub231ps %ymm24, %ymm10, %ymm15 # ymm15 = -(ymm10 * ymm24) - ymm15 vfmadd213ps %ymm27, %ymm3, %ymm29 # ymm29 = (ymm3 * ymm29) + ymm27 vmulps %ymm15, %ymm15, %ymm22 vmovaps %ymm26, %ymm11 vfmadd213ps %ymm18, %ymm15, %ymm11 # ymm11 = (ymm15 * ymm11) + ymm18 vfmadd213ps %ymm27, %ymm15, %ymm11 # ymm11 = (ymm15 * ymm11) + ymm27 vsubps 0x10(%r9){1to8}, %ymm8, %ymm0 vminps %ymm4, %ymm0, %ymm0 vfmadd213ps %ymm25, %ymm28, %ymm30 # ymm30 = (ymm28 * ymm30) + ymm25 vmaxps 0x262c9d(%rip){1to8}, %ymm0, %ymm24 # 0x4a6c8c vmovaps %ymm6, %ymm0 vfmadd213ps 0x2610d7(%rip){1to8}, %ymm24, %ymm0 # ymm0 = (ymm24 * ymm0) + mem vfmadd213ps %ymm25, %ymm1, %ymm23 # ymm23 = (ymm1 * ymm23) + ymm25 vrndscaleps $0x1, %ymm0, %ymm16 vcmpltps %ymm16, %ymm0, %k1 vsubps 0x2610b5(%rip){1to8}, %ymm16, %ymm16 {%k1} # 0x4a50d0 vfmadd213ps %ymm25, %ymm3, %ymm29 # ymm29 = (ymm3 * ymm29) + ymm25 vfmsub231ps 0x262c69(%rip){1to8}, %ymm16, %ymm24 # ymm24 = (ymm16 * mem) - ymm24 vfnmsub231ps 0x262c63(%rip){1to8}, %ymm16, %ymm24 # ymm24 = -(ymm16 * mem) - ymm24 vmulps %ymm24, %ymm24, %ymm26 vfmadd213ps %ymm25, %ymm15, %ymm11 # ymm11 = (ymm15 * ymm11) + ymm25 vbroadcastss 0x262c52(%rip), %ymm8 # 0x4a6c9c vfmadd213ps %ymm18, %ymm24, %ymm8 # ymm8 = (ymm24 * ymm8) + ymm18 vfmadd213ps %ymm27, %ymm24, %ymm8 # ymm8 = (ymm24 * ymm8) + ymm27 vfmadd213ps %ymm5, %ymm28, %ymm30 # ymm30 = (ymm28 * ymm30) + ymm5 vfmadd213ps %ymm25, %ymm24, %ymm8 # ymm8 = (ymm24 * ymm8) + ymm25 vsubps 0x14(%r9){1to8}, %ymm19, %ymm0 vfmadd213ps %ymm5, %ymm1, %ymm23 # ymm23 = (ymm1 * ymm23) + ymm5 vminps %ymm4, %ymm0, %ymm0 vmaxps 0x262c0f(%rip){1to8}, %ymm0, %ymm27 # 0x4a6c8c vmovaps %ymm6, %ymm0 vfmadd213ps %ymm5, %ymm3, %ymm29 # ymm29 = (ymm3 * ymm29) + ymm5 vfmadd213ps 0x261043(%rip){1to8}, %ymm27, %ymm0 # ymm0 = (ymm27 * ymm0) + mem vrndscaleps $0x1, %ymm0, %ymm19 vcmpltps %ymm19, %ymm0, %k1 vfmadd213ps %ymm5, %ymm15, %ymm11 # ymm11 = (ymm15 * ymm11) + ymm5 vsubps 0x261022(%rip){1to8}, %ymm19, %ymm19 {%k1} # 0x4a50d0 vfmsub231ps 0x262bdc(%rip){1to8}, %ymm19, %ymm27 # ymm27 = (ymm19 * mem) - ymm27 vfnmsub231ps 0x262bd6(%rip){1to8}, %ymm19, %ymm27 # ymm27 = -(ymm19 * mem) - ymm27 vfmadd213ps %ymm5, %ymm24, %ymm8 # ymm8 = (ymm24 * ymm8) + ymm5 vmulps %ymm27, %ymm27, %ymm18 vbroadcastss 0x262bc4(%rip), %ymm20 # 0x4a6c9c vfmadd213ps 0x262bbe(%rip){1to8}, %ymm27, %ymm20 # ymm20 = (ymm27 * ymm20) + mem vfmadd213ps 0x260fe8(%rip){1to8}, %ymm28, %ymm30 # ymm30 = (ymm28 * ymm30) + mem vfmadd213ps 0x262bae(%rip){1to8}, %ymm27, %ymm20 # ymm20 = (ymm27 * ymm20) + mem vfmadd213ps %ymm25, %ymm27, %ymm20 # ymm20 = (ymm27 * ymm20) + ymm25 vfmadd213ps %ymm5, %ymm27, %ymm20 # ymm20 
= (ymm27 * ymm20) + ymm5 vfmadd213ps 0x260fc8(%rip){1to8}, %ymm1, %ymm23 # ymm23 = (ymm1 * ymm23) + mem vsubps 0x18(%r9){1to8}, %ymm7, %ymm0 vminps %ymm4, %ymm0, %ymm0 vfmadd213ps 0x260fb3(%rip){1to8}, %ymm3, %ymm29 # ymm29 = (ymm3 * ymm29) + mem vmaxps 0x262b61(%rip){1to8}, %ymm0, %ymm0 # 0x4a6c8c vmovaps %ymm6, %ymm25 vfmadd213ps 0x260f99(%rip){1to8}, %ymm0, %ymm25 # ymm25 = (ymm0 * ymm25) + mem vfmadd213ps 0x260f8f(%rip){1to8}, %ymm15, %ymm11 # ymm11 = (ymm15 * ymm11) + mem vrndscaleps $0x1, %ymm25, %ymm7 vcmpltps %ymm7, %ymm25, %k1 vsubps 0x260f73(%rip){1to8}, %ymm7, %ymm7 {%k1} # 0x4a50d0 vfmadd213ps 0x260f6d(%rip){1to8}, %ymm24, %ymm8 # ymm8 = (ymm24 * ymm8) + mem vfmsub231ps 0x262b23(%rip){1to8}, %ymm7, %ymm0 # ymm0 = (ymm7 * mem) - ymm0 vfnmsub231ps 0x262b1d(%rip){1to8}, %ymm7, %ymm0 # ymm0 = -(ymm7 * mem) - ymm0 vmulps %ymm0, %ymm0, %ymm25 vfmadd213ps 0x260f49(%rip){1to8}, %ymm27, %ymm20 # ymm20 = (ymm27 * ymm20) + mem vsubps 0x1c(%r9){1to8}, %ymm14, %ymm14 vminps %ymm4, %ymm14, %ymm14 vfmadd213ps %ymm28, %ymm9, %ymm30 # ymm30 = (ymm9 * ymm30) + ymm28 vbroadcastss 0x262ae6(%rip), %ymm28 # 0x4a6c8c vmaxps %ymm28, %ymm14, %ymm9 vmovaps %ymm6, %ymm14 vfmadd213ps 0x260f1a(%rip){1to8}, %ymm9, %ymm14 # ymm14 = (ymm9 * ymm14) + mem vfmadd213ps %ymm1, %ymm12, %ymm23 # ymm23 = (ymm12 * ymm23) + ymm1 vroundps $0x1, %ymm14, %ymm1 vcmpltps %ymm1, %ymm14, %k1 vbroadcastss 0x262ac6(%rip), %ymm12 # 0x4a6c9c vfmadd213ps %ymm3, %ymm21, %ymm29 # ymm29 = (ymm21 * ymm29) + ymm3 vbroadcastss 0x260eea(%rip), %ymm21 # 0x4a50d0 vfmadd213ps 0x262ab0(%rip){1to8}, %ymm0, %ymm12 # ymm12 = (ymm0 * ymm12) + mem vfmadd213ps 0x262aaa(%rip){1to8}, %ymm0, %ymm12 # ymm12 = (ymm0 * ymm12) + mem vfmadd213ps 0x262aa4(%rip){1to8}, %ymm0, %ymm12 # ymm12 = (ymm0 * ymm12) + mem vfmadd213ps %ymm15, %ymm22, %ymm11 # ymm11 = (ymm22 * ymm11) + ymm15 vbroadcastss 0x262a80(%rip), %ymm22 # 0x4a6c94 vbroadcastss 0x260eb7(%rip), %ymm15 # 0x4a50d4 vfmadd213ps %ymm5, %ymm0, %ymm12 # ymm12 = (ymm0 * ymm12) + ymm5 vfmadd213ps %ymm15, %ymm0, %ymm12 # ymm12 = (ymm0 * ymm12) + ymm15 vsubps %ymm21, %ymm1, %ymm1 {%k1} vfmadd213ps %ymm24, %ymm26, %ymm8 # ymm8 = (ymm26 * ymm8) + ymm24 vbroadcastss 0x262a5f(%rip), %ymm26 # 0x4a6c9c vbroadcastss 0x262a51(%rip), %ymm24 # 0x4a6c98 vfmsub231ps %ymm22, %ymm1, %ymm9 # ymm9 = (ymm1 * ymm22) - ymm9 vfnmsub231ps %ymm24, %ymm1, %ymm9 # ymm9 = -(ymm1 * ymm24) - ymm9 vmulps %ymm9, %ymm9, %ymm3 vfmadd213ps %ymm27, %ymm18, %ymm20 # ymm20 = (ymm18 * ymm20) + ymm27 vbroadcastss 0x262a3c(%rip), %ymm27 # 0x4a6ca4 vbroadcastss 0x262a2e(%rip), %ymm18 # 0x4a6ca0 vmovaps %ymm26, %ymm14 vfmadd213ps %ymm18, %ymm9, %ymm14 # ymm14 = (ymm9 * ymm14) + ymm18 vfmadd213ps %ymm27, %ymm9, %ymm14 # ymm14 = (ymm9 * ymm14) + ymm27 vfmadd213ps %ymm0, %ymm25, %ymm12 # ymm12 = (ymm25 * ymm12) + ymm0 vbroadcastss 0x262a14(%rip), %ymm25 # 0x4a6ca8 vfmadd213ps %ymm25, %ymm9, %ymm14 # ymm14 = (ymm9 * ymm14) + ymm25 vfmadd213ps %ymm5, %ymm9, %ymm14 # ymm14 = (ymm9 * ymm14) + ymm5 vfmadd213ps %ymm15, %ymm9, %ymm14 # ymm14 = (ymm9 * ymm14) + ymm15 vfmadd213ps %ymm9, %ymm3, %ymm14 # ymm14 = (ymm3 * ymm14) + ymm9 vpslld $0x17, %ymm31, %ymm0 vpbroadcastd 0x260e16(%rip), %ymm31 # 0x4a50d0 vpaddd %ymm31, %ymm0, %ymm0 vcvttps2dq %ymm13, %ymm3 vfmadd213ps %ymm0, %ymm30, %ymm0 # ymm0 = (ymm30 * ymm0) + ymm0 vpslld $0x17, %ymm3, %ymm3 vpaddd %ymm31, %ymm3, %ymm3 vcvttps2dq %ymm2, %ymm2 vfmadd213ps %ymm3, %ymm23, %ymm3 # ymm3 = (ymm23 * ymm3) + ymm3 vpslld $0x17, %ymm2, %ymm2 vpaddd %ymm31, %ymm2, %ymm2 vcvttps2dq %ymm10, %ymm9 vfmadd213ps %ymm2, 
%ymm29, %ymm2 # ymm2 = (ymm29 * ymm2) + ymm2 vpslld $0x17, %ymm9, %ymm9 vpaddd %ymm31, %ymm9, %ymm9 vcvttps2dq %ymm16, %ymm10 vfmadd213ps %ymm9, %ymm11, %ymm9 # ymm9 = (ymm11 * ymm9) + ymm9 vpslld $0x17, %ymm10, %ymm10 vpaddd %ymm31, %ymm10, %ymm10 vcvttps2dq %ymm19, %ymm11 vfmadd213ps %ymm10, %ymm8, %ymm10 # ymm10 = (ymm8 * ymm10) + ymm10 vpslld $0x17, %ymm11, %ymm8 vpaddd %ymm31, %ymm8, %ymm8 vcvttps2dq %ymm7, %ymm7 vfmadd213ps %ymm8, %ymm20, %ymm8 # ymm8 = (ymm20 * ymm8) + ymm8 vpslld $0x17, %ymm7, %ymm7 vpaddd %ymm31, %ymm7, %ymm7 vcvttps2dq %ymm1, %ymm1 vfmadd213ps %ymm7, %ymm12, %ymm7 # ymm7 = (ymm12 * ymm7) + ymm7 vpslld $0x17, %ymm1, %ymm1 vpaddd %ymm31, %ymm1, %ymm1 vfmadd213ps %ymm1, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm1) + ymm1 vmovups %ymm0, (%rdi) vmovups %ymm3, 0x20(%rdi) vmovups %ymm2, 0x40(%rdi) vmovups %ymm9, 0x60(%rdi) vmovups %ymm10, 0x80(%rdi) vmovups %ymm8, 0xa0(%rdi) vmovups %ymm7, 0xc0(%rdi) vmovups %ymm1, 0xe0(%rdi) vunpcklps %ymm10, %ymm0, %ymm11 # ymm11 = ymm0[0],ymm10[0],ymm0[1],ymm10[1],ymm0[4],ymm10[4],ymm0[5],ymm10[5] vunpckhps %ymm10, %ymm0, %ymm0 # ymm0 = ymm0[2],ymm10[2],ymm0[3],ymm10[3],ymm0[6],ymm10[6],ymm0[7],ymm10[7] vunpcklps %ymm7, %ymm2, %ymm10 # ymm10 = ymm2[0],ymm7[0],ymm2[1],ymm7[1],ymm2[4],ymm7[4],ymm2[5],ymm7[5] vunpckhps %ymm7, %ymm2, %ymm2 # ymm2 = ymm2[2],ymm7[2],ymm2[3],ymm7[3],ymm2[6],ymm7[6],ymm2[7],ymm7[7] vaddps %ymm0, %ymm11, %ymm0 vaddps %ymm2, %ymm10, %ymm2 vunpcklps %ymm8, %ymm3, %ymm7 # ymm7 = ymm3[0],ymm8[0],ymm3[1],ymm8[1],ymm3[4],ymm8[4],ymm3[5],ymm8[5] vunpckhps %ymm8, %ymm3, %ymm3 # ymm3 = ymm3[2],ymm8[2],ymm3[3],ymm8[3],ymm3[6],ymm8[6],ymm3[7],ymm8[7] vaddps %ymm3, %ymm7, %ymm3 vunpcklps %ymm1, %ymm9, %ymm7 # ymm7 = ymm9[0],ymm1[0],ymm9[1],ymm1[1],ymm9[4],ymm1[4],ymm9[5],ymm1[5] vunpckhps %ymm1, %ymm9, %ymm1 # ymm1 = ymm9[2],ymm1[2],ymm9[3],ymm1[3],ymm9[6],ymm1[6],ymm9[7],ymm1[7] vaddps %ymm1, %ymm7, %ymm1 vunpcklps %ymm2, %ymm0, %ymm7 # ymm7 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[4],ymm2[4],ymm0[5],ymm2[5] vunpckhps %ymm2, %ymm0, %ymm0 # ymm0 = ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[6],ymm2[6],ymm0[7],ymm2[7] vunpcklps %ymm1, %ymm3, %ymm2 # ymm2 = ymm3[0],ymm1[0],ymm3[1],ymm1[1],ymm3[4],ymm1[4],ymm3[5],ymm1[5] vunpckhps %ymm1, %ymm3, %ymm1 # ymm1 = ymm3[2],ymm1[2],ymm3[3],ymm1[3],ymm3[6],ymm1[6],ymm3[7],ymm1[7] vaddps %ymm0, %ymm7, %ymm0 vaddps %ymm1, %ymm2, %ymm1 vunpcklps %ymm1, %ymm0, %ymm2 # ymm2 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] vunpckhps %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] vinsertf128 $0x1, %xmm0, %ymm2, %ymm1 vperm2f128 $0x31, %ymm0, %ymm2, %ymm0 # ymm0 = ymm2[2,3],ymm0[2,3] vaddps (%r8), %ymm0, %ymm0 movl %r10d, %r11d vaddps %ymm1, %ymm0, %ymm0 addq $0x100, %rdi # imm = 0x100 addq $0x20, %r9 vmovups %ymm0, (%r8) addq $0x20, %r8 leal 0x8(%r11), %r10d addl $0xf, %r11d cmpl %ebp, %r11d jl 0x243e41 movl %ebp, %r11d subl %r10d, %r11d vbroadcastss 0x26288f(%rip), %ymm7 # 0x4a6cc0 vbroadcastss 0x263666(%rip), %ymm8 # 0x4a7aa0 jle 0x2444f9 xorl %r10d, %r10d vmovups (%rdi), %ymm0 vsubps (%r9,%r10,4){1to8}, %ymm0, %ymm0 vminps %ymm4, %ymm0, %ymm0 vmaxps %ymm28, %ymm0, %ymm0 vmovaps %ymm15, %ymm1 vfmadd231ps %ymm6, %ymm0, %ymm1 # ymm1 = (ymm0 * ymm6) + ymm1 vroundps $0x1, %ymm1, %ymm2 vcmpltps %ymm2, %ymm1, %k1 vaddps %ymm7, %ymm2, %ymm2 {%k1} vfmsub231ps %ymm22, %ymm2, %ymm0 # ymm0 = (ymm2 * ymm22) - ymm0 vfmsub231ps %ymm8, %ymm2, %ymm0 # ymm0 = (ymm2 * ymm8) - ymm0 vmulps %ymm0, %ymm0, %ymm1 vmovaps %ymm26, %ymm3 vfmadd213ps %ymm18, %ymm0, 
%ymm3 # ymm3 = (ymm0 * ymm3) + ymm18 vfmadd213ps %ymm27, %ymm0, %ymm3 # ymm3 = (ymm0 * ymm3) + ymm27 vfmadd213ps %ymm25, %ymm0, %ymm3 # ymm3 = (ymm0 * ymm3) + ymm25 vfmadd213ps %ymm5, %ymm0, %ymm3 # ymm3 = (ymm0 * ymm3) + ymm5 vfmadd213ps %ymm15, %ymm0, %ymm3 # ymm3 = (ymm0 * ymm3) + ymm15 vfmadd213ps %ymm0, %ymm1, %ymm3 # ymm3 = (ymm1 * ymm3) + ymm0 vcvttps2dq %ymm2, %ymm0 vpslld $0x17, %ymm0, %ymm0 vpaddd %ymm31, %ymm0, %ymm0 vfmadd213ps %ymm0, %ymm3, %ymm0 # ymm0 = (ymm3 * ymm0) + ymm0 vmovups %ymm0, (%rdi) vextractf128 $0x1, %ymm0, %xmm1 vaddps %xmm0, %xmm1, %xmm0 vshufpd $0x1, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[1,0] vaddps %xmm0, %xmm1, %xmm0 vmovshdup %xmm0, %xmm1 # xmm1 = xmm0[1,1,3,3] vaddss (%r8,%r10,4), %xmm1, %xmm1 vaddss %xmm0, %xmm1, %xmm0 vmovss %xmm0, (%r8,%r10,4) addq $0x20, %rdi incq %r10 cmpl %r10d, %r11d jne 0x244443 incq %rsi cmpq %rdx, %rsi vmovaps 0x2693f7(%rip), %zmm23 # 0x4ad900 jne 0x243646 xorl %edx, %edx movq %r14, %rax cmpl $0x10, %ebp jl 0x24454a movl $0xf, %ecx vbroadcastss 0x260ba8(%rip), %zmm0 # 0x4a50d0 movq %r14, %rax vdivps (%rax), %zmm0, %zmm1 vmovups %zmm1, (%rax) addq $0x40, %rax addl $0x10, %ecx cmpl %ebp, %ecx jl 0x24452b movl %ebp, %edx andl $0x7ffffff0, %edx # imm = 0x7FFFFFF0 movl %edx, %ecx orl $0x7, %ecx cmpl %ebp, %ecx jge 0x2449d0 vbroadcastss 0x260b70(%rip), %ymm0 # 0x4a50d0 vdivps (%rax), %ymm0, %ymm1 vmovups %ymm1, (%rax) addq $0x20, %rax leal 0x8(%rdx), %ecx addl $0xf, %edx cmpl %ebp, %edx movl %ecx, %edx jl 0x244560 jmp 0x2449d2 movl %edx, %ecx movl %ecx, %edx orl $0x3, %edx cmpl %ebp, %edx jge 0x2445e0 vbroadcastss 0x260b3f(%rip), %xmm0 # 0x4a50d0 vdivps (%rax), %xmm0, %xmm1 vmovups %xmm1, (%rax) addq $0x10, %rax leal 0x4(%rcx), %edx addl $0x7, %ecx cmpl %ebp, %ecx movl %edx, %ecx jl 0x244591 jmp 0x2445e2 movl %edi, %esi movl %esi, %edi orl $0x3, %edi cmpl %ebp, %edi jge 0x244855 vbroadcastss 0x260b0d(%rip), %xmm0 # 0x4a50d0 vdivps (%rdx), %xmm0, %xmm1 vmovups %xmm1, (%rdx) addq $0x10, %rdx leal 0x4(%rsi), %edi addl $0x7, %esi cmpl %ebp, %esi movl %edi, %esi jl 0x2445c3 jmp 0x244857 movl %ecx, %edx cmpl %ebp, %edx jge 0x244659 notl %edx addl %ebp, %edx leaq 0x10(%rdx), %rcx andq $-0x10, %rcx vpbroadcastq %rdx, %zmm0 xorl %edx, %edx vpmovsxbq 0x26231a(%rip), %zmm1 # 0x4a691e vpmovsxbq 0x262318(%rip), %zmm2 # 0x4a6926 vbroadcastss 0x260ab8(%rip), %zmm3 # 0x4a50d0 vpbroadcastq %rdx, %zmm4 vporq %zmm1, %zmm4, %zmm5 vporq %zmm2, %zmm4, %zmm4 vpcmpleuq %zmm0, %zmm4, %k0 vpcmpleuq %zmm0, %zmm5, %k1 kunpckbw %k0, %k1, %k1 vmovups (%rax,%rdx,4), %zmm4 {%k1} {z} vdivps %zmm4, %zmm3, %zmm4 vmovups %zmm4, (%rax,%rdx,4) {%k1} addq $0x10, %rdx cmpq %rdx, %rcx jne 0x244618 testl %r15d, %r15d jle 0x244ddb movl %ebp, %eax andl $-0x10, %eax movslq 0x20(%rsp), %rcx movl %r15d, %edx xorl %esi, %esi vmovaps 0x269005(%rip), %zmm0 # 0x4ad680 vmovaps 0x26903b(%rip), %zmm1 # 0x4ad6c0 vmovaps 0x269071(%rip), %zmm2 # 0x4ad700 vmovaps 0x2690a7(%rip), %zmm3 # 0x4ad740 movq %rsi, %rdi imulq %rcx, %rdi leaq (%rbx,%rdi,4), %rdi cmpl $0x10, %ebp jl 0x24471f movl $0xf, %r9d movq %r14, %r8 vmovups (%r8), %zmm4 vpermps %zmm4, %zmm0, %zmm5 vpermps %zmm4, %zmm1, %zmm6 vpermps %zmm4, %zmm2, %zmm7 vpermps %zmm4, %zmm3, %zmm4 vmulps (%rdi), %zmm5, %zmm5 vmulps 0x40(%rdi), %zmm6, %zmm6 vmulps 0x80(%rdi), %zmm7, %zmm7 vmulps 0xc0(%rdi), %zmm4, %zmm4 vmovups %zmm5, (%rdi) vmovups %zmm6, 0x40(%rdi) vmovups %zmm7, 0x80(%rdi) vmovups %zmm4, 0xc0(%rdi) addq $0x100, %rdi # imm = 0x100 addq $0x40, %r8 addl $0x10, %r9d cmpl %ebp, %r9d jl 0x2446b2 movl %eax, %r9d jmp 0x244725 xorl %r9d, 
%r9d movq %r14, %r8 movl %r9d, %r10d orl $0x7, %r10d cmpl %ebp, %r10d jge 0x2447be vbroadcastss (%r8), %xmm4 vbroadcastss 0x4(%r8), %xmm5 vinsertf128 $0x1, %xmm5, %ymm4, %ymm4 vmulps (%rdi), %ymm4, %ymm4 vbroadcastss 0x8(%r8), %xmm5 vbroadcastss 0xc(%r8), %xmm6 vinsertf128 $0x1, %xmm6, %ymm5, %ymm5 vmulps 0x20(%rdi), %ymm5, %ymm5 vbroadcastss 0x10(%r8), %xmm6 vbroadcastss 0x14(%r8), %xmm7 vinsertf128 $0x1, %xmm7, %ymm6, %ymm6 vmulps 0x40(%rdi), %ymm6, %ymm6 vbroadcastss 0x18(%r8), %xmm7 vbroadcastss 0x1c(%r8), %xmm8 vinsertf128 $0x1, %xmm8, %ymm7, %ymm7 vmulps 0x60(%rdi), %ymm7, %ymm7 vmovups %ymm4, (%rdi) vmovups %ymm5, 0x20(%rdi) vmovups %ymm6, 0x40(%rdi) vmovups %ymm7, 0x60(%rdi) subq $-0x80, %rdi addq $0x20, %r8 leal 0x8(%r9), %r10d addl $0xf, %r9d cmpl %ebp, %r9d movl %r10d, %r9d jl 0x244735 movl %r9d, %r10d orl $0x3, %r10d cmpl %ebp, %r10d jge 0x24481f vbroadcastss (%r8), %xmm4 vmulps (%rdi), %xmm4, %xmm4 vbroadcastss 0x4(%r8), %xmm5 vmulps 0x10(%rdi), %xmm5, %xmm5 vbroadcastss 0x8(%r8), %xmm6 vmulps 0x20(%rdi), %xmm6, %xmm6 vbroadcastss 0xc(%r8), %xmm7 vmulps 0x30(%rdi), %xmm7, %xmm7 vmovups %xmm4, (%rdi) vmovups %xmm5, 0x10(%rdi) vmovups %xmm6, 0x20(%rdi) vmovups %xmm7, 0x30(%rdi) addq $0x40, %rdi addq $0x10, %r8 leal 0x4(%r9), %r10d addl $0x7, %r9d cmpl %ebp, %r9d movl %r10d, %r9d jl 0x2447ca movl %ebp, %r10d subl %r9d, %r10d jle 0x244844 xorl %r9d, %r9d vbroadcastss (%r8,%r9,4), %xmm4 vmulps (%rdi), %xmm4, %xmm4 vmovups %xmm4, (%rdi) addq $0x10, %rdi incq %r9 cmpl %r9d, %r10d jne 0x24482a incq %rsi cmpq %rdx, %rsi jne 0x244699 jmp 0x244ddb movl %esi, %edi cmpl %ebp, %edi jge 0x2448ce notl %edi addl %ebp, %edi leaq 0x10(%rdi), %rsi andq $-0x10, %rsi vpbroadcastq %rdi, %zmm0 xorl %edi, %edi vpmovsxbq 0x2620a5(%rip), %zmm1 # 0x4a691e vpmovsxbq 0x2620a3(%rip), %zmm2 # 0x4a6926 vbroadcastss 0x260843(%rip), %zmm3 # 0x4a50d0 vpbroadcastq %rdi, %zmm4 vporq %zmm1, %zmm4, %zmm5 vporq %zmm2, %zmm4, %zmm4 vpcmpleuq %zmm0, %zmm4, %k0 vpcmpleuq %zmm0, %zmm5, %k1 kunpckbw %k0, %k1, %k1 vmovups (%rdx,%rdi,4), %zmm4 {%k1} {z} vdivps %zmm4, %zmm3, %zmm4 vmovups %zmm4, (%rdx,%rdi,4) {%k1} addq $0x10, %rdi cmpq %rdi, %rsi jne 0x24488d testl %r15d, %r15d jle 0x244ddb movl %ebp, %edx andl $-0x4, %edx xorl %esi, %esi movq %rsi, %rdi imulq %rax, %rdi leaq (%rbx,%rdi,4), %rdi cmpl $0x4, %ebp jl 0x244961 movl $0x3, %r9d movq %r14, %r8 vbroadcastss (%r8), %zmm0 vmulps (%rdi), %zmm0, %zmm0 vbroadcastss 0x4(%r8), %zmm1 vmulps 0x40(%rdi), %zmm1, %zmm1 vbroadcastss 0x8(%r8), %zmm2 vmulps 0x80(%rdi), %zmm2, %zmm2 vbroadcastss 0xc(%r8), %zmm3 vmulps 0xc0(%rdi), %zmm3, %zmm3 vmovups %zmm0, (%rdi) vmovups %zmm1, 0x40(%rdi) vmovups %zmm2, 0x80(%rdi) vmovups %zmm3, 0xc0(%rdi) addq $0x100, %rdi # imm = 0x100 addq $0x10, %r8 addl $0x4, %r9d cmpl %ebp, %r9d jl 0x2448f7 movl %edx, %r10d jmp 0x244967 movq %r14, %r8 xorl %r10d, %r10d movl %ebp, %r9d subl %r10d, %r9d jle 0x244991 xorl %r10d, %r10d vbroadcastss (%r8,%r10,4), %zmm0 vmulps (%rdi), %zmm0, %zmm0 vmovups %zmm0, (%rdi) addq $0x40, %rdi incq %r10 cmpl %r10d, %r9d jne 0x244972 incq %rsi cmpq %rcx, %rsi jne 0x2448de jmp 0x244ddb movl %edx, %ecx movl %ecx, %edx orl $0x3, %edx cmpl %ebp, %edx jge 0x244a05 vbroadcastss 0x26071a(%rip), %xmm0 # 0x4a50d0 vdivps (%rax), %xmm0, %xmm1 vmovups %xmm1, (%rax) addq $0x10, %rax leal 0x4(%rcx), %edx addl $0x7, %ecx cmpl %ebp, %ecx movl %edx, %ecx jl 0x2449b6 jmp 0x244a07 movl %edx, %ecx movl %ecx, %edx orl $0x3, %edx cmpl %ebp, %edx jge 0x244b73 vbroadcastss 0x2606e8(%rip), %xmm0 # 0x4a50d0 vdivps (%rax), %xmm0, %xmm1 vmovups 
%xmm1, (%rax) addq $0x10, %rax leal 0x4(%rcx), %edx addl $0x7, %ecx cmpl %ebp, %ecx movl %edx, %ecx jl 0x2449e8 jmp 0x244b75 movl %ecx, %edx cmpl %ebp, %edx jge 0x244a7e notl %edx addl %ebp, %edx leaq 0x10(%rdx), %rcx andq $-0x10, %rcx vpbroadcastq %rdx, %zmm0 xorl %edx, %edx vpmovsxbq 0x261ef5(%rip), %zmm1 # 0x4a691e vpmovsxbq 0x261ef3(%rip), %zmm2 # 0x4a6926 vbroadcastss 0x260693(%rip), %zmm3 # 0x4a50d0 vpbroadcastq %rdx, %zmm4 vporq %zmm1, %zmm4, %zmm5 vporq %zmm2, %zmm4, %zmm4 vpcmpleuq %zmm0, %zmm4, %k0 vpcmpleuq %zmm0, %zmm5, %k1 kunpckbw %k0, %k1, %k1 vmovups (%rax,%rdx,4), %zmm4 {%k1} {z} vdivps %zmm4, %zmm3, %zmm4 vmovups %zmm4, (%rax,%rdx,4) {%k1} addq $0x10, %rdx cmpq %rdx, %rcx jne 0x244a3d testl %esi, %esi jle 0x244ddb movl %ebp, %eax andl $-0x10, %eax movslq 0x20(%rsp), %rcx movl %esi, %edx xorl %esi, %esi movq %rsi, %rdi imulq %rcx, %rdi leaq (%rbx,%rdi,4), %rdi cmpl $0x10, %ebp jl 0x244ad5 movl $0xf, %r9d movq %r14, %r8 vmovups (%r8), %zmm0 vmulps (%rdi), %zmm0, %zmm0 vmovups %zmm0, (%rdi) addq $0x40, %rdi addq $0x40, %r8 addl $0x10, %r9d cmpl %ebp, %r9d jl 0x244aad movl %eax, %r9d jmp 0x244adb movq %r14, %r8 xorl %r9d, %r9d movl %r9d, %r10d orl $0x7, %r10d cmpl %ebp, %r10d jge 0x244b0c vmovups (%r8), %ymm0 vmulps (%rdi), %ymm0, %ymm0 vmovups %ymm0, (%rdi) addq $0x20, %rdi addq $0x20, %r8 leal 0x8(%r9), %r10d addl $0xf, %r9d cmpl %ebp, %r9d movl %r10d, %r9d jl 0x244ae7 movl %r9d, %r10d orl $0x3, %r10d cmpl %ebp, %r10d jge 0x244b3d vmovups (%r8), %xmm0 vmulps (%rdi), %xmm0, %xmm0 vmovups %xmm0, (%rdi) addq $0x10, %rdi addq $0x10, %r8 leal 0x4(%r9), %r10d addl $0x7, %r9d cmpl %ebp, %r9d movl %r10d, %r9d jl 0x244b18 movl %ebp, %r10d subl %r9d, %r10d jle 0x244b62 xorl %r9d, %r9d vmovss (%rdi,%r9,4), %xmm0 vmulss (%r8,%r9,4), %xmm0, %xmm0 vmovss %xmm0, (%rdi,%r9,4) incq %r9 cmpl %r9d, %r10d jne 0x244b48 incq %rsi cmpq %rdx, %rsi jne 0x244a94 jmp 0x244ddb movl %ecx, %edx cmpl %ebp, %edx jge 0x244bec notl %edx addl %ebp, %edx leaq 0x10(%rdx), %rcx andq $-0x10, %rcx vpbroadcastq %rdx, %zmm0 xorl %edx, %edx vpmovsxbq 0x261d87(%rip), %zmm1 # 0x4a691e vpmovsxbq 0x261d85(%rip), %zmm2 # 0x4a6926 vbroadcastss 0x260525(%rip), %zmm3 # 0x4a50d0 vpbroadcastq %rdx, %zmm4 vporq %zmm1, %zmm4, %zmm5 vporq %zmm2, %zmm4, %zmm4 vpcmpleuq %zmm0, %zmm4, %k0 vpcmpleuq %zmm0, %zmm5, %k1 kunpckbw %k0, %k1, %k1 vmovups (%rax,%rdx,4), %zmm4 {%k1} {z} vdivps %zmm4, %zmm3, %zmm4 vmovups %zmm4, (%rax,%rdx,4) {%k1} addq $0x10, %rdx cmpq %rdx, %rcx jne 0x244bab testl %r15d, %r15d jle 0x244ddb movl %ebp, %eax andl $-0x10, %eax movslq 0x20(%rsp), %rcx movl %r15d, %edx xorl %esi, %esi movq %rsi, %rdi imulq %rcx, %rdi leaq (%rbx,%rdi,4), %rdi cmpl $0x10, %ebp jl 0x244d43 movl $0xf, %r9d movq %r14, %r8 vbroadcastss (%r8), %ymm0 vbroadcastss 0x4(%r8), %ymm1 vbroadcastss 0x8(%r8), %ymm2 vbroadcastss 0xc(%r8), %ymm3 vinsertf64x4 $0x1, %ymm1, %zmm0, %zmm0 vinsertf64x4 $0x1, %ymm3, %zmm2, %zmm1 vbroadcastss 0x10(%r8), %ymm2 vbroadcastss 0x14(%r8), %ymm3 vbroadcastss 0x18(%r8), %ymm4 vinsertf64x4 $0x1, %ymm3, %zmm2, %zmm2 vbroadcastss 0x1c(%r8), %ymm3 vinsertf64x4 $0x1, %ymm3, %zmm4, %zmm3 vbroadcastss 0x20(%r8), %ymm4 vbroadcastss 0x24(%r8), %ymm5 vbroadcastss 0x28(%r8), %ymm6 vbroadcastss 0x2c(%r8), %ymm7 vinsertf64x4 $0x1, %ymm5, %zmm4, %zmm4 vinsertf64x4 $0x1, %ymm7, %zmm6, %zmm5 vbroadcastss 0x30(%r8), %ymm6 vbroadcastss 0x34(%r8), %ymm7 vbroadcastss 0x38(%r8), %ymm8 vinsertf64x4 $0x1, %ymm7, %zmm6, %zmm6 vbroadcastss 0x3c(%r8), %ymm7 vinsertf64x4 $0x1, %ymm7, %zmm8, %zmm7 vmulps (%rdi), %zmm0, %zmm0 vmulps 
0x40(%rdi), %zmm1, %zmm1 vmulps 0x80(%rdi), %zmm2, %zmm2 vmulps 0xc0(%rdi), %zmm3, %zmm3 vmulps 0x100(%rdi), %zmm4, %zmm4 vmulps 0x140(%rdi), %zmm5, %zmm5 vmulps 0x180(%rdi), %zmm6, %zmm6 vmulps 0x1c0(%rdi), %zmm7, %zmm7 vmovups %zmm0, (%rdi) vmovups %zmm1, 0x40(%rdi) vmovups %zmm2, 0x80(%rdi) vmovups %zmm3, 0xc0(%rdi) vmovups %zmm4, 0x100(%rdi) vmovups %zmm5, 0x140(%rdi) vmovups %zmm6, 0x180(%rdi) vmovups %zmm7, 0x1c0(%rdi) addq $0x200, %rdi # imm = 0x200 addq $0x40, %r8 addl $0x10, %r9d cmpl %ebp, %r9d jl 0x244c21 movl %eax, %r9d jmp 0x244d49 xorl %r9d, %r9d movq %r14, %r8 movl %r9d, %r10d orl $0x3, %r10d cmpl %ebp, %r10d jge 0x244daa vbroadcastss (%r8), %ymm0 vmulps (%rdi), %ymm0, %ymm0 vbroadcastss 0x4(%r8), %ymm1 vmulps 0x20(%rdi), %ymm1, %ymm1 vbroadcastss 0x8(%r8), %ymm2 vmulps 0x40(%rdi), %ymm2, %ymm2 vbroadcastss 0xc(%r8), %ymm3 vmulps 0x60(%rdi), %ymm3, %ymm3 vmovups %ymm0, (%rdi) vmovups %ymm1, 0x20(%rdi) vmovups %ymm2, 0x40(%rdi) vmovups %ymm3, 0x60(%rdi) subq $-0x80, %rdi addq $0x10, %r8 leal 0x4(%r9), %r10d addl $0x7, %r9d cmpl %ebp, %r9d movl %r10d, %r9d jl 0x244d55 movl %ebp, %r10d subl %r9d, %r10d jle 0x244dcf xorl %r9d, %r9d vbroadcastss (%r8,%r9,4), %ymm0 vmulps (%rdi), %ymm0, %ymm0 vmovups %ymm0, (%rdi) addq $0x20, %rdi incq %r9 cmpl %r9d, %r10d jne 0x244db5 incq %rsi cmpq %rdx, %rsi jne 0x244c04 addq $0x4a8, %rsp # imm = 0x4A8 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp vzeroupper retq
/Tencent[P]ncnn/build_O3/src/layer/x86/softmax_x86_avx512.cpp
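The AVX-512 block above vectorizes the classic three-pass softmax: a per-row max reduction (the vmaxps/vunpcklps cascades), exp(x - max) via a polynomial approximation with 2^n scaling (the vfmadd chains followed by vpslld $0x17) accumulated into a row sum, and finally a multiply by the reciprocal of that sum (the vdivps of a broadcast 1.0f). A minimal scalar sketch of the same three passes, with an illustrative function name and row-major layout assumed:

#include <algorithm>
#include <cmath>
#include <cstddef>

// Scalar reference for the three passes the AVX-512 kernel vectorizes:
// 1) per-row max, 2) exp(x - max) with a running sum, 3) scale by 1/sum.
static void softmax_rows_ref(float* data, size_t rows, size_t cols)
{
    for (size_t r = 0; r < rows; r++)
    {
        float* p = data + r * cols;

        float maxv = p[0];
        for (size_t c = 1; c < cols; c++)
            maxv = std::max(maxv, p[c]);

        float sum = 0.f;
        for (size_t c = 0; c < cols; c++)
        {
            p[c] = std::exp(p[c] - maxv); // the asm uses a range-reduced polynomial exp
            sum += p[c];
        }

        const float rsum = 1.f / sum; // the asm computes this with vdivps on broadcast 1.0f
        for (size_t c = 0; c < cols; c++)
            p[c] *= rsum;
    }
}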
ncnn::convdw3x3s2_pack4_sse(ncnn::Mat const&, ncnn::Mat&, ncnn::Mat const&, ncnn::Mat const&, ncnn::Option const&)
static void convdw3x3s2_pack4_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int outw = top_blob.w; int outh = top_blob.h; const int group = bottom_blob.c; const int tailstep = (w - 2 * outw + w) * 4; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int g = 0; g < group; g++) { Mat out = top_blob.channel(g); __m128 _bias0 = bias ? _mm_loadu_ps((const float*)bias + g * 4) : _mm_set1_ps(0.f); const float* k0 = kernel.row(g); float* outptr0 = out.row(0); const Mat img0 = bottom_blob.channel(g); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); __m128 _k00 = _mm_load_ps(k0); __m128 _k01 = _mm_load_ps(k0 + 4); __m128 _k02 = _mm_load_ps(k0 + 8); __m128 _k10 = _mm_load_ps(k0 + 12); __m128 _k11 = _mm_load_ps(k0 + 16); __m128 _k12 = _mm_load_ps(k0 + 20); __m128 _k20 = _mm_load_ps(k0 + 24); __m128 _k21 = _mm_load_ps(k0 + 28); __m128 _k22 = _mm_load_ps(k0 + 32); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { __m128 _sum0 = _bias0; __m128 _r00 = _mm_load_ps(r0); __m128 _r01 = _mm_load_ps(r0 + 4); __m128 _r02 = _mm_load_ps(r0 + 8); __m128 _r10 = _mm_load_ps(r1); __m128 _r11 = _mm_load_ps(r1 + 4); __m128 _r12 = _mm_load_ps(r1 + 8); __m128 _r20 = _mm_load_ps(r2); __m128 _r21 = _mm_load_ps(r2 + 4); __m128 _r22 = _mm_load_ps(r2 + 8); _sum0 = _mm_comp_fmadd_ps(_k00, _r00, _sum0); _sum0 = _mm_comp_fmadd_ps(_k01, _r01, _sum0); _sum0 = _mm_comp_fmadd_ps(_k02, _r02, _sum0); _sum0 = _mm_comp_fmadd_ps(_k10, _r10, _sum0); _sum0 = _mm_comp_fmadd_ps(_k11, _r11, _sum0); _sum0 = _mm_comp_fmadd_ps(_k12, _r12, _sum0); _sum0 = _mm_comp_fmadd_ps(_k20, _r20, _sum0); _sum0 = _mm_comp_fmadd_ps(_k21, _r21, _sum0); _sum0 = _mm_comp_fmadd_ps(_k22, _r22, _sum0); __m128 _sum1 = _bias0; __m128 _r03 = _mm_load_ps(r0 + 12); __m128 _r13 = _mm_load_ps(r1 + 12); __m128 _r23 = _mm_load_ps(r2 + 12); __m128 _r04 = _mm_load_ps(r0 + 16); __m128 _r14 = _mm_load_ps(r1 + 16); __m128 _r24 = _mm_load_ps(r2 + 16); _mm_store_ps(outptr0, _sum0); _sum1 = _mm_comp_fmadd_ps(_k00, _r02, _sum1); _sum1 = _mm_comp_fmadd_ps(_k01, _r03, _sum1); _sum1 = _mm_comp_fmadd_ps(_k02, _r04, _sum1); _sum1 = _mm_comp_fmadd_ps(_k10, _r12, _sum1); _sum1 = _mm_comp_fmadd_ps(_k11, _r13, _sum1); _sum1 = _mm_comp_fmadd_ps(_k12, _r14, _sum1); _sum1 = _mm_comp_fmadd_ps(_k20, _r22, _sum1); _sum1 = _mm_comp_fmadd_ps(_k21, _r23, _sum1); _sum1 = _mm_comp_fmadd_ps(_k22, _r24, _sum1); __m128 _sum2 = _bias0; __m128 _r05 = _mm_load_ps(r0 + 20); __m128 _r15 = _mm_load_ps(r1 + 20); __m128 _r25 = _mm_load_ps(r2 + 20); __m128 _r06 = _mm_load_ps(r0 + 24); __m128 _r16 = _mm_load_ps(r1 + 24); __m128 _r26 = _mm_load_ps(r2 + 24); _mm_store_ps(outptr0 + 4, _sum1); _sum2 = _mm_comp_fmadd_ps(_k00, _r04, _sum2); _sum2 = _mm_comp_fmadd_ps(_k01, _r05, _sum2); _sum2 = _mm_comp_fmadd_ps(_k02, _r06, _sum2); _sum2 = _mm_comp_fmadd_ps(_k10, _r14, _sum2); _sum2 = _mm_comp_fmadd_ps(_k11, _r15, _sum2); _sum2 = _mm_comp_fmadd_ps(_k12, _r16, _sum2); _sum2 = _mm_comp_fmadd_ps(_k20, _r24, _sum2); _sum2 = _mm_comp_fmadd_ps(_k21, _r25, _sum2); _sum2 = _mm_comp_fmadd_ps(_k22, _r26, _sum2); __m128 _sum3 = _bias0; __m128 _r07 = _mm_load_ps(r0 + 28); __m128 _r17 = _mm_load_ps(r1 + 28); __m128 _r27 = _mm_load_ps(r2 + 28); __m128 _r08 = _mm_load_ps(r0 + 32); __m128 _r18 = _mm_load_ps(r1 + 32); __m128 _r28 = _mm_load_ps(r2 + 32); _mm_store_ps(outptr0 + 8, _sum2); _sum3 = _mm_comp_fmadd_ps(_k00, _r06, _sum3); _sum3 = 
_mm_comp_fmadd_ps(_k01, _r07, _sum3); _sum3 = _mm_comp_fmadd_ps(_k02, _r08, _sum3); _sum3 = _mm_comp_fmadd_ps(_k10, _r16, _sum3); _sum3 = _mm_comp_fmadd_ps(_k11, _r17, _sum3); _sum3 = _mm_comp_fmadd_ps(_k12, _r18, _sum3); _sum3 = _mm_comp_fmadd_ps(_k20, _r26, _sum3); _sum3 = _mm_comp_fmadd_ps(_k21, _r27, _sum3); _sum3 = _mm_comp_fmadd_ps(_k22, _r28, _sum3); _mm_store_ps(outptr0 + 12, _sum3); r0 += 2 * 16; r1 += 2 * 16; r2 += 2 * 16; outptr0 += 16; } for (; j + 1 < outw; j += 2) { __m128 _sum0 = _bias0; __m128 _r00 = _mm_load_ps(r0); __m128 _r01 = _mm_load_ps(r0 + 4); __m128 _r02 = _mm_load_ps(r0 + 8); __m128 _r10 = _mm_load_ps(r1); __m128 _r11 = _mm_load_ps(r1 + 4); __m128 _r12 = _mm_load_ps(r1 + 8); __m128 _r20 = _mm_load_ps(r2); __m128 _r21 = _mm_load_ps(r2 + 4); __m128 _r22 = _mm_load_ps(r2 + 8); _sum0 = _mm_comp_fmadd_ps(_k00, _r00, _sum0); _sum0 = _mm_comp_fmadd_ps(_k01, _r01, _sum0); _sum0 = _mm_comp_fmadd_ps(_k02, _r02, _sum0); _sum0 = _mm_comp_fmadd_ps(_k10, _r10, _sum0); _sum0 = _mm_comp_fmadd_ps(_k11, _r11, _sum0); _sum0 = _mm_comp_fmadd_ps(_k12, _r12, _sum0); _sum0 = _mm_comp_fmadd_ps(_k20, _r20, _sum0); _sum0 = _mm_comp_fmadd_ps(_k21, _r21, _sum0); _sum0 = _mm_comp_fmadd_ps(_k22, _r22, _sum0); __m128 _sum1 = _bias0; __m128 _r03 = _mm_load_ps(r0 + 12); __m128 _r13 = _mm_load_ps(r1 + 12); __m128 _r23 = _mm_load_ps(r2 + 12); __m128 _r04 = _mm_load_ps(r0 + 16); __m128 _r14 = _mm_load_ps(r1 + 16); __m128 _r24 = _mm_load_ps(r2 + 16); _mm_store_ps(outptr0, _sum0); _sum1 = _mm_comp_fmadd_ps(_k00, _r02, _sum1); _sum1 = _mm_comp_fmadd_ps(_k01, _r03, _sum1); _sum1 = _mm_comp_fmadd_ps(_k02, _r04, _sum1); _sum1 = _mm_comp_fmadd_ps(_k10, _r12, _sum1); _sum1 = _mm_comp_fmadd_ps(_k11, _r13, _sum1); _sum1 = _mm_comp_fmadd_ps(_k12, _r14, _sum1); _sum1 = _mm_comp_fmadd_ps(_k20, _r22, _sum1); _sum1 = _mm_comp_fmadd_ps(_k21, _r23, _sum1); _sum1 = _mm_comp_fmadd_ps(_k22, _r24, _sum1); _mm_store_ps(outptr0 + 4, _sum1); r0 += 2 * 8; r1 += 2 * 8; r2 += 2 * 8; outptr0 += 8; } for (; j < outw; j++) { __m128 _sum0 = _bias0; __m128 _r00 = _mm_load_ps(r0); __m128 _r01 = _mm_load_ps(r0 + 4); __m128 _r02 = _mm_load_ps(r0 + 8); __m128 _r10 = _mm_load_ps(r1); __m128 _r11 = _mm_load_ps(r1 + 4); __m128 _r12 = _mm_load_ps(r1 + 8); __m128 _r20 = _mm_load_ps(r2); __m128 _r21 = _mm_load_ps(r2 + 4); __m128 _r22 = _mm_load_ps(r2 + 8); _sum0 = _mm_comp_fmadd_ps(_k00, _r00, _sum0); _sum0 = _mm_comp_fmadd_ps(_k01, _r01, _sum0); _sum0 = _mm_comp_fmadd_ps(_k02, _r02, _sum0); _sum0 = _mm_comp_fmadd_ps(_k10, _r10, _sum0); _sum0 = _mm_comp_fmadd_ps(_k11, _r11, _sum0); _sum0 = _mm_comp_fmadd_ps(_k12, _r12, _sum0); _sum0 = _mm_comp_fmadd_ps(_k20, _r20, _sum0); _sum0 = _mm_comp_fmadd_ps(_k21, _r21, _sum0); _sum0 = _mm_comp_fmadd_ps(_k22, _r22, _sum0); _mm_store_ps(outptr0, _sum0); r0 += 2 * 4; r1 += 2 * 4; r2 += 2 * 4; outptr0 += 4; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } } }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx movq %rdx, -0x18(%rsp) movq %rsi, -0x20(%rsp) movq %rdi, -0x28(%rsp) movslq 0x38(%rdi), %rax movq %rax, -0x8(%rsp) testq %rax, %rax jle 0x2eb11b movq -0x20(%rsp), %rax movl 0x2c(%rax), %edx movl 0x30(%rax), %r9d movq -0x28(%rsp), %rax movl 0x2c(%rax), %eax subl %edx, %eax shll $0x3, %eax movslq %eax, %r10 movl %edx, %esi andl $-0x4, %esi xorl %edi, %edi movl $0x80, %r14d movq %rcx, -0x10(%rsp) testq %rcx, %rcx je 0x2eadcd movq %rdi, %rax shlq $0x4, %rax vmovups (%rcx,%rax), %xmm0 jmp 0x2eadd1 vxorps %xmm0, %xmm0, %xmm0 testl %r9d, %r9d jle 0x2eb108 movq -0x20(%rsp), %rax movq 0x40(%rax), %r15 imulq %rdi, %r15 imulq 0x10(%rax), %r15 addq (%rax), %r15 movq -0x18(%rsp), %rcx movslq 0x2c(%rcx), %rax imulq %rdi, %rax imulq 0x10(%rcx), %rax movq (%rcx), %r8 vmovaps (%r8,%rax), %xmm1 vmovaps 0x10(%r8,%rax), %xmm2 vmovaps 0x20(%r8,%rax), %xmm3 vmovaps 0x30(%r8,%rax), %xmm4 vmovaps 0x40(%r8,%rax), %xmm5 vmovaps 0x50(%r8,%rax), %xmm6 vmovaps 0x60(%r8,%rax), %xmm7 vmovaps 0x70(%r8,%rax), %xmm8 vmovaps 0x80(%r8,%rax), %xmm9 movq -0x28(%rsp), %rcx movq 0x40(%rcx), %r12 imulq %rdi, %r12 movq 0x10(%rcx), %rax imulq %rax, %r12 addq (%rcx), %r12 movslq 0x2c(%rcx), %r13 imulq %rax, %r13 leaq (%r12,%r13,2), %rax addq %r12, %r13 xorl %ebp, %ebp cmpl $0x4, %edx jl 0x2eafc3 movl $0x3, %ebx vmovaps (%r12), %xmm11 vfmadd213ps %xmm0, %xmm1, %xmm11 # xmm11 = (xmm1 * xmm11) + xmm0 vfmadd231ps 0x10(%r12), %xmm2, %xmm11 # xmm11 = (xmm2 * mem) + xmm11 vmovaps 0x20(%r12), %xmm12 vmovaps 0x40(%r12), %xmm10 vfmadd231ps %xmm12, %xmm3, %xmm11 # xmm11 = (xmm3 * xmm12) + xmm11 vfmadd231ps (%r13), %xmm4, %xmm11 # xmm11 = (xmm4 * mem) + xmm11 vfmadd231ps 0x10(%r13), %xmm5, %xmm11 # xmm11 = (xmm5 * mem) + xmm11 vmovaps 0x20(%r13), %xmm13 vfmadd231ps %xmm13, %xmm6, %xmm11 # xmm11 = (xmm6 * xmm13) + xmm11 vfmadd231ps (%rax), %xmm7, %xmm11 # xmm11 = (xmm7 * mem) + xmm11 vfmadd231ps 0x10(%rax), %xmm8, %xmm11 # xmm11 = (xmm8 * mem) + xmm11 vmovaps 0x40(%r13), %xmm14 vmovaps 0x20(%rax), %xmm15 vmovaps 0x40(%rax), %xmm16 vfmadd231ps %xmm15, %xmm9, %xmm11 # xmm11 = (xmm9 * xmm15) + xmm11 vfmadd213ps %xmm0, %xmm1, %xmm12 # xmm12 = (xmm1 * xmm12) + xmm0 vfmadd231ps 0x30(%r12), %xmm2, %xmm12 # xmm12 = (xmm2 * mem) + xmm12 vfmadd231ps %xmm10, %xmm3, %xmm12 # xmm12 = (xmm3 * xmm10) + xmm12 vfmadd231ps %xmm13, %xmm4, %xmm12 # xmm12 = (xmm4 * xmm13) + xmm12 vfmadd231ps 0x30(%r13), %xmm5, %xmm12 # xmm12 = (xmm5 * mem) + xmm12 vfmadd231ps %xmm14, %xmm6, %xmm12 # xmm12 = (xmm6 * xmm14) + xmm12 vfmadd231ps %xmm15, %xmm7, %xmm12 # xmm12 = (xmm7 * xmm15) + xmm12 vfmadd231ps 0x30(%rax), %xmm8, %xmm12 # xmm12 = (xmm8 * mem) + xmm12 vmovaps %xmm11, (%r15) vfmadd231ps %xmm16, %xmm9, %xmm12 # xmm12 = (xmm9 * xmm16) + xmm12 vmovaps 0x60(%r12), %xmm11 vmovaps 0x60(%r13), %xmm13 vmovaps 0x60(%rax), %xmm15 vfmadd213ps %xmm0, %xmm1, %xmm10 # xmm10 = (xmm1 * xmm10) + xmm0 vfmadd231ps 0x50(%r12), %xmm2, %xmm10 # xmm10 = (xmm2 * mem) + xmm10 vfmadd231ps %xmm11, %xmm3, %xmm10 # xmm10 = (xmm3 * xmm11) + xmm10 vfmadd231ps %xmm14, %xmm4, %xmm10 # xmm10 = (xmm4 * xmm14) + xmm10 vfmadd231ps 0x50(%r13), %xmm5, %xmm10 # xmm10 = (xmm5 * mem) + xmm10 vfmadd231ps %xmm13, %xmm6, %xmm10 # xmm10 = (xmm6 * xmm13) + xmm10 vfmadd231ps %xmm16, %xmm7, %xmm10 # xmm10 = (xmm7 * xmm16) + xmm10 vfmadd231ps 0x50(%rax), %xmm8, %xmm10 # xmm10 = (xmm8 * mem) + xmm10 vmovaps %xmm12, 0x10(%r15) vfmadd231ps %xmm15, %xmm9, %xmm10 # xmm10 = (xmm9 * xmm15) + xmm10 vfmadd213ps %xmm0, %xmm1, %xmm11 # xmm11 = (xmm1 * xmm11) + xmm0 
vfmadd231ps 0x70(%r12), %xmm2, %xmm11 # xmm11 = (xmm2 * mem) + xmm11 vfmadd231ps 0x80(%r12), %xmm3, %xmm11 # xmm11 = (xmm3 * mem) + xmm11 leaq (%r12,%r14), %r12 vfmadd231ps %xmm13, %xmm4, %xmm11 # xmm11 = (xmm4 * xmm13) + xmm11 vfmadd231ps 0x70(%r13), %xmm5, %xmm11 # xmm11 = (xmm5 * mem) + xmm11 vfmadd231ps 0x80(%r13), %xmm6, %xmm11 # xmm11 = (xmm6 * mem) + xmm11 addq %r14, %r13 vfmadd231ps %xmm15, %xmm7, %xmm11 # xmm11 = (xmm7 * xmm15) + xmm11 vfmadd231ps 0x70(%rax), %xmm8, %xmm11 # xmm11 = (xmm8 * mem) + xmm11 vfmadd231ps 0x80(%rax), %xmm9, %xmm11 # xmm11 = (xmm9 * mem) + xmm11 addq %r14, %rax vmovaps %xmm10, 0x20(%r15) vmovaps %xmm11, 0x30(%r15) addq $0x40, %r15 addl $0x4, %ebx cmpl %edx, %ebx jl 0x2eae7c movl %esi, %r11d jmp 0x2eafc6 xorl %r11d, %r11d movl %r11d, %r8d orl $0x1, %r8d cmpl %edx, %r8d jge 0x2eb082 movl %r11d, %ebx vmovaps 0x20(%r12), %xmm10 vmovaps (%r12), %xmm11 vfmadd213ps %xmm0, %xmm1, %xmm11 # xmm11 = (xmm1 * xmm11) + xmm0 vfmadd231ps 0x10(%r12), %xmm2, %xmm11 # xmm11 = (xmm2 * mem) + xmm11 vfmadd231ps %xmm10, %xmm3, %xmm11 # xmm11 = (xmm3 * xmm10) + xmm11 vfmadd231ps (%r13), %xmm4, %xmm11 # xmm11 = (xmm4 * mem) + xmm11 vfmadd231ps 0x10(%r13), %xmm5, %xmm11 # xmm11 = (xmm5 * mem) + xmm11 vmovaps 0x20(%r13), %xmm12 vfmadd231ps %xmm12, %xmm6, %xmm11 # xmm11 = (xmm6 * xmm12) + xmm11 vfmadd231ps (%rax), %xmm7, %xmm11 # xmm11 = (xmm7 * mem) + xmm11 vmovaps 0x20(%rax), %xmm13 vfmadd231ps 0x10(%rax), %xmm8, %xmm11 # xmm11 = (xmm8 * mem) + xmm11 vfmadd213ps %xmm0, %xmm1, %xmm10 # xmm10 = (xmm1 * xmm10) + xmm0 vfmadd231ps 0x30(%r12), %xmm2, %xmm10 # xmm10 = (xmm2 * mem) + xmm10 vfmadd231ps 0x40(%r12), %xmm3, %xmm10 # xmm10 = (xmm3 * mem) + xmm10 vfmadd231ps %xmm13, %xmm9, %xmm11 # xmm11 = (xmm9 * xmm13) + xmm11 vfmadd231ps %xmm12, %xmm4, %xmm10 # xmm10 = (xmm4 * xmm12) + xmm10 vfmadd231ps 0x30(%r13), %xmm5, %xmm10 # xmm10 = (xmm5 * mem) + xmm10 addq $0x40, %r12 vfmadd231ps 0x40(%r13), %xmm6, %xmm10 # xmm10 = (xmm6 * mem) + xmm10 vfmadd231ps %xmm13, %xmm7, %xmm10 # xmm10 = (xmm7 * xmm13) + xmm10 vfmadd231ps 0x30(%rax), %xmm8, %xmm10 # xmm10 = (xmm8 * mem) + xmm10 vfmadd231ps 0x40(%rax), %xmm9, %xmm10 # xmm10 = (xmm9 * mem) + xmm10 addq $0x40, %r13 addq $0x40, %rax vmovaps %xmm11, (%r15) vmovaps %xmm10, 0x10(%r15) addq $0x20, %r15 leal 0x2(%rbx), %r11d addl $0x3, %ebx cmpl %edx, %ebx jl 0x2eafd6 movl %edx, %r8d subl %r11d, %r8d jle 0x2eb0f0 xorl %r11d, %r11d xorl %ebx, %ebx vmovaps (%r12,%r11,2), %xmm10 vfmadd213ps %xmm0, %xmm1, %xmm10 # xmm10 = (xmm1 * xmm10) + xmm0 vfmadd231ps 0x10(%r12,%r11,2), %xmm2, %xmm10 # xmm10 = (xmm2 * mem) + xmm10 vfmadd231ps 0x20(%r12,%r11,2), %xmm3, %xmm10 # xmm10 = (xmm3 * mem) + xmm10 vfmadd231ps (%r13,%r11,2), %xmm4, %xmm10 # xmm10 = (xmm4 * mem) + xmm10 vfmadd231ps 0x10(%r13,%r11,2), %xmm5, %xmm10 # xmm10 = (xmm5 * mem) + xmm10 vfmadd231ps 0x20(%r13,%r11,2), %xmm6, %xmm10 # xmm10 = (xmm6 * mem) + xmm10 vfmadd231ps (%rax,%r11,2), %xmm7, %xmm10 # xmm10 = (xmm7 * mem) + xmm10 vfmadd231ps 0x10(%rax,%r11,2), %xmm8, %xmm10 # xmm10 = (xmm8 * mem) + xmm10 vfmadd231ps 0x20(%rax,%r11,2), %xmm9, %xmm10 # xmm10 = (xmm9 * mem) + xmm10 vmovaps %xmm10, (%r15,%r11) addq $-0x20, %rbx addq $0x10, %r11 decl %r8d jne 0x2eb08f subq %rbx, %rax subq %rbx, %r13 subq %rbx, %r12 addq %r11, %r15 leaq (%r12,%r10,4), %r12 leaq (%r13,%r10,4), %r13 leaq (%rax,%r10,4), %rax incl %ebp cmpl %r9d, %ebp jne 0x2eae6e incq %rdi cmpq -0x8(%rsp), %rdi movq -0x10(%rsp), %rcx jne 0x2eadba popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq
/Tencent[P]ncnn/src/layer/x86/convolutiondepthwise_3x3_pack4.h
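In the pack4 layout used by convdw3x3s2_pack4_sse, each spatial position holds 4 interleaved channel values, so one input pixel spans 4 floats, a stride-2 step spans 8, and tailstep = (w - 2 * outw + w) * 4 skips the remainder of the current row plus the row consumed by the vertical stride. A scalar sketch of one output row under that layout; the function name and per-row framing are illustrative, not part of ncnn's API:

// One output row of a 3x3 stride-2 depth-wise convolution in pack4 layout.
// The kernel is stored tap-major (9 taps x 4 packed channels), matching the
// _mm_load_ps(k0 + 4*tap) sequence in the source above.
static void dw3x3s2_pack4_row_ref(const float* r0, const float* r1, const float* r2,
                                  const float* k, const float* bias4,
                                  float* out, int outw)
{
    for (int j = 0; j < outw; j++)
    {
        for (int p = 0; p < 4; p++) // the 4 packed channels
        {
            float sum = bias4 ? bias4[p] : 0.f;
            for (int kx = 0; kx < 3; kx++)
            {
                sum += k[(0 * 3 + kx) * 4 + p] * r0[kx * 4 + p];
                sum += k[(1 * 3 + kx) * 4 + p] * r1[kx * 4 + p];
                sum += k[(2 * 3 + kx) * 4 + p] * r2[kx * 4 + p];
            }
            out[p] = sum;
        }
        r0 += 2 * 4; // stride 2, 4 floats per pixel
        r1 += 2 * 4;
        r2 += 2 * 4;
        out += 4;
    }
}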
ncnn::ConvolutionDepthWise_x86_fma::create_pipeline(ncnn::Option const&)
int ConvolutionDepthWise_x86_fma::create_pipeline(const Option& opt) { if (dynamic_weight) return 0; activation = create_activation_layer(activation_type, activation_params, opt); #if NCNN_INT8 if (opt.use_int8_inference && weight_data.elemsize == (size_t)1u) { return create_pipeline_int8_x86(opt); } #endif const int maxk = kernel_w * kernel_h; int channels = (weight_data_size / group) / maxk / (num_output / group) * group; // depth-wise if (channels == group && group == num_output) { int elempack = 1; #if __SSE2__ if (opt.use_packing_layout) { #if __AVX512F__ elempack = channels % 16 == 0 ? 16 : channels % 8 == 0 ? 8 : channels % 4 == 0 ? 4 : 1; #elif __AVX__ elempack = channels % 8 == 0 ? 8 : channels % 4 == 0 ? 4 : 1; #else elempack = channels % 4 == 0 ? 4 : 1; #endif } #endif // __SSE2__ #if __SSE2__ #if __AVX__ // pack16 #if __AVX512F__ if (elempack == 16) { Mat weight_data_r2 = weight_data.reshape(maxk, group); convert_packing(weight_data_r2, weight_data_tm, 16, opt); } #endif // __AVX512F__ // pack8 if (elempack == 8) { Mat weight_data_r2 = weight_data.reshape(maxk, group); convert_packing(weight_data_r2, weight_data_tm, 8, opt); } #endif // __AVX__ // pack4 if (elempack == 4) { Mat weight_data_r2 = weight_data.reshape(maxk, group); convert_packing(weight_data_r2, weight_data_tm, 4, opt); } #endif // __SSE2__ if (elempack == 1) { // depth-wise specific if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1) { weight_data_tm = weight_data; } else if (kernel_w == 3 && kernel_h == 3 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2) { weight_data_tm = weight_data; } else { create_group_ops(opt); } } if (opt.lightmode) weight_data.release(); return 0; } // group convolution create_group_ops(opt); if (opt.lightmode) weight_data.release(); return 0; }
pushq %r15 pushq %r14 pushq %r12 pushq %rbx subq $0x48, %rsp cmpl $0x0, 0x160(%rdi) jne 0x2ed555 movq %rsi, %r14 movq %rdi, %rbx movl 0x110(%rdi), %eax decl %eax cmpl $0x5, %eax ja 0x2ed3a9 leaq 0x1c363a(%rip), %rcx # 0x4b072c movslq (%rcx,%rax,4), %rax addq %rcx, %rax jmpq *%rax movl $0x1a, %edi callq 0x428c9 movq %rax, %r15 movq %rsp, %r12 movq %r12, %rdi callq 0x39dcc movq (%r15), %rax movq %r15, %rdi movq %r12, %rsi callq *0x10(%rax) jmp 0x2ed25a movl $0x47, %edi callq 0x428c9 movq %rax, %r15 movq %rsp, %r12 movq %r12, %rdi callq 0x39dcc movq (%r15), %rax movq %r15, %rdi movq %r12, %rsi callq *0x10(%rax) jmp 0x2ed25a movl $0x36, %edi callq 0x428c9 movq %rax, %r15 movq %rsp, %r12 movq %r12, %rdi callq 0x39dcc movq 0x118(%rbx), %rax vmovss (%rax), %xmm0 movq %r12, %rdi xorl %esi, %esi callq 0x3a346 movq 0x118(%rbx), %rax vmovss 0x4(%rax), %xmm0 movq %rsp, %rdi movl $0x1, %esi callq 0x3a346 movq (%r15), %rax movq %rsp, %rsi movq %r15, %rdi callq *0x10(%rax) jmp 0x2ed25a movl $0x1e, %edi callq 0x428c9 movq %rax, %r15 movq %rsp, %r12 movq %r12, %rdi callq 0x39dcc movq (%r15), %rax movq %r15, %rdi movq %r12, %rsi callq *0x10(%rax) jmp 0x2ed25a movl $0x1a, %edi callq 0x428c9 movq %rax, %r15 movq %rsp, %r12 movq %r12, %rdi callq 0x39dcc movq 0x118(%rbx), %rax vmovss (%rax), %xmm0 movq %r12, %rdi xorl %esi, %esi callq 0x3a346 movq (%r15), %rax movq %rsp, %rsi movq %r15, %rdi callq *0x10(%rax) jmp 0x2ed25a movl $0x43, %edi callq 0x428c9 movq %rax, %r15 movq %rsp, %r12 movq %r12, %rdi callq 0x39dcc movq 0x118(%rbx), %rax vmovss (%rax), %xmm0 movq %r12, %rdi xorl %esi, %esi callq 0x3a346 movq 0x118(%rbx), %rax vmovss 0x4(%rax), %xmm0 movq %rsp, %rdi movl $0x1, %esi callq 0x3a346 movq (%r15), %rax movq %rsp, %rsi movq %r15, %rdi callq *0x10(%rax) movq %rsp, %rdi callq 0x39ed4 movq (%r15), %rax movq %r15, %rdi movq %r14, %rsi callq *0x20(%rax) movq %r15, 0x2d0(%rbx) cmpb $0x1, 0x1e(%r14) jne 0x2ed296 cmpq $0x1, 0x178(%rbx) jne 0x2ed296 movq %rbx, %rdi movq %r14, %rsi callq 0x2ed604 jmp 0x2ed555 movq 0xd4(%rbx), %r10 movq %r10, %r8 shrq $0x20, %r8 imull %r10d, %r8d movl 0x104(%rbx), %eax movl 0x108(%rbx), %ecx cltd idivl %ecx cltd idivl %r8d movl %eax, %esi movl 0xd0(%rbx), %edi movl %edi, %eax cltd idivl %ecx movl %eax, %r9d movl %esi, %eax cltd idivl %r9d cmpl %edi, %ecx jne 0x2ed399 imull %ecx, %eax cmpl %ecx, %eax jne 0x2ed399 cmpb $0x1, 0x27(%r14) jne 0x2ed2fd testb $0x7, %cl je 0x2ed3b1 testb $0x3, %cl je 0x2ed414 vmovq %r10, %xmm0 vpunpcklqdq 0xdc(%rbx), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0] vpcmpeqd 0x1ba22e(%rip), %xmm0, %xmm0 # 0x4a7540 vmovmskps %xmm0, %eax xorl $0xf, %eax movl 0xe4(%rbx), %ecx movl %ecx, %esi xorl $0x1, %esi orl %eax, %esi movl 0xe8(%rbx), %edx movl %edx, %edi xorl $0x1, %edi orl %esi, %edi je 0x2ed34f testb %al, %al sete %al cmpl $0x2, %ecx sete %cl cmpl $0x2, %edx sete %dl andb %cl, %dl andb %al, %dl cmpb $0x1, %dl jne 0x2ed399 movq 0x170(%rbx), %rax testq %rax, %rax je 0x2ed35e lock incl (%rax) movq 0x2f8(%rbx), %rax testq %rax, %rax je 0x2ed481 lock decl (%rax) jne 0x2ed481 movq 0x2f0(%rbx), %rsi movq 0x310(%rbx), %rdi testq %rdi, %rdi je 0x2ed474 movq (%rdi), %rax callq *0x18(%rax) jmp 0x2ed481 movq %rbx, %rdi movq %r14, %rsi callq 0x2ed85e jmp 0x2ed4e3 xorl %r15d, %r15d jmp 0x2ed26e leaq 0x168(%rbx), %rsi movq %rsp, %r15 movq %r15, %rdi movl %r8d, %edx xorl %r8d, %r8d callq 0x2a64a leaq 0x2f0(%rbx), %rsi movq %r15, %rdi movl $0x8, %edx movq %r14, %rcx callq 0x2c9a4 movq 0x8(%rsp), %rax testq %rax, %rax je 0x2ed4e3 lock decl (%rax) jne 0x2ed4e3 movq (%rsp), %rsi movq 
0x20(%rsp), %rdi testq %rdi, %rdi je 0x2ed563 movq (%rdi), %rax callq *0x18(%rax) jmp 0x2ed4e3 leaq 0x168(%rbx), %rsi movq %rsp, %r15 movq %r15, %rdi movl %r8d, %edx xorl %r8d, %r8d callq 0x2a64a leaq 0x2f0(%rbx), %rsi movq %r15, %rdi movl $0x4, %edx movq %r14, %rcx callq 0x2c9a4 movq 0x8(%rsp), %rax testq %rax, %rax je 0x2ed4e3 lock decl (%rax) jne 0x2ed4e3 movq (%rsp), %rsi movq 0x20(%rsp), %rdi testq %rdi, %rdi je 0x2ed563 movq (%rdi), %rax callq *0x18(%rax) jmp 0x2ed4e3 testq %rsi, %rsi je 0x2ed481 movq %rsi, %rdi callq 0x244a0 vmovups 0x168(%rbx), %xmm0 vmovups %xmm0, 0x2f0(%rbx) movq 0x178(%rbx), %rax movq %rax, 0x300(%rbx) movl 0x180(%rbx), %eax movl %eax, 0x308(%rbx) movq 0x188(%rbx), %rax movq %rax, 0x310(%rbx) vmovups 0x190(%rbx), %xmm0 vmovups %xmm0, 0x318(%rbx) movl 0x1a0(%rbx), %eax movl %eax, 0x328(%rbx) movq 0x1a8(%rbx), %rax movq %rax, 0x330(%rbx) cmpb $0x1, (%r14) jne 0x2ed555 leaq 0x168(%rbx), %r14 movq 0x170(%rbx), %rax testq %rax, %rax je 0x2ed529 lock decl (%rax) jne 0x2ed529 movq 0x168(%rbx), %rsi movq 0x188(%rbx), %rdi testq %rdi, %rdi je 0x2ed51c movq (%rdi), %rax callq *0x18(%rax) jmp 0x2ed529 testq %rsi, %rsi je 0x2ed529 movq %rsi, %rdi callq 0x244a0 movq $0x0, 0x1a8(%rbx) vxorps %xmm0, %xmm0, %xmm0 vmovups %xmm0, 0xc(%r14) vmovups %xmm0, (%r14) vmovups %xmm0, 0x190(%rbx) movl $0x0, 0x1a0(%rbx) xorl %eax, %eax addq $0x48, %rsp popq %rbx popq %r12 popq %r14 popq %r15 retq testq %rsi, %rsi je 0x2ed4e3 movq %rsi, %rdi callq 0x244a0 jmp 0x2ed4e3 jmp 0x2ed5de jmp 0x2ed5de movq %rax, %rbx movq 0x8(%rsp), %rax testq %rax, %rax je 0x2ed5fb lock decl (%rax) jne 0x2ed5fb movq (%rsp), %rsi movq 0x20(%rsp), %rdi testq %rdi, %rdi je 0x2ed5c7 movq (%rdi), %rax callq *0x18(%rax) jmp 0x2ed5fb jmp 0x2ed5de movq %rax, %rbx movq 0x8(%rsp), %rax testq %rax, %rax je 0x2ed5fb lock decl (%rax) jne 0x2ed5fb movq (%rsp), %rsi movq 0x20(%rsp), %rdi testq %rdi, %rdi jne 0x2ed5d6 testq %rsi, %rsi je 0x2ed5fb movq %rsi, %rdi callq 0x244a0 jmp 0x2ed5fb movq (%rdi), %rax callq *0x18(%rax) jmp 0x2ed5fb movq %rax, %rdi callq 0x2953f jmp 0x2ed5f0 jmp 0x2ed5f0 jmp 0x2ed5f0 jmp 0x2ed5f0 jmp 0x2ed5f0 movq %rax, %rbx movq %rsp, %rdi callq 0x39ed4 movq %rbx, %rdi callq 0x243e0 nop
/Tencent[P]ncnn/build_O3/src/layer/x86/convolutiondepthwise_x86_fma.cpp
ncnn::ConvolutionDepthWise_x86_fma::create_pipeline_int8_x86(ncnn::Option const&)
int ConvolutionDepthWise_x86_fma::create_pipeline_int8_x86(const Option& opt)
{
    const int maxk = kernel_w * kernel_h;
    int channels = (weight_data_size / group) / maxk / (num_output / group) * group;

    // depth-wise
    if (channels == group && group == num_output)
    {
        int elempack = 1;
#if __SSE2__
        if (opt.use_packing_layout)
        {
            elempack = channels % 8 == 0 ? 8 : 1;
        }
#endif // __SSE2__

        if (elempack == 8)
        {
            Mat weight_data_r2 = weight_data.reshape(maxk, group);
            convert_packing(weight_data_r2, weight_data_tm, 8, opt);
        }

        if (elempack == 1)
        {
            weight_data_tm = weight_data;
        }

        if (opt.lightmode)
            weight_data.release();

        return 0;
    }

    // group convolution
    create_group_ops(opt);

    if (opt.lightmode)
        weight_data.release();

    return 0;
}
pushq %r15
pushq %r14
pushq %rbx
subq $0x50, %rsp
movq %rsi, %r14
movq %rdi, %rbx
movl 0xd8(%rdi), %r8d
imull 0xd4(%rdi), %r8d
movl 0xd0(%rdi), %esi
movl 0x104(%rdi), %eax
movl 0x108(%rdi), %ecx
cltd
idivl %ecx
cltd
idivl %r8d
movl %eax, %edi
movl %esi, %eax
cltd
idivl %ecx
movl %eax, %r9d
movl %edi, %eax
cltd
idivl %r9d
cmpl %esi, %ecx
jne 0x2ed6c8
imull %ecx, %eax
cmpl %ecx, %eax
jne 0x2ed6c8
testb $0x7, %cl
jne 0x2ed6d8
cmpb $0x0, 0x27(%r14)
je 0x2ed6d8
leaq 0x168(%rbx), %rsi
leaq 0x8(%rsp), %r15
movq %r15, %rdi
movl %r8d, %edx
xorl %r8d, %r8d
callq 0x2a64a
leaq 0x2f0(%rbx), %rsi
movq %r15, %rdi
movl $0x8, %edx
movq %r14, %rcx
callq 0x2c9a4
movq 0x10(%rsp), %rax
testq %rax, %rax
je 0x2ed782
lock decl (%rax)
jne 0x2ed782
movq 0x8(%rsp), %rsi
movq 0x28(%rsp), %rdi
testq %rdi, %rdi
je 0x2ed800
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2ed782
movq %rbx, %rdi
movq %r14, %rsi
callq 0x2ed85e
jmp 0x2ed782
movq 0x170(%rbx), %rax
testq %rax, %rax
je 0x2ed6e7
lock incl (%rax)
movq 0x2f8(%rbx), %rax
testq %rax, %rax
je 0x2ed720
lock decl (%rax)
jne 0x2ed720
movq 0x2f0(%rbx), %rsi
movq 0x310(%rbx), %rdi
testq %rdi, %rdi
je 0x2ed713
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2ed720
testq %rsi, %rsi
je 0x2ed720
movq %rsi, %rdi
callq 0x244a0
vmovups 0x168(%rbx), %xmm0
vmovups %xmm0, 0x2f0(%rbx)
movq 0x178(%rbx), %rax
movq %rax, 0x300(%rbx)
movl 0x180(%rbx), %eax
movl %eax, 0x308(%rbx)
movq 0x188(%rbx), %rax
movq %rax, 0x310(%rbx)
vmovups 0x190(%rbx), %xmm0
vmovups %xmm0, 0x318(%rbx)
movl 0x1a0(%rbx), %eax
movl %eax, 0x328(%rbx)
movq 0x1a8(%rbx), %rax
movq %rax, 0x330(%rbx)
cmpb $0x1, (%r14)
jne 0x2ed7f4
leaq 0x168(%rbx), %r14
movq 0x170(%rbx), %rax
testq %rax, %rax
je 0x2ed7c8
lock decl (%rax)
jne 0x2ed7c8
movq 0x168(%rbx), %rsi
movq 0x188(%rbx), %rdi
testq %rdi, %rdi
je 0x2ed7bb
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x2ed7c8
testq %rsi, %rsi
je 0x2ed7c8
movq %rsi, %rdi
callq 0x244a0
movq $0x0, 0x1a8(%rbx)
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, 0xc(%r14)
vmovups %xmm0, (%r14)
vmovups %xmm0, 0x190(%rbx)
movl $0x0, 0x1a0(%rbx)
xorl %eax, %eax
addq $0x50, %rsp
popq %rbx
popq %r14
popq %r15
retq
testq %rsi, %rsi
je 0x2ed782
movq %rsi, %rdi
callq 0x244a0
jmp 0x2ed782
jmp 0x2ed856
movq %rax, %rbx
movq 0x10(%rsp), %rax
testq %rax, %rax
je 0x2ed84e
lock decl (%rax)
jne 0x2ed84e
movq 0x8(%rsp), %rsi
movq 0x28(%rsp), %rdi
testq %rdi, %rdi
jne 0x2ed848
testq %rsi, %rsi
je 0x2ed84e
movq %rsi, %rdi
callq 0x244a0
jmp 0x2ed84e
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x243e0
movq %rax, %rdi
callq 0x2953f
/Tencent[P]ncnn/build_O3/src/layer/x86/convolutiondepthwise_x86_fma.cpp
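Note on the record above: the depth-wise test is pure integer arithmetic on the layer parameters, and the chain of idivl instructions in the listing is exactly the channels recovery from the flat weight blob size. A minimal standalone sketch of that check, assuming plain int inputs (the names mirror the ncnn fields but nothing here is the real layer API):

#include <cstdio>

// Hedged sketch: recomputes the depth-wise test from create_pipeline_int8_x86.
static bool is_depthwise(int weight_data_size, int kernel_w, int kernel_h,
                         int group, int num_output)
{
    const int maxk = kernel_w * kernel_h;
    // channels is recovered from the flat weight blob size, as in the source.
    int channels = (weight_data_size / group) / maxk / (num_output / group) * group;
    return channels == group && group == num_output;
}

int main()
{
    // 3x3 depth-wise conv with 32 channels: weight blob holds 32*9 values.
    printf("%d\n", is_depthwise(32 * 9, 3, 3, 32, 32)); // prints 1
    // grouped conv: 4 groups, 8 inputs / 8 outputs per group.
    printf("%d\n", is_depthwise(4 * 8 * 8 * 9, 3, 3, 4, 32)); // prints 0
    return 0;
}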
ncnn::ConvolutionDepthWise_x86_fma::destroy_pipeline(ncnn::Option const&)
int ConvolutionDepthWise_x86_fma::destroy_pipeline(const Option& opt)
{
    if (activation)
    {
        activation->destroy_pipeline(opt);
        delete activation;
        activation = 0;
    }

    for (int i = 0; i < (int)group_ops.size(); i++)
    {
        group_ops[i]->destroy_pipeline(opt);
        delete group_ops[i];
    }
    group_ops.clear();

    return 0;
}
pushq %r15
pushq %r14
pushq %rbx
movq %rsi, %r14
movq %rdi, %rbx
movq 0x2d0(%rdi), %rdi
testq %rdi, %rdi
je 0x2eeb81
movq (%rdi), %rax
movq %r14, %rsi
callq *0x28(%rax)
movq 0x2d0(%rbx), %rdi
testq %rdi, %rdi
je 0x2eeb76
movq (%rdi), %rax
callq *0x8(%rax)
movq $0x0, 0x2d0(%rbx)
movq 0x2d8(%rbx), %rax
movq 0x2e0(%rbx), %rcx
movq %rcx, %rdx
subq %rax, %rdx
shrq $0x3, %rdx
testl %edx, %edx
jle 0x2eebe6
xorl %r15d, %r15d
movq (%rax,%r15,8), %rdi
movq (%rdi), %rax
movq %r14, %rsi
callq *0x28(%rax)
movq 0x2d8(%rbx), %rax
movq (%rax,%r15,8), %rdi
testq %rdi, %rdi
je 0x2eebca
movq (%rdi), %rax
callq *0x8(%rax)
movq 0x2d8(%rbx), %rax
incq %r15
movq 0x2e0(%rbx), %rcx
movq %rcx, %rdx
subq %rax, %rdx
shrq $0x3, %rdx
movslq %edx, %rdx
cmpq %rdx, %r15
jl 0x2eeba0
cmpq %rax, %rcx
je 0x2eebf2
movq %rax, 0x2e0(%rbx)
xorl %eax, %eax
popq %rbx
popq %r14
popq %r15
retq
/Tencent[P]ncnn/build_O3/src/layer/x86/convolutiondepthwise_x86_fma.cpp
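The destroy_pipeline record above illustrates ncnn's manual ownership model: each per-group sub-layer has its pipeline torn down, then the object is deleted, and only then is the owning vector cleared. A self-contained sketch of the same pattern, with Layer and Option as stand-ins rather than the real ncnn types:

#include <vector>

struct Option {};
struct Layer
{
    virtual int destroy_pipeline(const Option&) { return 0; }
    virtual ~Layer() {}
};

static void destroy_group_ops(std::vector<Layer*>& group_ops, const Option& opt)
{
    for (int i = 0; i < (int)group_ops.size(); i++)
    {
        group_ops[i]->destroy_pipeline(opt); // release per-layer resources first
        delete group_ops[i];                 // then free the layer object itself
    }
    group_ops.clear(); // leave the vector empty but reusable
}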
void ncnn::qsort_descent_inplace<ncnn::BBoxRect>(std::vector<ncnn::BBoxRect, std::allocator<ncnn::BBoxRect>>&, std::vector<float, std::allocator<float>>&, int, int)
template<typename T>
static void qsort_descent_inplace(std::vector<T>& datas, std::vector<float>& scores, int left, int right)
{
    int i = left;
    int j = right;
    float p = scores[(left + right) / 2];

    while (i <= j)
    {
        while (scores[i] > p)
            i++;

        while (scores[j] < p)
            j--;

        if (i <= j)
        {
            // swap
            std::swap(datas[i], datas[j]);
            std::swap(scores[i], scores[j]);

            i++;
            j--;
        }
    }

    if (left < j) qsort_descent_inplace(datas, scores, left, j);
    if (i < right) qsort_descent_inplace(datas, scores, i, right);
}
pushq %rbp
pushq %r15
pushq %r14
pushq %rbx
subq $0x18, %rsp
movl %ecx, %ebx
movq %rsi, %r14
movq %rdi, %r15
movq (%rsi), %rax
cmpl %ebx, %edx
jle 0x3436b5
movl %ebx, %ecx
movl %edx, %ebp
jmp 0x343772
leal (%rdx,%rbx), %ecx
movl %ecx, %esi
shrl $0x1f, %esi
addl %ecx, %esi
sarl %esi
movslq %esi, %rcx
movss (%rax,%rcx,4), %xmm0
movl %edx, %ebp
movl %ebx, %ecx
movslq %ebp, %rdi
leaq (%rdi,%rdi,4), %rsi
addq $-0x5, %rsi
decq %rdi
incl %ebp
addq $0x5, %rsi
ucomiss 0x4(%rax,%rdi,4), %xmm0
leaq 0x1(%rdi), %rdi
jb 0x3436db
movslq %ecx, %r9
leaq (%r9,%r9,4), %r8
addq $0x5, %r8
incq %r9
decl %ecx
addq $-0x5, %r8
ucomiss -0x4(%rax,%r9,4), %xmm0
leaq -0x1(%r9), %r9
ja 0x3436fa
cmpq %r9, %rdi
jle 0x343717
decl %ebp
incl %ecx
jmp 0x34376a
movq (%r15), %rax
movl 0x10(%rax,%rsi,4), %r10d
movl %r10d, 0x10(%rsp)
movups (%rax,%rsi,4), %xmm1
movaps %xmm1, (%rsp)
movl 0x10(%rax,%r8,4), %r10d
movl %r10d, 0x10(%rax,%rsi,4)
movups (%rax,%r8,4), %xmm1
movups %xmm1, (%rax,%rsi,4)
movl 0x10(%rsp), %esi
movl %esi, 0x10(%rax,%r8,4)
movaps (%rsp), %xmm1
movups %xmm1, (%rax,%r8,4)
movq (%r14), %rax
movss (%rax,%rdi,4), %xmm1
movss (%rax,%r9,4), %xmm2
movss %xmm2, (%rax,%rdi,4)
movss %xmm1, (%rax,%r9,4)
cmpl %ecx, %ebp
jle 0x3436cd
cmpl %edx, %ecx
jle 0x343784
movq %r15, %rdi
movq %r14, %rsi
callq 0x343693
movq (%r14), %rax
movl %ebp, %edx
cmpl %ebx, %ebp
jl 0x3436a8
addq $0x18, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
nop
/Tencent[P]ncnn/src/layer/yolodetectionoutput.cpp
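Since the quicksort above reorders two parallel vectors by score, a short usage sketch may help. It assumes the templated qsort_descent_inplace from the record above is in scope, and uses a trimmed-down stand-in for ncnn::BBoxRect:

#include <cstdio>
#include <utility>
#include <vector>

struct BBoxRect // simplified stand-in; not the real ncnn::BBoxRect definition
{
    float xmin, ymin, xmax, ymax;
    int label;
};

int main()
{
    std::vector<BBoxRect> boxes = {{0, 0, 1, 1, 0}, {1, 1, 2, 2, 1}, {2, 2, 3, 3, 2}};
    std::vector<float> scores = {0.3f, 0.9f, 0.6f};

    // sort both vectors in descending score order, keeping them aligned
    qsort_descent_inplace(boxes, scores, 0, (int)boxes.size() - 1);

    for (size_t k = 0; k < scores.size(); k++)
        printf("label=%d score=%.1f\n", boxes[k].label, scores[k]);
    // prints label=1 score=0.9, then label=2 score=0.6, then label=0 score=0.3
    return 0;
}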
ncnn::Quantize::load_model(ncnn::ModelBin const&)
int Quantize::load_model(const ModelBin& mb)
{
    scale_data = mb.load(scale_data_size, 1);
    if (scale_data.empty())
        return -100;

    return 0;
}
pushq %r15
pushq %r14
pushq %rbx
subq $0x50, %rsp
movq %rdi, %rbx
movl 0xd0(%rdi), %edx
movq (%rsi), %rax
leaq 0x8(%rsp), %r14
movq %r14, %rdi
movl $0x1, %ecx
callq *0x10(%rax)
leaq 0xd8(%rbx), %r15
movq 0x8(%r14), %rax
cmpq %r14, %r15
je 0x3439ee
testq %rax, %rax
je 0x343959
lock incl (%rax)
movq 0xe0(%rbx), %rax
testq %rax, %rax
je 0x343992
lock decl (%rax)
jne 0x343992
movq 0xd8(%rbx), %rsi
movq 0xf8(%rbx), %rdi
testq %rdi, %rdi
je 0x343985
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x343992
testq %rsi, %rsi
je 0x343992
movq %rsi, %rdi
callq 0x244a0
movq 0x8(%rsp), %rax
movq %rax, 0xd8(%rbx)
movq 0x10(%rsp), %rax
movq %rax, 0xe0(%rbx)
movq 0x18(%rsp), %rcx
movq %rcx, 0xe8(%rbx)
movl 0x20(%rsp), %ecx
movl %ecx, 0xf0(%rbx)
movq 0x28(%rsp), %rcx
movq %rcx, 0xf8(%rbx)
movups 0x30(%rsp), %xmm0
movups %xmm0, 0x100(%rbx)
movl 0x40(%rsp), %ecx
movl %ecx, 0x110(%rbx)
movq 0x48(%rsp), %rcx
movq %rcx, 0x118(%rbx)
testq %rax, %rax
je 0x343a1c
lock decl (%rax)
jne 0x343a1c
movq 0x8(%rsp), %rsi
movq 0x28(%rsp), %rdi
testq %rdi, %rdi
je 0x343a0f
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x343a1c
testq %rsi, %rsi
je 0x343a1c
movq %rsi, %rdi
callq 0x244a0
cmpq $0x0, (%r15)
je 0x343a38
movslq 0x110(%rbx), %rcx
imulq 0x118(%rbx), %rcx
xorl %eax, %eax
testq %rcx, %rcx
jne 0x343a3d
movl $0xffffff9c, %eax # imm = 0xFFFFFF9C
addq $0x50, %rsp
popq %rbx
popq %r14
popq %r15
retq
movq %rax, %rbx
movq 0x10(%rsp), %rax
testq %rax, %rax
je 0x343a7d
lock decl (%rax)
jne 0x343a7d
movq 0x8(%rsp), %rsi
movq 0x28(%rsp), %rdi
testq %rdi, %rdi
jne 0x343a77
testq %rsi, %rsi
je 0x343a7d
movq %rsi, %rdi
callq 0x244a0
jmp 0x343a7d
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x243e0
jmp 0x343a87
movq %rax, %rdi
callq 0x2953f
nop
/Tencent[P]ncnn/src/layer/quantize.cpp
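The load_model record above follows the ncnn convention that ModelBin::load returns an empty Mat on failure, which the layer maps to the error code -100 (visible in the listing as the 32-bit immediate 0xFFFFFF9C). A minimal sketch of the same check, using a hypothetical MiniMat wrapper rather than the real ncnn::Mat:

#include <vector>

// Hedged stand-in for ncnn::Mat: just enough state to express empty().
struct MiniMat
{
    std::vector<float> data;
    bool empty() const { return data.empty(); }
};

// Hypothetical loader mirroring Quantize::load_model's error convention.
static int load_scales(const MiniMat& loaded, MiniMat& scale_data)
{
    scale_data = loaded;
    if (scale_data.empty())
        return -100; // conventional ncnn load-failure code

    return 0;
}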