name (string, 1–473k) | code (string, 7–647k) | asm (string, 4–3.39M) | file (string, 8–196)
---|---|---|---|
block_buffer_encode
|
static lzma_ret
block_buffer_encode(lzma_block *block, const lzma_allocator *allocator,
const uint8_t *in, size_t in_size,
uint8_t *out, size_t *out_pos, size_t out_size,
bool try_to_compress)
{
// Validate the arguments.
if (block == NULL || (in == NULL && in_size != 0) || out == NULL
|| out_pos == NULL || *out_pos > out_size)
return LZMA_PROG_ERROR;
// The contents of the structure may depend on the version so
// check the version before validating the contents of *block.
if (block->version > 1)
return LZMA_OPTIONS_ERROR;
if ((unsigned int)(block->check) > LZMA_CHECK_ID_MAX
|| (try_to_compress && block->filters == NULL))
return LZMA_PROG_ERROR;
if (!lzma_check_is_supported(block->check))
return LZMA_UNSUPPORTED_CHECK;
// Size of a Block has to be a multiple of four, so limit the size
// here already. This way we don't need to check it again when adding
// Block Padding.
out_size -= (out_size - *out_pos) & 3;
// Get the size of the Check field.
const size_t check_size = lzma_check_size(block->check);
assert(check_size != UINT32_MAX);
// Reserve space for the Check field.
if (out_size - *out_pos <= check_size)
return LZMA_BUF_ERROR;
out_size -= check_size;
// Initialize block->uncompressed_size and calculate the worst-case
// value for block->compressed_size.
block->uncompressed_size = in_size;
block->compressed_size = lzma2_bound(in_size);
if (block->compressed_size == 0)
return LZMA_DATA_ERROR;
// Do the actual compression.
lzma_ret ret = LZMA_BUF_ERROR;
if (try_to_compress)
ret = block_encode_normal(block, allocator,
in, in_size, out, out_pos, out_size);
if (ret != LZMA_OK) {
// If the error was something else than output buffer
// becoming full, return the error now.
if (ret != LZMA_BUF_ERROR)
return ret;
// The data was uncompressible (at least with the options
// given to us) or the output buffer was too small. Use the
// uncompressed chunks of LZMA2 to wrap the data into a valid
// Block. If we haven't been given enough output space, even
// this may fail.
return_if_error(block_encode_uncompressed(block, in, in_size,
out, out_pos, out_size));
}
assert(*out_pos <= out_size);
// Block Padding. No buffer overflow here, because we already adjusted
// out_size so that (out_size - out_start) is a multiple of four.
// Thus, if the buffer is full, the loop body can never run.
for (size_t i = (size_t)(block->compressed_size); i & 3; ++i) {
assert(*out_pos < out_size);
out[(*out_pos)++] = 0x00;
}
// If there's no Check field, we are done now.
if (check_size > 0) {
// Calculate the integrity check. We reserved space for
// the Check field earlier so we don't need to check for
// available output space here.
lzma_check_state check;
lzma_check_init(&check, block->check);
lzma_check_update(&check, block->check, in, in_size);
lzma_check_finish(&check, block->check);
memcpy(block->raw_check, check.buffer.u8, check_size);
memcpy(out + *out_pos, check.buffer.u8, check_size);
*out_pos += check_size;
}
return LZMA_OK;
}
|
pushq %rbp
movq %rsp, %rbp
subq $0xd0, %rsp
movb 0x18(%rbp), %al
movq 0x10(%rbp), %r10
movq %rdi, -0x10(%rbp)
movq %rsi, -0x18(%rbp)
movq %rdx, -0x20(%rbp)
movq %rcx, -0x28(%rbp)
movq %r8, -0x30(%rbp)
movq %r9, -0x38(%rbp)
andb $0x1, %al
movb %al, -0x39(%rbp)
cmpq $0x0, -0x10(%rbp)
je 0x9bd96f
cmpq $0x0, -0x20(%rbp)
jne 0x9bd954
cmpq $0x0, -0x28(%rbp)
jne 0x9bd96f
cmpq $0x0, -0x30(%rbp)
je 0x9bd96f
cmpq $0x0, -0x38(%rbp)
je 0x9bd96f
movq -0x38(%rbp), %rax
movq (%rax), %rax
cmpq 0x10(%rbp), %rax
jbe 0x9bd97b
movl $0xb, -0x4(%rbp)
jmp 0x9bdc42
movq -0x10(%rbp), %rax
cmpl $0x1, (%rax)
jbe 0x9bd990
movl $0x8, -0x4(%rbp)
jmp 0x9bdc42
movq -0x10(%rbp), %rax
cmpl $0xf, 0x8(%rax)
ja 0x9bd9ab
testb $0x1, -0x39(%rbp)
je 0x9bd9b7
movq -0x10(%rbp), %rax
cmpq $0x0, 0x20(%rax)
jne 0x9bd9b7
movl $0xb, -0x4(%rbp)
jmp 0x9bdc42
movq -0x10(%rbp), %rax
movl 0x8(%rax), %edi
callq 0x9babc0
cmpb $0x0, %al
jne 0x9bd9d3
movl $0x3, -0x4(%rbp)
jmp 0x9bdc42
movq 0x10(%rbp), %rcx
movq -0x38(%rbp), %rax
subq (%rax), %rcx
andq $0x3, %rcx
movq 0x10(%rbp), %rax
subq %rcx, %rax
movq %rax, 0x10(%rbp)
movq -0x10(%rbp), %rax
movl 0x8(%rax), %edi
callq 0x9babf0
movl %eax, %eax
movq %rax, -0x48(%rbp)
movl $0xffffffff, %eax # imm = 0xFFFFFFFF
cmpq %rax, -0x48(%rbp)
je 0x9bda0c
jmp 0x9bda2b
leaq 0x213c9d(%rip), %rdi # 0xbd16b0
leaq 0x213caf(%rip), %rsi # 0xbd16c9
movl $0xfd, %edx
leaq 0x213d26(%rip), %rcx # 0xbd174c
callq 0x3b440
movq 0x10(%rbp), %rax
movq -0x38(%rbp), %rcx
subq (%rcx), %rax
cmpq -0x48(%rbp), %rax
ja 0x9bda48
movl $0xa, -0x4(%rbp)
jmp 0x9bdc42
movq -0x48(%rbp), %rcx
movq 0x10(%rbp), %rax
subq %rcx, %rax
movq %rax, 0x10(%rbp)
movq -0x28(%rbp), %rcx
movq -0x10(%rbp), %rax
movq %rcx, 0x18(%rax)
movq -0x28(%rbp), %rdi
callq 0x9bd810
movq %rax, %rcx
movq -0x10(%rbp), %rax
movq %rcx, 0x10(%rax)
movq -0x10(%rbp), %rax
cmpq $0x0, 0x10(%rax)
jne 0x9bda8e
movl $0x9, -0x4(%rbp)
jmp 0x9bdc42
movl $0xa, -0x4c(%rbp)
testb $0x1, -0x39(%rbp)
je 0x9bdac3
movq -0x10(%rbp), %rdi
movq -0x18(%rbp), %rsi
movq -0x20(%rbp), %rdx
movq -0x28(%rbp), %rcx
movq -0x30(%rbp), %r8
movq -0x38(%rbp), %r9
movq 0x10(%rbp), %rax
movq %rax, (%rsp)
callq 0x9bdcb0
movl %eax, -0x4c(%rbp)
cmpl $0x0, -0x4c(%rbp)
je 0x9bdb11
cmpl $0xa, -0x4c(%rbp)
je 0x9bdada
movl -0x4c(%rbp), %eax
movl %eax, -0x4(%rbp)
jmp 0x9bdc42
jmp 0x9bdadc
movq -0x10(%rbp), %rdi
movq -0x20(%rbp), %rsi
movq -0x28(%rbp), %rdx
movq -0x30(%rbp), %rcx
movq -0x38(%rbp), %r8
movq 0x10(%rbp), %r9
callq 0x9bdea0
movl %eax, -0x50(%rbp)
cmpl $0x0, -0x50(%rbp)
je 0x9bdb0d
movl -0x50(%rbp), %eax
movl %eax, -0x4(%rbp)
jmp 0x9bdc42
jmp 0x9bdb0f
jmp 0x9bdb11
movq -0x38(%rbp), %rax
movq (%rax), %rax
cmpq 0x10(%rbp), %rax
ja 0x9bdb20
jmp 0x9bdb3f
leaq 0x213ca5(%rip), %rdi # 0xbd17cc
leaq 0x213b9b(%rip), %rsi # 0xbd16c9
movl $0x121, %edx # imm = 0x121
leaq 0x213c12(%rip), %rcx # 0xbd174c
callq 0x3b440
movq -0x10(%rbp), %rax
movq 0x10(%rax), %rax
movq %rax, -0x58(%rbp)
movq -0x58(%rbp), %rax
andq $0x3, %rax
cmpq $0x0, %rax
je 0x9bdbae
movq -0x38(%rbp), %rax
movq (%rax), %rax
cmpq 0x10(%rbp), %rax
jae 0x9bdb68
jmp 0x9bdb87
leaq 0x213c72(%rip), %rdi # 0xbd17e1
leaq 0x213b53(%rip), %rsi # 0xbd16c9
movl $0x127, %edx # imm = 0x127
leaq 0x213bca(%rip), %rcx # 0xbd174c
callq 0x3b440
movq -0x30(%rbp), %rax
movq -0x38(%rbp), %rdx
movq (%rdx), %rcx
movq %rcx, %rsi
addq $0x1, %rsi
movq %rsi, (%rdx)
movb $0x0, (%rax,%rcx)
movq -0x58(%rbp), %rax
addq $0x1, %rax
movq %rax, -0x58(%rbp)
jmp 0x9bdb4b
cmpq $0x0, -0x48(%rbp)
jbe 0x9bdc3b
movq -0x10(%rbp), %rax
movl 0x8(%rax), %esi
leaq -0xc0(%rbp), %rdi
callq 0x9bac20
movq -0x10(%rbp), %rax
movl 0x8(%rax), %esi
movq -0x20(%rbp), %rdx
movq -0x28(%rbp), %rcx
leaq -0xc0(%rbp), %rdi
callq 0x9bac80
movq -0x10(%rbp), %rax
movl 0x8(%rax), %esi
leaq -0xc0(%rbp), %rdi
callq 0x9bad20
movq -0x10(%rbp), %rdi
addq $0x28, %rdi
leaq -0xc0(%rbp), %rsi
movq -0x48(%rbp), %rdx
callq 0x3cb70
movq -0x30(%rbp), %rdi
movq -0x38(%rbp), %rax
addq (%rax), %rdi
leaq -0xc0(%rbp), %rsi
movq -0x48(%rbp), %rdx
callq 0x3cb70
movq -0x48(%rbp), %rcx
movq -0x38(%rbp), %rax
addq (%rax), %rcx
movq %rcx, (%rax)
movl $0x0, -0x4(%rbp)
movl -0x4(%rbp), %eax
addq $0xd0, %rsp
popq %rbp
retq
nop
|
/JKorbelRA[P]CMake/Utilities/cmliblzma/liblzma/common/block_buffer_encoder.c
|
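As a side note to the block_buffer_encode entry above: the line `out_size -= (out_size - *out_pos) & 3;` rounds the writable region down so its length is a multiple of four, which is what lets the Block Padding loop later in the function assume it never overruns the buffer. A minimal standalone sketch of that rounding, with made-up positions that are not taken from the dataset:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Round the writable region [out_pos, out_size) down to a multiple of
 * four bytes, mirroring the adjustment made in block_buffer_encode(). */
static size_t
align_out_size(size_t out_pos, size_t out_size)
{
    return out_size - ((out_size - out_pos) & 3);
}

int
main(void)
{
    size_t out_pos = 5;   /* hypothetical current write position */
    size_t out_size = 20; /* hypothetical buffer size */

    out_size = align_out_size(out_pos, out_size);

    /* 20 - ((20 - 5) & 3) = 17, and 17 - 5 = 12 is a multiple of four. */
    assert(out_size == 17);
    assert((out_size - out_pos) % 4 == 0);
    printf("aligned out_size = %zu\n", out_size);
    return 0;
}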
lzma_filter_flags_encode
|
extern LZMA_API(lzma_ret)
lzma_filter_flags_encode(const lzma_filter *filter,
uint8_t *out, size_t *out_pos, size_t out_size)
{
// Filter ID
if (filter->id >= LZMA_FILTER_RESERVED_START)
return LZMA_PROG_ERROR;
return_if_error(lzma_vli_encode(filter->id, NULL,
out, out_pos, out_size));
// Size of Properties
uint32_t props_size;
return_if_error(lzma_properties_size(&props_size, filter));
return_if_error(lzma_vli_encode(props_size, NULL,
out, out_pos, out_size));
// Filter Properties
if (out_size - *out_pos < props_size)
return LZMA_PROG_ERROR;
return_if_error(lzma_properties_encode(filter, out + *out_pos));
*out_pos += props_size;
return LZMA_OK;
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x40, %rsp
movq %rdi, -0x10(%rbp)
movq %rsi, -0x18(%rbp)
movq %rdx, -0x20(%rbp)
movq %rcx, -0x28(%rbp)
movq -0x10(%rbp), %rax
movabsq $0x4000000000000000, %rcx # imm = 0x4000000000000000
cmpq %rcx, (%rax)
jb 0x9c0267
movl $0xb, -0x4(%rbp)
jmp 0x9c034a
jmp 0x9c0269
movq -0x10(%rbp), %rax
movq (%rax), %rdi
movq -0x18(%rbp), %rdx
movq -0x20(%rbp), %rcx
movq -0x28(%rbp), %r8
xorl %eax, %eax
movl %eax, %esi
callq 0x9c3a50
movl %eax, -0x2c(%rbp)
cmpl $0x0, -0x2c(%rbp)
je 0x9c0299
movl -0x2c(%rbp), %eax
movl %eax, -0x4(%rbp)
jmp 0x9c034a
jmp 0x9c029b
jmp 0x9c029d
movq -0x10(%rbp), %rsi
leaq -0x30(%rbp), %rdi
callq 0x9a41c0
movl %eax, -0x34(%rbp)
cmpl $0x0, -0x34(%rbp)
je 0x9c02be
movl -0x34(%rbp), %eax
movl %eax, -0x4(%rbp)
jmp 0x9c034a
jmp 0x9c02c0
jmp 0x9c02c2
movl -0x30(%rbp), %eax
movl %eax, %edi
movq -0x18(%rbp), %rdx
movq -0x20(%rbp), %rcx
movq -0x28(%rbp), %r8
xorl %eax, %eax
movl %eax, %esi
callq 0x9c3a50
movl %eax, -0x38(%rbp)
cmpl $0x0, -0x38(%rbp)
je 0x9c02ed
movl -0x38(%rbp), %eax
movl %eax, -0x4(%rbp)
jmp 0x9c034a
jmp 0x9c02ef
movq -0x28(%rbp), %rax
movq -0x20(%rbp), %rcx
subq (%rcx), %rax
movl -0x30(%rbp), %ecx
cmpq %rcx, %rax
jae 0x9c030b
movl $0xb, -0x4(%rbp)
jmp 0x9c034a
jmp 0x9c030d
movq -0x10(%rbp), %rdi
movq -0x18(%rbp), %rsi
movq -0x20(%rbp), %rax
addq (%rax), %rsi
callq 0x9a4250
movl %eax, -0x3c(%rbp)
cmpl $0x0, -0x3c(%rbp)
je 0x9c0332
movl -0x3c(%rbp), %eax
movl %eax, -0x4(%rbp)
jmp 0x9c034a
jmp 0x9c0334
movl -0x30(%rbp), %eax
movl %eax, %ecx
movq -0x20(%rbp), %rax
addq (%rax), %rcx
movq %rcx, (%rax)
movl $0x0, -0x4(%rbp)
movl -0x4(%rbp), %eax
addq $0x40, %rsp
popq %rbp
retq
nopw %cs:(%rax,%rax)
nopl (%rax)
|
/JKorbelRA[P]CMake/Utilities/cmliblzma/liblzma/common/filter_flags_encoder.c
|
lzma_index_init
|
extern LZMA_API(lzma_index *)
lzma_index_init(const lzma_allocator *allocator)
{
lzma_index *i = index_init_plain(allocator);
if (i == NULL)
return NULL;
index_stream *s = index_stream_init(0, 0, 1, 0, allocator);
if (s == NULL) {
lzma_free(i, allocator);
return NULL;
}
index_tree_append(&i->streams, &s->node);
return i;
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x20, %rsp
movq %rdi, -0x10(%rbp)
movq -0x10(%rbp), %rdi
callq 0x9c03f0
movq %rax, -0x18(%rbp)
cmpq $0x0, -0x18(%rbp)
jne 0x9c038a
movq $0x0, -0x8(%rbp)
jmp 0x9c03d9
movq -0x10(%rbp), %r8
xorl %eax, %eax
movl %eax, %ecx
movl $0x1, %edx
movq %rcx, %rdi
movq %rcx, %rsi
callq 0x9c0470
movq %rax, -0x20(%rbp)
cmpq $0x0, -0x20(%rbp)
jne 0x9c03c4
movq -0x18(%rbp), %rdi
movq -0x10(%rbp), %rsi
callq 0x9a3140
movq $0x0, -0x8(%rbp)
jmp 0x9c03d9
movq -0x18(%rbp), %rdi
movq -0x20(%rbp), %rsi
callq 0x9c0560
movq -0x18(%rbp), %rax
movq %rax, -0x8(%rbp)
movq -0x8(%rbp), %rax
addq $0x20, %rsp
popq %rbp
retq
nopw %cs:(%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmliblzma/liblzma/common/index.c
|
index_init_plain
|
static lzma_index *
index_init_plain(const lzma_allocator *allocator)
{
lzma_index *i = lzma_alloc(sizeof(lzma_index), allocator);
if (i != NULL) {
index_tree_init(&i->streams);
i->uncompressed_size = 0;
i->total_size = 0;
i->record_count = 0;
i->index_list_size = 0;
i->prealloc = INDEX_GROUP_SIZE;
i->checks = 0;
}
return i;
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x10, %rsp
movq %rdi, -0x8(%rbp)
movq -0x8(%rbp), %rsi
movl $0x50, %edi
callq 0x9a3040
movq %rax, -0x10(%rbp)
cmpq $0x0, -0x10(%rbp)
je 0x9c0465
movq -0x10(%rbp), %rdi
callq 0x9c2360
movq -0x10(%rbp), %rax
movq $0x0, 0x20(%rax)
movq -0x10(%rbp), %rax
movq $0x0, 0x28(%rax)
movq -0x10(%rbp), %rax
movq $0x0, 0x30(%rax)
movq -0x10(%rbp), %rax
movq $0x0, 0x38(%rax)
movq -0x10(%rbp), %rax
movq $0x200, 0x40(%rax) # imm = 0x200
movq -0x10(%rbp), %rax
movl $0x0, 0x48(%rax)
movq -0x10(%rbp), %rax
addq $0x10, %rsp
popq %rbp
retq
nop
|
/JKorbelRA[P]CMake/Utilities/cmliblzma/liblzma/common/index.c
|
lzma_index_iter_next
|
extern LZMA_API(lzma_bool)
lzma_index_iter_next(lzma_index_iter *iter, lzma_index_iter_mode mode)
{
// Catch unsupported mode values.
if ((unsigned int)(mode) > LZMA_INDEX_ITER_NONEMPTY_BLOCK)
return true;
const lzma_index *i = iter->internal[ITER_INDEX].p;
const index_stream *stream = iter->internal[ITER_STREAM].p;
const index_group *group = NULL;
size_t record = iter->internal[ITER_RECORD].s;
// If we are being asked for the next Stream, leave group to NULL
// so that the rest of the this function thinks that this Stream
// has no groups and will thus go to the next Stream.
if (mode != LZMA_INDEX_ITER_STREAM) {
// Get the pointer to the current group. See iter_set_inf()
// for explanation.
switch (iter->internal[ITER_METHOD].s) {
case ITER_METHOD_NORMAL:
group = iter->internal[ITER_GROUP].p;
break;
case ITER_METHOD_NEXT:
group = index_tree_next(iter->internal[ITER_GROUP].p);
break;
case ITER_METHOD_LEFTMOST:
group = (const index_group *)(
stream->groups.leftmost);
break;
}
}
again:
if (stream == NULL) {
// We are at the beginning of the lzma_index.
// Locate the first Stream.
stream = (const index_stream *)(i->streams.leftmost);
if (mode >= LZMA_INDEX_ITER_BLOCK) {
// Since we are being asked to return information
// about the first Block, skip Streams that have
// no Blocks.
while (stream->groups.leftmost == NULL) {
stream = index_tree_next(&stream->node);
if (stream == NULL)
return true;
}
}
// Start from the first Record in the Stream.
group = (const index_group *)(stream->groups.leftmost);
record = 0;
} else if (group != NULL && record < group->last) {
// The next Record is in the same group.
++record;
} else {
// This group has no more Records or this Stream has
// no Blocks at all.
record = 0;
// If group is not NULL, this Stream has at least one Block
// and thus at least one group. Find the next group.
if (group != NULL)
group = index_tree_next(&group->node);
if (group == NULL) {
// This Stream has no more Records. Find the next
// Stream. If we are being asked to return information
// about a Block, we skip empty Streams.
do {
stream = index_tree_next(&stream->node);
if (stream == NULL)
return true;
} while (mode >= LZMA_INDEX_ITER_BLOCK
&& stream->groups.leftmost == NULL);
group = (const index_group *)(
stream->groups.leftmost);
}
}
if (mode == LZMA_INDEX_ITER_NONEMPTY_BLOCK) {
// We need to look for the next Block again if this Block
// is empty.
if (record == 0) {
if (group->node.uncompressed_base
== group->records[0].uncompressed_sum)
goto again;
} else if (group->records[record - 1].uncompressed_sum
== group->records[record].uncompressed_sum) {
goto again;
}
}
iter->internal[ITER_STREAM].p = stream;
iter->internal[ITER_GROUP].p = group;
iter->internal[ITER_RECORD].s = record;
iter_set_info(iter);
return false;
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x50, %rsp
movq %rdi, -0x10(%rbp)
movl %esi, -0x14(%rbp)
cmpl $0x3, -0x14(%rbp)
jbe 0x9c1a4e
movb $0x1, -0x1(%rbp)
jmp 0x9c1c80
movq -0x10(%rbp), %rax
movq 0x100(%rax), %rax
movq %rax, -0x20(%rbp)
movq -0x10(%rbp), %rax
movq 0x108(%rax), %rax
movq %rax, -0x28(%rbp)
movq $0x0, -0x30(%rbp)
movq -0x10(%rbp), %rax
movq 0x118(%rax), %rax
movq %rax, -0x38(%rbp)
cmpl $0x1, -0x14(%rbp)
je 0x9c1aec
movq -0x10(%rbp), %rax
movq 0x120(%rax), %rax
movq %rax, -0x40(%rbp)
testq %rax, %rax
je 0x9c1ab7
jmp 0x9c1a9f
movq -0x40(%rbp), %rax
subq $0x1, %rax
je 0x9c1ac8
jmp 0x9c1aab
movq -0x40(%rbp), %rax
subq $0x2, %rax
je 0x9c1ade
jmp 0x9c1aea
movq -0x10(%rbp), %rax
movq 0x110(%rax), %rax
movq %rax, -0x30(%rbp)
jmp 0x9c1aea
movq -0x10(%rbp), %rax
movq 0x110(%rax), %rdi
callq 0x9c1910
movq %rax, -0x30(%rbp)
jmp 0x9c1aea
movq -0x28(%rbp), %rax
movq 0x40(%rax), %rax
movq %rax, -0x30(%rbp)
jmp 0x9c1aec
jmp 0x9c1aee
cmpq $0x0, -0x28(%rbp)
jne 0x9c1b4e
movq -0x20(%rbp), %rax
movq 0x8(%rax), %rax
movq %rax, -0x28(%rbp)
cmpl $0x2, -0x14(%rbp)
jb 0x9c1b35
jmp 0x9c1b09
movq -0x28(%rbp), %rax
cmpq $0x0, 0x40(%rax)
jne 0x9c1b33
movq -0x28(%rbp), %rdi
callq 0x9c1910
movq %rax, -0x28(%rbp)
cmpq $0x0, -0x28(%rbp)
jne 0x9c1b31
movb $0x1, -0x1(%rbp)
jmp 0x9c1c80
jmp 0x9c1b09
jmp 0x9c1b35
movq -0x28(%rbp), %rax
movq 0x40(%rax), %rax
movq %rax, -0x30(%rbp)
movq $0x0, -0x38(%rbp)
jmp 0x9c1be6
cmpq $0x0, -0x30(%rbp)
je 0x9c1b71
movq -0x38(%rbp), %rax
movq -0x30(%rbp), %rcx
cmpq 0x38(%rcx), %rax
jae 0x9c1b71
movq -0x38(%rbp), %rax
addq $0x1, %rax
movq %rax, -0x38(%rbp)
jmp 0x9c1be4
movq $0x0, -0x38(%rbp)
cmpq $0x0, -0x30(%rbp)
je 0x9c1b8d
movq -0x30(%rbp), %rdi
callq 0x9c1910
movq %rax, -0x30(%rbp)
cmpq $0x0, -0x30(%rbp)
jne 0x9c1be2
jmp 0x9c1b96
movq -0x28(%rbp), %rdi
callq 0x9c1910
movq %rax, -0x28(%rbp)
cmpq $0x0, -0x28(%rbp)
jne 0x9c1bb3
movb $0x1, -0x1(%rbp)
jmp 0x9c1c80
jmp 0x9c1bb5
xorl %eax, %eax
cmpl $0x2, -0x14(%rbp)
movb %al, -0x41(%rbp)
jb 0x9c1bcf
movq -0x28(%rbp), %rax
cmpq $0x0, 0x40(%rax)
sete %al
movb %al, -0x41(%rbp)
movb -0x41(%rbp), %al
testb $0x1, %al
jne 0x9c1b96
movq -0x28(%rbp), %rax
movq 0x40(%rax), %rax
movq %rax, -0x30(%rbp)
jmp 0x9c1be4
jmp 0x9c1be6
cmpl $0x3, -0x14(%rbp)
jne 0x9c1c46
cmpq $0x0, -0x38(%rbp)
jne 0x9c1c0b
movq -0x30(%rbp), %rax
movq (%rax), %rax
movq -0x30(%rbp), %rcx
cmpq 0x40(%rcx), %rax
jne 0x9c1c09
jmp 0x9c1aee
jmp 0x9c1c44
movq -0x30(%rbp), %rax
addq $0x40, %rax
movq -0x38(%rbp), %rcx
subq $0x1, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movq (%rax), %rax
movq -0x30(%rbp), %rcx
addq $0x40, %rcx
movq -0x38(%rbp), %rdx
shlq $0x4, %rdx
addq %rdx, %rcx
cmpq (%rcx), %rax
jne 0x9c1c42
jmp 0x9c1aee
jmp 0x9c1c44
jmp 0x9c1c46
movq -0x28(%rbp), %rcx
movq -0x10(%rbp), %rax
movq %rcx, 0x108(%rax)
movq -0x30(%rbp), %rcx
movq -0x10(%rbp), %rax
movq %rcx, 0x110(%rax)
movq -0x38(%rbp), %rcx
movq -0x10(%rbp), %rax
movq %rcx, 0x118(%rax)
movq -0x10(%rbp), %rdi
callq 0x9c1c90
movb $0x0, -0x1(%rbp)
movb -0x1(%rbp), %al
addq $0x50, %rsp
popq %rbp
retq
nopl (%rax)
|
/JKorbelRA[P]CMake/Utilities/cmliblzma/liblzma/common/index.c
|
lzma_index_iter_locate
|
extern LZMA_API(lzma_bool)
lzma_index_iter_locate(lzma_index_iter *iter, lzma_vli target)
{
const lzma_index *i = iter->internal[ITER_INDEX].p;
// If the target is past the end of the file, return immediately.
if (i->uncompressed_size <= target)
return true;
// Locate the Stream containing the target offset.
const index_stream *stream = index_tree_locate(&i->streams, target);
assert(stream != NULL);
target -= stream->node.uncompressed_base;
// Locate the group containing the target offset.
const index_group *group = index_tree_locate(&stream->groups, target);
assert(group != NULL);
// Use binary search to locate the exact Record. It is the first
// Record whose uncompressed_sum is greater than target.
// This is because we want the rightmost Record that fulfills the
// search criterion. It is possible that there are empty Blocks;
// we don't want to return them.
size_t left = 0;
size_t right = group->last;
while (left < right) {
const size_t pos = left + (right - left) / 2;
if (group->records[pos].uncompressed_sum <= target)
left = pos + 1;
else
right = pos;
}
iter->internal[ITER_STREAM].p = stream;
iter->internal[ITER_GROUP].p = group;
iter->internal[ITER_RECORD].s = left;
iter_set_info(iter);
return false;
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x50, %rsp
movq %rdi, -0x10(%rbp)
movq %rsi, -0x18(%rbp)
movq -0x10(%rbp), %rax
movq 0x100(%rax), %rax
movq %rax, -0x20(%rbp)
movq -0x20(%rbp), %rax
movq 0x20(%rax), %rax
cmpq -0x18(%rbp), %rax
ja 0x9c2176
movb $0x1, -0x1(%rbp)
jmp 0x9c22a0
movq -0x20(%rbp), %rdi
movq -0x18(%rbp), %rsi
callq 0x9c22b0
movq %rax, -0x28(%rbp)
cmpq $0x0, -0x28(%rbp)
je 0x9c2190
jmp 0x9c21af
leaq 0x21a72c(%rip), %rdi # 0xbdc8c3
leaq 0x20fbb4(%rip), %rsi # 0xbd1d52
movl $0x4c4, %edx # imm = 0x4C4
leaq 0x20fceb(%rip), %rcx # 0xbd1e95
callq 0x3b440
movq -0x28(%rbp), %rax
movq (%rax), %rcx
movq -0x18(%rbp), %rax
subq %rcx, %rax
movq %rax, -0x18(%rbp)
movq -0x28(%rbp), %rdi
addq $0x38, %rdi
movq -0x18(%rbp), %rsi
callq 0x9c22b0
movq %rax, -0x30(%rbp)
cmpq $0x0, -0x30(%rbp)
je 0x9c21df
jmp 0x9c21fe
leaq 0x20fced(%rip), %rdi # 0xbd1ed3
leaq 0x20fb65(%rip), %rsi # 0xbd1d52
movl $0x4c9, %edx # imm = 0x4C9
leaq 0x20fc9c(%rip), %rcx # 0xbd1e95
callq 0x3b440
movq $0x0, -0x38(%rbp)
movq -0x30(%rbp), %rax
movq 0x38(%rax), %rax
movq %rax, -0x40(%rbp)
movq -0x38(%rbp), %rax
cmpq -0x40(%rbp), %rax
jae 0x9c2266
movq -0x38(%rbp), %rax
movq -0x40(%rbp), %rcx
subq -0x38(%rbp), %rcx
shrq %rcx
addq %rcx, %rax
movq %rax, -0x48(%rbp)
movq -0x30(%rbp), %rax
addq $0x40, %rax
movq -0x48(%rbp), %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movq (%rax), %rax
cmpq -0x18(%rbp), %rax
ja 0x9c225c
movq -0x48(%rbp), %rax
addq $0x1, %rax
movq %rax, -0x38(%rbp)
jmp 0x9c2264
movq -0x48(%rbp), %rax
movq %rax, -0x40(%rbp)
jmp 0x9c2212
movq -0x28(%rbp), %rcx
movq -0x10(%rbp), %rax
movq %rcx, 0x108(%rax)
movq -0x30(%rbp), %rcx
movq -0x10(%rbp), %rax
movq %rcx, 0x110(%rax)
movq -0x38(%rbp), %rcx
movq -0x10(%rbp), %rax
movq %rcx, 0x118(%rax)
movq -0x10(%rbp), %rdi
callq 0x9c1c90
movb $0x0, -0x1(%rbp)
movb -0x1(%rbp), %al
addq $0x50, %rsp
popq %rbp
retq
nopl (%rax)
|
/JKorbelRA[P]CMake/Utilities/cmliblzma/liblzma/common/index.c
|
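The while loop in the lzma_index_iter_locate entry above is an upper-bound binary search: it returns the index of the first Record whose cumulative uncompressed_sum exceeds the target offset, which is how empty Blocks get skipped. A self-contained sketch of the same search over a plain array, using illustrative sums that are not taken from any real index:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Return the index of the first element of sums[] that is > target,
 * the same "rightmost Record that still covers target" search used
 * by lzma_index_iter_locate(). */
static size_t
upper_bound(const uint64_t *sums, size_t count, uint64_t target)
{
    size_t left = 0;
    size_t right = count;

    while (left < right) {
        const size_t pos = left + (right - left) / 2;
        if (sums[pos] <= target)
            left = pos + 1;
        else
            right = pos;
    }

    return left;
}

int
main(void)
{
    /* Cumulative uncompressed sizes of four hypothetical Blocks;
     * the Block at index 1 is empty (it adds no bytes). */
    const uint64_t sums[] = { 100, 100, 250, 400 };

    assert(upper_bound(sums, 4, 0) == 0);
    assert(upper_bound(sums, 4, 99) == 0);
    assert(upper_bound(sums, 4, 100) == 2); /* the empty Block is skipped */
    assert(upper_bound(sums, 4, 399) == 3);
    return 0;
}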
lzma_index_encoder_init
|
extern lzma_ret
lzma_index_encoder_init(lzma_next_coder *next, const lzma_allocator *allocator,
const lzma_index *i)
{
lzma_next_coder_init(&lzma_index_encoder_init, next, allocator);
if (i == NULL)
return LZMA_PROG_ERROR;
if (next->coder == NULL) {
next->coder = lzma_alloc(sizeof(lzma_index_coder), allocator);
if (next->coder == NULL)
return LZMA_MEM_ERROR;
next->code = &index_encode;
next->end = &index_encoder_end;
}
index_encoder_reset(next->coder, i);
return LZMA_OK;
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x20, %rsp
movq %rdi, -0x10(%rbp)
movq %rsi, -0x18(%rbp)
movq %rdx, -0x20(%rbp)
movq -0x10(%rbp), %rcx
leaq -0x1f(%rip), %rax # 0x9c2440
cmpq 0x10(%rcx), %rax
je 0x9c2472
movq -0x10(%rbp), %rdi
movq -0x18(%rbp), %rsi
callq 0x9a32d0
movq -0x10(%rbp), %rax
leaq -0x3d(%rip), %rcx # 0x9c2440
movq %rcx, 0x10(%rax)
cmpq $0x0, -0x20(%rbp)
jne 0x9c2491
movl $0xb, -0x4(%rbp)
jmp 0x9c24fb
movq -0x10(%rbp), %rax
cmpq $0x0, (%rax)
jne 0x9c24e4
movq -0x18(%rbp), %rsi
movl $0x150, %edi # imm = 0x150
callq 0x9a3040
movq %rax, %rcx
movq -0x10(%rbp), %rax
movq %rcx, (%rax)
movq -0x10(%rbp), %rax
cmpq $0x0, (%rax)
jne 0x9c24c6
movl $0x5, -0x4(%rbp)
jmp 0x9c24fb
movq -0x10(%rbp), %rax
leaq 0x3f(%rip), %rcx # 0x9c2510
movq %rcx, 0x18(%rax)
movq -0x10(%rbp), %rax
leaq 0x3a0(%rip), %rcx # 0x9c2880
movq %rcx, 0x20(%rax)
movq -0x10(%rbp), %rax
movq (%rax), %rdi
movq -0x20(%rbp), %rsi
callq 0x9c28b0
movl $0x0, -0x4(%rbp)
movl -0x4(%rbp), %eax
addq $0x20, %rsp
popq %rbp
retq
nopw %cs:(%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmliblzma/liblzma/common/index_encoder.c
|
lzma_index_hash_decode
|
extern LZMA_API(lzma_ret)
lzma_index_hash_decode(lzma_index_hash *index_hash, const uint8_t *in,
size_t *in_pos, size_t in_size)
{
// Catch zero input buffer here, because in contrast to Index encoder
// and decoder functions, applications call this function directly
// instead of via lzma_code(), which does the buffer checking.
if (*in_pos >= in_size)
return LZMA_BUF_ERROR;
// NOTE: This function has many similarities to index_encode() and
// index_decode() functions found from index_encoder.c and
// index_decoder.c. See the comments especially in index_encoder.c.
const size_t in_start = *in_pos;
lzma_ret ret = LZMA_OK;
while (*in_pos < in_size)
switch (index_hash->sequence) {
case SEQ_BLOCK:
// Check the Index Indicator is present.
if (in[(*in_pos)++] != 0x00)
return LZMA_DATA_ERROR;
index_hash->sequence = SEQ_COUNT;
break;
case SEQ_COUNT: {
ret = lzma_vli_decode(&index_hash->remaining,
&index_hash->pos, in, in_pos, in_size);
if (ret != LZMA_STREAM_END)
goto out;
// The count must match the count of the Blocks decoded.
if (index_hash->remaining != index_hash->blocks.count)
return LZMA_DATA_ERROR;
ret = LZMA_OK;
index_hash->pos = 0;
// Handle the special case when there are no Blocks.
index_hash->sequence = index_hash->remaining == 0
? SEQ_PADDING_INIT : SEQ_UNPADDED;
break;
}
case SEQ_UNPADDED:
case SEQ_UNCOMPRESSED: {
lzma_vli *size = index_hash->sequence == SEQ_UNPADDED
? &index_hash->unpadded_size
: &index_hash->uncompressed_size;
ret = lzma_vli_decode(size, &index_hash->pos,
in, in_pos, in_size);
if (ret != LZMA_STREAM_END)
goto out;
ret = LZMA_OK;
index_hash->pos = 0;
if (index_hash->sequence == SEQ_UNPADDED) {
if (index_hash->unpadded_size < UNPADDED_SIZE_MIN
|| index_hash->unpadded_size
> UNPADDED_SIZE_MAX)
return LZMA_DATA_ERROR;
index_hash->sequence = SEQ_UNCOMPRESSED;
} else {
// Update the hash.
return_if_error(hash_append(&index_hash->records,
index_hash->unpadded_size,
index_hash->uncompressed_size));
// Verify that we don't go over the known sizes. Note
// that this validation is simpler than the one used
// in lzma_index_hash_append(), because here we know
// that values in index_hash->blocks are already
// validated and we are fine as long as we don't
// exceed them in index_hash->records.
if (index_hash->blocks.blocks_size
< index_hash->records.blocks_size
|| index_hash->blocks.uncompressed_size
< index_hash->records.uncompressed_size
|| index_hash->blocks.index_list_size
< index_hash->records.index_list_size)
return LZMA_DATA_ERROR;
// Check if this was the last Record.
index_hash->sequence = --index_hash->remaining == 0
? SEQ_PADDING_INIT : SEQ_UNPADDED;
}
break;
}
case SEQ_PADDING_INIT:
index_hash->pos = (LZMA_VLI_C(4) - index_size_unpadded(
index_hash->records.count,
index_hash->records.index_list_size)) & 3;
index_hash->sequence = SEQ_PADDING;
// Fall through
case SEQ_PADDING:
if (index_hash->pos > 0) {
--index_hash->pos;
if (in[(*in_pos)++] != 0x00)
return LZMA_DATA_ERROR;
break;
}
// Compare the sizes.
if (index_hash->blocks.blocks_size
!= index_hash->records.blocks_size
|| index_hash->blocks.uncompressed_size
!= index_hash->records.uncompressed_size
|| index_hash->blocks.index_list_size
!= index_hash->records.index_list_size)
return LZMA_DATA_ERROR;
// Finish the hashes and compare them.
lzma_check_finish(&index_hash->blocks.check, LZMA_CHECK_BEST);
lzma_check_finish(&index_hash->records.check, LZMA_CHECK_BEST);
if (memcmp(index_hash->blocks.check.buffer.u8,
index_hash->records.check.buffer.u8,
lzma_check_size(LZMA_CHECK_BEST)) != 0)
return LZMA_DATA_ERROR;
// Finish the CRC32 calculation.
index_hash->crc32 = lzma_crc32(in + in_start,
*in_pos - in_start, index_hash->crc32);
index_hash->sequence = SEQ_CRC32;
// Fall through
case SEQ_CRC32:
do {
if (*in_pos == in_size)
return LZMA_OK;
if (((index_hash->crc32 >> (index_hash->pos * 8))
& 0xFF) != in[(*in_pos)++])
return LZMA_DATA_ERROR;
} while (++index_hash->pos < 4);
return LZMA_STREAM_END;
default:
assert(0);
return LZMA_PROG_ERROR;
}
out:
// Update the CRC32.
index_hash->crc32 = lzma_crc32(in + in_start,
*in_pos - in_start, index_hash->crc32);
return ret;
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x70, %rsp
movq %rdi, -0x10(%rbp)
movq %rsi, -0x18(%rbp)
movq %rdx, -0x20(%rbp)
movq %rcx, -0x28(%rbp)
movq -0x20(%rbp), %rax
movq (%rax), %rax
cmpq -0x28(%rbp), %rax
jb 0x9c2ee1
movl $0xa, -0x4(%rbp)
jmp 0x9c33e3
movq -0x20(%rbp), %rax
movq (%rax), %rax
movq %rax, -0x30(%rbp)
movl $0x0, -0x34(%rbp)
movq -0x20(%rbp), %rax
movq (%rax), %rax
cmpq -0x28(%rbp), %rax
jae 0x9c33ad
movq -0x10(%rbp), %rax
movl (%rax), %eax
movq %rax, -0x50(%rbp)
subq $0x6, %rax
ja 0x9c3389
movq -0x50(%rbp), %rax
leaq 0x20f449(%rip), %rcx # 0xbd236c
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
movq -0x18(%rbp), %rax
movq -0x20(%rbp), %rdx
movq (%rdx), %rcx
movq %rcx, %rsi
addq $0x1, %rsi
movq %rsi, (%rdx)
movzbl (%rax,%rcx), %eax
cmpl $0x0, %eax
je 0x9c2f56
movl $0x9, -0x4(%rbp)
jmp 0x9c33e3
movq -0x10(%rbp), %rax
movl $0x1, (%rax)
jmp 0x9c33a8
movq -0x10(%rbp), %rdi
addq $0x118, %rdi # imm = 0x118
movq -0x10(%rbp), %rsi
addq $0x130, %rsi # imm = 0x130
movq -0x18(%rbp), %rdx
movq -0x20(%rbp), %rcx
movq -0x28(%rbp), %r8
callq 0x9c38c0
movl %eax, -0x34(%rbp)
cmpl $0x1, -0x34(%rbp)
je 0x9c2f9a
jmp 0x9c33af
movq -0x10(%rbp), %rax
movq 0x118(%rax), %rax
movq -0x10(%rbp), %rcx
cmpq 0x18(%rcx), %rax
je 0x9c2fbb
movl $0x9, -0x4(%rbp)
jmp 0x9c33e3
movl $0x0, -0x34(%rbp)
movq -0x10(%rbp), %rax
movq $0x0, 0x130(%rax)
movq -0x10(%rbp), %rax
movq 0x118(%rax), %rdx
movl $0x2, %ecx
movl $0x4, %eax
cmpq $0x0, %rdx
cmovel %eax, %ecx
movq -0x10(%rbp), %rax
movl %ecx, (%rax)
jmp 0x9c33a8
movq -0x10(%rbp), %rax
cmpl $0x2, (%rax)
jne 0x9c3011
movq -0x10(%rbp), %rax
addq $0x120, %rax # imm = 0x120
movq %rax, -0x58(%rbp)
jmp 0x9c301f
movq -0x10(%rbp), %rax
addq $0x128, %rax # imm = 0x128
movq %rax, -0x58(%rbp)
movq -0x58(%rbp), %rax
movq %rax, -0x40(%rbp)
movq -0x40(%rbp), %rdi
movq -0x10(%rbp), %rsi
addq $0x130, %rsi # imm = 0x130
movq -0x18(%rbp), %rdx
movq -0x20(%rbp), %rcx
movq -0x28(%rbp), %r8
callq 0x9c38c0
movl %eax, -0x34(%rbp)
cmpl $0x1, -0x34(%rbp)
je 0x9c3055
jmp 0x9c33af
movl $0x0, -0x34(%rbp)
movq -0x10(%rbp), %rax
movq $0x0, 0x130(%rax)
movq -0x10(%rbp), %rax
cmpl $0x2, (%rax)
jne 0x9c30b4
movq -0x10(%rbp), %rax
cmpq $0x5, 0x120(%rax)
jb 0x9c3099
movq -0x10(%rbp), %rax
movabsq $0x7ffffffffffffffc, %rcx # imm = 0x7FFFFFFFFFFFFFFC
cmpq %rcx, 0x120(%rax)
jbe 0x9c30a5
movl $0x9, -0x4(%rbp)
jmp 0x9c33e3
movq -0x10(%rbp), %rax
movl $0x3, (%rax)
jmp 0x9c316a
jmp 0x9c30b6
movq -0x10(%rbp), %rdi
addq $0x90, %rdi
movq -0x10(%rbp), %rax
movq 0x120(%rax), %rsi
movq -0x10(%rbp), %rax
movq 0x128(%rax), %rdx
callq 0x9c2db0
movl %eax, -0x44(%rbp)
cmpl $0x0, -0x44(%rbp)
je 0x9c30f0
movl -0x44(%rbp), %eax
movl %eax, -0x4(%rbp)
jmp 0x9c33e3
jmp 0x9c30f2
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x10(%rbp), %rcx
cmpq 0x90(%rcx), %rax
jb 0x9c3131
movq -0x10(%rbp), %rax
movq 0x10(%rax), %rax
movq -0x10(%rbp), %rcx
cmpq 0x98(%rcx), %rax
jb 0x9c3131
movq -0x10(%rbp), %rax
movq 0x20(%rax), %rax
movq -0x10(%rbp), %rcx
cmpq 0xa8(%rcx), %rax
jae 0x9c313d
movl $0x9, -0x4(%rbp)
jmp 0x9c33e3
movq -0x10(%rbp), %rax
movq 0x118(%rax), %rdx
addq $-0x1, %rdx
movq %rdx, 0x118(%rax)
movl $0x2, %ecx
movl $0x4, %eax
cmpq $0x0, %rdx
cmovel %eax, %ecx
movq -0x10(%rbp), %rax
movl %ecx, (%rax)
jmp 0x9c33a8
movq -0x10(%rbp), %rax
movq 0xa0(%rax), %rdi
movq -0x10(%rbp), %rax
movq 0xa8(%rax), %rsi
callq 0x9c33f0
movl $0x4, %ecx
subq %rax, %rcx
andq $0x3, %rcx
movq -0x10(%rbp), %rax
movq %rcx, 0x130(%rax)
movq -0x10(%rbp), %rax
movl $0x5, (%rax)
movq -0x10(%rbp), %rax
cmpq $0x0, 0x130(%rax)
jbe 0x9c31fe
movq -0x10(%rbp), %rax
movq 0x130(%rax), %rcx
addq $-0x1, %rcx
movq %rcx, 0x130(%rax)
movq -0x18(%rbp), %rax
movq -0x20(%rbp), %rdx
movq (%rdx), %rcx
movq %rcx, %rsi
addq $0x1, %rsi
movq %rsi, (%rdx)
movzbl (%rax,%rcx), %eax
cmpl $0x0, %eax
je 0x9c31f9
movl $0x9, -0x4(%rbp)
jmp 0x9c33e3
jmp 0x9c33a8
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x10(%rbp), %rcx
cmpq 0x90(%rcx), %rax
jne 0x9c323d
movq -0x10(%rbp), %rax
movq 0x10(%rax), %rax
movq -0x10(%rbp), %rcx
cmpq 0x98(%rcx), %rax
jne 0x9c323d
movq -0x10(%rbp), %rax
movq 0x20(%rax), %rax
movq -0x10(%rbp), %rcx
cmpq 0xa8(%rcx), %rax
je 0x9c3249
movl $0x9, -0x4(%rbp)
jmp 0x9c33e3
movq -0x10(%rbp), %rdi
addq $0x8, %rdi
addq $0x20, %rdi
movl $0xa, %esi
callq 0x9bad20
movq -0x10(%rbp), %rdi
addq $0x90, %rdi
addq $0x20, %rdi
movl $0xa, %esi
callq 0x9bad20
movq -0x10(%rbp), %rax
addq $0x8, %rax
addq $0x20, %rax
movq %rax, -0x68(%rbp)
movq -0x10(%rbp), %rax
addq $0x90, %rax
addq $0x20, %rax
movq %rax, -0x60(%rbp)
movl $0xa, %edi
callq 0x9babf0
movq -0x68(%rbp), %rdi
movq -0x60(%rbp), %rsi
movl %eax, %eax
movl %eax, %edx
callq 0x3c770
cmpl $0x0, %eax
je 0x9c32c6
movl $0x9, -0x4(%rbp)
jmp 0x9c33e3
movq -0x18(%rbp), %rdi
addq -0x30(%rbp), %rdi
movq -0x20(%rbp), %rax
movq (%rax), %rsi
subq -0x30(%rbp), %rsi
movq -0x10(%rbp), %rax
movl 0x138(%rax), %edx
callq 0x9a2300
movl %eax, %ecx
movq -0x10(%rbp), %rax
movl %ecx, 0x138(%rax)
movq -0x10(%rbp), %rax
movl $0x6, (%rax)
jmp 0x9c3300
movq -0x20(%rbp), %rax
movq (%rax), %rax
cmpq -0x28(%rbp), %rax
jne 0x9c3319
movl $0x0, -0x4(%rbp)
jmp 0x9c33e3
movq -0x10(%rbp), %rax
movl 0x138(%rax), %eax
movq -0x10(%rbp), %rcx
movq 0x130(%rcx), %rcx
shlq $0x3, %rcx
shrl %cl, %eax
andl $0xff, %eax
movq -0x18(%rbp), %rcx
movq -0x20(%rbp), %rsi
movq (%rsi), %rdx
movq %rdx, %rdi
addq $0x1, %rdi
movq %rdi, (%rsi)
movzbl (%rcx,%rdx), %ecx
cmpl %ecx, %eax
je 0x9c3362
movl $0x9, -0x4(%rbp)
jmp 0x9c33e3
jmp 0x9c3364
movq -0x10(%rbp), %rcx
movq 0x130(%rcx), %rax
addq $0x1, %rax
movq %rax, 0x130(%rcx)
cmpq $0x4, %rax
jb 0x9c3300
movl $0x1, -0x4(%rbp)
jmp 0x9c33e3
leaq 0x1af30a(%rip), %rdi # 0xb7269a
leaq 0x20eff1(%rip), %rsi # 0xbd2388
movl $0x144, %edx # imm = 0x144
leaq 0x20f05e(%rip), %rcx # 0xbd2401
callq 0x3b440
jmp 0x9c2ef3
jmp 0x9c33af
movq -0x18(%rbp), %rdi
addq -0x30(%rbp), %rdi
movq -0x20(%rbp), %rax
movq (%rax), %rsi
subq -0x30(%rbp), %rsi
movq -0x10(%rbp), %rax
movl 0x138(%rax), %edx
callq 0x9a2300
movl %eax, %ecx
movq -0x10(%rbp), %rax
movl %ecx, 0x138(%rax)
movl -0x34(%rbp), %eax
movl %eax, -0x4(%rbp)
movl -0x4(%rbp), %eax
addq $0x70, %rsp
popq %rbp
retq
nopl (%rax)
|
/JKorbelRA[P]CMake/Utilities/cmliblzma/liblzma/common/index_hash.c
|
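The SEQ_CRC32 state in the lzma_index_hash_decode entry above compares the computed CRC32 against the stored field one byte at a time, least-significant byte first, via `(crc32 >> (pos * 8)) & 0xFF`. A tiny sketch of that extraction with a hypothetical CRC value:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

int
main(void)
{
    /* Hypothetical CRC32 value and its little-endian on-disk encoding. */
    const uint32_t crc32 = 0x1A2B3C4Du;
    const uint8_t stored[4] = { 0x4D, 0x3C, 0x2B, 0x1A };

    /* Same extraction as the SEQ_CRC32 loop: shift by pos * 8 and mask. */
    for (size_t pos = 0; pos < 4; ++pos)
        assert(((crc32 >> (pos * 8)) & 0xFF) == stored[pos]);

    return 0;
}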
lzma_vli_decode
|
extern LZMA_API(lzma_ret)
lzma_vli_decode(lzma_vli *restrict vli, size_t *vli_pos,
const uint8_t *restrict in, size_t *restrict in_pos,
size_t in_size)
{
// If we haven't been given vli_pos, work in single-call mode.
size_t vli_pos_internal = 0;
if (vli_pos == NULL) {
vli_pos = &vli_pos_internal;
*vli = 0;
// If there's no input, use LZMA_DATA_ERROR. This way it is
// easy to decode VLIs from buffers that have known size,
// and get the correct error code in case the buffer is
// too short.
if (*in_pos >= in_size)
return LZMA_DATA_ERROR;
} else {
// Initialize *vli when starting to decode a new integer.
if (*vli_pos == 0)
*vli = 0;
// Validate the arguments.
if (*vli_pos >= LZMA_VLI_BYTES_MAX
|| (*vli >> (*vli_pos * 7)) != 0)
return LZMA_PROG_ERROR;
if (*in_pos >= in_size)
return LZMA_BUF_ERROR;
}
do {
// Read the next byte. Use a temporary variable so that we
// can update *in_pos immediately.
const uint8_t byte = in[*in_pos];
++*in_pos;
// Add the newly read byte to *vli.
*vli += (lzma_vli)(byte & 0x7F) << (*vli_pos * 7);
++*vli_pos;
// Check if this is the last byte of a multibyte integer.
if ((byte & 0x80) == 0) {
// We don't allow using variable-length integers as
// padding, i.e. the encoding must use the most
// compact form.
if (byte == 0x00 && *vli_pos > 1)
return LZMA_DATA_ERROR;
return vli_pos == &vli_pos_internal
? LZMA_OK : LZMA_STREAM_END;
}
// There is at least one more byte coming. If we have already
// read maximum number of bytes, the integer is considered
// corrupt.
//
// If we need bigger integers in the future, old versions of liblzma
// will confusingly indicate the file being corrupt instead of
// unsupported. I suppose it's still better this way, because
// in the foreseeable future (writing this in 2008) the only
// reason why files would appear having over 63-bit integers
// is that the files are simply corrupt.
if (*vli_pos == LZMA_VLI_BYTES_MAX)
return LZMA_DATA_ERROR;
} while (*in_pos < in_size);
return vli_pos == &vli_pos_internal ? LZMA_DATA_ERROR : LZMA_OK;
}
|
pushq %rbp
movq %rsp, %rbp
movq %rdi, -0x10(%rbp)
movq %rsi, -0x18(%rbp)
movq %rdx, -0x20(%rbp)
movq %rcx, -0x28(%rbp)
movq %r8, -0x30(%rbp)
movq $0x0, -0x38(%rbp)
cmpq $0x0, -0x18(%rbp)
jne 0x9c3915
leaq -0x38(%rbp), %rax
movq %rax, -0x18(%rbp)
movq -0x10(%rbp), %rax
movq $0x0, (%rax)
movq -0x28(%rbp), %rax
movq (%rax), %rax
cmpq -0x30(%rbp), %rax
jb 0x9c3913
movl $0x9, -0x4(%rbp)
jmp 0x9c3a45
jmp 0x9c3973
movq -0x18(%rbp), %rax
cmpq $0x0, (%rax)
jne 0x9c392a
movq -0x10(%rbp), %rax
movq $0x0, (%rax)
movq -0x18(%rbp), %rax
cmpq $0x9, (%rax)
jae 0x9c394c
movq -0x10(%rbp), %rax
movq (%rax), %rax
movq -0x18(%rbp), %rcx
imulq $0x7, (%rcx), %rcx
shrq %cl, %rax
cmpq $0x0, %rax
je 0x9c3958
movl $0xb, -0x4(%rbp)
jmp 0x9c3a45
movq -0x28(%rbp), %rax
movq (%rax), %rax
cmpq -0x30(%rbp), %rax
jb 0x9c3971
movl $0xa, -0x4(%rbp)
jmp 0x9c3a45
jmp 0x9c3973
jmp 0x9c3975
movq -0x20(%rbp), %rax
movq -0x28(%rbp), %rcx
movq (%rcx), %rcx
movb (%rax,%rcx), %al
movb %al, -0x39(%rbp)
movq -0x28(%rbp), %rax
movq (%rax), %rcx
addq $0x1, %rcx
movq %rcx, (%rax)
movzbl -0x39(%rbp), %eax
andl $0x7f, %eax
cltq
movq -0x18(%rbp), %rcx
imulq $0x7, (%rcx), %rcx
shlq %cl, %rax
movq %rax, %rcx
movq -0x10(%rbp), %rax
addq (%rax), %rcx
movq %rcx, (%rax)
movq -0x18(%rbp), %rax
movq (%rax), %rcx
addq $0x1, %rcx
movq %rcx, (%rax)
movzbl -0x39(%rbp), %eax
andl $0x80, %eax
cmpl $0x0, %eax
jne 0x9c3a07
movzbl -0x39(%rbp), %eax
cmpl $0x0, %eax
jne 0x9c39ed
movq -0x18(%rbp), %rax
cmpq $0x1, (%rax)
jbe 0x9c39ed
movl $0x9, -0x4(%rbp)
jmp 0x9c3a45
movq -0x18(%rbp), %rdx
leaq -0x38(%rbp), %rsi
movl $0x1, %eax
xorl %ecx, %ecx
cmpq %rsi, %rdx
cmovel %ecx, %eax
movl %eax, -0x4(%rbp)
jmp 0x9c3a45
movq -0x18(%rbp), %rax
cmpq $0x9, (%rax)
jne 0x9c3a1a
movl $0x9, -0x4(%rbp)
jmp 0x9c3a45
jmp 0x9c3a1c
movq -0x28(%rbp), %rax
movq (%rax), %rax
cmpq -0x30(%rbp), %rax
jb 0x9c3975
movq -0x18(%rbp), %rdx
leaq -0x38(%rbp), %rsi
xorl %eax, %eax
movl $0x9, %ecx
cmpq %rsi, %rdx
cmovel %ecx, %eax
movl %eax, -0x4(%rbp)
movl -0x4(%rbp), %eax
popq %rbp
retq
nopw (%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmliblzma/liblzma/common/vli_decoder.c
|
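The lzma_vli_decode entry above consumes xz's variable-length integer format: each byte carries seven payload bits, least-significant group first, and the high bit marks that more bytes follow. A small round-trip sketch of that format; the encoder below is a simplified stand-in for illustration, not liblzma's lzma_vli_encode:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Encode value into buf using the 7-bits-per-byte format that
 * lzma_vli_decode() reads; returns the number of bytes written. */
static size_t
vli_encode(uint64_t value, uint8_t *buf)
{
    size_t i = 0;
    while (value >= 0x80) {
        buf[i++] = (uint8_t)(value & 0x7F) | 0x80;
        value >>= 7;
    }
    buf[i++] = (uint8_t)value;
    return i;
}

/* Decode the same format: each byte contributes 7 bits at position i * 7. */
static uint64_t
vli_decode(const uint8_t *buf, size_t size)
{
    uint64_t value = 0;
    for (size_t i = 0; i < size; ++i)
        value |= (uint64_t)(buf[i] & 0x7F) << (i * 7);
    return value;
}

int
main(void)
{
    uint8_t buf[9];

    /* 300 = 0b1_0010_1100 encodes as 0xAC 0x02. */
    size_t n = vli_encode(300, buf);
    assert(n == 2 && buf[0] == 0xAC && buf[1] == 0x02);
    assert(vli_decode(buf, n) == 300);

    /* Values below 0x80 pass through as a single byte. */
    n = vli_encode(0x7F, buf);
    assert(n == 1 && buf[0] == 0x7F);
    return 0;
}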
lz_encode
|
static lzma_ret
lz_encode(void *coder_ptr, const lzma_allocator *allocator,
const uint8_t *restrict in, size_t *restrict in_pos,
size_t in_size,
uint8_t *restrict out, size_t *restrict out_pos,
size_t out_size, lzma_action action)
{
lzma_coder *coder = coder_ptr;
while (*out_pos < out_size
&& (*in_pos < in_size || action != LZMA_RUN)) {
// Read more data to coder->mf.buffer if needed.
if (coder->mf.action == LZMA_RUN && coder->mf.read_pos
>= coder->mf.read_limit)
return_if_error(fill_window(coder, allocator,
in, in_pos, in_size, action));
// Encode
const lzma_ret ret = coder->lz.code(coder->lz.coder,
&coder->mf, out, out_pos, out_size);
if (ret != LZMA_OK) {
// Setting this to LZMA_RUN for cases when we are
// flushing. It doesn't matter when finishing or if
// an error occurred.
coder->mf.action = LZMA_RUN;
return ret;
}
}
return LZMA_OK;
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x50, %rsp
movl 0x20(%rbp), %eax
movq 0x18(%rbp), %rax
movq 0x10(%rbp), %rax
movq %rdi, -0x10(%rbp)
movq %rsi, -0x18(%rbp)
movq %rdx, -0x20(%rbp)
movq %rcx, -0x28(%rbp)
movq %r8, -0x30(%rbp)
movq %r9, -0x38(%rbp)
movq -0x10(%rbp), %rax
movq %rax, -0x40(%rbp)
movq 0x10(%rbp), %rax
movq (%rax), %rcx
xorl %eax, %eax
cmpq 0x18(%rbp), %rcx
movb %al, -0x49(%rbp)
jae 0x9c4337
movq -0x28(%rbp), %rax
movq (%rax), %rcx
movb $0x1, %al
cmpq -0x30(%rbp), %rcx
movb %al, -0x4a(%rbp)
jb 0x9c4331
cmpl $0x0, 0x20(%rbp)
setne %al
movb %al, -0x4a(%rbp)
movb -0x4a(%rbp), %al
movb %al, -0x49(%rbp)
movb -0x49(%rbp), %al
testb $0x1, %al
jne 0x9c4343
jmp 0x9c43dd
movq -0x40(%rbp), %rax
cmpl $0x0, 0x88(%rax)
jne 0x9c4394
movq -0x40(%rbp), %rax
movl 0x38(%rax), %eax
movq -0x40(%rbp), %rcx
cmpl 0x40(%rcx), %eax
jb 0x9c4394
jmp 0x9c4362
movq -0x40(%rbp), %rdi
movq -0x18(%rbp), %rsi
movq -0x20(%rbp), %rdx
movq -0x28(%rbp), %rcx
movq -0x30(%rbp), %r8
movl 0x20(%rbp), %r9d
callq 0x9c47d0
movl %eax, -0x44(%rbp)
cmpl $0x0, -0x44(%rbp)
je 0x9c4390
movl -0x44(%rbp), %eax
movl %eax, -0x4(%rbp)
jmp 0x9c43e4
jmp 0x9c4392
jmp 0x9c4394
movq -0x40(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x40(%rbp), %rcx
movq (%rcx), %rdi
movq -0x40(%rbp), %rsi
addq $0x20, %rsi
movq -0x38(%rbp), %rdx
movq 0x10(%rbp), %rcx
movq 0x18(%rbp), %r8
callq *%rax
movl %eax, -0x48(%rbp)
cmpl $0x0, -0x48(%rbp)
je 0x9c43d8
movq -0x40(%rbp), %rax
movl $0x0, 0x88(%rax)
movl -0x48(%rbp), %eax
movl %eax, -0x4(%rbp)
jmp 0x9c43e4
jmp 0x9c4303
movl $0x0, -0x4(%rbp)
movl -0x4(%rbp), %eax
addq $0x50, %rsp
popq %rbp
retq
nopl (%rax)
|
/JKorbelRA[P]CMake/Utilities/cmliblzma/liblzma/lz/lz_encoder.c
|
lzma_mf_is_supported
|
extern LZMA_API(lzma_bool)
lzma_mf_is_supported(lzma_match_finder mf)
{
bool ret = false;
#ifdef HAVE_MF_HC3
if (mf == LZMA_MF_HC3)
ret = true;
#endif
#ifdef HAVE_MF_HC4
if (mf == LZMA_MF_HC4)
ret = true;
#endif
#ifdef HAVE_MF_BT2
if (mf == LZMA_MF_BT2)
ret = true;
#endif
#ifdef HAVE_MF_BT3
if (mf == LZMA_MF_BT3)
ret = true;
#endif
#ifdef HAVE_MF_BT4
if (mf == LZMA_MF_BT4)
ret = true;
#endif
return ret;
}
|
pushq %rbp
movq %rsp, %rbp
movl %edi, -0x4(%rbp)
movb $0x0, -0x5(%rbp)
cmpl $0x3, -0x4(%rbp)
jne 0x9c4795
movb $0x1, -0x5(%rbp)
cmpl $0x4, -0x4(%rbp)
jne 0x9c479f
movb $0x1, -0x5(%rbp)
cmpl $0x12, -0x4(%rbp)
jne 0x9c47a9
movb $0x1, -0x5(%rbp)
cmpl $0x13, -0x4(%rbp)
jne 0x9c47b3
movb $0x1, -0x5(%rbp)
cmpl $0x14, -0x4(%rbp)
jne 0x9c47bd
movb $0x1, -0x5(%rbp)
movb -0x5(%rbp), %al
andb $0x1, %al
popq %rbp
retq
nopw %cs:(%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmliblzma/liblzma/lz/lz_encoder.c
|
move_window
|
static void
move_window(lzma_mf *mf)
{
// Align the move to a multiple of 16 bytes. Some LZ-based encoders
// like LZMA use the lowest bits of mf->read_pos to know the
// alignment of the uncompressed data. We also get better speed
// for memmove() with aligned buffers.
assert(mf->read_pos > mf->keep_size_before);
const uint32_t move_offset
= (mf->read_pos - mf->keep_size_before) & ~UINT32_C(15);
assert(mf->write_pos > move_offset);
const size_t move_size = mf->write_pos - move_offset;
assert(move_offset + move_size <= mf->size);
memmove(mf->buffer, mf->buffer + move_offset, move_size);
mf->offset += move_offset;
mf->read_pos -= move_offset;
mf->read_limit -= move_offset;
mf->write_pos -= move_offset;
return;
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x20, %rsp
movq %rdi, -0x8(%rbp)
movq -0x8(%rbp), %rax
movl 0x18(%rax), %eax
movq -0x8(%rbp), %rcx
cmpl 0xc(%rcx), %eax
jbe 0x9c4a5e
jmp 0x9c4a7d
leaq 0x20ddcf(%rip), %rdi # 0xbd2834
leaq 0x20dcca(%rip), %rsi # 0xbd2736
movl $0x34, %edx
leaq 0x20dde0(%rip), %rcx # 0xbd2858
callq 0x3b440
movq -0x8(%rbp), %rax
movl 0x18(%rax), %eax
movq -0x8(%rbp), %rcx
subl 0xc(%rcx), %eax
andl $-0x10, %eax
movl %eax, -0xc(%rbp)
movq -0x8(%rbp), %rax
movl 0x24(%rax), %eax
cmpl -0xc(%rbp), %eax
jbe 0x9c4a9f
jmp 0x9c4abe
leaq 0x20ddce(%rip), %rdi # 0xbd2874
leaq 0x20dc89(%rip), %rsi # 0xbd2736
movl $0x38, %edx
leaq 0x20dd9f(%rip), %rcx # 0xbd2858
callq 0x3b440
movq -0x8(%rbp), %rax
movl 0x24(%rax), %eax
subl -0xc(%rbp), %eax
movl %eax, %eax
movq %rax, -0x18(%rbp)
movl -0xc(%rbp), %eax
addq -0x18(%rbp), %rax
movq -0x8(%rbp), %rcx
movl 0x8(%rcx), %ecx
cmpq %rcx, %rax
ja 0x9c4ae3
jmp 0x9c4b02
leaq 0x20dda6(%rip), %rdi # 0xbd2890
leaq 0x20dc45(%rip), %rsi # 0xbd2736
movl $0x3b, %edx
leaq 0x20dd5b(%rip), %rcx # 0xbd2858
callq 0x3b440
movq -0x8(%rbp), %rax
movq (%rax), %rdi
movq -0x8(%rbp), %rax
movq (%rax), %rsi
movl -0xc(%rbp), %eax
addq %rax, %rsi
movq -0x18(%rbp), %rdx
callq 0x3b370
movl -0xc(%rbp), %ecx
movq -0x8(%rbp), %rax
addl 0x14(%rax), %ecx
movl %ecx, 0x14(%rax)
movl -0xc(%rbp), %edx
movq -0x8(%rbp), %rax
movl 0x18(%rax), %ecx
subl %edx, %ecx
movl %ecx, 0x18(%rax)
movl -0xc(%rbp), %edx
movq -0x8(%rbp), %rax
movl 0x20(%rax), %ecx
subl %edx, %ecx
movl %ecx, 0x20(%rax)
movl -0xc(%rbp), %edx
movq -0x8(%rbp), %rax
movl 0x24(%rax), %ecx
subl %edx, %ecx
movl %ecx, 0x24(%rax)
addq $0x20, %rsp
popq %rbp
retq
nop
|
/JKorbelRA[P]CMake/Utilities/cmliblzma/liblzma/lz/lz_encoder.c
|
lzma_mf_hc4_find
|
extern uint32_t
lzma_mf_hc4_find(lzma_mf *mf, lzma_match *matches)
{
header_find(false, 4);
hash_4_calc();
uint32_t delta2 = pos - mf->hash[hash_2_value];
const uint32_t delta3
= pos - mf->hash[FIX_3_HASH_SIZE + hash_3_value];
const uint32_t cur_match = mf->hash[FIX_4_HASH_SIZE + hash_value];
mf->hash[hash_2_value ] = pos;
mf->hash[FIX_3_HASH_SIZE + hash_3_value] = pos;
mf->hash[FIX_4_HASH_SIZE + hash_value] = pos;
uint32_t len_best = 1;
if (delta2 < mf->cyclic_size && *(cur - delta2) == *cur) {
len_best = 2;
matches[0].len = 2;
matches[0].dist = delta2 - 1;
matches_count = 1;
}
if (delta2 != delta3 && delta3 < mf->cyclic_size
&& *(cur - delta3) == *cur) {
len_best = 3;
matches[matches_count++].dist = delta3 - 1;
delta2 = delta3;
}
if (matches_count != 0) {
len_best = lzma_memcmplen(cur - delta2, cur,
len_best, len_limit);
matches[matches_count - 1].len = len_best;
if (len_best == len_limit) {
hc_skip();
return matches_count;
}
}
if (len_best < 3)
len_best = 3;
hc_find(len_best);
}
|
pushq %rbp
movq %rsp, %rbp
pushq %rbx
subq $0xa8, %rsp
movq %rdi, -0x40(%rbp)
movq %rsi, -0x48(%rbp)
movq -0x40(%rbp), %rdi
callq 0x9c4e60
movl %eax, -0x4c(%rbp)
movq -0x40(%rbp), %rax
movl 0x60(%rax), %eax
cmpl -0x4c(%rbp), %eax
ja 0x9c5678
movq -0x40(%rbp), %rax
movl 0x60(%rax), %eax
movl %eax, -0x4c(%rbp)
jmp 0x9c56c0
cmpl $0x4, -0x4c(%rbp)
jae 0x9c56be
movq -0x40(%rbp), %rax
cmpl $0x0, 0x68(%rax)
je 0x9c568a
jmp 0x9c56a9
leaq 0x20d362(%rip), %rdi # 0xbd29f3
leaq 0x20d23b(%rip), %rsi # 0xbd28d3
movl $0x170, %edx # imm = 0x170
leaq 0x20d399(%rip), %rcx # 0xbd2a3d
callq 0x3b440
movq -0x40(%rbp), %rdi
callq 0x9c51e0
movl $0x0, -0x34(%rbp)
jmp 0x9c5a7d
jmp 0x9c56c0
movq -0x40(%rbp), %rdi
callq 0x9c4e40
movq %rax, -0x58(%rbp)
movq -0x40(%rbp), %rax
movl 0x18(%rax), %eax
movq -0x40(%rbp), %rcx
addl 0x14(%rcx), %eax
movl %eax, -0x5c(%rbp)
movl $0x0, -0x60(%rbp)
movq -0x58(%rbp), %rax
movzbl (%rax), %eax
movl %eax, %ecx
leaq 0x205abb(%rip), %rax # 0xbcb1b0
movl (%rax,%rcx,4), %eax
movq -0x58(%rbp), %rcx
movzbl 0x1(%rcx), %ecx
xorl %ecx, %eax
movl %eax, -0x64(%rbp)
movl -0x64(%rbp), %eax
andl $0x3ff, %eax # imm = 0x3FF
movl %eax, -0x68(%rbp)
movl -0x64(%rbp), %eax
movq -0x58(%rbp), %rcx
movzbl 0x2(%rcx), %ecx
shll $0x8, %ecx
xorl %ecx, %eax
andl $0xffff, %eax # imm = 0xFFFF
movl %eax, -0x6c(%rbp)
movl -0x64(%rbp), %eax
movq -0x58(%rbp), %rcx
movzbl 0x2(%rcx), %ecx
shll $0x8, %ecx
xorl %ecx, %eax
movq -0x58(%rbp), %rcx
movzbl 0x3(%rcx), %ecx
movl %ecx, %edx
leaq 0x205a67(%rip), %rcx # 0xbcb1b0
movl (%rcx,%rdx,4), %ecx
shll $0x5, %ecx
xorl %ecx, %eax
movq -0x40(%rbp), %rcx
andl 0x58(%rcx), %eax
movl %eax, -0x70(%rbp)
movl -0x5c(%rbp), %eax
movq -0x40(%rbp), %rcx
movq 0x40(%rcx), %rcx
movl -0x68(%rbp), %edx
subl (%rcx,%rdx,4), %eax
movl %eax, -0x74(%rbp)
movl -0x5c(%rbp), %eax
movq -0x40(%rbp), %rcx
movq 0x40(%rcx), %rcx
movl -0x6c(%rbp), %edx
addl $0x400, %edx # imm = 0x400
movl %edx, %edx
subl (%rcx,%rdx,4), %eax
movl %eax, -0x78(%rbp)
movq -0x40(%rbp), %rax
movq 0x40(%rax), %rax
movl -0x70(%rbp), %ecx
addl $0x10400, %ecx # imm = 0x10400
movl %ecx, %ecx
movl (%rax,%rcx,4), %eax
movl %eax, -0x7c(%rbp)
movl -0x5c(%rbp), %edx
movq -0x40(%rbp), %rax
movq 0x40(%rax), %rax
movl -0x68(%rbp), %ecx
movl %edx, (%rax,%rcx,4)
movl -0x5c(%rbp), %edx
movq -0x40(%rbp), %rax
movq 0x40(%rax), %rax
movl -0x6c(%rbp), %ecx
addl $0x400, %ecx # imm = 0x400
movl %ecx, %ecx
movl %edx, (%rax,%rcx,4)
movl -0x5c(%rbp), %edx
movq -0x40(%rbp), %rax
movq 0x40(%rax), %rax
movl -0x70(%rbp), %ecx
addl $0x10400, %ecx # imm = 0x10400
movl %ecx, %ecx
movl %edx, (%rax,%rcx,4)
movl $0x1, -0x80(%rbp)
movl -0x74(%rbp), %eax
movq -0x40(%rbp), %rcx
cmpl 0x54(%rcx), %eax
jae 0x9c583c
movq -0x58(%rbp), %rax
movl -0x74(%rbp), %ecx
movl %ecx, %edx
xorl %ecx, %ecx
subq %rdx, %rcx
movzbl (%rax,%rcx), %eax
movq -0x58(%rbp), %rcx
movzbl (%rcx), %ecx
cmpl %ecx, %eax
jne 0x9c583c
movl $0x2, -0x80(%rbp)
movq -0x48(%rbp), %rax
movl $0x2, (%rax)
movl -0x74(%rbp), %ecx
subl $0x1, %ecx
movq -0x48(%rbp), %rax
movl %ecx, 0x4(%rax)
movl $0x1, -0x60(%rbp)
movl -0x74(%rbp), %eax
cmpl -0x78(%rbp), %eax
je 0x9c5895
movl -0x78(%rbp), %eax
movq -0x40(%rbp), %rcx
cmpl 0x54(%rcx), %eax
jae 0x9c5895
movq -0x58(%rbp), %rax
movl -0x78(%rbp), %ecx
movl %ecx, %edx
xorl %ecx, %ecx
subq %rdx, %rcx
movzbl (%rax,%rcx), %eax
movq -0x58(%rbp), %rcx
movzbl (%rcx), %ecx
cmpl %ecx, %eax
jne 0x9c5895
movl $0x3, -0x80(%rbp)
movl -0x78(%rbp), %edx
subl $0x1, %edx
movq -0x48(%rbp), %rax
movl -0x60(%rbp), %ecx
movl %ecx, %esi
addl $0x1, %esi
movl %esi, -0x60(%rbp)
movl %ecx, %ecx
movl %edx, 0x4(%rax,%rcx,8)
movl -0x78(%rbp), %eax
movl %eax, -0x74(%rbp)
cmpl $0x0, -0x60(%rbp)
je 0x9c59fe
movq -0x58(%rbp), %rsi
movl -0x74(%rbp), %eax
movl %eax, %ecx
xorl %eax, %eax
subq %rcx, %rax
addq %rax, %rsi
movq -0x58(%rbp), %rdx
movl -0x80(%rbp), %ecx
movl -0x4c(%rbp), %eax
movq %rsi, -0x18(%rbp)
movq %rdx, -0x20(%rbp)
movl %ecx, -0x24(%rbp)
movl %eax, -0x28(%rbp)
movl -0x24(%rbp), %eax
cmpl -0x28(%rbp), %eax
ja 0x9c58db
cmpl $0x7fffffff, -0x28(%rbp) # imm = 0x7FFFFFFF
jbe 0x9c58fa
jmp 0x9c58fc
leaq 0x209571(%rip), %rdi # 0xbcee53
leaq 0x209577(%rip), %rsi # 0xbcee60
movl $0x2e, %edx
leaq 0x2095e3(%rip), %rcx # 0xbceed8
callq 0x3b440
jmp 0x9c591b
leaq 0x209623(%rip), %rdi # 0xbcef26
leaq 0x209556(%rip), %rsi # 0xbcee60
movl $0x2f, %edx
leaq 0x2095c2(%rip), %rcx # 0xbceed8
callq 0x3b440
movl -0x24(%rbp), %eax
cmpl -0x28(%rbp), %eax
jae 0x9c59ab
movq -0x18(%rbp), %rdi
movl -0x24(%rbp), %eax
addq %rax, %rdi
callq 0x9c7060
movq %rax, -0x88(%rbp)
movq -0x20(%rbp), %rdi
movl -0x24(%rbp), %eax
addq %rax, %rdi
callq 0x9c7060
movq %rax, %rcx
movq -0x88(%rbp), %rax
subq %rcx, %rax
movq %rax, -0x30(%rbp)
cmpq $0x0, -0x30(%rbp)
je 0x9c599d
movq -0x30(%rbp), %rax
tzcntq %rax, %rax
shrl $0x3, %eax
addl -0x24(%rbp), %eax
movl %eax, -0x24(%rbp)
movl -0x24(%rbp), %eax
cmpl -0x28(%rbp), %eax
jae 0x9c5989
movl -0x24(%rbp), %eax
movl %eax, -0x8c(%rbp)
jmp 0x9c5992
movl -0x28(%rbp), %eax
movl %eax, -0x8c(%rbp)
movl -0x8c(%rbp), %eax
movl %eax, -0xc(%rbp)
jmp 0x9c59b1
movl -0x24(%rbp), %eax
addl $0x8, %eax
movl %eax, -0x24(%rbp)
jmp 0x9c591b
movl -0x28(%rbp), %eax
movl %eax, -0xc(%rbp)
movl -0xc(%rbp), %eax
movl %eax, -0x80(%rbp)
movl -0x80(%rbp), %edx
movq -0x48(%rbp), %rax
movl -0x60(%rbp), %ecx
subl $0x1, %ecx
movl %ecx, %ecx
movl %edx, (%rax,%rcx,8)
movl -0x80(%rbp), %eax
cmpl -0x4c(%rbp), %eax
jne 0x9c59fc
jmp 0x9c59d3
movl -0x7c(%rbp), %edx
movq -0x40(%rbp), %rax
movq 0x48(%rax), %rax
movq -0x40(%rbp), %rcx
movl 0x50(%rcx), %ecx
movl %edx, (%rax,%rcx,4)
movq -0x40(%rbp), %rdi
callq 0x9c5240
movl -0x60(%rbp), %eax
movl %eax, -0x34(%rbp)
jmp 0x9c5a7d
jmp 0x9c59fe
cmpl $0x3, -0x80(%rbp)
jae 0x9c5a0b
movl $0x3, -0x80(%rbp)
jmp 0x9c5a0d
movl -0x4c(%rbp), %edi
movl -0x5c(%rbp), %esi
movq -0x58(%rbp), %rdx
movl -0x7c(%rbp), %ecx
movq -0x40(%rbp), %rax
movl 0x5c(%rax), %r8d
movq -0x40(%rbp), %rax
movq 0x48(%rax), %r9
movq -0x40(%rbp), %rax
movl 0x50(%rax), %ebx
movq -0x40(%rbp), %rax
movl 0x54(%rax), %r11d
movq -0x48(%rbp), %r10
movl -0x60(%rbp), %eax
shlq $0x3, %rax
addq %rax, %r10
movl -0x80(%rbp), %eax
movl %ebx, (%rsp)
movl %r11d, 0x8(%rsp)
movq %r10, 0x10(%rsp)
movl %eax, 0x18(%rsp)
callq 0x9c52e0
movq -0x48(%rbp), %rcx
subq %rcx, %rax
sarq $0x3, %rax
movl %eax, -0x60(%rbp)
movq -0x40(%rbp), %rdi
callq 0x9c5240
movl -0x60(%rbp), %eax
movl %eax, -0x34(%rbp)
movl -0x34(%rbp), %eax
addq $0xa8, %rsp
popq %rbx
popq %rbp
retq
nopw (%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmliblzma/liblzma/lz/lz_encoder_mf.c
|
lzma_mf_bt2_find
|
extern uint32_t
lzma_mf_bt2_find(lzma_mf *mf, lzma_match *matches)
{
header_find(true, 2);
hash_2_calc();
const uint32_t cur_match = mf->hash[hash_value];
mf->hash[hash_value] = pos;
bt_find(1);
}
|
pushq %rbp
movq %rsp, %rbp
pushq %rbx
subq $0x58, %rsp
movq %rdi, -0x18(%rbp)
movq %rsi, -0x20(%rbp)
movq -0x18(%rbp), %rdi
callq 0x9c4e60
movl %eax, -0x24(%rbp)
movq -0x18(%rbp), %rax
movl 0x60(%rax), %eax
cmpl -0x24(%rbp), %eax
ja 0x9c5c25
movq -0x18(%rbp), %rax
movl 0x60(%rax), %eax
movl %eax, -0x24(%rbp)
jmp 0x9c5c77
cmpl $0x2, -0x24(%rbp)
jb 0x9c5c35
movq -0x18(%rbp), %rax
cmpl $0x1, 0x68(%rax)
jne 0x9c5c75
movq -0x18(%rbp), %rax
cmpl $0x0, 0x68(%rax)
je 0x9c5c41
jmp 0x9c5c60
leaq 0x20cdab(%rip), %rdi # 0xbd29f3
leaq 0x20cc84(%rip), %rsi # 0xbd28d3
movl $0x24d, %edx # imm = 0x24D
leaq 0x20ce15(%rip), %rcx # 0xbd2a70
callq 0x3b440
movq -0x18(%rbp), %rdi
callq 0x9c51e0
movl $0x0, -0xc(%rbp)
jmp 0x9c5d40
jmp 0x9c5c77
movq -0x18(%rbp), %rdi
callq 0x9c4e40
movq %rax, -0x30(%rbp)
movq -0x18(%rbp), %rax
movl 0x18(%rax), %eax
movq -0x18(%rbp), %rcx
addl 0x14(%rcx), %eax
movl %eax, -0x34(%rbp)
movl $0x0, -0x38(%rbp)
movq -0x30(%rbp), %rdi
callq 0x9c5d50
movzwl %ax, %eax
movl %eax, -0x3c(%rbp)
movq -0x18(%rbp), %rax
movq 0x40(%rax), %rax
movl -0x3c(%rbp), %ecx
movl (%rax,%rcx,4), %eax
movl %eax, -0x40(%rbp)
movl -0x34(%rbp), %edx
movq -0x18(%rbp), %rax
movq 0x40(%rax), %rax
movl -0x3c(%rbp), %ecx
movl %edx, (%rax,%rcx,4)
movl -0x24(%rbp), %edi
movl -0x34(%rbp), %esi
movq -0x30(%rbp), %rdx
movl -0x40(%rbp), %ecx
movq -0x18(%rbp), %rax
movl 0x5c(%rax), %r8d
movq -0x18(%rbp), %rax
movq 0x48(%rax), %r9
movq -0x18(%rbp), %rax
movl 0x50(%rax), %r11d
movq -0x18(%rbp), %rax
movl 0x54(%rax), %r10d
movq -0x20(%rbp), %rax
movl -0x38(%rbp), %ebx
shlq $0x3, %rbx
addq %rbx, %rax
movl %r11d, (%rsp)
movl %r10d, 0x8(%rsp)
movq %rax, 0x10(%rsp)
movl $0x1, 0x18(%rsp)
callq 0x9c5d70
movq -0x20(%rbp), %rcx
subq %rcx, %rax
sarq $0x3, %rax
movl %eax, -0x38(%rbp)
movq -0x18(%rbp), %rdi
callq 0x9c5240
movl -0x38(%rbp), %eax
movl %eax, -0xc(%rbp)
movl -0xc(%rbp), %eax
addq $0x58, %rsp
popq %rbx
popq %rbp
retq
nopw (%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmliblzma/liblzma/lz/lz_encoder_mf.c
|
lzma_mf_bt4_skip
|
extern void
lzma_mf_bt4_skip(lzma_mf *mf, uint32_t amount)
{
do {
header_skip(true, 4);
hash_4_calc();
const uint32_t cur_match
= mf->hash[FIX_4_HASH_SIZE + hash_value];
mf->hash[hash_2_value] = pos;
mf->hash[FIX_3_HASH_SIZE + hash_3_value] = pos;
mf->hash[FIX_4_HASH_SIZE + hash_value] = pos;
bt_skip();
} while (--amount != 0);
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x40, %rsp
movq %rdi, -0x8(%rbp)
movl %esi, -0xc(%rbp)
movq -0x8(%rbp), %rdi
callq 0x9c4e60
movl %eax, -0x10(%rbp)
movq -0x8(%rbp), %rax
movl 0x60(%rax), %eax
cmpl -0x10(%rbp), %eax
ja 0x9c6ec3
movq -0x8(%rbp), %rax
movl 0x60(%rax), %eax
movl %eax, -0x10(%rbp)
jmp 0x9c6f0e
cmpl $0x4, -0x10(%rbp)
jb 0x9c6ed3
movq -0x8(%rbp), %rax
cmpl $0x1, 0x68(%rax)
jne 0x9c6f0c
movq -0x8(%rbp), %rax
cmpl $0x0, 0x68(%rax)
je 0x9c6edf
jmp 0x9c6efe
leaq 0x20bb0d(%rip), %rdi # 0xbd29f3
leaq 0x20b9e6(%rip), %rsi # 0xbd28d3
movl $0x2d9, %edx # imm = 0x2D9
leaq 0x20bc66(%rip), %rcx # 0xbd2b5f
callq 0x3b440
movq -0x8(%rbp), %rdi
callq 0x9c51e0
jmp 0x9c7042
jmp 0x9c6f0e
movq -0x8(%rbp), %rdi
callq 0x9c4e40
movq %rax, -0x18(%rbp)
movq -0x8(%rbp), %rax
movl 0x18(%rax), %eax
movq -0x8(%rbp), %rcx
addl 0x14(%rcx), %eax
movl %eax, -0x1c(%rbp)
movq -0x18(%rbp), %rax
movzbl (%rax), %eax
movl %eax, %ecx
leaq 0x204274(%rip), %rax # 0xbcb1b0
movl (%rax,%rcx,4), %eax
movq -0x18(%rbp), %rcx
movzbl 0x1(%rcx), %ecx
xorl %ecx, %eax
movl %eax, -0x20(%rbp)
movl -0x20(%rbp), %eax
andl $0x3ff, %eax # imm = 0x3FF
movl %eax, -0x24(%rbp)
movl -0x20(%rbp), %eax
movq -0x18(%rbp), %rcx
movzbl 0x2(%rcx), %ecx
shll $0x8, %ecx
xorl %ecx, %eax
andl $0xffff, %eax # imm = 0xFFFF
movl %eax, -0x28(%rbp)
movl -0x20(%rbp), %eax
movq -0x18(%rbp), %rcx
movzbl 0x2(%rcx), %ecx
shll $0x8, %ecx
xorl %ecx, %eax
movq -0x18(%rbp), %rcx
movzbl 0x3(%rcx), %ecx
movl %ecx, %edx
leaq 0x204220(%rip), %rcx # 0xbcb1b0
movl (%rcx,%rdx,4), %ecx
shll $0x5, %ecx
xorl %ecx, %eax
movq -0x8(%rbp), %rcx
andl 0x58(%rcx), %eax
movl %eax, -0x2c(%rbp)
movq -0x8(%rbp), %rax
movq 0x40(%rax), %rax
movl -0x2c(%rbp), %ecx
addl $0x10400, %ecx # imm = 0x10400
movl %ecx, %ecx
movl (%rax,%rcx,4), %eax
movl %eax, -0x30(%rbp)
movl -0x1c(%rbp), %edx
movq -0x8(%rbp), %rax
movq 0x40(%rax), %rax
movl -0x24(%rbp), %ecx
movl %edx, (%rax,%rcx,4)
movl -0x1c(%rbp), %edx
movq -0x8(%rbp), %rax
movq 0x40(%rax), %rax
movl -0x28(%rbp), %ecx
addl $0x400, %ecx # imm = 0x400
movl %ecx, %ecx
movl %edx, (%rax,%rcx,4)
movl -0x1c(%rbp), %edx
movq -0x8(%rbp), %rax
movq 0x40(%rax), %rax
movl -0x2c(%rbp), %ecx
addl $0x10400, %ecx # imm = 0x10400
movl %ecx, %ecx
movl %edx, (%rax,%rcx,4)
movl -0x10(%rbp), %edi
movl -0x1c(%rbp), %esi
movq -0x18(%rbp), %rdx
movl -0x30(%rbp), %ecx
movq -0x8(%rbp), %rax
movl 0x5c(%rax), %r8d
movq -0x8(%rbp), %rax
movq 0x48(%rax), %r9
movq -0x8(%rbp), %rax
movl 0x50(%rax), %r10d
movq -0x8(%rbp), %rax
movl 0x54(%rax), %eax
movl %r10d, (%rsp)
movl %eax, 0x8(%rsp)
callq 0x9c6210
movq -0x8(%rbp), %rdi
callq 0x9c5240
jmp 0x9c7042
movl -0xc(%rbp), %eax
addl $-0x1, %eax
movl %eax, -0xc(%rbp)
cmpl $0x0, %eax
jne 0x9c6e9f
addq $0x40, %rsp
popq %rbp
retq
nopw (%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmliblzma/liblzma/lz/lz_encoder_mf.c
|
normalize
|
static void
normalize(lzma_mf *mf)
{
assert(mf->read_pos + mf->offset == MUST_NORMALIZE_POS);
// In future we may not want to touch the lowest bits, because there
// may be match finders that use larger resolution than one byte.
const uint32_t subvalue
= (MUST_NORMALIZE_POS - mf->cyclic_size);
// & ~((UINT32_C(1) << 10) - 1);
for (uint32_t i = 0; i < mf->hash_count; ++i) {
// If the distance is greater than the dictionary size,
// we can simply mark the hash element as empty.
if (mf->hash[i] <= subvalue)
mf->hash[i] = EMPTY_HASH_VALUE;
else
mf->hash[i] -= subvalue;
}
for (uint32_t i = 0; i < mf->sons_count; ++i) {
// Do the same for mf->son.
//
// NOTE: There may be uninitialized elements in mf->son.
// Valgrind may complain that the "if" below depends on
// an uninitialized value. In this case it is safe to ignore
// the warning. See also the comments in lz_encoder_init()
// in lz_encoder.c.
if (mf->son[i] <= subvalue)
mf->son[i] = EMPTY_HASH_VALUE;
else
mf->son[i] -= subvalue;
}
// Update offset to match the new locations.
mf->offset -= subvalue;
return;
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x20, %rsp
movq %rdi, -0x8(%rbp)
movq -0x8(%rbp), %rax
movl 0x18(%rax), %eax
movq -0x8(%rbp), %rcx
addl 0x14(%rcx), %eax
cmpl $-0x1, %eax
jne 0x9c70a1
jmp 0x9c70c0
leaq 0x20bb36(%rip), %rdi # 0xbd2bde
leaq 0x20b824(%rip), %rsi # 0xbd28d3
movl $0x6e, %edx
leaq 0x20bb53(%rip), %rcx # 0xbd2c0e
callq 0x3b440
movq -0x8(%rbp), %rcx
movl $0xffffffff, %eax # imm = 0xFFFFFFFF
subl 0x54(%rcx), %eax
movl %eax, -0xc(%rbp)
movl $0x0, -0x10(%rbp)
movl -0x10(%rbp), %eax
movq -0x8(%rbp), %rcx
cmpl 0x6c(%rcx), %eax
jae 0x9c712c
movq -0x8(%rbp), %rax
movq 0x40(%rax), %rax
movl -0x10(%rbp), %ecx
movl (%rax,%rcx,4), %eax
cmpl -0xc(%rbp), %eax
ja 0x9c7109
movq -0x8(%rbp), %rax
movq 0x40(%rax), %rax
movl -0x10(%rbp), %ecx
movl $0x0, (%rax,%rcx,4)
jmp 0x9c711f
movl -0xc(%rbp), %esi
movq -0x8(%rbp), %rax
movq 0x40(%rax), %rax
movl -0x10(%rbp), %ecx
movl (%rax,%rcx,4), %edx
subl %esi, %edx
movl %edx, (%rax,%rcx,4)
jmp 0x9c7121
movl -0x10(%rbp), %eax
addl $0x1, %eax
movl %eax, -0x10(%rbp)
jmp 0x9c70d6
movl $0x0, -0x14(%rbp)
movl -0x14(%rbp), %eax
movq -0x8(%rbp), %rcx
cmpl 0x70(%rcx), %eax
jae 0x9c7189
movq -0x8(%rbp), %rax
movq 0x48(%rax), %rax
movl -0x14(%rbp), %ecx
movl (%rax,%rcx,4), %eax
cmpl -0xc(%rbp), %eax
ja 0x9c7166
movq -0x8(%rbp), %rax
movq 0x48(%rax), %rax
movl -0x14(%rbp), %ecx
movl $0x0, (%rax,%rcx,4)
jmp 0x9c717c
movl -0xc(%rbp), %esi
movq -0x8(%rbp), %rax
movq 0x48(%rax), %rax
movl -0x14(%rbp), %ecx
movl (%rax,%rcx,4), %edx
subl %esi, %edx
movl %edx, (%rax,%rcx,4)
jmp 0x9c717e
movl -0x14(%rbp), %eax
addl $0x1, %eax
movl %eax, -0x14(%rbp)
jmp 0x9c7133
movl -0xc(%rbp), %edx
movq -0x8(%rbp), %rax
movl 0x14(%rax), %ecx
subl %edx, %ecx
movl %ecx, 0x14(%rax)
addq $0x20, %rsp
popq %rbp
retq
nop
|
/JKorbelRA[P]CMake/Utilities/cmliblzma/liblzma/lz/lz_encoder_mf.c
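A minimal standalone sketch of the normalization step above; the table contents and subvalue are hypothetical, and EMPTY_HASH_VALUE is assumed to be 0 as in the match finder headers:

#include <stdint.h>
#include <stdio.h>

#define EMPTY_HASH_VALUE 0  /* assumed, matching the liblzma convention */

/* Rebase a table of absolute positions after the position counter is reset:
 * entries at or below subvalue are cleared, the rest are shifted down. */
static void normalize_table(uint32_t *table, uint32_t count, uint32_t subvalue)
{
    for (uint32_t i = 0; i < count; ++i)
        table[i] = (table[i] <= subvalue) ? EMPTY_HASH_VALUE : table[i] - subvalue;
}

int main(void)
{
    uint32_t hash[4] = { 5, 1000, 1500, 4000 };  /* hypothetical positions */
    normalize_table(hash, 4, 1000);              /* hypothetical subvalue */
    for (int i = 0; i < 4; ++i)
        printf("%u\n", (unsigned)hash[i]);       /* prints 0 0 500 3000 */
    return 0;
}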
|
ZSTD_createCCtx_advanced
|
ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
{
ZSTD_STATIC_ASSERT(zcss_init==0);
ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN==(0ULL - 1));
if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
{ ZSTD_CCtx* const cctx = (ZSTD_CCtx*)ZSTD_customMalloc(sizeof(ZSTD_CCtx), customMem);
if (!cctx) return NULL;
ZSTD_initCCtx(cctx, customMem);
return cctx;
}
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x30, %rsp
leaq 0x10(%rbp), %rcx
movq %rcx, -0x18(%rbp)
cmpq $0x0, (%rcx)
setne %al
xorb $-0x1, %al
andb $0x1, %al
movzbl %al, %eax
cmpq $0x0, 0x8(%rcx)
setne %cl
xorb $-0x1, %cl
andb $0x1, %cl
movzbl %cl, %ecx
xorl %ecx, %eax
cmpl $0x0, %eax
je 0x9c74b0
movq $0x0, -0x8(%rbp)
jmp 0x9c751a
movq -0x18(%rbp), %rax
movl $0xdf8, %edi # imm = 0xDF8
movq (%rax), %rcx
movq %rcx, (%rsp)
movq 0x8(%rax), %rcx
movq %rcx, 0x8(%rsp)
movq 0x10(%rax), %rax
movq %rax, 0x10(%rsp)
callq 0x9c72d0
movq %rax, -0x10(%rbp)
cmpq $0x0, -0x10(%rbp)
jne 0x9c74ec
movq $0x0, -0x8(%rbp)
jmp 0x9c751a
movq -0x18(%rbp), %rax
movq -0x10(%rbp), %rdi
movq (%rax), %rcx
movq %rcx, (%rsp)
movq 0x8(%rax), %rcx
movq %rcx, 0x8(%rsp)
movq 0x10(%rax), %rax
movq %rax, 0x10(%rsp)
callq 0x9c7530
movq -0x10(%rbp), %rax
movq %rax, -0x8(%rbp)
movq -0x8(%rbp), %rax
addq $0x30, %rsp
popq %rbp
retq
nopw %cs:(%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/zstd_compress.c
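A hedged usage sketch for ZSTD_createCCtx_advanced; it assumes the ZSTD_customMem layout from zstd.h's static-linking-only section, and the wrapper allocator below is illustrative only:

#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_customMem is part of the static-only API */
#include <zstd.h>
#include <stdlib.h>

/* Illustrative allocator hooks; any functions with these shapes would do. */
static void *my_alloc(void *opaque, size_t size) { (void)opaque; return malloc(size); }
static void  my_free (void *opaque, void *addr)  { (void)opaque; free(addr); }

ZSTD_CCtx *make_cctx_with_custom_allocator(void)
{
    ZSTD_customMem mem = { my_alloc, my_free, NULL };
    /* Returns NULL if exactly one of the two hooks is NULL, as checked above. */
    return ZSTD_createCCtx_advanced(mem);
}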
|
ZSTD_freeCCtx
|
size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
{
if (cctx==NULL) return 0; /* support free on NULL */
RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
"not compatible with static CCtx");
{
int cctxInWorkspace = ZSTD_cwksp_owns_buffer(&cctx->workspace, cctx);
ZSTD_freeCCtxContent(cctx);
if (!cctxInWorkspace) {
ZSTD_customFree(cctx, cctx->customMem);
}
}
return 0;
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x30, %rsp
movq %rdi, -0x10(%rbp)
cmpq $0x0, -0x10(%rbp)
jne 0x9c79bd
movq $0x0, -0x8(%rbp)
jmp 0x9c7a2f
movq -0x10(%rbp), %rax
cmpq $0x0, 0x308(%rax)
je 0x9c79d5
movq $-0x40, -0x8(%rbp)
jmp 0x9c7a2f
movq -0x10(%rbp), %rdi
addq $0x230, %rdi # imm = 0x230
movq -0x10(%rbp), %rsi
callq 0x9c7a40
movl %eax, -0x14(%rbp)
movq -0x10(%rbp), %rdi
callq 0x9c7a90
cmpl $0x0, -0x14(%rbp)
jne 0x9c7a27
movq -0x10(%rbp), %rdi
movq -0x10(%rbp), %rax
addq $0x2e8, %rax # imm = 0x2E8
movq (%rax), %rcx
movq %rcx, (%rsp)
movq 0x8(%rax), %rcx
movq %rcx, 0x8(%rsp)
movq 0x10(%rax), %rax
movq %rax, 0x10(%rsp)
callq 0x9c7390
movq $0x0, -0x8(%rbp)
movq -0x8(%rbp), %rax
addq $0x30, %rsp
popq %rbp
retq
nopl (%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/zstd_compress.c
|
ZSTD_CCtxParams_init_internal
|
static void ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params* cctxParams, ZSTD_parameters const* params, int compressionLevel)
{
assert(!ZSTD_checkCParams(params->cParams));
ZSTD_memset(cctxParams, 0, sizeof(*cctxParams));
cctxParams->cParams = params->cParams;
cctxParams->fParams = params->fParams;
/* Should not matter, as all cParams are presumed properly defined.
* But, set it for tracing anyway.
*/
cctxParams->compressionLevel = compressionLevel;
    cctxParams->useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams->useRowMatchFinder, &params->cParams);
DEBUGLOG(4, "ZSTD_CCtxParams_init_internal: useRowMatchFinder=%d", cctxParams->useRowMatchFinder);
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x20, %rsp
movq %rdi, -0x8(%rbp)
movq %rsi, -0x10(%rbp)
movl %edx, -0x14(%rbp)
movq -0x8(%rbp), %rdi
xorl %esi, %esi
movl $0xb0, %edx
callq 0x3b780
movq -0x8(%rbp), %rax
movq -0x10(%rbp), %rcx
movq (%rcx), %rdx
movq %rdx, 0x4(%rax)
movq 0x8(%rcx), %rdx
movq %rdx, 0xc(%rax)
movq 0x10(%rcx), %rdx
movq %rdx, 0x14(%rax)
movl 0x18(%rcx), %ecx
movl %ecx, 0x1c(%rax)
movq -0x8(%rbp), %rax
movq -0x10(%rbp), %rcx
movq 0x1c(%rcx), %rdx
movq %rdx, 0x20(%rax)
movl 0x24(%rcx), %ecx
movl %ecx, 0x28(%rax)
movl -0x14(%rbp), %ecx
movq -0x8(%rbp), %rax
movl %ecx, 0x2c(%rax)
movq -0x8(%rbp), %rax
movl 0x90(%rax), %edi
movq -0x10(%rbp), %rsi
callq 0x9ca400
movl %eax, %ecx
movq -0x8(%rbp), %rax
movl %ecx, 0x90(%rax)
addq $0x20, %rsp
popq %rbp
retq
nopl (%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/zstd_compress.c
|
ZSTD_CCtx_refCDict
|
size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
{
RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
"Can't ref a dict when ctx not in init stage.");
/* Free the existing local cdict (if any) to save memory. */
ZSTD_clearAllDicts(cctx);
cctx->cdict = cdict;
return 0;
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x20, %rsp
movq %rdi, -0x10(%rbp)
movq %rsi, -0x18(%rbp)
movq -0x10(%rbp), %rax
cmpl $0x0, 0xd80(%rax)
je 0x9c99d7
movq $-0x3c, -0x8(%rbp)
jmp 0x9c99f7
movq -0x10(%rbp), %rdi
callq 0x9c9890
movq -0x18(%rbp), %rcx
movq -0x10(%rbp), %rax
movq %rcx, 0xdd0(%rax)
movq $0x0, -0x8(%rbp)
movq -0x8(%rbp), %rax
addq $0x20, %rsp
popq %rbp
retq
nopw %cs:(%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/zstd_compress.c
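A sketch of the usual call pattern around ZSTD_CCtx_refCDict, assuming the public one-shot API (ZSTD_compress2, ZSTD_isError); error handling is abbreviated:

#include <zstd.h>
#include <stddef.h>

/* Compress src with a pre-built dictionary referenced on the context. */
size_t compress_with_cdict(ZSTD_CCtx *cctx, const ZSTD_CDict *cdict,
                           void *dst, size_t dstCap,
                           const void *src, size_t srcSize)
{
    size_t r = ZSTD_CCtx_refCDict(cctx, cdict);  /* ctx must still be in init stage */
    if (ZSTD_isError(r))
        return r;
    return ZSTD_compress2(cctx, dst, dstCap, src, srcSize);
}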
|
ZSTD_getCParams_internal
|
static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
{
U64 const rSize = ZSTD_getCParamRowSize(srcSizeHint, dictSize, mode);
U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB);
int row;
DEBUGLOG(5, "ZSTD_getCParams_internal (cLevel=%i)", compressionLevel);
/* row */
if (compressionLevel == 0) row = ZSTD_CLEVEL_DEFAULT; /* 0 == default */
else if (compressionLevel < 0) row = 0; /* entry 0 is baseline for fast mode */
else if (compressionLevel > ZSTD_MAX_CLEVEL) row = ZSTD_MAX_CLEVEL;
else row = compressionLevel;
{ ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row];
DEBUGLOG(5, "ZSTD_getCParams_internal selected tableID: %u row: %u strat: %u", tableID, row, (U32)cp.strategy);
/* acceleration factor */
if (compressionLevel < 0) {
int const clampedCompressionLevel = MAX(ZSTD_minCLevel(), compressionLevel);
cp.targetLength = (unsigned)(-clampedCompressionLevel);
}
/* refine parameters based on srcSize & dictSize */
return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize, mode);
}
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x90, %rsp
movq %rdi, -0x68(%rbp)
movq %rdi, -0x60(%rbp)
movl %esi, -0x4(%rbp)
movq %rdx, -0x10(%rbp)
movq %rcx, -0x18(%rbp)
movl %r8d, -0x1c(%rbp)
movq -0x10(%rbp), %rdi
movq -0x18(%rbp), %rsi
movl -0x1c(%rbp), %edx
callq 0x9d8ae0
movq %rax, -0x28(%rbp)
cmpq $0x40000, -0x28(%rbp) # imm = 0x40000
setbe %al
andb $0x1, %al
movzbl %al, %eax
cmpq $0x20000, -0x28(%rbp) # imm = 0x20000
setbe %cl
andb $0x1, %cl
movzbl %cl, %ecx
addl %ecx, %eax
cmpq $0x4000, -0x28(%rbp) # imm = 0x4000
setbe %cl
andb $0x1, %cl
movzbl %cl, %ecx
addl %ecx, %eax
movl %eax, -0x2c(%rbp)
cmpl $0x0, -0x4(%rbp)
jne 0x9ca1ce
movl $0x3, -0x30(%rbp)
jmp 0x9ca1f6
cmpl $0x0, -0x4(%rbp)
jge 0x9ca1dd
movl $0x0, -0x30(%rbp)
jmp 0x9ca1f4
cmpl $0x16, -0x4(%rbp)
jle 0x9ca1ec
movl $0x16, -0x30(%rbp)
jmp 0x9ca1f2
movl -0x4(%rbp), %eax
movl %eax, -0x30(%rbp)
jmp 0x9ca1f4
jmp 0x9ca1f6
movl -0x2c(%rbp), %eax
movl %eax, %ecx
leaq 0x20ae9e(%rip), %rax # 0xbd50a0
imulq $0x284, %rcx, %rcx # imm = 0x284
addq %rcx, %rax
movslq -0x30(%rbp), %rcx
imulq $0x1c, %rcx, %rcx
addq %rcx, %rax
movq (%rax), %rcx
movq %rcx, -0x50(%rbp)
movq 0x8(%rax), %rcx
movq %rcx, -0x48(%rbp)
movq 0x10(%rax), %rcx
movq %rcx, -0x40(%rbp)
movl 0x18(%rax), %eax
movl %eax, -0x38(%rbp)
cmpl $0x0, -0x4(%rbp)
jge 0x9ca262
callq 0x9c8540
cmpl -0x4(%rbp), %eax
jle 0x9ca24e
callq 0x9c8540
movl %eax, -0x6c(%rbp)
jmp 0x9ca254
movl -0x4(%rbp), %eax
movl %eax, -0x6c(%rbp)
movl -0x6c(%rbp), %eax
movl %eax, -0x54(%rbp)
xorl %eax, %eax
subl -0x54(%rbp), %eax
movl %eax, -0x3c(%rbp)
movq -0x68(%rbp), %rdi
movq -0x10(%rbp), %rsi
movq -0x18(%rbp), %rdx
movl -0x1c(%rbp), %ecx
leaq -0x50(%rbp), %rax
movq (%rax), %r8
movq %r8, (%rsp)
movq 0x8(%rax), %r8
movq %r8, 0x8(%rsp)
movq 0x10(%rax), %r8
movq %r8, 0x10(%rsp)
movl 0x18(%rax), %eax
movl %eax, 0x18(%rsp)
callq 0x9c9ed0
movq -0x60(%rbp), %rax
addq $0x90, %rsp
popq %rbp
retq
nopw (%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/zstd_compress.c
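A standalone restatement of the table/row selection above; MAX_CLEVEL and CLEVEL_DEFAULT mirror the 0x16 and 0x3 literals visible in the disassembly and are assumptions of this sketch, which does not call into zstd:

#include <stdint.h>
#include <stdio.h>

#define MAX_CLEVEL      22   /* assumed, matching the 0x16 clamp in the listing */
#define CLEVEL_DEFAULT   3   /* assumed, matching the 0x3 default in the listing */

static void pick_table_and_row(uint64_t rSize, int level,
                               unsigned *tableID, int *row)
{
    /* Smaller inputs select parameter tables tuned for smaller windows. */
    *tableID = (rSize <= 256*1024) + (rSize <= 128*1024) + (rSize <= 16*1024);
    if (level == 0)              *row = CLEVEL_DEFAULT;
    else if (level < 0)          *row = 0;
    else if (level > MAX_CLEVEL) *row = MAX_CLEVEL;
    else                         *row = level;
}

int main(void)
{
    unsigned t; int r;
    pick_table_and_row(100*1024, 19, &t, &r);   /* yields tableID == 2, row == 19 */
    printf("tableID=%u row=%d\n", t, r);
    return 0;
}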
|
ZSTD_estimateCCtxSize_usingCCtxParams
|
size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)
{
ZSTD_compressionParameters const cParams =
ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
ZSTD_useRowMatchFinderMode_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder,
&cParams);
RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
/* estimateCCtxSize is for one-shot compression. So no buffers should
* be needed. However, we still allocate two 0-sized buffers, which can
* take space under ASAN. */
return ZSTD_estimateCCtxSize_usingCCtxParams_internal(
            &cParams, &params->ldmParams, 1, useRowMatchFinder, 0, 0, ZSTD_CONTENTSIZE_UNKNOWN);
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x40, %rsp
movq %rdi, -0x10(%rbp)
movq -0x10(%rbp), %rsi
leaq -0x2c(%rbp), %rdi
movq $-0x1, %rdx
xorl %eax, %eax
movl %eax, %ecx
xorl %r8d, %r8d
callq 0x9ca060
movq -0x10(%rbp), %rax
movl 0x90(%rax), %edi
leaq -0x2c(%rbp), %rsi
callq 0x9ca400
movl %eax, -0x30(%rbp)
movq -0x10(%rbp), %rax
cmpl $0x0, 0x4c(%rax)
jle 0x9ca3c1
movq $-0x1, -0x8(%rbp)
jmp 0x9ca3ee
movq -0x10(%rbp), %rsi
addq $0x60, %rsi
movl -0x30(%rbp), %ecx
leaq -0x2c(%rbp), %rdi
movl $0x1, %edx
xorl %eax, %eax
movl %eax, %r9d
movq %r9, %r8
movq $-0x1, (%rsp)
callq 0x9ca470
movq %rax, -0x8(%rbp)
movq -0x8(%rbp), %rax
addq $0x40, %rsp
popq %rbp
retq
nopl (%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/zstd_compress.c
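A hedged sketch of how such estimates are typically consumed, using the simpler public wrapper ZSTD_estimateCCtxSize from zstd.h's static-linking-only section:

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>
#include <stdio.h>

int main(void)
{
    /* Upper bound on CCtx memory for single-threaded, one-shot compression. */
    size_t need = ZSTD_estimateCCtxSize(3 /* compression level, placeholder */);
    printf("a level-3 CCtx needs about %zu bytes\n", need);
    return 0;
}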
|
ZSTD_copyCCtx
|
size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize)
{
ZSTD_frameParameters fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
ZSTD_buffered_policy_e const zbuff = srcCCtx->bufferedPolicy;
ZSTD_STATIC_ASSERT((U32)ZSTDb_buffered==1);
if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
fParams.contentSizeFlag = (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN);
return ZSTD_copyCCtx_internal(dstCCtx, srcCCtx,
fParams, pledgedSrcSize,
zbuff);
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x40, %rsp
movq %rdi, -0x8(%rbp)
movq %rsi, -0x10(%rbp)
movq %rdx, -0x18(%rbp)
movq 0x209d71(%rip), %rax # 0xbd4cbc
movq %rax, -0x24(%rbp)
movl 0x209d6f(%rip), %eax # 0xbd4cc4
movl %eax, -0x1c(%rbp)
movq -0x10(%rbp), %rax
movl 0xd30(%rax), %eax
movl %eax, -0x28(%rbp)
cmpq $0x0, -0x18(%rbp)
jne 0x9caf74
movq $-0x1, -0x18(%rbp)
cmpq $-0x1, -0x18(%rbp)
setne %al
andb $0x1, %al
movzbl %al, %eax
movl %eax, -0x24(%rbp)
movq -0x8(%rbp), %rdi
movq -0x10(%rbp), %rsi
movq -0x18(%rbp), %r8
movl -0x28(%rbp), %r9d
movq -0x24(%rbp), %rax
movq %rax, -0x38(%rbp)
movl -0x1c(%rbp), %eax
movl %eax, -0x30(%rbp)
movq -0x38(%rbp), %rdx
movl -0x30(%rbp), %ecx
callq 0x9cafc0
addq $0x40, %rsp
popq %rbp
retq
nopw %cs:(%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/zstd_compress.c
|
ZSTD_dictNCountRepeat
|
static FSE_repeat ZSTD_dictNCountRepeat(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue)
{
U32 s;
if (dictMaxSymbolValue < maxSymbolValue) {
return FSE_repeat_check;
}
for (s = 0; s <= maxSymbolValue; ++s) {
if (normalizedCounter[s] == 0) {
return FSE_repeat_check;
}
}
return FSE_repeat_valid;
}
|
pushq %rbp
movq %rsp, %rbp
movq %rdi, -0x10(%rbp)
movl %esi, -0x14(%rbp)
movl %edx, -0x18(%rbp)
movl -0x14(%rbp), %eax
cmpl -0x18(%rbp), %eax
jae 0x9ccb3f
movl $0x1, -0x4(%rbp)
jmp 0x9ccb7b
movl $0x0, -0x1c(%rbp)
movl -0x1c(%rbp), %eax
cmpl -0x18(%rbp), %eax
ja 0x9ccb74
movq -0x10(%rbp), %rax
movl -0x1c(%rbp), %ecx
movswl (%rax,%rcx,2), %eax
cmpl $0x0, %eax
jne 0x9ccb67
movl $0x1, -0x4(%rbp)
jmp 0x9ccb7b
jmp 0x9ccb69
movl -0x1c(%rbp), %eax
addl $0x1, %eax
movl %eax, -0x1c(%rbp)
jmp 0x9ccb46
movl $0x2, -0x4(%rbp)
movl -0x4(%rbp), %eax
popq %rbp
retq
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/zstd_compress.c
|
ZSTD_writeEpilogue
|
static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
{
BYTE* const ostart = (BYTE*)dst;
BYTE* op = ostart;
size_t fhSize = 0;
DEBUGLOG(4, "ZSTD_writeEpilogue");
RETURN_ERROR_IF(cctx->stage == ZSTDcs_created, stage_wrong, "init missing");
/* special case : empty frame */
if (cctx->stage == ZSTDcs_init) {
fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, 0, 0);
FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed");
dstCapacity -= fhSize;
op += fhSize;
cctx->stage = ZSTDcs_ongoing;
}
if (cctx->stage != ZSTDcs_ending) {
/* write one last empty block, make it the "last" block */
U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0;
RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for epilogue");
MEM_writeLE32(op, cBlockHeader24);
op += ZSTD_blockHeaderSize;
dstCapacity -= ZSTD_blockHeaderSize;
}
if (cctx->appliedParams.fParams.checksumFlag) {
U32 const checksum = (U32) XXH64_digest(&cctx->xxhState);
RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum");
DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", (unsigned)checksum);
MEM_writeLE32(op, checksum);
op += 4;
}
cctx->stage = ZSTDcs_created; /* return to "created but no init" status */
return op-ostart;
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x50, %rsp
movq %rdi, -0x10(%rbp)
movq %rsi, -0x18(%rbp)
movq %rdx, -0x20(%rbp)
movq -0x18(%rbp), %rax
movq %rax, -0x28(%rbp)
movq -0x28(%rbp), %rax
movq %rax, -0x30(%rbp)
movq $0x0, -0x38(%rbp)
movq -0x10(%rbp), %rax
cmpl $0x0, (%rax)
jne 0x9cd362
movq $-0x3c, -0x8(%rbp)
jmp 0x9cd484
movq -0x10(%rbp), %rax
cmpl $0x1, (%rax)
jne 0x9cd3d8
movq -0x18(%rbp), %rdi
movq -0x20(%rbp), %rsi
movq -0x10(%rbp), %rdx
addq $0xc0, %rdx
xorl %eax, %eax
movl %eax, %ecx
xorl %r8d, %r8d
callq 0x9d0910
movq %rax, -0x38(%rbp)
movq -0x38(%rbp), %rax
movq %rax, -0x40(%rbp)
movq -0x40(%rbp), %rdi
callq 0x9c8000
cmpl $0x0, %eax
je 0x9cd3b1
movq -0x40(%rbp), %rax
movq %rax, -0x8(%rbp)
jmp 0x9cd484
jmp 0x9cd3b3
movq -0x38(%rbp), %rcx
movq -0x20(%rbp), %rax
subq %rcx, %rax
movq %rax, -0x20(%rbp)
movq -0x38(%rbp), %rax
addq -0x30(%rbp), %rax
movq %rax, -0x30(%rbp)
movq -0x10(%rbp), %rax
movl $0x2, (%rax)
movq -0x10(%rbp), %rax
cmpl $0x3, (%rax)
je 0x9cd422
movl $0x1, -0x44(%rbp)
cmpq $0x4, -0x20(%rbp)
jae 0x9cd3fc
movq $-0x46, -0x8(%rbp)
jmp 0x9cd484
movq -0x30(%rbp), %rdi
movl $0x1, %esi
callq 0x9cc0c0
movq -0x30(%rbp), %rax
addq $0x3, %rax
movq %rax, -0x30(%rbp)
movq -0x20(%rbp), %rax
subq $0x3, %rax
movq %rax, -0x20(%rbp)
movq -0x10(%rbp), %rax
cmpl $0x0, 0xe4(%rax)
je 0x9cd46b
movq -0x10(%rbp), %rdi
addq $0x290, %rdi # imm = 0x290
callq 0xaea1a0
movl %eax, -0x48(%rbp)
cmpq $0x4, -0x20(%rbp)
jae 0x9cd453
movq $-0x46, -0x8(%rbp)
jmp 0x9cd484
movq -0x30(%rbp), %rdi
movl -0x48(%rbp), %esi
callq 0x9cc0c0
movq -0x30(%rbp), %rax
addq $0x4, %rax
movq %rax, -0x30(%rbp)
movq -0x10(%rbp), %rax
movl $0x0, (%rax)
movq -0x30(%rbp), %rax
movq -0x28(%rbp), %rcx
subq %rcx, %rax
movq %rax, -0x8(%rbp)
movq -0x8(%rbp), %rax
addq $0x50, %rsp
popq %rbp
retq
nop
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/zstd_compress.c
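A small, hedged illustration of the empty last-block header built in the epilogue above; the bit layout follows the cBlockHeader24 construction shown in the source, and the 3-byte memcpy assumes a little-endian host:

#include <stdint.h>
#include <string.h>

/* Emit the 3-byte header of an empty, "last", raw block:
 * bit 0 = lastBlock, bits 1-2 = block type (0 = raw), bits 3+ = block size (0). */
static void write_last_empty_raw_block(uint8_t *op)
{
    uint32_t h = 1u /* last */ + (0u /* bt_raw */ << 1) + (0u /* size */ << 3);
    memcpy(op, &h, 3);   /* low 3 bytes of the 32-bit value, little-endian hosts only */
}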
|
ZSTD_createCDict
|
ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel)
{
ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dict, dictSize,
ZSTD_dlm_byCopy, ZSTD_dct_auto,
cParams, ZSTD_defaultCMem);
if (cdict)
cdict->compressionLevel = (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel;
return cdict;
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x80, %rsp
movq %rdi, -0x8(%rbp)
movq %rsi, -0x10(%rbp)
movl %edx, -0x14(%rbp)
movl -0x14(%rbp), %esi
movq -0x10(%rbp), %rcx
leaq -0x30(%rbp), %rdi
movq $-0x1, %rdx
movl $0x2, %r8d
callq 0x9ca150
movq -0x8(%rbp), %rdi
movq -0x10(%rbp), %rsi
xorl %ecx, %ecx
leaq -0x30(%rbp), %r8
leaq 0x206630(%rip), %rax # 0xbd4c98
movl %ecx, %edx
movq (%r8), %r9
movq %r9, (%rsp)
movq 0x8(%r8), %r9
movq %r9, 0x8(%rsp)
movq 0x10(%r8), %r9
movq %r9, 0x10(%rsp)
movl 0x18(%r8), %r8d
movl %r8d, 0x18(%rsp)
movq (%rax), %r8
movq %r8, 0x20(%rsp)
movq 0x8(%rax), %r8
movq %r8, 0x28(%rsp)
movq 0x10(%rax), %rax
movq %rax, 0x30(%rsp)
callq 0x9cdc30
movq %rax, -0x38(%rbp)
cmpq $0x0, -0x38(%rbp)
je 0x9ce6d9
cmpl $0x0, -0x14(%rbp)
jne 0x9ce6c6
movl $0x3, %eax
movl %eax, -0x3c(%rbp)
jmp 0x9ce6cc
movl -0x14(%rbp), %eax
movl %eax, -0x3c(%rbp)
movl -0x3c(%rbp), %ecx
movq -0x38(%rbp), %rax
movl %ecx, 0x1384(%rax)
movq -0x38(%rbp), %rax
addq $0x80, %rsp
popq %rbp
retq
nopw %cs:(%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/zstd_compress.c
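A hedged usage sketch for ZSTD_createCDict together with ZSTD_compress_usingCDict; the level and buffers are placeholders and error handling is abbreviated:

#include <zstd.h>
#include <stddef.h>

/* One-shot compression against a dictionary digested once and reused. */
size_t compress_with_dict(const void *dict, size_t dictSize,
                          void *dst, size_t dstCap,
                          const void *src, size_t srcSize)
{
    size_t written = 0;
    ZSTD_CDict *cdict = ZSTD_createCDict(dict, dictSize, 3 /* level */);
    ZSTD_CCtx  *cctx  = ZSTD_createCCtx();
    if (cdict && cctx)
        written = ZSTD_compress_usingCDict(cctx, dst, dstCap, src, srcSize, cdict);
    ZSTD_freeCCtx(cctx);    /* both free functions tolerate NULL */
    ZSTD_freeCDict(cdict);
    return written;         /* check with ZSTD_isError() before use */
}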
|
ZSTD_cwksp_clear_tables
|
MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
DEBUGLOG(4, "cwksp: clearing tables!");
#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
/* We don't do this when the workspace is statically allocated, because
* when that is the case, we have no capability to hook into the end of the
* workspace's lifecycle to unpoison the memory.
*/
if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
__asan_poison_memory_region(ws->objectEnd, size);
}
#endif
ws->tableEnd = ws->objectEnd;
ZSTD_cwksp_assert_internal_consistency(ws);
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x10, %rsp
movq %rdi, -0x8(%rbp)
movq -0x8(%rbp), %rax
movq 0x10(%rax), %rcx
movq -0x8(%rbp), %rax
movq %rcx, 0x18(%rax)
movq -0x8(%rbp), %rdi
callq 0x9d1310
addq $0x10, %rsp
popq %rbp
retq
nopl (%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/zstd_cwksp.h
|
ZSTD_cwksp_clean_tables
|
MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
DEBUGLOG(4, "cwksp: ZSTD_cwksp_clean_tables");
assert(ws->tableValidEnd >= ws->objectEnd);
assert(ws->tableValidEnd <= ws->allocStart);
if (ws->tableValidEnd < ws->tableEnd) {
ZSTD_memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);
}
ZSTD_cwksp_mark_tables_clean(ws);
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x10, %rsp
movq %rdi, -0x8(%rbp)
movq -0x8(%rbp), %rax
movq 0x20(%rax), %rax
movq -0x8(%rbp), %rcx
cmpq 0x18(%rcx), %rax
jae 0x9d27f0
movq -0x8(%rbp), %rax
movq 0x20(%rax), %rdi
movq -0x8(%rbp), %rax
movq 0x18(%rax), %rdx
movq -0x8(%rbp), %rax
movq 0x20(%rax), %rax
subq %rax, %rdx
xorl %esi, %esi
callq 0x3b780
movq -0x8(%rbp), %rdi
callq 0x9d1dc0
addq $0x10, %rsp
popq %rbp
retq
nop
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/zstd_cwksp.h
|
ZSTD_buildSeqStore
|
static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
{
ZSTD_matchState_t* const ms = &zc->blockState.matchState;
DEBUGLOG(5, "ZSTD_buildSeqStore (srcSize=%zu)", srcSize);
assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
/* Assert that we have correctly flushed the ctx params into the ms's copy */
ZSTD_assertEqualCParams(zc->appliedParams.cParams, ms->cParams);
if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
if (zc->appliedParams.cParams.strategy >= ZSTD_btopt) {
ZSTD_ldm_skipRawSeqStoreBytes(&zc->externSeqStore, srcSize);
} else {
ZSTD_ldm_skipSequences(&zc->externSeqStore, srcSize, zc->appliedParams.cParams.minMatch);
}
return ZSTDbss_noCompress; /* don't even attempt compression below a certain srcSize */
}
ZSTD_resetSeqStore(&(zc->seqStore));
/* required for optimal parser to read stats from dictionary */
ms->opt.symbolCosts = &zc->blockState.prevCBlock->entropy;
/* tell the optimal parser how we expect to compress literals */
ms->opt.literalCompressionMode = zc->appliedParams.literalCompressionMode;
/* a gap between an attached dict and the current window is not safe,
* they must remain adjacent,
* and when that stops being the case, the dict must be unset */
assert(ms->dictMatchState == NULL || ms->loadedDictEnd == ms->window.dictLimit);
/* limited update after a very long match */
{ const BYTE* const base = ms->window.base;
const BYTE* const istart = (const BYTE*)src;
const U32 curr = (U32)(istart-base);
if (sizeof(ptrdiff_t)==8) assert(istart - base < (ptrdiff_t)(U32)(-1)); /* ensure no overflow */
if (curr > ms->nextToUpdate + 384)
ms->nextToUpdate = curr - MIN(192, (U32)(curr - ms->nextToUpdate - 384));
}
/* select and store sequences */
{ ZSTD_dictMode_e const dictMode = ZSTD_matchState_dictMode(ms);
size_t lastLLSize;
{ int i;
for (i = 0; i < ZSTD_REP_NUM; ++i)
zc->blockState.nextCBlock->rep[i] = zc->blockState.prevCBlock->rep[i];
}
if (zc->externSeqStore.pos < zc->externSeqStore.size) {
assert(!zc->appliedParams.ldmParams.enableLdm);
/* Updates ldmSeqStore.pos */
lastLLSize =
ZSTD_ldm_blockCompress(&zc->externSeqStore,
ms, &zc->seqStore,
zc->blockState.nextCBlock->rep,
zc->appliedParams.useRowMatchFinder,
src, srcSize);
assert(zc->externSeqStore.pos <= zc->externSeqStore.size);
} else if (zc->appliedParams.ldmParams.enableLdm) {
rawSeqStore_t ldmSeqStore = kNullRawSeqStore;
ldmSeqStore.seq = zc->ldmSequences;
ldmSeqStore.capacity = zc->maxNbLdmSequences;
/* Updates ldmSeqStore.size */
FORWARD_IF_ERROR(ZSTD_ldm_generateSequences(&zc->ldmState, &ldmSeqStore,
&zc->appliedParams.ldmParams,
src, srcSize), "");
/* Updates ldmSeqStore.pos */
lastLLSize =
ZSTD_ldm_blockCompress(&ldmSeqStore,
ms, &zc->seqStore,
zc->blockState.nextCBlock->rep,
zc->appliedParams.useRowMatchFinder,
src, srcSize);
assert(ldmSeqStore.pos == ldmSeqStore.size);
} else { /* not long range mode */
ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy,
zc->appliedParams.useRowMatchFinder,
dictMode);
ms->ldmSeqStore = NULL;
lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize);
}
{ const BYTE* const lastLiterals = (const BYTE*)src + srcSize - lastLLSize;
ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize);
} }
return ZSTDbss_compress;
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x100, %rsp # imm = 0x100
movq %rdi, -0x10(%rbp)
movq %rsi, -0x18(%rbp)
movq %rdx, -0x20(%rbp)
movq -0x10(%rbp), %rax
addq $0xc00, %rax # imm = 0xC00
addq $0x10, %rax
movq %rax, -0x28(%rbp)
movq -0x10(%rbp), %rcx
movq -0x28(%rbp), %rax
addq $0xf0, %rax
movq 0xc4(%rcx), %rdx
movq %rdx, -0x48(%rbp)
movq 0xcc(%rcx), %rdx
movq %rdx, -0x40(%rbp)
movq 0xd4(%rcx), %rdx
movq %rdx, -0x38(%rbp)
movl 0xdc(%rcx), %ecx
movl %ecx, -0x30(%rbp)
leaq -0x48(%rbp), %rcx
movq (%rcx), %rdx
movq %rdx, (%rsp)
movq 0x8(%rcx), %rdx
movq %rdx, 0x8(%rsp)
movq 0x10(%rcx), %rdx
movq %rdx, 0x10(%rsp)
movl 0x18(%rcx), %ecx
movl %ecx, 0x18(%rsp)
movq (%rax), %rcx
movq %rcx, 0x20(%rsp)
movq 0x8(%rax), %rcx
movq %rcx, 0x28(%rsp)
movq 0x10(%rax), %rcx
movq %rcx, 0x30(%rsp)
movl 0x18(%rax), %eax
movl %eax, 0x38(%rsp)
callq 0x9d44c0
cmpq $0x7, -0x20(%rbp)
jae 0x9d4040
movq -0x10(%rbp), %rax
cmpl $0x7, 0xdc(%rax)
jb 0x9d4015
movq -0x10(%rbp), %rdi
addq $0xbd8, %rdi # imm = 0xBD8
movq -0x20(%rbp), %rsi
callq 0xa7a720
jmp 0x9d4033
movq -0x10(%rbp), %rdi
addq $0xbd8, %rdi # imm = 0xBD8
movq -0x20(%rbp), %rsi
movq -0x10(%rbp), %rax
movl 0xd4(%rax), %edx
callq 0xa7a5e0
movq $0x1, -0x8(%rbp)
jmp 0x9d434e
movq -0x10(%rbp), %rdi
addq $0x338, %rdi # imm = 0x338
callq 0x9cb550
movq -0x10(%rbp), %rax
movq 0xc00(%rax), %rcx
movq -0x28(%rbp), %rax
movq %rcx, 0xd8(%rax)
movq -0x10(%rbp), %rax
movl 0x108(%rax), %ecx
movq -0x28(%rbp), %rax
movl %ecx, 0xe0(%rax)
movq -0x28(%rbp), %rax
movq 0x8(%rax), %rax
movq %rax, -0x50(%rbp)
movq -0x18(%rbp), %rax
movq %rax, -0x58(%rbp)
movq -0x58(%rbp), %rax
movq -0x50(%rbp), %rcx
subq %rcx, %rax
movl %eax, -0x5c(%rbp)
movl -0x5c(%rbp), %eax
movq -0x28(%rbp), %rcx
movl 0x2c(%rcx), %ecx
addl $0x180, %ecx # imm = 0x180
cmpl %ecx, %eax
jbe 0x9d4109
movl -0x5c(%rbp), %eax
movl %eax, -0xb4(%rbp)
movl -0x5c(%rbp), %ecx
movq -0x28(%rbp), %rax
subl 0x2c(%rax), %ecx
subl $0x180, %ecx # imm = 0x180
movl $0xc0, %eax
cmpl %ecx, %eax
jae 0x9d40df
movl $0xc0, %eax
movl %eax, -0xb8(%rbp)
jmp 0x9d40f4
movl -0x5c(%rbp), %eax
movq -0x28(%rbp), %rcx
subl 0x2c(%rcx), %eax
subl $0x180, %eax # imm = 0x180
movl %eax, -0xb8(%rbp)
movl -0xb4(%rbp), %ecx
movl -0xb8(%rbp), %eax
subl %eax, %ecx
movq -0x28(%rbp), %rax
movl %ecx, 0x2c(%rax)
movq -0x28(%rbp), %rdi
callq 0x9d44d0
movl %eax, -0x60(%rbp)
movl $0x0, -0x6c(%rbp)
cmpl $0x3, -0x6c(%rbp)
jge 0x9d4159
movq -0x10(%rbp), %rax
movq 0xc00(%rax), %rax
movslq -0x6c(%rbp), %rcx
movl 0x11e4(%rax,%rcx,4), %edx
movq -0x10(%rbp), %rax
movq 0xc08(%rax), %rax
movslq -0x6c(%rbp), %rcx
movl %edx, 0x11e4(%rax,%rcx,4)
movl -0x6c(%rbp), %eax
addl $0x1, %eax
movl %eax, -0x6c(%rbp)
jmp 0x9d411c
movq -0x10(%rbp), %rax
movq 0xbe0(%rax), %rax
movq -0x10(%rbp), %rcx
cmpq 0xbf0(%rcx), %rax
jae 0x9d41c2
movq -0x10(%rbp), %rdi
addq $0xbd8, %rdi # imm = 0xBD8
movq -0x28(%rbp), %rsi
movq -0x10(%rbp), %rdx
addq $0x338, %rdx # imm = 0x338
movq -0x10(%rbp), %rax
movq 0xc08(%rax), %rcx
addq $0x11e4, %rcx # imm = 0x11E4
movq -0x10(%rbp), %rax
movl 0x150(%rax), %r8d
movq -0x18(%rbp), %r9
movq -0x20(%rbp), %rax
movq %rax, (%rsp)
callq 0xa7a7f0
movq %rax, -0x68(%rbp)
jmp 0x9d4313
movq -0x10(%rbp), %rax
cmpl $0x0, 0x120(%rax)
je 0x9d42a9
leaq -0x98(%rbp), %rdi
xorl %esi, %esi
movl $0x28, %edx
callq 0x3b780
movq -0x10(%rbp), %rax
movq 0xbc8(%rax), %rax
movq %rax, -0x98(%rbp)
movq -0x10(%rbp), %rax
movq 0xbd0(%rax), %rax
movq %rax, -0x78(%rbp)
movq -0x10(%rbp), %rdi
addq $0x388, %rdi # imm = 0x388
movq -0x10(%rbp), %rdx
addq $0xc0, %rdx
addq $0x60, %rdx
movq -0x18(%rbp), %rcx
movq -0x20(%rbp), %r8
leaq -0x98(%rbp), %rsi
callq 0xa796c0
movq %rax, -0xa0(%rbp)
movq -0xa0(%rbp), %rdi
callq 0x9c8000
cmpl $0x0, %eax
je 0x9d425d
movq -0xa0(%rbp), %rax
movq %rax, -0x8(%rbp)
jmp 0x9d434e
jmp 0x9d425f
movq -0x28(%rbp), %rsi
movq -0x10(%rbp), %rdx
addq $0x338, %rdx # imm = 0x338
movq -0x10(%rbp), %rax
movq 0xc08(%rax), %rcx
addq $0x11e4, %rcx # imm = 0x11E4
movq -0x10(%rbp), %rax
movl 0x150(%rax), %r8d
movq -0x18(%rbp), %r9
movq -0x20(%rbp), %rax
leaq -0x98(%rbp), %rdi
movq %rax, (%rsp)
callq 0xa7a7f0
movq %rax, -0x68(%rbp)
jmp 0x9d4311
movq -0x10(%rbp), %rax
movl 0xdc(%rax), %edi
movq -0x10(%rbp), %rax
movl 0x150(%rax), %esi
movl -0x60(%rbp), %edx
callq 0x9cb4a0
movq %rax, -0xa8(%rbp)
movq -0x28(%rbp), %rax
movq $0x0, 0x110(%rax)
movq -0xa8(%rbp), %rax
movq -0x28(%rbp), %rdi
movq -0x10(%rbp), %rsi
addq $0x338, %rsi # imm = 0x338
movq -0x10(%rbp), %rcx
movq 0xc08(%rcx), %rdx
addq $0x11e4, %rdx # imm = 0x11E4
movq -0x18(%rbp), %rcx
movq -0x20(%rbp), %r8
callq *%rax
movq %rax, -0x68(%rbp)
jmp 0x9d4313
movq -0x18(%rbp), %rax
addq -0x20(%rbp), %rax
xorl %ecx, %ecx
subq -0x68(%rbp), %rcx
addq %rcx, %rax
movq %rax, -0xb0(%rbp)
movq -0x10(%rbp), %rdi
addq $0x338, %rdi # imm = 0x338
movq -0xb0(%rbp), %rsi
movq -0x68(%rbp), %rdx
callq 0x9d4560
movq $0x0, -0x8(%rbp)
movq -0x8(%rbp), %rax
addq $0x100, %rsp # imm = 0x100
popq %rbp
retq
nopl (%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/zstd_compress.c
|
ZSTD_deriveSeqStoreChunk
|
static void ZSTD_deriveSeqStoreChunk(seqStore_t* resultSeqStore,
const seqStore_t* originalSeqStore,
size_t startIdx, size_t endIdx) {
BYTE* const litEnd = originalSeqStore->lit;
size_t literalsBytes;
size_t literalsBytesPreceding = 0;
*resultSeqStore = *originalSeqStore;
if (startIdx > 0) {
resultSeqStore->sequences = originalSeqStore->sequencesStart + startIdx;
literalsBytesPreceding = ZSTD_countSeqStoreLiteralsBytes(resultSeqStore);
}
/* Move longLengthPos into the correct position if necessary */
if (originalSeqStore->longLengthType != ZSTD_llt_none) {
if (originalSeqStore->longLengthPos < startIdx || originalSeqStore->longLengthPos > endIdx) {
resultSeqStore->longLengthType = ZSTD_llt_none;
} else {
resultSeqStore->longLengthPos -= (U32)startIdx;
}
}
resultSeqStore->sequencesStart = originalSeqStore->sequencesStart + startIdx;
resultSeqStore->sequences = originalSeqStore->sequencesStart + endIdx;
literalsBytes = ZSTD_countSeqStoreLiteralsBytes(resultSeqStore);
resultSeqStore->litStart += literalsBytesPreceding;
if (endIdx == (size_t)(originalSeqStore->sequences - originalSeqStore->sequencesStart)) {
/* This accounts for possible last literals if the derived chunk reaches the end of the block */
resultSeqStore->lit = litEnd;
} else {
resultSeqStore->lit = resultSeqStore->litStart+literalsBytes;
}
resultSeqStore->llCode += startIdx;
resultSeqStore->mlCode += startIdx;
resultSeqStore->ofCode += startIdx;
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x40, %rsp
movq %rdi, -0x8(%rbp)
movq %rsi, -0x10(%rbp)
movq %rdx, -0x18(%rbp)
movq %rcx, -0x20(%rbp)
movq -0x10(%rbp), %rax
movq 0x18(%rax), %rax
movq %rax, -0x28(%rbp)
movq $0x0, -0x38(%rbp)
movq -0x8(%rbp), %rdi
movq -0x10(%rbp), %rsi
movl $0x50, %edx
callq 0x3cb70
cmpq $0x0, -0x18(%rbp)
jbe 0x9d522c
movq -0x10(%rbp), %rax
movq (%rax), %rcx
movq -0x18(%rbp), %rax
shlq $0x3, %rax
addq %rax, %rcx
movq -0x8(%rbp), %rax
movq %rcx, 0x8(%rax)
movq -0x8(%rbp), %rdi
callq 0x9d5340
movq %rax, -0x38(%rbp)
movq -0x10(%rbp), %rax
cmpl $0x0, 0x48(%rax)
je 0x9d5271
movq -0x10(%rbp), %rax
movl 0x4c(%rax), %eax
cmpq -0x18(%rbp), %rax
jb 0x9d5250
movq -0x10(%rbp), %rax
movl 0x4c(%rax), %eax
cmpq -0x20(%rbp), %rax
jbe 0x9d525d
movq -0x8(%rbp), %rax
movl $0x0, 0x48(%rax)
jmp 0x9d526f
movq -0x18(%rbp), %rax
movl %eax, %edx
movq -0x8(%rbp), %rax
movl 0x4c(%rax), %ecx
subl %edx, %ecx
movl %ecx, 0x4c(%rax)
jmp 0x9d5271
movq -0x10(%rbp), %rax
movq (%rax), %rcx
movq -0x18(%rbp), %rax
shlq $0x3, %rax
addq %rax, %rcx
movq -0x8(%rbp), %rax
movq %rcx, (%rax)
movq -0x10(%rbp), %rax
movq (%rax), %rcx
movq -0x20(%rbp), %rax
shlq $0x3, %rax
addq %rax, %rcx
movq -0x8(%rbp), %rax
movq %rcx, 0x8(%rax)
movq -0x8(%rbp), %rdi
callq 0x9d5340
movq %rax, -0x30(%rbp)
movq -0x38(%rbp), %rcx
movq -0x8(%rbp), %rax
addq 0x10(%rax), %rcx
movq %rcx, 0x10(%rax)
movq -0x20(%rbp), %rax
movq -0x10(%rbp), %rcx
movq 0x8(%rcx), %rcx
movq -0x10(%rbp), %rdx
movq (%rdx), %rdx
subq %rdx, %rcx
sarq $0x3, %rcx
cmpq %rcx, %rax
jne 0x9d52ee
movq -0x28(%rbp), %rcx
movq -0x8(%rbp), %rax
movq %rcx, 0x18(%rax)
jmp 0x9d5302
movq -0x8(%rbp), %rax
movq 0x10(%rax), %rcx
addq -0x30(%rbp), %rcx
movq -0x8(%rbp), %rax
movq %rcx, 0x18(%rax)
movq -0x18(%rbp), %rcx
movq -0x8(%rbp), %rax
addq 0x20(%rax), %rcx
movq %rcx, 0x20(%rax)
movq -0x18(%rbp), %rcx
movq -0x8(%rbp), %rax
addq 0x28(%rax), %rcx
movq %rcx, 0x28(%rax)
movq -0x18(%rbp), %rcx
movq -0x8(%rbp), %rax
addq 0x30(%rax), %rcx
movq %rcx, 0x30(%rax)
addq $0x40, %rsp
popq %rbp
retq
nopl (%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/zstd_compress.c
|
ZSTD_shouldAttachDict
|
static int ZSTD_shouldAttachDict(const ZSTD_CDict* cdict,
const ZSTD_CCtx_params* params,
U64 pledgedSrcSize)
{
size_t cutoff = attachDictSizeCutoffs[cdict->matchState.cParams.strategy];
int const dedicatedDictSearch = cdict->matchState.dedicatedDictSearch;
return dedicatedDictSearch
|| ( ( pledgedSrcSize <= cutoff
|| pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
|| params->attachDictPref == ZSTD_dictForceAttach )
&& params->attachDictPref != ZSTD_dictForceCopy
&& !params->forceWindow ); /* dictMatchState isn't correctly
* handled in _enforceMaxDist */
}
|
pushq %rbp
movq %rsp, %rbp
movq %rdi, -0x8(%rbp)
movq %rsi, -0x10(%rbp)
movq %rdx, -0x18(%rbp)
movq -0x8(%rbp), %rax
movl 0x168(%rax), %eax
movl %eax, %ecx
leaq 0x1fe51d(%rip), %rax # 0xbd5050
movq (%rax,%rcx,8), %rax
movq %rax, -0x20(%rbp)
movq -0x8(%rbp), %rax
movl 0xdc(%rax), %eax
movl %eax, -0x24(%rbp)
movb $0x1, %al
cmpl $0x0, -0x24(%rbp)
movb %al, -0x25(%rbp)
jne 0x9d6b98
movq -0x18(%rbp), %rax
cmpq -0x20(%rbp), %rax
jbe 0x9d6b73
cmpq $-0x1, -0x18(%rbp)
je 0x9d6b73
movq -0x10(%rbp), %rcx
xorl %eax, %eax
cmpl $0x1, 0x44(%rcx)
movb %al, -0x26(%rbp)
jne 0x9d6b92
movq -0x10(%rbp), %rcx
xorl %eax, %eax
cmpl $0x2, 0x44(%rcx)
movb %al, -0x26(%rbp)
je 0x9d6b92
movq -0x10(%rbp), %rax
cmpl $0x0, 0x30(%rax)
setne %al
xorb $-0x1, %al
movb %al, -0x26(%rbp)
movb -0x26(%rbp), %al
movb %al, -0x25(%rbp)
movb -0x25(%rbp), %al
andb $0x1, %al
movzbl %al, %eax
popq %rbp
retq
nopw %cs:(%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/zstd_compress.c
|
ZSTD_getCParamRowSize
|
static U64 ZSTD_getCParamRowSize(U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
{
switch (mode) {
case ZSTD_cpm_unknown:
case ZSTD_cpm_noAttachDict:
case ZSTD_cpm_createCDict:
break;
case ZSTD_cpm_attachDict:
dictSize = 0;
break;
default:
assert(0);
break;
}
{ int const unknown = srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN;
size_t const addedSize = unknown && dictSize > 0 ? 500 : 0;
return unknown && dictSize == 0 ? ZSTD_CONTENTSIZE_UNKNOWN : srcSizeHint+dictSize+addedSize;
}
}
|
pushq %rbp
movq %rsp, %rbp
movq %rdi, -0x8(%rbp)
movq %rsi, -0x10(%rbp)
movl %edx, -0x14(%rbp)
movl -0x14(%rbp), %eax
movl %eax, -0x24(%rbp)
testl %eax, %eax
je 0x9d8b12
jmp 0x9d8afb
movl -0x24(%rbp), %eax
subl $0x1, %eax
je 0x9d8b14
jmp 0x9d8b05
movl -0x24(%rbp), %eax
addl $-0x2, %eax
subl $0x1, %eax
ja 0x9d8b1e
jmp 0x9d8b12
jmp 0x9d8b20
movq $0x0, -0x10(%rbp)
jmp 0x9d8b20
jmp 0x9d8b20
cmpq $-0x1, -0x8(%rbp)
sete %al
andb $0x1, %al
movzbl %al, %eax
movl %eax, -0x18(%rbp)
xorl %eax, %eax
cmpl $0x0, -0x18(%rbp)
movb %al, -0x25(%rbp)
je 0x9d8b46
cmpq $0x0, -0x10(%rbp)
seta %al
movb %al, -0x25(%rbp)
movb -0x25(%rbp), %dl
xorl %eax, %eax
movl $0x1f4, %ecx # imm = 0x1F4
testb $0x1, %dl
cmovnel %ecx, %eax
cltq
movq %rax, -0x20(%rbp)
cmpl $0x0, -0x18(%rbp)
je 0x9d8b76
cmpq $0x0, -0x10(%rbp)
jne 0x9d8b76
movq $-0x1, %rax
movq %rax, -0x30(%rbp)
jmp 0x9d8b86
movq -0x8(%rbp), %rax
addq -0x10(%rbp), %rax
addq -0x20(%rbp), %rax
movq %rax, -0x30(%rbp)
movq -0x30(%rbp), %rax
popq %rbp
retq
nopl (%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/zstd_compress.c
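A standalone restatement of the size-hint arithmetic above, with worked cases in the comments; the sentinel mirrors ZSTD_CONTENTSIZE_UNKNOWN but this sketch does not call into zstd:

#include <stdint.h>
#include <stddef.h>

#define CONTENTSIZE_UNKNOWN ((uint64_t)0 - 1)   /* same all-ones sentinel as the source */

static uint64_t row_size(uint64_t srcSizeHint, size_t dictSize)
{
    int unknown = (srcSizeHint == CONTENTSIZE_UNKNOWN);
    size_t added = (unknown && dictSize > 0) ? 500 : 0;
    /* unknown hint, no dict   -> stays "unknown"
     * unknown hint, with dict -> the all-ones sentinel wraps, leaving roughly
     *                            dictSize + 500 as the row-selection size
     * concrete hint           -> hint + dictSize */
    return (unknown && dictSize == 0) ? CONTENTSIZE_UNKNOWN
                                      : srcSizeHint + dictSize + added;
}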
|
FSE_initCState
|
MEM_STATIC void FSE_initCState(FSE_CState_t* statePtr, const FSE_CTable* ct)
{
const void* ptr = ct;
const U16* u16ptr = (const U16*) ptr;
const U32 tableLog = MEM_read16(ptr);
statePtr->value = (ptrdiff_t)1<<tableLog;
statePtr->stateTable = u16ptr+2;
statePtr->symbolTT = ct + 1 + (tableLog ? (1<<(tableLog-1)) : 1);
statePtr->stateLog = tableLog;
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x40, %rsp
movq %rdi, -0x8(%rbp)
movq %rsi, -0x10(%rbp)
movq -0x10(%rbp), %rax
movq %rax, -0x18(%rbp)
movq -0x18(%rbp), %rax
movq %rax, -0x20(%rbp)
movq -0x18(%rbp), %rdi
callq 0x9d9ce0
movzwl %ax, %eax
movl %eax, -0x24(%rbp)
movl -0x24(%rbp), %eax
movl %eax, %ecx
movl $0x1, %eax
shlq %cl, %rax
movq %rax, %rcx
movq -0x8(%rbp), %rax
movq %rcx, (%rax)
movq -0x20(%rbp), %rcx
addq $0x4, %rcx
movq -0x8(%rbp), %rax
movq %rcx, 0x8(%rax)
movq -0x10(%rbp), %rax
addq $0x4, %rax
movq %rax, -0x30(%rbp)
cmpl $0x0, -0x24(%rbp)
je 0x9d8cea
movl -0x24(%rbp), %ecx
subl $0x1, %ecx
movl $0x1, %eax
shll %cl, %eax
movl %eax, -0x34(%rbp)
jmp 0x9d8cf4
movl $0x1, %eax
movl %eax, -0x34(%rbp)
jmp 0x9d8cf4
movq -0x30(%rbp), %rcx
movl -0x34(%rbp), %eax
cltq
shlq $0x2, %rax
addq %rax, %rcx
movq -0x8(%rbp), %rax
movq %rcx, 0x10(%rax)
movl -0x24(%rbp), %ecx
movq -0x8(%rbp), %rax
movl %ecx, 0x18(%rax)
addq $0x40, %rsp
popq %rbp
retq
nopl (%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/../common/fse.h
|
ZSTD_encodeSequences
|
size_t ZSTD_encodeSequences(
void* dst, size_t dstCapacity,
FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2)
{
DEBUGLOG(5, "ZSTD_encodeSequences: dstCapacity = %u", (unsigned)dstCapacity);
#if DYNAMIC_BMI2
if (bmi2) {
return ZSTD_encodeSequences_bmi2(dst, dstCapacity,
CTable_MatchLength, mlCodeTable,
CTable_OffsetBits, ofCodeTable,
CTable_LitLength, llCodeTable,
sequences, nbSeq, longOffsets);
}
#endif
(void)bmi2;
return ZSTD_encodeSequences_default(dst, dstCapacity,
CTable_MatchLength, mlCodeTable,
CTable_OffsetBits, ofCodeTable,
CTable_LitLength, llCodeTable,
sequences, nbSeq, longOffsets);
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r14
pushq %rbx
subq $0x60, %rsp
movl 0x38(%rbp), %eax
movl 0x30(%rbp), %eax
movq 0x28(%rbp), %rax
movq 0x20(%rbp), %rax
movq 0x18(%rbp), %rax
movq 0x10(%rbp), %rax
movq %rdi, -0x18(%rbp)
movq %rsi, -0x20(%rbp)
movq %rdx, -0x28(%rbp)
movq %rcx, -0x30(%rbp)
movq %r8, -0x38(%rbp)
movq %r9, -0x40(%rbp)
movq -0x18(%rbp), %rdi
movq -0x20(%rbp), %rsi
movq -0x28(%rbp), %rdx
movq -0x30(%rbp), %rcx
movq -0x38(%rbp), %r8
movq -0x40(%rbp), %r9
movq 0x10(%rbp), %r14
movq 0x18(%rbp), %rbx
movq 0x20(%rbp), %r11
movq 0x28(%rbp), %r10
movl 0x30(%rbp), %eax
movq %r14, (%rsp)
movq %rbx, 0x8(%rsp)
movq %r11, 0x10(%rsp)
movq %r10, 0x18(%rsp)
movl %eax, 0x20(%rsp)
callq 0x9d9630
addq $0x60, %rsp
popq %rbx
popq %r14
popq %rbp
retq
nopl (%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/zstd_compress_sequences.c
|
BIT_flushBits
|
MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC)
{
size_t const nbBytes = bitC->bitPos >> 3;
assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
assert(bitC->ptr <= bitC->endPtr);
MEM_writeLEST(bitC->ptr, bitC->bitContainer);
bitC->ptr += nbBytes;
if (bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr;
bitC->bitPos &= 7;
bitC->bitContainer >>= nbBytes*8;
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x10, %rsp
movq %rdi, -0x8(%rbp)
movq -0x8(%rbp), %rax
movl 0x8(%rax), %eax
shrl $0x3, %eax
movl %eax, %eax
movq %rax, -0x10(%rbp)
movq -0x8(%rbp), %rax
movq 0x18(%rax), %rdi
movq -0x8(%rbp), %rax
movq (%rax), %rsi
callq 0x9da050
movq -0x10(%rbp), %rcx
movq -0x8(%rbp), %rax
addq 0x18(%rax), %rcx
movq %rcx, 0x18(%rax)
movq -0x8(%rbp), %rax
movq 0x18(%rax), %rax
movq -0x8(%rbp), %rcx
cmpq 0x20(%rcx), %rax
jbe 0x9d9ed2
movq -0x8(%rbp), %rax
movq 0x20(%rax), %rcx
movq -0x8(%rbp), %rax
movq %rcx, 0x18(%rax)
movq -0x8(%rbp), %rax
movl 0x8(%rax), %ecx
andl $0x7, %ecx
movl %ecx, 0x8(%rax)
movq -0x10(%rbp), %rcx
shlq $0x3, %rcx
movq -0x8(%rbp), %rax
movq (%rax), %rdx
shrq %cl, %rdx
movq %rdx, %rcx
movq %rcx, (%rax)
addq $0x10, %rsp
popq %rbp
retq
nopl (%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/../common/bitstream.h
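A simplified standalone sketch of the same flush idea; it assumes a little-endian host, at least 8 bytes of slack in the output buffer, and bitPos < 64 (the invariant asserted above), and it does not use the real bitstream.h types:

#include <stdint.h>
#include <string.h>

typedef struct {
    uint64_t container;  /* accumulated bits, low bits first */
    unsigned bitPos;     /* number of valid bits in container, < 64 */
    uint8_t *ptr;        /* next output position */
} mini_bitstream;

static void mini_flush(mini_bitstream *bc)
{
    size_t nbBytes = bc->bitPos >> 3;            /* whole bytes ready to emit, <= 7 */
    memcpy(bc->ptr, &bc->container, sizeof bc->container);  /* LE store, 8-byte slack */
    bc->ptr += nbBytes;                          /* only the emitted bytes advance */
    bc->bitPos &= 7;                             /* keep the partial byte */
    bc->container >>= nbBytes * 8;               /* shift of at most 56, never 64 */
}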
|
MEM_swap64
|
MEM_STATIC U64 MEM_swap64(U64 in)
{
#if defined(_MSC_VER) /* Visual Studio */
return _byteswap_uint64(in);
#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
|| (defined(__clang__) && __has_builtin(__builtin_bswap64))
return __builtin_bswap64(in);
#else
return ((in << 56) & 0xff00000000000000ULL) |
((in << 40) & 0x00ff000000000000ULL) |
((in << 24) & 0x0000ff0000000000ULL) |
((in << 8) & 0x000000ff00000000ULL) |
((in >> 8) & 0x00000000ff000000ULL) |
((in >> 24) & 0x0000000000ff0000ULL) |
((in >> 40) & 0x000000000000ff00ULL) |
((in >> 56) & 0x00000000000000ffULL);
#endif
}
|
pushq %rbp
movq %rsp, %rbp
movq %rdi, -0x8(%rbp)
movq -0x8(%rbp), %rax
bswapq %rax
popq %rbp
retq
nopw %cs:(%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/../common/mem.h
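A tiny standalone check of the portable fallback path above, with the expected result asserted:

#include <stdint.h>
#include <assert.h>

static uint64_t swap64_portable(uint64_t in)
{
    return ((in << 56) & 0xff00000000000000ULL) |
           ((in << 40) & 0x00ff000000000000ULL) |
           ((in << 24) & 0x0000ff0000000000ULL) |
           ((in <<  8) & 0x000000ff00000000ULL) |
           ((in >>  8) & 0x00000000ff000000ULL) |
           ((in >> 24) & 0x0000000000ff0000ULL) |
           ((in >> 40) & 0x000000000000ff00ULL) |
           ((in >> 56) & 0x00000000000000ffULL);
}

int main(void)
{
    /* byte order is fully reversed: 11 22 ... 88 becomes 88 77 ... 11 */
    assert(swap64_portable(0x1122334455667788ULL) == 0x8877665544332211ULL);
    return 0;
}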
|
ZSTD_seqDecompressedSize
|
static size_t ZSTD_seqDecompressedSize(seqStore_t const* seqStore, const seqDef* sequences, size_t nbSeq, size_t litSize, int lastSequence) {
const seqDef* const sstart = sequences;
const seqDef* const send = sequences + nbSeq;
const seqDef* sp = sstart;
size_t matchLengthSum = 0;
size_t litLengthSum = 0;
while (send-sp > 0) {
ZSTD_sequenceLength const seqLen = ZSTD_getSequenceLength(seqStore, sp);
litLengthSum += seqLen.litLength;
matchLengthSum += seqLen.matchLength;
sp++;
}
assert(litLengthSum <= litSize);
if (!lastSequence) {
assert(litLengthSum == litSize);
}
return matchLengthSum + litSize;
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x60, %rsp
movq %rdi, -0x8(%rbp)
movq %rsi, -0x10(%rbp)
movq %rdx, -0x18(%rbp)
movq %rcx, -0x20(%rbp)
movl %r8d, -0x24(%rbp)
movq -0x10(%rbp), %rax
movq %rax, -0x30(%rbp)
movq -0x10(%rbp), %rax
movq -0x18(%rbp), %rcx
shlq $0x3, %rcx
addq %rcx, %rax
movq %rax, -0x38(%rbp)
movq -0x30(%rbp), %rax
movq %rax, -0x40(%rbp)
movq $0x0, -0x48(%rbp)
movq $0x0, -0x50(%rbp)
movq -0x38(%rbp), %rax
movq -0x40(%rbp), %rcx
subq %rcx, %rax
sarq $0x3, %rax
cmpq $0x0, %rax
jle 0x9dad09
movq -0x8(%rbp), %rdi
movq -0x40(%rbp), %rsi
callq 0x9dab20
movq %rax, -0x58(%rbp)
movl -0x58(%rbp), %eax
addq -0x50(%rbp), %rax
movq %rax, -0x50(%rbp)
movl -0x54(%rbp), %eax
addq -0x48(%rbp), %rax
movq %rax, -0x48(%rbp)
movq -0x40(%rbp), %rax
addq $0x8, %rax
movq %rax, -0x40(%rbp)
jmp 0x9dacbf
cmpl $0x0, -0x24(%rbp)
jne 0x9dad11
jmp 0x9dad11
movq -0x48(%rbp), %rax
addq -0x20(%rbp), %rax
addq $0x60, %rsp
popq %rbp
retq
nop
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/zstd_compress_superblock.c
|
MEM_swap32
|
MEM_STATIC U32 MEM_swap32(U32 in)
{
#if defined(_MSC_VER) /* Visual Studio */
return _byteswap_ulong(in);
#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
|| (defined(__clang__) && __has_builtin(__builtin_bswap32))
return __builtin_bswap32(in);
#else
return ((in << 24) & 0xff000000 ) |
((in << 8) & 0x00ff0000 ) |
((in >> 8) & 0x0000ff00 ) |
((in >> 24) & 0x000000ff );
#endif
}
|
pushq %rbp
movq %rsp, %rbp
movl %edi, -0x4(%rbp)
movl -0x4(%rbp), %eax
bswapl %eax
popq %rbp
retq
nop
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/../common/mem.h
|
ZSTD_compressBlock_doubleFast
|
size_t ZSTD_compressBlock_doubleFast(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
const U32 mls = ms->cParams.minMatch;
switch(mls)
{
default: /* includes case 3 */
case 4 :
return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_noDict);
case 5 :
return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_noDict);
case 6 :
return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_noDict);
case 7 :
return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_noDict);
}
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x1e80, %rsp # imm = 0x1E80
movq %rdi, -0x1730(%rbp)
movq %rsi, -0x1738(%rbp)
movq %rdx, -0x1740(%rbp)
movq %rcx, -0x1748(%rbp)
movq %r8, -0x1750(%rbp)
movq -0x1730(%rbp), %rax
movl 0x100(%rax), %eax
movl %eax, -0x1754(%rbp)
movl -0x1754(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1760(%rbp)
subl $0x3, %eax
ja 0x9dc0af
movq -0x1760(%rbp), %rax
leaq 0x1fa2ae(%rip), %rcx # 0xbd6354
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9dc0b1
movq -0x1730(%rbp), %rdi
movq -0x1738(%rbp), %rsi
movq -0x1740(%rbp), %rdx
movq -0x1748(%rbp), %rcx
movq -0x1750(%rbp), %rax
movq %rdi, -0xb58(%rbp)
movq %rsi, -0xb60(%rbp)
movq %rdx, -0xb68(%rbp)
movq %rcx, -0xb70(%rbp)
movq %rax, -0xb78(%rbp)
movl $0x4, -0xb7c(%rbp)
movl $0x0, -0xb80(%rbp)
movq -0xb58(%rbp), %rax
addq $0xf0, %rax
movq %rax, -0xb88(%rbp)
movq -0xb58(%rbp), %rax
movq 0x60(%rax), %rax
movq %rax, -0xb90(%rbp)
movq -0xb88(%rbp), %rax
movl 0x8(%rax), %eax
movl %eax, -0xb94(%rbp)
movq -0xb58(%rbp), %rax
movq 0x70(%rax), %rax
movq %rax, -0xba0(%rbp)
movq -0xb88(%rbp), %rax
movl 0x4(%rax), %eax
movl %eax, -0xba4(%rbp)
movq -0xb58(%rbp), %rax
movq 0x8(%rax), %rax
movq %rax, -0xbb0(%rbp)
movq -0xb70(%rbp), %rax
movq %rax, -0xbb8(%rbp)
movq -0xbb8(%rbp), %rax
movq %rax, -0xbc0(%rbp)
movq -0xbb8(%rbp), %rax
movq %rax, -0xbc8(%rbp)
movq -0xbb8(%rbp), %rax
movq -0xbb0(%rbp), %rcx
subq %rcx, %rax
addq -0xb78(%rbp), %rax
movl %eax, -0xbcc(%rbp)
movq -0xb58(%rbp), %rdi
movl -0xbcc(%rbp), %esi
movq -0xb88(%rbp), %rax
movl (%rax), %edx
callq 0x9fba50
movl %eax, -0xbd0(%rbp)
movq -0xbb0(%rbp), %rax
movl -0xbd0(%rbp), %ecx
addq %rcx, %rax
movq %rax, -0xbd8(%rbp)
movq -0xbb8(%rbp), %rax
addq -0xb78(%rbp), %rax
movq %rax, -0xbe0(%rbp)
movq -0xbe0(%rbp), %rax
addq $-0x8, %rax
movq %rax, -0xbe8(%rbp)
movq -0xb68(%rbp), %rax
movl (%rax), %eax
movl %eax, -0xbec(%rbp)
movq -0xb68(%rbp), %rax
movl 0x4(%rax), %eax
movl %eax, -0xbf0(%rbp)
movl $0x0, -0xbf4(%rbp)
movq -0xb58(%rbp), %rax
movq 0xe8(%rax), %rax
movq %rax, -0xc00(%rbp)
cmpl $0x2, -0xb80(%rbp)
jne 0x9dc279
movq -0xc00(%rbp), %rax
addq $0xf0, %rax
movq %rax, -0x1768(%rbp)
jmp 0x9dc284
xorl %eax, %eax
movq %rax, -0x1768(%rbp)
jmp 0x9dc284
movq -0x1768(%rbp), %rax
movq %rax, -0xc08(%rbp)
cmpl $0x2, -0xb80(%rbp)
jne 0x9dc2af
movq -0xc00(%rbp), %rax
movq 0x60(%rax), %rax
movq %rax, -0x1770(%rbp)
jmp 0x9dc2ba
xorl %eax, %eax
movq %rax, -0x1770(%rbp)
jmp 0x9dc2ba
movq -0x1770(%rbp), %rax
movq %rax, -0xc10(%rbp)
cmpl $0x2, -0xb80(%rbp)
jne 0x9dc2e5
movq -0xc00(%rbp), %rax
movq 0x70(%rax), %rax
movq %rax, -0x1778(%rbp)
jmp 0x9dc2f0
xorl %eax, %eax
movq %rax, -0x1778(%rbp)
jmp 0x9dc2f0
movq -0x1778(%rbp), %rax
movq %rax, -0xc18(%rbp)
cmpl $0x2, -0xb80(%rbp)
jne 0x9dc319
movq -0xc00(%rbp), %rax
movl 0x18(%rax), %eax
movl %eax, -0x177c(%rbp)
jmp 0x9dc323
xorl %eax, %eax
movl %eax, -0x177c(%rbp)
jmp 0x9dc323
movl -0x177c(%rbp), %eax
movl %eax, -0xc1c(%rbp)
cmpl $0x2, -0xb80(%rbp)
jne 0x9dc34c
movq -0xc00(%rbp), %rax
movq 0x8(%rax), %rax
movq %rax, -0x1788(%rbp)
jmp 0x9dc357
xorl %eax, %eax
movq %rax, -0x1788(%rbp)
jmp 0x9dc357
movq -0x1788(%rbp), %rax
movq %rax, -0xc28(%rbp)
cmpl $0x2, -0xb80(%rbp)
jne 0x9dc387
movq -0xc28(%rbp), %rax
movl -0xc1c(%rbp), %ecx
addq %rcx, %rax
movq %rax, -0x1790(%rbp)
jmp 0x9dc392
xorl %eax, %eax
movq %rax, -0x1790(%rbp)
jmp 0x9dc392
movq -0x1790(%rbp), %rax
movq %rax, -0xc30(%rbp)
cmpl $0x2, -0xb80(%rbp)
jne 0x9dc3bc
movq -0xc00(%rbp), %rax
movq (%rax), %rax
movq %rax, -0x1798(%rbp)
jmp 0x9dc3c7
xorl %eax, %eax
movq %rax, -0x1798(%rbp)
jmp 0x9dc3c7
movq -0x1798(%rbp), %rax
movq %rax, -0xc38(%rbp)
cmpl $0x2, -0xb80(%rbp)
jne 0x9dc3ff
movl -0xbd0(%rbp), %eax
movq -0xc38(%rbp), %rcx
movq -0xc28(%rbp), %rdx
subq %rdx, %rcx
subl %ecx, %eax
movl %eax, -0x179c(%rbp)
jmp 0x9dc409
xorl %eax, %eax
movl %eax, -0x179c(%rbp)
jmp 0x9dc409
movl -0x179c(%rbp), %eax
movl %eax, -0xc3c(%rbp)
cmpl $0x2, -0xb80(%rbp)
jne 0x9dc430
movq -0xc08(%rbp), %rax
movl 0x8(%rax), %eax
movl %eax, -0x17a0(%rbp)
jmp 0x9dc43c
movl -0xb94(%rbp), %eax
movl %eax, -0x17a0(%rbp)
movl -0x17a0(%rbp), %eax
movl %eax, -0xc40(%rbp)
cmpl $0x2, -0xb80(%rbp)
jne 0x9dc463
movq -0xc08(%rbp), %rax
movl 0x4(%rax), %eax
movl %eax, -0x17a4(%rbp)
jmp 0x9dc46f
movl -0xba4(%rbp), %eax
movl %eax, -0x17a4(%rbp)
movl -0x17a4(%rbp), %eax
movl %eax, -0xc44(%rbp)
movq -0xbc0(%rbp), %rax
movq -0xbd8(%rbp), %rcx
subq %rcx, %rax
movq -0xc38(%rbp), %rcx
movq -0xc30(%rbp), %rdx
subq %rdx, %rcx
addq %rcx, %rax
movl %eax, -0xc48(%rbp)
cmpl $0x2, -0xb80(%rbp)
jne 0x9dc4b1
jmp 0x9dc4b1
cmpl $0x0, -0xc48(%rbp)
sete %al
andb $0x1, %al
movzbl %al, %ecx
movq -0xbc0(%rbp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, -0xbc0(%rbp)
cmpl $0x0, -0xb80(%rbp)
jne 0x9dc575
movq -0xbc0(%rbp), %rax
movq -0xbb0(%rbp), %rcx
subq %rcx, %rax
movl %eax, -0xc4c(%rbp)
movq -0xb58(%rbp), %rdi
movl -0xc4c(%rbp), %esi
movq -0xb88(%rbp), %rax
movl (%rax), %edx
callq 0x9fba50
movl %eax, -0xc50(%rbp)
movl -0xc4c(%rbp), %eax
subl -0xc50(%rbp), %eax
movl %eax, -0xc54(%rbp)
movl -0xbf0(%rbp), %eax
cmpl -0xc54(%rbp), %eax
jbe 0x9dc54f
movl -0xbf0(%rbp), %eax
movl %eax, -0xbf4(%rbp)
movl $0x0, -0xbf0(%rbp)
movl -0xbec(%rbp), %eax
cmpl -0xc54(%rbp), %eax
jbe 0x9dc573
movl -0xbec(%rbp), %eax
movl %eax, -0xbf4(%rbp)
movl $0x0, -0xbec(%rbp)
jmp 0x9dc575
cmpl $0x2, -0xb80(%rbp)
jne 0x9dc580
jmp 0x9dc580
jmp 0x9dc582
movq -0xbc0(%rbp), %rax
cmpq -0xbe8(%rbp), %rax
jae 0x9df329
movq -0xbc0(%rbp), %rcx
movl -0xb94(%rbp), %eax
movq %rcx, -0xa10(%rbp)
movl %eax, -0xa14(%rbp)
movl $0x8, -0xa18(%rbp)
movl -0xa18(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x17b0(%rbp)
subl $0x4, %eax
ja 0x9dc5e8
movq -0x17b0(%rbp), %rax
leaq 0x1fa0cd(%rip), %rcx # 0xbd66ac
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9dc5ea
movq -0xa10(%rbp), %rdi
movl -0xa14(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0xa08(%rbp)
jmp 0x9dc66f
movq -0xa10(%rbp), %rdi
movl -0xa14(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0xa08(%rbp)
jmp 0x9dc66f
movq -0xa10(%rbp), %rdi
movl -0xa14(%rbp), %esi
callq 0x9fb820
movq %rax, -0xa08(%rbp)
jmp 0x9dc66f
movq -0xa10(%rbp), %rdi
movl -0xa14(%rbp), %esi
callq 0x9fb850
movq %rax, -0xa08(%rbp)
jmp 0x9dc66f
movq -0xa10(%rbp), %rdi
movl -0xa14(%rbp), %esi
callq 0x9fb880
movq %rax, -0xa08(%rbp)
movq -0xa08(%rbp), %rax
movq %rax, -0xc70(%rbp)
movq -0xbc0(%rbp), %rdx
movl -0xba4(%rbp), %ecx
movl -0xb7c(%rbp), %eax
movq %rdx, -0xa28(%rbp)
movl %ecx, -0xa2c(%rbp)
movl %eax, -0xa30(%rbp)
movl -0xa30(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x17b8(%rbp)
subl $0x4, %eax
ja 0x9dc6d1
movq -0x17b8(%rbp), %rax
leaq 0x1f9ff8(%rip), %rcx # 0xbd66c0
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9dc6d3
movq -0xa28(%rbp), %rdi
movl -0xa2c(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0xa20(%rbp)
jmp 0x9dc758
movq -0xa28(%rbp), %rdi
movl -0xa2c(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0xa20(%rbp)
jmp 0x9dc758
movq -0xa28(%rbp), %rdi
movl -0xa2c(%rbp), %esi
callq 0x9fb820
movq %rax, -0xa20(%rbp)
jmp 0x9dc758
movq -0xa28(%rbp), %rdi
movl -0xa2c(%rbp), %esi
callq 0x9fb850
movq %rax, -0xa20(%rbp)
jmp 0x9dc758
movq -0xa28(%rbp), %rdi
movl -0xa2c(%rbp), %esi
callq 0x9fb880
movq %rax, -0xa20(%rbp)
movq -0xa20(%rbp), %rax
movq %rax, -0xc78(%rbp)
movq -0xbc0(%rbp), %rcx
movl -0xc40(%rbp), %eax
movq %rcx, -0xa40(%rbp)
movl %eax, -0xa44(%rbp)
movl $0x8, -0xa48(%rbp)
movl -0xa48(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x17c0(%rbp)
subl $0x4, %eax
ja 0x9dc7b8
movq -0x17c0(%rbp), %rax
leaq 0x1f9f25(%rip), %rcx # 0xbd66d4
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9dc7ba
movq -0xa40(%rbp), %rdi
movl -0xa44(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0xa38(%rbp)
jmp 0x9dc83f
movq -0xa40(%rbp), %rdi
movl -0xa44(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0xa38(%rbp)
jmp 0x9dc83f
movq -0xa40(%rbp), %rdi
movl -0xa44(%rbp), %esi
callq 0x9fb820
movq %rax, -0xa38(%rbp)
jmp 0x9dc83f
movq -0xa40(%rbp), %rdi
movl -0xa44(%rbp), %esi
callq 0x9fb850
movq %rax, -0xa38(%rbp)
jmp 0x9dc83f
movq -0xa40(%rbp), %rdi
movl -0xa44(%rbp), %esi
callq 0x9fb880
movq %rax, -0xa38(%rbp)
movq -0xa38(%rbp), %rax
movq %rax, -0xc80(%rbp)
movq -0xbc0(%rbp), %rdx
movl -0xc44(%rbp), %ecx
movl -0xb7c(%rbp), %eax
movq %rdx, -0xa58(%rbp)
movl %ecx, -0xa5c(%rbp)
movl %eax, -0xa60(%rbp)
movl -0xa60(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x17c8(%rbp)
subl $0x4, %eax
ja 0x9dc8a1
movq -0x17c8(%rbp), %rax
leaq 0x1f9e50(%rip), %rcx # 0xbd66e8
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9dc8a3
movq -0xa58(%rbp), %rdi
movl -0xa5c(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0xa50(%rbp)
jmp 0x9dc928
movq -0xa58(%rbp), %rdi
movl -0xa5c(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0xa50(%rbp)
jmp 0x9dc928
movq -0xa58(%rbp), %rdi
movl -0xa5c(%rbp), %esi
callq 0x9fb820
movq %rax, -0xa50(%rbp)
jmp 0x9dc928
movq -0xa58(%rbp), %rdi
movl -0xa5c(%rbp), %esi
callq 0x9fb850
movq %rax, -0xa50(%rbp)
jmp 0x9dc928
movq -0xa58(%rbp), %rdi
movl -0xa5c(%rbp), %esi
callq 0x9fb880
movq %rax, -0xa50(%rbp)
movq -0xa50(%rbp), %rax
movq %rax, -0xc88(%rbp)
movq -0xbc0(%rbp), %rax
movq -0xbb0(%rbp), %rcx
subq %rcx, %rax
movl %eax, -0xc8c(%rbp)
movq -0xb90(%rbp), %rax
movq -0xc70(%rbp), %rcx
movl (%rax,%rcx,4), %eax
movl %eax, -0xc90(%rbp)
movq -0xba0(%rbp), %rax
movq -0xc78(%rbp), %rcx
movl (%rax,%rcx,4), %eax
movl %eax, -0xc94(%rbp)
movq -0xbb0(%rbp), %rax
movl -0xc90(%rbp), %ecx
addq %rcx, %rax
movq %rax, -0xca0(%rbp)
movq -0xbb0(%rbp), %rax
movl -0xc94(%rbp), %ecx
addq %rcx, %rax
movq %rax, -0xca8(%rbp)
movl -0xc8c(%rbp), %eax
addl $0x1, %eax
subl -0xbec(%rbp), %eax
movl %eax, -0xcac(%rbp)
cmpl $0x2, -0xb80(%rbp)
jne 0x9dc9f6
movl -0xcac(%rbp), %eax
cmpl -0xbd0(%rbp), %eax
jae 0x9dc9f6
movq -0xc28(%rbp), %rax
movl -0xcac(%rbp), %ecx
subl -0xc3c(%rbp), %ecx
movl %ecx, %ecx
addq %rcx, %rax
movq %rax, -0x17d0(%rbp)
jmp 0x9dca0d
movq -0xbb0(%rbp), %rax
movl -0xcac(%rbp), %ecx
addq %rcx, %rax
movq %rax, -0x17d0(%rbp)
movq -0x17d0(%rbp), %rax
movq %rax, -0xcb8(%rbp)
movl -0xc8c(%rbp), %edx
movq -0xba0(%rbp), %rax
movq -0xc78(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
movq -0xb90(%rbp), %rax
movq -0xc70(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
cmpl $0x2, -0xb80(%rbp)
jne 0x9dce36
movl -0xbd0(%rbp), %eax
subl $0x1, %eax
subl -0xcac(%rbp), %eax
cmpl $0x3, %eax
jb 0x9dce36
movq -0xcb8(%rbp), %rdi
callq 0x9fb8d0
movl %eax, -0x17d4(%rbp)
movq -0xbc0(%rbp), %rdi
addq $0x1, %rdi
callq 0x9fb8d0
movl %eax, %ecx
movl -0x17d4(%rbp), %eax
cmpl %ecx, %eax
jne 0x9dce36
movl -0xcac(%rbp), %eax
cmpl -0xbd0(%rbp), %eax
jae 0x9dcab8
movq -0xc38(%rbp), %rax
movq %rax, -0x17e0(%rbp)
jmp 0x9dcac6
movq -0xbe0(%rbp), %rax
movq %rax, -0x17e0(%rbp)
movq -0x17e0(%rbp), %rax
movq %rax, -0xcc0(%rbp)
movq -0xbc0(%rbp), %rdi
addq $0x1, %rdi
addq $0x4, %rdi
movq -0xcb8(%rbp), %rsi
addq $0x4, %rsi
movq -0xbe0(%rbp), %rdx
movq -0xcc0(%rbp), %rcx
movq -0xbd8(%rbp), %r8
callq 0x9fbad0
addq $0x4, %rax
movq %rax, -0xc60(%rbp)
movq -0xbc0(%rbp), %rax
addq $0x1, %rax
movq %rax, -0xbc0(%rbp)
movq -0xb60(%rbp), %rdi
movq -0xbc0(%rbp), %rsi
movq -0xbc8(%rbp), %rax
subq %rax, %rsi
movq -0xbc8(%rbp), %rdx
movq -0xbe0(%rbp), %rcx
movq -0xc60(%rbp), %rax
subq $0x3, %rax
movq %rdi, -0x9c8(%rbp)
movq %rsi, -0x9d0(%rbp)
movq %rdx, -0x9d8(%rbp)
movq %rcx, -0x9e0(%rbp)
movl $0x0, -0x9e4(%rbp)
movq %rax, -0x9f0(%rbp)
movq -0x9e0(%rbp), %rax
addq $-0x20, %rax
movq %rax, -0x9f8(%rbp)
movq -0x9d8(%rbp), %rax
addq -0x9d0(%rbp), %rax
movq %rax, -0xa00(%rbp)
movq -0xa00(%rbp), %rax
cmpq -0x9f8(%rbp), %rax
ja 0x9dcd15
movq -0x9c8(%rbp), %rax
movq 0x18(%rax), %rdi
movq -0x9d8(%rbp), %rsi
callq 0x9fbd80
cmpq $0x10, -0x9d0(%rbp)
jbe 0x9dcd13
movq -0x9c8(%rbp), %rax
movq 0x18(%rax), %rdx
addq $0x10, %rdx
movq -0x9d8(%rbp), %rcx
addq $0x10, %rcx
movq -0x9d0(%rbp), %rax
subq $0x10, %rax
movq %rdx, -0x8(%rbp)
movq %rcx, -0x10(%rbp)
movq %rax, -0x18(%rbp)
movl $0x0, -0x1c(%rbp)
movq -0x8(%rbp), %rax
movq -0x10(%rbp), %rcx
subq %rcx, %rax
movq %rax, -0x28(%rbp)
movq -0x10(%rbp), %rax
movq %rax, -0x30(%rbp)
movq -0x8(%rbp), %rax
movq %rax, -0x38(%rbp)
movq -0x38(%rbp), %rax
addq -0x18(%rbp), %rax
movq %rax, -0x40(%rbp)
cmpl $0x1, -0x1c(%rbp)
jne 0x9dcc89
cmpq $0x10, -0x28(%rbp)
jge 0x9dcc89
jmp 0x9dcc55
movq -0x38(%rbp), %rdi
movq -0x30(%rbp), %rsi
callq 0x9fbf50
movq -0x38(%rbp), %rax
addq $0x8, %rax
movq %rax, -0x38(%rbp)
movq -0x30(%rbp), %rax
addq $0x8, %rax
movq %rax, -0x30(%rbp)
movq -0x38(%rbp), %rax
cmpq -0x40(%rbp), %rax
jb 0x9dcc55
jmp 0x9dcd11
movq -0x38(%rbp), %rdi
movq -0x30(%rbp), %rsi
callq 0x9fbd80
movl $0x10, %eax
cmpq -0x18(%rbp), %rax
jl 0x9dcca3
jmp 0x9dcd11
movq -0x38(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x38(%rbp)
movq -0x30(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x30(%rbp)
movq -0x38(%rbp), %rdi
movq -0x30(%rbp), %rsi
callq 0x9fbd80
movq -0x38(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x38(%rbp)
movq -0x30(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x30(%rbp)
movq -0x38(%rbp), %rdi
movq -0x30(%rbp), %rsi
callq 0x9fbd80
movq -0x38(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x38(%rbp)
movq -0x30(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x30(%rbp)
movq -0x38(%rbp), %rax
cmpq -0x40(%rbp), %rax
jb 0x9dccbb
jmp 0x9dcd11
jmp 0x9dcd13
jmp 0x9dcd3a
movq -0x9c8(%rbp), %rax
movq 0x18(%rax), %rdi
movq -0x9d8(%rbp), %rsi
movq -0xa00(%rbp), %rdx
movq -0x9f8(%rbp), %rcx
callq 0x9fbdb0
movq -0x9d0(%rbp), %rcx
movq -0x9c8(%rbp), %rax
addq 0x18(%rax), %rcx
movq %rcx, 0x18(%rax)
cmpq $0xffff, -0x9d0(%rbp) # imm = 0xFFFF
jbe 0x9dcd93
movq -0x9c8(%rbp), %rax
movl $0x1, 0x48(%rax)
movq -0x9c8(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x9c8(%rbp), %rcx
movq (%rcx), %rcx
subq %rcx, %rax
sarq $0x3, %rax
movl %eax, %ecx
movq -0x9c8(%rbp), %rax
movl %ecx, 0x4c(%rax)
movq -0x9d0(%rbp), %rax
movw %ax, %cx
movq -0x9c8(%rbp), %rax
movq 0x8(%rax), %rax
movw %cx, 0x4(%rax)
movl -0x9e4(%rbp), %ecx
addl $0x1, %ecx
movq -0x9c8(%rbp), %rax
movq 0x8(%rax), %rax
movl %ecx, (%rax)
cmpq $0xffff, -0x9f0(%rbp) # imm = 0xFFFF
jbe 0x9dce05
movq -0x9c8(%rbp), %rax
movl $0x2, 0x48(%rax)
movq -0x9c8(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x9c8(%rbp), %rcx
movq (%rcx), %rcx
subq %rcx, %rax
sarq $0x3, %rax
movl %eax, %ecx
movq -0x9c8(%rbp), %rax
movl %ecx, 0x4c(%rax)
movq -0x9f0(%rbp), %rax
movw %ax, %cx
movq -0x9c8(%rbp), %rax
movq 0x8(%rax), %rax
movw %cx, 0x6(%rax)
movq -0x9c8(%rbp), %rax
movq 0x8(%rax), %rcx
addq $0x8, %rcx
movq %rcx, 0x8(%rax)
jmp 0x9de07c
cmpl $0x0, -0xb80(%rbp)
jne 0x9dd21a
cmpl $0x0, -0xbec(%rbp)
seta %al
andb $0x1, %al
movzbl %al, %eax
movl %eax, -0x17e4(%rbp)
movq -0xbc0(%rbp), %rdi
addq $0x1, %rdi
movl -0xbec(%rbp), %eax
movl %eax, %ecx
xorl %eax, %eax
subq %rcx, %rax
addq %rax, %rdi
callq 0x9fb8d0
movl %eax, -0x17e8(%rbp)
movq -0xbc0(%rbp), %rdi
addq $0x1, %rdi
callq 0x9fb8d0
movl -0x17e8(%rbp), %ecx
movl %eax, %edx
movl -0x17e4(%rbp), %eax
cmpl %edx, %ecx
sete %cl
andb $0x1, %cl
movzbl %cl, %ecx
andl %ecx, %eax
cmpl $0x0, %eax
je 0x9dd21a
movq -0xbc0(%rbp), %rdi
addq $0x1, %rdi
addq $0x4, %rdi
movq -0xbc0(%rbp), %rsi
addq $0x1, %rsi
addq $0x4, %rsi
movl -0xbec(%rbp), %eax
movl %eax, %ecx
xorl %eax, %eax
subq %rcx, %rax
addq %rax, %rsi
movq -0xbe0(%rbp), %rdx
callq 0x9fbb90
addq $0x4, %rax
movq %rax, -0xc60(%rbp)
movq -0xbc0(%rbp), %rax
addq $0x1, %rax
movq %rax, -0xbc0(%rbp)
movq -0xb60(%rbp), %rdi
movq -0xbc0(%rbp), %rsi
movq -0xbc8(%rbp), %rax
subq %rax, %rsi
movq -0xbc8(%rbp), %rdx
movq -0xbe0(%rbp), %rcx
movq -0xc60(%rbp), %rax
subq $0x3, %rax
movq %rdi, -0x988(%rbp)
movq %rsi, -0x990(%rbp)
movq %rdx, -0x998(%rbp)
movq %rcx, -0x9a0(%rbp)
movl $0x0, -0x9a4(%rbp)
movq %rax, -0x9b0(%rbp)
movq -0x9a0(%rbp), %rax
addq $-0x20, %rax
movq %rax, -0x9b8(%rbp)
movq -0x998(%rbp), %rax
addq -0x990(%rbp), %rax
movq %rax, -0x9c0(%rbp)
movq -0x9c0(%rbp), %rax
cmpq -0x9b8(%rbp), %rax
ja 0x9dd0f9
movq -0x988(%rbp), %rax
movq 0x18(%rax), %rdi
movq -0x998(%rbp), %rsi
callq 0x9fbd80
cmpq $0x10, -0x990(%rbp)
jbe 0x9dd0f7
movq -0x988(%rbp), %rax
movq 0x18(%rax), %rdx
addq $0x10, %rdx
movq -0x998(%rbp), %rcx
addq $0x10, %rcx
movq -0x990(%rbp), %rax
subq $0x10, %rax
movq %rdx, -0x48(%rbp)
movq %rcx, -0x50(%rbp)
movq %rax, -0x58(%rbp)
movl $0x0, -0x5c(%rbp)
movq -0x48(%rbp), %rax
movq -0x50(%rbp), %rcx
subq %rcx, %rax
movq %rax, -0x68(%rbp)
movq -0x50(%rbp), %rax
movq %rax, -0x70(%rbp)
movq -0x48(%rbp), %rax
movq %rax, -0x78(%rbp)
movq -0x78(%rbp), %rax
addq -0x58(%rbp), %rax
movq %rax, -0x80(%rbp)
cmpl $0x1, -0x5c(%rbp)
jne 0x9dd06d
cmpq $0x10, -0x68(%rbp)
jge 0x9dd06d
jmp 0x9dd039
movq -0x78(%rbp), %rdi
movq -0x70(%rbp), %rsi
callq 0x9fbf50
movq -0x78(%rbp), %rax
addq $0x8, %rax
movq %rax, -0x78(%rbp)
movq -0x70(%rbp), %rax
addq $0x8, %rax
movq %rax, -0x70(%rbp)
movq -0x78(%rbp), %rax
cmpq -0x80(%rbp), %rax
jb 0x9dd039
jmp 0x9dd0f5
movq -0x78(%rbp), %rdi
movq -0x70(%rbp), %rsi
callq 0x9fbd80
movl $0x10, %eax
cmpq -0x58(%rbp), %rax
jl 0x9dd087
jmp 0x9dd0f5
movq -0x78(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x78(%rbp)
movq -0x70(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x70(%rbp)
movq -0x78(%rbp), %rdi
movq -0x70(%rbp), %rsi
callq 0x9fbd80
movq -0x78(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x78(%rbp)
movq -0x70(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x70(%rbp)
movq -0x78(%rbp), %rdi
movq -0x70(%rbp), %rsi
callq 0x9fbd80
movq -0x78(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x78(%rbp)
movq -0x70(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x70(%rbp)
movq -0x78(%rbp), %rax
cmpq -0x80(%rbp), %rax
jb 0x9dd09f
jmp 0x9dd0f5
jmp 0x9dd0f7
jmp 0x9dd11e
movq -0x988(%rbp), %rax
movq 0x18(%rax), %rdi
movq -0x998(%rbp), %rsi
movq -0x9c0(%rbp), %rdx
movq -0x9b8(%rbp), %rcx
callq 0x9fbdb0
movq -0x990(%rbp), %rcx
movq -0x988(%rbp), %rax
addq 0x18(%rax), %rcx
movq %rcx, 0x18(%rax)
cmpq $0xffff, -0x990(%rbp) # imm = 0xFFFF
jbe 0x9dd177
movq -0x988(%rbp), %rax
movl $0x1, 0x48(%rax)
movq -0x988(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x988(%rbp), %rcx
movq (%rcx), %rcx
subq %rcx, %rax
sarq $0x3, %rax
movl %eax, %ecx
movq -0x988(%rbp), %rax
movl %ecx, 0x4c(%rax)
movq -0x990(%rbp), %rax
movw %ax, %cx
movq -0x988(%rbp), %rax
movq 0x8(%rax), %rax
movw %cx, 0x4(%rax)
movl -0x9a4(%rbp), %ecx
addl $0x1, %ecx
movq -0x988(%rbp), %rax
movq 0x8(%rax), %rax
movl %ecx, (%rax)
cmpq $0xffff, -0x9b0(%rbp) # imm = 0xFFFF
jbe 0x9dd1e9
movq -0x988(%rbp), %rax
movl $0x2, 0x48(%rax)
movq -0x988(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x988(%rbp), %rcx
movq (%rcx), %rcx
subq %rcx, %rax
sarq $0x3, %rax
movl %eax, %ecx
movq -0x988(%rbp), %rax
movl %ecx, 0x4c(%rax)
movq -0x9b0(%rbp), %rax
movw %ax, %cx
movq -0x988(%rbp), %rax
movq 0x8(%rax), %rax
movw %cx, 0x6(%rax)
movq -0x988(%rbp), %rax
movq 0x8(%rax), %rcx
addq $0x8, %rcx
movq %rcx, 0x8(%rax)
jmp 0x9de07c
movl -0xc90(%rbp), %eax
cmpl -0xbd0(%rbp), %eax
jbe 0x9dd34f
movq -0xca0(%rbp), %rdi
callq 0x9fb980
movq %rax, -0x17f0(%rbp)
movq -0xbc0(%rbp), %rdi
callq 0x9fb980
movq %rax, %rcx
movq -0x17f0(%rbp), %rax
cmpq %rcx, %rax
jne 0x9dd34a
movq -0xbc0(%rbp), %rdi
addq $0x8, %rdi
movq -0xca0(%rbp), %rsi
addq $0x8, %rsi
movq -0xbe0(%rbp), %rdx
callq 0x9fbb90
addq $0x8, %rax
movq %rax, -0xc60(%rbp)
movq -0xbc0(%rbp), %rax
movq -0xca0(%rbp), %rcx
subq %rcx, %rax
movl %eax, -0xc64(%rbp)
movq -0xbc0(%rbp), %rax
cmpq -0xbc8(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %ecx
movq -0xca0(%rbp), %rax
cmpq -0xbd8(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %eax
andl %eax, %ecx
xorl %eax, %eax
cmpl $0x0, %ecx
movb %al, -0x17f1(%rbp)
je 0x9dd2fe
movq -0xbc0(%rbp), %rax
movzbl -0x1(%rax), %eax
movq -0xca0(%rbp), %rcx
movzbl -0x1(%rcx), %ecx
cmpl %ecx, %eax
sete %al
movb %al, -0x17f1(%rbp)
movb -0x17f1(%rbp), %al
testb $0x1, %al
jne 0x9dd30a
jmp 0x9dd345
movq -0xbc0(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0xbc0(%rbp)
movq -0xca0(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0xca0(%rbp)
movq -0xc60(%rbp), %rax
addq $0x1, %rax
movq %rax, -0xc60(%rbp)
jmp 0x9dd2a2
jmp 0x9ddcc9
jmp 0x9dd4cf
cmpl $0x2, -0xb80(%rbp)
jne 0x9dd4cd
movq -0xc10(%rbp), %rax
movq -0xc80(%rbp), %rcx
movl (%rax,%rcx,4), %eax
movl %eax, -0xcc4(%rbp)
movq -0xc28(%rbp), %rax
movl -0xcc4(%rbp), %ecx
addq %rcx, %rax
movq %rax, -0xcd0(%rbp)
movq -0xcd0(%rbp), %rax
cmpq -0xc30(%rbp), %rax
jbe 0x9dd4cb
movq -0xcd0(%rbp), %rdi
callq 0x9fb980
movq %rax, -0x1800(%rbp)
movq -0xbc0(%rbp), %rdi
callq 0x9fb980
movq %rax, %rcx
movq -0x1800(%rbp), %rax
cmpq %rcx, %rax
jne 0x9dd4cb
movq -0xbc0(%rbp), %rdi
addq $0x8, %rdi
movq -0xcd0(%rbp), %rsi
addq $0x8, %rsi
movq -0xbe0(%rbp), %rdx
movq -0xc38(%rbp), %rcx
movq -0xbd8(%rbp), %r8
callq 0x9fbad0
addq $0x8, %rax
movq %rax, -0xc60(%rbp)
movl -0xc8c(%rbp), %eax
subl -0xcc4(%rbp), %eax
subl -0xc3c(%rbp), %eax
movl %eax, -0xc64(%rbp)
movq -0xbc0(%rbp), %rax
cmpq -0xbc8(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %ecx
movq -0xcd0(%rbp), %rax
cmpq -0xc30(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %eax
andl %eax, %ecx
xorl %eax, %eax
cmpl $0x0, %ecx
movb %al, -0x1801(%rbp)
je 0x9dd47f
movq -0xbc0(%rbp), %rax
movzbl -0x1(%rax), %eax
movq -0xcd0(%rbp), %rcx
movzbl -0x1(%rcx), %ecx
cmpl %ecx, %eax
sete %al
movb %al, -0x1801(%rbp)
movb -0x1801(%rbp), %al
testb $0x1, %al
jne 0x9dd48b
jmp 0x9dd4c6
movq -0xbc0(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0xbc0(%rbp)
movq -0xcd0(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0xcd0(%rbp)
movq -0xc60(%rbp), %rax
addq $0x1, %rax
movq %rax, -0xc60(%rbp)
jmp 0x9dd423
jmp 0x9ddcc9
jmp 0x9dd4cd
jmp 0x9dd4cf
movl -0xc94(%rbp), %eax
cmpl -0xbd0(%rbp), %eax
jbe 0x9dd511
movq -0xca8(%rbp), %rdi
callq 0x9fb8d0
movl %eax, -0x1808(%rbp)
movq -0xbc0(%rbp), %rdi
callq 0x9fb8d0
movl %eax, %ecx
movl -0x1808(%rbp), %eax
cmpl %ecx, %eax
jne 0x9dd50c
jmp 0x9dd5c6
jmp 0x9dd59a
cmpl $0x2, -0xb80(%rbp)
jne 0x9dd598
movq -0xc18(%rbp), %rax
movq -0xc88(%rbp), %rcx
movl (%rax,%rcx,4), %eax
movl %eax, -0xcd4(%rbp)
movq -0xc28(%rbp), %rax
movl -0xcd4(%rbp), %ecx
addq %rcx, %rax
movq %rax, -0xca8(%rbp)
movl -0xcd4(%rbp), %eax
addl -0xc3c(%rbp), %eax
movl %eax, -0xc94(%rbp)
movq -0xca8(%rbp), %rax
cmpq -0xc30(%rbp), %rax
jbe 0x9dd596
movq -0xca8(%rbp), %rdi
callq 0x9fb8d0
movl %eax, -0x180c(%rbp)
movq -0xbc0(%rbp), %rdi
callq 0x9fb8d0
movl %eax, %ecx
movl -0x180c(%rbp), %eax
cmpl %ecx, %eax
jne 0x9dd596
jmp 0x9dd5c6
jmp 0x9dd598
jmp 0x9dd59a
movq -0xbc0(%rbp), %rax
movq -0xbc8(%rbp), %rcx
subq %rcx, %rax
sarq $0x8, %rax
addq $0x1, %rax
addq -0xbc0(%rbp), %rax
movq %rax, -0xbc0(%rbp)
jmp 0x9dc582
movq -0xbc0(%rbp), %rcx
incq %rcx
movl -0xb94(%rbp), %eax
movq %rcx, -0xa70(%rbp)
movl %eax, -0xa74(%rbp)
movl $0x8, -0xa78(%rbp)
movl -0xa78(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1818(%rbp)
subl $0x4, %eax
ja 0x9dd61b
movq -0x1818(%rbp), %rax
leaq 0x1f90ea(%rip), %rcx # 0xbd66fc
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9dd61d
movq -0xa70(%rbp), %rdi
movl -0xa74(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0xa68(%rbp)
jmp 0x9dd6a2
movq -0xa70(%rbp), %rdi
movl -0xa74(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0xa68(%rbp)
jmp 0x9dd6a2
movq -0xa70(%rbp), %rdi
movl -0xa74(%rbp), %esi
callq 0x9fb820
movq %rax, -0xa68(%rbp)
jmp 0x9dd6a2
movq -0xa70(%rbp), %rdi
movl -0xa74(%rbp), %esi
callq 0x9fb850
movq %rax, -0xa68(%rbp)
jmp 0x9dd6a2
movq -0xa70(%rbp), %rdi
movl -0xa74(%rbp), %esi
callq 0x9fb880
movq %rax, -0xa68(%rbp)
movq -0xa68(%rbp), %rax
movq %rax, -0xce0(%rbp)
movq -0xbc0(%rbp), %rcx
incq %rcx
movl -0xc40(%rbp), %eax
movq %rcx, -0xa88(%rbp)
movl %eax, -0xa8c(%rbp)
movl $0x8, -0xa90(%rbp)
movl -0xa90(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1820(%rbp)
subl $0x4, %eax
ja 0x9dd705
movq -0x1820(%rbp), %rax
leaq 0x1f9014(%rip), %rcx # 0xbd6710
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9dd707
movq -0xa88(%rbp), %rdi
movl -0xa8c(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0xa80(%rbp)
jmp 0x9dd78c
movq -0xa88(%rbp), %rdi
movl -0xa8c(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0xa80(%rbp)
jmp 0x9dd78c
movq -0xa88(%rbp), %rdi
movl -0xa8c(%rbp), %esi
callq 0x9fb820
movq %rax, -0xa80(%rbp)
jmp 0x9dd78c
movq -0xa88(%rbp), %rdi
movl -0xa8c(%rbp), %esi
callq 0x9fb850
movq %rax, -0xa80(%rbp)
jmp 0x9dd78c
movq -0xa88(%rbp), %rdi
movl -0xa8c(%rbp), %esi
callq 0x9fb880
movq %rax, -0xa80(%rbp)
movq -0xa80(%rbp), %rax
movq %rax, -0xce8(%rbp)
movq -0xb90(%rbp), %rax
movq -0xce0(%rbp), %rcx
movl (%rax,%rcx,4), %eax
movl %eax, -0xcec(%rbp)
movq -0xbb0(%rbp), %rax
movl -0xcec(%rbp), %ecx
addq %rcx, %rax
movq %rax, -0xcf8(%rbp)
movl -0xc8c(%rbp), %edx
addl $0x1, %edx
movq -0xb90(%rbp), %rax
movq -0xce0(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
movl -0xcec(%rbp), %eax
cmpl -0xbd0(%rbp), %eax
jbe 0x9dd92d
movq -0xcf8(%rbp), %rdi
callq 0x9fb980
movq %rax, -0x1828(%rbp)
movq -0xbc0(%rbp), %rdi
addq $0x1, %rdi
callq 0x9fb980
movq %rax, %rcx
movq -0x1828(%rbp), %rax
cmpq %rcx, %rax
jne 0x9dd928
movq -0xbc0(%rbp), %rdi
addq $0x9, %rdi
movq -0xcf8(%rbp), %rsi
addq $0x8, %rsi
movq -0xbe0(%rbp), %rdx
callq 0x9fbb90
addq $0x8, %rax
movq %rax, -0xc60(%rbp)
movq -0xbc0(%rbp), %rax
addq $0x1, %rax
movq %rax, -0xbc0(%rbp)
movq -0xbc0(%rbp), %rax
movq -0xcf8(%rbp), %rcx
subq %rcx, %rax
movl %eax, -0xc64(%rbp)
movq -0xbc0(%rbp), %rax
cmpq -0xbc8(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %ecx
movq -0xcf8(%rbp), %rax
cmpq -0xbd8(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %eax
andl %eax, %ecx
xorl %eax, %eax
cmpl $0x0, %ecx
movb %al, -0x1829(%rbp)
je 0x9dd8dc
movq -0xbc0(%rbp), %rax
movzbl -0x1(%rax), %eax
movq -0xcf8(%rbp), %rcx
movzbl -0x1(%rcx), %ecx
cmpl %ecx, %eax
sete %al
movb %al, -0x1829(%rbp)
movb -0x1829(%rbp), %al
testb $0x1, %al
jne 0x9dd8e8
jmp 0x9dd923
movq -0xbc0(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0xbc0(%rbp)
movq -0xcf8(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0xcf8(%rbp)
movq -0xc60(%rbp), %rax
addq $0x1, %rax
movq %rax, -0xc60(%rbp)
jmp 0x9dd880
jmp 0x9ddcc9
jmp 0x9ddaca
cmpl $0x2, -0xb80(%rbp)
jne 0x9ddac8
movq -0xc10(%rbp), %rax
movq -0xce8(%rbp), %rcx
movl (%rax,%rcx,4), %eax
movl %eax, -0xcfc(%rbp)
movq -0xc28(%rbp), %rax
movl -0xcfc(%rbp), %ecx
addq %rcx, %rax
movq %rax, -0xd08(%rbp)
movq -0xd08(%rbp), %rax
cmpq -0xc30(%rbp), %rax
jbe 0x9ddac6
movq -0xd08(%rbp), %rdi
callq 0x9fb980
movq %rax, -0x1838(%rbp)
movq -0xbc0(%rbp), %rdi
addq $0x1, %rdi
callq 0x9fb980
movq %rax, %rcx
movq -0x1838(%rbp), %rax
cmpq %rcx, %rax
jne 0x9ddac6
movq -0xbc0(%rbp), %rdi
addq $0x1, %rdi
addq $0x8, %rdi
movq -0xd08(%rbp), %rsi
addq $0x8, %rsi
movq -0xbe0(%rbp), %rdx
movq -0xc38(%rbp), %rcx
movq -0xbd8(%rbp), %r8
callq 0x9fbad0
addq $0x8, %rax
movq %rax, -0xc60(%rbp)
movq -0xbc0(%rbp), %rax
addq $0x1, %rax
movq %rax, -0xbc0(%rbp)
movl -0xc8c(%rbp), %eax
addl $0x1, %eax
subl -0xcfc(%rbp), %eax
subl -0xc3c(%rbp), %eax
movl %eax, -0xc64(%rbp)
movq -0xbc0(%rbp), %rax
cmpq -0xbc8(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %ecx
movq -0xd08(%rbp), %rax
cmpq -0xc30(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %eax
andl %eax, %ecx
xorl %eax, %eax
cmpl $0x0, %ecx
movb %al, -0x1839(%rbp)
je 0x9dda7a
movq -0xbc0(%rbp), %rax
movzbl -0x1(%rax), %eax
movq -0xd08(%rbp), %rcx
movzbl -0x1(%rcx), %ecx
cmpl %ecx, %eax
sete %al
movb %al, -0x1839(%rbp)
movb -0x1839(%rbp), %al
testb $0x1, %al
jne 0x9dda86
jmp 0x9ddac1
movq -0xbc0(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0xbc0(%rbp)
movq -0xd08(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0xd08(%rbp)
movq -0xc60(%rbp), %rax
addq $0x1, %rax
movq %rax, -0xc60(%rbp)
jmp 0x9dda1e
jmp 0x9ddcc9
jmp 0x9ddac8
jmp 0x9ddaca
cmpl $0x2, -0xb80(%rbp)
jne 0x9ddbde
movl -0xc94(%rbp), %eax
cmpl -0xbd0(%rbp), %eax
jae 0x9ddbde
movq -0xbc0(%rbp), %rdi
addq $0x4, %rdi
movq -0xca8(%rbp), %rsi
addq $0x4, %rsi
movq -0xbe0(%rbp), %rdx
movq -0xc38(%rbp), %rcx
movq -0xbd8(%rbp), %r8
callq 0x9fbad0
addq $0x4, %rax
movq %rax, -0xc60(%rbp)
movl -0xc8c(%rbp), %eax
subl -0xc94(%rbp), %eax
movl %eax, -0xc64(%rbp)
movq -0xbc0(%rbp), %rax
cmpq -0xbc8(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %ecx
movq -0xca8(%rbp), %rax
cmpq -0xc30(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %eax
andl %eax, %ecx
xorl %eax, %eax
cmpl $0x0, %ecx
movb %al, -0x183a(%rbp)
je 0x9ddb92
movq -0xbc0(%rbp), %rax
movzbl -0x1(%rax), %eax
movq -0xca8(%rbp), %rcx
movzbl -0x1(%rcx), %ecx
cmpl %ecx, %eax
sete %al
movb %al, -0x183a(%rbp)
movb -0x183a(%rbp), %al
testb $0x1, %al
jne 0x9ddb9e
jmp 0x9ddbd9
movq -0xbc0(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0xbc0(%rbp)
movq -0xca8(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0xca8(%rbp)
movq -0xc60(%rbp), %rax
addq $0x1, %rax
movq %rax, -0xc60(%rbp)
jmp 0x9ddb36
jmp 0x9ddcc7
movq -0xbc0(%rbp), %rdi
addq $0x4, %rdi
movq -0xca8(%rbp), %rsi
addq $0x4, %rsi
movq -0xbe0(%rbp), %rdx
callq 0x9fbb90
addq $0x4, %rax
movq %rax, -0xc60(%rbp)
movq -0xbc0(%rbp), %rax
movq -0xca8(%rbp), %rcx
subq %rcx, %rax
movl %eax, -0xc64(%rbp)
movq -0xbc0(%rbp), %rax
cmpq -0xbc8(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %ecx
movq -0xca8(%rbp), %rax
cmpq -0xbd8(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %eax
andl %eax, %ecx
xorl %eax, %eax
cmpl $0x0, %ecx
movb %al, -0x183b(%rbp)
je 0x9ddc7e
movq -0xbc0(%rbp), %rax
movzbl -0x1(%rax), %eax
movq -0xca8(%rbp), %rcx
movzbl -0x1(%rcx), %ecx
cmpl %ecx, %eax
sete %al
movb %al, -0x183b(%rbp)
movb -0x183b(%rbp), %al
testb $0x1, %al
jne 0x9ddc8a
jmp 0x9ddcc5
movq -0xbc0(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0xbc0(%rbp)
movq -0xca8(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0xca8(%rbp)
movq -0xc60(%rbp), %rax
addq $0x1, %rax
movq %rax, -0xc60(%rbp)
jmp 0x9ddc22
jmp 0x9ddcc7
jmp 0x9ddcc9
movl -0xbec(%rbp), %eax
movl %eax, -0xbf0(%rbp)
movl -0xc64(%rbp), %eax
movl %eax, -0xbec(%rbp)
movq -0xb60(%rbp), %r8
movq -0xbc0(%rbp), %rdi
movq -0xbc8(%rbp), %rax
subq %rax, %rdi
movq -0xbc8(%rbp), %rsi
movq -0xbe0(%rbp), %rdx
movl -0xc64(%rbp), %ecx
addl $0x2, %ecx
movq -0xc60(%rbp), %rax
subq $0x3, %rax
movq %r8, -0x8c8(%rbp)
movq %rdi, -0x8d0(%rbp)
movq %rsi, -0x8d8(%rbp)
movq %rdx, -0x8e0(%rbp)
movl %ecx, -0x8e4(%rbp)
movq %rax, -0x8f0(%rbp)
movq -0x8e0(%rbp), %rax
addq $-0x20, %rax
movq %rax, -0x8f8(%rbp)
movq -0x8d8(%rbp), %rax
addq -0x8d0(%rbp), %rax
movq %rax, -0x900(%rbp)
movq -0x900(%rbp), %rax
cmpq -0x8f8(%rbp), %rax
ja 0x9ddf60
movq -0x8c8(%rbp), %rax
movq 0x18(%rax), %rdi
movq -0x8d8(%rbp), %rsi
callq 0x9fbd80
cmpq $0x10, -0x8d0(%rbp)
jbe 0x9ddf5e
movq -0x8c8(%rbp), %rax
movq 0x18(%rax), %rdx
addq $0x10, %rdx
movq -0x8d8(%rbp), %rcx
addq $0x10, %rcx
movq -0x8d0(%rbp), %rax
subq $0x10, %rax
movq %rdx, -0x108(%rbp)
movq %rcx, -0x110(%rbp)
movq %rax, -0x118(%rbp)
movl $0x0, -0x11c(%rbp)
movq -0x108(%rbp), %rax
movq -0x110(%rbp), %rcx
subq %rcx, %rax
movq %rax, -0x128(%rbp)
movq -0x110(%rbp), %rax
movq %rax, -0x130(%rbp)
movq -0x108(%rbp), %rax
movq %rax, -0x138(%rbp)
movq -0x138(%rbp), %rax
addq -0x118(%rbp), %rax
movq %rax, -0x140(%rbp)
cmpl $0x1, -0x11c(%rbp)
jne 0x9dde92
cmpq $0x10, -0x128(%rbp)
jge 0x9dde92
jmp 0x9dde46
movq -0x138(%rbp), %rdi
movq -0x130(%rbp), %rsi
callq 0x9fbf50
movq -0x138(%rbp), %rax
addq $0x8, %rax
movq %rax, -0x138(%rbp)
movq -0x130(%rbp), %rax
addq $0x8, %rax
movq %rax, -0x130(%rbp)
movq -0x138(%rbp), %rax
cmpq -0x140(%rbp), %rax
jb 0x9dde46
jmp 0x9ddf5c
movq -0x138(%rbp), %rdi
movq -0x130(%rbp), %rsi
callq 0x9fbd80
movl $0x10, %eax
cmpq -0x118(%rbp), %rax
jl 0x9ddeb8
jmp 0x9ddf5c
movq -0x138(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x138(%rbp)
movq -0x130(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x130(%rbp)
movq -0x138(%rbp), %rdi
movq -0x130(%rbp), %rsi
callq 0x9fbd80
movq -0x138(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x138(%rbp)
movq -0x130(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x130(%rbp)
movq -0x138(%rbp), %rdi
movq -0x130(%rbp), %rsi
callq 0x9fbd80
movq -0x138(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x138(%rbp)
movq -0x130(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x130(%rbp)
movq -0x138(%rbp), %rax
cmpq -0x140(%rbp), %rax
jb 0x9ddedc
jmp 0x9ddf5c
jmp 0x9ddf5e
jmp 0x9ddf85
movq -0x8c8(%rbp), %rax
movq 0x18(%rax), %rdi
movq -0x8d8(%rbp), %rsi
movq -0x900(%rbp), %rdx
movq -0x8f8(%rbp), %rcx
callq 0x9fbdb0
movq -0x8d0(%rbp), %rcx
movq -0x8c8(%rbp), %rax
addq 0x18(%rax), %rcx
movq %rcx, 0x18(%rax)
cmpq $0xffff, -0x8d0(%rbp) # imm = 0xFFFF
jbe 0x9ddfde
movq -0x8c8(%rbp), %rax
movl $0x1, 0x48(%rax)
movq -0x8c8(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x8c8(%rbp), %rcx
movq (%rcx), %rcx
subq %rcx, %rax
sarq $0x3, %rax
movl %eax, %ecx
movq -0x8c8(%rbp), %rax
movl %ecx, 0x4c(%rax)
movq -0x8d0(%rbp), %rax
movw %ax, %cx
movq -0x8c8(%rbp), %rax
movq 0x8(%rax), %rax
movw %cx, 0x4(%rax)
movl -0x8e4(%rbp), %ecx
addl $0x1, %ecx
movq -0x8c8(%rbp), %rax
movq 0x8(%rax), %rax
movl %ecx, (%rax)
cmpq $0xffff, -0x8f0(%rbp) # imm = 0xFFFF
jbe 0x9de050
movq -0x8c8(%rbp), %rax
movl $0x2, 0x48(%rax)
movq -0x8c8(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x8c8(%rbp), %rcx
movq (%rcx), %rcx
subq %rcx, %rax
sarq $0x3, %rax
movl %eax, %ecx
movq -0x8c8(%rbp), %rax
movl %ecx, 0x4c(%rax)
movq -0x8f0(%rbp), %rax
movw %ax, %cx
movq -0x8c8(%rbp), %rax
movq 0x8(%rax), %rax
movw %cx, 0x6(%rax)
movq -0x8c8(%rbp), %rax
movq 0x8(%rax), %rcx
addq $0x8, %rcx
movq %rcx, 0x8(%rax)
movq -0xc60(%rbp), %rax
addq -0xbc0(%rbp), %rax
movq %rax, -0xbc0(%rbp)
movq -0xbc0(%rbp), %rax
movq %rax, -0xbc8(%rbp)
movq -0xbc0(%rbp), %rax
cmpq -0xbe8(%rbp), %rax
ja 0x9df324
movl -0xc8c(%rbp), %eax
addl $0x2, %eax
movl %eax, -0xd0c(%rbp)
movl -0xd0c(%rbp), %eax
movl %eax, %ecx
movl %ecx, -0x1854(%rbp)
movq -0xb90(%rbp), %rcx
movq %rcx, -0x1850(%rbp)
movq -0xbb0(%rbp), %rcx
addq %rax, %rcx
movl -0xb94(%rbp), %eax
movq %rcx, -0xaa0(%rbp)
movl %eax, -0xaa4(%rbp)
movl $0x8, -0xaa8(%rbp)
movl -0xaa8(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1848(%rbp)
subl $0x4, %eax
ja 0x9de133
movq -0x1848(%rbp), %rax
leaq 0x1f85fa(%rip), %rcx # 0xbd6724
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9de135
movq -0xaa0(%rbp), %rdi
movl -0xaa4(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0xa98(%rbp)
jmp 0x9de1ba
movq -0xaa0(%rbp), %rdi
movl -0xaa4(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0xa98(%rbp)
jmp 0x9de1ba
movq -0xaa0(%rbp), %rdi
movl -0xaa4(%rbp), %esi
callq 0x9fb820
movq %rax, -0xa98(%rbp)
jmp 0x9de1ba
movq -0xaa0(%rbp), %rdi
movl -0xaa4(%rbp), %esi
callq 0x9fb850
movq %rax, -0xa98(%rbp)
jmp 0x9de1ba
movq -0xaa0(%rbp), %rdi
movl -0xaa4(%rbp), %esi
callq 0x9fb880
movq %rax, -0xa98(%rbp)
movq -0x1850(%rbp), %rax
movl -0x1854(%rbp), %edx
movq -0xa98(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
movq -0xbc0(%rbp), %rcx
addq $-0x2, %rcx
movl -0xbb0(%rbp), %edx
movl %ecx, %eax
subl %edx, %eax
movl %eax, -0x186c(%rbp)
movq -0xb90(%rbp), %rax
movq %rax, -0x1868(%rbp)
movl -0xb94(%rbp), %eax
movq %rcx, -0xab8(%rbp)
movl %eax, -0xabc(%rbp)
movl $0x8, -0xac0(%rbp)
movl -0xac0(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1860(%rbp)
subl $0x4, %eax
ja 0x9de245
movq -0x1860(%rbp), %rax
leaq 0x1f84fc(%rip), %rcx # 0xbd6738
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9de247
movq -0xab8(%rbp), %rdi
movl -0xabc(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0xab0(%rbp)
jmp 0x9de2cc
movq -0xab8(%rbp), %rdi
movl -0xabc(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0xab0(%rbp)
jmp 0x9de2cc
movq -0xab8(%rbp), %rdi
movl -0xabc(%rbp), %esi
callq 0x9fb820
movq %rax, -0xab0(%rbp)
jmp 0x9de2cc
movq -0xab8(%rbp), %rdi
movl -0xabc(%rbp), %esi
callq 0x9fb850
movq %rax, -0xab0(%rbp)
jmp 0x9de2cc
movq -0xab8(%rbp), %rdi
movl -0xabc(%rbp), %esi
callq 0x9fb880
movq %rax, -0xab0(%rbp)
movq -0x1868(%rbp), %rax
movl -0x186c(%rbp), %edx
movq -0xab0(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
movl -0xd0c(%rbp), %eax
movl %eax, %ecx
movl %ecx, -0x1884(%rbp)
movq -0xba0(%rbp), %rcx
movq %rcx, -0x1880(%rbp)
movq -0xbb0(%rbp), %rdx
addq %rax, %rdx
movl -0xba4(%rbp), %ecx
movl -0xb7c(%rbp), %eax
movq %rdx, -0xad0(%rbp)
movl %ecx, -0xad4(%rbp)
movl %eax, -0xad8(%rbp)
movl -0xad8(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1878(%rbp)
subl $0x4, %eax
ja 0x9de356
movq -0x1878(%rbp), %rax
leaq 0x1f83ff(%rip), %rcx # 0xbd674c
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9de358
movq -0xad0(%rbp), %rdi
movl -0xad4(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0xac8(%rbp)
jmp 0x9de3dd
movq -0xad0(%rbp), %rdi
movl -0xad4(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0xac8(%rbp)
jmp 0x9de3dd
movq -0xad0(%rbp), %rdi
movl -0xad4(%rbp), %esi
callq 0x9fb820
movq %rax, -0xac8(%rbp)
jmp 0x9de3dd
movq -0xad0(%rbp), %rdi
movl -0xad4(%rbp), %esi
callq 0x9fb850
movq %rax, -0xac8(%rbp)
jmp 0x9de3dd
movq -0xad0(%rbp), %rdi
movl -0xad4(%rbp), %esi
callq 0x9fb880
movq %rax, -0xac8(%rbp)
movq -0x1880(%rbp), %rax
movl -0x1884(%rbp), %edx
movq -0xac8(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
movq -0xbc0(%rbp), %rdx
decq %rdx
movl -0xbb0(%rbp), %ecx
movl %edx, %eax
subl %ecx, %eax
movl %eax, -0x189c(%rbp)
movq -0xba0(%rbp), %rax
movq %rax, -0x1898(%rbp)
movl -0xba4(%rbp), %ecx
movl -0xb7c(%rbp), %eax
movq %rdx, -0xae8(%rbp)
movl %ecx, -0xaec(%rbp)
movl %eax, -0xaf0(%rbp)
movl -0xaf0(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1890(%rbp)
subl $0x4, %eax
ja 0x9de469
movq -0x1890(%rbp), %rax
leaq 0x1f8300(%rip), %rcx # 0xbd6760
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9de46b
movq -0xae8(%rbp), %rdi
movl -0xaec(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0xae0(%rbp)
jmp 0x9de4f0
movq -0xae8(%rbp), %rdi
movl -0xaec(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0xae0(%rbp)
jmp 0x9de4f0
movq -0xae8(%rbp), %rdi
movl -0xaec(%rbp), %esi
callq 0x9fb820
movq %rax, -0xae0(%rbp)
jmp 0x9de4f0
movq -0xae8(%rbp), %rdi
movl -0xaec(%rbp), %esi
callq 0x9fb850
movq %rax, -0xae0(%rbp)
jmp 0x9de4f0
movq -0xae8(%rbp), %rdi
movl -0xaec(%rbp), %esi
callq 0x9fb880
movq %rax, -0xae0(%rbp)
movq -0x1898(%rbp), %rax
movl -0x189c(%rbp), %edx
movq -0xae0(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
cmpl $0x2, -0xb80(%rbp)
jne 0x9dec5f
jmp 0x9de516
movq -0xbc0(%rbp), %rax
cmpq -0xbe8(%rbp), %rax
ja 0x9dec5d
movq -0xbc0(%rbp), %rax
movq -0xbb0(%rbp), %rcx
subq %rcx, %rax
movl %eax, -0xd10(%rbp)
movl -0xd10(%rbp), %eax
subl -0xbf0(%rbp), %eax
movl %eax, -0xd14(%rbp)
cmpl $0x2, -0xb80(%rbp)
jne 0x9de593
movl -0xd14(%rbp), %eax
cmpl -0xbd0(%rbp), %eax
jae 0x9de593
movq -0xc28(%rbp), %rax
movl -0xd14(%rbp), %ecx
addq %rcx, %rax
movl -0xc3c(%rbp), %ecx
movl %ecx, %edx
xorl %ecx, %ecx
subq %rdx, %rcx
addq %rcx, %rax
movq %rax, -0x18a8(%rbp)
jmp 0x9de5aa
movq -0xbb0(%rbp), %rax
movl -0xd14(%rbp), %ecx
addq %rcx, %rax
movq %rax, -0x18a8(%rbp)
movq -0x18a8(%rbp), %rax
movq %rax, -0xd20(%rbp)
movl -0xbd0(%rbp), %eax
subl $0x1, %eax
subl -0xd14(%rbp), %eax
cmpl $0x3, %eax
jb 0x9dec5b
movq -0xd20(%rbp), %rdi
callq 0x9fb8d0
movl %eax, -0x18ac(%rbp)
movq -0xbc0(%rbp), %rdi
callq 0x9fb8d0
movl %eax, %ecx
movl -0x18ac(%rbp), %eax
cmpl %ecx, %eax
jne 0x9dec5b
movl -0xd14(%rbp), %eax
cmpl -0xbd0(%rbp), %eax
jae 0x9de61c
movq -0xc38(%rbp), %rax
movq %rax, -0x18b8(%rbp)
jmp 0x9de62a
movq -0xbe0(%rbp), %rax
movq %rax, -0x18b8(%rbp)
movq -0x18b8(%rbp), %rax
movq %rax, -0xd28(%rbp)
movq -0xbc0(%rbp), %rdi
addq $0x4, %rdi
movq -0xd20(%rbp), %rsi
addq $0x4, %rsi
movq -0xbe0(%rbp), %rdx
movq -0xd28(%rbp), %rcx
movq -0xbd8(%rbp), %r8
callq 0x9fbad0
addq $0x4, %rax
movq %rax, -0xd30(%rbp)
movl -0xbf0(%rbp), %eax
movl %eax, -0xd34(%rbp)
movl -0xbec(%rbp), %eax
movl %eax, -0xbf0(%rbp)
movl -0xd34(%rbp), %eax
movl %eax, -0xbec(%rbp)
movq -0xb60(%rbp), %rsi
movq -0xbc8(%rbp), %rdx
movq -0xbe0(%rbp), %rcx
movq -0xd30(%rbp), %rax
subq $0x3, %rax
movq %rsi, -0x948(%rbp)
movq $0x0, -0x950(%rbp)
movq %rdx, -0x958(%rbp)
movq %rcx, -0x960(%rbp)
movl $0x0, -0x964(%rbp)
movq %rax, -0x970(%rbp)
movq -0x960(%rbp), %rax
addq $-0x20, %rax
movq %rax, -0x978(%rbp)
movq -0x958(%rbp), %rax
addq -0x950(%rbp), %rax
movq %rax, -0x980(%rbp)
movq -0x980(%rbp), %rax
cmpq -0x978(%rbp), %rax
ja 0x9de904
movq -0x948(%rbp), %rax
movq 0x18(%rax), %rdi
movq -0x958(%rbp), %rsi
callq 0x9fbd80
cmpq $0x10, -0x950(%rbp)
jbe 0x9de902
movq -0x948(%rbp), %rax
movq 0x18(%rax), %rdx
addq $0x10, %rdx
movq -0x958(%rbp), %rcx
addq $0x10, %rcx
movq -0x950(%rbp), %rax
subq $0x10, %rax
movq %rdx, -0x88(%rbp)
movq %rcx, -0x90(%rbp)
movq %rax, -0x98(%rbp)
movl $0x0, -0x9c(%rbp)
movq -0x88(%rbp), %rax
movq -0x90(%rbp), %rcx
subq %rcx, %rax
movq %rax, -0xa8(%rbp)
movq -0x90(%rbp), %rax
movq %rax, -0xb0(%rbp)
movq -0x88(%rbp), %rax
movq %rax, -0xb8(%rbp)
movq -0xb8(%rbp), %rax
addq -0x98(%rbp), %rax
movq %rax, -0xc0(%rbp)
cmpl $0x1, -0x9c(%rbp)
jne 0x9de836
cmpq $0x10, -0xa8(%rbp)
jge 0x9de836
jmp 0x9de7ea
movq -0xb8(%rbp), %rdi
movq -0xb0(%rbp), %rsi
callq 0x9fbf50
movq -0xb8(%rbp), %rax
addq $0x8, %rax
movq %rax, -0xb8(%rbp)
movq -0xb0(%rbp), %rax
addq $0x8, %rax
movq %rax, -0xb0(%rbp)
movq -0xb8(%rbp), %rax
cmpq -0xc0(%rbp), %rax
jb 0x9de7ea
jmp 0x9de900
movq -0xb8(%rbp), %rdi
movq -0xb0(%rbp), %rsi
callq 0x9fbd80
movl $0x10, %eax
cmpq -0x98(%rbp), %rax
jl 0x9de85c
jmp 0x9de900
movq -0xb8(%rbp), %rax
addq $0x10, %rax
movq %rax, -0xb8(%rbp)
movq -0xb0(%rbp), %rax
addq $0x10, %rax
movq %rax, -0xb0(%rbp)
movq -0xb8(%rbp), %rdi
movq -0xb0(%rbp), %rsi
callq 0x9fbd80
movq -0xb8(%rbp), %rax
addq $0x10, %rax
movq %rax, -0xb8(%rbp)
movq -0xb0(%rbp), %rax
addq $0x10, %rax
movq %rax, -0xb0(%rbp)
movq -0xb8(%rbp), %rdi
movq -0xb0(%rbp), %rsi
callq 0x9fbd80
movq -0xb8(%rbp), %rax
addq $0x10, %rax
movq %rax, -0xb8(%rbp)
movq -0xb0(%rbp), %rax
addq $0x10, %rax
movq %rax, -0xb0(%rbp)
movq -0xb8(%rbp), %rax
cmpq -0xc0(%rbp), %rax
jb 0x9de880
jmp 0x9de900
jmp 0x9de902
jmp 0x9de929
movq -0x948(%rbp), %rax
movq 0x18(%rax), %rdi
movq -0x958(%rbp), %rsi
movq -0x980(%rbp), %rdx
movq -0x978(%rbp), %rcx
callq 0x9fbdb0
movq -0x950(%rbp), %rcx
movq -0x948(%rbp), %rax
addq 0x18(%rax), %rcx
movq %rcx, 0x18(%rax)
cmpq $0xffff, -0x950(%rbp) # imm = 0xFFFF
jbe 0x9de982
movq -0x948(%rbp), %rax
movl $0x1, 0x48(%rax)
movq -0x948(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x948(%rbp), %rcx
movq (%rcx), %rcx
subq %rcx, %rax
sarq $0x3, %rax
movl %eax, %ecx
movq -0x948(%rbp), %rax
movl %ecx, 0x4c(%rax)
movq -0x950(%rbp), %rax
movw %ax, %cx
movq -0x948(%rbp), %rax
movq 0x8(%rax), %rax
movw %cx, 0x4(%rax)
movl -0x964(%rbp), %ecx
addl $0x1, %ecx
movq -0x948(%rbp), %rax
movq 0x8(%rax), %rax
movl %ecx, (%rax)
cmpq $0xffff, -0x970(%rbp) # imm = 0xFFFF
jbe 0x9de9f4
movq -0x948(%rbp), %rax
movl $0x2, 0x48(%rax)
movq -0x948(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x948(%rbp), %rcx
movq (%rcx), %rcx
subq %rcx, %rax
sarq $0x3, %rax
movl %eax, %ecx
movq -0x948(%rbp), %rax
movl %ecx, 0x4c(%rax)
movw -0x970(%rbp), %cx
movq -0x948(%rbp), %rax
movq 0x8(%rax), %rax
movw %cx, 0x6(%rax)
movq -0x948(%rbp), %rax
movq 0x8(%rax), %rcx
addq $0x8, %rcx
movq %rcx, 0x8(%rax)
movl -0xd10(%rbp), %eax
movl %eax, -0x18cc(%rbp)
movq -0xba0(%rbp), %rax
movq %rax, -0x18c8(%rbp)
movq -0xbc0(%rbp), %rdx
movl -0xba4(%rbp), %ecx
movl -0xb7c(%rbp), %eax
movq %rdx, -0xb00(%rbp)
movl %ecx, -0xb04(%rbp)
movl %eax, -0xb08(%rbp)
movl -0xb08(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x18c0(%rbp)
subl $0x4, %eax
ja 0x9dea8b
movq -0x18c0(%rbp), %rax
leaq 0x1f7d1a(%rip), %rcx # 0xbd679c
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9dea8d
movq -0xb00(%rbp), %rdi
movl -0xb04(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0xaf8(%rbp)
jmp 0x9deb12
movq -0xb00(%rbp), %rdi
movl -0xb04(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0xaf8(%rbp)
jmp 0x9deb12
movq -0xb00(%rbp), %rdi
movl -0xb04(%rbp), %esi
callq 0x9fb820
movq %rax, -0xaf8(%rbp)
jmp 0x9deb12
movq -0xb00(%rbp), %rdi
movl -0xb04(%rbp), %esi
callq 0x9fb850
movq %rax, -0xaf8(%rbp)
jmp 0x9deb12
movq -0xb00(%rbp), %rdi
movl -0xb04(%rbp), %esi
callq 0x9fb880
movq %rax, -0xaf8(%rbp)
movq -0x18c8(%rbp), %rax
movl -0x18cc(%rbp), %edx
movq -0xaf8(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
movl -0xd10(%rbp), %eax
movl %eax, -0x18e4(%rbp)
movq -0xb90(%rbp), %rax
movq %rax, -0x18e0(%rbp)
movq -0xbc0(%rbp), %rcx
movl -0xb94(%rbp), %eax
movq %rcx, -0xb18(%rbp)
movl %eax, -0xb1c(%rbp)
movl $0x8, -0xb20(%rbp)
movl -0xb20(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x18d8(%rbp)
subl $0x4, %eax
ja 0x9deb95
movq -0x18d8(%rbp), %rax
leaq 0x1f7c24(%rip), %rcx # 0xbd67b0
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9deb97
movq -0xb18(%rbp), %rdi
movl -0xb1c(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0xb10(%rbp)
jmp 0x9dec1c
movq -0xb18(%rbp), %rdi
movl -0xb1c(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0xb10(%rbp)
jmp 0x9dec1c
movq -0xb18(%rbp), %rdi
movl -0xb1c(%rbp), %esi
callq 0x9fb820
movq %rax, -0xb10(%rbp)
jmp 0x9dec1c
movq -0xb18(%rbp), %rdi
movl -0xb1c(%rbp), %esi
callq 0x9fb850
movq %rax, -0xb10(%rbp)
jmp 0x9dec1c
movq -0xb18(%rbp), %rdi
movl -0xb1c(%rbp), %esi
callq 0x9fb880
movq %rax, -0xb10(%rbp)
movq -0x18e0(%rbp), %rax
movl -0x18e4(%rbp), %edx
movq -0xb10(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
movq -0xd30(%rbp), %rax
addq -0xbc0(%rbp), %rax
movq %rax, -0xbc0(%rbp)
movq -0xbc0(%rbp), %rax
movq %rax, -0xbc8(%rbp)
jmp 0x9de516
jmp 0x9dec5d
jmp 0x9dec5f
cmpl $0x0, -0xb80(%rbp)
jne 0x9df322
jmp 0x9dec6e
movq -0xbc0(%rbp), %rcx
xorl %eax, %eax
cmpq -0xbe8(%rbp), %rcx
movb %al, -0x18e5(%rbp)
ja 0x9decf0
cmpl $0x0, -0xbf0(%rbp)
seta %al
andb $0x1, %al
movzbl %al, %eax
movl %eax, -0x18ec(%rbp)
movq -0xbc0(%rbp), %rdi
callq 0x9fb8d0
movl %eax, -0x18f0(%rbp)
movq -0xbc0(%rbp), %rdi
movl -0xbf0(%rbp), %eax
movl %eax, %ecx
xorl %eax, %eax
subq %rcx, %rax
addq %rax, %rdi
callq 0x9fb8d0
movl -0x18f0(%rbp), %ecx
movl %eax, %edx
movl -0x18ec(%rbp), %eax
cmpl %edx, %ecx
sete %cl
andb $0x1, %cl
movzbl %cl, %ecx
andl %ecx, %eax
cmpl $0x0, %eax
setne %al
movb %al, -0x18e5(%rbp)
movb -0x18e5(%rbp), %al
testb $0x1, %al
jne 0x9decff
jmp 0x9df320
movq -0xbc0(%rbp), %rdi
addq $0x4, %rdi
movl -0xbf0(%rbp), %eax
movq %rdi, %rsi
subq %rax, %rsi
movq -0xbe0(%rbp), %rdx
callq 0x9fbb90
addq $0x4, %rax
movq %rax, -0xd40(%rbp)
movl -0xbf0(%rbp), %eax
movl %eax, -0xd44(%rbp)
movl -0xbec(%rbp), %eax
movl %eax, -0xbf0(%rbp)
movl -0xd44(%rbp), %eax
movl %eax, -0xbec(%rbp)
movq -0xbc0(%rbp), %rdx
movl -0xbb0(%rbp), %ecx
movl %edx, %eax
subl %ecx, %eax
movl %eax, -0x1904(%rbp)
movq -0xba0(%rbp), %rax
movq %rax, -0x1900(%rbp)
movl -0xba4(%rbp), %ecx
movl -0xb7c(%rbp), %eax
movq %rdx, -0xb30(%rbp)
movl %ecx, -0xb34(%rbp)
movl %eax, -0xb38(%rbp)
movl -0xb38(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x18f8(%rbp)
subl $0x4, %eax
ja 0x9dedc3
movq -0x18f8(%rbp), %rax
leaq 0x1f79ba(%rip), %rcx # 0xbd6774
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9dedc5
movq -0xb30(%rbp), %rdi
movl -0xb34(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0xb28(%rbp)
jmp 0x9dee4a
movq -0xb30(%rbp), %rdi
movl -0xb34(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0xb28(%rbp)
jmp 0x9dee4a
movq -0xb30(%rbp), %rdi
movl -0xb34(%rbp), %esi
callq 0x9fb820
movq %rax, -0xb28(%rbp)
jmp 0x9dee4a
movq -0xb30(%rbp), %rdi
movl -0xb34(%rbp), %esi
callq 0x9fb850
movq %rax, -0xb28(%rbp)
jmp 0x9dee4a
movq -0xb30(%rbp), %rdi
movl -0xb34(%rbp), %esi
callq 0x9fb880
movq %rax, -0xb28(%rbp)
movq -0x1900(%rbp), %rax
movl -0x1904(%rbp), %edx
movq -0xb28(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
movq -0xbc0(%rbp), %rcx
movl -0xbb0(%rbp), %edx
movl %ecx, %eax
subl %edx, %eax
movl %eax, -0x191c(%rbp)
movq -0xb90(%rbp), %rax
movq %rax, -0x1918(%rbp)
movl -0xb94(%rbp), %eax
movq %rcx, -0xb48(%rbp)
movl %eax, -0xb4c(%rbp)
movl $0x8, -0xb50(%rbp)
movl -0xb50(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1910(%rbp)
subl $0x4, %eax
ja 0x9deed1
movq -0x1910(%rbp), %rax
leaq 0x1f78c0(%rip), %rcx # 0xbd6788
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9deed3
movq -0xb48(%rbp), %rdi
movl -0xb4c(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0xb40(%rbp)
jmp 0x9def58
movq -0xb48(%rbp), %rdi
movl -0xb4c(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0xb40(%rbp)
jmp 0x9def58
movq -0xb48(%rbp), %rdi
movl -0xb4c(%rbp), %esi
callq 0x9fb820
movq %rax, -0xb40(%rbp)
jmp 0x9def58
movq -0xb48(%rbp), %rdi
movl -0xb4c(%rbp), %esi
callq 0x9fb850
movq %rax, -0xb40(%rbp)
jmp 0x9def58
movq -0xb48(%rbp), %rdi
movl -0xb4c(%rbp), %esi
callq 0x9fb880
movq %rax, -0xb40(%rbp)
movq -0x1918(%rbp), %rax
movl -0x191c(%rbp), %edx
movq -0xb40(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
movq -0xb60(%rbp), %rsi
movq -0xbc8(%rbp), %rdx
movq -0xbe0(%rbp), %rcx
movq -0xd40(%rbp), %rax
subq $0x3, %rax
movq %rsi, -0x908(%rbp)
movq $0x0, -0x910(%rbp)
movq %rdx, -0x918(%rbp)
movq %rcx, -0x920(%rbp)
movl $0x0, -0x924(%rbp)
movq %rax, -0x930(%rbp)
movq -0x920(%rbp), %rax
addq $-0x20, %rax
movq %rax, -0x938(%rbp)
movq -0x918(%rbp), %rax
addq -0x910(%rbp), %rax
movq %rax, -0x940(%rbp)
movq -0x940(%rbp), %rax
cmpq -0x938(%rbp), %rax
ja 0x9df1dc
movq -0x908(%rbp), %rax
movq 0x18(%rax), %rdi
movq -0x918(%rbp), %rsi
callq 0x9fbd80
cmpq $0x10, -0x910(%rbp)
jbe 0x9df1da
movq -0x908(%rbp), %rax
movq 0x18(%rax), %rdx
addq $0x10, %rdx
movq -0x918(%rbp), %rcx
addq $0x10, %rcx
movq -0x910(%rbp), %rax
subq $0x10, %rax
movq %rdx, -0xc8(%rbp)
movq %rcx, -0xd0(%rbp)
movq %rax, -0xd8(%rbp)
movl $0x0, -0xdc(%rbp)
movq -0xc8(%rbp), %rax
movq -0xd0(%rbp), %rcx
subq %rcx, %rax
movq %rax, -0xe8(%rbp)
movq -0xd0(%rbp), %rax
movq %rax, -0xf0(%rbp)
movq -0xc8(%rbp), %rax
movq %rax, -0xf8(%rbp)
movq -0xf8(%rbp), %rax
addq -0xd8(%rbp), %rax
movq %rax, -0x100(%rbp)
cmpl $0x1, -0xdc(%rbp)
jne 0x9df10e
cmpq $0x10, -0xe8(%rbp)
jge 0x9df10e
jmp 0x9df0c2
movq -0xf8(%rbp), %rdi
movq -0xf0(%rbp), %rsi
callq 0x9fbf50
movq -0xf8(%rbp), %rax
addq $0x8, %rax
movq %rax, -0xf8(%rbp)
movq -0xf0(%rbp), %rax
addq $0x8, %rax
movq %rax, -0xf0(%rbp)
movq -0xf8(%rbp), %rax
cmpq -0x100(%rbp), %rax
jb 0x9df0c2
jmp 0x9df1d8
movq -0xf8(%rbp), %rdi
movq -0xf0(%rbp), %rsi
callq 0x9fbd80
movl $0x10, %eax
cmpq -0xd8(%rbp), %rax
jl 0x9df134
jmp 0x9df1d8
movq -0xf8(%rbp), %rax
addq $0x10, %rax
movq %rax, -0xf8(%rbp)
movq -0xf0(%rbp), %rax
addq $0x10, %rax
movq %rax, -0xf0(%rbp)
movq -0xf8(%rbp), %rdi
movq -0xf0(%rbp), %rsi
callq 0x9fbd80
movq -0xf8(%rbp), %rax
addq $0x10, %rax
movq %rax, -0xf8(%rbp)
movq -0xf0(%rbp), %rax
addq $0x10, %rax
movq %rax, -0xf0(%rbp)
movq -0xf8(%rbp), %rdi
movq -0xf0(%rbp), %rsi
callq 0x9fbd80
movq -0xf8(%rbp), %rax
addq $0x10, %rax
movq %rax, -0xf8(%rbp)
movq -0xf0(%rbp), %rax
addq $0x10, %rax
movq %rax, -0xf0(%rbp)
movq -0xf8(%rbp), %rax
cmpq -0x100(%rbp), %rax
jb 0x9df158
jmp 0x9df1d8
jmp 0x9df1da
jmp 0x9df201
movq -0x908(%rbp), %rax
movq 0x18(%rax), %rdi
movq -0x918(%rbp), %rsi
movq -0x940(%rbp), %rdx
movq -0x938(%rbp), %rcx
callq 0x9fbdb0
movq -0x910(%rbp), %rcx
movq -0x908(%rbp), %rax
addq 0x18(%rax), %rcx
movq %rcx, 0x18(%rax)
cmpq $0xffff, -0x910(%rbp) # imm = 0xFFFF
jbe 0x9df25a
movq -0x908(%rbp), %rax
movl $0x1, 0x48(%rax)
movq -0x908(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x908(%rbp), %rcx
movq (%rcx), %rcx
subq %rcx, %rax
sarq $0x3, %rax
movl %eax, %ecx
movq -0x908(%rbp), %rax
movl %ecx, 0x4c(%rax)
movq -0x910(%rbp), %rax
movw %ax, %cx
movq -0x908(%rbp), %rax
movq 0x8(%rax), %rax
movw %cx, 0x4(%rax)
movl -0x924(%rbp), %ecx
addl $0x1, %ecx
movq -0x908(%rbp), %rax
movq 0x8(%rax), %rax
movl %ecx, (%rax)
cmpq $0xffff, -0x930(%rbp) # imm = 0xFFFF
jbe 0x9df2cc
movq -0x908(%rbp), %rax
movl $0x2, 0x48(%rax)
movq -0x908(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x908(%rbp), %rcx
movq (%rcx), %rcx
subq %rcx, %rax
sarq $0x3, %rax
movl %eax, %ecx
movq -0x908(%rbp), %rax
movl %ecx, 0x4c(%rax)
movq -0x930(%rbp), %rax
movw %ax, %cx
movq -0x908(%rbp), %rax
movq 0x8(%rax), %rax
movw %cx, 0x6(%rax)
movq -0x908(%rbp), %rax
movq 0x8(%rax), %rcx
addq $0x8, %rcx
movq %rcx, 0x8(%rax)
movq -0xd40(%rbp), %rax
addq -0xbc0(%rbp), %rax
movq %rax, -0xbc0(%rbp)
movq -0xbc0(%rbp), %rax
movq %rax, -0xbc8(%rbp)
jmp 0x9dec6e
jmp 0x9df322
jmp 0x9df324
jmp 0x9dc582
cmpl $0x0, -0xbec(%rbp)
je 0x9df340
movl -0xbec(%rbp), %eax
movl %eax, -0x1920(%rbp)
jmp 0x9df34c
movl -0xbf4(%rbp), %eax
movl %eax, -0x1920(%rbp)
movl -0x1920(%rbp), %ecx
movq -0xb68(%rbp), %rax
movl %ecx, (%rax)
cmpl $0x0, -0xbf0(%rbp)
je 0x9df372
movl -0xbf0(%rbp), %eax
movl %eax, -0x1924(%rbp)
jmp 0x9df37e
movl -0xbf4(%rbp), %eax
movl %eax, -0x1924(%rbp)
movl -0x1924(%rbp), %ecx
movq -0xb68(%rbp), %rax
movl %ecx, 0x4(%rax)
movq -0xbe0(%rbp), %rax
movq -0xbc8(%rbp), %rcx
subq %rcx, %rax
movq %rax, -0x1728(%rbp)
jmp 0x9e8fd0
movq -0x1730(%rbp), %rdi
movq -0x1738(%rbp), %rsi
movq -0x1740(%rbp), %rdx
movq -0x1748(%rbp), %rcx
movq -0x1750(%rbp), %rax
movq %rdi, -0xea0(%rbp)
movq %rsi, -0xea8(%rbp)
movq %rdx, -0xeb0(%rbp)
movq %rcx, -0xeb8(%rbp)
movq %rax, -0xec0(%rbp)
movl $0x5, -0xec4(%rbp)
movl $0x0, -0xec8(%rbp)
movq -0xea0(%rbp), %rax
addq $0xf0, %rax
movq %rax, -0xed0(%rbp)
movq -0xea0(%rbp), %rax
movq 0x60(%rax), %rax
movq %rax, -0xed8(%rbp)
movq -0xed0(%rbp), %rax
movl 0x8(%rax), %eax
movl %eax, -0xedc(%rbp)
movq -0xea0(%rbp), %rax
movq 0x70(%rax), %rax
movq %rax, -0xee8(%rbp)
movq -0xed0(%rbp), %rax
movl 0x4(%rax), %eax
movl %eax, -0xeec(%rbp)
movq -0xea0(%rbp), %rax
movq 0x8(%rax), %rax
movq %rax, -0xef8(%rbp)
movq -0xeb8(%rbp), %rax
movq %rax, -0xf00(%rbp)
movq -0xf00(%rbp), %rax
movq %rax, -0xf08(%rbp)
movq -0xf00(%rbp), %rax
movq %rax, -0xf10(%rbp)
movq -0xf00(%rbp), %rax
movq -0xef8(%rbp), %rcx
subq %rcx, %rax
addq -0xec0(%rbp), %rax
movl %eax, -0xf14(%rbp)
movq -0xea0(%rbp), %rdi
movl -0xf14(%rbp), %esi
movq -0xed0(%rbp), %rax
movl (%rax), %edx
callq 0x9fba50
movl %eax, -0xf18(%rbp)
movq -0xef8(%rbp), %rax
movl -0xf18(%rbp), %ecx
addq %rcx, %rax
movq %rax, -0xf20(%rbp)
movq -0xf00(%rbp), %rax
addq -0xec0(%rbp), %rax
movq %rax, -0xf28(%rbp)
movq -0xf28(%rbp), %rax
addq $-0x8, %rax
movq %rax, -0xf30(%rbp)
movq -0xeb0(%rbp), %rax
movl (%rax), %eax
movl %eax, -0xf34(%rbp)
movq -0xeb0(%rbp), %rax
movl 0x4(%rax), %eax
movl %eax, -0xf38(%rbp)
movl $0x0, -0xf3c(%rbp)
movq -0xea0(%rbp), %rax
movq 0xe8(%rax), %rax
movq %rax, -0xf48(%rbp)
cmpl $0x2, -0xec8(%rbp)
jne 0x9df573
movq -0xf48(%rbp), %rax
addq $0xf0, %rax
movq %rax, -0x1930(%rbp)
jmp 0x9df57e
xorl %eax, %eax
movq %rax, -0x1930(%rbp)
jmp 0x9df57e
movq -0x1930(%rbp), %rax
movq %rax, -0xf50(%rbp)
cmpl $0x2, -0xec8(%rbp)
jne 0x9df5a9
movq -0xf48(%rbp), %rax
movq 0x60(%rax), %rax
movq %rax, -0x1938(%rbp)
jmp 0x9df5b4
xorl %eax, %eax
movq %rax, -0x1938(%rbp)
jmp 0x9df5b4
movq -0x1938(%rbp), %rax
movq %rax, -0xf58(%rbp)
cmpl $0x2, -0xec8(%rbp)
jne 0x9df5df
movq -0xf48(%rbp), %rax
movq 0x70(%rax), %rax
movq %rax, -0x1940(%rbp)
jmp 0x9df5ea
xorl %eax, %eax
movq %rax, -0x1940(%rbp)
jmp 0x9df5ea
movq -0x1940(%rbp), %rax
movq %rax, -0xf60(%rbp)
cmpl $0x2, -0xec8(%rbp)
jne 0x9df613
movq -0xf48(%rbp), %rax
movl 0x18(%rax), %eax
movl %eax, -0x1944(%rbp)
jmp 0x9df61d
xorl %eax, %eax
movl %eax, -0x1944(%rbp)
jmp 0x9df61d
movl -0x1944(%rbp), %eax
movl %eax, -0xf64(%rbp)
cmpl $0x2, -0xec8(%rbp)
jne 0x9df646
movq -0xf48(%rbp), %rax
movq 0x8(%rax), %rax
movq %rax, -0x1950(%rbp)
jmp 0x9df651
xorl %eax, %eax
movq %rax, -0x1950(%rbp)
jmp 0x9df651
movq -0x1950(%rbp), %rax
movq %rax, -0xf70(%rbp)
cmpl $0x2, -0xec8(%rbp)
jne 0x9df681
movq -0xf70(%rbp), %rax
movl -0xf64(%rbp), %ecx
addq %rcx, %rax
movq %rax, -0x1958(%rbp)
jmp 0x9df68c
xorl %eax, %eax
movq %rax, -0x1958(%rbp)
jmp 0x9df68c
movq -0x1958(%rbp), %rax
movq %rax, -0xf78(%rbp)
cmpl $0x2, -0xec8(%rbp)
jne 0x9df6b6
movq -0xf48(%rbp), %rax
movq (%rax), %rax
movq %rax, -0x1960(%rbp)
jmp 0x9df6c1
xorl %eax, %eax
movq %rax, -0x1960(%rbp)
jmp 0x9df6c1
movq -0x1960(%rbp), %rax
movq %rax, -0xf80(%rbp)
cmpl $0x2, -0xec8(%rbp)
jne 0x9df6f9
movl -0xf18(%rbp), %eax
movq -0xf80(%rbp), %rcx
movq -0xf70(%rbp), %rdx
subq %rdx, %rcx
subl %ecx, %eax
movl %eax, -0x1964(%rbp)
jmp 0x9df703
xorl %eax, %eax
movl %eax, -0x1964(%rbp)
jmp 0x9df703
movl -0x1964(%rbp), %eax
movl %eax, -0xf84(%rbp)
cmpl $0x2, -0xec8(%rbp)
jne 0x9df72a
movq -0xf50(%rbp), %rax
movl 0x8(%rax), %eax
movl %eax, -0x1968(%rbp)
jmp 0x9df736
movl -0xedc(%rbp), %eax
movl %eax, -0x1968(%rbp)
movl -0x1968(%rbp), %eax
movl %eax, -0xf88(%rbp)
cmpl $0x2, -0xec8(%rbp)
jne 0x9df75d
movq -0xf50(%rbp), %rax
movl 0x4(%rax), %eax
movl %eax, -0x196c(%rbp)
jmp 0x9df769
movl -0xeec(%rbp), %eax
movl %eax, -0x196c(%rbp)
movl -0x196c(%rbp), %eax
movl %eax, -0xf8c(%rbp)
movq -0xf08(%rbp), %rax
movq -0xf20(%rbp), %rcx
subq %rcx, %rax
movq -0xf80(%rbp), %rcx
movq -0xf78(%rbp), %rdx
subq %rdx, %rcx
addq %rcx, %rax
movl %eax, -0xf90(%rbp)
cmpl $0x2, -0xec8(%rbp)
jne 0x9df7ab
jmp 0x9df7ab
cmpl $0x0, -0xf90(%rbp)
sete %al
andb $0x1, %al
movzbl %al, %ecx
movq -0xf08(%rbp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, -0xf08(%rbp)
cmpl $0x0, -0xec8(%rbp)
jne 0x9df86f
movq -0xf08(%rbp), %rax
movq -0xef8(%rbp), %rcx
subq %rcx, %rax
movl %eax, -0xf94(%rbp)
movq -0xea0(%rbp), %rdi
movl -0xf94(%rbp), %esi
movq -0xed0(%rbp), %rax
movl (%rax), %edx
callq 0x9fba50
movl %eax, -0xf98(%rbp)
movl -0xf94(%rbp), %eax
subl -0xf98(%rbp), %eax
movl %eax, -0xf9c(%rbp)
movl -0xf38(%rbp), %eax
cmpl -0xf9c(%rbp), %eax
jbe 0x9df849
movl -0xf38(%rbp), %eax
movl %eax, -0xf3c(%rbp)
movl $0x0, -0xf38(%rbp)
movl -0xf34(%rbp), %eax
cmpl -0xf9c(%rbp), %eax
jbe 0x9df86d
movl -0xf34(%rbp), %eax
movl %eax, -0xf3c(%rbp)
movl $0x0, -0xf34(%rbp)
jmp 0x9df86f
cmpl $0x2, -0xec8(%rbp)
jne 0x9df87a
jmp 0x9df87a
jmp 0x9df87c
movq -0xf08(%rbp), %rax
cmpq -0xf30(%rbp), %rax
jae 0x9e2737
movq -0xf08(%rbp), %rcx
movl -0xedc(%rbp), %eax
movq %rcx, -0xd58(%rbp)
movl %eax, -0xd5c(%rbp)
movl $0x8, -0xd60(%rbp)
movl -0xd60(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1978(%rbp)
subl $0x4, %eax
ja 0x9df8e2
movq -0x1978(%rbp), %rax
leaq 0x1f6cbb(%rip), %rcx # 0xbd6594
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9df8e4
movq -0xd58(%rbp), %rdi
movl -0xd5c(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0xd50(%rbp)
jmp 0x9df969
movq -0xd58(%rbp), %rdi
movl -0xd5c(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0xd50(%rbp)
jmp 0x9df969
movq -0xd58(%rbp), %rdi
movl -0xd5c(%rbp), %esi
callq 0x9fb820
movq %rax, -0xd50(%rbp)
jmp 0x9df969
movq -0xd58(%rbp), %rdi
movl -0xd5c(%rbp), %esi
callq 0x9fb850
movq %rax, -0xd50(%rbp)
jmp 0x9df969
movq -0xd58(%rbp), %rdi
movl -0xd5c(%rbp), %esi
callq 0x9fb880
movq %rax, -0xd50(%rbp)
movq -0xd50(%rbp), %rax
movq %rax, -0xfb8(%rbp)
movq -0xf08(%rbp), %rdx
movl -0xeec(%rbp), %ecx
movl -0xec4(%rbp), %eax
movq %rdx, -0xd70(%rbp)
movl %ecx, -0xd74(%rbp)
movl %eax, -0xd78(%rbp)
movl -0xd78(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1980(%rbp)
subl $0x4, %eax
ja 0x9df9cb
movq -0x1980(%rbp), %rax
leaq 0x1f6be6(%rip), %rcx # 0xbd65a8
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9df9cd
movq -0xd70(%rbp), %rdi
movl -0xd74(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0xd68(%rbp)
jmp 0x9dfa52
movq -0xd70(%rbp), %rdi
movl -0xd74(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0xd68(%rbp)
jmp 0x9dfa52
movq -0xd70(%rbp), %rdi
movl -0xd74(%rbp), %esi
callq 0x9fb820
movq %rax, -0xd68(%rbp)
jmp 0x9dfa52
movq -0xd70(%rbp), %rdi
movl -0xd74(%rbp), %esi
callq 0x9fb850
movq %rax, -0xd68(%rbp)
jmp 0x9dfa52
movq -0xd70(%rbp), %rdi
movl -0xd74(%rbp), %esi
callq 0x9fb880
movq %rax, -0xd68(%rbp)
movq -0xd68(%rbp), %rax
movq %rax, -0xfc0(%rbp)
movq -0xf08(%rbp), %rcx
movl -0xf88(%rbp), %eax
movq %rcx, -0xd88(%rbp)
movl %eax, -0xd8c(%rbp)
movl $0x8, -0xd90(%rbp)
movl -0xd90(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1988(%rbp)
subl $0x4, %eax
ja 0x9dfab2
movq -0x1988(%rbp), %rax
leaq 0x1f6b13(%rip), %rcx # 0xbd65bc
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9dfab4
movq -0xd88(%rbp), %rdi
movl -0xd8c(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0xd80(%rbp)
jmp 0x9dfb39
movq -0xd88(%rbp), %rdi
movl -0xd8c(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0xd80(%rbp)
jmp 0x9dfb39
movq -0xd88(%rbp), %rdi
movl -0xd8c(%rbp), %esi
callq 0x9fb820
movq %rax, -0xd80(%rbp)
jmp 0x9dfb39
movq -0xd88(%rbp), %rdi
movl -0xd8c(%rbp), %esi
callq 0x9fb850
movq %rax, -0xd80(%rbp)
jmp 0x9dfb39
movq -0xd88(%rbp), %rdi
movl -0xd8c(%rbp), %esi
callq 0x9fb880
movq %rax, -0xd80(%rbp)
movq -0xd80(%rbp), %rax
movq %rax, -0xfc8(%rbp)
movq -0xf08(%rbp), %rdx
movl -0xf8c(%rbp), %ecx
movl -0xec4(%rbp), %eax
movq %rdx, -0xda0(%rbp)
movl %ecx, -0xda4(%rbp)
movl %eax, -0xda8(%rbp)
movl -0xda8(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1990(%rbp)
subl $0x4, %eax
ja 0x9dfb9b
movq -0x1990(%rbp), %rax
leaq 0x1f6a3e(%rip), %rcx # 0xbd65d0
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9dfb9d
movq -0xda0(%rbp), %rdi
movl -0xda4(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0xd98(%rbp)
jmp 0x9dfc22
movq -0xda0(%rbp), %rdi
movl -0xda4(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0xd98(%rbp)
jmp 0x9dfc22
movq -0xda0(%rbp), %rdi
movl -0xda4(%rbp), %esi
callq 0x9fb820
movq %rax, -0xd98(%rbp)
jmp 0x9dfc22
movq -0xda0(%rbp), %rdi
movl -0xda4(%rbp), %esi
callq 0x9fb850
movq %rax, -0xd98(%rbp)
jmp 0x9dfc22
movq -0xda0(%rbp), %rdi
movl -0xda4(%rbp), %esi
callq 0x9fb880
movq %rax, -0xd98(%rbp)
movq -0xd98(%rbp), %rax
movq %rax, -0xfd0(%rbp)
movq -0xf08(%rbp), %rax
movq -0xef8(%rbp), %rcx
subq %rcx, %rax
movl %eax, -0xfd4(%rbp)
movq -0xed8(%rbp), %rax
movq -0xfb8(%rbp), %rcx
movl (%rax,%rcx,4), %eax
movl %eax, -0xfd8(%rbp)
movq -0xee8(%rbp), %rax
movq -0xfc0(%rbp), %rcx
movl (%rax,%rcx,4), %eax
movl %eax, -0xfdc(%rbp)
movq -0xef8(%rbp), %rax
movl -0xfd8(%rbp), %ecx
addq %rcx, %rax
movq %rax, -0xfe8(%rbp)
movq -0xef8(%rbp), %rax
movl -0xfdc(%rbp), %ecx
addq %rcx, %rax
movq %rax, -0xff0(%rbp)
movl -0xfd4(%rbp), %eax
addl $0x1, %eax
subl -0xf34(%rbp), %eax
movl %eax, -0xff4(%rbp)
cmpl $0x2, -0xec8(%rbp)
jne 0x9dfcf0
movl -0xff4(%rbp), %eax
cmpl -0xf18(%rbp), %eax
jae 0x9dfcf0
movq -0xf70(%rbp), %rax
movl -0xff4(%rbp), %ecx
subl -0xf84(%rbp), %ecx
movl %ecx, %ecx
addq %rcx, %rax
movq %rax, -0x1998(%rbp)
jmp 0x9dfd07
movq -0xef8(%rbp), %rax
movl -0xff4(%rbp), %ecx
addq %rcx, %rax
movq %rax, -0x1998(%rbp)
movq -0x1998(%rbp), %rax
movq %rax, -0x1000(%rbp)
movl -0xfd4(%rbp), %edx
movq -0xee8(%rbp), %rax
movq -0xfc0(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
movq -0xed8(%rbp), %rax
movq -0xfb8(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
cmpl $0x2, -0xec8(%rbp)
jne 0x9e01ba
movl -0xf18(%rbp), %eax
subl $0x1, %eax
subl -0xff4(%rbp), %eax
cmpl $0x3, %eax
jb 0x9e01ba
movq -0x1000(%rbp), %rdi
callq 0x9fb8d0
movl %eax, -0x199c(%rbp)
movq -0xf08(%rbp), %rdi
addq $0x1, %rdi
callq 0x9fb8d0
movl %eax, %ecx
movl -0x199c(%rbp), %eax
cmpl %ecx, %eax
jne 0x9e01ba
movl -0xff4(%rbp), %eax
cmpl -0xf18(%rbp), %eax
jae 0x9dfdb2
movq -0xf80(%rbp), %rax
movq %rax, -0x19a8(%rbp)
jmp 0x9dfdc0
movq -0xf28(%rbp), %rax
movq %rax, -0x19a8(%rbp)
movq -0x19a8(%rbp), %rax
movq %rax, -0x1008(%rbp)
movq -0xf08(%rbp), %rdi
addq $0x1, %rdi
addq $0x4, %rdi
movq -0x1000(%rbp), %rsi
addq $0x4, %rsi
movq -0xf28(%rbp), %rdx
movq -0x1008(%rbp), %rcx
movq -0xf20(%rbp), %r8
callq 0x9fbad0
addq $0x4, %rax
movq %rax, -0xfa8(%rbp)
movq -0xf08(%rbp), %rax
addq $0x1, %rax
movq %rax, -0xf08(%rbp)
movq -0xea8(%rbp), %rdi
movq -0xf08(%rbp), %rsi
movq -0xf10(%rbp), %rax
subq %rax, %rsi
movq -0xf10(%rbp), %rdx
movq -0xf28(%rbp), %rcx
movq -0xfa8(%rbp), %rax
subq $0x3, %rax
movq %rdi, -0x888(%rbp)
movq %rsi, -0x890(%rbp)
movq %rdx, -0x898(%rbp)
movq %rcx, -0x8a0(%rbp)
movl $0x0, -0x8a4(%rbp)
movq %rax, -0x8b0(%rbp)
movq -0x8a0(%rbp), %rax
addq $-0x20, %rax
movq %rax, -0x8b8(%rbp)
movq -0x898(%rbp), %rax
addq -0x890(%rbp), %rax
movq %rax, -0x8c0(%rbp)
movq -0x8c0(%rbp), %rax
cmpq -0x8b8(%rbp), %rax
ja 0x9e0099
movq -0x888(%rbp), %rax
movq 0x18(%rax), %rdi
movq -0x898(%rbp), %rsi
callq 0x9fbd80
cmpq $0x10, -0x890(%rbp)
jbe 0x9e0097
movq -0x888(%rbp), %rax
movq 0x18(%rax), %rdx
addq $0x10, %rdx
movq -0x898(%rbp), %rcx
addq $0x10, %rcx
movq -0x890(%rbp), %rax
subq $0x10, %rax
movq %rdx, -0x148(%rbp)
movq %rcx, -0x150(%rbp)
movq %rax, -0x158(%rbp)
movl $0x0, -0x15c(%rbp)
movq -0x148(%rbp), %rax
movq -0x150(%rbp), %rcx
subq %rcx, %rax
movq %rax, -0x168(%rbp)
movq -0x150(%rbp), %rax
movq %rax, -0x170(%rbp)
movq -0x148(%rbp), %rax
movq %rax, -0x178(%rbp)
movq -0x178(%rbp), %rax
addq -0x158(%rbp), %rax
movq %rax, -0x180(%rbp)
cmpl $0x1, -0x15c(%rbp)
jne 0x9dffcb
cmpq $0x10, -0x168(%rbp)
jge 0x9dffcb
jmp 0x9dff7f
movq -0x178(%rbp), %rdi
movq -0x170(%rbp), %rsi
callq 0x9fbf50
movq -0x178(%rbp), %rax
addq $0x8, %rax
movq %rax, -0x178(%rbp)
movq -0x170(%rbp), %rax
addq $0x8, %rax
movq %rax, -0x170(%rbp)
movq -0x178(%rbp), %rax
cmpq -0x180(%rbp), %rax
jb 0x9dff7f
jmp 0x9e0095
movq -0x178(%rbp), %rdi
movq -0x170(%rbp), %rsi
callq 0x9fbd80
movl $0x10, %eax
cmpq -0x158(%rbp), %rax
jl 0x9dfff1
jmp 0x9e0095
movq -0x178(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x178(%rbp)
movq -0x170(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x170(%rbp)
movq -0x178(%rbp), %rdi
movq -0x170(%rbp), %rsi
callq 0x9fbd80
movq -0x178(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x178(%rbp)
movq -0x170(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x170(%rbp)
movq -0x178(%rbp), %rdi
movq -0x170(%rbp), %rsi
callq 0x9fbd80
movq -0x178(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x178(%rbp)
movq -0x170(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x170(%rbp)
movq -0x178(%rbp), %rax
cmpq -0x180(%rbp), %rax
jb 0x9e0015
jmp 0x9e0095
jmp 0x9e0097
jmp 0x9e00be
movq -0x888(%rbp), %rax
movq 0x18(%rax), %rdi
movq -0x898(%rbp), %rsi
movq -0x8c0(%rbp), %rdx
movq -0x8b8(%rbp), %rcx
callq 0x9fbdb0
movq -0x890(%rbp), %rcx
movq -0x888(%rbp), %rax
addq 0x18(%rax), %rcx
movq %rcx, 0x18(%rax)
cmpq $0xffff, -0x890(%rbp) # imm = 0xFFFF
jbe 0x9e0117
movq -0x888(%rbp), %rax
movl $0x1, 0x48(%rax)
movq -0x888(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x888(%rbp), %rcx
movq (%rcx), %rcx
subq %rcx, %rax
sarq $0x3, %rax
movl %eax, %ecx
movq -0x888(%rbp), %rax
movl %ecx, 0x4c(%rax)
movq -0x890(%rbp), %rax
movw %ax, %cx
movq -0x888(%rbp), %rax
movq 0x8(%rax), %rax
movw %cx, 0x4(%rax)
movl -0x8a4(%rbp), %ecx
addl $0x1, %ecx
movq -0x888(%rbp), %rax
movq 0x8(%rax), %rax
movl %ecx, (%rax)
cmpq $0xffff, -0x8b0(%rbp) # imm = 0xFFFF
jbe 0x9e0189
movq -0x888(%rbp), %rax
movl $0x2, 0x48(%rax)
movq -0x888(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x888(%rbp), %rcx
movq (%rcx), %rcx
subq %rcx, %rax
sarq $0x3, %rax
movl %eax, %ecx
movq -0x888(%rbp), %rax
movl %ecx, 0x4c(%rax)
movq -0x8b0(%rbp), %rax
movw %ax, %cx
movq -0x888(%rbp), %rax
movq 0x8(%rax), %rax
movw %cx, 0x6(%rax)
movq -0x888(%rbp), %rax
movq 0x8(%rax), %rcx
addq $0x8, %rcx
movq %rcx, 0x8(%rax)
jmp 0x9e148a
cmpl $0x0, -0xec8(%rbp)
jne 0x9e0628
cmpl $0x0, -0xf34(%rbp)
seta %al
andb $0x1, %al
movzbl %al, %eax
movl %eax, -0x19ac(%rbp)
movq -0xf08(%rbp), %rdi
addq $0x1, %rdi
movl -0xf34(%rbp), %eax
movl %eax, %ecx
xorl %eax, %eax
subq %rcx, %rax
addq %rax, %rdi
callq 0x9fb8d0
movl %eax, -0x19b0(%rbp)
movq -0xf08(%rbp), %rdi
addq $0x1, %rdi
callq 0x9fb8d0
movl -0x19b0(%rbp), %ecx
movl %eax, %edx
movl -0x19ac(%rbp), %eax
cmpl %edx, %ecx
sete %cl
andb $0x1, %cl
movzbl %cl, %ecx
andl %ecx, %eax
cmpl $0x0, %eax
je 0x9e0628
movq -0xf08(%rbp), %rdi
addq $0x1, %rdi
addq $0x4, %rdi
movq -0xf08(%rbp), %rsi
addq $0x1, %rsi
addq $0x4, %rsi
movl -0xf34(%rbp), %eax
movl %eax, %ecx
xorl %eax, %eax
subq %rcx, %rax
addq %rax, %rsi
movq -0xf28(%rbp), %rdx
callq 0x9fbb90
addq $0x4, %rax
movq %rax, -0xfa8(%rbp)
movq -0xf08(%rbp), %rax
addq $0x1, %rax
movq %rax, -0xf08(%rbp)
movq -0xea8(%rbp), %rdi
movq -0xf08(%rbp), %rsi
movq -0xf10(%rbp), %rax
subq %rax, %rsi
movq -0xf10(%rbp), %rdx
movq -0xf28(%rbp), %rcx
movq -0xfa8(%rbp), %rax
subq $0x3, %rax
movq %rdi, -0x848(%rbp)
movq %rsi, -0x850(%rbp)
movq %rdx, -0x858(%rbp)
movq %rcx, -0x860(%rbp)
movl $0x0, -0x864(%rbp)
movq %rax, -0x870(%rbp)
movq -0x860(%rbp), %rax
addq $-0x20, %rax
movq %rax, -0x878(%rbp)
movq -0x858(%rbp), %rax
addq -0x850(%rbp), %rax
movq %rax, -0x880(%rbp)
movq -0x880(%rbp), %rax
cmpq -0x878(%rbp), %rax
ja 0x9e0507
movq -0x848(%rbp), %rax
movq 0x18(%rax), %rdi
movq -0x858(%rbp), %rsi
callq 0x9fbd80
cmpq $0x10, -0x850(%rbp)
jbe 0x9e0505
movq -0x848(%rbp), %rax
movq 0x18(%rax), %rdx
addq $0x10, %rdx
movq -0x858(%rbp), %rcx
addq $0x10, %rcx
movq -0x850(%rbp), %rax
subq $0x10, %rax
movq %rdx, -0x188(%rbp)
movq %rcx, -0x190(%rbp)
movq %rax, -0x198(%rbp)
movl $0x0, -0x19c(%rbp)
movq -0x188(%rbp), %rax
movq -0x190(%rbp), %rcx
subq %rcx, %rax
movq %rax, -0x1a8(%rbp)
movq -0x190(%rbp), %rax
movq %rax, -0x1b0(%rbp)
movq -0x188(%rbp), %rax
movq %rax, -0x1b8(%rbp)
movq -0x1b8(%rbp), %rax
addq -0x198(%rbp), %rax
movq %rax, -0x1c0(%rbp)
cmpl $0x1, -0x19c(%rbp)
jne 0x9e0439
cmpq $0x10, -0x1a8(%rbp)
jge 0x9e0439
jmp 0x9e03ed
movq -0x1b8(%rbp), %rdi
movq -0x1b0(%rbp), %rsi
callq 0x9fbf50
movq -0x1b8(%rbp), %rax
addq $0x8, %rax
movq %rax, -0x1b8(%rbp)
movq -0x1b0(%rbp), %rax
addq $0x8, %rax
movq %rax, -0x1b0(%rbp)
movq -0x1b8(%rbp), %rax
cmpq -0x1c0(%rbp), %rax
jb 0x9e03ed
jmp 0x9e0503
movq -0x1b8(%rbp), %rdi
movq -0x1b0(%rbp), %rsi
callq 0x9fbd80
movl $0x10, %eax
cmpq -0x198(%rbp), %rax
jl 0x9e045f
jmp 0x9e0503
movq -0x1b8(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x1b8(%rbp)
movq -0x1b0(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x1b0(%rbp)
movq -0x1b8(%rbp), %rdi
movq -0x1b0(%rbp), %rsi
callq 0x9fbd80
movq -0x1b8(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x1b8(%rbp)
movq -0x1b0(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x1b0(%rbp)
movq -0x1b8(%rbp), %rdi
movq -0x1b0(%rbp), %rsi
callq 0x9fbd80
movq -0x1b8(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x1b8(%rbp)
movq -0x1b0(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x1b0(%rbp)
movq -0x1b8(%rbp), %rax
cmpq -0x1c0(%rbp), %rax
jb 0x9e0483
jmp 0x9e0503
jmp 0x9e0505
jmp 0x9e052c
movq -0x848(%rbp), %rax
movq 0x18(%rax), %rdi
movq -0x858(%rbp), %rsi
movq -0x880(%rbp), %rdx
movq -0x878(%rbp), %rcx
callq 0x9fbdb0
movq -0x850(%rbp), %rcx
movq -0x848(%rbp), %rax
addq 0x18(%rax), %rcx
movq %rcx, 0x18(%rax)
cmpq $0xffff, -0x850(%rbp) # imm = 0xFFFF
jbe 0x9e0585
movq -0x848(%rbp), %rax
movl $0x1, 0x48(%rax)
movq -0x848(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x848(%rbp), %rcx
movq (%rcx), %rcx
subq %rcx, %rax
sarq $0x3, %rax
movl %eax, %ecx
movq -0x848(%rbp), %rax
movl %ecx, 0x4c(%rax)
movq -0x850(%rbp), %rax
movw %ax, %cx
movq -0x848(%rbp), %rax
movq 0x8(%rax), %rax
movw %cx, 0x4(%rax)
movl -0x864(%rbp), %ecx
addl $0x1, %ecx
movq -0x848(%rbp), %rax
movq 0x8(%rax), %rax
movl %ecx, (%rax)
cmpq $0xffff, -0x870(%rbp) # imm = 0xFFFF
jbe 0x9e05f7
movq -0x848(%rbp), %rax
movl $0x2, 0x48(%rax)
movq -0x848(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x848(%rbp), %rcx
movq (%rcx), %rcx
subq %rcx, %rax
sarq $0x3, %rax
movl %eax, %ecx
movq -0x848(%rbp), %rax
movl %ecx, 0x4c(%rax)
movq -0x870(%rbp), %rax
movw %ax, %cx
movq -0x848(%rbp), %rax
movq 0x8(%rax), %rax
movw %cx, 0x6(%rax)
movq -0x848(%rbp), %rax
movq 0x8(%rax), %rcx
addq $0x8, %rcx
movq %rcx, 0x8(%rax)
jmp 0x9e148a
movl -0xfd8(%rbp), %eax
cmpl -0xf18(%rbp), %eax
jbe 0x9e075d
movq -0xfe8(%rbp), %rdi
callq 0x9fb980
movq %rax, -0x19b8(%rbp)
movq -0xf08(%rbp), %rdi
callq 0x9fb980
movq %rax, %rcx
movq -0x19b8(%rbp), %rax
cmpq %rcx, %rax
jne 0x9e0758
movq -0xf08(%rbp), %rdi
addq $0x8, %rdi
movq -0xfe8(%rbp), %rsi
addq $0x8, %rsi
movq -0xf28(%rbp), %rdx
callq 0x9fbb90
addq $0x8, %rax
movq %rax, -0xfa8(%rbp)
movq -0xf08(%rbp), %rax
movq -0xfe8(%rbp), %rcx
subq %rcx, %rax
movl %eax, -0xfac(%rbp)
movq -0xf08(%rbp), %rax
cmpq -0xf10(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %ecx
movq -0xfe8(%rbp), %rax
cmpq -0xf20(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %eax
andl %eax, %ecx
xorl %eax, %eax
cmpl $0x0, %ecx
movb %al, -0x19b9(%rbp)
je 0x9e070c
movq -0xf08(%rbp), %rax
movzbl -0x1(%rax), %eax
movq -0xfe8(%rbp), %rcx
movzbl -0x1(%rcx), %ecx
cmpl %ecx, %eax
sete %al
movb %al, -0x19b9(%rbp)
movb -0x19b9(%rbp), %al
testb $0x1, %al
jne 0x9e0718
jmp 0x9e0753
movq -0xf08(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0xf08(%rbp)
movq -0xfe8(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0xfe8(%rbp)
movq -0xfa8(%rbp), %rax
addq $0x1, %rax
movq %rax, -0xfa8(%rbp)
jmp 0x9e06b0
jmp 0x9e10d7
jmp 0x9e08dd
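# Second candidate, used only when the mode flag at -0xec8(%rbp) equals 2: the index
# from the table at -0xf58 is rebased against -0xf70, so the match may lie in a
# separate (dictionary-like) segment bounded below by -0xf78. The five-argument helper
# 0x9fbad0 presumably counts matching bytes across the segment end, and the backward
# extension below uses that segment's lower bound.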
cmpl $0x2, -0xec8(%rbp)
jne 0x9e08db
movq -0xf58(%rbp), %rax
movq -0xfc8(%rbp), %rcx
movl (%rax,%rcx,4), %eax
movl %eax, -0x100c(%rbp)
movq -0xf70(%rbp), %rax
movl -0x100c(%rbp), %ecx
addq %rcx, %rax
movq %rax, -0x1018(%rbp)
movq -0x1018(%rbp), %rax
cmpq -0xf78(%rbp), %rax
jbe 0x9e08d9
movq -0x1018(%rbp), %rdi
callq 0x9fb980
movq %rax, -0x19c8(%rbp)
movq -0xf08(%rbp), %rdi
callq 0x9fb980
movq %rax, %rcx
movq -0x19c8(%rbp), %rax
cmpq %rcx, %rax
jne 0x9e08d9
movq -0xf08(%rbp), %rdi
addq $0x8, %rdi
movq -0x1018(%rbp), %rsi
addq $0x8, %rsi
movq -0xf28(%rbp), %rdx
movq -0xf80(%rbp), %rcx
movq -0xf20(%rbp), %r8
callq 0x9fbad0
addq $0x8, %rax
movq %rax, -0xfa8(%rbp)
movl -0xfd4(%rbp), %eax
subl -0x100c(%rbp), %eax
subl -0xf84(%rbp), %eax
movl %eax, -0xfac(%rbp)
movq -0xf08(%rbp), %rax
cmpq -0xf10(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %ecx
movq -0x1018(%rbp), %rax
cmpq -0xf78(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %eax
andl %eax, %ecx
xorl %eax, %eax
cmpl $0x0, %ecx
movb %al, -0x19c9(%rbp)
je 0x9e088d
movq -0xf08(%rbp), %rax
movzbl -0x1(%rax), %eax
movq -0x1018(%rbp), %rcx
movzbl -0x1(%rcx), %ecx
cmpl %ecx, %eax
sete %al
movb %al, -0x19c9(%rbp)
movb -0x19c9(%rbp), %al
testb $0x1, %al
jne 0x9e0899
jmp 0x9e08d4
movq -0xf08(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0xf08(%rbp)
movq -0x1018(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0x1018(%rbp)
movq -0xfa8(%rbp), %rax
addq $0x1, %rax
movq %rax, -0xfa8(%rbp)
jmp 0x9e0831
jmp 0x9e10d7
jmp 0x9e08db
jmp 0x9e08dd
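# Short probe: 0x9fb8d0 likely reads 4 bytes. If the value at -0xfdc (a candidate
# index) is above the limit at -0xf18 and its 4 bytes equal those at ip, a match was
# found; otherwise, in mode 2, the alternate table at -0xf60 is tried the same way
# against the dictionary-like segment.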
movl -0xfdc(%rbp), %eax
cmpl -0xf18(%rbp), %eax
jbe 0x9e091f
movq -0xff0(%rbp), %rdi
callq 0x9fb8d0
movl %eax, -0x19d0(%rbp)
movq -0xf08(%rbp), %rdi
callq 0x9fb8d0
movl %eax, %ecx
movl -0x19d0(%rbp), %eax
cmpl %ecx, %eax
jne 0x9e091a
jmp 0x9e09d4
jmp 0x9e09a8
cmpl $0x2, -0xec8(%rbp)
jne 0x9e09a6
movq -0xf60(%rbp), %rax
movq -0xfd0(%rbp), %rcx
movl (%rax,%rcx,4), %eax
movl %eax, -0x101c(%rbp)
movq -0xf70(%rbp), %rax
movl -0x101c(%rbp), %ecx
addq %rcx, %rax
movq %rax, -0xff0(%rbp)
movl -0x101c(%rbp), %eax
addl -0xf84(%rbp), %eax
movl %eax, -0xfdc(%rbp)
movq -0xff0(%rbp), %rax
cmpq -0xf78(%rbp), %rax
jbe 0x9e09a4
movq -0xff0(%rbp), %rdi
callq 0x9fb8d0
movl %eax, -0x19d4(%rbp)
movq -0xf08(%rbp), %rdi
callq 0x9fb8d0
movl %eax, %ecx
movl -0x19d4(%rbp), %eax
cmpl %ecx, %eax
jne 0x9e09a4
jmp 0x9e09d4
jmp 0x9e09a6
jmp 0x9e09a8
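# No candidate matched: advance ip by ((ip - anchor) >> 8) + 1, where the anchor at
# -0xf10(%rbp) is reset to ip after every stored match, so the search stride grows
# with the distance since the last match; then continue the loop.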
movq -0xf08(%rbp), %rax
movq -0xf10(%rbp), %rcx
subq %rcx, %rax
sarq $0x8, %rax
addq $0x1, %rax
addq -0xf08(%rbp), %rax
movq %rax, -0xf08(%rbp)
jmp 0x9df87c
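# Inlined switch on a byte-width value (8 here, at -0xdc0): width minus 4 indexes a
# five-entry jump table and each case calls a different helper (0x9fb7c0 .. 0x9fb880)
# with a pointer (ip + 1) and a bit count. The helpers presumably hash 4 to 8 input
# bytes into a table index; this dispatch pattern repeats many times below.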
movq -0xf08(%rbp), %rcx
incq %rcx
movl -0xedc(%rbp), %eax
movq %rcx, -0xdb8(%rbp)
movl %eax, -0xdbc(%rbp)
movl $0x8, -0xdc0(%rbp)
movl -0xdc0(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x19e0(%rbp)
subl $0x4, %eax
ja 0x9e0a29
movq -0x19e0(%rbp), %rax
leaq 0x1f5bc4(%rip), %rcx # 0xbd65e4
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9e0a2b
movq -0xdb8(%rbp), %rdi
movl -0xdbc(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0xdb0(%rbp)
jmp 0x9e0ab0
movq -0xdb8(%rbp), %rdi
movl -0xdbc(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0xdb0(%rbp)
jmp 0x9e0ab0
movq -0xdb8(%rbp), %rdi
movl -0xdbc(%rbp), %esi
callq 0x9fb820
movq %rax, -0xdb0(%rbp)
jmp 0x9e0ab0
movq -0xdb8(%rbp), %rdi
movl -0xdbc(%rbp), %esi
callq 0x9fb850
movq %rax, -0xdb0(%rbp)
jmp 0x9e0ab0
movq -0xdb8(%rbp), %rdi
movl -0xdbc(%rbp), %esi
callq 0x9fb880
movq %rax, -0xdb0(%rbp)
movq -0xdb0(%rbp), %rax
movq %rax, -0x1028(%rbp)
movq -0xf08(%rbp), %rcx
incq %rcx
movl -0xf88(%rbp), %eax
movq %rcx, -0xdd0(%rbp)
movl %eax, -0xdd4(%rbp)
movl $0x8, -0xdd8(%rbp)
movl -0xdd8(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x19e8(%rbp)
subl $0x4, %eax
ja 0x9e0b13
movq -0x19e8(%rbp), %rax
leaq 0x1f5aee(%rip), %rcx # 0xbd65f8
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9e0b15
movq -0xdd0(%rbp), %rdi
movl -0xdd4(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0xdc8(%rbp)
jmp 0x9e0b9a
movq -0xdd0(%rbp), %rdi
movl -0xdd4(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0xdc8(%rbp)
jmp 0x9e0b9a
movq -0xdd0(%rbp), %rdi
movl -0xdd4(%rbp), %esi
callq 0x9fb820
movq %rax, -0xdc8(%rbp)
jmp 0x9e0b9a
movq -0xdd0(%rbp), %rdi
movl -0xdd4(%rbp), %esi
callq 0x9fb850
movq %rax, -0xdc8(%rbp)
jmp 0x9e0b9a
movq -0xdd0(%rbp), %rdi
movl -0xdd4(%rbp), %esi
callq 0x9fb880
movq %rax, -0xdc8(%rbp)
movq -0xdc8(%rbp), %rax
movq %rax, -0x1030(%rbp)
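# Look up the previous entry for this index in the table at -0xed8, turn it into a
# candidate pointer relative to the base at -0xef8, then overwrite the slot with the
# current position plus 1 before the candidate is checked.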
movq -0xed8(%rbp), %rax
movq -0x1028(%rbp), %rcx
movl (%rax,%rcx,4), %eax
movl %eax, -0x1034(%rbp)
movq -0xef8(%rbp), %rax
movl -0x1034(%rbp), %ecx
addq %rcx, %rax
movq %rax, -0x1040(%rbp)
movl -0xfd4(%rbp), %edx
addl $0x1, %edx
movq -0xed8(%rbp), %rax
movq -0x1028(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
movl -0x1034(%rbp), %eax
cmpl -0xf18(%rbp), %eax
jbe 0x9e0d3b
movq -0x1040(%rbp), %rdi
callq 0x9fb980
movq %rax, -0x19f0(%rbp)
movq -0xf08(%rbp), %rdi
addq $0x1, %rdi
callq 0x9fb980
movq %rax, %rcx
movq -0x19f0(%rbp), %rax
cmpq %rcx, %rax
jne 0x9e0d36
movq -0xf08(%rbp), %rdi
addq $0x9, %rdi
movq -0x1040(%rbp), %rsi
addq $0x8, %rsi
movq -0xf28(%rbp), %rdx
callq 0x9fbb90
addq $0x8, %rax
movq %rax, -0xfa8(%rbp)
movq -0xf08(%rbp), %rax
addq $0x1, %rax
movq %rax, -0xf08(%rbp)
movq -0xf08(%rbp), %rax
movq -0x1040(%rbp), %rcx
subq %rcx, %rax
movl %eax, -0xfac(%rbp)
movq -0xf08(%rbp), %rax
cmpq -0xf10(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %ecx
movq -0x1040(%rbp), %rax
cmpq -0xf20(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %eax
andl %eax, %ecx
xorl %eax, %eax
cmpl $0x0, %ecx
movb %al, -0x19f1(%rbp)
je 0x9e0cea
movq -0xf08(%rbp), %rax
movzbl -0x1(%rax), %eax
movq -0x1040(%rbp), %rcx
movzbl -0x1(%rcx), %ecx
cmpl %ecx, %eax
sete %al
movb %al, -0x19f1(%rbp)
movb -0x19f1(%rbp), %al
testb $0x1, %al
jne 0x9e0cf6
jmp 0x9e0d31
movq -0xf08(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0xf08(%rbp)
movq -0x1040(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0x1040(%rbp)
movq -0xfa8(%rbp), %rax
addq $0x1, %rax
movq %rax, -0xfa8(%rbp)
jmp 0x9e0c8e
jmp 0x9e10d7
jmp 0x9e0ed8
cmpl $0x2, -0xec8(%rbp)
jne 0x9e0ed6
movq -0xf58(%rbp), %rax
movq -0x1030(%rbp), %rcx
movl (%rax,%rcx,4), %eax
movl %eax, -0x1044(%rbp)
movq -0xf70(%rbp), %rax
movl -0x1044(%rbp), %ecx
addq %rcx, %rax
movq %rax, -0x1050(%rbp)
movq -0x1050(%rbp), %rax
cmpq -0xf78(%rbp), %rax
jbe 0x9e0ed4
movq -0x1050(%rbp), %rdi
callq 0x9fb980
movq %rax, -0x1a00(%rbp)
movq -0xf08(%rbp), %rdi
addq $0x1, %rdi
callq 0x9fb980
movq %rax, %rcx
movq -0x1a00(%rbp), %rax
cmpq %rcx, %rax
jne 0x9e0ed4
movq -0xf08(%rbp), %rdi
addq $0x1, %rdi
addq $0x8, %rdi
movq -0x1050(%rbp), %rsi
addq $0x8, %rsi
movq -0xf28(%rbp), %rdx
movq -0xf80(%rbp), %rcx
movq -0xf20(%rbp), %r8
callq 0x9fbad0
addq $0x8, %rax
movq %rax, -0xfa8(%rbp)
movq -0xf08(%rbp), %rax
addq $0x1, %rax
movq %rax, -0xf08(%rbp)
movl -0xfd4(%rbp), %eax
addl $0x1, %eax
subl -0x1044(%rbp), %eax
subl -0xf84(%rbp), %eax
movl %eax, -0xfac(%rbp)
movq -0xf08(%rbp), %rax
cmpq -0xf10(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %ecx
movq -0x1050(%rbp), %rax
cmpq -0xf78(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %eax
andl %eax, %ecx
xorl %eax, %eax
cmpl $0x0, %ecx
movb %al, -0x1a01(%rbp)
je 0x9e0e88
movq -0xf08(%rbp), %rax
movzbl -0x1(%rax), %eax
movq -0x1050(%rbp), %rcx
movzbl -0x1(%rcx), %ecx
cmpl %ecx, %eax
sete %al
movb %al, -0x1a01(%rbp)
movb -0x1a01(%rbp), %al
testb $0x1, %al
jne 0x9e0e94
jmp 0x9e0ecf
movq -0xf08(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0xf08(%rbp)
movq -0x1050(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0x1050(%rbp)
movq -0xfa8(%rbp), %rax
addq $0x1, %rax
movq %rax, -0xfa8(%rbp)
jmp 0x9e0e2c
jmp 0x9e10d7
jmp 0x9e0ed6
jmp 0x9e0ed8
cmpl $0x2, -0xec8(%rbp)
jne 0x9e0fec
movl -0xfdc(%rbp), %eax
cmpl -0xf18(%rbp), %eax
jae 0x9e0fec
movq -0xf08(%rbp), %rdi
addq $0x4, %rdi
movq -0xff0(%rbp), %rsi
addq $0x4, %rsi
movq -0xf28(%rbp), %rdx
movq -0xf80(%rbp), %rcx
movq -0xf20(%rbp), %r8
callq 0x9fbad0
addq $0x4, %rax
movq %rax, -0xfa8(%rbp)
movl -0xfd4(%rbp), %eax
subl -0xfdc(%rbp), %eax
movl %eax, -0xfac(%rbp)
movq -0xf08(%rbp), %rax
cmpq -0xf10(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %ecx
movq -0xff0(%rbp), %rax
cmpq -0xf78(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %eax
andl %eax, %ecx
xorl %eax, %eax
cmpl $0x0, %ecx
movb %al, -0x1a02(%rbp)
je 0x9e0fa0
movq -0xf08(%rbp), %rax
movzbl -0x1(%rax), %eax
movq -0xff0(%rbp), %rcx
movzbl -0x1(%rcx), %ecx
cmpl %ecx, %eax
sete %al
movb %al, -0x1a02(%rbp)
movb -0x1a02(%rbp), %al
testb $0x1, %al
jne 0x9e0fac
jmp 0x9e0fe7
movq -0xf08(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0xf08(%rbp)
movq -0xff0(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0xff0(%rbp)
movq -0xfa8(%rbp), %rax
addq $0x1, %rax
movq %rax, -0xfa8(%rbp)
jmp 0x9e0f44
jmp 0x9e10d5
movq -0xf08(%rbp), %rdi
addq $0x4, %rdi
movq -0xff0(%rbp), %rsi
addq $0x4, %rsi
movq -0xf28(%rbp), %rdx
callq 0x9fbb90
addq $0x4, %rax
movq %rax, -0xfa8(%rbp)
movq -0xf08(%rbp), %rax
movq -0xff0(%rbp), %rcx
subq %rcx, %rax
movl %eax, -0xfac(%rbp)
movq -0xf08(%rbp), %rax
cmpq -0xf10(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %ecx
movq -0xff0(%rbp), %rax
cmpq -0xf20(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %eax
andl %eax, %ecx
xorl %eax, %eax
cmpl $0x0, %ecx
movb %al, -0x1a03(%rbp)
je 0x9e108c
movq -0xf08(%rbp), %rax
movzbl -0x1(%rax), %eax
movq -0xff0(%rbp), %rcx
movzbl -0x1(%rcx), %ecx
cmpl %ecx, %eax
sete %al
movb %al, -0x1a03(%rbp)
movb -0x1a03(%rbp), %al
testb $0x1, %al
jne 0x9e1098
jmp 0x9e10d3
movq -0xf08(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0xf08(%rbp)
movq -0xff0(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0xff0(%rbp)
movq -0xfa8(%rbp), %rax
addq $0x1, %rax
movq %rax, -0xfa8(%rbp)
jmp 0x9e1030
jmp 0x9e10d5
jmp 0x9e10d7
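# A match was accepted: rotate the repeat offsets (-0xf38 takes the old -0xf34, which
# takes the new offset), then run the same inline sequence store as above with literal
# length = ip - anchor, the offset biased by +2, and the match length biased by -3.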
movl -0xf34(%rbp), %eax
movl %eax, -0xf38(%rbp)
movl -0xfac(%rbp), %eax
movl %eax, -0xf34(%rbp)
movq -0xea8(%rbp), %r8
movq -0xf08(%rbp), %rdi
movq -0xf10(%rbp), %rax
subq %rax, %rdi
movq -0xf10(%rbp), %rsi
movq -0xf28(%rbp), %rdx
movl -0xfac(%rbp), %ecx
addl $0x2, %ecx
movq -0xfa8(%rbp), %rax
subq $0x3, %rax
movq %r8, -0x788(%rbp)
movq %rdi, -0x790(%rbp)
movq %rsi, -0x798(%rbp)
movq %rdx, -0x7a0(%rbp)
movl %ecx, -0x7a4(%rbp)
movq %rax, -0x7b0(%rbp)
movq -0x7a0(%rbp), %rax
addq $-0x20, %rax
movq %rax, -0x7b8(%rbp)
movq -0x798(%rbp), %rax
addq -0x790(%rbp), %rax
movq %rax, -0x7c0(%rbp)
movq -0x7c0(%rbp), %rax
cmpq -0x7b8(%rbp), %rax
ja 0x9e136e
movq -0x788(%rbp), %rax
movq 0x18(%rax), %rdi
movq -0x798(%rbp), %rsi
callq 0x9fbd80
cmpq $0x10, -0x790(%rbp)
jbe 0x9e136c
movq -0x788(%rbp), %rax
movq 0x18(%rax), %rdx
addq $0x10, %rdx
movq -0x798(%rbp), %rcx
addq $0x10, %rcx
movq -0x790(%rbp), %rax
subq $0x10, %rax
movq %rdx, -0x248(%rbp)
movq %rcx, -0x250(%rbp)
movq %rax, -0x258(%rbp)
movl $0x0, -0x25c(%rbp)
movq -0x248(%rbp), %rax
movq -0x250(%rbp), %rcx
subq %rcx, %rax
movq %rax, -0x268(%rbp)
movq -0x250(%rbp), %rax
movq %rax, -0x270(%rbp)
movq -0x248(%rbp), %rax
movq %rax, -0x278(%rbp)
movq -0x278(%rbp), %rax
addq -0x258(%rbp), %rax
movq %rax, -0x280(%rbp)
cmpl $0x1, -0x25c(%rbp)
jne 0x9e12a0
cmpq $0x10, -0x268(%rbp)
jge 0x9e12a0
jmp 0x9e1254
movq -0x278(%rbp), %rdi
movq -0x270(%rbp), %rsi
callq 0x9fbf50
movq -0x278(%rbp), %rax
addq $0x8, %rax
movq %rax, -0x278(%rbp)
movq -0x270(%rbp), %rax
addq $0x8, %rax
movq %rax, -0x270(%rbp)
movq -0x278(%rbp), %rax
cmpq -0x280(%rbp), %rax
jb 0x9e1254
jmp 0x9e136a
movq -0x278(%rbp), %rdi
movq -0x270(%rbp), %rsi
callq 0x9fbd80
movl $0x10, %eax
cmpq -0x258(%rbp), %rax
jl 0x9e12c6
jmp 0x9e136a
movq -0x278(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x278(%rbp)
movq -0x270(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x270(%rbp)
movq -0x278(%rbp), %rdi
movq -0x270(%rbp), %rsi
callq 0x9fbd80
movq -0x278(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x278(%rbp)
movq -0x270(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x270(%rbp)
movq -0x278(%rbp), %rdi
movq -0x270(%rbp), %rsi
callq 0x9fbd80
movq -0x278(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x278(%rbp)
movq -0x270(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x270(%rbp)
movq -0x278(%rbp), %rax
cmpq -0x280(%rbp), %rax
jb 0x9e12ea
jmp 0x9e136a
jmp 0x9e136c
jmp 0x9e1393
movq -0x788(%rbp), %rax
movq 0x18(%rax), %rdi
movq -0x798(%rbp), %rsi
movq -0x7c0(%rbp), %rdx
movq -0x7b8(%rbp), %rcx
callq 0x9fbdb0
movq -0x790(%rbp), %rcx
movq -0x788(%rbp), %rax
addq 0x18(%rax), %rcx
movq %rcx, 0x18(%rax)
cmpq $0xffff, -0x790(%rbp) # imm = 0xFFFF
jbe 0x9e13ec
movq -0x788(%rbp), %rax
movl $0x1, 0x48(%rax)
movq -0x788(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x788(%rbp), %rcx
movq (%rcx), %rcx
subq %rcx, %rax
sarq $0x3, %rax
movl %eax, %ecx
movq -0x788(%rbp), %rax
movl %ecx, 0x4c(%rax)
movq -0x790(%rbp), %rax
movw %ax, %cx
movq -0x788(%rbp), %rax
movq 0x8(%rax), %rax
movw %cx, 0x4(%rax)
movl -0x7a4(%rbp), %ecx
addl $0x1, %ecx
movq -0x788(%rbp), %rax
movq 0x8(%rax), %rax
movl %ecx, (%rax)
cmpq $0xffff, -0x7b0(%rbp) # imm = 0xFFFF
jbe 0x9e145e
movq -0x788(%rbp), %rax
movl $0x2, 0x48(%rax)
movq -0x788(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x788(%rbp), %rcx
movq (%rcx), %rcx
subq %rcx, %rax
sarq $0x3, %rax
movl %eax, %ecx
movq -0x788(%rbp), %rax
movl %ecx, 0x4c(%rax)
movq -0x7b0(%rbp), %rax
movw %ax, %cx
movq -0x788(%rbp), %rax
movq 0x8(%rax), %rax
movw %cx, 0x6(%rax)
movq -0x788(%rbp), %rax
movq 0x8(%rax), %rcx
addq $0x8, %rcx
movq %rcx, 0x8(%rax)
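# Advance ip past the match, reset the anchor at -0xf10 to the new ip, and keep
# looping while ip does not exceed the limit at -0xf30.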
movq -0xfa8(%rbp), %rax
addq -0xf08(%rbp), %rax
movq %rax, -0xf08(%rbp)
movq -0xf08(%rbp), %rax
movq %rax, -0xf10(%rbp)
movq -0xf08(%rbp), %rax
cmpq -0xf30(%rbp), %rax
ja 0x9e2732
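# Table maintenance after a match: the index two bytes past the match start (the value
# at -0xfd4 plus 2) is hashed and written into both tables; the position two bytes
# before the advanced ip goes into the table at -0xed8 and the position one byte
# before it into the table at -0xee8, all through the same jump-table hash dispatch.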
movl -0xfd4(%rbp), %eax
addl $0x2, %eax
movl %eax, -0x1054(%rbp)
movl -0x1054(%rbp), %eax
movl %eax, %ecx
movl %ecx, -0x1a1c(%rbp)
movq -0xed8(%rbp), %rcx
movq %rcx, -0x1a18(%rbp)
movq -0xef8(%rbp), %rcx
addq %rax, %rcx
movl -0xedc(%rbp), %eax
movq %rcx, -0xde8(%rbp)
movl %eax, -0xdec(%rbp)
movl $0x8, -0xdf0(%rbp)
movl -0xdf0(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1a10(%rbp)
subl $0x4, %eax
ja 0x9e1541
movq -0x1a10(%rbp), %rax
leaq 0x1f50d4(%rip), %rcx # 0xbd660c
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9e1543
movq -0xde8(%rbp), %rdi
movl -0xdec(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0xde0(%rbp)
jmp 0x9e15c8
movq -0xde8(%rbp), %rdi
movl -0xdec(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0xde0(%rbp)
jmp 0x9e15c8
movq -0xde8(%rbp), %rdi
movl -0xdec(%rbp), %esi
callq 0x9fb820
movq %rax, -0xde0(%rbp)
jmp 0x9e15c8
movq -0xde8(%rbp), %rdi
movl -0xdec(%rbp), %esi
callq 0x9fb850
movq %rax, -0xde0(%rbp)
jmp 0x9e15c8
movq -0xde8(%rbp), %rdi
movl -0xdec(%rbp), %esi
callq 0x9fb880
movq %rax, -0xde0(%rbp)
movq -0x1a18(%rbp), %rax
movl -0x1a1c(%rbp), %edx
movq -0xde0(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
movq -0xf08(%rbp), %rcx
addq $-0x2, %rcx
movl -0xef8(%rbp), %edx
movl %ecx, %eax
subl %edx, %eax
movl %eax, -0x1a34(%rbp)
movq -0xed8(%rbp), %rax
movq %rax, -0x1a30(%rbp)
movl -0xedc(%rbp), %eax
movq %rcx, -0xe00(%rbp)
movl %eax, -0xe04(%rbp)
movl $0x8, -0xe08(%rbp)
movl -0xe08(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1a28(%rbp)
subl $0x4, %eax
ja 0x9e1653
movq -0x1a28(%rbp), %rax
leaq 0x1f4fd6(%rip), %rcx # 0xbd6620
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9e1655
movq -0xe00(%rbp), %rdi
movl -0xe04(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0xdf8(%rbp)
jmp 0x9e16da
movq -0xe00(%rbp), %rdi
movl -0xe04(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0xdf8(%rbp)
jmp 0x9e16da
movq -0xe00(%rbp), %rdi
movl -0xe04(%rbp), %esi
callq 0x9fb820
movq %rax, -0xdf8(%rbp)
jmp 0x9e16da
movq -0xe00(%rbp), %rdi
movl -0xe04(%rbp), %esi
callq 0x9fb850
movq %rax, -0xdf8(%rbp)
jmp 0x9e16da
movq -0xe00(%rbp), %rdi
movl -0xe04(%rbp), %esi
callq 0x9fb880
movq %rax, -0xdf8(%rbp)
movq -0x1a30(%rbp), %rax
movl -0x1a34(%rbp), %edx
movq -0xdf8(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
movl -0x1054(%rbp), %eax
movl %eax, %ecx
movl %ecx, -0x1a4c(%rbp)
movq -0xee8(%rbp), %rcx
movq %rcx, -0x1a48(%rbp)
movq -0xef8(%rbp), %rdx
addq %rax, %rdx
movl -0xeec(%rbp), %ecx
movl -0xec4(%rbp), %eax
movq %rdx, -0xe18(%rbp)
movl %ecx, -0xe1c(%rbp)
movl %eax, -0xe20(%rbp)
movl -0xe20(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1a40(%rbp)
subl $0x4, %eax
ja 0x9e1764
movq -0x1a40(%rbp), %rax
leaq 0x1f4ed9(%rip), %rcx # 0xbd6634
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9e1766
movq -0xe18(%rbp), %rdi
movl -0xe1c(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0xe10(%rbp)
jmp 0x9e17eb
movq -0xe18(%rbp), %rdi
movl -0xe1c(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0xe10(%rbp)
jmp 0x9e17eb
movq -0xe18(%rbp), %rdi
movl -0xe1c(%rbp), %esi
callq 0x9fb820
movq %rax, -0xe10(%rbp)
jmp 0x9e17eb
movq -0xe18(%rbp), %rdi
movl -0xe1c(%rbp), %esi
callq 0x9fb850
movq %rax, -0xe10(%rbp)
jmp 0x9e17eb
movq -0xe18(%rbp), %rdi
movl -0xe1c(%rbp), %esi
callq 0x9fb880
movq %rax, -0xe10(%rbp)
movq -0x1a48(%rbp), %rax
movl -0x1a4c(%rbp), %edx
movq -0xe10(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
movq -0xf08(%rbp), %rdx
decq %rdx
movl -0xef8(%rbp), %ecx
movl %edx, %eax
subl %ecx, %eax
movl %eax, -0x1a64(%rbp)
movq -0xee8(%rbp), %rax
movq %rax, -0x1a60(%rbp)
movl -0xeec(%rbp), %ecx
movl -0xec4(%rbp), %eax
movq %rdx, -0xe30(%rbp)
movl %ecx, -0xe34(%rbp)
movl %eax, -0xe38(%rbp)
movl -0xe38(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1a58(%rbp)
subl $0x4, %eax
ja 0x9e1877
movq -0x1a58(%rbp), %rax
leaq 0x1f4dda(%rip), %rcx # 0xbd6648
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9e1879
movq -0xe30(%rbp), %rdi
movl -0xe34(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0xe28(%rbp)
jmp 0x9e18fe
movq -0xe30(%rbp), %rdi
movl -0xe34(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0xe28(%rbp)
jmp 0x9e18fe
movq -0xe30(%rbp), %rdi
movl -0xe34(%rbp), %esi
callq 0x9fb820
movq %rax, -0xe28(%rbp)
jmp 0x9e18fe
movq -0xe30(%rbp), %rdi
movl -0xe34(%rbp), %esi
callq 0x9fb850
movq %rax, -0xe28(%rbp)
jmp 0x9e18fe
movq -0xe30(%rbp), %rdi
movl -0xe34(%rbp), %esi
callq 0x9fb880
movq %rax, -0xe28(%rbp)
movq -0x1a60(%rbp), %rax
movl -0x1a64(%rbp), %edx
movq -0xe28(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
cmpl $0x2, -0xec8(%rbp)
jne 0x9e206d
jmp 0x9e1924
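# Mode-2 repeat loop: while ip stays within the limit, check whether the 4 bytes at ip
# match those at the stored repeat distance (the repeat candidate may fall in the other
# segment); on a hit, swap the two repeat offsets, store a sequence with zero literals
# and offset code 1, refresh both hash tables, and advance ip by the match length.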
movq -0xf08(%rbp), %rax
cmpq -0xf30(%rbp), %rax
ja 0x9e206b
movq -0xf08(%rbp), %rax
movq -0xef8(%rbp), %rcx
subq %rcx, %rax
movl %eax, -0x1058(%rbp)
movl -0x1058(%rbp), %eax
subl -0xf38(%rbp), %eax
movl %eax, -0x105c(%rbp)
cmpl $0x2, -0xec8(%rbp)
jne 0x9e19a1
movl -0x105c(%rbp), %eax
cmpl -0xf18(%rbp), %eax
jae 0x9e19a1
movq -0xf70(%rbp), %rax
movl -0x105c(%rbp), %ecx
addq %rcx, %rax
movl -0xf84(%rbp), %ecx
movl %ecx, %edx
xorl %ecx, %ecx
subq %rdx, %rcx
addq %rcx, %rax
movq %rax, -0x1a70(%rbp)
jmp 0x9e19b8
movq -0xef8(%rbp), %rax
movl -0x105c(%rbp), %ecx
addq %rcx, %rax
movq %rax, -0x1a70(%rbp)
movq -0x1a70(%rbp), %rax
movq %rax, -0x1068(%rbp)
movl -0xf18(%rbp), %eax
subl $0x1, %eax
subl -0x105c(%rbp), %eax
cmpl $0x3, %eax
jb 0x9e2069
movq -0x1068(%rbp), %rdi
callq 0x9fb8d0
movl %eax, -0x1a74(%rbp)
movq -0xf08(%rbp), %rdi
callq 0x9fb8d0
movl %eax, %ecx
movl -0x1a74(%rbp), %eax
cmpl %ecx, %eax
jne 0x9e2069
movl -0x105c(%rbp), %eax
cmpl -0xf18(%rbp), %eax
jae 0x9e1a2a
movq -0xf80(%rbp), %rax
movq %rax, -0x1a80(%rbp)
jmp 0x9e1a38
movq -0xf28(%rbp), %rax
movq %rax, -0x1a80(%rbp)
movq -0x1a80(%rbp), %rax
movq %rax, -0x1070(%rbp)
movq -0xf08(%rbp), %rdi
addq $0x4, %rdi
movq -0x1068(%rbp), %rsi
addq $0x4, %rsi
movq -0xf28(%rbp), %rdx
movq -0x1070(%rbp), %rcx
movq -0xf20(%rbp), %r8
callq 0x9fbad0
addq $0x4, %rax
movq %rax, -0x1078(%rbp)
movl -0xf38(%rbp), %eax
movl %eax, -0x107c(%rbp)
movl -0xf34(%rbp), %eax
movl %eax, -0xf38(%rbp)
movl -0x107c(%rbp), %eax
movl %eax, -0xf34(%rbp)
movq -0xea8(%rbp), %rsi
movq -0xf10(%rbp), %rdx
movq -0xf28(%rbp), %rcx
movq -0x1078(%rbp), %rax
subq $0x3, %rax
movq %rsi, -0x808(%rbp)
movq $0x0, -0x810(%rbp)
movq %rdx, -0x818(%rbp)
movq %rcx, -0x820(%rbp)
movl $0x0, -0x824(%rbp)
movq %rax, -0x830(%rbp)
movq -0x820(%rbp), %rax
addq $-0x20, %rax
movq %rax, -0x838(%rbp)
movq -0x818(%rbp), %rax
addq -0x810(%rbp), %rax
movq %rax, -0x840(%rbp)
movq -0x840(%rbp), %rax
cmpq -0x838(%rbp), %rax
ja 0x9e1d12
movq -0x808(%rbp), %rax
movq 0x18(%rax), %rdi
movq -0x818(%rbp), %rsi
callq 0x9fbd80
cmpq $0x10, -0x810(%rbp)
jbe 0x9e1d10
movq -0x808(%rbp), %rax
movq 0x18(%rax), %rdx
addq $0x10, %rdx
movq -0x818(%rbp), %rcx
addq $0x10, %rcx
movq -0x810(%rbp), %rax
subq $0x10, %rax
movq %rdx, -0x1c8(%rbp)
movq %rcx, -0x1d0(%rbp)
movq %rax, -0x1d8(%rbp)
movl $0x0, -0x1dc(%rbp)
movq -0x1c8(%rbp), %rax
movq -0x1d0(%rbp), %rcx
subq %rcx, %rax
movq %rax, -0x1e8(%rbp)
movq -0x1d0(%rbp), %rax
movq %rax, -0x1f0(%rbp)
movq -0x1c8(%rbp), %rax
movq %rax, -0x1f8(%rbp)
movq -0x1f8(%rbp), %rax
addq -0x1d8(%rbp), %rax
movq %rax, -0x200(%rbp)
cmpl $0x1, -0x1dc(%rbp)
jne 0x9e1c44
cmpq $0x10, -0x1e8(%rbp)
jge 0x9e1c44
jmp 0x9e1bf8
movq -0x1f8(%rbp), %rdi
movq -0x1f0(%rbp), %rsi
callq 0x9fbf50
movq -0x1f8(%rbp), %rax
addq $0x8, %rax
movq %rax, -0x1f8(%rbp)
movq -0x1f0(%rbp), %rax
addq $0x8, %rax
movq %rax, -0x1f0(%rbp)
movq -0x1f8(%rbp), %rax
cmpq -0x200(%rbp), %rax
jb 0x9e1bf8
jmp 0x9e1d0e
movq -0x1f8(%rbp), %rdi
movq -0x1f0(%rbp), %rsi
callq 0x9fbd80
movl $0x10, %eax
cmpq -0x1d8(%rbp), %rax
jl 0x9e1c6a
jmp 0x9e1d0e
movq -0x1f8(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x1f8(%rbp)
movq -0x1f0(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x1f0(%rbp)
movq -0x1f8(%rbp), %rdi
movq -0x1f0(%rbp), %rsi
callq 0x9fbd80
movq -0x1f8(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x1f8(%rbp)
movq -0x1f0(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x1f0(%rbp)
movq -0x1f8(%rbp), %rdi
movq -0x1f0(%rbp), %rsi
callq 0x9fbd80
movq -0x1f8(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x1f8(%rbp)
movq -0x1f0(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x1f0(%rbp)
movq -0x1f8(%rbp), %rax
cmpq -0x200(%rbp), %rax
jb 0x9e1c8e
jmp 0x9e1d0e
jmp 0x9e1d10
jmp 0x9e1d37
movq -0x808(%rbp), %rax
movq 0x18(%rax), %rdi
movq -0x818(%rbp), %rsi
movq -0x840(%rbp), %rdx
movq -0x838(%rbp), %rcx
callq 0x9fbdb0
movq -0x810(%rbp), %rcx
movq -0x808(%rbp), %rax
addq 0x18(%rax), %rcx
movq %rcx, 0x18(%rax)
cmpq $0xffff, -0x810(%rbp) # imm = 0xFFFF
jbe 0x9e1d90
movq -0x808(%rbp), %rax
movl $0x1, 0x48(%rax)
movq -0x808(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x808(%rbp), %rcx
movq (%rcx), %rcx
subq %rcx, %rax
sarq $0x3, %rax
movl %eax, %ecx
movq -0x808(%rbp), %rax
movl %ecx, 0x4c(%rax)
movq -0x810(%rbp), %rax
movw %ax, %cx
movq -0x808(%rbp), %rax
movq 0x8(%rax), %rax
movw %cx, 0x4(%rax)
movl -0x824(%rbp), %ecx
addl $0x1, %ecx
movq -0x808(%rbp), %rax
movq 0x8(%rax), %rax
movl %ecx, (%rax)
cmpq $0xffff, -0x830(%rbp) # imm = 0xFFFF
jbe 0x9e1e02
movq -0x808(%rbp), %rax
movl $0x2, 0x48(%rax)
movq -0x808(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x808(%rbp), %rcx
movq (%rcx), %rcx
subq %rcx, %rax
sarq $0x3, %rax
movl %eax, %ecx
movq -0x808(%rbp), %rax
movl %ecx, 0x4c(%rax)
movw -0x830(%rbp), %cx
movq -0x808(%rbp), %rax
movq 0x8(%rax), %rax
movw %cx, 0x6(%rax)
movq -0x808(%rbp), %rax
movq 0x8(%rax), %rcx
addq $0x8, %rcx
movq %rcx, 0x8(%rax)
movl -0x1058(%rbp), %eax
movl %eax, -0x1a94(%rbp)
movq -0xee8(%rbp), %rax
movq %rax, -0x1a90(%rbp)
movq -0xf08(%rbp), %rdx
movl -0xeec(%rbp), %ecx
movl -0xec4(%rbp), %eax
movq %rdx, -0xe48(%rbp)
movl %ecx, -0xe4c(%rbp)
movl %eax, -0xe50(%rbp)
movl -0xe50(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1a88(%rbp)
subl $0x4, %eax
ja 0x9e1e99
movq -0x1a88(%rbp), %rax
leaq 0x1f47f4(%rip), %rcx # 0xbd6684
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9e1e9b
movq -0xe48(%rbp), %rdi
movl -0xe4c(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0xe40(%rbp)
jmp 0x9e1f20
movq -0xe48(%rbp), %rdi
movl -0xe4c(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0xe40(%rbp)
jmp 0x9e1f20
movq -0xe48(%rbp), %rdi
movl -0xe4c(%rbp), %esi
callq 0x9fb820
movq %rax, -0xe40(%rbp)
jmp 0x9e1f20
movq -0xe48(%rbp), %rdi
movl -0xe4c(%rbp), %esi
callq 0x9fb850
movq %rax, -0xe40(%rbp)
jmp 0x9e1f20
movq -0xe48(%rbp), %rdi
movl -0xe4c(%rbp), %esi
callq 0x9fb880
movq %rax, -0xe40(%rbp)
movq -0x1a90(%rbp), %rax
movl -0x1a94(%rbp), %edx
movq -0xe40(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
movl -0x1058(%rbp), %eax
movl %eax, -0x1aac(%rbp)
movq -0xed8(%rbp), %rax
movq %rax, -0x1aa8(%rbp)
movq -0xf08(%rbp), %rcx
movl -0xedc(%rbp), %eax
movq %rcx, -0xe60(%rbp)
movl %eax, -0xe64(%rbp)
movl $0x8, -0xe68(%rbp)
movl -0xe68(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1aa0(%rbp)
subl $0x4, %eax
ja 0x9e1fa3
movq -0x1aa0(%rbp), %rax
leaq 0x1f46fe(%rip), %rcx # 0xbd6698
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9e1fa5
movq -0xe60(%rbp), %rdi
movl -0xe64(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0xe58(%rbp)
jmp 0x9e202a
movq -0xe60(%rbp), %rdi
movl -0xe64(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0xe58(%rbp)
jmp 0x9e202a
movq -0xe60(%rbp), %rdi
movl -0xe64(%rbp), %esi
callq 0x9fb820
movq %rax, -0xe58(%rbp)
jmp 0x9e202a
movq -0xe60(%rbp), %rdi
movl -0xe64(%rbp), %esi
callq 0x9fb850
movq %rax, -0xe58(%rbp)
jmp 0x9e202a
movq -0xe60(%rbp), %rdi
movl -0xe64(%rbp), %esi
callq 0x9fb880
movq %rax, -0xe58(%rbp)
movq -0x1aa8(%rbp), %rax
movl -0x1aac(%rbp), %edx
movq -0xe58(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
movq -0x1078(%rbp), %rax
addq -0xf08(%rbp), %rax
movq %rax, -0xf08(%rbp)
movq -0xf08(%rbp), %rax
movq %rax, -0xf10(%rbp)
jmp 0x9e1924
jmp 0x9e206b
jmp 0x9e206d
cmpl $0x0, -0xec8(%rbp)
jne 0x9e2730
jmp 0x9e207c
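# Mode-0 repeat loop: the single-buffer variant of the same idea; the 4 bytes at ip are
# compared against ip minus the second repeat offset at -0xf38 (only when it is
# nonzero), the match length is counted from ip+4, the repeat offsets are swapped, both
# tables are refreshed with the current position, and a sequence with zero literals and
# offset code 1 is stored.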
movq -0xf08(%rbp), %rcx
xorl %eax, %eax
cmpq -0xf30(%rbp), %rcx
movb %al, -0x1aad(%rbp)
ja 0x9e20fe
cmpl $0x0, -0xf38(%rbp)
seta %al
andb $0x1, %al
movzbl %al, %eax
movl %eax, -0x1ab4(%rbp)
movq -0xf08(%rbp), %rdi
callq 0x9fb8d0
movl %eax, -0x1ab8(%rbp)
movq -0xf08(%rbp), %rdi
movl -0xf38(%rbp), %eax
movl %eax, %ecx
xorl %eax, %eax
subq %rcx, %rax
addq %rax, %rdi
callq 0x9fb8d0
movl -0x1ab8(%rbp), %ecx
movl %eax, %edx
movl -0x1ab4(%rbp), %eax
cmpl %edx, %ecx
sete %cl
andb $0x1, %cl
movzbl %cl, %ecx
andl %ecx, %eax
cmpl $0x0, %eax
setne %al
movb %al, -0x1aad(%rbp)
movb -0x1aad(%rbp), %al
testb $0x1, %al
jne 0x9e210d
jmp 0x9e272e
movq -0xf08(%rbp), %rdi
addq $0x4, %rdi
movl -0xf38(%rbp), %eax
movq %rdi, %rsi
subq %rax, %rsi
movq -0xf28(%rbp), %rdx
callq 0x9fbb90
addq $0x4, %rax
movq %rax, -0x1088(%rbp)
movl -0xf38(%rbp), %eax
movl %eax, -0x108c(%rbp)
movl -0xf34(%rbp), %eax
movl %eax, -0xf38(%rbp)
movl -0x108c(%rbp), %eax
movl %eax, -0xf34(%rbp)
movq -0xf08(%rbp), %rdx
movl -0xef8(%rbp), %ecx
movl %edx, %eax
subl %ecx, %eax
movl %eax, -0x1acc(%rbp)
movq -0xee8(%rbp), %rax
movq %rax, -0x1ac8(%rbp)
movl -0xeec(%rbp), %ecx
movl -0xec4(%rbp), %eax
movq %rdx, -0xe78(%rbp)
movl %ecx, -0xe7c(%rbp)
movl %eax, -0xe80(%rbp)
movl -0xe80(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1ac0(%rbp)
subl $0x4, %eax
ja 0x9e21d1
movq -0x1ac0(%rbp), %rax
leaq 0x1f4494(%rip), %rcx # 0xbd665c
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9e21d3
movq -0xe78(%rbp), %rdi
movl -0xe7c(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0xe70(%rbp)
jmp 0x9e2258
movq -0xe78(%rbp), %rdi
movl -0xe7c(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0xe70(%rbp)
jmp 0x9e2258
movq -0xe78(%rbp), %rdi
movl -0xe7c(%rbp), %esi
callq 0x9fb820
movq %rax, -0xe70(%rbp)
jmp 0x9e2258
movq -0xe78(%rbp), %rdi
movl -0xe7c(%rbp), %esi
callq 0x9fb850
movq %rax, -0xe70(%rbp)
jmp 0x9e2258
movq -0xe78(%rbp), %rdi
movl -0xe7c(%rbp), %esi
callq 0x9fb880
movq %rax, -0xe70(%rbp)
movq -0x1ac8(%rbp), %rax
movl -0x1acc(%rbp), %edx
movq -0xe70(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
movq -0xf08(%rbp), %rcx
movl -0xef8(%rbp), %edx
movl %ecx, %eax
subl %edx, %eax
movl %eax, -0x1ae4(%rbp)
movq -0xed8(%rbp), %rax
movq %rax, -0x1ae0(%rbp)
movl -0xedc(%rbp), %eax
movq %rcx, -0xe90(%rbp)
movl %eax, -0xe94(%rbp)
movl $0x8, -0xe98(%rbp)
movl -0xe98(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1ad8(%rbp)
subl $0x4, %eax
ja 0x9e22df
movq -0x1ad8(%rbp), %rax
leaq 0x1f439a(%rip), %rcx # 0xbd6670
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9e22e1
movq -0xe90(%rbp), %rdi
movl -0xe94(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0xe88(%rbp)
jmp 0x9e2366
movq -0xe90(%rbp), %rdi
movl -0xe94(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0xe88(%rbp)
jmp 0x9e2366
movq -0xe90(%rbp), %rdi
movl -0xe94(%rbp), %esi
callq 0x9fb820
movq %rax, -0xe88(%rbp)
jmp 0x9e2366
movq -0xe90(%rbp), %rdi
movl -0xe94(%rbp), %esi
callq 0x9fb850
movq %rax, -0xe88(%rbp)
jmp 0x9e2366
movq -0xe90(%rbp), %rdi
movl -0xe94(%rbp), %esi
callq 0x9fb880
movq %rax, -0xe88(%rbp)
movq -0x1ae0(%rbp), %rax
movl -0x1ae4(%rbp), %edx
movq -0xe88(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
movq -0xea8(%rbp), %rsi
movq -0xf10(%rbp), %rdx
movq -0xf28(%rbp), %rcx
movq -0x1088(%rbp), %rax
subq $0x3, %rax
movq %rsi, -0x7c8(%rbp)
movq $0x0, -0x7d0(%rbp)
movq %rdx, -0x7d8(%rbp)
movq %rcx, -0x7e0(%rbp)
movl $0x0, -0x7e4(%rbp)
movq %rax, -0x7f0(%rbp)
movq -0x7e0(%rbp), %rax
addq $-0x20, %rax
movq %rax, -0x7f8(%rbp)
movq -0x7d8(%rbp), %rax
addq -0x7d0(%rbp), %rax
movq %rax, -0x800(%rbp)
movq -0x800(%rbp), %rax
cmpq -0x7f8(%rbp), %rax
ja 0x9e25ea
movq -0x7c8(%rbp), %rax
movq 0x18(%rax), %rdi
movq -0x7d8(%rbp), %rsi
callq 0x9fbd80
cmpq $0x10, -0x7d0(%rbp)
jbe 0x9e25e8
movq -0x7c8(%rbp), %rax
movq 0x18(%rax), %rdx
addq $0x10, %rdx
movq -0x7d8(%rbp), %rcx
addq $0x10, %rcx
movq -0x7d0(%rbp), %rax
subq $0x10, %rax
movq %rdx, -0x208(%rbp)
movq %rcx, -0x210(%rbp)
movq %rax, -0x218(%rbp)
movl $0x0, -0x21c(%rbp)
movq -0x208(%rbp), %rax
movq -0x210(%rbp), %rcx
subq %rcx, %rax
movq %rax, -0x228(%rbp)
movq -0x210(%rbp), %rax
movq %rax, -0x230(%rbp)
movq -0x208(%rbp), %rax
movq %rax, -0x238(%rbp)
movq -0x238(%rbp), %rax
addq -0x218(%rbp), %rax
movq %rax, -0x240(%rbp)
cmpl $0x1, -0x21c(%rbp)
jne 0x9e251c
cmpq $0x10, -0x228(%rbp)
jge 0x9e251c
jmp 0x9e24d0
movq -0x238(%rbp), %rdi
movq -0x230(%rbp), %rsi
callq 0x9fbf50
movq -0x238(%rbp), %rax
addq $0x8, %rax
movq %rax, -0x238(%rbp)
movq -0x230(%rbp), %rax
addq $0x8, %rax
movq %rax, -0x230(%rbp)
movq -0x238(%rbp), %rax
cmpq -0x240(%rbp), %rax
jb 0x9e24d0
jmp 0x9e25e6
movq -0x238(%rbp), %rdi
movq -0x230(%rbp), %rsi
callq 0x9fbd80
movl $0x10, %eax
cmpq -0x218(%rbp), %rax
jl 0x9e2542
jmp 0x9e25e6
movq -0x238(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x238(%rbp)
movq -0x230(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x230(%rbp)
movq -0x238(%rbp), %rdi
movq -0x230(%rbp), %rsi
callq 0x9fbd80
movq -0x238(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x238(%rbp)
movq -0x230(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x230(%rbp)
movq -0x238(%rbp), %rdi
movq -0x230(%rbp), %rsi
callq 0x9fbd80
movq -0x238(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x238(%rbp)
movq -0x230(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x230(%rbp)
movq -0x238(%rbp), %rax
cmpq -0x240(%rbp), %rax
jb 0x9e2566
jmp 0x9e25e6
jmp 0x9e25e8
jmp 0x9e260f
movq -0x7c8(%rbp), %rax
movq 0x18(%rax), %rdi
movq -0x7d8(%rbp), %rsi
movq -0x800(%rbp), %rdx
movq -0x7f8(%rbp), %rcx
callq 0x9fbdb0
movq -0x7d0(%rbp), %rcx
movq -0x7c8(%rbp), %rax
addq 0x18(%rax), %rcx
movq %rcx, 0x18(%rax)
cmpq $0xffff, -0x7d0(%rbp) # imm = 0xFFFF
jbe 0x9e2668
movq -0x7c8(%rbp), %rax
movl $0x1, 0x48(%rax)
movq -0x7c8(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x7c8(%rbp), %rcx
movq (%rcx), %rcx
subq %rcx, %rax
sarq $0x3, %rax
movl %eax, %ecx
movq -0x7c8(%rbp), %rax
movl %ecx, 0x4c(%rax)
movq -0x7d0(%rbp), %rax
movw %ax, %cx
movq -0x7c8(%rbp), %rax
movq 0x8(%rax), %rax
movw %cx, 0x4(%rax)
movl -0x7e4(%rbp), %ecx
addl $0x1, %ecx
movq -0x7c8(%rbp), %rax
movq 0x8(%rax), %rax
movl %ecx, (%rax)
cmpq $0xffff, -0x7f0(%rbp) # imm = 0xFFFF
jbe 0x9e26da
movq -0x7c8(%rbp), %rax
movl $0x2, 0x48(%rax)
movq -0x7c8(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x7c8(%rbp), %rcx
movq (%rcx), %rcx
subq %rcx, %rax
sarq $0x3, %rax
movl %eax, %ecx
movq -0x7c8(%rbp), %rax
movl %ecx, 0x4c(%rax)
movq -0x7f0(%rbp), %rax
movw %ax, %cx
movq -0x7c8(%rbp), %rax
movq 0x8(%rax), %rax
movw %cx, 0x6(%rax)
movq -0x7c8(%rbp), %rax
movq 0x8(%rax), %rcx
addq $0x8, %rcx
movq %rcx, 0x8(%rax)
movq -0x1088(%rbp), %rax
addq -0xf08(%rbp), %rax
movq %rax, -0xf08(%rbp)
movq -0xf08(%rbp), %rax
movq %rax, -0xf10(%rbp)
jmp 0x9e207c
jmp 0x9e2730
jmp 0x9e2732
jmp 0x9df87c
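# Loop epilogue: write the final repeat offsets into the two-slot array pointed to by
# -0xeb0, substituting the fallback value at -0xf3c for any offset that ended up zero,
# and store iend (-0xf28) minus the anchor (-0xf10) at -0x1728, presumably the byte
# count this inlined body reports back at the jump target.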
cmpl $0x0, -0xf34(%rbp)
je 0x9e274e
movl -0xf34(%rbp), %eax
movl %eax, -0x1ae8(%rbp)
jmp 0x9e275a
movl -0xf3c(%rbp), %eax
movl %eax, -0x1ae8(%rbp)
movl -0x1ae8(%rbp), %ecx
movq -0xeb0(%rbp), %rax
movl %ecx, (%rax)
cmpl $0x0, -0xf38(%rbp)
je 0x9e2780
movl -0xf38(%rbp), %eax
movl %eax, -0x1aec(%rbp)
jmp 0x9e278c
movl -0xf3c(%rbp), %eax
movl %eax, -0x1aec(%rbp)
movl -0x1aec(%rbp), %ecx
movq -0xeb0(%rbp), %rax
movl %ecx, 0x4(%rax)
movq -0xf28(%rbp), %rax
movq -0xf10(%rbp), %rcx
subq %rcx, %rax
movq %rax, -0x1728(%rbp)
jmp 0x9e8fd0
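# A second inlined variant of the search loop starts here with its own locals: the
# context at -0x11e8 supplies two tables (offsets 0x60 and 0x70), two parameters read
# from the block at offset 0xf0 (used below as bit counts for the hash dispatch), and
# the base pointer at offset 0x8; the width constant is 6 (-0x120c) and the mode flag
# at -0x1210 is 0, so the dictionary-related branches below fall back to defaults.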
movq -0x1730(%rbp), %rdi
movq -0x1738(%rbp), %rsi
movq -0x1740(%rbp), %rdx
movq -0x1748(%rbp), %rcx
movq -0x1750(%rbp), %rax
movq %rdi, -0x11e8(%rbp)
movq %rsi, -0x11f0(%rbp)
movq %rdx, -0x11f8(%rbp)
movq %rcx, -0x1200(%rbp)
movq %rax, -0x1208(%rbp)
movl $0x6, -0x120c(%rbp)
movl $0x0, -0x1210(%rbp)
movq -0x11e8(%rbp), %rax
addq $0xf0, %rax
movq %rax, -0x1218(%rbp)
movq -0x11e8(%rbp), %rax
movq 0x60(%rax), %rax
movq %rax, -0x1220(%rbp)
movq -0x1218(%rbp), %rax
movl 0x8(%rax), %eax
movl %eax, -0x1224(%rbp)
movq -0x11e8(%rbp), %rax
movq 0x70(%rax), %rax
movq %rax, -0x1230(%rbp)
movq -0x1218(%rbp), %rax
movl 0x4(%rax), %eax
movl %eax, -0x1234(%rbp)
movq -0x11e8(%rbp), %rax
movq 0x8(%rax), %rax
movq %rax, -0x1240(%rbp)
movq -0x1200(%rbp), %rax
movq %rax, -0x1248(%rbp)
movq -0x1248(%rbp), %rax
movq %rax, -0x1250(%rbp)
movq -0x1248(%rbp), %rax
movq %rax, -0x1258(%rbp)
movq -0x1248(%rbp), %rax
movq -0x1240(%rbp), %rcx
subq %rcx, %rax
addq -0x1208(%rbp), %rax
movl %eax, -0x125c(%rbp)
movq -0x11e8(%rbp), %rdi
movl -0x125c(%rbp), %esi
movq -0x1218(%rbp), %rax
movl (%rax), %edx
callq 0x9fba50
movl %eax, -0x1260(%rbp)
movq -0x1240(%rbp), %rax
movl -0x1260(%rbp), %ecx
addq %rcx, %rax
movq %rax, -0x1268(%rbp)
movq -0x1248(%rbp), %rax
addq -0x1208(%rbp), %rax
movq %rax, -0x1270(%rbp)
movq -0x1270(%rbp), %rax
addq $-0x8, %rax
movq %rax, -0x1278(%rbp)
movq -0x11f8(%rbp), %rax
movl (%rax), %eax
movl %eax, -0x127c(%rbp)
movq -0x11f8(%rbp), %rax
movl 0x4(%rax), %eax
movl %eax, -0x1280(%rbp)
movl $0x0, -0x1284(%rbp)
movq -0x11e8(%rbp), %rax
movq 0xe8(%rax), %rax
movq %rax, -0x1290(%rbp)
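# Each conditional below takes its first branch only when the mode flag at -0x1210
# equals 2, in which case the structure at -0x1290 would supply the values; with the
# flag set to 0 above, every field resolves to zero or to the local default.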
cmpl $0x2, -0x1210(%rbp)
jne 0x9e2981
movq -0x1290(%rbp), %rax
addq $0xf0, %rax
movq %rax, -0x1af8(%rbp)
jmp 0x9e298c
xorl %eax, %eax
movq %rax, -0x1af8(%rbp)
jmp 0x9e298c
movq -0x1af8(%rbp), %rax
movq %rax, -0x1298(%rbp)
cmpl $0x2, -0x1210(%rbp)
jne 0x9e29b7
movq -0x1290(%rbp), %rax
movq 0x60(%rax), %rax
movq %rax, -0x1b00(%rbp)
jmp 0x9e29c2
xorl %eax, %eax
movq %rax, -0x1b00(%rbp)
jmp 0x9e29c2
movq -0x1b00(%rbp), %rax
movq %rax, -0x12a0(%rbp)
cmpl $0x2, -0x1210(%rbp)
jne 0x9e29ed
movq -0x1290(%rbp), %rax
movq 0x70(%rax), %rax
movq %rax, -0x1b08(%rbp)
jmp 0x9e29f8
xorl %eax, %eax
movq %rax, -0x1b08(%rbp)
jmp 0x9e29f8
movq -0x1b08(%rbp), %rax
movq %rax, -0x12a8(%rbp)
cmpl $0x2, -0x1210(%rbp)
jne 0x9e2a21
movq -0x1290(%rbp), %rax
movl 0x18(%rax), %eax
movl %eax, -0x1b0c(%rbp)
jmp 0x9e2a2b
xorl %eax, %eax
movl %eax, -0x1b0c(%rbp)
jmp 0x9e2a2b
movl -0x1b0c(%rbp), %eax
movl %eax, -0x12ac(%rbp)
cmpl $0x2, -0x1210(%rbp)
jne 0x9e2a54
movq -0x1290(%rbp), %rax
movq 0x8(%rax), %rax
movq %rax, -0x1b18(%rbp)
jmp 0x9e2a5f
xorl %eax, %eax
movq %rax, -0x1b18(%rbp)
jmp 0x9e2a5f
movq -0x1b18(%rbp), %rax
movq %rax, -0x12b8(%rbp)
cmpl $0x2, -0x1210(%rbp)
jne 0x9e2a8f
movq -0x12b8(%rbp), %rax
movl -0x12ac(%rbp), %ecx
addq %rcx, %rax
movq %rax, -0x1b20(%rbp)
jmp 0x9e2a9a
xorl %eax, %eax
movq %rax, -0x1b20(%rbp)
jmp 0x9e2a9a
movq -0x1b20(%rbp), %rax
movq %rax, -0x12c0(%rbp)
cmpl $0x2, -0x1210(%rbp)
jne 0x9e2ac4
movq -0x1290(%rbp), %rax
movq (%rax), %rax
movq %rax, -0x1b28(%rbp)
jmp 0x9e2acf
xorl %eax, %eax
movq %rax, -0x1b28(%rbp)
jmp 0x9e2acf
movq -0x1b28(%rbp), %rax
movq %rax, -0x12c8(%rbp)
cmpl $0x2, -0x1210(%rbp)
jne 0x9e2b07
movl -0x1260(%rbp), %eax
movq -0x12c8(%rbp), %rcx
movq -0x12b8(%rbp), %rdx
subq %rdx, %rcx
subl %ecx, %eax
movl %eax, -0x1b2c(%rbp)
jmp 0x9e2b11
xorl %eax, %eax
movl %eax, -0x1b2c(%rbp)
jmp 0x9e2b11
movl -0x1b2c(%rbp), %eax
movl %eax, -0x12cc(%rbp)
cmpl $0x2, -0x1210(%rbp)
jne 0x9e2b38
movq -0x1298(%rbp), %rax
movl 0x8(%rax), %eax
movl %eax, -0x1b30(%rbp)
jmp 0x9e2b44
movl -0x1224(%rbp), %eax
movl %eax, -0x1b30(%rbp)
movl -0x1b30(%rbp), %eax
movl %eax, -0x12d0(%rbp)
cmpl $0x2, -0x1210(%rbp)
jne 0x9e2b6b
movq -0x1298(%rbp), %rax
movl 0x4(%rax), %eax
movl %eax, -0x1b34(%rbp)
jmp 0x9e2b77
movl -0x1234(%rbp), %eax
movl %eax, -0x1b34(%rbp)
movl -0x1b34(%rbp), %eax
movl %eax, -0x12d4(%rbp)
movq -0x1250(%rbp), %rax
movq -0x1268(%rbp), %rcx
subq %rcx, %rax
movq -0x12c8(%rbp), %rcx
movq -0x12c0(%rbp), %rdx
subq %rdx, %rcx
addq %rcx, %rax
movl %eax, -0x12d8(%rbp)
cmpl $0x2, -0x1210(%rbp)
jne 0x9e2bb9
jmp 0x9e2bb9
cmpl $0x0, -0x12d8(%rbp)
sete %al
andb $0x1, %al
movzbl %al, %ecx
movq -0x1250(%rbp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, -0x1250(%rbp)
cmpl $0x0, -0x1210(%rbp)
jne 0x9e2c7d
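# Mode-0 setup: compute the current index (ip minus base), call 0x9fba50 for what is
# presumably the lowest valid index at that position, and if either carried repeat
# offset (-0x127c, -0x1280) exceeds current minus that bound, stash it at -0x1284 and
# clear it so it cannot reach outside the window.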
movq -0x1250(%rbp), %rax
movq -0x1240(%rbp), %rcx
subq %rcx, %rax
movl %eax, -0x12dc(%rbp)
movq -0x11e8(%rbp), %rdi
movl -0x12dc(%rbp), %esi
movq -0x1218(%rbp), %rax
movl (%rax), %edx
callq 0x9fba50
movl %eax, -0x12e0(%rbp)
movl -0x12dc(%rbp), %eax
subl -0x12e0(%rbp), %eax
movl %eax, -0x12e4(%rbp)
movl -0x1280(%rbp), %eax
cmpl -0x12e4(%rbp), %eax
jbe 0x9e2c57
movl -0x1280(%rbp), %eax
movl %eax, -0x1284(%rbp)
movl $0x0, -0x1280(%rbp)
movl -0x127c(%rbp), %eax
cmpl -0x12e4(%rbp), %eax
jbe 0x9e2c7b
movl -0x127c(%rbp), %eax
movl %eax, -0x1284(%rbp)
movl $0x0, -0x127c(%rbp)
jmp 0x9e2c7d
cmpl $0x2, -0x1210(%rbp)
jne 0x9e2c88
jmp 0x9e2c88
jmp 0x9e2c8a
movq -0x1250(%rbp), %rax
cmpq -0x1278(%rbp), %rax
jae 0x9e5b45
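# Main loop of this variant: while ip is below the limit at -0x1278 (end minus 8),
# compute an 8-byte-wide hash and a 6-byte-wide hash of the bytes at ip, plus the
# matching pair for the secondary tables (identical bit counts here since the mode is
# 0), using the same jump-table dispatch.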
movq -0x1250(%rbp), %rcx
movl -0x1224(%rbp), %eax
movq %rcx, -0x10a0(%rbp)
movl %eax, -0x10a4(%rbp)
movl $0x8, -0x10a8(%rbp)
movl -0x10a8(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1b40(%rbp)
subl $0x4, %eax
ja 0x9e2cf0
movq -0x1b40(%rbp), %rax
leaq 0x1f3795(%rip), %rcx # 0xbd647c
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9e2cf2
movq -0x10a0(%rbp), %rdi
movl -0x10a4(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0x1098(%rbp)
jmp 0x9e2d77
movq -0x10a0(%rbp), %rdi
movl -0x10a4(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0x1098(%rbp)
jmp 0x9e2d77
movq -0x10a0(%rbp), %rdi
movl -0x10a4(%rbp), %esi
callq 0x9fb820
movq %rax, -0x1098(%rbp)
jmp 0x9e2d77
movq -0x10a0(%rbp), %rdi
movl -0x10a4(%rbp), %esi
callq 0x9fb850
movq %rax, -0x1098(%rbp)
jmp 0x9e2d77
movq -0x10a0(%rbp), %rdi
movl -0x10a4(%rbp), %esi
callq 0x9fb880
movq %rax, -0x1098(%rbp)
movq -0x1098(%rbp), %rax
movq %rax, -0x1300(%rbp)
movq -0x1250(%rbp), %rdx
movl -0x1234(%rbp), %ecx
movl -0x120c(%rbp), %eax
movq %rdx, -0x10b8(%rbp)
movl %ecx, -0x10bc(%rbp)
movl %eax, -0x10c0(%rbp)
movl -0x10c0(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1b48(%rbp)
subl $0x4, %eax
ja 0x9e2dd9
movq -0x1b48(%rbp), %rax
leaq 0x1f36c0(%rip), %rcx # 0xbd6490
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9e2ddb
movq -0x10b8(%rbp), %rdi
movl -0x10bc(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0x10b0(%rbp)
jmp 0x9e2e60
movq -0x10b8(%rbp), %rdi
movl -0x10bc(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0x10b0(%rbp)
jmp 0x9e2e60
movq -0x10b8(%rbp), %rdi
movl -0x10bc(%rbp), %esi
callq 0x9fb820
movq %rax, -0x10b0(%rbp)
jmp 0x9e2e60
movq -0x10b8(%rbp), %rdi
movl -0x10bc(%rbp), %esi
callq 0x9fb850
movq %rax, -0x10b0(%rbp)
jmp 0x9e2e60
movq -0x10b8(%rbp), %rdi
movl -0x10bc(%rbp), %esi
callq 0x9fb880
movq %rax, -0x10b0(%rbp)
movq -0x10b0(%rbp), %rax
movq %rax, -0x1308(%rbp)
movq -0x1250(%rbp), %rcx
movl -0x12d0(%rbp), %eax
movq %rcx, -0x10d0(%rbp)
movl %eax, -0x10d4(%rbp)
movl $0x8, -0x10d8(%rbp)
movl -0x10d8(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1b50(%rbp)
subl $0x4, %eax
ja 0x9e2ec0
movq -0x1b50(%rbp), %rax
leaq 0x1f35ed(%rip), %rcx # 0xbd64a4
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9e2ec2
movq -0x10d0(%rbp), %rdi
movl -0x10d4(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0x10c8(%rbp)
jmp 0x9e2f47
movq -0x10d0(%rbp), %rdi
movl -0x10d4(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0x10c8(%rbp)
jmp 0x9e2f47
movq -0x10d0(%rbp), %rdi
movl -0x10d4(%rbp), %esi
callq 0x9fb820
movq %rax, -0x10c8(%rbp)
jmp 0x9e2f47
movq -0x10d0(%rbp), %rdi
movl -0x10d4(%rbp), %esi
callq 0x9fb850
movq %rax, -0x10c8(%rbp)
jmp 0x9e2f47
movq -0x10d0(%rbp), %rdi
movl -0x10d4(%rbp), %esi
callq 0x9fb880
movq %rax, -0x10c8(%rbp)
movq -0x10c8(%rbp), %rax
movq %rax, -0x1310(%rbp)
movq -0x1250(%rbp), %rdx
movl -0x12d4(%rbp), %ecx
movl -0x120c(%rbp), %eax
movq %rdx, -0x10e8(%rbp)
movl %ecx, -0x10ec(%rbp)
movl %eax, -0x10f0(%rbp)
movl -0x10f0(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1b58(%rbp)
subl $0x4, %eax
ja 0x9e2fa9
movq -0x1b58(%rbp), %rax
leaq 0x1f3518(%rip), %rcx # 0xbd64b8
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9e2fab
movq -0x10e8(%rbp), %rdi
movl -0x10ec(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0x10e0(%rbp)
jmp 0x9e3030
movq -0x10e8(%rbp), %rdi
movl -0x10ec(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0x10e0(%rbp)
jmp 0x9e3030
movq -0x10e8(%rbp), %rdi
movl -0x10ec(%rbp), %esi
callq 0x9fb820
movq %rax, -0x10e0(%rbp)
jmp 0x9e3030
movq -0x10e8(%rbp), %rdi
movl -0x10ec(%rbp), %esi
callq 0x9fb850
movq %rax, -0x10e0(%rbp)
jmp 0x9e3030
movq -0x10e8(%rbp), %rdi
movl -0x10ec(%rbp), %esi
callq 0x9fb880
movq %rax, -0x10e0(%rbp)
movq -0x10e0(%rbp), %rax
movq %rax, -0x1318(%rbp)
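# Read both candidate indices from the two tables, turn them into pointers relative to
# the base, compute the repeat candidate for position ip+1 (current + 1 minus the first
# repeat offset), then overwrite both table slots with the current index.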
movq -0x1250(%rbp), %rax
movq -0x1240(%rbp), %rcx
subq %rcx, %rax
movl %eax, -0x131c(%rbp)
movq -0x1220(%rbp), %rax
movq -0x1300(%rbp), %rcx
movl (%rax,%rcx,4), %eax
movl %eax, -0x1320(%rbp)
movq -0x1230(%rbp), %rax
movq -0x1308(%rbp), %rcx
movl (%rax,%rcx,4), %eax
movl %eax, -0x1324(%rbp)
movq -0x1240(%rbp), %rax
movl -0x1320(%rbp), %ecx
addq %rcx, %rax
movq %rax, -0x1330(%rbp)
movq -0x1240(%rbp), %rax
movl -0x1324(%rbp), %ecx
addq %rcx, %rax
movq %rax, -0x1338(%rbp)
movl -0x131c(%rbp), %eax
addl $0x1, %eax
subl -0x127c(%rbp), %eax
movl %eax, -0x133c(%rbp)
cmpl $0x2, -0x1210(%rbp)
jne 0x9e30fe
movl -0x133c(%rbp), %eax
cmpl -0x1260(%rbp), %eax
jae 0x9e30fe
movq -0x12b8(%rbp), %rax
movl -0x133c(%rbp), %ecx
subl -0x12cc(%rbp), %ecx
movl %ecx, %ecx
addq %rcx, %rax
movq %rax, -0x1b60(%rbp)
jmp 0x9e3115
movq -0x1240(%rbp), %rax
movl -0x133c(%rbp), %ecx
addq %rcx, %rax
movq %rax, -0x1b60(%rbp)
movq -0x1b60(%rbp), %rax
movq %rax, -0x1348(%rbp)
movl -0x131c(%rbp), %edx
movq -0x1230(%rbp), %rax
movq -0x1308(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
movq -0x1220(%rbp), %rax
movq -0x1300(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
cmpl $0x2, -0x1210(%rbp)
jne 0x9e35c8
movl -0x1260(%rbp), %eax
subl $0x1, %eax
subl -0x133c(%rbp), %eax
cmpl $0x3, %eax
jb 0x9e35c8
movq -0x1348(%rbp), %rdi
callq 0x9fb8d0
movl %eax, -0x1b64(%rbp)
movq -0x1250(%rbp), %rdi
addq $0x1, %rdi
callq 0x9fb8d0
movl %eax, %ecx
movl -0x1b64(%rbp), %eax
cmpl %ecx, %eax
jne 0x9e35c8
movl -0x133c(%rbp), %eax
cmpl -0x1260(%rbp), %eax
jae 0x9e31c0
movq -0x12c8(%rbp), %rax
movq %rax, -0x1b70(%rbp)
jmp 0x9e31ce
movq -0x1270(%rbp), %rax
movq %rax, -0x1b70(%rbp)
movq -0x1b70(%rbp), %rax
movq %rax, -0x1350(%rbp)
movq -0x1250(%rbp), %rdi
addq $0x1, %rdi
addq $0x4, %rdi
movq -0x1348(%rbp), %rsi
addq $0x4, %rsi
movq -0x1270(%rbp), %rdx
movq -0x1350(%rbp), %rcx
movq -0x1268(%rbp), %r8
callq 0x9fbad0
addq $0x4, %rax
movq %rax, -0x12f0(%rbp)
movq -0x1250(%rbp), %rax
addq $0x1, %rax
movq %rax, -0x1250(%rbp)
movq -0x11f0(%rbp), %rdi
movq -0x1250(%rbp), %rsi
movq -0x1258(%rbp), %rax
subq %rax, %rsi
movq -0x1258(%rbp), %rdx
movq -0x1270(%rbp), %rcx
movq -0x12f0(%rbp), %rax
subq $0x3, %rax
movq %rdi, -0x748(%rbp)
movq %rsi, -0x750(%rbp)
movq %rdx, -0x758(%rbp)
movq %rcx, -0x760(%rbp)
movl $0x0, -0x764(%rbp)
movq %rax, -0x770(%rbp)
movq -0x760(%rbp), %rax
addq $-0x20, %rax
movq %rax, -0x778(%rbp)
movq -0x758(%rbp), %rax
addq -0x750(%rbp), %rax
movq %rax, -0x780(%rbp)
movq -0x780(%rbp), %rax
cmpq -0x778(%rbp), %rax
ja 0x9e34a7
movq -0x748(%rbp), %rax
movq 0x18(%rax), %rdi
movq -0x758(%rbp), %rsi
callq 0x9fbd80
cmpq $0x10, -0x750(%rbp)
jbe 0x9e34a5
movq -0x748(%rbp), %rax
movq 0x18(%rax), %rdx
addq $0x10, %rdx
movq -0x758(%rbp), %rcx
addq $0x10, %rcx
movq -0x750(%rbp), %rax
subq $0x10, %rax
movq %rdx, -0x288(%rbp)
movq %rcx, -0x290(%rbp)
movq %rax, -0x298(%rbp)
movl $0x0, -0x29c(%rbp)
movq -0x288(%rbp), %rax
movq -0x290(%rbp), %rcx
subq %rcx, %rax
movq %rax, -0x2a8(%rbp)
movq -0x290(%rbp), %rax
movq %rax, -0x2b0(%rbp)
movq -0x288(%rbp), %rax
movq %rax, -0x2b8(%rbp)
movq -0x2b8(%rbp), %rax
addq -0x298(%rbp), %rax
movq %rax, -0x2c0(%rbp)
cmpl $0x1, -0x29c(%rbp)
jne 0x9e33d9
cmpq $0x10, -0x2a8(%rbp)
jge 0x9e33d9
jmp 0x9e338d
movq -0x2b8(%rbp), %rdi
movq -0x2b0(%rbp), %rsi
callq 0x9fbf50
movq -0x2b8(%rbp), %rax
addq $0x8, %rax
movq %rax, -0x2b8(%rbp)
movq -0x2b0(%rbp), %rax
addq $0x8, %rax
movq %rax, -0x2b0(%rbp)
movq -0x2b8(%rbp), %rax
cmpq -0x2c0(%rbp), %rax
jb 0x9e338d
jmp 0x9e34a3
movq -0x2b8(%rbp), %rdi
movq -0x2b0(%rbp), %rsi
callq 0x9fbd80
movl $0x10, %eax
cmpq -0x298(%rbp), %rax
jl 0x9e33ff
jmp 0x9e34a3
movq -0x2b8(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x2b8(%rbp)
movq -0x2b0(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x2b0(%rbp)
movq -0x2b8(%rbp), %rdi
movq -0x2b0(%rbp), %rsi
callq 0x9fbd80
movq -0x2b8(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x2b8(%rbp)
movq -0x2b0(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x2b0(%rbp)
movq -0x2b8(%rbp), %rdi
movq -0x2b0(%rbp), %rsi
callq 0x9fbd80
movq -0x2b8(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x2b8(%rbp)
movq -0x2b0(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x2b0(%rbp)
movq -0x2b8(%rbp), %rax
cmpq -0x2c0(%rbp), %rax
jb 0x9e3423
jmp 0x9e34a3
jmp 0x9e34a5
jmp 0x9e34cc
movq -0x748(%rbp), %rax
movq 0x18(%rax), %rdi
movq -0x758(%rbp), %rsi
movq -0x780(%rbp), %rdx
movq -0x778(%rbp), %rcx
callq 0x9fbdb0
movq -0x750(%rbp), %rcx
movq -0x748(%rbp), %rax
addq 0x18(%rax), %rcx
movq %rcx, 0x18(%rax)
cmpq $0xffff, -0x750(%rbp) # imm = 0xFFFF
jbe 0x9e3525
movq -0x748(%rbp), %rax
movl $0x1, 0x48(%rax)
movq -0x748(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x748(%rbp), %rcx
movq (%rcx), %rcx
subq %rcx, %rax
sarq $0x3, %rax
movl %eax, %ecx
movq -0x748(%rbp), %rax
movl %ecx, 0x4c(%rax)
movq -0x750(%rbp), %rax
movw %ax, %cx
movq -0x748(%rbp), %rax
movq 0x8(%rax), %rax
movw %cx, 0x4(%rax)
movl -0x764(%rbp), %ecx
addl $0x1, %ecx
movq -0x748(%rbp), %rax
movq 0x8(%rax), %rax
movl %ecx, (%rax)
cmpq $0xffff, -0x770(%rbp) # imm = 0xFFFF
jbe 0x9e3597
movq -0x748(%rbp), %rax
movl $0x2, 0x48(%rax)
movq -0x748(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x748(%rbp), %rcx
movq (%rcx), %rcx
subq %rcx, %rax
sarq $0x3, %rax
movl %eax, %ecx
movq -0x748(%rbp), %rax
movl %ecx, 0x4c(%rax)
movq -0x770(%rbp), %rax
movw %ax, %cx
movq -0x748(%rbp), %rax
movq 0x8(%rax), %rax
movw %cx, 0x6(%rax)
movq -0x748(%rbp), %rax
movq 0x8(%rax), %rcx
addq $0x8, %rcx
movq %rcx, 0x8(%rax)
jmp 0x9e4898
cmpl $0x0, -0x1210(%rbp)
jne 0x9e3a36
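# Mode-0 repeat check at ip+1: if the first repeat offset is nonzero and the 4 bytes at
# ip+1 equal those at ip+1 minus that offset, count the full match length, advance ip
# by one (the byte at the old ip joins the literal run), and store a sequence with
# offset code 1 through the same inline sequence store as before.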
cmpl $0x0, -0x127c(%rbp)
seta %al
andb $0x1, %al
movzbl %al, %eax
movl %eax, -0x1b74(%rbp)
movq -0x1250(%rbp), %rdi
addq $0x1, %rdi
movl -0x127c(%rbp), %eax
movl %eax, %ecx
xorl %eax, %eax
subq %rcx, %rax
addq %rax, %rdi
callq 0x9fb8d0
movl %eax, -0x1b78(%rbp)
movq -0x1250(%rbp), %rdi
addq $0x1, %rdi
callq 0x9fb8d0
movl -0x1b78(%rbp), %ecx
movl %eax, %edx
movl -0x1b74(%rbp), %eax
cmpl %edx, %ecx
sete %cl
andb $0x1, %cl
movzbl %cl, %ecx
andl %ecx, %eax
cmpl $0x0, %eax
je 0x9e3a36
movq -0x1250(%rbp), %rdi
addq $0x1, %rdi
addq $0x4, %rdi
movq -0x1250(%rbp), %rsi
addq $0x1, %rsi
addq $0x4, %rsi
movl -0x127c(%rbp), %eax
movl %eax, %ecx
xorl %eax, %eax
subq %rcx, %rax
addq %rax, %rsi
movq -0x1270(%rbp), %rdx
callq 0x9fbb90
addq $0x4, %rax
movq %rax, -0x12f0(%rbp)
movq -0x1250(%rbp), %rax
addq $0x1, %rax
movq %rax, -0x1250(%rbp)
movq -0x11f0(%rbp), %rdi
movq -0x1250(%rbp), %rsi
movq -0x1258(%rbp), %rax
subq %rax, %rsi
movq -0x1258(%rbp), %rdx
movq -0x1270(%rbp), %rcx
movq -0x12f0(%rbp), %rax
subq $0x3, %rax
movq %rdi, -0x708(%rbp)
movq %rsi, -0x710(%rbp)
movq %rdx, -0x718(%rbp)
movq %rcx, -0x720(%rbp)
movl $0x0, -0x724(%rbp)
movq %rax, -0x730(%rbp)
movq -0x720(%rbp), %rax
addq $-0x20, %rax
movq %rax, -0x738(%rbp)
movq -0x718(%rbp), %rax
addq -0x710(%rbp), %rax
movq %rax, -0x740(%rbp)
movq -0x740(%rbp), %rax
cmpq -0x738(%rbp), %rax
ja 0x9e3915
movq -0x708(%rbp), %rax
movq 0x18(%rax), %rdi
movq -0x718(%rbp), %rsi
callq 0x9fbd80
cmpq $0x10, -0x710(%rbp)
jbe 0x9e3913
movq -0x708(%rbp), %rax
movq 0x18(%rax), %rdx
addq $0x10, %rdx
movq -0x718(%rbp), %rcx
addq $0x10, %rcx
movq -0x710(%rbp), %rax
subq $0x10, %rax
movq %rdx, -0x2c8(%rbp)
movq %rcx, -0x2d0(%rbp)
movq %rax, -0x2d8(%rbp)
movl $0x0, -0x2dc(%rbp)
movq -0x2c8(%rbp), %rax
movq -0x2d0(%rbp), %rcx
subq %rcx, %rax
movq %rax, -0x2e8(%rbp)
movq -0x2d0(%rbp), %rax
movq %rax, -0x2f0(%rbp)
movq -0x2c8(%rbp), %rax
movq %rax, -0x2f8(%rbp)
movq -0x2f8(%rbp), %rax
addq -0x2d8(%rbp), %rax
movq %rax, -0x300(%rbp)
cmpl $0x1, -0x2dc(%rbp)
jne 0x9e3847
cmpq $0x10, -0x2e8(%rbp)
jge 0x9e3847
jmp 0x9e37fb
movq -0x2f8(%rbp), %rdi
movq -0x2f0(%rbp), %rsi
callq 0x9fbf50
movq -0x2f8(%rbp), %rax
addq $0x8, %rax
movq %rax, -0x2f8(%rbp)
movq -0x2f0(%rbp), %rax
addq $0x8, %rax
movq %rax, -0x2f0(%rbp)
movq -0x2f8(%rbp), %rax
cmpq -0x300(%rbp), %rax
jb 0x9e37fb
jmp 0x9e3911
movq -0x2f8(%rbp), %rdi
movq -0x2f0(%rbp), %rsi
callq 0x9fbd80
movl $0x10, %eax
cmpq -0x2d8(%rbp), %rax
jl 0x9e386d
jmp 0x9e3911
movq -0x2f8(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x2f8(%rbp)
movq -0x2f0(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x2f0(%rbp)
movq -0x2f8(%rbp), %rdi
movq -0x2f0(%rbp), %rsi
callq 0x9fbd80
movq -0x2f8(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x2f8(%rbp)
movq -0x2f0(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x2f0(%rbp)
movq -0x2f8(%rbp), %rdi
movq -0x2f0(%rbp), %rsi
callq 0x9fbd80
movq -0x2f8(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x2f8(%rbp)
movq -0x2f0(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x2f0(%rbp)
movq -0x2f8(%rbp), %rax
cmpq -0x300(%rbp), %rax
jb 0x9e3891
jmp 0x9e3911
jmp 0x9e3913
jmp 0x9e393a
movq -0x708(%rbp), %rax
movq 0x18(%rax), %rdi
movq -0x718(%rbp), %rsi
movq -0x740(%rbp), %rdx
movq -0x738(%rbp), %rcx
callq 0x9fbdb0
movq -0x710(%rbp), %rcx
movq -0x708(%rbp), %rax
addq 0x18(%rax), %rcx
movq %rcx, 0x18(%rax)
cmpq $0xffff, -0x710(%rbp) # imm = 0xFFFF
jbe 0x9e3993
movq -0x708(%rbp), %rax
movl $0x1, 0x48(%rax)
movq -0x708(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x708(%rbp), %rcx
movq (%rcx), %rcx
subq %rcx, %rax
sarq $0x3, %rax
movl %eax, %ecx
movq -0x708(%rbp), %rax
movl %ecx, 0x4c(%rax)
movq -0x710(%rbp), %rax
movw %ax, %cx
movq -0x708(%rbp), %rax
movq 0x8(%rax), %rax
movw %cx, 0x4(%rax)
movl -0x724(%rbp), %ecx
addl $0x1, %ecx
movq -0x708(%rbp), %rax
movq 0x8(%rax), %rax
movl %ecx, (%rax)
cmpq $0xffff, -0x730(%rbp) # imm = 0xFFFF
jbe 0x9e3a05
movq -0x708(%rbp), %rax
movl $0x2, 0x48(%rax)
movq -0x708(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x708(%rbp), %rcx
movq (%rcx), %rcx
subq %rcx, %rax
sarq $0x3, %rax
movl %eax, %ecx
movq -0x708(%rbp), %rax
movl %ecx, 0x4c(%rax)
movq -0x730(%rbp), %rax
movw %ax, %cx
movq -0x708(%rbp), %rax
movq 0x8(%rax), %rax
movw %cx, 0x6(%rax)
movq -0x708(%rbp), %rax
movq 0x8(%rax), %rcx
addq $0x8, %rcx
movq %rcx, 0x8(%rax)
jmp 0x9e4898
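# Long-table candidate: the same 8-byte probe, length count and backward extension as
# in the first variant above, applied to the candidate loaded from the long-range table.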
movl -0x1320(%rbp), %eax
cmpl -0x1260(%rbp), %eax
jbe 0x9e3b6b
movq -0x1330(%rbp), %rdi
callq 0x9fb980
movq %rax, -0x1b80(%rbp)
movq -0x1250(%rbp), %rdi
callq 0x9fb980
movq %rax, %rcx
movq -0x1b80(%rbp), %rax
cmpq %rcx, %rax
jne 0x9e3b66
movq -0x1250(%rbp), %rdi
addq $0x8, %rdi
movq -0x1330(%rbp), %rsi
addq $0x8, %rsi
movq -0x1270(%rbp), %rdx
callq 0x9fbb90
addq $0x8, %rax
movq %rax, -0x12f0(%rbp)
movq -0x1250(%rbp), %rax
movq -0x1330(%rbp), %rcx
subq %rcx, %rax
movl %eax, -0x12f4(%rbp)
movq -0x1250(%rbp), %rax
cmpq -0x1258(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %ecx
movq -0x1330(%rbp), %rax
cmpq -0x1268(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %eax
andl %eax, %ecx
xorl %eax, %eax
cmpl $0x0, %ecx
movb %al, -0x1b81(%rbp)
je 0x9e3b1a
movq -0x1250(%rbp), %rax
movzbl -0x1(%rax), %eax
movq -0x1330(%rbp), %rcx
movzbl -0x1(%rcx), %ecx
cmpl %ecx, %eax
sete %al
movb %al, -0x1b81(%rbp)
movb -0x1b81(%rbp), %al
testb $0x1, %al
jne 0x9e3b26
jmp 0x9e3b61
movq -0x1250(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0x1250(%rbp)
movq -0x1330(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0x1330(%rbp)
movq -0x12f0(%rbp), %rax
addq $0x1, %rax
movq %rax, -0x12f0(%rbp)
jmp 0x9e3abe
jmp 0x9e44e5
jmp 0x9e3ceb
cmpl $0x2, -0x1210(%rbp)
jne 0x9e3ce9
movq -0x12a0(%rbp), %rax
movq -0x1310(%rbp), %rcx
movl (%rax,%rcx,4), %eax
movl %eax, -0x1354(%rbp)
movq -0x12b8(%rbp), %rax
movl -0x1354(%rbp), %ecx
addq %rcx, %rax
movq %rax, -0x1360(%rbp)
movq -0x1360(%rbp), %rax
cmpq -0x12c0(%rbp), %rax
jbe 0x9e3ce7
movq -0x1360(%rbp), %rdi
callq 0x9fb980
movq %rax, -0x1b90(%rbp)
movq -0x1250(%rbp), %rdi
callq 0x9fb980
movq %rax, %rcx
movq -0x1b90(%rbp), %rax
cmpq %rcx, %rax
jne 0x9e3ce7
movq -0x1250(%rbp), %rdi
addq $0x8, %rdi
movq -0x1360(%rbp), %rsi
addq $0x8, %rsi
movq -0x1270(%rbp), %rdx
movq -0x12c8(%rbp), %rcx
movq -0x1268(%rbp), %r8
callq 0x9fbad0
addq $0x8, %rax
movq %rax, -0x12f0(%rbp)
movl -0x131c(%rbp), %eax
subl -0x1354(%rbp), %eax
subl -0x12cc(%rbp), %eax
movl %eax, -0x12f4(%rbp)
movq -0x1250(%rbp), %rax
cmpq -0x1258(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %ecx
movq -0x1360(%rbp), %rax
cmpq -0x12c0(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %eax
andl %eax, %ecx
xorl %eax, %eax
cmpl $0x0, %ecx
movb %al, -0x1b91(%rbp)
je 0x9e3c9b
movq -0x1250(%rbp), %rax
movzbl -0x1(%rax), %eax
movq -0x1360(%rbp), %rcx
movzbl -0x1(%rcx), %ecx
cmpl %ecx, %eax
sete %al
movb %al, -0x1b91(%rbp)
movb -0x1b91(%rbp), %al
testb $0x1, %al
jne 0x9e3ca7
jmp 0x9e3ce2
movq -0x1250(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0x1250(%rbp)
movq -0x1360(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0x1360(%rbp)
movq -0x12f0(%rbp), %rax
addq $0x1, %rax
movq %rax, -0x12f0(%rbp)
jmp 0x9e3c3f
jmp 0x9e44e5
jmp 0x9e3ce9
jmp 0x9e3ceb
movl -0x1324(%rbp), %eax
cmpl -0x1260(%rbp), %eax
jbe 0x9e3d2d
movq -0x1338(%rbp), %rdi
callq 0x9fb8d0
movl %eax, -0x1b98(%rbp)
movq -0x1250(%rbp), %rdi
callq 0x9fb8d0
movl %eax, %ecx
movl -0x1b98(%rbp), %eax
cmpl %ecx, %eax
jne 0x9e3d28
jmp 0x9e3de2
jmp 0x9e3db6
cmpl $0x2, -0x1210(%rbp)
jne 0x9e3db4
movq -0x12a8(%rbp), %rax
movq -0x1318(%rbp), %rcx
movl (%rax,%rcx,4), %eax
movl %eax, -0x1364(%rbp)
movq -0x12b8(%rbp), %rax
movl -0x1364(%rbp), %ecx
addq %rcx, %rax
movq %rax, -0x1338(%rbp)
movl -0x1364(%rbp), %eax
addl -0x12cc(%rbp), %eax
movl %eax, -0x1324(%rbp)
movq -0x1338(%rbp), %rax
cmpq -0x12c0(%rbp), %rax
jbe 0x9e3db2
movq -0x1338(%rbp), %rdi
callq 0x9fb8d0
movl %eax, -0x1b9c(%rbp)
movq -0x1250(%rbp), %rdi
callq 0x9fb8d0
movl %eax, %ecx
movl -0x1b9c(%rbp), %eax
cmpl %ecx, %eax
jne 0x9e3db2
jmp 0x9e3de2
jmp 0x9e3db4
jmp 0x9e3db6
movq -0x1250(%rbp), %rax
movq -0x1258(%rbp), %rcx
subq %rcx, %rax
sarq $0x8, %rax
addq $0x1, %rax
addq -0x1250(%rbp), %rax
movq %rax, -0x1250(%rbp)
jmp 0x9e2c8a
movq -0x1250(%rbp), %rcx
incq %rcx
movl -0x1224(%rbp), %eax
movq %rcx, -0x1100(%rbp)
movl %eax, -0x1104(%rbp)
movl $0x8, -0x1108(%rbp)
movl -0x1108(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1ba8(%rbp)
subl $0x4, %eax
ja 0x9e3e37
movq -0x1ba8(%rbp), %rax
leaq 0x1f269e(%rip), %rcx # 0xbd64cc
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9e3e39
movq -0x1100(%rbp), %rdi
movl -0x1104(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0x10f8(%rbp)
jmp 0x9e3ebe
movq -0x1100(%rbp), %rdi
movl -0x1104(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0x10f8(%rbp)
jmp 0x9e3ebe
movq -0x1100(%rbp), %rdi
movl -0x1104(%rbp), %esi
callq 0x9fb820
movq %rax, -0x10f8(%rbp)
jmp 0x9e3ebe
movq -0x1100(%rbp), %rdi
movl -0x1104(%rbp), %esi
callq 0x9fb850
movq %rax, -0x10f8(%rbp)
jmp 0x9e3ebe
movq -0x1100(%rbp), %rdi
movl -0x1104(%rbp), %esi
callq 0x9fb880
movq %rax, -0x10f8(%rbp)
movq -0x10f8(%rbp), %rax
movq %rax, -0x1370(%rbp)
movq -0x1250(%rbp), %rcx
incq %rcx
movl -0x12d0(%rbp), %eax
movq %rcx, -0x1118(%rbp)
movl %eax, -0x111c(%rbp)
movl $0x8, -0x1120(%rbp)
movl -0x1120(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1bb0(%rbp)
subl $0x4, %eax
ja 0x9e3f21
movq -0x1bb0(%rbp), %rax
leaq 0x1f25c8(%rip), %rcx # 0xbd64e0
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9e3f23
movq -0x1118(%rbp), %rdi
movl -0x111c(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0x1110(%rbp)
jmp 0x9e3fa8
movq -0x1118(%rbp), %rdi
movl -0x111c(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0x1110(%rbp)
jmp 0x9e3fa8
movq -0x1118(%rbp), %rdi
movl -0x111c(%rbp), %esi
callq 0x9fb820
movq %rax, -0x1110(%rbp)
jmp 0x9e3fa8
movq -0x1118(%rbp), %rdi
movl -0x111c(%rbp), %esi
callq 0x9fb850
movq %rax, -0x1110(%rbp)
jmp 0x9e3fa8
movq -0x1118(%rbp), %rdi
movl -0x111c(%rbp), %esi
callq 0x9fb880
movq %rax, -0x1110(%rbp)
movq -0x1110(%rbp), %rax
movq %rax, -0x1378(%rbp)
movq -0x1220(%rbp), %rax
movq -0x1370(%rbp), %rcx
movl (%rax,%rcx,4), %eax
movl %eax, -0x137c(%rbp)
movq -0x1240(%rbp), %rax
movl -0x137c(%rbp), %ecx
addq %rcx, %rax
movq %rax, -0x1388(%rbp)
movl -0x131c(%rbp), %edx
addl $0x1, %edx
movq -0x1220(%rbp), %rax
movq -0x1370(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
movl -0x137c(%rbp), %eax
cmpl -0x1260(%rbp), %eax
jbe 0x9e4149
movq -0x1388(%rbp), %rdi
callq 0x9fb980
movq %rax, -0x1bb8(%rbp)
movq -0x1250(%rbp), %rdi
addq $0x1, %rdi
callq 0x9fb980
movq %rax, %rcx
movq -0x1bb8(%rbp), %rax
cmpq %rcx, %rax
jne 0x9e4144
movq -0x1250(%rbp), %rdi
addq $0x9, %rdi
movq -0x1388(%rbp), %rsi
addq $0x8, %rsi
movq -0x1270(%rbp), %rdx
callq 0x9fbb90
addq $0x8, %rax
movq %rax, -0x12f0(%rbp)
movq -0x1250(%rbp), %rax
addq $0x1, %rax
movq %rax, -0x1250(%rbp)
movq -0x1250(%rbp), %rax
movq -0x1388(%rbp), %rcx
subq %rcx, %rax
movl %eax, -0x12f4(%rbp)
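# Interpretation (assumption): the loop below extends a match backwards one byte at a
# time -- while both pointers (-0x1250 and -0x1388) stay above their lower bounds and the
# bytes just before them compare equal, both are decremented and the length counter in
# -0x12f0(%rbp) is incremented. Equivalent backward-extension loops repeat further down.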
movq -0x1250(%rbp), %rax
cmpq -0x1258(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %ecx
movq -0x1388(%rbp), %rax
cmpq -0x1268(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %eax
andl %eax, %ecx
xorl %eax, %eax
cmpl $0x0, %ecx
movb %al, -0x1bb9(%rbp)
je 0x9e40f8
movq -0x1250(%rbp), %rax
movzbl -0x1(%rax), %eax
movq -0x1388(%rbp), %rcx
movzbl -0x1(%rcx), %ecx
cmpl %ecx, %eax
sete %al
movb %al, -0x1bb9(%rbp)
movb -0x1bb9(%rbp), %al
testb $0x1, %al
jne 0x9e4104
jmp 0x9e413f
movq -0x1250(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0x1250(%rbp)
movq -0x1388(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0x1388(%rbp)
movq -0x12f0(%rbp), %rax
addq $0x1, %rax
movq %rax, -0x12f0(%rbp)
jmp 0x9e409c
jmp 0x9e44e5
jmp 0x9e42e6
cmpl $0x2, -0x1210(%rbp)
jne 0x9e42e4
movq -0x12a0(%rbp), %rax
movq -0x1378(%rbp), %rcx
movl (%rax,%rcx,4), %eax
movl %eax, -0x138c(%rbp)
movq -0x12b8(%rbp), %rax
movl -0x138c(%rbp), %ecx
addq %rcx, %rax
movq %rax, -0x1398(%rbp)
movq -0x1398(%rbp), %rax
cmpq -0x12c0(%rbp), %rax
jbe 0x9e42e2
movq -0x1398(%rbp), %rdi
callq 0x9fb980
movq %rax, -0x1bc8(%rbp)
movq -0x1250(%rbp), %rdi
addq $0x1, %rdi
callq 0x9fb980
movq %rax, %rcx
movq -0x1bc8(%rbp), %rax
cmpq %rcx, %rax
jne 0x9e42e2
movq -0x1250(%rbp), %rdi
addq $0x1, %rdi
addq $0x8, %rdi
movq -0x1398(%rbp), %rsi
addq $0x8, %rsi
movq -0x1270(%rbp), %rdx
movq -0x12c8(%rbp), %rcx
movq -0x1268(%rbp), %r8
callq 0x9fbad0
addq $0x8, %rax
movq %rax, -0x12f0(%rbp)
movq -0x1250(%rbp), %rax
addq $0x1, %rax
movq %rax, -0x1250(%rbp)
movl -0x131c(%rbp), %eax
addl $0x1, %eax
subl -0x138c(%rbp), %eax
subl -0x12cc(%rbp), %eax
movl %eax, -0x12f4(%rbp)
movq -0x1250(%rbp), %rax
cmpq -0x1258(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %ecx
movq -0x1398(%rbp), %rax
cmpq -0x12c0(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %eax
andl %eax, %ecx
xorl %eax, %eax
cmpl $0x0, %ecx
movb %al, -0x1bc9(%rbp)
je 0x9e4296
movq -0x1250(%rbp), %rax
movzbl -0x1(%rax), %eax
movq -0x1398(%rbp), %rcx
movzbl -0x1(%rcx), %ecx
cmpl %ecx, %eax
sete %al
movb %al, -0x1bc9(%rbp)
movb -0x1bc9(%rbp), %al
testb $0x1, %al
jne 0x9e42a2
jmp 0x9e42dd
movq -0x1250(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0x1250(%rbp)
movq -0x1398(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0x1398(%rbp)
movq -0x12f0(%rbp), %rax
addq $0x1, %rax
movq %rax, -0x12f0(%rbp)
jmp 0x9e423a
jmp 0x9e44e5
jmp 0x9e42e4
jmp 0x9e42e6
cmpl $0x2, -0x1210(%rbp)
jne 0x9e43fa
movl -0x1324(%rbp), %eax
cmpl -0x1260(%rbp), %eax
jae 0x9e43fa
movq -0x1250(%rbp), %rdi
addq $0x4, %rdi
movq -0x1338(%rbp), %rsi
addq $0x4, %rsi
movq -0x1270(%rbp), %rdx
movq -0x12c8(%rbp), %rcx
movq -0x1268(%rbp), %r8
callq 0x9fbad0
addq $0x4, %rax
movq %rax, -0x12f0(%rbp)
movl -0x131c(%rbp), %eax
subl -0x1324(%rbp), %eax
movl %eax, -0x12f4(%rbp)
movq -0x1250(%rbp), %rax
cmpq -0x1258(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %ecx
movq -0x1338(%rbp), %rax
cmpq -0x12c0(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %eax
andl %eax, %ecx
xorl %eax, %eax
cmpl $0x0, %ecx
movb %al, -0x1bca(%rbp)
je 0x9e43ae
movq -0x1250(%rbp), %rax
movzbl -0x1(%rax), %eax
movq -0x1338(%rbp), %rcx
movzbl -0x1(%rcx), %ecx
cmpl %ecx, %eax
sete %al
movb %al, -0x1bca(%rbp)
movb -0x1bca(%rbp), %al
testb $0x1, %al
jne 0x9e43ba
jmp 0x9e43f5
movq -0x1250(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0x1250(%rbp)
movq -0x1338(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0x1338(%rbp)
movq -0x12f0(%rbp), %rax
addq $0x1, %rax
movq %rax, -0x12f0(%rbp)
jmp 0x9e4352
jmp 0x9e44e3
movq -0x1250(%rbp), %rdi
addq $0x4, %rdi
movq -0x1338(%rbp), %rsi
addq $0x4, %rsi
movq -0x1270(%rbp), %rdx
callq 0x9fbb90
addq $0x4, %rax
movq %rax, -0x12f0(%rbp)
movq -0x1250(%rbp), %rax
movq -0x1338(%rbp), %rcx
subq %rcx, %rax
movl %eax, -0x12f4(%rbp)
movq -0x1250(%rbp), %rax
cmpq -0x1258(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %ecx
movq -0x1338(%rbp), %rax
cmpq -0x1268(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %eax
andl %eax, %ecx
xorl %eax, %eax
cmpl $0x0, %ecx
movb %al, -0x1bcb(%rbp)
je 0x9e449a
movq -0x1250(%rbp), %rax
movzbl -0x1(%rax), %eax
movq -0x1338(%rbp), %rcx
movzbl -0x1(%rcx), %ecx
cmpl %ecx, %eax
sete %al
movb %al, -0x1bcb(%rbp)
movb -0x1bcb(%rbp), %al
testb $0x1, %al
jne 0x9e44a6
jmp 0x9e44e1
movq -0x1250(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0x1250(%rbp)
movq -0x1338(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0x1338(%rbp)
movq -0x12f0(%rbp), %rax
addq $0x1, %rax
movq %rax, -0x12f0(%rbp)
jmp 0x9e443e
jmp 0x9e44e3
jmp 0x9e44e5
movl -0x127c(%rbp), %eax
movl %eax, -0x1280(%rbp)
movl -0x12f4(%rbp), %eax
movl %eax, -0x127c(%rbp)
movq -0x11f0(%rbp), %r8
movq -0x1250(%rbp), %rdi
movq -0x1258(%rbp), %rax
subq %rax, %rdi
movq -0x1258(%rbp), %rsi
movq -0x1270(%rbp), %rdx
movl -0x12f4(%rbp), %ecx
addl $0x2, %ecx
movq -0x12f0(%rbp), %rax
subq $0x3, %rax
movq %r8, -0x648(%rbp)
movq %rdi, -0x650(%rbp)
movq %rsi, -0x658(%rbp)
movq %rdx, -0x660(%rbp)
movl %ecx, -0x664(%rbp)
movq %rax, -0x670(%rbp)
movq -0x660(%rbp), %rax
addq $-0x20, %rax
movq %rax, -0x678(%rbp)
movq -0x658(%rbp), %rax
addq -0x650(%rbp), %rax
movq %rax, -0x680(%rbp)
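# Interpretation (assumption): the code below is an inlined bounded copy. On the fast path
# it calls helpers on (destination, source) pairs and advances both pointers in 8- or
# 16-byte steps until the end pointer in -0x3c0(%rbp) is reached; if the computed end
# exceeds the limit in -0x678(%rbp), it falls back to the bounded-copy call at 0x9fbdb0.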
movq -0x680(%rbp), %rax
cmpq -0x678(%rbp), %rax
ja 0x9e477c
movq -0x648(%rbp), %rax
movq 0x18(%rax), %rdi
movq -0x658(%rbp), %rsi
callq 0x9fbd80
cmpq $0x10, -0x650(%rbp)
jbe 0x9e477a
movq -0x648(%rbp), %rax
movq 0x18(%rax), %rdx
addq $0x10, %rdx
movq -0x658(%rbp), %rcx
addq $0x10, %rcx
movq -0x650(%rbp), %rax
subq $0x10, %rax
movq %rdx, -0x388(%rbp)
movq %rcx, -0x390(%rbp)
movq %rax, -0x398(%rbp)
movl $0x0, -0x39c(%rbp)
movq -0x388(%rbp), %rax
movq -0x390(%rbp), %rcx
subq %rcx, %rax
movq %rax, -0x3a8(%rbp)
movq -0x390(%rbp), %rax
movq %rax, -0x3b0(%rbp)
movq -0x388(%rbp), %rax
movq %rax, -0x3b8(%rbp)
movq -0x3b8(%rbp), %rax
addq -0x398(%rbp), %rax
movq %rax, -0x3c0(%rbp)
cmpl $0x1, -0x39c(%rbp)
jne 0x9e46ae
cmpq $0x10, -0x3a8(%rbp)
jge 0x9e46ae
jmp 0x9e4662
movq -0x3b8(%rbp), %rdi
movq -0x3b0(%rbp), %rsi
callq 0x9fbf50
movq -0x3b8(%rbp), %rax
addq $0x8, %rax
movq %rax, -0x3b8(%rbp)
movq -0x3b0(%rbp), %rax
addq $0x8, %rax
movq %rax, -0x3b0(%rbp)
movq -0x3b8(%rbp), %rax
cmpq -0x3c0(%rbp), %rax
jb 0x9e4662
jmp 0x9e4778
movq -0x3b8(%rbp), %rdi
movq -0x3b0(%rbp), %rsi
callq 0x9fbd80
movl $0x10, %eax
cmpq -0x398(%rbp), %rax
jl 0x9e46d4
jmp 0x9e4778
movq -0x3b8(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x3b8(%rbp)
movq -0x3b0(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x3b0(%rbp)
movq -0x3b8(%rbp), %rdi
movq -0x3b0(%rbp), %rsi
callq 0x9fbd80
movq -0x3b8(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x3b8(%rbp)
movq -0x3b0(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x3b0(%rbp)
movq -0x3b8(%rbp), %rdi
movq -0x3b0(%rbp), %rsi
callq 0x9fbd80
movq -0x3b8(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x3b8(%rbp)
movq -0x3b0(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x3b0(%rbp)
movq -0x3b8(%rbp), %rax
cmpq -0x3c0(%rbp), %rax
jb 0x9e46f8
jmp 0x9e4778
jmp 0x9e477a
jmp 0x9e47a1
movq -0x648(%rbp), %rax
movq 0x18(%rax), %rdi
movq -0x658(%rbp), %rsi
movq -0x680(%rbp), %rdx
movq -0x678(%rbp), %rcx
callq 0x9fbdb0
movq -0x650(%rbp), %rcx
movq -0x648(%rbp), %rax
addq 0x18(%rax), %rcx
movq %rcx, 0x18(%rax)
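# Interpretation (assumption): the stores below pack a record through the pointer held in
# 0x8(%rax) -- a 32-bit field at offset 0 and 16-bit fields at offsets 4 and 6 -- then
# advance that pointer by 8. The 0xFFFF comparisons set a flag at offset 0x48 and an index
# at offset 0x4c when either 16-bit field would overflow its range.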
cmpq $0xffff, -0x650(%rbp) # imm = 0xFFFF
jbe 0x9e47fa
movq -0x648(%rbp), %rax
movl $0x1, 0x48(%rax)
movq -0x648(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x648(%rbp), %rcx
movq (%rcx), %rcx
subq %rcx, %rax
sarq $0x3, %rax
movl %eax, %ecx
movq -0x648(%rbp), %rax
movl %ecx, 0x4c(%rax)
movq -0x650(%rbp), %rax
movw %ax, %cx
movq -0x648(%rbp), %rax
movq 0x8(%rax), %rax
movw %cx, 0x4(%rax)
movl -0x664(%rbp), %ecx
addl $0x1, %ecx
movq -0x648(%rbp), %rax
movq 0x8(%rax), %rax
movl %ecx, (%rax)
cmpq $0xffff, -0x670(%rbp) # imm = 0xFFFF
jbe 0x9e486c
movq -0x648(%rbp), %rax
movl $0x2, 0x48(%rax)
movq -0x648(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x648(%rbp), %rcx
movq (%rcx), %rcx
subq %rcx, %rax
sarq $0x3, %rax
movl %eax, %ecx
movq -0x648(%rbp), %rax
movl %ecx, 0x4c(%rax)
movq -0x670(%rbp), %rax
movw %ax, %cx
movq -0x648(%rbp), %rax
movq 0x8(%rax), %rax
movw %cx, 0x6(%rax)
movq -0x648(%rbp), %rax
movq 0x8(%rax), %rcx
addq $0x8, %rcx
movq %rcx, 0x8(%rax)
movq -0x12f0(%rbp), %rax
addq -0x1250(%rbp), %rax
movq %rax, -0x1250(%rbp)
movq -0x1250(%rbp), %rax
movq %rax, -0x1258(%rbp)
movq -0x1250(%rbp), %rax
cmpq -0x1278(%rbp), %rax
ja 0x9e5b40
movl -0x131c(%rbp), %eax
addl $0x2, %eax
movl %eax, -0x139c(%rbp)
movl -0x139c(%rbp), %eax
movl %eax, %ecx
movl %ecx, -0x1be4(%rbp)
movq -0x1220(%rbp), %rcx
movq %rcx, -0x1be0(%rbp)
movq -0x1240(%rbp), %rcx
addq %rax, %rcx
movl -0x1224(%rbp), %eax
movq %rcx, -0x1130(%rbp)
movl %eax, -0x1134(%rbp)
movl $0x8, -0x1138(%rbp)
movl -0x1138(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1bd8(%rbp)
subl $0x4, %eax
ja 0x9e494f
movq -0x1bd8(%rbp), %rax
leaq 0x1f1bae(%rip), %rcx # 0xbd64f4
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9e4951
movq -0x1130(%rbp), %rdi
movl -0x1134(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0x1128(%rbp)
jmp 0x9e49d6
movq -0x1130(%rbp), %rdi
movl -0x1134(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0x1128(%rbp)
jmp 0x9e49d6
movq -0x1130(%rbp), %rdi
movl -0x1134(%rbp), %esi
callq 0x9fb820
movq %rax, -0x1128(%rbp)
jmp 0x9e49d6
movq -0x1130(%rbp), %rdi
movl -0x1134(%rbp), %esi
callq 0x9fb850
movq %rax, -0x1128(%rbp)
jmp 0x9e49d6
movq -0x1130(%rbp), %rdi
movl -0x1134(%rbp), %esi
callq 0x9fb880
movq %rax, -0x1128(%rbp)
movq -0x1be0(%rbp), %rax
movl -0x1be4(%rbp), %edx
movq -0x1128(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
movq -0x1250(%rbp), %rcx
addq $-0x2, %rcx
movl -0x1240(%rbp), %edx
movl %ecx, %eax
subl %edx, %eax
movl %eax, -0x1bfc(%rbp)
movq -0x1220(%rbp), %rax
movq %rax, -0x1bf8(%rbp)
movl -0x1224(%rbp), %eax
movq %rcx, -0x1148(%rbp)
movl %eax, -0x114c(%rbp)
movl $0x8, -0x1150(%rbp)
movl -0x1150(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1bf0(%rbp)
subl $0x4, %eax
ja 0x9e4a61
movq -0x1bf0(%rbp), %rax
leaq 0x1f1ab0(%rip), %rcx # 0xbd6508
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9e4a63
movq -0x1148(%rbp), %rdi
movl -0x114c(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0x1140(%rbp)
jmp 0x9e4ae8
movq -0x1148(%rbp), %rdi
movl -0x114c(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0x1140(%rbp)
jmp 0x9e4ae8
movq -0x1148(%rbp), %rdi
movl -0x114c(%rbp), %esi
callq 0x9fb820
movq %rax, -0x1140(%rbp)
jmp 0x9e4ae8
movq -0x1148(%rbp), %rdi
movl -0x114c(%rbp), %esi
callq 0x9fb850
movq %rax, -0x1140(%rbp)
jmp 0x9e4ae8
movq -0x1148(%rbp), %rdi
movl -0x114c(%rbp), %esi
callq 0x9fb880
movq %rax, -0x1140(%rbp)
movq -0x1bf8(%rbp), %rax
movl -0x1bfc(%rbp), %edx
movq -0x1140(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
movl -0x139c(%rbp), %eax
movl %eax, %ecx
movl %ecx, -0x1c14(%rbp)
movq -0x1230(%rbp), %rcx
movq %rcx, -0x1c10(%rbp)
movq -0x1240(%rbp), %rdx
addq %rax, %rdx
movl -0x1234(%rbp), %ecx
movl -0x120c(%rbp), %eax
movq %rdx, -0x1160(%rbp)
movl %ecx, -0x1164(%rbp)
movl %eax, -0x1168(%rbp)
movl -0x1168(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1c08(%rbp)
subl $0x4, %eax
ja 0x9e4b72
movq -0x1c08(%rbp), %rax
leaq 0x1f19b3(%rip), %rcx # 0xbd651c
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9e4b74
movq -0x1160(%rbp), %rdi
movl -0x1164(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0x1158(%rbp)
jmp 0x9e4bf9
movq -0x1160(%rbp), %rdi
movl -0x1164(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0x1158(%rbp)
jmp 0x9e4bf9
movq -0x1160(%rbp), %rdi
movl -0x1164(%rbp), %esi
callq 0x9fb820
movq %rax, -0x1158(%rbp)
jmp 0x9e4bf9
movq -0x1160(%rbp), %rdi
movl -0x1164(%rbp), %esi
callq 0x9fb850
movq %rax, -0x1158(%rbp)
jmp 0x9e4bf9
movq -0x1160(%rbp), %rdi
movl -0x1164(%rbp), %esi
callq 0x9fb880
movq %rax, -0x1158(%rbp)
movq -0x1c10(%rbp), %rax
movl -0x1c14(%rbp), %edx
movq -0x1158(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
movq -0x1250(%rbp), %rdx
decq %rdx
movl -0x1240(%rbp), %ecx
movl %edx, %eax
subl %ecx, %eax
movl %eax, -0x1c2c(%rbp)
movq -0x1230(%rbp), %rax
movq %rax, -0x1c28(%rbp)
movl -0x1234(%rbp), %ecx
movl -0x120c(%rbp), %eax
movq %rdx, -0x1178(%rbp)
movl %ecx, -0x117c(%rbp)
movl %eax, -0x1180(%rbp)
movl -0x1180(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1c20(%rbp)
subl $0x4, %eax
ja 0x9e4c85
movq -0x1c20(%rbp), %rax
leaq 0x1f18b4(%rip), %rcx # 0xbd6530
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9e4c87
movq -0x1178(%rbp), %rdi
movl -0x117c(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0x1170(%rbp)
jmp 0x9e4d0c
movq -0x1178(%rbp), %rdi
movl -0x117c(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0x1170(%rbp)
jmp 0x9e4d0c
movq -0x1178(%rbp), %rdi
movl -0x117c(%rbp), %esi
callq 0x9fb820
movq %rax, -0x1170(%rbp)
jmp 0x9e4d0c
movq -0x1178(%rbp), %rdi
movl -0x117c(%rbp), %esi
callq 0x9fb850
movq %rax, -0x1170(%rbp)
jmp 0x9e4d0c
movq -0x1178(%rbp), %rdi
movl -0x117c(%rbp), %esi
callq 0x9fb880
movq %rax, -0x1170(%rbp)
movq -0x1c28(%rbp), %rax
movl -0x1c2c(%rbp), %edx
movq -0x1170(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
cmpl $0x2, -0x1210(%rbp)
jne 0x9e547b
jmp 0x9e4d32
movq -0x1250(%rbp), %rax
cmpq -0x1278(%rbp), %rax
ja 0x9e5479
movq -0x1250(%rbp), %rax
movq -0x1240(%rbp), %rcx
subq %rcx, %rax
movl %eax, -0x13a0(%rbp)
movl -0x13a0(%rbp), %eax
subl -0x1280(%rbp), %eax
movl %eax, -0x13a4(%rbp)
cmpl $0x2, -0x1210(%rbp)
jne 0x9e4daf
movl -0x13a4(%rbp), %eax
cmpl -0x1260(%rbp), %eax
jae 0x9e4daf
movq -0x12b8(%rbp), %rax
movl -0x13a4(%rbp), %ecx
addq %rcx, %rax
movl -0x12cc(%rbp), %ecx
movl %ecx, %edx
xorl %ecx, %ecx
subq %rdx, %rcx
addq %rcx, %rax
movq %rax, -0x1c38(%rbp)
jmp 0x9e4dc6
movq -0x1240(%rbp), %rax
movl -0x13a4(%rbp), %ecx
addq %rcx, %rax
movq %rax, -0x1c38(%rbp)
movq -0x1c38(%rbp), %rax
movq %rax, -0x13b0(%rbp)
movl -0x1260(%rbp), %eax
subl $0x1, %eax
subl -0x13a4(%rbp), %eax
cmpl $0x3, %eax
jb 0x9e5477
movq -0x13b0(%rbp), %rdi
callq 0x9fb8d0
movl %eax, -0x1c3c(%rbp)
movq -0x1250(%rbp), %rdi
callq 0x9fb8d0
movl %eax, %ecx
movl -0x1c3c(%rbp), %eax
cmpl %ecx, %eax
jne 0x9e5477
movl -0x13a4(%rbp), %eax
cmpl -0x1260(%rbp), %eax
jae 0x9e4e38
movq -0x12c8(%rbp), %rax
movq %rax, -0x1c48(%rbp)
jmp 0x9e4e46
movq -0x1270(%rbp), %rax
movq %rax, -0x1c48(%rbp)
movq -0x1c48(%rbp), %rax
movq %rax, -0x13b8(%rbp)
movq -0x1250(%rbp), %rdi
addq $0x4, %rdi
movq -0x13b0(%rbp), %rsi
addq $0x4, %rsi
movq -0x1270(%rbp), %rdx
movq -0x13b8(%rbp), %rcx
movq -0x1268(%rbp), %r8
callq 0x9fbad0
addq $0x4, %rax
movq %rax, -0x13c0(%rbp)
movl -0x1280(%rbp), %eax
movl %eax, -0x13c4(%rbp)
movl -0x127c(%rbp), %eax
movl %eax, -0x1280(%rbp)
movl -0x13c4(%rbp), %eax
movl %eax, -0x127c(%rbp)
movq -0x11f0(%rbp), %rsi
movq -0x1258(%rbp), %rdx
movq -0x1270(%rbp), %rcx
movq -0x13c0(%rbp), %rax
subq $0x3, %rax
movq %rsi, -0x6c8(%rbp)
movq $0x0, -0x6d0(%rbp)
movq %rdx, -0x6d8(%rbp)
movq %rcx, -0x6e0(%rbp)
movl $0x0, -0x6e4(%rbp)
movq %rax, -0x6f0(%rbp)
movq -0x6e0(%rbp), %rax
addq $-0x20, %rax
movq %rax, -0x6f8(%rbp)
movq -0x6d8(%rbp), %rax
addq -0x6d0(%rbp), %rax
movq %rax, -0x700(%rbp)
movq -0x700(%rbp), %rax
cmpq -0x6f8(%rbp), %rax
ja 0x9e5120
movq -0x6c8(%rbp), %rax
movq 0x18(%rax), %rdi
movq -0x6d8(%rbp), %rsi
callq 0x9fbd80
cmpq $0x10, -0x6d0(%rbp)
jbe 0x9e511e
movq -0x6c8(%rbp), %rax
movq 0x18(%rax), %rdx
addq $0x10, %rdx
movq -0x6d8(%rbp), %rcx
addq $0x10, %rcx
movq -0x6d0(%rbp), %rax
subq $0x10, %rax
movq %rdx, -0x308(%rbp)
movq %rcx, -0x310(%rbp)
movq %rax, -0x318(%rbp)
movl $0x0, -0x31c(%rbp)
movq -0x308(%rbp), %rax
movq -0x310(%rbp), %rcx
subq %rcx, %rax
movq %rax, -0x328(%rbp)
movq -0x310(%rbp), %rax
movq %rax, -0x330(%rbp)
movq -0x308(%rbp), %rax
movq %rax, -0x338(%rbp)
movq -0x338(%rbp), %rax
addq -0x318(%rbp), %rax
movq %rax, -0x340(%rbp)
cmpl $0x1, -0x31c(%rbp)
jne 0x9e5052
cmpq $0x10, -0x328(%rbp)
jge 0x9e5052
jmp 0x9e5006
movq -0x338(%rbp), %rdi
movq -0x330(%rbp), %rsi
callq 0x9fbf50
movq -0x338(%rbp), %rax
addq $0x8, %rax
movq %rax, -0x338(%rbp)
movq -0x330(%rbp), %rax
addq $0x8, %rax
movq %rax, -0x330(%rbp)
movq -0x338(%rbp), %rax
cmpq -0x340(%rbp), %rax
jb 0x9e5006
jmp 0x9e511c
movq -0x338(%rbp), %rdi
movq -0x330(%rbp), %rsi
callq 0x9fbd80
movl $0x10, %eax
cmpq -0x318(%rbp), %rax
jl 0x9e5078
jmp 0x9e511c
movq -0x338(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x338(%rbp)
movq -0x330(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x330(%rbp)
movq -0x338(%rbp), %rdi
movq -0x330(%rbp), %rsi
callq 0x9fbd80
movq -0x338(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x338(%rbp)
movq -0x330(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x330(%rbp)
movq -0x338(%rbp), %rdi
movq -0x330(%rbp), %rsi
callq 0x9fbd80
movq -0x338(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x338(%rbp)
movq -0x330(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x330(%rbp)
movq -0x338(%rbp), %rax
cmpq -0x340(%rbp), %rax
jb 0x9e509c
jmp 0x9e511c
jmp 0x9e511e
jmp 0x9e5145
movq -0x6c8(%rbp), %rax
movq 0x18(%rax), %rdi
movq -0x6d8(%rbp), %rsi
movq -0x700(%rbp), %rdx
movq -0x6f8(%rbp), %rcx
callq 0x9fbdb0
movq -0x6d0(%rbp), %rcx
movq -0x6c8(%rbp), %rax
addq 0x18(%rax), %rcx
movq %rcx, 0x18(%rax)
cmpq $0xffff, -0x6d0(%rbp) # imm = 0xFFFF
jbe 0x9e519e
movq -0x6c8(%rbp), %rax
movl $0x1, 0x48(%rax)
movq -0x6c8(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x6c8(%rbp), %rcx
movq (%rcx), %rcx
subq %rcx, %rax
sarq $0x3, %rax
movl %eax, %ecx
movq -0x6c8(%rbp), %rax
movl %ecx, 0x4c(%rax)
movq -0x6d0(%rbp), %rax
movw %ax, %cx
movq -0x6c8(%rbp), %rax
movq 0x8(%rax), %rax
movw %cx, 0x4(%rax)
movl -0x6e4(%rbp), %ecx
addl $0x1, %ecx
movq -0x6c8(%rbp), %rax
movq 0x8(%rax), %rax
movl %ecx, (%rax)
cmpq $0xffff, -0x6f0(%rbp) # imm = 0xFFFF
jbe 0x9e5210
movq -0x6c8(%rbp), %rax
movl $0x2, 0x48(%rax)
movq -0x6c8(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x6c8(%rbp), %rcx
movq (%rcx), %rcx
subq %rcx, %rax
sarq $0x3, %rax
movl %eax, %ecx
movq -0x6c8(%rbp), %rax
movl %ecx, 0x4c(%rax)
movw -0x6f0(%rbp), %cx
movq -0x6c8(%rbp), %rax
movq 0x8(%rax), %rax
movw %cx, 0x6(%rax)
movq -0x6c8(%rbp), %rax
movq 0x8(%rax), %rcx
addq $0x8, %rcx
movq %rcx, 0x8(%rax)
movl -0x13a0(%rbp), %eax
movl %eax, -0x1c5c(%rbp)
movq -0x1230(%rbp), %rax
movq %rax, -0x1c58(%rbp)
movq -0x1250(%rbp), %rdx
movl -0x1234(%rbp), %ecx
movl -0x120c(%rbp), %eax
movq %rdx, -0x1190(%rbp)
movl %ecx, -0x1194(%rbp)
movl %eax, -0x1198(%rbp)
movl -0x1198(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1c50(%rbp)
subl $0x4, %eax
ja 0x9e52a7
movq -0x1c50(%rbp), %rax
leaq 0x1f12ce(%rip), %rcx # 0xbd656c
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9e52a9
movq -0x1190(%rbp), %rdi
movl -0x1194(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0x1188(%rbp)
jmp 0x9e532e
movq -0x1190(%rbp), %rdi
movl -0x1194(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0x1188(%rbp)
jmp 0x9e532e
movq -0x1190(%rbp), %rdi
movl -0x1194(%rbp), %esi
callq 0x9fb820
movq %rax, -0x1188(%rbp)
jmp 0x9e532e
movq -0x1190(%rbp), %rdi
movl -0x1194(%rbp), %esi
callq 0x9fb850
movq %rax, -0x1188(%rbp)
jmp 0x9e532e
movq -0x1190(%rbp), %rdi
movl -0x1194(%rbp), %esi
callq 0x9fb880
movq %rax, -0x1188(%rbp)
movq -0x1c58(%rbp), %rax
movl -0x1c5c(%rbp), %edx
movq -0x1188(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
movl -0x13a0(%rbp), %eax
movl %eax, -0x1c74(%rbp)
movq -0x1220(%rbp), %rax
movq %rax, -0x1c70(%rbp)
movq -0x1250(%rbp), %rcx
movl -0x1224(%rbp), %eax
movq %rcx, -0x11a8(%rbp)
movl %eax, -0x11ac(%rbp)
movl $0x8, -0x11b0(%rbp)
movl -0x11b0(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1c68(%rbp)
subl $0x4, %eax
ja 0x9e53b1
movq -0x1c68(%rbp), %rax
leaq 0x1f11d8(%rip), %rcx # 0xbd6580
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9e53b3
movq -0x11a8(%rbp), %rdi
movl -0x11ac(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0x11a0(%rbp)
jmp 0x9e5438
movq -0x11a8(%rbp), %rdi
movl -0x11ac(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0x11a0(%rbp)
jmp 0x9e5438
movq -0x11a8(%rbp), %rdi
movl -0x11ac(%rbp), %esi
callq 0x9fb820
movq %rax, -0x11a0(%rbp)
jmp 0x9e5438
movq -0x11a8(%rbp), %rdi
movl -0x11ac(%rbp), %esi
callq 0x9fb850
movq %rax, -0x11a0(%rbp)
jmp 0x9e5438
movq -0x11a8(%rbp), %rdi
movl -0x11ac(%rbp), %esi
callq 0x9fb880
movq %rax, -0x11a0(%rbp)
movq -0x1c70(%rbp), %rax
movl -0x1c74(%rbp), %edx
movq -0x11a0(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
movq -0x13c0(%rbp), %rax
addq -0x1250(%rbp), %rax
movq %rax, -0x1250(%rbp)
movq -0x1250(%rbp), %rax
movq %rax, -0x1258(%rbp)
jmp 0x9e4d32
jmp 0x9e5479
jmp 0x9e547b
cmpl $0x0, -0x1210(%rbp)
jne 0x9e5b3e
jmp 0x9e548a
movq -0x1250(%rbp), %rcx
xorl %eax, %eax
cmpq -0x1278(%rbp), %rcx
movb %al, -0x1c75(%rbp)
ja 0x9e550c
cmpl $0x0, -0x1280(%rbp)
seta %al
andb $0x1, %al
movzbl %al, %eax
movl %eax, -0x1c7c(%rbp)
movq -0x1250(%rbp), %rdi
callq 0x9fb8d0
movl %eax, -0x1c80(%rbp)
movq -0x1250(%rbp), %rdi
movl -0x1280(%rbp), %eax
movl %eax, %ecx
xorl %eax, %eax
subq %rcx, %rax
addq %rax, %rdi
callq 0x9fb8d0
movl -0x1c80(%rbp), %ecx
movl %eax, %edx
movl -0x1c7c(%rbp), %eax
cmpl %edx, %ecx
sete %cl
andb $0x1, %cl
movzbl %cl, %ecx
andl %ecx, %eax
cmpl $0x0, %eax
setne %al
movb %al, -0x1c75(%rbp)
movb -0x1c75(%rbp), %al
testb $0x1, %al
jne 0x9e551b
jmp 0x9e5b3c
movq -0x1250(%rbp), %rdi
addq $0x4, %rdi
movl -0x1280(%rbp), %eax
movq %rdi, %rsi
subq %rax, %rsi
movq -0x1270(%rbp), %rdx
callq 0x9fbb90
addq $0x4, %rax
movq %rax, -0x13d0(%rbp)
movl -0x1280(%rbp), %eax
movl %eax, -0x13d4(%rbp)
movl -0x127c(%rbp), %eax
movl %eax, -0x1280(%rbp)
movl -0x13d4(%rbp), %eax
movl %eax, -0x127c(%rbp)
movq -0x1250(%rbp), %rdx
movl -0x1240(%rbp), %ecx
movl %edx, %eax
subl %ecx, %eax
movl %eax, -0x1c94(%rbp)
movq -0x1230(%rbp), %rax
movq %rax, -0x1c90(%rbp)
movl -0x1234(%rbp), %ecx
movl -0x120c(%rbp), %eax
movq %rdx, -0x11c0(%rbp)
movl %ecx, -0x11c4(%rbp)
movl %eax, -0x11c8(%rbp)
movl -0x11c8(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1c88(%rbp)
subl $0x4, %eax
ja 0x9e55df
movq -0x1c88(%rbp), %rax
leaq 0x1f0f6e(%rip), %rcx # 0xbd6544
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9e55e1
movq -0x11c0(%rbp), %rdi
movl -0x11c4(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0x11b8(%rbp)
jmp 0x9e5666
movq -0x11c0(%rbp), %rdi
movl -0x11c4(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0x11b8(%rbp)
jmp 0x9e5666
movq -0x11c0(%rbp), %rdi
movl -0x11c4(%rbp), %esi
callq 0x9fb820
movq %rax, -0x11b8(%rbp)
jmp 0x9e5666
movq -0x11c0(%rbp), %rdi
movl -0x11c4(%rbp), %esi
callq 0x9fb850
movq %rax, -0x11b8(%rbp)
jmp 0x9e5666
movq -0x11c0(%rbp), %rdi
movl -0x11c4(%rbp), %esi
callq 0x9fb880
movq %rax, -0x11b8(%rbp)
movq -0x1c90(%rbp), %rax
movl -0x1c94(%rbp), %edx
movq -0x11b8(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
movq -0x1250(%rbp), %rcx
movl -0x1240(%rbp), %edx
movl %ecx, %eax
subl %edx, %eax
movl %eax, -0x1cac(%rbp)
movq -0x1220(%rbp), %rax
movq %rax, -0x1ca8(%rbp)
movl -0x1224(%rbp), %eax
movq %rcx, -0x11d8(%rbp)
movl %eax, -0x11dc(%rbp)
movl $0x8, -0x11e0(%rbp)
movl -0x11e0(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1ca0(%rbp)
subl $0x4, %eax
ja 0x9e56ed
movq -0x1ca0(%rbp), %rax
leaq 0x1f0e74(%rip), %rcx # 0xbd6558
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9e56ef
movq -0x11d8(%rbp), %rdi
movl -0x11dc(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0x11d0(%rbp)
jmp 0x9e5774
movq -0x11d8(%rbp), %rdi
movl -0x11dc(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0x11d0(%rbp)
jmp 0x9e5774
movq -0x11d8(%rbp), %rdi
movl -0x11dc(%rbp), %esi
callq 0x9fb820
movq %rax, -0x11d0(%rbp)
jmp 0x9e5774
movq -0x11d8(%rbp), %rdi
movl -0x11dc(%rbp), %esi
callq 0x9fb850
movq %rax, -0x11d0(%rbp)
jmp 0x9e5774
movq -0x11d8(%rbp), %rdi
movl -0x11dc(%rbp), %esi
callq 0x9fb880
movq %rax, -0x11d0(%rbp)
movq -0x1ca8(%rbp), %rax
movl -0x1cac(%rbp), %edx
movq -0x11d0(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
movq -0x11f0(%rbp), %rsi
movq -0x1258(%rbp), %rdx
movq -0x1270(%rbp), %rcx
movq -0x13d0(%rbp), %rax
subq $0x3, %rax
movq %rsi, -0x688(%rbp)
movq $0x0, -0x690(%rbp)
movq %rdx, -0x698(%rbp)
movq %rcx, -0x6a0(%rbp)
movl $0x0, -0x6a4(%rbp)
movq %rax, -0x6b0(%rbp)
movq -0x6a0(%rbp), %rax
addq $-0x20, %rax
movq %rax, -0x6b8(%rbp)
movq -0x698(%rbp), %rax
addq -0x690(%rbp), %rax
movq %rax, -0x6c0(%rbp)
movq -0x6c0(%rbp), %rax
cmpq -0x6b8(%rbp), %rax
ja 0x9e59f8
movq -0x688(%rbp), %rax
movq 0x18(%rax), %rdi
movq -0x698(%rbp), %rsi
callq 0x9fbd80
cmpq $0x10, -0x690(%rbp)
jbe 0x9e59f6
movq -0x688(%rbp), %rax
movq 0x18(%rax), %rdx
addq $0x10, %rdx
movq -0x698(%rbp), %rcx
addq $0x10, %rcx
movq -0x690(%rbp), %rax
subq $0x10, %rax
movq %rdx, -0x348(%rbp)
movq %rcx, -0x350(%rbp)
movq %rax, -0x358(%rbp)
movl $0x0, -0x35c(%rbp)
movq -0x348(%rbp), %rax
movq -0x350(%rbp), %rcx
subq %rcx, %rax
movq %rax, -0x368(%rbp)
movq -0x350(%rbp), %rax
movq %rax, -0x370(%rbp)
movq -0x348(%rbp), %rax
movq %rax, -0x378(%rbp)
movq -0x378(%rbp), %rax
addq -0x358(%rbp), %rax
movq %rax, -0x380(%rbp)
cmpl $0x1, -0x35c(%rbp)
jne 0x9e592a
cmpq $0x10, -0x368(%rbp)
jge 0x9e592a
jmp 0x9e58de
movq -0x378(%rbp), %rdi
movq -0x370(%rbp), %rsi
callq 0x9fbf50
movq -0x378(%rbp), %rax
addq $0x8, %rax
movq %rax, -0x378(%rbp)
movq -0x370(%rbp), %rax
addq $0x8, %rax
movq %rax, -0x370(%rbp)
movq -0x378(%rbp), %rax
cmpq -0x380(%rbp), %rax
jb 0x9e58de
jmp 0x9e59f4
movq -0x378(%rbp), %rdi
movq -0x370(%rbp), %rsi
callq 0x9fbd80
movl $0x10, %eax
cmpq -0x358(%rbp), %rax
jl 0x9e5950
jmp 0x9e59f4
movq -0x378(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x378(%rbp)
movq -0x370(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x370(%rbp)
movq -0x378(%rbp), %rdi
movq -0x370(%rbp), %rsi
callq 0x9fbd80
movq -0x378(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x378(%rbp)
movq -0x370(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x370(%rbp)
movq -0x378(%rbp), %rdi
movq -0x370(%rbp), %rsi
callq 0x9fbd80
movq -0x378(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x378(%rbp)
movq -0x370(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x370(%rbp)
movq -0x378(%rbp), %rax
cmpq -0x380(%rbp), %rax
jb 0x9e5974
jmp 0x9e59f4
jmp 0x9e59f6
jmp 0x9e5a1d
movq -0x688(%rbp), %rax
movq 0x18(%rax), %rdi
movq -0x698(%rbp), %rsi
movq -0x6c0(%rbp), %rdx
movq -0x6b8(%rbp), %rcx
callq 0x9fbdb0
movq -0x690(%rbp), %rcx
movq -0x688(%rbp), %rax
addq 0x18(%rax), %rcx
movq %rcx, 0x18(%rax)
cmpq $0xffff, -0x690(%rbp) # imm = 0xFFFF
jbe 0x9e5a76
movq -0x688(%rbp), %rax
movl $0x1, 0x48(%rax)
movq -0x688(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x688(%rbp), %rcx
movq (%rcx), %rcx
subq %rcx, %rax
sarq $0x3, %rax
movl %eax, %ecx
movq -0x688(%rbp), %rax
movl %ecx, 0x4c(%rax)
movq -0x690(%rbp), %rax
movw %ax, %cx
movq -0x688(%rbp), %rax
movq 0x8(%rax), %rax
movw %cx, 0x4(%rax)
movl -0x6a4(%rbp), %ecx
addl $0x1, %ecx
movq -0x688(%rbp), %rax
movq 0x8(%rax), %rax
movl %ecx, (%rax)
cmpq $0xffff, -0x6b0(%rbp) # imm = 0xFFFF
jbe 0x9e5ae8
movq -0x688(%rbp), %rax
movl $0x2, 0x48(%rax)
movq -0x688(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x688(%rbp), %rcx
movq (%rcx), %rcx
subq %rcx, %rax
sarq $0x3, %rax
movl %eax, %ecx
movq -0x688(%rbp), %rax
movl %ecx, 0x4c(%rax)
movq -0x6b0(%rbp), %rax
movw %ax, %cx
movq -0x688(%rbp), %rax
movq 0x8(%rax), %rax
movw %cx, 0x6(%rax)
movq -0x688(%rbp), %rax
movq 0x8(%rax), %rcx
addq $0x8, %rcx
movq %rcx, 0x8(%rax)
movq -0x13d0(%rbp), %rax
addq -0x1250(%rbp), %rax
movq %rax, -0x1250(%rbp)
movq -0x1250(%rbp), %rax
movq %rax, -0x1258(%rbp)
jmp 0x9e548a
jmp 0x9e5b3e
jmp 0x9e5b40
jmp 0x9e2c8a
cmpl $0x0, -0x127c(%rbp)
je 0x9e5b5c
movl -0x127c(%rbp), %eax
movl %eax, -0x1cb0(%rbp)
jmp 0x9e5b68
movl -0x1284(%rbp), %eax
movl %eax, -0x1cb0(%rbp)
movl -0x1cb0(%rbp), %ecx
movq -0x11f8(%rbp), %rax
movl %ecx, (%rax)
cmpl $0x0, -0x1280(%rbp)
je 0x9e5b8e
movl -0x1280(%rbp), %eax
movl %eax, -0x1cb4(%rbp)
jmp 0x9e5b9a
movl -0x1284(%rbp), %eax
movl %eax, -0x1cb4(%rbp)
movl -0x1cb4(%rbp), %ecx
movq -0x11f8(%rbp), %rax
movl %ecx, 0x4(%rax)
movq -0x1270(%rbp), %rax
movq -0x1258(%rbp), %rcx
subq %rcx, %rax
movq %rax, -0x1728(%rbp)
jmp 0x9e8fd0
movq -0x1730(%rbp), %rdi
movq -0x1738(%rbp), %rsi
movq -0x1740(%rbp), %rdx
movq -0x1748(%rbp), %rcx
movq -0x1750(%rbp), %rax
movq %rdi, -0x1530(%rbp)
movq %rsi, -0x1538(%rbp)
movq %rdx, -0x1540(%rbp)
movq %rcx, -0x1548(%rbp)
movq %rax, -0x1550(%rbp)
movl $0x7, -0x1554(%rbp)
movl $0x0, -0x1558(%rbp)
movq -0x1530(%rbp), %rax
addq $0xf0, %rax
movq %rax, -0x1560(%rbp)
movq -0x1530(%rbp), %rax
movq 0x60(%rax), %rax
movq %rax, -0x1568(%rbp)
movq -0x1560(%rbp), %rax
movl 0x8(%rax), %eax
movl %eax, -0x156c(%rbp)
movq -0x1530(%rbp), %rax
movq 0x70(%rax), %rax
movq %rax, -0x1578(%rbp)
movq -0x1560(%rbp), %rax
movl 0x4(%rax), %eax
movl %eax, -0x157c(%rbp)
movq -0x1530(%rbp), %rax
movq 0x8(%rax), %rax
movq %rax, -0x1588(%rbp)
movq -0x1548(%rbp), %rax
movq %rax, -0x1590(%rbp)
movq -0x1590(%rbp), %rax
movq %rax, -0x1598(%rbp)
movq -0x1590(%rbp), %rax
movq %rax, -0x15a0(%rbp)
movq -0x1590(%rbp), %rax
movq -0x1588(%rbp), %rcx
subq %rcx, %rax
addq -0x1550(%rbp), %rax
movl %eax, -0x15a4(%rbp)
movq -0x1530(%rbp), %rdi
movl -0x15a4(%rbp), %esi
movq -0x1560(%rbp), %rax
movl (%rax), %edx
callq 0x9fba50
movl %eax, -0x15a8(%rbp)
movq -0x1588(%rbp), %rax
movl -0x15a8(%rbp), %ecx
addq %rcx, %rax
movq %rax, -0x15b0(%rbp)
movq -0x1590(%rbp), %rax
addq -0x1550(%rbp), %rax
movq %rax, -0x15b8(%rbp)
movq -0x15b8(%rbp), %rax
addq $-0x8, %rax
movq %rax, -0x15c0(%rbp)
movq -0x1540(%rbp), %rax
movl (%rax), %eax
movl %eax, -0x15c4(%rbp)
movq -0x1540(%rbp), %rax
movl 0x4(%rax), %eax
movl %eax, -0x15c8(%rbp)
movl $0x0, -0x15cc(%rbp)
movq -0x1530(%rbp), %rax
movq 0xe8(%rax), %rax
movq %rax, -0x15d8(%rbp)
cmpl $0x2, -0x1558(%rbp)
jne 0x9e5d8f
movq -0x15d8(%rbp), %rax
addq $0xf0, %rax
movq %rax, -0x1cc0(%rbp)
jmp 0x9e5d9a
xorl %eax, %eax
movq %rax, -0x1cc0(%rbp)
jmp 0x9e5d9a
movq -0x1cc0(%rbp), %rax
movq %rax, -0x15e0(%rbp)
cmpl $0x2, -0x1558(%rbp)
jne 0x9e5dc5
movq -0x15d8(%rbp), %rax
movq 0x60(%rax), %rax
movq %rax, -0x1cc8(%rbp)
jmp 0x9e5dd0
xorl %eax, %eax
movq %rax, -0x1cc8(%rbp)
jmp 0x9e5dd0
movq -0x1cc8(%rbp), %rax
movq %rax, -0x15e8(%rbp)
cmpl $0x2, -0x1558(%rbp)
jne 0x9e5dfb
movq -0x15d8(%rbp), %rax
movq 0x70(%rax), %rax
movq %rax, -0x1cd0(%rbp)
jmp 0x9e5e06
xorl %eax, %eax
movq %rax, -0x1cd0(%rbp)
jmp 0x9e5e06
movq -0x1cd0(%rbp), %rax
movq %rax, -0x15f0(%rbp)
cmpl $0x2, -0x1558(%rbp)
jne 0x9e5e2f
movq -0x15d8(%rbp), %rax
movl 0x18(%rax), %eax
movl %eax, -0x1cd4(%rbp)
jmp 0x9e5e39
xorl %eax, %eax
movl %eax, -0x1cd4(%rbp)
jmp 0x9e5e39
movl -0x1cd4(%rbp), %eax
movl %eax, -0x15f4(%rbp)
cmpl $0x2, -0x1558(%rbp)
jne 0x9e5e62
movq -0x15d8(%rbp), %rax
movq 0x8(%rax), %rax
movq %rax, -0x1ce0(%rbp)
jmp 0x9e5e6d
xorl %eax, %eax
movq %rax, -0x1ce0(%rbp)
jmp 0x9e5e6d
movq -0x1ce0(%rbp), %rax
movq %rax, -0x1600(%rbp)
cmpl $0x2, -0x1558(%rbp)
jne 0x9e5e9d
movq -0x1600(%rbp), %rax
movl -0x15f4(%rbp), %ecx
addq %rcx, %rax
movq %rax, -0x1ce8(%rbp)
jmp 0x9e5ea8
xorl %eax, %eax
movq %rax, -0x1ce8(%rbp)
jmp 0x9e5ea8
movq -0x1ce8(%rbp), %rax
movq %rax, -0x1608(%rbp)
cmpl $0x2, -0x1558(%rbp)
jne 0x9e5ed2
movq -0x15d8(%rbp), %rax
movq (%rax), %rax
movq %rax, -0x1cf0(%rbp)
jmp 0x9e5edd
xorl %eax, %eax
movq %rax, -0x1cf0(%rbp)
jmp 0x9e5edd
movq -0x1cf0(%rbp), %rax
movq %rax, -0x1610(%rbp)
cmpl $0x2, -0x1558(%rbp)
jne 0x9e5f15
movl -0x15a8(%rbp), %eax
movq -0x1610(%rbp), %rcx
movq -0x1600(%rbp), %rdx
subq %rdx, %rcx
subl %ecx, %eax
movl %eax, -0x1cf4(%rbp)
jmp 0x9e5f1f
xorl %eax, %eax
movl %eax, -0x1cf4(%rbp)
jmp 0x9e5f1f
movl -0x1cf4(%rbp), %eax
movl %eax, -0x1614(%rbp)
cmpl $0x2, -0x1558(%rbp)
jne 0x9e5f46
movq -0x15e0(%rbp), %rax
movl 0x8(%rax), %eax
movl %eax, -0x1cf8(%rbp)
jmp 0x9e5f52
movl -0x156c(%rbp), %eax
movl %eax, -0x1cf8(%rbp)
movl -0x1cf8(%rbp), %eax
movl %eax, -0x1618(%rbp)
cmpl $0x2, -0x1558(%rbp)
jne 0x9e5f79
movq -0x15e0(%rbp), %rax
movl 0x4(%rax), %eax
movl %eax, -0x1cfc(%rbp)
jmp 0x9e5f85
movl -0x157c(%rbp), %eax
movl %eax, -0x1cfc(%rbp)
movl -0x1cfc(%rbp), %eax
movl %eax, -0x161c(%rbp)
movq -0x1598(%rbp), %rax
movq -0x15b0(%rbp), %rcx
subq %rcx, %rax
movq -0x1610(%rbp), %rcx
movq -0x1608(%rbp), %rdx
subq %rdx, %rcx
addq %rcx, %rax
movl %eax, -0x1620(%rbp)
cmpl $0x2, -0x1558(%rbp)
jne 0x9e5fc7
jmp 0x9e5fc7
cmpl $0x0, -0x1620(%rbp)
sete %al
andb $0x1, %al
movzbl %al, %ecx
movq -0x1598(%rbp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, -0x1598(%rbp)
cmpl $0x0, -0x1558(%rbp)
jne 0x9e608b
movq -0x1598(%rbp), %rax
movq -0x1588(%rbp), %rcx
subq %rcx, %rax
movl %eax, -0x1624(%rbp)
movq -0x1530(%rbp), %rdi
movl -0x1624(%rbp), %esi
movq -0x1560(%rbp), %rax
movl (%rax), %edx
callq 0x9fba50
movl %eax, -0x1628(%rbp)
movl -0x1624(%rbp), %eax
subl -0x1628(%rbp), %eax
movl %eax, -0x162c(%rbp)
movl -0x15c8(%rbp), %eax
cmpl -0x162c(%rbp), %eax
jbe 0x9e6065
movl -0x15c8(%rbp), %eax
movl %eax, -0x15cc(%rbp)
movl $0x0, -0x15c8(%rbp)
movl -0x15c4(%rbp), %eax
cmpl -0x162c(%rbp), %eax
jbe 0x9e6089
movl -0x15c4(%rbp), %eax
movl %eax, -0x15cc(%rbp)
movl $0x0, -0x15c4(%rbp)
jmp 0x9e608b
cmpl $0x2, -0x1558(%rbp)
jne 0x9e6096
jmp 0x9e6096
jmp 0x9e6098
movq -0x1598(%rbp), %rax
cmpq -0x15c0(%rbp), %rax
jae 0x9e8f53
movq -0x1598(%rbp), %rcx
movl -0x156c(%rbp), %eax
movq %rcx, -0x13e8(%rbp)
movl %eax, -0x13ec(%rbp)
movl $0x8, -0x13f0(%rbp)
movl -0x13f0(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1d08(%rbp)
subl $0x4, %eax
ja 0x9e60fe
movq -0x1d08(%rbp), %rax
leaq 0x1f026f(%rip), %rcx # 0xbd6364
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9e6100
movq -0x13e8(%rbp), %rdi
movl -0x13ec(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0x13e0(%rbp)
jmp 0x9e6185
movq -0x13e8(%rbp), %rdi
movl -0x13ec(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0x13e0(%rbp)
jmp 0x9e6185
movq -0x13e8(%rbp), %rdi
movl -0x13ec(%rbp), %esi
callq 0x9fb820
movq %rax, -0x13e0(%rbp)
jmp 0x9e6185
movq -0x13e8(%rbp), %rdi
movl -0x13ec(%rbp), %esi
callq 0x9fb850
movq %rax, -0x13e0(%rbp)
jmp 0x9e6185
movq -0x13e8(%rbp), %rdi
movl -0x13ec(%rbp), %esi
callq 0x9fb880
movq %rax, -0x13e0(%rbp)
movq -0x13e0(%rbp), %rax
movq %rax, -0x1648(%rbp)
movq -0x1598(%rbp), %rdx
movl -0x157c(%rbp), %ecx
movl -0x1554(%rbp), %eax
movq %rdx, -0x1400(%rbp)
movl %ecx, -0x1404(%rbp)
movl %eax, -0x1408(%rbp)
movl -0x1408(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1d10(%rbp)
subl $0x4, %eax
ja 0x9e61e7
movq -0x1d10(%rbp), %rax
leaq 0x1f019a(%rip), %rcx # 0xbd6378
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9e61e9
movq -0x1400(%rbp), %rdi
movl -0x1404(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0x13f8(%rbp)
jmp 0x9e626e
movq -0x1400(%rbp), %rdi
movl -0x1404(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0x13f8(%rbp)
jmp 0x9e626e
movq -0x1400(%rbp), %rdi
movl -0x1404(%rbp), %esi
callq 0x9fb820
movq %rax, -0x13f8(%rbp)
jmp 0x9e626e
movq -0x1400(%rbp), %rdi
movl -0x1404(%rbp), %esi
callq 0x9fb850
movq %rax, -0x13f8(%rbp)
jmp 0x9e626e
movq -0x1400(%rbp), %rdi
movl -0x1404(%rbp), %esi
callq 0x9fb880
movq %rax, -0x13f8(%rbp)
movq -0x13f8(%rbp), %rax
movq %rax, -0x1650(%rbp)
movq -0x1598(%rbp), %rcx
movl -0x1618(%rbp), %eax
movq %rcx, -0x1418(%rbp)
movl %eax, -0x141c(%rbp)
movl $0x8, -0x1420(%rbp)
movl -0x1420(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1d18(%rbp)
subl $0x4, %eax
ja 0x9e62ce
movq -0x1d18(%rbp), %rax
leaq 0x1f00c7(%rip), %rcx # 0xbd638c
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9e62d0
movq -0x1418(%rbp), %rdi
movl -0x141c(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0x1410(%rbp)
jmp 0x9e6355
movq -0x1418(%rbp), %rdi
movl -0x141c(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0x1410(%rbp)
jmp 0x9e6355
movq -0x1418(%rbp), %rdi
movl -0x141c(%rbp), %esi
callq 0x9fb820
movq %rax, -0x1410(%rbp)
jmp 0x9e6355
movq -0x1418(%rbp), %rdi
movl -0x141c(%rbp), %esi
callq 0x9fb850
movq %rax, -0x1410(%rbp)
jmp 0x9e6355
movq -0x1418(%rbp), %rdi
movl -0x141c(%rbp), %esi
callq 0x9fb880
movq %rax, -0x1410(%rbp)
movq -0x1410(%rbp), %rax
movq %rax, -0x1658(%rbp)
movq -0x1598(%rbp), %rdx
movl -0x161c(%rbp), %ecx
movl -0x1554(%rbp), %eax
movq %rdx, -0x1430(%rbp)
movl %ecx, -0x1434(%rbp)
movl %eax, -0x1438(%rbp)
movl -0x1438(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1d20(%rbp)
subl $0x4, %eax
ja 0x9e63b7
movq -0x1d20(%rbp), %rax
leaq 0x1efff2(%rip), %rcx # 0xbd63a0
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9e63b9
movq -0x1430(%rbp), %rdi
movl -0x1434(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0x1428(%rbp)
jmp 0x9e643e
movq -0x1430(%rbp), %rdi
movl -0x1434(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0x1428(%rbp)
jmp 0x9e643e
movq -0x1430(%rbp), %rdi
movl -0x1434(%rbp), %esi
callq 0x9fb820
movq %rax, -0x1428(%rbp)
jmp 0x9e643e
movq -0x1430(%rbp), %rdi
movl -0x1434(%rbp), %esi
callq 0x9fb850
movq %rax, -0x1428(%rbp)
jmp 0x9e643e
movq -0x1430(%rbp), %rdi
movl -0x1434(%rbp), %esi
callq 0x9fb880
movq %rax, -0x1428(%rbp)
movq -0x1428(%rbp), %rax
movq %rax, -0x1660(%rbp)
movq -0x1598(%rbp), %rax
movq -0x1588(%rbp), %rcx
subq %rcx, %rax
movl %eax, -0x1664(%rbp)
movq -0x1568(%rbp), %rax
movq -0x1648(%rbp), %rcx
movl (%rax,%rcx,4), %eax
movl %eax, -0x1668(%rbp)
movq -0x1578(%rbp), %rax
movq -0x1650(%rbp), %rcx
movl (%rax,%rcx,4), %eax
movl %eax, -0x166c(%rbp)
movq -0x1588(%rbp), %rax
movl -0x1668(%rbp), %ecx
addq %rcx, %rax
movq %rax, -0x1678(%rbp)
movq -0x1588(%rbp), %rax
movl -0x166c(%rbp), %ecx
addq %rcx, %rax
movq %rax, -0x1680(%rbp)
movl -0x1664(%rbp), %eax
addl $0x1, %eax
subl -0x15c4(%rbp), %eax
movl %eax, -0x1684(%rbp)
cmpl $0x2, -0x1558(%rbp)
jne 0x9e650c
movl -0x1684(%rbp), %eax
cmpl -0x15a8(%rbp), %eax
jae 0x9e650c
movq -0x1600(%rbp), %rax
movl -0x1684(%rbp), %ecx
subl -0x1614(%rbp), %ecx
movl %ecx, %ecx
addq %rcx, %rax
movq %rax, -0x1d28(%rbp)
jmp 0x9e6523
movq -0x1588(%rbp), %rax
movl -0x1684(%rbp), %ecx
addq %rcx, %rax
movq %rax, -0x1d28(%rbp)
movq -0x1d28(%rbp), %rax
movq %rax, -0x1690(%rbp)
movl -0x1664(%rbp), %edx
movq -0x1578(%rbp), %rax
movq -0x1650(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
movq -0x1568(%rbp), %rax
movq -0x1648(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
cmpl $0x2, -0x1558(%rbp)
jne 0x9e69d6
movl -0x15a8(%rbp), %eax
subl $0x1, %eax
subl -0x1684(%rbp), %eax
cmpl $0x3, %eax
jb 0x9e69d6
movq -0x1690(%rbp), %rdi
callq 0x9fb8d0
movl %eax, -0x1d2c(%rbp)
movq -0x1598(%rbp), %rdi
addq $0x1, %rdi
callq 0x9fb8d0
movl %eax, %ecx
movl -0x1d2c(%rbp), %eax
cmpl %ecx, %eax
jne 0x9e69d6
movl -0x1684(%rbp), %eax
cmpl -0x15a8(%rbp), %eax
jae 0x9e65ce
movq -0x1610(%rbp), %rax
movq %rax, -0x1d38(%rbp)
jmp 0x9e65dc
movq -0x15b8(%rbp), %rax
movq %rax, -0x1d38(%rbp)
movq -0x1d38(%rbp), %rax
movq %rax, -0x1698(%rbp)
movq -0x1598(%rbp), %rdi
addq $0x1, %rdi
addq $0x4, %rdi
movq -0x1690(%rbp), %rsi
addq $0x4, %rsi
movq -0x15b8(%rbp), %rdx
movq -0x1698(%rbp), %rcx
movq -0x15b0(%rbp), %r8
callq 0x9fbad0
addq $0x4, %rax
movq %rax, -0x1638(%rbp)
movq -0x1598(%rbp), %rax
addq $0x1, %rax
movq %rax, -0x1598(%rbp)
movq -0x1538(%rbp), %rdi
movq -0x1598(%rbp), %rsi
movq -0x15a0(%rbp), %rax
subq %rax, %rsi
movq -0x15a0(%rbp), %rdx
movq -0x15b8(%rbp), %rcx
movq -0x1638(%rbp), %rax
subq $0x3, %rax
movq %rdi, -0x608(%rbp)
movq %rsi, -0x610(%rbp)
movq %rdx, -0x618(%rbp)
movq %rcx, -0x620(%rbp)
movl $0x0, -0x624(%rbp)
movq %rax, -0x630(%rbp)
movq -0x620(%rbp), %rax
addq $-0x20, %rax
movq %rax, -0x638(%rbp)
movq -0x618(%rbp), %rax
addq -0x610(%rbp), %rax
movq %rax, -0x640(%rbp)
movq -0x640(%rbp), %rax
cmpq -0x638(%rbp), %rax
ja 0x9e68b5
movq -0x608(%rbp), %rax
movq 0x18(%rax), %rdi
movq -0x618(%rbp), %rsi
callq 0x9fbd80
cmpq $0x10, -0x610(%rbp)
jbe 0x9e68b3
movq -0x608(%rbp), %rax
movq 0x18(%rax), %rdx
addq $0x10, %rdx
movq -0x618(%rbp), %rcx
addq $0x10, %rcx
movq -0x610(%rbp), %rax
subq $0x10, %rax
movq %rdx, -0x3c8(%rbp)
movq %rcx, -0x3d0(%rbp)
movq %rax, -0x3d8(%rbp)
movl $0x0, -0x3dc(%rbp)
movq -0x3c8(%rbp), %rax
movq -0x3d0(%rbp), %rcx
subq %rcx, %rax
movq %rax, -0x3e8(%rbp)
movq -0x3d0(%rbp), %rax
movq %rax, -0x3f0(%rbp)
movq -0x3c8(%rbp), %rax
movq %rax, -0x3f8(%rbp)
movq -0x3f8(%rbp), %rax
addq -0x3d8(%rbp), %rax
movq %rax, -0x400(%rbp)
cmpl $0x1, -0x3dc(%rbp)
jne 0x9e67e7
cmpq $0x10, -0x3e8(%rbp)
jge 0x9e67e7
jmp 0x9e679b
movq -0x3f8(%rbp), %rdi
movq -0x3f0(%rbp), %rsi
callq 0x9fbf50
movq -0x3f8(%rbp), %rax
addq $0x8, %rax
movq %rax, -0x3f8(%rbp)
movq -0x3f0(%rbp), %rax
addq $0x8, %rax
movq %rax, -0x3f0(%rbp)
movq -0x3f8(%rbp), %rax
cmpq -0x400(%rbp), %rax
jb 0x9e679b
jmp 0x9e68b1
movq -0x3f8(%rbp), %rdi
movq -0x3f0(%rbp), %rsi
callq 0x9fbd80
movl $0x10, %eax
cmpq -0x3d8(%rbp), %rax
jl 0x9e680d
jmp 0x9e68b1
movq -0x3f8(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x3f8(%rbp)
movq -0x3f0(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x3f0(%rbp)
movq -0x3f8(%rbp), %rdi
movq -0x3f0(%rbp), %rsi
callq 0x9fbd80
movq -0x3f8(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x3f8(%rbp)
movq -0x3f0(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x3f0(%rbp)
movq -0x3f8(%rbp), %rdi
movq -0x3f0(%rbp), %rsi
callq 0x9fbd80
movq -0x3f8(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x3f8(%rbp)
movq -0x3f0(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x3f0(%rbp)
movq -0x3f8(%rbp), %rax
cmpq -0x400(%rbp), %rax
jb 0x9e6831
jmp 0x9e68b1
jmp 0x9e68b3
jmp 0x9e68da
movq -0x608(%rbp), %rax
movq 0x18(%rax), %rdi
movq -0x618(%rbp), %rsi
movq -0x640(%rbp), %rdx
movq -0x638(%rbp), %rcx
callq 0x9fbdb0
movq -0x610(%rbp), %rcx
movq -0x608(%rbp), %rax
addq 0x18(%rax), %rcx
movq %rcx, 0x18(%rax)
cmpq $0xffff, -0x610(%rbp) # imm = 0xFFFF
jbe 0x9e6933
movq -0x608(%rbp), %rax
movl $0x1, 0x48(%rax)
movq -0x608(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x608(%rbp), %rcx
movq (%rcx), %rcx
subq %rcx, %rax
sarq $0x3, %rax
movl %eax, %ecx
movq -0x608(%rbp), %rax
movl %ecx, 0x4c(%rax)
movq -0x610(%rbp), %rax
movw %ax, %cx
movq -0x608(%rbp), %rax
movq 0x8(%rax), %rax
movw %cx, 0x4(%rax)
movl -0x624(%rbp), %ecx
addl $0x1, %ecx
movq -0x608(%rbp), %rax
movq 0x8(%rax), %rax
movl %ecx, (%rax)
cmpq $0xffff, -0x630(%rbp) # imm = 0xFFFF
jbe 0x9e69a5
movq -0x608(%rbp), %rax
movl $0x2, 0x48(%rax)
movq -0x608(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x608(%rbp), %rcx
movq (%rcx), %rcx
subq %rcx, %rax
sarq $0x3, %rax
movl %eax, %ecx
movq -0x608(%rbp), %rax
movl %ecx, 0x4c(%rax)
movq -0x630(%rbp), %rax
movw %ax, %cx
movq -0x608(%rbp), %rax
movq 0x8(%rax), %rax
movw %cx, 0x6(%rax)
movq -0x608(%rbp), %rax
movq 0x8(%rax), %rcx
addq $0x8, %rcx
movq %rcx, 0x8(%rax)
jmp 0x9e7ca6
cmpl $0x0, -0x1558(%rbp)
jne 0x9e6e44
cmpl $0x0, -0x15c4(%rbp)
seta %al
andb $0x1, %al
movzbl %al, %eax
movl %eax, -0x1d3c(%rbp)
movq -0x1598(%rbp), %rdi
addq $0x1, %rdi
movl -0x15c4(%rbp), %eax
movl %eax, %ecx
xorl %eax, %eax
subq %rcx, %rax
addq %rax, %rdi
callq 0x9fb8d0
movl %eax, -0x1d40(%rbp)
movq -0x1598(%rbp), %rdi
addq $0x1, %rdi
callq 0x9fb8d0
movl -0x1d40(%rbp), %ecx
movl %eax, %edx
movl -0x1d3c(%rbp), %eax
cmpl %edx, %ecx
sete %cl
andb $0x1, %cl
movzbl %cl, %ecx
andl %ecx, %eax
cmpl $0x0, %eax
je 0x9e6e44
movq -0x1598(%rbp), %rdi
addq $0x1, %rdi
addq $0x4, %rdi
movq -0x1598(%rbp), %rsi
addq $0x1, %rsi
addq $0x4, %rsi
movl -0x15c4(%rbp), %eax
movl %eax, %ecx
xorl %eax, %eax
subq %rcx, %rax
addq %rax, %rsi
movq -0x15b8(%rbp), %rdx
callq 0x9fbb90
addq $0x4, %rax
movq %rax, -0x1638(%rbp)
movq -0x1598(%rbp), %rax
addq $0x1, %rax
movq %rax, -0x1598(%rbp)
movq -0x1538(%rbp), %rdi
movq -0x1598(%rbp), %rsi
movq -0x15a0(%rbp), %rax
subq %rax, %rsi
movq -0x15a0(%rbp), %rdx
movq -0x15b8(%rbp), %rcx
movq -0x1638(%rbp), %rax
subq $0x3, %rax
movq %rdi, -0x5c8(%rbp)
movq %rsi, -0x5d0(%rbp)
movq %rdx, -0x5d8(%rbp)
movq %rcx, -0x5e0(%rbp)
movl $0x0, -0x5e4(%rbp)
movq %rax, -0x5f0(%rbp)
movq -0x5e0(%rbp), %rax
addq $-0x20, %rax
movq %rax, -0x5f8(%rbp)
movq -0x5d8(%rbp), %rax
addq -0x5d0(%rbp), %rax
movq %rax, -0x600(%rbp)
movq -0x600(%rbp), %rax
cmpq -0x5f8(%rbp), %rax
ja 0x9e6d23
movq -0x5c8(%rbp), %rax
movq 0x18(%rax), %rdi
movq -0x5d8(%rbp), %rsi
callq 0x9fbd80
cmpq $0x10, -0x5d0(%rbp)
jbe 0x9e6d21
movq -0x5c8(%rbp), %rax
movq 0x18(%rax), %rdx
addq $0x10, %rdx
movq -0x5d8(%rbp), %rcx
addq $0x10, %rcx
movq -0x5d0(%rbp), %rax
subq $0x10, %rax
movq %rdx, -0x408(%rbp)
movq %rcx, -0x410(%rbp)
movq %rax, -0x418(%rbp)
movl $0x0, -0x41c(%rbp)
movq -0x408(%rbp), %rax
movq -0x410(%rbp), %rcx
subq %rcx, %rax
movq %rax, -0x428(%rbp)
movq -0x410(%rbp), %rax
movq %rax, -0x430(%rbp)
movq -0x408(%rbp), %rax
movq %rax, -0x438(%rbp)
movq -0x438(%rbp), %rax
addq -0x418(%rbp), %rax
movq %rax, -0x440(%rbp)
cmpl $0x1, -0x41c(%rbp)
jne 0x9e6c55
cmpq $0x10, -0x428(%rbp)
jge 0x9e6c55
jmp 0x9e6c09
movq -0x438(%rbp), %rdi
movq -0x430(%rbp), %rsi
callq 0x9fbf50
movq -0x438(%rbp), %rax
addq $0x8, %rax
movq %rax, -0x438(%rbp)
movq -0x430(%rbp), %rax
addq $0x8, %rax
movq %rax, -0x430(%rbp)
movq -0x438(%rbp), %rax
cmpq -0x440(%rbp), %rax
jb 0x9e6c09
jmp 0x9e6d1f
movq -0x438(%rbp), %rdi
movq -0x430(%rbp), %rsi
callq 0x9fbd80
movl $0x10, %eax
cmpq -0x418(%rbp), %rax
jl 0x9e6c7b
jmp 0x9e6d1f
movq -0x438(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x438(%rbp)
movq -0x430(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x430(%rbp)
movq -0x438(%rbp), %rdi
movq -0x430(%rbp), %rsi
callq 0x9fbd80
movq -0x438(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x438(%rbp)
movq -0x430(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x430(%rbp)
movq -0x438(%rbp), %rdi
movq -0x430(%rbp), %rsi
callq 0x9fbd80
movq -0x438(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x438(%rbp)
movq -0x430(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x430(%rbp)
movq -0x438(%rbp), %rax
cmpq -0x440(%rbp), %rax
jb 0x9e6c9f
jmp 0x9e6d1f
jmp 0x9e6d21
jmp 0x9e6d48
movq -0x5c8(%rbp), %rax
movq 0x18(%rax), %rdi
movq -0x5d8(%rbp), %rsi
movq -0x600(%rbp), %rdx
movq -0x5f8(%rbp), %rcx
callq 0x9fbdb0
movq -0x5d0(%rbp), %rcx
movq -0x5c8(%rbp), %rax
addq 0x18(%rax), %rcx
movq %rcx, 0x18(%rax)
cmpq $0xffff, -0x5d0(%rbp) # imm = 0xFFFF
jbe 0x9e6da1
movq -0x5c8(%rbp), %rax
movl $0x1, 0x48(%rax)
movq -0x5c8(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x5c8(%rbp), %rcx
movq (%rcx), %rcx
subq %rcx, %rax
sarq $0x3, %rax
movl %eax, %ecx
movq -0x5c8(%rbp), %rax
movl %ecx, 0x4c(%rax)
movq -0x5d0(%rbp), %rax
movw %ax, %cx
movq -0x5c8(%rbp), %rax
movq 0x8(%rax), %rax
movw %cx, 0x4(%rax)
movl -0x5e4(%rbp), %ecx
addl $0x1, %ecx
movq -0x5c8(%rbp), %rax
movq 0x8(%rax), %rax
movl %ecx, (%rax)
cmpq $0xffff, -0x5f0(%rbp) # imm = 0xFFFF
jbe 0x9e6e13
movq -0x5c8(%rbp), %rax
movl $0x2, 0x48(%rax)
movq -0x5c8(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x5c8(%rbp), %rcx
movq (%rcx), %rcx
subq %rcx, %rax
sarq $0x3, %rax
movl %eax, %ecx
movq -0x5c8(%rbp), %rax
movl %ecx, 0x4c(%rax)
movq -0x5f0(%rbp), %rax
movw %ax, %cx
movq -0x5c8(%rbp), %rax
movq 0x8(%rax), %rax
movw %cx, 0x6(%rax)
movq -0x5c8(%rbp), %rax
movq 0x8(%rax), %rcx
addq $0x8, %rcx
movq %rcx, 0x8(%rax)
jmp 0x9e7ca6
movl -0x1668(%rbp), %eax
cmpl -0x15a8(%rbp), %eax
jbe 0x9e6f79
movq -0x1678(%rbp), %rdi
callq 0x9fb980
movq %rax, -0x1d48(%rbp)
movq -0x1598(%rbp), %rdi
callq 0x9fb980
movq %rax, %rcx
movq -0x1d48(%rbp), %rax
cmpq %rcx, %rax
jne 0x9e6f74
movq -0x1598(%rbp), %rdi
addq $0x8, %rdi
movq -0x1678(%rbp), %rsi
addq $0x8, %rsi
movq -0x15b8(%rbp), %rdx
callq 0x9fbb90
addq $0x8, %rax
movq %rax, -0x1638(%rbp)
movq -0x1598(%rbp), %rax
movq -0x1678(%rbp), %rcx
subq %rcx, %rax
movl %eax, -0x163c(%rbp)
movq -0x1598(%rbp), %rax
cmpq -0x15a0(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %ecx
movq -0x1678(%rbp), %rax
cmpq -0x15b0(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %eax
andl %eax, %ecx
xorl %eax, %eax
cmpl $0x0, %ecx
movb %al, -0x1d49(%rbp)
je 0x9e6f28
movq -0x1598(%rbp), %rax
movzbl -0x1(%rax), %eax
movq -0x1678(%rbp), %rcx
movzbl -0x1(%rcx), %ecx
cmpl %ecx, %eax
sete %al
movb %al, -0x1d49(%rbp)
movb -0x1d49(%rbp), %al
testb $0x1, %al
jne 0x9e6f34
jmp 0x9e6f6f
movq -0x1598(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0x1598(%rbp)
movq -0x1678(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0x1678(%rbp)
movq -0x1638(%rbp), %rax
addq $0x1, %rax
movq %rax, -0x1638(%rbp)
jmp 0x9e6ecc
jmp 0x9e78f3
jmp 0x9e70f9
cmpl $0x2, -0x1558(%rbp)
jne 0x9e70f7
movq -0x15e8(%rbp), %rax
movq -0x1658(%rbp), %rcx
movl (%rax,%rcx,4), %eax
movl %eax, -0x169c(%rbp)
movq -0x1600(%rbp), %rax
movl -0x169c(%rbp), %ecx
addq %rcx, %rax
movq %rax, -0x16a8(%rbp)
movq -0x16a8(%rbp), %rax
cmpq -0x1608(%rbp), %rax
jbe 0x9e70f5
movq -0x16a8(%rbp), %rdi
callq 0x9fb980
movq %rax, -0x1d58(%rbp)
movq -0x1598(%rbp), %rdi
callq 0x9fb980
movq %rax, %rcx
movq -0x1d58(%rbp), %rax
cmpq %rcx, %rax
jne 0x9e70f5
movq -0x1598(%rbp), %rdi
addq $0x8, %rdi
movq -0x16a8(%rbp), %rsi
addq $0x8, %rsi
movq -0x15b8(%rbp), %rdx
movq -0x1610(%rbp), %rcx
movq -0x15b0(%rbp), %r8
callq 0x9fbad0
addq $0x8, %rax
movq %rax, -0x1638(%rbp)
movl -0x1664(%rbp), %eax
subl -0x169c(%rbp), %eax
subl -0x1614(%rbp), %eax
movl %eax, -0x163c(%rbp)
movq -0x1598(%rbp), %rax
cmpq -0x15a0(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %ecx
movq -0x16a8(%rbp), %rax
cmpq -0x1608(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %eax
andl %eax, %ecx
xorl %eax, %eax
cmpl $0x0, %ecx
movb %al, -0x1d59(%rbp)
je 0x9e70a9
movq -0x1598(%rbp), %rax
movzbl -0x1(%rax), %eax
movq -0x16a8(%rbp), %rcx
movzbl -0x1(%rcx), %ecx
cmpl %ecx, %eax
sete %al
movb %al, -0x1d59(%rbp)
movb -0x1d59(%rbp), %al
testb $0x1, %al
jne 0x9e70b5
jmp 0x9e70f0
movq -0x1598(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0x1598(%rbp)
movq -0x16a8(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0x16a8(%rbp)
movq -0x1638(%rbp), %rax
addq $0x1, %rax
movq %rax, -0x1638(%rbp)
jmp 0x9e704d
jmp 0x9e78f3
jmp 0x9e70f7
jmp 0x9e70f9
movl -0x166c(%rbp), %eax
cmpl -0x15a8(%rbp), %eax
jbe 0x9e713b
movq -0x1680(%rbp), %rdi
callq 0x9fb8d0
movl %eax, -0x1d60(%rbp)
movq -0x1598(%rbp), %rdi
callq 0x9fb8d0
movl %eax, %ecx
movl -0x1d60(%rbp), %eax
cmpl %ecx, %eax
jne 0x9e7136
jmp 0x9e71f0
jmp 0x9e71c4
cmpl $0x2, -0x1558(%rbp)
jne 0x9e71c2
movq -0x15f0(%rbp), %rax
movq -0x1660(%rbp), %rcx
movl (%rax,%rcx,4), %eax
movl %eax, -0x16ac(%rbp)
movq -0x1600(%rbp), %rax
movl -0x16ac(%rbp), %ecx
addq %rcx, %rax
movq %rax, -0x1680(%rbp)
movl -0x16ac(%rbp), %eax
addl -0x1614(%rbp), %eax
movl %eax, -0x166c(%rbp)
movq -0x1680(%rbp), %rax
cmpq -0x1608(%rbp), %rax
jbe 0x9e71c0
movq -0x1680(%rbp), %rdi
callq 0x9fb8d0
movl %eax, -0x1d64(%rbp)
movq -0x1598(%rbp), %rdi
callq 0x9fb8d0
movl %eax, %ecx
movl -0x1d64(%rbp), %eax
cmpl %ecx, %eax
jne 0x9e71c0
jmp 0x9e71f0
jmp 0x9e71c2
jmp 0x9e71c4
movq -0x1598(%rbp), %rax
movq -0x15a0(%rbp), %rcx
subq %rcx, %rax
sarq $0x8, %rax
addq $0x1, %rax
addq -0x1598(%rbp), %rax
movq %rax, -0x1598(%rbp)
jmp 0x9e6098
movq -0x1598(%rbp), %rcx
incq %rcx
movl -0x156c(%rbp), %eax
movq %rcx, -0x1448(%rbp)
movl %eax, -0x144c(%rbp)
movl $0x8, -0x1450(%rbp)
movl -0x1450(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1d70(%rbp)
subl $0x4, %eax
ja 0x9e7245
movq -0x1d70(%rbp), %rax
leaq 0x1ef178(%rip), %rcx # 0xbd63b4
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9e7247
movq -0x1448(%rbp), %rdi
movl -0x144c(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0x1440(%rbp)
jmp 0x9e72cc
movq -0x1448(%rbp), %rdi
movl -0x144c(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0x1440(%rbp)
jmp 0x9e72cc
movq -0x1448(%rbp), %rdi
movl -0x144c(%rbp), %esi
callq 0x9fb820
movq %rax, -0x1440(%rbp)
jmp 0x9e72cc
movq -0x1448(%rbp), %rdi
movl -0x144c(%rbp), %esi
callq 0x9fb850
movq %rax, -0x1440(%rbp)
jmp 0x9e72cc
movq -0x1448(%rbp), %rdi
movl -0x144c(%rbp), %esi
callq 0x9fb880
movq %rax, -0x1440(%rbp)
movq -0x1440(%rbp), %rax
movq %rax, -0x16b8(%rbp)
movq -0x1598(%rbp), %rcx
incq %rcx
movl -0x1618(%rbp), %eax
movq %rcx, -0x1460(%rbp)
movl %eax, -0x1464(%rbp)
movl $0x8, -0x1468(%rbp)
movl -0x1468(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1d78(%rbp)
subl $0x4, %eax
ja 0x9e732f
movq -0x1d78(%rbp), %rax
leaq 0x1ef0a2(%rip), %rcx # 0xbd63c8
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9e7331
movq -0x1460(%rbp), %rdi
movl -0x1464(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0x1458(%rbp)
jmp 0x9e73b6
movq -0x1460(%rbp), %rdi
movl -0x1464(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0x1458(%rbp)
jmp 0x9e73b6
movq -0x1460(%rbp), %rdi
movl -0x1464(%rbp), %esi
callq 0x9fb820
movq %rax, -0x1458(%rbp)
jmp 0x9e73b6
movq -0x1460(%rbp), %rdi
movl -0x1464(%rbp), %esi
callq 0x9fb850
movq %rax, -0x1458(%rbp)
jmp 0x9e73b6
movq -0x1460(%rbp), %rdi
movl -0x1464(%rbp), %esi
callq 0x9fb880
movq %rax, -0x1458(%rbp)
movq -0x1458(%rbp), %rax
movq %rax, -0x16c0(%rbp)
movq -0x1568(%rbp), %rax
movq -0x16b8(%rbp), %rcx
movl (%rax,%rcx,4), %eax
movl %eax, -0x16c4(%rbp)
movq -0x1588(%rbp), %rax
movl -0x16c4(%rbp), %ecx
addq %rcx, %rax
movq %rax, -0x16d0(%rbp)
movl -0x1664(%rbp), %edx
addl $0x1, %edx
movq -0x1568(%rbp), %rax
movq -0x16b8(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
movl -0x16c4(%rbp), %eax
cmpl -0x15a8(%rbp), %eax
jbe 0x9e7557
movq -0x16d0(%rbp), %rdi
callq 0x9fb980
movq %rax, -0x1d80(%rbp)
movq -0x1598(%rbp), %rdi
addq $0x1, %rdi
callq 0x9fb980
movq %rax, %rcx
movq -0x1d80(%rbp), %rax
cmpq %rcx, %rax
jne 0x9e7552
movq -0x1598(%rbp), %rdi
addq $0x9, %rdi
movq -0x16d0(%rbp), %rsi
addq $0x8, %rsi
movq -0x15b8(%rbp), %rdx
callq 0x9fbb90
addq $0x8, %rax
movq %rax, -0x1638(%rbp)
movq -0x1598(%rbp), %rax
addq $0x1, %rax
movq %rax, -0x1598(%rbp)
movq -0x1598(%rbp), %rax
movq -0x16d0(%rbp), %rcx
subq %rcx, %rax
movl %eax, -0x163c(%rbp)
movq -0x1598(%rbp), %rax
cmpq -0x15a0(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %ecx
movq -0x16d0(%rbp), %rax
cmpq -0x15b0(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %eax
andl %eax, %ecx
xorl %eax, %eax
cmpl $0x0, %ecx
movb %al, -0x1d81(%rbp)
je 0x9e7506
movq -0x1598(%rbp), %rax
movzbl -0x1(%rax), %eax
movq -0x16d0(%rbp), %rcx
movzbl -0x1(%rcx), %ecx
cmpl %ecx, %eax
sete %al
movb %al, -0x1d81(%rbp)
movb -0x1d81(%rbp), %al
testb $0x1, %al
jne 0x9e7512
jmp 0x9e754d
movq -0x1598(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0x1598(%rbp)
movq -0x16d0(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0x16d0(%rbp)
movq -0x1638(%rbp), %rax
addq $0x1, %rax
movq %rax, -0x1638(%rbp)
jmp 0x9e74aa
jmp 0x9e78f3
jmp 0x9e76f4
cmpl $0x2, -0x1558(%rbp)
jne 0x9e76f2
movq -0x15e8(%rbp), %rax
movq -0x16c0(%rbp), %rcx
movl (%rax,%rcx,4), %eax
movl %eax, -0x16d4(%rbp)
movq -0x1600(%rbp), %rax
movl -0x16d4(%rbp), %ecx
addq %rcx, %rax
movq %rax, -0x16e0(%rbp)
movq -0x16e0(%rbp), %rax
cmpq -0x1608(%rbp), %rax
jbe 0x9e76f0
movq -0x16e0(%rbp), %rdi
callq 0x9fb980
movq %rax, -0x1d90(%rbp)
movq -0x1598(%rbp), %rdi
addq $0x1, %rdi
callq 0x9fb980
movq %rax, %rcx
movq -0x1d90(%rbp), %rax
cmpq %rcx, %rax
jne 0x9e76f0
movq -0x1598(%rbp), %rdi
addq $0x1, %rdi
addq $0x8, %rdi
movq -0x16e0(%rbp), %rsi
addq $0x8, %rsi
movq -0x15b8(%rbp), %rdx
movq -0x1610(%rbp), %rcx
movq -0x15b0(%rbp), %r8
callq 0x9fbad0
addq $0x8, %rax
movq %rax, -0x1638(%rbp)
movq -0x1598(%rbp), %rax
addq $0x1, %rax
movq %rax, -0x1598(%rbp)
movl -0x1664(%rbp), %eax
addl $0x1, %eax
subl -0x16d4(%rbp), %eax
subl -0x1614(%rbp), %eax
movl %eax, -0x163c(%rbp)
movq -0x1598(%rbp), %rax
cmpq -0x15a0(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %ecx
movq -0x16e0(%rbp), %rax
cmpq -0x1608(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %eax
andl %eax, %ecx
xorl %eax, %eax
cmpl $0x0, %ecx
movb %al, -0x1d91(%rbp)
je 0x9e76a4
movq -0x1598(%rbp), %rax
movzbl -0x1(%rax), %eax
movq -0x16e0(%rbp), %rcx
movzbl -0x1(%rcx), %ecx
cmpl %ecx, %eax
sete %al
movb %al, -0x1d91(%rbp)
movb -0x1d91(%rbp), %al
testb $0x1, %al
jne 0x9e76b0
jmp 0x9e76eb
movq -0x1598(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0x1598(%rbp)
movq -0x16e0(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0x16e0(%rbp)
movq -0x1638(%rbp), %rax
addq $0x1, %rax
movq %rax, -0x1638(%rbp)
jmp 0x9e7648
jmp 0x9e78f3
jmp 0x9e76f2
jmp 0x9e76f4
cmpl $0x2, -0x1558(%rbp)
jne 0x9e7808
movl -0x166c(%rbp), %eax
cmpl -0x15a8(%rbp), %eax
jae 0x9e7808
movq -0x1598(%rbp), %rdi
addq $0x4, %rdi
movq -0x1680(%rbp), %rsi
addq $0x4, %rsi
movq -0x15b8(%rbp), %rdx
movq -0x1610(%rbp), %rcx
movq -0x15b0(%rbp), %r8
callq 0x9fbad0
addq $0x4, %rax
movq %rax, -0x1638(%rbp)
movl -0x1664(%rbp), %eax
subl -0x166c(%rbp), %eax
movl %eax, -0x163c(%rbp)
movq -0x1598(%rbp), %rax
cmpq -0x15a0(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %ecx
movq -0x1680(%rbp), %rax
cmpq -0x1608(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %eax
andl %eax, %ecx
xorl %eax, %eax
cmpl $0x0, %ecx
movb %al, -0x1d92(%rbp)
je 0x9e77bc
movq -0x1598(%rbp), %rax
movzbl -0x1(%rax), %eax
movq -0x1680(%rbp), %rcx
movzbl -0x1(%rcx), %ecx
cmpl %ecx, %eax
sete %al
movb %al, -0x1d92(%rbp)
movb -0x1d92(%rbp), %al
testb $0x1, %al
jne 0x9e77c8
jmp 0x9e7803
movq -0x1598(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0x1598(%rbp)
movq -0x1680(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0x1680(%rbp)
movq -0x1638(%rbp), %rax
addq $0x1, %rax
movq %rax, -0x1638(%rbp)
jmp 0x9e7760
jmp 0x9e78f1
movq -0x1598(%rbp), %rdi
addq $0x4, %rdi
movq -0x1680(%rbp), %rsi
addq $0x4, %rsi
movq -0x15b8(%rbp), %rdx
callq 0x9fbb90
addq $0x4, %rax
movq %rax, -0x1638(%rbp)
movq -0x1598(%rbp), %rax
movq -0x1680(%rbp), %rcx
subq %rcx, %rax
movl %eax, -0x163c(%rbp)
movq -0x1598(%rbp), %rax
cmpq -0x15a0(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %ecx
movq -0x1680(%rbp), %rax
cmpq -0x15b0(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %eax
andl %eax, %ecx
xorl %eax, %eax
cmpl $0x0, %ecx
movb %al, -0x1d93(%rbp)
je 0x9e78a8
movq -0x1598(%rbp), %rax
movzbl -0x1(%rax), %eax
movq -0x1680(%rbp), %rcx
movzbl -0x1(%rcx), %ecx
cmpl %ecx, %eax
sete %al
movb %al, -0x1d93(%rbp)
movb -0x1d93(%rbp), %al
testb $0x1, %al
jne 0x9e78b4
jmp 0x9e78ef
movq -0x1598(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0x1598(%rbp)
movq -0x1680(%rbp), %rax
addq $-0x1, %rax
movq %rax, -0x1680(%rbp)
movq -0x1638(%rbp), %rax
addq $0x1, %rax
movq %rax, -0x1638(%rbp)
jmp 0x9e784c
jmp 0x9e78f1
jmp 0x9e78f3
movl -0x15c4(%rbp), %eax
movl %eax, -0x15c8(%rbp)
movl -0x163c(%rbp), %eax
movl %eax, -0x15c4(%rbp)
movq -0x1538(%rbp), %r8
movq -0x1598(%rbp), %rdi
movq -0x15a0(%rbp), %rax
subq %rax, %rdi
movq -0x15a0(%rbp), %rsi
movq -0x15b8(%rbp), %rdx
movl -0x163c(%rbp), %ecx
addl $0x2, %ecx
movq -0x1638(%rbp), %rax
subq $0x3, %rax
movq %r8, -0x508(%rbp)
movq %rdi, -0x510(%rbp)
movq %rsi, -0x518(%rbp)
movq %rdx, -0x520(%rbp)
movl %ecx, -0x524(%rbp)
movq %rax, -0x530(%rbp)
movq -0x520(%rbp), %rax
addq $-0x20, %rax
movq %rax, -0x538(%rbp)
movq -0x518(%rbp), %rax
addq -0x510(%rbp), %rax
movq %rax, -0x540(%rbp)
movq -0x540(%rbp), %rax
cmpq -0x538(%rbp), %rax
ja 0x9e7b8a
movq -0x508(%rbp), %rax
movq 0x18(%rax), %rdi
movq -0x518(%rbp), %rsi
callq 0x9fbd80
cmpq $0x10, -0x510(%rbp)
jbe 0x9e7b88
movq -0x508(%rbp), %rax
movq 0x18(%rax), %rdx
addq $0x10, %rdx
movq -0x518(%rbp), %rcx
addq $0x10, %rcx
movq -0x510(%rbp), %rax
subq $0x10, %rax
movq %rdx, -0x4c8(%rbp)
movq %rcx, -0x4d0(%rbp)
movq %rax, -0x4d8(%rbp)
movl $0x0, -0x4dc(%rbp)
movq -0x4c8(%rbp), %rax
movq -0x4d0(%rbp), %rcx
subq %rcx, %rax
movq %rax, -0x4e8(%rbp)
movq -0x4d0(%rbp), %rax
movq %rax, -0x4f0(%rbp)
movq -0x4c8(%rbp), %rax
movq %rax, -0x4f8(%rbp)
movq -0x4f8(%rbp), %rax
addq -0x4d8(%rbp), %rax
movq %rax, -0x500(%rbp)
cmpl $0x1, -0x4dc(%rbp)
jne 0x9e7abc
cmpq $0x10, -0x4e8(%rbp)
jge 0x9e7abc
jmp 0x9e7a70
movq -0x4f8(%rbp), %rdi
movq -0x4f0(%rbp), %rsi
callq 0x9fbf50
movq -0x4f8(%rbp), %rax
addq $0x8, %rax
movq %rax, -0x4f8(%rbp)
movq -0x4f0(%rbp), %rax
addq $0x8, %rax
movq %rax, -0x4f0(%rbp)
movq -0x4f8(%rbp), %rax
cmpq -0x500(%rbp), %rax
jb 0x9e7a70
jmp 0x9e7b86
movq -0x4f8(%rbp), %rdi
movq -0x4f0(%rbp), %rsi
callq 0x9fbd80
movl $0x10, %eax
cmpq -0x4d8(%rbp), %rax
jl 0x9e7ae2
jmp 0x9e7b86
movq -0x4f8(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x4f8(%rbp)
movq -0x4f0(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x4f0(%rbp)
movq -0x4f8(%rbp), %rdi
movq -0x4f0(%rbp), %rsi
callq 0x9fbd80
movq -0x4f8(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x4f8(%rbp)
movq -0x4f0(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x4f0(%rbp)
movq -0x4f8(%rbp), %rdi
movq -0x4f0(%rbp), %rsi
callq 0x9fbd80
movq -0x4f8(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x4f8(%rbp)
movq -0x4f0(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x4f0(%rbp)
movq -0x4f8(%rbp), %rax
cmpq -0x500(%rbp), %rax
jb 0x9e7b06
jmp 0x9e7b86
jmp 0x9e7b88
jmp 0x9e7baf
movq -0x508(%rbp), %rax
movq 0x18(%rax), %rdi
movq -0x518(%rbp), %rsi
movq -0x540(%rbp), %rdx
movq -0x538(%rbp), %rcx
callq 0x9fbdb0
movq -0x510(%rbp), %rcx
movq -0x508(%rbp), %rax
addq 0x18(%rax), %rcx
movq %rcx, 0x18(%rax)
cmpq $0xffff, -0x510(%rbp) # imm = 0xFFFF
jbe 0x9e7c08
movq -0x508(%rbp), %rax
movl $0x1, 0x48(%rax)
movq -0x508(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x508(%rbp), %rcx
movq (%rcx), %rcx
subq %rcx, %rax
sarq $0x3, %rax
movl %eax, %ecx
movq -0x508(%rbp), %rax
movl %ecx, 0x4c(%rax)
movq -0x510(%rbp), %rax
movw %ax, %cx
movq -0x508(%rbp), %rax
movq 0x8(%rax), %rax
movw %cx, 0x4(%rax)
movl -0x524(%rbp), %ecx
addl $0x1, %ecx
movq -0x508(%rbp), %rax
movq 0x8(%rax), %rax
movl %ecx, (%rax)
cmpq $0xffff, -0x530(%rbp) # imm = 0xFFFF
jbe 0x9e7c7a
movq -0x508(%rbp), %rax
movl $0x2, 0x48(%rax)
movq -0x508(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x508(%rbp), %rcx
movq (%rcx), %rcx
subq %rcx, %rax
sarq $0x3, %rax
movl %eax, %ecx
movq -0x508(%rbp), %rax
movl %ecx, 0x4c(%rax)
movq -0x530(%rbp), %rax
movw %ax, %cx
movq -0x508(%rbp), %rax
movq 0x8(%rax), %rax
movw %cx, 0x6(%rax)
movq -0x508(%rbp), %rax
movq 0x8(%rax), %rcx
addq $0x8, %rcx
movq %rcx, 0x8(%rax)
movq -0x1638(%rbp), %rax
addq -0x1598(%rbp), %rax
movq %rax, -0x1598(%rbp)
movq -0x1598(%rbp), %rax
movq %rax, -0x15a0(%rbp)
movq -0x1598(%rbp), %rax
cmpq -0x15c0(%rbp), %rax
ja 0x9e8f4e
movl -0x1664(%rbp), %eax
addl $0x2, %eax
movl %eax, -0x16e4(%rbp)
movl -0x16e4(%rbp), %eax
movl %eax, %ecx
movl %ecx, -0x1dac(%rbp)
movq -0x1568(%rbp), %rcx
movq %rcx, -0x1da8(%rbp)
movq -0x1588(%rbp), %rcx
addq %rax, %rcx
movl -0x156c(%rbp), %eax
movq %rcx, -0x1478(%rbp)
movl %eax, -0x147c(%rbp)
movl $0x8, -0x1480(%rbp)
movl -0x1480(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1da0(%rbp)
subl $0x4, %eax
ja 0x9e7d5d
movq -0x1da0(%rbp), %rax
leaq 0x1ee688(%rip), %rcx # 0xbd63dc
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9e7d5f
movq -0x1478(%rbp), %rdi
movl -0x147c(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0x1470(%rbp)
jmp 0x9e7de4
movq -0x1478(%rbp), %rdi
movl -0x147c(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0x1470(%rbp)
jmp 0x9e7de4
movq -0x1478(%rbp), %rdi
movl -0x147c(%rbp), %esi
callq 0x9fb820
movq %rax, -0x1470(%rbp)
jmp 0x9e7de4
movq -0x1478(%rbp), %rdi
movl -0x147c(%rbp), %esi
callq 0x9fb850
movq %rax, -0x1470(%rbp)
jmp 0x9e7de4
movq -0x1478(%rbp), %rdi
movl -0x147c(%rbp), %esi
callq 0x9fb880
movq %rax, -0x1470(%rbp)
movq -0x1da8(%rbp), %rax
movl -0x1dac(%rbp), %edx
movq -0x1470(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
movq -0x1598(%rbp), %rcx
addq $-0x2, %rcx
movl -0x1588(%rbp), %edx
movl %ecx, %eax
subl %edx, %eax
movl %eax, -0x1dc4(%rbp)
movq -0x1568(%rbp), %rax
movq %rax, -0x1dc0(%rbp)
movl -0x156c(%rbp), %eax
movq %rcx, -0x1490(%rbp)
movl %eax, -0x1494(%rbp)
movl $0x8, -0x1498(%rbp)
movl -0x1498(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1db8(%rbp)
subl $0x4, %eax
ja 0x9e7e6f
movq -0x1db8(%rbp), %rax
leaq 0x1ee58a(%rip), %rcx # 0xbd63f0
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9e7e71
movq -0x1490(%rbp), %rdi
movl -0x1494(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0x1488(%rbp)
jmp 0x9e7ef6
movq -0x1490(%rbp), %rdi
movl -0x1494(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0x1488(%rbp)
jmp 0x9e7ef6
movq -0x1490(%rbp), %rdi
movl -0x1494(%rbp), %esi
callq 0x9fb820
movq %rax, -0x1488(%rbp)
jmp 0x9e7ef6
movq -0x1490(%rbp), %rdi
movl -0x1494(%rbp), %esi
callq 0x9fb850
movq %rax, -0x1488(%rbp)
jmp 0x9e7ef6
movq -0x1490(%rbp), %rdi
movl -0x1494(%rbp), %esi
callq 0x9fb880
movq %rax, -0x1488(%rbp)
movq -0x1dc0(%rbp), %rax
movl -0x1dc4(%rbp), %edx
movq -0x1488(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
movl -0x16e4(%rbp), %eax
movl %eax, %ecx
movl %ecx, -0x1ddc(%rbp)
movq -0x1578(%rbp), %rcx
movq %rcx, -0x1dd8(%rbp)
movq -0x1588(%rbp), %rdx
addq %rax, %rdx
movl -0x157c(%rbp), %ecx
movl -0x1554(%rbp), %eax
movq %rdx, -0x14a8(%rbp)
movl %ecx, -0x14ac(%rbp)
movl %eax, -0x14b0(%rbp)
movl -0x14b0(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1dd0(%rbp)
subl $0x4, %eax
ja 0x9e7f80
movq -0x1dd0(%rbp), %rax
leaq 0x1ee48d(%rip), %rcx # 0xbd6404
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9e7f82
movq -0x14a8(%rbp), %rdi
movl -0x14ac(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0x14a0(%rbp)
jmp 0x9e8007
movq -0x14a8(%rbp), %rdi
movl -0x14ac(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0x14a0(%rbp)
jmp 0x9e8007
movq -0x14a8(%rbp), %rdi
movl -0x14ac(%rbp), %esi
callq 0x9fb820
movq %rax, -0x14a0(%rbp)
jmp 0x9e8007
movq -0x14a8(%rbp), %rdi
movl -0x14ac(%rbp), %esi
callq 0x9fb850
movq %rax, -0x14a0(%rbp)
jmp 0x9e8007
movq -0x14a8(%rbp), %rdi
movl -0x14ac(%rbp), %esi
callq 0x9fb880
movq %rax, -0x14a0(%rbp)
movq -0x1dd8(%rbp), %rax
movl -0x1ddc(%rbp), %edx
movq -0x14a0(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
movq -0x1598(%rbp), %rdx
decq %rdx
movl -0x1588(%rbp), %ecx
movl %edx, %eax
subl %ecx, %eax
movl %eax, -0x1df4(%rbp)
movq -0x1578(%rbp), %rax
movq %rax, -0x1df0(%rbp)
movl -0x157c(%rbp), %ecx
movl -0x1554(%rbp), %eax
movq %rdx, -0x14c0(%rbp)
movl %ecx, -0x14c4(%rbp)
movl %eax, -0x14c8(%rbp)
movl -0x14c8(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1de8(%rbp)
subl $0x4, %eax
ja 0x9e8093
movq -0x1de8(%rbp), %rax
leaq 0x1ee38e(%rip), %rcx # 0xbd6418
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9e8095
movq -0x14c0(%rbp), %rdi
movl -0x14c4(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0x14b8(%rbp)
jmp 0x9e811a
movq -0x14c0(%rbp), %rdi
movl -0x14c4(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0x14b8(%rbp)
jmp 0x9e811a
movq -0x14c0(%rbp), %rdi
movl -0x14c4(%rbp), %esi
callq 0x9fb820
movq %rax, -0x14b8(%rbp)
jmp 0x9e811a
movq -0x14c0(%rbp), %rdi
movl -0x14c4(%rbp), %esi
callq 0x9fb850
movq %rax, -0x14b8(%rbp)
jmp 0x9e811a
movq -0x14c0(%rbp), %rdi
movl -0x14c4(%rbp), %esi
callq 0x9fb880
movq %rax, -0x14b8(%rbp)
movq -0x1df0(%rbp), %rax
movl -0x1df4(%rbp), %edx
movq -0x14b8(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
cmpl $0x2, -0x1558(%rbp)
jne 0x9e8889
jmp 0x9e8140
movq -0x1598(%rbp), %rax
cmpq -0x15c0(%rbp), %rax
ja 0x9e8887
movq -0x1598(%rbp), %rax
movq -0x1588(%rbp), %rcx
subq %rcx, %rax
movl %eax, -0x16e8(%rbp)
movl -0x16e8(%rbp), %eax
subl -0x15c8(%rbp), %eax
movl %eax, -0x16ec(%rbp)
cmpl $0x2, -0x1558(%rbp)
jne 0x9e81bd
movl -0x16ec(%rbp), %eax
cmpl -0x15a8(%rbp), %eax
jae 0x9e81bd
movq -0x1600(%rbp), %rax
movl -0x16ec(%rbp), %ecx
addq %rcx, %rax
movl -0x1614(%rbp), %ecx
movl %ecx, %edx
xorl %ecx, %ecx
subq %rdx, %rcx
addq %rcx, %rax
movq %rax, -0x1e00(%rbp)
jmp 0x9e81d4
movq -0x1588(%rbp), %rax
movl -0x16ec(%rbp), %ecx
addq %rcx, %rax
movq %rax, -0x1e00(%rbp)
movq -0x1e00(%rbp), %rax
movq %rax, -0x16f8(%rbp)
movl -0x15a8(%rbp), %eax
subl $0x1, %eax
subl -0x16ec(%rbp), %eax
cmpl $0x3, %eax
jb 0x9e8885
movq -0x16f8(%rbp), %rdi
callq 0x9fb8d0
movl %eax, -0x1e04(%rbp)
movq -0x1598(%rbp), %rdi
callq 0x9fb8d0
movl %eax, %ecx
movl -0x1e04(%rbp), %eax
cmpl %ecx, %eax
jne 0x9e8885
movl -0x16ec(%rbp), %eax
cmpl -0x15a8(%rbp), %eax
jae 0x9e8246
movq -0x1610(%rbp), %rax
movq %rax, -0x1e10(%rbp)
jmp 0x9e8254
movq -0x15b8(%rbp), %rax
movq %rax, -0x1e10(%rbp)
movq -0x1e10(%rbp), %rax
movq %rax, -0x1700(%rbp)
movq -0x1598(%rbp), %rdi
addq $0x4, %rdi
movq -0x16f8(%rbp), %rsi
addq $0x4, %rsi
movq -0x15b8(%rbp), %rdx
movq -0x1700(%rbp), %rcx
movq -0x15b0(%rbp), %r8
callq 0x9fbad0
addq $0x4, %rax
movq %rax, -0x1708(%rbp)
movl -0x15c8(%rbp), %eax
movl %eax, -0x170c(%rbp)
movl -0x15c4(%rbp), %eax
movl %eax, -0x15c8(%rbp)
movl -0x170c(%rbp), %eax
movl %eax, -0x15c4(%rbp)
movq -0x1538(%rbp), %rsi
movq -0x15a0(%rbp), %rdx
movq -0x15b8(%rbp), %rcx
movq -0x1708(%rbp), %rax
subq $0x3, %rax
movq %rsi, -0x588(%rbp)
movq $0x0, -0x590(%rbp)
movq %rdx, -0x598(%rbp)
movq %rcx, -0x5a0(%rbp)
movl $0x0, -0x5a4(%rbp)
movq %rax, -0x5b0(%rbp)
movq -0x5a0(%rbp), %rax
addq $-0x20, %rax
movq %rax, -0x5b8(%rbp)
movq -0x598(%rbp), %rax
addq -0x590(%rbp), %rax
movq %rax, -0x5c0(%rbp)
movq -0x5c0(%rbp), %rax
cmpq -0x5b8(%rbp), %rax
ja 0x9e852e
movq -0x588(%rbp), %rax
movq 0x18(%rax), %rdi
movq -0x598(%rbp), %rsi
callq 0x9fbd80
cmpq $0x10, -0x590(%rbp)
jbe 0x9e852c
movq -0x588(%rbp), %rax
movq 0x18(%rax), %rdx
addq $0x10, %rdx
movq -0x598(%rbp), %rcx
addq $0x10, %rcx
movq -0x590(%rbp), %rax
subq $0x10, %rax
movq %rdx, -0x448(%rbp)
movq %rcx, -0x450(%rbp)
movq %rax, -0x458(%rbp)
movl $0x0, -0x45c(%rbp)
movq -0x448(%rbp), %rax
movq -0x450(%rbp), %rcx
subq %rcx, %rax
movq %rax, -0x468(%rbp)
movq -0x450(%rbp), %rax
movq %rax, -0x470(%rbp)
movq -0x448(%rbp), %rax
movq %rax, -0x478(%rbp)
movq -0x478(%rbp), %rax
addq -0x458(%rbp), %rax
movq %rax, -0x480(%rbp)
cmpl $0x1, -0x45c(%rbp)
jne 0x9e8460
cmpq $0x10, -0x468(%rbp)
jge 0x9e8460
jmp 0x9e8414
movq -0x478(%rbp), %rdi
movq -0x470(%rbp), %rsi
callq 0x9fbf50
movq -0x478(%rbp), %rax
addq $0x8, %rax
movq %rax, -0x478(%rbp)
movq -0x470(%rbp), %rax
addq $0x8, %rax
movq %rax, -0x470(%rbp)
movq -0x478(%rbp), %rax
cmpq -0x480(%rbp), %rax
jb 0x9e8414
jmp 0x9e852a
movq -0x478(%rbp), %rdi
movq -0x470(%rbp), %rsi
callq 0x9fbd80
movl $0x10, %eax
cmpq -0x458(%rbp), %rax
jl 0x9e8486
jmp 0x9e852a
movq -0x478(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x478(%rbp)
movq -0x470(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x470(%rbp)
movq -0x478(%rbp), %rdi
movq -0x470(%rbp), %rsi
callq 0x9fbd80
movq -0x478(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x478(%rbp)
movq -0x470(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x470(%rbp)
movq -0x478(%rbp), %rdi
movq -0x470(%rbp), %rsi
callq 0x9fbd80
movq -0x478(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x478(%rbp)
movq -0x470(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x470(%rbp)
movq -0x478(%rbp), %rax
cmpq -0x480(%rbp), %rax
jb 0x9e84aa
jmp 0x9e852a
jmp 0x9e852c
jmp 0x9e8553
movq -0x588(%rbp), %rax
movq 0x18(%rax), %rdi
movq -0x598(%rbp), %rsi
movq -0x5c0(%rbp), %rdx
movq -0x5b8(%rbp), %rcx
callq 0x9fbdb0
movq -0x590(%rbp), %rcx
movq -0x588(%rbp), %rax
addq 0x18(%rax), %rcx
movq %rcx, 0x18(%rax)
cmpq $0xffff, -0x590(%rbp) # imm = 0xFFFF
jbe 0x9e85ac
movq -0x588(%rbp), %rax
movl $0x1, 0x48(%rax)
movq -0x588(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x588(%rbp), %rcx
movq (%rcx), %rcx
subq %rcx, %rax
sarq $0x3, %rax
movl %eax, %ecx
movq -0x588(%rbp), %rax
movl %ecx, 0x4c(%rax)
movq -0x590(%rbp), %rax
movw %ax, %cx
movq -0x588(%rbp), %rax
movq 0x8(%rax), %rax
movw %cx, 0x4(%rax)
movl -0x5a4(%rbp), %ecx
addl $0x1, %ecx
movq -0x588(%rbp), %rax
movq 0x8(%rax), %rax
movl %ecx, (%rax)
cmpq $0xffff, -0x5b0(%rbp) # imm = 0xFFFF
jbe 0x9e861e
movq -0x588(%rbp), %rax
movl $0x2, 0x48(%rax)
movq -0x588(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x588(%rbp), %rcx
movq (%rcx), %rcx
subq %rcx, %rax
sarq $0x3, %rax
movl %eax, %ecx
movq -0x588(%rbp), %rax
movl %ecx, 0x4c(%rax)
movw -0x5b0(%rbp), %cx
movq -0x588(%rbp), %rax
movq 0x8(%rax), %rax
movw %cx, 0x6(%rax)
movq -0x588(%rbp), %rax
movq 0x8(%rax), %rcx
addq $0x8, %rcx
movq %rcx, 0x8(%rax)
movl -0x16e8(%rbp), %eax
movl %eax, -0x1e24(%rbp)
movq -0x1578(%rbp), %rax
movq %rax, -0x1e20(%rbp)
movq -0x1598(%rbp), %rdx
movl -0x157c(%rbp), %ecx
movl -0x1554(%rbp), %eax
movq %rdx, -0x14d8(%rbp)
movl %ecx, -0x14dc(%rbp)
movl %eax, -0x14e0(%rbp)
movl -0x14e0(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1e18(%rbp)
subl $0x4, %eax
ja 0x9e86b5
movq -0x1e18(%rbp), %rax
leaq 0x1edda8(%rip), %rcx # 0xbd6454
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9e86b7
movq -0x14d8(%rbp), %rdi
movl -0x14dc(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0x14d0(%rbp)
jmp 0x9e873c
movq -0x14d8(%rbp), %rdi
movl -0x14dc(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0x14d0(%rbp)
jmp 0x9e873c
movq -0x14d8(%rbp), %rdi
movl -0x14dc(%rbp), %esi
callq 0x9fb820
movq %rax, -0x14d0(%rbp)
jmp 0x9e873c
movq -0x14d8(%rbp), %rdi
movl -0x14dc(%rbp), %esi
callq 0x9fb850
movq %rax, -0x14d0(%rbp)
jmp 0x9e873c
movq -0x14d8(%rbp), %rdi
movl -0x14dc(%rbp), %esi
callq 0x9fb880
movq %rax, -0x14d0(%rbp)
movq -0x1e20(%rbp), %rax
movl -0x1e24(%rbp), %edx
movq -0x14d0(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
movl -0x16e8(%rbp), %eax
movl %eax, -0x1e3c(%rbp)
movq -0x1568(%rbp), %rax
movq %rax, -0x1e38(%rbp)
movq -0x1598(%rbp), %rcx
movl -0x156c(%rbp), %eax
movq %rcx, -0x14f0(%rbp)
movl %eax, -0x14f4(%rbp)
movl $0x8, -0x14f8(%rbp)
movl -0x14f8(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1e30(%rbp)
subl $0x4, %eax
ja 0x9e87bf
movq -0x1e30(%rbp), %rax
leaq 0x1edcb2(%rip), %rcx # 0xbd6468
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9e87c1
movq -0x14f0(%rbp), %rdi
movl -0x14f4(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0x14e8(%rbp)
jmp 0x9e8846
movq -0x14f0(%rbp), %rdi
movl -0x14f4(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0x14e8(%rbp)
jmp 0x9e8846
movq -0x14f0(%rbp), %rdi
movl -0x14f4(%rbp), %esi
callq 0x9fb820
movq %rax, -0x14e8(%rbp)
jmp 0x9e8846
movq -0x14f0(%rbp), %rdi
movl -0x14f4(%rbp), %esi
callq 0x9fb850
movq %rax, -0x14e8(%rbp)
jmp 0x9e8846
movq -0x14f0(%rbp), %rdi
movl -0x14f4(%rbp), %esi
callq 0x9fb880
movq %rax, -0x14e8(%rbp)
movq -0x1e38(%rbp), %rax
movl -0x1e3c(%rbp), %edx
movq -0x14e8(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
movq -0x1708(%rbp), %rax
addq -0x1598(%rbp), %rax
movq %rax, -0x1598(%rbp)
movq -0x1598(%rbp), %rax
movq %rax, -0x15a0(%rbp)
jmp 0x9e8140
jmp 0x9e8887
jmp 0x9e8889
cmpl $0x0, -0x1558(%rbp)
jne 0x9e8f4c
jmp 0x9e8898
movq -0x1598(%rbp), %rcx
xorl %eax, %eax
cmpq -0x15c0(%rbp), %rcx
movb %al, -0x1e3d(%rbp)
ja 0x9e891a
cmpl $0x0, -0x15c8(%rbp)
seta %al
andb $0x1, %al
movzbl %al, %eax
movl %eax, -0x1e44(%rbp)
movq -0x1598(%rbp), %rdi
callq 0x9fb8d0
movl %eax, -0x1e48(%rbp)
movq -0x1598(%rbp), %rdi
movl -0x15c8(%rbp), %eax
movl %eax, %ecx
xorl %eax, %eax
subq %rcx, %rax
addq %rax, %rdi
callq 0x9fb8d0
movl -0x1e48(%rbp), %ecx
movl %eax, %edx
movl -0x1e44(%rbp), %eax
cmpl %edx, %ecx
sete %cl
andb $0x1, %cl
movzbl %cl, %ecx
andl %ecx, %eax
cmpl $0x0, %eax
setne %al
movb %al, -0x1e3d(%rbp)
movb -0x1e3d(%rbp), %al
testb $0x1, %al
jne 0x9e8929
jmp 0x9e8f4a
movq -0x1598(%rbp), %rdi
addq $0x4, %rdi
movl -0x15c8(%rbp), %eax
movq %rdi, %rsi
subq %rax, %rsi
movq -0x15b8(%rbp), %rdx
callq 0x9fbb90
addq $0x4, %rax
movq %rax, -0x1718(%rbp)
movl -0x15c8(%rbp), %eax
movl %eax, -0x171c(%rbp)
movl -0x15c4(%rbp), %eax
movl %eax, -0x15c8(%rbp)
movl -0x171c(%rbp), %eax
movl %eax, -0x15c4(%rbp)
movq -0x1598(%rbp), %rdx
movl -0x1588(%rbp), %ecx
movl %edx, %eax
subl %ecx, %eax
movl %eax, -0x1e5c(%rbp)
movq -0x1578(%rbp), %rax
movq %rax, -0x1e58(%rbp)
movl -0x157c(%rbp), %ecx
movl -0x1554(%rbp), %eax
movq %rdx, -0x1508(%rbp)
movl %ecx, -0x150c(%rbp)
movl %eax, -0x1510(%rbp)
movl -0x1510(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1e50(%rbp)
subl $0x4, %eax
ja 0x9e89ed
movq -0x1e50(%rbp), %rax
leaq 0x1eda48(%rip), %rcx # 0xbd642c
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9e89ef
movq -0x1508(%rbp), %rdi
movl -0x150c(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0x1500(%rbp)
jmp 0x9e8a74
movq -0x1508(%rbp), %rdi
movl -0x150c(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0x1500(%rbp)
jmp 0x9e8a74
movq -0x1508(%rbp), %rdi
movl -0x150c(%rbp), %esi
callq 0x9fb820
movq %rax, -0x1500(%rbp)
jmp 0x9e8a74
movq -0x1508(%rbp), %rdi
movl -0x150c(%rbp), %esi
callq 0x9fb850
movq %rax, -0x1500(%rbp)
jmp 0x9e8a74
movq -0x1508(%rbp), %rdi
movl -0x150c(%rbp), %esi
callq 0x9fb880
movq %rax, -0x1500(%rbp)
movq -0x1e58(%rbp), %rax
movl -0x1e5c(%rbp), %edx
movq -0x1500(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
movq -0x1598(%rbp), %rcx
movl -0x1588(%rbp), %edx
movl %ecx, %eax
subl %edx, %eax
movl %eax, -0x1e74(%rbp)
movq -0x1568(%rbp), %rax
movq %rax, -0x1e70(%rbp)
movl -0x156c(%rbp), %eax
movq %rcx, -0x1520(%rbp)
movl %eax, -0x1524(%rbp)
movl $0x8, -0x1528(%rbp)
movl -0x1528(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0x1e68(%rbp)
subl $0x4, %eax
ja 0x9e8afb
movq -0x1e68(%rbp), %rax
leaq 0x1ed94e(%rip), %rcx # 0xbd6440
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9e8afd
movq -0x1520(%rbp), %rdi
movl -0x1524(%rbp), %esi
callq 0x9fb7c0
movq %rax, -0x1518(%rbp)
jmp 0x9e8b82
movq -0x1520(%rbp), %rdi
movl -0x1524(%rbp), %esi
callq 0x9fb7f0
movq %rax, -0x1518(%rbp)
jmp 0x9e8b82
movq -0x1520(%rbp), %rdi
movl -0x1524(%rbp), %esi
callq 0x9fb820
movq %rax, -0x1518(%rbp)
jmp 0x9e8b82
movq -0x1520(%rbp), %rdi
movl -0x1524(%rbp), %esi
callq 0x9fb850
movq %rax, -0x1518(%rbp)
jmp 0x9e8b82
movq -0x1520(%rbp), %rdi
movl -0x1524(%rbp), %esi
callq 0x9fb880
movq %rax, -0x1518(%rbp)
movq -0x1e70(%rbp), %rax
movl -0x1e74(%rbp), %edx
movq -0x1518(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
movq -0x1538(%rbp), %rsi
movq -0x15a0(%rbp), %rdx
movq -0x15b8(%rbp), %rcx
movq -0x1718(%rbp), %rax
subq $0x3, %rax
movq %rsi, -0x548(%rbp)
movq $0x0, -0x550(%rbp)
movq %rdx, -0x558(%rbp)
movq %rcx, -0x560(%rbp)
movl $0x0, -0x564(%rbp)
movq %rax, -0x570(%rbp)
movq -0x560(%rbp), %rax
addq $-0x20, %rax
movq %rax, -0x578(%rbp)
movq -0x558(%rbp), %rax
addq -0x550(%rbp), %rax
movq %rax, -0x580(%rbp)
movq -0x580(%rbp), %rax
cmpq -0x578(%rbp), %rax
ja 0x9e8e06
movq -0x548(%rbp), %rax
movq 0x18(%rax), %rdi
movq -0x558(%rbp), %rsi
callq 0x9fbd80
cmpq $0x10, -0x550(%rbp)
jbe 0x9e8e04
movq -0x548(%rbp), %rax
movq 0x18(%rax), %rdx
addq $0x10, %rdx
movq -0x558(%rbp), %rcx
addq $0x10, %rcx
movq -0x550(%rbp), %rax
subq $0x10, %rax
movq %rdx, -0x488(%rbp)
movq %rcx, -0x490(%rbp)
movq %rax, -0x498(%rbp)
movl $0x0, -0x49c(%rbp)
movq -0x488(%rbp), %rax
movq -0x490(%rbp), %rcx
subq %rcx, %rax
movq %rax, -0x4a8(%rbp)
movq -0x490(%rbp), %rax
movq %rax, -0x4b0(%rbp)
movq -0x488(%rbp), %rax
movq %rax, -0x4b8(%rbp)
movq -0x4b8(%rbp), %rax
addq -0x498(%rbp), %rax
movq %rax, -0x4c0(%rbp)
cmpl $0x1, -0x49c(%rbp)
jne 0x9e8d38
cmpq $0x10, -0x4a8(%rbp)
jge 0x9e8d38
jmp 0x9e8cec
movq -0x4b8(%rbp), %rdi
movq -0x4b0(%rbp), %rsi
callq 0x9fbf50
movq -0x4b8(%rbp), %rax
addq $0x8, %rax
movq %rax, -0x4b8(%rbp)
movq -0x4b0(%rbp), %rax
addq $0x8, %rax
movq %rax, -0x4b0(%rbp)
movq -0x4b8(%rbp), %rax
cmpq -0x4c0(%rbp), %rax
jb 0x9e8cec
jmp 0x9e8e02
movq -0x4b8(%rbp), %rdi
movq -0x4b0(%rbp), %rsi
callq 0x9fbd80
movl $0x10, %eax
cmpq -0x498(%rbp), %rax
jl 0x9e8d5e
jmp 0x9e8e02
movq -0x4b8(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x4b8(%rbp)
movq -0x4b0(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x4b0(%rbp)
movq -0x4b8(%rbp), %rdi
movq -0x4b0(%rbp), %rsi
callq 0x9fbd80
movq -0x4b8(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x4b8(%rbp)
movq -0x4b0(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x4b0(%rbp)
movq -0x4b8(%rbp), %rdi
movq -0x4b0(%rbp), %rsi
callq 0x9fbd80
movq -0x4b8(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x4b8(%rbp)
movq -0x4b0(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x4b0(%rbp)
movq -0x4b8(%rbp), %rax
cmpq -0x4c0(%rbp), %rax
jb 0x9e8d82
jmp 0x9e8e02
jmp 0x9e8e04
jmp 0x9e8e2b
movq -0x548(%rbp), %rax
movq 0x18(%rax), %rdi
movq -0x558(%rbp), %rsi
movq -0x580(%rbp), %rdx
movq -0x578(%rbp), %rcx
callq 0x9fbdb0
movq -0x550(%rbp), %rcx
movq -0x548(%rbp), %rax
addq 0x18(%rax), %rcx
movq %rcx, 0x18(%rax)
cmpq $0xffff, -0x550(%rbp) # imm = 0xFFFF
jbe 0x9e8e84
movq -0x548(%rbp), %rax
movl $0x1, 0x48(%rax)
movq -0x548(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x548(%rbp), %rcx
movq (%rcx), %rcx
subq %rcx, %rax
sarq $0x3, %rax
movl %eax, %ecx
movq -0x548(%rbp), %rax
movl %ecx, 0x4c(%rax)
movq -0x550(%rbp), %rax
movw %ax, %cx
movq -0x548(%rbp), %rax
movq 0x8(%rax), %rax
movw %cx, 0x4(%rax)
movl -0x564(%rbp), %ecx
addl $0x1, %ecx
movq -0x548(%rbp), %rax
movq 0x8(%rax), %rax
movl %ecx, (%rax)
cmpq $0xffff, -0x570(%rbp) # imm = 0xFFFF
jbe 0x9e8ef6
movq -0x548(%rbp), %rax
movl $0x2, 0x48(%rax)
movq -0x548(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x548(%rbp), %rcx
movq (%rcx), %rcx
subq %rcx, %rax
sarq $0x3, %rax
movl %eax, %ecx
movq -0x548(%rbp), %rax
movl %ecx, 0x4c(%rax)
movq -0x570(%rbp), %rax
movw %ax, %cx
movq -0x548(%rbp), %rax
movq 0x8(%rax), %rax
movw %cx, 0x6(%rax)
movq -0x548(%rbp), %rax
movq 0x8(%rax), %rcx
addq $0x8, %rcx
movq %rcx, 0x8(%rax)
movq -0x1718(%rbp), %rax
addq -0x1598(%rbp), %rax
movq %rax, -0x1598(%rbp)
movq -0x1598(%rbp), %rax
movq %rax, -0x15a0(%rbp)
jmp 0x9e8898
jmp 0x9e8f4c
jmp 0x9e8f4e
jmp 0x9e6098
cmpl $0x0, -0x15c4(%rbp)
je 0x9e8f6a
movl -0x15c4(%rbp), %eax
movl %eax, -0x1e78(%rbp)
jmp 0x9e8f76
movl -0x15cc(%rbp), %eax
movl %eax, -0x1e78(%rbp)
movl -0x1e78(%rbp), %ecx
movq -0x1540(%rbp), %rax
movl %ecx, (%rax)
cmpl $0x0, -0x15c8(%rbp)
je 0x9e8f9c
movl -0x15c8(%rbp), %eax
movl %eax, -0x1e7c(%rbp)
jmp 0x9e8fa8
movl -0x15cc(%rbp), %eax
movl %eax, -0x1e7c(%rbp)
movl -0x1e7c(%rbp), %ecx
movq -0x1540(%rbp), %rax
movl %ecx, 0x4(%rax)
movq -0x15b8(%rbp), %rax
movq -0x15a0(%rbp), %rcx
subq %rcx, %rax
movq %rax, -0x1728(%rbp)
movq -0x1728(%rbp), %rax
addq $0x1e80, %rsp # imm = 0x1E80
popq %rbp
retq
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/zstd_double_fast.c
|
ZSTD_fillHashTable
|
void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
const void* const end,
ZSTD_dictTableLoadMethod_e dtlm)
{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
U32* const hashTable = ms->hashTable;
U32 const hBits = cParams->hashLog;
U32 const mls = cParams->minMatch;
const BYTE* const base = ms->window.base;
const BYTE* ip = base + ms->nextToUpdate;
const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
const U32 fastHashFillStep = 3;
/* Always insert every fastHashFillStep position into the hash table.
* Insert the other positions if their hash entry is empty.
*/
for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) {
U32 const curr = (U32)(ip - base);
size_t const hash0 = ZSTD_hashPtr(ip, hBits, mls);
hashTable[hash0] = curr;
if (dtlm == ZSTD_dtlm_fast) continue;
/* Only load extra positions for ZSTD_dtlm_full */
{ U32 p;
for (p = 1; p < fastHashFillStep; ++p) {
size_t const hash = ZSTD_hashPtr(ip + p, hBits, mls);
if (hashTable[hash] == 0) { /* not yet filled */
hashTable[hash] = curr + p;
} } } }
}
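
A minimal standalone sketch of the fill pattern above, using a toy hash and plain arrays rather than zstd's matchState (all names below are illustrative, not zstd API): every fastHashFillStep-th position is always written, and the intermediate positions are written only when their slot is still empty, mirroring the ZSTD_dtlm_full branch.

/* toy_fill.c - illustrative sketch only; toy hash, not zstd's ZSTD_hashPtr */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HLOG 10           /* hash table has 1<<HLOG slots */
#define STEP 3            /* plays the role of fastHashFillStep */

static size_t toy_hash(const uint8_t *p)
{
    uint32_t v;
    memcpy(&v, p, 4);                         /* read a 4-byte prefix */
    return (size_t)((v * 2654435761u) >> (32 - HLOG));
}

static void toy_fill(uint32_t *table, const uint8_t *base, size_t size, int full)
{
    const uint8_t *ip = base;
    const uint8_t *iend = base + size - 8;    /* keep HASH_READ_SIZE-style headroom */
    for (; ip + STEP < iend + 2; ip += STEP) {
        uint32_t const curr = (uint32_t)(ip - base);
        table[toy_hash(ip)] = curr;           /* stride positions: always insert */
        if (!full) continue;                  /* "fast" load method stops here */
        for (uint32_t p = 1; p < STEP; ++p) { /* "full": fill empty slots in between */
            size_t const h = toy_hash(ip + p);
            if (table[h] == 0) table[h] = curr + p;
        }
    }
}

int main(void)
{
    uint8_t buf[256];
    uint32_t table[1u << HLOG] = {0};
    size_t filled = 0;
    for (size_t i = 0; i < sizeof buf; ++i) buf[i] = (uint8_t)(i * 7);
    toy_fill(table, buf, sizeof buf, /*full=*/1);
    for (size_t i = 0; i < (1u << HLOG); ++i) filled += (table[i] != 0);
    printf("filled %zu of %u slots\n", filled, 1u << HLOG);
    return 0;
}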
|
pushq %rbp
movq %rsp, %rbp
subq $0xb0, %rsp
movq %rdi, -0x38(%rbp)
movq %rsi, -0x40(%rbp)
movl %edx, -0x44(%rbp)
movq -0x38(%rbp), %rax
addq $0xf0, %rax
movq %rax, -0x50(%rbp)
movq -0x38(%rbp), %rax
movq 0x60(%rax), %rax
movq %rax, -0x58(%rbp)
movq -0x50(%rbp), %rax
movl 0x8(%rax), %eax
movl %eax, -0x5c(%rbp)
movq -0x50(%rbp), %rax
movl 0x10(%rax), %eax
movl %eax, -0x60(%rbp)
movq -0x38(%rbp), %rax
movq 0x8(%rax), %rax
movq %rax, -0x68(%rbp)
movq -0x68(%rbp), %rax
movq -0x38(%rbp), %rcx
movl 0x2c(%rcx), %ecx
addq %rcx, %rax
movq %rax, -0x70(%rbp)
movq -0x40(%rbp), %rax
addq $-0x8, %rax
movq %rax, -0x78(%rbp)
movl $0x3, -0x7c(%rbp)
movq -0x70(%rbp), %rax
addq $0x3, %rax
movq -0x78(%rbp), %rcx
addq $0x2, %rcx
cmpq %rcx, %rax
jae 0x9fc32e
movl -0x70(%rbp), %eax
movl -0x68(%rbp), %ecx
subl %ecx, %eax
movl %eax, -0x80(%rbp)
movq -0x70(%rbp), %rdx
movl -0x5c(%rbp), %ecx
movl -0x60(%rbp), %eax
movq %rdx, -0x10(%rbp)
movl %ecx, -0x14(%rbp)
movl %eax, -0x18(%rbp)
movl -0x18(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0xa0(%rbp)
subl $0x4, %eax
ja 0x9fc198
movq -0xa0(%rbp), %rax
leaq 0x1dac81(%rip), %rcx # 0xbd6e10
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9fc19a
movq -0x10(%rbp), %rdi
movl -0x14(%rbp), %esi
callq 0xa0b270
movq %rax, -0x8(%rbp)
jmp 0x9fc1f2
movq -0x10(%rbp), %rdi
movl -0x14(%rbp), %esi
callq 0xa0b2a0
movq %rax, -0x8(%rbp)
jmp 0x9fc1f2
movq -0x10(%rbp), %rdi
movl -0x14(%rbp), %esi
callq 0xa0b2d0
movq %rax, -0x8(%rbp)
jmp 0x9fc1f2
movq -0x10(%rbp), %rdi
movl -0x14(%rbp), %esi
callq 0xa0b300
movq %rax, -0x8(%rbp)
jmp 0x9fc1f2
movq -0x10(%rbp), %rdi
movl -0x14(%rbp), %esi
callq 0xa0b330
movq %rax, -0x8(%rbp)
movq -0x8(%rbp), %rax
movq %rax, -0x88(%rbp)
movl -0x80(%rbp), %edx
movq -0x58(%rbp), %rax
movq -0x88(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
cmpl $0x0, -0x44(%rbp)
jne 0x9fc219
jmp 0x9fc31d
movl $0x1, -0x8c(%rbp)
cmpl $0x3, -0x8c(%rbp)
jae 0x9fc31b
movq -0x70(%rbp), %rdx
movl -0x8c(%rbp), %eax
addq %rax, %rdx
movl -0x5c(%rbp), %ecx
movl -0x60(%rbp), %eax
movq %rdx, -0x28(%rbp)
movl %ecx, -0x2c(%rbp)
movl %eax, -0x30(%rbp)
movl -0x30(%rbp), %eax
addl $-0x4, %eax
movl %eax, %ecx
movq %rcx, -0xa8(%rbp)
subl $0x4, %eax
ja 0x9fc278
movq -0xa8(%rbp), %rax
leaq 0x1dabb5(%rip), %rcx # 0xbd6e24
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
jmp 0x9fc27a
movq -0x28(%rbp), %rdi
movl -0x2c(%rbp), %esi
callq 0xa0b270
movq %rax, -0x20(%rbp)
jmp 0x9fc2d2
movq -0x28(%rbp), %rdi
movl -0x2c(%rbp), %esi
callq 0xa0b2a0
movq %rax, -0x20(%rbp)
jmp 0x9fc2d2
movq -0x28(%rbp), %rdi
movl -0x2c(%rbp), %esi
callq 0xa0b2d0
movq %rax, -0x20(%rbp)
jmp 0x9fc2d2
movq -0x28(%rbp), %rdi
movl -0x2c(%rbp), %esi
callq 0xa0b300
movq %rax, -0x20(%rbp)
jmp 0x9fc2d2
movq -0x28(%rbp), %rdi
movl -0x2c(%rbp), %esi
callq 0xa0b330
movq %rax, -0x20(%rbp)
movq -0x20(%rbp), %rax
movq %rax, -0x98(%rbp)
movq -0x58(%rbp), %rax
movq -0x98(%rbp), %rcx
cmpl $0x0, (%rax,%rcx,4)
jne 0x9fc305
movl -0x80(%rbp), %edx
addl -0x8c(%rbp), %edx
movq -0x58(%rbp), %rax
movq -0x98(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
jmp 0x9fc307
movl -0x8c(%rbp), %eax
addl $0x1, %eax
movl %eax, -0x8c(%rbp)
jmp 0x9fc223
jmp 0x9fc31d
movq -0x70(%rbp), %rax
addq $0x3, %rax
movq %rax, -0x70(%rbp)
jmp 0x9fc135
addq $0xb0, %rsp
popq %rbp
retq
nopw (%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/zstd_fast.c
|
ZSTD_BtFindBestMatch_dictMatchState_selectMLS
|
static size_t ZSTD_BtFindBestMatch_dictMatchState_selectMLS (
ZSTD_matchState_t* ms,
const BYTE* ip, const BYTE* const iLimit,
size_t* offsetPtr)
{
switch(ms->cParams.minMatch)
{
default : /* includes case 3 */
case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState);
case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState);
case 7 :
case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState);
}
}
|
pushq %rbp
movq %rsp, %rbp
subq $0xc0, %rsp
movq %rdi, -0xa0(%rbp)
movq %rsi, -0xa8(%rbp)
movq %rdx, -0xb0(%rbp)
movq %rcx, -0xb8(%rbp)
movq -0xa0(%rbp), %rax
movl 0x100(%rax), %eax
movl %eax, -0xbc(%rbp)
subl $0x4, %eax
je 0xa4fb08
jmp 0xa4fae1
movl -0xbc(%rbp), %eax
subl $0x5, %eax
je 0xa4fbac
jmp 0xa4faf2
movl -0xbc(%rbp), %eax
addl $-0x6, %eax
subl $0x2, %eax
jb 0xa4fc50
jmp 0xa4fb06
jmp 0xa4fb08
movq -0xa0(%rbp), %rsi
movq -0xa8(%rbp), %rdx
movq -0xb0(%rbp), %rcx
movq -0xb8(%rbp), %rax
movq %rsi, -0x10(%rbp)
movq %rdx, -0x18(%rbp)
movq %rcx, -0x20(%rbp)
movq %rax, -0x28(%rbp)
movl $0x4, -0x2c(%rbp)
movl $0x2, -0x30(%rbp)
movq -0x18(%rbp), %rax
movq -0x10(%rbp), %rcx
movq 0x8(%rcx), %rcx
movq -0x10(%rbp), %rdx
movl 0x2c(%rdx), %edx
addq %rdx, %rcx
cmpq %rcx, %rax
jae 0xa4fb67
movq $0x0, -0x8(%rbp)
jmp 0xa4fb9c
movq -0x10(%rbp), %rdi
movq -0x18(%rbp), %rsi
movq -0x20(%rbp), %rdx
movl -0x2c(%rbp), %ecx
callq 0xa691e0
movq -0x10(%rbp), %rdi
movq -0x18(%rbp), %rsi
movq -0x20(%rbp), %rdx
movq -0x28(%rbp), %rcx
movl -0x2c(%rbp), %r8d
movl -0x30(%rbp), %r9d
callq 0xa693b0
movq %rax, -0x8(%rbp)
movq -0x8(%rbp), %rax
movq %rax, -0x98(%rbp)
jmp 0xa4fd04
movq -0xa0(%rbp), %rsi
movq -0xa8(%rbp), %rdx
movq -0xb0(%rbp), %rcx
movq -0xb8(%rbp), %rax
movq %rsi, -0x40(%rbp)
movq %rdx, -0x48(%rbp)
movq %rcx, -0x50(%rbp)
movq %rax, -0x58(%rbp)
movl $0x5, -0x5c(%rbp)
movl $0x2, -0x60(%rbp)
movq -0x48(%rbp), %rax
movq -0x40(%rbp), %rcx
movq 0x8(%rcx), %rcx
movq -0x40(%rbp), %rdx
movl 0x2c(%rdx), %edx
addq %rdx, %rcx
cmpq %rcx, %rax
jae 0xa4fc0b
movq $0x0, -0x38(%rbp)
jmp 0xa4fc40
movq -0x40(%rbp), %rdi
movq -0x48(%rbp), %rsi
movq -0x50(%rbp), %rdx
movl -0x5c(%rbp), %ecx
callq 0xa691e0
movq -0x40(%rbp), %rdi
movq -0x48(%rbp), %rsi
movq -0x50(%rbp), %rdx
movq -0x58(%rbp), %rcx
movl -0x5c(%rbp), %r8d
movl -0x60(%rbp), %r9d
callq 0xa693b0
movq %rax, -0x38(%rbp)
movq -0x38(%rbp), %rax
movq %rax, -0x98(%rbp)
jmp 0xa4fd04
movq -0xa0(%rbp), %rsi
movq -0xa8(%rbp), %rdx
movq -0xb0(%rbp), %rcx
movq -0xb8(%rbp), %rax
movq %rsi, -0x70(%rbp)
movq %rdx, -0x78(%rbp)
movq %rcx, -0x80(%rbp)
movq %rax, -0x88(%rbp)
movl $0x6, -0x8c(%rbp)
movl $0x2, -0x90(%rbp)
movq -0x78(%rbp), %rax
movq -0x70(%rbp), %rcx
movq 0x8(%rcx), %rcx
movq -0x70(%rbp), %rdx
movl 0x2c(%rdx), %edx
addq %rdx, %rcx
cmpq %rcx, %rax
jae 0xa4fcb8
movq $0x0, -0x68(%rbp)
jmp 0xa4fcf9
movq -0x70(%rbp), %rdi
movq -0x78(%rbp), %rsi
movq -0x80(%rbp), %rdx
movl -0x8c(%rbp), %ecx
callq 0xa691e0
movq -0x70(%rbp), %rdi
movq -0x78(%rbp), %rsi
movq -0x80(%rbp), %rdx
movq -0x88(%rbp), %rcx
movl -0x8c(%rbp), %r8d
movl -0x90(%rbp), %r9d
callq 0xa693b0
movq %rax, -0x68(%rbp)
movq -0x68(%rbp), %rax
movq %rax, -0x98(%rbp)
movq -0x98(%rbp), %rax
addq $0xc0, %rsp
popq %rbp
retq
nopw %cs:(%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/zstd_lazy.c
|
ZSTD_ldm_skipSequences
|
void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch) {
while (srcSize > 0 && rawSeqStore->pos < rawSeqStore->size) {
rawSeq* seq = rawSeqStore->seq + rawSeqStore->pos;
if (srcSize <= seq->litLength) {
/* Skip past srcSize literals */
seq->litLength -= (U32)srcSize;
return;
}
srcSize -= seq->litLength;
seq->litLength = 0;
if (srcSize < seq->matchLength) {
/* Skip past the first srcSize of the match */
seq->matchLength -= (U32)srcSize;
if (seq->matchLength < minMatch) {
/* The match is too short, omit it */
if (rawSeqStore->pos + 1 < rawSeqStore->size) {
seq[1].litLength += seq[0].matchLength;
}
rawSeqStore->pos++;
}
return;
}
srcSize -= seq->matchLength;
seq->matchLength = 0;
rawSeqStore->pos++;
}
}
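
The loop above consumes srcSize bytes against the stored (litLength, matchLength) pairs. A small self-contained walk-through with a simplified sequence store (hypothetical struct names, not zstd's types) shows how pos advances and how a partially consumed sequence is left behind:

/* skip_demo.c - simplified restatement of the skipping logic, for illustration */
#include <stdio.h>
#include <stdint.h>

typedef struct { uint32_t offset, litLength, matchLength; } Seq;
typedef struct { Seq *seq; size_t pos, size; } SeqStore;

static void skip(SeqStore *s, size_t srcSize, uint32_t minMatch)
{
    while (srcSize > 0 && s->pos < s->size) {
        Seq *q = s->seq + s->pos;
        if (srcSize <= q->litLength) {          /* stop inside the literals */
            q->litLength -= (uint32_t)srcSize;
            return;
        }
        srcSize -= q->litLength;
        q->litLength = 0;
        if (srcSize < q->matchLength) {         /* stop inside the match */
            q->matchLength -= (uint32_t)srcSize;
            if (q->matchLength < minMatch) {    /* leftover match too short: drop it */
                if (s->pos + 1 < s->size)
                    q[1].litLength += q[0].matchLength;
                s->pos++;
            }
            return;
        }
        srcSize -= q->matchLength;
        q->matchLength = 0;
        s->pos++;                               /* sequence fully consumed */
    }
}

int main(void)
{
    Seq seqs[] = { {100, 10, 8}, {200, 5, 12} };
    SeqStore s = { seqs, 0, 2 };
    skip(&s, 15, 4);   /* eats 10 literals plus 5 bytes of the first match */
    printf("pos=%zu first matchLength=%u second litLength=%u\n",
           s.pos, seqs[0].matchLength, seqs[1].litLength);  /* pos=1, 3, 8 */
    return 0;
}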
|
pushq %rbp
movq %rsp, %rbp
movq %rdi, -0x8(%rbp)
movq %rsi, -0x10(%rbp)
movl %edx, -0x14(%rbp)
xorl %eax, %eax
cmpq $0x0, -0x10(%rbp)
movb %al, -0x21(%rbp)
jbe 0xa7a611
movq -0x8(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x8(%rbp), %rcx
cmpq 0x18(%rcx), %rax
setb %al
movb %al, -0x21(%rbp)
movb -0x21(%rbp), %al
testb $0x1, %al
jne 0xa7a61d
jmp 0xa7a715
movq -0x8(%rbp), %rax
movq (%rax), %rax
movq -0x8(%rbp), %rcx
imulq $0xc, 0x8(%rcx), %rcx
addq %rcx, %rax
movq %rax, -0x20(%rbp)
movq -0x10(%rbp), %rax
movq -0x20(%rbp), %rcx
movl 0x4(%rcx), %ecx
cmpq %rcx, %rax
ja 0xa7a65b
movq -0x10(%rbp), %rax
movl %eax, %edx
movq -0x20(%rbp), %rax
movl 0x4(%rax), %ecx
subl %edx, %ecx
movl %ecx, 0x4(%rax)
jmp 0xa7a715
movq -0x20(%rbp), %rax
movl 0x4(%rax), %eax
movl %eax, %ecx
movq -0x10(%rbp), %rax
subq %rcx, %rax
movq %rax, -0x10(%rbp)
movq -0x20(%rbp), %rax
movl $0x0, 0x4(%rax)
movq -0x10(%rbp), %rax
movq -0x20(%rbp), %rcx
movl 0x8(%rcx), %ecx
cmpq %rcx, %rax
jae 0xa7a6e1
movq -0x10(%rbp), %rax
movl %eax, %edx
movq -0x20(%rbp), %rax
movl 0x8(%rax), %ecx
subl %edx, %ecx
movl %ecx, 0x8(%rax)
movq -0x20(%rbp), %rax
movl 0x8(%rax), %eax
cmpl -0x14(%rbp), %eax
jae 0xa7a6df
movq -0x8(%rbp), %rax
movq 0x8(%rax), %rax
addq $0x1, %rax
movq -0x8(%rbp), %rcx
cmpq 0x18(%rcx), %rax
jae 0xa7a6cf
movq -0x20(%rbp), %rax
movl 0x8(%rax), %ecx
movq -0x20(%rbp), %rax
addl 0x10(%rax), %ecx
movl %ecx, 0x10(%rax)
movq -0x8(%rbp), %rax
movq 0x8(%rax), %rcx
addq $0x1, %rcx
movq %rcx, 0x8(%rax)
jmp 0xa7a715
movq -0x20(%rbp), %rax
movl 0x8(%rax), %eax
movl %eax, %ecx
movq -0x10(%rbp), %rax
subq %rcx, %rax
movq %rax, -0x10(%rbp)
movq -0x20(%rbp), %rax
movl $0x0, 0x8(%rax)
movq -0x8(%rbp), %rax
movq 0x8(%rax), %rcx
addq $0x1, %rcx
movq %rcx, 0x8(%rax)
jmp 0xa7a5ef
popq %rbp
retq
nopw (%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/zstd_ldm.c
|
ZSTD_ldm_blockCompress
|
size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
ZSTD_useRowMatchFinderMode_e useRowMatchFinder,
void const* src, size_t srcSize)
{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
unsigned const minMatch = cParams->minMatch;
ZSTD_blockCompressor const blockCompressor =
ZSTD_selectBlockCompressor(cParams->strategy, useRowMatchFinder, ZSTD_matchState_dictMode(ms));
/* Input bounds */
BYTE const* const istart = (BYTE const*)src;
BYTE const* const iend = istart + srcSize;
/* Input positions */
BYTE const* ip = istart;
DEBUGLOG(5, "ZSTD_ldm_blockCompress: srcSize=%zu", srcSize);
/* If using opt parser, use LDMs only as candidates rather than always accepting them */
if (cParams->strategy >= ZSTD_btopt) {
size_t lastLLSize;
ms->ldmSeqStore = rawSeqStore;
lastLLSize = blockCompressor(ms, seqStore, rep, src, srcSize);
ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore, srcSize);
return lastLLSize;
}
assert(rawSeqStore->pos <= rawSeqStore->size);
assert(rawSeqStore->size <= rawSeqStore->capacity);
/* Loop through each sequence and apply the block compressor to the literals */
while (rawSeqStore->pos < rawSeqStore->size && ip < iend) {
/* maybeSplitSequence updates rawSeqStore->pos */
rawSeq const sequence = maybeSplitSequence(rawSeqStore,
(U32)(iend - ip), minMatch);
int i;
/* End signal */
if (sequence.offset == 0)
break;
assert(ip + sequence.litLength + sequence.matchLength <= iend);
/* Fill tables for block compressor */
ZSTD_ldm_limitTableUpdate(ms, ip);
ZSTD_ldm_fillFastTables(ms, ip);
/* Run the block compressor */
DEBUGLOG(5, "pos %u : calling block compressor on segment of size %u", (unsigned)(ip-istart), sequence.litLength);
{
size_t const newLitLength =
blockCompressor(ms, seqStore, rep, ip, sequence.litLength);
ip += sequence.litLength;
/* Update the repcodes */
for (i = ZSTD_REP_NUM - 1; i > 0; i--)
rep[i] = rep[i-1];
rep[0] = sequence.offset;
/* Store the sequence */
ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength, iend,
sequence.offset + ZSTD_REP_MOVE,
sequence.matchLength - MINMATCH);
ip += sequence.matchLength;
}
}
/* Fill the tables for the block compressor */
ZSTD_ldm_limitTableUpdate(ms, ip);
ZSTD_ldm_fillFastTables(ms, ip);
/* Compress the last literals */
return blockCompressor(ms, seqStore, rep, ip, iend - ip);
}
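
One detail worth isolating is the repcode update inside the loop: after each accepted long-distance match, the existing repcodes shift down one slot and the new offset takes rep[0]. A tiny sketch of just that rotation (ZSTD_REP_NUM is 3 in zstd; the driver values below are made up):

/* rep_demo.c - the repcode rotation used above, in isolation */
#include <stdio.h>

#define REP_NUM 3   /* ZSTD_REP_NUM */

static void push_rep(unsigned rep[REP_NUM], unsigned newOffset)
{
    /* shift older repcodes down; the newest offset becomes rep[0] */
    for (int i = REP_NUM - 1; i > 0; i--)
        rep[i] = rep[i - 1];
    rep[0] = newOffset;
}

int main(void)
{
    unsigned rep[REP_NUM] = {1, 4, 8};   /* zstd's repcode start values */
    push_rep(rep, 1234);
    printf("%u %u %u\n", rep[0], rep[1], rep[2]);  /* prints: 1234 1 4 */
    return 0;
}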
|
pushq %rbp
movq %rsp, %rbp
subq $0x130, %rsp # imm = 0x130
movq 0x10(%rbp), %rax
movq %rdi, -0x90(%rbp)
movq %rsi, -0x98(%rbp)
movq %rdx, -0xa0(%rbp)
movq %rcx, -0xa8(%rbp)
movl %r8d, -0xac(%rbp)
movq %r9, -0xb8(%rbp)
movq -0x98(%rbp), %rax
addq $0xf0, %rax
movq %rax, -0xc0(%rbp)
movq -0xc0(%rbp), %rax
movl 0x10(%rax), %eax
movl %eax, -0xc4(%rbp)
movq -0xc0(%rbp), %rax
movl 0x18(%rax), %eax
movl %eax, -0x128(%rbp)
movl -0xac(%rbp), %eax
movl %eax, -0x124(%rbp)
movq -0x98(%rbp), %rdi
callq 0xa7add0
movl -0x128(%rbp), %edi
movl -0x124(%rbp), %esi
movl %eax, %edx
callq 0x9cb4a0
movq %rax, -0xd0(%rbp)
movq -0xb8(%rbp), %rax
movq %rax, -0xd8(%rbp)
movq -0xd8(%rbp), %rax
addq 0x10(%rbp), %rax
movq %rax, -0xe0(%rbp)
movq -0xd8(%rbp), %rax
movq %rax, -0xe8(%rbp)
movq -0xc0(%rbp), %rax
cmpl $0x7, 0x18(%rax)
jb 0xa7a932
movq -0x90(%rbp), %rcx
movq -0x98(%rbp), %rax
movq %rcx, 0x110(%rax)
movq -0xd0(%rbp), %rax
movq -0x98(%rbp), %rdi
movq -0xa0(%rbp), %rsi
movq -0xa8(%rbp), %rdx
movq -0xb8(%rbp), %rcx
movq 0x10(%rbp), %r8
callq *%rax
movq %rax, -0xf0(%rbp)
movq -0x90(%rbp), %rdi
movq 0x10(%rbp), %rsi
callq 0xa7a720
movq -0xf0(%rbp), %rax
movq %rax, -0x88(%rbp)
jmp 0xa7adb1
jmp 0xa7a934
movq -0x90(%rbp), %rax
movq 0x8(%rax), %rcx
movq -0x90(%rbp), %rdx
xorl %eax, %eax
cmpq 0x18(%rdx), %rcx
movb %al, -0x129(%rbp)
jae 0xa7a96b
movq -0xe8(%rbp), %rax
cmpq -0xe0(%rbp), %rax
setb %al
movb %al, -0x129(%rbp)
movb -0x129(%rbp), %al
testb $0x1, %al
jne 0xa7a97a
jmp 0xa7ad4e
movq -0x90(%rbp), %rdi
movl -0xe0(%rbp), %esi
movl -0xe8(%rbp), %eax
subl %eax, %esi
movl -0xc4(%rbp), %edx
callq 0xa7ae60
movl %edx, -0x108(%rbp)
movq %rax, -0x110(%rbp)
movq -0x110(%rbp), %rax
movq %rax, -0xfc(%rbp)
movl -0x108(%rbp), %eax
movl %eax, -0xf4(%rbp)
cmpl $0x0, -0xfc(%rbp)
jne 0xa7a9cf
jmp 0xa7ad4e
movq -0x98(%rbp), %rdi
movq -0xe8(%rbp), %rsi
callq 0xa7af20
movq -0x98(%rbp), %rdi
movq -0xe8(%rbp), %rsi
callq 0xa7afa0
movq -0xd0(%rbp), %rax
movq -0x98(%rbp), %rdi
movq -0xa0(%rbp), %rsi
movq -0xa8(%rbp), %rdx
movq -0xe8(%rbp), %rcx
movl -0xf8(%rbp), %r8d
callq *%rax
movq %rax, -0x120(%rbp)
movl -0xf8(%rbp), %ecx
movq -0xe8(%rbp), %rax
movl %ecx, %ecx
addq %rcx, %rax
movq %rax, -0xe8(%rbp)
movl $0x2, -0x114(%rbp)
cmpl $0x0, -0x114(%rbp)
jle 0xa7aa8c
movq -0xa8(%rbp), %rax
movl -0x114(%rbp), %ecx
subl $0x1, %ecx
movslq %ecx, %rcx
movl (%rax,%rcx,4), %edx
movq -0xa8(%rbp), %rax
movslq -0x114(%rbp), %rcx
movl %edx, (%rax,%rcx,4)
movl -0x114(%rbp), %eax
addl $-0x1, %eax
movl %eax, -0x114(%rbp)
jmp 0xa7aa4b
movl -0xfc(%rbp), %ecx
movq -0xa8(%rbp), %rax
movl %ecx, (%rax)
movq -0xa0(%rbp), %r8
movq -0x120(%rbp), %rdi
movq -0xe8(%rbp), %rsi
xorl %eax, %eax
subq -0x120(%rbp), %rax
addq %rax, %rsi
movq -0xe0(%rbp), %rdx
movl -0xfc(%rbp), %ecx
addl $0x2, %ecx
movl -0xf4(%rbp), %eax
subl $0x3, %eax
movl %eax, %eax
movq %r8, -0x48(%rbp)
movq %rdi, -0x50(%rbp)
movq %rsi, -0x58(%rbp)
movq %rdx, -0x60(%rbp)
movl %ecx, -0x64(%rbp)
movq %rax, -0x70(%rbp)
movq -0x60(%rbp), %rax
addq $-0x20, %rax
movq %rax, -0x78(%rbp)
movq -0x58(%rbp), %rax
addq -0x50(%rbp), %rax
movq %rax, -0x80(%rbp)
movq -0x80(%rbp), %rax
cmpq -0x78(%rbp), %rax
ja 0xa7ac59
movq -0x48(%rbp), %rax
movq 0x18(%rax), %rdi
movq -0x58(%rbp), %rsi
callq 0xa7b6c0
cmpq $0x10, -0x50(%rbp)
jbe 0xa7ac57
movq -0x48(%rbp), %rax
movq 0x18(%rax), %rdx
addq $0x10, %rdx
movq -0x58(%rbp), %rcx
addq $0x10, %rcx
movq -0x50(%rbp), %rax
subq $0x10, %rax
movq %rdx, -0x8(%rbp)
movq %rcx, -0x10(%rbp)
movq %rax, -0x18(%rbp)
movl $0x0, -0x1c(%rbp)
movq -0x8(%rbp), %rax
movq -0x10(%rbp), %rcx
subq %rcx, %rax
movq %rax, -0x28(%rbp)
movq -0x10(%rbp), %rax
movq %rax, -0x30(%rbp)
movq -0x8(%rbp), %rax
movq %rax, -0x38(%rbp)
movq -0x38(%rbp), %rax
addq -0x18(%rbp), %rax
movq %rax, -0x40(%rbp)
cmpl $0x1, -0x1c(%rbp)
jne 0xa7abcd
cmpq $0x10, -0x28(%rbp)
jge 0xa7abcd
jmp 0xa7ab99
movq -0x38(%rbp), %rdi
movq -0x30(%rbp), %rsi
callq 0xa7b890
movq -0x38(%rbp), %rax
addq $0x8, %rax
movq %rax, -0x38(%rbp)
movq -0x30(%rbp), %rax
addq $0x8, %rax
movq %rax, -0x30(%rbp)
movq -0x38(%rbp), %rax
cmpq -0x40(%rbp), %rax
jb 0xa7ab99
jmp 0xa7ac55
movq -0x38(%rbp), %rdi
movq -0x30(%rbp), %rsi
callq 0xa7b6c0
movl $0x10, %eax
cmpq -0x18(%rbp), %rax
jl 0xa7abe7
jmp 0xa7ac55
movq -0x38(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x38(%rbp)
movq -0x30(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x30(%rbp)
movq -0x38(%rbp), %rdi
movq -0x30(%rbp), %rsi
callq 0xa7b6c0
movq -0x38(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x38(%rbp)
movq -0x30(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x30(%rbp)
movq -0x38(%rbp), %rdi
movq -0x30(%rbp), %rsi
callq 0xa7b6c0
movq -0x38(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x38(%rbp)
movq -0x30(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x30(%rbp)
movq -0x38(%rbp), %rax
cmpq -0x40(%rbp), %rax
jb 0xa7abff
jmp 0xa7ac55
jmp 0xa7ac57
jmp 0xa7ac72
movq -0x48(%rbp), %rax
movq 0x18(%rax), %rdi
movq -0x58(%rbp), %rsi
movq -0x80(%rbp), %rdx
movq -0x78(%rbp), %rcx
callq 0xa7b6f0
movq -0x50(%rbp), %rcx
movq -0x48(%rbp), %rax
addq 0x18(%rax), %rcx
movq %rcx, 0x18(%rax)
cmpq $0xffff, -0x50(%rbp) # imm = 0xFFFF
jbe 0xa7acb6
movq -0x48(%rbp), %rax
movl $0x1, 0x48(%rax)
movq -0x48(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x48(%rbp), %rcx
movq (%rcx), %rcx
subq %rcx, %rax
sarq $0x3, %rax
movl %eax, %ecx
movq -0x48(%rbp), %rax
movl %ecx, 0x4c(%rax)
movq -0x50(%rbp), %rax
movw %ax, %cx
movq -0x48(%rbp), %rax
movq 0x8(%rax), %rax
movw %cx, 0x4(%rax)
movl -0x64(%rbp), %ecx
addl $0x1, %ecx
movq -0x48(%rbp), %rax
movq 0x8(%rax), %rax
movl %ecx, (%rax)
cmpq $0xffff, -0x70(%rbp) # imm = 0xFFFF
jbe 0xa7ad0d
movq -0x48(%rbp), %rax
movl $0x2, 0x48(%rax)
movq -0x48(%rbp), %rax
movq 0x8(%rax), %rax
movq -0x48(%rbp), %rcx
movq (%rcx), %rcx
subq %rcx, %rax
sarq $0x3, %rax
movl %eax, %ecx
movq -0x48(%rbp), %rax
movl %ecx, 0x4c(%rax)
movq -0x70(%rbp), %rax
movw %ax, %cx
movq -0x48(%rbp), %rax
movq 0x8(%rax), %rax
movw %cx, 0x6(%rax)
movq -0x48(%rbp), %rax
movq 0x8(%rax), %rcx
addq $0x8, %rcx
movq %rcx, 0x8(%rax)
movl -0xf4(%rbp), %ecx
movq -0xe8(%rbp), %rax
movl %ecx, %ecx
addq %rcx, %rax
movq %rax, -0xe8(%rbp)
jmp 0xa7a934
movq -0x98(%rbp), %rdi
movq -0xe8(%rbp), %rsi
callq 0xa7af20
movq -0x98(%rbp), %rdi
movq -0xe8(%rbp), %rsi
callq 0xa7afa0
movq -0xd0(%rbp), %rax
movq -0x98(%rbp), %rdi
movq -0xa0(%rbp), %rsi
movq -0xa8(%rbp), %rdx
movq -0xe8(%rbp), %rcx
movq -0xe0(%rbp), %r8
movq -0xe8(%rbp), %r9
subq %r9, %r8
callq *%rax
movq %rax, -0x88(%rbp)
movq -0x88(%rbp), %rax
addq $0x130, %rsp # imm = 0x130
popq %rbp
retq
nopw %cs:(%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/zstd_ldm.c
|
ZSTD_ldm_fillFastTables
|
static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms,
void const* end)
{
const BYTE* const iend = (const BYTE*)end;
switch(ms->cParams.strategy)
{
case ZSTD_fast:
ZSTD_fillHashTable(ms, iend, ZSTD_dtlm_fast);
break;
case ZSTD_dfast:
ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast);
break;
case ZSTD_greedy:
case ZSTD_lazy:
case ZSTD_lazy2:
case ZSTD_btlazy2:
case ZSTD_btopt:
case ZSTD_btultra:
case ZSTD_btultra2:
break;
default:
assert(0); /* not possible : not a valid strategy id */
}
return 0;
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x20, %rsp
movq %rdi, -0x8(%rbp)
movq %rsi, -0x10(%rbp)
movq -0x10(%rbp), %rax
movq %rax, -0x18(%rbp)
movq -0x8(%rbp), %rax
movl 0x108(%rax), %eax
movl %eax, -0x1c(%rbp)
subl $0x1, %eax
je 0xa7afe3
jmp 0xa7afcc
movl -0x1c(%rbp), %eax
subl $0x2, %eax
je 0xa7aff4
jmp 0xa7afd6
movl -0x1c(%rbp), %eax
addl $-0x3, %eax
subl $0x7, %eax
jb 0xa7b005
jmp 0xa7b007
movq -0x8(%rbp), %rdi
movq -0x18(%rbp), %rsi
xorl %edx, %edx
callq 0x9fc0c0
jmp 0xa7b009
movq -0x8(%rbp), %rdi
movq -0x18(%rbp), %rsi
xorl %edx, %edx
callq 0x9dbd80
jmp 0xa7b009
jmp 0xa7b009
jmp 0xa7b009
xorl %eax, %eax
addq $0x20, %rsp
popq %rbp
retq
nopw %cs:(%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/zstd_ldm.c
|
ZSTD_NbCommonBytes
|
static unsigned ZSTD_NbCommonBytes (size_t val)
{
if (MEM_isLittleEndian()) {
if (MEM_64bits()) {
# if defined(_MSC_VER) && defined(_WIN64)
# if STATIC_BMI2
return _tzcnt_u64(val) >> 3;
# else
unsigned long r = 0;
return _BitScanForward64( &r, (U64)val ) ? (unsigned)(r >> 3) : 0;
# endif
# elif defined(__GNUC__) && (__GNUC__ >= 4)
return (__builtin_ctzll((U64)val) >> 3);
# else
static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2,
0, 3, 1, 3, 1, 4, 2, 7,
0, 2, 3, 6, 1, 5, 3, 5,
1, 3, 4, 4, 2, 5, 6, 7,
7, 0, 1, 2, 3, 3, 4, 6,
2, 6, 5, 5, 3, 4, 5, 6,
7, 1, 2, 4, 6, 4, 4, 5,
7, 2, 6, 5, 7, 6, 7, 7 };
return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
# endif
} else { /* 32 bits */
# if defined(_MSC_VER)
unsigned long r=0;
return _BitScanForward( &r, (U32)val ) ? (unsigned)(r >> 3) : 0;
# elif defined(__GNUC__) && (__GNUC__ >= 3)
return (__builtin_ctz((U32)val) >> 3);
# else
static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0,
3, 2, 2, 1, 3, 2, 0, 1,
3, 3, 1, 2, 2, 2, 2, 0,
3, 1, 2, 0, 1, 0, 1, 1 };
return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
# endif
}
} else { /* Big Endian CPU */
if (MEM_64bits()) {
# if defined(_MSC_VER) && defined(_WIN64)
# if STATIC_BMI2
return _lzcnt_u64(val) >> 3;
# else
unsigned long r = 0;
return _BitScanReverse64(&r, (U64)val) ? (unsigned)(r >> 3) : 0;
# endif
# elif defined(__GNUC__) && (__GNUC__ >= 4)
return (__builtin_clzll(val) >> 3);
# else
unsigned r;
const unsigned n32 = sizeof(size_t)*4; /* calculate this way due to compiler complaining in 32-bits mode */
if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; }
if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
r += (!val);
return r;
# endif
} else { /* 32 bits */
# if defined(_MSC_VER)
unsigned long r = 0;
return _BitScanReverse( &r, (unsigned long)val ) ? (unsigned)(r >> 3) : 0;
# elif defined(__GNUC__) && (__GNUC__ >= 3)
return (__builtin_clz((U32)val) >> 3);
# else
unsigned r;
if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
r += (!val);
return r;
# endif
} }
}
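
On little-endian 64-bit targets the function reduces to a count-trailing-zeros shifted right by 3. Its typical use is on the XOR of two 8-byte loads: the number of trailing zero bits of the XOR, divided by 8, is the number of leading bytes the two positions have in common. A small standalone illustration (the driver and names are ours, not zstd's; it assumes a GCC/Clang little-endian host, matching the __builtin_ctzll branch above):

/* nbcommon_demo.c - counting matching leading bytes via ctz of an XOR */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static unsigned nb_common_bytes(size_t val)
{
    /* little-endian, 64-bit case; val must be non-zero (ctz of 0 is undefined) */
    return (unsigned)(__builtin_ctzll((uint64_t)val) >> 3);
}

int main(void)
{
    const char a[8] = "abcdefgh";
    const char b[8] = "abcxefgh";    /* first difference at byte index 3 */
    uint64_t va, vb;
    memcpy(&va, a, 8);
    memcpy(&vb, b, 8);
    printf("common bytes: %u\n", nb_common_bytes((size_t)(va ^ vb)));  /* 3 */
    return 0;
}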
|
pushq %rbp
movq %rsp, %rbp
subq $0x10, %rsp
movq %rdi, -0x10(%rbp)
callq 0xa7b6a0
cmpl $0x0, %eax
je 0xa7b621
callq 0xa7b660
cmpl $0x0, %eax
je 0xa7b611
movq -0x10(%rbp), %rax
tzcntq %rax, %rax
sarl $0x3, %eax
movl %eax, -0x4(%rbp)
jmp 0xa7b64f
movq -0x10(%rbp), %rax
tzcntl %eax, %eax
sarl $0x3, %eax
movl %eax, -0x4(%rbp)
jmp 0xa7b64f
callq 0xa7b660
cmpl $0x0, %eax
je 0xa7b63f
movq -0x10(%rbp), %rax
bsrq %rax, %rax
xorq $0x3f, %rax
sarl $0x3, %eax
movl %eax, -0x4(%rbp)
jmp 0xa7b64f
movq -0x10(%rbp), %rax
bsrl %eax, %eax
xorl $0x1f, %eax
sarl $0x3, %eax
movl %eax, -0x4(%rbp)
movl -0x4(%rbp), %eax
addq $0x10, %rsp
popq %rbp
retq
nopl (%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/zstd_compress_internal.h
|
ZSTD_updateTree
|
void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend) {
ZSTD_updateTree_internal(ms, ip, iend, ms->cParams.minMatch, ZSTD_noDict);
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x50, %rsp
movq %rdi, -0x40(%rbp)
movq %rsi, -0x48(%rbp)
movq %rdx, -0x50(%rbp)
movq -0x40(%rbp), %rsi
movq -0x48(%rbp), %rdx
movq -0x50(%rbp), %rcx
movq -0x40(%rbp), %rax
movl 0x100(%rax), %eax
movq %rsi, -0x8(%rbp)
movq %rdx, -0x10(%rbp)
movq %rcx, -0x18(%rbp)
movl %eax, -0x1c(%rbp)
movl $0x0, -0x20(%rbp)
movq -0x8(%rbp), %rax
movq 0x8(%rax), %rax
movq %rax, -0x28(%rbp)
movq -0x10(%rbp), %rax
movq -0x28(%rbp), %rcx
subq %rcx, %rax
movl %eax, -0x2c(%rbp)
movq -0x8(%rbp), %rax
movl 0x2c(%rax), %eax
movl %eax, -0x30(%rbp)
movl -0x30(%rbp), %eax
cmpl -0x2c(%rbp), %eax
jae 0xa7b951
movq -0x8(%rbp), %rdi
movq -0x28(%rbp), %rsi
movl -0x30(%rbp), %eax
addq %rax, %rsi
movq -0x18(%rbp), %rdx
movl -0x1c(%rbp), %ecx
cmpl $0x1, -0x20(%rbp)
sete %al
andb $0x1, %al
movzbl %al, %r8d
callq 0xada510
movl %eax, -0x34(%rbp)
movl -0x34(%rbp), %eax
addl -0x30(%rbp), %eax
movl %eax, -0x30(%rbp)
jmp 0xa7b914
movl -0x2c(%rbp), %ecx
movq -0x8(%rbp), %rax
movl %ecx, 0x2c(%rax)
addq $0x50, %rsp
popq %rbp
retq
nopw %cs:(%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/zstd_opt.c
|
ZSTD_NbCommonBytes
|
static unsigned ZSTD_NbCommonBytes (size_t val)
{
if (MEM_isLittleEndian()) {
if (MEM_64bits()) {
# if defined(_MSC_VER) && defined(_WIN64)
# if STATIC_BMI2
return _tzcnt_u64(val) >> 3;
# else
unsigned long r = 0;
return _BitScanForward64( &r, (U64)val ) ? (unsigned)(r >> 3) : 0;
# endif
# elif defined(__GNUC__) && (__GNUC__ >= 4)
return (__builtin_ctzll((U64)val) >> 3);
# else
static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2,
0, 3, 1, 3, 1, 4, 2, 7,
0, 2, 3, 6, 1, 5, 3, 5,
1, 3, 4, 4, 2, 5, 6, 7,
7, 0, 1, 2, 3, 3, 4, 6,
2, 6, 5, 5, 3, 4, 5, 6,
7, 1, 2, 4, 6, 4, 4, 5,
7, 2, 6, 5, 7, 6, 7, 7 };
return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
# endif
} else { /* 32 bits */
# if defined(_MSC_VER)
unsigned long r=0;
return _BitScanForward( &r, (U32)val ) ? (unsigned)(r >> 3) : 0;
# elif defined(__GNUC__) && (__GNUC__ >= 3)
return (__builtin_ctz((U32)val) >> 3);
# else
static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0,
3, 2, 2, 1, 3, 2, 0, 1,
3, 3, 1, 2, 2, 2, 2, 0,
3, 1, 2, 0, 1, 0, 1, 1 };
return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
# endif
}
} else { /* Big Endian CPU */
if (MEM_64bits()) {
# if defined(_MSC_VER) && defined(_WIN64)
# if STATIC_BMI2
return _lzcnt_u64(val) >> 3;
# else
unsigned long r = 0;
return _BitScanReverse64(&r, (U64)val) ? (unsigned)(r >> 3) : 0;
# endif
# elif defined(__GNUC__) && (__GNUC__ >= 4)
return (__builtin_clzll(val) >> 3);
# else
unsigned r;
const unsigned n32 = sizeof(size_t)*4; /* calculate this way due to compiler complaining in 32-bits mode */
if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; }
if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
r += (!val);
return r;
# endif
} else { /* 32 bits */
# if defined(_MSC_VER)
unsigned long r = 0;
return _BitScanReverse( &r, (unsigned long)val ) ? (unsigned)(r >> 3) : 0;
# elif defined(__GNUC__) && (__GNUC__ >= 3)
return (__builtin_clz((U32)val) >> 3);
# else
unsigned r;
if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
r += (!val);
return r;
# endif
} }
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x10, %rsp
movq %rdi, -0x10(%rbp)
callq 0xadaf00
cmpl $0x0, %eax
je 0xadb051
callq 0xadb090
cmpl $0x0, %eax
je 0xadb041
movq -0x10(%rbp), %rax
tzcntq %rax, %rax
sarl $0x3, %eax
movl %eax, -0x4(%rbp)
jmp 0xadb07f
movq -0x10(%rbp), %rax
tzcntl %eax, %eax
sarl $0x3, %eax
movl %eax, -0x4(%rbp)
jmp 0xadb07f
callq 0xadb090
cmpl $0x0, %eax
je 0xadb06f
movq -0x10(%rbp), %rax
bsrq %rax, %rax
xorq $0x3f, %rax
sarl $0x3, %eax
movl %eax, -0x4(%rbp)
jmp 0xadb07f
movq -0x10(%rbp), %rax
bsrl %eax, %eax
xorl $0x1f, %eax
sarl $0x3, %eax
movl %eax, -0x4(%rbp)
movl -0x4(%rbp), %eax
addq $0x10, %rsp
popq %rbp
retq
nopl (%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/zstd_compress_internal.h
|
FSE_initCState
|
MEM_STATIC void FSE_initCState(FSE_CState_t* statePtr, const FSE_CTable* ct)
{
const void* ptr = ct;
const U16* u16ptr = (const U16*) ptr;
const U32 tableLog = MEM_read16(ptr);
statePtr->value = (ptrdiff_t)1<<tableLog;
statePtr->stateTable = u16ptr+2;
statePtr->symbolTT = ct + 1 + (tableLog ? (1<<(tableLog-1)) : 1);
statePtr->stateLog = tableLog;
}
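
The only non-obvious arithmetic above is the symbolTT offset: the symbol transform array starts 1 + 2^(tableLog-1) FSE_CTable cells past ct, or 1 + 1 cells when tableLog is 0, which is where the CTable layout places it. A hedged sketch that just evaluates that offset for a few table logs (no real CTable is built here):

/* ctable_layout_demo.c - evaluating the symbolTT offset used by FSE_initCState */
#include <stdio.h>

static unsigned symbolTT_offset(unsigned tableLog)
{
    /* in FSE_CTable cells, matching: ct + 1 + (tableLog ? 1<<(tableLog-1) : 1) */
    return 1 + (tableLog ? 1u << (tableLog - 1) : 1u);
}

int main(void)
{
    for (unsigned tl = 0; tl <= 12; tl += 4)
        printf("tableLog=%2u -> symbolTT starts %u cells after ct\n",
               tl, symbolTT_offset(tl));
    return 0;
}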
|
pushq %rbp
movq %rsp, %rbp
subq $0x40, %rsp
movq %rdi, -0x8(%rbp)
movq %rsi, -0x10(%rbp)
movq -0x10(%rbp), %rax
movq %rax, -0x18(%rbp)
movq -0x18(%rbp), %rax
movq %rax, -0x20(%rbp)
movq -0x18(%rbp), %rdi
callq 0xadb0a0
movzwl %ax, %eax
movl %eax, -0x24(%rbp)
movl -0x24(%rbp), %eax
movl %eax, %ecx
movl $0x1, %eax
shlq %cl, %rax
movq %rax, %rcx
movq -0x8(%rbp), %rax
movq %rcx, (%rax)
movq -0x20(%rbp), %rcx
addq $0x4, %rcx
movq -0x8(%rbp), %rax
movq %rcx, 0x8(%rax)
movq -0x10(%rbp), %rax
addq $0x4, %rax
movq %rax, -0x30(%rbp)
cmpl $0x0, -0x24(%rbp)
je 0xadbdda
movl -0x24(%rbp), %ecx
subl $0x1, %ecx
movl $0x1, %eax
shll %cl, %eax
movl %eax, -0x34(%rbp)
jmp 0xadbde4
movl $0x1, %eax
movl %eax, -0x34(%rbp)
jmp 0xadbde4
movq -0x30(%rbp), %rcx
movl -0x34(%rbp), %eax
cltq
shlq $0x2, %rax
addq %rax, %rcx
movq -0x8(%rbp), %rax
movq %rcx, 0x10(%rax)
movl -0x24(%rbp), %ecx
movq -0x8(%rbp), %rax
movl %ecx, 0x18(%rax)
addq $0x40, %rsp
popq %rbp
retq
nopl (%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/../common/fse.h
|
ZSTD_optLdm_maybeAddMatch
|
static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, U32* nbMatches,
ZSTD_optLdm_t* optLdm, U32 currPosInBlock) {
U32 posDiff = currPosInBlock - optLdm->startPosInBlock;
/* Note: ZSTD_match_t actually contains offCode and matchLength (before subtracting MINMATCH) */
U32 candidateMatchLength = optLdm->endPosInBlock - optLdm->startPosInBlock - posDiff;
U32 candidateOffCode = optLdm->offset + ZSTD_REP_MOVE;
/* Ensure that current block position is not outside of the match */
if (currPosInBlock < optLdm->startPosInBlock
|| currPosInBlock >= optLdm->endPosInBlock
|| candidateMatchLength < MINMATCH) {
return;
}
if (*nbMatches == 0 || ((candidateMatchLength > matches[*nbMatches-1].len) && *nbMatches < ZSTD_OPT_NUM)) {
DEBUGLOG(6, "ZSTD_optLdm_maybeAddMatch(): Adding ldm candidate match (offCode: %u matchLength %u) at block position=%u",
candidateOffCode, candidateMatchLength, currPosInBlock);
matches[*nbMatches].len = candidateMatchLength;
matches[*nbMatches].off = candidateOffCode;
(*nbMatches)++;
}
}
|
pushq %rbp
movq %rsp, %rbp
movq %rdi, -0x8(%rbp)
movq %rsi, -0x10(%rbp)
movq %rdx, -0x18(%rbp)
movl %ecx, -0x1c(%rbp)
movl -0x1c(%rbp), %eax
movq -0x18(%rbp), %rcx
subl 0x28(%rcx), %eax
movl %eax, -0x20(%rbp)
movq -0x18(%rbp), %rax
movl 0x2c(%rax), %eax
movq -0x18(%rbp), %rcx
subl 0x28(%rcx), %eax
subl -0x20(%rbp), %eax
movl %eax, -0x24(%rbp)
movq -0x18(%rbp), %rax
movl 0x30(%rax), %eax
addl $0x2, %eax
movl %eax, -0x28(%rbp)
movl -0x1c(%rbp), %eax
movq -0x18(%rbp), %rcx
cmpl 0x28(%rcx), %eax
jb 0xadc16f
movl -0x1c(%rbp), %eax
movq -0x18(%rbp), %rcx
cmpl 0x2c(%rcx), %eax
jae 0xadc16f
cmpl $0x3, -0x24(%rbp)
jae 0xadc171
jmp 0xadc1ca
movq -0x10(%rbp), %rax
cmpl $0x0, (%rax)
je 0xadc19e
movl -0x24(%rbp), %eax
movq -0x8(%rbp), %rcx
movq -0x10(%rbp), %rdx
movl (%rdx), %edx
subl $0x1, %edx
movl %edx, %edx
cmpl 0x4(%rcx,%rdx,8), %eax
jbe 0xadc1ca
movq -0x10(%rbp), %rax
cmpl $0x1000, (%rax) # imm = 0x1000
jae 0xadc1ca
movl -0x24(%rbp), %edx
movq -0x8(%rbp), %rax
movq -0x10(%rbp), %rcx
movl (%rcx), %ecx
movl %edx, 0x4(%rax,%rcx,8)
movl -0x28(%rbp), %edx
movq -0x8(%rbp), %rax
movq -0x10(%rbp), %rcx
movl (%rcx), %ecx
movl %edx, (%rax,%rcx,8)
movq -0x10(%rbp), %rax
movl (%rax), %ecx
addl $0x1, %ecx
movl %ecx, (%rax)
popq %rbp
retq
nopl (%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/zstd_opt.c
|
ZSTD_safecopyLiterals
|
static void ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE const* ilimit_w) {
assert(iend > ilimit_w);
if (ip <= ilimit_w) {
ZSTD_wildcopy(op, ip, ilimit_w - ip, ZSTD_no_overlap);
op += ilimit_w - ip;
ip = ilimit_w;
}
while (ip < iend) *op++ = *ip++;
}
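
The two-phase pattern above (wide over-copying while it is safe, then a byte loop for the tail near the buffer end) can be restated without zstd's helpers. A simplified standalone version using 8-byte memcpy chunks for the wide phase; like the original, it relies on the caller leaving slack past ilimit_w on both buffers (zstd reserves WILDCOPY_OVERLENGTH for this):

/* safecopy_demo.c - simplified two-phase literal copy */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Copy [ip, iend) to op. Wide copies only start while ip <= ilimit_w, so they
 * may run up to 8 bytes past ilimit_w; the caller must leave that margin. */
static void safecopy(uint8_t *op, const uint8_t *ip,
                     const uint8_t *iend, const uint8_t *ilimit_w)
{
    while (ip <= ilimit_w) {          /* safe region: 8-byte chunks */
        memcpy(op, ip, 8);
        op += 8; ip += 8;
    }
    while (ip < iend)                 /* tail: byte-by-byte */
        *op++ = *ip++;
}

int main(void)
{
    uint8_t src[40], dst[40];
    for (int i = 0; i < 40; ++i) src[i] = (uint8_t)i;
    /* keep wide copies at least 8 bytes away from the end of the buffers */
    safecopy(dst, src, src + 40, src + 40 - 8);
    printf("%s\n", memcmp(src, dst, 40) == 0 ? "ok" : "mismatch");
    return 0;
}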
|
pushq %rbp
movq %rsp, %rbp
subq $0x60, %rsp
movq %rdi, -0x48(%rbp)
movq %rsi, -0x50(%rbp)
movq %rdx, -0x58(%rbp)
movq %rcx, -0x60(%rbp)
movq -0x50(%rbp), %rax
cmpq -0x60(%rbp), %rax
ja 0xadc46d
movq -0x48(%rbp), %rdx
movq -0x50(%rbp), %rcx
movq -0x60(%rbp), %rax
movq -0x50(%rbp), %rsi
subq %rsi, %rax
movq %rdx, -0x8(%rbp)
movq %rcx, -0x10(%rbp)
movq %rax, -0x18(%rbp)
movl $0x0, -0x1c(%rbp)
movq -0x8(%rbp), %rax
movq -0x10(%rbp), %rcx
subq %rcx, %rax
movq %rax, -0x28(%rbp)
movq -0x10(%rbp), %rax
movq %rax, -0x30(%rbp)
movq -0x8(%rbp), %rax
movq %rax, -0x38(%rbp)
movq -0x38(%rbp), %rax
addq -0x18(%rbp), %rax
movq %rax, -0x40(%rbp)
cmpl $0x1, -0x1c(%rbp)
jne 0xadc3ca
cmpq $0x10, -0x28(%rbp)
jge 0xadc3ca
jmp 0xadc396
movq -0x38(%rbp), %rdi
movq -0x30(%rbp), %rsi
callq 0xadc4b0
movq -0x38(%rbp), %rax
addq $0x8, %rax
movq %rax, -0x38(%rbp)
movq -0x30(%rbp), %rax
addq $0x8, %rax
movq %rax, -0x30(%rbp)
movq -0x38(%rbp), %rax
cmpq -0x40(%rbp), %rax
jb 0xadc396
jmp 0xadc452
movq -0x38(%rbp), %rdi
movq -0x30(%rbp), %rsi
callq 0xadc2e0
movl $0x10, %eax
cmpq -0x18(%rbp), %rax
jl 0xadc3e4
jmp 0xadc452
movq -0x38(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x38(%rbp)
movq -0x30(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x30(%rbp)
movq -0x38(%rbp), %rdi
movq -0x30(%rbp), %rsi
callq 0xadc2e0
movq -0x38(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x38(%rbp)
movq -0x30(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x30(%rbp)
movq -0x38(%rbp), %rdi
movq -0x30(%rbp), %rsi
callq 0xadc2e0
movq -0x38(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x38(%rbp)
movq -0x30(%rbp), %rax
addq $0x10, %rax
movq %rax, -0x30(%rbp)
movq -0x38(%rbp), %rax
cmpq -0x40(%rbp), %rax
jb 0xadc3fc
jmp 0xadc452
movq -0x60(%rbp), %rax
movq -0x50(%rbp), %rcx
subq %rcx, %rax
addq -0x48(%rbp), %rax
movq %rax, -0x48(%rbp)
movq -0x60(%rbp), %rax
movq %rax, -0x50(%rbp)
jmp 0xadc46f
movq -0x50(%rbp), %rax
cmpq -0x58(%rbp), %rax
jae 0xadc49d
movq -0x50(%rbp), %rax
movq %rax, %rcx
addq $0x1, %rcx
movq %rcx, -0x50(%rbp)
movb (%rax), %cl
movq -0x48(%rbp), %rax
movq %rax, %rdx
addq $0x1, %rdx
movq %rdx, -0x48(%rbp)
movb %cl, (%rax)
jmp 0xadc46f
addq $0x60, %rsp
popq %rbp
retq
nopw %cs:(%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/zstd_compress_internal.h
|
ZSTD_setRleBlock
|
static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity,
BYTE b,
size_t regenSize)
{
RETURN_ERROR_IF(regenSize > dstCapacity, dstSize_tooSmall, "");
if (dst == NULL) {
if (regenSize == 0) return 0;
RETURN_ERROR(dstBuffer_null, "");
}
ZSTD_memset(dst, b, regenSize);
return regenSize;
}
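
/* Illustrative sketch (not part of the original source): expanding a
 * single-byte RLE block into a caller-provided buffer. Since
 * ZSTD_setRleBlock() is static, a real caller would live in the same
 * translation unit; buffer name and sizes here are assumptions. */
static void example_setRleBlock(void)
{
    unsigned char dst[64];
    size_t const r = ZSTD_setRleBlock(dst, sizeof(dst), 0xAA, 32);
    if (!ZSTD_isError(r)) {
        /* dst[0..31] now hold 0xAA and r == 32 */
    }
}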
|
pushq %rbp
movq %rsp, %rbp
subq $0x30, %rsp
movb %dl, %al
movq %rdi, -0x10(%rbp)
movq %rsi, -0x18(%rbp)
movb %al, -0x19(%rbp)
movq %rcx, -0x28(%rbp)
movq -0x28(%rbp), %rax
cmpq -0x18(%rbp), %rax
jbe 0xade6bd
movq $-0x46, -0x8(%rbp)
jmp 0xade6fd
cmpq $0x0, -0x10(%rbp)
jne 0xade6e1
cmpq $0x0, -0x28(%rbp)
jne 0xade6d5
movq $0x0, -0x8(%rbp)
jmp 0xade6fd
jmp 0xade6d7
movq $-0x4a, -0x8(%rbp)
jmp 0xade6fd
movq -0x10(%rbp), %rdi
movzbl -0x19(%rbp), %eax
movq -0x28(%rbp), %rdx
movzbl %al, %esi
callq 0x3b780
movq -0x28(%rbp), %rax
movq %rax, -0x8(%rbp)
movq -0x8(%rbp), %rax
addq $0x30, %rsp
popq %rbp
retq
nopw (%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/decompress/zstd_decompress.c
|
ZSTD_DCtx_loadDictionary_advanced
|
size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx,
const void* dict, size_t dictSize,
ZSTD_dictLoadMethod_e dictLoadMethod,
ZSTD_dictContentType_e dictContentType)
{
RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
ZSTD_clearDict(dctx);
if (dict && dictSize != 0) {
dctx->ddictLocal = ZSTD_createDDict_advanced(dict, dictSize, dictLoadMethod, dictContentType, dctx->customMem);
RETURN_ERROR_IF(dctx->ddictLocal == NULL, memory_allocation, "NULL pointer!");
dctx->ddict = dctx->ddictLocal;
dctx->dictUses = ZSTD_use_indefinitely;
}
return 0;
}
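
/* Illustrative usage sketch (not part of the original source): loading a
 * dictionary by copy before streaming decompression, using the public
 * advanced API. The dictBuf/dictSize names are assumptions. */
static void example_loadDictionary(const void* dictBuf, size_t dictSize)
{
    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
    if (dctx != NULL) {
        size_t const r = ZSTD_DCtx_loadDictionary_advanced(
                dctx, dictBuf, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
        if (ZSTD_isError(r)) { /* handle error */ }
        ZSTD_freeDCtx(dctx);
    }
}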
|
pushq %rbp
movq %rsp, %rbp
subq $0x40, %rsp
movq %rdi, -0x10(%rbp)
movq %rsi, -0x18(%rbp)
movq %rdx, -0x20(%rbp)
movl %ecx, -0x24(%rbp)
movl %r8d, -0x28(%rbp)
movq -0x10(%rbp), %rax
cmpl $0x0, 0x760c(%rax)
je 0xadf1c5
movq $-0x3c, -0x8(%rbp)
jmp 0xadf26c
movq -0x10(%rbp), %rdi
callq 0xadca40
cmpq $0x0, -0x18(%rbp)
je 0xadf264
cmpq $0x0, -0x20(%rbp)
je 0xadf264
movq -0x18(%rbp), %rdi
movq -0x20(%rbp), %rsi
movl -0x24(%rbp), %edx
movl -0x28(%rbp), %ecx
movq -0x10(%rbp), %rax
addq $0x75a8, %rax # imm = 0x75A8
movq (%rax), %r8
movq %r8, (%rsp)
movq 0x8(%rax), %r8
movq %r8, 0x8(%rsp)
movq 0x10(%rax), %rax
movq %rax, 0x10(%rsp)
callq 0xaf95f0
movq %rax, %rcx
movq -0x10(%rbp), %rax
movq %rcx, 0x75e0(%rax)
movq -0x10(%rbp), %rax
cmpq $0x0, 0x75e0(%rax)
jne 0xadf240
movq $-0x40, -0x8(%rbp)
jmp 0xadf26c
movq -0x10(%rbp), %rax
movq 0x75e0(%rax), %rcx
movq -0x10(%rbp), %rax
movq %rcx, 0x75e8(%rax)
movq -0x10(%rbp), %rax
movl $0xffffffff, 0x75f8(%rax) # imm = 0xFFFFFFFF
movq $0x0, -0x8(%rbp)
movq -0x8(%rbp), %rax
addq $0x40, %rsp
popq %rbp
retq
nopw %cs:(%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/decompress/zstd_decompress.c
|
ZSTD_DCtx_getParameter
|
size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int* value)
{
switch (param) {
case ZSTD_d_windowLogMax:
*value = (int)ZSTD_highbit32((U32)dctx->maxWindowSize);
return 0;
case ZSTD_d_format:
*value = (int)dctx->format;
return 0;
case ZSTD_d_stableOutBuffer:
*value = (int)dctx->outBufferMode;
return 0;
case ZSTD_d_forceIgnoreChecksum:
*value = (int)dctx->forceIgnoreChecksum;
return 0;
case ZSTD_d_refMultipleDDicts:
*value = (int)dctx->refMultipleDDicts;
return 0;
default:;
}
RETURN_ERROR(parameter_unsupported, "");
}
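
/* Illustrative usage sketch (not part of the original source): reading back
 * a decompression parameter after setting it. Variable names are assumptions. */
static void example_getParameter(void)
{
    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
    int value = 0;
    if (dctx != NULL) {
        ZSTD_DCtx_setParameter(dctx, ZSTD_d_windowLogMax, 27);
        if (!ZSTD_isError(ZSTD_DCtx_getParameter(dctx, ZSTD_d_windowLogMax, &value))) {
            /* value is expected to report the same log (27) */
        }
        ZSTD_freeDCtx(dctx);
    }
}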
|
pushq %rbp
movq %rsp, %rbp
subq $0x30, %rsp
movq %rdi, -0x10(%rbp)
movl %esi, -0x14(%rbp)
movq %rdx, -0x20(%rbp)
movl -0x14(%rbp), %eax
movl %eax, -0x24(%rbp)
subl $0x64, %eax
je 0xadfc33
jmp 0xadfc00
movl -0x24(%rbp), %eax
subl $0x3e8, %eax # imm = 0x3E8
je 0xadfc57
jmp 0xadfc0c
movl -0x24(%rbp), %eax
subl $0x3e9, %eax # imm = 0x3E9
je 0xadfc71
jmp 0xadfc18
movl -0x24(%rbp), %eax
subl $0x3ea, %eax # imm = 0x3EA
je 0xadfc8b
jmp 0xadfc24
movl -0x24(%rbp), %eax
subl $0x3eb, %eax # imm = 0x3EB
je 0xadfca5
jmp 0xadfcbf
movq -0x10(%rbp), %rax
movq 0x7628(%rax), %rax
movl %eax, %edi
callq 0xadfce0
movl %eax, %ecx
movq -0x20(%rbp), %rax
movl %ecx, (%rax)
movq $0x0, -0x8(%rbp)
jmp 0xadfccb
movq -0x10(%rbp), %rax
movl 0x7590(%rax), %ecx
movq -0x20(%rbp), %rax
movl %ecx, (%rax)
movq $0x0, -0x8(%rbp)
jmp 0xadfccb
movq -0x10(%rbp), %rax
movl 0x7670(%rax), %ecx
movq -0x20(%rbp), %rax
movl %ecx, (%rax)
movq $0x0, -0x8(%rbp)
jmp 0xadfccb
movq -0x10(%rbp), %rax
movl 0x7594(%rax), %ecx
movq -0x20(%rbp), %rax
movl %ecx, (%rax)
movq $0x0, -0x8(%rbp)
jmp 0xadfccb
movq -0x10(%rbp), %rax
movl 0x7608(%rax), %ecx
movq -0x20(%rbp), %rax
movl %ecx, (%rax)
movq $0x0, -0x8(%rbp)
jmp 0xadfccb
jmp 0xadfcc1
jmp 0xadfcc3
movq $-0x28, -0x8(%rbp)
movq -0x8(%rbp), %rax
addq $0x30, %rsp
popq %rbp
retq
nopw %cs:(%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/decompress/zstd_decompress.c
|
ZSTD_checkOutBuffer
|
static size_t ZSTD_checkOutBuffer(ZSTD_DStream const* zds, ZSTD_outBuffer const* output)
{
ZSTD_outBuffer const expect = zds->expectedOutBuffer;
    /* No requirement when ZSTD_bm_stable is not enabled. */
if (zds->outBufferMode != ZSTD_bm_stable)
return 0;
    /* Any buffer is allowed in zdss_init; after that, it must be the same for
     * every other call until the context is reset.
     */
if (zds->streamStage == zdss_init)
return 0;
/* The buffer must match our expectation exactly. */
if (expect.dst == output->dst && expect.pos == output->pos && expect.size == output->size)
return 0;
RETURN_ERROR(dstBuffer_wrong, "ZSTD_d_stableOutBuffer enabled but output differs!");
}
|
pushq %rbp
movq %rsp, %rbp
movq %rdi, -0x10(%rbp)
movq %rsi, -0x18(%rbp)
movq -0x10(%rbp), %rax
movq 0x7678(%rax), %rcx
movq %rcx, -0x30(%rbp)
movq 0x7680(%rax), %rcx
movq %rcx, -0x28(%rbp)
movq 0x7688(%rax), %rax
movq %rax, -0x20(%rbp)
movq -0x10(%rbp), %rax
cmpl $0x1, 0x7670(%rax)
je 0xae0ec8
movq $0x0, -0x8(%rbp)
jmp 0xae0f1c
movq -0x10(%rbp), %rax
cmpl $0x0, 0x760c(%rax)
jne 0xae0edf
movq $0x0, -0x8(%rbp)
jmp 0xae0f1c
movq -0x30(%rbp), %rax
movq -0x18(%rbp), %rcx
cmpq (%rcx), %rax
jne 0xae0f12
movq -0x20(%rbp), %rax
movq -0x18(%rbp), %rcx
cmpq 0x10(%rcx), %rax
jne 0xae0f12
movq -0x28(%rbp), %rax
movq -0x18(%rbp), %rcx
cmpq 0x8(%rcx), %rax
jne 0xae0f12
movq $0x0, -0x8(%rbp)
jmp 0xae0f1c
jmp 0xae0f14
movq $-0x68, -0x8(%rbp)
movq -0x8(%rbp), %rax
popq %rbp
retq
nopw %cs:(%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/decompress/zstd_decompress.c
|
MEM_swap64
|
MEM_STATIC U64 MEM_swap64(U64 in)
{
#if defined(_MSC_VER) /* Visual Studio */
return _byteswap_uint64(in);
#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
|| (defined(__clang__) && __has_builtin(__builtin_bswap64))
return __builtin_bswap64(in);
#else
return ((in << 56) & 0xff00000000000000ULL) |
((in << 40) & 0x00ff000000000000ULL) |
((in << 24) & 0x0000ff0000000000ULL) |
((in << 8) & 0x000000ff00000000ULL) |
((in >> 8) & 0x00000000ff000000ULL) |
((in >> 24) & 0x0000000000ff0000ULL) |
((in >> 40) & 0x000000000000ff00ULL) |
((in >> 56) & 0x00000000000000ffULL);
#endif
}
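
/* Minimal sanity sketch (not part of the original source): MEM_swap64() must
 * reverse the byte order of a 64-bit value, whichever branch is compiled in.
 * Assumes an assert() macro is available in this translation unit. */
static void example_swap64(void)
{
    U64 const x = 0x0102030405060708ULL;
    U64 const y = MEM_swap64(x);
    assert(y == 0x0807060504030201ULL);   /* bytes reversed */
    (void)y;
}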
|
pushq %rbp
movq %rsp, %rbp
movq %rdi, -0x8(%rbp)
movq -0x8(%rbp), %rax
bswapq %rax
popq %rbp
retq
nopw %cs:(%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/decompress/../common/mem.h
|
FSE_readNCount_bmi2
|
size_t FSE_readNCount_bmi2(
short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
const void* headerBuffer, size_t hbSize, int bmi2)
{
#if DYNAMIC_BMI2
if (bmi2) {
return FSE_readNCount_body_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
}
#endif
(void)bmi2;
return FSE_readNCount_body_default(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x30, %rsp
movq %rdi, -0x8(%rbp)
movq %rsi, -0x10(%rbp)
movq %rdx, -0x18(%rbp)
movq %rcx, -0x20(%rbp)
movq %r8, -0x28(%rbp)
movl %r9d, -0x2c(%rbp)
movq -0x8(%rbp), %rdi
movq -0x10(%rbp), %rsi
movq -0x18(%rbp), %rdx
movq -0x20(%rbp), %rcx
movq -0x28(%rbp), %r8
callq 0xae1db0
addq $0x30, %rsp
popq %rbp
retq
nop
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/common/entropy_common.c
|
HUF_readStats_wksp
|
size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize, U32* rankStats,
U32* nbSymbolsPtr, U32* tableLogPtr,
const void* src, size_t srcSize,
void* workSpace, size_t wkspSize,
int bmi2)
{
#if DYNAMIC_BMI2
if (bmi2) {
return HUF_readStats_body_bmi2(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize);
}
#endif
(void)bmi2;
return HUF_readStats_body_default(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize);
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x50, %rsp
movl 0x28(%rbp), %eax
movq 0x20(%rbp), %rax
movq 0x18(%rbp), %rax
movq 0x10(%rbp), %rax
movq %rdi, -0x8(%rbp)
movq %rsi, -0x10(%rbp)
movq %rdx, -0x18(%rbp)
movq %rcx, -0x20(%rbp)
movq %r8, -0x28(%rbp)
movq %r9, -0x30(%rbp)
movq -0x8(%rbp), %rdi
movq -0x10(%rbp), %rsi
movq -0x18(%rbp), %rdx
movq -0x20(%rbp), %rcx
movq -0x28(%rbp), %r8
movq -0x30(%rbp), %r9
movq 0x10(%rbp), %r11
movq 0x18(%rbp), %r10
movq 0x20(%rbp), %rax
movq %r11, (%rsp)
movq %r10, 0x8(%rsp)
movq %rax, 0x10(%rsp)
callq 0xae2440
addq $0x50, %rsp
popq %rbp
retq
nopl (%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/common/entropy_common.c
|
FSE_buildDTable_raw
|
size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)
{
void* ptr = dt;
FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
void* dPtr = dt + 1;
FSE_decode_t* const dinfo = (FSE_decode_t*)dPtr;
const unsigned tableSize = 1 << nbBits;
const unsigned tableMask = tableSize - 1;
const unsigned maxSV1 = tableMask+1;
unsigned s;
/* Sanity checks */
if (nbBits < 1) return ERROR(GENERIC); /* min size */
/* Build Decoding Table */
DTableH->tableLog = (U16)nbBits;
DTableH->fastMode = 1;
for (s=0; s<maxSV1; s++) {
dinfo[s].newState = 0;
dinfo[s].symbol = (BYTE)s;
dinfo[s].nbBits = (BYTE)nbBits;
}
return 0;
}
|
pushq %rbp
movq %rsp, %rbp
movq %rdi, -0x10(%rbp)
movl %esi, -0x14(%rbp)
movq -0x10(%rbp), %rax
movq %rax, -0x20(%rbp)
movq -0x20(%rbp), %rax
movq %rax, -0x28(%rbp)
movq -0x10(%rbp), %rax
addq $0x4, %rax
movq %rax, -0x30(%rbp)
movq -0x30(%rbp), %rax
movq %rax, -0x38(%rbp)
movl -0x14(%rbp), %ecx
movl $0x1, %eax
shll %cl, %eax
movl %eax, -0x3c(%rbp)
movl -0x3c(%rbp), %eax
subl $0x1, %eax
movl %eax, -0x40(%rbp)
movl -0x40(%rbp), %eax
addl $0x1, %eax
movl %eax, -0x44(%rbp)
cmpl $0x1, -0x14(%rbp)
jae 0xae315e
movq $-0x1, -0x8(%rbp)
jmp 0xae31c4
movl -0x14(%rbp), %eax
movw %ax, %cx
movq -0x28(%rbp), %rax
movw %cx, (%rax)
movq -0x28(%rbp), %rax
movw $0x1, 0x2(%rax)
movl $0x0, -0x48(%rbp)
movl -0x48(%rbp), %eax
cmpl -0x44(%rbp), %eax
jae 0xae31bc
movq -0x38(%rbp), %rax
movl -0x48(%rbp), %ecx
movw $0x0, (%rax,%rcx,4)
movl -0x48(%rbp), %eax
movb %al, %dl
movq -0x38(%rbp), %rax
movl -0x48(%rbp), %ecx
movb %dl, 0x2(%rax,%rcx,4)
movl -0x14(%rbp), %eax
movb %al, %dl
movq -0x38(%rbp), %rax
movl -0x48(%rbp), %ecx
movb %dl, 0x3(%rax,%rcx,4)
movl -0x48(%rbp), %eax
addl $0x1, %eax
movl %eax, -0x48(%rbp)
jmp 0xae317c
movq $0x0, -0x8(%rbp)
movq -0x8(%rbp), %rax
popq %rbp
retq
nopw (%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/common/fse_decompress.c
|
BIT_reloadDStream
|
MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
{
if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* overflow detected, like end of stream */
return BIT_DStream_overflow;
if (bitD->ptr >= bitD->limitPtr) {
return BIT_reloadDStreamFast(bitD);
}
if (bitD->ptr == bitD->start) {
if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;
return BIT_DStream_completed;
}
/* start < ptr < limitPtr */
{ U32 nbBytes = bitD->bitsConsumed >> 3;
BIT_DStream_status result = BIT_DStream_unfinished;
if (bitD->ptr - nbBytes < bitD->start) {
nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */
result = BIT_DStream_endOfBuffer;
}
bitD->ptr -= nbBytes;
bitD->bitsConsumed -= nbBytes*8;
bitD->bitContainer = MEM_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD->bitContainer), otherwise bitD->ptr == bitD->start */
return result;
}
}
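
/* Illustrative decode-loop sketch (not part of the original source): the usual
 * pattern is to read bits until BIT_reloadDStream() stops returning
 * BIT_DStream_unfinished. Buffer names and the 4-bit field width are assumptions. */
static void example_reloadDStream(const void* src, size_t srcSize)
{
    BIT_DStream_t bitD;
    if (!ERR_isError(BIT_initDStream(&bitD, src, srcSize))) {
        do {
            size_t const v = BIT_readBits(&bitD, 4);   /* consume 4 bits */
            (void)v;
        } while (BIT_reloadDStream(&bitD) == BIT_DStream_unfinished);
    }
}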
|
pushq %rbp
movq %rsp, %rbp
subq $0x20, %rsp
movq %rdi, -0x10(%rbp)
movq -0x10(%rbp), %rax
movl 0x8(%rax), %eax
cmpq $0x40, %rax
jbe 0xae4c45
movl $0x3, -0x4(%rbp)
jmp 0xae4d39
movq -0x10(%rbp), %rax
movq 0x10(%rax), %rax
movq -0x10(%rbp), %rcx
cmpq 0x20(%rcx), %rax
jb 0xae4c68
movq -0x10(%rbp), %rdi
callq 0xae4ff0
movl %eax, -0x4(%rbp)
jmp 0xae4d39
movq -0x10(%rbp), %rax
movq 0x10(%rax), %rax
movq -0x10(%rbp), %rcx
cmpq 0x18(%rcx), %rax
jne 0xae4c9f
movq -0x10(%rbp), %rax
movl 0x8(%rax), %eax
cmpq $0x40, %rax
jae 0xae4c93
movl $0x1, -0x4(%rbp)
jmp 0xae4d39
movl $0x2, -0x4(%rbp)
jmp 0xae4d39
movq -0x10(%rbp), %rax
movl 0x8(%rax), %eax
shrl $0x3, %eax
movl %eax, -0x14(%rbp)
movl $0x0, -0x18(%rbp)
movq -0x10(%rbp), %rax
movq 0x10(%rax), %rax
movl -0x14(%rbp), %ecx
movl %ecx, %edx
xorl %ecx, %ecx
subq %rdx, %rcx
addq %rcx, %rax
movq -0x10(%rbp), %rcx
cmpq 0x18(%rcx), %rax
jae 0xae4cef
movq -0x10(%rbp), %rax
movq 0x10(%rax), %rax
movq -0x10(%rbp), %rcx
movq 0x18(%rcx), %rcx
subq %rcx, %rax
movl %eax, -0x14(%rbp)
movl $0x1, -0x18(%rbp)
movl -0x14(%rbp), %edx
movq -0x10(%rbp), %rax
movq 0x10(%rax), %rcx
movl %edx, %edx
movl %edx, %esi
xorl %edx, %edx
subq %rsi, %rdx
addq %rdx, %rcx
movq %rcx, 0x10(%rax)
movl -0x14(%rbp), %edx
shll $0x3, %edx
movq -0x10(%rbp), %rax
movl 0x8(%rax), %ecx
subl %edx, %ecx
movl %ecx, 0x8(%rax)
movq -0x10(%rbp), %rax
movq 0x10(%rax), %rdi
callq 0xae4e90
movq %rax, %rcx
movq -0x10(%rbp), %rax
movq %rcx, (%rax)
movl -0x18(%rbp), %eax
movl %eax, -0x4(%rbp)
movl -0x4(%rbp), %eax
addq $0x20, %rsp
popq %rbp
retq
nopw %cs:(%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/common/bitstream.h
|
XXH_swap32
|
static U32 XXH_swap32 (U32 x)
{
return ((x << 24) & 0xff000000 ) |
((x << 8) & 0x00ff0000 ) |
((x >> 8) & 0x0000ff00 ) |
((x >> 24) & 0x000000ff );
}
|
pushq %rbp
movq %rsp, %rbp
movl %edi, -0x4(%rbp)
movl -0x4(%rbp), %eax
shll $0x18, %eax
andl $0xff000000, %eax # imm = 0xFF000000
movl -0x4(%rbp), %ecx
shll $0x8, %ecx
andl $0xff0000, %ecx # imm = 0xFF0000
orl %ecx, %eax
movl -0x4(%rbp), %ecx
shrl $0x8, %ecx
andl $0xff00, %ecx # imm = 0xFF00
orl %ecx, %eax
movl -0x4(%rbp), %ecx
shrl $0x18, %ecx
andl $0xff, %ecx
orl %ecx, %eax
popq %rbp
retq
nop
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/common/xxhash.c
|
FSE_normalizeCount
|
size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,
const unsigned* count, size_t total,
unsigned maxSymbolValue, unsigned useLowProbCount)
{
/* Sanity checks */
if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC); /* Unsupported size */
if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Unsupported size */
if (tableLog < FSE_minTableLog(total, maxSymbolValue)) return ERROR(GENERIC); /* Too small tableLog, compression potentially impossible */
{ static U32 const rtbTable[] = { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 };
short const lowProbCount = useLowProbCount ? -1 : 1;
U64 const scale = 62 - tableLog;
U64 const step = ZSTD_div64((U64)1<<62, (U32)total); /* <== here, one division ! */
U64 const vStep = 1ULL<<(scale-20);
int stillToDistribute = 1<<tableLog;
unsigned s;
unsigned largest=0;
short largestP=0;
U32 lowThreshold = (U32)(total >> tableLog);
for (s=0; s<=maxSymbolValue; s++) {
if (count[s] == total) return 0; /* rle special case */
if (count[s] == 0) { normalizedCounter[s]=0; continue; }
if (count[s] <= lowThreshold) {
normalizedCounter[s] = lowProbCount;
stillToDistribute--;
} else {
short proba = (short)((count[s]*step) >> scale);
if (proba<8) {
U64 restToBeat = vStep * rtbTable[proba];
proba += (count[s]*step) - ((U64)proba<<scale) > restToBeat;
}
if (proba > largestP) { largestP=proba; largest=s; }
normalizedCounter[s] = proba;
stillToDistribute -= proba;
} }
if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) {
/* corner case, need another normalization method */
size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue, lowProbCount);
if (FSE_isError(errorCode)) return errorCode;
}
else normalizedCounter[largest] += (short)stillToDistribute;
}
#if 0
{ /* Print Table (debug) */
U32 s;
U32 nTotal = 0;
for (s=0; s<=maxSymbolValue; s++)
RAWLOG(2, "%3i: %4i \n", s, normalizedCounter[s]);
for (s=0; s<=maxSymbolValue; s++)
nTotal += abs(normalizedCounter[s]);
if (nTotal != (1U<<tableLog))
RAWLOG(2, "Warning !!! Total == %u != %u !!!", nTotal, 1U<<tableLog);
getchar();
}
#endif
return tableLog;
}
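
/* Illustrative check (not part of the original source): after a successful
 * FSE_normalizeCount() the normalized frequencies must sum to 1<<tableLog,
 * counting the -1 "low probability" marker as 1 (this mirrors the disabled
 * debug block above). Function and variable names are assumptions. */
static int example_checkNormalization(const short* norm, unsigned tableLog,
                                      unsigned maxSymbolValue)
{
    int sum = 0;
    unsigned s;
    for (s = 0; s <= maxSymbolValue; s++)
        sum += (norm[s] < 0) ? -norm[s] : norm[s];
    return sum == (1 << tableLog);   /* 1 on success */
}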
|
pushq %rbp
movq %rsp, %rbp
subq $0x80, %rsp
movq %rdi, -0x10(%rbp)
movl %esi, -0x14(%rbp)
movq %rdx, -0x20(%rbp)
movq %rcx, -0x28(%rbp)
movl %r8d, -0x2c(%rbp)
movl %r9d, -0x30(%rbp)
cmpl $0x0, -0x14(%rbp)
jne 0xaebb9f
movl $0xb, -0x14(%rbp)
cmpl $0x5, -0x14(%rbp)
jae 0xaebbb2
movq $-0x1, -0x8(%rbp)
jmp 0xaebe0c
cmpl $0xc, -0x14(%rbp)
jbe 0xaebbc5
movq $-0x2c, -0x8(%rbp)
jmp 0xaebe0c
movl -0x14(%rbp), %eax
movl %eax, -0x7c(%rbp)
movq -0x28(%rbp), %rdi
movl -0x2c(%rbp), %esi
callq 0xaebae0
movl %eax, %ecx
movl -0x7c(%rbp), %eax
cmpl %ecx, %eax
jae 0xaebbed
movq $-0x1, -0x8(%rbp)
jmp 0xaebe0c
movl -0x30(%rbp), %edx
movl $0x1, %eax
movl $0xffffffff, %ecx # imm = 0xFFFFFFFF
cmpl $0x0, %edx
cmovnel %ecx, %eax
movw %ax, -0x32(%rbp)
movl $0x3e, %eax
subl -0x14(%rbp), %eax
movl %eax, %eax
movq %rax, -0x40(%rbp)
movq -0x28(%rbp), %rax
movl %eax, %eax
movl %eax, %ecx
movabsq $0x4000000000000000, %rax # imm = 0x4000000000000000
xorl %edx, %edx
divq %rcx
movq %rax, -0x48(%rbp)
movq -0x40(%rbp), %rcx
subq $0x14, %rcx
movl $0x1, %eax
shlq %cl, %rax
movq %rax, -0x50(%rbp)
movl -0x14(%rbp), %ecx
movl $0x1, %eax
shll %cl, %eax
movl %eax, -0x54(%rbp)
movl $0x0, -0x5c(%rbp)
movw $0x0, -0x5e(%rbp)
movq -0x28(%rbp), %rax
movl -0x14(%rbp), %ecx
shrq %cl, %rax
movl %eax, -0x64(%rbp)
movl $0x0, -0x58(%rbp)
movl -0x58(%rbp), %eax
cmpl -0x2c(%rbp), %eax
ja 0xaebd9d
movq -0x20(%rbp), %rax
movl -0x58(%rbp), %ecx
movl (%rax,%rcx,4), %eax
cmpq -0x28(%rbp), %rax
jne 0xaebc98
movq $0x0, -0x8(%rbp)
jmp 0xaebe0c
movq -0x20(%rbp), %rax
movl -0x58(%rbp), %ecx
cmpl $0x0, (%rax,%rcx,4)
jne 0xaebcb7
movq -0x10(%rbp), %rax
movl -0x58(%rbp), %ecx
movw $0x0, (%rax,%rcx,2)
jmp 0xaebd8f
movq -0x20(%rbp), %rax
movl -0x58(%rbp), %ecx
movl (%rax,%rcx,4), %eax
cmpl -0x64(%rbp), %eax
ja 0xaebce3
movw -0x32(%rbp), %dx
movq -0x10(%rbp), %rax
movl -0x58(%rbp), %ecx
movw %dx, (%rax,%rcx,2)
movl -0x54(%rbp), %eax
addl $-0x1, %eax
movl %eax, -0x54(%rbp)
jmp 0xaebd8d
movq -0x20(%rbp), %rax
movl -0x58(%rbp), %ecx
movl (%rax,%rcx,4), %eax
imulq -0x48(%rbp), %rax
movq -0x40(%rbp), %rcx
shrq %cl, %rax
movw %ax, -0x66(%rbp)
movswl -0x66(%rbp), %eax
cmpl $0x8, %eax
jge 0xaebd58
movq -0x50(%rbp), %rax
movswq -0x66(%rbp), %rdx
leaq 0xedbca(%rip), %rcx # 0xbd98e0
movl (%rcx,%rdx,4), %ecx
imulq %rcx, %rax
movq %rax, -0x70(%rbp)
movq -0x20(%rbp), %rax
movl -0x58(%rbp), %ecx
movl (%rax,%rcx,4), %eax
imulq -0x48(%rbp), %rax
movswq -0x66(%rbp), %rdx
movq -0x40(%rbp), %rcx
shlq %cl, %rdx
movq %rdx, %rcx
subq %rcx, %rax
cmpq -0x70(%rbp), %rax
seta %al
andb $0x1, %al
movzbl %al, %ecx
movswl -0x66(%rbp), %eax
addl %ecx, %eax
movw %ax, -0x66(%rbp)
movswl -0x66(%rbp), %eax
movswl -0x5e(%rbp), %ecx
cmpl %ecx, %eax
jle 0xaebd72
movw -0x66(%rbp), %ax
movw %ax, -0x5e(%rbp)
movl -0x58(%rbp), %eax
movl %eax, -0x5c(%rbp)
movw -0x66(%rbp), %dx
movq -0x10(%rbp), %rax
movl -0x58(%rbp), %ecx
movw %dx, (%rax,%rcx,2)
movswl -0x66(%rbp), %ecx
movl -0x54(%rbp), %eax
subl %ecx, %eax
movl %eax, -0x54(%rbp)
jmp 0xaebd8f
movl -0x58(%rbp), %eax
addl $0x1, %eax
movl %eax, -0x58(%rbp)
jmp 0xaebc6f
xorl %eax, %eax
subl -0x54(%rbp), %eax
movq -0x10(%rbp), %rcx
movl -0x5c(%rbp), %edx
movswl (%rcx,%rdx,2), %ecx
sarl %ecx
cmpl %ecx, %eax
jl 0xaebdee
movq -0x10(%rbp), %rdi
movl -0x14(%rbp), %esi
movq -0x20(%rbp), %rdx
movq -0x28(%rbp), %rcx
movl -0x2c(%rbp), %r8d
movswl -0x32(%rbp), %r9d
callq 0xaebe20
movq %rax, -0x78(%rbp)
movq -0x78(%rbp), %rdi
callq 0xaec1f0
cmpl $0x0, %eax
je 0xaebdec
movq -0x78(%rbp), %rax
movq %rax, -0x8(%rbp)
jmp 0xaebe0c
jmp 0xaebe05
movl -0x54(%rbp), %eax
movswl %ax, %esi
movq -0x10(%rbp), %rax
movl -0x5c(%rbp), %ecx
movswl (%rax,%rcx,2), %edx
addl %esi, %edx
movw %dx, (%rax,%rcx,2)
movl -0x14(%rbp), %eax
movq %rax, -0x8(%rbp)
movq -0x8(%rbp), %rax
addq $0x80, %rsp
popq %rbp
retq
nopl (%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/fse_compress.c
|
FSE_buildCTable_raw
|
size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits)
{
const unsigned tableSize = 1 << nbBits;
const unsigned tableMask = tableSize - 1;
const unsigned maxSymbolValue = tableMask;
void* const ptr = ct;
U16* const tableU16 = ( (U16*) ptr) + 2;
void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableSize>>1); /* assumption : tableLog >= 1 */
FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
unsigned s;
/* Sanity checks */
if (nbBits < 1) return ERROR(GENERIC); /* min size */
/* header */
tableU16[-2] = (U16) nbBits;
tableU16[-1] = (U16) maxSymbolValue;
/* Build table */
for (s=0; s<tableSize; s++)
tableU16[s] = (U16)(tableSize + s);
/* Build Symbol Transformation Table */
{ const U32 deltaNbBits = (nbBits << 16) - (1 << nbBits);
for (s=0; s<=maxSymbolValue; s++) {
symbolTT[s].deltaNbBits = deltaNbBits;
symbolTT[s].deltaFindState = s-1;
} }
return 0;
}
|
pushq %rbp
movq %rsp, %rbp
movq %rdi, -0x10(%rbp)
movl %esi, -0x14(%rbp)
movl -0x14(%rbp), %ecx
movl $0x1, %eax
shll %cl, %eax
movl %eax, -0x18(%rbp)
movl -0x18(%rbp), %eax
subl $0x1, %eax
movl %eax, -0x1c(%rbp)
movl -0x1c(%rbp), %eax
movl %eax, -0x20(%rbp)
movq -0x10(%rbp), %rax
movq %rax, -0x28(%rbp)
movq -0x28(%rbp), %rax
addq $0x4, %rax
movq %rax, -0x30(%rbp)
movq -0x28(%rbp), %rax
addq $0x4, %rax
movl -0x18(%rbp), %ecx
shrl %ecx
movl %ecx, %ecx
shlq $0x2, %rcx
addq %rcx, %rax
movq %rax, -0x38(%rbp)
movq -0x38(%rbp), %rax
movq %rax, -0x40(%rbp)
cmpl $0x1, -0x14(%rbp)
jae 0xaec280
movq $-0x1, -0x8(%rbp)
jmp 0xaec321
movl -0x14(%rbp), %eax
movw %ax, %cx
movq -0x30(%rbp), %rax
movw %cx, -0x4(%rax)
movl -0x20(%rbp), %eax
movw %ax, %cx
movq -0x30(%rbp), %rax
movw %cx, -0x2(%rax)
movl $0x0, -0x44(%rbp)
movl -0x44(%rbp), %eax
cmpl -0x18(%rbp), %eax
jae 0xaec2ca
movl -0x18(%rbp), %eax
addl -0x44(%rbp), %eax
movw %ax, %dx
movq -0x30(%rbp), %rax
movl -0x44(%rbp), %ecx
movw %dx, (%rax,%rcx,2)
movl -0x44(%rbp), %eax
addl $0x1, %eax
movl %eax, -0x44(%rbp)
jmp 0xaec2a3
movl -0x14(%rbp), %eax
shll $0x10, %eax
movl -0x14(%rbp), %ecx
movl $0x1, %edx
shll %cl, %edx
movl %edx, %ecx
subl %ecx, %eax
movl %eax, -0x48(%rbp)
movl $0x0, -0x44(%rbp)
movl -0x44(%rbp), %eax
cmpl -0x20(%rbp), %eax
ja 0xaec319
movl -0x48(%rbp), %edx
movq -0x40(%rbp), %rax
movl -0x44(%rbp), %ecx
movl %edx, 0x4(%rax,%rcx,8)
movl -0x44(%rbp), %edx
subl $0x1, %edx
movq -0x40(%rbp), %rax
movl -0x44(%rbp), %ecx
movl %edx, (%rax,%rcx,8)
movl -0x44(%rbp), %eax
addl $0x1, %eax
movl %eax, -0x44(%rbp)
jmp 0xaec2e8
movq $0x0, -0x8(%rbp)
movq -0x8(%rbp), %rax
popq %rbp
retq
nopw (%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/fse_compress.c
|
BIT_initCStream
|
MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC,
void* startPtr, size_t dstCapacity)
{
bitC->bitContainer = 0;
bitC->bitPos = 0;
bitC->startPtr = (char*)startPtr;
bitC->ptr = bitC->startPtr;
bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer);
if (dstCapacity <= sizeof(bitC->bitContainer)) return ERROR(dstSize_tooSmall);
return 0;
}
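
/* Illustrative encode sketch (not part of the original source): the usual
 * pattern around BIT_initCStream() is add/flush/close. The buffer names and
 * the 3-bit value written are assumptions. */
static size_t example_initCStream(void* dst, size_t dstCapacity)
{
    BIT_CStream_t bitC;
    if (ERR_isError(BIT_initCStream(&bitC, dst, dstCapacity)))
        return 0;                        /* dstCapacity too small */
    BIT_addBits(&bitC, 0x5, 3);          /* queue 3 bits */
    BIT_flushBits(&bitC);                /* commit them to dst */
    return BIT_closeCStream(&bitC);      /* bytes written, or 0 on overflow */
}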
|
pushq %rbp
movq %rsp, %rbp
movq %rdi, -0x10(%rbp)
movq %rsi, -0x18(%rbp)
movq %rdx, -0x20(%rbp)
movq -0x10(%rbp), %rax
movq $0x0, (%rax)
movq -0x10(%rbp), %rax
movl $0x0, 0x8(%rax)
movq -0x18(%rbp), %rcx
movq -0x10(%rbp), %rax
movq %rcx, 0x10(%rax)
movq -0x10(%rbp), %rax
movq 0x10(%rax), %rcx
movq -0x10(%rbp), %rax
movq %rcx, 0x18(%rax)
movq -0x10(%rbp), %rax
movq 0x10(%rax), %rcx
addq -0x20(%rbp), %rcx
addq $-0x8, %rcx
movq -0x10(%rbp), %rax
movq %rcx, 0x20(%rax)
cmpq $0x8, -0x20(%rbp)
ja 0xaecc4b
movq $-0x46, -0x8(%rbp)
jmp 0xaecc53
movq $0x0, -0x8(%rbp)
movq -0x8(%rbp), %rax
popq %rbp
retq
nopl (%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/../common/bitstream.h
|
HUF_readCTable
|
size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* hasZeroWeights)
{
BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1]; /* init not required, even though some static analyzer may complain */
U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1]; /* large enough for values from 0 to 16 */
U32 tableLog = 0;
U32 nbSymbols = 0;
/* get symbol weights */
CHECK_V_F(readSize, HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX+1, rankVal, &nbSymbols, &tableLog, src, srcSize));
*hasZeroWeights = (rankVal[0] > 0);
/* check result */
if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
if (nbSymbols > *maxSymbolValuePtr+1) return ERROR(maxSymbolValue_tooSmall);
/* Prepare base value per rank */
{ U32 n, nextRankStart = 0;
for (n=1; n<=tableLog; n++) {
U32 curr = nextRankStart;
nextRankStart += (rankVal[n] << (n-1));
rankVal[n] = curr;
} }
/* fill nbBits */
{ U32 n; for (n=0; n<nbSymbols; n++) {
const U32 w = huffWeight[n];
CTable[n].nbBits = (BYTE)(tableLog + 1 - w) & -(w != 0);
} }
/* fill val */
{ U16 nbPerRank[HUF_TABLELOG_MAX+2] = {0}; /* support w=0=>n=tableLog+1 */
U16 valPerRank[HUF_TABLELOG_MAX+2] = {0};
{ U32 n; for (n=0; n<nbSymbols; n++) nbPerRank[CTable[n].nbBits]++; }
        /* determine starting value per rank */
valPerRank[tableLog+1] = 0; /* for w==0 */
{ U16 min = 0;
U32 n; for (n=tableLog; n>0; n--) { /* start at n=tablelog <-> w=1 */
valPerRank[n] = min; /* get starting value within each rank */
min += nbPerRank[n];
min >>= 1;
} }
/* assign value within rank, symbol order */
{ U32 n; for (n=0; n<nbSymbols; n++) CTable[n].val = valPerRank[CTable[n].nbBits]++; }
}
*maxSymbolValuePtr = nbSymbols - 1;
return readSize;
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x1f0, %rsp # imm = 0x1F0
movq %rdi, -0x10(%rbp)
movq %rsi, -0x18(%rbp)
movq %rdx, -0x20(%rbp)
movq %rcx, -0x28(%rbp)
movq %r8, -0x30(%rbp)
movl $0x0, -0x174(%rbp)
movl $0x0, -0x178(%rbp)
leaq -0x130(%rbp), %rdi
leaq -0x170(%rbp), %rdx
movq -0x20(%rbp), %r9
movq -0x28(%rbp), %rax
movl $0x100, %esi # imm = 0x100
leaq -0x178(%rbp), %rcx
leaq -0x174(%rbp), %r8
movq %rax, (%rsp)
callq 0xae2350
movq %rax, -0x180(%rbp)
movq -0x180(%rbp), %rdi
callq 0xaede40
cmpl $0x0, %eax
je 0xaedf3d
movq -0x180(%rbp), %rax
movq %rax, -0x8(%rbp)
jmp 0xaee1e3
cmpl $0x0, -0x170(%rbp)
seta %al
andb $0x1, %al
movzbl %al, %ecx
movq -0x30(%rbp), %rax
movl %ecx, (%rax)
cmpl $0xc, -0x174(%rbp)
jbe 0xaedf68
movq $-0x2c, -0x8(%rbp)
jmp 0xaee1e3
movl -0x178(%rbp), %eax
movq -0x18(%rbp), %rcx
movl (%rcx), %ecx
addl $0x1, %ecx
cmpl %ecx, %eax
jbe 0xaedf88
movq $-0x30, -0x8(%rbp)
jmp 0xaee1e3
movl $0x0, -0x188(%rbp)
movl $0x1, -0x184(%rbp)
movl -0x184(%rbp), %eax
cmpl -0x174(%rbp), %eax
ja 0xaedffe
movl -0x188(%rbp), %eax
movl %eax, -0x18c(%rbp)
movl -0x184(%rbp), %eax
movl -0x170(%rbp,%rax,4), %eax
movl -0x184(%rbp), %ecx
subl $0x1, %ecx
shll %cl, %eax
addl -0x188(%rbp), %eax
movl %eax, -0x188(%rbp)
movl -0x18c(%rbp), %ecx
movl -0x184(%rbp), %eax
movl %ecx, -0x170(%rbp,%rax,4)
movl -0x184(%rbp), %eax
addl $0x1, %eax
movl %eax, -0x184(%rbp)
jmp 0xaedf9c
movl $0x0, -0x190(%rbp)
movl -0x190(%rbp), %eax
cmpl -0x178(%rbp), %eax
jae 0xaee073
movl -0x190(%rbp), %eax
movzbl -0x130(%rbp,%rax), %eax
movl %eax, -0x194(%rbp)
movl -0x174(%rbp), %eax
addl $0x1, %eax
subl -0x194(%rbp), %eax
movzbl %al, %eax
cmpl $0x0, -0x194(%rbp)
setne %cl
andb $0x1, %cl
movzbl %cl, %edx
xorl %ecx, %ecx
subl %edx, %ecx
andl %ecx, %eax
movb %al, %dl
movq -0x10(%rbp), %rax
movl -0x190(%rbp), %ecx
movb %dl, 0x2(%rax,%rcx,4)
movl -0x190(%rbp), %eax
addl $0x1, %eax
movl %eax, -0x190(%rbp)
jmp 0xaee008
leaq -0x1b0(%rbp), %rdi
xorl %esi, %esi
movl $0x1c, %edx
callq 0x3b780
leaq -0x1d0(%rbp), %rdi
xorl %esi, %esi
movl $0x1c, %edx
callq 0x3b780
movl $0x0, -0x1d4(%rbp)
movl -0x1d4(%rbp), %eax
cmpl -0x178(%rbp), %eax
jae 0xaee0e5
movq -0x10(%rbp), %rax
movl -0x1d4(%rbp), %ecx
movzbl 0x2(%rax,%rcx,4), %eax
movw -0x1b0(%rbp,%rax,2), %cx
addw $0x1, %cx
movw %cx, -0x1b0(%rbp,%rax,2)
movl -0x1d4(%rbp), %eax
addl $0x1, %eax
movl %eax, -0x1d4(%rbp)
jmp 0xaee0a3
movl -0x174(%rbp), %eax
addl $0x1, %eax
movl %eax, %eax
movw $0x0, -0x1d0(%rbp,%rax,2)
movw $0x0, -0x1d6(%rbp)
movl -0x174(%rbp), %eax
movl %eax, -0x1dc(%rbp)
cmpl $0x0, -0x1dc(%rbp)
jbe 0xaee16c
movw -0x1d6(%rbp), %cx
movl -0x1dc(%rbp), %eax
movw %cx, -0x1d0(%rbp,%rax,2)
movl -0x1dc(%rbp), %eax
movzwl -0x1b0(%rbp,%rax,2), %ecx
movzwl -0x1d6(%rbp), %eax
addl %ecx, %eax
movw %ax, -0x1d6(%rbp)
movzwl -0x1d6(%rbp), %eax
sarl %eax
movw %ax, -0x1d6(%rbp)
movl -0x1dc(%rbp), %eax
addl $-0x1, %eax
movl %eax, -0x1dc(%rbp)
jmp 0xaee10f
movl $0x0, -0x1e0(%rbp)
movl -0x1e0(%rbp), %eax
cmpl -0x178(%rbp), %eax
jae 0xaee1c9
movq -0x10(%rbp), %rax
movl -0x1e0(%rbp), %ecx
movzbl 0x2(%rax,%rcx,4), %eax
movw -0x1d0(%rbp,%rax,2), %dx
movw %dx, %cx
addw $0x1, %cx
movw %cx, -0x1d0(%rbp,%rax,2)
movq -0x10(%rbp), %rax
movl -0x1e0(%rbp), %ecx
movw %dx, (%rax,%rcx,4)
movl -0x1e0(%rbp), %eax
addl $0x1, %eax
movl %eax, -0x1e0(%rbp)
jmp 0xaee176
movl -0x178(%rbp), %ecx
subl $0x1, %ecx
movq -0x18(%rbp), %rax
movl %ecx, (%rax)
movq -0x180(%rbp), %rax
movq %rax, -0x8(%rbp)
movq -0x8(%rbp), %rax
addq $0x1f0, %rsp # imm = 0x1F0
popq %rbp
retq
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/huf_compress.c
|
HUF_sort
|
static void HUF_sort(nodeElt* huffNode, const unsigned* count, U32 maxSymbolValue, rankPos* rankPosition)
{
int n;
int const maxSymbolValue1 = (int)maxSymbolValue + 1;
/* Compute base and set curr to base.
     * For symbol s let lowerRank = BIT_highbit32(count[s]+1) and rank = lowerRank + 1.
     * Then 2^lowerRank <= count[s]+1 <= 2^rank.
* We attribute each symbol to lowerRank's base value, because we want to know where
* each rank begins in the output, so for rank R we want to count ranks R+1 and above.
*/
ZSTD_memset(rankPosition, 0, sizeof(*rankPosition) * RANK_POSITION_TABLE_SIZE);
for (n = 0; n < maxSymbolValue1; ++n) {
U32 lowerRank = BIT_highbit32(count[n] + 1);
rankPosition[lowerRank].base++;
}
assert(rankPosition[RANK_POSITION_TABLE_SIZE - 1].base == 0);
for (n = RANK_POSITION_TABLE_SIZE - 1; n > 0; --n) {
rankPosition[n-1].base += rankPosition[n].base;
rankPosition[n-1].curr = rankPosition[n-1].base;
}
/* Sort */
for (n = 0; n < maxSymbolValue1; ++n) {
U32 const c = count[n];
U32 const r = BIT_highbit32(c+1) + 1;
U32 pos = rankPosition[r].curr++;
/* Insert into the correct position in the rank.
* We have at most 256 symbols, so this insertion should be fine.
*/
while ((pos > rankPosition[r].base) && (c > huffNode[pos-1].count)) {
huffNode[pos] = huffNode[pos-1];
pos--;
}
huffNode[pos].count = c;
huffNode[pos].byte = (BYTE)n;
}
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x40, %rsp
movq %rdi, -0x8(%rbp)
movq %rsi, -0x10(%rbp)
movl %edx, -0x14(%rbp)
movq %rcx, -0x20(%rbp)
movl -0x14(%rbp), %eax
addl $0x1, %eax
movl %eax, -0x28(%rbp)
movq -0x20(%rbp), %rdi
xorl %esi, %esi
movl $0x100, %edx # imm = 0x100
callq 0x3b780
movl $0x0, -0x24(%rbp)
movl -0x24(%rbp), %eax
cmpl -0x28(%rbp), %eax
jge 0xaee3a0
movq -0x10(%rbp), %rax
movslq -0x24(%rbp), %rcx
movl (%rax,%rcx,4), %edi
addl $0x1, %edi
callq 0xaefc60
movl %eax, -0x2c(%rbp)
movq -0x20(%rbp), %rax
movl -0x2c(%rbp), %ecx
movl (%rax,%rcx,8), %edx
addl $0x1, %edx
movl %edx, (%rax,%rcx,8)
movl -0x24(%rbp), %eax
addl $0x1, %eax
movl %eax, -0x24(%rbp)
jmp 0xaee367
movl $0x1f, -0x24(%rbp)
cmpl $0x0, -0x24(%rbp)
jle 0xaee3f7
movq -0x20(%rbp), %rax
movslq -0x24(%rbp), %rcx
movl (%rax,%rcx,8), %edx
movq -0x20(%rbp), %rax
movl -0x24(%rbp), %ecx
subl $0x1, %ecx
movslq %ecx, %rcx
addl (%rax,%rcx,8), %edx
movl %edx, (%rax,%rcx,8)
movq -0x20(%rbp), %rax
movl -0x24(%rbp), %ecx
subl $0x1, %ecx
movslq %ecx, %rcx
movl (%rax,%rcx,8), %edx
movq -0x20(%rbp), %rax
movl -0x24(%rbp), %ecx
subl $0x1, %ecx
movslq %ecx, %rcx
movl %edx, 0x4(%rax,%rcx,8)
movl -0x24(%rbp), %eax
addl $-0x1, %eax
movl %eax, -0x24(%rbp)
jmp 0xaee3a7
movl $0x0, -0x24(%rbp)
movl -0x24(%rbp), %eax
cmpl -0x28(%rbp), %eax
jge 0xaee4ca
movq -0x10(%rbp), %rax
movslq -0x24(%rbp), %rcx
movl (%rax,%rcx,4), %eax
movl %eax, -0x30(%rbp)
movl -0x30(%rbp), %edi
addl $0x1, %edi
callq 0xaefc60
addl $0x1, %eax
movl %eax, -0x34(%rbp)
movq -0x20(%rbp), %rcx
movl -0x34(%rbp), %eax
movl %eax, %edx
movl 0x4(%rcx,%rdx,8), %eax
movl %eax, %esi
addl $0x1, %esi
movl %esi, 0x4(%rcx,%rdx,8)
movl %eax, -0x38(%rbp)
movl -0x38(%rbp), %ecx
movq -0x20(%rbp), %rdx
movl -0x34(%rbp), %eax
movl %eax, %esi
xorl %eax, %eax
cmpl (%rdx,%rsi,8), %ecx
movb %al, -0x39(%rbp)
jbe 0xaee470
movl -0x30(%rbp), %eax
movq -0x8(%rbp), %rcx
movl -0x38(%rbp), %edx
subl $0x1, %edx
movl %edx, %edx
cmpl (%rcx,%rdx,8), %eax
seta %al
movb %al, -0x39(%rbp)
movb -0x39(%rbp), %al
testb $0x1, %al
jne 0xaee479
jmp 0xaee49f
movq -0x8(%rbp), %rax
movl -0x38(%rbp), %ecx
movq -0x8(%rbp), %rdx
movl -0x38(%rbp), %esi
subl $0x1, %esi
movl %esi, %esi
movq (%rdx,%rsi,8), %rdx
movq %rdx, (%rax,%rcx,8)
movl -0x38(%rbp), %eax
addl $-0x1, %eax
movl %eax, -0x38(%rbp)
jmp 0xaee442
movl -0x30(%rbp), %edx
movq -0x8(%rbp), %rax
movl -0x38(%rbp), %ecx
movl %edx, (%rax,%rcx,8)
movl -0x24(%rbp), %eax
movb %al, %dl
movq -0x8(%rbp), %rax
movl -0x38(%rbp), %ecx
movb %dl, 0x6(%rax,%rcx,8)
movl -0x24(%rbp), %eax
addl $0x1, %eax
movl %eax, -0x24(%rbp)
jmp 0xaee3fe
addq $0x40, %rsp
popq %rbp
retq
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/huf_compress.c
|
HUF_compress_internal
|
static size_t
HUF_compress_internal (void* dst, size_t dstSize,
const void* src, size_t srcSize,
unsigned maxSymbolValue, unsigned huffLog,
HUF_nbStreams_e nbStreams,
void* workSpace_align4, size_t wkspSize,
HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat,
const int bmi2)
{
HUF_compress_tables_t* const table = (HUF_compress_tables_t*)workSpace_align4;
BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = ostart + dstSize;
BYTE* op = ostart;
HUF_STATIC_ASSERT(sizeof(*table) <= HUF_WORKSPACE_SIZE);
assert(((size_t)workSpace_align4 & 3) == 0); /* must be aligned on 4-bytes boundaries */
/* checks & inits */
if (wkspSize < HUF_WORKSPACE_SIZE) return ERROR(workSpace_tooSmall);
if (!srcSize) return 0; /* Uncompressed */
if (!dstSize) return 0; /* cannot fit anything within dst budget */
if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); /* current block size limit */
if (huffLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
if (!maxSymbolValue) maxSymbolValue = HUF_SYMBOLVALUE_MAX;
if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT;
/* Heuristic : If old table is valid, use it for small inputs */
if (preferRepeat && repeat && *repeat == HUF_repeat_valid) {
return HUF_compressCTable_internal(ostart, op, oend,
src, srcSize,
nbStreams, oldHufTable, bmi2);
}
/* Scan input and build symbol stats */
{ CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, workSpace_align4, wkspSize) );
if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */
if (largest <= (srcSize >> 7)+4) return 0; /* heuristic : probably not compressible enough */
}
/* Check validity of previous table */
if ( repeat
&& *repeat == HUF_repeat_check
&& !HUF_validateCTable(oldHufTable, table->count, maxSymbolValue)) {
*repeat = HUF_repeat_none;
}
/* Heuristic : use existing table for small inputs */
if (preferRepeat && repeat && *repeat != HUF_repeat_none) {
return HUF_compressCTable_internal(ostart, op, oend,
src, srcSize,
nbStreams, oldHufTable, bmi2);
}
/* Build Huffman Tree */
huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
{ size_t const maxBits = HUF_buildCTable_wksp(table->CTable, table->count,
maxSymbolValue, huffLog,
&table->wksps.buildCTable_wksp, sizeof(table->wksps.buildCTable_wksp));
CHECK_F(maxBits);
huffLog = (U32)maxBits;
/* Zero unused symbols in CTable, so we can check it for validity */
ZSTD_memset(table->CTable + (maxSymbolValue + 1), 0,
sizeof(table->CTable) - ((maxSymbolValue + 1) * sizeof(HUF_CElt)));
}
/* Write table description header */
{ CHECK_V_F(hSize, HUF_writeCTable_wksp(op, dstSize, table->CTable, maxSymbolValue, huffLog,
&table->wksps.writeCTable_wksp, sizeof(table->wksps.writeCTable_wksp)) );
/* Check if using previous huffman table is beneficial */
if (repeat && *repeat != HUF_repeat_none) {
size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, table->count, maxSymbolValue);
size_t const newSize = HUF_estimateCompressedSize(table->CTable, table->count, maxSymbolValue);
if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) {
return HUF_compressCTable_internal(ostart, op, oend,
src, srcSize,
nbStreams, oldHufTable, bmi2);
} }
/* Use the new huffman table */
if (hSize + 12ul >= srcSize) { return 0; }
op += hSize;
if (repeat) { *repeat = HUF_repeat_none; }
if (oldHufTable)
ZSTD_memcpy(oldHufTable, table->CTable, sizeof(table->CTable)); /* Save new table */
}
return HUF_compressCTable_internal(ostart, op, oend,
src, srcSize,
nbStreams, table->CTable, bmi2);
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x90, %rsp
movl 0x40(%rbp), %eax
movl 0x38(%rbp), %eax
movq 0x30(%rbp), %rax
movq 0x28(%rbp), %rax
movq 0x20(%rbp), %rax
movq 0x18(%rbp), %rax
movl 0x10(%rbp), %eax
movq %rdi, -0x10(%rbp)
movq %rsi, -0x18(%rbp)
movq %rdx, -0x20(%rbp)
movq %rcx, -0x28(%rbp)
movl %r8d, -0x2c(%rbp)
movl %r9d, -0x30(%rbp)
movq 0x18(%rbp), %rax
movq %rax, -0x38(%rbp)
movq -0x10(%rbp), %rax
movq %rax, -0x40(%rbp)
movq -0x40(%rbp), %rax
addq -0x18(%rbp), %rax
movq %rax, -0x48(%rbp)
movq -0x40(%rbp), %rax
movq %rax, -0x50(%rbp)
cmpq $0x1900, 0x20(%rbp) # imm = 0x1900
jae 0xaef537
movq $-0x42, -0x8(%rbp)
jmp 0xaef919
cmpq $0x0, -0x28(%rbp)
jne 0xaef54b
movq $0x0, -0x8(%rbp)
jmp 0xaef919
cmpq $0x0, -0x18(%rbp)
jne 0xaef55f
movq $0x0, -0x8(%rbp)
jmp 0xaef919
cmpq $0x20000, -0x28(%rbp) # imm = 0x20000
jbe 0xaef576
movq $-0x48, -0x8(%rbp)
jmp 0xaef919
cmpl $0xc, -0x30(%rbp)
jbe 0xaef589
movq $-0x2c, -0x8(%rbp)
jmp 0xaef919
cmpl $0xff, -0x2c(%rbp)
jbe 0xaef59f
movq $-0x2e, -0x8(%rbp)
jmp 0xaef919
cmpl $0x0, -0x2c(%rbp)
jne 0xaef5ac
movl $0xff, -0x2c(%rbp)
cmpl $0x0, -0x30(%rbp)
jne 0xaef5b9
movl $0xb, -0x30(%rbp)
cmpl $0x0, 0x38(%rbp)
je 0xaef604
cmpq $0x0, 0x30(%rbp)
je 0xaef604
movq 0x30(%rbp), %rax
cmpl $0x2, (%rax)
jne 0xaef604
movq -0x40(%rbp), %rdi
movq -0x50(%rbp), %rsi
movq -0x48(%rbp), %rdx
movq -0x20(%rbp), %rcx
movq -0x28(%rbp), %r8
movl 0x10(%rbp), %r9d
movq 0x28(%rbp), %r10
movl 0x40(%rbp), %eax
movq %r10, (%rsp)
movl %eax, 0x8(%rsp)
callq 0xaf0050
movq %rax, -0x8(%rbp)
jmp 0xaef919
movq -0x38(%rbp), %rdi
movq -0x20(%rbp), %rdx
movq -0x28(%rbp), %rcx
movq 0x18(%rbp), %r8
movq 0x20(%rbp), %r9
leaq -0x2c(%rbp), %rsi
callq 0xaed820
movq %rax, -0x58(%rbp)
movq -0x58(%rbp), %rdi
callq 0xaede40
cmpl $0x0, %eax
je 0xaef640
movq -0x58(%rbp), %rax
movq %rax, -0x8(%rbp)
jmp 0xaef919
movq -0x58(%rbp), %rax
cmpq -0x28(%rbp), %rax
jne 0xaef663
movq -0x20(%rbp), %rax
movb (%rax), %cl
movq -0x40(%rbp), %rax
movb %cl, (%rax)
movq $0x1, -0x8(%rbp)
jmp 0xaef919
movq -0x58(%rbp), %rax
movq -0x28(%rbp), %rcx
shrq $0x7, %rcx
addq $0x4, %rcx
cmpq %rcx, %rax
ja 0xaef685
movq $0x0, -0x8(%rbp)
jmp 0xaef919
cmpq $0x0, 0x30(%rbp)
je 0xaef6b4
movq 0x30(%rbp), %rax
cmpl $0x1, (%rax)
jne 0xaef6b4
movq 0x28(%rbp), %rdi
movq -0x38(%rbp), %rsi
movl -0x2c(%rbp), %edx
callq 0xaeec30
cmpl $0x0, %eax
jne 0xaef6b4
movq 0x30(%rbp), %rax
movl $0x0, (%rax)
cmpl $0x0, 0x38(%rbp)
je 0xaef6ff
cmpq $0x0, 0x30(%rbp)
je 0xaef6ff
movq 0x30(%rbp), %rax
cmpl $0x0, (%rax)
je 0xaef6ff
movq -0x40(%rbp), %rdi
movq -0x50(%rbp), %rsi
movq -0x48(%rbp), %rdx
movq -0x20(%rbp), %rcx
movq -0x28(%rbp), %r8
movl 0x10(%rbp), %r9d
movq 0x28(%rbp), %r10
movl 0x40(%rbp), %eax
movq %r10, (%rsp)
movl %eax, 0x8(%rsp)
callq 0xaf0050
movq %rax, -0x8(%rbp)
jmp 0xaef919
movl -0x30(%rbp), %edi
movq -0x28(%rbp), %rsi
movl -0x2c(%rbp), %edx
callq 0xaed990
movl %eax, -0x30(%rbp)
movq -0x38(%rbp), %rdi
addq $0x400, %rdi # imm = 0x400
movq -0x38(%rbp), %rsi
movl -0x2c(%rbp), %edx
movl -0x30(%rbp), %ecx
movq -0x38(%rbp), %r8
addq $0x800, %r8 # imm = 0x800
movl $0x1100, %r9d # imm = 0x1100
callq 0xaee220
movq %rax, -0x60(%rbp)
movq -0x60(%rbp), %rax
movq %rax, -0x68(%rbp)
movq -0x68(%rbp), %rdi
callq 0xaede40
cmpl $0x0, %eax
je 0xaef763
movq -0x68(%rbp), %rax
movq %rax, -0x8(%rbp)
jmp 0xaef919
movq -0x60(%rbp), %rax
movl %eax, -0x30(%rbp)
movq -0x38(%rbp), %rdi
addq $0x400, %rdi # imm = 0x400
movl -0x2c(%rbp), %eax
addl $0x1, %eax
movl %eax, %eax
shlq $0x2, %rax
addq %rax, %rdi
movl -0x2c(%rbp), %eax
addl $0x1, %eax
movl %eax, %eax
shlq $0x2, %rax
movl $0x400, %edx # imm = 0x400
subq %rax, %rdx
xorl %esi, %esi
callq 0x3b780
movq -0x50(%rbp), %rdi
movq -0x18(%rbp), %rsi
movq -0x38(%rbp), %rdx
addq $0x400, %rdx # imm = 0x400
movl -0x2c(%rbp), %ecx
movl -0x30(%rbp), %r8d
movq -0x38(%rbp), %r9
addq $0x800, %r9 # imm = 0x800
movq $0x2c0, (%rsp) # imm = 0x2C0
callq 0xaed9c0
movq %rax, -0x70(%rbp)
movq -0x70(%rbp), %rdi
callq 0xaede40
cmpl $0x0, %eax
je 0xaef7f0
movq -0x70(%rbp), %rax
movq %rax, -0x8(%rbp)
jmp 0xaef919
cmpq $0x0, 0x30(%rbp)
je 0xaef88d
movq 0x30(%rbp), %rax
cmpl $0x0, (%rax)
je 0xaef88d
movq 0x28(%rbp), %rdi
movq -0x38(%rbp), %rsi
movl -0x2c(%rbp), %edx
callq 0xaeebd0
movq %rax, -0x78(%rbp)
movq -0x38(%rbp), %rdi
addq $0x400, %rdi # imm = 0x400
movq -0x38(%rbp), %rsi
movl -0x2c(%rbp), %edx
callq 0xaeebd0
movq %rax, -0x80(%rbp)
movq -0x78(%rbp), %rax
movq -0x70(%rbp), %rcx
addq -0x80(%rbp), %rcx
cmpq %rcx, %rax
jbe 0xaef856
movq -0x70(%rbp), %rax
addq $0xc, %rax
cmpq -0x28(%rbp), %rax
jb 0xaef88b
movq -0x40(%rbp), %rdi
movq -0x50(%rbp), %rsi
movq -0x48(%rbp), %rdx
movq -0x20(%rbp), %rcx
movq -0x28(%rbp), %r8
movl 0x10(%rbp), %r9d
movq 0x28(%rbp), %r10
movl 0x40(%rbp), %eax
movq %r10, (%rsp)
movl %eax, 0x8(%rsp)
callq 0xaf0050
movq %rax, -0x8(%rbp)
jmp 0xaef919
jmp 0xaef88d
movq -0x70(%rbp), %rax
addq $0xc, %rax
cmpq -0x28(%rbp), %rax
jb 0xaef8a5
movq $0x0, -0x8(%rbp)
jmp 0xaef919
movq -0x70(%rbp), %rax
addq -0x50(%rbp), %rax
movq %rax, -0x50(%rbp)
cmpq $0x0, 0x30(%rbp)
je 0xaef8c2
movq 0x30(%rbp), %rax
movl $0x0, (%rax)
cmpq $0x0, 0x28(%rbp)
je 0xaef8e2
movq 0x28(%rbp), %rdi
movq -0x38(%rbp), %rsi
addq $0x400, %rsi # imm = 0x400
movl $0x400, %edx # imm = 0x400
callq 0x3cb70
movq -0x40(%rbp), %rdi
movq -0x50(%rbp), %rsi
movq -0x48(%rbp), %rdx
movq -0x20(%rbp), %rcx
movq -0x28(%rbp), %r8
movl 0x10(%rbp), %r9d
movq -0x38(%rbp), %r10
addq $0x400, %r10 # imm = 0x400
movl 0x40(%rbp), %eax
movq %r10, (%rsp)
movl %eax, 0x8(%rsp)
callq 0xaf0050
movq %rax, -0x8(%rbp)
movq -0x8(%rbp), %rax
addq $0x90, %rsp
popq %rbp
retq
nopw %cs:(%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/huf_compress.c
|
MEM_writeLE16
|
MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
{
if (MEM_isLittleEndian()) {
MEM_write16(memPtr, val);
} else {
BYTE* p = (BYTE*)memPtr;
p[0] = (BYTE)val;
p[1] = (BYTE)(val>>8);
}
}
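
/* Minimal sanity sketch (not part of the original source): on any platform
 * MEM_writeLE16() must store the low byte first. Assumes an assert() macro
 * is available in this translation unit. */
static void example_writeLE16(void)
{
    BYTE buf[2];
    MEM_writeLE16(buf, 0x1234);
    assert(buf[0] == 0x34 && buf[1] == 0x12);   /* little-endian layout */
}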
|
pushq %rbp
movq %rsp, %rbp
subq $0x20, %rsp
movw %si, %ax
movq %rdi, -0x8(%rbp)
movw %ax, -0xa(%rbp)
callq 0xaeff40
cmpl $0x0, %eax
je 0xaefffc
movq -0x8(%rbp), %rdi
movzwl -0xa(%rbp), %esi
callq 0xaf0030
jmp 0xaf0020
movq -0x8(%rbp), %rax
movq %rax, -0x18(%rbp)
movw -0xa(%rbp), %ax
movb %al, %cl
movq -0x18(%rbp), %rax
movb %cl, (%rax)
movzwl -0xa(%rbp), %eax
sarl $0x8, %eax
movb %al, %cl
movq -0x18(%rbp), %rax
movb %cl, 0x1(%rax)
addq $0x20, %rsp
popq %rbp
retq
nopw %cs:(%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzstd/lib/compress/../common/mem.h
|
uv__tcp_keepalive
|
int uv__tcp_keepalive(int fd, int on, unsigned int delay) {
if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)))
return UV__ERR(errno);
#ifdef TCP_KEEPIDLE
if (on) {
int intvl = 1; /* 1 second; same as default on Win32 */
int cnt = 10; /* 10 retries; same as hardcoded on Win32 */
if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &delay, sizeof(delay)))
return UV__ERR(errno);
if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl)))
return UV__ERR(errno);
if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt)))
return UV__ERR(errno);
}
#endif
/* Solaris/SmartOS, if you don't support keep-alive,
* then don't advertise it in your system headers...
*/
/* FIXME(bnoordhuis) That's possibly because sizeof(delay) should be 1. */
#if defined(TCP_KEEPALIVE) && !defined(__sun)
if (on && setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE, &delay, sizeof(delay)))
return UV__ERR(errno);
#endif
return 0;
}
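
/* Illustrative usage sketch (not part of the original source): applications
 * normally reach this helper through the public wrapper uv_tcp_keepalive().
 * The loop/handle names and the 60-second delay are assumptions. */
static void example_tcp_keepalive(uv_loop_t* loop)
{
  uv_tcp_t handle;
  if (uv_tcp_init(loop, &handle) == 0) {
    int const r = uv_tcp_keepalive(&handle, 1, 60);   /* 60 s idle delay */
    if (r != 0) { /* handle error, e.g. via uv_strerror(r) */ }
  }
}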
|
pushq %rbp
movq %rsp, %rbp
subq $0x20, %rsp
movl %edi, -0x8(%rbp)
movl %esi, -0xc(%rbp)
movl %edx, -0x10(%rbp)
movl -0x8(%rbp), %edi
movl $0x1, %esi
movl $0x9, %edx
leaq -0xc(%rbp), %rcx
movl $0x4, %r8d
callq 0x3ccf0
cmpl $0x0, %eax
je 0xb20c06
callq 0x3e130
movq %rax, %rcx
xorl %eax, %eax
subl (%rcx), %eax
movl %eax, -0x4(%rbp)
jmp 0xb20cb8
cmpl $0x0, -0xc(%rbp)
je 0xb20cb1
movl $0x1, -0x14(%rbp)
movl $0xa, -0x18(%rbp)
movl -0x8(%rbp), %edi
movl $0x6, %esi
movl $0x4, %r8d
leaq -0x10(%rbp), %rcx
movl %r8d, %edx
callq 0x3ccf0
cmpl $0x0, %eax
je 0xb20c4e
callq 0x3e130
movq %rax, %rcx
xorl %eax, %eax
subl (%rcx), %eax
movl %eax, -0x4(%rbp)
jmp 0xb20cb8
movl -0x8(%rbp), %edi
movl $0x6, %esi
movl $0x5, %edx
leaq -0x14(%rbp), %rcx
movl $0x4, %r8d
callq 0x3ccf0
cmpl $0x0, %eax
je 0xb20c80
callq 0x3e130
movq %rax, %rcx
xorl %eax, %eax
subl (%rcx), %eax
movl %eax, -0x4(%rbp)
jmp 0xb20cb8
movl -0x8(%rbp), %edi
movl $0x6, %edx
leaq -0x18(%rbp), %rcx
movl $0x4, %r8d
movl %edx, %esi
callq 0x3ccf0
cmpl $0x0, %eax
je 0xb20caf
callq 0x3e130
movq %rax, %rcx
xorl %eax, %eax
subl (%rcx), %eax
movl %eax, -0x4(%rbp)
jmp 0xb20cb8
jmp 0xb20cb1
movl $0x0, -0x4(%rbp)
movl -0x4(%rbp), %eax
addq $0x20, %rsp
popq %rbp
retq
nopw %cs:(%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmlibuv/src/unix/tcp.c
|
uv_cond_timedwait
|
int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) {
int r;
struct timespec ts;
#if defined(__MVS__)
struct timeval tv;
#endif
#if defined(__APPLE__) && defined(__MACH__)
ts.tv_sec = timeout / NANOSEC;
ts.tv_nsec = timeout % NANOSEC;
r = pthread_cond_timedwait_relative_np(cond, mutex, &ts);
#else
#if defined(__MVS__)
if (gettimeofday(&tv, NULL))
abort();
timeout += tv.tv_sec * NANOSEC + tv.tv_usec * 1e3;
#else
timeout += uv__hrtime(UV_CLOCK_PRECISE);
#endif
ts.tv_sec = timeout / NANOSEC;
ts.tv_nsec = timeout % NANOSEC;
r = pthread_cond_timedwait(cond, mutex, &ts);
#endif
if (r == 0)
return 0;
if (r == ETIMEDOUT)
return UV_ETIMEDOUT;
abort();
#ifndef __SUNPRO_C
return UV_EINVAL; /* Satisfy the compiler. */
#endif
}
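
/* Illustrative usage sketch (not part of the original source): waiting up to
 * one second for a predicate guarded by a mutex. The timeout argument is in
 * nanoseconds; everything except the libuv calls is an assumption. */
static int example_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex,
                                  const int* ready)
{
  int r = 0;
  uv_mutex_lock(mutex);
  while (!*ready) {
    r = uv_cond_timedwait(cond, mutex, (uint64_t)1000000000);   /* 1 second */
    if (r == UV_ETIMEDOUT)
      break;
  }
  uv_mutex_unlock(mutex);
  return r;
}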
|
pushq %rbp
movq %rsp, %rbp
subq $0x40, %rsp
movq %rdi, -0x10(%rbp)
movq %rsi, -0x18(%rbp)
movq %rdx, -0x20(%rbp)
xorl %edi, %edi
callq 0xb263d0
addq -0x20(%rbp), %rax
movq %rax, -0x20(%rbp)
movq -0x20(%rbp), %rax
movl $0x3b9aca00, %ecx # imm = 0x3B9ACA00
xorl %edx, %edx
divq %rcx
movq %rax, -0x38(%rbp)
movq -0x20(%rbp), %rax
movl $0x3b9aca00, %ecx # imm = 0x3B9ACA00
xorl %edx, %edx
divq %rcx
movq %rdx, -0x30(%rbp)
movq -0x10(%rbp), %rdi
movq -0x18(%rbp), %rsi
leaq -0x38(%rbp), %rdx
callq 0x3dc20
movl %eax, -0x24(%rbp)
cmpl $0x0, -0x24(%rbp)
jne 0xb21e9a
movl $0x0, -0x4(%rbp)
jmp 0xb21eae
cmpl $0x6e, -0x24(%rbp)
jne 0xb21ea9
movl $0xffffff92, -0x4(%rbp) # imm = 0xFFFFFF92
jmp 0xb21eae
callq 0x3da50
movl -0x4(%rbp), %eax
addq $0x40, %rsp
popq %rbp
retq
nopw (%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmlibuv/src/unix/thread.c
|
uv_tty_get_winsize
|
int uv_tty_get_winsize(uv_tty_t* tty, int* width, int* height) {
struct winsize ws;
int err;
do
err = ioctl(uv__stream_fd(tty), TIOCGWINSZ, &ws);
while (err == -1 && errno == EINTR);
if (err == -1)
return UV__ERR(errno);
*width = ws.ws_col;
*height = ws.ws_row;
return 0;
}
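
/* Illustrative usage sketch (not part of the original source): querying the
 * terminal size of stdout. Variable names are assumptions. */
static void example_tty_winsize(uv_loop_t* loop)
{
  uv_tty_t tty;
  int width = 0, height = 0;
  if (uv_tty_init(loop, &tty, 1, 0) == 0 &&        /* fd 1 == stdout */
      uv_tty_get_winsize(&tty, &width, &height) == 0) {
    /* width/height now hold the terminal dimensions */
  }
}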
|
pushq %rbp
movq %rsp, %rbp
subq $0x30, %rsp
movq %rdi, -0x10(%rbp)
movq %rsi, -0x18(%rbp)
movq %rdx, -0x20(%rbp)
movq -0x10(%rbp), %rax
movl 0xb8(%rax), %edi
movl $0x5413, %esi # imm = 0x5413
leaq -0x28(%rbp), %rdx
movb $0x0, %al
callq 0x3b2f0
movl %eax, -0x2c(%rbp)
xorl %eax, %eax
cmpl $-0x1, -0x2c(%rbp)
movb %al, -0x2d(%rbp)
jne 0xb2265a
callq 0x3e130
cmpl $0x4, (%rax)
sete %al
movb %al, -0x2d(%rbp)
movb -0x2d(%rbp), %al
testb $0x1, %al
jne 0xb22624
cmpl $-0x1, -0x2c(%rbp)
jne 0xb22678
callq 0x3e130
movq %rax, %rcx
xorl %eax, %eax
subl (%rcx), %eax
movl %eax, -0x4(%rbp)
jmp 0xb22693
movzwl -0x26(%rbp), %ecx
movq -0x18(%rbp), %rax
movl %ecx, (%rax)
movzwl -0x28(%rbp), %ecx
movq -0x20(%rbp), %rax
movl %ecx, (%rax)
movl $0x0, -0x4(%rbp)
movl -0x4(%rbp), %eax
addq $0x30, %rsp
popq %rbp
retq
nopl (%rax)
|
/JKorbelRA[P]CMake/Utilities/cmlibuv/src/unix/tty.c
|
cmpxchgi
|
UV_UNUSED(static int cmpxchgi(int* ptr, int oldval, int newval)) {
#if defined(__i386__) || defined(__x86_64__)
int out;
__asm__ __volatile__ ("lock; cmpxchg %2, %1;"
: "=a" (out), "+m" (*(volatile int*) ptr)
: "r" (newval), "0" (oldval)
: "memory");
return out;
#elif defined(_AIX) && defined(__ibmxl__)
/* FIXME: This is not actually atomic but XLClang 16.1 for AIX
does not provide __sync_val_compare_and_swap or an equivalent.
Its documentation suggests using C++11 atomics but this is C. */
__compare_and_swap((volatile int*)ptr, &oldval, newval);
return oldval;
#elif defined(__MVS__)
/* Use hand-rolled assembly because codegen from builtin __plo_CSST results in
* a runtime bug.
*/
__asm(" cs %0,%2,%1 \n " : "+r"(oldval), "+m"(*ptr) : "r"(newval) :);
return oldval;
#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
return atomic_cas_uint((uint_t *)ptr, (uint_t)oldval, (uint_t)newval);
#else
return __sync_val_compare_and_swap(ptr, oldval, newval);
#endif
}
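
/* Illustrative sketch (not part of the original source): cmpxchgi() is the
 * building block for a simple test-and-set spinlock. The lock variable and
 * function name are assumptions. */
static void example_spinlock(int* lock)
{
  while (cmpxchgi(lock, 0, 1) != 0)
    ;                          /* spin until we swap 0 -> 1 */
  /* ... critical section ... */
  *lock = 0;                   /* release */
}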
|
pushq %rbp
movq %rsp, %rbp
movq %rdi, -0x8(%rbp)
movl %esi, -0xc(%rbp)
movl %edx, -0x10(%rbp)
movq -0x8(%rbp), %rcx
movl -0x10(%rbp), %edx
movl -0xc(%rbp), %eax
lock
cmpxchgl %edx, (%rcx)
movl %eax, -0x14(%rbp)
movl -0x14(%rbp), %eax
popq %rbp
retq
nopw %cs:(%rax,%rax)
nop
|
/JKorbelRA[P]CMake/Utilities/cmlibuv/src/unix/atomic-ops.h
|
read_models
|
static int read_models(unsigned int numcpus, uv_cpu_info_t* ci) {
#if defined(__PPC__)
static const char model_marker[] = "cpu\t\t: ";
static const char speed_marker[] = "clock\t\t: ";
#else
static const char model_marker[] = "model name\t: ";
static const char speed_marker[] = "cpu MHz\t\t: ";
#endif
const char* inferred_model;
unsigned int model_idx;
unsigned int speed_idx;
unsigned int part_idx;
char buf[1024];
char* model;
FILE* fp;
int model_id;
/* Most are unused on non-ARM, non-MIPS and non-x86 architectures. */
(void) &model_marker;
(void) &speed_marker;
(void) &speed_idx;
(void) &part_idx;
(void) &model;
(void) &buf;
(void) &fp;
(void) &model_id;
model_idx = 0;
speed_idx = 0;
part_idx = 0;
#if defined(__arm__) || \
defined(__i386__) || \
defined(__mips__) || \
defined(__aarch64__) || \
defined(__PPC__) || \
defined(__x86_64__)
fp = uv__open_file("/proc/cpuinfo");
if (fp == NULL)
return UV__ERR(errno);
while (fgets(buf, sizeof(buf), fp)) {
if (model_idx < numcpus) {
if (strncmp(buf, model_marker, sizeof(model_marker) - 1) == 0) {
model = buf + sizeof(model_marker) - 1;
model = uv__strndup(model, strlen(model) - 1); /* Strip newline. */
if (model == NULL) {
fclose(fp);
return UV_ENOMEM;
}
ci[model_idx++].model = model;
continue;
}
}
#if defined(__arm__) || defined(__mips__) || defined(__aarch64__)
if (model_idx < numcpus) {
#if defined(__arm__)
/* Fallback for pre-3.8 kernels. */
static const char model_marker[] = "Processor\t: ";
#elif defined(__aarch64__)
static const char part_marker[] = "CPU part\t: ";
/* Adapted from: https://github.com/karelzak/util-linux */
struct vendor_part {
const int id;
const char* name;
};
static const struct vendor_part arm_chips[] = {
{ 0x811, "ARM810" },
{ 0x920, "ARM920" },
{ 0x922, "ARM922" },
{ 0x926, "ARM926" },
{ 0x940, "ARM940" },
{ 0x946, "ARM946" },
{ 0x966, "ARM966" },
{ 0xa20, "ARM1020" },
{ 0xa22, "ARM1022" },
{ 0xa26, "ARM1026" },
{ 0xb02, "ARM11 MPCore" },
{ 0xb36, "ARM1136" },
{ 0xb56, "ARM1156" },
{ 0xb76, "ARM1176" },
{ 0xc05, "Cortex-A5" },
{ 0xc07, "Cortex-A7" },
{ 0xc08, "Cortex-A8" },
{ 0xc09, "Cortex-A9" },
{ 0xc0d, "Cortex-A17" }, /* Originally A12 */
{ 0xc0f, "Cortex-A15" },
{ 0xc0e, "Cortex-A17" },
{ 0xc14, "Cortex-R4" },
{ 0xc15, "Cortex-R5" },
{ 0xc17, "Cortex-R7" },
{ 0xc18, "Cortex-R8" },
{ 0xc20, "Cortex-M0" },
{ 0xc21, "Cortex-M1" },
{ 0xc23, "Cortex-M3" },
{ 0xc24, "Cortex-M4" },
{ 0xc27, "Cortex-M7" },
{ 0xc60, "Cortex-M0+" },
{ 0xd01, "Cortex-A32" },
{ 0xd03, "Cortex-A53" },
{ 0xd04, "Cortex-A35" },
{ 0xd05, "Cortex-A55" },
{ 0xd06, "Cortex-A65" },
{ 0xd07, "Cortex-A57" },
{ 0xd08, "Cortex-A72" },
{ 0xd09, "Cortex-A73" },
{ 0xd0a, "Cortex-A75" },
{ 0xd0b, "Cortex-A76" },
{ 0xd0c, "Neoverse-N1" },
{ 0xd0d, "Cortex-A77" },
{ 0xd0e, "Cortex-A76AE" },
{ 0xd13, "Cortex-R52" },
{ 0xd20, "Cortex-M23" },
{ 0xd21, "Cortex-M33" },
{ 0xd41, "Cortex-A78" },
{ 0xd42, "Cortex-A78AE" },
{ 0xd4a, "Neoverse-E1" },
{ 0xd4b, "Cortex-A78C" },
};
if (strncmp(buf, part_marker, sizeof(part_marker) - 1) == 0) {
model = buf + sizeof(part_marker) - 1;
errno = 0;
model_id = strtol(model, NULL, 16);
if ((errno != 0) || model_id < 0) {
fclose(fp);
return UV_EINVAL;
}
for (part_idx = 0; part_idx < ARRAY_SIZE(arm_chips); part_idx++) {
if (model_id == arm_chips[part_idx].id) {
model = uv__strdup(arm_chips[part_idx].name);
if (model == NULL) {
fclose(fp);
return UV_ENOMEM;
}
ci[model_idx++].model = model;
break;
}
}
}
#else /* defined(__mips__) */
static const char model_marker[] = "cpu model\t\t: ";
#endif
if (strncmp(buf, model_marker, sizeof(model_marker) - 1) == 0) {
model = buf + sizeof(model_marker) - 1;
model = uv__strndup(model, strlen(model) - 1); /* Strip newline. */
if (model == NULL) {
fclose(fp);
return UV_ENOMEM;
}
ci[model_idx++].model = model;
continue;
}
}
#else /* !__arm__ && !__mips__ && !__aarch64__ */
if (speed_idx < numcpus) {
if (strncmp(buf, speed_marker, sizeof(speed_marker) - 1) == 0) {
ci[speed_idx++].speed = atoi(buf + sizeof(speed_marker) - 1);
continue;
}
}
#endif /* __arm__ || __mips__ || __aarch64__ */
}
fclose(fp);
#endif /* __arm__ || __i386__ || __mips__ || __PPC__ || __x86_64__ || __aarch__ */
/* Now we want to make sure that all the models contain *something* because
   * it's not safe to leave them as null. Copy the last entry unless there
   * isn't one, in which case we simply put "unknown" into everything.
*/
inferred_model = "unknown";
if (model_idx > 0)
inferred_model = ci[model_idx - 1].model;
while (model_idx < numcpus) {
model = uv__strndup(inferred_model, strlen(inferred_model));
if (model == NULL)
return UV_ENOMEM;
ci[model_idx++].model = model;
}
return 0;
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x460, %rsp # imm = 0x460
movl %edi, -0x8(%rbp)
movq %rsi, -0x10(%rbp)
movl $0x0, -0x1c(%rbp)
movl $0x0, -0x20(%rbp)
movl $0x0, -0x24(%rbp)
leaq 0x7869b(%rip), %rdi # 0xb9f079
callq 0xb0fbb0
movq %rax, -0x440(%rbp)
cmpq $0x0, -0x440(%rbp)
jne 0xb26a08
callq 0x3e130
movq %rax, %rcx
xorl %eax, %eax
subl (%rcx), %eax
movl %eax, -0x4(%rbp)
jmp 0xb26bf9
jmp 0xb26a0a
leaq -0x430(%rbp), %rdi
movq -0x440(%rbp), %rdx
movl $0x400, %esi # imm = 0x400
callq 0x3d770
cmpq $0x0, %rax
je 0xb26b50
movl -0x1c(%rbp), %eax
cmpl -0x8(%rbp), %eax
jae 0xb26aee
leaq -0x430(%rbp), %rdi
leaq 0xb73ef(%rip), %rsi # 0xbdde35
movl $0xd, %edx
callq 0x3ce80
cmpl $0x0, %eax
jne 0xb26aec
leaq -0x430(%rbp), %rax
addq $0xe, %rax
addq $-0x1, %rax
movq %rax, -0x438(%rbp)
movq -0x438(%rbp), %rax
movq %rax, -0x450(%rbp)
movq -0x438(%rbp), %rdi
callq 0x3b8f0
movq -0x450(%rbp), %rdi
movq %rax, %rsi
subq $0x1, %rsi
callq 0xb09dd0
movq %rax, -0x438(%rbp)
cmpq $0x0, -0x438(%rbp)
jne 0xb26ac5
movq -0x440(%rbp), %rdi
callq 0x3b980
movl $0xfffffff4, -0x4(%rbp) # imm = 0xFFFFFFF4
jmp 0xb26bf9
movq -0x438(%rbp), %rcx
movq -0x10(%rbp), %rax
movl -0x1c(%rbp), %edx
movl %edx, %esi
addl $0x1, %esi
movl %esi, -0x1c(%rbp)
movl %edx, %edx
imulq $0x38, %rdx, %rdx
addq %rdx, %rax
movq %rcx, (%rax)
jmp 0xb26a0a
jmp 0xb26aee
movl -0x20(%rbp), %eax
cmpl -0x8(%rbp), %eax
jae 0xb26b4b
leaq -0x430(%rbp), %rdi
leaq 0xb733f(%rip), %rsi # 0xbdde43
movl $0xb, %edx
callq 0x3ce80
cmpl $0x0, %eax
jne 0xb26b49
leaq -0x430(%rbp), %rdi
addq $0xc, %rdi
addq $-0x1, %rdi
callq 0x3b740
movl %eax, %ecx
movq -0x10(%rbp), %rax
movl -0x20(%rbp), %edx
movl %edx, %esi
addl $0x1, %esi
movl %esi, -0x20(%rbp)
movl %edx, %edx
imulq $0x38, %rdx, %rdx
addq %rdx, %rax
movl %ecx, 0x8(%rax)
jmp 0xb26a0a
jmp 0xb26b4b
jmp 0xb26a0a
movq -0x440(%rbp), %rdi
callq 0x3b980
leaq 0x816ac(%rip), %rax # 0xba820f
movq %rax, -0x18(%rbp)
cmpl $0x0, -0x1c(%rbp)
jbe 0xb26b87
movq -0x10(%rbp), %rax
movl -0x1c(%rbp), %ecx
subl $0x1, %ecx
movl %ecx, %ecx
imulq $0x38, %rcx, %rcx
addq %rcx, %rax
movq (%rax), %rax
movq %rax, -0x18(%rbp)
jmp 0xb26b89
movl -0x1c(%rbp), %eax
cmpl -0x8(%rbp), %eax
jae 0xb26bf2
movq -0x18(%rbp), %rax
movq %rax, -0x458(%rbp)
movq -0x18(%rbp), %rdi
callq 0x3b8f0
movq -0x458(%rbp), %rdi
movq %rax, %rsi
callq 0xb09dd0
movq %rax, -0x438(%rbp)
cmpq $0x0, -0x438(%rbp)
jne 0xb26bce
movl $0xfffffff4, -0x4(%rbp) # imm = 0xFFFFFFF4
jmp 0xb26bf9
movq -0x438(%rbp), %rcx
movq -0x10(%rbp), %rax
movl -0x1c(%rbp), %edx
movl %edx, %esi
addl $0x1, %esi
movl %esi, -0x1c(%rbp)
movl %edx, %edx
imulq $0x38, %rdx, %rdx
addq %rdx, %rax
movq %rcx, (%rax)
jmp 0xb26b89
movl $0x0, -0x4(%rbp)
movl -0x4(%rbp), %eax
addq $0x460, %rsp # imm = 0x460
popq %rbp
retq
nopw %cs:(%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmlibuv/src/unix/linux-core.c
|
uv_interface_addresses
|
int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
#ifndef HAVE_IFADDRS_H
*count = 0;
*addresses = NULL;
return UV_ENOSYS;
#else
struct ifaddrs *addrs, *ent;
uv_interface_address_t* address;
int i;
struct sockaddr_ll *sll;
*count = 0;
*addresses = NULL;
if (getifaddrs(&addrs))
return UV__ERR(errno);
/* Count the number of interfaces */
for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFADDR))
continue;
(*count)++;
}
if (*count == 0) {
freeifaddrs(addrs);
return 0;
}
/* Make sure the memory is initialized to zero using calloc() */
*addresses = uv__calloc(*count, sizeof(**addresses));
if (!(*addresses)) {
freeifaddrs(addrs);
return UV_ENOMEM;
}
address = *addresses;
for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFADDR))
continue;
address->name = uv__strdup(ent->ifa_name);
if (ent->ifa_addr->sa_family == AF_INET6) {
address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr);
} else {
address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr);
}
if (ent->ifa_netmask->sa_family == AF_INET6) {
address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask);
} else {
address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask);
}
address->is_internal = !!(ent->ifa_flags & IFF_LOOPBACK);
address++;
}
/* Fill in physical addresses for each interface */
for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFPHYS))
continue;
address = *addresses;
for (i = 0; i < (*count); i++) {
size_t namelen = strlen(ent->ifa_name);
/* Alias interfaces share the same physical address */
if (strncmp(address->name, ent->ifa_name, namelen) == 0 &&
(address->name[namelen] == 0 || address->name[namelen] == ':')) {
sll = (struct sockaddr_ll*)ent->ifa_addr;
memcpy(address->phys_addr, sll->sll_addr, sizeof(address->phys_addr));
}
address++;
}
}
freeifaddrs(addrs);
return 0;
#endif
}
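A minimal usage sketch for the routine above, assuming only the public libuv header; the companion uv_free_interface_addresses() releases the duplicated names and the array allocated here:

#include <stdio.h>
#include <uv.h>

int main(void) {
  uv_interface_address_t* info;
  int count;
  int i;

  if (uv_interface_addresses(&info, &count) != 0)
    return 1;

  for (i = 0; i < count; i++)
    printf("%s%s\n", info[i].name, info[i].is_internal ? " (internal)" : "");

  uv_free_interface_addresses(info, count);  /* frees the names and the array */
  return 0;
}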
|
pushq %rbp
movq %rsp, %rbp
subq $0x50, %rsp
movq %rdi, -0x10(%rbp)
movq %rsi, -0x18(%rbp)
movq -0x18(%rbp), %rax
movl $0x0, (%rax)
movq -0x10(%rbp), %rax
movq $0x0, (%rax)
leaq -0x20(%rbp), %rdi
callq 0x3df30
cmpl $0x0, %eax
je 0xb26f47
callq 0x3e130
movq %rax, %rcx
xorl %eax, %eax
subl (%rcx), %eax
movl %eax, -0x4(%rbp)
jmp 0xb2720d
movq -0x20(%rbp), %rax
movq %rax, -0x28(%rbp)
cmpq $0x0, -0x28(%rbp)
je 0xb26f83
movq -0x28(%rbp), %rdi
movl $0x1, %esi
callq 0xb27220
cmpl $0x0, %eax
je 0xb26f6b
jmp 0xb26f76
movq -0x18(%rbp), %rax
movl (%rax), %ecx
addl $0x1, %ecx
movl %ecx, (%rax)
movq -0x28(%rbp), %rax
movq (%rax), %rax
movq %rax, -0x28(%rbp)
jmp 0xb26f4f
movq -0x18(%rbp), %rax
cmpl $0x0, (%rax)
jne 0xb26fa1
movq -0x20(%rbp), %rdi
callq 0x3d7c0
movl $0x0, -0x4(%rbp)
jmp 0xb2720d
movq -0x18(%rbp), %rax
movslq (%rax), %rdi
movl $0x50, %esi
callq 0xb09ea0
movq %rax, %rcx
movq -0x10(%rbp), %rax
movq %rcx, (%rax)
movq -0x10(%rbp), %rax
cmpq $0x0, (%rax)
jne 0xb26fdb
movq -0x20(%rbp), %rdi
callq 0x3d7c0
movl $0xfffffff4, -0x4(%rbp) # imm = 0xFFFFFFF4
jmp 0xb2720d
movq -0x10(%rbp), %rax
movq (%rax), %rax
movq %rax, -0x30(%rbp)
movq -0x20(%rbp), %rax
movq %rax, -0x28(%rbp)
cmpq $0x0, -0x28(%rbp)
je 0xb27110
movq -0x28(%rbp), %rdi
movl $0x1, %esi
callq 0xb27220
cmpl $0x0, %eax
je 0xb27011
jmp 0xb27100
movq -0x28(%rbp), %rax
movq 0x8(%rax), %rdi
callq 0xb09d20
movq %rax, %rcx
movq -0x30(%rbp), %rax
movq %rcx, (%rax)
movq -0x28(%rbp), %rax
movq 0x18(%rax), %rax
movzwl (%rax), %eax
cmpl $0xa, %eax
jne 0xb27063
movq -0x30(%rbp), %rax
movq -0x28(%rbp), %rcx
movq 0x18(%rcx), %rcx
movq (%rcx), %rdx
movq %rdx, 0x14(%rax)
movq 0x8(%rcx), %rdx
movq %rdx, 0x1c(%rax)
movq 0x10(%rcx), %rdx
movq %rdx, 0x24(%rax)
movl 0x18(%rcx), %ecx
movl %ecx, 0x2c(%rax)
jmp 0xb2707e
movq -0x30(%rbp), %rax
movq -0x28(%rbp), %rcx
movq 0x18(%rcx), %rcx
movq (%rcx), %rdx
movq %rdx, 0x14(%rax)
movq 0x8(%rcx), %rcx
movq %rcx, 0x1c(%rax)
movq -0x28(%rbp), %rax
movq 0x20(%rax), %rax
movzwl (%rax), %eax
cmpl $0xa, %eax
jne 0xb270b9
movq -0x30(%rbp), %rax
movq -0x28(%rbp), %rcx
movq 0x20(%rcx), %rcx
movq (%rcx), %rdx
movq %rdx, 0x30(%rax)
movq 0x8(%rcx), %rdx
movq %rdx, 0x38(%rax)
movq 0x10(%rcx), %rdx
movq %rdx, 0x40(%rax)
movl 0x18(%rcx), %ecx
movl %ecx, 0x48(%rax)
jmp 0xb270d4
movq -0x30(%rbp), %rax
movq -0x28(%rbp), %rcx
movq 0x20(%rcx), %rcx
movq (%rcx), %rdx
movq %rdx, 0x30(%rax)
movq 0x8(%rcx), %rcx
movq %rcx, 0x38(%rax)
movq -0x28(%rbp), %rax
movl 0x10(%rax), %eax
andl $0x8, %eax
cmpl $0x0, %eax
setne %al
xorb $-0x1, %al
xorb $-0x1, %al
andb $0x1, %al
movzbl %al, %ecx
movq -0x30(%rbp), %rax
movl %ecx, 0x10(%rax)
movq -0x30(%rbp), %rax
addq $0x50, %rax
movq %rax, -0x30(%rbp)
movq -0x28(%rbp), %rax
movq (%rax), %rax
movq %rax, -0x28(%rbp)
jmp 0xb26fee
movq -0x20(%rbp), %rax
movq %rax, -0x28(%rbp)
cmpq $0x0, -0x28(%rbp)
je 0xb271fd
movq -0x28(%rbp), %rdi
xorl %esi, %esi
callq 0xb27220
cmpl $0x0, %eax
je 0xb27138
jmp 0xb271ed
movq -0x10(%rbp), %rax
movq (%rax), %rax
movq %rax, -0x30(%rbp)
movl $0x0, -0x34(%rbp)
movl -0x34(%rbp), %eax
movq -0x18(%rbp), %rcx
cmpl (%rcx), %eax
jge 0xb271eb
movq -0x28(%rbp), %rax
movq 0x8(%rax), %rdi
callq 0x3b8f0
movq %rax, -0x48(%rbp)
movq -0x30(%rbp), %rax
movq (%rax), %rdi
movq -0x28(%rbp), %rax
movq 0x8(%rax), %rsi
movq -0x48(%rbp), %rdx
callq 0x3ce80
cmpl $0x0, %eax
jne 0xb271d1
movq -0x30(%rbp), %rax
movq (%rax), %rax
movq -0x48(%rbp), %rcx
movsbl (%rax,%rcx), %eax
cmpl $0x0, %eax
je 0xb271af
movq -0x30(%rbp), %rax
movq (%rax), %rax
movq -0x48(%rbp), %rcx
movsbl (%rax,%rcx), %eax
cmpl $0x3a, %eax
jne 0xb271d1
movq -0x28(%rbp), %rax
movq 0x18(%rax), %rax
movq %rax, -0x40(%rbp)
movq -0x30(%rbp), %rax
movq -0x40(%rbp), %rcx
movl 0xc(%rcx), %edx
movl %edx, 0x8(%rax)
movw 0x10(%rcx), %cx
movw %cx, 0xc(%rax)
movq -0x30(%rbp), %rax
addq $0x50, %rax
movq %rax, -0x30(%rbp)
movl -0x34(%rbp), %eax
addl $0x1, %eax
movl %eax, -0x34(%rbp)
jmp 0xb2714a
jmp 0xb271ed
movq -0x28(%rbp), %rax
movq (%rax), %rax
movq %rax, -0x28(%rbp)
jmp 0xb27118
movq -0x20(%rbp), %rdi
callq 0x3d7c0
movl $0x0, -0x4(%rbp)
movl -0x4(%rbp), %eax
addq $0x50, %rsp
popq %rbp
retq
nopw %cs:(%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmlibuv/src/unix/linux-core.c
|
uv_fs_event_stop
|
int uv_fs_event_stop(uv_fs_event_t* handle) {
struct watcher_list* w;
if (!uv__is_active(handle))
return 0;
w = find_watcher(handle->loop, handle->wd);
assert(w != NULL);
handle->wd = -1;
handle->path = NULL;
uv__handle_stop(handle);
QUEUE_REMOVE(&handle->watchers);
maybe_free_watcher_list(w, handle->loop);
return 0;
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x20, %rsp
movq %rdi, -0x10(%rbp)
movq -0x10(%rbp), %rax
movl 0x58(%rax), %eax
andl $0x4, %eax
cmpl $0x0, %eax
jne 0xb27cc7
movl $0x0, -0x4(%rbp)
jmp 0xb27da9
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rdi
movq -0x10(%rbp), %rax
movl 0x80(%rax), %esi
callq 0xb28180
movq %rax, -0x18(%rbp)
cmpq $0x0, -0x18(%rbp)
je 0xb27ceb
jmp 0xb27d0a
leaq 0xb620c(%rip), %rdi # 0xbddefe
leaq 0xb6167(%rip), %rsi # 0xbdde60
movl $0x138, %edx # imm = 0x138
leaq 0xb6203(%rip), %rcx # 0xbddf08
callq 0x3b440
movq -0x10(%rbp), %rax
movl $0xffffffff, 0x80(%rax) # imm = 0xFFFFFFFF
movq -0x10(%rbp), %rax
movq $0x0, 0x60(%rax)
movq -0x10(%rbp), %rax
movl 0x58(%rax), %eax
andl $0x4, %eax
cmpl $0x0, %eax
jne 0xb27d35
jmp 0xb27d68
movq -0x10(%rbp), %rax
movl 0x58(%rax), %ecx
andl $-0x5, %ecx
movl %ecx, 0x58(%rax)
movq -0x10(%rbp), %rax
movl 0x58(%rax), %eax
andl $0x8, %eax
cmpl $0x0, %eax
je 0xb27d66
jmp 0xb27d53
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rax
movl 0x8(%rax), %ecx
addl $-0x1, %ecx
movl %ecx, 0x8(%rax)
jmp 0xb27d66
jmp 0xb27d68
jmp 0xb27d6a
movq -0x10(%rbp), %rax
movq 0x70(%rax), %rcx
movq -0x10(%rbp), %rax
movq 0x78(%rax), %rax
movq %rcx, (%rax)
movq -0x10(%rbp), %rax
movq 0x78(%rax), %rcx
movq -0x10(%rbp), %rax
movq 0x70(%rax), %rax
movq %rcx, 0x8(%rax)
movq -0x18(%rbp), %rdi
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rsi
callq 0xb27dc0
movl $0x0, -0x4(%rbp)
movl -0x4(%rbp), %eax
addq $0x20, %rsp
popq %rbp
retq
nopw %cs:(%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmlibuv/src/unix/linux-inotify.c
|
uv_get_process_title
|
int uv_get_process_title(char* buffer, size_t size) {
if (buffer == NULL || size == 0)
return UV_EINVAL;
/* If uv_setup_args wasn't called or failed, we can't continue. */
if (args_mem == NULL)
return UV_ENOBUFS;
uv_once(&process_title_mutex_once, init_process_title_mutex_once);
uv_mutex_lock(&process_title_mutex);
if (size <= process_title.len) {
uv_mutex_unlock(&process_title_mutex);
return UV_ENOBUFS;
}
if (process_title.len != 0)
memcpy(buffer, process_title.str, process_title.len + 1);
buffer[process_title.len] = '\0';
uv_mutex_unlock(&process_title_mutex);
return 0;
}
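A hedged usage sketch: uv_setup_args() must have run first (otherwise the UV_ENOBUFS path above is taken because args_mem is still NULL), and the buffer has to hold the title plus its terminating NUL:

#include <stdio.h>
#include <uv.h>

int main(int argc, char** argv) {
  char title[256];

  argv = uv_setup_args(argc, argv);  /* stashes the argument memory checked above */
  if (uv_get_process_title(title, sizeof(title)) == 0)
    printf("process title: %s\n", title);
  return 0;
}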
|
pushq %rbp
movq %rsp, %rbp
subq $0x20, %rsp
movq %rdi, -0x10(%rbp)
movq %rsi, -0x18(%rbp)
cmpq $0x0, -0x10(%rbp)
je 0xb299ce
cmpq $0x0, -0x18(%rbp)
jne 0xb299da
movl $0xffffffea, -0x4(%rbp) # imm = 0xFFFFFFEA
jmp 0xb29a78
cmpq $0x0, 0x3dfa2e(%rip) # 0xf09410
jne 0xb299f0
movl $0xffffff97, -0x4(%rbp) # imm = 0xFFFFFF97
jmp 0xb29a78
leaq 0x3dfa39(%rip), %rdi # 0xf09430
leaq -0x6e(%rip), %rsi # 0xb29990
callq 0xb21780
leaq 0x3dfa2e(%rip), %rdi # 0xf09438
callq 0xb21510
movq -0x18(%rbp), %rax
cmpq 0x3dfa06(%rip), %rax # 0xf09420
ja 0xb29a31
leaq 0x3dfa15(%rip), %rdi # 0xf09438
callq 0xb21590
movl $0xffffff97, -0x4(%rbp) # imm = 0xFFFFFF97
jmp 0xb29a78
cmpq $0x0, 0x3df9e7(%rip) # 0xf09420
je 0xb29a56
movq -0x10(%rbp), %rdi
movq 0x3df9d2(%rip), %rsi # 0xf09418
movq 0x3df9d3(%rip), %rdx # 0xf09420
addq $0x1, %rdx
callq 0x3cb70
movq -0x10(%rbp), %rax
movq 0x3df9bf(%rip), %rcx # 0xf09420
movb $0x0, (%rax,%rcx)
leaq 0x3df9cc(%rip), %rdi # 0xf09438
callq 0xb21590
movl $0x0, -0x4(%rbp)
movl -0x4(%rbp), %eax
addq $0x20, %rsp
popq %rbp
retq
nopw %cs:(%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmlibuv/src/unix/proctitle.c
|
uv_inet_pton
|
int uv_inet_pton(int af, const char* src, void* dst) {
if (src == NULL || dst == NULL)
return UV_EINVAL;
switch (af) {
case AF_INET:
return (inet_pton4(src, dst));
case AF_INET6: {
int len;
char tmp[UV__INET6_ADDRSTRLEN], *s, *p;
s = (char*) src;
p = strchr(src, '%');
if (p != NULL) {
s = tmp;
len = p - src;
if (len > UV__INET6_ADDRSTRLEN-1)
return UV_EINVAL;
memcpy(s, src, len);
s[len] = '\0';
}
return inet_pton6(s, dst);
}
default:
return UV_EAFNOSUPPORT;
}
/* NOTREACHED */
}
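A hedged usage sketch: the AF_INET6 branch above copies the literal into a temporary buffer with any "%zone" suffix stripped before parsing, so scoped addresses are accepted too:

#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <uv.h>

int main(void) {
  struct in_addr v4;
  struct in6_addr v6;

  printf("%d\n", uv_inet_pton(AF_INET, "192.0.2.1", &v4));     /* 0 on success */
  printf("%d\n", uv_inet_pton(AF_INET6, "fe80::1%eth0", &v6)); /* zone id stripped */
  printf("%d\n", uv_inet_pton(AF_UNSPEC, "::1", &v6));         /* UV_EAFNOSUPPORT */
  return 0;
}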
|
pushq %rbp
movq %rsp, %rbp
subq $0x70, %rsp
movl %edi, -0x8(%rbp)
movq %rsi, -0x10(%rbp)
movq %rdx, -0x18(%rbp)
cmpq $0x0, -0x10(%rbp)
je 0xb2a941
cmpq $0x0, -0x18(%rbp)
jne 0xb2a94d
movl $0xffffffea, -0x4(%rbp) # imm = 0xFFFFFFEA
jmp 0xb2a9f5
movl -0x8(%rbp), %eax
movl %eax, -0x64(%rbp)
subl $0x2, %eax
je 0xb2a967
jmp 0xb2a95a
movl -0x64(%rbp), %eax
subl $0xa, %eax
je 0xb2a979
jmp 0xb2a9ee
movq -0x10(%rbp), %rdi
movq -0x18(%rbp), %rsi
callq 0xb2aa00
movl %eax, -0x4(%rbp)
jmp 0xb2a9f5
movq -0x10(%rbp), %rax
movq %rax, -0x58(%rbp)
movq -0x10(%rbp), %rdi
movl $0x25, %esi
callq 0x3d450
movq %rax, -0x60(%rbp)
cmpq $0x0, -0x60(%rbp)
je 0xb2a9dc
leaq -0x50(%rbp), %rax
movq %rax, -0x58(%rbp)
movq -0x60(%rbp), %rax
movq -0x10(%rbp), %rcx
subq %rcx, %rax
movl %eax, -0x1c(%rbp)
cmpl $0x2d, -0x1c(%rbp)
jle 0xb2a9bf
movl $0xffffffea, -0x4(%rbp) # imm = 0xFFFFFFEA
jmp 0xb2a9f5
movq -0x58(%rbp), %rdi
movq -0x10(%rbp), %rsi
movslq -0x1c(%rbp), %rdx
callq 0x3cb70
movq -0x58(%rbp), %rax
movslq -0x1c(%rbp), %rcx
movb $0x0, (%rax,%rcx)
movq -0x58(%rbp), %rdi
movq -0x18(%rbp), %rsi
callq 0xb2ab60
movl %eax, -0x4(%rbp)
jmp 0xb2a9f5
movl $0xffffff9f, -0x4(%rbp) # imm = 0xFFFFFF9F
movl -0x4(%rbp), %eax
addq $0x70, %rsp
popq %rbp
retq
nop
|
/JKorbelRA[P]CMake/Utilities/cmlibuv/src/inet.c
|
slide_hash
|
local void slide_hash(s)
deflate_state *s;
{
unsigned n, m;
Posf *p;
uInt wsize = s->w_size;
n = s->hash_size;
p = &s->head[n];
do {
m = *--p;
*p = (Pos)(m >= wsize ? m - wsize : NIL);
} while (--n);
n = wsize;
#ifndef FASTEST
p = &s->prev[n];
do {
m = *--p;
*p = (Pos)(m >= wsize ? m - wsize : NIL);
/* If n is not on any hash chain, prev[n] is garbage but
* its value will never be used.
*/
} while (--n);
#endif
}
|
pushq %rbp
movq %rsp, %rbp
movq %rdi, -0x8(%rbp)
movq -0x8(%rbp), %rax
movl 0x50(%rax), %eax
movl %eax, -0x1c(%rbp)
movq -0x8(%rbp), %rax
movl 0x84(%rax), %eax
movl %eax, -0xc(%rbp)
movq -0x8(%rbp), %rax
movq 0x78(%rax), %rax
movl -0xc(%rbp), %ecx
shlq %rcx
addq %rcx, %rax
movq %rax, -0x18(%rbp)
movq -0x18(%rbp), %rax
movq %rax, %rcx
addq $-0x2, %rcx
movq %rcx, -0x18(%rbp)
movzwl -0x2(%rax), %eax
movl %eax, -0x10(%rbp)
movl -0x10(%rbp), %eax
cmpl -0x1c(%rbp), %eax
jb 0xb2f34d
movl -0x10(%rbp), %eax
subl -0x1c(%rbp), %eax
movl %eax, -0x20(%rbp)
jmp 0xb2f354
xorl %eax, %eax
movl %eax, -0x20(%rbp)
jmp 0xb2f354
movl -0x20(%rbp), %eax
movw %ax, %cx
movq -0x18(%rbp), %rax
movw %cx, (%rax)
movl -0xc(%rbp), %eax
addl $-0x1, %eax
movl %eax, -0xc(%rbp)
cmpl $0x0, %eax
jne 0xb2f324
movl -0x1c(%rbp), %eax
movl %eax, -0xc(%rbp)
movq -0x8(%rbp), %rax
movq 0x70(%rax), %rax
movl -0xc(%rbp), %ecx
shlq %rcx
addq %rcx, %rax
movq %rax, -0x18(%rbp)
movq -0x18(%rbp), %rax
movq %rax, %rcx
addq $-0x2, %rcx
movq %rcx, -0x18(%rbp)
movzwl -0x2(%rax), %eax
movl %eax, -0x10(%rbp)
movl -0x10(%rbp), %eax
cmpl -0x1c(%rbp), %eax
jb 0xb2f3b3
movl -0x10(%rbp), %eax
subl -0x1c(%rbp), %eax
movl %eax, -0x24(%rbp)
jmp 0xb2f3ba
xorl %eax, %eax
movl %eax, -0x24(%rbp)
jmp 0xb2f3ba
movl -0x24(%rbp), %eax
movw %ax, %cx
movq -0x18(%rbp), %rax
movw %cx, (%rax)
movl -0xc(%rbp), %eax
addl $-0x1, %eax
movl %eax, -0xc(%rbp)
cmpl $0x0, %eax
jne 0xb2f38a
popq %rbp
retq
nopw (%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzlib/deflate.c
|
cm_zlib_gzseek64
|
z_off64_t ZEXPORT gzseek64(file, offset, whence)
gzFile file;
z_off64_t offset;
int whence;
{
unsigned n;
z_off64_t ret;
gz_statep state;
/* get internal structure and check integrity */
if (file == NULL)
return -1;
state = (gz_statep)file;
if (state->mode != GZ_READ && state->mode != GZ_WRITE)
return -1;
/* check that there's no error */
if (state->err != Z_OK && state->err != Z_BUF_ERROR)
return -1;
/* can only seek from start or relative to current position */
if (whence != SEEK_SET && whence != SEEK_CUR)
return -1;
/* normalize offset to a SEEK_CUR specification */
if (whence == SEEK_SET)
offset -= state->x.pos;
else if (state->seek)
offset += state->skip;
state->seek = 0;
/* if within raw area while reading, just go there */
if (state->mode == GZ_READ && state->how == COPY &&
state->x.pos + offset >= 0) {
ret = LSEEK(state->fd, offset - (z_off64_t)state->x.have, SEEK_CUR);
if (ret == -1)
return -1;
state->x.have = 0;
state->eof = 0;
state->past = 0;
state->seek = 0;
gz_error(state, Z_OK, NULL);
state->strm.avail_in = 0;
state->x.pos += offset;
return state->x.pos;
}
/* calculate skip amount, rewinding if needed for back seek when reading */
if (offset < 0) {
if (state->mode != GZ_READ) /* writing -- can't go backwards */
return -1;
offset += state->x.pos;
if (offset < 0) /* before start of file! */
return -1;
if (gzrewind(file) == -1) /* rewind, then skip to offset */
return -1;
}
/* if reading, skip what's in output buffer (one less gzgetc() check) */
if (state->mode == GZ_READ) {
n = GT_OFF(state->x.have) || (z_off64_t)state->x.have > offset ?
(unsigned)offset : state->x.have;
state->x.have -= n;
state->x.next += n;
state->x.pos += n;
offset -= n;
}
/* request skip (if not zero) */
if (offset) {
state->seek = 1;
state->skip = offset;
}
return state->x.pos + offset;
}
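A hedged sketch of seeking through the public API; only SEEK_SET and SEEK_CUR are honoured, as the whence check above shows ("input.gz" is a placeholder name):

#include <stdio.h>
#include <zlib.h>

int main(void) {
  char buf[16];
  gzFile f = gzopen("input.gz", "rb");

  if (f == NULL)
    return 1;
  if (gzseek(f, 100, SEEK_SET) == -1)  /* skip the first 100 uncompressed bytes */
    return 1;
  printf("read %d bytes, now at offset %ld\n",
         gzread(f, buf, (unsigned)sizeof(buf)), (long)gztell(f));
  gzclose(f);
  return 0;
}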
|
pushq %rbp
movq %rsp, %rbp
subq $0x40, %rsp
movq %rdi, -0x10(%rbp)
movq %rsi, -0x18(%rbp)
movl %edx, -0x1c(%rbp)
cmpq $0x0, -0x10(%rbp)
jne 0xb32c77
movq $-0x1, -0x8(%rbp)
jmp 0xb32ef9
movq -0x10(%rbp), %rax
movq %rax, -0x30(%rbp)
movq -0x30(%rbp), %rax
cmpl $0x1c4f, 0x18(%rax) # imm = 0x1C4F
je 0xb32ca6
movq -0x30(%rbp), %rax
cmpl $0x79b1, 0x18(%rax) # imm = 0x79B1
je 0xb32ca6
movq $-0x1, -0x8(%rbp)
jmp 0xb32ef9
movq -0x30(%rbp), %rax
cmpl $0x0, 0x74(%rax)
je 0xb32cc7
movq -0x30(%rbp), %rax
cmpl $-0x5, 0x74(%rax)
je 0xb32cc7
movq $-0x1, -0x8(%rbp)
jmp 0xb32ef9
cmpl $0x0, -0x1c(%rbp)
je 0xb32ce0
cmpl $0x1, -0x1c(%rbp)
je 0xb32ce0
movq $-0x1, -0x8(%rbp)
jmp 0xb32ef9
cmpl $0x0, -0x1c(%rbp)
jne 0xb32cfb
movq -0x30(%rbp), %rax
movq 0x10(%rax), %rcx
movq -0x18(%rbp), %rax
subq %rcx, %rax
movq %rax, -0x18(%rbp)
jmp 0xb32d17
movq -0x30(%rbp), %rax
cmpl $0x0, 0x70(%rax)
je 0xb32d15
movq -0x30(%rbp), %rax
movq 0x68(%rax), %rax
addq -0x18(%rbp), %rax
movq %rax, -0x18(%rbp)
jmp 0xb32d17
movq -0x30(%rbp), %rax
movl $0x0, 0x70(%rax)
movq -0x30(%rbp), %rax
cmpl $0x1c4f, 0x18(%rax) # imm = 0x1C4F
jne 0xb32df6
movq -0x30(%rbp), %rax
cmpl $0x1, 0x44(%rax)
jne 0xb32df6
movq -0x30(%rbp), %rax
movq 0x10(%rax), %rax
addq -0x18(%rbp), %rax
cmpq $0x0, %rax
jl 0xb32df6
movq -0x30(%rbp), %rax
movl 0x1c(%rax), %edi
movq -0x18(%rbp), %rsi
movq -0x30(%rbp), %rax
movl (%rax), %eax
subq %rax, %rsi
movl $0x1, %edx
callq 0x3b6b0
movq %rax, -0x28(%rbp)
cmpq $-0x1, -0x28(%rbp)
jne 0xb32d8d
movq $-0x1, -0x8(%rbp)
jmp 0xb32ef9
movq -0x30(%rbp), %rax
movl $0x0, (%rax)
movq -0x30(%rbp), %rax
movl $0x0, 0x50(%rax)
movq -0x30(%rbp), %rax
movl $0x0, 0x54(%rax)
movq -0x30(%rbp), %rax
movl $0x0, 0x70(%rax)
movq -0x30(%rbp), %rdi
xorl %esi, %esi
xorl %eax, %eax
movl %eax, %edx
callq 0xb32f10
movq -0x30(%rbp), %rax
movl $0x0, 0x88(%rax)
movq -0x18(%rbp), %rcx
movq -0x30(%rbp), %rax
addq 0x10(%rax), %rcx
movq %rcx, 0x10(%rax)
movq -0x30(%rbp), %rax
movq 0x10(%rax), %rax
movq %rax, -0x8(%rbp)
jmp 0xb32ef9
cmpq $0x0, -0x18(%rbp)
jge 0xb32e58
movq -0x30(%rbp), %rax
cmpl $0x1c4f, 0x18(%rax) # imm = 0x1C4F
je 0xb32e17
movq $-0x1, -0x8(%rbp)
jmp 0xb32ef9
movq -0x30(%rbp), %rax
movq 0x10(%rax), %rax
addq -0x18(%rbp), %rax
movq %rax, -0x18(%rbp)
cmpq $0x0, -0x18(%rbp)
jge 0xb32e3b
movq $-0x1, -0x8(%rbp)
jmp 0xb32ef9
movq -0x10(%rbp), %rdi
callq 0xb32b30
cmpl $-0x1, %eax
jne 0xb32e56
movq $-0x1, -0x8(%rbp)
jmp 0xb32ef9
jmp 0xb32e58
movq -0x30(%rbp), %rax
cmpl $0x1c4f, 0x18(%rax) # imm = 0x1C4F
jne 0xb32ecb
movq -0x30(%rbp), %rax
movl (%rax), %eax
cmpq -0x18(%rbp), %rax
jle 0xb32e7a
movq -0x18(%rbp), %rax
movl %eax, -0x34(%rbp)
jmp 0xb32e83
movq -0x30(%rbp), %rax
movl (%rax), %eax
movl %eax, -0x34(%rbp)
movl -0x34(%rbp), %eax
movl %eax, -0x20(%rbp)
movl -0x20(%rbp), %edx
movq -0x30(%rbp), %rax
movl (%rax), %ecx
subl %edx, %ecx
movl %ecx, (%rax)
movl -0x20(%rbp), %edx
movq -0x30(%rbp), %rax
movq 0x8(%rax), %rcx
movl %edx, %edx
addq %rdx, %rcx
movq %rcx, 0x8(%rax)
movl -0x20(%rbp), %eax
movl %eax, %ecx
movq -0x30(%rbp), %rax
addq 0x10(%rax), %rcx
movq %rcx, 0x10(%rax)
movl -0x20(%rbp), %eax
movl %eax, %ecx
movq -0x18(%rbp), %rax
subq %rcx, %rax
movq %rax, -0x18(%rbp)
cmpq $0x0, -0x18(%rbp)
je 0xb32ee9
movq -0x30(%rbp), %rax
movl $0x1, 0x70(%rax)
movq -0x18(%rbp), %rcx
movq -0x30(%rbp), %rax
movq %rcx, 0x68(%rax)
movq -0x30(%rbp), %rax
movq 0x10(%rax), %rax
addq -0x18(%rbp), %rax
movq %rax, -0x8(%rbp)
movq -0x8(%rbp), %rax
addq $0x40, %rsp
popq %rbp
retq
nopw %cs:(%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzlib/gzlib.c
|
cm_zlib_gzread
|
int ZEXPORT gzread(file, buf, len)
gzFile file;
voidp buf;
unsigned len;
{
gz_statep state;
/* get internal structure */
if (file == NULL)
return -1;
state = (gz_statep)file;
/* check that we're reading and that there's no (serious) error */
if (state->mode != GZ_READ ||
(state->err != Z_OK && state->err != Z_BUF_ERROR))
return -1;
/* since an int is returned, make sure len fits in one; otherwise return
with an error (this avoids a flaw in the interface) */
if ((int)len < 0) {
gz_error(state, Z_STREAM_ERROR, "request does not fit in an int");
return -1;
}
/* read len or fewer bytes to buf */
len = (unsigned)gz_read(state, buf, len);
/* check for an error */
if (len == 0 && state->err != Z_OK && state->err != Z_BUF_ERROR)
return -1;
/* return the number of bytes read (this is assured to fit in an int) */
return (int)len;
}
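A hedged sketch of the public read loop this wrapper belongs to ("input.gz" is a placeholder, not something referenced by this file):

#include <stdio.h>
#include <zlib.h>

int main(void) {
  char buf[4096];
  int n;
  gzFile f = gzopen("input.gz", "rb");

  if (f == NULL)
    return 1;
  while ((n = gzread(f, buf, (unsigned)sizeof(buf))) > 0)
    fwrite(buf, 1, (size_t)n, stdout);  /* n == -1 would signal a read error */
  gzclose(f);
  return 0;
}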
|
pushq %rbp
movq %rsp, %rbp
subq $0x30, %rsp
movq %rdi, -0x10(%rbp)
movq %rsi, -0x18(%rbp)
movl %edx, -0x1c(%rbp)
cmpq $0x0, -0x10(%rbp)
jne 0xb33446
movl $0xffffffff, -0x4(%rbp) # imm = 0xFFFFFFFF
jmp 0xb334da
movq -0x10(%rbp), %rax
movq %rax, -0x28(%rbp)
movq -0x28(%rbp), %rax
cmpl $0x1c4f, 0x18(%rax) # imm = 0x1C4F
jne 0xb3346f
movq -0x28(%rbp), %rax
cmpl $0x0, 0x74(%rax)
je 0xb33478
movq -0x28(%rbp), %rax
cmpl $-0x5, 0x74(%rax)
je 0xb33478
movl $0xffffffff, -0x4(%rbp) # imm = 0xFFFFFFFF
jmp 0xb334da
cmpl $0x0, -0x1c(%rbp)
jge 0xb3349c
movq -0x28(%rbp), %rdi
movl $0xfffffffe, %esi # imm = 0xFFFFFFFE
leaq 0xb1bbe(%rip), %rdx # 0xbe504c
callq 0xb32f10
movl $0xffffffff, -0x4(%rbp) # imm = 0xFFFFFFFF
jmp 0xb334da
movq -0x28(%rbp), %rdi
movq -0x18(%rbp), %rsi
movl -0x1c(%rbp), %eax
movl %eax, %edx
callq 0xb334f0
movl %eax, -0x1c(%rbp)
cmpl $0x0, -0x1c(%rbp)
jne 0xb334d4
movq -0x28(%rbp), %rax
cmpl $0x0, 0x74(%rax)
je 0xb334d4
movq -0x28(%rbp), %rax
cmpl $-0x5, 0x74(%rax)
je 0xb334d4
movl $0xffffffff, -0x4(%rbp) # imm = 0xFFFFFFFF
jmp 0xb334da
movl -0x1c(%rbp), %eax
movl %eax, -0x4(%rbp)
movl -0x4(%rbp), %eax
addq $0x30, %rsp
popq %rbp
retq
nopw %cs:(%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzlib/gzread.c
|
gz_fetch
|
local int gz_fetch(state)
gz_statep state;
{
z_streamp strm = &(state->strm);
do {
switch(state->how) {
case LOOK: /* -> LOOK, COPY (only if never GZIP), or GZIP */
if (gz_look(state) == -1)
return -1;
if (state->how == LOOK)
return 0;
break;
case COPY: /* -> COPY */
if (gz_load(state, state->out, state->size << 1, &(state->x.have))
== -1)
return -1;
state->x.next = state->out;
return 0;
case GZIP: /* -> GZIP or LOOK (if end of gzip stream) */
strm->avail_out = state->size << 1;
strm->next_out = state->out;
if (gz_decomp(state) == -1)
return -1;
}
} while (state->x.have == 0 && (!state->eof || strm->avail_in));
return 0;
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x20, %rsp
movq %rdi, -0x10(%rbp)
movq -0x10(%rbp), %rax
addq $0x80, %rax
movq %rax, -0x18(%rbp)
movq -0x10(%rbp), %rax
movl 0x44(%rax), %eax
movl %eax, -0x1c(%rbp)
testl %eax, %eax
je 0xb33e45
jmp 0xb33e2a
movl -0x1c(%rbp), %eax
subl $0x1, %eax
je 0xb33e7a
jmp 0xb33e34
movl -0x1c(%rbp), %eax
subl $0x2, %eax
je 0xb33ec2
jmp 0xb33efb
movq -0x10(%rbp), %rdi
callq 0xb33fb0
cmpl $-0x1, %eax
jne 0xb33e5f
movl $0xffffffff, -0x4(%rbp) # imm = 0xFFFFFFFF
jmp 0xb33f40
movq -0x10(%rbp), %rax
cmpl $0x0, 0x44(%rax)
jne 0xb33e75
movl $0x0, -0x4(%rbp)
jmp 0xb33f40
jmp 0xb33efb
movq -0x10(%rbp), %rdi
movq -0x10(%rbp), %rax
movq 0x38(%rax), %rsi
movq -0x10(%rbp), %rax
movl 0x28(%rax), %edx
shll %edx
movq -0x10(%rbp), %rcx
callq 0xb34350
cmpl $-0x1, %eax
jne 0xb33ea9
movl $0xffffffff, -0x4(%rbp) # imm = 0xFFFFFFFF
jmp 0xb33f40
movq -0x10(%rbp), %rax
movq 0x38(%rax), %rcx
movq -0x10(%rbp), %rax
movq %rcx, 0x8(%rax)
movl $0x0, -0x4(%rbp)
jmp 0xb33f40
movq -0x10(%rbp), %rax
movl 0x28(%rax), %ecx
shll %ecx
movq -0x18(%rbp), %rax
movl %ecx, 0x20(%rax)
movq -0x10(%rbp), %rax
movq 0x38(%rax), %rcx
movq -0x18(%rbp), %rax
movq %rcx, 0x18(%rax)
movq -0x10(%rbp), %rdi
callq 0xb34430
cmpl $-0x1, %eax
jne 0xb33ef9
movl $0xffffffff, -0x4(%rbp) # imm = 0xFFFFFFFF
jmp 0xb33f40
jmp 0xb33efb
jmp 0xb33efd
movq -0x10(%rbp), %rcx
xorl %eax, %eax
cmpl $0x0, (%rcx)
movb %al, -0x1d(%rbp)
jne 0xb33f2e
movq -0x10(%rbp), %rcx
movb $0x1, %al
cmpl $0x0, 0x50(%rcx)
movb %al, -0x1e(%rbp)
je 0xb33f28
movq -0x18(%rbp), %rax
cmpl $0x0, 0x8(%rax)
setne %al
movb %al, -0x1e(%rbp)
movb -0x1e(%rbp), %al
movb %al, -0x1d(%rbp)
movb -0x1d(%rbp), %al
testb $0x1, %al
jne 0xb33e1a
movl $0x0, -0x4(%rbp)
movl -0x4(%rbp), %eax
addq $0x20, %rsp
popq %rbp
retq
nopl (%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzlib/gzread.c
|
gz_look
|
local int gz_look(state)
gz_statep state;
{
z_streamp strm = &(state->strm);
/* allocate read buffers and inflate memory */
if (state->size == 0) {
/* allocate buffers */
state->in = (unsigned char *)malloc(state->want);
state->out = (unsigned char *)malloc(state->want << 1);
if (state->in == NULL || state->out == NULL) {
free(state->out);
free(state->in);
gz_error(state, Z_MEM_ERROR, "out of memory");
return -1;
}
state->size = state->want;
/* allocate inflate memory */
state->strm.zalloc = Z_NULL;
state->strm.zfree = Z_NULL;
state->strm.opaque = Z_NULL;
state->strm.avail_in = 0;
state->strm.next_in = Z_NULL;
if (inflateInit2(&(state->strm), 15 + 16) != Z_OK) { /* gunzip */
free(state->out);
free(state->in);
state->size = 0;
gz_error(state, Z_MEM_ERROR, "out of memory");
return -1;
}
}
/* get at least the magic bytes in the input buffer */
if (strm->avail_in < 2) {
if (gz_avail(state) == -1)
return -1;
if (strm->avail_in == 0)
return 0;
}
/* look for gzip magic bytes -- if there, do gzip decoding (note: there is
a logical dilemma here when considering the case of a partially written
gzip file, to wit, if a single 31 byte is written, then we cannot tell
whether this is a single-byte file, or just a partially written gzip
file -- for here we assume that if a gzip file is being written, then
the header will be written in a single operation, so that reading a
single byte is sufficient indication that it is not a gzip file) */
if (strm->avail_in > 1 &&
strm->next_in[0] == 31 && strm->next_in[1] == 139) {
inflateReset(strm);
state->how = GZIP;
state->direct = 0;
return 0;
}
/* no gzip header -- if we were decoding gzip before, then this is trailing
garbage. Ignore the trailing garbage and finish. */
if (state->direct == 0) {
strm->avail_in = 0;
state->eof = 1;
state->x.have = 0;
return 0;
}
/* doing raw i/o, copy any leftover input to output -- this assumes that
the output buffer is larger than the input buffer, which also assures
space for gzungetc() */
state->x.next = state->out;
if (strm->avail_in) {
memcpy(state->x.next, strm->next_in, strm->avail_in);
state->x.have = strm->avail_in;
strm->avail_in = 0;
}
state->how = COPY;
state->direct = 1;
return 0;
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x20, %rsp
movq %rdi, -0x10(%rbp)
movq -0x10(%rbp), %rax
addq $0x80, %rax
movq %rax, -0x18(%rbp)
movq -0x10(%rbp), %rax
cmpl $0x0, 0x28(%rax)
jne 0xb34125
movq -0x10(%rbp), %rax
movl 0x2c(%rax), %eax
movl %eax, %edi
callq 0x3e578
movq %rax, %rcx
movq -0x10(%rbp), %rax
movq %rcx, 0x30(%rax)
movq -0x10(%rbp), %rax
movl 0x2c(%rax), %eax
shll %eax
movl %eax, %eax
movl %eax, %edi
callq 0x3e578
movq %rax, %rcx
movq -0x10(%rbp), %rax
movq %rcx, 0x38(%rax)
movq -0x10(%rbp), %rax
cmpq $0x0, 0x30(%rax)
je 0xb34024
movq -0x10(%rbp), %rax
cmpq $0x0, 0x38(%rax)
jne 0xb3405f
movq -0x10(%rbp), %rax
movq 0x38(%rax), %rdi
callq 0x3e548
movq -0x10(%rbp), %rax
movq 0x30(%rax), %rdi
callq 0x3e548
movq -0x10(%rbp), %rdi
movl $0xfffffffc, %esi # imm = 0xFFFFFFFC
leaq 0x8610a(%rip), %rdx # 0xbba158
callq 0xb32f10
movl $0xffffffff, -0x4(%rbp) # imm = 0xFFFFFFFF
jmp 0xb34256
movq -0x10(%rbp), %rax
movl 0x2c(%rax), %ecx
movq -0x10(%rbp), %rax
movl %ecx, 0x28(%rax)
movq -0x10(%rbp), %rax
movq $0x0, 0xc0(%rax)
movq -0x10(%rbp), %rax
movq $0x0, 0xc8(%rax)
movq -0x10(%rbp), %rax
movq $0x0, 0xd0(%rax)
movq -0x10(%rbp), %rax
movl $0x0, 0x88(%rax)
movq -0x10(%rbp), %rax
movq $0x0, 0x80(%rax)
movq -0x10(%rbp), %rdi
addq $0x80, %rdi
movl $0x1f, %esi
leaq 0x7046f(%rip), %rdx # 0xba453d
movl $0x70, %ecx
callq 0xb35ba0
cmpl $0x0, %eax
je 0xb34123
movq -0x10(%rbp), %rax
movq 0x38(%rax), %rdi
callq 0x3e548
movq -0x10(%rbp), %rax
movq 0x30(%rax), %rdi
callq 0x3e548
movq -0x10(%rbp), %rax
movl $0x0, 0x28(%rax)
movq -0x10(%rbp), %rdi
movl $0xfffffffc, %esi # imm = 0xFFFFFFFC
leaq 0x86046(%rip), %rdx # 0xbba158
callq 0xb32f10
movl $0xffffffff, -0x4(%rbp) # imm = 0xFFFFFFFF
jmp 0xb34256
jmp 0xb34125
movq -0x18(%rbp), %rax
cmpl $0x2, 0x8(%rax)
jae 0xb34161
movq -0x10(%rbp), %rdi
callq 0xb345d0
cmpl $-0x1, %eax
jne 0xb34149
movl $0xffffffff, -0x4(%rbp) # imm = 0xFFFFFFFF
jmp 0xb34256
movq -0x18(%rbp), %rax
cmpl $0x0, 0x8(%rax)
jne 0xb3415f
movl $0x0, -0x4(%rbp)
jmp 0xb34256
jmp 0xb34161
movq -0x18(%rbp), %rax
cmpl $0x1, 0x8(%rax)
jbe 0xb341b7
movq -0x18(%rbp), %rax
movq (%rax), %rax
movzbl (%rax), %eax
cmpl $0x1f, %eax
jne 0xb341b7
movq -0x18(%rbp), %rax
movq (%rax), %rax
movzbl 0x1(%rax), %eax
cmpl $0x8b, %eax
jne 0xb341b7
movq -0x18(%rbp), %rdi
callq 0xb35a40
movq -0x10(%rbp), %rax
movl $0x2, 0x44(%rax)
movq -0x10(%rbp), %rax
movl $0x0, 0x40(%rax)
movl $0x0, -0x4(%rbp)
jmp 0xb34256
movq -0x10(%rbp), %rax
cmpl $0x0, 0x40(%rax)
jne 0xb341ea
movq -0x18(%rbp), %rax
movl $0x0, 0x8(%rax)
movq -0x10(%rbp), %rax
movl $0x1, 0x50(%rax)
movq -0x10(%rbp), %rax
movl $0x0, (%rax)
movl $0x0, -0x4(%rbp)
jmp 0xb34256
movq -0x10(%rbp), %rax
movq 0x38(%rax), %rcx
movq -0x10(%rbp), %rax
movq %rcx, 0x8(%rax)
movq -0x18(%rbp), %rax
cmpl $0x0, 0x8(%rax)
je 0xb34239
movq -0x10(%rbp), %rax
movq 0x8(%rax), %rdi
movq -0x18(%rbp), %rax
movq (%rax), %rsi
movq -0x18(%rbp), %rax
movl 0x8(%rax), %eax
movl %eax, %edx
callq 0x3cb70
movq -0x18(%rbp), %rax
movl 0x8(%rax), %ecx
movq -0x10(%rbp), %rax
movl %ecx, (%rax)
movq -0x18(%rbp), %rax
movl $0x0, 0x8(%rax)
movq -0x10(%rbp), %rax
movl $0x1, 0x44(%rax)
movq -0x10(%rbp), %rax
movl $0x1, 0x40(%rax)
movl $0x0, -0x4(%rbp)
movl -0x4(%rbp), %eax
addq $0x20, %rsp
popq %rbp
retq
nop
|
/JKorbelRA[P]CMake/Utilities/cmzlib/gzread.c
|
gz_write
|
local z_size_t gz_write(state, buf, len)
gz_statep state;
voidpc buf;
z_size_t len;
{
z_size_t put = len;
/* if len is zero, avoid unnecessary operations */
if (len == 0)
return 0;
/* allocate memory if this is the first time through */
if (state->size == 0 && gz_init(state) == -1)
return 0;
/* check for seek request */
if (state->seek) {
state->seek = 0;
if (gz_zero(state, state->skip) == -1)
return 0;
}
/* for small len, copy to input buffer, otherwise compress directly */
if (len < state->size) {
/* copy to input buffer, compress when full */
do {
unsigned have, copy;
if (state->strm.avail_in == 0)
state->strm.next_in = state->in;
have = (unsigned)((state->strm.next_in + state->strm.avail_in) -
state->in);
copy = state->size - have;
if (copy > len)
copy = (unsigned)len;
memcpy(state->in + have, buf, copy);
state->strm.avail_in += copy;
state->x.pos += copy;
buf = (const char *)buf + copy;
len -= copy;
if (len && gz_comp(state, Z_NO_FLUSH) == -1)
return 0;
} while (len);
}
else {
/* consume whatever's left in the input buffer */
if (state->strm.avail_in && gz_comp(state, Z_NO_FLUSH) == -1)
return 0;
/* directly compress user buffer to file */
state->strm.next_in = (z_const Bytef *)buf;
do {
unsigned n = (unsigned)-1;
if (n > len)
n = (unsigned)len;
state->strm.avail_in = n;
state->x.pos += n;
if (gz_comp(state, Z_NO_FLUSH) == -1)
return 0;
len -= n;
} while (len);
}
/* input was all buffered or compressed */
return put;
}
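gz_write() is the internal worker that the public gzwrite() and gzputs() entry points funnel into; a hedged sketch of the write side through the public API ("output.gz" is a placeholder):

#include <string.h>
#include <zlib.h>

int main(void) {
  const char msg[] = "hello, gzip\n";
  gzFile f = gzopen("output.gz", "wb9");  /* "9" requests best compression */

  if (f == NULL)
    return 1;
  if (gzwrite(f, msg, (unsigned)strlen(msg)) == 0)
    return 1;                             /* 0 means the write failed */
  gzclose(f);                             /* flushes and writes the gzip trailer */
  return 0;
}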
|
pushq %rbp
movq %rsp, %rbp
subq $0x40, %rsp
movq %rdi, -0x10(%rbp)
movq %rsi, -0x18(%rbp)
movq %rdx, -0x20(%rbp)
movq -0x20(%rbp), %rax
movq %rax, -0x28(%rbp)
cmpq $0x0, -0x20(%rbp)
jne 0xb347a0
movq $0x0, -0x8(%rbp)
jmp 0xb349b2
movq -0x10(%rbp), %rax
cmpl $0x0, 0x28(%rax)
jne 0xb347c5
movq -0x10(%rbp), %rdi
callq 0xb35010
cmpl $-0x1, %eax
jne 0xb347c5
movq $0x0, -0x8(%rbp)
jmp 0xb349b2
movq -0x10(%rbp), %rax
cmpl $0x0, 0x70(%rax)
je 0xb347ff
movq -0x10(%rbp), %rax
movl $0x0, 0x70(%rax)
movq -0x10(%rbp), %rdi
movq -0x10(%rbp), %rax
movq 0x68(%rax), %rsi
callq 0xb34c00
cmpl $-0x1, %eax
jne 0xb347fd
movq $0x0, -0x8(%rbp)
jmp 0xb349b2
jmp 0xb347ff
movq -0x20(%rbp), %rax
movq -0x10(%rbp), %rcx
movl 0x28(%rcx), %ecx
cmpq %rcx, %rax
jae 0xb3490c
jmp 0xb34815
movq -0x10(%rbp), %rax
cmpl $0x0, 0x88(%rax)
jne 0xb34835
movq -0x10(%rbp), %rax
movq 0x30(%rax), %rcx
movq -0x10(%rbp), %rax
movq %rcx, 0x80(%rax)
movq -0x10(%rbp), %rax
movq 0x80(%rax), %rax
movq -0x10(%rbp), %rcx
movl 0x88(%rcx), %ecx
addq %rcx, %rax
movq -0x10(%rbp), %rcx
movq 0x30(%rcx), %rcx
subq %rcx, %rax
movl %eax, -0x2c(%rbp)
movq -0x10(%rbp), %rax
movl 0x28(%rax), %eax
subl -0x2c(%rbp), %eax
movl %eax, -0x30(%rbp)
movl -0x30(%rbp), %eax
cmpq -0x20(%rbp), %rax
jbe 0xb34878
movq -0x20(%rbp), %rax
movl %eax, -0x30(%rbp)
movq -0x10(%rbp), %rax
movq 0x30(%rax), %rdi
movl -0x2c(%rbp), %eax
addq %rax, %rdi
movq -0x18(%rbp), %rsi
movl -0x30(%rbp), %eax
movl %eax, %edx
callq 0x3cb70
movl -0x30(%rbp), %ecx
movq -0x10(%rbp), %rax
addl 0x88(%rax), %ecx
movl %ecx, 0x88(%rax)
movl -0x30(%rbp), %eax
movl %eax, %ecx
movq -0x10(%rbp), %rax
addq 0x10(%rax), %rcx
movq %rcx, 0x10(%rax)
movq -0x18(%rbp), %rax
movl -0x30(%rbp), %ecx
addq %rcx, %rax
movq %rax, -0x18(%rbp)
movl -0x30(%rbp), %eax
movl %eax, %ecx
movq -0x20(%rbp), %rax
subq %rcx, %rax
movq %rax, -0x20(%rbp)
cmpq $0x0, -0x20(%rbp)
je 0xb348fa
movq -0x10(%rbp), %rdi
xorl %esi, %esi
callq 0xb351e0
cmpl $-0x1, %eax
jne 0xb348fa
movq $0x0, -0x8(%rbp)
jmp 0xb349b2
jmp 0xb348fc
cmpq $0x0, -0x20(%rbp)
jne 0xb34815
jmp 0xb349aa
movq -0x10(%rbp), %rax
cmpl $0x0, 0x88(%rax)
je 0xb34933
movq -0x10(%rbp), %rdi
xorl %esi, %esi
callq 0xb351e0
cmpl $-0x1, %eax
jne 0xb34933
movq $0x0, -0x8(%rbp)
jmp 0xb349b2
movq -0x18(%rbp), %rcx
movq -0x10(%rbp), %rax
movq %rcx, 0x80(%rax)
movl $0xffffffff, -0x34(%rbp) # imm = 0xFFFFFFFF
movl -0x34(%rbp), %eax
cmpq -0x20(%rbp), %rax
jbe 0xb34959
movq -0x20(%rbp), %rax
movl %eax, -0x34(%rbp)
movl -0x34(%rbp), %ecx
movq -0x10(%rbp), %rax
movl %ecx, 0x88(%rax)
movl -0x34(%rbp), %eax
movl %eax, %ecx
movq -0x10(%rbp), %rax
addq 0x10(%rax), %rcx
movq %rcx, 0x10(%rax)
movq -0x10(%rbp), %rdi
xorl %esi, %esi
callq 0xb351e0
cmpl $-0x1, %eax
jne 0xb34991
movq $0x0, -0x8(%rbp)
jmp 0xb349b2
movl -0x34(%rbp), %eax
movl %eax, %ecx
movq -0x20(%rbp), %rax
subq %rcx, %rax
movq %rax, -0x20(%rbp)
cmpq $0x0, -0x20(%rbp)
jne 0xb34942
jmp 0xb349aa
movq -0x28(%rbp), %rax
movq %rax, -0x8(%rbp)
movq -0x8(%rbp), %rax
addq $0x40, %rsp
popq %rbp
retq
nopl (%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzlib/gzwrite.c
|
gz_init
|
local int gz_init(state)
gz_statep state;
{
int ret;
z_streamp strm = &(state->strm);
/* allocate input buffer (double size for gzprintf) */
state->in = (unsigned char *)malloc(state->want << 1);
if (state->in == NULL) {
gz_error(state, Z_MEM_ERROR, "out of memory");
return -1;
}
/* only need output buffer and deflate state if compressing */
if (!state->direct) {
/* allocate output buffer */
state->out = (unsigned char *)malloc(state->want);
if (state->out == NULL) {
free(state->in);
gz_error(state, Z_MEM_ERROR, "out of memory");
return -1;
}
/* allocate deflate memory, set up for gzip compression */
strm->zalloc = Z_NULL;
strm->zfree = Z_NULL;
strm->opaque = Z_NULL;
ret = deflateInit2(strm, state->level, Z_DEFLATED,
MAX_WBITS + 16, DEF_MEM_LEVEL, state->strategy);
if (ret != Z_OK) {
free(state->out);
free(state->in);
gz_error(state, Z_MEM_ERROR, "out of memory");
return -1;
}
strm->next_in = NULL;
}
/* mark state as initialized */
state->size = state->want;
/* initialize write buffer if compressing */
if (!state->direct) {
strm->avail_out = state->size;
strm->next_out = state->out;
state->x.next = strm->next_out;
}
return 0;
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x30, %rsp
movq %rdi, -0x10(%rbp)
movq -0x10(%rbp), %rax
addq $0x80, %rax
movq %rax, -0x20(%rbp)
movq -0x10(%rbp), %rax
movl 0x2c(%rax), %eax
shll %eax
movl %eax, %eax
movl %eax, %edi
callq 0x3e578
movq %rax, %rcx
movq -0x10(%rbp), %rax
movq %rcx, 0x30(%rax)
movq -0x10(%rbp), %rax
cmpq $0x0, 0x30(%rax)
jne 0xb35073
movq -0x10(%rbp), %rdi
movl $0xfffffffc, %esi # imm = 0xFFFFFFFC
leaq 0x850f6(%rip), %rdx # 0xbba158
callq 0xb32f10
movl $0xffffffff, -0x4(%rbp) # imm = 0xFFFFFFFF
jmp 0xb351c9
movq -0x10(%rbp), %rax
cmpl $0x0, 0x40(%rax)
jne 0xb3517c
movq -0x10(%rbp), %rax
movl 0x2c(%rax), %eax
movl %eax, %edi
callq 0x3e578
movq %rax, %rcx
movq -0x10(%rbp), %rax
movq %rcx, 0x38(%rax)
movq -0x10(%rbp), %rax
cmpq $0x0, 0x38(%rax)
jne 0xb350d3
movq -0x10(%rbp), %rax
movq 0x30(%rax), %rdi
callq 0x3e548
movq -0x10(%rbp), %rdi
movl $0xfffffffc, %esi # imm = 0xFFFFFFFC
leaq 0x85096(%rip), %rdx # 0xbba158
callq 0xb32f10
movl $0xffffffff, -0x4(%rbp) # imm = 0xFFFFFFFF
jmp 0xb351c9
movq -0x20(%rbp), %rax
movq $0x0, 0x40(%rax)
movq -0x20(%rbp), %rax
movq $0x0, 0x48(%rax)
movq -0x20(%rbp), %rax
movq $0x0, 0x50(%rax)
movq -0x20(%rbp), %rdi
movq -0x10(%rbp), %rax
movl 0x58(%rax), %esi
movq -0x10(%rbp), %rax
movl 0x5c(%rax), %r9d
movl $0x8, %r8d
movl $0x1f, %ecx
leaq 0x6f421(%rip), %rax # 0xba453d
movl %r8d, %edx
movq %rax, (%rsp)
movl $0x70, 0x8(%rsp)
callq 0xb2cae0
movl %eax, -0x14(%rbp)
cmpl $0x0, -0x14(%rbp)
je 0xb35171
movq -0x10(%rbp), %rax
movq 0x38(%rax), %rdi
callq 0x3e548
movq -0x10(%rbp), %rax
movq 0x30(%rax), %rdi
callq 0x3e548
movq -0x10(%rbp), %rdi
movl $0xfffffffc, %esi # imm = 0xFFFFFFFC
leaq 0x84ff5(%rip), %rdx # 0xbba158
callq 0xb32f10
movl $0xffffffff, -0x4(%rbp) # imm = 0xFFFFFFFF
jmp 0xb351c9
movq -0x20(%rbp), %rax
movq $0x0, (%rax)
movq -0x10(%rbp), %rax
movl 0x2c(%rax), %ecx
movq -0x10(%rbp), %rax
movl %ecx, 0x28(%rax)
movq -0x10(%rbp), %rax
cmpl $0x0, 0x40(%rax)
jne 0xb351c2
movq -0x10(%rbp), %rax
movl 0x28(%rax), %ecx
movq -0x20(%rbp), %rax
movl %ecx, 0x20(%rax)
movq -0x10(%rbp), %rax
movq 0x38(%rax), %rcx
movq -0x20(%rbp), %rax
movq %rcx, 0x18(%rax)
movq -0x20(%rbp), %rax
movq 0x18(%rax), %rcx
movq -0x10(%rbp), %rax
movq %rcx, 0x8(%rax)
movl $0x0, -0x4(%rbp)
movl -0x4(%rbp), %eax
addq $0x30, %rsp
popq %rbp
retq
nopw %cs:(%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzlib/gzwrite.c
|
cm_zlib_inflateInit2_
|
int ZEXPORT inflateInit2_(strm, windowBits, version, stream_size)
z_streamp strm;
int windowBits;
const char *version;
int stream_size;
{
int ret;
struct inflate_state FAR *state;
if (version == Z_NULL || version[0] != ZLIB_VERSION[0] ||
stream_size != (int)(sizeof(z_stream)))
return Z_VERSION_ERROR;
if (strm == Z_NULL) return Z_STREAM_ERROR;
strm->msg = Z_NULL; /* in case we return an error */
if (strm->zalloc == (alloc_func)0) {
#ifdef Z_SOLO
return Z_STREAM_ERROR;
#else
strm->zalloc = zcalloc;
strm->opaque = (voidpf)0;
#endif
}
if (strm->zfree == (free_func)0)
#ifdef Z_SOLO
return Z_STREAM_ERROR;
#else
strm->zfree = zcfree;
#endif
state = (struct inflate_state FAR *)
ZALLOC(strm, 1, sizeof(struct inflate_state));
if (state == Z_NULL) return Z_MEM_ERROR;
Tracev((stderr, "inflate: allocated\n"));
strm->state = (struct internal_state FAR *)state;
state->strm = strm;
state->window = Z_NULL;
state->mode = HEAD; /* to pass state test in inflateReset2() */
ret = inflateReset2(strm, windowBits);
if (ret != Z_OK) {
ZFREE(strm, state);
strm->state = Z_NULL;
}
return ret;
}
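A hedged single-shot sketch: windowBits = 15 + 16 selects gzip framing, the same value gz_look() passes above; gunzip_buffer() and its fixed-size output buffer are illustrative assumptions:

#include <string.h>
#include <zlib.h>

/* Decompress one gzip member from in[] into out[]; returns bytes produced or -1. */
long gunzip_buffer(const unsigned char* in, unsigned long in_len,
                   unsigned char* out, unsigned long out_len) {
  z_stream strm;
  int ret;

  memset(&strm, 0, sizeof(strm));     /* zalloc, zfree and opaque all Z_NULL */
  if (inflateInit2(&strm, 15 + 16) != Z_OK)
    return -1;

  strm.next_in = (Bytef*)in;
  strm.avail_in = (uInt)in_len;
  strm.next_out = out;
  strm.avail_out = (uInt)out_len;

  ret = inflate(&strm, Z_FINISH);     /* single shot; assumes out_len is large enough */
  inflateEnd(&strm);
  return ret == Z_STREAM_END ? (long)strm.total_out : -1;
}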
|
pushq %rbp
movq %rsp, %rbp
subq $0x30, %rsp
movq %rdi, -0x10(%rbp)
movl %esi, -0x14(%rbp)
movq %rdx, -0x20(%rbp)
movl %ecx, -0x24(%rbp)
cmpq $0x0, -0x20(%rbp)
je 0xb35bd5
movq -0x20(%rbp), %rax
movsbl (%rax), %eax
movsbl 0x6e972(%rip), %ecx # 0xba453d
cmpl %ecx, %eax
jne 0xb35bd5
cmpl $0x70, -0x24(%rbp)
je 0xb35be1
movl $0xfffffffa, -0x4(%rbp) # imm = 0xFFFFFFFA
jmp 0xb35cdb
cmpq $0x0, -0x10(%rbp)
jne 0xb35bf4
movl $0xfffffffe, -0x4(%rbp) # imm = 0xFFFFFFFE
jmp 0xb35cdb
movq -0x10(%rbp), %rax
movq $0x0, 0x30(%rax)
movq -0x10(%rbp), %rax
cmpq $0x0, 0x40(%rax)
jne 0xb35c26
movq -0x10(%rbp), %rax
leaq 0x707a(%rip), %rcx # 0xb3cc90
movq %rcx, 0x40(%rax)
movq -0x10(%rbp), %rax
movq $0x0, 0x50(%rax)
movq -0x10(%rbp), %rax
cmpq $0x0, 0x48(%rax)
jne 0xb35c40
movq -0x10(%rbp), %rax
leaq 0x7084(%rip), %rcx # 0xb3ccc0
movq %rcx, 0x48(%rax)
movq -0x10(%rbp), %rax
movq 0x40(%rax), %rax
movq -0x10(%rbp), %rcx
movq 0x50(%rcx), %rdi
movl $0x1, %esi
movl $0x1bf8, %edx # imm = 0x1BF8
callq *%rax
movq %rax, -0x30(%rbp)
cmpq $0x0, -0x30(%rbp)
jne 0xb35c70
movl $0xfffffffc, -0x4(%rbp) # imm = 0xFFFFFFFC
jmp 0xb35cdb
movq -0x30(%rbp), %rcx
movq -0x10(%rbp), %rax
movq %rcx, 0x38(%rax)
movq -0x10(%rbp), %rcx
movq -0x30(%rbp), %rax
movq %rcx, (%rax)
movq -0x30(%rbp), %rax
movq $0x0, 0x48(%rax)
movq -0x30(%rbp), %rax
movl $0x3f34, 0x8(%rax) # imm = 0x3F34
movq -0x10(%rbp), %rdi
movl -0x14(%rbp), %esi
callq 0xb35ab0
movl %eax, -0x28(%rbp)
cmpl $0x0, -0x28(%rbp)
je 0xb35cd5
movq -0x10(%rbp), %rax
movq 0x48(%rax), %rax
movq -0x10(%rbp), %rcx
movq 0x50(%rcx), %rdi
movq -0x30(%rbp), %rsi
callq *%rax
movq -0x10(%rbp), %rax
movq $0x0, 0x38(%rax)
movl -0x28(%rbp), %eax
movl %eax, -0x4(%rbp)
movl -0x4(%rbp), %eax
addq $0x30, %rsp
popq %rbp
retq
nopw %cs:(%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzlib/inflate.c
|
fixedtables
|
local void fixedtables(state)
struct inflate_state FAR *state;
{
#ifdef BUILDFIXED
static int virgin = 1;
static code *lenfix, *distfix;
static code fixed[544];
/* build fixed huffman tables if first call (may not be thread safe) */
if (virgin) {
unsigned sym, bits;
static code *next;
/* literal/length table */
sym = 0;
while (sym < 144) state->lens[sym++] = 8;
while (sym < 256) state->lens[sym++] = 9;
while (sym < 280) state->lens[sym++] = 7;
while (sym < 288) state->lens[sym++] = 8;
next = fixed;
lenfix = next;
bits = 9;
inflate_table(LENS, state->lens, 288, &(next), &(bits), state->work);
/* distance table */
sym = 0;
while (sym < 32) state->lens[sym++] = 5;
distfix = next;
bits = 5;
inflate_table(DISTS, state->lens, 32, &(next), &(bits), state->work);
/* do this just once */
virgin = 0;
}
#else /* !BUILDFIXED */
# include "inffixed.h"
#endif /* BUILDFIXED */
state->lencode = lenfix;
state->lenbits = 9;
state->distcode = distfix;
state->distbits = 5;
}
|
pushq %rbp
movq %rsp, %rbp
movq %rdi, -0x8(%rbp)
movq -0x8(%rbp), %rax
leaq 0xacd2d(%rip), %rcx # 0xbe5230
movq %rcx, 0x68(%rax)
movq -0x8(%rbp), %rax
movl $0x9, 0x78(%rax)
movq -0x8(%rbp), %rax
leaq 0xad513(%rip), %rcx # 0xbe5a30
movq %rcx, 0x70(%rax)
movq -0x8(%rbp), %rax
movl $0x5, 0x7c(%rax)
popq %rbp
retq
nop
|
/JKorbelRA[P]CMake/Utilities/cmzlib/inflate.c
|
cm_zlib_inflateUndermine
|
int ZEXPORT inflateUndermine(strm, subvert)
z_streamp strm;
int subvert;
{
struct inflate_state FAR *state;
if (inflateStateCheck(strm)) return Z_STREAM_ERROR;
state = (struct inflate_state FAR *)strm->state;
#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR
state->sane = !subvert;
return Z_OK;
#else
(void)subvert;
state->sane = 1;
return Z_DATA_ERROR;
#endif
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x20, %rsp
movq %rdi, -0x10(%rbp)
movl %esi, -0x14(%rbp)
movq -0x10(%rbp), %rdi
callq 0xb359c0
cmpl $0x0, %eax
je 0xb38fb6
movl $0xfffffffe, -0x4(%rbp) # imm = 0xFFFFFFFE
jmp 0xb38fd7
movq -0x10(%rbp), %rax
movq 0x38(%rax), %rax
movq %rax, -0x20(%rbp)
movq -0x20(%rbp), %rax
movl $0x1, 0x1be8(%rax)
movl $0xfffffffd, -0x4(%rbp) # imm = 0xFFFFFFFD
movl -0x4(%rbp), %eax
addq $0x20, %rsp
popq %rbp
retq
|
/JKorbelRA[P]CMake/Utilities/cmzlib/inflate.c
|
cm_zlib__tr_align
|
void ZLIB_INTERNAL _tr_align(s)
deflate_state *s;
{
send_bits(s, STATIC_TREES<<1, 3);
send_code(s, END_BLOCK, static_ltree);
#ifdef ZLIB_DEBUG
s->compressed_len += 10L; /* 3 for block type, 7 for EOB */
#endif
bi_flush(s);
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x20, %rsp
movq %rdi, -0x8(%rbp)
movl $0x3, -0xc(%rbp)
movq -0x8(%rbp), %rax
movl 0x1734(%rax), %eax
movl $0x10, %ecx
subl -0xc(%rbp), %ecx
cmpl %ecx, %eax
jle 0xb39e85
movl $0x2, -0x10(%rbp)
movl -0x10(%rbp), %eax
movzwl %ax, %edx
movq -0x8(%rbp), %rax
movl 0x1734(%rax), %ecx
shll %cl, %edx
movq -0x8(%rbp), %rax
movzwl 0x1730(%rax), %ecx
orl %edx, %ecx
movw %cx, 0x1730(%rax)
movq -0x8(%rbp), %rax
movzwl 0x1730(%rax), %eax
andl $0xff, %eax
movb %al, %dl
movq -0x8(%rbp), %rax
movq 0x10(%rax), %rax
movq -0x8(%rbp), %rsi
movq 0x28(%rsi), %rcx
movq %rcx, %rdi
addq $0x1, %rdi
movq %rdi, 0x28(%rsi)
movb %dl, (%rax,%rcx)
movq -0x8(%rbp), %rax
movzwl 0x1730(%rax), %eax
sarl $0x8, %eax
movb %al, %dl
movq -0x8(%rbp), %rax
movq 0x10(%rax), %rax
movq -0x8(%rbp), %rsi
movq 0x28(%rsi), %rcx
movq %rcx, %rdi
addq $0x1, %rdi
movq %rdi, 0x28(%rsi)
movb %dl, (%rax,%rcx)
movl -0x10(%rbp), %eax
movzwl %ax, %eax
movq -0x8(%rbp), %rdx
movl $0x10, %ecx
subl 0x1734(%rdx), %ecx
sarl %cl, %eax
movw %ax, %cx
movq -0x8(%rbp), %rax
movw %cx, 0x1730(%rax)
movl -0xc(%rbp), %ecx
subl $0x10, %ecx
movq -0x8(%rbp), %rax
addl 0x1734(%rax), %ecx
movl %ecx, 0x1734(%rax)
jmp 0xb39ebd
movq -0x8(%rbp), %rax
movl 0x1734(%rax), %ecx
movl $0x2, %edx
shll %cl, %edx
movq -0x8(%rbp), %rax
movzwl 0x1730(%rax), %ecx
orl %edx, %ecx
movw %cx, 0x1730(%rax)
movl -0xc(%rbp), %ecx
movq -0x8(%rbp), %rax
addl 0x1734(%rax), %ecx
movl %ecx, 0x1734(%rax)
movzwl 0xac5ee(%rip), %eax # 0xbe64b2
movl %eax, -0x14(%rbp)
movq -0x8(%rbp), %rax
movl 0x1734(%rax), %eax
movl $0x10, %ecx
subl -0x14(%rbp), %ecx
cmpl %ecx, %eax
jle 0xb39fac
movzwl 0xac5c8(%rip), %eax # 0xbe64b0
movl %eax, -0x18(%rbp)
movl -0x18(%rbp), %eax
movzwl %ax, %edx
movq -0x8(%rbp), %rax
movl 0x1734(%rax), %ecx
shll %cl, %edx
movq -0x8(%rbp), %rax
movzwl 0x1730(%rax), %ecx
orl %edx, %ecx
movw %cx, 0x1730(%rax)
movq -0x8(%rbp), %rax
movzwl 0x1730(%rax), %eax
andl $0xff, %eax
movb %al, %dl
movq -0x8(%rbp), %rax
movq 0x10(%rax), %rax
movq -0x8(%rbp), %rsi
movq 0x28(%rsi), %rcx
movq %rcx, %rdi
addq $0x1, %rdi
movq %rdi, 0x28(%rsi)
movb %dl, (%rax,%rcx)
movq -0x8(%rbp), %rax
movzwl 0x1730(%rax), %eax
sarl $0x8, %eax
movb %al, %dl
movq -0x8(%rbp), %rax
movq 0x10(%rax), %rax
movq -0x8(%rbp), %rsi
movq 0x28(%rsi), %rcx
movq %rcx, %rdi
addq $0x1, %rdi
movq %rdi, 0x28(%rsi)
movb %dl, (%rax,%rcx)
movl -0x18(%rbp), %eax
movzwl %ax, %eax
movq -0x8(%rbp), %rdx
movl $0x10, %ecx
subl 0x1734(%rdx), %ecx
sarl %cl, %eax
movw %ax, %cx
movq -0x8(%rbp), %rax
movw %cx, 0x1730(%rax)
movl -0x14(%rbp), %ecx
subl $0x10, %ecx
movq -0x8(%rbp), %rax
addl 0x1734(%rax), %ecx
movl %ecx, 0x1734(%rax)
jmp 0xb39fe6
movzwl 0xac4fd(%rip), %edx # 0xbe64b0
movq -0x8(%rbp), %rax
movl 0x1734(%rax), %ecx
shll %cl, %edx
movq -0x8(%rbp), %rax
movzwl 0x1730(%rax), %ecx
orl %edx, %ecx
movw %cx, 0x1730(%rax)
movl -0x14(%rbp), %ecx
movq -0x8(%rbp), %rax
addl 0x1734(%rax), %ecx
movl %ecx, 0x1734(%rax)
movq -0x8(%rbp), %rdi
callq 0xb39c90
addq $0x20, %rsp
popq %rbp
retq
nopw %cs:(%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzlib/trees.c
|
build_tree
|
local void build_tree(s, desc)
deflate_state *s;
tree_desc *desc; /* the tree descriptor */
{
ct_data *tree = desc->dyn_tree;
const ct_data *stree = desc->stat_desc->static_tree;
int elems = desc->stat_desc->elems;
int n, m; /* iterate over heap elements */
int max_code = -1; /* largest code with non zero frequency */
int node; /* new node being created */
/* Construct the initial heap, with least frequent element in
* heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1].
* heap[0] is not used.
*/
s->heap_len = 0, s->heap_max = HEAP_SIZE;
for (n = 0; n < elems; n++) {
if (tree[n].Freq != 0) {
s->heap[++(s->heap_len)] = max_code = n;
s->depth[n] = 0;
} else {
tree[n].Len = 0;
}
}
/* The pkzip format requires that at least one distance code exists,
* and that at least one bit should be sent even if there is only one
* possible code. So to avoid special checks later on we force at least
* two codes of non zero frequency.
*/
while (s->heap_len < 2) {
node = s->heap[++(s->heap_len)] = (max_code < 2 ? ++max_code : 0);
tree[node].Freq = 1;
s->depth[node] = 0;
s->opt_len--; if (stree) s->static_len -= stree[node].Len;
/* node is 0 or 1 so it does not have extra bits */
}
desc->max_code = max_code;
/* The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree;
* establish sub-heaps of increasing lengths:
*/
for (n = s->heap_len/2; n >= 1; n--) pqdownheap(s, tree, n);
/* Construct the Huffman tree by repeatedly combining the least two
* frequent nodes.
*/
node = elems; /* next internal node of the tree */
do {
pqremove(s, tree, n); /* n = node of least frequency */
m = s->heap[SMALLEST]; /* m = node of next least frequency */
s->heap[--(s->heap_max)] = n; /* keep the nodes sorted by frequency */
s->heap[--(s->heap_max)] = m;
/* Create a new node father of n and m */
tree[node].Freq = tree[n].Freq + tree[m].Freq;
s->depth[node] = (uch)((s->depth[n] >= s->depth[m] ?
s->depth[n] : s->depth[m]) + 1);
tree[n].Dad = tree[m].Dad = (ush)node;
#ifdef DUMP_BL_TREE
if (tree == s->bl_tree) {
fprintf(stderr,"\nnode %d(%d), sons %d(%d) %d(%d)",
node, tree[node].Freq, n, tree[n].Freq, m, tree[m].Freq);
}
#endif
/* and insert the new node in the heap */
s->heap[SMALLEST] = node++;
pqdownheap(s, tree, SMALLEST);
} while (s->heap_len >= 2);
s->heap[--(s->heap_max)] = s->heap[SMALLEST];
/* At this point, the fields freq and dad are set. We can now
* generate the bit lengths.
*/
gen_bitlen(s, (tree_desc *)desc);
/* The field len is now set, we can generate the bit codes */
gen_codes ((ct_data *)tree, max_code, s->bl_count);
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x40, %rsp
movq %rdi, -0x8(%rbp)
movq %rsi, -0x10(%rbp)
movq -0x10(%rbp), %rax
movq (%rax), %rax
movq %rax, -0x18(%rbp)
movq -0x10(%rbp), %rax
movq 0x10(%rax), %rax
movq (%rax), %rax
movq %rax, -0x20(%rbp)
movq -0x10(%rbp), %rax
movq 0x10(%rax), %rax
movl 0x14(%rax), %eax
movl %eax, -0x24(%rbp)
movl $0xffffffff, -0x30(%rbp) # imm = 0xFFFFFFFF
movq -0x8(%rbp), %rax
movl $0x0, 0x14b4(%rax)
movq -0x8(%rbp), %rax
movl $0x23d, 0x14b8(%rax) # imm = 0x23D
movl $0x0, -0x28(%rbp)
movl -0x28(%rbp), %eax
cmpl -0x24(%rbp), %eax
jge 0xb3a5c0
movq -0x18(%rbp), %rax
movslq -0x28(%rbp), %rcx
movzwl (%rax,%rcx,4), %eax
cmpl $0x0, %eax
je 0xb3a5a4
movl -0x28(%rbp), %edx
movl %edx, -0x30(%rbp)
movq -0x8(%rbp), %rax
movq -0x8(%rbp), %rsi
movl 0x14b4(%rsi), %ecx
addl $0x1, %ecx
movl %ecx, 0x14b4(%rsi)
movslq %ecx, %rcx
movl %edx, 0xbc0(%rax,%rcx,4)
movq -0x8(%rbp), %rax
movslq -0x28(%rbp), %rcx
movb $0x0, 0x14bc(%rax,%rcx)
jmp 0xb3a5b3
movq -0x18(%rbp), %rax
movslq -0x28(%rbp), %rcx
movw $0x0, 0x2(%rax,%rcx,4)
jmp 0xb3a5b5
movl -0x28(%rbp), %eax
addl $0x1, %eax
movl %eax, -0x28(%rbp)
jmp 0xb3a552
jmp 0xb3a5c2
movq -0x8(%rbp), %rax
cmpl $0x2, 0x14b4(%rax)
jge 0xb3a679
cmpl $0x2, -0x30(%rbp)
jge 0xb3a5e7
movl -0x30(%rbp), %eax
addl $0x1, %eax
movl %eax, -0x30(%rbp)
movl %eax, -0x38(%rbp)
jmp 0xb3a5ee
xorl %eax, %eax
movl %eax, -0x38(%rbp)
jmp 0xb3a5ee
movl -0x38(%rbp), %eax
movq -0x8(%rbp), %rcx
movq -0x8(%rbp), %rsi
movl 0x14b4(%rsi), %edx
addl $0x1, %edx
movl %edx, 0x14b4(%rsi)
movslq %edx, %rdx
movl %eax, 0xbc0(%rcx,%rdx,4)
movl %eax, -0x34(%rbp)
movq -0x18(%rbp), %rax
movslq -0x34(%rbp), %rcx
movw $0x1, (%rax,%rcx,4)
movq -0x8(%rbp), %rax
movslq -0x34(%rbp), %rcx
movb $0x0, 0x14bc(%rax,%rcx)
movq -0x8(%rbp), %rax
movq 0x1718(%rax), %rcx
addq $-0x1, %rcx
movq %rcx, 0x1718(%rax)
cmpq $0x0, -0x20(%rbp)
je 0xb3a674
movq -0x20(%rbp), %rax
movslq -0x34(%rbp), %rcx
movzwl 0x2(%rax,%rcx,4), %eax
movl %eax, %edx
movq -0x8(%rbp), %rax
movq 0x1720(%rax), %rcx
subq %rdx, %rcx
movq %rcx, 0x1720(%rax)
jmp 0xb3a5c2
movl -0x30(%rbp), %ecx
movq -0x10(%rbp), %rax
movl %ecx, 0x8(%rax)
movq -0x8(%rbp), %rax
movl 0x14b4(%rax), %eax
movl $0x2, %ecx
cltd
idivl %ecx
movl %eax, -0x28(%rbp)
cmpl $0x1, -0x28(%rbp)
jl 0xb3a6b9
movq -0x8(%rbp), %rdi
movq -0x18(%rbp), %rsi
movl -0x28(%rbp), %edx
callq 0xb3b910
movl -0x28(%rbp), %eax
addl $-0x1, %eax
movl %eax, -0x28(%rbp)
jmp 0xb3a698
movl -0x24(%rbp), %eax
movl %eax, -0x34(%rbp)
movq -0x8(%rbp), %rax
movl 0xbc4(%rax), %eax
movl %eax, -0x28(%rbp)
movq -0x8(%rbp), %rax
movq -0x8(%rbp), %rdx
movl 0x14b4(%rdx), %ecx
movl %ecx, %esi
addl $-0x1, %esi
movl %esi, 0x14b4(%rdx)
movslq %ecx, %rcx
movl 0xbc0(%rax,%rcx,4), %ecx
movq -0x8(%rbp), %rax
movl %ecx, 0xbc4(%rax)
movq -0x8(%rbp), %rdi
movq -0x18(%rbp), %rsi
movl $0x1, %edx
callq 0xb3b910
movq -0x8(%rbp), %rax
movl 0xbc4(%rax), %eax
movl %eax, -0x2c(%rbp)
movl -0x28(%rbp), %edx
movq -0x8(%rbp), %rax
movq -0x8(%rbp), %rsi
movl 0x14b8(%rsi), %ecx
addl $-0x1, %ecx
movl %ecx, 0x14b8(%rsi)
movslq %ecx, %rcx
movl %edx, 0xbc0(%rax,%rcx,4)
movl -0x2c(%rbp), %edx
movq -0x8(%rbp), %rax
movq -0x8(%rbp), %rsi
movl 0x14b8(%rsi), %ecx
addl $-0x1, %ecx
movl %ecx, 0x14b8(%rsi)
movslq %ecx, %rcx
movl %edx, 0xbc0(%rax,%rcx,4)
movq -0x18(%rbp), %rax
movslq -0x28(%rbp), %rcx
movzwl (%rax,%rcx,4), %eax
movq -0x18(%rbp), %rcx
movslq -0x2c(%rbp), %rdx
movzwl (%rcx,%rdx,4), %ecx
addl %ecx, %eax
movw %ax, %dx
movq -0x18(%rbp), %rax
movslq -0x34(%rbp), %rcx
movw %dx, (%rax,%rcx,4)
movq -0x8(%rbp), %rax
movslq -0x28(%rbp), %rcx
movzbl 0x14bc(%rax,%rcx), %eax
movq -0x8(%rbp), %rcx
movslq -0x2c(%rbp), %rdx
movzbl 0x14bc(%rcx,%rdx), %ecx
cmpl %ecx, %eax
jl 0xb3a7c2
movq -0x8(%rbp), %rax
movslq -0x28(%rbp), %rcx
movzbl 0x14bc(%rax,%rcx), %eax
movl %eax, -0x3c(%rbp)
jmp 0xb3a7d5
movq -0x8(%rbp), %rax
movslq -0x2c(%rbp), %rcx
movzbl 0x14bc(%rax,%rcx), %eax
movl %eax, -0x3c(%rbp)
movl -0x3c(%rbp), %eax
addl $0x1, %eax
movb %al, %dl
movq -0x8(%rbp), %rax
movslq -0x34(%rbp), %rcx
movb %dl, 0x14bc(%rax,%rcx)
movl -0x34(%rbp), %eax
movw %ax, %dx
movq -0x18(%rbp), %rax
movslq -0x2c(%rbp), %rcx
movw %dx, 0x2(%rax,%rcx,4)
movq -0x18(%rbp), %rax
movslq -0x28(%rbp), %rcx
movw %dx, 0x2(%rax,%rcx,4)
movl -0x34(%rbp), %ecx
movl %ecx, %eax
addl $0x1, %eax
movl %eax, -0x34(%rbp)
movq -0x8(%rbp), %rax
movl %ecx, 0xbc4(%rax)
movq -0x8(%rbp), %rdi
movq -0x18(%rbp), %rsi
movl $0x1, %edx
callq 0xb3b910
movq -0x8(%rbp), %rax
cmpl $0x2, 0x14b4(%rax)
jge 0xb3a6bf
movq -0x8(%rbp), %rax
movl 0xbc4(%rax), %edx
movq -0x8(%rbp), %rax
movq -0x8(%rbp), %rsi
movl 0x14b8(%rsi), %ecx
addl $-0x1, %ecx
movl %ecx, 0x14b8(%rsi)
movslq %ecx, %rcx
movl %edx, 0xbc0(%rax,%rcx,4)
movq -0x8(%rbp), %rdi
movq -0x10(%rbp), %rsi
callq 0xb3baf0
movq -0x18(%rbp), %rdi
movl -0x30(%rbp), %esi
movq -0x8(%rbp), %rdx
addq $0xba0, %rdx # imm = 0xBA0
callq 0xb3be50
addq $0x40, %rsp
popq %rbp
retq
nopl (%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzlib/trees.c
|
pqdownheap
|
local void pqdownheap(s, tree, k)
deflate_state *s;
ct_data *tree; /* the tree to restore */
int k; /* node to move down */
{
int v = s->heap[k];
int j = k << 1; /* left son of k */
while (j <= s->heap_len) {
/* Set j to the smallest of the two sons: */
if (j < s->heap_len &&
smaller(tree, s->heap[j+1], s->heap[j], s->depth)) {
j++;
}
/* Exit if v is smaller than both sons */
if (smaller(tree, v, s->heap[j], s->depth)) break;
/* Exchange v with the smallest son */
s->heap[k] = s->heap[j]; k = j;
/* And continue down the tree, setting j to the left son of k */
j <<= 1;
}
s->heap[k] = v;
}
|
pushq %rbp
movq %rsp, %rbp
movq %rdi, -0x8(%rbp)
movq %rsi, -0x10(%rbp)
movl %edx, -0x14(%rbp)
movq -0x8(%rbp), %rax
movslq -0x14(%rbp), %rcx
movl 0xbc0(%rax,%rcx,4), %eax
movl %eax, -0x18(%rbp)
movl -0x14(%rbp), %eax
shll %eax
movl %eax, -0x1c(%rbp)
movl -0x1c(%rbp), %eax
movq -0x8(%rbp), %rcx
cmpl 0x14b4(%rcx), %eax
jg 0xb3bace
movl -0x1c(%rbp), %eax
movq -0x8(%rbp), %rcx
cmpl 0x14b4(%rcx), %eax
jge 0xb3ba1b
movq -0x10(%rbp), %rax
movq -0x8(%rbp), %rcx
movl -0x1c(%rbp), %edx
addl $0x1, %edx
movslq %edx, %rdx
movslq 0xbc0(%rcx,%rdx,4), %rcx
movzwl (%rax,%rcx,4), %eax
movq -0x10(%rbp), %rcx
movq -0x8(%rbp), %rdx
movslq -0x1c(%rbp), %rsi
movslq 0xbc0(%rdx,%rsi,4), %rdx
movzwl (%rcx,%rdx,4), %ecx
cmpl %ecx, %eax
jl 0xb3ba12
movq -0x10(%rbp), %rax
movq -0x8(%rbp), %rcx
movl -0x1c(%rbp), %edx
addl $0x1, %edx
movslq %edx, %rdx
movslq 0xbc0(%rcx,%rdx,4), %rcx
movzwl (%rax,%rcx,4), %eax
movq -0x10(%rbp), %rcx
movq -0x8(%rbp), %rdx
movslq -0x1c(%rbp), %rsi
movslq 0xbc0(%rdx,%rsi,4), %rdx
movzwl (%rcx,%rdx,4), %ecx
cmpl %ecx, %eax
jne 0xb3ba1b
movq -0x8(%rbp), %rax
movq -0x8(%rbp), %rcx
movl -0x1c(%rbp), %edx
addl $0x1, %edx
movslq %edx, %rdx
movslq 0xbc0(%rcx,%rdx,4), %rcx
movzbl 0x14bc(%rax,%rcx), %eax
movq -0x8(%rbp), %rcx
movq -0x8(%rbp), %rdx
movslq -0x1c(%rbp), %rsi
movslq 0xbc0(%rdx,%rsi,4), %rdx
movzbl 0x14bc(%rcx,%rdx), %ecx
cmpl %ecx, %eax
jg 0xb3ba1b
movl -0x1c(%rbp), %eax
addl $0x1, %eax
movl %eax, -0x1c(%rbp)
movq -0x10(%rbp), %rax
movslq -0x18(%rbp), %rcx
movzwl (%rax,%rcx,4), %eax
movq -0x10(%rbp), %rcx
movq -0x8(%rbp), %rdx
movslq -0x1c(%rbp), %rsi
movslq 0xbc0(%rdx,%rsi,4), %rdx
movzwl (%rcx,%rdx,4), %ecx
cmpl %ecx, %eax
jl 0xb3ba9b
movq -0x10(%rbp), %rax
movslq -0x18(%rbp), %rcx
movzwl (%rax,%rcx,4), %eax
movq -0x10(%rbp), %rcx
movq -0x8(%rbp), %rdx
movslq -0x1c(%rbp), %rsi
movslq 0xbc0(%rdx,%rsi,4), %rdx
movzwl (%rcx,%rdx,4), %ecx
cmpl %ecx, %eax
jne 0xb3ba9d
movq -0x8(%rbp), %rax
movslq -0x18(%rbp), %rcx
movzbl 0x14bc(%rax,%rcx), %eax
movq -0x8(%rbp), %rcx
movq -0x8(%rbp), %rdx
movslq -0x1c(%rbp), %rsi
movslq 0xbc0(%rdx,%rsi,4), %rdx
movzbl 0x14bc(%rcx,%rdx), %ecx
cmpl %ecx, %eax
jg 0xb3ba9d
jmp 0xb3bace
movq -0x8(%rbp), %rax
movslq -0x1c(%rbp), %rcx
movl 0xbc0(%rax,%rcx,4), %edx
movq -0x8(%rbp), %rax
movslq -0x14(%rbp), %rcx
movl %edx, 0xbc0(%rax,%rcx,4)
movl -0x1c(%rbp), %eax
movl %eax, -0x14(%rbp)
movl -0x1c(%rbp), %eax
shll %eax
movl %eax, -0x1c(%rbp)
jmp 0xb3b939
movl -0x18(%rbp), %edx
movq -0x8(%rbp), %rax
movslq -0x14(%rbp), %rcx
movl %edx, 0xbc0(%rax,%rcx,4)
popq %rbp
retq
nopw %cs:(%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzlib/trees.c
|
gen_codes
|
local void gen_codes (tree, max_code, bl_count)
ct_data *tree; /* the tree to decorate */
int max_code; /* largest code with non zero frequency */
ushf *bl_count; /* number of codes at each bit length */
{
ush next_code[MAX_BITS+1]; /* next code value for each bit length */
unsigned code = 0; /* running code value */
int bits; /* bit index */
int n; /* code index */
/* The distribution counts are first used to generate the code values
* without bit reversal.
*/
for (bits = 1; bits <= MAX_BITS; bits++) {
code = (code + bl_count[bits-1]) << 1;
next_code[bits] = (ush)code;
}
/* Check that the bit counts in bl_count are consistent. The last code
* must be all ones.
*/
Assert (code + bl_count[MAX_BITS]-1 == (1<<MAX_BITS)-1,
"inconsistent bit counts");
Tracev((stderr,"\ngen_codes: max_code %d ", max_code));
for (n = 0; n <= max_code; n++) {
int len = tree[n].Len;
if (len == 0) continue;
/* Now reverse the bits */
tree[n].Code = (ush)bi_reverse(next_code[len]++, len);
Tracecv(tree != static_ltree, (stderr,"\nn %3d %c l %2d c %4x (%x) ",
n, (isgraph(n) ? n : ' '), len, tree[n].Code, next_code[len]-1));
}
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x50, %rsp
movq %rdi, -0x8(%rbp)
movl %esi, -0xc(%rbp)
movq %rdx, -0x18(%rbp)
movl $0x0, -0x44(%rbp)
movl $0x1, -0x48(%rbp)
cmpl $0xf, -0x48(%rbp)
jg 0xb3beac
movl -0x44(%rbp), %eax
movq -0x18(%rbp), %rcx
movl -0x48(%rbp), %edx
subl $0x1, %edx
movslq %edx, %rdx
movzwl (%rcx,%rdx,2), %ecx
addl %ecx, %eax
shll %eax
movl %eax, -0x44(%rbp)
movl -0x44(%rbp), %eax
movw %ax, %cx
movslq -0x48(%rbp), %rax
movw %cx, -0x40(%rbp,%rax,2)
movl -0x48(%rbp), %eax
addl $0x1, %eax
movl %eax, -0x48(%rbp)
jmp 0xb3be71
movl $0x0, -0x4c(%rbp)
movl -0x4c(%rbp), %eax
cmpl -0xc(%rbp), %eax
jg 0xb3bf0d
movq -0x8(%rbp), %rax
movslq -0x4c(%rbp), %rcx
movzwl 0x2(%rax,%rcx,4), %eax
movl %eax, -0x50(%rbp)
cmpl $0x0, -0x50(%rbp)
jne 0xb3bed3
jmp 0xb3bf02
movslq -0x50(%rbp), %rcx
movw -0x40(%rbp,%rcx,2), %ax
movw %ax, %dx
addw $0x1, %dx
movw %dx, -0x40(%rbp,%rcx,2)
movzwl %ax, %edi
movl -0x50(%rbp), %esi
callq 0xb3bf20
movw %ax, %dx
movq -0x8(%rbp), %rax
movslq -0x4c(%rbp), %rcx
movw %dx, (%rax,%rcx,4)
movl -0x4c(%rbp), %eax
addl $0x1, %eax
movl %eax, -0x4c(%rbp)
jmp 0xb3beb3
addq $0x50, %rsp
popq %rbp
retq
nopw %cs:(%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzlib/trees.c
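gen_codes applies the canonical-Huffman numbering rule: the smallest code of each bit length is (previous smallest + count of the previous length) shifted left by one, and symbols then take consecutive codes of their length in symbol order; deflate finally bit-reverses each code because it emits bits LSB-first. The following is a hedged sketch of the numbering step without the bit reversal, using a hypothetical canonical_codes() helper rather than zlib's data structures.

#include <cstdint>
#include <vector>

// Sketch: assign canonical Huffman codes (not bit-reversed) from code
// lengths, mirroring the two loops of gen_codes. MAX_BITS of 15 is assumed,
// matching deflate.
std::vector<uint16_t> canonical_codes(const std::vector<int>& len) {
    const int MAX_BITS = 15;
    std::vector<int> bl_count(MAX_BITS + 1, 0);
    for (int l : len) bl_count[l]++;
    bl_count[0] = 0;                                // unused symbols carry length 0

    std::vector<uint16_t> next_code(MAX_BITS + 1, 0);
    unsigned code = 0;
    for (int bits = 1; bits <= MAX_BITS; bits++) {
        code = (code + bl_count[bits - 1]) << 1;    // smallest code of this length
        next_code[bits] = (uint16_t)code;
    }

    std::vector<uint16_t> out(len.size(), 0);
    for (size_t n = 0; n < len.size(); n++)
        if (len[n] != 0)
            out[n] = next_code[len[n]]++;           // hand out codes in symbol order
    return out;
}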
|
adler32_combine_
|
local uLong adler32_combine_(adler1, adler2, len2)
uLong adler1;
uLong adler2;
z_off64_t len2;
{
unsigned long sum1;
unsigned long sum2;
unsigned rem;
/* for negative len, return invalid adler32 as a clue for debugging */
if (len2 < 0)
return 0xffffffffUL;
/* the derivation of this formula is left as an exercise for the reader */
MOD63(len2); /* assumes len2 >= 0 */
rem = (unsigned)len2;
sum1 = adler1 & 0xffff;
sum2 = rem * sum1;
MOD(sum2);
sum1 += (adler2 & 0xffff) + BASE - 1;
sum2 += ((adler1 >> 16) & 0xffff) + ((adler2 >> 16) & 0xffff) + BASE - rem;
if (sum1 >= BASE) sum1 -= BASE;
if (sum1 >= BASE) sum1 -= BASE;
if (sum2 >= ((unsigned long)BASE << 1)) sum2 -= ((unsigned long)BASE << 1);
if (sum2 >= BASE) sum2 -= BASE;
return sum1 | (sum2 << 16);
}
|
pushq %rbp
movq %rsp, %rbp
movq %rdi, -0x10(%rbp)
movq %rsi, -0x18(%rbp)
movq %rdx, -0x20(%rbp)
cmpq $0x0, -0x20(%rbp)
jge 0xb3d345
movl $0xffffffff, %eax # imm = 0xFFFFFFFF
movq %rax, -0x8(%rbp)
jmp 0xb3d44d
movq -0x20(%rbp), %rax
movl $0xfff1, %ecx # imm = 0xFFF1
cqto
idivq %rcx
movq %rdx, -0x20(%rbp)
movq -0x20(%rbp), %rax
movl %eax, -0x34(%rbp)
movq -0x10(%rbp), %rax
andq $0xffff, %rax # imm = 0xFFFF
movq %rax, -0x28(%rbp)
movl -0x34(%rbp), %eax
imulq -0x28(%rbp), %rax
movq %rax, -0x30(%rbp)
movq -0x30(%rbp), %rax
movl $0xfff1, %ecx # imm = 0xFFF1
xorl %edx, %edx
divq %rcx
movq %rdx, -0x30(%rbp)
movq -0x18(%rbp), %rax
andq $0xffff, %rax # imm = 0xFFFF
addq $0xfff1, %rax # imm = 0xFFF1
subq $0x1, %rax
addq -0x28(%rbp), %rax
movq %rax, -0x28(%rbp)
movq -0x10(%rbp), %rax
shrq $0x10, %rax
andq $0xffff, %rax # imm = 0xFFFF
movq -0x18(%rbp), %rcx
shrq $0x10, %rcx
andq $0xffff, %rcx # imm = 0xFFFF
addq %rcx, %rax
addq $0xfff1, %rax # imm = 0xFFF1
movl -0x34(%rbp), %ecx
subq %rcx, %rax
addq -0x30(%rbp), %rax
movq %rax, -0x30(%rbp)
cmpq $0xfff1, -0x28(%rbp) # imm = 0xFFF1
jb 0xb3d3f2
movq -0x28(%rbp), %rax
subq $0xfff1, %rax # imm = 0xFFF1
movq %rax, -0x28(%rbp)
cmpq $0xfff1, -0x28(%rbp) # imm = 0xFFF1
jb 0xb3d40a
movq -0x28(%rbp), %rax
subq $0xfff1, %rax # imm = 0xFFF1
movq %rax, -0x28(%rbp)
cmpq $0x1ffe2, -0x30(%rbp) # imm = 0x1FFE2
jb 0xb3d422
movq -0x30(%rbp), %rax
subq $0x1ffe2, %rax # imm = 0x1FFE2
movq %rax, -0x30(%rbp)
cmpq $0xfff1, -0x30(%rbp) # imm = 0xFFF1
jb 0xb3d43a
movq -0x30(%rbp), %rax
subq $0xfff1, %rax # imm = 0xFFF1
movq %rax, -0x30(%rbp)
movq -0x28(%rbp), %rax
movq -0x30(%rbp), %rcx
shlq $0x10, %rcx
orq %rcx, %rax
movq %rax, -0x8(%rbp)
movq -0x8(%rbp), %rax
popq %rbp
retq
nopw %cs:(%rax,%rax)
|
/JKorbelRA[P]CMake/Utilities/cmzlib/adler32.c
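The "exercise for the reader" is short: if A and B have Adler-32 component sums (s1_A, s2_A) and (s1_B, s2_B), and B has length len2, then each of B's len2 bytes folds the already-accumulated s1_A into s2 once, so the concatenation AB satisfies s1_AB = (s1_A + s1_B - 1) mod 65521 and s2_AB = (s2_A + s2_B + len2*(s1_A - 1)) mod 65521. The sum1/sum2 arithmetic above is exactly this formula with the modular corrections written out. Below is a self-contained sketch that checks the formula against a naive reference Adler-32; it is illustrative, not zlib's optimized adler32().

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

static const uint32_t BASE = 65521;

// Naive reference Adler-32 over a byte buffer (illustration only).
uint32_t adler32_ref(const uint8_t* p, size_t n, uint32_t adler = 1) {
    uint32_t s1 = adler & 0xffff, s2 = (adler >> 16) & 0xffff;
    for (size_t i = 0; i < n; i++) {
        s1 = (s1 + p[i]) % BASE;
        s2 = (s2 + s1) % BASE;
    }
    return (s2 << 16) | s1;
}

// Combine per the formula above; mirrors adler32_combine_ without the
// staged reductions the real code uses to avoid overflow on huge len2.
uint32_t adler32_combine_ref(uint32_t adler1, uint32_t adler2, uint64_t len2) {
    uint64_t rem = len2 % BASE;
    uint64_t s1 = adler1 & 0xffff;
    uint64_t s2 = (rem * s1) % BASE;
    s1 = (s1 + (adler2 & 0xffff) + BASE - 1) % BASE;
    s2 = (s2 + ((adler1 >> 16) & 0xffff) + ((adler2 >> 16) & 0xffff) + BASE - rem) % BASE;
    return (uint32_t)((s2 << 16) | s1);
}

int main() {
    std::vector<uint8_t> a = {'h', 'e', 'l', 'l', 'o'};
    std::vector<uint8_t> b = {' ', 'w', 'o', 'r', 'l', 'd'};
    std::vector<uint8_t> ab = a;
    ab.insert(ab.end(), b.begin(), b.end());
    uint32_t c1 = adler32_ref(a.data(), a.size());
    uint32_t c2 = adler32_ref(b.data(), b.size());
    assert(adler32_combine_ref(c1, c2, b.size()) == adler32_ref(ab.data(), ab.size()));
    return 0;
}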
|
pause_if_needed()
|
void pause_if_needed() {
#ifdef _WIN32
// For Windows, prompt for a keystroke before the Gnuplot object goes out of scope so that
// the gnuplot window doesn't get closed.
std::cout << "Press enter to exit." << std::endl;
std::cin.get();
#endif
}
|
retq
|
/dstahlke[P]gnuplot-iostream/example-misc.cc
|
demo_binary()
|
void demo_binary() {
Gnuplot gp;
std::vector<std::pair<double, double>> xy_pts_A;
for(double x=-2; x<2; x+=0.01) {
double y = x*x*x;
xy_pts_A.emplace_back(x, y);
}
std::vector<std::pair<double, double>> xy_pts_B;
for(double alpha=0; alpha<1; alpha+=1.0/24.0) {
double theta = alpha*2.0*3.14159;
xy_pts_B.emplace_back(cos(theta), sin(theta));
}
gp << "set xrange [-2:2]\nset yrange [-2:2]\n";
gp << "plot '-' binary" << gp.binFmt1d(xy_pts_A, "record") << "with lines title 'cubic',"
<< "'-' binary" << gp.binFmt1d(xy_pts_B, "record") << "with points title 'circle'\n";
gp.sendBinary1d(xy_pts_A);
gp.sendBinary1d(xy_pts_B);
pause_if_needed();
}
|
pushq %r15
pushq %r14
pushq %rbx
subq $0x2a0, %rsp # imm = 0x2A0
leaq 0xa8f3(%rip), %rsi # 0x10241
leaq 0x10(%rsp), %rdi
leaq 0x30(%rsp), %rdx
callq 0x7ab6
leaq 0xd8(%rsp), %rdi
leaq 0x10(%rsp), %rsi
callq 0x7af2
leaq 0x10(%rsp), %rbx
movq %rbx, %rdi
callq 0x5190
xorps %xmm0, %xmm0
leaq 0x80(%rsp), %r14
movaps %xmm0, (%r14)
andq $0x0, 0x10(%r14)
movsd 0xa670(%rip), %xmm0 # 0x10008
leaq 0x30(%rsp), %r15
movsd %xmm0, 0x10(%rsp)
movsd 0xa665(%rip), %xmm1 # 0x10010
ucomisd %xmm0, %xmm1
jbe 0x59e1
movapd %xmm0, %xmm1
mulsd %xmm0, %xmm1
mulsd %xmm0, %xmm1
movsd %xmm1, 0x30(%rsp)
movq %r14, %rdi
movq %rbx, %rsi
movq %r15, %rdx
callq 0x7bf4
movsd 0x10(%rsp), %xmm0
addsd 0xa651(%rip), %xmm0 # 0x10030
jmp 0x599d
leaq 0x60(%rsp), %rbx
andq $0x0, 0x10(%rbx)
xorpd %xmm0, %xmm0
movapd %xmm0, (%rbx)
xorpd %xmm0, %xmm0
leaq 0x10(%rsp), %r14
leaq 0x30(%rsp), %r15
movsd 0xa60f(%rip), %xmm1 # 0x10018
ucomisd %xmm0, %xmm1
jbe 0x5a61
movsd %xmm0, 0x50(%rsp)
addsd %xmm0, %xmm0
mulsd 0xa5ff(%rip), %xmm0 # 0x10020
movsd %xmm0, 0x58(%rsp)
callq 0x55c0
movsd %xmm0, 0x10(%rsp)
movsd 0x58(%rsp), %xmm0
callq 0x50a0
movsd %xmm0, 0x30(%rsp)
movq %rbx, %rdi
movq %r14, %rsi
movq %r15, %rdx
callq 0x7c38
movsd 0x50(%rsp), %xmm0
addsd 0xa5c9(%rip), %xmm0 # 0x10028
jmp 0x5a01
leaq 0xa668(%rip), %rsi # 0x100d0
leaq 0xd8(%rsp), %rdi
callq 0x5320
leaq 0xa84b(%rip), %rsi # 0x102c7
leaq 0xd8(%rsp), %rdi
callq 0x5320
movq %rax, %rbx
leaq 0xa6a5(%rip), %rsi # 0x10138
leaq 0x30(%rsp), %rdi
leaq 0xf(%rsp), %rdx
callq 0x7ab6
leaq 0x10(%rsp), %rdi
leaq 0xd8(%rsp), %rsi
leaq 0x80(%rsp), %rdx
leaq 0x30(%rsp), %rcx
callq 0xc31e
leaq 0x10(%rsp), %rsi
movq %rbx, %rdi
callq 0x52e0
leaq 0xa66a(%rip), %rsi # 0x1013f
movq %rax, %rdi
callq 0x5320
leaq 0xa7e8(%rip), %rsi # 0x102cc
movq %rax, %rdi
callq 0x5320
movq %rax, %rbx
leaq 0xa642(%rip), %rsi # 0x10138
leaq 0x98(%rsp), %rdi
leaq 0xe(%rsp), %rdx
callq 0x7ab6
leaq 0xb8(%rsp), %rdi
leaq 0xd8(%rsp), %rsi
leaq 0x60(%rsp), %rdx
leaq 0x98(%rsp), %rcx
callq 0xc31e
leaq 0xb8(%rsp), %rsi
movq %rbx, %rdi
callq 0x52e0
leaq 0xa5db(%rip), %rsi # 0x1011c
movq %rax, %rdi
callq 0x5320
leaq 0xb8(%rsp), %rdi
callq 0x5190
leaq 0x98(%rsp), %rdi
callq 0x5190
leaq 0x10(%rsp), %rdi
callq 0x5190
leaq 0x30(%rsp), %rdi
callq 0x5190
leaq 0xd8(%rsp), %rdi
leaq 0x80(%rsp), %rsi
callq 0xc640
leaq 0xd8(%rsp), %rdi
leaq 0x60(%rsp), %rsi
callq 0xc640
leaq 0x60(%rsp), %rdi
callq 0xbf5a
leaq 0x80(%rsp), %rdi
callq 0xbf5a
leaq 0xd8(%rsp), %rdi
callq 0x7c7c
addq $0x2a0, %rsp # imm = 0x2A0
popq %rbx
popq %r14
popq %r15
retq
movq %rax, %rbx
jmp 0x5c01
jmp 0x5c10
movq %rax, %rbx
jmp 0x5c1d
jmp 0x5c2b
movq %rax, %rbx
leaq 0x10(%rsp), %rdi
callq 0x5190
jmp 0x5c57
movq %rax, %rbx
jmp 0x5c57
movq %rax, %rbx
leaq 0xb8(%rsp), %rdi
callq 0x5190
leaq 0x98(%rsp), %rdi
callq 0x5190
jmp 0x5c13
movq %rax, %rbx
leaq 0x10(%rsp), %rdi
callq 0x5190
leaq 0x30(%rsp), %rdi
callq 0x5190
jmp 0x5c2e
jmp 0x5c2b
movq %rax, %rbx
leaq 0x60(%rsp), %rdi
callq 0xbf5a
jmp 0x5c3d
movq %rax, %rbx
leaq 0x80(%rsp), %rdi
callq 0xbf5a
leaq 0xd8(%rsp), %rdi
callq 0x7c7c
movq %rbx, %rdi
callq 0x5570
|
/dstahlke[P]gnuplot-iostream/example-misc.cc
|
demo_tmpfile()
|
void demo_tmpfile() {
Gnuplot gp;
std::vector<std::pair<double, double>> xy_pts_A;
for(double x=-2; x<2; x+=0.01) {
double y = x*x*x;
xy_pts_A.emplace_back(x, y);
}
std::vector<std::pair<double, double>> xy_pts_B;
for(double alpha=0; alpha<1; alpha+=1.0/24.0) {
double theta = alpha*2.0*3.14159;
xy_pts_B.emplace_back(cos(theta), sin(theta));
}
gp << "set xrange [-2:2]\nset yrange [-2:2]\n";
// Data will be sent via a temporary file. These are erased when you call
// gp.clearTmpfiles() or when gp goes out of scope. If you pass a filename
// (i.e. `gp.file1d(pts, "mydata.dat")`), then the named file will be created
// and won't be deleted.
//
// Note: you need std::endl here in order to flush the buffer. The send1d()
// function flushes automatically, but we're not using that here.
gp << "plot" << gp.file1d(xy_pts_A) << "with lines title 'cubic',"
<< gp.file1d(xy_pts_B) << "with points title 'circle'" << std::endl;
pause_if_needed();
}
|
pushq %r15
pushq %r14
pushq %rbx
subq $0x2a0, %rsp # imm = 0x2A0
leaq 0xa5cf(%rip), %rsi # 0x10241
leaq 0x10(%rsp), %rdi
leaq 0x30(%rsp), %rdx
callq 0x7ab6
leaq 0xd8(%rsp), %rdi
leaq 0x10(%rsp), %rsi
callq 0x7af2
leaq 0x10(%rsp), %rbx
movq %rbx, %rdi
callq 0x5190
xorps %xmm0, %xmm0
leaq 0x80(%rsp), %r14
movaps %xmm0, (%r14)
andq $0x0, 0x10(%r14)
movsd 0xa34c(%rip), %xmm0 # 0x10008
leaq 0x30(%rsp), %r15
movsd %xmm0, 0x10(%rsp)
movsd 0xa341(%rip), %xmm1 # 0x10010
ucomisd %xmm0, %xmm1
jbe 0x5d05
movapd %xmm0, %xmm1
mulsd %xmm0, %xmm1
mulsd %xmm0, %xmm1
movsd %xmm1, 0x30(%rsp)
movq %r14, %rdi
movq %rbx, %rsi
movq %r15, %rdx
callq 0x7bf4
movsd 0x10(%rsp), %xmm0
addsd 0xa32d(%rip), %xmm0 # 0x10030
jmp 0x5cc1
leaq 0x60(%rsp), %rbx
andq $0x0, 0x10(%rbx)
xorpd %xmm0, %xmm0
movapd %xmm0, (%rbx)
xorpd %xmm0, %xmm0
leaq 0x10(%rsp), %r14
leaq 0x30(%rsp), %r15
movsd 0xa2eb(%rip), %xmm1 # 0x10018
ucomisd %xmm0, %xmm1
jbe 0x5d85
movsd %xmm0, 0x50(%rsp)
addsd %xmm0, %xmm0
mulsd 0xa2db(%rip), %xmm0 # 0x10020
movsd %xmm0, 0x58(%rsp)
callq 0x55c0
movsd %xmm0, 0x10(%rsp)
movsd 0x58(%rsp), %xmm0
callq 0x50a0
movsd %xmm0, 0x30(%rsp)
movq %rbx, %rdi
movq %r14, %rsi
movq %r15, %rdx
callq 0x7c38
movsd 0x50(%rsp), %xmm0
addsd 0xa2a5(%rip), %xmm0 # 0x10028
jmp 0x5d25
leaq 0xa344(%rip), %rsi # 0x100d0
leaq 0xd8(%rsp), %rdi
callq 0x5320
leaq 0xa59d(%rip), %rsi # 0x1033d
leaq 0xd8(%rsp), %rdi
callq 0x5320
movq %rax, %rbx
leaq 0xa48a(%rip), %rsi # 0x10241
leaq 0x30(%rsp), %rdi
leaq 0xf(%rsp), %rdx
callq 0x7ab6
leaq 0x10(%rsp), %rdi
leaq 0xd8(%rsp), %rsi
leaq 0x80(%rsp), %rdx
leaq 0x30(%rsp), %rcx
callq 0x7c9c
leaq 0x10(%rsp), %rsi
movq %rbx, %rdi
callq 0x52e0
leaq 0xa346(%rip), %rsi # 0x1013f
movq %rax, %rdi
callq 0x5320
movq %rax, %rbx
leaq 0xa436(%rip), %rsi # 0x10241
leaq 0x98(%rsp), %rdi
leaq 0xe(%rsp), %rdx
callq 0x7ab6
leaq 0xb8(%rsp), %rdi
leaq 0xd8(%rsp), %rsi
leaq 0x60(%rsp), %rdx
leaq 0x98(%rsp), %rcx
callq 0x7c9c
leaq 0xb8(%rsp), %rsi
movq %rbx, %rdi
callq 0x52e0
leaq 0xa303(%rip), %rsi # 0x10159
movq %rax, %rdi
callq 0x5320
movq %rax, %rdi
callq 0x51b0
leaq 0xb8(%rsp), %rdi
callq 0x5190
leaq 0x98(%rsp), %rdi
callq 0x5190
leaq 0x10(%rsp), %rdi
callq 0x5190
leaq 0x30(%rsp), %rdi
callq 0x5190
leaq 0x60(%rsp), %rdi
callq 0xbf5a
leaq 0x80(%rsp), %rdi
callq 0xbf5a
leaq 0xd8(%rsp), %rdi
callq 0x7c7c
addq $0x2a0, %rsp # imm = 0x2A0
popq %rbx
popq %r14
popq %r15
retq
movq %rax, %rbx
jmp 0x5efe
jmp 0x5ee7
movq %rax, %rbx
jmp 0x5f15
jmp 0x5f21
movq %rax, %rbx
leaq 0x10(%rsp), %rdi
callq 0x5190
jmp 0x5f4d
movq %rax, %rbx
jmp 0x5f4d
movq %rax, %rbx
jmp 0x5f0b
jmp 0x5f21
movq %rax, %rbx
leaq 0xb8(%rsp), %rdi
callq 0x5190
leaq 0x98(%rsp), %rdi
callq 0x5190
leaq 0x10(%rsp), %rdi
callq 0x5190
leaq 0x30(%rsp), %rdi
callq 0x5190
jmp 0x5f24
movq %rax, %rbx
leaq 0x60(%rsp), %rdi
callq 0xbf5a
jmp 0x5f33
movq %rax, %rbx
leaq 0x80(%rsp), %rdi
callq 0xbf5a
leaq 0xd8(%rsp), %rdi
callq 0x7c7c
movq %rbx, %rdi
callq 0x5570
|
/dstahlke[P]gnuplot-iostream/example-misc.cc
|
demo_image()
|
void demo_image() {
// Example of plotting an image. Of course you are free (and encouraged) to
// use Blitz or Armadillo rather than std::vector in these situations.
Gnuplot gp;
std::vector<std::vector<double>> image;
for(int j=0; j<100; j++) {
std::vector<double> row;
for(int i=0; i<100; i++) {
double x = (i-50.0)/5.0;
double y = (j-50.0)/5.0;
double z = std::cos(sqrt(x*x+y*y));
row.push_back(z);
}
image.push_back(row);
}
// It may seem counterintuitive that send1d should be used rather than
// send2d. The explanation is as follows. The "send2d" method puts each
// value on its own line, with blank lines between rows. This is what is
// expected by the splot command. The two "dimensions" here are the lines
// and the blank-line-delimited blocks. The "send1d" method doesn't group
// things into blocks. So the elements of each row are printed as columns,
// as expected by Gnuplot's "matrix with image" command. But images
// typically have lots of pixels, so sending as text is not the most
// efficient (although, it's not really that bad in the case of this
// example). See the binary version below.
//
//gp << "plot '-' matrix with image\n";
//gp.send1d(image);
// To be honest, Gnuplot's documentation for "binary" and for "image" are
// both unclear to me. The following example comes by trial-and-error.
gp << "plot '-' binary" << gp.binFmt2d(image, "array") << "with image\n";
gp.sendBinary2d(image);
pause_if_needed();
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x248, %rsp # imm = 0x248
leaq 0x935b(%rip), %rsi # 0x10241
leaq 0x10(%rsp), %rdi
leaq 0x40(%rsp), %rdx
callq 0x7ab6
leaq 0x80(%rsp), %rdi
leaq 0x10(%rsp), %rsi
callq 0x7af2
leaq 0x10(%rsp), %rbx
movq %rbx, %rdi
callq 0x5190
xorpd %xmm0, %xmm0
leaq 0x60(%rsp), %r14
movapd %xmm0, (%r14)
andq $0x0, 0x10(%r14)
xorl %r12d, %r12d
leaq 0x40(%rsp), %r15
cmpl $0x64, %r12d
je 0x6fc3
andq $0x0, 0x20(%rsp)
xorpd %xmm0, %xmm0
movapd %xmm0, 0x10(%rsp)
leal -0x32(%r12), %eax
cvtsi2sd %eax, %xmm0
divsd 0x913e(%rip), %xmm0 # 0x10098
mulsd %xmm0, %xmm0
movsd %xmm0, 0x38(%rsp)
xorl %r13d, %r13d
pushq $0x64
popq %rbp
subl $0x1, %ebp
jb 0x6fa8
leal -0x32(%r13), %eax
cvtsi2sd %eax, %xmm0
divsd 0x9119(%rip), %xmm0 # 0x10098
mulsd %xmm0, %xmm0
addsd 0x38(%rsp), %xmm0
sqrtsd %xmm0, %xmm0
callq 0x55c0
movsd %xmm0, 0x40(%rsp)
movq %rbx, %rdi
movq %r15, %rsi
callq 0x7d00
incl %r13d
jmp 0x6f6a
movq %r14, %rdi
movq %rbx, %rsi
callq 0x8038
movq %rbx, %rdi
callq 0xc826
incl %r12d
jmp 0x6f2f
leaq 0x92fd(%rip), %rsi # 0x102c7
leaq 0x80(%rsp), %rdi
callq 0x5320
movq %rax, %rbx
leaq 0x944a(%rip), %rsi # 0x1042b
leaq 0x40(%rsp), %rdi
leaq 0xf(%rsp), %rdx
callq 0x7ab6
leaq 0x10(%rsp), %rdi
leaq 0x80(%rsp), %rsi
leaq 0x60(%rsp), %rdx
leaq 0x40(%rsp), %rcx
callq 0xe2a6
leaq 0x10(%rsp), %rsi
movq %rbx, %rdi
callq 0x52e0
leaq 0x9448(%rip), %rsi # 0x10468
movq %rax, %rdi
callq 0x5320
leaq 0x10(%rsp), %rdi
callq 0x5190
leaq 0x40(%rsp), %rdi
callq 0x5190
leaq 0x80(%rsp), %rdi
leaq 0x60(%rsp), %rsi
callq 0xe65e
leaq 0x60(%rsp), %rdi
callq 0x8064
leaq 0x80(%rsp), %rdi
callq 0x7c7c
addq $0x248, %rsp # imm = 0x248
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq %rax, %rbx
jmp 0x709f
jmp 0x70ab
movq %rax, %rbx
leaq 0x10(%rsp), %rdi
callq 0x5190
jmp 0x70d6
movq %rax, %rbx
jmp 0x70d6
movq %rax, %rbx
leaq 0x10(%rsp), %rdi
callq 0x5190
leaq 0x40(%rsp), %rdi
callq 0x5190
jmp 0x70bf
movq %rax, %rbx
jmp 0x70bf
jmp 0x70b2
movq %rax, %rbx
leaq 0x10(%rsp), %rdi
callq 0xc826
leaq 0x60(%rsp), %rdi
callq 0x8064
leaq 0x80(%rsp), %rdi
callq 0x7c7c
movq %rbx, %rdi
callq 0x5570
|
/dstahlke[P]gnuplot-iostream/example-misc.cc
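The long comment in demo_image is the key how-to: send2d() writes one value per line with a blank line between rows (the layout splot expects), while send1d() writes each row as one whitespace-separated line (the layout Gnuplot's "matrix with image" expects). The sketch below prints the two text layouts for a 2x3 matrix with plain iostreams; it does not call gnuplot-iostream, it only illustrates what the comment describes.

#include <iostream>
#include <vector>

int main() {
    std::vector<std::vector<double>> m = {{1, 2, 3}, {4, 5, 6}};

    // send1d-style: each row on its own line, values as columns.
    std::cout << "-- matrix-with-image layout --\n";
    for (const auto& row : m) {
        for (double v : row) std::cout << v << ' ';
        std::cout << '\n';
    }

    // send2d-style: one value per line, blank line between rows.
    std::cout << "-- splot layout --\n";
    for (const auto& row : m) {
        for (double v : row) std::cout << v << '\n';
        std::cout << '\n';
    }
    return 0;
}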
|
gnuplotio::Gnuplot::Gnuplot(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> const&)
|
explicit Gnuplot(const std::string &_cmd="") :
FileHandleWrapper(open_cmdline(_cmd)),
boost::iostreams::stream<boost::iostreams::file_descriptor_sink>(
fh_fileno(),
#if BOOST_VERSION >= 104400
boost::iostreams::never_close_handle
#else
false
#endif
),
feedback(nullptr),
tmp_files(new GnuplotTmpfileCollection()),
debug_messages(false),
transport_tmpfile(false)
{
set_stream_options(*this);
}
|
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
pushq %rax
movq %rsi, %r15
movq %rdi, %r14
leaq 0xc0(%rdi), %rbx
movq %rbx, %rdi
callq 0x5140
movq %r15, %rdi
callq 0x8222
leaq 0x90(%r14), %rdi
movq %rax, 0x90(%r14)
movb %dl, 0x98(%r14)
leaq 0x11845(%rip), %r15 # 0x19378
movq %r15, (%r14)
leaq 0x11863(%rip), %r12 # 0x193a0
movq %r12, 0xc0(%r14)
callq 0x83d0
leaq 0x4(%rsp), %rdx
movl %eax, (%rdx)
movq %rsp, %rcx
andl $0x0, (%rcx)
leaq 0x1185b(%rip), %rsi # 0x193b8
movq %r14, %rdi
xorl %r8d, %r8d
callq 0x83d8
movq %r15, (%r14)
movq %r12, 0xc0(%r14)
andq $0x0, 0xa0(%r14)
pushq $0x18
popq %rdi
callq 0x5340
leaq 0xa8(%r14), %rdi
xorps %xmm0, %xmm0
movups %xmm0, (%rax)
andq $0x0, 0x10(%rax)
movq %rax, %rsi
callq 0x95a4
andw $0x0, 0xb8(%r14)
movq (%r14), %rax
movq -0x18(%rax), %rcx
andl $0xfffffefb, 0x18(%r14,%rcx) # imm = 0xFFFFFEFB
movq -0x18(%rax), %rax
movq $0x11, 0x8(%r14,%rax)
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
retq
jmp 0x7bd0
movq %rax, %r15
jmp 0x7be4
movq %rax, %r15
addq $0x8, %r14
movq %r14, %rdi
callq 0x877a
movq %rbx, %rdi
callq 0x5110
movq %r15, %rdi
callq 0x5570
|
/dstahlke[P]gnuplot-iostream/gnuplot-iostream.h
|
gnuplotio::Gnuplot::Gnuplot(_IO_FILE*)
|
explicit Gnuplot(FILE *_fh) :
FileHandleWrapper(_fh, 0),
boost::iostreams::stream<boost::iostreams::file_descriptor_sink>(
fh_fileno(),
#if BOOST_VERSION >= 104400
boost::iostreams::never_close_handle
#else
false
#endif
),
feedback(nullptr),
tmp_files(new GnuplotTmpfileCollection()),
debug_messages(false),
transport_tmpfile(false)
{
set_stream_options(*this);
}
|
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
pushq %rax
movq %rsi, %r15
movq %rdi, %r14
leaq 0xc0(%rdi), %rbx
movq %rbx, %rdi
callq 0x5140
leaq 0x90(%r14), %rdi
movq %r15, 0x90(%r14)
movb $0x0, 0x98(%r14)
leaq 0x1156c(%rip), %r15 # 0x19378
movq %r15, (%r14)
leaq 0x1158a(%rip), %r12 # 0x193a0
movq %r12, 0xc0(%r14)
callq 0x83d0
leaq 0x4(%rsp), %rdx
movl %eax, (%rdx)
movq %rsp, %rcx
andl $0x0, (%rcx)
leaq 0x11582(%rip), %rsi # 0x193b8
movq %r14, %rdi
xorl %r8d, %r8d
callq 0x83d8
movq %r15, (%r14)
movq %r12, 0xc0(%r14)
andq $0x0, 0xa0(%r14)
pushq $0x18
popq %rdi
callq 0x5340
leaq 0xa8(%r14), %rdi
xorps %xmm0, %xmm0
movups %xmm0, (%rax)
andq $0x0, 0x10(%rax)
movq %rax, %rsi
callq 0x95a4
andw $0x0, 0xb8(%r14)
movq (%r14), %rax
movq -0x18(%rax), %rcx
andl $0xfffffefb, 0x18(%r14,%rcx) # imm = 0xFFFFFEFB
movq -0x18(%rax), %rax
movq $0x11, 0x8(%r14,%rax)
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
retq
movq %rax, %r15
jmp 0x7ebb
movq %rax, %r15
addq $0x8, %r14
movq %r14, %rdi
callq 0x877a
movq %rbx, %rdi
callq 0x5110
movq %r15, %rdi
callq 0x5570
nop
|
/dstahlke[P]gnuplot-iostream/gnuplot-iostream.h
|
virtual thunk to gnuplotio::Gnuplot::~Gnuplot()
|
~Gnuplot() {
if(debug_messages) {
std::cerr << "ending gnuplot session" << std::endl;
}
// FIXME - boost's close method calls close() on the file descriptor, but we need to
// sometimes use pclose instead. For now, just skip calling boost's close and use
// flush just in case.
do_flush();
// Wish boost had a pclose method...
//close();
fh_close();
delete feedback;
}
|
movq (%rdi), %rax
addq -0x18(%rax), %rdi
jmp 0x7c7c
|
/dstahlke[P]gnuplot-iostream/gnuplot-iostream.h
|
virtual thunk to gnuplotio::Gnuplot::~Gnuplot()
|
~Gnuplot() {
if(debug_messages) {
std::cerr << "ending gnuplot session" << std::endl;
}
// FIXME - boost's close method calls close() on the file descriptor, but we need to
// sometimes use pclose instead. For now, just skip calling boost's close and use
// flush just in case.
do_flush();
// Wish boost had a pclose method...
//close();
fh_close();
delete feedback;
}
|
movq (%rdi), %rax
addq -0x18(%rax), %rdi
jmp 0x8508
|
/dstahlke[P]gnuplot-iostream/gnuplot-iostream.h
|
gnuplotio::Gnuplot::get_default_cmd[abi:cxx11]()
|
static std::string get_default_cmd() {
GNUPLOT_MSVC_WARNING_4996_PUSH
char *from_env = std::getenv("GNUPLOT_IOSTREAM_CMD");
GNUPLOT_MSVC_WARNING_4996_POP
if(from_env && from_env[0]) {
return from_env;
} else {
return GNUPLOT_DEFAULT_COMMAND;
}
}
|
pushq %rbx
subq $0x10, %rsp
movq %rdi, %rbx
leaq 0x8184(%rip), %rdi # 0x106cb
callq 0x53c0
testq %rax, %rax
je 0x8563
cmpb $0x0, (%rax)
je 0x8563
leaq 0xf(%rsp), %rdx
movq %rbx, %rdi
movq %rax, %rsi
jmp 0x8572
leaq 0x8176(%rip), %rsi # 0x106e0
leaq 0xe(%rsp), %rdx
movq %rbx, %rdi
callq 0x7ab6
movq %rbx, %rax
addq $0x10, %rsp
popq %rbx
retq
|
/dstahlke[P]gnuplot-iostream/gnuplot-iostream.h
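Because get_default_cmd() consults the GNUPLOT_IOSTREAM_CMD environment variable before falling back to GNUPLOT_DEFAULT_COMMAND, the pipeline command can be overridden without touching code. A small hedged sketch follows; it assumes, as the empty default constructor argument suggests, that open_cmdline("") ends up in get_default_cmd(), and it uses POSIX setenv() (on Windows, _putenv_s() plays the same role).

#include <cstdlib>
#include "gnuplot-iostream.h"

int main() {
    // Must be set before the Gnuplot object is constructed, since the
    // command line is chosen at construction time.
    setenv("GNUPLOT_IOSTREAM_CMD", "gnuplot -persist", 1 /*overwrite*/);

    Gnuplot gp;              // default ctor -> env var override, per get_default_cmd()
    gp << "plot sin(x)\n";
    return 0;
}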
|