name
stringlengths
1
473k
code
stringlengths
7
647k
asm
stringlengths
4
3.39M
file
stringlengths
8
196
aom_memalign
// Allocates |size| bytes whose start address is aligned to |align| bytes.
// The raw malloc() pointer is stashed just before the aligned block (via
// SetActualMallocAddress) so the matching free routine can recover it.
// Returns NULL on overflow of the padded request or on malloc failure.
void *aom_memalign(size_t align, size_t size) {
  if (!check_size_argument_overflow(1, size, align)) return NULL;

  const size_t padded_size = size + GetAllocationPaddingSize(align);
  void *const raw = malloc(padded_size);
  if (raw == NULL) return NULL;

  void *const aligned =
      aom_align_addr((unsigned char *)raw + ADDRESS_STORAGE_SIZE, align);
  SetActualMallocAddress(aligned, raw);
  return aligned;
}
subq $0x38, %rsp movq %rdi, 0x28(%rsp) movq %rsi, 0x20(%rsp) movq $0x0, 0x18(%rsp) movq 0x20(%rsp), %rsi movq 0x28(%rsp), %rdx movl $0x1, %edi callq 0xa0c90 cmpl $0x0, %eax jne 0xa0bfb movq $0x0, 0x30(%rsp) jmp 0xa0c77 movq 0x20(%rsp), %rax movq %rax, (%rsp) movq 0x28(%rsp), %rdi callq 0xa0d10 movq %rax, %rcx movq (%rsp), %rax addq %rcx, %rax movq %rax, 0x10(%rsp) movq 0x10(%rsp), %rdi callq 0x18620 movq %rax, 0x8(%rsp) cmpq $0x0, 0x8(%rsp) je 0xa0c6d movq 0x8(%rsp), %rax addq $0x8, %rax movq 0x28(%rsp), %rcx subq $0x1, %rcx addq %rcx, %rax movq 0x28(%rsp), %rcx subq $0x1, %rcx xorq $-0x1, %rcx andq %rcx, %rax movq %rax, 0x18(%rsp) movq 0x18(%rsp), %rdi movq 0x8(%rsp), %rsi callq 0xa0d30 movq 0x18(%rsp), %rax movq %rax, 0x30(%rsp) movq 0x30(%rsp), %rax addq $0x38, %rsp retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/aom_mem/aom_mem.c
aom_read_obu_header
// Parses a single OBU header from the start of |buffer| and reports via
// |consumed| how many bytes the header occupied. Returns
// AOM_CODEC_INVALID_PARAM for an empty buffer or NULL out-arguments.
aom_codec_err_t aom_read_obu_header(uint8_t *buffer, size_t buffer_length,
                                    size_t *consumed, ObuHeader *header,
                                    int is_annexb) {
  if (buffer_length < 1 || !consumed || !header) return AOM_CODEC_INVALID_PARAM;

  // TODO(tomfinegan): Set the error handler here and throughout this file, and
  // confirm parsing work done via aom_read_bit_buffer is successful.
  struct aom_read_bit_buffer rb = { buffer, buffer + buffer_length, 0, NULL,
                                    NULL };
  const aom_codec_err_t parse_result = read_obu_header(&rb, is_annexb, header);
  if (parse_result != AOM_CODEC_OK) return parse_result;

  *consumed = header->size;
  return parse_result;
}
subq $0x68, %rsp movq %rdi, 0x58(%rsp) movq %rsi, 0x50(%rsp) movq %rdx, 0x48(%rsp) movq %rcx, 0x40(%rsp) movl %r8d, 0x3c(%rsp) cmpq $0x1, 0x50(%rsp) jb 0xa0ea5 cmpq $0x0, 0x48(%rsp) je 0xa0ea5 cmpq $0x0, 0x40(%rsp) jne 0xa0eaf movl $0x8, 0x64(%rsp) jmp 0xa0f18 movq 0x58(%rsp), %rax movq %rax, 0x10(%rsp) movq 0x58(%rsp), %rax addq 0x50(%rsp), %rax movq %rax, 0x18(%rsp) movl $0x0, 0x20(%rsp) movq $0x0, 0x28(%rsp) movq $0x0, 0x30(%rsp) movl 0x3c(%rsp), %esi movq 0x40(%rsp), %rdx leaq 0x10(%rsp), %rdi callq 0xa0f30 movl %eax, 0xc(%rsp) cmpl $0x0, 0xc(%rsp) jne 0xa0f10 movq 0x40(%rsp), %rax movq (%rax), %rcx movq 0x48(%rsp), %rax movq %rcx, (%rax) movl 0xc(%rsp), %eax movl %eax, 0x64(%rsp) movl 0x64(%rsp), %eax addq $0x68, %rsp retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/av1/common/obu_util.c
aom_read_obu_header_and_size
// Parses an OBU header plus its length field(s) starting at |data| and
// returns the payload size and the total number of header/length bytes
// consumed. In Annex B streams the length field precedes the header and
// covers it; otherwise the OBU's own size field (after the header) is the
// payload size.
aom_codec_err_t aom_read_obu_header_and_size(const uint8_t *data,
                                             size_t bytes_available,
                                             int is_annexb,
                                             ObuHeader *obu_header,
                                             size_t *const payload_size,
                                             size_t *const bytes_read) {
  size_t length_field_size_obu = 0;
  size_t length_field_size_payload = 0;
  size_t obu_size = 0;
  aom_codec_err_t status;
  if (is_annexb) {
    // Size field comes before the OBU header, and includes the OBU header
    status = read_obu_size(data, bytes_available, &obu_size,
                           &length_field_size_obu);
    if (status != AOM_CODEC_OK) return status;
  }
  struct aom_read_bit_buffer rb = { data + length_field_size_obu,
                                    data + bytes_available, 0, NULL, NULL };
  status = read_obu_header(&rb, is_annexb, obu_header);
  if (status != AOM_CODEC_OK) return status;
  if (!obu_header->has_size_field) {
    // Only Annex B OBUs may omit the size field (the outer length covers it).
    assert(is_annexb);
    // Derive the payload size from the data we've already read
    if (obu_size < obu_header->size) return AOM_CODEC_CORRUPT_FRAME;
    *payload_size = obu_size - obu_header->size;
  } else {
    // Size field comes after the OBU header, and is just the payload size
    // NOTE(review): if bytes_available < length_field_size_obu +
    // obu_header->size this unsigned subtraction wraps; presumably the
    // earlier read_obu_size/read_obu_header calls guarantee enough bytes
    // remain — TODO confirm.
    status = read_obu_size(
        data + length_field_size_obu + obu_header->size,
        bytes_available - length_field_size_obu - obu_header->size,
        payload_size, &length_field_size_payload);
    if (status != AOM_CODEC_OK) return status;
  }
  *bytes_read =
      length_field_size_obu + obu_header->size + length_field_size_payload;
  return AOM_CODEC_OK;
}
subq $0x88, %rsp movq %rdi, 0x78(%rsp) movq %rsi, 0x70(%rsp) movl %edx, 0x6c(%rsp) movq %rcx, 0x60(%rsp) movq %r8, 0x58(%rsp) movq %r9, 0x50(%rsp) movq $0x0, 0x48(%rsp) movq $0x0, 0x40(%rsp) movq $0x0, 0x38(%rsp) cmpl $0x0, 0x6c(%rsp) je 0xa113c movq 0x78(%rsp), %rdi movq 0x70(%rsp), %rsi leaq 0x38(%rsp), %rdx leaq 0x48(%rsp), %rcx callq 0xa1270 movl %eax, 0x34(%rsp) cmpl $0x0, 0x34(%rsp) je 0xa113a movl 0x34(%rsp), %eax movl %eax, 0x84(%rsp) jmp 0xa1255 jmp 0xa113c movq 0x78(%rsp), %rax addq 0x48(%rsp), %rax movq %rax, 0x8(%rsp) movq 0x78(%rsp), %rax addq 0x70(%rsp), %rax movq %rax, 0x10(%rsp) movl $0x0, 0x18(%rsp) movq $0x0, 0x20(%rsp) movq $0x0, 0x28(%rsp) movl 0x6c(%rsp), %esi movq 0x60(%rsp), %rdx leaq 0x8(%rsp), %rdi callq 0xa0f30 movl %eax, 0x34(%rsp) cmpl $0x0, 0x34(%rsp) je 0xa11a2 movl 0x34(%rsp), %eax movl %eax, 0x84(%rsp) jmp 0xa1255 movq 0x60(%rsp), %rax cmpl $0x0, 0xc(%rax) jne 0xa11e3 movq 0x38(%rsp), %rax movq 0x60(%rsp), %rcx cmpq (%rcx), %rax jae 0xa11cc movl $0x7, 0x84(%rsp) jmp 0xa1255 movq 0x38(%rsp), %rcx movq 0x60(%rsp), %rax subq (%rax), %rcx movq 0x58(%rsp), %rax movq %rcx, (%rax) jmp 0xa1230 movq 0x78(%rsp), %rdi addq 0x48(%rsp), %rdi movq 0x60(%rsp), %rax addq (%rax), %rdi movq 0x70(%rsp), %rsi subq 0x48(%rsp), %rsi movq 0x60(%rsp), %rax subq (%rax), %rsi movq 0x58(%rsp), %rdx leaq 0x40(%rsp), %rcx callq 0xa1270 movl %eax, 0x34(%rsp) cmpl $0x0, 0x34(%rsp) je 0xa122e movl 0x34(%rsp), %eax movl %eax, 0x84(%rsp) jmp 0xa1255 jmp 0xa1230 movq 0x48(%rsp), %rcx movq 0x60(%rsp), %rax addq (%rax), %rcx addq 0x40(%rsp), %rcx movq 0x50(%rsp), %rax movq %rcx, (%rax) movl $0x0, 0x84(%rsp) movl 0x84(%rsp), %eax addq $0x88, %rsp retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/av1/common/obu_util.c
decoder_init
// Allocates and default-initializes the codec's private state on first use.
// Returns AOM_CODEC_MEM_ERROR if the allocation fails; a second call with
// ctx->priv already set is a no-op returning AOM_CODEC_OK.
static aom_codec_err_t decoder_init(aom_codec_ctx_t *ctx) {
  // This function only allocates space for the aom_codec_alg_priv_t
  // structure. More memory may be required at the time the stream
  // information becomes known.
  if (!ctx->priv) {
    aom_codec_alg_priv_t *const priv =
        (aom_codec_alg_priv_t *)aom_calloc(1, sizeof(*priv));
    if (priv == NULL) return AOM_CODEC_MEM_ERROR;
    ctx->priv = (aom_codec_priv_t *)priv;
    ctx->priv->init_flags = ctx->init_flags;
    priv->flushed = 0;
    // TODO(tdaede): this should not be exposed to the API
    priv->cfg.allow_lowbitdepth = !FORCE_HIGHBITDEPTH_DECODING;
    if (ctx->config.dec) {
      // Copy the caller's config into priv, then repoint ctx at the private
      // copy — the copy must happen before the repoint.
      priv->cfg = *ctx->config.dec;
      ctx->config.dec = &priv->cfg;
    }
    priv->num_grain_image_frame_buffers = 0;
    // Turn row_mt on by default.
    priv->row_mt = 1;
    // Turn on normal tile coding mode by default.
    // 0 is for normal tile coding mode, and 1 is for large scale tile coding
    // mode(refer to lightfield example).
    priv->tile_mode = 0;
    priv->decode_tile_row = -1;
    priv->decode_tile_col = -1;
  }
  return AOM_CODEC_OK;
}
subq $0x18, %rsp movq %rdi, 0x8(%rsp) movq 0x8(%rsp), %rax cmpq $0x0, 0x30(%rax) jne 0xa13f8 movl $0x1, %edi movl $0x6b30, %esi # imm = 0x6B30 callq 0xa0d80 movq %rax, (%rsp) cmpq $0x0, (%rsp) jne 0xa1330 movl $0x2, 0x14(%rsp) jmp 0xa1400 movq (%rsp), %rcx movq 0x8(%rsp), %rax movq %rcx, 0x30(%rax) movq 0x8(%rsp), %rax movq 0x20(%rax), %rcx movq 0x8(%rsp), %rax movq 0x30(%rax), %rax movq %rcx, 0x8(%rax) movq (%rsp), %rax movl $0x0, 0x1a4(%rax) movq (%rsp), %rax movl $0x1, 0xdc(%rax) movq 0x8(%rsp), %rax cmpq $0x0, 0x28(%rax) je 0xa13b1 movq (%rsp), %rax movq 0x8(%rsp), %rcx movq 0x28(%rcx), %rcx movq (%rcx), %rdx movq %rdx, 0xd0(%rax) movq 0x8(%rcx), %rcx movq %rcx, 0xd8(%rax) movq (%rsp), %rcx addq $0xd0, %rcx movq 0x8(%rsp), %rax movq %rcx, 0x28(%rax) movq (%rsp), %rax movq $0x0, 0x6b00(%rax) movq (%rsp), %rax movl $0x1, 0x1d4(%rax) movq (%rsp), %rax movl $0x0, 0x1cc(%rax) movq (%rsp), %rax movl $0xffffffff, 0x1c4(%rax) # imm = 0xFFFFFFFF movq (%rsp), %rax movl $0xffffffff, 0x1c8(%rax) # imm = 0xFFFFFFFF movl $0x0, 0x14(%rsp) movl 0x14(%rsp), %eax addq $0x18, %rsp retq nopl (%rax)
/m-ab-s[P]aom/av1/av1_dx_iface.c
decoder_decode
// Decodes one buffer of compressed data. NULL data with data_sz == 0 signals
// a flush; otherwise the buffer is split into frame units (length-prefixed in
// Annex B streams, or the whole remaining span otherwise) and each is handed
// to decode_one().
static aom_codec_err_t decoder_decode(aom_codec_alg_priv_t *ctx,
                                      const uint8_t *data, size_t data_sz,
                                      void *user_priv) {
  aom_codec_err_t res = AOM_CODEC_OK;

#if CONFIG_INSPECTION
  // Inspection builds route any call carrying a user_priv to the inspector.
  if (user_priv != 0) {
    return decoder_inspect(ctx, data, data_sz, user_priv);
  }
#endif

  release_pending_output_frames(ctx);

  /* Sanity checks */
  /* NULL data ptr allowed if data_sz is 0 too */
  if (data == NULL && data_sz == 0) {
    ctx->flushed = 1;
    return AOM_CODEC_OK;
  }
  if (data == NULL || data_sz == 0) return AOM_CODEC_INVALID_PARAM;

  // Reset flushed when receiving a valid frame.
  ctx->flushed = 0;

  // Initialize the decoder worker on the first frame.
  if (ctx->frame_worker == NULL) {
    res = init_decoder(ctx);
    if (res != AOM_CODEC_OK) return res;
  }

  const uint8_t *data_start = data;
  const uint8_t *data_end = data + data_sz;

  if (ctx->is_annexb) {
    // read the size of this temporal unit
    size_t length_of_size;
    uint64_t temporal_unit_size;
    if (aom_uleb_decode(data_start, data_sz, &temporal_unit_size,
                        &length_of_size) != 0) {
      return AOM_CODEC_CORRUPT_FRAME;
    }
    data_start += length_of_size;
    // Reject a TU size that claims more bytes than remain in the buffer.
    if (temporal_unit_size > (size_t)(data_end - data_start))
      return AOM_CODEC_CORRUPT_FRAME;
    // Clamp decoding to this temporal unit only.
    data_end = data_start + temporal_unit_size;
  }

  // Decode in serial mode.
  while (data_start < data_end) {
    uint64_t frame_size;
    if (ctx->is_annexb) {
      // read the size of this frame unit
      size_t length_of_size;
      if (aom_uleb_decode(data_start, (size_t)(data_end - data_start),
                          &frame_size, &length_of_size) != 0) {
        return AOM_CODEC_CORRUPT_FRAME;
      }
      data_start += length_of_size;
      if (frame_size > (size_t)(data_end - data_start))
        return AOM_CODEC_CORRUPT_FRAME;
    } else {
      frame_size = (uint64_t)(data_end - data_start);
    }

    // decode_one takes data_start by address — presumably it advances the
    // pointer past the bytes it consumed; the loop relies on that progress.
    res = decode_one(ctx, &data_start, (size_t)frame_size, user_priv);
    if (res != AOM_CODEC_OK) return res;

    // Allow extra zero bytes after the frame end
    while (data_start < data_end) {
      const uint8_t marker = data_start[0];
      if (marker) break;
      ++data_start;
    }
  }

  return res;
}
subq $0x68, %rsp movq %rdi, 0x58(%rsp) movq %rsi, 0x50(%rsp) movq %rdx, 0x48(%rsp) movq %rcx, 0x40(%rsp) movl $0x0, 0x3c(%rsp) movq 0x58(%rsp), %rdi callq 0xa5aa0 cmpq $0x0, 0x50(%rsp) jne 0xa16c6 cmpq $0x0, 0x48(%rsp) jne 0xa16c6 movq 0x58(%rsp), %rax movl $0x1, 0x1a4(%rax) movl $0x0, 0x64(%rsp) jmp 0xa18c0 cmpq $0x0, 0x50(%rsp) je 0xa16d6 cmpq $0x0, 0x48(%rsp) jne 0xa16e3 movl $0x8, 0x64(%rsp) jmp 0xa18c0 movq 0x58(%rsp), %rax movl $0x0, 0x1a4(%rax) movq 0x58(%rsp), %rax cmpq $0x0, 0x69f0(%rax) jne 0xa1725 movq 0x58(%rsp), %rdi callq 0xa5c30 movl %eax, 0x3c(%rsp) cmpl $0x0, 0x3c(%rsp) je 0xa1723 movl 0x3c(%rsp), %eax movl %eax, 0x64(%rsp) jmp 0xa18c0 jmp 0xa1725 movq 0x50(%rsp), %rax movq %rax, 0x30(%rsp) movq 0x50(%rsp), %rax addq 0x48(%rsp), %rax movq %rax, 0x28(%rsp) movq 0x58(%rsp), %rax cmpl $0x0, 0x69e0(%rax) je 0xa17b9 movq 0x30(%rsp), %rdi movq 0x48(%rsp), %rsi leaq 0x18(%rsp), %rdx leaq 0x20(%rsp), %rcx callq 0x266dc0 cmpl $0x0, %eax je 0xa1777 movl $0x7, 0x64(%rsp) jmp 0xa18c0 movq 0x20(%rsp), %rax addq 0x30(%rsp), %rax movq %rax, 0x30(%rsp) movq 0x18(%rsp), %rax movq 0x28(%rsp), %rcx movq 0x30(%rsp), %rdx subq %rdx, %rcx cmpq %rcx, %rax jbe 0xa17aa movl $0x7, 0x64(%rsp) jmp 0xa18c0 movq 0x30(%rsp), %rax addq 0x18(%rsp), %rax movq %rax, 0x28(%rsp) jmp 0xa17bb movq 0x30(%rsp), %rax cmpq 0x28(%rsp), %rax jae 0xa18b8 movq 0x58(%rsp), %rax cmpl $0x0, 0x69e0(%rax) je 0xa1841 movq 0x30(%rsp), %rdi movq 0x28(%rsp), %rsi movq 0x30(%rsp), %rax subq %rax, %rsi leaq 0x10(%rsp), %rdx leaq 0x8(%rsp), %rcx callq 0x266dc0 cmpl $0x0, %eax je 0xa180c movl $0x7, 0x64(%rsp) jmp 0xa18c0 movq 0x8(%rsp), %rax addq 0x30(%rsp), %rax movq %rax, 0x30(%rsp) movq 0x10(%rsp), %rax movq 0x28(%rsp), %rcx movq 0x30(%rsp), %rdx subq %rdx, %rcx cmpq %rcx, %rax jbe 0xa183f movl $0x7, 0x64(%rsp) jmp 0xa18c0 jmp 0xa1853 movq 0x28(%rsp), %rax movq 0x30(%rsp), %rcx subq %rcx, %rax movq %rax, 0x10(%rsp) movq 0x58(%rsp), %rdi movq 0x10(%rsp), %rdx movq 0x40(%rsp), %rcx leaq 0x30(%rsp), %rsi 
callq 0xa60a0 movl %eax, 0x3c(%rsp) cmpl $0x0, 0x3c(%rsp) je 0xa1881 movl 0x3c(%rsp), %eax movl %eax, 0x64(%rsp) jmp 0xa18c0 jmp 0xa1883 movq 0x30(%rsp), %rax cmpq 0x28(%rsp), %rax jae 0xa18b3 movq 0x30(%rsp), %rax movb (%rax), %al movb %al, 0x7(%rsp) cmpb $0x0, 0x7(%rsp) je 0xa18a3 jmp 0xa18b3 movq 0x30(%rsp), %rax addq $0x1, %rax movq %rax, 0x30(%rsp) jmp 0xa1883 jmp 0xa17bb movl 0x3c(%rsp), %eax movl %eax, 0x64(%rsp) movl 0x64(%rsp), %eax addq $0x68, %rsp retq nopl (%rax)
/m-ab-s[P]aom/av1/av1_dx_iface.c
decoder_set_fb_fn
// Installs external frame-buffer get/release callbacks. Both callbacks must
// be non-NULL, and they can only be set before the decoder worker exists.
static aom_codec_err_t decoder_set_fb_fn(
    aom_codec_alg_priv_t *ctx, aom_get_frame_buffer_cb_fn_t cb_get,
    aom_release_frame_buffer_cb_fn_t cb_release, void *cb_priv) {
  if (cb_get == NULL || cb_release == NULL) return AOM_CODEC_INVALID_PARAM;

  // If the decoder has already been initialized, do not accept changes to
  // the frame buffer functions.
  if (ctx->frame_worker != NULL) return AOM_CODEC_ERROR;

  ctx->get_ext_fb_cb = cb_get;
  ctx->release_ext_fb_cb = cb_release;
  ctx->ext_priv = cb_priv;
  return AOM_CODEC_OK;
}
movq %rdi, -0x10(%rsp) movq %rsi, -0x18(%rsp) movq %rdx, -0x20(%rsp) movq %rcx, -0x28(%rsp) cmpq $0x0, -0x18(%rsp) je 0xa20e4 cmpq $0x0, -0x20(%rsp) jne 0xa20ee movl $0x8, -0x4(%rsp) jmp 0xa2142 movq -0x10(%rsp), %rax cmpq $0x0, 0x69f0(%rax) je 0xa2107 movl $0x1, -0x4(%rsp) jmp 0xa2142 movq -0x18(%rsp), %rcx movq -0x10(%rsp), %rax movq %rcx, 0x6b20(%rax) movq -0x20(%rsp), %rcx movq -0x10(%rsp), %rax movq %rcx, 0x6b28(%rax) movq -0x28(%rsp), %rcx movq -0x10(%rsp), %rax movq %rcx, 0x6b18(%rax) movl $0x0, -0x4(%rsp) movl -0x4(%rsp), %eax retq nopw (%rax,%rax)
/m-ab-s[P]aom/av1/av1_dx_iface.c
ctrl_copy_reference
// Copies a reference frame into the decoder. Expects one va_list argument:
// an av1_ref_frame_t* whose img is the source and idx selects the slot.
static aom_codec_err_t ctrl_copy_reference(aom_codec_alg_priv_t *ctx,
                                           va_list args) {
  const av1_ref_frame_t *const frame = va_arg(args, av1_ref_frame_t *);
  if (frame == NULL) return AOM_CODEC_INVALID_PARAM;
  // Guard against calls made before any frame has been decoded; the sibling
  // ctrl handlers (e.g. ctrl_get_last_quantizer) perform the same check, and
  // without it worker->data1 below would dereference NULL.
  if (ctx->frame_worker == NULL) return AOM_CODEC_ERROR;
  YV12_BUFFER_CONFIG sd;
  AVxWorker *const worker = ctx->frame_worker;
  FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
  image2yuvconfig(&frame->img, &sd);
  return av1_copy_reference_dec(frame_worker_data->pbi, frame->idx, &sd);
}
subq $0x118, %rsp # imm = 0x118 movq %rdi, 0x108(%rsp) movq %rsi, 0x100(%rsp) movq 0x100(%rsp), %rax movq %rax, 0x8(%rsp) movl (%rax), %eax movl %eax, 0x14(%rsp) cmpl $0x28, %eax ja 0xa250a movq 0x8(%rsp), %rcx movl 0x14(%rsp), %edx movslq %edx, %rax addq 0x10(%rcx), %rax addl $0x8, %edx movl %edx, (%rcx) movq %rax, (%rsp) jmp 0xa2522 movq 0x8(%rsp), %rcx movq 0x8(%rcx), %rax movq %rax, %rdx addq $0x8, %rdx movq %rdx, 0x8(%rcx) movq %rax, (%rsp) movq (%rsp), %rax movq (%rax), %rax movq %rax, 0xf8(%rsp) cmpq $0x0, 0xf8(%rsp) je 0xa2599 movq 0x108(%rsp), %rax movq 0x69f0(%rax), %rax movq %rax, 0x20(%rsp) movq 0x20(%rsp), %rax movq 0x20(%rax), %rax movq %rax, 0x18(%rsp) movq 0xf8(%rsp), %rdi addq $0x8, %rdi leaq 0x28(%rsp), %rsi callq 0xa4ab0 movq 0x18(%rsp), %rax movq (%rax), %rdi movq 0xf8(%rsp), %rax movl (%rax), %esi leaq 0x28(%rsp), %rdx callq 0xc7420 movl %eax, 0x114(%rsp) jmp 0xa25a4 movl $0x8, 0x114(%rsp) movl 0x114(%rsp), %eax addq $0x118, %rsp # imm = 0x118 retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/av1/av1_dx_iface.c
ctrl_set_byte_alignment
// Validates and applies the requested frame-buffer byte alignment. Legal
// values are 0 (legacy behavior) or a power of two in [32, 1024]; the value
// is mirrored into the live decoder when one exists.
static aom_codec_err_t ctrl_set_byte_alignment(aom_codec_alg_priv_t *ctx,
                                               va_list args) {
  const int legacy_byte_alignment = 0;
  const int min_byte_alignment = 32;
  const int max_byte_alignment = 1024;
  const int byte_alignment = va_arg(args, int);

  const int in_range = byte_alignment >= min_byte_alignment &&
                       byte_alignment <= max_byte_alignment;
  const int power_of_two = (byte_alignment & (byte_alignment - 1)) == 0;
  if (byte_alignment != legacy_byte_alignment && !(in_range && power_of_two))
    return AOM_CODEC_INVALID_PARAM;

  ctx->byte_alignment = byte_alignment;
  if (ctx->frame_worker) {
    AVxWorker *const worker = ctx->frame_worker;
    FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
    frame_worker_data->pbi->common.features.byte_alignment = byte_alignment;
  }
  return AOM_CODEC_OK;
}
movq %rdi, -0x10(%rsp) movq %rsi, -0x18(%rsp) movl $0x0, -0x1c(%rsp) movl $0x20, -0x20(%rsp) movl $0x400, -0x24(%rsp) # imm = 0x400 movq -0x18(%rsp), %rax movq %rax, -0x48(%rsp) movl (%rax), %eax movl %eax, -0x3c(%rsp) cmpl $0x28, %eax ja 0xa27a3 movq -0x48(%rsp), %rcx movl -0x3c(%rsp), %edx movslq %edx, %rax addq 0x10(%rcx), %rax addl $0x8, %edx movl %edx, (%rcx) movq %rax, -0x50(%rsp) jmp 0xa27bc movq -0x48(%rsp), %rcx movq 0x8(%rcx), %rax movq %rax, %rdx addq $0x8, %rdx movq %rdx, 0x8(%rcx) movq %rax, -0x50(%rsp) movq -0x50(%rsp), %rax movl (%rax), %eax movl %eax, -0x28(%rsp) cmpl $0x0, -0x28(%rsp) je 0xa27fb cmpl $0x20, -0x28(%rsp) jl 0xa27f1 cmpl $0x400, -0x28(%rsp) # imm = 0x400 jg 0xa27f1 movl -0x28(%rsp), %eax movl -0x28(%rsp), %ecx subl $0x1, %ecx andl %ecx, %eax cmpl $0x0, %eax je 0xa27fb movl $0x8, -0x4(%rsp) jmp 0xa2852 movl -0x28(%rsp), %ecx movq -0x10(%rsp), %rax movl %ecx, 0x1b8(%rax) movq -0x10(%rsp), %rax cmpq $0x0, 0x69f0(%rax) je 0xa284a movq -0x10(%rsp), %rax movq 0x69f0(%rax), %rax movq %rax, -0x30(%rsp) movq -0x30(%rsp), %rax movq 0x20(%rax), %rax movq %rax, -0x38(%rsp) movl -0x28(%rsp), %ecx movq -0x38(%rsp), %rax movq (%rax), %rax movl %ecx, 0x3d60(%rax) movl $0x0, -0x4(%rsp) movl -0x4(%rsp), %eax retq nopw (%rax,%rax)
/m-ab-s[P]aom/av1/av1_dx_iface.c
ctrl_set_skip_film_grain
// Records the skip-film-grain flag and, when a decoder worker already
// exists, propagates it so the change takes effect immediately.
static aom_codec_err_t ctrl_set_skip_film_grain(aom_codec_alg_priv_t *ctx,
                                                va_list args) {
  ctx->skip_film_grain = va_arg(args, int);
  AVxWorker *const worker = ctx->frame_worker;
  if (worker) {
    FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
    frame_worker_data->pbi->skip_film_grain = ctx->skip_film_grain;
  }
  return AOM_CODEC_OK;
}
movq %rdi, -0x8(%rsp) movq %rsi, -0x10(%rsp) movq -0x10(%rsp), %rax movq %rax, -0x30(%rsp) movl (%rax), %eax movl %eax, -0x24(%rsp) cmpl $0x28, %eax ja 0xa2deb movq -0x30(%rsp), %rcx movl -0x24(%rsp), %edx movslq %edx, %rax addq 0x10(%rcx), %rax addl $0x8, %edx movl %edx, (%rcx) movq %rax, -0x38(%rsp) jmp 0xa2e04 movq -0x30(%rsp), %rcx movq 0x8(%rcx), %rax movq %rax, %rdx addq $0x8, %rdx movq %rdx, 0x8(%rcx) movq %rax, -0x38(%rsp) movq -0x38(%rsp), %rax movl (%rax), %ecx movq -0x8(%rsp), %rax movl %ecx, 0x1c0(%rax) movq -0x8(%rsp), %rax cmpq $0x0, 0x69f0(%rax) je 0xa2e5d movq -0x8(%rsp), %rax movq 0x69f0(%rax), %rax movq %rax, -0x18(%rsp) movq -0x18(%rsp), %rax movq 0x20(%rax), %rax movq %rax, -0x20(%rsp) movq -0x8(%rsp), %rax movl 0x1c0(%rax), %ecx movq -0x20(%rsp), %rax movq (%rax), %rax movl %ecx, 0x5f728(%rax) xorl %eax, %eax retq
/m-ab-s[P]aom/av1/av1_dx_iface.c
ctrl_get_last_quantizer
// Writes the most recent frame's base_qindex through the int* passed in the
// va_list. Fails with INVALID_PARAM for a NULL out-pointer and with ERROR if
// no frame worker exists yet.
static aom_codec_err_t ctrl_get_last_quantizer(aom_codec_alg_priv_t *ctx,
                                               va_list args) {
  int *const arg = va_arg(args, int *);
  if (!arg) return AOM_CODEC_INVALID_PARAM;
  if (!ctx->frame_worker) return AOM_CODEC_ERROR;

  FrameWorkerData *const worker_data =
      (FrameWorkerData *)ctx->frame_worker->data1;
  *arg = worker_data->pbi->common.quant_params.base_qindex;
  return AOM_CODEC_OK;
}
movq %rdi, -0x10(%rsp) movq %rsi, -0x18(%rsp) movq -0x18(%rsp), %rax movq %rax, -0x30(%rsp) movl (%rax), %eax movl %eax, -0x24(%rsp) cmpl $0x28, %eax ja 0xa2fbb movq -0x30(%rsp), %rcx movl -0x24(%rsp), %edx movslq %edx, %rax addq 0x10(%rcx), %rax addl $0x8, %edx movl %edx, (%rcx) movq %rax, -0x38(%rsp) jmp 0xa2fd4 movq -0x30(%rsp), %rcx movq 0x8(%rcx), %rax movq %rax, %rdx addq $0x8, %rdx movq %rdx, 0x8(%rcx) movq %rax, -0x38(%rsp) movq -0x38(%rsp), %rax movq (%rax), %rax movq %rax, -0x20(%rsp) cmpq $0x0, -0x20(%rsp) jne 0xa2ff3 movl $0x8, -0x4(%rsp) jmp 0xa3034 movq -0x10(%rsp), %rax cmpq $0x0, 0x69f0(%rax) jne 0xa300c movl $0x1, -0x4(%rsp) jmp 0xa3034 movq -0x10(%rsp), %rax movq 0x69f0(%rax), %rax movq 0x20(%rax), %rax movq (%rax), %rax movl 0x3dc8(%rax), %ecx movq -0x20(%rsp), %rax movl %ecx, (%rax) movl $0x0, -0x4(%rsp) movl -0x4(%rsp), %eax retq nopl (%rax)
/m-ab-s[P]aom/av1/av1_dx_iface.c
ctrl_get_bit_depth
// Writes the stream's bit depth through the unsigned int* passed in the
// va_list. INVALID_PARAM for a NULL out-pointer, ERROR if no worker exists.
static aom_codec_err_t ctrl_get_bit_depth(aom_codec_alg_priv_t *ctx,
                                          va_list args) {
  unsigned int *const bit_depth = va_arg(args, unsigned int *);
  AVxWorker *const worker = ctx->frame_worker;
  if (bit_depth == NULL) return AOM_CODEC_INVALID_PARAM;
  if (worker == NULL) return AOM_CODEC_ERROR;

  FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
  const AV1_COMMON *const cm = &frame_worker_data->pbi->common;
  *bit_depth = cm->seq_params->bit_depth;
  return AOM_CODEC_OK;
}
movq %rdi, -0x10(%rsp) movq %rsi, -0x18(%rsp) movq -0x18(%rsp), %rax movq %rax, -0x48(%rsp) movl (%rax), %eax movl %eax, -0x3c(%rsp) cmpl $0x28, %eax ja 0xa314b movq -0x48(%rsp), %rcx movl -0x3c(%rsp), %edx movslq %edx, %rax addq 0x10(%rcx), %rax addl $0x8, %edx movl %edx, (%rcx) movq %rax, -0x50(%rsp) jmp 0xa3164 movq -0x48(%rsp), %rcx movq 0x8(%rcx), %rax movq %rax, %rdx addq $0x8, %rdx movq %rdx, 0x8(%rcx) movq %rax, -0x50(%rsp) movq -0x50(%rsp), %rax movq (%rax), %rax movq %rax, -0x20(%rsp) movq -0x10(%rsp), %rax movq 0x69f0(%rax), %rax movq %rax, -0x28(%rsp) cmpq $0x0, -0x20(%rsp) je 0xa31dd cmpq $0x0, -0x28(%rsp) je 0xa31d3 movq -0x28(%rsp), %rax movq 0x20(%rax), %rax movq %rax, -0x30(%rsp) movq -0x30(%rsp), %rax movq (%rax), %rax addq $0x3b60, %rax # imm = 0x3B60 movq %rax, -0x38(%rsp) movq -0x38(%rsp), %rax movq 0x6088(%rax), %rax movl 0x48(%rax), %ecx movq -0x20(%rsp), %rax movl %ecx, (%rax) movl $0x0, -0x4(%rsp) jmp 0xa31e5 movl $0x1, -0x4(%rsp) jmp 0xa31e5 movl $0x8, -0x4(%rsp) movl -0x4(%rsp), %eax retq nopw (%rax,%rax)
/m-ab-s[P]aom/av1/av1_dx_iface.c
ctrl_get_tile_size
// Reports the uniform tile size in pixels, packed as
// (width_px << 16) + height_px, through the unsigned int* in the va_list.
static aom_codec_err_t ctrl_get_tile_size(aom_codec_alg_priv_t *ctx,
                                          va_list args) {
  unsigned int *const tile_size = va_arg(args, unsigned int *);
  AVxWorker *const worker = ctx->frame_worker;
  if (tile_size == NULL) return AOM_CODEC_INVALID_PARAM;
  if (worker == NULL) return AOM_CODEC_ERROR;

  FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
  const AV1_COMMON *const cm = &frame_worker_data->pbi->common;
  int tile_width, tile_height;
  if (!av1_get_uniform_tile_size(cm, &tile_width, &tile_height))
    return AOM_CODEC_CORRUPT_FRAME;
  // Tile dimensions are in MI units; scale by MI_SIZE to get pixels.
  *tile_size = ((tile_width * MI_SIZE) << 16) + tile_height * MI_SIZE;
  return AOM_CODEC_OK;
}
subq $0x58, %rsp movq %rdi, 0x48(%rsp) movq %rsi, 0x40(%rsp) movq 0x40(%rsp), %rax movq %rax, 0x8(%rsp) movl (%rax), %eax movl %eax, 0x14(%rsp) cmpl $0x28, %eax ja 0xa333e movq 0x8(%rsp), %rcx movl 0x14(%rsp), %edx movslq %edx, %rax addq 0x10(%rcx), %rax addl $0x8, %edx movl %edx, (%rcx) movq %rax, (%rsp) jmp 0xa3356 movq 0x8(%rsp), %rcx movq 0x8(%rcx), %rax movq %rax, %rdx addq $0x8, %rdx movq %rdx, 0x8(%rcx) movq %rax, (%rsp) movq (%rsp), %rax movq (%rax), %rax movq %rax, 0x38(%rsp) movq 0x48(%rsp), %rax movq 0x69f0(%rax), %rax movq %rax, 0x30(%rsp) cmpq $0x0, 0x38(%rsp) je 0xa33f4 cmpq $0x0, 0x30(%rsp) je 0xa33ea movq 0x30(%rsp), %rax movq 0x20(%rax), %rax movq %rax, 0x28(%rsp) movq 0x28(%rsp), %rax movq (%rax), %rax addq $0x3b60, %rax # imm = 0x3B60 movq %rax, 0x20(%rsp) movq 0x20(%rsp), %rdi leaq 0x1c(%rsp), %rsi leaq 0x18(%rsp), %rdx callq 0x5f5510 testb $0x1, %al jne 0xa33c6 movl $0x7, 0x54(%rsp) jmp 0xa33fc movl 0x1c(%rsp), %ecx shll $0x2, %ecx shll $0x10, %ecx movl 0x18(%rsp), %eax shll $0x2, %eax addl %eax, %ecx movq 0x38(%rsp), %rax movl %ecx, (%rax) movl $0x0, 0x54(%rsp) jmp 0xa33fc movl $0x1, 0x54(%rsp) jmp 0xa33fc movl $0x8, 0x54(%rsp) movl 0x54(%rsp), %eax addq $0x58, %rsp retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/av1/av1_dx_iface.c
ctrl_get_render_size
// Writes the stream's render dimensions into the two-element int array
// passed in the va_list: [0] = render_width, [1] = render_height.
static aom_codec_err_t ctrl_get_render_size(aom_codec_alg_priv_t *ctx,
                                            va_list args) {
  int *const render_size = va_arg(args, int *);
  if (render_size == NULL) return AOM_CODEC_INVALID_PARAM;
  if (ctx->frame_worker == NULL) return AOM_CODEC_ERROR;

  AVxWorker *const worker = ctx->frame_worker;
  FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
  const AV1_COMMON *const cm = &frame_worker_data->pbi->common;
  render_size[0] = cm->render_width;
  render_size[1] = cm->render_height;
  return AOM_CODEC_OK;
}
movq %rdi, -0x10(%rsp) movq %rsi, -0x18(%rsp) movq -0x18(%rsp), %rax movq %rax, -0x48(%rsp) movl (%rax), %eax movl %eax, -0x3c(%rsp) cmpl $0x28, %eax ja 0xa351b movq -0x48(%rsp), %rcx movl -0x3c(%rsp), %edx movslq %edx, %rax addq 0x10(%rcx), %rax addl $0x8, %edx movl %edx, (%rcx) movq %rax, -0x50(%rsp) jmp 0xa3534 movq -0x48(%rsp), %rcx movq 0x8(%rcx), %rax movq %rax, %rdx addq $0x8, %rdx movq %rdx, 0x8(%rcx) movq %rax, -0x50(%rsp) movq -0x50(%rsp), %rax movq (%rax), %rax movq %rax, -0x20(%rsp) cmpq $0x0, -0x20(%rsp) je 0xa35bd movq -0x10(%rsp), %rax cmpq $0x0, 0x69f0(%rax) je 0xa35b3 movq -0x10(%rsp), %rax movq 0x69f0(%rax), %rax movq %rax, -0x28(%rsp) movq -0x28(%rsp), %rax movq 0x20(%rax), %rax movq %rax, -0x30(%rsp) movq -0x30(%rsp), %rax movq (%rax), %rax addq $0x3b60, %rax # imm = 0x3B60 movq %rax, -0x38(%rsp) movq -0x38(%rsp), %rax movl 0x40(%rax), %ecx movq -0x20(%rsp), %rax movl %ecx, (%rax) movq -0x38(%rsp), %rax movl 0x44(%rax), %ecx movq -0x20(%rsp), %rax movl %ecx, 0x4(%rax) movl $0x0, -0x4(%rsp) jmp 0xa35c5 movl $0x1, -0x4(%rsp) jmp 0xa35c5 movl $0x8, -0x4(%rsp) movl -0x4(%rsp), %eax retq nopw (%rax,%rax)
/m-ab-s[P]aom/av1/av1_dx_iface.c
ctrl_get_accounting
// Returns a pointer to the decoder's Accounting data through the
// Accounting** passed in the va_list. In builds without CONFIG_ACCOUNTING
// the feature is unavailable and AOM_CODEC_INCAPABLE is returned.
static aom_codec_err_t ctrl_get_accounting(aom_codec_alg_priv_t *ctx,
                                           va_list args) {
#if !CONFIG_ACCOUNTING
  (void)ctx;
  (void)args;
  return AOM_CODEC_INCAPABLE;
#else
  Accounting **acct = va_arg(args, Accounting **);
  if (acct) {
    if (ctx->frame_worker) {
      AVxWorker *const worker = ctx->frame_worker;
      FrameWorkerData *const frame_worker_data =
          (FrameWorkerData *)worker->data1;
      AV1Decoder *pbi = frame_worker_data->pbi;
      // Hand back a pointer into the live decoder state — the caller must
      // not outlive the decoder with it.
      *acct = &pbi->accounting;
      return AOM_CODEC_OK;
    } else {
      return AOM_CODEC_ERROR;
    }
  }
  return AOM_CODEC_INVALID_PARAM;
#endif
}
movq %rdi, -0x8(%rsp) movq %rsi, -0x10(%rsp) movl $0x4, %eax retq
/m-ab-s[P]aom/av1/av1_dx_iface.c
ctrl_get_reference
// Exposes a decoder reference frame: looks up the slot selected by
// data->idx and wraps it into data->img. Expects one va_list argument of
// type av1_ref_frame_t*.
static aom_codec_err_t ctrl_get_reference(aom_codec_alg_priv_t *ctx,
                                          va_list args) {
  av1_ref_frame_t *data = va_arg(args, av1_ref_frame_t *);
  if (data == NULL) return AOM_CODEC_INVALID_PARAM;
  // Guard against calls made before the decoder worker exists; the sibling
  // ctrl handlers (e.g. ctrl_get_last_quantizer) perform the same check, and
  // without it worker->data1 below would dereference NULL.
  if (ctx->frame_worker == NULL) return AOM_CODEC_ERROR;
  YV12_BUFFER_CONFIG *fb;
  AVxWorker *const worker = ctx->frame_worker;
  FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
  fb = get_ref_frame(&frame_worker_data->pbi->common, data->idx);
  if (fb == NULL) return AOM_CODEC_ERROR;
  yuvconfig2image(&data->img, fb, NULL);
  return AOM_CODEC_OK;
}
subq $0x58, %rsp movq %rdi, 0x48(%rsp) movq %rsi, 0x40(%rsp) movq 0x40(%rsp), %rax movq %rax, 0x10(%rsp) movl (%rax), %eax movl %eax, 0x1c(%rsp) cmpl $0x28, %eax ja 0xa393f movq 0x10(%rsp), %rcx movl 0x1c(%rsp), %edx movslq %edx, %rax addq 0x10(%rcx), %rax addl $0x8, %edx movl %edx, (%rcx) movq %rax, 0x8(%rsp) jmp 0xa3958 movq 0x10(%rsp), %rcx movq 0x8(%rcx), %rax movq %rax, %rdx addq $0x8, %rdx movq %rdx, 0x8(%rcx) movq %rax, 0x8(%rsp) movq 0x8(%rsp), %rax movq (%rax), %rax movq %rax, 0x38(%rsp) cmpq $0x0, 0x38(%rsp) je 0xa39df movq 0x48(%rsp), %rax movq 0x69f0(%rax), %rax movq %rax, 0x28(%rsp) movq 0x28(%rsp), %rax movq 0x20(%rax), %rax movq %rax, 0x20(%rsp) movq 0x20(%rsp), %rax movq (%rax), %rdi addq $0x3b60, %rdi # imm = 0x3B60 movq 0x38(%rsp), %rax movl (%rax), %esi callq 0xa5120 movq %rax, 0x30(%rsp) cmpq $0x0, 0x30(%rsp) jne 0xa39be movl $0x1, 0x54(%rsp) jmp 0xa39e7 movq 0x38(%rsp), %rdi addq $0x8, %rdi movq 0x30(%rsp), %rsi xorl %eax, %eax movl %eax, %edx callq 0xa4e10 movl $0x0, 0x54(%rsp) jmp 0xa39e7 movl $0x8, 0x54(%rsp) movl 0x54(%rsp), %eax addq $0x58, %rsp retq
/m-ab-s[P]aom/av1/av1_dx_iface.c
ctrl_get_frame_header_info
// Fills an aom_tile_data (passed via the va_list) with the coded frame
// header: the OBU size-header span plus the frame header's extra size.
static aom_codec_err_t ctrl_get_frame_header_info(aom_codec_alg_priv_t *ctx,
                                                  va_list args) {
  aom_tile_data *const frame_header_info = va_arg(args, aom_tile_data *);
  if (frame_header_info == NULL) return AOM_CODEC_INVALID_PARAM;
  if (ctx->frame_worker == NULL) return AOM_CODEC_ERROR;

  AVxWorker *const worker = ctx->frame_worker;
  FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
  const AV1Decoder *pbi = frame_worker_data->pbi;
  frame_header_info->coded_tile_data_size = pbi->obu_size_hdr.size;
  frame_header_info->coded_tile_data = pbi->obu_size_hdr.data;
  frame_header_info->extra_size = pbi->frame_header_size;
  return AOM_CODEC_OK;
}
movq %rdi, -0x10(%rsp) movq %rsi, -0x18(%rsp) movq -0x18(%rsp), %rax movq %rax, -0x48(%rsp) movl (%rax), %eax movl %eax, -0x3c(%rsp) cmpl $0x28, %eax ja 0xa3a2b movq -0x48(%rsp), %rcx movl -0x3c(%rsp), %edx movslq %edx, %rax addq 0x10(%rcx), %rax addl $0x8, %edx movl %edx, (%rcx) movq %rax, -0x50(%rsp) jmp 0xa3a44 movq -0x48(%rsp), %rcx movq 0x8(%rcx), %rax movq %rax, %rdx addq $0x8, %rdx movq %rdx, 0x8(%rcx) movq %rax, -0x50(%rsp) movq -0x50(%rsp), %rax movq (%rax), %rax movq %rax, -0x20(%rsp) cmpq $0x0, -0x20(%rsp) je 0xa3aea movq -0x10(%rsp), %rax cmpq $0x0, 0x69f0(%rax) je 0xa3ae0 movq -0x10(%rsp), %rax movq 0x69f0(%rax), %rax movq %rax, -0x28(%rsp) movq -0x28(%rsp), %rax movq 0x20(%rax), %rax movq %rax, -0x30(%rsp) movq -0x30(%rsp), %rax movq (%rax), %rax movq %rax, -0x38(%rsp) movq -0x38(%rsp), %rax movq 0x58dd8(%rax), %rcx movq -0x20(%rsp), %rax movq %rcx, (%rax) movq -0x38(%rsp), %rax movq 0x58dd0(%rax), %rcx movq -0x20(%rsp), %rax movq %rcx, 0x8(%rax) movq -0x38(%rsp), %rax movq 0x58dc8(%rax), %rcx movq -0x20(%rsp), %rax movq %rcx, 0x10(%rax) movl $0x0, -0x4(%rsp) jmp 0xa3af2 movl $0x1, -0x4(%rsp) jmp 0xa3af2 movl $0x8, -0x4(%rsp) movl -0x4(%rsp), %eax retq nopw (%rax,%rax)
/m-ab-s[P]aom/av1/av1_dx_iface.c
ctrl_get_tile_data
// Fills an aom_tile_data (passed via the va_list) with the coded data span
// of the tile currently selected by pbi->dec_tile_row / pbi->dec_tile_col.
static aom_codec_err_t ctrl_get_tile_data(aom_codec_alg_priv_t *ctx,
                                          va_list args) {
  aom_tile_data *const tile_data = va_arg(args, aom_tile_data *);
  if (tile_data == NULL) return AOM_CODEC_INVALID_PARAM;
  if (ctx->frame_worker == NULL) return AOM_CODEC_ERROR;

  AVxWorker *const worker = ctx->frame_worker;
  FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
  const AV1Decoder *pbi = frame_worker_data->pbi;
  // Hoist the selected tile indices so the 2-D lookup reads clearly.
  const int row = pbi->dec_tile_row;
  const int col = pbi->dec_tile_col;
  tile_data->coded_tile_data_size = pbi->tile_buffers[row][col].size;
  tile_data->coded_tile_data = pbi->tile_buffers[row][col].data;
  return AOM_CODEC_OK;
}
movq %rdi, -0x10(%rsp) movq %rsi, -0x18(%rsp) movq -0x18(%rsp), %rax movq %rax, -0x48(%rsp) movl (%rax), %eax movl %eax, -0x3c(%rsp) cmpl $0x28, %eax ja 0xa3b3b movq -0x48(%rsp), %rcx movl -0x3c(%rsp), %edx movslq %edx, %rax addq 0x10(%rcx), %rax addl $0x8, %edx movl %edx, (%rcx) movq %rax, -0x50(%rsp) jmp 0xa3b54 movq -0x48(%rsp), %rcx movq 0x8(%rcx), %rax movq %rax, %rdx addq $0x8, %rdx movq %rdx, 0x8(%rcx) movq %rax, -0x50(%rsp) movq -0x50(%rsp), %rax movq (%rax), %rax movq %rax, -0x20(%rsp) cmpq $0x0, -0x20(%rsp) je 0xa3c3a movq -0x10(%rsp), %rax cmpq $0x0, 0x69f0(%rax) je 0xa3c30 movq -0x10(%rsp), %rax movq 0x69f0(%rax), %rax movq %rax, -0x28(%rsp) movq -0x28(%rsp), %rax movq 0x20(%rax), %rax movq %rax, -0x30(%rsp) movq -0x30(%rsp), %rax movq (%rax), %rax movq %rax, -0x38(%rsp) movq -0x38(%rsp), %rax addq $0x48d30, %rax # imm = 0x48D30 movq -0x38(%rsp), %rcx movslq 0x58da0(%rcx), %rcx shlq $0xa, %rcx addq %rcx, %rax movq -0x38(%rsp), %rcx movslq 0x58da4(%rcx), %rcx shlq $0x4, %rcx addq %rcx, %rax movq 0x8(%rax), %rcx movq -0x20(%rsp), %rax movq %rcx, (%rax) movq -0x38(%rsp), %rax addq $0x48d30, %rax # imm = 0x48D30 movq -0x38(%rsp), %rcx movslq 0x58da0(%rcx), %rcx shlq $0xa, %rcx addq %rcx, %rax movq -0x38(%rsp), %rcx movslq 0x58da4(%rcx), %rcx shlq $0x4, %rcx addq %rcx, %rax movq (%rax), %rcx movq -0x20(%rsp), %rax movq %rcx, 0x8(%rax) movl $0x0, -0x4(%rsp) jmp 0xa3c42 movl $0x1, -0x4(%rsp) jmp 0xa3c42 movl $0x8, -0x4(%rsp) movl -0x4(%rsp), %eax retq nopw (%rax,%rax)
/m-ab-s[P]aom/av1/av1_dx_iface.c
ctrl_get_fwd_kf_value
// Control handler: report whether a forward key frame is present in the
// stream, as recorded by the decoder.
static aom_codec_err_t ctrl_get_fwd_kf_value(aom_codec_alg_priv_t *ctx,
                                             va_list args) {
  int *const fwd_kf_present = va_arg(args, int *);
  if (fwd_kf_present) {
    if (ctx->frame_worker) {
      FrameWorkerData *const worker_data =
          (FrameWorkerData *)ctx->frame_worker->data1;
      *fwd_kf_present = worker_data->pbi->is_fwd_kf_present;
      return AOM_CODEC_OK;
    }
    return AOM_CODEC_ERROR;
  }
  return AOM_CODEC_INVALID_PARAM;
}
movq %rdi, -0x10(%rsp) movq %rsi, -0x18(%rsp) movq -0x18(%rsp), %rax movq %rax, -0x30(%rsp) movl (%rax), %eax movl %eax, -0x24(%rsp) cmpl $0x28, %eax ja 0xa3c8b movq -0x30(%rsp), %rcx movl -0x24(%rsp), %edx movslq %edx, %rax addq 0x10(%rcx), %rax addl $0x8, %edx movl %edx, (%rcx) movq %rax, -0x38(%rsp) jmp 0xa3ca4 movq -0x30(%rsp), %rcx movq 0x8(%rcx), %rax movq %rax, %rdx addq $0x8, %rdx movq %rdx, 0x8(%rcx) movq %rax, -0x38(%rsp) movq -0x38(%rsp), %rax movq (%rax), %rax movq %rax, -0x20(%rsp) cmpq $0x0, -0x20(%rsp) jne 0xa3cc3 movl $0x8, -0x4(%rsp) jmp 0xa3d04 movq -0x10(%rsp), %rax cmpq $0x0, 0x69f0(%rax) jne 0xa3cdc movl $0x1, -0x4(%rsp) jmp 0xa3d04 movq -0x10(%rsp), %rax movq 0x69f0(%rax), %rax movq 0x20(%rax), %rax movq (%rax), %rax movl 0x5f750(%rax), %ecx movq -0x20(%rsp), %rax movl %ecx, (%rax) movl $0x0, -0x4(%rsp) movl -0x4(%rsp), %eax retq nopl (%rax)
/m-ab-s[P]aom/av1/av1_dx_iface.c
ctrl_get_altref_present
// Control handler: report whether an ARF (alternate reference) frame has
// been seen in the stream, as recorded by the decoder.
static aom_codec_err_t ctrl_get_altref_present(aom_codec_alg_priv_t *ctx,
                                               va_list args) {
  int *const altref_present = va_arg(args, int *);
  if (altref_present) {
    if (ctx->frame_worker) {
      FrameWorkerData *const worker_data =
          (FrameWorkerData *)ctx->frame_worker->data1;
      *altref_present = worker_data->pbi->is_arf_frame_present;
      return AOM_CODEC_OK;
    }
    return AOM_CODEC_ERROR;
  }
  return AOM_CODEC_INVALID_PARAM;
}
movq %rdi, -0x10(%rsp) movq %rsi, -0x18(%rsp) movq -0x18(%rsp), %rax movq %rax, -0x30(%rsp) movl (%rax), %eax movl %eax, -0x24(%rsp) cmpl $0x28, %eax ja 0xa3d4b movq -0x30(%rsp), %rcx movl -0x24(%rsp), %edx movslq %edx, %rax addq 0x10(%rcx), %rax addl $0x8, %edx movl %edx, (%rcx) movq %rax, -0x38(%rsp) jmp 0xa3d64 movq -0x30(%rsp), %rcx movq 0x8(%rcx), %rax movq %rax, %rdx addq $0x8, %rdx movq %rdx, 0x8(%rcx) movq %rax, -0x38(%rsp) movq -0x38(%rsp), %rax movq (%rax), %rax movq %rax, -0x20(%rsp) cmpq $0x0, -0x20(%rsp) jne 0xa3d83 movl $0x8, -0x4(%rsp) jmp 0xa3dc4 movq -0x10(%rsp), %rax cmpq $0x0, 0x69f0(%rax) jne 0xa3d9c movl $0x1, -0x4(%rsp) jmp 0xa3dc4 movq -0x10(%rsp), %rax movq 0x69f0(%rax), %rax movq 0x20(%rax), %rax movq (%rax), %rax movl 0x5f754(%rax), %ecx movq -0x20(%rsp), %rax movl %ecx, (%rax) movl $0x0, -0x4(%rsp) movl -0x4(%rsp), %eax retq nopl (%rax)
/m-ab-s[P]aom/av1/av1_dx_iface.c
ctrl_get_frame_flags
// Control handler: report AOM_FRAME_IS_* flags describing the most recently
// decoded frame (key/intra-only/switch/error-resilient, etc.).
static aom_codec_err_t ctrl_get_frame_flags(aom_codec_alg_priv_t *ctx,
                                            va_list args) {
  int *const arg = va_arg(args, int *);
  if (arg == NULL) return AOM_CODEC_INVALID_PARAM;
  if (ctx->frame_worker == NULL) return AOM_CODEC_ERROR;
  AV1Decoder *pbi = ((FrameWorkerData *)ctx->frame_worker->data1)->pbi;
  *arg = 0;
  switch (pbi->common.current_frame.frame_type) {
    case KEY_FRAME:
      *arg |= AOM_FRAME_IS_KEY;
      *arg |= AOM_FRAME_IS_INTRAONLY;
      // A key frame that is not immediately shown serves as a delayed
      // random access point.
      if (!pbi->common.show_frame) {
        *arg |= AOM_FRAME_IS_DELAYED_RANDOM_ACCESS_POINT;
      }
      break;
    case INTRA_ONLY_FRAME: *arg |= AOM_FRAME_IS_INTRAONLY; break;
    case S_FRAME: *arg |= AOM_FRAME_IS_SWITCH; break;
    default:
      // INTER_FRAME: no key/intra/switch flag applies.
      break;
  }
  if (pbi->common.features.error_resilient_mode) {
    *arg |= AOM_FRAME_IS_ERROR_RESILIENT;
  }
  return AOM_CODEC_OK;
}
movq %rdi, -0x10(%rsp) movq %rsi, -0x18(%rsp) movq -0x18(%rsp), %rax movq %rax, -0x38(%rsp) movl (%rax), %eax movl %eax, -0x2c(%rsp) cmpl $0x28, %eax ja 0xa3e0b movq -0x38(%rsp), %rcx movl -0x2c(%rsp), %edx movslq %edx, %rax addq 0x10(%rcx), %rax addl $0x8, %edx movl %edx, (%rcx) movq %rax, -0x40(%rsp) jmp 0xa3e24 movq -0x38(%rsp), %rcx movq 0x8(%rcx), %rax movq %rax, %rdx addq $0x8, %rdx movq %rdx, 0x8(%rcx) movq %rax, -0x40(%rsp) movq -0x40(%rsp), %rax movq (%rax), %rax movq %rax, -0x20(%rsp) cmpq $0x0, -0x20(%rsp) jne 0xa3e46 movl $0x8, -0x4(%rsp) jmp 0xa3f24 movq -0x10(%rsp), %rax cmpq $0x0, 0x69f0(%rax) jne 0xa3e62 movl $0x1, -0x4(%rsp) jmp 0xa3f24 movq -0x10(%rsp), %rax movq 0x69f0(%rax), %rax movq 0x20(%rax), %rax movq (%rax), %rax movq %rax, -0x28(%rsp) movq -0x20(%rsp), %rax movl $0x0, (%rax) movq -0x28(%rsp), %rax movzbl 0x3b60(%rax), %eax movl %eax, -0x44(%rsp) testl %eax, %eax je 0xa3eb1 jmp 0xa3e9b movl -0x44(%rsp), %eax subl $0x2, %eax je 0xa3ee8 jmp 0xa3ea6 movl -0x44(%rsp), %eax subl $0x3, %eax je 0xa3ef6 jmp 0xa3f02 movq -0x20(%rsp), %rax movl (%rax), %ecx orl $0x1, %ecx movl %ecx, (%rax) movq -0x20(%rsp), %rax movl (%rax), %ecx orl $0x10, %ecx movl %ecx, (%rax) movq -0x28(%rsp), %rax cmpl $0x0, 0x3d40(%rax) jne 0xa3ee6 movq -0x20(%rsp), %rax movl (%rax), %ecx orl $0x80, %ecx movl %ecx, (%rax) jmp 0xa3f02 movq -0x20(%rsp), %rax movl (%rax), %ecx orl $0x10, %ecx movl %ecx, (%rax) jmp 0xa3f02 movq -0x20(%rsp), %rax movl (%rax), %ecx orl $0x20, %ecx movl %ecx, (%rax) movq -0x28(%rsp), %rax testb $0x1, 0x3d56(%rax) je 0xa3f1c movq -0x20(%rsp), %rax movl (%rax), %ecx orl $0x40, %ecx movl %ecx, (%rax) movl $0x0, -0x4(%rsp) movl -0x4(%rsp), %eax retq nopl (%rax)
/m-ab-s[P]aom/av1/av1_dx_iface.c
ctrl_get_tile_info
// Control handler: fill an aom_tile_info with the current frame's tile
// layout (row/column counts, per-tile sizes in superblock units, and the
// number of tile groups).
static aom_codec_err_t ctrl_get_tile_info(aom_codec_alg_priv_t *ctx,
                                          va_list args) {
  aom_tile_info *const tile_info = va_arg(args, aom_tile_info *);
  if (tile_info == NULL) return AOM_CODEC_INVALID_PARAM;
  if (ctx->frame_worker == NULL) return AOM_CODEC_ERROR;

  FrameWorkerData *const worker_data =
      (FrameWorkerData *)ctx->frame_worker->data1;
  const AV1Decoder *const pbi = worker_data->pbi;
  const CommonTileParams *const tiles = &pbi->common.tiles;
  const int rows = tiles->rows;
  const int cols = tiles->cols;

  if (tiles->uniform_spacing) {
    // Uniform spacing stores the layout as log2 counts.
    tile_info->tile_rows = 1 << tiles->log2_rows;
    tile_info->tile_columns = 1 << tiles->log2_cols;
  } else {
    tile_info->tile_rows = rows;
    tile_info->tile_columns = cols;
  }

  // Per-tile extents are differences of consecutive start positions
  // (in superblock units).
  for (int c = 1; c <= cols; ++c) {
    tile_info->tile_widths[c - 1] =
        tiles->col_start_sb[c] - tiles->col_start_sb[c - 1];
  }
  for (int r = 1; r <= rows; ++r) {
    tile_info->tile_heights[r - 1] =
        tiles->row_start_sb[r] - tiles->row_start_sb[r - 1];
  }
  tile_info->num_tile_groups = pbi->num_tile_groups;
  return AOM_CODEC_OK;
}
movq %rdi, -0x10(%rsp) movq %rsi, -0x18(%rsp) movq -0x18(%rsp), %rax movq %rax, -0x60(%rsp) movl (%rax), %eax movl %eax, -0x54(%rsp) cmpl $0x28, %eax ja 0xa3f6b movq -0x60(%rsp), %rcx movl -0x54(%rsp), %edx movslq %edx, %rax addq 0x10(%rcx), %rax addl $0x8, %edx movl %edx, (%rcx) movq %rax, -0x68(%rsp) jmp 0xa3f84 movq -0x60(%rsp), %rcx movq 0x8(%rcx), %rax movq %rax, %rdx addq $0x8, %rdx movq %rdx, 0x8(%rcx) movq %rax, -0x68(%rsp) movq -0x68(%rsp), %rax movq (%rax), %rax movq %rax, -0x20(%rsp) cmpq $0x0, -0x20(%rsp) je 0xa4137 movq -0x10(%rsp), %rax cmpq $0x0, 0x69f0(%rax) je 0xa412d movq -0x10(%rsp), %rax movq 0x69f0(%rax), %rax movq %rax, -0x28(%rsp) movq -0x28(%rsp), %rax movq 0x20(%rax), %rax movq %rax, -0x30(%rsp) movq -0x30(%rsp), %rax movq (%rax), %rax movq %rax, -0x38(%rsp) movq -0x38(%rsp), %rax addq $0x3b60, %rax # imm = 0x3B60 addq $0x60a0, %rax # imm = 0x60A0 movq %rax, -0x40(%rsp) movq -0x40(%rsp), %rax movl 0x4(%rax), %eax movl %eax, -0x44(%rsp) movq -0x40(%rsp), %rax movl (%rax), %eax movl %eax, -0x48(%rsp) movq -0x40(%rsp), %rax cmpl $0x0, 0x14(%rax) je 0xa4047 movq -0x40(%rsp), %rax movl 0x1c(%rax), %ecx movl $0x1, %eax shll %cl, %eax movl %eax, %ecx movq -0x20(%rsp), %rax movl %ecx, 0x4(%rax) movq -0x40(%rsp), %rax movl 0x18(%rax), %ecx movl $0x1, %eax shll %cl, %eax movl %eax, %ecx movq -0x20(%rsp), %rax movl %ecx, (%rax) jmp 0xa405e movl -0x44(%rsp), %ecx movq -0x20(%rsp), %rax movl %ecx, 0x4(%rax) movl -0x48(%rsp), %ecx movq -0x20(%rsp), %rax movl %ecx, (%rax) movl $0x1, -0x4c(%rsp) movl -0x4c(%rsp), %eax cmpl -0x48(%rsp), %eax jg 0xa40b1 movq -0x40(%rsp), %rax movslq -0x4c(%rsp), %rcx movl 0x3c(%rax,%rcx,4), %edx movq -0x40(%rsp), %rax movl -0x4c(%rsp), %ecx subl $0x1, %ecx movslq %ecx, %rcx subl 0x3c(%rax,%rcx,4), %edx movq -0x20(%rsp), %rax movl -0x4c(%rsp), %ecx subl $0x1, %ecx movslq %ecx, %rcx movl %edx, 0x8(%rax,%rcx,4) movl -0x4c(%rsp), %eax addl $0x1, %eax movl %eax, -0x4c(%rsp) jmp 0xa4066 movl $0x1, -0x50(%rsp) movl -0x50(%rsp), 
%eax cmpl -0x44(%rsp), %eax jg 0xa410d movq -0x40(%rsp), %rax movslq -0x50(%rsp), %rcx movl 0x140(%rax,%rcx,4), %edx movq -0x40(%rsp), %rax movl -0x50(%rsp), %ecx subl $0x1, %ecx movslq %ecx, %rcx subl 0x140(%rax,%rcx,4), %edx movq -0x20(%rsp), %rax movl -0x50(%rsp), %ecx subl $0x1, %ecx movslq %ecx, %rcx movl %edx, 0x108(%rax,%rcx,4) movl -0x50(%rsp), %eax addl $0x1, %eax movl %eax, -0x50(%rsp) jmp 0xa40b9 movq -0x38(%rsp), %rax movl 0x5f758(%rax), %ecx movq -0x20(%rsp), %rax movl %ecx, 0x208(%rax) movl $0x0, -0x4(%rsp) jmp 0xa413f movl $0x1, -0x4(%rsp) jmp 0xa413f movl $0x8, -0x4(%rsp) movl -0x4(%rsp), %eax retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/av1/av1_dx_iface.c
ctrl_get_screen_content_tools_info
// Control handler: report the screen-content-tools feature bits of the
// current frame (screen content tools, intrabc, forced integer MV).
static aom_codec_err_t ctrl_get_screen_content_tools_info(
    aom_codec_alg_priv_t *ctx, va_list args) {
  aom_screen_content_tools_info *const sc_info =
      va_arg(args, aom_screen_content_tools_info *);
  if (sc_info == NULL) return AOM_CODEC_INVALID_PARAM;
  if (ctx->frame_worker == NULL) return AOM_CODEC_ERROR;

  const AV1Decoder *const pbi =
      ((FrameWorkerData *)ctx->frame_worker->data1)->pbi;
  sc_info->allow_screen_content_tools =
      pbi->common.features.allow_screen_content_tools;
  sc_info->allow_intrabc = pbi->common.features.allow_intrabc;
  sc_info->force_integer_mv =
      (int)pbi->common.features.cur_frame_force_integer_mv;
  return AOM_CODEC_OK;
}
movq %rdi, -0x10(%rsp) movq %rsi, -0x18(%rsp) movq -0x18(%rsp), %rax movq %rax, -0x48(%rsp) movl (%rax), %eax movl %eax, -0x3c(%rsp) cmpl $0x28, %eax ja 0xa418b movq -0x48(%rsp), %rcx movl -0x3c(%rsp), %edx movslq %edx, %rax addq 0x10(%rcx), %rax addl $0x8, %edx movl %edx, (%rcx) movq %rax, -0x50(%rsp) jmp 0xa41a4 movq -0x48(%rsp), %rcx movq 0x8(%rcx), %rax movq %rax, %rdx addq $0x8, %rdx movq %rdx, 0x8(%rcx) movq %rax, -0x50(%rsp) movq -0x50(%rsp), %rax movq (%rax), %rax movq %rax, -0x20(%rsp) cmpq $0x0, -0x20(%rsp) je 0xa4253 movq -0x10(%rsp), %rax cmpq $0x0, 0x69f0(%rax) je 0xa4249 movq -0x10(%rsp), %rax movq 0x69f0(%rax), %rax movq %rax, -0x28(%rsp) movq -0x28(%rsp), %rax movq 0x20(%rax), %rax movq %rax, -0x30(%rsp) movq -0x30(%rsp), %rax movq (%rax), %rax movq %rax, -0x38(%rsp) movq -0x38(%rsp), %rax movb 0x3d4f(%rax), %al andb $0x1, %al movzbl %al, %ecx movq -0x20(%rsp), %rax movl %ecx, (%rax) movq -0x38(%rsp), %rax movb 0x3d50(%rax), %al andb $0x1, %al movzbl %al, %ecx movq -0x20(%rsp), %rax movl %ecx, 0x4(%rax) movq -0x38(%rsp), %rax movb 0x3d4e(%rax), %al andb $0x1, %al movzbl %al, %ecx movq -0x20(%rsp), %rax movl %ecx, 0x8(%rax) movl $0x0, -0x4(%rsp) jmp 0xa425b movl $0x1, -0x4(%rsp) jmp 0xa425b movl $0x8, -0x4(%rsp) movl -0x4(%rsp), %eax retq
/m-ab-s[P]aom/av1/av1_dx_iface.c
ctrl_get_still_picture
// Control handler: report the sequence header's still-picture flags.
static aom_codec_err_t ctrl_get_still_picture(aom_codec_alg_priv_t *ctx,
                                              va_list args) {
  aom_still_picture_info *const still_picture_info =
      va_arg(args, aom_still_picture_info *);
  if (still_picture_info == NULL) return AOM_CODEC_INVALID_PARAM;
  if (ctx->frame_worker == NULL) return AOM_CODEC_ERROR;

  const AV1Decoder *const pbi =
      ((FrameWorkerData *)ctx->frame_worker->data1)->pbi;
  still_picture_info->is_still_picture = (int)pbi->seq_params.still_picture;
  still_picture_info->is_reduced_still_picture_hdr =
      (int)pbi->seq_params.reduced_still_picture_hdr;
  return AOM_CODEC_OK;
}
movq %rdi, -0x10(%rsp) movq %rsi, -0x18(%rsp) movq -0x18(%rsp), %rax movq %rax, -0x48(%rsp) movl (%rax), %eax movl %eax, -0x3c(%rsp) cmpl $0x28, %eax ja 0xa429b movq -0x48(%rsp), %rcx movl -0x3c(%rsp), %edx movslq %edx, %rax addq 0x10(%rcx), %rax addl $0x8, %edx movl %edx, (%rcx) movq %rax, -0x50(%rsp) jmp 0xa42b4 movq -0x48(%rsp), %rcx movq 0x8(%rcx), %rax movq %rax, %rdx addq $0x8, %rdx movq %rdx, 0x8(%rcx) movq %rax, -0x50(%rsp) movq -0x50(%rsp), %rax movq (%rax), %rax movq %rax, -0x20(%rsp) cmpq $0x0, -0x20(%rsp) je 0xa433f movq -0x10(%rsp), %rax cmpq $0x0, 0x69f0(%rax) je 0xa4335 movq -0x10(%rsp), %rax movq 0x69f0(%rax), %rax movq %rax, -0x28(%rsp) movq -0x28(%rsp), %rax movq 0x20(%rax), %rax movq %rax, -0x30(%rsp) movq -0x30(%rsp), %rax movq (%rax), %rax movq %rax, -0x38(%rsp) movq -0x38(%rsp), %rax movzbl 0x5f7a1(%rax), %ecx movq -0x20(%rsp), %rax movl %ecx, (%rax) movq -0x38(%rsp), %rax movzbl 0x5f7a2(%rax), %ecx movq -0x20(%rsp), %rax movl %ecx, 0x4(%rax) movl $0x0, -0x4(%rsp) jmp 0xa4347 movl $0x1, -0x4(%rsp) jmp 0xa4347 movl $0x8, -0x4(%rsp) movl -0x4(%rsp), %eax retq nopl (%rax)
/m-ab-s[P]aom/av1/av1_dx_iface.c
ctrl_get_sb_size
// Control handler: report the sequence's superblock size (128x128 or 64x64).
static aom_codec_err_t ctrl_get_sb_size(aom_codec_alg_priv_t *ctx,
                                        va_list args) {
  aom_superblock_size_t *const sb_size = va_arg(args, aom_superblock_size_t *);
  if (sb_size == NULL) return AOM_CODEC_INVALID_PARAM;
  if (ctx->frame_worker == NULL) return AOM_CODEC_ERROR;

  const AV1Decoder *const pbi =
      ((FrameWorkerData *)ctx->frame_worker->data1)->pbi;
  *sb_size = (pbi->seq_params.sb_size == BLOCK_128X128)
                 ? AOM_SUPERBLOCK_SIZE_128X128
                 : AOM_SUPERBLOCK_SIZE_64X64;
  return AOM_CODEC_OK;
}
movq %rdi, -0x10(%rsp) movq %rsi, -0x18(%rsp) movq -0x18(%rsp), %rax movq %rax, -0x48(%rsp) movl (%rax), %eax movl %eax, -0x3c(%rsp) cmpl $0x28, %eax ja 0xa438b movq -0x48(%rsp), %rcx movl -0x3c(%rsp), %edx movslq %edx, %rax addq 0x10(%rcx), %rax addl $0x8, %edx movl %edx, (%rcx) movq %rax, -0x50(%rsp) jmp 0xa43a4 movq -0x48(%rsp), %rcx movq 0x8(%rcx), %rax movq %rax, %rdx addq $0x8, %rdx movq %rdx, 0x8(%rcx) movq %rax, -0x50(%rsp) movq -0x50(%rsp), %rax movq (%rax), %rax movq %rax, -0x20(%rsp) cmpq $0x0, -0x20(%rsp) je 0xa4431 movq -0x10(%rsp), %rax cmpq $0x0, 0x69f0(%rax) je 0xa4427 movq -0x10(%rsp), %rax movq 0x69f0(%rax), %rax movq %rax, -0x28(%rsp) movq -0x28(%rsp), %rax movq 0x20(%rax), %rax movq %rax, -0x30(%rsp) movq -0x30(%rsp), %rax movq (%rax), %rax movq %rax, -0x38(%rsp) movq -0x38(%rsp), %rax movzbl 0x5f784(%rax), %eax cmpl $0xf, %eax jne 0xa4412 movq -0x20(%rsp), %rax movl $0x1, (%rax) jmp 0xa441d movq -0x20(%rsp), %rax movl $0x0, (%rax) movl $0x0, -0x4(%rsp) jmp 0xa4439 movl $0x1, -0x4(%rsp) jmp 0xa4439 movl $0x8, -0x4(%rsp) movl -0x4(%rsp), %eax retq nop
/m-ab-s[P]aom/av1/av1_dx_iface.c
ctrl_get_show_existing_frame_flag
// Control handler: report whether the last decoded frame was a
// show-existing-frame directive.
static aom_codec_err_t ctrl_get_show_existing_frame_flag(
    aom_codec_alg_priv_t *ctx, va_list args) {
  int *const flag = va_arg(args, int *);
  if (flag) {
    if (ctx->frame_worker) {
      FrameWorkerData *const worker_data =
          (FrameWorkerData *)ctx->frame_worker->data1;
      *flag = worker_data->pbi->common.show_existing_frame;
      return AOM_CODEC_OK;
    }
    return AOM_CODEC_ERROR;
  }
  return AOM_CODEC_INVALID_PARAM;
}
movq %rdi, -0x10(%rsp) movq %rsi, -0x18(%rsp) movq -0x18(%rsp), %rax movq %rax, -0x30(%rsp) movl (%rax), %eax movl %eax, -0x24(%rsp) cmpl $0x28, %eax ja 0xa447b movq -0x30(%rsp), %rcx movl -0x24(%rsp), %edx movslq %edx, %rax addq 0x10(%rcx), %rax addl $0x8, %edx movl %edx, (%rcx) movq %rax, -0x38(%rsp) jmp 0xa4494 movq -0x30(%rsp), %rcx movq 0x8(%rcx), %rax movq %rax, %rdx addq $0x8, %rdx movq %rdx, 0x8(%rcx) movq %rax, -0x38(%rsp) movq -0x38(%rsp), %rax movq (%rax), %rax movq %rax, -0x20(%rsp) cmpq $0x0, -0x20(%rsp) jne 0xa44b3 movl $0x8, -0x4(%rsp) jmp 0xa44f4 movq -0x10(%rsp), %rax cmpq $0x0, 0x69f0(%rax) jne 0xa44cc movl $0x1, -0x4(%rsp) jmp 0xa44f4 movq -0x10(%rsp), %rax movq 0x69f0(%rax), %rax movq 0x20(%rax), %rax movq (%rax), %rax movl 0x3d48(%rax), %ecx movq -0x20(%rsp), %rax movl %ecx, (%rax) movl $0x0, -0x4(%rsp) movl -0x4(%rsp), %eax retq nopl (%rax)
/m-ab-s[P]aom/av1/av1_dx_iface.c
ctrl_get_s_frame_info
// Control handler: report the decoder's S-frame bookkeeping (whether the
// current frame is an S-frame, and whether it coincides with an altref).
static aom_codec_err_t ctrl_get_s_frame_info(aom_codec_alg_priv_t *ctx,
                                             va_list args) {
  aom_s_frame_info *const s_frame_info = va_arg(args, aom_s_frame_info *);
  if (s_frame_info == NULL) return AOM_CODEC_INVALID_PARAM;
  if (ctx->frame_worker == NULL) return AOM_CODEC_ERROR;

  const AV1Decoder *const pbi =
      ((FrameWorkerData *)ctx->frame_worker->data1)->pbi;
  s_frame_info->is_s_frame = pbi->sframe_info.is_s_frame;
  s_frame_info->is_s_frame_at_altref = pbi->sframe_info.is_s_frame_at_altref;
  return AOM_CODEC_OK;
}
movq %rdi, -0x10(%rsp) movq %rsi, -0x18(%rsp) movq -0x18(%rsp), %rax movq %rax, -0x48(%rsp) movl (%rax), %eax movl %eax, -0x3c(%rsp) cmpl $0x28, %eax ja 0xa453b movq -0x48(%rsp), %rcx movl -0x3c(%rsp), %edx movslq %edx, %rax addq 0x10(%rcx), %rax addl $0x8, %edx movl %edx, (%rcx) movq %rax, -0x50(%rsp) jmp 0xa4554 movq -0x48(%rsp), %rcx movq 0x8(%rcx), %rax movq %rax, %rdx addq $0x8, %rdx movq %rdx, 0x8(%rcx) movq %rax, -0x50(%rsp) movq -0x50(%rsp), %rax movq (%rax), %rax movq %rax, -0x20(%rsp) cmpq $0x0, -0x20(%rsp) je 0xa45dd movq -0x10(%rsp), %rax cmpq $0x0, 0x69f0(%rax) je 0xa45d3 movq -0x10(%rsp), %rax movq 0x69f0(%rax), %rax movq %rax, -0x28(%rsp) movq -0x28(%rsp), %rax movq 0x20(%rax), %rax movq %rax, -0x30(%rsp) movq -0x30(%rsp), %rax movq (%rax), %rax movq %rax, -0x38(%rsp) movq -0x38(%rsp), %rax movl 0x5f75c(%rax), %ecx movq -0x20(%rsp), %rax movl %ecx, (%rax) movq -0x38(%rsp), %rax movl 0x5f760(%rax), %ecx movq -0x20(%rsp), %rax movl %ecx, 0x4(%rax) movl $0x0, -0x4(%rsp) jmp 0xa45e5 movl $0x1, -0x4(%rsp) jmp 0xa45e5 movl $0x8, -0x4(%rsp) movl -0x4(%rsp), %eax retq nopw (%rax,%rax)
/m-ab-s[P]aom/av1/av1_dx_iface.c
ctrl_get_show_frame_flag
// Control handler: report the show_frame flag of the current frame.
static aom_codec_err_t ctrl_get_show_frame_flag(aom_codec_alg_priv_t *ctx,
                                                va_list args) {
  int *const show_frame = va_arg(args, int *);
  if (show_frame == NULL) return AOM_CODEC_INVALID_PARAM;
  if (ctx->frame_worker == NULL) return AOM_CODEC_ERROR;

  FrameWorkerData *const worker_data =
      (FrameWorkerData *)ctx->frame_worker->data1;
  *show_frame = worker_data->pbi->common.show_frame;
  return AOM_CODEC_OK;
}
movq %rdi, -0x10(%rsp) movq %rsi, -0x18(%rsp) movq -0x18(%rsp), %rax movq %rax, -0x38(%rsp) movl (%rax), %eax movl %eax, -0x2c(%rsp) cmpl $0x28, %eax ja 0xa462b movq -0x38(%rsp), %rcx movl -0x2c(%rsp), %edx movslq %edx, %rax addq 0x10(%rcx), %rax addl $0x8, %edx movl %edx, (%rcx) movq %rax, -0x40(%rsp) jmp 0xa4644 movq -0x38(%rsp), %rcx movq 0x8(%rcx), %rax movq %rax, %rdx addq $0x8, %rdx movq %rdx, 0x8(%rcx) movq %rax, -0x40(%rsp) movq -0x40(%rsp), %rax movq (%rax), %rax movq %rax, -0x20(%rsp) cmpq $0x0, -0x20(%rsp) jne 0xa4663 movl $0x8, -0x4(%rsp) jmp 0xa46ae movq -0x10(%rsp), %rax cmpq $0x0, 0x69f0(%rax) jne 0xa467c movl $0x1, -0x4(%rsp) jmp 0xa46ae movq -0x10(%rsp), %rax movq 0x69f0(%rax), %rax movq 0x20(%rax), %rax movq %rax, -0x28(%rsp) movq -0x28(%rsp), %rax movq (%rax), %rax movl 0x3d40(%rax), %ecx movq -0x20(%rsp), %rax movl %ecx, (%rax) movl $0x0, -0x4(%rsp) movl -0x4(%rsp), %eax retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/av1/av1_dx_iface.c
ctrl_get_base_q_idx
// Control handler: report the base quantizer index of the current frame.
static aom_codec_err_t ctrl_get_base_q_idx(aom_codec_alg_priv_t *ctx,
                                           va_list args) {
  int *const base_q_idx = va_arg(args, int *);
  if (base_q_idx == NULL) return AOM_CODEC_INVALID_PARAM;
  if (ctx->frame_worker == NULL) return AOM_CODEC_ERROR;

  FrameWorkerData *const worker_data =
      (FrameWorkerData *)ctx->frame_worker->data1;
  *base_q_idx = worker_data->pbi->common.quant_params.base_qindex;
  return AOM_CODEC_OK;
}
movq %rdi, -0x10(%rsp) movq %rsi, -0x18(%rsp) movq -0x18(%rsp), %rax movq %rax, -0x38(%rsp) movl (%rax), %eax movl %eax, -0x2c(%rsp) cmpl $0x28, %eax ja 0xa46fb movq -0x38(%rsp), %rcx movl -0x2c(%rsp), %edx movslq %edx, %rax addq 0x10(%rcx), %rax addl $0x8, %edx movl %edx, (%rcx) movq %rax, -0x40(%rsp) jmp 0xa4714 movq -0x38(%rsp), %rcx movq 0x8(%rcx), %rax movq %rax, %rdx addq $0x8, %rdx movq %rdx, 0x8(%rcx) movq %rax, -0x40(%rsp) movq -0x40(%rsp), %rax movq (%rax), %rax movq %rax, -0x20(%rsp) cmpq $0x0, -0x20(%rsp) jne 0xa4733 movl $0x8, -0x4(%rsp) jmp 0xa477e movq -0x10(%rsp), %rax cmpq $0x0, 0x69f0(%rax) jne 0xa474c movl $0x1, -0x4(%rsp) jmp 0xa477e movq -0x10(%rsp), %rax movq 0x69f0(%rax), %rax movq 0x20(%rax), %rax movq %rax, -0x28(%rsp) movq -0x28(%rsp), %rax movq (%rax), %rax movl 0x3dc8(%rax), %ecx movq -0x20(%rsp), %rax movl %ecx, (%rax) movl $0x0, -0x4(%rsp) movl -0x4(%rsp), %eax retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/av1/av1_dx_iface.c
ctrl_get_order_hint
// Control handler: report the order hint of the current frame.
static aom_codec_err_t ctrl_get_order_hint(aom_codec_alg_priv_t *ctx,
                                           va_list args) {
  unsigned int *const order_hint = va_arg(args, unsigned int *);
  if (order_hint == NULL) return AOM_CODEC_INVALID_PARAM;
  if (ctx->frame_worker == NULL) return AOM_CODEC_ERROR;

  FrameWorkerData *const worker_data =
      (FrameWorkerData *)ctx->frame_worker->data1;
  *order_hint = worker_data->pbi->common.current_frame.order_hint;
  return AOM_CODEC_OK;
}
movq %rdi, -0x10(%rsp) movq %rsi, -0x18(%rsp) movq -0x18(%rsp), %rax movq %rax, -0x38(%rsp) movl (%rax), %eax movl %eax, -0x2c(%rsp) cmpl $0x28, %eax ja 0xa47cb movq -0x38(%rsp), %rcx movl -0x2c(%rsp), %edx movslq %edx, %rax addq 0x10(%rcx), %rax addl $0x8, %edx movl %edx, (%rcx) movq %rax, -0x40(%rsp) jmp 0xa47e4 movq -0x38(%rsp), %rcx movq 0x8(%rcx), %rax movq %rax, %rdx addq $0x8, %rdx movq %rdx, 0x8(%rcx) movq %rax, -0x40(%rsp) movq -0x40(%rsp), %rax movq (%rax), %rax movq %rax, -0x20(%rsp) cmpq $0x0, -0x20(%rsp) jne 0xa4803 movl $0x8, -0x4(%rsp) jmp 0xa484e movq -0x10(%rsp), %rax cmpq $0x0, 0x69f0(%rax) jne 0xa481c movl $0x1, -0x4(%rsp) jmp 0xa484e movq -0x10(%rsp), %rax movq 0x69f0(%rax), %rax movq 0x20(%rax), %rax movq %rax, -0x28(%rsp) movq -0x28(%rsp), %rax movq (%rax), %rax movl 0x3b64(%rax), %ecx movq -0x20(%rsp), %rax movl %ecx, (%rax) movl $0x0, -0x4(%rsp) movl -0x4(%rsp), %eax retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/av1/av1_dx_iface.c
ctrl_get_mi_info
// Control handler: copy the MB_MODE_INFO at mode-info unit (mi_row, mi_col)
// of the current frame into the caller-provided struct.
//
// Fix: the original computed `offset = mi_row * mi_stride + mi_col` before
// validating the coordinates; for hostile caller values the signed multiply
// can overflow (undefined behavior) even though the offset would later be
// rejected. Validate first, then compute.
static aom_codec_err_t ctrl_get_mi_info(aom_codec_alg_priv_t *ctx,
                                        va_list args) {
  const int mi_row = va_arg(args, int);
  const int mi_col = va_arg(args, int);
  MB_MODE_INFO *mi = va_arg(args, MB_MODE_INFO *);
  if (mi == NULL) return AOM_CODEC_INVALID_PARAM;
  if (ctx->frame_worker == NULL) return AOM_CODEC_ERROR;
  FrameWorkerData *const frame_worker_data =
      (FrameWorkerData *)ctx->frame_worker->data1;
  if (frame_worker_data == NULL) return AOM_CODEC_ERROR;

  AV1_COMMON *cm = &frame_worker_data->pbi->common;
  const int mi_rows = cm->mi_params.mi_rows;
  const int mi_cols = cm->mi_params.mi_cols;
  // Range-check before forming the index to avoid signed overflow UB in the
  // multiply below.
  if (mi_row < 0 || mi_row >= mi_rows || mi_col < 0 || mi_col >= mi_cols) {
    return AOM_CODEC_INVALID_PARAM;
  }
  const int offset = mi_row * cm->mi_params.mi_stride + mi_col;
  memcpy(mi, cm->mi_params.mi_grid_base[offset], sizeof(*mi));
  return AOM_CODEC_OK;
}
subq $0x98, %rsp movq %rdi, 0x88(%rsp) movq %rsi, 0x80(%rsp) movq 0x80(%rsp), %rax movq %rax, 0x40(%rsp) movl (%rax), %eax movl %eax, 0x4c(%rsp) cmpl $0x28, %eax ja 0xa48ab movq 0x40(%rsp), %rcx movl 0x4c(%rsp), %edx movslq %edx, %rax addq 0x10(%rcx), %rax addl $0x8, %edx movl %edx, (%rcx) movq %rax, 0x38(%rsp) jmp 0xa48c4 movq 0x40(%rsp), %rcx movq 0x8(%rcx), %rax movq %rax, %rdx addq $0x8, %rdx movq %rdx, 0x8(%rcx) movq %rax, 0x38(%rsp) movq 0x38(%rsp), %rax movl (%rax), %eax movl %eax, 0x7c(%rsp) movq 0x80(%rsp), %rax movq %rax, 0x28(%rsp) movl (%rax), %eax movl %eax, 0x34(%rsp) cmpl $0x28, %eax ja 0xa4903 movq 0x28(%rsp), %rcx movl 0x34(%rsp), %edx movslq %edx, %rax addq 0x10(%rcx), %rax addl $0x8, %edx movl %edx, (%rcx) movq %rax, 0x20(%rsp) jmp 0xa491c movq 0x28(%rsp), %rcx movq 0x8(%rcx), %rax movq %rax, %rdx addq $0x8, %rdx movq %rdx, 0x8(%rcx) movq %rax, 0x20(%rsp) movq 0x20(%rsp), %rax movl (%rax), %eax movl %eax, 0x78(%rsp) movq 0x80(%rsp), %rax movq %rax, 0x10(%rsp) movl (%rax), %eax movl %eax, 0x1c(%rsp) cmpl $0x28, %eax ja 0xa495b movq 0x10(%rsp), %rcx movl 0x1c(%rsp), %edx movslq %edx, %rax addq 0x10(%rcx), %rax addl $0x8, %edx movl %edx, (%rcx) movq %rax, 0x8(%rsp) jmp 0xa4974 movq 0x10(%rsp), %rcx movq 0x8(%rcx), %rax movq %rax, %rdx addq $0x8, %rdx movq %rdx, 0x8(%rcx) movq %rax, 0x8(%rsp) movq 0x8(%rsp), %rax movq (%rax), %rax movq %rax, 0x70(%rsp) cmpq $0x0, 0x70(%rsp) jne 0xa4999 movl $0x8, 0x94(%rsp) jmp 0xa4a9a movq 0x88(%rsp), %rax cmpq $0x0, 0x69f0(%rax) jne 0xa49bb movl $0x1, 0x94(%rsp) jmp 0xa4a9a movq 0x88(%rsp), %rax movq 0x69f0(%rax), %rax movq 0x20(%rax), %rax movq %rax, 0x68(%rsp) cmpq $0x0, 0x68(%rsp) jne 0xa49eb movl $0x1, 0x94(%rsp) jmp 0xa4a9a movq 0x68(%rsp), %rax movq (%rax), %rax addq $0x3b60, %rax # imm = 0x3B60 movq %rax, 0x60(%rsp) movq 0x60(%rsp), %rax movl 0x214(%rax), %eax movl %eax, 0x5c(%rsp) movq 0x60(%rsp), %rax movl 0x218(%rax), %eax movl %eax, 0x58(%rsp) movq 0x60(%rsp), %rax movl 0x244(%rax), %eax movl %eax, 
0x54(%rsp) movl 0x7c(%rsp), %eax imull 0x54(%rsp), %eax addl 0x78(%rsp), %eax movl %eax, 0x50(%rsp) cmpl $0x0, 0x7c(%rsp) jl 0xa4a5e movl 0x7c(%rsp), %eax cmpl 0x5c(%rsp), %eax jge 0xa4a5e cmpl $0x0, 0x78(%rsp) jl 0xa4a5e movl 0x78(%rsp), %eax cmpl 0x58(%rsp), %eax jl 0xa4a6b movl $0x8, 0x94(%rsp) jmp 0xa4a9a movq 0x70(%rsp), %rdi movq 0x60(%rsp), %rax movq 0x238(%rax), %rax movslq 0x50(%rsp), %rcx movq (%rax,%rcx,8), %rsi movl $0xb0, %edx callq 0x183b0 movl $0x0, 0x94(%rsp) movl 0x94(%rsp), %eax addq $0x98, %rsp retq nopl (%rax)
/m-ab-s[P]aom/av1/av1_dx_iface.c
set_mi_row_col
// Positions xd at mode-info unit (mi_row, mi_col) for a block of bh x bw
// mi units: computes the distances to the frame edges in 1/8-pel units,
// records row/col, and caches the above/left neighbor availability and
// neighbor MB_MODE_INFO pointers for both luma and chroma.
// NOTE(review): assumes xd->mi already points at the current block's entry
// in the mi grid (neighbors are reached by negative offsets) — confirm at
// call sites.
static inline void set_mi_row_col(MACROBLOCKD *xd, const TileInfo *const tile,
                                  int mi_row, int bh, int mi_col, int bw,
                                  int mi_rows, int mi_cols) {
  // Edge distances in 1/8-pel units; negative toward top/left.
  xd->mb_to_top_edge = -GET_MV_SUBPEL(mi_row * MI_SIZE);
  xd->mb_to_bottom_edge = GET_MV_SUBPEL((mi_rows - bh - mi_row) * MI_SIZE);
  xd->mb_to_left_edge = -GET_MV_SUBPEL((mi_col * MI_SIZE));
  xd->mb_to_right_edge = GET_MV_SUBPEL((mi_cols - bw - mi_col) * MI_SIZE);

  xd->mi_row = mi_row;
  xd->mi_col = mi_col;

  // Are edges available for intra prediction?
  // Availability is relative to the enclosing tile, not the frame.
  xd->up_available = (mi_row > tile->mi_row_start);

  const int ss_x = xd->plane[1].subsampling_x;
  const int ss_y = xd->plane[1].subsampling_y;

  xd->left_available = (mi_col > tile->mi_col_start);
  xd->chroma_up_available = xd->up_available;
  xd->chroma_left_available = xd->left_available;
  // For sub-8x8 luma blocks with chroma subsampling, the chroma block spans
  // the previous luma column/row as well, so availability is re-evaluated
  // one mi unit back.
  if (ss_x && bw < mi_size_wide[BLOCK_8X8])
    xd->chroma_left_available = (mi_col - 1) > tile->mi_col_start;
  if (ss_y && bh < mi_size_high[BLOCK_8X8])
    xd->chroma_up_available = (mi_row - 1) > tile->mi_row_start;

  if (xd->up_available) {
    xd->above_mbmi = xd->mi[-xd->mi_stride];
  } else {
    xd->above_mbmi = NULL;
  }

  if (xd->left_available) {
    xd->left_mbmi = xd->mi[-1];
  } else {
    xd->left_mbmi = NULL;
  }

  // A block carries chroma information when it is the last luma block of its
  // chroma unit in each subsampled dimension (or there is no subsampling).
  const int chroma_ref = ((mi_row & 0x01) || !(bh & 0x01) || !ss_y) &&
                         ((mi_col & 0x01) || !(bw & 0x01) || !ss_x);
  xd->is_chroma_ref = chroma_ref;
  if (chroma_ref) {
    // To help calculate the "above" and "left" chroma blocks, note that the
    // current block may cover multiple luma blocks (e.g., if partitioned into
    // 4x4 luma blocks).
    // First, find the top-left-most luma block covered by this chroma block
    MB_MODE_INFO **base_mi =
        &xd->mi[-(mi_row & ss_y) * xd->mi_stride - (mi_col & ss_x)];
    // Then, we consider the luma region covered by the left or above 4x4 chroma
    // prediction. We want to point to the chroma reference block in that
    // region, which is the bottom-right-most mi unit.
    // This leads to the following offsets:
    MB_MODE_INFO *chroma_above_mi =
        xd->chroma_up_available ? base_mi[-xd->mi_stride + ss_x] : NULL;
    xd->chroma_above_mbmi = chroma_above_mi;

    MB_MODE_INFO *chroma_left_mi =
        xd->chroma_left_available ? base_mi[ss_y * xd->mi_stride - 1] : NULL;
    xd->chroma_left_mbmi = chroma_left_mi;
  }

  xd->height = bh;
  xd->width = bw;

  // Rectangular-partition bookkeeping: mark the last vertical sub-rectangle
  // and the first horizontal sub-rectangle of the parent square.
  xd->is_last_vertical_rect = 0;
  if (xd->width < xd->height) {
    if (!((mi_col + xd->width) & (xd->height - 1))) {
      xd->is_last_vertical_rect = 1;
    }
  }

  xd->is_first_horizontal_rect = 0;
  if (xd->width > xd->height)
    if (!(mi_row & (xd->width - 1))) xd->is_first_horizontal_rect = 1;
}
movl 0x10(%rsp), %eax movl 0x8(%rsp), %eax movq %rdi, -0x8(%rsp) movq %rsi, -0x10(%rsp) movl %edx, -0x14(%rsp) movl %ecx, -0x18(%rsp) movl %r8d, -0x1c(%rsp) movl %r9d, -0x20(%rsp) movl -0x14(%rsp), %eax shll $0x2, %eax shll $0x3, %eax xorl %ecx, %ecx subl %eax, %ecx movq -0x8(%rsp), %rax movl %ecx, 0x1efc(%rax) movl 0x8(%rsp), %ecx subl -0x18(%rsp), %ecx subl -0x14(%rsp), %ecx shll $0x2, %ecx shll $0x3, %ecx movq -0x8(%rsp), %rax movl %ecx, 0x1f00(%rax) movl -0x1c(%rsp), %eax shll $0x2, %eax shll $0x3, %eax xorl %ecx, %ecx subl %eax, %ecx movq -0x8(%rsp), %rax movl %ecx, 0x1ef4(%rax) movl 0x10(%rsp), %ecx subl -0x20(%rsp), %ecx subl -0x1c(%rsp), %ecx shll $0x2, %ecx shll $0x3, %ecx movq -0x8(%rsp), %rax movl %ecx, 0x1ef8(%rax) movl -0x14(%rsp), %ecx movq -0x8(%rsp), %rax movl %ecx, (%rax) movl -0x1c(%rsp), %ecx movq -0x8(%rsp), %rax movl %ecx, 0x4(%rax) movl -0x14(%rsp), %eax movq -0x10(%rsp), %rcx cmpl (%rcx), %eax setg %cl movq -0x8(%rsp), %rax andb $0x1, %cl movb %cl, 0x1ec0(%rax) movq -0x8(%rsp), %rax movl 0xa44(%rax), %eax movl %eax, -0x24(%rsp) movq -0x8(%rsp), %rax movl 0xa48(%rax), %eax movl %eax, -0x28(%rsp) movl -0x1c(%rsp), %eax movq -0x10(%rsp), %rcx cmpl 0x8(%rcx), %eax setg %cl movq -0x8(%rsp), %rax andb $0x1, %cl movb %cl, 0x1ec1(%rax) movq -0x8(%rsp), %rax movb 0x1ec0(%rax), %cl movq -0x8(%rsp), %rax andb $0x1, %cl movb %cl, 0x1ec2(%rax) movq -0x8(%rsp), %rax movb 0x1ec1(%rax), %cl movq -0x8(%rsp), %rax andb $0x1, %cl movb %cl, 0x1ec3(%rax) cmpl $0x0, -0x24(%rsp) je 0xbb676 movl -0x20(%rsp), %eax movzbl 0x9c9ab1(%rip), %ecx # 0xa85103 cmpl %ecx, %eax jge 0xbb676 movl -0x1c(%rsp), %eax subl $0x1, %eax movq -0x10(%rsp), %rcx cmpl 0x8(%rcx), %eax setg %cl movq -0x8(%rsp), %rax andb $0x1, %cl movb %cl, 0x1ec3(%rax) cmpl $0x0, -0x28(%rsp) je 0xbb6ab movl -0x18(%rsp), %eax movzbl 0x9ca68b(%rip), %ecx # 0xa85d13 cmpl %ecx, %eax jge 0xbb6ab movl -0x14(%rsp), %eax subl $0x1, %eax movq -0x10(%rsp), %rcx cmpl (%rcx), %eax setg %cl movq -0x8(%rsp), %rax andb 
$0x1, %cl movb %cl, 0x1ec2(%rax) movq -0x8(%rsp), %rax testb $0x1, 0x1ec0(%rax) je 0xbb6e4 movq -0x8(%rsp), %rax movq 0x1eb8(%rax), %rax movq -0x8(%rsp), %rdx xorl %ecx, %ecx subl 0x8(%rdx), %ecx movslq %ecx, %rcx movq (%rax,%rcx,8), %rcx movq -0x8(%rsp), %rax movq %rcx, 0x1ed0(%rax) jmp 0xbb6f4 movq -0x8(%rsp), %rax movq $0x0, 0x1ed0(%rax) movq -0x8(%rsp), %rax testb $0x1, 0x1ec1(%rax) je 0xbb720 movq -0x8(%rsp), %rax movq 0x1eb8(%rax), %rax movq -0x8(%rax), %rcx movq -0x8(%rsp), %rax movq %rcx, 0x1ec8(%rax) jmp 0xbb730 movq -0x8(%rsp), %rax movq $0x0, 0x1ec8(%rax) movl -0x14(%rsp), %eax andl $0x1, %eax cmpl $0x0, %eax jne 0xbb755 movl -0x18(%rsp), %eax andl $0x1, %eax cmpl $0x0, %eax je 0xbb755 xorl %eax, %eax cmpl $0x0, -0x28(%rsp) movb %al, -0x49(%rsp) jne 0xbb78f movl -0x1c(%rsp), %ecx andl $0x1, %ecx movb $0x1, %al cmpl $0x0, %ecx movb %al, -0x4a(%rsp) jne 0xbb787 movl -0x20(%rsp), %ecx andl $0x1, %ecx movb $0x1, %al cmpl $0x0, %ecx movb %al, -0x4a(%rsp) je 0xbb787 cmpl $0x0, -0x24(%rsp) setne %al xorb $-0x1, %al movb %al, -0x4a(%rsp) movb -0x4a(%rsp), %al movb %al, -0x49(%rsp) movb -0x49(%rsp), %al andb $0x1, %al movzbl %al, %eax movl %eax, -0x2c(%rsp) cmpl $0x0, -0x2c(%rsp) setne %cl movq -0x8(%rsp), %rax andb $0x1, %cl movb %cl, 0xc(%rax) cmpl $0x0, -0x2c(%rsp) je 0xbb89c movq -0x8(%rsp), %rax movq 0x1eb8(%rax), %rax movl -0x14(%rsp), %edx andl -0x28(%rsp), %edx xorl %ecx, %ecx subl %edx, %ecx movq -0x8(%rsp), %rdx imull 0x8(%rdx), %ecx movl -0x1c(%rsp), %edx andl -0x24(%rsp), %edx subl %edx, %ecx movslq %ecx, %rcx shlq $0x3, %rcx addq %rcx, %rax movq %rax, -0x38(%rsp) movq -0x8(%rsp), %rax testb $0x1, 0x1ec2(%rax) je 0xbb823 movq -0x38(%rsp), %rax movq -0x8(%rsp), %rdx xorl %ecx, %ecx subl 0x8(%rdx), %ecx addl -0x24(%rsp), %ecx movslq %ecx, %rcx movq (%rax,%rcx,8), %rax movq %rax, -0x58(%rsp) jmp 0xbb82c xorl %eax, %eax movq %rax, -0x58(%rsp) jmp 0xbb82c movq -0x58(%rsp), %rax movq %rax, -0x40(%rsp) movq -0x40(%rsp), %rcx movq -0x8(%rsp), %rax movq %rcx, 
0x1ee0(%rax) movq -0x8(%rsp), %rax testb $0x1, 0x1ec3(%rax) je 0xbb878 movq -0x38(%rsp), %rax movl -0x28(%rsp), %ecx movq -0x8(%rsp), %rdx imull 0x8(%rdx), %ecx subl $0x1, %ecx movslq %ecx, %rcx movq (%rax,%rcx,8), %rax movq %rax, -0x60(%rsp) jmp 0xbb881 xorl %eax, %eax movq %rax, -0x60(%rsp) jmp 0xbb881 movq -0x60(%rsp), %rax movq %rax, -0x48(%rsp) movq -0x48(%rsp), %rcx movq -0x8(%rsp), %rax movq %rcx, 0x1ed8(%rax) movl -0x18(%rsp), %eax movb %al, %cl movq -0x8(%rsp), %rax movb %cl, 0x2075(%rax) movl -0x20(%rsp), %eax movb %al, %cl movq -0x8(%rsp), %rax movb %cl, 0x2074(%rax) movq -0x8(%rsp), %rax movb $0x0, 0x2988(%rax) movq -0x8(%rsp), %rax movzbl 0x2074(%rax), %eax movq -0x8(%rsp), %rcx movzbl 0x2075(%rcx), %ecx cmpl %ecx, %eax jge 0xbb91c movl -0x1c(%rsp), %eax movq -0x8(%rsp), %rcx movzbl 0x2074(%rcx), %ecx addl %ecx, %eax movq -0x8(%rsp), %rcx movzbl 0x2075(%rcx), %ecx subl $0x1, %ecx andl %ecx, %eax cmpl $0x0, %eax jne 0xbb91a movq -0x8(%rsp), %rax movb $0x1, 0x2988(%rax) jmp 0xbb91c movq -0x8(%rsp), %rax movb $0x0, 0x2989(%rax) movq -0x8(%rsp), %rax movzbl 0x2074(%rax), %eax movq -0x8(%rsp), %rcx movzbl 0x2075(%rcx), %ecx cmpl %ecx, %eax jle 0xbb96c movl -0x14(%rsp), %eax movq -0x8(%rsp), %rcx movzbl 0x2074(%rcx), %ecx subl $0x1, %ecx andl %ecx, %eax cmpl $0x0, %eax jne 0xbb96a movq -0x8(%rsp), %rax movb $0x1, 0x2989(%rax) jmp 0xbb96c retq nopl (%rax)
/m-ab-s[P]aom/av1/common/av1_common_int.h
av1_find_ref_dv
static inline void av1_find_ref_dv(int_mv *ref_dv, const TileInfo *const tile,
                                   int mib_size, int mi_row) {
  // Derive the default intra-block-copy displacement vector: point one
  // superblock up when the row above is still inside the tile, otherwise
  // point left, past the intra-BC hardware-delay region.
  const int above_row_in_tile = (mi_row - mib_size >= tile->mi_row_start);
  if (above_row_in_tile) {
    ref_dv->as_fullmv.row = -MI_SIZE * mib_size;
    ref_dv->as_fullmv.col = 0;
  } else {
    ref_dv->as_fullmv.row = 0;
    ref_dv->as_fullmv.col = -MI_SIZE * mib_size - INTRABC_DELAY_PIXELS;
  }
  convert_fullmv_to_mv(ref_dv);
}
subq $0x18, %rsp movq %rdi, 0x10(%rsp) movq %rsi, 0x8(%rsp) movl %edx, 0x4(%rsp) movl %ecx, (%rsp) movl (%rsp), %eax subl 0x4(%rsp), %eax movq 0x8(%rsp), %rcx cmpl (%rcx), %eax jge 0xc0c9a movq 0x10(%rsp), %rax movw $0x0, (%rax) imull $0xfffffffc, 0x4(%rsp), %eax # imm = 0xFFFFFFFC subl $0x100, %eax # imm = 0x100 movw %ax, %cx movq 0x10(%rsp), %rax movw %cx, 0x2(%rax) jmp 0xc0cb8 imull $0xfffffffc, 0x4(%rsp), %eax # imm = 0xFFFFFFFC movw %ax, %cx movq 0x10(%rsp), %rax movw %cx, (%rax) movq 0x10(%rsp), %rax movw $0x0, 0x2(%rax) movq 0x10(%rsp), %rdi callq 0xc0de0 addq $0x18, %rsp retq nopw (%rax,%rax)
/m-ab-s[P]aom/av1/common/mvref_common.h
av1_zero_above_context
// Clears the above-row coding contexts (entropy, partition, txfm) of one tile
// row over the mi-column range [mi_col_start, mi_col_end), e.g. when starting
// to decode/encode a new tile row.
static inline void av1_zero_above_context(AV1_COMMON *const cm,
                                          const MACROBLOCKD *xd,
                                          int mi_col_start, int mi_col_end,
                                          const int tile_row) {
  const SequenceHeader *const seq_params = cm->seq_params;
  const int num_planes = av1_num_planes(cm);
  const int width = mi_col_end - mi_col_start;
  // Round the width up to a whole superblock so a partial SB is fully reset.
  const int aligned_width =
      ALIGN_POWER_OF_TWO(width, seq_params->mib_size_log2);
  const int offset_y = mi_col_start;
  const int width_y = aligned_width;
  // Chroma context arrays are horizontally subsampled.
  const int offset_uv = offset_y >> seq_params->subsampling_x;
  const int width_uv = width_y >> seq_params->subsampling_x;
  CommonContexts *const above_contexts = &cm->above_contexts;

  // Luma entropy context.
  av1_zero_array(above_contexts->entropy[0][tile_row] + offset_y, width_y);
  if (num_planes > 1) {
    if (above_contexts->entropy[1][tile_row] &&
        above_contexts->entropy[2][tile_row]) {
      av1_zero_array(above_contexts->entropy[1][tile_row] + offset_uv,
                     width_uv);
      av1_zero_array(above_contexts->entropy[2][tile_row] + offset_uv,
                     width_uv);
    } else {
      // Chroma buffers are missing even though num_planes > 1: the stream is
      // inconsistent, so signal corruption through the error handler.
      aom_internal_error(xd->error_info, AOM_CODEC_CORRUPT_FRAME,
                         "Invalid value of planes");
    }
  }
  av1_zero_array(above_contexts->partition[tile_row] + mi_col_start,
                 aligned_width);
  // Note: the txfm context is reset to the largest transform width, not zero.
  memset(above_contexts->txfm[tile_row] + mi_col_start,
         tx_size_wide[TX_SIZES_LARGEST], aligned_width * sizeof(TXFM_CONTEXT));
}
subq $0x58, %rsp movq %rdi, 0x50(%rsp) movq %rsi, 0x48(%rsp) movl %edx, 0x44(%rsp) movl %ecx, 0x40(%rsp) movl %r8d, 0x3c(%rsp) movq 0x50(%rsp), %rax movq 0x6088(%rax), %rax movq %rax, 0x30(%rsp) movq 0x50(%rsp), %rdi callq 0x107890 movl %eax, 0x2c(%rsp) movl 0x40(%rsp), %eax subl 0x44(%rsp), %eax movl %eax, 0x28(%rsp) movl 0x28(%rsp), %eax movq 0x30(%rsp), %rcx movl 0x24(%rcx), %ecx movl $0x1, %edx shll %cl, %edx movl %edx, %ecx subl $0x1, %ecx addl %ecx, %eax movq 0x30(%rsp), %rcx movl 0x24(%rcx), %ecx movl $0x1, %edx shll %cl, %edx movl %edx, %ecx subl $0x1, %ecx xorl $-0x1, %ecx andl %ecx, %eax movl %eax, 0x24(%rsp) movl 0x44(%rsp), %eax movl %eax, 0x20(%rsp) movl 0x24(%rsp), %eax movl %eax, 0x1c(%rsp) movl 0x20(%rsp), %eax movq 0x30(%rsp), %rcx movl 0x60(%rcx), %ecx sarl %cl, %eax movl %eax, 0x18(%rsp) movl 0x1c(%rsp), %eax movq 0x30(%rsp), %rcx movl 0x60(%rcx), %ecx sarl %cl, %eax movl %eax, 0x14(%rsp) movq 0x50(%rsp), %rax addq $0x62f8, %rax # imm = 0x62F8 movq %rax, 0x8(%rsp) movq 0x8(%rsp), %rax movq 0x8(%rax), %rax movslq 0x3c(%rsp), %rcx movq (%rax,%rcx,8), %rdi movslq 0x20(%rsp), %rax addq %rax, %rdi movslq 0x1c(%rsp), %rdx shlq $0x0, %rdx xorl %esi, %esi callq 0x18280 cmpl $0x1, 0x2c(%rsp) jle 0x108b97 movq 0x8(%rsp), %rax movq 0x10(%rax), %rax movslq 0x3c(%rsp), %rcx cmpq $0x0, (%rax,%rcx,8) je 0x108b76 movq 0x8(%rsp), %rax movq 0x18(%rax), %rax movslq 0x3c(%rsp), %rcx cmpq $0x0, (%rax,%rcx,8) je 0x108b76 movq 0x8(%rsp), %rax movq 0x10(%rax), %rax movslq 0x3c(%rsp), %rcx movq (%rax,%rcx,8), %rdi movslq 0x18(%rsp), %rax addq %rax, %rdi movslq 0x14(%rsp), %rdx shlq $0x0, %rdx xorl %esi, %esi callq 0x18280 movq 0x8(%rsp), %rax movq 0x18(%rax), %rax movslq 0x3c(%rsp), %rcx movq (%rax,%rcx,8), %rdi movslq 0x18(%rsp), %rax addq %rax, %rdi movslq 0x14(%rsp), %rdx shlq $0x0, %rdx xorl %esi, %esi callq 0x18280 jmp 0x108b95 movq 0x48(%rsp), %rax movq 0x29f0(%rax), %rdi movl $0x7, %esi leaq 0x97e1c4(%rip), %rdx # 0xa86d52 movb $0x0, %al callq 0x9e4e0 jmp 0x108b97 
movq 0x8(%rsp), %rax movq (%rax), %rax movslq 0x3c(%rsp), %rcx movq (%rax,%rcx,8), %rdi movslq 0x44(%rsp), %rax addq %rax, %rdi movslq 0x24(%rsp), %rdx shlq $0x0, %rdx xorl %esi, %esi callq 0x18280 movq 0x8(%rsp), %rax movq 0x20(%rax), %rax movslq 0x3c(%rsp), %rcx movq (%rax,%rcx,8), %rdi movslq 0x44(%rsp), %rax addq %rax, %rdi movl 0x984d00(%rip), %eax # 0xa8d8e0 movslq 0x24(%rsp), %rdx shlq $0x0, %rdx movzbl %al, %esi callq 0x18280 addq $0x58, %rsp retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/av1/common/av1_common_int.h
max_block_high
static inline int max_block_high(const MACROBLOCKD *xd, BLOCK_SIZE bsize,
                                 int plane) {
  // Nominal block height in pixels for this block size.
  int height = block_size_high[bsize];
  // A negative mb_to_bottom_edge means the block overlaps the bottom frame
  // border; shrink the usable height accordingly. The shift by (3 + ssy)
  // rescales the edge distance to this plane's pixel units.
  if (xd->mb_to_bottom_edge < 0) {
    const int subsampling_y = xd->plane[plane].subsampling_y;
    height += xd->mb_to_bottom_edge >> (3 + subsampling_y);
  }
  // Express the result in 4-pel transform-block units.
  return height >> MI_SIZE_LOG2;
}
movb %sil, %al movq %rdi, -0x8(%rsp) movb %al, -0x9(%rsp) movl %edx, -0x10(%rsp) movzbl -0x9(%rsp), %eax movl %eax, %ecx leaq 0x8ffe32(%rip), %rax # 0xa952d0 movzbl (%rax,%rcx), %eax movl %eax, -0x14(%rsp) movq -0x8(%rsp), %rax cmpl $0x0, 0x1f00(%rax) jge 0x1954f1 movq -0x8(%rsp), %rax addq $0x10, %rax movslq -0x10(%rsp), %rcx imulq $0xa30, %rcx, %rcx # imm = 0xA30 addq %rcx, %rax movq %rax, -0x20(%rsp) movq -0x8(%rsp), %rax movl 0x1f00(%rax), %eax movq -0x20(%rsp), %rcx movl 0x8(%rcx), %ecx addl $0x3, %ecx sarl %cl, %eax addl -0x14(%rsp), %eax movl %eax, -0x14(%rsp) movl -0x14(%rsp), %eax sarl $0x2, %eax retq nopl (%rax)
/m-ab-s[P]aom/av1/common/av1_common_int.h
txfm_partition_update
static inline void txfm_partition_update(TXFM_CONTEXT *above_ctx,
                                         TXFM_CONTEXT *left_ctx,
                                         TX_SIZE tx_size, TX_SIZE txb_size) {
  // Record the chosen transform dimensions over the transform block's
  // footprint: tx height along the left context, tx width along the above.
  const BLOCK_SIZE bsize = txsize_to_bsize[txb_size];
  const uint8_t txw = tx_size_wide[tx_size];
  const uint8_t txh = tx_size_high[tx_size];
  const int bh = mi_size_high[bsize];
  const int bw = mi_size_wide[bsize];
  for (int i = 0; i < bh; ++i) left_ctx[i] = txh;
  for (int i = 0; i < bw; ++i) above_ctx[i] = txw;
}
movb %cl, %al movb %dl, %cl movq %rdi, -0x8(%rsp) movq %rsi, -0x10(%rsp) movb %cl, -0x11(%rsp) movb %al, -0x12(%rsp) movzbl -0x12(%rsp), %eax movl %eax, %ecx leaq 0x8ffd5c(%rip), %rax # 0xa954d0 movb (%rax,%rcx), %al movb %al, -0x13(%rsp) movzbl -0x13(%rsp), %eax movl %eax, %ecx leaq 0x8ffa87(%rip), %rax # 0xa95210 movzbl (%rax,%rcx), %eax movl %eax, -0x18(%rsp) movzbl -0x13(%rsp), %eax movl %eax, %ecx leaq 0x8ffa51(%rip), %rax # 0xa951f0 movzbl (%rax,%rcx), %eax movl %eax, -0x1c(%rsp) movzbl -0x11(%rsp), %eax movl %eax, %ecx leaq 0x8ffbfb(%rip), %rax # 0xa953b0 movl (%rax,%rcx,4), %eax movb %al, -0x1d(%rsp) movzbl -0x11(%rsp), %eax movl %eax, %ecx leaq 0x8ffc36(%rip), %rax # 0xa95400 movl (%rax,%rcx,4), %eax movb %al, -0x1e(%rsp) movl $0x0, -0x24(%rsp) movl -0x24(%rsp), %eax cmpl -0x18(%rsp), %eax jge 0x195801 movb -0x1e(%rsp), %dl movq -0x10(%rsp), %rax movslq -0x24(%rsp), %rcx movb %dl, (%rax,%rcx) movl -0x24(%rsp), %eax addl $0x1, %eax movl %eax, -0x24(%rsp) jmp 0x1957d9 movl $0x0, -0x24(%rsp) movl -0x24(%rsp), %eax cmpl -0x1c(%rsp), %eax jge 0x195831 movb -0x1d(%rsp), %dl movq -0x8(%rsp), %rax movslq -0x24(%rsp), %rcx movb %dl, (%rax,%rcx) movl -0x24(%rsp), %eax addl $0x1, %eax movl %eax, -0x24(%rsp) jmp 0x195809 retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/av1/common/av1_common_int.h
is_nontrans_global_motion
// Returns 1 when this block uses a global-motion mode, is at least 2 mi units
// in each dimension, and every referenced global motion model is
// non-translational; returns 0 otherwise.
static inline int is_nontrans_global_motion(const MACROBLOCKD *xd,
                                            const MB_MODE_INFO *mbmi) {
  int ref;

  // First check if all modes are GLOBALMV
  if (mbmi->mode != GLOBALMV && mbmi->mode != GLOBAL_GLOBALMV) return 0;

  // Blocks smaller than 2 mi units in either dimension never qualify.
  if (AOMMIN(mi_size_wide[mbmi->bsize], mi_size_high[mbmi->bsize]) < 2)
    return 0;

  // Now check if all global motion is non translational
  for (ref = 0; ref < 1 + has_second_ref(mbmi); ++ref) {
    if (xd->global_motion[mbmi->ref_frame[ref]].wmtype == TRANSLATION)
      return 0;
  }
  return 1;
}
subq $0x28, %rsp movq %rdi, 0x18(%rsp) movq %rsi, 0x10(%rsp) movq 0x10(%rsp), %rax movzbl 0x2(%rax), %eax cmpl $0xf, %eax je 0x198db7 movq 0x10(%rsp), %rax movzbl 0x2(%rax), %eax cmpl $0x17, %eax je 0x198db7 movl $0x0, 0x24(%rsp) jmp 0x198ea0 movq 0x10(%rsp), %rax movzbl (%rax), %eax movl %eax, %ecx leaq 0x8fc428(%rip), %rax # 0xa951f0 movzbl (%rax,%rcx), %eax movq 0x10(%rsp), %rcx movzbl (%rcx), %ecx movl %ecx, %edx leaq 0x8fc433(%rip), %rcx # 0xa95210 movzbl (%rcx,%rdx), %ecx cmpl %ecx, %eax jge 0x198e00 movq 0x10(%rsp), %rax movzbl (%rax), %eax movl %eax, %ecx leaq 0x8fc3fa(%rip), %rax # 0xa951f0 movzbl (%rax,%rcx), %eax movl %eax, 0x8(%rsp) jmp 0x198e19 movq 0x10(%rsp), %rax movzbl (%rax), %eax movl %eax, %ecx leaq 0x8fc3ff(%rip), %rax # 0xa95210 movzbl (%rax,%rcx), %eax movl %eax, 0x8(%rsp) movl 0x8(%rsp), %eax cmpl $0x2, %eax jge 0x198e2c movl $0x0, 0x24(%rsp) jmp 0x198ea0 movl $0x0, 0xc(%rsp) movl 0xc(%rsp), %eax movl %eax, 0x4(%rsp) movq 0x10(%rsp), %rdi callq 0x194230 movl %eax, %ecx movl 0x4(%rsp), %eax addl $0x1, %ecx cmpl %ecx, %eax jge 0x198e98 movq 0x18(%rsp), %rax movq 0x29f8(%rax), %rax movq 0x10(%rsp), %rcx movslq 0xc(%rsp), %rdx movsbq 0x10(%rcx,%rdx), %rcx imulq $0x24, %rcx, %rcx addq %rcx, %rax movzbl 0x20(%rax), %eax cmpl $0x1, %eax jne 0x198e89 movl $0x0, 0x24(%rsp) jmp 0x198ea0 jmp 0x198e8b movl 0xc(%rsp), %eax addl $0x1, %eax movl %eax, 0xc(%rsp) jmp 0x198e34 movl $0x1, 0x24(%rsp) movl 0x24(%rsp), %eax addq $0x28, %rsp retq nopl (%rax)
/m-ab-s[P]aom/av1/common/blockd.h
get_uni_comp_ref_idx
static inline int8_t get_uni_comp_ref_idx(const MV_REFERENCE_FRAME *const rf) { // Single ref pred if (rf[1] <= INTRA_FRAME) return -1; // Bi-directional comp ref pred if ((rf[0] < BWDREF_FRAME) && (rf[1] >= BWDREF_FRAME)) return -1; for (int8_t ref_idx = 0; ref_idx < TOTAL_UNIDIR_COMP_REFS; ++ref_idx) { if (rf[0] == comp_ref0(ref_idx) && rf[1] == comp_ref1(ref_idx)) return ref_idx; } return -1; }
subq $0x28, %rsp movq %rdi, 0x18(%rsp) movq 0x18(%rsp), %rax movsbl 0x1(%rax), %eax cmpl $0x0, %eax jg 0x199111 movb $-0x1, 0x27(%rsp) jmp 0x1991a6 movq 0x18(%rsp), %rax movsbl (%rax), %eax cmpl $0x5, %eax jge 0x199133 movq 0x18(%rsp), %rax movsbl 0x1(%rax), %eax cmpl $0x5, %eax jl 0x199133 movb $-0x1, 0x27(%rsp) jmp 0x1991a6 movb $0x0, 0x17(%rsp) movsbl 0x17(%rsp), %eax cmpl $0x9, %eax jge 0x1991a1 movq 0x18(%rsp), %rax movsbl (%rax), %eax movl %eax, 0x10(%rsp) movsbl 0x17(%rsp), %edi callq 0x1991b0 movb %al, %cl movl 0x10(%rsp), %eax movsbl %cl, %ecx cmpl %ecx, %eax jne 0x199193 movq 0x18(%rsp), %rax movsbl 0x1(%rax), %eax movl %eax, 0xc(%rsp) movsbl 0x17(%rsp), %edi callq 0x1991d0 movb %al, %cl movl 0xc(%rsp), %eax movsbl %cl, %ecx cmpl %ecx, %eax jne 0x199193 movb 0x17(%rsp), %al movb %al, 0x27(%rsp) jmp 0x1991a6 jmp 0x199195 movb 0x17(%rsp), %al addb $0x1, %al movb %al, 0x17(%rsp) jmp 0x199138 movb $-0x1, 0x27(%rsp) movb 0x27(%rsp), %al addq $0x28, %rsp retq nop
/m-ab-s[P]aom/av1/common/mvref_common.h
update_partition_context
// Records the chosen partition `subsize` of a `bsize` block in the above/left
// partition contexts used for entropy coding of neighboring blocks.
static inline void update_partition_context(MACROBLOCKD *xd, int mi_row,
                                            int mi_col, BLOCK_SIZE subsize,
                                            BLOCK_SIZE bsize) {
  // The above context is indexed by absolute mi column; the left context
  // wraps within the current superblock (hence the MAX_MIB_MASK).
  PARTITION_CONTEXT *const above_ctx = xd->above_partition_context + mi_col;
  PARTITION_CONTEXT *const left_ctx =
      xd->left_partition_context + (mi_row & MAX_MIB_MASK);

  // Block footprint in mi units.
  const int bw = mi_size_wide[bsize];
  const int bh = mi_size_high[bsize];
  // Stamp the per-direction context byte for the chosen subsize over the
  // whole footprint.
  memset(above_ctx, partition_context_lookup[subsize].above, bw);
  memset(left_ctx, partition_context_lookup[subsize].left, bh);
}
subq $0x38, %rsp movb %r8b, %al movq %rdi, 0x30(%rsp) movl %esi, 0x2c(%rsp) movl %edx, 0x28(%rsp) movb %cl, 0x27(%rsp) movb %al, 0x26(%rsp) movq 0x30(%rsp), %rax movq 0x1f98(%rax), %rax movslq 0x28(%rsp), %rcx addq %rcx, %rax movq %rax, 0x18(%rsp) movq 0x30(%rsp), %rax addq $0x1fa0, %rax # imm = 0x1FA0 movl 0x2c(%rsp), %ecx andl $0x1f, %ecx movslq %ecx, %rcx addq %rcx, %rax movq %rax, 0x10(%rsp) movzbl 0x26(%rsp), %eax movl %eax, %ecx leaq 0x8fbfa0(%rip), %rax # 0xa951f0 movzbl (%rax,%rcx), %eax movl %eax, 0xc(%rsp) movzbl 0x26(%rsp), %eax movl %eax, %ecx leaq 0x8fbfaa(%rip), %rax # 0xa95210 movzbl (%rax,%rcx), %eax movl %eax, 0x8(%rsp) movq 0x18(%rsp), %rdi movzbl 0x27(%rsp), %eax movl %eax, %ecx leaq 0x8fc31f(%rip), %rax # 0xa955a0 movsbl (%rax,%rcx,2), %eax movslq 0xc(%rsp), %rdx movzbl %al, %esi callq 0x18280 movq 0x10(%rsp), %rdi movzbl 0x27(%rsp), %eax movl %eax, %ecx leaq 0x8fc2fb(%rip), %rax # 0xa955a0 movsbl 0x1(%rax,%rcx,2), %eax movslq 0x8(%rsp), %rdx movzbl %al, %esi callq 0x18280 addq $0x38, %rsp retq nopl (%rax)
/m-ab-s[P]aom/av1/common/av1_common_int.h
is_nontrans_global_motion
// Returns 1 when this block uses a global-motion mode, is at least 2 mi units
// in each dimension, and every referenced global motion model is
// non-translational; returns 0 otherwise.
static inline int is_nontrans_global_motion(const MACROBLOCKD *xd,
                                            const MB_MODE_INFO *mbmi) {
  int ref;

  // First check if all modes are GLOBALMV
  if (mbmi->mode != GLOBALMV && mbmi->mode != GLOBAL_GLOBALMV) return 0;

  // Blocks smaller than 2 mi units in either dimension never qualify.
  if (AOMMIN(mi_size_wide[mbmi->bsize], mi_size_high[mbmi->bsize]) < 2)
    return 0;

  // Now check if all global motion is non translational
  for (ref = 0; ref < 1 + has_second_ref(mbmi); ++ref) {
    if (xd->global_motion[mbmi->ref_frame[ref]].wmtype == TRANSLATION)
      return 0;
  }
  return 1;
}
subq $0x28, %rsp movq %rdi, 0x18(%rsp) movq %rsi, 0x10(%rsp) movq 0x10(%rsp), %rax movzbl 0x2(%rax), %eax cmpl $0xf, %eax je 0x1e28a7 movq 0x10(%rsp), %rax movzbl 0x2(%rax), %eax cmpl $0x17, %eax je 0x1e28a7 movl $0x0, 0x24(%rsp) jmp 0x1e2990 movq 0x10(%rsp), %rax movzbl (%rax), %eax movl %eax, %ecx leaq 0x8d6648(%rip), %rax # 0xab8f00 movzbl (%rax,%rcx), %eax movq 0x10(%rsp), %rcx movzbl (%rcx), %ecx movl %ecx, %edx leaq 0x8d6653(%rip), %rcx # 0xab8f20 movzbl (%rcx,%rdx), %ecx cmpl %ecx, %eax jge 0x1e28f0 movq 0x10(%rsp), %rax movzbl (%rax), %eax movl %eax, %ecx leaq 0x8d661a(%rip), %rax # 0xab8f00 movzbl (%rax,%rcx), %eax movl %eax, 0x8(%rsp) jmp 0x1e2909 movq 0x10(%rsp), %rax movzbl (%rax), %eax movl %eax, %ecx leaq 0x8d661f(%rip), %rax # 0xab8f20 movzbl (%rax,%rcx), %eax movl %eax, 0x8(%rsp) movl 0x8(%rsp), %eax cmpl $0x2, %eax jge 0x1e291c movl $0x0, 0x24(%rsp) jmp 0x1e2990 movl $0x0, 0xc(%rsp) movl 0xc(%rsp), %eax movl %eax, 0x4(%rsp) movq 0x10(%rsp), %rdi callq 0x1e4000 movl %eax, %ecx movl 0x4(%rsp), %eax addl $0x1, %ecx cmpl %ecx, %eax jge 0x1e2988 movq 0x18(%rsp), %rax movq 0x29f8(%rax), %rax movq 0x10(%rsp), %rcx movslq 0xc(%rsp), %rdx movsbq 0x10(%rcx,%rdx), %rcx imulq $0x24, %rcx, %rcx addq %rcx, %rax movzbl 0x20(%rax), %eax cmpl $0x1, %eax jne 0x1e2979 movl $0x0, 0x24(%rsp) jmp 0x1e2990 jmp 0x1e297b movl 0xc(%rsp), %eax addl $0x1, %eax movl %eax, 0xc(%rsp) jmp 0x1e2924 movl $0x1, 0x24(%rsp) movl 0x24(%rsp), %eax addq $0x28, %rsp retq nopl (%rax)
/m-ab-s[P]aom/av1/common/blockd.h
cost_mv_ref
// Returns the rate cost of signalling the inter prediction `mode` given the
// neighbor-derived `mode_context`. Compound modes are costed from a single
// table; single-reference modes walk the NEWMV -> GLOBALMV -> NEAREST/NEAR
// decision tree, accumulating the cost of each binary decision.
static int cost_mv_ref(const ModeCosts *const mode_costs, PREDICTION_MODE mode,
                       int16_t mode_context) {
  if (is_inter_compound_mode(mode)) {
    return mode_costs
        ->inter_compound_mode_cost[mode_context][INTER_COMPOUND_OFFSET(mode)];
  }

  int mode_cost = 0;
  // The NEWMV context occupies the low bits of mode_context.
  int16_t mode_ctx = mode_context & NEWMV_CTX_MASK;

  assert(is_inter_mode(mode));

  if (mode == NEWMV) {
    mode_cost = mode_costs->newmv_mode_cost[mode_ctx][0];
    return mode_cost;
  } else {
    // Pay for "not NEWMV", then extract the GLOBALMV context bits.
    mode_cost = mode_costs->newmv_mode_cost[mode_ctx][1];
    mode_ctx = (mode_context >> GLOBALMV_OFFSET) & GLOBALMV_CTX_MASK;

    if (mode == GLOBALMV) {
      mode_cost += mode_costs->zeromv_mode_cost[mode_ctx][0];
      return mode_cost;
    } else {
      // Pay for "not GLOBALMV", then choose between NEARESTMV and NEARMV
      // using the REFMV context bits.
      mode_cost += mode_costs->zeromv_mode_cost[mode_ctx][1];
      mode_ctx = (mode_context >> REFMV_OFFSET) & REFMV_CTX_MASK;
      mode_cost += mode_costs->refmv_mode_cost[mode_ctx][mode != NEARESTMV];
      return mode_cost;
    }
  }
}
subq $0x28, %rsp movw %dx, %ax movb %sil, %cl movq %rdi, 0x18(%rsp) movb %cl, 0x17(%rsp) movw %ax, 0x14(%rsp) movzbl 0x17(%rsp), %edi callq 0x1e9ab0 cmpl $0x0, %eax je 0x1e99a6 movq 0x18(%rsp), %rax addq $0x3f38, %rax # imm = 0x3F38 movswq 0x14(%rsp), %rcx shlq $0x5, %rcx addq %rcx, %rax movzbl 0x17(%rsp), %ecx subl $0x11, %ecx movzbl %cl, %ecx movl (%rax,%rcx,4), %eax movl %eax, 0x24(%rsp) jmp 0x1e9aa5 movl $0x0, 0x10(%rsp) movswl 0x14(%rsp), %eax andl $0x7, %eax movw %ax, 0xe(%rsp) movzbl 0x17(%rsp), %eax cmpl $0x10, %eax jne 0x1e99e8 movq 0x18(%rsp), %rax movswq 0xe(%rsp), %rcx movl 0x3cb8(%rax,%rcx,8), %eax movl %eax, 0x10(%rsp) movl 0x10(%rsp), %eax movl %eax, 0x24(%rsp) jmp 0x1e9aa5 movq 0x18(%rsp), %rax movswq 0xe(%rsp), %rcx movl 0x3cbc(%rax,%rcx,8), %eax movl %eax, 0x10(%rsp) movswl 0x14(%rsp), %eax sarl $0x3, %eax andl $0x1, %eax movw %ax, 0xe(%rsp) movzbl 0x17(%rsp), %eax cmpl $0xf, %eax jne 0x1e9a3c movq 0x18(%rsp), %rax movswq 0xe(%rsp), %rcx movl 0x3ce8(%rax,%rcx,8), %eax addl 0x10(%rsp), %eax movl %eax, 0x10(%rsp) movl 0x10(%rsp), %eax movl %eax, 0x24(%rsp) jmp 0x1e9aa5 movq 0x18(%rsp), %rax movswq 0xe(%rsp), %rcx movl 0x3cec(%rax,%rcx,8), %eax addl 0x10(%rsp), %eax movl %eax, 0x10(%rsp) movswl 0x14(%rsp), %eax sarl $0x4, %eax andl $0xf, %eax movw %ax, 0xe(%rsp) movq 0x18(%rsp), %rax addq $0x3cf8, %rax # imm = 0x3CF8 movswq 0xe(%rsp), %rcx shlq $0x3, %rcx addq %rcx, %rax movzbl 0x17(%rsp), %ecx cmpl $0xd, %ecx setne %cl andb $0x1, %cl movzbl %cl, %ecx movslq %ecx, %rcx movl (%rax,%rcx,4), %eax addl 0x10(%rsp), %eax movl %eax, 0x10(%rsp) movl 0x10(%rsp), %eax movl %eax, 0x24(%rsp) movl 0x24(%rsp), %eax addq $0x28, %rsp retq nop
/m-ab-s[P]aom/av1/encoder/rdopt.c
compound_skip_get_candidates
// Returns how many single-reference candidates (for direction `dir` and
// single inter mode `mode`) should still be considered when pruning the
// compound-mode search based on earlier single-mode results. The count
// shrinks as the prune_comp_search_by_single_result speed feature level
// rises.
static int compound_skip_get_candidates(
    const AV1_COMP *cpi, const InterModeSearchState *search_state,
    const int dir, const PREDICTION_MODE mode) {
  const int mode_offset = INTER_OFFSET(mode);
  const SingleInterModeState *state =
      search_state->single_state[dir][mode_offset];
  const SingleInterModeState *state_modelled =
      search_state->single_state_modelled[dir][mode_offset];

  // Count valid entries: the rd-ordered reference list is
  // NONE_FRAME-terminated.
  int max_candidates = 0;
  for (int i = 0; i < FWD_REFS; ++i) {
    if (search_state->single_rd_order[dir][mode_offset][i] == NONE_FRAME)
      break;
    max_candidates++;
  }

  int candidates = max_candidates;
  if (cpi->sf.inter_sf.prune_comp_search_by_single_result >= 2) {
    candidates = AOMMIN(2, max_candidates);
  }
  if (cpi->sf.inter_sf.prune_comp_search_by_single_result >= 3) {
    // If the real and modelled searches agree on the best reference frame,
    // one candidate is enough.
    if (state[0].rd != INT64_MAX && state_modelled[0].rd != INT64_MAX &&
        state[0].ref_frame == state_modelled[0].ref_frame)
      candidates = 1;
    if (mode == NEARMV || mode == GLOBALMV) candidates = 1;
  }
  if (cpi->sf.inter_sf.prune_comp_search_by_single_result >= 4) {
    // Limit the number of candidates to 1 in each direction for compound
    // prediction
    candidates = AOMMIN(1, candidates);
  }
  return candidates;
}
movb %cl, %al movq %rdi, -0x8(%rsp) movq %rsi, -0x10(%rsp) movl %edx, -0x14(%rsp) movb %al, -0x15(%rsp) movzbl -0x15(%rsp), %eax subl $0xd, %eax movl %eax, -0x1c(%rsp) movq -0x10(%rsp), %rax addq $0x2d98, %rax # imm = 0x2D98 movslq -0x14(%rsp), %rcx shlq $0x8, %rcx addq %rcx, %rax movslq -0x1c(%rsp), %rcx shlq $0x6, %rcx addq %rcx, %rax movq %rax, -0x28(%rsp) movq -0x10(%rsp), %rax addq $0x2fb8, %rax # imm = 0x2FB8 movslq -0x14(%rsp), %rcx shlq $0x8, %rcx addq %rcx, %rax movslq -0x1c(%rsp), %rcx shlq $0x6, %rcx addq %rcx, %rax movq %rax, -0x30(%rsp) movl $0x0, -0x34(%rsp) movl $0x0, -0x38(%rsp) cmpl $0x4, -0x38(%rsp) jge 0x1e9ce2 movq -0x10(%rsp), %rax addq $0x31d8, %rax # imm = 0x31D8 movslq -0x14(%rsp), %rcx shlq $0x4, %rcx addq %rcx, %rax movslq -0x1c(%rsp), %rcx shlq $0x2, %rcx addq %rcx, %rax movslq -0x38(%rsp), %rcx movsbl (%rax,%rcx), %eax cmpl $-0x1, %eax jne 0x1e9cca jmp 0x1e9ce2 movl -0x34(%rsp), %eax addl $0x1, %eax movl %eax, -0x34(%rsp) movl -0x38(%rsp), %eax addl $0x1, %eax movl %eax, -0x38(%rsp) jmp 0x1e9c90 movl -0x34(%rsp), %eax movl %eax, -0x3c(%rsp) movq -0x8(%rsp), %rax cmpl $0x2, 0x60abc(%rax) jl 0x1e9d1e movl $0x2, %eax cmpl -0x34(%rsp), %eax jge 0x1e9d0e movl $0x2, %eax movl %eax, -0x40(%rsp) jmp 0x1e9d16 movl -0x34(%rsp), %eax movl %eax, -0x40(%rsp) movl -0x40(%rsp), %eax movl %eax, -0x3c(%rsp) movq -0x8(%rsp), %rax cmpl $0x3, 0x60abc(%rax) jl 0x1e9d90 movq -0x28(%rsp), %rax movabsq $0x7fffffffffffffff, %rcx # imm = 0x7FFFFFFFFFFFFFFF cmpq %rcx, (%rax) je 0x1e9d72 movq -0x30(%rsp), %rax movabsq $0x7fffffffffffffff, %rcx # imm = 0x7FFFFFFFFFFFFFFF cmpq %rcx, (%rax) je 0x1e9d72 movq -0x28(%rsp), %rax movsbl 0x8(%rax), %eax movq -0x30(%rsp), %rcx movsbl 0x8(%rcx), %ecx cmpl %ecx, %eax jne 0x1e9d72 movl $0x1, -0x3c(%rsp) movzbl -0x15(%rsp), %eax cmpl $0xe, %eax je 0x1e9d86 movzbl -0x15(%rsp), %eax cmpl $0xf, %eax jne 0x1e9d8e movl $0x1, -0x3c(%rsp) jmp 0x1e9d90 movq -0x8(%rsp), %rax cmpl $0x4, 0x60abc(%rax) jl 0x1e9dc4 movl $0x1, %eax cmpl 
-0x3c(%rsp), %eax jge 0x1e9db4 movl $0x1, %eax movl %eax, -0x44(%rsp) jmp 0x1e9dbc movl -0x3c(%rsp), %eax movl %eax, -0x44(%rsp) movl -0x44(%rsp), %eax movl %eax, -0x3c(%rsp) movl -0x3c(%rsp), %eax retq nopl (%rax)
/m-ab-s[P]aom/av1/encoder/rdopt.c
is_filter_search_enabled_blk
// Decides whether interpolation-filter search should run for this block in
// non-RD pick mode. Returns 0 to skip the search, and a non-zero value to
// enable it (2 indicates neighbor information could not be used to predict
// the filter, so a search is forced).
static inline int is_filter_search_enabled_blk(AV1_COMP *cpi, MACROBLOCK *x,
                                               int mi_row, int mi_col,
                                               BLOCK_SIZE bsize, int segment_id,
                                               int cb_pred_filter_search,
                                               InterpFilter *filt_select) {
  const AV1_COMMON *const cm = &cpi->common;
  // filt search disabled
  if (!cpi->sf.rt_sf.use_nonrd_filter_search) return 0;
  // filt search purely based on mode properties
  if (!cb_pred_filter_search) return 1;
  MACROBLOCKD *const xd = &x->e_mbd;
  int enable_interp_search = 0;
  if (!(xd->left_mbmi && xd->above_mbmi)) {
    // neighbors info unavailable
    enable_interp_search = 2;
  } else if (!(is_inter_block(xd->left_mbmi) &&
               is_inter_block(xd->above_mbmi))) {
    // neighbor is INTRA
    enable_interp_search = 2;
  } else if (xd->left_mbmi->interp_filters.as_int !=
             xd->above_mbmi->interp_filters.as_int) {
    // filters are different
    enable_interp_search = 2;
  } else if ((cb_pred_filter_search == 1) &&
             (xd->left_mbmi->interp_filters.as_filters.x_filter !=
              EIGHTTAP_REGULAR)) {
    // not regular
    enable_interp_search = 2;
  } else {
    // enable prediction based on chessboard pattern
    if (xd->left_mbmi->interp_filters.as_filters.x_filter == EIGHTTAP_SMOOTH)
      *filt_select = EIGHTTAP_SMOOTH;
    const int bsl = mi_size_wide_log2[bsize];
    // Alternate the search on/off in a chessboard pattern that shifts with
    // the frame number.
    enable_interp_search =
        (bool)((((mi_row + mi_col) >> bsl) +
                get_chessboard_index(cm->current_frame.frame_number)) &
               0x1);
    // Segments boosted by cyclic refresh always get a search.
    if (cyclic_refresh_segment_id_boosted(segment_id))
      enable_interp_search = 1;
  }
  return enable_interp_search;
}
subq $0x48, %rsp movb %r8b, %al movq 0x58(%rsp), %r8 movl 0x50(%rsp), %r8d movq %rdi, 0x38(%rsp) movq %rsi, 0x30(%rsp) movl %edx, 0x2c(%rsp) movl %ecx, 0x28(%rsp) movb %al, 0x27(%rsp) movl %r9d, 0x20(%rsp) movq 0x38(%rsp), %rax addq $0x3bf80, %rax # imm = 0x3BF80 movq %rax, 0x18(%rsp) movq 0x38(%rsp), %rax cmpl $0x0, 0x60cd8(%rax) jne 0x1f8647 movl $0x0, 0x44(%rsp) jmp 0x1f87bf cmpl $0x0, 0x50(%rsp) jne 0x1f865b movl $0x1, 0x44(%rsp) jmp 0x1f87bf movq 0x30(%rsp), %rax addq $0x1a0, %rax # imm = 0x1A0 movq %rax, 0x10(%rsp) movl $0x0, 0xc(%rsp) movq 0x10(%rsp), %rax cmpq $0x0, 0x1ec8(%rax) je 0x1f8691 movq 0x10(%rsp), %rax cmpq $0x0, 0x1ed0(%rax) jne 0x1f869e movl $0x2, 0xc(%rsp) jmp 0x1f87b7 movq 0x10(%rsp), %rax movq 0x1ec8(%rax), %rdi callq 0x1f9360 cmpl $0x0, %eax je 0x1f86ca movq 0x10(%rsp), %rax movq 0x1ed0(%rax), %rdi callq 0x1f9360 cmpl $0x0, %eax jne 0x1f86d7 movl $0x2, 0xc(%rsp) jmp 0x1f87b5 movq 0x10(%rsp), %rax movq 0x1ec8(%rax), %rax movl 0x14(%rax), %eax movq 0x10(%rsp), %rcx movq 0x1ed0(%rcx), %rcx cmpl 0x14(%rcx), %eax je 0x1f8704 movl $0x2, 0xc(%rsp) jmp 0x1f87b3 cmpl $0x1, 0x50(%rsp) jne 0x1f872d movq 0x10(%rsp), %rax movq 0x1ec8(%rax), %rax movzwl 0x16(%rax), %eax cmpl $0x0, %eax je 0x1f872d movl $0x2, 0xc(%rsp) jmp 0x1f87b1 movq 0x10(%rsp), %rax movq 0x1ec8(%rax), %rax movzwl 0x16(%rax), %eax cmpl $0x1, %eax jne 0x1f874a movq 0x58(%rsp), %rax movb $0x1, (%rax) movzbl 0x27(%rsp), %eax movl %eax, %ecx leaq 0x8c1738(%rip), %rax # 0xab9e90 movzbl (%rax,%rcx), %eax movl %eax, 0x8(%rsp) movl 0x2c(%rsp), %eax addl 0x28(%rsp), %eax movl 0x8(%rsp), %ecx sarl %cl, %eax movl %eax, 0x4(%rsp) movq 0x18(%rsp), %rax movl 0x10(%rax), %edi callq 0x1fa760 movl %eax, %ecx movl 0x4(%rsp), %eax addl %ecx, %eax andl $0x1, %eax cmpl $0x0, %eax setne %al andb $0x1, %al movzbl %al, %eax movl %eax, 0xc(%rsp) movl 0x20(%rsp), %edi callq 0x1fa730 cmpl $0x0, %eax je 0x1f87af movl $0x1, 0xc(%rsp) jmp 0x1f87b1 jmp 0x1f87b3 jmp 0x1f87b5 jmp 0x1f87b7 movl 0xc(%rsp), %eax movl 
%eax, 0x44(%rsp) movl 0x44(%rsp), %eax addq $0x48, %rsp retq nopl (%rax,%rax)
/m-ab-s[P]aom/av1/encoder/nonrd_pickmode.c
init_inter_block_params
static inline void init_inter_block_params(InterPredParams *inter_pred_params,
                                           int block_width, int block_height,
                                           int pix_row, int pix_col,
                                           int subsampling_x, int subsampling_y,
                                           int bit_depth, int use_hbd_buf,
                                           int is_intrabc) {
  // Populate the per-block inter-prediction parameters and reset the
  // prediction mode fields to their translational, single-reference defaults.
  InterPredParams *const params = inter_pred_params;
  // Block geometry.
  params->block_width = block_width;
  params->block_height = block_height;
  params->pix_row = pix_row;
  params->pix_col = pix_col;
  // Plane subsampling and buffer format.
  params->subsampling_x = subsampling_x;
  params->subsampling_y = subsampling_y;
  params->bit_depth = bit_depth;
  params->use_hbd_buf = use_hbd_buf;
  params->is_intrabc = is_intrabc;
  // Defaults: plain translation, uniform single-reference compound mode.
  params->mode = TRANSLATION_PRED;
  params->comp_mode = UNIFORM_SINGLE;
  // Top/left reference-area limits in scaled coordinates.
  params->top = -AOM_LEFT_TOP_MARGIN_SCALED(subsampling_y);
  params->left = -AOM_LEFT_TOP_MARGIN_SCALED(subsampling_x);
}
movl 0x20(%rsp), %eax movl 0x18(%rsp), %eax movl 0x10(%rsp), %eax movl 0x8(%rsp), %eax movq %rdi, -0x8(%rsp) movl %esi, -0xc(%rsp) movl %edx, -0x10(%rsp) movl %ecx, -0x14(%rsp) movl %r8d, -0x18(%rsp) movl %r9d, -0x1c(%rsp) movl -0xc(%rsp), %ecx movq -0x8(%rsp), %rax movl %ecx, 0x70(%rax) movl -0x10(%rsp), %ecx movq -0x8(%rsp), %rax movl %ecx, 0x74(%rax) movl -0x14(%rsp), %ecx movq -0x8(%rsp), %rax movl %ecx, 0x78(%rax) movl -0x18(%rsp), %ecx movq -0x8(%rsp), %rax movl %ecx, 0x7c(%rax) movl -0x1c(%rsp), %ecx movq -0x8(%rsp), %rax movl %ecx, 0xa0(%rax) movl 0x8(%rsp), %ecx movq -0x8(%rsp), %rax movl %ecx, 0xa4(%rax) movl 0x10(%rsp), %ecx movq -0x8(%rsp), %rax movl %ecx, 0xb0(%rax) movl 0x18(%rsp), %ecx movq -0x8(%rsp), %rax movl %ecx, 0xb4(%rax) movl 0x20(%rsp), %ecx movq -0x8(%rsp), %rax movl %ecx, 0xcc(%rax) movq -0x8(%rsp), %rax movl $0x0, (%rax) movq -0x8(%rsp), %rax movl $0x0, 0x4(%rax) movl 0x8(%rsp), %ecx movl $0x120, %eax # imm = 0x120 sarl %cl, %eax subl $0x4, %eax shll $0xa, %eax xorl %ecx, %ecx subl %eax, %ecx movq -0x8(%rsp), %rax movl %ecx, 0xd0(%rax) movl -0x1c(%rsp), %ecx movl $0x120, %eax # imm = 0x120 sarl %cl, %eax subl $0x4, %eax shll $0xa, %eax xorl %ecx, %ecx subl %eax, %ecx movq -0x8(%rsp), %rax movl %ecx, 0xd4(%rax) retq nop
/m-ab-s[P]aom/av1/common/reconinter.h
get_uni_comp_ref_idx
// Maps a reference-frame pair to its uni-directional compound reference
// index, or -1 when the pair is single-reference, bi-directional, or not in
// the uni-directional table.
static inline int8_t get_uni_comp_ref_idx(const MV_REFERENCE_FRAME *const rf) {
  // Single ref pred
  if (rf[1] <= INTRA_FRAME) return -1;

  // Bi-directional comp ref pred
  if ((rf[0] < BWDREF_FRAME) && (rf[1] >= BWDREF_FRAME)) return -1;

  // Linear scan of the uni-directional compound reference table.
  for (int8_t ref_idx = 0; ref_idx < TOTAL_UNIDIR_COMP_REFS; ++ref_idx) {
    if (rf[0] == comp_ref0(ref_idx) && rf[1] == comp_ref1(ref_idx))
      return ref_idx;
  }
  return -1;
}
subq $0x28, %rsp movq %rdi, 0x18(%rsp) movq 0x18(%rsp), %rax movsbl 0x1(%rax), %eax cmpl $0x0, %eax jg 0x1f9821 movb $-0x1, 0x27(%rsp) jmp 0x1f98b6 movq 0x18(%rsp), %rax movsbl (%rax), %eax cmpl $0x5, %eax jge 0x1f9843 movq 0x18(%rsp), %rax movsbl 0x1(%rax), %eax cmpl $0x5, %eax jl 0x1f9843 movb $-0x1, 0x27(%rsp) jmp 0x1f98b6 movb $0x0, 0x17(%rsp) movsbl 0x17(%rsp), %eax cmpl $0x9, %eax jge 0x1f98b1 movq 0x18(%rsp), %rax movsbl (%rax), %eax movl %eax, 0x10(%rsp) movsbl 0x17(%rsp), %edi callq 0x1f98c0 movb %al, %cl movl 0x10(%rsp), %eax movsbl %cl, %ecx cmpl %ecx, %eax jne 0x1f98a3 movq 0x18(%rsp), %rax movsbl 0x1(%rax), %eax movl %eax, 0xc(%rsp) movsbl 0x17(%rsp), %edi callq 0x1f98e0 movb %al, %cl movl 0xc(%rsp), %eax movsbl %cl, %ecx cmpl %ecx, %eax jne 0x1f98a3 movb 0x17(%rsp), %al movb %al, 0x27(%rsp) jmp 0x1f98b6 jmp 0x1f98a5 movb 0x17(%rsp), %al addb $0x1, %al movb %al, 0x17(%rsp) jmp 0x1f9848 movb $-0x1, 0x27(%rsp) movb 0x27(%rsp), %al addq $0x28, %rsp retq nop
/m-ab-s[P]aom/av1/common/mvref_common.h
get_ref_frame_use_mask
// Decide which of LAST/GOLDEN/ALTREF may be searched for this block in the
// non-RD pick-mode path, writing the per-reference enable flags into
// use_ref_frame[] and the low-temporal-variance skip decision into
// *force_skip_low_temp_var. The checks below are order-dependent: later rules
// (segment override, ref_frame_flags masking, spatial-layer re-enable)
// intentionally overwrite earlier pruning decisions.
static inline void get_ref_frame_use_mask(AV1_COMP *cpi, MACROBLOCK *x,
                                          MB_MODE_INFO *mi, int mi_row,
                                          int mi_col, BLOCK_SIZE bsize,
                                          int gf_temporal_ref,
                                          int use_ref_frame[],
                                          int *force_skip_low_temp_var) {
  AV1_COMMON *const cm = &cpi->common;
  const struct segmentation *const seg = &cm->seg;
  const int is_small_sb = (cm->seq_params->sb_size == BLOCK_64X64);

  // When the ref_frame_config is used to set the reference frame structure
  // then the usage of alt_ref is determined by the ref_frame_flags
  // (and not the speed feature use_nonrd_altref_frame).
  int use_alt_ref_frame = cpi->ppi->rtc_ref.set_ref_frame_config ||
                          cpi->sf.rt_sf.use_nonrd_altref_frame;
  int use_golden_ref_frame = 1;
  int use_last_ref_frame = 1;

  // When the ref_frame_config is used to set the reference frame structure:
  // check if LAST is used as a reference. And only remove golden and altref
  // references below if last is used as a reference.
  if (cpi->ppi->rtc_ref.set_ref_frame_config)
    use_last_ref_frame =
        cpi->ref_frame_flags & AOM_LAST_FLAG ? use_last_ref_frame : 0;

  // frames_since_golden is not used when user sets the reference structure.
  if (!cpi->ppi->rtc_ref.set_ref_frame_config && use_last_ref_frame &&
      cpi->rc.frames_since_golden == 0 && gf_temporal_ref) {
    use_golden_ref_frame = 0;
  }

  // Low-temporal-variance shortcut: pick the variant matching the superblock
  // size (64x64 vs 128x128).
  if (use_last_ref_frame && cpi->sf.rt_sf.short_circuit_low_temp_var &&
      x->nonrd_prune_ref_frame_search) {
    if (is_small_sb)
      *force_skip_low_temp_var = av1_get_force_skip_low_temp_var_small_sb(
          &x->part_search_info.variance_low[0], mi_row, mi_col, bsize);
    else
      *force_skip_low_temp_var = av1_get_force_skip_low_temp_var(
          &x->part_search_info.variance_low[0], mi_row, mi_col, bsize);
    // If force_skip_low_temp_var is set, skip golden reference.
    if (*force_skip_low_temp_var) {
      use_golden_ref_frame = 0;
      use_alt_ref_frame = 0;
    }
  }

  // Aggressive pruning modes (or forced zero-MV skip) restrict search to LAST.
  if (use_last_ref_frame &&
      (x->nonrd_prune_ref_frame_search > 2 || x->force_zeromv_skip_for_blk ||
       (x->nonrd_prune_ref_frame_search > 1 && bsize > BLOCK_64X64))) {
    use_golden_ref_frame = 0;
    use_alt_ref_frame = 0;
  }

  // Segment-level override: a segment pinned to GOLDEN must keep golden on.
  if (segfeature_active(seg, mi->segment_id, SEG_LVL_REF_FRAME) &&
      get_segdata(seg, mi->segment_id, SEG_LVL_REF_FRAME) == GOLDEN_FRAME) {
    use_golden_ref_frame = 1;
    use_alt_ref_frame = 0;
  }

  // Skip golden/altref reference if color is set, on flat blocks with motion.
  // For screen: always skip golden/alt (if color_sensitivity_sb_g/alt is set)
  // except when x->nonrd_prune_ref_frame_search = 0. This latter flag
  // may be set in the variance partition when golden is a much better
  // reference than last, in which case it may not be worth skipping
  // golden/altref completely.
  // Condition on use_last_ref to make sure there remains at least one
  // reference.
  if (use_last_ref_frame &&
      ((cpi->oxcf.tune_cfg.content == AOM_CONTENT_SCREEN &&
        x->nonrd_prune_ref_frame_search != 0) ||
       (x->source_variance < 200 &&
        x->content_state_sb.source_sad_nonrd >= kLowSad))) {
    if (x->color_sensitivity_sb_g[COLOR_SENS_IDX(AOM_PLANE_U)] == 1 ||
        x->color_sensitivity_sb_g[COLOR_SENS_IDX(AOM_PLANE_V)] == 1)
      use_golden_ref_frame = 0;
    if (x->color_sensitivity_sb_alt[COLOR_SENS_IDX(AOM_PLANE_U)] == 1 ||
        x->color_sensitivity_sb_alt[COLOR_SENS_IDX(AOM_PLANE_V)] == 1)
      use_alt_ref_frame = 0;
  }

  // For non-screen: if golden and altref are not being selected as references
  // (use_golden_ref_frame/use_alt_ref_frame = 0) check to allow golden back
  // based on the sad of nearest/nearmv of LAST ref. If this block sad is large,
  // keep golden as reference. Only do this for the agrressive pruning mode and
  // avoid it when color is set for golden reference.
  if (cpi->oxcf.tune_cfg.content != AOM_CONTENT_SCREEN &&
      (cpi->ref_frame_flags & AOM_LAST_FLAG) && !use_golden_ref_frame &&
      !use_alt_ref_frame && x->pred_mv_sad[LAST_FRAME] != INT_MAX &&
      x->nonrd_prune_ref_frame_search > 2 &&
      x->color_sensitivity_sb_g[COLOR_SENS_IDX(AOM_PLANE_U)] == 0 &&
      x->color_sensitivity_sb_g[COLOR_SENS_IDX(AOM_PLANE_V)] == 0) {
    // Threshold is tighter (100) above 288p, looser (150) otherwise.
    int thr = (cm->width * cm->height > RESOLUTION_288P) ? 100 : 150;
    // Normalize the LAST pred SAD by the block's pixel count (log2 w + log2 h).
    int pred = x->pred_mv_sad[LAST_FRAME] >>
               (b_width_log2_lookup[bsize] + b_height_log2_lookup[bsize]);
    if (pred > thr) use_golden_ref_frame = 1;
  }

  // Finally mask both flags by what the user-level ref_frame_flags allow.
  use_alt_ref_frame =
      cpi->ref_frame_flags & AOM_ALT_FLAG ? use_alt_ref_frame : 0;
  use_golden_ref_frame =
      cpi->ref_frame_flags & AOM_GOLD_FLAG ? use_golden_ref_frame : 0;

  // For spatial layers: enable golden ref if it is set by user and
  // corresponds to the lower spatial layer.
  if (cpi->svc.spatial_layer_id > 0 &&
      (cpi->ref_frame_flags & AOM_GOLD_FLAG) &&
      x->content_state_sb.source_sad_nonrd < kHighSad) {
    const int buffslot_golden =
        cpi->ppi->rtc_ref.ref_idx[GOLDEN_FRAME - LAST_FRAME];
    if (cpi->ppi->rtc_ref.buffer_time_index[buffslot_golden] ==
        cpi->svc.current_superframe)
      use_golden_ref_frame = 1;
  }

  use_ref_frame[ALTREF_FRAME] = use_alt_ref_frame;
  use_ref_frame[GOLDEN_FRAME] = use_golden_ref_frame;
  use_ref_frame[LAST_FRAME] = use_last_ref_frame;
  // Keep this assert on, as only 3 references are used in nonrd_pickmode
  // (LAST, GOLDEN, ALTREF), and if all 3 are not set by user then this
  // frame must be an intra-only frame and hence should never enter the
  // pickmode here for inter frames.
  assert(use_last_ref_frame || use_golden_ref_frame || use_alt_ref_frame);
}
subq $0x68, %rsp movb %r9b, %al movq 0x80(%rsp), %r9 movq 0x78(%rsp), %r9 movl 0x70(%rsp), %r9d movq %rdi, 0x60(%rsp) movq %rsi, 0x58(%rsp) movq %rdx, 0x50(%rsp) movl %ecx, 0x4c(%rsp) movl %r8d, 0x48(%rsp) movb %al, 0x47(%rsp) movq 0x60(%rsp), %rax addq $0x3bf80, %rax # imm = 0x3BF80 movq %rax, 0x38(%rsp) movq 0x38(%rsp), %rax addq $0x4a38, %rax # imm = 0x4A38 movq %rax, 0x30(%rsp) movq 0x38(%rsp), %rax movq 0x6088(%rax), %rax movzbl 0x1c(%rax), %eax cmpl $0xc, %eax sete %al andb $0x1, %al movzbl %al, %eax movl %eax, 0x2c(%rsp) movq 0x60(%rsp), %rax movq (%rax), %rcx movb $0x1, %al cmpl $0x0, 0x14dc0(%rcx) movb %al, 0x13(%rsp) jne 0x1fa0fe movq 0x60(%rsp), %rax cmpl $0x0, 0x60cb4(%rax) setne %al movb %al, 0x13(%rsp) movb 0x13(%rsp), %al andb $0x1, %al movzbl %al, %eax movl %eax, 0x28(%rsp) movl $0x1, 0x24(%rsp) movl $0x1, 0x20(%rsp) movq 0x60(%rsp), %rax movq (%rax), %rax cmpl $0x0, 0x14dc0(%rax) je 0x1fa159 movq 0x60(%rsp), %rax movl 0x608b8(%rax), %eax andl $0x1, %eax cmpl $0x0, %eax je 0x1fa149 movl 0x20(%rsp), %eax movl %eax, 0xc(%rsp) jmp 0x1fa151 xorl %eax, %eax movl %eax, 0xc(%rsp) jmp 0x1fa151 movl 0xc(%rsp), %eax movl %eax, 0x20(%rsp) movq 0x60(%rsp), %rax movq (%rax), %rax cmpl $0x0, 0x14dc0(%rax) jne 0x1fa18e cmpl $0x0, 0x20(%rsp) je 0x1fa18e movq 0x60(%rsp), %rax cmpl $0x0, 0x60784(%rax) jne 0x1fa18e cmpl $0x0, 0x70(%rsp) je 0x1fa18e movl $0x0, 0x24(%rsp) cmpl $0x0, 0x20(%rsp) je 0x1fa247 movq 0x60(%rsp), %rax cmpl $0x0, 0x60ccc(%rax) je 0x1fa247 movq 0x58(%rsp), %rax cmpl $0x0, 0x17200(%rax) je 0x1fa247 cmpl $0x0, 0x2c(%rsp) je 0x1fa1f7 movq 0x58(%rsp), %rdi addq $0x17204, %rdi # imm = 0x17204 addq $0x199c, %rdi # imm = 0x199C movl 0x4c(%rsp), %esi movl 0x48(%rsp), %edx movzbl 0x47(%rsp), %ecx callq 0x254900 movl %eax, %ecx movq 0x80(%rsp), %rax movl %ecx, (%rax) jmp 0x1fa228 movq 0x58(%rsp), %rdi addq $0x17204, %rdi # imm = 0x17204 addq $0x199c, %rdi # imm = 0x199C movl 0x4c(%rsp), %esi movl 0x48(%rsp), %edx movzbl 0x47(%rsp), %ecx callq 0x254ab0 movl 
%eax, %ecx movq 0x80(%rsp), %rax movl %ecx, (%rax) movq 0x80(%rsp), %rax cmpl $0x0, (%rax) je 0x1fa245 movl $0x0, 0x24(%rsp) movl $0x0, 0x28(%rsp) jmp 0x1fa247 cmpl $0x0, 0x20(%rsp) je 0x1fa292 movq 0x58(%rsp), %rax cmpl $0x2, 0x17200(%rax) jg 0x1fa282 movq 0x58(%rsp), %rax cmpl $0x0, 0x15244(%rax) jne 0x1fa282 movq 0x58(%rsp), %rax cmpl $0x1, 0x17200(%rax) jle 0x1fa292 movzbl 0x47(%rsp), %eax cmpl $0xc, %eax jle 0x1fa292 movl $0x0, 0x24(%rsp) movl $0x0, 0x28(%rsp) movq 0x30(%rsp), %rdi movq 0x50(%rsp), %rax movw 0xa7(%rax), %ax andw $0x7, %ax movl $0x5, %edx movzbl %al, %esi callq 0x1f8940 cmpl $0x0, %eax je 0x1fa2f0 movq 0x30(%rsp), %rdi movq 0x50(%rsp), %rax movw 0xa7(%rax), %ax andw $0x7, %ax movzbl %al, %esi movl $0x5, %edx callq 0x1fa700 cmpl $0x4, %eax jne 0x1fa2f0 movl $0x1, 0x24(%rsp) movl $0x0, 0x28(%rsp) cmpl $0x0, 0x20(%rsp) je 0x1fa38c movq 0x60(%rsp), %rax cmpl $0x1, 0x4269c(%rax) jne 0x1fa317 movq 0x58(%rsp), %rax cmpl $0x0, 0x17200(%rax) jne 0x1fa336 movq 0x58(%rsp), %rax cmpl $0xc8, 0x25640(%rax) jae 0x1fa38c movq 0x58(%rsp), %rax cmpl $0x2, 0x15d60(%rax) jb 0x1fa38c movq 0x58(%rsp), %rax movzbl 0x25622(%rax), %eax cmpl $0x1, %eax je 0x1fa358 movq 0x58(%rsp), %rax movzbl 0x25623(%rax), %eax cmpl $0x1, %eax jne 0x1fa360 movl $0x0, 0x24(%rsp) movq 0x58(%rsp), %rax movzbl 0x25624(%rax), %eax cmpl $0x1, %eax je 0x1fa382 movq 0x58(%rsp), %rax movzbl 0x25625(%rax), %eax cmpl $0x1, %eax jne 0x1fa38a movl $0x0, 0x28(%rsp) jmp 0x1fa38c movq 0x60(%rsp), %rax cmpl $0x1, 0x4269c(%rax) je 0x1fa48b movq 0x60(%rsp), %rax movl 0x608b8(%rax), %eax andl $0x1, %eax cmpl $0x0, %eax je 0x1fa48b cmpl $0x0, 0x24(%rsp) jne 0x1fa48b cmpl $0x0, 0x28(%rsp) jne 0x1fa48b movq 0x58(%rsp), %rax cmpl $0x7fffffff, 0x15d74(%rax) # imm = 0x7FFFFFFF je 0x1fa48b movq 0x58(%rsp), %rax cmpl $0x2, 0x17200(%rax) jle 0x1fa48b movq 0x58(%rsp), %rax movzbl 0x25622(%rax), %eax cmpl $0x0, %eax jne 0x1fa48b movq 0x58(%rsp), %rax movzbl 0x25623(%rax), %eax cmpl $0x0, %eax jne 0x1fa48b movq 
0x38(%rsp), %rax movl 0x38(%rax), %edx movq 0x38(%rsp), %rax imull 0x3c(%rax), %edx movl $0x96, %eax movl $0x64, %ecx cmpl $0x18c00, %edx # imm = 0x18C00 cmovgl %ecx, %eax movl %eax, 0x1c(%rsp) movq 0x58(%rsp), %rax movl 0x15d74(%rax), %eax movzbl 0x47(%rsp), %ecx movl %ecx, %edx leaq 0x8bf977(%rip), %rcx # 0xab9dd0 movzbl (%rcx,%rdx), %ecx movzbl 0x47(%rsp), %edx movl %edx, %esi leaq 0x8bf975(%rip), %rdx # 0xab9de0 movzbl (%rdx,%rsi), %edx addl %edx, %ecx sarl %cl, %eax movl %eax, 0x18(%rsp) movl 0x18(%rsp), %eax cmpl 0x1c(%rsp), %eax jle 0x1fa489 movl $0x1, 0x24(%rsp) jmp 0x1fa48b movq 0x60(%rsp), %rax movl 0x608b8(%rax), %eax andl $0x40, %eax cmpl $0x0, %eax je 0x1fa4a8 movl 0x28(%rsp), %eax movl %eax, 0x8(%rsp) jmp 0x1fa4b0 xorl %eax, %eax movl %eax, 0x8(%rsp) jmp 0x1fa4b0 movl 0x8(%rsp), %eax movl %eax, 0x28(%rsp) movq 0x60(%rsp), %rax movl 0x608b8(%rax), %eax andl $0x8, %eax cmpl $0x0, %eax je 0x1fa4d5 movl 0x24(%rsp), %eax movl %eax, 0x4(%rsp) jmp 0x1fa4dd xorl %eax, %eax movl %eax, 0x4(%rsp) jmp 0x1fa4dd movl 0x4(%rsp), %eax movl %eax, 0x24(%rsp) movq 0x60(%rsp), %rax cmpl $0x0, 0x9d330(%rax) jle 0x1fa551 movq 0x60(%rsp), %rax movl 0x608b8(%rax), %eax andl $0x8, %eax cmpl $0x0, %eax je 0x1fa551 movq 0x58(%rsp), %rax cmpl $0x4, 0x15d60(%rax) jae 0x1fa551 movq 0x60(%rsp), %rax movq (%rax), %rax movl 0x14d90(%rax), %eax movl %eax, 0x14(%rsp) movq 0x60(%rsp), %rax movq (%rax), %rax movslq 0x14(%rsp), %rcx movl 0x14dd8(%rax,%rcx,4), %eax movq 0x60(%rsp), %rcx cmpl 0x9d358(%rcx), %eax jne 0x1fa54f movl $0x1, 0x24(%rsp) jmp 0x1fa551 movl 0x28(%rsp), %ecx movq 0x78(%rsp), %rax movl %ecx, 0x1c(%rax) movl 0x24(%rsp), %ecx movq 0x78(%rsp), %rax movl %ecx, 0x10(%rax) movl 0x20(%rsp), %ecx movq 0x78(%rsp), %rax movl %ecx, 0x4(%rax) addq $0x68, %rsp retq nopw (%rax,%rax)
/m-ab-s[P]aom/av1/encoder/nonrd_pickmode.c
av1_mode_context_analyzer
// Derive the motion-mode context for the given reference pair. Single
// references return the raw packed context; compound references combine the
// NEWMV and REFMV fields through compound_mode_ctx_map.
static inline int16_t av1_mode_context_analyzer(
    const int16_t *const mode_context, const MV_REFERENCE_FRAME *const rf) {
  const int8_t ref_frame = av1_ref_frame_type(rf);
  const int16_t ctx = mode_context[ref_frame];

  // Single-reference prediction: use the per-reference context directly.
  if (rf[1] <= INTRA_FRAME) return ctx;

  // Compound prediction: unpack the two context fields, clamp the NEWMV
  // context to the table width, and look up the joint context.
  const int16_t newmv_ctx = ctx & NEWMV_CTX_MASK;
  const int16_t refmv_ctx = (ctx >> REFMV_OFFSET) & REFMV_CTX_MASK;
  const int16_t newmv_col =
      (newmv_ctx < COMP_NEWMV_CTXS - 1) ? newmv_ctx : (COMP_NEWMV_CTXS - 1);
  return compound_mode_ctx_map[refmv_ctx >> 1][newmv_col];
}
subq $0x38, %rsp movq %rdi, 0x28(%rsp) movq %rsi, 0x20(%rsp) movq 0x20(%rsp), %rdi callq 0x1f9780 movb %al, 0x1f(%rsp) movq 0x20(%rsp), %rax movsbl 0x1(%rax), %eax cmpl $0x0, %eax jg 0x1fa7b3 movq 0x28(%rsp), %rax movsbq 0x1f(%rsp), %rcx movw (%rax,%rcx,2), %ax movw %ax, 0x36(%rsp) jmp 0x1fa840 movq 0x28(%rsp), %rax movsbq 0x1f(%rsp), %rcx movswl (%rax,%rcx,2), %eax andl $0x7, %eax movw %ax, 0x1c(%rsp) movq 0x28(%rsp), %rax movsbq 0x1f(%rsp), %rcx movswl (%rax,%rcx,2), %eax sarl $0x4, %eax andl $0xf, %eax movw %ax, 0x1a(%rsp) movswl 0x1a(%rsp), %eax sarl %eax movslq %eax, %rcx leaq 0x9a77fb(%rip), %rax # 0xba1ff0 imulq $0xa, %rcx, %rcx addq %rcx, %rax movq %rax, 0x10(%rsp) movswl 0x1c(%rsp), %eax cmpl $0x4, %eax jge 0x1fa816 movswl 0x1c(%rsp), %eax movl %eax, 0xc(%rsp) jmp 0x1fa821 movl $0x4, %eax movl %eax, 0xc(%rsp) jmp 0x1fa821 movq 0x10(%rsp), %rax movl 0xc(%rsp), %ecx movslq %ecx, %rcx movw (%rax,%rcx,2), %ax movw %ax, 0x18(%rsp) movw 0x18(%rsp), %ax movw %ax, 0x36(%rsp) movw 0x36(%rsp), %ax addq $0x38, %rsp retq nopw (%rax,%rax)
/m-ab-s[P]aom/av1/common/mvref_common.h
setup_pred_plane
// Point dst at the (possibly scaled) prediction plane for the block starting
// at (mi_row, mi_col), recording the plane origin and dimensions.
static inline void setup_pred_plane(struct buf_2d *dst, BLOCK_SIZE bsize,
                                    uint8_t *src, int width, int height,
                                    int stride, int mi_row, int mi_col,
                                    const struct scale_factors *scale,
                                    int subsampling_x, int subsampling_y) {
  // For a one-mi-unit-tall/wide block starting at an odd mi position in a
  // subsampled plane, step back one mi unit so the offset stays aligned.
  if (subsampling_y && (mi_row & 0x01) && (mi_size_high[bsize] == 1)) --mi_row;
  if (subsampling_x && (mi_col & 0x01) && (mi_size_wide[bsize] == 1)) --mi_col;

  // Convert mi units to pixels, then to the subsampled plane's coordinates.
  const int px = (mi_col * MI_SIZE) >> subsampling_x;
  const int py = (mi_row * MI_SIZE) >> subsampling_y;
  uint8_t *const block_start =
      src + scaled_buffer_offset(px, py, stride, scale);

  dst->buf = block_start;
  dst->buf0 = src;
  dst->stride = stride;
  dst->width = width;
  dst->height = height;
}
subq $0x38, %rsp movb %sil, %al movl 0x60(%rsp), %esi movl 0x58(%rsp), %esi movq 0x50(%rsp), %rsi movl 0x48(%rsp), %esi movl 0x40(%rsp), %esi movq %rdi, 0x30(%rsp) movb %al, 0x2f(%rsp) movq %rdx, 0x20(%rsp) movl %ecx, 0x1c(%rsp) movl %r8d, 0x18(%rsp) movl %r9d, 0x14(%rsp) cmpl $0x0, 0x60(%rsp) je 0x20994d movl 0x40(%rsp), %eax andl $0x1, %eax cmpl $0x0, %eax je 0x20994d movzbl 0x2f(%rsp), %eax movl %eax, %ecx leaq 0x8b2067(%rip), %rax # 0xabb9a0 movzbl (%rax,%rcx), %eax cmpl $0x1, %eax jne 0x20994d movl 0x40(%rsp), %eax subl $0x1, %eax movl %eax, 0x40(%rsp) cmpl $0x0, 0x58(%rsp) je 0x209982 movl 0x48(%rsp), %eax andl $0x1, %eax cmpl $0x0, %eax je 0x209982 movzbl 0x2f(%rsp), %eax movl %eax, %ecx leaq 0x8b2012(%rip), %rax # 0xabb980 movzbl (%rax,%rcx), %eax cmpl $0x1, %eax jne 0x209982 movl 0x48(%rsp), %eax subl $0x1, %eax movl %eax, 0x48(%rsp) movl 0x48(%rsp), %eax shll $0x2, %eax movl 0x58(%rsp), %ecx sarl %cl, %eax movl %eax, 0x10(%rsp) movl 0x40(%rsp), %eax shll $0x2, %eax movl 0x60(%rsp), %ecx sarl %cl, %eax movl %eax, 0xc(%rsp) movq 0x20(%rsp), %rax movq %rax, (%rsp) movl 0x10(%rsp), %edi movl 0xc(%rsp), %esi movl 0x14(%rsp), %edx movq 0x50(%rsp), %rcx callq 0x209a40 movq (%rsp), %rcx addq %rax, %rcx movq 0x30(%rsp), %rax movq %rcx, (%rax) movq 0x20(%rsp), %rcx movq 0x30(%rsp), %rax movq %rcx, 0x8(%rax) movl 0x1c(%rsp), %ecx movq 0x30(%rsp), %rax movl %ecx, 0x10(%rax) movl 0x18(%rsp), %ecx movq 0x30(%rsp), %rax movl %ecx, 0x14(%rax) movl 0x14(%rsp), %ecx movq 0x30(%rsp), %rax movl %ecx, 0x18(%rax) addq $0x38, %rsp retq nopl (%rax)
/m-ab-s[P]aom/av1/common/reconinter.h
tf_alloc_and_reset_data
// Allocate the temporal-filter working buffers for num_pels pixels and clear
// the accumulated diff stats. Returns false if any allocation failed.
static inline bool tf_alloc_and_reset_data(TemporalFilterData *tf_data,
                                           int num_pels, int is_high_bitdepth) {
  tf_data->tmp_mbmi = (MB_MODE_INFO *)aom_calloc(1, sizeof(*tf_data->tmp_mbmi));
  tf_data->accum =
      (uint32_t *)aom_memalign(16, num_pels * sizeof(*tf_data->accum));
  tf_data->count =
      (uint16_t *)aom_memalign(16, num_pels * sizeof(*tf_data->count));
  // High bitdepth holds 16-bit samples: allocate twice the bytes and tag the
  // pointer with CONVERT_TO_BYTEPTR.
  if (is_high_bitdepth) {
    void *const raw = aom_memalign(32, num_pels * 2 * sizeof(*tf_data->pred));
    tf_data->pred = CONVERT_TO_BYTEPTR(raw);
  } else {
    tf_data->pred =
        (uint8_t *)aom_memalign(32, num_pels * sizeof(*tf_data->pred));
  }
  // In case of an allocation failure, other successfully allocated buffers
  // will be freed by the tf_dealloc_data() call in encoder_destroy().
  const bool ok = tf_data->tmp_mbmi != NULL && tf_data->accum != NULL &&
                  tf_data->count != NULL && tf_data->pred != NULL;
  if (!ok) return false;
  memset(&tf_data->diff, 0, sizeof(tf_data->diff));
  return true;
}
subq $0x18, %rsp movq %rdi, 0x8(%rsp) movl %esi, 0x4(%rsp) movl %edx, (%rsp) movl $0x1, %edi movl $0xb0, %esi callq 0xa0d80 movq %rax, %rcx movq 0x8(%rsp), %rax movq %rcx, 0x10(%rax) movslq 0x4(%rsp), %rsi shlq $0x2, %rsi movl $0x10, %edi callq 0xa0bc0 movq %rax, %rcx movq 0x8(%rsp), %rax movq %rcx, 0x18(%rax) movslq 0x4(%rsp), %rsi shlq %rsi movl $0x10, %edi callq 0xa0bc0 movq %rax, %rcx movq 0x8(%rsp), %rax movq %rcx, 0x20(%rax) cmpl $0x0, (%rsp) je 0x219906 movl 0x4(%rsp), %eax shll %eax movslq %eax, %rsi shlq $0x0, %rsi movl $0x20, %edi callq 0xa0bc0 movq %rax, %rcx shrq %rcx movq 0x8(%rsp), %rax movq %rcx, 0x28(%rax) jmp 0x219925 movslq 0x4(%rsp), %rsi shlq $0x0, %rsi movl $0x20, %edi callq 0xa0bc0 movq %rax, %rcx movq 0x8(%rsp), %rax movq %rcx, 0x28(%rax) movq 0x8(%rsp), %rax cmpq $0x0, 0x10(%rax) je 0x219955 movq 0x8(%rsp), %rax cmpq $0x0, 0x18(%rax) je 0x219955 movq 0x8(%rsp), %rax cmpq $0x0, 0x20(%rax) je 0x219955 movq 0x8(%rsp), %rax cmpq $0x0, 0x28(%rax) jne 0x21995c movb $0x0, 0x17(%rsp) jmp 0x219972 movq 0x8(%rsp), %rdi xorl %esi, %esi movl $0x10, %edx callq 0x18280 movb $0x1, 0x17(%rsp) movb 0x17(%rsp), %al andb $0x1, %al addq $0x18, %rsp retq nopl (%rax)
/m-ab-s[P]aom/av1/encoder/temporal_filter.h
tf_dealloc_data
// Release every buffer owned by tf_data and null the pointers so a repeated
// call (or a call after a partial allocation) is harmless.
static inline void tf_dealloc_data(TemporalFilterData *tf_data,
                                   int is_high_bitdepth) {
  // Undo the CONVERT_TO_BYTEPTR tagging so aom_free() receives the address
  // that was originally allocated.
  if (is_high_bitdepth) {
    tf_data->pred = (uint8_t *)CONVERT_TO_SHORTPTR(tf_data->pred);
  }
  aom_free(tf_data->tmp_mbmi);
  aom_free(tf_data->accum);
  aom_free(tf_data->count);
  aom_free(tf_data->pred);
  tf_data->tmp_mbmi = NULL;
  tf_data->accum = NULL;
  tf_data->count = NULL;
  tf_data->pred = NULL;
}
subq $0x18, %rsp movq %rdi, 0x10(%rsp) movl %esi, 0xc(%rsp) cmpl $0x0, 0xc(%rsp) je 0x219a99 movq 0x10(%rsp), %rax movq 0x28(%rax), %rcx shlq %rcx movq 0x10(%rsp), %rax movq %rcx, 0x28(%rax) movq 0x10(%rsp), %rax movq 0x10(%rax), %rdi callq 0xa0e00 movq 0x10(%rsp), %rax movq $0x0, 0x10(%rax) movq 0x10(%rsp), %rax movq 0x18(%rax), %rdi callq 0xa0e00 movq 0x10(%rsp), %rax movq $0x0, 0x18(%rax) movq 0x10(%rsp), %rax movq 0x20(%rax), %rdi callq 0xa0e00 movq 0x10(%rsp), %rax movq $0x0, 0x20(%rax) movq 0x10(%rsp), %rax movq 0x28(%rax), %rdi callq 0xa0e00 movq 0x10(%rsp), %rax movq $0x0, 0x28(%rax) addq $0x18, %rsp retq nopw (%rax,%rax)
/m-ab-s[P]aom/av1/encoder/temporal_filter.h
av1_tf_info_alloc
// Allocate the temporal-filter output frame buffers. Returns true on success
// (trivially so when temporal filtering is disabled), false when any buffer
// allocation fails.
bool av1_tf_info_alloc(TEMPORAL_FILTER_INFO *tf_info, const AV1_COMP *cpi) {
  const AV1EncoderConfig *oxcf = &cpi->oxcf;

  tf_info->is_temporal_filter_on = av1_is_temporal_filter_on(oxcf);
  if (!tf_info->is_temporal_filter_on) return true;

  const AV1_COMMON *cm = &cpi->common;
  const SequenceHeader *const seq_params = cm->seq_params;
  for (int buf = 0; buf < TF_INFO_BUF_COUNT; buf++) {
    const int err = aom_realloc_frame_buffer(
        &tf_info->tf_buf[buf], oxcf->frm_dim_cfg.width,
        oxcf->frm_dim_cfg.height, seq_params->subsampling_x,
        seq_params->subsampling_y, seq_params->use_highbitdepth,
        cpi->oxcf.border_in_pixels, cm->features.byte_alignment, NULL, NULL,
        NULL, cpi->alloc_pyramid, 0);
    if (err) return false;
  }
  return true;
}
pushq %rbx subq $0x70, %rsp movq %rdi, 0x60(%rsp) movq %rsi, 0x58(%rsp) movq 0x58(%rsp), %rax addq $0x42320, %rax # imm = 0x42320 movq %rax, 0x50(%rsp) movq 0x50(%rsp), %rdi callq 0x219b10 movl %eax, %ecx movq 0x60(%rsp), %rax movl %ecx, (%rax) movq 0x60(%rsp), %rax cmpl $0x0, (%rax) jne 0x219b96 movb $0x1, 0x6f(%rsp) jmp 0x219c91 movq 0x58(%rsp), %rax addq $0x3bf80, %rax # imm = 0x3BF80 movq %rax, 0x48(%rsp) movq 0x48(%rsp), %rax movq 0x6088(%rax), %rax movq %rax, 0x40(%rsp) movl $0x0, 0x3c(%rsp) cmpl $0x2, 0x3c(%rsp) jge 0x219c8c movq 0x60(%rsp), %rdi addq $0x8, %rdi movslq 0x3c(%rsp), %rax imulq $0xd0, %rax, %rax addq %rax, %rdi movq 0x50(%rsp), %rax movl 0x18(%rax), %esi movq 0x50(%rsp), %rax movl 0x1c(%rax), %edx movq 0x40(%rsp), %rax movl 0x60(%rax), %ecx movq 0x40(%rsp), %rax movl 0x64(%rax), %r8d movq 0x40(%rsp), %rax movzbl 0x4c(%rax), %r9d movq 0x58(%rsp), %rax movl 0x42724(%rax), %r11d movq 0x48(%rsp), %rax movl 0x200(%rax), %r10d movq 0x58(%rsp), %rax movb 0x9d704(%rax), %al xorl %ebx, %ebx xorl %ebx, %ebx andb $0x1, %al movl %r11d, (%rsp) movl %r10d, 0x8(%rsp) movq $0x0, 0x10(%rsp) movq $0x0, 0x18(%rsp) movq $0x0, 0x20(%rsp) movzbl %al, %eax movl %eax, 0x28(%rsp) movl $0x0, 0x30(%rsp) callq 0x5ad660 cmpl $0x0, %eax je 0x219c7a movb $0x0, 0x6f(%rsp) jmp 0x219c91 jmp 0x219c7c movl 0x3c(%rsp), %eax addl $0x1, %eax movl %eax, 0x3c(%rsp) jmp 0x219bbf movb $0x1, 0x6f(%rsp) movb 0x6f(%rsp), %al andb $0x1, %al addq $0x70, %rsp popq %rbx retq nopl (%rax)
/m-ab-s[P]aom/av1/encoder/temporal_filter.c
av1_tf_info_filtering
// Run temporal filtering for every KF/ARF entry of the GF group, caching the
// filtered frames in tf_info so repeated calls after av1_tf_info_reset() only
// produce frames that do not exist yet.
void av1_tf_info_filtering(TEMPORAL_FILTER_INFO *tf_info, AV1_COMP *cpi,
                           const GF_GROUP *gf_group) {
  if (!tf_info->is_temporal_filter_on) return;

  const AV1_COMMON *const cm = &cpi->common;
  for (int gf_index = 0; gf_index < gf_group->size; ++gf_index) {
    const int update_type = gf_group->update_type[gf_index];
    // Only keyframe and ARF updates are filtered.
    if (update_type != KF_UPDATE && update_type != ARF_UPDATE) continue;

    // Buffer slot 1 is used when the frame codes as INTER, slot 0 otherwise.
    const int buf_idx = gf_group->frame_type[gf_index] == INTER_FRAME;
    const int lookahead_idx =
        gf_group->arf_src_offset[gf_index] + gf_group->cur_frame_idx[gf_index];

    // Skip work when the slot already holds this display offset.
    const int cached =
        tf_info->tf_buf_valid[buf_idx] != 0 &&
        tf_info->tf_buf_display_index_offset[buf_idx] == lookahead_idx;
    if (cached) continue;

    YV12_BUFFER_CONFIG *const out_buf = &tf_info->tf_buf[buf_idx];
    av1_temporal_filter(cpi, lookahead_idx, gf_index,
                        &tf_info->frame_diff[buf_idx], out_buf);
    aom_extend_frame_borders(out_buf, av1_num_planes(cm));
    tf_info->tf_buf_gf_index[buf_idx] = gf_index;
    tf_info->tf_buf_display_index_offset[buf_idx] = lookahead_idx;
    tf_info->tf_buf_valid[buf_idx] = 1;
  }
}
subq $0x48, %rsp movq %rdi, 0x40(%rsp) movq %rsi, 0x38(%rsp) movq %rdx, 0x30(%rsp) movq 0x40(%rsp), %rax cmpl $0x0, (%rax) jne 0x219d82 jmp 0x219f0c movq 0x38(%rsp), %rax addq $0x3bf80, %rax # imm = 0x3BF80 movq %rax, 0x28(%rsp) movl $0x0, 0x24(%rsp) movl 0x24(%rsp), %eax movq 0x30(%rsp), %rcx cmpl 0x1878(%rcx), %eax jge 0x219f0c movq 0x30(%rsp), %rax movslq 0x24(%rsp), %rcx movzbl (%rax,%rcx), %eax movl %eax, 0x20(%rsp) cmpl $0x0, 0x20(%rsp) je 0x219dd3 cmpl $0x3, 0x20(%rsp) jne 0x219efa movq 0x30(%rsp), %rax movslq 0x24(%rsp), %rcx movzbl 0x1680(%rax,%rcx), %eax cmpl $0x1, %eax sete %al andb $0x1, %al movzbl %al, %eax movl %eax, 0x1c(%rsp) movq 0x30(%rsp), %rax movslq 0x24(%rsp), %rcx movzbl 0xfa(%rax,%rcx), %eax movq 0x30(%rsp), %rcx movslq 0x24(%rsp), %rdx movzbl 0x1f4(%rcx,%rdx), %ecx addl %ecx, %eax movl %eax, 0x18(%rsp) movq 0x40(%rsp), %rax movslq 0x1c(%rsp), %rcx cmpl $0x0, 0x2a8(%rax,%rcx,4) je 0x219e4d movq 0x40(%rsp), %rax movslq 0x1c(%rsp), %rcx movl 0x2a0(%rax,%rcx,4), %eax cmpl 0x18(%rsp), %eax je 0x219ef8 movq 0x40(%rsp), %rax addq $0x8, %rax movslq 0x1c(%rsp), %rcx imulq $0xd0, %rcx, %rcx addq %rcx, %rax movq %rax, 0x10(%rsp) movq 0x38(%rsp), %rdi movl 0x18(%rsp), %esi movl 0x24(%rsp), %edx movq 0x40(%rsp), %rcx addq $0x278, %rcx # imm = 0x278 movslq 0x1c(%rsp), %rax shlq $0x4, %rax addq %rax, %rcx movq 0x10(%rsp), %r8 callq 0x2194e0 movq 0x10(%rsp), %rax movq %rax, 0x8(%rsp) movq 0x28(%rsp), %rdi callq 0x2171f0 movq 0x8(%rsp), %rdi movl %eax, %esi callq 0x5aeec0 movl 0x24(%rsp), %edx movq 0x40(%rsp), %rax movslq 0x1c(%rsp), %rcx movl %edx, 0x298(%rax,%rcx,4) movl 0x18(%rsp), %edx movq 0x40(%rsp), %rax movslq 0x1c(%rsp), %rcx movl %edx, 0x2a0(%rax,%rcx,4) movq 0x40(%rsp), %rax movslq 0x1c(%rsp), %rcx movl $0x1, 0x2a8(%rax,%rcx,4) jmp 0x219efa jmp 0x219efc movl 0x24(%rsp), %eax addl $0x1, %eax movl %eax, 0x24(%rsp) jmp 0x219d9a addq $0x48, %rsp retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/av1/encoder/temporal_filter.c
av1_tf_info_get_filtered_buf
// Look up the cached filtered frame for gf_index. On a hit, copy its diff
// stats into *frame_diff and return the buffer; otherwise return NULL.
YV12_BUFFER_CONFIG *av1_tf_info_get_filtered_buf(TEMPORAL_FILTER_INFO *tf_info,
                                                 int gf_index,
                                                 FRAME_DIFF *frame_diff) {
  if (!tf_info->is_temporal_filter_on) return NULL;

  YV12_BUFFER_CONFIG *found = NULL;
  for (int buf = 0; buf < TF_INFO_BUF_COUNT; ++buf) {
    const int matches = tf_info->tf_buf_valid[buf] &&
                        tf_info->tf_buf_gf_index[buf] == gf_index;
    if (!matches) continue;
    found = &tf_info->tf_buf[buf];
    *frame_diff = tf_info->frame_diff[buf];
  }
  return found;
}
movq %rdi, -0x10(%rsp) movl %esi, -0x14(%rsp) movq %rdx, -0x20(%rsp) movq -0x10(%rsp), %rax cmpl $0x0, (%rax) jne 0x219f46 movq $0x0, -0x8(%rsp) jmp 0x219ff1 movq $0x0, -0x28(%rsp) movl $0x0, -0x2c(%rsp) cmpl $0x2, -0x2c(%rsp) jge 0x219fe7 movq -0x10(%rsp), %rax movslq -0x2c(%rsp), %rcx cmpl $0x0, 0x2a8(%rax,%rcx,4) je 0x219fd5 movq -0x10(%rsp), %rax movslq -0x2c(%rsp), %rcx movl 0x298(%rax,%rcx,4), %eax cmpl -0x14(%rsp), %eax jne 0x219fd5 movq -0x10(%rsp), %rax addq $0x8, %rax movslq -0x2c(%rsp), %rcx imulq $0xd0, %rcx, %rcx addq %rcx, %rax movq %rax, -0x28(%rsp) movq -0x20(%rsp), %rax movq -0x10(%rsp), %rcx addq $0x278, %rcx # imm = 0x278 movslq -0x2c(%rsp), %rdx shlq $0x4, %rdx addq %rdx, %rcx movq (%rcx), %rdx movq %rdx, (%rax) movq 0x8(%rcx), %rcx movq %rcx, 0x8(%rax) jmp 0x219fd7 movl -0x2c(%rsp), %eax addl $0x1, %eax movl %eax, -0x2c(%rsp) jmp 0x219f57 movq -0x28(%rsp), %rax movq %rax, -0x8(%rsp) movq -0x8(%rsp), %rax retq nopw (%rax,%rax)
/m-ab-s[P]aom/av1/encoder/temporal_filter.c
av1_get_search_site_config
// Return a search-site config whose stride matches the current reference
// buffer, preferring the compressor-level cache and falling back to the
// per-thread buffer in MACROBLOCK (refreshing it if stale).
static inline const search_site_config *av1_get_search_site_config(
    const AV1_COMP *cpi, MACROBLOCK *x, SEARCH_METHODS search_method) {
  const int ref_stride = x->e_mbd.plane[0].pre[0].stride;

  // AV1_COMP::mv_search_params.search_site_config is a compressor level cache
  // shared by multiple threads. In most cases where all frames have the same
  // resolution, it already holds the config we need.
  const MotionVectorSearchParams *mv_search_params = &cpi->mv_search_params;
  const search_site_config *src_cfg =
      mv_search_params->search_site_cfg[SS_CFG_SRC];
  if (src_cfg->stride == ref_stride) return src_cfg;
  const search_site_config *lookahead_cfg =
      mv_search_params->search_site_cfg[SS_CFG_LOOKAHEAD];
  if (lookahead_cfg->stride == ref_stride) return lookahead_cfg;

  // Cache miss: rely on the thread-level MACROBLOCK::search_site_cfg_buf,
  // updating it when even that stride does not match.
  search_method = search_method_lookup[search_method];
  assert(search_method_lookup[search_method] == search_method &&
         "The search_method_lookup table should be idempotent.");
  if (x->search_site_cfg_buf[search_method].stride != ref_stride) {
    av1_refresh_search_site_config(x->search_site_cfg_buf, search_method,
                                   ref_stride);
  }
  return x->search_site_cfg_buf;
}
subq $0x28, %rsp movb %dl, %al movq %rdi, 0x18(%rsp) movq %rsi, 0x10(%rsp) movb %al, 0xf(%rsp) movq 0x10(%rsp), %rax movl 0x1f8(%rax), %eax movl %eax, 0x8(%rsp) movq 0x18(%rsp), %rax addq $0x60df8, %rax # imm = 0x60DF8 movq %rax, (%rsp) movl 0x8(%rsp), %eax movq (%rsp), %rcx cmpl 0xc74(%rcx), %eax jne 0x21a0b4 movq (%rsp), %rax addq $0x10, %rax movq %rax, 0x20(%rsp) jmp 0x21a143 movl 0x8(%rsp), %eax movq (%rsp), %rcx cmpl 0x634c(%rcx), %eax jne 0x21a0d9 movq (%rsp), %rax addq $0x10, %rax addq $0x56d8, %rax # imm = 0x56D8 movq %rax, 0x20(%rsp) jmp 0x21a143 jmp 0x21a0db movzbl 0xf(%rsp), %eax movl %eax, %ecx leaq 0x8a1e91(%rip), %rax # 0xabbf7a movb (%rax,%rcx), %al movb %al, 0xf(%rsp) movl 0x8(%rsp), %eax movq 0x10(%rsp), %rcx addq $0x1f6f4, %rcx # imm = 0x1F6F4 movzbl 0xf(%rsp), %edx imulq $0xc68, %rdx, %rdx # imm = 0xC68 addq %rdx, %rcx cmpl 0xc64(%rcx), %eax je 0x21a133 movq 0x10(%rsp), %rdi addq $0x1f6f4, %rdi # imm = 0x1F6F4 movb 0xf(%rsp), %al movl 0x8(%rsp), %edx movzbl %al, %esi callq 0x21a3c0 movq 0x10(%rsp), %rax addq $0x1f6f4, %rax # imm = 0x1F6F4 movq %rax, 0x20(%rsp) movq 0x20(%rsp), %rax addq $0x28, %rsp retq nopl (%rax)
/m-ab-s[P]aom/av1/encoder/motion_search_facade.h
get_noise_var
// Bit-depth dispatcher for the block noise-variance estimate. The high
// bitdepth path reinterprets the byte buffers as 16-bit samples.
static inline double get_noise_var(const uint8_t *data, const uint8_t *denoised,
                                   int w, int h, int stride, int x_o, int y_o,
                                   int block_size_x, int block_size_y,
                                   int use_highbd) {
  if (!use_highbd) {
    return get_noise_var_lowbd(data, denoised, w, h, stride, x_o, y_o,
                               block_size_x, block_size_y);
  }
  return get_noise_var_highbd((const uint16_t *)data,
                              (const uint16_t *)denoised, w, h, stride, x_o,
                              y_o, block_size_x, block_size_y);
}
subq $0x48, %rsp movl 0x68(%rsp), %eax movl 0x60(%rsp), %eax movl 0x58(%rsp), %eax movl 0x50(%rsp), %eax movq %rdi, 0x38(%rsp) movq %rsi, 0x30(%rsp) movl %edx, 0x2c(%rsp) movl %ecx, 0x28(%rsp) movl %r8d, 0x24(%rsp) movl %r9d, 0x20(%rsp) cmpl $0x0, 0x68(%rsp) je 0x2bd47b movq 0x38(%rsp), %rdi movq 0x30(%rsp), %rsi movl 0x2c(%rsp), %edx movl 0x28(%rsp), %ecx movl 0x24(%rsp), %r8d movl 0x20(%rsp), %r9d movl 0x50(%rsp), %r11d movl 0x58(%rsp), %r10d movl 0x60(%rsp), %eax movl %r11d, (%rsp) movl %r10d, 0x8(%rsp) movl %eax, 0x10(%rsp) callq 0x2bd7c0 movsd %xmm0, 0x40(%rsp) jmp 0x2bd4bd movq 0x38(%rsp), %rdi movq 0x30(%rsp), %rsi movl 0x2c(%rsp), %edx movl 0x28(%rsp), %ecx movl 0x24(%rsp), %r8d movl 0x20(%rsp), %r9d movl 0x50(%rsp), %r11d movl 0x58(%rsp), %r10d movl 0x60(%rsp), %eax movl %r11d, (%rsp) movl %r10d, 0x8(%rsp) movl %eax, 0x10(%rsp) callq 0x2bd990 movsd %xmm0, 0x40(%rsp) movsd 0x40(%rsp), %xmm0 addq $0x48, %rsp retq nopl (%rax,%rax)
/m-ab-s[P]aom/aom_dsp/noise_model.c
get_noise_var_highbd
// Bit-depth dispatcher for the block noise-variance estimate.
// NOTE(review): this record is labeled get_noise_var_highbd, but the snippet
// shown is the get_noise_var dispatcher (the adjoining disassembly computes
// the 16-bit mean/variance kernel) — verify against the extraction source.
static inline double get_noise_var(const uint8_t *data, const uint8_t *denoised,
                                   int w, int h, int stride, int x_o, int y_o,
                                   int block_size_x, int block_size_y,
                                   int use_highbd) {
  if (use_highbd)
    // High bitdepth buffers actually hold 16-bit samples.
    return get_noise_var_highbd((const uint16_t *)data,
                                (const uint16_t *)denoised, w, h, stride, x_o,
                                y_o, block_size_x, block_size_y);
  return get_noise_var_lowbd(data, denoised, w, h, stride, x_o, y_o,
                             block_size_x, block_size_y);
}
movl 0x18(%rsp), %eax movl 0x10(%rsp), %eax movl 0x8(%rsp), %eax movq %rdi, -0x8(%rsp) movq %rsi, -0x10(%rsp) movl %edx, -0x14(%rsp) movl %ecx, -0x18(%rsp) movl %r8d, -0x1c(%rsp) movl %r9d, -0x20(%rsp) movl -0x1c(%rsp), %eax subl 0x8(%rsp), %eax cmpl 0x18(%rsp), %eax jge 0x2bd804 movl -0x1c(%rsp), %eax subl 0x8(%rsp), %eax movl %eax, -0x4c(%rsp) jmp 0x2bd80c movl 0x18(%rsp), %eax movl %eax, -0x4c(%rsp) movl -0x4c(%rsp), %eax movl %eax, -0x24(%rsp) movl -0x18(%rsp), %eax subl -0x20(%rsp), %eax cmpl 0x10(%rsp), %eax jge 0x2bd830 movl -0x18(%rsp), %eax subl -0x20(%rsp), %eax movl %eax, -0x50(%rsp) jmp 0x2bd838 movl 0x10(%rsp), %eax movl %eax, -0x50(%rsp) movl -0x50(%rsp), %eax movl %eax, -0x28(%rsp) xorps %xmm0, %xmm0 movsd %xmm0, -0x30(%rsp) xorps %xmm0, %xmm0 movsd %xmm0, -0x38(%rsp) movl $0x0, -0x3c(%rsp) movl -0x3c(%rsp), %eax cmpl -0x24(%rsp), %eax jge 0x2bd92e movl $0x0, -0x40(%rsp) movl -0x40(%rsp), %eax cmpl -0x28(%rsp), %eax jge 0x2bd91c movq -0x8(%rsp), %rax movl 0x8(%rsp), %ecx movl -0x3c(%rsp), %edx addl %edx, %ecx movl -0x14(%rsp), %edx imull %edx, %ecx movl -0x20(%rsp), %edx addl %edx, %ecx movl -0x40(%rsp), %edx addl %edx, %ecx movslq %ecx, %rcx movzwl (%rax,%rcx,2), %eax cvtsi2sd %eax, %xmm0 movq -0x10(%rsp), %rax movl 0x8(%rsp), %ecx addl -0x3c(%rsp), %ecx imull -0x14(%rsp), %ecx addl -0x20(%rsp), %ecx addl -0x40(%rsp), %ecx movslq %ecx, %rcx movzwl (%rax,%rcx,2), %eax cvtsi2sd %eax, %xmm1 subsd %xmm1, %xmm0 movsd %xmm0, -0x48(%rsp) movsd -0x48(%rsp), %xmm0 addsd -0x38(%rsp), %xmm0 movsd %xmm0, -0x38(%rsp) movsd -0x48(%rsp), %xmm0 movsd -0x48(%rsp), %xmm2 movsd -0x30(%rsp), %xmm1 mulsd %xmm2, %xmm0 addsd %xmm1, %xmm0 movsd %xmm0, -0x30(%rsp) movl -0x40(%rsp), %eax addl $0x1, %eax movl %eax, -0x40(%rsp) jmp 0x2bd870 jmp 0x2bd91e movl -0x3c(%rsp), %eax addl $0x1, %eax movl %eax, -0x3c(%rsp) jmp 0x2bd85a movl -0x28(%rsp), %eax imull -0x24(%rsp), %eax cvtsi2sd %eax, %xmm1 movsd -0x38(%rsp), %xmm0 divsd %xmm1, %xmm0 movsd %xmm0, -0x38(%rsp) movsd 
-0x30(%rsp), %xmm1 movl -0x28(%rsp), %eax imull -0x24(%rsp), %eax cvtsi2sd %eax, %xmm0 divsd %xmm0, %xmm1 movsd -0x38(%rsp), %xmm0 movsd -0x38(%rsp), %xmm2 movq %xmm0, %rax movabsq $-0x8000000000000000, %rcx # imm = 0x8000000000000000 xorq %rcx, %rax movq %rax, %xmm0 mulsd %xmm2, %xmm0 addsd %xmm1, %xmm0 retq nop
/m-ab-s[P]aom/aom_dsp/noise_model.c
ransac
// Fit up to `num_desired_motions` motion models of the requested `type` to
// the matched point pairs using RANSAC. Results are written into
// `motion_models`; `*mem_alloc_failed` is set by the internal routine on an
// allocation failure. Returns the success flag from ransac_internal().
bool ransac(const Correspondence *matched_points, int npoints,
            TransformationType type, MotionModel *motion_models,
            int num_desired_motions, bool *mem_alloc_failed) {
  // Reject model types we have no fitting parameters for. Which lower bound
  // applies depends on whether translation-only models are compiled in.
#if ALLOW_TRANSLATION_MODELS
  assert(type > IDENTITY && type < TRANS_TYPES);
#else
  assert(type > TRANSLATION && type < TRANS_TYPES);
#endif  // ALLOW_TRANSLATION_MODELS

  // Look up the per-type fitting callbacks/parameters and delegate.
  return ransac_internal(matched_points, npoints, motion_models,
                         num_desired_motions, ransac_model_info + type,
                         mem_alloc_failed);
}
subq $0x28, %rsp movb %dl, %al movq %rdi, 0x20(%rsp) movl %esi, 0x1c(%rsp) movb %al, 0x1b(%rsp) movq %rcx, 0x10(%rsp) movl %r8d, 0xc(%rsp) movq %r9, (%rsp) movq 0x20(%rsp), %rdi movl 0x1c(%rsp), %esi movq 0x10(%rsp), %rdx movl 0xc(%rsp), %ecx movzbl 0x1b(%rsp), %eax leaq 0x8db911(%rip), %r8 # 0xb9a360 imulq $0x18, %rax, %rax addq %rax, %r8 movq (%rsp), %r9 callq 0x2bea70 andb $0x1, %al addq $0x28, %rsp retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/aom_dsp/flow_estimation/ransac.c
aom_highbd_lpf_horizontal_8_dual_sse2
// Apply the high-bitdepth 8-tap loop filter across a horizontal edge for two
// adjacent 4-pixel columns at once (8 lanes of 16-bit pixels). `s` points at
// the first row below the edge, `p` is the stride in pixels, and the two
// blimit/limit/thresh triples give the per-column filter parameters; `bd` is
// the bit depth.
void aom_highbd_lpf_horizontal_8_dual_sse2(
    uint16_t *s, int p, const uint8_t *_blimit0, const uint8_t *_limit0,
    const uint8_t *_thresh0, const uint8_t *_blimit1, const uint8_t *_limit1,
    const uint8_t *_thresh1, int bd) {
  // Load the four rows above the edge (p3..p0) and the four below (q0..q3).
  __m128i p3 = _mm_loadu_si128((__m128i *)(s - 4 * p));
  __m128i p2 = _mm_loadu_si128((__m128i *)(s - 3 * p));
  __m128i p1 = _mm_loadu_si128((__m128i *)(s - 2 * p));
  __m128i p0 = _mm_loadu_si128((__m128i *)(s - 1 * p));
  __m128i q0 = _mm_loadu_si128((__m128i *)(s + 0 * p));
  __m128i q1 = _mm_loadu_si128((__m128i *)(s + 1 * p));
  __m128i q2 = _mm_loadu_si128((__m128i *)(s + 2 * p));
  __m128i q3 = _mm_loadu_si128((__m128i *)(s + 3 * p));

  // Filter in place through the register pointers.
  highbd_lpf_internal_8_dual_sse2(&p3, &q3, &p2, &q2, &p1, &q1, &p0, &q0,
                                  _blimit0, _limit0, _thresh0, _blimit1,
                                  _limit1, _thresh1, bd);

  // Only the six inner rows are modified; p3/q3 are context and not stored.
  _mm_storeu_si128((__m128i *)(s - 3 * p), p2);
  _mm_storeu_si128((__m128i *)(s - 2 * p), p1);
  _mm_storeu_si128((__m128i *)(s - 1 * p), p0);
  _mm_storeu_si128((__m128i *)(s + 0 * p), q0);
  _mm_storeu_si128((__m128i *)(s + 1 * p), q1);
  _mm_storeu_si128((__m128i *)(s + 2 * p), q2);
}
subq $0x1508, %rsp # imm = 0x1508 movl 0x1520(%rsp), %eax movq 0x1518(%rsp), %rax movq 0x1510(%rsp), %rax movq %rdi, 0xe8(%rsp) movl %esi, 0xe4(%rsp) movq %rdx, 0xd8(%rsp) movq %rcx, 0xd0(%rsp) movq %r8, 0xc8(%rsp) movq %r9, 0xc0(%rsp) movq 0xe8(%rsp), %rax movl 0xe4(%rsp), %ecx shll $0x2, %ecx movslq %ecx, %rcx addq %rcx, %rcx subq %rcx, %rax movq %rax, 0x1e8(%rsp) movq 0x1e8(%rsp), %rax movups (%rax), %xmm0 movaps %xmm0, 0x50(%rsp) movq 0xe8(%rsp), %rax movl 0xe4(%rsp), %edx movl %edx, %ecx leal (%rcx,%rcx,2), %ecx movslq %ecx, %rcx leaq (%rax,%rcx,2), %rax movq %rax, 0x1e0(%rsp) movq 0x1e0(%rsp), %rax movups (%rax), %xmm0 movaps %xmm0, 0x40(%rsp) movq 0xe8(%rsp), %rax movl 0xe4(%rsp), %edx movl %edx, %ecx leal (%rcx,%rcx,2), %ecx movslq %ecx, %rcx addq %rcx, %rcx subq %rcx, %rax movq %rax, 0x1d8(%rsp) movq 0x1d8(%rsp), %rax movups (%rax), %xmm0 movaps %xmm0, 0xb0(%rsp) movq 0xe8(%rsp), %rax movl 0xe4(%rsp), %ecx addl %ecx, %ecx movslq %ecx, %rcx leaq (%rax,%rcx,2), %rax movq %rax, 0x1d0(%rsp) movq 0x1d0(%rsp), %rax movups (%rax), %xmm0 movaps %xmm0, 0x60(%rsp) movq 0xe8(%rsp), %rax movl 0xe4(%rsp), %ecx addl %ecx, %ecx movslq %ecx, %rcx addq %rcx, %rcx subq %rcx, %rax movq %rax, 0x1c8(%rsp) movq 0x1c8(%rsp), %rax movups (%rax), %xmm0 movaps %xmm0, 0xa0(%rsp) movq 0xe8(%rsp), %rax movslq 0xe4(%rsp), %rcx leaq (%rax,%rcx,2), %rax movq %rax, 0x1c0(%rsp) movq 0x1c0(%rsp), %rax movups (%rax), %xmm0 movaps %xmm0, 0x70(%rsp) movq 0xe8(%rsp), %rax movslq 0xe4(%rsp), %rcx addq %rcx, %rcx subq %rcx, %rax movq %rax, 0x1b8(%rsp) movq 0x1b8(%rsp), %rax movups (%rax), %xmm0 movaps %xmm0, 0x90(%rsp) movq 0xe8(%rsp), %rax movq %rax, 0x1b0(%rsp) movq 0x1b0(%rsp), %rax movups (%rax), %xmm0 movaps %xmm0, 0x80(%rsp) movq 0xd8(%rsp), %r9 movq 0xd0(%rsp), %r8 movq 0xc8(%rsp), %rdi movq 0xc0(%rsp), %rsi movq 0x1510(%rsp), %rdx movq 0x1518(%rsp), %rcx movl 0x1520(%rsp), %eax leaq 0x50(%rsp), %r10 movq %r10, 0x468(%rsp) leaq 0x40(%rsp), %r10 movq %r10, 0x460(%rsp) leaq 0xb0(%rsp), %r10 
movq %r10, 0x458(%rsp) leaq 0x60(%rsp), %r10 movq %r10, 0x450(%rsp) leaq 0xa0(%rsp), %r10 movq %r10, 0x448(%rsp) leaq 0x70(%rsp), %r10 movq %r10, 0x440(%rsp) leaq 0x90(%rsp), %r10 movq %r10, 0x438(%rsp) leaq 0x80(%rsp), %r10 movq %r10, 0x430(%rsp) movq %r9, 0x428(%rsp) movq %r8, 0x420(%rsp) movq %rdi, 0x418(%rsp) movq %rsi, 0x410(%rsp) movq %rdx, 0x408(%rsp) movq %rcx, 0x400(%rsp) movl %eax, 0x3fc(%rsp) xorps %xmm0, %xmm0 movaps %xmm0, 0x470(%rsp) movaps 0x470(%rsp), %xmm0 movaps %xmm0, 0x2c0(%rsp) movw $0x4, 0x63e(%rsp) movw 0x63e(%rsp), %ax movw %ax, 0x2c(%rsp) movw %ax, 0x14de(%rsp) movw %ax, 0x14dc(%rsp) movw %ax, 0x14da(%rsp) movw %ax, 0x14d8(%rsp) movw %ax, 0x14d6(%rsp) movw %ax, 0x14d4(%rsp) movw %ax, 0x14d2(%rsp) movw %ax, 0x14d0(%rsp) movzwl 0x14de(%rsp), %eax movd %eax, %xmm1 movzwl 0x14dc(%rsp), %eax movd %eax, %xmm0 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movzwl 0x14da(%rsp), %eax movd %eax, %xmm2 movzwl 0x14d8(%rsp), %eax movd %eax, %xmm1 punpcklwd %xmm2, %xmm1 # xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movzwl 0x14d6(%rsp), %eax movd %eax, %xmm0 movzwl 0x14d4(%rsp), %eax movd %eax, %xmm2 punpcklwd %xmm0, %xmm2 # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] movzwl 0x14d2(%rsp), %eax movd %eax, %xmm3 movzwl 0x14d0(%rsp), %eax movd %eax, %xmm0 punpcklwd %xmm3, %xmm0 # xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x14c0(%rsp) movaps 0x14c0(%rsp), %xmm0 movaps %xmm0, 0x2b0(%rsp) movw $0x1, 0x63c(%rsp) movw 0x63c(%rsp), %ax movw %ax, 0x2e(%rsp) movw %ax, 0x1506(%rsp) movw %ax, 0x1504(%rsp) movw %ax, 0x1502(%rsp) movw %ax, 0x1500(%rsp) movw %ax, 0x14fe(%rsp) movw %ax, 0x14fc(%rsp) movw %ax, 0x14fa(%rsp) movw %ax, 0x14f8(%rsp) movzwl 
0x1506(%rsp), %eax movd %eax, %xmm1 movzwl 0x1504(%rsp), %eax movd %eax, %xmm0 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movzwl 0x1502(%rsp), %eax movd %eax, %xmm2 movzwl 0x1500(%rsp), %eax movd %eax, %xmm1 punpcklwd %xmm2, %xmm1 # xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movzwl 0x14fe(%rsp), %eax movd %eax, %xmm0 movzwl 0x14fc(%rsp), %eax movd %eax, %xmm2 punpcklwd %xmm0, %xmm2 # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] movzwl 0x14fa(%rsp), %eax movd %eax, %xmm3 movzwl 0x14f8(%rsp), %eax movd %eax, %xmm0 punpcklwd %xmm3, %xmm0 # xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x14e0(%rsp) movaps 0x14e0(%rsp), %xmm0 movaps %xmm0, 0x2a0(%rsp) movaps 0x2a0(%rsp), %xmm0 movaps %xmm0, 0x620(%rsp) movaps %xmm0, 0x610(%rsp) movaps 0x620(%rsp), %xmm0 movaps 0x610(%rsp), %xmm1 pcmpeqw %xmm1, %xmm0 movaps %xmm0, 0x290(%rsp) movq 0x428(%rsp), %rdi movq 0x420(%rsp), %rsi movq 0x418(%rsp), %rdx movq 0x410(%rsp), %rcx movq 0x408(%rsp), %r8 movq 0x400(%rsp), %r9 movl 0x3fc(%rsp), %r10d movq %rsp, %rax leaq 0x3b0(%rsp), %r11 movq %r11, 0x30(%rsp) movq %r11, 0x20(%rax) leaq 0x3c0(%rsp), %r11 movq %r11, 0x38(%rsp) movq %r11, 0x18(%rax) leaq 0x3d0(%rsp), %r11 movq %r11, 0x10(%rax) leaq 0x3e0(%rsp), %r11 movq %r11, 0x8(%rax) movl %r10d, (%rax) callq 0x3264f0 movq 0x30(%rsp), %rdx movq 0x38(%rsp), %r9 movq 0x438(%rsp), %rax movaps (%rax), %xmm1 movq 0x430(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, 0xf70(%rsp) movaps %xmm0, 0xf60(%rsp) movaps 0xf70(%rsp), %xmm1 movaps 0xf60(%rsp), %xmm0 movaps %xmm1, 0x1030(%rsp) movaps %xmm0, 0x1020(%rsp) movaps 0x1030(%rsp), %xmm1 movaps 0x1020(%rsp), %xmm0 psubusw %xmm0, %xmm1 movaps 0xf60(%rsp), %xmm2 movaps 0xf70(%rsp), 
%xmm0 movaps %xmm2, 0x1010(%rsp) movaps %xmm0, 0x1000(%rsp) movaps 0x1010(%rsp), %xmm0 movaps 0x1000(%rsp), %xmm2 psubusw %xmm2, %xmm0 movaps %xmm1, 0xf90(%rsp) movaps %xmm0, 0xf80(%rsp) movaps 0xf90(%rsp), %xmm0 movaps 0xf80(%rsp), %xmm1 por %xmm1, %xmm0 movaps %xmm0, 0x300(%rsp) movq 0x448(%rsp), %rax movaps (%rax), %xmm1 movq 0x440(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, 0xf30(%rsp) movaps %xmm0, 0xf20(%rsp) movaps 0xf30(%rsp), %xmm1 movaps 0xf20(%rsp), %xmm0 movaps %xmm1, 0x1070(%rsp) movaps %xmm0, 0x1060(%rsp) movaps 0x1070(%rsp), %xmm1 movaps 0x1060(%rsp), %xmm0 psubusw %xmm0, %xmm1 movaps 0xf20(%rsp), %xmm2 movaps 0xf30(%rsp), %xmm0 movaps %xmm2, 0x1050(%rsp) movaps %xmm0, 0x1040(%rsp) movaps 0x1050(%rsp), %xmm0 movaps 0x1040(%rsp), %xmm2 psubusw %xmm2, %xmm0 movaps %xmm1, 0xf50(%rsp) movaps %xmm0, 0xf40(%rsp) movaps 0xf50(%rsp), %xmm0 movaps 0xf40(%rsp), %xmm1 por %xmm1, %xmm0 movaps %xmm0, 0x310(%rsp) movaps 0x300(%rsp), %xmm0 movaps %xmm0, 0xff0(%rsp) movaps %xmm0, 0xfe0(%rsp) movaps 0xff0(%rsp), %xmm0 movaps 0xfe0(%rsp), %xmm1 paddusw %xmm1, %xmm0 movaps %xmm0, 0x300(%rsp) movaps 0x310(%rsp), %xmm0 movaps %xmm0, 0xaf0(%rsp) movl $0x1, 0xaec(%rsp) movaps 0xaf0(%rsp), %xmm0 movd 0xaec(%rsp), %xmm1 psrlw %xmm1, %xmm0 movaps %xmm0, 0x310(%rsp) movaps 0x300(%rsp), %xmm1 movaps 0x310(%rsp), %xmm0 movaps %xmm1, 0xfd0(%rsp) movaps %xmm0, 0xfc0(%rsp) movaps 0xfd0(%rsp), %xmm1 movaps 0xfc0(%rsp), %xmm0 paddusw %xmm0, %xmm1 movaps 0x3e0(%rsp), %xmm0 movaps %xmm1, 0x1350(%rsp) movaps %xmm0, 0x1340(%rsp) movaps 0x1350(%rsp), %xmm0 movaps 0x1340(%rsp), %xmm1 psubusw %xmm1, %xmm0 movaps %xmm0, 0x3a0(%rsp) movaps 0x3a0(%rsp), %xmm1 movaps 0x2c0(%rsp), %xmm0 movaps %xmm1, 0x600(%rsp) movaps %xmm0, 0x5f0(%rsp) movaps 0x600(%rsp), %xmm1 movaps 0x5f0(%rsp), %xmm0 pcmpeqw %xmm0, %xmm1 movaps 0x290(%rsp), %xmm0 movaps %xmm1, 0x1370(%rsp) movaps %xmm0, 0x1360(%rsp) movaps 0x1370(%rsp), %xmm0 movaps 0x1360(%rsp), %xmm1 pxor %xmm1, %xmm0 movaps %xmm0, 0x3a0(%rsp) movaps 
0x3a0(%rsp), %xmm1 movaps 0x3d0(%rsp), %xmm2 movaps 0x2a0(%rsp), %xmm0 movaps %xmm2, 0xfb0(%rsp) movaps %xmm0, 0xfa0(%rsp) movaps 0xfb0(%rsp), %xmm0 movaps 0xfa0(%rsp), %xmm2 paddusw %xmm2, %xmm0 movaps %xmm1, 0x570(%rsp) movaps %xmm0, 0x560(%rsp) movaps 0x570(%rsp), %xmm0 movaps 0x560(%rsp), %xmm1 pand %xmm1, %xmm0 movaps %xmm0, 0x3a0(%rsp) movq 0x468(%rsp), %rax movaps (%rax), %xmm1 movq 0x458(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, 0xef0(%rsp) movaps %xmm0, 0xee0(%rsp) movaps 0xef0(%rsp), %xmm1 movaps 0xee0(%rsp), %xmm0 movaps %xmm1, 0x10b0(%rsp) movaps %xmm0, 0x10a0(%rsp) movaps 0x10b0(%rsp), %xmm1 movaps 0x10a0(%rsp), %xmm0 psubusw %xmm0, %xmm1 movaps 0xee0(%rsp), %xmm2 movaps 0xef0(%rsp), %xmm0 movaps %xmm2, 0x1090(%rsp) movaps %xmm0, 0x1080(%rsp) movaps 0x1090(%rsp), %xmm0 movaps 0x1080(%rsp), %xmm2 psubusw %xmm2, %xmm0 movaps %xmm1, 0xf10(%rsp) movaps %xmm0, 0xf00(%rsp) movaps 0xf10(%rsp), %xmm1 movaps 0xf00(%rsp), %xmm0 por %xmm0, %xmm1 movq 0x458(%rsp), %rax movaps (%rax), %xmm2 movq 0x448(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm2, 0xeb0(%rsp) movaps %xmm0, 0xea0(%rsp) movaps 0xeb0(%rsp), %xmm2 movaps 0xea0(%rsp), %xmm0 movaps %xmm2, 0x10f0(%rsp) movaps %xmm0, 0x10e0(%rsp) movaps 0x10f0(%rsp), %xmm2 movaps 0x10e0(%rsp), %xmm0 psubusw %xmm0, %xmm2 movaps 0xea0(%rsp), %xmm3 movaps 0xeb0(%rsp), %xmm0 movaps %xmm3, 0x10d0(%rsp) movaps %xmm0, 0x10c0(%rsp) movaps 0x10d0(%rsp), %xmm0 movaps 0x10c0(%rsp), %xmm3 psubusw %xmm3, %xmm0 movaps %xmm2, 0xed0(%rsp) movaps %xmm0, 0xec0(%rsp) movaps 0xed0(%rsp), %xmm0 movaps 0xec0(%rsp), %xmm2 por %xmm2, %xmm0 movaps %xmm1, 0x14b0(%rsp) movaps %xmm0, 0x14a0(%rsp) movaps 0x14b0(%rsp), %xmm0 movaps 0x14a0(%rsp), %xmm1 pmaxsw %xmm1, %xmm0 movaps %xmm0, 0x2f0(%rsp) movq 0x448(%rsp), %rax movaps (%rax), %xmm1 movq 0x438(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, 0xe70(%rsp) movaps %xmm0, 0xe60(%rsp) movaps 0xe70(%rsp), %xmm1 movaps 0xe60(%rsp), %xmm0 movaps %xmm1, 0x1130(%rsp) movaps %xmm0, 0x1120(%rsp) movaps 
0x1130(%rsp), %xmm1 movaps 0x1120(%rsp), %xmm0 psubusw %xmm0, %xmm1 movaps 0xe60(%rsp), %xmm2 movaps 0xe70(%rsp), %xmm0 movaps %xmm2, 0x1110(%rsp) movaps %xmm0, 0x1100(%rsp) movaps 0x1110(%rsp), %xmm0 movaps 0x1100(%rsp), %xmm2 psubusw %xmm2, %xmm0 movaps %xmm1, 0xe90(%rsp) movaps %xmm0, 0xe80(%rsp) movaps 0xe90(%rsp), %xmm1 movaps 0xe80(%rsp), %xmm0 por %xmm0, %xmm1 movq 0x440(%rsp), %rax movaps (%rax), %xmm2 movq 0x430(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm2, 0xe30(%rsp) movaps %xmm0, 0xe20(%rsp) movaps 0xe30(%rsp), %xmm2 movaps 0xe20(%rsp), %xmm0 movaps %xmm2, 0x1170(%rsp) movaps %xmm0, 0x1160(%rsp) movaps 0x1170(%rsp), %xmm2 movaps 0x1160(%rsp), %xmm0 psubusw %xmm0, %xmm2 movaps 0xe20(%rsp), %xmm3 movaps 0xe30(%rsp), %xmm0 movaps %xmm3, 0x1150(%rsp) movaps %xmm0, 0x1140(%rsp) movaps 0x1150(%rsp), %xmm0 movaps 0x1140(%rsp), %xmm3 psubusw %xmm3, %xmm0 movaps %xmm2, 0xe50(%rsp) movaps %xmm0, 0xe40(%rsp) movaps 0xe50(%rsp), %xmm0 movaps 0xe40(%rsp), %xmm2 por %xmm2, %xmm0 movaps %xmm1, 0x1490(%rsp) movaps %xmm0, 0x1480(%rsp) movaps 0x1490(%rsp), %xmm0 movaps 0x1480(%rsp), %xmm1 pmaxsw %xmm1, %xmm0 movaps %xmm0, 0x2e0(%rsp) movaps 0x2f0(%rsp), %xmm1 movaps 0x2e0(%rsp), %xmm0 movaps %xmm1, 0x1470(%rsp) movaps %xmm0, 0x1460(%rsp) movaps 0x1470(%rsp), %xmm0 movaps 0x1460(%rsp), %xmm1 pmaxsw %xmm1, %xmm0 movaps %xmm0, 0x2f0(%rsp) movq 0x450(%rsp), %rax movaps (%rax), %xmm1 movq 0x440(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, 0xdf0(%rsp) movaps %xmm0, 0xde0(%rsp) movaps 0xdf0(%rsp), %xmm1 movaps 0xde0(%rsp), %xmm0 movaps %xmm1, 0x11b0(%rsp) movaps %xmm0, 0x11a0(%rsp) movaps 0x11b0(%rsp), %xmm1 movaps 0x11a0(%rsp), %xmm0 psubusw %xmm0, %xmm1 movaps 0xde0(%rsp), %xmm2 movaps 0xdf0(%rsp), %xmm0 movaps %xmm2, 0x1190(%rsp) movaps %xmm0, 0x1180(%rsp) movaps 0x1190(%rsp), %xmm0 movaps 0x1180(%rsp), %xmm2 psubusw %xmm2, %xmm0 movaps %xmm1, 0xe10(%rsp) movaps %xmm0, 0xe00(%rsp) movaps 0xe10(%rsp), %xmm1 movaps 0xe00(%rsp), %xmm0 por %xmm0, %xmm1 movq 0x450(%rsp), %rax 
movaps (%rax), %xmm2 movq 0x460(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm2, 0xdb0(%rsp) movaps %xmm0, 0xda0(%rsp) movaps 0xdb0(%rsp), %xmm2 movaps 0xda0(%rsp), %xmm0 movaps %xmm2, 0x11f0(%rsp) movaps %xmm0, 0x11e0(%rsp) movaps 0x11f0(%rsp), %xmm2 movaps 0x11e0(%rsp), %xmm0 psubusw %xmm0, %xmm2 movaps 0xda0(%rsp), %xmm3 movaps 0xdb0(%rsp), %xmm0 movaps %xmm3, 0x11d0(%rsp) movaps %xmm0, 0x11c0(%rsp) movaps 0x11d0(%rsp), %xmm0 movaps 0x11c0(%rsp), %xmm3 psubusw %xmm3, %xmm0 movaps %xmm2, 0xdd0(%rsp) movaps %xmm0, 0xdc0(%rsp) movaps 0xdd0(%rsp), %xmm0 movaps 0xdc0(%rsp), %xmm2 por %xmm2, %xmm0 movaps %xmm1, 0x1450(%rsp) movaps %xmm0, 0x1440(%rsp) movaps 0x1450(%rsp), %xmm0 movaps 0x1440(%rsp), %xmm1 pmaxsw %xmm1, %xmm0 movaps %xmm0, 0x2d0(%rsp) movaps 0x2d0(%rsp), %xmm1 movaps 0x2f0(%rsp), %xmm0 movaps %xmm1, 0x1430(%rsp) movaps %xmm0, 0x1420(%rsp) movaps 0x1430(%rsp), %xmm0 movaps 0x1420(%rsp), %xmm1 pmaxsw %xmm1, %xmm0 movaps %xmm0, 0x2d0(%rsp) movaps 0x2d0(%rsp), %xmm1 movaps 0x3a0(%rsp), %xmm0 movaps %xmm1, 0x1410(%rsp) movaps %xmm0, 0x1400(%rsp) movaps 0x1410(%rsp), %xmm0 movaps 0x1400(%rsp), %xmm1 pmaxsw %xmm1, %xmm0 movaps %xmm0, 0x3a0(%rsp) movaps 0x3a0(%rsp), %xmm1 movaps 0x3d0(%rsp), %xmm0 movaps %xmm1, 0x1330(%rsp) movaps %xmm0, 0x1320(%rsp) movaps 0x1330(%rsp), %xmm0 movaps 0x1320(%rsp), %xmm1 psubusw %xmm1, %xmm0 movaps %xmm0, 0x3a0(%rsp) movaps 0x3a0(%rsp), %xmm1 movaps 0x2c0(%rsp), %xmm0 movaps %xmm1, 0x5e0(%rsp) movaps %xmm0, 0x5d0(%rsp) movaps 0x5e0(%rsp), %xmm0 movaps 0x5d0(%rsp), %xmm1 pcmpeqw %xmm1, %xmm0 movaps %xmm0, 0x3a0(%rsp) movq 0x438(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm0, 0x230(%rsp) movq 0x448(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm0, 0x240(%rsp) movq 0x430(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm0, 0x210(%rsp) movq 0x440(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm0, 0x220(%rsp) movl 0x3fc(%rsp), %ecx movq %rsp, %rax movq %rdx, 0x8(%rax) movl %ecx, (%rax) leaq 0x230(%rsp), %rdi leaq 0x210(%rsp), %rsi leaq 0x270(%rsp), 
%rdx leaq 0x250(%rsp), %rcx leaq 0x3a0(%rsp), %r8 callq 0x3271e0 movq 0x458(%rsp), %rax movaps (%rax), %xmm1 movq 0x438(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, 0xd70(%rsp) movaps %xmm0, 0xd60(%rsp) movaps 0xd70(%rsp), %xmm1 movaps 0xd60(%rsp), %xmm0 movaps %xmm1, 0x1230(%rsp) movaps %xmm0, 0x1220(%rsp) movaps 0x1230(%rsp), %xmm1 movaps 0x1220(%rsp), %xmm0 psubusw %xmm0, %xmm1 movaps 0xd60(%rsp), %xmm2 movaps 0xd70(%rsp), %xmm0 movaps %xmm2, 0x1210(%rsp) movaps %xmm0, 0x1200(%rsp) movaps 0x1210(%rsp), %xmm0 movaps 0x1200(%rsp), %xmm2 psubusw %xmm2, %xmm0 movaps %xmm1, 0xd90(%rsp) movaps %xmm0, 0xd80(%rsp) movaps 0xd90(%rsp), %xmm1 movaps 0xd80(%rsp), %xmm0 por %xmm0, %xmm1 movq 0x450(%rsp), %rax movaps (%rax), %xmm2 movq 0x430(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm2, 0xd30(%rsp) movaps %xmm0, 0xd20(%rsp) movaps 0xd30(%rsp), %xmm2 movaps 0xd20(%rsp), %xmm0 movaps %xmm2, 0x1270(%rsp) movaps %xmm0, 0x1260(%rsp) movaps 0x1270(%rsp), %xmm2 movaps 0x1260(%rsp), %xmm0 psubusw %xmm0, %xmm2 movaps 0xd20(%rsp), %xmm3 movaps 0xd30(%rsp), %xmm0 movaps %xmm3, 0x1250(%rsp) movaps %xmm0, 0x1240(%rsp) movaps 0x1250(%rsp), %xmm0 movaps 0x1240(%rsp), %xmm3 psubusw %xmm3, %xmm0 movaps %xmm2, 0xd50(%rsp) movaps %xmm0, 0xd40(%rsp) movaps 0xd50(%rsp), %xmm0 movaps 0xd40(%rsp), %xmm2 por %xmm2, %xmm0 movaps %xmm1, 0x13f0(%rsp) movaps %xmm0, 0x13e0(%rsp) movaps 0x13f0(%rsp), %xmm0 movaps 0x13e0(%rsp), %xmm1 pmaxsw %xmm1, %xmm0 movaps %xmm0, 0x390(%rsp) movaps 0x2e0(%rsp), %xmm1 movaps 0x390(%rsp), %xmm0 movaps %xmm1, 0x13d0(%rsp) movaps %xmm0, 0x13c0(%rsp) movaps 0x13d0(%rsp), %xmm0 movaps 0x13c0(%rsp), %xmm1 pmaxsw %xmm1, %xmm0 movaps %xmm0, 0x390(%rsp) movq 0x468(%rsp), %rax movaps (%rax), %xmm1 movq 0x438(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, 0xcf0(%rsp) movaps %xmm0, 0xce0(%rsp) movaps 0xcf0(%rsp), %xmm1 movaps 0xce0(%rsp), %xmm0 movaps %xmm1, 0x12b0(%rsp) movaps %xmm0, 0x12a0(%rsp) movaps 0x12b0(%rsp), %xmm1 movaps 0x12a0(%rsp), %xmm0 psubusw %xmm0, %xmm1 movaps 
0xce0(%rsp), %xmm2 movaps 0xcf0(%rsp), %xmm0 movaps %xmm2, 0x1290(%rsp) movaps %xmm0, 0x1280(%rsp) movaps 0x1290(%rsp), %xmm0 movaps 0x1280(%rsp), %xmm2 psubusw %xmm2, %xmm0 movaps %xmm1, 0xd10(%rsp) movaps %xmm0, 0xd00(%rsp) movaps 0xd10(%rsp), %xmm1 movaps 0xd00(%rsp), %xmm0 por %xmm0, %xmm1 movq 0x460(%rsp), %rax movaps (%rax), %xmm2 movq 0x430(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm2, 0xcb0(%rsp) movaps %xmm0, 0xca0(%rsp) movaps 0xcb0(%rsp), %xmm2 movaps 0xca0(%rsp), %xmm0 movaps %xmm2, 0x12f0(%rsp) movaps %xmm0, 0x12e0(%rsp) movaps 0x12f0(%rsp), %xmm2 movaps 0x12e0(%rsp), %xmm0 psubusw %xmm0, %xmm2 movaps 0xca0(%rsp), %xmm3 movaps 0xcb0(%rsp), %xmm0 movaps %xmm3, 0x12d0(%rsp) movaps %xmm0, 0x12c0(%rsp) movaps 0x12d0(%rsp), %xmm0 movaps 0x12c0(%rsp), %xmm3 psubusw %xmm3, %xmm0 movaps %xmm2, 0xcd0(%rsp) movaps %xmm0, 0xcc0(%rsp) movaps 0xcd0(%rsp), %xmm0 movaps 0xcc0(%rsp), %xmm2 por %xmm2, %xmm0 movaps %xmm1, 0x13b0(%rsp) movaps %xmm0, 0x13a0(%rsp) movaps 0x13b0(%rsp), %xmm0 movaps 0x13a0(%rsp), %xmm1 pmaxsw %xmm1, %xmm0 movaps %xmm0, 0x2f0(%rsp) movaps 0x2f0(%rsp), %xmm1 movaps 0x390(%rsp), %xmm0 movaps %xmm1, 0x1390(%rsp) movaps %xmm0, 0x1380(%rsp) movaps 0x1390(%rsp), %xmm0 movaps 0x1380(%rsp), %xmm1 pmaxsw %xmm1, %xmm0 movaps %xmm0, 0x390(%rsp) movaps 0x390(%rsp), %xmm1 movaps 0x2a0(%rsp), %xmm0 movl 0x3fc(%rsp), %eax addl $-0x8, %eax movaps %xmm0, 0xc90(%rsp) movl %eax, 0xc8c(%rsp) movaps 0xc90(%rsp), %xmm0 movd 0xc8c(%rsp), %xmm2 psllw %xmm2, %xmm0 movaps %xmm1, 0x1310(%rsp) movaps %xmm0, 0x1300(%rsp) movaps 0x1310(%rsp), %xmm0 movaps 0x1300(%rsp), %xmm1 psubusw %xmm1, %xmm0 movaps %xmm0, 0x390(%rsp) movaps 0x390(%rsp), %xmm1 movaps 0x2c0(%rsp), %xmm0 movaps %xmm1, 0x5c0(%rsp) movaps %xmm0, 0x5b0(%rsp) movaps 0x5c0(%rsp), %xmm0 movaps 0x5b0(%rsp), %xmm1 pcmpeqw %xmm1, %xmm0 movaps %xmm0, 0x390(%rsp) movaps 0x390(%rsp), %xmm1 movaps 0x3a0(%rsp), %xmm0 movaps %xmm1, 0x550(%rsp) movaps %xmm0, 0x540(%rsp) movaps 0x550(%rsp), %xmm0 movaps 0x540(%rsp), 
%xmm1 pand %xmm1, %xmm0 movaps %xmm0, 0x390(%rsp) movaps 0x390(%rsp), %xmm1 movaps 0x2c0(%rsp), %xmm0 movaps %xmm1, 0x5a0(%rsp) movaps %xmm0, 0x590(%rsp) movaps 0x5a0(%rsp), %xmm0 movaps 0x590(%rsp), %xmm1 pcmpeqw %xmm1, %xmm0 movdqa %xmm0, 0x580(%rsp) movdqa 0x580(%rsp), %xmm0 pmovmskb %xmm0, %ecx movl $0xffff, %eax # imm = 0xFFFF cmpl %ecx, %eax je 0x3132b0 movq 0x468(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm0, 0x910(%rsp) movaps %xmm0, 0x900(%rsp) movaps 0x910(%rsp), %xmm1 movaps 0x900(%rsp), %xmm0 paddw %xmm0, %xmm1 movq 0x458(%rsp), %rax movaps (%rax), %xmm2 movq 0x448(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm2, 0x8f0(%rsp) movaps %xmm0, 0x8e0(%rsp) movaps 0x8f0(%rsp), %xmm0 movaps 0x8e0(%rsp), %xmm2 paddw %xmm2, %xmm0 movaps %xmm1, 0x8d0(%rsp) movaps %xmm0, 0x8c0(%rsp) movaps 0x8d0(%rsp), %xmm0 movaps 0x8c0(%rsp), %xmm1 paddw %xmm1, %xmm0 movaps %xmm0, 0x200(%rsp) movaps 0x200(%rsp), %xmm1 movaps 0x2b0(%rsp), %xmm0 movaps %xmm1, 0x8b0(%rsp) movaps %xmm0, 0x8a0(%rsp) movaps 0x8b0(%rsp), %xmm1 movaps 0x8a0(%rsp), %xmm0 paddw %xmm0, %xmm1 movq 0x438(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, 0x890(%rsp) movaps %xmm0, 0x880(%rsp) movaps 0x890(%rsp), %xmm0 movaps 0x880(%rsp), %xmm1 paddw %xmm1, %xmm0 movaps %xmm0, 0x200(%rsp) movq 0x430(%rsp), %rax movaps (%rax), %xmm1 movq 0x458(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, 0x870(%rsp) movaps %xmm0, 0x860(%rsp) movaps 0x870(%rsp), %xmm1 movaps 0x860(%rsp), %xmm0 paddw %xmm0, %xmm1 movq 0x468(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, 0x850(%rsp) movaps %xmm0, 0x840(%rsp) movaps 0x850(%rsp), %xmm0 movaps 0x840(%rsp), %xmm1 paddw %xmm1, %xmm0 movaps %xmm0, 0x1f0(%rsp) movaps 0x200(%rsp), %xmm1 movaps 0x1f0(%rsp), %xmm0 movaps %xmm1, 0x830(%rsp) movaps %xmm0, 0x820(%rsp) movaps 0x830(%rsp), %xmm0 movaps 0x820(%rsp), %xmm1 paddw %xmm1, %xmm0 movaps %xmm0, 0xad0(%rsp) movl $0x3, 0xacc(%rsp) movaps 0xad0(%rsp), %xmm0 movd 0xacc(%rsp), %xmm1 psrlw %xmm1, %xmm0 movaps %xmm0, 0x370(%rsp) movq 0x430(%rsp), 
%rax movaps (%rax), %xmm1 movq 0x440(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, 0x810(%rsp) movaps %xmm0, 0x800(%rsp) movaps 0x810(%rsp), %xmm1 movaps 0x800(%rsp), %xmm0 paddw %xmm0, %xmm1 movq 0x448(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, 0x7f0(%rsp) movaps %xmm0, 0x7e0(%rsp) movaps 0x7f0(%rsp), %xmm0 movaps 0x7e0(%rsp), %xmm1 paddw %xmm1, %xmm0 movaps %xmm0, 0x1f0(%rsp) movaps 0x200(%rsp), %xmm1 movaps 0x1f0(%rsp), %xmm0 movaps %xmm1, 0x7d0(%rsp) movaps %xmm0, 0x7c0(%rsp) movaps 0x7d0(%rsp), %xmm0 movaps 0x7c0(%rsp), %xmm1 paddw %xmm1, %xmm0 movaps %xmm0, 0xab0(%rsp) movl $0x3, 0xaac(%rsp) movaps 0xab0(%rsp), %xmm0 movd 0xaac(%rsp), %xmm1 psrlw %xmm1, %xmm0 movaps %xmm0, 0x350(%rsp) movaps 0x200(%rsp), %xmm1 movq 0x468(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, 0xa10(%rsp) movaps %xmm0, 0xa00(%rsp) movaps 0xa10(%rsp), %xmm1 movaps 0xa00(%rsp), %xmm0 psubw %xmm0, %xmm1 movq 0x450(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, 0x7b0(%rsp) movaps %xmm0, 0x7a0(%rsp) movaps 0x7b0(%rsp), %xmm0 movaps 0x7a0(%rsp), %xmm1 paddw %xmm1, %xmm0 movaps %xmm0, 0x200(%rsp) movaps 0x1f0(%rsp), %xmm1 movq 0x448(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, 0x9f0(%rsp) movaps %xmm0, 0x9e0(%rsp) movaps 0x9f0(%rsp), %xmm1 movaps 0x9e0(%rsp), %xmm0 psubw %xmm0, %xmm1 movq 0x438(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, 0x790(%rsp) movaps %xmm0, 0x780(%rsp) movaps 0x790(%rsp), %xmm0 movaps 0x780(%rsp), %xmm1 paddw %xmm1, %xmm0 movaps %xmm0, 0x1f0(%rsp) movaps 0x200(%rsp), %xmm1 movaps 0x1f0(%rsp), %xmm0 movaps %xmm1, 0x770(%rsp) movaps %xmm0, 0x760(%rsp) movaps 0x770(%rsp), %xmm0 movaps 0x760(%rsp), %xmm1 paddw %xmm1, %xmm0 movaps %xmm0, 0xa90(%rsp) movl $0x3, 0xa8c(%rsp) movaps 0xa90(%rsp), %xmm0 movd 0xa8c(%rsp), %xmm1 psrlw %xmm1, %xmm0 movaps %xmm0, 0x340(%rsp) movaps 0x200(%rsp), %xmm1 movq 0x468(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, 0x9d0(%rsp) movaps %xmm0, 0x9c0(%rsp) movaps 0x9d0(%rsp), %xmm1 movaps 0x9c0(%rsp), %xmm0 psubw %xmm0, %xmm1 movq 
0x460(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, 0x750(%rsp) movaps %xmm0, 0x740(%rsp) movaps 0x750(%rsp), %xmm0 movaps 0x740(%rsp), %xmm1 paddw %xmm1, %xmm0 movaps %xmm0, 0x200(%rsp) movaps 0x1f0(%rsp), %xmm1 movq 0x438(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, 0x9b0(%rsp) movaps %xmm0, 0x9a0(%rsp) movaps 0x9b0(%rsp), %xmm1 movaps 0x9a0(%rsp), %xmm0 psubw %xmm0, %xmm1 movq 0x430(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, 0x730(%rsp) movaps %xmm0, 0x720(%rsp) movaps 0x730(%rsp), %xmm0 movaps 0x720(%rsp), %xmm1 paddw %xmm1, %xmm0 movaps %xmm0, 0x1f0(%rsp) movaps 0x200(%rsp), %xmm1 movaps 0x1f0(%rsp), %xmm0 movaps %xmm1, 0x710(%rsp) movaps %xmm0, 0x700(%rsp) movaps 0x710(%rsp), %xmm0 movaps 0x700(%rsp), %xmm1 paddw %xmm1, %xmm0 movaps %xmm0, 0xa70(%rsp) movl $0x3, 0xa6c(%rsp) movaps 0xa70(%rsp), %xmm0 movd 0xa6c(%rsp), %xmm1 psrlw %xmm1, %xmm0 movaps %xmm0, 0x330(%rsp) movaps 0x200(%rsp), %xmm1 movq 0x458(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, 0x990(%rsp) movaps %xmm0, 0x980(%rsp) movaps 0x990(%rsp), %xmm1 movaps 0x980(%rsp), %xmm0 psubw %xmm0, %xmm1 movq 0x460(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, 0x6f0(%rsp) movaps %xmm0, 0x6e0(%rsp) movaps 0x6f0(%rsp), %xmm0 movaps 0x6e0(%rsp), %xmm1 paddw %xmm1, %xmm0 movaps %xmm0, 0x200(%rsp) movaps 0x1f0(%rsp), %xmm1 movq 0x430(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, 0x970(%rsp) movaps %xmm0, 0x960(%rsp) movaps 0x970(%rsp), %xmm1 movaps 0x960(%rsp), %xmm0 psubw %xmm0, %xmm1 movq 0x440(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, 0x6d0(%rsp) movaps %xmm0, 0x6c0(%rsp) movaps 0x6d0(%rsp), %xmm0 movaps 0x6c0(%rsp), %xmm1 paddw %xmm1, %xmm0 movaps %xmm0, 0x1f0(%rsp) movaps 0x200(%rsp), %xmm1 movaps 0x1f0(%rsp), %xmm0 movaps %xmm1, 0x6b0(%rsp) movaps %xmm0, 0x6a0(%rsp) movaps 0x6b0(%rsp), %xmm0 movaps 0x6a0(%rsp), %xmm1 paddw %xmm1, %xmm0 movaps %xmm0, 0xa50(%rsp) movl $0x3, 0xa4c(%rsp) movaps 0xa50(%rsp), %xmm0 movd 0xa4c(%rsp), %xmm1 psrlw %xmm1, %xmm0 movaps %xmm0, 0x320(%rsp) movaps 
0x200(%rsp), %xmm1 movq 0x448(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, 0x950(%rsp) movaps %xmm0, 0x940(%rsp) movaps 0x950(%rsp), %xmm1 movaps 0x940(%rsp), %xmm0 psubw %xmm0, %xmm1 movq 0x460(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, 0x690(%rsp) movaps %xmm0, 0x680(%rsp) movaps 0x690(%rsp), %xmm0 movaps 0x680(%rsp), %xmm1 paddw %xmm1, %xmm0 movaps %xmm0, 0x200(%rsp) movaps 0x1f0(%rsp), %xmm1 movq 0x440(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, 0x930(%rsp) movaps %xmm0, 0x920(%rsp) movaps 0x930(%rsp), %xmm1 movaps 0x920(%rsp), %xmm0 psubw %xmm0, %xmm1 movq 0x450(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, 0x670(%rsp) movaps %xmm0, 0x660(%rsp) movaps 0x670(%rsp), %xmm0 movaps 0x660(%rsp), %xmm1 paddw %xmm1, %xmm0 movaps %xmm0, 0x1f0(%rsp) movaps 0x200(%rsp), %xmm1 movaps 0x1f0(%rsp), %xmm0 movaps %xmm1, 0x650(%rsp) movaps %xmm0, 0x640(%rsp) movaps 0x650(%rsp), %xmm0 movaps 0x640(%rsp), %xmm1 paddw %xmm1, %xmm0 movaps %xmm0, 0xa30(%rsp) movl $0x3, 0xa2c(%rsp) movaps 0xa30(%rsp), %xmm0 movd 0xa2c(%rsp), %xmm1 psrlw %xmm1, %xmm0 movaps %xmm0, 0x360(%rsp) movaps 0x390(%rsp), %xmm1 movaps 0x250(%rsp), %xmm0 movaps %xmm1, 0xbb0(%rsp) movaps %xmm0, 0xba0(%rsp) movaps 0xbb0(%rsp), %xmm0 pcmpeqd %xmm1, %xmm1 movaps 0xba0(%rsp), %xmm2 pandn %xmm2, %xmm0 movaps %xmm0, 0x250(%rsp) movaps 0x390(%rsp), %xmm2 movaps 0x330(%rsp), %xmm0 movaps %xmm2, 0x530(%rsp) movaps %xmm0, 0x520(%rsp) movaps 0x530(%rsp), %xmm0 movaps 0x520(%rsp), %xmm2 pand %xmm2, %xmm0 movaps %xmm0, 0x330(%rsp) movaps 0x250(%rsp), %xmm2 movaps 0x330(%rsp), %xmm0 movaps %xmm2, 0xc70(%rsp) movaps %xmm0, 0xc60(%rsp) movaps 0xc70(%rsp), %xmm0 movaps 0xc60(%rsp), %xmm2 por %xmm2, %xmm0 movq 0x430(%rsp), %rax movaps %xmm0, (%rax) movaps 0x390(%rsp), %xmm2 movaps 0x260(%rsp), %xmm0 movaps %xmm2, 0xb90(%rsp) movaps %xmm0, 0xb80(%rsp) movaps 0xb90(%rsp), %xmm0 movaps 0xb80(%rsp), %xmm2 pandn %xmm2, %xmm0 movaps %xmm0, 0x260(%rsp) movaps 0x390(%rsp), %xmm2 movaps 0x320(%rsp), %xmm0 movaps %xmm2, 
0x510(%rsp) movaps %xmm0, 0x500(%rsp) movaps 0x510(%rsp), %xmm0 movaps 0x500(%rsp), %xmm2 pand %xmm2, %xmm0 movaps %xmm0, 0x320(%rsp) movaps 0x260(%rsp), %xmm2 movaps 0x320(%rsp), %xmm0 movaps %xmm2, 0xc50(%rsp) movaps %xmm0, 0xc40(%rsp) movaps 0xc50(%rsp), %xmm0 movaps 0xc40(%rsp), %xmm2 por %xmm2, %xmm0 movq 0x440(%rsp), %rax movaps %xmm0, (%rax) movaps 0x390(%rsp), %xmm2 movaps 0x270(%rsp), %xmm0 movaps %xmm2, 0xb70(%rsp) movaps %xmm0, 0xb60(%rsp) movaps 0xb70(%rsp), %xmm0 movaps 0xb60(%rsp), %xmm2 pandn %xmm2, %xmm0 movaps %xmm0, 0x270(%rsp) movaps 0x390(%rsp), %xmm2 movaps 0x340(%rsp), %xmm0 movaps %xmm2, 0x4f0(%rsp) movaps %xmm0, 0x4e0(%rsp) movaps 0x4f0(%rsp), %xmm0 movaps 0x4e0(%rsp), %xmm2 pand %xmm2, %xmm0 movaps %xmm0, 0x340(%rsp) movaps 0x270(%rsp), %xmm2 movaps 0x340(%rsp), %xmm0 movaps %xmm2, 0xc30(%rsp) movaps %xmm0, 0xc20(%rsp) movaps 0xc30(%rsp), %xmm0 movaps 0xc20(%rsp), %xmm2 por %xmm2, %xmm0 movq 0x438(%rsp), %rax movaps %xmm0, (%rax) movaps 0x390(%rsp), %xmm2 movaps 0x280(%rsp), %xmm0 movaps %xmm2, 0xb50(%rsp) movaps %xmm0, 0xb40(%rsp) movaps 0xb50(%rsp), %xmm0 movaps 0xb40(%rsp), %xmm2 pandn %xmm2, %xmm0 movaps %xmm0, 0x280(%rsp) movaps 0x390(%rsp), %xmm2 movaps 0x350(%rsp), %xmm0 movaps %xmm2, 0x4d0(%rsp) movaps %xmm0, 0x4c0(%rsp) movaps 0x4d0(%rsp), %xmm0 movaps 0x4c0(%rsp), %xmm2 pand %xmm2, %xmm0 movaps %xmm0, 0x350(%rsp) movaps 0x280(%rsp), %xmm2 movaps 0x350(%rsp), %xmm0 movaps %xmm2, 0xc10(%rsp) movaps %xmm0, 0xc00(%rsp) movaps 0xc10(%rsp), %xmm0 movaps 0xc00(%rsp), %xmm2 por %xmm2, %xmm0 movq 0x448(%rsp), %rax movaps %xmm0, (%rax) movaps 0x390(%rsp), %xmm2 movq 0x450(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm2, 0xb30(%rsp) movaps %xmm0, 0xb20(%rsp) movaps 0xb30(%rsp), %xmm0 movaps 0xb20(%rsp), %xmm2 pandn %xmm2, %xmm0 movaps %xmm0, 0x380(%rsp) movaps 0x390(%rsp), %xmm2 movaps 0x360(%rsp), %xmm0 movaps %xmm2, 0x4b0(%rsp) movaps %xmm0, 0x4a0(%rsp) movaps 0x4b0(%rsp), %xmm0 movaps 0x4a0(%rsp), %xmm2 pand %xmm2, %xmm0 movq 0x450(%rsp), 
%rax movaps %xmm0, (%rax) movaps 0x380(%rsp), %xmm2 movq 0x450(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm2, 0xbf0(%rsp) movaps %xmm0, 0xbe0(%rsp) movaps 0xbf0(%rsp), %xmm0 movaps 0xbe0(%rsp), %xmm2 por %xmm2, %xmm0 movq 0x450(%rsp), %rax movaps %xmm0, (%rax) movaps 0x390(%rsp), %xmm2 movq 0x458(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm2, 0xb10(%rsp) movaps %xmm0, 0xb00(%rsp) movaps 0xb10(%rsp), %xmm0 pxor %xmm1, %xmm0 pand 0xb00(%rsp), %xmm0 movdqa %xmm0, 0x380(%rsp) movdqa 0x390(%rsp), %xmm1 movdqa 0x370(%rsp), %xmm0 movdqa %xmm1, 0x490(%rsp) movdqa %xmm0, 0x480(%rsp) movdqa 0x490(%rsp), %xmm0 pand 0x480(%rsp), %xmm0 movq 0x458(%rsp), %rax movdqa %xmm0, (%rax) movdqa 0x380(%rsp), %xmm1 movq 0x458(%rsp), %rax movdqa (%rax), %xmm0 movdqa %xmm1, 0xbd0(%rsp) movdqa %xmm0, 0xbc0(%rsp) movdqa 0xbd0(%rsp), %xmm0 por 0xbc0(%rsp), %xmm0 movq 0x458(%rsp), %rax movdqa %xmm0, (%rax) jmp 0x313304 movdqa 0x250(%rsp), %xmm0 movq 0x430(%rsp), %rax movdqa %xmm0, (%rax) movdqa 0x260(%rsp), %xmm0 movq 0x440(%rsp), %rax movdqa %xmm0, (%rax) movdqa 0x270(%rsp), %xmm0 movq 0x438(%rsp), %rax movdqa %xmm0, (%rax) movdqa 0x280(%rsp), %xmm0 movq 0x448(%rsp), %rax movdqa %xmm0, (%rax) movq 0xe8(%rsp), %rax imull $0x3, 0xe4(%rsp), %ecx movslq %ecx, %rdx xorl %ecx, %ecx subq %rdx, %rcx shlq %rcx addq %rcx, %rax movdqa 0xb0(%rsp), %xmm0 movq %rax, 0x1a8(%rsp) movdqa %xmm0, 0x190(%rsp) movdqa 0x190(%rsp), %xmm0 movq 0x1a8(%rsp), %rax movdqu %xmm0, (%rax) movq 0xe8(%rsp), %rax movl 0xe4(%rsp), %ecx shll %ecx movslq %ecx, %rdx xorl %ecx, %ecx subq %rdx, %rcx shlq %rcx addq %rcx, %rax movdqa 0xa0(%rsp), %xmm0 movq %rax, 0x188(%rsp) movdqa %xmm0, 0x170(%rsp) movdqa 0x170(%rsp), %xmm0 movq 0x188(%rsp), %rax movdqu %xmm0, (%rax) movq 0xe8(%rsp), %rax movl 0xe4(%rsp), %ecx shll $0x0, %ecx movslq %ecx, %rdx xorl %ecx, %ecx subq %rdx, %rcx shlq %rcx addq %rcx, %rax movdqa 0x90(%rsp), %xmm0 movq %rax, 0x168(%rsp) movdqa %xmm0, 0x150(%rsp) movdqa 0x150(%rsp), %xmm0 movq 0x168(%rsp), %rax movdqu 
%xmm0, (%rax) movq 0xe8(%rsp), %rax imull $0x0, 0xe4(%rsp), %ecx movslq %ecx, %rcx shlq %rcx addq %rcx, %rax movdqa 0x80(%rsp), %xmm0 movq %rax, 0x148(%rsp) movdqa %xmm0, 0x130(%rsp) movdqa 0x130(%rsp), %xmm0 movq 0x148(%rsp), %rax movdqu %xmm0, (%rax) movq 0xe8(%rsp), %rax movl 0xe4(%rsp), %ecx shll $0x0, %ecx movslq %ecx, %rcx shlq %rcx addq %rcx, %rax movdqa 0x70(%rsp), %xmm0 movq %rax, 0x128(%rsp) movdqa %xmm0, 0x110(%rsp) movdqa 0x110(%rsp), %xmm0 movq 0x128(%rsp), %rax movdqu %xmm0, (%rax) movq 0xe8(%rsp), %rax movl 0xe4(%rsp), %ecx shll %ecx movslq %ecx, %rcx shlq %rcx addq %rcx, %rax movdqa 0x60(%rsp), %xmm0 movq %rax, 0x108(%rsp) movdqa %xmm0, 0xf0(%rsp) movdqa 0xf0(%rsp), %xmm0 movq 0x108(%rsp), %rax movdqu %xmm0, (%rax) addq $0x1508, %rsp # imm = 0x1508 retq nopl (%rax,%rax)
/m-ab-s[P]aom/aom_dsp/x86/highbd_loopfilter_sse2.c
aom_highbd_lpf_horizontal_4_sse2
void aom_highbd_lpf_horizontal_4_sse2(uint16_t *s, int p, const uint8_t *_blimit, const uint8_t *_limit, const uint8_t *_thresh, int bd) { __m128i p1p0, q1q0; __m128i p1 = _mm_loadl_epi64((__m128i *)(s - 2 * p)); __m128i p0 = _mm_loadl_epi64((__m128i *)(s - 1 * p)); __m128i q0 = _mm_loadl_epi64((__m128i *)(s - 0 * p)); __m128i q1 = _mm_loadl_epi64((__m128i *)(s + 1 * p)); highbd_lpf_internal_4_sse2(&p1, &p0, &q0, &q1, &q1q0, &p1p0, _blimit, _limit, _thresh, bd); _mm_storel_epi64((__m128i *)(s - 2 * p), _mm_srli_si128(p1p0, 8)); _mm_storel_epi64((__m128i *)(s - 1 * p), p1p0); _mm_storel_epi64((__m128i *)(s + 0 * p), q1q0); _mm_storel_epi64((__m128i *)(s + 1 * p), _mm_srli_si128(q1q0, 8)); }
subq $0x998, %rsp # imm = 0x998 movq %rdi, 0xe8(%rsp) movl %esi, 0xe4(%rsp) movq %rdx, 0xd8(%rsp) movq %rcx, 0xd0(%rsp) movq %r8, 0xc8(%rsp) movl %r9d, 0xc4(%rsp) movq 0xe8(%rsp), %rax movl 0xe4(%rsp), %ecx addl %ecx, %ecx movslq %ecx, %rcx addq %rcx, %rcx subq %rcx, %rax movq %rax, 0x168(%rsp) movq 0x168(%rsp), %rax movq (%rax), %xmm0 movaps %xmm0, 0x150(%rsp) movaps 0x150(%rsp), %xmm0 movaps %xmm0, 0x90(%rsp) movq 0xe8(%rsp), %rax movslq 0xe4(%rsp), %rcx addq %rcx, %rcx subq %rcx, %rax movq %rax, 0x148(%rsp) movq 0x148(%rsp), %rax movq (%rax), %xmm0 movaps %xmm0, 0x130(%rsp) movaps 0x130(%rsp), %xmm0 movaps %xmm0, 0x80(%rsp) movq 0xe8(%rsp), %rax movq %rax, 0x128(%rsp) movq 0x128(%rsp), %rax movq (%rax), %xmm0 movaps %xmm0, 0x110(%rsp) movaps 0x110(%rsp), %xmm0 movaps %xmm0, 0x70(%rsp) movq 0xe8(%rsp), %rax movslq 0xe4(%rsp), %rcx leaq (%rax,%rcx,2), %rax movq %rax, 0x108(%rsp) movq 0x108(%rsp), %rax movq (%rax), %xmm0 movaps %xmm0, 0xf0(%rsp) movaps 0xf0(%rsp), %xmm0 movaps %xmm0, 0x60(%rsp) movq 0xd8(%rsp), %rsi movq 0xd0(%rsp), %rdx movq 0xc8(%rsp), %rcx movl 0xc4(%rsp), %eax leaq 0x90(%rsp), %rdi movq %rdi, 0x2e8(%rsp) leaq 0x80(%rsp), %rdi movq %rdi, 0x2e0(%rsp) leaq 0x70(%rsp), %rdi movq %rdi, 0x2d8(%rsp) leaq 0x60(%rsp), %rdi movq %rdi, 0x2d0(%rsp) leaq 0xa0(%rsp), %rdi movq %rdi, 0x2c8(%rsp) leaq 0xb0(%rsp), %rdi movq %rdi, 0x2c0(%rsp) movq %rsi, 0x2b8(%rsp) movq %rdx, 0x2b0(%rsp) movq %rcx, 0x2a8(%rsp) movl %eax, 0x2a4(%rsp) movq 0x2b8(%rsp), %rdi movq 0x2b0(%rsp), %rsi movq 0x2a8(%rsp), %rdx movl 0x2a4(%rsp), %ecx movq %rsp, %rax leaq 0x1f0(%rsp), %r8 movq %r8, 0x58(%rsp) movq %r8, 0x8(%rax) leaq 0x270(%rsp), %r8 movq %r8, 0x20(%rsp) movq %r8, (%rax) leaq 0x290(%rsp), %r8 movq %r8, 0x28(%rsp) leaq 0x280(%rsp), %r9 movq %r9, 0x30(%rsp) callq 0x3254d0 movq 0x20(%rsp), %rdx movq 0x28(%rsp), %rcx movq 0x30(%rsp), %r9 movq 0x2e0(%rsp), %rax movaps (%rax), %xmm1 movq 0x2d8(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, 0x320(%rsp) movaps %xmm0, 0x310(%rsp) 
movaps 0x320(%rsp), %xmm0 movaps 0x310(%rsp), %xmm1 punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x210(%rsp) movq 0x2e8(%rsp), %rax movaps (%rax), %xmm1 movq 0x2d0(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, 0x300(%rsp) movaps %xmm0, 0x2f0(%rsp) movaps 0x300(%rsp), %xmm0 movaps 0x2f0(%rsp), %xmm1 punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x220(%rsp) movq %rsp, %rax leaq 0x260(%rsp), %rsi movq %rsi, 0x50(%rsp) movq %rsi, 0x18(%rax) leaq 0x250(%rsp), %rsi movq %rsi, 0x48(%rsp) movq %rsi, 0x10(%rax) movq %rdx, 0x8(%rax) movq %rcx, (%rax) leaq 0x210(%rsp), %rdi movl $0x2, %esi leaq 0x240(%rsp), %rdx movq %rdx, 0x38(%rsp) leaq 0x230(%rsp), %rcx movq %rcx, 0x40(%rsp) leaq 0x200(%rsp), %r8 callq 0x325750 movq 0x38(%rsp), %r10 movq 0x40(%rsp), %r9 movq 0x48(%rsp), %r8 movq 0x50(%rsp), %rdi movq 0x58(%rsp), %rcx movq 0x2c8(%rsp), %rsi movq 0x2c0(%rsp), %rdx movl 0x2a4(%rsp), %eax movq %r10, 0x428(%rsp) movq %r9, 0x420(%rsp) movq %r8, 0x418(%rsp) movq %rdi, 0x410(%rsp) movq %rsi, 0x408(%rsp) movq %rdx, 0x400(%rsp) movq %rcx, 0x3f8(%rsp) movl %eax, 0x3f4(%rsp) xorps %xmm0, %xmm0 movaps %xmm0, 0x430(%rsp) movaps 0x430(%rsp), %xmm0 movaps %xmm0, 0x3e0(%rsp) movw $0x1, 0x4ee(%rsp) movw 0x4ee(%rsp), %ax movw %ax, 0x6ae(%rsp) movw %ax, 0x6ac(%rsp) movw %ax, 0x6aa(%rsp) movw %ax, 0x6a8(%rsp) movw %ax, 0x6a6(%rsp) movw %ax, 0x6a4(%rsp) movw %ax, 0x6a2(%rsp) movw %ax, 0x6a0(%rsp) movzwl 0x6ae(%rsp), %eax movd %eax, %xmm1 movzwl 0x6ac(%rsp), %eax movd %eax, %xmm0 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movzwl 0x6aa(%rsp), %eax movd %eax, %xmm2 movzwl 0x6a8(%rsp), %eax movd %eax, %xmm1 punpcklwd %xmm2, %xmm1 # xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movzwl 0x6a6(%rsp), %eax movd %eax, %xmm0 movzwl 0x6a4(%rsp), %eax movd %eax, %xmm2 punpcklwd %xmm0, %xmm2 # xmm2 = 
xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] movzwl 0x6a2(%rsp), %eax movd %eax, %xmm3 movzwl 0x6a0(%rsp), %eax movd %eax, %xmm0 punpcklwd %xmm3, %xmm0 # xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x690(%rsp) movaps 0x690(%rsp), %xmm0 movaps %xmm0, 0x3d0(%rsp) movaps 0x3d0(%rsp), %xmm0 movl 0x3f4(%rsp), %eax movaps %xmm0, 0x540(%rsp) movl %eax, 0x53c(%rsp) movaps 0x540(%rsp), %xmm1 movd 0x53c(%rsp), %xmm0 psllw %xmm0, %xmm1 movaps 0x3d0(%rsp), %xmm0 movaps %xmm1, 0x680(%rsp) movaps %xmm0, 0x670(%rsp) movaps 0x680(%rsp), %xmm1 movaps 0x670(%rsp), %xmm0 psubsw %xmm0, %xmm1 movq 0x3f8(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, 0x660(%rsp) movaps %xmm0, 0x650(%rsp) movaps 0x660(%rsp), %xmm0 movaps 0x650(%rsp), %xmm1 psubsw %xmm1, %xmm0 movaps %xmm0, 0x3c0(%rsp) movaps 0x3e0(%rsp), %xmm1 movq 0x3f8(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, 0x640(%rsp) movaps %xmm0, 0x630(%rsp) movaps 0x640(%rsp), %xmm0 movaps 0x630(%rsp), %xmm1 psubsw %xmm1, %xmm0 movaps %xmm0, 0x3b0(%rsp) movw $0x3, 0x6d6(%rsp) movw $0x3, 0x6d4(%rsp) movw $0x3, 0x6d2(%rsp) movw $0x3, 0x6d0(%rsp) movw $0x4, 0x6ce(%rsp) movw $0x4, 0x6cc(%rsp) movw $0x4, 0x6ca(%rsp) movw $0x4, 0x6c8(%rsp) movzwl 0x6d6(%rsp), %eax movd %eax, %xmm1 movzwl 0x6d4(%rsp), %eax movd %eax, %xmm0 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movzwl 0x6d2(%rsp), %eax movd %eax, %xmm2 movzwl 0x6d0(%rsp), %eax movd %eax, %xmm1 punpcklwd %xmm2, %xmm1 # xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movzwl 0x6ce(%rsp), %eax movd %eax, %xmm0 movzwl 0x6cc(%rsp), %eax movd %eax, %xmm2 punpcklwd %xmm0, %xmm2 # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] movzwl 0x6ca(%rsp), %eax movd %eax, %xmm3 
movzwl 0x6c8(%rsp), %eax movd %eax, %xmm0 punpcklwd %xmm3, %xmm0 # xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x6b0(%rsp) movaps 0x6b0(%rsp), %xmm0 movaps %xmm0, 0x3a0(%rsp) movq 0x428(%rsp), %rax movaps (%rax), %xmm1 movq 0x3f8(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, 0x620(%rsp) movaps %xmm0, 0x610(%rsp) movaps 0x620(%rsp), %xmm0 movaps 0x610(%rsp), %xmm1 psubsw %xmm1, %xmm0 movaps %xmm0, 0x390(%rsp) movq 0x420(%rsp), %rax movaps (%rax), %xmm1 movq 0x3f8(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, 0x600(%rsp) movaps %xmm0, 0x5f0(%rsp) movaps 0x600(%rsp), %xmm0 movaps 0x5f0(%rsp), %xmm1 psubsw %xmm1, %xmm0 movaps %xmm0, 0x380(%rsp) movaps 0x390(%rsp), %xmm1 movaps 0x380(%rsp), %xmm0 movaps %xmm1, 0x5e0(%rsp) movaps %xmm0, 0x5d0(%rsp) movaps 0x5e0(%rsp), %xmm0 movaps 0x5d0(%rsp), %xmm1 psubsw %xmm1, %xmm0 movaps %xmm0, 0x370(%rsp) leaq 0x3b0(%rsp), %rcx movq %rcx, 0x7e8(%rsp) leaq 0x3c0(%rsp), %rax movq %rax, 0x7e0(%rsp) leaq 0x370(%rsp), %rdx movq %rdx, 0x7d8(%rsp) movq 0x7d8(%rsp), %rdx movaps (%rdx), %xmm1 movq 0x7e0(%rsp), %rdx movaps (%rdx), %xmm0 movaps %xmm1, 0x900(%rsp) movaps %xmm0, 0x8f0(%rsp) movaps 0x900(%rsp), %xmm0 movaps 0x8f0(%rsp), %xmm1 pminsw %xmm1, %xmm0 movq 0x7d8(%rsp), %rdx movaps %xmm0, (%rdx) movq 0x7d8(%rsp), %rdx movaps (%rdx), %xmm1 movq 0x7e8(%rsp), %rdx movaps (%rdx), %xmm0 movaps %xmm1, 0x800(%rsp) movaps %xmm0, 0x7f0(%rsp) movaps 0x800(%rsp), %xmm0 movaps 0x7f0(%rsp), %xmm1 pmaxsw %xmm1, %xmm0 movq 0x7d8(%rsp), %rdx movaps %xmm0, (%rdx) movaps 0x370(%rsp), %xmm1 psrldq $0x8, %xmm1 # xmm1 = xmm1[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero movq 0x418(%rsp), %rdx movaps (%rdx), %xmm0 movaps %xmm1, 0x4d0(%rsp) movaps %xmm0, 0x4c0(%rsp) movaps 0x4d0(%rsp), %xmm0 movaps 0x4c0(%rsp), %xmm1 pand %xmm1, %xmm0 movaps %xmm0, 0x360(%rsp) movaps 0x360(%rsp), %xmm1 
movaps 0x370(%rsp), %xmm0 movaps %xmm1, 0x5c0(%rsp) movaps %xmm0, 0x5b0(%rsp) movaps 0x5c0(%rsp), %xmm0 movaps 0x5b0(%rsp), %xmm1 psubsw %xmm1, %xmm0 movaps %xmm0, 0x360(%rsp) movaps 0x360(%rsp), %xmm1 movaps 0x370(%rsp), %xmm0 movaps %xmm1, 0x5a0(%rsp) movaps %xmm0, 0x590(%rsp) movaps 0x5a0(%rsp), %xmm0 movaps 0x590(%rsp), %xmm1 psubsw %xmm1, %xmm0 movaps %xmm0, 0x360(%rsp) movaps 0x360(%rsp), %xmm1 movaps 0x370(%rsp), %xmm0 movaps %xmm1, 0x580(%rsp) movaps %xmm0, 0x570(%rsp) movaps 0x580(%rsp), %xmm0 movaps 0x570(%rsp), %xmm1 psubsw %xmm1, %xmm0 movaps %xmm0, 0x360(%rsp) movq %rcx, 0x7a8(%rsp) movq %rax, 0x7a0(%rsp) leaq 0x360(%rsp), %rdx movq %rdx, 0x798(%rsp) movq 0x798(%rsp), %rdx movaps (%rdx), %xmm1 movq 0x7a0(%rsp), %rdx movaps (%rdx), %xmm0 movaps %xmm1, 0x920(%rsp) movaps %xmm0, 0x910(%rsp) movaps 0x920(%rsp), %xmm0 movaps 0x910(%rsp), %xmm1 pminsw %xmm1, %xmm0 movq 0x798(%rsp), %rdx movaps %xmm0, (%rdx) movq 0x798(%rsp), %rdx movaps (%rdx), %xmm1 movq 0x7a8(%rsp), %rdx movaps (%rdx), %xmm0 movaps %xmm1, 0x7c0(%rsp) movaps %xmm0, 0x7b0(%rsp) movaps 0x7c0(%rsp), %xmm0 movaps 0x7b0(%rsp), %xmm1 pmaxsw %xmm1, %xmm0 movq 0x798(%rsp), %rdx movaps %xmm0, (%rdx) movaps 0x360(%rsp), %xmm1 movq 0x410(%rsp), %rdx movaps (%rdx), %xmm0 movaps %xmm1, 0x4b0(%rsp) movaps %xmm0, 0x4a0(%rsp) movaps 0x4b0(%rsp), %xmm0 movaps 0x4a0(%rsp), %xmm1 pand %xmm1, %xmm0 movaps %xmm0, 0x360(%rsp) movaps 0x360(%rsp), %xmm0 movaps %xmm0, 0x490(%rsp) movaps %xmm0, 0x480(%rsp) movaps 0x490(%rsp), %xmm0 movaps 0x480(%rsp), %xmm1 punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x360(%rsp) movaps 0x360(%rsp), %xmm1 movaps 0x3a0(%rsp), %xmm0 movaps %xmm1, 0x8a0(%rsp) movaps %xmm0, 0x890(%rsp) movaps 0x8a0(%rsp), %xmm0 movaps 0x890(%rsp), %xmm1 paddsw %xmm1, %xmm0 movaps %xmm0, 0x350(%rsp) movq %rcx, 0x768(%rsp) movq %rax, 0x760(%rsp) leaq 0x350(%rsp), %rdx movq %rdx, 0x758(%rsp) movq 0x758(%rsp), %rdx movaps (%rdx), %xmm1 movq 0x760(%rsp), %rdx movaps (%rdx), %xmm0 movaps 
%xmm1, 0x940(%rsp) movaps %xmm0, 0x930(%rsp) movaps 0x940(%rsp), %xmm0 movaps 0x930(%rsp), %xmm1 pminsw %xmm1, %xmm0 movq 0x758(%rsp), %rdx movaps %xmm0, (%rdx) movq 0x758(%rsp), %rdx movaps (%rdx), %xmm1 movq 0x768(%rsp), %rdx movaps (%rdx), %xmm0 movaps %xmm1, 0x780(%rsp) movaps %xmm0, 0x770(%rsp) movaps 0x780(%rsp), %xmm0 movaps 0x770(%rsp), %xmm1 pmaxsw %xmm1, %xmm0 movq 0x758(%rsp), %rdx movaps %xmm0, (%rdx) movaps 0x350(%rsp), %xmm0 movaps %xmm0, 0x8e0(%rsp) movl $0x3, 0x8dc(%rsp) movaps 0x8e0(%rsp), %xmm0 movd 0x8dc(%rsp), %xmm1 psraw %xmm1, %xmm0 movaps %xmm0, 0x350(%rsp) movaps 0x350(%rsp), %xmm0 movaps %xmm0, 0x470(%rsp) movaps %xmm0, 0x460(%rsp) movaps 0x470(%rsp), %xmm0 movaps 0x460(%rsp), %xmm1 punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x360(%rsp) movaps 0x360(%rsp), %xmm1 movaps 0x3d0(%rsp), %xmm0 movaps %xmm1, 0x880(%rsp) movaps %xmm0, 0x870(%rsp) movaps 0x880(%rsp), %xmm0 movaps 0x870(%rsp), %xmm1 paddsw %xmm1, %xmm0 movaps %xmm0, 0x360(%rsp) movaps 0x360(%rsp), %xmm0 movaps %xmm0, 0x8c0(%rsp) movl $0x1, 0x8bc(%rsp) movaps 0x8c0(%rsp), %xmm0 movd 0x8bc(%rsp), %xmm1 psraw %xmm1, %xmm0 movaps %xmm0, 0x360(%rsp) movq 0x418(%rsp), %rdx movaps (%rdx), %xmm1 movaps 0x360(%rsp), %xmm0 movaps %xmm1, 0x520(%rsp) movaps %xmm0, 0x510(%rsp) movaps 0x520(%rsp), %xmm0 movaps 0x510(%rsp), %xmm1 pandn %xmm1, %xmm0 movaps %xmm0, 0x360(%rsp) movaps 0x350(%rsp), %xmm1 movaps 0x360(%rsp), %xmm0 movaps %xmm1, 0x500(%rsp) movaps %xmm0, 0x4f0(%rsp) movaps 0x500(%rsp), %xmm0 movaps 0x4f0(%rsp), %xmm1 punpckhqdq %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1] movaps %xmm0, 0x340(%rsp) movaps 0x350(%rsp), %xmm1 movaps 0x360(%rsp), %xmm0 movaps %xmm1, 0x450(%rsp) movaps %xmm0, 0x440(%rsp) movaps 0x450(%rsp), %xmm0 movaps 0x440(%rsp), %xmm1 punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x330(%rsp) movaps 0x380(%rsp), %xmm1 movaps 0x330(%rsp), %xmm0 movaps %xmm1, 0x560(%rsp) movaps %xmm0, 0x550(%rsp) movaps 0x560(%rsp), %xmm0 movaps 0x550(%rsp), 
%xmm1 psubsw %xmm1, %xmm0 movaps %xmm0, 0x380(%rsp) movaps 0x390(%rsp), %xmm1 movaps 0x340(%rsp), %xmm0 movaps %xmm1, 0x860(%rsp) movaps %xmm0, 0x850(%rsp) movaps 0x860(%rsp), %xmm0 movaps 0x850(%rsp), %xmm1 paddsw %xmm1, %xmm0 movaps %xmm0, 0x390(%rsp) movq %rcx, 0x728(%rsp) movq %rax, 0x720(%rsp) leaq 0x380(%rsp), %rdx movq %rdx, 0x718(%rsp) movq 0x718(%rsp), %rdx movaps (%rdx), %xmm1 movq 0x720(%rsp), %rdx movaps (%rdx), %xmm0 movaps %xmm1, 0x960(%rsp) movaps %xmm0, 0x950(%rsp) movaps 0x960(%rsp), %xmm0 movaps 0x950(%rsp), %xmm1 pminsw %xmm1, %xmm0 movq 0x718(%rsp), %rdx movaps %xmm0, (%rdx) movq 0x718(%rsp), %rdx movaps (%rdx), %xmm1 movq 0x728(%rsp), %rdx movaps (%rdx), %xmm0 movaps %xmm1, 0x740(%rsp) movaps %xmm0, 0x730(%rsp) movaps 0x740(%rsp), %xmm0 movaps 0x730(%rsp), %xmm1 pmaxsw %xmm1, %xmm0 movq 0x718(%rsp), %rdx movaps %xmm0, (%rdx) movq %rcx, 0x6e8(%rsp) movq %rax, 0x6e0(%rsp) leaq 0x390(%rsp), %rax movq %rax, 0x6d8(%rsp) movq 0x6d8(%rsp), %rax movaps (%rax), %xmm1 movq 0x6e0(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, 0x980(%rsp) movaps %xmm0, 0x970(%rsp) movaps 0x980(%rsp), %xmm0 movaps 0x970(%rsp), %xmm1 pminsw %xmm1, %xmm0 movq 0x6d8(%rsp), %rax movaps %xmm0, (%rax) movq 0x6d8(%rsp), %rax movaps (%rax), %xmm1 movq 0x6e8(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, 0x700(%rsp) movaps %xmm0, 0x6f0(%rsp) movaps 0x700(%rsp), %xmm0 movaps 0x6f0(%rsp), %xmm1 pmaxsw %xmm1, %xmm0 movq 0x6d8(%rsp), %rax movaps %xmm0, (%rax) movaps 0x380(%rsp), %xmm1 movq 0x3f8(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, 0x840(%rsp) movaps %xmm0, 0x830(%rsp) movaps 0x840(%rsp), %xmm0 movaps 0x830(%rsp), %xmm1 paddsw %xmm1, %xmm0 movq 0x408(%rsp), %rax movaps %xmm0, (%rax) movaps 0x390(%rsp), %xmm1 movq 0x3f8(%rsp), %rax movaps (%rax), %xmm0 movaps %xmm1, 0x820(%rsp) movaps %xmm0, 0x810(%rsp) movaps 0x820(%rsp), %xmm0 movaps 0x810(%rsp), %xmm1 paddsw %xmm1, %xmm0 movq 0x400(%rsp), %rax movaps %xmm0, (%rax) movq 0xe8(%rsp), %rax movl 0xe4(%rsp), %ecx addl %ecx, %ecx 
movslq %ecx, %rcx addq %rcx, %rcx subq %rcx, %rax movaps 0xb0(%rsp), %xmm0 psrldq $0x8, %xmm0 # xmm0 = xmm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero movq %rax, 0x1e8(%rsp) movaps %xmm0, 0x1d0(%rsp) movq 0x1d0(%rsp), %rcx movq 0x1e8(%rsp), %rax movq %rcx, (%rax) movq 0xe8(%rsp), %rax movslq 0xe4(%rsp), %rcx addq %rcx, %rcx subq %rcx, %rax movaps 0xb0(%rsp), %xmm0 movq %rax, 0x1c8(%rsp) movaps %xmm0, 0x1b0(%rsp) movq 0x1b0(%rsp), %rcx movq 0x1c8(%rsp), %rax movq %rcx, (%rax) movq 0xe8(%rsp), %rax movaps 0xa0(%rsp), %xmm0 movq %rax, 0x1a8(%rsp) movaps %xmm0, 0x190(%rsp) movq 0x190(%rsp), %rcx movq 0x1a8(%rsp), %rax movq %rcx, (%rax) movq 0xe8(%rsp), %rax movslq 0xe4(%rsp), %rcx leaq (%rax,%rcx,2), %rax movaps 0xa0(%rsp), %xmm0 psrldq $0x8, %xmm0 # xmm0 = xmm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero movq %rax, 0x188(%rsp) movaps %xmm0, 0x170(%rsp) movq 0x170(%rsp), %rcx movq 0x188(%rsp), %rax movq %rcx, (%rax) addq $0x998, %rsp # imm = 0x998 retq nopl (%rax,%rax)
/m-ab-s[P]aom/aom_dsp/x86/highbd_loopfilter_sse2.c
aom_masked_sub_pixel_variance16x64_ssse3
static inline __m128i filter_block(const __m128i a, const __m128i b, const __m128i filter) { __m128i v0 = _mm_unpacklo_epi8(a, b); v0 = _mm_maddubs_epi16(v0, filter); v0 = xx_roundn_epu16(v0, FILTER_BITS); __m128i v1 = _mm_unpackhi_epi8(a, b); v1 = _mm_maddubs_epi16(v1, filter); v1 = xx_roundn_epu16(v1, FILTER_BITS); return _mm_packus_epi16(v0, v1); }
pushq %rbx subq $0x480, %rsp # imm = 0x480 movq 0x4b0(%rsp), %rax movl 0x4a8(%rsp), %eax movl 0x4a0(%rsp), %eax movq 0x498(%rsp), %rax movq 0x490(%rsp), %rax movq %rdi, 0x478(%rsp) movl %esi, 0x474(%rsp) movl %edx, 0x470(%rsp) movl %ecx, 0x46c(%rsp) movq %r8, 0x460(%rsp) movl %r9d, 0x45c(%rsp) movq 0x478(%rsp), %rdi movl 0x474(%rsp), %esi movl 0x470(%rsp), %edx movl 0x46c(%rsp), %ecx leaq 0x40(%rsp), %r8 movl $0x10, %r9d movl $0x40, (%rsp) callq 0x40cca0 cmpl $0x0, 0x4a8(%rsp) jne 0x411658 movq 0x460(%rsp), %rdi movl 0x45c(%rsp), %esi leaq 0x40(%rsp), %rdx movq 0x490(%rsp), %r8 movq 0x498(%rsp), %rbx movl 0x4a0(%rsp), %r11d movq 0x4b0(%rsp), %r10 movl $0x10, %r9d leaq 0x458(%rsp), %rax movl %r9d, %ecx movq %rbx, (%rsp) movl %r11d, 0x8(%rsp) movl $0x10, 0x10(%rsp) movl $0x40, 0x18(%rsp) movq %r10, 0x20(%rsp) movq %rax, 0x28(%rsp) callq 0x40d680 jmp 0x4116c5 movq 0x460(%rsp), %rdi movl 0x45c(%rsp), %esi movq 0x490(%rsp), %rdx leaq 0x40(%rsp), %r8 movq 0x498(%rsp), %rbx movl 0x4a0(%rsp), %r11d movq 0x4b0(%rsp), %r10 movl $0x10, %r9d leaq 0x458(%rsp), %rax movl %r9d, %ecx movq %rbx, (%rsp) movl %r11d, 0x8(%rsp) movl $0x10, 0x10(%rsp) movl $0x40, 0x18(%rsp) movq %r10, 0x20(%rsp) movq %rax, 0x28(%rsp) callq 0x40d680 movq 0x4b0(%rsp), %rax movl (%rax), %eax movl %eax, 0x3c(%rsp) movslq 0x458(%rsp), %rax movslq 0x458(%rsp), %rcx imulq %rcx, %rax movl $0x400, %ecx # imm = 0x400 cqto idivq %rcx movq %rax, %rcx movl 0x3c(%rsp), %eax subl %ecx, %eax addq $0x480, %rsp # imm = 0x480 popq %rbx retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/aom_dsp/x86/masked_variance_intrin_ssse3.c
blend_a64_mask_sx_sy_w8_sse4_1
static void blend_a64_mask_sx_sy_w8_sse4_1( uint8_t *dst, uint32_t dst_stride, const uint8_t *src0, uint32_t src0_stride, const uint8_t *src1, uint32_t src1_stride, const uint8_t *mask, uint32_t mask_stride, int w, int h) { const __m128i v_shuffle_b = xx_loadu_128(g_blend_a64_mask_shuffle); const __m128i v_maxval_b = _mm_set1_epi8(AOM_BLEND_A64_MAX_ALPHA); const __m128i _r = _mm_set1_epi16(1 << (15 - AOM_BLEND_A64_ROUND_BITS)); (void)w; do { const __m128i v_ra_b = xx_loadu_128(mask); const __m128i v_rb_b = xx_loadu_128(mask + mask_stride); const __m128i v_rvs_b = _mm_add_epi8(v_ra_b, v_rb_b); const __m128i v_r_s_b = _mm_shuffle_epi8(v_rvs_b, v_shuffle_b); const __m128i v_r0_s_w = _mm_cvtepu8_epi16(v_r_s_b); const __m128i v_r1_s_w = _mm_cvtepu8_epi16(_mm_srli_si128(v_r_s_b, 8)); const __m128i v_rs_w = _mm_add_epi16(v_r0_s_w, v_r1_s_w); const __m128i v_m0_w = xx_roundn_epu16(v_rs_w, 2); const __m128i v_m0_b = _mm_packus_epi16(v_m0_w, v_m0_w); const __m128i v_m1_b = _mm_sub_epi8(v_maxval_b, v_m0_b); const __m128i v_res_b = blend_8_u8(src0, src1, &v_m0_b, &v_m1_b, &_r); xx_storel_64(dst, v_res_b); dst += dst_stride; src0 += src0_stride; src1 += src1_stride; mask += 2 * mask_stride; } while (--h); }
subq $0x238, %rsp # imm = 0x238 movl 0x258(%rsp), %eax movl 0x250(%rsp), %eax movl 0x248(%rsp), %eax movq 0x240(%rsp), %rax movq %rdi, 0x120(%rsp) movl %esi, 0x11c(%rsp) movq %rdx, 0x110(%rsp) movl %ecx, 0x10c(%rsp) movq %r8, 0x100(%rsp) movl %r9d, 0xfc(%rsp) leaq 0x69f287(%rip), %rdi # 0xac73e0 callq 0x42eca0 movaps %xmm0, 0xe0(%rsp) movb $0x40, 0x12f(%rsp) movb 0x12f(%rsp), %al movb %al, 0xd(%rsp) movb %al, 0x16f(%rsp) movb %al, 0x16e(%rsp) movb %al, 0x16d(%rsp) movb %al, 0x16c(%rsp) movb %al, 0x16b(%rsp) movb %al, 0x16a(%rsp) movb %al, 0x169(%rsp) movb %al, 0x168(%rsp) movb %al, 0x167(%rsp) movb %al, 0x166(%rsp) movb %al, 0x165(%rsp) movb %al, 0x164(%rsp) movb %al, 0x163(%rsp) movb %al, 0x162(%rsp) movb %al, 0x161(%rsp) movb %al, 0x160(%rsp) movzbl 0x160(%rsp), %eax movd %eax, %xmm0 movzbl 0x161(%rsp), %eax pinsrb $0x1, %eax, %xmm0 movzbl 0x162(%rsp), %eax pinsrb $0x2, %eax, %xmm0 movzbl 0x163(%rsp), %eax pinsrb $0x3, %eax, %xmm0 movzbl 0x164(%rsp), %eax pinsrb $0x4, %eax, %xmm0 movzbl 0x165(%rsp), %eax pinsrb $0x5, %eax, %xmm0 movzbl 0x166(%rsp), %eax pinsrb $0x6, %eax, %xmm0 movzbl 0x167(%rsp), %eax pinsrb $0x7, %eax, %xmm0 movzbl 0x168(%rsp), %eax pinsrb $0x8, %eax, %xmm0 movzbl 0x169(%rsp), %eax pinsrb $0x9, %eax, %xmm0 movzbl 0x16a(%rsp), %eax pinsrb $0xa, %eax, %xmm0 movzbl 0x16b(%rsp), %eax pinsrb $0xb, %eax, %xmm0 movzbl 0x16c(%rsp), %eax pinsrb $0xc, %eax, %xmm0 movzbl 0x16d(%rsp), %eax pinsrb $0xd, %eax, %xmm0 movzbl 0x16e(%rsp), %eax pinsrb $0xe, %eax, %xmm0 movzbl 0x16f(%rsp), %eax pinsrb $0xf, %eax, %xmm0 movaps %xmm0, 0x150(%rsp) movaps 0x150(%rsp), %xmm0 movaps %xmm0, 0xd0(%rsp) movw $0x200, 0x12c(%rsp) # imm = 0x200 movw 0x12c(%rsp), %ax movw %ax, 0xe(%rsp) movw %ax, 0x236(%rsp) movw %ax, 0x234(%rsp) movw %ax, 0x232(%rsp) movw %ax, 0x230(%rsp) movw %ax, 0x22e(%rsp) movw %ax, 0x22c(%rsp) movw %ax, 0x22a(%rsp) movw %ax, 0x228(%rsp) movzwl 0x228(%rsp), %eax movd %eax, %xmm0 movzwl 0x22a(%rsp), %eax pinsrw $0x1, %eax, %xmm0 movzwl 0x22c(%rsp), %eax 
pinsrw $0x2, %eax, %xmm0 movzwl 0x22e(%rsp), %eax pinsrw $0x3, %eax, %xmm0 movzwl 0x230(%rsp), %eax pinsrw $0x4, %eax, %xmm0 movzwl 0x232(%rsp), %eax pinsrw $0x5, %eax, %xmm0 movzwl 0x234(%rsp), %eax pinsrw $0x6, %eax, %xmm0 movzwl 0x236(%rsp), %eax pinsrw $0x7, %eax, %xmm0 movdqa %xmm0, 0x210(%rsp) movdqa 0x210(%rsp), %xmm0 movdqa %xmm0, 0xc0(%rsp) movq 0x240(%rsp), %rdi callq 0x42eca0 movaps %xmm0, 0xb0(%rsp) movq 0x240(%rsp), %rdi movl 0x248(%rsp), %eax addq %rax, %rdi callq 0x42eca0 movaps %xmm0, 0xa0(%rsp) movaps 0xb0(%rsp), %xmm1 movaps 0xa0(%rsp), %xmm0 movaps %xmm1, 0x1c0(%rsp) movaps %xmm0, 0x1b0(%rsp) movaps 0x1c0(%rsp), %xmm0 movaps 0x1b0(%rsp), %xmm1 paddb %xmm1, %xmm0 movaps %xmm0, 0x90(%rsp) movaps 0x90(%rsp), %xmm1 movaps 0xe0(%rsp), %xmm0 movaps %xmm1, 0x1a0(%rsp) movaps %xmm0, 0x190(%rsp) movaps 0x1a0(%rsp), %xmm0 movaps 0x190(%rsp), %xmm1 pshufb %xmm1, %xmm0 movaps %xmm0, 0x80(%rsp) movaps 0x80(%rsp), %xmm0 movaps %xmm0, 0x200(%rsp) pmovzxbw 0x200(%rsp), %xmm0 movaps %xmm0, 0x70(%rsp) movaps 0x80(%rsp), %xmm0 psrldq $0x8, %xmm0 # xmm0 = xmm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero movaps %xmm0, 0x1f0(%rsp) pmovzxbw 0x1f0(%rsp), %xmm0 movdqa %xmm0, 0x60(%rsp) movdqa 0x70(%rsp), %xmm1 movdqa 0x60(%rsp), %xmm0 movdqa %xmm1, 0x1e0(%rsp) movdqa %xmm0, 0x1d0(%rsp) movdqa 0x1e0(%rsp), %xmm0 movdqa 0x1d0(%rsp), %xmm1 paddw %xmm1, %xmm0 movdqa %xmm0, 0x50(%rsp) movdqa 0x50(%rsp), %xmm0 movl $0x2, %edi callq 0x42ef40 movdqa %xmm0, 0x40(%rsp) movdqa 0x40(%rsp), %xmm1 movdqa 0x40(%rsp), %xmm0 movdqa %xmm1, 0x180(%rsp) movdqa %xmm0, 0x170(%rsp) movdqa 0x180(%rsp), %xmm0 movdqa 0x170(%rsp), %xmm1 packuswb %xmm1, %xmm0 movdqa %xmm0, 0x30(%rsp) movdqa 0xd0(%rsp), %xmm1 movdqa 0x30(%rsp), %xmm0 movdqa %xmm1, 0x140(%rsp) movdqa %xmm0, 0x130(%rsp) movdqa 0x140(%rsp), %xmm0 movdqa 0x130(%rsp), %xmm1 psubb %xmm1, %xmm0 movdqa %xmm0, 0x20(%rsp) movq 0x110(%rsp), %rdi movq 0x100(%rsp), %rsi leaq 0x30(%rsp), %rdx leaq 0x20(%rsp), %rcx leaq 
0xc0(%rsp), %r8 callq 0x42f1b0 movdqa %xmm0, 0x10(%rsp) movq 0x120(%rsp), %rdi movdqa 0x10(%rsp), %xmm0 callq 0x42f310 movl 0x11c(%rsp), %ecx movq 0x120(%rsp), %rax movl %ecx, %ecx addq %rcx, %rax movq %rax, 0x120(%rsp) movl 0x10c(%rsp), %ecx movq 0x110(%rsp), %rax movl %ecx, %ecx addq %rcx, %rax movq %rax, 0x110(%rsp) movl 0xfc(%rsp), %ecx movq 0x100(%rsp), %rax movl %ecx, %ecx addq %rcx, %rax movq %rax, 0x100(%rsp) movl 0x248(%rsp), %ecx shll %ecx movq 0x240(%rsp), %rax movl %ecx, %ecx addq %rcx, %rax movq %rax, 0x240(%rsp) movl 0x258(%rsp), %eax addl $-0x1, %eax movl %eax, 0x258(%rsp) cmpl $0x0, %eax jne 0x4283b8 addq $0x238, %rsp # imm = 0x238 retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/aom_dsp/x86/blend_a64_mask_sse4.c
aom_highbd_blend_a64_mask_sse4_1
void aom_highbd_blend_a64_mask_sse4_1(uint8_t *dst_8, uint32_t dst_stride, const uint8_t *src0_8, uint32_t src0_stride, const uint8_t *src1_8, uint32_t src1_stride, const uint8_t *mask, uint32_t mask_stride, int w, int h, int subw, int subh, int bd) { typedef void (*blend_fn)( uint16_t * dst, uint32_t dst_stride, const uint16_t *src0, uint32_t src0_stride, const uint16_t *src1, uint32_t src1_stride, const uint8_t *mask, uint32_t mask_stride, int w, int h); // Dimensions are: bd_index X width_index X subw X subh static const blend_fn blend[2][2][2][2] = { { // bd == 8 or 10 { // w % 8 == 0 { blend_a64_mask_b10_w8n_sse4_1, blend_a64_mask_b10_sy_w8n_sse4_1 }, { blend_a64_mask_b10_sx_w8n_sse4_1, blend_a64_mask_b10_sx_sy_w8n_sse4_1 } }, { // w == 4 { blend_a64_mask_b10_w4_sse4_1, blend_a64_mask_b10_sy_w4_sse4_1 }, { blend_a64_mask_b10_sx_w4_sse4_1, blend_a64_mask_b10_sx_sy_w4_sse4_1 } } }, { // bd == 12 { // w % 8 == 0 { blend_a64_mask_b12_w8n_sse4_1, blend_a64_mask_b12_sy_w8n_sse4_1 }, { blend_a64_mask_b12_sx_w8n_sse4_1, blend_a64_mask_b12_sx_sy_w8n_sse4_1 } }, { // w == 4 { blend_a64_mask_b12_w4_sse4_1, blend_a64_mask_b12_sy_w4_sse4_1 }, { blend_a64_mask_b12_sx_w4_sse4_1, blend_a64_mask_b12_sx_sy_w4_sse4_1 } } } }; assert(IMPLIES(src0_8 == dst_8, src0_stride == dst_stride)); assert(IMPLIES(src1_8 == dst_8, src1_stride == dst_stride)); assert(h >= 1); assert(w >= 1); assert(IS_POWER_OF_TWO(h)); assert(IS_POWER_OF_TWO(w)); assert(bd == 8 || bd == 10 || bd == 12); if (UNLIKELY((h | w) & 3)) { // if (w <= 2 || h <= 2) aom_highbd_blend_a64_mask_c(dst_8, dst_stride, src0_8, src0_stride, src1_8, src1_stride, mask, mask_stride, w, h, subw, subh, bd); } else { uint16_t *const dst = CONVERT_TO_SHORTPTR(dst_8); const uint16_t *const src0 = CONVERT_TO_SHORTPTR(src0_8); const uint16_t *const src1 = CONVERT_TO_SHORTPTR(src1_8); blend[bd == 12][(w >> 2) & 1][subw != 0][subh != 0]( dst, dst_stride, src0, src0_stride, src1, src1_stride, mask, mask_stride, w, h); } }
pushq %rbp pushq %r15 pushq %r14 pushq %rbx subq $0x88, %rsp movl 0xe0(%rsp), %eax movl 0xd8(%rsp), %eax movl 0xd0(%rsp), %eax movl 0xc8(%rsp), %eax movl 0xc0(%rsp), %eax movl 0xb8(%rsp), %eax movq 0xb0(%rsp), %rax movq %rdi, 0x80(%rsp) movl %esi, 0x7c(%rsp) movq %rdx, 0x70(%rsp) movl %ecx, 0x6c(%rsp) movq %r8, 0x60(%rsp) movl %r9d, 0x5c(%rsp) movl 0xc8(%rsp), %eax orl 0xc0(%rsp), %eax andl $0x3, %eax cltq cmpq $0x0, %rax je 0x428744 movq 0x80(%rsp), %rdi movl 0x7c(%rsp), %esi movq 0x70(%rsp), %rdx movl 0x6c(%rsp), %ecx movq 0x60(%rsp), %r8 movl 0x5c(%rsp), %r9d movq 0xb0(%rsp), %r15 movl 0xb8(%rsp), %r14d movl 0xc0(%rsp), %ebp movl 0xc8(%rsp), %ebx movl 0xd0(%rsp), %r11d movl 0xd8(%rsp), %r10d movl 0xe0(%rsp), %eax movq %r15, (%rsp) movl %r14d, 0x8(%rsp) movl %ebp, 0x10(%rsp) movl %ebx, 0x18(%rsp) movl %r11d, 0x20(%rsp) movl %r10d, 0x28(%rsp) movl %eax, 0x30(%rsp) callq 0x275450 jmp 0x428828 movq 0x80(%rsp), %rax shlq %rax movq %rax, 0x50(%rsp) movq 0x70(%rsp), %rax shlq %rax movq %rax, 0x48(%rsp) movq 0x60(%rsp), %rax shlq %rax movq %rax, 0x40(%rsp) cmpl $0xc, 0xe0(%rsp) sete %al andb $0x1, %al movzbl %al, %eax movslq %eax, %rcx leaq 0x771c98(%rip), %rax # 0xb9a420 shlq $0x6, %rcx addq %rcx, %rax movl 0xc0(%rsp), %ecx sarl $0x2, %ecx andl $0x1, %ecx movslq %ecx, %rcx shlq $0x5, %rcx addq %rcx, %rax cmpl $0x0, 0xd0(%rsp) setne %cl andb $0x1, %cl movzbl %cl, %ecx movslq %ecx, %rcx shlq $0x4, %rcx addq %rcx, %rax cmpl $0x0, 0xd8(%rsp) setne %cl andb $0x1, %cl movzbl %cl, %ecx movslq %ecx, %rcx movq (%rax,%rcx,8), %rax movq 0x50(%rsp), %rdi movl 0x7c(%rsp), %esi movq 0x48(%rsp), %rdx movl 0x6c(%rsp), %ecx movq 0x40(%rsp), %r8 movl 0x5c(%rsp), %r9d movq 0xb0(%rsp), %r14 movl 0xb8(%rsp), %ebx movl 0xc0(%rsp), %r11d movl 0xc8(%rsp), %r10d movq %r14, (%rsp) movl %ebx, 0x8(%rsp) movl %r11d, 0x10(%rsp) movl %r10d, 0x18(%rsp) callq *%rax addq $0x88, %rsp popq %rbx popq %r14 popq %r15 popq %rbp retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/aom_dsp/x86/blend_a64_mask_sse4.c
aom_lowbd_blend_a64_d16_mask_sse4_1
// SSE4.1 entry point for blending two CONV_BUF_TYPE planes with a 6-bit
// alpha mask (d16 path). Derives the rounding offset/shift from the
// convolution parameters, then dispatches to the kernel specialised for
// the mask subsampling mode (subw/subh) and block width w.
void aom_lowbd_blend_a64_d16_mask_sse4_1(
    uint8_t *dst, uint32_t dst_stride, const CONV_BUF_TYPE *src0,
    uint32_t src0_stride, const CONV_BUF_TYPE *src1, uint32_t src1_stride,
    const uint8_t *mask, uint32_t mask_stride, int w, int h, int subw,
    int subh, ConvolveParams *conv_params) {
  const int bd = 8;
  // Bits of rounding still pending after the two convolution round stages.
  const int round_bits =
      2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;
  // Bias removed from the intermediate sums before the final shift.
  const int round_offset =
      ((1 << (round_bits + bd)) + (1 << (round_bits + bd - 1)) -
       (1 << (round_bits - 1)))
      << AOM_BLEND_A64_ROUND_BITS;
  const int shift = round_bits + AOM_BLEND_A64_ROUND_BITS;

  // In-place operation is only legal when the strides match.
  assert(IMPLIES((void *)src0 == dst, src0_stride == dst_stride));
  assert(IMPLIES((void *)src1 == dst, src1_stride == dst_stride));

  assert(h >= 4);
  assert(w >= 4);
  assert(IS_POWER_OF_TWO(h));
  assert(IS_POWER_OF_TWO(w));

  const __m128i v_round_offset = _mm_set1_epi32(round_offset);

  if (subw == 0 && subh == 0) {
    // Mask at full resolution.
    if (w == 4) {
      aom_lowbd_blend_a64_d16_mask_subw0_subh0_w4_sse4_1(
          dst, dst_stride, src0, src0_stride, src1, src1_stride, mask,
          mask_stride, h, &v_round_offset, shift);
    } else if (w == 8) {
      aom_lowbd_blend_a64_d16_mask_subw0_subh0_w8_sse4_1(
          dst, dst_stride, src0, src0_stride, src1, src1_stride, mask,
          mask_stride, h, &v_round_offset, shift);
    } else {
      lowbd_blend_a64_d16_mask_subw0_subh0_w16_sse4_1(
          dst, dst_stride, src0, src0_stride, src1, src1_stride, mask,
          mask_stride, h, w, &v_round_offset, shift);
    }
  } else if (subw == 1 && subh == 1) {
    // Mask subsampled in both directions (2x2 average).
    if (w == 4) {
      aom_lowbd_blend_a64_d16_mask_subw1_subh1_w4_sse4_1(
          dst, dst_stride, src0, src0_stride, src1, src1_stride, mask,
          mask_stride, h, &v_round_offset, shift);
    } else if (w == 8) {
      aom_lowbd_blend_a64_d16_mask_subw1_subh1_w8_sse4_1(
          dst, dst_stride, src0, src0_stride, src1, src1_stride, mask,
          mask_stride, h, &v_round_offset, shift);
    } else {
      lowbd_blend_a64_d16_mask_subw1_subh1_w16_sse4_1(
          dst, dst_stride, src0, src0_stride, src1, src1_stride, mask,
          mask_stride, h, w, &v_round_offset, shift);
    }
  } else if (subw == 1 && subh == 0) {
    // Mask subsampled horizontally only (average of adjacent columns).
    if (w == 4) {
      aom_lowbd_blend_a64_d16_mask_subw1_subh0_w4_sse4_1(
          dst, dst_stride, src0, src0_stride, src1, src1_stride, mask,
          mask_stride, h, &v_round_offset, shift);
    } else if (w == 8) {
      aom_lowbd_blend_a64_d16_mask_subw1_subh0_w8_sse4_1(
          dst, dst_stride, src0, src0_stride, src1, src1_stride, mask,
          mask_stride, h, &v_round_offset, shift);
    } else {
      lowbd_blend_a64_d16_mask_subw1_subh0_w16_sse4_1(
          dst, dst_stride, src0, src0_stride, src1, src1_stride, mask,
          mask_stride, h, w, &v_round_offset, shift);
    }
  } else {
    // Remaining case: mask subsampled vertically only.
    if (w == 4) {
      aom_lowbd_blend_a64_d16_mask_subw0_subh1_w4_sse4_1(
          dst, dst_stride, src0, src0_stride, src1, src1_stride, mask,
          mask_stride, h, &v_round_offset, shift);
    } else if (w == 8) {
      aom_lowbd_blend_a64_d16_mask_subw0_subh1_w8_sse4_1(
          dst, dst_stride, src0, src0_stride, src1, src1_stride, mask,
          mask_stride, h, &v_round_offset, shift);
    } else {
      lowbd_blend_a64_d16_mask_subw0_subh1_w16_sse4_1(
          dst, dst_stride, src0, src0_stride, src1, src1_stride, mask,
          mask_stride, h, w, &v_round_offset, shift);
    }
  }
}
pushq %rbp pushq %r14 pushq %rbx subq $0xd0, %rsp movq 0x120(%rsp), %rax movl 0x118(%rsp), %eax movl 0x110(%rsp), %eax movl 0x108(%rsp), %eax movl 0x100(%rsp), %eax movl 0xf8(%rsp), %eax movq 0xf0(%rsp), %rax movq %rdi, 0xa0(%rsp) movl %esi, 0x9c(%rsp) movq %rdx, 0x90(%rsp) movl %ecx, 0x8c(%rsp) movq %r8, 0x80(%rsp) movl %r9d, 0x7c(%rsp) movl $0x8, 0x78(%rsp) movq 0x120(%rsp), %rax movl 0x14(%rax), %ecx movl 0x18(%rax), %eax addl %eax, %ecx movl $0xe, %eax subl %ecx, %eax movl %eax, 0x74(%rsp) movb 0x74(%rsp), %cl movb %cl, 0x4f(%rsp) addb $0x8, %cl movl $0x1, %edx movl %edx, %eax shll %cl, %eax movb 0x4f(%rsp), %cl addb $0x7, %cl movl %edx, %esi shll %cl, %esi movb 0x4f(%rsp), %cl addl %esi, %eax decb %cl shll %cl, %edx movl %edx, %ecx subl %ecx, %eax shll $0x6, %eax movl %eax, 0x70(%rsp) movl 0x74(%rsp), %eax addl $0x6, %eax movl %eax, 0x6c(%rsp) movl 0x70(%rsp), %eax movl %eax, 0xac(%rsp) movl 0xac(%rsp), %eax movl %eax, 0xcc(%rsp) movl %eax, 0xc8(%rsp) movl %eax, 0xc4(%rsp) movl %eax, 0xc0(%rsp) movl 0xc4(%rsp), %edx movl 0xc8(%rsp), %ecx movl 0xcc(%rsp), %eax movd 0xc0(%rsp), %xmm0 pinsrd $0x1, %edx, %xmm0 pinsrd $0x2, %ecx, %xmm0 pinsrd $0x3, %eax, %xmm0 movdqa %xmm0, 0xb0(%rsp) movdqa 0xb0(%rsp), %xmm0 movdqa %xmm0, 0x50(%rsp) cmpl $0x0, 0x110(%rsp) jne 0x429486 cmpl $0x0, 0x118(%rsp) jne 0x429486 movl 0x100(%rsp), %eax movl %eax, 0x48(%rsp) subl $0x4, %eax je 0x42933d jmp 0x42932f movl 0x48(%rsp), %eax subl $0x8, %eax je 0x4293a8 jmp 0x429410 movq 0xa0(%rsp), %rdi movl 0x9c(%rsp), %esi movq 0x90(%rsp), %rdx movl 0x8c(%rsp), %ecx movq 0x80(%rsp), %r8 movl 0x7c(%rsp), %r9d movq 0xf0(%rsp), %r14 movl 0xf8(%rsp), %ebx movl 0x108(%rsp), %r11d movl 0x6c(%rsp), %eax leaq 0x50(%rsp), %r10 movq %r14, (%rsp) movl %ebx, 0x8(%rsp) movl %r11d, 0x10(%rsp) movq %r10, 0x18(%rsp) movl %eax, 0x20(%rsp) callq 0x429910 jmp 0x429481 movq 0xa0(%rsp), %rdi movl 0x9c(%rsp), %esi movq 0x90(%rsp), %rdx movl 0x8c(%rsp), %ecx movq 0x80(%rsp), %r8 movl 0x7c(%rsp), %r9d movq 0xf0(%rsp), 
%r14 movl 0xf8(%rsp), %ebx movl 0x108(%rsp), %r11d movl 0x6c(%rsp), %eax leaq 0x50(%rsp), %r10 movq %r14, (%rsp) movl %ebx, 0x8(%rsp) movl %r11d, 0x10(%rsp) movq %r10, 0x18(%rsp) movl %eax, 0x20(%rsp) callq 0x429b30 jmp 0x429481 movq 0xa0(%rsp), %rdi movl 0x9c(%rsp), %esi movq 0x90(%rsp), %rdx movl 0x8c(%rsp), %ecx movq 0x80(%rsp), %r8 movl 0x7c(%rsp), %r9d movq 0xf0(%rsp), %r14 movl 0xf8(%rsp), %ebp movl 0x108(%rsp), %ebx movl 0x100(%rsp), %r11d movl 0x6c(%rsp), %eax leaq 0x50(%rsp), %r10 movq %r14, (%rsp) movl %ebp, 0x8(%rsp) movl %ebx, 0x10(%rsp) movl %r11d, 0x18(%rsp) movq %r10, 0x20(%rsp) movl %eax, 0x28(%rsp) callq 0x429d50 jmp 0x4298fa cmpl $0x1, 0x110(%rsp) jne 0x42960b cmpl $0x1, 0x118(%rsp) jne 0x42960b movl 0x100(%rsp), %eax movl %eax, 0x44(%rsp) subl $0x4, %eax je 0x4294c2 jmp 0x4294b4 movl 0x44(%rsp), %eax subl $0x8, %eax je 0x42952d jmp 0x429595 movq 0xa0(%rsp), %rdi movl 0x9c(%rsp), %esi movq 0x90(%rsp), %rdx movl 0x8c(%rsp), %ecx movq 0x80(%rsp), %r8 movl 0x7c(%rsp), %r9d movq 0xf0(%rsp), %r14 movl 0xf8(%rsp), %ebx movl 0x108(%rsp), %r11d movl 0x6c(%rsp), %eax leaq 0x50(%rsp), %r10 movq %r14, (%rsp) movl %ebx, 0x8(%rsp) movl %r11d, 0x10(%rsp) movq %r10, 0x18(%rsp) movl %eax, 0x20(%rsp) callq 0x42a010 jmp 0x429606 movq 0xa0(%rsp), %rdi movl 0x9c(%rsp), %esi movq 0x90(%rsp), %rdx movl 0x8c(%rsp), %ecx movq 0x80(%rsp), %r8 movl 0x7c(%rsp), %r9d movq 0xf0(%rsp), %r14 movl 0xf8(%rsp), %ebx movl 0x108(%rsp), %r11d movl 0x6c(%rsp), %eax leaq 0x50(%rsp), %r10 movq %r14, (%rsp) movl %ebx, 0x8(%rsp) movl %r11d, 0x10(%rsp) movq %r10, 0x18(%rsp) movl %eax, 0x20(%rsp) callq 0x42a5a0 jmp 0x429606 movq 0xa0(%rsp), %rdi movl 0x9c(%rsp), %esi movq 0x90(%rsp), %rdx movl 0x8c(%rsp), %ecx movq 0x80(%rsp), %r8 movl 0x7c(%rsp), %r9d movq 0xf0(%rsp), %r14 movl 0xf8(%rsp), %ebp movl 0x108(%rsp), %ebx movl 0x100(%rsp), %r11d movl 0x6c(%rsp), %eax leaq 0x50(%rsp), %r10 movq %r14, (%rsp) movl %ebp, 0x8(%rsp) movl %ebx, 0x10(%rsp) movl %r11d, 0x18(%rsp) movq %r10, 0x20(%rsp) 
movl %eax, 0x28(%rsp) callq 0x42ab30 jmp 0x4298f8 cmpl $0x1, 0x110(%rsp) jne 0x429790 cmpl $0x0, 0x118(%rsp) jne 0x429790 movl 0x100(%rsp), %eax movl %eax, 0x40(%rsp) subl $0x4, %eax je 0x429647 jmp 0x429639 movl 0x40(%rsp), %eax subl $0x8, %eax je 0x4296b2 jmp 0x42971a movq 0xa0(%rsp), %rdi movl 0x9c(%rsp), %esi movq 0x90(%rsp), %rdx movl 0x8c(%rsp), %ecx movq 0x80(%rsp), %r8 movl 0x7c(%rsp), %r9d movq 0xf0(%rsp), %r14 movl 0xf8(%rsp), %ebx movl 0x108(%rsp), %r11d movl 0x6c(%rsp), %eax leaq 0x50(%rsp), %r10 movq %r14, (%rsp) movl %ebx, 0x8(%rsp) movl %r11d, 0x10(%rsp) movq %r10, 0x18(%rsp) movl %eax, 0x20(%rsp) callq 0x42b290 jmp 0x42978b movq 0xa0(%rsp), %rdi movl 0x9c(%rsp), %esi movq 0x90(%rsp), %rdx movl 0x8c(%rsp), %ecx movq 0x80(%rsp), %r8 movl 0x7c(%rsp), %r9d movq 0xf0(%rsp), %r14 movl 0xf8(%rsp), %ebx movl 0x108(%rsp), %r11d movl 0x6c(%rsp), %eax leaq 0x50(%rsp), %r10 movq %r14, (%rsp) movl %ebx, 0x8(%rsp) movl %r11d, 0x10(%rsp) movq %r10, 0x18(%rsp) movl %eax, 0x20(%rsp) callq 0x42b6c0 jmp 0x42978b movq 0xa0(%rsp), %rdi movl 0x9c(%rsp), %esi movq 0x90(%rsp), %rdx movl 0x8c(%rsp), %ecx movq 0x80(%rsp), %r8 movl 0x7c(%rsp), %r9d movq 0xf0(%rsp), %r14 movl 0xf8(%rsp), %ebp movl 0x108(%rsp), %ebx movl 0x100(%rsp), %r11d movl 0x6c(%rsp), %eax leaq 0x50(%rsp), %r10 movq %r14, (%rsp) movl %ebp, 0x8(%rsp) movl %ebx, 0x10(%rsp) movl %r11d, 0x18(%rsp) movq %r10, 0x20(%rsp) movl %eax, 0x28(%rsp) callq 0x42baf0 jmp 0x4298f6 movl 0x100(%rsp), %eax movl %eax, 0x3c(%rsp) subl $0x4, %eax je 0x4297b0 jmp 0x4297a2 movl 0x3c(%rsp), %eax subl $0x8, %eax je 0x42981b jmp 0x429883 movq 0xa0(%rsp), %rdi movl 0x9c(%rsp), %esi movq 0x90(%rsp), %rdx movl 0x8c(%rsp), %ecx movq 0x80(%rsp), %r8 movl 0x7c(%rsp), %r9d movq 0xf0(%rsp), %r14 movl 0xf8(%rsp), %ebx movl 0x108(%rsp), %r11d movl 0x6c(%rsp), %eax leaq 0x50(%rsp), %r10 movq %r14, (%rsp) movl %ebx, 0x8(%rsp) movl %r11d, 0x10(%rsp) movq %r10, 0x18(%rsp) movl %eax, 0x20(%rsp) callq 0x42c050 jmp 0x4298f4 movq 0xa0(%rsp), %rdi movl 
0x9c(%rsp), %esi movq 0x90(%rsp), %rdx movl 0x8c(%rsp), %ecx movq 0x80(%rsp), %r8 movl 0x7c(%rsp), %r9d movq 0xf0(%rsp), %r14 movl 0xf8(%rsp), %ebx movl 0x108(%rsp), %r11d movl 0x6c(%rsp), %eax leaq 0x50(%rsp), %r10 movq %r14, (%rsp) movl %ebx, 0x8(%rsp) movl %r11d, 0x10(%rsp) movq %r10, 0x18(%rsp) movl %eax, 0x20(%rsp) callq 0x42c320 jmp 0x4298f4 movq 0xa0(%rsp), %rdi movl 0x9c(%rsp), %esi movq 0x90(%rsp), %rdx movl 0x8c(%rsp), %ecx movq 0x80(%rsp), %r8 movl 0x7c(%rsp), %r9d movq 0xf0(%rsp), %r14 movl 0xf8(%rsp), %ebp movl 0x108(%rsp), %ebx movl 0x100(%rsp), %r11d movl 0x6c(%rsp), %eax leaq 0x50(%rsp), %r10 movq %r14, (%rsp) movl %ebp, 0x8(%rsp) movl %ebx, 0x10(%rsp) movl %r11d, 0x18(%rsp) movq %r10, 0x20(%rsp) movl %eax, 0x28(%rsp) callq 0x42c5f0 jmp 0x4298f6 jmp 0x4298f8 jmp 0x4298fa addq $0xd0, %rsp popq %rbx popq %r14 popq %rbp retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/aom_dsp/x86/blend_a64_mask_sse4.c
aom_lowbd_blend_a64_d16_mask_subw0_subh0_w4_sse4_1
static inline void aom_lowbd_blend_a64_d16_mask_subw0_subh0_w4_sse4_1( uint8_t *dst, uint32_t dst_stride, const CONV_BUF_TYPE *src0, uint32_t src0_stride, const CONV_BUF_TYPE *src1, uint32_t src1_stride, const uint8_t *mask, uint32_t mask_stride, int h, const __m128i *round_offset, int shift) { const __m128i v_maxval = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA); for (int i = 0; i < h; ++i) { const __m128i m0 = xx_loadl_32(mask); const __m128i m = _mm_cvtepu8_epi16(m0); blend_a64_d16_mask_w4_sse41(dst, src0, src1, &m, round_offset, &v_maxval, shift); mask += mask_stride; dst += dst_stride; src0 += src0_stride; src1 += src1_stride; } }
subq $0xc8, %rsp movl 0xf0(%rsp), %eax movq 0xe8(%rsp), %rax movl 0xe0(%rsp), %eax movl 0xd8(%rsp), %eax movq 0xd0(%rsp), %rax movq %rdi, 0x80(%rsp) movl %esi, 0x7c(%rsp) movq %rdx, 0x70(%rsp) movl %ecx, 0x6c(%rsp) movq %r8, 0x60(%rsp) movl %r9d, 0x5c(%rsp) movw $0x40, 0x8e(%rsp) movw 0x8e(%rsp), %ax movw %ax, 0xe(%rsp) movw %ax, 0xc6(%rsp) movw %ax, 0xc4(%rsp) movw %ax, 0xc2(%rsp) movw %ax, 0xc0(%rsp) movw %ax, 0xbe(%rsp) movw %ax, 0xbc(%rsp) movw %ax, 0xba(%rsp) movw %ax, 0xb8(%rsp) movzwl 0xb8(%rsp), %eax movd %eax, %xmm0 movzwl 0xba(%rsp), %eax pinsrw $0x1, %eax, %xmm0 movzwl 0xbc(%rsp), %eax pinsrw $0x2, %eax, %xmm0 movzwl 0xbe(%rsp), %eax pinsrw $0x3, %eax, %xmm0 movzwl 0xc0(%rsp), %eax pinsrw $0x4, %eax, %xmm0 movzwl 0xc2(%rsp), %eax pinsrw $0x5, %eax, %xmm0 movzwl 0xc4(%rsp), %eax pinsrw $0x6, %eax, %xmm0 movzwl 0xc6(%rsp), %eax pinsrw $0x7, %eax, %xmm0 movdqa %xmm0, 0xa0(%rsp) movdqa 0xa0(%rsp), %xmm0 movdqa %xmm0, 0x40(%rsp) movl $0x0, 0x3c(%rsp) movl 0x3c(%rsp), %eax cmpl 0xe0(%rsp), %eax jge 0x429b1d movq 0xd0(%rsp), %rdi callq 0x42efb0 movaps %xmm0, 0x20(%rsp) movaps 0x20(%rsp), %xmm0 movaps %xmm0, 0x90(%rsp) pmovzxbw 0x90(%rsp), %xmm0 movdqa %xmm0, 0x10(%rsp) movq 0x80(%rsp), %rdi movq 0x70(%rsp), %rsi movq 0x60(%rsp), %rdx movq 0xe8(%rsp), %r8 movl 0xf0(%rsp), %eax leaq 0x10(%rsp), %rcx leaq 0x40(%rsp), %r9 movl %eax, (%rsp) callq 0x431820 movl 0xd8(%rsp), %ecx movq 0xd0(%rsp), %rax movl %ecx, %ecx addq %rcx, %rax movq %rax, 0xd0(%rsp) movl 0x7c(%rsp), %ecx movq 0x80(%rsp), %rax movl %ecx, %ecx addq %rcx, %rax movq %rax, 0x80(%rsp) movl 0x6c(%rsp), %ecx movq 0x70(%rsp), %rax movl %ecx, %ecx shlq %rcx addq %rcx, %rax movq %rax, 0x70(%rsp) movl 0x5c(%rsp), %ecx movq 0x60(%rsp), %rax movl %ecx, %ecx shlq %rcx addq %rcx, %rax movq %rax, 0x60(%rsp) movl 0x3c(%rsp), %eax addl $0x1, %eax movl %eax, 0x3c(%rsp) jmp 0x429a39 addq $0xc8, %rsp retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/aom_dsp/x86/blend_mask_sse4.h
aom_lowbd_blend_a64_d16_mask_subw1_subh1_w8_sse4_1
// Width-8 d16 blend with the mask subsampled 2x both ways: two mask rows
// are averaged 2x2 per output pixel via saturating add + horizontal pair
// sum + rounded shift, then fed to the 8-wide blend helper.
static inline void aom_lowbd_blend_a64_d16_mask_subw1_subh1_w8_sse4_1(
    uint8_t *dst, uint32_t dst_stride, const CONV_BUF_TYPE *src0,
    uint32_t src0_stride, const CONV_BUF_TYPE *src1, uint32_t src1_stride,
    const uint8_t *mask, uint32_t mask_stride, int h,
    const __m128i *round_offset, int shift) {
  const __m128i v_maxval = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
  const __m128i one_b = _mm_set1_epi8(1);
  const __m128i two_w = _mm_set1_epi16(2);
  for (int row = 0; row < h; ++row) {
    const __m128i row0 = xx_loadu_128(mask);
    const __m128i row1 = xx_loadu_128(mask + mask_stride);
    // Vertical pair sum (saturating is safe: 2 * 64 < 255), then
    // horizontal pair sum via maddubs with a vector of ones.
    const __m128i vsum = _mm_adds_epu8(row0, row1);
    const __m128i quad_sum = _mm_maddubs_epi16(vsum, one_b);
    // (sum + 2) >> 2 gives the rounded 2x2 average.
    const __m128i m = _mm_srli_epi16(_mm_add_epi16(quad_sum, two_w), 2);
    blend_a64_d16_mask_w8_sse41(dst, src0, src1, &m, round_offset, &v_maxval,
                                shift);
    // The mask advances two rows per output row; the planes advance one.
    mask += mask_stride << 1;
    dst += dst_stride;
    src0 += src0_stride;
    src1 += src1_stride;
  }
}
subq $0x1d8, %rsp # imm = 0x1D8 movl 0x200(%rsp), %eax movq 0x1f8(%rsp), %rax movl 0x1f0(%rsp), %eax movl 0x1e8(%rsp), %eax movq 0x1e0(%rsp), %rax movq %rdi, 0xe0(%rsp) movl %esi, 0xdc(%rsp) movq %rdx, 0xd0(%rsp) movl %ecx, 0xcc(%rsp) movq %r8, 0xc0(%rsp) movl %r9d, 0xbc(%rsp) movw $0x40, 0xec(%rsp) movw 0xec(%rsp), %ax movw %ax, 0xa(%rsp) movw %ax, 0x1ae(%rsp) movw %ax, 0x1ac(%rsp) movw %ax, 0x1aa(%rsp) movw %ax, 0x1a8(%rsp) movw %ax, 0x1a6(%rsp) movw %ax, 0x1a4(%rsp) movw %ax, 0x1a2(%rsp) movw %ax, 0x1a0(%rsp) movzwl 0x1a0(%rsp), %eax movd %eax, %xmm0 movzwl 0x1a2(%rsp), %eax pinsrw $0x1, %eax, %xmm0 movzwl 0x1a4(%rsp), %eax pinsrw $0x2, %eax, %xmm0 movzwl 0x1a6(%rsp), %eax pinsrw $0x3, %eax, %xmm0 movzwl 0x1a8(%rsp), %eax pinsrw $0x4, %eax, %xmm0 movzwl 0x1aa(%rsp), %eax pinsrw $0x5, %eax, %xmm0 movzwl 0x1ac(%rsp), %eax pinsrw $0x6, %eax, %xmm0 movzwl 0x1ae(%rsp), %eax pinsrw $0x7, %eax, %xmm0 movaps %xmm0, 0x190(%rsp) movaps 0x190(%rsp), %xmm0 movaps %xmm0, 0xa0(%rsp) movb $0x1, 0xef(%rsp) movb 0xef(%rsp), %al movb %al, 0xd(%rsp) movb %al, 0x10f(%rsp) movb %al, 0x10e(%rsp) movb %al, 0x10d(%rsp) movb %al, 0x10c(%rsp) movb %al, 0x10b(%rsp) movb %al, 0x10a(%rsp) movb %al, 0x109(%rsp) movb %al, 0x108(%rsp) movb %al, 0x107(%rsp) movb %al, 0x106(%rsp) movb %al, 0x105(%rsp) movb %al, 0x104(%rsp) movb %al, 0x103(%rsp) movb %al, 0x102(%rsp) movb %al, 0x101(%rsp) movb %al, 0x100(%rsp) movzbl 0x100(%rsp), %eax movd %eax, %xmm0 movzbl 0x101(%rsp), %eax pinsrb $0x1, %eax, %xmm0 movzbl 0x102(%rsp), %eax pinsrb $0x2, %eax, %xmm0 movzbl 0x103(%rsp), %eax pinsrb $0x3, %eax, %xmm0 movzbl 0x104(%rsp), %eax pinsrb $0x4, %eax, %xmm0 movzbl 0x105(%rsp), %eax pinsrb $0x5, %eax, %xmm0 movzbl 0x106(%rsp), %eax pinsrb $0x6, %eax, %xmm0 movzbl 0x107(%rsp), %eax pinsrb $0x7, %eax, %xmm0 movzbl 0x108(%rsp), %eax pinsrb $0x8, %eax, %xmm0 movzbl 0x109(%rsp), %eax pinsrb $0x9, %eax, %xmm0 movzbl 0x10a(%rsp), %eax pinsrb $0xa, %eax, %xmm0 movzbl 0x10b(%rsp), %eax pinsrb $0xb, %eax, %xmm0 
movzbl 0x10c(%rsp), %eax pinsrb $0xc, %eax, %xmm0 movzbl 0x10d(%rsp), %eax pinsrb $0xd, %eax, %xmm0 movzbl 0x10e(%rsp), %eax pinsrb $0xe, %eax, %xmm0 movzbl 0x10f(%rsp), %eax pinsrb $0xf, %eax, %xmm0 movaps %xmm0, 0xf0(%rsp) movaps 0xf0(%rsp), %xmm0 movaps %xmm0, 0x90(%rsp) movw $0x2, 0xea(%rsp) movw 0xea(%rsp), %ax movw %ax, 0xe(%rsp) movw %ax, 0x1d6(%rsp) movw %ax, 0x1d4(%rsp) movw %ax, 0x1d2(%rsp) movw %ax, 0x1d0(%rsp) movw %ax, 0x1ce(%rsp) movw %ax, 0x1cc(%rsp) movw %ax, 0x1ca(%rsp) movw %ax, 0x1c8(%rsp) movzwl 0x1c8(%rsp), %eax movd %eax, %xmm0 movzwl 0x1ca(%rsp), %eax pinsrw $0x1, %eax, %xmm0 movzwl 0x1cc(%rsp), %eax pinsrw $0x2, %eax, %xmm0 movzwl 0x1ce(%rsp), %eax pinsrw $0x3, %eax, %xmm0 movzwl 0x1d0(%rsp), %eax pinsrw $0x4, %eax, %xmm0 movzwl 0x1d2(%rsp), %eax pinsrw $0x5, %eax, %xmm0 movzwl 0x1d4(%rsp), %eax pinsrw $0x6, %eax, %xmm0 movzwl 0x1d6(%rsp), %eax pinsrw $0x7, %eax, %xmm0 movdqa %xmm0, 0x1b0(%rsp) movdqa 0x1b0(%rsp), %xmm0 movdqa %xmm0, 0x80(%rsp) movl $0x0, 0x7c(%rsp) movl 0x7c(%rsp), %eax cmpl 0x1f0(%rsp), %eax jge 0x42ab1c movq 0x1e0(%rsp), %rdi callq 0x42eca0 movdqa %xmm0, 0x60(%rsp) movq 0x1e0(%rsp), %rdi movl 0x1e8(%rsp), %eax addq %rax, %rdi callq 0x42eca0 movdqa %xmm0, 0x50(%rsp) movdqa 0x60(%rsp), %xmm1 movdqa 0x50(%rsp), %xmm0 movdqa %xmm1, 0x180(%rsp) movdqa %xmm0, 0x170(%rsp) movdqa 0x180(%rsp), %xmm0 movdqa 0x170(%rsp), %xmm1 paddusb %xmm1, %xmm0 movdqa %xmm0, 0x40(%rsp) movdqa 0x40(%rsp), %xmm1 movdqa 0x90(%rsp), %xmm0 movdqa %xmm1, 0x120(%rsp) movdqa %xmm0, 0x110(%rsp) movdqa 0x120(%rsp), %xmm0 movdqa 0x110(%rsp), %xmm1 pmaddubsw %xmm1, %xmm0 movdqa %xmm0, 0x30(%rsp) movdqa 0x30(%rsp), %xmm1 movdqa 0x80(%rsp), %xmm0 movdqa %xmm1, 0x140(%rsp) movdqa %xmm0, 0x130(%rsp) movdqa 0x140(%rsp), %xmm0 movdqa 0x130(%rsp), %xmm1 paddw %xmm1, %xmm0 movdqa %xmm0, 0x20(%rsp) movdqa 0x20(%rsp), %xmm0 movdqa %xmm0, 0x160(%rsp) movl $0x2, 0x15c(%rsp) movdqa 0x160(%rsp), %xmm0 movl 0x15c(%rsp), %eax movd %eax, %xmm1 psrlw %xmm1, %xmm0 movdqa 
%xmm0, 0x10(%rsp) movq 0xe0(%rsp), %rdi movq 0xd0(%rsp), %rsi movq 0xc0(%rsp), %rdx movq 0x1f8(%rsp), %r8 movl 0x200(%rsp), %eax leaq 0x10(%rsp), %rcx leaq 0xa0(%rsp), %r9 movl %eax, (%rsp) callq 0x431a80 movl 0x1e8(%rsp), %ecx shll %ecx movq 0x1e0(%rsp), %rax movl %ecx, %ecx addq %rcx, %rax movq %rax, 0x1e0(%rsp) movl 0xdc(%rsp), %ecx movq 0xe0(%rsp), %rax movl %ecx, %ecx addq %rcx, %rax movq %rax, 0xe0(%rsp) movl 0xcc(%rsp), %ecx movq 0xd0(%rsp), %rax movl %ecx, %ecx shlq %rcx addq %rcx, %rax movq %rax, 0xd0(%rsp) movl 0xbc(%rsp), %ecx movq 0xc0(%rsp), %rax movl %ecx, %ecx shlq %rcx addq %rcx, %rax movq %rax, 0xc0(%rsp) movl 0x7c(%rsp), %eax addl $0x1, %eax movl %eax, 0x7c(%rsp) jmp 0x42a92a addq $0x1d8, %rsp # imm = 0x1D8 retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/aom_dsp/x86/blend_mask_sse4.h
lowbd_blend_a64_d16_mask_subw1_subh1_w16_sse4_1
// Blends two 16-bit convolution buffers into 8-bit dst, w >= 16, with the
// alpha mask stored at twice the output resolution in both directions
// (subw == 1, subh == 1). Each output pixel's alpha is the rounded average
// of a 2x2 block of mask bytes. Processes 16 output pixels per inner step.
static inline void lowbd_blend_a64_d16_mask_subw1_subh1_w16_sse4_1(
    uint8_t *dst, uint32_t dst_stride, const CONV_BUF_TYPE *src0,
    uint32_t src0_stride, const CONV_BUF_TYPE *src1, uint32_t src1_stride,
    const uint8_t *mask, uint32_t mask_stride, int h, int w,
    const __m128i *round_offset, int shift) {
  const __m128i v_maxval = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
  const __m128i one_b = _mm_set1_epi8(1);
  const __m128i two_w = _mm_set1_epi16(2);
  for (int i = 0; i < h; ++i) {
    for (int j = 0; j < w; j += 16) {
      // 32 mask bytes from each of the two mask rows covering this output
      // row (mask is at 2x resolution, hence the 2 * j column offset).
      const __m128i m_i00 = xx_loadu_128(mask + 2 * j);
      const __m128i m_i01 = xx_loadu_128(mask + 2 * j + 16);
      const __m128i m_i10 = xx_loadu_128(mask + mask_stride + 2 * j);
      const __m128i m_i11 = xx_loadu_128(mask + mask_stride + 2 * j + 16);
      // Vertical pair sums; saturating add is exact here since mask values
      // are at most AOM_BLEND_A64_MAX_ALPHA (64), so 2 * 64 <= 255.
      const __m128i m0_ac = _mm_adds_epu8(m_i00, m_i10);
      const __m128i m1_ac = _mm_adds_epu8(m_i01, m_i11);
      // Horizontal pair sums: maddubs with all-ones adds adjacent bytes
      // into 16-bit lanes, completing the 2x2 accumulation.
      const __m128i m0_acbd = _mm_maddubs_epi16(m0_ac, one_b);
      const __m128i m1_acbd = _mm_maddubs_epi16(m1_ac, one_b);
      // Rounded divide by 4: (sum + 2) >> 2.
      const __m128i m0 = _mm_srli_epi16(_mm_add_epi16(m0_acbd, two_w), 2);
      const __m128i m1 = _mm_srli_epi16(_mm_add_epi16(m1_acbd, two_w), 2);
      blend_a64_d16_mask_w16_sse41(dst + j, src0 + j, src1 + j, &m0, &m1,
                                   round_offset, &v_maxval, shift);
    }
    // Mask advances two rows per output row; the pixel planes advance one.
    mask += mask_stride << 1;
    dst += dst_stride;
    src0 += src0_stride;
    src1 += src1_stride;
  }
}
subq $0x2a8, %rsp # imm = 0x2A8 movl 0x2d8(%rsp), %eax movq 0x2d0(%rsp), %rax movl 0x2c8(%rsp), %eax movl 0x2c0(%rsp), %eax movl 0x2b8(%rsp), %eax movq 0x2b0(%rsp), %rax movq %rdi, 0x130(%rsp) movl %esi, 0x12c(%rsp) movq %rdx, 0x120(%rsp) movl %ecx, 0x11c(%rsp) movq %r8, 0x110(%rsp) movl %r9d, 0x10c(%rsp) movw $0x40, 0x13c(%rsp) movw 0x13c(%rsp), %ax movw %ax, 0x1a(%rsp) movw %ax, 0x27e(%rsp) movw %ax, 0x27c(%rsp) movw %ax, 0x27a(%rsp) movw %ax, 0x278(%rsp) movw %ax, 0x276(%rsp) movw %ax, 0x274(%rsp) movw %ax, 0x272(%rsp) movw %ax, 0x270(%rsp) movzwl 0x270(%rsp), %eax movd %eax, %xmm0 movzwl 0x272(%rsp), %eax pinsrw $0x1, %eax, %xmm0 movzwl 0x274(%rsp), %eax pinsrw $0x2, %eax, %xmm0 movzwl 0x276(%rsp), %eax pinsrw $0x3, %eax, %xmm0 movzwl 0x278(%rsp), %eax pinsrw $0x4, %eax, %xmm0 movzwl 0x27a(%rsp), %eax pinsrw $0x5, %eax, %xmm0 movzwl 0x27c(%rsp), %eax pinsrw $0x6, %eax, %xmm0 movzwl 0x27e(%rsp), %eax pinsrw $0x7, %eax, %xmm0 movaps %xmm0, 0x260(%rsp) movaps 0x260(%rsp), %xmm0 movaps %xmm0, 0xf0(%rsp) movb $0x1, 0x13f(%rsp) movb 0x13f(%rsp), %al movb %al, 0x1d(%rsp) movb %al, 0x15f(%rsp) movb %al, 0x15e(%rsp) movb %al, 0x15d(%rsp) movb %al, 0x15c(%rsp) movb %al, 0x15b(%rsp) movb %al, 0x15a(%rsp) movb %al, 0x159(%rsp) movb %al, 0x158(%rsp) movb %al, 0x157(%rsp) movb %al, 0x156(%rsp) movb %al, 0x155(%rsp) movb %al, 0x154(%rsp) movb %al, 0x153(%rsp) movb %al, 0x152(%rsp) movb %al, 0x151(%rsp) movb %al, 0x150(%rsp) movzbl 0x150(%rsp), %eax movd %eax, %xmm0 movzbl 0x151(%rsp), %eax pinsrb $0x1, %eax, %xmm0 movzbl 0x152(%rsp), %eax pinsrb $0x2, %eax, %xmm0 movzbl 0x153(%rsp), %eax pinsrb $0x3, %eax, %xmm0 movzbl 0x154(%rsp), %eax pinsrb $0x4, %eax, %xmm0 movzbl 0x155(%rsp), %eax pinsrb $0x5, %eax, %xmm0 movzbl 0x156(%rsp), %eax pinsrb $0x6, %eax, %xmm0 movzbl 0x157(%rsp), %eax pinsrb $0x7, %eax, %xmm0 movzbl 0x158(%rsp), %eax pinsrb $0x8, %eax, %xmm0 movzbl 0x159(%rsp), %eax pinsrb $0x9, %eax, %xmm0 movzbl 0x15a(%rsp), %eax pinsrb $0xa, %eax, %xmm0 movzbl 0x15b(%rsp), 
%eax pinsrb $0xb, %eax, %xmm0 movzbl 0x15c(%rsp), %eax pinsrb $0xc, %eax, %xmm0 movzbl 0x15d(%rsp), %eax pinsrb $0xd, %eax, %xmm0 movzbl 0x15e(%rsp), %eax pinsrb $0xe, %eax, %xmm0 movzbl 0x15f(%rsp), %eax pinsrb $0xf, %eax, %xmm0 movaps %xmm0, 0x140(%rsp) movaps 0x140(%rsp), %xmm0 movaps %xmm0, 0xe0(%rsp) movw $0x2, 0x13a(%rsp) movw 0x13a(%rsp), %ax movw %ax, 0x1e(%rsp) movw %ax, 0x2a6(%rsp) movw %ax, 0x2a4(%rsp) movw %ax, 0x2a2(%rsp) movw %ax, 0x2a0(%rsp) movw %ax, 0x29e(%rsp) movw %ax, 0x29c(%rsp) movw %ax, 0x29a(%rsp) movw %ax, 0x298(%rsp) movzwl 0x298(%rsp), %eax movd %eax, %xmm0 movzwl 0x29a(%rsp), %eax pinsrw $0x1, %eax, %xmm0 movzwl 0x29c(%rsp), %eax pinsrw $0x2, %eax, %xmm0 movzwl 0x29e(%rsp), %eax pinsrw $0x3, %eax, %xmm0 movzwl 0x2a0(%rsp), %eax pinsrw $0x4, %eax, %xmm0 movzwl 0x2a2(%rsp), %eax pinsrw $0x5, %eax, %xmm0 movzwl 0x2a4(%rsp), %eax pinsrw $0x6, %eax, %xmm0 movzwl 0x2a6(%rsp), %eax pinsrw $0x7, %eax, %xmm0 movdqa %xmm0, 0x280(%rsp) movdqa 0x280(%rsp), %xmm0 movdqa %xmm0, 0xd0(%rsp) movl $0x0, 0xcc(%rsp) movl 0xcc(%rsp), %eax cmpl 0x2c0(%rsp), %eax jge 0x42b282 movl $0x0, 0xc8(%rsp) movl 0xc8(%rsp), %eax cmpl 0x2c8(%rsp), %eax jge 0x42b1f4 movq 0x2b0(%rsp), %rdi movl 0xc8(%rsp), %eax shll %eax cltq addq %rax, %rdi callq 0x42eca0 movdqa %xmm0, 0xb0(%rsp) movq 0x2b0(%rsp), %rdi movl 0xc8(%rsp), %eax shll %eax cltq addq %rax, %rdi addq $0x10, %rdi callq 0x42eca0 movdqa %xmm0, 0xa0(%rsp) movq 0x2b0(%rsp), %rdi movl 0x2b8(%rsp), %eax addq %rax, %rdi movl 0xc8(%rsp), %eax shll %eax cltq addq %rax, %rdi callq 0x42eca0 movdqa %xmm0, 0x90(%rsp) movq 0x2b0(%rsp), %rdi movl 0x2b8(%rsp), %eax addq %rax, %rdi movl 0xc8(%rsp), %eax shll %eax cltq addq %rax, %rdi addq $0x10, %rdi callq 0x42eca0 movdqa %xmm0, 0x80(%rsp) movdqa 0xb0(%rsp), %xmm1 movdqa 0x90(%rsp), %xmm0 movdqa %xmm1, 0x250(%rsp) movdqa %xmm0, 0x240(%rsp) movdqa 0x250(%rsp), %xmm0 movdqa 0x240(%rsp), %xmm1 paddusb %xmm1, %xmm0 movdqa %xmm0, 0x70(%rsp) movdqa 0xa0(%rsp), %xmm1 movdqa 0x80(%rsp), 
%xmm0 movdqa %xmm1, 0x230(%rsp) movdqa %xmm0, 0x220(%rsp) movdqa 0x230(%rsp), %xmm0 movdqa 0x220(%rsp), %xmm1 paddusb %xmm1, %xmm0 movdqa %xmm0, 0x60(%rsp) movdqa 0x70(%rsp), %xmm1 movdqa 0xe0(%rsp), %xmm0 movdqa %xmm1, 0x190(%rsp) movdqa %xmm0, 0x180(%rsp) movdqa 0x190(%rsp), %xmm0 movdqa 0x180(%rsp), %xmm1 pmaddubsw %xmm1, %xmm0 movdqa %xmm0, 0x50(%rsp) movdqa 0x60(%rsp), %xmm1 movdqa 0xe0(%rsp), %xmm0 movdqa %xmm1, 0x170(%rsp) movdqa %xmm0, 0x160(%rsp) movdqa 0x170(%rsp), %xmm0 movdqa 0x160(%rsp), %xmm1 pmaddubsw %xmm1, %xmm0 movdqa %xmm0, 0x40(%rsp) movdqa 0x50(%rsp), %xmm1 movdqa 0xd0(%rsp), %xmm0 movdqa %xmm1, 0x1d0(%rsp) movdqa %xmm0, 0x1c0(%rsp) movdqa 0x1d0(%rsp), %xmm0 movdqa 0x1c0(%rsp), %xmm1 paddw %xmm1, %xmm0 movdqa %xmm0, 0x210(%rsp) movl $0x2, 0x20c(%rsp) movdqa 0x210(%rsp), %xmm0 movl 0x20c(%rsp), %eax movd %eax, %xmm1 psrlw %xmm1, %xmm0 movdqa %xmm0, 0x30(%rsp) movdqa 0x40(%rsp), %xmm1 movdqa 0xd0(%rsp), %xmm0 movdqa %xmm1, 0x1b0(%rsp) movdqa %xmm0, 0x1a0(%rsp) movdqa 0x1b0(%rsp), %xmm0 movdqa 0x1a0(%rsp), %xmm1 paddw %xmm1, %xmm0 movdqa %xmm0, 0x1f0(%rsp) movl $0x2, 0x1ec(%rsp) movdqa 0x1f0(%rsp), %xmm0 movl 0x1ec(%rsp), %eax movd %eax, %xmm1 psrlw %xmm1, %xmm0 movdqa %xmm0, 0x20(%rsp) movq 0x130(%rsp), %rdi movslq 0xc8(%rsp), %rax addq %rax, %rdi movq 0x120(%rsp), %rsi movslq 0xc8(%rsp), %rax shlq %rax addq %rax, %rsi movq 0x110(%rsp), %rdx movslq 0xc8(%rsp), %rax shlq %rax addq %rax, %rdx movq 0x2d0(%rsp), %r9 movl 0x2d8(%rsp), %eax leaq 0x30(%rsp), %rcx leaq 0x20(%rsp), %r8 leaq 0xf0(%rsp), %r10 movq %r10, (%rsp) movl %eax, 0x8(%rsp) callq 0x431d90 movl 0xc8(%rsp), %eax addl $0x10, %eax movl %eax, 0xc8(%rsp) jmp 0x42aee3 movl 0x2b8(%rsp), %ecx shll %ecx movq 0x2b0(%rsp), %rax movl %ecx, %ecx addq %rcx, %rax movq %rax, 0x2b0(%rsp) movl 0x12c(%rsp), %ecx movq 0x130(%rsp), %rax movl %ecx, %ecx addq %rcx, %rax movq %rax, 0x130(%rsp) movl 0x11c(%rsp), %ecx movq 0x120(%rsp), %rax movl %ecx, %ecx shlq %rcx addq %rcx, %rax movq %rax, 0x120(%rsp) movl 
0x10c(%rsp), %ecx movq 0x110(%rsp), %rax movl %ecx, %ecx shlq %rcx addq %rcx, %rax movq %rax, 0x110(%rsp) movl 0xcc(%rsp), %eax addl $0x1, %eax movl %eax, 0xcc(%rsp) jmp 0x42aec4 addq $0x2a8, %rsp # imm = 0x2A8 retq nopw (%rax,%rax)
/m-ab-s[P]aom/aom_dsp/x86/blend_a64_mask_sse4.c
aom_lowbd_blend_a64_d16_mask_subw1_subh0_w4_sse4_1
static inline void aom_lowbd_blend_a64_d16_mask_subw1_subh0_w4_sse4_1( uint8_t *dst, uint32_t dst_stride, const CONV_BUF_TYPE *src0, uint32_t src0_stride, const CONV_BUF_TYPE *src1, uint32_t src1_stride, const uint8_t *mask, uint32_t mask_stride, int h, const __m128i *round_offset, int shift) { const __m128i v_maxval = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA); const __m128i one_b = _mm_set1_epi8(1); const __m128i zeros = _mm_setzero_si128(); for (int i = 0; i < h; ++i) { const __m128i m_i0 = xx_loadl_64(mask); const __m128i m_ac = _mm_maddubs_epi16(m_i0, one_b); const __m128i m = _mm_avg_epu16(m_ac, zeros); blend_a64_d16_mask_w4_sse41(dst, src0, src1, &m, round_offset, &v_maxval, shift); mask += mask_stride; dst += dst_stride; src0 += src0_stride; src1 += src1_stride; } }
subq $0x158, %rsp # imm = 0x158 movl 0x180(%rsp), %eax movq 0x178(%rsp), %rax movl 0x170(%rsp), %eax movl 0x168(%rsp), %eax movq 0x160(%rsp), %rax movq %rdi, 0xa8(%rsp) movl %esi, 0xa4(%rsp) movq %rdx, 0x98(%rsp) movl %ecx, 0x94(%rsp) movq %r8, 0x88(%rsp) movl %r9d, 0x84(%rsp) movw $0x40, 0xcc(%rsp) movw 0xcc(%rsp), %ax movw %ax, 0xc(%rsp) movw %ax, 0x156(%rsp) movw %ax, 0x154(%rsp) movw %ax, 0x152(%rsp) movw %ax, 0x150(%rsp) movw %ax, 0x14e(%rsp) movw %ax, 0x14c(%rsp) movw %ax, 0x14a(%rsp) movw %ax, 0x148(%rsp) movzwl 0x148(%rsp), %eax movd %eax, %xmm0 movzwl 0x14a(%rsp), %eax pinsrw $0x1, %eax, %xmm0 movzwl 0x14c(%rsp), %eax pinsrw $0x2, %eax, %xmm0 movzwl 0x14e(%rsp), %eax pinsrw $0x3, %eax, %xmm0 movzwl 0x150(%rsp), %eax pinsrw $0x4, %eax, %xmm0 movzwl 0x152(%rsp), %eax pinsrw $0x5, %eax, %xmm0 movzwl 0x154(%rsp), %eax pinsrw $0x6, %eax, %xmm0 movzwl 0x156(%rsp), %eax pinsrw $0x7, %eax, %xmm0 movaps %xmm0, 0x130(%rsp) movaps 0x130(%rsp), %xmm0 movaps %xmm0, 0x70(%rsp) movb $0x1, 0xcf(%rsp) movb 0xcf(%rsp), %al movb %al, 0xf(%rsp) movb %al, 0xef(%rsp) movb %al, 0xee(%rsp) movb %al, 0xed(%rsp) movb %al, 0xec(%rsp) movb %al, 0xeb(%rsp) movb %al, 0xea(%rsp) movb %al, 0xe9(%rsp) movb %al, 0xe8(%rsp) movb %al, 0xe7(%rsp) movb %al, 0xe6(%rsp) movb %al, 0xe5(%rsp) movb %al, 0xe4(%rsp) movb %al, 0xe3(%rsp) movb %al, 0xe2(%rsp) movb %al, 0xe1(%rsp) movb %al, 0xe0(%rsp) movzbl 0xe0(%rsp), %eax movd %eax, %xmm0 movzbl 0xe1(%rsp), %eax pinsrb $0x1, %eax, %xmm0 movzbl 0xe2(%rsp), %eax pinsrb $0x2, %eax, %xmm0 movzbl 0xe3(%rsp), %eax pinsrb $0x3, %eax, %xmm0 movzbl 0xe4(%rsp), %eax pinsrb $0x4, %eax, %xmm0 movzbl 0xe5(%rsp), %eax pinsrb $0x5, %eax, %xmm0 movzbl 0xe6(%rsp), %eax pinsrb $0x6, %eax, %xmm0 movzbl 0xe7(%rsp), %eax pinsrb $0x7, %eax, %xmm0 movzbl 0xe8(%rsp), %eax pinsrb $0x8, %eax, %xmm0 movzbl 0xe9(%rsp), %eax pinsrb $0x9, %eax, %xmm0 movzbl 0xea(%rsp), %eax pinsrb $0xa, %eax, %xmm0 movzbl 0xeb(%rsp), %eax pinsrb $0xb, %eax, %xmm0 movzbl 0xec(%rsp), %eax pinsrb 
$0xc, %eax, %xmm0 movzbl 0xed(%rsp), %eax pinsrb $0xd, %eax, %xmm0 movzbl 0xee(%rsp), %eax pinsrb $0xe, %eax, %xmm0 movzbl 0xef(%rsp), %eax pinsrb $0xf, %eax, %xmm0 movaps %xmm0, 0xd0(%rsp) movaps 0xd0(%rsp), %xmm0 movaps %xmm0, 0x60(%rsp) xorps %xmm0, %xmm0 movaps %xmm0, 0xb0(%rsp) movdqa 0xb0(%rsp), %xmm0 movdqa %xmm0, 0x50(%rsp) movl $0x0, 0x4c(%rsp) movl 0x4c(%rsp), %eax cmpl 0x170(%rsp), %eax jge 0x42b6ad movq 0x160(%rsp), %rdi callq 0x42f180 movdqa %xmm0, 0x30(%rsp) movdqa 0x30(%rsp), %xmm1 movdqa 0x60(%rsp), %xmm0 movdqa %xmm1, 0x100(%rsp) movdqa %xmm0, 0xf0(%rsp) movdqa 0x100(%rsp), %xmm0 movdqa 0xf0(%rsp), %xmm1 pmaddubsw %xmm1, %xmm0 movdqa %xmm0, 0x20(%rsp) movdqa 0x20(%rsp), %xmm1 movdqa 0x50(%rsp), %xmm0 movdqa %xmm1, 0x120(%rsp) movdqa %xmm0, 0x110(%rsp) movdqa 0x120(%rsp), %xmm0 movdqa 0x110(%rsp), %xmm1 pavgw %xmm1, %xmm0 movdqa %xmm0, 0x10(%rsp) movq 0xa8(%rsp), %rdi movq 0x98(%rsp), %rsi movq 0x88(%rsp), %rdx movq 0x178(%rsp), %r8 movl 0x180(%rsp), %eax leaq 0x10(%rsp), %rcx leaq 0x70(%rsp), %r9 movl %eax, (%rsp) callq 0x431820 movl 0x168(%rsp), %ecx movq 0x160(%rsp), %rax movl %ecx, %ecx addq %rcx, %rax movq %rax, 0x160(%rsp) movl 0xa4(%rsp), %ecx movq 0xa8(%rsp), %rax movl %ecx, %ecx addq %rcx, %rax movq %rax, 0xa8(%rsp) movl 0x94(%rsp), %ecx movq 0x98(%rsp), %rax movl %ecx, %ecx shlq %rcx addq %rcx, %rax movq %rax, 0x98(%rsp) movl 0x84(%rsp), %ecx movq 0x88(%rsp), %rax movl %ecx, %ecx shlq %rcx addq %rcx, %rax movq %rax, 0x88(%rsp) movl 0x4c(%rsp), %eax addl $0x1, %eax movl %eax, 0x4c(%rsp) jmp 0x42b555 addq $0x158, %rsp # imm = 0x158 retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/aom_dsp/x86/blend_mask_sse4.h
aom_lowbd_blend_a64_d16_mask_subw1_subh0_w8_sse4_1
// Blends two 16-bit intermediate (d16) buffers into an 8-bit destination,
// 8 pixels per row, for a mask subsampled horizontally (subw = 1) but not
// vertically (subh = 0): each pair of adjacent mask bytes is reduced to a
// single rounded-average alpha value per output pixel.
static inline void aom_lowbd_blend_a64_d16_mask_subw1_subh0_w8_sse4_1(
    uint8_t *dst, uint32_t dst_stride, const CONV_BUF_TYPE *src0,
    uint32_t src0_stride, const CONV_BUF_TYPE *src1, uint32_t src1_stride,
    const uint8_t *mask, uint32_t mask_stride, int h,
    const __m128i *round_offset, int shift) {
  const __m128i max_alpha = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
  const __m128i byte_ones = _mm_set1_epi8(1);
  const __m128i zero = _mm_setzero_si128();

  for (int row = 0; row < h; ++row) {
    // 16 mask bytes feed 8 output pixels. Sum each horizontal byte pair
    // into a 16-bit lane, then take the rounding average (a + b + 1) >> 1.
    const __m128i raw_mask = xx_loadu_128(mask);
    const __m128i pair_sum = _mm_maddubs_epi16(raw_mask, byte_ones);
    const __m128i m = _mm_avg_epu16(pair_sum, zero);

    blend_a64_d16_mask_w8_sse41(dst, src0, src1, &m, round_offset, &max_alpha,
                                shift);

    mask += mask_stride;
    dst += dst_stride;
    src0 += src0_stride;
    src1 += src1_stride;
  }
}
subq $0x158, %rsp # imm = 0x158 movl 0x180(%rsp), %eax movq 0x178(%rsp), %rax movl 0x170(%rsp), %eax movl 0x168(%rsp), %eax movq 0x160(%rsp), %rax movq %rdi, 0xa8(%rsp) movl %esi, 0xa4(%rsp) movq %rdx, 0x98(%rsp) movl %ecx, 0x94(%rsp) movq %r8, 0x88(%rsp) movl %r9d, 0x84(%rsp) movw $0x40, 0xcc(%rsp) movw 0xcc(%rsp), %ax movw %ax, 0xc(%rsp) movw %ax, 0x156(%rsp) movw %ax, 0x154(%rsp) movw %ax, 0x152(%rsp) movw %ax, 0x150(%rsp) movw %ax, 0x14e(%rsp) movw %ax, 0x14c(%rsp) movw %ax, 0x14a(%rsp) movw %ax, 0x148(%rsp) movzwl 0x148(%rsp), %eax movd %eax, %xmm0 movzwl 0x14a(%rsp), %eax pinsrw $0x1, %eax, %xmm0 movzwl 0x14c(%rsp), %eax pinsrw $0x2, %eax, %xmm0 movzwl 0x14e(%rsp), %eax pinsrw $0x3, %eax, %xmm0 movzwl 0x150(%rsp), %eax pinsrw $0x4, %eax, %xmm0 movzwl 0x152(%rsp), %eax pinsrw $0x5, %eax, %xmm0 movzwl 0x154(%rsp), %eax pinsrw $0x6, %eax, %xmm0 movzwl 0x156(%rsp), %eax pinsrw $0x7, %eax, %xmm0 movaps %xmm0, 0x130(%rsp) movaps 0x130(%rsp), %xmm0 movaps %xmm0, 0x70(%rsp) movb $0x1, 0xcf(%rsp) movb 0xcf(%rsp), %al movb %al, 0xf(%rsp) movb %al, 0xef(%rsp) movb %al, 0xee(%rsp) movb %al, 0xed(%rsp) movb %al, 0xec(%rsp) movb %al, 0xeb(%rsp) movb %al, 0xea(%rsp) movb %al, 0xe9(%rsp) movb %al, 0xe8(%rsp) movb %al, 0xe7(%rsp) movb %al, 0xe6(%rsp) movb %al, 0xe5(%rsp) movb %al, 0xe4(%rsp) movb %al, 0xe3(%rsp) movb %al, 0xe2(%rsp) movb %al, 0xe1(%rsp) movb %al, 0xe0(%rsp) movzbl 0xe0(%rsp), %eax movd %eax, %xmm0 movzbl 0xe1(%rsp), %eax pinsrb $0x1, %eax, %xmm0 movzbl 0xe2(%rsp), %eax pinsrb $0x2, %eax, %xmm0 movzbl 0xe3(%rsp), %eax pinsrb $0x3, %eax, %xmm0 movzbl 0xe4(%rsp), %eax pinsrb $0x4, %eax, %xmm0 movzbl 0xe5(%rsp), %eax pinsrb $0x5, %eax, %xmm0 movzbl 0xe6(%rsp), %eax pinsrb $0x6, %eax, %xmm0 movzbl 0xe7(%rsp), %eax pinsrb $0x7, %eax, %xmm0 movzbl 0xe8(%rsp), %eax pinsrb $0x8, %eax, %xmm0 movzbl 0xe9(%rsp), %eax pinsrb $0x9, %eax, %xmm0 movzbl 0xea(%rsp), %eax pinsrb $0xa, %eax, %xmm0 movzbl 0xeb(%rsp), %eax pinsrb $0xb, %eax, %xmm0 movzbl 0xec(%rsp), %eax pinsrb 
$0xc, %eax, %xmm0 movzbl 0xed(%rsp), %eax pinsrb $0xd, %eax, %xmm0 movzbl 0xee(%rsp), %eax pinsrb $0xe, %eax, %xmm0 movzbl 0xef(%rsp), %eax pinsrb $0xf, %eax, %xmm0 movaps %xmm0, 0xd0(%rsp) movaps 0xd0(%rsp), %xmm0 movaps %xmm0, 0x60(%rsp) xorps %xmm0, %xmm0 movaps %xmm0, 0xb0(%rsp) movdqa 0xb0(%rsp), %xmm0 movdqa %xmm0, 0x50(%rsp) movl $0x0, 0x4c(%rsp) movl 0x4c(%rsp), %eax cmpl 0x170(%rsp), %eax jge 0x42badd movq 0x160(%rsp), %rdi callq 0x42eca0 movdqa %xmm0, 0x30(%rsp) movdqa 0x30(%rsp), %xmm1 movdqa 0x60(%rsp), %xmm0 movdqa %xmm1, 0x100(%rsp) movdqa %xmm0, 0xf0(%rsp) movdqa 0x100(%rsp), %xmm0 movdqa 0xf0(%rsp), %xmm1 pmaddubsw %xmm1, %xmm0 movdqa %xmm0, 0x20(%rsp) movdqa 0x20(%rsp), %xmm1 movdqa 0x50(%rsp), %xmm0 movdqa %xmm1, 0x120(%rsp) movdqa %xmm0, 0x110(%rsp) movdqa 0x120(%rsp), %xmm0 movdqa 0x110(%rsp), %xmm1 pavgw %xmm1, %xmm0 movdqa %xmm0, 0x10(%rsp) movq 0xa8(%rsp), %rdi movq 0x98(%rsp), %rsi movq 0x88(%rsp), %rdx movq 0x178(%rsp), %r8 movl 0x180(%rsp), %eax leaq 0x10(%rsp), %rcx leaq 0x70(%rsp), %r9 movl %eax, (%rsp) callq 0x431a80 movl 0x168(%rsp), %ecx movq 0x160(%rsp), %rax movl %ecx, %ecx addq %rcx, %rax movq %rax, 0x160(%rsp) movl 0xa4(%rsp), %ecx movq 0xa8(%rsp), %rax movl %ecx, %ecx addq %rcx, %rax movq %rax, 0xa8(%rsp) movl 0x94(%rsp), %ecx movq 0x98(%rsp), %rax movl %ecx, %ecx shlq %rcx addq %rcx, %rax movq %rax, 0x98(%rsp) movl 0x84(%rsp), %ecx movq 0x88(%rsp), %rax movl %ecx, %ecx shlq %rcx addq %rcx, %rax movq %rax, 0x88(%rsp) movl 0x4c(%rsp), %eax addl $0x1, %eax movl %eax, 0x4c(%rsp) jmp 0x42b985 addq $0x158, %rsp # imm = 0x158 retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/aom_dsp/x86/blend_mask_sse4.h
lowbd_blend_a64_d16_mask_subw1_subh0_w16_sse4_1
// Blends two 16-bit intermediate (d16) buffers into an 8-bit destination,
// 16 output pixels per step, for a mask subsampled horizontally (subw = 1)
// but not vertically (subh = 0). Two 16-byte mask loads per step supply the
// 32 mask bytes feeding 16 output pixels.
static inline void lowbd_blend_a64_d16_mask_subw1_subh0_w16_sse4_1(
    uint8_t *dst, uint32_t dst_stride, const CONV_BUF_TYPE *src0,
    uint32_t src0_stride, const CONV_BUF_TYPE *src1, uint32_t src1_stride,
    const uint8_t *mask, uint32_t mask_stride, int h, int w,
    const __m128i *round_offset, int shift) {
  const __m128i max_alpha = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
  const __m128i byte_ones = _mm_set1_epi8(1);
  const __m128i zero = _mm_setzero_si128();

  for (int row = 0; row < h; ++row) {
    for (int col = 0; col < w; col += 16) {
      const __m128i raw_lo = xx_loadu_128(mask + 2 * col);
      const __m128i raw_hi = xx_loadu_128(mask + 2 * col + 16);
      // Sum each horizontal pair of mask bytes into a 16-bit lane ...
      const __m128i pair_lo = _mm_maddubs_epi16(raw_lo, byte_ones);
      const __m128i pair_hi = _mm_maddubs_epi16(raw_hi, byte_ones);
      // ... then take the rounding average: (a + b + 1) >> 1.
      const __m128i m0 = _mm_avg_epu16(pair_lo, zero);
      const __m128i m1 = _mm_avg_epu16(pair_hi, zero);

      blend_a64_d16_mask_w16_sse41(dst + col, src0 + col, src1 + col, &m0, &m1,
                                   round_offset, &max_alpha, shift);
    }
    mask += mask_stride;
    dst += dst_stride;
    src0 += src0_stride;
    src1 += src1_stride;
  }
}
subq $0x1d8, %rsp # imm = 0x1D8 movl 0x208(%rsp), %eax movq 0x200(%rsp), %rax movl 0x1f8(%rsp), %eax movl 0x1f0(%rsp), %eax movl 0x1e8(%rsp), %eax movq 0x1e0(%rsp), %rax movq %rdi, 0xe8(%rsp) movl %esi, 0xe4(%rsp) movq %rdx, 0xd8(%rsp) movl %ecx, 0xd4(%rsp) movq %r8, 0xc8(%rsp) movl %r9d, 0xc4(%rsp) movw $0x40, 0x10c(%rsp) movw 0x10c(%rsp), %ax movw %ax, 0x1c(%rsp) movw %ax, 0x1d6(%rsp) movw %ax, 0x1d4(%rsp) movw %ax, 0x1d2(%rsp) movw %ax, 0x1d0(%rsp) movw %ax, 0x1ce(%rsp) movw %ax, 0x1cc(%rsp) movw %ax, 0x1ca(%rsp) movw %ax, 0x1c8(%rsp) movzwl 0x1c8(%rsp), %eax movd %eax, %xmm0 movzwl 0x1ca(%rsp), %eax pinsrw $0x1, %eax, %xmm0 movzwl 0x1cc(%rsp), %eax pinsrw $0x2, %eax, %xmm0 movzwl 0x1ce(%rsp), %eax pinsrw $0x3, %eax, %xmm0 movzwl 0x1d0(%rsp), %eax pinsrw $0x4, %eax, %xmm0 movzwl 0x1d2(%rsp), %eax pinsrw $0x5, %eax, %xmm0 movzwl 0x1d4(%rsp), %eax pinsrw $0x6, %eax, %xmm0 movzwl 0x1d6(%rsp), %eax pinsrw $0x7, %eax, %xmm0 movaps %xmm0, 0x1b0(%rsp) movaps 0x1b0(%rsp), %xmm0 movaps %xmm0, 0xb0(%rsp) movb $0x1, 0x10f(%rsp) movb 0x10f(%rsp), %al movb %al, 0x1f(%rsp) movb %al, 0x12f(%rsp) movb %al, 0x12e(%rsp) movb %al, 0x12d(%rsp) movb %al, 0x12c(%rsp) movb %al, 0x12b(%rsp) movb %al, 0x12a(%rsp) movb %al, 0x129(%rsp) movb %al, 0x128(%rsp) movb %al, 0x127(%rsp) movb %al, 0x126(%rsp) movb %al, 0x125(%rsp) movb %al, 0x124(%rsp) movb %al, 0x123(%rsp) movb %al, 0x122(%rsp) movb %al, 0x121(%rsp) movb %al, 0x120(%rsp) movzbl 0x120(%rsp), %eax movd %eax, %xmm0 movzbl 0x121(%rsp), %eax pinsrb $0x1, %eax, %xmm0 movzbl 0x122(%rsp), %eax pinsrb $0x2, %eax, %xmm0 movzbl 0x123(%rsp), %eax pinsrb $0x3, %eax, %xmm0 movzbl 0x124(%rsp), %eax pinsrb $0x4, %eax, %xmm0 movzbl 0x125(%rsp), %eax pinsrb $0x5, %eax, %xmm0 movzbl 0x126(%rsp), %eax pinsrb $0x6, %eax, %xmm0 movzbl 0x127(%rsp), %eax pinsrb $0x7, %eax, %xmm0 movzbl 0x128(%rsp), %eax pinsrb $0x8, %eax, %xmm0 movzbl 0x129(%rsp), %eax pinsrb $0x9, %eax, %xmm0 movzbl 0x12a(%rsp), %eax pinsrb $0xa, %eax, %xmm0 movzbl 0x12b(%rsp), %eax 
pinsrb $0xb, %eax, %xmm0 movzbl 0x12c(%rsp), %eax pinsrb $0xc, %eax, %xmm0 movzbl 0x12d(%rsp), %eax pinsrb $0xd, %eax, %xmm0 movzbl 0x12e(%rsp), %eax pinsrb $0xe, %eax, %xmm0 movzbl 0x12f(%rsp), %eax pinsrb $0xf, %eax, %xmm0 movaps %xmm0, 0x110(%rsp) movaps 0x110(%rsp), %xmm0 movaps %xmm0, 0xa0(%rsp) xorps %xmm0, %xmm0 movaps %xmm0, 0xf0(%rsp) movdqa 0xf0(%rsp), %xmm0 movdqa %xmm0, 0x90(%rsp) movl $0x0, 0x8c(%rsp) movl 0x8c(%rsp), %eax cmpl 0x1f0(%rsp), %eax jge 0x42c046 movl $0x0, 0x88(%rsp) movl 0x88(%rsp), %eax cmpl 0x1f8(%rsp), %eax jge 0x42bfba movq 0x1e0(%rsp), %rdi movl 0x88(%rsp), %eax shll %eax cltq addq %rax, %rdi callq 0x42eca0 movdqa %xmm0, 0x70(%rsp) movq 0x1e0(%rsp), %rdi movl 0x88(%rsp), %eax shll %eax cltq addq %rax, %rdi addq $0x10, %rdi callq 0x42eca0 movdqa %xmm0, 0x60(%rsp) movdqa 0x70(%rsp), %xmm1 movdqa 0xa0(%rsp), %xmm0 movdqa %xmm1, 0x160(%rsp) movdqa %xmm0, 0x150(%rsp) movdqa 0x160(%rsp), %xmm0 movdqa 0x150(%rsp), %xmm1 pmaddubsw %xmm1, %xmm0 movdqa %xmm0, 0x50(%rsp) movdqa 0x60(%rsp), %xmm1 movdqa 0xa0(%rsp), %xmm0 movdqa %xmm1, 0x140(%rsp) movdqa %xmm0, 0x130(%rsp) movdqa 0x140(%rsp), %xmm0 movdqa 0x130(%rsp), %xmm1 pmaddubsw %xmm1, %xmm0 movdqa %xmm0, 0x40(%rsp) movdqa 0x50(%rsp), %xmm1 movdqa 0x90(%rsp), %xmm0 movdqa %xmm1, 0x1a0(%rsp) movdqa %xmm0, 0x190(%rsp) movdqa 0x1a0(%rsp), %xmm0 movdqa 0x190(%rsp), %xmm1 pavgw %xmm1, %xmm0 movdqa %xmm0, 0x30(%rsp) movdqa 0x40(%rsp), %xmm1 movdqa 0x90(%rsp), %xmm0 movdqa %xmm1, 0x180(%rsp) movdqa %xmm0, 0x170(%rsp) movdqa 0x180(%rsp), %xmm0 movdqa 0x170(%rsp), %xmm1 pavgw %xmm1, %xmm0 movdqa %xmm0, 0x20(%rsp) movq 0xe8(%rsp), %rdi movslq 0x88(%rsp), %rax addq %rax, %rdi movq 0xd8(%rsp), %rsi movslq 0x88(%rsp), %rax shlq %rax addq %rax, %rsi movq 0xc8(%rsp), %rdx movslq 0x88(%rsp), %rax shlq %rax addq %rax, %rdx movq 0x200(%rsp), %r9 movl 0x208(%rsp), %eax leaq 0x30(%rsp), %rcx leaq 0x20(%rsp), %r8 leaq 0xb0(%rsp), %r10 movq %r10, (%rsp) movl %eax, 0x8(%rsp) callq 0x431d90 movl 0x88(%rsp), %eax 
addl $0x10, %eax movl %eax, 0x88(%rsp) jmp 0x42bde7 movl 0x1e8(%rsp), %ecx movq 0x1e0(%rsp), %rax movl %ecx, %ecx addq %rcx, %rax movq %rax, 0x1e0(%rsp) movl 0xe4(%rsp), %ecx movq 0xe8(%rsp), %rax movl %ecx, %ecx addq %rcx, %rax movq %rax, 0xe8(%rsp) movl 0xd4(%rsp), %ecx movq 0xd8(%rsp), %rax movl %ecx, %ecx shlq %rcx addq %rcx, %rax movq %rax, 0xd8(%rsp) movl 0xc4(%rsp), %ecx movq 0xc8(%rsp), %rax movl %ecx, %ecx shlq %rcx addq %rcx, %rax movq %rax, 0xc8(%rsp) movl 0x8c(%rsp), %eax addl $0x1, %eax movl %eax, 0x8c(%rsp) jmp 0x42bdc8 addq $0x1d8, %rsp # imm = 0x1D8 retq nop
/m-ab-s[P]aom/aom_dsp/x86/blend_a64_mask_sse4.c
aom_lowbd_blend_a64_d16_mask_subw0_subh1_w8_sse4_1
// Blends two 16-bit intermediate (d16) buffers into an 8-bit destination,
// 8 pixels per row, for a mask subsampled vertically (subh = 1) but not
// horizontally (subw = 0): two vertically adjacent mask rows are averaged
// per output row.
static inline void aom_lowbd_blend_a64_d16_mask_subw0_subh1_w8_sse4_1(
    uint8_t *dst, uint32_t dst_stride, const CONV_BUF_TYPE *src0,
    uint32_t src0_stride, const CONV_BUF_TYPE *src1, uint32_t src1_stride,
    const uint8_t *mask, uint32_t mask_stride, int h,
    const __m128i *round_offset, int shift) {
  const __m128i max_alpha = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
  const __m128i zero = _mm_setzero_si128();

  for (int row = 0; row < h; ++row) {
    // Saturating add of the two adjacent mask rows, then a rounding average
    // ((a + b + 1) >> 1), zero-extended to one 16-bit lane per pixel.
    const __m128i mask_row0 = xx_loadl_64(mask);
    const __m128i mask_row1 = xx_loadl_64(mask + mask_stride);
    const __m128i row_sum = _mm_adds_epu8(mask_row0, mask_row1);
    const __m128i m = _mm_cvtepu8_epi16(_mm_avg_epu8(row_sum, zero));

    blend_a64_d16_mask_w8_sse41(dst, src0, src1, &m, round_offset, &max_alpha,
                                shift);

    mask += mask_stride << 1;  // two mask rows consumed per output row
    dst += dst_stride;
    src0 += src0_stride;
    src1 += src1_stride;
  }
}
subq $0x148, %rsp # imm = 0x148 movl 0x170(%rsp), %eax movq 0x168(%rsp), %rax movl 0x160(%rsp), %eax movl 0x158(%rsp), %eax movq 0x150(%rsp), %rax movq %rdi, 0xa8(%rsp) movl %esi, 0xa4(%rsp) movq %rdx, 0x98(%rsp) movl %ecx, 0x94(%rsp) movq %r8, 0x88(%rsp) movl %r9d, 0x84(%rsp) movw $0x40, 0xce(%rsp) movw 0xce(%rsp), %ax movw %ax, 0xe(%rsp) movw %ax, 0x146(%rsp) movw %ax, 0x144(%rsp) movw %ax, 0x142(%rsp) movw %ax, 0x140(%rsp) movw %ax, 0x13e(%rsp) movw %ax, 0x13c(%rsp) movw %ax, 0x13a(%rsp) movw %ax, 0x138(%rsp) movzwl 0x138(%rsp), %eax movd %eax, %xmm0 movzwl 0x13a(%rsp), %eax pinsrw $0x1, %eax, %xmm0 movzwl 0x13c(%rsp), %eax pinsrw $0x2, %eax, %xmm0 movzwl 0x13e(%rsp), %eax pinsrw $0x3, %eax, %xmm0 movzwl 0x140(%rsp), %eax pinsrw $0x4, %eax, %xmm0 movzwl 0x142(%rsp), %eax pinsrw $0x5, %eax, %xmm0 movzwl 0x144(%rsp), %eax pinsrw $0x6, %eax, %xmm0 movzwl 0x146(%rsp), %eax pinsrw $0x7, %eax, %xmm0 movaps %xmm0, 0x120(%rsp) movaps 0x120(%rsp), %xmm0 movaps %xmm0, 0x70(%rsp) xorps %xmm0, %xmm0 movaps %xmm0, 0xb0(%rsp) movdqa 0xb0(%rsp), %xmm0 movdqa %xmm0, 0x60(%rsp) movl $0x0, 0x5c(%rsp) movl 0x5c(%rsp), %eax cmpl 0x160(%rsp), %eax jge 0x42c5e8 movq 0x150(%rsp), %rdi callq 0x42f180 movaps %xmm0, 0x40(%rsp) movq 0x150(%rsp), %rdi movl 0x158(%rsp), %eax addq %rax, %rdi callq 0x42f180 movaps %xmm0, 0x30(%rsp) movaps 0x40(%rsp), %xmm1 movaps 0x30(%rsp), %xmm0 movaps %xmm1, 0x110(%rsp) movaps %xmm0, 0x100(%rsp) movaps 0x110(%rsp), %xmm0 movaps 0x100(%rsp), %xmm1 paddusb %xmm1, %xmm0 movaps %xmm0, 0x20(%rsp) movaps 0x20(%rsp), %xmm1 movaps 0x60(%rsp), %xmm0 movaps %xmm1, 0xe0(%rsp) movaps %xmm0, 0xd0(%rsp) movaps 0xe0(%rsp), %xmm0 movaps 0xd0(%rsp), %xmm1 pavgb %xmm1, %xmm0 movaps %xmm0, 0xf0(%rsp) pmovzxbw 0xf0(%rsp), %xmm0 movdqa %xmm0, 0x10(%rsp) movq 0xa8(%rsp), %rdi movq 0x98(%rsp), %rsi movq 0x88(%rsp), %rdx movq 0x168(%rsp), %r8 movl 0x170(%rsp), %eax leaq 0x10(%rsp), %rcx leaq 0x70(%rsp), %r9 movl %eax, (%rsp) callq 0x431a80 movl 0x158(%rsp), %ecx shll %ecx movq 
0x150(%rsp), %rax movl %ecx, %ecx addq %rcx, %rax movq %rax, 0x150(%rsp) movl 0xa4(%rsp), %ecx movq 0xa8(%rsp), %rax movl %ecx, %ecx addq %rcx, %rax movq %rax, 0xa8(%rsp) movl 0x94(%rsp), %ecx movq 0x98(%rsp), %rax movl %ecx, %ecx shlq %rcx addq %rcx, %rax movq %rax, 0x98(%rsp) movl 0x84(%rsp), %ecx movq 0x88(%rsp), %rax movl %ecx, %ecx shlq %rcx addq %rcx, %rax movq %rax, 0x88(%rsp) movl 0x5c(%rsp), %eax addl $0x1, %eax movl %eax, 0x5c(%rsp) jmp 0x42c46f addq $0x148, %rsp # imm = 0x148 retq
/m-ab-s[P]aom/aom_dsp/x86/blend_mask_sse4.h
get_var_sse_sum_16x16_dual_avx2
// Computes SSE, sum, and variance for adjacent 16x16 blocks in one pass
// (the "wd32" helper consumes 32 pixels of one row per call). Outputs: the
// per-block SSEs are stored to sse16x16, the per-block variances to
// var16x16 (64 bits each via _mm_storel_epi64), and the running totals
// *tot_sse / *tot_sum are updated by calc_sum_sse_order.
// NOTE(review): calc_sum_sse_wd32_avx2 / calc_sum_sse_order are defined
// elsewhere; the exact lane layout claims below come from the original
// comments — verify against those helpers.
static inline void get_var_sse_sum_16x16_dual_avx2(
    const uint8_t *src, int src_stride, const uint8_t *ref,
    const int ref_stride, const int h, uint32_t *sse16x16,
    unsigned int *tot_sse, int *tot_sum, uint32_t *var16x16) {
  assert(h <= 128);  // May overflow for larger height.
  __m256i sse_16x16[2], sum_16x16[2];
  sum_16x16[0] = _mm256_setzero_si256();
  sse_16x16[0] = _mm256_setzero_si256();
  sum_16x16[1] = sum_16x16[0];
  sse_16x16[1] = sse_16x16[0];
  // Per 16-bit lane: byte weights (1, -1), so maddubs yields src - ref.
  const __m256i set_one_minusone = _mm256_set1_epi16((short)0xff01);
  for (int i = 0; i < h; i++) {
    // Process a 32-pixel-wide slice of one row, accumulating per-block
    // SSE and sum in the two lanes of sse_16x16[] / sum_16x16[].
    calc_sum_sse_wd32_avx2(src, ref, set_one_minusone, sse_16x16, sum_16x16);
    src += src_stride;
    ref += ref_stride;
  }
  const __m256i sum_sse_order_add =
      calc_sum_sse_order(sse_16x16, sum_16x16, tot_sse, tot_sum);
  const __m256i sum_sse_order_add_1 =
      _mm256_hadd_epi32(sum_sse_order_add, sum_sse_order_add);  // s0+s1 s2+s3 x x
  // Store the per-block SSEs.
  _mm_storel_epi64((__m128i *)sse16x16,
                   _mm256_castsi256_si128(sum_sse_order_add_1));
  // d0+d1 d2+d3 x x -- the per-block sums live in the upper 128-bit lane.
  const __m128i sum_temp16x16 =
      _mm256_extractf128_si256(sum_sse_order_add_1, 1);
  // (d0*d0 >> 8)=f0 (d1*d1 >> 8)=f1 ... : sum^2 / 256 for a 16x16 block.
  const __m128i mull_results =
      _mm_srli_epi32(_mm_mullo_epi32(sum_temp16x16, sum_temp16x16), 8);
  // variance = sse - sum^2 / 256:  s0-f0=v0 s1-f1=v1 ...
  const __m128i variance_16x16 = _mm_sub_epi32(
      _mm256_castsi256_si128(sum_sse_order_add_1), mull_results);
  // v0 v1 v2 v3 -- store the per-block variances.
  _mm_storel_epi64((__m128i *)var16x16, variance_16x16);
}
pushq %rbp movq %rsp, %rbp andq $-0x20, %rsp subq $0x360, %rsp # imm = 0x360 movq 0x20(%rbp), %rax movq 0x18(%rbp), %rax movq 0x10(%rbp), %rax movq %rdi, 0x178(%rsp) movl %esi, 0x174(%rsp) movq %rdx, 0x168(%rsp) movl %ecx, 0x164(%rsp) movl %r8d, 0x160(%rsp) movq %r9, 0x158(%rsp) vxorps %xmm0, %xmm0, %xmm0 vmovaps %ymm0, 0x1a0(%rsp) vmovaps 0x1a0(%rsp), %ymm1 vmovaps %ymm1, 0xc0(%rsp) vmovaps %ymm0, 0x180(%rsp) vmovaps 0x180(%rsp), %ymm0 vmovaps %ymm0, 0x100(%rsp) vmovaps 0xc0(%rsp), %ymm0 vmovaps %ymm0, 0xe0(%rsp) vmovaps 0x100(%rsp), %ymm0 vmovaps %ymm0, 0x120(%rsp) movw $0xff01, 0x21e(%rsp) # imm = 0xFF01 movw 0x21e(%rsp), %ax movw %ax, 0xe(%rsp) movw %ax, 0x26e(%rsp) movw %ax, 0x26c(%rsp) movw %ax, 0x26a(%rsp) movw %ax, 0x268(%rsp) movw %ax, 0x266(%rsp) movw %ax, 0x264(%rsp) movw %ax, 0x262(%rsp) movw %ax, 0x260(%rsp) movw %ax, 0x25e(%rsp) movw %ax, 0x25c(%rsp) movw %ax, 0x25a(%rsp) movw %ax, 0x258(%rsp) movw %ax, 0x256(%rsp) movw %ax, 0x254(%rsp) movw %ax, 0x252(%rsp) movw %ax, 0x250(%rsp) movzwl 0x260(%rsp), %eax vmovd %eax, %xmm0 movzwl 0x262(%rsp), %eax vpinsrw $0x1, %eax, %xmm0, %xmm0 movzwl 0x264(%rsp), %eax vpinsrw $0x2, %eax, %xmm0, %xmm0 movzwl 0x266(%rsp), %eax vpinsrw $0x3, %eax, %xmm0, %xmm0 movzwl 0x268(%rsp), %eax vpinsrw $0x4, %eax, %xmm0, %xmm0 movzwl 0x26a(%rsp), %eax vpinsrw $0x5, %eax, %xmm0, %xmm0 movzwl 0x26c(%rsp), %eax vpinsrw $0x6, %eax, %xmm0, %xmm0 movzwl 0x26e(%rsp), %eax vpinsrw $0x7, %eax, %xmm0, %xmm1 movzwl 0x250(%rsp), %eax vmovd %eax, %xmm0 movzwl 0x252(%rsp), %eax vpinsrw $0x1, %eax, %xmm0, %xmm0 movzwl 0x254(%rsp), %eax vpinsrw $0x2, %eax, %xmm0, %xmm0 movzwl 0x256(%rsp), %eax vpinsrw $0x3, %eax, %xmm0, %xmm0 movzwl 0x258(%rsp), %eax vpinsrw $0x4, %eax, %xmm0, %xmm0 movzwl 0x25a(%rsp), %eax vpinsrw $0x5, %eax, %xmm0, %xmm0 movzwl 0x25c(%rsp), %eax vpinsrw $0x6, %eax, %xmm0, %xmm0 movzwl 0x25e(%rsp), %eax vpinsrw $0x7, %eax, %xmm0, %xmm2 vmovaps %xmm2, %xmm0 vinserti128 $0x1, %xmm1, %ymm0, %ymm0 vmovdqa %ymm0, 0x220(%rsp) 
vmovdqa 0x220(%rsp), %ymm0 vmovdqa %ymm0, 0xa0(%rsp) movl $0x0, 0x9c(%rsp) movl 0x9c(%rsp), %eax cmpl 0x160(%rsp), %eax jge 0x549a6a movq 0x178(%rsp), %rdi movq 0x168(%rsp), %rsi vmovdqa 0xa0(%rsp), %ymm0 leaq 0x100(%rsp), %rdx leaq 0xc0(%rsp), %rcx callq 0x54a280 movl 0x174(%rsp), %ecx movq 0x178(%rsp), %rax movslq %ecx, %rcx addq %rcx, %rax movq %rax, 0x178(%rsp) movl 0x164(%rsp), %ecx movq 0x168(%rsp), %rax movslq %ecx, %rcx addq %rcx, %rax movq %rax, 0x168(%rsp) movl 0x9c(%rsp), %eax addl $0x1, %eax movl %eax, 0x9c(%rsp) jmp 0x5499dc movq 0x10(%rbp), %rdx movq 0x18(%rbp), %rcx leaq 0x100(%rsp), %rdi leaq 0xc0(%rsp), %rsi callq 0x54a580 vmovaps %ymm0, 0x60(%rsp) vmovaps 0x60(%rsp), %ymm0 vmovaps %ymm0, 0x320(%rsp) vmovaps %ymm0, 0x300(%rsp) vmovaps 0x320(%rsp), %ymm0 vmovaps 0x300(%rsp), %ymm1 vphaddd %ymm1, %ymm0, %ymm0 vmovaps %ymm0, 0x40(%rsp) movq 0x158(%rsp), %rax vmovaps 0x40(%rsp), %ymm0 vmovaps %ymm0, 0x1e0(%rsp) vmovaps 0x1e0(%rsp), %ymm0 movq %rax, 0x2a0(%rsp) vmovdqa %xmm0, 0x290(%rsp) movq 0x290(%rsp), %rcx movq 0x2a0(%rsp), %rax movq %rcx, (%rax) vmovdqa 0x50(%rsp), %xmm0 vmovdqa %xmm0, 0x30(%rsp) vmovdqa 0x30(%rsp), %xmm0 vmovdqa %xmm0, 0x2d0(%rsp) vmovdqa %xmm0, 0x2c0(%rsp) vmovdqa 0x2d0(%rsp), %xmm0 vmovdqa 0x2c0(%rsp), %xmm1 vpmulld %xmm1, %xmm0, %xmm0 vmovdqa %xmm0, 0x2b0(%rsp) movl $0x8, 0x2ac(%rsp) vmovdqa 0x2b0(%rsp), %xmm0 vmovd 0x2ac(%rsp), %xmm1 vpsrld %xmm1, %xmm0, %xmm0 vmovdqa %xmm0, 0x20(%rsp) vmovaps 0x40(%rsp), %ymm0 vmovaps %ymm0, 0x1c0(%rsp) vmovaps 0x1c0(%rsp), %ymm0 vmovaps %xmm0, %xmm1 vmovdqa 0x20(%rsp), %xmm0 vmovdqa %xmm1, 0x2f0(%rsp) vmovdqa %xmm0, 0x2e0(%rsp) vmovdqa 0x2f0(%rsp), %xmm0 vmovdqa 0x2e0(%rsp), %xmm1 vpsubd %xmm1, %xmm0, %xmm0 vmovdqa %xmm0, 0x10(%rsp) movq 0x20(%rbp), %rax vmovdqa 0x10(%rsp), %xmm0 movq %rax, 0x288(%rsp) vmovdqa %xmm0, 0x270(%rsp) movq 0x270(%rsp), %rcx movq 0x288(%rsp), %rax movq %rcx, (%rax) movq %rbp, %rsp popq %rbp vzeroupper retq nopw (%rax,%rax)
/m-ab-s[P]aom/aom_dsp/x86/variance_avx2.c
variance16_kernel_avx2
// Accumulates variance statistics for two consecutive 16-pixel rows of
// src/ref: the two 128-bit rows are packed into one 256-bit register per
// buffer and handed to variance_kernel_avx2.
static inline void variance16_kernel_avx2(
    const uint8_t *const src, const int src_stride, const uint8_t *const ref,
    const int ref_stride, __m256i *const sse, __m256i *const sum) {
  const __m128i src_row0 = _mm_loadu_si128((__m128i const *)src);
  const __m128i src_row1 =
      _mm_loadu_si128((__m128i const *)(src + src_stride));
  const __m128i ref_row0 = _mm_loadu_si128((__m128i const *)ref);
  const __m128i ref_row1 =
      _mm_loadu_si128((__m128i const *)(ref + ref_stride));

  // Row 0 in the low lane, row 1 in the high lane.
  const __m256i s =
      _mm256_inserti128_si256(_mm256_castsi128_si256(src_row0), src_row1, 1);
  const __m256i r =
      _mm256_inserti128_si256(_mm256_castsi128_si256(ref_row0), ref_row1, 1);

  variance_kernel_avx2(s, r, sse, sum);
}
pushq %rbp movq %rsp, %rbp andq $-0x20, %rsp subq $0x100, %rsp # imm = 0x100 movq %rdi, 0xa8(%rsp) movl %esi, 0xa4(%rsp) movq %rdx, 0x98(%rsp) movl %ecx, 0x94(%rsp) movq %r8, 0x88(%rsp) movq %r9, 0x80(%rsp) movq 0xa8(%rsp), %rax movq %rax, 0xe8(%rsp) movq 0xe8(%rsp), %rax vmovdqu (%rax), %xmm0 vmovdqa %xmm0, 0x70(%rsp) movq 0xa8(%rsp), %rax movslq 0xa4(%rsp), %rcx addq %rcx, %rax movq %rax, 0xe0(%rsp) movq 0xe0(%rsp), %rax vmovdqu (%rax), %xmm0 vmovdqa %xmm0, 0x60(%rsp) movq 0x98(%rsp), %rax movq %rax, 0xd8(%rsp) movq 0xd8(%rsp), %rax vmovdqu (%rax), %xmm0 vmovdqa %xmm0, 0x50(%rsp) movq 0x98(%rsp), %rax movslq 0x94(%rsp), %rcx addq %rcx, %rax movq %rax, 0xd0(%rsp) movq 0xd0(%rsp), %rax vmovdqu (%rax), %xmm0 vmovdqa %xmm0, 0x40(%rsp) vmovdqa 0x70(%rsp), %xmm0 vmovdqa %xmm0, 0xc0(%rsp) vmovdqa 0xc0(%rsp), %xmm0 vmovdqa 0x60(%rsp), %xmm1 vmovdqa %xmm1, 0x30(%rsp) vmovdqa %xmm0, 0x20(%rsp) vmovdqa 0x50(%rsp), %xmm0 vmovdqa %xmm0, 0xb0(%rsp) vmovdqa 0xb0(%rsp), %xmm2 vmovdqa 0x40(%rsp), %xmm1 vmovaps %xmm2, %xmm0 vinserti128 $0x1, %xmm1, %ymm0, %ymm0 vmovdqa %ymm0, (%rsp) vmovdqa 0x20(%rsp), %ymm0 vmovdqa (%rsp), %ymm1 movq 0x88(%rsp), %rdi movq 0x80(%rsp), %rsi callq 0x549d60 movq %rbp, %rsp popq %rbp vzeroupper retq nopw (%rax,%rax)
/m-ab-s[P]aom/aom_dsp/x86/variance_avx2.c
variance_kernel_avx2
static inline void variance_kernel_avx2(const __m256i src, const __m256i ref, __m256i *const sse, __m256i *const sum) { const __m256i adj_sub = _mm256_set1_epi16((short)0xff01); // (1,-1) // unpack into pairs of source and reference values const __m256i src_ref0 = _mm256_unpacklo_epi8(src, ref); const __m256i src_ref1 = _mm256_unpackhi_epi8(src, ref); // subtract adjacent elements using src*1 + ref*-1 const __m256i diff0 = _mm256_maddubs_epi16(src_ref0, adj_sub); const __m256i diff1 = _mm256_maddubs_epi16(src_ref1, adj_sub); const __m256i madd0 = _mm256_madd_epi16(diff0, diff0); const __m256i madd1 = _mm256_madd_epi16(diff1, diff1); // add to the running totals *sum = _mm256_add_epi16(*sum, _mm256_add_epi16(diff0, diff1)); *sse = _mm256_add_epi32(*sse, _mm256_add_epi32(madd0, madd1)); }
pushq %rbp movq %rsp, %rbp andq $-0x20, %rsp subq $0x440, %rsp # imm = 0x440 vmovaps %ymm0, 0x120(%rsp) vmovaps %ymm1, 0x100(%rsp) movq %rdi, 0xf8(%rsp) movq %rsi, 0xf0(%rsp) movw $0xff01, 0x21e(%rsp) # imm = 0xFF01 movw 0x21e(%rsp), %ax movw %ax, 0x42e(%rsp) movw %ax, 0x42c(%rsp) movw %ax, 0x42a(%rsp) movw %ax, 0x428(%rsp) movw %ax, 0x426(%rsp) movw %ax, 0x424(%rsp) movw %ax, 0x422(%rsp) movw %ax, 0x420(%rsp) movw %ax, 0x41e(%rsp) movw %ax, 0x41c(%rsp) movw %ax, 0x41a(%rsp) movw %ax, 0x418(%rsp) movw %ax, 0x416(%rsp) movw %ax, 0x414(%rsp) movw %ax, 0x412(%rsp) movw %ax, 0x410(%rsp) movzwl 0x410(%rsp), %eax vmovd %eax, %xmm0 movzwl 0x412(%rsp), %eax vpinsrw $0x1, %eax, %xmm0, %xmm0 movzwl 0x414(%rsp), %eax vpinsrw $0x2, %eax, %xmm0, %xmm0 movzwl 0x416(%rsp), %eax vpinsrw $0x3, %eax, %xmm0, %xmm0 movzwl 0x418(%rsp), %eax vpinsrw $0x4, %eax, %xmm0, %xmm0 movzwl 0x41a(%rsp), %eax vpinsrw $0x5, %eax, %xmm0, %xmm0 movzwl 0x41c(%rsp), %eax vpinsrw $0x6, %eax, %xmm0, %xmm0 movzwl 0x41e(%rsp), %eax vpinsrw $0x7, %eax, %xmm0, %xmm0 movzwl 0x420(%rsp), %eax vmovd %eax, %xmm1 movzwl 0x422(%rsp), %eax vpinsrw $0x1, %eax, %xmm1, %xmm1 movzwl 0x424(%rsp), %eax vpinsrw $0x2, %eax, %xmm1, %xmm1 movzwl 0x426(%rsp), %eax vpinsrw $0x3, %eax, %xmm1, %xmm1 movzwl 0x428(%rsp), %eax vpinsrw $0x4, %eax, %xmm1, %xmm1 movzwl 0x42a(%rsp), %eax vpinsrw $0x5, %eax, %xmm1, %xmm1 movzwl 0x42c(%rsp), %eax vpinsrw $0x6, %eax, %xmm1, %xmm1 movzwl 0x42e(%rsp), %eax vpinsrw $0x7, %eax, %xmm1, %xmm1 vmovdqa %xmm1, 0x3f0(%rsp) vmovdqa %xmm0, 0x3e0(%rsp) vmovaps 0x3e0(%rsp), %ymm0 vmovaps %ymm0, 0xc0(%rsp) vmovaps 0x120(%rsp), %ymm1 vmovaps 0x100(%rsp), %ymm0 vmovaps %ymm1, 0x1e0(%rsp) vmovaps %ymm0, 0x1c0(%rsp) vmovaps 0x1e0(%rsp), %ymm0 vmovaps 0x1c0(%rsp), %ymm1 vpunpcklbw %ymm1, %ymm0, %ymm0 # ymm0 = 
ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23] vmovaps %ymm0, 0xa0(%rsp) vmovaps 0x120(%rsp), %ymm1 vmovaps 0x100(%rsp), %ymm0 vmovaps %ymm1, 0x240(%rsp) vmovaps %ymm0, 0x220(%rsp) vmovaps 0x240(%rsp), %ymm0 vmovaps 0x220(%rsp), %ymm1 vpunpckhbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31] vmovdqa %ymm0, 0x80(%rsp) vmovdqa 0xa0(%rsp), %ymm1 vmovdqa 0xc0(%rsp), %ymm0 vmovdqa %ymm1, 0x2c0(%rsp) vmovdqa %ymm0, 0x2a0(%rsp) vmovdqa 0x2c0(%rsp), %ymm0 vmovdqa 0x2a0(%rsp), %ymm1 vpmaddubsw %ymm1, %ymm0, %ymm0 vmovdqa %ymm0, 0x60(%rsp) vmovdqa 0x80(%rsp), %ymm1 vmovdqa 0xc0(%rsp), %ymm0 vmovdqa %ymm1, 0x280(%rsp) vmovdqa %ymm0, 0x260(%rsp) vmovdqa 0x280(%rsp), %ymm0 vmovdqa 0x260(%rsp), %ymm1 vpmaddubsw %ymm1, %ymm0, %ymm0 vmovdqa %ymm0, 0x40(%rsp) vmovdqa 0x60(%rsp), %ymm1 vmovdqa 0x60(%rsp), %ymm0 vmovdqa %ymm1, 0x340(%rsp) vmovdqa %ymm0, 0x320(%rsp) vmovdqa 0x340(%rsp), %ymm0 vmovdqa 0x320(%rsp), %ymm1 vpmaddwd %ymm1, %ymm0, %ymm0 vmovdqa %ymm0, 0x20(%rsp) vmovdqa 0x40(%rsp), %ymm1 vmovdqa 0x40(%rsp), %ymm0 vmovdqa %ymm1, 0x300(%rsp) vmovdqa %ymm0, 0x2e0(%rsp) vmovdqa 0x300(%rsp), %ymm0 vmovdqa 0x2e0(%rsp), %ymm1 vpmaddwd %ymm1, %ymm0, %ymm0 vmovdqa %ymm0, (%rsp) movq 0xf0(%rsp), %rax vmovdqa (%rax), %ymm1 vmovdqa 0x60(%rsp), %ymm2 vmovdqa 0x40(%rsp), %ymm0 vmovdqa %ymm2, 0x3c0(%rsp) vmovdqa %ymm0, 0x3a0(%rsp) vmovdqa 0x3c0(%rsp), %ymm0 vmovdqa 0x3a0(%rsp), %ymm2 vpaddw %ymm2, %ymm0, %ymm0 vmovdqa %ymm1, 0x380(%rsp) vmovdqa %ymm0, 0x360(%rsp) vmovdqa 0x380(%rsp), %ymm0 vmovdqa 
0x360(%rsp), %ymm1 vpaddw %ymm1, %ymm0, %ymm0 movq 0xf0(%rsp), %rax vmovdqa %ymm0, (%rax) movq 0xf8(%rsp), %rax vmovdqa (%rax), %ymm1 vmovdqa 0x20(%rsp), %ymm2 vmovdqa (%rsp), %ymm0 vmovdqa %ymm2, 0x1a0(%rsp) vmovdqa %ymm0, 0x180(%rsp) vmovdqa 0x1a0(%rsp), %ymm0 vmovdqa 0x180(%rsp), %ymm2 vpaddd %ymm2, %ymm0, %ymm0 vmovdqa %ymm1, 0x160(%rsp) vmovdqa %ymm0, 0x140(%rsp) vmovdqa 0x160(%rsp), %ymm0 vmovdqa 0x140(%rsp), %ymm1 vpaddd %ymm1, %ymm0, %ymm0 movq 0xf8(%rsp), %rax vmovdqa %ymm0, (%rax) movq %rbp, %rsp popq %rbp vzeroupper retq nop
/m-ab-s[P]aom/aom_dsp/x86/variance_avx2.c
constrain
// CDEF constraint: damps a pixel difference toward zero. The correction
// keeps the sign of diff and is capped at
//   min(|diff|, max(0, threshold - (|diff| >> shift)))
// so large differences (likely real edges) are attenuated. Returns 0 when
// threshold is 0 (filtering disabled for this tap).
//
// Fix: the original expanded abs(diff) up to three times through the
// AOMMIN/AOMMAX macros; hoist it into a local to evaluate it once.
static inline int constrain(int diff, int threshold, int damping) {
  if (!threshold) return 0;

  const int abs_diff = abs(diff);
  // Larger thresholds use a smaller shift; clamp at 0 so the shift is valid.
  const int shift = AOMMAX(0, damping - get_msb(threshold));
  return sign(diff) *
         AOMMIN(abs_diff, AOMMAX(0, threshold - (abs_diff >> shift)));
}
subq $0x48, %rsp movl %edi, 0x40(%rsp) movl %esi, 0x3c(%rsp) movl %edx, 0x38(%rsp) cmpl $0x0, 0x3c(%rsp) jne 0x5c2c84 movl $0x0, 0x44(%rsp) jmp 0x5c2db2 movl 0x38(%rsp), %eax movl %eax, 0x30(%rsp) movl 0x3c(%rsp), %edi callq 0x5c2e10 movl 0x30(%rsp), %ecx subl %eax, %ecx xorl %eax, %eax cmpl %ecx, %eax jle 0x5c2ca9 xorl %eax, %eax movl %eax, 0x2c(%rsp) jmp 0x5c2cc6 movl 0x38(%rsp), %eax movl %eax, 0x28(%rsp) movl 0x3c(%rsp), %edi callq 0x5c2e10 movl %eax, %ecx movl 0x28(%rsp), %eax subl %ecx, %eax movl %eax, 0x2c(%rsp) movl 0x2c(%rsp), %eax movl %eax, 0x34(%rsp) movl 0x40(%rsp), %edi callq 0x5c2e30 movl %eax, 0x1c(%rsp) movl 0x40(%rsp), %eax movl %eax, %ecx negl %ecx cmovnsl %ecx, %eax movl %eax, 0x20(%rsp) movl 0x3c(%rsp), %eax movl %eax, 0x24(%rsp) movl 0x40(%rsp), %eax movl %eax, %ecx negl %ecx cmovnsl %ecx, %eax movl 0x34(%rsp), %ecx sarl %cl, %eax movl 0x24(%rsp), %ecx subl %eax, %ecx xorl %eax, %eax cmpl %ecx, %eax jle 0x5c2d17 xorl %eax, %eax movl %eax, 0x18(%rsp) jmp 0x5c2d34 movl 0x3c(%rsp), %eax movl 0x40(%rsp), %edx movl %edx, %ecx negl %ecx cmovnsl %ecx, %edx movl 0x34(%rsp), %ecx sarl %cl, %edx movl %edx, %ecx subl %ecx, %eax movl %eax, 0x18(%rsp) movl 0x20(%rsp), %eax movl 0x18(%rsp), %ecx cmpl %ecx, %eax jge 0x5c2d51 movl 0x40(%rsp), %eax movl %eax, %ecx negl %ecx cmovnsl %ecx, %eax movl %eax, 0x14(%rsp) jmp 0x5c2da3 movl 0x3c(%rsp), %eax movl %eax, 0x10(%rsp) movl 0x40(%rsp), %eax movl %eax, %ecx negl %ecx cmovnsl %ecx, %eax movl 0x34(%rsp), %ecx sarl %cl, %eax movl 0x10(%rsp), %ecx subl %eax, %ecx xorl %eax, %eax cmpl %ecx, %eax jle 0x5c2d7e xorl %eax, %eax movl %eax, 0xc(%rsp) jmp 0x5c2d9b movl 0x3c(%rsp), %eax movl 0x40(%rsp), %edx movl %edx, %ecx negl %ecx cmovnsl %ecx, %edx movl 0x34(%rsp), %ecx sarl %cl, %edx movl %edx, %ecx subl %ecx, %eax movl %eax, 0xc(%rsp) movl 0xc(%rsp), %eax movl %eax, 0x14(%rsp) movl 0x1c(%rsp), %eax movl 0x14(%rsp), %ecx imull %ecx, %eax movl %eax, 0x44(%rsp) movl 0x44(%rsp), %eax addq $0x48, %rsp retq nopl (%rax,%rax)
/m-ab-s[P]aom/av1/common/cdef.h
loop_filter_rows
// Runs the deblocking loop filter over superblock rows [start, stop) for
// every plane and both filtering directions, single-threaded
// (lf_sync == NULL). Filter top rows of all planes first, in case the
// output can be partially reconstructed row by row.
static void loop_filter_rows(YV12_BUFFER_CONFIG *frame, AV1_COMMON *cm,
                             MACROBLOCKD *xd, int start, int stop,
                             const int planes_to_lf[MAX_MB_PLANE],
                             int lpf_opt_level) {
  AV1_DEBLOCKING_PARAMETERS params_buf[MAX_MIB_SIZE];
  TX_SIZE tx_buf[MAX_MIB_SIZE];

  for (int mi_row = start; mi_row < stop; mi_row += MAX_MIB_SIZE) {
    for (int plane = 0; plane < MAX_MB_PLANE; ++plane) {
      // Skip planes that are disabled for this filtering pass.
      if (skip_loop_filter_plane(planes_to_lf, plane, lpf_opt_level)) {
        continue;
      }
      for (int dir = 0; dir < 2; ++dir) {
        av1_thread_loop_filter_rows(frame, cm, xd->plane, xd, mi_row, plane,
                                    dir, lpf_opt_level, /*lf_sync=*/NULL,
                                    xd->error_info, params_buf, tx_buf,
                                    MAX_MIB_SIZE_LOG2);
      }
    }
  }
}
pushq %rbp pushq %r14 pushq %rbx subq $0x2b0, %rsp # imm = 0x2B0 movl 0x2d0(%rsp), %eax movq %rdi, 0x290(%rsp) movq %rsi, 0x288(%rsp) movq %rdx, 0x280(%rsp) movl %ecx, 0x27c(%rsp) movl %r8d, 0x278(%rsp) movq %r9, 0x270(%rsp) movl 0x27c(%rsp), %eax movl %eax, 0x26c(%rsp) movl 0x26c(%rsp), %eax cmpl 0x278(%rsp), %eax jge 0x5f1d98 movl $0x0, 0x268(%rsp) cmpl $0x3, 0x268(%rsp) jge 0x5f1d80 movq 0x270(%rsp), %rdx movl 0x268(%rsp), %ecx movl 0x2d0(%rsp), %eax movq %rdx, 0x2a0(%rsp) movl %ecx, 0x29c(%rsp) movl %eax, 0x298(%rsp) cmpl $0x2, 0x298(%rsp) jne 0x5f1c7c cmpl $0x0, 0x29c(%rsp) jne 0x5f1c24 movq 0x2a0(%rsp), %rax movslq 0x29c(%rsp), %rcx cmpl $0x0, (%rax,%rcx,4) setne %al xorb $-0x1, %al andb $0x1, %al movb %al, 0x2af(%rsp) jmp 0x5f1c9e cmpl $0x1, 0x29c(%rsp) jne 0x5f1c66 movq 0x2a0(%rsp), %rcx xorl %eax, %eax cmpl $0x0, 0x4(%rcx) movb %al, 0x3f(%rsp) jne 0x5f1c57 movq 0x2a0(%rsp), %rax cmpl $0x0, 0x8(%rax) setne %al xorb $-0x1, %al movb %al, 0x3f(%rsp) movb 0x3f(%rsp), %al andb $0x1, %al movb %al, 0x2af(%rsp) jmp 0x5f1c9e cmpl $0x2, 0x29c(%rsp) jne 0x5f1c7a movb $0x1, 0x2af(%rsp) jmp 0x5f1c9e jmp 0x5f1c7c movq 0x2a0(%rsp), %rax movslq 0x29c(%rsp), %rcx cmpl $0x0, (%rax,%rcx,4) setne %al xorb $-0x1, %al andb $0x1, %al movb %al, 0x2af(%rsp) testb $0x1, 0x2af(%rsp) jne 0x5f1caa jmp 0x5f1caf jmp 0x5f1d6a movl $0x0, 0x264(%rsp) cmpl $0x2, 0x264(%rsp) jge 0x5f1d68 movq 0x290(%rsp), %rdi movq 0x288(%rsp), %rsi movq 0x280(%rsp), %rdx addq $0x10, %rdx movq 0x280(%rsp), %rcx movl 0x26c(%rsp), %r8d movl 0x268(%rsp), %r9d movl 0x264(%rsp), %ebp movl 0x2d0(%rsp), %ebx movq 0x280(%rsp), %rax movq 0x29f0(%rax), %r11 leaq 0x60(%rsp), %r10 leaq 0x40(%rsp), %rax xorl %r14d, %r14d movl %ebp, (%rsp) movl %ebx, 0x8(%rsp) movq $0x0, 0x10(%rsp) movq %r11, 0x18(%rsp) movq %r10, 0x20(%rsp) movq %rax, 0x28(%rsp) movl $0x5, 0x30(%rsp) callq 0x5f1030 movl 0x264(%rsp), %eax addl $0x1, %eax movl %eax, 0x264(%rsp) jmp 0x5f1cba jmp 0x5f1d6a movl 0x268(%rsp), %eax addl $0x1, %eax movl %eax, 
0x268(%rsp) jmp 0x5f1bae jmp 0x5f1d82 movl 0x26c(%rsp), %eax addl $0x20, %eax movl %eax, 0x26c(%rsp) jmp 0x5f1b8f addq $0x2b0, %rsp # imm = 0x2B0 popq %rbx popq %r14 popq %rbp retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/av1/common/thread_common.c
av1_loop_restoration_alloc
// Allocates and initializes all synchronization and scratch state used by
// multi-threaded loop restoration: per-plane/per-row mutexes and condition
// variables, the job-queue mutex, per-worker temporary buffers, per-plane
// progress counters, and the job queue itself. Allocation failures are
// reported through CHECK_MEM_ERROR via cm's error context.
void av1_loop_restoration_alloc(AV1LrSync *lr_sync, AV1_COMMON *cm, int num_workers, int num_rows_lr, int num_planes, int width) {
  lr_sync->rows = num_rows_lr;
  lr_sync->num_planes = num_planes;
#if CONFIG_MULTITHREAD
  {
    int i, j;

    for (j = 0; j < num_planes; j++) {
      // One mutex and one condition variable per restoration row, per plane.
      CHECK_MEM_ERROR(cm, lr_sync->mutex_[j], aom_malloc(sizeof(*(lr_sync->mutex_[j])) * num_rows_lr));
      if (lr_sync->mutex_[j]) {
        for (i = 0; i < num_rows_lr; ++i) {
          pthread_mutex_init(&lr_sync->mutex_[j][i], NULL);
        }
      }

      CHECK_MEM_ERROR(cm, lr_sync->cond_[j], aom_malloc(sizeof(*(lr_sync->cond_[j])) * num_rows_lr));
      if (lr_sync->cond_[j]) {
        for (i = 0; i < num_rows_lr; ++i) {
          pthread_cond_init(&lr_sync->cond_[j][i], NULL);
        }
      }
    }

    // Single mutex guarding the shared job queue.
    CHECK_MEM_ERROR(cm, lr_sync->job_mutex, aom_malloc(sizeof(*(lr_sync->job_mutex))));
    if (lr_sync->job_mutex) {
      pthread_mutex_init(lr_sync->job_mutex, NULL);
    }
  }
#endif  // CONFIG_MULTITHREAD
  CHECK_MEM_ERROR(cm, lr_sync->lrworkerdata, aom_calloc(num_workers, sizeof(*(lr_sync->lrworkerdata))));
  lr_sync->num_workers = num_workers;

  for (int worker_idx = 0; worker_idx < num_workers; ++worker_idx) {
    if (worker_idx < num_workers - 1) {
      // Every worker except the last gets freshly allocated scratch buffers.
      CHECK_MEM_ERROR(cm, lr_sync->lrworkerdata[worker_idx].rst_tmpbuf, (int32_t *)aom_memalign(16, RESTORATION_TMPBUF_SIZE));
      CHECK_MEM_ERROR(cm, lr_sync->lrworkerdata[worker_idx].rlbs, aom_malloc(sizeof(RestorationLineBuffers)));

    } else {
      // The last worker reuses the buffers already owned by cm.
      lr_sync->lrworkerdata[worker_idx].rst_tmpbuf = cm->rst_tmpbuf;
      lr_sync->lrworkerdata[worker_idx].rlbs = cm->rlbs;
    }
  }

  for (int j = 0; j < num_planes; j++) {
    // Per-plane, per-row progress counters (superblock column completed).
    CHECK_MEM_ERROR(
        cm, lr_sync->cur_sb_col[j],
        aom_malloc(sizeof(*(lr_sync->cur_sb_col[j])) * num_rows_lr));
  }

  // One job-queue slot per (row, plane) pair.
  CHECK_MEM_ERROR(
      cm, lr_sync->job_queue,
      aom_malloc(sizeof(*(lr_sync->job_queue)) * num_rows_lr * num_planes));
  // Set up nsync.
  lr_sync->sync_range = get_lr_sync_range(width);
}
subq $0x38, %rsp movq %rdi, 0x30(%rsp) movq %rsi, 0x28(%rsp) movl %edx, 0x24(%rsp) movl %ecx, 0x20(%rsp) movl %r8d, 0x1c(%rsp) movl %r9d, 0x18(%rsp) movl 0x20(%rsp), %ecx movq 0x30(%rsp), %rax movl %ecx, 0x4c(%rax) movl 0x1c(%rsp), %ecx movq 0x30(%rsp), %rax movl %ecx, 0x50(%rax) movl $0x0, 0x10(%rsp) movl 0x10(%rsp), %eax cmpl 0x1c(%rsp), %eax jge 0x5f1f5e jmp 0x5f1e00 movslq 0x20(%rsp), %rax imulq $0x28, %rax, %rdi callq 0xa0d60 movq %rax, %rdx movq 0x30(%rsp), %rax movslq 0x10(%rsp), %rcx movq %rdx, (%rax,%rcx,8) movq 0x30(%rsp), %rax movslq 0x10(%rsp), %rcx cmpq $0x0, (%rax,%rcx,8) jne 0x5f1e4c movq 0x28(%rsp), %rax movq 0x30(%rax), %rdi movl $0x2, %esi leaq 0x52269f(%rip), %rdx # 0xb144e4 movb $0x0, %al callq 0x9e4e0 jmp 0x5f1e4e movq 0x30(%rsp), %rax movslq 0x10(%rsp), %rcx cmpq $0x0, (%rax,%rcx,8) je 0x5f1ea3 movl $0x0, 0x14(%rsp) movl 0x14(%rsp), %eax cmpl 0x20(%rsp), %eax jge 0x5f1ea1 movq 0x30(%rsp), %rax movslq 0x10(%rsp), %rcx movq (%rax,%rcx,8), %rdi movslq 0x14(%rsp), %rax imulq $0x28, %rax, %rax addq %rax, %rdi xorl %eax, %eax movl %eax, %esi callq 0x18730 movl 0x14(%rsp), %eax addl $0x1, %eax movl %eax, 0x14(%rsp) jmp 0x5f1e67 jmp 0x5f1ea3 jmp 0x5f1ea5 movslq 0x20(%rsp), %rax imulq $0x30, %rax, %rdi callq 0xa0d60 movq %rax, %rdx movq 0x30(%rsp), %rax movslq 0x10(%rsp), %rcx movq %rdx, 0x18(%rax,%rcx,8) movq 0x30(%rsp), %rax movslq 0x10(%rsp), %rcx cmpq $0x0, 0x18(%rax,%rcx,8) jne 0x5f1ef3 movq 0x28(%rsp), %rax movq 0x30(%rax), %rdi movl $0x2, %esi leaq 0x52261e(%rip), %rdx # 0xb1450a movb $0x0, %al callq 0x9e4e0 jmp 0x5f1ef5 movq 0x30(%rsp), %rax movslq 0x10(%rsp), %rcx cmpq $0x0, 0x18(%rax,%rcx,8) je 0x5f1f4c movl $0x0, 0x14(%rsp) movl 0x14(%rsp), %eax cmpl 0x20(%rsp), %eax jge 0x5f1f4a movq 0x30(%rsp), %rax movslq 0x10(%rsp), %rcx movq 0x18(%rax,%rcx,8), %rdi movslq 0x14(%rsp), %rax imulq $0x30, %rax, %rax addq %rax, %rdi xorl %eax, %eax movl %eax, %esi callq 0x18580 movl 0x14(%rsp), %eax addl $0x1, %eax movl %eax, 0x14(%rsp) jmp 0x5f1f0f jmp 
0x5f1f4c jmp 0x5f1f4e movl 0x10(%rsp), %eax addl $0x1, %eax movl %eax, 0x10(%rsp) jmp 0x5f1df0 jmp 0x5f1f60 movl $0x28, %edi callq 0xa0d60 movq %rax, %rcx movq 0x30(%rsp), %rax movq %rcx, 0x58(%rax) movq 0x30(%rsp), %rax cmpq $0x0, 0x58(%rax) jne 0x5f1f9e movq 0x28(%rsp), %rax movq 0x30(%rax), %rdi movl $0x2, %esi leaq 0x522598(%rip), %rdx # 0xb1452f movb $0x0, %al callq 0x9e4e0 jmp 0x5f1fa0 movq 0x30(%rsp), %rax cmpq $0x0, 0x58(%rax) je 0x5f1fbe movq 0x30(%rsp), %rax movq 0x58(%rax), %rdi xorl %eax, %eax movl %eax, %esi callq 0x18730 jmp 0x5f1fc0 movslq 0x24(%rsp), %rdi movl $0x1c0, %esi # imm = 0x1C0 callq 0xa0d80 movq %rax, %rcx movq 0x30(%rsp), %rax movq %rcx, 0x60(%rax) movq 0x30(%rsp), %rax cmpq $0x0, 0x60(%rax) jne 0x5f2003 movq 0x28(%rsp), %rax movq 0x30(%rax), %rdi movl $0x2, %esi leaq 0x522559(%rip), %rdx # 0xb14555 movb $0x0, %al callq 0x9e4e0 jmp 0x5f2005 movl 0x24(%rsp), %ecx movq 0x30(%rsp), %rax movl %ecx, 0x54(%rax) movl $0x0, 0xc(%rsp) movl 0xc(%rsp), %eax cmpl 0x24(%rsp), %eax jge 0x5f2170 movl 0xc(%rsp), %eax movl 0x24(%rsp), %ecx subl $0x1, %ecx cmpl %ecx, %eax jge 0x5f210f jmp 0x5f203c movl $0x10, %edi movl $0x13b9a0, %esi # imm = 0x13B9A0 callq 0xa0bc0 movq %rax, %rcx movq 0x30(%rsp), %rax movq 0x60(%rax), %rax movslq 0xc(%rsp), %rdx imulq $0x1c0, %rdx, %rdx # imm = 0x1C0 addq %rdx, %rax movq %rcx, (%rax) movq 0x30(%rsp), %rax movq 0x60(%rax), %rax movslq 0xc(%rsp), %rcx imulq $0x1c0, %rcx, %rcx # imm = 0x1C0 addq %rcx, %rax cmpq $0x0, (%rax) jne 0x5f20a3 movq 0x28(%rsp), %rax movq 0x30(%rax), %rdi movl $0x2, %esi leaq 0x5224e2(%rip), %rdx # 0xb1457e movb $0x0, %al callq 0x9e4e0 jmp 0x5f20a5 jmp 0x5f20a7 movl $0x1260, %edi # imm = 0x1260 callq 0xa0d60 movq %rax, %rcx movq 0x30(%rsp), %rax movq 0x60(%rax), %rax movslq 0xc(%rsp), %rdx imulq $0x1c0, %rdx, %rdx # imm = 0x1C0 addq %rdx, %rax movq %rcx, 0x8(%rax) movq 0x30(%rsp), %rax movq 0x60(%rax), %rax movslq 0xc(%rsp), %rcx imulq $0x1c0, %rcx, %rcx # imm = 0x1C0 addq %rcx, %rax cmpq $0x0, 
0x8(%rax) jne 0x5f210b movq 0x28(%rsp), %rax movq 0x30(%rax), %rdi movl $0x2, %esi leaq 0x5224ba(%rip), %rdx # 0xb145be movb $0x0, %al callq 0x9e4e0 jmp 0x5f210d jmp 0x5f215e movq 0x28(%rsp), %rax movq 0x5ae0(%rax), %rcx movq 0x30(%rsp), %rax movq 0x60(%rax), %rax movslq 0xc(%rsp), %rdx imulq $0x1c0, %rdx, %rdx # imm = 0x1C0 addq %rdx, %rax movq %rcx, (%rax) movq 0x28(%rsp), %rax movq 0x5ae8(%rax), %rcx movq 0x30(%rsp), %rax movq 0x60(%rax), %rax movslq 0xc(%rsp), %rdx imulq $0x1c0, %rdx, %rdx # imm = 0x1C0 addq %rdx, %rax movq %rcx, 0x8(%rax) jmp 0x5f2160 movl 0xc(%rsp), %eax addl $0x1, %eax movl %eax, 0xc(%rsp) jmp 0x5f2019 movl $0x0, 0x8(%rsp) movl 0x8(%rsp), %eax cmpl 0x1c(%rsp), %eax jge 0x5f21e3 jmp 0x5f2184 movslq 0x20(%rsp), %rdi shlq $0x2, %rdi callq 0xa0d60 movq %rax, %rdx movq 0x30(%rsp), %rax movslq 0x8(%rsp), %rcx movq %rdx, 0x30(%rax,%rcx,8) movq 0x30(%rsp), %rax movslq 0x8(%rsp), %rcx cmpq $0x0, 0x30(%rax,%rcx,8) jne 0x5f21d2 movq 0x28(%rsp), %rax movq 0x30(%rax), %rdi movl $0x2, %esi leaq 0x52242d(%rip), %rdx # 0xb145f8 movb $0x0, %al callq 0x9e4e0 jmp 0x5f21d4 jmp 0x5f21d6 movl 0x8(%rsp), %eax addl $0x1, %eax movl %eax, 0x8(%rsp) jmp 0x5f2178 jmp 0x5f21e5 movslq 0x20(%rsp), %rax imulq $0x1c, %rax, %rdi movslq 0x1c(%rsp), %rax imulq %rax, %rdi callq 0xa0d60 movq %rax, %rcx movq 0x30(%rsp), %rax movq %rcx, 0x68(%rax) movq 0x30(%rsp), %rax cmpq $0x0, 0x68(%rax) jne 0x5f2230 movq 0x28(%rsp), %rax movq 0x30(%rax), %rdi movl $0x2, %esi leaq 0x5223f9(%rip), %rdx # 0xb14622 movb $0x0, %al callq 0x9e4e0 jmp 0x5f2232 movl 0x18(%rsp), %edi callq 0x5f2250 movl %eax, %ecx movq 0x30(%rsp), %rax movl %ecx, 0x48(%rax) addq $0x38, %rsp retq nopw (%rax,%rax)
/m-ab-s[P]aom/av1/common/thread_common.c
av1_cdef_init_fb_row_mt
// Prepares per-filter-block state (fb_info) for one 64x64 row (fbr) of CDEF
// filtering in the row-based multi-threaded path: records which block edges
// coincide with frame boundaries, copies the vertical-context line buffers,
// and performs the row synchronization handshake with neighbouring workers.
void av1_cdef_init_fb_row_mt(const AV1_COMMON *const cm, const MACROBLOCKD *const xd, CdefBlockInfo *const fb_info, uint16_t **const linebuf, uint16_t *const src, struct AV1CdefSyncData *const cdef_sync, int fbr) {
  const int num_planes = av1_num_planes(cm);
  // Number of 64x64 filter-block rows in the frame (rounded up).
  const int nvfb = (cm->mi_params.mi_rows + MI_SIZE_64X64 - 1) / MI_SIZE_64X64;
  const int luma_stride = ALIGN_POWER_OF_TWO(cm->mi_params.mi_cols << MI_SIZE_LOG2, 4);

  // for the current filter block, it's top left corner mi structure (mi_tl)
  // is first accessed to check whether the top and left boundaries are
  // frame boundaries. Then bottom-left and top-right mi structures are
  // accessed to check whether the bottom and right boundaries
  // (respectively) are frame boundaries.
  //
  // Note that we can't just check the bottom-right mi structure - eg. if
  // we're at the right-hand edge of the frame but not the bottom, then
  // the bottom-right mi is NULL but the bottom-left is not.
  fb_info->frame_boundary[TOP] = (MI_SIZE_64X64 * fbr == 0) ? 1 : 0;
  if (fbr != nvfb - 1)
    fb_info->frame_boundary[BOTTOM] = (MI_SIZE_64X64 * (fbr + 1) == cm->mi_params.mi_rows) ? 1 : 0;
  else
    fb_info->frame_boundary[BOTTOM] = 1;

  fb_info->src = src;
  fb_info->damping = cm->cdef_info.cdef_damping;
  // Shift compensating for bit depths above 8.
  fb_info->coeff_shift = AOMMAX(cm->seq_params->bit_depth - 8, 0);
  av1_zero(fb_info->dir);
  av1_zero(fb_info->var);

  for (int plane = 0; plane < num_planes; plane++) {
    // Plane stride accounts for chroma subsampling.
    const int stride = luma_stride >> xd->plane[plane].subsampling_x;
    uint16_t *top_linebuf = &linebuf[plane][0];
    // The bottom halves of the line buffers start after nvfb rows of
    // CDEF_VBORDER lines each.
    uint16_t *bot_linebuf = &linebuf[plane][nvfb * CDEF_VBORDER * stride];
    {
      const int mi_high_l2 = MI_SIZE_LOG2 - xd->plane[plane].subsampling_y;
      // Pixel row (in this plane) where the next filter-block row begins.
      const int top_offset = MI_SIZE_64X64 * (fbr + 1) << mi_high_l2;
      const int bot_offset = MI_SIZE_64X64 * (fbr + 1) << mi_high_l2;

      if (fbr != nvfb - 1)  // if (fbr != 0)
        // top line buffer copy
        av1_cdef_copy_sb8_16(
            cm, &top_linebuf[(fbr + 1) * CDEF_VBORDER * stride], stride,
            xd->plane[plane].dst.buf, top_offset - CDEF_VBORDER, 0,
            xd->plane[plane].dst.stride, CDEF_VBORDER, stride);
      if (fbr != nvfb - 1)
        // bottom line buffer copy
        av1_cdef_copy_sb8_16(cm, &bot_linebuf[fbr * CDEF_VBORDER * stride],
                             stride, xd->plane[plane].dst.buf, bot_offset, 0,
                             xd->plane[plane].dst.stride, CDEF_VBORDER, stride);
    }

    // Publish the line-buffer slices this row's filtering will read.
    fb_info->top_linebuf[plane] = &linebuf[plane][fbr * CDEF_VBORDER * stride];
    fb_info->bot_linebuf[plane] =
        &linebuf[plane]
                [nvfb * CDEF_VBORDER * stride + (fbr * CDEF_VBORDER * stride)];
  }

  // Row synchronization handshake (see cdef_row_mt_sync_write/_read).
  cdef_row_mt_sync_write(cdef_sync, fbr);
  cdef_row_mt_sync_read(cdef_sync, fbr);
}
subq $0x88, %rsp movl 0x90(%rsp), %eax movq %rdi, 0x80(%rsp) movq %rsi, 0x78(%rsp) movq %rdx, 0x70(%rsp) movq %rcx, 0x68(%rsp) movq %r8, 0x60(%rsp) movq %r9, 0x58(%rsp) movq 0x80(%rsp), %rdi callq 0x5f24f0 movl %eax, 0x54(%rsp) movq 0x80(%rsp), %rax movl 0x214(%rax), %eax addl $0x10, %eax subl $0x1, %eax movl $0x10, %ecx cltd idivl %ecx movl %eax, 0x50(%rsp) movq 0x80(%rsp), %rax movl 0x218(%rax), %eax shll $0x2, %eax addl $0xf, %eax andl $-0x10, %eax movl %eax, 0x4c(%rsp) movl 0x90(%rsp), %edx shll $0x4, %edx xorl %ecx, %ecx movl $0x1, %eax cmpl $0x0, %edx cmovel %eax, %ecx movq 0x70(%rsp), %rax movl %ecx, 0x250(%rax) movl 0x90(%rsp), %eax movl 0x50(%rsp), %ecx subl $0x1, %ecx cmpl %ecx, %eax je 0x5f28e3 movl 0x90(%rsp), %edx addl $0x1, %edx shll $0x4, %edx movq 0x80(%rsp), %rax movl 0x214(%rax), %esi xorl %ecx, %ecx movl $0x1, %eax cmpl %esi, %edx cmovel %eax, %ecx movq 0x70(%rsp), %rax movl %ecx, 0x258(%rax) jmp 0x5f28f2 movq 0x70(%rsp), %rax movl $0x1, 0x258(%rax) movq 0x60(%rsp), %rcx movq 0x70(%rsp), %rax movq %rcx, (%rax) movq 0x80(%rsp), %rax movl 0x5c30(%rax), %ecx movq 0x70(%rsp), %rax movl %ecx, 0x260(%rax) movq 0x80(%rsp), %rax movq 0x6088(%rax), %rax movl 0x48(%rax), %eax subl $0x8, %eax cmpl $0x0, %eax jbe 0x5f294d movq 0x80(%rsp), %rax movq 0x6088(%rax), %rax movl 0x48(%rax), %eax subl $0x8, %eax movl %eax, 0x20(%rsp) jmp 0x5f2955 xorl %eax, %eax movl %eax, 0x20(%rsp) jmp 0x5f2955 movl 0x20(%rsp), %ecx movq 0x70(%rsp), %rax movl %ecx, 0x264(%rax) movq 0x70(%rsp), %rdi addq $0x274, %rdi # imm = 0x274 xorl %esi, %esi movl $0x400, %edx # imm = 0x400 callq 0x18280 movq 0x70(%rsp), %rdi addq $0x674, %rdi # imm = 0x674 xorl %esi, %esi movl $0x400, %edx # imm = 0x400 callq 0x18280 movl $0x0, 0x48(%rsp) movl 0x48(%rsp), %eax cmpl 0x54(%rsp), %eax jge 0x5f2c11 movl 0x4c(%rsp), %eax movq 0x78(%rsp), %rcx addq $0x10, %rcx movslq 0x48(%rsp), %rdx imulq $0xa30, %rdx, %rdx # imm = 0xA30 addq %rdx, %rcx movl 0x4(%rcx), %ecx sarl %cl, %eax movl %eax, 0x44(%rsp) movq 
0x68(%rsp), %rax movslq 0x48(%rsp), %rcx movq (%rax,%rcx,8), %rax movq %rax, 0x38(%rsp) movq 0x68(%rsp), %rax movslq 0x48(%rsp), %rcx movq (%rax,%rcx,8), %rax movl 0x50(%rsp), %ecx shll %ecx imull 0x44(%rsp), %ecx movslq %ecx, %rcx shlq %rcx addq %rcx, %rax movq %rax, 0x30(%rsp) movq 0x78(%rsp), %rcx addq $0x10, %rcx movslq 0x48(%rsp), %rax imulq $0xa30, %rax, %rax # imm = 0xA30 addq %rax, %rcx movl $0x2, %eax subl 0x8(%rcx), %eax movl %eax, 0x2c(%rsp) movl 0x90(%rsp), %eax addl $0x1, %eax shll $0x4, %eax movl 0x2c(%rsp), %ecx shll %cl, %eax movl %eax, 0x28(%rsp) movl 0x90(%rsp), %eax addl $0x1, %eax shll $0x4, %eax movl 0x2c(%rsp), %ecx shll %cl, %eax movl %eax, 0x24(%rsp) movl 0x90(%rsp), %eax movl 0x50(%rsp), %ecx subl $0x1, %ecx cmpl %ecx, %eax je 0x5f2af8 movq 0x80(%rsp), %rdi movq 0x38(%rsp), %rsi movl 0x90(%rsp), %eax addl $0x1, %eax shll %eax imull 0x44(%rsp), %eax cltq shlq %rax addq %rax, %rsi movl 0x44(%rsp), %edx movq 0x78(%rsp), %rax addq $0x10, %rax movslq 0x48(%rsp), %rcx imulq $0xa30, %rcx, %rcx # imm = 0xA30 addq %rcx, %rax movq 0x10(%rax), %rcx movl 0x28(%rsp), %r8d subl $0x2, %r8d movq 0x78(%rsp), %rax addq $0x10, %rax movslq 0x48(%rsp), %r9 imulq $0xa30, %r9, %r9 # imm = 0xA30 addq %r9, %rax movl 0x28(%rax), %r10d movl 0x44(%rsp), %eax xorl %r9d, %r9d movl %r10d, (%rsp) movl $0x2, 0x8(%rsp) movl %eax, 0x10(%rsp) callq 0x5bf330 movl 0x90(%rsp), %eax movl 0x50(%rsp), %ecx subl $0x1, %ecx cmpl %ecx, %eax je 0x5f2b8e movq 0x80(%rsp), %rdi movq 0x30(%rsp), %rsi movl 0x90(%rsp), %eax shll %eax imull 0x44(%rsp), %eax cltq shlq %rax addq %rax, %rsi movl 0x44(%rsp), %edx movq 0x78(%rsp), %rax addq $0x10, %rax movslq 0x48(%rsp), %rcx imulq $0xa30, %rcx, %rcx # imm = 0xA30 addq %rcx, %rax movq 0x10(%rax), %rcx movl 0x24(%rsp), %r8d movq 0x78(%rsp), %rax addq $0x10, %rax movslq 0x48(%rsp), %r9 imulq $0xa30, %r9, %r9 # imm = 0xA30 addq %r9, %rax movl 0x28(%rax), %r10d movl 0x44(%rsp), %eax xorl %r9d, %r9d movl %r10d, (%rsp) movl $0x2, 0x8(%rsp) movl %eax, 
0x10(%rsp) callq 0x5bf330 movq 0x68(%rsp), %rax movslq 0x48(%rsp), %rcx movq (%rax,%rcx,8), %rdx movl 0x90(%rsp), %eax shll %eax imull 0x44(%rsp), %eax cltq shlq %rax addq %rax, %rdx movq 0x70(%rsp), %rax movslq 0x48(%rsp), %rcx movq %rdx, 0x8(%rax,%rcx,8) movq 0x68(%rsp), %rax movslq 0x48(%rsp), %rcx movq (%rax,%rcx,8), %rdx movl 0x50(%rsp), %eax shll %eax imull 0x44(%rsp), %eax movl 0x90(%rsp), %ecx shll %ecx imull 0x44(%rsp), %ecx addl %ecx, %eax cltq shlq %rax addq %rax, %rdx movq 0x70(%rsp), %rax movslq 0x48(%rsp), %rcx movq %rdx, 0x20(%rax,%rcx,8) movl 0x48(%rsp), %eax addl $0x1, %eax movl %eax, 0x48(%rsp) jmp 0x5f299c movq 0x58(%rsp), %rdi movl 0x90(%rsp), %esi callq 0x5f2c40 movq 0x58(%rsp), %rdi movl 0x90(%rsp), %esi callq 0x5f2cc0 addq $0x88, %rsp retq nopl (%rax,%rax)
/m-ab-s[P]aom/av1/common/thread_common.c
set_segment_id
// Fills a rectangular x_mis-by-y_mis region of the row-major segment id map
// with segment_id. The map has a pitch of mi_stride entries per row and
// mi_offset locates the region's top-left corner.
static inline void set_segment_id(uint8_t *segment_ids, int mi_offset, int x_mis, int y_mis, int mi_stride, uint8_t segment_id) {
  uint8_t *row_start = segment_ids + mi_offset;
  for (int row_idx = 0; row_idx < y_mis; ++row_idx) {
    memset(row_start, segment_id, (size_t)x_mis);
    row_start += mi_stride;
  }
}
subq $0x28, %rsp movb %r9b, %al movq %rdi, 0x20(%rsp) movl %esi, 0x1c(%rsp) movl %edx, 0x18(%rsp) movl %ecx, 0x14(%rsp) movl %r8d, 0x10(%rsp) movb %al, 0xf(%rsp) movl 0x1c(%rsp), %ecx movq 0x20(%rsp), %rax movslq %ecx, %rcx addq %rcx, %rax movq %rax, 0x20(%rsp) movl $0x0, 0x8(%rsp) movl 0x8(%rsp), %eax cmpl 0x14(%rsp), %eax jge 0x5fb3ed movq 0x20(%rsp), %rdi movl 0x8(%rsp), %eax imull 0x10(%rsp), %eax cltq addq %rax, %rdi movzbl 0xf(%rsp), %eax movslq 0x18(%rsp), %rdx shlq $0x0, %rdx movzbl %al, %esi callq 0x18280 movl 0x8(%rsp), %eax addl $0x1, %eax movl %eax, 0x8(%rsp) jmp 0x5fb3ad addq $0x28, %rsp retq nopw %cs:(%rax,%rax) nopl (%rax)
/m-ab-s[P]aom/av1/common/seg_common.h
av1_vaq_frame_setup
// Per-frame setup for variance adaptive quantization (VAQ): configures the
// segmentation feature data so each segment carries a q-index delta derived
// from rate_ratio[], centred on the frame's average block energy. On a
// resolution change the segment map is cleared and segmentation disabled.
void av1_vaq_frame_setup(AV1_COMP *cpi) {
  AV1_COMMON *cm = &cpi->common;
  const RefreshFrameInfo *const refresh_frame = &cpi->refresh_frame;
  const int base_qindex = cm->quant_params.base_qindex;
  struct segmentation *seg = &cm->seg;
  int i;

  // A resolution change invalidates the previously built segment map.
  int resolution_change = cm->prev_frame && (cm->width != cm->prev_frame->width || cm->height != cm->prev_frame->height);
  int avg_energy = (int)(cpi->twopass_frame.mb_av_energy - 2);
  double avg_ratio;
  // Clamp the average energy into the valid rate_ratio[] index range [0, 7].
  if (avg_energy > 7) avg_energy = 7;
  if (avg_energy < 0) avg_energy = 0;
  avg_ratio = rate_ratio[avg_energy];

  if (resolution_change) {
    memset(cpi->enc_seg.map, 0, cm->mi_params.mi_rows * cm->mi_params.mi_cols);
    av1_clearall_segfeatures(seg);
    av1_disable_segmentation(seg);
    return;
  }
  // Refresh the segment q-deltas only on key-like frames (intra-only,
  // error-resilient, ARF, or non-ARF golden updates).
  if (frame_is_intra_only(cm) || cm->features.error_resilient_mode ||
      refresh_frame->alt_ref_frame ||
      (refresh_frame->golden_frame && !cpi->rc.is_src_frame_alt_ref)) {
    cpi->vaq_refresh = 1;

    av1_enable_segmentation(seg);
    av1_clearall_segfeatures(seg);

    for (i = 0; i < MAX_SEGMENTS; ++i) {
      // Set up avg segment id to be 1.0 and adjust the other segments around
      // it.
      int qindex_delta = av1_compute_qdelta_by_rate(cpi, cm->current_frame.frame_type, base_qindex, rate_ratio[i] / avg_ratio);

      // We don't allow qindex 0 in a segment if the base value is not 0.
      // Q index 0 (lossless) implies 4x4 encoding only and in AQ mode a segment
      // Q delta is sometimes applied without going back around the rd loop.
      // This could lead to an illegal combination of partition size and q.
      if ((base_qindex != 0) && ((base_qindex + qindex_delta) == 0)) {
        qindex_delta = -base_qindex + 1;
      }

      av1_set_segdata(seg, i, SEG_LVL_ALT_Q, qindex_delta);
      av1_enable_segfeature(seg, i, SEG_LVL_ALT_Q);
    }
  }
}
subq $0x48, %rsp movq %rdi, 0x40(%rsp) movq 0x40(%rsp), %rax addq $0x3bf80, %rax # imm = 0x3BF80 movq %rax, 0x38(%rsp) movq 0x40(%rsp), %rax addq $0x42d78, %rax # imm = 0x42D78 movq %rax, 0x30(%rsp) movq 0x38(%rsp), %rax movl 0x268(%rax), %eax movl %eax, 0x2c(%rsp) movq 0x38(%rsp), %rax addq $0x4a38, %rax # imm = 0x4A38 movq %rax, 0x20(%rsp) movq 0x38(%rsp), %rcx xorl %eax, %eax cmpq $0x0, 0xe0(%rcx) movb %al, 0x3(%rsp) je 0x5fb4a8 movq 0x38(%rsp), %rax movl 0x38(%rax), %ecx movq 0x38(%rsp), %rax movq 0xe0(%rax), %rdx movb $0x1, %al cmpl 0x10c(%rdx), %ecx movb %al, 0x2(%rsp) jne 0x5fb4a0 movq 0x38(%rsp), %rax movl 0x3c(%rax), %eax movq 0x38(%rsp), %rcx movq 0xe0(%rcx), %rcx cmpl 0x110(%rcx), %eax setne %al movb %al, 0x2(%rsp) movb 0x2(%rsp), %al movb %al, 0x3(%rsp) movb 0x3(%rsp), %al andb $0x1, %al movzbl %al, %eax movl %eax, 0x18(%rsp) movq 0x40(%rsp), %rax movsd 0x9d628(%rax), %xmm0 movsd 0x486e6e(%rip), %xmm1 # 0xa82338 subsd %xmm1, %xmm0 cvttsd2si %xmm0, %eax movl %eax, 0x14(%rsp) cmpl $0x7, 0x14(%rsp) jle 0x5fb4e5 movl $0x7, 0x14(%rsp) cmpl $0x0, 0x14(%rsp) jge 0x5fb4f4 movl $0x0, 0x14(%rsp) movslq 0x14(%rsp), %rcx leaq 0x51b780(%rip), %rax # 0xb16c80 movsd (%rax,%rcx,8), %xmm0 movsd %xmm0, 0x8(%rsp) cmpl $0x0, 0x18(%rsp) je 0x5fb558 movq 0x40(%rsp), %rax movq 0x71298(%rax), %rdi movq 0x38(%rsp), %rax movl 0x214(%rax), %eax movq 0x38(%rsp), %rcx imull 0x218(%rcx), %eax movslq %eax, %rdx xorl %esi, %esi callq 0x18280 movq 0x20(%rsp), %rdi callq 0x5f0880 movq 0x20(%rsp), %rdi callq 0x209dd0 jmp 0x5fb660 movq 0x38(%rsp), %rdi callq 0x5fb670 cmpl $0x0, %eax jne 0x5fb5a0 movq 0x38(%rsp), %rax testb $0x1, 0x1f6(%rax) jne 0x5fb5a0 movq 0x30(%rsp), %rax testb $0x1, 0x2(%rax) jne 0x5fb5a0 movq 0x30(%rsp), %rax testb $0x1, (%rax) je 0x5fb660 movq 0x40(%rsp), %rax cmpl $0x0, 0x607a8(%rax) jne 0x5fb660 movq 0x40(%rsp), %rax movl $0x1, 0x81330(%rax) movq 0x20(%rsp), %rdi callq 0x209da0 movq 0x20(%rsp), %rdi callq 0x5f0880 movl $0x0, 0x1c(%rsp) cmpl $0x8, 0x1c(%rsp) jge 
0x5fb65e movq 0x40(%rsp), %rdi movq 0x38(%rsp), %rax movb (%rax), %al movl 0x2c(%rsp), %edx movslq 0x1c(%rsp), %rsi leaq 0x51b68e(%rip), %rcx # 0xb16c80 movsd (%rcx,%rsi,8), %xmm0 divsd 0x8(%rsp), %xmm0 movzbl %al, %esi callq 0x1c89c0 movl %eax, 0x4(%rsp) cmpl $0x0, 0x2c(%rsp) je 0x5fb62a movl 0x2c(%rsp), %eax addl 0x4(%rsp), %eax cmpl $0x0, %eax jne 0x5fb62a xorl %eax, %eax subl 0x2c(%rsp), %eax addl $0x1, %eax movl %eax, 0x4(%rsp) movq 0x20(%rsp), %rdi movl 0x1c(%rsp), %esi movl 0x4(%rsp), %ecx xorl %edx, %edx callq 0x5f0a00 movq 0x20(%rsp), %rdi movl 0x1c(%rsp), %esi xorl %edx, %edx callq 0x5f0980 movl 0x1c(%rsp), %eax addl $0x1, %eax movl %eax, 0x1c(%rsp) jmp 0x5fb5cb jmp 0x5fb660 addq $0x48, %rsp retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/av1/encoder/aq_variance.c
av1_log_block_avg
// This functions returns the block average of luma block.
// The block of size `bs` at (mi_row, mi_col) is clipped against the frame
// dimensions; a block that is entirely outside the frame yields 0.
int av1_log_block_avg(const AV1_COMP *cpi, const MACROBLOCK *x, BLOCK_SIZE bs, int mi_row, int mi_col) {
  const int pic_w = cpi->common.width;
  const int pic_h = cpi->common.height;
  const int bw = MI_SIZE * mi_size_wide[bs];
  const int bh = MI_SIZE * mi_size_high[bs];
  // Source samples are stored as 16-bit values; reinterpret the buffer.
  const uint16_t *x16 = CONVERT_TO_SHORTPTR(x->plane[0].src.buf);
  const int stride = x->plane[0].src.stride;
  const int row = mi_row << MI_SIZE_LOG2;
  const int col = mi_col << MI_SIZE_LOG2;

  unsigned int sum = 0;
  unsigned int num_pix = 0;
  for (int r = row; r < row + bh && r < pic_h; r++) {
    for (int c = col; c < col + bw && c < pic_w; c++) {
      sum += x16[r * stride + c];
      num_pix++;
    }
  }
  return (num_pix != 0) ? (int)(sum / num_pix) : 0;
}
movb %dl, %al movq %rdi, -0x8(%rsp) movq %rsi, -0x10(%rsp) movb %al, -0x11(%rsp) movl %ecx, -0x18(%rsp) movl %r8d, -0x1c(%rsp) movq -0x8(%rsp), %rax movl 0x3bfb8(%rax), %eax movl %eax, -0x34(%rsp) movq -0x8(%rsp), %rax movl 0x3bfbc(%rax), %eax movl %eax, -0x38(%rsp) movzbl -0x11(%rsp), %eax movl %eax, %ecx leaq 0x51b5cb(%rip), %rax # 0xb16cc0 movzbl (%rax,%rcx), %eax shll $0x2, %eax movl %eax, -0x3c(%rsp) movzbl -0x11(%rsp), %eax movl %eax, %ecx leaq 0x51b5d2(%rip), %rax # 0xb16ce0 movzbl (%rax,%rcx), %eax shll $0x2, %eax movl %eax, -0x40(%rsp) movq -0x10(%rsp), %rax movq 0x30(%rax), %rax shlq %rax movq %rax, -0x48(%rsp) movl $0x0, -0x20(%rsp) movl $0x0, -0x28(%rsp) movl $0x0, -0x24(%rsp) movl -0x18(%rsp), %eax shll $0x2, %eax movl %eax, -0x4c(%rsp) movl -0x1c(%rsp), %eax shll $0x2, %eax movl %eax, -0x50(%rsp) movl -0x4c(%rsp), %eax movl %eax, -0x2c(%rsp) movl -0x2c(%rsp), %ecx movl -0x4c(%rsp), %edx addl -0x40(%rsp), %edx xorl %eax, %eax cmpl %edx, %ecx movb %al, -0x51(%rsp) jge 0x5fb785 movl -0x2c(%rsp), %eax cmpl -0x38(%rsp), %eax setl %al movb %al, -0x51(%rsp) movb -0x51(%rsp), %al testb $0x1, %al jne 0x5fb792 jmp 0x5fb81f movl -0x50(%rsp), %eax movl %eax, -0x30(%rsp) movl -0x30(%rsp), %ecx movl -0x50(%rsp), %edx addl -0x3c(%rsp), %edx xorl %eax, %eax cmpl %edx, %ecx movb %al, -0x52(%rsp) jge 0x5fb7bf movl -0x30(%rsp), %eax cmpl -0x34(%rsp), %eax setl %al movb %al, -0x52(%rsp) movb -0x52(%rsp), %al testb $0x1, %al jne 0x5fb7c9 jmp 0x5fb80d movq -0x48(%rsp), %rax movl -0x2c(%rsp), %ecx movq -0x10(%rsp), %rdx imull 0x48(%rdx), %ecx movslq %ecx, %rcx shlq %rcx addq %rcx, %rax movslq -0x30(%rsp), %rcx movzwl (%rax,%rcx,2), %eax addl -0x20(%rsp), %eax movl %eax, -0x20(%rsp) movl -0x28(%rsp), %eax addl $0x1, %eax movl %eax, -0x28(%rsp) movl -0x30(%rsp), %eax addl $0x1, %eax movl %eax, -0x30(%rsp) jmp 0x5fb79a jmp 0x5fb80f movl -0x2c(%rsp), %eax addl $0x1, %eax movl %eax, -0x2c(%rsp) jmp 0x5fb760 cmpl $0x0, -0x28(%rsp) je 0x5fb834 movl -0x20(%rsp), %eax xorl %edx, 
%edx divl -0x28(%rsp) movl %eax, -0x24(%rsp) movl -0x24(%rsp), %eax retq nopl (%rax)
/m-ab-s[P]aom/av1/encoder/aq_variance.c
save_comp_rd_search_stat
// Caches the rate/distortion statistics of a compound-prediction search in
// x->comp_rd_stats so a later search of the same candidate can reuse them.
// When the cache is full (MAX_COMP_RD_STATS entries) the stats are dropped.
static inline void save_comp_rd_search_stat( MACROBLOCK *x, const MB_MODE_INFO *const mbmi, const int32_t *comp_rate, const int64_t *comp_dist, const int32_t *comp_model_rate, const int64_t *comp_model_dist, const int_mv *cur_mv, const int *comp_rs2) {
  const int offset = x->comp_rd_stats_idx;
  if (offset < MAX_COMP_RD_STATS) {
    COMP_RD_STATS *const rd_stats = x->comp_rd_stats + offset;
    // Copy the per-compound-type stat arrays wholesale.
    memcpy(rd_stats->rate, comp_rate, sizeof(rd_stats->rate));
    memcpy(rd_stats->dist, comp_dist, sizeof(rd_stats->dist));
    memcpy(rd_stats->model_rate, comp_model_rate, sizeof(rd_stats->model_rate));
    memcpy(rd_stats->model_dist, comp_model_dist, sizeof(rd_stats->model_dist));
    memcpy(rd_stats->comp_rs2, comp_rs2, sizeof(rd_stats->comp_rs2));
    memcpy(rd_stats->mv, cur_mv, sizeof(rd_stats->mv));
    memcpy(rd_stats->ref_frames, mbmi->ref_frame, sizeof(rd_stats->ref_frames));
    // Key fields identifying the prediction this cache entry belongs to.
    rd_stats->mode = mbmi->mode;
    rd_stats->filter = mbmi->interp_filters;
    rd_stats->ref_mv_idx = mbmi->ref_mv_idx;
    const MACROBLOCKD *const xd = &x->e_mbd;
    for (int i = 0; i < 2; ++i) {
      // Record whether each reference's MV matches its global motion model.
      const WarpedMotionParams *const wm = &xd->global_motion[mbmi->ref_frame[i]];
      rd_stats->is_global[i] = is_global_mv_block(mbmi, wm->wmtype);
    }
    memcpy(&rd_stats->interinter_comp, &mbmi->interinter_comp, sizeof(rd_stats->interinter_comp));
    ++x->comp_rd_stats_idx;
  }
}
subq $0x58, %rsp movq 0x68(%rsp), %rax movq 0x60(%rsp), %rax movq %rdi, 0x50(%rsp) movq %rsi, 0x48(%rsp) movq %rdx, 0x40(%rsp) movq %rcx, 0x38(%rsp) movq %r8, 0x30(%rsp) movq %r9, 0x28(%rsp) movq 0x50(%rsp), %rax movl 0x1ee50(%rax), %eax movl %eax, 0x24(%rsp) cmpl $0x40, 0x24(%rsp) jge 0x607d62 movq 0x50(%rsp), %rax addq $0x1c650, %rax # imm = 0x1C650 movslq 0x24(%rsp), %rcx imulq $0xa0, %rcx, %rcx addq %rcx, %rax movq %rax, 0x18(%rsp) movq 0x18(%rsp), %rax movq 0x40(%rsp), %rcx movq (%rcx), %rdx movq %rdx, (%rax) movq 0x8(%rcx), %rcx movq %rcx, 0x8(%rax) movq 0x18(%rsp), %rax movq 0x38(%rsp), %rcx movq (%rcx), %rdx movq %rdx, 0x10(%rax) movq 0x8(%rcx), %rdx movq %rdx, 0x18(%rax) movq 0x10(%rcx), %rdx movq %rdx, 0x20(%rax) movq 0x18(%rcx), %rcx movq %rcx, 0x28(%rax) movq 0x18(%rsp), %rax movq 0x30(%rsp), %rcx movq (%rcx), %rdx movq %rdx, 0x30(%rax) movq 0x8(%rcx), %rcx movq %rcx, 0x38(%rax) movq 0x18(%rsp), %rax movq 0x28(%rsp), %rcx movq (%rcx), %rdx movq %rdx, 0x40(%rax) movq 0x8(%rcx), %rdx movq %rdx, 0x48(%rax) movq 0x10(%rcx), %rdx movq %rdx, 0x50(%rax) movq 0x18(%rcx), %rcx movq %rcx, 0x58(%rax) movq 0x18(%rsp), %rax movq 0x68(%rsp), %rcx movq (%rcx), %rdx movq %rdx, 0x60(%rax) movq 0x8(%rcx), %rcx movq %rcx, 0x68(%rax) movq 0x18(%rsp), %rax movq 0x60(%rsp), %rcx movq (%rcx), %rcx movq %rcx, 0x70(%rax) movq 0x18(%rsp), %rax movq 0x48(%rsp), %rcx movw 0x10(%rcx), %cx movw %cx, 0x78(%rax) movq 0x48(%rsp), %rax movb 0x2(%rax), %cl movq 0x18(%rsp), %rax movb %cl, 0x7a(%rax) movq 0x18(%rsp), %rax movq 0x48(%rsp), %rcx movl 0x14(%rcx), %ecx movl %ecx, 0x7c(%rax) movq 0x48(%rsp), %rax movw 0xa7(%rax), %ax shrw $0x4, %ax andw $0x3, %ax movzbl %al, %ecx movq 0x18(%rsp), %rax movl %ecx, 0x80(%rax) movq 0x50(%rsp), %rax addq $0x1a0, %rax # imm = 0x1A0 movq %rax, 0x10(%rsp) movl $0x0, 0xc(%rsp) cmpl $0x2, 0xc(%rsp) jge 0x607d2e movq 0x10(%rsp), %rax movq 0x29f8(%rax), %rax movq 0x48(%rsp), %rcx movslq 0xc(%rsp), %rdx movsbq 0x10(%rcx,%rdx), %rcx imulq $0x24, %rcx, %rcx 
addq %rcx, %rax movq %rax, (%rsp) movq 0x48(%rsp), %rdi movq (%rsp), %rax movzbl 0x20(%rax), %esi callq 0x609100 movl %eax, %edx movq 0x18(%rsp), %rax movslq 0xc(%rsp), %rcx movl %edx, 0x84(%rax,%rcx,4) movl 0xc(%rsp), %eax addl $0x1, %eax movl %eax, 0xc(%rsp) jmp 0x607cce movq 0x18(%rsp), %rax movq 0x48(%rsp), %rcx movq 0x48(%rcx), %rdx movq %rdx, 0x90(%rax) movq 0x50(%rcx), %rcx movq %rcx, 0x98(%rax) movq 0x50(%rsp), %rax movl 0x1ee50(%rax), %ecx addl $0x1, %ecx movl %ecx, 0x1ee50(%rax) addq $0x58, %rsp retq nopw (%rax,%rax)
/m-ab-s[P]aom/av1/encoder/compound_type.c
compute_best_interintra_mode
// Evaluates a single inter-intra mode: builds the intra predictor for the
// mode, blends it with the inter prediction in tmp_buf, estimates the RD
// cost with the model-based estimator, and updates *best_interintra_mode /
// *best_interintra_rd if this mode improves on the current best.
static inline void compute_best_interintra_mode( const AV1_COMP *const cpi, MB_MODE_INFO *mbmi, MACROBLOCKD *xd, MACROBLOCK *const x, const int *const interintra_mode_cost, const BUFFER_SET *orig_dst, uint8_t *intrapred, const uint8_t *tmp_buf, INTERINTRA_MODE *best_interintra_mode, int64_t *best_interintra_rd, INTERINTRA_MODE interintra_mode, BLOCK_SIZE bsize) {
  const AV1_COMMON *const cm = &cpi->common;
  int rate;
  uint8_t skip_txfm_sb;
  int64_t dist, skip_sse_sb;
  const int bw = block_size_wide[bsize];
  mbmi->interintra_mode = interintra_mode;
  // Signalling cost of choosing this inter-intra mode.
  int rmode = interintra_mode_cost[interintra_mode];
  // Build the intra predictor for this mode into `intrapred`.
  av1_build_intra_predictors_for_interintra(cm, xd, bsize, 0, orig_dst, intrapred, bw);
  // Blend the inter prediction (tmp_buf) with the intra predictor.
  av1_combine_interintra(xd, bsize, 0, tmp_buf, bw, intrapred, bw);
  // Model-based rate/distortion estimate (no full transform search here).
  model_rd_sb_fn[MODELRD_TYPE_INTERINTRA](cpi, bsize, x, xd, 0, 0, &rate, &dist, &skip_txfm_sb, &skip_sse_sb, NULL, NULL, NULL);
  int64_t rd = RDCOST(x->rdmult, rate + rmode, dist);
  if (rd < *best_interintra_rd) {
    *best_interintra_rd = rd;
    *best_interintra_mode = mbmi->interintra_mode;
  }
}
pushq %r14 pushq %rbx subq $0x98, %rsp movb 0xd8(%rsp), %al movb 0xd0(%rsp), %al movq 0xc8(%rsp), %rax movq 0xc0(%rsp), %rax movq 0xb8(%rsp), %rax movq 0xb0(%rsp), %rax movq %rdi, 0x90(%rsp) movq %rsi, 0x88(%rsp) movq %rdx, 0x80(%rsp) movq %rcx, 0x78(%rsp) movq %r8, 0x70(%rsp) movq %r9, 0x68(%rsp) movq 0x90(%rsp), %rax addq $0x3bf80, %rax # imm = 0x3BF80 movq %rax, 0x60(%rsp) movzbl 0xd8(%rsp), %eax movl %eax, %ecx leaq 0x50f3ed(%rip), %rax # 0xb17200 movzbl (%rax,%rcx), %eax movl %eax, 0x44(%rsp) movb 0xd0(%rsp), %cl movq 0x88(%rsp), %rax movb %cl, 0x40(%rax) movq 0x70(%rsp), %rax movzbl 0xd0(%rsp), %ecx movl (%rax,%rcx,4), %eax movl %eax, 0x40(%rsp) movq 0x60(%rsp), %rdi movq 0x80(%rsp), %rsi movb 0xd8(%rsp), %dl movq 0x68(%rsp), %r8 movq 0xb0(%rsp), %r9 movl 0x44(%rsp), %eax xorl %ecx, %ecx movzbl %dl, %edx movl %eax, (%rsp) callq 0x5dcab0 movq 0x80(%rsp), %rdi movb 0xd8(%rsp), %sil movq 0xb8(%rsp), %rcx movl 0x44(%rsp), %r8d movq 0xb0(%rsp), %r9 movl 0x44(%rsp), %eax xorl %edx, %edx movzbl %sil, %esi movl %eax, (%rsp) callq 0x5dccb0 movq 0x5944e7(%rip), %rax # 0xb9c398 movq 0x90(%rsp), %rdi movb 0xd8(%rsp), %sil movq 0x78(%rsp), %rdx movq 0x80(%rsp), %rcx xorl %r9d, %r9d leaq 0x5c(%rsp), %r14 leaq 0x50(%rsp), %rbx leaq 0x5b(%rsp), %r11 leaq 0x48(%rsp), %r10 xorl %r8d, %r8d movzbl %sil, %esi movl %r9d, %r8d movq %r14, (%rsp) movq %rbx, 0x8(%rsp) movq %r11, 0x10(%rsp) movq %r10, 0x18(%rsp) movq $0x0, 0x20(%rsp) movq $0x0, 0x28(%rsp) movq $0x0, 0x30(%rsp) callq *%rax movl 0x5c(%rsp), %eax addl 0x40(%rsp), %eax cltq movq 0x78(%rsp), %rcx movslq 0x4218(%rcx), %rcx imulq %rcx, %rax addq $0x100, %rax # imm = 0x100 sarq $0x9, %rax movq 0x50(%rsp), %rcx shlq $0x7, %rcx addq %rcx, %rax movq %rax, 0x38(%rsp) movq 0x38(%rsp), %rax movq 0xc8(%rsp), %rcx cmpq (%rcx), %rax jge 0x607f8b movq 0x38(%rsp), %rcx movq 0xc8(%rsp), %rax movq %rcx, (%rax) movq 0x88(%rsp), %rax movb 0x40(%rax), %cl movq 0xc0(%rsp), %rax movb %cl, (%rax) addq $0x98, %rsp popq %rbx popq %r14 retq nopw 
%cs:(%rax,%rax)
/m-ab-s[P]aom/av1/encoder/compound_type.c
get_rd_thresh_from_best_rd
static inline int64_t get_rd_thresh_from_best_rd(int64_t ref_best_rd, int mul_factor, int div_factor) { int64_t rd_thresh = ref_best_rd; if (div_factor != 0) { rd_thresh = ref_best_rd < (div_factor * (INT64_MAX / mul_factor)) ? ((ref_best_rd / div_factor) * mul_factor) : INT64_MAX; } return rd_thresh; }
movq %rdi, -0x8(%rsp) movl %esi, -0xc(%rsp) movl %edx, -0x10(%rsp) movq -0x8(%rsp), %rax movq %rax, -0x18(%rsp) cmpl $0x0, -0x10(%rsp) je 0x60808c movq -0x8(%rsp), %rax movq %rax, -0x20(%rsp) movslq -0x10(%rsp), %rcx movslq -0xc(%rsp), %rsi movabsq $0x7fffffffffffffff, %rax # imm = 0x7FFFFFFFFFFFFFFF cqto idivq %rsi movq %rax, %rdx movq -0x20(%rsp), %rax imulq %rdx, %rcx cmpq %rcx, %rax jge 0x608071 movq -0x8(%rsp), %rax movslq -0x10(%rsp), %rcx cqto idivq %rcx movslq -0xc(%rsp), %rcx imulq %rcx, %rax movq %rax, -0x28(%rsp) jmp 0x608082 movabsq $0x7fffffffffffffff, %rax # imm = 0x7FFFFFFFFFFFFFFF movq %rax, -0x28(%rsp) jmp 0x608082 movq -0x28(%rsp), %rax movq %rax, -0x18(%rsp) movq -0x18(%rsp), %rax retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/av1/encoder/rdopt_utils.h
pick_interintra_wedge
static int64_t pick_interintra_wedge(const AV1_COMP *const cpi, const MACROBLOCK *const x, const BLOCK_SIZE bsize, const uint8_t *const p0, const uint8_t *const p1) { const MACROBLOCKD *const xd = &x->e_mbd; MB_MODE_INFO *const mbmi = xd->mi[0]; assert(av1_is_wedge_used(bsize)); assert(cpi->common.seq_params->enable_interintra_compound); const struct buf_2d *const src = &x->plane[0].src; const int bw = block_size_wide[bsize]; const int bh = block_size_high[bsize]; DECLARE_ALIGNED(32, int16_t, residual1[MAX_SB_SQUARE]); // src - pred1 DECLARE_ALIGNED(32, int16_t, diff10[MAX_SB_SQUARE]); // pred1 - pred0 #if CONFIG_AV1_HIGHBITDEPTH if (is_cur_buf_hbd(xd)) { aom_highbd_subtract_block(bh, bw, residual1, bw, src->buf, src->stride, CONVERT_TO_BYTEPTR(p1), bw); aom_highbd_subtract_block(bh, bw, diff10, bw, CONVERT_TO_BYTEPTR(p1), bw, CONVERT_TO_BYTEPTR(p0), bw); } else { aom_subtract_block(bh, bw, residual1, bw, src->buf, src->stride, p1, bw); aom_subtract_block(bh, bw, diff10, bw, p1, bw, p0, bw); } #else aom_subtract_block(bh, bw, residual1, bw, src->buf, src->stride, p1, bw); aom_subtract_block(bh, bw, diff10, bw, p1, bw, p0, bw); #endif int8_t wedge_index = -1; uint64_t sse; int64_t rd = pick_wedge_fixed_sign(cpi, x, bsize, residual1, diff10, 0, &wedge_index, &sse); mbmi->interintra_wedge_index = wedge_index; return rd; }
pushq %rbp movq %rsp, %rbp andq $-0x20, %rsp subq $0x100a0, %rsp # imm = 0x100A0 movb %dl, %al movq %rdi, 0x10088(%rsp) movq %rsi, 0x10080(%rsp) movb %al, 0x1007f(%rsp) movq %rcx, 0x10070(%rsp) movq %r8, 0x10068(%rsp) movq 0x10080(%rsp), %rax addq $0x1a0, %rax # imm = 0x1A0 movq %rax, 0x10060(%rsp) movq 0x10060(%rsp), %rax movq 0x1eb8(%rax), %rax movq (%rax), %rax movq %rax, 0x10058(%rsp) movq 0x10080(%rsp), %rax addq $0x30, %rax movq %rax, 0x10050(%rsp) movzbl 0x1007f(%rsp), %eax movl %eax, %ecx leaq 0x50eec3(%rip), %rax # 0xb17200 movzbl (%rax,%rcx), %eax movl %eax, 0x1004c(%rsp) movzbl 0x1007f(%rsp), %eax movl %eax, %ecx leaq 0x50eef7(%rip), %rax # 0xb17250 movzbl (%rax,%rcx), %eax movl %eax, 0x10048(%rsp) movq 0x10060(%rsp), %rdi callq 0x608590 cmpl $0x0, %eax je 0x608424 movl 0x10048(%rsp), %edi movl 0x1004c(%rsp), %esi leaq 0x8040(%rsp), %rdx movslq 0x1004c(%rsp), %rcx movq 0x10050(%rsp), %rax movq (%rax), %r8 movq 0x10050(%rsp), %rax movslq 0x18(%rax), %r9 movq 0x10068(%rsp), %r10 shrq %r10 movslq 0x1004c(%rsp), %rax movq %r10, (%rsp) movq %rax, 0x8(%rsp) callq 0x3ad520 movl 0x10048(%rsp), %edi movl 0x1004c(%rsp), %esi leaq 0x40(%rsp), %rdx movslq 0x1004c(%rsp), %rcx movq 0x10068(%rsp), %r8 shrq %r8 movslq 0x1004c(%rsp), %r9 movq 0x10070(%rsp), %r10 shrq %r10 movslq 0x1004c(%rsp), %rax movq %r10, (%rsp) movq %rax, 0x8(%rsp) callq 0x3ad520 jmp 0x6084ce leaq 0x5a5165(%rip), %rax # 0xbad590 movq (%rax), %rax movl 0x10048(%rsp), %edi movl 0x1004c(%rsp), %esi leaq 0x8040(%rsp), %rdx movslq 0x1004c(%rsp), %rcx movq 0x10050(%rsp), %r8 movq (%r8), %r8 movq 0x10050(%rsp), %r9 movslq 0x18(%r9), %r9 movq 0x10068(%rsp), %r11 movslq 0x1004c(%rsp), %r10 movq %r11, (%rsp) movq %r10, 0x8(%rsp) callq *%rax leaq 0x5a510b(%rip), %rax # 0xbad590 movq (%rax), %rax movl 0x10048(%rsp), %edi movl 0x1004c(%rsp), %esi leaq 0x40(%rsp), %rdx movslq 0x1004c(%rsp), %rcx movq 0x10068(%rsp), %r8 movslq 0x1004c(%rsp), %r9 movq 0x10070(%rsp), %r11 movslq 0x1004c(%rsp), %r10 movq %r11, (%rsp) 
movq %r10, 0x8(%rsp) callq *%rax movb $-0x1, 0x3f(%rsp) movq 0x10088(%rsp), %rdi movq 0x10080(%rsp), %rsi movb 0x1007f(%rsp), %dl leaq 0x8040(%rsp), %rcx leaq 0x40(%rsp), %r8 xorl %r9d, %r9d leaq 0x3f(%rsp), %r10 leaq 0x30(%rsp), %rax movzbl %dl, %edx movq %r10, (%rsp) movq %rax, 0x8(%rsp) callq 0x6085c0 movq %rax, 0x28(%rsp) movb 0x3f(%rsp), %cl movq 0x10058(%rsp), %rax movb %cl, 0x41(%rax) movq 0x28(%rsp), %rax movq %rbp, %rsp popq %rbp retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/av1/encoder/compound_type.c
get_txfm_rd_gate_level
static inline int get_txfm_rd_gate_level( const int is_masked_compound_enabled, const int txfm_rd_gate_level[TX_SEARCH_CASES], BLOCK_SIZE bsize, TX_SEARCH_CASE tx_search_case, int eval_motion_mode) { assert(tx_search_case < TX_SEARCH_CASES); if (tx_search_case == TX_SEARCH_MOTION_MODE && !eval_motion_mode && num_pels_log2_lookup[bsize] > 8) return txfm_rd_gate_level[TX_SEARCH_MOTION_MODE]; // Enable aggressive gating of transform search only when masked compound type // is enabled. else if (tx_search_case == TX_SEARCH_COMP_TYPE_MODE && is_masked_compound_enabled) return txfm_rd_gate_level[TX_SEARCH_COMP_TYPE_MODE]; return txfm_rd_gate_level[TX_SEARCH_DEFAULT]; }
movb %cl, %al movb %dl, %cl movl %edi, -0x8(%rsp) movq %rsi, -0x10(%rsp) movb %cl, -0x11(%rsp) movb %al, -0x12(%rsp) movl %r8d, -0x18(%rsp) movzbl -0x12(%rsp), %eax cmpl $0x1, %eax jne 0x6094d0 cmpl $0x0, -0x18(%rsp) jne 0x6094d0 movzbl -0x11(%rsp), %eax movl %eax, %ecx leaq 0x50ddb7(%rip), %rax # 0xb17270 movzbl (%rax,%rcx), %eax cmpl $0x8, %eax jle 0x6094d0 movq -0x10(%rsp), %rax movl 0x4(%rax), %eax movl %eax, -0x4(%rsp) jmp 0x6094fc movzbl -0x12(%rsp), %eax cmpl $0x2, %eax jne 0x6094ef cmpl $0x0, -0x8(%rsp) je 0x6094ef movq -0x10(%rsp), %rax movl 0x8(%rax), %eax movl %eax, -0x4(%rsp) jmp 0x6094fc jmp 0x6094f1 movq -0x10(%rsp), %rax movl (%rax), %eax movl %eax, -0x4(%rsp) movl -0x4(%rsp), %eax retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/av1/encoder/rdopt_utils.h
check_txfm_eval
static inline int check_txfm_eval(MACROBLOCK *const x, BLOCK_SIZE bsize, int64_t best_skip_rd, int64_t skip_rd, int level, int is_luma_only) { int eval_txfm = 1; // Derive aggressiveness factor for gating the transform search // Lower value indicates more aggressiveness. Be more conservative (high // value) for (i) low quantizers (ii) regions where prediction is poor const int scale[MAX_TX_RD_GATE_LEVEL + 1] = { INT_MAX, 4, 3, 2, 2, 1 }; const int qslope = 2 * (!is_luma_only); const int level_to_qindex_map[MAX_TX_RD_GATE_LEVEL + 1] = { 0, 0, 0, 80, 100, 140 }; int aggr_factor = 4; assert(level <= MAX_TX_RD_GATE_LEVEL); const int pred_qindex_thresh = level_to_qindex_map[level]; if (!is_luma_only && level <= 2) { aggr_factor = 4 * AOMMAX(1, ROUND_POWER_OF_TWO((MAXQ - x->qindex) * qslope, QINDEX_BITS)); } if ((best_skip_rd > (x->source_variance << (num_pels_log2_lookup[bsize] + RDDIV_BITS))) && (x->qindex >= pred_qindex_thresh)) aggr_factor *= scale[level]; // For level setting 1, be more conservative for non-luma-only case even when // prediction is good. else if ((level <= 1) && !is_luma_only) aggr_factor = (aggr_factor >> 2) * 6; // Be more conservative for luma only cases (called from compound type rd) // since best_skip_rd is computed after and skip_rd is computed (with 8-bit // prediction signals blended for WEDGE/DIFFWTD rather than 16-bit) before // interpolation filter search const int luma_mul[MAX_TX_RD_GATE_LEVEL + 1] = { INT_MAX, 32, 29, 17, 17, 17 }; int mul_factor = is_luma_only ? luma_mul[level] : 16; int64_t rd_thresh = (best_skip_rd == INT64_MAX) ? best_skip_rd : (int64_t)(best_skip_rd * aggr_factor * mul_factor >> 6); if (skip_rd > rd_thresh) eval_txfm = 0; return eval_txfm; }
subq $0x28, %rsp movb %sil, %al movq %rdi, 0x20(%rsp) movb %al, 0x1f(%rsp) movq %rdx, 0x10(%rsp) movq %rcx, 0x8(%rsp) movl %r8d, 0x4(%rsp) movl %r9d, (%rsp) movl $0x1, -0x4(%rsp) movq 0x50dc6e(%rip), %rax # 0xb172a0 movq %rax, -0x20(%rsp) movq 0x50dc6a(%rip), %rax # 0xb172a8 movq %rax, -0x18(%rsp) movq 0x50dc66(%rip), %rax # 0xb172b0 movq %rax, -0x10(%rsp) cmpl $0x0, (%rsp) setne %al xorb $-0x1, %al andb $0x1, %al movzbl %al, %eax shll %eax movl %eax, -0x24(%rsp) movq 0x50dc56(%rip), %rax # 0xb172c0 movq %rax, -0x40(%rsp) movq 0x50dc52(%rip), %rax # 0xb172c8 movq %rax, -0x38(%rsp) movq 0x50dc4e(%rip), %rax # 0xb172d0 movq %rax, -0x30(%rsp) movl $0x4, -0x44(%rsp) movslq 0x4(%rsp), %rax movl -0x40(%rsp,%rax,4), %eax movl %eax, -0x48(%rsp) cmpl $0x0, (%rsp) jne 0x609707 cmpl $0x2, 0x4(%rsp) jg 0x609707 movq 0x20(%rsp), %rax movl $0xff, %ecx subl 0x4208(%rax), %ecx imull -0x24(%rsp), %ecx addl $0x80, %ecx sarl $0x8, %ecx movl $0x1, %eax cmpl %ecx, %eax jle 0x6096db movl $0x1, %eax movl %eax, -0x74(%rsp) jmp 0x6096fc movq 0x20(%rsp), %rcx movl $0xff, %eax subl 0x4208(%rcx), %eax imull -0x24(%rsp), %eax addl $0x80, %eax sarl $0x8, %eax movl %eax, -0x74(%rsp) movl -0x74(%rsp), %eax shll $0x2, %eax movl %eax, -0x44(%rsp) movq 0x10(%rsp), %rax movq 0x20(%rsp), %rcx movl 0x25640(%rcx), %edx movzbl 0x1f(%rsp), %ecx movl %ecx, %esi leaq 0x50db4b(%rip), %rcx # 0xb17270 movzbl (%rcx,%rsi), %ecx addl $0x7, %ecx shll %cl, %edx movl %edx, %ecx movl %ecx, %ecx cmpq %rcx, %rax jle 0x60975c movq 0x20(%rsp), %rax movl 0x4208(%rax), %eax cmpl -0x48(%rsp), %eax jl 0x60975c movslq 0x4(%rsp), %rax movl -0x20(%rsp,%rax,4), %eax imull -0x44(%rsp), %eax movl %eax, -0x44(%rsp) jmp 0x609779 cmpl $0x1, 0x4(%rsp) jg 0x609777 cmpl $0x0, (%rsp) jne 0x609777 movl -0x44(%rsp), %eax sarl $0x2, %eax imull $0x6, %eax, %eax movl %eax, -0x44(%rsp) jmp 0x609779 movq 0x50db60(%rip), %rax # 0xb172e0 movq %rax, -0x60(%rsp) movq 0x50db5c(%rip), %rax # 0xb172e8 movq %rax, -0x58(%rsp) movq 0x50db58(%rip), %rax # 
0xb172f0 movq %rax, -0x50(%rsp) cmpl $0x0, (%rsp) je 0x6097b2 movslq 0x4(%rsp), %rax movl -0x60(%rsp,%rax,4), %eax movl %eax, -0x78(%rsp) jmp 0x6097bd movl $0x10, %eax movl %eax, -0x78(%rsp) jmp 0x6097bd movl -0x78(%rsp), %eax movl %eax, -0x64(%rsp) movabsq $0x7fffffffffffffff, %rax # imm = 0x7FFFFFFFFFFFFFFF cmpq %rax, 0x10(%rsp) jne 0x6097e2 movq 0x10(%rsp), %rax movq %rax, -0x80(%rsp) jmp 0x609802 movq 0x10(%rsp), %rax movslq -0x44(%rsp), %rcx imulq %rcx, %rax movslq -0x64(%rsp), %rcx imulq %rcx, %rax sarq $0x6, %rax movq %rax, -0x80(%rsp) movq -0x80(%rsp), %rax movq %rax, -0x70(%rsp) movq 0x8(%rsp), %rax cmpq -0x70(%rsp), %rax jle 0x609820 movl $0x0, -0x4(%rsp) movl -0x4(%rsp), %eax addq $0x28, %rsp retq nopl (%rax)
/m-ab-s[P]aom/av1/encoder/rdopt_utils.h
model_rd_for_sb
static inline void model_rd_for_sb(const AV1_COMP *const cpi, BLOCK_SIZE bsize, MACROBLOCK *x, MACROBLOCKD *xd, int plane_from, int plane_to, int *out_rate_sum, int64_t *out_dist_sum, uint8_t *skip_txfm_sb, int64_t *skip_sse_sb, int *plane_rate, int64_t *plane_sse, int64_t *plane_dist) { // Note our transform coeffs are 8 times an orthogonal transform. // Hence quantizer step is also 8 times. To get effective quantizer // we need to divide by 8 before sending to modeling function. int plane; const int ref = xd->mi[0]->ref_frame[0]; int64_t rate_sum = 0; int64_t dist_sum = 0; int64_t total_sse = 0; assert(bsize < BLOCK_SIZES_ALL); for (plane = plane_from; plane <= plane_to; ++plane) { if (plane && !xd->is_chroma_ref) break; struct macroblock_plane *const p = &x->plane[plane]; struct macroblockd_plane *const pd = &xd->plane[plane]; const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd->subsampling_x, pd->subsampling_y); assert(plane_bsize < BLOCK_SIZES_ALL); const int bw = block_size_wide[plane_bsize]; const int bh = block_size_high[plane_bsize]; int64_t sse; int rate; int64_t dist; sse = calculate_sse(xd, p, pd, bw, bh); model_rd_from_sse(cpi, x, plane_bsize, plane, sse, bw * bh, &rate, &dist); if (plane == 0) x->pred_sse[ref] = (unsigned int)AOMMIN(sse, UINT_MAX); total_sse += sse; rate_sum += rate; dist_sum += dist; if (plane_rate) plane_rate[plane] = rate; if (plane_sse) plane_sse[plane] = sse; if (plane_dist) plane_dist[plane] = dist; assert(rate_sum >= 0); } if (skip_txfm_sb) *skip_txfm_sb = total_sse == 0; if (skip_sse_sb) *skip_sse_sb = total_sse << 4; rate_sum = AOMMIN(rate_sum, INT_MAX); *out_rate_sum = (int)rate_sum; *out_dist_sum = dist_sum; }
subq $0xa8, %rsp movb %sil, %al movq 0xe0(%rsp), %rsi movq 0xd8(%rsp), %rsi movq 0xd0(%rsp), %rsi movq 0xc8(%rsp), %rsi movq 0xc0(%rsp), %rsi movq 0xb8(%rsp), %rsi movq 0xb0(%rsp), %rsi movq %rdi, 0xa0(%rsp) movb %al, 0x9f(%rsp) movq %rdx, 0x90(%rsp) movq %rcx, 0x88(%rsp) movl %r8d, 0x84(%rsp) movl %r9d, 0x80(%rsp) movq 0x88(%rsp), %rax movq 0x1eb8(%rax), %rax movq (%rax), %rax movsbl 0x10(%rax), %eax movl %eax, 0x78(%rsp) movq $0x0, 0x70(%rsp) movq $0x0, 0x68(%rsp) movq $0x0, 0x60(%rsp) movl 0x84(%rsp), %eax movl %eax, 0x7c(%rsp) movl 0x7c(%rsp), %eax cmpl 0x80(%rsp), %eax jg 0x609e95 cmpl $0x0, 0x7c(%rsp) je 0x609cbc movq 0x88(%rsp), %rax testb $0x1, 0xc(%rax) jne 0x609cbc jmp 0x609e95 movq 0x90(%rsp), %rax movslq 0x7c(%rsp), %rcx imulq $0x88, %rcx, %rcx addq %rcx, %rax movq %rax, 0x58(%rsp) movq 0x88(%rsp), %rax addq $0x10, %rax movslq 0x7c(%rsp), %rcx imulq $0xa30, %rcx, %rcx # imm = 0xA30 addq %rcx, %rax movq %rax, 0x50(%rsp) movb 0x9f(%rsp), %al movq 0x50(%rsp), %rcx movl 0x4(%rcx), %esi movq 0x50(%rsp), %rcx movl 0x8(%rcx), %edx movzbl %al, %edi callq 0x609830 movb %al, 0x4f(%rsp) movzbl 0x4f(%rsp), %eax movl %eax, %ecx leaq 0x50d4d7(%rip), %rax # 0xb17200 movzbl (%rax,%rcx), %eax movl %eax, 0x48(%rsp) movzbl 0x4f(%rsp), %eax movl %eax, %ecx leaq 0x50d511(%rip), %rax # 0xb17250 movzbl (%rax,%rcx), %eax movl %eax, 0x44(%rsp) movq 0x88(%rsp), %rdi movq 0x58(%rsp), %rsi movq 0x50(%rsp), %rdx movl 0x48(%rsp), %ecx movl 0x44(%rsp), %r8d callq 0x609a00 movq %rax, 0x38(%rsp) movq 0xa0(%rsp), %rdi movq 0x90(%rsp), %rsi movb 0x4f(%rsp), %dl movl 0x7c(%rsp), %ecx movq 0x38(%rsp), %r8 movl 0x48(%rsp), %r9d imull 0x44(%rsp), %r9d leaq 0x34(%rsp), %r10 leaq 0x28(%rsp), %rax movzbl %dl, %edx movq %r10, (%rsp) movq %rax, 0x8(%rsp) callq 0x608890 cmpl $0x0, 0x7c(%rsp) jne 0x609df5 movl $0xffffffff, %eax # imm = 0xFFFFFFFF cmpq %rax, 0x38(%rsp) jge 0x609dce movq 0x38(%rsp), %rax movq %rax, 0x20(%rsp) jmp 0x609dda movl $0xffffffff, %eax # imm = 0xFFFFFFFF movq %rax, 
0x20(%rsp) jmp 0x609dda movq 0x20(%rsp), %rax movl %eax, %edx movq 0x90(%rsp), %rax movslq 0x78(%rsp), %rcx movl %edx, 0x25660(%rax,%rcx,4) movq 0x38(%rsp), %rax addq 0x60(%rsp), %rax movq %rax, 0x60(%rsp) movslq 0x34(%rsp), %rax addq 0x70(%rsp), %rax movq %rax, 0x70(%rsp) movq 0x28(%rsp), %rax addq 0x68(%rsp), %rax movq %rax, 0x68(%rsp) cmpq $0x0, 0xd0(%rsp) je 0x609e41 movl 0x34(%rsp), %edx movq 0xd0(%rsp), %rax movslq 0x7c(%rsp), %rcx movl %edx, (%rax,%rcx,4) cmpq $0x0, 0xd8(%rsp) je 0x609e62 movq 0x38(%rsp), %rdx movq 0xd8(%rsp), %rax movslq 0x7c(%rsp), %rcx movq %rdx, (%rax,%rcx,8) cmpq $0x0, 0xe0(%rsp) je 0x609e83 movq 0x28(%rsp), %rdx movq 0xe0(%rsp), %rax movslq 0x7c(%rsp), %rcx movq %rdx, (%rax,%rcx,8) jmp 0x609e85 movl 0x7c(%rsp), %eax addl $0x1, %eax movl %eax, 0x7c(%rsp) jmp 0x609c91 cmpq $0x0, 0xc0(%rsp) je 0x609eba cmpq $0x0, 0x60(%rsp) sete %al andb $0x1, %al movzbl %al, %eax movb %al, %cl movq 0xc0(%rsp), %rax movb %cl, (%rax) cmpq $0x0, 0xc8(%rsp) je 0x609ed9 movq 0x60(%rsp), %rcx shlq $0x4, %rcx movq 0xc8(%rsp), %rax movq %rcx, (%rax) cmpq $0x7fffffff, 0x70(%rsp) # imm = 0x7FFFFFFF jge 0x609ef0 movq 0x70(%rsp), %rax movq %rax, 0x18(%rsp) jmp 0x609efc movl $0x7fffffff, %eax # imm = 0x7FFFFFFF movq %rax, 0x18(%rsp) jmp 0x609efc movq 0x18(%rsp), %rax movq %rax, 0x70(%rsp) movq 0x70(%rsp), %rax movl %eax, %ecx movq 0xb0(%rsp), %rax movl %ecx, (%rax) movq 0x68(%rsp), %rcx movq 0xb8(%rsp), %rax movq %rcx, (%rax) addq $0xa8, %rsp retq nop
/m-ab-s[P]aom/av1/encoder/model_rd.h
get_inter_predictors_masked_compound
static inline void get_inter_predictors_masked_compound( MACROBLOCK *x, const BLOCK_SIZE bsize, uint8_t **preds0, uint8_t **preds1, int16_t *residual1, int16_t *diff10, int *strides) { MACROBLOCKD *xd = &x->e_mbd; const int bw = block_size_wide[bsize]; const int bh = block_size_high[bsize]; // get inter predictors to use for masked compound modes av1_build_inter_predictors_for_planes_single_buf(xd, bsize, 0, 0, 0, preds0, strides); av1_build_inter_predictors_for_planes_single_buf(xd, bsize, 0, 0, 1, preds1, strides); const struct buf_2d *const src = &x->plane[0].src; #if CONFIG_AV1_HIGHBITDEPTH if (is_cur_buf_hbd(xd)) { aom_highbd_subtract_block(bh, bw, residual1, bw, src->buf, src->stride, CONVERT_TO_BYTEPTR(*preds1), bw); aom_highbd_subtract_block(bh, bw, diff10, bw, CONVERT_TO_BYTEPTR(*preds1), bw, CONVERT_TO_BYTEPTR(*preds0), bw); } else { aom_subtract_block(bh, bw, residual1, bw, src->buf, src->stride, *preds1, bw); aom_subtract_block(bh, bw, diff10, bw, *preds1, bw, *preds0, bw); } #else aom_subtract_block(bh, bw, residual1, bw, src->buf, src->stride, *preds1, bw); aom_subtract_block(bh, bw, diff10, bw, *preds1, bw, *preds0, bw); #endif }
subq $0x58, %rsp movb %sil, %al movq 0x60(%rsp), %rsi movq %rdi, 0x50(%rsp) movb %al, 0x4f(%rsp) movq %rdx, 0x40(%rsp) movq %rcx, 0x38(%rsp) movq %r8, 0x30(%rsp) movq %r9, 0x28(%rsp) movq 0x50(%rsp), %rax addq $0x1a0, %rax # imm = 0x1A0 movq %rax, 0x20(%rsp) movzbl 0x4f(%rsp), %eax movl %eax, %ecx leaq 0x50c9d9(%rip), %rax # 0xb17200 movzbl (%rax,%rcx), %eax movl %eax, 0x1c(%rsp) movzbl 0x4f(%rsp), %eax movl %eax, %ecx leaq 0x50ca13(%rip), %rax # 0xb17250 movzbl (%rax,%rcx), %eax movl %eax, 0x18(%rsp) movq 0x20(%rsp), %rdi movb 0x4f(%rsp), %cl movq 0x40(%rsp), %r9 movq 0x60(%rsp), %rax xorl %r8d, %r8d movzbl %cl, %esi movl %r8d, %edx movl %r8d, %ecx movq %rax, (%rsp) callq 0x206080 movq 0x20(%rsp), %rdi movb 0x4f(%rsp), %dl movq 0x38(%rsp), %r9 movq 0x60(%rsp), %rax xorl %ecx, %ecx movl $0x1, %r8d movzbl %dl, %esi movl %ecx, %edx movq %rax, (%rsp) callq 0x206080 movq 0x50(%rsp), %rax addq $0x30, %rax movq %rax, 0x10(%rsp) movq 0x20(%rsp), %rdi callq 0x608590 cmpl $0x0, %eax je 0x60a93d movl 0x18(%rsp), %edi movl 0x1c(%rsp), %esi movq 0x30(%rsp), %rdx movslq 0x1c(%rsp), %rcx movq 0x10(%rsp), %rax movq (%rax), %r8 movq 0x10(%rsp), %rax movslq 0x18(%rax), %r9 movq 0x38(%rsp), %rax movq (%rax), %r10 shrq %r10 movslq 0x1c(%rsp), %rax movq %r10, (%rsp) movq %rax, 0x8(%rsp) callq 0x3ad520 movl 0x18(%rsp), %edi movl 0x1c(%rsp), %esi movq 0x28(%rsp), %rdx movslq 0x1c(%rsp), %rcx movq 0x38(%rsp), %rax movq (%rax), %r8 shrq %r8 movslq 0x1c(%rsp), %r9 movq 0x40(%rsp), %rax movq (%rax), %r10 shrq %r10 movslq 0x1c(%rsp), %rax movq %r10, (%rsp) movq %rax, 0x8(%rsp) callq 0x3ad520 jmp 0x60a9c3 leaq 0x5a2c4c(%rip), %rax # 0xbad590 movq (%rax), %rax movl 0x18(%rsp), %edi movl 0x1c(%rsp), %esi movq 0x30(%rsp), %rdx movslq 0x1c(%rsp), %rcx movq 0x10(%rsp), %r8 movq (%r8), %r8 movq 0x10(%rsp), %r9 movslq 0x18(%r9), %r9 movq 0x38(%rsp), %r10 movq (%r10), %r11 movslq 0x1c(%rsp), %r10 movq %r11, (%rsp) movq %r10, 0x8(%rsp) callq *%rax leaq 0x5a2c07(%rip), %rax # 0xbad590 movq (%rax), %rax 
movl 0x18(%rsp), %edi movl 0x1c(%rsp), %esi movq 0x28(%rsp), %rdx movslq 0x1c(%rsp), %rcx movq 0x38(%rsp), %r8 movq (%r8), %r8 movslq 0x1c(%rsp), %r9 movq 0x40(%rsp), %r10 movq (%r10), %r11 movslq 0x1c(%rsp), %r10 movq %r11, (%rsp) movq %r10, 0x8(%rsp) callq *%rax addq $0x58, %rsp retq nopl (%rax,%rax)
/m-ab-s[P]aom/av1/encoder/compound_type.c
prepare_coeffs_12tap
static inline void prepare_coeffs_12tap(const InterpFilterParams *filter_params, int subpel_q4, __m128i *coeffs /* [6] */) { const int16_t *const y_filter = av1_get_interp_filter_subpel_kernel( filter_params, subpel_q4 & SUBPEL_MASK); __m128i coeffs_y = _mm_loadu_si128((__m128i *)y_filter); coeffs[0] = _mm_shuffle_epi32(coeffs_y, 0); // coeffs 0 1 0 1 0 1 0 1 coeffs[1] = _mm_shuffle_epi32(coeffs_y, 85); // coeffs 2 3 2 3 2 3 2 3 coeffs[2] = _mm_shuffle_epi32(coeffs_y, 170); // coeffs 4 5 4 5 4 5 4 5 coeffs[3] = _mm_shuffle_epi32(coeffs_y, 255); // coeffs 6 7 6 7 6 7 6 7 coeffs_y = _mm_loadl_epi64((__m128i *)(y_filter + 8)); coeffs[4] = _mm_shuffle_epi32(coeffs_y, 0); // coeffs 8 9 8 9 8 9 8 9 coeffs[5] = _mm_shuffle_epi32(coeffs_y, 85); // coeffs 10 11 10 11 10 11 10 11 }
subq $0x58, %rsp movq %rdi, 0x28(%rsp) movl %esi, 0x24(%rsp) movq %rdx, 0x18(%rsp) movq 0x28(%rsp), %rdi movl 0x24(%rsp), %esi andl $0xf, %esi callq 0x637370 movq %rax, 0x10(%rsp) movq 0x10(%rsp), %rax movq %rax, 0x50(%rsp) movq 0x50(%rsp), %rax movups (%rax), %xmm0 movaps %xmm0, (%rsp) movaps (%rsp), %xmm0 pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0] movq 0x18(%rsp), %rax movaps %xmm0, (%rax) movaps (%rsp), %xmm0 pshufd $0x55, %xmm0, %xmm0 # xmm0 = xmm0[1,1,1,1] movq 0x18(%rsp), %rax movaps %xmm0, 0x10(%rax) movaps (%rsp), %xmm0 pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2] movq 0x18(%rsp), %rax movaps %xmm0, 0x20(%rax) movaps (%rsp), %xmm0 pshufd $0xff, %xmm0, %xmm0 # xmm0 = xmm0[3,3,3,3] movq 0x18(%rsp), %rax movaps %xmm0, 0x30(%rax) movq 0x10(%rsp), %rax addq $0x10, %rax movq %rax, 0x48(%rsp) movq 0x48(%rsp), %rax movq (%rax), %xmm0 movaps %xmm0, 0x30(%rsp) movaps 0x30(%rsp), %xmm0 movaps %xmm0, (%rsp) movaps (%rsp), %xmm0 pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0] movq 0x18(%rsp), %rax movaps %xmm0, 0x40(%rax) movaps (%rsp), %xmm0 pshufd $0x55, %xmm0, %xmm0 # xmm0 = xmm0[1,1,1,1] movq 0x18(%rsp), %rax movdqa %xmm0, 0x50(%rax) addq $0x58, %rsp retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/aom_dsp/x86/convolve_common_intrin.h
highbd_apply_temporal_filter
static void highbd_apply_temporal_filter( const uint16_t *frame1, const unsigned int stride, const uint16_t *frame2, const unsigned int stride2, const int block_width, const int block_height, const int *subblock_mses, unsigned int *accumulator, uint16_t *count, uint32_t *frame_sse, uint32_t *luma_sse_sum, int bd, const double inv_num_ref_pixels, const double decay_factor, const double inv_factor, const double weight_factor, double *d_factor, int tf_wgt_calc_lvl) { assert(((block_width == 16) || (block_width == 32)) && ((block_height == 16) || (block_height == 32))); uint32_t acc_5x5_sse[BH][BW]; get_squared_error(frame1, stride, frame2, stride2, block_width, block_height, frame_sse, SSE_STRIDE); __m128i vsrc[5][2]; // Traverse 4 columns at a time // First and last columns will require padding for (int col = 0; col < block_width; col += 4) { uint32_t *src = frame_sse + col; // Load and pad(for first and last col) 3 rows from the top for (int i = 2; i < 5; i++) { xx_load_and_pad(src, vsrc[i], col, block_width); src += SSE_STRIDE; } // Padding for top 2 rows vsrc[0][0] = vsrc[2][0]; vsrc[0][1] = vsrc[2][1]; vsrc[1][0] = vsrc[2][0]; vsrc[1][1] = vsrc[2][1]; for (int row = 0; row < block_height - 3; row++) { __m128i vsum11 = _mm_add_epi32(vsrc[0][0], vsrc[1][0]); __m128i vsum12 = _mm_add_epi32(vsrc[2][0], vsrc[3][0]); __m128i vsum13 = _mm_add_epi32(vsum11, vsum12); __m128i vsum1 = _mm_add_epi32(vsum13, vsrc[4][0]); __m128i vsum21 = _mm_add_epi32(vsrc[0][1], vsrc[1][1]); __m128i vsum22 = _mm_add_epi32(vsrc[2][1], vsrc[3][1]); __m128i vsum23 = _mm_add_epi32(vsum21, vsum22); __m128i vsum2 = _mm_add_epi32(vsum23, vsrc[4][1]); vsrc[0][0] = vsrc[1][0]; vsrc[0][1] = vsrc[1][1]; vsrc[1][0] = vsrc[2][0]; vsrc[1][1] = vsrc[2][1]; vsrc[2][0] = vsrc[3][0]; vsrc[2][1] = vsrc[3][1]; vsrc[3][0] = vsrc[4][0]; vsrc[3][1] = vsrc[4][1]; // Load next row xx_load_and_pad(src, vsrc[4], col, block_width); src += SSE_STRIDE; acc_5x5_sse[row][col] = xx_mask_and_hadd(vsum1, vsum2, 0); 
acc_5x5_sse[row][col + 1] = xx_mask_and_hadd(vsum1, vsum2, 1); acc_5x5_sse[row][col + 2] = xx_mask_and_hadd(vsum1, vsum2, 2); acc_5x5_sse[row][col + 3] = xx_mask_and_hadd(vsum1, vsum2, 3); } for (int row = block_height - 3; row < block_height; row++) { __m128i vsum11 = _mm_add_epi32(vsrc[0][0], vsrc[1][0]); __m128i vsum12 = _mm_add_epi32(vsrc[2][0], vsrc[3][0]); __m128i vsum13 = _mm_add_epi32(vsum11, vsum12); __m128i vsum1 = _mm_add_epi32(vsum13, vsrc[4][0]); __m128i vsum21 = _mm_add_epi32(vsrc[0][1], vsrc[1][1]); __m128i vsum22 = _mm_add_epi32(vsrc[2][1], vsrc[3][1]); __m128i vsum23 = _mm_add_epi32(vsum21, vsum22); __m128i vsum2 = _mm_add_epi32(vsum23, vsrc[4][1]); vsrc[0][0] = vsrc[1][0]; vsrc[0][1] = vsrc[1][1]; vsrc[1][0] = vsrc[2][0]; vsrc[1][1] = vsrc[2][1]; vsrc[2][0] = vsrc[3][0]; vsrc[2][1] = vsrc[3][1]; vsrc[3][0] = vsrc[4][0]; vsrc[3][1] = vsrc[4][1]; acc_5x5_sse[row][col] = xx_mask_and_hadd(vsum1, vsum2, 0); acc_5x5_sse[row][col + 1] = xx_mask_and_hadd(vsum1, vsum2, 1); acc_5x5_sse[row][col + 2] = xx_mask_and_hadd(vsum1, vsum2, 2); acc_5x5_sse[row][col + 3] = xx_mask_and_hadd(vsum1, vsum2, 3); } } double subblock_mses_scaled[4]; double d_factor_decayed[4]; for (int idx = 0; idx < 4; idx++) { subblock_mses_scaled[idx] = subblock_mses[idx] * inv_factor; d_factor_decayed[idx] = d_factor[idx] * decay_factor; } if (tf_wgt_calc_lvl == 0) { for (int i = 0, k = 0; i < block_height; i++) { const int y_blk_raster_offset = (i >= block_height / 2) * 2; for (int j = 0; j < block_width; j++, k++) { const int pixel_value = frame2[i * stride2 + j]; uint32_t diff_sse = acc_5x5_sse[i][j] + luma_sse_sum[i * BW + j]; // Scale down the difference for high bit depth input. 
diff_sse >>= ((bd - 8) * 2); const double window_error = diff_sse * inv_num_ref_pixels; const int subblock_idx = y_blk_raster_offset + (j >= block_width / 2); const double combined_error = weight_factor * window_error + subblock_mses_scaled[subblock_idx]; double scaled_error = combined_error * d_factor_decayed[subblock_idx]; scaled_error = AOMMIN(scaled_error, 7); const int weight = (int)(exp(-scaled_error) * TF_WEIGHT_SCALE); count[k] += weight; accumulator[k] += weight * pixel_value; } } } else { for (int i = 0, k = 0; i < block_height; i++) { const int y_blk_raster_offset = (i >= block_height / 2) * 2; for (int j = 0; j < block_width; j++, k++) { const int pixel_value = frame2[i * stride2 + j]; uint32_t diff_sse = acc_5x5_sse[i][j] + luma_sse_sum[i * BW + j]; // Scale down the difference for high bit depth input. diff_sse >>= ((bd - 8) * 2); const double window_error = diff_sse * inv_num_ref_pixels; const int subblock_idx = y_blk_raster_offset + (j >= block_width / 2); const double combined_error = weight_factor * window_error + subblock_mses_scaled[subblock_idx]; double scaled_error = combined_error * d_factor_decayed[subblock_idx]; scaled_error = AOMMIN(scaled_error, 7); const float fweight = approx_exp((float)-scaled_error) * TF_WEIGHT_SCALE; const int weight = iroundpf(fweight); count[k] += weight; accumulator[k] += weight * pixel_value; } } } }
subq $0x1528, %rsp # imm = 0x1528 movl 0x1568(%rsp), %eax movq 0x1560(%rsp), %rax movl 0x1558(%rsp), %eax movq 0x1550(%rsp), %rax movq 0x1548(%rsp), %rax movq 0x1540(%rsp), %rax movq 0x1538(%rsp), %rax movq 0x1530(%rsp), %rax movq %rdi, 0x1318(%rsp) movl %esi, 0x1314(%rsp) movq %rdx, 0x1308(%rsp) movl %ecx, 0x1304(%rsp) movl %r8d, 0x1300(%rsp) movl %r9d, 0x12fc(%rsp) movsd %xmm0, 0x12f0(%rsp) movsd %xmm1, 0x12e8(%rsp) movsd %xmm2, 0x12e0(%rsp) movsd %xmm3, 0x12d8(%rsp) movq 0x1318(%rsp), %rdi movl 0x1314(%rsp), %esi movq 0x1308(%rsp), %rdx movl 0x1304(%rsp), %ecx movl 0x1300(%rsp), %r8d movl 0x12fc(%rsp), %r9d movq 0x1548(%rsp), %rax movq %rax, (%rsp) movl $0x24, 0x8(%rsp) callq 0x647940 movl $0x0, 0x22c(%rsp) movl 0x22c(%rsp), %eax cmpl 0x1300(%rsp), %eax jge 0x647389 movq 0x1548(%rsp), %rax movslq 0x22c(%rsp), %rcx shlq $0x2, %rcx addq %rcx, %rax movq %rax, 0x220(%rsp) movl $0x2, 0x21c(%rsp) cmpl $0x5, 0x21c(%rsp) jge 0x646b0c movq 0x220(%rsp), %rdi movslq 0x21c(%rsp), %rax leaq 0x230(%rsp), %rsi shlq $0x5, %rax addq %rax, %rsi movl 0x22c(%rsp), %edx movl 0x1300(%rsp), %ecx callq 0x647bd0 movq 0x220(%rsp), %rax addq $0x90, %rax movq %rax, 0x220(%rsp) movl 0x21c(%rsp), %eax addl $0x1, %eax movl %eax, 0x21c(%rsp) jmp 0x646aa7 movdqa 0x270(%rsp), %xmm0 movdqa %xmm0, 0x230(%rsp) movdqa 0x280(%rsp), %xmm0 movdqa %xmm0, 0x240(%rsp) movdqa 0x270(%rsp), %xmm0 movdqa %xmm0, 0x250(%rsp) movdqa 0x280(%rsp), %xmm0 movdqa %xmm0, 0x260(%rsp) movl $0x0, 0x218(%rsp) movl 0x218(%rsp), %eax movl 0x12fc(%rsp), %ecx subl $0x3, %ecx cmpl %ecx, %eax jge 0x646f82 movdqa 0x230(%rsp), %xmm1 movdqa 0x250(%rsp), %xmm0 movdqa %xmm1, 0x1510(%rsp) movdqa %xmm0, 0x1500(%rsp) movdqa 0x1510(%rsp), %xmm0 movdqa 0x1500(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x200(%rsp) movdqa 0x270(%rsp), %xmm1 movdqa 0x290(%rsp), %xmm0 movdqa %xmm1, 0x14f0(%rsp) movdqa %xmm0, 0x14e0(%rsp) movdqa 0x14f0(%rsp), %xmm0 movdqa 0x14e0(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x1f0(%rsp) movdqa 0x200(%rsp), 
%xmm1 movdqa 0x1f0(%rsp), %xmm0 movdqa %xmm1, 0x14d0(%rsp) movdqa %xmm0, 0x14c0(%rsp) movdqa 0x14d0(%rsp), %xmm0 movdqa 0x14c0(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x1e0(%rsp) movdqa 0x1e0(%rsp), %xmm1 movdqa 0x2b0(%rsp), %xmm0 movdqa %xmm1, 0x14b0(%rsp) movdqa %xmm0, 0x14a0(%rsp) movdqa 0x14b0(%rsp), %xmm0 movdqa 0x14a0(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x1d0(%rsp) movdqa 0x240(%rsp), %xmm1 movdqa 0x260(%rsp), %xmm0 movdqa %xmm1, 0x1490(%rsp) movdqa %xmm0, 0x1480(%rsp) movdqa 0x1490(%rsp), %xmm0 movdqa 0x1480(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x1c0(%rsp) movdqa 0x280(%rsp), %xmm1 movdqa 0x2a0(%rsp), %xmm0 movdqa %xmm1, 0x1470(%rsp) movdqa %xmm0, 0x1460(%rsp) movdqa 0x1470(%rsp), %xmm0 movdqa 0x1460(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x1b0(%rsp) movdqa 0x1c0(%rsp), %xmm1 movdqa 0x1b0(%rsp), %xmm0 movdqa %xmm1, 0x1450(%rsp) movdqa %xmm0, 0x1440(%rsp) movdqa 0x1450(%rsp), %xmm0 movdqa 0x1440(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x1a0(%rsp) movdqa 0x1a0(%rsp), %xmm1 movdqa 0x2c0(%rsp), %xmm0 movdqa %xmm1, 0x1430(%rsp) movdqa %xmm0, 0x1420(%rsp) movdqa 0x1430(%rsp), %xmm0 movdqa 0x1420(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x190(%rsp) movdqa 0x250(%rsp), %xmm0 movdqa %xmm0, 0x230(%rsp) movdqa 0x260(%rsp), %xmm0 movdqa %xmm0, 0x240(%rsp) movdqa 0x270(%rsp), %xmm0 movdqa %xmm0, 0x250(%rsp) movdqa 0x280(%rsp), %xmm0 movdqa %xmm0, 0x260(%rsp) movdqa 0x290(%rsp), %xmm0 movdqa %xmm0, 0x270(%rsp) movdqa 0x2a0(%rsp), %xmm0 movdqa %xmm0, 0x280(%rsp) movdqa 0x2b0(%rsp), %xmm0 movdqa %xmm0, 0x290(%rsp) movdqa 0x2c0(%rsp), %xmm0 movdqa %xmm0, 0x2a0(%rsp) movq 0x220(%rsp), %rdi leaq 0x230(%rsp), %rsi addq $0x80, %rsi movl 0x22c(%rsp), %edx movl 0x1300(%rsp), %ecx callq 0x647bd0 movq 0x220(%rsp), %rax addq $0x90, %rax movq %rax, 0x220(%rsp) movdqa 0x1d0(%rsp), %xmm0 movdqa 0x190(%rsp), %xmm1 xorl %edi, %edi callq 0x647c90 movl %eax, %edx movslq 0x218(%rsp), %rcx leaq 0x2d0(%rsp), %rax shlq $0x7, %rcx addq %rcx, %rax movslq 
0x22c(%rsp), %rcx movl %edx, (%rax,%rcx,4) movdqa 0x1d0(%rsp), %xmm0 movdqa 0x190(%rsp), %xmm1 movl $0x1, %edi callq 0x647c90 movl %eax, %edx movslq 0x218(%rsp), %rcx leaq 0x2d0(%rsp), %rax shlq $0x7, %rcx addq %rcx, %rax movl 0x22c(%rsp), %ecx addl $0x1, %ecx movslq %ecx, %rcx movl %edx, (%rax,%rcx,4) movdqa 0x1d0(%rsp), %xmm0 movdqa 0x190(%rsp), %xmm1 movl $0x2, %edi callq 0x647c90 movl %eax, %edx movslq 0x218(%rsp), %rcx leaq 0x2d0(%rsp), %rax shlq $0x7, %rcx addq %rcx, %rax movl 0x22c(%rsp), %ecx addl $0x2, %ecx movslq %ecx, %rcx movl %edx, (%rax,%rcx,4) movdqa 0x1d0(%rsp), %xmm0 movdqa 0x190(%rsp), %xmm1 movl $0x3, %edi callq 0x647c90 movl %eax, %edx movslq 0x218(%rsp), %rcx leaq 0x2d0(%rsp), %rax shlq $0x7, %rcx addq %rcx, %rax movl 0x22c(%rsp), %ecx addl $0x3, %ecx movslq %ecx, %rcx movl %edx, (%rax,%rcx,4) movl 0x218(%rsp), %eax addl $0x1, %eax movl %eax, 0x218(%rsp) jmp 0x646b5f movl 0x12fc(%rsp), %eax subl $0x3, %eax movl %eax, 0x18c(%rsp) movl 0x18c(%rsp), %eax cmpl 0x12fc(%rsp), %eax jge 0x647371 movdqa 0x230(%rsp), %xmm1 movdqa 0x250(%rsp), %xmm0 movdqa %xmm1, 0x1410(%rsp) movdqa %xmm0, 0x1400(%rsp) movdqa 0x1410(%rsp), %xmm0 movdqa 0x1400(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x170(%rsp) movdqa 0x270(%rsp), %xmm1 movdqa 0x290(%rsp), %xmm0 movdqa %xmm1, 0x13f0(%rsp) movdqa %xmm0, 0x13e0(%rsp) movdqa 0x13f0(%rsp), %xmm0 movdqa 0x13e0(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x160(%rsp) movdqa 0x170(%rsp), %xmm1 movdqa 0x160(%rsp), %xmm0 movdqa %xmm1, 0x13d0(%rsp) movdqa %xmm0, 0x13c0(%rsp) movdqa 0x13d0(%rsp), %xmm0 movdqa 0x13c0(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x150(%rsp) movdqa 0x150(%rsp), %xmm1 movdqa 0x2b0(%rsp), %xmm0 movdqa %xmm1, 0x13b0(%rsp) movdqa %xmm0, 0x13a0(%rsp) movdqa 0x13b0(%rsp), %xmm0 movdqa 0x13a0(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x140(%rsp) movdqa 0x240(%rsp), %xmm1 movdqa 0x260(%rsp), %xmm0 movdqa %xmm1, 0x1390(%rsp) movdqa %xmm0, 0x1380(%rsp) movdqa 0x1390(%rsp), %xmm0 movdqa 0x1380(%rsp), 
%xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x130(%rsp) movdqa 0x280(%rsp), %xmm1 movdqa 0x2a0(%rsp), %xmm0 movdqa %xmm1, 0x1370(%rsp) movdqa %xmm0, 0x1360(%rsp) movdqa 0x1370(%rsp), %xmm0 movdqa 0x1360(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x120(%rsp) movdqa 0x130(%rsp), %xmm1 movdqa 0x120(%rsp), %xmm0 movdqa %xmm1, 0x1350(%rsp) movdqa %xmm0, 0x1340(%rsp) movdqa 0x1350(%rsp), %xmm0 movdqa 0x1340(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x110(%rsp) movdqa 0x110(%rsp), %xmm1 movdqa 0x2c0(%rsp), %xmm0 movdqa %xmm1, 0x1330(%rsp) movdqa %xmm0, 0x1320(%rsp) movdqa 0x1330(%rsp), %xmm0 movdqa 0x1320(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x100(%rsp) movdqa 0x250(%rsp), %xmm0 movdqa %xmm0, 0x230(%rsp) movdqa 0x260(%rsp), %xmm0 movdqa %xmm0, 0x240(%rsp) movdqa 0x270(%rsp), %xmm0 movdqa %xmm0, 0x250(%rsp) movdqa 0x280(%rsp), %xmm0 movdqa %xmm0, 0x260(%rsp) movdqa 0x290(%rsp), %xmm0 movdqa %xmm0, 0x270(%rsp) movdqa 0x2a0(%rsp), %xmm0 movdqa %xmm0, 0x280(%rsp) movdqa 0x2b0(%rsp), %xmm0 movdqa %xmm0, 0x290(%rsp) movdqa 0x2c0(%rsp), %xmm0 movdqa %xmm0, 0x2a0(%rsp) movdqa 0x140(%rsp), %xmm0 movdqa 0x100(%rsp), %xmm1 xorl %edi, %edi callq 0x647c90 movl %eax, %edx movslq 0x18c(%rsp), %rcx leaq 0x2d0(%rsp), %rax shlq $0x7, %rcx addq %rcx, %rax movslq 0x22c(%rsp), %rcx movl %edx, (%rax,%rcx,4) movdqa 0x140(%rsp), %xmm0 movdqa 0x100(%rsp), %xmm1 movl $0x1, %edi callq 0x647c90 movl %eax, %edx movslq 0x18c(%rsp), %rcx leaq 0x2d0(%rsp), %rax shlq $0x7, %rcx addq %rcx, %rax movl 0x22c(%rsp), %ecx addl $0x1, %ecx movslq %ecx, %rcx movl %edx, (%rax,%rcx,4) movdqa 0x140(%rsp), %xmm0 movdqa 0x100(%rsp), %xmm1 movl $0x2, %edi callq 0x647c90 movl %eax, %edx movslq 0x18c(%rsp), %rcx leaq 0x2d0(%rsp), %rax shlq $0x7, %rcx addq %rcx, %rax movl 0x22c(%rsp), %ecx addl $0x2, %ecx movslq %ecx, %rcx movl %edx, (%rax,%rcx,4) movdqa 0x140(%rsp), %xmm0 movdqa 0x100(%rsp), %xmm1 movl $0x3, %edi callq 0x647c90 movl %eax, %edx movslq 0x18c(%rsp), %rcx leaq 0x2d0(%rsp), %rax shlq $0x7, %rcx 
addq %rcx, %rax movl 0x22c(%rsp), %ecx addl $0x3, %ecx movslq %ecx, %rcx movl %edx, (%rax,%rcx,4) movl 0x18c(%rsp), %eax addl $0x1, %eax movl %eax, 0x18c(%rsp) jmp 0x646f93 jmp 0x647373 movl 0x22c(%rsp), %eax addl $0x4, %eax movl %eax, 0x22c(%rsp) jmp 0x646a69 movl $0x0, 0xbc(%rsp) cmpl $0x4, 0xbc(%rsp) jge 0x64740f movq 0x1530(%rsp), %rax movslq 0xbc(%rsp), %rcx cvtsi2sdl (%rax,%rcx,4), %xmm0 mulsd 0x12e0(%rsp), %xmm0 movslq 0xbc(%rsp), %rax movsd %xmm0, 0xe0(%rsp,%rax,8) movq 0x1560(%rsp), %rax movslq 0xbc(%rsp), %rcx movsd (%rax,%rcx,8), %xmm0 mulsd 0x12e8(%rsp), %xmm0 movslq 0xbc(%rsp), %rax movsd %xmm0, 0xc0(%rsp,%rax,8) movl 0xbc(%rsp), %eax addl $0x1, %eax movl %eax, 0xbc(%rsp) jmp 0x647394 cmpl $0x0, 0x1568(%rsp) jne 0x6476d1 movl $0x0, 0xb8(%rsp) movl $0x0, 0xb4(%rsp) movl 0xb8(%rsp), %eax cmpl 0x12fc(%rsp), %eax jge 0x6476cc movl 0xb8(%rsp), %eax movl %eax, 0x34(%rsp) movl 0x12fc(%rsp), %eax movl $0x2, %ecx cltd idivl %ecx movl %eax, %ecx movl 0x34(%rsp), %eax cmpl %ecx, %eax setge %al andb $0x1, %al movzbl %al, %eax shll %eax movl %eax, 0xb0(%rsp) movl $0x0, 0xac(%rsp) movl 0xac(%rsp), %eax cmpl 0x1300(%rsp), %eax jge 0x6476b4 movq 0x1308(%rsp), %rax movl 0xb8(%rsp), %ecx movl 0x1304(%rsp), %edx imull %edx, %ecx movl 0xac(%rsp), %edx addl %edx, %ecx movzwl (%rax,%rcx,2), %eax movl %eax, 0xa8(%rsp) movslq 0xb8(%rsp), %rax movl %eax, %edx shlq $0x7, %rax leaq 0x2d0(%rsp,%rax), %rax movslq 0xac(%rsp), %rcx movl %ecx, %esi movl (%rax,%rcx,4), %eax movq 0x1550(%rsp), %rcx shll $0x5, %edx addl %esi, %edx movslq %edx, %rdx movl (%rcx,%rdx,4), %ecx addl %ecx, %eax movl %eax, 0xa4(%rsp) movb 0x1558(%rsp), %cl movl 0xa4(%rsp), %eax addb %cl, %cl addb $-0x10, %cl shrl %cl, %eax movl %eax, 0xa4(%rsp) movl 0xa4(%rsp), %eax cvtsi2sd %rax, %xmm0 mulsd 0x12f0(%rsp), %xmm0 movsd %xmm0, 0x98(%rsp) movl 0xb0(%rsp), %eax movl %eax, 0x30(%rsp) movl 0xac(%rsp), %ecx movl 0x1300(%rsp), %eax movl $0x2, %esi cltd idivl %esi movl %eax, %edx movl 0x30(%rsp), %eax cmpl %edx, %ecx 
setge %cl andb $0x1, %cl movzbl %cl, %ecx addl %ecx, %eax movl %eax, 0x94(%rsp) movsd 0x12d8(%rsp), %xmm0 movsd 0x98(%rsp), %xmm2 movslq 0x94(%rsp), %rax movsd 0xe0(%rsp,%rax,8), %xmm1 mulsd %xmm2, %xmm0 addsd %xmm1, %xmm0 movsd %xmm0, 0x88(%rsp) movsd 0x88(%rsp), %xmm0 movslq 0x94(%rsp), %rax mulsd 0xc0(%rsp,%rax,8), %xmm0 movsd %xmm0, 0x80(%rsp) movsd 0x47480f(%rip), %xmm0 # 0xabbde8 ucomisd 0x80(%rsp), %xmm0 jbe 0x6475f5 movsd 0x80(%rsp), %xmm0 movsd %xmm0, 0x28(%rsp) jmp 0x647605 movsd 0x4747eb(%rip), %xmm0 # 0xabbde8 movsd %xmm0, 0x28(%rsp) jmp 0x647605 movsd 0x28(%rsp), %xmm0 movsd %xmm0, 0x80(%rsp) movsd 0x80(%rsp), %xmm0 movq %xmm0, %rax movabsq $-0x8000000000000000, %rcx # imm = 0x8000000000000000 xorq %rcx, %rax movq %rax, %xmm0 callq 0x188d0 movsd 0x46dacf(%rip), %xmm1 # 0xab5110 mulsd %xmm1, %xmm0 cvttsd2si %xmm0, %eax movl %eax, 0x7c(%rsp) movl 0x7c(%rsp), %esi movq 0x1540(%rsp), %rax movslq 0xb4(%rsp), %rcx movzwl (%rax,%rcx,2), %edx addl %esi, %edx movw %dx, (%rax,%rcx,2) movl 0x7c(%rsp), %edx imull 0xa8(%rsp), %edx movq 0x1538(%rsp), %rax movslq 0xb4(%rsp), %rcx addl (%rax,%rcx,4), %edx movl %edx, (%rax,%rcx,4) movl 0xac(%rsp), %eax addl $0x1, %eax movl %eax, 0xac(%rsp) movl 0xb4(%rsp), %eax addl $0x1, %eax movl %eax, 0xb4(%rsp) jmp 0x647485 jmp 0x6476b6 movl 0xb8(%rsp), %eax addl $0x1, %eax movl %eax, 0xb8(%rsp) jmp 0x647433 jmp 0x647937 movl $0x0, 0x78(%rsp) movl $0x0, 0x74(%rsp) movl 0x78(%rsp), %eax cmpl 0x12fc(%rsp), %eax jge 0x647935 movl 0x78(%rsp), %eax movl %eax, 0x24(%rsp) movl 0x12fc(%rsp), %eax movl $0x2, %ecx cltd idivl %ecx movl %eax, %ecx movl 0x24(%rsp), %eax cmpl %ecx, %eax setge %al andb $0x1, %al movzbl %al, %eax shll %eax movl %eax, 0x70(%rsp) movl $0x0, 0x6c(%rsp) movl 0x6c(%rsp), %eax cmpl 0x1300(%rsp), %eax jge 0x647923 movq 0x1308(%rsp), %rax movl 0x78(%rsp), %ecx movl 0x1304(%rsp), %edx imull %edx, %ecx movl 0x6c(%rsp), %edx addl %edx, %ecx movzwl (%rax,%rcx,2), %eax movl %eax, 0x68(%rsp) movslq 0x78(%rsp), %rax movl %eax, 
%edx shlq $0x7, %rax leaq 0x2d0(%rsp,%rax), %rax movslq 0x6c(%rsp), %rcx movl %ecx, %esi movl (%rax,%rcx,4), %eax movq 0x1550(%rsp), %rcx shll $0x5, %edx addl %esi, %edx movslq %edx, %rdx movl (%rcx,%rdx,4), %ecx addl %ecx, %eax movl %eax, 0x64(%rsp) movb 0x1558(%rsp), %cl movl 0x64(%rsp), %eax addb %cl, %cl addb $-0x10, %cl shrl %cl, %eax movl %eax, 0x64(%rsp) movl 0x64(%rsp), %eax cvtsi2sd %rax, %xmm0 mulsd 0x12f0(%rsp), %xmm0 movsd %xmm0, 0x58(%rsp) movl 0x70(%rsp), %eax movl %eax, 0x20(%rsp) movl 0x6c(%rsp), %ecx movl 0x1300(%rsp), %eax movl $0x2, %esi cltd idivl %esi movl %eax, %edx movl 0x20(%rsp), %eax cmpl %edx, %ecx setge %cl andb $0x1, %cl movzbl %cl, %ecx addl %ecx, %eax movl %eax, 0x54(%rsp) movsd 0x12d8(%rsp), %xmm0 movsd 0x58(%rsp), %xmm2 movslq 0x54(%rsp), %rax movsd 0xe0(%rsp,%rax,8), %xmm1 mulsd %xmm2, %xmm0 addsd %xmm1, %xmm0 movsd %xmm0, 0x48(%rsp) movsd 0x48(%rsp), %xmm0 movslq 0x54(%rsp), %rax mulsd 0xc0(%rsp,%rax,8), %xmm0 movsd %xmm0, 0x40(%rsp) movsd 0x4745a9(%rip), %xmm0 # 0xabbde8 ucomisd 0x40(%rsp), %xmm0 jbe 0x647855 movsd 0x40(%rsp), %xmm0 movsd %xmm0, 0x18(%rsp) jmp 0x647865 movsd 0x47458b(%rip), %xmm0 # 0xabbde8 movsd %xmm0, 0x18(%rsp) jmp 0x647865 movsd 0x18(%rsp), %xmm0 movsd %xmm0, 0x40(%rsp) movsd 0x40(%rsp), %xmm0 movq %xmm0, %rax movabsq $-0x8000000000000000, %rcx # imm = 0x8000000000000000 xorq %rcx, %rax movq %rax, %xmm0 cvtsd2ss %xmm0, %xmm0 callq 0x647dd0 movss 0x474559(%rip), %xmm1 # 0xabbdf8 mulss %xmm1, %xmm0 movss %xmm0, 0x3c(%rsp) movss 0x3c(%rsp), %xmm0 movss %xmm0, 0x1524(%rsp) movss 0x446cd0(%rip), %xmm0 # 0xa8e590 addss 0x1524(%rsp), %xmm0 cvttss2si %xmm0, %eax movl %eax, 0x38(%rsp) movl 0x38(%rsp), %esi movq 0x1540(%rsp), %rax movslq 0x74(%rsp), %rcx movzwl (%rax,%rcx,2), %edx addl %esi, %edx movw %dx, (%rax,%rcx,2) movl 0x38(%rsp), %edx imull 0x68(%rsp), %edx movq 0x1538(%rsp), %rax movslq 0x74(%rsp), %rcx addl (%rax,%rcx,4), %edx movl %edx, (%rax,%rcx,4) movl 0x6c(%rsp), %eax addl $0x1, %eax movl %eax, 0x6c(%rsp) 
movl 0x74(%rsp), %eax addl $0x1, %eax movl %eax, 0x74(%rsp) jmp 0x647727 jmp 0x647925 movl 0x78(%rsp), %eax addl $0x1, %eax movl %eax, 0x78(%rsp) jmp 0x6476e1 jmp 0x647937 addq $0x1528, %rsp # imm = 0x1528 retq nop
/m-ab-s[P]aom/av1/encoder/x86/highbd_temporal_filter_sse2.c
nn_propagate_8to1
static void nn_propagate_8to1(const float *const inputs, const float *const weights, __m128 *const output) { const __m128 inputs_h = _mm_loadu_ps(&inputs[4]); const __m128 inputs_l = _mm_loadu_ps(inputs); const __m128 weights_h = _mm_loadu_ps(&weights[4]); const __m128 weights_l = _mm_loadu_ps(weights); const __m128 mul_h = _mm_mul_ps(inputs_h, weights_h); const __m128 mul_l = _mm_mul_ps(inputs_l, weights_l); // [7 6 5 4] [3 2 1 0] (weight and input indices) const __m128 vadd = _mm_add_ps(mul_l, mul_h); // [7+3 6+2 5+1 4+0] const __m128 hadd1 = _mm_hadd_ps(vadd, vadd); // [7+6+3+2 5+4+1+0 7+6+3+2 5+4+1+0] const __m128 hadd2 = _mm_hadd_ps(hadd1, hadd1); // [7+6+5+4+3+2+1+0 7+6+5+4+3+2+1+0 7+6+5+4+3+2+1+0 7+6+5+4+3+2+1+0] *output = _mm_add_ps(*output, hadd2); }
subq $0x118, %rsp # imm = 0x118 movq %rdi, 0x28(%rsp) movq %rsi, 0x20(%rsp) movq %rdx, 0x18(%rsp) movq 0x28(%rsp), %rax addq $0x10, %rax movq %rax, 0x48(%rsp) movq 0x48(%rsp), %rax movups (%rax), %xmm0 movaps %xmm0, (%rsp) movq 0x28(%rsp), %rax movq %rax, 0x40(%rsp) movq 0x40(%rsp), %rax movups (%rax), %xmm0 movaps %xmm0, -0x10(%rsp) movq 0x20(%rsp), %rax addq $0x10, %rax movq %rax, 0x38(%rsp) movq 0x38(%rsp), %rax movups (%rax), %xmm0 movaps %xmm0, -0x20(%rsp) movq 0x20(%rsp), %rax movq %rax, 0x30(%rsp) movq 0x30(%rsp), %rax movups (%rax), %xmm0 movaps %xmm0, -0x30(%rsp) movaps (%rsp), %xmm1 movaps -0x20(%rsp), %xmm0 movaps %xmm1, 0x80(%rsp) movaps %xmm0, 0x70(%rsp) movaps 0x80(%rsp), %xmm0 mulps 0x70(%rsp), %xmm0 movaps %xmm0, -0x40(%rsp) movaps -0x10(%rsp), %xmm1 movaps -0x30(%rsp), %xmm0 movaps %xmm1, 0x60(%rsp) movaps %xmm0, 0x50(%rsp) movaps 0x60(%rsp), %xmm0 mulps 0x50(%rsp), %xmm0 movaps %xmm0, -0x50(%rsp) movaps -0x50(%rsp), %xmm1 movaps -0x40(%rsp), %xmm0 movaps %xmm1, 0x100(%rsp) movaps %xmm0, 0xf0(%rsp) movaps 0x100(%rsp), %xmm0 addps 0xf0(%rsp), %xmm0 movaps %xmm0, -0x60(%rsp) movaps -0x60(%rsp), %xmm1 movaps -0x60(%rsp), %xmm0 movaps %xmm1, 0xc0(%rsp) movaps %xmm0, 0xb0(%rsp) movaps 0xc0(%rsp), %xmm0 movaps 0xb0(%rsp), %xmm1 haddps %xmm1, %xmm0 movaps %xmm0, -0x70(%rsp) movaps -0x70(%rsp), %xmm1 movaps -0x70(%rsp), %xmm0 movaps %xmm1, 0xa0(%rsp) movaps %xmm0, 0x90(%rsp) movaps 0xa0(%rsp), %xmm0 movaps 0x90(%rsp), %xmm1 haddps %xmm1, %xmm0 movaps %xmm0, -0x80(%rsp) movq 0x18(%rsp), %rax movaps (%rax), %xmm1 movaps -0x80(%rsp), %xmm0 movaps %xmm1, 0xe0(%rsp) movaps %xmm0, 0xd0(%rsp) movaps 0xe0(%rsp), %xmm0 addps 0xd0(%rsp), %xmm0 movq 0x18(%rsp), %rax movaps %xmm0, (%rax) addq $0x118, %rsp # imm = 0x118 retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/av1/encoder/x86/ml_sse3.c
reduce_max
// Horizontal max reduction: returns a vector whose four lanes all hold
// max(reg[0], reg[1], reg[2], reg[3]).
static inline __m128 reduce_max(__m128 reg) {
  // Swap the 64-bit halves (lane order 1 0 3 2) and take the lane-wise
  // max; lanes now hold max over the pairs {0,2} and {1,3}.
  __m128 swapped = _mm_shuffle_ps(reg, reg, 0x4e);  // 01 00 11 10
  __m128 acc = _mm_max_ps(reg, swapped);
  // Swap adjacent lanes (lane order 2 3 0 1) and max again; every lane
  // now holds the overall maximum.
  swapped = _mm_shuffle_ps(acc, acc, 0xb1);  // 10 11 00 01
  return _mm_max_ps(acc, swapped);
}
movaps %xmm0, -0x58(%rsp) movapd -0x58(%rsp), %xmm0 shufpd $0x1, %xmm0, %xmm0 # xmm0 = xmm0[1,0] movapd %xmm0, -0x68(%rsp) movaps -0x58(%rsp), %xmm1 movaps -0x68(%rsp), %xmm0 movaps %xmm1, -0x18(%rsp) movaps %xmm0, -0x28(%rsp) movaps -0x18(%rsp), %xmm0 movaps -0x28(%rsp), %xmm1 maxps %xmm1, %xmm0 movaps %xmm0, -0x58(%rsp) movaps -0x58(%rsp), %xmm0 shufps $0xb1, %xmm0, %xmm0 # xmm0 = xmm0[1,0,3,2] movaps %xmm0, -0x68(%rsp) movaps -0x58(%rsp), %xmm1 movaps -0x68(%rsp), %xmm0 movaps %xmm1, -0x38(%rsp) movaps %xmm0, -0x48(%rsp) movaps -0x38(%rsp), %xmm0 movaps -0x48(%rsp), %xmm1 maxps %xmm1, %xmm0 movaps %xmm0, -0x58(%rsp) movaps -0x58(%rsp), %xmm0 retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/av1/encoder/x86/ml_sse3.c
av1_idct8_sse2
// 8-point inverse DCT, SSE2 path: transforms 8 vectors of 16-bit
// coefficients in `input` into `output` via the 5-stage AV1 butterfly
// network. Each __m128i holds 8 lanes, so this processes 8 columns at once.
//
// NOTE(review): `cos_bit` and `__rounding` look unused here but are
// presumably referenced by name inside the btf_16_* macros defined
// elsewhere — confirm against the macro definitions before renaming them.
void av1_idct8_sse2(const __m128i *input, __m128i *output) {
  const int8_t cos_bit = INV_COS_BIT;
  // Cosine table scaled for INV_COS_BIT bits of precision.
  const int32_t *cospi = cospi_arr(INV_COS_BIT);
  // Rounding constant for the right-shift after each butterfly multiply.
  const __m128i __rounding = _mm_set1_epi32(1 << (INV_COS_BIT - 1));
  // Packed 16-bit cosine pairs for pmaddwd-style butterflies
  // (pNN = +cospi[NN], mNN = -cospi[NN]).
  const __m128i cospi_p56_m08 = pair_set_epi16(cospi[56], -cospi[8]);
  const __m128i cospi_p08_p56 = pair_set_epi16(cospi[8], cospi[56]);
  const __m128i cospi_p24_m40 = pair_set_epi16(cospi[24], -cospi[40]);
  const __m128i cospi_p40_p24 = pair_set_epi16(cospi[40], cospi[24]);
  const __m128i cospi_p32_p32 = pair_set_epi16(cospi[32], cospi[32]);
  const __m128i cospi_p32_m32 = pair_set_epi16(cospi[32], -cospi[32]);
  const __m128i cospi_p48_m16 = pair_set_epi16(cospi[48], -cospi[16]);
  const __m128i cospi_p16_p48 = pair_set_epi16(cospi[16], cospi[48]);
  const __m128i cospi_m32_p32 = pair_set_epi16(-cospi[32], cospi[32]);

  // stage 1
  // Bit-reversed-style input permutation: even coefficients feed the
  // top half of the network, odd coefficients the bottom half.
  __m128i x[8];
  x[0] = input[0];
  x[1] = input[4];
  x[2] = input[2];
  x[3] = input[6];
  x[4] = input[1];
  x[5] = input[5];
  x[6] = input[3];
  x[7] = input[7];

  // stage 2
  // Rotation butterflies on the odd half.
  btf_16_sse2(cospi_p56_m08, cospi_p08_p56, x[4], x[7], x[4], x[7]);
  btf_16_sse2(cospi_p24_m40, cospi_p40_p24, x[5], x[6], x[5], x[6]);

  // stage 3
  // Rotations on the even half; saturating add/sub pairs on the odd half.
  btf_16_sse2(cospi_p32_p32, cospi_p32_m32, x[0], x[1], x[0], x[1]);
  btf_16_sse2(cospi_p48_m16, cospi_p16_p48, x[2], x[3], x[2], x[3]);
  btf_16_adds_subs_sse2(x[4], x[5]);
  btf_16_subs_adds_sse2(x[7], x[6]);

  // stage 4
  btf_16_adds_subs_sse2(x[0], x[3]);
  btf_16_adds_subs_sse2(x[1], x[2]);
  btf_16_sse2(cospi_m32_p32, cospi_p32_p32, x[5], x[6], x[5], x[6]);

  // stage 5
  // Final mirrored add/sub pairs written straight to the output vectors.
  btf_16_adds_subs_out_sse2(output[0], output[7], x[0], x[7]);
  btf_16_adds_subs_out_sse2(output[1], output[6], x[1], x[6]);
  btf_16_adds_subs_out_sse2(output[2], output[5], x[2], x[5]);
  btf_16_adds_subs_out_sse2(output[3], output[4], x[3], x[4]);
}
subq $0x1418, %rsp # imm = 0x1418 movq %rdi, 0x6a0(%rsp) movq %rsi, 0x698(%rsp) movb $0xc, 0x697(%rsp) movl $0xc, %edi callq 0x649d00 movq %rax, 0x688(%rsp) movl $0x800, 0x6cc(%rsp) # imm = 0x800 movl 0x6cc(%rsp), %eax movl %eax, 0x12ec(%rsp) movl %eax, 0x12e8(%rsp) movl %eax, 0x12e4(%rsp) movl %eax, 0x12e0(%rsp) movd 0x12ec(%rsp), %xmm0 movd 0x12e8(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x12e4(%rsp), %xmm2 movd 0x12e0(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x12d0(%rsp) movaps 0x12d0(%rsp), %xmm0 movaps %xmm0, 0x670(%rsp) movq 0x688(%rsp), %rcx movzwl 0xe0(%rcx), %eax movl 0x20(%rcx), %ecx shll $0x10, %ecx subl %ecx, %eax movl %eax, 0x6c8(%rsp) movl 0x6c8(%rsp), %eax movl %eax, 0x130c(%rsp) movl %eax, 0x1308(%rsp) movl %eax, 0x1304(%rsp) movl %eax, 0x1300(%rsp) movd 0x130c(%rsp), %xmm0 movd 0x1308(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x1304(%rsp), %xmm2 movd 0x1300(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x12f0(%rsp) movaps 0x12f0(%rsp), %xmm0 movaps %xmm0, 0x660(%rsp) movq 0x688(%rsp), %rcx movzwl 0x20(%rcx), %eax movl 0xe0(%rcx), %ecx shll $0x10, %ecx orl %ecx, %eax movl %eax, 0x6c4(%rsp) movl 0x6c4(%rsp), %eax movl %eax, 0x132c(%rsp) movl %eax, 0x1328(%rsp) movl %eax, 0x1324(%rsp) movl %eax, 0x1320(%rsp) movd 0x132c(%rsp), %xmm0 movd 0x1328(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x1324(%rsp), %xmm2 movd 0x1320(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x1310(%rsp) movaps 0x1310(%rsp), %xmm0 movaps %xmm0, 0x650(%rsp) movq 0x688(%rsp), %rcx movzwl 0x60(%rcx), %eax movl 0xa0(%rcx), %ecx shll $0x10, %ecx subl %ecx, %eax movl %eax, 0x6c0(%rsp) 
movl 0x6c0(%rsp), %eax movl %eax, 0x134c(%rsp) movl %eax, 0x1348(%rsp) movl %eax, 0x1344(%rsp) movl %eax, 0x1340(%rsp) movd 0x134c(%rsp), %xmm0 movd 0x1348(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x1344(%rsp), %xmm2 movd 0x1340(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x1330(%rsp) movaps 0x1330(%rsp), %xmm0 movaps %xmm0, 0x640(%rsp) movq 0x688(%rsp), %rcx movzwl 0xa0(%rcx), %eax movl 0x60(%rcx), %ecx shll $0x10, %ecx orl %ecx, %eax movl %eax, 0x6bc(%rsp) movl 0x6bc(%rsp), %eax movl %eax, 0x136c(%rsp) movl %eax, 0x1368(%rsp) movl %eax, 0x1364(%rsp) movl %eax, 0x1360(%rsp) movd 0x136c(%rsp), %xmm0 movd 0x1368(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x1364(%rsp), %xmm2 movd 0x1360(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x1350(%rsp) movaps 0x1350(%rsp), %xmm0 movaps %xmm0, 0x630(%rsp) movq 0x688(%rsp), %rax movzwl 0x80(%rax), %eax movl %eax, %ecx shll $0x10, %ecx orl %ecx, %eax movl %eax, 0x6b8(%rsp) movl 0x6b8(%rsp), %eax movl %eax, 0x138c(%rsp) movl %eax, 0x1388(%rsp) movl %eax, 0x1384(%rsp) movl %eax, 0x1380(%rsp) movd 0x138c(%rsp), %xmm0 movd 0x1388(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x1384(%rsp), %xmm2 movd 0x1380(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x1370(%rsp) movaps 0x1370(%rsp), %xmm0 movaps %xmm0, 0x620(%rsp) movq 0x688(%rsp), %rax movl 0x80(%rax), %ecx movw %cx, %ax movzwl %ax, %eax shll $0x10, %ecx subl %ecx, %eax movl %eax, 0x6b4(%rsp) movl 0x6b4(%rsp), %eax movl %eax, 0x13ac(%rsp) movl %eax, 0x13a8(%rsp) movl %eax, 0x13a4(%rsp) movl %eax, 0x13a0(%rsp) movd 0x13ac(%rsp), %xmm0 movd 0x13a8(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # 
xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x13a4(%rsp), %xmm2 movd 0x13a0(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x1390(%rsp) movaps 0x1390(%rsp), %xmm0 movaps %xmm0, 0x610(%rsp) movq 0x688(%rsp), %rcx movzwl 0xc0(%rcx), %eax movl 0x40(%rcx), %ecx shll $0x10, %ecx subl %ecx, %eax movl %eax, 0x6b0(%rsp) movl 0x6b0(%rsp), %eax movl %eax, 0x13cc(%rsp) movl %eax, 0x13c8(%rsp) movl %eax, 0x13c4(%rsp) movl %eax, 0x13c0(%rsp) movd 0x13cc(%rsp), %xmm0 movd 0x13c8(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x13c4(%rsp), %xmm2 movd 0x13c0(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x13b0(%rsp) movaps 0x13b0(%rsp), %xmm0 movaps %xmm0, 0x600(%rsp) movq 0x688(%rsp), %rcx movzwl 0x40(%rcx), %eax movl 0xc0(%rcx), %ecx shll $0x10, %ecx orl %ecx, %eax movl %eax, 0x6ac(%rsp) movl 0x6ac(%rsp), %eax movl %eax, 0x13ec(%rsp) movl %eax, 0x13e8(%rsp) movl %eax, 0x13e4(%rsp) movl %eax, 0x13e0(%rsp) movd 0x13ec(%rsp), %xmm0 movd 0x13e8(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x13e4(%rsp), %xmm2 movd 0x13e0(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x13d0(%rsp) movaps 0x13d0(%rsp), %xmm0 movaps %xmm0, 0x5f0(%rsp) movq 0x688(%rsp), %rax movl 0x80(%rax), %ecx movl %ecx, %eax negl %eax movzwl %ax, %eax shll $0x10, %ecx orl %ecx, %eax movl %eax, 0x6a8(%rsp) movl 0x6a8(%rsp), %eax movl %eax, 0x1414(%rsp) movl %eax, 0x1410(%rsp) movl %eax, 0x140c(%rsp) movl %eax, 0x1408(%rsp) movd 0x1414(%rsp), %xmm0 movd 0x1410(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x140c(%rsp), %xmm2 movd 0x1408(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # 
xmm0 = xmm0[0],xmm1[0] movdqa %xmm0, 0x13f0(%rsp) movdqa 0x13f0(%rsp), %xmm0 movdqa %xmm0, 0x5e0(%rsp) movq 0x6a0(%rsp), %rax movdqa (%rax), %xmm0 movdqa %xmm0, 0x560(%rsp) movq 0x6a0(%rsp), %rax movdqa 0x40(%rax), %xmm0 movdqa %xmm0, 0x570(%rsp) movq 0x6a0(%rsp), %rax movdqa 0x20(%rax), %xmm0 movdqa %xmm0, 0x580(%rsp) movq 0x6a0(%rsp), %rax movdqa 0x60(%rax), %xmm0 movdqa %xmm0, 0x590(%rsp) movq 0x6a0(%rsp), %rax movdqa 0x10(%rax), %xmm0 movdqa %xmm0, 0x5a0(%rsp) movq 0x6a0(%rsp), %rax movdqa 0x50(%rax), %xmm0 movdqa %xmm0, 0x5b0(%rsp) movq 0x6a0(%rsp), %rax movdqa 0x30(%rax), %xmm0 movdqa %xmm0, 0x5c0(%rsp) movq 0x6a0(%rsp), %rax movdqa 0x70(%rax), %xmm0 movdqa %xmm0, 0x5d0(%rsp) movaps 0x5a0(%rsp), %xmm1 movaps 0x5d0(%rsp), %xmm0 movaps %xmm1, 0x760(%rsp) movaps %xmm0, 0x750(%rsp) movaps 0x760(%rsp), %xmm0 movaps 0x750(%rsp), %xmm1 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, 0x550(%rsp) movaps 0x5a0(%rsp), %xmm1 movaps 0x5d0(%rsp), %xmm0 movaps %xmm1, 0x800(%rsp) movaps %xmm0, 0x7f0(%rsp) movaps 0x800(%rsp), %xmm0 movaps 0x7f0(%rsp), %xmm1 punpckhwd %xmm1, %xmm0 # xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] movdqa %xmm0, 0x540(%rsp) movdqa 0x550(%rsp), %xmm1 movdqa 0x660(%rsp), %xmm0 movdqa %xmm1, 0xa80(%rsp) movdqa %xmm0, 0xa70(%rsp) movdqa 0xa80(%rsp), %xmm0 movdqa 0xa70(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x530(%rsp) movdqa 0x540(%rsp), %xmm1 movdqa 0x660(%rsp), %xmm0 movdqa %xmm1, 0xa60(%rsp) movdqa %xmm0, 0xa50(%rsp) movdqa 0xa60(%rsp), %xmm0 movdqa 0xa50(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x520(%rsp) movdqa 0x550(%rsp), %xmm1 movdqa 0x650(%rsp), %xmm0 movdqa %xmm1, 0xa40(%rsp) movdqa %xmm0, 0xa30(%rsp) movdqa 0xa40(%rsp), %xmm0 movdqa 0xa30(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x510(%rsp) movdqa 0x540(%rsp), %xmm1 movdqa 0x650(%rsp), %xmm0 movdqa %xmm1, 0xa20(%rsp) movdqa %xmm0, 0xa10(%rsp) movdqa 0xa20(%rsp), %xmm0 movdqa 
0xa10(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x500(%rsp) movdqa 0x530(%rsp), %xmm1 movdqa 0x670(%rsp), %xmm0 movdqa %xmm1, 0xd00(%rsp) movdqa %xmm0, 0xcf0(%rsp) movdqa 0xd00(%rsp), %xmm0 movdqa 0xcf0(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x4f0(%rsp) movdqa 0x520(%rsp), %xmm1 movdqa 0x670(%rsp), %xmm0 movdqa %xmm1, 0xce0(%rsp) movdqa %xmm0, 0xcd0(%rsp) movdqa 0xce0(%rsp), %xmm0 movdqa 0xcd0(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x4e0(%rsp) movdqa 0x510(%rsp), %xmm1 movdqa 0x670(%rsp), %xmm0 movdqa %xmm1, 0xcc0(%rsp) movdqa %xmm0, 0xcb0(%rsp) movdqa 0xcc0(%rsp), %xmm0 movdqa 0xcb0(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x4d0(%rsp) movdqa 0x500(%rsp), %xmm1 movdqa 0x670(%rsp), %xmm0 movdqa %xmm1, 0xca0(%rsp) movdqa %xmm0, 0xc90(%rsp) movdqa 0xca0(%rsp), %xmm0 movdqa 0xc90(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x4c0(%rsp) movdqa 0x4f0(%rsp), %xmm0 movdqa %xmm0, 0xf80(%rsp) movl $0xc, 0xf7c(%rsp) movdqa 0xf80(%rsp), %xmm0 movl 0xf7c(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x4b0(%rsp) movdqa 0x4e0(%rsp), %xmm0 movdqa %xmm0, 0xf60(%rsp) movl $0xc, 0xf5c(%rsp) movdqa 0xf60(%rsp), %xmm0 movl 0xf5c(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x4a0(%rsp) movdqa 0x4d0(%rsp), %xmm0 movdqa %xmm0, 0xf40(%rsp) movl $0xc, 0xf3c(%rsp) movdqa 0xf40(%rsp), %xmm0 movl 0xf3c(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x490(%rsp) movdqa 0x4c0(%rsp), %xmm0 movdqa %xmm0, 0xf20(%rsp) movl $0xc, 0xf1c(%rsp) movdqa 0xf20(%rsp), %xmm0 movl 0xf1c(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x480(%rsp) movdqa 0x4b0(%rsp), %xmm1 movdqa 0x4a0(%rsp), %xmm0 movdqa %xmm1, 0x10c0(%rsp) movdqa %xmm0, 0x10b0(%rsp) movdqa 0x10c0(%rsp), %xmm0 movdqa 0x10b0(%rsp), %xmm1 packssdw %xmm1, %xmm0 movdqa %xmm0, 0x5a0(%rsp) movdqa 0x490(%rsp), %xmm1 movdqa 0x480(%rsp), %xmm0 movdqa %xmm1, 0x10a0(%rsp) movdqa %xmm0, 0x1090(%rsp) movdqa 0x10a0(%rsp), %xmm0 movdqa 0x1090(%rsp), %xmm1 packssdw %xmm1, 
%xmm0 movdqa %xmm0, 0x5d0(%rsp) jmp 0x64a75f movaps 0x5b0(%rsp), %xmm1 movaps 0x5c0(%rsp), %xmm0 movaps %xmm1, 0x740(%rsp) movaps %xmm0, 0x730(%rsp) movaps 0x740(%rsp), %xmm0 movaps 0x730(%rsp), %xmm1 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, 0x470(%rsp) movaps 0x5b0(%rsp), %xmm1 movaps 0x5c0(%rsp), %xmm0 movaps %xmm1, 0x7e0(%rsp) movaps %xmm0, 0x7d0(%rsp) movaps 0x7e0(%rsp), %xmm0 movaps 0x7d0(%rsp), %xmm1 punpckhwd %xmm1, %xmm0 # xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] movdqa %xmm0, 0x460(%rsp) movdqa 0x470(%rsp), %xmm1 movdqa 0x640(%rsp), %xmm0 movdqa %xmm1, 0xa00(%rsp) movdqa %xmm0, 0x9f0(%rsp) movdqa 0xa00(%rsp), %xmm0 movdqa 0x9f0(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x450(%rsp) movdqa 0x460(%rsp), %xmm1 movdqa 0x640(%rsp), %xmm0 movdqa %xmm1, 0x9e0(%rsp) movdqa %xmm0, 0x9d0(%rsp) movdqa 0x9e0(%rsp), %xmm0 movdqa 0x9d0(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x440(%rsp) movdqa 0x470(%rsp), %xmm1 movdqa 0x630(%rsp), %xmm0 movdqa %xmm1, 0x9c0(%rsp) movdqa %xmm0, 0x9b0(%rsp) movdqa 0x9c0(%rsp), %xmm0 movdqa 0x9b0(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x430(%rsp) movdqa 0x460(%rsp), %xmm1 movdqa 0x630(%rsp), %xmm0 movdqa %xmm1, 0x9a0(%rsp) movdqa %xmm0, 0x990(%rsp) movdqa 0x9a0(%rsp), %xmm0 movdqa 0x990(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x420(%rsp) movdqa 0x450(%rsp), %xmm1 movdqa 0x670(%rsp), %xmm0 movdqa %xmm1, 0xc80(%rsp) movdqa %xmm0, 0xc70(%rsp) movdqa 0xc80(%rsp), %xmm0 movdqa 0xc70(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x410(%rsp) movdqa 0x440(%rsp), %xmm1 movdqa 0x670(%rsp), %xmm0 movdqa %xmm1, 0xc60(%rsp) movdqa %xmm0, 0xc50(%rsp) movdqa 0xc60(%rsp), %xmm0 movdqa 0xc50(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x400(%rsp) movdqa 0x430(%rsp), %xmm1 movdqa 0x670(%rsp), %xmm0 movdqa %xmm1, 0xc40(%rsp) movdqa %xmm0, 0xc30(%rsp) movdqa 0xc40(%rsp), %xmm0 movdqa 0xc30(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa 
%xmm0, 0x3f0(%rsp) movdqa 0x420(%rsp), %xmm1 movdqa 0x670(%rsp), %xmm0 movdqa %xmm1, 0xc20(%rsp) movdqa %xmm0, 0xc10(%rsp) movdqa 0xc20(%rsp), %xmm0 movdqa 0xc10(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x3e0(%rsp) movdqa 0x410(%rsp), %xmm0 movdqa %xmm0, 0xf00(%rsp) movl $0xc, 0xefc(%rsp) movdqa 0xf00(%rsp), %xmm0 movl 0xefc(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x3d0(%rsp) movdqa 0x400(%rsp), %xmm0 movdqa %xmm0, 0xee0(%rsp) movl $0xc, 0xedc(%rsp) movdqa 0xee0(%rsp), %xmm0 movl 0xedc(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x3c0(%rsp) movdqa 0x3f0(%rsp), %xmm0 movdqa %xmm0, 0xec0(%rsp) movl $0xc, 0xebc(%rsp) movdqa 0xec0(%rsp), %xmm0 movl 0xebc(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x3b0(%rsp) movdqa 0x3e0(%rsp), %xmm0 movdqa %xmm0, 0xea0(%rsp) movl $0xc, 0xe9c(%rsp) movdqa 0xea0(%rsp), %xmm0 movl 0xe9c(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x3a0(%rsp) movdqa 0x3d0(%rsp), %xmm1 movdqa 0x3c0(%rsp), %xmm0 movdqa %xmm1, 0x1080(%rsp) movdqa %xmm0, 0x1070(%rsp) movdqa 0x1080(%rsp), %xmm0 movdqa 0x1070(%rsp), %xmm1 packssdw %xmm1, %xmm0 movdqa %xmm0, 0x5b0(%rsp) movdqa 0x3b0(%rsp), %xmm1 movdqa 0x3a0(%rsp), %xmm0 movdqa %xmm1, 0x1060(%rsp) movdqa %xmm0, 0x1050(%rsp) movdqa 0x1060(%rsp), %xmm0 movdqa 0x1050(%rsp), %xmm1 packssdw %xmm1, %xmm0 movdqa %xmm0, 0x5c0(%rsp) jmp 0x64ab70 movaps 0x560(%rsp), %xmm1 movaps 0x570(%rsp), %xmm0 movaps %xmm1, 0x720(%rsp) movaps %xmm0, 0x710(%rsp) movaps 0x720(%rsp), %xmm0 movaps 0x710(%rsp), %xmm1 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, 0x390(%rsp) movaps 0x560(%rsp), %xmm1 movaps 0x570(%rsp), %xmm0 movaps %xmm1, 0x7c0(%rsp) movaps %xmm0, 0x7b0(%rsp) movaps 0x7c0(%rsp), %xmm0 movaps 0x7b0(%rsp), %xmm1 punpckhwd %xmm1, %xmm0 # xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] movdqa %xmm0, 0x380(%rsp) movdqa 0x390(%rsp), %xmm1 movdqa 0x620(%rsp), 
%xmm0 movdqa %xmm1, 0x980(%rsp) movdqa %xmm0, 0x970(%rsp) movdqa 0x980(%rsp), %xmm0 movdqa 0x970(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x370(%rsp) movdqa 0x380(%rsp), %xmm1 movdqa 0x620(%rsp), %xmm0 movdqa %xmm1, 0x960(%rsp) movdqa %xmm0, 0x950(%rsp) movdqa 0x960(%rsp), %xmm0 movdqa 0x950(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x360(%rsp) movdqa 0x390(%rsp), %xmm1 movdqa 0x610(%rsp), %xmm0 movdqa %xmm1, 0x940(%rsp) movdqa %xmm0, 0x930(%rsp) movdqa 0x940(%rsp), %xmm0 movdqa 0x930(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x350(%rsp) movdqa 0x380(%rsp), %xmm1 movdqa 0x610(%rsp), %xmm0 movdqa %xmm1, 0x920(%rsp) movdqa %xmm0, 0x910(%rsp) movdqa 0x920(%rsp), %xmm0 movdqa 0x910(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x340(%rsp) movdqa 0x370(%rsp), %xmm1 movdqa 0x670(%rsp), %xmm0 movdqa %xmm1, 0xc00(%rsp) movdqa %xmm0, 0xbf0(%rsp) movdqa 0xc00(%rsp), %xmm0 movdqa 0xbf0(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x330(%rsp) movdqa 0x360(%rsp), %xmm1 movdqa 0x670(%rsp), %xmm0 movdqa %xmm1, 0xbe0(%rsp) movdqa %xmm0, 0xbd0(%rsp) movdqa 0xbe0(%rsp), %xmm0 movdqa 0xbd0(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x320(%rsp) movdqa 0x350(%rsp), %xmm1 movdqa 0x670(%rsp), %xmm0 movdqa %xmm1, 0xbc0(%rsp) movdqa %xmm0, 0xbb0(%rsp) movdqa 0xbc0(%rsp), %xmm0 movdqa 0xbb0(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x310(%rsp) movdqa 0x340(%rsp), %xmm1 movdqa 0x670(%rsp), %xmm0 movdqa %xmm1, 0xba0(%rsp) movdqa %xmm0, 0xb90(%rsp) movdqa 0xba0(%rsp), %xmm0 movdqa 0xb90(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x300(%rsp) movdqa 0x330(%rsp), %xmm0 movdqa %xmm0, 0xe80(%rsp) movl $0xc, 0xe7c(%rsp) movdqa 0xe80(%rsp), %xmm0 movl 0xe7c(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x2f0(%rsp) movdqa 0x320(%rsp), %xmm0 movdqa %xmm0, 0xe60(%rsp) movl $0xc, 0xe5c(%rsp) movdqa 0xe60(%rsp), %xmm0 movl 0xe5c(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x2e0(%rsp) movdqa 0x310(%rsp), %xmm0 movdqa %xmm0, 0xe40(%rsp) movl 
$0xc, 0xe3c(%rsp) movdqa 0xe40(%rsp), %xmm0 movl 0xe3c(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x2d0(%rsp) movdqa 0x300(%rsp), %xmm0 movdqa %xmm0, 0xe20(%rsp) movl $0xc, 0xe1c(%rsp) movdqa 0xe20(%rsp), %xmm0 movl 0xe1c(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x2c0(%rsp) movdqa 0x2f0(%rsp), %xmm1 movdqa 0x2e0(%rsp), %xmm0 movdqa %xmm1, 0x1040(%rsp) movdqa %xmm0, 0x1030(%rsp) movdqa 0x1040(%rsp), %xmm0 movdqa 0x1030(%rsp), %xmm1 packssdw %xmm1, %xmm0 movdqa %xmm0, 0x560(%rsp) movdqa 0x2d0(%rsp), %xmm1 movdqa 0x2c0(%rsp), %xmm0 movdqa %xmm1, 0x1020(%rsp) movdqa %xmm0, 0x1010(%rsp) movdqa 0x1020(%rsp), %xmm0 movdqa 0x1010(%rsp), %xmm1 packssdw %xmm1, %xmm0 movdqa %xmm0, 0x570(%rsp) jmp 0x64af81 movaps 0x580(%rsp), %xmm1 movaps 0x590(%rsp), %xmm0 movaps %xmm1, 0x700(%rsp) movaps %xmm0, 0x6f0(%rsp) movaps 0x700(%rsp), %xmm0 movaps 0x6f0(%rsp), %xmm1 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, 0x2b0(%rsp) movaps 0x580(%rsp), %xmm1 movaps 0x590(%rsp), %xmm0 movaps %xmm1, 0x7a0(%rsp) movaps %xmm0, 0x790(%rsp) movaps 0x7a0(%rsp), %xmm0 movaps 0x790(%rsp), %xmm1 punpckhwd %xmm1, %xmm0 # xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] movdqa %xmm0, 0x2a0(%rsp) movdqa 0x2b0(%rsp), %xmm1 movdqa 0x600(%rsp), %xmm0 movdqa %xmm1, 0x900(%rsp) movdqa %xmm0, 0x8f0(%rsp) movdqa 0x900(%rsp), %xmm0 movdqa 0x8f0(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x290(%rsp) movdqa 0x2a0(%rsp), %xmm1 movdqa 0x600(%rsp), %xmm0 movdqa %xmm1, 0x8e0(%rsp) movdqa %xmm0, 0x8d0(%rsp) movdqa 0x8e0(%rsp), %xmm0 movdqa 0x8d0(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x280(%rsp) movdqa 0x2b0(%rsp), %xmm1 movdqa 0x5f0(%rsp), %xmm0 movdqa %xmm1, 0x8c0(%rsp) movdqa %xmm0, 0x8b0(%rsp) movdqa 0x8c0(%rsp), %xmm0 movdqa 0x8b0(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x270(%rsp) movdqa 0x2a0(%rsp), %xmm1 movdqa 0x5f0(%rsp), %xmm0 movdqa %xmm1, 0x8a0(%rsp) movdqa 
%xmm0, 0x890(%rsp) movdqa 0x8a0(%rsp), %xmm0 movdqa 0x890(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x260(%rsp) movdqa 0x290(%rsp), %xmm1 movdqa 0x670(%rsp), %xmm0 movdqa %xmm1, 0xb80(%rsp) movdqa %xmm0, 0xb70(%rsp) movdqa 0xb80(%rsp), %xmm0 movdqa 0xb70(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x250(%rsp) movdqa 0x280(%rsp), %xmm1 movdqa 0x670(%rsp), %xmm0 movdqa %xmm1, 0xb60(%rsp) movdqa %xmm0, 0xb50(%rsp) movdqa 0xb60(%rsp), %xmm0 movdqa 0xb50(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x240(%rsp) movdqa 0x270(%rsp), %xmm1 movdqa 0x670(%rsp), %xmm0 movdqa %xmm1, 0xb40(%rsp) movdqa %xmm0, 0xb30(%rsp) movdqa 0xb40(%rsp), %xmm0 movdqa 0xb30(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x230(%rsp) movdqa 0x260(%rsp), %xmm1 movdqa 0x670(%rsp), %xmm0 movdqa %xmm1, 0xb20(%rsp) movdqa %xmm0, 0xb10(%rsp) movdqa 0xb20(%rsp), %xmm0 movdqa 0xb10(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x220(%rsp) movdqa 0x250(%rsp), %xmm0 movdqa %xmm0, 0xe00(%rsp) movl $0xc, 0xdfc(%rsp) movdqa 0xe00(%rsp), %xmm0 movl 0xdfc(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x210(%rsp) movdqa 0x240(%rsp), %xmm0 movdqa %xmm0, 0xde0(%rsp) movl $0xc, 0xddc(%rsp) movdqa 0xde0(%rsp), %xmm0 movl 0xddc(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x200(%rsp) movdqa 0x230(%rsp), %xmm0 movdqa %xmm0, 0xdc0(%rsp) movl $0xc, 0xdbc(%rsp) movdqa 0xdc0(%rsp), %xmm0 movl 0xdbc(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x1f0(%rsp) movdqa 0x220(%rsp), %xmm0 movdqa %xmm0, 0xda0(%rsp) movl $0xc, 0xd9c(%rsp) movdqa 0xda0(%rsp), %xmm0 movl 0xd9c(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x1e0(%rsp) movdqa 0x210(%rsp), %xmm1 movdqa 0x200(%rsp), %xmm0 movdqa %xmm1, 0x1000(%rsp) movdqa %xmm0, 0xff0(%rsp) movdqa 0x1000(%rsp), %xmm0 movdqa 0xff0(%rsp), %xmm1 packssdw %xmm1, %xmm0 movdqa %xmm0, 0x580(%rsp) movdqa 0x1f0(%rsp), %xmm1 movdqa 0x1e0(%rsp), %xmm0 movdqa %xmm1, 0xfe0(%rsp) movdqa %xmm0, 0xfd0(%rsp) movdqa 0xfe0(%rsp), 
%xmm0 movdqa 0xfd0(%rsp), %xmm1 packssdw %xmm1, %xmm0 movdqa %xmm0, 0x590(%rsp) jmp 0x64b392 movdqa 0x5a0(%rsp), %xmm0 movdqa %xmm0, 0x1d0(%rsp) movdqa 0x5b0(%rsp), %xmm0 movdqa %xmm0, 0x1c0(%rsp) movdqa 0x1d0(%rsp), %xmm1 movdqa 0x1c0(%rsp), %xmm0 movdqa %xmm1, 0x11c0(%rsp) movdqa %xmm0, 0x11b0(%rsp) movdqa 0x11c0(%rsp), %xmm0 movdqa 0x11b0(%rsp), %xmm1 paddsw %xmm1, %xmm0 movdqa %xmm0, 0x5a0(%rsp) movdqa 0x1d0(%rsp), %xmm1 movdqa 0x1c0(%rsp), %xmm0 movdqa %xmm1, 0x12c0(%rsp) movdqa %xmm0, 0x12b0(%rsp) movdqa 0x12c0(%rsp), %xmm0 movdqa 0x12b0(%rsp), %xmm1 psubsw %xmm1, %xmm0 movdqa %xmm0, 0x5b0(%rsp) jmp 0x64b43e movdqa 0x5d0(%rsp), %xmm0 movdqa %xmm0, 0x1b0(%rsp) movdqa 0x5c0(%rsp), %xmm0 movdqa %xmm0, 0x1a0(%rsp) movdqa 0x1b0(%rsp), %xmm1 movdqa 0x1a0(%rsp), %xmm0 movdqa %xmm1, 0x12a0(%rsp) movdqa %xmm0, 0x1290(%rsp) movdqa 0x12a0(%rsp), %xmm0 movdqa 0x1290(%rsp), %xmm1 psubsw %xmm1, %xmm0 movdqa %xmm0, 0x5c0(%rsp) movdqa 0x1b0(%rsp), %xmm1 movdqa 0x1a0(%rsp), %xmm0 movdqa %xmm1, 0x11a0(%rsp) movdqa %xmm0, 0x1190(%rsp) movdqa 0x11a0(%rsp), %xmm0 movdqa 0x1190(%rsp), %xmm1 paddsw %xmm1, %xmm0 movdqa %xmm0, 0x5d0(%rsp) jmp 0x64b4ea movdqa 0x560(%rsp), %xmm0 movdqa %xmm0, 0x190(%rsp) movdqa 0x590(%rsp), %xmm0 movdqa %xmm0, 0x180(%rsp) movdqa 0x190(%rsp), %xmm1 movdqa 0x180(%rsp), %xmm0 movdqa %xmm1, 0x1180(%rsp) movdqa %xmm0, 0x1170(%rsp) movdqa 0x1180(%rsp), %xmm0 movdqa 0x1170(%rsp), %xmm1 paddsw %xmm1, %xmm0 movdqa %xmm0, 0x560(%rsp) movdqa 0x190(%rsp), %xmm1 movdqa 0x180(%rsp), %xmm0 movdqa %xmm1, 0x1280(%rsp) movdqa %xmm0, 0x1270(%rsp) movdqa 0x1280(%rsp), %xmm0 movdqa 0x1270(%rsp), %xmm1 psubsw %xmm1, %xmm0 movdqa %xmm0, 0x590(%rsp) jmp 0x64b596 movdqa 0x570(%rsp), %xmm0 movdqa %xmm0, 0x170(%rsp) movdqa 0x580(%rsp), %xmm0 movdqa %xmm0, 0x160(%rsp) movdqa 0x170(%rsp), %xmm1 movdqa 0x160(%rsp), %xmm0 movdqa %xmm1, 0x1160(%rsp) movdqa %xmm0, 0x1150(%rsp) movdqa 0x1160(%rsp), %xmm0 movdqa 0x1150(%rsp), %xmm1 paddsw %xmm1, %xmm0 movdqa %xmm0, 0x570(%rsp) movdqa 
0x170(%rsp), %xmm1 movdqa 0x160(%rsp), %xmm0 movdqa %xmm1, 0x1260(%rsp) movdqa %xmm0, 0x1250(%rsp) movdqa 0x1260(%rsp), %xmm0 movdqa 0x1250(%rsp), %xmm1 psubsw %xmm1, %xmm0 movdqa %xmm0, 0x580(%rsp) jmp 0x64b642 movaps 0x5b0(%rsp), %xmm1 movaps 0x5c0(%rsp), %xmm0 movaps %xmm1, 0x6e0(%rsp) movaps %xmm0, 0x6d0(%rsp) movaps 0x6e0(%rsp), %xmm0 movaps 0x6d0(%rsp), %xmm1 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, 0x150(%rsp) movaps 0x5b0(%rsp), %xmm1 movaps 0x5c0(%rsp), %xmm0 movaps %xmm1, 0x780(%rsp) movaps %xmm0, 0x770(%rsp) movaps 0x780(%rsp), %xmm0 movaps 0x770(%rsp), %xmm1 punpckhwd %xmm1, %xmm0 # xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] movdqa %xmm0, 0x140(%rsp) movdqa 0x150(%rsp), %xmm1 movdqa 0x5e0(%rsp), %xmm0 movdqa %xmm1, 0x880(%rsp) movdqa %xmm0, 0x870(%rsp) movdqa 0x880(%rsp), %xmm0 movdqa 0x870(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x130(%rsp) movdqa 0x140(%rsp), %xmm1 movdqa 0x5e0(%rsp), %xmm0 movdqa %xmm1, 0x860(%rsp) movdqa %xmm0, 0x850(%rsp) movdqa 0x860(%rsp), %xmm0 movdqa 0x850(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x120(%rsp) movdqa 0x150(%rsp), %xmm1 movdqa 0x620(%rsp), %xmm0 movdqa %xmm1, 0x840(%rsp) movdqa %xmm0, 0x830(%rsp) movdqa 0x840(%rsp), %xmm0 movdqa 0x830(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x110(%rsp) movdqa 0x140(%rsp), %xmm1 movdqa 0x620(%rsp), %xmm0 movdqa %xmm1, 0x820(%rsp) movdqa %xmm0, 0x810(%rsp) movdqa 0x820(%rsp), %xmm0 movdqa 0x810(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x100(%rsp) movdqa 0x130(%rsp), %xmm1 movdqa 0x670(%rsp), %xmm0 movdqa %xmm1, 0xb00(%rsp) movdqa %xmm0, 0xaf0(%rsp) movdqa 0xb00(%rsp), %xmm0 movdqa 0xaf0(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0xf0(%rsp) movdqa 0x120(%rsp), %xmm1 movdqa 0x670(%rsp), %xmm0 movdqa %xmm1, 0xae0(%rsp) movdqa %xmm0, 0xad0(%rsp) movdqa 0xae0(%rsp), %xmm0 movdqa 0xad0(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0xe0(%rsp) movdqa 0x110(%rsp), 
%xmm1 movdqa 0x670(%rsp), %xmm0 movdqa %xmm1, 0xac0(%rsp) movdqa %xmm0, 0xab0(%rsp) movdqa 0xac0(%rsp), %xmm0 movdqa 0xab0(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0xd0(%rsp) movdqa 0x100(%rsp), %xmm1 movdqa 0x670(%rsp), %xmm0 movdqa %xmm1, 0xaa0(%rsp) movdqa %xmm0, 0xa90(%rsp) movdqa 0xaa0(%rsp), %xmm0 movdqa 0xa90(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0xc0(%rsp) movdqa 0xf0(%rsp), %xmm0 movdqa %xmm0, 0xd80(%rsp) movl $0xc, 0xd7c(%rsp) movdqa 0xd80(%rsp), %xmm0 movl 0xd7c(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0xb0(%rsp) movdqa 0xe0(%rsp), %xmm0 movdqa %xmm0, 0xd60(%rsp) movl $0xc, 0xd5c(%rsp) movdqa 0xd60(%rsp), %xmm0 movl 0xd5c(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0xa0(%rsp) movdqa 0xd0(%rsp), %xmm0 movdqa %xmm0, 0xd40(%rsp) movl $0xc, 0xd3c(%rsp) movdqa 0xd40(%rsp), %xmm0 movl 0xd3c(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x90(%rsp) movdqa 0xc0(%rsp), %xmm0 movdqa %xmm0, 0xd20(%rsp) movl $0xc, 0xd1c(%rsp) movdqa 0xd20(%rsp), %xmm0 movl 0xd1c(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x80(%rsp) movdqa 0xb0(%rsp), %xmm1 movdqa 0xa0(%rsp), %xmm0 movdqa %xmm1, 0xfc0(%rsp) movdqa %xmm0, 0xfb0(%rsp) movdqa 0xfc0(%rsp), %xmm0 movdqa 0xfb0(%rsp), %xmm1 packssdw %xmm1, %xmm0 movdqa %xmm0, 0x5b0(%rsp) movdqa 0x90(%rsp), %xmm1 movdqa 0x80(%rsp), %xmm0 movdqa %xmm1, 0xfa0(%rsp) movdqa %xmm0, 0xf90(%rsp) movdqa 0xfa0(%rsp), %xmm0 movdqa 0xf90(%rsp), %xmm1 packssdw %xmm1, %xmm0 movdqa %xmm0, 0x5c0(%rsp) jmp 0x64ba53 movdqa 0x560(%rsp), %xmm0 movdqa %xmm0, 0x70(%rsp) movdqa 0x5d0(%rsp), %xmm0 movdqa %xmm0, 0x60(%rsp) movdqa 0x70(%rsp), %xmm1 movdqa 0x60(%rsp), %xmm0 movdqa %xmm1, 0x1140(%rsp) movdqa %xmm0, 0x1130(%rsp) movdqa 0x1140(%rsp), %xmm0 movdqa 0x1130(%rsp), %xmm1 paddsw %xmm1, %xmm0 movq 0x698(%rsp), %rax movdqa %xmm0, (%rax) movdqa 0x70(%rsp), %xmm1 movdqa 0x60(%rsp), %xmm0 movdqa %xmm1, 0x1240(%rsp) movdqa %xmm0, 0x1230(%rsp) movdqa 0x1240(%rsp), %xmm0 movdqa 
0x1230(%rsp), %xmm1 psubsw %xmm1, %xmm0 movq 0x698(%rsp), %rax movdqa %xmm0, 0x70(%rax) jmp 0x64baf4 movdqa 0x570(%rsp), %xmm0 movdqa %xmm0, 0x50(%rsp) movdqa 0x5c0(%rsp), %xmm0 movdqa %xmm0, 0x40(%rsp) movdqa 0x50(%rsp), %xmm1 movdqa 0x40(%rsp), %xmm0 movdqa %xmm1, 0x1120(%rsp) movdqa %xmm0, 0x1110(%rsp) movdqa 0x1120(%rsp), %xmm0 movdqa 0x1110(%rsp), %xmm1 paddsw %xmm1, %xmm0 movq 0x698(%rsp), %rax movdqa %xmm0, 0x10(%rax) movdqa 0x50(%rsp), %xmm1 movdqa 0x40(%rsp), %xmm0 movdqa %xmm1, 0x1220(%rsp) movdqa %xmm0, 0x1210(%rsp) movdqa 0x1220(%rsp), %xmm0 movdqa 0x1210(%rsp), %xmm1 psubsw %xmm1, %xmm0 movq 0x698(%rsp), %rax movdqa %xmm0, 0x60(%rax) jmp 0x64bb96 movdqa 0x580(%rsp), %xmm0 movdqa %xmm0, 0x30(%rsp) movdqa 0x5b0(%rsp), %xmm0 movdqa %xmm0, 0x20(%rsp) movdqa 0x30(%rsp), %xmm1 movdqa 0x20(%rsp), %xmm0 movdqa %xmm1, 0x1100(%rsp) movdqa %xmm0, 0x10f0(%rsp) movdqa 0x1100(%rsp), %xmm0 movdqa 0x10f0(%rsp), %xmm1 paddsw %xmm1, %xmm0 movq 0x698(%rsp), %rax movdqa %xmm0, 0x20(%rax) movdqa 0x30(%rsp), %xmm1 movdqa 0x20(%rsp), %xmm0 movdqa %xmm1, 0x1200(%rsp) movdqa %xmm0, 0x11f0(%rsp) movdqa 0x1200(%rsp), %xmm0 movdqa 0x11f0(%rsp), %xmm1 psubsw %xmm1, %xmm0 movq 0x698(%rsp), %rax movdqa %xmm0, 0x50(%rax) jmp 0x64bc38 movdqa 0x590(%rsp), %xmm0 movdqa %xmm0, 0x10(%rsp) movdqa 0x5a0(%rsp), %xmm0 movdqa %xmm0, (%rsp) movdqa 0x10(%rsp), %xmm1 movdqa (%rsp), %xmm0 movdqa %xmm1, 0x10e0(%rsp) movdqa %xmm0, 0x10d0(%rsp) movdqa 0x10e0(%rsp), %xmm0 movdqa 0x10d0(%rsp), %xmm1 paddsw %xmm1, %xmm0 movq 0x698(%rsp), %rax movdqa %xmm0, 0x30(%rax) movdqa 0x10(%rsp), %xmm1 movdqa (%rsp), %xmm0 movdqa %xmm1, 0x11e0(%rsp) movdqa %xmm0, 0x11d0(%rsp) movdqa 0x11e0(%rsp), %xmm0 movdqa 0x11d0(%rsp), %xmm1 psubsw %xmm1, %xmm0 movq 0x698(%rsp), %rax movdqa %xmm0, 0x40(%rax) addq $0x1418, %rsp # imm = 0x1418 retq nopl (%rax)
/m-ab-s[P]aom/av1/common/x86/av1_inv_txfm_ssse3.c
get_rect_tx_log_ratio
static inline int get_rect_tx_log_ratio(int col, int row) {
  // Returns log2(col / row) for the rectangular transform shapes AV1
  // supports: aspect ratios 1:1 (0), 2:1 (1), 4:1 (2), 1:2 (-1), 1:4 (-2).
  // Any other ratio trips the assert in debug builds; release builds fall
  // through to 0, matching the original "Invalid" fallback.
  if (col == row) return 0;
  const int wide = col > row;
  const int longer = wide ? col : row;
  const int shorter = wide ? row : col;
  int mag = 0;
  if (longer == shorter * 2) {
    mag = 1;
  } else if (longer == shorter * 4) {
    mag = 2;
  } else {
    assert(0 && "Unsupported transform size");
  }
  return wide ? mag : -mag;
}
movl %edi, -0x8(%rsp) movl %esi, -0xc(%rsp) movl -0x8(%rsp), %eax cmpl -0xc(%rsp), %eax jne 0x6502dc movl $0x0, -0x4(%rsp) jmp 0x650354 movl -0x8(%rsp), %eax cmpl -0xc(%rsp), %eax jle 0x650319 movl -0x8(%rsp), %eax movl -0xc(%rsp), %ecx shll %ecx cmpl %ecx, %eax jne 0x6502fe movl $0x1, -0x4(%rsp) jmp 0x650354 movl -0x8(%rsp), %eax movl -0xc(%rsp), %ecx shll $0x2, %ecx cmpl %ecx, %eax jne 0x650317 movl $0x2, -0x4(%rsp) jmp 0x650354 jmp 0x65034c movl -0xc(%rsp), %eax movl -0x8(%rsp), %ecx shll %ecx cmpl %ecx, %eax jne 0x650331 movl $0xffffffff, -0x4(%rsp) # imm = 0xFFFFFFFF jmp 0x650354 movl -0xc(%rsp), %eax movl -0x8(%rsp), %ecx shll $0x2, %ecx cmpl %ecx, %eax jne 0x65034a movl $0xfffffffe, -0x4(%rsp) # imm = 0xFFFFFFFE jmp 0x650354 jmp 0x65034c movl $0x0, -0x4(%rsp) movl -0x4(%rsp), %eax retq nopl (%rax)
/m-ab-s[P]aom/av1/common/av1_txfm.h
transpose_16bit_8x8
static inline void transpose_16bit_8x8(const __m128i *const in,
                                       __m128i *const out) {
  // Transposes an 8x8 block of 16-bit values: in[r] holds row r, out[c]
  // receives column c. Classic three-stage butterfly: interleave 16-bit
  // lanes, then 32-bit lanes, then 64-bit lanes.
  //
  // Stage 1: pair up adjacent rows.
  //   wN even/odd of a pair -> low/high column halves of rows 2k, 2k+1.
  const __m128i w0 = _mm_unpacklo_epi16(in[0], in[1]);  // r0/r1 cols 0-3
  const __m128i w1 = _mm_unpackhi_epi16(in[0], in[1]);  // r0/r1 cols 4-7
  const __m128i w2 = _mm_unpacklo_epi16(in[2], in[3]);  // r2/r3 cols 0-3
  const __m128i w3 = _mm_unpackhi_epi16(in[2], in[3]);  // r2/r3 cols 4-7
  const __m128i w4 = _mm_unpacklo_epi16(in[4], in[5]);  // r4/r5 cols 0-3
  const __m128i w5 = _mm_unpackhi_epi16(in[4], in[5]);  // r4/r5 cols 4-7
  const __m128i w6 = _mm_unpacklo_epi16(in[6], in[7]);  // r6/r7 cols 0-3
  const __m128i w7 = _mm_unpackhi_epi16(in[6], in[7]);  // r6/r7 cols 4-7

  // Stage 2: merge row quads; each dN now holds two full output rows
  // (four rows' worth of one or two column indices).
  const __m128i d0 = _mm_unpacklo_epi32(w0, w2);  // cols 0,1 of rows 0-3
  const __m128i d1 = _mm_unpacklo_epi32(w4, w6);  // cols 0,1 of rows 4-7
  const __m128i d2 = _mm_unpacklo_epi32(w1, w3);  // cols 4,5 of rows 0-3
  const __m128i d3 = _mm_unpacklo_epi32(w5, w7);  // cols 4,5 of rows 4-7
  const __m128i d4 = _mm_unpackhi_epi32(w0, w2);  // cols 2,3 of rows 0-3
  const __m128i d5 = _mm_unpackhi_epi32(w4, w6);  // cols 2,3 of rows 4-7
  const __m128i d6 = _mm_unpackhi_epi32(w1, w3);  // cols 6,7 of rows 0-3
  const __m128i d7 = _mm_unpackhi_epi32(w5, w7);  // cols 6,7 of rows 4-7

  // Stage 3: stitch the halves of each output row together.
  out[0] = _mm_unpacklo_epi64(d0, d1);
  out[1] = _mm_unpackhi_epi64(d0, d1);
  out[2] = _mm_unpacklo_epi64(d4, d5);
  out[3] = _mm_unpackhi_epi64(d4, d5);
  out[4] = _mm_unpacklo_epi64(d2, d3);
  out[5] = _mm_unpackhi_epi64(d2, d3);
  out[6] = _mm_unpacklo_epi64(d6, d7);
  out[7] = _mm_unpackhi_epi64(d6, d7);
}
subq $0x398, %rsp # imm = 0x398 movq %rdi, 0x88(%rsp) movq %rsi, 0x80(%rsp) movq 0x88(%rsp), %rax movaps (%rax), %xmm1 movaps 0x10(%rax), %xmm0 movaps %xmm1, 0x100(%rsp) movaps %xmm0, 0xf0(%rsp) movaps 0x100(%rsp), %xmm0 movaps 0xf0(%rsp), %xmm1 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, 0x70(%rsp) movq 0x88(%rsp), %rax movaps 0x20(%rax), %xmm1 movaps 0x30(%rax), %xmm0 movaps %xmm1, 0xe0(%rsp) movaps %xmm0, 0xd0(%rsp) movaps 0xe0(%rsp), %xmm0 movaps 0xd0(%rsp), %xmm1 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, 0x60(%rsp) movq 0x88(%rsp), %rax movaps 0x40(%rax), %xmm1 movaps 0x50(%rax), %xmm0 movaps %xmm1, 0xc0(%rsp) movaps %xmm0, 0xb0(%rsp) movaps 0xc0(%rsp), %xmm0 movaps 0xb0(%rsp), %xmm1 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, 0x50(%rsp) movq 0x88(%rsp), %rax movaps 0x60(%rax), %xmm1 movaps 0x70(%rax), %xmm0 movaps %xmm1, 0xa0(%rsp) movaps %xmm0, 0x90(%rsp) movaps 0xa0(%rsp), %xmm0 movaps 0x90(%rsp), %xmm1 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, 0x40(%rsp) movq 0x88(%rsp), %rax movaps (%rax), %xmm1 movaps 0x10(%rax), %xmm0 movaps %xmm1, 0x180(%rsp) movaps %xmm0, 0x170(%rsp) movaps 0x180(%rsp), %xmm0 movaps 0x170(%rsp), %xmm1 punpckhwd %xmm1, %xmm0 # xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] movaps %xmm0, 0x30(%rsp) movq 0x88(%rsp), %rax movaps 0x20(%rax), %xmm1 movaps 0x30(%rax), %xmm0 movaps %xmm1, 0x160(%rsp) movaps %xmm0, 0x150(%rsp) movaps 0x160(%rsp), %xmm0 movaps 0x150(%rsp), %xmm1 punpckhwd %xmm1, %xmm0 # xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] movaps %xmm0, 0x20(%rsp) movq 0x88(%rsp), %rax movaps 0x40(%rax), %xmm1 movaps 0x50(%rax), %xmm0 movaps %xmm1, 0x140(%rsp) movaps %xmm0, 0x130(%rsp) movaps 0x140(%rsp), %xmm0 movaps 
0x130(%rsp), %xmm1 punpckhwd %xmm1, %xmm0 # xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] movaps %xmm0, 0x10(%rsp) movq 0x88(%rsp), %rax movaps 0x60(%rax), %xmm1 movaps 0x70(%rax), %xmm0 movaps %xmm1, 0x120(%rsp) movaps %xmm0, 0x110(%rsp) movaps 0x120(%rsp), %xmm0 movaps 0x110(%rsp), %xmm1 punpckhwd %xmm1, %xmm0 # xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] movaps %xmm0, (%rsp) movaps 0x70(%rsp), %xmm1 movaps 0x60(%rsp), %xmm0 movaps %xmm1, 0x200(%rsp) movaps %xmm0, 0x1f0(%rsp) movaps 0x200(%rsp), %xmm0 movaps 0x1f0(%rsp), %xmm1 punpckldq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] movaps %xmm0, -0x10(%rsp) movaps 0x50(%rsp), %xmm1 movaps 0x40(%rsp), %xmm0 movaps %xmm1, 0x1e0(%rsp) movaps %xmm0, 0x1d0(%rsp) movaps 0x1e0(%rsp), %xmm0 movaps 0x1d0(%rsp), %xmm1 punpckldq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] movaps %xmm0, -0x20(%rsp) movaps 0x30(%rsp), %xmm1 movaps 0x20(%rsp), %xmm0 movaps %xmm1, 0x1c0(%rsp) movaps %xmm0, 0x1b0(%rsp) movaps 0x1c0(%rsp), %xmm0 movaps 0x1b0(%rsp), %xmm1 punpckldq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] movaps %xmm0, -0x30(%rsp) movaps 0x10(%rsp), %xmm1 movaps (%rsp), %xmm0 movaps %xmm1, 0x1a0(%rsp) movaps %xmm0, 0x190(%rsp) movaps 0x1a0(%rsp), %xmm0 movaps 0x190(%rsp), %xmm1 punpckldq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] movaps %xmm0, -0x40(%rsp) movaps 0x70(%rsp), %xmm1 movaps 0x60(%rsp), %xmm0 movaps %xmm1, 0x280(%rsp) movaps %xmm0, 0x270(%rsp) movaps 0x280(%rsp), %xmm0 movaps 0x270(%rsp), %xmm1 punpckhdq %xmm1, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, -0x50(%rsp) movaps 0x50(%rsp), %xmm1 movaps 0x40(%rsp), %xmm0 movaps %xmm1, 0x260(%rsp) movaps %xmm0, 0x250(%rsp) movaps 0x260(%rsp), %xmm0 movaps 0x250(%rsp), %xmm1 punpckhdq %xmm1, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, -0x60(%rsp) movaps 0x30(%rsp), %xmm1 movaps 0x20(%rsp), %xmm0 movaps %xmm1, 0x240(%rsp) movaps %xmm0, 0x230(%rsp) 
movaps 0x240(%rsp), %xmm0 movaps 0x230(%rsp), %xmm1 punpckhdq %xmm1, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, -0x70(%rsp) movaps 0x10(%rsp), %xmm1 movaps (%rsp), %xmm0 movaps %xmm1, 0x220(%rsp) movaps %xmm0, 0x210(%rsp) movaps 0x220(%rsp), %xmm0 movaps 0x210(%rsp), %xmm1 punpckhdq %xmm1, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, -0x80(%rsp) movaps -0x10(%rsp), %xmm1 movaps -0x20(%rsp), %xmm0 movaps %xmm1, 0x300(%rsp) movaps %xmm0, 0x2f0(%rsp) movaps 0x300(%rsp), %xmm0 movaps 0x2f0(%rsp), %xmm1 punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movq 0x80(%rsp), %rax movaps %xmm0, (%rax) movaps -0x10(%rsp), %xmm1 movaps -0x20(%rsp), %xmm0 movaps %xmm1, 0x380(%rsp) movaps %xmm0, 0x370(%rsp) movaps 0x380(%rsp), %xmm0 movaps 0x370(%rsp), %xmm1 punpckhqdq %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1] movq 0x80(%rsp), %rax movaps %xmm0, 0x10(%rax) movaps -0x50(%rsp), %xmm1 movaps -0x60(%rsp), %xmm0 movaps %xmm1, 0x2e0(%rsp) movaps %xmm0, 0x2d0(%rsp) movaps 0x2e0(%rsp), %xmm0 movaps 0x2d0(%rsp), %xmm1 punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movq 0x80(%rsp), %rax movaps %xmm0, 0x20(%rax) movaps -0x50(%rsp), %xmm1 movaps -0x60(%rsp), %xmm0 movaps %xmm1, 0x360(%rsp) movaps %xmm0, 0x350(%rsp) movaps 0x360(%rsp), %xmm0 movaps 0x350(%rsp), %xmm1 punpckhqdq %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1] movq 0x80(%rsp), %rax movaps %xmm0, 0x30(%rax) movaps -0x30(%rsp), %xmm1 movaps -0x40(%rsp), %xmm0 movaps %xmm1, 0x2c0(%rsp) movaps %xmm0, 0x2b0(%rsp) movaps 0x2c0(%rsp), %xmm0 movaps 0x2b0(%rsp), %xmm1 punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movq 0x80(%rsp), %rax movaps %xmm0, 0x40(%rax) movaps -0x30(%rsp), %xmm1 movaps -0x40(%rsp), %xmm0 movaps %xmm1, 0x340(%rsp) movaps %xmm0, 0x330(%rsp) movaps 0x340(%rsp), %xmm0 movaps 0x330(%rsp), %xmm1 punpckhqdq %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1] movq 0x80(%rsp), %rax movaps %xmm0, 0x50(%rax) movaps -0x70(%rsp), %xmm1 movaps -0x80(%rsp), %xmm0 movaps %xmm1, 0x2a0(%rsp) movaps %xmm0, 0x290(%rsp) 
movaps 0x2a0(%rsp), %xmm0 movaps 0x290(%rsp), %xmm1 punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movq 0x80(%rsp), %rax movaps %xmm0, 0x60(%rax) movaps -0x70(%rsp), %xmm1 movaps -0x80(%rsp), %xmm0 movaps %xmm1, 0x320(%rsp) movaps %xmm0, 0x310(%rsp) movaps 0x320(%rsp), %xmm0 movaps 0x310(%rsp), %xmm1 punpckhqdq %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1] movq 0x80(%rsp), %rax movdqa %xmm0, 0x70(%rax) addq $0x398, %rsp # imm = 0x398 retq nopw (%rax,%rax)
/m-ab-s[P]aom/aom_dsp/x86/transpose_sse2.h
av1_highbd_inv_txfm_add_16x4_sse4_1
static void av1_highbd_inv_txfm_add_16x4_sse4_1(const tran_low_t *input,
                                                uint8_t *dest, int stride,
                                                const TxfmParam *txfm_param) {
  // Thin dispatch wrapper: unpacks the transform parameters and forwards
  // to the 16x4 high-bitdepth inverse-transform-and-add kernel.
  // NOTE(review): `dest` is presumably a CONVERT_TO_BYTEPTR()-tagged alias
  // of a uint16_t buffer, hence the CONVERT_TO_SHORTPTR below — confirm
  // against callers.
  const TX_TYPE tx_type = txfm_param->tx_type;
  const TX_SIZE tx_size = txfm_param->tx_size;
  highbd_inv_txfm2d_add_16x4_sse4_1(input, CONVERT_TO_SHORTPTR(dest), stride,
                                    tx_type, tx_size, txfm_param->eob,
                                    txfm_param->bd);
}
subq $0x38, %rsp movq %rdi, 0x30(%rsp) movq %rsi, 0x28(%rsp) movl %edx, 0x24(%rsp) movq %rcx, 0x18(%rsp) movq 0x18(%rsp), %rax movl 0x8(%rax), %eax movl %eax, 0x14(%rsp) movq 0x18(%rsp), %rax movb (%rax), %al movb %al, 0x13(%rsp) movq 0x18(%rsp), %rax movb 0x1(%rax), %al movb %al, 0x12(%rsp) movq 0x18(%rsp), %rax movl 0x14(%rax), %eax movl %eax, 0xc(%rsp) movq 0x30(%rsp), %rdi movq 0x28(%rsp), %rsi shlq %rsi movl 0x24(%rsp), %edx movb 0x13(%rsp), %cl movb 0x12(%rsp), %r8b movl 0xc(%rsp), %r9d movl 0x14(%rsp), %eax movzbl %cl, %ecx movzbl %r8b, %r8d movl %eax, (%rsp) callq 0x7d3930 addq $0x38, %rsp retq nopl (%rax)
/m-ab-s[P]aom/av1/common/x86/highbd_inv_txfm_sse4.c
av1_round_shift_array_32_sse4_1
static inline void av1_round_shift_array_32_sse4_1(const __m128i *input,
                                                   __m128i *output,
                                                   const int size,
                                                   const int bit) {
  // Applies a per-lane shift to `size` vectors of four 32-bit values:
  // a rounding right shift by `bit` when bit > 0, otherwise a plain left
  // shift by -bit.
  if (bit > 0) {
    for (int k = 0; k < size; ++k) {
      output[k] = av1_round_shift_32_sse4_1(input[k], bit);
    }
  } else {
    for (int k = 0; k < size; ++k) {
      output[k] = _mm_slli_epi32(input[k], -bit);
    }
  }
}
subq $0x48, %rsp movq %rdi, 0x20(%rsp) movq %rsi, 0x18(%rsp) movl %edx, 0x14(%rsp) movl %ecx, 0x10(%rsp) cmpl $0x0, 0x10(%rsp) jle 0x7a2f81 movl $0x0, 0xc(%rsp) movl 0xc(%rsp), %eax cmpl 0x14(%rsp), %eax jge 0x7a2f7f movq 0x20(%rsp), %rax movslq 0xc(%rsp), %rcx shlq $0x4, %rcx addq %rcx, %rax movdqa (%rax), %xmm0 movl 0x10(%rsp), %edi callq 0x7d24c0 movq 0x18(%rsp), %rax movslq 0xc(%rsp), %rcx shlq $0x4, %rcx addq %rcx, %rax movdqa %xmm0, (%rax) movl 0xc(%rsp), %eax addl $0x1, %eax movl %eax, 0xc(%rsp) jmp 0x7a2f35 jmp 0x7a2fee movl $0x0, 0x8(%rsp) movl 0x8(%rsp), %eax cmpl 0x14(%rsp), %eax jge 0x7a2fec movq 0x20(%rsp), %rax movslq 0x8(%rsp), %rcx shlq $0x4, %rcx addq %rcx, %rax movdqa (%rax), %xmm0 xorl %eax, %eax subl 0x10(%rsp), %eax movdqa %xmm0, 0x30(%rsp) movl %eax, 0x2c(%rsp) movdqa 0x30(%rsp), %xmm0 movl 0x2c(%rsp), %eax movd %eax, %xmm1 pslld %xmm1, %xmm0 movq 0x18(%rsp), %rax movslq 0x8(%rsp), %rcx shlq $0x4, %rcx addq %rcx, %rax movdqa %xmm0, (%rax) movl 0x8(%rsp), %eax addl $0x1, %eax movl %eax, 0x8(%rsp) jmp 0x7a2f89 jmp 0x7a2fee addq $0x48, %rsp retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/av1/common/x86/av1_txfm_sse4.h
iidentity8_sse4_1
static void iidentity8_sse4_1(__m128i *in, __m128i *out, int bit, int do_cols,
                              int bd, int out_shift) {
  (void)bit;
  // The 8-point identity transform scales every coefficient by 2:
  // out[i] = in[i] + in[i].
  for (int i = 0; i < 8; ++i) {
    out[i] = _mm_add_epi32(in[i], in[i]);
  }
  if (do_cols) return;
  // Row pass only: round-shift the results and clamp them to the
  // intermediate dynamic range (at least 16 bits, or bd + 6).
  const int log_range = AOMMAX(16, bd + 6);
  const __m128i clamp_lo = _mm_set1_epi32(-(1 << (log_range - 1)));
  const __m128i clamp_hi = _mm_set1_epi32((1 << (log_range - 1)) - 1);
  round_shift_4x4(out, out_shift);
  round_shift_4x4(out + 4, out_shift);
  highbd_clamp_epi32_sse4_1(out, out, &clamp_lo, &clamp_hi, 8);
}
subq $0x1b8, %rsp # imm = 0x1B8 movq %rdi, 0x58(%rsp) movq %rsi, 0x50(%rsp) movl %edx, 0x4c(%rsp) movl %ecx, 0x48(%rsp) movl %r8d, 0x44(%rsp) movl %r9d, 0x40(%rsp) movq 0x58(%rsp), %rax movdqa (%rax), %xmm1 movq 0x58(%rsp), %rax movdqa (%rax), %xmm0 movdqa %xmm1, 0x150(%rsp) movdqa %xmm0, 0x140(%rsp) movdqa 0x150(%rsp), %xmm0 movdqa 0x140(%rsp), %xmm1 paddd %xmm1, %xmm0 movq 0x50(%rsp), %rax movdqa %xmm0, (%rax) movq 0x58(%rsp), %rax movdqa 0x10(%rax), %xmm1 movq 0x58(%rsp), %rax movdqa 0x10(%rax), %xmm0 movdqa %xmm1, 0x130(%rsp) movdqa %xmm0, 0x120(%rsp) movdqa 0x130(%rsp), %xmm0 movdqa 0x120(%rsp), %xmm1 paddd %xmm1, %xmm0 movq 0x50(%rsp), %rax movdqa %xmm0, 0x10(%rax) movq 0x58(%rsp), %rax movdqa 0x20(%rax), %xmm1 movq 0x58(%rsp), %rax movdqa 0x20(%rax), %xmm0 movdqa %xmm1, 0x110(%rsp) movdqa %xmm0, 0x100(%rsp) movdqa 0x110(%rsp), %xmm0 movdqa 0x100(%rsp), %xmm1 paddd %xmm1, %xmm0 movq 0x50(%rsp), %rax movdqa %xmm0, 0x20(%rax) movq 0x58(%rsp), %rax movdqa 0x30(%rax), %xmm1 movq 0x58(%rsp), %rax movdqa 0x30(%rax), %xmm0 movdqa %xmm1, 0xf0(%rsp) movdqa %xmm0, 0xe0(%rsp) movdqa 0xf0(%rsp), %xmm0 movdqa 0xe0(%rsp), %xmm1 paddd %xmm1, %xmm0 movq 0x50(%rsp), %rax movdqa %xmm0, 0x30(%rax) movq 0x58(%rsp), %rax movdqa 0x40(%rax), %xmm1 movq 0x58(%rsp), %rax movdqa 0x40(%rax), %xmm0 movdqa %xmm1, 0xd0(%rsp) movdqa %xmm0, 0xc0(%rsp) movdqa 0xd0(%rsp), %xmm0 movdqa 0xc0(%rsp), %xmm1 paddd %xmm1, %xmm0 movq 0x50(%rsp), %rax movdqa %xmm0, 0x40(%rax) movq 0x58(%rsp), %rax movdqa 0x50(%rax), %xmm1 movq 0x58(%rsp), %rax movdqa 0x50(%rax), %xmm0 movdqa %xmm1, 0xb0(%rsp) movdqa %xmm0, 0xa0(%rsp) movdqa 0xb0(%rsp), %xmm0 movdqa 0xa0(%rsp), %xmm1 paddd %xmm1, %xmm0 movq 0x50(%rsp), %rax movdqa %xmm0, 0x50(%rax) movq 0x58(%rsp), %rax movdqa 0x60(%rax), %xmm1 movq 0x58(%rsp), %rax movdqa 0x60(%rax), %xmm0 movdqa %xmm1, 0x90(%rsp) movdqa %xmm0, 0x80(%rsp) movdqa 0x90(%rsp), %xmm0 movdqa 0x80(%rsp), %xmm1 paddd %xmm1, %xmm0 movq 0x50(%rsp), %rax movdqa %xmm0, 0x60(%rax) movq 
0x58(%rsp), %rax movdqa 0x70(%rax), %xmm1 movq 0x58(%rsp), %rax movdqa 0x70(%rax), %xmm0 movdqa %xmm1, 0x70(%rsp) movdqa %xmm0, 0x60(%rsp) movdqa 0x70(%rsp), %xmm0 movdqa 0x60(%rsp), %xmm1 paddd %xmm1, %xmm0 movq 0x50(%rsp), %rax movdqa %xmm0, 0x70(%rax) cmpl $0x0, 0x48(%rsp) jne 0x7a841a movl 0x44(%rsp), %ecx addl $0x6, %ecx movl $0x10, %eax cmpl %ecx, %eax jle 0x7a82ca movl $0x10, %eax movl %eax, 0xc(%rsp) jmp 0x7a82d5 movl 0x44(%rsp), %eax addl $0x6, %eax movl %eax, 0xc(%rsp) movl 0xc(%rsp), %eax movl %eax, 0x3c(%rsp) movb 0x3c(%rsp), %cl decb %cl movl $0x1, %eax movl %eax, %edx shll %cl, %edx movl %edx, %ecx negl %ecx movl %ecx, 0x16c(%rsp) movl 0x16c(%rsp), %ecx movl %ecx, 0x18c(%rsp) movl %ecx, 0x188(%rsp) movl %ecx, 0x184(%rsp) movl %ecx, 0x180(%rsp) movl 0x184(%rsp), %esi movl 0x188(%rsp), %edx movl 0x18c(%rsp), %ecx movd 0x180(%rsp), %xmm0 pinsrd $0x1, %esi, %xmm0 pinsrd $0x2, %edx, %xmm0 pinsrd $0x3, %ecx, %xmm0 movaps %xmm0, 0x170(%rsp) movaps 0x170(%rsp), %xmm0 movaps %xmm0, 0x20(%rsp) movb 0x3c(%rsp), %cl decb %cl shll %cl, %eax decl %eax movl %eax, 0x168(%rsp) movl 0x168(%rsp), %eax movl %eax, 0x1b4(%rsp) movl %eax, 0x1b0(%rsp) movl %eax, 0x1ac(%rsp) movl %eax, 0x1a8(%rsp) movl 0x1ac(%rsp), %edx movl 0x1b0(%rsp), %ecx movl 0x1b4(%rsp), %eax movd 0x1a8(%rsp), %xmm0 pinsrd $0x1, %edx, %xmm0 pinsrd $0x2, %ecx, %xmm0 pinsrd $0x3, %eax, %xmm0 movdqa %xmm0, 0x190(%rsp) movdqa 0x190(%rsp), %xmm0 movdqa %xmm0, 0x10(%rsp) movq 0x50(%rsp), %rdi movl 0x40(%rsp), %esi callq 0x7a1fe0 movq 0x50(%rsp), %rdi addq $0x40, %rdi movl 0x40(%rsp), %esi callq 0x7a1fe0 movq 0x50(%rsp), %rdi movq 0x50(%rsp), %rsi leaq 0x20(%rsp), %rdx leaq 0x10(%rsp), %rcx movl $0x8, %r8d callq 0x7a2260 addq $0x1b8, %rsp # imm = 0x1B8 retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/av1/common/x86/highbd_inv_txfm_sse4.c
idct32x32_low8_sse4_1
// Inverse 32x32 DCT, SSE4.1 path specialized for inputs where only the 8
// lowest-frequency coefficients (in[0]..in[7]) are non-zero.  Processes one
// 4-wide column group of 32 rows held in `in`, writing the result to `out`.
//
// Parameters:
//   in        - 8 x __m128i of non-zero input coefficients (rows 0..7).
//   out       - 32 x __m128i output vector array.
//   bit       - intermediate precision; rounding uses 1 << (bit - 1).
//   do_cols   - nonzero for the column pass (affects clamp range and the
//               final stage-9 behavior).
//   bd        - bit depth of the pixel data.
//   out_shift - down-shift applied in the final stage (row pass).
static void idct32x32_low8_sse4_1(__m128i *in, __m128i *out, int bit,
                                  int do_cols, int bd, int out_shift) {
  // Per-bit cosine constant table; each constant is broadcast to all four
  // 32-bit lanes.  Names ending in "m" hold the negated constant.
  const int32_t *cospi = cospi_arr(bit);
  const __m128i cospi62 = _mm_set1_epi32(cospi[62]);
  const __m128i cospi14 = _mm_set1_epi32(cospi[14]);
  const __m128i cospi54 = _mm_set1_epi32(cospi[54]);
  const __m128i cospi6 = _mm_set1_epi32(cospi[6]);
  const __m128i cospi10 = _mm_set1_epi32(cospi[10]);
  const __m128i cospi2 = _mm_set1_epi32(cospi[2]);
  const __m128i cospim58 = _mm_set1_epi32(-cospi[58]);
  const __m128i cospim50 = _mm_set1_epi32(-cospi[50]);
  const __m128i cospi60 = _mm_set1_epi32(cospi[60]);
  const __m128i cospi12 = _mm_set1_epi32(cospi[12]);
  const __m128i cospi4 = _mm_set1_epi32(cospi[4]);
  const __m128i cospim52 = _mm_set1_epi32(-cospi[52]);
  const __m128i cospi56 = _mm_set1_epi32(cospi[56]);
  const __m128i cospi24 = _mm_set1_epi32(cospi[24]);
  const __m128i cospi40 = _mm_set1_epi32(cospi[40]);
  const __m128i cospi8 = _mm_set1_epi32(cospi[8]);
  const __m128i cospim40 = _mm_set1_epi32(-cospi[40]);
  const __m128i cospim8 = _mm_set1_epi32(-cospi[8]);
  const __m128i cospim56 = _mm_set1_epi32(-cospi[56]);
  const __m128i cospim24 = _mm_set1_epi32(-cospi[24]);
  const __m128i cospi32 = _mm_set1_epi32(cospi[32]);
  const __m128i cospim32 = _mm_set1_epi32(-cospi[32]);
  const __m128i cospi48 = _mm_set1_epi32(cospi[48]);
  const __m128i cospim48 = _mm_set1_epi32(-cospi[48]);
  const __m128i cospi16 = _mm_set1_epi32(cospi[16]);
  const __m128i cospim16 = _mm_set1_epi32(-cospi[16]);
  // Rounding offset for the >> bit shifts inside the butterflies.
  const __m128i rounding = _mm_set1_epi32(1 << (bit - 1));
  // Clamp range: at least 16 bits; columns get bd+6, rows bd+8.
  const int log_range = AOMMAX(16, bd + (do_cols ? 6 : 8));
  const __m128i clamp_lo = _mm_set1_epi32(-(1 << (log_range - 1)));
  const __m128i clamp_hi = _mm_set1_epi32((1 << (log_range - 1)) - 1);
  __m128i bf1[32];  // butterfly working state, updated in place per stage

  // stage 0
  // stage 1
  // Scatter the 8 non-zero inputs into their butterfly positions; all other
  // bf1 slots are implied zero and their stage-2/3 outputs reduce to a
  // single multiply (half_btf_0), which is why the zero slots need no init.
  bf1[0] = in[0];
  bf1[4] = in[4];
  bf1[8] = in[2];
  bf1[12] = in[6];
  bf1[16] = in[1];
  bf1[20] = in[5];
  bf1[24] = in[3];
  bf1[28] = in[7];

  // stage 2
  // NOTE: bf1[31]/[19]/[27]/[23] must be computed before bf1[16]/[28]/[20]/
  // [24] are overwritten — both read the same source slot.
  bf1[31] = half_btf_0_sse4_1(&cospi2, &bf1[16], &rounding, bit);
  bf1[16] = half_btf_0_sse4_1(&cospi62, &bf1[16], &rounding, bit);
  bf1[19] = half_btf_0_sse4_1(&cospim50, &bf1[28], &rounding, bit);
  bf1[28] = half_btf_0_sse4_1(&cospi14, &bf1[28], &rounding, bit);
  bf1[27] = half_btf_0_sse4_1(&cospi10, &bf1[20], &rounding, bit);
  bf1[20] = half_btf_0_sse4_1(&cospi54, &bf1[20], &rounding, bit);
  bf1[23] = half_btf_0_sse4_1(&cospim58, &bf1[24], &rounding, bit);
  bf1[24] = half_btf_0_sse4_1(&cospi6, &bf1[24], &rounding, bit);

  // stage 3
  bf1[15] = half_btf_0_sse4_1(&cospi4, &bf1[8], &rounding, bit);
  bf1[8] = half_btf_0_sse4_1(&cospi60, &bf1[8], &rounding, bit);
  bf1[11] = half_btf_0_sse4_1(&cospim52, &bf1[12], &rounding, bit);
  bf1[12] = half_btf_0_sse4_1(&cospi12, &bf1[12], &rounding, bit);
  // With the paired inputs zero, the stage-3 add/sub butterflies degenerate
  // to plain copies.
  bf1[17] = bf1[16];
  bf1[18] = bf1[19];
  bf1[21] = bf1[20];
  bf1[22] = bf1[23];
  bf1[25] = bf1[24];
  bf1[26] = bf1[27];
  bf1[29] = bf1[28];
  bf1[30] = bf1[31];

  // stage 4
  bf1[7] = half_btf_0_sse4_1(&cospi8, &bf1[4], &rounding, bit);
  bf1[4] = half_btf_0_sse4_1(&cospi56, &bf1[4], &rounding, bit);
  bf1[9] = bf1[8];
  bf1[10] = bf1[11];
  bf1[13] = bf1[12];
  bf1[14] = bf1[15];
  idct32_stage4_sse4_1(bf1, &cospim8, &cospi56, &cospi8, &cospim56, &cospim40,
                       &cospi24, &cospi40, &cospim24, &rounding, bit);

  // stage 5
  bf1[0] = half_btf_0_sse4_1(&cospi32, &bf1[0], &rounding, bit);
  bf1[1] = bf1[0];
  bf1[5] = bf1[4];
  bf1[6] = bf1[7];
  idct32_stage5_sse4_1(bf1, &cospim16, &cospi48, &cospi16, &cospim48,
                       &clamp_lo, &clamp_hi, &rounding, bit);

  // stage 6
  bf1[3] = bf1[0];
  bf1[2] = bf1[1];
  idct32_stage6_sse4_1(bf1, &cospim32, &cospi32, &cospim16, &cospi48, &cospi16,
                       &cospim48, &clamp_lo, &clamp_hi, &rounding, bit);

  // stage 7
  idct32_stage7_sse4_1(bf1, &cospim32, &cospi32, &clamp_lo, &clamp_hi,
                       &rounding, bit);

  // stage 8
  idct32_stage8_sse4_1(bf1, &cospim32, &cospi32, &clamp_lo, &clamp_hi,
                       &rounding, bit);

  // stage 9 - final add/sub butterflies plus output shift/clamp, written
  // straight to `out`.
  idct32_stage9_sse4_1(bf1, out, do_cols, bd, out_shift, &clamp_lo, &clamp_hi);
}
pushq %r14 pushq %rbx subq $0x858, %rsp # imm = 0x858 movq %rdi, 0x430(%rsp) movq %rsi, 0x428(%rsp) movl %edx, 0x424(%rsp) movl %ecx, 0x420(%rsp) movl %r8d, 0x41c(%rsp) movl %r9d, 0x418(%rsp) movl 0x424(%rsp), %edi callq 0x7a1bb0 movq %rax, 0x410(%rsp) movq 0x410(%rsp), %rax movl 0xf8(%rax), %eax movl %eax, 0x4ac(%rsp) movl 0x4ac(%rsp), %eax movl %eax, 0x4cc(%rsp) movl %eax, 0x4c8(%rsp) movl %eax, 0x4c4(%rsp) movl %eax, 0x4c0(%rsp) movl 0x4c4(%rsp), %edx movl 0x4c8(%rsp), %ecx movl 0x4cc(%rsp), %eax movd 0x4c0(%rsp), %xmm0 pinsrd $0x1, %edx, %xmm0 pinsrd $0x2, %ecx, %xmm0 pinsrd $0x3, %eax, %xmm0 movaps %xmm0, 0x4b0(%rsp) movaps 0x4b0(%rsp), %xmm0 movaps %xmm0, 0x400(%rsp) movq 0x410(%rsp), %rax movl 0x38(%rax), %eax movl %eax, 0x4a8(%rsp) movl 0x4a8(%rsp), %eax movl %eax, 0x4ec(%rsp) movl %eax, 0x4e8(%rsp) movl %eax, 0x4e4(%rsp) movl %eax, 0x4e0(%rsp) movl 0x4e4(%rsp), %edx movl 0x4e8(%rsp), %ecx movl 0x4ec(%rsp), %eax movd 0x4e0(%rsp), %xmm0 pinsrd $0x1, %edx, %xmm0 pinsrd $0x2, %ecx, %xmm0 pinsrd $0x3, %eax, %xmm0 movaps %xmm0, 0x4d0(%rsp) movaps 0x4d0(%rsp), %xmm0 movaps %xmm0, 0x3f0(%rsp) movq 0x410(%rsp), %rax movl 0xd8(%rax), %eax movl %eax, 0x4a4(%rsp) movl 0x4a4(%rsp), %eax movl %eax, 0x50c(%rsp) movl %eax, 0x508(%rsp) movl %eax, 0x504(%rsp) movl %eax, 0x500(%rsp) movl 0x504(%rsp), %edx movl 0x508(%rsp), %ecx movl 0x50c(%rsp), %eax movd 0x500(%rsp), %xmm0 pinsrd $0x1, %edx, %xmm0 pinsrd $0x2, %ecx, %xmm0 pinsrd $0x3, %eax, %xmm0 movaps %xmm0, 0x4f0(%rsp) movaps 0x4f0(%rsp), %xmm0 movaps %xmm0, 0x3e0(%rsp) movq 0x410(%rsp), %rax movl 0x18(%rax), %eax movl %eax, 0x4a0(%rsp) movl 0x4a0(%rsp), %eax movl %eax, 0x52c(%rsp) movl %eax, 0x528(%rsp) movl %eax, 0x524(%rsp) movl %eax, 0x520(%rsp) movl 0x524(%rsp), %edx movl 0x528(%rsp), %ecx movl 0x52c(%rsp), %eax movd 0x520(%rsp), %xmm0 pinsrd $0x1, %edx, %xmm0 pinsrd $0x2, %ecx, %xmm0 pinsrd $0x3, %eax, %xmm0 movaps %xmm0, 0x510(%rsp) movaps 0x510(%rsp), %xmm0 movaps %xmm0, 0x3d0(%rsp) movq 0x410(%rsp), %rax movl 
0x28(%rax), %eax movl %eax, 0x49c(%rsp) movl 0x49c(%rsp), %eax movl %eax, 0x54c(%rsp) movl %eax, 0x548(%rsp) movl %eax, 0x544(%rsp) movl %eax, 0x540(%rsp) movl 0x544(%rsp), %edx movl 0x548(%rsp), %ecx movl 0x54c(%rsp), %eax movd 0x540(%rsp), %xmm0 pinsrd $0x1, %edx, %xmm0 pinsrd $0x2, %ecx, %xmm0 pinsrd $0x3, %eax, %xmm0 movaps %xmm0, 0x530(%rsp) movaps 0x530(%rsp), %xmm0 movaps %xmm0, 0x3c0(%rsp) movq 0x410(%rsp), %rax movl 0x8(%rax), %eax movl %eax, 0x498(%rsp) movl 0x498(%rsp), %eax movl %eax, 0x56c(%rsp) movl %eax, 0x568(%rsp) movl %eax, 0x564(%rsp) movl %eax, 0x560(%rsp) movl 0x564(%rsp), %edx movl 0x568(%rsp), %ecx movl 0x56c(%rsp), %eax movd 0x560(%rsp), %xmm0 pinsrd $0x1, %edx, %xmm0 pinsrd $0x2, %ecx, %xmm0 pinsrd $0x3, %eax, %xmm0 movaps %xmm0, 0x550(%rsp) movaps 0x550(%rsp), %xmm0 movaps %xmm0, 0x3b0(%rsp) movq 0x410(%rsp), %rax movl 0xe8(%rax), %eax negl %eax movl %eax, 0x494(%rsp) movl 0x494(%rsp), %eax movl %eax, 0x58c(%rsp) movl %eax, 0x588(%rsp) movl %eax, 0x584(%rsp) movl %eax, 0x580(%rsp) movl 0x584(%rsp), %edx movl 0x588(%rsp), %ecx movl 0x58c(%rsp), %eax movd 0x580(%rsp), %xmm0 pinsrd $0x1, %edx, %xmm0 pinsrd $0x2, %ecx, %xmm0 pinsrd $0x3, %eax, %xmm0 movaps %xmm0, 0x570(%rsp) movaps 0x570(%rsp), %xmm0 movaps %xmm0, 0x3a0(%rsp) movq 0x410(%rsp), %rax movl 0xc8(%rax), %eax negl %eax movl %eax, 0x490(%rsp) movl 0x490(%rsp), %eax movl %eax, 0x5ac(%rsp) movl %eax, 0x5a8(%rsp) movl %eax, 0x5a4(%rsp) movl %eax, 0x5a0(%rsp) movl 0x5a4(%rsp), %edx movl 0x5a8(%rsp), %ecx movl 0x5ac(%rsp), %eax movd 0x5a0(%rsp), %xmm0 pinsrd $0x1, %edx, %xmm0 pinsrd $0x2, %ecx, %xmm0 pinsrd $0x3, %eax, %xmm0 movaps %xmm0, 0x590(%rsp) movaps 0x590(%rsp), %xmm0 movaps %xmm0, 0x390(%rsp) movq 0x410(%rsp), %rax movl 0xf0(%rax), %eax movl %eax, 0x48c(%rsp) movl 0x48c(%rsp), %eax movl %eax, 0x5cc(%rsp) movl %eax, 0x5c8(%rsp) movl %eax, 0x5c4(%rsp) movl %eax, 0x5c0(%rsp) movl 0x5c4(%rsp), %edx movl 0x5c8(%rsp), %ecx movl 0x5cc(%rsp), %eax movd 0x5c0(%rsp), %xmm0 pinsrd $0x1, 
%edx, %xmm0 pinsrd $0x2, %ecx, %xmm0 pinsrd $0x3, %eax, %xmm0 movaps %xmm0, 0x5b0(%rsp) movaps 0x5b0(%rsp), %xmm0 movaps %xmm0, 0x380(%rsp) movq 0x410(%rsp), %rax movl 0x30(%rax), %eax movl %eax, 0x488(%rsp) movl 0x488(%rsp), %eax movl %eax, 0x5ec(%rsp) movl %eax, 0x5e8(%rsp) movl %eax, 0x5e4(%rsp) movl %eax, 0x5e0(%rsp) movl 0x5e4(%rsp), %edx movl 0x5e8(%rsp), %ecx movl 0x5ec(%rsp), %eax movd 0x5e0(%rsp), %xmm0 pinsrd $0x1, %edx, %xmm0 pinsrd $0x2, %ecx, %xmm0 pinsrd $0x3, %eax, %xmm0 movaps %xmm0, 0x5d0(%rsp) movaps 0x5d0(%rsp), %xmm0 movaps %xmm0, 0x370(%rsp) movq 0x410(%rsp), %rax movl 0x10(%rax), %eax movl %eax, 0x484(%rsp) movl 0x484(%rsp), %eax movl %eax, 0x60c(%rsp) movl %eax, 0x608(%rsp) movl %eax, 0x604(%rsp) movl %eax, 0x600(%rsp) movl 0x604(%rsp), %edx movl 0x608(%rsp), %ecx movl 0x60c(%rsp), %eax movd 0x600(%rsp), %xmm0 pinsrd $0x1, %edx, %xmm0 pinsrd $0x2, %ecx, %xmm0 pinsrd $0x3, %eax, %xmm0 movaps %xmm0, 0x5f0(%rsp) movaps 0x5f0(%rsp), %xmm0 movaps %xmm0, 0x360(%rsp) movq 0x410(%rsp), %rax movl 0xd0(%rax), %eax negl %eax movl %eax, 0x480(%rsp) movl 0x480(%rsp), %eax movl %eax, 0x62c(%rsp) movl %eax, 0x628(%rsp) movl %eax, 0x624(%rsp) movl %eax, 0x620(%rsp) movl 0x624(%rsp), %edx movl 0x628(%rsp), %ecx movl 0x62c(%rsp), %eax movd 0x620(%rsp), %xmm0 pinsrd $0x1, %edx, %xmm0 pinsrd $0x2, %ecx, %xmm0 pinsrd $0x3, %eax, %xmm0 movaps %xmm0, 0x610(%rsp) movaps 0x610(%rsp), %xmm0 movaps %xmm0, 0x350(%rsp) movq 0x410(%rsp), %rax movl 0xe0(%rax), %eax movl %eax, 0x47c(%rsp) movl 0x47c(%rsp), %eax movl %eax, 0x64c(%rsp) movl %eax, 0x648(%rsp) movl %eax, 0x644(%rsp) movl %eax, 0x640(%rsp) movl 0x644(%rsp), %edx movl 0x648(%rsp), %ecx movl 0x64c(%rsp), %eax movd 0x640(%rsp), %xmm0 pinsrd $0x1, %edx, %xmm0 pinsrd $0x2, %ecx, %xmm0 pinsrd $0x3, %eax, %xmm0 movaps %xmm0, 0x630(%rsp) movaps 0x630(%rsp), %xmm0 movaps %xmm0, 0x340(%rsp) movq 0x410(%rsp), %rax movl 0x60(%rax), %eax movl %eax, 0x478(%rsp) movl 0x478(%rsp), %eax movl %eax, 0x66c(%rsp) movl %eax, 
0x668(%rsp) movl %eax, 0x664(%rsp) movl %eax, 0x660(%rsp) movl 0x664(%rsp), %edx movl 0x668(%rsp), %ecx movl 0x66c(%rsp), %eax movd 0x660(%rsp), %xmm0 pinsrd $0x1, %edx, %xmm0 pinsrd $0x2, %ecx, %xmm0 pinsrd $0x3, %eax, %xmm0 movaps %xmm0, 0x650(%rsp) movaps 0x650(%rsp), %xmm0 movaps %xmm0, 0x330(%rsp) movq 0x410(%rsp), %rax movl 0xa0(%rax), %eax movl %eax, 0x474(%rsp) movl 0x474(%rsp), %eax movl %eax, 0x68c(%rsp) movl %eax, 0x688(%rsp) movl %eax, 0x684(%rsp) movl %eax, 0x680(%rsp) movl 0x684(%rsp), %edx movl 0x688(%rsp), %ecx movl 0x68c(%rsp), %eax movd 0x680(%rsp), %xmm0 pinsrd $0x1, %edx, %xmm0 pinsrd $0x2, %ecx, %xmm0 pinsrd $0x3, %eax, %xmm0 movaps %xmm0, 0x670(%rsp) movaps 0x670(%rsp), %xmm0 movaps %xmm0, 0x320(%rsp) movq 0x410(%rsp), %rax movl 0x20(%rax), %eax movl %eax, 0x470(%rsp) movl 0x470(%rsp), %eax movl %eax, 0x6ac(%rsp) movl %eax, 0x6a8(%rsp) movl %eax, 0x6a4(%rsp) movl %eax, 0x6a0(%rsp) movl 0x6a4(%rsp), %edx movl 0x6a8(%rsp), %ecx movl 0x6ac(%rsp), %eax movd 0x6a0(%rsp), %xmm0 pinsrd $0x1, %edx, %xmm0 pinsrd $0x2, %ecx, %xmm0 pinsrd $0x3, %eax, %xmm0 movaps %xmm0, 0x690(%rsp) movaps 0x690(%rsp), %xmm0 movaps %xmm0, 0x310(%rsp) movq 0x410(%rsp), %rax movl 0xa0(%rax), %eax negl %eax movl %eax, 0x46c(%rsp) movl 0x46c(%rsp), %eax movl %eax, 0x6cc(%rsp) movl %eax, 0x6c8(%rsp) movl %eax, 0x6c4(%rsp) movl %eax, 0x6c0(%rsp) movl 0x6c4(%rsp), %edx movl 0x6c8(%rsp), %ecx movl 0x6cc(%rsp), %eax movd 0x6c0(%rsp), %xmm0 pinsrd $0x1, %edx, %xmm0 pinsrd $0x2, %ecx, %xmm0 pinsrd $0x3, %eax, %xmm0 movaps %xmm0, 0x6b0(%rsp) movaps 0x6b0(%rsp), %xmm0 movaps %xmm0, 0x300(%rsp) movq 0x410(%rsp), %rax movl 0x20(%rax), %eax negl %eax movl %eax, 0x468(%rsp) movl 0x468(%rsp), %eax movl %eax, 0x6ec(%rsp) movl %eax, 0x6e8(%rsp) movl %eax, 0x6e4(%rsp) movl %eax, 0x6e0(%rsp) movl 0x6e4(%rsp), %edx movl 0x6e8(%rsp), %ecx movl 0x6ec(%rsp), %eax movd 0x6e0(%rsp), %xmm0 pinsrd $0x1, %edx, %xmm0 pinsrd $0x2, %ecx, %xmm0 pinsrd $0x3, %eax, %xmm0 movaps %xmm0, 0x6d0(%rsp) movaps 
0x6d0(%rsp), %xmm0 movaps %xmm0, 0x2f0(%rsp) movq 0x410(%rsp), %rax movl 0xe0(%rax), %eax negl %eax movl %eax, 0x464(%rsp) movl 0x464(%rsp), %eax movl %eax, 0x70c(%rsp) movl %eax, 0x708(%rsp) movl %eax, 0x704(%rsp) movl %eax, 0x700(%rsp) movl 0x704(%rsp), %edx movl 0x708(%rsp), %ecx movl 0x70c(%rsp), %eax movd 0x700(%rsp), %xmm0 pinsrd $0x1, %edx, %xmm0 pinsrd $0x2, %ecx, %xmm0 pinsrd $0x3, %eax, %xmm0 movaps %xmm0, 0x6f0(%rsp) movaps 0x6f0(%rsp), %xmm0 movaps %xmm0, 0x2e0(%rsp) movq 0x410(%rsp), %rax movl 0x60(%rax), %eax negl %eax movl %eax, 0x460(%rsp) movl 0x460(%rsp), %eax movl %eax, 0x72c(%rsp) movl %eax, 0x728(%rsp) movl %eax, 0x724(%rsp) movl %eax, 0x720(%rsp) movl 0x724(%rsp), %edx movl 0x728(%rsp), %ecx movl 0x72c(%rsp), %eax movd 0x720(%rsp), %xmm0 pinsrd $0x1, %edx, %xmm0 pinsrd $0x2, %ecx, %xmm0 pinsrd $0x3, %eax, %xmm0 movaps %xmm0, 0x710(%rsp) movaps 0x710(%rsp), %xmm0 movaps %xmm0, 0x2d0(%rsp) movq 0x410(%rsp), %rax movl 0x80(%rax), %eax movl %eax, 0x45c(%rsp) movl 0x45c(%rsp), %eax movl %eax, 0x74c(%rsp) movl %eax, 0x748(%rsp) movl %eax, 0x744(%rsp) movl %eax, 0x740(%rsp) movl 0x744(%rsp), %edx movl 0x748(%rsp), %ecx movl 0x74c(%rsp), %eax movd 0x740(%rsp), %xmm0 pinsrd $0x1, %edx, %xmm0 pinsrd $0x2, %ecx, %xmm0 pinsrd $0x3, %eax, %xmm0 movaps %xmm0, 0x730(%rsp) movaps 0x730(%rsp), %xmm0 movaps %xmm0, 0x2c0(%rsp) movq 0x410(%rsp), %rax movl 0x80(%rax), %eax negl %eax movl %eax, 0x458(%rsp) movl 0x458(%rsp), %eax movl %eax, 0x76c(%rsp) movl %eax, 0x768(%rsp) movl %eax, 0x764(%rsp) movl %eax, 0x760(%rsp) movl 0x764(%rsp), %edx movl 0x768(%rsp), %ecx movl 0x76c(%rsp), %eax movd 0x760(%rsp), %xmm0 pinsrd $0x1, %edx, %xmm0 pinsrd $0x2, %ecx, %xmm0 pinsrd $0x3, %eax, %xmm0 movaps %xmm0, 0x750(%rsp) movaps 0x750(%rsp), %xmm0 movaps %xmm0, 0x2b0(%rsp) movq 0x410(%rsp), %rax movl 0xc0(%rax), %eax movl %eax, 0x454(%rsp) movl 0x454(%rsp), %eax movl %eax, 0x78c(%rsp) movl %eax, 0x788(%rsp) movl %eax, 0x784(%rsp) movl %eax, 0x780(%rsp) movl 0x784(%rsp), %edx 
movl 0x788(%rsp), %ecx movl 0x78c(%rsp), %eax movd 0x780(%rsp), %xmm0 pinsrd $0x1, %edx, %xmm0 pinsrd $0x2, %ecx, %xmm0 pinsrd $0x3, %eax, %xmm0 movaps %xmm0, 0x770(%rsp) movaps 0x770(%rsp), %xmm0 movaps %xmm0, 0x2a0(%rsp) movq 0x410(%rsp), %rax movl 0xc0(%rax), %eax negl %eax movl %eax, 0x450(%rsp) movl 0x450(%rsp), %eax movl %eax, 0x7ac(%rsp) movl %eax, 0x7a8(%rsp) movl %eax, 0x7a4(%rsp) movl %eax, 0x7a0(%rsp) movl 0x7a4(%rsp), %edx movl 0x7a8(%rsp), %ecx movl 0x7ac(%rsp), %eax movd 0x7a0(%rsp), %xmm0 pinsrd $0x1, %edx, %xmm0 pinsrd $0x2, %ecx, %xmm0 pinsrd $0x3, %eax, %xmm0 movaps %xmm0, 0x790(%rsp) movaps 0x790(%rsp), %xmm0 movaps %xmm0, 0x290(%rsp) movq 0x410(%rsp), %rax movl 0x40(%rax), %eax movl %eax, 0x44c(%rsp) movl 0x44c(%rsp), %eax movl %eax, 0x7cc(%rsp) movl %eax, 0x7c8(%rsp) movl %eax, 0x7c4(%rsp) movl %eax, 0x7c0(%rsp) movl 0x7c4(%rsp), %edx movl 0x7c8(%rsp), %ecx movl 0x7cc(%rsp), %eax movd 0x7c0(%rsp), %xmm0 pinsrd $0x1, %edx, %xmm0 pinsrd $0x2, %ecx, %xmm0 pinsrd $0x3, %eax, %xmm0 movaps %xmm0, 0x7b0(%rsp) movaps 0x7b0(%rsp), %xmm0 movaps %xmm0, 0x280(%rsp) movq 0x410(%rsp), %rax movl 0x40(%rax), %eax negl %eax movl %eax, 0x448(%rsp) movl 0x448(%rsp), %eax movl %eax, 0x7ec(%rsp) movl %eax, 0x7e8(%rsp) movl %eax, 0x7e4(%rsp) movl %eax, 0x7e0(%rsp) movl 0x7e4(%rsp), %edx movl 0x7e8(%rsp), %ecx movl 0x7ec(%rsp), %eax movd 0x7e0(%rsp), %xmm0 pinsrd $0x1, %edx, %xmm0 pinsrd $0x2, %ecx, %xmm0 pinsrd $0x3, %eax, %xmm0 movaps %xmm0, 0x7d0(%rsp) movaps 0x7d0(%rsp), %xmm0 movaps %xmm0, 0x270(%rsp) movb 0x424(%rsp), %cl decb %cl movl $0x1, %eax shll %cl, %eax movl %eax, 0x444(%rsp) movl 0x444(%rsp), %eax movl %eax, 0x80c(%rsp) movl %eax, 0x808(%rsp) movl %eax, 0x804(%rsp) movl %eax, 0x800(%rsp) movl 0x804(%rsp), %edx movl 0x808(%rsp), %ecx movl 0x80c(%rsp), %eax movd 0x800(%rsp), %xmm0 pinsrd $0x1, %edx, %xmm0 pinsrd $0x2, %ecx, %xmm0 pinsrd $0x3, %eax, %xmm0 movdqa %xmm0, 0x7f0(%rsp) movdqa 0x7f0(%rsp), %xmm0 movdqa %xmm0, 0x260(%rsp) movl 0x41c(%rsp), %ecx 
movl 0x420(%rsp), %esi movl $0x8, %eax movl $0x6, %edx cmpl $0x0, %esi cmovnel %edx, %eax addl %eax, %ecx movl $0x10, %eax cmpl %ecx, %eax jle 0x7b9c15 movl $0x10, %eax movl %eax, 0x2c(%rsp) jmp 0x7b9c39 movl 0x41c(%rsp), %eax movl 0x420(%rsp), %esi movl $0x8, %ecx movl $0x6, %edx cmpl $0x0, %esi cmovnel %edx, %ecx addl %ecx, %eax movl %eax, 0x2c(%rsp) movl 0x2c(%rsp), %eax movl %eax, 0x25c(%rsp) movb 0x25c(%rsp), %cl decb %cl movl $0x1, %eax movl %eax, %edx shll %cl, %edx movl %edx, %ecx negl %ecx movl %ecx, 0x440(%rsp) movl 0x440(%rsp), %ecx movl %ecx, 0x82c(%rsp) movl %ecx, 0x828(%rsp) movl %ecx, 0x824(%rsp) movl %ecx, 0x820(%rsp) movl 0x824(%rsp), %esi movl 0x828(%rsp), %edx movl 0x82c(%rsp), %ecx movd 0x820(%rsp), %xmm0 pinsrd $0x1, %esi, %xmm0 pinsrd $0x2, %edx, %xmm0 pinsrd $0x3, %ecx, %xmm0 movaps %xmm0, 0x810(%rsp) movaps 0x810(%rsp), %xmm0 movaps %xmm0, 0x240(%rsp) movb 0x25c(%rsp), %cl decb %cl shll %cl, %eax decl %eax movl %eax, 0x43c(%rsp) movl 0x43c(%rsp), %eax movl %eax, 0x854(%rsp) movl %eax, 0x850(%rsp) movl %eax, 0x84c(%rsp) movl %eax, 0x848(%rsp) movl 0x84c(%rsp), %edx movl 0x850(%rsp), %ecx movl 0x854(%rsp), %eax movd 0x848(%rsp), %xmm0 pinsrd $0x1, %edx, %xmm0 pinsrd $0x2, %ecx, %xmm0 pinsrd $0x3, %eax, %xmm0 movdqa %xmm0, 0x830(%rsp) movdqa 0x830(%rsp), %xmm0 movdqa %xmm0, 0x230(%rsp) movq 0x430(%rsp), %rax movdqa (%rax), %xmm0 movdqa %xmm0, 0x30(%rsp) movq 0x430(%rsp), %rax movdqa 0x40(%rax), %xmm0 movdqa %xmm0, 0x70(%rsp) movq 0x430(%rsp), %rax movdqa 0x20(%rax), %xmm0 movdqa %xmm0, 0xb0(%rsp) movq 0x430(%rsp), %rax movdqa 0x60(%rax), %xmm0 movdqa %xmm0, 0xf0(%rsp) movq 0x430(%rsp), %rax movdqa 0x10(%rax), %xmm0 movdqa %xmm0, 0x130(%rsp) movq 0x430(%rsp), %rax movdqa 0x50(%rax), %xmm0 movdqa %xmm0, 0x170(%rsp) movq 0x430(%rsp), %rax movdqa 0x30(%rax), %xmm0 movdqa %xmm0, 0x1b0(%rsp) movq 0x430(%rsp), %rax movdqa 0x70(%rax), %xmm0 movdqa %xmm0, 0x1f0(%rsp) leaq 0x30(%rsp), %rsi addq $0x100, %rsi # imm = 0x100 movl 0x424(%rsp), %ecx leaq 
0x3b0(%rsp), %rdi leaq 0x260(%rsp), %rdx callq 0x7cf7c0 movdqa %xmm0, 0x220(%rsp) leaq 0x30(%rsp), %rsi addq $0x100, %rsi # imm = 0x100 movl 0x424(%rsp), %ecx leaq 0x400(%rsp), %rdi leaq 0x260(%rsp), %rdx callq 0x7cf7c0 movdqa %xmm0, 0x130(%rsp) leaq 0x30(%rsp), %rsi addq $0x1c0, %rsi # imm = 0x1C0 movl 0x424(%rsp), %ecx leaq 0x390(%rsp), %rdi leaq 0x260(%rsp), %rdx callq 0x7cf7c0 movdqa %xmm0, 0x160(%rsp) leaq 0x30(%rsp), %rsi addq $0x1c0, %rsi # imm = 0x1C0 movl 0x424(%rsp), %ecx leaq 0x3f0(%rsp), %rdi leaq 0x260(%rsp), %rdx callq 0x7cf7c0 movdqa %xmm0, 0x1f0(%rsp) leaq 0x30(%rsp), %rsi addq $0x140, %rsi # imm = 0x140 movl 0x424(%rsp), %ecx leaq 0x3c0(%rsp), %rdi leaq 0x260(%rsp), %rdx callq 0x7cf7c0 movdqa %xmm0, 0x1e0(%rsp) leaq 0x30(%rsp), %rsi addq $0x140, %rsi # imm = 0x140 movl 0x424(%rsp), %ecx leaq 0x3e0(%rsp), %rdi leaq 0x260(%rsp), %rdx callq 0x7cf7c0 movdqa %xmm0, 0x170(%rsp) leaq 0x30(%rsp), %rsi addq $0x180, %rsi # imm = 0x180 movl 0x424(%rsp), %ecx leaq 0x3a0(%rsp), %rdi leaq 0x260(%rsp), %rdx callq 0x7cf7c0 movdqa %xmm0, 0x1a0(%rsp) leaq 0x30(%rsp), %rsi addq $0x180, %rsi # imm = 0x180 movl 0x424(%rsp), %ecx leaq 0x3d0(%rsp), %rdi leaq 0x260(%rsp), %rdx callq 0x7cf7c0 movdqa %xmm0, 0x1b0(%rsp) leaq 0x30(%rsp), %rsi addq $0x80, %rsi movl 0x424(%rsp), %ecx leaq 0x360(%rsp), %rdi leaq 0x260(%rsp), %rdx callq 0x7cf7c0 movdqa %xmm0, 0x120(%rsp) leaq 0x30(%rsp), %rsi addq $0x80, %rsi movl 0x424(%rsp), %ecx leaq 0x380(%rsp), %rdi leaq 0x260(%rsp), %rdx callq 0x7cf7c0 movdqa %xmm0, 0xb0(%rsp) leaq 0x30(%rsp), %rsi addq $0xc0, %rsi movl 0x424(%rsp), %ecx leaq 0x350(%rsp), %rdi leaq 0x260(%rsp), %rdx callq 0x7cf7c0 movdqa %xmm0, 0xe0(%rsp) leaq 0x30(%rsp), %rsi addq $0xc0, %rsi movl 0x424(%rsp), %ecx leaq 0x370(%rsp), %rdi leaq 0x260(%rsp), %rdx callq 0x7cf7c0 movdqa %xmm0, 0xf0(%rsp) movdqa 0x130(%rsp), %xmm0 movdqa %xmm0, 0x140(%rsp) movdqa 0x160(%rsp), %xmm0 movdqa %xmm0, 0x150(%rsp) movdqa 0x170(%rsp), %xmm0 movdqa %xmm0, 0x180(%rsp) movdqa 0x1a0(%rsp), 
%xmm0 movdqa %xmm0, 0x190(%rsp) movdqa 0x1b0(%rsp), %xmm0 movdqa %xmm0, 0x1c0(%rsp) movdqa 0x1e0(%rsp), %xmm0 movdqa %xmm0, 0x1d0(%rsp) movdqa 0x1f0(%rsp), %xmm0 movdqa %xmm0, 0x200(%rsp) movdqa 0x220(%rsp), %xmm0 movdqa %xmm0, 0x210(%rsp) leaq 0x30(%rsp), %rsi addq $0x40, %rsi movl 0x424(%rsp), %ecx leaq 0x310(%rsp), %rdi leaq 0x260(%rsp), %rdx callq 0x7cf7c0 movdqa %xmm0, 0xa0(%rsp) leaq 0x30(%rsp), %rsi addq $0x40, %rsi movl 0x424(%rsp), %ecx leaq 0x340(%rsp), %rdi leaq 0x260(%rsp), %rdx callq 0x7cf7c0 movdqa %xmm0, 0x70(%rsp) movdqa 0xb0(%rsp), %xmm0 movdqa %xmm0, 0xc0(%rsp) movdqa 0xe0(%rsp), %xmm0 movdqa %xmm0, 0xd0(%rsp) movdqa 0xf0(%rsp), %xmm0 movdqa %xmm0, 0x100(%rsp) movdqa 0x120(%rsp), %xmm0 movdqa %xmm0, 0x110(%rsp) leaq 0x30(%rsp), %rdi movl 0x424(%rsp), %eax leaq 0x2f0(%rsp), %rsi leaq 0x340(%rsp), %rdx leaq 0x310(%rsp), %rcx leaq 0x2e0(%rsp), %r8 leaq 0x300(%rsp), %r9 leaq 0x330(%rsp), %r14 leaq 0x320(%rsp), %rbx leaq 0x2d0(%rsp), %r11 leaq 0x260(%rsp), %r10 movq %r14, (%rsp) movq %rbx, 0x8(%rsp) movq %r11, 0x10(%rsp) movq %r10, 0x18(%rsp) movl %eax, 0x20(%rsp) callq 0x7cf9a0 leaq 0x30(%rsp), %rsi movl 0x424(%rsp), %ecx leaq 0x2c0(%rsp), %rdi leaq 0x260(%rsp), %rdx callq 0x7cf7c0 movdqa %xmm0, 0x30(%rsp) movdqa 0x30(%rsp), %xmm0 movdqa %xmm0, 0x40(%rsp) movdqa 0x70(%rsp), %xmm0 movdqa %xmm0, 0x80(%rsp) movdqa 0xa0(%rsp), %xmm0 movdqa %xmm0, 0x90(%rsp) leaq 0x30(%rsp), %rdi movl 0x424(%rsp), %eax leaq 0x270(%rsp), %rsi leaq 0x2a0(%rsp), %rdx leaq 0x280(%rsp), %rcx leaq 0x290(%rsp), %r8 leaq 0x240(%rsp), %r9 leaq 0x230(%rsp), %r11 leaq 0x260(%rsp), %r10 movq %r11, (%rsp) movq %r10, 0x8(%rsp) movl %eax, 0x10(%rsp) callq 0x7cfc20 movdqa 0x30(%rsp), %xmm0 movdqa %xmm0, 0x60(%rsp) movdqa 0x40(%rsp), %xmm0 movdqa %xmm0, 0x50(%rsp) leaq 0x30(%rsp), %rdi movl 0x424(%rsp), %eax leaq 0x2b0(%rsp), %rsi leaq 0x2c0(%rsp), %rdx leaq 0x270(%rsp), %rcx leaq 0x2a0(%rsp), %r8 leaq 0x280(%rsp), %r9 leaq 0x290(%rsp), %r14 leaq 0x240(%rsp), %rbx leaq 0x230(%rsp), %r11 
leaq 0x260(%rsp), %r10 movq %r14, (%rsp) movq %rbx, 0x8(%rsp) movq %r11, 0x10(%rsp) movq %r10, 0x18(%rsp) movl %eax, 0x20(%rsp) callq 0x7cff70 leaq 0x30(%rsp), %rdi movl 0x424(%rsp), %eax leaq 0x2b0(%rsp), %rsi leaq 0x2c0(%rsp), %rdx leaq 0x240(%rsp), %rcx leaq 0x230(%rsp), %r8 leaq 0x260(%rsp), %r9 movl %eax, (%rsp) callq 0x7d0370 leaq 0x30(%rsp), %rdi movl 0x424(%rsp), %eax leaq 0x2b0(%rsp), %rsi leaq 0x2c0(%rsp), %rdx leaq 0x240(%rsp), %rcx leaq 0x230(%rsp), %r8 leaq 0x260(%rsp), %r9 movl %eax, (%rsp) callq 0x7d0780 leaq 0x30(%rsp), %rdi movq 0x428(%rsp), %rsi movl 0x420(%rsp), %edx movl 0x41c(%rsp), %ecx movl 0x418(%rsp), %r8d leaq 0x240(%rsp), %r9 leaq 0x230(%rsp), %rax movq %rax, (%rsp) callq 0x7d0ba0 addq $0x858, %rsp # imm = 0x858 popq %rbx popq %r14 retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/av1/common/x86/highbd_inv_txfm_sse4.c
av1_build_compound_diffwtd_mask_d16_sse4_1
// Builds a difference-weighted compound-prediction mask from two 16-bit
// intermediate (convolve-buffer) predictions, SSE4.1 path.  For each pixel
// the absolute difference of the two predictions is rounded, scaled by
// 1 << DIFF_FACTOR_LOG2, offset by `mask_base`, clamped to
// AOM_BLEND_A64_MAX_ALPHA, optionally inverted (DIFFWTD_38_INV), and stored
// as an 8-bit mask value.  Processes 8 pixels per iteration; `w` is assumed
// to be a multiple of 4 — TODO confirm against callers.
void av1_build_compound_diffwtd_mask_d16_sse4_1(
    uint8_t *mask, DIFFWTD_MASK_TYPE mask_type, const CONV_BUF_TYPE *src0,
    int src0_stride, const CONV_BUF_TYPE *src1, int src1_stride, int h, int w,
    ConvolveParams *conv_params, int bd) {
  // DIFFWTD_38 stores m directly; the other type stores 64 - m, realized
  // below as negate-then-add via add_sign/add_const.
  const int which_inverse = (mask_type == DIFFWTD_38) ? 0 : 1;
  const int mask_base = 38;
  // Total down-shift that maps the CONV_BUF_TYPE domain back to pixel
  // precision: undo the two convolve rounds, adjusted for bit depth.
  int round =
      2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1 + (bd - 8);
  const __m128i round_const = _mm_set1_epi16((1 << round) >> 1);
  const __m128i mask_base_16 = _mm_set1_epi16(mask_base);
  const __m128i clip_diff = _mm_set1_epi16(AOM_BLEND_A64_MAX_ALPHA);
  const __m128i add_const =
      _mm_set1_epi16((which_inverse ? AOM_BLEND_A64_MAX_ALPHA : 0));
  const __m128i add_sign = _mm_set1_epi16((which_inverse ? -1 : 1));
  int i, j;
  // When rounding constant is added, there is a possibility of overflow.
  // However that much precision is not required. Code should very well work
  // for other values of DIFF_FACTOR_LOG2 and AOM_BLEND_A64_MAX_ALPHA as well.
  // But there is a possibility of corner case bugs.
  assert(DIFF_FACTOR_LOG2 == 4);
  assert(AOM_BLEND_A64_MAX_ALPHA == 64);
  for (i = 0; i < h; ++i) {
    for (j = 0; j < w; j += 8) {
      const __m128i data_src0 =
          _mm_loadu_si128((__m128i *)&src0[(i * src0_stride) + j]);
      const __m128i data_src1 =
          _mm_loadu_si128((__m128i *)&src1[(i * src1_stride) + j]);
      // |src0 - src1| via two saturating unsigned subtractions: exactly one
      // of diffa/diffb is non-zero per lane, so max() yields the abs diff.
      const __m128i diffa = _mm_subs_epu16(data_src0, data_src1);
      const __m128i diffb = _mm_subs_epu16(data_src1, data_src0);
      const __m128i diff = _mm_max_epu16(diffa, diffb);
      const __m128i diff_round =
          _mm_srli_epi16(_mm_adds_epu16(diff, round_const), round);
      const __m128i diff_factor = _mm_srli_epi16(diff_round, DIFF_FACTOR_LOG2);
      const __m128i diff_mask = _mm_adds_epi16(diff_factor, mask_base_16);
      __m128i diff_clamp = _mm_min_epi16(diff_mask, clip_diff);
      // clamp to 0 can be skipped since we are using add and saturate
      // instruction
      // For the inverse mask: negate then add 64, i.e. 64 - m.
      const __m128i diff_sign = _mm_sign_epi16(diff_clamp, add_sign);
      const __m128i diff_const_16 = _mm_add_epi16(diff_sign, add_const);
      // 8 bit conversion and saturation to uint8
      const __m128i res_8 = _mm_packus_epi16(diff_const_16, diff_const_16);
      // Store values into the destination buffer
      __m128i *const dst = (__m128i *)&mask[i * w + j];
      if ((w - j) > 4) {
        // At least 8 valid pixels remain: store the low 8 bytes.
        _mm_storel_epi64(dst, res_8);
      } else {  // w==4
        // Tail of a width-4 block: only 4 mask bytes are valid.
        *(int *)dst = _mm_cvtsi128_si32(res_8);
      }
    }
  }
}
subq $0x368, %rsp # imm = 0x368 movb %sil, %al movl 0x388(%rsp), %esi movq 0x380(%rsp), %rsi movl 0x378(%rsp), %esi movl 0x370(%rsp), %esi movq %rdi, 0xf8(%rsp) movb %al, 0xf7(%rsp) movq %rdx, 0xe8(%rsp) movl %ecx, 0xe4(%rsp) movq %r8, 0xd8(%rsp) movl %r9d, 0xd4(%rsp) movb 0xf7(%rsp), %al testb %al, %al setne %al movzbl %al, %eax movl %eax, 0xd0(%rsp) movl $0x26, 0xcc(%rsp) movq 0x380(%rsp), %rax movl 0x14(%rax), %edx movl 0x18(%rax), %eax addl %eax, %edx movl 0x388(%rsp), %ecx movl %ecx, %eax negl %edx movl %edx, %ecx leal 0x6(%rax,%rcx), %eax movl %eax, 0xc8(%rsp) movb 0xc8(%rsp), %cl movl $0x1, %eax shll %cl, %eax shrl %eax movw %ax, 0x10e(%rsp) movw 0x10e(%rsp), %ax movw %ax, -0x72(%rsp) movw %ax, 0x2de(%rsp) movw %ax, 0x2dc(%rsp) movw %ax, 0x2da(%rsp) movw %ax, 0x2d8(%rsp) movw %ax, 0x2d6(%rsp) movw %ax, 0x2d4(%rsp) movw %ax, 0x2d2(%rsp) movw %ax, 0x2d0(%rsp) movzwl 0x2d0(%rsp), %eax movd %eax, %xmm0 movzwl 0x2d2(%rsp), %eax pinsrw $0x1, %eax, %xmm0 movzwl 0x2d4(%rsp), %eax pinsrw $0x2, %eax, %xmm0 movzwl 0x2d6(%rsp), %eax pinsrw $0x3, %eax, %xmm0 movzwl 0x2d8(%rsp), %eax pinsrw $0x4, %eax, %xmm0 movzwl 0x2da(%rsp), %eax pinsrw $0x5, %eax, %xmm0 movzwl 0x2dc(%rsp), %eax pinsrw $0x6, %eax, %xmm0 movzwl 0x2de(%rsp), %eax pinsrw $0x7, %eax, %xmm0 movaps %xmm0, 0x2c0(%rsp) movaps 0x2c0(%rsp), %xmm0 movaps %xmm0, 0xb0(%rsp) movw $0x26, 0x10c(%rsp) movw 0x10c(%rsp), %ax movw %ax, -0x70(%rsp) movw %ax, 0x2fe(%rsp) movw %ax, 0x2fc(%rsp) movw %ax, 0x2fa(%rsp) movw %ax, 0x2f8(%rsp) movw %ax, 0x2f6(%rsp) movw %ax, 0x2f4(%rsp) movw %ax, 0x2f2(%rsp) movw %ax, 0x2f0(%rsp) movzwl 0x2f0(%rsp), %eax movd %eax, %xmm0 movzwl 0x2f2(%rsp), %eax pinsrw $0x1, %eax, %xmm0 movzwl 0x2f4(%rsp), %eax pinsrw $0x2, %eax, %xmm0 movzwl 0x2f6(%rsp), %eax pinsrw $0x3, %eax, %xmm0 movzwl 0x2f8(%rsp), %eax pinsrw $0x4, %eax, %xmm0 movzwl 0x2fa(%rsp), %eax pinsrw $0x5, %eax, %xmm0 movzwl 0x2fc(%rsp), %eax pinsrw $0x6, %eax, %xmm0 movzwl 0x2fe(%rsp), %eax pinsrw $0x7, %eax, %xmm0 movaps %xmm0, 
0x2e0(%rsp) movaps 0x2e0(%rsp), %xmm0 movaps %xmm0, 0xa0(%rsp) movw $0x40, 0x10a(%rsp) movw 0x10a(%rsp), %ax movw %ax, -0x6e(%rsp) movw %ax, 0x31e(%rsp) movw %ax, 0x31c(%rsp) movw %ax, 0x31a(%rsp) movw %ax, 0x318(%rsp) movw %ax, 0x316(%rsp) movw %ax, 0x314(%rsp) movw %ax, 0x312(%rsp) movw %ax, 0x310(%rsp) movzwl 0x310(%rsp), %eax movd %eax, %xmm0 movzwl 0x312(%rsp), %eax pinsrw $0x1, %eax, %xmm0 movzwl 0x314(%rsp), %eax pinsrw $0x2, %eax, %xmm0 movzwl 0x316(%rsp), %eax pinsrw $0x3, %eax, %xmm0 movzwl 0x318(%rsp), %eax pinsrw $0x4, %eax, %xmm0 movzwl 0x31a(%rsp), %eax pinsrw $0x5, %eax, %xmm0 movzwl 0x31c(%rsp), %eax pinsrw $0x6, %eax, %xmm0 movzwl 0x31e(%rsp), %eax pinsrw $0x7, %eax, %xmm0 movaps %xmm0, 0x300(%rsp) movaps 0x300(%rsp), %xmm0 movaps %xmm0, 0x90(%rsp) movl 0xd0(%rsp), %eax testl %eax, %eax setne %al movzbl %al, %eax shll $0x6, %eax movw %ax, 0x108(%rsp) movw 0x108(%rsp), %ax movw %ax, -0x6c(%rsp) movw %ax, 0x33e(%rsp) movw %ax, 0x33c(%rsp) movw %ax, 0x33a(%rsp) movw %ax, 0x338(%rsp) movw %ax, 0x336(%rsp) movw %ax, 0x334(%rsp) movw %ax, 0x332(%rsp) movw %ax, 0x330(%rsp) movzwl 0x330(%rsp), %eax movd %eax, %xmm0 movzwl 0x332(%rsp), %eax pinsrw $0x1, %eax, %xmm0 movzwl 0x334(%rsp), %eax pinsrw $0x2, %eax, %xmm0 movzwl 0x336(%rsp), %eax pinsrw $0x3, %eax, %xmm0 movzwl 0x338(%rsp), %eax pinsrw $0x4, %eax, %xmm0 movzwl 0x33a(%rsp), %eax pinsrw $0x5, %eax, %xmm0 movzwl 0x33c(%rsp), %eax pinsrw $0x6, %eax, %xmm0 movzwl 0x33e(%rsp), %eax pinsrw $0x7, %eax, %xmm0 movaps %xmm0, 0x320(%rsp) movaps 0x320(%rsp), %xmm0 movaps %xmm0, 0x80(%rsp) movl 0xd0(%rsp), %ecx xorl %eax, %eax negl %ecx sbbl %eax, %eax orl $0x1, %eax movw %ax, 0x106(%rsp) movw 0x106(%rsp), %ax movw %ax, -0x6a(%rsp) movw %ax, 0x366(%rsp) movw %ax, 0x364(%rsp) movw %ax, 0x362(%rsp) movw %ax, 0x360(%rsp) movw %ax, 0x35e(%rsp) movw %ax, 0x35c(%rsp) movw %ax, 0x35a(%rsp) movw %ax, 0x358(%rsp) movzwl 0x358(%rsp), %eax movd %eax, %xmm0 movzwl 0x35a(%rsp), %eax pinsrw $0x1, %eax, %xmm0 movzwl 
0x35c(%rsp), %eax pinsrw $0x2, %eax, %xmm0 movzwl 0x35e(%rsp), %eax pinsrw $0x3, %eax, %xmm0 movzwl 0x360(%rsp), %eax pinsrw $0x4, %eax, %xmm0 movzwl 0x362(%rsp), %eax pinsrw $0x5, %eax, %xmm0 movzwl 0x364(%rsp), %eax pinsrw $0x6, %eax, %xmm0 movzwl 0x366(%rsp), %eax pinsrw $0x7, %eax, %xmm0 movdqa %xmm0, 0x340(%rsp) movdqa 0x340(%rsp), %xmm0 movdqa %xmm0, 0x70(%rsp) movl $0x0, 0x6c(%rsp) movl 0x6c(%rsp), %eax cmpl 0x370(%rsp), %eax jge 0x7d84ce movl $0x0, 0x68(%rsp) movl 0x68(%rsp), %eax cmpl 0x378(%rsp), %eax jge 0x7d84bc movq 0xe8(%rsp), %rax movl 0x6c(%rsp), %ecx imull 0xe4(%rsp), %ecx addl 0x68(%rsp), %ecx movslq %ecx, %rcx shlq %rcx addq %rcx, %rax movq %rax, 0x178(%rsp) movq 0x178(%rsp), %rax movdqu (%rax), %xmm0 movdqa %xmm0, 0x50(%rsp) movq 0xd8(%rsp), %rax movl 0x6c(%rsp), %ecx imull 0xd4(%rsp), %ecx addl 0x68(%rsp), %ecx movslq %ecx, %rcx shlq %rcx addq %rcx, %rax movq %rax, 0x170(%rsp) movq 0x170(%rsp), %rax movdqu (%rax), %xmm0 movdqa %xmm0, 0x40(%rsp) movdqa 0x50(%rsp), %xmm1 movdqa 0x40(%rsp), %xmm0 movdqa %xmm1, 0x1b0(%rsp) movdqa %xmm0, 0x1a0(%rsp) movdqa 0x1b0(%rsp), %xmm0 movdqa 0x1a0(%rsp), %xmm1 psubusw %xmm1, %xmm0 movdqa %xmm0, 0x30(%rsp) movdqa 0x40(%rsp), %xmm1 movdqa 0x50(%rsp), %xmm0 movdqa %xmm1, 0x190(%rsp) movdqa %xmm0, 0x180(%rsp) movdqa 0x190(%rsp), %xmm0 movdqa 0x180(%rsp), %xmm1 psubusw %xmm1, %xmm0 movdqa %xmm0, 0x20(%rsp) movdqa 0x30(%rsp), %xmm1 movdqa 0x20(%rsp), %xmm0 movdqa %xmm1, 0x1d0(%rsp) movdqa %xmm0, 0x1c0(%rsp) movdqa 0x1d0(%rsp), %xmm0 movdqa 0x1c0(%rsp), %xmm1 pmaxuw %xmm1, %xmm0 movdqa %xmm0, 0x10(%rsp) movdqa 0x10(%rsp), %xmm1 movdqa 0xb0(%rsp), %xmm0 movdqa %xmm1, 0x230(%rsp) movdqa %xmm0, 0x220(%rsp) movdqa 0x230(%rsp), %xmm0 movdqa 0x220(%rsp), %xmm1 paddusw %xmm1, %xmm0 movl 0xc8(%rsp), %eax movdqa %xmm0, 0x210(%rsp) movl %eax, 0x20c(%rsp) movdqa 0x210(%rsp), %xmm0 movl 0x20c(%rsp), %eax movd %eax, %xmm1 psrlw %xmm1, %xmm0 movdqa %xmm0, (%rsp) movdqa (%rsp), %xmm0 movdqa %xmm0, 0x1f0(%rsp) movl $0x4, 
0x1ec(%rsp) movdqa 0x1f0(%rsp), %xmm0 movl 0x1ec(%rsp), %eax movd %eax, %xmm1 psrlw %xmm1, %xmm0 movdqa %xmm0, -0x10(%rsp) movdqa -0x10(%rsp), %xmm1 movdqa 0xa0(%rsp), %xmm0 movdqa %xmm1, 0x250(%rsp) movdqa %xmm0, 0x240(%rsp) movdqa 0x250(%rsp), %xmm0 movdqa 0x240(%rsp), %xmm1 paddsw %xmm1, %xmm0 movdqa %xmm0, -0x20(%rsp) movdqa -0x20(%rsp), %xmm1 movdqa 0x90(%rsp), %xmm0 movdqa %xmm1, 0x270(%rsp) movdqa %xmm0, 0x260(%rsp) movdqa 0x270(%rsp), %xmm0 movdqa 0x260(%rsp), %xmm1 pminsw %xmm1, %xmm0 movdqa %xmm0, -0x30(%rsp) movdqa -0x30(%rsp), %xmm1 movdqa 0x70(%rsp), %xmm0 movdqa %xmm1, 0x290(%rsp) movdqa %xmm0, 0x280(%rsp) movdqa 0x290(%rsp), %xmm0 movdqa 0x280(%rsp), %xmm1 psignw %xmm1, %xmm0 movdqa %xmm0, -0x40(%rsp) movdqa -0x40(%rsp), %xmm1 movdqa 0x80(%rsp), %xmm0 movdqa %xmm1, 0x2b0(%rsp) movdqa %xmm0, 0x2a0(%rsp) movdqa 0x2b0(%rsp), %xmm0 movdqa 0x2a0(%rsp), %xmm1 paddw %xmm1, %xmm0 movdqa %xmm0, -0x50(%rsp) movdqa -0x50(%rsp), %xmm1 movdqa -0x50(%rsp), %xmm0 movdqa %xmm1, 0x120(%rsp) movdqa %xmm0, 0x110(%rsp) movdqa 0x120(%rsp), %xmm0 movdqa 0x110(%rsp), %xmm1 packuswb %xmm1, %xmm0 movdqa %xmm0, -0x60(%rsp) movq 0xf8(%rsp), %rax movl 0x6c(%rsp), %ecx imull 0x378(%rsp), %ecx addl 0x68(%rsp), %ecx movslq %ecx, %rcx addq %rcx, %rax movq %rax, -0x68(%rsp) movl 0x378(%rsp), %eax subl 0x68(%rsp), %eax cmpl $0x4, %eax jle 0x7d847f movq -0x68(%rsp), %rax movaps -0x60(%rsp), %xmm0 movq %rax, 0x168(%rsp) movaps %xmm0, 0x150(%rsp) movq 0x150(%rsp), %rcx movq 0x168(%rsp), %rax movq %rcx, (%rax) jmp 0x7d84aa movaps -0x60(%rsp), %xmm0 movaps %xmm0, 0x140(%rsp) movaps 0x140(%rsp), %xmm0 movaps %xmm0, 0x130(%rsp) movl 0x130(%rsp), %ecx movq -0x68(%rsp), %rax movl %ecx, (%rax) jmp 0x7d84ac movl 0x68(%rsp), %eax addl $0x8, %eax movl %eax, 0x68(%rsp) jmp 0x7d8119 jmp 0x7d84be movl 0x6c(%rsp), %eax addl $0x1, %eax movl %eax, 0x6c(%rsp) jmp 0x7d8100 addq $0x368, %rsp # imm = 0x368 retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/av1/common/x86/reconinter_sse4.c
av1_selfguided_restoration_sse4_1
int av1_selfguided_restoration_sse4_1(const uint8_t *dgd8, int width, int height, int dgd_stride, int32_t *flt0, int32_t *flt1, int flt_stride, int sgr_params_idx, int bit_depth, int highbd) { int32_t *buf = (int32_t *)aom_memalign( 16, 4 * sizeof(*buf) * RESTORATION_PROC_UNIT_PELS); if (!buf) return -1; memset(buf, 0, 4 * sizeof(*buf) * RESTORATION_PROC_UNIT_PELS); const int width_ext = width + 2 * SGRPROJ_BORDER_HORZ; const int height_ext = height + 2 * SGRPROJ_BORDER_VERT; // Adjusting the stride of A and B here appears to avoid bad cache effects, // leading to a significant speed improvement. // We also align the stride to a multiple of 16 bytes for efficiency. int buf_stride = ((width_ext + 3) & ~3) + 16; // The "tl" pointers point at the top-left of the initialised data for the // array. Adding 3 here ensures that column 1 is 16-byte aligned. int32_t *Atl = buf + 0 * RESTORATION_PROC_UNIT_PELS + 3; int32_t *Btl = buf + 1 * RESTORATION_PROC_UNIT_PELS + 3; int32_t *Ctl = buf + 2 * RESTORATION_PROC_UNIT_PELS + 3; int32_t *Dtl = buf + 3 * RESTORATION_PROC_UNIT_PELS + 3; // The "0" pointers are (- SGRPROJ_BORDER_VERT, -SGRPROJ_BORDER_HORZ). Note // there's a zero row and column in A, B (integral images), so we move down // and right one for them. const int buf_diag_border = SGRPROJ_BORDER_HORZ + buf_stride * SGRPROJ_BORDER_VERT; int32_t *A0 = Atl + 1 + buf_stride; int32_t *B0 = Btl + 1 + buf_stride; int32_t *C0 = Ctl + 1 + buf_stride; int32_t *D0 = Dtl + 1 + buf_stride; // Finally, A, B, C, D point at position (0, 0). int32_t *A = A0 + buf_diag_border; int32_t *B = B0 + buf_diag_border; int32_t *C = C0 + buf_diag_border; int32_t *D = D0 + buf_diag_border; const int dgd_diag_border = SGRPROJ_BORDER_HORZ + dgd_stride * SGRPROJ_BORDER_VERT; const uint8_t *dgd0 = dgd8 - dgd_diag_border; // Generate integral images from the input. 
C will contain sums of squares; D // will contain just sums if (highbd) integral_images_highbd(CONVERT_TO_SHORTPTR(dgd0), dgd_stride, width_ext, height_ext, Ctl, Dtl, buf_stride); else integral_images(dgd0, dgd_stride, width_ext, height_ext, Ctl, Dtl, buf_stride); const sgr_params_type *const params = &av1_sgr_params[sgr_params_idx]; // Write to flt0 and flt1 // If params->r == 0 we skip the corresponding filter. We only allow one of // the radii to be 0, as having both equal to 0 would be equivalent to // skipping SGR entirely. assert(!(params->r[0] == 0 && params->r[1] == 0)); assert(params->r[0] < AOMMIN(SGRPROJ_BORDER_VERT, SGRPROJ_BORDER_HORZ)); assert(params->r[1] < AOMMIN(SGRPROJ_BORDER_VERT, SGRPROJ_BORDER_HORZ)); if (params->r[0] > 0) { calc_ab_fast(A, B, C, D, width, height, buf_stride, bit_depth, sgr_params_idx, 0); final_filter_fast(flt0, flt_stride, A, B, buf_stride, dgd8, dgd_stride, width, height, highbd); } if (params->r[1] > 0) { calc_ab(A, B, C, D, width, height, buf_stride, bit_depth, sgr_params_idx, 1); final_filter(flt1, flt_stride, A, B, buf_stride, dgd8, dgd_stride, width, height, highbd); } aom_free(buf); return 0; }
pushq %rbx subq $0xf0, %rsp movl 0x118(%rsp), %eax movl 0x110(%rsp), %eax movl 0x108(%rsp), %eax movl 0x100(%rsp), %eax movq %rdi, 0xe0(%rsp) movl %esi, 0xdc(%rsp) movl %edx, 0xd8(%rsp) movl %ecx, 0xd4(%rsp) movq %r8, 0xc8(%rsp) movq %r9, 0xc0(%rsp) movl $0x10, %edi movl $0x1fa40, %esi # imm = 0x1FA40 callq 0xa0bc0 movq %rax, 0xb8(%rsp) cmpq $0x0, 0xb8(%rsp) jne 0x7d8563 movl $0xffffffff, 0xec(%rsp) # imm = 0xFFFFFFFF jmp 0x7d8968 movq 0xb8(%rsp), %rdi xorl %esi, %esi movl $0x1fa40, %edx # imm = 0x1FA40 callq 0x18280 movl 0xdc(%rsp), %eax addl $0x6, %eax movl %eax, 0xb4(%rsp) movl 0xd8(%rsp), %eax addl $0x6, %eax movl %eax, 0xb0(%rsp) movl 0xb4(%rsp), %eax addl $0x3, %eax andl $-0x4, %eax addl $0x10, %eax movl %eax, 0xac(%rsp) movq 0xb8(%rsp), %rax addq $0xc, %rax movq %rax, 0xa0(%rsp) movq 0xb8(%rsp), %rax addq $0x7e90, %rax # imm = 0x7E90 addq $0xc, %rax movq %rax, 0x98(%rsp) movq 0xb8(%rsp), %rax addq $0xfd20, %rax # imm = 0xFD20 addq $0xc, %rax movq %rax, 0x90(%rsp) movq 0xb8(%rsp), %rax addq $0x17bb0, %rax # imm = 0x17BB0 addq $0xc, %rax movq %rax, 0x88(%rsp) imull $0x3, 0xac(%rsp), %eax addl $0x3, %eax movl %eax, 0x84(%rsp) movq 0xa0(%rsp), %rax addq $0x4, %rax movslq 0xac(%rsp), %rcx shlq $0x2, %rcx addq %rcx, %rax movq %rax, 0x78(%rsp) movq 0x98(%rsp), %rax addq $0x4, %rax movslq 0xac(%rsp), %rcx shlq $0x2, %rcx addq %rcx, %rax movq %rax, 0x70(%rsp) movq 0x90(%rsp), %rax addq $0x4, %rax movslq 0xac(%rsp), %rcx shlq $0x2, %rcx addq %rcx, %rax movq %rax, 0x68(%rsp) movq 0x88(%rsp), %rax addq $0x4, %rax movslq 0xac(%rsp), %rcx shlq $0x2, %rcx addq %rcx, %rax movq %rax, 0x60(%rsp) movq 0x78(%rsp), %rax movslq 0x84(%rsp), %rcx shlq $0x2, %rcx addq %rcx, %rax movq %rax, 0x58(%rsp) movq 0x70(%rsp), %rax movslq 0x84(%rsp), %rcx shlq $0x2, %rcx addq %rcx, %rax movq %rax, 0x50(%rsp) movq 0x68(%rsp), %rax movslq 0x84(%rsp), %rcx shlq $0x2, %rcx addq %rcx, %rax movq %rax, 0x48(%rsp) movq 0x60(%rsp), %rax movslq 0x84(%rsp), %rcx shlq $0x2, %rcx addq %rcx, %rax movq 
%rax, 0x40(%rsp) imull $0x3, 0xd4(%rsp), %eax addl $0x3, %eax movl %eax, 0x3c(%rsp) movq 0xe0(%rsp), %rax movslq 0x3c(%rsp), %rdx xorl %ecx, %ecx subq %rdx, %rcx addq %rcx, %rax movq %rax, 0x30(%rsp) cmpl $0x0, 0x118(%rsp) je 0x7d8779 movq 0x30(%rsp), %rdi shlq %rdi movl 0xd4(%rsp), %esi movl 0xb4(%rsp), %edx movl 0xb0(%rsp), %ecx movq 0x90(%rsp), %r8 movq 0x88(%rsp), %r9 movl 0xac(%rsp), %eax movl %eax, (%rsp) callq 0x7d8980 jmp 0x7d87b2 movq 0x30(%rsp), %rdi movl 0xd4(%rsp), %esi movl 0xb4(%rsp), %edx movl 0xb0(%rsp), %ecx movq 0x90(%rsp), %r8 movq 0x88(%rsp), %r9 movl 0xac(%rsp), %eax movl %eax, (%rsp) callq 0x7d8db0 movslq 0x108(%rsp), %rcx leaq 0x33192f(%rip), %rax # 0xb0a0f0 shlq $0x4, %rcx addq %rcx, %rax movq %rax, 0x28(%rsp) movq 0x28(%rsp), %rax cmpl $0x0, (%rax) jle 0x7d888f movq 0x58(%rsp), %rdi movq 0x50(%rsp), %rsi movq 0x48(%rsp), %rdx movq 0x40(%rsp), %rcx movl 0xdc(%rsp), %r8d movl 0xd8(%rsp), %r9d movl 0xac(%rsp), %r11d movl 0x110(%rsp), %r10d movl 0x108(%rsp), %eax xorl %ebx, %ebx movl %r11d, (%rsp) movl %r10d, 0x8(%rsp) movl %eax, 0x10(%rsp) movl $0x0, 0x18(%rsp) callq 0x7d91e0 movq 0xc8(%rsp), %rdi movl 0x100(%rsp), %esi movq 0x58(%rsp), %rdx movq 0x50(%rsp), %rcx movl 0xac(%rsp), %r8d movq 0xe0(%rsp), %r9 movl 0xd4(%rsp), %ebx movl 0xdc(%rsp), %r11d movl 0xd8(%rsp), %r10d movl 0x118(%rsp), %eax movl %ebx, (%rsp) movl %r11d, 0x8(%rsp) movl %r10d, 0x10(%rsp) movl %eax, 0x18(%rsp) callq 0x7d9af0 movq 0x28(%rsp), %rax cmpl $0x0, 0x4(%rax) jle 0x7d8950 movq 0x58(%rsp), %rdi movq 0x50(%rsp), %rsi movq 0x48(%rsp), %rdx movq 0x40(%rsp), %rcx movl 0xdc(%rsp), %r8d movl 0xd8(%rsp), %r9d movl 0xac(%rsp), %r11d movl 0x110(%rsp), %r10d movl 0x108(%rsp), %eax movl %r11d, (%rsp) movl %r10d, 0x8(%rsp) movl %eax, 0x10(%rsp) movl $0x1, 0x18(%rsp) callq 0x7da0c0 movq 0xc0(%rsp), %rdi movl 0x100(%rsp), %esi movq 0x58(%rsp), %rdx movq 0x50(%rsp), %rcx movl 0xac(%rsp), %r8d movq 0xe0(%rsp), %r9 movl 0xd4(%rsp), %ebx movl 0xdc(%rsp), %r11d movl 0xd8(%rsp), %r10d 
movl 0x118(%rsp), %eax movl %ebx, (%rsp) movl %r11d, 0x8(%rsp) movl %r10d, 0x10(%rsp) movl %eax, 0x18(%rsp) callq 0x7da9d0 movq 0xb8(%rsp), %rdi callq 0xa0e00 movl $0x0, 0xec(%rsp) movl 0xec(%rsp), %eax addq $0xf0, %rsp popq %rbx retq nopl (%rax,%rax)
/m-ab-s[P]aom/av1/common/x86/selfguided_sse4.c