Columns:
  project     string (2 distinct values)
  commit_id   string (fixed length 40)
  target      int64 (0 to 1)
  func        string (length 26 to 142k)
  idx         int64 (0 to 27.3k)
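Each row below follows this schema, one field per line in the order project, commit_id, target, func, idx, with the C function flattened onto a single line. As a rough sketch of how rows like these could be queried, here is a minimal Python example; the file name defect_rows.parquet is hypothetical and pandas is only one possible way to read the table.

    # Minimal sketch: load the rows and pull out vulnerable qemu functions.
    # Assumption: the table has been saved as "defect_rows.parquet"
    # (hypothetical file name); columns match the schema listed above.
    import pandas as pd

    df = pd.read_parquet("defect_rows.parquet")

    # target == 1 marks a function labeled as defective/vulnerable.
    qemu_vuln = df[(df["project"] == "qemu") & (df["target"] == 1)]

    # Each "func" value is one C function flattened onto a single line.
    first = qemu_vuln.iloc[0]
    print(first["commit_id"], len(first["func"]), "characters")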
qemu
ee68697551cd81186c5b12eba10c158350cf1165
0
void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context) { s->aio_context = new_context; s->completion_bh = aio_bh_new(new_context, qemu_laio_completion_bh, s); aio_set_event_notifier(new_context, &s->e, false, qemu_laio_completion_cb, NULL); }
2,076
qemu
7d08c73e7bdc39b10e5f2f5acdce700f17ffe962
0
process_tx_desc(E1000State *s, struct e1000_tx_desc *dp) { PCIDevice *d = PCI_DEVICE(s); uint32_t txd_lower = le32_to_cpu(dp->lower.data); uint32_t dtype = txd_lower & (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D); unsigned int split_size = txd_lower & 0xffff, bytes, sz; unsigned int msh = 0xfffff; uint64_t addr; struct e1000_context_desc *xp = (struct e1000_context_desc *)dp; struct e1000_tx *tp = &s->tx; s->mit_ide |= (txd_lower & E1000_TXD_CMD_IDE); if (dtype == E1000_TXD_CMD_DEXT) { /* context descriptor */ e1000x_read_tx_ctx_descr(xp, &tp->props); tp->tso_frames = 0; if (tp->props.tucso == 0) { /* this is probably wrong */ DBGOUT(TXSUM, "TCP/UDP: cso 0!\n"); tp->props.tucso = tp->props.tucss + (tp->props.tcp ? 16 : 6); } return; } else if (dtype == (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)) { // data descriptor if (tp->size == 0) { tp->props.sum_needed = le32_to_cpu(dp->upper.data) >> 8; } tp->props.cptse = (txd_lower & E1000_TXD_CMD_TSE) ? 1 : 0; } else { // legacy descriptor tp->props.cptse = 0; } if (e1000x_vlan_enabled(s->mac_reg) && e1000x_is_vlan_txd(txd_lower) && (tp->props.cptse || txd_lower & E1000_TXD_CMD_EOP)) { tp->vlan_needed = 1; stw_be_p(tp->vlan_header, le16_to_cpu(s->mac_reg[VET])); stw_be_p(tp->vlan_header + 2, le16_to_cpu(dp->upper.fields.special)); } addr = le64_to_cpu(dp->buffer_addr); if (tp->props.tse && tp->props.cptse) { msh = tp->props.hdr_len + tp->props.mss; do { bytes = split_size; if (tp->size + bytes > msh) bytes = msh - tp->size; bytes = MIN(sizeof(tp->data) - tp->size, bytes); pci_dma_read(d, addr, tp->data + tp->size, bytes); sz = tp->size + bytes; if (sz >= tp->props.hdr_len && tp->size < tp->props.hdr_len) { memmove(tp->header, tp->data, tp->props.hdr_len); } tp->size = sz; addr += bytes; if (sz == msh) { xmit_seg(s); memmove(tp->data, tp->header, tp->props.hdr_len); tp->size = tp->props.hdr_len; } split_size -= bytes; } while (bytes && split_size); } else if (!tp->props.tse && tp->props.cptse) { // context descriptor TSE is not set, while data descriptor TSE is set DBGOUT(TXERR, "TCP segmentation error\n"); } else { split_size = MIN(sizeof(tp->data) - tp->size, split_size); pci_dma_read(d, addr, tp->data + tp->size, split_size); tp->size += split_size; } if (!(txd_lower & E1000_TXD_CMD_EOP)) return; if (!(tp->props.tse && tp->props.cptse && tp->size < tp->props.hdr_len)) { xmit_seg(s); } tp->tso_frames = 0; tp->props.sum_needed = 0; tp->vlan_needed = 0; tp->size = 0; tp->props.cptse = 0; }
2,077
qemu
537b41f5013e1951fa15e8f18855b18d76124ce4
0
int unix_socket_outgoing(const char *path) { Error *local_err = NULL; int fd = unix_connect(path, &local_err); if (local_err != NULL) { qerror_report_err(local_err); error_free(local_err); } return fd; }
2,078
qemu
185b43386ad999c80bdc58e41b87f05e5b3e8463
0
static ssize_t nbd_receive_request(int csock, struct nbd_request *request) { uint8_t buf[4 + 4 + 8 + 8 + 4]; uint32_t magic; if (read_sync(csock, buf, sizeof(buf)) != sizeof(buf)) { LOG("read failed"); errno = EINVAL; return -1; } /* Request [ 0 .. 3] magic (NBD_REQUEST_MAGIC) [ 4 .. 7] type (0 == READ, 1 == WRITE) [ 8 .. 15] handle [16 .. 23] from [24 .. 27] len */ magic = be32_to_cpup((uint32_t*)buf); request->type = be32_to_cpup((uint32_t*)(buf + 4)); request->handle = be64_to_cpup((uint64_t*)(buf + 8)); request->from = be64_to_cpup((uint64_t*)(buf + 16)); request->len = be32_to_cpup((uint32_t*)(buf + 24)); TRACE("Got request: " "{ magic = 0x%x, .type = %d, from = %" PRIu64" , len = %u }", magic, request->type, request->from, request->len); if (magic != NBD_REQUEST_MAGIC) { LOG("invalid magic (got 0x%x)", magic); errno = EINVAL; return -1; } return 0; }
2,079
qemu
3c529d935923a70519557d420db1d5a09a65086a
0
static void raw_close_fd_pool(BDRVRawState *s) { int i; for (i = 0; i < RAW_FD_POOL_SIZE; i++) { if (s->fd_pool[i] != -1) { close(s->fd_pool[i]); s->fd_pool[i] = -1; } } }
2,080
qemu
fff23ee9a5de74ab111b3cea9eec56782e7d7c50
0
static UHCIAsync *uhci_async_alloc(UHCIState *s) { UHCIAsync *async = g_malloc(sizeof(UHCIAsync)); memset(&async->packet, 0, sizeof(async->packet)); async->uhci = s; async->valid = 0; async->td = 0; async->token = 0; async->done = 0; async->isoc = 0; usb_packet_init(&async->packet); qemu_sglist_init(&async->sgl, 1); return async; }
2,081
qemu
b09ea7d55cfab5a75912bb56ed1fcd757604a759
0
static void apic_startup(APICState *s, int vector_num) { CPUState *env = s->cpu_env; if (!env->halted) return; env->eip = 0; cpu_x86_load_seg_cache(env, R_CS, vector_num << 8, vector_num << 12, 0xffff, 0); env->halted = 0; }
2,085
qemu
de13d2161473d02ae97ec0f8e4503147554892dd
0
void kvm_s390_virtio_irq(S390CPU *cpu, int config_change, uint64_t token) { kvm_s390_interrupt_internal(cpu, KVM_S390_INT_VIRTIO, config_change, token, 1); }
2,086
qemu
61007b316cd71ee7333ff7a0a749a8949527575f
0
static void bdrv_io_limits_intercept(BlockDriverState *bs, unsigned int bytes, bool is_write) { /* does this io must wait */ bool must_wait = throttle_schedule_timer(&bs->throttle_state, is_write); /* if must wait or any request of this type throttled queue the IO */ if (must_wait || !qemu_co_queue_empty(&bs->throttled_reqs[is_write])) { qemu_co_queue_wait(&bs->throttled_reqs[is_write]); } /* the IO will be executed, do the accounting */ throttle_account(&bs->throttle_state, is_write, bytes); /* if the next request must wait -> do nothing */ if (throttle_schedule_timer(&bs->throttle_state, is_write)) { return; } /* else queue next request for execution */ qemu_co_queue_next(&bs->throttled_reqs[is_write]); }
2,087
FFmpeg
e9e642cbfbf36285f60d1dba00103f068b077940
0
static int parse_bintree(Indeo3DecodeContext *ctx, AVCodecContext *avctx, Plane *plane, int code, Cell *ref_cell, const int depth, const int strip_width) { Cell curr_cell; int bytes_used; if (depth <= 0) { av_log(avctx, AV_LOG_ERROR, "Stack overflow (corrupted binary tree)!\n"); return AVERROR_INVALIDDATA; // unwind recursion } curr_cell = *ref_cell; // clone parent cell if (code == H_SPLIT) { SPLIT_CELL(ref_cell->height, curr_cell.height); ref_cell->ypos += curr_cell.height; ref_cell->height -= curr_cell.height; } else if (code == V_SPLIT) { if (curr_cell.width > strip_width) { /* split strip */ curr_cell.width = (curr_cell.width <= (strip_width << 1) ? 1 : 2) * strip_width; } else SPLIT_CELL(ref_cell->width, curr_cell.width); ref_cell->xpos += curr_cell.width; ref_cell->width -= curr_cell.width; } while (1) { /* loop until return */ RESYNC_BITSTREAM; switch (code = get_bits(&ctx->gb, 2)) { case H_SPLIT: case V_SPLIT: if (parse_bintree(ctx, avctx, plane, code, &curr_cell, depth - 1, strip_width)) return AVERROR_INVALIDDATA; break; case INTRA_NULL: if (!curr_cell.tree) { /* MC tree INTRA code */ curr_cell.mv_ptr = 0; /* mark the current strip as INTRA */ curr_cell.tree = 1; /* enter the VQ tree */ } else { /* VQ tree NULL code */ RESYNC_BITSTREAM; code = get_bits(&ctx->gb, 2); if (code >= 2) { av_log(avctx, AV_LOG_ERROR, "Invalid VQ_NULL code: %d\n", code); return AVERROR_INVALIDDATA; } if (code == 1) av_log(avctx, AV_LOG_ERROR, "SkipCell procedure not implemented yet!\n"); CHECK_CELL copy_cell(ctx, plane, &curr_cell); return 0; } break; case INTER_DATA: if (!curr_cell.tree) { /* MC tree INTER code */ /* get motion vector index and setup the pointer to the mv set */ if (!ctx->need_resync) ctx->next_cell_data = &ctx->gb.buffer[(get_bits_count(&ctx->gb) + 7) >> 3]; curr_cell.mv_ptr = &ctx->mc_vectors[*(ctx->next_cell_data++) << 1]; curr_cell.tree = 1; /* enter the VQ tree */ UPDATE_BITPOS(8); } else { /* VQ tree DATA code */ if (!ctx->need_resync) ctx->next_cell_data = &ctx->gb.buffer[(get_bits_count(&ctx->gb) + 7) >> 3]; CHECK_CELL bytes_used = decode_cell(ctx, avctx, plane, &curr_cell, ctx->next_cell_data, ctx->last_byte); if (bytes_used < 0) return AVERROR_INVALIDDATA; UPDATE_BITPOS(bytes_used << 3); ctx->next_cell_data += bytes_used; return 0; } break; } }//while return 0; }
2,088
FFmpeg
d676598f879ba01ddb62f9abe8e17b2e94cb91cd
0
int64_t swr_next_pts(struct SwrContext *s, int64_t pts){ if(pts == INT64_MIN) return s->outpts; if(s->min_compensation >= FLT_MAX) { return (s->outpts = pts - swr_get_delay(s, s->in_sample_rate * (int64_t)s->out_sample_rate)); } else { int64_t delta = pts - swr_get_delay(s, s->in_sample_rate * (int64_t)s->out_sample_rate) - s->outpts; double fdelta = delta /(double)(s->in_sample_rate * (int64_t)s->out_sample_rate); if(fabs(fdelta) > s->min_compensation) { if(!s->outpts || fabs(fdelta) > s->min_hard_compensation){ int ret; if(delta > 0) ret = swr_inject_silence(s, delta / s->out_sample_rate); else ret = swr_drop_output (s, -delta / s-> in_sample_rate); if(ret<0){ av_log(s, AV_LOG_ERROR, "Failed to compensate for timestamp delta of %f\n", fdelta); } } else if(s->soft_compensation_duration && s->max_soft_compensation) { int duration = s->out_sample_rate * s->soft_compensation_duration; double max_soft_compensation = s->max_soft_compensation / (s->max_soft_compensation < 0 ? -s->in_sample_rate : 1); int comp = av_clipf(fdelta, -max_soft_compensation, max_soft_compensation) * duration ; av_log(s, AV_LOG_VERBOSE, "compensating audio timestamp drift:%f compensation:%d in:%d\n", fdelta, comp, duration); swr_set_compensation(s, comp, duration); } } return s->outpts; } }
2,089
FFmpeg
d4ec07dfe7dbc86e8f6403781c511b9463a526d2
0
int av_utf8_decode(int32_t *codep, const uint8_t **bufp, const uint8_t *buf_end, unsigned int flags) { const uint8_t *p = *bufp; uint32_t top; uint64_t code; int ret = 0; if (p >= buf_end) return 0; code = *p++; /* first sequence byte starts with 10, or is 1111-1110 or 1111-1111, which is not admitted */ if ((code & 0xc0) == 0x80 || code >= 0xFE) { ret = AVERROR(EILSEQ); goto end; } top = (code & 128) >> 1; while (code & top) { int tmp; if (p >= buf_end) { (*bufp) ++; return AVERROR(EILSEQ); /* incomplete sequence */ } /* we assume the byte to be in the form 10xx-xxxx */ tmp = *p++ - 128; /* strip leading 1 */ if (tmp>>6) { (*bufp) ++; return AVERROR(EILSEQ); } code = (code<<6) + tmp; top <<= 5; } code &= (top << 1) - 1; if (code >= 1<<31) { ret = AVERROR(EILSEQ); /* out-of-range value */ goto end; } *codep = code; if (code > 0x10FFFF && !(flags & AV_UTF8_FLAG_ACCEPT_INVALID_BIG_CODES)) ret = AVERROR(EILSEQ); if (code < 0x20 && code != 0x9 && code != 0xA && code != 0xD && flags & AV_UTF8_FLAG_EXCLUDE_XML_INVALID_CONTROL_CODES) ret = AVERROR(EILSEQ); if (code >= 0xD800 && code <= 0xDFFF && !(flags & AV_UTF8_FLAG_ACCEPT_SURROGATES)) ret = AVERROR(EILSEQ); if ((code == 0xFFFE || code == 0xFFFF) && !(flags & AV_UTF8_FLAG_ACCEPT_NON_CHARACTERS)) ret = AVERROR(EILSEQ); end: *bufp = p; return ret; }
2,090
FFmpeg
dd561441b1e849df7d8681c6f32af82d4088dafd
0
static av_always_inline av_flatten void h264_loop_filter_luma_intra_c(uint8_t *pix, int xstride, int ystride, int alpha, int beta) { int d; for( d = 0; d < 16; d++ ) { const int p2 = pix[-3*xstride]; const int p1 = pix[-2*xstride]; const int p0 = pix[-1*xstride]; const int q0 = pix[ 0*xstride]; const int q1 = pix[ 1*xstride]; const int q2 = pix[ 2*xstride]; if( FFABS( p0 - q0 ) < alpha && FFABS( p1 - p0 ) < beta && FFABS( q1 - q0 ) < beta ) { if(FFABS( p0 - q0 ) < (( alpha >> 2 ) + 2 )){ if( FFABS( p2 - p0 ) < beta) { const int p3 = pix[-4*xstride]; /* p0', p1', p2' */ pix[-1*xstride] = ( p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4 ) >> 3; pix[-2*xstride] = ( p2 + p1 + p0 + q0 + 2 ) >> 2; pix[-3*xstride] = ( 2*p3 + 3*p2 + p1 + p0 + q0 + 4 ) >> 3; } else { /* p0' */ pix[-1*xstride] = ( 2*p1 + p0 + q1 + 2 ) >> 2; } if( FFABS( q2 - q0 ) < beta) { const int q3 = pix[3*xstride]; /* q0', q1', q2' */ pix[0*xstride] = ( p1 + 2*p0 + 2*q0 + 2*q1 + q2 + 4 ) >> 3; pix[1*xstride] = ( p0 + q0 + q1 + q2 + 2 ) >> 2; pix[2*xstride] = ( 2*q3 + 3*q2 + q1 + q0 + p0 + 4 ) >> 3; } else { /* q0' */ pix[0*xstride] = ( 2*q1 + q0 + p1 + 2 ) >> 2; } }else{ /* p0', q0' */ pix[-1*xstride] = ( 2*p1 + p0 + q1 + 2 ) >> 2; pix[ 0*xstride] = ( 2*q1 + q0 + p1 + 2 ) >> 2; } } pix += ystride; } }
2,092
qemu
1e356fc14beaa3ece6c0e961bd479af58be3198b
1
static void sigbus_handler(int signal) { siglongjmp(sigjump, 1); }
2,094
qemu
089da572b956ef0f8f5b8d5917358e07892a77c2
1
void fw_cfg_add_bytes(FWCfgState *s, uint16_t key, uint8_t *data, uint32_t len) { int arch = !!(key & FW_CFG_ARCH_LOCAL); key &= FW_CFG_ENTRY_MASK; assert(key < FW_CFG_MAX_ENTRY); s->entries[arch][key].data = data; s->entries[arch][key].len = len; }
2,095
qemu
4f298a4b2957b7833bc607c951ca27c458d98d88
1
static void set_bmc_global_enables(IPMIBmcSim *ibs, uint8_t *cmd, unsigned int cmd_len, uint8_t *rsp, unsigned int *rsp_len, unsigned int max_rsp_len) { IPMI_CHECK_CMD_LEN(3); set_global_enables(ibs, cmd[2]); }
2,096
FFmpeg
98a0053d0f90e3309dc1038b1bae3a48bbd9067c
1
static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size, int parse_extradata) { AVCodecContext *const avctx = h->avctx; H264SliceContext *sl; int buf_index; unsigned context_count; int next_avc; int nals_needed = 0; ///< number of NALs that need decoding before the next frame thread starts int nal_index; int idr_cleared=0; int ret = 0; h->nal_unit_type= 0; if(!h->slice_context_count) h->slice_context_count= 1; h->max_contexts = h->slice_context_count; if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)) { h->current_slice = 0; if (!h->first_field) h->cur_pic_ptr = NULL; ff_h264_reset_sei(h); if (h->nal_length_size == 4) { if (buf_size > 8 && AV_RB32(buf) == 1 && AV_RB32(buf+5) > (unsigned)buf_size) { h->is_avc = 0; }else if(buf_size > 3 && AV_RB32(buf) > 1 && AV_RB32(buf) <= (unsigned)buf_size) h->is_avc = 1; if (avctx->active_thread_type & FF_THREAD_FRAME) nals_needed = get_last_needed_nal(h, buf, buf_size); { buf_index = 0; context_count = 0; next_avc = h->is_avc ? 0 : buf_size; nal_index = 0; for (;;) { int consumed; int dst_length; int bit_length; const uint8_t *ptr; int nalsize = 0; int err; if (buf_index >= next_avc) { nalsize = get_avc_nalsize(h, buf, buf_size, &buf_index); if (nalsize < 0) break; next_avc = buf_index + nalsize; } else { buf_index = find_start_code(buf, buf_size, buf_index, next_avc); if (buf_index >= buf_size) break; if (buf_index >= next_avc) continue; sl = &h->slice_ctx[context_count]; ptr = ff_h264_decode_nal(h, sl, buf + buf_index, &dst_length, &consumed, next_avc - buf_index); if (!ptr || dst_length < 0) { ret = -1; goto end; bit_length = get_bit_length(h, buf, ptr, dst_length, buf_index + consumed, next_avc); if (h->avctx->debug & FF_DEBUG_STARTCODE) av_log(h->avctx, AV_LOG_DEBUG, "NAL %d/%d at %d/%d length %d\n", h->nal_unit_type, h->nal_ref_idc, buf_index, buf_size, dst_length); if (h->is_avc && (nalsize != consumed) && nalsize) av_log(h->avctx, AV_LOG_DEBUG, "AVC: Consumed only %d bytes instead of %d\n", consumed, nalsize); buf_index += consumed; nal_index++; if (avctx->skip_frame >= AVDISCARD_NONREF && h->nal_ref_idc == 0 && h->nal_unit_type != NAL_SEI) continue; again: /* Ignore per frame NAL unit type during extradata * parsing. Decoding slices is not possible in codec init * with frame-mt */ if (parse_extradata) { switch (h->nal_unit_type) { case NAL_IDR_SLICE: case NAL_SLICE: case NAL_DPA: case NAL_DPB: case NAL_DPC: av_log(h->avctx, AV_LOG_WARNING, "Ignoring NAL %d in global header/extradata\n", h->nal_unit_type); // fall through to next case case NAL_AUXILIARY_SLICE: h->nal_unit_type = NAL_FF_IGNORE; err = 0; switch (h->nal_unit_type) { case NAL_IDR_SLICE: if ((ptr[0] & 0xFC) == 0x98) { av_log(h->avctx, AV_LOG_ERROR, "Invalid inter IDR frame\n"); h->next_outputed_poc = INT_MIN; ret = -1; goto end; if (h->nal_unit_type != NAL_IDR_SLICE) { av_log(h->avctx, AV_LOG_ERROR, "Invalid mix of idr and non-idr slices\n"); ret = -1; goto end; if(!idr_cleared) { if (h->current_slice && (avctx->active_thread_type & FF_THREAD_SLICE)) { av_log(h, AV_LOG_ERROR, "invalid mixed IDR / non IDR frames cannot be decoded in slice multithreading mode\n"); ret = AVERROR_INVALIDDATA; goto end; idr(h); // FIXME ensure we don't lose some frames if there is reordering idr_cleared = 1; h->has_recovery_point = 1; case NAL_SLICE: init_get_bits(&sl->gb, ptr, bit_length); if ( nals_needed >= nal_index || (!(avctx->active_thread_type & FF_THREAD_FRAME) && !context_count)) h->au_pps_id = -1; if ((err = ff_h264_decode_slice_header(h, sl))) break; if (h->sei_recovery_frame_cnt >= 0) { if (h->frame_num != h->sei_recovery_frame_cnt || sl->slice_type_nos != AV_PICTURE_TYPE_I) h->valid_recovery_point = 1; if ( h->recovery_frame < 0 || av_mod_uintp2(h->recovery_frame - h->frame_num, h->sps.log2_max_frame_num) > h->sei_recovery_frame_cnt) { h->recovery_frame = av_mod_uintp2(h->frame_num + h->sei_recovery_frame_cnt, h->sps.log2_max_frame_num); if (!h->valid_recovery_point) h->recovery_frame = h->frame_num; h->cur_pic_ptr->f->key_frame |= (h->nal_unit_type == NAL_IDR_SLICE); if (h->nal_unit_type == NAL_IDR_SLICE || h->recovery_frame == h->frame_num) { h->recovery_frame = -1; h->cur_pic_ptr->recovered = 1; // If we have an IDR, all frames after it in decoded order are // "recovered". if (h->nal_unit_type == NAL_IDR_SLICE) h->frame_recovered |= FRAME_RECOVERED_IDR; #if 1 h->cur_pic_ptr->recovered |= h->frame_recovered; #else h->cur_pic_ptr->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_IDR); #endif if (h->current_slice == 1) { if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)) decode_postinit(h, nal_index >= nals_needed); if (h->avctx->hwaccel && (ret = h->avctx->hwaccel->start_frame(h->avctx, buf, buf_size)) < 0) goto end; #if FF_API_CAP_VDPAU if (CONFIG_H264_VDPAU_DECODER && h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU) ff_vdpau_h264_picture_start(h); #endif if (sl->redundant_pic_count == 0) { if (avctx->hwaccel) { ret = avctx->hwaccel->decode_slice(avctx, &buf[buf_index - consumed], consumed); if (ret < 0) goto end; #if FF_API_CAP_VDPAU } else if (CONFIG_H264_VDPAU_DECODER && h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU) { ff_vdpau_add_data_chunk(h->cur_pic_ptr->f->data[0], start_code, sizeof(start_code)); ff_vdpau_add_data_chunk(h->cur_pic_ptr->f->data[0], &buf[buf_index - consumed], consumed); #endif context_count++; break; case NAL_DPA: case NAL_DPB: case NAL_DPC: avpriv_request_sample(avctx, "data partitioning"); break; case NAL_SEI: init_get_bits(&h->gb, ptr, bit_length); ret = ff_h264_decode_sei(h); if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE)) goto end; break; case NAL_SPS: init_get_bits(&h->gb, ptr, bit_length); if (ff_h264_decode_seq_parameter_set(h, 0) >= 0) break; if (h->is_avc ? nalsize : 1) { av_log(h->avctx, AV_LOG_DEBUG, "SPS decoding failure, trying again with the complete NAL\n"); if (h->is_avc) av_assert0(next_avc - buf_index + consumed == nalsize); if ((next_avc - buf_index + consumed - 1) >= INT_MAX/8) break; init_get_bits(&h->gb, &buf[buf_index + 1 - consumed], 8*(next_avc - buf_index + consumed - 1)); if (ff_h264_decode_seq_parameter_set(h, 0) >= 0) break; init_get_bits(&h->gb, ptr, bit_length); ff_h264_decode_seq_parameter_set(h, 1); break; case NAL_PPS: init_get_bits(&h->gb, ptr, bit_length); ret = ff_h264_decode_picture_parameter_set(h, bit_length); if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE)) goto end; break; case NAL_AUD: case NAL_END_SEQUENCE: case NAL_END_STREAM: case NAL_FILLER_DATA: case NAL_SPS_EXT: case NAL_AUXILIARY_SLICE: break; case NAL_FF_IGNORE: break; default: av_log(avctx, AV_LOG_DEBUG, "Unknown NAL code: %d (%d bits)\n", h->nal_unit_type, bit_length); if (context_count == h->max_contexts) { ret = ff_h264_execute_decode_slices(h, context_count); if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE)) goto end; context_count = 0; if (err < 0 || err == SLICE_SKIPED) { if (err < 0) av_log(h->avctx, AV_LOG_ERROR, "decode_slice_header error\n"); sl->ref_count[0] = sl->ref_count[1] = sl->list_count = 0; } else if (err == SLICE_SINGLETHREAD) { if (context_count > 1) { ret = ff_h264_execute_decode_slices(h, context_count - 1); if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE)) goto end; context_count = 0; /* Slice could not be decoded in parallel mode, restart. Note * that rbsp_buffer is not transferred, but since we no longer * run in parallel mode this should not be an issue. */ sl = &h->slice_ctx[0]; goto again; if (context_count) { ret = ff_h264_execute_decode_slices(h, context_count); if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE)) goto end; ret = 0; end: /* clean up */ if (h->cur_pic_ptr && !h->droppable) { ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, h->picture_structure == PICT_BOTTOM_FIELD); return (ret < 0) ? ret : buf_index;
2,097
qemu
10f12e6450407b18b4d5a6b50d3852dcfd7fff75
1
int spapr_h_cas_compose_response(sPAPRMachineState *spapr, target_ulong addr, target_ulong size, sPAPROptionVector *ov5_updates) { void *fdt, *fdt_skel; sPAPRDeviceTreeUpdateHeader hdr = { .version_id = 1 }; size -= sizeof(hdr); /* Create sceleton */ fdt_skel = g_malloc0(size); _FDT((fdt_create(fdt_skel, size))); _FDT((fdt_begin_node(fdt_skel, ""))); _FDT((fdt_end_node(fdt_skel))); _FDT((fdt_finish(fdt_skel))); fdt = g_malloc0(size); _FDT((fdt_open_into(fdt_skel, fdt, size))); g_free(fdt_skel); /* Fixup cpu nodes */ _FDT((spapr_fixup_cpu_dt(fdt, spapr))); if (spapr_dt_cas_updates(spapr, fdt, ov5_updates)) { return -1; } /* Pack resulting tree */ _FDT((fdt_pack(fdt))); if (fdt_totalsize(fdt) + sizeof(hdr) > size) { trace_spapr_cas_failed(size); return -1; } cpu_physical_memory_write(addr, &hdr, sizeof(hdr)); cpu_physical_memory_write(addr + sizeof(hdr), fdt, fdt_totalsize(fdt)); trace_spapr_cas_continue(fdt_totalsize(fdt) + sizeof(hdr)); g_free(fdt); return 0; }
2,098
FFmpeg
7e240f95818310ed721321e62130aa1c69f9cbe6
1
static MatroskaLevel1Element *matroska_find_level1_elem(MatroskaDemuxContext *matroska, uint32_t id) { int i; MatroskaLevel1Element *elem; // Some files link to all clusters; useless. if (id == MATROSKA_ID_CLUSTER) // There can be multiple seekheads. if (id != MATROSKA_ID_SEEKHEAD) { for (i = 0; i < matroska->num_level1_elems; i++) { if (matroska->level1_elems[i].id == id) return &matroska->level1_elems[i]; } } // Only a completely broken file would have more elements. // It also provides a low-effort way to escape from circular seekheads // (every iteration will add a level1 entry). if (matroska->num_level1_elems >= FF_ARRAY_ELEMS(matroska->level1_elems)) { av_log(matroska->ctx, AV_LOG_ERROR, "Too many level1 elements or circular seekheads.\n"); } elem = &matroska->level1_elems[matroska->num_level1_elems++]; *elem = (MatroskaLevel1Element){.id = id}; return elem; }
2,099
qemu
72f0d0bf51362011c4d841a89fb8f5cfb16e0bf3
1
static int mp_dacl_removexattr(FsContext *ctx, const char *path, const char *name) { int ret; char *buffer; buffer = rpath(ctx, path); ret = lremovexattr(buffer, MAP_ACL_DEFAULT); if (ret == -1 && errno == ENODATA) { /* * We don't get ENODATA error when trying to remove a * posix acl that is not present. So don't throw the error * even in case of mapped security model */ errno = 0; ret = 0; } g_free(buffer); return ret; }
2,100
qemu
af7e9e74c6a62a5bcd911726a9e88d28b61490e0
1
static void IRQ_local_pipe(OpenPICState *opp, int n_CPU, int n_IRQ) { IRQ_dst_t *dst; IRQ_src_t *src; int priority; dst = &opp->dst[n_CPU]; src = &opp->src[n_IRQ]; priority = IPVP_PRIORITY(src->ipvp); if (priority <= dst->pctp) { /* Too low priority */ DPRINTF("%s: IRQ %d has too low priority on CPU %d\n", __func__, n_IRQ, n_CPU); return; } if (IRQ_testbit(&dst->raised, n_IRQ)) { /* Interrupt miss */ DPRINTF("%s: IRQ %d was missed on CPU %d\n", __func__, n_IRQ, n_CPU); return; } src->ipvp |= IPVP_ACTIVITY_MASK; IRQ_setbit(&dst->raised, n_IRQ); if (priority < dst->raised.priority) { /* An higher priority IRQ is already raised */ DPRINTF("%s: IRQ %d is hidden by raised IRQ %d on CPU %d\n", __func__, n_IRQ, dst->raised.next, n_CPU); return; } IRQ_get_next(opp, &dst->raised); if (IRQ_get_next(opp, &dst->servicing) != -1 && priority <= dst->servicing.priority) { DPRINTF("%s: IRQ %d is hidden by servicing IRQ %d on CPU %d\n", __func__, n_IRQ, dst->servicing.next, n_CPU); /* Already servicing a higher priority IRQ */ return; } DPRINTF("Raise OpenPIC INT output cpu %d irq %d\n", n_CPU, n_IRQ); openpic_irq_raise(opp, n_CPU, src); }
2,101
qemu
d35ff5e6b3aa3a706b0aa3bcf11400fac945b67a
1
static void process_incoming_migration_bh(void *opaque) { Error *local_err = NULL; MigrationIncomingState *mis = opaque; /* Make sure all file formats flush their mutable metadata */ bdrv_invalidate_cache_all(&local_err); migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE, MIGRATION_STATUS_FAILED); error_report_err(local_err); migrate_decompress_threads_join(); exit(EXIT_FAILURE); /* * This must happen after all error conditions are dealt with and * we're sure the VM is going to be running on this host. */ qemu_announce_self(); /* If global state section was not received or we are in running state, we need to obey autostart. Any other state is set with runstate_set. */ if (!global_state_received() || global_state_get_runstate() == RUN_STATE_RUNNING) { if (autostart) { vm_start(); } else { runstate_set(RUN_STATE_PAUSED); } else { runstate_set(global_state_get_runstate()); migrate_decompress_threads_join(); /* * This must happen after any state changes since as soon as an external * observer sees this event they might start to prod at the VM assuming * it's ready to use. */ migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE, MIGRATION_STATUS_COMPLETED); qemu_bh_delete(mis->bh); migration_incoming_state_destroy();
2,102
FFmpeg
478f1c3d5e5463a284ea7efecfc62d47ba3be11a
1
static int decode_phys_chunk(AVCodecContext *avctx, PNGDecContext *s) { if (s->state & PNG_IDAT) { av_log(avctx, AV_LOG_ERROR, "pHYs after IDAT\n"); return AVERROR_INVALIDDATA; } avctx->sample_aspect_ratio.num = bytestream2_get_be32(&s->gb); avctx->sample_aspect_ratio.den = bytestream2_get_be32(&s->gb); if (avctx->sample_aspect_ratio.num < 0 || avctx->sample_aspect_ratio.den < 0) avctx->sample_aspect_ratio = (AVRational){ 0, 1 }; bytestream2_skip(&s->gb, 1); /* unit specifier */ bytestream2_skip(&s->gb, 4); /* crc */ return 0; }
2,103
qemu
1108b2f8a939fb5778d384149e2f1b99062a72da
1
static int xio3130_downstream_initfn(PCIDevice *d) { PCIEPort *p = PCIE_PORT(d); PCIESlot *s = PCIE_SLOT(d); int rc; pci_bridge_initfn(d, TYPE_PCIE_BUS); pcie_port_init_reg(d); rc = msi_init(d, XIO3130_MSI_OFFSET, XIO3130_MSI_NR_VECTOR, XIO3130_MSI_SUPPORTED_FLAGS & PCI_MSI_FLAGS_64BIT, XIO3130_MSI_SUPPORTED_FLAGS & PCI_MSI_FLAGS_MASKBIT); if (rc < 0) { goto err_bridge; } rc = pci_bridge_ssvid_init(d, XIO3130_SSVID_OFFSET, XIO3130_SSVID_SVID, XIO3130_SSVID_SSID); if (rc < 0) { goto err_bridge; } rc = pcie_cap_init(d, XIO3130_EXP_OFFSET, PCI_EXP_TYPE_DOWNSTREAM, p->port); if (rc < 0) { goto err_msi; } pcie_cap_flr_init(d); pcie_cap_deverr_init(d); pcie_cap_slot_init(d, s->slot); pcie_cap_arifwd_init(d); pcie_chassis_create(s->chassis); rc = pcie_chassis_add_slot(s); if (rc < 0) { goto err_pcie_cap; } rc = pcie_aer_init(d, XIO3130_AER_OFFSET, PCI_ERR_SIZEOF); if (rc < 0) { goto err; } return 0; err: pcie_chassis_del_slot(s); err_pcie_cap: pcie_cap_exit(d); err_msi: msi_uninit(d); err_bridge: pci_bridge_exitfn(d); return rc; }
2,104
FFmpeg
3fa9692ae2324b8fcc1a8aa47b9a75826740b32e
0
static av_cold int hevc_decode_free(AVCodecContext *avctx) { HEVCContext *s = avctx->priv_data; HEVCLocalContext *lc = s->HEVClc; int i; pic_arrays_free(s); av_freep(&s->md5_ctx); for(i=0; i < s->nals_allocated; i++) { av_freep(&s->skipped_bytes_pos_nal[i]); } av_freep(&s->skipped_bytes_pos_size_nal); av_freep(&s->skipped_bytes_nal); av_freep(&s->skipped_bytes_pos_nal); av_freep(&s->cabac_state); av_frame_free(&s->tmp_frame); av_frame_free(&s->output_frame); for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) { ff_hevc_unref_frame(s, &s->DPB[i], ~0); av_frame_free(&s->DPB[i].frame); } for (i = 0; i < FF_ARRAY_ELEMS(s->vps_list); i++) av_buffer_unref(&s->vps_list[i]); for (i = 0; i < FF_ARRAY_ELEMS(s->sps_list); i++) av_buffer_unref(&s->sps_list[i]); for (i = 0; i < FF_ARRAY_ELEMS(s->pps_list); i++) av_buffer_unref(&s->pps_list[i]); s->sps = NULL; s->pps = NULL; s->vps = NULL; av_buffer_unref(&s->current_sps); av_freep(&s->sh.entry_point_offset); av_freep(&s->sh.offset); av_freep(&s->sh.size); for (i = 1; i < s->threads_number; i++) { lc = s->HEVClcList[i]; if (lc) { av_freep(&s->HEVClcList[i]); av_freep(&s->sList[i]); } } if (s->HEVClc == s->HEVClcList[0]) s->HEVClc = NULL; av_freep(&s->HEVClcList[0]); for (i = 0; i < s->nals_allocated; i++) av_freep(&s->nals[i].rbsp_buffer); av_freep(&s->nals); s->nals_allocated = 0; return 0; }
2,105
FFmpeg
cf1e0786ed64e69614760bfb4ecd7adbde8e6094
0
static av_cold int dct_init(MpegEncContext *s) { ff_blockdsp_init(&s->bdsp, s->avctx); ff_hpeldsp_init(&s->hdsp, s->avctx->flags); ff_me_cmp_init(&s->mecc, s->avctx); ff_mpegvideodsp_init(&s->mdsp); ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample); s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c; s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c; s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c; s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c; s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c; if (s->flags & CODEC_FLAG_BITEXACT) s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact; s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c; if (HAVE_INTRINSICS_NEON) ff_mpv_common_init_neon(s); if (ARCH_ARM) ff_mpv_common_init_arm(s); if (ARCH_PPC) ff_mpv_common_init_ppc(s); if (ARCH_X86) ff_mpv_common_init_x86(s); return 0; }
2,106
FFmpeg
3ebc7e04dea6072400d91c1c90eb3911754cee06
0
static void filter_mb_edgeh( H264Context *h, uint8_t *pix, int stride, int bS[4], int qp ) { int i, d; const int index_a = clip( qp + h->slice_alpha_c0_offset, 0, 51 ); const int alpha = alpha_table[index_a]; const int beta = beta_table[clip( qp + h->slice_beta_offset, 0, 51 )]; const int pix_next = stride; for( i = 0; i < 4; i++ ) { if( bS[i] == 0 ) { pix += 4; continue; } /* 4px edge length */ for( d = 0; d < 4; d++ ) { const uint8_t p0 = pix[-1*pix_next]; const uint8_t p1 = pix[-2*pix_next]; const uint8_t p2 = pix[-3*pix_next]; const uint8_t q0 = pix[0]; const uint8_t q1 = pix[1*pix_next]; const uint8_t q2 = pix[2*pix_next]; if( abs( p0 - q0 ) >= alpha || abs( p1 - p0 ) >= beta || abs( q1 - q0 ) >= beta ) { pix++; continue; } if( bS[i] < 4 ) { const int tc0 = tc0_table[index_a][bS[i] - 1]; int tc = tc0; int i_delta; if( abs( p2 - p0 ) < beta ) { pix[-2*pix_next] = p1 + clip( ( p2 + ( ( p0 + q0 + 1 ) >> 1 ) - ( p1 << 1 ) ) >> 1, -tc0, tc0 ); tc++; } if( abs( q2 - q0 ) < beta ) { pix[pix_next] = q1 + clip( ( q2 + ( ( p0 + q0 + 1 ) >> 1 ) - ( q1 << 1 ) ) >> 1, -tc0, tc0 ); tc++; } i_delta = clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc ); pix[-pix_next] = clip( p0 + i_delta, 0, 255 ); /* p0' */ pix[0] = clip( q0 - i_delta, 0, 255 ); /* q0' */ } else { const uint8_t p3 = pix[-4*pix_next]; const uint8_t q3 = pix[ 3*pix_next]; const int c = abs( p0 - q0 ) < (( alpha >> 2 ) + 2 ); if( abs( p2 - p0 ) < beta && c ) { /* p0', p1', p2' */ pix[-1*pix_next] = ( p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4 ) >> 3; pix[-2*pix_next] = ( p2 + p1 + p0 + q0 + 2 ) >> 2; pix[-3*pix_next] = ( 2*p3 + 3*p2 + p1 + p0 + q0 + 4 ) >> 3; } else { /* p0' */ pix[-1*pix_next] = ( 2*p1 + p0 + q1 + 2 ) >> 2; } if( abs( q2 - q0 ) < beta && c ) { /* q0', q1', q2' */ pix[0*pix_next] = ( p1 + 2*p0 + 2*q0 + 2*q1 + q2 + 4 ) >> 3; pix[1*pix_next] = ( p0 + q0 + q1 + q2 + 2 ) >> 2; pix[2*pix_next] = ( 2*q3 + 3*q2 + q1 + q0 + p0 + 4 ) >> 3; } else { /* q0' */ pix[0*pix_next] = ( 2*q1 + q0 + p1 + 2 ) >> 2; } } pix++; } } }
2,107
FFmpeg
5b349c8d7cc5dd26b3fbbce6e3883ce02861eeb7
0
static void aflat(WaveformContext *s, AVFrame *in, AVFrame *out, int component, int intensity, int offset, int column) { const int plane = s->desc->comp[component].plane; const int mirror = s->mirror; const int c0_linesize = in->linesize[ plane + 0 ]; const int c1_linesize = in->linesize[(plane + 1) % s->ncomp]; const int c2_linesize = in->linesize[(plane + 2) % s->ncomp]; const int d0_linesize = out->linesize[ plane + 0 ]; const int d1_linesize = out->linesize[(plane + 1) % s->ncomp]; const int d2_linesize = out->linesize[(plane + 2) % s->ncomp]; const int max = 255 - intensity; const int src_h = in->height; const int src_w = in->width; int x, y; if (column) { const int d0_signed_linesize = d0_linesize * (mirror == 1 ? -1 : 1); const int d1_signed_linesize = d1_linesize * (mirror == 1 ? -1 : 1); const int d2_signed_linesize = d2_linesize * (mirror == 1 ? -1 : 1); for (x = 0; x < src_w; x++) { const uint8_t *c0_data = in->data[plane + 0]; const uint8_t *c1_data = in->data[(plane + 1) % s->ncomp]; const uint8_t *c2_data = in->data[(plane + 2) % s->ncomp]; uint8_t *d0_data = out->data[plane] + offset * d0_linesize; uint8_t *d1_data = out->data[(plane + 1) % s->ncomp] + offset * d1_linesize; uint8_t *d2_data = out->data[(plane + 2) % s->ncomp] + offset * d2_linesize; uint8_t * const d0_bottom_line = d0_data + d0_linesize * (s->size - 1); uint8_t * const d0 = (mirror ? d0_bottom_line : d0_data); uint8_t * const d1_bottom_line = d1_data + d1_linesize * (s->size - 1); uint8_t * const d1 = (mirror ? d1_bottom_line : d1_data); uint8_t * const d2_bottom_line = d2_data + d2_linesize * (s->size - 1); uint8_t * const d2 = (mirror ? d2_bottom_line : d2_data); for (y = 0; y < src_h; y++) { const int c0 = c0_data[x] + 128; const int c1 = c1_data[x] - 128; const int c2 = c2_data[x] - 128; uint8_t *target; int p; target = d0 + x + d0_signed_linesize * c0; update(target, max, intensity); for (p = c0 + c1; p < c0; p++) { target = d1 + x + d1_signed_linesize * p; update(target, max, 1); } for (p = c0 + c1 - 1; p > c0; p--) { target = d1 + x + d1_signed_linesize * p; update(target, max, 1); } for (p = c0 + c2; p < c0; p++) { target = d2 + x + d2_signed_linesize * p; update(target, max, 1); } for (p = c0 + c2 - 1; p > c0; p--) { target = d2 + x + d2_signed_linesize * p; update(target, max, 1); } c0_data += c0_linesize; c1_data += c1_linesize; c2_data += c2_linesize; d0_data += d0_linesize; d1_data += d1_linesize; d2_data += d2_linesize; } } } else { const uint8_t *c0_data = in->data[plane]; const uint8_t *c1_data = in->data[(plane + 1) % s->ncomp]; const uint8_t *c2_data = in->data[(plane + 2) % s->ncomp]; uint8_t *d0_data = out->data[plane] + offset; uint8_t *d1_data = out->data[(plane + 1) % s->ncomp] + offset; uint8_t *d2_data = out->data[(plane + 2) % s->ncomp] + offset; if (mirror) { d0_data += s->size - 1; d1_data += s->size - 1; d2_data += s->size - 1; } for (y = 0; y < src_h; y++) { for (x = 0; x < src_w; x++) { const int c0 = c0_data[x] + 128; const int c1 = c1_data[x] - 128; const int c2 = c2_data[x] - 128; uint8_t *target; int p; if (mirror) target = d0_data - c0; else target = d0_data + c0; update(target, max, intensity); for (p = c0 + c1; p < c0; p++) { if (mirror) target = d1_data - p; else target = d1_data + p; update(target, max, 1); } for (p = c0 + 1; p < c0 + c1; p++) { if (mirror) target = d1_data - p; else target = d1_data + p; update(target, max, 1); } for (p = c0 + c2; p < c0; p++) { if (mirror) target = d2_data - p; else target = d2_data + p; update(target, max, 1); } for (p = c0 + 1; p < c0 + c2; p++) { if (mirror) target = d2_data - p; else target = d2_data + p; update(target, max, 1); } } c0_data += c0_linesize; c1_data += c1_linesize; c2_data += c2_linesize; d0_data += d0_linesize; d1_data += d1_linesize; d2_data += d2_linesize; } } envelope(s, out, plane, (plane + 0) % s->ncomp); envelope(s, out, plane, (plane + 1) % s->ncomp); envelope(s, out, plane, (plane + 2) % s->ncomp); }
2,108
FFmpeg
f41e37b84f3d57c29d4a2a21f9337159135b981d
0
int ff_dirac_golomb_read_32bit(DiracGolombLUT *lut_ctx, const uint8_t *buf, int bytes, uint8_t *_dst, int coeffs) { int i, b, c_idx = 0; int32_t *dst = (int32_t *)_dst; DiracGolombLUT *future[4], *l = &lut_ctx[2*LUT_SIZE + buf[0]]; INIT_RESIDUE(res); for (b = 1; b <= bytes; b++) { future[0] = &lut_ctx[buf[b]]; future[1] = future[0] + 1*LUT_SIZE; future[2] = future[0] + 2*LUT_SIZE; future[3] = future[0] + 3*LUT_SIZE; if ((c_idx + 1) > coeffs) return c_idx; /* res_bits is a hint for better branch prediction */ if (res_bits && l->sign) { int32_t coeff = 1; APPEND_RESIDUE(res, l->preamble); for (i = 0; i < (res_bits >> 1) - 1; i++) { coeff <<= 1; coeff |= (res >> (RSIZE_BITS - 2*i - 2)) & 1; } dst[c_idx++] = l->sign * (coeff - 1); SET_RESIDUE(res, 0, 0); } memcpy(&dst[c_idx], l->ready, LUT_BITS*sizeof(int32_t)); c_idx += l->ready_num; APPEND_RESIDUE(res, l->leftover); l = future[l->need_s ? 3 : !res_bits ? 2 : res_bits & 1]; } return c_idx; }
2,109
FFmpeg
9c661e952fbcbf044709f9a7031c68cc4860336b
1
static int decode_i_frame(FourXContext *f, const uint8_t *buf, int length){ int x, y; const int width= f->avctx->width; const int height= f->avctx->height; uint16_t *dst= (uint16_t*)f->current_picture.data[0]; const int stride= f->current_picture.linesize[0]>>1; const unsigned int bitstream_size= AV_RL32(buf); const int token_count av_unused = AV_RL32(buf + bitstream_size + 8); unsigned int prestream_size= 4*AV_RL32(buf + bitstream_size + 4); const uint8_t *prestream= buf + bitstream_size + 12; if(prestream_size + bitstream_size + 12 != length || bitstream_size > (1<<26) || prestream_size > (1<<26)){ av_log(f->avctx, AV_LOG_ERROR, "size mismatch %d %d %d\n", prestream_size, bitstream_size, length); return -1; } prestream= read_huffman_tables(f, prestream); init_get_bits(&f->gb, buf + 4, 8*bitstream_size); prestream_size= length + buf - prestream; av_fast_malloc(&f->bitstream_buffer, &f->bitstream_buffer_size, prestream_size + FF_INPUT_BUFFER_PADDING_SIZE); if (!f->bitstream_buffer) return AVERROR(ENOMEM); f->dsp.bswap_buf(f->bitstream_buffer, (const uint32_t*)prestream, prestream_size/4); memset((uint8_t*)f->bitstream_buffer + prestream_size, 0, FF_INPUT_BUFFER_PADDING_SIZE); init_get_bits(&f->pre_gb, f->bitstream_buffer, 8*prestream_size); f->last_dc= 0*128*8*8; for(y=0; y<height; y+=16){ for(x=0; x<width; x+=16){ if(decode_i_mb(f) < 0) return -1; idct_put(f, x, y); } dst += 16*stride; } if(get_vlc2(&f->pre_gb, f->pre_vlc.table, ACDC_VLC_BITS, 3) != 256) av_log(f->avctx, AV_LOG_ERROR, "end mismatch\n"); return 0; }
2,111
qemu
4c3b22459d3589cf84d1ccadc6b09e586497820d
1
static int pci_pcnet_init(PCIDevice *pci_dev) { PCIPCNetState *d = PCI_PCNET(pci_dev); PCNetState *s = &d->state; uint8_t *pci_conf; #if 0 printf("sizeof(RMD)=%d, sizeof(TMD)=%d\n", sizeof(struct pcnet_RMD), sizeof(struct pcnet_TMD)); #endif pci_conf = pci_dev->config; pci_set_word(pci_conf + PCI_STATUS, PCI_STATUS_FAST_BACK | PCI_STATUS_DEVSEL_MEDIUM); pci_set_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID, 0x0); pci_set_word(pci_conf + PCI_SUBSYSTEM_ID, 0x0); pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */ pci_conf[PCI_MIN_GNT] = 0x06; pci_conf[PCI_MAX_LAT] = 0xff; /* Handler for memory-mapped I/O */ memory_region_init_io(&d->state.mmio, OBJECT(d), &pcnet_mmio_ops, s, "pcnet-mmio", PCNET_PNPMMIO_SIZE); memory_region_init_io(&d->io_bar, OBJECT(d), &pcnet_io_ops, s, "pcnet-io", PCNET_IOPORT_SIZE); pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &d->io_bar); pci_register_bar(pci_dev, 1, 0, &s->mmio); s->irq = pci_allocate_irq(pci_dev); s->phys_mem_read = pci_physical_memory_read; s->phys_mem_write = pci_physical_memory_write; s->dma_opaque = pci_dev; return pcnet_common_init(DEVICE(pci_dev), s, &net_pci_pcnet_info); }
2,112
qemu
2ee73ac3a855fb0cfba3db91fdd1ecebdbc6f971
1
void OPPROTO op_fdiv_STN_ST0(void) { ST(PARAM1) /= ST0; }
2,113
FFmpeg
0dcac9c3f0f8f32009098edb704fac4b08bac951
1
static int open_file(AVFormatContext *avf, unsigned fileno) { ConcatContext *cat = avf->priv_data; ConcatFile *file = &cat->files[fileno]; int ret; if (cat->avf) avformat_close_input(&cat->avf); cat->avf = avformat_alloc_context(); if (!cat->avf) return AVERROR(ENOMEM); cat->avf->flags |= avf->flags; cat->avf->interrupt_callback = avf->interrupt_callback; if ((ret = ff_copy_whiteblacklists(cat->avf, avf)) < 0) return ret; if ((ret = avformat_open_input(&cat->avf, file->url, NULL, NULL)) < 0 || (ret = avformat_find_stream_info(cat->avf, NULL)) < 0) { av_log(avf, AV_LOG_ERROR, "Impossible to open '%s'\n", file->url); avformat_close_input(&cat->avf); return ret; } cat->cur_file = file; if (file->start_time == AV_NOPTS_VALUE) file->start_time = !fileno ? 0 : cat->files[fileno - 1].start_time + cat->files[fileno - 1].duration; file->file_start_time = (cat->avf->start_time == AV_NOPTS_VALUE) ? 0 : cat->avf->start_time; file->file_inpoint = (file->inpoint == AV_NOPTS_VALUE) ? file->file_start_time : file->inpoint; if (file->duration == AV_NOPTS_VALUE && file->outpoint != AV_NOPTS_VALUE) file->duration = file->outpoint - file->file_inpoint; if (cat->segment_time_metadata) { av_dict_set_int(&file->metadata, "lavf.concatdec.start_time", file->start_time, 0); if (file->duration != AV_NOPTS_VALUE) av_dict_set_int(&file->metadata, "lavf.concatdec.duration", file->duration, 0); } if ((ret = match_streams(avf)) < 0) return ret; if (file->inpoint != AV_NOPTS_VALUE) { if ((ret = avformat_seek_file(cat->avf, -1, INT64_MIN, file->inpoint, file->inpoint, 0)) < 0) return ret; } return 0; }
2,114
qemu
5839e53bbc0fec56021d758aab7610df421ed8c8
1
BlockDirtyInfoList *bdrv_query_dirty_bitmaps(BlockDriverState *bs) { BdrvDirtyBitmap *bm; BlockDirtyInfoList *list = NULL; BlockDirtyInfoList **plist = &list; QLIST_FOREACH(bm, &bs->dirty_bitmaps, list) { BlockDirtyInfo *info = g_malloc0(sizeof(BlockDirtyInfo)); BlockDirtyInfoList *entry = g_malloc0(sizeof(BlockDirtyInfoList)); info->count = bdrv_get_dirty_count(bs, bm); info->granularity = ((int64_t) BDRV_SECTOR_SIZE << hbitmap_granularity(bm->bitmap)); entry->value = info; *plist = entry; plist = &entry->next; } return list; }
2,116
qemu
d3002b0463727bf8110833b9d1a6efaa28990c28
1
void register_displaychangelistener(DisplayChangeListener *dcl) { QemuConsole *con; trace_displaychangelistener_register(dcl, dcl->ops->dpy_name); dcl->ds = get_alloc_displaystate(); QLIST_INSERT_HEAD(&dcl->ds->listeners, dcl, next); gui_setup_refresh(dcl->ds); if (dcl->con) { dcl->con->dcls++; con = dcl->con; } else { con = active_console; } if (dcl->ops->dpy_gfx_switch && con) { dcl->ops->dpy_gfx_switch(dcl, con->surface); } }
2,117
qemu
a38648290ee277c7cb8a53eabdcdb08bb7a9f23f
1
static void cpu_ioreq_move(ioreq_t *req) { int i, sign; sign = req->df ? -1 : 1; if (!req->data_is_ptr) { if (req->dir == IOREQ_READ) { for (i = 0; i < req->count; i++) { cpu_physical_memory_read( req->addr + (sign * i * (int64_t)req->size), (uint8_t *) &req->data, req->size); } } else if (req->dir == IOREQ_WRITE) { for (i = 0; i < req->count; i++) { cpu_physical_memory_write( req->addr + (sign * i * (int64_t)req->size), (uint8_t *) &req->data, req->size); } } } else { uint64_t tmp; if (req->dir == IOREQ_READ) { for (i = 0; i < req->count; i++) { cpu_physical_memory_read( req->addr + (sign * i * (int64_t)req->size), (uint8_t*) &tmp, req->size); cpu_physical_memory_write( req->data + (sign * i * (int64_t)req->size), (uint8_t*) &tmp, req->size); } } else if (req->dir == IOREQ_WRITE) { for (i = 0; i < req->count; i++) { cpu_physical_memory_read( req->data + (sign * i * (int64_t)req->size), (uint8_t*) &tmp, req->size); cpu_physical_memory_write( req->addr + (sign * i * (int64_t)req->size), (uint8_t*) &tmp, req->size); } } } }
2,118
FFmpeg
cbf09545f250a4bd12c50c3a96fe481098ab2d49
0
static void imc_get_coeffs(AVCodecContext *avctx, IMCContext *q, IMCChannel *chctx) { int i, j, cw_len, cw; for (i = 0; i < BANDS; i++) { if (!chctx->sumLenArr[i]) continue; if (chctx->bandFlagsBuf[i] || chctx->bandWidthT[i]) { for (j = band_tab[i]; j < band_tab[i + 1]; j++) { cw_len = chctx->CWlengthT[j]; cw = 0; if (get_bits_count(&q->gb) + cw_len > 512) { av_log(avctx, AV_LOG_WARNING, "Potential problem on band %i, coefficient %i" ": cw_len=%i\n", i, j, cw_len); } if (cw_len && (!chctx->bandFlagsBuf[i] || !chctx->skipFlags[j])) cw = get_bits(&q->gb, cw_len); chctx->codewords[j] = cw; } } } }
2,119
FFmpeg
ea382767ad2191acbe97e90624059723e15f0e4b
0
void ff_generate_sliding_window_mmcos(H264Context *h, int first_slice) { MpegEncContext * const s = &h->s; MMCO mmco_temp[MAX_MMCO_COUNT], *mmco = first_slice ? h->mmco : mmco_temp; int mmco_index = 0, i; assert(h->long_ref_count + h->short_ref_count <= h->sps.ref_frame_count); if (h->short_ref_count && h->long_ref_count + h->short_ref_count == h->sps.ref_frame_count && !(FIELD_PICTURE && !s->first_field && s->current_picture_ptr->f.reference)) { mmco[0].opcode = MMCO_SHORT2UNUSED; mmco[0].short_pic_num = h->short_ref[h->short_ref_count - 1]->frame_num; mmco_index = 1; if (FIELD_PICTURE) { mmco[0].short_pic_num *= 2; mmco[1].opcode = MMCO_SHORT2UNUSED; mmco[1].short_pic_num = mmco[0].short_pic_num + 1; mmco_index = 2; } } if (first_slice) { h->mmco_index = mmco_index; } else if (!first_slice && mmco_index >= 0 && (mmco_index != h->mmco_index || (i = check_opcodes(h->mmco, mmco_temp, mmco_index)))) { av_log(h->s.avctx, AV_LOG_ERROR, "Inconsistent MMCO state between slices [%d, %d, %d]\n", mmco_index, h->mmco_index, i); return AVERROR_INVALIDDATA; } }
2,121
qemu
f897bf751fbd95e4015b95d202c706548586813a
1
void virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb) { uint32_t type; struct iovec *in_iov = req->elem->in_sg; struct iovec *iov = req->elem->out_sg; unsigned in_num = req->elem->in_num; unsigned out_num = req->elem->out_num; if (req->elem->out_num < 1 || req->elem->in_num < 1) { error_report("virtio-blk missing headers"); exit(1); } if (unlikely(iov_to_buf(iov, out_num, 0, &req->out, sizeof(req->out)) != sizeof(req->out))) { error_report("virtio-blk request outhdr too short"); exit(1); } iov_discard_front(&iov, &out_num, sizeof(req->out)); if (in_num < 1 || in_iov[in_num - 1].iov_len < sizeof(struct virtio_blk_inhdr)) { error_report("virtio-blk request inhdr too short"); exit(1); } req->in = (void *)in_iov[in_num - 1].iov_base + in_iov[in_num - 1].iov_len - sizeof(struct virtio_blk_inhdr); iov_discard_back(in_iov, &in_num, sizeof(struct virtio_blk_inhdr)); type = virtio_ldl_p(VIRTIO_DEVICE(req->dev), &req->out.type); if (type & VIRTIO_BLK_T_FLUSH) { virtio_blk_handle_flush(req, mrb); } else if (type & VIRTIO_BLK_T_SCSI_CMD) { virtio_blk_handle_scsi(req); } else if (type & VIRTIO_BLK_T_GET_ID) { VirtIOBlock *s = req->dev; /* * NB: per existing s/n string convention the string is * terminated by '\0' only when shorter than buffer. */ strncpy(req->elem->in_sg[0].iov_base, s->blk.serial ? s->blk.serial : "", MIN(req->elem->in_sg[0].iov_len, VIRTIO_BLK_ID_BYTES)); virtio_blk_req_complete(req, VIRTIO_BLK_S_OK); virtio_blk_free_request(req); } else if (type & VIRTIO_BLK_T_OUT) { qemu_iovec_init_external(&req->qiov, &req->elem->out_sg[1], req->elem->out_num - 1); virtio_blk_handle_write(req, mrb); } else if (type == VIRTIO_BLK_T_IN || type == VIRTIO_BLK_T_BARRIER) { /* VIRTIO_BLK_T_IN is 0, so we can't just & it. */ qemu_iovec_init_external(&req->qiov, &req->elem->in_sg[0], req->elem->in_num - 1); virtio_blk_handle_read(req); } else { virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP); virtio_blk_free_request(req); } }
2,122
qemu
9bb234b3b170299c39c9e88cfe7da5434a92d99d
1
void OPPROTO op_sdiv_T1_T0(void) { int64_t x0; int32_t x1; x0 = T0 | ((int64_t) (env->y) << 32); x1 = T1; x0 = x0 / x1; if ((int32_t) x0 != x0) { T0 = x0 < 0? 0x80000000: 0x7fffffff; T1 = 1; } else { T0 = x0; T1 = 0; FORCE_RET();
2,123
qemu
c8721d35994fd3731e592f81ba2f9c08e7dc8c31
1
static void spapr_machine_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(oc); FWPathProviderClass *fwc = FW_PATH_PROVIDER_CLASS(oc); NMIClass *nc = NMI_CLASS(oc); HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc); mc->desc = "pSeries Logical Partition (PAPR compliant)"; /* * We set up the default / latest behaviour here. The class_init * functions for the specific versioned machine types can override * these details for backwards compatibility */ mc->init = ppc_spapr_init; mc->reset = ppc_spapr_reset; mc->block_default_type = IF_SCSI; mc->max_cpus = MAX_CPUMASK_BITS; mc->no_parallel = 1; mc->default_boot_order = ""; mc->default_ram_size = 512 * M_BYTE; mc->kvm_type = spapr_kvm_type; mc->has_dynamic_sysbus = true; mc->pci_allow_0_address = true; mc->get_hotplug_handler = spapr_get_hotpug_handler; hc->pre_plug = spapr_machine_device_pre_plug; hc->plug = spapr_machine_device_plug; hc->unplug = spapr_machine_device_unplug; mc->cpu_index_to_socket_id = spapr_cpu_index_to_socket_id; mc->query_hotpluggable_cpus = spapr_query_hotpluggable_cpus; smc->dr_lmb_enabled = true; smc->dr_cpu_enabled = true; fwc->get_dev_path = spapr_get_fw_dev_path; nc->nmi_monitor_handler = spapr_nmi; }
2,125
qemu
94fb0909645de18481cc726ee0ec9b5afa861394
1
static int ram_decompress_open(RamDecompressState *s, QEMUFile *f) { int ret; memset(s, 0, sizeof(*s)); s->f = f; ret = inflateInit(&s->zstream); if (ret != Z_OK) return -1; return 0; }
2,126
qemu
a8f2e5c8fffbaf7fbd4f0efc8efbeebade78008f
1
static int virtio_scsi_vring_init(VirtIOSCSI *s, VirtQueue *vq, int n) { BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s))); VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); int rc; /* Set up virtqueue notify */ rc = k->set_host_notifier(qbus->parent, n, true); if (rc != 0) { fprintf(stderr, "virtio-scsi: Failed to set host notifier (%d)\n", rc); s->dataplane_fenced = true; return rc; } virtio_queue_aio_set_host_notifier_handler(vq, s->ctx, true, true); return 0; }
2,127
qemu
56c4bfb3f07f3107894c00281276aea4f5e8834d
1
void qemu_get_guest_memory_mapping(MemoryMappingList *list, Error **errp) { CPUState *cpu, *first_paging_enabled_cpu; RAMBlock *block; ram_addr_t offset, length; first_paging_enabled_cpu = find_paging_enabled_cpu(first_cpu); if (first_paging_enabled_cpu) { for (cpu = first_paging_enabled_cpu; cpu != NULL; cpu = cpu->next_cpu) { Error *err = NULL; cpu_get_memory_mapping(cpu, list, &err); if (err) { error_propagate(errp, err); return; } } return; } /* * If the guest doesn't use paging, the virtual address is equal to physical * address. */ QTAILQ_FOREACH(block, &ram_list.blocks, next) { offset = block->offset; length = block->length; create_new_memory_mapping(list, offset, offset, length); } }
2,128
FFmpeg
90540c2d5ace46a1e9789c75fde0b1f7dbb12a9b
1
static inline void RENAME(rgb24tobgr32)(const uint8_t *src, uint8_t *dst, int src_size) { uint8_t *dest = dst; const uint8_t *s = src; const uint8_t *end; const uint8_t *mm_end; end = s + src_size; __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory"); mm_end = end - 23; __asm__ volatile("movq %0, %%mm7"::"m"(mask32a):"memory"); while (s < mm_end) { __asm__ volatile( PREFETCH" 32%1 \n\t" "movd %1, %%mm0 \n\t" "punpckldq 3%1, %%mm0 \n\t" "movd 6%1, %%mm1 \n\t" "punpckldq 9%1, %%mm1 \n\t" "movd 12%1, %%mm2 \n\t" "punpckldq 15%1, %%mm2 \n\t" "movd 18%1, %%mm3 \n\t" "punpckldq 21%1, %%mm3 \n\t" "por %%mm7, %%mm0 \n\t" "por %%mm7, %%mm1 \n\t" "por %%mm7, %%mm2 \n\t" "por %%mm7, %%mm3 \n\t" MOVNTQ" %%mm0, %0 \n\t" MOVNTQ" %%mm1, 8%0 \n\t" MOVNTQ" %%mm2, 16%0 \n\t" MOVNTQ" %%mm3, 24%0" :"=m"(*dest) :"m"(*s) :"memory"); dest += 32; s += 24; } __asm__ volatile(SFENCE:::"memory"); __asm__ volatile(EMMS:::"memory"); while (s < end) { *dest++ = *s++; *dest++ = *s++; *dest++ = *s++; *dest++ = 255; } }
2,129
qemu
902b27d0b8d5bfa840eaf389d7cbcc28b57e3fbe
1
int bdrv_open2(BlockDriverState *bs, const char *filename, int flags, BlockDriver *drv) { int ret, open_flags; char tmp_filename[PATH_MAX]; char backing_filename[PATH_MAX]; bs->read_only = 0; bs->is_temporary = 0; bs->encrypted = 0; if (flags & BDRV_O_SNAPSHOT) { BlockDriverState *bs1; int64_t total_size; /* if snapshot, we create a temporary backing file and open it instead of opening 'filename' directly */ /* if there is a backing file, use it */ bs1 = bdrv_new(""); if (!bs1) { return -ENOMEM; } if (bdrv_open(bs1, filename, 0) < 0) { bdrv_delete(bs1); return -1; } total_size = bdrv_getlength(bs1) >> SECTOR_BITS; bdrv_delete(bs1); get_tmp_filename(tmp_filename, sizeof(tmp_filename)); realpath(filename, backing_filename); if (bdrv_create(&bdrv_qcow2, tmp_filename, total_size, backing_filename, 0) < 0) { return -1; } filename = tmp_filename; bs->is_temporary = 1; } pstrcpy(bs->filename, sizeof(bs->filename), filename); if (flags & BDRV_O_FILE) { drv = find_protocol(filename); if (!drv) return -ENOENT; } else { if (!drv) { drv = find_image_format(filename); if (!drv) return -1; } } bs->drv = drv; bs->opaque = qemu_mallocz(drv->instance_size); bs->total_sectors = 0; /* driver will set if it does not do getlength */ if (bs->opaque == NULL && drv->instance_size > 0) return -1; /* Note: for compatibility, we open disk image files as RDWR, and RDONLY as fallback */ if (!(flags & BDRV_O_FILE)) open_flags = BDRV_O_RDWR | (flags & BDRV_O_DIRECT); else open_flags = flags & ~(BDRV_O_FILE | BDRV_O_SNAPSHOT); ret = drv->bdrv_open(bs, filename, open_flags); if (ret == -EACCES && !(flags & BDRV_O_FILE)) { ret = drv->bdrv_open(bs, filename, BDRV_O_RDONLY); bs->read_only = 1; } if (ret < 0) { qemu_free(bs->opaque); bs->opaque = NULL; bs->drv = NULL; return ret; } if (drv->bdrv_getlength) { bs->total_sectors = bdrv_getlength(bs) >> SECTOR_BITS; } #ifndef _WIN32 if (bs->is_temporary) { unlink(filename); } #endif if (bs->backing_file[0] != '\0') { /* if there is a backing file, use it */ bs->backing_hd = bdrv_new(""); if (!bs->backing_hd) { fail: bdrv_close(bs); return -ENOMEM; } path_combine(backing_filename, sizeof(backing_filename), filename, bs->backing_file); if (bdrv_open(bs->backing_hd, backing_filename, 0) < 0) goto fail; } /* call the change callback */ bs->media_changed = 1; if (bs->change_cb) bs->change_cb(bs->change_opaque); return 0; }
2,131
FFmpeg
48f1e5212c90b511c90fa0449655abb06a9edda2
1
static int wma_decode_superframe(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; WMACodecContext *s = avctx->priv_data; int nb_frames, bit_offset, i, pos, len, ret; uint8_t *q; int16_t *samples; tprintf(avctx, "***decode_superframe:\n"); if(buf_size==0){ s->last_superframe_len = 0; return 0; } if (buf_size < s->block_align) return 0; buf_size = s->block_align; init_get_bits(&s->gb, buf, buf_size*8); if (s->use_bit_reservoir) { /* read super frame header */ skip_bits(&s->gb, 4); /* super frame index */ nb_frames = get_bits(&s->gb, 4) - (s->last_superframe_len <= 0); } else { nb_frames = 1; } /* get output buffer */ s->frame.nb_samples = nb_frames * s->frame_len; if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return ret; } samples = (int16_t *)s->frame.data[0]; if (s->use_bit_reservoir) { bit_offset = get_bits(&s->gb, s->byte_offset_bits + 3); if (s->last_superframe_len > 0) { // printf("skip=%d\n", s->last_bitoffset); /* add bit_offset bits to last frame */ if ((s->last_superframe_len + ((bit_offset + 7) >> 3)) > MAX_CODED_SUPERFRAME_SIZE) goto fail; q = s->last_superframe + s->last_superframe_len; len = bit_offset; while (len > 7) { *q++ = (get_bits)(&s->gb, 8); len -= 8; } if (len > 0) { *q++ = (get_bits)(&s->gb, len) << (8 - len); } /* XXX: bit_offset bits into last frame */ init_get_bits(&s->gb, s->last_superframe, MAX_CODED_SUPERFRAME_SIZE*8); /* skip unused bits */ if (s->last_bitoffset > 0) skip_bits(&s->gb, s->last_bitoffset); /* this frame is stored in the last superframe and in the current one */ if (wma_decode_frame(s, samples) < 0) goto fail; samples += s->nb_channels * s->frame_len; nb_frames--; } /* read each frame starting from bit_offset */ pos = bit_offset + 4 + 4 + s->byte_offset_bits + 3; init_get_bits(&s->gb, buf + (pos >> 3), (MAX_CODED_SUPERFRAME_SIZE - (pos >> 3))*8); len = pos & 7; if (len > 0) skip_bits(&s->gb, len); s->reset_block_lengths = 1; for(i=0;i<nb_frames;i++) { if (wma_decode_frame(s, samples) < 0) goto fail; samples += s->nb_channels * s->frame_len; } /* we copy the end of the frame in the last frame buffer */ pos = get_bits_count(&s->gb) + ((bit_offset + 4 + 4 + s->byte_offset_bits + 3) & ~7); s->last_bitoffset = pos & 7; pos >>= 3; len = buf_size - pos; if (len > MAX_CODED_SUPERFRAME_SIZE || len < 0) { av_log(s->avctx, AV_LOG_ERROR, "len %d invalid\n", len); goto fail; } s->last_superframe_len = len; memcpy(s->last_superframe, buf + pos, len); } else { /* single frame decode */ if (wma_decode_frame(s, samples) < 0) goto fail; samples += s->nb_channels * s->frame_len; } //av_log(NULL, AV_LOG_ERROR, "%d %d %d %d outbytes:%d eaten:%d\n", s->frame_len_bits, s->block_len_bits, s->frame_len, s->block_len, (int8_t *)samples - (int8_t *)data, s->block_align); *got_frame_ptr = 1; *(AVFrame *)data = s->frame; return s->block_align; fail: /* when error, we reset the bit reservoir */ s->last_superframe_len = 0; return -1; }
2,132
FFmpeg
a52f443714b5c2a40ed272d8445f4c39220a4b69
1
static int vc1_decode_p_mb(VC1Context *v) { MpegEncContext *s = &v->s; GetBitContext *gb = &s->gb; int i, j; int mb_pos = s->mb_x + s->mb_y * s->mb_stride; int cbp; /* cbp decoding stuff */ int mqdiff, mquant; /* MB quantization */ int ttmb = v->ttfrm; /* MB Transform type */ int mb_has_coeffs = 1; /* last_flag */ int dmv_x, dmv_y; /* Differential MV components */ int index, index1; /* LUT indexes */ int val, sign; /* temp values */ int first_block = 1; int dst_idx, off; int skipped, fourmv; int block_cbp = 0, pat, block_tt = 0, block_intra = 0; mquant = v->pq; /* lossy initialization */ if (v->mv_type_is_raw) fourmv = get_bits1(gb); else fourmv = v->mv_type_mb_plane[mb_pos]; if (v->skip_is_raw) skipped = get_bits1(gb); else skipped = v->s.mbskip_table[mb_pos]; if (!fourmv) { /* 1MV mode */ if (!skipped) { GET_MVDATA(dmv_x, dmv_y); if (s->mb_intra) { s->current_picture.motion_val[1][s->block_index[0]][0] = 0; s->current_picture.motion_val[1][s->block_index[0]][1] = 0; } s->current_picture.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16; vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0); /* FIXME Set DC val for inter block ? */ if (s->mb_intra && !mb_has_coeffs) { GET_MQUANT(); s->ac_pred = get_bits1(gb); cbp = 0; } else if (mb_has_coeffs) { if (s->mb_intra) s->ac_pred = get_bits1(gb); cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2); GET_MQUANT(); } else { mquant = v->pq; cbp = 0; } s->current_picture.qscale_table[mb_pos] = mquant; if (!v->ttmbf && !s->mb_intra && mb_has_coeffs) ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2); if (!s->mb_intra) vc1_mc_1mv(v, 0); dst_idx = 0; for (i = 0; i < 6; i++) { s->dc_val[0][s->block_index[i]] = 0; dst_idx += i >> 2; val = ((cbp >> (5 - i)) & 1); off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize); v->mb_type[0][s->block_index[i]] = s->mb_intra; if (s->mb_intra) { /* check if prediction blocks A and C are available */ v->a_avail = v->c_avail = 0; if (i == 2 || i == 3 || !s->first_slice_line) v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]]; if (i == 1 || i == 3 || s->mb_x) v->c_avail = v->mb_type[0][s->block_index[i] - 1]; vc1_decode_intra_block(v, s->block[i], i, val, mquant, (i & 4) ? v->codingset2 : v->codingset); if ((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue; v->vc1dsp.vc1_inv_trans_8x8(s->block[i]); if (v->rangeredfrm) for (j = 0; j < 64; j++) s->block[i][j] <<= 1; s->idsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize); if (v->pq >= 9 && v->overlap) { if (v->c_avail) v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize); if (v->a_avail) v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize); } block_cbp |= 0xF << (i << 2); block_intra |= 1 << i; } else if (val) { pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block, s->dest[dst_idx] + off, (i & 4) ? 
s->uvlinesize : s->linesize, (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt); block_cbp |= pat << (i << 2); if (!v->ttmbf && ttmb < 8) ttmb = -1; first_block = 0; } } } else { // skipped s->mb_intra = 0; for (i = 0; i < 6; i++) { v->mb_type[0][s->block_index[i]] = 0; s->dc_val[0][s->block_index[i]] = 0; } s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP; s->current_picture.qscale_table[mb_pos] = 0; vc1_pred_mv(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0); vc1_mc_1mv(v, 0); } } else { // 4MV mode if (!skipped /* unskipped MB */) { int intra_count = 0, coded_inter = 0; int is_intra[6], is_coded[6]; /* Get CBPCY */ cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2); for (i = 0; i < 6; i++) { val = ((cbp >> (5 - i)) & 1); s->dc_val[0][s->block_index[i]] = 0; s->mb_intra = 0; if (i < 4) { dmv_x = dmv_y = 0; s->mb_intra = 0; mb_has_coeffs = 0; if (val) { GET_MVDATA(dmv_x, dmv_y); } vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0); if (!s->mb_intra) vc1_mc_4mv_luma(v, i, 0, 0); intra_count += s->mb_intra; is_intra[i] = s->mb_intra; is_coded[i] = mb_has_coeffs; } if (i & 4) { is_intra[i] = (intra_count >= 3); is_coded[i] = val; } if (i == 4) vc1_mc_4mv_chroma(v, 0); v->mb_type[0][s->block_index[i]] = is_intra[i]; if (!coded_inter) coded_inter = !is_intra[i] && is_coded[i]; } // if there are no coded blocks then don't do anything more dst_idx = 0; if (!intra_count && !coded_inter) goto end; GET_MQUANT(); s->current_picture.qscale_table[mb_pos] = mquant; /* test if block is intra and has pred */ { int intrapred = 0; for (i = 0; i < 6; i++) if (is_intra[i]) { if (((!s->first_slice_line || (i == 2 || i == 3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]]) || ((s->mb_x || (i == 1 || i == 3)) && v->mb_type[0][s->block_index[i] - 1])) { intrapred = 1; break; } } if (intrapred) s->ac_pred = get_bits1(gb); else s->ac_pred = 0; } if (!v->ttmbf && coded_inter) ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2); for (i = 0; i < 6; i++) { dst_idx += i >> 2; off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize); s->mb_intra = is_intra[i]; if (is_intra[i]) { /* check if prediction blocks A and C are available */ v->a_avail = v->c_avail = 0; if (i == 2 || i == 3 || !s->first_slice_line) v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]]; if (i == 1 || i == 3 || s->mb_x) v->c_avail = v->mb_type[0][s->block_index[i] - 1]; vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant, (i & 4) ? v->codingset2 : v->codingset); if ((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue; v->vc1dsp.vc1_inv_trans_8x8(s->block[i]); if (v->rangeredfrm) for (j = 0; j < 64; j++) s->block[i][j] <<= 1; s->idsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize); if (v->pq >= 9 && v->overlap) { if (v->c_avail) v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize); if (v->a_avail) v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize); } block_cbp |= 0xF << (i << 2); block_intra |= 1 << i; } else if (is_coded[i]) { pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block, s->dest[dst_idx] + off, (i & 4) ? 
s->uvlinesize : s->linesize, (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt); block_cbp |= pat << (i << 2); if (!v->ttmbf && ttmb < 8) ttmb = -1; first_block = 0; } } } else { // skipped MB s->mb_intra = 0; s->current_picture.qscale_table[mb_pos] = 0; for (i = 0; i < 6; i++) { v->mb_type[0][s->block_index[i]] = 0; s->dc_val[0][s->block_index[i]] = 0; } for (i = 0; i < 4; i++) { vc1_pred_mv(v, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0); vc1_mc_4mv_luma(v, i, 0, 0); } vc1_mc_4mv_chroma(v, 0); s->current_picture.qscale_table[mb_pos] = 0; } } end: v->cbp[s->mb_x] = block_cbp; v->ttblk[s->mb_x] = block_tt; v->is_intra[s->mb_x] = block_intra; return 0; }
2,134
qemu
4cae4d5acaea23f3def84c8dc67ef5106323e5cb
1
int qdev_build_hotpluggable_device_list(Object *obj, void *opaque) { GSList **list = opaque; DeviceState *dev = DEVICE(obj); if (dev->realized && object_property_get_bool(obj, "hotpluggable", NULL)) { *list = g_slist_append(*list, dev); } object_child_foreach(obj, qdev_build_hotpluggable_device_list, opaque); return 0; }
2,135
qemu
d6f723b513a0c3c4e58343b7c52a2f9850861fa0
1
static void test_qemu_strtoul_max(void) { const char *str = g_strdup_printf("%lu", ULONG_MAX); char f = 'X'; const char *endptr = &f; unsigned long res = 999; int err; err = qemu_strtoul(str, &endptr, 0, &res); g_assert_cmpint(err, ==, 0); g_assert_cmpint(res, ==, ULONG_MAX); g_assert(endptr == str + strlen(str)); }
2,136
qemu
fa66b909f382619da15f8c7e323145adfa94fdac
1
static int usb_msd_initfn(USBDevice *dev) { MSDState *s = DO_UPCAST(MSDState, dev, dev); if (!s->conf.dinfo || !s->conf.dinfo->bdrv) { error_report("usb-msd: drive property not set"); return -1; } s->dev.speed = USB_SPEED_FULL; scsi_bus_new(&s->bus, &s->dev.qdev, 0, 1, usb_msd_command_complete); s->scsi_dev = scsi_bus_legacy_add_drive(&s->bus, s->conf.dinfo, 0); s->bus.qbus.allow_hotplug = 0; usb_msd_handle_reset(dev); if (bdrv_key_required(s->conf.dinfo->bdrv)) { if (cur_mon) { monitor_read_bdrv_key_start(cur_mon, s->conf.dinfo->bdrv, usb_msd_password_cb, s); s->dev.auto_attach = 0; } else { autostart = 0; } } return 0; }
2,137
qemu
5c843af22604edecda10d4bb89d4eede9e1bd3d0
1
static int net_slirp_init(NetClientState *peer, const char *model, const char *name, int restricted, bool ipv4, const char *vnetwork, const char *vhost, bool ipv6, const char *vprefix6, int vprefix6_len, const char *vhost6, const char *vhostname, const char *tftp_export, const char *bootfile, const char *vdhcp_start, const char *vnameserver, const char *vnameserver6, const char *smb_export, const char *vsmbserver, const char **dnssearch) { /* default settings according to historic slirp */ struct in_addr net = { .s_addr = htonl(0x0a000200) }; /* 10.0.2.0 */ struct in_addr mask = { .s_addr = htonl(0xffffff00) }; /* 255.255.255.0 */ struct in_addr host = { .s_addr = htonl(0x0a000202) }; /* 10.0.2.2 */ struct in_addr dhcp = { .s_addr = htonl(0x0a00020f) }; /* 10.0.2.15 */ struct in_addr dns = { .s_addr = htonl(0x0a000203) }; /* 10.0.2.3 */ struct in6_addr ip6_prefix; struct in6_addr ip6_host; struct in6_addr ip6_dns; #ifndef _WIN32 struct in_addr smbsrv = { .s_addr = 0 }; #endif NetClientState *nc; SlirpState *s; char buf[20]; uint32_t addr; int shift; char *end; struct slirp_config_str *config; if (!ipv4 && (vnetwork || vhost || vnameserver)) { return -1; } if (!ipv6 && (vprefix6 || vhost6 || vnameserver6)) { return -1; } if (!ipv4 && !ipv6) { /* It doesn't make sense to disable both */ return -1; } if (!tftp_export) { tftp_export = legacy_tftp_prefix; } if (!bootfile) { bootfile = legacy_bootp_filename; } if (vnetwork) { if (get_str_sep(buf, sizeof(buf), &vnetwork, '/') < 0) { if (!inet_aton(vnetwork, &net)) { return -1; } addr = ntohl(net.s_addr); if (!(addr & 0x80000000)) { mask.s_addr = htonl(0xff000000); /* class A */ } else if ((addr & 0xfff00000) == 0xac100000) { mask.s_addr = htonl(0xfff00000); /* priv. 172.16.0.0/12 */ } else if ((addr & 0xc0000000) == 0x80000000) { mask.s_addr = htonl(0xffff0000); /* class B */ } else if ((addr & 0xffff0000) == 0xc0a80000) { mask.s_addr = htonl(0xffff0000); /* priv. 192.168.0.0/16 */ } else if ((addr & 0xffff0000) == 0xc6120000) { mask.s_addr = htonl(0xfffe0000); /* tests 198.18.0.0/15 */ } else if ((addr & 0xe0000000) == 0xe0000000) { mask.s_addr = htonl(0xffffff00); /* class C */ } else { mask.s_addr = htonl(0xfffffff0); /* multicast/reserved */ } } else { if (!inet_aton(buf, &net)) { return -1; } shift = strtol(vnetwork, &end, 10); if (*end != '\0') { if (!inet_aton(vnetwork, &mask)) { return -1; } } else if (shift < 4 || shift > 32) { return -1; } else { mask.s_addr = htonl(0xffffffff << (32 - shift)); } } net.s_addr &= mask.s_addr; host.s_addr = net.s_addr | (htonl(0x0202) & ~mask.s_addr); dhcp.s_addr = net.s_addr | (htonl(0x020f) & ~mask.s_addr); dns.s_addr = net.s_addr | (htonl(0x0203) & ~mask.s_addr); } if (vhost && !inet_aton(vhost, &host)) { return -1; } if ((host.s_addr & mask.s_addr) != net.s_addr) { return -1; } if (vnameserver && !inet_aton(vnameserver, &dns)) { return -1; } if ((dns.s_addr & mask.s_addr) != net.s_addr || dns.s_addr == host.s_addr) { return -1; } if (vdhcp_start && !inet_aton(vdhcp_start, &dhcp)) { return -1; } if ((dhcp.s_addr & mask.s_addr) != net.s_addr || dhcp.s_addr == host.s_addr || dhcp.s_addr == dns.s_addr) { return -1; } #ifndef _WIN32 if (vsmbserver && !inet_aton(vsmbserver, &smbsrv)) { return -1; } #endif #if defined(_WIN32) && (_WIN32_WINNT < 0x0600) /* No inet_pton helper before Vista... 
*/ if (vprefix6) { /* Unsupported */ return -1; } memset(&ip6_prefix, 0, sizeof(ip6_prefix)); ip6_prefix.s6_addr[0] = 0xfe; ip6_prefix.s6_addr[1] = 0xc0; #else if (!vprefix6) { vprefix6 = "fec0::"; } if (!inet_pton(AF_INET6, vprefix6, &ip6_prefix)) { return -1; } #endif if (!vprefix6_len) { vprefix6_len = 64; } if (vprefix6_len < 0 || vprefix6_len > 126) { return -1; } if (vhost6) { #if defined(_WIN32) && (_WIN32_WINNT < 0x0600) return -1; #else if (!inet_pton(AF_INET6, vhost6, &ip6_host)) { return -1; } if (!in6_equal_net(&ip6_prefix, &ip6_host, vprefix6_len)) { return -1; } #endif } else { ip6_host = ip6_prefix; ip6_host.s6_addr[15] |= 2; } if (vnameserver6) { #if defined(_WIN32) && (_WIN32_WINNT < 0x0600) return -1; #else if (!inet_pton(AF_INET6, vnameserver6, &ip6_dns)) { return -1; } if (!in6_equal_net(&ip6_prefix, &ip6_dns, vprefix6_len)) { return -1; } #endif } else { ip6_dns = ip6_prefix; ip6_dns.s6_addr[15] |= 3; } nc = qemu_new_net_client(&net_slirp_info, peer, model, name); snprintf(nc->info_str, sizeof(nc->info_str), "net=%s,restrict=%s", inet_ntoa(net), restricted ? "on" : "off"); s = DO_UPCAST(SlirpState, nc, nc); s->slirp = slirp_init(restricted, ipv4, net, mask, host, ipv6, ip6_prefix, vprefix6_len, ip6_host, vhostname, tftp_export, bootfile, dhcp, dns, ip6_dns, dnssearch, s); QTAILQ_INSERT_TAIL(&slirp_stacks, s, entry); for (config = slirp_configs; config; config = config->next) { if (config->flags & SLIRP_CFG_HOSTFWD) { if (slirp_hostfwd(s, config->str, config->flags & SLIRP_CFG_LEGACY) < 0) goto error; } else { if (slirp_guestfwd(s, config->str, config->flags & SLIRP_CFG_LEGACY) < 0) goto error; } } #ifndef _WIN32 if (!smb_export) { smb_export = legacy_smb_export; } if (smb_export) { if (slirp_smb(s, smb_export, smbsrv) < 0) goto error; } #endif s->exit_notifier.notify = slirp_smb_exit; qemu_add_exit_notifier(&s->exit_notifier); return 0; error: qemu_del_net_client(nc); return -1; }
2,138
qemu
57d1f6d7ce23e79a8ebe4a57bd2363b269b4664b
1
size_t qemu_fd_getpagesize(int fd) { #ifdef CONFIG_LINUX struct statfs fs; int ret; if (fd != -1) { do { ret = fstatfs(fd, &fs); } while (ret != 0 && errno == EINTR); if (ret == 0 && fs.f_type == HUGETLBFS_MAGIC) { return fs.f_bsize; } } #endif return getpagesize(); }
2,139
qemu
99e1dec06f343cefecae9baeec0aae2f99f552d5
1
static int cpudef_setfield(const char *name, const char *str, void *opaque) { x86_def_t *def = opaque; int err = 0; if (!strcmp(name, "name")) { def->name = g_strdup(str); } else if (!strcmp(name, "model_id")) { strncpy(def->model_id, str, sizeof (def->model_id)); } else if (!strcmp(name, "level")) { setscalar(&def->level, str, &err) } else if (!strcmp(name, "vendor")) { cpyid(&str[0], &def->vendor1); cpyid(&str[4], &def->vendor2); cpyid(&str[8], &def->vendor3); } else if (!strcmp(name, "family")) { setscalar(&def->family, str, &err) } else if (!strcmp(name, "model")) { setscalar(&def->model, str, &err) } else if (!strcmp(name, "stepping")) { setscalar(&def->stepping, str, &err) } else if (!strcmp(name, "feature_edx")) { setfeatures(&def->features, str, feature_name, &err); } else if (!strcmp(name, "feature_ecx")) { setfeatures(&def->ext_features, str, ext_feature_name, &err); } else if (!strcmp(name, "extfeature_edx")) { setfeatures(&def->ext2_features, str, ext2_feature_name, &err); } else if (!strcmp(name, "extfeature_ecx")) { setfeatures(&def->ext3_features, str, ext3_feature_name, &err); } else if (!strcmp(name, "xlevel")) { setscalar(&def->xlevel, str, &err) } else { fprintf(stderr, "error: unknown option [%s = %s]\n", name, str); return (1); } if (err) { fprintf(stderr, "error: bad option value [%s = %s]\n", name, str); return (1); } return (0); }
2,140
qemu
441692ddd8321d5e0f09b163e86410e578d87236
1
static inline int onenand_prog_spare(OneNANDState *s, int sec, int secn, void *src) { int result = 0; if (secn > 0) { const uint8_t *sp = (const uint8_t *)src; uint8_t *dp = 0, *dpp = 0; if (s->blk_cur) { dp = g_malloc(512); if (!dp || blk_read(s->blk_cur, s->secs_cur + (sec >> 5), dp, 1) < 0) { result = 1; } else { dpp = dp + ((sec & 31) << 4); } } else { if (sec + secn > s->secs_cur) { result = 1; } else { dpp = s->current + (s->secs_cur << 9) + (sec << 4); } } if (!result) { uint32_t i; for (i = 0; i < (secn << 4); i++) { dpp[i] &= sp[i]; } if (s->blk_cur) { result = blk_write(s->blk_cur, s->secs_cur + (sec >> 5), dp, 1) < 0; } } g_free(dp); } return result; }
2,141
FFmpeg
3d673078a03a3819df9dba7667f9e5d59b8487d0
1
static int ircam_read_header(AVFormatContext *s) { uint32_t magic, sample_rate, channels, tag; const AVCodecTag *tags; int le = -1, i; AVStream *st; magic = avio_rl32(s->pb); for (i = 0; i < 7; i++) { if (magic == table[i].magic) { le = table[i].is_le; break; } } if (le == 1) { sample_rate = av_int2float(avio_rl32(s->pb)); channels = avio_rl32(s->pb); tag = avio_rl32(s->pb); tags = ff_codec_ircam_le_tags; } else if (le == 0) { sample_rate = av_int2float(avio_rb32(s->pb)); channels = avio_rb32(s->pb); tag = avio_rb32(s->pb); tags = ff_codec_ircam_be_tags; } else { return AVERROR_INVALIDDATA; } if (!channels || !sample_rate) return AVERROR_INVALIDDATA; st = avformat_new_stream(s, NULL); if (!st) return AVERROR(ENOMEM); st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO; st->codecpar->channels = channels; st->codecpar->sample_rate = sample_rate; st->codecpar->codec_id = ff_codec_get_id(tags, tag); if (st->codecpar->codec_id == AV_CODEC_ID_NONE) { av_log(s, AV_LOG_ERROR, "unknown tag %X\n", tag); return AVERROR_INVALIDDATA; } st->codecpar->bits_per_coded_sample = av_get_bits_per_sample(st->codecpar->codec_id); st->codecpar->block_align = st->codecpar->bits_per_coded_sample * st->codecpar->channels / 8; avpriv_set_pts_info(st, 64, 1, st->codecpar->sample_rate); avio_skip(s->pb, 1008); return 0; }
2,144
qemu
04920fc0faa4760f9c4fc0e73b992b768099be70
1
static void pc_init_pci_1_6(QEMUMachineInitArgs *args) { has_pci_info = false; pc_init_pci(args); }
2,145
qemu
5839e53bbc0fec56021d758aab7610df421ed8c8
1
static char **breakline(char *input, int *count) { int c = 0; char *p; char **rval = g_malloc0(sizeof(char *)); char **tmp; while (rval && (p = qemu_strsep(&input, " ")) != NULL) { if (!*p) { continue; } c++; tmp = g_realloc(rval, sizeof(*rval) * (c + 1)); if (!tmp) { g_free(rval); rval = NULL; c = 0; break; } else { rval = tmp; } rval[c - 1] = p; rval[c] = NULL; } *count = c; return rval; }
2,146
qemu
01960e6d21dcfbfc8a03d8fd6284c448cf75865b
1
int kvm_arch_release_virq_post(int virq) { MSIRouteEntry *entry, *next; QLIST_FOREACH_SAFE(entry, &msi_route_list, list, next) { if (entry->virq == virq) { trace_kvm_x86_remove_msi_route(virq); QLIST_REMOVE(entry, list); break; } } return 0; }
2,149
qemu
dc5999787181f6d090217f45570067e55333835b
1
monitor_qapi_event_queue(QAPIEvent event, QDict *qdict, Error **errp) { MonitorQAPIEventConf *evconf; MonitorQAPIEventState *evstate; assert(event < QAPI_EVENT__MAX); evconf = &monitor_qapi_event_conf[event]; trace_monitor_protocol_event_queue(event, qdict, evconf->rate); qemu_mutex_lock(&monitor_lock); if (!evconf->rate) { /* Unthrottled event */ monitor_qapi_event_emit(event, qdict); } else { QDict *data = qobject_to_qdict(qdict_get(qdict, "data")); MonitorQAPIEventState key = { .event = event, .data = data }; evstate = g_hash_table_lookup(monitor_qapi_event_state, &key); assert(!evstate || timer_pending(evstate->timer)); if (evstate) { /* * Timer is pending for (at least) evconf->rate ns after * last send. Store event for sending when timer fires, * replacing a prior stored event if any. */ QDECREF(evstate->qdict); evstate->qdict = qdict; QINCREF(evstate->qdict); } else { /* * Last send was (at least) evconf->rate ns ago. * Send immediately, and arm the timer to call * monitor_qapi_event_handler() in evconf->rate ns. Any * events arriving before then will be delayed until then. */ int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); monitor_qapi_event_emit(event, qdict); evstate = g_new(MonitorQAPIEventState, 1); evstate->event = event; evstate->data = data; QINCREF(evstate->data); evstate->qdict = NULL; evstate->timer = timer_new_ns(QEMU_CLOCK_REALTIME, monitor_qapi_event_handler, evstate); g_hash_table_add(monitor_qapi_event_state, evstate); timer_mod_ns(evstate->timer, now + evconf->rate); } } qemu_mutex_unlock(&monitor_lock); }
2,150
qemu
0b8b8753e4d94901627b3e86431230f2319215c4
1
void pdu_submit(V9fsPDU *pdu) { Coroutine *co; CoroutineEntry *handler; V9fsState *s = pdu->s; if (pdu->id >= ARRAY_SIZE(pdu_co_handlers) || (pdu_co_handlers[pdu->id] == NULL)) { handler = v9fs_op_not_supp; } else { handler = pdu_co_handlers[pdu->id]; } if (is_ro_export(&s->ctx) && !is_read_only_op(pdu)) { handler = v9fs_fs_ro; } co = qemu_coroutine_create(handler); qemu_coroutine_enter(co, pdu); }
2,151
qemu
60fe637bf0e4d7989e21e50f52526444765c63b4
1
static ssize_t socket_writev_buffer(void *opaque, struct iovec *iov, int iovcnt, int64_t pos) { QEMUFileSocket *s = opaque; ssize_t len; ssize_t size = iov_size(iov, iovcnt); len = iov_send(s->fd, iov, iovcnt, 0, size); if (len < size) { len = -socket_error(); } return len; }
2,152
qemu
f8ed85ac992c48814d916d5df4d44f9a971c5de4
1
static void assign_storage(SCLPDevice *sclp, SCCB *sccb) { MemoryRegion *mr = NULL; uint64_t this_subregion_size; AssignStorage *assign_info = (AssignStorage *) sccb; sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev(); ram_addr_t assign_addr; MemoryRegion *sysmem = get_system_memory(); if (!mhd) { sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND); return; } assign_addr = (assign_info->rn - 1) * mhd->rzm; if ((assign_addr % MEM_SECTION_SIZE == 0) && (assign_addr >= mhd->padded_ram_size)) { /* Re-use existing memory region if found */ mr = memory_region_find(sysmem, assign_addr, 1).mr; memory_region_unref(mr); if (!mr) { MemoryRegion *standby_ram = g_new(MemoryRegion, 1); /* offset to align to standby_subregion_size for allocation */ ram_addr_t offset = assign_addr - (assign_addr - mhd->padded_ram_size) % mhd->standby_subregion_size; /* strlen("standby.ram") + 4 (Max of KVM_MEMORY_SLOTS) + NULL */ char id[16]; snprintf(id, 16, "standby.ram%d", (int)((offset - mhd->padded_ram_size) / mhd->standby_subregion_size) + 1); /* Allocate a subregion of the calculated standby_subregion_size */ if (offset + mhd->standby_subregion_size > mhd->padded_ram_size + mhd->standby_mem_size) { this_subregion_size = mhd->padded_ram_size + mhd->standby_mem_size - offset; } else { this_subregion_size = mhd->standby_subregion_size; } memory_region_init_ram(standby_ram, NULL, id, this_subregion_size, &error_abort); /* This is a hack to make memory hotunplug work again. Once we have * subdevices, we have to unparent them when unassigning memory, * instead of doing it via the ref count of the MemoryRegion. */ object_ref(OBJECT(standby_ram)); object_unparent(OBJECT(standby_ram)); vmstate_register_ram_global(standby_ram); memory_region_add_subregion(sysmem, offset, standby_ram); } /* The specified subregion is no longer in standby */ mhd->standby_state_map[(assign_addr - mhd->padded_ram_size) / MEM_SECTION_SIZE] = 1; } sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION); }
2,153
qemu
2d25964d1831c99d54981e8b615eba5dd6a63e36
1
static int curl_open(BlockDriverState *bs, QDict *options, int flags, Error **errp) { BDRVCURLState *s = bs->opaque; CURLState *state = NULL; QemuOpts *opts; Error *local_err = NULL; const char *file; const char *cookie; const char *cookie_secret; double d; const char *secretid; const char *protocol_delimiter; static int inited = 0; if (flags & BDRV_O_RDWR) { error_setg(errp, "curl block device does not support writes"); return -EROFS; } qemu_mutex_init(&s->mutex); opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort); qemu_opts_absorb_qdict(opts, options, &local_err); if (local_err) { error_propagate(errp, local_err); goto out_noclean; } s->readahead_size = qemu_opt_get_size(opts, CURL_BLOCK_OPT_READAHEAD, READ_AHEAD_DEFAULT); if ((s->readahead_size & 0x1ff) != 0) { error_setg(errp, "HTTP_READAHEAD_SIZE %zd is not a multiple of 512", s->readahead_size); goto out_noclean; } s->timeout = qemu_opt_get_number(opts, CURL_BLOCK_OPT_TIMEOUT, CURL_TIMEOUT_DEFAULT); if (s->timeout > CURL_TIMEOUT_MAX) { error_setg(errp, "timeout parameter is too large or negative"); goto out_noclean; } s->sslverify = qemu_opt_get_bool(opts, CURL_BLOCK_OPT_SSLVERIFY, true); cookie = qemu_opt_get(opts, CURL_BLOCK_OPT_COOKIE); cookie_secret = qemu_opt_get(opts, CURL_BLOCK_OPT_COOKIE_SECRET); if (cookie && cookie_secret) { error_setg(errp, "curl driver cannot handle both cookie and cookie secret"); goto out_noclean; } if (cookie_secret) { s->cookie = qcrypto_secret_lookup_as_utf8(cookie_secret, errp); if (!s->cookie) { goto out_noclean; } } else { s->cookie = g_strdup(cookie); } file = qemu_opt_get(opts, CURL_BLOCK_OPT_URL); if (file == NULL) { error_setg(errp, "curl block driver requires an 'url' option"); goto out_noclean; } if (!strstart(file, bs->drv->protocol_name, &protocol_delimiter) || !strstart(protocol_delimiter, "://", NULL)) { error_setg(errp, "%s curl driver cannot handle the URL '%s' (does not " "start with '%s://')", bs->drv->protocol_name, file, bs->drv->protocol_name); goto out_noclean; } s->username = g_strdup(qemu_opt_get(opts, CURL_BLOCK_OPT_USERNAME)); secretid = qemu_opt_get(opts, CURL_BLOCK_OPT_PASSWORD_SECRET); if (secretid) { s->password = qcrypto_secret_lookup_as_utf8(secretid, errp); if (!s->password) { goto out_noclean; } } s->proxyusername = g_strdup( qemu_opt_get(opts, CURL_BLOCK_OPT_PROXY_USERNAME)); secretid = qemu_opt_get(opts, CURL_BLOCK_OPT_PROXY_PASSWORD_SECRET); if (secretid) { s->proxypassword = qcrypto_secret_lookup_as_utf8(secretid, errp); if (!s->proxypassword) { goto out_noclean; } } if (!inited) { curl_global_init(CURL_GLOBAL_ALL); inited = 1; } DPRINTF("CURL: Opening %s\n", file); QSIMPLEQ_INIT(&s->free_state_waitq); s->aio_context = bdrv_get_aio_context(bs); s->url = g_strdup(file); qemu_mutex_lock(&s->mutex); state = curl_find_state(s); qemu_mutex_unlock(&s->mutex); if (!state) { goto out_noclean; } // Get file size if (curl_init_state(s, state) < 0) { goto out; } s->accept_range = false; curl_easy_setopt(state->curl, CURLOPT_NOBODY, 1); curl_easy_setopt(state->curl, CURLOPT_HEADERFUNCTION, curl_header_cb); curl_easy_setopt(state->curl, CURLOPT_HEADERDATA, s); if (curl_easy_perform(state->curl)) goto out; if (curl_easy_getinfo(state->curl, CURLINFO_CONTENT_LENGTH_DOWNLOAD, &d)) { goto out; } /* Prior CURL 7.19.4 return value of 0 could mean that the file size is not * know or the size is zero. From 7.19.4 CURL returns -1 if size is not * known and zero if it is realy zero-length file. 
*/ #if LIBCURL_VERSION_NUM >= 0x071304 if (d < 0) { pstrcpy(state->errmsg, CURL_ERROR_SIZE, "Server didn't report file size."); goto out; } #else if (d <= 0) { pstrcpy(state->errmsg, CURL_ERROR_SIZE, "Unknown file size or zero-length file."); goto out; } #endif s->len = d; if ((!strncasecmp(s->url, "http://", strlen("http://")) || !strncasecmp(s->url, "https://", strlen("https://"))) && !s->accept_range) { pstrcpy(state->errmsg, CURL_ERROR_SIZE, "Server does not support 'range' (byte ranges)."); goto out; } DPRINTF("CURL: Size = %" PRIu64 "\n", s->len); qemu_mutex_lock(&s->mutex); curl_clean_state(state); qemu_mutex_unlock(&s->mutex); curl_easy_cleanup(state->curl); state->curl = NULL; curl_attach_aio_context(bs, bdrv_get_aio_context(bs)); qemu_opts_del(opts); return 0; out: error_setg(errp, "CURL: Error opening file: %s", state->errmsg); curl_easy_cleanup(state->curl); state->curl = NULL; out_noclean: qemu_mutex_destroy(&s->mutex); g_free(s->cookie); g_free(s->url); qemu_opts_del(opts); return -EINVAL; }
2,154
FFmpeg
ef363ebd596da18f889a7d4845023a23dfac84c9
0
static void mp3_write_xing(AVFormatContext *s) { MP3Context *mp3 = s->priv_data; AVCodecContext *codec = s->streams[mp3->audio_stream_idx]->codec; int32_t header; MPADecodeHeader mpah; int srate_idx, i, channels; int bitrate_idx; int best_bitrate_idx; int best_bitrate_error = INT_MAX; int xing_offset; int ver = 0; int lsf, bytes_needed; if (!s->pb->seekable || !mp3->write_xing) return; for (i = 0; i < FF_ARRAY_ELEMS(avpriv_mpa_freq_tab); i++) { const uint16_t base_freq = avpriv_mpa_freq_tab[i]; if (codec->sample_rate == base_freq) ver = 0x3; // MPEG 1 else if (codec->sample_rate == base_freq / 2) ver = 0x2; // MPEG 2 else if (codec->sample_rate == base_freq / 4) ver = 0x0; // MPEG 2.5 else continue; srate_idx = i; break; } if (i == FF_ARRAY_ELEMS(avpriv_mpa_freq_tab)) { av_log(s, AV_LOG_WARNING, "Unsupported sample rate, not writing Xing " "header.\n"); return; } switch (codec->channels) { case 1: channels = MPA_MONO; break; case 2: channels = MPA_STEREO; break; default: av_log(s, AV_LOG_WARNING, "Unsupported number of channels, " "not writing Xing header.\n"); return; } /* dummy MPEG audio header */ header = 0xff << 24; // sync header |= (0x7 << 5 | ver << 3 | 0x1 << 1 | 0x1) << 16; // sync/audio-version/layer 3/no crc*/ header |= (srate_idx << 2) << 8; header |= channels << 6; lsf = !((header & (1 << 20) && header & (1 << 19))); xing_offset = xing_offtbl[ver != 3][channels == 1]; bytes_needed = 4 // header + xing_offset + 4 // xing tag + 4 // frames/size/toc flags + 4 // frames + 4 // size + XING_TOC_SIZE; // toc for (bitrate_idx = 1; bitrate_idx < 15; bitrate_idx++) { int bit_rate = 1000 * avpriv_mpa_bitrate_tab[lsf][3 - 1][bitrate_idx]; int error = FFABS(bit_rate - codec->bit_rate); if (error < best_bitrate_error){ best_bitrate_error = error; best_bitrate_idx = bitrate_idx; } } for (bitrate_idx = best_bitrate_idx; bitrate_idx < 15; bitrate_idx++) { int32_t mask = bitrate_idx << (4 + 8); header |= mask; avpriv_mpegaudio_decode_header(&mpah, header); if (bytes_needed <= mpah.frame_size) break; header &= ~mask; } avio_wb32(s->pb, header); avpriv_mpegaudio_decode_header(&mpah, header); av_assert0(mpah.frame_size >= XING_MAX_SIZE); ffio_fill(s->pb, 0, xing_offset); mp3->xing_offset = avio_tell(s->pb); ffio_wfourcc(s->pb, "Xing"); avio_wb32(s->pb, 0x01 | 0x02 | 0x04); // frames / size / TOC mp3->size = mpah.frame_size; mp3->want = 1; avio_wb32(s->pb, 0); // frames avio_wb32(s->pb, 0); // size // TOC for (i = 0; i < XING_TOC_SIZE; i++) avio_w8(s->pb, 255 * i / XING_TOC_SIZE); ffio_fill(s->pb, 0, mpah.frame_size - bytes_needed); }
2,155
FFmpeg
d2bc04738b842169b6e32160ffa81db9c868eec4
0
static int flashsv_encode_frame(AVCodecContext *avctx, uint8_t *buf, int buf_size, void *data) { FlashSVContext * const s = avctx->priv_data; AVFrame *pict = data; AVFrame * const p = &s->frame; int res; int I_frame = 0; int opt_w, opt_h; *p = *pict; /* First frame needs to be a keyframe */ if (avctx->frame_number == 0) { s->previous_frame = av_mallocz(p->linesize[0]*s->image_height); if (!s->previous_frame) { av_log(avctx, AV_LOG_ERROR, "Memory allocation failed.\n"); return -1; } I_frame = 1; } /* Check the placement of keyframes */ if (avctx->gop_size > 0) { if (avctx->frame_number >= s->last_key_frame + avctx->gop_size) { I_frame = 1; } } #if 0 int w, h; int optim_sizes[16][16]; int smallest_size; //Try all possible combinations and store the encoded frame sizes for (w=1 ; w<17 ; w++) { for (h=1 ; h<17 ; h++) { optim_sizes[w-1][h-1] = encode_bitstream(s, p, s->encbuffer, s->image_width*s->image_height*4, w*16, h*16, s->previous_frame); //av_log(avctx, AV_LOG_ERROR, "[%d][%d]size = %d\n",w,h,optim_sizes[w-1][h-1]); } } //Search for the smallest framesize and encode the frame with those parameters smallest_size=optim_sizes[0][0]; opt_w = 0; opt_h = 0; for (w=0 ; w<16 ; w++) { for (h=0 ; h<16 ; h++) { if (optim_sizes[w][h] < smallest_size) { smallest_size = optim_sizes[w][h]; opt_w = w; opt_h = h; } } } res = encode_bitstream(s, p, buf, buf_size, (opt_w+1)*16, (opt_h+1)*16, s->previous_frame); av_log(avctx, AV_LOG_ERROR, "[%d][%d]optimal size = %d, res = %d|\n", opt_w, opt_h, smallest_size, res); if (buf_size < res) av_log(avctx, AV_LOG_ERROR, "buf_size %d < res %d\n", buf_size, res); #else opt_w=1; opt_h=1; if (buf_size < s->image_width*s->image_height*3) { //Conservative upper bound check for compressed data av_log(avctx, AV_LOG_ERROR, "buf_size %d < %d\n", buf_size, s->image_width*s->image_height*3); return -1; } res = encode_bitstream(s, p, buf, buf_size, opt_w*16, opt_h*16, s->previous_frame, &I_frame); #endif //save the current frame memcpy(s->previous_frame, p->data[0], s->image_height*p->linesize[0]); //mark the frame type so the muxer can mux it correctly if (I_frame) { p->pict_type = FF_I_TYPE; p->key_frame = 1; s->last_key_frame = avctx->frame_number; av_log(avctx, AV_LOG_DEBUG, "Inserting key frame at frame %d\n",avctx->frame_number); } else { p->pict_type = FF_P_TYPE; p->key_frame = 0; } avctx->coded_frame = p; return res; }
2,156
FFmpeg
745f4bcc2c1deaa562cce01fa52e38b0220aed31
0
static void draw_bar(TestSourceContext *test, const uint8_t color[4], int x, int y, int w, int h, AVFrame *frame) { const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format); uint8_t *p, *p0; int plane; x = FFMIN(x, test->w - 1); y = FFMIN(y, test->h - 1); w = FFMIN(w, test->w - x); h = FFMIN(h, test->h - y); av_assert0(x + w <= test->w); av_assert0(y + h <= test->h); for (plane = 0; frame->data[plane]; plane++) { const int c = color[plane]; const int linesize = frame->linesize[plane]; int i, px, py, pw, ph; if (plane == 1 || plane == 2) { px = x >> desc->log2_chroma_w; pw = AV_CEIL_RSHIFT(w, desc->log2_chroma_w); py = y >> desc->log2_chroma_h; ph = AV_CEIL_RSHIFT(h, desc->log2_chroma_h); } else { px = x; pw = w; py = y; ph = h; } p0 = p = frame->data[plane] + py * linesize + px; memset(p, c, pw); p += linesize; for (i = 1; i < ph; i++, p += linesize) memcpy(p, p0, pw); } }
2,159
qemu
a8170e5e97ad17ca169c64ba87ae2f53850dab4c
0
static void set_kernel_args_old(const struct arm_boot_info *info) { target_phys_addr_t p; const char *s; int initrd_size = info->initrd_size; target_phys_addr_t base = info->loader_start; /* see linux/include/asm-arm/setup.h */ p = base + KERNEL_ARGS_ADDR; /* page_size */ WRITE_WORD(p, 4096); /* nr_pages */ WRITE_WORD(p, info->ram_size / 4096); /* ramdisk_size */ WRITE_WORD(p, 0); #define FLAG_READONLY 1 #define FLAG_RDLOAD 4 #define FLAG_RDPROMPT 8 /* flags */ WRITE_WORD(p, FLAG_READONLY | FLAG_RDLOAD | FLAG_RDPROMPT); /* rootdev */ WRITE_WORD(p, (31 << 8) | 0); /* /dev/mtdblock0 */ /* video_num_cols */ WRITE_WORD(p, 0); /* video_num_rows */ WRITE_WORD(p, 0); /* video_x */ WRITE_WORD(p, 0); /* video_y */ WRITE_WORD(p, 0); /* memc_control_reg */ WRITE_WORD(p, 0); /* unsigned char sounddefault */ /* unsigned char adfsdrives */ /* unsigned char bytes_per_char_h */ /* unsigned char bytes_per_char_v */ WRITE_WORD(p, 0); /* pages_in_bank[4] */ WRITE_WORD(p, 0); WRITE_WORD(p, 0); WRITE_WORD(p, 0); WRITE_WORD(p, 0); /* pages_in_vram */ WRITE_WORD(p, 0); /* initrd_start */ if (initrd_size) WRITE_WORD(p, info->loader_start + INITRD_LOAD_ADDR); else WRITE_WORD(p, 0); /* initrd_size */ WRITE_WORD(p, initrd_size); /* rd_start */ WRITE_WORD(p, 0); /* system_rev */ WRITE_WORD(p, 0); /* system_serial_low */ WRITE_WORD(p, 0); /* system_serial_high */ WRITE_WORD(p, 0); /* mem_fclk_21285 */ WRITE_WORD(p, 0); /* zero unused fields */ while (p < base + KERNEL_ARGS_ADDR + 256 + 1024) { WRITE_WORD(p, 0); } s = info->kernel_cmdline; if (s) { cpu_physical_memory_write(p, (void *)s, strlen(s) + 1); } else { WRITE_WORD(p, 0); } }
2,161
qemu
621ff94d5074d88253a5818c6b9c4db718fbfc65
0
static void virtio_ccw_net_realize(VirtioCcwDevice *ccw_dev, Error **errp) { DeviceState *qdev = DEVICE(ccw_dev); VirtIONetCcw *dev = VIRTIO_NET_CCW(ccw_dev); DeviceState *vdev = DEVICE(&dev->vdev); Error *err = NULL; virtio_net_set_netclient_name(&dev->vdev, qdev->id, object_get_typename(OBJECT(qdev))); qdev_set_parent_bus(vdev, BUS(&ccw_dev->bus)); object_property_set_bool(OBJECT(vdev), true, "realized", &err); if (err) { error_propagate(errp, err); } }
2,162
qemu
4a1418e07bdcfaa3177739e04707ecaec75d89e1
0
uint8_t cpu_inb(CPUState *env, pio_addr_t addr) { uint8_t val; val = ioport_read(0, addr); LOG_IOPORT("inb : %04"FMT_pioaddr" %02"PRIx8"\n", addr, val); #ifdef CONFIG_KQEMU if (env) env->last_io_time = cpu_get_time_fast(); #endif return val; }
2,163
qemu
ec0e68ef1da316b3ead1943d8f607cc68b13e0d1
0
static void zynq_init(QEMUMachineInitArgs *args) { ram_addr_t ram_size = args->ram_size; const char *cpu_model = args->cpu_model; const char *kernel_filename = args->kernel_filename; const char *kernel_cmdline = args->kernel_cmdline; const char *initrd_filename = args->initrd_filename; ObjectClass *cpu_oc; ARMCPU *cpu; MemoryRegion *address_space_mem = get_system_memory(); MemoryRegion *ext_ram = g_new(MemoryRegion, 1); MemoryRegion *ocm_ram = g_new(MemoryRegion, 1); DeviceState *dev; SysBusDevice *busdev; qemu_irq pic[64]; NICInfo *nd; Error *err = NULL; int n; if (!cpu_model) { cpu_model = "cortex-a9"; } cpu_oc = cpu_class_by_name(TYPE_ARM_CPU, cpu_model); cpu = ARM_CPU(object_new(object_class_get_name(cpu_oc))); object_property_set_int(OBJECT(cpu), MPCORE_PERIPHBASE, "reset-cbar", &err); if (err) { error_report("%s", error_get_pretty(err)); exit(1); } object_property_set_bool(OBJECT(cpu), true, "realized", &err); if (err) { error_report("%s", error_get_pretty(err)); exit(1); } /* max 2GB ram */ if (ram_size > 0x80000000) { ram_size = 0x80000000; } /* DDR remapped to address zero. */ memory_region_init_ram(ext_ram, NULL, "zynq.ext_ram", ram_size); vmstate_register_ram_global(ext_ram); memory_region_add_subregion(address_space_mem, 0, ext_ram); /* 256K of on-chip memory */ memory_region_init_ram(ocm_ram, NULL, "zynq.ocm_ram", 256 << 10); vmstate_register_ram_global(ocm_ram); memory_region_add_subregion(address_space_mem, 0xFFFC0000, ocm_ram); DriveInfo *dinfo = drive_get(IF_PFLASH, 0, 0); /* AMD */ pflash_cfi02_register(0xe2000000, NULL, "zynq.pflash", FLASH_SIZE, dinfo ? dinfo->bdrv : NULL, FLASH_SECTOR_SIZE, FLASH_SIZE/FLASH_SECTOR_SIZE, 1, 1, 0x0066, 0x0022, 0x0000, 0x0000, 0x0555, 0x2aa, 0); dev = qdev_create(NULL, "xilinx,zynq_slcr"); qdev_init_nofail(dev); sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0xF8000000); dev = qdev_create(NULL, "a9mpcore_priv"); qdev_prop_set_uint32(dev, "num-cpu", 1); qdev_init_nofail(dev); busdev = SYS_BUS_DEVICE(dev); sysbus_mmio_map(busdev, 0, MPCORE_PERIPHBASE); sysbus_connect_irq(busdev, 0, qdev_get_gpio_in(DEVICE(cpu), ARM_CPU_IRQ)); for (n = 0; n < 64; n++) { pic[n] = qdev_get_gpio_in(dev, n); } zynq_init_spi_flashes(0xE0006000, pic[58-IRQ_OFFSET], false); zynq_init_spi_flashes(0xE0007000, pic[81-IRQ_OFFSET], false); zynq_init_spi_flashes(0xE000D000, pic[51-IRQ_OFFSET], true); sysbus_create_simple("xlnx,ps7-usb", 0xE0002000, pic[53-IRQ_OFFSET]); sysbus_create_simple("xlnx,ps7-usb", 0xE0003000, pic[76-IRQ_OFFSET]); sysbus_create_simple("cadence_uart", 0xE0000000, pic[59-IRQ_OFFSET]); sysbus_create_simple("cadence_uart", 0xE0001000, pic[82-IRQ_OFFSET]); sysbus_create_varargs("cadence_ttc", 0xF8001000, pic[42-IRQ_OFFSET], pic[43-IRQ_OFFSET], pic[44-IRQ_OFFSET], NULL); sysbus_create_varargs("cadence_ttc", 0xF8002000, pic[69-IRQ_OFFSET], pic[70-IRQ_OFFSET], pic[71-IRQ_OFFSET], NULL); for (n = 0; n < nb_nics; n++) { nd = &nd_table[n]; if (n == 0) { gem_init(nd, 0xE000B000, pic[54-IRQ_OFFSET]); } else if (n == 1) { gem_init(nd, 0xE000C000, pic[77-IRQ_OFFSET]); } } dev = qdev_create(NULL, "generic-sdhci"); qdev_init_nofail(dev); sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0xE0100000); sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[56-IRQ_OFFSET]); dev = qdev_create(NULL, "generic-sdhci"); qdev_init_nofail(dev); sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0xE0101000); sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[79-IRQ_OFFSET]); dev = qdev_create(NULL, "pl330"); qdev_prop_set_uint8(dev, "num_chnls", 8); qdev_prop_set_uint8(dev, "num_periph_req", 4); 
qdev_prop_set_uint8(dev, "num_events", 16); qdev_prop_set_uint8(dev, "data_width", 64); qdev_prop_set_uint8(dev, "wr_cap", 8); qdev_prop_set_uint8(dev, "wr_q_dep", 16); qdev_prop_set_uint8(dev, "rd_cap", 8); qdev_prop_set_uint8(dev, "rd_q_dep", 16); qdev_prop_set_uint16(dev, "data_buffer_dep", 256); qdev_init_nofail(dev); busdev = SYS_BUS_DEVICE(dev); sysbus_mmio_map(busdev, 0, 0xF8003000); sysbus_connect_irq(busdev, 0, pic[45-IRQ_OFFSET]); /* abort irq line */ for (n = 0; n < 8; ++n) { /* event irqs */ sysbus_connect_irq(busdev, n + 1, pic[dma_irqs[n] - IRQ_OFFSET]); } zynq_binfo.ram_size = ram_size; zynq_binfo.kernel_filename = kernel_filename; zynq_binfo.kernel_cmdline = kernel_cmdline; zynq_binfo.initrd_filename = initrd_filename; zynq_binfo.nb_cpus = 1; zynq_binfo.board_id = 0xd32; zynq_binfo.loader_start = 0; arm_load_kernel(ARM_CPU(first_cpu), &zynq_binfo); }
2,164
FFmpeg
bcd7bf7eeb09a395cc01698842d1b8be9af483fc
0
static void avc_wgt_16width_msa(uint8_t *data, int32_t stride, int32_t height, int32_t log2_denom, int32_t src_weight, int32_t offset_in) { uint8_t cnt; v16u8 zero = { 0 }; v16u8 src0, src1, src2, src3; v16u8 dst0, dst1, dst2, dst3; v8u16 src0_l, src1_l, src2_l, src3_l; v8u16 src0_r, src1_r, src2_r, src3_r; v8u16 temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7; v8u16 wgt, denom, offset; offset_in <<= (log2_denom); if (log2_denom) { offset_in += (1 << (log2_denom - 1)); } wgt = (v8u16) __msa_fill_h(src_weight); offset = (v8u16) __msa_fill_h(offset_in); denom = (v8u16) __msa_fill_h(log2_denom); for (cnt = height / 4; cnt--;) { LOAD_4VECS_UB(data, stride, src0, src1, src2, src3); ILV_B_LRLR_UH(src0, zero, src1, zero, src0_l, src0_r, src1_l, src1_r); ILV_B_LRLR_UH(src2, zero, src3, zero, src2_l, src2_r, src3_l, src3_r); temp0 = wgt * src0_r; temp1 = wgt * src0_l; temp2 = wgt * src1_r; temp3 = wgt * src1_l; temp4 = wgt * src2_r; temp5 = wgt * src2_l; temp6 = wgt * src3_r; temp7 = wgt * src3_l; ADDS_S_H_4VECS_UH(temp0, offset, temp1, offset, temp2, offset, temp3, offset, temp0, temp1, temp2, temp3); ADDS_S_H_4VECS_UH(temp4, offset, temp5, offset, temp6, offset, temp7, offset, temp4, temp5, temp6, temp7); MAXI_S_H_4VECS_UH(temp0, temp1, temp2, temp3, 0); MAXI_S_H_4VECS_UH(temp4, temp5, temp6, temp7, 0); SRL_H_4VECS_UH(temp0, temp1, temp2, temp3, temp0, temp1, temp2, temp3, denom); SRL_H_4VECS_UH(temp4, temp5, temp6, temp7, temp4, temp5, temp6, temp7, denom); SAT_U_H_4VECS_UH(temp0, temp1, temp2, temp3, 7); SAT_U_H_4VECS_UH(temp4, temp5, temp6, temp7, 7); PCKEV_B_4VECS_UB(temp1, temp3, temp5, temp7, temp0, temp2, temp4, temp6, dst0, dst1, dst2, dst3); STORE_4VECS_UB(data, stride, dst0, dst1, dst2, dst3); data += 4 * stride; } }
2,165
qemu
c76c8416be5631dfdbd13799d3c67ad670637155
0
static void mem_info_pae32(Monitor *mon, CPUState *env) { unsigned int l1, l2, l3; int prot, last_prot; uint64_t pdpe, pde, pte; uint64_t pdp_addr, pd_addr, pt_addr; target_phys_addr_t start, end; pdp_addr = env->cr[3] & ~0x1f; last_prot = 0; start = -1; for (l1 = 0; l1 < 4; l1++) { cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8); pdpe = le64_to_cpu(pdpe); end = l1 << 30; if (pdpe & PG_PRESENT_MASK) { pd_addr = pdpe & 0x3fffffffff000ULL; for (l2 = 0; l2 < 512; l2++) { cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8); pde = le64_to_cpu(pde); end = (l1 << 30) + (l2 << 21); if (pde & PG_PRESENT_MASK) { if (pde & PG_PSE_MASK) { prot = pde & (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK); mem_print(mon, &start, &last_prot, end, prot); } else { pt_addr = pde & 0x3fffffffff000ULL; for (l3 = 0; l3 < 512; l3++) { cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8); pte = le64_to_cpu(pte); end = (l1 << 30) + (l2 << 21) + (l3 << 12); if (pte & PG_PRESENT_MASK) { prot = pte & (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK); } else { prot = 0; } mem_print(mon, &start, &last_prot, end, prot); } } } else { prot = 0; mem_print(mon, &start, &last_prot, end, prot); } } } else { prot = 0; mem_print(mon, &start, &last_prot, end, prot); } } /* Flush last range */ mem_print(mon, &start, &last_prot, (target_phys_addr_t)1 << 32, 0); }
2,166
qemu
7385aed20db5d83979f683b9d0048674411e963c
0
int64_t helper_fdtox(CPUSPARCState *env, float64 src) { int64_t ret; clear_float_exceptions(env); ret = float64_to_int64_round_to_zero(src, &env->fp_status); check_ieee_exceptions(env); return ret; }
2,167
qemu
42a268c241183877192c376d03bd9b6d527407c7
0
static inline void gen_op_jz_ecx(TCGMemOp size, int label1) { tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]); gen_extu(size, cpu_tmp0); tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1); }
2,168
qemu
e3807054e20fb3b94d18cb751c437ee2f43b6fac
0
void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque) { RAMBlock *block; rcu_read_lock(); QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { func(block->host, block->offset, block->used_length, opaque); } rcu_read_unlock(); }
2,169
qemu
a8170e5e97ad17ca169c64ba87ae2f53850dab4c
0
static uint64_t exynos4210_pwm_read(void *opaque, target_phys_addr_t offset, unsigned size) { Exynos4210PWMState *s = (Exynos4210PWMState *)opaque; uint32_t value = 0; int index; switch (offset) { case TCFG0: case TCFG1: index = (offset - TCFG0) >> 2; value = s->reg_tcfg[index]; break; case TCON: value = s->reg_tcon; break; case TCNTB0: case TCNTB1: case TCNTB2: case TCNTB3: case TCNTB4: index = (offset - TCNTB0) / 0xC; value = s->timer[index].reg_tcntb; break; case TCMPB0: case TCMPB1: case TCMPB2: case TCMPB3: index = (offset - TCMPB0) / 0xC; value = s->timer[index].reg_tcmpb; break; case TCNTO0: case TCNTO1: case TCNTO2: case TCNTO3: case TCNTO4: index = (offset == TCNTO4) ? 4 : (offset - TCNTO0) / 0xC; value = ptimer_get_count(s->timer[index].ptimer); break; case TINT_CSTAT: value = s->reg_tint_cstat; break; default: fprintf(stderr, "[exynos4210.pwm: bad read offset " TARGET_FMT_plx "]\n", offset); break; } return value; }
2,170
qemu
b09ea7d55cfab5a75912bb56ed1fcd757604a759
0
static void apic_init_ipi(APICState *s) { int i; s->tpr = 0; s->spurious_vec = 0xff; s->log_dest = 0; s->dest_mode = 0xf; memset(s->isr, 0, sizeof(s->isr)); memset(s->tmr, 0, sizeof(s->tmr)); memset(s->irr, 0, sizeof(s->irr)); for(i = 0; i < APIC_LVT_NB; i++) s->lvt[i] = 1 << 16; /* mask LVT */ s->esr = 0; memset(s->icr, 0, sizeof(s->icr)); s->divide_conf = 0; s->count_shift = 0; s->initial_count = 0; s->initial_count_load_time = 0; s->next_time = 0; cpu_reset(s->cpu_env); s->cpu_env->halted = !(s->apicbase & MSR_IA32_APICBASE_BSP); }
2,171
qemu
8297be80f7cf71e09617669a8bd8b2836dcfd4c3
0
static void init_ppc_proc(PowerPCCPU *cpu) { PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); CPUPPCState *env = &cpu->env; #if !defined(CONFIG_USER_ONLY) int i; env->irq_inputs = NULL; /* Set all exception vectors to an invalid address */ for (i = 0; i < POWERPC_EXCP_NB; i++) env->excp_vectors[i] = (target_ulong)(-1ULL); env->ivor_mask = 0x00000000; env->ivpr_mask = 0x00000000; /* Default MMU definitions */ env->nb_BATs = 0; env->nb_tlb = 0; env->nb_ways = 0; env->tlb_type = TLB_NONE; #endif /* Register SPR common to all PowerPC implementations */ gen_spr_generic(env); spr_register(env, SPR_PVR, "PVR", /* Linux permits userspace to read PVR */ #if defined(CONFIG_LINUX_USER) &spr_read_generic, #else SPR_NOACCESS, #endif SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, pcc->pvr); /* Register SVR if it's defined to anything else than POWERPC_SVR_NONE */ if (pcc->svr != POWERPC_SVR_NONE) { if (pcc->svr & POWERPC_SVR_E500) { spr_register(env, SPR_E500_SVR, "SVR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, pcc->svr & ~POWERPC_SVR_E500); } else { spr_register(env, SPR_SVR, "SVR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, pcc->svr); } } /* PowerPC implementation specific initialisations (SPRs, timers, ...) */ (*pcc->init_proc)(env); /* MSR bits & flags consistency checks */ if (env->msr_mask & (1 << 25)) { switch (env->flags & (POWERPC_FLAG_SPE | POWERPC_FLAG_VRE)) { case POWERPC_FLAG_SPE: case POWERPC_FLAG_VRE: break; default: fprintf(stderr, "PowerPC MSR definition inconsistency\n" "Should define POWERPC_FLAG_SPE or POWERPC_FLAG_VRE\n"); exit(1); } } else if (env->flags & (POWERPC_FLAG_SPE | POWERPC_FLAG_VRE)) { fprintf(stderr, "PowerPC MSR definition inconsistency\n" "Should not define POWERPC_FLAG_SPE nor POWERPC_FLAG_VRE\n"); exit(1); } if (env->msr_mask & (1 << 17)) { switch (env->flags & (POWERPC_FLAG_TGPR | POWERPC_FLAG_CE)) { case POWERPC_FLAG_TGPR: case POWERPC_FLAG_CE: break; default: fprintf(stderr, "PowerPC MSR definition inconsistency\n" "Should define POWERPC_FLAG_TGPR or POWERPC_FLAG_CE\n"); exit(1); } } else if (env->flags & (POWERPC_FLAG_TGPR | POWERPC_FLAG_CE)) { fprintf(stderr, "PowerPC MSR definition inconsistency\n" "Should not define POWERPC_FLAG_TGPR nor POWERPC_FLAG_CE\n"); exit(1); } if (env->msr_mask & (1 << 10)) { switch (env->flags & (POWERPC_FLAG_SE | POWERPC_FLAG_DWE | POWERPC_FLAG_UBLE)) { case POWERPC_FLAG_SE: case POWERPC_FLAG_DWE: case POWERPC_FLAG_UBLE: break; default: fprintf(stderr, "PowerPC MSR definition inconsistency\n" "Should define POWERPC_FLAG_SE or POWERPC_FLAG_DWE or " "POWERPC_FLAG_UBLE\n"); exit(1); } } else if (env->flags & (POWERPC_FLAG_SE | POWERPC_FLAG_DWE | POWERPC_FLAG_UBLE)) { fprintf(stderr, "PowerPC MSR definition inconsistency\n" "Should not define POWERPC_FLAG_SE nor POWERPC_FLAG_DWE nor " "POWERPC_FLAG_UBLE\n"); exit(1); } if (env->msr_mask & (1 << 9)) { switch (env->flags & (POWERPC_FLAG_BE | POWERPC_FLAG_DE)) { case POWERPC_FLAG_BE: case POWERPC_FLAG_DE: break; default: fprintf(stderr, "PowerPC MSR definition inconsistency\n" "Should define POWERPC_FLAG_BE or POWERPC_FLAG_DE\n"); exit(1); } } else if (env->flags & (POWERPC_FLAG_BE | POWERPC_FLAG_DE)) { fprintf(stderr, "PowerPC MSR definition inconsistency\n" "Should not define POWERPC_FLAG_BE nor POWERPC_FLAG_DE\n"); exit(1); } if (env->msr_mask & (1 << 2)) { switch (env->flags & (POWERPC_FLAG_PX | POWERPC_FLAG_PMM)) { case POWERPC_FLAG_PX: case POWERPC_FLAG_PMM: break; default: fprintf(stderr, "PowerPC MSR definition inconsistency\n" "Should 
define POWERPC_FLAG_PX or POWERPC_FLAG_PMM\n"); exit(1); } } else if (env->flags & (POWERPC_FLAG_PX | POWERPC_FLAG_PMM)) { fprintf(stderr, "PowerPC MSR definition inconsistency\n" "Should not define POWERPC_FLAG_PX nor POWERPC_FLAG_PMM\n"); exit(1); } if ((env->flags & (POWERPC_FLAG_RTC_CLK | POWERPC_FLAG_BUS_CLK)) == 0) { fprintf(stderr, "PowerPC flags inconsistency\n" "Should define the time-base and decrementer clock source\n"); exit(1); } /* Allocate TLBs buffer when needed */ #if !defined(CONFIG_USER_ONLY) if (env->nb_tlb != 0) { int nb_tlb = env->nb_tlb; if (env->id_tlbs != 0) nb_tlb *= 2; switch (env->tlb_type) { case TLB_6XX: env->tlb.tlb6 = g_malloc0(nb_tlb * sizeof(ppc6xx_tlb_t)); break; case TLB_EMB: env->tlb.tlbe = g_malloc0(nb_tlb * sizeof(ppcemb_tlb_t)); break; case TLB_MAS: env->tlb.tlbm = g_malloc0(nb_tlb * sizeof(ppcmas_tlb_t)); break; } /* Pre-compute some useful values */ env->tlb_per_way = env->nb_tlb / env->nb_ways; } if (env->irq_inputs == NULL) { fprintf(stderr, "WARNING: no internal IRQ controller registered.\n" " Attempt QEMU to crash very soon !\n"); } #endif if (env->check_pow == NULL) { fprintf(stderr, "WARNING: no power management check handler " "registered.\n" " Attempt QEMU to crash very soon !\n"); } }
2,172
qemu
ddcd55316fb2851e144e719171621ad2816487dc
0
static int set_boot_dev(ISADevice *s, const char *boot_device) { #define PC_MAX_BOOT_DEVICES 3 int nbds, bds[3] = { 0, }; int i; nbds = strlen(boot_device); if (nbds > PC_MAX_BOOT_DEVICES) { error_report("Too many boot devices for PC"); return(1); } for (i = 0; i < nbds; i++) { bds[i] = boot_device2nibble(boot_device[i]); if (bds[i] == 0) { error_report("Invalid boot device for PC: '%c'", boot_device[i]); return(1); } } rtc_set_memory(s, 0x3d, (bds[1] << 4) | bds[0]); rtc_set_memory(s, 0x38, (bds[2] << 4) | (fd_bootchk ? 0x0 : 0x1)); return(0); }
2,173
qemu
94c2b6aff43cdfcfdfb552773a6b6b973a72ef0b
0
static int64_t load_kernel (void) { int64_t kernel_entry, kernel_high; long initrd_size; ram_addr_t initrd_offset; int big_endian; uint32_t *prom_buf; long prom_size; int prom_index = 0; #ifdef TARGET_WORDS_BIGENDIAN big_endian = 1; #else big_endian = 0; #endif if (load_elf(loaderparams.kernel_filename, cpu_mips_kseg0_to_phys, NULL, (uint64_t *)&kernel_entry, NULL, (uint64_t *)&kernel_high, big_endian, ELF_MACHINE, 1) < 0) { fprintf(stderr, "qemu: could not load kernel '%s'\n", loaderparams.kernel_filename); exit(1); } /* load initrd */ initrd_size = 0; initrd_offset = 0; if (loaderparams.initrd_filename) { initrd_size = get_image_size (loaderparams.initrd_filename); if (initrd_size > 0) { initrd_offset = (kernel_high + ~INITRD_PAGE_MASK) & INITRD_PAGE_MASK; if (initrd_offset + initrd_size > ram_size) { fprintf(stderr, "qemu: memory too small for initial ram disk '%s'\n", loaderparams.initrd_filename); exit(1); } initrd_size = load_image_targphys(loaderparams.initrd_filename, initrd_offset, ram_size - initrd_offset); } if (initrd_size == (target_ulong) -1) { fprintf(stderr, "qemu: could not load initial ram disk '%s'\n", loaderparams.initrd_filename); exit(1); } } /* Setup prom parameters. */ prom_size = ENVP_NB_ENTRIES * (sizeof(int32_t) + ENVP_ENTRY_SIZE); prom_buf = g_malloc(prom_size); prom_set(prom_buf, prom_index++, "%s", loaderparams.kernel_filename); if (initrd_size > 0) { prom_set(prom_buf, prom_index++, "rd_start=0x%" PRIx64 " rd_size=%li %s", cpu_mips_phys_to_kseg0(NULL, initrd_offset), initrd_size, loaderparams.kernel_cmdline); } else { prom_set(prom_buf, prom_index++, "%s", loaderparams.kernel_cmdline); } prom_set(prom_buf, prom_index++, "memsize"); prom_set(prom_buf, prom_index++, "%i", loaderparams.ram_size); prom_set(prom_buf, prom_index++, "modetty0"); prom_set(prom_buf, prom_index++, "38400n8r"); prom_set(prom_buf, prom_index++, NULL); rom_add_blob_fixed("prom", prom_buf, prom_size, cpu_mips_kseg0_to_phys(NULL, ENVP_ADDR)); return kernel_entry; }
2,174
qemu
93970672210ca1ee45fdebbc11e1fd97916c7c8e
0
static int nbd_co_receive_reply(NBDClientSession *s, NBDRequest *request, QEMUIOVector *qiov) { int ret; int i = HANDLE_TO_INDEX(s, request->handle); /* Wait until we're woken up by nbd_read_reply_entry. */ s->requests[i].receiving = true; qemu_coroutine_yield(); s->requests[i].receiving = false; if (s->reply.handle != request->handle || !s->ioc || s->quit) { ret = -EIO; } else { ret = -s->reply.error; if (qiov && s->reply.error == 0) { assert(request->len == iov_size(qiov->iov, qiov->niov)); if (qio_channel_readv_all(s->ioc, qiov->iov, qiov->niov, NULL) < 0) { ret = -EIO; s->quit = true; } } /* Tell the read handler to read another header. */ s->reply.handle = 0; } s->requests[i].coroutine = NULL; /* Kick the read_reply_co to get the next reply. */ if (s->read_reply_co) { aio_co_wake(s->read_reply_co); } qemu_co_mutex_lock(&s->send_mutex); s->in_flight--; qemu_co_queue_next(&s->free_sema); qemu_co_mutex_unlock(&s->send_mutex); return ret; }
2,175
FFmpeg
3176217c60ca7828712985092d9102d331ea4f3d
0
void ff_h264_init_cabac_states(const H264Context *h, H264SliceContext *sl) { int i; const int8_t (*tab)[2]; const int slice_qp = av_clip(sl->qscale - 6*(h->sps.bit_depth_luma-8), 0, 51); if (sl->slice_type_nos == AV_PICTURE_TYPE_I) tab = cabac_context_init_I; else tab = cabac_context_init_PB[sl->cabac_init_idc]; /* calculate pre-state */ for( i= 0; i < 1024; i++ ) { int pre = 2*(((tab[i][0] * slice_qp) >>4 ) + tab[i][1]) - 127; pre^= pre>>31; if(pre > 124) pre= 124 + (pre&1); sl->cabac_state[i] = pre; } }
2,176
FFmpeg
4e240985d8b856e62e4e0377283138cf51cc398e
0
static int mov_read_udta(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom) { uint64_t end = url_ftell(pb) + atom.size; while (url_ftell(pb) + 8 < end) { uint32_t tag_size = get_be32(pb); uint32_t tag = get_le32(pb); uint64_t next = url_ftell(pb) + tag_size - 8; if (next > end) // stop if tag_size is wrong break; switch (tag) { case MKTAG(0xa9,'n','a','m'): mov_parse_udta_string(pb, c->fc->title, sizeof(c->fc->title)); break; case MKTAG(0xa9,'w','r','t'): mov_parse_udta_string(pb, c->fc->author, sizeof(c->fc->author)); break; case MKTAG(0xa9,'c','p','y'): mov_parse_udta_string(pb, c->fc->copyright, sizeof(c->fc->copyright)); break; case MKTAG(0xa9,'i','n','f'): mov_parse_udta_string(pb, c->fc->comment, sizeof(c->fc->comment)); break; default: break; } url_fseek(pb, next, SEEK_SET); } return 0; }
2,177
qemu
3ee886c5ba77a65d6b2c2a372a091d6796ed502b
0
static int usb_host_auto_scan(void *opaque, int bus_num, int addr, char *port, int class_id, int vendor_id, int product_id, const char *product_name, int speed) { struct USBAutoFilter *f; struct USBHostDevice *s; /* Ignore hubs */ if (class_id == 9) return 0; QTAILQ_FOREACH(s, &hostdevs, next) { f = &s->match; if (f->bus_num > 0 && f->bus_num != bus_num) { continue; } if (f->addr > 0 && f->addr != addr) { continue; } if (f->port != NULL && (port == NULL || strcmp(f->port, port) != 0)) { continue; } if (f->vendor_id > 0 && f->vendor_id != vendor_id) { continue; } if (f->product_id > 0 && f->product_id != product_id) { continue; } /* We got a match */ /* Already attached ? */ if (s->fd != -1) { return 0; } DPRINTF("husb: auto open: bus_num %d addr %d\n", bus_num, addr); usb_host_open(s, bus_num, addr, port, product_name, speed); break; } return 0; }
2,178
qemu
bd79255d2571a3c68820117caf94ea9afe1d527e
0
static int disas_coproc_insn(DisasContext *s, uint32_t insn)
{
    int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
    const ARMCPRegInfo *ri;

    cpnum = (insn >> 8) & 0xf;

    /* First check for coprocessor space used for XScale/iwMMXt insns */
    if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
        if (extract32(s->c15_cpar, cpnum, 1) == 0) {
            return 1;
        }
        if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
            return disas_iwmmxt_insn(s, insn);
        } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
            return disas_dsp_insn(s, insn);
        }
        return 1;
    }

    /* Otherwise treat as a generic register access */
    is64 = (insn & (1 << 25)) == 0;
    if (!is64 && ((insn & (1 << 4)) == 0)) {
        /* cdp */
        return 1;
    }

    crm = insn & 0xf;
    if (is64) {
        crn = 0;
        opc1 = (insn >> 4) & 0xf;
        opc2 = 0;
        rt2 = (insn >> 16) & 0xf;
    } else {
        crn = (insn >> 16) & 0xf;
        opc1 = (insn >> 21) & 7;
        opc2 = (insn >> 5) & 7;
        rt2 = 0;
    }
    isread = (insn >> 20) & 1;
    rt = (insn >> 12) & 0xf;

    ri = get_arm_cp_reginfo(s->cp_regs,
            ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
    if (ri) {
        /* Check access permissions */
        if (!cp_access_ok(s->current_el, ri, isread)) {
            return 1;
        }

        if (ri->accessfn ||
            (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
            /* Emit code to perform further access permissions checks at
             * runtime; this may result in an exception.
             * Note that on XScale all cp0..c13 registers do an access check
             * call in order to handle c15_cpar.
             */
            TCGv_ptr tmpptr;
            TCGv_i32 tcg_syn;
            uint32_t syndrome;

            /* Note that since we are an implementation which takes an
             * exception on a trapped conditional instruction only if the
             * instruction passes its condition code check, we can take
             * advantage of the clause in the ARM ARM that allows us to set
             * the COND field in the instruction to 0xE in all cases.
             * We could fish the actual condition out of the insn (ARM)
             * or the condexec bits (Thumb) but it isn't necessary.
             */
            switch (cpnum) {
            case 14:
                if (is64) {
                    syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
                                                 isread, s->thumb);
                } else {
                    syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
                                                rt, isread, s->thumb);
                }
                break;
            case 15:
                if (is64) {
                    syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
                                                 isread, s->thumb);
                } else {
                    syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
                                                rt, isread, s->thumb);
                }
                break;
            default:
                /* ARMv8 defines that only coprocessors 14 and 15 exist,
                 * so this can only happen if this is an ARMv7 or earlier CPU,
                 * in which case the syndrome information won't actually be
                 * guest visible.
                 */
                assert(!arm_dc_feature(s, ARM_FEATURE_V8));
                syndrome = syn_uncategorized();
                break;
            }
            gen_set_pc_im(s, s->pc);
            tmpptr = tcg_const_ptr(ri);
            tcg_syn = tcg_const_i32(syndrome);
            gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn);
            tcg_temp_free_ptr(tmpptr);
            tcg_temp_free_i32(tcg_syn);
        }

        /* Handle special cases first */
        switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
        case ARM_CP_NOP:
            return 0;
        case ARM_CP_WFI:
            if (isread) {
                return 1;
            }
            gen_set_pc_im(s, s->pc);
            s->is_jmp = DISAS_WFI;
            return 0;
        default:
            break;
        }

        if (use_icount && (ri->type & ARM_CP_IO)) {
            gen_io_start();
        }

        if (isread) {
            /* Read */
            if (is64) {
                TCGv_i64 tmp64;
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp64 = tcg_const_i64(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    tmp64 = tcg_temp_new_i64();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp64 = tcg_temp_new_i64();
                    tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                store_reg(s, rt, tmp);
                tcg_gen_shri_i64(tmp64, tmp64, 32);
                tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                store_reg(s, rt2, tmp);
            } else {
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp = tcg_const_i32(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    tmp = tcg_temp_new_i32();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp = load_cpu_offset(ri->fieldoffset);
                }
                if (rt == 15) {
                    /* Destination register of r15 for 32 bit loads sets
                     * the condition codes from the high 4 bits of the value
                     */
                    gen_set_nzcv(tmp);
                    tcg_temp_free_i32(tmp);
                } else {
                    store_reg(s, rt, tmp);
                }
            }
        } else {
            /* Write */
            if (ri->type & ARM_CP_CONST) {
                /* If not forbidden by access permissions, treat as WI */
                return 0;
            }

            if (is64) {
                TCGv_i32 tmplo, tmphi;
                TCGv_i64 tmp64 = tcg_temp_new_i64();
                tmplo = load_reg(s, rt);
                tmphi = load_reg(s, rt2);
                tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
                tcg_temp_free_i32(tmplo);
                tcg_temp_free_i32(tmphi);
                if (ri->writefn) {
                    TCGv_ptr tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                tcg_temp_free_i64(tmp64);
            } else {
                if (ri->writefn) {
                    TCGv_i32 tmp;
                    TCGv_ptr tmpptr;
                    tmp = load_reg(s, rt);
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
                    tcg_temp_free_ptr(tmpptr);
                    tcg_temp_free_i32(tmp);
                } else {
                    TCGv_i32 tmp = load_reg(s, rt);
                    store_cpu_offset(tmp, ri->fieldoffset);
                }
            }
        }

        if (use_icount && (ri->type & ARM_CP_IO)) {
            /* I/O operations must end the TB here (whether read or write) */
            gen_io_end();
            gen_lookup_tb(s);
        } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
            /* We default to ending the TB on a coprocessor register write,
             * but allow this to be suppressed by the register definition
             * (usually only necessary to work around guest bugs).
             */
            gen_lookup_tb(s);
        }

        return 0;
    }

    /* Unknown register; this might be a guest error or a QEMU
     * unimplemented feature.
     */
    if (is64) {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                      "64 bit system register cp:%d opc1: %d crm:%d "
                      "(%s)\n",
                      isread ? "read" : "write", cpnum, opc1, crm,
                      s->ns ? "non-secure" : "secure");
    } else {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                      "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
                      "(%s)\n",
                      isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
                      s->ns ? "non-secure" : "secure");
    }
    return 1;
}
2,179
qemu
a8170e5e97ad17ca169c64ba87ae2f53850dab4c
0
e1000_mmio_write(void *opaque, target_phys_addr_t addr, uint64_t val,
                 unsigned size)
{
    E1000State *s = opaque;
    unsigned int index = (addr & 0x1ffff) >> 2;

    if (index < NWRITEOPS && macreg_writeops[index]) {
        macreg_writeops[index](s, index, val);
    } else if (index < NREADOPS && macreg_readops[index]) {
        DBGOUT(MMIO, "e1000_mmio_writel RO %x: 0x%04"PRIx64"\n", index<<2, val);
    } else {
        DBGOUT(UNKNOWN, "MMIO unknown write addr=0x%08x,val=0x%08"PRIx64"\n",
               index<<2, val);
    }
}
2,180
qemu
32bafa8fdd098d52fbf1102d5a5e48d29398c0aa
0
socket_sockaddr_to_address_unix(struct sockaddr_storage *sa,
                                socklen_t salen,
                                Error **errp)
{
    SocketAddress *addr;
    struct sockaddr_un *su = (struct sockaddr_un *)sa;

    addr = g_new0(SocketAddress, 1);
    addr->type = SOCKET_ADDRESS_KIND_UNIX;
    addr->u.q_unix = g_new0(UnixSocketAddress, 1);
    if (su->sun_path[0]) {
        addr->u.q_unix->path = g_strndup(su->sun_path,
                                         sizeof(su->sun_path));
    }

    return addr;
}
2,181
qemu
51b19ebe4320f3dcd93cea71235c1219318ddfd2
0
static int fetch_active_ports_list(QEMUFile *f, int version_id,
                                   VirtIOSerial *s, uint32_t nr_active_ports)
{
    uint32_t i;

    s->post_load = g_malloc0(sizeof(*s->post_load));
    s->post_load->nr_active_ports = nr_active_ports;
    s->post_load->connected =
        g_malloc0(sizeof(*s->post_load->connected) * nr_active_ports);

    s->post_load->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                       virtio_serial_post_load_timer_cb,
                                       s);

    /* Items in struct VirtIOSerialPort */
    for (i = 0; i < nr_active_ports; i++) {
        VirtIOSerialPort *port;
        uint32_t id;

        id = qemu_get_be32(f);
        port = find_port_by_id(s, id);
        if (!port) {
            return -EINVAL;
        }

        port->guest_connected = qemu_get_byte(f);
        s->post_load->connected[i].port = port;
        s->post_load->connected[i].host_connected = qemu_get_byte(f);

        if (version_id > 2) {
            uint32_t elem_popped;

            qemu_get_be32s(f, &elem_popped);
            if (elem_popped) {
                qemu_get_be32s(f, &port->iov_idx);
                qemu_get_be64s(f, &port->iov_offset);

                qemu_get_buffer(f, (unsigned char *)&port->elem,
                                sizeof(port->elem));
                virtqueue_map(&port->elem);

                /*
                 * Port was throttled on source machine. Let's
                 * unthrottle it here so data starts flowing again.
                 */
                virtio_serial_throttle_port(port, false);
            }
        }
    }
    timer_mod(s->post_load->timer, 1);
    return 0;
}
2,184
qemu
b3ce84fea466f3bca2ff85d158744f00c0f429bd
0
void qdev_prop_set_globals_for_type(DeviceState *dev, const char *typename,
                                    Error **errp)
{
    GlobalProperty *prop;

    QTAILQ_FOREACH(prop, &global_props, next) {
        Error *err = NULL;

        if (strcmp(typename, prop->driver) != 0) {
            continue;
        }
        prop->not_used = false;
        object_property_parse(OBJECT(dev), prop->value, prop->property, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }
}
2,185
qemu
3718d8ab65f68de2acccbe6a315907805f54e3cc
0
void bdrv_set_in_use(BlockDriverState *bs, int in_use)
{
    assert(bs->in_use != in_use);
    bs->in_use = in_use;
}
2,186
qemu
a980f7f2c2f4d7e9a1eba4f804cd66dbd458b6d4
0
static void qvirtio_scsi_stop(void)
{
    qtest_end();
}
2,189
qemu
72cf2d4f0e181d0d3a3122e04129c58a95da713e
0
int qemu_aio_set_fd_handler(int fd,
                            IOHandler *io_read,
                            IOHandler *io_write,
                            AioFlushHandler *io_flush,
                            void *opaque)
{
    AioHandler *node;

    node = find_aio_handler(fd);

    /* Are we deleting the fd handler? */
    if (!io_read && !io_write) {
        if (node) {
            /* If the lock is held, just mark the node as deleted */
            if (walking_handlers)
                node->deleted = 1;
            else {
                /* Otherwise, delete it for real.  We can't just mark it as
                 * deleted because deleted nodes are only cleaned up after
                 * releasing the walking_handlers lock.
                 */
                LIST_REMOVE(node, node);
                qemu_free(node);
            }
        }
    } else {
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = qemu_mallocz(sizeof(AioHandler));
            node->fd = fd;
            LIST_INSERT_HEAD(&aio_handlers, node, node);
        }
        /* Update handler with latest information */
        node->io_read = io_read;
        node->io_write = io_write;
        node->io_flush = io_flush;
        node->opaque = opaque;
    }

    qemu_set_fd_handler2(fd, NULL, io_read, io_write, opaque);

    return 0;
}
2,191
qemu
e402463073ae51d00dc6cf98556e2f5c4b008a31
0
static pcibus_t pci_bar_address(PCIDevice *d,
                                int reg, uint8_t type, pcibus_t size)
{
    pcibus_t new_addr, last_addr;
    int bar = pci_bar(d, reg);
    uint16_t cmd = pci_get_word(d->config + PCI_COMMAND);

    if (type & PCI_BASE_ADDRESS_SPACE_IO) {
        if (!(cmd & PCI_COMMAND_IO)) {
            return PCI_BAR_UNMAPPED;
        }
        new_addr = pci_get_long(d->config + bar) & ~(size - 1);
        last_addr = new_addr + size - 1;
        /* Check if 32 bit BAR wraps around explicitly.
         * TODO: make priorities correct and remove this work around.
         */
        if (last_addr <= new_addr || new_addr == 0 || last_addr >= UINT32_MAX) {
            return PCI_BAR_UNMAPPED;
        }
        return new_addr;
    }

    if (!(cmd & PCI_COMMAND_MEMORY)) {
        return PCI_BAR_UNMAPPED;
    }
    if (type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
        new_addr = pci_get_quad(d->config + bar);
    } else {
        new_addr = pci_get_long(d->config + bar);
    }
    /* the ROM slot has a specific enable bit */
    if (reg == PCI_ROM_SLOT && !(new_addr & PCI_ROM_ADDRESS_ENABLE)) {
        return PCI_BAR_UNMAPPED;
    }
    new_addr &= ~(size - 1);
    last_addr = new_addr + size - 1;
    /* NOTE: we do not support wrapping */
    /* XXX: as we cannot support really dynamic
       mappings, we handle specific values as invalid
       mappings. */
    if (last_addr <= new_addr || new_addr == 0 ||
        last_addr == PCI_BAR_UNMAPPED) {
        return PCI_BAR_UNMAPPED;
    }

    /* Now pcibus_t is 64bit.
     * Check if 32 bit BAR wraps around explicitly.
     * Without this, PC ide doesn't work well.
     * TODO: remove this work around.
     */
    if (!(type & PCI_BASE_ADDRESS_MEM_TYPE_64) && last_addr >= UINT32_MAX) {
        return PCI_BAR_UNMAPPED;
    }

    /*
     * OS is allowed to set BAR beyond its addressable
     * bits. For example, 32 bit OS can set 64bit bar
     * to >4G. Check it. TODO: we might need to support
     * it in the future for e.g. PAE.
     */
    if (last_addr >= HWADDR_MAX) {
        return PCI_BAR_UNMAPPED;
    }

    return new_addr;
}
2,192
qemu
2aece63c8a9d2c3a8ff41d2febc4cdeff2633331
0
static void pc_dimm_check_memdev_is_busy(Object *obj, const char *name,
                                         Object *val, Error **errp)
{
    MemoryRegion *mr;
    Error *local_err = NULL;

    mr = host_memory_backend_get_memory(MEMORY_BACKEND(val), &local_err);
    if (local_err) {
        goto out;
    }
    if (memory_region_is_mapped(mr)) {
        char *path = object_get_canonical_path_component(val);
        error_setg(&local_err, "can't use already busy memdev: %s", path);
        g_free(path);
    } else {
        qdev_prop_allow_set_link_before_realize(obj, name, val, &local_err);
    }

out:
    error_propagate(errp, local_err);
}
2,193
qemu
0a43a1b5d7c33208120eeb2d98ebb9ab15dc2c87
0
static int vhdx_parse_log(BlockDriverState *bs, BDRVVHDXState *s)
{
    int ret = 0;
    int i;
    VHDXHeader *hdr;

    hdr = s->headers[s->curr_header];

    /* either the log guid, or log length is zero,
     * then a replay log is present */
    for (i = 0; i < sizeof(hdr->log_guid.data4); i++) {
        ret |= hdr->log_guid.data4[i];
    }
    if (hdr->log_guid.data1 == 0 &&
        hdr->log_guid.data2 == 0 &&
        hdr->log_guid.data3 == 0 &&
        ret == 0) {
        goto exit;
    }

    /* per spec, only log version of 0 is supported */
    if (hdr->log_version != 0) {
        ret = -EINVAL;
        goto exit;
    }

    if (hdr->log_length == 0) {
        goto exit;
    }

    /* We currently do not support images with logs to replay */
    ret = -ENOTSUP;

exit:
    return ret;
}
2,194
qemu
ba5e6bfa1aee29a8f72c5538c565dfb9889cf273
0
static void vfio_unmap_bar(VFIOPCIDevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];

    if (!bar->region.size) {
        return;
    }

    vfio_bar_quirk_teardown(vdev, nr);

    memory_region_del_subregion(&bar->region.mem, &bar->region.mmap_mem);
    munmap(bar->region.mmap, memory_region_size(&bar->region.mmap_mem));

    if (vdev->msix && vdev->msix->table_bar == nr) {
        memory_region_del_subregion(&bar->region.mem, &vdev->msix->mmap_mem);
        munmap(vdev->msix->mmap, memory_region_size(&vdev->msix->mmap_mem));
    }
}
2,196
qemu
a8170e5e97ad17ca169c64ba87ae2f53850dab4c
0
static void pmac_ide_writeb (void *opaque,
                             target_phys_addr_t addr, uint32_t val)
{
    MACIOIDEState *d = opaque;

    addr = (addr & 0xFFF) >> 4;
    switch (addr) {
    case 1 ... 7:
        ide_ioport_write(&d->bus, addr, val);
        break;
    case 8:
    case 22:
        ide_cmd_write(&d->bus, 0, val);
        break;
    default:
        break;
    }
}
2,197
FFmpeg
2df0c32ea12ddfa72ba88309812bfb13b674130f
0
static int MPA_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                            const AVFrame *frame, int *got_packet_ptr)
{
    MpegAudioContext *s = avctx->priv_data;
    const int16_t *samples = (const int16_t *)frame->data[0];
    short smr[MPA_MAX_CHANNELS][SBLIMIT];
    unsigned char bit_alloc[MPA_MAX_CHANNELS][SBLIMIT];
    int padding, i, ret;

    for(i=0;i<s->nb_channels;i++) {
        filter(s, i, samples + i, s->nb_channels);
    }

    for(i=0;i<s->nb_channels;i++) {
        compute_scale_factors(s, s->scale_code[i], s->scale_factors[i],
                              s->sb_samples[i], s->sblimit);
    }
    for(i=0;i<s->nb_channels;i++) {
        psycho_acoustic_model(s, smr[i]);
    }
    compute_bit_allocation(s, smr, bit_alloc, &padding);

    if ((ret = ff_alloc_packet(avpkt, MPA_MAX_CODED_FRAME_SIZE))) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet\n");
        return ret;
    }

    init_put_bits(&s->pb, avpkt->data, avpkt->size);

    encode_frame(s, bit_alloc, padding);

    if (frame->pts != AV_NOPTS_VALUE)
        avpkt->pts = frame->pts - ff_samples_to_time_base(avctx, avctx->delay);

    avpkt->size = put_bits_count(&s->pb) / 8;
    *got_packet_ptr = 1;
    return 0;
}
2,198
FFmpeg
f8bed30d8b176fa030f6737765338bb4a2bcabc9
0
int vc1_decode_sequence_header(AVCodecContext *avctx, VC1Context *v, GetBitContext *gb)
{
    av_log(avctx, AV_LOG_DEBUG, "Header: %0X\n", show_bits(gb, 32));
    v->profile = get_bits(gb, 2);
    if (v->profile == PROFILE_COMPLEX) {
        av_log(avctx, AV_LOG_WARNING, "WMV3 Complex Profile is not fully supported\n");
    }

    if (v->profile == PROFILE_ADVANCED) {
        v->zz_8x4 = ff_vc1_adv_progressive_8x4_zz;
        v->zz_4x8 = ff_vc1_adv_progressive_4x8_zz;
        return decode_sequence_header_adv(v, gb);
    } else {
        v->zz_8x4 = wmv2_scantableA;
        v->zz_4x8 = wmv2_scantableB;
        v->res_y411   = get_bits1(gb);
        v->res_sprite = get_bits1(gb);
        if (v->res_y411) {
            av_log(avctx, AV_LOG_ERROR,
                   "Old interlaced mode is not supported\n");
            return -1;
        }
        if (v->res_sprite) {
            av_log(avctx, AV_LOG_ERROR, "WMVP is not fully supported\n");
        }
    }

    // (fps-2)/4 (->30)
    v->frmrtq_postproc = get_bits(gb, 3); //common
    // (bitrate-32kbps)/64kbps
    v->bitrtq_postproc = get_bits(gb, 5); //common
    v->s.loop_filter = get_bits1(gb); //common
    if(v->s.loop_filter == 1 && v->profile == PROFILE_SIMPLE) {
        av_log(avctx, AV_LOG_ERROR,
               "LOOPFILTER shall not be enabled in Simple Profile\n");
    }
    if(v->s.avctx->skip_loop_filter >= AVDISCARD_ALL)
        v->s.loop_filter = 0;

    v->res_x8 = get_bits1(gb); //reserved
    v->multires = get_bits1(gb);
    v->res_fasttx = get_bits1(gb);
    if (!v->res_fasttx) {
        v->vc1dsp.vc1_inv_trans_8x8 = ff_simple_idct;
        v->vc1dsp.vc1_inv_trans_8x4 = ff_simple_idct84_add;
        v->vc1dsp.vc1_inv_trans_4x8 = ff_simple_idct48_add;
        v->vc1dsp.vc1_inv_trans_4x4 = ff_simple_idct44_add;
        v->vc1dsp.vc1_inv_trans_8x8_dc = ff_simple_idct_add;
        v->vc1dsp.vc1_inv_trans_8x4_dc = ff_simple_idct84_add;
        v->vc1dsp.vc1_inv_trans_4x8_dc = ff_simple_idct48_add;
        v->vc1dsp.vc1_inv_trans_4x4_dc = ff_simple_idct44_add;
    }

    v->fastuvmc = get_bits1(gb); //common
    if (!v->profile && !v->fastuvmc) {
        av_log(avctx, AV_LOG_ERROR,
               "FASTUVMC unavailable in Simple Profile\n");
        return -1;
    }
    v->extended_mv = get_bits1(gb); //common
    if (!v->profile && v->extended_mv) {
        av_log(avctx, AV_LOG_ERROR,
               "Extended MVs unavailable in Simple Profile\n");
        return -1;
    }
    v->dquant = get_bits(gb, 2); //common
    v->vstransform = get_bits1(gb); //common

    v->res_transtab = get_bits1(gb);
    if (v->res_transtab) {
        av_log(avctx, AV_LOG_ERROR,
               "1 for reserved RES_TRANSTAB is forbidden\n");
        return -1;
    }

    v->overlap = get_bits1(gb); //common

    v->s.resync_marker = get_bits1(gb);
    v->rangered = get_bits1(gb);
    if (v->rangered && v->profile == PROFILE_SIMPLE) {
        av_log(avctx, AV_LOG_INFO,
               "RANGERED should be set to 0 in Simple Profile\n");
    }

    v->s.max_b_frames = avctx->max_b_frames = get_bits(gb, 3); //common
    v->quantizer_mode = get_bits(gb, 2); //common

    v->finterpflag = get_bits1(gb); //common

    if (v->res_sprite) {
        v->s.avctx->width  = v->s.avctx->coded_width  = get_bits(gb, 11);
        v->s.avctx->height = v->s.avctx->coded_height = get_bits(gb, 11);
        skip_bits(gb, 5); //frame rate
        v->res_x8 = get_bits1(gb);
        if (get_bits1(gb)) { // something to do with DC VLC selection
            av_log(avctx, AV_LOG_ERROR, "Unsupported sprite feature\n");
            return -1;
        }
        skip_bits(gb, 3); //slice code
        v->res_rtm_flag = 0;
    } else {
        v->res_rtm_flag = get_bits1(gb); //reserved
    }
    if (!v->res_rtm_flag) {
//        av_log(avctx, AV_LOG_ERROR,
//               "0 for reserved RES_RTM_FLAG is forbidden\n");
        av_log(avctx, AV_LOG_ERROR,
               "Old WMV3 version detected, some frames may be decoded incorrectly\n");
        //return -1;
    }
    //TODO: figure out what they mean (always 0x402F)
    if(!v->res_fasttx) skip_bits(gb, 16);
    av_log(avctx, AV_LOG_DEBUG,
           "Profile %i:\nfrmrtq_postproc=%i, bitrtq_postproc=%i\n"
           "LoopFilter=%i, MultiRes=%i, FastUVMC=%i, Extended MV=%i\n"
           "Rangered=%i, VSTransform=%i, Overlap=%i, SyncMarker=%i\n"
           "DQuant=%i, Quantizer mode=%i, Max B frames=%i\n",
           v->profile, v->frmrtq_postproc, v->bitrtq_postproc,
           v->s.loop_filter, v->multires, v->fastuvmc, v->extended_mv,
           v->rangered, v->vstransform, v->overlap, v->s.resync_marker,
           v->dquant, v->quantizer_mode, avctx->max_b_frames
           );
    return 0;
}
2,199