label
int64 0
1
| func1
stringlengths 23
97k
| id
int64 0
27.3k
|
---|---|---|
1 | static void xhci_check_iso_kick(XHCIState *xhci, XHCITransfer *xfer, XHCIEPContext *epctx, uint64_t mfindex) { if (xfer->mfindex_kick > mfindex) { timer_mod(epctx->kick_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (xfer->mfindex_kick - mfindex) * 125000); xfer->running_retry = 1; } else { epctx->mfindex_last = xfer->mfindex_kick; timer_del(epctx->kick_timer); xfer->running_retry = 0; } } | 25,136 |
1 | static void watch_mem_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) { check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_WRITE); switch (size) { case 1: stb_phys(&address_space_memory, addr, val); break; case 2: stw_phys(&address_space_memory, addr, val); break; case 4: stl_phys(&address_space_memory, addr, val); break; default: abort(); } } | 25,137 |
0 | static void compute_rematrixing_strategy(AC3EncodeContext *s) { int nb_coefs; int blk, bnd, i; AC3Block *block, *av_uninit(block0); if (s->channel_mode != AC3_CHMODE_STEREO) return; for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) { block = &s->blocks[blk]; block->new_rematrixing_strategy = !blk; if (!s->rematrixing_enabled) { block0 = block; continue; } block->num_rematrixing_bands = 4; if (block->cpl_in_use) { block->num_rematrixing_bands -= (s->start_freq[CPL_CH] <= 61); block->num_rematrixing_bands -= (s->start_freq[CPL_CH] == 37); if (blk && block->num_rematrixing_bands != block0->num_rematrixing_bands) block->new_rematrixing_strategy = 1; } nb_coefs = FFMIN(block->end_freq[1], block->end_freq[2]); for (bnd = 0; bnd < block->num_rematrixing_bands; bnd++) { /* calculate calculate sum of squared coeffs for one band in one block */ int start = ff_ac3_rematrix_band_tab[bnd]; int end = FFMIN(nb_coefs, ff_ac3_rematrix_band_tab[bnd+1]); CoefSumType sum[4] = {0,}; for (i = start; i < end; i++) { CoefType lt = block->mdct_coef[1][i]; CoefType rt = block->mdct_coef[2][i]; CoefType md = lt + rt; CoefType sd = lt - rt; MAC_COEF(sum[0], lt, lt); MAC_COEF(sum[1], rt, rt); MAC_COEF(sum[2], md, md); MAC_COEF(sum[3], sd, sd); } /* compare sums to determine if rematrixing will be used for this band */ if (FFMIN(sum[2], sum[3]) < FFMIN(sum[0], sum[1])) block->rematrixing_flags[bnd] = 1; else block->rematrixing_flags[bnd] = 0; /* determine if new rematrixing flags will be sent */ if (blk && block->rematrixing_flags[bnd] != block0->rematrixing_flags[bnd]) { block->new_rematrixing_strategy = 1; } } block0 = block; } } | 25,138 |
1 | long vnc_client_read_ws(VncState *vs) { int ret, err; uint8_t *payload; size_t payload_size, frame_size; VNC_DEBUG("Read websocket %p size %zd offset %zd\n", vs->ws_input.buffer, vs->ws_input.capacity, vs->ws_input.offset); buffer_reserve(&vs->ws_input, 4096); ret = vnc_client_read_buf(vs, buffer_end(&vs->ws_input), 4096); if (!ret) { return 0; } vs->ws_input.offset += ret; /* make sure that nothing is left in the ws_input buffer */ do { err = vncws_decode_frame(&vs->ws_input, &payload, &payload_size, &frame_size); if (err <= 0) { return err; } buffer_reserve(&vs->input, payload_size); buffer_append(&vs->input, payload, payload_size); buffer_advance(&vs->ws_input, frame_size); } while (vs->ws_input.offset > 0); return ret; } | 25,139 |
1 | int ff_msmpeg4_pred_dc(MpegEncContext *s, int n, int16_t **dc_val_ptr, int *dir_ptr) { int a, b, c, wrap, pred, scale; int16_t *dc_val; /* find prediction */ if (n < 4) { scale = s->y_dc_scale; } else { scale = s->c_dc_scale; } wrap = s->block_wrap[n]; dc_val= s->dc_val[0] + s->block_index[n]; /* B C * A X */ a = dc_val[ - 1]; b = dc_val[ - 1 - wrap]; c = dc_val[ - wrap]; if(s->first_slice_line && (n&2)==0 && s->msmpeg4_version<4){ b=c=1024; } /* XXX: the following solution consumes divisions, but it does not necessitate to modify mpegvideo.c. The problem comes from the fact they decided to store the quantized DC (which would lead to problems if Q could vary !) */ #if ARCH_X86 && HAVE_7REGS && HAVE_EBX_AVAILABLE __asm__ volatile( "movl %3, %%eax \n\t" "shrl $1, %%eax \n\t" "addl %%eax, %2 \n\t" "addl %%eax, %1 \n\t" "addl %0, %%eax \n\t" "mull %4 \n\t" "movl %%edx, %0 \n\t" "movl %1, %%eax \n\t" "mull %4 \n\t" "movl %%edx, %1 \n\t" "movl %2, %%eax \n\t" "mull %4 \n\t" "movl %%edx, %2 \n\t" : "+b" (a), "+c" (b), "+D" (c) : "g" (scale), "S" (ff_inverse[scale]) : "%eax", "%edx" ); #else /* Divisions are costly everywhere; optimize the most common case. */ if (scale == 8) { a = (a + (8 >> 1)) / 8; b = (b + (8 >> 1)) / 8; c = (c + (8 >> 1)) / 8; } else { a = FASTDIV((a + (scale >> 1)), scale); b = FASTDIV((b + (scale >> 1)), scale); c = FASTDIV((c + (scale >> 1)), scale); } #endif /* XXX: WARNING: they did not choose the same test as MPEG4. This is very important ! 
*/ if(s->msmpeg4_version>3){ if(s->inter_intra_pred){ uint8_t *dest; int wrap; if(n==1){ pred=a; *dir_ptr = 0; }else if(n==2){ pred=c; *dir_ptr = 1; }else if(n==3){ if (abs(a - b) < abs(b - c)) { pred = c; *dir_ptr = 1; } else { pred = a; *dir_ptr = 0; } }else{ if(n<4){ wrap= s->linesize; dest= s->current_picture.f.data[0] + (((n >> 1) + 2*s->mb_y) * 8* wrap ) + ((n & 1) + 2*s->mb_x) * 8; }else{ wrap= s->uvlinesize; dest= s->current_picture.f.data[n - 3] + (s->mb_y * 8 * wrap) + s->mb_x * 8; } if(s->mb_x==0) a= (1024 + (scale>>1))/scale; else a= get_dc(dest-8, wrap, scale*8); if(s->mb_y==0) c= (1024 + (scale>>1))/scale; else c= get_dc(dest-8*wrap, wrap, scale*8); if (s->h263_aic_dir==0) { pred= a; *dir_ptr = 0; }else if (s->h263_aic_dir==1) { if(n==0){ pred= c; *dir_ptr = 1; }else{ pred= a; *dir_ptr = 0; } }else if (s->h263_aic_dir==2) { if(n==0){ pred= a; *dir_ptr = 0; }else{ pred= c; *dir_ptr = 1; } } else { pred= c; *dir_ptr = 1; } } }else{ if (abs(a - b) < abs(b - c)) { pred = c; *dir_ptr = 1; } else { pred = a; *dir_ptr = 0; } } }else{ if (abs(a - b) <= abs(b - c)) { pred = c; *dir_ptr = 1; } else { pred = a; *dir_ptr = 0; } } /* update predictor */ *dc_val_ptr = &dc_val[0]; return pred; } | 25,140 |
1 | /**
 * Open a URL by matching its protocol prefix against the registered
 * protocol list; falls back to the "file" protocol when the prefix is
 * absent, non-alphabetic, or a single character (DOS drive letter).
 *
 * @param puc      receives the opened URLContext (set to NULL on failure)
 * @param filename URL or plain path to open
 * @param flags    open flags forwarded to the protocol
 * @return 0 on success, AVERROR(ENOENT) if no protocol matches
 */
int url_open(URLContext **puc, const char *filename, int flags)
{
    URLProtocol *up;
    const char *p;
    char proto_str[128], *q;

    p = filename;
    q = proto_str;
    while (*p != '\0' && *p != ':') {
        /* Protocols can only contain alphabetic chars.  Cast through
         * unsigned char: passing a negative plain char to isalpha() is
         * undefined behavior (CERT STR37-C). */
        if (!isalpha((unsigned char)*p))
            goto file_proto;
        /* Cast the pointer difference to size_t so the comparison with
         * sizeof() is not signed/unsigned mismatched; the -1 reserves
         * room for the terminating NUL. */
        if ((size_t)(q - proto_str) < sizeof(proto_str) - 1)
            *q++ = *p;
        p++;
    }
    /* if the protocol has length 1, we consider it is a dos drive */
    if (*p == '\0' || (q - proto_str) <= 1) {
    file_proto:
        strcpy(proto_str, "file");
    } else {
        *q = '\0';
    }

    for (up = first_protocol; up != NULL; up = up->next) {
        if (!strcmp(proto_str, up->name))
            return url_open_protocol(puc, up, filename, flags);
    }
    *puc = NULL;
    return AVERROR(ENOENT);
}
| 25,141 |
1 | static void gen_mfsrin_64b(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); #else TCGv t0; if (unlikely(ctx->pr)) { gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); return; } t0 = tcg_temp_new(); tcg_gen_shri_tl(t0, cpu_gpr[rB(ctx->opcode)], 28); tcg_gen_andi_tl(t0, t0, 0xF); gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); tcg_temp_free(t0); #endif } | 25,142 |
1 | /**
 * Allocate and initialize a software-scaling context.
 *
 * Fixes over the previous version: the context itself was leaked when
 * the AVClass allocation failed, and the ImgReSampleContext returned by
 * av_malloc()/img_resample_init() was dereferenced/returned without a
 * NULL check.  All failure paths now release what they allocated and
 * return NULL.
 *
 * @return a new SwsContext, or NULL on allocation failure
 */
struct SwsContext *sws_getContext(int srcW, int srcH, int srcFormat,
                                  int dstW, int dstH, int dstFormat, int flags,
                                  SwsFilter *srcFilter, SwsFilter *dstFilter,
                                  double *param)
{
    struct SwsContext *ctx;

    ctx = av_malloc(sizeof(struct SwsContext));
    if (!ctx) {
        av_log(NULL, AV_LOG_ERROR, "Cannot allocate a resampling context!\n");
        return NULL;
    }
    ctx->av_class = av_mallocz(sizeof(AVClass));
    if (!ctx->av_class) {
        av_log(NULL, AV_LOG_ERROR, "Cannot allocate a resampling context!\n");
        av_free(ctx); /* previously leaked on this path */
        return NULL;
    }

    if ((srcH != dstH) || (srcW != dstW)) {
        if ((srcFormat != PIX_FMT_YUV420P) || (dstFormat != PIX_FMT_YUV420P)) {
            av_log(NULL, AV_LOG_INFO,
                   "PIX_FMT_YUV420P will be used as an intermediate format for rescaling\n");
        }
        ctx->resampling_ctx = img_resample_init(dstW, dstH, srcW, srcH);
    } else {
        ctx->resampling_ctx = av_malloc(sizeof(ImgReSampleContext));
        if (ctx->resampling_ctx) {
            ctx->resampling_ctx->iheight = srcH;
            ctx->resampling_ctx->iwidth  = srcW;
            ctx->resampling_ctx->oheight = dstH;
            ctx->resampling_ctx->owidth  = dstW;
        }
    }
    if (!ctx->resampling_ctx) {
        /* either av_malloc() or img_resample_init() failed */
        av_log(NULL, AV_LOG_ERROR, "Cannot allocate a resampling context!\n");
        av_free(ctx->av_class);
        av_free(ctx);
        return NULL;
    }

    ctx->src_pix_fmt = srcFormat;
    ctx->dst_pix_fmt = dstFormat;

    return ctx;
}
| 25,143 |
1 | static int vp8_encode(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame, int *got_packet) { VP8Context *ctx = avctx->priv_data; struct vpx_image *rawimg = NULL; struct vpx_image *rawimg_alpha = NULL; int64_t timestamp = 0; int res, coded_size; vpx_enc_frame_flags_t flags = 0; if (frame) { rawimg = &ctx->rawimg; rawimg->planes[VPX_PLANE_Y] = frame->data[0]; rawimg->planes[VPX_PLANE_U] = frame->data[1]; rawimg->planes[VPX_PLANE_V] = frame->data[2]; rawimg->stride[VPX_PLANE_Y] = frame->linesize[0]; rawimg->stride[VPX_PLANE_U] = frame->linesize[1]; rawimg->stride[VPX_PLANE_V] = frame->linesize[2]; if (ctx->is_alpha) { uint8_t *u_plane, *v_plane; rawimg_alpha = &ctx->rawimg_alpha; rawimg_alpha->planes[VPX_PLANE_Y] = frame->data[3]; u_plane = av_malloc(frame->linesize[1] * frame->height); memset(u_plane, 0x80, frame->linesize[1] * frame->height); rawimg_alpha->planes[VPX_PLANE_U] = u_plane; v_plane = av_malloc(frame->linesize[2] * frame->height); memset(v_plane, 0x80, frame->linesize[2] * frame->height); rawimg_alpha->planes[VPX_PLANE_V] = v_plane; rawimg_alpha->stride[VPX_PLANE_Y] = frame->linesize[0]; rawimg_alpha->stride[VPX_PLANE_U] = frame->linesize[1]; rawimg_alpha->stride[VPX_PLANE_V] = frame->linesize[2]; } timestamp = frame->pts; if (frame->pict_type == AV_PICTURE_TYPE_I) flags |= VPX_EFLAG_FORCE_KF; } res = vpx_codec_encode(&ctx->encoder, rawimg, timestamp, avctx->ticks_per_frame, flags, ctx->deadline); if (res != VPX_CODEC_OK) { log_encoder_error(avctx, "Error encoding frame"); return AVERROR_INVALIDDATA; } if (ctx->is_alpha) { res = vpx_codec_encode(&ctx->encoder_alpha, rawimg_alpha, timestamp, avctx->ticks_per_frame, flags, ctx->deadline); if (res != VPX_CODEC_OK) { log_encoder_error(avctx, "Error encoding alpha frame"); return AVERROR_INVALIDDATA; } } coded_size = queue_frames(avctx, pkt, avctx->coded_frame); if (!frame && avctx->flags & CODEC_FLAG_PASS1) { unsigned int b64_size = AV_BASE64_SIZE(ctx->twopass_stats.sz); avctx->stats_out = 
av_malloc(b64_size); if (!avctx->stats_out) { av_log(avctx, AV_LOG_ERROR, "Stat buffer alloc (%d bytes) failed\n", b64_size); return AVERROR(ENOMEM); } av_base64_encode(avctx->stats_out, b64_size, ctx->twopass_stats.buf, ctx->twopass_stats.sz); } if (rawimg_alpha) { av_freep(&rawimg_alpha->planes[VPX_PLANE_U]); av_freep(&rawimg_alpha->planes[VPX_PLANE_V]); } *got_packet = !!coded_size; return 0; } | 25,144 |
1 | /**
 * Free an AVOptionRanges structure and everything it owns, then set the
 * caller's pointer to NULL.  A NULL *rangesp is a no-op.
 */
void av_opt_freep_ranges(AVOptionRanges **rangesp)
{
    AVOptionRanges *ranges = *rangesp;
    int total, idx;

    if (!ranges)
        return;

    /* The range table holds nb_ranges entries per component,
     * stored contiguously. */
    total = ranges->nb_ranges * ranges->nb_components;
    for (idx = 0; idx < total; idx++) {
        AVOptionRange *r = ranges->range[idx];
        av_freep(&r->str);
        av_freep(&ranges->range[idx]);
    }
    av_freep(&ranges->range);
    av_freep(rangesp);
}
| 25,145 |
0 | static int oggvorbis_encode_frame(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr) { OggVorbisEncContext *s = avctx->priv_data; ogg_packet op; int ret, duration; /* send samples to libvorbis */ if (frame) { const int samples = frame->nb_samples; float **buffer; int c, channels = s->vi.channels; buffer = vorbis_analysis_buffer(&s->vd, samples); for (c = 0; c < channels; c++) { int co = (channels > 8) ? c : ff_vorbis_encoding_channel_layout_offsets[channels - 1][c]; memcpy(buffer[c], frame->extended_data[co], samples * sizeof(*buffer[c])); } if ((ret = vorbis_analysis_wrote(&s->vd, samples)) < 0) { av_log(avctx, AV_LOG_ERROR, "error in vorbis_analysis_wrote()\n"); return vorbis_error_to_averror(ret); } if ((ret = ff_af_queue_add(&s->afq, frame)) < 0) return ret; } else { if (!s->eof) if ((ret = vorbis_analysis_wrote(&s->vd, 0)) < 0) { av_log(avctx, AV_LOG_ERROR, "error in vorbis_analysis_wrote()\n"); return vorbis_error_to_averror(ret); } s->eof = 1; } /* retrieve available packets from libvorbis */ while ((ret = vorbis_analysis_blockout(&s->vd, &s->vb)) == 1) { if ((ret = vorbis_analysis(&s->vb, NULL)) < 0) break; if ((ret = vorbis_bitrate_addblock(&s->vb)) < 0) break; /* add any available packets to the output packet buffer */ while ((ret = vorbis_bitrate_flushpacket(&s->vd, &op)) == 1) { if (av_fifo_space(s->pkt_fifo) < sizeof(ogg_packet) + op.bytes) { av_log(avctx, AV_LOG_ERROR, "packet buffer is too small\n"); return AVERROR_BUG; } av_fifo_generic_write(s->pkt_fifo, &op, sizeof(ogg_packet), NULL); av_fifo_generic_write(s->pkt_fifo, op.packet, op.bytes, NULL); } if (ret < 0) { av_log(avctx, AV_LOG_ERROR, "error getting available packets\n"); break; } } if (ret < 0) { av_log(avctx, AV_LOG_ERROR, "error getting available packets\n"); return vorbis_error_to_averror(ret); } /* check for available packets */ if (av_fifo_size(s->pkt_fifo) < sizeof(ogg_packet)) return 0; av_fifo_generic_read(s->pkt_fifo, &op, sizeof(ogg_packet), 
NULL); if ((ret = ff_alloc_packet2(avctx, avpkt, op.bytes))) return ret; av_fifo_generic_read(s->pkt_fifo, avpkt->data, op.bytes, NULL); avpkt->pts = ff_samples_to_time_base(avctx, op.granulepos); duration = avpriv_vorbis_parse_frame(&s->vp, avpkt->data, avpkt->size); if (duration > 0) { /* we do not know encoder delay until we get the first packet from * libvorbis, so we have to update the AudioFrameQueue counts */ if (!avctx->delay && s->afq.frames) { avctx->delay = duration; av_assert0(!s->afq.remaining_delay); s->afq.frames->duration += duration; s->afq.frames->pts -= duration; s->afq.remaining_samples += duration; } ff_af_queue_remove(&s->afq, duration, &avpkt->pts, &avpkt->duration); } *got_packet_ptr = 1; return 0; } | 25,147 |
1 | /* Remove a request from its device's pending-request queue, if it is
 * still enqueued.  Always emits the dequeue trace event, matching the
 * original behavior. */
static void scsi_req_dequeue(SCSIRequest *req)
{
    trace_scsi_req_dequeue(req->dev->id, req->lun, req->tag);
    if (!req->enqueued) {
        return;
    }
    QTAILQ_REMOVE(&req->dev->requests, req, next);
    req->enqueued = false;
}
| 25,148 |
1 | /**
 * Stress-test anonymous, unfixed mmap()s for collisions and page
 * alignment.  Repeatedly maps two single pages, frees the first, then
 * maps an 8-page region and verifies it neither overlaps the surviving
 * page nor violates page alignment.
 *
 * Bug fix: the original code memcpy'd from p3 and compared it against
 * p2 *before* checking p3 != MAP_FAILED, dereferencing (void *)-1 on
 * allocation failure.  The MAP_FAILED check now comes first.
 */
void check_aligned_anonymous_unfixed_colliding_mmaps(void)
{
    char *p1, *p2, *p3;
    uintptr_t p;
    int i;

    fprintf(stderr, "%s", __func__);
    for (i = 0; i < 0x2fff; i++) {
        int nlen;

        p1 = mmap(NULL, pagesize, PROT_READ,
                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        fail_unless(p1 != MAP_FAILED);
        p = (uintptr_t)p1;
        fail_unless((p & pagemask) == 0);
        memcpy(dummybuf, p1, pagesize);

        p2 = mmap(NULL, pagesize, PROT_READ,
                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        fail_unless(p2 != MAP_FAILED);
        p = (uintptr_t)p2;
        fail_unless((p & pagemask) == 0);
        memcpy(dummybuf, p2, pagesize);

        munmap(p1, pagesize);

        nlen = pagesize * 8;
        p3 = mmap(NULL, nlen, PROT_READ,
                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        /* Validate the mapping before touching it. */
        fail_unless(p3 != MAP_FAILED);
        /* Make sure we get pages aligned with the pagesize.
           The target expects this. */
        p = (uintptr_t)p3;
        fail_unless((p & pagemask) == 0);

        /* Check if the mmaped areas collide. */
        if (p3 < p2 && (p3 + nlen) > p2)
            fail_unless(0);
        memcpy(dummybuf, p3, pagesize);

        munmap(p2, pagesize);
        munmap(p3, nlen);
    }
    fprintf(stderr, " passed\n");
}
| 25,150 |
0 | void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status) { const int start_i = av_clip(startx + starty * s->mb_width, 0, s->mb_num - 1); const int end_i = av_clip(endx + endy * s->mb_width, 0, s->mb_num); const int start_xy = s->mb_index2xy[start_i]; const int end_xy = s->mb_index2xy[end_i]; int mask = -1; if (s->avctx->hwaccel) return; if (start_i > end_i || start_xy > end_xy) { av_log(s->avctx, AV_LOG_ERROR, "internal error, slice end before start\n"); return; } if (!s->avctx->err_recognition) return; mask &= ~VP_START; if (status & (ER_AC_ERROR | ER_AC_END)) { mask &= ~(ER_AC_ERROR | ER_AC_END); s->error_count -= end_i - start_i + 1; } if (status & (ER_DC_ERROR | ER_DC_END)) { mask &= ~(ER_DC_ERROR | ER_DC_END); s->error_count -= end_i - start_i + 1; } if (status & (ER_MV_ERROR | ER_MV_END)) { mask &= ~(ER_MV_ERROR | ER_MV_END); s->error_count -= end_i - start_i + 1; } if (status & ER_MB_ERROR) { s->error_occurred = 1; s->error_count = INT_MAX; } if (mask == ~0x7F) { memset(&s->error_status_table[start_xy], 0, (end_xy - start_xy) * sizeof(uint8_t)); } else { int i; for (i = start_xy; i < end_xy; i++) s->error_status_table[i] &= mask; } if (end_i == s->mb_num) s->error_count = INT_MAX; else { s->error_status_table[end_xy] &= mask; s->error_status_table[end_xy] |= status; } s->error_status_table[start_xy] |= VP_START; if (start_xy > 0 && !(s->avctx->active_thread_type & FF_THREAD_SLICE) && s->avctx->skip_top * s->mb_width < start_i) { int prev_status = s->error_status_table[s->mb_index2xy[start_i - 1]]; prev_status &= ~ VP_START; if (prev_status != (ER_MV_END | ER_DC_END | ER_AC_END)) { s->error_occurred = 1; s->error_count = INT_MAX; } } } | 25,151 |
0 | static void avc_luma_hv_qrt_4w_msa(const uint8_t *src_x, const uint8_t *src_y, int32_t src_stride, uint8_t *dst, int32_t dst_stride, int32_t height) { uint32_t loop_cnt; v16i8 src_hz0, src_hz1, src_hz2, src_hz3; v16i8 src_vt0, src_vt1, src_vt2, src_vt3, src_vt4; v16i8 src_vt5, src_vt6, src_vt7, src_vt8; v16i8 mask0, mask1, mask2; v8i16 hz_out0, hz_out1, vert_out0, vert_out1; v8i16 out0, out1; v16u8 out; LD_SB3(&luma_mask_arr[48], 16, mask0, mask1, mask2); LD_SB5(src_y, src_stride, src_vt0, src_vt1, src_vt2, src_vt3, src_vt4); src_y += (5 * src_stride); src_vt0 = (v16i8) __msa_insve_w((v4i32) src_vt0, 1, (v4i32) src_vt1); src_vt1 = (v16i8) __msa_insve_w((v4i32) src_vt1, 1, (v4i32) src_vt2); src_vt2 = (v16i8) __msa_insve_w((v4i32) src_vt2, 1, (v4i32) src_vt3); src_vt3 = (v16i8) __msa_insve_w((v4i32) src_vt3, 1, (v4i32) src_vt4); XORI_B4_128_SB(src_vt0, src_vt1, src_vt2, src_vt3); for (loop_cnt = (height >> 2); loop_cnt--;) { LD_SB4(src_x, src_stride, src_hz0, src_hz1, src_hz2, src_hz3); src_x += (4 * src_stride); XORI_B4_128_SB(src_hz0, src_hz1, src_hz2, src_hz3); hz_out0 = AVC_XOR_VSHF_B_AND_APPLY_6TAP_HORIZ_FILT_SH(src_hz0, src_hz1, mask0, mask1, mask2); hz_out1 = AVC_XOR_VSHF_B_AND_APPLY_6TAP_HORIZ_FILT_SH(src_hz2, src_hz3, mask0, mask1, mask2); SRARI_H2_SH(hz_out0, hz_out1, 5); SAT_SH2_SH(hz_out0, hz_out1, 7); LD_SB4(src_y, src_stride, src_vt5, src_vt6, src_vt7, src_vt8); src_y += (4 * src_stride); src_vt4 = (v16i8) __msa_insve_w((v4i32) src_vt4, 1, (v4i32) src_vt5); src_vt5 = (v16i8) __msa_insve_w((v4i32) src_vt5, 1, (v4i32) src_vt6); src_vt6 = (v16i8) __msa_insve_w((v4i32) src_vt6, 1, (v4i32) src_vt7); src_vt7 = (v16i8) __msa_insve_w((v4i32) src_vt7, 1, (v4i32) src_vt8); XORI_B4_128_SB(src_vt4, src_vt5, src_vt6, src_vt7); /* filter calc */ vert_out0 = AVC_CALC_DPADD_B_6PIX_2COEFF_R_SH(src_vt0, src_vt1, src_vt2, src_vt3, src_vt4, src_vt5); vert_out1 = AVC_CALC_DPADD_B_6PIX_2COEFF_R_SH(src_vt2, src_vt3, src_vt4, src_vt5, src_vt6, src_vt7); 
SRARI_H2_SH(vert_out0, vert_out1, 5); SAT_SH2_SH(vert_out0, vert_out1, 7); out0 = __msa_srari_h((hz_out0 + vert_out0), 1); out1 = __msa_srari_h((hz_out1 + vert_out1), 1); SAT_SH2_SH(out0, out1, 7); out = PCKEV_XORI128_UB(out0, out1); ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride); dst += (4 * dst_stride); src_vt3 = src_vt7; src_vt1 = src_vt5; src_vt0 = src_vt4; src_vt4 = src_vt8; src_vt2 = src_vt6; } } | 25,152 |
1 | /**
 * QMP handler: set the maximum tolerated migration downtime.
 *
 * @param value downtime limit in seconds (fractions allowed)
 * @param errp  unused; the clamped value is always accepted
 *
 * Bug fix: the original scaled to nanoseconds first and clamped with
 * MIN(UINT64_MAX, value).  UINT64_MAX rounds to 2^64 as a double, and
 * converting a double >= 2^64 (or a negative one) to uint64_t is
 * undefined behavior.  Clamp in the seconds domain before scaling.
 */
void qmp_migrate_set_downtime(double value, Error **errp)
{
    if (value < 0) {
        value = 0;
    }
    /* Largest seconds value whose nanosecond form fits in uint64_t. */
    if (value > (double)UINT64_MAX / 1e9) {
        value = (double)UINT64_MAX / 1e9;
    }
    max_downtime = (uint64_t)(value * 1e9);
}
| 25,153 |
1 | static void prop_get_fdt(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(obj); Error *err = NULL; int fdt_offset_next, fdt_offset, fdt_depth; void *fdt; if (!drc->fdt) { visit_start_struct(v, name, NULL, 0, &err); if (!err) { visit_end_struct(v, &err); } error_propagate(errp, err); return; } fdt = drc->fdt; fdt_offset = drc->fdt_start_offset; fdt_depth = 0; do { const char *name = NULL; const struct fdt_property *prop = NULL; int prop_len = 0, name_len = 0; uint32_t tag; tag = fdt_next_tag(fdt, fdt_offset, &fdt_offset_next); switch (tag) { case FDT_BEGIN_NODE: fdt_depth++; name = fdt_get_name(fdt, fdt_offset, &name_len); visit_start_struct(v, name, NULL, 0, &err); if (err) { error_propagate(errp, err); return; } break; case FDT_END_NODE: /* shouldn't ever see an FDT_END_NODE before FDT_BEGIN_NODE */ g_assert(fdt_depth > 0); visit_end_struct(v, &err); if (err) { error_propagate(errp, err); return; } fdt_depth--; break; case FDT_PROP: { int i; prop = fdt_get_property_by_offset(fdt, fdt_offset, &prop_len); name = fdt_string(fdt, fdt32_to_cpu(prop->nameoff)); visit_start_list(v, name, &err); if (err) { error_propagate(errp, err); return; } for (i = 0; i < prop_len; i++) { visit_type_uint8(v, NULL, (uint8_t *)&prop->data[i], &err); if (err) { error_propagate(errp, err); return; } } visit_end_list(v); break; } default: error_setg(&error_abort, "device FDT in unexpected state: %d", tag); } fdt_offset = fdt_offset_next; } while (fdt_depth != 0); } | 25,154 |
1 | static void qtrle_decode_24bpp(QtrleContext *s) { int stream_ptr; int header; int start_line; int lines_to_change; signed char rle_code; int row_ptr, pixel_ptr; int row_inc = s->frame.linesize[0]; unsigned char r, g, b; unsigned char *rgb = s->frame.data[0]; int pixel_limit = s->frame.linesize[0] * s->avctx->height; /* check if this frame is even supposed to change */ if (s->size < 8) return; /* start after the chunk size */ stream_ptr = 4; /* fetch the header */ CHECK_STREAM_PTR(2); header = BE_16(&s->buf[stream_ptr]); stream_ptr += 2; /* if a header is present, fetch additional decoding parameters */ if (header & 0x0008) { CHECK_STREAM_PTR(8); start_line = BE_16(&s->buf[stream_ptr]); stream_ptr += 4; lines_to_change = BE_16(&s->buf[stream_ptr]); stream_ptr += 4; } else { start_line = 0; lines_to_change = s->avctx->height; } row_ptr = row_inc * start_line; while (lines_to_change--) { CHECK_STREAM_PTR(2); pixel_ptr = row_ptr + (s->buf[stream_ptr++] - 1) * 3; while ((rle_code = (signed char)s->buf[stream_ptr++]) != -1) { if (rle_code == 0) { /* there's another skip code in the stream */ CHECK_STREAM_PTR(1); pixel_ptr += (s->buf[stream_ptr++] - 1) * 3; CHECK_PIXEL_PTR(0); /* make sure pixel_ptr is positive */ } else if (rle_code < 0) { /* decode the run length code */ rle_code = -rle_code; CHECK_STREAM_PTR(3); r = s->buf[stream_ptr++]; g = s->buf[stream_ptr++]; b = s->buf[stream_ptr++]; CHECK_PIXEL_PTR(rle_code * 3); while (rle_code--) { rgb[pixel_ptr++] = r; rgb[pixel_ptr++] = g; rgb[pixel_ptr++] = b; } } else { CHECK_STREAM_PTR(rle_code * 3); CHECK_PIXEL_PTR(rle_code * 3); /* copy pixels directly to output */ while (rle_code--) { rgb[pixel_ptr++] = s->buf[stream_ptr++]; rgb[pixel_ptr++] = s->buf[stream_ptr++]; rgb[pixel_ptr++] = s->buf[stream_ptr++]; } } } row_ptr += row_inc; } } | 25,155 |
1 | static void rtcp_send_sr(AVFormatContext *s1, int64_t ntp_time) { RTPDemuxContext *s = s1->priv_data; uint32_t rtp_ts; #if defined(DEBUG) printf("RTCP: %02x %"PRIx64" %x\n", s->payload_type, ntp_time, s->timestamp); #endif if (s->first_rtcp_ntp_time == AV_NOPTS_VALUE) s->first_rtcp_ntp_time = ntp_time; s->last_rtcp_ntp_time = ntp_time; rtp_ts = av_rescale_q(ntp_time - s->first_rtcp_ntp_time, AV_TIME_BASE_Q, s1->streams[0]->time_base) + s->base_timestamp; put_byte(s1->pb, (RTP_VERSION << 6)); put_byte(s1->pb, 200); put_be16(s1->pb, 6); /* length in words - 1 */ put_be32(s1->pb, s->ssrc); put_be32(s1->pb, ntp_time / 1000000); put_be32(s1->pb, ((ntp_time % 1000000) << 32) / 1000000); put_be32(s1->pb, rtp_ts); put_be32(s1->pb, s->packet_count); put_be32(s1->pb, s->octet_count); put_flush_packet(s1->pb); } | 25,156 |
1 | static void balloon_stats_get_all(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { Error *err = NULL; VirtIOBalloon *s = opaque; int i; visit_start_struct(v, name, NULL, 0, &err); if (err) { goto out; } visit_type_int(v, "last-update", &s->stats_last_update, &err); if (err) { goto out_end; } visit_start_struct(v, "stats", NULL, 0, &err); if (err) { goto out_end; } for (i = 0; i < VIRTIO_BALLOON_S_NR; i++) { visit_type_uint64(v, balloon_stat_names[i], &s->stats[i], &err); if (err) { break; } } error_propagate(errp, err); err = NULL; visit_end_struct(v, &err); out_end: error_propagate(errp, err); err = NULL; visit_end_struct(v, &err); out: error_propagate(errp, err); } | 25,158 |
1 | /**
 * Set (or replace) a "NAME=value" variable in the environment list.
 *
 * Bug fix: the original removed any existing entry *before* allocating
 * the replacement, so a failed malloc()/strdup() destroyed the old
 * value and left el_count inconsistent.  The new entry is now fully
 * constructed first; on allocation failure the list is untouched.
 *
 * @return 0 on success, EINVAL for bad arguments, or errno on OOM
 */
envlist_setenv(envlist_t *envlist, const char *env)
{
    struct envlist_entry *entry, *old;
    const char *eq_sign;
    size_t envname_len;

    if ((envlist == NULL) || (env == NULL))
        return (EINVAL);

    /* find out first equals sign in given env */
    if ((eq_sign = strchr(env, '=')) == NULL)
        return (EINVAL);
    /* include the '=' so "FOO=" does not match "FOOBAR=" */
    envname_len = eq_sign - env + 1;

    /* build the replacement entry before touching the list */
    if ((entry = malloc(sizeof(*entry))) == NULL)
        return (errno);
    if ((entry->ev_var = strdup(env)) == NULL) {
        free(entry);
        return (errno);
    }

    /*
     * If there already exists a variable with the given name,
     * remove and release it now that the replacement is ready.
     */
    for (old = envlist->el_entries.lh_first; old != NULL;
         old = old->ev_link.le_next) {
        if (strncmp(old->ev_var, env, envname_len) == 0)
            break;
    }
    if (old != NULL) {
        QLIST_REMOVE(old, ev_link);
        free((char *)old->ev_var);
        free(old);
    } else {
        envlist->el_count++;
    }

    QLIST_INSERT_HEAD(&envlist->el_entries, entry, ev_link);
    return (0);
}
| 25,159 |
0 | static inline int check_ap(CPUARMState *env, int ap, int domain_prot, int access_type, int is_user) { int prot_ro; if (domain_prot == 3) { return PAGE_READ | PAGE_WRITE; } if (access_type == 1) prot_ro = 0; else prot_ro = PAGE_READ; switch (ap) { case 0: if (access_type == 1) return 0; switch ((env->cp15.c1_sys >> 8) & 3) { case 1: return is_user ? 0 : PAGE_READ; case 2: return PAGE_READ; default: return 0; } case 1: return is_user ? 0 : PAGE_READ | PAGE_WRITE; case 2: if (is_user) return prot_ro; else return PAGE_READ | PAGE_WRITE; case 3: return PAGE_READ | PAGE_WRITE; case 4: /* Reserved. */ return 0; case 5: return is_user ? 0 : prot_ro; case 6: return prot_ro; case 7: if (!arm_feature (env, ARM_FEATURE_V6K)) return 0; return prot_ro; default: abort(); } } | 25,160 |
0 | static void test_mirror(void) { int send_sock[2], recv_sock; char *cmdline; uint32_t ret = 0, len = 0; char send_buf[] = "Hello! filter-mirror~"; char sock_path[] = "filter-mirror.XXXXXX"; char *recv_buf; uint32_t size = sizeof(send_buf); size = htonl(size); ret = socketpair(PF_UNIX, SOCK_STREAM, 0, send_sock); g_assert_cmpint(ret, !=, -1); ret = mkstemp(sock_path); g_assert_cmpint(ret, !=, -1); cmdline = g_strdup_printf("-netdev socket,id=qtest-bn0,fd=%d " "-device e1000,netdev=qtest-bn0,id=qtest-e0 " "-chardev socket,id=mirror0,path=%s,server,nowait " "-object filter-mirror,id=qtest-f0,netdev=qtest-bn0,queue=tx,outdev=mirror0 " , send_sock[1], sock_path); qtest_start(cmdline); g_free(cmdline); recv_sock = unix_connect(sock_path, NULL); g_assert_cmpint(recv_sock, !=, -1); struct iovec iov[] = { { .iov_base = &size, .iov_len = sizeof(size), }, { .iov_base = send_buf, .iov_len = sizeof(send_buf), }, }; /* send a qmp command to guarantee that 'connected' is setting to true. */ qmp_discard_response("{ 'execute' : 'query-status'}"); ret = iov_send(send_sock[0], iov, 2, 0, sizeof(size) + sizeof(send_buf)); g_assert_cmpint(ret, ==, sizeof(send_buf) + sizeof(size)); close(send_sock[0]); ret = qemu_recv(recv_sock, &len, sizeof(len), 0); g_assert_cmpint(ret, ==, sizeof(len)); len = ntohl(len); g_assert_cmpint(len, ==, sizeof(send_buf)); recv_buf = g_malloc(len); ret = qemu_recv(recv_sock, recv_buf, len, 0); g_assert_cmpstr(recv_buf, ==, send_buf); g_free(recv_buf); close(recv_sock); unlink(sock_path); } | 25,161 |
0 | S390PCIBusDevice *s390_pci_find_dev_by_fh(uint32_t fh) { S390PCIBusDevice *pbdev; int i; S390pciState *s = S390_PCI_HOST_BRIDGE( object_resolve_path(TYPE_S390_PCI_HOST_BRIDGE, NULL)); if (!s || !fh) { return NULL; } for (i = 0; i < PCI_SLOT_MAX; i++) { pbdev = &s->pbdev[i]; if (pbdev->fh == fh) { return pbdev; } } return NULL; } | 25,162 |
0 | static void decode_cabac_residual( H264Context *h, DCTELEM *block, int cat, int n, const uint8_t *scantable, const uint32_t *qmul, int max_coeff) { static const int significant_coeff_flag_offset[2][6] = { { 105+0, 105+15, 105+29, 105+44, 105+47, 402 }, { 277+0, 277+15, 277+29, 277+44, 277+47, 436 } }; static const int last_coeff_flag_offset[2][6] = { { 166+0, 166+15, 166+29, 166+44, 166+47, 417 }, { 338+0, 338+15, 338+29, 338+44, 338+47, 451 } }; static const int coeff_abs_level_m1_offset[6] = { 227+0, 227+10, 227+20, 227+30, 227+39, 426 }; static const uint8_t significant_coeff_flag_offset_8x8[2][63] = { { 0, 1, 2, 3, 4, 5, 5, 4, 4, 3, 3, 4, 4, 4, 5, 5, 4, 4, 4, 4, 3, 3, 6, 7, 7, 7, 8, 9,10, 9, 8, 7, 7, 6,11,12,13,11, 6, 7, 8, 9,14,10, 9, 8, 6,11, 12,13,11, 6, 9,14,10, 9,11,12,13,11,14,10,12 }, { 0, 1, 1, 2, 2, 3, 3, 4, 5, 6, 7, 7, 7, 8, 4, 5, 6, 9,10,10, 8,11,12,11, 9, 9,10,10, 8,11,12,11, 9, 9,10,10, 8,11,12,11, 9, 9,10,10, 8,13,13, 9, 9,10,10, 8,13,13, 9, 9,10,10,14,14,14,14,14 } }; /* node ctx: 0..3: abslevel1 (with abslevelgt1 == 0). * 4..7: abslevelgt1 + 3 (and abslevel1 doesn't matter). 
* map node ctx => cabac ctx for level=1 */ static const uint8_t coeff_abs_level1_ctx[8] = { 1, 2, 3, 4, 0, 0, 0, 0 }; /* map node ctx => cabac ctx for level>1 */ static const uint8_t coeff_abs_levelgt1_ctx[8] = { 5, 5, 5, 5, 6, 7, 8, 9 }; static const uint8_t coeff_abs_level_transition[2][8] = { /* update node ctx after decoding a level=1 */ { 1, 2, 3, 3, 4, 5, 6, 7 }, /* update node ctx after decoding a level>1 */ { 4, 4, 4, 4, 5, 6, 7, 7 } }; int index[64]; int av_unused last; int coeff_count = 0; int node_ctx = 0; uint8_t *significant_coeff_ctx_base; uint8_t *last_coeff_ctx_base; uint8_t *abs_level_m1_ctx_base; #ifndef ARCH_X86 #define CABAC_ON_STACK #endif #ifdef CABAC_ON_STACK #define CC &cc CABACContext cc; cc.range = h->cabac.range; cc.low = h->cabac.low; cc.bytestream= h->cabac.bytestream; #else #define CC &h->cabac #endif /* cat: 0-> DC 16x16 n = 0 * 1-> AC 16x16 n = luma4x4idx * 2-> Luma4x4 n = luma4x4idx * 3-> DC Chroma n = iCbCr * 4-> AC Chroma n = 4 * iCbCr + chroma4x4idx * 5-> Luma8x8 n = 4 * luma8x8idx */ /* read coded block flag */ if( cat != 5 ) { if( get_cabac( CC, &h->cabac_state[85 + get_cabac_cbf_ctx( h, cat, n ) ] ) == 0 ) { if( cat == 1 || cat == 2 ) h->non_zero_count_cache[scan8[n]] = 0; else if( cat == 4 ) h->non_zero_count_cache[scan8[16+n]] = 0; #ifdef CABAC_ON_STACK h->cabac.range = cc.range ; h->cabac.low = cc.low ; h->cabac.bytestream= cc.bytestream; #endif return; } } significant_coeff_ctx_base = h->cabac_state + significant_coeff_flag_offset[MB_FIELD][cat]; last_coeff_ctx_base = h->cabac_state + last_coeff_flag_offset[MB_FIELD][cat]; abs_level_m1_ctx_base = h->cabac_state + coeff_abs_level_m1_offset[cat]; if( cat == 5 ) { #define DECODE_SIGNIFICANCE( coefs, sig_off, last_off ) \ for(last= 0; last < coefs; last++) { \ uint8_t *sig_ctx = significant_coeff_ctx_base + sig_off; \ if( get_cabac( CC, sig_ctx )) { \ uint8_t *last_ctx = last_coeff_ctx_base + last_off; \ index[coeff_count++] = last; \ if( get_cabac( CC, last_ctx ) ) { \ last= 
max_coeff; \ break; \ } \ } \ }\ if( last == max_coeff -1 ) {\ index[coeff_count++] = last;\ } const uint8_t *sig_off = significant_coeff_flag_offset_8x8[MB_FIELD]; #if defined(ARCH_X86) && defined(HAVE_7REGS) && defined(HAVE_EBX_AVAILABLE) && !defined(BROKEN_RELOCATIONS) coeff_count= decode_significance_8x8_x86(CC, significant_coeff_ctx_base, index, sig_off); } else { coeff_count= decode_significance_x86(CC, max_coeff, significant_coeff_ctx_base, index); #else DECODE_SIGNIFICANCE( 63, sig_off[last], last_coeff_flag_offset_8x8[last] ); } else { DECODE_SIGNIFICANCE( max_coeff - 1, last, last ); #endif } assert(coeff_count > 0); if( cat == 0 ) h->cbp_table[h->mb_xy] |= 0x100; else if( cat == 1 || cat == 2 ) h->non_zero_count_cache[scan8[n]] = coeff_count; else if( cat == 3 ) h->cbp_table[h->mb_xy] |= 0x40 << n; else if( cat == 4 ) h->non_zero_count_cache[scan8[16+n]] = coeff_count; else { assert( cat == 5 ); fill_rectangle(&h->non_zero_count_cache[scan8[n]], 2, 2, 8, coeff_count, 1); } for( coeff_count--; coeff_count >= 0; coeff_count-- ) { uint8_t *ctx = coeff_abs_level1_ctx[node_ctx] + abs_level_m1_ctx_base; int j= scantable[index[coeff_count]]; if( get_cabac( CC, ctx ) == 0 ) { node_ctx = coeff_abs_level_transition[0][node_ctx]; if( !qmul ) { block[j] = get_cabac_bypass_sign( CC, -1); }else{ block[j] = (get_cabac_bypass_sign( CC, -qmul[j]) + 32) >> 6; } } else { int coeff_abs = 2; ctx = coeff_abs_levelgt1_ctx[node_ctx] + abs_level_m1_ctx_base; node_ctx = coeff_abs_level_transition[1][node_ctx]; while( coeff_abs < 15 && get_cabac( CC, ctx ) ) { coeff_abs++; } if( coeff_abs >= 15 ) { int j = 0; while( get_cabac_bypass( CC ) ) { j++; } coeff_abs=1; while( j-- ) { coeff_abs += coeff_abs + get_cabac_bypass( CC ); } coeff_abs+= 14; } if( !qmul ) { if( get_cabac_bypass( CC ) ) block[j] = -coeff_abs; else block[j] = coeff_abs; }else{ if( get_cabac_bypass( CC ) ) block[j] = (-coeff_abs * qmul[j] + 32) >> 6; else block[j] = ( coeff_abs * qmul[j] + 32) >> 6; } } } #ifdef 
CABAC_ON_STACK h->cabac.range = cc.range ; h->cabac.low = cc.low ; h->cabac.bytestream= cc.bytestream; #endif } | 25,164 |
0 | static void serial_ioport_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) { SerialState *s = opaque; addr &= 7; DPRINTF("write addr=0x%" HWADDR_PRIx " val=0x%" PRIx64 "\n", addr, val); switch(addr) { default: case 0: if (s->lcr & UART_LCR_DLAB) { s->divider = (s->divider & 0xff00) | val; serial_update_parameters(s); } else { s->thr = (uint8_t) val; if(s->fcr & UART_FCR_FE) { /* xmit overruns overwrite data, so make space if needed */ if (fifo8_is_full(&s->xmit_fifo)) { fifo8_pop(&s->xmit_fifo); } fifo8_push(&s->xmit_fifo, s->thr); } s->thr_ipending = 0; s->lsr &= ~UART_LSR_THRE; s->lsr &= ~UART_LSR_TEMT; serial_update_irq(s); if (s->tsr_retry <= 0) { serial_xmit(NULL, G_IO_OUT, s); } } break; case 1: if (s->lcr & UART_LCR_DLAB) { s->divider = (s->divider & 0x00ff) | (val << 8); serial_update_parameters(s); } else { uint8_t changed = (s->ier ^ val) & 0x0f; s->ier = val & 0x0f; /* If the backend device is a real serial port, turn polling of the modem * status lines on physical port on or off depending on UART_IER_MSI state. */ if ((changed & UART_IER_MSI) && s->poll_msl >= 0) { if (s->ier & UART_IER_MSI) { s->poll_msl = 1; serial_update_msl(s); } else { timer_del(s->modem_status_poll); s->poll_msl = 0; } } /* Turning on the THRE interrupt on IER can trigger the interrupt * if LSR.THRE=1, even if it had been masked before by reading IIR. * This is not in the datasheet, but Windows relies on it. It is * unclear if THRE has to be resampled every time THRI becomes * 1, or only on the rising edge. Bochs does the latter, and Windows * always toggles IER to all zeroes and back to all ones, so do the * same. * * If IER.THRI is zero, thr_ipending is not used. Set it to zero * so that the thr_ipending subsection is not migrated. 
*/ if (changed & UART_IER_THRI) { if ((s->ier & UART_IER_THRI) && (s->lsr & UART_LSR_THRE)) { s->thr_ipending = 1; } else { s->thr_ipending = 0; } } if (changed) { serial_update_irq(s); } } break; case 2: /* Did the enable/disable flag change? If so, make sure FIFOs get flushed */ if ((val ^ s->fcr) & UART_FCR_FE) { val |= UART_FCR_XFR | UART_FCR_RFR; } /* FIFO clear */ if (val & UART_FCR_RFR) { s->lsr &= ~(UART_LSR_DR | UART_LSR_BI); timer_del(s->fifo_timeout_timer); s->timeout_ipending = 0; fifo8_reset(&s->recv_fifo); } if (val & UART_FCR_XFR) { s->lsr |= UART_LSR_THRE; s->thr_ipending = 1; fifo8_reset(&s->xmit_fifo); } serial_write_fcr(s, val & 0xC9); serial_update_irq(s); break; case 3: { int break_enable; s->lcr = val; serial_update_parameters(s); break_enable = (val >> 6) & 1; if (break_enable != s->last_break_enable) { s->last_break_enable = break_enable; qemu_chr_fe_ioctl(s->chr, CHR_IOCTL_SERIAL_SET_BREAK, &break_enable); } } break; case 4: { int flags; int old_mcr = s->mcr; s->mcr = val & 0x1f; if (val & UART_MCR_LOOP) break; if (s->poll_msl >= 0 && old_mcr != s->mcr) { qemu_chr_fe_ioctl(s->chr,CHR_IOCTL_SERIAL_GET_TIOCM, &flags); flags &= ~(CHR_TIOCM_RTS | CHR_TIOCM_DTR); if (val & UART_MCR_RTS) flags |= CHR_TIOCM_RTS; if (val & UART_MCR_DTR) flags |= CHR_TIOCM_DTR; qemu_chr_fe_ioctl(s->chr,CHR_IOCTL_SERIAL_SET_TIOCM, &flags); /* Update the modem status after a one-character-send wait-time, since there may be a response from the device/computer at the other end of the serial line */ timer_mod(s->modem_status_poll, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + s->char_transmit_time); } } break; case 5: break; case 6: break; case 7: s->scr = val; break; } } | 25,165 |
0 | void bdrv_drain(BlockDriverState *bs) { while (bdrv_drain_one(bs)) { /* Keep iterating */ } } | 25,166 |
0 | static void nvdimm_dsm_set_label_data(NVDIMMDevice *nvdimm, NvdimmDsmIn *in, hwaddr dsm_mem_addr) { NVDIMMClass *nvc = NVDIMM_GET_CLASS(nvdimm); NvdimmFuncSetLabelDataIn *set_label_data; uint32_t status; set_label_data = (NvdimmFuncSetLabelDataIn *)in->arg3; le32_to_cpus(&set_label_data->offset); le32_to_cpus(&set_label_data->length); nvdimm_debug("Write Label Data: offset %#x length %#x.\n", set_label_data->offset, set_label_data->length); status = nvdimm_rw_label_data_check(nvdimm, set_label_data->offset, set_label_data->length); if (status != 0 /* Success */) { nvdimm_dsm_no_payload(status, dsm_mem_addr); return; } assert(offsetof(NvdimmDsmIn, arg3) + sizeof(*set_label_data) + set_label_data->length <= 4096); nvc->write_label_data(nvdimm, set_label_data->in_buf, set_label_data->length, set_label_data->offset); nvdimm_dsm_no_payload(0 /* Success */, dsm_mem_addr); } | 25,168 |
0 | static void slirp_receive(void *opaque, const uint8_t *buf, size_t size) { #ifdef DEBUG_SLIRP printf("slirp input:\n"); hex_dump(stdout, buf, size); #endif slirp_input(buf, size); } | 25,169 |
0 | void op_dmfc0_ebase (void) { T0 = env->CP0_EBase; RETURN(); } | 25,170 |
0 | void sparc_cpu_list(FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...)) { unsigned int i; for (i = 0; i < ARRAY_SIZE(sparc_defs); i++) { (*cpu_fprintf)(f, "Sparc %16s IU " TARGET_FMT_lx " FPU %08x MMU %08x NWINS %d ", sparc_defs[i].name, sparc_defs[i].iu_version, sparc_defs[i].fpu_version, sparc_defs[i].mmu_version, sparc_defs[i].nwindows); print_features(f, cpu_fprintf, CPU_DEFAULT_FEATURES & ~sparc_defs[i].features, "-"); print_features(f, cpu_fprintf, ~CPU_DEFAULT_FEATURES & sparc_defs[i].features, "+"); (*cpu_fprintf)(f, "\n"); } (*cpu_fprintf)(f, "Default CPU feature flags (use '-' to remove): "); print_features(f, cpu_fprintf, CPU_DEFAULT_FEATURES, NULL); (*cpu_fprintf)(f, "\n"); (*cpu_fprintf)(f, "Available CPU feature flags (use '+' to add): "); print_features(f, cpu_fprintf, ~CPU_DEFAULT_FEATURES, NULL); (*cpu_fprintf)(f, "\n"); (*cpu_fprintf)(f, "Numerical features (use '=' to set): iu_version " "fpu_version mmu_version nwindows\n"); } | 25,171 |
0 | static uint32_t dma_rinvalid (void *opaque, target_phys_addr_t addr) { hw_error("Unsupported short raccess. reg=" TARGET_FMT_plx "\n", addr); return 0; } | 25,172 |
0 | static void scsi_hd_class_initfn(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); sc->init = scsi_hd_initfn; sc->destroy = scsi_destroy; sc->alloc_req = scsi_new_request; sc->unit_attention_reported = scsi_disk_unit_attention_reported; dc->fw_name = "disk"; dc->desc = "virtual SCSI disk"; dc->reset = scsi_disk_reset; dc->props = scsi_hd_properties; dc->vmsd = &vmstate_scsi_disk_state; } | 25,173 |
0 | static int qxl_init_secondary(PCIDevice *dev) { static int device_id = 1; PCIQXLDevice *qxl = DO_UPCAST(PCIQXLDevice, pci, dev); ram_addr_t ram_size = msb_mask(qxl->vga.vram_size * 2 - 1); qxl->id = device_id++; if (ram_size < 16 * 1024 * 1024) { ram_size = 16 * 1024 * 1024; } qxl->vga.vram_size = ram_size; qxl->vga.vram_offset = qemu_ram_alloc(&qxl->pci.qdev, "qxl.vgavram", qxl->vga.vram_size); qxl->vga.vram_ptr = qemu_get_ram_ptr(qxl->vga.vram_offset); pci_config_set_class(dev->config, PCI_CLASS_DISPLAY_OTHER); return qxl_init_common(qxl); } | 25,174 |
0 | static inline void downmix_2f_2r_to_dolby(float *samples) { int i; for (i = 0; i < 256; i++) { samples[i] -= samples[i + 512]; samples[i + 256] += samples[i + 768]; samples[i + 512] = samples[i + 768] = 0; } } | 25,175 |
0 | static void disas_simd_3same_logic(DisasContext *s, uint32_t insn) { int rd = extract32(insn, 0, 5); int rn = extract32(insn, 5, 5); int rm = extract32(insn, 16, 5); int size = extract32(insn, 22, 2); bool is_u = extract32(insn, 29, 1); bool is_q = extract32(insn, 30, 1); TCGv_i64 tcg_op1 = tcg_temp_new_i64(); TCGv_i64 tcg_op2 = tcg_temp_new_i64(); TCGv_i64 tcg_res[2]; int pass; tcg_res[0] = tcg_temp_new_i64(); tcg_res[1] = tcg_temp_new_i64(); for (pass = 0; pass < (is_q ? 2 : 1); pass++) { read_vec_element(s, tcg_op1, rn, pass, MO_64); read_vec_element(s, tcg_op2, rm, pass, MO_64); if (!is_u) { switch (size) { case 0: /* AND */ tcg_gen_and_i64(tcg_res[pass], tcg_op1, tcg_op2); break; case 1: /* BIC */ tcg_gen_andc_i64(tcg_res[pass], tcg_op1, tcg_op2); break; case 2: /* ORR */ tcg_gen_or_i64(tcg_res[pass], tcg_op1, tcg_op2); break; case 3: /* ORN */ tcg_gen_orc_i64(tcg_res[pass], tcg_op1, tcg_op2); break; } } else { if (size != 0) { /* B* ops need res loaded to operate on */ read_vec_element(s, tcg_res[pass], rd, pass, MO_64); } switch (size) { case 0: /* EOR */ tcg_gen_xor_i64(tcg_res[pass], tcg_op1, tcg_op2); break; case 1: /* BSL bitwise select */ tcg_gen_xor_i64(tcg_op1, tcg_op1, tcg_op2); tcg_gen_and_i64(tcg_op1, tcg_op1, tcg_res[pass]); tcg_gen_xor_i64(tcg_res[pass], tcg_op2, tcg_op1); break; case 2: /* BIT, bitwise insert if true */ tcg_gen_xor_i64(tcg_op1, tcg_op1, tcg_res[pass]); tcg_gen_and_i64(tcg_op1, tcg_op1, tcg_op2); tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1); break; case 3: /* BIF, bitwise insert if false */ tcg_gen_xor_i64(tcg_op1, tcg_op1, tcg_res[pass]); tcg_gen_andc_i64(tcg_op1, tcg_op1, tcg_op2); tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1); break; } } } write_vec_element(s, tcg_res[0], rd, 0, MO_64); if (!is_q) { tcg_gen_movi_i64(tcg_res[1], 0); } write_vec_element(s, tcg_res[1], rd, 1, MO_64); tcg_temp_free_i64(tcg_op1); tcg_temp_free_i64(tcg_op2); tcg_temp_free_i64(tcg_res[0]); tcg_temp_free_i64(tcg_res[1]); } | 
25,176 |
0 | void net_check_clients(void) { VLANState *vlan; VLANClientState *vc; int i; /* Don't warn about the default network setup that you get if * no command line -net or -netdev options are specified. There * are two cases that we would otherwise complain about: * (1) board doesn't support a NIC but the implicit "-net nic" * requested one * (2) CONFIG_SLIRP not set, in which case the implicit "-net nic" * sets up a nic that isn't connected to anything. */ if (default_net) { return; } QTAILQ_FOREACH(vlan, &vlans, next) { int has_nic = 0, has_host_dev = 0; QTAILQ_FOREACH(vc, &vlan->clients, next) { switch (vc->info->type) { case NET_CLIENT_OPTIONS_KIND_NIC: has_nic = 1; break; case NET_CLIENT_OPTIONS_KIND_USER: case NET_CLIENT_OPTIONS_KIND_TAP: case NET_CLIENT_OPTIONS_KIND_SOCKET: case NET_CLIENT_OPTIONS_KIND_VDE: has_host_dev = 1; break; default: ; } } if (has_host_dev && !has_nic) fprintf(stderr, "Warning: vlan %d with no nics\n", vlan->id); if (has_nic && !has_host_dev) fprintf(stderr, "Warning: vlan %d is not connected to host network\n", vlan->id); } QTAILQ_FOREACH(vc, &non_vlan_clients, next) { if (!vc->peer) { fprintf(stderr, "Warning: %s %s has no peer\n", vc->info->type == NET_CLIENT_OPTIONS_KIND_NIC ? "nic" : "netdev", vc->name); } } /* Check that all NICs requested via -net nic actually got created. * NICs created via -device don't need to be checked here because * they are always instantiated. */ for (i = 0; i < MAX_NICS; i++) { NICInfo *nd = &nd_table[i]; if (nd->used && !nd->instantiated) { fprintf(stderr, "Warning: requested NIC (%s, model %s) " "was not created (not supported by this machine?)\n", nd->name ? nd->name : "anonymous", nd->model ? nd->model : "unspecified"); } } } | 25,177 |
0 | static uint16_t read_u16(uint8_t *data, size_t offset) { return ((data[offset] & 0xFF) << 8) | (data[offset + 1] & 0xFF); } | 25,178 |
1 | av_cold void ff_psy_end(FFPsyContext *ctx) { if (ctx->model->end) ctx->model->end(ctx); av_freep(&ctx->bands); av_freep(&ctx->num_bands); av_freep(&ctx->group); av_freep(&ctx->ch); } | 25,180 |
1 | static void parse_str(StringInputVisitor *siv, Error **errp) { char *str = (char *) siv->string; long long start, end; Range *cur; char *endptr; if (siv->ranges) { return; } do { errno = 0; start = strtoll(str, &endptr, 0); if (errno == 0 && endptr > str) { if (*endptr == '\0') { cur = g_malloc0(sizeof(*cur)); cur->begin = start; cur->end = start + 1; siv->ranges = g_list_insert_sorted_merged(siv->ranges, cur, range_compare); cur = NULL; str = NULL; } else if (*endptr == '-') { str = endptr + 1; errno = 0; end = strtoll(str, &endptr, 0); if (errno == 0 && endptr > str && start <= end && (start > INT64_MAX - 65536 || end < start + 65536)) { if (*endptr == '\0') { cur = g_malloc0(sizeof(*cur)); cur->begin = start; cur->end = end + 1; siv->ranges = g_list_insert_sorted_merged(siv->ranges, cur, range_compare); cur = NULL; str = NULL; } else if (*endptr == ',') { str = endptr + 1; cur = g_malloc0(sizeof(*cur)); cur->begin = start; cur->end = end + 1; siv->ranges = g_list_insert_sorted_merged(siv->ranges, cur, range_compare); cur = NULL; } else { goto error; } } else { goto error; } } else if (*endptr == ',') { str = endptr + 1; cur = g_malloc0(sizeof(*cur)); cur->begin = start; cur->end = start + 1; siv->ranges = g_list_insert_sorted_merged(siv->ranges, cur, range_compare); cur = NULL; } else { goto error; } } else { goto error; } } while (str); return; error: g_list_foreach(siv->ranges, free_range, NULL); g_list_free(siv->ranges); siv->ranges = NULL; } | 25,183 |
1 | int inet_nonblocking_connect(const char *str, bool *in_progress, Error **errp) { QemuOpts *opts; int sock = -1; opts = qemu_opts_create(&dummy_opts, NULL, 0, NULL); if (inet_parse(opts, str) == 0) { sock = inet_connect_opts(opts, false, in_progress, errp); } else { error_set(errp, QERR_SOCKET_CREATE_FAILED); } qemu_opts_del(opts); return sock; } | 25,186 |
1 | roundAndPackFloatx80( int8 roundingPrecision, flag zSign, int32 zExp, uint64_t zSig0, uint64_t zSig1 STATUS_PARAM) { int8 roundingMode; flag roundNearestEven, increment, isTiny; int64 roundIncrement, roundMask, roundBits; roundingMode = STATUS(float_rounding_mode); roundNearestEven = ( roundingMode == float_round_nearest_even ); if ( roundingPrecision == 80 ) goto precision80; if ( roundingPrecision == 64 ) { roundIncrement = LIT64( 0x0000000000000400 ); roundMask = LIT64( 0x00000000000007FF ); } else if ( roundingPrecision == 32 ) { roundIncrement = LIT64( 0x0000008000000000 ); roundMask = LIT64( 0x000000FFFFFFFFFF ); } else { goto precision80; } zSig0 |= ( zSig1 != 0 ); if ( ! roundNearestEven ) { if ( roundingMode == float_round_to_zero ) { roundIncrement = 0; } else { roundIncrement = roundMask; if ( zSign ) { if ( roundingMode == float_round_up ) roundIncrement = 0; } else { if ( roundingMode == float_round_down ) roundIncrement = 0; } } } roundBits = zSig0 & roundMask; if ( 0x7FFD <= (uint32_t) ( zExp - 1 ) ) { if ( ( 0x7FFE < zExp ) || ( ( zExp == 0x7FFE ) && ( zSig0 + roundIncrement < zSig0 ) ) ) { goto overflow; } if ( zExp <= 0 ) { if ( STATUS(flush_to_zero) ) return packFloatx80( zSign, 0, 0 ); isTiny = ( STATUS(float_detect_tininess) == float_tininess_before_rounding ) || ( zExp < 0 ) || ( zSig0 <= zSig0 + roundIncrement ); shift64RightJamming( zSig0, 1 - zExp, &zSig0 ); zExp = 0; roundBits = zSig0 & roundMask; if ( isTiny && roundBits ) float_raise( float_flag_underflow STATUS_VAR); if ( roundBits ) STATUS(float_exception_flags) |= float_flag_inexact; zSig0 += roundIncrement; if ( (int64_t) zSig0 < 0 ) zExp = 1; roundIncrement = roundMask + 1; if ( roundNearestEven && ( roundBits<<1 == roundIncrement ) ) { roundMask |= roundIncrement; } zSig0 &= ~ roundMask; return packFloatx80( zSign, zExp, zSig0 ); } } if ( roundBits ) STATUS(float_exception_flags) |= float_flag_inexact; zSig0 += roundIncrement; if ( zSig0 < roundIncrement ) { ++zExp; zSig0 = 
LIT64( 0x8000000000000000 ); } roundIncrement = roundMask + 1; if ( roundNearestEven && ( roundBits<<1 == roundIncrement ) ) { roundMask |= roundIncrement; } zSig0 &= ~ roundMask; if ( zSig0 == 0 ) zExp = 0; return packFloatx80( zSign, zExp, zSig0 ); precision80: increment = ( (int64_t) zSig1 < 0 ); if ( ! roundNearestEven ) { if ( roundingMode == float_round_to_zero ) { increment = 0; } else { if ( zSign ) { increment = ( roundingMode == float_round_down ) && zSig1; } else { increment = ( roundingMode == float_round_up ) && zSig1; } } } if ( 0x7FFD <= (uint32_t) ( zExp - 1 ) ) { if ( ( 0x7FFE < zExp ) || ( ( zExp == 0x7FFE ) && ( zSig0 == LIT64( 0xFFFFFFFFFFFFFFFF ) ) && increment ) ) { roundMask = 0; overflow: float_raise( float_flag_overflow | float_flag_inexact STATUS_VAR); if ( ( roundingMode == float_round_to_zero ) || ( zSign && ( roundingMode == float_round_up ) ) || ( ! zSign && ( roundingMode == float_round_down ) ) ) { return packFloatx80( zSign, 0x7FFE, ~ roundMask ); } return packFloatx80( zSign, 0x7FFF, LIT64( 0x8000000000000000 ) ); } if ( zExp <= 0 ) { isTiny = ( STATUS(float_detect_tininess) == float_tininess_before_rounding ) || ( zExp < 0 ) || ! 
increment || ( zSig0 < LIT64( 0xFFFFFFFFFFFFFFFF ) ); shift64ExtraRightJamming( zSig0, zSig1, 1 - zExp, &zSig0, &zSig1 ); zExp = 0; if ( isTiny && zSig1 ) float_raise( float_flag_underflow STATUS_VAR); if ( zSig1 ) STATUS(float_exception_flags) |= float_flag_inexact; if ( roundNearestEven ) { increment = ( (int64_t) zSig1 < 0 ); } else { if ( zSign ) { increment = ( roundingMode == float_round_down ) && zSig1; } else { increment = ( roundingMode == float_round_up ) && zSig1; } } if ( increment ) { ++zSig0; zSig0 &= ~ ( ( (uint64_t) ( zSig1<<1 ) == 0 ) & roundNearestEven ); if ( (int64_t) zSig0 < 0 ) zExp = 1; } return packFloatx80( zSign, zExp, zSig0 ); } } if ( zSig1 ) STATUS(float_exception_flags) |= float_flag_inexact; if ( increment ) { ++zSig0; if ( zSig0 == 0 ) { ++zExp; zSig0 = LIT64( 0x8000000000000000 ); } else { zSig0 &= ~ ( ( (uint64_t) ( zSig1<<1 ) == 0 ) & roundNearestEven ); } } else { if ( zSig0 == 0 ) zExp = 0; } return packFloatx80( zSign, zExp, zSig0 ); } | 25,187 |
1 | static void win_stdio_close(CharDriverState *chr) { WinStdioCharState *stdio = chr->opaque; if (stdio->hInputReadyEvent != INVALID_HANDLE_VALUE) { CloseHandle(stdio->hInputReadyEvent); } if (stdio->hInputDoneEvent != INVALID_HANDLE_VALUE) { CloseHandle(stdio->hInputDoneEvent); } if (stdio->hInputThread != INVALID_HANDLE_VALUE) { TerminateThread(stdio->hInputThread, 0); } g_free(chr->opaque); g_free(chr); } | 25,188 |
1 | static int i2c_slave_post_load(void *opaque, int version_id) { I2CSlave *dev = opaque; I2CBus *bus; I2CNode *node; bus = I2C_BUS(qdev_get_parent_bus(DEVICE(dev))); if ((bus->saved_address == dev->address) || (bus->broadcast)) { node = g_malloc(sizeof(struct I2CNode)); node->elt = dev; QLIST_INSERT_HEAD(&bus->current_devs, node, next); } return 0; } | 25,190 |
1 | int qemu_poll_ns(GPollFD *fds, guint nfds, int64_t timeout) { #ifdef CONFIG_PPOLL if (timeout < 0) { return ppoll((struct pollfd *)fds, nfds, NULL, NULL); } else { struct timespec ts; ts.tv_sec = timeout / 1000000000LL; ts.tv_nsec = timeout % 1000000000LL; return ppoll((struct pollfd *)fds, nfds, &ts, NULL); } #else return g_poll(fds, nfds, qemu_timeout_ns_to_ms(timeout)); #endif } | 25,191 |
1 | const char *error_get_pretty(Error *err) { return err->msg; } | 25,192 |
1 | static int qemu_rdma_put_buffer(void *opaque, const uint8_t *buf, int64_t pos, int size) { QEMUFileRDMA *r = opaque; QEMUFile *f = r->file; RDMAContext *rdma = r->rdma; size_t remaining = size; uint8_t * data = (void *) buf; int ret; CHECK_ERROR_STATE(); /* * Push out any writes that * we're queued up for VM's ram. */ ret = qemu_rdma_write_flush(f, rdma); if (ret < 0) { rdma->error_state = ret; return ret; } while (remaining) { RDMAControlHeader head; r->len = MIN(remaining, RDMA_SEND_INCREMENT); remaining -= r->len; head.len = r->len; head.type = RDMA_CONTROL_QEMU_FILE; ret = qemu_rdma_exchange_send(rdma, &head, data, NULL, NULL, NULL); if (ret < 0) { rdma->error_state = ret; return ret; } data += r->len; } return size; } | 25,193 |
0 | static int peer_has_ufo(VirtIONet *n) { if (!peer_has_vnet_hdr(n)) return 0; n->has_ufo = qemu_peer_has_ufo(qemu_get_queue(n->nic)); return n->has_ufo; } | 25,195 |
0 | static uint64_t apb_pci_config_read(void *opaque, target_phys_addr_t addr, unsigned size) { uint32_t ret; APBState *s = opaque; ret = pci_data_read(s->bus, addr, size); ret = qemu_bswap_len(ret, size); APB_DPRINTF("%s: addr " TARGET_FMT_lx " -> %x\n", __func__, addr, ret); return ret; } | 25,196 |
0 | void s390_pci_sclp_deconfigure(SCCB *sccb) { PciCfgSccb *psccb = (PciCfgSccb *)sccb; S390PCIBusDevice *pbdev = s390_pci_find_dev_by_fid(s390_get_phb(), be32_to_cpu(psccb->aid)); uint16_t rc; if (be16_to_cpu(sccb->h.length) < 16) { rc = SCLP_RC_INSUFFICIENT_SCCB_LENGTH; goto out; } if (!pbdev) { DPRINTF("sclp deconfig no dev found\n"); rc = SCLP_RC_ADAPTER_ID_NOT_RECOGNIZED; goto out; } switch (pbdev->state) { case ZPCI_FS_RESERVED: rc = SCLP_RC_ADAPTER_IN_RESERVED_STATE; break; case ZPCI_FS_STANDBY: rc = SCLP_RC_NO_ACTION_REQUIRED; break; default: if (pbdev->summary_ind) { pci_dereg_irqs(pbdev); } if (pbdev->iommu->enabled) { pci_dereg_ioat(pbdev->iommu); } pbdev->state = ZPCI_FS_STANDBY; rc = SCLP_RC_NORMAL_COMPLETION; if (pbdev->release_timer) { qdev_unplug(DEVICE(pbdev->pdev), NULL); } } out: psccb->header.response_code = cpu_to_be16(rc); } | 25,198 |
0 | void cpu_register_physical_memory_offset(target_phys_addr_t start_addr, ram_addr_t size, ram_addr_t phys_offset, ram_addr_t region_offset) { target_phys_addr_t addr, end_addr; PhysPageDesc *p; CPUState *env; ram_addr_t orig_size = size; void *subpage; #ifdef CONFIG_KQEMU /* XXX: should not depend on cpu context */ env = first_cpu; if (env->kqemu_enabled) { kqemu_set_phys_mem(start_addr, size, phys_offset); } #endif if (kvm_enabled()) kvm_set_phys_mem(start_addr, size, phys_offset); if (phys_offset == IO_MEM_UNASSIGNED) { region_offset = start_addr; } region_offset &= TARGET_PAGE_MASK; size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK; end_addr = start_addr + (target_phys_addr_t)size; for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) { p = phys_page_find(addr >> TARGET_PAGE_BITS); if (p && p->phys_offset != IO_MEM_UNASSIGNED) { ram_addr_t orig_memory = p->phys_offset; target_phys_addr_t start_addr2, end_addr2; int need_subpage = 0; CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, need_subpage); if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) { if (!(orig_memory & IO_MEM_SUBPAGE)) { subpage = subpage_init((addr & TARGET_PAGE_MASK), &p->phys_offset, orig_memory, p->region_offset); } else { subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK) >> IO_MEM_SHIFT]; } subpage_register(subpage, start_addr2, end_addr2, phys_offset, region_offset); p->region_offset = 0; } else { p->phys_offset = phys_offset; if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM || (phys_offset & IO_MEM_ROMD)) phys_offset += TARGET_PAGE_SIZE; } } else { p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1); p->phys_offset = phys_offset; p->region_offset = region_offset; if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM || (phys_offset & IO_MEM_ROMD)) { phys_offset += TARGET_PAGE_SIZE; } else { target_phys_addr_t start_addr2, end_addr2; int need_subpage = 0; CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, need_subpage); if 
(need_subpage || phys_offset & IO_MEM_SUBWIDTH) { subpage = subpage_init((addr & TARGET_PAGE_MASK), &p->phys_offset, IO_MEM_UNASSIGNED, addr & TARGET_PAGE_MASK); subpage_register(subpage, start_addr2, end_addr2, phys_offset, region_offset); p->region_offset = 0; } } } region_offset += TARGET_PAGE_SIZE; } /* since each CPU stores ram addresses in its TLB cache, we must reset the modified entries */ /* XXX: slow ! */ for(env = first_cpu; env != NULL; env = env->next_cpu) { tlb_flush(env, 1); } } | 25,199 |
0 | static void help(int exitcode) { const char *options_help = #define QEMU_OPTIONS_GENERATE_HELP #include "qemu-options-wrapper.h" ; version(); printf("usage: %s [options] [disk_image]\n" "\n" "'disk_image' is a raw hard disk image for IDE hard disk 0\n" "\n" "%s\n" "During emulation, the following keys are useful:\n" "ctrl-alt-f toggle full screen\n" "ctrl-alt-n switch to virtual console 'n'\n" "ctrl-alt toggle mouse and keyboard grab\n" "\n" "When using -nographic, press 'ctrl-a h' to get some help.\n", error_get_progname(), options_help); exit(exitcode); } | 25,200 |
0 | static TileExcp gen_rr_opcode(DisasContext *dc, unsigned opext, unsigned dest, unsigned srca) { TCGv tdest, tsrca; const char *mnemonic; TCGMemOp memop; TileExcp ret = TILEGX_EXCP_NONE; /* Eliminate instructions with no output before doing anything else. */ switch (opext) { case OE_RR_Y0(NOP): case OE_RR_Y1(NOP): case OE_RR_X0(NOP): case OE_RR_X1(NOP): mnemonic = "nop"; goto done0; case OE_RR_Y0(FNOP): case OE_RR_Y1(FNOP): case OE_RR_X0(FNOP): case OE_RR_X1(FNOP): mnemonic = "fnop"; goto done0; case OE_RR_X1(DRAIN): mnemonic = "drain"; goto done0; case OE_RR_X1(FLUSHWB): mnemonic = "flushwb"; goto done0; case OE_RR_X1(ILL): if (dest == 0x1c && srca == 0x25) { mnemonic = "bpt"; goto done2; } /* Fall through */ case OE_RR_Y1(ILL): mnemonic = "ill"; done2: qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s", mnemonic); return TILEGX_EXCP_OPCODE_UNKNOWN; case OE_RR_X1(MF): mnemonic = "mf"; goto done0; case OE_RR_X1(NAP): /* ??? This should yield, especially in system mode. */ mnemonic = "nap"; goto done0; case OE_RR_X1(SWINT0): case OE_RR_X1(SWINT2): case OE_RR_X1(SWINT3): return TILEGX_EXCP_OPCODE_UNIMPLEMENTED; case OE_RR_X1(SWINT1): ret = TILEGX_EXCP_SYSCALL; mnemonic = "swint1"; done0: if (srca || dest) { return TILEGX_EXCP_OPCODE_UNIMPLEMENTED; } qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s", mnemonic); return ret; case OE_RR_X1(DTLBPR): return TILEGX_EXCP_OPCODE_UNIMPLEMENTED; case OE_RR_X1(FINV): mnemonic = "finv"; goto done1; case OE_RR_X1(FLUSH): mnemonic = "flush"; goto done1; case OE_RR_X1(ICOH): mnemonic = "icoh"; goto done1; case OE_RR_X1(INV): mnemonic = "inv"; goto done1; case OE_RR_X1(WH64): mnemonic = "wh64"; goto done1; case OE_RR_X1(JRP): case OE_RR_Y1(JRP): mnemonic = "jrp"; goto do_jr; case OE_RR_X1(JR): case OE_RR_Y1(JR): mnemonic = "jr"; goto do_jr; case OE_RR_X1(JALRP): case OE_RR_Y1(JALRP): mnemonic = "jalrp"; goto do_jalr; case OE_RR_X1(JALR): case OE_RR_Y1(JALR): mnemonic = "jalr"; do_jalr: tcg_gen_movi_tl(dest_gr(dc, TILEGX_R_LR), dc->pc + 
TILEGX_BUNDLE_SIZE_IN_BYTES); do_jr: dc->jmp.cond = TCG_COND_ALWAYS; dc->jmp.dest = tcg_temp_new(); tcg_gen_andi_tl(dc->jmp.dest, load_gr(dc, srca), ~7); done1: if (dest) { return TILEGX_EXCP_OPCODE_UNIMPLEMENTED; } qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s", mnemonic, reg_names[srca]); return ret; } tdest = dest_gr(dc, dest); tsrca = load_gr(dc, srca); switch (opext) { case OE_RR_X0(CNTLZ): case OE_RR_Y0(CNTLZ): gen_helper_cntlz(tdest, tsrca); mnemonic = "cntlz"; break; case OE_RR_X0(CNTTZ): case OE_RR_Y0(CNTTZ): gen_helper_cnttz(tdest, tsrca); mnemonic = "cnttz"; break; case OE_RR_X0(FSINGLE_PACK1): case OE_RR_Y0(FSINGLE_PACK1): case OE_RR_X1(IRET): return TILEGX_EXCP_OPCODE_UNIMPLEMENTED; case OE_RR_X1(LD1S): memop = MO_SB; mnemonic = "ld1s"; goto do_load; case OE_RR_X1(LD1U): memop = MO_UB; mnemonic = "ld1u"; goto do_load; case OE_RR_X1(LD2S): memop = MO_TESW; mnemonic = "ld2s"; goto do_load; case OE_RR_X1(LD2U): memop = MO_TEUW; mnemonic = "ld2u"; goto do_load; case OE_RR_X1(LD4S): memop = MO_TESL; mnemonic = "ld4s"; goto do_load; case OE_RR_X1(LD4U): memop = MO_TEUL; mnemonic = "ld4u"; goto do_load; case OE_RR_X1(LDNT1S): memop = MO_SB; mnemonic = "ldnt1s"; goto do_load; case OE_RR_X1(LDNT1U): memop = MO_UB; mnemonic = "ldnt1u"; goto do_load; case OE_RR_X1(LDNT2S): memop = MO_TESW; mnemonic = "ldnt2s"; goto do_load; case OE_RR_X1(LDNT2U): memop = MO_TEUW; mnemonic = "ldnt2u"; goto do_load; case OE_RR_X1(LDNT4S): memop = MO_TESL; mnemonic = "ldnt4s"; goto do_load; case OE_RR_X1(LDNT4U): memop = MO_TEUL; mnemonic = "ldnt4u"; goto do_load; case OE_RR_X1(LDNT): memop = MO_TEQ; mnemonic = "ldnt"; goto do_load; case OE_RR_X1(LD): memop = MO_TEQ; mnemonic = "ld"; do_load: tcg_gen_qemu_ld_tl(tdest, tsrca, dc->mmuidx, memop); break; case OE_RR_X1(LDNA): tcg_gen_andi_tl(tdest, tsrca, ~7); tcg_gen_qemu_ld_tl(tdest, tdest, dc->mmuidx, MO_TEQ); mnemonic = "ldna"; break; case OE_RR_X1(LNK): case OE_RR_Y1(LNK): if (srca) { return TILEGX_EXCP_OPCODE_UNIMPLEMENTED; } 
tcg_gen_movi_tl(tdest, dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES); mnemonic = "lnk"; break; case OE_RR_X0(PCNT): case OE_RR_Y0(PCNT): gen_helper_pcnt(tdest, tsrca); mnemonic = "pcnt"; break; case OE_RR_X0(REVBITS): case OE_RR_Y0(REVBITS): gen_helper_revbits(tdest, tsrca); mnemonic = "revbits"; break; case OE_RR_X0(REVBYTES): case OE_RR_Y0(REVBYTES): tcg_gen_bswap64_tl(tdest, tsrca); mnemonic = "revbytes"; break; case OE_RR_X0(TBLIDXB0): case OE_RR_Y0(TBLIDXB0): tcg_gen_deposit_tl(tdest, load_gr(dc, dest), tsrca, 2, 8); mnemonic = "tblidxb0"; break; case OE_RR_X0(TBLIDXB1): case OE_RR_Y0(TBLIDXB1): tcg_gen_shri_tl(tdest, tsrca, 8); tcg_gen_deposit_tl(tdest, load_gr(dc, dest), tdest, 2, 8); mnemonic = "tblidxb1"; break; case OE_RR_X0(TBLIDXB2): case OE_RR_Y0(TBLIDXB2): tcg_gen_shri_tl(tdest, tsrca, 16); tcg_gen_deposit_tl(tdest, load_gr(dc, dest), tdest, 2, 8); mnemonic = "tblidxb2"; break; case OE_RR_X0(TBLIDXB3): case OE_RR_Y0(TBLIDXB3): tcg_gen_shri_tl(tdest, tsrca, 24); tcg_gen_deposit_tl(tdest, load_gr(dc, dest), tdest, 2, 8); mnemonic = "tblidxb3"; break; default: return TILEGX_EXCP_OPCODE_UNIMPLEMENTED; } qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s", mnemonic, reg_names[dest], reg_names[srca]); return ret; } | 25,201 |
0 | static void etsec_cleanup(NetClientState *nc) { /* qemu_log("eTSEC cleanup\n"); */ } | 25,202 |
0 | static void qmp_input_push(QmpInputVisitor *qiv, const QObject *obj, Error **errp) { qiv->stack[qiv->nb_stack].obj = obj; if (qobject_type(obj) == QTYPE_QLIST) { qiv->stack[qiv->nb_stack].entry = qlist_first(qobject_to_qlist(obj)); } qiv->nb_stack++; if (qiv->nb_stack >= QIV_STACK_SIZE) { error_set(errp, QERR_BUFFER_OVERRUN); return; } } | 25,203 |
0 | static const IntelHDAReg *intel_hda_reg_find(IntelHDAState *d, target_phys_addr_t addr) { const IntelHDAReg *reg; if (addr >= sizeof(regtab)/sizeof(regtab[0])) { goto noreg; } reg = regtab+addr; if (reg->name == NULL) { goto noreg; } return reg; noreg: dprint(d, 1, "unknown register, addr 0x%x\n", (int) addr); return NULL; } | 25,204 |
0 | build_srat(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) { AcpiSystemResourceAffinityTable *srat; AcpiSratProcessorGiccAffinity *core; AcpiSratMemoryAffinity *numamem; int i, srat_start; uint64_t mem_base; MachineClass *mc = MACHINE_GET_CLASS(vms); const CPUArchIdList *cpu_list = mc->possible_cpu_arch_ids(MACHINE(vms)); srat_start = table_data->len; srat = acpi_data_push(table_data, sizeof(*srat)); srat->reserved1 = cpu_to_le32(1); for (i = 0; i < cpu_list->len; ++i) { int node_id = cpu_list->cpus[i].props.has_node_id ? cpu_list->cpus[i].props.node_id : 0; core = acpi_data_push(table_data, sizeof(*core)); core->type = ACPI_SRAT_PROCESSOR_GICC; core->length = sizeof(*core); core->proximity = cpu_to_le32(node_id); core->acpi_processor_uid = cpu_to_le32(i); core->flags = cpu_to_le32(1); } mem_base = vms->memmap[VIRT_MEM].base; for (i = 0; i < nb_numa_nodes; ++i) { numamem = acpi_data_push(table_data, sizeof(*numamem)); build_srat_memory(numamem, mem_base, numa_info[i].node_mem, i, MEM_AFFINITY_ENABLED); mem_base += numa_info[i].node_mem; } build_header(linker, table_data, (void *)srat, "SRAT", table_data->len - srat_start, 3, NULL, NULL); } | 25,205 |
0 | void acpi_pcihp_init(AcpiPciHpState *s, PCIBus *root_bus, MemoryRegion *address_space_io, bool bridges_enabled) { uint16_t io_size = ACPI_PCIHP_SIZE; s->root= root_bus; s->legacy_piix = !bridges_enabled; if (s->legacy_piix) { unsigned *bus_bsel = g_malloc(sizeof *bus_bsel); io_size = ACPI_PCIHP_LEGACY_SIZE; *bus_bsel = ACPI_PCIHP_BSEL_DEFAULT; object_property_add_uint32_ptr(OBJECT(root_bus), ACPI_PCIHP_PROP_BSEL, bus_bsel, NULL); } memory_region_init_io(&s->io, NULL, &acpi_pcihp_io_ops, s, "acpi-pci-hotplug", io_size); memory_region_add_subregion(address_space_io, ACPI_PCIHP_ADDR, &s->io); } | 25,207 |
0 | static unsigned tget_short(GetByteContext *gb, int le) { unsigned v = le ? bytestream2_get_le16u(gb) : bytestream2_get_be16u(gb); return v; } | 25,208 |
0 | static void omap_lpg_tick(void *opaque) { struct omap_lpg_s *s = opaque; if (s->cycle) timer_mod(s->tm, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + s->period - s->on); else timer_mod(s->tm, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + s->on); s->cycle = !s->cycle; printf("%s: LED is %s\n", __FUNCTION__, s->cycle ? "on" : "off"); } | 25,209 |
/*
 * Service one frame of an OHCI isochronous transfer descriptor.
 *
 * Reads the ISO TD at the head of @ed, locates the packet for the current
 * frame (relative_frame_number), performs/completes the USB transfer and
 * writes back per-frame status.  When the last frame of the TD completes,
 * the TD is retired to the done queue.
 *
 * Returns 0 when the caller should stop processing this ED for now
 * (read error or expired TD retired), 1 otherwise (including async
 * packets still in flight).
 */
static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed,
                               int completion)
{
    int dir;
    size_t len = 0;
#ifdef DEBUG_ISOCH
    const char *str = NULL;
#endif
    int pid;
    int ret;
    int i;
    USBDevice *dev;
    struct ohci_iso_td iso_td;
    uint32_t addr;
    uint16_t starting_frame;
    int16_t relative_frame_number;
    int frame_count;
    uint32_t start_offset, next_offset, end_offset = 0;
    uint32_t start_addr, end_addr;

    addr = ed->head & OHCI_DPTR_MASK;

    if (!ohci_read_iso_td(ohci, addr, &iso_td)) {
        printf("usb-ohci: ISO_TD read error at %x\n", addr);
        return 0;
    }

    starting_frame = OHCI_BM(iso_td.flags, TD_SF);
    frame_count = OHCI_BM(iso_td.flags, TD_FC);
    /* 16-bit wrap-around subtraction: which frame of the TD are we in? */
    relative_frame_number = USUB(ohci->frame_number, starting_frame);

#ifdef DEBUG_ISOCH
    printf("--- ISO_TD ED head 0x%.8x tailp 0x%.8x\n"
           "0x%.8x 0x%.8x 0x%.8x 0x%.8x\n"
           "0x%.8x 0x%.8x 0x%.8x 0x%.8x\n"
           "0x%.8x 0x%.8x 0x%.8x 0x%.8x\n"
           "frame_number 0x%.8x starting_frame 0x%.8x\n"
           "frame_count 0x%.8x relative %d\n"
           "di 0x%.8x cc 0x%.8x\n",
           ed->head & OHCI_DPTR_MASK, ed->tail & OHCI_DPTR_MASK,
           iso_td.flags, iso_td.bp, iso_td.next, iso_td.be,
           iso_td.offset[0], iso_td.offset[1], iso_td.offset[2],
           iso_td.offset[3], iso_td.offset[4], iso_td.offset[5],
           iso_td.offset[6], iso_td.offset[7],
           ohci->frame_number, starting_frame,
           frame_count, relative_frame_number,
           OHCI_BM(iso_td.flags, TD_DI), OHCI_BM(iso_td.flags, TD_CC));
#endif

    if (relative_frame_number < 0) {
        /* TD not yet due - nothing to do this frame */
        DPRINTF("usb-ohci: ISO_TD R=%d < 0\n", relative_frame_number);
        return 1;
    } else if (relative_frame_number > frame_count) {
        /* ISO TD expired - retire the TD to the Done Queue and continue with
           the next ISO TD of the same ED */
        DPRINTF("usb-ohci: ISO_TD R=%d > FC=%d\n", relative_frame_number,
               frame_count);
        OHCI_SET_BM(iso_td.flags, TD_CC, OHCI_CC_DATAOVERRUN);
        ed->head &= ~OHCI_DPTR_MASK;
        ed->head |= (iso_td.next & OHCI_DPTR_MASK);
        iso_td.next = ohci->done;
        ohci->done = addr;
        /* Honour the TD's interrupt-delay request */
        i = OHCI_BM(iso_td.flags, TD_DI);
        if (i < ohci->done_count)
            ohci->done_count = i;
        ohci_put_iso_td(ohci, addr, &iso_td);
        return 0;
    }

    /* Transfer direction comes from the ED for isochronous endpoints */
    dir = OHCI_BM(ed->flags, ED_D);
    switch (dir) {
    case OHCI_TD_DIR_IN:
#ifdef DEBUG_ISOCH
        str = "in";
#endif
        pid = USB_TOKEN_IN;
        break;
    case OHCI_TD_DIR_OUT:
#ifdef DEBUG_ISOCH
        str = "out";
#endif
        pid = USB_TOKEN_OUT;
        break;
    case OHCI_TD_DIR_SETUP:
#ifdef DEBUG_ISOCH
        str = "setup";
#endif
        pid = USB_TOKEN_SETUP;
        break;
    default:
        printf("usb-ohci: Bad direction %d\n", dir);
        return 1;
    }

    if (!iso_td.bp || !iso_td.be) {
        printf("usb-ohci: ISO_TD bp 0x%.8x be 0x%.8x\n", iso_td.bp, iso_td.be);
        return 1;
    }

    /* Per-frame packet offsets; offset[R+1] marks the end of this packet */
    start_offset = iso_td.offset[relative_frame_number];
    next_offset = iso_td.offset[relative_frame_number + 1];

    /* Both PSWs must still be in "not accessed" condition-code state */
    if (!(OHCI_BM(start_offset, TD_PSW_CC) & 0xe) ||
        ((relative_frame_number < frame_count) &&
         !(OHCI_BM(next_offset, TD_PSW_CC) & 0xe))) {
        printf("usb-ohci: ISO_TD cc != not accessed 0x%.8x 0x%.8x\n",
               start_offset, next_offset);
        return 1;
    }

    if ((relative_frame_number < frame_count) &&
        (start_offset > next_offset)) {
        printf("usb-ohci: ISO_TD start_offset=0x%.8x > next_offset=0x%.8x\n",
                start_offset, next_offset);
        return 1;
    }

    /* Bit 12 of the offset selects which page (bp or be) the data lives in */
    if ((start_offset & 0x1000) == 0) {
        start_addr = (iso_td.bp & OHCI_PAGE_MASK) |
            (start_offset & OHCI_OFFSET_MASK);
    } else {
        start_addr = (iso_td.be & OHCI_PAGE_MASK) |
            (start_offset & OHCI_OFFSET_MASK);
    }

    if (relative_frame_number < frame_count) {
        end_offset = next_offset - 1;
        if ((end_offset & 0x1000) == 0) {
            end_addr = (iso_td.bp & OHCI_PAGE_MASK) |
                (end_offset & OHCI_OFFSET_MASK);
        } else {
            end_addr = (iso_td.be & OHCI_PAGE_MASK) |
                (end_offset & OHCI_OFFSET_MASK);
        }
    } else {
        /* Last packet in the ISO TD */
        end_addr = iso_td.be;
    }

    /* Packet may straddle the two pages; 0x1001 accounts for the page gap */
    if ((start_addr & OHCI_PAGE_MASK) != (end_addr & OHCI_PAGE_MASK)) {
        len = (end_addr & OHCI_OFFSET_MASK) + 0x1001 -
            (start_addr & OHCI_OFFSET_MASK);
    } else {
        len = end_addr - start_addr + 1;
    }

    /* For OUT/SETUP, copy the payload from guest memory before sending */
    if (len && dir != OHCI_TD_DIR_IN) {
        ohci_copy_iso_td(ohci, start_addr, end_addr, ohci->usb_buf, len, 0);
    }

    if (completion) {
        /* Async packet finished earlier; pick up its result */
        ret = ohci->usb_packet.result;
    } else {
        usb_packet_setup(&ohci->usb_packet, pid,
                         OHCI_BM(ed->flags, ED_FA),
                         OHCI_BM(ed->flags, ED_EN));
        usb_packet_addbuf(&ohci->usb_packet, ohci->usb_buf, len);
        dev = ohci_find_device(ohci, ohci->usb_packet.devaddr);
        ret = usb_handle_packet(dev, &ohci->usb_packet);
        if (ret == USB_RET_ASYNC) {
            /* Will be completed later via the completion path */
            return 1;
        }
    }

#ifdef DEBUG_ISOCH
    printf("so 0x%.8x eo 0x%.8x\nsa 0x%.8x ea 0x%.8x\ndir %s len %zu ret %d\n",
           start_offset, end_offset, start_addr, end_addr, str, len, ret);
#endif

    /* Writeback */
    if (dir == OHCI_TD_DIR_IN && ret >= 0 && ret <= len) {
        /* IN transfer succeeded */
        ohci_copy_iso_td(ohci, start_addr, end_addr, ohci->usb_buf, ret, 1);
        OHCI_SET_BM(iso_td.offset[relative_frame_number], TD_PSW_CC,
                    OHCI_CC_NOERROR);
        OHCI_SET_BM(iso_td.offset[relative_frame_number], TD_PSW_SIZE, ret);
    } else if (dir == OHCI_TD_DIR_OUT && ret == len) {
        /* OUT transfer succeeded */
        OHCI_SET_BM(iso_td.offset[relative_frame_number], TD_PSW_CC,
                    OHCI_CC_NOERROR);
        OHCI_SET_BM(iso_td.offset[relative_frame_number], TD_PSW_SIZE, 0);
    } else {
        /* Error paths: map host-side result onto OHCI condition codes */
        if (ret > (ssize_t) len) {
            printf("usb-ohci: DataOverrun %d > %zu\n", ret, len);
            OHCI_SET_BM(iso_td.offset[relative_frame_number], TD_PSW_CC,
                        OHCI_CC_DATAOVERRUN);
            OHCI_SET_BM(iso_td.offset[relative_frame_number], TD_PSW_SIZE,
                        len);
        } else if (ret >= 0) {
            printf("usb-ohci: DataUnderrun %d\n", ret);
            OHCI_SET_BM(iso_td.offset[relative_frame_number], TD_PSW_CC,
                        OHCI_CC_DATAUNDERRUN);
        } else {
            switch (ret) {
            case USB_RET_NODEV:
                OHCI_SET_BM(iso_td.offset[relative_frame_number], TD_PSW_CC,
                            OHCI_CC_DEVICENOTRESPONDING);
                OHCI_SET_BM(iso_td.offset[relative_frame_number], TD_PSW_SIZE,
                            0);
                break;
            case USB_RET_NAK:
            case USB_RET_STALL:
                printf("usb-ohci: got NAK/STALL %d\n", ret);
                OHCI_SET_BM(iso_td.offset[relative_frame_number], TD_PSW_CC,
                            OHCI_CC_STALL);
                OHCI_SET_BM(iso_td.offset[relative_frame_number], TD_PSW_SIZE,
                            0);
                break;
            default:
                printf("usb-ohci: Bad device response %d\n", ret);
                OHCI_SET_BM(iso_td.offset[relative_frame_number], TD_PSW_CC,
                            OHCI_CC_UNDEXPETEDPID);
                break;
            }
        }
    }

    if (relative_frame_number == frame_count) {
        /* Last data packet of ISO TD - retire the TD to the Done Queue */
        OHCI_SET_BM(iso_td.flags, TD_CC, OHCI_CC_NOERROR);
        ed->head &= ~OHCI_DPTR_MASK;
        ed->head |= (iso_td.next & OHCI_DPTR_MASK);
        iso_td.next = ohci->done;
        ohci->done = addr;
        i = OHCI_BM(iso_td.flags, TD_DI);
        if (i < ohci->done_count)
            ohci->done_count = i;
    }
    ohci_put_iso_td(ohci, addr, &iso_td);
    return 1;
}
/*
 * Parse a single JSON literal (string, integer or float) from the token
 * stream.
 *
 * Works on a copy of the token list so that the caller's list is only
 * replaced on success; on failure (empty stream or non-literal token)
 * *tokens is left untouched and NULL is returned.  On success the consumed
 * token and the old list are released and *tokens points at the advanced
 * working copy.
 */
static QObject *parse_literal(JSONParserContext *ctxt, QList **tokens)
{
    QObject *token, *obj;
    QList *working = qlist_copy(*tokens);

    token = qlist_pop(working);
    if (token == NULL) {
        goto out;
    }

    switch (token_get_type(token)) {
    case JSON_STRING:
        obj = QOBJECT(qstring_from_escaped_str(ctxt, token));
        break;
    case JSON_INTEGER:
        obj = QOBJECT(qint_from_int(strtoll(token_get_value(token),
                                            NULL, 10)));
        break;
    case JSON_FLOAT:
        /* FIXME dependent on locale */
        obj = QOBJECT(qfloat_from_double(strtod(token_get_value(token),
                                                NULL)));
        break;
    default:
        /* Not a literal - put nothing back, report failure */
        goto out;
    }

    /* Commit: drop the consumed token and swap in the advanced list */
    qobject_decref(token);
    QDECREF(*tokens);
    *tokens = working;

    return obj;

out:
    /* qobject_decref(NULL) is a no-op, so this is safe for the empty case */
    qobject_decref(token);
    QDECREF(working);

    return NULL;
}
/*
 * Populate the device-tree node of a hot-plugged PCI device under a
 * sPAPR PHB, following PAPR+ v2.7 13.6.3 (Table 181).
 *
 * Returns 0 on success, a negative value when the location code cannot
 * be obtained or written.  Most property writes abort via _FDT() on
 * failure rather than returning.
 */
static int spapr_populate_pci_child_dt(PCIDevice *dev, void *fdt, int offset,
                                       int phb_index, int drc_index,
                                       sPAPRPHBState *sphb)
{
    ResourceProps rp;
    bool is_bridge = false;
    int pci_status, err;
    char *buf = NULL;

    if (pci_default_read_config(dev, PCI_HEADER_TYPE, 1) ==
        PCI_HEADER_TYPE_BRIDGE) {
        is_bridge = true;
    }

    /* in accordance with PAPR+ v2.7 13.6.3, Table 181 */
    _FDT(fdt_setprop_cell(fdt, offset, "vendor-id",
                          pci_default_read_config(dev, PCI_VENDOR_ID, 2)));
    _FDT(fdt_setprop_cell(fdt, offset, "device-id",
                          pci_default_read_config(dev, PCI_DEVICE_ID, 2)));
    _FDT(fdt_setprop_cell(fdt, offset, "revision-id",
                          pci_default_read_config(dev, PCI_REVISION_ID, 1)));
    _FDT(fdt_setprop_cell(fdt, offset, "class-code",
                          pci_default_read_config(dev, PCI_CLASS_PROG, 3)));
    if (pci_default_read_config(dev, PCI_INTERRUPT_PIN, 1)) {
        _FDT(fdt_setprop_cell(fdt, offset, "interrupts",
                 pci_default_read_config(dev, PCI_INTERRUPT_PIN, 1)));
    }

    /* min-grant/max-latency only exist in type-0 (non-bridge) headers */
    if (!is_bridge) {
        _FDT(fdt_setprop_cell(fdt, offset, "min-grant",
            pci_default_read_config(dev, PCI_MIN_GNT, 1)));
        _FDT(fdt_setprop_cell(fdt, offset, "max-latency",
            pci_default_read_config(dev, PCI_MAX_LAT, 1)));
    }

    if (pci_default_read_config(dev, PCI_SUBSYSTEM_ID, 2)) {
        _FDT(fdt_setprop_cell(fdt, offset, "subsystem-id",
                 pci_default_read_config(dev, PCI_SUBSYSTEM_ID, 2)));
    }

    if (pci_default_read_config(dev, PCI_SUBSYSTEM_VENDOR_ID, 2)) {
        _FDT(fdt_setprop_cell(fdt, offset, "subsystem-vendor-id",
                 pci_default_read_config(dev, PCI_SUBSYSTEM_VENDOR_ID, 2)));
    }

    _FDT(fdt_setprop_cell(fdt, offset, "cache-line-size",
        pci_default_read_config(dev, PCI_CACHE_LINE_SIZE, 1)));

    /* the following fdt cells are masked off the pci status register */
    pci_status = pci_default_read_config(dev, PCI_STATUS, 2);
    _FDT(fdt_setprop_cell(fdt, offset, "devsel-speed",
                          PCI_STATUS_DEVSEL_MASK & pci_status));

    if (pci_status & PCI_STATUS_FAST_BACK) {
        _FDT(fdt_setprop(fdt, offset, "fast-back-to-back", NULL, 0));
    }
    if (pci_status & PCI_STATUS_66MHZ) {
        _FDT(fdt_setprop(fdt, offset, "66mhz-capable", NULL, 0));
    }
    if (pci_status & PCI_STATUS_UDF) {
        _FDT(fdt_setprop(fdt, offset, "udf-supported", NULL, 0));
    }

    /* NOTE: this is normally generated by firmware via path/unit name,
     * but in our case we must set it manually since it does not get
     * processed by OF beforehand */
    _FDT(fdt_setprop_string(fdt, offset, "name", "pci"));
    buf = spapr_phb_get_loc_code(sphb, dev);
    if (!buf) {
        error_report("Failed setting the ibm,loc-code");
        return -1;
    }

    err = fdt_setprop_string(fdt, offset, "ibm,loc-code", buf);
    g_free(buf);
    if (err < 0) {
        return err;
    }

    _FDT(fdt_setprop_cell(fdt, offset, "ibm,my-drc-index", drc_index));

    _FDT(fdt_setprop_cell(fdt, offset, "#address-cells",
                          RESOURCE_CELLS_ADDRESS));
    _FDT(fdt_setprop_cell(fdt, offset, "#size-cells",
                          RESOURCE_CELLS_SIZE));
    /* NOTE(review): "ibm,req#msi-x" is being set to RESOURCE_CELLS_SIZE,
     * which looks like a copy-paste of the line above - PAPR says this
     * property should carry the number of MSI-X vectors the device
     * requests.  Verify against the PAPR+ spec and fix separately. */
    _FDT(fdt_setprop_cell(fdt, offset, "ibm,req#msi-x",
                          RESOURCE_CELLS_SIZE));

    populate_resource_props(dev, &rp);
    _FDT(fdt_setprop(fdt, offset, "reg", (uint8_t *)rp.reg, rp.reg_len));
    _FDT(fdt_setprop(fdt, offset, "assigned-addresses",
                     (uint8_t *)rp.assigned, rp.assigned_len));

    return 0;
}
0 | static void usb_host_set_config(USBHostDevice *s, int config, USBPacket *p) { int rc; trace_usb_host_set_config(s->bus_num, s->addr, config); usb_host_release_interfaces(s); usb_host_detach_kernel(s); rc = libusb_set_configuration(s->dh, config); if (rc != 0) { usb_host_libusb_error("libusb_set_configuration", rc); p->status = USB_RET_STALL; if (rc == LIBUSB_ERROR_NO_DEVICE) { usb_host_nodev(s); } return; } p->status = usb_host_claim_interfaces(s, config); if (p->status != USB_RET_SUCCESS) { return; } usb_host_ep_update(s); } | 25,213 |
/*
 * Mark the pages covering [addr, addr + size) as readable, writable and
 * executable.  The range is widened outward to whole-page boundaries, as
 * mprotect() requires page-aligned arguments.  The mprotect() result is
 * deliberately ignored (best-effort, matching the original behaviour).
 */
static __attribute__((unused)) void map_exec(void *addr, long size)
{
    unsigned long page_size = getpagesize();
    unsigned long first = (unsigned long)addr & ~(page_size - 1);
    unsigned long last = ((unsigned long)addr + size + page_size - 1) &
                         ~(page_size - 1);

    mprotect((void *)first, last - first,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
/*
 * Adjust the refcount of every cluster overlapped by [offset, offset+length)
 * by @addend (positive or negative).
 *
 * Refcount blocks are updated in the in-memory cache and flushed to disk
 * whenever the walk crosses into a different refcount block, plus once at
 * the end.  On error, a best-effort attempt is made to undo the updates
 * already applied before returning the (negative) error code.
 */
static int QEMU_WARN_UNUSED_RESULT update_refcount(BlockDriverState *bs,
    int64_t offset, int64_t length, int addend)
{
    BDRVQcowState *s = bs->opaque;
    int64_t start, last, cluster_offset;
    int64_t refcount_block_offset = 0;
    int64_t table_index = -1, old_table_index;
    /* first_index/last_index track the dirty entry range of the current
     * refcount block so only the touched slice is written back */
    int first_index = -1, last_index = -1;
    int ret;

#ifdef DEBUG_ALLOC2
    printf("update_refcount: offset=%" PRId64 " size=%" PRId64 " addend=%d\n",
           offset, length, addend);
#endif
    if (length < 0) {
        return -EINVAL;
    } else if (length == 0) {
        return 0;
    }

    /* Round the range outward to cluster boundaries */
    start = offset & ~(s->cluster_size - 1);
    last = (offset + length - 1) & ~(s->cluster_size - 1);
    for (cluster_offset = start; cluster_offset <= last;
         cluster_offset += s->cluster_size)
    {
        int block_index, refcount;
        int64_t cluster_index = cluster_offset >> s->cluster_bits;
        int64_t new_block;

        /* Only write refcount block to disk when we are done with it */
        old_table_index = table_index;
        table_index = cluster_index >> (s->cluster_bits - REFCOUNT_SHIFT);
        if ((old_table_index >= 0) && (table_index != old_table_index)) {
            ret = write_refcount_block_entries(bs, refcount_block_offset,
                first_index, last_index);
            if (ret < 0) {
                return ret;
            }

            first_index = -1;
            last_index = -1;
        }

        /* Load the refcount block and allocate it if needed */
        new_block = alloc_refcount_block(bs, cluster_index);
        if (new_block < 0) {
            ret = new_block;
            goto fail;
        }
        refcount_block_offset = new_block;

        /* we can update the count and save it */
        block_index = cluster_index &
            ((1 << (s->cluster_bits - REFCOUNT_SHIFT)) - 1);
        if (first_index == -1 || block_index < first_index) {
            first_index = block_index;
        }
        if (block_index > last_index) {
            last_index = block_index;
        }

        refcount = be16_to_cpu(s->refcount_block_cache[block_index]);
        refcount += addend;
        /* Refcounts are 16-bit on disk; over/underflow is a caller bug */
        if (refcount < 0 || refcount > 0xffff) {
            ret = -EINVAL;
            goto fail;
        }
        /* A cluster dropping to refcount 0 becomes a free-allocation hint */
        if (refcount == 0 && cluster_index < s->free_cluster_index) {
            s->free_cluster_index = cluster_index;
        }
        s->refcount_block_cache[block_index] = cpu_to_be16(refcount);
    }

    ret = 0;
fail:

    /* Write last changed block to disk */
    if (refcount_block_offset != 0) {
        int wret;
        wret = write_refcount_block_entries(bs, refcount_block_offset,
            first_index, last_index);
        if (wret < 0) {
            /* Preserve the original error if there already was one */
            return ret < 0 ? ret : wret;
        }
    }

    /*
     * Try to undo any updates if an error is returned (This may succeed in
     * some cases like ENOSPC for allocating a new refcount block)
     */
    if (ret < 0) {
        /* The undo result is deliberately ignored: there is nothing more
         * we can do if the rollback itself fails */
        int dummy;
        dummy = update_refcount(bs, offset, cluster_offset - offset, -addend);
    }

    bdrv_flush(bs->file);
    return ret;
}
1 | int fw_cfg_add_i16(FWCfgState *s, uint16_t key, uint16_t value) { uint16_t *copy; copy = g_malloc(sizeof(value)); *copy = cpu_to_le16(value); return fw_cfg_add_bytes(s, key, (uint8_t *)copy, sizeof(value)); } | 25,216 |
/*
 * Handle the s390 PCISTG (PCI store) instruction.
 *
 * r2 holds the function handle (bits 32-63), address space (pcias) and
 * length; r2+1 holds the offset; r1 holds the data to store.  Stores go
 * either to a PCI BAR / MSI-X table (pcias < 6) or to PCI config space
 * (pcias == 15).  Outcomes are reported via the condition code and, for
 * some errors, a status code; the function itself always returns 0.
 */
int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
{
    CPUS390XState *env = &cpu->env;
    uint64_t offset, data;
    S390PCIBusDevice *pbdev;
    MemoryRegion *mr;
    uint8_t len;
    uint32_t fh;
    uint8_t pcias;

    cpu_synchronize_state(CPU(cpu));

    /* Privileged instruction - reject in problem state */
    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 4);
        return 0;
    }

    /* r2 must designate an even/odd register pair */
    if (r2 & 0x1) {
        program_interrupt(env, PGM_SPECIFICATION, 4);
        return 0;
    }

    fh = env->regs[r2] >> 32;
    pcias = (env->regs[r2] >> 16) & 0xf;
    len = env->regs[r2] & 0xf;
    offset = env->regs[r2 + 1];

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev) {
        DPRINTF("pcistg no pci dev\n");
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
    case ZPCI_FS_STANDBY:
    case ZPCI_FS_DISABLED:
    case ZPCI_FS_PERMANENT_ERROR:
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    case ZPCI_FS_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
        return 0;
    default:
        break;
    }

    data = env->regs[r1];
    if (pcias < 6) {
        /* Access must not cross a doubleword boundary */
        if ((8 - (offset & 0x7)) < len) {
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }
        /* NOTE(review): offset is not validated against the target
         * region's size here; a malicious guest-supplied offset relies
         * on memory_region_dispatch_write's own bounds handling.
         * Verify that this is sufficient. */
        if (trap_msix(pbdev, offset, pcias)) {
            /* Store lands in the MSI-X table - rewrite the message data */
            offset = offset - pbdev->msix.table_offset;
            mr = &pbdev->pdev->msix_table_mmio;
            update_msix_table_msg_data(pbdev, offset, &data, len);
        } else {
            mr = pbdev->pdev->io_regions[pcias].memory;
        }

        memory_region_dispatch_write(mr, offset, data, len,
                                     MEMTXATTRS_UNSPECIFIED);
    } else if (pcias == 15) {
        /* Config space: access must not cross a word boundary */
        if ((4 - (offset & 0x3)) < len) {
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }
        /* Guest supplies big-endian data; convert per access width */
        switch (len) {
        case 1:
            break;
        case 2:
            data = bswap16(data);
            break;
        case 4:
            data = bswap32(data);
            break;
        case 8:
            data = bswap64(data);
            break;
        default:
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }

        pci_host_config_write_common(pbdev->pdev, offset,
                                     pci_config_size(pbdev->pdev),
                                     data, len);
    } else {
        DPRINTF("pcistg invalid space\n");
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;
}
/*
 * Decode one DV video segment (5 macroblocks).
 *
 * DV packs VLC data per block, but a block's coefficients may spill into
 * the spare bits of other blocks.  Decoding therefore runs in three
 * passes: (1) per-block data, collecting leftover bits per macroblock;
 * (2) leftover bits within the same macroblock; (3) leftover bits across
 * the whole segment.  Finally the IDCT is applied and pixels are placed
 * into the picture, handling the various DV chroma layouts.
 */
static int dv_decode_video_segment(AVCodecContext *avctx, void *arg)
{
    DVVideoContext *s = avctx->priv_data;
    DVwork_chunk *work_chunk = arg;
    int quant, dc, dct_mode, class1, j;
    int mb_index, mb_x, mb_y, last_index;
    int y_stride, linesize;
    DCTELEM *block, *block1;
    int c_offset;
    uint8_t *y_ptr;
    const uint8_t *buf_ptr;
    PutBitContext pb, vs_pb;
    GetBitContext gb;
    BlockInfo mb_data[5 * DV_MAX_BPM], *mb, *mb1;
    LOCAL_ALIGNED_16(DCTELEM, sblock, [5*DV_MAX_BPM], [64]);
    LOCAL_ALIGNED_16(uint8_t, mb_bit_buffer, [80 + 4]); /* allow some slack */
    LOCAL_ALIGNED_16(uint8_t, vs_bit_buffer, [5 * 80 + 4]); /* allow some slack */
    const int log2_blocksize = 3-s->avctx->lowres;
    int is_field_mode[5];

    assert((((int)mb_bit_buffer) & 7) == 0);
    assert((((int)vs_bit_buffer) & 7) == 0);

    memset(sblock, 0, 5*DV_MAX_BPM*sizeof(*sblock));

    /* pass 1: read DC and AC coefficients in blocks */
    buf_ptr = &s->buf[work_chunk->buf_offset*80];
    block1 = &sblock[0][0];
    mb1 = mb_data;
    init_put_bits(&vs_pb, vs_bit_buffer, 5 * 80);
    for (mb_index = 0; mb_index < 5; mb_index++,
         mb1 += s->sys->bpm, block1 += s->sys->bpm * 64) {
        /* skip header */
        quant = buf_ptr[3] & 0x0f;
        buf_ptr += 4;
        init_put_bits(&pb, mb_bit_buffer, 80);
        mb = mb1;
        block = block1;
        is_field_mode[mb_index] = 0;
        for (j = 0; j < s->sys->bpm; j++) {
            last_index = s->sys->block_sizes[j];
            init_get_bits(&gb, buf_ptr, last_index);

            /* get the DC */
            dc = get_sbits(&gb, 9);
            dct_mode = get_bits1(&gb);
            class1 = get_bits(&gb, 2);
            if (DV_PROFILE_IS_HD(s->sys)) {
                /* HD profiles: 8x8 IDCT only, factor table by class/quant */
                mb->idct_put = s->idct_put[0];
                mb->scan_table = s->dv_zigzag[0];
                mb->factor_table = &s->sys->idct_factor[(j >= 4)*4*16*64 +
                                                        class1*16*64 +
                                                        quant*64];
                is_field_mode[mb_index] |= !j && dct_mode;
            } else {
                mb->idct_put = s->idct_put[dct_mode && log2_blocksize == 3];
                mb->scan_table = s->dv_zigzag[dct_mode];
                mb->factor_table =
                    &s->sys->idct_factor[(class1 == 3)*2*22*64 +
                                         dct_mode*22*64 +
                                         (quant + dv_quant_offset[class1])*64];
            }
            dc = dc << 2;
            /* convert to unsigned because 128 is not added in the
               standard IDCT */
            dc += 1024;
            block[0] = dc;
            buf_ptr += last_index >> 3;
            mb->pos = 0;
            mb->partial_bit_count = 0;

            av_dlog(avctx, "MB block: %d, %d ", mb_index, j);
            dv_decode_ac(&gb, mb, block);

            /* write the remaining bits in a new buffer only if the
               block is finished */
            if (mb->pos >= 64)
                bit_copy(&pb, &gb);

            block += 64;
            mb++;
        }

        /* pass 2: we can do it just after */
        av_dlog(avctx, "***pass 2 size=%d MB#=%d\n",
                put_bits_count(&pb), mb_index);
        block = block1;
        mb = mb1;
        init_get_bits(&gb, mb_bit_buffer, put_bits_count(&pb));
        flush_put_bits(&pb);
        for (j = 0; j < s->sys->bpm; j++, block += 64, mb++) {
            if (mb->pos < 64 && get_bits_left(&gb) > 0) {
                dv_decode_ac(&gb, mb, block);
                /* if still not finished, no need to parse other blocks */
                if (mb->pos < 64)
                    break;
            }
        }
        /* all blocks are finished, so the extra bytes can be used at
           the video segment level */
        if (j >= s->sys->bpm)
            bit_copy(&vs_pb, &gb);
    }

    /* we need a pass over the whole video segment */
    av_dlog(avctx, "***pass 3 size=%d\n", put_bits_count(&vs_pb));
    block = &sblock[0][0];
    mb = mb_data;
    init_get_bits(&gb, vs_bit_buffer, put_bits_count(&vs_pb));
    flush_put_bits(&vs_pb);
    for (mb_index = 0; mb_index < 5; mb_index++) {
        for (j = 0; j < s->sys->bpm; j++) {
            if (mb->pos < 64) {
                av_dlog(avctx, "start %d:%d\n", mb_index, j);
                dv_decode_ac(&gb, mb, block);
            }
            if (mb->pos >= 64 && mb->pos < 127)
                av_log(avctx, AV_LOG_ERROR,
                       "AC EOB marker is absent pos=%d\n", mb->pos);
            block += 64;
            mb++;
        }
    }

    /* compute idct and place blocks */
    block = &sblock[0][0];
    mb = mb_data;
    for (mb_index = 0; mb_index < 5; mb_index++) {
        dv_calculate_mb_xy(s, work_chunk, mb_index, &mb_x, &mb_y);

        /* idct_put'ting luminance */
        if ((s->sys->pix_fmt == PIX_FMT_YUV420P) ||
            (s->sys->pix_fmt == PIX_FMT_YUV411P && mb_x >= (704 / 8)) ||
            (s->sys->height >= 720 && mb_y != 134)) {
            y_stride = (s->picture.linesize[0] <<
                        ((!is_field_mode[mb_index]) * log2_blocksize));
        } else {
            y_stride = (2 << log2_blocksize);
        }
        y_ptr = s->picture.data[0] +
                ((mb_y * s->picture.linesize[0] + mb_x) << log2_blocksize);
        linesize = s->picture.linesize[0] << is_field_mode[mb_index];
        mb[0].idct_put(y_ptr, linesize, block + 0*64);
        if (s->sys->video_stype == 4) { /* SD 422 */
            mb[2].idct_put(y_ptr + (1 << log2_blocksize),
                           linesize, block + 2*64);
        } else {
            mb[1].idct_put(y_ptr + (1 << log2_blocksize),
                           linesize, block + 1*64);
            mb[2].idct_put(y_ptr + y_stride, linesize, block + 2*64);
            mb[3].idct_put(y_ptr + (1 << log2_blocksize) + y_stride,
                           linesize, block + 3*64);
        }
        mb += 4;
        block += 4*64;

        /* idct_put'ting chrominance */
        c_offset = (((mb_y >> (s->sys->pix_fmt == PIX_FMT_YUV420P)) *
                     s->picture.linesize[1] +
                     (mb_x >> ((s->sys->pix_fmt == PIX_FMT_YUV411P) ? 2 : 1)))
                    << log2_blocksize);
        for (j = 2; j; j--) {
            uint8_t *c_ptr = s->picture.data[j] + c_offset;
            if (s->sys->pix_fmt == PIX_FMT_YUV411P && mb_x >= (704 / 8)) {
                /* Right edge of 411: split one chroma block over two
                   half-width destinations via a temporary buffer */
                uint64_t aligned_pixels[64/8];
                uint8_t *pixels = (uint8_t*)aligned_pixels;
                uint8_t *c_ptr1, *ptr1;
                int x, y;
                mb->idct_put(pixels, 8, block);
                for (y = 0; y < (1 << log2_blocksize); y++,
                     c_ptr += s->picture.linesize[j], pixels += 8) {
                    ptr1 = pixels + (1 << (log2_blocksize - 1));
                    c_ptr1 = c_ptr +
                             (s->picture.linesize[j] << log2_blocksize);
                    for (x = 0; x < (1 << (log2_blocksize - 1)); x++) {
                        c_ptr[x] = pixels[x];
                        c_ptr1[x] = ptr1[x];
                    }
                }
                block += 64;
                mb++;
            } else {
                y_stride = (mb_y == 134) ? (1 << log2_blocksize) :
                           s->picture.linesize[j] <<
                           ((!is_field_mode[mb_index]) * log2_blocksize);
                linesize = s->picture.linesize[j] << is_field_mode[mb_index];
                (mb++)->idct_put(c_ptr, linesize, block);
                block += 64;
                if (s->sys->bpm == 8) {
                    (mb++)->idct_put(c_ptr + y_stride, linesize, block);
                    block += 64;
                }
            }
        }
    }
    return 0;
}
/*
 * Generate the legacy (pre-"cpu hotplug with SMI") ACPI AML for CPU
 * hotplug: the CPON bitmap package, _MAT/_STA/EJ0x helper methods, the
 * PRSC scan method, one Processor object per possible CPU, the NTFY
 * dispatcher and the \_GPE._E02 event handler.  Everything is appended
 * to @ctx; the hotplug register block lives at I/O port @io_base.
 */
void build_legacy_cpu_hotplug_aml(Aml *ctx, MachineState *machine,
                                  uint16_t io_base)
{
    Aml *dev;
    Aml *crs;
    Aml *pkg;
    Aml *field;
    Aml *method;
    Aml *if_ctx;
    Aml *else_ctx;
    int i, apic_idx;
    Aml *sb_scope = aml_scope("_SB");
    /* Template MADT Local APIC entry: type 0, length 8 */
    uint8_t madt_tmpl[8] = {0x00, 0x08, 0x00, 0x00, 0x00, 0, 0, 0};
    Aml *cpu_id = aml_arg(1);
    Aml *apic_id = aml_arg(0);
    Aml *cpu_on = aml_local(0);
    Aml *madt = aml_local(1);
    Aml *cpus_map = aml_name(CPU_ON_BITMAP);
    Aml *zero = aml_int(0);
    Aml *one = aml_int(1);
    MachineClass *mc = MACHINE_GET_CLASS(machine);
    CPUArchIdList *apic_ids = mc->possible_cpu_arch_ids(machine);
    PCMachineState *pcms = PC_MACHINE(machine);

    /*
     * _MAT method - creates an madt apic buffer
     * apic_id = Arg0 = Local APIC ID
     * cpu_id = Arg1 = Processor ID
     * cpu_on = Local0 = CPON flag for this cpu
     * madt = Local1 = Buffer (in madt apic form) to return
     */
    method = aml_method(CPU_MAT_METHOD, 2, AML_NOTSERIALIZED);
    aml_append(method,
        aml_store(aml_derefof(aml_index(cpus_map, apic_id)), cpu_on));
    aml_append(method,
        aml_store(aml_buffer(sizeof(madt_tmpl), madt_tmpl), madt));
    /* Update the processor id, lapic id, and enable/disable status */
    aml_append(method, aml_store(cpu_id, aml_index(madt, aml_int(2))));
    aml_append(method, aml_store(apic_id, aml_index(madt, aml_int(3))));
    aml_append(method, aml_store(cpu_on, aml_index(madt, aml_int(4))));
    aml_append(method, aml_return(madt));
    aml_append(sb_scope, method);

    /*
     * _STA method - return ON status of cpu
     * apic_id = Arg0 = Local APIC ID
     * cpu_on = Local0 = CPON flag for this cpu
     */
    method = aml_method(CPU_STATUS_METHOD, 1, AML_NOTSERIALIZED);
    aml_append(method,
        aml_store(aml_derefof(aml_index(cpus_map, apic_id)), cpu_on));
    if_ctx = aml_if(cpu_on);
    {
        aml_append(if_ctx, aml_return(aml_int(0xF)));
    }
    aml_append(method, if_ctx);
    else_ctx = aml_else();
    {
        aml_append(else_ctx, aml_return(zero));
    }
    aml_append(method, else_ctx);
    aml_append(sb_scope, method);

    /* Eject method: legacy code cannot really eject, so just sleep */
    method = aml_method(CPU_EJECT_METHOD, 2, AML_NOTSERIALIZED);
    aml_append(method, aml_sleep(200));
    aml_append(sb_scope, method);

    /* Scan method: diff the PRS hardware bitmap against CPON and Notify
     * the OSPM about every CPU whose state changed */
    method = aml_method(CPU_SCAN_METHOD, 0, AML_NOTSERIALIZED);
    {
        Aml *while_ctx, *if_ctx2, *else_ctx2;
        Aml *bus_check_evt = aml_int(1);
        Aml *remove_evt = aml_int(3);
        Aml *status_map = aml_local(5); /* Local5 = active cpu bitmap */
        Aml *byte = aml_local(2); /* Local2 = last read byte from bitmap */
        Aml *idx = aml_local(0); /* Processor ID / APIC ID iterator */
        Aml *is_cpu_on = aml_local(1); /* Local1 = CPON flag for cpu */
        Aml *status = aml_local(3); /* Local3 = active state for cpu */

        aml_append(method, aml_store(aml_name(CPU_STATUS_MAP), status_map));
        aml_append(method, aml_store(zero, byte));
        aml_append(method, aml_store(zero, idx));

        /* While (idx < SizeOf(CPON)) */
        while_ctx = aml_while(aml_lless(idx, aml_sizeof(cpus_map)));
        aml_append(while_ctx,
            aml_store(aml_derefof(aml_index(cpus_map, idx)), is_cpu_on));

        if_ctx = aml_if(aml_and(idx, aml_int(0x07), NULL));
        {
            /* Shift down previously read bitmap byte */
            aml_append(if_ctx, aml_shiftright(byte, one, byte));
        }
        aml_append(while_ctx, if_ctx);

        else_ctx = aml_else();
        {
            /* Read next byte from cpu bitmap */
            aml_append(else_ctx,
                aml_store(aml_derefof(aml_index(status_map,
                    aml_shiftright(idx, aml_int(3), NULL))), byte));
        }
        aml_append(while_ctx, else_ctx);

        aml_append(while_ctx, aml_store(aml_and(byte, one, NULL), status));
        if_ctx = aml_if(aml_lnot(aml_equal(is_cpu_on, status)));
        {
            /* State change - update CPON with new state */
            aml_append(if_ctx, aml_store(status, aml_index(cpus_map, idx)));
            if_ctx2 = aml_if(aml_equal(status, one));
            {
                aml_append(if_ctx2,
                    aml_call2(AML_NOTIFY_METHOD, idx, bus_check_evt));
            }
            aml_append(if_ctx, if_ctx2);
            else_ctx2 = aml_else();
            {
                aml_append(else_ctx2,
                    aml_call2(AML_NOTIFY_METHOD, idx, remove_evt));
            }
        }
        aml_append(if_ctx, else_ctx2);
        aml_append(while_ctx, if_ctx);

        aml_append(while_ctx, aml_increment(idx)); /* go to next cpu */
        aml_append(method, while_ctx);
    }
    aml_append(sb_scope, method);

    /* The current AML generator can cover the APIC ID range [0..255],
     * inclusive, for VCPU hotplug. */
    QEMU_BUILD_BUG_ON(ACPI_CPU_HOTPLUG_ID_LIMIT > 256);
    g_assert(pcms->apic_id_limit <= ACPI_CPU_HOTPLUG_ID_LIMIT);

    /* create PCI0.PRES device and its _CRS to reserve CPU hotplug MMIO */
    dev = aml_device("PCI0." stringify(CPU_HOTPLUG_RESOURCE_DEVICE));
    aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A06")));
    aml_append(dev,
        aml_name_decl("_UID", aml_string("CPU Hotplug resources"))
    );
    /* device present, functioning, decoding, not shown in UI */
    aml_append(dev, aml_name_decl("_STA", aml_int(0xB)));
    crs = aml_resource_template();
    aml_append(crs,
        aml_io(AML_DECODE16, io_base, io_base, 1, ACPI_GPE_PROC_LEN)
    );
    aml_append(dev, aml_name_decl("_CRS", crs));
    aml_append(sb_scope, dev);
    /* declare CPU hotplug MMIO region and PRS field to access it */
    aml_append(sb_scope, aml_operation_region(
        "PRST", AML_SYSTEM_IO, aml_int(io_base), ACPI_GPE_PROC_LEN));
    field = aml_field("PRST", AML_BYTE_ACC, AML_NOLOCK, AML_PRESERVE);
    aml_append(field, aml_named_field("PRS", 256));
    aml_append(sb_scope, field);

    /* build Processor object for each processor */
    for (i = 0; i < apic_ids->len; i++) {
        int apic_id = apic_ids->cpus[i].arch_id;

        assert(apic_id < ACPI_CPU_HOTPLUG_ID_LIMIT);

        dev = aml_processor(i, 0, 0, "CP%.02X", apic_id);

        method = aml_method("_MAT", 0, AML_NOTSERIALIZED);
        aml_append(method,
            aml_return(aml_call2(CPU_MAT_METHOD,
                                 aml_int(apic_id), aml_int(i))
        ));
        aml_append(dev, method);

        method = aml_method("_STA", 0, AML_NOTSERIALIZED);
        aml_append(method,
            aml_return(aml_call1(CPU_STATUS_METHOD, aml_int(apic_id))));
        aml_append(dev, method);

        method = aml_method("_EJ0", 1, AML_NOTSERIALIZED);
        aml_append(method,
            aml_return(aml_call2(CPU_EJECT_METHOD, aml_int(apic_id),
                aml_arg(0)))
        );
        aml_append(dev, method);

        aml_append(sb_scope, dev);
    }

    /* build this code:
     *   Method(NTFY, 2) {If (LEqual(Arg0, 0x00)) {Notify(CP00, Arg1)} ...}
     */
    /* Arg0 = APIC ID */
    method = aml_method(AML_NOTIFY_METHOD, 2, AML_NOTSERIALIZED);
    for (i = 0; i < apic_ids->len; i++) {
        int apic_id = apic_ids->cpus[i].arch_id;

        if_ctx = aml_if(aml_equal(aml_arg(0), aml_int(apic_id)));
        aml_append(if_ctx,
            aml_notify(aml_name("CP%.02X", apic_id), aml_arg(1))
        );
        aml_append(method, if_ctx);
    }
    aml_append(sb_scope, method);

    /* build "Name(CPON, Package() { One, One, ..., Zero, Zero, ... })"
     *
     * Note: The ability to create variable-sized packages was first
     * introduced in ACPI 2.0. ACPI 1.0 only allowed fixed-size packages
     * with up to 255 elements. Windows guests up to win2k8 fail when
     * VarPackageOp is used.
     */
    pkg = pcms->apic_id_limit <= 255 ? aml_package(pcms->apic_id_limit) :
                                       aml_varpackage(pcms->apic_id_limit);

    for (i = 0, apic_idx = 0; i < apic_ids->len; i++) {
        int apic_id = apic_ids->cpus[i].arch_id;

        /* Fill holes in the APIC ID space with "off" entries */
        for (; apic_idx < apic_id; apic_idx++) {
            aml_append(pkg, aml_int(0));
        }
        aml_append(pkg, aml_int(apic_ids->cpus[i].cpu ? 1 : 0));
        apic_idx = apic_id + 1;
    }
    aml_append(sb_scope, aml_name_decl(CPU_ON_BITMAP, pkg));
    g_free(apic_ids);
    aml_append(ctx, sb_scope);

    /* GPE event 2 triggers a CPU rescan */
    method = aml_method("\\_GPE._E02", 0, AML_NOTSERIALIZED);
    aml_append(method, aml_call0("\\_SB." CPU_SCAN_METHOD));
    aml_append(ctx, method);
}
/* Exercise QMP command dispatch with structured (QDict) arguments and
 * return values: builds the request dicts by hand, dispatches
 * user_def_cmd2 and user_def_cmd3, and checks every field of the nested
 * response dictionaries.
 *
 * Ownership note: the argument dicts (args, ud1a, ...) are handed over to
 * 'req' via qdict_put_obj(), so only 'req' and the returned objects are
 * explicitly released here. */
static void test_dispatch_cmd_io(void)
{
    QDict *req = qdict_new();
    QDict *args = qdict_new();
    QDict *args3 = qdict_new();
    QDict *ud1a = qdict_new();
    QDict *ud1b = qdict_new();
    QDict *ret, *ret_dict, *ret_dict_dict, *ret_dict_dict_userdef;
    QDict *ret_dict_dict2, *ret_dict_dict2_userdef;
    QInt *ret3;

    /* Two UserDefOne-shaped argument objects for user_def_cmd2. */
    qdict_put_obj(ud1a, "integer", QOBJECT(qint_from_int(42)));
    qdict_put_obj(ud1a, "string", QOBJECT(qstring_from_str("hello")));
    qdict_put_obj(ud1b, "integer", QOBJECT(qint_from_int(422)));
    qdict_put_obj(ud1b, "string", QOBJECT(qstring_from_str("hello2")));
    qdict_put_obj(args, "ud1a", QOBJECT(ud1a));
    qdict_put_obj(args, "ud1b", QOBJECT(ud1b));
    qdict_put_obj(req, "arguments", QOBJECT(args));
    qdict_put_obj(req, "execute", QOBJECT(qstring_from_str("user_def_cmd2")));

    ret = qobject_to_qdict(test_qmp_dispatch(req));

    /* Walk the nested response: ret -> dict -> {dict, dict2} -> userdef. */
    assert(!strcmp(qdict_get_str(ret, "string"), "blah1"));
    ret_dict = qdict_get_qdict(ret, "dict");
    assert(!strcmp(qdict_get_str(ret_dict, "string"), "blah2"));
    ret_dict_dict = qdict_get_qdict(ret_dict, "dict");
    ret_dict_dict_userdef = qdict_get_qdict(ret_dict_dict, "userdef");
    assert(qdict_get_int(ret_dict_dict_userdef, "integer") == 42);
    assert(!strcmp(qdict_get_str(ret_dict_dict_userdef, "string"), "hello"));
    assert(!strcmp(qdict_get_str(ret_dict_dict, "string"), "blah3"));
    ret_dict_dict2 = qdict_get_qdict(ret_dict, "dict2");
    ret_dict_dict2_userdef = qdict_get_qdict(ret_dict_dict2, "userdef");
    assert(qdict_get_int(ret_dict_dict2_userdef, "integer") == 422);
    assert(!strcmp(qdict_get_str(ret_dict_dict2_userdef, "string"), "hello2"));
    assert(!strcmp(qdict_get_str(ret_dict_dict2, "string"), "blah4"));
    QDECREF(ret);

    /* Second dispatch: user_def_cmd3 returns a plain integer. */
    qdict_put(args3, "a", qint_from_int(66));
    qdict_put(req, "arguments", args3);
    qdict_put(req, "execute", qstring_from_str("user_def_cmd3"));

    ret3 = qobject_to_qint(test_qmp_dispatch(req));
    assert(qint_get_int(ret3) == 66);
    QDECREF(ret3);

    QDECREF(req);
}
0 | static void coroutine_fn bdrv_flush_co_entry(void *opaque) { RwCo *rwco = opaque; rwco->ret = bdrv_co_flush(rwco->bs); } | 25,222 |
0 | static inline void t_gen_add_flag(TCGv d, int flag) { TCGv c; c = tcg_temp_new(TCG_TYPE_TL); t_gen_mov_TN_preg(c, PR_CCS); /* Propagate carry into d. */ tcg_gen_andi_tl(c, c, 1 << flag); if (flag) tcg_gen_shri_tl(c, c, flag); tcg_gen_add_tl(d, d, c); tcg_temp_free(c); } | 25,224 |
0 | void console_select(unsigned int index) { TextConsole *s; if (index >= MAX_CONSOLES) return; s = consoles[index]; if (s) { active_console = s; if (s->g_width && s->g_height && (s->g_width != s->ds->width || s->g_height != s->ds->height)) dpy_resize(s->ds, s->g_width, s->g_height); vga_hw_invalidate(); } } | 25,225 |
0 | static void update_max_chunk_size(BDRVDMGState *s, uint32_t chunk, uint32_t *max_compressed_size, uint32_t *max_sectors_per_chunk) { uint32_t compressed_size = 0; uint32_t uncompressed_sectors = 0; switch (s->types[chunk]) { case 0x80000005: /* zlib compressed */ case 0x80000006: /* bzip2 compressed */ compressed_size = s->lengths[chunk]; uncompressed_sectors = s->sectorcounts[chunk]; break; case 1: /* copy */ uncompressed_sectors = (s->lengths[chunk] + 511) / 512; break; case 2: /* zero */ uncompressed_sectors = s->sectorcounts[chunk]; break; } if (compressed_size > *max_compressed_size) { *max_compressed_size = compressed_size; } if (uncompressed_sectors > *max_sectors_per_chunk) { *max_sectors_per_chunk = uncompressed_sectors; } } | 25,226 |
0 | static int decode_syncpoint(NUTContext *nut, int64_t *ts, int64_t *back_ptr){ AVFormatContext *s= nut->avf; ByteIOContext *bc = &s->pb; int64_t end, tmp; AVRational time_base; nut->last_syncpoint_pos= url_ftell(bc)-8; end= get_packetheader(nut, bc, 1); end += url_ftell(bc); tmp= get_v(bc); *back_ptr= nut->last_syncpoint_pos - 16*get_v(bc); if(*back_ptr < 0) return -1; ff_nut_reset_ts(nut, nut->time_base[tmp % nut->time_base_count], tmp); if(skip_reserved(bc, end) || get_checksum(bc)){ av_log(s, AV_LOG_ERROR, "sync point checksum mismatch\n"); return -1; } *ts= tmp / s->nb_streams * av_q2d(nut->time_base[tmp % s->nb_streams])*AV_TIME_BASE; add_sp(nut, nut->last_syncpoint_pos, *back_ptr, *ts); return 0; } | 25,227 |
0 | int av_image_fill_linesizes(int linesizes[4], enum PixelFormat pix_fmt, int width) { int i; const AVPixFmtDescriptor *desc = &av_pix_fmt_descriptors[pix_fmt]; int max_step [4]; /* max pixel step for each plane */ int max_step_comp[4]; /* the component for each plane which has the max pixel step */ memset(linesizes, 0, 4*sizeof(linesizes[0])); if (desc->flags & PIX_FMT_HWACCEL) return AVERROR(EINVAL); if (desc->flags & PIX_FMT_BITSTREAM) { linesizes[0] = (width * (desc->comp[0].step_minus1+1) + 7) >> 3; return 0; } av_image_fill_max_pixsteps(max_step, max_step_comp, desc); for (i = 0; i < 4; i++) { int s = (max_step_comp[i] == 1 || max_step_comp[i] == 2) ? desc->log2_chroma_w : 0; linesizes[i] = max_step[i] * (((width + (1 << s) - 1)) >> s); } return 0; } | 25,228 |
0 | static void qmp_output_type_int64(Visitor *v, const char *name, int64_t *obj, Error **errp) { QmpOutputVisitor *qov = to_qov(v); qmp_output_add(qov, name, qint_from_int(*obj)); } | 25,229 |
/* Cancel a pending bottom half: clear its scheduled flag so the dispatcher
 * skips it.  The BH object itself is left intact and can be rescheduled;
 * this does not delete or free it. */
void qemu_bh_cancel(QEMUBH *bh)
{
    bh->scheduled = 0;
}
0 | static int cdrom_probe_device(const char *filename) { int fd, ret; int prio = 0; if (strstart(filename, "/dev/cd", NULL)) prio = 50; fd = open(filename, O_RDONLY | O_NONBLOCK); if (fd < 0) { goto out; } /* Attempt to detect via a CDROM specific ioctl */ ret = ioctl(fd, CDROM_DRIVE_STATUS, CDSL_CURRENT); if (ret >= 0) prio = 100; close(fd); out: return prio; } | 25,231 |
/* MMIO read handler for the MegaRAID SAS controller registers.
 *
 * Only a handful of registers are readable; everything else traces an
 * invalid access and returns 0.  OMSG0/OSP0 report the firmware state word
 * composed of the MSI-X capability bit, the state bits, the SGE count and
 * the command count. */
static uint64_t megasas_mmio_read(void *opaque, target_phys_addr_t addr,
                                  unsigned size)
{
    MegasasState *s = opaque;
    uint32_t retval = 0;

    switch (addr) {
    case MFI_IDB:
        /* inbound doorbell always reads as 0 */
        retval = 0;
        break;
    case MFI_OMSG0:
    case MFI_OSP0:
        /* firmware status: MSI-X flag | state | SGE count | command count */
        retval = (megasas_use_msix(s) ? MFI_FWSTATE_MSIX_SUPPORTED : 0) |
            (s->fw_state & MFI_FWSTATE_MASK) |
            ((s->fw_sge & 0xff) << 16) |
            (s->fw_cmds & 0xFFFF);
        break;
    case MFI_OSTS:
        /* outbound interrupt status: only signal when enabled and pending */
        if (megasas_intr_enabled(s) && s->doorbell) {
            retval = MFI_1078_RM | 1;
        }
        break;
    case MFI_OMSK:
        retval = s->intr_mask;
        break;
    case MFI_ODCR0:
        retval = s->doorbell;
        break;
    default:
        trace_megasas_mmio_invalid_readl(addr);
        break;
    }
    trace_megasas_mmio_readl(addr, retval);
    return retval;
}
/* MMIO read handler for the PXA2xx RTC register block.
 *
 * The counter registers (RCNR, RDCR, SWCR) are not advanced by a timer in
 * this model; instead the current value is computed on demand from the
 * last latched value plus the rtc_clock time elapsed since it was latched,
 * scaled by the RTTR trim divider. */
static uint64_t pxa2xx_rtc_read(void *opaque, hwaddr addr, unsigned size)
{
    PXA2xxRTCState *s = (PXA2xxRTCState *) opaque;

    switch (addr) {
    case RTTR:
        return s->rttr;
    case RTSR:
        return s->rtsr;
    case RTAR:
        return s->rtar;
    case RDAR1:
        return s->rdar1;
    case RDAR2:
        return s->rdar2;
    case RYAR1:
        return s->ryar1;
    case RYAR2:
        return s->ryar2;
    case SWAR1:
        return s->swar1;
    case SWAR2:
        return s->swar2;
    case PIAR:
        return s->piar;
    case RCNR:
        /* latched value + elapsed-ms converted to counter ticks:
         * << 15 keeps precision, (rttr & 0xffff) + 1 is the divider */
        return s->last_rcnr +
            ((qemu_clock_get_ms(rtc_clock) - s->last_hz) << 15) /
            (1000 * ((s->rttr & 0xffff) + 1));
    case RDCR:
        return s->last_rdcr +
            ((qemu_clock_get_ms(rtc_clock) - s->last_hz) << 15) /
            (1000 * ((s->rttr & 0xffff) + 1));
    case RYCR:
        return s->last_rycr;
    case SWCR:
        /* stopwatch advances in 10 ms units, only while RTSR bit 12 is set */
        if (s->rtsr & (1 << 12))
            return s->last_swcr +
                (qemu_clock_get_ms(rtc_clock) - s->last_sw) / 10;
        else
            return s->last_swcr;
    default:
        printf("%s: Bad register " REG_FMT "\n", __FUNCTION__, addr);
        break;
    }
    return 0;
}
/* Check that the QMP output visitor serializes a flat union correctly:
 * the discriminator ('enum1') and the common member ('string') appear
 * alongside the variant's own fields in one flat dictionary. */
static void test_visitor_out_union_flat(TestOutputVisitorData *data,
                                        const void *unused)
{
    QObject *arg;
    QDict *qdict;
    Error *err = NULL;

    UserDefFlatUnion *tmp = g_malloc0(sizeof(UserDefFlatUnion));
    tmp->enum1 = ENUM_ONE_VALUE1;
    tmp->string = g_strdup("str");
    tmp->value1 = g_malloc0(sizeof(UserDefA));
    /* TODO when generator bug is fixed: tmp->integer = 41; */
    tmp->value1->boolean = true;

    visit_type_UserDefFlatUnion(data->ov, &tmp, NULL, &err);
    g_assert(err == NULL);
    arg = qmp_output_get_qobject(data->qov);

    g_assert(qobject_type(arg) == QTYPE_QDICT);
    qdict = qobject_to_qdict(arg);

    /* All members must be flattened into a single dict. */
    g_assert_cmpstr(qdict_get_str(qdict, "enum1"), ==, "value1");
    g_assert_cmpstr(qdict_get_str(qdict, "string"), ==, "str");
    /* TODO g_assert_cmpint(qdict_get_int(qdict, "integer"), ==, 41); */
    g_assert_cmpint(qdict_get_bool(qdict, "boolean"), ==, true);

    qapi_free_UserDefFlatUnion(tmp);
    QDECREF(qdict);
}
0 | static SaveStateEntry *find_se(const char *idstr, int instance_id) { SaveStateEntry *se; QTAILQ_FOREACH(se, &savevm_handlers, entry) { if (!strcmp(se->idstr, idstr) && instance_id == se->instance_id) return se; } return NULL; } | 25,237 |
/* Install x86 SIMD implementations of the Vorbis DSP functions.
 * Requires yasm-built assembly (HAVE_YASM); the 3DNow! variant is only
 * built on 32-bit x86.  SSE, when available, overrides 3DNow!. */
av_cold void ff_vorbisdsp_init_x86(VorbisDSPContext *dsp)
{
#if HAVE_YASM
    int cpu_flags = av_get_cpu_flags();

#if ARCH_X86_32
    if (cpu_flags & AV_CPU_FLAG_3DNOW)
        dsp->vorbis_inverse_coupling = ff_vorbis_inverse_coupling_3dnow;
#endif /* ARCH_X86_32 */
    if (cpu_flags & AV_CPU_FLAG_SSE)
        dsp->vorbis_inverse_coupling = ff_vorbis_inverse_coupling_sse;
#endif /* HAVE_YASM */
}
/* Push one packet through the output stream's bitstream-filter chain (if
 * any) and hand the result(s) to the muxer via write_packet().
 *
 * The filters in ost->bsf_ctx form a pipeline indexed 0..nb-1.  'idx' is
 * the position of the next filter to feed; packets are drained from filter
 * idx-1.  EAGAIN from a filter means it has no more output, so we step
 * back up the chain; the loop ends when idx returns to 0.  Any hard error
 * (other than EOF) is fatal and terminates the program. */
static void output_packet(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
{
    int ret = 0;

    /* apply the output bitstream filters, if any */
    if (ost->nb_bitstream_filters) {
        int idx;

        ret = av_bsf_send_packet(ost->bsf_ctx[0], pkt);
        if (ret < 0)
            goto finish;

        idx = 1;
        while (idx) {
            /* get a packet from the previous filter up the chain */
            ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
            if (ret == AVERROR(EAGAIN)) {
                /* this filter is drained; move back upstream */
                ret = 0;
                idx--;
                continue;
            } else if (ret < 0)
                goto finish;

            /* send it to the next filter down the chain or to the muxer */
            if (idx < ost->nb_bitstream_filters) {
                ret = av_bsf_send_packet(ost->bsf_ctx[idx], pkt);
                if (ret < 0)
                    goto finish;
                idx++;
            } else
                write_packet(s, pkt, ost);
        }
    } else
        write_packet(s, pkt, ost);

finish:
    if (ret < 0 && ret != AVERROR_EOF) {
        av_log(NULL, AV_LOG_FATAL, "Error applying bitstream filters to an output "
               "packet for stream #%d:%d.\n", ost->file_index, ost->index);
        exit_program(1);
    }
}
/* H.264 temporal direct-mode motion vector prediction (B slices).
 *
 * Derives the L0/L1 motion vectors and reference indices of the current
 * (sub-)macroblock from the co-located macroblock in the first L1
 * reference picture, scaling the co-located L1 motion by the distance
 * scale factor.  Handles all MBAFF frame/field combinations by remapping
 * mb_xy/mb_y and the b8/b4 strides before reading the co-located data.
 *
 * Results are written into sl->mv_cache / sl->ref_cache / sl->sub_mb_type,
 * and *mb_type is updated with the derived partition/prediction flags. */
static void pred_temp_direct_motion(const H264Context *const h,
                                    H264SliceContext *sl,
                                    int *mb_type)
{
    int b8_stride = 2;
    int b4_stride = h->b_stride;
    int mb_xy = sl->mb_xy, mb_y = sl->mb_y;
    int mb_type_col[2];                      /* co-located mb_type (top/bottom) */
    const int16_t (*l1mv0)[2], (*l1mv1)[2];  /* co-located L0/L1 motion vectors */
    const int8_t *l1ref0, *l1ref1;           /* co-located L0/L1 reference indices */
    const int is_b8x8 = IS_8X8(*mb_type);
    unsigned int sub_mb_type;
    int i8, i4;

    assert(sl->ref_list[1][0].reference & 3);

    /* wait until the co-located row has been decoded (frame threading) */
    await_reference_mb_row(h, sl->ref_list[1][0].parent,
                           sl->mb_y + !!IS_INTERLACED(*mb_type));

    /* Remap the co-located macroblock address/strides for all coding-mode
     * combinations of current vs. reference picture (AFL/AFR/FR/FL). */
    if (IS_INTERLACED(sl->ref_list[1][0].parent->mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL
        if (!IS_INTERLACED(*mb_type)) {                             // AFR/FR -> AFL/FL
            mb_y = (sl->mb_y & ~1) + sl->col_parity;
            mb_xy = sl->mb_x +
                    ((sl->mb_y & ~1) + sl->col_parity) * h->mb_stride;
            b8_stride = 0;
        } else {
            mb_y += sl->col_fieldoff;
            mb_xy += h->mb_stride * sl->col_fieldoff; // non-zero for FL -> FL & differ parity
        }
        goto single_col;
    } else {                                 // AFL/AFR/FR/FL -> AFR/FR
        if (IS_INTERLACED(*mb_type)) {       // AFL /FL -> AFR/FR
            mb_y = sl->mb_y & ~1;
            mb_xy = sl->mb_x + (sl->mb_y & ~1) * h->mb_stride;
            mb_type_col[0] = sl->ref_list[1][0].parent->mb_type[mb_xy];
            mb_type_col[1] = sl->ref_list[1][0].parent->mb_type[mb_xy + h->mb_stride];
            b8_stride = 2 + 4 * h->mb_stride;
            b4_stride *= 6;
            if (IS_INTERLACED(mb_type_col[0]) != IS_INTERLACED(mb_type_col[1])) {
                mb_type_col[0] &= ~MB_TYPE_INTERLACED;
                mb_type_col[1] &= ~MB_TYPE_INTERLACED;
            }

            sub_mb_type = MB_TYPE_16x16 | MB_TYPE_P0L0 | MB_TYPE_P0L1 |
                          MB_TYPE_DIRECT2; /* B_SUB_8x8 */
            if ((mb_type_col[0] & MB_TYPE_16x16_OR_INTRA) &&
                (mb_type_col[1] & MB_TYPE_16x16_OR_INTRA) &&
                !is_b8x8) {
                *mb_type |= MB_TYPE_16x8 | MB_TYPE_L0L1 |
                            MB_TYPE_DIRECT2; /* B_16x8 */
            } else {
                *mb_type |= MB_TYPE_8x8 | MB_TYPE_L0L1;
            }
        } else { // AFR/FR -> AFR/FR
single_col:
            mb_type_col[0] =
            mb_type_col[1] = sl->ref_list[1][0].parent->mb_type[mb_xy];

            sub_mb_type = MB_TYPE_16x16 | MB_TYPE_P0L0 | MB_TYPE_P0L1 |
                          MB_TYPE_DIRECT2; /* B_SUB_8x8 */
            if (!is_b8x8 && (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)) {
                *mb_type |= MB_TYPE_16x16 | MB_TYPE_P0L0 | MB_TYPE_P0L1 |
                            MB_TYPE_DIRECT2; /* B_16x16 */
            } else if (!is_b8x8 &&
                       (mb_type_col[0] & (MB_TYPE_16x8 | MB_TYPE_8x16))) {
                *mb_type |= MB_TYPE_L0L1 | MB_TYPE_DIRECT2 |
                            (mb_type_col[0] & (MB_TYPE_16x8 | MB_TYPE_8x16));
            } else {
                if (!h->sps.direct_8x8_inference_flag) {
                    /* FIXME: save sub mb types from previous frames (or derive
                     * from MVs) so we know exactly what block size to use */
                    sub_mb_type = MB_TYPE_8x8 | MB_TYPE_P0L0 | MB_TYPE_P0L1 |
                                  MB_TYPE_DIRECT2; /* B_SUB_4x4 */
                }
                *mb_type |= MB_TYPE_8x8 | MB_TYPE_L0L1;
            }
        }
    }

    await_reference_mb_row(h, sl->ref_list[1][0].parent, mb_y);

    /* pointers into the co-located picture's motion/reference arrays */
    l1mv0 = &sl->ref_list[1][0].parent->motion_val[0][h->mb2b_xy[mb_xy]];
    l1mv1 = &sl->ref_list[1][0].parent->motion_val[1][h->mb2b_xy[mb_xy]];
    l1ref0 = &sl->ref_list[1][0].parent->ref_index[0][4 * mb_xy];
    l1ref1 = &sl->ref_list[1][0].parent->ref_index[1][4 * mb_xy];
    if (!b8_stride) {
        /* field-from-frame case: bottom field uses the lower half */
        if (sl->mb_y & 1) {
            l1ref0 += 2;
            l1ref1 += 2;
            l1mv0 += 2 * b4_stride;
            l1mv1 += 2 * b4_stride;
        }
    }

    {
        const int *map_col_to_list0[2] = { sl->map_col_to_list0[0],
                                           sl->map_col_to_list0[1] };
        const int *dist_scale_factor = sl->dist_scale_factor;
        int ref_offset;

        if (FRAME_MBAFF(h) && IS_INTERLACED(*mb_type)) {
            map_col_to_list0[0] = sl->map_col_to_list0_field[sl->mb_y & 1][0];
            map_col_to_list0[1] = sl->map_col_to_list0_field[sl->mb_y & 1][1];
            dist_scale_factor = sl->dist_scale_factor_field[sl->mb_y & 1];
        }
        ref_offset = (sl->ref_list[1][0].parent->mbaff << 4) &
                     (mb_type_col[0] >> 3);

        if (IS_INTERLACED(*mb_type) != IS_INTERLACED(mb_type_col[0])) {
            /* frame<->field mismatch: vertical MV components must be
             * rescaled by a factor of two (y_shift) */
            int y_shift = 2 * !IS_INTERLACED(*mb_type);
            assert(h->sps.direct_8x8_inference_flag);

            for (i8 = 0; i8 < 4; i8++) {
                const int x8 = i8 & 1;
                const int y8 = i8 >> 1;
                int ref0, scale;
                const int16_t (*l1mv)[2] = l1mv0;

                if (is_b8x8 && !IS_DIRECT(sl->sub_mb_type[i8]))
                    continue;
                sl->sub_mb_type[i8] = sub_mb_type;

                fill_rectangle(&sl->ref_cache[1][scan8[i8 * 4]], 2, 2, 8, 0, 1);
                if (IS_INTRA(mb_type_col[y8])) {
                    /* intra co-located block: zero MVs, reference 0 */
                    fill_rectangle(&sl->ref_cache[0][scan8[i8 * 4]], 2, 2, 8, 0, 1);
                    fill_rectangle(&sl->mv_cache[0][scan8[i8 * 4]], 2, 2, 8, 0, 4);
                    fill_rectangle(&sl->mv_cache[1][scan8[i8 * 4]], 2, 2, 8, 0, 4);
                    continue;
                }

                /* prefer the co-located L0 reference, fall back to L1 */
                ref0 = l1ref0[x8 + y8 * b8_stride];
                if (ref0 >= 0)
                    ref0 = map_col_to_list0[0][ref0 + ref_offset];
                else {
                    ref0 = map_col_to_list0[1][l1ref1[x8 + y8 * b8_stride] +
                                               ref_offset];
                    l1mv = l1mv1;
                }
                scale = dist_scale_factor[ref0];

                fill_rectangle(&sl->ref_cache[0][scan8[i8 * 4]], 2, 2, 8,
                               ref0, 1);
                {
                    const int16_t *mv_col = l1mv[x8 * 3 + y8 * b4_stride];
                    int my_col = (mv_col[1] << y_shift) / 2;
                    int mx = (scale * mv_col[0] + 128) >> 8;
                    int my = (scale * my_col + 128) >> 8;
                    fill_rectangle(&sl->mv_cache[0][scan8[i8 * 4]], 2, 2, 8,
                                   pack16to32(mx, my), 4);
                    fill_rectangle(&sl->mv_cache[1][scan8[i8 * 4]], 2, 2, 8,
                                   pack16to32(mx - mv_col[0], my - my_col), 4);
                }
            }
            return;
        }

        /* one-to-one mv scaling */

        if (IS_16X16(*mb_type)) {
            int ref, mv0, mv1;

            fill_rectangle(&sl->ref_cache[1][scan8[0]], 4, 4, 8, 0, 1);
            if (IS_INTRA(mb_type_col[0])) {
                ref = mv0 = mv1 = 0;
            } else {
                const int ref0 = l1ref0[0] >= 0
                                     ? map_col_to_list0[0][l1ref0[0] + ref_offset]
                                     : map_col_to_list0[1][l1ref1[0] + ref_offset];
                const int scale = dist_scale_factor[ref0];
                const int16_t *mv_col = l1ref0[0] >= 0 ? l1mv0[0] : l1mv1[0];
                int mv_l0[2];
                mv_l0[0] = (scale * mv_col[0] + 128) >> 8;
                mv_l0[1] = (scale * mv_col[1] + 128) >> 8;
                ref = ref0;
                mv0 = pack16to32(mv_l0[0], mv_l0[1]);
                /* L1 MV is the difference between scaled and co-located MV */
                mv1 = pack16to32(mv_l0[0] - mv_col[0], mv_l0[1] - mv_col[1]);
            }
            fill_rectangle(&sl->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
            fill_rectangle(&sl->mv_cache[0][scan8[0]], 4, 4, 8, mv0, 4);
            fill_rectangle(&sl->mv_cache[1][scan8[0]], 4, 4, 8, mv1, 4);
        } else {
            for (i8 = 0; i8 < 4; i8++) {
                const int x8 = i8 & 1;
                const int y8 = i8 >> 1;
                int ref0, scale;
                const int16_t (*l1mv)[2] = l1mv0;

                if (is_b8x8 && !IS_DIRECT(sl->sub_mb_type[i8]))
                    continue;
                sl->sub_mb_type[i8] = sub_mb_type;
                fill_rectangle(&sl->ref_cache[1][scan8[i8 * 4]], 2, 2, 8, 0, 1);
                if (IS_INTRA(mb_type_col[0])) {
                    fill_rectangle(&sl->ref_cache[0][scan8[i8 * 4]], 2, 2, 8, 0, 1);
                    fill_rectangle(&sl->mv_cache[0][scan8[i8 * 4]], 2, 2, 8, 0, 4);
                    fill_rectangle(&sl->mv_cache[1][scan8[i8 * 4]], 2, 2, 8, 0, 4);
                    continue;
                }

                assert(b8_stride == 2);
                ref0 = l1ref0[i8];
                if (ref0 >= 0)
                    ref0 = map_col_to_list0[0][ref0 + ref_offset];
                else {
                    ref0 = map_col_to_list0[1][l1ref1[i8] + ref_offset];
                    l1mv = l1mv1;
                }
                scale = dist_scale_factor[ref0];

                fill_rectangle(&sl->ref_cache[0][scan8[i8 * 4]], 2, 2, 8,
                               ref0, 1);
                if (IS_SUB_8X8(sub_mb_type)) {
                    const int16_t *mv_col = l1mv[x8 * 3 + y8 * 3 * b4_stride];
                    int mx = (scale * mv_col[0] + 128) >> 8;
                    int my = (scale * mv_col[1] + 128) >> 8;
                    fill_rectangle(&sl->mv_cache[0][scan8[i8 * 4]], 2, 2, 8,
                                   pack16to32(mx, my), 4);
                    fill_rectangle(&sl->mv_cache[1][scan8[i8 * 4]], 2, 2, 8,
                                   pack16to32(mx - mv_col[0], my - mv_col[1]), 4);
                } else {
                    /* 4x4 sub-blocks: scale each co-located MV individually */
                    for (i4 = 0; i4 < 4; i4++) {
                        const int16_t *mv_col = l1mv[x8 * 2 + (i4 & 1) +
                                                     (y8 * 2 + (i4 >> 1)) * b4_stride];
                        int16_t *mv_l0 = sl->mv_cache[0][scan8[i8 * 4 + i4]];
                        mv_l0[0] = (scale * mv_col[0] + 128) >> 8;
                        mv_l0[1] = (scale * mv_col[1] + 128) >> 8;
                        AV_WN32A(sl->mv_cache[1][scan8[i8 * 4 + i4]],
                                 pack16to32(mv_l0[0] - mv_col[0],
                                            mv_l0[1] - mv_col[1]));
                    }
                }
            }
        }
    }
}
/* VA-API hardware-decode hook: begin a VC-1 frame.
 *
 * Allocates and fills a VAPictureParameterBufferVC1 from the software
 * decoder state (VC1Context / MpegEncContext), sets up forward/backward
 * reference surfaces for P/B pictures, and packs the per-macroblock
 * bitplanes (direct/skip/mv-type/ac-pred/overflags) into the VA bitplane
 * buffer, two macroblocks per byte.
 *
 * Returns 0 on success, -1 when a VA buffer allocation fails.  Fields
 * marked "XXX: interlaced frame" are left at 0 pending interlaced
 * support. */
static int vaapi_vc1_start_frame(AVCodecContext *avctx,
                                 av_unused const uint8_t *buffer,
                                 av_unused uint32_t size)
{
    const VC1Context *v = avctx->priv_data;
    const MpegEncContext *s = &v->s;
    struct vaapi_context * const vactx = avctx->hwaccel_context;
    VAPictureParameterBufferVC1 *pic_param;

    vactx->slice_param_size = sizeof(VASliceParameterBufferVC1);

    /* Fill in VAPictureParameterBufferVC1 */
    pic_param = ff_vaapi_alloc_pic_param(vactx, sizeof(VAPictureParameterBufferVC1));
    if (!pic_param)
        return -1;
    /* reference surfaces are filled in below, per picture type */
    pic_param->forward_reference_picture = VA_INVALID_ID;
    pic_param->backward_reference_picture = VA_INVALID_ID;
    pic_param->inloop_decoded_picture = VA_INVALID_ID;
    pic_param->sequence_fields.value = 0; /* reset all bits */
    pic_param->sequence_fields.bits.pulldown = v->broadcast;
    pic_param->sequence_fields.bits.interlace = v->interlace;
    pic_param->sequence_fields.bits.tfcntrflag = v->tfcntrflag;
    pic_param->sequence_fields.bits.finterpflag = v->finterpflag;
    pic_param->sequence_fields.bits.psf = v->psf;
    pic_param->sequence_fields.bits.multires = v->multires;
    pic_param->sequence_fields.bits.overlap = v->overlap;
    pic_param->sequence_fields.bits.syncmarker = v->resync_marker;
    pic_param->sequence_fields.bits.rangered = v->rangered;
    pic_param->sequence_fields.bits.max_b_frames = s->avctx->max_b_frames;
#if VA_CHECK_VERSION(0,32,0)
    pic_param->sequence_fields.bits.profile = v->profile;
#endif
    pic_param->coded_width = s->avctx->coded_width;
    pic_param->coded_height = s->avctx->coded_height;
    pic_param->entrypoint_fields.value = 0; /* reset all bits */
    pic_param->entrypoint_fields.bits.broken_link = v->broken_link;
    pic_param->entrypoint_fields.bits.closed_entry = v->closed_entry;
    pic_param->entrypoint_fields.bits.panscan_flag = v->panscanflag;
    pic_param->entrypoint_fields.bits.loopfilter = s->loop_filter;
    pic_param->conditional_overlap_flag = v->condover;
    pic_param->fast_uvmc_flag = v->fastuvmc;
    pic_param->range_mapping_fields.value = 0; /* reset all bits */
    pic_param->range_mapping_fields.bits.luma_flag = v->range_mapy_flag;
    pic_param->range_mapping_fields.bits.luma = v->range_mapy;
    pic_param->range_mapping_fields.bits.chroma_flag = v->range_mapuv_flag;
    pic_param->range_mapping_fields.bits.chroma = v->range_mapuv;
    pic_param->b_picture_fraction = v->bfraction_lut_index;
    /* VLC table index expressed as an offset from the first P-CBPCY table */
    pic_param->cbp_table = v->cbpcy_vlc ? v->cbpcy_vlc - ff_vc1_cbpcy_p_vlc : 0;
    pic_param->mb_mode_table = 0; /* XXX: interlaced frame */
    pic_param->range_reduction_frame = v->rangeredfrm;
    pic_param->rounding_control = v->rnd;
    pic_param->post_processing = v->postproc;
    pic_param->picture_resolution_index = v->respic;
    pic_param->luma_scale = v->lumscale;
    pic_param->luma_shift = v->lumshift;
    pic_param->picture_fields.value = 0; /* reset all bits */
    pic_param->picture_fields.bits.picture_type = vc1_get_PTYPE(v);
    pic_param->picture_fields.bits.frame_coding_mode = v->fcm;
    pic_param->picture_fields.bits.top_field_first = v->tff;
    pic_param->picture_fields.bits.is_first_field = v->fcm == 0; /* XXX: interlaced frame */
    pic_param->picture_fields.bits.intensity_compensation = v->mv_mode == MV_PMODE_INTENSITY_COMP;
    pic_param->raw_coding.value = 0; /* reset all bits */
    pic_param->raw_coding.flags.mv_type_mb = v->mv_type_is_raw;
    pic_param->raw_coding.flags.direct_mb = v->dmb_is_raw;
    pic_param->raw_coding.flags.skip_mb = v->skip_is_raw;
    pic_param->raw_coding.flags.field_tx = 0; /* XXX: interlaced frame */
    pic_param->raw_coding.flags.forward_mb = 0; /* XXX: interlaced frame */
    pic_param->raw_coding.flags.ac_pred = v->acpred_is_raw;
    pic_param->raw_coding.flags.overflags = v->overflg_is_raw;
    pic_param->bitplane_present.value = 0; /* reset all bits */
    pic_param->bitplane_present.flags.bp_mv_type_mb = vc1_has_MVTYPEMB_bitplane(v);
    pic_param->bitplane_present.flags.bp_direct_mb = vc1_has_DIRECTMB_bitplane(v);
    pic_param->bitplane_present.flags.bp_skip_mb = vc1_has_SKIPMB_bitplane(v);
    pic_param->bitplane_present.flags.bp_field_tx = 0; /* XXX: interlaced frame */
    pic_param->bitplane_present.flags.bp_forward_mb = 0; /* XXX: interlaced frame */
    pic_param->bitplane_present.flags.bp_ac_pred = vc1_has_ACPRED_bitplane(v);
    pic_param->bitplane_present.flags.bp_overflags = vc1_has_OVERFLAGS_bitplane(v);
    pic_param->reference_fields.value = 0; /* reset all bits */
    pic_param->reference_fields.bits.reference_distance_flag = v->refdist_flag;
    pic_param->reference_fields.bits.reference_distance = 0; /* XXX: interlaced frame */
    pic_param->reference_fields.bits.num_reference_pictures = 0; /* XXX: interlaced frame */
    pic_param->reference_fields.bits.reference_field_pic_indicator = 0; /* XXX: interlaced frame */
    pic_param->mv_fields.value = 0; /* reset all bits */
    pic_param->mv_fields.bits.mv_mode = vc1_get_MVMODE(v);
    pic_param->mv_fields.bits.mv_mode2 = vc1_get_MVMODE2(v);
    pic_param->mv_fields.bits.mv_table = s->mv_table_index;
    pic_param->mv_fields.bits.two_mv_block_pattern_table = 0; /* XXX: interlaced frame */
    pic_param->mv_fields.bits.four_mv_switch = 0; /* XXX: interlaced frame */
    pic_param->mv_fields.bits.four_mv_block_pattern_table = 0; /* XXX: interlaced frame */
    pic_param->mv_fields.bits.extended_mv_flag = v->extended_mv;
    pic_param->mv_fields.bits.extended_mv_range = v->mvrange;
    pic_param->mv_fields.bits.extended_dmv_flag = v->extended_dmv;
    pic_param->mv_fields.bits.extended_dmv_range = 0; /* XXX: interlaced frame */
    pic_param->pic_quantizer_fields.value = 0; /* reset all bits */
    pic_param->pic_quantizer_fields.bits.dquant = v->dquant;
    pic_param->pic_quantizer_fields.bits.quantizer = v->quantizer_mode;
    pic_param->pic_quantizer_fields.bits.half_qp = v->halfpq;
    pic_param->pic_quantizer_fields.bits.pic_quantizer_scale = v->pq;
    pic_param->pic_quantizer_fields.bits.pic_quantizer_type = v->pquantizer;
    pic_param->pic_quantizer_fields.bits.dq_frame = v->dquantfrm;
    pic_param->pic_quantizer_fields.bits.dq_profile = v->dqprofile;
    /* dqsbedge is meaningful only for the matching DQPROFILE */
    pic_param->pic_quantizer_fields.bits.dq_sb_edge =
        v->dqprofile == DQPROFILE_SINGLE_EDGE ? v->dqsbedge : 0;
    pic_param->pic_quantizer_fields.bits.dq_db_edge =
        v->dqprofile == DQPROFILE_DOUBLE_EDGES ? v->dqsbedge : 0;
    pic_param->pic_quantizer_fields.bits.dq_binary_level = v->dqbilevel;
    pic_param->pic_quantizer_fields.bits.alt_pic_quantizer = v->altpq;
    pic_param->transform_fields.value = 0; /* reset all bits */
    pic_param->transform_fields.bits.variable_sized_transform_flag = v->vstransform;
    pic_param->transform_fields.bits.mb_level_transform_type_flag = v->ttmbf;
    pic_param->transform_fields.bits.frame_level_transform_type = vc1_get_TTFRM(v);
    pic_param->transform_fields.bits.transform_ac_codingset_idx1 = v->c_ac_table_index;
    pic_param->transform_fields.bits.transform_ac_codingset_idx2 = v->y_ac_table_index;
    pic_param->transform_fields.bits.intra_transform_dc_table = v->s.dc_table_index;

    /* B pictures reference both directions, P only forward. */
    switch (s->pict_type) {
    case AV_PICTURE_TYPE_B:
        pic_param->backward_reference_picture = ff_vaapi_get_surface_id(s->next_picture.f);
        // fall-through
    case AV_PICTURE_TYPE_P:
        pic_param->forward_reference_picture = ff_vaapi_get_surface_id(s->last_picture.f);
        break;
    }

    if (pic_param->bitplane_present.value) {
        uint8_t *bitplane;
        const uint8_t *ff_bp[3];
        int x, y, n;

        /* Select which decoder-side planes feed the three VA bitplanes,
         * depending on picture type. */
        switch (s->pict_type) {
        case AV_PICTURE_TYPE_P:
            ff_bp[0] = pic_param->bitplane_present.flags.bp_direct_mb ? v->direct_mb_plane : NULL;
            ff_bp[1] = pic_param->bitplane_present.flags.bp_skip_mb ? s->mbskip_table : NULL;
            ff_bp[2] = pic_param->bitplane_present.flags.bp_mv_type_mb ? v->mv_type_mb_plane : NULL;
            break;
        case AV_PICTURE_TYPE_B:
            if (!v->bi_type) {
                ff_bp[0] = pic_param->bitplane_present.flags.bp_direct_mb ? v->direct_mb_plane : NULL;
                ff_bp[1] = pic_param->bitplane_present.flags.bp_skip_mb ? s->mbskip_table : NULL;
                ff_bp[2] = NULL; /* XXX: interlaced frame (FORWARD plane) */
                break;
            }
            /* fall-through (BI-type) */
        case AV_PICTURE_TYPE_I:
            ff_bp[0] = NULL; /* XXX: interlaced frame (FIELDTX plane) */
            ff_bp[1] = pic_param->bitplane_present.flags.bp_ac_pred ? v->acpred_plane : NULL;
            ff_bp[2] = pic_param->bitplane_present.flags.bp_overflags ? v->over_flags_plane : NULL;
            break;
        default:
            ff_bp[0] = NULL;
            ff_bp[1] = NULL;
            ff_bp[2] = NULL;
            break;
        }

        /* one nibble per macroblock -> two macroblocks per byte */
        bitplane = ff_vaapi_alloc_bitplane(vactx, (s->mb_width * s->mb_height + 1) / 2);
        if (!bitplane)
            return -1;

        n = 0;
        for (y = 0; y < s->mb_height; y++)
            for (x = 0; x < s->mb_width; x++, n++)
                vc1_pack_bitplanes(bitplane, n, ff_bp, x, y, s->mb_stride);
        if (n & 1) /* move last nibble to the high order */
            bitplane[n/2] <<= 4;
    }
    return 0;
}
0 | static int read_huffman_tables(HYuvContext *s, uint8_t *src, int length){ GetBitContext gb; int i; init_get_bits(&gb, src, length); for(i=0; i<3; i++){ read_len_table(s->len[i], &gb); if(generate_bits_table(s->bits[i], s->len[i])<0){ return -1; } #if 0 for(j=0; j<256; j++){ printf("%6X, %2d, %3d\n", s->bits[i][j], s->len[i][j], j); } #endif init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4); } return 0; } | 25,243 |
1 | static void test_source_notify(void) { while (g_main_context_iteration(NULL, false)); aio_notify(ctx); g_assert(g_main_context_iteration(NULL, true)); g_assert(!g_main_context_iteration(NULL, false)); } | 25,244 |
1 | static void virtio_crypto_dataq_bh(void *opaque) { VirtIOCryptoQueue *q = opaque; VirtIOCrypto *vcrypto = q->vcrypto; VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto); /* This happens when device was stopped but BH wasn't. */ if (!vdev->vm_running) { return; } /* Just in case the driver is not ready on more */ if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) { return; } virtio_crypto_handle_dataq(vdev, q->dataq); virtio_queue_set_notification(q->dataq, 1); } | 25,246 |
/* Load (or generate) a device tree blob, patch in memory/cmdline/initrd
 * properties, and install it at guest address 'addr' as a ROM image.
 *
 * @param addr        guest physical address for the DTB
 * @param binfo       board boot info; supplies either dtb_filename or a
 *                    get_dtb() callback, plus RAM/initrd/cmdline data
 * @param addr_limit  upper bound for the DTB placement; if the blob would
 *                    not fit below it, 0 is returned (caller decides
 *                    whether that is an error)
 * @return the DTB size on success, 0 if it would exceed addr_limit,
 *         -1 on any failure
 */
static int load_dtb(hwaddr addr, const struct arm_boot_info *binfo,
                    hwaddr addr_limit)
{
    void *fdt = NULL;
    int size, rc;
    uint32_t acells, scells;

    if (binfo->dtb_filename) {
        char *filename;
        filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, binfo->dtb_filename);
        if (!filename) {
            fprintf(stderr, "Couldn't open dtb file %s\n", binfo->dtb_filename);
            goto fail;
        }

        fdt = load_device_tree(filename, &size);
        if (!fdt) {
            fprintf(stderr, "Couldn't open dtb file %s\n", filename);
            g_free(filename);
            goto fail;
        }
        g_free(filename);
    } else if (binfo->get_dtb) {
        /* board-generated device tree */
        fdt = binfo->get_dtb(binfo, &size);
        if (!fdt) {
            fprintf(stderr, "Board was unable to create a dtb blob\n");
            goto fail;
        }
    }

    if (addr_limit > addr && size > (addr_limit - addr)) {
        /* Installing the device tree blob at addr would exceed addr_limit.
         * Whether this constitutes failure is up to the caller to decide,
         * so just return 0 as size, i.e., no error. */
        g_free(fdt);
        return 0;
    }

    acells = qemu_fdt_getprop_cell(fdt, "/", "#address-cells");
    scells = qemu_fdt_getprop_cell(fdt, "/", "#size-cells");
    if (acells == 0 || scells == 0) {
        fprintf(stderr, "dtb file invalid (#address-cells or #size-cells 0)\n");
        goto fail;
    }

    if (scells < 2 && binfo->ram_size >= (1ULL << 32)) {
        /* This is user error so deserves a friendlier error message
         * than the failure of setprop_sized_cells would provide */
        fprintf(stderr, "qemu: dtb file not compatible with "
                "RAM size > 4GB\n");
        goto fail;
    }

    /* advertise the RAM region to the guest */
    rc = qemu_fdt_setprop_sized_cells(fdt, "/memory", "reg",
                                      acells, binfo->loader_start,
                                      scells, binfo->ram_size);
    if (rc < 0) {
        fprintf(stderr, "couldn't set /memory/reg\n");
        goto fail;
    }

    if (binfo->kernel_cmdline && *binfo->kernel_cmdline) {
        rc = qemu_fdt_setprop_string(fdt, "/chosen", "bootargs",
                                     binfo->kernel_cmdline);
        if (rc < 0) {
            fprintf(stderr, "couldn't set /chosen/bootargs\n");
            goto fail;
        }
    }

    if (binfo->initrd_size) {
        rc = qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-start",
                                   binfo->initrd_start);
        if (rc < 0) {
            fprintf(stderr, "couldn't set /chosen/linux,initrd-start\n");
            goto fail;
        }

        rc = qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-end",
                                   binfo->initrd_start + binfo->initrd_size);
        if (rc < 0) {
            fprintf(stderr, "couldn't set /chosen/linux,initrd-end\n");
            goto fail;
        }
    }

    if (binfo->modify_dtb) {
        /* give the board a last chance to tweak the tree */
        binfo->modify_dtb(binfo, fdt);
    }

    qemu_fdt_dumpdtb(fdt, size);

    /* Put the DTB into the memory map as a ROM image: this will ensure
     * the DTB is copied again upon reset, even if addr points into RAM.
     */
    rom_add_blob_fixed("dtb", fdt, size, addr);

    g_free(fdt);

    return size;

fail:
    g_free(fdt);
    return -1;
}
/* Wrap an existing datagram socket fd in a NetSocketState.
 *
 * When 'is_connected' is set, the fd is expected to be a bound multicast
 * socket: its bound address is "learned" as the datagram destination, and
 * the socket is cloned so this process gets its own copy (see the original
 * comment below).  On any error the fd is closed and NULL is returned. */
static NetSocketState *net_socket_fd_init_dgram(NetClientState *peer,
                                                const char *model,
                                                const char *name,
                                                int fd, int is_connected)
{
    struct sockaddr_in saddr;
    int newfd;
    socklen_t saddr_len = sizeof(saddr);
    NetClientState *nc;
    NetSocketState *s;

    /* fd passed: multicast: "learn" dgram_dst address from bound address and save it
     * Because this may be "shared" socket from a "master" process, datagrams would be recv()
     * by ONLY ONE process: we must "clone" this dgram socket --jjo
     */

    if (is_connected) {
        if (getsockname(fd, (struct sockaddr *) &saddr, &saddr_len) == 0) {
            /* must be bound */
            if (saddr.sin_addr.s_addr == 0) {
                fprintf(stderr, "qemu: error: init_dgram: fd=%d unbound, "
                        "cannot setup multicast dst addr\n", fd);
                goto err;
            }
            /* clone dgram socket */
            newfd = net_socket_mcast_create(&saddr, NULL);
            if (newfd < 0) {
                /* error already reported by net_socket_mcast_create() */
                goto err;
            }
            /* clone newfd to fd, close newfd */
            dup2(newfd, fd);
            close(newfd);

        } else {
            fprintf(stderr,
                    "qemu: error: init_dgram: fd=%d failed getsockname(): %s\n",
                    fd, strerror(errno));
            goto err;
        }
    }

    nc = qemu_new_net_client(&net_dgram_socket_info, peer, model, name);

    snprintf(nc->info_str, sizeof(nc->info_str),
             "socket: fd=%d (%s mcast=%s:%d)",
             fd, is_connected ? "cloned" : "",
             inet_ntoa(saddr.sin_addr), ntohs(saddr.sin_port));

    s = DO_UPCAST(NetSocketState, nc, nc);

    s->fd = fd;
    s->listen_fd = -1;
    s->send_fn = net_socket_send_dgram;
    net_socket_read_poll(s, true);

    /* mcast: save bound address as dst */
    if (is_connected) {
        s->dgram_dst = saddr;
    }

    return s;

err:
    closesocket(fd);
    return NULL;
}
/* Demux one packet from a Wing Commander III movie file.
 *
 * Iterates over 16-bit-aligned chunks until a video (VGA_) or audio (AUDI)
 * chunk is emitted as an AVPacket.  SHOT chunks switch the active palette,
 * TEXT chunks hold three subtitle strings (logged at debug level), and
 * BRCH chunks are ignored.  The pts counter advances once per audio chunk.
 *
 * Returns 0 / packet size semantics of av_get_packet on success, or a
 * negative AVERROR code on EOF, bad palette index, short read, or an
 * unrecognized chunk tag. */
static int wc3_read_packet(AVFormatContext *s,
                           AVPacket *pkt)
{
    Wc3DemuxContext *wc3 = s->priv_data;
    ByteIOContext *pb = s->pb;
    unsigned int fourcc_tag;
    unsigned int size;
    int packet_read = 0;
    int ret = 0;
    unsigned char text[1024];
    unsigned int palette_number;
    int i;
    unsigned char r, g, b;
    int base_palette_index;

    while (!packet_read) {

        fourcc_tag = get_le32(pb);
        /* chunk sizes are 16-bit aligned */
        size = (get_be32(pb) + 1) & (~1);
        if (url_feof(pb))
            return AVERROR(EIO);

        switch (fourcc_tag) {

        case BRCH_TAG:
            /* no-op */
            break;

        case SHOT_TAG:
            /* load up new palette */
            palette_number = get_le32(pb);
            if (palette_number >= wc3->palette_count)
                return AVERROR_INVALIDDATA;
            base_palette_index = palette_number * PALETTE_COUNT * 3;
            for (i = 0; i < PALETTE_COUNT; i++) {
                r = wc3->palettes[base_palette_index + i * 3 + 0];
                g = wc3->palettes[base_palette_index + i * 3 + 1];
                b = wc3->palettes[base_palette_index + i * 3 + 2];
                wc3->palette_control.palette[i] = (r << 16) | (g << 8) | (b);
            }
            wc3->palette_control.palette_changed = 1;
            break;

        case VGA__TAG:
            /* send out video chunk */
            ret= av_get_packet(pb, pkt, size);
            pkt->stream_index = wc3->video_stream_index;
            pkt->pts = wc3->pts;
            packet_read = 1;
            break;

        case TEXT_TAG:
            /* subtitle chunk */
#if 0
            url_fseek(pb, size, SEEK_CUR);
#else
            if ((unsigned)size > sizeof(text) || (ret = get_buffer(pb, text, size)) != size)
                ret = AVERROR(EIO);
            else {
                /* three length-prefixed strings: English, German, French */
                int i = 0;
                av_log (s, AV_LOG_DEBUG, "Subtitle time!\n");
                av_log (s, AV_LOG_DEBUG, "  inglish: %s\n", &text[i + 1]);
                i += text[i] + 1;
                av_log (s, AV_LOG_DEBUG, "  doytsch: %s\n", &text[i + 1]);
                i += text[i] + 1;
                av_log (s, AV_LOG_DEBUG, "  fronsay: %s\n", &text[i + 1]);
            }
#endif
            break;

        case AUDI_TAG:
            /* send out audio chunk */
            ret= av_get_packet(pb, pkt, size);
            pkt->stream_index = wc3->audio_stream_index;
            pkt->pts = wc3->pts;

            /* time to advance pts */
            wc3->pts++;

            packet_read = 1;
            break;

        default:
            av_log (s, AV_LOG_ERROR, "  unrecognized WC3 chunk: %c%c%c%c (0x%02X%02X%02X%02X)\n",
                (uint8_t)fourcc_tag,
                (uint8_t)(fourcc_tag >> 8),
                (uint8_t)(fourcc_tag >> 16),
                (uint8_t)(fourcc_tag >> 24),
                (uint8_t)fourcc_tag,
                (uint8_t)(fourcc_tag >> 8),
                (uint8_t)(fourcc_tag >> 16),
                (uint8_t)(fourcc_tag >> 24));
            ret = AVERROR_INVALIDDATA;
            packet_read = 1;
            break;
        }
    }

    return ret;
}
1 | void hmp_info_memdev(Monitor *mon, const QDict *qdict) { Error *err = NULL; MemdevList *memdev_list = qmp_query_memdev(&err); MemdevList *m = memdev_list; StringOutputVisitor *ov; char *str; int i = 0; while (m) { ov = string_output_visitor_new(false); visit_type_uint16List(string_output_get_visitor(ov), &m->value->host_nodes, NULL, NULL); monitor_printf(mon, "memory backend: %d\n", i); monitor_printf(mon, " size: %" PRId64 "\n", m->value->size); monitor_printf(mon, " merge: %s\n", m->value->merge ? "true" : "false"); monitor_printf(mon, " dump: %s\n", m->value->dump ? "true" : "false"); monitor_printf(mon, " prealloc: %s\n", m->value->prealloc ? "true" : "false"); monitor_printf(mon, " policy: %s\n", HostMemPolicy_lookup[m->value->policy]); str = string_output_get_string(ov); monitor_printf(mon, " host nodes: %s\n", str); g_free(str); string_output_visitor_cleanup(ov); m = m->next; i++; } monitor_printf(mon, "\n"); } | 25,251 |
/*
 * Suspend the guest, preferring the pm-utils helper @pmutils_bin and
 * falling back to writing @sysfile_str into LINUX_SYS_STATE_FILE
 * (/sys/power/state style) when pm-utils is unavailable or exec fails.
 *
 * The actual suspend runs in a forked child so this process survives; the
 * child detaches (setsid) and redirects stdio to /dev/null first.
 *
 * @sysfile_str: sysfs state string (e.g. "mem"); NULL disables the manual
 *               fallback, in which case the child exits with failure.
 * @err:         set to QERR_UNDEFINED_ERROR only when fork() itself fails.
 */
static void guest_suspend(const char *pmutils_bin, const char *sysfile_str,
                          Error **err)
{
    pid_t pid;
    char *pmutils_path;

    /* may return NULL when the binary is not on PATH; handled below */
    pmutils_path = g_find_program_in_path(pmutils_bin);

    pid = fork();
    if (pid == 0) {
        /* child */
        int fd;

        setsid();
        reopen_fd_to_null(0);
        reopen_fd_to_null(1);
        reopen_fd_to_null(2);

        if (pmutils_path) {
            /* only returns on failure */
            execle(pmutils_path, pmutils_bin, NULL, environ);
        }

        /*
         * If we get here either pm-utils is not installed or execle() has
         * failed. Let's try the manual method if the caller wants it.
         */

        if (!sysfile_str) {
            _exit(EXIT_FAILURE);
        }

        fd = open(LINUX_SYS_STATE_FILE, O_WRONLY);
        if (fd < 0) {
            _exit(EXIT_FAILURE);
        }

        if (write(fd, sysfile_str, strlen(sysfile_str)) < 0) {
            _exit(EXIT_FAILURE);
        }

        /* _exit (not exit) so no parent atexit/stdio state runs twice */
        _exit(EXIT_SUCCESS);
    }

    g_free(pmutils_path);

    if (pid < 0) {
        error_set(err, QERR_UNDEFINED_ERROR);
        return;
    }

    /* NOTE(review): the parent does not wait for the child, so the child's
     * exit status (and thus suspend failure) is not reported here —
     * presumably it is reaped by a SIGCHLD handler elsewhere; confirm. */
}
1 | static void test_qemu_strtoull_max(void) { const char *str = g_strdup_printf("%llu", ULLONG_MAX); char f = 'X'; const char *endptr = &f; uint64_t res = 999; int err; err = qemu_strtoull(str, &endptr, 0, &res); g_assert_cmpint(err, ==, 0); g_assert_cmpint(res, ==, ULLONG_MAX); g_assert(endptr == str + strlen(str)); } | 25,254 |
1 | static int rate_control(AVCodecContext *avctx, void *arg) { SliceArgs *slice_dat = arg; VC2EncContext *s = slice_dat->ctx; const int sx = slice_dat->x; const int sy = slice_dat->y; int quant_buf[2], bits_buf[2], quant = s->q_start, range = s->q_start/3; const int64_t top = slice_dat->bits_ceil; const double percent = s->tolerance; const double bottom = top - top*(percent/100.0f); int bits = count_hq_slice(s, sx, sy, quant); range -= range & 1; /* Make it an even number */ while ((bits > top) || (bits < bottom)) { range *= bits > top ? +1 : -1; quant = av_clip(quant + range, 0, s->q_ceil); bits = count_hq_slice(s, sx, sy, quant); range = av_clip(range/2, 1, s->q_ceil); if (quant_buf[1] == quant) { quant = bits_buf[0] < bits ? quant_buf[0] : quant; bits = bits_buf[0] < bits ? bits_buf[0] : bits; break; } quant_buf[1] = quant_buf[0]; quant_buf[0] = quant; bits_buf[1] = bits_buf[0]; bits_buf[0] = bits; } slice_dat->quant_idx = av_clip(quant, 0, s->q_ceil); slice_dat->bytes = FFALIGN((bits >> 3), s->size_scaler) + 4 + s->prefix_bytes; return 0; } | 25,255 |
1 | static int dmg_read_resource_fork(BlockDriverState *bs, DmgHeaderState *ds, uint64_t info_begin, uint64_t info_length) { BDRVDMGState *s = bs->opaque; int ret; uint32_t count, rsrc_data_offset; uint8_t *buffer = NULL; uint64_t info_end; uint64_t offset; /* read offset from begin of resource fork (info_begin) to resource data */ ret = read_uint32(bs, info_begin, &rsrc_data_offset); if (ret < 0) { goto fail; } else if (rsrc_data_offset > info_length) { ret = -EINVAL; goto fail; } /* read length of resource data */ ret = read_uint32(bs, info_begin + 8, &count); if (ret < 0) { goto fail; } else if (count == 0 || rsrc_data_offset + count > info_length) { ret = -EINVAL; goto fail; } /* begin of resource data (consisting of one or more resources) */ offset = info_begin + rsrc_data_offset; /* end of resource data (there is possibly a following resource map * which will be ignored). */ info_end = offset + count; /* read offsets (mish blocks) from one or more resources in resource data */ while (offset < info_end) { /* size of following resource */ ret = read_uint32(bs, offset, &count); if (ret < 0) { goto fail; } else if (count == 0) { ret = -EINVAL; goto fail; } offset += 4; buffer = g_realloc(buffer, count); ret = bdrv_pread(bs->file, offset, buffer, count); if (ret < 0) { goto fail; } ret = dmg_read_mish_block(s, ds, buffer, count); if (ret < 0) { goto fail; } /* advance offset by size of resource */ offset += count; } ret = 0; fail: g_free(buffer); return ret; } | 25,257 |
1 | static int ogg_read_page(AVFormatContext *s, int *str) { AVIOContext *bc = s->pb; struct ogg *ogg = s->priv_data; struct ogg_stream *os; int ret, i = 0; int flags, nsegs; uint64_t gp; uint32_t serial; int size, idx; uint8_t sync[4]; int sp = 0; ret = avio_read(bc, sync, 4); if (ret < 4) return ret < 0 ? ret : AVERROR_EOF; do{ int c; if (sync[sp & 3] == 'O' && sync[(sp + 1) & 3] == 'g' && sync[(sp + 2) & 3] == 'g' && sync[(sp + 3) & 3] == 'S') break; c = avio_r8(bc); if (bc->eof_reached) return AVERROR_EOF; sync[sp++ & 3] = c; }while (i++ < MAX_PAGE_SIZE); if (i >= MAX_PAGE_SIZE){ av_log (s, AV_LOG_INFO, "ogg, can't find sync word\n"); return AVERROR_INVALIDDATA; } if (avio_r8(bc) != 0) /* version */ return AVERROR_INVALIDDATA; flags = avio_r8(bc); gp = avio_rl64 (bc); serial = avio_rl32 (bc); avio_skip(bc, 8); /* seq, crc */ nsegs = avio_r8(bc); idx = ogg_find_stream (ogg, serial); if (idx < 0){ if (ogg->headers) { int n; for (n = 0; n < ogg->nstreams; n++) { av_freep(&ogg->streams[n].buf); if (!ogg->state || ogg->state->streams[n].private != ogg->streams[n].private) av_freep(&ogg->streams[n].private); } ogg->curidx = -1; ogg->nstreams = 0; idx = ogg_new_stream(s, serial, 0); } else { idx = ogg_new_stream(s, serial, 1); } if (idx < 0) return idx; } os = ogg->streams + idx; os->page_pos = avio_tell(bc) - 27; if(os->psize > 0) ogg_new_buf(ogg, idx); ret = avio_read(bc, os->segments, nsegs); if (ret < nsegs) return ret < 0 ? 
ret : AVERROR_EOF; os->nsegs = nsegs; os->segp = 0; size = 0; for (i = 0; i < nsegs; i++) size += os->segments[i]; if (flags & OGG_FLAG_CONT || os->incomplete){ if (!os->psize){ while (os->segp < os->nsegs){ int seg = os->segments[os->segp++]; os->pstart += seg; if (seg < 255) break; } os->sync_pos = os->page_pos; } }else{ os->psize = 0; os->sync_pos = os->page_pos; } if (os->bufsize - os->bufpos < size){ uint8_t *nb = av_malloc (os->bufsize *= 2); memcpy (nb, os->buf, os->bufpos); av_free (os->buf); os->buf = nb; } ret = avio_read(bc, os->buf + os->bufpos, size); if (ret < size) return ret < 0 ? ret : AVERROR_EOF; os->bufpos += size; os->granule = gp; os->flags = flags; if (str) *str = idx; return 0; } | 25,258 |
Subsets and Splits