Dataset schema (five fields per row, appearing below in this order for every sample):

  project     string, 2 distinct values (FFmpeg, qemu)
  commit_id   string, fixed length 40 (a full git commit hash)
  target      int64, values 0 or 1
  func        string, length 26 to 142k characters (one C function per row, stored as a single flattened string)
  idx         int64, values 0 to 27.3k (sample index)
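As a minimal sketch of how rows with this schema could be consumed, assume the samples below have been exported to a JSON-lines file with one object per row and the five fields named above. The file name "defects.jsonl" and the helper functions are hypothetical illustrations, not part of the dataset; the meaning of the binary target label follows the dataset's own documentation.

import json
from collections import Counter

def iter_rows(path):
    """Yield one dict per dataset row: project, commit_id, target, func, idx."""
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            if line.strip():
                yield json.loads(line)

def summarize(path):
    """Count rows per (project, target) pair, mirroring the schema above."""
    counts = Counter()
    for row in iter_rows(path):
        assert len(row["commit_id"]) == 40   # full git SHA-1, per the schema
        assert row["target"] in (0, 1)       # binary label, 0 or 1
        counts[(row["project"], row["target"])] += 1
    return counts

if __name__ == "__main__":
    for (project, target), n in sorted(summarize("defects.jsonl").items()):
        print(f"{project:8s} target={target}: {n} functions")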
FFmpeg
e6c90ce94f1b07f50cea2babf7471af455cca0ff
0
static av_always_inline void h264_filter_mb_fast_internal(H264Context *h, H264SliceContext *sl, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize, int pixel_shift) { int chroma = !(CONFIG_GRAY && (h->flags&CODEC_FLAG_GRAY)); int chroma444 = CHROMA444(h); int chroma422 = CHROMA422(h); int mb_xy = h->mb_xy; int left_type = sl->left_type[LTOP]; int top_type = sl->top_type; int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8); int a = 52 + h->slice_alpha_c0_offset - qp_bd_offset; int b = 52 + h->slice_beta_offset - qp_bd_offset; int mb_type = h->cur_pic.mb_type[mb_xy]; int qp = h->cur_pic.qscale_table[mb_xy]; int qp0 = h->cur_pic.qscale_table[mb_xy - 1]; int qp1 = h->cur_pic.qscale_table[sl->top_mb_xy]; int qpc = get_chroma_qp( h, 0, qp ); int qpc0 = get_chroma_qp( h, 0, qp0 ); int qpc1 = get_chroma_qp( h, 0, qp1 ); qp0 = (qp + qp0 + 1) >> 1; qp1 = (qp + qp1 + 1) >> 1; qpc0 = (qpc + qpc0 + 1) >> 1; qpc1 = (qpc + qpc1 + 1) >> 1; if( IS_INTRA(mb_type) ) { static const int16_t bS4[4] = {4,4,4,4}; static const int16_t bS3[4] = {3,3,3,3}; const int16_t *bSH = FIELD_PICTURE(h) ? bS3 : bS4; if(left_type) filter_mb_edgev( &img_y[4*0<<pixel_shift], linesize, bS4, qp0, a, b, h, 1); if( IS_8x8DCT(mb_type) ) { filter_mb_edgev( &img_y[4*2<<pixel_shift], linesize, bS3, qp, a, b, h, 0); if(top_type){ filter_mb_edgeh( &img_y[4*0*linesize], linesize, bSH, qp1, a, b, h, 1); } filter_mb_edgeh( &img_y[4*2*linesize], linesize, bS3, qp, a, b, h, 0); } else { filter_mb_edgev( &img_y[4*1<<pixel_shift], linesize, bS3, qp, a, b, h, 0); filter_mb_edgev( &img_y[4*2<<pixel_shift], linesize, bS3, qp, a, b, h, 0); filter_mb_edgev( &img_y[4*3<<pixel_shift], linesize, bS3, qp, a, b, h, 0); if(top_type){ filter_mb_edgeh( &img_y[4*0*linesize], linesize, bSH, qp1, a, b, h, 1); } filter_mb_edgeh( &img_y[4*1*linesize], linesize, bS3, qp, a, b, h, 0); filter_mb_edgeh( &img_y[4*2*linesize], linesize, bS3, qp, a, b, h, 0); filter_mb_edgeh( &img_y[4*3*linesize], linesize, bS3, qp, a, b, h, 0); } if(chroma){ if(chroma444){ if(left_type){ filter_mb_edgev( &img_cb[4*0<<pixel_shift], linesize, bS4, qpc0, a, b, h, 1); filter_mb_edgev( &img_cr[4*0<<pixel_shift], linesize, bS4, qpc0, a, b, h, 1); } if( IS_8x8DCT(mb_type) ) { filter_mb_edgev( &img_cb[4*2<<pixel_shift], linesize, bS3, qpc, a, b, h, 0); filter_mb_edgev( &img_cr[4*2<<pixel_shift], linesize, bS3, qpc, a, b, h, 0); if(top_type){ filter_mb_edgeh( &img_cb[4*0*linesize], linesize, bSH, qpc1, a, b, h, 1 ); filter_mb_edgeh( &img_cr[4*0*linesize], linesize, bSH, qpc1, a, b, h, 1 ); } filter_mb_edgeh( &img_cb[4*2*linesize], linesize, bS3, qpc, a, b, h, 0); filter_mb_edgeh( &img_cr[4*2*linesize], linesize, bS3, qpc, a, b, h, 0); } else { filter_mb_edgev( &img_cb[4*1<<pixel_shift], linesize, bS3, qpc, a, b, h, 0); filter_mb_edgev( &img_cr[4*1<<pixel_shift], linesize, bS3, qpc, a, b, h, 0); filter_mb_edgev( &img_cb[4*2<<pixel_shift], linesize, bS3, qpc, a, b, h, 0); filter_mb_edgev( &img_cr[4*2<<pixel_shift], linesize, bS3, qpc, a, b, h, 0); filter_mb_edgev( &img_cb[4*3<<pixel_shift], linesize, bS3, qpc, a, b, h, 0); filter_mb_edgev( &img_cr[4*3<<pixel_shift], linesize, bS3, qpc, a, b, h, 0); if(top_type){ filter_mb_edgeh( &img_cb[4*0*linesize], linesize, bSH, qpc1, a, b, h, 1); filter_mb_edgeh( &img_cr[4*0*linesize], linesize, bSH, qpc1, a, b, h, 1); } filter_mb_edgeh( &img_cb[4*1*linesize], linesize, bS3, qpc, a, b, h, 0); filter_mb_edgeh( &img_cr[4*1*linesize], linesize, bS3, qpc, a, b, h, 0); filter_mb_edgeh( 
&img_cb[4*2*linesize], linesize, bS3, qpc, a, b, h, 0); filter_mb_edgeh( &img_cr[4*2*linesize], linesize, bS3, qpc, a, b, h, 0); filter_mb_edgeh( &img_cb[4*3*linesize], linesize, bS3, qpc, a, b, h, 0); filter_mb_edgeh( &img_cr[4*3*linesize], linesize, bS3, qpc, a, b, h, 0); } }else if(chroma422){ if(left_type){ filter_mb_edgecv(&img_cb[2*0<<pixel_shift], uvlinesize, bS4, qpc0, a, b, h, 1); filter_mb_edgecv(&img_cr[2*0<<pixel_shift], uvlinesize, bS4, qpc0, a, b, h, 1); } filter_mb_edgecv(&img_cb[2*2<<pixel_shift], uvlinesize, bS3, qpc, a, b, h, 0); filter_mb_edgecv(&img_cr[2*2<<pixel_shift], uvlinesize, bS3, qpc, a, b, h, 0); if(top_type){ filter_mb_edgech(&img_cb[4*0*uvlinesize], uvlinesize, bSH, qpc1, a, b, h, 1); filter_mb_edgech(&img_cr[4*0*uvlinesize], uvlinesize, bSH, qpc1, a, b, h, 1); } filter_mb_edgech(&img_cb[4*1*uvlinesize], uvlinesize, bS3, qpc, a, b, h, 0); filter_mb_edgech(&img_cr[4*1*uvlinesize], uvlinesize, bS3, qpc, a, b, h, 0); filter_mb_edgech(&img_cb[4*2*uvlinesize], uvlinesize, bS3, qpc, a, b, h, 0); filter_mb_edgech(&img_cr[4*2*uvlinesize], uvlinesize, bS3, qpc, a, b, h, 0); filter_mb_edgech(&img_cb[4*3*uvlinesize], uvlinesize, bS3, qpc, a, b, h, 0); filter_mb_edgech(&img_cr[4*3*uvlinesize], uvlinesize, bS3, qpc, a, b, h, 0); }else{ if(left_type){ filter_mb_edgecv( &img_cb[2*0<<pixel_shift], uvlinesize, bS4, qpc0, a, b, h, 1); filter_mb_edgecv( &img_cr[2*0<<pixel_shift], uvlinesize, bS4, qpc0, a, b, h, 1); } filter_mb_edgecv( &img_cb[2*2<<pixel_shift], uvlinesize, bS3, qpc, a, b, h, 0); filter_mb_edgecv( &img_cr[2*2<<pixel_shift], uvlinesize, bS3, qpc, a, b, h, 0); if(top_type){ filter_mb_edgech( &img_cb[2*0*uvlinesize], uvlinesize, bSH, qpc1, a, b, h, 1); filter_mb_edgech( &img_cr[2*0*uvlinesize], uvlinesize, bSH, qpc1, a, b, h, 1); } filter_mb_edgech( &img_cb[2*2*uvlinesize], uvlinesize, bS3, qpc, a, b, h, 0); filter_mb_edgech( &img_cr[2*2*uvlinesize], uvlinesize, bS3, qpc, a, b, h, 0); } } return; } else { LOCAL_ALIGNED_8(int16_t, bS, [2], [4][4]); int edges; if( IS_8x8DCT(mb_type) && (sl->cbp&7) == 7 && !chroma444 ) { edges = 4; AV_WN64A(bS[0][0], 0x0002000200020002ULL); AV_WN64A(bS[0][2], 0x0002000200020002ULL); AV_WN64A(bS[1][0], 0x0002000200020002ULL); AV_WN64A(bS[1][2], 0x0002000200020002ULL); } else { int mask_edge1 = (3*(((5*mb_type)>>5)&1)) | (mb_type>>4); //(mb_type & (MB_TYPE_16x16 | MB_TYPE_8x16)) ? 3 : (mb_type & MB_TYPE_16x8) ? 1 : 0; int mask_edge0 = 3*((mask_edge1>>1) & ((5*left_type)>>5)&1); // (mb_type & (MB_TYPE_16x16 | MB_TYPE_8x16)) && (h->left_type[LTOP] & (MB_TYPE_16x16 | MB_TYPE_8x16)) ? 3 : 0; int step = 1+(mb_type>>24); //IS_8x8DCT(mb_type) ? 2 : 1; edges = 4 - 3*((mb_type>>3) & !(sl->cbp & 15)); //(mb_type & MB_TYPE_16x16) && !(h->cbp & 15) ? 1 : 4; h->h264dsp.h264_loop_filter_strength(bS, sl->non_zero_count_cache, sl->ref_cache, sl->mv_cache, sl->list_count==2, edges, step, mask_edge0, mask_edge1, FIELD_PICTURE(h)); } if( IS_INTRA(left_type) ) AV_WN64A(bS[0][0], 0x0004000400040004ULL); if( IS_INTRA(top_type) ) AV_WN64A(bS[1][0], FIELD_PICTURE(h) ? 0x0003000300030003ULL : 0x0004000400040004ULL); #define FILTER(hv,dir,edge,intra)\ if(AV_RN64A(bS[dir][edge])) { \ filter_mb_edge##hv( &img_y[4*edge*(dir?linesize:1<<pixel_shift)], linesize, bS[dir][edge], edge ? qp : qp##dir, a, b, h, intra );\ if(chroma){\ if(chroma444){\ filter_mb_edge##hv( &img_cb[4*edge*(dir?linesize:1<<pixel_shift)], linesize, bS[dir][edge], edge ? 
qpc : qpc##dir, a, b, h, intra );\ filter_mb_edge##hv( &img_cr[4*edge*(dir?linesize:1<<pixel_shift)], linesize, bS[dir][edge], edge ? qpc : qpc##dir, a, b, h, intra );\ } else if(!(edge&1)) {\ filter_mb_edgec##hv( &img_cb[2*edge*(dir?uvlinesize:1<<pixel_shift)], uvlinesize, bS[dir][edge], edge ? qpc : qpc##dir, a, b, h, intra );\ filter_mb_edgec##hv( &img_cr[2*edge*(dir?uvlinesize:1<<pixel_shift)], uvlinesize, bS[dir][edge], edge ? qpc : qpc##dir, a, b, h, intra );\ }\ }\ } if(left_type) FILTER(v,0,0,1); if( edges == 1 ) { if(top_type) FILTER(h,1,0,1); } else if( IS_8x8DCT(mb_type) ) { FILTER(v,0,2,0); if(top_type) FILTER(h,1,0,1); FILTER(h,1,2,0); } else { FILTER(v,0,1,0); FILTER(v,0,2,0); FILTER(v,0,3,0); if(top_type) FILTER(h,1,0,1); FILTER(h,1,1,0); FILTER(h,1,2,0); FILTER(h,1,3,0); } #undef FILTER } }
26,125
qemu
fd56e0612b6454a282fa6a953fdb09281a98c589
0
static void pxb_pcie_dev_realize(PCIDevice *dev, Error **errp) { if (!pci_bus_is_express(dev->bus)) { error_setg(errp, "pxb-pcie devices cannot reside on a PCI bus"); return; } pxb_dev_realize_common(dev, true, errp); }
26,126
qemu
185698715dfb18c82ad2a5dbc169908602d43e81
0
static int float64_is_unordered(int sig, float64 a, float64 b STATUS_PARAM) { if (float64_is_signaling_nan(a) || float64_is_signaling_nan(b) || (sig && (float64_is_nan(a) || float64_is_nan(b)))) { float_raise(float_flag_invalid, status); return 1; } else if (float64_is_nan(a) || float64_is_nan(b)) { return 1; } else { return 0; } }
26,127
qemu
b3db211f3c80bb996a704d665fe275619f728bd4
0
static void qmp_input_type_str(Visitor *v, const char *name, char **obj, Error **errp) { QmpInputVisitor *qiv = to_qiv(v); QObject *qobj = qmp_input_get_object(qiv, name, true, errp); QString *qstr; *obj = NULL; if (!qobj) { return; } qstr = qobject_to_qstring(qobj); if (!qstr) { error_setg(errp, QERR_INVALID_PARAMETER_TYPE, name ? name : "null", "string"); return; } *obj = g_strdup(qstring_get_str(qstr)); }
26,128
qemu
48a402e693cbea9582472159931aa6799a6c80c7
0
static int curl_open(BlockDriverState *bs, const char *filename, int flags) { BDRVCURLState *s = bs->opaque; CURLState *state = NULL; double d; #define RA_OPTSTR ":readahead=" char *file; char *ra; const char *ra_val; int parse_state = 0; static int inited = 0; file = strdup(filename); s->readahead_size = READ_AHEAD_SIZE; /* Parse a trailing ":readahead=#:" param, if present. */ ra = file + strlen(file) - 1; while (ra >= file) { if (parse_state == 0) { if (*ra == ':') parse_state++; else break; } else if (parse_state == 1) { if (*ra > '9' || *ra < '0') { char *opt_start = ra - strlen(RA_OPTSTR) + 1; if (opt_start > file && strncmp(opt_start, RA_OPTSTR, strlen(RA_OPTSTR)) == 0) { ra_val = ra + 1; ra -= strlen(RA_OPTSTR) - 1; *ra = '\0'; s->readahead_size = atoi(ra_val); break; } else { break; } } } ra--; } if ((s->readahead_size & 0x1ff) != 0) { fprintf(stderr, "HTTP_READAHEAD_SIZE %Zd is not a multiple of 512\n", s->readahead_size); goto out_noclean; } if (!inited) { curl_global_init(CURL_GLOBAL_ALL); inited = 1; } DPRINTF("CURL: Opening %s\n", file); s->url = file; state = curl_init_state(s); if (!state) goto out_noclean; // Get file size curl_easy_setopt(state->curl, CURLOPT_NOBODY, 1); curl_easy_setopt(state->curl, CURLOPT_WRITEFUNCTION, (void *)curl_size_cb); if (curl_easy_perform(state->curl)) goto out; curl_easy_getinfo(state->curl, CURLINFO_CONTENT_LENGTH_DOWNLOAD, &d); curl_easy_setopt(state->curl, CURLOPT_WRITEFUNCTION, (void *)curl_read_cb); curl_easy_setopt(state->curl, CURLOPT_NOBODY, 0); if (d) s->len = (size_t)d; else if(!s->len) goto out; DPRINTF("CURL: Size = %lld\n", (long long)s->len); curl_clean_state(state); curl_easy_cleanup(state->curl); state->curl = NULL; // Now we know the file exists and its size, so let's // initialize the multi interface! s->multi = curl_multi_init(); curl_multi_setopt( s->multi, CURLMOPT_SOCKETDATA, s); curl_multi_setopt( s->multi, CURLMOPT_SOCKETFUNCTION, curl_sock_cb ); curl_multi_do(s); return 0; out: fprintf(stderr, "CURL: Error opening file: %s\n", state->errmsg); curl_easy_cleanup(state->curl); state->curl = NULL; out_noclean: qemu_free(file); return -EINVAL; }
26,129
qemu
18b21a2f83a26c3d6a9e7f0bdc4e8eb2b177e8f6
0
static void gen_ldarx(DisasContext *ctx) { TCGv t0; gen_set_access_type(ctx, ACCESS_RES); t0 = tcg_temp_local_new(); gen_addr_reg_index(ctx, t0); gen_check_align(ctx, t0, 0x07); gen_qemu_ld64(ctx, cpu_gpr[rD(ctx->opcode)], t0); tcg_gen_mov_tl(cpu_reserve, t0); tcg_temp_free(t0); }
26,130
qemu
3eff1f46f08a360a4ae9f834ce9fef4c45bf6f0f
0
static void virtio_scsi_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) { VirtIOSCSI *s = (VirtIOSCSI *)vdev; VirtIOSCSIReq *req; while ((req = virtio_scsi_pop_req(s, vq))) { int type; if (iov_to_buf(req->elem.out_sg, req->elem.out_num, 0, &type, sizeof(type)) < sizeof(type)) { virtio_scsi_bad_req(); continue; } tswap32s(&req->req.tmf->type); if (req->req.tmf->type == VIRTIO_SCSI_T_TMF) { if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlTMFReq), sizeof(VirtIOSCSICtrlTMFResp)) < 0) { virtio_scsi_bad_req(); } else { virtio_scsi_do_tmf(s, req); } } else if (req->req.tmf->type == VIRTIO_SCSI_T_AN_QUERY || req->req.tmf->type == VIRTIO_SCSI_T_AN_SUBSCRIBE) { if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlANReq), sizeof(VirtIOSCSICtrlANResp)) < 0) { virtio_scsi_bad_req(); } else { req->resp.an->event_actual = 0; req->resp.an->response = VIRTIO_SCSI_S_OK; } } virtio_scsi_complete_req(req); } }
26,131
qemu
de9b05b807918d40db9e26ddd6a54ad2978ac5b7
1
void arm_cpu_realize(ARMCPU *cpu) { /* This function is called by cpu_arm_init() because it * needs to do common actions based on feature bits, etc * that have been set by the subclass init functions. * When we have QOM realize support it should become * a true realize function instead. */ CPUARMState *env = &cpu->env; /* Some features automatically imply others: */ if (arm_feature(env, ARM_FEATURE_V7)) { set_feature(env, ARM_FEATURE_VAPA); set_feature(env, ARM_FEATURE_THUMB2); set_feature(env, ARM_FEATURE_MPIDR); if (!arm_feature(env, ARM_FEATURE_M)) { set_feature(env, ARM_FEATURE_V6K); } else { set_feature(env, ARM_FEATURE_V6); if (arm_feature(env, ARM_FEATURE_V6K)) { set_feature(env, ARM_FEATURE_V6); set_feature(env, ARM_FEATURE_MVFR); if (arm_feature(env, ARM_FEATURE_V6)) { set_feature(env, ARM_FEATURE_V5); if (!arm_feature(env, ARM_FEATURE_M)) { set_feature(env, ARM_FEATURE_AUXCR); if (arm_feature(env, ARM_FEATURE_V5)) { set_feature(env, ARM_FEATURE_V4T); if (arm_feature(env, ARM_FEATURE_M)) { set_feature(env, ARM_FEATURE_THUMB_DIV); if (arm_feature(env, ARM_FEATURE_ARM_DIV)) { set_feature(env, ARM_FEATURE_THUMB_DIV); if (arm_feature(env, ARM_FEATURE_VFP4)) { set_feature(env, ARM_FEATURE_VFP3); if (arm_feature(env, ARM_FEATURE_VFP3)) { set_feature(env, ARM_FEATURE_VFP); register_cp_regs_for_features(cpu);
26,132
FFmpeg
3c4add27f7513f435e9daa03643fd992d5f6bcee
1
static int mpc7_decode_frame(AVCodecContext * avctx, void *data, int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; MPCContext *c = avctx->priv_data; GetBitContext gb; uint8_t *bits; int i, ch; int mb = -1; Band *bands = c->bands; int off, ret; int bits_used, bits_avail; memset(bands, 0, sizeof(*bands) * (c->maxbands + 1)); if(buf_size <= 4){ av_log(avctx, AV_LOG_ERROR, "Too small buffer passed (%i bytes)\n", buf_size); return AVERROR(EINVAL); } /* get output buffer */ c->frame.nb_samples = buf[1] ? c->lastframelen : MPC_FRAME_SIZE; if ((ret = avctx->get_buffer(avctx, &c->frame)) < 0) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return ret; } bits = av_malloc(((buf_size - 1) & ~3) + FF_INPUT_BUFFER_PADDING_SIZE); c->dsp.bswap_buf((uint32_t*)bits, (const uint32_t*)(buf + 4), (buf_size - 4) >> 2); init_get_bits(&gb, bits, (buf_size - 4)* 8); skip_bits_long(&gb, buf[0]); /* read subband indexes */ for(i = 0; i <= c->maxbands; i++){ for(ch = 0; ch < 2; ch++){ int t = 4; if(i) t = get_vlc2(&gb, hdr_vlc.table, MPC7_HDR_BITS, 1) - 5; if(t == 4) bands[i].res[ch] = get_bits(&gb, 4); else bands[i].res[ch] = bands[i-1].res[ch] + t; } if(bands[i].res[0] || bands[i].res[1]){ mb = i; if(c->MSS) bands[i].msf = get_bits1(&gb); } } /* get scale indexes coding method */ for(i = 0; i <= mb; i++) for(ch = 0; ch < 2; ch++) if(bands[i].res[ch]) bands[i].scfi[ch] = get_vlc2(&gb, scfi_vlc.table, MPC7_SCFI_BITS, 1); /* get scale indexes */ for(i = 0; i <= mb; i++){ for(ch = 0; ch < 2; ch++){ if(bands[i].res[ch]){ bands[i].scf_idx[ch][2] = c->oldDSCF[ch][i]; bands[i].scf_idx[ch][0] = get_scale_idx(&gb, bands[i].scf_idx[ch][2]); switch(bands[i].scfi[ch]){ case 0: bands[i].scf_idx[ch][1] = get_scale_idx(&gb, bands[i].scf_idx[ch][0]); bands[i].scf_idx[ch][2] = get_scale_idx(&gb, bands[i].scf_idx[ch][1]); break; case 1: bands[i].scf_idx[ch][1] = get_scale_idx(&gb, bands[i].scf_idx[ch][0]); bands[i].scf_idx[ch][2] = bands[i].scf_idx[ch][1]; break; case 2: bands[i].scf_idx[ch][1] = bands[i].scf_idx[ch][0]; bands[i].scf_idx[ch][2] = get_scale_idx(&gb, bands[i].scf_idx[ch][1]); break; case 3: bands[i].scf_idx[ch][2] = bands[i].scf_idx[ch][1] = bands[i].scf_idx[ch][0]; break; } c->oldDSCF[ch][i] = bands[i].scf_idx[ch][2]; } } } /* get quantizers */ memset(c->Q, 0, sizeof(c->Q)); off = 0; for(i = 0; i < BANDS; i++, off += SAMPLES_PER_BAND) for(ch = 0; ch < 2; ch++) idx_to_quant(c, &gb, bands[i].res[ch], c->Q[ch] + off); ff_mpc_dequantize_and_synth(c, mb, c->frame.data[0], 2); av_free(bits); bits_used = get_bits_count(&gb); bits_avail = (buf_size - 4) * 8; if(!buf[1] && ((bits_avail < bits_used) || (bits_used + 32 <= bits_avail))){ av_log(NULL,0, "Error decoding frame: used %i of %i bits\n", bits_used, bits_avail); return -1; } if(c->frames_to_skip){ c->frames_to_skip--; *got_frame_ptr = 0; return buf_size; } *got_frame_ptr = 1; *(AVFrame *)data = c->frame; return buf_size; }
26,133
qemu
d470ad42acfc73c45d3e8ed5311a491160b4c100
1
static void replication_start(ReplicationState *rs, ReplicationMode mode, Error **errp) { BlockDriverState *bs = rs->opaque; BDRVReplicationState *s; BlockDriverState *top_bs; int64_t active_length, hidden_length, disk_length; AioContext *aio_context; Error *local_err = NULL; BlockJob *job; aio_context = bdrv_get_aio_context(bs); aio_context_acquire(aio_context); s = bs->opaque; if (s->stage != BLOCK_REPLICATION_NONE) { error_setg(errp, "Block replication is running or done"); aio_context_release(aio_context); return; } if (s->mode != mode) { error_setg(errp, "The parameter mode's value is invalid, needs %d," " but got %d", s->mode, mode); aio_context_release(aio_context); return; } switch (s->mode) { case REPLICATION_MODE_PRIMARY: break; case REPLICATION_MODE_SECONDARY: s->active_disk = bs->file; if (!s->active_disk || !s->active_disk->bs || !s->active_disk->bs->backing) { error_setg(errp, "Active disk doesn't have backing file"); aio_context_release(aio_context); return; } s->hidden_disk = s->active_disk->bs->backing; if (!s->hidden_disk->bs || !s->hidden_disk->bs->backing) { error_setg(errp, "Hidden disk doesn't have backing file"); aio_context_release(aio_context); return; } s->secondary_disk = s->hidden_disk->bs->backing; if (!s->secondary_disk->bs || !bdrv_has_blk(s->secondary_disk->bs)) { error_setg(errp, "The secondary disk doesn't have block backend"); aio_context_release(aio_context); return; } /* verify the length */ active_length = bdrv_getlength(s->active_disk->bs); hidden_length = bdrv_getlength(s->hidden_disk->bs); disk_length = bdrv_getlength(s->secondary_disk->bs); if (active_length < 0 || hidden_length < 0 || disk_length < 0 || active_length != hidden_length || hidden_length != disk_length) { error_setg(errp, "Active disk, hidden disk, secondary disk's length" " are not the same"); aio_context_release(aio_context); return; } if (!s->active_disk->bs->drv->bdrv_make_empty || !s->hidden_disk->bs->drv->bdrv_make_empty) { error_setg(errp, "Active disk or hidden disk doesn't support make_empty"); aio_context_release(aio_context); return; } /* reopen the backing file in r/w mode */ reopen_backing_file(bs, true, &local_err); if (local_err) { error_propagate(errp, local_err); aio_context_release(aio_context); return; } /* start backup job now */ error_setg(&s->blocker, "Block device is in use by internal backup job"); top_bs = bdrv_lookup_bs(s->top_id, s->top_id, NULL); if (!top_bs || !bdrv_is_root_node(top_bs) || !check_top_bs(top_bs, bs)) { error_setg(errp, "No top_bs or it is invalid"); reopen_backing_file(bs, false, NULL); aio_context_release(aio_context); return; } bdrv_op_block_all(top_bs, s->blocker); bdrv_op_unblock(top_bs, BLOCK_OP_TYPE_DATAPLANE, s->blocker); job = backup_job_create(NULL, s->secondary_disk->bs, s->hidden_disk->bs, 0, MIRROR_SYNC_MODE_NONE, NULL, false, BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT, BLOCK_JOB_INTERNAL, backup_job_completed, bs, NULL, &local_err); if (local_err) { error_propagate(errp, local_err); backup_job_cleanup(bs); aio_context_release(aio_context); return; } block_job_start(job); break; default: aio_context_release(aio_context); abort(); } s->stage = BLOCK_REPLICATION_RUNNING; if (s->mode == REPLICATION_MODE_SECONDARY) { secondary_do_checkpoint(s, errp); } s->error = 0; aio_context_release(aio_context); }
26,134
FFmpeg
224bb46fb857dab589597bdab302ba8ba012008c
1
uint8_t* ff_AMediaCodec_getOutputBuffer(FFAMediaCodec* codec, size_t idx, size_t *out_size) { uint8_t *ret = NULL; JNIEnv *env = NULL; jobject buffer = NULL; JNI_GET_ENV_OR_RETURN(env, codec, NULL); if (codec->has_get_i_o_buffer) { buffer = (*env)->CallObjectMethod(env, codec->object, codec->jfields.get_output_buffer_id, idx); if (ff_jni_exception_check(env, 1, codec) < 0) { goto fail; } } else { if (!codec->output_buffers) { codec->output_buffers = (*env)->CallObjectMethod(env, codec->object, codec->jfields.get_output_buffers_id); if (ff_jni_exception_check(env, 1, codec) < 0) { goto fail; } codec->output_buffers = (*env)->NewGlobalRef(env, codec->output_buffers); if (ff_jni_exception_check(env, 1, codec) < 0) { goto fail; } } buffer = (*env)->GetObjectArrayElement(env, codec->output_buffers, idx); if (ff_jni_exception_check(env, 1, codec) < 0) { goto fail; } } ret = (*env)->GetDirectBufferAddress(env, buffer); *out_size = (*env)->GetDirectBufferCapacity(env, buffer); fail: if (buffer) { (*env)->DeleteLocalRef(env, buffer); } return ret; }
26,135
qemu
760d88d1d0c409f1afe6f1c91539487413e8b2a9
1
int cpu_get_dump_info(ArchDumpInfo *info, const struct GuestPhysBlockList *guest_phys_blocks) { PowerPCCPU *cpu = POWERPC_CPU(first_cpu); PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); info->d_machine = EM_PPC64; info->d_class = ELFCLASS64; if ((*pcc->interrupts_big_endian)(cpu)) { info->d_endian = ELFDATA2MSB; } else { info->d_endian = ELFDATA2LSB; return 0;
26,137
FFmpeg
dfbb5de172b3a0373cbead8a966c41f5ba1ae08b
1
static int try_decode_video_frame(AVCodecContext *codec_ctx, AVPacket *pkt, int decode) { int ret = 0; int got_frame = 0; AVFrame *frame = NULL; int skip_frame = codec_ctx->skip_frame; if (!avcodec_is_open(codec_ctx)) { const AVCodec *codec = avcodec_find_decoder(codec_ctx->codec_id); ret = avcodec_open2(codec_ctx, codec, NULL); if (ret < 0) { av_log(codec_ctx, AV_LOG_ERROR, "Failed to open codec\n"); goto end; } } frame = av_frame_alloc(); if (!frame) { av_log(NULL, AV_LOG_ERROR, "Failed to allocate frame\n"); goto end; } if (!decode && codec_ctx->codec->caps_internal & FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM) { codec_ctx->skip_frame = AVDISCARD_ALL; } do { ret = avcodec_decode_video2(codec_ctx, frame, &got_frame, pkt); av_assert0(decode || (!decode && !got_frame)); if (ret < 0) break; pkt->data += ret; pkt->size -= ret; if (got_frame) { break; } } while (pkt->size > 0); end: codec_ctx->skip_frame = skip_frame; av_frame_free(&frame); return ret; }
26,138
FFmpeg
e1c0cfaa419aa5d320540d5a1b3f8fd9b82ab7e5
0
static int tiff_decode_tag(TiffContext *s, AVFrame *frame) { unsigned tag, type, count, off, value = 0, value2 = 0; int i, start; int pos; int ret; double *dp; ret = ff_tread_tag(&s->gb, s->le, &tag, &type, &count, &start); if (ret < 0) { goto end; } off = bytestream2_tell(&s->gb); if (count == 1) { switch (type) { case TIFF_BYTE: case TIFF_SHORT: case TIFF_LONG: value = ff_tget(&s->gb, type, s->le); break; case TIFF_RATIONAL: value = ff_tget(&s->gb, TIFF_LONG, s->le); value2 = ff_tget(&s->gb, TIFF_LONG, s->le); break; case TIFF_STRING: if (count <= 4) { break; } default: value = UINT_MAX; } } switch (tag) { case TIFF_WIDTH: s->width = value; break; case TIFF_HEIGHT: s->height = value; break; case TIFF_BPP: s->bppcount = count; if (count > 4) { av_log(s->avctx, AV_LOG_ERROR, "This format is not supported (bpp=%d, %d components)\n", s->bpp, count); return AVERROR_INVALIDDATA; } if (count == 1) s->bpp = value; else { switch (type) { case TIFF_BYTE: case TIFF_SHORT: case TIFF_LONG: s->bpp = 0; if (bytestream2_get_bytes_left(&s->gb) < type_sizes[type] * count) return AVERROR_INVALIDDATA; for (i = 0; i < count; i++) s->bpp += ff_tget(&s->gb, type, s->le); break; default: s->bpp = -1; } } break; case TIFF_SAMPLES_PER_PIXEL: if (count != 1) { av_log(s->avctx, AV_LOG_ERROR, "Samples per pixel requires a single value, many provided\n"); return AVERROR_INVALIDDATA; } if (value > 4U) { av_log(s->avctx, AV_LOG_ERROR, "Samples per pixel %d is too large\n", value); return AVERROR_INVALIDDATA; } if (s->bppcount == 1) s->bpp *= value; s->bppcount = value; break; case TIFF_COMPR: s->compr = value; s->predictor = 0; switch (s->compr) { case TIFF_RAW: case TIFF_PACKBITS: case TIFF_LZW: case TIFF_CCITT_RLE: break; case TIFF_G3: case TIFF_G4: s->fax_opts = 0; break; case TIFF_DEFLATE: case TIFF_ADOBE_DEFLATE: #if CONFIG_ZLIB break; #else av_log(s->avctx, AV_LOG_ERROR, "Deflate: ZLib not compiled in\n"); return AVERROR(ENOSYS); #endif case TIFF_JPEG: case TIFF_NEWJPEG: avpriv_report_missing_feature(s->avctx, "JPEG compression"); return AVERROR_PATCHWELCOME; case TIFF_LZMA: #if CONFIG_LZMA break; #else av_log(s->avctx, AV_LOG_ERROR, "LZMA not compiled in\n"); return AVERROR(ENOSYS); #endif default: av_log(s->avctx, AV_LOG_ERROR, "Unknown compression method %i\n", s->compr); return AVERROR_INVALIDDATA; } break; case TIFF_ROWSPERSTRIP: if (!value || (type == TIFF_LONG && value == UINT_MAX)) value = s->height; s->rps = FFMIN(value, s->height); break; case TIFF_STRIP_OFFS: if (count == 1) { s->strippos = 0; s->stripoff = value; } else s->strippos = off; s->strips = count; if (s->strips == 1) s->rps = s->height; s->sot = type; break; case TIFF_STRIP_SIZE: if (count == 1) { s->stripsizesoff = 0; s->stripsize = value; s->strips = 1; } else { s->stripsizesoff = off; } s->strips = count; s->sstype = type; break; case TIFF_XRES: case TIFF_YRES: set_sar(s, tag, value, value2); break; case TIFF_TILE_BYTE_COUNTS: case TIFF_TILE_LENGTH: case TIFF_TILE_OFFSETS: case TIFF_TILE_WIDTH: av_log(s->avctx, AV_LOG_ERROR, "Tiled images are not supported\n"); return AVERROR_PATCHWELCOME; break; case TIFF_PREDICTOR: s->predictor = value; break; case TIFF_PHOTOMETRIC: switch (value) { case TIFF_PHOTOMETRIC_WHITE_IS_ZERO: case TIFF_PHOTOMETRIC_BLACK_IS_ZERO: case TIFF_PHOTOMETRIC_RGB: case TIFF_PHOTOMETRIC_PALETTE: case TIFF_PHOTOMETRIC_YCBCR: s->photometric = value; break; case TIFF_PHOTOMETRIC_ALPHA_MASK: case TIFF_PHOTOMETRIC_SEPARATED: case TIFF_PHOTOMETRIC_CIE_LAB: case TIFF_PHOTOMETRIC_ICC_LAB: case TIFF_PHOTOMETRIC_ITU_LAB: case 
TIFF_PHOTOMETRIC_CFA: case TIFF_PHOTOMETRIC_LOG_L: case TIFF_PHOTOMETRIC_LOG_LUV: case TIFF_PHOTOMETRIC_LINEAR_RAW: avpriv_report_missing_feature(s->avctx, "PhotometricInterpretation 0x%04X", value); return AVERROR_PATCHWELCOME; default: av_log(s->avctx, AV_LOG_ERROR, "PhotometricInterpretation %u is " "unknown\n", value); return AVERROR_INVALIDDATA; } break; case TIFF_FILL_ORDER: if (value < 1 || value > 2) { av_log(s->avctx, AV_LOG_ERROR, "Unknown FillOrder value %d, trying default one\n", value); value = 1; } s->fill_order = value - 1; break; case TIFF_PAL: { GetByteContext pal_gb[3]; off = type_sizes[type]; if (count / 3 > 256 || bytestream2_get_bytes_left(&s->gb) < count / 3 * off * 3) return AVERROR_INVALIDDATA; pal_gb[0] = pal_gb[1] = pal_gb[2] = s->gb; bytestream2_skip(&pal_gb[1], count / 3 * off); bytestream2_skip(&pal_gb[2], count / 3 * off * 2); off = (type_sizes[type] - 1) << 3; for (i = 0; i < count / 3; i++) { uint32_t p = 0xFF000000; p |= (ff_tget(&pal_gb[0], type, s->le) >> off) << 16; p |= (ff_tget(&pal_gb[1], type, s->le) >> off) << 8; p |= ff_tget(&pal_gb[2], type, s->le) >> off; s->palette[i] = p; } s->palette_is_set = 1; break; } case TIFF_PLANAR: s->planar = value == 2; break; case TIFF_YCBCR_SUBSAMPLING: if (count != 2) { av_log(s->avctx, AV_LOG_ERROR, "subsample count invalid\n"); return AVERROR_INVALIDDATA; } for (i = 0; i < count; i++) s->subsampling[i] = ff_tget(&s->gb, type, s->le); break; case TIFF_T4OPTIONS: if (s->compr == TIFF_G3) s->fax_opts = value; break; case TIFF_T6OPTIONS: if (s->compr == TIFF_G4) s->fax_opts = value; break; #define ADD_METADATA(count, name, sep)\ if ((ret = add_metadata(count, type, name, sep, s, frame)) < 0) {\ av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");\ goto end;\ } case TIFF_MODEL_PIXEL_SCALE: ADD_METADATA(count, "ModelPixelScaleTag", NULL); break; case TIFF_MODEL_TRANSFORMATION: ADD_METADATA(count, "ModelTransformationTag", NULL); break; case TIFF_MODEL_TIEPOINT: ADD_METADATA(count, "ModelTiepointTag", NULL); break; case TIFF_GEO_KEY_DIRECTORY: ADD_METADATA(1, "GeoTIFF_Version", NULL); ADD_METADATA(2, "GeoTIFF_Key_Revision", "."); s->geotag_count = ff_tget_short(&s->gb, s->le); if (s->geotag_count > count / 4 - 1) { s->geotag_count = count / 4 - 1; av_log(s->avctx, AV_LOG_WARNING, "GeoTIFF key directory buffer shorter than specified\n"); } if (bytestream2_get_bytes_left(&s->gb) < s->geotag_count * sizeof(int16_t) * 4) { s->geotag_count = 0; return -1; } s->geotags = av_mallocz_array(s->geotag_count, sizeof(TiffGeoTag)); if (!s->geotags) { av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n"); s->geotag_count = 0; goto end; } for (i = 0; i < s->geotag_count; i++) { s->geotags[i].key = ff_tget_short(&s->gb, s->le); s->geotags[i].type = ff_tget_short(&s->gb, s->le); s->geotags[i].count = ff_tget_short(&s->gb, s->le); if (!s->geotags[i].type) s->geotags[i].val = get_geokey_val(s->geotags[i].key, ff_tget_short(&s->gb, s->le)); else s->geotags[i].offset = ff_tget_short(&s->gb, s->le); } break; case TIFF_GEO_DOUBLE_PARAMS: if (count >= INT_MAX / sizeof(int64_t)) return AVERROR_INVALIDDATA; if (bytestream2_get_bytes_left(&s->gb) < count * sizeof(int64_t)) return AVERROR_INVALIDDATA; dp = av_malloc_array(count, sizeof(double)); if (!dp) { av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n"); goto end; } for (i = 0; i < count; i++) dp[i] = ff_tget_double(&s->gb, s->le); for (i = 0; i < s->geotag_count; i++) { if (s->geotags[i].type == TIFF_GEO_DOUBLE_PARAMS) { if 
(s->geotags[i].count == 0 || s->geotags[i].offset + s->geotags[i].count > count) { av_log(s->avctx, AV_LOG_WARNING, "Invalid GeoTIFF key %d\n", s->geotags[i].key); } else { char *ap = doubles2str(&dp[s->geotags[i].offset], s->geotags[i].count, ", "); if (!ap) { av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n"); av_freep(&dp); return AVERROR(ENOMEM); } s->geotags[i].val = ap; } } } av_freep(&dp); break; case TIFF_GEO_ASCII_PARAMS: pos = bytestream2_tell(&s->gb); for (i = 0; i < s->geotag_count; i++) { if (s->geotags[i].type == TIFF_GEO_ASCII_PARAMS) { if (s->geotags[i].count == 0 || s->geotags[i].offset + s->geotags[i].count > count) { av_log(s->avctx, AV_LOG_WARNING, "Invalid GeoTIFF key %d\n", s->geotags[i].key); } else { char *ap; bytestream2_seek(&s->gb, pos + s->geotags[i].offset, SEEK_SET); if (bytestream2_get_bytes_left(&s->gb) < s->geotags[i].count) return AVERROR_INVALIDDATA; ap = av_malloc(s->geotags[i].count); if (!ap) { av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n"); return AVERROR(ENOMEM); } bytestream2_get_bufferu(&s->gb, ap, s->geotags[i].count); ap[s->geotags[i].count - 1] = '\0'; //replace the "|" delimiter with a 0 byte s->geotags[i].val = ap; } } } break; case TIFF_ARTIST: ADD_METADATA(count, "artist", NULL); break; case TIFF_COPYRIGHT: ADD_METADATA(count, "copyright", NULL); break; case TIFF_DATE: ADD_METADATA(count, "date", NULL); break; case TIFF_DOCUMENT_NAME: ADD_METADATA(count, "document_name", NULL); break; case TIFF_HOST_COMPUTER: ADD_METADATA(count, "computer", NULL); break; case TIFF_IMAGE_DESCRIPTION: ADD_METADATA(count, "description", NULL); break; case TIFF_MAKE: ADD_METADATA(count, "make", NULL); break; case TIFF_MODEL: ADD_METADATA(count, "model", NULL); break; case TIFF_PAGE_NAME: ADD_METADATA(count, "page_name", NULL); break; case TIFF_PAGE_NUMBER: ADD_METADATA(count, "page_number", " / "); break; case TIFF_SOFTWARE_NAME: ADD_METADATA(count, "software", NULL); break; default: if (s->avctx->err_recognition & AV_EF_EXPLODE) { av_log(s->avctx, AV_LOG_ERROR, "Unknown or unsupported tag %d/0X%0X\n", tag, tag); return AVERROR_INVALIDDATA; } } end: bytestream2_seek(&s->gb, start, SEEK_SET); return 0; }
26,139
FFmpeg
c3fb20bab4f00621733809fb35ee39a5ae11e598
1
static int reap_filters(void) { AVFilterBufferRef *picref; AVFrame *filtered_frame = NULL; int i; int64_t frame_pts; /* Reap all buffers present in the buffer sinks */ for (i = 0; i < nb_output_streams; i++) { OutputStream *ost = output_streams[i]; OutputFile *of = output_files[ost->file_index]; int ret = 0; if (!ost->filter) continue; if (!ost->filtered_frame && !(ost->filtered_frame = avcodec_alloc_frame())) { return AVERROR(ENOMEM); } else avcodec_get_frame_defaults(ost->filtered_frame); filtered_frame = ost->filtered_frame; while (1) { ret = av_buffersink_get_buffer_ref(ost->filter->filter, &picref, AV_BUFFERSINK_FLAG_NO_REQUEST); if (ret < 0) { if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) { char buf[256]; av_strerror(ret, buf, sizeof(buf)); av_log(NULL, AV_LOG_WARNING, "Error in av_buffersink_get_buffer_ref(): %s\n", buf); frame_pts = AV_NOPTS_VALUE; if (picref->pts != AV_NOPTS_VALUE) { filtered_frame->pts = frame_pts = av_rescale_q(picref->pts, ost->filter->filter->inputs[0]->time_base, ost->st->codec->time_base) - av_rescale_q(of->start_time, AV_TIME_BASE_Q, ost->st->codec->time_base); if (of->start_time && filtered_frame->pts < 0) { avfilter_unref_buffer(picref); continue; //if (ost->source_index >= 0) // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold switch (ost->filter->filter->inputs[0]->type) { case AVMEDIA_TYPE_VIDEO: avfilter_copy_buf_props(filtered_frame, picref); filtered_frame->pts = frame_pts; if (!ost->frame_aspect_ratio) ost->st->codec->sample_aspect_ratio = picref->video->sample_aspect_ratio; do_video_out(of->ctx, ost, filtered_frame); case AVMEDIA_TYPE_AUDIO: avfilter_copy_buf_props(filtered_frame, picref); filtered_frame->pts = frame_pts; do_audio_out(of->ctx, ost, filtered_frame); default: // TODO support subtitle filters av_assert0(0); avfilter_unref_buffer(picref); return 0;
26,140
FFmpeg
4d1418cd4f620b382106542d0f33d96e33a0fdae
1
static av_always_inline void hl_decode_mb_predict_luma(H264Context *h, int mb_type, int is_h264, int simple, int transform_bypass, int pixel_shift, int *block_offset, int linesize, uint8_t *dest_y, int p) { MpegEncContext * const s = &h->s; void (*idct_add)(uint8_t *dst, DCTELEM *block, int stride); void (*idct_dc_add)(uint8_t *dst, DCTELEM *block, int stride); int i; int qscale = p == 0 ? s->qscale : h->chroma_qp[p-1]; block_offset += 16*p; if(IS_INTRA4x4(mb_type)){ if(simple || !s->encoding){ if(IS_8x8DCT(mb_type)){ if(transform_bypass){ idct_dc_add = idct_add = s->dsp.add_pixels8; }else{ idct_dc_add = h->h264dsp.h264_idct8_dc_add; idct_add = h->h264dsp.h264_idct8_add; } for(i=0; i<16; i+=4){ uint8_t * const ptr= dest_y + block_offset[i]; const int dir= h->intra4x4_pred_mode_cache[ scan8[i] ]; if(transform_bypass && h->sps.profile_idc==244 && dir<=1){ h->hpc.pred8x8l_add[dir](ptr, h->mb + (i*16+p*256 << pixel_shift), linesize); }else{ const int nnz = h->non_zero_count_cache[ scan8[i+p*16] ]; h->hpc.pred8x8l[ dir ](ptr, (h->topleft_samples_available<<i)&0x8000, (h->topright_samples_available<<i)&0x4000, linesize); if(nnz){ if(nnz == 1 && dctcoef_get(h->mb, pixel_shift, i*16+p*256)) idct_dc_add(ptr, h->mb + (i*16+p*256 << pixel_shift), linesize); else idct_add (ptr, h->mb + (i*16+p*256 << pixel_shift), linesize); } } } }else{ if(transform_bypass){ idct_dc_add = idct_add = s->dsp.add_pixels4; }else{ idct_dc_add = h->h264dsp.h264_idct_dc_add; idct_add = h->h264dsp.h264_idct_add; } for(i=0; i<16; i++){ uint8_t * const ptr= dest_y + block_offset[i]; const int dir= h->intra4x4_pred_mode_cache[ scan8[i] ]; if(transform_bypass && h->sps.profile_idc==244 && dir<=1){ h->hpc.pred4x4_add[dir](ptr, h->mb + (i*16+p*256 << pixel_shift), linesize); }else{ uint8_t *topright; int nnz, tr; uint64_t tr_high; if(dir == DIAG_DOWN_LEFT_PRED || dir == VERT_LEFT_PRED){ const int topright_avail= (h->topright_samples_available<<i)&0x8000; assert(s->mb_y || linesize <= block_offset[i]); if(!topright_avail){ if (pixel_shift) { tr_high= ((uint16_t*)ptr)[3 - linesize/2]*0x0001000100010001ULL; topright= (uint8_t*) &tr_high; } else { tr= ptr[3 - linesize]*0x01010101; topright= (uint8_t*) &tr; } }else topright= ptr + (4 << pixel_shift) - linesize; }else topright= NULL; h->hpc.pred4x4[ dir ](ptr, topright, linesize); nnz = h->non_zero_count_cache[ scan8[i+p*16] ]; if(nnz){ if(is_h264){ if(nnz == 1 && dctcoef_get(h->mb, pixel_shift, i*16+p*256)) idct_dc_add(ptr, h->mb + (i*16+p*256 << pixel_shift), linesize); else idct_add (ptr, h->mb + (i*16+p*256 << pixel_shift), linesize); }else ff_svq3_add_idct_c(ptr, h->mb + i*16+p*256, linesize, qscale, 0); } } } } } }else{ h->hpc.pred16x16[ h->intra16x16_pred_mode ](dest_y , linesize); if(is_h264){ if(h->non_zero_count_cache[ scan8[LUMA_DC_BLOCK_INDEX+p] ]){ if(!transform_bypass) h->h264dsp.h264_luma_dc_dequant_idct(h->mb+(p*256 << pixel_shift), h->mb_luma_dc[p], h->dequant4_coeff[p][qscale][0]); else{ static const uint8_t dc_mapping[16] = { 0*16, 1*16, 4*16, 5*16, 2*16, 3*16, 6*16, 7*16, 8*16, 9*16,12*16,13*16,10*16,11*16,14*16,15*16}; for(i = 0; i < 16; i++) dctcoef_set(h->mb+p*256, pixel_shift, dc_mapping[i], dctcoef_get(h->mb_luma_dc[p], pixel_shift, i)); } } }else ff_svq3_luma_dc_dequant_idct_c(h->mb+p*256, h->mb_luma_dc[p], qscale); } }
26,142
FFmpeg
7faa40af982960608b117e20fec999b48011e5e0
1
static int adx_read_packet(AVFormatContext *s, AVPacket *pkt) { ADXDemuxerContext *c = s->priv_data; AVCodecContext *avctx = s->streams[0]->codec; int ret, size; size = BLOCK_SIZE * avctx->channels; pkt->pos = avio_tell(s->pb); pkt->stream_index = 0; ret = av_get_packet(s->pb, pkt, size); if (ret != size) { av_free_packet(pkt); return ret < 0 ? ret : AVERROR(EIO); if (AV_RB16(pkt->data) & 0x8000) { av_free_packet(pkt); return AVERROR_EOF; pkt->size = size; pkt->duration = 1; pkt->pts = (pkt->pos - c->header_size) / size; return 0;
26,143
qemu
a1f0cce2ac0243572ff72aa561da67fe3766a395
1
static int execute_command(BlockDriverState *bdrv, SCSIGenericReq *r, int direction, BlockDriverCompletionFunc *complete) { SCSIGenericState *s = DO_UPCAST(SCSIGenericState, qdev, r->req.dev); r->io_header.interface_id = 'S'; r->io_header.dxfer_direction = direction; r->io_header.dxferp = r->buf; r->io_header.dxfer_len = r->buflen; r->io_header.cmdp = r->req.cmd.buf; r->io_header.cmd_len = r->req.cmd.len; r->io_header.mx_sb_len = sizeof(s->sensebuf); r->io_header.sbp = s->sensebuf; r->io_header.timeout = MAX_UINT; r->io_header.usr_ptr = r; r->io_header.flags |= SG_FLAG_DIRECT_IO; r->req.aiocb = bdrv_aio_ioctl(bdrv, SG_IO, &r->io_header, complete, r); if (r->req.aiocb == NULL) { BADF("execute_command: read failed !\n"); return -1; } return 0; }
26,144
qemu
60fe637bf0e4d7989e21e50f52526444765c63b4
1
static int get_int8(QEMUFile *f, void *pv, size_t size) { int8_t *v = pv; qemu_get_s8s(f, v); return 0; }
26,145
qemu
fea505221eaf87889000378d4d33ad0dfd5f4d9d
0
static void handle_msr(DisasContext *s, uint32_t insn, unsigned int op0, unsigned int op1, unsigned int op2, unsigned int crn, unsigned int crm, unsigned int rt) { unsupported_encoding(s, insn); }
26,146
qemu
b3db211f3c80bb996a704d665fe275619f728bd4
0
static void validate_teardown(TestInputVisitorData *data, const void *unused) { qobject_decref(data->obj); data->obj = NULL; if (data->qiv) { visit_free(data->qiv); data->qiv = NULL; } }
26,147
qemu
b9bec74bcb16519a876ec21cd5277c526a9b512d
0
int kvm_arch_remove_hw_breakpoint(target_ulong addr, target_ulong len, int type) { int n; n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type); if (n < 0) return -ENOENT; nb_hw_breakpoint--; hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint]; return 0; }
26,148
qemu
f0ddf11b23260f0af84fb529486a8f9ba2d19401
0
void HELPER(cas2l)(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2) { uint32_t Dc1 = extract32(regs, 9, 3); uint32_t Dc2 = extract32(regs, 6, 3); uint32_t Du1 = extract32(regs, 3, 3); uint32_t Du2 = extract32(regs, 0, 3); uint32_t c1 = env->dregs[Dc1]; uint32_t c2 = env->dregs[Dc2]; uint32_t u1 = env->dregs[Du1]; uint32_t u2 = env->dregs[Du2]; uint32_t l1, l2; uintptr_t ra = GETPC(); #if defined(CONFIG_ATOMIC64) && !defined(CONFIG_USER_ONLY) int mmu_idx = cpu_mmu_index(env, 0); TCGMemOpIdx oi; #endif if (parallel_cpus) { /* We're executing in a parallel context -- must be atomic. */ #ifdef CONFIG_ATOMIC64 uint64_t c, u, l; if ((a1 & 7) == 0 && a2 == a1 + 4) { c = deposit64(c2, 32, 32, c1); u = deposit64(u2, 32, 32, u1); #ifdef CONFIG_USER_ONLY l = helper_atomic_cmpxchgq_be(env, a1, c, u); #else oi = make_memop_idx(MO_BEQ, mmu_idx); l = helper_atomic_cmpxchgq_be_mmu(env, a1, c, u, oi, ra); #endif l1 = l >> 32; l2 = l; } else if ((a2 & 7) == 0 && a1 == a2 + 4) { c = deposit64(c1, 32, 32, c2); u = deposit64(u1, 32, 32, u2); #ifdef CONFIG_USER_ONLY l = helper_atomic_cmpxchgq_be(env, a2, c, u); #else oi = make_memop_idx(MO_BEQ, mmu_idx); l = helper_atomic_cmpxchgq_be_mmu(env, a2, c, u, oi, ra); #endif l2 = l >> 32; l1 = l; } else #endif { /* Tell the main loop we need to serialize this insn. */ cpu_loop_exit_atomic(ENV_GET_CPU(env), ra); } } else { /* We're executing in a serial context -- no need to be atomic. */ l1 = cpu_ldl_data_ra(env, a1, ra); l2 = cpu_ldl_data_ra(env, a2, ra); if (l1 == c1 && l2 == c2) { cpu_stl_data_ra(env, a1, u1, ra); cpu_stl_data_ra(env, a2, u2, ra); } } if (c1 != l1) { env->cc_n = l1; env->cc_v = c1; } else { env->cc_n = l2; env->cc_v = c2; } env->cc_op = CC_OP_CMPL; env->dregs[Dc1] = l1; env->dregs[Dc2] = l2; }
26,150
qemu
786a4ea82ec9c87e3a895cf41081029b285a5fe5
0
static inline hwaddr booke206_page_size_to_tlb(uint64_t size) { return (ffs(size >> 10) - 1) >> 1; }
26,151
qemu
8917c3bdba37d6fe4393db0fad3fabbde9530d6b
0
sofcantrcvmore(struct socket *so) { if ((so->so_state & SS_NOFDREF) == 0) { shutdown(so->s,0); if(global_writefds) { FD_CLR(so->s,global_writefds); } } so->so_state &= ~(SS_ISFCONNECTING); if (so->so_state & SS_FCANTSENDMORE) { so->so_state &= SS_PERSISTENT_MASK; so->so_state |= SS_NOFDREF; /* Don't select it */ } else { so->so_state |= SS_FCANTRCVMORE; } }
26,153
qemu
72cf2d4f0e181d0d3a3122e04129c58a95da713e
0
void cpu_breakpoint_remove_all(CPUState *env, int mask) { #if defined(TARGET_HAS_ICE) CPUBreakpoint *bp, *next; TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) { if (bp->flags & mask) cpu_breakpoint_remove_by_ref(env, bp); } #endif }
26,155
qemu
f8b6cc0070aab8b75bd082582c829be1353f395f
0
static void scsi_destroy(SCSIDevice *d) { SCSIGenericState *s = DO_UPCAST(SCSIGenericState, qdev, d); SCSIGenericReq *r; while (!QTAILQ_EMPTY(&s->qdev.requests)) { r = DO_UPCAST(SCSIGenericReq, req, QTAILQ_FIRST(&s->qdev.requests)); scsi_remove_request(r); } blockdev_mark_auto_del(s->qdev.conf.dinfo->bdrv); }
26,157
qemu
4a1418e07bdcfaa3177739e04707ecaec75d89e1
0
static inline void kqemu_save_seg(SegmentCache *sc, const struct kqemu_segment_cache *ksc) { sc->selector = ksc->selector; sc->flags = ksc->flags; sc->limit = ksc->limit; sc->base = ksc->base; }
26,159
qemu
a8170e5e97ad17ca169c64ba87ae2f53850dab4c
0
static void omap_mpu_timer_write(void *opaque, target_phys_addr_t addr, uint64_t value, unsigned size) { struct omap_mpu_timer_s *s = (struct omap_mpu_timer_s *) opaque; if (size != 4) { return omap_badwidth_write32(opaque, addr, value); } switch (addr) { case 0x00: /* CNTL_TIMER */ omap_timer_sync(s); s->enable = (value >> 5) & 1; s->ptv = (value >> 2) & 7; s->ar = (value >> 1) & 1; s->st = value & 1; omap_timer_update(s); return; case 0x04: /* LOAD_TIM */ s->reset_val = value; return; case 0x08: /* READ_TIM */ OMAP_RO_REG(addr); break; default: OMAP_BAD_REG(addr); } }
26,161
qemu
d6085e3ace20bc9b0fa625d8d79b22668710e217
0
bool qemu_peer_has_vnet_hdr(NetClientState *nc) { if (!nc->peer || !nc->peer->info->has_vnet_hdr) { return false; } return nc->peer->info->has_vnet_hdr(nc->peer); }
26,162
qemu
57407ea44cc0a3d630b9b89a2be011f1955ce5c1
0
static void vmxnet3_cleanup(NetClientState *nc) { VMXNET3State *s = qemu_get_nic_opaque(nc); s->nic = NULL; }
26,163
qemu
5e755519ac9d867f7da13f58a9d0c262db82e14c
0
void op_cp1_enabled(void) { if (!(env->CP0_Status & (1 << CP0St_CU1))) { CALL_FROM_TB2(do_raise_exception_err, EXCP_CpU, 1); } RETURN(); }
26,164
FFmpeg
59c6178a54c414fd19e064f0077d00b82a1eb812
0
static int put_flac_codecpriv(AVFormatContext *s, ByteIOContext *pb, AVCodecContext *codec) { // if the extradata_size is greater than FLAC_STREAMINFO_SIZE, // assume that it's in Matroska format already if (codec->extradata_size < FLAC_STREAMINFO_SIZE) { av_log(s, AV_LOG_ERROR, "Invalid FLAC extradata\n"); return -1; } else if (codec->extradata_size == FLAC_STREAMINFO_SIZE) { // only the streaminfo packet put_buffer(pb, "fLaC", 4); put_byte(pb, 0x80); put_be24(pb, FLAC_STREAMINFO_SIZE); } else if(memcmp("fLaC", codec->extradata, 4)) { av_log(s, AV_LOG_ERROR, "Invalid FLAC extradata\n"); return -1; } put_buffer(pb, codec->extradata, codec->extradata_size); return 0; }
26,165
qemu
ac1970fbe8ad5a70174f462109ac0f6c7bf1bc43
0
static void core_region_add(MemoryListener *listener, MemoryRegionSection *section) { cpu_register_physical_memory_log(section, section->readonly); }
26,166
qemu
c34d440a728fd3b5099d11dec122d440ef092c23
0
static void kvm_mce_inj_srao_memscrub(CPUState *env, target_phys_addr_t paddr) { struct kvm_x86_mce mce = { .bank = 9, .status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S | 0xc0, .mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV, .addr = paddr, .misc = (MCM_ADDR_PHYS << 6) | 0xc, }; int r; r = kvm_set_mce(env, &mce); if (r < 0) { fprintf(stderr, "kvm_set_mce: %s\n", strerror(errno)); abort(); } kvm_mce_broadcast_rest(env); }
26,167
FFmpeg
ddebfb15dc8ee01f7f8ff4e15e80b9843e550f00
0
int avcodec_open(AVCodecContext *avctx, AVCodec *codec) { int ret; if(avctx->codec) return -1; avctx->codec = codec; avctx->codec_id = codec->id; avctx->frame_number = 0; if (codec->priv_data_size > 0) { avctx->priv_data = av_mallocz(codec->priv_data_size); if (!avctx->priv_data) return -ENOMEM; } else { avctx->priv_data = NULL; } if(avctx->coded_width && avctx->coded_height) avcodec_set_dimensions(avctx, avctx->coded_width, avctx->coded_height); else if(avctx->width && avctx->height) avcodec_set_dimensions(avctx, avctx->width, avctx->height); if((avctx->coded_width||avctx->coded_height) && avcodec_check_dimensions(avctx,avctx->coded_width,avctx->coded_height)){ av_freep(&avctx->priv_data); return -1; } ret = avctx->codec->init(avctx); if (ret < 0) { av_freep(&avctx->priv_data); return ret; } return 0; }
26,168
FFmpeg
d1adad3cca407f493c3637e20ecd4f7124e69212
0
static inline void RENAME(yv12touyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst, long width, long height, long lumStride, long chromStride, long dstStride) { //FIXME interpolate chroma RENAME(yuvPlanartouyvy)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 2); }
26,169
FFmpeg
ac4b32df71bd932838043a4838b86d11e169707f
1
static int decode_block_coeffs_internal(VP56RangeCoder *r, int16_t block[16], uint8_t probs[16][3][NUM_DCT_TOKENS - 1], int i, uint8_t *token_prob, int16_t qmul[2]) { VP56RangeCoder c = *r; goto skip_eob; do { int coeff; if (!vp56_rac_get_prob_branchy(&c, token_prob[0])) // DCT_EOB break; skip_eob: if (!vp56_rac_get_prob_branchy(&c, token_prob[1])) { // DCT_0 if (++i == 16) break; // invalid input; blocks should end with EOB token_prob = probs[i][0]; goto skip_eob; } if (!vp56_rac_get_prob_branchy(&c, token_prob[2])) { // DCT_1 coeff = 1; token_prob = probs[i + 1][1]; } else { if (!vp56_rac_get_prob_branchy(&c, token_prob[3])) { // DCT 2,3,4 coeff = vp56_rac_get_prob_branchy(&c, token_prob[4]); if (coeff) coeff += vp56_rac_get_prob(&c, token_prob[5]); coeff += 2; } else { // DCT_CAT* if (!vp56_rac_get_prob_branchy(&c, token_prob[6])) { if (!vp56_rac_get_prob_branchy(&c, token_prob[7])) { // DCT_CAT1 coeff = 5 + vp56_rac_get_prob(&c, vp8_dct_cat1_prob[0]); } else { // DCT_CAT2 coeff = 7; coeff += vp56_rac_get_prob(&c, vp8_dct_cat2_prob[0]) << 1; coeff += vp56_rac_get_prob(&c, vp8_dct_cat2_prob[1]); } } else { // DCT_CAT3 and up int a = vp56_rac_get_prob(&c, token_prob[8]); int b = vp56_rac_get_prob(&c, token_prob[9 + a]); int cat = (a << 1) + b; coeff = 3 + (8 << cat); coeff += vp8_rac_get_coeff(&c, ff_vp8_dct_cat_prob[cat]); } } token_prob = probs[i + 1][2]; } block[zigzag_scan[i]] = (vp8_rac_get(&c) ? -coeff : coeff) * qmul[!!i]; } while (++i < 16); *r = c; return i; }
26,170
qemu
832390a5ed11e6c516db0986bf302d098e3ae36c
1
static int img_check(int argc, char **argv) { int c, ret; OutputFormat output_format = OFORMAT_HUMAN; const char *filename, *fmt, *output, *cache; BlockBackend *blk; BlockDriverState *bs; int fix = 0; int flags = BDRV_O_FLAGS | BDRV_O_CHECK; ImageCheck *check; bool quiet = false; fmt = NULL; output = NULL; cache = BDRV_DEFAULT_CACHE; for(;;) { int option_index = 0; static const struct option long_options[] = { {"help", no_argument, 0, 'h'}, {"format", required_argument, 0, 'f'}, {"repair", required_argument, 0, 'r'}, {"output", required_argument, 0, OPTION_OUTPUT}, {0, 0, 0, 0} }; c = getopt_long(argc, argv, "hf:r:T:q", long_options, &option_index); if (c == -1) { break; } switch(c) { case '?': case 'h': help(); break; case 'f': fmt = optarg; break; case 'r': flags |= BDRV_O_RDWR; if (!strcmp(optarg, "leaks")) { fix = BDRV_FIX_LEAKS; } else if (!strcmp(optarg, "all")) { fix = BDRV_FIX_LEAKS | BDRV_FIX_ERRORS; } else { error_exit("Unknown option value for -r " "(expecting 'leaks' or 'all'): %s", optarg); } break; case OPTION_OUTPUT: output = optarg; break; case 'T': cache = optarg; break; case 'q': quiet = true; break; } } if (optind != argc - 1) { error_exit("Expecting one image file name"); } filename = argv[optind++]; if (output && !strcmp(output, "json")) { output_format = OFORMAT_JSON; } else if (output && !strcmp(output, "human")) { output_format = OFORMAT_HUMAN; } else if (output) { error_report("--output must be used with human or json as argument."); return 1; } ret = bdrv_parse_cache_flags(cache, &flags); if (ret < 0) { error_report("Invalid source cache option: %s", cache); return 1; } blk = img_open("image", filename, fmt, flags, true, quiet); if (!blk) { return 1; } bs = blk_bs(blk); check = g_new0(ImageCheck, 1); ret = collect_image_check(bs, check, filename, fmt, fix); if (ret == -ENOTSUP) { error_report("This image format does not support checks"); ret = 63; goto fail; } if (check->corruptions_fixed || check->leaks_fixed) { int corruptions_fixed, leaks_fixed; leaks_fixed = check->leaks_fixed; corruptions_fixed = check->corruptions_fixed; if (output_format == OFORMAT_HUMAN) { qprintf(quiet, "The following inconsistencies were found and repaired:\n\n" " %" PRId64 " leaked clusters\n" " %" PRId64 " corruptions\n\n" "Double checking the fixed image now...\n", check->leaks_fixed, check->corruptions_fixed); } ret = collect_image_check(bs, check, filename, fmt, 0); check->leaks_fixed = leaks_fixed; check->corruptions_fixed = corruptions_fixed; } switch (output_format) { case OFORMAT_HUMAN: dump_human_image_check(check, quiet); break; case OFORMAT_JSON: dump_json_image_check(check, quiet); break; } if (ret || check->check_errors) { ret = 1; goto fail; } if (check->corruptions) { ret = 2; } else if (check->leaks) { ret = 3; } else { ret = 0; } fail: qapi_free_ImageCheck(check); blk_unref(blk); return ret; }
26,171
FFmpeg
5a8fec1b33f2c9da89fe565516fff24b09988dc9
1
static void imdct12(INTFLOAT *out, INTFLOAT *in) { INTFLOAT in0, in1, in2, in3, in4, in5, t1, t2; in0 = in[0*3]; in1 = in[1*3] + in[0*3]; in2 = in[2*3] + in[1*3]; in3 = in[3*3] + in[2*3]; in4 = in[4*3] + in[3*3]; in5 = in[5*3] + in[4*3]; in5 += in3; in3 += in1; in2 = MULH3(in2, C3, 2); in3 = MULH3(in3, C3, 4); t1 = in0 - in4; t2 = MULH3(in1 - in5, C4, 2); out[ 7] = out[10] = t1 + t2; out[ 1] = out[ 4] = t1 - t2; in0 += SHR(in4, 1); in4 = in0 + in2; in5 += 2*in1; in1 = MULH3(in5 + in3, C5, 1); out[ 8] = out[ 9] = in4 + in1; out[ 2] = out[ 3] = in4 - in1; in0 -= in2; in5 = MULH3(in5 - in3, C6, 2); out[ 0] = out[ 5] = in0 - in5; out[ 6] = out[11] = in0 + in5; }
26,172
qemu
de82815db1c89da058b7fb941dab137d6d9ab738
1
int qcow2_snapshot_load_tmp(BlockDriverState *bs, const char *snapshot_id, const char *name, Error **errp) { int i, snapshot_index; BDRVQcowState *s = bs->opaque; QCowSnapshot *sn; uint64_t *new_l1_table; int new_l1_bytes; int ret; assert(bs->read_only); /* Search the snapshot */ snapshot_index = find_snapshot_by_id_and_name(bs, snapshot_id, name); if (snapshot_index < 0) { error_setg(errp, "Can't find snapshot"); return -ENOENT; } sn = &s->snapshots[snapshot_index]; /* Allocate and read in the snapshot's L1 table */ if (sn->l1_size > QCOW_MAX_L1_SIZE) { error_setg(errp, "Snapshot L1 table too large"); return -EFBIG; } new_l1_bytes = sn->l1_size * sizeof(uint64_t); new_l1_table = g_malloc0(align_offset(new_l1_bytes, 512)); ret = bdrv_pread(bs->file, sn->l1_table_offset, new_l1_table, new_l1_bytes); if (ret < 0) { error_setg(errp, "Failed to read l1 table for snapshot"); g_free(new_l1_table); return ret; } /* Switch the L1 table */ g_free(s->l1_table); s->l1_size = sn->l1_size; s->l1_table_offset = sn->l1_table_offset; s->l1_table = new_l1_table; for(i = 0;i < s->l1_size; i++) { be64_to_cpus(&s->l1_table[i]); } return 0; }
26,176
qemu
63fa06dc978f3669dbfd9443b33cde9e2a7f4b41
1
static int vdi_create(const char *filename, QEMUOptionParameter *options, Error **errp) { int fd; int result = 0; uint64_t bytes = 0; uint32_t blocks; size_t block_size = DEFAULT_CLUSTER_SIZE; uint32_t image_type = VDI_TYPE_DYNAMIC; VdiHeader header; size_t i; size_t bmap_size; logout("\n"); /* Read out options. */ while (options && options->name) { if (!strcmp(options->name, BLOCK_OPT_SIZE)) { bytes = options->value.n; #if defined(CONFIG_VDI_BLOCK_SIZE) } else if (!strcmp(options->name, BLOCK_OPT_CLUSTER_SIZE)) { if (options->value.n) { /* TODO: Additional checks (SECTOR_SIZE * 2^n, ...). */ block_size = options->value.n; } #endif #if defined(CONFIG_VDI_STATIC_IMAGE) } else if (!strcmp(options->name, BLOCK_OPT_STATIC)) { if (options->value.n) { image_type = VDI_TYPE_STATIC; } #endif } options++; } fd = qemu_open(filename, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY | O_LARGEFILE, 0644); if (fd < 0) { return -errno; } /* We need enough blocks to store the given disk size, so always round up. */ blocks = (bytes + block_size - 1) / block_size; bmap_size = blocks * sizeof(uint32_t); bmap_size = ((bmap_size + SECTOR_SIZE - 1) & ~(SECTOR_SIZE -1)); memset(&header, 0, sizeof(header)); pstrcpy(header.text, sizeof(header.text), VDI_TEXT); header.signature = VDI_SIGNATURE; header.version = VDI_VERSION_1_1; header.header_size = 0x180; header.image_type = image_type; header.offset_bmap = 0x200; header.offset_data = 0x200 + bmap_size; header.sector_size = SECTOR_SIZE; header.disk_size = bytes; header.block_size = block_size; header.blocks_in_image = blocks; if (image_type == VDI_TYPE_STATIC) { header.blocks_allocated = blocks; } uuid_generate(header.uuid_image); uuid_generate(header.uuid_last_snap); /* There is no need to set header.uuid_link or header.uuid_parent here. */ #if defined(CONFIG_VDI_DEBUG) vdi_header_print(&header); #endif vdi_header_to_le(&header); if (write(fd, &header, sizeof(header)) < 0) { result = -errno; } if (bmap_size > 0) { uint32_t *bmap = g_malloc0(bmap_size); for (i = 0; i < blocks; i++) { if (image_type == VDI_TYPE_STATIC) { bmap[i] = i; } else { bmap[i] = VDI_UNALLOCATED; } } if (write(fd, bmap, bmap_size) < 0) { result = -errno; } g_free(bmap); } if (image_type == VDI_TYPE_STATIC) { if (ftruncate(fd, sizeof(header) + bmap_size + blocks * block_size)) { result = -errno; } } if (close(fd) < 0) { result = -errno; } return result; }
26,177
FFmpeg
ca402f32e392590a81a1381dab41c4f9c2c2f98a
1
static int dxa_read_header(AVFormatContext *s, AVFormatParameters *ap) { AVIOContext *pb = s->pb; DXAContext *c = s->priv_data; AVStream *st, *ast; uint32_t tag; int32_t fps; int w, h; int num, den; int flags; tag = avio_rl32(pb); if (tag != MKTAG('D', 'E', 'X', 'A')) return -1; flags = avio_r8(pb); c->frames = avio_rb16(pb); if(!c->frames){ av_log(s, AV_LOG_ERROR, "File contains no frames ???\n"); return -1; } fps = avio_rb32(pb); if(fps > 0){ den = 1000; num = fps; }else if (fps < 0){ den = 100000; num = -fps; }else{ den = 10; num = 1; } w = avio_rb16(pb); h = avio_rb16(pb); c->has_sound = 0; st = av_new_stream(s, 0); if (!st) return -1; // Parse WAV data header if(avio_rl32(pb) == MKTAG('W', 'A', 'V', 'E')){ uint32_t size, fsize; c->has_sound = 1; size = avio_rb32(pb); c->vidpos = avio_tell(pb) + size; avio_skip(pb, 16); fsize = avio_rl32(pb); ast = av_new_stream(s, 0); if (!ast) return -1; ff_get_wav_header(pb, ast->codec, fsize); // find 'data' chunk while(avio_tell(pb) < c->vidpos && !pb->eof_reached){ tag = avio_rl32(pb); fsize = avio_rl32(pb); if(tag == MKTAG('d', 'a', 't', 'a')) break; avio_skip(pb, fsize); } c->bpc = (fsize + c->frames - 1) / c->frames; if(ast->codec->block_align) c->bpc = ((c->bpc + ast->codec->block_align - 1) / ast->codec->block_align) * ast->codec->block_align; c->bytes_left = fsize; c->wavpos = avio_tell(pb); avio_seek(pb, c->vidpos, SEEK_SET); } /* now we are ready: build format streams */ st->codec->codec_type = AVMEDIA_TYPE_VIDEO; st->codec->codec_id = CODEC_ID_DXA; st->codec->width = w; st->codec->height = h; av_reduce(&den, &num, den, num, (1UL<<31)-1); av_set_pts_info(st, 33, num, den); /* flags & 0x80 means that image is interlaced, * flags & 0x40 means that image has double height * either way set true height */ if(flags & 0xC0){ st->codec->height >>= 1; } c->readvid = !c->has_sound; c->vidpos = avio_tell(pb); s->start_time = 0; s->duration = (int64_t)c->frames * AV_TIME_BASE * num / den; av_log(s, AV_LOG_DEBUG, "%d frame(s)\n",c->frames); return 0; }
26,178
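Editorial note: the DXA header stores a signed fps field that the demuxer above turns into a (num, den) pair. A standalone sketch that mirrors only the branch structure of that mapping; how the pair is then interpreted is left to the demuxer, and the sample value is hypothetical:

    #include <stdio.h>
    #include <stdint.h>

    /* mirror of the fps -> (num, den) branches in the demuxer above */
    static void dxa_fps_to_pair(int32_t fps, int *num, int *den)
    {
        if (fps > 0) {          /* positive fps field */
            *den = 1000;
            *num = fps;
        } else if (fps < 0) {   /* negative fps field */
            *den = 100000;
            *num = -fps;
        } else {                /* zero: fixed 1/10 fallback */
            *den = 10;
            *num = 1;
        }
    }

    int main(void)
    {
        int num, den;
        dxa_fps_to_pair(25000, &num, &den);   /* hypothetical header value */
        printf("num=%d den=%d\n", num, den);
        return 0;
    }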
FFmpeg
dcd3418a35aab7ef283b68ed9997ce4ac204094e
0
static int get_cv_color_primaries(AVCodecContext *avctx, CFStringRef *primaries) { enum AVColorPrimaries pri = avctx->color_primaries; switch (pri) { case AVCOL_PRI_UNSPECIFIED: *primaries = NULL; break; case AVCOL_PRI_BT709: *primaries = kCVImageBufferColorPrimaries_ITU_R_709_2; break; case AVCOL_PRI_BT2020: *primaries = kCVImageBufferColorPrimaries_ITU_R_2020; break; default: av_log(avctx, AV_LOG_ERROR, "Color primaries %s is not supported.\n", av_color_primaries_name(pri)); *primaries = NULL; return -1; } return 0; }
26,179
FFmpeg
bcd7bf7eeb09a395cc01698842d1b8be9af483fc
0
static void avc_wgt_4x4multiple_msa(uint8_t *data, int32_t stride, int32_t height, int32_t log2_denom, int32_t src_weight, int32_t offset_in) { uint8_t cnt; uint32_t data0, data1, data2, data3; v16u8 zero = { 0 }; v16u8 src0, src1, src2, src3; v8u16 temp0, temp1, temp2, temp3; v8i16 wgt, denom, offset; offset_in <<= (log2_denom); if (log2_denom) { offset_in += (1 << (log2_denom - 1)); } wgt = __msa_fill_h(src_weight); offset = __msa_fill_h(offset_in); denom = __msa_fill_h(log2_denom); for (cnt = height / 4; cnt--;) { LOAD_4WORDS_WITH_STRIDE(data, stride, data0, data1, data2, data3); src0 = (v16u8) __msa_fill_w(data0); src1 = (v16u8) __msa_fill_w(data1); src2 = (v16u8) __msa_fill_w(data2); src3 = (v16u8) __msa_fill_w(data3); ILVR_B_4VECS_UH(src0, src1, src2, src3, zero, zero, zero, zero, temp0, temp1, temp2, temp3); temp0 *= wgt; temp1 *= wgt; temp2 *= wgt; temp3 *= wgt; ADDS_S_H_4VECS_UH(temp0, offset, temp1, offset, temp2, offset, temp3, offset, temp0, temp1, temp2, temp3); MAXI_S_H_4VECS_UH(temp0, temp1, temp2, temp3, 0); SRL_H_4VECS_UH(temp0, temp1, temp2, temp3, temp0, temp1, temp2, temp3, denom); SAT_U_H_4VECS_UH(temp0, temp1, temp2, temp3, 7); PCKEV_B_STORE_4_BYTES_4(temp0, temp1, temp2, temp3, data, stride); data += (4 * stride); } }
26,181
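Editorial note: the MSA routine above vectorises H.264 explicit weighted prediction. As a reading aid, here is a hedged scalar reference of the same per-pixel math (pre-scale the offset with a rounding term, multiply by the weight, clamp at zero, shift, saturate to 8 bits); the function name and sample arguments are made up for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* scalar equivalent of one lane of the vector loop above (sketch) */
    static uint8_t weight_pixel(uint8_t src, int weight, int offset, int log2_denom)
    {
        int off = offset << log2_denom;         /* same pre-scaling as above */
        if (log2_denom)
            off += 1 << (log2_denom - 1);       /* rounding term */

        int v = src * weight + off;             /* weight and bias */
        if (v < 0)
            v = 0;                              /* clamp negative results */
        v >>= log2_denom;
        if (v > 255)
            v = 255;                            /* saturate to 8 bits */
        return (uint8_t)v;
    }

    int main(void)
    {
        /* hypothetical weight/offset/denominator */
        printf("%d\n", weight_pixel(100, 3, -10, 2));
        return 0;
    }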
FFmpeg
acc163c6ab52d2235767852262c64c7f6b273d1c
0
static void FUNC(flac_decorrelate_indep_c)(uint8_t **out, int32_t **in, int channels, int len, int shift) { sample *samples = (sample *) OUT(out); int i, j; for (j = 0; j < len; j++) for (i = 0; i < channels; i++) S(samples, i, j) = in[i][j] << shift; }
26,182
qemu
8f5d58ef2c92d7b82d9a6eeefd7c8854a183ba4a
1
void object_property_add_link(Object *obj, const char *name, const char *type, Object **child, void (*check)(Object *, const char *, Object *, Error **), ObjectPropertyLinkFlags flags, Error **errp) { Error *local_err = NULL; LinkProperty *prop = g_malloc(sizeof(*prop)); gchar *full_type; ObjectProperty *op; prop->child = child; prop->check = check; prop->flags = flags; full_type = g_strdup_printf("link<%s>", type); op = object_property_add(obj, name, full_type, object_get_link_property, check ? object_set_link_property : NULL, object_release_link_property, prop, &local_err); if (local_err) { error_propagate(errp, local_err); g_free(prop); goto out; } op->resolve = object_resolve_link_property; out: g_free(full_type); }
26,184
qemu
2b5d5953eec0cc541857c3df812bdf8421596ab2
1
static int alloc_refcount_block(BlockDriverState *bs, int64_t cluster_index, uint16_t **refcount_block) { BDRVQcowState *s = bs->opaque; unsigned int refcount_table_index; int ret; BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC); /* Find the refcount block for the given cluster */ refcount_table_index = cluster_index >> (s->cluster_bits - REFCOUNT_SHIFT); if (refcount_table_index < s->refcount_table_size) { uint64_t refcount_block_offset = s->refcount_table[refcount_table_index] & REFT_OFFSET_MASK; /* If it's already there, we're done */ if (refcount_block_offset) { return load_refcount_block(bs, refcount_block_offset, (void**) refcount_block); /* * If we came here, we need to allocate something. Something is at least * a cluster for the new refcount block. It may also include a new refcount * table if the old refcount table is too small. * * Note that allocating clusters here needs some special care: * * - We can't use the normal qcow2_alloc_clusters(), it would try to * increase the refcount and very likely we would end up with an endless * recursion. Instead we must place the refcount blocks in a way that * they can describe them themselves. * * - We need to consider that at this point we are inside update_refcounts * and potentially doing an initial refcount increase. This means that * some clusters have already been allocated by the caller, but their * refcount isn't accurate yet. If we allocate clusters for metadata, we * need to return -EAGAIN to signal the caller that it needs to restart * the search for free clusters. * * - alloc_clusters_noref and qcow2_free_clusters may load a different * refcount block into the cache */ *refcount_block = NULL; /* We write to the refcount table, so we might depend on L2 tables */ ret = qcow2_cache_flush(bs, s->l2_table_cache); if (ret < 0) { return ret; /* Allocate the refcount block itself and mark it as used */ int64_t new_block = alloc_clusters_noref(bs, s->cluster_size); if (new_block < 0) { return new_block; #ifdef DEBUG_ALLOC2 fprintf(stderr, "qcow2: Allocate refcount block %d for %" PRIx64 " at %" PRIx64 "\n", refcount_table_index, cluster_index << s->cluster_bits, new_block); #endif if (in_same_refcount_block(s, new_block, cluster_index << s->cluster_bits)) { /* Zero the new refcount block before updating it */ ret = qcow2_cache_get_empty(bs, s->refcount_block_cache, new_block, (void**) refcount_block); if (ret < 0) { goto fail_block; memset(*refcount_block, 0, s->cluster_size); /* The block describes itself, need to update the cache */ int block_index = (new_block >> s->cluster_bits) & ((1 << (s->cluster_bits - REFCOUNT_SHIFT)) - 1); (*refcount_block)[block_index] = cpu_to_be16(1); } else { /* Described somewhere else. This can recurse at most twice before we * arrive at a block that describes itself. 
*/ ret = update_refcount(bs, new_block, s->cluster_size, 1, QCOW2_DISCARD_NEVER); if (ret < 0) { goto fail_block; ret = qcow2_cache_flush(bs, s->refcount_block_cache); if (ret < 0) { goto fail_block; /* Initialize the new refcount block only after updating its refcount, * update_refcount uses the refcount cache itself */ ret = qcow2_cache_get_empty(bs, s->refcount_block_cache, new_block, (void**) refcount_block); if (ret < 0) { goto fail_block; memset(*refcount_block, 0, s->cluster_size); /* Now the new refcount block needs to be written to disk */ BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE); qcow2_cache_entry_mark_dirty(s->refcount_block_cache, *refcount_block); ret = qcow2_cache_flush(bs, s->refcount_block_cache); if (ret < 0) { goto fail_block; /* If the refcount table is big enough, just hook the block up there */ if (refcount_table_index < s->refcount_table_size) { uint64_t data64 = cpu_to_be64(new_block); BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_HOOKUP); ret = bdrv_pwrite_sync(bs->file, s->refcount_table_offset + refcount_table_index * sizeof(uint64_t), &data64, sizeof(data64)); if (ret < 0) { goto fail_block; s->refcount_table[refcount_table_index] = new_block; /* The new refcount block may be where the caller intended to put its * data, so let it restart the search. */ return -EAGAIN; ret = qcow2_cache_put(bs, s->refcount_block_cache, (void**) refcount_block); if (ret < 0) { goto fail_block; /* * If we come here, we need to grow the refcount table. Again, a new * refcount table needs some space and we can't simply allocate to avoid * endless recursion. * * Therefore let's grab new refcount blocks at the end of the image, which * will describe themselves and the new refcount table. This way we can * reference them only in the new table and do the switch to the new * refcount table at once without producing an inconsistent state in * between. 
*/ BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_GROW); /* Calculate the number of refcount blocks needed so far */ uint64_t refcount_block_clusters = 1 << (s->cluster_bits - REFCOUNT_SHIFT); uint64_t blocks_used = DIV_ROUND_UP(cluster_index, refcount_block_clusters); /* And now we need at least one block more for the new metadata */ uint64_t table_size = next_refcount_table_size(s, blocks_used + 1); uint64_t last_table_size; uint64_t blocks_clusters; do { uint64_t table_clusters = size_to_clusters(s, table_size * sizeof(uint64_t)); blocks_clusters = 1 + ((table_clusters + refcount_block_clusters - 1) / refcount_block_clusters); uint64_t meta_clusters = table_clusters + blocks_clusters; last_table_size = table_size; table_size = next_refcount_table_size(s, blocks_used + ((meta_clusters + refcount_block_clusters - 1) / refcount_block_clusters)); } while (last_table_size != table_size); #ifdef DEBUG_ALLOC2 fprintf(stderr, "qcow2: Grow refcount table %" PRId32 " => %" PRId64 "\n", s->refcount_table_size, table_size); #endif /* Create the new refcount table and blocks */ uint64_t meta_offset = (blocks_used * refcount_block_clusters) * s->cluster_size; uint64_t table_offset = meta_offset + blocks_clusters * s->cluster_size; uint16_t *new_blocks = g_malloc0(blocks_clusters * s->cluster_size); uint64_t *new_table = g_malloc0(table_size * sizeof(uint64_t)); /* Fill the new refcount table */ memcpy(new_table, s->refcount_table, s->refcount_table_size * sizeof(uint64_t)); new_table[refcount_table_index] = new_block; int i; for (i = 0; i < blocks_clusters; i++) { new_table[blocks_used + i] = meta_offset + (i * s->cluster_size); /* Fill the refcount blocks */ uint64_t table_clusters = size_to_clusters(s, table_size * sizeof(uint64_t)); int block = 0; for (i = 0; i < table_clusters + blocks_clusters; i++) { new_blocks[block++] = cpu_to_be16(1); /* Write refcount blocks to disk */ BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE_BLOCKS); ret = bdrv_pwrite_sync(bs->file, meta_offset, new_blocks, blocks_clusters * s->cluster_size); g_free(new_blocks); if (ret < 0) { goto fail_table; /* Write refcount table to disk */ for(i = 0; i < table_size; i++) { cpu_to_be64s(&new_table[i]); BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE_TABLE); ret = bdrv_pwrite_sync(bs->file, table_offset, new_table, table_size * sizeof(uint64_t)); if (ret < 0) { goto fail_table; for(i = 0; i < table_size; i++) { be64_to_cpus(&new_table[i]); /* Hook up the new refcount table in the qcow2 header */ uint8_t data[12]; cpu_to_be64w((uint64_t*)data, table_offset); cpu_to_be32w((uint32_t*)(data + 8), table_clusters); BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_SWITCH_TABLE); ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, refcount_table_offset), data, sizeof(data)); if (ret < 0) { goto fail_table; /* And switch it in memory */ uint64_t old_table_offset = s->refcount_table_offset; uint64_t old_table_size = s->refcount_table_size; g_free(s->refcount_table); s->refcount_table = new_table; s->refcount_table_size = table_size; s->refcount_table_offset = table_offset; /* Free old table. */ qcow2_free_clusters(bs, old_table_offset, old_table_size * sizeof(uint64_t), QCOW2_DISCARD_OTHER); ret = load_refcount_block(bs, new_block, (void**) refcount_block); if (ret < 0) { return ret; /* If we were trying to do the initial refcount update for some cluster * allocation, we might have used the same clusters to store newly * allocated metadata. Make the caller search some new space. 
*/ return -EAGAIN; fail_table: g_free(new_table); fail_block: if (*refcount_block != NULL) { qcow2_cache_put(bs, s->refcount_block_cache, (void**) refcount_block); return ret;
26,185
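Editorial note: the refcount code above repeatedly converts a cluster index into a refcount-table index plus a position inside one refcount block. That index arithmetic isolated into a standalone sketch, with hypothetical qcow2 parameters (cluster_bits = 16, 16-bit refcount entries):

    #include <stdint.h>
    #include <stdio.h>

    #define CLUSTER_BITS   16   /* hypothetical: 64 KiB clusters   */
    #define REFCOUNT_SHIFT 1    /* 16-bit refcount entries          */

    int main(void)
    {
        uint64_t cluster_index = 123456;    /* hypothetical cluster */

        /* how many refcount entries fit in one cluster */
        uint64_t entries_per_block = 1ULL << (CLUSTER_BITS - REFCOUNT_SHIFT);

        /* which refcount block covers this cluster ... */
        uint64_t table_index = cluster_index >> (CLUSTER_BITS - REFCOUNT_SHIFT);

        /* ... and where inside that block its entry lives */
        uint64_t block_index = cluster_index & (entries_per_block - 1);

        printf("table_index=%llu block_index=%llu\n",
               (unsigned long long)table_index,
               (unsigned long long)block_index);
        return 0;
    }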
qemu
d9bce9d99f4656ae0b0127f7472db9067b8f84ab
1
PPC_OP(test_ctrz) { T0 = (regs->ctr == 0); RETURN(); }
26,186
qemu
fe3c546c5ff2a6210f9a4d8561cc64051ca8603e
1
static void usb_net_handle_dataout(USBNetState *s, USBPacket *p) { int sz = sizeof(s->out_buf) - s->out_ptr; struct rndis_packet_msg_type *msg = (struct rndis_packet_msg_type *) s->out_buf; uint32_t len; #ifdef TRAFFIC_DEBUG fprintf(stderr, "usbnet: data out len %zu\n", p->iov.size); iov_hexdump(p->iov.iov, p->iov.niov, stderr, "usbnet", p->iov.size); #endif if (sz > p->iov.size) { sz = p->iov.size; } usb_packet_copy(p, &s->out_buf[s->out_ptr], sz); s->out_ptr += sz; if (!is_rndis(s)) { if (p->iov.size < 64) { qemu_send_packet(qemu_get_queue(s->nic), s->out_buf, s->out_ptr); s->out_ptr = 0; } return; } len = le32_to_cpu(msg->MessageLength); if (s->out_ptr < 8 || s->out_ptr < len) { return; } if (le32_to_cpu(msg->MessageType) == RNDIS_PACKET_MSG) { uint32_t offs = 8 + le32_to_cpu(msg->DataOffset); uint32_t size = le32_to_cpu(msg->DataLength); if (offs + size <= len) qemu_send_packet(qemu_get_queue(s->nic), s->out_buf + offs, size); } s->out_ptr -= len; memmove(s->out_buf, &s->out_buf[len], s->out_ptr); }
26,187
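Editorial note: before forwarding an RNDIS data message, the handler above compares the header-declared lengths with what has actually been buffered. A hedged, self-contained sketch of that style of validation; the struct layout and names are simplified stand-ins, not the real rndis_packet_msg_type:

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* simplified stand-in for the RNDIS packet message header */
    struct rndis_hdr {
        uint32_t msg_len;      /* total message length            */
        uint32_t data_offset;  /* payload offset, relative to +8  */
        uint32_t data_len;     /* payload length                  */
    };

    /* true if the payload described by the header fits entirely inside
     * the 'buffered' bytes received so far (sketch only) */
    static bool rndis_payload_fits(const struct rndis_hdr *h, uint32_t buffered)
    {
        if (h->msg_len < 8 || h->msg_len > buffered)
            return false;                       /* message not complete yet */
        uint64_t offs = 8ull + h->data_offset;  /* widen to avoid overflow  */
        uint64_t end  = offs + h->data_len;
        return end <= h->msg_len;               /* payload inside message   */
    }

    int main(void)
    {
        struct rndis_hdr h = { 64, 36, 20 };    /* hypothetical header */
        printf("%s\n", rndis_payload_fits(&h, 64) ? "fits" : "does not fit");
        return 0;
    }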
qemu
b923ab3112ed5ab47c2ff35776f17ab54c60d651
1
static void quiesce_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); SCLPEventClass *k = SCLP_EVENT_CLASS(klass); dc->reset = quiesce_reset; dc->vmsd = &vmstate_sclpquiesce; set_bit(DEVICE_CATEGORY_MISC, dc->categories); k->init = quiesce_init; k->get_send_mask = send_mask; k->get_receive_mask = receive_mask; k->can_handle_event = can_handle_event; k->read_event_data = read_event_data; k->write_event_data = NULL; }
26,188
FFmpeg
c9c7263e5820c957598643216c42be9b1c4f2d2b
0
static int mov_open_dref(MOVContext *c, AVIOContext **pb, const char *src, MOVDref *ref, AVIOInterruptCB *int_cb) { AVOpenCallback open_func = c->fc->open_cb; if (!open_func) open_func = ffio_open2_wrapper; /* try relative path, we do not try the absolute because it can leak information about our system to an attacker */ if (ref->nlvl_to > 0 && ref->nlvl_from > 0 && ref->path[0] != '/') { char filename[1025]; const char *src_path; int i, l; /* find a source dir */ src_path = strrchr(src, '/'); if (src_path) src_path++; else src_path = src; /* find a next level down to target */ for (i = 0, l = strlen(ref->path) - 1; l >= 0; l--) if (ref->path[l] == '/') { if (i == ref->nlvl_to - 1) break; else i++; } /* compose filename if next level down to target was found */ if (i == ref->nlvl_to - 1 && src_path - src < sizeof(filename)) { memcpy(filename, src, src_path - src); filename[src_path - src] = 0; for (i = 1; i < ref->nlvl_from; i++) av_strlcat(filename, "../", sizeof(filename)); av_strlcat(filename, ref->path + l + 1, sizeof(filename)); if (!c->use_absolute_path && !c->fc->open_cb) if(strstr(ref->path + l + 1, "..") || ref->nlvl_from > 1) return AVERROR(ENOENT); if (strlen(filename) + 1 == sizeof(filename)) return AVERROR(ENOENT); if (!open_func(c->fc, pb, filename, AVIO_FLAG_READ, int_cb, NULL)) return 0; } } else if (c->use_absolute_path) { av_log(c->fc, AV_LOG_WARNING, "Using absolute path on user request, " "this is a possible security issue\n"); if (!open_func(c->fc, pb, ref->path, AVIO_FLAG_READ, int_cb, NULL)) return 0; } else if (c->fc->open_cb) { if (!open_func(c->fc, pb, ref->path, AVIO_FLAG_READ, int_cb, NULL)) return 0; } else { av_log(c->fc, AV_LOG_ERROR, "Absolute path %s not tried for security reasons, " "set demuxer option use_absolute_path to allow absolute paths\n", ref->path); } return AVERROR(ENOENT); }
26,189
FFmpeg
aac8b76983e340bc744d3542d676f72efa3b474f
0
static void filter_mb_edgev( H264Context *h, uint8_t *pix, int stride, int16_t bS[4], int qp ) { int i, d; const int index_a = qp + h->slice_alpha_c0_offset; const int alpha = (alpha_table+52)[index_a]; const int beta = (beta_table+52)[qp + h->slice_beta_offset]; if( bS[0] < 4 ) { int8_t tc[4]; for(i=0; i<4; i++) tc[i] = bS[i] ? (tc0_table+52)[index_a][bS[i] - 1] : -1; h->s.dsp.h264_h_loop_filter_luma(pix, stride, alpha, beta, tc); } else { h->s.dsp.h264_h_loop_filter_luma_intra(pix, stride, alpha, beta); } }
26,190
qemu
1964a397063967acc5ce71a2a24ed26e74824ee1
0
int qemu_file_rate_limit(QEMUFile *f) { if (f->ops->rate_limit) return f->ops->rate_limit(f->opaque); return 0; }
26,192
qemu
850f49de9b57511dcaf2cd7e45059f8f38fadf3b
0
qcrypto_block_luks_create(QCryptoBlock *block, QCryptoBlockCreateOptions *options, const char *optprefix, QCryptoBlockInitFunc initfunc, QCryptoBlockWriteFunc writefunc, void *opaque, Error **errp) { QCryptoBlockLUKS *luks; QCryptoBlockCreateOptionsLUKS luks_opts; Error *local_err = NULL; uint8_t *masterkey = NULL; uint8_t *slotkey = NULL; uint8_t *splitkey = NULL; size_t splitkeylen = 0; size_t i; QCryptoCipher *cipher = NULL; QCryptoIVGen *ivgen = NULL; char *password; const char *cipher_alg; const char *cipher_mode; const char *ivgen_alg; const char *ivgen_hash_alg = NULL; const char *hash_alg; char *cipher_mode_spec = NULL; QCryptoCipherAlgorithm ivcipheralg = 0; uint64_t iters; memcpy(&luks_opts, &options->u.luks, sizeof(luks_opts)); if (!luks_opts.has_iter_time) { luks_opts.iter_time = 2000; } if (!luks_opts.has_cipher_alg) { luks_opts.cipher_alg = QCRYPTO_CIPHER_ALG_AES_256; } if (!luks_opts.has_cipher_mode) { luks_opts.cipher_mode = QCRYPTO_CIPHER_MODE_XTS; } if (!luks_opts.has_ivgen_alg) { luks_opts.ivgen_alg = QCRYPTO_IVGEN_ALG_PLAIN64; } if (!luks_opts.has_hash_alg) { luks_opts.hash_alg = QCRYPTO_HASH_ALG_SHA256; } if (luks_opts.ivgen_alg == QCRYPTO_IVGEN_ALG_ESSIV) { if (!luks_opts.has_ivgen_hash_alg) { luks_opts.ivgen_hash_alg = QCRYPTO_HASH_ALG_SHA256; luks_opts.has_ivgen_hash_alg = true; } } /* Note we're allowing ivgen_hash_alg to be set even for * non-essiv iv generators that don't need a hash. It will * be silently ignored, for compatibility with dm-crypt */ if (!options->u.luks.key_secret) { error_setg(errp, "Parameter '%skey-secret' is required for cipher", optprefix ? optprefix : ""); return -1; } password = qcrypto_secret_lookup_as_utf8(luks_opts.key_secret, errp); if (!password) { return -1; } luks = g_new0(QCryptoBlockLUKS, 1); block->opaque = luks; memcpy(luks->header.magic, qcrypto_block_luks_magic, QCRYPTO_BLOCK_LUKS_MAGIC_LEN); /* We populate the header in native endianness initially and * then convert everything to big endian just before writing * it out to disk */ luks->header.version = QCRYPTO_BLOCK_LUKS_VERSION; qcrypto_block_luks_uuid_gen(luks->header.uuid); cipher_alg = qcrypto_block_luks_cipher_alg_lookup(luks_opts.cipher_alg, errp); if (!cipher_alg) { goto error; } cipher_mode = QCryptoCipherMode_str(luks_opts.cipher_mode); ivgen_alg = QCryptoIVGenAlgorithm_str(luks_opts.ivgen_alg); if (luks_opts.has_ivgen_hash_alg) { ivgen_hash_alg = QCryptoHashAlgorithm_str(luks_opts.ivgen_hash_alg); cipher_mode_spec = g_strdup_printf("%s-%s:%s", cipher_mode, ivgen_alg, ivgen_hash_alg); } else { cipher_mode_spec = g_strdup_printf("%s-%s", cipher_mode, ivgen_alg); } hash_alg = QCryptoHashAlgorithm_str(luks_opts.hash_alg); if (strlen(cipher_alg) >= QCRYPTO_BLOCK_LUKS_CIPHER_NAME_LEN) { error_setg(errp, "Cipher name '%s' is too long for LUKS header", cipher_alg); goto error; } if (strlen(cipher_mode_spec) >= QCRYPTO_BLOCK_LUKS_CIPHER_MODE_LEN) { error_setg(errp, "Cipher mode '%s' is too long for LUKS header", cipher_mode_spec); goto error; } if (strlen(hash_alg) >= QCRYPTO_BLOCK_LUKS_HASH_SPEC_LEN) { error_setg(errp, "Hash name '%s' is too long for LUKS header", hash_alg); goto error; } if (luks_opts.ivgen_alg == QCRYPTO_IVGEN_ALG_ESSIV) { ivcipheralg = qcrypto_block_luks_essiv_cipher(luks_opts.cipher_alg, luks_opts.ivgen_hash_alg, &local_err); if (local_err) { error_propagate(errp, local_err); goto error; } } else { ivcipheralg = luks_opts.cipher_alg; } strcpy(luks->header.cipher_name, cipher_alg); strcpy(luks->header.cipher_mode, cipher_mode_spec); 
strcpy(luks->header.hash_spec, hash_alg); luks->header.key_bytes = qcrypto_cipher_get_key_len(luks_opts.cipher_alg); if (luks_opts.cipher_mode == QCRYPTO_CIPHER_MODE_XTS) { luks->header.key_bytes *= 2; } /* Generate the salt used for hashing the master key * with PBKDF later */ if (qcrypto_random_bytes(luks->header.master_key_salt, QCRYPTO_BLOCK_LUKS_SALT_LEN, errp) < 0) { goto error; } /* Generate random master key */ masterkey = g_new0(uint8_t, luks->header.key_bytes); if (qcrypto_random_bytes(masterkey, luks->header.key_bytes, errp) < 0) { goto error; } /* Setup the block device payload encryption objects */ block->cipher = qcrypto_cipher_new(luks_opts.cipher_alg, luks_opts.cipher_mode, masterkey, luks->header.key_bytes, errp); if (!block->cipher) { goto error; } block->kdfhash = luks_opts.hash_alg; block->niv = qcrypto_cipher_get_iv_len(luks_opts.cipher_alg, luks_opts.cipher_mode); block->ivgen = qcrypto_ivgen_new(luks_opts.ivgen_alg, ivcipheralg, luks_opts.ivgen_hash_alg, masterkey, luks->header.key_bytes, errp); if (!block->ivgen) { goto error; } /* Determine how many iterations we need to hash the master * key, in order to have 1 second of compute time used */ iters = qcrypto_pbkdf2_count_iters(luks_opts.hash_alg, masterkey, luks->header.key_bytes, luks->header.master_key_salt, QCRYPTO_BLOCK_LUKS_SALT_LEN, QCRYPTO_BLOCK_LUKS_DIGEST_LEN, &local_err); if (local_err) { error_propagate(errp, local_err); goto error; } if (iters > (ULLONG_MAX / luks_opts.iter_time)) { error_setg_errno(errp, ERANGE, "PBKDF iterations %llu too large to scale", (unsigned long long)iters); goto error; } /* iter_time was in millis, but count_iters reported for secs */ iters = iters * luks_opts.iter_time / 1000; /* Why /= 8 ? That matches cryptsetup, but there's no * explanation why they chose /= 8... Probably so that * if all 8 keyslots are active we only spend 1 second * in total time to check all keys */ iters /= 8; if (iters > UINT32_MAX) { error_setg_errno(errp, ERANGE, "PBKDF iterations %llu larger than %u", (unsigned long long)iters, UINT32_MAX); goto error; } iters = MAX(iters, QCRYPTO_BLOCK_LUKS_MIN_MASTER_KEY_ITERS); luks->header.master_key_iterations = iters; /* Hash the master key, saving the result in the LUKS * header. This hash is used when opening the encrypted * device to verify that the user password unlocked a * valid master key */ if (qcrypto_pbkdf2(luks_opts.hash_alg, masterkey, luks->header.key_bytes, luks->header.master_key_salt, QCRYPTO_BLOCK_LUKS_SALT_LEN, luks->header.master_key_iterations, luks->header.master_key_digest, QCRYPTO_BLOCK_LUKS_DIGEST_LEN, errp) < 0) { goto error; } /* Although LUKS has multiple key slots, we're just going * to use the first key slot */ splitkeylen = luks->header.key_bytes * QCRYPTO_BLOCK_LUKS_STRIPES; for (i = 0; i < QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS; i++) { luks->header.key_slots[i].active = i == 0 ? QCRYPTO_BLOCK_LUKS_KEY_SLOT_ENABLED : QCRYPTO_BLOCK_LUKS_KEY_SLOT_DISABLED; luks->header.key_slots[i].stripes = QCRYPTO_BLOCK_LUKS_STRIPES; /* This calculation doesn't match that shown in the spec, * but instead follows the cryptsetup implementation. 
*/ luks->header.key_slots[i].key_offset = (QCRYPTO_BLOCK_LUKS_KEY_SLOT_OFFSET / QCRYPTO_BLOCK_LUKS_SECTOR_SIZE) + (ROUND_UP(DIV_ROUND_UP(splitkeylen, QCRYPTO_BLOCK_LUKS_SECTOR_SIZE), (QCRYPTO_BLOCK_LUKS_KEY_SLOT_OFFSET / QCRYPTO_BLOCK_LUKS_SECTOR_SIZE)) * i); } if (qcrypto_random_bytes(luks->header.key_slots[0].salt, QCRYPTO_BLOCK_LUKS_SALT_LEN, errp) < 0) { goto error; } /* Again we determine how many iterations are required to * hash the user password while consuming 1 second of compute * time */ iters = qcrypto_pbkdf2_count_iters(luks_opts.hash_alg, (uint8_t *)password, strlen(password), luks->header.key_slots[0].salt, QCRYPTO_BLOCK_LUKS_SALT_LEN, luks->header.key_bytes, &local_err); if (local_err) { error_propagate(errp, local_err); goto error; } if (iters > (ULLONG_MAX / luks_opts.iter_time)) { error_setg_errno(errp, ERANGE, "PBKDF iterations %llu too large to scale", (unsigned long long)iters); goto error; } /* iter_time was in millis, but count_iters reported for secs */ iters = iters * luks_opts.iter_time / 1000; if (iters > UINT32_MAX) { error_setg_errno(errp, ERANGE, "PBKDF iterations %llu larger than %u", (unsigned long long)iters, UINT32_MAX); goto error; } luks->header.key_slots[0].iterations = MAX(iters, QCRYPTO_BLOCK_LUKS_MIN_SLOT_KEY_ITERS); /* Generate a key that we'll use to encrypt the master * key, from the user's password */ slotkey = g_new0(uint8_t, luks->header.key_bytes); if (qcrypto_pbkdf2(luks_opts.hash_alg, (uint8_t *)password, strlen(password), luks->header.key_slots[0].salt, QCRYPTO_BLOCK_LUKS_SALT_LEN, luks->header.key_slots[0].iterations, slotkey, luks->header.key_bytes, errp) < 0) { goto error; } /* Setup the encryption objects needed to encrypt the * master key material */ cipher = qcrypto_cipher_new(luks_opts.cipher_alg, luks_opts.cipher_mode, slotkey, luks->header.key_bytes, errp); if (!cipher) { goto error; } ivgen = qcrypto_ivgen_new(luks_opts.ivgen_alg, ivcipheralg, luks_opts.ivgen_hash_alg, slotkey, luks->header.key_bytes, errp); if (!ivgen) { goto error; } /* Before storing the master key, we need to vastly * increase its size, as protection against forensic * disk data recovery */ splitkey = g_new0(uint8_t, splitkeylen); if (qcrypto_afsplit_encode(luks_opts.hash_alg, luks->header.key_bytes, luks->header.key_slots[0].stripes, masterkey, splitkey, errp) < 0) { goto error; } /* Now we encrypt the split master key with the key generated * from the user's password, before storing it */ if (qcrypto_block_encrypt_helper(cipher, block->niv, ivgen, QCRYPTO_BLOCK_LUKS_SECTOR_SIZE, 0, splitkey, splitkeylen, errp) < 0) { goto error; } /* The total size of the LUKS headers is the partition header + key * slot headers, rounded up to the nearest sector, combined with * the size of each master key material region, also rounded up * to the nearest sector */ luks->header.payload_offset = (QCRYPTO_BLOCK_LUKS_KEY_SLOT_OFFSET / QCRYPTO_BLOCK_LUKS_SECTOR_SIZE) + (ROUND_UP(DIV_ROUND_UP(splitkeylen, QCRYPTO_BLOCK_LUKS_SECTOR_SIZE), (QCRYPTO_BLOCK_LUKS_KEY_SLOT_OFFSET / QCRYPTO_BLOCK_LUKS_SECTOR_SIZE)) * QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS); block->payload_offset = luks->header.payload_offset * QCRYPTO_BLOCK_LUKS_SECTOR_SIZE; /* Reserve header space to match payload offset */ initfunc(block, block->payload_offset, opaque, &local_err); if (local_err) { error_propagate(errp, local_err); goto error; } /* Everything on disk uses Big Endian, so flip header fields * before writing them */ cpu_to_be16s(&luks->header.version); cpu_to_be32s(&luks->header.payload_offset); 
cpu_to_be32s(&luks->header.key_bytes); cpu_to_be32s(&luks->header.master_key_iterations); for (i = 0; i < QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS; i++) { cpu_to_be32s(&luks->header.key_slots[i].active); cpu_to_be32s(&luks->header.key_slots[i].iterations); cpu_to_be32s(&luks->header.key_slots[i].key_offset); cpu_to_be32s(&luks->header.key_slots[i].stripes); } /* Write out the partition header and key slot headers */ writefunc(block, 0, (const uint8_t *)&luks->header, sizeof(luks->header), opaque, &local_err); /* Delay checking local_err until we've byte-swapped */ /* Byte swap the header back to native, in case we need * to read it again later */ be16_to_cpus(&luks->header.version); be32_to_cpus(&luks->header.payload_offset); be32_to_cpus(&luks->header.key_bytes); be32_to_cpus(&luks->header.master_key_iterations); for (i = 0; i < QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS; i++) { be32_to_cpus(&luks->header.key_slots[i].active); be32_to_cpus(&luks->header.key_slots[i].iterations); be32_to_cpus(&luks->header.key_slots[i].key_offset); be32_to_cpus(&luks->header.key_slots[i].stripes); } if (local_err) { error_propagate(errp, local_err); goto error; } /* Write out the master key material, starting at the * sector immediately following the partition header. */ if (writefunc(block, luks->header.key_slots[0].key_offset * QCRYPTO_BLOCK_LUKS_SECTOR_SIZE, splitkey, splitkeylen, opaque, errp) != splitkeylen) { goto error; } luks->cipher_alg = luks_opts.cipher_alg; luks->cipher_mode = luks_opts.cipher_mode; luks->ivgen_alg = luks_opts.ivgen_alg; luks->ivgen_hash_alg = luks_opts.ivgen_hash_alg; luks->hash_alg = luks_opts.hash_alg; memset(masterkey, 0, luks->header.key_bytes); g_free(masterkey); memset(slotkey, 0, luks->header.key_bytes); g_free(slotkey); g_free(splitkey); g_free(password); g_free(cipher_mode_spec); qcrypto_ivgen_free(ivgen); qcrypto_cipher_free(cipher); return 0; error: if (masterkey) { memset(masterkey, 0, luks->header.key_bytes); } g_free(masterkey); if (slotkey) { memset(slotkey, 0, luks->header.key_bytes); } g_free(slotkey); g_free(splitkey); g_free(password); g_free(cipher_mode_spec); qcrypto_ivgen_free(ivgen); qcrypto_cipher_free(cipher); g_free(luks); return -1; }
26,193
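Editorial note: the LUKS creation path above benchmarks PBKDF2 for one second of compute and then rescales the iteration count to the requested iter_time in milliseconds, guarding against overflow and clamping to a floor (the master-key path additionally divides by 8 to budget across the 8 keyslots). A standalone sketch of that scaling step; the names and the floor value are placeholders, not the real constants:

    #include <stdint.h>
    #include <limits.h>
    #include <stdio.h>

    #define MIN_ITERS 1000ULL   /* placeholder floor, not the real constant */

    /* Scale 'iters' (measured for 1000 ms of compute) to 'iter_time_ms'.
     * Returns 0 on success, -1 if the multiplication would overflow or the
     * result does not fit in 32 bits, mirroring the checks above. */
    static int scale_pbkdf_iters(uint64_t iters, uint64_t iter_time_ms,
                                 uint32_t *out)
    {
        if (iter_time_ms == 0 || iters > ULLONG_MAX / iter_time_ms)
            return -1;                          /* would overflow          */
        iters = iters * iter_time_ms / 1000;    /* ms-based rescale        */
        if (iters > UINT32_MAX)
            return -1;                          /* header field is 32 bits */
        if (iters < MIN_ITERS)
            iters = MIN_ITERS;                  /* enforce a minimum       */
        *out = (uint32_t)iters;
        return 0;
    }

    int main(void)
    {
        uint32_t out;
        /* hypothetical: 250000 iterations/s measured, 2000 ms requested */
        if (scale_pbkdf_iters(250000, 2000, &out) == 0)
            printf("iterations=%u\n", out);
        return 0;
    }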
qemu
424e4a87d20027acf52e65f322a2100460162a49
0
static void pc_cmos_init_late(void *opaque) { pc_cmos_init_late_arg *arg = opaque; ISADevice *s = arg->rtc_state; int16_t cylinders; int8_t heads, sectors; int val; int i, trans; Object *container; CheckFdcState state = { 0 }; val = 0; if (ide_get_geometry(arg->idebus[0], 0, &cylinders, &heads, &sectors) >= 0) { cmos_init_hd(s, 0x19, 0x1b, cylinders, heads, sectors); val |= 0xf0; } if (ide_get_geometry(arg->idebus[0], 1, &cylinders, &heads, &sectors) >= 0) { cmos_init_hd(s, 0x1a, 0x24, cylinders, heads, sectors); val |= 0x0f; } rtc_set_memory(s, 0x12, val); val = 0; for (i = 0; i < 4; i++) { /* NOTE: ide_get_geometry() returns the physical geometry. It is always such that: 1 <= sects <= 63, 1 <= heads <= 16, 1 <= cylinders <= 16383. The BIOS geometry can be different if a translation is done. */ if (ide_get_geometry(arg->idebus[i / 2], i % 2, &cylinders, &heads, &sectors) >= 0) { trans = ide_get_bios_chs_trans(arg->idebus[i / 2], i % 2) - 1; assert((trans & ~3) == 0); val |= trans << (i * 2); } } rtc_set_memory(s, 0x39, val); /* * Locate the FDC at IO address 0x3f0, and configure the CMOS registers * accordingly. */ for (i = 0; i < ARRAY_SIZE(fdc_container_path); i++) { container = container_get(qdev_get_machine(), fdc_container_path[i]); object_child_foreach(container, check_fdc, &state); } if (state.multiple) { error_report("warning: multiple floppy disk controllers with " "iobase=0x3f0 have been found;\n" "the one being picked for CMOS setup might not reflect " "your intent"); } pc_cmos_init_floppy(s, state.floppy); qemu_unregister_reset(pc_cmos_init_late, opaque); }
26,194
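Editorial note: the CMOS setup above packs one 2-bit BIOS translation code per IDE drive into a single byte (register 0x39). A minimal sketch of that packing, with hypothetical translation values:

    #include <stdio.h>

    int main(void)
    {
        /* hypothetical translation codes for 4 drives, each 0..3 */
        int trans[4] = { 1, 0, 2, 3 };
        int val = 0;
        int i;

        for (i = 0; i < 4; i++) {
            val |= (trans[i] & 3) << (i * 2);   /* 2 bits per drive */
        }

        printf("cmos 0x39 = 0x%02x\n", val);    /* 0xe1 for the values above */
        return 0;
    }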
qemu
2bd3e04c3b3c76d573435a299a4d85bad0021a90
0
static void kvm_mce_broadcast_rest(CPUState *env) { CPUState *cenv; int family, model, cpuver = env->cpuid_version; family = (cpuver >> 8) & 0xf; model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0xf); /* Broadcast MCA signal for processor version 06H_EH and above */ if ((family == 6 && model >= 14) || family > 6) { for (cenv = first_cpu; cenv != NULL; cenv = cenv->next_cpu) { if (cenv == env) { continue; } kvm_inject_x86_mce(cenv, 1, MCI_STATUS_VAL | MCI_STATUS_UC, MCG_STATUS_MCIP | MCG_STATUS_RIPV, 0, 0, ABORT_ON_ERROR); } } }
26,195
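Editorial note: the broadcast decision above depends on decoding family and model from the x86 CPUID version dword, including the extended-model nibble. The same bit slicing isolated into a standalone sketch (the sample cpuid value is hypothetical):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t cpuid_version = 0x000206c2;    /* hypothetical CPUID.1 EAX */

        int family = (cpuid_version >> 8) & 0xf;
        /* extended model (bits 19:16) forms the high nibble of the model */
        int model  = ((cpuid_version >> 12) & 0xf0) + ((cpuid_version >> 4) & 0xf);

        printf("family=%d model=%d\n", family, model);   /* 6 and 44 here */
        return 0;
    }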
qemu
a8170e5e97ad17ca169c64ba87ae2f53850dab4c
0
static struct omap_lpg_s *omap_lpg_init(MemoryRegion *system_memory, target_phys_addr_t base, omap_clk clk) { struct omap_lpg_s *s = (struct omap_lpg_s *) g_malloc0(sizeof(struct omap_lpg_s)); s->tm = qemu_new_timer_ms(vm_clock, omap_lpg_tick, s); omap_lpg_reset(s); memory_region_init_io(&s->iomem, &omap_lpg_ops, s, "omap-lpg", 0x800); memory_region_add_subregion(system_memory, base, &s->iomem); omap_clk_adduser(clk, qemu_allocate_irqs(omap_lpg_clk_update, s, 1)[0]); return s; }
26,196
qemu
a8170e5e97ad17ca169c64ba87ae2f53850dab4c
0
static int vapic_enable(VAPICROMState *s, CPUX86State *env) { int cpu_number = get_kpcr_number(env); target_phys_addr_t vapic_paddr; static const uint8_t enabled = 1; if (cpu_number < 0) { return -1; } vapic_paddr = s->vapic_paddr + (((target_phys_addr_t)cpu_number) << VAPIC_CPU_SHIFT); cpu_physical_memory_rw(vapic_paddr + offsetof(VAPICState, enabled), (void *)&enabled, sizeof(enabled), 1); apic_enable_vapic(env->apic_state, vapic_paddr); s->state = VAPIC_ACTIVE; return 0; }
26,197
qemu
a8170e5e97ad17ca169c64ba87ae2f53850dab4c
0
static void omap_id_write(void *opaque, target_phys_addr_t addr, uint64_t value, unsigned size) { if (size != 4) { return omap_badwidth_write32(opaque, addr, value); } OMAP_BAD_REG(addr); }
26,198
qemu
4be746345f13e99e468c60acbd3a355e8183e3ce
0
static void scsi_unmap_complete(void *opaque, int ret) { UnmapCBData *data = opaque; SCSIDiskReq *r = data->r; SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); uint64_t sector_num; uint32_t nb_sectors; r->req.aiocb = NULL; if (r->req.io_canceled) { scsi_req_cancel_complete(&r->req); goto done; } if (ret < 0) { if (scsi_handle_rw_error(r, -ret)) { goto done; } } if (data->count > 0) { sector_num = ldq_be_p(&data->inbuf[0]); nb_sectors = ldl_be_p(&data->inbuf[8]) & 0xffffffffULL; if (!check_lba_range(s, sector_num, nb_sectors)) { scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); goto done; } r->req.aiocb = bdrv_aio_discard(s->qdev.conf.bs, sector_num * (s->qdev.blocksize / 512), nb_sectors * (s->qdev.blocksize / 512), scsi_unmap_complete, data); data->count--; data->inbuf += 16; return; } scsi_req_complete(&r->req, GOOD); done: scsi_req_unref(&r->req); g_free(data); }
26,200
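Editorial note: each UNMAP block descriptor walked above is 16 bytes: an 8-byte big-endian starting LBA, a 4-byte big-endian block count, and 4 reserved bytes. A self-contained sketch of parsing one descriptor from a raw buffer without the QEMU helpers (the sample bytes are hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    /* read big-endian integers from a byte buffer */
    static uint64_t be64(const uint8_t *p)
    {
        uint64_t v = 0;
        for (int i = 0; i < 8; i++)
            v = (v << 8) | p[i];
        return v;
    }

    static uint32_t be32(const uint8_t *p)
    {
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
    }

    int main(void)
    {
        /* one hypothetical 16-byte UNMAP block descriptor */
        uint8_t desc[16] = { 0, 0, 0, 0, 0, 0, 0x10, 0x00,   /* LBA   0x1000 */
                             0, 0, 0x01, 0x00,               /* count 0x100  */
                             0, 0, 0, 0 };                   /* reserved     */

        uint64_t lba   = be64(&desc[0]);
        uint32_t count = be32(&desc[8]);

        printf("lba=%llu count=%u\n", (unsigned long long)lba, count);
        return 0;
    }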
FFmpeg
e55ed689a264c78f332745598ea8c58a3422ee13
0
static void selfTest(uint8_t *src[4], int stride[4], int w, int h){ enum PixelFormat srcFormat, dstFormat; int srcW, srcH, dstW, dstH; int flags; for (srcFormat = 0; srcFormat < PIX_FMT_NB; srcFormat++) { for (dstFormat = 0; dstFormat < PIX_FMT_NB; dstFormat++) { printf("%s -> %s\n", sws_format_name(srcFormat), sws_format_name(dstFormat)); fflush(stdout); srcW= w; srcH= h; for (dstW=w - w/3; dstW<= 4*w/3; dstW+= w/3){ for (dstH=h - h/3; dstH<= 4*h/3; dstH+= h/3){ for (flags=1; flags<33; flags*=2) { int res; res = doTest(src, stride, w, h, srcFormat, dstFormat, srcW, srcH, dstW, dstH, flags); if (res < 0) { dstW = 4 * w / 3; dstH = 4 * h / 3; flags = 33; } } } } } } }
26,201
qemu
4baef2679e029c76707be1e2ed54bf3dd21693fe
0
int qemu_strtoi64(const char *nptr, const char **endptr, int base, int64_t *result) { char *ep; int err = 0; if (!nptr) { if (endptr) { *endptr = nptr; } err = -EINVAL; } else { errno = 0; /* FIXME This assumes int64_t is long long */ *result = strtoll(nptr, &ep, base); err = check_strtox_error(nptr, ep, endptr, errno); } return err; }
26,204
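Editorial note: qemu_strtoi64() above wraps strtoll() with errno and end-pointer handling. For comparison, a minimal direct use of strtoll with the usual error checks, independent of any QEMU helpers (the input string is hypothetical):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        const char *s = "12345xyz";   /* hypothetical input */
        char *end;

        errno = 0;
        long long v = strtoll(s, &end, 10);

        if (end == s) {
            printf("no digits parsed\n");
        } else if (errno == ERANGE) {
            printf("value out of range\n");
        } else {
            /* 'end' points at the first unparsed character ("xyz" here) */
            printf("value=%lld rest=\"%s\"\n", v, end);
        }
        return 0;
    }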
qemu
88571882516a7cb4291a329c537eb79fd126e1f2
0
static int qemu_rdma_exchange_get_response(RDMAContext *rdma, RDMAControlHeader *head, int expecting, int idx) { int ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RECV_CONTROL + idx); if (ret < 0) { fprintf(stderr, "rdma migration: recv polling control error!\n"); return ret; } network_to_control((void *) rdma->wr_data[idx].control); memcpy(head, rdma->wr_data[idx].control, sizeof(RDMAControlHeader)); DDDPRINTF("CONTROL: %s receiving...\n", control_desc[expecting]); if (expecting == RDMA_CONTROL_NONE) { DDDPRINTF("Surprise: got %s (%d)\n", control_desc[head->type], head->type); } else if (head->type != expecting || head->type == RDMA_CONTROL_ERROR) { fprintf(stderr, "Was expecting a %s (%d) control message" ", but got: %s (%d), length: %d\n", control_desc[expecting], expecting, control_desc[head->type], head->type, head->len); return -EIO; } if (head->len > RDMA_CONTROL_MAX_BUFFER - sizeof(*head)) { fprintf(stderr, "too long length: %d\n", head->len); return -EINVAL; } return 0; }
26,205
qemu
a8170e5e97ad17ca169c64ba87ae2f53850dab4c
0
static void omap_tcmi_init(MemoryRegion *memory, target_phys_addr_t base, struct omap_mpu_state_s *mpu) { memory_region_init_io(&mpu->tcmi_iomem, &omap_tcmi_ops, mpu, "omap-tcmi", 0x100); memory_region_add_subregion(memory, base, &mpu->tcmi_iomem); omap_tcmi_reset(mpu); }
26,206
qemu
fc9f38c3c0f42b7e98957b646976ee5b63f23806
0
static void spapr_machine_2_5_class_options(MachineClass *mc) { sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc); mc->alias = "pseries"; mc->is_default = 1; smc->dr_lmb_enabled = true; }
26,207
qemu
e1d177b922f52569e900e96d611caa09655bdec9
1
static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1) { uint32_t insn, imm, shift, offset; uint32_t rd, rn, rm, rs; TCGv tmp; TCGv tmp2; TCGv tmp3; TCGv addr; TCGv_i64 tmp64; int op; int shiftop; int conds; int logic_cc; if (!(arm_feature(env, ARM_FEATURE_THUMB2) || arm_feature (env, ARM_FEATURE_M))) { /* Thumb-1 cores may need to treat bl and blx as a pair of 16-bit instructions to get correct prefetch abort behavior. */ insn = insn_hw1; if ((insn & (1 << 12)) == 0) { /* Second half of blx. */ offset = ((insn & 0x7ff) << 1); tmp = load_reg(s, 14); tcg_gen_addi_i32(tmp, tmp, offset); tcg_gen_andi_i32(tmp, tmp, 0xfffffffc); tmp2 = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp2, s->pc | 1); store_reg(s, 14, tmp2); gen_bx(s, tmp); return 0; } if (insn & (1 << 11)) { /* Second half of bl. */ offset = ((insn & 0x7ff) << 1) | 1; tmp = load_reg(s, 14); tcg_gen_addi_i32(tmp, tmp, offset); tmp2 = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp2, s->pc | 1); store_reg(s, 14, tmp2); gen_bx(s, tmp); return 0; } if ((s->pc & ~TARGET_PAGE_MASK) == 0) { /* Instruction spans a page boundary. Implement it as two 16-bit instructions in case the second half causes an prefetch abort. */ offset = ((int32_t)insn << 21) >> 9; tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset); return 0; } /* Fall through to 32-bit decode. */ } insn = lduw_code(s->pc); s->pc += 2; insn |= (uint32_t)insn_hw1 << 16; if ((insn & 0xf800e800) != 0xf000e800) { ARCH(6T2); } rn = (insn >> 16) & 0xf; rs = (insn >> 12) & 0xf; rd = (insn >> 8) & 0xf; rm = insn & 0xf; switch ((insn >> 25) & 0xf) { case 0: case 1: case 2: case 3: /* 16-bit instructions. Should never happen. */ abort(); case 4: if (insn & (1 << 22)) { /* Other load/store, table branch. */ if (insn & 0x01200000) { /* Load/store doubleword. */ if (rn == 15) { addr = tcg_temp_new_i32(); tcg_gen_movi_i32(addr, s->pc & ~3); } else { addr = load_reg(s, rn); } offset = (insn & 0xff) * 4; if ((insn & (1 << 23)) == 0) offset = -offset; if (insn & (1 << 24)) { tcg_gen_addi_i32(addr, addr, offset); offset = 0; } if (insn & (1 << 20)) { /* ldrd */ tmp = gen_ld32(addr, IS_USER(s)); store_reg(s, rs, tmp); tcg_gen_addi_i32(addr, addr, 4); tmp = gen_ld32(addr, IS_USER(s)); store_reg(s, rd, tmp); } else { /* strd */ tmp = load_reg(s, rs); gen_st32(tmp, addr, IS_USER(s)); tcg_gen_addi_i32(addr, addr, 4); tmp = load_reg(s, rd); gen_st32(tmp, addr, IS_USER(s)); } if (insn & (1 << 21)) { /* Base writeback. */ if (rn == 15) goto illegal_op; tcg_gen_addi_i32(addr, addr, offset - 4); store_reg(s, rn, addr); } else { tcg_temp_free_i32(addr); } } else if ((insn & (1 << 23)) == 0) { /* Load/store exclusive word. */ addr = tcg_temp_local_new(); load_reg_var(s, addr, rn); tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2); if (insn & (1 << 20)) { gen_load_exclusive(s, rs, 15, addr, 2); } else { gen_store_exclusive(s, rd, rs, 15, addr, 2); } tcg_temp_free(addr); } else if ((insn & (1 << 6)) == 0) { /* Table Branch. */ if (rn == 15) { addr = tcg_temp_new_i32(); tcg_gen_movi_i32(addr, s->pc); } else { addr = load_reg(s, rn); } tmp = load_reg(s, rm); tcg_gen_add_i32(addr, addr, tmp); if (insn & (1 << 4)) { /* tbh */ tcg_gen_add_i32(addr, addr, tmp); tcg_temp_free_i32(tmp); tmp = gen_ld16u(addr, IS_USER(s)); } else { /* tbb */ tcg_temp_free_i32(tmp); tmp = gen_ld8u(addr, IS_USER(s)); } tcg_temp_free_i32(addr); tcg_gen_shli_i32(tmp, tmp, 1); tcg_gen_addi_i32(tmp, tmp, s->pc); store_reg(s, 15, tmp); } else { /* Load/store exclusive byte/halfword/doubleword. 
*/ ARCH(7); op = (insn >> 4) & 0x3; if (op == 2) { goto illegal_op; } addr = tcg_temp_local_new(); load_reg_var(s, addr, rn); if (insn & (1 << 20)) { gen_load_exclusive(s, rs, rd, addr, op); } else { gen_store_exclusive(s, rm, rs, rd, addr, op); } tcg_temp_free(addr); } } else { /* Load/store multiple, RFE, SRS. */ if (((insn >> 23) & 1) == ((insn >> 24) & 1)) { /* Not available in user mode. */ if (IS_USER(s)) goto illegal_op; if (insn & (1 << 20)) { /* rfe */ addr = load_reg(s, rn); if ((insn & (1 << 24)) == 0) tcg_gen_addi_i32(addr, addr, -8); /* Load PC into tmp and CPSR into tmp2. */ tmp = gen_ld32(addr, 0); tcg_gen_addi_i32(addr, addr, 4); tmp2 = gen_ld32(addr, 0); if (insn & (1 << 21)) { /* Base writeback. */ if (insn & (1 << 24)) { tcg_gen_addi_i32(addr, addr, 4); } else { tcg_gen_addi_i32(addr, addr, -4); } store_reg(s, rn, addr); } else { tcg_temp_free_i32(addr); } gen_rfe(s, tmp, tmp2); } else { /* srs */ op = (insn & 0x1f); addr = tcg_temp_new_i32(); tmp = tcg_const_i32(op); gen_helper_get_r13_banked(addr, cpu_env, tmp); tcg_temp_free_i32(tmp); if ((insn & (1 << 24)) == 0) { tcg_gen_addi_i32(addr, addr, -8); } tmp = load_reg(s, 14); gen_st32(tmp, addr, 0); tcg_gen_addi_i32(addr, addr, 4); tmp = tcg_temp_new_i32(); gen_helper_cpsr_read(tmp); gen_st32(tmp, addr, 0); if (insn & (1 << 21)) { if ((insn & (1 << 24)) == 0) { tcg_gen_addi_i32(addr, addr, -4); } else { tcg_gen_addi_i32(addr, addr, 4); } tmp = tcg_const_i32(op); gen_helper_set_r13_banked(cpu_env, tmp, addr); tcg_temp_free_i32(tmp); } else { tcg_temp_free_i32(addr); } } } else { int i; /* Load/store multiple. */ addr = load_reg(s, rn); offset = 0; for (i = 0; i < 16; i++) { if (insn & (1 << i)) offset += 4; } if (insn & (1 << 24)) { tcg_gen_addi_i32(addr, addr, -offset); } for (i = 0; i < 16; i++) { if ((insn & (1 << i)) == 0) continue; if (insn & (1 << 20)) { /* Load. */ tmp = gen_ld32(addr, IS_USER(s)); if (i == 15) { gen_bx(s, tmp); } else { store_reg(s, i, tmp); } } else { /* Store. */ tmp = load_reg(s, i); gen_st32(tmp, addr, IS_USER(s)); } tcg_gen_addi_i32(addr, addr, 4); } if (insn & (1 << 21)) { /* Base register writeback. */ if (insn & (1 << 24)) { tcg_gen_addi_i32(addr, addr, -offset); } /* Fault if writeback register is in register list. */ if (insn & (1 << rn)) goto illegal_op; store_reg(s, rn, addr); } else { tcg_temp_free_i32(addr); } } } break; case 5: op = (insn >> 21) & 0xf; if (op == 6) { /* Halfword pack. */ tmp = load_reg(s, rn); tmp2 = load_reg(s, rm); shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3); if (insn & (1 << 5)) { /* pkhtb */ if (shift == 0) shift = 31; tcg_gen_sari_i32(tmp2, tmp2, shift); tcg_gen_andi_i32(tmp, tmp, 0xffff0000); tcg_gen_ext16u_i32(tmp2, tmp2); } else { /* pkhbt */ if (shift) tcg_gen_shli_i32(tmp2, tmp2, shift); tcg_gen_ext16u_i32(tmp, tmp); tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000); } tcg_gen_or_i32(tmp, tmp, tmp2); tcg_temp_free_i32(tmp2); store_reg(s, rd, tmp); } else { /* Data processing register constant shift. */ if (rn == 15) { tmp = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp, 0); } else { tmp = load_reg(s, rn); } tmp2 = load_reg(s, rm); shiftop = (insn >> 4) & 3; shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c); conds = (insn & (1 << 20)) != 0; logic_cc = (conds && thumb2_logic_op(op)); gen_arm_shift_im(tmp2, shiftop, shift, logic_cc); if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2)) goto illegal_op; tcg_temp_free_i32(tmp2); if (rd != 15) { store_reg(s, rd, tmp); } else { tcg_temp_free_i32(tmp); } } break; case 13: /* Misc data processing. 
*/ op = ((insn >> 22) & 6) | ((insn >> 7) & 1); if (op < 4 && (insn & 0xf000) != 0xf000) goto illegal_op; switch (op) { case 0: /* Register controlled shift. */ tmp = load_reg(s, rn); tmp2 = load_reg(s, rm); if ((insn & 0x70) != 0) goto illegal_op; op = (insn >> 21) & 3; logic_cc = (insn & (1 << 20)) != 0; gen_arm_shift_reg(tmp, op, tmp2, logic_cc); if (logic_cc) gen_logic_CC(tmp); store_reg_bx(env, s, rd, tmp); break; case 1: /* Sign/zero extend. */ tmp = load_reg(s, rm); shift = (insn >> 4) & 3; /* ??? In many cases it's not neccessary to do a rotate, a shift is sufficient. */ if (shift != 0) tcg_gen_rotri_i32(tmp, tmp, shift * 8); op = (insn >> 20) & 7; switch (op) { case 0: gen_sxth(tmp); break; case 1: gen_uxth(tmp); break; case 2: gen_sxtb16(tmp); break; case 3: gen_uxtb16(tmp); break; case 4: gen_sxtb(tmp); break; case 5: gen_uxtb(tmp); break; default: goto illegal_op; } if (rn != 15) { tmp2 = load_reg(s, rn); if ((op >> 1) == 1) { gen_add16(tmp, tmp2); } else { tcg_gen_add_i32(tmp, tmp, tmp2); tcg_temp_free_i32(tmp2); } } store_reg(s, rd, tmp); break; case 2: /* SIMD add/subtract. */ op = (insn >> 20) & 7; shift = (insn >> 4) & 7; if ((op & 3) == 3 || (shift & 3) == 3) goto illegal_op; tmp = load_reg(s, rn); tmp2 = load_reg(s, rm); gen_thumb2_parallel_addsub(op, shift, tmp, tmp2); tcg_temp_free_i32(tmp2); store_reg(s, rd, tmp); break; case 3: /* Other data processing. */ op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7); if (op < 4) { /* Saturating add/subtract. */ tmp = load_reg(s, rn); tmp2 = load_reg(s, rm); if (op & 1) gen_helper_double_saturate(tmp, tmp); if (op & 2) gen_helper_sub_saturate(tmp, tmp2, tmp); else gen_helper_add_saturate(tmp, tmp, tmp2); tcg_temp_free_i32(tmp2); } else { tmp = load_reg(s, rn); switch (op) { case 0x0a: /* rbit */ gen_helper_rbit(tmp, tmp); break; case 0x08: /* rev */ tcg_gen_bswap32_i32(tmp, tmp); break; case 0x09: /* rev16 */ gen_rev16(tmp); break; case 0x0b: /* revsh */ gen_revsh(tmp); break; case 0x10: /* sel */ tmp2 = load_reg(s, rm); tmp3 = tcg_temp_new_i32(); tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE)); gen_helper_sel_flags(tmp, tmp3, tmp, tmp2); tcg_temp_free_i32(tmp3); tcg_temp_free_i32(tmp2); break; case 0x18: /* clz */ gen_helper_clz(tmp, tmp); break; default: goto illegal_op; } } store_reg(s, rd, tmp); break; case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */ op = (insn >> 4) & 0xf; tmp = load_reg(s, rn); tmp2 = load_reg(s, rm); switch ((insn >> 20) & 7) { case 0: /* 32 x 32 -> 32 */ tcg_gen_mul_i32(tmp, tmp, tmp2); tcg_temp_free_i32(tmp2); if (rs != 15) { tmp2 = load_reg(s, rs); if (op) tcg_gen_sub_i32(tmp, tmp2, tmp); else tcg_gen_add_i32(tmp, tmp, tmp2); tcg_temp_free_i32(tmp2); } break; case 1: /* 16 x 16 -> 32 */ gen_mulxy(tmp, tmp2, op & 2, op & 1); tcg_temp_free_i32(tmp2); if (rs != 15) { tmp2 = load_reg(s, rs); gen_helper_add_setq(tmp, tmp, tmp2); tcg_temp_free_i32(tmp2); } break; case 2: /* Dual multiply add. */ case 4: /* Dual multiply subtract. */ if (op) gen_swap_half(tmp2); gen_smul_dual(tmp, tmp2); /* This addition cannot overflow. 
*/ if (insn & (1 << 22)) { tcg_gen_sub_i32(tmp, tmp, tmp2); } else { tcg_gen_add_i32(tmp, tmp, tmp2); } tcg_temp_free_i32(tmp2); if (rs != 15) { tmp2 = load_reg(s, rs); gen_helper_add_setq(tmp, tmp, tmp2); tcg_temp_free_i32(tmp2); } break; case 3: /* 32 * 16 -> 32msb */ if (op) tcg_gen_sari_i32(tmp2, tmp2, 16); else gen_sxth(tmp2); tmp64 = gen_muls_i64_i32(tmp, tmp2); tcg_gen_shri_i64(tmp64, tmp64, 16); tmp = tcg_temp_new_i32(); tcg_gen_trunc_i64_i32(tmp, tmp64); tcg_temp_free_i64(tmp64); if (rs != 15) { tmp2 = load_reg(s, rs); gen_helper_add_setq(tmp, tmp, tmp2); tcg_temp_free_i32(tmp2); } break; case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */ tmp64 = gen_muls_i64_i32(tmp, tmp2); if (rs != 15) { tmp = load_reg(s, rs); if (insn & (1 << 20)) { tmp64 = gen_addq_msw(tmp64, tmp); } else { tmp64 = gen_subq_msw(tmp64, tmp); } } if (insn & (1 << 4)) { tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u); } tcg_gen_shri_i64(tmp64, tmp64, 32); tmp = tcg_temp_new_i32(); tcg_gen_trunc_i64_i32(tmp, tmp64); tcg_temp_free_i64(tmp64); break; case 7: /* Unsigned sum of absolute differences. */ gen_helper_usad8(tmp, tmp, tmp2); tcg_temp_free_i32(tmp2); if (rs != 15) { tmp2 = load_reg(s, rs); tcg_gen_add_i32(tmp, tmp, tmp2); tcg_temp_free_i32(tmp2); } break; } store_reg(s, rd, tmp); break; case 6: case 7: /* 64-bit multiply, Divide. */ op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70); tmp = load_reg(s, rn); tmp2 = load_reg(s, rm); if ((op & 0x50) == 0x10) { /* sdiv, udiv */ if (!arm_feature(env, ARM_FEATURE_DIV)) goto illegal_op; if (op & 0x20) gen_helper_udiv(tmp, tmp, tmp2); else gen_helper_sdiv(tmp, tmp, tmp2); tcg_temp_free_i32(tmp2); store_reg(s, rd, tmp); } else if ((op & 0xe) == 0xc) { /* Dual multiply accumulate long. */ if (op & 1) gen_swap_half(tmp2); gen_smul_dual(tmp, tmp2); if (op & 0x10) { tcg_gen_sub_i32(tmp, tmp, tmp2); } else { tcg_gen_add_i32(tmp, tmp, tmp2); } tcg_temp_free_i32(tmp2); /* BUGFIX */ tmp64 = tcg_temp_new_i64(); tcg_gen_ext_i32_i64(tmp64, tmp); tcg_temp_free_i32(tmp); gen_addq(s, tmp64, rs, rd); gen_storeq_reg(s, rs, rd, tmp64); tcg_temp_free_i64(tmp64); } else { if (op & 0x20) { /* Unsigned 64-bit multiply */ tmp64 = gen_mulu_i64_i32(tmp, tmp2); } else { if (op & 8) { /* smlalxy */ gen_mulxy(tmp, tmp2, op & 2, op & 1); tcg_temp_free_i32(tmp2); tmp64 = tcg_temp_new_i64(); tcg_gen_ext_i32_i64(tmp64, tmp); tcg_temp_free_i32(tmp); } else { /* Signed 64-bit multiply */ tmp64 = gen_muls_i64_i32(tmp, tmp2); } } if (op & 4) { /* umaal */ gen_addq_lo(s, tmp64, rs); gen_addq_lo(s, tmp64, rd); } else if (op & 0x40) { /* 64-bit accumulate. */ gen_addq(s, tmp64, rs, rd); } gen_storeq_reg(s, rs, rd, tmp64); tcg_temp_free_i64(tmp64); } break; } break; case 6: case 7: case 14: case 15: /* Coprocessor. */ if (((insn >> 24) & 3) == 3) { /* Translate into the equivalent ARM encoding. */ insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28); if (disas_neon_data_insn(env, s, insn)) goto illegal_op; } else { if (insn & (1 << 28)) goto illegal_op; if (disas_coproc_insn (env, s, insn)) goto illegal_op; } break; case 8: case 9: case 10: case 11: if (insn & (1 << 15)) { /* Branches, misc control. */ if (insn & 0x5000) { /* Unconditional branch. */ /* signextend(hw1[10:0]) -> offset[:12]. */ offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff; /* hw1[10:0] -> offset[11:1]. */ offset |= (insn & 0x7ff) << 1; /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22] offset[24:22] already have the same value because of the sign extension above. 
*/ offset ^= ((~insn) & (1 << 13)) << 10; offset ^= ((~insn) & (1 << 11)) << 11; if (insn & (1 << 14)) { /* Branch and link. */ tcg_gen_movi_i32(cpu_R[14], s->pc | 1); } offset += s->pc; if (insn & (1 << 12)) { /* b/bl */ gen_jmp(s, offset); } else { /* blx */ offset &= ~(uint32_t)2; gen_bx_im(s, offset); } } else if (((insn >> 23) & 7) == 7) { /* Misc control */ if (insn & (1 << 13)) goto illegal_op; if (insn & (1 << 26)) { /* Secure monitor call (v6Z) */ goto illegal_op; /* not implemented. */ } else { op = (insn >> 20) & 7; switch (op) { case 0: /* msr cpsr. */ if (IS_M(env)) { tmp = load_reg(s, rn); addr = tcg_const_i32(insn & 0xff); gen_helper_v7m_msr(cpu_env, addr, tmp); tcg_temp_free_i32(addr); tcg_temp_free_i32(tmp); gen_lookup_tb(s); break; } /* fall through */ case 1: /* msr spsr. */ if (IS_M(env)) goto illegal_op; tmp = load_reg(s, rn); if (gen_set_psr(s, msr_mask(env, s, (insn >> 8) & 0xf, op == 1), op == 1, tmp)) goto illegal_op; break; case 2: /* cps, nop-hint. */ if (((insn >> 8) & 7) == 0) { gen_nop_hint(s, insn & 0xff); } /* Implemented as NOP in user mode. */ if (IS_USER(s)) break; offset = 0; imm = 0; if (insn & (1 << 10)) { if (insn & (1 << 7)) offset |= CPSR_A; if (insn & (1 << 6)) offset |= CPSR_I; if (insn & (1 << 5)) offset |= CPSR_F; if (insn & (1 << 9)) imm = CPSR_A | CPSR_I | CPSR_F; } if (insn & (1 << 8)) { offset |= 0x1f; imm |= (insn & 0x1f); } if (offset) { gen_set_psr_im(s, offset, 0, imm); } break; case 3: /* Special control operations. */ ARCH(7); op = (insn >> 4) & 0xf; switch (op) { case 2: /* clrex */ gen_clrex(s); break; case 4: /* dsb */ case 5: /* dmb */ case 6: /* isb */ /* These execute as NOPs. */ break; default: goto illegal_op; } break; case 4: /* bxj */ /* Trivial implementation equivalent to bx. */ tmp = load_reg(s, rn); gen_bx(s, tmp); break; case 5: /* Exception return. */ if (IS_USER(s)) { goto illegal_op; } if (rn != 14 || rd != 15) { goto illegal_op; } tmp = load_reg(s, rn); tcg_gen_subi_i32(tmp, tmp, insn & 0xff); gen_exception_return(s, tmp); break; case 6: /* mrs cpsr. */ tmp = tcg_temp_new_i32(); if (IS_M(env)) { addr = tcg_const_i32(insn & 0xff); gen_helper_v7m_mrs(tmp, cpu_env, addr); tcg_temp_free_i32(addr); } else { gen_helper_cpsr_read(tmp); } store_reg(s, rd, tmp); break; case 7: /* mrs spsr. */ /* Not accessible in user mode. */ if (IS_USER(s) || IS_M(env)) goto illegal_op; tmp = load_cpu_field(spsr); store_reg(s, rd, tmp); break; } } } else { /* Conditional branch. */ op = (insn >> 22) & 0xf; /* Generate a conditional jump to next instruction. */ s->condlabel = gen_new_label(); gen_test_cc(op ^ 1, s->condlabel); s->condjmp = 1; /* offset[11:1] = insn[10:0] */ offset = (insn & 0x7ff) << 1; /* offset[17:12] = insn[21:16]. */ offset |= (insn & 0x003f0000) >> 4; /* offset[31:20] = insn[26]. */ offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11; /* offset[18] = insn[13]. */ offset |= (insn & (1 << 13)) << 5; /* offset[19] = insn[11]. */ offset |= (insn & (1 << 11)) << 8; /* jump to the offset */ gen_jmp(s, s->pc + offset); } } else { /* Data processing immediate. */ if (insn & (1 << 25)) { if (insn & (1 << 24)) { if (insn & (1 << 20)) goto illegal_op; /* Bitfield/Saturate. */ op = (insn >> 21) & 7; imm = insn & 0x1f; shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c); if (rn == 15) { tmp = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp, 0); } else { tmp = load_reg(s, rn); } switch (op) { case 2: /* Signed bitfield extract. 
*/ imm++; if (shift + imm > 32) goto illegal_op; if (imm < 32) gen_sbfx(tmp, shift, imm); break; case 6: /* Unsigned bitfield extract. */ imm++; if (shift + imm > 32) goto illegal_op; if (imm < 32) gen_ubfx(tmp, shift, (1u << imm) - 1); break; case 3: /* Bitfield insert/clear. */ if (imm < shift) goto illegal_op; imm = imm + 1 - shift; if (imm != 32) { tmp2 = load_reg(s, rd); gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1); tcg_temp_free_i32(tmp2); } break; case 7: goto illegal_op; default: /* Saturate. */ if (shift) { if (op & 1) tcg_gen_sari_i32(tmp, tmp, shift); else tcg_gen_shli_i32(tmp, tmp, shift); } tmp2 = tcg_const_i32(imm); if (op & 4) { /* Unsigned. */ if ((op & 1) && shift == 0) gen_helper_usat16(tmp, tmp, tmp2); else gen_helper_usat(tmp, tmp, tmp2); } else { /* Signed. */ if ((op & 1) && shift == 0) gen_helper_ssat16(tmp, tmp, tmp2); else gen_helper_ssat(tmp, tmp, tmp2); } tcg_temp_free_i32(tmp2); break; } store_reg(s, rd, tmp); } else { imm = ((insn & 0x04000000) >> 15) | ((insn & 0x7000) >> 4) | (insn & 0xff); if (insn & (1 << 22)) { /* 16-bit immediate. */ imm |= (insn >> 4) & 0xf000; if (insn & (1 << 23)) { /* movt */ tmp = load_reg(s, rd); tcg_gen_ext16u_i32(tmp, tmp); tcg_gen_ori_i32(tmp, tmp, imm << 16); } else { /* movw */ tmp = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp, imm); } } else { /* Add/sub 12-bit immediate. */ if (rn == 15) { offset = s->pc & ~(uint32_t)3; if (insn & (1 << 23)) offset -= imm; else offset += imm; tmp = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp, offset); } else { tmp = load_reg(s, rn); if (insn & (1 << 23)) tcg_gen_subi_i32(tmp, tmp, imm); else tcg_gen_addi_i32(tmp, tmp, imm); } } store_reg(s, rd, tmp); } } else { int shifter_out = 0; /* modified 12-bit immediate. */ shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12); imm = (insn & 0xff); switch (shift) { case 0: /* XY */ /* Nothing to do. */ break; case 1: /* 00XY00XY */ imm |= imm << 16; break; case 2: /* XY00XY00 */ imm |= imm << 16; imm <<= 8; break; case 3: /* XYXYXYXY */ imm |= imm << 16; imm |= imm << 8; break; default: /* Rotated constant. */ shift = (shift << 1) | (imm >> 7); imm |= 0x80; imm = imm << (32 - shift); shifter_out = 1; break; } tmp2 = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp2, imm); rn = (insn >> 16) & 0xf; if (rn == 15) { tmp = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp, 0); } else { tmp = load_reg(s, rn); } op = (insn >> 21) & 0xf; if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0, shifter_out, tmp, tmp2)) goto illegal_op; tcg_temp_free_i32(tmp2); rd = (insn >> 8) & 0xf; if (rd != 15) { store_reg(s, rd, tmp); } else { tcg_temp_free_i32(tmp); } } } break; case 12: /* Load/store single data item. */ { int postinc = 0; int writeback = 0; int user; if ((insn & 0x01100000) == 0x01000000) { if (disas_neon_ls_insn(env, s, insn)) goto illegal_op; break; } op = ((insn >> 21) & 3) | ((insn >> 22) & 4); if (rs == 15) { if (!(insn & (1 << 20))) { goto illegal_op; } if (op != 2) { /* Byte or halfword load space with dest == r15 : memory hints. * Catch them early so we don't emit pointless addressing code. 
* This space is a mix of: * PLD/PLDW/PLI, which we implement as NOPs (note that unlike * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP * cores) * unallocated hints, which must be treated as NOPs * UNPREDICTABLE space, which we NOP or UNDEF depending on * which is easiest for the decoding logic * Some space which must UNDEF */ int op1 = (insn >> 23) & 3; int op2 = (insn >> 6) & 0x3f; if (op & 2) { goto illegal_op; } if (rn == 15) { /* UNPREDICTABLE or unallocated hint */ return 0; } if (op1 & 1) { return 0; /* PLD* or unallocated hint */ } if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) { return 0; /* PLD* or unallocated hint */ } /* UNDEF space, or an UNPREDICTABLE */ return 1; } } user = IS_USER(s); if (rn == 15) { addr = tcg_temp_new_i32(); /* PC relative. */ /* s->pc has already been incremented by 4. */ imm = s->pc & 0xfffffffc; if (insn & (1 << 23)) imm += insn & 0xfff; else imm -= insn & 0xfff; tcg_gen_movi_i32(addr, imm); } else { addr = load_reg(s, rn); if (insn & (1 << 23)) { /* Positive offset. */ imm = insn & 0xfff; tcg_gen_addi_i32(addr, addr, imm); } else { imm = insn & 0xff; switch ((insn >> 8) & 0xf) { case 0x0: /* Shifted Register. */ shift = (insn >> 4) & 0xf; if (shift > 3) { tcg_temp_free_i32(addr); goto illegal_op; } tmp = load_reg(s, rm); if (shift) tcg_gen_shli_i32(tmp, tmp, shift); tcg_gen_add_i32(addr, addr, tmp); tcg_temp_free_i32(tmp); break; case 0xc: /* Negative offset. */ tcg_gen_addi_i32(addr, addr, -imm); break; case 0xe: /* User privilege. */ tcg_gen_addi_i32(addr, addr, imm); user = 1; break; case 0x9: /* Post-decrement. */ imm = -imm; /* Fall through. */ case 0xb: /* Post-increment. */ postinc = 1; writeback = 1; break; case 0xd: /* Pre-decrement. */ imm = -imm; /* Fall through. */ case 0xf: /* Pre-increment. */ tcg_gen_addi_i32(addr, addr, imm); writeback = 1; break; default: tcg_temp_free_i32(addr); goto illegal_op; } } } if (insn & (1 << 20)) { /* Load. */ switch (op) { case 0: tmp = gen_ld8u(addr, user); break; case 4: tmp = gen_ld8s(addr, user); break; case 1: tmp = gen_ld16u(addr, user); break; case 5: tmp = gen_ld16s(addr, user); break; case 2: tmp = gen_ld32(addr, user); break; default: tcg_temp_free_i32(addr); goto illegal_op; } if (rs == 15) { gen_bx(s, tmp); } else { store_reg(s, rs, tmp); } } else { /* Store. */ tmp = load_reg(s, rs); switch (op) { case 0: gen_st8(tmp, addr, user); break; case 1: gen_st16(tmp, addr, user); break; case 2: gen_st32(tmp, addr, user); break; default: tcg_temp_free_i32(addr); goto illegal_op; } } if (postinc) tcg_gen_addi_i32(addr, addr, imm); if (writeback) { store_reg(s, rn, addr); } else { tcg_temp_free_i32(addr); } } break; default: goto illegal_op; } return 0; illegal_op: return 1; }
26,208
qemu
a01672d3968cf91208666d371784110bfde9d4f8
1
static void kvm_set_phys_mem(target_phys_addr_t start_addr, ram_addr_t size, ram_addr_t phys_offset, bool log_dirty) { KVMState *s = kvm_state; ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK; KVMSlot *mem, old; int err; void *ram = NULL; /* kvm works in page size chunks, but the function may be called with sub-page size and unaligned start address. */ size = TARGET_PAGE_ALIGN(size); start_addr = TARGET_PAGE_ALIGN(start_addr); /* KVM does not support read-only slots */ phys_offset &= ~IO_MEM_ROM; if ((phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM) { ram = qemu_safe_ram_ptr(phys_offset); } while (1) { mem = kvm_lookup_overlapping_slot(s, start_addr, start_addr + size); if (!mem) { break; } if (flags < IO_MEM_UNASSIGNED && start_addr >= mem->start_addr && (start_addr + size <= mem->start_addr + mem->memory_size) && (ram - start_addr == mem->ram - mem->start_addr)) { /* The new slot fits into the existing one and comes with * identical parameters - update flags and done. */ kvm_slot_dirty_pages_log_change(mem, log_dirty); return; } old = *mem; /* unregister the overlapping slot */ mem->memory_size = 0; err = kvm_set_user_memory_region(s, mem); if (err) { fprintf(stderr, "%s: error unregistering overlapping slot: %s\n", __func__, strerror(-err)); abort(); } /* Workaround for older KVM versions: we can't join slots, even not by * unregistering the previous ones and then registering the larger * slot. We have to maintain the existing fragmentation. Sigh. * * This workaround assumes that the new slot starts at the same * address as the first existing one. If not or if some overlapping * slot comes around later, we will fail (not seen in practice so far) * - and actually require a recent KVM version. */ if (s->broken_set_mem_region && old.start_addr == start_addr && old.memory_size < size && flags < IO_MEM_UNASSIGNED) { mem = kvm_alloc_slot(s); mem->memory_size = old.memory_size; mem->start_addr = old.start_addr; mem->ram = old.ram; mem->flags = kvm_mem_flags(s, log_dirty); err = kvm_set_user_memory_region(s, mem); if (err) { fprintf(stderr, "%s: error updating slot: %s\n", __func__, strerror(-err)); abort(); } start_addr += old.memory_size; phys_offset += old.memory_size; ram += old.memory_size; size -= old.memory_size; continue; } /* register prefix slot */ if (old.start_addr < start_addr) { mem = kvm_alloc_slot(s); mem->memory_size = start_addr - old.start_addr; mem->start_addr = old.start_addr; mem->ram = old.ram; mem->flags = kvm_mem_flags(s, log_dirty); err = kvm_set_user_memory_region(s, mem); if (err) { fprintf(stderr, "%s: error registering prefix slot: %s\n", __func__, strerror(-err)); #ifdef TARGET_PPC fprintf(stderr, "%s: This is probably because your kernel's " \ "PAGE_SIZE is too big. 
Please try to use 4k " \ "PAGE_SIZE!\n", __func__); #endif abort(); } } /* register suffix slot */ if (old.start_addr + old.memory_size > start_addr + size) { ram_addr_t size_delta; mem = kvm_alloc_slot(s); mem->start_addr = start_addr + size; size_delta = mem->start_addr - old.start_addr; mem->memory_size = old.memory_size - size_delta; mem->ram = old.ram + size_delta; mem->flags = kvm_mem_flags(s, log_dirty); err = kvm_set_user_memory_region(s, mem); if (err) { fprintf(stderr, "%s: error registering suffix slot: %s\n", __func__, strerror(-err)); abort(); } } } /* in case the KVM bug workaround already "consumed" the new slot */ if (!size) { return; } /* KVM does not need to know about this memory */ if (flags >= IO_MEM_UNASSIGNED) { return; } mem = kvm_alloc_slot(s); mem->memory_size = size; mem->start_addr = start_addr; mem->ram = ram; mem->flags = kvm_mem_flags(s, log_dirty); err = kvm_set_user_memory_region(s, mem); if (err) { fprintf(stderr, "%s: error registering slot: %s\n", __func__, strerror(-err)); abort(); } }
26,209
qemu
c572f23a3e7180dbeab5e86583e43ea2afed6271
1
static void v9fs_xattrwalk(void *opaque) { int64_t size; V9fsString name; ssize_t err = 0; size_t offset = 7; int32_t fid, newfid; V9fsFidState *file_fidp; V9fsFidState *xattr_fidp = NULL; V9fsPDU *pdu = opaque; V9fsState *s = pdu->s; pdu_unmarshal(pdu, offset, "dds", &fid, &newfid, &name); file_fidp = get_fid(pdu, fid); if (file_fidp == NULL) { err = -ENOENT; goto out_nofid; } xattr_fidp = alloc_fid(s, newfid); if (xattr_fidp == NULL) { err = -EINVAL; goto out; } v9fs_path_copy(&xattr_fidp->path, &file_fidp->path); if (name.data[0] == 0) { /* * listxattr request. Get the size first */ size = v9fs_co_llistxattr(pdu, &xattr_fidp->path, NULL, 0); if (size < 0) { err = size; clunk_fid(s, xattr_fidp->fid); goto out; } /* * Read the xattr value */ xattr_fidp->fs.xattr.len = size; xattr_fidp->fid_type = P9_FID_XATTR; xattr_fidp->fs.xattr.copied_len = -1; if (size) { xattr_fidp->fs.xattr.value = g_malloc(size); err = v9fs_co_llistxattr(pdu, &xattr_fidp->path, xattr_fidp->fs.xattr.value, xattr_fidp->fs.xattr.len); if (err < 0) { clunk_fid(s, xattr_fidp->fid); goto out; } } offset += pdu_marshal(pdu, offset, "q", size); err = offset; } else { /* * specific xattr fid. We check for xattr * presence also collect the xattr size */ size = v9fs_co_lgetxattr(pdu, &xattr_fidp->path, &name, NULL, 0); if (size < 0) { err = size; clunk_fid(s, xattr_fidp->fid); goto out; } /* * Read the xattr value */ xattr_fidp->fs.xattr.len = size; xattr_fidp->fid_type = P9_FID_XATTR; xattr_fidp->fs.xattr.copied_len = -1; if (size) { xattr_fidp->fs.xattr.value = g_malloc(size); err = v9fs_co_lgetxattr(pdu, &xattr_fidp->path, &name, xattr_fidp->fs.xattr.value, xattr_fidp->fs.xattr.len); if (err < 0) { clunk_fid(s, xattr_fidp->fid); goto out; } } offset += pdu_marshal(pdu, offset, "q", size); err = offset; } out: put_fid(pdu, file_fidp); if (xattr_fidp) { put_fid(pdu, xattr_fidp); } out_nofid: trace_v9fs_xattrwalk_return(pdu->tag, pdu->id, size); complete_pdu(s, pdu, err); v9fs_string_free(&name); }
26,210
FFmpeg
7b05b5093ea67a3397b0c37cf398bab471e1ce2b
1
void ff_eac3_apply_spectral_extension(AC3DecodeContext *s) { int bin, bnd, ch, i; uint8_t wrapflag[SPX_MAX_BANDS]={1,0,}, num_copy_sections, copy_sizes[SPX_MAX_BANDS]; float rms_energy[SPX_MAX_BANDS]; /* Set copy index mapping table. Set wrap flags to apply a notch filter at wrap points later on. */ bin = s->spx_dst_start_freq; num_copy_sections = 0; for (bnd = 0; bnd < s->num_spx_bands; bnd++) { int copysize; int bandsize = s->spx_band_sizes[bnd]; if (bin + bandsize > s->spx_src_start_freq) { copy_sizes[num_copy_sections++] = bin - s->spx_dst_start_freq; bin = s->spx_dst_start_freq; wrapflag[bnd] = 1; } for (i = 0; i < bandsize; i += copysize) { if (bin == s->spx_src_start_freq) { copy_sizes[num_copy_sections++] = bin - s->spx_dst_start_freq; bin = s->spx_dst_start_freq; } copysize = FFMIN(bandsize - i, s->spx_src_start_freq - bin); bin += copysize; } } copy_sizes[num_copy_sections++] = bin - s->spx_dst_start_freq; for (ch = 1; ch <= s->fbw_channels; ch++) { if (!s->channel_uses_spx[ch]) continue; /* Copy coeffs from normal bands to extension bands */ bin = s->spx_src_start_freq; for (i = 0; i < num_copy_sections; i++) { memcpy(&s->transform_coeffs[ch][bin], &s->transform_coeffs[ch][s->spx_dst_start_freq], copy_sizes[i]*sizeof(float)); bin += copy_sizes[i]; } /* Calculate RMS energy for each SPX band. */ bin = s->spx_src_start_freq; for (bnd = 0; bnd < s->num_spx_bands; bnd++) { int bandsize = s->spx_band_sizes[bnd]; float accum = 0.0f; for (i = 0; i < bandsize; i++) { float coeff = s->transform_coeffs[ch][bin++]; accum += coeff * coeff; } rms_energy[bnd] = sqrtf(accum / bandsize); } /* Apply a notch filter at transitions between normal and extension bands and at all wrap points. */ if (s->spx_atten_code[ch] >= 0) { const float *atten_tab = ff_eac3_spx_atten_tab[s->spx_atten_code[ch]]; bin = s->spx_src_start_freq - 2; for (bnd = 0; bnd < s->num_spx_bands; bnd++) { if (wrapflag[bnd]) { float *coeffs = &s->transform_coeffs[ch][bin]; coeffs[0] *= atten_tab[0]; coeffs[1] *= atten_tab[1]; coeffs[2] *= atten_tab[2]; coeffs[3] *= atten_tab[1]; coeffs[4] *= atten_tab[0]; } bin += s->spx_band_sizes[bnd]; } } /* Apply noise-blended coefficient scaling based on previously calculated RMS energy, blending factors, and SPX coordinates for each band. */ bin = s->spx_src_start_freq; for (bnd = 0; bnd < s->num_spx_bands; bnd++) { float nscale = s->spx_noise_blend[ch][bnd] * rms_energy[bnd] * (1.0f / INT32_MIN); float sscale = s->spx_signal_blend[ch][bnd]; for (i = 0; i < s->spx_band_sizes[bnd]; i++) { float noise = nscale * (int32_t)av_lfg_get(&s->dith_state); s->transform_coeffs[ch][bin] *= sscale; s->transform_coeffs[ch][bin++] += noise; } } } }
26,211
FFmpeg
ff17c76e92cd9a9072a8771cad73c96cd620040b
1
static int add_crc_to_array(uint32_t crc, int64_t pts) { if (size_of_array <= number_of_elements) { if (size_of_array == 0) size_of_array = 10; size_of_array *= 2; crc_array = av_realloc(crc_array, size_of_array * sizeof(uint32_t)); pts_array = av_realloc(pts_array, size_of_array * sizeof(int64_t)); if ((crc_array == NULL) || (pts_array == NULL)) { av_log(NULL, AV_LOG_ERROR, "Can't allocate array to store crcs\n"); return AVERROR(ENOMEM); } } crc_array[number_of_elements] = crc; pts_array[number_of_elements] = pts; number_of_elements++; return 0; }
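The function above grows two parallel arrays by assigning av_realloc()'s return value straight back to the array pointers and only then checking for NULL, so a failed reallocation loses the original blocks. A common C idiom for this doubling-growth pattern goes through a temporary pointer so the old block stays reachable on failure. The sketch below uses plain realloc() and made-up names (grow_u32 and its parameters); it is an illustration of the idiom, not code from the surrounding project.

#include <stdint.h>
#include <stdlib.h>

/* Illustrative sketch only: grow a uint32_t array with doubling capacity,
 * keeping the old block reachable if realloc() fails. */
static int grow_u32(uint32_t **arr, size_t *cap, size_t need)
{
    uint32_t *tmp;
    size_t new_cap;

    if (need <= *cap)
        return 0;
    new_cap = *cap ? *cap * 2 : 10;
    if (new_cap < need)
        new_cap = need;
    tmp = realloc(*arr, new_cap * sizeof(*tmp));
    if (!tmp)
        return -1;          /* *arr is unchanged and can still be freed */
    *arr = tmp;
    *cap = new_cap;
    return 0;
}

On failure the caller still owns the original buffer and can free it or keep using the elements already stored in it.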
26,212
qemu
1d5bf692e55ae22b59083741d521e27db704846d
1
static void vfio_listener_region_del(MemoryListener *listener, MemoryRegionSection *section) { VFIOContainer *container = container_of(listener, VFIOContainer, iommu_data.listener); hwaddr iova, end; int ret; if (vfio_listener_skipped_section(section)) { DPRINTF("SKIPPING region_del %"HWADDR_PRIx" - %"PRIx64"\n", section->offset_within_address_space, section->offset_within_address_space + section->size - 1); return; } if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) != (section->offset_within_region & ~TARGET_PAGE_MASK))) { error_report("%s received unaligned region", __func__); return; } iova = TARGET_PAGE_ALIGN(section->offset_within_address_space); end = (section->offset_within_address_space + int128_get64(section->size)) & TARGET_PAGE_MASK; if (iova >= end) { return; } DPRINTF("region_del %"HWADDR_PRIx" - %"HWADDR_PRIx"\n", iova, end - 1); ret = vfio_dma_unmap(container, iova, end - iova); memory_region_unref(section->mr); if (ret) { error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", " "0x%"HWADDR_PRIx") = %d (%m)", container, iova, end - iova, ret); } }
26,213
FFmpeg
5b4da8a38a5ed211df9504c85ce401c30af86b97
0
static av_cold void init_mv_penalty_and_fcode(MpegEncContext *s) { int f_code; int mv; for(f_code=1; f_code<=MAX_FCODE; f_code++){ for(mv=-MAX_MV; mv<=MAX_MV; mv++){ int len; if(mv==0) len= ff_mvtab[0][1]; else{ int val, bit_size, code; bit_size = f_code - 1; val=mv; if (val < 0) val = -val; val--; code = (val >> bit_size) + 1; if(code<33){ len= ff_mvtab[code][1] + 1 + bit_size; }else{ len= ff_mvtab[32][1] + av_log2(code>>5) + 2 + bit_size; } } mv_penalty[f_code][mv+MAX_MV]= len; } } for(f_code=MAX_FCODE; f_code>0; f_code--){ for(mv=-(16<<f_code); mv<(16<<f_code); mv++){ fcode_tab[mv+MAX_MV]= f_code; } } for(mv=0; mv<MAX_MV*2+1; mv++){ umv_fcode_tab[mv]= 1; } }
26,214
FFmpeg
ddf1b4a2f8a680126eb611428e4f47e6e5b8c6c0
0
static av_always_inline int setup_classifs(vorbis_context *vc, vorbis_residue *vr, uint8_t *do_not_decode, unsigned ch_used, int partition_count) { int p, j, i; unsigned c_p_c = vc->codebooks[vr->classbook].dimensions; unsigned inverse_class = ff_inverse[vr->classifications]; unsigned temp, temp2; for (p = 0, j = 0; j < ch_used; ++j) { if (!do_not_decode[j]) { temp = get_vlc2(&vc->gb, vc->codebooks[vr->classbook].vlc.table, vc->codebooks[vr->classbook].nb_bits, 3); av_dlog(NULL, "Classword: %u\n", temp); assert(vr->classifications > 1 && temp <= 65536); //needed for inverse[] for (i = 0; i < c_p_c; ++i) { temp2 = (((uint64_t)temp) * inverse_class) >> 32; if (partition_count + c_p_c - 1 - i < vr->ptns_to_read) vr->classifs[p + partition_count + c_p_c - 1 - i] = temp - temp2 * vr->classifications; temp = temp2; } } p += vr->ptns_to_read; } return 0; }
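The loop above splits a class word into digits by multiplying with a precomputed 32.32 reciprocal (ff_inverse[vr->classifications]) instead of dividing, and its own assert notes this is only valid for values up to 65536. The standalone sketch below demonstrates the same reciprocal-multiplication trick using one common choice of reciprocal, ceil(2^32 / d), and checks it against ordinary division over a comparable range of small numerators; it is an assumption-laden illustration and does not claim to reproduce FFmpeg's actual ff_inverse table.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Division by a small constant via multiplication with a 32.32 reciprocal.
 * The reciprocal used here, ceil(2^32 / d), is chosen for illustration; the
 * point is only that (a * inv) >> 32 equals a / d on the tested range. */
int main(void)
{
    uint32_t d, a;

    for (d = 2; d <= 64; d++) {
        uint32_t inv = (uint32_t)(((1ULL << 32) + d - 1) / d);
        for (a = 0; a <= 65536; a++) {
            uint32_t q = (uint32_t)(((uint64_t)a * inv) >> 32);
            assert(q == a / d);
        }
    }
    puts("reciprocal multiplication matched exact division on the tested range");
    return 0;
}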
26,215
qemu
384acbf46b70edf0d2c1648aa1a92a90bcf7057d
0
static int qemu_laio_process_requests(void *opaque) { struct qemu_laio_state *s = opaque; struct qemu_laiocb *laiocb, *next; int res = 0; QLIST_FOREACH_SAFE (laiocb, &s->completed_reqs, node, next) { if (laiocb->async_context_id == get_async_context_id()) { qemu_laio_process_completion(s, laiocb); QLIST_REMOVE(laiocb, node); res = 1; } } return res; }
26,216
qemu
bff3063837a76b37a4bbbfe614324ca38e859f2b
0
static void event_scan(PowerPCCPU *cpu, sPAPRMachineState *spapr, uint32_t token, uint32_t nargs, target_ulong args, uint32_t nret, target_ulong rets) { uint32_t mask, buf, len, event_len; sPAPREventLogEntry *event; struct rtas_error_log *hdr; if (nargs != 4 || nret != 1) { rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); return; } mask = rtas_ld(args, 0); buf = rtas_ld(args, 2); len = rtas_ld(args, 3); event = rtas_event_log_dequeue(mask, false); if (!event) { goto out_no_events; } hdr = event->data; event_len = be32_to_cpu(hdr->extended_length) + sizeof(*hdr); if (event_len < len) { len = event_len; } cpu_physical_memory_write(buf, event->data, len); rtas_st(rets, 0, RTAS_OUT_SUCCESS); g_free(event->data); g_free(event); return; out_no_events: rtas_st(rets, 0, RTAS_OUT_NO_ERRORS_FOUND); }
26,217
qemu
a8170e5e97ad17ca169c64ba87ae2f53850dab4c
0
static void sysbus_esp_mem_write(void *opaque, target_phys_addr_t addr, uint64_t val, unsigned int size) { SysBusESPState *sysbus = opaque; uint32_t saddr; saddr = addr >> sysbus->it_shift; esp_reg_write(&sysbus->esp, saddr, val); }
26,218
FFmpeg
ee9f36a88eb3e2706ea659acb0ca80c414fa5d8a
0
static uint32_t adler32(uint32_t adler, const uint8_t *buf, unsigned int len) { unsigned long s1 = adler & 0xffff; unsigned long s2 = (adler >> 16) & 0xffff; int k; if (buf == NULL) return 1L; while (len > 0) { k = len < NMAX ? len : NMAX; len -= k; while (k >= 16) { DO16(buf); k -= 16; } if (k != 0) do { DO1(buf); } while (--k); s1 %= BASE; s2 %= BASE; } return (s2 << 16) | s1; }
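The adler32() above relies on BASE, NMAX and the DO1/DO16 unrolling macros defined elsewhere in its source file, deferring the modulo reductions until up to NMAX bytes have been accumulated. As a reference point, the plain recurrence it optimizes can be written directly; the helper name adler32_simple is made up for this sketch, while the modulus 65521 and the seed value 1 for a fresh checksum follow the standard Adler-32 definition. This is a readable baseline, not a drop-in replacement.

#include <stddef.h>
#include <stdint.h>

#define ADLER_BASE 65521u  /* largest prime below 2^16, as in Adler-32 */

/* Unoptimized Adler-32: reduce after every byte instead of batching.
 * Pass 1 as the initial checksum when starting from scratch. */
static uint32_t adler32_simple(uint32_t adler, const uint8_t *buf, size_t len)
{
    uint32_t s1 = adler & 0xffff;
    uint32_t s2 = (adler >> 16) & 0xffff;
    size_t i;

    for (i = 0; i < len; i++) {
        s1 = (s1 + buf[i]) % ADLER_BASE;
        s2 = (s2 + s1) % ADLER_BASE;
    }
    return (s2 << 16) | s1;
}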
26,219
qemu
a89f364ae8740dfc31b321eed9ee454e996dc3c1
0
static int wm8750_tx(I2CSlave *i2c, uint8_t data) { WM8750State *s = WM8750(i2c); uint8_t cmd; uint16_t value; if (s->i2c_len >= 2) { #ifdef VERBOSE printf("%s: long message (%i bytes)\n", __func__, s->i2c_len); #endif return 1; } s->i2c_data[s->i2c_len ++] = data; if (s->i2c_len != 2) return 0; cmd = s->i2c_data[0] >> 1; value = ((s->i2c_data[0] << 8) | s->i2c_data[1]) & 0x1ff; switch (cmd) { case WM8750_LADCIN: /* ADC Signal Path Control (Left) */ s->diff[0] = (((value >> 6) & 3) == 3); /* LINSEL */ if (s->diff[0]) s->in[0] = &s->adc_voice[0 + s->ds * 1]; else s->in[0] = &s->adc_voice[((value >> 6) & 3) * 1 + 0]; break; case WM8750_RADCIN: /* ADC Signal Path Control (Right) */ s->diff[1] = (((value >> 6) & 3) == 3); /* RINSEL */ if (s->diff[1]) s->in[1] = &s->adc_voice[0 + s->ds * 1]; else s->in[1] = &s->adc_voice[((value >> 6) & 3) * 1 + 0]; break; case WM8750_ADCIN: /* ADC Input Mode */ s->ds = (value >> 8) & 1; /* DS */ if (s->diff[0]) s->in[0] = &s->adc_voice[0 + s->ds * 1]; if (s->diff[1]) s->in[1] = &s->adc_voice[0 + s->ds * 1]; s->monomix[0] = (value >> 6) & 3; /* MONOMIX */ break; case WM8750_ADCTL1: /* Additional Control (1) */ s->monomix[1] = (value >> 1) & 1; /* DMONOMIX */ break; case WM8750_PWR1: /* Power Management (1) */ s->enable = ((value >> 6) & 7) == 3; /* VMIDSEL, VREF */ wm8750_set_format(s); break; case WM8750_LINVOL: /* Left Channel PGA */ s->invol[0] = value & 0x3f; /* LINVOL */ s->inmute[0] = (value >> 7) & 1; /* LINMUTE */ wm8750_vol_update(s); break; case WM8750_RINVOL: /* Right Channel PGA */ s->invol[1] = value & 0x3f; /* RINVOL */ s->inmute[1] = (value >> 7) & 1; /* RINMUTE */ wm8750_vol_update(s); break; case WM8750_ADCDAC: /* ADC and DAC Control */ s->pol = (value >> 5) & 3; /* ADCPOL */ s->mute = (value >> 3) & 1; /* DACMU */ wm8750_vol_update(s); break; case WM8750_ADCTL3: /* Additional Control (3) */ break; case WM8750_LADC: /* Left ADC Digital Volume */ s->invol[2] = value & 0xff; /* LADCVOL */ wm8750_vol_update(s); break; case WM8750_RADC: /* Right ADC Digital Volume */ s->invol[3] = value & 0xff; /* RADCVOL */ wm8750_vol_update(s); break; case WM8750_ALC1: /* ALC Control (1) */ s->alc = (value >> 7) & 3; /* ALCSEL */ break; case WM8750_NGATE: /* Noise Gate Control */ case WM8750_3D: /* 3D enhance */ break; case WM8750_LDAC: /* Left Channel Digital Volume */ s->outvol[0] = value & 0xff; /* LDACVOL */ wm8750_vol_update(s); break; case WM8750_RDAC: /* Right Channel Digital Volume */ s->outvol[1] = value & 0xff; /* RDACVOL */ wm8750_vol_update(s); break; case WM8750_BASS: /* Bass Control */ break; case WM8750_LOUTM1: /* Left Mixer Control (1) */ s->path[0] = (value >> 8) & 1; /* LD2LO */ /* TODO: mute/unmute respective paths */ wm8750_vol_update(s); break; case WM8750_LOUTM2: /* Left Mixer Control (2) */ s->path[1] = (value >> 8) & 1; /* RD2LO */ /* TODO: mute/unmute respective paths */ wm8750_vol_update(s); break; case WM8750_ROUTM1: /* Right Mixer Control (1) */ s->path[2] = (value >> 8) & 1; /* LD2RO */ /* TODO: mute/unmute respective paths */ wm8750_vol_update(s); break; case WM8750_ROUTM2: /* Right Mixer Control (2) */ s->path[3] = (value >> 8) & 1; /* RD2RO */ /* TODO: mute/unmute respective paths */ wm8750_vol_update(s); break; case WM8750_MOUTM1: /* Mono Mixer Control (1) */ s->mpath[0] = (value >> 8) & 1; /* LD2MO */ /* TODO: mute/unmute respective paths */ wm8750_vol_update(s); break; case WM8750_MOUTM2: /* Mono Mixer Control (2) */ s->mpath[1] = (value >> 8) & 1; /* RD2MO */ /* TODO: mute/unmute respective paths */ wm8750_vol_update(s); break; 
case WM8750_LOUT1V: /* LOUT1 Volume */ s->outvol[2] = value & 0x7f; /* LOUT1VOL */ wm8750_vol_update(s); break; case WM8750_LOUT2V: /* LOUT2 Volume */ s->outvol[4] = value & 0x7f; /* LOUT2VOL */ wm8750_vol_update(s); break; case WM8750_ROUT1V: /* ROUT1 Volume */ s->outvol[3] = value & 0x7f; /* ROUT1VOL */ wm8750_vol_update(s); break; case WM8750_ROUT2V: /* ROUT2 Volume */ s->outvol[5] = value & 0x7f; /* ROUT2VOL */ wm8750_vol_update(s); break; case WM8750_MOUTV: /* MONOOUT Volume */ s->outvol[6] = value & 0x7f; /* MONOOUTVOL */ wm8750_vol_update(s); break; case WM8750_ADCTL2: /* Additional Control (2) */ break; case WM8750_PWR2: /* Power Management (2) */ s->power = value & 0x7e; /* TODO: mute/unmute respective paths */ wm8750_vol_update(s); break; case WM8750_IFACE: /* Digital Audio Interface Format */ s->format = value; s->master = (value >> 6) & 1; /* MS */ wm8750_clk_update(s, s->master); break; case WM8750_SRATE: /* Clocking and Sample Rate Control */ s->rate = &wm_rate_table[(value >> 1) & 0x1f]; wm8750_clk_update(s, 0); break; case WM8750_RESET: /* Reset */ wm8750_reset(I2C_SLAVE(s)); break; #ifdef VERBOSE default: printf("%s: unknown register %02x\n", __FUNCTION__, cmd); #endif } return 0; }
26,220
qemu
7bd427d801e1e3293a634d3c83beadaa90ffb911
0
static int protocol_client_msg(VncState *vs, uint8_t *data, size_t len) { int i; uint16_t limit; VncDisplay *vd = vs->vd; if (data[0] > 3) { vd->timer_interval = VNC_REFRESH_INTERVAL_BASE; if (!qemu_timer_expired(vd->timer, qemu_get_clock(rt_clock) + vd->timer_interval)) qemu_mod_timer(vd->timer, qemu_get_clock(rt_clock) + vd->timer_interval); } switch (data[0]) { case VNC_MSG_CLIENT_SET_PIXEL_FORMAT: if (len == 1) return 20; set_pixel_format(vs, read_u8(data, 4), read_u8(data, 5), read_u8(data, 6), read_u8(data, 7), read_u16(data, 8), read_u16(data, 10), read_u16(data, 12), read_u8(data, 14), read_u8(data, 15), read_u8(data, 16)); break; case VNC_MSG_CLIENT_SET_ENCODINGS: if (len == 1) return 4; if (len == 4) { limit = read_u16(data, 2); if (limit > 0) return 4 + (limit * 4); } else limit = read_u16(data, 2); for (i = 0; i < limit; i++) { int32_t val = read_s32(data, 4 + (i * 4)); memcpy(data + 4 + (i * 4), &val, sizeof(val)); } set_encodings(vs, (int32_t *)(data + 4), limit); break; case VNC_MSG_CLIENT_FRAMEBUFFER_UPDATE_REQUEST: if (len == 1) return 10; framebuffer_update_request(vs, read_u8(data, 1), read_u16(data, 2), read_u16(data, 4), read_u16(data, 6), read_u16(data, 8)); break; case VNC_MSG_CLIENT_KEY_EVENT: if (len == 1) return 8; key_event(vs, read_u8(data, 1), read_u32(data, 4)); break; case VNC_MSG_CLIENT_POINTER_EVENT: if (len == 1) return 6; pointer_event(vs, read_u8(data, 1), read_u16(data, 2), read_u16(data, 4)); break; case VNC_MSG_CLIENT_CUT_TEXT: if (len == 1) return 8; if (len == 8) { uint32_t dlen = read_u32(data, 4); if (dlen > 0) return 8 + dlen; } client_cut_text(vs, read_u32(data, 4), data + 8); break; case VNC_MSG_CLIENT_QEMU: if (len == 1) return 2; switch (read_u8(data, 1)) { case VNC_MSG_CLIENT_QEMU_EXT_KEY_EVENT: if (len == 2) return 12; ext_key_event(vs, read_u16(data, 2), read_u32(data, 4), read_u32(data, 8)); break; case VNC_MSG_CLIENT_QEMU_AUDIO: if (len == 2) return 4; switch (read_u16 (data, 2)) { case VNC_MSG_CLIENT_QEMU_AUDIO_ENABLE: audio_add(vs); break; case VNC_MSG_CLIENT_QEMU_AUDIO_DISABLE: audio_del(vs); break; case VNC_MSG_CLIENT_QEMU_AUDIO_SET_FORMAT: if (len == 4) return 10; switch (read_u8(data, 4)) { case 0: vs->as.fmt = AUD_FMT_U8; break; case 1: vs->as.fmt = AUD_FMT_S8; break; case 2: vs->as.fmt = AUD_FMT_U16; break; case 3: vs->as.fmt = AUD_FMT_S16; break; case 4: vs->as.fmt = AUD_FMT_U32; break; case 5: vs->as.fmt = AUD_FMT_S32; break; default: printf("Invalid audio format %d\n", read_u8(data, 4)); vnc_client_error(vs); break; } vs->as.nchannels = read_u8(data, 5); if (vs->as.nchannels != 1 && vs->as.nchannels != 2) { printf("Invalid audio channel coount %d\n", read_u8(data, 5)); vnc_client_error(vs); break; } vs->as.freq = read_u32(data, 6); break; default: printf ("Invalid audio message %d\n", read_u8(data, 4)); vnc_client_error(vs); break; } break; default: printf("Msg: %d\n", read_u16(data, 0)); vnc_client_error(vs); break; } break; default: printf("Msg: %d\n", data[0]); vnc_client_error(vs); break; } vnc_read_when(vs, protocol_client_msg, 1); return 0; }
26,221
qemu
9a78eead0c74333a394c0f7bbfc4423ac746fcd5
0
void ppc_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...)) { int i, max; max = ARRAY_SIZE(ppc_defs); for (i = 0; i < max; i++) { (*cpu_fprintf)(f, "PowerPC %-16s PVR %08x\n", ppc_defs[i].name, ppc_defs[i].pvr); } }
26,222
qemu
41bf234d8e35e9273290df278e2aeb88c0c50a4f
0
static void gic_update(gic_state *s) { int best_irq; int best_prio; int irq; int level; int cpu; int cm; for (cpu = 0; cpu < NUM_CPU(s); cpu++) { cm = 1 << cpu; s->current_pending[cpu] = 1023; if (!s->enabled || !s->cpu_enabled[cpu]) { qemu_irq_lower(s->parent_irq[cpu]); return; } best_prio = 0x100; best_irq = 1023; for (irq = 0; irq < GIC_NIRQ; irq++) { if (GIC_TEST_ENABLED(irq) && GIC_TEST_PENDING(irq, cm)) { if (GIC_GET_PRIORITY(irq, cpu) < best_prio) { best_prio = GIC_GET_PRIORITY(irq, cpu); best_irq = irq; } } } level = 0; if (best_prio <= s->priority_mask[cpu]) { s->current_pending[cpu] = best_irq; if (best_prio < s->running_priority[cpu]) { DPRINTF("Raised pending IRQ %d\n", best_irq); level = 1; } } qemu_set_irq(s->parent_irq[cpu], level); } }
26,224
qemu
4a1418e07bdcfaa3177739e04707ecaec75d89e1
0
static void *kqemu_vmalloc(size_t size) { static int phys_ram_fd = -1; static int phys_ram_size = 0; void *ptr; /* no need (?) for a dummy file on OpenBSD/FreeBSD */ #if defined(__OpenBSD__) || defined(__FreeBSD__) || defined(__DragonFly__) int map_anon = MAP_ANON; #else int map_anon = 0; const char *tmpdir; char phys_ram_file[1024]; #ifdef CONFIG_SOLARIS struct statvfs stfs; #else struct statfs stfs; #endif if (!size) { abort (); } if (phys_ram_fd < 0) { tmpdir = getenv("QEMU_TMPDIR"); if (!tmpdir) #ifdef CONFIG_SOLARIS tmpdir = "/tmp"; if (statvfs(tmpdir, &stfs) == 0) { #else tmpdir = "/dev/shm"; if (statfs(tmpdir, &stfs) == 0) { #endif int64_t free_space; int ram_mb; free_space = (int64_t)stfs.f_bavail * stfs.f_bsize; if ((ram_size + 8192 * 1024) >= free_space) { ram_mb = (ram_size / (1024 * 1024)); fprintf(stderr, "You do not have enough space in '%s' for the %d MB of QEMU virtual RAM.\n", tmpdir, ram_mb); if (strcmp(tmpdir, "/dev/shm") == 0) { fprintf(stderr, "To have more space available provided you have enough RAM and swap, do as root:\n" "mount -o remount,size=%dm /dev/shm\n", ram_mb + 16); } else { fprintf(stderr, "Use the '-m' option of QEMU to diminish the amount of virtual RAM or use the\n" "QEMU_TMPDIR environment variable to set another directory where the QEMU\n" "temporary RAM file will be opened.\n"); } fprintf(stderr, "Or disable the accelerator module with -no-kqemu\n"); exit(1); } } snprintf(phys_ram_file, sizeof(phys_ram_file), "%s/qemuXXXXXX", tmpdir); phys_ram_fd = mkstemp(phys_ram_file); if (phys_ram_fd < 0) { fprintf(stderr, "warning: could not create temporary file in '%s'.\n" "Use QEMU_TMPDIR to select a directory in a tmpfs filesystem.\n" "Using '/tmp' as fallback.\n", tmpdir); snprintf(phys_ram_file, sizeof(phys_ram_file), "%s/qemuXXXXXX", "/tmp"); phys_ram_fd = mkstemp(phys_ram_file); if (phys_ram_fd < 0) { fprintf(stderr, "Could not create temporary memory file '%s'\n", phys_ram_file); exit(1); } } unlink(phys_ram_file); } size = (size + 4095) & ~4095; ftruncate(phys_ram_fd, phys_ram_size + size); #endif /* !(__OpenBSD__ || __FreeBSD__ || __DragonFly__) */ ptr = mmap(NULL, size, PROT_WRITE | PROT_READ, map_anon | MAP_SHARED, phys_ram_fd, phys_ram_size); if (ptr == MAP_FAILED) { fprintf(stderr, "Could not map physical memory\n"); exit(1); } phys_ram_size += size; return ptr; }
26,228
qemu
8172539d21a03e982aa7f139ddc1607dc1422045
0
static uint32_t virtio_console_get_features(VirtIODevice *vdev) { return 0; }
26,229
FFmpeg
041086191fc08ab162ad6117b07a5f39639d5d9d
0
void event_loop(void) { SDL_Event event; double incr, pos, frac; for(;;) { SDL_WaitEvent(&event); switch(event.type) { case SDL_KEYDOWN: switch(event.key.keysym.sym) { case SDLK_ESCAPE: case SDLK_q: do_exit(); break; case SDLK_f: toggle_full_screen(); break; case SDLK_p: case SDLK_SPACE: toggle_pause(); break; case SDLK_s: //S: Step to next frame step_to_next_frame(); break; case SDLK_a: if (cur_stream) stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO); break; case SDLK_v: if (cur_stream) stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO); break; case SDLK_w: toggle_audio_display(); break; case SDLK_LEFT: incr = -10.0; goto do_seek; case SDLK_RIGHT: incr = 10.0; goto do_seek; case SDLK_UP: incr = 60.0; goto do_seek; case SDLK_DOWN: incr = -60.0; do_seek: if (cur_stream) { pos = get_master_clock(cur_stream); printf("%f %f %d %d %d %d\n", (float)pos, (float)incr, cur_stream->av_sync_type == AV_SYNC_VIDEO_MASTER, cur_stream->av_sync_type == AV_SYNC_AUDIO_MASTER, cur_stream->video_st, cur_stream->audio_st); pos += incr; stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE)); } break; default: break; } break; case SDL_MOUSEBUTTONDOWN: if (cur_stream) { int ns, hh, mm, ss; int tns, thh, tmm, tss; tns = cur_stream->ic->duration/1000000LL; thh = tns/3600; tmm = (tns%3600)/60; tss = (tns%60); frac = (double)event.button.x/(double)cur_stream->width; ns = frac*tns; hh = ns/3600; mm = (ns%3600)/60; ss = (ns%60); fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100, hh, mm, ss, thh, tmm, tss); stream_seek(cur_stream, (int64_t)(cur_stream->ic->start_time+frac*cur_stream->ic->duration)); } break; case SDL_VIDEORESIZE: if (cur_stream) { screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0, SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL); cur_stream->width = event.resize.w; cur_stream->height = event.resize.h; } break; case SDL_QUIT: case FF_QUIT_EVENT: do_exit(); break; case FF_ALLOC_EVENT: alloc_picture(event.user.data1); break; case FF_REFRESH_EVENT: video_refresh_timer(event.user.data1); break; default: break; } } }
26,231
qemu
e69a17f65e9f12f33c48b04a789e49d40a8993f5
0
static inline void IRQ_setbit(IRQQueue *q, int n_IRQ) { set_bit(q->queue, n_IRQ); }
26,232
qemu
1ea879e5580f63414693655fcf0328559cdce138
0
static void sdl_callback (void *opaque, Uint8 *buf, int len) { SDLVoiceOut *sdl = opaque; SDLAudioState *s = &glob_sdl; HWVoiceOut *hw = &sdl->hw; int samples = len >> hw->info.shift; if (s->exit) { return; } while (samples) { int to_mix, decr; /* dolog ("in callback samples=%d\n", samples); */ sdl_wait (s, "sdl_callback"); if (s->exit) { return; } if (sdl_lock (s, "sdl_callback")) { return; } if (audio_bug (AUDIO_FUNC, sdl->live < 0 || sdl->live > hw->samples)) { dolog ("sdl->live=%d hw->samples=%d\n", sdl->live, hw->samples); return; } if (!sdl->live) { goto again; } /* dolog ("in callback live=%d\n", live); */ to_mix = audio_MIN (samples, sdl->live); decr = to_mix; while (to_mix) { int chunk = audio_MIN (to_mix, hw->samples - hw->rpos); st_sample_t *src = hw->mix_buf + hw->rpos; /* dolog ("in callback to_mix %d, chunk %d\n", to_mix, chunk); */ hw->clip (buf, src, chunk); sdl->rpos = (sdl->rpos + chunk) % hw->samples; to_mix -= chunk; buf += chunk << hw->info.shift; } samples -= decr; sdl->live -= decr; sdl->decr += decr; again: if (sdl_unlock (s, "sdl_callback")) { return; } } /* dolog ("done len=%d\n", len); */ }
26,233
FFmpeg
565e0c6d866ce08d4b06427456d3d1f4fd856e9c
0
static int mov_write_ilst_tag(AVIOContext *pb, MOVMuxContext *mov, AVFormatContext *s) { int64_t pos = avio_tell(pb); avio_wb32(pb, 0); /* size */ ffio_wfourcc(pb, "ilst"); mov_write_string_metadata(s, pb, "\251nam", "title" , 1); mov_write_string_metadata(s, pb, "\251ART", "artist" , 1); mov_write_string_metadata(s, pb, "aART", "album_artist", 1); mov_write_string_metadata(s, pb, "\251wrt", "composer" , 1); mov_write_string_metadata(s, pb, "\251alb", "album" , 1); mov_write_string_metadata(s, pb, "\251day", "date" , 1); mov_write_string_tag(pb, "\251too", LIBAVFORMAT_IDENT, 0, 1); mov_write_string_metadata(s, pb, "\251cmt", "comment" , 1); mov_write_string_metadata(s, pb, "\251gen", "genre" , 1); mov_write_string_metadata(s, pb, "\251cpy", "copyright", 1); mov_write_string_metadata(s, pb, "\251grp", "grouping" , 1); mov_write_string_metadata(s, pb, "\251lyr", "lyrics" , 1); mov_write_string_metadata(s, pb, "desc", "description",1); mov_write_string_metadata(s, pb, "ldes", "synopsis" , 1); mov_write_string_metadata(s, pb, "tvsh", "show" , 1); mov_write_string_metadata(s, pb, "tven", "episode_id",1); mov_write_string_metadata(s, pb, "tvnn", "network" , 1); mov_write_trkn_tag(pb, mov, s); return update_size(pb, pos); }
26,235
FFmpeg
0b54f3c0878a3acaa9142e4f24942e762d97e350
1
static int gif_read_close(AVFormatContext *s1) { GifState *s = s1->priv_data; av_free(s->image_buf); return 0; }
26,236
qemu
403e633126b7a781ecd48a29e3355770d46bbf1a
1
void qemu_thread_get_self(QemuThread *thread) { if (!thread->thread) { /* In the main thread of the process. Initialize the QemuThread pointer in TLS, and use the dummy GetCurrentThread handle as the identifier for qemu_thread_is_self. */ qemu_thread_init(); TlsSetValue(qemu_thread_tls_index, thread); thread->thread = GetCurrentThread(); } }
26,238
qemu
480cff632221dc4d4889bf72dd0f09cd35096bc1
1
static void coroutine_fn qemu_co_mutex_lock_slowpath(CoMutex *mutex) { Coroutine *self = qemu_coroutine_self(); CoWaitRecord w; unsigned old_handoff; trace_qemu_co_mutex_lock_entry(mutex, self); w.co = self; push_waiter(mutex, &w); /* This is the "Responsibility Hand-Off" protocol; a lock() picks from * a concurrent unlock() the responsibility of waking somebody up. */ old_handoff = atomic_mb_read(&mutex->handoff); if (old_handoff && has_waiters(mutex) && atomic_cmpxchg(&mutex->handoff, old_handoff, 0) == old_handoff) { /* There can be no concurrent pops, because there can be only * one active handoff at a time. */ CoWaitRecord *to_wake = pop_waiter(mutex); Coroutine *co = to_wake->co; if (co == self) { /* We got the lock ourselves! */ assert(to_wake == &w); return; } aio_co_wake(co); } qemu_coroutine_yield(); trace_qemu_co_mutex_lock_return(mutex, self); }
26,239
FFmpeg
2c046c718aefbc9f8223e22f85bb119da4fea04d
1
static int pnm_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; PNMContext * const s = avctx->priv_data; AVFrame * const p = data; int i, j, n, linesize, h, upgrade = 0, is_mono = 0; unsigned char *ptr; int components, sample_len, ret; unsigned int maskval = 0; s->bytestream_start = s->bytestream = (uint8_t *)buf; s->bytestream_end = (uint8_t *)buf + buf_size; if ((ret = ff_pnm_decode_header(avctx, s)) < 0) return ret; if ((ret = ff_get_buffer(avctx, p, 0)) < 0) return ret; p->pict_type = AV_PICTURE_TYPE_I; p->key_frame = 1; switch (avctx->pix_fmt) { default: return AVERROR(EINVAL); case AV_PIX_FMT_RGBA64BE: n = avctx->width * 8; components=4; sample_len=16; goto do_read; case AV_PIX_FMT_RGB48BE: n = avctx->width * 6; components=3; sample_len=16; goto do_read; case AV_PIX_FMT_RGBA: n = avctx->width * 4; components=4; sample_len=8; goto do_read; case AV_PIX_FMT_RGB24: n = avctx->width * 3; components=3; sample_len=8; goto do_read; case AV_PIX_FMT_GRAY8: n = avctx->width; components=1; sample_len=8; if (s->maxval < 255) { upgrade = 1; maskval = (2 << av_log2(s->maxval)) - 1; } goto do_read; case AV_PIX_FMT_GRAY8A: n = avctx->width * 2; components=2; sample_len=8; goto do_read; case AV_PIX_FMT_GRAY16BE: case AV_PIX_FMT_GRAY16LE: n = avctx->width * 2; components=1; sample_len=16; if (s->maxval < 65535) { upgrade = 2; maskval = (2 << av_log2(s->maxval)) - 1; } goto do_read; case AV_PIX_FMT_MONOWHITE: case AV_PIX_FMT_MONOBLACK: n = (avctx->width + 7) >> 3; components=1; sample_len=1; is_mono = 1; do_read: ptr = p->data[0]; linesize = p->linesize[0]; if (s->bytestream + n * avctx->height > s->bytestream_end) return AVERROR_INVALIDDATA; if(s->type < 4 || (is_mono && s->type==7)){ for (i=0; i<avctx->height; i++) { PutBitContext pb; init_put_bits(&pb, ptr, linesize); for(j=0; j<avctx->width * components; j++){ unsigned int c=0; int v=0; if(s->type < 4) while(s->bytestream < s->bytestream_end && (*s->bytestream < '0' || *s->bytestream > '9' )) s->bytestream++; if(s->bytestream >= s->bytestream_end) return AVERROR_INVALIDDATA; if (is_mono) { /* read a single digit */ v = (*s->bytestream++)&1; } else { /* read a sequence of digits */ do { v = 10*v + c; c = (*s->bytestream++) - '0'; } while (c <= 9); } put_bits(&pb, sample_len, (((1<<sample_len)-1)*v + (s->maxval>>1))/s->maxval); } flush_put_bits(&pb); ptr+= linesize; } }else{ for (i = 0; i < avctx->height; i++) { if (!upgrade) memcpy(ptr, s->bytestream, n); else if (upgrade == 1) { unsigned int j, f = (255 * 128 + s->maxval / 2) / s->maxval; for (j = 0; j < n; j++) ptr[j] = ((s->bytestream[j] & maskval) * f + 64) >> 7; } else if (upgrade == 2) { unsigned int j, v, f = (65535 * 32768 + s->maxval / 2) / s->maxval; for (j = 0; j < n / 2; j++) { v = av_be2ne16(((uint16_t *)s->bytestream)[j]) & maskval; ((uint16_t *)ptr)[j] = (v * f + 16384) >> 15; } } s->bytestream += n; ptr += linesize; } } break; case AV_PIX_FMT_YUV420P: case AV_PIX_FMT_YUV420P9BE: case AV_PIX_FMT_YUV420P10BE: { unsigned char *ptr1, *ptr2; n = avctx->width; ptr = p->data[0]; linesize = p->linesize[0]; if (s->maxval >= 256) n *= 2; if (s->bytestream + n * avctx->height * 3 / 2 > s->bytestream_end) return AVERROR_INVALIDDATA; for (i = 0; i < avctx->height; i++) { memcpy(ptr, s->bytestream, n); s->bytestream += n; ptr += linesize; } ptr1 = p->data[1]; ptr2 = p->data[2]; n >>= 1; h = avctx->height >> 1; for (i = 0; i < h; i++) { memcpy(ptr1, s->bytestream, n); s->bytestream += n; memcpy(ptr2, 
s->bytestream, n); s->bytestream += n; ptr1 += p->linesize[1]; ptr2 += p->linesize[2]; } } break; case AV_PIX_FMT_YUV420P16: { uint16_t *ptr1, *ptr2; const int f = (65535 * 32768 + s->maxval / 2) / s->maxval; unsigned int j, v; n = avctx->width * 2; ptr = p->data[0]; linesize = p->linesize[0]; if (s->bytestream + n * avctx->height * 3 / 2 > s->bytestream_end) return AVERROR_INVALIDDATA; for (i = 0; i < avctx->height; i++) { for (j = 0; j < n / 2; j++) { v = av_be2ne16(((uint16_t *)s->bytestream)[j]); ((uint16_t *)ptr)[j] = (v * f + 16384) >> 15; } s->bytestream += n; ptr += linesize; } ptr1 = (uint16_t*)p->data[1]; ptr2 = (uint16_t*)p->data[2]; n >>= 1; h = avctx->height >> 1; for (i = 0; i < h; i++) { for (j = 0; j < n / 2; j++) { v = av_be2ne16(((uint16_t *)s->bytestream)[j]); ptr1[j] = (v * f + 16384) >> 15; } s->bytestream += n; for (j = 0; j < n / 2; j++) { v = av_be2ne16(((uint16_t *)s->bytestream)[j]); ptr2[j] = (v * f + 16384) >> 15; } s->bytestream += n; ptr1 += p->linesize[1] / 2; ptr2 += p->linesize[2] / 2; } } break; } *got_frame = 1; return s->bytestream - s->bytestream_start; }
26,240
qemu
e4a3507e86a1ef1453d603031bca27d5ac4cff3c
0
static ssize_t block_crypto_write_func(QCryptoBlock *block, void *opaque, size_t offset, const uint8_t *buf, size_t buflen, Error **errp) { struct BlockCryptoCreateData *data = opaque; ssize_t ret; ret = blk_pwrite(data->blk, offset, buf, buflen, 0); if (ret < 0) { error_setg_errno(errp, -ret, "Could not write encryption header"); return ret; } return ret; }
26,241
qemu
d6f4ade214a9f74dca9495b83a24ff9c113e4f9a
0
static int qemu_calculate_timeout(void) { #ifndef CONFIG_IOTHREAD int timeout; if (!vm_running) timeout = 5000; else if (tcg_has_work()) timeout = 0; else { /* XXX: use timeout computed from timers */ int64_t add; int64_t delta; /* Advance virtual time to the next event. */ delta = qemu_icount_delta(); if (delta > 0) { /* If virtual time is ahead of real time then just wait for IO. */ timeout = (delta + 999999) / 1000000; } else { /* Wait for either IO to occur or the next timer event. */ add = qemu_next_deadline(); /* We advance the timer before checking for IO. Limit the amount we advance so that early IO activity won't get the guest too far ahead. */ if (add > 10000000) add = 10000000; delta += add; qemu_icount += qemu_icount_round (add); timeout = delta / 1000000; if (timeout < 0) timeout = 0; } } return timeout; #else /* CONFIG_IOTHREAD */ return 1000; #endif }
26,244
qemu
e280ff5e9159ed227a117339c1157143627cab96
0
static int spice_chr_write(CharDriverState *chr, const uint8_t *buf, int len) { SpiceCharDriver *s = chr->opaque; vmc_register_interface(s); assert(s->datalen == 0); if (s->bufsize < len) { s->bufsize = len; s->buffer = g_realloc(s->buffer, s->bufsize); } memcpy(s->buffer, buf, len); s->datapos = s->buffer; s->datalen = len; spice_server_char_device_wakeup(&s->sin); return len; }
26,246
qemu
68756ba8be2127b6ea30a466af9f78a5c97bc15f
0
static int net_slirp_init(NetClientState *peer, const char *model, const char *name, int restricted, const char *vnetwork, const char *vhost, const char *vhostname, const char *tftp_export, const char *bootfile, const char *vdhcp_start, const char *vnameserver, const char *smb_export, const char *vsmbserver, const char **dnssearch) { /* default settings according to historic slirp */ struct in_addr net = { .s_addr = htonl(0x0a000200) }; /* 10.0.2.0 */ struct in_addr mask = { .s_addr = htonl(0xffffff00) }; /* 255.255.255.0 */ struct in_addr host = { .s_addr = htonl(0x0a000202) }; /* 10.0.2.2 */ struct in_addr dhcp = { .s_addr = htonl(0x0a00020f) }; /* 10.0.2.15 */ struct in_addr dns = { .s_addr = htonl(0x0a000203) }; /* 10.0.2.3 */ #ifndef _WIN32 struct in_addr smbsrv = { .s_addr = 0 }; #endif NetClientState *nc; SlirpState *s; char buf[20]; uint32_t addr; int shift; char *end; struct slirp_config_str *config; if (!tftp_export) { tftp_export = legacy_tftp_prefix; } if (!bootfile) { bootfile = legacy_bootp_filename; } if (vnetwork) { if (get_str_sep(buf, sizeof(buf), &vnetwork, '/') < 0) { if (!inet_aton(vnetwork, &net)) { return -1; } addr = ntohl(net.s_addr); if (!(addr & 0x80000000)) { mask.s_addr = htonl(0xff000000); /* class A */ } else if ((addr & 0xfff00000) == 0xac100000) { mask.s_addr = htonl(0xfff00000); /* priv. 172.16.0.0/12 */ } else if ((addr & 0xc0000000) == 0x80000000) { mask.s_addr = htonl(0xffff0000); /* class B */ } else if ((addr & 0xffff0000) == 0xc0a80000) { mask.s_addr = htonl(0xffff0000); /* priv. 192.168.0.0/16 */ } else if ((addr & 0xffff0000) == 0xc6120000) { mask.s_addr = htonl(0xfffe0000); /* tests 198.18.0.0/15 */ } else if ((addr & 0xe0000000) == 0xe0000000) { mask.s_addr = htonl(0xffffff00); /* class C */ } else { mask.s_addr = htonl(0xfffffff0); /* multicast/reserved */ } } else { if (!inet_aton(buf, &net)) { return -1; } shift = strtol(vnetwork, &end, 10); if (*end != '\0') { if (!inet_aton(vnetwork, &mask)) { return -1; } } else if (shift < 4 || shift > 32) { return -1; } else { mask.s_addr = htonl(0xffffffff << (32 - shift)); } } net.s_addr &= mask.s_addr; host.s_addr = net.s_addr | (htonl(0x0202) & ~mask.s_addr); dhcp.s_addr = net.s_addr | (htonl(0x020f) & ~mask.s_addr); dns.s_addr = net.s_addr | (htonl(0x0203) & ~mask.s_addr); } if (vhost && !inet_aton(vhost, &host)) { return -1; } if ((host.s_addr & mask.s_addr) != net.s_addr) { return -1; } if (vdhcp_start && !inet_aton(vdhcp_start, &dhcp)) { return -1; } if ((dhcp.s_addr & mask.s_addr) != net.s_addr || dhcp.s_addr == host.s_addr || dhcp.s_addr == dns.s_addr) { return -1; } if (vnameserver && !inet_aton(vnameserver, &dns)) { return -1; } if ((dns.s_addr & mask.s_addr) != net.s_addr || dns.s_addr == host.s_addr) { return -1; } #ifndef _WIN32 if (vsmbserver && !inet_aton(vsmbserver, &smbsrv)) { return -1; } #endif nc = qemu_new_net_client(&net_slirp_info, peer, model, name); snprintf(nc->info_str, sizeof(nc->info_str), "net=%s,restrict=%s", inet_ntoa(net), restricted ? 
"on" : "off"); s = DO_UPCAST(SlirpState, nc, nc); s->slirp = slirp_init(restricted, net, mask, host, vhostname, tftp_export, bootfile, dhcp, dns, dnssearch, s); QTAILQ_INSERT_TAIL(&slirp_stacks, s, entry); for (config = slirp_configs; config; config = config->next) { if (config->flags & SLIRP_CFG_HOSTFWD) { if (slirp_hostfwd(s, config->str, config->flags & SLIRP_CFG_LEGACY) < 0) goto error; } else { if (slirp_guestfwd(s, config->str, config->flags & SLIRP_CFG_LEGACY) < 0) goto error; } } #ifndef _WIN32 if (!smb_export) { smb_export = legacy_smb_export; } if (smb_export) { if (slirp_smb(s, smb_export, smbsrv) < 0) goto error; } #endif return 0; error: qemu_del_net_client(nc); return -1; }
26,247
qemu
492ec48dc2d99ca13b24d554e1970af7e2581e23
0
static int ppce500_load_device_tree(CPUPPCState *env, PPCE500Params *params, hwaddr addr, hwaddr initrd_base, hwaddr initrd_size) { int ret = -1; uint64_t mem_reg_property[] = { 0, cpu_to_be64(params->ram_size) }; int fdt_size; void *fdt; uint8_t hypercall[16]; uint32_t clock_freq = 400000000; uint32_t tb_freq = 400000000; int i; const char *toplevel_compat = NULL; /* user override */ char compatible_sb[] = "fsl,mpc8544-immr\0simple-bus"; char soc[128]; char mpic[128]; uint32_t mpic_ph; uint32_t msi_ph; char gutil[128]; char pci[128]; char msi[128]; uint32_t *pci_map = NULL; int len; uint32_t pci_ranges[14] = { 0x2000000, 0x0, 0xc0000000, 0x0, 0xc0000000, 0x0, 0x20000000, 0x1000000, 0x0, 0x0, 0x0, 0xe1000000, 0x0, 0x10000, }; QemuOpts *machine_opts; const char *dtb_file = NULL; machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0); if (machine_opts) { dtb_file = qemu_opt_get(machine_opts, "dtb"); toplevel_compat = qemu_opt_get(machine_opts, "dt_compatible"); } if (dtb_file) { char *filename; filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, dtb_file); if (!filename) { goto out; } fdt = load_device_tree(filename, &fdt_size); if (!fdt) { goto out; } goto done; } fdt = create_device_tree(&fdt_size); if (fdt == NULL) { goto out; } /* Manipulate device tree in memory. */ qemu_devtree_setprop_cell(fdt, "/", "#address-cells", 2); qemu_devtree_setprop_cell(fdt, "/", "#size-cells", 2); qemu_devtree_add_subnode(fdt, "/memory"); qemu_devtree_setprop_string(fdt, "/memory", "device_type", "memory"); qemu_devtree_setprop(fdt, "/memory", "reg", mem_reg_property, sizeof(mem_reg_property)); qemu_devtree_add_subnode(fdt, "/chosen"); if (initrd_size) { ret = qemu_devtree_setprop_cell(fdt, "/chosen", "linux,initrd-start", initrd_base); if (ret < 0) { fprintf(stderr, "couldn't set /chosen/linux,initrd-start\n"); } ret = qemu_devtree_setprop_cell(fdt, "/chosen", "linux,initrd-end", (initrd_base + initrd_size)); if (ret < 0) { fprintf(stderr, "couldn't set /chosen/linux,initrd-end\n"); } } ret = qemu_devtree_setprop_string(fdt, "/chosen", "bootargs", params->kernel_cmdline); if (ret < 0) fprintf(stderr, "couldn't set /chosen/bootargs\n"); if (kvm_enabled()) { /* Read out host's frequencies */ clock_freq = kvmppc_get_clockfreq(); tb_freq = kvmppc_get_tbfreq(); /* indicate KVM hypercall interface */ qemu_devtree_add_subnode(fdt, "/hypervisor"); qemu_devtree_setprop_string(fdt, "/hypervisor", "compatible", "linux,kvm"); kvmppc_get_hypercall(env, hypercall, sizeof(hypercall)); qemu_devtree_setprop(fdt, "/hypervisor", "hcall-instructions", hypercall, sizeof(hypercall)); } /* Create CPU nodes */ qemu_devtree_add_subnode(fdt, "/cpus"); qemu_devtree_setprop_cell(fdt, "/cpus", "#address-cells", 1); qemu_devtree_setprop_cell(fdt, "/cpus", "#size-cells", 0); /* We need to generate the cpu nodes in reverse order, so Linux can pick the first node as boot node and be happy */ for (i = smp_cpus - 1; i >= 0; i--) { char cpu_name[128]; uint64_t cpu_release_addr = MPC8544_SPIN_BASE + (i * 0x20); for (env = first_cpu; env != NULL; env = env->next_cpu) { if (env->cpu_index == i) { break; } } if (!env) { continue; } snprintf(cpu_name, sizeof(cpu_name), "/cpus/PowerPC,8544@%x", env->cpu_index); qemu_devtree_add_subnode(fdt, cpu_name); qemu_devtree_setprop_cell(fdt, cpu_name, "clock-frequency", clock_freq); qemu_devtree_setprop_cell(fdt, cpu_name, "timebase-frequency", tb_freq); qemu_devtree_setprop_string(fdt, cpu_name, "device_type", "cpu"); qemu_devtree_setprop_cell(fdt, cpu_name, "reg", env->cpu_index); 
qemu_devtree_setprop_cell(fdt, cpu_name, "d-cache-line-size", env->dcache_line_size); qemu_devtree_setprop_cell(fdt, cpu_name, "i-cache-line-size", env->icache_line_size); qemu_devtree_setprop_cell(fdt, cpu_name, "d-cache-size", 0x8000); qemu_devtree_setprop_cell(fdt, cpu_name, "i-cache-size", 0x8000); qemu_devtree_setprop_cell(fdt, cpu_name, "bus-frequency", 0); if (env->cpu_index) { qemu_devtree_setprop_string(fdt, cpu_name, "status", "disabled"); qemu_devtree_setprop_string(fdt, cpu_name, "enable-method", "spin-table"); qemu_devtree_setprop_u64(fdt, cpu_name, "cpu-release-addr", cpu_release_addr); } else { qemu_devtree_setprop_string(fdt, cpu_name, "status", "okay"); } } qemu_devtree_add_subnode(fdt, "/aliases"); /* XXX These should go into their respective devices' code */ snprintf(soc, sizeof(soc), "/soc@%llx", MPC8544_CCSRBAR_BASE); qemu_devtree_add_subnode(fdt, soc); qemu_devtree_setprop_string(fdt, soc, "device_type", "soc"); qemu_devtree_setprop(fdt, soc, "compatible", compatible_sb, sizeof(compatible_sb)); qemu_devtree_setprop_cell(fdt, soc, "#address-cells", 1); qemu_devtree_setprop_cell(fdt, soc, "#size-cells", 1); qemu_devtree_setprop_cells(fdt, soc, "ranges", 0x0, MPC8544_CCSRBAR_BASE >> 32, MPC8544_CCSRBAR_BASE, MPC8544_CCSRBAR_SIZE); /* XXX should contain a reasonable value */ qemu_devtree_setprop_cell(fdt, soc, "bus-frequency", 0); snprintf(mpic, sizeof(mpic), "%s/pic@%llx", soc, MPC8544_MPIC_REGS_OFFSET); qemu_devtree_add_subnode(fdt, mpic); qemu_devtree_setprop_string(fdt, mpic, "device_type", "open-pic"); qemu_devtree_setprop_string(fdt, mpic, "compatible", "chrp,open-pic"); qemu_devtree_setprop_cells(fdt, mpic, "reg", MPC8544_MPIC_REGS_OFFSET, 0x40000); qemu_devtree_setprop_cell(fdt, mpic, "#address-cells", 0); qemu_devtree_setprop_cell(fdt, mpic, "#interrupt-cells", 2); mpic_ph = qemu_devtree_alloc_phandle(fdt); qemu_devtree_setprop_cell(fdt, mpic, "phandle", mpic_ph); qemu_devtree_setprop_cell(fdt, mpic, "linux,phandle", mpic_ph); qemu_devtree_setprop(fdt, mpic, "interrupt-controller", NULL, 0); /* * We have to generate ser1 first, because Linux takes the first * device it finds in the dt as serial output device. And we generate * devices in reverse order to the dt. 
*/ dt_serial_create(fdt, MPC8544_SERIAL1_REGS_OFFSET, soc, mpic, "serial1", 1, false); dt_serial_create(fdt, MPC8544_SERIAL0_REGS_OFFSET, soc, mpic, "serial0", 0, true); snprintf(gutil, sizeof(gutil), "%s/global-utilities@%llx", soc, MPC8544_UTIL_OFFSET); qemu_devtree_add_subnode(fdt, gutil); qemu_devtree_setprop_string(fdt, gutil, "compatible", "fsl,mpc8544-guts"); qemu_devtree_setprop_cells(fdt, gutil, "reg", MPC8544_UTIL_OFFSET, 0x1000); qemu_devtree_setprop(fdt, gutil, "fsl,has-rstcr", NULL, 0); snprintf(msi, sizeof(msi), "/%s/msi@%llx", soc, MPC8544_MSI_REGS_OFFSET); qemu_devtree_add_subnode(fdt, msi); qemu_devtree_setprop_string(fdt, msi, "compatible", "fsl,mpic-msi"); qemu_devtree_setprop_cells(fdt, msi, "reg", MPC8544_MSI_REGS_OFFSET, 0x200); msi_ph = qemu_devtree_alloc_phandle(fdt); qemu_devtree_setprop_cells(fdt, msi, "msi-available-ranges", 0x0, 0x100); qemu_devtree_setprop_phandle(fdt, msi, "interrupt-parent", mpic); qemu_devtree_setprop_cells(fdt, msi, "interrupts", 0xe0, 0x0, 0xe1, 0x0, 0xe2, 0x0, 0xe3, 0x0, 0xe4, 0x0, 0xe5, 0x0, 0xe6, 0x0, 0xe7, 0x0); qemu_devtree_setprop_cell(fdt, msi, "phandle", msi_ph); qemu_devtree_setprop_cell(fdt, msi, "linux,phandle", msi_ph); snprintf(pci, sizeof(pci), "/pci@%llx", MPC8544_PCI_REGS_BASE); qemu_devtree_add_subnode(fdt, pci); qemu_devtree_setprop_cell(fdt, pci, "cell-index", 0); qemu_devtree_setprop_string(fdt, pci, "compatible", "fsl,mpc8540-pci"); qemu_devtree_setprop_string(fdt, pci, "device_type", "pci"); qemu_devtree_setprop_cells(fdt, pci, "interrupt-map-mask", 0xf800, 0x0, 0x0, 0x7); pci_map = pci_map_create(fdt, qemu_devtree_get_phandle(fdt, mpic), 0x11, 2, &len); qemu_devtree_setprop(fdt, pci, "interrupt-map", pci_map, len); qemu_devtree_setprop_phandle(fdt, pci, "interrupt-parent", mpic); qemu_devtree_setprop_cells(fdt, pci, "interrupts", 24, 2); qemu_devtree_setprop_cells(fdt, pci, "bus-range", 0, 255); for (i = 0; i < 14; i++) { pci_ranges[i] = cpu_to_be32(pci_ranges[i]); } qemu_devtree_setprop_cell(fdt, pci, "fsl,msi", msi_ph); qemu_devtree_setprop(fdt, pci, "ranges", pci_ranges, sizeof(pci_ranges)); qemu_devtree_setprop_cells(fdt, pci, "reg", MPC8544_PCI_REGS_BASE >> 32, MPC8544_PCI_REGS_BASE, 0, 0x1000); qemu_devtree_setprop_cell(fdt, pci, "clock-frequency", 66666666); qemu_devtree_setprop_cell(fdt, pci, "#interrupt-cells", 1); qemu_devtree_setprop_cell(fdt, pci, "#size-cells", 2); qemu_devtree_setprop_cell(fdt, pci, "#address-cells", 3); qemu_devtree_setprop_string(fdt, "/aliases", "pci0", pci); params->fixup_devtree(params, fdt); if (toplevel_compat) { qemu_devtree_setprop(fdt, "/", "compatible", toplevel_compat, strlen(toplevel_compat) + 1); } done: qemu_devtree_dumpdtb(fdt, fdt_size); ret = rom_add_blob_fixed(BINARY_DEVICE_TREE_FILE, fdt, fdt_size, addr); if (ret < 0) { goto out; } g_free(fdt); ret = fdt_size; out: g_free(pci_map); return ret; }
26,248
FFmpeg
0cf3505930913d3584b215f6912de04ff41366e0
0
int ff_audio_mix_get_matrix(AudioMix *am, double *matrix, int stride) { int i, o; if ( am->in_channels <= 0 || am->in_channels > AVRESAMPLE_MAX_CHANNELS || am->out_channels <= 0 || am->out_channels > AVRESAMPLE_MAX_CHANNELS) { av_log(am, AV_LOG_ERROR, "Invalid channel counts\n"); return AVERROR(EINVAL); } #define GET_MATRIX_CONVERT(suffix, scale) \ if (!am->matrix_ ## suffix[0]) { \ av_log(am, AV_LOG_ERROR, "matrix is not set\n"); \ return AVERROR(EINVAL); \ } \ for (o = 0; o < am->out_channels; o++) \ for (i = 0; i < am->in_channels; i++) \ matrix[o * stride + i] = am->matrix_ ## suffix[o][i] * (scale); switch (am->coeff_type) { case AV_MIX_COEFF_TYPE_Q8: GET_MATRIX_CONVERT(q8, 1.0 / 256.0); break; case AV_MIX_COEFF_TYPE_Q15: GET_MATRIX_CONVERT(q15, 1.0 / 32768.0); break; case AV_MIX_COEFF_TYPE_FLT: GET_MATRIX_CONVERT(flt, 1.0); break; default: av_log(am, AV_LOG_ERROR, "Invalid mix coeff type\n"); return AVERROR(EINVAL); } return 0; }
26,249
qemu
4be746345f13e99e468c60acbd3a355e8183e3ce
0
DriveInfo *drive_get_by_blockdev(BlockDriverState *bs) { return bs->blk ? blk_legacy_dinfo(bs->blk) : NULL; }
26,250
qemu
b7680cb6078bd7294a3dd86473d3f2fdee991dd0
0
int kvm_arch_put_registers(CPUState *env, int level) { int ret; assert(cpu_is_stopped(env) || qemu_cpu_self(env)); ret = kvm_getput_regs(env, 1); if (ret < 0) { return ret; } ret = kvm_put_xsave(env); if (ret < 0) { return ret; } ret = kvm_put_xcrs(env); if (ret < 0) { return ret; } ret = kvm_put_sregs(env); if (ret < 0) { return ret; } ret = kvm_put_msrs(env, level); if (ret < 0) { return ret; } if (level >= KVM_PUT_RESET_STATE) { ret = kvm_put_mp_state(env); if (ret < 0) { return ret; } } ret = kvm_put_vcpu_events(env, level); if (ret < 0) { return ret; } ret = kvm_put_debugregs(env); if (ret < 0) { return ret; } /* must be last */ ret = kvm_guest_debug_workarounds(env); if (ret < 0) { return ret; } return 0; }
26,252