label (int64, 0–1) | func1 (string, 23–97k chars) | id (int64, 0–27.3k) |
---|---|---|
1 | int ff_h264_decode_sei(H264Context *h){ while (get_bits_left(&h->gb) > 16) { int size, type; type=0; do{ if (get_bits_left(&h->gb) < 8) return AVERROR_INVALIDDATA; type+= show_bits(&h->gb, 8); }while(get_bits(&h->gb, 8) == 255); size=0; do{ if (get_bits_left(&h->gb) < 8) return AVERROR_INVALIDDATA; size+= show_bits(&h->gb, 8); }while(get_bits(&h->gb, 8) == 255); if(h->avctx->debug&FF_DEBUG_STARTCODE) av_log(h->avctx, AV_LOG_DEBUG, "SEI %d len:%d\n", type, size); switch(type){ case SEI_TYPE_PIC_TIMING: // Picture timing SEI if(decode_picture_timing(h) < 0) return -1; break; case SEI_TYPE_USER_DATA_ITU_T_T35: if(decode_user_data_itu_t_t35(h, size) < 0) return -1; break; case SEI_TYPE_USER_DATA_UNREGISTERED: if(decode_unregistered_user_data(h, size) < 0) return -1; break; case SEI_TYPE_RECOVERY_POINT: if(decode_recovery_point(h) < 0) return -1; break; case SEI_BUFFERING_PERIOD: if(decode_buffering_period(h) < 0) return -1; break; case SEI_TYPE_FRAME_PACKING: if(decode_frame_packing(h, size) < 0) return -1; default: skip_bits(&h->gb, 8*size); } //FIXME check bits here align_get_bits(&h->gb); } return 0; } | 26,761 |
0 | static int mpeg_decode_postinit(AVCodecContext *avctx) { Mpeg1Context *s1 = avctx->priv_data; MpegEncContext *s = &s1->mpeg_enc_ctx; uint8_t old_permutation[64]; int ret; if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO) { // MPEG-1 aspect avctx->sample_aspect_ratio = av_d2q(1.0 / ff_mpeg1_aspect[s->aspect_ratio_info], 255); } else { // MPEG-2 // MPEG-2 aspect if (s->aspect_ratio_info > 1) { AVRational dar = av_mul_q(av_div_q(ff_mpeg2_aspect[s->aspect_ratio_info], (AVRational) { s1->pan_scan.width, s1->pan_scan.height }), (AVRational) { s->width, s->height }); /* We ignore the spec here and guess a bit as reality does not * match the spec, see for example res_change_ffmpeg_aspect.ts * and sequence-display-aspect.mpg. * issue1613, 621, 562 */ if ((s1->pan_scan.width == 0) || (s1->pan_scan.height == 0) || (av_cmp_q(dar, (AVRational) { 4, 3 }) && av_cmp_q(dar, (AVRational) { 16, 9 }))) { s->avctx->sample_aspect_ratio = av_div_q(ff_mpeg2_aspect[s->aspect_ratio_info], (AVRational) { s->width, s->height }); } else { s->avctx->sample_aspect_ratio = av_div_q(ff_mpeg2_aspect[s->aspect_ratio_info], (AVRational) { s1->pan_scan.width, s1->pan_scan.height }); // issue1613 4/3 16/9 -> 16/9 // res_change_ffmpeg_aspect.ts 4/3 225/44 ->4/3 // widescreen-issue562.mpg 4/3 16/9 -> 16/9 // s->avctx->sample_aspect_ratio = av_mul_q(s->avctx->sample_aspect_ratio, (AVRational) {s->width, s->height}); av_dlog(avctx, "A %d/%d\n", ff_mpeg2_aspect[s->aspect_ratio_info].num, ff_mpeg2_aspect[s->aspect_ratio_info].den); av_dlog(avctx, "B %d/%d\n", s->avctx->sample_aspect_ratio.num, s->avctx->sample_aspect_ratio.den); } } else { s->avctx->sample_aspect_ratio = ff_mpeg2_aspect[s->aspect_ratio_info]; } } // MPEG-2 ff_set_sar(s->avctx, s->avctx->sample_aspect_ratio); if ((s1->mpeg_enc_ctx_allocated == 0) || avctx->coded_width != s->width || avctx->coded_height != s->height || s1->save_width != s->width || s1->save_height != s->height || s1->save_aspect_info != s->aspect_ratio_info || (s1->save_progressive_seq != s->progressive_sequence && FFALIGN(s->height, 16) != FFALIGN(s->height, 32)) || 0) { if (s1->mpeg_enc_ctx_allocated) { ParseContext pc = s->parse_context; s->parse_context.buffer = 0; ff_mpv_common_end(s); s->parse_context = pc; s1->mpeg_enc_ctx_allocated = 0; } ret = ff_set_dimensions(avctx, s->width, s->height); if (ret < 0) return ret; if (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO && s->bit_rate) { avctx->rc_max_rate = s->bit_rate; } else if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO && s->bit_rate && (s->bit_rate != 0x3FFFF*400 || s->vbv_delay != 0xFFFF)) { avctx->bit_rate = s->bit_rate; } s1->save_aspect_info = s->aspect_ratio_info; s1->save_width = s->width; s1->save_height = s->height; s1->save_progressive_seq = s->progressive_sequence; /* low_delay may be forced, in this case we will have B-frames * that behave like P-frames. */ avctx->has_b_frames = !s->low_delay; if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO) { // MPEG-1 fps avctx->framerate = ff_mpeg12_frame_rate_tab[s->frame_rate_index]; avctx->ticks_per_frame = 1; } else { // MPEG-2 // MPEG-2 fps av_reduce(&s->avctx->framerate.num, &s->avctx->framerate.den, ff_mpeg12_frame_rate_tab[s->frame_rate_index].num * s1->frame_rate_ext.num, ff_mpeg12_frame_rate_tab[s->frame_rate_index].den * s1->frame_rate_ext.den, 1 << 30); avctx->ticks_per_frame = 2; } // MPEG-2 avctx->pix_fmt = mpeg_get_pixelformat(avctx); setup_hwaccel_for_pixfmt(avctx); /* Quantization matrices may need reordering * if DCT permutation is changed. */ memcpy(old_permutation, s->idsp.idct_permutation, 64 * sizeof(uint8_t)); ff_mpv_idct_init(s); if ((ret = ff_mpv_common_init(s)) < 0) return ret; quant_matrix_rebuild(s->intra_matrix, old_permutation, s->idsp.idct_permutation); quant_matrix_rebuild(s->inter_matrix, old_permutation, s->idsp.idct_permutation); quant_matrix_rebuild(s->chroma_intra_matrix, old_permutation, s->idsp.idct_permutation); quant_matrix_rebuild(s->chroma_inter_matrix, old_permutation, s->idsp.idct_permutation); s1->mpeg_enc_ctx_allocated = 1; } return 0; } | 26,763 |
1 | static int mxf_read_primer_pack(void *arg, AVIOContext *pb, int tag, int size, UID uid, int64_t klv_offset) { MXFContext *mxf = arg; int item_num = avio_rb32(pb); int item_len = avio_rb32(pb); if (item_len != 18) { avpriv_request_sample(pb, "Primer pack item length %d", item_len); return AVERROR_PATCHWELCOME; } if (item_num > 65536) { av_log(mxf->fc, AV_LOG_ERROR, "item_num %d is too large\n", item_num); return AVERROR_INVALIDDATA; } mxf->local_tags = av_calloc(item_num, item_len); if (!mxf->local_tags) return AVERROR(ENOMEM); mxf->local_tags_count = item_num; avio_read(pb, mxf->local_tags, item_num*item_len); return 0; } | 26,764 |
1 | void kvm_inject_x86_mce(CPUState *cenv, int bank, uint64_t status, uint64_t mcg_status, uint64_t addr, uint64_t misc, int abort_on_error) { #ifdef KVM_CAP_MCE struct kvm_x86_mce mce = { .bank = bank, .status = status, .mcg_status = mcg_status, .addr = addr, .misc = misc, }; struct kvm_x86_mce_data data = { .env = cenv, .mce = &mce, }; if (!cenv->mcg_cap) { fprintf(stderr, "MCE support is not enabled!\n"); return; } run_on_cpu(cenv, kvm_do_inject_x86_mce, &data); #else if (abort_on_error) abort(); #endif } | 26,765 |
1 | static inline void RENAME(yuv2bgr24_2)(SwsContext *c, const uint16_t *buf0, const uint16_t *buf1, const uint16_t *ubuf0, const uint16_t *ubuf1, const uint16_t *vbuf0, const uint16_t *vbuf1, const uint16_t *abuf0, const uint16_t *abuf1, uint8_t *dest, int dstW, int yalpha, int uvalpha, int y) { x86_reg uv_off = c->uv_off << 1; //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :( __asm__ volatile( "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %4, %%"REG_b" \n\t" "push %%"REG_BP" \n\t" YSCALEYUV2RGB(%%REGBP, %5, %6) "pxor %%mm7, %%mm7 \n\t" WRITEBGR24(%%REGb, 8280(%5), %%REGBP) "pop %%"REG_BP" \n\t" "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest), "a" (&c->redDither), "m"(uv_off) ); } | 26,766 |
1 | static int usb_xhci_initfn(struct PCIDevice *dev) { int i, ret; XHCIState *xhci = DO_UPCAST(XHCIState, pci_dev, dev); xhci->pci_dev.config[PCI_CLASS_PROG] = 0x30; /* xHCI */ xhci->pci_dev.config[PCI_INTERRUPT_PIN] = 0x01; /* interrupt pin 1 */ xhci->pci_dev.config[PCI_CACHE_LINE_SIZE] = 0x10; xhci->pci_dev.config[0x60] = 0x30; /* release number */ usb_xhci_init(xhci, &dev->qdev); if (xhci->numintrs > MAXINTRS) { xhci->numintrs = MAXINTRS; if (xhci->numintrs < 1) { xhci->numintrs = 1; if (xhci->numslots > MAXSLOTS) { xhci->numslots = MAXSLOTS; if (xhci->numslots < 1) { xhci->numslots = 1; xhci->mfwrap_timer = qemu_new_timer_ns(vm_clock, xhci_mfwrap_timer, xhci); xhci->irq = xhci->pci_dev.irq[0]; memory_region_init(&xhci->mem, "xhci", LEN_REGS); memory_region_init_io(&xhci->mem_cap, &xhci_cap_ops, xhci, "capabilities", LEN_CAP); memory_region_init_io(&xhci->mem_oper, &xhci_oper_ops, xhci, "operational", 0x400); memory_region_init_io(&xhci->mem_runtime, &xhci_runtime_ops, xhci, "runtime", LEN_RUNTIME); memory_region_init_io(&xhci->mem_doorbell, &xhci_doorbell_ops, xhci, "doorbell", LEN_DOORBELL); memory_region_add_subregion(&xhci->mem, 0, &xhci->mem_cap); memory_region_add_subregion(&xhci->mem, OFF_OPER, &xhci->mem_oper); memory_region_add_subregion(&xhci->mem, OFF_RUNTIME, &xhci->mem_runtime); memory_region_add_subregion(&xhci->mem, OFF_DOORBELL, &xhci->mem_doorbell); for (i = 0; i < xhci->numports; i++) { XHCIPort *port = &xhci->ports[i]; uint32_t offset = OFF_OPER + 0x400 + 0x10 * i; port->xhci = xhci; memory_region_init_io(&port->mem, &xhci_port_ops, port, port->name, 0x10); memory_region_add_subregion(&xhci->mem, offset, &port->mem); pci_register_bar(&xhci->pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY|PCI_BASE_ADDRESS_MEM_TYPE_64, &xhci->mem); ret = pcie_endpoint_cap_init(&xhci->pci_dev, 0xa0); assert(ret >= 0); if (xhci->flags & (1 << XHCI_FLAG_USE_MSI)) { msi_init(&xhci->pci_dev, 0x70, xhci->numintrs, true, false); if (xhci->flags & (1 << XHCI_FLAG_USE_MSI_X)) { msix_init(&xhci->pci_dev, xhci->numintrs, &xhci->mem, 0, OFF_MSIX_TABLE, &xhci->mem, 0, OFF_MSIX_PBA, 0x90); return 0; | 26,767 |
1 | int avformat_network_init(void) { #if CONFIG_NETWORK int ret; ff_network_inited_globally = 1; if ((ret = ff_network_init()) < 0) return ret; ff_tls_init(); #endif return 0; } | 26,768 |
1 | GList *range_list_insert(GList *list, Range *data) { GList *l, *next = NULL; Range *r, *nextr; if (!list) { list = g_list_insert_sorted(list, data, range_compare); return list; } nextr = data; l = list; while (l && l != next && nextr) { r = l->data; if (ranges_can_merge(r, nextr)) { range_merge(r, nextr); l = g_list_remove_link(l, next); next = g_list_next(l); if (next) { nextr = next->data; } else { nextr = NULL; } } else { l = g_list_next(l); } } if (!l) { list = g_list_insert_sorted(list, data, range_compare); } return list; } | 26,769 |
1 | static int sab_diamond_search(MpegEncContext * s, int *best, int dmin, int src_index, int ref_index, int const penalty_factor, int size, int h, int flags) { MotionEstContext * const c= &s->me; me_cmp_func cmpf, chroma_cmpf; Minima minima[MAX_SAB_SIZE]; const int minima_count= FFABS(c->dia_size); int i, j; LOAD_COMMON LOAD_COMMON2 int map_generation= c->map_generation; cmpf= s->dsp.me_cmp[size]; chroma_cmpf= s->dsp.me_cmp[size+1]; for(j=i=0; i<ME_MAP_SIZE; i++){ uint32_t key= map[i]; key += (1<<(ME_MAP_MV_BITS-1)) + (1<<(2*ME_MAP_MV_BITS-1)); if((key&((-1)<<(2*ME_MAP_MV_BITS))) != map_generation) continue; assert(j<MAX_SAB_SIZE); //max j = number of predictors minima[j].height= score_map[i]; minima[j].x= key & ((1<<ME_MAP_MV_BITS)-1); key>>=ME_MAP_MV_BITS; minima[j].y= key & ((1<<ME_MAP_MV_BITS)-1); minima[j].x-= (1<<(ME_MAP_MV_BITS-1)); minima[j].y-= (1<<(ME_MAP_MV_BITS-1)); minima[j].checked=0; if(minima[j].x || minima[j].y) minima[j].height+= (mv_penalty[((minima[j].x)<<shift)-pred_x] + mv_penalty[((minima[j].y)<<shift)-pred_y])*penalty_factor; j++; } qsort(minima, j, sizeof(Minima), minima_cmp); for(; j<minima_count; j++){ minima[j].height=256*256*256*64; minima[j].checked=0; minima[j].x= minima[j].y=0; } for(i=0; i<minima_count; i++){ const int x= minima[i].x; const int y= minima[i].y; int d; if(minima[i].checked) continue; if( x >= xmax || x <= xmin || y >= ymax || y <= ymin) continue; SAB_CHECK_MV(x-1, y) SAB_CHECK_MV(x+1, y) SAB_CHECK_MV(x , y-1) SAB_CHECK_MV(x , y+1) minima[i].checked= 1; } best[0]= minima[0].x; best[1]= minima[0].y; dmin= minima[0].height; if( best[0] < xmax && best[0] > xmin && best[1] < ymax && best[1] > ymin){ int d; //ensure that the refernece samples for hpel refinement are in the map CHECK_MV(best[0]-1, best[1]) CHECK_MV(best[0]+1, best[1]) CHECK_MV(best[0], best[1]-1) CHECK_MV(best[0], best[1]+1) } return dmin; } | 26,770 |
1 | static void close_guest_eventfds(IVShmemState *s, int posn) { int i, guest_curr_max; if (!ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) { guest_curr_max = s->peers[posn].nb_eventfds; memory_region_transaction_begin(); for (i = 0; i < guest_curr_max; i++) { ivshmem_del_eventfd(s, posn, i); memory_region_transaction_commit(); for (i = 0; i < guest_curr_max; i++) { event_notifier_cleanup(&s->peers[posn].eventfds[i]); g_free(s->peers[posn].eventfds); s->peers[posn].nb_eventfds = 0; | 26,771 |
1 | static int tiff_decode_tag(TiffContext *s, const uint8_t *start, const uint8_t *buf, const uint8_t *end_buf) { unsigned tag, type, count, off, value = 0; int i; uint32_t *pal; const uint8_t *rp, *gp, *bp; if (end_buf - buf < 12) return AVERROR_INVALIDDATA; tag = tget_short(&buf, s->le); type = tget_short(&buf, s->le); count = tget_long(&buf, s->le); off = tget_long(&buf, s->le); if (type == 0 || type >= FF_ARRAY_ELEMS(type_sizes)) { av_log(s->avctx, AV_LOG_DEBUG, "Unknown tiff type (%u) encountered\n", type); return 0; } if (count == 1) { switch (type) { case TIFF_BYTE: case TIFF_SHORT: buf -= 4; value = tget(&buf, type, s->le); buf = NULL; break; case TIFF_LONG: value = off; buf = NULL; break; case TIFF_STRING: if (count <= 4) { buf -= 4; break; } default: value = UINT_MAX; buf = start + off; } } else { if (count <= 4 && type_sizes[type] * count <= 4) buf -= 4; else buf = start + off; } if (buf && (buf < start || buf > end_buf)) { av_log(s->avctx, AV_LOG_ERROR, "Tag referencing position outside the image\n"); return AVERROR_INVALIDDATA; } switch (tag) { case TIFF_WIDTH: s->width = value; break; case TIFF_HEIGHT: s->height = value; break; case TIFF_BPP: s->bppcount = count; if (count > 4) { av_log(s->avctx, AV_LOG_ERROR, "This format is not supported (bpp=%d, %d components)\n", s->bpp, count); return AVERROR_INVALIDDATA; } if (count == 1) s->bpp = value; else { switch (type) { case TIFF_BYTE: s->bpp = (off & 0xFF) + ((off >> 8) & 0xFF) + ((off >> 16) & 0xFF) + ((off >> 24) & 0xFF); break; case TIFF_SHORT: case TIFF_LONG: s->bpp = 0; for (i = 0; i < count && buf < end_buf; i++) s->bpp += tget(&buf, type, s->le); break; default: s->bpp = -1; } } break; case TIFF_SAMPLES_PER_PIXEL: if (count != 1) { av_log(s->avctx, AV_LOG_ERROR, "Samples per pixel requires a single value, many provided\n"); return AVERROR_INVALIDDATA; } if (s->bppcount == 1) s->bpp *= value; s->bppcount = value; break; case TIFF_COMPR: s->compr = value; s->predictor = 0; switch (s->compr) { case TIFF_RAW: case TIFF_PACKBITS: case TIFF_LZW: case TIFF_CCITT_RLE: break; case TIFF_G3: case TIFF_G4: s->fax_opts = 0; break; case TIFF_DEFLATE: case TIFF_ADOBE_DEFLATE: #if CONFIG_ZLIB break; #else av_log(s->avctx, AV_LOG_ERROR, "Deflate: ZLib not compiled in\n"); return AVERROR(ENOSYS); #endif case TIFF_JPEG: case TIFF_NEWJPEG: avpriv_report_missing_feature(s->avctx, "JPEG compression"); return AVERROR_PATCHWELCOME; default: av_log(s->avctx, AV_LOG_ERROR, "Unknown compression method %i\n", s->compr); return AVERROR_INVALIDDATA; } break; case TIFF_ROWSPERSTRIP: if (type == TIFF_LONG && value == UINT_MAX) value = s->avctx->height; if (value < 1) { av_log(s->avctx, AV_LOG_ERROR, "Incorrect value of rows per strip\n"); return AVERROR_INVALIDDATA; } s->rps = value; break; case TIFF_STRIP_OFFS: if (count == 1) { s->stripdata = NULL; s->stripoff = value; } else s->stripdata = start + off; s->strips = count; if (s->strips == 1) s->rps = s->height; s->sot = type; if (s->stripdata > end_buf) { av_log(s->avctx, AV_LOG_ERROR, "Tag referencing position outside the image\n"); return AVERROR_INVALIDDATA; } break; case TIFF_STRIP_SIZE: if (count == 1) { s->stripsizes = NULL; s->stripsize = value; s->strips = 1; } else { s->stripsizes = start + off; } s->strips = count; s->sstype = type; if (s->stripsizes > end_buf) { av_log(s->avctx, AV_LOG_ERROR, "Tag referencing position outside the image\n"); return AVERROR_INVALIDDATA; } break; case TIFF_PREDICTOR: s->predictor = value; break; case TIFF_INVERT: switch (value) { case 0: s->invert = 1; break; case 1: s->invert = 0; break; case 2: case 3: break; default: av_log(s->avctx, AV_LOG_ERROR, "Color mode %d is not supported\n", value); return AVERROR_INVALIDDATA; } break; case TIFF_FILL_ORDER: if (value < 1 || value > 2) { av_log(s->avctx, AV_LOG_ERROR, "Unknown FillOrder value %d, trying default one\n", value); value = 1; } s->fill_order = value - 1; break; case TIFF_PAL: pal = (uint32_t *) s->palette; off = type_sizes[type]; if (count / 3 > 256 || end_buf - buf < count / 3 * off * 3) return AVERROR_INVALIDDATA; rp = buf; gp = buf + count / 3 * off; bp = buf + count / 3 * off * 2; off = (type_sizes[type] - 1) << 3; for (i = 0; i < count / 3; i++) { uint32_t p = 0xFF000000; p |= (tget(&rp, type, s->le) >> off) << 16; p |= (tget(&gp, type, s->le) >> off) << 8; p |= tget(&bp, type, s->le) >> off; pal[i] = p; } s->palette_is_set = 1; break; case TIFF_PLANAR: if (value == 2) { avpriv_report_missing_feature(s->avctx, "Planar format"); return AVERROR_PATCHWELCOME; } break; case TIFF_T4OPTIONS: if (s->compr == TIFF_G3) s->fax_opts = value; break; case TIFF_T6OPTIONS: if (s->compr == TIFF_G4) s->fax_opts = value; break; default: if (s->avctx->err_recognition & AV_EF_EXPLODE) { av_log(s->avctx, AV_LOG_ERROR, "Unknown or unsupported tag %d/0X%0X\n", tag, tag); return AVERROR_INVALIDDATA; } } return 0; } | 26,772 |
0 | static int read_packet(AVFormatContext *s, AVPacket *pkt) { MmDemuxContext *mm = s->priv_data; AVIOContext *pb = s->pb; unsigned char preamble[MM_PREAMBLE_SIZE]; unsigned int type, length; while(1) { if (avio_read(pb, preamble, MM_PREAMBLE_SIZE) != MM_PREAMBLE_SIZE) { return AVERROR(EIO); } type = AV_RL16(&preamble[0]); length = AV_RL16(&preamble[2]); switch(type) { case MM_TYPE_PALETTE : case MM_TYPE_INTER : case MM_TYPE_INTRA : case MM_TYPE_INTRA_HH : case MM_TYPE_INTER_HH : case MM_TYPE_INTRA_HHV : case MM_TYPE_INTER_HHV : /* output preamble + data */ if (av_new_packet(pkt, length + MM_PREAMBLE_SIZE)) return AVERROR(ENOMEM); memcpy(pkt->data, preamble, MM_PREAMBLE_SIZE); if (avio_read(pb, pkt->data + MM_PREAMBLE_SIZE, length) != length) return AVERROR(EIO); pkt->size = length + MM_PREAMBLE_SIZE; pkt->stream_index = 0; pkt->pts = mm->video_pts; if (type!=MM_TYPE_PALETTE) mm->video_pts++; return 0; case MM_TYPE_AUDIO : if (av_get_packet(s->pb, pkt, length)<0) return AVERROR(ENOMEM); pkt->size = length; pkt->stream_index = 1; pkt->pts = mm->audio_pts++; return 0; default : av_log(s, AV_LOG_INFO, "unknown chunk type 0x%x\n", type); avio_skip(pb, length); } } } | 26,773 |
0 | static int handle_name_to_path(FsContext *ctx, V9fsPath *dir_path, const char *name, V9fsPath *target) { char buffer[PATH_MAX]; struct file_handle *fh; int dirfd, ret, mnt_id; struct handle_data *data = (struct handle_data *)ctx->private; /* "." and ".." are not allowed */ if (!strcmp(name, ".") || !strcmp(name, "..")) { errno = EINVAL; return -1; } if (dir_path) { dirfd = open_by_handle(data->mountfd, dir_path->data, O_PATH); } else { /* relative to export root */ dirfd = open(rpath(ctx, ".", buffer), O_DIRECTORY); } if (dirfd < 0) { return dirfd; } fh = g_malloc(sizeof(struct file_handle) + data->handle_bytes); fh->handle_bytes = data->handle_bytes; /* add a "./" at the beginning of the path */ snprintf(buffer, PATH_MAX, "./%s", name); /* flag = 0 imply don't follow symlink */ ret = name_to_handle(dirfd, buffer, fh, &mnt_id, 0); if (!ret) { target->data = (char *)fh; target->size = sizeof(struct file_handle) + data->handle_bytes; } else { g_free(fh); } close(dirfd); return ret; } | 26,775 |
0 | static int qio_dns_resolver_lookup_sync_inet(QIODNSResolver *resolver, SocketAddress *addr, size_t *naddrs, SocketAddress ***addrs, Error **errp) { struct addrinfo ai, *res, *e; InetSocketAddress *iaddr = addr->u.inet.data; char port[33]; char uaddr[INET6_ADDRSTRLEN + 1]; char uport[33]; int rc; Error *err = NULL; size_t i; *naddrs = 0; *addrs = NULL; memset(&ai, 0, sizeof(ai)); ai.ai_flags = AI_PASSIVE; if (iaddr->has_numeric && iaddr->numeric) { ai.ai_flags |= AI_NUMERICHOST | AI_NUMERICSERV; } ai.ai_family = inet_ai_family_from_address(iaddr, &err); ai.ai_socktype = SOCK_STREAM; if (err) { error_propagate(errp, err); return -1; } if (iaddr->host == NULL) { error_setg(errp, "host not specified"); return -1; } if (iaddr->port != NULL) { pstrcpy(port, sizeof(port), iaddr->port); } else { port[0] = '\0'; } rc = getaddrinfo(strlen(iaddr->host) ? iaddr->host : NULL, strlen(port) ? port : NULL, &ai, &res); if (rc != 0) { error_setg(errp, "address resolution failed for %s:%s: %s", iaddr->host, port, gai_strerror(rc)); return -1; } for (e = res; e != NULL; e = e->ai_next) { (*naddrs)++; } *addrs = g_new0(SocketAddress *, *naddrs); /* create socket + bind */ for (i = 0, e = res; e != NULL; i++, e = e->ai_next) { SocketAddress *newaddr = g_new0(SocketAddress, 1); InetSocketAddress *newiaddr = g_new0(InetSocketAddress, 1); newaddr->u.inet.data = newiaddr; newaddr->type = SOCKET_ADDRESS_KIND_INET; getnameinfo((struct sockaddr *)e->ai_addr, e->ai_addrlen, uaddr, INET6_ADDRSTRLEN, uport, 32, NI_NUMERICHOST | NI_NUMERICSERV); *newiaddr = (InetSocketAddress){ .host = g_strdup(uaddr), .port = g_strdup(uport), .has_numeric = true, .numeric = true, .has_to = iaddr->has_to, .to = iaddr->to, .has_ipv4 = false, .has_ipv6 = false, }; (*addrs)[i] = newaddr; } freeaddrinfo(res); return 0; } | 26,778 |
0 | void acpi_build(AcpiBuildTables *tables, MachineState *machine) { PCMachineState *pcms = PC_MACHINE(machine); PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms); GArray *table_offsets; unsigned facs, dsdt, rsdt, fadt; AcpiPmInfo pm; AcpiMiscInfo misc; AcpiMcfgInfo mcfg; Range pci_hole, pci_hole64; uint8_t *u; size_t aml_len = 0; GArray *tables_blob = tables->table_data; AcpiSlicOem slic_oem = { .id = NULL, .table_id = NULL }; Object *vmgenid_dev; acpi_get_pm_info(&pm); acpi_get_misc_info(&misc); acpi_get_pci_holes(&pci_hole, &pci_hole64); acpi_get_slic_oem(&slic_oem); table_offsets = g_array_new(false, true /* clear */, sizeof(uint32_t)); ACPI_BUILD_DPRINTF("init ACPI tables\n"); bios_linker_loader_alloc(tables->linker, ACPI_BUILD_TABLE_FILE, tables_blob, 64 /* Ensure FACS is aligned */, false /* high memory */); /* * FACS is pointed to by FADT. * We place it first since it's the only table that has alignment * requirements. */ facs = tables_blob->len; build_facs(tables_blob, tables->linker); /* DSDT is pointed to by FADT */ dsdt = tables_blob->len; build_dsdt(tables_blob, tables->linker, &pm, &misc, &pci_hole, &pci_hole64, machine); /* Count the size of the DSDT and SSDT, we will need it for legacy * sizing of ACPI tables. */ aml_len += tables_blob->len - dsdt; /* ACPI tables pointed to by RSDT */ fadt = tables_blob->len; acpi_add_table(table_offsets, tables_blob); build_fadt(tables_blob, tables->linker, &pm, facs, dsdt, slic_oem.id, slic_oem.table_id); aml_len += tables_blob->len - fadt; acpi_add_table(table_offsets, tables_blob); build_madt(tables_blob, tables->linker, pcms); vmgenid_dev = find_vmgenid_dev(); if (vmgenid_dev) { acpi_add_table(table_offsets, tables_blob); vmgenid_build_acpi(VMGENID(vmgenid_dev), tables_blob, tables->vmgenid, tables->linker); } if (misc.has_hpet) { acpi_add_table(table_offsets, tables_blob); build_hpet(tables_blob, tables->linker); } if (misc.tpm_version != TPM_VERSION_UNSPEC) { acpi_add_table(table_offsets, tables_blob); build_tpm_tcpa(tables_blob, tables->linker, tables->tcpalog); if (misc.tpm_version == TPM_VERSION_2_0) { acpi_add_table(table_offsets, tables_blob); build_tpm2(tables_blob, tables->linker); } } if (pcms->numa_nodes) { acpi_add_table(table_offsets, tables_blob); build_srat(tables_blob, tables->linker, machine); if (have_numa_distance) { acpi_add_table(table_offsets, tables_blob); build_slit(tables_blob, tables->linker); } } if (acpi_get_mcfg(&mcfg)) { acpi_add_table(table_offsets, tables_blob); build_mcfg_q35(tables_blob, tables->linker, &mcfg); } if (x86_iommu_get_default()) { IommuType IOMMUType = x86_iommu_get_type(); if (IOMMUType == TYPE_AMD) { acpi_add_table(table_offsets, tables_blob); build_amd_iommu(tables_blob, tables->linker); } else if (IOMMUType == TYPE_INTEL) { acpi_add_table(table_offsets, tables_blob); build_dmar_q35(tables_blob, tables->linker); } } if (pcms->acpi_nvdimm_state.is_enabled) { nvdimm_build_acpi(table_offsets, tables_blob, tables->linker, &pcms->acpi_nvdimm_state, machine->ram_slots); } /* Add tables supplied by user (if any) */ for (u = acpi_table_first(); u; u = acpi_table_next(u)) { unsigned len = acpi_table_len(u); acpi_add_table(table_offsets, tables_blob); g_array_append_vals(tables_blob, u, len); } /* RSDT is pointed to by RSDP */ rsdt = tables_blob->len; build_rsdt(tables_blob, tables->linker, table_offsets, slic_oem.id, slic_oem.table_id); /* RSDP is in FSEG memory, so allocate it separately */ build_rsdp(tables->rsdp, tables->linker, rsdt); /* We'll expose it all to Guest so we want to reduce * chance of size changes. * * We used to align the tables to 4k, but of course this would * too simple to be enough. 4k turned out to be too small an * alignment very soon, and in fact it is almost impossible to * keep the table size stable for all (max_cpus, max_memory_slots) * combinations. So the table size is always 64k for pc-i440fx-2.1 * and we give an error if the table grows beyond that limit. * * We still have the problem of migrating from "-M pc-i440fx-2.0". For * that, we exploit the fact that QEMU 2.1 generates _smaller_ tables * than 2.0 and we can always pad the smaller tables with zeros. We can * then use the exact size of the 2.0 tables. * * All this is for PIIX4, since QEMU 2.0 didn't support Q35 migration. */ if (pcmc->legacy_acpi_table_size) { /* Subtracting aml_len gives the size of fixed tables. Then add the * size of the PIIX4 DSDT/SSDT in QEMU 2.0. */ int legacy_aml_len = pcmc->legacy_acpi_table_size + ACPI_BUILD_LEGACY_CPU_AML_SIZE * pcms->apic_id_limit; int legacy_table_size = ROUND_UP(tables_blob->len - aml_len + legacy_aml_len, ACPI_BUILD_ALIGN_SIZE); if (tables_blob->len > legacy_table_size) { /* Should happen only with PCI bridges and -M pc-i440fx-2.0. */ error_report("Warning: migration may not work."); } g_array_set_size(tables_blob, legacy_table_size); } else { /* Make sure we have a buffer in case we need to resize the tables. */ if (tables_blob->len > ACPI_BUILD_TABLE_SIZE / 2) { /* As of QEMU 2.1, this fires with 160 VCPUs and 255 memory slots. */ error_report("Warning: ACPI tables are larger than 64k."); error_report("Warning: migration may not work."); error_report("Warning: please remove CPUs, NUMA nodes, " "memory slots or PCI bridges."); } acpi_align_size(tables_blob, ACPI_BUILD_TABLE_SIZE); } acpi_align_size(tables->linker->cmd_blob, ACPI_BUILD_ALIGN_SIZE); /* Cleanup memory that's no longer used. */ g_array_free(table_offsets, true); } | 26,779 |
0 | static Aml *build_crs(PCIHostState *host, GPtrArray *io_ranges, GPtrArray *mem_ranges) { Aml *crs = aml_resource_template(); uint8_t max_bus = pci_bus_num(host->bus); uint8_t type; int devfn; for (devfn = 0; devfn < ARRAY_SIZE(host->bus->devices); devfn++) { int i; uint64_t range_base, range_limit; PCIDevice *dev = host->bus->devices[devfn]; if (!dev) { continue; } for (i = 0; i < PCI_NUM_REGIONS; i++) { PCIIORegion *r = &dev->io_regions[i]; range_base = r->addr; range_limit = r->addr + r->size - 1; /* * Work-around for old bioses * that do not support multiple root buses */ if (!range_base || range_base > range_limit) { continue; } if (r->type & PCI_BASE_ADDRESS_SPACE_IO) { aml_append(crs, aml_word_io(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE, AML_ENTIRE_RANGE, 0, range_base, range_limit, 0, range_limit - range_base + 1)); crs_range_insert(io_ranges, range_base, range_limit); } else { /* "memory" */ aml_append(crs, aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, AML_NON_CACHEABLE, AML_READ_WRITE, 0, range_base, range_limit, 0, range_limit - range_base + 1)); crs_range_insert(mem_ranges, range_base, range_limit); } } type = dev->config[PCI_HEADER_TYPE] & ~PCI_HEADER_TYPE_MULTI_FUNCTION; if (type == PCI_HEADER_TYPE_BRIDGE) { uint8_t subordinate = dev->config[PCI_SUBORDINATE_BUS]; if (subordinate > max_bus) { max_bus = subordinate; } range_base = pci_bridge_get_base(dev, PCI_BASE_ADDRESS_SPACE_IO); range_limit = pci_bridge_get_limit(dev, PCI_BASE_ADDRESS_SPACE_IO); /* * Work-around for old bioses * that do not support multiple root buses */ if (range_base || range_base > range_limit) { aml_append(crs, aml_word_io(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE, AML_ENTIRE_RANGE, 0, range_base, range_limit, 0, range_limit - range_base + 1)); crs_range_insert(io_ranges, range_base, range_limit); } range_base = pci_bridge_get_base(dev, PCI_BASE_ADDRESS_SPACE_MEMORY); range_limit = pci_bridge_get_limit(dev, PCI_BASE_ADDRESS_SPACE_MEMORY); /* * Work-around for old bioses * that do not support multiple root buses */ if (range_base || range_base > range_limit) { aml_append(crs, aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, AML_NON_CACHEABLE, AML_READ_WRITE, 0, range_base, range_limit, 0, range_limit - range_base + 1)); crs_range_insert(mem_ranges, range_base, range_limit); } range_base = pci_bridge_get_base(dev, PCI_BASE_ADDRESS_MEM_PREFETCH); range_limit = pci_bridge_get_limit(dev, PCI_BASE_ADDRESS_MEM_PREFETCH); /* * Work-around for old bioses * that do not support multiple root buses */ if (range_base || range_base > range_limit) { aml_append(crs, aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, AML_NON_CACHEABLE, AML_READ_WRITE, 0, range_base, range_limit, 0, range_limit - range_base + 1)); crs_range_insert(mem_ranges, range_base, range_limit); } } } aml_append(crs, aml_word_bus_number(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE, 0, pci_bus_num(host->bus), max_bus, 0, max_bus - pci_bus_num(host->bus) + 1)); return crs; } | 26,781 |
0 | static int escc_init1(SysBusDevice *dev) { ESCCState *s = ESCC(dev); unsigned int i; s->chn[0].disabled = s->disabled; s->chn[1].disabled = s->disabled; for (i = 0; i < 2; i++) { sysbus_init_irq(dev, &s->chn[i].irq); s->chn[i].chn = 1 - i; s->chn[i].clock = s->frequency / 2; if (s->chn[i].chr) { qemu_chr_add_handlers(s->chn[i].chr, serial_can_receive, serial_receive1, serial_event, &s->chn[i]); } } s->chn[0].otherchn = &s->chn[1]; s->chn[1].otherchn = &s->chn[0]; memory_region_init_io(&s->mmio, OBJECT(s), &escc_mem_ops, s, "escc", ESCC_SIZE << s->it_shift); sysbus_init_mmio(dev, &s->mmio); if (s->chn[0].type == mouse) { qemu_add_mouse_event_handler(sunmouse_event, &s->chn[0], 0, "QEMU Sun Mouse"); } if (s->chn[1].type == kbd) { qemu_add_kbd_event_handler(sunkbd_event, &s->chn[1]); } return 0; } | 26,782 |
0 | void aio_bh_update_timeout(AioContext *ctx, uint32_t *timeout) { QEMUBH *bh; for (bh = ctx->first_bh; bh; bh = bh->next) { if (!bh->deleted && bh->scheduled) { if (bh->idle) { /* idle bottom halves will be polled at least * every 10ms */ *timeout = MIN(10, *timeout); } else { /* non-idle bottom halves will be executed * immediately */ *timeout = 0; break; } } } } | 26,783 |
0 | void bdrv_image_info_specific_dump(fprintf_function func_fprintf, void *f, ImageInfoSpecific *info_spec) { QObject *obj, *data; Visitor *v = qobject_output_visitor_new(&obj); visit_type_ImageInfoSpecific(v, NULL, &info_spec, &error_abort); visit_complete(v, &obj); assert(qobject_type(obj) == QTYPE_QDICT); data = qdict_get(qobject_to_qdict(obj), "data"); dump_qobject(func_fprintf, f, 1, data); qobject_decref(obj); visit_free(v); } | 26,784 |
0 | static mfxIMPL choose_implementation(const InputStream *ist) { static const struct { const char *name; mfxIMPL impl; } impl_map[] = { { "auto", MFX_IMPL_AUTO }, { "sw", MFX_IMPL_SOFTWARE }, { "hw", MFX_IMPL_HARDWARE }, { "auto_any", MFX_IMPL_AUTO_ANY }, { "hw_any", MFX_IMPL_HARDWARE_ANY }, { "hw2", MFX_IMPL_HARDWARE2 }, { "hw3", MFX_IMPL_HARDWARE3 }, { "hw4", MFX_IMPL_HARDWARE4 }, }; mfxIMPL impl = MFX_IMPL_AUTO_ANY; int i; if (ist->hwaccel_device) { for (i = 0; i < FF_ARRAY_ELEMS(impl_map); i++) if (!strcmp(ist->hwaccel_device, impl_map[i].name)) { impl = impl_map[i].impl; break; } if (i == FF_ARRAY_ELEMS(impl_map)) impl = strtol(ist->hwaccel_device, NULL, 0); } return impl; } | 26,785 |
0 | SocketAddress *socket_address_crumple(SocketAddressFlat *addr_flat) { SocketAddress *addr = g_new(SocketAddress, 1); switch (addr_flat->type) { case SOCKET_ADDRESS_FLAT_TYPE_INET: addr->type = SOCKET_ADDRESS_KIND_INET; addr->u.inet.data = QAPI_CLONE(InetSocketAddress, &addr_flat->u.inet); break; case SOCKET_ADDRESS_FLAT_TYPE_UNIX: addr->type = SOCKET_ADDRESS_KIND_UNIX; addr->u.q_unix.data = QAPI_CLONE(UnixSocketAddress, &addr_flat->u.q_unix); break; case SOCKET_ADDRESS_FLAT_TYPE_VSOCK: addr->type = SOCKET_ADDRESS_KIND_VSOCK; addr->u.vsock.data = QAPI_CLONE(VsockSocketAddress, &addr_flat->u.vsock); break; case SOCKET_ADDRESS_FLAT_TYPE_FD: addr->type = SOCKET_ADDRESS_KIND_FD; addr->u.fd.data = QAPI_CLONE(String, &addr_flat->u.fd); break; default: abort(); } return addr; } | 26,786 |
0 | void aio_set_event_notifier(AioContext *ctx, EventNotifier *e, EventNotifierHandler *io_notify) { AioHandler *node; QLIST_FOREACH(node, &ctx->aio_handlers, node) { if (node->e == e && !node->deleted) { break; } } /* Are we deleting the fd handler? */ if (!io_notify) { if (node) { g_source_remove_poll(&ctx->source, &node->pfd); /* If the lock is held, just mark the node as deleted */ if (ctx->walking_handlers) { node->deleted = 1; node->pfd.revents = 0; } else { /* Otherwise, delete it for real. We can't just mark it as * deleted because deleted nodes are only cleaned up after * releasing the walking_handlers lock. */ QLIST_REMOVE(node, node); g_free(node); } } } else { if (node == NULL) { /* Alloc and insert if it's not already there */ node = g_malloc0(sizeof(AioHandler)); node->e = e; node->pfd.fd = (uintptr_t)event_notifier_get_handle(e); node->pfd.events = G_IO_IN; QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node); g_source_add_poll(&ctx->source, &node->pfd); } /* Update handler with latest information */ node->io_notify = io_notify; } aio_notify(ctx); } | 26,787 |
0 | static void uhci_fill_queue(UHCIState *s, UHCI_TD *td) { uint32_t int_mask = 0; uint32_t plink = td->link; uint32_t token = uhci_queue_token(td); UHCI_TD ptd; int ret; while (is_valid(plink)) { pci_dma_read(&s->dev, plink & ~0xf, &ptd, sizeof(ptd)); le32_to_cpus(&ptd.link); le32_to_cpus(&ptd.ctrl); le32_to_cpus(&ptd.token); le32_to_cpus(&ptd.buffer); if (!(ptd.ctrl & TD_CTRL_ACTIVE)) { break; } if (uhci_queue_token(&ptd) != token) { break; } trace_usb_uhci_td_queue(plink & ~0xf, ptd.ctrl, ptd.token); ret = uhci_handle_td(s, plink, &ptd, &int_mask, true); if (ret == TD_RESULT_ASYNC_CONT) { break; } assert(ret == TD_RESULT_ASYNC_START); assert(int_mask == 0); if (ptd.ctrl & TD_CTRL_SPD) { break; } plink = ptd.link; } } | 26,788 |
0 | static inline int get_segment(CPUPPCState *env, mmu_ctx_t *ctx, target_ulong eaddr, int rw, int type) { hwaddr hash; target_ulong vsid; int ds, pr, target_page_bits; int ret, ret2; pr = msr_pr; ctx->eaddr = eaddr; #if defined(TARGET_PPC64) if (env->mmu_model & POWERPC_MMU_64) { ppc_slb_t *slb; target_ulong pageaddr; int segment_bits; LOG_MMU("Check SLBs\n"); slb = slb_lookup(env, eaddr); if (!slb) { return -5; } if (slb->vsid & SLB_VSID_B) { vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T; segment_bits = 40; } else { vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT; segment_bits = 28; } target_page_bits = (slb->vsid & SLB_VSID_L) ? TARGET_PAGE_BITS_16M : TARGET_PAGE_BITS; ctx->key = !!(pr ? (slb->vsid & SLB_VSID_KP) : (slb->vsid & SLB_VSID_KS)); ds = 0; ctx->nx = !!(slb->vsid & SLB_VSID_N); pageaddr = eaddr & ((1ULL << segment_bits) - (1ULL << target_page_bits)); if (slb->vsid & SLB_VSID_B) { hash = vsid ^ (vsid << 25) ^ (pageaddr >> target_page_bits); } else { hash = vsid ^ (pageaddr >> target_page_bits); } /* Only 5 bits of the page index are used in the AVPN */ ctx->ptem = (slb->vsid & SLB_VSID_PTEM) | ((pageaddr >> 16) & ((1ULL << segment_bits) - 0x80)); } else #endif /* defined(TARGET_PPC64) */ { target_ulong sr, pgidx; sr = env->sr[eaddr >> 28]; ctx->key = (((sr & 0x20000000) && (pr != 0)) || ((sr & 0x40000000) && (pr == 0))) ? 1 : 0; ds = sr & 0x80000000 ? 1 : 0; ctx->nx = sr & 0x10000000 ? 1 : 0; vsid = sr & 0x00FFFFFF; target_page_bits = TARGET_PAGE_BITS; LOG_MMU("Check segment v=" TARGET_FMT_lx " %d " TARGET_FMT_lx " nip=" TARGET_FMT_lx " lr=" TARGET_FMT_lx " ir=%d dr=%d pr=%d %d t=%d\n", eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr, (int)msr_ir, (int)msr_dr, pr != 0 ? 1 : 0, rw, type); pgidx = (eaddr & ~SEGMENT_MASK_256M) >> target_page_bits; hash = vsid ^ pgidx; ctx->ptem = (vsid << 7) | (pgidx >> 10); } LOG_MMU("pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx "\n", ctx->key, ds, ctx->nx, vsid); ret = -1; if (!ds) { /* Check if instruction fetch is allowed, if needed */ if (type != ACCESS_CODE || ctx->nx == 0) { /* Page address translation */ LOG_MMU("htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx " hash " TARGET_FMT_plx "\n", env->htab_base, env->htab_mask, hash); ctx->hash[0] = hash; ctx->hash[1] = ~hash; /* Initialize real address with an invalid value */ ctx->raddr = (hwaddr)-1ULL; if (unlikely(env->mmu_model == POWERPC_MMU_SOFT_6xx || env->mmu_model == POWERPC_MMU_SOFT_74xx)) { /* Software TLB search */ ret = ppc6xx_tlb_check(env, ctx, eaddr, rw, type); } else { LOG_MMU("0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx " hash=" TARGET_FMT_plx "\n", env->htab_base, env->htab_mask, vsid, ctx->ptem, ctx->hash[0]); /* Primary table lookup */ ret = find_pte(env, ctx, 0, rw, type, target_page_bits); if (ret < 0) { /* Secondary table lookup */ if (eaddr != 0xEFFFFFFF) { LOG_MMU("1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx " hash=" TARGET_FMT_plx "\n", env->htab_base, env->htab_mask, vsid, ctx->ptem, ctx->hash[1]); } ret2 = find_pte(env, ctx, 1, rw, type, target_page_bits); if (ret2 != -1) { ret = ret2; } } } #if defined(DUMP_PAGE_TABLES) if (qemu_log_enabled()) { hwaddr curaddr; uint32_t a0, a1, a2, a3; qemu_log("Page table: " TARGET_FMT_plx " len " TARGET_FMT_plx "\n", sdr, mask + 0x80); for (curaddr = sdr; curaddr < (sdr + mask + 0x80); curaddr += 16) { a0 = ldl_phys(curaddr); a1 = ldl_phys(curaddr + 4); a2 = ldl_phys(curaddr + 8); a3 = ldl_phys(curaddr + 12); if (a0 != 0 || a1 != 0 || a2 != 0 || a3 != 0) { qemu_log(TARGET_FMT_plx ": %08x %08x %08x %08x\n", curaddr, a0, a1, a2, a3); } } } #endif } else { LOG_MMU("No access allowed\n"); ret = -3; } } else { target_ulong sr; LOG_MMU("direct store...\n"); /* Direct-store segment : absolutely *BUGGY* for now */ /* Direct-store implies a 32-bit MMU. * Check the Segment Register's bus unit ID (BUID). */ sr = env->sr[eaddr >> 28]; if ((sr & 0x1FF00000) >> 20 == 0x07f) { /* Memory-forced I/O controller interface access */ /* If T=1 and BUID=x'07F', the 601 performs a memory access * to SR[28-31] LA[4-31], bypassing all protection mechanisms. */ ctx->raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF); ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; return 0; } switch (type) { case ACCESS_INT: /* Integer load/store : only access allowed */ break; case ACCESS_CODE: /* No code fetch is allowed in direct-store areas */ return -4; case ACCESS_FLOAT: /* Floating point load/store */ return -4; case ACCESS_RES: /* lwarx, ldarx or srwcx. */ return -4; case ACCESS_CACHE: /* dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi */ /* Should make the instruction do no-op. * As it already do no-op, it's quite easy :-) */ ctx->raddr = eaddr; return 0; case ACCESS_EXT: /* eciwx or ecowx */ return -4; default: qemu_log("ERROR: instruction should not need " "address translation\n"); return -4; } if ((rw == 1 || ctx->key != 1) && (rw == 0 || ctx->key != 0)) { ctx->raddr = eaddr; ret = 2; } else { ret = -2; } } return ret; } | 26,789 |
0 | static void char_socket_test(void) { Chardev *chr = qemu_chr_new("server", "tcp:127.0.0.1:0,server,nowait"); Chardev *chr_client; QObject *addr; QDict *qdict, *data; const char *port; SocketIdleData d = { .chr = chr }; CharBackend be; CharBackend client_be; char *tmp; d.be = &be; d.client_be = &be; g_assert_nonnull(chr); g_assert(!object_property_get_bool(OBJECT(chr), "connected", &error_abort)); addr = object_property_get_qobject(OBJECT(chr), "addr", &error_abort); qdict = qobject_to_qdict(addr); data = qdict_get_qdict(qdict, "data"); port = qdict_get_str(data, "port"); tmp = g_strdup_printf("tcp:127.0.0.1:%s", port); QDECREF(qdict); qemu_chr_fe_init(&be, chr, &error_abort); qemu_chr_fe_set_handlers(&be, socket_can_read, socket_read, NULL, &d, NULL, true); chr_client = qemu_chr_new("client", tmp); qemu_chr_fe_init(&client_be, chr_client, &error_abort); qemu_chr_fe_set_handlers(&client_be, socket_can_read_hello, socket_read_hello, NULL, &d, NULL, true); g_free(tmp); d.conn_expected = true; guint id = g_idle_add(char_socket_test_idle, &d); g_source_set_name_by_id(id, "test-idle"); g_assert_cmpint(id, >, 0); main_loop(); g_assert(object_property_get_bool(OBJECT(chr), "connected", &error_abort)); g_assert(object_property_get_bool(OBJECT(chr_client), "connected", &error_abort)); qemu_chr_write_all(chr_client, (const uint8_t *)"Z", 1); main_loop(); object_unparent(OBJECT(chr_client)); d.conn_expected = false; g_idle_add(char_socket_test_idle, &d); main_loop(); object_unparent(OBJECT(chr)); } | 26,790 |
0 | void virtio_scsi_dataplane_start(VirtIOSCSI *s) { int i; int rc; BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s))); VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s); if (s->dataplane_started || s->dataplane_starting || s->dataplane_fenced || s->ctx != iothread_get_aio_context(vs->conf.iothread)) { return; } s->dataplane_starting = true; /* Set up guest notifier (irq) */ rc = k->set_guest_notifiers(qbus->parent, vs->conf.num_queues + 2, true); if (rc != 0) { fprintf(stderr, "virtio-scsi: Failed to set guest notifiers (%d), " "ensure -enable-kvm is set\n", rc); goto fail_guest_notifiers; } aio_context_acquire(s->ctx); rc = virtio_scsi_vring_init(s, vs->ctrl_vq, 0, virtio_scsi_data_plane_handle_ctrl); if (rc) { goto fail_vrings; } rc = virtio_scsi_vring_init(s, vs->event_vq, 1, virtio_scsi_data_plane_handle_event); if (rc) { goto fail_vrings; } for (i = 0; i < vs->conf.num_queues; i++) { rc = virtio_scsi_vring_init(s, vs->cmd_vqs[i], i + 2, virtio_scsi_data_plane_handle_cmd); if (rc) { goto fail_vrings; } } s->dataplane_starting = false; s->dataplane_started = true; aio_context_release(s->ctx); return; fail_vrings: virtio_scsi_clear_aio(s); aio_context_release(s->ctx); for (i = 0; i < vs->conf.num_queues + 2; i++) { virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false); } k->set_guest_notifiers(qbus->parent, vs->conf.num_queues + 2, false); fail_guest_notifiers: s->dataplane_fenced = true; s->dataplane_starting = false; s->dataplane_started = true; } | 26,793 |
0 | static void disas_fp_csel(DisasContext *s, uint32_t insn) { unsigned int mos, type, rm, cond, rn, rd; int label_continue = -1; mos = extract32(insn, 29, 3); type = extract32(insn, 22, 2); /* 0 = single, 1 = double */ rm = extract32(insn, 16, 5); cond = extract32(insn, 12, 4); rn = extract32(insn, 5, 5); rd = extract32(insn, 0, 5); if (mos || type > 1) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } if (cond < 0x0e) { /* not always */ int label_match = gen_new_label(); label_continue = gen_new_label(); arm_gen_test_cc(cond, label_match); /* nomatch: */ gen_mov_fp2fp(s, type, rd, rm); tcg_gen_br(label_continue); gen_set_label(label_match); } gen_mov_fp2fp(s, type, rd, rn); if (cond < 0x0e) { /* continue */ gen_set_label(label_continue); } } | 26,794 |
0 | int bdrv_open(BlockDriverState **pbs, const char *filename, const char *reference, QDict *options, int flags, BlockDriver *drv, Error **errp) { int ret; BlockDriverState *file = NULL, *bs; const char *drvname; Error *local_err = NULL; int snapshot_flags = 0; assert(pbs); if (reference) { bool options_non_empty = options ? qdict_size(options) : false; QDECREF(options); if (*pbs) { error_setg(errp, "Cannot reuse an existing BDS when referencing " "another block device"); return -EINVAL; } if (filename || options_non_empty) { error_setg(errp, "Cannot reference an existing block device with " "additional options or a new filename"); return -EINVAL; } bs = bdrv_lookup_bs(reference, reference, errp); if (!bs) { return -ENODEV; } bdrv_ref(bs); *pbs = bs; return 0; } if (*pbs) { bs = *pbs; } else { bs = bdrv_new(); } /* NULL means an empty set of options */ if (options == NULL) { options = qdict_new(); } ret = bdrv_fill_options(&options, &filename, flags, drv, &local_err); if (local_err) { goto fail; } /* Find the right image format driver */ drv = NULL; drvname = qdict_get_try_str(options, "driver"); if (drvname) { drv = bdrv_find_format(drvname); qdict_del(options, "driver"); if (!drv) { error_setg(errp, "Unknown driver: '%s'", drvname); ret = -EINVAL; goto fail; } } assert(drvname || !(flags & BDRV_O_PROTOCOL)); if (drv && !drv->bdrv_file_open) { /* If the user explicitly wants a format driver here, we'll need to add * another layer for the protocol in bs->file */ flags &= ~BDRV_O_PROTOCOL; } bs->options = options; options = qdict_clone_shallow(options); /* Open image file without format layer */ if ((flags & BDRV_O_PROTOCOL) == 0) { if (flags & BDRV_O_RDWR) { flags |= BDRV_O_ALLOW_RDWR; } if (flags & BDRV_O_SNAPSHOT) { snapshot_flags = bdrv_temp_snapshot_flags(flags); flags = bdrv_backing_flags(flags); } assert(file == NULL); ret = bdrv_open_image(&file, filename, options, "file", bdrv_inherited_flags(flags), true, &local_err); if (ret < 0) { goto fail; } } /* Image format probing */ bs->probed = !drv; if (!drv && file) { ret = find_image_format(file, filename, &drv, &local_err); if (ret < 0) { goto fail; } } else if (!drv) { error_setg(errp, "Must specify either driver or file"); ret = -EINVAL; goto fail; } /* Open the image */ ret = bdrv_open_common(bs, file, options, flags, drv, &local_err); if (ret < 0) { goto fail; } if (file && (bs->file != file)) { bdrv_unref(file); file = NULL; } /* If there is a backing file, use it */ if ((flags & BDRV_O_NO_BACKING) == 0) { QDict *backing_options; qdict_extract_subqdict(options, &backing_options, "backing."); ret = bdrv_open_backing_file(bs, backing_options, &local_err); if (ret < 0) { goto close_and_fail; } } bdrv_refresh_filename(bs); /* For snapshot=on, create a temporary qcow2 overlay. bs points to the * temporary snapshot afterwards. */ if (snapshot_flags) { ret = bdrv_append_temp_snapshot(bs, snapshot_flags, &local_err); if (local_err) { goto close_and_fail; } } /* Check if any unknown options were used */ if (options && (qdict_size(options) != 0)) { const QDictEntry *entry = qdict_first(options); if (flags & BDRV_O_PROTOCOL) { error_setg(errp, "Block protocol '%s' doesn't support the option " "'%s'", drv->format_name, entry->key); } else { error_setg(errp, "Block format '%s' used by device '%s' doesn't " "support the option '%s'", drv->format_name, bdrv_get_device_name(bs), entry->key); } ret = -EINVAL; goto close_and_fail; } if (!bdrv_key_required(bs)) { if (bs->blk) { blk_dev_change_media_cb(bs->blk, true); } } else if (!runstate_check(RUN_STATE_PRELAUNCH) && !runstate_check(RUN_STATE_INMIGRATE) && !runstate_check(RUN_STATE_PAUSED)) { /* HACK */ error_setg(errp, "Guest must be stopped for opening of encrypted image"); ret = -EBUSY; goto close_and_fail; } QDECREF(options); *pbs = bs; return 0; fail: if (file != NULL) { bdrv_unref(file); } QDECREF(bs->options); QDECREF(options); bs->options = NULL; if (!*pbs) { /* If *pbs is NULL, a new BDS has been created in this function and needs to be freed now. Otherwise, it does not need to be closed, since it has not really been opened yet. */ bdrv_unref(bs); } if (local_err) { error_propagate(errp, local_err); } return ret; close_and_fail: /* See fail path, but now the BDS has to be always closed */ if (*pbs) { bdrv_close(bs); } else { bdrv_unref(bs); } QDECREF(options); if (local_err) { error_propagate(errp, local_err); } return ret; } | 26,795 |
0 | START_TEST(qdict_haskey_test) { const char *key = "test"; qdict_put(tests_dict, key, qint_from_int(0)); fail_unless(qdict_haskey(tests_dict, key) == 1); } | 26,796 |
0 | static void pc_dimm_check_memdev_is_busy(const Object *obj, const char *name, Object *val, Error **errp) { Error *local_err = NULL; if (host_memory_backend_is_mapped(MEMORY_BACKEND(val))) { char *path = object_get_canonical_path_component(val); error_setg(&local_err, "can't use already busy memdev: %s", path); g_free(path); } else { qdev_prop_allow_set_link_before_realize(obj, name, val, &local_err); } error_propagate(errp, local_err); } | 26,797 |
0 | void co_run_in_worker_bh(void *opaque) { Coroutine *co = opaque; thread_pool_submit_aio(aio_get_thread_pool(qemu_get_aio_context()), coroutine_enter_func, co, coroutine_enter_cb, co); } | 26,798 |
0 | static int local_fstat(FsContext *ctx, int fd, struct stat *stbuf) { return fstat(fd, stbuf); } | 26,799 |
0 | static bool virtio_blk_sect_range_ok(VirtIOBlock *dev, uint64_t sector, size_t size) { uint64_t nb_sectors = size >> BDRV_SECTOR_BITS; uint64_t total_sectors; if (nb_sectors > INT_MAX) { return false; } if (sector & dev->sector_mask) { return false; } if (size % dev->conf.conf.logical_block_size) { return false; } blk_get_geometry(dev->blk, &total_sectors); if (sector > total_sectors || nb_sectors > total_sectors - sector) { return false; } return true; } | 26,800 |
0 | static inline void RENAME(rgb16to15)(const uint8_t *src, uint8_t *dst, long src_size) { register const uint8_t* s=src; register uint8_t* d=dst; register const uint8_t *end; const uint8_t *mm_end; end = s + src_size; #if COMPILE_TEMPLATE_MMX __asm__ volatile(PREFETCH" %0"::"m"(*s)); __asm__ volatile("movq %0, %%mm7"::"m"(mask15rg)); __asm__ volatile("movq %0, %%mm6"::"m"(mask15b)); mm_end = end - 15; while (s<mm_end) { __asm__ volatile( PREFETCH" 32%1 \n\t" "movq %1, %%mm0 \n\t" "movq 8%1, %%mm2 \n\t" "movq %%mm0, %%mm1 \n\t" "movq %%mm2, %%mm3 \n\t" "psrlq $1, %%mm0 \n\t" "psrlq $1, %%mm2 \n\t" "pand %%mm7, %%mm0 \n\t" "pand %%mm7, %%mm2 \n\t" "pand %%mm6, %%mm1 \n\t" "pand %%mm6, %%mm3 \n\t" "por %%mm1, %%mm0 \n\t" "por %%mm3, %%mm2 \n\t" MOVNTQ" %%mm0, %0 \n\t" MOVNTQ" %%mm2, 8%0" :"=m"(*d) :"m"(*s) ); d+=16; s+=16; } __asm__ volatile(SFENCE:::"memory"); __asm__ volatile(EMMS:::"memory"); #endif mm_end = end - 3; while (s < mm_end) { register uint32_t x= *((const uint32_t*)s); *((uint32_t *)d) = ((x>>1)&0x7FE07FE0) | (x&0x001F001F); s+=4; d+=4; } if (s < end) { register uint16_t x= *((const uint16_t*)s); *((uint16_t *)d) = ((x>>1)&0x7FE0) | (x&0x001F); } } | 26,801 |
0 | static void do_interrupt64(CPUX86State *env, int intno, int is_int, int error_code, target_ulong next_eip, int is_hw) { SegmentCache *dt; target_ulong ptr; int type, dpl, selector, cpl, ist; int has_error_code, new_stack; uint32_t e1, e2, e3, ss; target_ulong old_eip, esp, offset; has_error_code = 0; if (!is_int && !is_hw) { has_error_code = exception_has_error_code(intno); } if (is_int) { old_eip = next_eip; } else { old_eip = env->eip; } dt = &env->idt; if (intno * 16 + 15 > dt->limit) { raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2); } ptr = dt->base + intno * 16; e1 = cpu_ldl_kernel(env, ptr); e2 = cpu_ldl_kernel(env, ptr + 4); e3 = cpu_ldl_kernel(env, ptr + 8); /* check gate type */ type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; switch (type) { case 14: /* 386 interrupt gate */ case 15: /* 386 trap gate */ break; default: raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2); break; } dpl = (e2 >> DESC_DPL_SHIFT) & 3; cpl = env->hflags & HF_CPL_MASK; /* check privilege if software int */ if (is_int && dpl < cpl) { raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2); } /* check valid bit */ if (!(e2 & DESC_P_MASK)) { raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2); } selector = e1 >> 16; offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff); ist = e2 & 7; if ((selector & 0xfffc) == 0) { raise_exception_err(env, EXCP0D_GPF, 0); } if (load_segment(env, &e1, &e2, selector) != 0) { raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); } if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) { raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); } dpl = (e2 >> DESC_DPL_SHIFT) & 3; if (dpl > cpl) { raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); } if (!(e2 & DESC_P_MASK)) { raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc); } if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) { raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); } if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) { /* to inner privilege */ if (ist != 0) { esp = get_rsp_from_tss(env, ist + 3); } else { esp = get_rsp_from_tss(env, dpl); } esp &= ~0xfLL; /* align stack */ ss = 0; new_stack = 1; } else if ((e2 & DESC_C_MASK) || dpl == cpl) { /* to same privilege */ if (env->eflags & VM_MASK) { raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); } new_stack = 0; if (ist != 0) { esp = get_rsp_from_tss(env, ist + 3); } else { esp = env->regs[R_ESP]; } esp &= ~0xfLL; /* align stack */ dpl = cpl; } else { raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); new_stack = 0; /* avoid warning */ esp = 0; /* avoid warning */ } PUSHQ(esp, env->segs[R_SS].selector); PUSHQ(esp, env->regs[R_ESP]); PUSHQ(esp, cpu_compute_eflags(env)); PUSHQ(esp, env->segs[R_CS].selector); PUSHQ(esp, old_eip); if (has_error_code) { PUSHQ(esp, error_code); } /* interrupt gate clear IF mask */ if ((type & 1) == 0) { env->eflags &= ~IF_MASK; } env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK); if (new_stack) { ss = 0 | dpl; cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0); } env->regs[R_ESP] = esp; selector = (selector & ~3) | dpl; cpu_x86_load_seg_cache(env, R_CS, selector, get_seg_base(e1, e2), get_seg_limit(e1, e2), e2); env->eip = offset; } | 26,802 |
0 | static void sdhci_generic_reset(DeviceState *ds) { SDHCIState *s = SDHCI(ds); SDHCI_GET_CLASS(s)->reset(s); } | 26,803 |
0 | static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src, int destidx, int element, TCGMemOp memop) { int vect_off = vec_reg_offset(destidx, element, memop & MO_SIZE); switch (memop) { case MO_8: tcg_gen_st8_i32(tcg_src, cpu_env, vect_off); break; case MO_16: tcg_gen_st16_i32(tcg_src, cpu_env, vect_off); break; case MO_32: tcg_gen_st_i32(tcg_src, cpu_env, vect_off); break; default: g_assert_not_reached(); } } | 26,804 |
0 | static void net_socket_cleanup(NetClientState *nc) { NetSocketState *s = DO_UPCAST(NetSocketState, nc, nc); qemu_set_fd_handler(s->fd, NULL, NULL, NULL); close(s->fd); } | 26,805 |
0 | void qtest_init(const char *qtest_chrdev, const char *qtest_log, Error **errp) { CharDriverState *chr; chr = qemu_chr_new("qtest", qtest_chrdev, NULL); if (chr == NULL) { error_setg(errp, "Failed to initialize device for qtest: \"%s\"", qtest_chrdev); return; } qemu_chr_add_handlers(chr, qtest_can_read, qtest_read, qtest_event, chr); qemu_chr_fe_set_echo(chr, true); inbuf = g_string_new(""); if (qtest_log) { if (strcmp(qtest_log, "none") != 0) { qtest_log_fp = fopen(qtest_log, "w+"); } } else { qtest_log_fp = stderr; } qtest_chr = chr; } | 26,806 |
0 | static uint32_t taihu_cpld_readl (void *opaque, hwaddr addr) { uint32_t ret; ret = taihu_cpld_readb(opaque, addr) << 24; ret |= taihu_cpld_readb(opaque, addr + 1) << 16; ret |= taihu_cpld_readb(opaque, addr + 2) << 8; ret |= taihu_cpld_readb(opaque, addr + 3); return ret; } | 26,809 |
0 | static int htab_save_complete(QEMUFile *f, void *opaque) { sPAPRMachineState *spapr = opaque; /* Iteration header */ qemu_put_be32(f, 0); if (!spapr->htab) { int rc; assert(kvm_enabled()); rc = spapr_check_htab_fd(spapr); if (rc < 0) { return rc; } rc = kvmppc_save_htab(f, spapr->htab_fd, MAX_KVM_BUF_SIZE, -1); if (rc < 0) { return rc; } close(spapr->htab_fd); spapr->htab_fd = -1; } else { htab_save_later_pass(f, spapr, -1); } /* End marker */ qemu_put_be32(f, 0); qemu_put_be16(f, 0); qemu_put_be16(f, 0); return 0; } | 26,810 |
0 | static uint32_t ahci_port_read(AHCIState *s, int port, int offset) { uint32_t val; AHCIPortRegs *pr; pr = &s->dev[port].port_regs; switch (offset) { case PORT_LST_ADDR: val = pr->lst_addr; break; case PORT_LST_ADDR_HI: val = pr->lst_addr_hi; break; case PORT_FIS_ADDR: val = pr->fis_addr; break; case PORT_FIS_ADDR_HI: val = pr->fis_addr_hi; break; case PORT_IRQ_STAT: val = pr->irq_stat; break; case PORT_IRQ_MASK: val = pr->irq_mask; break; case PORT_CMD: val = pr->cmd; break; case PORT_TFDATA: val = pr->tfdata; break; case PORT_SIG: val = pr->sig; break; case PORT_SCR_STAT: if (s->dev[port].port.ifs[0].bs) { val = SATA_SCR_SSTATUS_DET_DEV_PRESENT_PHY_UP | SATA_SCR_SSTATUS_SPD_GEN1 | SATA_SCR_SSTATUS_IPM_ACTIVE; } else { val = SATA_SCR_SSTATUS_DET_NODEV; } break; case PORT_SCR_CTL: val = pr->scr_ctl; break; case PORT_SCR_ERR: val = pr->scr_err; break; case PORT_SCR_ACT: pr->scr_act &= ~s->dev[port].finished; s->dev[port].finished = 0; val = pr->scr_act; break; case PORT_CMD_ISSUE: val = pr->cmd_issue; break; case PORT_RESERVED: default: val = 0; } DPRINTF(port, "offset: 0x%x val: 0x%x\n", offset, val); return val; } | 26,811 |
0 | static void hscroll(AVCodecContext *avctx) { AnsiContext *s = avctx->priv_data; int i; if (s->y < avctx->height - s->font_height) { s->y += s->font_height; return; } i = 0; for (; i < avctx->height - s->font_height; i++) memcpy(s->frame->data[0] + i * s->frame->linesize[0], s->frame->data[0] + (i + s->font_height) * s->frame->linesize[0], avctx->width); for (; i < avctx->height; i++) memset(s->frame->data[0] + i * s->frame->linesize[0], DEFAULT_BG_COLOR, avctx->width); } | 26,812 |
0 | static int64_t buffered_set_rate_limit(void *opaque, int64_t new_rate) { QEMUFileBuffered *s = opaque; if (qemu_file_get_error(s->file)) { goto out; } if (new_rate > SIZE_MAX) { new_rate = SIZE_MAX; } s->xfer_limit = new_rate / 10; out: return s->xfer_limit; } | 26,813 |
0 | static int mp_dacl_setxattr(FsContext *ctx, const char *path, const char *name, void *value, size_t size, int flags) { char buffer[PATH_MAX]; return lsetxattr(rpath(ctx, path, buffer), MAP_ACL_DEFAULT, value, size, flags); } | 26,814 |
0 | static int spapr_vio_check_reg(VIOsPAPRDevice *sdev, VIOsPAPRDeviceInfo *info) { VIOsPAPRDevice *other_sdev; DeviceState *qdev; VIOsPAPRBus *sbus; sbus = DO_UPCAST(VIOsPAPRBus, bus, sdev->qdev.parent_bus); /* * Check two devices aren't given clashing addresses by the user (or some * other mechanism). We have to open code this because we have to check * for matches with devices other than us. */ QTAILQ_FOREACH(qdev, &sbus->bus.children, sibling) { other_sdev = DO_UPCAST(VIOsPAPRDevice, qdev, qdev); if (other_sdev != sdev && other_sdev->reg == sdev->reg) { fprintf(stderr, "vio: %s and %s devices conflict at address %#x\n", info->qdev.name, other_sdev->qdev.info->name, sdev->reg); return -EEXIST; } } return 0; } | 26,815
0 | static int vaapi_encode_issue(AVCodecContext *avctx, VAAPIEncodePicture *pic) { VAAPIEncodeContext *ctx = avctx->priv_data; VAAPIEncodeSlice *slice; VAStatus vas; int err, i; char data[MAX_PARAM_BUFFER_SIZE]; size_t bit_len; av_log(avctx, AV_LOG_DEBUG, "Issuing encode for pic %"PRId64"/%"PRId64" " "as type %s.\n", pic->display_order, pic->encode_order, picture_type_name[pic->type]); if (pic->nb_refs == 0) { av_log(avctx, AV_LOG_DEBUG, "No reference pictures.\n"); } else { av_log(avctx, AV_LOG_DEBUG, "Refers to:"); for (i = 0; i < pic->nb_refs; i++) { av_log(avctx, AV_LOG_DEBUG, " %"PRId64"/%"PRId64, pic->refs[i]->display_order, pic->refs[i]->encode_order); } av_log(avctx, AV_LOG_DEBUG, ".\n"); } av_assert0(pic->input_available && !pic->encode_issued); for (i = 0; i < pic->nb_refs; i++) { av_assert0(pic->refs[i]); // If we are serialised then the references must have already // completed. If not, they must have been issued but need not // have completed yet. if (ctx->issue_mode == ISSUE_MODE_SERIALISE_EVERYTHING) av_assert0(pic->refs[i]->encode_complete); else av_assert0(pic->refs[i]->encode_issued); } av_log(avctx, AV_LOG_DEBUG, "Input surface is %#x.\n", pic->input_surface); pic->recon_image = av_frame_alloc(); if (!pic->recon_image) { err = AVERROR(ENOMEM); goto fail; } err = av_hwframe_get_buffer(ctx->recon_frames_ref, pic->recon_image, 0); if (err < 0) { err = AVERROR(ENOMEM); goto fail; } pic->recon_surface = (VASurfaceID)(uintptr_t)pic->recon_image->data[3]; av_log(avctx, AV_LOG_DEBUG, "Recon surface is %#x.\n", pic->recon_surface); pic->output_buffer_ref = av_buffer_pool_get(ctx->output_buffer_pool); if (!pic->output_buffer_ref) { err = AVERROR(ENOMEM); goto fail; } pic->output_buffer = (VABufferID)(uintptr_t)pic->output_buffer_ref->data; av_log(avctx, AV_LOG_DEBUG, "Output buffer is %#x.\n", pic->output_buffer); if (ctx->codec->picture_params_size > 0) { pic->codec_picture_params = av_malloc(ctx->codec->picture_params_size); if (!pic->codec_picture_params) goto fail; memcpy(pic->codec_picture_params, ctx->codec_picture_params, ctx->codec->picture_params_size); } else { av_assert0(!ctx->codec_picture_params); } pic->nb_param_buffers = 0; if (pic->encode_order == 0) { // Global parameter buffers are set on the first picture only. 
for (i = 0; i < ctx->nb_global_params; i++) { err = vaapi_encode_make_param_buffer(avctx, pic, VAEncMiscParameterBufferType, (char*)ctx->global_params[i], ctx->global_params_size[i]); if (err < 0) goto fail; } } if (pic->type == PICTURE_TYPE_IDR && ctx->codec->init_sequence_params) { err = vaapi_encode_make_param_buffer(avctx, pic, VAEncSequenceParameterBufferType, ctx->codec_sequence_params, ctx->codec->sequence_params_size); if (err < 0) goto fail; } if (ctx->codec->init_picture_params) { err = ctx->codec->init_picture_params(avctx, pic); if (err < 0) { av_log(avctx, AV_LOG_ERROR, "Failed to initialise picture " "parameters: %d.\n", err); goto fail; } err = vaapi_encode_make_param_buffer(avctx, pic, VAEncPictureParameterBufferType, pic->codec_picture_params, ctx->codec->picture_params_size); if (err < 0) goto fail; } if (pic->type == PICTURE_TYPE_IDR) { if (ctx->codec->write_sequence_header) { bit_len = 8 * sizeof(data); err = ctx->codec->write_sequence_header(avctx, data, &bit_len); if (err < 0) { av_log(avctx, AV_LOG_ERROR, "Failed to write per-sequence " "header: %d.\n", err); goto fail; } err = vaapi_encode_make_packed_header(avctx, pic, ctx->codec->sequence_header_type, data, bit_len); if (err < 0) goto fail; } } if (ctx->codec->write_picture_header) { bit_len = 8 * sizeof(data); err = ctx->codec->write_picture_header(avctx, pic, data, &bit_len); if (err < 0) { av_log(avctx, AV_LOG_ERROR, "Failed to write per-picture " "header: %d.\n", err); goto fail; } err = vaapi_encode_make_packed_header(avctx, pic, ctx->codec->picture_header_type, data, bit_len); if (err < 0) goto fail; } if (ctx->codec->write_extra_buffer) { for (i = 0;; i++) { size_t len = sizeof(data); int type; err = ctx->codec->write_extra_buffer(avctx, pic, i, &type, data, &len); if (err == AVERROR_EOF) break; if (err < 0) { av_log(avctx, AV_LOG_ERROR, "Failed to write extra " "buffer %d: %d.\n", i, err); goto fail; } err = vaapi_encode_make_param_buffer(avctx, pic, type, data, len); if (err < 0) goto fail; } } if (ctx->codec->write_extra_header) { for (i = 0;; i++) { int type; bit_len = 8 * sizeof(data); err = ctx->codec->write_extra_header(avctx, pic, i, &type, data, &bit_len); if (err == AVERROR_EOF) break; if (err < 0) { av_log(avctx, AV_LOG_ERROR, "Failed to write extra " "header %d: %d.\n", i, err); goto fail; } err = vaapi_encode_make_packed_header(avctx, pic, type, data, bit_len); if (err < 0) goto fail; } } av_assert0(pic->nb_slices <= MAX_PICTURE_SLICES); for (i = 0; i < pic->nb_slices; i++) { slice = av_mallocz(sizeof(*slice)); if (!slice) { err = AVERROR(ENOMEM); goto fail; } pic->slices[i] = slice; if (ctx->codec->slice_params_size > 0) { slice->codec_slice_params = av_mallocz(ctx->codec->slice_params_size); if (!slice->codec_slice_params) { err = AVERROR(ENOMEM); goto fail; } } if (ctx->codec->init_slice_params) { err = ctx->codec->init_slice_params(avctx, pic, slice); if (err < 0) { av_log(avctx, AV_LOG_ERROR, "Failed to initalise slice " "parameters: %d.\n", err); goto fail; } } if (ctx->codec->write_slice_header) { bit_len = 8 * sizeof(data); err = ctx->codec->write_slice_header(avctx, pic, slice, data, &bit_len); if (err < 0) { av_log(avctx, AV_LOG_ERROR, "Failed to write per-slice " "header: %d.\n", err); goto fail; } err = vaapi_encode_make_packed_header(avctx, pic, ctx->codec->slice_header_type, data, bit_len); if (err < 0) goto fail; } if (ctx->codec->init_slice_params) { err = vaapi_encode_make_param_buffer(avctx, pic, VAEncSliceParameterBufferType, slice->codec_slice_params, 
ctx->codec->slice_params_size); if (err < 0) goto fail; } } vas = vaBeginPicture(ctx->hwctx->display, ctx->va_context, pic->input_surface); if (vas != VA_STATUS_SUCCESS) { av_log(avctx, AV_LOG_ERROR, "Failed to begin picture encode issue: " "%d (%s).\n", vas, vaErrorStr(vas)); err = AVERROR(EIO); goto fail_with_picture; } vas = vaRenderPicture(ctx->hwctx->display, ctx->va_context, pic->param_buffers, pic->nb_param_buffers); if (vas != VA_STATUS_SUCCESS) { av_log(avctx, AV_LOG_ERROR, "Failed to upload encode parameters: " "%d (%s).\n", vas, vaErrorStr(vas)); err = AVERROR(EIO); goto fail_with_picture; } vas = vaEndPicture(ctx->hwctx->display, ctx->va_context); if (vas != VA_STATUS_SUCCESS) { av_log(avctx, AV_LOG_ERROR, "Failed to end picture encode issue: " "%d (%s).\n", vas, vaErrorStr(vas)); err = AVERROR(EIO); // vaRenderPicture() has been called here, so we should not destroy // the parameter buffers unless separate destruction is required. if (ctx->hwctx->driver_quirks & AV_VAAPI_DRIVER_QUIRK_RENDER_PARAM_BUFFERS) goto fail; else goto fail_at_end; } if (ctx->hwctx->driver_quirks & AV_VAAPI_DRIVER_QUIRK_RENDER_PARAM_BUFFERS) { for (i = 0; i < pic->nb_param_buffers; i++) { vas = vaDestroyBuffer(ctx->hwctx->display, pic->param_buffers[i]); if (vas != VA_STATUS_SUCCESS) { av_log(avctx, AV_LOG_ERROR, "Failed to destroy " "param buffer %#x: %d (%s).\n", pic->param_buffers[i], vas, vaErrorStr(vas)); // And ignore. } } } pic->encode_issued = 1; if (ctx->issue_mode == ISSUE_MODE_SERIALISE_EVERYTHING) return vaapi_encode_wait(avctx, pic); else return 0; fail_with_picture: vaEndPicture(ctx->hwctx->display, ctx->va_context); fail: for(i = 0; i < pic->nb_param_buffers; i++) vaDestroyBuffer(ctx->hwctx->display, pic->param_buffers[i]); fail_at_end: av_freep(&pic->codec_picture_params); av_frame_free(&pic->recon_image); return err; } | 26,816 |
0 | static void avc_luma_hv_qrt_8w_msa(const uint8_t *src_x, const uint8_t *src_y, int32_t src_stride, uint8_t *dst, int32_t dst_stride, int32_t height) { uint32_t loop_cnt; v16i8 src_hz0, src_hz1, src_hz2, src_hz3; v16i8 src_vt0, src_vt1, src_vt2, src_vt3, src_vt4; v16i8 src_vt5, src_vt6, src_vt7, src_vt8; v16i8 mask0, mask1, mask2; v8i16 hz_out0, hz_out1, hz_out2, hz_out3; v8i16 vert_out0, vert_out1, vert_out2, vert_out3; v8i16 out0, out1, out2, out3; v16u8 tmp0, tmp1; LD_SB3(&luma_mask_arr[0], 16, mask0, mask1, mask2); LD_SB5(src_y, src_stride, src_vt0, src_vt1, src_vt2, src_vt3, src_vt4); src_y += (5 * src_stride); src_vt0 = (v16i8) __msa_insve_d((v2i64) src_vt0, 1, (v2i64) src_vt1); src_vt1 = (v16i8) __msa_insve_d((v2i64) src_vt1, 1, (v2i64) src_vt2); src_vt2 = (v16i8) __msa_insve_d((v2i64) src_vt2, 1, (v2i64) src_vt3); src_vt3 = (v16i8) __msa_insve_d((v2i64) src_vt3, 1, (v2i64) src_vt4); XORI_B4_128_SB(src_vt0, src_vt1, src_vt2, src_vt3); for (loop_cnt = (height >> 2); loop_cnt--;) { LD_SB4(src_x, src_stride, src_hz0, src_hz1, src_hz2, src_hz3); XORI_B4_128_SB(src_hz0, src_hz1, src_hz2, src_hz3); src_x += (4 * src_stride); hz_out0 = AVC_HORZ_FILTER_SH(src_hz0, src_hz0, mask0, mask1, mask2); hz_out1 = AVC_HORZ_FILTER_SH(src_hz1, src_hz1, mask0, mask1, mask2); hz_out2 = AVC_HORZ_FILTER_SH(src_hz2, src_hz2, mask0, mask1, mask2); hz_out3 = AVC_HORZ_FILTER_SH(src_hz3, src_hz3, mask0, mask1, mask2); SRARI_H4_SH(hz_out0, hz_out1, hz_out2, hz_out3, 5); SAT_SH4_SH(hz_out0, hz_out1, hz_out2, hz_out3, 7); LD_SB4(src_y, src_stride, src_vt5, src_vt6, src_vt7, src_vt8); src_y += (4 * src_stride); src_vt4 = (v16i8) __msa_insve_d((v2i64) src_vt4, 1, (v2i64) src_vt5); src_vt5 = (v16i8) __msa_insve_d((v2i64) src_vt5, 1, (v2i64) src_vt6); src_vt6 = (v16i8) __msa_insve_d((v2i64) src_vt6, 1, (v2i64) src_vt7); src_vt7 = (v16i8) __msa_insve_d((v2i64) src_vt7, 1, (v2i64) src_vt8); XORI_B4_128_SB(src_vt4, src_vt5, src_vt6, src_vt7); /* filter calc */ AVC_CALC_DPADD_B_6PIX_2COEFF_SH(src_vt0, src_vt1, src_vt2, src_vt3, src_vt4, src_vt5, vert_out0, vert_out1); AVC_CALC_DPADD_B_6PIX_2COEFF_SH(src_vt2, src_vt3, src_vt4, src_vt5, src_vt6, src_vt7, vert_out2, vert_out3); SRARI_H4_SH(vert_out0, vert_out1, vert_out2, vert_out3, 5); SAT_SH4_SH(vert_out0, vert_out1, vert_out2, vert_out3, 7); out0 = __msa_srari_h((hz_out0 + vert_out0), 1); out1 = __msa_srari_h((hz_out1 + vert_out1), 1); out2 = __msa_srari_h((hz_out2 + vert_out2), 1); out3 = __msa_srari_h((hz_out3 + vert_out3), 1); SAT_SH4_SH(out0, out1, out2, out3, 7); tmp0 = PCKEV_XORI128_UB(out0, out1); tmp1 = PCKEV_XORI128_UB(out2, out3); ST8x4_UB(tmp0, tmp1, dst, dst_stride); dst += (4 * dst_stride); src_vt3 = src_vt7; src_vt1 = src_vt5; src_vt5 = src_vt4; src_vt4 = src_vt8; src_vt2 = src_vt6; src_vt0 = src_vt5; } } | 26,817 |
0 | static int dxva_get_decoder_guid(AVCodecContext *avctx, void *service, void *surface_format, unsigned guid_count, const GUID *guid_list, GUID *decoder_guid) { FFDXVASharedContext *sctx = DXVA_SHARED_CONTEXT(avctx); unsigned i, j; *decoder_guid = ff_GUID_NULL; for (i = 0; dxva_modes[i].guid; i++) { const dxva_mode *mode = &dxva_modes[i]; int validate; if (mode->codec != avctx->codec_id) continue; for (j = 0; j < guid_count; j++) { if (IsEqualGUID(mode->guid, &guid_list[j])) break; } if (j == guid_count) continue; #if CONFIG_D3D11VA if (sctx->pix_fmt == AV_PIX_FMT_D3D11) validate = d3d11va_validate_output(service, *mode->guid, surface_format); #endif #if CONFIG_DXVA2 if (sctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD) validate = dxva2_validate_output(service, *mode->guid, surface_format); #endif if (validate) { *decoder_guid = *mode->guid; break; } } if (IsEqualGUID(decoder_guid, &ff_GUID_NULL)) { av_log(avctx, AV_LOG_VERBOSE, "No decoder device for codec found\n"); return AVERROR(EINVAL); } if (IsEqualGUID(decoder_guid, &ff_DXVADDI_Intel_ModeH264_E)) sctx->workaround |= FF_DXVA2_WORKAROUND_INTEL_CLEARVIDEO; return 0; } | 26,820 |
0 | static int decode_vol_header(Mpeg4DecContext *ctx, GetBitContext *gb) { MpegEncContext *s = &ctx->m; int width, height, vo_ver_id; /* vol header */ skip_bits(gb, 1); /* random access */ s->vo_type = get_bits(gb, 8); if (get_bits1(gb) != 0) { /* is_ol_id */ vo_ver_id = get_bits(gb, 4); /* vo_ver_id */ skip_bits(gb, 3); /* vo_priority */ } else { vo_ver_id = 1; } s->aspect_ratio_info = get_bits(gb, 4); if (s->aspect_ratio_info == FF_ASPECT_EXTENDED) { s->avctx->sample_aspect_ratio.num = get_bits(gb, 8); // par_width s->avctx->sample_aspect_ratio.den = get_bits(gb, 8); // par_height } else { s->avctx->sample_aspect_ratio = ff_h263_pixel_aspect[s->aspect_ratio_info]; } if ((ctx->vol_control_parameters = get_bits1(gb))) { /* vol control parameter */ int chroma_format = get_bits(gb, 2); if (chroma_format != CHROMA_420) av_log(s->avctx, AV_LOG_ERROR, "illegal chroma format\n"); s->low_delay = get_bits1(gb); if (get_bits1(gb)) { /* vbv parameters */ get_bits(gb, 15); /* first_half_bitrate */ skip_bits1(gb); /* marker */ get_bits(gb, 15); /* latter_half_bitrate */ skip_bits1(gb); /* marker */ get_bits(gb, 15); /* first_half_vbv_buffer_size */ skip_bits1(gb); /* marker */ get_bits(gb, 3); /* latter_half_vbv_buffer_size */ get_bits(gb, 11); /* first_half_vbv_occupancy */ skip_bits1(gb); /* marker */ get_bits(gb, 15); /* latter_half_vbv_occupancy */ skip_bits1(gb); /* marker */ } } else { /* is setting low delay flag only once the smartest thing to do? * low delay detection won't be overridden. */ if (s->picture_number == 0) s->low_delay = 0; } ctx->shape = get_bits(gb, 2); /* vol shape */ if (ctx->shape != RECT_SHAPE) av_log(s->avctx, AV_LOG_ERROR, "only rectangular vol supported\n"); if (ctx->shape == GRAY_SHAPE && vo_ver_id != 1) { av_log(s->avctx, AV_LOG_ERROR, "Gray shape not supported\n"); skip_bits(gb, 4); /* video_object_layer_shape_extension */ } check_marker(gb, "before time_increment_resolution"); s->avctx->framerate.num = get_bits(gb, 16); if (!s->avctx->framerate.num) { av_log(s->avctx, AV_LOG_ERROR, "framerate==0\n"); return AVERROR_INVALIDDATA; } ctx->time_increment_bits = av_log2(s->avctx->framerate.num - 1) + 1; if (ctx->time_increment_bits < 1) ctx->time_increment_bits = 1; check_marker(gb, "before fixed_vop_rate"); if (get_bits1(gb) != 0) /* fixed_vop_rate */ s->avctx->framerate.den = get_bits(gb, ctx->time_increment_bits); else s->avctx->framerate.den = 1; s->avctx->time_base = av_inv_q(av_mul_q(s->avctx->framerate, (AVRational){s->avctx->ticks_per_frame, 1})); ctx->t_frame = 0; if (ctx->shape != BIN_ONLY_SHAPE) { if (ctx->shape == RECT_SHAPE) { check_marker(gb, "before width"); width = get_bits(gb, 13); check_marker(gb, "before height"); height = get_bits(gb, 13); check_marker(gb, "after height"); if (width && height && /* they should be non zero but who knows */ !(s->width && s->codec_tag == AV_RL32("MP4S"))) { if (s->width && s->height && (s->width != width || s->height != height)) s->context_reinit = 1; s->width = width; s->height = height; } } s->progressive_sequence = s->progressive_frame = get_bits1(gb) ^ 1; s->interlaced_dct = 0; if (!get_bits1(gb) && (s->avctx->debug & FF_DEBUG_PICT_INFO)) av_log(s->avctx, AV_LOG_INFO, /* OBMC Disable */ "MPEG4 OBMC not supported (very likely buggy encoder)\n"); if (vo_ver_id == 1) ctx->vol_sprite_usage = get_bits1(gb); /* vol_sprite_usage */ else ctx->vol_sprite_usage = get_bits(gb, 2); /* vol_sprite_usage */ if (ctx->vol_sprite_usage == STATIC_SPRITE) av_log(s->avctx, AV_LOG_ERROR, "Static Sprites not supported\n"); if 
(ctx->vol_sprite_usage == STATIC_SPRITE || ctx->vol_sprite_usage == GMC_SPRITE) { if (ctx->vol_sprite_usage == STATIC_SPRITE) { skip_bits(gb, 13); // sprite_width skip_bits1(gb); /* marker */ skip_bits(gb, 13); // sprite_height skip_bits1(gb); /* marker */ skip_bits(gb, 13); // sprite_left skip_bits1(gb); /* marker */ skip_bits(gb, 13); // sprite_top skip_bits1(gb); /* marker */ } ctx->num_sprite_warping_points = get_bits(gb, 6); if (ctx->num_sprite_warping_points > 3) { av_log(s->avctx, AV_LOG_ERROR, "%d sprite_warping_points\n", ctx->num_sprite_warping_points); ctx->num_sprite_warping_points = 0; return AVERROR_INVALIDDATA; } s->sprite_warping_accuracy = get_bits(gb, 2); ctx->sprite_brightness_change = get_bits1(gb); if (ctx->vol_sprite_usage == STATIC_SPRITE) skip_bits1(gb); // low_latency_sprite } // FIXME sadct disable bit if verid!=1 && shape not rect if (get_bits1(gb) == 1) { /* not_8_bit */ s->quant_precision = get_bits(gb, 4); /* quant_precision */ if (get_bits(gb, 4) != 8) /* bits_per_pixel */ av_log(s->avctx, AV_LOG_ERROR, "N-bit not supported\n"); if (s->quant_precision != 5) av_log(s->avctx, AV_LOG_ERROR, "quant precision %d\n", s->quant_precision); if (s->quant_precision<3 || s->quant_precision>9) { s->quant_precision = 5; } } else { s->quant_precision = 5; } // FIXME a bunch of grayscale shape things if ((s->mpeg_quant = get_bits1(gb))) { /* vol_quant_type */ int i, v; /* load default matrixes */ for (i = 0; i < 64; i++) { int j = s->idsp.idct_permutation[i]; v = ff_mpeg4_default_intra_matrix[i]; s->intra_matrix[j] = v; s->chroma_intra_matrix[j] = v; v = ff_mpeg4_default_non_intra_matrix[i]; s->inter_matrix[j] = v; s->chroma_inter_matrix[j] = v; } /* load custom intra matrix */ if (get_bits1(gb)) { int last = 0; for (i = 0; i < 64; i++) { int j; v = get_bits(gb, 8); if (v == 0) break; last = v; j = s->idsp.idct_permutation[ff_zigzag_direct[i]]; s->intra_matrix[j] = last; s->chroma_intra_matrix[j] = last; } /* replicate last value */ for (; i < 64; i++) { int j = s->idsp.idct_permutation[ff_zigzag_direct[i]]; s->intra_matrix[j] = last; s->chroma_intra_matrix[j] = last; } } /* load custom non intra matrix */ if (get_bits1(gb)) { int last = 0; for (i = 0; i < 64; i++) { int j; v = get_bits(gb, 8); if (v == 0) break; last = v; j = s->idsp.idct_permutation[ff_zigzag_direct[i]]; s->inter_matrix[j] = v; s->chroma_inter_matrix[j] = v; } /* replicate last value */ for (; i < 64; i++) { int j = s->idsp.idct_permutation[ff_zigzag_direct[i]]; s->inter_matrix[j] = last; s->chroma_inter_matrix[j] = last; } } // FIXME a bunch of grayscale shape things } if (vo_ver_id != 1) s->quarter_sample = get_bits1(gb); else s->quarter_sample = 0; if (get_bits_left(gb) < 4) { av_log(s->avctx, AV_LOG_ERROR, "VOL Header truncated\n"); return AVERROR_INVALIDDATA; } if (!get_bits1(gb)) { int pos = get_bits_count(gb); int estimation_method = get_bits(gb, 2); if (estimation_method < 2) { if (!get_bits1(gb)) { ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* opaque */ ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* transparent */ ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* intra_cae */ ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* inter_cae */ ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* no_update */ ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* upampling */ } if (!get_bits1(gb)) { ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* intra_blocks */ ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* inter_blocks */ ctx->cplx_estimation_trash_p += 8 * 
get_bits1(gb); /* inter4v_blocks */ ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* not coded blocks */ } if (!check_marker(gb, "in complexity estimation part 1")) { skip_bits_long(gb, pos - get_bits_count(gb)); goto no_cplx_est; } if (!get_bits1(gb)) { ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* dct_coeffs */ ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* dct_lines */ ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* vlc_syms */ ctx->cplx_estimation_trash_i += 4 * get_bits1(gb); /* vlc_bits */ } if (!get_bits1(gb)) { ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* apm */ ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* npm */ ctx->cplx_estimation_trash_b += 8 * get_bits1(gb); /* interpolate_mc_q */ ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* forwback_mc_q */ ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* halfpel2 */ ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* halfpel4 */ } if (!check_marker(gb, "in complexity estimation part 2")) { skip_bits_long(gb, pos - get_bits_count(gb)); goto no_cplx_est; } if (estimation_method == 1) { ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* sadct */ ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* qpel */ } } else av_log(s->avctx, AV_LOG_ERROR, "Invalid Complexity estimation method %d\n", estimation_method); } else { no_cplx_est: ctx->cplx_estimation_trash_i = ctx->cplx_estimation_trash_p = ctx->cplx_estimation_trash_b = 0; } ctx->resync_marker = !get_bits1(gb); /* resync_marker_disabled */ s->data_partitioning = get_bits1(gb); if (s->data_partitioning) ctx->rvlc = get_bits1(gb); if (vo_ver_id != 1) { ctx->new_pred = get_bits1(gb); if (ctx->new_pred) { av_log(s->avctx, AV_LOG_ERROR, "new pred not supported\n"); skip_bits(gb, 2); /* requested upstream message type */ skip_bits1(gb); /* newpred segment type */ } if (get_bits1(gb)) // reduced_res_vop av_log(s->avctx, AV_LOG_ERROR, "reduced resolution VOP not supported\n"); } else { ctx->new_pred = 0; } ctx->scalability = get_bits1(gb); if (ctx->scalability) { GetBitContext bak = *gb; int h_sampling_factor_n; int h_sampling_factor_m; int v_sampling_factor_n; int v_sampling_factor_m; skip_bits1(gb); // hierarchy_type skip_bits(gb, 4); /* ref_layer_id */ skip_bits1(gb); /* ref_layer_sampling_dir */ h_sampling_factor_n = get_bits(gb, 5); h_sampling_factor_m = get_bits(gb, 5); v_sampling_factor_n = get_bits(gb, 5); v_sampling_factor_m = get_bits(gb, 5); ctx->enhancement_type = get_bits1(gb); if (h_sampling_factor_n == 0 || h_sampling_factor_m == 0 || v_sampling_factor_n == 0 || v_sampling_factor_m == 0) { /* illegal scalability header (VERY broken encoder), * trying to workaround */ ctx->scalability = 0; *gb = bak; } else av_log(s->avctx, AV_LOG_ERROR, "scalability not supported\n"); // bin shape stuff FIXME } } if (s->avctx->debug&FF_DEBUG_PICT_INFO) { av_log(s->avctx, AV_LOG_DEBUG, "tb %d/%d, tincrbits:%d, qp_prec:%d, ps:%d, %s%s%s%s\n", s->avctx->framerate.den, s->avctx->framerate.num, ctx->time_increment_bits, s->quant_precision, s->progressive_sequence, ctx->scalability ? "scalability " :"" , s->quarter_sample ? "qpel " : "", s->data_partitioning ? "partition " : "", ctx->rvlc ? "rvlc " : "" ); } return 0; } | 26,821 |
0 | static int mmap_read_frame(AVFormatContext *ctx, AVPacket *pkt) { struct video_data *s = ctx->priv_data; struct v4l2_buffer buf = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, .memory = V4L2_MEMORY_MMAP }; struct pollfd p = { .fd = s->fd, .events = POLLIN }; int res; res = poll(&p, 1, s->timeout); if (res < 0) return AVERROR(errno); if (!(p.revents & (POLLIN | POLLERR | POLLHUP))) return AVERROR(EAGAIN); /* FIXME: Some special treatment might be needed in case of loss of signal... */ while ((res = ioctl(s->fd, VIDIOC_DQBUF, &buf)) < 0 && (errno == EINTR)); if (res < 0) { if (errno == EAGAIN) { pkt->size = 0; return AVERROR(EAGAIN); } av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_DQBUF): %s\n", strerror(errno)); return AVERROR(errno); } if (buf.index >= s->buffers) { av_log(ctx, AV_LOG_ERROR, "Invalid buffer index received.\n"); return AVERROR(EINVAL); } avpriv_atomic_int_add_and_fetch(&s->buffers_queued, -1); // always keep at least one buffer queued av_assert0(avpriv_atomic_int_get(&s->buffers_queued) >= 1); if (s->frame_size > 0 && buf.bytesused != s->frame_size) { av_log(ctx, AV_LOG_ERROR, "The v4l2 frame is %d bytes, but %d bytes are expected\n", buf.bytesused, s->frame_size); return AVERROR_INVALIDDATA; } /* Image is at s->buff_start[buf.index] */ if (avpriv_atomic_int_get(&s->buffers_queued) == FFMAX(s->buffers / 8, 1)) { /* when we start getting low on queued buffers, fall back on copying data */ res = av_new_packet(pkt, buf.bytesused); if (res < 0) { av_log(ctx, AV_LOG_ERROR, "Error allocating a packet.\n"); return res; } memcpy(pkt->data, s->buf_start[buf.index], buf.bytesused); res = ioctl(s->fd, VIDIOC_QBUF, &buf); if (res < 0) { av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_QBUF)\n"); av_free_packet(pkt); return AVERROR(errno); } avpriv_atomic_int_add_and_fetch(&s->buffers_queued, 1); } else { struct buff_data *buf_descriptor; pkt->data = s->buf_start[buf.index]; pkt->size = buf.bytesused; #if FF_API_DESTRUCT_PACKET FF_DISABLE_DEPRECATION_WARNINGS pkt->destruct = dummy_release_buffer; FF_ENABLE_DEPRECATION_WARNINGS #endif buf_descriptor = av_malloc(sizeof(struct buff_data)); if (buf_descriptor == NULL) { /* Something went wrong... Since av_malloc() failed, we cannot even * allocate a buffer for memcpying into it */ av_log(ctx, AV_LOG_ERROR, "Failed to allocate a buffer descriptor\n"); res = ioctl(s->fd, VIDIOC_QBUF, &buf); return AVERROR(ENOMEM); } buf_descriptor->fd = s->fd; buf_descriptor->index = buf.index; buf_descriptor->s = s; pkt->buf = av_buffer_create(pkt->data, pkt->size, mmap_release_buffer, buf_descriptor, 0); if (!pkt->buf) { av_freep(&buf_descriptor); return AVERROR(ENOMEM); } } pkt->pts = buf.timestamp.tv_sec * INT64_C(1000000) + buf.timestamp.tv_usec; return s->buf_len[buf.index]; } | 26,822 |
1 | void cpu_ppc_set_papr(PowerPCCPU *cpu) { CPUPPCState *env = &cpu->env; /* PAPR always has exception vectors in RAM not ROM. To ensure this, * MSR[IP] should never be set. * * We also disallow setting of MSR_HV */ env->msr_mask &= ~((1ull << MSR_EP) | MSR_HVB); /* Set a full AMOR so guest can use the AMR as it sees fit */ env->spr[SPR_AMOR] = amor->default_value = 0xffffffffffffffffull; /* Tell KVM that we're in PAPR mode */ if (kvm_enabled()) { kvmppc_set_papr(cpu); } } | 26,823 |
1 | static int aac_encode_frame(AVCodecContext *avctx, uint8_t *frame, int buf_size, void *data) { AACEncContext *s = avctx->priv_data; int16_t *samples = s->samples, *samples2, *la; ChannelElement *cpe; int i, j, chans, tag, start_ch; const uint8_t *chan_map = aac_chan_configs[avctx->channels-1]; int chan_el_counter[4]; FFPsyWindowInfo windows[avctx->channels]; if (s->last_frame) return 0; if (data) { if (!s->psypp) { memcpy(s->samples + 1024 * avctx->channels, data, 1024 * avctx->channels * sizeof(s->samples[0])); } else { start_ch = 0; samples2 = s->samples + 1024 * avctx->channels; for (i = 0; i < chan_map[0]; i++) { tag = chan_map[i+1]; chans = tag == TYPE_CPE ? 2 : 1; ff_psy_preprocess(s->psypp, (uint16_t*)data + start_ch, samples2 + start_ch, start_ch, chans); start_ch += chans; } } } if (!avctx->frame_number) { memcpy(s->samples, s->samples + 1024 * avctx->channels, 1024 * avctx->channels * sizeof(s->samples[0])); return 0; } start_ch = 0; for (i = 0; i < chan_map[0]; i++) { FFPsyWindowInfo* wi = windows + start_ch; tag = chan_map[i+1]; chans = tag == TYPE_CPE ? 2 : 1; cpe = &s->cpe[i]; samples2 = samples + start_ch; la = samples2 + 1024 * avctx->channels + start_ch; if (!data) la = NULL; for (j = 0; j < chans; j++) { IndividualChannelStream *ics = &cpe->ch[j].ics; int k; wi[j] = ff_psy_suggest_window(&s->psy, samples2, la, start_ch + j, ics->window_sequence[0]); ics->window_sequence[1] = ics->window_sequence[0]; ics->window_sequence[0] = wi[j].window_type[0]; ics->use_kb_window[1] = ics->use_kb_window[0]; ics->use_kb_window[0] = wi[j].window_shape; ics->num_windows = wi[j].num_windows; ics->swb_sizes = s->psy.bands [ics->num_windows == 8]; ics->num_swb = s->psy.num_bands[ics->num_windows == 8]; for (k = 0; k < ics->num_windows; k++) ics->group_len[k] = wi[j].grouping[k]; s->cur_channel = start_ch + j; apply_window_and_mdct(avctx, s, &cpe->ch[j], samples2, j); } start_ch += chans; } init_put_bits(&s->pb, frame, buf_size*8); if ((avctx->frame_number & 0xFF)==1 && !(avctx->flags & CODEC_FLAG_BITEXACT)) put_bitstream_info(avctx, s, LIBAVCODEC_IDENT); start_ch = 0; memset(chan_el_counter, 0, sizeof(chan_el_counter)); for (i = 0; i < chan_map[0]; i++) { FFPsyWindowInfo* wi = windows + start_ch; tag = chan_map[i+1]; chans = tag == TYPE_CPE ? 
2 : 1; cpe = &s->cpe[i]; for (j = 0; j < chans; j++) { s->coder->search_for_quantizers(avctx, s, &cpe->ch[j], s->lambda); } cpe->common_window = 0; if (chans > 1 && wi[0].window_type[0] == wi[1].window_type[0] && wi[0].window_shape == wi[1].window_shape) { cpe->common_window = 1; for (j = 0; j < wi[0].num_windows; j++) { if (wi[0].grouping[j] != wi[1].grouping[j]) { cpe->common_window = 0; break; } } } if (cpe->common_window && s->coder->search_for_ms) s->coder->search_for_ms(s, cpe, s->lambda); adjust_frame_information(s, cpe, chans); put_bits(&s->pb, 3, tag); put_bits(&s->pb, 4, chan_el_counter[tag]++); if (chans == 2) { put_bits(&s->pb, 1, cpe->common_window); if (cpe->common_window) { put_ics_info(s, &cpe->ch[0].ics); encode_ms_info(&s->pb, cpe); } } for (j = 0; j < chans; j++) { s->cur_channel = start_ch + j; ff_psy_set_band_info(&s->psy, s->cur_channel, cpe->ch[j].coeffs, &wi[j]); encode_individual_channel(avctx, s, &cpe->ch[j], cpe->common_window); } start_ch += chans; } put_bits(&s->pb, 3, TYPE_END); flush_put_bits(&s->pb); avctx->frame_bits = put_bits_count(&s->pb); // rate control stuff if (!(avctx->flags & CODEC_FLAG_QSCALE)) { float ratio = avctx->bit_rate * 1024.0f / avctx->sample_rate / avctx->frame_bits; s->lambda *= ratio; s->lambda = fminf(s->lambda, 65536.f); } if (avctx->frame_bits > 6144*avctx->channels) av_log(avctx, AV_LOG_ERROR, "input buffer violation %d > %d.\n", avctx->frame_bits, 6144*avctx->channels); if (!data) s->last_frame = 1; memcpy(s->samples, s->samples + 1024 * avctx->channels, 1024 * avctx->channels * sizeof(s->samples[0])); return put_bits_count(&s->pb)>>3; } | 26,824 |
0 | static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n, int coded, int mquant, int codingset) { GetBitContext *gb = &v->s.gb; MpegEncContext *s = &v->s; int dc_pred_dir = 0; /* Direction of the DC prediction used */ int run_diff, i; int16_t *dc_val; int16_t *ac_val, *ac_val2; int dcdiff; int mb_pos = s->mb_x + s->mb_y * s->mb_stride; int a_avail, c_avail; /* XXX: Guard against dumb values of mquant */ mquant = (mquant < 1) ? 0 : ( (mquant>31) ? 31 : mquant ); /* Set DC scale - y and c use the same */ s->y_dc_scale = s->y_dc_scale_table[mquant]; s->c_dc_scale = s->c_dc_scale_table[mquant]; /* check if prediction blocks A and C are available */ a_avail = c_avail = 0; if((n == 2 || n == 3) || (s->mb_y && IS_INTRA(s->current_picture.mb_type[mb_pos - s->mb_stride]))) a_avail = 1; if((n == 1 || n == 3) || (s->mb_x && IS_INTRA(s->current_picture.mb_type[mb_pos - 1]))) c_avail = 1; /* Get DC differential */ if (n < 4) { dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3); } else { dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3); } if (dcdiff < 0){ av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n"); return -1; } if (dcdiff) { if (dcdiff == 119 /* ESC index value */) { /* TODO: Optimize */ if (mquant == 1) dcdiff = get_bits(gb, 10); else if (mquant == 2) dcdiff = get_bits(gb, 9); else dcdiff = get_bits(gb, 8); } else { if (mquant == 1) dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3; else if (mquant == 2) dcdiff = (dcdiff<<1) + get_bits(gb, 1) - 1; } if (get_bits(gb, 1)) dcdiff = -dcdiff; } /* Prediction */ dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir); *dc_val = dcdiff; /* Store the quantized DC coeff, used for prediction */ if (n < 4) { block[0] = dcdiff * s->y_dc_scale; } else { block[0] = dcdiff * s->c_dc_scale; } /* Skip ? */ run_diff = 0; i = 0; if (!coded) { goto not_coded; } //AC Decoding i = 1; { int last = 0, skip, value; const int8_t *zz_table; int scale; int k; scale = mquant * 2 + v->halfpq; zz_table = vc1_simple_progressive_8x8_zz; ac_val = s->ac_val[0][0] + s->block_index[n] * 16; ac_val2 = ac_val; if(dc_pred_dir) //left ac_val -= 16; else //top ac_val -= 16 * s->block_wrap[n]; while (!last) { vc1_decode_ac_coeff(v, &last, &skip, &value, codingset); i += skip; if(i > 63) break; block[zz_table[i++]] = value; } /* apply AC prediction if needed */ if(s->ac_pred) { /* scale predictors if needed*/ int mb_pos2, q1, q2; mb_pos2 = mb_pos - dc_pred_dir - (1 - dc_pred_dir) * s->mb_stride; q1 = s->current_picture.qscale_table[mb_pos]; q2 = s->current_picture.qscale_table[mb_pos2]; if(!c_avail) { memset(ac_val, 0, 8 * sizeof(ac_val[0])); dc_pred_dir = 0; } if(!a_avail) { memset(ac_val + 8, 0, 8 * sizeof(ac_val[0])); dc_pred_dir = 1; } if(!q1 && q1 && q2 && q1 != q2) { q1 = q1 * 2 - 1; q2 = q2 * 2 - 1; if(dc_pred_dir) { //left for(k = 1; k < 8; k++) block[k << 3] += (ac_val[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18; } else { //top for(k = 1; k < 8; k++) block[k] += (ac_val[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18; } } else { if(dc_pred_dir) { //left for(k = 1; k < 8; k++) block[k << 3] += ac_val[k]; } else { //top for(k = 1; k < 8; k++) block[k] += ac_val[k + 8]; } } } /* save AC coeffs for further prediction */ for(k = 1; k < 8; k++) { ac_val2[k] = block[k << 3]; ac_val2[k + 8] = block[k]; } /* scale AC coeffs */ for(k = 1; k < 64; k++) if(block[k]) { block[k] *= scale; if(!v->pquantizer) block[k] += (block[k] < 0) ? 
-mquant : mquant; } if(s->ac_pred) i = 63; } not_coded: if(!coded) { int k, scale; ac_val = s->ac_val[0][0] + s->block_index[n] * 16; ac_val2 = ac_val; if(!c_avail) { memset(ac_val, 0, 8 * sizeof(ac_val[0])); dc_pred_dir = 0; } if(!a_avail) { memset(ac_val + 8, 0, 8 * sizeof(ac_val[0])); dc_pred_dir = 1; } scale = mquant * 2 + v->halfpq; memset(ac_val2, 0, 16 * 2); if(dc_pred_dir) {//left ac_val -= 16; if(s->ac_pred) memcpy(ac_val2, ac_val, 8 * 2); } else {//top ac_val -= 16 * s->block_wrap[n]; if(s->ac_pred) memcpy(ac_val2 + 8, ac_val + 8, 8 * 2); } /* apply AC prediction if needed */ if(s->ac_pred) { if(dc_pred_dir) { //left for(k = 1; k < 8; k++) { block[k << 3] = ac_val[k] * scale; if(!v->pquantizer) block[k << 3] += (block[k << 3] < 0) ? -mquant : mquant; } } else { //top for(k = 1; k < 8; k++) { block[k] = ac_val[k + 8] * scale; if(!v->pquantizer) block[k] += (block[k] < 0) ? -mquant : mquant; } } i = 63; } } s->block_last_index[n] = i; return 0; } | 26,825 |
0 | static int vp3_decode_end(AVCodecContext *avctx) { Vp3DecodeContext *s = avctx->priv_data; av_free(s->all_fragments); av_free(s->coded_fragment_list); av_free(s->superblock_fragments); av_free(s->superblock_macroblocks); av_free(s->macroblock_fragments); av_free(s->macroblock_coded); /* release all frames */ avctx->release_buffer(avctx, &s->golden_frame); avctx->release_buffer(avctx, &s->last_frame); avctx->release_buffer(avctx, &s->current_frame); return 0; } | 26,826 |
1 | static uint64_t openpic_src_read(void *opaque, uint64_t addr, unsigned len) { OpenPICState *opp = opaque; uint32_t retval; int idx; DPRINTF("%s: addr %08x\n", __func__, addr); retval = 0xFFFFFFFF; if (addr & 0xF) { return retval; } addr = addr & 0xFFF0; idx = addr >> 5; if (addr & 0x10) { /* EXDE / IFEDE / IEEDE */ retval = read_IRQreg_ide(opp, idx); } else { /* EXVP / IFEVP / IEEVP */ retval = read_IRQreg_ipvp(opp, idx); } DPRINTF("%s: => %08x\n", __func__, retval); return retval; } | 26,827 |
1 | SchroFrame *ff_create_schro_frame(AVCodecContext *avccontext, SchroFrameFormat schro_frame_fmt) { AVPicture *p_pic; SchroFrame *p_frame; int y_width, uv_width; int y_height, uv_height; int i; y_width = avccontext->width; y_height = avccontext->height; uv_width = y_width >> (SCHRO_FRAME_FORMAT_H_SHIFT(schro_frame_fmt)); uv_height = y_height >> (SCHRO_FRAME_FORMAT_V_SHIFT(schro_frame_fmt)); p_pic = av_mallocz(sizeof(AVPicture)); avpicture_alloc(p_pic, avccontext->pix_fmt, y_width, y_height); p_frame = schro_frame_new(); p_frame->format = schro_frame_fmt; p_frame->width = y_width; p_frame->height = y_height; schro_frame_set_free_callback(p_frame, free_schro_frame, (void *)p_pic); for (i = 0; i < 3; ++i) { p_frame->components[i].width = i ? uv_width : y_width; p_frame->components[i].stride = p_pic->linesize[i]; p_frame->components[i].height = i ? uv_height : y_height; p_frame->components[i].length = p_frame->components[i].stride * p_frame->components[i].height; p_frame->components[i].data = p_pic->data[i]; if (i) { p_frame->components[i].v_shift = SCHRO_FRAME_FORMAT_V_SHIFT(p_frame->format); p_frame->components[i].h_shift = SCHRO_FRAME_FORMAT_H_SHIFT(p_frame->format); } } return p_frame; } | 26,828 |
0 | static int ffm_seek(AVFormatContext *s, int stream_index, int64_t wanted_pts, int flags) { FFMContext *ffm = s->priv_data; int64_t pos_min, pos_max, pos; int64_t pts_min, pts_max, pts; double pos1; av_dlog(s, "wanted_pts=%0.6f\n", wanted_pts / 1000000.0); /* find the position using linear interpolation (better than dichotomy in typical cases) */ if (ffm->write_index && ffm->write_index < ffm->file_size) { if (get_dts(s, FFM_PACKET_SIZE) < wanted_pts) { pos_min = FFM_PACKET_SIZE; pos_max = ffm->write_index - FFM_PACKET_SIZE; } else { pos_min = ffm->write_index; pos_max = ffm->file_size - FFM_PACKET_SIZE; } } else { pos_min = FFM_PACKET_SIZE; pos_max = ffm->file_size - FFM_PACKET_SIZE; } while (pos_min <= pos_max) { pts_min = get_dts(s, pos_min); pts_max = get_dts(s, pos_max); if (pts_min > wanted_pts || pts_max <= wanted_pts) { pos = pts_min > wanted_pts ? pos_min : pos_max; goto found; } /* linear interpolation */ pos1 = (double)(pos_max - pos_min) * (double)(wanted_pts - pts_min) / (double)(pts_max - pts_min); pos = (((int64_t)pos1) / FFM_PACKET_SIZE) * FFM_PACKET_SIZE; if (pos <= pos_min) pos = pos_min; else if (pos >= pos_max) pos = pos_max; pts = get_dts(s, pos); /* check if we are lucky */ if (pts == wanted_pts) { goto found; } else if (pts > wanted_pts) { pos_max = pos - FFM_PACKET_SIZE; } else { pos_min = pos + FFM_PACKET_SIZE; } } pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max; found: if (ffm_seek1(s, pos) < 0) return -1; /* reset read state */ ffm->read_state = READ_HEADER; ffm->packet_ptr = ffm->packet; ffm->packet_end = ffm->packet; ffm->first_packet = 1; return 0; } | 26,829 |
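The seek above narrows [pos_min, pos_max] by linear interpolation on timestamps instead of plain bisection, which converges faster when DTS grows roughly linearly with file position. One detail worth flagging: the entry truncates the scaled offset to a packet boundary without adding pos_min back. A sketch of one refinement step in the textbook form (which does add pos_min), with FFM's fixed packet size assumed:

#include <stdint.h>
#include <stdio.h>

#define PACKET_SIZE 4096  /* stand-in for FFM_PACKET_SIZE */

/* Guess where wanted_pts falls by assuming pts is linear in position,
 * then snap the guess to a packet boundary and clamp into range. */
static int64_t interp_pos(int64_t pos_min, int64_t pos_max,
                          int64_t pts_min, int64_t pts_max,
                          int64_t wanted_pts)
{
    double frac = (double)(wanted_pts - pts_min) / (double)(pts_max - pts_min);
    int64_t pos = pos_min + (int64_t)((double)(pos_max - pos_min) * frac);
    pos = (pos / PACKET_SIZE) * PACKET_SIZE;
    if (pos < pos_min) pos = pos_min;
    if (pos > pos_max) pos = pos_max;
    return pos;
}

int main(void)
{
    /* 10 s of media over 40960 packets: ask for t = 2.5 s. */
    printf("%lld\n", (long long)interp_pos(PACKET_SIZE,
                                           (int64_t)40960 * PACKET_SIZE,
                                           0, 10000000, 2500000));
    return 0;
}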
1 | static void moxie_cpu_realizefn(DeviceState *dev, Error **errp) { MoxieCPU *cpu = MOXIE_CPU(dev); MoxieCPUClass *mcc = MOXIE_CPU_GET_CLASS(dev); cpu_reset(CPU(cpu)); mcc->parent_realize(dev, errp); } | 26,830 |
1 | int ga_install_service(const char *path, const char *logfile) { SC_HANDLE manager; SC_HANDLE service; TCHAR cmdline[MAX_PATH]; if (GetModuleFileName(NULL, cmdline, MAX_PATH) == 0) { printf_win_error("No full path to service's executable"); return EXIT_FAILURE; } _snprintf(cmdline, MAX_PATH - strlen(cmdline), "%s -d", cmdline); if (path) { _snprintf(cmdline, MAX_PATH - strlen(cmdline), "%s -p %s", cmdline, path); } if (logfile) { _snprintf(cmdline, MAX_PATH - strlen(cmdline), "%s -l %s -v", cmdline, logfile); } g_debug("service's cmdline: %s", cmdline); manager = OpenSCManager(NULL, NULL, SC_MANAGER_ALL_ACCESS); if (manager == NULL) { printf_win_error("No handle to service control manager"); return EXIT_FAILURE; } service = CreateService(manager, QGA_SERVICE_NAME, QGA_SERVICE_DISPLAY_NAME, SERVICE_ALL_ACCESS, SERVICE_WIN32_OWN_PROCESS, SERVICE_AUTO_START, SERVICE_ERROR_NORMAL, cmdline, NULL, NULL, NULL, NULL, NULL); if (service) { SERVICE_DESCRIPTION desc = { (char *)QGA_SERVICE_DESCRIPTION }; ChangeServiceConfig2(service, SERVICE_CONFIG_DESCRIPTION, &desc); printf("Service was installed successfully.\n"); } else { printf_win_error("Failed to install service"); } CloseServiceHandle(service); CloseServiceHandle(manager); return (service == NULL); } | 26,831 |
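A note on the entry above: it grows cmdline by passing the same buffer as both destination and %s source (_snprintf(cmdline, ..., "%s -d", cmdline)), and the sprintf family has undefined behavior when source and destination overlap; the size argument also shrinks by strlen of the destination rather than by the space remaining. A hedged sketch of the append-style alternative -- illustrative only, not the agent's actual fix:

#include <stdio.h>
#include <string.h>

#define CMD_MAX 260  /* stand-in for MAX_PATH */

/* Append formatted text at the current end of buf, never reading buf as
 * a printf source and always bounding by the space actually left. */
static int append_fmt(char *buf, size_t cap, const char *fmt, const char *arg)
{
    size_t used = strlen(buf);
    if (used >= cap)
        return -1;
    return snprintf(buf + used, cap - used, fmt, arg);
}

int main(void)
{
    char cmdline[CMD_MAX] = "C:\\qga.exe";
    append_fmt(cmdline, sizeof(cmdline), "%s", " -d");
    append_fmt(cmdline, sizeof(cmdline), " -p %s", "C:\\pipe");
    append_fmt(cmdline, sizeof(cmdline), " -l %s -v", "C:\\qga.log");
    puts(cmdline);  /* C:\qga.exe -d -p C:\pipe -l C:\qga.log -v */
    return 0;
}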
1 | static inline void RENAME(rgb16tobgr24)(const uint8_t *src, uint8_t *dst, int src_size) { const uint16_t *end; const uint16_t *mm_end; uint8_t *d = (uint8_t *)dst; const uint16_t *s = (const uint16_t *)src; end = s + src_size/2; __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory"); mm_end = end - 7; while (s < mm_end) { __asm__ volatile( PREFETCH" 32%1 \n\t" "movq %1, %%mm0 \n\t" "movq %1, %%mm1 \n\t" "movq %1, %%mm2 \n\t" "pand %2, %%mm0 \n\t" "pand %3, %%mm1 \n\t" "pand %4, %%mm2 \n\t" "psllq $3, %%mm0 \n\t" "psrlq $3, %%mm1 \n\t" "psrlq $8, %%mm2 \n\t" "movq %%mm0, %%mm3 \n\t" "movq %%mm1, %%mm4 \n\t" "movq %%mm2, %%mm5 \n\t" "punpcklwd %5, %%mm0 \n\t" "punpcklwd %5, %%mm1 \n\t" "punpcklwd %5, %%mm2 \n\t" "punpckhwd %5, %%mm3 \n\t" "punpckhwd %5, %%mm4 \n\t" "punpckhwd %5, %%mm5 \n\t" "psllq $8, %%mm1 \n\t" "psllq $16, %%mm2 \n\t" "por %%mm1, %%mm0 \n\t" "por %%mm2, %%mm0 \n\t" "psllq $8, %%mm4 \n\t" "psllq $16, %%mm5 \n\t" "por %%mm4, %%mm3 \n\t" "por %%mm5, %%mm3 \n\t" "movq %%mm0, %%mm6 \n\t" "movq %%mm3, %%mm7 \n\t" "movq 8%1, %%mm0 \n\t" "movq 8%1, %%mm1 \n\t" "movq 8%1, %%mm2 \n\t" "pand %2, %%mm0 \n\t" "pand %3, %%mm1 \n\t" "pand %4, %%mm2 \n\t" "psllq $3, %%mm0 \n\t" "psrlq $3, %%mm1 \n\t" "psrlq $8, %%mm2 \n\t" "movq %%mm0, %%mm3 \n\t" "movq %%mm1, %%mm4 \n\t" "movq %%mm2, %%mm5 \n\t" "punpcklwd %5, %%mm0 \n\t" "punpcklwd %5, %%mm1 \n\t" "punpcklwd %5, %%mm2 \n\t" "punpckhwd %5, %%mm3 \n\t" "punpckhwd %5, %%mm4 \n\t" "punpckhwd %5, %%mm5 \n\t" "psllq $8, %%mm1 \n\t" "psllq $16, %%mm2 \n\t" "por %%mm1, %%mm0 \n\t" "por %%mm2, %%mm0 \n\t" "psllq $8, %%mm4 \n\t" "psllq $16, %%mm5 \n\t" "por %%mm4, %%mm3 \n\t" "por %%mm5, %%mm3 \n\t" :"=m"(*d) :"m"(*s),"m"(mask16b),"m"(mask16g),"m"(mask16r),"m"(mmx_null) :"memory"); /* borrowed 32 to 24 */ __asm__ volatile( "movq %%mm0, %%mm4 \n\t" "movq %%mm3, %%mm5 \n\t" "movq %%mm6, %%mm0 \n\t" "movq %%mm7, %%mm1 \n\t" "movq %%mm4, %%mm6 \n\t" "movq %%mm5, %%mm7 \n\t" "movq %%mm0, %%mm2 \n\t" "movq %%mm1, %%mm3 \n\t" STORE_BGR24_MMX :"=m"(*d) :"m"(*s) :"memory"); d += 24; s += 8; } __asm__ volatile(SFENCE:::"memory"); __asm__ volatile(EMMS:::"memory"); while (s < end) { register uint16_t bgr; bgr = *s++; *d++ = (bgr&0x1F)<<3; *d++ = (bgr&0x7E0)>>3; *d++ = (bgr&0xF800)>>8; } } | 26,832 |
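The scalar tail of the conversion above unpacks RGB565 by masking and shifting: the low 5 bits are scaled with <<3, the middle 6 bits with (px & 0x7E0) >> 3, the top 5 bits with (px & 0xF800) >> 8. As written the low output bits stay zero, so full-scale 0x1F maps to 0xF8 rather than 0xFF; a common refinement replicates the top bits downward. A plain-C sketch of both variants (no MMX):

#include <stdint.h>
#include <stdio.h>

/* Truncating expansion, as in the scalar loop above: low bits are zero. */
static void rgb565_to_888(uint16_t px, uint8_t out[3])
{
    out[0] = (px & 0x001F) << 3;   /* 5 bits -> bits 7..3 */
    out[1] = (px & 0x07E0) >> 3;   /* 6 bits -> bits 7..2 */
    out[2] = (px & 0xF800) >> 8;   /* 5 bits -> bits 7..3 */
}

/* Bit-replicating variant: 0x1F -> 0xFF, giving a full-range ramp. */
static void rgb565_to_888_repl(uint16_t px, uint8_t out[3])
{
    unsigned c0 = px & 0x1F, c1 = (px >> 5) & 0x3F, c2 = (px >> 11) & 0x1F;
    out[0] = (c0 << 3) | (c0 >> 2);
    out[1] = (c1 << 2) | (c1 >> 4);
    out[2] = (c2 << 3) | (c2 >> 2);
}

int main(void)
{
    uint8_t a[3], b[3];
    rgb565_to_888(0xFFFF, a);
    rgb565_to_888_repl(0xFFFF, b);
    printf("%02x%02x%02x vs %02x%02x%02x\n",
           a[0], a[1], a[2], b[0], b[1], b[2]);  /* f8fcf8 vs ffffff */
    return 0;
}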
1 | static void thread_pool_cancel(BlockDriverAIOCB *acb) { ThreadPoolElement *elem = (ThreadPoolElement *)acb; ThreadPool *pool = elem->pool; trace_thread_pool_cancel(elem, elem->common.opaque); qemu_mutex_lock(&pool->lock); if (elem->state == THREAD_QUEUED && /* No thread has yet started working on elem. we can try to "steal" * the item from the worker if we can get a signal from the * semaphore. Because this is non-blocking, we can do it with * the lock taken and ensure that elem will remain THREAD_QUEUED. */ qemu_sem_timedwait(&pool->sem, 0) == 0) { QTAILQ_REMOVE(&pool->request_list, elem, reqs); elem->state = THREAD_CANCELED; event_notifier_set(&pool->notifier); } else { pool->pending_cancellations++; while (elem->state != THREAD_CANCELED && elem->state != THREAD_DONE) { qemu_cond_wait(&pool->check_cancel, &pool->lock); } pool->pending_cancellations--; } qemu_mutex_unlock(&pool->lock); } | 26,833 |
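The cancellation above "steals" a still-queued request by doing a zero-timeout semaphore wait under the pool lock: if the down succeeds, the wakeup a worker would have needed to dequeue the element has been consumed, so the element can be unlinked while still THREAD_QUEUED; otherwise the canceller waits for the worker to finish. A minimal POSIX sketch of the stealing half, with sem_trywait standing in for qemu_sem_timedwait(&sem, 0) and illustrative types:

#include <pthread.h>
#include <semaphore.h>
#include <stdbool.h>

enum { THREAD_QUEUED, THREAD_ACTIVE, THREAD_DONE, THREAD_CANCELED };

struct work_item { int state; struct work_item *next; };

struct pool {
    pthread_mutex_t lock;
    sem_t sem;               /* one post per queued item */
    struct work_item *head;  /* request list, workers pop under lock */
};

/* Returns true if the item was removed before any worker claimed it. */
static bool try_steal(struct pool *p, struct work_item *it)
{
    bool stolen = false;
    pthread_mutex_lock(&p->lock);
    if (it->state == THREAD_QUEUED && sem_trywait(&p->sem) == 0) {
        /* Non-blocking down succeeded while holding the lock, so 'it'
         * is guaranteed to stay THREAD_QUEUED; unlink it. */
        struct work_item **pp = &p->head;
        while (*pp && *pp != it)
            pp = &(*pp)->next;
        if (*pp)
            *pp = it->next;
        it->state = THREAD_CANCELED;
        stolen = true;
    }
    pthread_mutex_unlock(&p->lock);
    return stolen;
}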
1 | static void vhost_dev_unassign_memory(struct vhost_dev *dev, uint64_t start_addr, uint64_t size) { int from, to, n = dev->mem->nregions; /* Track overlapping/split regions for sanity checking. */ int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0; for (from = 0, to = 0; from < n; ++from, ++to) { struct vhost_memory_region *reg = dev->mem->regions + to; uint64_t reglast; uint64_t memlast; uint64_t change; /* clone old region */ if (to != from) { memcpy(reg, dev->mem->regions + from, sizeof *reg); } /* No overlap is simple */ if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size, start_addr, size)) { continue; } /* Split only happens if supplied region * is in the middle of an existing one. Thus it can not * overlap with any other existing region. */ assert(!split); reglast = range_get_last(reg->guest_phys_addr, reg->memory_size); memlast = range_get_last(start_addr, size); /* Remove whole region */ if (start_addr <= reg->guest_phys_addr && memlast >= reglast) { --dev->mem->nregions; --to; assert(to >= 0); ++overlap_middle; continue; } /* Shrink region */ if (memlast >= reglast) { reg->memory_size = start_addr - reg->guest_phys_addr; assert(reg->memory_size); assert(!overlap_end); ++overlap_end; continue; } /* Shift region */ if (start_addr <= reg->guest_phys_addr) { change = memlast + 1 - reg->guest_phys_addr; reg->memory_size -= change; reg->guest_phys_addr += change; reg->userspace_addr += change; assert(reg->memory_size); assert(!overlap_start); ++overlap_start; continue; } /* This only happens if supplied region * is in the middle of an existing one. Thus it can not * overlap with any other existing region. */ assert(!overlap_start); assert(!overlap_end); assert(!overlap_middle); /* Split region: shrink first part, shift second part. */ memcpy(dev->mem->regions + n, reg, sizeof *reg); reg->memory_size = start_addr - reg->guest_phys_addr; assert(reg->memory_size); change = memlast + 1 - reg->guest_phys_addr; reg = dev->mem->regions + n; reg->memory_size -= change; assert(reg->memory_size); reg->guest_phys_addr += change; reg->userspace_addr += change; /* Never add more than 1 region */ assert(dev->mem->nregions == n); ++dev->mem->nregions; ++split; } } | 26,834 |
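The unassign walk above sorts every overlap into one of four cases -- remove the whole region, drop its tail, advance its head, or split it in the middle -- using two tiny range predicates. Sketches of those helpers with the semantics the entry relies on (last covered byte is inclusive; assumed to mirror QEMU's range helpers):

#include <stdint.h>
#include <stdbool.h>

/* Last address covered by [start, start + size), inclusive. */
static uint64_t range_get_last(uint64_t start, uint64_t size)
{
    return start + size - 1;
}

/* Do [s1, s1 + n1) and [s2, s2 + n2) share at least one byte? */
static bool ranges_overlap(uint64_t s1, uint64_t n1,
                           uint64_t s2, uint64_t n2)
{
    return !(range_get_last(s1, n1) < s2 || range_get_last(s2, n2) < s1);
}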
1 | static int pfpu_decode_insn(MilkymistPFPUState *s) { uint32_t pc = s->regs[R_PC]; uint32_t insn = s->microcode[pc]; uint32_t reg_a = (insn >> 18) & 0x7f; uint32_t reg_b = (insn >> 11) & 0x7f; uint32_t op = (insn >> 7) & 0xf; uint32_t reg_d = insn & 0x7f; uint32_t r; int latency = 0; switch (op) { case OP_NOP: break; case OP_FADD: { float a = REINTERPRET_CAST(float, s->gp_regs[reg_a]); float b = REINTERPRET_CAST(float, s->gp_regs[reg_b]); float t = a + b; r = REINTERPRET_CAST(uint32_t, t); latency = LATENCY_FADD; D_EXEC(qemu_log("ADD a=%f b=%f t=%f, r=%08x\n", a, b, t, r)); } break; case OP_FSUB: { float a = REINTERPRET_CAST(float, s->gp_regs[reg_a]); float b = REINTERPRET_CAST(float, s->gp_regs[reg_b]); float t = a - b; r = REINTERPRET_CAST(uint32_t, t); latency = LATENCY_FSUB; D_EXEC(qemu_log("SUB a=%f b=%f t=%f, r=%08x\n", a, b, t, r)); } break; case OP_FMUL: { float a = REINTERPRET_CAST(float, s->gp_regs[reg_a]); float b = REINTERPRET_CAST(float, s->gp_regs[reg_b]); float t = a * b; r = REINTERPRET_CAST(uint32_t, t); latency = LATENCY_FMUL; D_EXEC(qemu_log("MUL a=%f b=%f t=%f, r=%08x\n", a, b, t, r)); } break; case OP_FABS: { float a = REINTERPRET_CAST(float, s->gp_regs[reg_a]); float t = fabsf(a); r = REINTERPRET_CAST(uint32_t, t); latency = LATENCY_FABS; D_EXEC(qemu_log("ABS a=%f t=%f, r=%08x\n", a, t, r)); } break; case OP_F2I: { float a = REINTERPRET_CAST(float, s->gp_regs[reg_a]); int32_t t = a; r = REINTERPRET_CAST(uint32_t, t); latency = LATENCY_F2I; D_EXEC(qemu_log("F2I a=%f t=%d, r=%08x\n", a, t, r)); } break; case OP_I2F: { int32_t a = REINTERPRET_CAST(int32_t, s->gp_regs[reg_a]); float t = a; r = REINTERPRET_CAST(uint32_t, t); latency = LATENCY_I2F; D_EXEC(qemu_log("I2F a=%08x t=%f, r=%08x\n", a, t, r)); } break; case OP_VECTOUT: { uint32_t a = cpu_to_be32(s->gp_regs[reg_a]); uint32_t b = cpu_to_be32(s->gp_regs[reg_b]); target_phys_addr_t dma_ptr = get_dma_address(s->regs[R_MESHBASE], s->gp_regs[GPR_X], s->gp_regs[GPR_Y]); cpu_physical_memory_write(dma_ptr, (uint8_t *)&a, 4); cpu_physical_memory_write(dma_ptr + 4, (uint8_t *)&b, 4); s->regs[R_LASTDMA] = dma_ptr + 4; D_EXEC(qemu_log("VECTOUT a=%08x b=%08x dma=%08x\n", a, b, dma_ptr)); trace_milkymist_pfpu_vectout(a, b, dma_ptr); } break; case OP_SIN: { int32_t a = REINTERPRET_CAST(int32_t, s->gp_regs[reg_a]); float t = sinf(a * (1.0f / (M_PI * 4096.0f))); r = REINTERPRET_CAST(uint32_t, t); latency = LATENCY_SIN; D_EXEC(qemu_log("SIN a=%d t=%f, r=%08x\n", a, t, r)); } break; case OP_COS: { int32_t a = REINTERPRET_CAST(int32_t, s->gp_regs[reg_a]); float t = cosf(a * (1.0f / (M_PI * 4096.0f))); r = REINTERPRET_CAST(uint32_t, t); latency = LATENCY_COS; D_EXEC(qemu_log("COS a=%d t=%f, r=%08x\n", a, t, r)); } break; case OP_ABOVE: { float a = REINTERPRET_CAST(float, s->gp_regs[reg_a]); float b = REINTERPRET_CAST(float, s->gp_regs[reg_b]); float t = (a > b) ? 1.0f : 0.0f; r = REINTERPRET_CAST(uint32_t, t); latency = LATENCY_ABOVE; D_EXEC(qemu_log("ABOVE a=%f b=%f t=%f, r=%08x\n", a, b, t, r)); } break; case OP_EQUAL: { float a = REINTERPRET_CAST(float, s->gp_regs[reg_a]); float b = REINTERPRET_CAST(float, s->gp_regs[reg_b]); float t = (a == b) ? 
1.0f : 0.0f; r = REINTERPRET_CAST(uint32_t, t); latency = LATENCY_EQUAL; D_EXEC(qemu_log("EQUAL a=%f b=%f t=%f, r=%08x\n", a, b, t, r)); } break; case OP_COPY: { r = s->gp_regs[reg_a]; latency = LATENCY_COPY; D_EXEC(qemu_log("COPY")); } break; case OP_IF: { float a = REINTERPRET_CAST(float, s->gp_regs[reg_a]); float b = REINTERPRET_CAST(float, s->gp_regs[reg_b]); uint32_t f = s->gp_regs[GPR_FLAGS]; float t = (f != 0) ? a : b; r = REINTERPRET_CAST(uint32_t, t); latency = LATENCY_IF; D_EXEC(qemu_log("IF f=%u a=%f b=%f t=%f, r=%08x\n", f, a, b, t, r)); } break; case OP_TSIGN: { float a = REINTERPRET_CAST(float, s->gp_regs[reg_a]); float b = REINTERPRET_CAST(float, s->gp_regs[reg_b]); float t = (b < 0) ? -a : a; r = REINTERPRET_CAST(uint32_t, t); latency = LATENCY_TSIGN; D_EXEC(qemu_log("TSIGN a=%f b=%f t=%f, r=%08x\n", a, b, t, r)); } break; case OP_QUAKE: { uint32_t a = s->gp_regs[reg_a]; r = 0x5f3759df - (a >> 1); latency = LATENCY_QUAKE; D_EXEC(qemu_log("QUAKE a=%d r=%08x\n", a, r)); } break; default: error_report("milkymist_pfpu: unknown opcode %d\n", op); break; } if (!reg_d) { D_EXEC(qemu_log("%04d %8s R%03d, R%03d <L=%d, E=%04d>\n", s->regs[R_PC], opcode_to_str[op], reg_a, reg_b, latency, s->regs[R_PC] + latency)); } else { D_EXEC(qemu_log("%04d %8s R%03d, R%03d <L=%d, E=%04d> -> R%03d\n", s->regs[R_PC], opcode_to_str[op], reg_a, reg_b, latency, s->regs[R_PC] + latency, reg_d)); } if (op == OP_VECTOUT) { return 0; } /* store output for this cycle */ if (reg_d) { uint32_t val = output_queue_remove(s); D_EXEC(qemu_log("R%03d <- 0x%08x\n", reg_d, val)); s->gp_regs[reg_d] = val; } output_queue_advance(s); /* store op output */ if (op != OP_NOP) { output_queue_insert(s, r, latency-1); } /* advance PC */ s->regs[R_PC]++; return 1; }; | 26,835 |
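OP_QUAKE in the entry above is the well-known fast reciprocal square root bit trick: 0x5f3759df - (i >> 1) applied to a float's bit pattern yields a first approximation of 1/sqrt(x). The device model stops at the raw estimate; the classic software version adds a Newton-Raphson step, sketched here assuming 32-bit IEEE-754 floats:

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <math.h>

static float fast_rsqrt(float x)
{
    uint32_t i;
    float y;
    memcpy(&i, &x, sizeof(i));          /* reinterpret the float's bits */
    i = 0x5f3759df - (i >> 1);          /* magic initial estimate */
    memcpy(&y, &i, sizeof(y));
    y = y * (1.5f - 0.5f * x * y * y);  /* one Newton-Raphson refinement */
    return y;
}

int main(void)
{
    printf("fast=%f exact=%f\n", fast_rsqrt(2.0f), 1.0 / sqrt(2.0));
    return 0;
}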
1 | double av_int2dbl(int64_t v){ if(v+v > 0xFFEULL<<52) return NAN; return ldexp(((v&((1LL<<52)-1)) + (1LL<<52)) * (v>>63|1), (v>>52&0x7FF)-1075); } | 26,837 |
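av_int2dbl above decodes an IEEE-754 double from its 64-bit pattern by hand: the guard maps NaN patterns (exponent 0x7FF, nonzero mantissa) to NAN -- doubling v drops the sign bit before the compare -- the implicit leading 1 is re-added at bit 52, the sign comes from v >> 63 | 1, and the bias is 1075 = 1023 + 52 because the 52-bit mantissa is treated as an integer. Where type-punning is acceptable and the platform double is known to be binary64, the memcpy form is the usual alternative (a sketch, not the library's API):

#include <stdint.h>
#include <string.h>

static double int2dbl_memcpy(int64_t v)
{
    double d;
    memcpy(&d, &v, sizeof(d));  /* same bits, no field arithmetic */
    return d;
}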
1 | static inline int mxf_read_utf16_string(AVIOContext *pb, int size, char** str, int be) { int ret; size_t buf_size; if (size < 0) return AVERROR(EINVAL); buf_size = size + size / 2 + 1; *str = av_malloc(buf_size); if (!*str) return AVERROR(ENOMEM); if (be) ret = avio_get_str16be(pb, size, *str, buf_size); else ret = avio_get_str16le(pb, size, *str, buf_size); if (ret < 0) { av_freep(str); return ret; } return ret; } | 26,838 |
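The size + size / 2 + 1 allocation above budgets for UTF-16-to-UTF-8 growth: a 2-byte UTF-16 code unit can expand to at most 3 UTF-8 bytes (U+0800..U+FFFF), surrogate pairs go 4 bytes to 4, so 3/2 is the worst-case ratio, plus one byte for the terminator; the size < 0 check keeps the arithmetic from wrapping. A sketch of the same bound with an explicit overflow guard:

#include <stddef.h>
#include <stdint.h>

/* Worst case: each 2-byte UTF-16 unit becomes 3 UTF-8 bytes; surrogate
 * pairs (4 -> 4) never exceed that ratio. Returns 0 if the sum would wrap. */
static size_t utf8_budget_for_utf16(size_t utf16_bytes)
{
    if (utf16_bytes > (SIZE_MAX - 1) / 3 * 2)
        return 0;
    return utf16_bytes + utf16_bytes / 2 + 1;  /* +1 for the NUL */
}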
1 | static void synth_block_fcb_acb(WMAVoiceContext *s, GetBitContext *gb, int block_idx, int size, int block_pitch_sh2, const struct frame_type_desc *frame_desc, float *excitation) { static const float gain_coeff[6] = { 0.8169, -0.06545, 0.1726, 0.0185, -0.0359, 0.0458 }; float pulses[MAX_FRAMESIZE / 2], pred_err, acb_gain, fcb_gain; int n, idx, gain_weight; AMRFixed fcb; assert(size <= MAX_FRAMESIZE / 2); memset(pulses, 0, sizeof(*pulses) * size); fcb.pitch_lag = block_pitch_sh2 >> 2; fcb.pitch_fac = 1.0; fcb.no_repeat_mask = 0; fcb.n = 0; /* For the other frame types, this is where we apply the innovation * (fixed) codebook pulses of the speech signal. */ if (frame_desc->fcb_type == FCB_TYPE_AW_PULSES) { aw_pulse_set1(s, gb, block_idx, &fcb); aw_pulse_set2(s, gb, block_idx, &fcb); } else /* FCB_TYPE_EXC_PULSES */ { int offset_nbits = 5 - frame_desc->log_n_blocks; fcb.no_repeat_mask = -1; /* similar to ff_decode_10_pulses_35bits(), but with single pulses * (instead of double) for a subset of pulses */ for (n = 0; n < 5; n++) { float sign; int pos1, pos2; sign = get_bits1(gb) ? 1.0 : -1.0; pos1 = get_bits(gb, offset_nbits); fcb.x[fcb.n] = n + 5 * pos1; fcb.y[fcb.n++] = sign; if (n < frame_desc->dbl_pulses) { pos2 = get_bits(gb, offset_nbits); fcb.x[fcb.n] = n + 5 * pos2; fcb.y[fcb.n++] = (pos1 < pos2) ? -sign : sign; } } } ff_set_fixed_vector(pulses, &fcb, 1.0, size); /* Calculate gain for adaptive & fixed codebook signal. * see ff_amr_set_fixed_gain(). */ idx = get_bits(gb, 7); fcb_gain = expf(avpriv_scalarproduct_float_c(s->gain_pred_err, gain_coeff, 6) - 5.2409161640 + wmavoice_gain_codebook_fcb[idx]); acb_gain = wmavoice_gain_codebook_acb[idx]; pred_err = av_clipf(wmavoice_gain_codebook_fcb[idx], -2.9957322736 /* log(0.05) */, 1.6094379124 /* log(5.0) */); gain_weight = 8 >> frame_desc->log_n_blocks; memmove(&s->gain_pred_err[gain_weight], s->gain_pred_err, sizeof(*s->gain_pred_err) * (6 - gain_weight)); for (n = 0; n < gain_weight; n++) s->gain_pred_err[n] = pred_err; /* Calculation of adaptive codebook */ if (frame_desc->acb_type == ACB_TYPE_ASYMMETRIC) { int len; for (n = 0; n < size; n += len) { int next_idx_sh16; int abs_idx = block_idx * size + n; int pitch_sh16 = (s->last_pitch_val << 16) + s->pitch_diff_sh16 * abs_idx; int pitch = (pitch_sh16 + 0x6FFF) >> 16; int idx_sh16 = ((pitch << 16) - pitch_sh16) * 8 + 0x58000; idx = idx_sh16 >> 16; if (s->pitch_diff_sh16) { if (s->pitch_diff_sh16 > 0) { next_idx_sh16 = (idx_sh16) &~ 0xFFFF; } else next_idx_sh16 = (idx_sh16 + 0x10000) &~ 0xFFFF; len = av_clip((idx_sh16 - next_idx_sh16) / s->pitch_diff_sh16 / 8, 1, size - n); } else len = size; ff_acelp_interpolatef(&excitation[n], &excitation[n - pitch], wmavoice_ipol1_coeffs, 17, idx, 9, len); } } else /* ACB_TYPE_HAMMING */ { int block_pitch = block_pitch_sh2 >> 2; idx = block_pitch_sh2 & 3; if (idx) { ff_acelp_interpolatef(excitation, &excitation[-block_pitch], wmavoice_ipol2_coeffs, 4, idx, 8, size); } else av_memcpy_backptr((uint8_t *) excitation, sizeof(float) * block_pitch, sizeof(float) * size); } /* Interpolate ACB/FCB and use as excitation signal */ ff_weighted_vector_sumf(excitation, excitation, pulses, acb_gain, fcb_gain, size); } | 26,839 |
1 | static bool serial_isa_init(ISABus *bus, int index, CharDriverState *chr) { DeviceState *dev; ISADevice *isadev; isadev = isa_try_create(bus, TYPE_ISA_SERIAL); if (!isadev) { return false; } dev = DEVICE(isadev); qdev_prop_set_uint32(dev, "index", index); qdev_prop_set_chr(dev, "chardev", chr); if (qdev_init(dev) < 0) { return false; } return true; } | 26,840
0 | void url_split(char *proto, int proto_size, char *hostname, int hostname_size, int *port_ptr, char *path, int path_size, const char *url) { const char *p; char *q; int port; port = -1; p = url; q = proto; while (*p != ':' && *p != '\0') { if ((q - proto) < proto_size - 1) *q++ = *p; p++; } if (proto_size > 0) *q = '\0'; if (*p == '\0') { if (proto_size > 0) proto[0] = '\0'; if (hostname_size > 0) hostname[0] = '\0'; p = url; } else { p++; if (*p == '/') p++; if (*p == '/') p++; q = hostname; while (*p != ':' && *p != '/' && *p != '?' && *p != '\0') { if ((q - hostname) < hostname_size - 1) *q++ = *p; p++; } if (hostname_size > 0) *q = '\0'; if (*p == ':') { p++; port = strtoul(p, (char **)&p, 10); } } if (port_ptr) *port_ptr = port; pstrcpy(path, path_size, p); } | 26,842 |
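A usage sketch for the splitter above, showing the field carving on a typical URL (the port stays -1 when absent, and every output buffer is NUL-terminated by the function):

#include <stdio.h>

/* Prototype of the function in the entry above. */
void url_split(char *proto, int proto_size, char *hostname, int hostname_size,
               int *port_ptr, char *path, int path_size, const char *url);

int main(void)
{
    char proto[16], host[128], path[256];
    int port;
    url_split(proto, (int)sizeof(proto), host, (int)sizeof(host),
              &port, path, (int)sizeof(path),
              "rtsp://media.example.com:554/stream");
    /* Expected: proto="rtsp" host="media.example.com" port=554 path="/stream" */
    printf("%s %s %d %s\n", proto, host, port, path);
    return 0;
}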
0 | static unsigned long iv_decode_frame(Indeo3DecodeContext *s, const uint8_t *buf, int buf_size) { unsigned int image_width, image_height, chroma_width, chroma_height; unsigned long flags, cb_offset, data_size, y_offset, v_offset, u_offset, mc_vector_count; const uint8_t *hdr_pos, *buf_pos; buf_pos = buf; buf_pos += 18; /* skip OS header (16 bytes) and version number */ flags = bytestream_get_le16(&buf_pos); data_size = bytestream_get_le32(&buf_pos); cb_offset = *buf_pos++; buf_pos += 3; /* skip reserved byte and checksum */ image_height = bytestream_get_le16(&buf_pos); image_width = bytestream_get_le16(&buf_pos); if(avcodec_check_dimensions(NULL, image_width, image_height)) return -1; chroma_height = ((image_height >> 2) + 3) & 0x7ffc; chroma_width = ((image_width >> 2) + 3) & 0x7ffc; y_offset = bytestream_get_le32(&buf_pos); v_offset = bytestream_get_le32(&buf_pos); u_offset = bytestream_get_le32(&buf_pos); buf_pos += 4; /* reserved */ hdr_pos = buf_pos; if(data_size == 0x80) return 4; if(flags & 0x200) { s->cur_frame = s->iv_frame + 1; s->ref_frame = s->iv_frame; } else { s->cur_frame = s->iv_frame; s->ref_frame = s->iv_frame + 1; } buf_pos = buf + 16 + y_offset; mc_vector_count = bytestream_get_le32(&buf_pos); iv_Decode_Chunk(s, s->cur_frame->Ybuf, s->ref_frame->Ybuf, image_width, image_height, buf_pos + mc_vector_count * 2, cb_offset, hdr_pos, buf_pos, FFMIN(image_width, 160)); if (!(s->avctx->flags & CODEC_FLAG_GRAY)) { buf_pos = buf + 16 + v_offset; mc_vector_count = bytestream_get_le32(&buf_pos); iv_Decode_Chunk(s, s->cur_frame->Vbuf, s->ref_frame->Vbuf, chroma_width, chroma_height, buf_pos + mc_vector_count * 2, cb_offset, hdr_pos, buf_pos, FFMIN(chroma_width, 40)); buf_pos = buf + 16 + u_offset; mc_vector_count = bytestream_get_le32(&buf_pos); iv_Decode_Chunk(s, s->cur_frame->Ubuf, s->ref_frame->Ubuf, chroma_width, chroma_height, buf_pos + mc_vector_count * 2, cb_offset, hdr_pos, buf_pos, FFMIN(chroma_width, 40)); } return 8; } | 26,843 |
0 | static void rtsp_send_cmd_async (AVFormatContext *s, const char *cmd, RTSPMessageHeader *reply, unsigned char **content_ptr) { RTSPState *rt = s->priv_data; char buf[4096], buf1[1024]; rt->seq++; av_strlcpy(buf, cmd, sizeof(buf)); snprintf(buf1, sizeof(buf1), "CSeq: %d\r\n", rt->seq); av_strlcat(buf, buf1, sizeof(buf)); if (rt->session_id[0] != '\0' && !strstr(cmd, "\nIf-Match:")) { snprintf(buf1, sizeof(buf1), "Session: %s\r\n", rt->session_id); av_strlcat(buf, buf1, sizeof(buf)); } if (rt->auth_b64) av_strlcatf(buf, sizeof(buf), "Authorization: Basic %s\r\n", rt->auth_b64); av_strlcat(buf, "\r\n", sizeof(buf)); dprintf(s, "Sending:\n%s--\n", buf); url_write(rt->rtsp_hd, buf, strlen(buf)); rt->last_cmd_time = av_gettime(); } | 26,844 |
1 | static void apply_channel_coupling(AC3EncodeContext *s) { LOCAL_ALIGNED_16(CoefType, cpl_coords, [AC3_MAX_BLOCKS], [AC3_MAX_CHANNELS][16]); #if CONFIG_AC3ENC_FLOAT LOCAL_ALIGNED_16(int32_t, fixed_cpl_coords, [AC3_MAX_BLOCKS], [AC3_MAX_CHANNELS][16]); #else int32_t (*fixed_cpl_coords)[AC3_MAX_CHANNELS][16] = cpl_coords; #endif int blk, ch, bnd, i, j; CoefSumType energy[AC3_MAX_BLOCKS][AC3_MAX_CHANNELS][16] = {{{0}}}; int cpl_start, num_cpl_coefs; memset(cpl_coords, 0, AC3_MAX_BLOCKS * sizeof(*cpl_coords)); #if CONFIG_AC3ENC_FLOAT memset(fixed_cpl_coords, 0, AC3_MAX_BLOCKS * sizeof(*cpl_coords)); #endif /* align start to 16-byte boundary. align length to multiple of 32. note: coupling start bin % 4 will always be 1 */ cpl_start = s->start_freq[CPL_CH] - 1; num_cpl_coefs = FFALIGN(s->num_cpl_subbands * 12 + 1, 32); cpl_start = FFMIN(256, cpl_start + num_cpl_coefs) - num_cpl_coefs; /* calculate coupling channel from fbw channels */ for (blk = 0; blk < s->num_blocks; blk++) { AC3Block *block = &s->blocks[blk]; CoefType *cpl_coef = &block->mdct_coef[CPL_CH][cpl_start]; if (!block->cpl_in_use) continue; memset(cpl_coef, 0, num_cpl_coefs * sizeof(*cpl_coef)); for (ch = 1; ch <= s->fbw_channels; ch++) { CoefType *ch_coef = &block->mdct_coef[ch][cpl_start]; if (!block->channel_in_cpl[ch]) continue; for (i = 0; i < num_cpl_coefs; i++) cpl_coef[i] += ch_coef[i]; } /* coefficients must be clipped in order to be encoded */ clip_coefficients(&s->dsp, cpl_coef, num_cpl_coefs); } /* calculate energy in each band in coupling channel and each fbw channel */ /* TODO: possibly use SIMD to speed up energy calculation */ bnd = 0; i = s->start_freq[CPL_CH]; while (i < s->cpl_end_freq) { int band_size = s->cpl_band_sizes[bnd]; for (ch = CPL_CH; ch <= s->fbw_channels; ch++) { for (blk = 0; blk < s->num_blocks; blk++) { AC3Block *block = &s->blocks[blk]; if (!block->cpl_in_use || (ch > CPL_CH && !block->channel_in_cpl[ch])) continue; for (j = 0; j < band_size; j++) { CoefType v = block->mdct_coef[ch][i+j]; MAC_COEF(energy[blk][ch][bnd], v, v); } } } i += band_size; bnd++; } /* calculate coupling coordinates for all blocks for all channels */ for (blk = 0; blk < s->num_blocks; blk++) { AC3Block *block = &s->blocks[blk]; if (!block->cpl_in_use) continue; for (ch = 1; ch <= s->fbw_channels; ch++) { if (!block->channel_in_cpl[ch]) continue; for (bnd = 0; bnd < s->num_cpl_bands; bnd++) { cpl_coords[blk][ch][bnd] = calc_cpl_coord(energy[blk][ch][bnd], energy[blk][CPL_CH][bnd]); } } } /* determine which blocks to send new coupling coordinates for */ for (blk = 0; blk < s->num_blocks; blk++) { AC3Block *block = &s->blocks[blk]; AC3Block *block0 = blk ? &s->blocks[blk-1] : NULL; memset(block->new_cpl_coords, 0, sizeof(block->new_cpl_coords)); if (block->cpl_in_use) { /* send new coordinates if this is the first block, if previous * block did not use coupling but this block does, the channels * using coupling has changed from the previous block, or the * coordinate difference from the last block for any channel is * greater than a threshold value. */ if (blk == 0 || !block0->cpl_in_use) { for (ch = 1; ch <= s->fbw_channels; ch++) block->new_cpl_coords[ch] = 1; } else { for (ch = 1; ch <= s->fbw_channels; ch++) { if (!block->channel_in_cpl[ch]) continue; if (!block0->channel_in_cpl[ch]) { block->new_cpl_coords[ch] = 1; } else { CoefSumType coord_diff = 0; for (bnd = 0; bnd < s->num_cpl_bands; bnd++) { coord_diff += FFABS(cpl_coords[blk-1][ch][bnd] - cpl_coords[blk ][ch][bnd]); } coord_diff /= s->num_cpl_bands; if (coord_diff > NEW_CPL_COORD_THRESHOLD) block->new_cpl_coords[ch] = 1; } } } } } /* calculate final coupling coordinates, taking into account reusing of coordinates in successive blocks */ for (bnd = 0; bnd < s->num_cpl_bands; bnd++) { blk = 0; while (blk < s->num_blocks) { int av_uninit(blk1); AC3Block *block = &s->blocks[blk]; if (!block->cpl_in_use) { blk++; continue; } for (ch = 1; ch <= s->fbw_channels; ch++) { CoefSumType energy_ch, energy_cpl; if (!block->channel_in_cpl[ch]) continue; energy_cpl = energy[blk][CPL_CH][bnd]; energy_ch = energy[blk][ch][bnd]; blk1 = blk+1; while (!s->blocks[blk1].new_cpl_coords[ch] && blk1 < s->num_blocks) { if (s->blocks[blk1].cpl_in_use) { energy_cpl += energy[blk1][CPL_CH][bnd]; energy_ch += energy[blk1][ch][bnd]; } blk1++; } cpl_coords[blk][ch][bnd] = calc_cpl_coord(energy_ch, energy_cpl); } blk = blk1; } } /* calculate exponents/mantissas for coupling coordinates */ for (blk = 0; blk < s->num_blocks; blk++) { AC3Block *block = &s->blocks[blk]; if (!block->cpl_in_use) continue; #if CONFIG_AC3ENC_FLOAT s->ac3dsp.float_to_fixed24(fixed_cpl_coords[blk][1], cpl_coords[blk][1], s->fbw_channels * 16); #endif s->ac3dsp.extract_exponents(block->cpl_coord_exp[1], fixed_cpl_coords[blk][1], s->fbw_channels * 16); for (ch = 1; ch <= s->fbw_channels; ch++) { int bnd, min_exp, max_exp, master_exp; if (!block->new_cpl_coords[ch]) continue; /* determine master exponent */ min_exp = max_exp = block->cpl_coord_exp[ch][0]; for (bnd = 1; bnd < s->num_cpl_bands; bnd++) { int exp = block->cpl_coord_exp[ch][bnd]; min_exp = FFMIN(exp, min_exp); max_exp = FFMAX(exp, max_exp); } master_exp = ((max_exp - 15) + 2) / 3; master_exp = FFMAX(master_exp, 0); while (min_exp < master_exp * 3) master_exp--; for (bnd = 0; bnd < s->num_cpl_bands; bnd++) { block->cpl_coord_exp[ch][bnd] = av_clip(block->cpl_coord_exp[ch][bnd] - master_exp * 3, 0, 15); } block->cpl_master_exp[ch] = master_exp; /* quantize mantissas */ for (bnd = 0; bnd < s->num_cpl_bands; bnd++) { int cpl_exp = block->cpl_coord_exp[ch][bnd]; int cpl_mant = (fixed_cpl_coords[blk][ch][bnd] << (5 + cpl_exp + master_exp * 3)) >> 24; if (cpl_exp == 15) cpl_mant >>= 1; else cpl_mant -= 16; block->cpl_coord_mant[ch][bnd] = cpl_mant; } } } if (CONFIG_EAC3_ENCODER && s->eac3) ff_eac3_set_cpl_states(s); } | 26,845
1 | static int get_high_utility_cell(elbg_data *elbg) { int i=0; /* Using linear search, do binary if it ever turns to be speed critical */ int r = av_lfg_get(elbg->rand_state)%elbg->utility_inc[elbg->numCB-1] + 1; while (elbg->utility_inc[i] < r) i++; av_assert2(elbg->cells[i]); return i; } | 26,846 |
1 | static uint32_t acpi_find_vgia(void) { uint32_t rsdp_offset; uint32_t guid_offset = 0; AcpiRsdpDescriptor rsdp_table; uint32_t rsdt; AcpiRsdtDescriptorRev1 rsdt_table; int tables_nr; uint32_t *tables; AcpiTableHeader ssdt_table; VgidTable vgid_table; int i; /* Tables may take a short time to be set up by the guest */ for (i = 0; i < RSDP_TRIES_MAX; i++) { rsdp_offset = acpi_find_rsdp_address(); if (rsdp_offset < RSDP_ADDR_INVALID) { break; } g_usleep(RSDP_SLEEP_US); } g_assert_cmphex(rsdp_offset, <, RSDP_ADDR_INVALID); acpi_parse_rsdp_table(rsdp_offset, &rsdp_table); rsdt = rsdp_table.rsdt_physical_address; /* read the header */ ACPI_READ_TABLE_HEADER(&rsdt_table, rsdt); ACPI_ASSERT_CMP(rsdt_table.signature, "RSDT"); /* compute the table entries in rsdt */ tables_nr = (rsdt_table.length - sizeof(AcpiRsdtDescriptorRev1)) / sizeof(uint32_t); g_assert_cmpint(tables_nr, >, 0); /* get the addresses of the tables pointed by rsdt */ tables = g_new0(uint32_t, tables_nr); ACPI_READ_ARRAY_PTR(tables, tables_nr, rsdt); for (i = 0; i < tables_nr; i++) { ACPI_READ_TABLE_HEADER(&ssdt_table, tables[i]); if (!strncmp((char *)ssdt_table.oem_table_id, "VMGENID", 7)) { /* the first entry in the table should be VGIA * That's all we need */ ACPI_READ_FIELD(vgid_table.name_op, tables[i]); g_assert(vgid_table.name_op == 0x08); /* name */ ACPI_READ_ARRAY(vgid_table.vgia, tables[i]); g_assert(memcmp(vgid_table.vgia, "VGIA", 4) == 0); ACPI_READ_FIELD(vgid_table.val_op, tables[i]); g_assert(vgid_table.val_op == 0x0C); /* dword */ ACPI_READ_FIELD(vgid_table.vgia_val, tables[i]); /* The GUID is written at a fixed offset into the fw_cfg file * in order to implement the "OVMF SDT Header probe suppressor" * see docs/specs/vmgenid.txt for more details */ guid_offset = vgid_table.vgia_val + VMGENID_GUID_OFFSET; break; } } g_free(tables); return guid_offset; } | 26,847 |
1 | static void ff_jref_idct1_put(uint8_t *dest, int line_size, DCTELEM *block) { uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; dest[0] = cm[(block[0] + 4)>>3]; } | 26,849 |
1 | int cpu_load(QEMUFile *f, void *opaque, int version_id) { CPUState *env = opaque; int i; uint32_t tmp; if (version_id != 5) return -EINVAL; for(i = 0; i < 8; i++) qemu_get_betls(f, &env->gregs[i]); qemu_get_be32s(f, &env->nwindows); for(i = 0; i < env->nwindows * 16; i++) qemu_get_betls(f, &env->regbase[i]); /* FPU */ for(i = 0; i < TARGET_FPREGS; i++) { union { float32 f; uint32_t i; } u; u.i = qemu_get_be32(f); env->fpr[i] = u.f; } qemu_get_betls(f, &env->pc); qemu_get_betls(f, &env->npc); qemu_get_betls(f, &env->y); tmp = qemu_get_be32(f); env->cwp = 0; /* needed to ensure that the wrapping registers are correctly updated */ PUT_PSR(env, tmp); qemu_get_betls(f, &env->fsr); qemu_get_betls(f, &env->tbr); tmp = qemu_get_be32(f); env->interrupt_index = tmp; qemu_get_be32s(f, &env->pil_in); #ifndef TARGET_SPARC64 qemu_get_be32s(f, &env->wim); /* MMU */ for (i = 0; i < 32; i++) qemu_get_be32s(f, &env->mmuregs[i]); #else qemu_get_be64s(f, &env->lsu); for (i = 0; i < 16; i++) { qemu_get_be64s(f, &env->immuregs[i]); qemu_get_be64s(f, &env->dmmuregs[i]); } for (i = 0; i < 64; i++) { qemu_get_be64s(f, &env->itlb[i].tag); qemu_get_be64s(f, &env->itlb[i].tte); qemu_get_be64s(f, &env->dtlb[i].tag); qemu_get_be64s(f, &env->dtlb[i].tte); } qemu_get_be32s(f, &env->mmu_version); for (i = 0; i < MAXTL_MAX; i++) { qemu_get_be64s(f, &env->ts[i].tpc); qemu_get_be64s(f, &env->ts[i].tnpc); qemu_get_be64s(f, &env->ts[i].tstate); qemu_get_be32s(f, &env->ts[i].tt); } qemu_get_be32s(f, &env->xcc); qemu_get_be32s(f, &env->asi); qemu_get_be32s(f, &env->pstate); qemu_get_be32s(f, &env->tl); env->tsptr = &env->ts[env->tl & MAXTL_MASK]; qemu_get_be32s(f, &env->cansave); qemu_get_be32s(f, &env->canrestore); qemu_get_be32s(f, &env->otherwin); qemu_get_be32s(f, &env->wstate); qemu_get_be32s(f, &env->cleanwin); for (i = 0; i < 8; i++) qemu_get_be64s(f, &env->agregs[i]); for (i = 0; i < 8; i++) qemu_get_be64s(f, &env->bgregs[i]); for (i = 0; i < 8; i++) qemu_get_be64s(f, &env->igregs[i]); for (i = 0; i < 8; i++) qemu_get_be64s(f, &env->mgregs[i]); qemu_get_be64s(f, &env->fprs); qemu_get_be64s(f, &env->tick_cmpr); qemu_get_be64s(f, &env->stick_cmpr); qemu_get_ptimer(f, env->tick); qemu_get_ptimer(f, env->stick); qemu_get_be64s(f, &env->gsr); qemu_get_be32s(f, &env->gl); qemu_get_be64s(f, &env->hpstate); for (i = 0; i < MAXTL_MAX; i++) qemu_get_be64s(f, &env->htstate[i]); qemu_get_be64s(f, &env->hintp); qemu_get_be64s(f, &env->htba); qemu_get_be64s(f, &env->hver); qemu_get_be64s(f, &env->hstick_cmpr); qemu_get_be64s(f, &env->ssr); qemu_get_ptimer(f, env->hstick); #endif tlb_flush(env, 1); return 0; } | 26,850 |
0 | static av_cold int v410_encode_close(AVCodecContext *avctx) { av_freep(&avctx->coded_frame); return 0; } | 26,851 |
0 | static int vp3_decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8_t *buf, int buf_size) { Vp3DecodeContext *s = avctx->priv_data; GetBitContext gb; static int counter = 0; init_get_bits(&gb, buf, buf_size * 8); if (s->theora && get_bits1(&gb)) { int ptype = get_bits(&gb, 7); skip_bits(&gb, 6*8); /* "theora" */ switch(ptype) { case 1: theora_decode_comments(avctx, gb); break; case 2: theora_decode_tables(avctx, gb); init_dequantizer(s); break; default: av_log(avctx, AV_LOG_ERROR, "Unknown Theora config packet: %d\n", ptype); } return buf_size; } s->keyframe = !get_bits1(&gb); if (!s->theora) skip_bits(&gb, 1); s->last_quality_index = s->quality_index; s->quality_index = get_bits(&gb, 6); if (s->theora >= 0x030200) skip_bits1(&gb); if (s->avctx->debug & FF_DEBUG_PICT_INFO) av_log(s->avctx, AV_LOG_INFO, " VP3 %sframe #%d: Q index = %d\n", s->keyframe?"key":"", counter, s->quality_index); counter++; if (s->quality_index != s->last_quality_index) init_dequantizer(s); if (s->keyframe) { if (!s->theora) { skip_bits(&gb, 4); /* width code */ skip_bits(&gb, 4); /* height code */ if (s->version) { s->version = get_bits(&gb, 5); if (counter == 1) av_log(s->avctx, AV_LOG_DEBUG, "VP version: %d\n", s->version); } } if (s->version || s->theora) { if (get_bits1(&gb)) av_log(s->avctx, AV_LOG_ERROR, "Warning, unsupported keyframe coding type?!\n"); skip_bits(&gb, 2); /* reserved? */ } if (s->last_frame.data[0] == s->golden_frame.data[0]) { if (s->golden_frame.data[0]) avctx->release_buffer(avctx, &s->golden_frame); s->last_frame= s->golden_frame; /* ensure that we catch any access to this released frame */ } else { if (s->golden_frame.data[0]) avctx->release_buffer(avctx, &s->golden_frame); if (s->last_frame.data[0]) avctx->release_buffer(avctx, &s->last_frame); } s->golden_frame.reference = 3; if(avctx->get_buffer(avctx, &s->golden_frame) < 0) { av_log(s->avctx, AV_LOG_ERROR, "vp3: get_buffer() failed\n"); return -1; } /* golden frame is also the current frame */ memcpy(&s->current_frame, &s->golden_frame, sizeof(AVFrame)); /* time to figure out pixel addresses? */ if (!s->pixel_addresses_inited) { if (!s->flipped_image) vp3_calculate_pixel_addresses(s); else theora_calculate_pixel_addresses(s); } } else { /* allocate a new current frame */ s->current_frame.reference = 3; if(avctx->get_buffer(avctx, &s->current_frame) < 0) { av_log(s->avctx, AV_LOG_ERROR, "vp3: get_buffer() failed\n"); return -1; } } s->current_frame.qscale_table= s->qscale_table; //FIXME allocate individual tables per AVFrame s->current_frame.qstride= 0; init_frame(s, &gb); #if KEYFRAMES_ONLY if (!s->keyframe) { memcpy(s->current_frame.data[0], s->golden_frame.data[0], s->current_frame.linesize[0] * s->height); memcpy(s->current_frame.data[1], s->golden_frame.data[1], s->current_frame.linesize[1] * s->height / 2); memcpy(s->current_frame.data[2], s->golden_frame.data[2], s->current_frame.linesize[2] * s->height / 2); } else { #endif if (unpack_superblocks(s, &gb) || unpack_modes(s, &gb) || unpack_vectors(s, &gb) || unpack_dct_coeffs(s, &gb)) { av_log(s->avctx, AV_LOG_ERROR, " vp3: could not decode frame\n"); return -1; } reverse_dc_prediction(s, 0, s->fragment_width, s->fragment_height); render_fragments(s, 0, s->width, s->height, 0); // apply_loop_filter(s); if ((avctx->flags & CODEC_FLAG_GRAY) == 0) { reverse_dc_prediction(s, s->u_fragment_start, s->fragment_width / 2, s->fragment_height / 2); reverse_dc_prediction(s, s->v_fragment_start, s->fragment_width / 2, s->fragment_height / 2); render_fragments(s, s->u_fragment_start, s->width / 2, s->height / 2, 1); render_fragments(s, s->v_fragment_start, s->width / 2, s->height / 2, 2); } else { memset(s->current_frame.data[1], 0x80, s->width * s->height / 4); memset(s->current_frame.data[2], 0x80, s->width * s->height / 4); } #if KEYFRAMES_ONLY } #endif *data_size=sizeof(AVFrame); *(AVFrame*)data= s->current_frame; /* release the last frame, if it is allocated and if it is not the * golden frame */ if ((s->last_frame.data[0]) && (s->last_frame.data[0] != s->golden_frame.data[0])) avctx->release_buffer(avctx, &s->last_frame); /* shuffle frames (last = current) */ memcpy(&s->last_frame, &s->current_frame, sizeof(AVFrame)); s->current_frame.data[0]= NULL; /* ensure that we catch any access to this released frame */ return buf_size; } | 26,852
0 | static int au_read_header(AVFormatContext *s) { int size; unsigned int tag; AVIOContext *pb = s->pb; unsigned int id, channels, rate; enum AVCodecID codec; AVStream *st; /* check ".snd" header */ tag = avio_rl32(pb); if (tag != MKTAG('.', 's', 'n', 'd')) return -1; size = avio_rb32(pb); /* header size */ avio_rb32(pb); /* data size */ id = avio_rb32(pb); rate = avio_rb32(pb); channels = avio_rb32(pb); codec = ff_codec_get_id(codec_au_tags, id); if (!av_get_bits_per_sample(codec)) { av_log_ask_for_sample(s, "could not determine bits per sample\n"); return AVERROR_PATCHWELCOME; } if (channels == 0 || channels > 64) { av_log(s, AV_LOG_ERROR, "Invalid number of channels %d\n", channels); return AVERROR_INVALIDDATA; } if (size >= 24) { /* skip unused data */ avio_skip(pb, size - 24); } /* now we are ready: build format streams */ st = avformat_new_stream(s, NULL); if (!st) return -1; st->codec->codec_type = AVMEDIA_TYPE_AUDIO; st->codec->codec_tag = id; st->codec->codec_id = codec; st->codec->channels = channels; st->codec->sample_rate = rate; avpriv_set_pts_info(st, 64, 1, rate); return 0; } | 26,853 |
1 | static void mtree_print_mr(fprintf_function mon_printf, void *f, const MemoryRegion *mr, unsigned int level, target_phys_addr_t base, MemoryRegionListHead *alias_print_queue) { MemoryRegionList *new_ml, *ml, *next_ml; MemoryRegionListHead submr_print_queue; const MemoryRegion *submr; unsigned int i; if (!mr) { return; } for (i = 0; i < level; i++) { mon_printf(f, " "); } if (mr->alias) { MemoryRegionList *ml; bool found = false; /* check if the alias is already in the queue */ QTAILQ_FOREACH(ml, alias_print_queue, queue) { if (ml->mr == mr->alias && !ml->printed) { found = true; } } if (!found) { ml = g_new(MemoryRegionList, 1); ml->mr = mr->alias; ml->printed = false; QTAILQ_INSERT_TAIL(alias_print_queue, ml, queue); } mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d): alias %s @%s " TARGET_FMT_plx "-" TARGET_FMT_plx "\n", base + mr->addr, base + mr->addr + (target_phys_addr_t)int128_get64(mr->size) - 1, mr->priority, mr->name, mr->alias->name, mr->alias_offset, mr->alias_offset + (target_phys_addr_t)int128_get64(mr->size) - 1); } else { mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d): %s\n", base + mr->addr, base + mr->addr + (target_phys_addr_t)int128_get64(mr->size) - 1, mr->priority, mr->name); } QTAILQ_INIT(&submr_print_queue); QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) { new_ml = g_new(MemoryRegionList, 1); new_ml->mr = submr; QTAILQ_FOREACH(ml, &submr_print_queue, queue) { if (new_ml->mr->addr < ml->mr->addr || (new_ml->mr->addr == ml->mr->addr && new_ml->mr->priority > ml->mr->priority)) { QTAILQ_INSERT_BEFORE(ml, new_ml, queue); new_ml = NULL; break; } } if (new_ml) { QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, queue); } } QTAILQ_FOREACH(ml, &submr_print_queue, queue) { mtree_print_mr(mon_printf, f, ml->mr, level + 1, base + mr->addr, alias_print_queue); } QTAILQ_FOREACH_SAFE(next_ml, &submr_print_queue, queue, ml) { g_free(ml); } } | 26,854 |
1 | int spapr_vio_check_tces(VIOsPAPRDevice *dev, target_ulong ioba, target_ulong len, enum VIOsPAPR_TCEAccess access) { int start, end, i; start = ioba >> SPAPR_VIO_TCE_PAGE_SHIFT; end = (ioba + len - 1) >> SPAPR_VIO_TCE_PAGE_SHIFT; for (i = start; i <= end; i++) { if ((dev->rtce_table[i].tce & access) != access) { #ifdef DEBUG_TCE fprintf(stderr, "FAIL on %d\n", i); #endif return -1; } } return 0; } | 26,855 |
1 | void qmp_guest_fstrim(bool has_minimum, int64_t minimum, Error **errp) { int ret = 0; FsMountList mounts; struct FsMount *mount; int fd; Error *local_err = NULL; struct fstrim_range r = { .start = 0, .len = -1, .minlen = has_minimum ? minimum : 0, }; slog("guest-fstrim called"); QTAILQ_INIT(&mounts); build_fs_mount_list(&mounts, &local_err); if (local_err) { error_propagate(errp, local_err); return; } QTAILQ_FOREACH(mount, &mounts, next) { fd = qemu_open(mount->dirname, O_RDONLY); if (fd == -1) { error_setg_errno(errp, errno, "failed to open %s", mount->dirname); goto error; } /* We try to cull filesystems we know won't work in advance, but other * filesystems may not implement fstrim for less obvious reasons. These * will report EOPNOTSUPP; we simply ignore these errors. Any other * error means an unexpected error, so return it in those cases. In * some other cases ENOTTY will be reported (e.g. CD-ROMs). */ ret = ioctl(fd, FITRIM, &r); if (ret == -1) { if (errno != ENOTTY && errno != EOPNOTSUPP) { error_setg_errno(errp, errno, "failed to trim %s", mount->dirname); close(fd); goto error; } } close(fd); } error: free_fs_mount_list(&mounts); } | 26,857
0 | static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x, int motion_y, int mb_block_height, int mb_block_count) { int16_t weight[8][64]; DCTELEM orig[8][64]; const int mb_x= s->mb_x; const int mb_y= s->mb_y; int i; int skip_dct[8]; int dct_offset = s->linesize*8; //default for progressive frames uint8_t *ptr_y, *ptr_cb, *ptr_cr; int wrap_y, wrap_c; for(i=0; i<mb_block_count; i++) skip_dct[i]=s->skipdct; if(s->adaptive_quant){ const int last_qp= s->qscale; const int mb_xy= mb_x + mb_y*s->mb_stride; s->lambda= s->lambda_table[mb_xy]; update_qscale(s); if(!(s->flags&CODEC_FLAG_QP_RD)){ s->qscale= s->current_picture_ptr->qscale_table[mb_xy]; s->dquant= s->qscale - last_qp; if(s->out_format==FMT_H263){ s->dquant= av_clip(s->dquant, -2, 2); if(s->codec_id==CODEC_ID_MPEG4){ if(!s->mb_intra){ if(s->pict_type == FF_B_TYPE){ if(s->dquant&1 || s->mv_dir&MV_DIRECT) s->dquant= 0; } if(s->mv_type==MV_TYPE_8X8) s->dquant=0; } } } } ff_set_qscale(s, last_qp + s->dquant); }else if(s->flags&CODEC_FLAG_QP_RD) ff_set_qscale(s, s->qscale + s->dquant); wrap_y = s->linesize; wrap_c = s->uvlinesize; ptr_y = s->new_picture.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16; ptr_cb = s->new_picture.data[1] + (mb_y * mb_block_height * wrap_c) + mb_x * 8; ptr_cr = s->new_picture.data[2] + (mb_y * mb_block_height * wrap_c) + mb_x * 8; if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){ uint8_t *ebuf= s->edge_emu_buffer + 32; ff_emulated_edge_mc(ebuf , ptr_y , wrap_y,16,16,mb_x*16,mb_y*16, s->width , s->height); ptr_y= ebuf; ff_emulated_edge_mc(ebuf+18*wrap_y , ptr_cb, wrap_c, 8, mb_block_height, mb_x*8, mb_y*8, s->width>>1, s->height>>1); ptr_cb= ebuf+18*wrap_y; ff_emulated_edge_mc(ebuf+18*wrap_y+8, ptr_cr, wrap_c, 8, mb_block_height, mb_x*8, mb_y*8, s->width>>1, s->height>>1); ptr_cr= ebuf+18*wrap_y+8; } if (s->mb_intra) { if(s->flags&CODEC_FLAG_INTERLACED_DCT){ int progressive_score, interlaced_score; s->interlaced_dct=0; progressive_score= s->dsp.ildct_cmp[4](s, ptr_y , NULL, wrap_y, 8) +s->dsp.ildct_cmp[4](s, ptr_y + wrap_y*8, NULL, wrap_y, 8) - 400; if(progressive_score > 0){ interlaced_score = s->dsp.ildct_cmp[4](s, ptr_y , NULL, wrap_y*2, 8) +s->dsp.ildct_cmp[4](s, ptr_y + wrap_y , NULL, wrap_y*2, 8); if(progressive_score > interlaced_score){ s->interlaced_dct=1; dct_offset= wrap_y; wrap_y<<=1; if (s->chroma_format == CHROMA_422) wrap_c<<=1; } } } s->dsp.get_pixels(s->block[0], ptr_y , wrap_y); s->dsp.get_pixels(s->block[1], ptr_y + 8, wrap_y); s->dsp.get_pixels(s->block[2], ptr_y + dct_offset , wrap_y); s->dsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y); if(s->flags&CODEC_FLAG_GRAY){ skip_dct[4]= 1; skip_dct[5]= 1; }else{ s->dsp.get_pixels(s->block[4], ptr_cb, wrap_c); s->dsp.get_pixels(s->block[5], ptr_cr, wrap_c); if(!s->chroma_y_shift){ /* 422 */ s->dsp.get_pixels(s->block[6], ptr_cb + (dct_offset>>1), wrap_c); s->dsp.get_pixels(s->block[7], ptr_cr + (dct_offset>>1), wrap_c); } } }else{ op_pixels_func (*op_pix)[4]; qpel_mc_func (*op_qpix)[16]; uint8_t *dest_y, *dest_cb, *dest_cr; dest_y = s->dest[0]; dest_cb = s->dest[1]; dest_cr = s->dest[2]; if ((!s->no_rounding) || s->pict_type==FF_B_TYPE){ op_pix = s->dsp.put_pixels_tab; op_qpix= s->dsp.put_qpel_pixels_tab; }else{ op_pix = s->dsp.put_no_rnd_pixels_tab; op_qpix= s->dsp.put_no_rnd_qpel_pixels_tab; } if (s->mv_dir & MV_DIR_FORWARD) { MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix); op_pix = s->dsp.avg_pixels_tab; op_qpix= s->dsp.avg_qpel_pixels_tab; } if (s->mv_dir & MV_DIR_BACKWARD) { MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix); } if(s->flags&CODEC_FLAG_INTERLACED_DCT){ int progressive_score, interlaced_score; s->interlaced_dct=0; progressive_score= s->dsp.ildct_cmp[0](s, dest_y , ptr_y , wrap_y, 8) +s->dsp.ildct_cmp[0](s, dest_y + wrap_y*8, ptr_y + wrap_y*8, wrap_y, 8) - 400; if(s->avctx->ildct_cmp == FF_CMP_VSSE) progressive_score -= 400; if(progressive_score>0){ interlaced_score = s->dsp.ildct_cmp[0](s, dest_y , ptr_y , wrap_y*2, 8) +s->dsp.ildct_cmp[0](s, dest_y + wrap_y , ptr_y + wrap_y , wrap_y*2, 8); if(progressive_score > interlaced_score){ s->interlaced_dct=1; dct_offset= wrap_y; wrap_y<<=1; if (s->chroma_format == CHROMA_422) wrap_c<<=1; } } } s->dsp.diff_pixels(s->block[0], ptr_y , dest_y , wrap_y); s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y); s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset , dest_y + dct_offset , wrap_y); s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8, dest_y + dct_offset + 8, wrap_y); if(s->flags&CODEC_FLAG_GRAY){ skip_dct[4]= 1; skip_dct[5]= 1; }else{ s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c); s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c); if(!s->chroma_y_shift){ /* 422 */ s->dsp.diff_pixels(s->block[6], ptr_cb + (dct_offset>>1), dest_cb + (dct_offset>>1), wrap_c); s->dsp.diff_pixels(s->block[7], ptr_cr + (dct_offset>>1), dest_cr + (dct_offset>>1), wrap_c); } } /* pre quantization */ if(s->current_picture.mc_mb_var[s->mb_stride*mb_y+ mb_x]<2*s->qscale*s->qscale){ //FIXME optimize if(s->dsp.sad[1](NULL, ptr_y , dest_y , wrap_y, 8) < 20*s->qscale) skip_dct[0]= 1; if(s->dsp.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20*s->qscale) skip_dct[1]= 1; if(s->dsp.sad[1](NULL, ptr_y +dct_offset , dest_y +dct_offset , wrap_y, 8) < 20*s->qscale) skip_dct[2]= 1; if(s->dsp.sad[1](NULL, ptr_y +dct_offset+ 8, dest_y +dct_offset+ 8, wrap_y, 8) < 20*s->qscale) skip_dct[3]= 1; if(s->dsp.sad[1](NULL, ptr_cb , dest_cb , wrap_c, 8) < 20*s->qscale) skip_dct[4]= 1; if(s->dsp.sad[1](NULL, ptr_cr , dest_cr , wrap_c, 8) < 20*s->qscale) skip_dct[5]= 1; if(!s->chroma_y_shift){ /* 422 */ if(s->dsp.sad[1](NULL, ptr_cb +(dct_offset>>1), dest_cb +(dct_offset>>1), wrap_c, 8) < 20*s->qscale) skip_dct[6]= 1; if(s->dsp.sad[1](NULL, ptr_cr +(dct_offset>>1), dest_cr +(dct_offset>>1), wrap_c, 8) < 20*s->qscale) skip_dct[7]= 1; } } } if(s->avctx->quantizer_noise_shaping){ if(!skip_dct[0]) get_visual_weight(weight[0], ptr_y , wrap_y); if(!skip_dct[1]) get_visual_weight(weight[1], ptr_y + 8, wrap_y); if(!skip_dct[2]) get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y); if(!skip_dct[3]) get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y); if(!skip_dct[4]) get_visual_weight(weight[4], ptr_cb , wrap_c); if(!skip_dct[5]) get_visual_weight(weight[5], ptr_cr , wrap_c); if(!s->chroma_y_shift){ /* 422 */ if(!skip_dct[6]) get_visual_weight(weight[6], ptr_cb + (dct_offset>>1), wrap_c); if(!skip_dct[7]) get_visual_weight(weight[7], ptr_cr + (dct_offset>>1), wrap_c); } memcpy(orig[0], s->block[0], sizeof(DCTELEM)*64*mb_block_count); } /* DCT & quantize */ assert(s->out_format!=FMT_MJPEG || s->qscale==8); { for(i=0;i<mb_block_count;i++) { if(!skip_dct[i]){ int overflow; s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow); // FIXME we could decide to change to quantizer instead of clipping // JS: I don't think that would be a good idea it could lower quality instead // of improve it. Just INTRADC clipping deserves changes in quantizer if (overflow) clip_coeffs(s, s->block[i], s->block_last_index[i]); }else s->block_last_index[i]= -1; } if(s->avctx->quantizer_noise_shaping){ for(i=0;i<mb_block_count;i++) { if(!skip_dct[i]){ s->block_last_index[i] = dct_quantize_refine(s, s->block[i], weight[i], orig[i], i, s->qscale); } } } if(s->luma_elim_threshold && !s->mb_intra) for(i=0; i<4; i++) dct_single_coeff_elimination(s, i, s->luma_elim_threshold); if(s->chroma_elim_threshold && !s->mb_intra) for(i=4; i<mb_block_count; i++) dct_single_coeff_elimination(s, i, s->chroma_elim_threshold); if(s->flags & CODEC_FLAG_CBP_RD){ for(i=0;i<mb_block_count;i++) { if(s->block_last_index[i] == -1) s->coded_score[i]= INT_MAX/256; } } } if((s->flags&CODEC_FLAG_GRAY) && s->mb_intra){ s->block_last_index[4]= s->block_last_index[5]= 0; s->block[4][0]= s->block[5][0]= (1024 + s->c_dc_scale/2)/ s->c_dc_scale; } //non c quantize code returns incorrect block_last_index FIXME if(s->alternate_scan && s->dct_quantize != dct_quantize_c){ for(i=0; i<mb_block_count; i++){ int j; if(s->block_last_index[i]>0){ for(j=63; j>0; j--){ if(s->block[i][ s->intra_scantable.permutated[j] ]) break; } s->block_last_index[i]= j; } } } /* huffman encode */ switch(s->codec_id){ //FIXME funct ptr could be slightly faster case CODEC_ID_MPEG1VIDEO: case CODEC_ID_MPEG2VIDEO: if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) mpeg1_encode_mb(s, s->block, motion_x, motion_y); break; case CODEC_ID_MPEG4: if (CONFIG_MPEG4_ENCODER) mpeg4_encode_mb(s, s->block, motion_x, motion_y); break; case CODEC_ID_MSMPEG4V2: case CODEC_ID_MSMPEG4V3: case CODEC_ID_WMV1: if (CONFIG_MSMPEG4_ENCODER) msmpeg4_encode_mb(s, s->block, motion_x, motion_y); break; case CODEC_ID_WMV2: if (CONFIG_WMV2_ENCODER) ff_wmv2_encode_mb(s, s->block, motion_x, motion_y); break; case CODEC_ID_H261: if (CONFIG_H261_ENCODER) ff_h261_encode_mb(s, s->block, motion_x, motion_y); break; case CODEC_ID_H263: case CODEC_ID_H263P: case CODEC_ID_FLV1: case CODEC_ID_RV10: case CODEC_ID_RV20: if (CONFIG_H263_ENCODER || CONFIG_H263P_ENCODER || CONFIG_FLV_ENCODER || CONFIG_RV10_ENCODER || CONFIG_RV20_ENCODER) h263_encode_mb(s, s->block, motion_x, motion_y); break; case CODEC_ID_MJPEG: if (CONFIG_MJPEG_ENCODER) ff_mjpeg_encode_mb(s, s->block); break; default: assert(0); } } | 26,858
1 | vpc_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags) { BDRVVPCState *s = bs->opaque; int64_t image_offset; int64_t n_bytes; int64_t bytes_done = 0; int ret; VHDFooter *footer = (VHDFooter *) s->footer_buf; QEMUIOVector local_qiov; if (be32_to_cpu(footer->type) == VHD_FIXED) { return bdrv_co_pwritev(bs->file, offset, bytes, qiov, 0); } qemu_co_mutex_lock(&s->lock); qemu_iovec_init(&local_qiov, qiov->niov); while (bytes > 0) { image_offset = get_image_offset(bs, offset, true); n_bytes = MIN(bytes, s->block_size - (offset % s->block_size)); if (image_offset == -1) { image_offset = alloc_block(bs, offset); if (image_offset < 0) { ret = image_offset; goto fail; } } qemu_iovec_reset(&local_qiov); qemu_iovec_concat(&local_qiov, qiov, bytes_done, n_bytes); ret = bdrv_co_pwritev(bs->file, image_offset, n_bytes, &local_qiov, 0); if (ret < 0) { goto fail; } bytes -= n_bytes; offset += n_bytes; bytes_done += n_bytes; } ret = 0; fail: qemu_iovec_destroy(&local_qiov); qemu_co_mutex_unlock(&s->lock); return ret; } | 26,859 |
1 | bool memory_region_present(MemoryRegion *container, hwaddr addr) { MemoryRegion *mr = memory_region_find(container, addr, 1).mr; if (!mr || (mr == container)) { return false; } memory_region_unref(mr); return true; } | 26,860 |
1 | static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) { TCGMemOp opc = l->opc; TCGReg data_reg; uint8_t **label_ptr = &l->label_ptr[0]; /* resolve label address */ *(uint32_t *)label_ptr[0] = (uint32_t)(s->code_ptr - label_ptr[0] - 4); if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) { *(uint32_t *)label_ptr[1] = (uint32_t)(s->code_ptr - label_ptr[1] - 4); } if (TCG_TARGET_REG_BITS == 32) { int ofs = 0; tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs); ofs += 4; tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs); ofs += 4; if (TARGET_LONG_BITS == 64) { tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs); ofs += 4; } tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, l->mem_index); ofs += 4; tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, (uintptr_t)l->raddr); } else { tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0); /* The second argument is already loaded with addrlo. */ tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2], l->mem_index); tcg_out_movi(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[3], (uintptr_t)l->raddr); } tcg_out_calli(s, (uintptr_t)qemu_ld_helpers[opc & ~MO_SIGN]); data_reg = l->datalo_reg; switch (opc & MO_SSIZE) { case MO_SB: tcg_out_ext8s(s, data_reg, TCG_REG_EAX, P_REXW); break; case MO_SW: tcg_out_ext16s(s, data_reg, TCG_REG_EAX, P_REXW); break; #if TCG_TARGET_REG_BITS == 64 case MO_SL: tcg_out_ext32s(s, data_reg, TCG_REG_EAX); break; #endif case MO_UB: case MO_UW: /* Note that the helpers have zero-extended to tcg_target_long. */ case MO_UL: tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX); break; case MO_Q: if (TCG_TARGET_REG_BITS == 64) { tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_RAX); } else if (data_reg == TCG_REG_EDX) { /* xchg %edx, %eax */ tcg_out_opc(s, OPC_XCHG_ax_r32 + TCG_REG_EDX, 0, 0, 0); tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EAX); } else { tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX); tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EDX); } break; default: tcg_abort(); } /* Jump to the code corresponding to next IR of qemu_st */ tcg_out_jmp(s, (uintptr_t)l->raddr); } | 26,861 |
1 | static long do_sigreturn_v1(CPUARMState *env) { abi_ulong frame_addr; struct sigframe_v1 *frame; target_sigset_t set; sigset_t host_set; int i; /* * Since we stacked the signal on a 64-bit boundary, * then 'sp' should be word aligned here. If it's * not, then the user is trying to mess with us. */ if (env->regs[13] & 7) goto badframe; frame_addr = env->regs[13]; if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) goto badframe; if (__get_user(set.sig[0], &frame->sc.oldmask)) goto badframe; for(i = 1; i < TARGET_NSIG_WORDS; i++) { if (__get_user(set.sig[i], &frame->extramask[i - 1])) goto badframe; } target_to_host_sigset_internal(&host_set, &set); sigprocmask(SIG_SETMASK, &host_set, NULL); if (restore_sigcontext(env, &frame->sc)) goto badframe; #if 0 /* Send SIGTRAP if we're single-stepping */ if (ptrace_cancel_bpt(current)) send_sig(SIGTRAP, current, 1); #endif unlock_user_struct(frame, frame_addr, 0); return env->regs[0]; badframe: unlock_user_struct(frame, frame_addr, 0); force_sig(TARGET_SIGSEGV /* , current */); return 0; } | 26,863 |
1 | static void tcp_chr_connect(void *opaque) { CharDriverState *chr = opaque; TCPCharDriver *s = chr->opaque; QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(s->ioc); g_free(chr->filename); chr->filename = sockaddr_to_str(&sioc->localAddr, sioc->localAddrLen, &sioc->remoteAddr, sioc->remoteAddrLen, s->is_listen, s->is_telnet); s->connected = 1; if (s->ioc) { chr->fd_in_tag = io_add_watch_poll(s->ioc, tcp_chr_read_poll, tcp_chr_read, chr); } qemu_chr_be_generic_open(chr); } | 26,864 |
1 | static void gen_muldiv (DisasContext *ctx, uint32_t opc, int rs, int rt) { const char *opn = "mul/div"; TCGv t0, t1; unsigned int acc; switch (opc) { case OPC_DIV: case OPC_DIVU: #if defined(TARGET_MIPS64) case OPC_DDIV: case OPC_DDIVU: #endif t0 = tcg_temp_local_new(); t1 = tcg_temp_local_new(); break; default: t0 = tcg_temp_new(); t1 = tcg_temp_new(); break; } gen_load_gpr(t0, rs); gen_load_gpr(t1, rt); switch (opc) { case OPC_DIV: { int l1 = gen_new_label(); int l2 = gen_new_label(); tcg_gen_ext32s_tl(t0, t0); tcg_gen_ext32s_tl(t1, t1); tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1); tcg_gen_brcondi_tl(TCG_COND_NE, t0, INT_MIN, l2); tcg_gen_brcondi_tl(TCG_COND_NE, t1, -1, l2); tcg_gen_mov_tl(cpu_LO[0], t0); tcg_gen_movi_tl(cpu_HI[0], 0); tcg_gen_br(l1); gen_set_label(l2); tcg_gen_div_tl(cpu_LO[0], t0, t1); tcg_gen_rem_tl(cpu_HI[0], t0, t1); tcg_gen_ext32s_tl(cpu_LO[0], cpu_LO[0]); tcg_gen_ext32s_tl(cpu_HI[0], cpu_HI[0]); gen_set_label(l1); } opn = "div"; break; case OPC_DIVU: { int l1 = gen_new_label(); tcg_gen_ext32u_tl(t0, t0); tcg_gen_ext32u_tl(t1, t1); tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1); tcg_gen_divu_tl(cpu_LO[0], t0, t1); tcg_gen_remu_tl(cpu_HI[0], t0, t1); tcg_gen_ext32s_tl(cpu_LO[0], cpu_LO[0]); tcg_gen_ext32s_tl(cpu_HI[0], cpu_HI[0]); gen_set_label(l1); } opn = "divu"; break; case OPC_MULT: { TCGv_i64 t2 = tcg_temp_new_i64(); TCGv_i64 t3 = tcg_temp_new_i64(); acc = ((ctx->opcode) >> 11) & 0x03; if (acc != 0) { check_dsp(ctx); } tcg_gen_ext_tl_i64(t2, t0); tcg_gen_ext_tl_i64(t3, t1); tcg_gen_mul_i64(t2, t2, t3); tcg_temp_free_i64(t3); tcg_gen_trunc_i64_tl(t0, t2); tcg_gen_shri_i64(t2, t2, 32); tcg_gen_trunc_i64_tl(t1, t2); tcg_temp_free_i64(t2); tcg_gen_ext32s_tl(cpu_LO[acc], t0); tcg_gen_ext32s_tl(cpu_HI[acc], t1); } opn = "mult"; break; case OPC_MULTU: { TCGv_i64 t2 = tcg_temp_new_i64(); TCGv_i64 t3 = tcg_temp_new_i64(); acc = ((ctx->opcode) >> 11) & 0x03; if (acc != 0) { check_dsp(ctx); } tcg_gen_ext32u_tl(t0, t0); tcg_gen_ext32u_tl(t1, t1); tcg_gen_extu_tl_i64(t2, t0); tcg_gen_extu_tl_i64(t3, t1); tcg_gen_mul_i64(t2, t2, t3); tcg_temp_free_i64(t3); tcg_gen_trunc_i64_tl(t0, t2); tcg_gen_shri_i64(t2, t2, 32); tcg_gen_trunc_i64_tl(t1, t2); tcg_temp_free_i64(t2); tcg_gen_ext32s_tl(cpu_LO[acc], t0); tcg_gen_ext32s_tl(cpu_HI[acc], t1); } opn = "multu"; break; #if defined(TARGET_MIPS64) case OPC_DDIV: { int l1 = gen_new_label(); int l2 = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1); tcg_gen_brcondi_tl(TCG_COND_NE, t0, -1LL << 63, l2); tcg_gen_brcondi_tl(TCG_COND_NE, t1, -1LL, l2); tcg_gen_mov_tl(cpu_LO[0], t0); tcg_gen_movi_tl(cpu_HI[0], 0); tcg_gen_br(l1); gen_set_label(l2); tcg_gen_div_i64(cpu_LO[0], t0, t1); tcg_gen_rem_i64(cpu_HI[0], t0, t1); gen_set_label(l1); } opn = "ddiv"; break; case OPC_DDIVU: { int l1 = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1); tcg_gen_divu_i64(cpu_LO[0], t0, t1); tcg_gen_remu_i64(cpu_HI[0], t0, t1); gen_set_label(l1); } opn = "ddivu"; break; case OPC_DMULT: gen_helper_dmult(cpu_env, t0, t1); opn = "dmult"; break; case OPC_DMULTU: gen_helper_dmultu(cpu_env, t0, t1); opn = "dmultu"; break; #endif case OPC_MADD: { TCGv_i64 t2 = tcg_temp_new_i64(); TCGv_i64 t3 = tcg_temp_new_i64(); acc = ((ctx->opcode) >> 11) & 0x03; if (acc != 0) { check_dsp(ctx); } tcg_gen_ext_tl_i64(t2, t0); tcg_gen_ext_tl_i64(t3, t1); tcg_gen_mul_i64(t2, t2, t3); tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]); tcg_gen_add_i64(t2, t2, t3); tcg_temp_free_i64(t3); tcg_gen_trunc_i64_tl(t0, t2); tcg_gen_shri_i64(t2, t2, 32); tcg_gen_trunc_i64_tl(t1, t2); tcg_temp_free_i64(t2); tcg_gen_ext32s_tl(cpu_LO[acc], t0); tcg_gen_ext32s_tl(cpu_HI[acc], t1); } opn = "madd"; break; case OPC_MADDU: { TCGv_i64 t2 = tcg_temp_new_i64(); TCGv_i64 t3 = tcg_temp_new_i64(); acc = ((ctx->opcode) >> 11) & 0x03; if (acc != 0) { check_dsp(ctx); } tcg_gen_ext32u_tl(t0, t0); tcg_gen_ext32u_tl(t1, t1); tcg_gen_extu_tl_i64(t2, t0); tcg_gen_extu_tl_i64(t3, t1); tcg_gen_mul_i64(t2, t2, t3); tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]); tcg_gen_add_i64(t2, t2, t3); tcg_temp_free_i64(t3); tcg_gen_trunc_i64_tl(t0, t2); tcg_gen_shri_i64(t2, t2, 32); tcg_gen_trunc_i64_tl(t1, t2); tcg_temp_free_i64(t2); tcg_gen_ext32s_tl(cpu_LO[acc], t0); tcg_gen_ext32s_tl(cpu_HI[acc], t1); } opn = "maddu"; break; case OPC_MSUB: { TCGv_i64 t2 = tcg_temp_new_i64(); TCGv_i64 t3 = tcg_temp_new_i64(); acc = ((ctx->opcode) >> 11) & 0x03; if (acc != 0) { check_dsp(ctx); } tcg_gen_ext_tl_i64(t2, t0); tcg_gen_ext_tl_i64(t3, t1); tcg_gen_mul_i64(t2, t2, t3); tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]); tcg_gen_sub_i64(t2, t3, t2); tcg_temp_free_i64(t3); tcg_gen_trunc_i64_tl(t0, t2); tcg_gen_shri_i64(t2, t2, 32); tcg_gen_trunc_i64_tl(t1, t2); tcg_temp_free_i64(t2); tcg_gen_ext32s_tl(cpu_LO[acc], t0); tcg_gen_ext32s_tl(cpu_HI[acc], t1); } opn = "msub"; break; case OPC_MSUBU: { TCGv_i64 t2 = tcg_temp_new_i64(); TCGv_i64 t3 = tcg_temp_new_i64(); acc = ((ctx->opcode) >> 11) & 0x03; if (acc != 0) { check_dsp(ctx); } tcg_gen_ext32u_tl(t0, t0); tcg_gen_ext32u_tl(t1, t1); tcg_gen_extu_tl_i64(t2, t0); tcg_gen_extu_tl_i64(t3, t1); tcg_gen_mul_i64(t2, t2, t3); tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]); tcg_gen_sub_i64(t2, t3, t2); tcg_temp_free_i64(t3); tcg_gen_trunc_i64_tl(t0, t2); tcg_gen_shri_i64(t2, t2, 32); tcg_gen_trunc_i64_tl(t1, t2); tcg_temp_free_i64(t2); tcg_gen_ext32s_tl(cpu_LO[acc], t0); tcg_gen_ext32s_tl(cpu_HI[acc], t1); } opn = "msubu"; break; default: MIPS_INVAL(opn); generate_exception(ctx, EXCP_RI); goto out; } (void)opn; /* avoid a compiler warning */ MIPS_DEBUG("%s %s %s", opn, regnames[rs], regnames[rt]); out: tcg_temp_free(t0); tcg_temp_free(t1); } | 26,865
1 | static void usb_msd_copy_data(MSDState *s) { uint32_t len; len = s->usb_len; if (len > s->scsi_len) len = s->scsi_len; if (s->mode == USB_MSDM_DATAIN) { memcpy(s->usb_buf, s->scsi_buf, len); } else { memcpy(s->scsi_buf, s->usb_buf, len); } s->usb_len -= len; s->scsi_len -= len; s->usb_buf += len; s->scsi_buf += len; s->data_len -= len; if (s->scsi_len == 0 || s->data_len == 0) { if (s->mode == USB_MSDM_DATAIN) { s->scsi_dev->info->read_data(s->scsi_dev, s->tag); } else if (s->mode == USB_MSDM_DATAOUT) { s->scsi_dev->info->write_data(s->scsi_dev, s->tag); } } } | 26,866 |
1 | void av_log_format_line(void *ptr, int level, const char *fmt, va_list vl, char *line, int line_size, int *print_prefix) { char part[3][512]; format_line(ptr, level, fmt, vl, part, sizeof(part[0]), print_prefix, NULL); snprintf(line, line_size, "%s%s%s", part[0], part[1], part[2]); } | 26,867 |
1 | static inline void RENAME(rgb32tobgr15)(const uint8_t *src, uint8_t *dst, long src_size) { const uint8_t *s = src; const uint8_t *end; #ifdef HAVE_MMX const uint8_t *mm_end; #endif uint16_t *d = (uint16_t *)dst; end = s + src_size; #ifdef HAVE_MMX __asm __volatile(PREFETCH" %0"::"m"(*src):"memory"); __asm __volatile( "movq %0, %%mm7\n\t" "movq %1, %%mm6\n\t" ::"m"(red_15mask),"m"(green_15mask)); mm_end = end - 15; while(s < mm_end) { __asm __volatile( PREFETCH" 32%1\n\t" "movd %1, %%mm0\n\t" "movd 4%1, %%mm3\n\t" "punpckldq 8%1, %%mm0\n\t" "punpckldq 12%1, %%mm3\n\t" "movq %%mm0, %%mm1\n\t" "movq %%mm0, %%mm2\n\t" "movq %%mm3, %%mm4\n\t" "movq %%mm3, %%mm5\n\t" "psllq $7, %%mm0\n\t" "psllq $7, %%mm3\n\t" "pand %%mm7, %%mm0\n\t" "pand %%mm7, %%mm3\n\t" "psrlq $6, %%mm1\n\t" "psrlq $6, %%mm4\n\t" "pand %%mm6, %%mm1\n\t" "pand %%mm6, %%mm4\n\t" "psrlq $19, %%mm2\n\t" "psrlq $19, %%mm5\n\t" "pand %2, %%mm2\n\t" "pand %2, %%mm5\n\t" "por %%mm1, %%mm0\n\t" "por %%mm4, %%mm3\n\t" "por %%mm2, %%mm0\n\t" "por %%mm5, %%mm3\n\t" "psllq $16, %%mm3\n\t" "por %%mm3, %%mm0\n\t" MOVNTQ" %%mm0, %0\n\t" :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory"); d += 4; s += 16; } __asm __volatile(SFENCE:::"memory"); __asm __volatile(EMMS:::"memory"); #endif while(s < end) { register int rgb = *(uint32_t*)s; s += 4; *d++ = ((rgb&0xF8)<<7) + ((rgb&0xF800)>>6) + ((rgb&0xF80000)>>19); } } | 26,868 |
0 | static int http_receive_data(HTTPContext *c) { HTTPContext *c1; if (c->buffer_end > c->buffer_ptr) { int len; len = recv(c->fd, c->buffer_ptr, c->buffer_end - c->buffer_ptr, 0); if (len < 0) { if (ff_neterrno() != FF_NETERROR(EAGAIN) && ff_neterrno() != FF_NETERROR(EINTR)) /* error : close connection */ goto fail; } else if (len == 0) /* end of connection : close it */ goto fail; else { c->buffer_ptr += len; c->data_count += len; update_datarate(&c->datarate, c->data_count); } } if (c->buffer_ptr - c->buffer >= 2 && c->data_count > FFM_PACKET_SIZE) { if (c->buffer[0] != 'f' || c->buffer[1] != 'm') { http_log("Feed stream has become desynchronized -- disconnecting\n"); goto fail; } } if (c->buffer_ptr >= c->buffer_end) { FFStream *feed = c->stream; /* a packet has been received : write it in the store, except if header */ if (c->data_count > FFM_PACKET_SIZE) { // printf("writing pos=0x%"PRIx64" size=0x%"PRIx64"\n", feed->feed_write_index, feed->feed_size); /* XXX: use llseek or url_seek */ lseek(c->feed_fd, feed->feed_write_index, SEEK_SET); if (write(c->feed_fd, c->buffer, FFM_PACKET_SIZE) < 0) { http_log("Error writing to feed file: %s\n", strerror(errno)); goto fail; } feed->feed_write_index += FFM_PACKET_SIZE; /* update file size */ if (feed->feed_write_index > c->stream->feed_size) feed->feed_size = feed->feed_write_index; /* handle wrap around if max file size reached */ if (c->stream->feed_max_size && feed->feed_write_index >= c->stream->feed_max_size) feed->feed_write_index = FFM_PACKET_SIZE; /* write index */ ffm_write_write_index(c->feed_fd, feed->feed_write_index); /* wake up any waiting connections */ for(c1 = first_http_ctx; c1 != NULL; c1 = c1->next) { if (c1->state == HTTPSTATE_WAIT_FEED && c1->stream->feed == c->stream->feed) c1->state = HTTPSTATE_SEND_DATA; } } else { /* We have a header in our hands that contains useful data */ AVFormatContext *s = NULL; ByteIOContext *pb; AVInputFormat *fmt_in; int i; url_open_buf(&pb, c->buffer, c->buffer_end - c->buffer, URL_RDONLY); pb->is_streamed = 1; /* use feed output format name to find corresponding input format */ fmt_in = av_find_input_format(feed->fmt->name); if (!fmt_in) goto fail; av_open_input_stream(&s, pb, c->stream->feed_filename, fmt_in, NULL); /* Now we have the actual streams */ if (s->nb_streams != feed->nb_streams) { av_close_input_stream(s); av_free(pb); goto fail; } for (i = 0; i < s->nb_streams; i++) memcpy(feed->streams[i]->codec, s->streams[i]->codec, sizeof(AVCodecContext)); av_close_input_stream(s); av_free(pb); } c->buffer_ptr = c->buffer; } return 0; fail: c->stream->feed_opened = 0; close(c->feed_fd); /* wake up any waiting connections to stop waiting for feed */ for(c1 = first_http_ctx; c1 != NULL; c1 = c1->next) { if (c1->state == HTTPSTATE_WAIT_FEED && c1->stream->feed == c->stream->feed) c1->state = HTTPSTATE_SEND_DATA_TRAILER; } return -1; } | 26,869 |
0 | uint32_t kvmppc_get_dfp(void) { return kvmppc_read_int_cpu_dt("ibm,dfp"); } | 26,871 |
0 | static int send_full_color_rect(VncState *vs, int w, int h) { int stream = 0; size_t bytes; vnc_write_u8(vs, stream << 4); /* no flushing, no filter */ if (vs->tight_pixel24) { tight_pack24(vs, vs->tight.buffer, w * h, &vs->tight.offset); bytes = 3; } else { bytes = vs->clientds.pf.bytes_per_pixel; } bytes = tight_compress_data(vs, stream, w * h * bytes, tight_conf[vs->tight_compression].raw_zlib_level, Z_DEFAULT_STRATEGY); return (bytes >= 0); } | 26,872 |
0 | static void test_visitor_in_string(TestInputVisitorData *data, const void *unused) { char *res = NULL, *value = (char *) "Q E M U"; Visitor *v; v = visitor_input_test_init(data, "%s", value); visit_type_str(v, NULL, &res, &error_abort); g_assert_cmpstr(res, ==, value); g_free(res); } | 26,874 |
0 | static int kvm_init(MachineState *ms) { MachineClass *mc = MACHINE_GET_CLASS(ms); static const char upgrade_note[] = "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n" "(see http://sourceforge.net/projects/kvm).\n"; struct { const char *name; int num; } num_cpus[] = { { "SMP", smp_cpus }, { "hotpluggable", max_cpus }, { NULL, } }, *nc = num_cpus; int soft_vcpus_limit, hard_vcpus_limit; KVMState *s; const KVMCapabilityInfo *missing_cap; int ret; int type = 0; const char *kvm_type; s = KVM_STATE(ms->accelerator); /* * On systems where the kernel can support different base page * sizes, host page size may be different from TARGET_PAGE_SIZE, * even with KVM. TARGET_PAGE_SIZE is assumed to be the minimum * page size for the system though. */ assert(TARGET_PAGE_SIZE <= getpagesize()); page_size_init(); s->sigmask_len = 8; #ifdef KVM_CAP_SET_GUEST_DEBUG QTAILQ_INIT(&s->kvm_sw_breakpoints); #endif s->vmfd = -1; s->fd = qemu_open("/dev/kvm", O_RDWR); if (s->fd == -1) { fprintf(stderr, "Could not access KVM kernel module: %m\n"); ret = -errno; goto err; } ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0); if (ret < KVM_API_VERSION) { if (ret >= 0) { ret = -EINVAL; } fprintf(stderr, "kvm version too old\n"); goto err; } if (ret > KVM_API_VERSION) { ret = -EINVAL; fprintf(stderr, "kvm version not supported\n"); goto err; } s->nr_slots = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS); /* If unspecified, use the default value */ if (!s->nr_slots) { s->nr_slots = 32; } /* check the vcpu limits */ soft_vcpus_limit = kvm_recommended_vcpus(s); hard_vcpus_limit = kvm_max_vcpus(s); while (nc->name) { if (nc->num > soft_vcpus_limit) { fprintf(stderr, "Warning: Number of %s cpus requested (%d) exceeds " "the recommended cpus supported by KVM (%d)\n", nc->name, nc->num, soft_vcpus_limit); if (nc->num > hard_vcpus_limit) { fprintf(stderr, "Number of %s cpus requested (%d) exceeds " "the maximum cpus supported by KVM (%d)\n", nc->name, nc->num, hard_vcpus_limit); exit(1); } } nc++; } kvm_type = qemu_opt_get(qemu_get_machine_opts(), "kvm-type"); if (mc->kvm_type) { type = mc->kvm_type(kvm_type); } else if (kvm_type) { ret = -EINVAL; fprintf(stderr, "Invalid argument kvm-type=%s\n", kvm_type); goto err; } do { ret = kvm_ioctl(s, KVM_CREATE_VM, type); } while (ret == -EINTR); if (ret < 0) { fprintf(stderr, "ioctl(KVM_CREATE_VM) failed: %d %s\n", -ret, strerror(-ret)); #ifdef TARGET_S390X if (ret == -EINVAL) { fprintf(stderr, "Host kernel setup problem detected. Please verify:\n"); fprintf(stderr, "- for kernels supporting the switch_amode or" " user_mode parameters, whether\n"); fprintf(stderr, " user space is running in primary address space\n"); fprintf(stderr, "- for kernels supporting the vm.allocate_pgste sysctl, " "whether it is enabled\n"); } #endif goto err; } s->vmfd = ret; missing_cap = kvm_check_extension_list(s, kvm_required_capabilites); if (!missing_cap) { missing_cap = kvm_check_extension_list(s, kvm_arch_required_capabilities); } if (missing_cap) { ret = -EINVAL; fprintf(stderr, "kvm does not support %s\n%s", missing_cap->name, upgrade_note); goto err; } s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO); s->broken_set_mem_region = 1; ret = kvm_check_extension(s, KVM_CAP_JOIN_MEMORY_REGIONS_WORKS); if (ret > 0) { s->broken_set_mem_region = 0; } #ifdef KVM_CAP_VCPU_EVENTS s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS); #endif s->robust_singlestep = kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP); #ifdef KVM_CAP_DEBUGREGS s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS); #endif #ifdef KVM_CAP_XSAVE s->xsave = kvm_check_extension(s, KVM_CAP_XSAVE); #endif #ifdef KVM_CAP_XCRS s->xcrs = kvm_check_extension(s, KVM_CAP_XCRS); #endif #ifdef KVM_CAP_PIT_STATE2 s->pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2); #endif #ifdef KVM_CAP_IRQ_ROUTING kvm_direct_msi_allowed = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0); #endif s->intx_set_mask = kvm_check_extension(s, KVM_CAP_PCI_2_3); s->irq_set_ioctl = KVM_IRQ_LINE; if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) { s->irq_set_ioctl = KVM_IRQ_LINE_STATUS; } #ifdef KVM_CAP_READONLY_MEM kvm_readonly_mem_allowed = (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0); #endif kvm_eventfds_allowed = (kvm_check_extension(s, KVM_CAP_IOEVENTFD) > 0); kvm_irqfds_allowed = (kvm_check_extension(s, KVM_CAP_IRQFD) > 0); kvm_resamplefds_allowed = (kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0); kvm_vm_attributes_allowed = (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES) > 0); ret = kvm_arch_init(ms, s); if (ret < 0) { goto err; } if (machine_kernel_irqchip_allowed(ms)) { kvm_irqchip_create(ms, s); } kvm_state = s; s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add; s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del; s->memory_listener.listener.coalesced_mmio_add = kvm_coalesce_mmio_region; s->memory_listener.listener.coalesced_mmio_del = kvm_uncoalesce_mmio_region; kvm_memory_listener_register(s, &s->memory_listener, &address_space_memory, 0); memory_listener_register(&kvm_io_listener, &address_space_io); s->many_ioeventfds = kvm_check_many_ioeventfds(); cpu_interrupt_handler = kvm_handle_interrupt; return 0; err: assert(ret < 0); if (s->vmfd >= 0) { close(s->vmfd); } if (s->fd != -1) { close(s->fd); } g_free(s->memory_listener.slots); return ret; } | 26,876
0 | static void qemu_gluster_gconf_free(GlusterConf *gconf) { g_free(gconf->server); g_free(gconf->volname); g_free(gconf->image); g_free(gconf->transport); g_free(gconf); } | 26,877 |
0 | void net_tx_pkt_init(struct NetTxPkt **pkt, uint32_t max_frags, bool has_virt_hdr) { struct NetTxPkt *p = g_malloc0(sizeof *p); p->vec = g_malloc((sizeof *p->vec) * (max_frags + NET_TX_PKT_PL_START_FRAG)); p->raw = g_malloc((sizeof *p->raw) * max_frags); p->max_payload_frags = max_frags; p->max_raw_frags = max_frags; p->has_virt_hdr = has_virt_hdr; p->vec[NET_TX_PKT_VHDR_FRAG].iov_base = &p->virt_hdr; p->vec[NET_TX_PKT_VHDR_FRAG].iov_len = p->has_virt_hdr ? sizeof p->virt_hdr : 0; p->vec[NET_TX_PKT_L2HDR_FRAG].iov_base = &p->l2_hdr; p->vec[NET_TX_PKT_L3HDR_FRAG].iov_base = NULL; p->vec[NET_TX_PKT_L3HDR_FRAG].iov_len = 0; *pkt = p; } | 26,878 |
0 | static void vtd_init(IntelIOMMUState *s) { memset(s->csr, 0, DMAR_REG_SIZE); memset(s->wmask, 0, DMAR_REG_SIZE); memset(s->w1cmask, 0, DMAR_REG_SIZE); memset(s->womask, 0, DMAR_REG_SIZE); s->iommu_ops.translate = vtd_iommu_translate; s->root = 0; s->root_extended = false; s->dmar_enabled = false; s->iq_head = 0; s->iq_tail = 0; s->iq = 0; s->iq_size = 0; s->qi_enabled = false; s->iq_last_desc_type = VTD_INV_DESC_NONE; s->next_frcd_reg = 0; s->cap = VTD_CAP_FRO | VTD_CAP_NFR | VTD_CAP_ND | VTD_CAP_MGAW | VTD_CAP_SAGAW | VTD_CAP_MAMV | VTD_CAP_PSI; s->ecap = VTD_ECAP_QI | VTD_ECAP_IRO; vtd_reset_context_cache(s); vtd_reset_iotlb(s); /* Define registers with default values and bit semantics */ vtd_define_long(s, DMAR_VER_REG, 0x10UL, 0, 0); vtd_define_quad(s, DMAR_CAP_REG, s->cap, 0, 0); vtd_define_quad(s, DMAR_ECAP_REG, s->ecap, 0, 0); vtd_define_long(s, DMAR_GCMD_REG, 0, 0xff800000UL, 0); vtd_define_long_wo(s, DMAR_GCMD_REG, 0xff800000UL); vtd_define_long(s, DMAR_GSTS_REG, 0, 0, 0); vtd_define_quad(s, DMAR_RTADDR_REG, 0, 0xfffffffffffff000ULL, 0); vtd_define_quad(s, DMAR_CCMD_REG, 0, 0xe0000003ffffffffULL, 0); vtd_define_quad_wo(s, DMAR_CCMD_REG, 0x3ffff0000ULL); /* Advanced Fault Logging not supported */ vtd_define_long(s, DMAR_FSTS_REG, 0, 0, 0x11UL); vtd_define_long(s, DMAR_FECTL_REG, 0x80000000UL, 0x80000000UL, 0); vtd_define_long(s, DMAR_FEDATA_REG, 0, 0x0000ffffUL, 0); vtd_define_long(s, DMAR_FEADDR_REG, 0, 0xfffffffcUL, 0); /* Treated as RsvdZ when EIM in ECAP_REG is not supported * vtd_define_long(s, DMAR_FEUADDR_REG, 0, 0xffffffffUL, 0); */ vtd_define_long(s, DMAR_FEUADDR_REG, 0, 0, 0); /* Treated as RO for implementations that PLMR and PHMR fields reported * as Clear in the CAP_REG. * vtd_define_long(s, DMAR_PMEN_REG, 0, 0x80000000UL, 0); */ vtd_define_long(s, DMAR_PMEN_REG, 0, 0, 0); vtd_define_quad(s, DMAR_IQH_REG, 0, 0, 0); vtd_define_quad(s, DMAR_IQT_REG, 0, 0x7fff0ULL, 0); vtd_define_quad(s, DMAR_IQA_REG, 0, 0xfffffffffffff007ULL, 0); vtd_define_long(s, DMAR_ICS_REG, 0, 0, 0x1UL); vtd_define_long(s, DMAR_IECTL_REG, 0x80000000UL, 0x80000000UL, 0); vtd_define_long(s, DMAR_IEDATA_REG, 0, 0xffffffffUL, 0); vtd_define_long(s, DMAR_IEADDR_REG, 0, 0xfffffffcUL, 0); /* Treadted as RsvdZ when EIM in ECAP_REG is not supported */ vtd_define_long(s, DMAR_IEUADDR_REG, 0, 0, 0); /* IOTLB registers */ vtd_define_quad(s, DMAR_IOTLB_REG, 0, 0Xb003ffff00000000ULL, 0); vtd_define_quad(s, DMAR_IVA_REG, 0, 0xfffffffffffff07fULL, 0); vtd_define_quad_wo(s, DMAR_IVA_REG, 0xfffffffffffff07fULL); /* Fault Recording Registers, 128-bit */ vtd_define_quad(s, DMAR_FRCD_REG_0_0, 0, 0, 0); vtd_define_quad(s, DMAR_FRCD_REG_0_2, 0, 0, 0x8000000000000000ULL); } | 26,879 |