Columns:
project: string, 2 classes (FFmpeg, qemu)
commit_id: string, fixed length 40
target: int64, 0 or 1
func: string, length 26 to 142k
idx: int64, 0 to 27.3k
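Each row below lists these five fields in order: project, commit_id, target, func (the complete C function flattened onto one line), and idx. As a minimal sketch of that layout, the first FFmpeg row can be written out as a key/value record; the Python dict below is only an illustration of the column mapping, not something taken from the table itself, and the long func string is abbreviated here.

```python
# Sketch only: the first FFmpeg row of this table expressed as a Python dict.
# Field names follow the column list above; the "func" value is abbreviated
# ("..."), while in the table it holds the full flattened C source.
record = {
    "project": "FFmpeg",                                      # one of the 2 project values
    "commit_id": "b864098c168f601f7c7393893927a4fd3f79ae5d",  # 40-character commit hash
    "target": 0,                                              # integer label, 0 or 1
    "func": "static int read_decoding_params(MLPDecodeContext *m, ...",  # abbreviated
    "idx": 1346,                                              # row index (shown as 1,346 below)
}
```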
FFmpeg
b864098c168f601f7c7393893927a4fd3f79ae5d
0
static int read_decoding_params(MLPDecodeContext *m, GetBitContext *gbp, unsigned int substr) { SubStream *s = &m->substream[substr]; unsigned int ch; if (s->param_presence_flags & PARAM_PRESENCE) if (get_bits1(gbp)) s->param_presence_flags = get_bits(gbp, 8); if (s->param_presence_flags & PARAM_BLOCKSIZE) if (get_bits1(gbp)) { s->blocksize = get_bits(gbp, 9); if (s->blocksize > MAX_BLOCKSIZE) { av_log(m->avctx, AV_LOG_ERROR, "block size too large\n"); s->blocksize = 0; return -1; } } if (s->param_presence_flags & PARAM_MATRIX) if (get_bits1(gbp)) { if (read_matrix_params(m, s, gbp) < 0) return -1; } if (s->param_presence_flags & PARAM_OUTSHIFT) if (get_bits1(gbp)) for (ch = 0; ch <= s->max_matrix_channel; ch++) { s->output_shift[ch] = get_sbits(gbp, 4); dprintf(m->avctx, "output shift[%d] = %d\n", ch, s->output_shift[ch]); } if (s->param_presence_flags & PARAM_QUANTSTEP) if (get_bits1(gbp)) for (ch = 0; ch <= s->max_channel; ch++) { ChannelParams *cp = &m->channel_params[ch]; s->quant_step_size[ch] = get_bits(gbp, 4); cp->sign_huff_offset = calculate_sign_huff(m, substr, ch); } for (ch = s->min_channel; ch <= s->max_channel; ch++) if (get_bits1(gbp)) { if (read_channel_params(m, substr, gbp, ch) < 0) return -1; } return 0; }
1,346
qemu
a89f364ae8740dfc31b321eed9ee454e996dc3c1
0
static uint64_t pxa2xx_pic_mem_read(void *opaque, hwaddr offset, unsigned size) { PXA2xxPICState *s = (PXA2xxPICState *) opaque; switch (offset) { case ICIP: /* IRQ Pending register */ return s->int_pending[0] & ~s->is_fiq[0] & s->int_enabled[0]; case ICIP2: /* IRQ Pending register 2 */ return s->int_pending[1] & ~s->is_fiq[1] & s->int_enabled[1]; case ICMR: /* Mask register */ return s->int_enabled[0]; case ICMR2: /* Mask register 2 */ return s->int_enabled[1]; case ICLR: /* Level register */ return s->is_fiq[0]; case ICLR2: /* Level register 2 */ return s->is_fiq[1]; case ICCR: /* Idle mask */ return (s->int_idle == 0); case ICFP: /* FIQ Pending register */ return s->int_pending[0] & s->is_fiq[0] & s->int_enabled[0]; case ICFP2: /* FIQ Pending register 2 */ return s->int_pending[1] & s->is_fiq[1] & s->int_enabled[1]; case ICPR: /* Pending register */ return s->int_pending[0]; case ICPR2: /* Pending register 2 */ return s->int_pending[1]; case IPR0 ... IPR31: return s->priority[0 + ((offset - IPR0 ) >> 2)]; case IPR32 ... IPR39: return s->priority[32 + ((offset - IPR32) >> 2)]; case ICHP: /* Highest Priority register */ return pxa2xx_pic_highest(s); default: printf("%s: Bad register offset " REG_FMT "\n", __FUNCTION__, offset); return 0; } }
1,348
qemu
3558f8055f37a34762b7a2a0f02687e6eeab893d
0
static inline void gen_op_addl_A0_seg(DisasContext *s, int reg) { tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base)); if (CODE64(s)) { tcg_gen_ext32u_tl(cpu_A0, cpu_A0); tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0); } else { tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0); tcg_gen_ext32u_tl(cpu_A0, cpu_A0); } }
1,349
qemu
3a720b14b9e09f8553832b835ede9933b70fe9a9
0
void vm_start(void) { if (!vm_running) { cpu_enable_ticks(); vm_running = 1; vm_state_notify(1, 0); qemu_rearm_alarm_timer(alarm_timer); resume_all_vcpus(); } }
1,350
qemu
581b9e29f36eec5de0779c3dbade980e4405d92e
0
static int raw_pread(BlockDriverState *bs, int64_t offset, uint8_t *buf, int count) { BDRVRawState *s = bs->opaque; int size, ret, shift, sum; sum = 0; if (s->aligned_buf != NULL) { if (offset & 0x1ff) { /* align offset on a 512 bytes boundary */ shift = offset & 0x1ff; size = (shift + count + 0x1ff) & ~0x1ff; if (size > ALIGNED_BUFFER_SIZE) size = ALIGNED_BUFFER_SIZE; ret = raw_pread_aligned(bs, offset - shift, s->aligned_buf, size); if (ret < 0) return ret; size = 512 - shift; if (size > count) size = count; memcpy(buf, s->aligned_buf + shift, size); buf += size; offset += size; count -= size; sum += size; if (count == 0) return sum; } if (count & 0x1ff || (uintptr_t) buf & 0x1ff) { /* read on aligned buffer */ while (count) { size = (count + 0x1ff) & ~0x1ff; if (size > ALIGNED_BUFFER_SIZE) size = ALIGNED_BUFFER_SIZE; ret = raw_pread_aligned(bs, offset, s->aligned_buf, size); if (ret < 0) { return ret; } else if (ret == 0) { fprintf(stderr, "raw_pread: read beyond end of file\n"); abort(); } size = ret; if (size > count) size = count; memcpy(buf, s->aligned_buf, size); buf += size; offset += size; count -= size; sum += size; } return sum; } } return raw_pread_aligned(bs, offset, buf, count) + sum; }
1,352
qemu
a87f39543a9259f671c5413723311180ee2ad2a8
0
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat, hwaddr *plen, bool resolve_subpage) { MemoryRegionSection *section; Int128 diff, diff_page; section = address_space_lookup_region(d, addr, resolve_subpage); /* Compute offset within MemoryRegionSection */ addr -= section->offset_within_address_space; /* Compute offset within MemoryRegion */ *xlat = addr + section->offset_within_region; diff_page = int128_make64(((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr); diff = int128_sub(section->mr->size, int128_make64(addr)); diff = int128_min(diff, diff_page); *plen = int128_get64(int128_min(diff, int128_make64(*plen))); return section; }
1,353
FFmpeg
65c14801527068fcaf729eeffc142ffd4682a21a
0
void checkasm_report(const char *name, ...) { static int prev_checked, prev_failed, max_length; if (state.num_checked > prev_checked) { print_cpu_name(); if (*name) { int pad_length = max_length; va_list arg; fprintf(stderr, " - "); va_start(arg, name); pad_length -= vfprintf(stderr, name, arg); va_end(arg); fprintf(stderr, "%*c", FFMAX(pad_length, 0) + 2, '['); } else fprintf(stderr, " - %-*s [", max_length, state.current_func->name); if (state.num_failed == prev_failed) color_printf(COLOR_GREEN, "OK"); else color_printf(COLOR_RED, "FAILED"); fprintf(stderr, "]\n"); prev_checked = state.num_checked; prev_failed = state.num_failed; } else if (!state.cpu_flag) { int length; /* Calculate the amount of padding required to make the output vertically aligned */ if (*name) { va_list arg; va_start(arg, name); length = vsnprintf(NULL, 0, name, arg); va_end(arg); } else length = strlen(state.current_func->name); if (length > max_length) max_length = length; } }
1,354
FFmpeg
403ee835e7913eb9536b22c2b22edfdd700166a9
0
static int rtp_new_av_stream(HTTPContext *c, int stream_index, struct sockaddr_in *dest_addr, HTTPContext *rtsp_c) { AVFormatContext *ctx; AVStream *st; char *ipaddr; URLContext *h = NULL; uint8_t *dummy_buf; int max_packet_size; /* now we can open the relevant output stream */ ctx = avformat_alloc_context(); if (!ctx) return -1; ctx->oformat = av_guess_format("rtp", NULL, NULL); st = av_mallocz(sizeof(AVStream)); if (!st) goto fail; ctx->nb_streams = 1; ctx->streams[0] = st; if (!c->stream->feed || c->stream->feed == c->stream) memcpy(st, c->stream->streams[stream_index], sizeof(AVStream)); else memcpy(st, c->stream->feed->streams[c->stream->feed_streams[stream_index]], sizeof(AVStream)); st->priv_data = NULL; /* build destination RTP address */ ipaddr = inet_ntoa(dest_addr->sin_addr); switch(c->rtp_protocol) { case RTSP_LOWER_TRANSPORT_UDP: case RTSP_LOWER_TRANSPORT_UDP_MULTICAST: /* RTP/UDP case */ /* XXX: also pass as parameter to function ? */ if (c->stream->is_multicast) { int ttl; ttl = c->stream->multicast_ttl; if (!ttl) ttl = 16; snprintf(ctx->filename, sizeof(ctx->filename), "rtp://%s:%d?multicast=1&ttl=%d", ipaddr, ntohs(dest_addr->sin_port), ttl); } else { snprintf(ctx->filename, sizeof(ctx->filename), "rtp://%s:%d", ipaddr, ntohs(dest_addr->sin_port)); } if (url_open(&h, ctx->filename, URL_WRONLY) < 0) goto fail; c->rtp_handles[stream_index] = h; max_packet_size = url_get_max_packet_size(h); break; case RTSP_LOWER_TRANSPORT_TCP: /* RTP/TCP case */ c->rtsp_c = rtsp_c; max_packet_size = RTSP_TCP_MAX_PACKET_SIZE; break; default: goto fail; } http_log("%s:%d - - \"PLAY %s/streamid=%d %s\"\n", ipaddr, ntohs(dest_addr->sin_port), c->stream->filename, stream_index, c->protocol); /* normally, no packets should be output here, but the packet size may be checked */ if (url_open_dyn_packet_buf(&ctx->pb, max_packet_size) < 0) { /* XXX: close stream */ goto fail; } av_set_parameters(ctx, NULL); if (av_write_header(ctx) < 0) { fail: if (h) url_close(h); av_free(ctx); return -1; } avio_close_dyn_buf(ctx->pb, &dummy_buf); av_free(dummy_buf); c->rtp_ctx[stream_index] = ctx; return 0; }
1,355
qemu
e8ede0a8bb5298a6979bcf7ed84ef64a64a4e3fe
0
float32 HELPER(ucf64_muls)(float32 a, float32 b, CPUUniCore32State *env) { return float32_mul(a, b, &env->ucf64.fp_status); }
1,357
qemu
130257dc443574a9da91dc293665be2cfc40245a
0
static void qemu_chr_parse_serial(QemuOpts *opts, ChardevBackend *backend, Error **errp) { const char *device = qemu_opt_get(opts, "path"); if (device == NULL) { error_setg(errp, "chardev: serial/tty: no device path given"); return; } backend->serial = g_new0(ChardevHostdev, 1); backend->serial->device = g_strdup(device); }
1,358
qemu
8a5956ad6392f115521dad774055c737c49fb0dd
0
static void *rcu_update_perf_test(void *arg) { long long n_updates_local = 0; rcu_register_thread(); *(struct rcu_reader_data **)arg = &rcu_reader; atomic_inc(&nthreadsrunning); while (goflag == GOFLAG_INIT) { g_usleep(1000); } while (goflag == GOFLAG_RUN) { synchronize_rcu(); n_updates_local++; } atomic_add(&n_updates, n_updates_local); rcu_unregister_thread(); return NULL; }
1,359
qemu
fd859081453f94c3cbd6527289e41b7fddbf645f
0
static int tpm_passthrough_unix_transfer(TPMPassthruState *tpm_pt, const TPMLocality *locty_data) { return tpm_passthrough_unix_tx_bufs(tpm_pt, locty_data->w_buffer.buffer, locty_data->w_offset, locty_data->r_buffer.buffer, locty_data->r_buffer.size); }
1,362
qemu
8f6e699ddbcad32480fa64796ccf44cbaf5b4b91
0
static void lsi_transfer_data(SCSIRequest *req, uint32_t len) { LSIState *s = DO_UPCAST(LSIState, dev.qdev, req->bus->qbus.parent); int out; if (s->waiting == 1 || !s->current || req->hba_private != s->current || (lsi_irq_on_rsl(s) && !(s->scntl1 & LSI_SCNTL1_CON))) { if (lsi_queue_req(s, req, len)) { return; } } out = (s->sstat1 & PHASE_MASK) == PHASE_DO; /* host adapter (re)connected */ DPRINTF("Data ready tag=0x%x len=%d\n", req->tag, len); s->current->dma_len = len; s->command_complete = 1; if (s->waiting) { if (s->waiting == 1 || s->dbc == 0) { lsi_resume_script(s); } else { lsi_do_dma(s, out); } } }
1,363
qemu
149f54b53b7666a3facd45e86eece60ce7d3b114
0
void tlb_set_page(CPUArchState *env, target_ulong vaddr, hwaddr paddr, int prot, int mmu_idx, target_ulong size) { MemoryRegionSection *section; unsigned int index; target_ulong address; target_ulong code_address; uintptr_t addend; CPUTLBEntry *te; hwaddr iotlb; assert(size >= TARGET_PAGE_SIZE); if (size != TARGET_PAGE_SIZE) { tlb_add_large_page(env, vaddr, size); } section = phys_page_find(address_space_memory.dispatch, paddr >> TARGET_PAGE_BITS); #if defined(DEBUG_TLB) printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx " prot=%x idx=%d pd=0x%08lx\n", vaddr, paddr, prot, mmu_idx, pd); #endif address = vaddr; if (!memory_region_is_ram(section->mr) && !memory_region_is_romd(section->mr)) { /* IO memory case */ address |= TLB_MMIO; addend = 0; } else { /* TLB_MMIO for rom/romd handled below */ addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + memory_region_section_addr(section, paddr); } code_address = address; iotlb = memory_region_section_get_iotlb(env, section, vaddr, paddr, prot, &address); index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); env->iotlb[mmu_idx][index] = iotlb - vaddr; te = &env->tlb_table[mmu_idx][index]; te->addend = addend - vaddr; if (prot & PAGE_READ) { te->addr_read = address; } else { te->addr_read = -1; } if (prot & PAGE_EXEC) { te->addr_code = code_address; } else { te->addr_code = -1; } if (prot & PAGE_WRITE) { if ((memory_region_is_ram(section->mr) && section->readonly) || memory_region_is_romd(section->mr)) { /* Write access calls the I/O callback. */ te->addr_write = address | TLB_MMIO; } else if (memory_region_is_ram(section->mr) && !cpu_physical_memory_is_dirty( section->mr->ram_addr + memory_region_section_addr(section, paddr))) { te->addr_write = address | TLB_NOTDIRTY; } else { te->addr_write = address; } } else { te->addr_write = -1; } }
1,364
qemu
3fb5bf5730b90c08d5d1c027900efae210d9b326
0
void memory_region_set_address(MemoryRegion *mr, hwaddr addr) { MemoryRegion *parent = mr->parent; int priority = mr->priority; bool may_overlap = mr->may_overlap; if (addr == mr->addr || !parent) { mr->addr = addr; return; } memory_region_transaction_begin(); memory_region_ref(mr); memory_region_del_subregion(parent, mr); if (may_overlap) { memory_region_add_subregion_overlap(parent, addr, mr, priority); } else { memory_region_add_subregion(parent, addr, mr); } memory_region_unref(mr); memory_region_transaction_commit(); }
1,365
FFmpeg
7daabccb5d36e9bf649d157ab14ccb2a016f1c53
0
void put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size) { int i; unsigned char __align8 vector128[8] = { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 }; movq_m2r(*vector128, mm1); for (i = 0; i < 8; i++) { movq_m2r(*(block), mm0); packsswb_m2r(*(block + 4), mm0); block += 8; paddb_r2r(mm1, mm0); movq_r2m(mm0, *pixels); pixels += line_size; } }
1,367
qemu
800b0e814bef7cd14ae2bce149c09d70676e93fb
0
static void gd_mouse_mode_change(Notifier *notify, void *data) { gd_update_cursor(container_of(notify, GtkDisplayState, mouse_mode_notifier), FALSE); }
1,368
FFmpeg
4e987f8282ff7658a6f804b9db39954bb59fa72e
0
static void loop_filter(H264Context *h, int start_x, int end_x){ MpegEncContext * const s = &h->s; uint8_t *dest_y, *dest_cb, *dest_cr; int linesize, uvlinesize, mb_x, mb_y; const int end_mb_y= s->mb_y + FRAME_MBAFF; const int old_slice_type= h->slice_type; const int pixel_shift = h->pixel_shift; if(h->deblocking_filter) { for(mb_x= start_x; mb_x<end_x; mb_x++){ for(mb_y=end_mb_y - FRAME_MBAFF; mb_y<= end_mb_y; mb_y++){ int mb_xy, mb_type; mb_xy = h->mb_xy = mb_x + mb_y*s->mb_stride; h->slice_num= h->slice_table[mb_xy]; mb_type= s->current_picture.mb_type[mb_xy]; h->list_count= h->list_counts[mb_xy]; if(FRAME_MBAFF) h->mb_mbaff = h->mb_field_decoding_flag = !!IS_INTERLACED(mb_type); s->mb_x= mb_x; s->mb_y= mb_y; dest_y = s->current_picture.data[0] + ((mb_x << pixel_shift) + mb_y * s->linesize ) * 16; dest_cb = s->current_picture.data[1] + ((mb_x << pixel_shift) + mb_y * s->uvlinesize) * 8; dest_cr = s->current_picture.data[2] + ((mb_x << pixel_shift) + mb_y * s->uvlinesize) * 8; //FIXME simplify above if (MB_FIELD) { linesize = h->mb_linesize = s->linesize * 2; uvlinesize = h->mb_uvlinesize = s->uvlinesize * 2; if(mb_y&1){ //FIXME move out of this function? dest_y -= s->linesize*15; dest_cb-= s->uvlinesize*7; dest_cr-= s->uvlinesize*7; } } else { linesize = h->mb_linesize = s->linesize; uvlinesize = h->mb_uvlinesize = s->uvlinesize; } backup_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize, 0); if(fill_filter_caches(h, mb_type)) continue; h->chroma_qp[0] = get_chroma_qp(h, 0, s->current_picture.qscale_table[mb_xy]); h->chroma_qp[1] = get_chroma_qp(h, 1, s->current_picture.qscale_table[mb_xy]); if (FRAME_MBAFF) { ff_h264_filter_mb (h, mb_x, mb_y, dest_y, dest_cb, dest_cr, linesize, uvlinesize); } else { ff_h264_filter_mb_fast(h, mb_x, mb_y, dest_y, dest_cb, dest_cr, linesize, uvlinesize); } } } } h->slice_type= old_slice_type; s->mb_x= 0; s->mb_y= end_mb_y - FRAME_MBAFF; h->chroma_qp[0] = get_chroma_qp(h, 0, s->qscale); h->chroma_qp[1] = get_chroma_qp(h, 1, s->qscale); }
1,369
FFmpeg
72732f2dddabae1d943ce617e0a27e32d13416fb
0
static int to_integer(char *p, int len) { int ret; char *q = av_malloc(sizeof(char) * len); if (!q) return -1; strncpy(q, p, len); ret = atoi(q); av_free(q); return ret; }
1,370
FFmpeg
e51073fe00d2f7ae1c455d441b305e2b5c8251bc
0
void checkasm_check_blend(void) { uint8_t *top1 = av_malloc(BUF_SIZE); uint8_t *top2 = av_malloc(BUF_SIZE); uint8_t *bot1 = av_malloc(BUF_SIZE); uint8_t *bot2 = av_malloc(BUF_SIZE); uint8_t *dst1 = av_malloc(BUF_SIZE); uint8_t *dst2 = av_malloc(BUF_SIZE); FilterParams param = { .opacity = 1.0, }; #define check_and_report(name, val) \ param.mode = val; \ ff_blend_init(&param, 0); \ if (check_func(param.blend, #name)) \ check_blend_func(); check_and_report(addition, BLEND_ADDITION) check_and_report(addition128, BLEND_ADDITION128) check_and_report(and, BLEND_AND) check_and_report(average, BLEND_AVERAGE) check_and_report(darken, BLEND_DARKEN) check_and_report(difference128, BLEND_DIFFERENCE128) check_and_report(hardmix, BLEND_HARDMIX) check_and_report(lighten, BLEND_LIGHTEN) check_and_report(multiply, BLEND_MULTIPLY) check_and_report(or, BLEND_OR) check_and_report(phoenix, BLEND_PHOENIX) check_and_report(screen, BLEND_SCREEN) check_and_report(subtract, BLEND_SUBTRACT) check_and_report(xor, BLEND_XOR) check_and_report(difference, BLEND_DIFFERENCE) check_and_report(extremity, BLEND_EXTREMITY) check_and_report(negation, BLEND_NEGATION) report("8bit"); av_freep(&top1); av_freep(&top2); av_freep(&bot1); av_freep(&bot2); av_freep(&dst1); av_freep(&dst2); }
1,371
qemu
e23a1b33b53d25510320b26d9f154e19c6c99725
1
qemu_irq *armv7m_init(int flash_size, int sram_size, const char *kernel_filename, const char *cpu_model) { CPUState *env; DeviceState *nvic; /* FIXME: make this local state. */ static qemu_irq pic[64]; qemu_irq *cpu_pic; uint32_t pc; int image_size; uint64_t entry; uint64_t lowaddr; int i; int big_endian; flash_size *= 1024; sram_size *= 1024; if (!cpu_model) cpu_model = "cortex-m3"; env = cpu_init(cpu_model); if (!env) { fprintf(stderr, "Unable to find CPU definition\n"); exit(1); } #if 0 /* > 32Mb SRAM gets complicated because it overlaps the bitband area. We don't have proper commandline options, so allocate half of memory as SRAM, up to a maximum of 32Mb, and the rest as code. */ if (ram_size > (512 + 32) * 1024 * 1024) ram_size = (512 + 32) * 1024 * 1024; sram_size = (ram_size / 2) & TARGET_PAGE_MASK; if (sram_size > 32 * 1024 * 1024) sram_size = 32 * 1024 * 1024; code_size = ram_size - sram_size; #endif /* Flash programming is done via the SCU, so pretend it is ROM. */ cpu_register_physical_memory(0, flash_size, qemu_ram_alloc(flash_size) | IO_MEM_ROM); cpu_register_physical_memory(0x20000000, sram_size, qemu_ram_alloc(sram_size) | IO_MEM_RAM); armv7m_bitband_init(); nvic = qdev_create(NULL, "armv7m_nvic"); env->v7m.nvic = nvic; qdev_init(nvic); cpu_pic = arm_pic_init_cpu(env); sysbus_connect_irq(sysbus_from_qdev(nvic), 0, cpu_pic[ARM_PIC_CPU_IRQ]); for (i = 0; i < 64; i++) { pic[i] = qdev_get_gpio_in(nvic, i); } #ifdef TARGET_WORDS_BIGENDIAN big_endian = 1; #else big_endian = 0; #endif image_size = load_elf(kernel_filename, 0, &entry, &lowaddr, NULL, big_endian, ELF_MACHINE, 1); if (image_size < 0) { image_size = load_image_targphys(kernel_filename, 0, flash_size); lowaddr = 0; } if (image_size < 0) { fprintf(stderr, "qemu: could not load kernel '%s'\n", kernel_filename); exit(1); } /* If the image was loaded at address zero then assume it is a regular ROM image and perform the normal CPU reset sequence. Otherwise jump directly to the entry point. */ if (lowaddr == 0) { env->regs[13] = ldl_phys(0); pc = ldl_phys(4); } else { pc = entry; } env->thumb = pc & 1; env->regs[15] = pc & ~1; /* Hack to map an additional page of ram at the top of the address space. This stops qemu complaining about executing code outside RAM when returning from an exception. */ cpu_register_physical_memory(0xfffff000, 0x1000, qemu_ram_alloc(0x1000) | IO_MEM_RAM); return pic; }
1,374
FFmpeg
0fa8d19987f03444365a5c7f73b7ecf1520b011e
1
static int svq3_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { SVQ3Context *svq3 = avctx->priv_data; H264Context *h = &svq3->h; MpegEncContext *s = &h->s; int buf_size = avpkt->size; int m, mb_type, left; uint8_t *buf; /* special case for last picture */ if (buf_size == 0) { if (s->next_picture_ptr && !s->low_delay) { *(AVFrame *) data = *(AVFrame *) &s->next_picture; s->next_picture_ptr = NULL; *data_size = sizeof(AVFrame); } return 0; } s->mb_x = s->mb_y = h->mb_xy = 0; if (svq3->watermark_key) { svq3->buf = av_fast_realloc(svq3->buf, &svq3->buf_size, buf_size+FF_INPUT_BUFFER_PADDING_SIZE); if (!svq3->buf) return AVERROR(ENOMEM); memcpy(svq3->buf, avpkt->data, buf_size); buf = svq3->buf; } else { buf = avpkt->data; } init_get_bits(&s->gb, buf, 8*buf_size); if (svq3_decode_slice_header(avctx)) return -1; s->pict_type = h->slice_type; s->picture_number = h->slice_num; if (avctx->debug&FF_DEBUG_PICT_INFO){ av_log(h->s.avctx, AV_LOG_DEBUG, "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n", av_get_picture_type_char(s->pict_type), svq3->halfpel_flag, svq3->thirdpel_flag, s->adaptive_quant, s->qscale, h->slice_num); } /* for skipping the frame */ s->current_picture.pict_type = s->pict_type; s->current_picture.key_frame = (s->pict_type == AV_PICTURE_TYPE_I); /* Skip B-frames if we do not have reference frames. */ if (s->last_picture_ptr == NULL && s->pict_type == AV_PICTURE_TYPE_B) return 0; if ( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B) ||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I) || avctx->skip_frame >= AVDISCARD_ALL) return 0; if (s->next_p_frame_damaged) { if (s->pict_type == AV_PICTURE_TYPE_B) return 0; else s->next_p_frame_damaged = 0; } if (ff_h264_frame_start(h) < 0) return -1; if (s->pict_type == AV_PICTURE_TYPE_B) { h->frame_num_offset = (h->slice_num - h->prev_frame_num); if (h->frame_num_offset < 0) { h->frame_num_offset += 256; } if (h->frame_num_offset == 0 || h->frame_num_offset >= h->prev_frame_num_offset) { av_log(h->s.avctx, AV_LOG_ERROR, "error in B-frame picture id\n"); return -1; } } else { h->prev_frame_num = h->frame_num; h->frame_num = h->slice_num; h->prev_frame_num_offset = (h->frame_num - h->prev_frame_num); if (h->prev_frame_num_offset < 0) { h->prev_frame_num_offset += 256; } } for (m = 0; m < 2; m++){ int i; for (i = 0; i < 4; i++){ int j; for (j = -1; j < 4; j++) h->ref_cache[m][scan8[0] + 8*i + j]= 1; if (i < 3) h->ref_cache[m][scan8[0] + 8*i + j]= PART_NOT_AVAILABLE; } } for (s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) { for (s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) { h->mb_xy = s->mb_x + s->mb_y*s->mb_stride; if ( (get_bits_count(&s->gb) + 7) >= s->gb.size_in_bits && ((get_bits_count(&s->gb) & 7) == 0 || show_bits(&s->gb, (-get_bits_count(&s->gb) & 7)) == 0)) { skip_bits(&s->gb, svq3->next_slice_index - get_bits_count(&s->gb)); s->gb.size_in_bits = 8*buf_size; if (svq3_decode_slice_header(avctx)) return -1; /* TODO: support s->mb_skip_run */ } mb_type = svq3_get_ue_golomb(&s->gb); if (s->pict_type == AV_PICTURE_TYPE_I) { mb_type += 8; } else if (s->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4) { mb_type += 4; } if (mb_type > 33 || svq3_decode_mb(svq3, mb_type)) { av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding MB %d %d\n", s->mb_x, s->mb_y); return -1; } if (mb_type != 0) { ff_h264_hl_decode_mb (h); } if (s->pict_type != AV_PICTURE_TYPE_B && !s->low_delay) { s->current_picture.mb_type[s->mb_x + s->mb_y*s->mb_stride] = (s->pict_type == 
AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1; } } ff_draw_horiz_band(s, 16*s->mb_y, 16); } left = buf_size*8 - get_bits_count(&s->gb); if (s->mb_y != s->mb_height || s->mb_x != s->mb_width) { av_log(avctx, AV_LOG_INFO, "frame num %d incomplete pic x %d y %d left %d\n", avctx->frame_number, s->mb_y, s->mb_x, left); //av_hex_dump(stderr, buf+buf_size-8, 8); } if (left < 0) { av_log(avctx, AV_LOG_ERROR, "frame num %d left %d\n", avctx->frame_number, left); return -1; } MPV_frame_end(s); if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) { *(AVFrame *) data = *(AVFrame *) &s->current_picture; } else { *(AVFrame *) data = *(AVFrame *) &s->last_picture; } /* Do not output the last pic after seeking. */ if (s->last_picture_ptr || s->low_delay) { *data_size = sizeof(AVFrame); } return buf_size; }
1,375
FFmpeg
fc5c49ab3247533e0a5cb203cf7122143389eb5c
1
int mpeg4_decode_video_packet_header(MpegEncContext *s) { int mb_num_bits= av_log2(s->mb_num - 1) + 1; int header_extension=0, mb_num, len; /* is there enough space left for a video packet + header */ if( get_bits_count(&s->gb) > s->gb.size_in_bits-20) return -1; for(len=0; len<32; len++){ if(get_bits1(&s->gb)) break; } if(len!=ff_mpeg4_get_video_packet_prefix_length(s)){ av_log(s->avctx, AV_LOG_ERROR, "marker does not match f_code\n"); return -1; } if(s->shape != RECT_SHAPE){ header_extension= get_bits1(&s->gb); //FIXME more stuff here } mb_num= get_bits(&s->gb, mb_num_bits); if(mb_num>=s->mb_num){ av_log(s->avctx, AV_LOG_ERROR, "illegal mb_num in video packet (%d %d) \n", mb_num, s->mb_num); return -1; } s->mb_x= mb_num % s->mb_width; s->mb_y= mb_num / s->mb_width; if(s->shape != BIN_ONLY_SHAPE){ int qscale= get_bits(&s->gb, s->quant_precision); if(qscale) s->chroma_qscale=s->qscale= qscale; } if(s->shape == RECT_SHAPE){ header_extension= get_bits1(&s->gb); } if(header_extension){ int time_incr=0; while (get_bits1(&s->gb) != 0) time_incr++; check_marker(&s->gb, "before time_increment in video packed header"); skip_bits(&s->gb, s->time_increment_bits); /* time_increment */ check_marker(&s->gb, "before vop_coding_type in video packed header"); skip_bits(&s->gb, 2); /* vop coding type */ //FIXME not rect stuff here if(s->shape != BIN_ONLY_SHAPE){ skip_bits(&s->gb, 3); /* intra dc vlc threshold */ //FIXME don't just ignore everything if(s->pict_type == AV_PICTURE_TYPE_S && s->vol_sprite_usage==GMC_SPRITE){ mpeg4_decode_sprite_trajectory(s, &s->gb); av_log(s->avctx, AV_LOG_ERROR, "untested\n"); } //FIXME reduced res stuff here if (s->pict_type != AV_PICTURE_TYPE_I) { int f_code = get_bits(&s->gb, 3); /* fcode_for */ if(f_code==0){ av_log(s->avctx, AV_LOG_ERROR, "Error, video packet header damaged (f_code=0)\n"); } } if (s->pict_type == AV_PICTURE_TYPE_B) { int b_code = get_bits(&s->gb, 3); if(b_code==0){ av_log(s->avctx, AV_LOG_ERROR, "Error, video packet header damaged (b_code=0)\n"); } } } } //FIXME new-pred stuff return 0; }
1,376
FFmpeg
464a631c34967f4c326b2de8b3cf4903d3e5b01c
0
static void opt_video_rc_override_string(char *arg) { video_rc_override_string = arg; }
1,378
FFmpeg
5e689b65ce9c945c61d0f07394541a0440316757
1
av_cold int ff_dvvideo_init(AVCodecContext *avctx) { DVVideoContext *s = avctx->priv_data; DSPContext dsp; static int done = 0; int i, j; if (!done) { VLC dv_vlc; uint16_t new_dv_vlc_bits[NB_DV_VLC*2]; uint8_t new_dv_vlc_len[NB_DV_VLC*2]; uint8_t new_dv_vlc_run[NB_DV_VLC*2]; int16_t new_dv_vlc_level[NB_DV_VLC*2]; done = 1; /* it's faster to include sign bit in a generic VLC parsing scheme */ for (i = 0, j = 0; i < NB_DV_VLC; i++, j++) { new_dv_vlc_bits[j] = dv_vlc_bits[i]; new_dv_vlc_len[j] = dv_vlc_len[i]; new_dv_vlc_run[j] = dv_vlc_run[i]; new_dv_vlc_level[j] = dv_vlc_level[i]; if (dv_vlc_level[i]) { new_dv_vlc_bits[j] <<= 1; new_dv_vlc_len[j]++; j++; new_dv_vlc_bits[j] = (dv_vlc_bits[i] << 1) | 1; new_dv_vlc_len[j] = dv_vlc_len[i] + 1; new_dv_vlc_run[j] = dv_vlc_run[i]; new_dv_vlc_level[j] = -dv_vlc_level[i]; } } /* NOTE: as a trick, we use the fact the no codes are unused to accelerate the parsing of partial codes */ init_vlc(&dv_vlc, TEX_VLC_BITS, j, new_dv_vlc_len, 1, 1, new_dv_vlc_bits, 2, 2, 0); assert(dv_vlc.table_size == 1184); for (i = 0; i < dv_vlc.table_size; i++){ int code = dv_vlc.table[i][0]; int len = dv_vlc.table[i][1]; int level, run; if (len < 0){ //more bits needed run = 0; level = code; } else { run = new_dv_vlc_run [code] + 1; level = new_dv_vlc_level[code]; } ff_dv_rl_vlc[i].len = len; ff_dv_rl_vlc[i].level = level; ff_dv_rl_vlc[i].run = run; } ff_free_vlc(&dv_vlc); } /* Generic DSP setup */ ff_dsputil_init(&dsp, avctx); ff_set_cmp(&dsp, dsp.ildct_cmp, avctx->ildct_cmp); s->get_pixels = dsp.get_pixels; s->ildct_cmp = dsp.ildct_cmp[5]; /* 88DCT setup */ s->fdct[0] = dsp.fdct; s->idct_put[0] = dsp.idct_put; for (i = 0; i < 64; i++) s->dv_zigzag[0][i] = dsp.idct_permutation[ff_zigzag_direct[i]]; /* 248DCT setup */ s->fdct[1] = dsp.fdct248; s->idct_put[1] = ff_simple_idct248_put; // FIXME: need to add it to DSP if (avctx->lowres){ for (i = 0; i < 64; i++){ int j = ff_zigzag248_direct[i]; s->dv_zigzag[1][i] = dsp.idct_permutation[(j & 7) + (j & 8) * 4 + (j & 48) / 2]; } }else memcpy(s->dv_zigzag[1], ff_zigzag248_direct, 64); avctx->coded_frame = &s->picture; s->avctx = avctx; avctx->chroma_sample_location = AVCHROMA_LOC_TOPLEFT; return 0; }
1,379
FFmpeg
b7dc6f662868fbdad779c61c233b1d19d8b89d3c
1
static inline void RENAME(hyscale)(uint16_t *dst, int dstWidth, uint8_t *src, int srcW, int xInc, int flags, int canMMX2BeUsed, int16_t *hLumFilter, int16_t *hLumFilterPos, int hLumFilterSize, void *funnyYCode, int srcFormat, uint8_t *formatConvBuffer) { if(srcFormat==IMGFMT_YUY2) { RENAME(yuy2ToY)(formatConvBuffer, src, srcW); src= formatConvBuffer; } else if(srcFormat==IMGFMT_BGR32) { RENAME(bgr32ToY)(formatConvBuffer, src, srcW); src= formatConvBuffer; } else if(srcFormat==IMGFMT_BGR24) { RENAME(bgr24ToY)(formatConvBuffer, src, srcW); src= formatConvBuffer; } else if(srcFormat==IMGFMT_BGR16) { RENAME(bgr16ToY)(formatConvBuffer, src, srcW); src= formatConvBuffer; } else if(srcFormat==IMGFMT_BGR15) { RENAME(bgr15ToY)(formatConvBuffer, src, srcW); src= formatConvBuffer; } else if(srcFormat==IMGFMT_RGB32) { RENAME(rgb32ToY)(formatConvBuffer, src, srcW); src= formatConvBuffer; } else if(srcFormat==IMGFMT_RGB24) { RENAME(rgb24ToY)(formatConvBuffer, src, srcW); src= formatConvBuffer; } #ifdef HAVE_MMX // use the new MMX scaler if th mmx2 cant be used (its faster than the x86asm one) if(!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed)) #else if(!(flags&SWS_FAST_BILINEAR)) #endif { RENAME(hScale)(dst, dstWidth, src, srcW, xInc, hLumFilter, hLumFilterPos, hLumFilterSize); } else // Fast Bilinear upscale / crap downscale { #ifdef ARCH_X86 #ifdef HAVE_MMX2 int i; if(canMMX2BeUsed) { asm volatile( "pxor %%mm7, %%mm7 \n\t" "pxor %%mm2, %%mm2 \n\t" // 2*xalpha "movd %5, %%mm6 \n\t" // xInc&0xFFFF "punpcklwd %%mm6, %%mm6 \n\t" "punpcklwd %%mm6, %%mm6 \n\t" "movq %%mm6, %%mm2 \n\t" "psllq $16, %%mm2 \n\t" "paddw %%mm6, %%mm2 \n\t" "psllq $16, %%mm2 \n\t" "paddw %%mm6, %%mm2 \n\t" "psllq $16, %%mm2 \n\t" //0,t,2t,3t t=xInc&0xFF "movq %%mm2, %%mm4 \n\t" "movd %4, %%mm6 \n\t" //(xInc*4)&0xFFFF "punpcklwd %%mm6, %%mm6 \n\t" "punpcklwd %%mm6, %%mm6 \n\t" "xorl %%eax, %%eax \n\t" // i "movl %0, %%esi \n\t" // src "movl %1, %%edi \n\t" // buf1 "movl %3, %%edx \n\t" // (xInc*4)>>16 "xorl %%ecx, %%ecx \n\t" "xorl %%ebx, %%ebx \n\t" "movw %4, %%bx \n\t" // (xInc*4)&0xFFFF #define FUNNY_Y_CODE \ PREFETCH" 1024(%%esi) \n\t"\ PREFETCH" 1056(%%esi) \n\t"\ PREFETCH" 1088(%%esi) \n\t"\ "call *%6 \n\t"\ "movq %%mm4, %%mm2 \n\t"\ "xorl %%ecx, %%ecx \n\t" FUNNY_Y_CODE FUNNY_Y_CODE FUNNY_Y_CODE FUNNY_Y_CODE FUNNY_Y_CODE FUNNY_Y_CODE FUNNY_Y_CODE FUNNY_Y_CODE :: "m" (src), "m" (dst), "m" (dstWidth), "m" ((xInc*4)>>16), "m" ((xInc*4)&0xFFFF), "m" (xInc&0xFFFF), "m" (funnyYCode) : "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi" ); for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) dst[i] = src[srcW-1]*128; } else { #endif //NO MMX just normal asm ... 
asm volatile( "xorl %%eax, %%eax \n\t" // i "xorl %%ebx, %%ebx \n\t" // xx "xorl %%ecx, %%ecx \n\t" // 2*xalpha ".balign 16 \n\t" "1: \n\t" "movzbl (%0, %%ebx), %%edi \n\t" //src[xx] "movzbl 1(%0, %%ebx), %%esi \n\t" //src[xx+1] "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx] "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha "shll $16, %%edi \n\t" "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha) "movl %1, %%edi \n\t" "shrl $9, %%esi \n\t" "movw %%si, (%%edi, %%eax, 2) \n\t" "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF "adcl %3, %%ebx \n\t" //xx+= xInc>>8 + carry "movzbl (%0, %%ebx), %%edi \n\t" //src[xx] "movzbl 1(%0, %%ebx), %%esi \n\t" //src[xx+1] "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx] "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha "shll $16, %%edi \n\t" "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha) "movl %1, %%edi \n\t" "shrl $9, %%esi \n\t" "movw %%si, 2(%%edi, %%eax, 2) \n\t" "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF "adcl %3, %%ebx \n\t" //xx+= xInc>>8 + carry "addl $2, %%eax \n\t" "cmpl %2, %%eax \n\t" " jb 1b \n\t" :: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc>>16), "m" (xInc&0xFFFF) : "%eax", "%ebx", "%ecx", "%edi", "%esi" ); #ifdef HAVE_MMX2 } //if MMX2 cant be used #endif #else int i; unsigned int xpos=0; for(i=0;i<dstWidth;i++) { register unsigned int xx=xpos>>16; register unsigned int xalpha=(xpos&0xFFFF)>>9; dst[i]= (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha; xpos+=xInc; } #endif } }
1,380
qemu
b3a6a2e0417c78ec5491347eb85a7d125a5fefdc
1
int32 float128_to_int32_round_to_zero( float128 a STATUS_PARAM ) { flag aSign; int32 aExp, shiftCount; uint64_t aSig0, aSig1, savedASig; int32 z; aSig1 = extractFloat128Frac1( a ); aSig0 = extractFloat128Frac0( a ); aExp = extractFloat128Exp( a ); aSign = extractFloat128Sign( a ); aSig0 |= ( aSig1 != 0 ); if ( 0x401E < aExp ) { if ( ( aExp == 0x7FFF ) && aSig0 ) aSign = 0; goto invalid; } else if ( aExp < 0x3FFF ) { if ( aExp || aSig0 ) STATUS(float_exception_flags) |= float_flag_inexact; return 0; } aSig0 |= LIT64( 0x0001000000000000 ); shiftCount = 0x402F - aExp; savedASig = aSig0; aSig0 >>= shiftCount; z = aSig0; if ( aSign ) z = - z; if ( ( z < 0 ) ^ aSign ) { invalid: float_raise( float_flag_invalid STATUS_VAR); return aSign ? (int32_t) 0x80000000 : 0x7FFFFFFF; } if ( ( aSig0<<shiftCount ) != savedASig ) { STATUS(float_exception_flags) |= float_flag_inexact; } return z; }
1,382
FFmpeg
4279613a2652cdf2bee564f4b7244567e5ba91ba
1
static int decode_ihdr_chunk(AVCodecContext *avctx, PNGDecContext *s, uint32_t length) { if (length != 13) return AVERROR_INVALIDDATA; if (s->state & PNG_IDAT) { av_log(avctx, AV_LOG_ERROR, "IHDR after IDAT\n"); return AVERROR_INVALIDDATA; if (s->state & PNG_IHDR) { av_log(avctx, AV_LOG_ERROR, "Multiple IHDR\n"); return AVERROR_INVALIDDATA; s->width = s->cur_w = bytestream2_get_be32(&s->gb); s->height = s->cur_h = bytestream2_get_be32(&s->gb); if (av_image_check_size(s->width, s->height, 0, avctx)) { s->cur_w = s->cur_h = s->width = s->height = 0; av_log(avctx, AV_LOG_ERROR, "Invalid image size\n"); return AVERROR_INVALIDDATA; s->bit_depth = bytestream2_get_byte(&s->gb); s->color_type = bytestream2_get_byte(&s->gb); s->compression_type = bytestream2_get_byte(&s->gb); s->filter_type = bytestream2_get_byte(&s->gb); s->interlace_type = bytestream2_get_byte(&s->gb); bytestream2_skip(&s->gb, 4); /* crc */ s->state |= PNG_IHDR; if (avctx->debug & FF_DEBUG_PICT_INFO) av_log(avctx, AV_LOG_DEBUG, "width=%d height=%d depth=%d color_type=%d " "compression_type=%d filter_type=%d interlace_type=%d\n", s->width, s->height, s->bit_depth, s->color_type, s->compression_type, s->filter_type, s->interlace_type); return 0; error: s->cur_w = s->cur_h = s->width = s->height = 0; s->bit_depth = 8; return AVERROR_INVALIDDATA;
1,383
qemu
9c4bbee9e3b83544257e82566342c29e15a88637
1
int page_unprotect(target_ulong address, uintptr_t pc) { unsigned int prot; bool current_tb_invalidated; PageDesc *p; target_ulong host_start, host_end, addr; /* Technically this isn't safe inside a signal handler. However we know this only ever happens in a synchronous SEGV handler, so in practice it seems to be ok. */ mmap_lock(); p = page_find(address >> TARGET_PAGE_BITS); if (!p) { mmap_unlock(); return 0; } /* if the page was really writable, then we change its protection back to writable */ if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) { host_start = address & qemu_host_page_mask; host_end = host_start + qemu_host_page_size; prot = 0; current_tb_invalidated = false; for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) { p = page_find(addr >> TARGET_PAGE_BITS); p->flags |= PAGE_WRITE; prot |= p->flags; /* and since the content will be modified, we must invalidate the corresponding translated code. */ current_tb_invalidated |= tb_invalidate_phys_page(addr, pc); #ifdef CONFIG_USER_ONLY if (DEBUG_TB_CHECK_GATE) { tb_invalidate_check(addr); } #endif } mprotect((void *)g2h(host_start), qemu_host_page_size, prot & PAGE_BITS); mmap_unlock(); /* If current TB was invalidated return to main loop */ return current_tb_invalidated ? 2 : 1; } mmap_unlock(); return 0; }
1,384
qemu
c572f23a3e7180dbeab5e86583e43ea2afed6271
1
static void v9fs_readlink(void *opaque) { V9fsPDU *pdu = opaque; size_t offset = 7; V9fsString target; int32_t fid; int err = 0; V9fsFidState *fidp; pdu_unmarshal(pdu, offset, "d", &fid); fidp = get_fid(pdu, fid); if (fidp == NULL) { err = -ENOENT; goto out_nofid; } v9fs_string_init(&target); err = v9fs_co_readlink(pdu, &fidp->path, &target); if (err < 0) { goto out; } offset += pdu_marshal(pdu, offset, "s", &target); err = offset; v9fs_string_free(&target); out: put_fid(pdu, fidp); out_nofid: trace_v9fs_readlink_return(pdu->tag, pdu->id, target.data); complete_pdu(pdu->s, pdu, err); }
1,386
qemu
0e1dac6c41f337f997814344a847162968c20c64
1
int kvm_init(MachineClass *mc) { static const char upgrade_note[] = "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n" "(see http://sourceforge.net/projects/kvm).\n"; struct { const char *name; int num; } num_cpus[] = { { "SMP", smp_cpus }, { "hotpluggable", max_cpus }, { NULL, } }, *nc = num_cpus; int soft_vcpus_limit, hard_vcpus_limit; KVMState *s; const KVMCapabilityInfo *missing_cap; int ret; int i, type = 0; const char *kvm_type; s = g_malloc0(sizeof(KVMState)); /* * On systems where the kernel can support different base page * sizes, host page size may be different from TARGET_PAGE_SIZE, * even with KVM. TARGET_PAGE_SIZE is assumed to be the minimum * page size for the system though. */ assert(TARGET_PAGE_SIZE <= getpagesize()); page_size_init(); #ifdef KVM_CAP_SET_GUEST_DEBUG QTAILQ_INIT(&s->kvm_sw_breakpoints); #endif s->vmfd = -1; s->fd = qemu_open("/dev/kvm", O_RDWR); if (s->fd == -1) { fprintf(stderr, "Could not access KVM kernel module: %m\n"); ret = -errno; goto err; } ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0); if (ret < KVM_API_VERSION) { if (ret > 0) { ret = -EINVAL; } fprintf(stderr, "kvm version too old\n"); goto err; } if (ret > KVM_API_VERSION) { ret = -EINVAL; fprintf(stderr, "kvm version not supported\n"); goto err; } s->nr_slots = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS); /* If unspecified, use the default value */ if (!s->nr_slots) { s->nr_slots = 32; } s->slots = g_malloc0(s->nr_slots * sizeof(KVMSlot)); for (i = 0; i < s->nr_slots; i++) { s->slots[i].slot = i; } /* check the vcpu limits */ soft_vcpus_limit = kvm_recommended_vcpus(s); hard_vcpus_limit = kvm_max_vcpus(s); while (nc->name) { if (nc->num > soft_vcpus_limit) { fprintf(stderr, "Warning: Number of %s cpus requested (%d) exceeds " "the recommended cpus supported by KVM (%d)\n", nc->name, nc->num, soft_vcpus_limit); if (nc->num > hard_vcpus_limit) { fprintf(stderr, "Number of %s cpus requested (%d) exceeds " "the maximum cpus supported by KVM (%d)\n", nc->name, nc->num, hard_vcpus_limit); exit(1); } } nc++; } kvm_type = qemu_opt_get(qemu_get_machine_opts(), "kvm-type"); if (mc->kvm_type) { type = mc->kvm_type(kvm_type); } else if (kvm_type) { fprintf(stderr, "Invalid argument kvm-type=%s\n", kvm_type); goto err; } do { ret = kvm_ioctl(s, KVM_CREATE_VM, type); } while (ret == -EINTR); if (ret < 0) { fprintf(stderr, "ioctl(KVM_CREATE_VM) failed: %d %s\n", -ret, strerror(-ret)); #ifdef TARGET_S390X fprintf(stderr, "Please add the 'switch_amode' kernel parameter to " "your host kernel command line\n"); #endif goto err; } s->vmfd = ret; missing_cap = kvm_check_extension_list(s, kvm_required_capabilites); if (!missing_cap) { missing_cap = kvm_check_extension_list(s, kvm_arch_required_capabilities); } if (missing_cap) { ret = -EINVAL; fprintf(stderr, "kvm does not support %s\n%s", missing_cap->name, upgrade_note); goto err; } s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO); s->broken_set_mem_region = 1; ret = kvm_check_extension(s, KVM_CAP_JOIN_MEMORY_REGIONS_WORKS); if (ret > 0) { s->broken_set_mem_region = 0; } #ifdef KVM_CAP_VCPU_EVENTS s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS); #endif s->robust_singlestep = kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP); #ifdef KVM_CAP_DEBUGREGS s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS); #endif #ifdef KVM_CAP_XSAVE s->xsave = kvm_check_extension(s, KVM_CAP_XSAVE); #endif #ifdef KVM_CAP_XCRS s->xcrs = kvm_check_extension(s, KVM_CAP_XCRS); #endif #ifdef KVM_CAP_PIT_STATE2 s->pit_state2 = 
kvm_check_extension(s, KVM_CAP_PIT_STATE2); #endif #ifdef KVM_CAP_IRQ_ROUTING s->direct_msi = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0); #endif s->intx_set_mask = kvm_check_extension(s, KVM_CAP_PCI_2_3); s->irq_set_ioctl = KVM_IRQ_LINE; if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) { s->irq_set_ioctl = KVM_IRQ_LINE_STATUS; } #ifdef KVM_CAP_READONLY_MEM kvm_readonly_mem_allowed = (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0); #endif ret = kvm_arch_init(s); if (ret < 0) { goto err; } ret = kvm_irqchip_create(s); if (ret < 0) { goto err; } kvm_state = s; memory_listener_register(&kvm_memory_listener, &address_space_memory); memory_listener_register(&kvm_io_listener, &address_space_io); s->many_ioeventfds = kvm_check_many_ioeventfds(); cpu_interrupt_handler = kvm_handle_interrupt; return 0; err: if (s->vmfd >= 0) { close(s->vmfd); } if (s->fd != -1) { close(s->fd); } g_free(s->slots); g_free(s); return ret; }
1,389
qemu
4c4f0e4801ac79632d03867c88aafc90b4ce503c
1
static void openpic_msi_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) { OpenPICState *opp = opaque; int idx = opp->irq_msi; int srs, ibs; DPRINTF("%s: addr " TARGET_FMT_plx " <= %08x\n", __func__, addr, val); if (addr & 0xF) { return; } switch (addr) { case MSIIR_OFFSET: srs = val >> MSIIR_SRS_SHIFT; idx += srs; ibs = (val & MSIIR_IBS_MASK) >> MSIIR_IBS_SHIFT; opp->msi[srs].msir |= 1 << ibs; openpic_set_irq(opp, idx, 1); break; default: /* most registers are read-only, thus ignored */ break; } }
1,390
FFmpeg
b45411e24a7566a1191f9526a4adea0f76e9cb86
1
static int eval_refl(const int16_t *coefs, int *refl, RA144Context *ractx) { int retval = 0; int b, c, i; unsigned int u; int buffer1[10]; int buffer2[10]; int *bp1 = buffer1; int *bp2 = buffer2; for (i=0; i < 10; i++) buffer2[i] = coefs[i]; u = refl[9] = bp2[9]; if (u + 0x1000 > 0x1fff) { av_log(ractx, AV_LOG_ERROR, "Overflow. Broken sample?\n"); return 0; } for (c=8; c >= 0; c--) { if (u == 0x1000) u++; if (u == 0xfffff000) u--; b = 0x1000-((u * u) >> 12); if (b == 0) b++; for (u=0; u<=c; u++) bp1[u] = ((bp2[u] - ((refl[c+1] * bp2[c-u]) >> 12)) * (0x1000000 / b)) >> 12; refl[c] = u = bp1[c]; if ((u + 0x1000) > 0x1fff) retval = 1; FFSWAP(int *, bp1, bp2); } return retval; }
1,391
qemu
b3e27c3aee8f5a96debfe0346e9c0e3a641a8516
1
static void vfio_disable_interrupts(VFIOPCIDevice *vdev) { switch (vdev->interrupt) { case VFIO_INT_INTx: vfio_disable_intx(vdev); break; case VFIO_INT_MSI: vfio_disable_msi(vdev); break; case VFIO_INT_MSIX: vfio_disable_msix(vdev); break; } }
1,392
FFmpeg
cf48b006400e34e1177d0ca22d1cdb5c900a199a
1
static inline int get_ue_code(GetBitContext *gb, int order) { if (order) { int ret = get_ue_golomb(gb) << order; return ret + get_bits(gb, order); } return get_ue_golomb(gb); }
1,393
FFmpeg
5f5e6af16982c172997abc75ff7a401124dd3dda
1
int audio_resample(ReSampleContext *s, short *output, short *input, int nb_samples) { int i, nb_samples1; short *bufin[2]; short *bufout[2]; short *buftmp2[2], *buftmp3[2]; short *output_bak = NULL; int lenout; if (s->input_channels == s->output_channels && s->ratio == 1.0 && 0) { /* nothing to do */ memcpy(output, input, nb_samples * s->input_channels * sizeof(short)); return nb_samples; } if (s->sample_fmt[0] != SAMPLE_FMT_S16) { int istride[1] = { s->sample_size[0] }; int ostride[1] = { 2 }; const void *ibuf[1] = { input }; void *obuf[1]; unsigned input_size = nb_samples*s->input_channels*s->sample_size[0]; if (!s->buffer_size[0] || s->buffer_size[0] < input_size) { av_free(s->buffer[0]); s->buffer_size[0] = input_size; s->buffer[0] = av_malloc(s->buffer_size[0]); if (!s->buffer[0]) { av_log(s, AV_LOG_ERROR, "Could not allocate buffer\n"); return 0; } } obuf[0] = s->buffer[0]; if (av_audio_convert(s->convert_ctx[0], obuf, ostride, ibuf, istride, nb_samples*s->input_channels) < 0) { av_log(s, AV_LOG_ERROR, "Audio sample format conversion failed\n"); return 0; } input = s->buffer[0]; } lenout= 4*nb_samples * s->ratio + 16; if (s->sample_fmt[1] != SAMPLE_FMT_S16) { output_bak = output; if (!s->buffer_size[1] || s->buffer_size[1] < lenout) { av_free(s->buffer[1]); s->buffer_size[1] = lenout; s->buffer[1] = av_malloc(s->buffer_size[1]); if (!s->buffer[1]) { av_log(s, AV_LOG_ERROR, "Could not allocate buffer\n"); return 0; } } output = s->buffer[1]; } /* XXX: move those malloc to resample init code */ for(i=0; i<s->filter_channels; i++){ bufin[i]= av_malloc( (nb_samples + s->temp_len) * sizeof(short) ); memcpy(bufin[i], s->temp[i], s->temp_len * sizeof(short)); buftmp2[i] = bufin[i] + s->temp_len; } /* make some zoom to avoid round pb */ bufout[0]= av_malloc( lenout * sizeof(short) ); bufout[1]= av_malloc( lenout * sizeof(short) ); if (s->input_channels == 2 && s->output_channels == 1) { buftmp3[0] = output; stereo_to_mono(buftmp2[0], input, nb_samples); } else if (s->output_channels >= 2 && s->input_channels == 1) { buftmp3[0] = bufout[0]; memcpy(buftmp2[0], input, nb_samples*sizeof(short)); } else if (s->output_channels >= 2) { buftmp3[0] = bufout[0]; buftmp3[1] = bufout[1]; stereo_split(buftmp2[0], buftmp2[1], input, nb_samples); } else { buftmp3[0] = output; memcpy(buftmp2[0], input, nb_samples*sizeof(short)); } nb_samples += s->temp_len; /* resample each channel */ nb_samples1 = 0; /* avoid warning */ for(i=0;i<s->filter_channels;i++) { int consumed; int is_last= i+1 == s->filter_channels; nb_samples1 = av_resample(s->resample_context, buftmp3[i], bufin[i], &consumed, nb_samples, lenout, is_last); s->temp_len= nb_samples - consumed; s->temp[i]= av_realloc(s->temp[i], s->temp_len*sizeof(short)); memcpy(s->temp[i], bufin[i] + consumed, s->temp_len*sizeof(short)); } if (s->output_channels == 2 && s->input_channels == 1) { mono_to_stereo(output, buftmp3[0], nb_samples1); } else if (s->output_channels == 2) { stereo_mux(output, buftmp3[0], buftmp3[1], nb_samples1); } else if (s->output_channels == 6) { ac3_5p1_mux(output, buftmp3[0], buftmp3[1], nb_samples1); } if (s->sample_fmt[1] != SAMPLE_FMT_S16) { int istride[1] = { 2 }; int ostride[1] = { s->sample_size[1] }; const void *ibuf[1] = { output }; void *obuf[1] = { output_bak }; if (av_audio_convert(s->convert_ctx[1], obuf, ostride, ibuf, istride, nb_samples1*s->output_channels) < 0) { av_log(s, AV_LOG_ERROR, "Audio sample format convertion failed\n"); return 0; } } for(i=0; i<s->filter_channels; i++) av_free(bufin[i]); av_free(bufout[0]); 
av_free(bufout[1]); return nb_samples1; }
1,394
FFmpeg
61c6eef5456f2bc8b1dc49a0a759c975551cea29
1
int ff_h264_decode_extradata(H264Context *h) { AVCodecContext *avctx = h->s.avctx; if (avctx->extradata[0] == 1) { int i, cnt, nalsize; unsigned char *p = avctx->extradata; h->is_avc = 1; if (avctx->extradata_size < 7) { av_log(avctx, AV_LOG_ERROR, "avcC too short\n"); return -1; } /* sps and pps in the avcC always have length coded with 2 bytes, * so put a fake nal_length_size = 2 while parsing them */ h->nal_length_size = 2; // Decode sps from avcC cnt = *(p + 5) & 0x1f; // Number of sps p += 6; for (i = 0; i < cnt; i++) { nalsize = AV_RB16(p) + 2; if (p - avctx->extradata + nalsize > avctx->extradata_size) return -1; if (decode_nal_units(h, p, nalsize) < 0) { av_log(avctx, AV_LOG_ERROR, "Decoding sps %d from avcC failed\n", i); return -1; } p += nalsize; } // Decode pps from avcC cnt = *(p++); // Number of pps for (i = 0; i < cnt; i++) { nalsize = AV_RB16(p) + 2; if (p - avctx->extradata + nalsize > avctx->extradata_size) return -1; if (decode_nal_units(h, p, nalsize) < 0) { av_log(avctx, AV_LOG_ERROR, "Decoding pps %d from avcC failed\n", i); return -1; } p += nalsize; } // Now store right nal length size, that will be used to parse all other nals h->nal_length_size = (avctx->extradata[4] & 0x03) + 1; } else { h->is_avc = 0; if (decode_nal_units(h, avctx->extradata, avctx->extradata_size) < 0) return -1; } return 0; }
1,395
FFmpeg
1c0e205fab4bd5bbfa0399af2cd5e281b414b3d5
1
void video_encode_example(const char *filename) { AVCodec *codec; AVCodecContext *c= NULL; int i, out_size, size, x, y, outbuf_size; FILE *f; AVFrame *picture; uint8_t *outbuf, *picture_buf; printf("Video encoding\n"); /* find the mpeg1 video encoder */ codec = avcodec_find_encoder(CODEC_ID_MPEG1VIDEO); if (!codec) { fprintf(stderr, "codec not found\n"); exit(1); } c= avcodec_alloc_context(); picture= avcodec_alloc_frame(); /* put sample parameters */ c->bit_rate = 400000; /* resolution must be a multiple of two */ c->width = 352; c->height = 288; /* frames per second */ c->frame_rate = 25; c->frame_rate_base= 1; c->gop_size = 10; /* emit one intra frame every ten frames */ c->max_b_frames=1; /* open it */ if (avcodec_open(c, codec) < 0) { fprintf(stderr, "could not open codec\n"); exit(1); } /* the codec gives us the frame size, in samples */ f = fopen(filename, "w"); if (!f) { fprintf(stderr, "could not open %s\n", filename); exit(1); } /* alloc image and output buffer */ outbuf_size = 100000; outbuf = malloc(outbuf_size); size = c->width * c->height; picture_buf = malloc((size * 3) / 2); /* size for YUV 420 */ picture->data[0] = picture_buf; picture->data[1] = picture->data[0] + size; picture->data[2] = picture->data[1] + size / 4; picture->linesize[0] = c->width; picture->linesize[1] = c->width / 2; picture->linesize[2] = c->width / 2; /* encode 1 second of video */ for(i=0;i<25;i++) { fflush(stdout); /* prepare a dummy image */ /* Y */ for(y=0;y<c->height;y++) { for(x=0;x<c->width;x++) { picture->data[0][y * picture->linesize[0] + x] = x + y + i * 3; } } /* Cb and Cr */ for(y=0;y<c->height/2;y++) { for(x=0;x<c->width/2;x++) { picture->data[1][y * picture->linesize[1] + x] = 128 + y + i * 2; picture->data[2][y * picture->linesize[2] + x] = 64 + x + i * 5; } } /* encode the image */ out_size = avcodec_encode_video(c, outbuf, outbuf_size, picture); printf("encoding frame %3d (size=%5d)\n", i, out_size); fwrite(outbuf, 1, out_size, f); } /* get the delayed frames */ for(; out_size; i++) { fflush(stdout); out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL); printf("write frame %3d (size=%5d)\n", i, out_size); fwrite(outbuf, 1, out_size, f); } /* add sequence end code to have a real mpeg file */ outbuf[0] = 0x00; outbuf[1] = 0x00; outbuf[2] = 0x01; outbuf[3] = 0xb7; fwrite(outbuf, 1, 4, f); fclose(f); free(picture_buf); free(outbuf); avcodec_close(c); free(c); free(picture); printf("\n"); }
1,396
qemu
e122636562218b3d442cd2cd18fbc188dd9ce709
1
static void socket_outgoing_migration(Object *src, Error *err, gpointer opaque) { MigrationState *s = opaque; QIOChannel *sioc = QIO_CHANNEL(src); if (err) { trace_migration_socket_outgoing_error(error_get_pretty(err)); s->to_dst_file = NULL; migrate_fd_error(s, err); } else { trace_migration_socket_outgoing_connected(); migration_set_outgoing_channel(s, sioc); } object_unref(src); }
1,397
qemu
efec3dd631d94160288392721a5f9c39e50fb2bc
1
static void apic_common_class_init(ObjectClass *klass, void *data) { ICCDeviceClass *idc = ICC_DEVICE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); dc->vmsd = &vmstate_apic_common; dc->reset = apic_reset_common; dc->no_user = 1; dc->props = apic_properties_common; idc->init = apic_init_common; }
1,398
qemu
b14ef7c9ab41ea824c3ccadb070ad95567cca84e
1
static void do_unassigned_access(target_ulong addr, int is_write, int is_exec, int is_asi, int size) #else void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec, int is_asi, int size) #endif { CPUState *saved_env; /* XXX: hack to restore env in all cases, even if not called from generated code */ saved_env = env; env = cpu_single_env; #ifdef DEBUG_UNASSIGNED printf("Unassigned mem access to " TARGET_FMT_plx " from " TARGET_FMT_lx "\n", addr, env->pc); #endif if (is_exec) raise_exception(TT_CODE_ACCESS); else raise_exception(TT_DATA_ACCESS); env = saved_env; }
1,399
qemu
b58c86e1e4cdf59373aad2ec25f99f772766374c
1
int qemu_fsdev_add(QemuOpts *opts) { int i; struct FsDriverListEntry *fsle; const char *fsdev_id = qemu_opts_id(opts); const char *fsdriver = qemu_opt_get(opts, "fsdriver"); const char *writeout = qemu_opt_get(opts, "writeout"); bool ro = qemu_opt_get_bool(opts, "readonly", 0); if (!fsdev_id) { fprintf(stderr, "fsdev: No id specified\n"); return -1; } if (fsdriver) { for (i = 0; i < ARRAY_SIZE(FsDrivers); i++) { if (strcmp(FsDrivers[i].name, fsdriver) == 0) { break; } } if (i == ARRAY_SIZE(FsDrivers)) { fprintf(stderr, "fsdev: fsdriver %s not found\n", fsdriver); return -1; } } else { fprintf(stderr, "fsdev: No fsdriver specified\n"); return -1; } fsle = g_malloc0(sizeof(*fsle)); fsle->fse.fsdev_id = g_strdup(fsdev_id); fsle->fse.ops = FsDrivers[i].ops; if (writeout) { if (!strcmp(writeout, "immediate")) { fsle->fse.export_flags |= V9FS_IMMEDIATE_WRITEOUT; } } if (ro) { fsle->fse.export_flags |= V9FS_RDONLY; } else { fsle->fse.export_flags &= ~V9FS_RDONLY; } if (fsle->fse.ops->parse_opts) { if (fsle->fse.ops->parse_opts(opts, &fsle->fse)) { return -1; } } QTAILQ_INSERT_TAIL(&fsdriver_entries, fsle, next); return 0; }
1,400
qemu
966439a67830239a6c520c5df6c55627b8153c8b
1
void OPPROTO op_set_Rc0 (void) { env->crf[0] = T0 | xer_ov; RETURN(); }
1,401
FFmpeg
bcbecff13f2d9c8af19039fa82703efd4c04eb97
1
static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt) { AVStream *st; int len, ret; for(;;) { /* select current input stream component */ st = s->cur_st; if (st) { if (!st->parser) { /* no parsing needed: we just output the packet as is */ /* raw data support */ *pkt = s->cur_pkt; compute_pkt_fields(s, st, NULL, pkt); s->cur_st = NULL; return 0; } else if (s->cur_len > 0) { /* we use the MPEG semantics: the pts and dts in a packet are given from the first frame beginning in it */ if (!st->got_frame) { st->cur_frame_pts = s->cur_pkt.pts; st->cur_frame_dts = s->cur_pkt.dts; s->cur_pkt.pts = AV_NOPTS_VALUE; s->cur_pkt.dts = AV_NOPTS_VALUE; st->got_frame = 1; } len = av_parser_parse(st->parser, &st->codec, &pkt->data, &pkt->size, s->cur_ptr, s->cur_len); /* increment read pointer */ s->cur_ptr += len; s->cur_len -= len; /* return packet if any */ if (pkt->size) { pkt->duration = 0; pkt->stream_index = st->index; pkt->pts = st->cur_frame_pts; pkt->dts = st->cur_frame_dts; pkt->destruct = av_destruct_packet_nofree; compute_pkt_fields(s, st, st->parser, pkt); st->got_frame = 0; return 0; } } else { s->cur_st = NULL; } } else { /* free previous packet */ if (s->cur_st && s->cur_st->parser) av_free_packet(&s->cur_pkt); /* read next packet */ ret = av_read_packet(s, &s->cur_pkt); if (ret < 0) return ret; /* convert the packet time stamp units and handle wrapping */ s->cur_pkt.pts = convert_timestamp_units(s, &s->last_pkt_pts, &s->last_pkt_pts_frac, &s->last_pkt_stream_pts, s->cur_pkt.pts); s->cur_pkt.dts = convert_timestamp_units(s, &s->last_pkt_dts, &s->last_pkt_dts_frac, &s->last_pkt_stream_dts, s->cur_pkt.dts); #if 0 if (s->cur_pkt.stream_index == 1) { if (s->cur_pkt.pts != AV_NOPTS_VALUE) printf("PACKET pts=%0.3f\n", (double)s->cur_pkt.pts / AV_TIME_BASE); if (s->cur_pkt.dts != AV_NOPTS_VALUE) printf("PACKET dts=%0.3f\n", (double)s->cur_pkt.dts / AV_TIME_BASE); } #endif /* duration field */ if (s->cur_pkt.duration != 0) { s->cur_pkt.duration = ((int64_t)s->cur_pkt.duration * AV_TIME_BASE * s->pts_num) / s->pts_den; } st = s->streams[s->cur_pkt.stream_index]; s->cur_st = st; s->cur_ptr = s->cur_pkt.data; s->cur_len = s->cur_pkt.size; if (st->need_parsing && !st->parser) { st->parser = av_parser_init(st->codec.codec_id); if (!st->parser) { /* no parser available : just output the raw packets */ st->need_parsing = 0; } } } } }
1,402
qemu
2ef45716e1d4820f10a90ee2f17a9cb4fe5a8806
1
static void monitor_data_destroy(Monitor *mon) { QDECREF(mon->outbuf); qemu_mutex_destroy(&mon->out_lock); }
1,403
qemu
eefa3d8ef649f9055611361e2201cca49f8c3433
1
static void qio_channel_websock_encode(QIOChannelWebsock *ioc) { size_t header_size; union { char buf[QIO_CHANNEL_WEBSOCK_HEADER_LEN_64_BIT]; QIOChannelWebsockHeader ws; } header; if (!ioc->rawoutput.offset) { return; } header.ws.b0 = (1 << QIO_CHANNEL_WEBSOCK_HEADER_SHIFT_FIN) | (QIO_CHANNEL_WEBSOCK_OPCODE_BINARY_FRAME & QIO_CHANNEL_WEBSOCK_HEADER_FIELD_OPCODE); if (ioc->rawoutput.offset < QIO_CHANNEL_WEBSOCK_PAYLOAD_LEN_THRESHOLD_7_BIT) { header.ws.b1 = (uint8_t)ioc->rawoutput.offset; header_size = QIO_CHANNEL_WEBSOCK_HEADER_LEN_7_BIT; } else if (ioc->rawoutput.offset < QIO_CHANNEL_WEBSOCK_PAYLOAD_LEN_THRESHOLD_16_BIT) { header.ws.b1 = QIO_CHANNEL_WEBSOCK_PAYLOAD_LEN_MAGIC_16_BIT; header.ws.u.s16.l16 = cpu_to_be16((uint16_t)ioc->rawoutput.offset); header_size = QIO_CHANNEL_WEBSOCK_HEADER_LEN_16_BIT; } else { header.ws.b1 = QIO_CHANNEL_WEBSOCK_PAYLOAD_LEN_MAGIC_64_BIT; header.ws.u.s64.l64 = cpu_to_be64(ioc->rawoutput.offset); header_size = QIO_CHANNEL_WEBSOCK_HEADER_LEN_64_BIT; } header_size -= QIO_CHANNEL_WEBSOCK_HEADER_LEN_MASK; buffer_reserve(&ioc->encoutput, header_size + ioc->rawoutput.offset); buffer_append(&ioc->encoutput, header.buf, header_size); buffer_append(&ioc->encoutput, ioc->rawoutput.buffer, ioc->rawoutput.offset); buffer_reset(&ioc->rawoutput); }
1,405
FFmpeg
709628aa71f24520553eb10b0cf6d56784e6c3ec
1
static void process_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) { AVFilterContext *ctx = inlink->dst; ConcatContext *cat = ctx->priv; unsigned in_no = FF_INLINK_IDX(inlink); if (in_no < cat->cur_idx) { av_log(ctx, AV_LOG_ERROR, "Frame after EOF on input %s\n", ctx->input_pads[in_no].name); avfilter_unref_buffer(buf); } if (in_no >= cat->cur_idx + ctx->nb_outputs) { ff_bufqueue_add(ctx, &cat->in[in_no].queue, buf); } else { push_frame(ctx, in_no, buf); } }
1,406
FFmpeg
e45226adc46e513a1bb39ec2b09fb7c77515ab14
1
static int make_ydt15_entry(int p2, int p1, int16_t *ydt) #else static int make_ydt15_entry(int p1, int p2, int16_t *ydt) #endif { int lo, hi; lo = ydt[p1]; lo += (lo * 32) + (lo * 1024); hi = ydt[p2]; hi += (hi * 32) + (hi * 1024); return (lo + (hi * (1 << 16))) * 2; }
1,408
qemu
7bd92756303f2158a68d5166264dc30139b813b6
1
static void coroutine_fn v9fs_xattrwalk(void *opaque) { int64_t size; V9fsString name; ssize_t err = 0; size_t offset = 7; int32_t fid, newfid; V9fsFidState *file_fidp; V9fsFidState *xattr_fidp = NULL; V9fsPDU *pdu = opaque; V9fsState *s = pdu->s; v9fs_string_init(&name); err = pdu_unmarshal(pdu, offset, "dds", &fid, &newfid, &name); if (err < 0) { goto out_nofid; } trace_v9fs_xattrwalk(pdu->tag, pdu->id, fid, newfid, name.data); file_fidp = get_fid(pdu, fid); if (file_fidp == NULL) { err = -ENOENT; goto out_nofid; } xattr_fidp = alloc_fid(s, newfid); if (xattr_fidp == NULL) { err = -EINVAL; goto out; } v9fs_path_copy(&xattr_fidp->path, &file_fidp->path); if (!v9fs_string_size(&name)) { /* * listxattr request. Get the size first */ size = v9fs_co_llistxattr(pdu, &xattr_fidp->path, NULL, 0); if (size < 0) { err = size; clunk_fid(s, xattr_fidp->fid); goto out; } /* * Read the xattr value */ xattr_fidp->fs.xattr.len = size; xattr_fidp->fid_type = P9_FID_XATTR; xattr_fidp->fs.xattr.xattrwalk_fid = true; if (size) { xattr_fidp->fs.xattr.value = g_malloc(size); err = v9fs_co_llistxattr(pdu, &xattr_fidp->path, xattr_fidp->fs.xattr.value, xattr_fidp->fs.xattr.len); if (err < 0) { clunk_fid(s, xattr_fidp->fid); goto out; } } err = pdu_marshal(pdu, offset, "q", size); if (err < 0) { goto out; } err += offset; } else { /* * specific xattr fid. We check for xattr * presence also collect the xattr size */ size = v9fs_co_lgetxattr(pdu, &xattr_fidp->path, &name, NULL, 0); if (size < 0) { err = size; clunk_fid(s, xattr_fidp->fid); goto out; } /* * Read the xattr value */ xattr_fidp->fs.xattr.len = size; xattr_fidp->fid_type = P9_FID_XATTR; xattr_fidp->fs.xattr.xattrwalk_fid = true; if (size) { xattr_fidp->fs.xattr.value = g_malloc(size); err = v9fs_co_lgetxattr(pdu, &xattr_fidp->path, &name, xattr_fidp->fs.xattr.value, xattr_fidp->fs.xattr.len); if (err < 0) { clunk_fid(s, xattr_fidp->fid); goto out; } } err = pdu_marshal(pdu, offset, "q", size); if (err < 0) { goto out; } err += offset; } trace_v9fs_xattrwalk_return(pdu->tag, pdu->id, size); out: put_fid(pdu, file_fidp); if (xattr_fidp) { put_fid(pdu, xattr_fidp); } out_nofid: pdu_complete(pdu, err); v9fs_string_free(&name); }
1,411
qemu
9b2fadda3e0196ffd485adde4fe9cdd6fae35300
1
static void gen_slbmfev(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); #else if (unlikely(ctx->pr)) { gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); return; } gen_helper_load_slb_vsid(cpu_gpr[rS(ctx->opcode)], cpu_env, cpu_gpr[rB(ctx->opcode)]); #endif }
1,412
qemu
f8ed85ac992c48814d916d5df4d44f9a971c5de4
1
void vga_common_init(VGACommonState *s, Object *obj, bool global_vmstate) { int i, j, v, b; for(i = 0;i < 256; i++) { v = 0; for(j = 0; j < 8; j++) { v |= ((i >> j) & 1) << (j * 4); } expand4[i] = v; v = 0; for(j = 0; j < 4; j++) { v |= ((i >> (2 * j)) & 3) << (j * 4); } expand2[i] = v; } for(i = 0; i < 16; i++) { v = 0; for(j = 0; j < 4; j++) { b = ((i >> j) & 1); v |= b << (2 * j); v |= b << (2 * j + 1); } expand4to8[i] = v; } s->vram_size_mb = uint_clamp(s->vram_size_mb, 1, 512); s->vram_size_mb = pow2ceil(s->vram_size_mb); s->vram_size = s->vram_size_mb << 20; if (!s->vbe_size) { s->vbe_size = s->vram_size; } s->is_vbe_vmstate = 1; memory_region_init_ram(&s->vram, obj, "vga.vram", s->vram_size, &error_abort); vmstate_register_ram(&s->vram, global_vmstate ? NULL : DEVICE(obj)); xen_register_framebuffer(&s->vram); s->vram_ptr = memory_region_get_ram_ptr(&s->vram); s->get_bpp = vga_get_bpp; s->get_offsets = vga_get_offsets; s->get_resolution = vga_get_resolution; s->hw_ops = &vga_ops; switch (vga_retrace_method) { case VGA_RETRACE_DUMB: s->retrace = vga_dumb_retrace; s->update_retrace_info = vga_dumb_update_retrace_info; break; case VGA_RETRACE_PRECISE: s->retrace = vga_precise_retrace; s->update_retrace_info = vga_precise_update_retrace_info; break; } /* * Set default fb endian based on target, could probably be turned * into a device attribute set by the machine/platform to remove * all target endian dependencies from this file. */ #ifdef TARGET_WORDS_BIGENDIAN s->default_endian_fb = true; #else s->default_endian_fb = false; #endif vga_dirty_log_start(s); }
1,413
FFmpeg
38152d9368beb080b4acd6cd9e5ccc89b3f733bf
1
static void dss_sp_scale_vector(int32_t *vec, int bits, int size) { int i; if (bits < 0) for (i = 0; i < size; i++) vec[i] = vec[i] >> -bits; else for (i = 0; i < size; i++) vec[i] = vec[i] << bits; }
1,414
qemu
a97fed52e57385fc749e6f6ef95be7ebdb81ba9b
1
void OPPROTO op_store_msr (void) { do_store_msr(env, T0); RETURN(); }
1,415
FFmpeg
b926cc7834d5bc998775528097831c0fbcf3730a
1
static void rac_normalise(RangeCoder *c) { for (;;) { c->range <<= 8; c->low <<= 8; if (c->src < c->src_end) { c->low |= *c->src++; } else if (!c->low) { c->got_error = 1; return; } if (c->range >= RAC_BOTTOM) return; } }
1,417
FFmpeg
e403e4bdbea08af0c4a068eb560b577d1b64cf7a
1
static int64_t scene_sad16(FrameRateContext *s, const uint16_t *p1, int p1_linesize, const uint16_t* p2, int p2_linesize, int height) { int64_t sad; int x, y; for (sad = y = 0; y < height; y += 8) { for (x = 0; x < p1_linesize; x += 8) { sad += sad_8x8_16(p1 + y * p1_linesize + x, p1_linesize, p2 + y * p2_linesize + x, p2_linesize); } } return sad; }
1,419
FFmpeg
417927af3c99bc17819995aa57ae05685deeace8
1
static int hds_write_packet(AVFormatContext *s, AVPacket *pkt) { HDSContext *c = s->priv_data; AVStream *st = s->streams[pkt->stream_index]; OutputStream *os = &c->streams[s->streams[pkt->stream_index]->id]; int64_t end_dts = (os->fragment_index) * c->min_frag_duration; int ret; if (st->first_dts == AV_NOPTS_VALUE) st->first_dts = pkt->dts; if ((!os->has_video || st->codec->codec_type == AVMEDIA_TYPE_VIDEO) && av_compare_ts(pkt->dts - st->first_dts, st->time_base, end_dts, AV_TIME_BASE_Q) >= 0 && pkt->flags & AV_PKT_FLAG_KEY && os->packets_written) { if ((ret = hds_flush(s, os, 0, pkt->dts)) < 0) return ret; } // Note, these fragment start timestamps, that represent a whole // OutputStream, assume all streams in it have the same time base. if (!os->packets_written) os->frag_start_ts = pkt->dts; os->last_ts = pkt->dts; os->packets_written++; return ff_write_chained(os->ctx, pkt->stream_index - os->first_stream, pkt, s); }
1,421
FFmpeg
aaebdce3d90725ff93a31678690a306da6e12bbb
0
static int gif_read_extension(GifState *s) { int ext_code, ext_len, gce_flags, gce_transparent_index; /* There must be at least 2 bytes: * 1 for extension label and 1 for extension length. */ if (bytestream2_get_bytes_left(&s->gb) < 2) return AVERROR_INVALIDDATA; ext_code = bytestream2_get_byteu(&s->gb); ext_len = bytestream2_get_byteu(&s->gb); av_dlog(s->avctx, "ext_code=0x%x len=%d\n", ext_code, ext_len); switch(ext_code) { case GIF_GCE_EXT_LABEL: if (ext_len != 4) goto discard_ext; /* We need at least 5 bytes more: 4 is for extension body * and 1 for next block size. */ if (bytestream2_get_bytes_left(&s->gb) < 5) return AVERROR_INVALIDDATA; gce_flags = bytestream2_get_byteu(&s->gb); bytestream2_skipu(&s->gb, 2); // delay during which the frame is shown gce_transparent_index = bytestream2_get_byteu(&s->gb); if (gce_flags & 0x01) s->transparent_color_index = gce_transparent_index; else s->transparent_color_index = -1; s->gce_disposal = (gce_flags >> 2) & 0x7; av_dlog(s->avctx, "gce_flags=%x tcolor=%d disposal=%d\n", gce_flags, s->transparent_color_index, s->gce_disposal); if (s->gce_disposal > 3) { s->gce_disposal = GCE_DISPOSAL_NONE; av_dlog(s->avctx, "invalid value in gce_disposal (%d). Using default value of 0.\n", ext_len); } ext_len = bytestream2_get_byteu(&s->gb); break; } /* NOTE: many extension blocks can come after */ discard_ext: while (ext_len != 0) { /* There must be at least ext_len bytes and 1 for next block size byte. */ if (bytestream2_get_bytes_left(&s->gb) < ext_len + 1) return AVERROR_INVALIDDATA; bytestream2_skipu(&s->gb, ext_len); ext_len = bytestream2_get_byteu(&s->gb); av_dlog(s->avctx, "ext_len1=%d\n", ext_len); } return 0; }
1,423
FFmpeg
13a099799e89a76eb921ca452e1b04a7a28a9855
0
yuv2rgb_2_c_template(SwsContext *c, const uint16_t *buf0, const uint16_t *buf1, const uint16_t *ubuf0, const uint16_t *ubuf1, const uint16_t *vbuf0, const uint16_t *vbuf1, const uint16_t *abuf0, const uint16_t *abuf1, uint8_t *dest, int dstW, int yalpha, int uvalpha, int y, enum PixelFormat target, int hasAlpha) { int yalpha1 = 4095 - yalpha; int uvalpha1 = 4095 - uvalpha; int i; for (i = 0; i < (dstW >> 1); i++) { int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 19; int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 19; int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha) >> 19; int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha) >> 19; int A1, A2; const void *r = c->table_rV[V], *g = (c->table_gU[U] + c->table_gV[V]), *b = c->table_bU[U]; if (hasAlpha) { A1 = (abuf0[i * 2 ] * yalpha1 + abuf1[i * 2 ] * yalpha) >> 19; A2 = (abuf0[i * 2 + 1] * yalpha1 + abuf1[i * 2 + 1] * yalpha) >> 19; } yuv2rgb_write(dest, i, Y1, Y2, U, V, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0, r, g, b, y, target, hasAlpha); } }
1,424
FFmpeg
486637af8ef29ec215e0e0b7ecd3b5470f0e04e5
0
static inline void mix_3f_to_stereo(AC3DecodeContext *ctx) { int i; float (*output)[256] = ctx->audio_block.block_output; for (i = 0; i < 256; i++) { output[1][i] += output[2][i]; output[2][i] += output[3][i]; } memset(output[3], 0, sizeof(output[3])); }
1,425
FFmpeg
dc73c7adc0284871af34100a6062378c07a63569
0
static void decode_sigpass(Jpeg2000T1Context *t1, int width, int height, int bpno, int bandno, int bpass_csty_symbol, int vert_causal_ctx_csty_symbol) { int mask = 3 << (bpno - 1), y0, x, y; for (y0 = 0; y0 < height; y0 += 4) for (x = 0; x < width; x++) for (y = y0; y < height && y < y0 + 4; y++) { if ((t1->flags[y+1][x+1] & JPEG2000_T1_SIG_NB) && !(t1->flags[y+1][x+1] & (JPEG2000_T1_SIG | JPEG2000_T1_VIS))) { int flags_mask = -1; if (vert_causal_ctx_csty_symbol && y == y0 + 3) flags_mask &= ~(JPEG2000_T1_SIG_S | JPEG2000_T1_SIG_SW | JPEG2000_T1_SIG_SE); if (ff_mqc_decode(&t1->mqc, t1->mqc.cx_states + ff_jpeg2000_getsigctxno(t1->flags[y+1][x+1] & flags_mask, bandno))) { int xorbit, ctxno = ff_jpeg2000_getsgnctxno(t1->flags[y+1][x+1], &xorbit); if (bpass_csty_symbol) t1->data[y][x] = ff_mqc_decode(&t1->mqc, t1->mqc.cx_states + ctxno) ? -mask : mask; else t1->data[y][x] = (ff_mqc_decode(&t1->mqc, t1->mqc.cx_states + ctxno) ^ xorbit) ? -mask : mask; ff_jpeg2000_set_significance(t1, x, y, t1->data[y][x] < 0); } t1->flags[y + 1][x + 1] |= JPEG2000_T1_VIS; } } }
1,427
FFmpeg
0d21a84605bad4e75dacb8196e5859902ed36f01
0
void ff_fix_long_p_mvs(MpegEncContext * s) { const int f_code= s->f_code; int y; UINT8 * fcode_tab= s->fcode_tab; /* clip / convert to intra 16x16 type MVs */ for(y=0; y<s->mb_height; y++){ int x; int xy= (y+1)* (s->mb_width+2)+1; int i= y*s->mb_width; for(x=0; x<s->mb_width; x++){ if(s->mb_type[i]&MB_TYPE_INTER){ if( fcode_tab[s->p_mv_table[xy][0] + MAX_MV] > f_code || fcode_tab[s->p_mv_table[xy][0] + MAX_MV] == 0 || fcode_tab[s->p_mv_table[xy][1] + MAX_MV] > f_code || fcode_tab[s->p_mv_table[xy][1] + MAX_MV] == 0 ){ s->mb_type[i] &= ~MB_TYPE_INTER; s->mb_type[i] |= MB_TYPE_INTRA; s->p_mv_table[xy][0] = 0; s->p_mv_table[xy][1] = 0; } } xy++; i++; } } if(s->flags&CODEC_FLAG_4MV){ const int wrap= 2+ s->mb_width*2; /* clip / convert to intra 8x8 type MVs */ for(y=0; y<s->mb_height; y++){ int xy= (y*2 + 1)*wrap + 1; int i= y*s->mb_width; int x; for(x=0; x<s->mb_width; x++){ if(s->mb_type[i]&MB_TYPE_INTER4V){ int block; for(block=0; block<4; block++){ int off= (block& 1) + (block>>1)*wrap; int mx= s->motion_val[ xy + off ][0]; int my= s->motion_val[ xy + off ][1]; if( fcode_tab[mx + MAX_MV] > f_code || fcode_tab[mx + MAX_MV] == 0 || fcode_tab[my + MAX_MV] > f_code || fcode_tab[my + MAX_MV] == 0 ){ s->mb_type[i] &= ~MB_TYPE_INTER4V; s->mb_type[i] |= MB_TYPE_INTRA; } } xy+=2; i++; } } } } }
1,428
FFmpeg
d9cdb7d8d6d828bb5497ea3f0fd7edd2f3f6cc30
0
static av_cold int pcm_dvd_decode_init(AVCodecContext *avctx) { PCMDVDContext *s = avctx->priv_data; /* Invalid header to force parsing of the first header */ s->last_header = -1; /* reserve space for 8 channels, 3 bytes/sample, 4 samples/block */ if (!(s->extra_samples = av_malloc(8 * 3 * 4))) return AVERROR(ENOMEM); s->extra_sample_count = 0; return 0; }
1,429
FFmpeg
bcd7bf7eeb09a395cc01698842d1b8be9af483fc
0
void ff_h264_v_lpf_chroma_inter_msa(uint8_t *data, int img_width, int alpha, int beta, int8_t *tc) { uint8_t bs0 = 1; uint8_t bs1 = 1; uint8_t bs2 = 1; uint8_t bs3 = 1; if (tc[0] < 0) bs0 = 0; if (tc[1] < 0) bs1 = 0; if (tc[2] < 0) bs2 = 0; if (tc[3] < 0) bs3 = 0; avc_loopfilter_cb_or_cr_inter_edge_hor_msa(data, bs0, bs1, bs2, bs3, tc[0], tc[1], tc[2], tc[3], alpha, beta, img_width); }
1,430
FFmpeg
229843aa359ae0c9519977d7fa952688db63f559
0
static AVFrame *get_palette_frame(AVFilterContext *ctx) { AVFrame *out; PaletteGenContext *s = ctx->priv; AVFilterLink *outlink = ctx->outputs[0]; double ratio; int box_id = 0; struct range_box *box; /* reference only the used colors from histogram */ s->refs = load_color_refs(s->histogram, s->nb_refs); if (!s->refs) { av_log(ctx, AV_LOG_ERROR, "Unable to allocate references for %d different colors\n", s->nb_refs); return NULL; } /* create the palette frame */ out = ff_get_video_buffer(outlink, outlink->w, outlink->h); if (!out) return NULL; out->pts = 0; /* set first box for 0..nb_refs */ box = &s->boxes[box_id]; box->len = s->nb_refs; box->sorted_by = -1; box->color = get_avg_color(s->refs, box); box->variance = -1; s->nb_boxes = 1; while (box && box->len > 1) { int i, rr, gr, br, longest; uint64_t median, box_weight = 0; /* compute the box weight (sum all the weights of the colors in the * range) and its boundings */ uint8_t min[3] = {0xff, 0xff, 0xff}; uint8_t max[3] = {0x00, 0x00, 0x00}; for (i = box->start; i < box->start + box->len; i++) { const struct color_ref *ref = s->refs[i]; const uint32_t rgb = ref->color; const uint8_t r = rgb >> 16 & 0xff, g = rgb >> 8 & 0xff, b = rgb & 0xff; min[0] = FFMIN(r, min[0]), max[0] = FFMAX(r, max[0]); min[1] = FFMIN(g, min[1]), max[1] = FFMAX(g, max[1]); min[2] = FFMIN(b, min[2]), max[2] = FFMAX(b, max[2]); box_weight += ref->count; } /* define the axis to sort by according to the widest range of colors */ rr = max[0] - min[0]; gr = max[1] - min[1]; br = max[2] - min[2]; longest = 1; // pick green by default (the color the eye is the most sensitive to) if (br >= rr && br >= gr) longest = 2; if (rr >= gr && rr >= br) longest = 0; if (gr >= rr && gr >= br) longest = 1; // prefer green again av_dlog(ctx, "box #%02X [%6d..%-6d] (%6d) w:%-6"PRIu64" ranges:[%2x %2x %2x] sort by %c (already sorted:%c) ", box_id, box->start, box->start + box->len - 1, box->len, box_weight, rr, gr, br, "rgb"[longest], box->sorted_by == longest ? 'y':'n'); /* sort the range by its longest axis if it's not already sorted */ if (box->sorted_by != longest) { cmp_func cmpf = cmp_funcs[longest]; AV_QSORT(&s->refs[box->start], box->len, const struct color_ref *, cmpf); box->sorted_by = longest; } /* locate the median where to split */ median = (box_weight + 1) >> 1; box_weight = 0; /* if you have 2 boxes, the maximum is actually #0: you must have at * least 1 color on each side of the split, hence the -2 */ for (i = box->start; i < box->start + box->len - 2; i++) { box_weight += s->refs[i]->count; if (box_weight > median) break; } av_dlog(ctx, "split @ i=%-6d with w=%-6"PRIu64" (target=%6"PRIu64")\n", i, box_weight, median); split_box(s, box, i); box_id = get_next_box_id_to_split(s); box = box_id >= 0 ? &s->boxes[box_id] : NULL; } ratio = set_colorquant_ratio_meta(out, s->nb_boxes, s->nb_refs); av_log(ctx, AV_LOG_INFO, "%d%s colors generated out of %d colors; ratio=%f\n", s->nb_boxes, s->reserve_transparent ? "(+1)" : "", s->nb_refs, ratio); qsort(s->boxes, s->nb_boxes, sizeof(*s->boxes), cmp_color); write_palette(ctx, out); return out; }
1,431
qemu
d8f94e1bb275ab6a14a15220fd6afd0d04324aeb
1
static void sun4uv_init(MemoryRegion *address_space_mem, MachineState *machine, const struct hwdef *hwdef) { SPARCCPU *cpu; M48t59State *nvram; unsigned int i; uint64_t initrd_addr, initrd_size, kernel_addr, kernel_size, kernel_entry; PCIBus *pci_bus, *pci_bus2, *pci_bus3; ISABus *isa_bus; qemu_irq *ivec_irqs, *pbm_irqs; DriveInfo *hd[MAX_IDE_BUS * MAX_IDE_DEVS]; DriveInfo *fd[MAX_FD]; FWCfgState *fw_cfg; /* init CPUs */ cpu = cpu_devinit(machine->cpu_model, hwdef); /* set up devices */ ram_init(0, machine->ram_size); prom_init(hwdef->prom_addr, bios_name); ivec_irqs = qemu_allocate_irqs(cpu_set_ivec_irq, cpu, IVEC_MAX); pci_bus = pci_apb_init(APB_SPECIAL_BASE, APB_MEM_BASE, ivec_irqs, &pci_bus2, &pci_bus3, &pbm_irqs); pci_vga_init(pci_bus); // XXX Should be pci_bus3 isa_bus = pci_ebus_init(pci_bus, -1, pbm_irqs); i = 0; if (hwdef->console_serial_base) { serial_mm_init(address_space_mem, hwdef->console_serial_base, 0, NULL, 115200, serial_hds[i], DEVICE_BIG_ENDIAN); i++; } for(; i < MAX_SERIAL_PORTS; i++) { if (serial_hds[i]) { serial_isa_init(isa_bus, i, serial_hds[i]); } } for(i = 0; i < MAX_PARALLEL_PORTS; i++) { if (parallel_hds[i]) { parallel_init(isa_bus, i, parallel_hds[i]); } } for(i = 0; i < nb_nics; i++) pci_nic_init_nofail(&nd_table[i], pci_bus, "ne2k_pci", NULL); ide_drive_get(hd, MAX_IDE_BUS); pci_cmd646_ide_init(pci_bus, hd, 1); isa_create_simple(isa_bus, "i8042"); for(i = 0; i < MAX_FD; i++) { fd[i] = drive_get(IF_FLOPPY, 0, i); } fdctrl_init_isa(isa_bus, fd); nvram = m48t59_init_isa(isa_bus, 0x0074, NVRAM_SIZE, 59); initrd_size = 0; initrd_addr = 0; kernel_size = sun4u_load_kernel(machine->kernel_filename, machine->initrd_filename, ram_size, &initrd_size, &initrd_addr, &kernel_addr, &kernel_entry); sun4u_NVRAM_set_params(nvram, NVRAM_SIZE, "Sun4u", machine->ram_size, machine->boot_order, kernel_addr, kernel_size, machine->kernel_cmdline, initrd_addr, initrd_size, /* XXX: need an option to load a NVRAM image */ 0, graphic_width, graphic_height, graphic_depth, (uint8_t *)&nd_table[0].macaddr); fw_cfg = fw_cfg_init(BIOS_CFG_IOPORT, BIOS_CFG_IOPORT + 1, 0, 0); fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, (uint16_t)max_cpus); fw_cfg_add_i32(fw_cfg, FW_CFG_ID, 1); fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size); fw_cfg_add_i16(fw_cfg, FW_CFG_MACHINE_ID, hwdef->machine_id); fw_cfg_add_i64(fw_cfg, FW_CFG_KERNEL_ADDR, kernel_entry); fw_cfg_add_i64(fw_cfg, FW_CFG_KERNEL_SIZE, kernel_size); if (machine->kernel_cmdline) { fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE, strlen(machine->kernel_cmdline) + 1); fw_cfg_add_string(fw_cfg, FW_CFG_CMDLINE_DATA, machine->kernel_cmdline); } else { fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE, 0); } fw_cfg_add_i64(fw_cfg, FW_CFG_INITRD_ADDR, initrd_addr); fw_cfg_add_i64(fw_cfg, FW_CFG_INITRD_SIZE, initrd_size); fw_cfg_add_i16(fw_cfg, FW_CFG_BOOT_DEVICE, machine->boot_order[0]); fw_cfg_add_i16(fw_cfg, FW_CFG_SPARC64_WIDTH, graphic_width); fw_cfg_add_i16(fw_cfg, FW_CFG_SPARC64_HEIGHT, graphic_height); fw_cfg_add_i16(fw_cfg, FW_CFG_SPARC64_DEPTH, graphic_depth); qemu_register_boot_set(fw_cfg_boot_set, fw_cfg); }
1,432
FFmpeg
b2bbe8298ba5416f26ffadb43f9e75997ec02f7f
1
static int dss_read_metadata_date(AVFormatContext *s, unsigned int offset, const char *key) { AVIOContext *pb = s->pb; char datetime[64], string[DSS_TIME_SIZE + 1] = { 0 }; int y, month, d, h, minute, sec; int ret; avio_seek(pb, offset, SEEK_SET); ret = avio_read(s->pb, string, DSS_TIME_SIZE); if (ret < DSS_TIME_SIZE) return ret < 0 ? ret : AVERROR_EOF; sscanf(string, "%2d%2d%2d%2d%2d%2d", &y, &month, &d, &h, &minute, &sec); /* We deal with a two-digit year here, so set the default date to 2000 * and hope it will never be used in the next century. */ snprintf(datetime, sizeof(datetime), "%.4d-%.2d-%.2dT%.2d:%.2d:%.2d", y + 2000, month, d, h, minute, sec); return av_dict_set(&s->metadata, key, datetime, 0); }
1,433
qemu
7ea004ed67e08462926a8559e1c6953e387e4035
1
static void fdctrl_start_transfer(FDCtrl *fdctrl, int direction) { FDrive *cur_drv; uint8_t kh, kt, ks; SET_CUR_DRV(fdctrl, fdctrl->fifo[1] & FD_DOR_SELMASK); cur_drv = get_cur_drv(fdctrl); kt = fdctrl->fifo[2]; kh = fdctrl->fifo[3]; ks = fdctrl->fifo[4]; FLOPPY_DPRINTF("Start transfer at %d %d %02x %02x (%d)\n", GET_CUR_DRV(fdctrl), kh, kt, ks, fd_sector_calc(kh, kt, ks, cur_drv->last_sect, NUM_SIDES(cur_drv))); switch (fd_seek(cur_drv, kh, kt, ks, fdctrl->config & FD_CONFIG_EIS)) { case 2: /* sect too big */ fdctrl_stop_transfer(fdctrl, FD_SR0_ABNTERM, 0x00, 0x00); fdctrl->fifo[3] = kt; fdctrl->fifo[4] = kh; fdctrl->fifo[5] = ks; return; case 3: /* track too big */ fdctrl_stop_transfer(fdctrl, FD_SR0_ABNTERM, FD_SR1_EC, 0x00); fdctrl->fifo[3] = kt; fdctrl->fifo[4] = kh; fdctrl->fifo[5] = ks; return; case 4: /* No seek enabled */ fdctrl_stop_transfer(fdctrl, FD_SR0_ABNTERM, 0x00, 0x00); fdctrl->fifo[3] = kt; fdctrl->fifo[4] = kh; fdctrl->fifo[5] = ks; return; case 1: fdctrl->status0 |= FD_SR0_SEEK; break; default: break; } /* Check the data rate. If the programmed data rate does not match * the currently inserted medium, the operation has to fail. */ if (fdctrl->check_media_rate && (fdctrl->dsr & FD_DSR_DRATEMASK) != cur_drv->media_rate) { FLOPPY_DPRINTF("data rate mismatch (fdc=%d, media=%d)\n", fdctrl->dsr & FD_DSR_DRATEMASK, cur_drv->media_rate); fdctrl_stop_transfer(fdctrl, FD_SR0_ABNTERM, FD_SR1_MA, 0x00); fdctrl->fifo[3] = kt; fdctrl->fifo[4] = kh; fdctrl->fifo[5] = ks; return; } /* Set the FIFO state */ fdctrl->data_dir = direction; fdctrl->data_pos = 0; fdctrl->msr |= FD_MSR_CMDBUSY; if (fdctrl->fifo[0] & 0x80) fdctrl->data_state |= FD_STATE_MULTI; else fdctrl->data_state &= ~FD_STATE_MULTI; if (fdctrl->fifo[5] == 00) { fdctrl->data_len = fdctrl->fifo[8]; } else { int tmp; fdctrl->data_len = 128 << (fdctrl->fifo[5] > 7 ? 7 : fdctrl->fifo[5]); tmp = (fdctrl->fifo[6] - ks + 1); if (fdctrl->fifo[0] & 0x80) tmp += fdctrl->fifo[6]; fdctrl->data_len *= tmp; } fdctrl->eot = fdctrl->fifo[6]; if (fdctrl->dor & FD_DOR_DMAEN) { int dma_mode; /* DMA transfer are enabled. Check if DMA channel is well programmed */ dma_mode = DMA_get_channel_mode(fdctrl->dma_chann); dma_mode = (dma_mode >> 2) & 3; FLOPPY_DPRINTF("dma_mode=%d direction=%d (%d - %d)\n", dma_mode, direction, (128 << fdctrl->fifo[5]) * (cur_drv->last_sect - ks + 1), fdctrl->data_len); if (((direction == FD_DIR_SCANE || direction == FD_DIR_SCANL || direction == FD_DIR_SCANH) && dma_mode == 0) || (direction == FD_DIR_WRITE && dma_mode == 2) || (direction == FD_DIR_READ && dma_mode == 1)) { /* No access is allowed until DMA transfer has completed */ fdctrl->msr &= ~FD_MSR_RQM; /* Now, we just have to wait for the DMA controller to * recall us... */ DMA_hold_DREQ(fdctrl->dma_chann); DMA_schedule(fdctrl->dma_chann); return; } else { FLOPPY_DPRINTF("bad dma_mode=%d direction=%d\n", dma_mode, direction); } } FLOPPY_DPRINTF("start non-DMA transfer\n"); fdctrl->msr |= FD_MSR_NONDMA; if (direction != FD_DIR_WRITE) fdctrl->msr |= FD_MSR_DIO; /* IO based transfer: calculate len */ fdctrl_raise_irq(fdctrl); }
1,436
FFmpeg
b61cb61ab8f9abca98cc8c4d67cbefdb30f1e82a
1
static int mxf_write_footer(AVFormatContext *s) { MXFContext *mxf = s->priv_data; AVIOContext *pb = s->pb; int err = 0; mxf->duration = mxf->last_indexed_edit_unit + mxf->edit_units_count; mxf_write_klv_fill(s); mxf->footer_partition_offset = avio_tell(pb); if (mxf->edit_unit_byte_count && s->oformat != &ff_mxf_opatom_muxer) { // no need to repeat index if ((err = mxf_write_partition(s, 0, 0, footer_partition_key, 0)) < 0) } else { if ((err = mxf_write_partition(s, 0, 2, footer_partition_key, 0)) < 0) mxf_write_klv_fill(s); mxf_write_index_table_segment(s); mxf_write_klv_fill(s); mxf_write_random_index_pack(s); if (s->pb->seekable) { if (s->oformat == &ff_mxf_opatom_muxer){ /* rewrite body partition to update lengths */ avio_seek(pb, mxf->body_partition_offset[0], SEEK_SET); if ((err = mxf_write_opatom_body_partition(s)) < 0) avio_seek(pb, 0, SEEK_SET); if (mxf->edit_unit_byte_count && s->oformat != &ff_mxf_opatom_muxer) { if ((err = mxf_write_partition(s, 1, 2, header_closed_partition_key, 1)) < 0) mxf_write_klv_fill(s); mxf_write_index_table_segment(s); } else { if ((err = mxf_write_partition(s, 0, 0, header_closed_partition_key, 1)) < 0) end: ff_audio_interleave_close(s); av_freep(&mxf->index_entries); av_freep(&mxf->body_partition_offset); av_freep(&mxf->timecode_track->priv_data); av_freep(&mxf->timecode_track); mxf_free(s); return err < 0 ? err : 0;
1,437
qemu
10a7b7e6fd9f250f0506568345d7b4d2ab52889d
1
static int inet_listen_saddr(InetSocketAddress *saddr, int port_offset, bool update_addr, Error **errp) { struct addrinfo ai,*res,*e; char port[33]; char uaddr[INET6_ADDRSTRLEN+1]; char uport[33]; int rc, port_min, port_max, p; int slisten = 0; int saved_errno = 0; bool socket_created = false; Error *err = NULL; memset(&ai,0, sizeof(ai)); ai.ai_flags = AI_PASSIVE; if (saddr->has_numeric && saddr->numeric) { ai.ai_flags |= AI_NUMERICHOST | AI_NUMERICSERV; } ai.ai_family = inet_ai_family_from_address(saddr, &err); ai.ai_socktype = SOCK_STREAM; if (err) { error_propagate(errp, err); return -1; } if (saddr->host == NULL) { error_setg(errp, "host not specified"); return -1; } if (saddr->port != NULL) { pstrcpy(port, sizeof(port), saddr->port); } else { port[0] = '\0'; } /* lookup */ if (port_offset) { unsigned long long baseport; if (strlen(port) == 0) { error_setg(errp, "port not specified"); return -1; } if (parse_uint_full(port, &baseport, 10) < 0) { error_setg(errp, "can't convert to a number: %s", port); return -1; } if (baseport > 65535 || baseport + port_offset > 65535) { error_setg(errp, "port %s out of range", port); return -1; } snprintf(port, sizeof(port), "%d", (int)baseport + port_offset); } rc = getaddrinfo(strlen(saddr->host) ? saddr->host : NULL, strlen(port) ? port : NULL, &ai, &res); if (rc != 0) { error_setg(errp, "address resolution failed for %s:%s: %s", saddr->host, port, gai_strerror(rc)); return -1; } /* create socket + bind/listen */ for (e = res; e != NULL; e = e->ai_next) { getnameinfo((struct sockaddr*)e->ai_addr,e->ai_addrlen, uaddr,INET6_ADDRSTRLEN,uport,32, NI_NUMERICHOST | NI_NUMERICSERV); slisten = create_fast_reuse_socket(e); if (slisten < 0) { continue; } socket_created = true; port_min = inet_getport(e); port_max = saddr->has_to ? saddr->to + port_offset : port_min; for (p = port_min; p <= port_max; p++) { inet_setport(e, p); rc = try_bind(slisten, saddr, e); if (rc) { if (errno == EADDRINUSE) { continue; } else { error_setg_errno(errp, errno, "Failed to bind socket"); goto listen_failed; } } if (!listen(slisten, 1)) { goto listen_ok; } if (errno != EADDRINUSE) { error_setg_errno(errp, errno, "Failed to listen on socket"); goto listen_failed; } /* Someone else managed to bind to the same port and beat us * to listen on it! Socket semantics does not allow us to * recover from this situation, so we need to recreate the * socket to allow bind attempts for subsequent ports: */ closesocket(slisten); slisten = create_fast_reuse_socket(e); if (slisten < 0) { error_setg_errno(errp, errno, "Failed to recreate failed listening socket"); goto listen_failed; } } } error_setg_errno(errp, errno, socket_created ? "Failed to find an available port" : "Failed to create a socket"); listen_failed: saved_errno = errno; if (slisten >= 0) { closesocket(slisten); } freeaddrinfo(res); errno = saved_errno; return -1; listen_ok: if (update_addr) { g_free(saddr->host); saddr->host = g_strdup(uaddr); g_free(saddr->port); saddr->port = g_strdup_printf("%d", inet_getport(e) - port_offset); saddr->has_ipv6 = saddr->ipv6 = e->ai_family == PF_INET6; saddr->has_ipv4 = saddr->ipv4 = e->ai_family != PF_INET6; } freeaddrinfo(res); return slisten; }
1,438
FFmpeg
5deb5ccbbb556c4a15f3c7494f00de2963d0aba6
0
static void *circular_buffer_task( void *_URLContext) { URLContext *h = _URLContext; UDPContext *s = h->priv_data; int old_cancelstate; pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &old_cancelstate); ff_socket_nonblock(s->udp_fd, 0); while(1) { int left; int len; /* How much do we have left to the end of the buffer */ /* Whats the minimum we can read so that we dont comletely fill the buffer */ left = av_fifo_space(s->fifo); /* Blocking operations are always cancellation points; see "General Information" / "Thread Cancelation Overview" in Single Unix. */ pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_cancelstate); len = recv(s->udp_fd, s->tmp+4, sizeof(s->tmp)-4, 0); pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &old_cancelstate); if (len < 0) { if (ff_neterrno() != AVERROR(EAGAIN) && ff_neterrno() != AVERROR(EINTR)) { s->circular_buffer_error = AVERROR(EIO); goto end; } continue; } AV_WL32(s->tmp, len); if(left < len + 4) { /* No Space left */ if (s->overrun_nonfatal) { av_log(h, AV_LOG_WARNING, "Circular buffer overrun. " "Surviving due to overrun_nonfatal option\n"); continue; } else { av_log(h, AV_LOG_ERROR, "Circular buffer overrun. " "To avoid, increase fifo_size URL option. " "To survive in such case, use overrun_nonfatal option\n"); s->circular_buffer_error = AVERROR(EIO); goto end; } } pthread_mutex_lock(&s->mutex); av_fifo_generic_write(s->fifo, s->tmp, len+4, NULL); pthread_cond_signal(&s->cond); pthread_mutex_unlock(&s->mutex); } end: pthread_mutex_lock(&s->mutex); pthread_cond_signal(&s->cond); pthread_mutex_unlock(&s->mutex); return NULL; }
1,440
FFmpeg
d2bf42895ac30d228491a8a95a5908351dc32783
0
static void FUNCC(pred8x8l_horizontal)(uint8_t *_src, int has_topleft, int has_topright, int _stride) { pixel *src = (pixel*)_src; int stride = _stride/sizeof(pixel); PREDICT_8x8_LOAD_LEFT; #define ROW(y) ((pixel4*)(src+y*stride))[0] =\ ((pixel4*)(src+y*stride))[1] = PIXEL_SPLAT_X4(l##y) ROW(0); ROW(1); ROW(2); ROW(3); ROW(4); ROW(5); ROW(6); ROW(7); #undef ROW }
1,442
FFmpeg
eb5b0422b595d488f5c2f2a37a62cd46dfbb6aa7
0
static int g726_init(AVCodecContext * avctx) { AVG726Context* c = (AVG726Context*)avctx->priv_data; if (avctx->sample_rate != 8000 || avctx->channels != 1 || (avctx->bit_rate != 16000 && avctx->bit_rate != 24000 && avctx->bit_rate != 32000 && avctx->bit_rate != 40000)) { av_log(avctx, AV_LOG_ERROR, "G726: unsupported audio format\n"); return -1; } g726_reset(&c->c, avctx->bit_rate); c->code_size = c->c.tbls->bits; c->bit_buffer = 0; c->bits_left = 0; return 0; }
1,443
FFmpeg
0e7865ce4152f8b04cda6a698bbee4fd4a94009d
0
static int decode_user_data(Mpeg4DecContext *ctx, GetBitContext *gb) { MpegEncContext *s = &ctx->m; char buf[256]; int i; int e; int ver = 0, build = 0, ver2 = 0, ver3 = 0; char last; for (i = 0; i < 255 && get_bits_count(gb) < gb->size_in_bits; i++) { if (show_bits(gb, 23) == 0) break; buf[i] = get_bits(gb, 8); } buf[i] = 0; /* divx detection */ e = sscanf(buf, "DivX%dBuild%d%c", &ver, &build, &last); if (e < 2) e = sscanf(buf, "DivX%db%d%c", &ver, &build, &last); if (e >= 2) { ctx->divx_version = ver; ctx->divx_build = build; s->divx_packed = e == 3 && last == 'p'; } /* libavcodec detection */ e = sscanf(buf, "FFmpe%*[^b]b%d", &build) + 3; if (e != 4) e = sscanf(buf, "FFmpeg v%d.%d.%d / libavcodec build: %d", &ver, &ver2, &ver3, &build); if (e != 4) { e = sscanf(buf, "Lavc%d.%d.%d", &ver, &ver2, &ver3) + 1; if (e > 1) { if (ver > 0xFF || ver2 > 0xFF || ver3 > 0xFF) { av_log(s->avctx, AV_LOG_WARNING, "Unknown Lavc version string encountered, %d.%d.%d; " "clamping sub-version values to 8-bits.\n", ver, ver2, ver3); } build = ((ver & 0xFF) << 16) + ((ver2 & 0xFF) << 8) + (ver3 & 0xFF); } } if (e != 4) { if (strcmp(buf, "ffmpeg") == 0) ctx->lavc_build = 4600; } if (e == 4) ctx->lavc_build = build; /* Xvid detection */ e = sscanf(buf, "XviD%d", &build); if (e == 1) ctx->xvid_build = build; return 0; }
1,444
FFmpeg
f1bdc234370401c032cd85184e93c7c155eb6d62
0
int ff_get_wav_header(AVFormatContext *s, AVIOContext *pb, AVCodecContext *codec, int size) { int id; if (size < 14) return AVERROR_INVALIDDATA; id = avio_rl16(pb); codec->codec_type = AVMEDIA_TYPE_AUDIO; codec->channels = avio_rl16(pb); codec->sample_rate = avio_rl32(pb); codec->bit_rate = avio_rl32(pb) * 8; codec->block_align = avio_rl16(pb); if (size == 14) { /* We're dealing with plain vanilla WAVEFORMAT */ codec->bits_per_coded_sample = 8; } else codec->bits_per_coded_sample = avio_rl16(pb); if (id == 0xFFFE) { codec->codec_tag = 0; } else { codec->codec_tag = id; codec->codec_id = ff_wav_codec_get_id(id, codec->bits_per_coded_sample); } if (size >= 18) { /* We're obviously dealing with WAVEFORMATEX */ int cbSize = avio_rl16(pb); /* cbSize */ size -= 18; cbSize = FFMIN(size, cbSize); if (cbSize >= 22 && id == 0xfffe) { /* WAVEFORMATEXTENSIBLE */ parse_waveformatex(pb, codec); cbSize -= 22; size -= 22; } codec->extradata_size = cbSize; if (cbSize > 0) { av_free(codec->extradata); codec->extradata = av_mallocz(codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE); if (!codec->extradata) return AVERROR(ENOMEM); avio_read(pb, codec->extradata, codec->extradata_size); size -= cbSize; } /* It is possible for the chunk to contain garbage at the end */ if (size > 0) avio_skip(pb, size); } if (codec->sample_rate <= 0) { av_log(s, AV_LOG_ERROR, "Invalid sample rate: %d\n", codec->sample_rate); return AVERROR_INVALIDDATA; } if (codec->codec_id == AV_CODEC_ID_AAC_LATM) { /* Channels and sample_rate values are those prior to applying SBR * and/or PS. */ codec->channels = 0; codec->sample_rate = 0; } /* override bits_per_coded_sample for G.726 */ if (codec->codec_id == AV_CODEC_ID_ADPCM_G726) codec->bits_per_coded_sample = codec->bit_rate / codec->sample_rate; return 0; }
1,445
FFmpeg
eabbc64728c2fdb74f565aededec2ab023d20699
0
static int mkv_write_packet_internal(AVFormatContext *s, AVPacket *pkt, int add_cue) { MatroskaMuxContext *mkv = s->priv_data; AVIOContext *pb = s->pb; AVCodecParameters *par = s->streams[pkt->stream_index]->codecpar; int keyframe = !!(pkt->flags & AV_PKT_FLAG_KEY); int duration = pkt->duration; int ret; int64_t ts = mkv->tracks[pkt->stream_index].write_dts ? pkt->dts : pkt->pts; int64_t relative_packet_pos; int dash_tracknum = mkv->is_dash ? mkv->dash_track_number : pkt->stream_index + 1; if (ts == AV_NOPTS_VALUE) { av_log(s, AV_LOG_ERROR, "Can't write packet with unknown timestamp\n"); return AVERROR(EINVAL); } ts += mkv->tracks[pkt->stream_index].ts_offset; if (mkv->cluster_pos != -1) { int64_t cluster_time = ts - mkv->cluster_pts + mkv->tracks[pkt->stream_index].ts_offset; if ((int16_t)cluster_time != cluster_time) { av_log(s, AV_LOG_WARNING, "Starting new cluster due to timestamp\n"); mkv_start_new_cluster(s, pkt); } } if (mkv->cluster_pos == -1) { mkv->cluster_pos = avio_tell(s->pb); ret = start_ebml_master_crc32(s->pb, &mkv->dyn_bc, &mkv->cluster, MATROSKA_ID_CLUSTER, 0); if (ret < 0) return ret; put_ebml_uint(mkv->dyn_bc, MATROSKA_ID_CLUSTERTIMECODE, FFMAX(0, ts)); mkv->cluster_pts = FFMAX(0, ts); } pb = mkv->dyn_bc; relative_packet_pos = avio_tell(s->pb) - mkv->cluster.pos + avio_tell(pb); if (par->codec_type != AVMEDIA_TYPE_SUBTITLE) { mkv_write_block(s, pb, MATROSKA_ID_SIMPLEBLOCK, pkt, keyframe); if (s->pb->seekable && (par->codec_type == AVMEDIA_TYPE_VIDEO && keyframe || add_cue)) { ret = mkv_add_cuepoint(mkv->cues, pkt->stream_index, dash_tracknum, ts, mkv->cluster_pos, relative_packet_pos, -1); if (ret < 0) return ret; } } else { if (par->codec_id == AV_CODEC_ID_WEBVTT) { duration = mkv_write_vtt_blocks(s, pb, pkt); } else { ebml_master blockgroup = start_ebml_master(pb, MATROSKA_ID_BLOCKGROUP, mkv_blockgroup_size(pkt->size)); #if FF_API_CONVERGENCE_DURATION FF_DISABLE_DEPRECATION_WARNINGS /* For backward compatibility, prefer convergence_duration. */ if (pkt->convergence_duration > 0) { duration = pkt->convergence_duration; } FF_ENABLE_DEPRECATION_WARNINGS #endif /* All subtitle blocks are considered to be keyframes. */ mkv_write_block(s, pb, MATROSKA_ID_BLOCK, pkt, 1); put_ebml_uint(pb, MATROSKA_ID_BLOCKDURATION, duration); end_ebml_master(pb, blockgroup); } if (s->pb->seekable) { ret = mkv_add_cuepoint(mkv->cues, pkt->stream_index, dash_tracknum, ts, mkv->cluster_pos, relative_packet_pos, duration); if (ret < 0) return ret; } } mkv->duration = FFMAX(mkv->duration, ts + duration); if (mkv->stream_durations) mkv->stream_durations[pkt->stream_index] = FFMAX(mkv->stream_durations[pkt->stream_index], ts + duration); return 0; }
1,446
FFmpeg
0e6c8532215790bbe560a9eea4f3cc82bb55cf92
0
static av_cold int XAVS_init(AVCodecContext *avctx) { XavsContext *x4 = avctx->priv_data; x4->sei_size = 0; xavs_param_default(&x4->params); x4->params.pf_log = XAVS_log; x4->params.p_log_private = avctx; x4->params.i_keyint_max = avctx->gop_size; if (avctx->bit_rate) { x4->params.rc.i_bitrate = avctx->bit_rate / 1000; x4->params.rc.i_rc_method = XAVS_RC_ABR; } x4->params.rc.i_vbv_buffer_size = avctx->rc_buffer_size / 1000; x4->params.rc.i_vbv_max_bitrate = avctx->rc_max_rate / 1000; x4->params.rc.b_stat_write = avctx->flags & AV_CODEC_FLAG_PASS1; if (avctx->flags & AV_CODEC_FLAG_PASS2) { x4->params.rc.b_stat_read = 1; } else { if (x4->crf >= 0) { x4->params.rc.i_rc_method = XAVS_RC_CRF; x4->params.rc.f_rf_constant = x4->crf; } else if (x4->cqp >= 0) { x4->params.rc.i_rc_method = XAVS_RC_CQP; x4->params.rc.i_qp_constant = x4->cqp; } } if (x4->aud >= 0) x4->params.b_aud = x4->aud; if (x4->mbtree >= 0) x4->params.rc.b_mb_tree = x4->mbtree; if (x4->direct_pred >= 0) x4->params.analyse.i_direct_mv_pred = x4->direct_pred; if (x4->fast_pskip >= 0) x4->params.analyse.b_fast_pskip = x4->fast_pskip; if (x4->motion_est >= 0) x4->params.analyse.i_me_method = x4->motion_est; if (x4->mixed_refs >= 0) x4->params.analyse.b_mixed_references = x4->mixed_refs; if (x4->b_bias != INT_MIN) x4->params.i_bframe_bias = x4->b_bias; if (x4->cplxblur >= 0) x4->params.rc.f_complexity_blur = x4->cplxblur; #if FF_API_MOTION_EST FF_DISABLE_DEPRECATION_WARNINGS if (x4->motion_est < 0) { switch (avctx->me_method) { case ME_EPZS: x4->params.analyse.i_me_method = XAVS_ME_DIA; break; case ME_HEX: x4->params.analyse.i_me_method = XAVS_ME_HEX; break; case ME_UMH: x4->params.analyse.i_me_method = XAVS_ME_UMH; break; case ME_FULL: x4->params.analyse.i_me_method = XAVS_ME_ESA; break; case ME_TESA: x4->params.analyse.i_me_method = XAVS_ME_TESA; break; default: x4->params.analyse.i_me_method = XAVS_ME_HEX; } } FF_ENABLE_DEPRECATION_WARNINGS #endif x4->params.i_bframe = avctx->max_b_frames; /* cabac is not included in AVS JiZhun Profile */ x4->params.b_cabac = 0; x4->params.i_bframe_adaptive = avctx->b_frame_strategy; avctx->has_b_frames = !!avctx->max_b_frames; /* AVS doesn't allow B picture as reference */ /* The max allowed reference frame number of B is 2 */ x4->params.i_keyint_min = avctx->keyint_min; if (x4->params.i_keyint_min > x4->params.i_keyint_max) x4->params.i_keyint_min = x4->params.i_keyint_max; x4->params.i_scenecut_threshold = avctx->scenechange_threshold; // x4->params.b_deblocking_filter = avctx->flags & AV_CODEC_FLAG_LOOP_FILTER; x4->params.rc.i_qp_min = avctx->qmin; x4->params.rc.i_qp_max = avctx->qmax; x4->params.rc.i_qp_step = avctx->max_qdiff; x4->params.rc.f_qcompress = avctx->qcompress; /* 0.0 => cbr, 1.0 => constant qp */ x4->params.rc.f_qblur = avctx->qblur; /* temporally blur quants */ x4->params.i_frame_reference = avctx->refs; x4->params.i_width = avctx->width; x4->params.i_height = avctx->height; x4->params.vui.i_sar_width = avctx->sample_aspect_ratio.num; x4->params.vui.i_sar_height = avctx->sample_aspect_ratio.den; /* This is only used for counting the fps */ x4->params.i_fps_num = avctx->time_base.den; x4->params.i_fps_den = avctx->time_base.num; x4->params.analyse.inter = XAVS_ANALYSE_I8x8 |XAVS_ANALYSE_PSUB16x16| XAVS_ANALYSE_BSUB16x16; x4->params.analyse.i_me_range = avctx->me_range; x4->params.analyse.i_subpel_refine = avctx->me_subpel_quality; x4->params.analyse.b_chroma_me = avctx->me_cmp & FF_CMP_CHROMA; /* AVS P2 only enables 8x8 transform */ x4->params.analyse.b_transform_8x8 = 1; //avctx->flags2 & AV_CODEC_FLAG2_8X8DCT; x4->params.analyse.i_trellis = avctx->trellis; x4->params.analyse.i_noise_reduction = avctx->noise_reduction; if (avctx->level > 0) x4->params.i_level_idc = avctx->level; if (avctx->bit_rate > 0) x4->params.rc.f_rate_tolerance = (float)avctx->bit_rate_tolerance / avctx->bit_rate; if ((avctx->rc_buffer_size) && (avctx->rc_initial_buffer_occupancy <= avctx->rc_buffer_size)) { x4->params.rc.f_vbv_buffer_init = (float)avctx->rc_initial_buffer_occupancy / avctx->rc_buffer_size; } else x4->params.rc.f_vbv_buffer_init = 0.9; /* TAG:do we have MB tree RC method */ /* what is the RC method we are now using? Default NO */ x4->params.rc.f_ip_factor = 1 / fabs(avctx->i_quant_factor); x4->params.rc.f_pb_factor = avctx->b_quant_factor; x4->params.analyse.i_chroma_qp_offset = avctx->chromaoffset; x4->params.analyse.b_psnr = avctx->flags & AV_CODEC_FLAG_PSNR; x4->params.i_log_level = XAVS_LOG_DEBUG; x4->params.i_threads = avctx->thread_count; x4->params.b_interlaced = avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT; if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) x4->params.b_repeat_headers = 0; x4->enc = xavs_encoder_open(&x4->params); if (!x4->enc) return -1; if (!(x4->pts_buffer = av_mallocz((avctx->max_b_frames+1) * sizeof(*x4->pts_buffer)))) return AVERROR(ENOMEM); /* TAG: Do we have GLOBAL HEADER in AVS */ /* We Have PPS and SPS in AVS */ if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) { xavs_nal_t *nal; int nnal, s, i, size; uint8_t *p; s = xavs_encoder_headers(x4->enc, &nal, &nnal); avctx->extradata = p = av_malloc(s); for (i = 0; i < nnal; i++) { /* Don't put the SEI in extradata. */ if (nal[i].i_type == NAL_SEI) { x4->sei = av_malloc( 5 + nal[i].i_payload * 4 / 3 ); if (xavs_nal_encode(x4->sei, &x4->sei_size, 1, nal + i) < 0) return -1; continue; } size = xavs_nal_encode(p, &s, 1, nal + i); if (size < 0) return -1; p += size; } avctx->extradata_size = p - avctx->extradata; } return 0; }
1,448
FFmpeg
ded5957d75def70d2f1fc1c1eae079230004974b
0
static int film_read_close(AVFormatContext *s) { FilmDemuxContext *film = s->priv_data; av_freep(&film->sample_table); av_freep(&film->stereo_buffer); return 0; }
1,449
FFmpeg
d6604b29ef544793479d7fb4e05ef6622bb3e534
0
static av_cold int png_enc_close(AVCodecContext *avctx) { av_frame_free(&avctx->coded_frame); return 0; }
1,450
FFmpeg
aa7982577c1dee021b72f4256f48d3c030d44e73
1
int opt_opencl_bench(void *optctx, const char *opt, const char *arg) { int i, j, nb_devices = 0, count = 0; int64_t score = 0; AVOpenCLDeviceList *device_list; AVOpenCLDeviceNode *device_node = NULL; OpenCLDeviceBenchmark *devices = NULL; cl_platform_id platform; av_opencl_get_device_list(&device_list); for (i = 0; i < device_list->platform_num; i++) nb_devices += device_list->platform_node[i]->device_num; if (!nb_devices) { av_log(NULL, AV_LOG_ERROR, "No OpenCL device detected!\n"); return AVERROR(EINVAL); } if (!(devices = av_malloc_array(nb_devices, sizeof(OpenCLDeviceBenchmark)))) { av_log(NULL, AV_LOG_ERROR, "Could not allocate buffer\n"); return AVERROR(ENOMEM); } for (i = 0; i < device_list->platform_num; i++) { for (j = 0; j < device_list->platform_node[i]->device_num; j++) { device_node = device_list->platform_node[i]->device_node[j]; platform = device_list->platform_node[i]->platform_id; score = av_opencl_benchmark(device_node, platform, run_opencl_bench); if (score > 0) { devices[count].platform_idx = i; devices[count].device_idx = j; devices[count].runtime = score; av_strlcpy(devices[count].device_name, device_node->device_name, sizeof(devices[count].device_name)); count++; } } } qsort(devices, count, sizeof(OpenCLDeviceBenchmark), compare_ocl_device_desc); fprintf(stderr, "platform_idx\tdevice_idx\tdevice_name\truntime\n"); for (i = 0; i < count; i++) fprintf(stdout, "%d\t%d\t%s\t%"PRId64"\n", devices[i].platform_idx, devices[i].device_idx, devices[i].device_name, devices[i].runtime); av_opencl_free_device_list(&device_list); av_free(devices); return 0; }
1,452
qemu
6f2d8978728c48ca46f5c01835438508aace5c64
1
void ppc_tlb_invalidate_one (CPUPPCState *env, target_ulong addr) { #if !defined(FLUSH_ALL_TLBS) addr &= TARGET_PAGE_MASK; switch (env->mmu_model) { case POWERPC_MMU_SOFT_6xx: case POWERPC_MMU_SOFT_74xx: ppc6xx_tlb_invalidate_virt(env, addr, 0); if (env->id_tlbs == 1) ppc6xx_tlb_invalidate_virt(env, addr, 1); break; case POWERPC_MMU_SOFT_4xx: case POWERPC_MMU_SOFT_4xx_Z: ppc4xx_tlb_invalidate_virt(env, addr, env->spr[SPR_40x_PID]); break; case POWERPC_MMU_REAL_4xx: cpu_abort(env, "No TLB for PowerPC 4xx in real mode\n"); break; case POWERPC_MMU_BOOKE: /* XXX: TODO */ cpu_abort(env, "MMU model not implemented\n"); break; case POWERPC_MMU_BOOKE_FSL: /* XXX: TODO */ cpu_abort(env, "MMU model not implemented\n"); break; case POWERPC_MMU_32B: case POWERPC_MMU_601: /* tlbie invalidate TLBs for all segments */ addr &= ~((target_ulong)-1 << 28); /* XXX: this case should be optimized, * giving a mask to tlb_flush_page */ tlb_flush_page(env, addr | (0x0 << 28)); tlb_flush_page(env, addr | (0x1 << 28)); tlb_flush_page(env, addr | (0x2 << 28)); tlb_flush_page(env, addr | (0x3 << 28)); tlb_flush_page(env, addr | (0x4 << 28)); tlb_flush_page(env, addr | (0x5 << 28)); tlb_flush_page(env, addr | (0x6 << 28)); tlb_flush_page(env, addr | (0x7 << 28)); tlb_flush_page(env, addr | (0x8 << 28)); tlb_flush_page(env, addr | (0x9 << 28)); tlb_flush_page(env, addr | (0xA << 28)); tlb_flush_page(env, addr | (0xB << 28)); tlb_flush_page(env, addr | (0xC << 28)); tlb_flush_page(env, addr | (0xD << 28)); tlb_flush_page(env, addr | (0xE << 28)); tlb_flush_page(env, addr | (0xF << 28)); break; #if defined(TARGET_PPC64) case POWERPC_MMU_64B: /* tlbie invalidate TLBs for all segments */ /* XXX: given the fact that there are too many segments to invalidate, * and we still don't have a tlb_flush_mask(env, n, mask) in Qemu, * we just invalidate all TLBs */ tlb_flush(env, 1); break; #endif /* defined(TARGET_PPC64) */ default: /* XXX: TODO */ cpu_abort(env, "Unknown MMU model\n"); break; } #else ppc_tlb_invalidate_all(env); #endif }
1,453
qemu
a9416dc62c36079b93b4951c894a0b15e53bb38c
1
static void test_visitor_in_fail_list(TestInputVisitorData *data, const void *unused) { int64_t i64 = -1; Error *err = NULL; Visitor *v; /* Unvisited list tail */ v = visitor_input_test_init(data, "[ 1, 2, 3 ]"); visit_type_int(v, NULL, &i64, &error_abort); g_assert_cmpint(i64, ==, 1); visit_type_int(v, NULL, &i64, &error_abort); g_assert_cmpint(i64, ==, 2); visit_check_list(v, &err); }
1,454
qemu
0a608a6e132abffa8fd9455e2354a47acb95847e
1
int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar) { CPUS390XState *env = &cpu->env; uint32_t fh; ZpciFib fib; S390PCIBusDevice *pbdev; uint32_t data; uint64_t cc = ZPCI_PCI_LS_OK; if (env->psw.mask & PSW_MASK_PSTATE) { program_interrupt(env, PGM_PRIVILEGED, 6); return 0; } fh = env->regs[r1] >> 32; if (fiba & 0x7) { program_interrupt(env, PGM_SPECIFICATION, 6); return 0; } pbdev = s390_pci_find_dev_by_fh(fh); if (!pbdev) { setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE); return 0; } memset(&fib, 0, sizeof(fib)); switch (pbdev->state) { case ZPCI_FS_RESERVED: case ZPCI_FS_STANDBY: setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE); return 0; case ZPCI_FS_DISABLED: if (fh & FH_MASK_ENABLE) { setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE); return 0; } goto out; /* BLOCKED bit is set to one coincident with the setting of ERROR bit. * FH Enabled bit is set to one in states of ENABLED, BLOCKED or ERROR. */ case ZPCI_FS_ERROR: fib.fc |= 0x20; case ZPCI_FS_BLOCKED: fib.fc |= 0x40; case ZPCI_FS_ENABLED: fib.fc |= 0x80; if (pbdev->iommu_enabled) { fib.fc |= 0x10; } if (!(fh & FH_MASK_ENABLE)) { env->regs[r1] |= 1ULL << 63; } break; case ZPCI_FS_PERMANENT_ERROR: setcc(cpu, ZPCI_PCI_LS_ERR); s390_set_status_code(env, r1, ZPCI_STPCIFC_ST_PERM_ERROR); return 0; } stq_p(&fib.pba, pbdev->pba); stq_p(&fib.pal, pbdev->pal); stq_p(&fib.iota, pbdev->g_iota); stq_p(&fib.aibv, pbdev->routes.adapter.ind_addr); stq_p(&fib.aisb, pbdev->routes.adapter.summary_addr); stq_p(&fib.fmb_addr, pbdev->fmb_addr); data = ((uint32_t)pbdev->isc << 28) | ((uint32_t)pbdev->noi << 16) | ((uint32_t)pbdev->routes.adapter.ind_offset << 8) | ((uint32_t)pbdev->sum << 7) | pbdev->routes.adapter.summary_offset; stl_p(&fib.data, data); out: if (s390_cpu_virt_mem_write(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) { return 0; } setcc(cpu, cc); return 0; }
1,455
qemu
ce5b1bbf624b977a55ff7f85bb3871682d03baff
1
static void mips_cpu_initfn(Object *obj) { CPUState *cs = CPU(obj); MIPSCPU *cpu = MIPS_CPU(obj); CPUMIPSState *env = &cpu->env; cs->env_ptr = env; cpu_exec_init(cs, &error_abort); if (tcg_enabled()) { mips_tcg_init(); } }
1,456
FFmpeg
f1cd9b03f3fa875eb5e394281b4b688cec611658
1
static av_cold int omx_try_load(OMXContext *s, void *logctx, const char *libname, const char *prefix) { s->lib = dlopen(libname, RTLD_NOW | RTLD_GLOBAL); if (!s->lib) { av_log(logctx, AV_LOG_WARNING, "%s not found\n", libname); return AVERROR_ENCODER_NOT_FOUND; } s->ptr_Init = dlsym_prefixed(s->lib, "OMX_Init", prefix); s->ptr_Deinit = dlsym_prefixed(s->lib, "OMX_Deinit", prefix); s->ptr_ComponentNameEnum = dlsym_prefixed(s->lib, "OMX_ComponentNameEnum", prefix); s->ptr_GetHandle = dlsym_prefixed(s->lib, "OMX_GetHandle", prefix); s->ptr_FreeHandle = dlsym_prefixed(s->lib, "OMX_FreeHandle", prefix); s->ptr_GetComponentsOfRole = dlsym_prefixed(s->lib, "OMX_GetComponentsOfRole", prefix); s->ptr_GetRolesOfComponent = dlsym_prefixed(s->lib, "OMX_GetRolesOfComponent", prefix); if (!s->ptr_Init || !s->ptr_Deinit || !s->ptr_ComponentNameEnum || !s->ptr_GetHandle || !s->ptr_FreeHandle || !s->ptr_GetComponentsOfRole || !s->ptr_GetRolesOfComponent) { av_log(logctx, AV_LOG_WARNING, "Not all functions found in %s\n", libname); dlclose(s->lib); s->lib = NULL; return AVERROR_ENCODER_NOT_FOUND; } return 0; }
1,458
FFmpeg
3d5822d9cf07d08bce82903e4715658f46b01b5c
1
static void encode_cblk(Jpeg2000EncoderContext *s, Jpeg2000T1Context *t1, Jpeg2000Cblk *cblk, Jpeg2000Tile *tile, int width, int height, int bandpos, int lev) { int pass_t = 2, passno, x, y, max=0, nmsedec, bpno; int64_t wmsedec = 0; memset(t1->flags, 0, t1->stride * (height + 2) * sizeof(*t1->flags)); for (y = 0; y < height; y++){ for (x = 0; x < width; x++){ if (t1->data[(y) * t1->stride + x] < 0){ t1->flags[(y+1) * t1->stride + x+1] |= JPEG2000_T1_SGN; t1->data[(y) * t1->stride + x] = -t1->data[(y) * t1->stride + x]; } max = FFMAX(max, t1->data[(y) * t1->stride + x]); } } if (max == 0){ cblk->nonzerobits = 0; bpno = 0; } else{ cblk->nonzerobits = av_log2(max) + 1 - NMSEDEC_FRACBITS; bpno = cblk->nonzerobits - 1; } ff_mqc_initenc(&t1->mqc, cblk->data); for (passno = 0; bpno >= 0; passno++){ nmsedec=0; switch(pass_t){ case 0: encode_sigpass(t1, width, height, bandpos, &nmsedec, bpno); break; case 1: encode_refpass(t1, width, height, &nmsedec, bpno); break; case 2: encode_clnpass(t1, width, height, bandpos, &nmsedec, bpno); break; } cblk->passes[passno].rate = ff_mqc_flush_to(&t1->mqc, cblk->passes[passno].flushed, &cblk->passes[passno].flushed_len); wmsedec += (int64_t)nmsedec << (2*bpno); cblk->passes[passno].disto = wmsedec; if (++pass_t == 3){ pass_t = 0; bpno--; } } cblk->npasses = passno; cblk->ninclpasses = passno; cblk->passes[passno-1].rate = ff_mqc_flush_to(&t1->mqc, cblk->passes[passno-1].flushed, &cblk->passes[passno-1].flushed_len); }
1,460
qemu
2531088f6c1ce1f620f8d5a545f0af95598e69fc
1
static void gic_dist_writeb(void *opaque, hwaddr offset, uint32_t value, MemTxAttrs attrs) { GICState *s = (GICState *)opaque; int irq; int i; int cpu; cpu = gic_get_current_cpu(s); if (offset < 0x100) { if (offset == 0) { if (s->security_extn && !attrs.secure) { /* NS version is just an alias of the S version's bit 1 */ s->ctlr = deposit32(s->ctlr, 1, 1, value); } else if (gic_has_groups(s)) { s->ctlr = value & (GICD_CTLR_EN_GRP0 | GICD_CTLR_EN_GRP1); } else { s->ctlr = value & GICD_CTLR_EN_GRP0; } DPRINTF("Distributor: Group0 %sabled; Group 1 %sabled\n", s->ctlr & GICD_CTLR_EN_GRP0 ? "En" : "Dis", s->ctlr & GICD_CTLR_EN_GRP1 ? "En" : "Dis"); } else if (offset < 4) { /* ignored. */ } else if (offset >= 0x80) { /* Interrupt Group Registers: RAZ/WI for NS access to secure * GIC, or for GICs without groups. */ if (!(s->security_extn && !attrs.secure) && gic_has_groups(s)) { /* Every byte offset holds 8 group status bits */ irq = (offset - 0x80) * 8 + GIC_BASE_IRQ; if (irq >= s->num_irq) { goto bad_reg; } for (i = 0; i < 8; i++) { /* Group bits are banked for private interrupts */ int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK; if (value & (1 << i)) { /* Group1 (Non-secure) */ GIC_SET_GROUP(irq + i, cm); } else { /* Group0 (Secure) */ GIC_CLEAR_GROUP(irq + i, cm); } } } } else { goto bad_reg; } } else if (offset < 0x180) { /* Interrupt Set Enable. */ irq = (offset - 0x100) * 8 + GIC_BASE_IRQ; if (irq >= s->num_irq) goto bad_reg; if (irq < GIC_NR_SGIS) { value = 0xff; } for (i = 0; i < 8; i++) { if (value & (1 << i)) { int mask = (irq < GIC_INTERNAL) ? (1 << cpu) : GIC_TARGET(irq + i); int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK; if (!GIC_TEST_ENABLED(irq + i, cm)) { DPRINTF("Enabled IRQ %d\n", irq + i); } GIC_SET_ENABLED(irq + i, cm); /* If a raised level triggered IRQ enabled then mark is as pending. */ if (GIC_TEST_LEVEL(irq + i, mask) && !GIC_TEST_EDGE_TRIGGER(irq + i)) { DPRINTF("Set %d pending mask %x\n", irq + i, mask); GIC_SET_PENDING(irq + i, mask); } } } } else if (offset < 0x200) { /* Interrupt Clear Enable. */ irq = (offset - 0x180) * 8 + GIC_BASE_IRQ; if (irq >= s->num_irq) goto bad_reg; if (irq < GIC_NR_SGIS) { value = 0; } for (i = 0; i < 8; i++) { if (value & (1 << i)) { int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK; if (GIC_TEST_ENABLED(irq + i, cm)) { DPRINTF("Disabled IRQ %d\n", irq + i); trace_gic_disable_irq(irq + i); } GIC_CLEAR_ENABLED(irq + i, cm); } } } else if (offset < 0x280) { /* Interrupt Set Pending. */ irq = (offset - 0x200) * 8 + GIC_BASE_IRQ; if (irq >= s->num_irq) goto bad_reg; if (irq < GIC_NR_SGIS) { value = 0; } for (i = 0; i < 8; i++) { if (value & (1 << i)) { GIC_SET_PENDING(irq + i, GIC_TARGET(irq + i)); } } } else if (offset < 0x300) { /* Interrupt Clear Pending. */ irq = (offset - 0x280) * 8 + GIC_BASE_IRQ; if (irq >= s->num_irq) goto bad_reg; if (irq < GIC_NR_SGIS) { value = 0; } for (i = 0; i < 8; i++) { /* ??? This currently clears the pending bit for all CPUs, even for per-CPU interrupts. It's unclear whether this is the corect behavior. */ if (value & (1 << i)) { GIC_CLEAR_PENDING(irq + i, ALL_CPU_MASK); } } } else if (offset < 0x400) { /* Interrupt Active. */ goto bad_reg; } else if (offset < 0x800) { /* Interrupt Priority. */ irq = (offset - 0x400) + GIC_BASE_IRQ; if (irq >= s->num_irq) goto bad_reg; gic_set_priority(s, cpu, irq, value, attrs); } else if (offset < 0xc00) { /* Interrupt CPU Target. RAZ/WI on uniprocessor GICs, with the * annoying exception of the 11MPCore's GIC. */ if (s->num_cpu != 1 || s->revision == REV_11MPCORE) { irq = (offset - 0x800) + GIC_BASE_IRQ; if (irq >= s->num_irq) { goto bad_reg; } if (irq < 29) { value = 0; } else if (irq < GIC_INTERNAL) { value = ALL_CPU_MASK; } s->irq_target[irq] = value & ALL_CPU_MASK; } } else if (offset < 0xf00) { /* Interrupt Configuration. */ irq = (offset - 0xc00) * 4 + GIC_BASE_IRQ; if (irq >= s->num_irq) goto bad_reg; if (irq < GIC_NR_SGIS) value |= 0xaa; for (i = 0; i < 4; i++) { if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) { if (value & (1 << (i * 2))) { GIC_SET_MODEL(irq + i); } else { GIC_CLEAR_MODEL(irq + i); } } if (value & (2 << (i * 2))) { GIC_SET_EDGE_TRIGGER(irq + i); } else { GIC_CLEAR_EDGE_TRIGGER(irq + i); } } } else if (offset < 0xf10) { /* 0xf00 is only handled for 32-bit writes. */ goto bad_reg; } else if (offset < 0xf20) { /* GICD_CPENDSGIRn */ if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) { goto bad_reg; } irq = (offset - 0xf10); s->sgi_pending[irq][cpu] &= ~value; if (s->sgi_pending[irq][cpu] == 0) { GIC_CLEAR_PENDING(irq, 1 << cpu); } } else if (offset < 0xf30) { /* GICD_SPENDSGIRn */ if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) { goto bad_reg; } irq = (offset - 0xf20); GIC_SET_PENDING(irq, 1 << cpu); s->sgi_pending[irq][cpu] |= value; } else { goto bad_reg; } gic_update(s); return; bad_reg: qemu_log_mask(LOG_GUEST_ERROR, "gic_dist_writeb: Bad offset %x\n", (int)offset); }
1,461
FFmpeg
46cb61819d867961e8f2052a8f13bcf2027d484f
1
static int gif_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
{
    GifState *s = avctx->priv_data;
    AVFrame *picture = data;
    int ret;

    bytestream2_init(&s->gb, avpkt->data, avpkt->size);

    s->picture.pts          = avpkt->pts;
    s->picture.pkt_pts      = avpkt->pts;
    s->picture.pkt_dts      = avpkt->dts;
    s->picture.pkt_duration = avpkt->duration;

    if (avpkt->size >= 6) {
        s->keyframe = memcmp(avpkt->data, gif87a_sig, 6) == 0 ||
                      memcmp(avpkt->data, gif89a_sig, 6) == 0;
    } else {
        s->keyframe = 0;
    }

    if (s->keyframe) {
        s->keyframe_ok = 0;
        if ((ret = gif_read_header1(s)) < 0)
            return ret;

        if ((ret = av_image_check_size(s->screen_width, s->screen_height, 0, avctx)) < 0)
            return ret;
        avcodec_set_dimensions(avctx, s->screen_width, s->screen_height);

        if (s->picture.data[0])
            avctx->release_buffer(avctx, &s->picture);

        if ((ret = ff_get_buffer(avctx, &s->picture)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
            return ret;
        }

        s->picture.pict_type = AV_PICTURE_TYPE_I;
        s->picture.key_frame = 1;
    } else {
        if ((ret = avctx->reget_buffer(avctx, &s->picture)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
            return ret;
        }

        s->picture.pict_type = AV_PICTURE_TYPE_P;
        s->picture.key_frame = 0;
    }

    ret = gif_parse_next_image(s, got_frame);
    if (ret < 0)
        return ret;
    else if (*got_frame)
        *picture = s->picture;

    return avpkt->size;
}
1,462
qemu
d4370741402a97b8b6d0c38fef18ab38bf25ab22
1
static GSList *gd_vc_init(GtkDisplayState *s, VirtualConsole *vc, int index, GSList *group, GtkWidget *view_menu)
{
#if defined(CONFIG_VTE)
    const char *label;
    char buffer[32];
    char path[32];
#if VTE_CHECK_VERSION(0, 26, 0)
    VtePty *pty;
#endif
    GIOChannel *chan;
    GtkWidget *scrolled_window;
    GtkAdjustment *vadjustment;
    int master_fd, slave_fd;

    snprintf(buffer, sizeof(buffer), "vc%d", index);
    snprintf(path, sizeof(path), "<QEMU>/View/VC%d", index);

    vc->chr = vcs[index];

    if (vc->chr->label) {
        label = vc->chr->label;
    } else {
        label = buffer;
    }

    vc->menu_item = gtk_radio_menu_item_new_with_mnemonic(group, label);
    group = gtk_radio_menu_item_get_group(GTK_RADIO_MENU_ITEM(vc->menu_item));
    gtk_menu_item_set_accel_path(GTK_MENU_ITEM(vc->menu_item), path);
    gtk_accel_map_add_entry(path, GDK_KEY_2 + index, HOTKEY_MODIFIERS);

    vc->terminal = vte_terminal_new();

    master_fd = qemu_openpty_raw(&slave_fd, NULL);
    g_assert(master_fd != -1);

#if VTE_CHECK_VERSION(0, 26, 0)
    pty = vte_pty_new_foreign(master_fd, NULL);
    vte_terminal_set_pty_object(VTE_TERMINAL(vc->terminal), pty);
#else
    vte_terminal_set_pty(VTE_TERMINAL(vc->terminal), master_fd);
#endif

    vte_terminal_set_scrollback_lines(VTE_TERMINAL(vc->terminal), -1);

#if VTE_CHECK_VERSION(0, 28, 0) && GTK_CHECK_VERSION(3, 0, 0)
    vadjustment = gtk_scrollable_get_vadjustment(GTK_SCROLLABLE(vc->terminal));
#else
    vadjustment = vte_terminal_get_adjustment(VTE_TERMINAL(vc->terminal));
#endif

    scrolled_window = gtk_scrolled_window_new(NULL, vadjustment);
    gtk_container_add(GTK_CONTAINER(scrolled_window), vc->terminal);

    vte_terminal_set_size(VTE_TERMINAL(vc->terminal), 80, 25);

    vc->fd = slave_fd;
    vc->chr->opaque = vc;
    vc->scrolled_window = scrolled_window;

    gtk_scrolled_window_set_policy(GTK_SCROLLED_WINDOW(vc->scrolled_window),
                                   GTK_POLICY_AUTOMATIC, GTK_POLICY_AUTOMATIC);

    gtk_notebook_append_page(GTK_NOTEBOOK(s->notebook), scrolled_window,
                             gtk_label_new(label));
    g_signal_connect(vc->menu_item, "activate",
                     G_CALLBACK(gd_menu_switch_vc), s);

    gtk_menu_shell_append(GTK_MENU_SHELL(view_menu), vc->menu_item);

    qemu_chr_be_generic_open(vc->chr);
    if (vc->chr->init) {
        vc->chr->init(vc->chr);
    }

    chan = g_io_channel_unix_new(vc->fd);
    g_io_add_watch(chan, G_IO_IN, gd_vc_in, vc);

#endif /* CONFIG_VTE */
    return group;
}
1,463
FFmpeg
b64fe493717cb2f05a019d9f13778382a78d9d0a
1
static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
{
    AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
    AVStream *st = s->streams[stream_index];
    uint8_t *data = pkt ? pkt->data : NULL;
    int size      = pkt ? pkt->size : 0;
    int ret = 0, got_output = 0;

    if (!pkt) {
        av_init_packet(&flush_pkt);
        pkt        = &flush_pkt;
        got_output = 1;
    } else if (!size && st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) {
        // preserve 0-size sync packets
        compute_pkt_fields(s, st, st->parser, pkt, AV_NOPTS_VALUE, AV_NOPTS_VALUE);
    }

    while (size > 0 || (pkt == &flush_pkt && got_output)) {
        int len;
        int64_t next_pts = pkt->pts;
        int64_t next_dts = pkt->dts;

        av_init_packet(&out_pkt);
        len = av_parser_parse2(st->parser, st->codec,
                               &out_pkt.data, &out_pkt.size, data, size,
                               pkt->pts, pkt->dts, pkt->pos);

        pkt->pts = pkt->dts = AV_NOPTS_VALUE;
        pkt->pos = -1;
        /* increment read pointer */
        data += len;
        size -= len;

        got_output = !!out_pkt.size;

        if (!out_pkt.size)
            continue;

        if (pkt->side_data) {
            out_pkt.side_data       = pkt->side_data;
            out_pkt.side_data_elems = pkt->side_data_elems;
            pkt->side_data          = NULL;
            pkt->side_data_elems    = 0;
        }

        /* set the duration */
        out_pkt.duration = (st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) ? pkt->duration : 0;
        if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            if (st->codec->sample_rate > 0) {
                out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
                                                    (AVRational) { 1, st->codec->sample_rate },
                                                    st->time_base,
                                                    AV_ROUND_DOWN);
            }
        }

        out_pkt.stream_index = st->index;
        out_pkt.pts          = st->parser->pts;
        out_pkt.dts          = st->parser->dts;
        out_pkt.pos          = st->parser->pos;

        if (st->need_parsing == AVSTREAM_PARSE_FULL_RAW)
            out_pkt.pos = st->parser->frame_offset;

        if (st->parser->key_frame == 1 ||
            (st->parser->key_frame == -1 &&
             st->parser->pict_type == AV_PICTURE_TYPE_I))
            out_pkt.flags |= AV_PKT_FLAG_KEY;

        if (st->parser->key_frame == -1 && st->parser->pict_type == AV_PICTURE_TYPE_NONE && (pkt->flags & AV_PKT_FLAG_KEY))
            out_pkt.flags |= AV_PKT_FLAG_KEY;

        compute_pkt_fields(s, st, st->parser, &out_pkt, next_dts, next_pts);

        if ((ret = add_to_pktbuf(&s->internal->parse_queue, &out_pkt,
                                 &s->internal->parse_queue_end, 1))) {
            av_packet_unref(&out_pkt);
            goto fail;
        }
    }

    /* end of the stream => close and free the parser */
    if (pkt == &flush_pkt) {
        av_parser_close(st->parser);
        st->parser = NULL;
    }

fail:
    av_packet_unref(pkt);
    return ret;
}
1,464
FFmpeg
ac4a5e3abd8a022ab32245ad527ffc37eabab8b1
1
int ff_frame_thread_init(AVCodecContext *avctx)
{
    int thread_count = avctx->thread_count;
    const AVCodec *codec = avctx->codec;
    AVCodecContext *src = avctx;
    FrameThreadContext *fctx;
    int i, err = 0;

#if HAVE_W32THREADS
    w32thread_init();
#endif

    if (!thread_count) {
        int nb_cpus = av_cpu_count();
        av_log(avctx, AV_LOG_DEBUG, "detected %d logical cores\n", nb_cpus);
        // use number of cores + 1 as thread count if there is more than one
        if (nb_cpus > 1)
            thread_count = avctx->thread_count = FFMIN(nb_cpus + 1, MAX_AUTO_THREADS);
        else
            thread_count = avctx->thread_count = 1;
    }

    if (thread_count <= 1) {
        avctx->active_thread_type = 0;
        return 0;
    }

    avctx->internal->thread_ctx = fctx = av_mallocz(sizeof(FrameThreadContext));

    fctx->threads = av_mallocz(sizeof(PerThreadContext) * thread_count);
    pthread_mutex_init(&fctx->buffer_mutex, NULL);
    fctx->delaying = 1;

    for (i = 0; i < thread_count; i++) {
        AVCodecContext *copy = av_malloc(sizeof(AVCodecContext));
        PerThreadContext *p  = &fctx->threads[i];

        pthread_mutex_init(&p->mutex, NULL);
        pthread_mutex_init(&p->progress_mutex, NULL);
        pthread_cond_init(&p->input_cond, NULL);
        pthread_cond_init(&p->progress_cond, NULL);
        pthread_cond_init(&p->output_cond, NULL);

        p->frame = av_frame_alloc();
        if (!p->frame) {
            err = AVERROR(ENOMEM);
            goto error;
        }

        p->parent = fctx;
        p->avctx  = copy;

        if (!copy) {
            err = AVERROR(ENOMEM);
            goto error;
        }

        *copy = *src;

        copy->internal = av_malloc(sizeof(AVCodecInternal));
        if (!copy->internal) {
            err = AVERROR(ENOMEM);
            goto error;
        }
        *copy->internal = *src->internal;
        copy->internal->thread_ctx = p;
        copy->internal->pkt = &p->avpkt;

        if (!i) {
            src = copy;

            if (codec->init)
                err = codec->init(copy);

            update_context_from_thread(avctx, copy, 1);
        } else {
            copy->priv_data = av_malloc(codec->priv_data_size);
            if (!copy->priv_data) {
                err = AVERROR(ENOMEM);
                goto error;
            }
            memcpy(copy->priv_data, src->priv_data, codec->priv_data_size);
            copy->internal->is_copy = 1;

            if (codec->init_thread_copy)
                err = codec->init_thread_copy(copy);
        }

        if (err)
            goto error;

        if (!pthread_create(&p->thread, NULL, frame_worker_thread, p))
            p->thread_init = 1;
    }

    return 0;

error:
    ff_frame_thread_free(avctx, i+1);

    return err;
}
1,465
qemu
4482e05cbbb7e50e476f6a9500cf0b38913bd939
1
CPUState *cpu_generic_init(const char *typename, const char *cpu_model)
{
    /* TODO: all callers of cpu_generic_init() need to be converted to
     * call cpu_parse_features() only once, before calling cpu_generic_init().
     */
    const char *cpu_type = cpu_parse_cpu_model(typename, cpu_model);

    if (cpu_type) {
        return cpu_create(cpu_type);
    }
    return NULL;
}
1,466
FFmpeg
24fee95321c1463360ba7042d026dae021854360
1
static int handle_metadata(RTMPContext *rt, RTMPPacket *pkt)
{
    int ret, old_flv_size, type;
    const uint8_t *next;
    uint8_t *p;
    uint32_t size;
    uint32_t ts, cts, pts = 0;

    old_flv_size = update_offset(rt, pkt->size);

    if ((ret = av_reallocp(&rt->flv_data, rt->flv_size)) < 0) {
        rt->flv_size = rt->flv_off = 0;
        return ret;
    }

    next = pkt->data;
    p    = rt->flv_data + old_flv_size;

    /* copy data while rewriting timestamps */
    ts = pkt->timestamp;

    while (next - pkt->data < pkt->size - RTMP_HEADER) {
        type = bytestream_get_byte(&next);
        size = bytestream_get_be24(&next);
        cts  = bytestream_get_be24(&next);
        cts |= bytestream_get_byte(&next) << 24;
        if (!pts)
            pts = cts;
        ts += cts - pts;
        pts = cts;

        bytestream_put_byte(&p, type);
        bytestream_put_be24(&p, size);
        bytestream_put_be24(&p, ts);
        bytestream_put_byte(&p, ts >> 24);

        memcpy(p, next, size + 3 + 4);

        next += size + 3 + 4;
        p    += size + 3 + 4;
    }

    memcpy(p, next, RTMP_HEADER);

    return 0;
}
1,467
qemu
7d1b0095bff7157e856d1d0e6c4295641ced2752
1
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    dead_tmp(tmp);
}
1,468
qemu
e41029b378b4a65a0b89b5a8dc087aca6b5d012d
1
static void gen_dstst(DisasContext *ctx)
{
    if (rA(ctx->opcode) == 0) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_LSWX);
    } else {
        /* interpreted as no-op */
    }
}
1,470
FFmpeg
e84314515ac39543641cd148a33b155218df6a74
1
static MMSSCPacketType get_tcp_server_response(MMSTContext *mmst)
{
    int read_result;
    MMSSCPacketType packet_type= -1;
    MMSContext *mms = &mmst->mms;
    for(;;) {
        read_result = url_read_complete(mms->mms_hd, mms->in_buffer, 8);
        if (read_result != 8) {
            if(read_result < 0) {
                av_log(NULL, AV_LOG_ERROR,
                       "Error reading packet header: %d (%s)\n",
                       read_result, strerror(read_result));
                packet_type = SC_PKT_CANCEL;
            } else {
                av_log(NULL, AV_LOG_ERROR,
                       "The server closed the connection\n");
                packet_type = SC_PKT_NO_DATA;
            }
            return packet_type;
        }

        // handle command packet.
        if(AV_RL32(mms->in_buffer + 4)==0xb00bface) {
            int length_remaining, hr;

            mmst->incoming_flags= mms->in_buffer[3];
            read_result= url_read_complete(mms->mms_hd, mms->in_buffer+8, 4);
            if(read_result != 4) {
                av_log(NULL, AV_LOG_ERROR,
                       "Reading command packet length failed: %d (%s)\n",
                       read_result,
                       read_result < 0 ? strerror(read_result) :
                           "The server closed the connection");
                return read_result < 0 ? read_result : AVERROR_IO;
            }

            length_remaining= AV_RL32(mms->in_buffer+8) + 4;
            av_dlog(NULL, "Length remaining is %d\n", length_remaining);
            // read the rest of the packet.
            if (length_remaining < 0
                || length_remaining > sizeof(mms->in_buffer) - 12) {
                av_log(NULL, AV_LOG_ERROR,
                       "Incoming packet length %d exceeds bufsize %zu\n",
                       length_remaining, sizeof(mms->in_buffer) - 12);
                return AVERROR_INVALIDDATA;
            }
            read_result = url_read_complete(mms->mms_hd, mms->in_buffer + 12,
                                            length_remaining);
            if (read_result != length_remaining) {
                av_log(NULL, AV_LOG_ERROR,
                       "Reading pkt data (length=%d) failed: %d (%s)\n",
                       length_remaining, read_result,
                       read_result < 0 ? strerror(read_result) :
                           "The server closed the connection");
                return read_result < 0 ? read_result : AVERROR_IO;
            }
            packet_type= AV_RL16(mms->in_buffer+36);
            hr = AV_RL32(mms->in_buffer + 40);
            if (hr) {
                av_log(NULL, AV_LOG_ERROR,
                       "Server sent a message with packet type 0x%x and error status code 0x%08x\n",
                       packet_type, hr);
                return AVERROR_UNKNOWN;
            }
        } else {
            int length_remaining;
            int packet_id_type;
            int tmp;

            // note we cache the first 8 bytes,
            // then fill up the buffer with the others
            tmp                       = AV_RL16(mms->in_buffer + 6);
            length_remaining          = (tmp - 8) & 0xffff;
            mmst->incoming_packet_seq = AV_RL32(mms->in_buffer);
            packet_id_type            = mms->in_buffer[4];
            mmst->incoming_flags      = mms->in_buffer[5];

            if (length_remaining < 0
                || length_remaining > sizeof(mms->in_buffer) - 8) {
                av_log(NULL, AV_LOG_ERROR,
                       "Data length %d is invalid or too large (max=%zu)\n",
                       length_remaining, sizeof(mms->in_buffer));
                return AVERROR_INVALIDDATA;
            }
            mms->remaining_in_len = length_remaining;
            mms->read_in_ptr      = mms->in_buffer;
            read_result= url_read_complete(mms->mms_hd, mms->in_buffer, length_remaining);
            if(read_result != length_remaining) {
                av_log(NULL, AV_LOG_ERROR,
                       "Failed to read packet data of size %d: %d (%s)\n",
                       length_remaining, read_result,
                       read_result < 0 ? strerror(read_result) :
                           "The server closed the connection");
                return read_result < 0 ? read_result : AVERROR_IO;
            }

            // if we successfully read everything.
            if(packet_id_type == mmst->header_packet_id) {
                packet_type = SC_PKT_ASF_HEADER;
                // Store the asf header
                if(!mms->header_parsed) {
                    void *p = av_realloc(mms->asf_header,
                                         mms->asf_header_size + mms->remaining_in_len);
                    if (!p) {
                        av_freep(&mms->asf_header);
                        return AVERROR(ENOMEM);
                    }
                    mms->asf_header = p;
                    memcpy(mms->asf_header + mms->asf_header_size,
                           mms->read_in_ptr, mms->remaining_in_len);
                    mms->asf_header_size += mms->remaining_in_len;
                }
                // 0x04 means asf header is sent in multiple packets.
                if (mmst->incoming_flags == 0x04)
                    continue;
            } else if(packet_id_type == mmst->packet_id) {
                packet_type = SC_PKT_ASF_MEDIA;
            } else {
                av_dlog(NULL, "packet id type %d is old.", packet_id_type);
                continue;
            }
        }

        // preprocess some packet type
        if(packet_type == SC_PKT_KEEPALIVE) {
            send_keepalive_packet(mmst);
            continue;
        } else if(packet_type == SC_PKT_STREAM_CHANGING) {
            handle_packet_stream_changing_type(mmst);
        } else if(packet_type == SC_PKT_ASF_MEDIA) {
            pad_media_packet(mms);
        }
        return packet_type;
    }
}
1,471
qemu
aec4b054ea36c53c8b887da99f20010133b84378
1
static void invalid_dict_comma(void)
{
    QObject *obj = qobject_from_json("{'abc':32,}", NULL);
    g_assert(obj == NULL);
}
1,472
qemu
b061dc41f62048acd4a34c6570c0ea396cd9d0b4
1
static void type_initialize(TypeImpl *ti)
{
    TypeImpl *parent;

    if (ti->class) {
        return;
    }

    ti->class_size = type_class_get_size(ti);
    ti->instance_size = type_object_get_size(ti);

    ti->class = g_malloc0(ti->class_size);

    parent = type_get_parent(ti);
    if (parent) {
        type_initialize(parent);
        GSList *e;
        int i;

        g_assert(parent->class_size <= ti->class_size);
        memcpy(ti->class, parent->class, parent->class_size);
        ti->class->interfaces = NULL;

        for (e = parent->class->interfaces; e; e = e->next) {
            ObjectClass *iface = e->data;
            type_initialize_interface(ti, object_class_get_name(iface));
        }

        for (i = 0; i < ti->num_interfaces; i++) {
            TypeImpl *t = type_get_by_name(ti->interfaces[i].typename);
            for (e = ti->class->interfaces; e; e = e->next) {
                TypeImpl *target_type = OBJECT_CLASS(e->data)->type;

                if (type_is_ancestor(target_type, t)) {
                    break;
                }
            }

            if (e) {
                continue;
            }

            type_initialize_interface(ti, ti->interfaces[i].typename);
        }
    }

    ti->class->type = ti;

    while (parent) {
        if (parent->class_base_init) {
            parent->class_base_init(ti->class, ti->class_data);
        }
        parent = type_get_parent(parent);
    }

    if (ti->class_init) {
        ti->class_init(ti->class, ti->class_data);
    }
}
1,474
FFmpeg
783b350b2e49d06030b30ee9b7e1aa5825e4a5a5
1
static av_cold int decode_close_mp3on4(AVCodecContext * avctx)
{
    MP3On4DecodeContext *s = avctx->priv_data;
    int i;

    for (i = 0; i < s->frames; i++)
        av_freep(&s->mp3decctx[i]);

    return 0;
}
1,475
FFmpeg
6d9e74cd4179f42a8fa860f2e08d370c7c36325f
1
static inline void encode_vlc_codeword(PutBitContext *pb, unsigned codebook, int val)
{
    unsigned int rice_order, exp_order, switch_bits, switch_val;
    int exponent;

    /* number of prefix bits to switch between Rice and expGolomb */
    switch_bits = (codebook & 3) + 1;
    rice_order  = codebook >> 5;       /* rice code order */
    exp_order   = (codebook >> 2) & 7; /* exp golomb code order */

    switch_val  = switch_bits << rice_order;

    if (val >= switch_val) {
        val -= switch_val - (1 << exp_order);
        exponent = av_log2(val);

        put_bits(pb, exponent - exp_order + switch_bits, 0);
        put_bits(pb, 1, 1);
        put_bits(pb, exponent, val);
    } else {
        exponent = val >> rice_order;

        if (exponent)
            put_bits(pb, exponent, 0);
        put_bits(pb, 1, 1);
        if (rice_order)
            put_sbits(pb, rice_order, val);
    }
}
1,477
FFmpeg
2ec4a84dca603a24a8131297036dfe30eed33dd7
1
static int decoder_decode_frame(Decoder *d, void *fframe)
{
    int got_frame = 0;
    AVFrame *frame = fframe;

    d->flushed = 0;

    do {
        int ret = -1;

        if (d->queue->abort_request)
            return -1;

        if (!d->packet_pending || d->queue->serial != d->pkt_serial) {
            AVPacket pkt;
            do {
                if (d->queue->nb_packets == 0)
                    SDL_CondSignal(d->empty_queue_cond);
                if (packet_queue_get(d->queue, &pkt, 1, &d->pkt_serial) < 0)
                    return -1;
                if (pkt.data == flush_pkt.data) {
                    avcodec_flush_buffers(d->avctx);
                    d->finished = 0;
                    d->flushed = 1;
                    d->next_pts = d->start_pts;
                    d->next_pts_tb = d->start_pts_tb;
                }
            } while (pkt.data == flush_pkt.data || d->queue->serial != d->pkt_serial);
            av_free_packet(&d->pkt);
            d->pkt_temp = d->pkt = pkt;
            d->packet_pending = 1;
        }

        switch (d->avctx->codec_type) {
            case AVMEDIA_TYPE_VIDEO:
                ret = avcodec_decode_video2(d->avctx, frame, &got_frame, &d->pkt_temp);
                if (got_frame) {
                    if (decoder_reorder_pts == -1) {
                        frame->pts = av_frame_get_best_effort_timestamp(frame);
                    } else if (decoder_reorder_pts) {
                        frame->pts = frame->pkt_pts;
                    } else {
                        frame->pts = frame->pkt_dts;
                    }
                }
                break;
            case AVMEDIA_TYPE_AUDIO:
                ret = avcodec_decode_audio4(d->avctx, frame, &got_frame, &d->pkt_temp);
                if (got_frame) {
                    AVRational tb = (AVRational){1, frame->sample_rate};
                    if (frame->pts != AV_NOPTS_VALUE)
                        frame->pts = av_rescale_q(frame->pts, d->avctx->time_base, tb);
                    else if (frame->pkt_pts != AV_NOPTS_VALUE)
                        frame->pts = av_rescale_q(frame->pkt_pts, av_codec_get_pkt_timebase(d->avctx), tb);
                    else if (d->next_pts != AV_NOPTS_VALUE)
                        frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
                    if (frame->pts != AV_NOPTS_VALUE) {
                        d->next_pts = frame->pts + frame->nb_samples;
                        d->next_pts_tb = tb;
                    }
                }
                break;
            case AVMEDIA_TYPE_SUBTITLE:
                ret = avcodec_decode_subtitle2(d->avctx, fframe, &got_frame, &d->pkt_temp);
                break;
        }

        if (ret < 0) {
            d->packet_pending = 0;
        } else {
            d->pkt_temp.dts = d->pkt_temp.pts = AV_NOPTS_VALUE;
            if (d->pkt_temp.data) {
                if (d->avctx->codec_type != AVMEDIA_TYPE_AUDIO)
                    ret = d->pkt_temp.size;
                d->pkt_temp.data += ret;
                d->pkt_temp.size -= ret;
                if (d->pkt_temp.size <= 0)
                    d->packet_pending = 0;
            } else {
                if (!got_frame) {
                    d->packet_pending = 0;
                    d->finished = d->pkt_serial;
                }
            }
        }
    } while (!got_frame && !d->finished);

    return got_frame;
}
1,478