project (string, 2 values) | commit_id (string, 40 chars) | target (int64, 0-1) | func (string, 26-142k chars) | idx (int64, 0-27.3k)
---|---|---|---|---|
FFmpeg | d5128fce38646d3f64c55feda42084888ba0e87e | 1 | static void decode_array_0000(APEContext *ctx, GetBitContext *gb,
int32_t *out, APERice *rice, int blockstodecode)
{
int i;
int ksummax, ksummin;
rice->ksum = 0;
for (i = 0; i < 5; i++) {
out[i] = get_rice_ook(&ctx->gb, 10);
rice->ksum += out[i];
}
rice->k = av_log2(rice->ksum / 10) + 1;
for (; i < 64; i++) {
out[i] = get_rice_ook(&ctx->gb, rice->k);
rice->ksum += out[i];
rice->k = av_log2(rice->ksum / ((i + 1) * 2)) + 1;
}
ksummax = 1 << rice->k + 7;
ksummin = rice->k ? (1 << rice->k + 6) : 0;
for (; i < blockstodecode; i++) {
out[i] = get_rice_ook(&ctx->gb, rice->k);
rice->ksum += out[i] - out[i - 64];
while (rice->ksum < ksummin) {
rice->k--;
ksummin = rice->k ? ksummin >> 1 : 0;
ksummax >>= 1;
}
while (rice->ksum >= ksummax) {
rice->k++;
if (rice->k > 24)
ksummax <<= 1;
ksummin = ksummin ? ksummin << 1 : 128;
}
}
for (i = 0; i < blockstodecode; i++) {
if (out[i] & 1)
out[i] = (out[i] >> 1) + 1;
else
out[i] = -(out[i] >> 1);
}
} | 25,883 |
FFmpeg | aac8b76983e340bc744d3542d676f72efa3b474f | 0 | static void filter_mb_edgeh( H264Context *h, uint8_t *pix, int stride, int16_t bS[4], int qp ) {
int i, d;
const int index_a = qp + h->slice_alpha_c0_offset;
const int alpha = (alpha_table+52)[index_a];
const int beta = (beta_table+52)[qp + h->slice_beta_offset];
const int pix_next = stride;
if( bS[0] < 4 ) {
int8_t tc[4];
for(i=0; i<4; i++)
tc[i] = bS[i] ? (tc0_table+52)[index_a][bS[i] - 1] : -1;
h->s.dsp.h264_v_loop_filter_luma(pix, stride, alpha, beta, tc);
} else {
h->s.dsp.h264_v_loop_filter_luma_intra(pix, stride, alpha, beta);
}
}
| 25,884 |
qemu | 0fbc20740342713f282b118b4a446c4c43df3f4a | 1 | int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
{
struct kvm_msi msi;
KVMMSIRoute *route;
if (s->direct_msi) {
msi.address_lo = (uint32_t)msg.address;
msi.address_hi = msg.address >> 32;
msi.data = le32_to_cpu(msg.data);
msi.flags = 0;
memset(msi.pad, 0, sizeof(msi.pad));
return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi);
}
route = kvm_lookup_msi_route(s, msg);
if (!route) {
int virq;
virq = kvm_irqchip_get_virq(s);
if (virq < 0) {
return virq;
}
route = g_malloc(sizeof(KVMMSIRoute));
route->kroute.gsi = virq;
route->kroute.type = KVM_IRQ_ROUTING_MSI;
route->kroute.flags = 0;
route->kroute.u.msi.address_lo = (uint32_t)msg.address;
route->kroute.u.msi.address_hi = msg.address >> 32;
route->kroute.u.msi.data = le32_to_cpu(msg.data);
kvm_add_routing_entry(s, &route->kroute);
kvm_irqchip_commit_routes(s);
QTAILQ_INSERT_TAIL(&s->msi_hashtab[kvm_hash_msi(msg.data)], route,
entry);
}
assert(route->kroute.type == KVM_IRQ_ROUTING_MSI);
return kvm_set_irq(s, route->kroute.gsi, 1);
}
| 25,885 |
qemu | 0d2e91c17829729812bf5d22d20dd0f5d2554ec2 | 1 | void sd_set_cb(SDState *sd, qemu_irq readonly, qemu_irq insert)
{
sd->readonly_cb = readonly;
sd->inserted_cb = insert;
qemu_set_irq(readonly, bdrv_is_read_only(sd->bdrv));
qemu_set_irq(insert, bdrv_is_inserted(sd->bdrv));
}
| 25,886 |
FFmpeg | 53ea595eec984e3109310e8bb7ff4b5786d91057 | 1 | static int mov_seek_stream(AVFormatContext *s, AVStream *st, int64_t timestamp, int flags)
{
MOVStreamContext *sc = st->priv_data;
int sample, time_sample;
int i;
sample = av_index_search_timestamp(st, timestamp, flags);
av_log(s, AV_LOG_TRACE, "stream %d, timestamp %"PRId64", sample %d\n", st->index, timestamp, sample);
if (sample < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
sample = 0;
if (sample < 0) /* not sure what to do */
return AVERROR_INVALIDDATA;
sc->current_sample = sample;
av_log(s, AV_LOG_TRACE, "stream %d, found sample %d\n", st->index, sc->current_sample);
/* adjust ctts index */
if (sc->ctts_data) {
time_sample = 0;
for (i = 0; i < sc->ctts_count; i++) {
int next = time_sample + sc->ctts_data[i].count;
if (next > sc->current_sample) {
sc->ctts_index = i;
sc->ctts_sample = sc->current_sample - time_sample;
break;
}
time_sample = next;
}
}
/* adjust stsd index */
time_sample = 0;
for (i = 0; i < sc->stsc_count; i++) {
int next = time_sample + mov_get_stsc_samples(sc, i);
if (next > sc->current_sample) {
sc->stsc_index = i;
sc->stsc_sample = sc->current_sample - time_sample;
break;
}
time_sample = next;
}
return sample;
}
| 25,887 |
FFmpeg | 7cc84d241ba6ef8e27e4d057176a4ad385ad3d59 | 1 | static int advanced_decode_i_mbs(VC9Context *v)
{
MpegEncContext *s = &v->s;
GetBitContext *gb = &v->s.gb;
int mqdiff, mquant, current_mb = 0, over_flags_mb = 0;
for (s->mb_y=0; s->mb_y<s->mb_height; s->mb_y++)
{
for (s->mb_x=0; s->mb_x<s->mb_width; s->mb_x++)
{
if (v->ac_pred_plane.is_raw)
s->ac_pred = get_bits(gb, 1);
else
s->ac_pred = v->ac_pred_plane.data[current_mb];
if (v->condover == 3 && v->over_flags_plane.is_raw)
over_flags_mb = get_bits(gb, 1);
GET_MQUANT();
/* TODO: lots */
}
current_mb++;
}
return 0;
}
| 25,888 |
FFmpeg | 428098165de4c3edfe42c1b7f00627d287015863 | 1 | static int altivec_uyvy_rgb32 (SwsContext *c,
unsigned char **in, int *instrides,
int srcSliceY, int srcSliceH,
unsigned char **oplanes, int *outstrides)
{
int w = c->srcW;
int h = srcSliceH;
int i,j;
vector unsigned char uyvy;
vector signed short Y,U,V;
vector signed short R0,G0,B0,R1,G1,B1;
vector unsigned char R,G,B;
vector unsigned char *out;
ubyte *img;
img = in[0];
out = (vector unsigned char *)(oplanes[0]+srcSliceY*outstrides[0]);
for (i=0;i<h;i++) {
for (j=0;j<w/16;j++) {
uyvy = vec_ld (0, img);
U = (vector signed short)
vec_perm (uyvy, (vector unsigned char)AVV(0), demux_u);
V = (vector signed short)
vec_perm (uyvy, (vector unsigned char)AVV(0), demux_v);
Y = (vector signed short)
vec_perm (uyvy, (vector unsigned char)AVV(0), demux_y);
cvtyuvtoRGB (c, Y,U,V,&R0,&G0,&B0);
uyvy = vec_ld (16, img);
U = (vector signed short)
vec_perm (uyvy, (vector unsigned char)AVV(0), demux_u);
V = (vector signed short)
vec_perm (uyvy, (vector unsigned char)AVV(0), demux_v);
Y = (vector signed short)
vec_perm (uyvy, (vector unsigned char)AVV(0), demux_y);
cvtyuvtoRGB (c, Y,U,V,&R1,&G1,&B1);
R = vec_packclp (R0,R1);
G = vec_packclp (G0,G1);
B = vec_packclp (B0,B1);
// vec_mstbgr24 (R,G,B, out);
out_rgba (R,G,B,out);
img += 32;
}
}
return srcSliceH;
}
| 25,890 |
qemu | 089f26bb735fb414b79f5fa3753910d5339d2a1d | 1 | static void test_primitives(gconstpointer opaque)
{
TestArgs *args = (TestArgs *) opaque;
const SerializeOps *ops = args->ops;
PrimitiveType *pt = args->test_data;
PrimitiveType *pt_copy = g_malloc0(sizeof(*pt_copy));
Error *err = NULL;
void *serialize_data;
char *double1, *double2;
pt_copy->type = pt->type;
ops->serialize(pt, &serialize_data, visit_primitive_type, &err);
ops->deserialize((void **)&pt_copy, serialize_data, visit_primitive_type, &err);
g_assert(err == NULL);
g_assert(pt_copy != NULL);
if (pt->type == PTYPE_STRING) {
g_assert_cmpstr(pt->value.string, ==, pt_copy->value.string);
g_free((char *)pt_copy->value.string);
} else if (pt->type == PTYPE_NUMBER) {
/* we serialize with %f for our reference visitors, so rather than fuzzy
* floating math to test "equality", just compare the formatted values
*/
double1 = g_malloc0(calc_float_string_storage(pt->value.number));
double2 = g_malloc0(calc_float_string_storage(pt_copy->value.number));
g_assert_cmpstr(double1, ==, double2);
g_free(double1);
g_free(double2);
} else if (pt->type == PTYPE_BOOLEAN) {
g_assert_cmpint(!!pt->value.max, ==, !!pt->value.max);
} else {
g_assert_cmpint(pt->value.max, ==, pt_copy->value.max);
}
ops->cleanup(serialize_data);
g_free(args);
g_free(pt_copy);
}
| 25,891 |
qemu | 8b3b720620a1137a1b794fc3ed64734236f94e06 | 1 | static int qcow_write_snapshots(BlockDriverState *bs)
{
BDRVQcowState *s = bs->opaque;
QCowSnapshot *sn;
QCowSnapshotHeader h;
int i, name_size, id_str_size, snapshots_size;
uint64_t data64;
uint32_t data32;
int64_t offset, snapshots_offset;
/* compute the size of the snapshots */
offset = 0;
for(i = 0; i < s->nb_snapshots; i++) {
sn = s->snapshots + i;
offset = align_offset(offset, 8);
offset += sizeof(h);
offset += strlen(sn->id_str);
offset += strlen(sn->name);
}
snapshots_size = offset;
snapshots_offset = qcow2_alloc_clusters(bs, snapshots_size);
offset = snapshots_offset;
if (offset < 0) {
return offset;
}
for(i = 0; i < s->nb_snapshots; i++) {
sn = s->snapshots + i;
memset(&h, 0, sizeof(h));
h.l1_table_offset = cpu_to_be64(sn->l1_table_offset);
h.l1_size = cpu_to_be32(sn->l1_size);
h.vm_state_size = cpu_to_be32(sn->vm_state_size);
h.date_sec = cpu_to_be32(sn->date_sec);
h.date_nsec = cpu_to_be32(sn->date_nsec);
h.vm_clock_nsec = cpu_to_be64(sn->vm_clock_nsec);
id_str_size = strlen(sn->id_str);
name_size = strlen(sn->name);
h.id_str_size = cpu_to_be16(id_str_size);
h.name_size = cpu_to_be16(name_size);
offset = align_offset(offset, 8);
if (bdrv_pwrite(bs->file, offset, &h, sizeof(h)) != sizeof(h))
goto fail;
offset += sizeof(h);
if (bdrv_pwrite(bs->file, offset, sn->id_str, id_str_size) != id_str_size)
goto fail;
offset += id_str_size;
if (bdrv_pwrite(bs->file, offset, sn->name, name_size) != name_size)
goto fail;
offset += name_size;
}
/* update the various header fields */
data64 = cpu_to_be64(snapshots_offset);
if (bdrv_pwrite(bs->file, offsetof(QCowHeader, snapshots_offset),
&data64, sizeof(data64)) != sizeof(data64))
goto fail;
data32 = cpu_to_be32(s->nb_snapshots);
if (bdrv_pwrite(bs->file, offsetof(QCowHeader, nb_snapshots),
&data32, sizeof(data32)) != sizeof(data32))
goto fail;
/* free the old snapshot table */
qcow2_free_clusters(bs, s->snapshots_offset, s->snapshots_size);
s->snapshots_offset = snapshots_offset;
s->snapshots_size = snapshots_size;
return 0;
fail:
return -1;
}
| 25,892 |
qemu | 02ffa034fb747f09a4f5658ed64871dcee4aaca2 | 1 | void sigaction_invoke(struct sigaction *action,
struct qemu_signalfd_siginfo *info)
{
siginfo_t si = { 0 };
si.si_signo = info->ssi_signo;
si.si_errno = info->ssi_errno;
si.si_code = info->ssi_code;
/* Convert the minimal set of fields defined by POSIX.
* Positive si_code values are reserved for kernel-generated
* signals, where the valid siginfo fields are determined by
* the signal number. But according to POSIX, it is unspecified
* whether SI_USER and SI_QUEUE have values less than or equal to
* zero.
*/
if (info->ssi_code == SI_USER || info->ssi_code == SI_QUEUE ||
info->ssi_code <= 0) {
/* SIGTERM, etc. */
si.si_pid = info->ssi_pid;
si.si_uid = info->ssi_uid;
} else if (info->ssi_signo == SIGILL || info->ssi_signo == SIGFPE ||
info->ssi_signo == SIGSEGV || info->ssi_signo == SIGBUS) {
si.si_addr = (void *)(uintptr_t)info->ssi_addr;
} else if (info->ssi_signo == SIGCHLD) {
si.si_pid = info->ssi_pid;
si.si_status = info->ssi_status;
si.si_uid = info->ssi_uid;
}
action->sa_sigaction(info->ssi_signo, &si, NULL);
}
| 25,893 |
FFmpeg | 38d553322891c8e47182f05199d19888422167dc | 1 | static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
{
PixdescTestContext *priv = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFilterBufferRef *outpicref;
int i;
outlink->out_buf = avfilter_get_video_buffer(outlink, AV_PERM_WRITE,
outlink->w, outlink->h);
outpicref = outlink->out_buf;
avfilter_copy_buffer_ref_props(outpicref, picref);
for (i = 0; i < 4; i++) {
int h = outlink->h;
h = i == 1 || i == 2 ? h>>priv->pix_desc->log2_chroma_h : h;
if (outpicref->data[i]) {
uint8_t *data = outpicref->data[i] +
(outpicref->linesize[i] > 0 ? 0 : outpicref->linesize[i] * (h-1));
memset(data, 0, FFABS(outpicref->linesize[i]) * h);
}
}
/* copy palette */
if (priv->pix_desc->flags & PIX_FMT_PAL)
memcpy(outpicref->data[1], outpicref->data[1], 256*4);
avfilter_start_frame(outlink, avfilter_ref_buffer(outpicref, ~0));
}
| 25,894 |
FFmpeg | fc49f22c3b735db5aaac5f98e40b7124a2be13b8 | 1 | void av_noreturn exit_program(int ret)
{
int i, j;
for (i = 0; i < nb_filtergraphs; i++) {
avfilter_graph_free(&filtergraphs[i]->graph);
for (j = 0; j < filtergraphs[i]->nb_inputs; j++)
av_freep(&filtergraphs[i]->inputs[j]);
av_freep(&filtergraphs[i]->inputs);
for (j = 0; j < filtergraphs[i]->nb_outputs; j++)
av_freep(&filtergraphs[i]->outputs[j]);
av_freep(&filtergraphs[i]->outputs);
av_freep(&filtergraphs[i]);
}
av_freep(&filtergraphs);
/* close files */
for (i = 0; i < nb_output_files; i++) {
AVFormatContext *s = output_files[i]->ctx;
if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
avio_close(s->pb);
avformat_free_context(s);
av_dict_free(&output_files[i]->opts);
av_freep(&output_files[i]);
}
for (i = 0; i < nb_output_streams; i++) {
AVBitStreamFilterContext *bsfc = output_streams[i]->bitstream_filters;
while (bsfc) {
AVBitStreamFilterContext *next = bsfc->next;
av_bitstream_filter_close(bsfc);
bsfc = next;
}
output_streams[i]->bitstream_filters = NULL;
if (output_streams[i]->output_frame) {
AVFrame *frame = output_streams[i]->output_frame;
if (frame->extended_data != frame->data)
av_freep(&frame->extended_data);
av_freep(&frame);
}
av_freep(&output_streams[i]->filtered_frame);
av_freep(&output_streams[i]);
}
for (i = 0; i < nb_input_files; i++) {
avformat_close_input(&input_files[i]->ctx);
av_freep(&input_files[i]);
}
for (i = 0; i < nb_input_streams; i++) {
av_freep(&input_streams[i]->decoded_frame);
av_dict_free(&input_streams[i]->opts);
free_buffer_pool(input_streams[i]);
av_freep(&input_streams[i]->filters);
av_freep(&input_streams[i]);
}
if (vstats_file)
fclose(vstats_file);
av_free(vstats_filename);
av_freep(&input_streams);
av_freep(&input_files);
av_freep(&output_streams);
av_freep(&output_files);
uninit_opts();
av_freep(&audio_buf);
allocated_audio_buf_size = 0;
av_freep(&async_buf);
allocated_async_buf_size = 0;
avfilter_uninit();
avformat_network_deinit();
if (received_sigterm) {
av_log(NULL, AV_LOG_INFO, "Received signal %d: terminating.\n",
(int) received_sigterm);
exit (255);
}
exit(ret); /* not all OS-es handle main() return value */
}
| 25,895 |
qemu | 7717f248eebdcfe6de400404d0cf65dcb3633308 | 1 | static void openrisc_pic_cpu_handler(void *opaque, int irq, int level)
{
OpenRISCCPU *cpu = (OpenRISCCPU *)opaque;
CPUState *cs = CPU(cpu);
uint32_t irq_bit = 1 << irq;
if (irq > 31 || irq < 0) {
return;
}
if (level) {
cpu->env.picsr |= irq_bit;
} else {
cpu->env.picsr &= ~irq_bit;
}
if (cpu->env.picsr & cpu->env.picmr) {
cpu_interrupt(cs, CPU_INTERRUPT_HARD);
} else {
cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
cpu->env.picsr = 0;
}
}
| 25,896 |
FFmpeg | 229843aa359ae0c9519977d7fa952688db63f559 | 0 | static int mov_read_aclr(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
int ret = 0;
int length = 0;
uint64_t original_size;
if (c->fc->nb_streams >= 1) {
AVCodecContext *codec = c->fc->streams[c->fc->nb_streams-1]->codec;
if (codec->codec_id == AV_CODEC_ID_H264)
return 0;
if (atom.size == 16) {
original_size = codec->extradata_size;
ret = mov_realloc_extradata(codec, atom);
if (!ret) {
length = mov_read_atom_into_extradata(c, pb, atom, codec, codec->extradata + original_size);
if (length == atom.size) {
const uint8_t range_value = codec->extradata[original_size + 19];
switch (range_value) {
case 1:
codec->color_range = AVCOL_RANGE_MPEG;
break;
case 2:
codec->color_range = AVCOL_RANGE_JPEG;
break;
default:
av_log(c, AV_LOG_WARNING, "ignored unknown aclr value (%d)\n", range_value);
break;
}
av_dlog(c, "color_range: %d\n", codec->color_range);
} else {
/* For some reason the whole atom was not added to the extradata */
av_log(c, AV_LOG_ERROR, "aclr not decoded - incomplete atom\n");
}
} else {
av_log(c, AV_LOG_ERROR, "aclr not decoded - unable to add atom to extradata\n");
}
} else {
av_log(c, AV_LOG_WARNING, "aclr not decoded - unexpected size %"PRId64"\n", atom.size);
}
}
return ret;
}
| 25,897 |
FFmpeg | e9266a2be04ea505285e32e411ef6120e9cbeba4 | 0 | static av_always_inline void filter_level_for_mb(VP8Context *s, VP8Macroblock *mb, VP8FilterStrength *f )
{
int interior_limit, filter_level;
if (s->segmentation.enabled) {
filter_level = s->segmentation.filter_level[s->segment];
if (!s->segmentation.absolute_vals)
filter_level += s->filter.level;
} else
filter_level = s->filter.level;
if (s->lf_delta.enabled) {
filter_level += s->lf_delta.ref[mb->ref_frame];
filter_level += s->lf_delta.mode[mb->mode];
}
/* Like av_clip for inputs 0 and max, where max is equal to (2^n-1) */
#define POW2CLIP(x,max) (((x) & ~max) ? (-(x))>>31 & max : (x));
filter_level = POW2CLIP(filter_level, 63);
interior_limit = filter_level;
if (s->filter.sharpness) {
interior_limit >>= s->filter.sharpness > 4 ? 2 : 1;
interior_limit = FFMIN(interior_limit, 9 - s->filter.sharpness);
}
interior_limit = FFMAX(interior_limit, 1);
f->filter_level = filter_level;
f->inner_limit = interior_limit;
f->inner_filter = !mb->skip || mb->mode == MODE_I4x4 || mb->mode == VP8_MVMODE_SPLIT;
}
| 25,898 |
FFmpeg | 6e42e6c4b410dbef8b593c2d796a5dad95f89ee4 | 1 | void rgb15tobgr16(const uint8_t *src, uint8_t *dst, long src_size)
{
long i;
long num_pixels = src_size >> 1;
for(i=0; i<num_pixels; i++)
{
unsigned b,g,r;
register uint16_t rgb;
rgb = src[2*i];
r = rgb&0x1F;
g = (rgb&0x3E0)>>5;
b = (rgb&0x7C00)>>10;
dst[2*i] = (b&0x1F) | ((g&0x3F)<<5) | ((r&0x1F)<<11);
}
}
| 25,899 |
qemu | 90cbed4656108fec86d157ced39192e0774a6615 | 1 | static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
int opc)
{
int addr_reg, data_reg, arg0, arg1, arg2, mem_index, s_bits;
#if defined(CONFIG_SOFTMMU)
uint32_t *label1_ptr, *label2_ptr;
#endif
data_reg = *args++;
addr_reg = *args++;
mem_index = *args;
s_bits = opc & 3;
arg0 = TCG_REG_O0;
arg1 = TCG_REG_O1;
arg2 = TCG_REG_O2;
#if defined(CONFIG_SOFTMMU)
/* srl addr_reg, x, arg1 */
tcg_out_arithi(s, arg1, addr_reg, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS,
SHIFT_SRL);
tcg_out_arithi(s, arg0, addr_reg, TARGET_PAGE_MASK | ((1 << s_bits) - 1),
ARITH_AND);
/* and arg1, x, arg1 */
tcg_out_andi(s, arg1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
/* add arg1, x, arg1 */
tcg_out_addi(s, arg1, offsetof(CPUState,
tlb_table[mem_index][0].addr_read));
/* add env, arg1, arg1 */
tcg_out_arith(s, arg1, TCG_AREG0, arg1, ARITH_ADD);
/* ld [arg1], arg2 */
tcg_out32(s, TARGET_LD_OP | INSN_RD(arg2) | INSN_RS1(arg1) |
INSN_RS2(TCG_REG_G0));
/* subcc arg0, arg2, %g0 */
tcg_out_arith(s, TCG_REG_G0, arg0, arg2, ARITH_SUBCC);
/* will become:
be label1 */
label1_ptr = (uint32_t *)s->code_ptr;
tcg_out32(s, 0);
/* mov (delay slot) */
tcg_out_mov(s, arg0, addr_reg);
/* mov */
tcg_out_movi(s, TCG_TYPE_I32, arg1, mem_index);
/* XXX: move that code at the end of the TB */
/* qemu_ld_helper[s_bits](arg0, arg1) */
tcg_out32(s, CALL | ((((tcg_target_ulong)qemu_ld_helpers[s_bits]
- (tcg_target_ulong)s->code_ptr) >> 2)
& 0x3fffffff));
/* Store AREG0 in stack to avoid ugly glibc bugs that mangle
global registers */
// delay slot
tcg_out_ldst(s, TCG_AREG0, TCG_REG_CALL_STACK,
TCG_TARGET_CALL_STACK_OFFSET - sizeof(long), HOST_ST_OP);
tcg_out_ldst(s, TCG_AREG0, TCG_REG_CALL_STACK,
TCG_TARGET_CALL_STACK_OFFSET - sizeof(long), HOST_LD_OP);
/* data_reg = sign_extend(arg0) */
switch(opc) {
case 0 | 4:
/* sll arg0, 24/56, data_reg */
tcg_out_arithi(s, data_reg, arg0, (int)sizeof(tcg_target_long) * 8 - 8,
HOST_SLL_OP);
/* sra data_reg, 24/56, data_reg */
tcg_out_arithi(s, data_reg, data_reg,
(int)sizeof(tcg_target_long) * 8 - 8, HOST_SRA_OP);
break;
case 1 | 4:
/* sll arg0, 16/48, data_reg */
tcg_out_arithi(s, data_reg, arg0,
(int)sizeof(tcg_target_long) * 8 - 16, HOST_SLL_OP);
/* sra data_reg, 16/48, data_reg */
tcg_out_arithi(s, data_reg, data_reg,
(int)sizeof(tcg_target_long) * 8 - 16, HOST_SRA_OP);
break;
case 2 | 4:
/* sll arg0, 32, data_reg */
tcg_out_arithi(s, data_reg, arg0, 32, HOST_SLL_OP);
/* sra data_reg, 32, data_reg */
tcg_out_arithi(s, data_reg, data_reg, 32, HOST_SRA_OP);
break;
case 0:
case 1:
case 2:
case 3:
default:
/* mov */
tcg_out_mov(s, data_reg, arg0);
break;
}
/* will become:
ba label2 */
label2_ptr = (uint32_t *)s->code_ptr;
tcg_out32(s, 0);
/* nop (delay slot */
tcg_out_nop(s);
/* label1: */
*label1_ptr = (INSN_OP(0) | INSN_COND(COND_E, 0) | INSN_OP2(0x2) |
INSN_OFF22((unsigned long)s->code_ptr -
(unsigned long)label1_ptr));
/* ld [arg1 + x], arg1 */
tcg_out_ldst(s, arg1, arg1, offsetof(CPUTLBEntry, addend) -
offsetof(CPUTLBEntry, addr_read), HOST_LD_OP);
/* add addr_reg, arg1, arg0 */
tcg_out_arith(s, arg0, addr_reg, arg1, ARITH_ADD);
#else
arg0 = addr_reg;
#endif
switch(opc) {
case 0:
/* ldub [arg0], data_reg */
tcg_out_ldst(s, data_reg, arg0, 0, LDUB);
break;
case 0 | 4:
/* ldsb [arg0], data_reg */
tcg_out_ldst(s, data_reg, arg0, 0, LDSB);
break;
case 1:
#ifdef TARGET_WORDS_BIGENDIAN
/* lduh [arg0], data_reg */
tcg_out_ldst(s, data_reg, arg0, 0, LDUH);
#else
/* lduha [arg0] ASI_PRIMARY_LITTLE, data_reg */
tcg_out_ldst_asi(s, data_reg, arg0, 0, LDUHA, ASI_PRIMARY_LITTLE);
#endif
break;
case 1 | 4:
#ifdef TARGET_WORDS_BIGENDIAN
/* ldsh [arg0], data_reg */
tcg_out_ldst(s, data_reg, arg0, 0, LDSH);
#else
/* ldsha [arg0] ASI_PRIMARY_LITTLE, data_reg */
tcg_out_ldst_asi(s, data_reg, arg0, 0, LDSHA, ASI_PRIMARY_LITTLE);
#endif
break;
case 2:
#ifdef TARGET_WORDS_BIGENDIAN
/* lduw [arg0], data_reg */
tcg_out_ldst(s, data_reg, arg0, 0, LDUW);
#else
/* lduwa [arg0] ASI_PRIMARY_LITTLE, data_reg */
tcg_out_ldst_asi(s, data_reg, arg0, 0, LDUWA, ASI_PRIMARY_LITTLE);
#endif
break;
case 2 | 4:
#ifdef TARGET_WORDS_BIGENDIAN
/* ldsw [arg0], data_reg */
tcg_out_ldst(s, data_reg, arg0, 0, LDSW);
#else
/* ldswa [arg0] ASI_PRIMARY_LITTLE, data_reg */
tcg_out_ldst_asi(s, data_reg, arg0, 0, LDSWA, ASI_PRIMARY_LITTLE);
#endif
break;
case 3:
#ifdef TARGET_WORDS_BIGENDIAN
/* ldx [arg0], data_reg */
tcg_out_ldst(s, data_reg, arg0, 0, LDX);
#else
/* ldxa [arg0] ASI_PRIMARY_LITTLE, data_reg */
tcg_out_ldst_asi(s, data_reg, arg0, 0, LDXA, ASI_PRIMARY_LITTLE);
#endif
break;
default:
tcg_abort();
}
#if defined(CONFIG_SOFTMMU)
/* label2: */
*label2_ptr = (INSN_OP(0) | INSN_COND(COND_A, 0) | INSN_OP2(0x2) |
INSN_OFF22((unsigned long)s->code_ptr -
(unsigned long)label2_ptr));
#endif
} | 25,900 |
FFmpeg | 39bb30f6640fe1faf4bbc779a79786028febc95d | 1 | static int mxf_read_index_table_segment(MXFIndexTableSegment *segment, ByteIOContext *pb, int tag)
{
switch(tag) {
case 0x3F05: dprintf(NULL, "EditUnitByteCount %d\n", get_be32(pb)); break;
case 0x3F06: dprintf(NULL, "IndexSID %d\n", get_be32(pb)); break;
case 0x3F07: dprintf(NULL, "BodySID %d\n", get_be32(pb)); break;
case 0x3F0B: dprintf(NULL, "IndexEditRate %d/%d\n", get_be32(pb), get_be32(pb)); break;
case 0x3F0C: dprintf(NULL, "IndexStartPosition %lld\n", get_be64(pb)); break;
case 0x3F0D: dprintf(NULL, "IndexDuration %lld\n", get_be64(pb)); break;
}
return 0;
}
| 25,901 |
qemu | 3e305e4a4752f70c0b5c3cf5b43ec957881714f7 | 1 | static const char *vnc_auth_name(VncDisplay *vd) {
switch (vd->auth) {
case VNC_AUTH_INVALID:
return "invalid";
case VNC_AUTH_NONE:
return "none";
case VNC_AUTH_VNC:
return "vnc";
case VNC_AUTH_RA2:
return "ra2";
case VNC_AUTH_RA2NE:
return "ra2ne";
case VNC_AUTH_TIGHT:
return "tight";
case VNC_AUTH_ULTRA:
return "ultra";
case VNC_AUTH_TLS:
return "tls";
case VNC_AUTH_VENCRYPT:
#ifdef CONFIG_VNC_TLS
switch (vd->subauth) {
case VNC_AUTH_VENCRYPT_PLAIN:
return "vencrypt+plain";
case VNC_AUTH_VENCRYPT_TLSNONE:
return "vencrypt+tls+none";
case VNC_AUTH_VENCRYPT_TLSVNC:
return "vencrypt+tls+vnc";
case VNC_AUTH_VENCRYPT_TLSPLAIN:
return "vencrypt+tls+plain";
case VNC_AUTH_VENCRYPT_X509NONE:
return "vencrypt+x509+none";
case VNC_AUTH_VENCRYPT_X509VNC:
return "vencrypt+x509+vnc";
case VNC_AUTH_VENCRYPT_X509PLAIN:
return "vencrypt+x509+plain";
case VNC_AUTH_VENCRYPT_TLSSASL:
return "vencrypt+tls+sasl";
case VNC_AUTH_VENCRYPT_X509SASL:
return "vencrypt+x509+sasl";
default:
return "vencrypt";
}
#else
return "vencrypt";
#endif
case VNC_AUTH_SASL:
return "sasl";
}
return "unknown";
}
| 25,902 |
qemu | 787aaf5703a702094f395db6795e74230282cd62 | 1 | static void cpu_x86_register(X86CPU *cpu, const char *name, Error **errp)
{
CPUX86State *env = &cpu->env;
x86_def_t def1, *def = &def1;
memset(def, 0, sizeof(*def));
if (cpu_x86_find_by_name(cpu, def, name) < 0) {
error_setg(errp, "Unable to find CPU definition: %s", name);
return;
}
if (kvm_enabled()) {
def->features[FEAT_KVM] |= kvm_default_features;
}
def->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
object_property_set_str(OBJECT(cpu), def->vendor, "vendor", errp);
object_property_set_int(OBJECT(cpu), def->level, "level", errp);
object_property_set_int(OBJECT(cpu), def->family, "family", errp);
object_property_set_int(OBJECT(cpu), def->model, "model", errp);
object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
env->features[FEAT_1_EDX] = def->features[FEAT_1_EDX];
env->features[FEAT_1_ECX] = def->features[FEAT_1_ECX];
env->features[FEAT_8000_0001_EDX] = def->features[FEAT_8000_0001_EDX];
env->features[FEAT_8000_0001_ECX] = def->features[FEAT_8000_0001_ECX];
object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
env->features[FEAT_KVM] = def->features[FEAT_KVM];
env->features[FEAT_SVM] = def->features[FEAT_SVM];
env->features[FEAT_C000_0001_EDX] = def->features[FEAT_C000_0001_EDX];
env->features[FEAT_7_0_EBX] = def->features[FEAT_7_0_EBX];
env->cpuid_xlevel2 = def->xlevel2;
object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
} | 25,903 |
qemu | 71d0770c4cec9f1dc04f4dadcbf7fd6c335030a9 | 1 | int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
const void *buf1, int count1)
{
BlockDriver *drv = bs->drv;
if (!drv)
return -ENOMEDIUM;
if (!drv->bdrv_pwrite)
return bdrv_pwrite_em(bs, offset, buf1, count1);
return drv->bdrv_pwrite(bs, offset, buf1, count1);
} | 25,904 |
FFmpeg | 13a099799e89a76eb921ca452e1b04a7a28a9855 | 0 | static void yuv2nv12X_c(SwsContext *c, const int16_t *lumFilter,
const int16_t **lumSrc, int lumFilterSize,
const int16_t *chrFilter, const int16_t **chrUSrc,
const int16_t **chrVSrc, int chrFilterSize,
const int16_t **alpSrc, uint8_t *dest, uint8_t *uDest,
uint8_t *vDest, uint8_t *aDest,
int dstW, int chrDstW)
{
enum PixelFormat dstFormat = c->dstFormat;
//FIXME Optimize (just quickly written not optimized..)
int i;
for (i=0; i<dstW; i++) {
int val=1<<18;
int j;
for (j=0; j<lumFilterSize; j++)
val += lumSrc[j][i] * lumFilter[j];
dest[i]= av_clip_uint8(val>>19);
}
if (!uDest)
return;
if (dstFormat == PIX_FMT_NV12)
for (i=0; i<chrDstW; i++) {
int u=1<<18;
int v=1<<18;
int j;
for (j=0; j<chrFilterSize; j++) {
u += chrUSrc[j][i] * chrFilter[j];
v += chrVSrc[j][i] * chrFilter[j];
}
uDest[2*i]= av_clip_uint8(u>>19);
uDest[2*i+1]= av_clip_uint8(v>>19);
}
else
for (i=0; i<chrDstW; i++) {
int u=1<<18;
int v=1<<18;
int j;
for (j=0; j<chrFilterSize; j++) {
u += chrUSrc[j][i] * chrFilter[j];
v += chrVSrc[j][i] * chrFilter[j];
}
uDest[2*i]= av_clip_uint8(v>>19);
uDest[2*i+1]= av_clip_uint8(u>>19);
}
}
| 25,905 |
qemu | e92f0e1910f0655a0edd8d87c5a7262d36517a89 | 1 | static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes,
QEMUIOVector *qiov, CoroutineEntry co_entry,
BdrvRequestFlags flags,
BlockCompletionFunc *cb, void *opaque)
{
BlkAioEmAIOCB *acb;
Coroutine *co;
bdrv_inc_in_flight(blk_bs(blk));
acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque);
acb->rwco = (BlkRwCo) {
.blk = blk,
.offset = offset,
.qiov = qiov,
.flags = flags,
.ret = NOT_DONE,
};
acb->bytes = bytes;
acb->has_returned = false;
co = qemu_coroutine_create(co_entry, acb);
qemu_coroutine_enter(co);
acb->has_returned = true;
if (acb->rwco.ret != NOT_DONE) {
aio_bh_schedule_oneshot(blk_get_aio_context(blk),
blk_aio_complete_bh, acb);
}
return &acb->common;
}
| 25,906 |
FFmpeg | 9243ec4a508c81a621e941bb7e012e2d45d93659 | 1 | static int rv10_decode_packet(AVCodecContext *avctx,
const uint8_t *buf, int buf_size, int buf_size2)
{
MpegEncContext *s = avctx->priv_data;
int mb_count, mb_pos, left, start_mb_x;
init_get_bits(&s->gb, buf, buf_size*8);
if(s->codec_id ==CODEC_ID_RV10)
mb_count = rv10_decode_picture_header(s);
else
mb_count = rv20_decode_picture_header(s);
if (mb_count < 0) {
av_log(s->avctx, AV_LOG_ERROR, "HEADER ERROR\n");
return -1;
}
if (s->mb_x >= s->mb_width ||
s->mb_y >= s->mb_height) {
av_log(s->avctx, AV_LOG_ERROR, "POS ERROR %d %d\n", s->mb_x, s->mb_y);
return -1;
}
mb_pos = s->mb_y * s->mb_width + s->mb_x;
left = s->mb_width * s->mb_height - mb_pos;
if (mb_count > left) {
av_log(s->avctx, AV_LOG_ERROR, "COUNT ERROR\n");
return -1;
}
if ((s->mb_x == 0 && s->mb_y == 0) || s->current_picture_ptr==NULL) {
if(s->current_picture_ptr){ //FIXME write parser so we always have complete frames?
ff_er_frame_end(s);
ff_MPV_frame_end(s);
s->mb_x= s->mb_y = s->resync_mb_x = s->resync_mb_y= 0;
}
if(ff_MPV_frame_start(s, avctx) < 0)
return -1;
ff_er_frame_start(s);
} else {
if (s->current_picture_ptr->f.pict_type != s->pict_type) {
av_log(s->avctx, AV_LOG_ERROR, "Slice type mismatch\n");
return -1;
}
}
av_dlog(avctx, "qscale=%d\n", s->qscale);
/* default quantization values */
if(s->codec_id== CODEC_ID_RV10){
if(s->mb_y==0) s->first_slice_line=1;
}else{
s->first_slice_line=1;
s->resync_mb_x= s->mb_x;
}
start_mb_x= s->mb_x;
s->resync_mb_y= s->mb_y;
if(s->h263_aic){
s->y_dc_scale_table=
s->c_dc_scale_table= ff_aic_dc_scale_table;
}else{
s->y_dc_scale_table=
s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
}
if(s->modified_quant)
s->chroma_qscale_table= ff_h263_chroma_qscale_table;
ff_set_qscale(s, s->qscale);
s->rv10_first_dc_coded[0] = 0;
s->rv10_first_dc_coded[1] = 0;
s->rv10_first_dc_coded[2] = 0;
s->block_wrap[0]=
s->block_wrap[1]=
s->block_wrap[2]=
s->block_wrap[3]= s->b8_stride;
s->block_wrap[4]=
s->block_wrap[5]= s->mb_stride;
ff_init_block_index(s);
/* decode each macroblock */
for(s->mb_num_left= mb_count; s->mb_num_left>0; s->mb_num_left--) {
int ret;
ff_update_block_index(s);
av_dlog(avctx, "**mb x=%d y=%d\n", s->mb_x, s->mb_y);
s->mv_dir = MV_DIR_FORWARD;
s->mv_type = MV_TYPE_16X16;
ret=ff_h263_decode_mb(s, s->block);
if (ret != SLICE_ERROR && s->gb.size_in_bits < get_bits_count(&s->gb) && 8*buf_size2 >= get_bits_count(&s->gb)){
av_log(avctx, AV_LOG_DEBUG, "update size from %d to %d\n", s->gb.size_in_bits, 8*buf_size2);
s->gb.size_in_bits= 8*buf_size2;
ret= SLICE_OK;
}
if (ret == SLICE_ERROR || s->gb.size_in_bits < get_bits_count(&s->gb)) {
av_log(s->avctx, AV_LOG_ERROR, "ERROR at MB %d %d\n", s->mb_x, s->mb_y);
return -1;
}
if(s->pict_type != AV_PICTURE_TYPE_B)
ff_h263_update_motion_val(s);
ff_MPV_decode_mb(s, s->block);
if(s->loop_filter)
ff_h263_loop_filter(s);
if (++s->mb_x == s->mb_width) {
s->mb_x = 0;
s->mb_y++;
ff_init_block_index(s);
}
if(s->mb_x == s->resync_mb_x)
s->first_slice_line=0;
if(ret == SLICE_END) break;
}
ff_er_add_slice(s, start_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_END);
return s->gb.size_in_bits;
}
| 25,907 |
qemu | b544c1aba8681c2fe5d6715fbd37cf6caf1bc7bb | 1 | static void coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req,
struct iovec *iov, int niov, bool create,
enum AIOCBState aiocb_type)
{
int nr_copies = s->inode.nr_copies;
SheepdogObjReq hdr;
unsigned int wlen = 0;
int ret;
uint64_t oid = aio_req->oid;
unsigned int datalen = aio_req->data_len;
uint64_t offset = aio_req->offset;
uint8_t flags = aio_req->flags;
uint64_t old_oid = aio_req->base_oid;
if (!nr_copies) {
error_report("bug");
}
memset(&hdr, 0, sizeof(hdr));
switch (aiocb_type) {
case AIOCB_FLUSH_CACHE:
hdr.opcode = SD_OP_FLUSH_VDI;
break;
case AIOCB_READ_UDATA:
hdr.opcode = SD_OP_READ_OBJ;
hdr.flags = flags;
break;
case AIOCB_WRITE_UDATA:
if (create) {
hdr.opcode = SD_OP_CREATE_AND_WRITE_OBJ;
} else {
hdr.opcode = SD_OP_WRITE_OBJ;
}
wlen = datalen;
hdr.flags = SD_FLAG_CMD_WRITE | flags;
break;
case AIOCB_DISCARD_OBJ:
hdr.opcode = SD_OP_DISCARD_OBJ;
break;
}
if (s->cache_flags) {
hdr.flags |= s->cache_flags;
}
hdr.oid = oid;
hdr.cow_oid = old_oid;
hdr.copies = s->inode.nr_copies;
hdr.data_length = datalen;
hdr.offset = offset;
hdr.id = aio_req->id;
qemu_co_mutex_lock(&s->lock);
s->co_send = qemu_coroutine_self();
aio_set_fd_handler(s->aio_context, s->fd,
co_read_response, co_write_request, s);
socket_set_cork(s->fd, 1);
/* send a header */
ret = qemu_co_send(s->fd, &hdr, sizeof(hdr));
if (ret != sizeof(hdr)) {
error_report("failed to send a req, %s", strerror(errno));
goto out;
}
if (wlen) {
ret = qemu_co_sendv(s->fd, iov, niov, aio_req->iov_offset, wlen);
if (ret != wlen) {
error_report("failed to send a data, %s", strerror(errno));
}
}
out:
socket_set_cork(s->fd, 0);
aio_set_fd_handler(s->aio_context, s->fd, co_read_response, NULL, s);
s->co_send = NULL;
qemu_co_mutex_unlock(&s->lock);
}
| 25,908 |
qemu | 9633fcc6a02f23e3ef00aa5fe3fe9c41f57c3456 | 1 | static void init_proc_750gx (CPUPPCState *env)
{
gen_spr_ne_601(env);
gen_spr_7xx(env);
/* XXX : not implemented (XXX: different from 750fx) */
spr_register(env, SPR_L2CR, "L2CR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, NULL,
0x00000000);
/* Time base */
gen_tbl(env);
/* Thermal management */
gen_spr_thrm(env);
/* XXX : not implemented */
spr_register(env, SPR_750_THRM4, "THRM4",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
/* Hardware implementation registers */
/* XXX : not implemented (XXX: different from 750fx) */
spr_register(env, SPR_HID0, "HID0",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
/* XXX : not implemented */
spr_register(env, SPR_HID1, "HID1",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
/* XXX : not implemented (XXX: different from 750fx) */
spr_register(env, SPR_750FX_HID2, "HID2",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
/* Memory management */
gen_low_BATs(env);
/* PowerPC 750fx & 750gx has 8 DBATs and 8 IBATs */
gen_high_BATs(env);
init_excp_7x0(env);
env->dcache_line_size = 32;
env->icache_line_size = 32;
/* Allocate hardware IRQ controller */
ppc6xx_irq_init(env);
}
| 25,909 |
qemu | f56b9bc3ae20fc93815b34aa022be919941406ce | 1 | static int cloop_open(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
{
BDRVCloopState *s = bs->opaque;
uint32_t offsets_size, max_compressed_block_size = 1, i;
int ret;
bs->read_only = 1;
/* read header */
ret = bdrv_pread(bs->file, 128, &s->block_size, 4);
if (ret < 0) {
return ret;
}
s->block_size = be32_to_cpu(s->block_size);
if (s->block_size % 512) {
error_setg(errp, "block_size %u must be a multiple of 512",
s->block_size);
return -EINVAL;
}
if (s->block_size == 0) {
error_setg(errp, "block_size cannot be zero");
return -EINVAL;
}
/* cloop's create_compressed_fs.c warns about block sizes beyond 256 KB but
* we can accept more. Prevent ridiculous values like 4 GB - 1 since we
* need a buffer this big.
*/
if (s->block_size > MAX_BLOCK_SIZE) {
error_setg(errp, "block_size %u must be %u MB or less",
s->block_size,
MAX_BLOCK_SIZE / (1024 * 1024));
return -EINVAL;
}
ret = bdrv_pread(bs->file, 128 + 4, &s->n_blocks, 4);
if (ret < 0) {
return ret;
}
s->n_blocks = be32_to_cpu(s->n_blocks);
/* read offsets */
if (s->n_blocks > UINT32_MAX / sizeof(uint64_t)) {
/* Prevent integer overflow */
error_setg(errp, "n_blocks %u must be %zu or less",
s->n_blocks,
UINT32_MAX / sizeof(uint64_t));
return -EINVAL;
}
offsets_size = s->n_blocks * sizeof(uint64_t);
if (offsets_size > 512 * 1024 * 1024) {
/* Prevent ridiculous offsets_size which causes memory allocation to
* fail or overflows bdrv_pread() size. In practice the 512 MB
* offsets[] limit supports 16 TB images at 256 KB block size.
*/
error_setg(errp, "image requires too many offsets, "
"try increasing block size");
return -EINVAL;
}
s->offsets = g_malloc(offsets_size);
ret = bdrv_pread(bs->file, 128 + 4 + 4, s->offsets, offsets_size);
if (ret < 0) {
goto fail;
}
for(i=0;i<s->n_blocks;i++) {
s->offsets[i] = be64_to_cpu(s->offsets[i]);
if (i > 0) {
uint32_t size = s->offsets[i] - s->offsets[i - 1];
if (size > max_compressed_block_size) {
max_compressed_block_size = size;
}
}
}
/* initialize zlib engine */
s->compressed_block = g_malloc(max_compressed_block_size + 1);
s->uncompressed_block = g_malloc(s->block_size);
if (inflateInit(&s->zstream) != Z_OK) {
ret = -EINVAL;
goto fail;
}
s->current_block = s->n_blocks;
s->sectors_per_block = s->block_size/512;
bs->total_sectors = s->n_blocks * s->sectors_per_block;
qemu_co_mutex_init(&s->lock);
return 0;
fail:
g_free(s->offsets);
g_free(s->compressed_block);
g_free(s->uncompressed_block);
return ret;
}
| 25,910 |
qemu | a0fcac9c21dcbf481eeb5573a738f55023f5a953 | 1 | static void spapr_tce_table_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->vmsd = &vmstate_spapr_tce_table;
dc->init = spapr_tce_table_realize;
dc->reset = spapr_tce_reset;
QLIST_INIT(&spapr_tce_tables);
/* hcall-tce */
spapr_register_hypercall(H_PUT_TCE, h_put_tce);
} | 25,911 |
FFmpeg | 6179dc8aa7e5fc5358b9614306f93f1adadf22a4 | 1 | static void gmc1_motion(MpegEncContext *s,
uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
uint8_t **ref_picture)
{
uint8_t *ptr;
int src_x, src_y, motion_x, motion_y;
ptrdiff_t offset, linesize, uvlinesize;
int emu = 0;
motion_x = s->sprite_offset[0][0];
motion_y = s->sprite_offset[0][1];
src_x = s->mb_x * 16 + (motion_x >> (s->sprite_warping_accuracy + 1));
src_y = s->mb_y * 16 + (motion_y >> (s->sprite_warping_accuracy + 1));
motion_x <<= (3 - s->sprite_warping_accuracy);
motion_y <<= (3 - s->sprite_warping_accuracy);
src_x = av_clip(src_x, -16, s->width);
if (src_x == s->width)
motion_x = 0;
src_y = av_clip(src_y, -16, s->height);
if (src_y == s->height)
motion_y = 0;
linesize = s->linesize;
uvlinesize = s->uvlinesize;
ptr = ref_picture[0] + src_y * linesize + src_x;
if ((unsigned)src_x >= FFMAX(s->h_edge_pos - 17, 0) ||
(unsigned)src_y >= FFMAX(s->v_edge_pos - 17, 0)) {
s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
linesize, linesize,
17, 17,
src_x, src_y,
s->h_edge_pos, s->v_edge_pos);
ptr = s->sc.edge_emu_buffer;
}
if ((motion_x | motion_y) & 7) {
s->mdsp.gmc1(dest_y, ptr, linesize, 16,
motion_x & 15, motion_y & 15, 128 - s->no_rounding);
s->mdsp.gmc1(dest_y + 8, ptr + 8, linesize, 16,
motion_x & 15, motion_y & 15, 128 - s->no_rounding);
} else {
int dxy;
dxy = ((motion_x >> 3) & 1) | ((motion_y >> 2) & 2);
if (s->no_rounding) {
s->hdsp.put_no_rnd_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
} else {
s->hdsp.put_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
}
}
if (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY)
return;
motion_x = s->sprite_offset[1][0];
motion_y = s->sprite_offset[1][1];
src_x = s->mb_x * 8 + (motion_x >> (s->sprite_warping_accuracy + 1));
src_y = s->mb_y * 8 + (motion_y >> (s->sprite_warping_accuracy + 1));
motion_x <<= (3 - s->sprite_warping_accuracy);
motion_y <<= (3 - s->sprite_warping_accuracy);
src_x = av_clip(src_x, -8, s->width >> 1);
if (src_x == s->width >> 1)
motion_x = 0;
src_y = av_clip(src_y, -8, s->height >> 1);
if (src_y == s->height >> 1)
motion_y = 0;
offset = (src_y * uvlinesize) + src_x;
ptr = ref_picture[1] + offset;
if ((unsigned)src_x >= FFMAX((s->h_edge_pos >> 1) - 9, 0) ||
(unsigned)src_y >= FFMAX((s->v_edge_pos >> 1) - 9, 0)) {
s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
uvlinesize, uvlinesize,
9, 9,
src_x, src_y,
s->h_edge_pos >> 1, s->v_edge_pos >> 1);
ptr = s->sc.edge_emu_buffer;
emu = 1;
}
s->mdsp.gmc1(dest_cb, ptr, uvlinesize, 8,
motion_x & 15, motion_y & 15, 128 - s->no_rounding);
ptr = ref_picture[2] + offset;
if (emu) {
s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
uvlinesize, uvlinesize,
9, 9,
src_x, src_y,
s->h_edge_pos >> 1, s->v_edge_pos >> 1);
ptr = s->sc.edge_emu_buffer;
}
s->mdsp.gmc1(dest_cr, ptr, uvlinesize, 8,
motion_x & 15, motion_y & 15, 128 - s->no_rounding);
}
| 25,912 |
FFmpeg | 0e15b7b0dde44130069739bfb98c29e74c72be86 | 0 | static void gxf_write_padding(ByteIOContext *pb, offset_t to_pad)
{
while (to_pad--) {
put_byte(pb, 0);
}
}
| 25,914 |
FFmpeg | 3941df546276b190cc9362fd093e6721e8e52f50 | 0 | static int aea_read_header(AVFormatContext *s)
{
AVStream *st = avformat_new_stream(s, NULL);
if (!st)
return AVERROR(ENOMEM);
/* Parse the amount of channels and skip to pos 2048(0x800) */
avio_skip(s->pb, 264);
st->codec->channels = avio_r8(s->pb);
avio_skip(s->pb, 1783);
st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
st->codec->codec_id = AV_CODEC_ID_ATRAC1;
st->codec->sample_rate = 44100;
st->codec->bit_rate = 292000;
if (st->codec->channels != 1 && st->codec->channels != 2) {
av_log(s,AV_LOG_ERROR,"Channels %d not supported!\n",st->codec->channels);
return -1;
}
st->codec->channel_layout = (st->codec->channels == 1) ? AV_CH_LAYOUT_MONO : AV_CH_LAYOUT_STEREO;
st->codec->block_align = AT1_SU_SIZE * st->codec->channels;
return 0;
}
| 25,915 |
qemu | e1833e1f96456fd8fc17463246fe0b2050e68efb | 0 | static void spr_write_ibatl (void *opaque, int sprn)
{
DisasContext *ctx = opaque;
gen_op_store_ibatl((sprn - SPR_IBAT0L) / 2);
RET_STOP(ctx);
}
| 25,916 |
qemu | c52ab08aee6f7d4717fc6b517174043126bd302f | 0 | static void gen_eob_inhibit_irq(DisasContext *s, bool inhibit)
{
gen_update_cc_op(s);
/* If several instructions disable interrupts, only the first does it. */
if (inhibit && !(s->flags & HF_INHIBIT_IRQ_MASK)) {
gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
} else {
gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
}
if (s->tb->flags & HF_RF_MASK) {
gen_helper_reset_rf(cpu_env);
}
if (s->singlestep_enabled) {
gen_helper_debug(cpu_env);
} else if (s->tf) {
gen_helper_single_step(cpu_env);
} else {
tcg_gen_exit_tb(0);
}
s->is_jmp = DISAS_TB_JUMP;
}
| 25,917 |
qemu | dcfd14b3741983c466ad92fa2ae91eeafce3e5d5 | 0 | void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
/* XXX: cannot enable it yet because it yields to MMU exception
where NIP != read address on PowerPC */
#if 0
target_ulong phys_addr;
phys_addr = get_phys_addr_code(env, start);
tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
| 25,918 |
qemu | 70a0c19e83aa4c71c879d51e426e89e4b3d4e014 | 0 | static bool kvmppc_is_pr(KVMState *ks)
{
/* Assume KVM-PR if the GET_PVINFO capability is available */
return kvm_check_extension(ks, KVM_CAP_PPC_GET_PVINFO) != 0;
}
| 25,919 |
qemu | cd9ba1ebcf0439457f22b75b38533f6634f23c5f | 0 | void aio_set_fd_handler(AioContext *ctx,
int fd,
IOHandler *io_read,
IOHandler *io_write,
AioFlushHandler *io_flush,
void *opaque)
{
AioHandler *node;
node = find_aio_handler(ctx, fd);
/* Are we deleting the fd handler? */
if (!io_read && !io_write) {
if (node) {
/* If the lock is held, just mark the node as deleted */
if (ctx->walking_handlers)
node->deleted = 1;
else {
/* Otherwise, delete it for real. We can't just mark it as
* deleted because deleted nodes are only cleaned up after
* releasing the walking_handlers lock.
*/
QLIST_REMOVE(node, node);
g_free(node);
}
}
} else {
if (node == NULL) {
/* Alloc and insert if it's not already there */
node = g_malloc0(sizeof(AioHandler));
node->fd = fd;
QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);
}
/* Update handler with latest information */
node->io_read = io_read;
node->io_write = io_write;
node->io_flush = io_flush;
node->opaque = opaque;
}
}
| 25,920 |
qemu | a8170e5e97ad17ca169c64ba87ae2f53850dab4c | 0 | static uint64_t gic_do_cpu_read(void *opaque, target_phys_addr_t addr,
unsigned size)
{
GICState **backref = (GICState **)opaque;
GICState *s = *backref;
int id = (backref - s->backref);
return gic_cpu_read(s, id, addr);
}
| 25,922 |
qemu | 6b98bd649520d07df4d1b7a0a54ac73bf178519c | 0 | void bdrv_io_unplug(BlockDriverState *bs)
{
BlockDriver *drv = bs->drv;
if (drv && drv->bdrv_io_unplug) {
drv->bdrv_io_unplug(bs);
} else if (bs->file) {
bdrv_io_unplug(bs->file->bs);
}
}
| 25,923 |
qemu | 9bc3a3a216e2689bfcdd36c3e079333bbdbf3ba0 | 0 | static void ehci_advance_async_state(EHCIState *ehci)
{
const int async = 1;
switch(ehci_get_state(ehci, async)) {
case EST_INACTIVE:
if (!ehci_async_enabled(ehci)) {
break;
}
ehci_set_state(ehci, async, EST_ACTIVE);
// No break, fall through to ACTIVE
case EST_ACTIVE:
if (!ehci_async_enabled(ehci)) {
ehci_queues_rip_all(ehci, async);
ehci_set_state(ehci, async, EST_INACTIVE);
break;
}
/* make sure guest has acknowledged the doorbell interrupt */
/* TO-DO: is this really needed? */
if (ehci->usbsts & USBSTS_IAA) {
DPRINTF("IAA status bit still set.\n");
break;
}
/* check that address register has been set */
if (ehci->asynclistaddr == 0) {
break;
}
ehci_set_state(ehci, async, EST_WAITLISTHEAD);
ehci_advance_state(ehci, async);
/* If the doorbell is set, the guest wants to make a change to the
* schedule. The host controller needs to release cached data.
* (section 4.8.2)
*/
if (ehci->usbcmd & USBCMD_IAAD) {
/* Remove all unseen qhs from the async qhs queue */
ehci_queues_rip_unused(ehci, async, 1);
DPRINTF("ASYNC: doorbell request acknowledged\n");
ehci->usbcmd &= ~USBCMD_IAAD;
ehci_set_interrupt(ehci, USBSTS_IAA);
}
break;
default:
/* this should only be due to a developer mistake */
fprintf(stderr, "ehci: Bad asynchronous state %d. "
"Resetting to active\n", ehci->astate);
assert(0);
}
}
| 25,924 |
FFmpeg | 43abef9fde0cf87153cc9031cad61f75b02cfa01 | 0 | static int rpl_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
AVIOContext *pb = s->pb;
RPLContext *rpl = s->priv_data;
AVStream *vst = NULL, *ast = NULL;
int total_audio_size;
int error = 0;
uint32_t i;
int32_t audio_format, chunk_catalog_offset, number_of_chunks;
AVRational fps;
char line[RPL_LINE_LENGTH];
// The header for RPL/ARMovie files is 21 lines of text
// containing the various header fields. The fields are always
// in the same order, and other text besides the first
// number usually isn't important.
// (The spec says that there exists some significance
// for the text in a few cases; samples needed.)
error |= read_line(pb, line, sizeof(line)); // ARMovie
error |= read_line(pb, line, sizeof(line)); // movie name
av_dict_set(&s->metadata, "title" , line, 0);
error |= read_line(pb, line, sizeof(line)); // date/copyright
av_dict_set(&s->metadata, "copyright", line, 0);
error |= read_line(pb, line, sizeof(line)); // author and other
av_dict_set(&s->metadata, "author" , line, 0);
// video headers
vst = avformat_new_stream(s, NULL);
if (!vst)
return AVERROR(ENOMEM);
vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
vst->codec->codec_tag = read_line_and_int(pb, &error); // video format
vst->codec->width = read_line_and_int(pb, &error); // video width
vst->codec->height = read_line_and_int(pb, &error); // video height
vst->codec->bits_per_coded_sample = read_line_and_int(pb, &error); // video bits per sample
error |= read_line(pb, line, sizeof(line)); // video frames per second
fps = read_fps(line, &error);
avpriv_set_pts_info(vst, 32, fps.den, fps.num);
// Figure out the video codec
switch (vst->codec->codec_tag) {
#if 0
case 122:
vst->codec->codec_id = CODEC_ID_ESCAPE122;
break;
#endif
case 124:
vst->codec->codec_id = CODEC_ID_ESCAPE124;
// The header is wrong here, at least sometimes
vst->codec->bits_per_coded_sample = 16;
break;
case 130:
vst->codec->codec_id = CODEC_ID_ESCAPE130;
break;
default:
av_log(s, AV_LOG_WARNING,
"RPL video format %i not supported yet!\n",
vst->codec->codec_tag);
vst->codec->codec_id = CODEC_ID_NONE;
}
// Audio headers
// ARMovie supports multiple audio tracks; I don't have any
// samples, though. This code will ignore additional tracks.
audio_format = read_line_and_int(pb, &error); // audio format ID
if (audio_format) {
ast = avformat_new_stream(s, NULL);
if (!ast)
return AVERROR(ENOMEM);
ast->codec->codec_type = AVMEDIA_TYPE_AUDIO;
ast->codec->codec_tag = audio_format;
ast->codec->sample_rate = read_line_and_int(pb, &error); // audio bitrate
ast->codec->channels = read_line_and_int(pb, &error); // number of audio channels
ast->codec->bits_per_coded_sample = read_line_and_int(pb, &error); // audio bits per sample
// At least one sample uses 0 for ADPCM, which is really 4 bits
// per sample.
if (ast->codec->bits_per_coded_sample == 0)
ast->codec->bits_per_coded_sample = 4;
ast->codec->bit_rate = ast->codec->sample_rate *
ast->codec->bits_per_coded_sample *
ast->codec->channels;
ast->codec->codec_id = CODEC_ID_NONE;
switch (audio_format) {
case 1:
if (ast->codec->bits_per_coded_sample == 16) {
// 16-bit audio is always signed
ast->codec->codec_id = CODEC_ID_PCM_S16LE;
break;
}
// There are some other formats listed as legal per the spec;
// samples needed.
break;
case 101:
if (ast->codec->bits_per_coded_sample == 8) {
// The samples with this kind of audio that I have
// are all unsigned.
ast->codec->codec_id = CODEC_ID_PCM_U8;
break;
} else if (ast->codec->bits_per_coded_sample == 4) {
ast->codec->codec_id = CODEC_ID_ADPCM_IMA_EA_SEAD;
break;
}
break;
}
if (ast->codec->codec_id == CODEC_ID_NONE) {
av_log(s, AV_LOG_WARNING,
"RPL audio format %i not supported yet!\n",
audio_format);
}
avpriv_set_pts_info(ast, 32, 1, ast->codec->bit_rate);
} else {
for (i = 0; i < 3; i++)
error |= read_line(pb, line, sizeof(line));
}
rpl->frames_per_chunk = read_line_and_int(pb, &error); // video frames per chunk
if (rpl->frames_per_chunk > 1 && vst->codec->codec_tag != 124)
av_log(s, AV_LOG_WARNING,
"Don't know how to split frames for video format %i. "
"Video stream will be broken!\n", vst->codec->codec_tag);
number_of_chunks = read_line_and_int(pb, &error); // number of chunks in the file
// The number in the header is actually the index of the last chunk.
number_of_chunks++;
error |= read_line(pb, line, sizeof(line)); // "even" chunk size in bytes
error |= read_line(pb, line, sizeof(line)); // "odd" chunk size in bytes
chunk_catalog_offset = // offset of the "chunk catalog"
read_line_and_int(pb, &error); // (file index)
error |= read_line(pb, line, sizeof(line)); // offset to "helpful" sprite
error |= read_line(pb, line, sizeof(line)); // size of "helpful" sprite
error |= read_line(pb, line, sizeof(line)); // offset to key frame list
// Read the index
avio_seek(pb, chunk_catalog_offset, SEEK_SET);
total_audio_size = 0;
for (i = 0; i < number_of_chunks; i++) {
int64_t offset, video_size, audio_size;
error |= read_line(pb, line, sizeof(line));
if (3 != sscanf(line, "%"PRId64" , %"PRId64" ; %"PRId64,
&offset, &video_size, &audio_size))
error = -1;
av_add_index_entry(vst, offset, i * rpl->frames_per_chunk,
video_size, rpl->frames_per_chunk, 0);
if (ast)
av_add_index_entry(ast, offset + video_size, total_audio_size,
audio_size, audio_size * 8, 0);
total_audio_size += audio_size * 8;
}
if (error) return AVERROR(EIO);
return 0;
}
| 25,926 |
qemu | f17fd4fdf0df3d2f3444399d04c38d22b9a3e1b7 | 0 | static QDict *monitor_parse_arguments(Monitor *mon,
const char **endp,
const mon_cmd_t *cmd)
{
const char *typestr;
char *key;
int c;
const char *p = *endp;
char buf[1024];
QDict *qdict = qdict_new();
/* parse the parameters */
typestr = cmd->args_type;
for(;;) {
typestr = key_get_info(typestr, &key);
if (!typestr)
break;
c = *typestr;
typestr++;
switch(c) {
case 'F':
case 'B':
case 's':
{
int ret;
while (qemu_isspace(*p))
p++;
if (*typestr == '?') {
typestr++;
if (*p == '\0') {
/* no optional string: NULL argument */
break;
}
}
ret = get_str(buf, sizeof(buf), &p);
if (ret < 0) {
switch(c) {
case 'F':
monitor_printf(mon, "%s: filename expected\n",
cmd->name);
break;
case 'B':
monitor_printf(mon, "%s: block device name expected\n",
cmd->name);
break;
default:
monitor_printf(mon, "%s: string expected\n", cmd->name);
break;
}
goto fail;
}
qdict_put(qdict, key, qstring_from_str(buf));
}
break;
case 'O':
{
QemuOptsList *opts_list;
QemuOpts *opts;
opts_list = qemu_find_opts(key);
if (!opts_list || opts_list->desc->name) {
goto bad_type;
}
while (qemu_isspace(*p)) {
p++;
}
if (!*p)
break;
if (get_str(buf, sizeof(buf), &p) < 0) {
goto fail;
}
opts = qemu_opts_parse_noisily(opts_list, buf, true);
if (!opts) {
goto fail;
}
qemu_opts_to_qdict(opts, qdict);
qemu_opts_del(opts);
}
break;
case '/':
{
int count, format, size;
while (qemu_isspace(*p))
p++;
if (*p == '/') {
/* format found */
p++;
count = 1;
if (qemu_isdigit(*p)) {
count = 0;
while (qemu_isdigit(*p)) {
count = count * 10 + (*p - '0');
p++;
}
}
size = -1;
format = -1;
for(;;) {
switch(*p) {
case 'o':
case 'd':
case 'u':
case 'x':
case 'i':
case 'c':
format = *p++;
break;
case 'b':
size = 1;
p++;
break;
case 'h':
size = 2;
p++;
break;
case 'w':
size = 4;
p++;
break;
case 'g':
case 'L':
size = 8;
p++;
break;
default:
goto next;
}
}
next:
if (*p != '\0' && !qemu_isspace(*p)) {
monitor_printf(mon, "invalid char in format: '%c'\n",
*p);
goto fail;
}
if (format < 0)
format = default_fmt_format;
if (format != 'i') {
/* for 'i', not specifying a size gives -1 as size */
if (size < 0)
size = default_fmt_size;
default_fmt_size = size;
}
default_fmt_format = format;
} else {
count = 1;
format = default_fmt_format;
if (format != 'i') {
size = default_fmt_size;
} else {
size = -1;
}
}
qdict_put(qdict, "count", qint_from_int(count));
qdict_put(qdict, "format", qint_from_int(format));
qdict_put(qdict, "size", qint_from_int(size));
}
break;
case 'i':
case 'l':
case 'M':
{
int64_t val;
while (qemu_isspace(*p))
p++;
if (*typestr == '?' || *typestr == '.') {
if (*typestr == '?') {
if (*p == '\0') {
typestr++;
break;
}
} else {
if (*p == '.') {
p++;
while (qemu_isspace(*p))
p++;
} else {
typestr++;
break;
}
}
typestr++;
}
if (get_expr(mon, &val, &p))
goto fail;
/* Check if 'i' is greater than 32-bit */
if ((c == 'i') && ((val >> 32) & 0xffffffff)) {
monitor_printf(mon, "\'%s\' has failed: ", cmd->name);
monitor_printf(mon, "integer is for 32-bit values\n");
goto fail;
} else if (c == 'M') {
if (val < 0) {
monitor_printf(mon, "enter a positive value\n");
goto fail;
}
val <<= 20;
}
qdict_put(qdict, key, qint_from_int(val));
}
break;
case 'o':
{
int64_t val;
char *end;
while (qemu_isspace(*p)) {
p++;
}
if (*typestr == '?') {
typestr++;
if (*p == '\0') {
break;
}
}
val = qemu_strtosz_MiB(p, &end);
if (val < 0) {
monitor_printf(mon, "invalid size\n");
goto fail;
}
qdict_put(qdict, key, qint_from_int(val));
p = end;
}
break;
case 'T':
{
double val;
while (qemu_isspace(*p))
p++;
if (*typestr == '?') {
typestr++;
if (*p == '\0') {
break;
}
}
if (get_double(mon, &val, &p) < 0) {
goto fail;
}
if (p[0] && p[1] == 's') {
switch (*p) {
case 'm':
val /= 1e3; p += 2; break;
case 'u':
val /= 1e6; p += 2; break;
case 'n':
val /= 1e9; p += 2; break;
}
}
if (*p && !qemu_isspace(*p)) {
monitor_printf(mon, "Unknown unit suffix\n");
goto fail;
}
qdict_put(qdict, key, qfloat_from_double(val));
}
break;
case 'b':
{
const char *beg;
bool val;
while (qemu_isspace(*p)) {
p++;
}
beg = p;
while (qemu_isgraph(*p)) {
p++;
}
if (p - beg == 2 && !memcmp(beg, "on", p - beg)) {
val = true;
} else if (p - beg == 3 && !memcmp(beg, "off", p - beg)) {
val = false;
} else {
monitor_printf(mon, "Expected 'on' or 'off'\n");
goto fail;
}
qdict_put(qdict, key, qbool_from_bool(val));
}
break;
case '-':
{
const char *tmp = p;
int skip_key = 0;
/* option */
c = *typestr++;
if (c == '\0')
goto bad_type;
while (qemu_isspace(*p))
p++;
if (*p == '-') {
p++;
if(c != *p) {
if(!is_valid_option(p, typestr)) {
monitor_printf(mon, "%s: unsupported option -%c\n",
cmd->name, *p);
goto fail;
} else {
skip_key = 1;
}
}
if(skip_key) {
p = tmp;
} else {
/* has option */
p++;
qdict_put(qdict, key, qbool_from_bool(true));
}
}
}
break;
case 'S':
{
/* package all remaining string */
int len;
while (qemu_isspace(*p)) {
p++;
}
if (*typestr == '?') {
typestr++;
if (*p == '\0') {
/* no remaining string: NULL argument */
break;
}
}
len = strlen(p);
if (len <= 0) {
monitor_printf(mon, "%s: string expected\n",
cmd->name);
goto fail;
}
qdict_put(qdict, key, qstring_from_str(p));
p += len;
}
break;
default:
bad_type:
monitor_printf(mon, "%s: unknown type '%c'\n", cmd->name, c);
goto fail;
}
g_free(key);
key = NULL;
}
/* check that all arguments were parsed */
while (qemu_isspace(*p))
p++;
if (*p != '\0') {
monitor_printf(mon, "%s: extraneous characters at the end of line\n",
cmd->name);
goto fail;
}
return qdict;
fail:
QDECREF(qdict);
g_free(key);
return NULL;
}
| 25,927 |
qemu | b3db211f3c80bb996a704d665fe275619f728bd4 | 0 | static void visitor_input_teardown(TestInputVisitorData *data,
const void *unused)
{
qobject_decref(data->obj);
data->obj = NULL;
if (data->qiv) {
visit_free(data->qiv);
data->qiv = NULL;
}
}
| 25,928 |
qemu | e5cb7e7677010f529d3f0f9dcdb385dea9446f8d | 0 | MigrationState *migrate_get_current(void)
{
static bool once;
static MigrationState current_migration = {
.state = MIGRATION_STATUS_NONE,
.xbzrle_cache_size = DEFAULT_MIGRATE_CACHE_SIZE,
.mbps = -1,
.parameters = {
.compress_level = DEFAULT_MIGRATE_COMPRESS_LEVEL,
.compress_threads = DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT,
.decompress_threads = DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT,
.cpu_throttle_initial = DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL,
.cpu_throttle_increment = DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT,
.max_bandwidth = MAX_THROTTLE,
.downtime_limit = DEFAULT_MIGRATE_SET_DOWNTIME,
.x_checkpoint_delay = DEFAULT_MIGRATE_X_CHECKPOINT_DELAY,
},
};
if (!once) {
current_migration.parameters.tls_creds = g_strdup("");
current_migration.parameters.tls_hostname = g_strdup("");
once = true;
}
return &current_migration;
}
| 25,929 |
qemu | 1f01e50b8330c24714ddca5841fdbb703076b121 | 0 | static int coroutine_fn qed_aio_read_data(void *opaque, int ret,
uint64_t offset, size_t len)
{
QEDAIOCB *acb = opaque;
BDRVQEDState *s = acb_to_s(acb);
BlockDriverState *bs = acb->bs;
/* Adjust offset into cluster */
offset += qed_offset_into_cluster(s, acb->cur_pos);
trace_qed_aio_read_data(s, acb, ret, offset, len);
qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);
/* Handle zero cluster and backing file reads */
if (ret == QED_CLUSTER_ZERO) {
qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size);
return 0;
} else if (ret != QED_CLUSTER_FOUND) {
return qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov,
&acb->backing_qiov);
}
BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
ret = bdrv_co_preadv(bs->file, offset, acb->cur_qiov.size,
&acb->cur_qiov, 0);
if (ret < 0) {
return ret;
}
return 0;
}
| 25,930 |
qemu | b40acf99bef69fa8ab0f9092ff162fde945eec12 | 0 | static void portio_list_add_1(PortioList *piolist,
const MemoryRegionPortio *pio_init,
unsigned count, unsigned start,
unsigned off_low, unsigned off_high)
{
MemoryRegionPortio *pio;
MemoryRegionOps *ops;
MemoryRegion *region, *alias;
unsigned i;
/* Copy the sub-list and null-terminate it. */
pio = g_new(MemoryRegionPortio, count + 1);
memcpy(pio, pio_init, sizeof(MemoryRegionPortio) * count);
memset(pio + count, 0, sizeof(MemoryRegionPortio));
/* Adjust the offsets to all be zero-based for the region. */
for (i = 0; i < count; ++i) {
pio[i].offset -= off_low;
}
ops = g_new0(MemoryRegionOps, 1);
ops->old_portio = pio;
region = g_new(MemoryRegion, 1);
alias = g_new(MemoryRegion, 1);
/*
* Use an alias so that the callback is called with an absolute address,
* rather than an offset relative to start + off_low.
*/
memory_region_init_io(region, ops, piolist->opaque, piolist->name,
INT64_MAX);
memory_region_init_alias(alias, piolist->name,
region, start + off_low, off_high - off_low);
memory_region_add_subregion(piolist->address_space,
start + off_low, alias);
piolist->regions[piolist->nr] = region;
piolist->aliases[piolist->nr] = alias;
++piolist->nr;
}
| 25,931 |
qemu | e1b42f456fad6e797eaf795ed2e400c4e47d5eb4 | 0 | static int bochs_read(BlockDriverState *bs, int64_t sector_num,
uint8_t *buf, int nb_sectors)
{
int ret;
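/* Map each 512-byte sector through seek_to_sector(); sectors with no backing data in the image read back as zeroes. */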
while (nb_sectors > 0) {
int64_t block_offset = seek_to_sector(bs, sector_num);
if (block_offset >= 0) {
ret = bdrv_pread(bs->file, block_offset, buf, 512);
if (ret != 512) {
return -1;
}
} else
memset(buf, 0, 512);
nb_sectors--;
sector_num++;
buf += 512;
}
return 0;
}
| 25,933 |
qemu | b6fcf32d9b851a83dedcb609091236b97cc4a985 | 0 | static void visit_nested_struct_list(Visitor *v, void **native, Error **errp)
{
visit_type_UserDefNestedList(v, (UserDefNestedList **)native, NULL, errp);
}
| 25,935 |
FFmpeg | e6c90ce94f1b07f50cea2babf7471af455cca0ff | 0 | int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl, H264Context *h0)
{
unsigned int first_mb_in_slice;
unsigned int pps_id;
int ret;
unsigned int slice_type, tmp, i, j;
int default_ref_list_done = 0;
int last_pic_structure, last_pic_droppable;
int needs_reinit = 0;
int field_pic_flag, bottom_field_flag;
h->qpel_put = h->h264qpel.put_h264_qpel_pixels_tab;
h->qpel_avg = h->h264qpel.avg_h264_qpel_pixels_tab;
first_mb_in_slice = get_ue_golomb(&h->gb);
if (first_mb_in_slice == 0) { // FIXME better field boundary detection
if (h0->current_slice && h->cur_pic_ptr && FIELD_PICTURE(h)) {
ff_h264_field_end(h, sl, 1);
}
h0->current_slice = 0;
if (!h0->first_field) {
if (h->cur_pic_ptr && !h->droppable) {
ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
h->picture_structure == PICT_BOTTOM_FIELD);
}
h->cur_pic_ptr = NULL;
}
}
slice_type = get_ue_golomb_31(&h->gb);
if (slice_type > 9) {
av_log(h->avctx, AV_LOG_ERROR,
"slice type %d too large at %d %d\n",
slice_type, h->mb_x, h->mb_y);
return AVERROR_INVALIDDATA;
}
if (slice_type > 4) {
slice_type -= 5;
sl->slice_type_fixed = 1;
} else
sl->slice_type_fixed = 0;
slice_type = golomb_to_pict_type[slice_type];
if (slice_type == AV_PICTURE_TYPE_I ||
(h0->current_slice != 0 && slice_type == h0->last_slice_type)) {
default_ref_list_done = 1;
}
sl->slice_type = slice_type;
sl->slice_type_nos = slice_type & 3;
if (h->nal_unit_type == NAL_IDR_SLICE &&
sl->slice_type_nos != AV_PICTURE_TYPE_I) {
av_log(h->avctx, AV_LOG_ERROR, "A non-intra slice in an IDR NAL unit.\n");
return AVERROR_INVALIDDATA;
}
// to make a few old functions happy, it's wrong though
h->pict_type = sl->slice_type;
pps_id = get_ue_golomb(&h->gb);
if (pps_id >= MAX_PPS_COUNT) {
av_log(h->avctx, AV_LOG_ERROR, "pps_id %u out of range\n", pps_id);
return AVERROR_INVALIDDATA;
}
if (!h0->pps_buffers[pps_id]) {
av_log(h->avctx, AV_LOG_ERROR,
"non-existing PPS %u referenced\n",
pps_id);
return AVERROR_INVALIDDATA;
}
h->pps = *h0->pps_buffers[pps_id];
if (!h0->sps_buffers[h->pps.sps_id]) {
av_log(h->avctx, AV_LOG_ERROR,
"non-existing SPS %u referenced\n",
h->pps.sps_id);
return AVERROR_INVALIDDATA;
}
if (h->pps.sps_id != h->sps.sps_id ||
h0->sps_buffers[h->pps.sps_id]->new) {
h0->sps_buffers[h->pps.sps_id]->new = 0;
h->sps = *h0->sps_buffers[h->pps.sps_id];
if (h->bit_depth_luma != h->sps.bit_depth_luma ||
h->chroma_format_idc != h->sps.chroma_format_idc) {
h->bit_depth_luma = h->sps.bit_depth_luma;
h->chroma_format_idc = h->sps.chroma_format_idc;
needs_reinit = 1;
}
if ((ret = ff_h264_set_parameter_from_sps(h)) < 0)
return ret;
}
h->avctx->profile = ff_h264_get_profile(&h->sps);
h->avctx->level = h->sps.level_idc;
h->avctx->refs = h->sps.ref_frame_count;
if (h->mb_width != h->sps.mb_width ||
h->mb_height != h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag))
needs_reinit = 1;
h->mb_width = h->sps.mb_width;
h->mb_height = h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag);
h->mb_num = h->mb_width * h->mb_height;
h->mb_stride = h->mb_width + 1;
h->b_stride = h->mb_width * 4;
h->chroma_y_shift = h->sps.chroma_format_idc <= 1; // 400 uses yuv420p
h->width = 16 * h->mb_width;
h->height = 16 * h->mb_height;
ret = init_dimensions(h);
if (ret < 0)
return ret;
if (h->sps.video_signal_type_present_flag) {
h->avctx->color_range = h->sps.full_range ? AVCOL_RANGE_JPEG
: AVCOL_RANGE_MPEG;
if (h->sps.colour_description_present_flag) {
if (h->avctx->colorspace != h->sps.colorspace)
needs_reinit = 1;
h->avctx->color_primaries = h->sps.color_primaries;
h->avctx->color_trc = h->sps.color_trc;
h->avctx->colorspace = h->sps.colorspace;
}
}
if (h->context_initialized && needs_reinit) {
if (h != h0) {
av_log(h->avctx, AV_LOG_ERROR,
"changing width %d -> %d / height %d -> %d on "
"slice %d\n",
h->width, h->avctx->coded_width,
h->height, h->avctx->coded_height,
h0->current_slice + 1);
return AVERROR_INVALIDDATA;
}
ff_h264_flush_change(h);
if ((ret = get_pixel_format(h)) < 0)
return ret;
h->avctx->pix_fmt = ret;
av_log(h->avctx, AV_LOG_INFO, "Reinit context to %dx%d, "
"pix_fmt: %d\n", h->width, h->height, h->avctx->pix_fmt);
if ((ret = h264_slice_header_init(h, 1)) < 0) {
av_log(h->avctx, AV_LOG_ERROR,
"h264_slice_header_init() failed\n");
return ret;
}
}
if (!h->context_initialized) {
if (h != h0) {
av_log(h->avctx, AV_LOG_ERROR,
"Cannot (re-)initialize context during parallel decoding.\n");
return AVERROR_PATCHWELCOME;
}
if ((ret = get_pixel_format(h)) < 0)
return ret;
h->avctx->pix_fmt = ret;
if ((ret = h264_slice_header_init(h, 0)) < 0) {
av_log(h->avctx, AV_LOG_ERROR,
"h264_slice_header_init() failed\n");
return ret;
}
}
if (h == h0 && h->dequant_coeff_pps != pps_id) {
h->dequant_coeff_pps = pps_id;
h264_init_dequant_tables(h);
}
h->frame_num = get_bits(&h->gb, h->sps.log2_max_frame_num);
h->mb_mbaff = 0;
h->mb_aff_frame = 0;
last_pic_structure = h0->picture_structure;
last_pic_droppable = h0->droppable;
h->droppable = h->nal_ref_idc == 0;
if (h->sps.frame_mbs_only_flag) {
h->picture_structure = PICT_FRAME;
} else {
field_pic_flag = get_bits1(&h->gb);
if (field_pic_flag) {
bottom_field_flag = get_bits1(&h->gb);
h->picture_structure = PICT_TOP_FIELD + bottom_field_flag;
} else {
h->picture_structure = PICT_FRAME;
h->mb_aff_frame = h->sps.mb_aff;
}
}
h->mb_field_decoding_flag = h->picture_structure != PICT_FRAME;
if (h0->current_slice != 0) {
if (last_pic_structure != h->picture_structure ||
last_pic_droppable != h->droppable) {
av_log(h->avctx, AV_LOG_ERROR,
"Changing field mode (%d -> %d) between slices is not allowed\n",
last_pic_structure, h->picture_structure);
h->picture_structure = last_pic_structure;
h->droppable = last_pic_droppable;
return AVERROR_INVALIDDATA;
} else if (!h0->cur_pic_ptr) {
av_log(h->avctx, AV_LOG_ERROR,
"unset cur_pic_ptr on slice %d\n",
h0->current_slice + 1);
return AVERROR_INVALIDDATA;
}
} else {
/* Shorten frame num gaps so we don't have to allocate reference
* frames just to throw them away */
if (h->frame_num != h->prev_frame_num) {
int unwrap_prev_frame_num = h->prev_frame_num;
int max_frame_num = 1 << h->sps.log2_max_frame_num;
if (unwrap_prev_frame_num > h->frame_num)
unwrap_prev_frame_num -= max_frame_num;
if ((h->frame_num - unwrap_prev_frame_num) > h->sps.ref_frame_count) {
unwrap_prev_frame_num = (h->frame_num - h->sps.ref_frame_count) - 1;
if (unwrap_prev_frame_num < 0)
unwrap_prev_frame_num += max_frame_num;
h->prev_frame_num = unwrap_prev_frame_num;
}
}
/* See if we have a decoded first field looking for a pair...
* Here, we're using that to see if we should mark previously
* decode frames as "finished".
* We have to do that before the "dummy" in-between frame allocation,
* since that can modify s->current_picture_ptr. */
if (h0->first_field) {
assert(h0->cur_pic_ptr);
assert(h0->cur_pic_ptr->f.buf[0]);
assert(h0->cur_pic_ptr->reference != DELAYED_PIC_REF);
/* figure out if we have a complementary field pair */
if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) {
/* Previous field is unmatched. Don't display it, but let it
* remain for reference if marked as such. */
if (!last_pic_droppable && last_pic_structure != PICT_FRAME) {
ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX,
last_pic_structure == PICT_TOP_FIELD);
}
} else {
if (h0->cur_pic_ptr->frame_num != h->frame_num) {
/* This and previous field were reference, but had
* different frame_nums. Consider this field first in
* pair. Throw away previous field except for reference
* purposes. */
if (!last_pic_droppable && last_pic_structure != PICT_FRAME) {
ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX,
last_pic_structure == PICT_TOP_FIELD);
}
} else {
/* Second field in complementary pair */
if (!((last_pic_structure == PICT_TOP_FIELD &&
h->picture_structure == PICT_BOTTOM_FIELD) ||
(last_pic_structure == PICT_BOTTOM_FIELD &&
h->picture_structure == PICT_TOP_FIELD))) {
av_log(h->avctx, AV_LOG_ERROR,
"Invalid field mode combination %d/%d\n",
last_pic_structure, h->picture_structure);
h->picture_structure = last_pic_structure;
h->droppable = last_pic_droppable;
return AVERROR_INVALIDDATA;
} else if (last_pic_droppable != h->droppable) {
avpriv_request_sample(h->avctx,
"Found reference and non-reference fields in the same frame, which");
h->picture_structure = last_pic_structure;
h->droppable = last_pic_droppable;
return AVERROR_PATCHWELCOME;
}
}
}
}
while (h->frame_num != h->prev_frame_num &&
h->frame_num != (h->prev_frame_num + 1) % (1 << h->sps.log2_max_frame_num)) {
H264Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL;
av_log(h->avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n",
h->frame_num, h->prev_frame_num);
ret = h264_frame_start(h);
if (ret < 0) {
h0->first_field = 0;
return ret;
}
h->prev_frame_num++;
h->prev_frame_num %= 1 << h->sps.log2_max_frame_num;
h->cur_pic_ptr->frame_num = h->prev_frame_num;
ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0);
ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1);
ret = ff_generate_sliding_window_mmcos(h, 1);
if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
return ret;
ret = ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index);
if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
return ret;
/* Error concealment: If a ref is missing, copy the previous ref
* in its place.
* FIXME: Avoiding a memcpy would be nice, but ref handling makes
* many assumptions about there being no actual duplicates.
* FIXME: This does not copy padding for out-of-frame motion
* vectors. Given we are concealing a lost frame, this probably
* is not noticeable by comparison, but it should be fixed. */
if (h->short_ref_count) {
if (prev) {
av_image_copy(h->short_ref[0]->f.data,
h->short_ref[0]->f.linesize,
(const uint8_t **)prev->f.data,
prev->f.linesize,
h->avctx->pix_fmt,
h->mb_width * 16,
h->mb_height * 16);
h->short_ref[0]->poc = prev->poc + 2;
}
h->short_ref[0]->frame_num = h->prev_frame_num;
}
}
/* See if we have a decoded first field looking for a pair...
* We're using that to see whether to continue decoding in that
* frame, or to allocate a new one. */
if (h0->first_field) {
assert(h0->cur_pic_ptr);
assert(h0->cur_pic_ptr->f.buf[0]);
assert(h0->cur_pic_ptr->reference != DELAYED_PIC_REF);
/* figure out if we have a complementary field pair */
if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) {
/* Previous field is unmatched. Don't display it, but let it
* remain for reference if marked as such. */
h0->cur_pic_ptr = NULL;
h0->first_field = FIELD_PICTURE(h);
} else {
if (h0->cur_pic_ptr->frame_num != h->frame_num) {
/* This and the previous field had different frame_nums.
* Consider this field first in pair. Throw away previous
* one except for reference purposes. */
h0->first_field = 1;
h0->cur_pic_ptr = NULL;
} else {
/* Second field in complementary pair */
h0->first_field = 0;
}
}
} else {
/* Frame or first field in a potentially complementary pair */
h0->first_field = FIELD_PICTURE(h);
}
if (!FIELD_PICTURE(h) || h0->first_field) {
if (h264_frame_start(h) < 0) {
h0->first_field = 0;
return AVERROR_INVALIDDATA;
}
} else {
release_unused_pictures(h, 0);
}
}
if (h != h0 && (ret = clone_slice(h, h0)) < 0)
return ret;
h->cur_pic_ptr->frame_num = h->frame_num; // FIXME frame_num cleanup
assert(h->mb_num == h->mb_width * h->mb_height);
if (first_mb_in_slice << FIELD_OR_MBAFF_PICTURE(h) >= h->mb_num ||
first_mb_in_slice >= h->mb_num) {
av_log(h->avctx, AV_LOG_ERROR, "first_mb_in_slice overflow\n");
return AVERROR_INVALIDDATA;
}
h->resync_mb_x = h->mb_x = first_mb_in_slice % h->mb_width;
h->resync_mb_y = h->mb_y = (first_mb_in_slice / h->mb_width) <<
FIELD_OR_MBAFF_PICTURE(h);
if (h->picture_structure == PICT_BOTTOM_FIELD)
h->resync_mb_y = h->mb_y = h->mb_y + 1;
assert(h->mb_y < h->mb_height);
if (h->picture_structure == PICT_FRAME) {
h->curr_pic_num = h->frame_num;
h->max_pic_num = 1 << h->sps.log2_max_frame_num;
} else {
h->curr_pic_num = 2 * h->frame_num + 1;
h->max_pic_num = 1 << (h->sps.log2_max_frame_num + 1);
}
if (h->nal_unit_type == NAL_IDR_SLICE)
get_ue_golomb(&h->gb); /* idr_pic_id */
if (h->sps.poc_type == 0) {
h->poc_lsb = get_bits(&h->gb, h->sps.log2_max_poc_lsb);
if (h->pps.pic_order_present == 1 && h->picture_structure == PICT_FRAME)
h->delta_poc_bottom = get_se_golomb(&h->gb);
}
if (h->sps.poc_type == 1 && !h->sps.delta_pic_order_always_zero_flag) {
h->delta_poc[0] = get_se_golomb(&h->gb);
if (h->pps.pic_order_present == 1 && h->picture_structure == PICT_FRAME)
h->delta_poc[1] = get_se_golomb(&h->gb);
}
ff_init_poc(h, h->cur_pic_ptr->field_poc, &h->cur_pic_ptr->poc);
if (h->pps.redundant_pic_cnt_present)
h->redundant_pic_count = get_ue_golomb(&h->gb);
ret = ff_set_ref_count(h, sl);
if (ret < 0)
return ret;
else if (ret == 1)
default_ref_list_done = 0;
if (!default_ref_list_done)
ff_h264_fill_default_ref_list(h, sl);
if (sl->slice_type_nos != AV_PICTURE_TYPE_I) {
ret = ff_h264_decode_ref_pic_list_reordering(h, sl);
if (ret < 0) {
sl->ref_count[1] = sl->ref_count[0] = 0;
return ret;
}
}
if ((h->pps.weighted_pred && sl->slice_type_nos == AV_PICTURE_TYPE_P) ||
(h->pps.weighted_bipred_idc == 1 &&
sl->slice_type_nos == AV_PICTURE_TYPE_B))
ff_pred_weight_table(h, sl);
else if (h->pps.weighted_bipred_idc == 2 &&
sl->slice_type_nos == AV_PICTURE_TYPE_B) {
implicit_weight_table(h, sl, -1);
} else {
sl->use_weight = 0;
for (i = 0; i < 2; i++) {
sl->luma_weight_flag[i] = 0;
sl->chroma_weight_flag[i] = 0;
}
}
// If frame-mt is enabled, only update mmco tables for the first slice
// in a field. Subsequent slices can temporarily clobber h->mmco_index
// or h->mmco, which will cause ref list mix-ups and decoding errors
// further down the line. This may break decoding if the first slice is
// corrupt, thus we only do this if frame-mt is enabled.
if (h->nal_ref_idc) {
ret = ff_h264_decode_ref_pic_marking(h0, &h->gb,
!(h->avctx->active_thread_type & FF_THREAD_FRAME) ||
h0->current_slice == 0);
if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
return AVERROR_INVALIDDATA;
}
if (FRAME_MBAFF(h)) {
ff_h264_fill_mbaff_ref_list(h, sl);
if (h->pps.weighted_bipred_idc == 2 && sl->slice_type_nos == AV_PICTURE_TYPE_B) {
implicit_weight_table(h, sl, 0);
implicit_weight_table(h, sl, 1);
}
}
if (sl->slice_type_nos == AV_PICTURE_TYPE_B && !sl->direct_spatial_mv_pred)
ff_h264_direct_dist_scale_factor(h, sl);
ff_h264_direct_ref_list_init(h, sl);
if (sl->slice_type_nos != AV_PICTURE_TYPE_I && h->pps.cabac) {
tmp = get_ue_golomb_31(&h->gb);
if (tmp > 2) {
av_log(h->avctx, AV_LOG_ERROR, "cabac_init_idc %u overflow\n", tmp);
return AVERROR_INVALIDDATA;
}
h->cabac_init_idc = tmp;
}
sl->last_qscale_diff = 0;
tmp = h->pps.init_qp + get_se_golomb(&h->gb);
if (tmp > 51 + 6 * (h->sps.bit_depth_luma - 8)) {
av_log(h->avctx, AV_LOG_ERROR, "QP %u out of range\n", tmp);
return AVERROR_INVALIDDATA;
}
sl->qscale = tmp;
sl->chroma_qp[0] = get_chroma_qp(h, 0, sl->qscale);
sl->chroma_qp[1] = get_chroma_qp(h, 1, sl->qscale);
// FIXME qscale / qp ... stuff
if (sl->slice_type == AV_PICTURE_TYPE_SP)
get_bits1(&h->gb); /* sp_for_switch_flag */
if (sl->slice_type == AV_PICTURE_TYPE_SP ||
sl->slice_type == AV_PICTURE_TYPE_SI)
get_se_golomb(&h->gb); /* slice_qs_delta */
h->deblocking_filter = 1;
h->slice_alpha_c0_offset = 0;
h->slice_beta_offset = 0;
if (h->pps.deblocking_filter_parameters_present) {
tmp = get_ue_golomb_31(&h->gb);
if (tmp > 2) {
av_log(h->avctx, AV_LOG_ERROR,
"deblocking_filter_idc %u out of range\n", tmp);
return AVERROR_INVALIDDATA;
}
h->deblocking_filter = tmp;
if (h->deblocking_filter < 2)
h->deblocking_filter ^= 1; // 1<->0
if (h->deblocking_filter) {
h->slice_alpha_c0_offset = get_se_golomb(&h->gb) * 2;
h->slice_beta_offset = get_se_golomb(&h->gb) * 2;
if (h->slice_alpha_c0_offset > 12 ||
h->slice_alpha_c0_offset < -12 ||
h->slice_beta_offset > 12 ||
h->slice_beta_offset < -12) {
av_log(h->avctx, AV_LOG_ERROR,
"deblocking filter parameters %d %d out of range\n",
h->slice_alpha_c0_offset, h->slice_beta_offset);
return AVERROR_INVALIDDATA;
}
}
}
if (h->avctx->skip_loop_filter >= AVDISCARD_ALL ||
(h->avctx->skip_loop_filter >= AVDISCARD_NONKEY &&
sl->slice_type_nos != AV_PICTURE_TYPE_I) ||
(h->avctx->skip_loop_filter >= AVDISCARD_BIDIR &&
sl->slice_type_nos == AV_PICTURE_TYPE_B) ||
(h->avctx->skip_loop_filter >= AVDISCARD_NONREF &&
h->nal_ref_idc == 0))
h->deblocking_filter = 0;
if (h->deblocking_filter == 1 && h0->max_contexts > 1) {
if (h->avctx->flags2 & CODEC_FLAG2_FAST) {
/* Cheat slightly for speed:
* Do not bother to deblock across slices. */
h->deblocking_filter = 2;
} else {
h0->max_contexts = 1;
if (!h0->single_decode_warning) {
av_log(h->avctx, AV_LOG_INFO,
"Cannot parallelize deblocking type 1, decoding such frames in sequential order\n");
h0->single_decode_warning = 1;
}
if (h != h0) {
av_log(h->avctx, AV_LOG_ERROR,
"Deblocking switched inside frame.\n");
return 1;
}
}
}
sl->qp_thresh = 15 -
FFMIN(h->slice_alpha_c0_offset, h->slice_beta_offset) -
FFMAX3(0,
h->pps.chroma_qp_index_offset[0],
h->pps.chroma_qp_index_offset[1]) +
6 * (h->sps.bit_depth_luma - 8);
h0->last_slice_type = slice_type;
sl->slice_num = ++h0->current_slice;
if (sl->slice_num >= MAX_SLICES) {
av_log(h->avctx, AV_LOG_ERROR,
"Too many slices, increase MAX_SLICES and recompile\n");
}
for (j = 0; j < 2; j++) {
int id_list[16];
int *ref2frm = sl->ref2frm[sl->slice_num & (MAX_SLICES - 1)][j];
for (i = 0; i < 16; i++) {
id_list[i] = 60;
if (j < sl->list_count && i < sl->ref_count[j] &&
sl->ref_list[j][i].f.buf[0]) {
int k;
AVBuffer *buf = sl->ref_list[j][i].f.buf[0]->buffer;
for (k = 0; k < h->short_ref_count; k++)
if (h->short_ref[k]->f.buf[0]->buffer == buf) {
id_list[i] = k;
break;
}
for (k = 0; k < h->long_ref_count; k++)
if (h->long_ref[k] && h->long_ref[k]->f.buf[0]->buffer == buf) {
id_list[i] = h->short_ref_count + k;
break;
}
}
}
ref2frm[0] =
ref2frm[1] = -1;
for (i = 0; i < 16; i++)
ref2frm[i + 2] = 4 * id_list[i] + (sl->ref_list[j][i].reference & 3);
ref2frm[18 + 0] =
ref2frm[18 + 1] = -1;
for (i = 16; i < 48; i++)
ref2frm[i + 4] = 4 * id_list[(i - 16) >> 1] +
(sl->ref_list[j][i].reference & 3);
}
if (h->avctx->debug & FF_DEBUG_PICT_INFO) {
av_log(h->avctx, AV_LOG_DEBUG,
"slice:%d %s mb:%d %c%s%s pps:%u frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d:%d:%d weight:%d%s %s\n",
sl->slice_num,
(h->picture_structure == PICT_FRAME ? "F" : h->picture_structure == PICT_TOP_FIELD ? "T" : "B"),
first_mb_in_slice,
av_get_picture_type_char(sl->slice_type),
sl->slice_type_fixed ? " fix" : "",
h->nal_unit_type == NAL_IDR_SLICE ? " IDR" : "",
pps_id, h->frame_num,
h->cur_pic_ptr->field_poc[0],
h->cur_pic_ptr->field_poc[1],
sl->ref_count[0], sl->ref_count[1],
sl->qscale,
h->deblocking_filter,
h->slice_alpha_c0_offset, h->slice_beta_offset,
sl->use_weight,
sl->use_weight == 1 && sl->use_weight_chroma ? "c" : "",
sl->slice_type == AV_PICTURE_TYPE_B ? (sl->direct_spatial_mv_pred ? "SPAT" : "TEMP") : "");
}
return 0;
}
| 25,938 |
qemu | 937470bb5470825e781ae50e92ff973a6b54d80f | 1 | Object *qio_task_get_source(QIOTask *task)
{
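/* Hand back the task's source object with an extra reference taken on it. */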
object_ref(task->source);
return task->source;
}
| 25,939 |
qemu | 4ae4b609ee2d5bcc9df6c03c21dc1fed527aada1 | 1 | static void gen_ldx(DisasContext *dc, uint32_t code, uint32_t flags)
{
I_TYPE(instr, code);
TCGv addr = tcg_temp_new();
TCGv data;
/*
* WARNING: Loads into R_ZERO are ignored, but we must generate the
* memory access itself to emulate the CPU precisely. Load
* from a protected page to R_ZERO will cause SIGSEGV on
* the Nios2 CPU.
*/
if (likely(instr.b != R_ZERO)) {
data = dc->cpu_R[instr.b];
} else {
data = tcg_temp_new();
}
tcg_gen_addi_tl(addr, load_gpr(dc, instr.a), instr.imm16s);
tcg_gen_qemu_ld_tl(data, addr, dc->mem_idx, flags);
if (unlikely(instr.b == R_ZERO)) {
tcg_temp_free(data);
}
tcg_temp_free(addr);
}
| 25,940 |
FFmpeg | a66099192159d02b1a1c1820ddb24c7cea271a44 | 0 | static int64_t mpegts_get_dts(AVFormatContext *s, int stream_index,
int64_t *ppos, int64_t pos_limit)
{
MpegTSContext *ts = s->priv_data;
int64_t pos;
int pos47 = ts->pos47_full % ts->raw_packet_size;
pos = ((*ppos + ts->raw_packet_size - 1 - pos47) / ts->raw_packet_size) * ts->raw_packet_size + pos47;
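/* Align the search position to the next raw TS packet boundary, then read frames until one from the requested stream carries a DTS. */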
ff_read_frame_flush(s);
if (avio_seek(s->pb, pos, SEEK_SET) < 0)
return AV_NOPTS_VALUE;
while(pos < pos_limit) {
int ret;
AVPacket pkt;
av_init_packet(&pkt);
ret= av_read_frame(s, &pkt);
if(ret < 0)
return AV_NOPTS_VALUE;
av_free_packet(&pkt);
if(pkt.dts != AV_NOPTS_VALUE && pkt.pos >= 0){
ff_reduce_index(s, pkt.stream_index);
av_add_index_entry(s->streams[pkt.stream_index], pkt.pos, pkt.dts, 0, 0, AVINDEX_KEYFRAME /* FIXME keyframe? */);
if(pkt.stream_index == stream_index){
*ppos= pkt.pos;
return pkt.dts;
}
}
pos = pkt.pos;
}
return AV_NOPTS_VALUE;
}
| 25,941 |
qemu | ff52aab2df5c5e10f231481961b88d25a3021724 | 1 | static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
BDRVQcowState *s = bs->opaque;
int l2_index;
uint64_t *l2_table;
uint64_t entry;
unsigned int nb_clusters;
int ret;
uint64_t alloc_cluster_offset;
trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset,
*bytes);
assert(*bytes > 0);
/*
* Calculate the number of clusters to look for. We stop at L2 table
* boundaries to keep things simple.
*/
nb_clusters =
size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);
l2_index = offset_to_l2_index(s, guest_offset);
nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
/* Find L2 entry for the first involved cluster */
ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
if (ret < 0) {
return ret;
}
entry = be64_to_cpu(l2_table[l2_index]);
/* For the moment, overwrite compressed clusters one by one */
if (entry & QCOW_OFLAG_COMPRESSED) {
nb_clusters = 1;
} else {
nb_clusters = count_cow_clusters(s, nb_clusters, l2_table, l2_index);
}
/* This function is only called when there were no non-COW clusters, so if
* we can't find any unallocated or COW clusters either, something is
* wrong with our code. */
assert(nb_clusters > 0);
ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
if (ret < 0) {
return ret;
}
/* Allocate, if necessary at a given offset in the image file */
alloc_cluster_offset = start_of_cluster(s, *host_offset);
ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset,
&nb_clusters);
if (ret < 0) {
goto fail;
}
/* Can't extend contiguous allocation */
if (nb_clusters == 0) {
*bytes = 0;
return 0;
}
/*
* Save info needed for meta data update.
*
* requested_sectors: Number of sectors from the start of the first
* newly allocated cluster to the end of the (possibly shortened
* before) write request.
*
* avail_sectors: Number of sectors from the start of the first
* newly allocated to the end of the last newly allocated cluster.
*
* nb_sectors: The number of sectors from the start of the first
* newly allocated cluster to the end of the area that the write
* request actually writes to (excluding COW at the end)
*/
int requested_sectors =
(*bytes + offset_into_cluster(s, guest_offset))
>> BDRV_SECTOR_BITS;
int avail_sectors = nb_clusters
<< (s->cluster_bits - BDRV_SECTOR_BITS);
int alloc_n_start = offset_into_cluster(s, guest_offset)
>> BDRV_SECTOR_BITS;
int nb_sectors = MIN(requested_sectors, avail_sectors);
QCowL2Meta *old_m = *m;
*m = g_malloc0(sizeof(**m));
**m = (QCowL2Meta) {
.next = old_m,
.alloc_offset = alloc_cluster_offset,
.offset = start_of_cluster(s, guest_offset),
.nb_clusters = nb_clusters,
.nb_available = nb_sectors,
.cow_start = {
.offset = 0,
.nb_sectors = alloc_n_start,
},
.cow_end = {
.offset = nb_sectors * BDRV_SECTOR_SIZE,
.nb_sectors = avail_sectors - nb_sectors,
},
};
qemu_co_queue_init(&(*m)->dependent_requests);
QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);
*host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset);
*bytes = MIN(*bytes, (nb_sectors * BDRV_SECTOR_SIZE)
- offset_into_cluster(s, guest_offset));
assert(*bytes != 0);
return 1;
fail:
if (*m && (*m)->nb_clusters > 0) {
QLIST_REMOVE(*m, next_in_flight);
}
return ret;
} | 25,942
FFmpeg | 68bd11f5de271a1a674f196a9e8ca2e7fe40ab6e | 1 | static int rv10_decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
UINT8 *buf, int buf_size)
{
MpegEncContext *s = avctx->priv_data;
int i, mb_count, mb_pos, left;
DCTELEM block[6][64];
AVPicture *pict = data;
#ifdef DEBUG
printf("*****frame %d size=%d\n", avctx->frame_number, buf_size);
#endif
/* no supplementary picture */
if (buf_size == 0) {
*data_size = 0;
return 0;
}
init_get_bits(&s->gb, buf, buf_size);
mb_count = rv10_decode_picture_header(s);
if (mb_count < 0) {
#ifdef DEBUG
printf("HEADER ERROR\n");
#endif
return -1;
}
if (s->mb_x >= s->mb_width ||
s->mb_y >= s->mb_height) {
#ifdef DEBUG
printf("POS ERROR %d %d\n", s->mb_x, s->mb_y);
#endif
return -1;
}
mb_pos = s->mb_y * s->mb_width + s->mb_x;
left = s->mb_width * s->mb_height - mb_pos;
if (mb_count > left) {
#ifdef DEBUG
printf("COUNT ERROR\n");
#endif
return -1;
}
if (s->mb_x == 0 && s->mb_y == 0) {
MPV_frame_start(s, avctx);
}
#ifdef DEBUG
printf("qscale=%d\n", s->qscale);
#endif
/* default quantization values */
s->y_dc_scale = 8;
s->c_dc_scale = 8;
s->rv10_first_dc_coded[0] = 0;
s->rv10_first_dc_coded[1] = 0;
s->rv10_first_dc_coded[2] = 0;
s->block_wrap[0]=
s->block_wrap[1]=
s->block_wrap[2]=
s->block_wrap[3]= s->mb_width*2 + 2;
s->block_wrap[4]=
s->block_wrap[5]= s->mb_width + 2;
s->block_index[0]= s->block_wrap[0]*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
s->block_index[1]= s->block_wrap[0]*(s->mb_y*2 + 1) + s->mb_x*2;
s->block_index[2]= s->block_wrap[0]*(s->mb_y*2 + 2) - 1 + s->mb_x*2;
s->block_index[3]= s->block_wrap[0]*(s->mb_y*2 + 2) + s->mb_x*2;
s->block_index[4]= s->block_wrap[4]*(s->mb_y + 1) + s->block_wrap[0]*(s->mb_height*2 + 2) + s->mb_x;
s->block_index[5]= s->block_wrap[4]*(s->mb_y + 1 + s->mb_height + 2) + s->block_wrap[0]*(s->mb_height*2 + 2) + s->mb_x;
/* decode each macroblock */
for(i=0;i<mb_count;i++) {
s->block_index[0]+=2;
s->block_index[1]+=2;
s->block_index[2]+=2;
s->block_index[3]+=2;
s->block_index[4]++;
s->block_index[5]++;
#ifdef DEBUG
printf("**mb x=%d y=%d\n", s->mb_x, s->mb_y);
#endif
memset(block, 0, sizeof(block));
s->mv_dir = MV_DIR_FORWARD;
s->mv_type = MV_TYPE_16X16;
if (h263_decode_mb(s, block) < 0) {
#ifdef DEBUG
printf("ERROR\n");
#endif
return -1;
}
MPV_decode_mb(s, block);
if (++s->mb_x == s->mb_width) {
s->mb_x = 0;
s->mb_y++;
s->block_index[0]= s->block_wrap[0]*(s->mb_y*2 + 1) - 1;
s->block_index[1]= s->block_wrap[0]*(s->mb_y*2 + 1);
s->block_index[2]= s->block_wrap[0]*(s->mb_y*2 + 2) - 1;
s->block_index[3]= s->block_wrap[0]*(s->mb_y*2 + 2);
s->block_index[4]= s->block_wrap[4]*(s->mb_y + 1) + s->block_wrap[0]*(s->mb_height*2 + 2);
s->block_index[5]= s->block_wrap[4]*(s->mb_y + 1 + s->mb_height + 2) + s->block_wrap[0]*(s->mb_height*2 + 2);
}
}
if (s->mb_x == 0 &&
s->mb_y == s->mb_height) {
MPV_frame_end(s);
pict->data[0] = s->current_picture[0];
pict->data[1] = s->current_picture[1];
pict->data[2] = s->current_picture[2];
pict->linesize[0] = s->linesize;
pict->linesize[1] = s->uvlinesize;
pict->linesize[2] = s->uvlinesize;
avctx->quality = s->qscale;
*data_size = sizeof(AVPicture);
} else {
*data_size = 0;
}
return buf_size;
}
| 25,943 |
FFmpeg | dd1e6b2a139a9eea61aefe24fc3295499e70d04b | 0 | static int http_open(URLContext *h, const char *uri, int flags)
{
HTTPContext *s = h->priv_data;
h->is_streamed = 1;
s->filesize = -1;
av_strlcpy(s->location, uri, sizeof(s->location));
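/* User-supplied extra headers are expected to be CRLF-terminated; warn if they are not. */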
if (s->headers) {
int len = strlen(s->headers);
if (len < 2 || strcmp("\r\n", s->headers + len - 2))
av_log(h, AV_LOG_WARNING, "No trailing CRLF found in HTTP header.\n");
}
return http_open_cnx(h);
}
| 25,945 |
qemu | c31bc98e3bcf52fe1cd4b9b7a70869330eae80ea | 1 | static inline void softusb_read_pmem(MilkymistSoftUsbState *s,
uint32_t offset, uint8_t *buf, uint32_t len)
{
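/* Refuse transfers that would run past the end of the device's program memory. */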
if (offset + len >= s->pmem_size) {
error_report("milkymist_softusb: read pmem out of bounds "
"at offset 0x%x, len %d", offset, len);
return;
}
memcpy(buf, s->pmem_ptr + offset, len);
} | 25,946 |
FFmpeg | 3ee8eefbf2623e1e337df7d962412b0703336431 | 1 | int attribute_align_arg sws_scale(struct SwsContext *c,
const uint8_t * const srcSlice[],
const int srcStride[], int srcSliceY,
int srcSliceH, uint8_t *const dst[],
const int dstStride[])
{
int i, ret;
const uint8_t *src2[4] = { srcSlice[0], srcSlice[1], srcSlice[2], srcSlice[3] };
uint8_t *dst2[4] = { dst[0], dst[1], dst[2], dst[3] };
uint8_t *rgb0_tmp = NULL;
// do not mess up sliceDir if we have a "trailing" 0-size slice
if (srcSliceH == 0)
return 0;
if (!check_image_pointers(srcSlice, c->srcFormat, srcStride)) {
av_log(c, AV_LOG_ERROR, "bad src image pointers\n");
return 0;
}
if (!check_image_pointers((const uint8_t* const*)dst, c->dstFormat, dstStride)) {
av_log(c, AV_LOG_ERROR, "bad dst image pointers\n");
return 0;
}
if (c->sliceDir == 0 && srcSliceY != 0 && srcSliceY + srcSliceH != c->srcH) {
av_log(c, AV_LOG_ERROR, "Slices start in the middle!\n");
return 0;
}
if (c->sliceDir == 0) {
if (srcSliceY == 0) c->sliceDir = 1; else c->sliceDir = -1;
}
if (usePal(c->srcFormat)) {
for (i = 0; i < 256; i++) {
int p, r, g, b, y, u, v, a = 0xff;
if (c->srcFormat == AV_PIX_FMT_PAL8) {
p = ((const uint32_t *)(srcSlice[1]))[i];
a = (p >> 24) & 0xFF;
r = (p >> 16) & 0xFF;
g = (p >> 8) & 0xFF;
b = p & 0xFF;
} else if (c->srcFormat == AV_PIX_FMT_RGB8) {
r = ( i >> 5 ) * 36;
g = ((i >> 2) & 7) * 36;
b = ( i & 3) * 85;
} else if (c->srcFormat == AV_PIX_FMT_BGR8) {
b = ( i >> 6 ) * 85;
g = ((i >> 3) & 7) * 36;
r = ( i & 7) * 36;
} else if (c->srcFormat == AV_PIX_FMT_RGB4_BYTE) {
r = ( i >> 3 ) * 255;
g = ((i >> 1) & 3) * 85;
b = ( i & 1) * 255;
} else if (c->srcFormat == AV_PIX_FMT_GRAY8 || c->srcFormat == AV_PIX_FMT_GRAY8A) {
r = g = b = i;
} else {
av_assert1(c->srcFormat == AV_PIX_FMT_BGR4_BYTE);
b = ( i >> 3 ) * 255;
g = ((i >> 1) & 3) * 85;
r = ( i & 1) * 255;
}
#define RGB2YUV_SHIFT 15
#define BY ( (int) (0.114 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define BV (-(int) (0.081 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define BU ( (int) (0.500 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define GY ( (int) (0.587 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define GV (-(int) (0.419 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define GU (-(int) (0.331 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define RY ( (int) (0.299 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define RV ( (int) (0.500 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define RU (-(int) (0.169 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
y = av_clip_uint8((RY * r + GY * g + BY * b + ( 33 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
u = av_clip_uint8((RU * r + GU * g + BU * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
v = av_clip_uint8((RV * r + GV * g + BV * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
c->pal_yuv[i]= y + (u<<8) + (v<<16) + (a<<24);
switch (c->dstFormat) {
case AV_PIX_FMT_BGR32:
#if !HAVE_BIGENDIAN
case AV_PIX_FMT_RGB24:
#endif
c->pal_rgb[i]= r + (g<<8) + (b<<16) + (a<<24);
break;
case AV_PIX_FMT_BGR32_1:
#if HAVE_BIGENDIAN
case AV_PIX_FMT_BGR24:
#endif
c->pal_rgb[i]= a + (r<<8) + (g<<16) + (b<<24);
break;
case AV_PIX_FMT_RGB32_1:
#if HAVE_BIGENDIAN
case AV_PIX_FMT_RGB24:
#endif
c->pal_rgb[i]= a + (b<<8) + (g<<16) + (r<<24);
break;
case AV_PIX_FMT_RGB32:
#if !HAVE_BIGENDIAN
case AV_PIX_FMT_BGR24:
#endif
default:
c->pal_rgb[i]= b + (g<<8) + (r<<16) + (a<<24);
}
}
}
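/* If the source has an undefined alpha channel but the destination expects alpha, work on a temporary copy with the alpha bytes forced to 0xFF. */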
if (c->src0Alpha && !c->dst0Alpha && isALPHA(c->dstFormat)) {
uint8_t *base;
int x,y;
rgb0_tmp = av_malloc(FFABS(srcStride[0]) * srcSliceH + 32);
base = srcStride[0] < 0 ? rgb0_tmp - srcStride[0] * (srcSliceH-1) : rgb0_tmp;
for (y=0; y<srcSliceH; y++){
memcpy(base + srcStride[0]*y, src2[0] + srcStride[0]*y, 4*c->srcW);
for (x=c->src0Alpha-1; x<4*c->srcW; x+=4) {
base[ srcStride[0]*y + x] = 0xFF;
}
}
src2[0] = base;
}
// copy strides, so they can safely be modified
if (c->sliceDir == 1) {
// slices go from top to bottom
int srcStride2[4] = { srcStride[0], srcStride[1], srcStride[2],
srcStride[3] };
int dstStride2[4] = { dstStride[0], dstStride[1], dstStride[2],
dstStride[3] };
reset_ptr(src2, c->srcFormat);
reset_ptr((void*)dst2, c->dstFormat);
/* reset slice direction at end of frame */
if (srcSliceY + srcSliceH == c->srcH)
c->sliceDir = 0;
ret = c->swScale(c, src2, srcStride2, srcSliceY, srcSliceH, dst2,
dstStride2);
} else {
// slices go from bottom to top => we flip the image internally
int srcStride2[4] = { -srcStride[0], -srcStride[1], -srcStride[2],
-srcStride[3] };
int dstStride2[4] = { -dstStride[0], -dstStride[1], -dstStride[2],
-dstStride[3] };
src2[0] += (srcSliceH - 1) * srcStride[0];
if (!usePal(c->srcFormat))
src2[1] += ((srcSliceH >> c->chrSrcVSubSample) - 1) * srcStride[1];
src2[2] += ((srcSliceH >> c->chrSrcVSubSample) - 1) * srcStride[2];
src2[3] += (srcSliceH - 1) * srcStride[3];
dst2[0] += ( c->dstH - 1) * dstStride[0];
dst2[1] += ((c->dstH >> c->chrDstVSubSample) - 1) * dstStride[1];
dst2[2] += ((c->dstH >> c->chrDstVSubSample) - 1) * dstStride[2];
dst2[3] += ( c->dstH - 1) * dstStride[3];
reset_ptr(src2, c->srcFormat);
reset_ptr((void*)dst2, c->dstFormat);
/* reset slice direction at end of frame */
if (!srcSliceY)
c->sliceDir = 0;
ret = c->swScale(c, src2, srcStride2, c->srcH-srcSliceY-srcSliceH,
srcSliceH, dst2, dstStride2);
}
av_free(rgb0_tmp);
return ret;
}
| 25,949 |
FFmpeg | 9d66aa2c8fa60fe4a570021175ce66316baeb746 | 1 | static av_cold int MPA_encode_init(AVCodecContext *avctx)
{
MpegAudioContext *s = avctx->priv_data;
int freq = avctx->sample_rate;
int bitrate = avctx->bit_rate;
int channels = avctx->channels;
int i, v, table;
float a;
if (channels <= 0 || channels > 2){
av_log(avctx, AV_LOG_ERROR, "encoding %d channel(s) is not allowed in mp2\n", channels);
return AVERROR(EINVAL);
}
bitrate = bitrate / 1000;
s->nb_channels = channels;
avctx->frame_size = MPA_FRAME_SIZE;
avctx->delay = 512 - 32 + 1;
/* encoding freq */
s->lsf = 0;
for(i=0;i<3;i++) {
if (avpriv_mpa_freq_tab[i] == freq)
break;
if ((avpriv_mpa_freq_tab[i] / 2) == freq) {
s->lsf = 1;
break;
}
}
if (i == 3){
av_log(avctx, AV_LOG_ERROR, "Sampling rate %d is not allowed in mp2\n", freq);
return AVERROR(EINVAL);
}
s->freq_index = i;
/* encoding bitrate & frequency */
for(i=0;i<15;i++) {
if (avpriv_mpa_bitrate_tab[s->lsf][1][i] == bitrate)
break;
}
if (i == 15){
av_log(avctx, AV_LOG_ERROR, "bitrate %d is not allowed in mp2\n", bitrate);
return AVERROR(EINVAL);
}
s->bitrate_index = i;
/* compute total header size & pad bit */
a = (float)(bitrate * 1000 * MPA_FRAME_SIZE) / (freq * 8.0);
s->frame_size = ((int)a) * 8;
/* frame fractional size to compute padding */
s->frame_frac = 0;
s->frame_frac_incr = (int)((a - floor(a)) * 65536.0);
/* select the right allocation table */
table = ff_mpa_l2_select_table(bitrate, s->nb_channels, freq, s->lsf);
/* number of used subbands */
s->sblimit = ff_mpa_sblimit_table[table];
s->alloc_table = ff_mpa_alloc_tables[table];
av_dlog(avctx, "%d kb/s, %d Hz, frame_size=%d bits, table=%d, padincr=%x\n",
bitrate, freq, s->frame_size, table, s->frame_frac_incr);
for(i=0;i<s->nb_channels;i++)
s->samples_offset[i] = 0;
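/* Build the 512-tap analysis window from the 257 stored coefficients; the upper half is mirrored with flipped sign except at multiples of 64. */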
for(i=0;i<257;i++) {
int v;
v = ff_mpa_enwindow[i];
#if WFRAC_BITS != 16
v = (v + (1 << (16 - WFRAC_BITS - 1))) >> (16 - WFRAC_BITS);
#endif
s->filter_bank[i] = v;
if ((i & 63) != 0)
v = -v;
if (i != 0)
s->filter_bank[512 - i] = v;
}
for(i=0;i<64;i++) {
v = (int)(exp2((3 - i) / 3.0) * (1 << 20));
if (v <= 0)
v = 1;
s->scale_factor_table[i] = v;
#if USE_FLOATS
s->scale_factor_inv_table[i] = exp2(-(3 - i) / 3.0) / (float)(1 << 20);
#else
#define P 15
s->scale_factor_shift[i] = 21 - P - (i / 3);
s->scale_factor_mult[i] = (1 << P) * exp2((i % 3) / 3.0);
#endif
}
for(i=0;i<128;i++) {
v = i - 64;
if (v <= -3)
v = 0;
else if (v < 0)
v = 1;
else if (v == 0)
v = 2;
else if (v < 3)
v = 3;
else
v = 4;
s->scale_diff_table[i] = v;
}
for(i=0;i<17;i++) {
v = ff_mpa_quant_bits[i];
if (v < 0)
v = -v;
else
v = v * 3;
s->total_quant_bits[i] = 12 * v;
}
return 0;
}
| 25,950 |
FFmpeg | a66dcfeedc68c080965cf78e1e0694967acef5af | 1 | static av_cold int vc1_decode_init(AVCodecContext *avctx)
{
VC1Context *v = avctx->priv_data;
MpegEncContext *s = &v->s;
GetBitContext gb;
int ret;
/* save the container output size for WMImage */
v->output_width = avctx->width;
v->output_height = avctx->height;
if (!avctx->extradata_size || !avctx->extradata)
return -1;
if (!(avctx->flags & CODEC_FLAG_GRAY))
avctx->pix_fmt = ff_get_format(avctx, avctx->codec->pix_fmts);
else
avctx->pix_fmt = AV_PIX_FMT_GRAY8;
v->s.avctx = avctx;
if ((ret = ff_vc1_init_common(v)) < 0)
return ret;
// ensure static VLC tables are initialized
if ((ret = ff_msmpeg4_decode_init(avctx)) < 0)
return ret;
if ((ret = ff_vc1_decode_init_alloc_tables(v)) < 0)
return ret;
// Hack to ensure the above functions will be called
// again once we know all necessary settings.
// That this is necessary might indicate a bug.
ff_vc1_decode_end(avctx);
ff_blockdsp_init(&s->bdsp, avctx);
ff_h264chroma_init(&v->h264chroma, 8);
ff_qpeldsp_init(&s->qdsp);
if (avctx->codec_id == AV_CODEC_ID_WMV3 || avctx->codec_id == AV_CODEC_ID_WMV3IMAGE) {
int count = 0;
// looks like WMV3 has a sequence header stored in the extradata
// advanced sequence header may be before the first frame
// the last byte of the extradata is a version number, 1 for the
// samples we can decode
init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8);
if ((ret = ff_vc1_decode_sequence_header(avctx, v, &gb)) < 0)
return ret;
count = avctx->extradata_size*8 - get_bits_count(&gb);
if (count > 0) {
av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
count, get_bits(&gb, count));
} else if (count < 0) {
av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
}
} else { // VC1/WVC1/WVP2
const uint8_t *start = avctx->extradata;
uint8_t *end = avctx->extradata + avctx->extradata_size;
const uint8_t *next;
int size, buf2_size;
uint8_t *buf2 = NULL;
int seq_initialized = 0, ep_initialized = 0;
if (avctx->extradata_size < 16) {
av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", avctx->extradata_size);
return -1;
}
buf2 = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
start = find_next_marker(start, end); // in WVC1 extradata first byte is its size, but can be 0 in mkv
next = start;
for (; next < end; start = next) {
next = find_next_marker(start + 4, end);
size = next - start - 4;
if (size <= 0)
continue;
buf2_size = vc1_unescape_buffer(start + 4, size, buf2);
init_get_bits(&gb, buf2, buf2_size * 8);
switch (AV_RB32(start)) {
case VC1_CODE_SEQHDR:
if ((ret = ff_vc1_decode_sequence_header(avctx, v, &gb)) < 0) {
av_free(buf2);
return ret;
}
seq_initialized = 1;
break;
case VC1_CODE_ENTRYPOINT:
if ((ret = ff_vc1_decode_entry_point(avctx, v, &gb)) < 0) {
av_free(buf2);
return ret;
}
ep_initialized = 1;
break;
}
}
av_free(buf2);
if (!seq_initialized || !ep_initialized) {
av_log(avctx, AV_LOG_ERROR, "Incomplete extradata\n");
return -1;
}
v->res_sprite = (avctx->codec_id == AV_CODEC_ID_VC1IMAGE);
}
v->sprite_output_frame = av_frame_alloc();
if (!v->sprite_output_frame)
return AVERROR(ENOMEM);
avctx->profile = v->profile;
if (v->profile == PROFILE_ADVANCED)
avctx->level = v->level;
avctx->has_b_frames = !!avctx->max_b_frames;
if (v->color_prim == 1 || v->color_prim == 5 || v->color_prim == 6)
avctx->color_primaries = v->color_prim;
if (v->transfer_char == 1 || v->transfer_char == 7)
avctx->color_trc = v->transfer_char;
if (v->matrix_coef == 1 || v->matrix_coef == 6 || v->matrix_coef == 7)
avctx->colorspace = v->matrix_coef;
s->mb_width = (avctx->coded_width + 15) >> 4;
s->mb_height = (avctx->coded_height + 15) >> 4;
if (v->profile == PROFILE_ADVANCED || v->res_fasttx) {
ff_vc1_init_transposed_scantables(v);
} else {
memcpy(v->zz_8x8, ff_wmv1_scantable, 4*64);
v->left_blk_sh = 3;
v->top_blk_sh = 0;
}
if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
v->sprite_width = avctx->coded_width;
v->sprite_height = avctx->coded_height;
avctx->coded_width = avctx->width = v->output_width;
avctx->coded_height = avctx->height = v->output_height;
// prevent 16.16 overflows
if (v->sprite_width > 1 << 14 ||
v->sprite_height > 1 << 14 ||
v->output_width > 1 << 14 ||
v->output_height > 1 << 14) return -1;
if ((v->sprite_width&1) || (v->sprite_height&1)) {
avpriv_request_sample(avctx, "odd sprites support");
return AVERROR_PATCHWELCOME;
}
}
return 0;
} | 25,952 |
FFmpeg | 4d388c0cd05dd4de545e8ea333ab4de7d67ad12d | 1 | int ff_h264_fill_default_ref_list(H264Context *h)
{
int i, len;
if (h->slice_type_nos == AV_PICTURE_TYPE_B) {
Picture *sorted[32];
int cur_poc, list;
int lens[2];
if (FIELD_PICTURE(h))
cur_poc = h->cur_pic_ptr->field_poc[h->picture_structure == PICT_BOTTOM_FIELD];
else
cur_poc = h->cur_pic_ptr->poc;
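/* For B slices, each list starts with short-term refs ordered by POC distance (before/after the current picture, order swapped for list 1), followed by long-term refs. */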
for (list = 0; list < 2; list++) {
len = add_sorted(sorted, h->short_ref, h->short_ref_count, cur_poc, 1 ^ list);
len += add_sorted(sorted + len, h->short_ref, h->short_ref_count, cur_poc, 0 ^ list);
assert(len <= 32);
len = build_def_list(h->default_ref_list[list], sorted, len, 0, h->picture_structure);
len += build_def_list(h->default_ref_list[list] + len, h->long_ref, 16, 1, h->picture_structure);
assert(len <= 32);
if (len < h->ref_count[list])
memset(&h->default_ref_list[list][len], 0, sizeof(Picture) * (h->ref_count[list] - len));
lens[list] = len;
}
if (lens[0] == lens[1] && lens[1] > 1) {
for (i = 0; i < lens[0] &&
h->default_ref_list[0][i].f.buf[0]->buffer ==
h->default_ref_list[1][i].f.buf[0]->buffer; i++);
if (i == lens[0]) {
Picture tmp;
COPY_PICTURE(&tmp, &h->default_ref_list[1][0]);
COPY_PICTURE(&h->default_ref_list[1][0], &h->default_ref_list[1][1]);
COPY_PICTURE(&h->default_ref_list[1][1], &tmp);
}
}
} else {
len = build_def_list(h->default_ref_list[0], h->short_ref, h->short_ref_count, 0, h->picture_structure);
len += build_def_list(h->default_ref_list[0] + len, h-> long_ref, 16, 1, h->picture_structure);
assert(len <= 32);
if (len < h->ref_count[0])
memset(&h->default_ref_list[0][len], 0, sizeof(Picture) * (h->ref_count[0] - len));
}
#ifdef TRACE
for (i = 0; i < h->ref_count[0]; i++) {
tprintf(h->avctx, "List0: %s fn:%d 0x%p\n",
(h->default_ref_list[0][i].long_ref ? "LT" : "ST"),
h->default_ref_list[0][i].pic_id,
h->default_ref_list[0][i].f.data[0]);
}
if (h->slice_type_nos == AV_PICTURE_TYPE_B) {
for (i = 0; i < h->ref_count[1]; i++) {
tprintf(h->avctx, "List1: %s fn:%d 0x%p\n",
(h->default_ref_list[1][i].long_ref ? "LT" : "ST"),
h->default_ref_list[1][i].pic_id,
h->default_ref_list[1][i].f.data[0]);
}
}
#endif
return 0;
}
| 25,953 |
FFmpeg | b4356e4118b6cbe3a6ed81d16369acc5ff40ad05 | 1 | static void start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
{
AVFilterContext *ctx = link->dst;
CropContext *crop = ctx->priv;
AVFilterBufferRef *ref2;
int i;
picref->video->w = crop->w;
picref->video->h = crop->h;
ref2 = avfilter_ref_buffer(picref, ~0);
crop->var_values[VAR_T] = picref->pts == AV_NOPTS_VALUE ?
NAN : picref->pts * av_q2d(link->time_base);
crop->var_values[VAR_POS] = picref->pos == -1 ? NAN : picref->pos;
crop->var_values[VAR_X] = av_expr_eval(crop->x_pexpr, crop->var_values, NULL);
crop->var_values[VAR_Y] = av_expr_eval(crop->y_pexpr, crop->var_values, NULL);
crop->var_values[VAR_X] = av_expr_eval(crop->x_pexpr, crop->var_values, NULL);
normalize_double(&crop->x, crop->var_values[VAR_X]);
normalize_double(&crop->y, crop->var_values[VAR_Y]);
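/* Clamp the evaluated offsets so the crop window stays inside the input frame and align them to the chroma subsampling grid. */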
if (crop->x < 0) crop->x = 0;
if (crop->y < 0) crop->y = 0;
if ((unsigned)crop->x + (unsigned)crop->w > link->w) crop->x = link->w - crop->w;
if ((unsigned)crop->y + (unsigned)crop->h > link->h) crop->y = link->h - crop->h;
crop->x &= ~((1 << crop->hsub) - 1);
crop->y &= ~((1 << crop->vsub) - 1);
av_log(ctx, AV_LOG_DEBUG,
"n:%d t:%f x:%d y:%d x+w:%d y+h:%d\n",
(int)crop->var_values[VAR_N], crop->var_values[VAR_T], crop->x, crop->y, crop->x+crop->w, crop->y+crop->h);
ref2->data[0] += crop->y * ref2->linesize[0];
ref2->data[0] += crop->x * crop->max_step[0];
if (!(av_pix_fmt_descriptors[link->format].flags & PIX_FMT_PAL)) {
for (i = 1; i < 3; i ++) {
if (ref2->data[i]) {
ref2->data[i] += (crop->y >> crop->vsub) * ref2->linesize[i];
ref2->data[i] += (crop->x * crop->max_step[i]) >> crop->hsub;
}
}
}
/* alpha plane */
if (ref2->data[3]) {
ref2->data[3] += crop->y * ref2->linesize[3];
ref2->data[3] += crop->x * crop->max_step[3];
}
avfilter_start_frame(link->dst->outputs[0], ref2);
} | 25,954 |
qemu | 9b2fadda3e0196ffd485adde4fe9cdd6fae35300 | 1 | static void gen_tlbsx_440(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
TCGv t0;
if (unlikely(ctx->pr)) {
gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
gen_helper_440_tlbsx(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
tcg_temp_free(t0);
if (Rc(ctx->opcode)) {
TCGLabel *l1 = gen_new_label();
tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[rD(ctx->opcode)], -1, l1);
tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 0x02);
gen_set_label(l1);
}
#endif
}
| 25,955 |
qemu | 111049a4ecefc9cf1ac75c773f4c5c165f27fe63 | 1 | static void blockdev_backup_prepare(BlkActionState *common, Error **errp)
{
BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common);
BlockdevBackup *backup;
BlockDriverState *bs, *target;
Error *local_err = NULL;
assert(common->action->type == TRANSACTION_ACTION_KIND_BLOCKDEV_BACKUP);
backup = common->action->u.blockdev_backup.data;
bs = qmp_get_root_bs(backup->device, errp);
if (!bs) {
return;
}
target = bdrv_lookup_bs(backup->target, backup->target, errp);
if (!target) {
return;
}
/* AioContext is released in .clean() */
state->aio_context = bdrv_get_aio_context(bs);
if (state->aio_context != bdrv_get_aio_context(target)) {
state->aio_context = NULL;
error_setg(errp, "Backup between two IO threads is not implemented");
return;
}
aio_context_acquire(state->aio_context);
state->bs = bs;
bdrv_drained_begin(state->bs);
do_blockdev_backup(backup, common->block_job_txn, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
}
state->job = state->bs->job;
}
| 25,956 |
qemu | df83eabd5245828cbca32060aa191d8b03bc5d50 | 1 | static int guest_get_network_stats(const char *name,
GuestNetworkInterfaceStat *stats)
{
DWORD if_index = 0;
MIB_IFROW a_mid_ifrow;
memset(&a_mid_ifrow, 0, sizeof(a_mid_ifrow));
if_index = get_interface_index(name);
a_mid_ifrow.dwIndex = if_index;
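/* Fetch the Win32 MIB row for this interface and copy its counters into the guest-agent stats structure. */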
if (NO_ERROR == GetIfEntry(&a_mid_ifrow)) {
stats->rx_bytes = a_mid_ifrow.dwInOctets;
stats->rx_packets = a_mid_ifrow.dwInUcastPkts;
stats->rx_errs = a_mid_ifrow.dwInErrors;
stats->rx_dropped = a_mid_ifrow.dwInDiscards;
stats->tx_bytes = a_mid_ifrow.dwOutOctets;
stats->tx_packets = a_mid_ifrow.dwOutUcastPkts;
stats->tx_errs = a_mid_ifrow.dwOutErrors;
stats->tx_dropped = a_mid_ifrow.dwOutDiscards;
return 0;
}
return -1;
}
| 25,957 |
qemu | a8170e5e97ad17ca169c64ba87ae2f53850dab4c | 0 | void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
{
CPUARMState *env = &cpu->env;
int kernel_size;
int initrd_size;
int n;
int is_linux = 0;
uint64_t elf_entry;
target_phys_addr_t entry;
int big_endian;
QemuOpts *machine_opts;
/* Load the kernel. */
if (!info->kernel_filename) {
fprintf(stderr, "Kernel image must be specified\n");
exit(1);
}
machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
if (machine_opts) {
info->dtb_filename = qemu_opt_get(machine_opts, "dtb");
} else {
info->dtb_filename = NULL;
}
if (!info->secondary_cpu_reset_hook) {
info->secondary_cpu_reset_hook = default_reset_secondary;
}
if (!info->write_secondary_boot) {
info->write_secondary_boot = default_write_secondary;
}
if (info->nb_cpus == 0)
info->nb_cpus = 1;
#ifdef TARGET_WORDS_BIGENDIAN
big_endian = 1;
#else
big_endian = 0;
#endif
/* Assume that raw images are linux kernels, and ELF images are not. */
kernel_size = load_elf(info->kernel_filename, NULL, NULL, &elf_entry,
NULL, NULL, big_endian, ELF_MACHINE, 1);
entry = elf_entry;
if (kernel_size < 0) {
kernel_size = load_uimage(info->kernel_filename, &entry, NULL,
&is_linux);
}
if (kernel_size < 0) {
entry = info->loader_start + KERNEL_LOAD_ADDR;
kernel_size = load_image_targphys(info->kernel_filename, entry,
info->ram_size - KERNEL_LOAD_ADDR);
is_linux = 1;
}
if (kernel_size < 0) {
fprintf(stderr, "qemu: could not load kernel '%s'\n",
info->kernel_filename);
exit(1);
}
info->entry = entry;
if (is_linux) {
if (info->initrd_filename) {
initrd_size = load_image_targphys(info->initrd_filename,
info->loader_start
+ INITRD_LOAD_ADDR,
info->ram_size
- INITRD_LOAD_ADDR);
if (initrd_size < 0) {
fprintf(stderr, "qemu: could not load initrd '%s'\n",
info->initrd_filename);
exit(1);
}
} else {
initrd_size = 0;
}
info->initrd_size = initrd_size;
bootloader[4] = info->board_id;
/* for device tree boot, we pass the DTB directly in r2. Otherwise
* we point to the kernel args.
*/
if (info->dtb_filename) {
/* Place the DTB after the initrd in memory */
target_phys_addr_t dtb_start = TARGET_PAGE_ALIGN(info->loader_start
+ INITRD_LOAD_ADDR
+ initrd_size);
if (load_dtb(dtb_start, info)) {
exit(1);
}
bootloader[5] = dtb_start;
} else {
bootloader[5] = info->loader_start + KERNEL_ARGS_ADDR;
if (info->ram_size >= (1ULL << 32)) {
fprintf(stderr, "qemu: RAM size must be less than 4GB to boot"
" Linux kernel using ATAGS (try passing a device tree"
" using -dtb)\n");
exit(1);
}
}
bootloader[6] = entry;
for (n = 0; n < sizeof(bootloader) / 4; n++) {
bootloader[n] = tswap32(bootloader[n]);
}
rom_add_blob_fixed("bootloader", bootloader, sizeof(bootloader),
info->loader_start);
if (info->nb_cpus > 1) {
info->write_secondary_boot(cpu, info);
}
}
info->is_linux = is_linux;
for (; env; env = env->next_cpu) {
cpu = arm_env_get_cpu(env);
env->boot_info = info;
qemu_register_reset(do_cpu_reset, cpu);
}
}
| 25,959 |
qemu | db39fcf1f690b02d612e2bfc00980700887abe03 | 0 | static CharDriverState *qemu_chr_open_null(void)
{
CharDriverState *chr;
chr = g_malloc0(sizeof(CharDriverState));
chr->chr_write = null_chr_write;
chr->explicit_be_open = true;
return chr;
}
| 25,960 |
qemu | 028b0da424ba85049557c61f9f0a8a6698352b41 | 0 | static bool nvic_rettobase(NVICState *s)
{
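/* Exception return goes back to base level only while at most one handler is active, so stop counting as soon as a second active vector is seen. */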
int irq, nhand = 0;
for (irq = ARMV7M_EXCP_RESET; irq < s->num_irq; irq++) {
if (s->vectors[irq].active) {
nhand++;
if (nhand == 2) {
return 0;
}
}
}
return 1;
}
| 25,961 |
qemu | 7e01376daea75e888c370aab521a7d4aeaf2ffd1 | 0 | void ioinst_handle_xsch(S390CPU *cpu, uint64_t reg1)
{
int cssid, ssid, schid, m;
SubchDev *sch;
int ret = -ENODEV;
int cc;
if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
program_interrupt(&cpu->env, PGM_OPERAND, 2);
return;
}
trace_ioinst_sch_id("xsch", cssid, ssid, schid);
sch = css_find_subch(m, cssid, ssid, schid);
if (sch && css_subch_visible(sch)) {
ret = css_do_xsch(sch);
}
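/* Map the channel-subsystem layer's result onto the instruction's condition code. */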
switch (ret) {
case -ENODEV:
cc = 3;
break;
case -EBUSY:
cc = 2;
break;
case 0:
cc = 0;
break;
default:
cc = 1;
break;
}
setcc(cpu, cc);
}
| 25,963 |
qemu | 37f51384ae05bd50f83308339dbffa3e78404874 | 0 | static int vtd_dev_to_context_entry(IntelIOMMUState *s, uint8_t bus_num,
uint8_t devfn, VTDContextEntry *ce)
{
VTDRootEntry re;
int ret_fr;
X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);
ret_fr = vtd_get_root_entry(s, bus_num, &re);
if (ret_fr) {
return ret_fr;
}
if (!vtd_root_entry_present(&re)) {
/* Not error - it's okay we don't have root entry. */
trace_vtd_re_not_present(bus_num);
return -VTD_FR_ROOT_ENTRY_P;
}
if (re.rsvd || (re.val & VTD_ROOT_ENTRY_RSVD(VTD_HOST_ADDRESS_WIDTH))) {
trace_vtd_re_invalid(re.rsvd, re.val);
return -VTD_FR_ROOT_ENTRY_RSVD;
}
ret_fr = vtd_get_context_entry_from_root(&re, devfn, ce);
if (ret_fr) {
return ret_fr;
}
if (!vtd_ce_present(ce)) {
/* Not error - it's okay we don't have context entry. */
trace_vtd_ce_not_present(bus_num, devfn);
return -VTD_FR_CONTEXT_ENTRY_P;
}
if ((ce->hi & VTD_CONTEXT_ENTRY_RSVD_HI) ||
(ce->lo & VTD_CONTEXT_ENTRY_RSVD_LO(VTD_HOST_ADDRESS_WIDTH))) {
trace_vtd_ce_invalid(ce->hi, ce->lo);
return -VTD_FR_CONTEXT_ENTRY_RSVD;
}
/* Check if the programming of context-entry is valid */
if (!vtd_is_level_supported(s, vtd_ce_get_level(ce))) {
trace_vtd_ce_invalid(ce->hi, ce->lo);
return -VTD_FR_CONTEXT_ENTRY_INV;
}
/* Do translation type check */
if (!vtd_ce_type_check(x86_iommu, ce)) {
trace_vtd_ce_invalid(ce->hi, ce->lo);
return -VTD_FR_CONTEXT_ENTRY_INV;
}
return 0;
}
| 25,964 |
qemu | 3b22c4707decb706b10ce023534f8b79413ff9fe | 0 | static void do_interrupt_real(int intno, int is_int, int error_code,
unsigned int next_eip)
{
SegmentCache *dt;
uint8_t *ptr, *ssp;
int selector;
uint32_t offset, esp;
uint32_t old_cs, old_eip;
/* real mode (simpler !) */
dt = &env->idt;
if (intno * 4 + 3 > dt->limit)
raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
ptr = dt->base + intno * 4;
offset = lduw(ptr);
selector = lduw(ptr + 2);
esp = env->regs[R_ESP];
ssp = env->segs[R_SS].base;
if (is_int)
old_eip = next_eip;
else
old_eip = env->eip;
old_cs = env->segs[R_CS].selector;
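/* Push FLAGS, CS and IP as 16-bit values on the real-mode stack. */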
esp -= 2;
stw(ssp + (esp & 0xffff), compute_eflags());
esp -= 2;
stw(ssp + (esp & 0xffff), old_cs);
esp -= 2;
stw(ssp + (esp & 0xffff), old_eip);
/* update processor state */
env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
env->eip = offset;
env->segs[R_CS].selector = selector;
env->segs[R_CS].base = (uint8_t *)(selector << 4);
env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
| 25,965 |
FFmpeg | 3ee8ca9b0894df3aaf5086c643283cb58ef9763d | 0 | void ff_ass_init(AVSubtitle *sub)
{
memset(sub, 0, sizeof(*sub));
}
| 25,966 |
qemu | 2374e73edafff0586cbfb67c333c5a7588f81fd5 | 0 | uint64_t helper_stl_c_raw(uint64_t t0, uint64_t t1)
{
uint64_t ret;
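/* Emulated store-conditional: the store succeeds (result 0) only while env->lock still holds the locked address; the lock is reset afterwards either way. */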
if (t1 == env->lock) {
stl_raw(t1, t0);
ret = 0;
} else
ret = 1;
env->lock = 1;
return ret;
}
| 25,967 |
qemu | 9be385980d37e8f4fd33f605f5fb1c3d144170a8 | 0 | int AUD_read (SWVoiceIn *sw, void *buf, int size)
{
int bytes;
if (!sw) {
/* XXX: Consider options */
return size;
}
if (!sw->hw->enabled) {
dolog ("Reading from disabled voice %s\n", SW_NAME (sw));
return 0;
}
bytes = sw->hw->pcm_ops->read (sw, buf, size);
return bytes;
}
| 25,968 |
qemu | 7bd427d801e1e3293a634d3c83beadaa90ffb911 | 0 | QEMUFile *qemu_fopen_ops_buffered(void *opaque,
size_t bytes_per_sec,
BufferedPutFunc *put_buffer,
BufferedPutReadyFunc *put_ready,
BufferedWaitForUnfreezeFunc *wait_for_unfreeze,
BufferedCloseFunc *close)
{
QEMUFileBuffered *s;
s = qemu_mallocz(sizeof(*s));
s->opaque = opaque;
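/* Allow one tenth of the per-second budget per rate-limit tick; the timer below is armed with a 100 ms timeout. */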
s->xfer_limit = bytes_per_sec / 10;
s->put_buffer = put_buffer;
s->put_ready = put_ready;
s->wait_for_unfreeze = wait_for_unfreeze;
s->close = close;
s->file = qemu_fopen_ops(s, buffered_put_buffer, NULL,
buffered_close, buffered_rate_limit,
buffered_set_rate_limit,
buffered_get_rate_limit);
s->timer = qemu_new_timer(rt_clock, buffered_rate_tick, s);
qemu_mod_timer(s->timer, qemu_get_clock(rt_clock) + 100);
return s->file;
}
| 25,969 |
qemu | 8297be80f7cf71e09617669a8bd8b2836dcfd4c3 | 0 | int keysym2scancode(void *kbd_layout, int keysym)
{
kbd_layout_t *k = kbd_layout;
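/* Ordinary keysyms are a direct table lookup; larger keysyms (and ISO_Left_Tab, remapped to Tab) fall back to a linear scan of the extra mappings. */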
if (keysym < MAX_NORMAL_KEYCODE) {
if (k->keysym2keycode[keysym] == 0) {
trace_keymap_unmapped(keysym);
fprintf(stderr, "Warning: no scancode found for keysym %d\n",
keysym);
}
return k->keysym2keycode[keysym];
} else {
int i;
#ifdef XK_ISO_Left_Tab
if (keysym == XK_ISO_Left_Tab) {
keysym = XK_Tab;
}
#endif
for (i = 0; i < k->extra_count; i++) {
if (k->keysym2keycode_extra[i].keysym == keysym) {
return k->keysym2keycode_extra[i].keycode;
}
}
}
return 0;
}
| 25,970 |
qemu | cd42d5b23691ad73edfd6dbcfc935a960a9c5a65 | 0 | gen_intermediate_code_internal(M68kCPU *cpu, TranslationBlock *tb,
bool search_pc)
{
CPUState *cs = CPU(cpu);
CPUM68KState *env = &cpu->env;
DisasContext dc1, *dc = &dc1;
uint16_t *gen_opc_end;
CPUBreakpoint *bp;
int j, lj;
target_ulong pc_start;
int pc_offset;
int num_insns;
int max_insns;
/* generate intermediate code */
pc_start = tb->pc;
dc->tb = tb;
gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
dc->env = env;
dc->is_jmp = DISAS_NEXT;
dc->pc = pc_start;
dc->cc_op = CC_OP_DYNAMIC;
dc->singlestep_enabled = cs->singlestep_enabled;
dc->fpcr = env->fpcr;
dc->user = (env->sr & SR_S) == 0;
dc->is_mem = 0;
dc->done_mac = 0;
lj = -1;
num_insns = 0;
max_insns = tb->cflags & CF_COUNT_MASK;
if (max_insns == 0)
max_insns = CF_COUNT_MASK;
gen_tb_start();
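/* Translate one instruction per iteration until a jump is emitted, the TB or op buffer fills up, or single-stepping forces an early exit. */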
do {
pc_offset = dc->pc - pc_start;
gen_throws_exception = NULL;
if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == dc->pc) {
gen_exception(dc, dc->pc, EXCP_DEBUG);
dc->is_jmp = DISAS_JUMP;
break;
}
}
if (dc->is_jmp)
break;
}
if (search_pc) {
j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
if (lj < j) {
lj++;
while (lj < j)
tcg_ctx.gen_opc_instr_start[lj++] = 0;
}
tcg_ctx.gen_opc_pc[lj] = dc->pc;
tcg_ctx.gen_opc_instr_start[lj] = 1;
tcg_ctx.gen_opc_icount[lj] = num_insns;
}
if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
gen_io_start();
dc->insn_pc = dc->pc;
disas_m68k_insn(env, dc);
num_insns++;
} while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
!cs->singlestep_enabled &&
!singlestep &&
(pc_offset) < (TARGET_PAGE_SIZE - 32) &&
num_insns < max_insns);
if (tb->cflags & CF_LAST_IO)
gen_io_end();
if (unlikely(cs->singlestep_enabled)) {
/* Make sure the pc is updated, and raise a debug exception. */
if (!dc->is_jmp) {
gen_flush_cc_op(dc);
tcg_gen_movi_i32(QREG_PC, dc->pc);
}
gen_helper_raise_exception(cpu_env, tcg_const_i32(EXCP_DEBUG));
} else {
switch(dc->is_jmp) {
case DISAS_NEXT:
gen_flush_cc_op(dc);
gen_jmp_tb(dc, 0, dc->pc);
break;
default:
case DISAS_JUMP:
case DISAS_UPDATE:
gen_flush_cc_op(dc);
/* indicate that the hash table must be used to find the next TB */
tcg_gen_exit_tb(0);
break;
case DISAS_TB_JUMP:
/* nothing more to generate */
break;
}
}
gen_tb_end(tb, num_insns);
*tcg_ctx.gen_opc_ptr = INDEX_op_end;
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
qemu_log("----------------\n");
qemu_log("IN: %s\n", lookup_symbol(pc_start));
log_target_disas(env, pc_start, dc->pc - pc_start, 0);
qemu_log("\n");
}
#endif
if (search_pc) {
j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
lj++;
while (lj <= j)
tcg_ctx.gen_opc_instr_start[lj++] = 0;
} else {
tb->size = dc->pc - pc_start;
tb->icount = num_insns;
}
//optimize_flags();
//expand_target_qops();
}
| 25,971 |
qemu | a3251186fc6a04d421e9c4b65aa04ec32379ec38 | 0 | static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
{
TCGv t0, t1;
int size, shift;
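/* Lazily derive only the carry flag from the recorded cc_op: each case returns a CCPrepare describing a comparison or mask test, and only the dynamic case falls back to the full cc helper. */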
switch (s->cc_op) {
case CC_OP_SUBB ... CC_OP_SUBQ:
/* (DATA_TYPE)(CC_DST + CC_SRC) < (DATA_TYPE)CC_SRC */
size = s->cc_op - CC_OP_SUBB;
t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
/* If no temporary was used, be careful not to alias t1 and t0. */
t0 = TCGV_EQUAL(t1, cpu_cc_src) ? cpu_tmp0 : reg;
tcg_gen_add_tl(t0, cpu_cc_dst, cpu_cc_src);
gen_extu(size, t0);
goto add_sub;
case CC_OP_ADDB ... CC_OP_ADDQ:
/* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
size = s->cc_op - CC_OP_ADDB;
t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
add_sub:
return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
.reg2 = t1, .mask = -1, .use_reg2 = true };
case CC_OP_SBBB ... CC_OP_SBBQ:
/* (DATA_TYPE)(CC_DST + CC_SRC + 1) <= (DATA_TYPE)CC_SRC */
size = s->cc_op - CC_OP_SBBB;
t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
if (TCGV_EQUAL(t1, reg) && TCGV_EQUAL(reg, cpu_cc_src)) {
tcg_gen_mov_tl(cpu_tmp0, cpu_cc_src);
t1 = cpu_tmp0;
}
tcg_gen_add_tl(reg, cpu_cc_dst, cpu_cc_src);
tcg_gen_addi_tl(reg, reg, 1);
gen_extu(size, reg);
t0 = reg;
goto adc_sbb;
case CC_OP_ADCB ... CC_OP_ADCQ:
/* (DATA_TYPE)CC_DST <= (DATA_TYPE)CC_SRC */
size = s->cc_op - CC_OP_ADCB;
t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
adc_sbb:
return (CCPrepare) { .cond = TCG_COND_LEU, .reg = t0,
.reg2 = t1, .mask = -1, .use_reg2 = true };
case CC_OP_LOGICB ... CC_OP_LOGICQ:
return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
case CC_OP_INCB ... CC_OP_INCQ:
case CC_OP_DECB ... CC_OP_DECQ:
return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
.mask = -1, .no_setcond = true };
case CC_OP_SHLB ... CC_OP_SHLQ:
/* (CC_SRC >> (DATA_BITS - 1)) & 1 */
size = s->cc_op - CC_OP_SHLB;
shift = (8 << size) - 1;
return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
.mask = (target_ulong)1 << shift };
case CC_OP_MULB ... CC_OP_MULQ:
return (CCPrepare) { .cond = TCG_COND_NE,
.reg = cpu_cc_src, .mask = -1 };
case CC_OP_EFLAGS:
case CC_OP_SARB ... CC_OP_SARQ:
/* CC_SRC & 1 */
return (CCPrepare) { .cond = TCG_COND_NE,
.reg = cpu_cc_src, .mask = CC_C };
default:
/* The need to compute only C from CC_OP_DYNAMIC is important
in efficiently implementing e.g. INC at the start of a TB. */
gen_update_cc_op(s);
gen_helper_cc_compute_c(cpu_tmp2_i32, cpu_env, cpu_cc_op);
tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
.mask = -1, .no_setcond = true };
}
}
| 25,972 |
qemu | 4be746345f13e99e468c60acbd3a355e8183e3ce | 0 | static void pflash_cfi01_realize(DeviceState *dev, Error **errp)
{
pflash_t *pfl = CFI_PFLASH01(dev);
uint64_t total_len;
int ret;
uint64_t blocks_per_device, device_len;
int num_devices;
Error *local_err = NULL;
total_len = pfl->sector_len * pfl->nb_blocs;
/* These are only used to expose the parameters of each device
* in the cfi_table[].
*/
num_devices = pfl->device_width ? (pfl->bank_width / pfl->device_width) : 1;
blocks_per_device = pfl->nb_blocs / num_devices;
device_len = pfl->sector_len * blocks_per_device;
/* XXX: to be fixed */
#if 0
if (total_len != (8 * 1024 * 1024) && total_len != (16 * 1024 * 1024) &&
total_len != (32 * 1024 * 1024) && total_len != (64 * 1024 * 1024))
return NULL;
#endif
memory_region_init_rom_device(
&pfl->mem, OBJECT(dev),
pfl->be ? &pflash_cfi01_ops_be : &pflash_cfi01_ops_le, pfl,
pfl->name, total_len, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
}
vmstate_register_ram(&pfl->mem, DEVICE(pfl));
pfl->storage = memory_region_get_ram_ptr(&pfl->mem);
sysbus_init_mmio(SYS_BUS_DEVICE(dev), &pfl->mem);
if (pfl->bs) {
/* read the initial flash content */
ret = bdrv_read(pfl->bs, 0, pfl->storage, total_len >> 9);
if (ret < 0) {
vmstate_unregister_ram(&pfl->mem, DEVICE(pfl));
error_setg(errp, "failed to read the initial flash content");
return;
}
}
if (pfl->bs) {
pfl->ro = bdrv_is_read_only(pfl->bs);
} else {
pfl->ro = 0;
}
/* Default to devices being used at their maximum device width. This was
* assumed before the device_width support was added.
*/
if (!pfl->max_device_width) {
pfl->max_device_width = pfl->device_width;
}
pfl->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, pflash_timer, pfl);
pfl->wcycle = 0;
pfl->cmd = 0;
pfl->status = 0;
/* Hardcoded CFI table */
pfl->cfi_len = 0x52;
/* Standard "QRY" string */
pfl->cfi_table[0x10] = 'Q';
pfl->cfi_table[0x11] = 'R';
pfl->cfi_table[0x12] = 'Y';
/* Command set (Intel) */
pfl->cfi_table[0x13] = 0x01;
pfl->cfi_table[0x14] = 0x00;
/* Primary extended table address (none) */
pfl->cfi_table[0x15] = 0x31;
pfl->cfi_table[0x16] = 0x00;
/* Alternate command set (none) */
pfl->cfi_table[0x17] = 0x00;
pfl->cfi_table[0x18] = 0x00;
/* Alternate extended table (none) */
pfl->cfi_table[0x19] = 0x00;
pfl->cfi_table[0x1A] = 0x00;
/* Vcc min */
pfl->cfi_table[0x1B] = 0x45;
/* Vcc max */
pfl->cfi_table[0x1C] = 0x55;
/* Vpp min (no Vpp pin) */
pfl->cfi_table[0x1D] = 0x00;
/* Vpp max (no Vpp pin) */
pfl->cfi_table[0x1E] = 0x00;
/* Reserved */
pfl->cfi_table[0x1F] = 0x07;
/* Timeout for min size buffer write */
pfl->cfi_table[0x20] = 0x07;
/* Typical timeout for block erase */
pfl->cfi_table[0x21] = 0x0a;
/* Typical timeout for full chip erase (4096 ms) */
pfl->cfi_table[0x22] = 0x00;
/* Reserved */
pfl->cfi_table[0x23] = 0x04;
/* Max timeout for buffer write */
pfl->cfi_table[0x24] = 0x04;
/* Max timeout for block erase */
pfl->cfi_table[0x25] = 0x04;
/* Max timeout for chip erase */
pfl->cfi_table[0x26] = 0x00;
/* Device size */
pfl->cfi_table[0x27] = ctz32(device_len); /* + 1; */
/* Flash device interface (8 & 16 bits) */
pfl->cfi_table[0x28] = 0x02;
pfl->cfi_table[0x29] = 0x00;
/* Max number of bytes in multi-bytes write */
if (pfl->bank_width == 1) {
pfl->cfi_table[0x2A] = 0x08;
} else {
pfl->cfi_table[0x2A] = 0x0B;
}
pfl->writeblock_size = 1 << pfl->cfi_table[0x2A];
pfl->cfi_table[0x2B] = 0x00;
/* Number of erase block regions (uniform) */
pfl->cfi_table[0x2C] = 0x01;
/* Erase block region 1 */
pfl->cfi_table[0x2D] = blocks_per_device - 1;
pfl->cfi_table[0x2E] = (blocks_per_device - 1) >> 8;
pfl->cfi_table[0x2F] = pfl->sector_len >> 8;
pfl->cfi_table[0x30] = pfl->sector_len >> 16;
/* Extended */
pfl->cfi_table[0x31] = 'P';
pfl->cfi_table[0x32] = 'R';
pfl->cfi_table[0x33] = 'I';
pfl->cfi_table[0x34] = '1';
pfl->cfi_table[0x35] = '0';
pfl->cfi_table[0x36] = 0x00;
pfl->cfi_table[0x37] = 0x00;
pfl->cfi_table[0x38] = 0x00;
pfl->cfi_table[0x39] = 0x00;
pfl->cfi_table[0x3a] = 0x00;
pfl->cfi_table[0x3b] = 0x00;
pfl->cfi_table[0x3c] = 0x00;
pfl->cfi_table[0x3f] = 0x01; /* Number of protection fields */
}
| 25,973 |
qemu | f0267ef7115656119bf00ed77857789adc036bda | 0 | static long do_rt_sigreturn_v2(CPUARMState *env)
{
abi_ulong frame_addr;
struct rt_sigframe_v2 *frame = NULL;
/*
* Since we stacked the signal on a 64-bit boundary,
* then 'sp' should be word aligned here. If it's
* not, then the user is trying to mess with us.
*/
frame_addr = env->regs[13];
trace_user_do_rt_sigreturn(env, frame_addr);
if (frame_addr & 7) {
goto badframe;
}
if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
goto badframe;
}
if (do_sigframe_return_v2(env, frame_addr, &frame->uc)) {
goto badframe;
}
unlock_user_struct(frame, frame_addr, 0);
return env->regs[0];
badframe:
unlock_user_struct(frame, frame_addr, 0);
force_sig(TARGET_SIGSEGV /* , current */);
return 0;
}
| 25,974 |
qemu | 2da9b7d456278bccc6ce889ae350f2867155d7e8 | 0 | void bdrv_drain_all_begin(void)
{
/* Always run first iteration so any pending completion BHs run */
bool waited = true;
BlockDriverState *bs;
BdrvNextIterator it;
GSList *aio_ctxs = NULL, *ctx;
block_job_pause_all();
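/* First pass: begin a drained section on every BlockDriverState, disable external events on its AioContext, and collect the distinct contexts for the polling loop below. */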
for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
AioContext *aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(aio_context);
bdrv_parent_drained_begin(bs);
aio_disable_external(aio_context);
aio_context_release(aio_context);
if (!g_slist_find(aio_ctxs, aio_context)) {
aio_ctxs = g_slist_prepend(aio_ctxs, aio_context);
}
}
/* Note that completion of an asynchronous I/O operation can trigger any
* number of other I/O operations on other devices---for example a
* coroutine can submit an I/O request to another device in response to
* request completion. Therefore we must keep looping until there was no
* more activity rather than simply draining each device independently.
*/
while (waited) {
waited = false;
for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) {
AioContext *aio_context = ctx->data;
aio_context_acquire(aio_context);
for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
if (aio_context == bdrv_get_aio_context(bs)) {
/* FIXME Calling this multiple times is wrong */
bdrv_drain_invoke(bs, true);
waited |= bdrv_drain_recurse(bs, true);
}
}
aio_context_release(aio_context);
}
}
g_slist_free(aio_ctxs);
}
| 25,976 |
qemu | 60fe637bf0e4d7989e21e50f52526444765c63b4 | 1 | static size_t qemu_rdma_save_page(QEMUFile *f, void *opaque,
ram_addr_t block_offset, ram_addr_t offset,
size_t size, int *bytes_sent)
{
QEMUFileRDMA *rfile = opaque;
RDMAContext *rdma = rfile->rdma;
int ret;
CHECK_ERROR_STATE();
qemu_fflush(f);
if (size > 0) {
/*
* Add this page to the current 'chunk'. If the chunk
* is full, or the page doesn't belong to the current chunk,
* an actual RDMA write will occur and a new chunk will be formed.
*/
ret = qemu_rdma_write(f, rdma, block_offset, offset, size);
if (ret < 0) {
fprintf(stderr, "rdma migration: write error! %d\n", ret);
goto err;
}
/*
* We always return 1 byte because the RDMA
* protocol is completely asynchronous. We do not yet know
* whether an identified chunk is zero or not because we're
* waiting for other pages to potentially be merged with
* the current chunk. So, we have to call qemu_update_position()
* later on when the actual write occurs.
*/
if (bytes_sent) {
*bytes_sent = 1;
}
} else {
uint64_t index, chunk;
/* TODO: Change QEMUFileOps prototype to be signed: size_t => long
if (size < 0) {
ret = qemu_rdma_drain_cq(f, rdma);
if (ret < 0) {
fprintf(stderr, "rdma: failed to synchronously drain"
" completion queue before unregistration.\n");
goto err;
}
}
*/
ret = qemu_rdma_search_ram_block(rdma, block_offset,
offset, size, &index, &chunk);
if (ret) {
fprintf(stderr, "ram block search failed\n");
goto err;
}
qemu_rdma_signal_unregister(rdma, index, chunk, 0);
/*
* TODO: Synchronous, guaranteed unregistration (should not occur during
* fast-path). Otherwise, unregisters will process on the next call to
* qemu_rdma_drain_cq()
if (size < 0) {
qemu_rdma_unregister_waiting(rdma);
}
*/
}
/*
* Drain the Completion Queue if possible, but do not block,
* just poll.
*
* If nothing to poll, the end of the iteration will do this
* again to make sure we don't overflow the request queue.
*/
while (1) {
uint64_t wr_id, wr_id_in;
int ret = qemu_rdma_poll(rdma, &wr_id_in, NULL);
if (ret < 0) {
fprintf(stderr, "rdma migration: polling error! %d\n", ret);
goto err;
}
wr_id = wr_id_in & RDMA_WRID_TYPE_MASK;
if (wr_id == RDMA_WRID_NONE) {
break;
}
}
return RAM_SAVE_CONTROL_DELAYED;
err:
rdma->error_state = ret;
return ret;
}
| 25,977 |
qemu | 6b5166f8a82888638bb9aba9dc49aa7fa25f292f | 1 | int xen_be_init(void)
{
xenstore = xs_daemon_open();
if (!xenstore) {
xen_be_printf(NULL, 0, "can't connect to xenstored\n");
return -1;
}
if (qemu_set_fd_handler(xs_fileno(xenstore), xenstore_update, NULL, NULL) < 0) {
goto err;
}
if (xen_xc == XC_HANDLER_INITIAL_VALUE) {
/* Check if xen_init() has been called */
goto err;
}
return 0;
err:
qemu_set_fd_handler(xs_fileno(xenstore), NULL, NULL, NULL);
xs_daemon_close(xenstore);
xenstore = NULL;
return -1;
}
| 25,978 |
FFmpeg | 61ee2ca7758672128e30b3e87908b6845e006d71 | 1 | static void guess_palette(DVDSubContext* ctx,
uint32_t *rgba_palette,
uint32_t subtitle_color)
{
static const uint8_t level_map[4][4] = {
// this configuration (full range, lowest to highest) in tests
// seemed most common, so assume this
{0xff},
{0x00, 0xff},
{0x00, 0x80, 0xff},
{0x00, 0x55, 0xaa, 0xff},
};
uint8_t color_used[16] = { 0 };
int nb_opaque_colors, i, level, j, r, g, b;
uint8_t *colormap = ctx->colormap, *alpha = ctx->alpha;
if(ctx->has_palette) {
for(i = 0; i < 4; i++)
rgba_palette[i] = (ctx->palette[colormap[i]] & 0x00ffffff)
| ((alpha[i] * 17U) << 24);
return;
}
for(i = 0; i < 4; i++)
rgba_palette[i] = 0;
nb_opaque_colors = 0;
for(i = 0; i < 4; i++) {
if (alpha[i] != 0 && !color_used[colormap[i]]) {
color_used[colormap[i]] = 1;
nb_opaque_colors++;
}
}
if (nb_opaque_colors == 0)
return;
j = 0;
memset(color_used, 0, 16);
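/* Second pass: give each newly seen opaque colormap entry the next gray level scaled from subtitle_color; repeated entries reuse the earlier color with their own alpha. */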
for(i = 0; i < 4; i++) {
if (alpha[i] != 0) {
if (!color_used[colormap[i]]) {
level = level_map[nb_opaque_colors - 1][j];
r = (((subtitle_color >> 16) & 0xff) * level) >> 8;
g = (((subtitle_color >> 8) & 0xff) * level) >> 8;
b = (((subtitle_color >> 0) & 0xff) * level) >> 8;
rgba_palette[i] = b | (g << 8) | (r << 16) | ((alpha[i] * 17) << 24);
color_used[colormap[i]] = (i + 1);
j++;
} else {
rgba_palette[i] = (rgba_palette[color_used[colormap[i]] - 1] & 0x00ffffff) |
((alpha[i] * 17) << 24);
}
}
}
}
| 25,979 |
FFmpeg | f4bd9fe326ad1315a74206939ae56df93b940a09 | 1 | int ff_generate_sliding_window_mmcos(H264Context *h, int first_slice)
{
MMCO mmco_temp[MAX_MMCO_COUNT], *mmco = first_slice ? h->mmco : mmco_temp;
int mmco_index = 0, i;
assert(h->long_ref_count + h->short_ref_count <= h->sps.ref_frame_count);
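/* Once the DPB already holds ref_frame_count references, emit an MMCO that marks the oldest short-term reference unused; for field pictures both fields of that frame are dropped. */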
if (h->short_ref_count &&
h->long_ref_count + h->short_ref_count == h->sps.ref_frame_count &&
!(FIELD_PICTURE(h) && !h->first_field && h->cur_pic_ptr->reference)) {
mmco[0].opcode = MMCO_SHORT2UNUSED;
mmco[0].short_pic_num = h->short_ref[h->short_ref_count - 1]->frame_num;
mmco_index = 1;
if (FIELD_PICTURE(h)) {
mmco[0].short_pic_num *= 2;
mmco[1].opcode = MMCO_SHORT2UNUSED;
mmco[1].short_pic_num = mmco[0].short_pic_num + 1;
mmco_index = 2;
}
}
if (first_slice) {
h->mmco_index = mmco_index;
} else if (!first_slice && mmco_index >= 0 &&
(mmco_index != h->mmco_index ||
(i = check_opcodes(h->mmco, mmco_temp, mmco_index)))) {
av_log(h->avctx, AV_LOG_ERROR,
"Inconsistent MMCO state between slices [%d, %d, %d]\n",
mmco_index, h->mmco_index, i);
return AVERROR_INVALIDDATA;
}
return 0;
}
| 25,981 |
FFmpeg | 0ecca7a49f8e254c12a3a1de048d738bfbb614c6 | 1 | ImgReSampleContext *img_resample_full_init(int owidth, int oheight,
int iwidth, int iheight,
int topBand, int bottomBand,
int leftBand, int rightBand,
int padtop, int padbottom,
int padleft, int padright)
{
ImgReSampleContext *s;
s = av_mallocz(sizeof(ImgReSampleContext));
if (!s)
return NULL;
s->line_buf = av_mallocz(owidth * (LINE_BUF_HEIGHT + NB_TAPS));
if (!s->line_buf)
goto fail;
s->owidth = owidth;
s->oheight = oheight;
s->iwidth = iwidth;
s->iheight = iheight;
s->topBand = topBand;
s->bottomBand = bottomBand;
s->leftBand = leftBand;
s->rightBand = rightBand;
s->padtop = padtop;
s->padbottom = padbottom;
s->padleft = padleft;
s->padright = padright;
s->pad_owidth = owidth - (padleft + padright);
s->pad_oheight = oheight - (padtop + padbottom);
s->h_incr = ((iwidth - leftBand - rightBand) * POS_FRAC) / s->pad_owidth;
s->v_incr = ((iheight - topBand - bottomBand) * POS_FRAC) / s->pad_oheight;
av_build_filter(&s->h_filters[0][0], (float) s->pad_owidth /
(float) (iwidth - leftBand - rightBand), NB_TAPS, NB_PHASES, 1<<FILTER_BITS, 0);
av_build_filter(&s->v_filters[0][0], (float) s->pad_oheight /
(float) (iheight - topBand - bottomBand), NB_TAPS, NB_PHASES, 1<<FILTER_BITS, 0);
return s;
fail:
av_free(s);
return NULL;
} | 25,983 |
qemu | ff71a4545c0d9b452e77a91ab1c46f79a10a9eca | 1 | static abi_long do_socketcall(int num, abi_ulong vptr)
{
static const unsigned ac[] = { /* number of arguments per call */
[SOCKOP_socket] = 3, /* domain, type, protocol */
[SOCKOP_bind] = 3, /* sockfd, addr, addrlen */
[SOCKOP_connect] = 3, /* sockfd, addr, addrlen */
[SOCKOP_listen] = 2, /* sockfd, backlog */
[SOCKOP_accept] = 3, /* sockfd, addr, addrlen */
[SOCKOP_accept4] = 4, /* sockfd, addr, addrlen, flags */
[SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
[SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
[SOCKOP_socketpair] = 4, /* domain, type, protocol, tab */
[SOCKOP_send] = 4, /* sockfd, msg, len, flags */
[SOCKOP_recv] = 4, /* sockfd, msg, len, flags */
[SOCKOP_sendto] = 6, /* sockfd, msg, len, flags, addr, addrlen */
[SOCKOP_recvfrom] = 6, /* sockfd, msg, len, flags, addr, addrlen */
[SOCKOP_shutdown] = 2, /* sockfd, how */
[SOCKOP_sendmsg] = 3, /* sockfd, msg, flags */
[SOCKOP_recvmsg] = 3, /* sockfd, msg, flags */
[SOCKOP_sendmmsg] = 4, /* sockfd, msgvec, vlen, flags */
[SOCKOP_recvmmsg] = 4, /* sockfd, msgvec, vlen, flags */
[SOCKOP_setsockopt] = 5, /* sockfd, level, optname, optval, optlen */
[SOCKOP_getsockopt] = 5, /* sockfd, level, optname, optval, optlen */
};
abi_long a[6]; /* max 6 args */
/* first, collect the arguments in a[] according to ac[] */
if (num >= 0 && num < ARRAY_SIZE(ac)) {
unsigned i;
assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
for (i = 0; i < ac[num]; ++i) {
if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
return -TARGET_EFAULT;
}
}
}
/* now when we have the args, actually handle the call */
switch (num) {
case SOCKOP_socket: /* domain, type, protocol */
return do_socket(a[0], a[1], a[2]);
case SOCKOP_bind: /* sockfd, addr, addrlen */
return do_bind(a[0], a[1], a[2]);
case SOCKOP_connect: /* sockfd, addr, addrlen */
return do_connect(a[0], a[1], a[2]);
case SOCKOP_listen: /* sockfd, backlog */
return get_errno(listen(a[0], a[1]));
case SOCKOP_accept: /* sockfd, addr, addrlen */
return do_accept4(a[0], a[1], a[2], 0);
case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
return do_accept4(a[0], a[1], a[2], a[3]);
case SOCKOP_getsockname: /* sockfd, addr, addrlen */
return do_getsockname(a[0], a[1], a[2]);
case SOCKOP_getpeername: /* sockfd, addr, addrlen */
return do_getpeername(a[0], a[1], a[2]);
case SOCKOP_socketpair: /* domain, type, protocol, tab */
return do_socketpair(a[0], a[1], a[2], a[3]);
case SOCKOP_send: /* sockfd, msg, len, flags */
return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
case SOCKOP_recv: /* sockfd, msg, len, flags */
return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
case SOCKOP_shutdown: /* sockfd, how */
return get_errno(shutdown(a[0], a[1]));
case SOCKOP_sendmsg: /* sockfd, msg, flags */
return do_sendrecvmsg(a[0], a[1], a[2], 1);
case SOCKOP_recvmsg: /* sockfd, msg, flags */
return do_sendrecvmsg(a[0], a[1], a[2], 0);
case SOCKOP_sendmmsg: /* sockfd, msgvec, vlen, flags */
return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
case SOCKOP_recvmmsg: /* sockfd, msgvec, vlen, flags */
return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
default:
gemu_log("Unsupported socketcall: %d\n", num);
return -TARGET_ENOSYS;
}
}
| 25,984 |
qemu | 6da67de6803e93cbb7e93ac3497865832f8c00ea | 1 | static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
MemTxAttrs attrs,
const uint8_t *buf,
int len, hwaddr addr1,
hwaddr l, MemoryRegion *mr)
{
uint8_t *ptr;
uint64_t val;
MemTxResult result = MEMTX_OK;
bool release_lock = false;
for (;;) {
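/* Each iteration handles one contiguous chunk: MMIO goes through the dispatch path with an access size matched to the region, while RAM is copied directly and marked dirty. */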
if (!memory_access_is_direct(mr, true)) {
release_lock |= prepare_mmio_access(mr);
l = memory_access_size(mr, l, addr1);
/* XXX: could force current_cpu to NULL to avoid
potential bugs */
switch (l) {
case 8:
/* 64 bit write access */
val = ldq_p(buf);
result |= memory_region_dispatch_write(mr, addr1, val, 8,
attrs);
break;
case 4:
/* 32 bit write access */
val = ldl_p(buf);
result |= memory_region_dispatch_write(mr, addr1, val, 4,
attrs);
break;
case 2:
/* 16 bit write access */
val = lduw_p(buf);
result |= memory_region_dispatch_write(mr, addr1, val, 2,
attrs);
break;
case 1:
/* 8 bit write access */
val = ldub_p(buf);
result |= memory_region_dispatch_write(mr, addr1, val, 1,
attrs);
break;
default:
abort();
}
} else {
/* RAM case */
ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
memcpy(ptr, buf, l);
invalidate_and_set_dirty(mr, addr1, l);
}
if (release_lock) {
qemu_mutex_unlock_iothread();
release_lock = false;
}
len -= l;
buf += l;
addr += l;
if (!len) {
break;
}
l = len;
mr = address_space_translate(as, addr, &addr1, &l, true);
}
return result;
}
| 25,985 |
qemu | 297a3646c2947ee64a6d42ca264039732c6218e0 | 1 | void visit_type_int64(Visitor *v, int64_t *obj, const char *name, Error **errp)
{
if (!error_is_set(errp)) {
if (v->type_int64) {
v->type_int64(v, obj, name, errp);
} else {
v->type_int(v, obj, name, errp);
}
}
}
| 25,987 |
FFmpeg | c23acbaed40101c677dfcfbbfe0d2c230a8e8f44 | 1 | static inline void idct4col_add(uint8_t *dest, int line_size, const DCTELEM *col)
{
int c0, c1, c2, c3, a0, a1, a2, a3;
const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
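/* 4-point inverse DCT down one column; the results are added to the existing pixels and clamped through the crop table. */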
a0 = col[8*0];
a1 = col[8*1];
a2 = col[8*2];
a3 = col[8*3];
c0 = (a0 + a2)*C3 + (1 << (C_SHIFT - 1));
c2 = (a0 - a2)*C3 + (1 << (C_SHIFT - 1));
c1 = a1 * C1 + a3 * C2;
c3 = a1 * C2 - a3 * C1;
dest[0] = cm[dest[0] + ((c0 + c1) >> C_SHIFT)];
dest += line_size;
dest[0] = cm[dest[0] + ((c2 + c3) >> C_SHIFT)];
dest += line_size;
dest[0] = cm[dest[0] + ((c2 - c3) >> C_SHIFT)];
dest += line_size;
dest[0] = cm[dest[0] + ((c0 - c1) >> C_SHIFT)];
}
| 25,988 |
qemu | e8a095dadb70e2ea6d5169d261920db3747bfa45 | 1 | void bdrv_detach_aio_context(BlockDriverState *bs)
{
BdrvAioNotifier *baf;
BdrvChild *child;
if (!bs->drv) {
return;
}
QLIST_FOREACH(baf, &bs->aio_notifiers, list) {
baf->detach_aio_context(baf->opaque);
}
if (bs->drv->bdrv_detach_aio_context) {
bs->drv->bdrv_detach_aio_context(bs);
}
QLIST_FOREACH(child, &bs->children, next) {
bdrv_detach_aio_context(child->bs);
}
bs->aio_context = NULL;
}
| 25,989 |
qemu | fe463b7dbc16cc66f3b9a8b7be197fb340378fa3 | 0 | void cpu_ppc_reset (void *opaque)
{
CPUPPCState *env;
target_ulong msr;
env = opaque;
msr = (target_ulong)0;
if (0) {
/* XXX: find a suitable condition to enable the hypervisor mode */
msr |= (target_ulong)MSR_HVB;
}
msr |= (target_ulong)0 << MSR_AP; /* TO BE CHECKED */
msr |= (target_ulong)0 << MSR_SA; /* TO BE CHECKED */
msr |= (target_ulong)1 << MSR_EP;
#if defined (DO_SINGLE_STEP) && 0
/* Single step trace mode */
msr |= (target_ulong)1 << MSR_SE;
msr |= (target_ulong)1 << MSR_BE;
#endif
#if defined(CONFIG_USER_ONLY)
msr |= (target_ulong)1 << MSR_FP; /* Allow floating point usage */
msr |= (target_ulong)1 << MSR_PR;
#else
env->nip = env->hreset_vector | env->excp_prefix;
if (env->mmu_model != POWERPC_MMU_REAL)
ppc_tlb_invalidate_all(env);
#endif
env->msr = msr;
hreg_compute_hflags(env);
env->reserve = (target_ulong)-1ULL;
/* Be sure no exception or interrupt is pending */
env->pending_interrupts = 0;
env->exception_index = POWERPC_EXCP_NONE;
env->error_code = 0;
/* Flush all TLBs */
tlb_flush(env, 1);
}
| 25,990 |
qemu | a8170e5e97ad17ca169c64ba87ae2f53850dab4c | 0 | static void omap_sti_write(void *opaque, target_phys_addr_t addr,
uint64_t value, unsigned size)
{
struct omap_sti_s *s = (struct omap_sti_s *) opaque;
if (size != 4) {
return omap_badwidth_write32(opaque, addr, value);
}
switch (addr) {
case 0x00: /* STI_REVISION */
case 0x14: /* STI_SYSSTATUS / STI_RX_STATUS / XTI_SYSSTATUS */
OMAP_RO_REG(addr);
return;
case 0x10: /* STI_SYSCONFIG */
if (value & (1 << 1)) /* SOFTRESET */
omap_sti_reset(s);
s->sysconfig = value & 0xfe;
break;
case 0x18: /* STI_IRQSTATUS */
s->irqst &= ~value;
omap_sti_interrupt_update(s);
break;
case 0x1c: /* STI_IRQSETEN / STI_IRQCLREN */
s->irqen = value & 0xffff;
omap_sti_interrupt_update(s);
break;
case 0x2c: /* STI_CLK_CTRL / XTI_SCLKCRTL */
s->clkcontrol = value & 0xff;
break;
case 0x30: /* STI_SERIAL_CFG / XTI_SCONFIG */
s->serial_config = value & 0xff;
break;
case 0x24: /* STI_ER / STI_DR / XTI_TRACESELECT */
case 0x28: /* STI_RX_DR / XTI_RXDATA */
/* TODO */
return;
default:
OMAP_BAD_REG(addr);
return;
}
}
| 25,991 |
qemu | 880a7578381d1c7ed4d41c7599ae3cc06567a824 | 0 | static void gdb_chr_event(void *opaque, int event)
{
switch (event) {
case CHR_EVENT_RESET:
vm_stop(EXCP_INTERRUPT);
gdb_syscall_state = opaque;
gdb_has_xml = 0;
break;
default:
break;
}
}
| 25,992 |
qemu | e4c8f004c55d9da3eae3e14df740238bf805b5d6 | 0 | static void release_keys(void *opaque)
{
int keycode;
while (nb_pending_keycodes > 0) {
nb_pending_keycodes--;
keycode = keycodes[nb_pending_keycodes];
if (keycode & 0x80)
kbd_put_keycode(0xe0);
kbd_put_keycode(keycode | 0x80);
}
}
| 25,993 |
qemu | ef546f1275f6563e8934dd5e338d29d9f9909ca6 | 0 | static int virtio_scsi_parse_req(VirtIOSCSIReq *req,
unsigned req_size, unsigned resp_size)
{
VirtIODevice *vdev = (VirtIODevice *) req->dev;
size_t in_size, out_size;
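/* Copy the fixed-size request header out of the guest's out iovec and map the response header area in the in iovec before working out the data transfer direction. */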
if (iov_to_buf(req->elem.out_sg, req->elem.out_num, 0,
&req->req, req_size) < req_size) {
return -EINVAL;
}
if (qemu_iovec_concat_iov(&req->resp_iov,
req->elem.in_sg, req->elem.in_num, 0,
resp_size) < resp_size) {
return -EINVAL;
}
req->resp_size = resp_size;
/* Old BIOSes left some padding by mistake after the req_size/resp_size.
* As a workaround, always consider the first buffer as the virtio-scsi
* request/response, making the payload start at the second element
* of the iovec.
*
* The actual length of the response header, stored in req->resp_size,
* does not change.
*
* TODO: always disable this workaround for virtio 1.0 devices.
*/
if ((vdev->guest_features & (1 << VIRTIO_F_ANY_LAYOUT)) == 0) {
req_size = req->elem.out_sg[0].iov_len;
resp_size = req->elem.in_sg[0].iov_len;
}
out_size = qemu_sgl_concat(req, req->elem.out_sg,
&req->elem.out_addr[0], req->elem.out_num,
req_size);
in_size = qemu_sgl_concat(req, req->elem.in_sg,
&req->elem.in_addr[0], req->elem.in_num,
resp_size);
if (out_size && in_size) {
return -ENOTSUP;
}
if (out_size) {
req->mode = SCSI_XFER_TO_DEV;
} else if (in_size) {
req->mode = SCSI_XFER_FROM_DEV;
}
return 0;
}
| 25,994 |
qemu | 8df1426e44176512be1b6456e90d100d1af907e1 | 0 | void pc_dimm_memory_unplug(DeviceState *dev, MemoryHotplugState *hpms,
MemoryRegion *mr)
{
PCDIMMDevice *dimm = PC_DIMM(dev);
numa_unset_mem_node_id(dimm->addr, memory_region_size(mr), dimm->node);
memory_region_del_subregion(&hpms->mr, mr);
vmstate_unregister_ram(mr, dev);
}
| 25,995 |
qemu | 73aaec4a39b3cf11082303a6cf6bcde8796c09c6 | 0 | static void kvm_handle_internal_error(CPUState *env, struct kvm_run *run)
{
if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) {
int i;
fprintf(stderr, "KVM internal error. Suberror: %d\n",
run->internal.suberror);
for (i = 0; i < run->internal.ndata; ++i) {
fprintf(stderr, "extra data[%d]: %"PRIx64"\n",
i, (uint64_t)run->internal.data[i]);
}
}
cpu_dump_state(env, stderr, fprintf, 0);
if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
fprintf(stderr, "emulation failure\n");
if (!kvm_arch_stop_on_emulation_error(env)) {
return;
}
}
/* FIXME: Should trigger a qmp message to let management know
* something went wrong.
*/
vm_stop(0);
}
| 25,996 |
FFmpeg | c89e428ed8c2c31396af2d18cab4342b7d82958f | 0 | void ff_slice_buffer_init(slice_buffer *buf, int line_count,
int max_allocated_lines, int line_width,
IDWTELEM *base_buffer)
{
int i;
buf->base_buffer = base_buffer;
buf->line_count = line_count;
buf->line_width = line_width;
buf->data_count = max_allocated_lines;
buf->line = av_mallocz(sizeof(IDWTELEM *) * line_count);
buf->data_stack = av_malloc(sizeof(IDWTELEM *) * max_allocated_lines);
for (i = 0; i < max_allocated_lines; i++)
buf->data_stack[i] = av_malloc(sizeof(IDWTELEM) * line_width);
buf->data_stack_top = max_allocated_lines - 1;
}
| 25,998 |
qemu | 34975e536f3531ad852d724a46280b882ec1bc9d | 0 | static void xenfb_mouse_event(DeviceState *dev, QemuConsole *src,
InputEvent *evt)
{
struct XenInput *xenfb = (struct XenInput *)dev;
InputBtnEvent *btn;
InputMoveEvent *move;
QemuConsole *con;
DisplaySurface *surface;
int scale;
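/* Map QEMU input events onto the xenfb protocol: buttons become BTN_* key events, wheel clicks adjust a counter, and absolute moves are rescaled from the 0..0x7fff input range to the console surface size. */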
switch (evt->type) {
case INPUT_EVENT_KIND_BTN:
btn = evt->u.btn.data;
switch (btn->button) {
case INPUT_BUTTON_LEFT:
xenfb_send_key(xenfb, btn->down, BTN_LEFT);
break;
case INPUT_BUTTON_RIGHT:
xenfb_send_key(xenfb, btn->down, BTN_LEFT + 1);
break;
case INPUT_BUTTON_MIDDLE:
xenfb_send_key(xenfb, btn->down, BTN_LEFT + 2);
break;
case INPUT_BUTTON_WHEEL_UP:
if (btn->down) {
xenfb->wheel--;
}
break;
case INPUT_BUTTON_WHEEL_DOWN:
if (btn->down) {
xenfb->wheel++;
}
break;
default:
break;
}
break;
case INPUT_EVENT_KIND_ABS:
move = evt->u.abs.data;
con = qemu_console_lookup_by_index(0);
if (!con) {
xen_pv_printf(&xenfb->c.xendev, 0, "No QEMU console available");
return;
}
surface = qemu_console_surface(con);
switch (move->axis) {
case INPUT_AXIS_X:
scale = surface_width(surface) - 1;
break;
case INPUT_AXIS_Y:
scale = surface_height(surface) - 1;
break;
default:
scale = 0x8000;
break;
}
xenfb->axis[move->axis] = move->value * scale / 0x7fff;
break;
case INPUT_EVENT_KIND_REL:
move = evt->u.rel.data;
xenfb->axis[move->axis] += move->value;
break;
default:
break;
}
}
| 25,999 |
qemu | db39fcf1f690b02d612e2bfc00980700887abe03 | 0 | static CharDriverState *qemu_chr_open_socket_fd(int fd, bool do_nodelay,
bool is_listen, bool is_telnet,
bool is_waitconnect,
Error **errp)
{
CharDriverState *chr = NULL;
TCPCharDriver *s = NULL;
char host[NI_MAXHOST], serv[NI_MAXSERV];
const char *left = "", *right = "";
struct sockaddr_storage ss;
socklen_t ss_len = sizeof(ss);
memset(&ss, 0, ss_len);
if (getsockname(fd, (struct sockaddr *) &ss, &ss_len) != 0) {
error_setg_errno(errp, errno, "getsockname");
return NULL;
}
chr = g_malloc0(sizeof(CharDriverState));
s = g_malloc0(sizeof(TCPCharDriver));
s->connected = 0;
s->fd = -1;
s->listen_fd = -1;
s->read_msgfds = 0;
s->read_msgfds_num = 0;
s->write_msgfds = 0;
s->write_msgfds_num = 0;
chr->filename = g_malloc(256);
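/* Build a human-readable filename from the socket's local address: a unix path or a host:port pair, tagged with ",server" when listening. */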
switch (ss.ss_family) {
#ifndef _WIN32
case AF_UNIX:
s->is_unix = 1;
snprintf(chr->filename, 256, "unix:%s%s",
((struct sockaddr_un *)(&ss))->sun_path,
is_listen ? ",server" : "");
break;
#endif
case AF_INET6:
left = "[";
right = "]";
/* fall through */
case AF_INET:
s->do_nodelay = do_nodelay;
getnameinfo((struct sockaddr *) &ss, ss_len, host, sizeof(host),
serv, sizeof(serv), NI_NUMERICHOST | NI_NUMERICSERV);
snprintf(chr->filename, 256, "%s:%s%s%s:%s%s",
is_telnet ? "telnet" : "tcp",
left, host, right, serv,
is_listen ? ",server" : "");
break;
}
chr->opaque = s;
chr->chr_write = tcp_chr_write;
chr->chr_sync_read = tcp_chr_sync_read;
chr->chr_close = tcp_chr_close;
chr->get_msgfds = tcp_get_msgfds;
chr->set_msgfds = tcp_set_msgfds;
chr->chr_add_client = tcp_chr_add_client;
chr->chr_add_watch = tcp_chr_add_watch;
chr->chr_update_read_handler = tcp_chr_update_read_handler;
/* be isn't opened until we get a connection */
chr->explicit_be_open = true;
if (is_listen) {
s->listen_fd = fd;
s->listen_chan = io_channel_from_socket(s->listen_fd);
s->listen_tag = g_io_add_watch(s->listen_chan, G_IO_IN, tcp_chr_accept, chr);
if (is_telnet) {
s->do_telnetopt = 1;
}
} else {
s->connected = 1;
s->fd = fd;
socket_set_nodelay(fd);
s->chan = io_channel_from_socket(s->fd);
tcp_chr_connect(chr);
}
if (is_listen && is_waitconnect) {
fprintf(stderr, "QEMU waiting for connection on: %s\n",
chr->filename);
tcp_chr_accept(s->listen_chan, G_IO_IN, chr);
qemu_set_nonblock(s->listen_fd);
}
return chr;
}
| 26,000 |
qemu | 8aaf42ed0f203da63860b0a3ab3ff2bdfe9b4cb0 | 0 | static void dhcp_decode(const struct bootp_t *bp, int *pmsg_type,
const struct in_addr **preq_addr)
{
const uint8_t *p, *p_end;
int len, tag;
*pmsg_type = 0;
*preq_addr = NULL;
p = bp->bp_vend;
p_end = p + DHCP_OPT_LEN;
if (memcmp(p, rfc1533_cookie, 4) != 0)
return;
p += 4;
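/* Scan the DHCP options field: PAD bytes are skipped, END terminates the scan, and the message type and requested-address options are recorded as they are found. */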
while (p < p_end) {
tag = p[0];
if (tag == RFC1533_PAD) {
p++;
} else if (tag == RFC1533_END) {
break;
} else {
p++;
if (p >= p_end)
break;
len = *p++;
DPRINTF("dhcp: tag=%d len=%d\n", tag, len);
switch(tag) {
case RFC2132_MSG_TYPE:
if (len >= 1)
*pmsg_type = p[0];
break;
case RFC2132_REQ_ADDR:
if (len >= 4)
*preq_addr = (struct in_addr *)p;
break;
default:
break;
}
p += len;
}
}
if (*pmsg_type == DHCPREQUEST && !*preq_addr && bp->bp_ciaddr.s_addr) {
*preq_addr = &bp->bp_ciaddr;
}
}
| 26,001 |