project (stringclasses, 2 values) | commit_id (stringlengths, 40) | target (int64, 0-1) | func (stringlengths, 26-142k) | idx (int64, 0-27.3k)
---|---|---|---|---|
qemu | 5dd7a535b71a0f2f8e7af75c5d694174359ce323 | 1 | iscsi_co_generic_cb(struct iscsi_context *iscsi, int status,
void *command_data, void *opaque)
{
struct IscsiTask *iTask = opaque;
struct scsi_task *task = command_data;
iTask->status = status;
iTask->do_retry = 0;
iTask->task = task;
if (status != SCSI_STATUS_GOOD) {
if (iTask->retries++ < ISCSI_CMD_RETRIES) {
if (status == SCSI_STATUS_CHECK_CONDITION
&& task->sense.key == SCSI_SENSE_UNIT_ATTENTION) {
error_report("iSCSI CheckCondition: %s",
iscsi_get_error(iscsi));
iTask->do_retry = 1;
goto out;
}
/* status 0x28 is SCSI_TASK_SET_FULL. It was first introduced
* in libiscsi 1.10.0. Hardcode this value here to avoid
* the need to bump the libiscsi requirement to 1.10.0 */
if (status == SCSI_STATUS_BUSY || status == 0x28) {
unsigned retry_time =
exp_random(iscsi_retry_times[iTask->retries - 1]);
error_report("iSCSI Busy/TaskSetFull (retry #%u in %u ms): %s",
iTask->retries, retry_time,
iscsi_get_error(iscsi));
aio_timer_init(iTask->iscsilun->aio_context,
&iTask->retry_timer, QEMU_CLOCK_REALTIME,
SCALE_MS, iscsi_retry_timer_expired, iTask);
timer_mod(&iTask->retry_timer,
qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + retry_time);
iTask->do_retry = 1;
return;
}
}
error_report("iSCSI Failure: %s", iscsi_get_error(iscsi));
} else {
iTask->iscsilun->force_next_flush |= iTask->force_next_flush;
}
out:
if (iTask->co) {
iTask->bh = aio_bh_new(iTask->iscsilun->aio_context,
iscsi_co_generic_bh_cb, iTask);
qemu_bh_schedule(iTask->bh);
} else {
iTask->complete = 1;
}
}
| 27,130 |
FFmpeg | 015da965a68bdb48819dc98317888fc84eced599 | 1 | int attribute_align_arg avcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples,
int *frame_size_ptr,
AVPacket *avpkt)
{
AVFrame frame = {0};
int ret, got_frame = 0;
if (avctx->get_buffer != avcodec_default_get_buffer) {
av_log(avctx, AV_LOG_ERROR, "Custom get_buffer() for use with"
"avcodec_decode_audio3() detected. Overriding with avcodec_default_get_buffer\n");
av_log(avctx, AV_LOG_ERROR, "Please port your application to "
"avcodec_decode_audio4()\n");
avctx->get_buffer = avcodec_default_get_buffer;
}
ret = avcodec_decode_audio4(avctx, &frame, &got_frame, avpkt);
if (ret >= 0 && got_frame) {
int ch, plane_size;
int planar = av_sample_fmt_is_planar(avctx->sample_fmt);
int data_size = av_samples_get_buffer_size(&plane_size, avctx->channels,
frame.nb_samples,
avctx->sample_fmt, 1);
if (*frame_size_ptr < data_size) {
av_log(avctx, AV_LOG_ERROR, "output buffer size is too small for "
"the current frame (%d < %d)\n", *frame_size_ptr, data_size);
return AVERROR(EINVAL);
}
memcpy(samples, frame.extended_data[0], plane_size);
if (planar && avctx->channels > 1) {
uint8_t *out = ((uint8_t *)samples) + plane_size;
for (ch = 1; ch < avctx->channels; ch++) {
memcpy(out, frame.extended_data[ch], plane_size);
out += plane_size;
}
}
*frame_size_ptr = data_size;
} else {
*frame_size_ptr = 0;
}
return ret;
}
| 27,132 |
FFmpeg | 3cfa310c5de526dbc40d7b33eb6234cff29d8f8c | 1 | static int ape_read_header(AVFormatContext * s)
{
AVIOContext *pb = s->pb;
APEContext *ape = s->priv_data;
AVStream *st;
uint32_t tag;
int i;
int total_blocks, final_size = 0;
int64_t pts, file_size;
/* Skip any leading junk such as id3v2 tags */
ape->junklength = avio_tell(pb);
tag = avio_rl32(pb);
if (tag != MKTAG('M', 'A', 'C', ' '))
return AVERROR_INVALIDDATA;
ape->fileversion = avio_rl16(pb);
if (ape->fileversion < APE_MIN_VERSION || ape->fileversion > APE_MAX_VERSION) {
av_log(s, AV_LOG_ERROR, "Unsupported file version - %d.%02d\n",
ape->fileversion / 1000, (ape->fileversion % 1000) / 10);
return AVERROR_PATCHWELCOME;
}
if (ape->fileversion >= 3980) {
ape->padding1 = avio_rl16(pb);
ape->descriptorlength = avio_rl32(pb);
ape->headerlength = avio_rl32(pb);
ape->seektablelength = avio_rl32(pb);
ape->wavheaderlength = avio_rl32(pb);
ape->audiodatalength = avio_rl32(pb);
ape->audiodatalength_high = avio_rl32(pb);
ape->wavtaillength = avio_rl32(pb);
avio_read(pb, ape->md5, 16);
/* Skip any unknown bytes at the end of the descriptor.
This is for future compatibility */
if (ape->descriptorlength > 52)
avio_skip(pb, ape->descriptorlength - 52);
/* Read header data */
ape->compressiontype = avio_rl16(pb);
ape->formatflags = avio_rl16(pb);
ape->blocksperframe = avio_rl32(pb);
ape->finalframeblocks = avio_rl32(pb);
ape->totalframes = avio_rl32(pb);
ape->bps = avio_rl16(pb);
ape->channels = avio_rl16(pb);
ape->samplerate = avio_rl32(pb);
} else {
ape->descriptorlength = 0;
ape->headerlength = 32;
ape->compressiontype = avio_rl16(pb);
ape->formatflags = avio_rl16(pb);
ape->channels = avio_rl16(pb);
ape->samplerate = avio_rl32(pb);
ape->wavheaderlength = avio_rl32(pb);
ape->wavtaillength = avio_rl32(pb);
ape->totalframes = avio_rl32(pb);
ape->finalframeblocks = avio_rl32(pb);
if (ape->formatflags & MAC_FORMAT_FLAG_HAS_PEAK_LEVEL) {
avio_skip(pb, 4); /* Skip the peak level */
ape->headerlength += 4;
}
if (ape->formatflags & MAC_FORMAT_FLAG_HAS_SEEK_ELEMENTS) {
ape->seektablelength = avio_rl32(pb);
ape->headerlength += 4;
ape->seektablelength *= sizeof(int32_t);
} else
ape->seektablelength = ape->totalframes * sizeof(int32_t);
if (ape->formatflags & MAC_FORMAT_FLAG_8_BIT)
ape->bps = 8;
else if (ape->formatflags & MAC_FORMAT_FLAG_24_BIT)
ape->bps = 24;
else
ape->bps = 16;
if (ape->fileversion >= 3950)
ape->blocksperframe = 73728 * 4;
else if (ape->fileversion >= 3900 || (ape->fileversion >= 3800 && ape->compressiontype >= 4000))
ape->blocksperframe = 73728;
else
ape->blocksperframe = 9216;
/* Skip any stored wav header */
if (!(ape->formatflags & MAC_FORMAT_FLAG_CREATE_WAV_HEADER))
avio_skip(pb, ape->wavheaderlength);
}
if(!ape->totalframes){
av_log(s, AV_LOG_ERROR, "No frames in the file!\n");
return AVERROR(EINVAL);
}
if(ape->totalframes > UINT_MAX / sizeof(APEFrame)){
av_log(s, AV_LOG_ERROR, "Too many frames: %"PRIu32"\n",
ape->totalframes);
return AVERROR_INVALIDDATA;
}
if (ape->seektablelength / sizeof(*ape->seektable) < ape->totalframes) {
av_log(s, AV_LOG_ERROR,
"Number of seek entries is less than number of frames: %zu vs. %"PRIu32"\n",
ape->seektablelength / sizeof(*ape->seektable), ape->totalframes);
return AVERROR_INVALIDDATA;
}
ape->frames = av_malloc(ape->totalframes * sizeof(APEFrame));
if(!ape->frames)
return AVERROR(ENOMEM);
ape->firstframe = ape->junklength + ape->descriptorlength + ape->headerlength + ape->seektablelength + ape->wavheaderlength;
if (ape->fileversion < 3810)
ape->firstframe += ape->totalframes;
ape->currentframe = 0;
ape->totalsamples = ape->finalframeblocks;
if (ape->totalframes > 1)
ape->totalsamples += ape->blocksperframe * (ape->totalframes - 1);
if (ape->seektablelength > 0) {
ape->seektable = av_malloc(ape->seektablelength);
if (!ape->seektable)
return AVERROR(ENOMEM);
for (i = 0; i < ape->seektablelength / sizeof(uint32_t) && !pb->eof_reached; i++)
ape->seektable[i] = avio_rl32(pb);
if (ape->fileversion < 3810) {
ape->bittable = av_malloc(ape->totalframes);
if (!ape->bittable)
return AVERROR(ENOMEM);
for (i = 0; i < ape->totalframes && !pb->eof_reached; i++)
ape->bittable[i] = avio_r8(pb);
}
}
ape->frames[0].pos = ape->firstframe;
ape->frames[0].nblocks = ape->blocksperframe;
ape->frames[0].skip = 0;
for (i = 1; i < ape->totalframes; i++) {
ape->frames[i].pos = ape->seektable[i] + ape->junklength;
ape->frames[i].nblocks = ape->blocksperframe;
ape->frames[i - 1].size = ape->frames[i].pos - ape->frames[i - 1].pos;
ape->frames[i].skip = (ape->frames[i].pos - ape->frames[0].pos) & 3;
}
ape->frames[ape->totalframes - 1].nblocks = ape->finalframeblocks;
/* calculate final packet size from total file size, if available */
file_size = avio_size(pb);
if (file_size > 0) {
final_size = file_size - ape->frames[ape->totalframes - 1].pos -
ape->wavtaillength;
final_size -= final_size & 3;
}
if (file_size <= 0 || final_size <= 0)
final_size = ape->finalframeblocks * 8;
ape->frames[ape->totalframes - 1].size = final_size;
for (i = 0; i < ape->totalframes; i++) {
if(ape->frames[i].skip){
ape->frames[i].pos -= ape->frames[i].skip;
ape->frames[i].size += ape->frames[i].skip;
}
ape->frames[i].size = (ape->frames[i].size + 3) & ~3;
}
if (ape->fileversion < 3810) {
for (i = 0; i < ape->totalframes; i++) {
if (i < ape->totalframes - 1 && ape->bittable[i + 1])
ape->frames[i].size += 4;
ape->frames[i].skip <<= 3;
ape->frames[i].skip += ape->bittable[i];
}
}
ape_dumpinfo(s, ape);
av_log(s, AV_LOG_DEBUG, "Decoding file - v%d.%02d, compression level %"PRIu16"\n",
ape->fileversion / 1000, (ape->fileversion % 1000) / 10,
ape->compressiontype);
/* now we are ready: build format streams */
st = avformat_new_stream(s, NULL);
if (!st)
return AVERROR(ENOMEM);
total_blocks = (ape->totalframes == 0) ? 0 : ((ape->totalframes - 1) * ape->blocksperframe) + ape->finalframeblocks;
st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
st->codec->codec_id = AV_CODEC_ID_APE;
st->codec->codec_tag = MKTAG('A', 'P', 'E', ' ');
st->codec->channels = ape->channels;
st->codec->sample_rate = ape->samplerate;
st->codec->bits_per_coded_sample = ape->bps;
st->nb_frames = ape->totalframes;
st->start_time = 0;
st->duration = total_blocks;
avpriv_set_pts_info(st, 64, 1, ape->samplerate);
if (ff_alloc_extradata(st->codec, APE_EXTRADATA_SIZE))
return AVERROR(ENOMEM);
AV_WL16(st->codec->extradata + 0, ape->fileversion);
AV_WL16(st->codec->extradata + 2, ape->compressiontype);
AV_WL16(st->codec->extradata + 4, ape->formatflags);
pts = 0;
for (i = 0; i < ape->totalframes; i++) {
ape->frames[i].pts = pts;
av_add_index_entry(st, ape->frames[i].pos, ape->frames[i].pts, 0, 0, AVINDEX_KEYFRAME);
pts += ape->blocksperframe;
}
/* try to read APE tags */
if (pb->seekable) {
ff_ape_parse_tag(s);
avio_seek(pb, 0, SEEK_SET);
}
return 0;
}
| 27,133 |
FFmpeg | 64ecb78b7179cab2dbdf835463104679dbb7c895 | 1 | int ff_vdpau_common_init(AVCodecContext *avctx, VdpDecoderProfile profile,
int level)
{
VDPAUHWContext *hwctx = avctx->hwaccel_context;
VDPAUContext *vdctx = avctx->internal->hwaccel_priv_data;
VdpVideoSurfaceQueryCapabilities *surface_query_caps;
VdpDecoderQueryCapabilities *decoder_query_caps;
VdpDecoderCreate *create;
void *func;
VdpStatus status;
VdpBool supported;
uint32_t max_level, max_mb, max_width, max_height;
VdpChromaType type;
uint32_t width;
uint32_t height;
vdctx->width = UINT32_MAX;
vdctx->height = UINT32_MAX;
if (av_vdpau_get_surface_parameters(avctx, &type, &width, &height))
return AVERROR(ENOSYS);
if (hwctx) {
hwctx->reset = 0;
if (hwctx->context.decoder != VDP_INVALID_HANDLE) {
vdctx->decoder = hwctx->context.decoder;
vdctx->render = hwctx->context.render;
vdctx->device = VDP_INVALID_HANDLE;
return 0; /* Decoder created by user */
}
vdctx->device = hwctx->device;
vdctx->get_proc_address = hwctx->get_proc_address;
if (hwctx->flags & AV_HWACCEL_FLAG_IGNORE_LEVEL)
level = 0;
if (!(hwctx->flags & AV_HWACCEL_FLAG_ALLOW_HIGH_DEPTH) &&
type != VDP_CHROMA_TYPE_420)
return AVERROR(ENOSYS);
} else {
AVHWFramesContext *frames_ctx = NULL;
AVVDPAUDeviceContext *dev_ctx;
// We assume the hw_frames_ctx always survives until ff_vdpau_common_uninit
// is called. This holds true as the user is not allowed to touch
// hw_device_ctx, or hw_frames_ctx after get_format (and ff_get_format
// itself also uninits before unreffing hw_frames_ctx).
if (avctx->hw_frames_ctx) {
frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
} else if (avctx->hw_device_ctx) {
int ret;
avctx->hw_frames_ctx = av_hwframe_ctx_alloc(avctx->hw_device_ctx);
if (!avctx->hw_frames_ctx)
return AVERROR(ENOMEM);
frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
frames_ctx->format = AV_PIX_FMT_VDPAU;
frames_ctx->sw_format = avctx->sw_pix_fmt;
frames_ctx->width = avctx->coded_width;
frames_ctx->height = avctx->coded_height;
ret = av_hwframe_ctx_init(avctx->hw_frames_ctx);
if (ret < 0) {
av_buffer_unref(&avctx->hw_frames_ctx);
return ret;
}
}
if (!frames_ctx) {
av_log(avctx, AV_LOG_ERROR, "A hardware frames context is "
"required for VDPAU decoding.\n");
return AVERROR(EINVAL);
}
dev_ctx = frames_ctx->device_ctx->hwctx;
vdctx->device = dev_ctx->device;
vdctx->get_proc_address = dev_ctx->get_proc_address;
if (avctx->hwaccel_flags & AV_HWACCEL_FLAG_IGNORE_LEVEL)
level = 0;
}
if (level < 0)
return AVERROR(ENOTSUP);
status = vdctx->get_proc_address(vdctx->device,
VDP_FUNC_ID_GET_INFORMATION_STRING,
&func);
if (status != VDP_STATUS_OK)
return vdpau_error(status);
else
info = func;
status = info(&info_string);
if (status != VDP_STATUS_OK)
return vdpau_error(status);
if (avctx->codec_id == AV_CODEC_ID_HEVC && strncmp(info_string, "NVIDIA ", 7) == 0 &&
!(avctx->hwaccel_flags & AV_HWACCEL_FLAG_ALLOW_PROFILE_MISMATCH)) {
av_log(avctx, AV_LOG_VERBOSE, "HEVC with NVIDIA VDPAU drivers is buggy, skipping.\n");
return AVERROR(ENOTSUP);
}
status = vdctx->get_proc_address(vdctx->device,
VDP_FUNC_ID_VIDEO_SURFACE_QUERY_CAPABILITIES,
&func);
if (status != VDP_STATUS_OK)
return vdpau_error(status);
else
surface_query_caps = func;
status = surface_query_caps(vdctx->device, type, &supported,
&max_width, &max_height);
if (status != VDP_STATUS_OK)
return vdpau_error(status);
if (supported != VDP_TRUE ||
max_width < width || max_height < height)
return AVERROR(ENOTSUP);
status = vdctx->get_proc_address(vdctx->device,
VDP_FUNC_ID_DECODER_QUERY_CAPABILITIES,
&func);
if (status != VDP_STATUS_OK)
return vdpau_error(status);
else
decoder_query_caps = func;
status = decoder_query_caps(vdctx->device, profile, &supported, &max_level,
&max_mb, &max_width, &max_height);
#ifdef VDP_DECODER_PROFILE_H264_CONSTRAINED_BASELINE
if ((status != VDP_STATUS_OK || supported != VDP_TRUE) && profile == VDP_DECODER_PROFILE_H264_CONSTRAINED_BASELINE) {
profile = VDP_DECODER_PROFILE_H264_MAIN;
status = decoder_query_caps(vdctx->device, profile, &supported,
&max_level, &max_mb,
&max_width, &max_height);
}
#endif
if (status != VDP_STATUS_OK)
return vdpau_error(status);
if (supported != VDP_TRUE || max_level < level ||
max_width < width || max_height < height)
return AVERROR(ENOTSUP);
status = vdctx->get_proc_address(vdctx->device, VDP_FUNC_ID_DECODER_CREATE,
&func);
if (status != VDP_STATUS_OK)
return vdpau_error(status);
else
create = func;
status = vdctx->get_proc_address(vdctx->device, VDP_FUNC_ID_DECODER_RENDER,
&func);
if (status != VDP_STATUS_OK)
return vdpau_error(status);
else
vdctx->render = func;
status = create(vdctx->device, profile, width, height, avctx->refs,
&vdctx->decoder);
if (status == VDP_STATUS_OK) {
vdctx->width = avctx->coded_width;
vdctx->height = avctx->coded_height;
}
return vdpau_error(status);
} | 27,134 |
FFmpeg | 4c439f6e3999ae534991ecde943e45b00c80b8d2 | 1 | static int idcin_probe(AVProbeData *p)
{
unsigned int number, sample_rate;
/*
* This is what you could call a "probabilistic" file check: id CIN
* files don't have a definite file signature. In lieu of such a marker,
* perform sanity checks on the 5 32-bit header fields:
* width, height: greater than 0, less than or equal to 1024
* audio sample rate: greater than or equal to 8000, less than or
* equal to 48000, or 0 for no audio
* audio sample width (bytes/sample): 0 for no audio, or 1 or 2
* audio channels: 0 for no audio, or 1 or 2
*/
/* check we have enough data to do all checks, otherwise the
0-padding may cause a wrong recognition */
if (p->buf_size < 20)
return 0;
/* check the video width */
number = AV_RL32(&p->buf[0]);
if ((number == 0) || (number > 1024))
return 0;
/* check the video height */
number = AV_RL32(&p->buf[4]);
if ((number == 0) || (number > 1024))
return 0;
/* check the audio sample rate */
sample_rate = AV_RL32(&p->buf[8]);
if (sample_rate && (sample_rate < 8000 || sample_rate > 48000))
return 0;
/* check the audio bytes/sample */
number = AV_RL32(&p->buf[12]);
if (number > 2 || sample_rate && !number)
return 0;
/* check the audio channels */
number = AV_RL32(&p->buf[16]);
if (number > 2 || sample_rate && !number)
return 0;
/* return half certainty since this check is a bit sketchy */
return AVPROBE_SCORE_EXTENSION;
}
| 27,136 |
FFmpeg | 9f0eaf792a8560a089643489403e549c30fb3170 | 0 | static inline int hpel_motion(MpegEncContext *s,
uint8_t *dest, uint8_t *src,
int src_x, int src_y,
op_pixels_func *pix_op,
int motion_x, int motion_y)
{
int dxy = 0;
int emu = 0;
src_x += motion_x >> 1;
src_y += motion_y >> 1;
/* WARNING: do not forget half pels */
src_x = av_clip(src_x, -16, s->width); // FIXME unneeded for emu?
if (src_x != s->width)
dxy |= motion_x & 1;
src_y = av_clip(src_y, -16, s->height);
if (src_y != s->height)
dxy |= (motion_y & 1) << 1;
src += src_y * s->linesize + src_x;
if ((unsigned)src_x > FFMAX(s->h_edge_pos - (motion_x & 1) - 8, 0) ||
(unsigned)src_y > FFMAX(s->v_edge_pos - (motion_y & 1) - 8, 0)) {
s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
s->linesize, s->linesize,
9, 9,
src_x, src_y,
s->h_edge_pos, s->v_edge_pos);
src = s->edge_emu_buffer;
emu = 1;
}
pix_op[dxy](dest, src, s->linesize, 8);
return emu;
}
| 27,138 |
FFmpeg | 0058584580b87feb47898e60e4b80c7f425882ad | 0 | static inline void downmix_dualmono_to_mono(float *samples)
{
int i;
for (i = 0; i < 256; i++) {
samples[i] += samples[i + 256];
samples[i + 256] = 0;
}
}
| 27,139 |
qemu | 33e66b86d89040f0a9e99aa53deb74ce8936a649 | 1 | SCSIDevice *scsi_bus_legacy_add_drive(SCSIBus *bus, DriveInfo *dinfo, int unit)
{
const char *driver;
DeviceState *dev;
driver = bdrv_is_sg(dinfo->bdrv) ? "scsi-generic" : "scsi-disk";
dev = qdev_create(&bus->qbus, driver);
qdev_prop_set_uint32(dev, "scsi-id", unit);
qdev_prop_set_drive(dev, "drive", dinfo);
qdev_init(dev);
return DO_UPCAST(SCSIDevice, qdev, dev);
}
| 27,140 |
FFmpeg | 6892d145a0c80249bd61ee7dd31ec851c5076bcd | 1 | static int film_read_close(AVFormatContext *s)
{
FilmDemuxContext *film = s->priv_data;
av_free(film->sample_table);
av_free(film->stereo_buffer);
return 0;
}
| 27,141 |
FFmpeg | 220b24c7c97dc033ceab1510549f66d0e7b52ef1 | 1 | static av_cold int libschroedinger_decode_init(AVCodecContext *avctx)
{
SchroDecoderParams *p_schro_params = avctx->priv_data;
/* First of all, initialize our supporting libraries. */
schro_init();
schro_debug_set_level(avctx->debug);
p_schro_params->decoder = schro_decoder_new();
schro_decoder_set_skip_ratio(p_schro_params->decoder, 1);
if (!p_schro_params->decoder)
return -1;
/* Initialize the decoded frame queue. */
ff_schro_queue_init(&p_schro_params->dec_frame_queue);
return 0;
}
| 27,143 |
FFmpeg | 7992bdbeb4ba72a9d28e72acc2b3bc0d198401ec | 1 | static void update_stream_timings(AVFormatContext *ic)
{
int64_t start_time, start_time1, start_time_text, end_time, end_time1;
int64_t duration, duration1, filesize;
int i;
AVStream *st;
AVProgram *p;
start_time = INT64_MAX;
start_time_text = INT64_MAX;
end_time = INT64_MIN;
duration = INT64_MIN;
for(i = 0;i < ic->nb_streams; i++) {
st = ic->streams[i];
if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE || st->codec->codec_type == AVMEDIA_TYPE_DATA) {
if (start_time1 < start_time_text)
start_time_text = start_time1;
} else
start_time = FFMIN(start_time, start_time1);
end_time1 = AV_NOPTS_VALUE;
if (st->duration != AV_NOPTS_VALUE) {
end_time1 = start_time1
+ av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
end_time = FFMAX(end_time, end_time1);
}
for(p = NULL; (p = av_find_program_from_stream(ic, p, i)); ){
if(p->start_time == AV_NOPTS_VALUE || p->start_time > start_time1)
p->start_time = start_time1;
if(p->end_time < end_time1)
p->end_time = end_time1;
}
}
if (st->duration != AV_NOPTS_VALUE) {
duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
duration = FFMAX(duration, duration1);
}
}
if (start_time == INT64_MAX || (start_time > start_time_text && start_time - start_time_text < AV_TIME_BASE))
start_time = start_time_text;
else if(start_time > start_time_text)
av_log(ic, AV_LOG_VERBOSE, "Ignoring outlier non primary stream starttime %f\n", start_time_text / (float)AV_TIME_BASE);
if (start_time != INT64_MAX) {
ic->start_time = start_time;
if (end_time != INT64_MIN) {
if (ic->nb_programs) {
for (i=0; i<ic->nb_programs; i++) {
p = ic->programs[i];
if(p->start_time != AV_NOPTS_VALUE && p->end_time > p->start_time)
duration = FFMAX(duration, p->end_time - p->start_time);
}
} else
duration = FFMAX(duration, end_time - start_time);
}
}
if (duration != INT64_MIN && duration > 0 && ic->duration == AV_NOPTS_VALUE) {
ic->duration = duration;
}
if (ic->pb && (filesize = avio_size(ic->pb)) > 0 && ic->duration != AV_NOPTS_VALUE) {
/* compute the bitrate */
ic->bit_rate = (double)filesize * 8.0 * AV_TIME_BASE /
(double)ic->duration;
}
}
| 27,144 |
qemu | 36bcac16fdd6ecb75314db06171f54dcd400ab8c | 1 | static int sd_open(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
{
int ret, fd;
uint32_t vid = 0;
BDRVSheepdogState *s = bs->opaque;
char vdi[SD_MAX_VDI_LEN], tag[SD_MAX_VDI_TAG_LEN];
uint32_t snapid;
char *buf = NULL;
QemuOpts *opts;
Error *local_err = NULL;
const char *filename;
s->bs = bs;
s->aio_context = bdrv_get_aio_context(bs);
opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
qemu_opts_absorb_qdict(opts, options, &local_err);
if (local_err) {
error_propagate(errp, local_err);
ret = -EINVAL;
goto err_no_fd;
}
filename = qemu_opt_get(opts, "filename");
QLIST_INIT(&s->inflight_aio_head);
QLIST_INIT(&s->failed_aio_head);
QLIST_INIT(&s->inflight_aiocb_head);
s->fd = -1;
memset(vdi, 0, sizeof(vdi));
memset(tag, 0, sizeof(tag));
if (strstr(filename, "://")) {
ret = sd_parse_uri(s, filename, vdi, &snapid, tag);
} else {
ret = parse_vdiname(s, filename, vdi, &snapid, tag);
}
if (ret < 0) {
error_setg(errp, "Can't parse filename");
goto err_no_fd;
}
s->fd = get_sheep_fd(s, errp);
if (s->fd < 0) {
ret = s->fd;
goto err_no_fd;
}
ret = find_vdi_name(s, vdi, snapid, tag, &vid, true, errp);
if (ret) {
goto err;
}
/*
* QEMU block layer emulates writethrough cache as 'writeback + flush', so
* we always set SD_FLAG_CMD_CACHE (writeback cache) as default.
*/
s->cache_flags = SD_FLAG_CMD_CACHE;
if (flags & BDRV_O_NOCACHE) {
s->cache_flags = SD_FLAG_CMD_DIRECT;
}
s->discard_supported = true;
if (snapid || tag[0] != '\0') {
DPRINTF("%" PRIx32 " snapshot inode was open.\n", vid);
s->is_snapshot = true;
}
fd = connect_to_sdog(s, errp);
if (fd < 0) {
ret = fd;
goto err;
}
buf = g_malloc(SD_INODE_SIZE);
ret = read_object(fd, s->bs, buf, vid_to_vdi_oid(vid),
0, SD_INODE_SIZE, 0, s->cache_flags);
closesocket(fd);
if (ret) {
error_setg(errp, "Can't read snapshot inode");
goto err;
}
memcpy(&s->inode, buf, sizeof(s->inode));
bs->total_sectors = s->inode.vdi_size / BDRV_SECTOR_SIZE;
pstrcpy(s->name, sizeof(s->name), vdi);
qemu_co_mutex_init(&s->lock);
qemu_co_queue_init(&s->overlapping_queue);
qemu_opts_del(opts);
g_free(buf);
return 0;
err:
aio_set_fd_handler(bdrv_get_aio_context(bs), s->fd,
false, NULL, NULL, NULL, NULL);
closesocket(s->fd);
err_no_fd:
qemu_opts_del(opts);
g_free(buf);
return ret;
}
| 27,145 |
qemu | 6cec5487990bf3f1f22b3fcb871978255e92ae0d | 1 | static void pixel_format_message (VncState *vs) {
char pad[3] = { 0, 0, 0 };
vnc_write_u8(vs, vs->depth * 8); /* bits-per-pixel */
if (vs->depth == 4) vnc_write_u8(vs, 24); /* depth */
else vnc_write_u8(vs, vs->depth * 8); /* depth */
#ifdef WORDS_BIGENDIAN
vnc_write_u8(vs, 1); /* big-endian-flag */
#else
vnc_write_u8(vs, 0); /* big-endian-flag */
#endif
vnc_write_u8(vs, 1); /* true-color-flag */
if (vs->depth == 4) {
vnc_write_u16(vs, 0xFF); /* red-max */
vnc_write_u16(vs, 0xFF); /* green-max */
vnc_write_u16(vs, 0xFF); /* blue-max */
vnc_write_u8(vs, 16); /* red-shift */
vnc_write_u8(vs, 8); /* green-shift */
vnc_write_u8(vs, 0); /* blue-shift */
vs->send_hextile_tile = send_hextile_tile_32;
} else if (vs->depth == 2) {
vnc_write_u16(vs, 31); /* red-max */
vnc_write_u16(vs, 63); /* green-max */
vnc_write_u16(vs, 31); /* blue-max */
vnc_write_u8(vs, 11); /* red-shift */
vnc_write_u8(vs, 5); /* green-shift */
vnc_write_u8(vs, 0); /* blue-shift */
vs->send_hextile_tile = send_hextile_tile_16;
} else if (vs->depth == 1) {
/* XXX: change QEMU pixel 8 bit pixel format to match the VNC one ? */
vnc_write_u16(vs, 7); /* red-max */
vnc_write_u16(vs, 7); /* green-max */
vnc_write_u16(vs, 3); /* blue-max */
vnc_write_u8(vs, 5); /* red-shift */
vnc_write_u8(vs, 2); /* green-shift */
vnc_write_u8(vs, 0); /* blue-shift */
vs->send_hextile_tile = send_hextile_tile_8;
}
vs->client_red_max = vs->server_red_max;
vs->client_green_max = vs->server_green_max;
vs->client_blue_max = vs->server_blue_max;
vs->client_red_shift = vs->server_red_shift;
vs->client_green_shift = vs->server_green_shift;
vs->client_blue_shift = vs->server_blue_shift;
vs->pix_bpp = vs->depth * 8;
vs->write_pixels = vnc_write_pixels_copy;
vnc_write(vs, pad, 3); /* padding */
}
| 27,146 |
FFmpeg | 4d87001096ff1d4e3ee6f88f8caddbd8ccb2c816 | 1 | static int decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
{
VP56RangeCoder *c = &s->c;
int header_size, hscale, vscale, i, j, k, l, m, ret;
int width = s->avctx->width;
int height = s->avctx->height;
s->keyframe = !(buf[0] & 1);
s->profile = (buf[0]>>1) & 7;
s->invisible = !(buf[0] & 0x10);
header_size = AV_RL24(buf) >> 5;
buf += 3;
buf_size -= 3;
if (s->profile > 3)
av_log(s->avctx, AV_LOG_WARNING, "Unknown profile %d\n", s->profile);
if (!s->profile)
memcpy(s->put_pixels_tab, s->vp8dsp.put_vp8_epel_pixels_tab, sizeof(s->put_pixels_tab));
else // profile 1-3 use bilinear, 4+ aren't defined so whatever
memcpy(s->put_pixels_tab, s->vp8dsp.put_vp8_bilinear_pixels_tab, sizeof(s->put_pixels_tab));
if (header_size > buf_size - 7*s->keyframe) {
av_log(s->avctx, AV_LOG_ERROR, "Header size larger than data provided\n");
return AVERROR_INVALIDDATA;
}
if (s->keyframe) {
if (AV_RL24(buf) != 0x2a019d) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid start code 0x%x\n", AV_RL24(buf));
return AVERROR_INVALIDDATA;
}
width = AV_RL16(buf+3) & 0x3fff;
height = AV_RL16(buf+5) & 0x3fff;
hscale = buf[4] >> 6;
vscale = buf[6] >> 6;
buf += 7;
buf_size -= 7;
if (hscale || vscale)
av_log_missing_feature(s->avctx, "Upscaling", 1);
s->update_golden = s->update_altref = VP56_FRAME_CURRENT;
for (i = 0; i < 4; i++)
for (j = 0; j < 16; j++)
memcpy(s->prob->token[i][j], vp8_token_default_probs[i][vp8_coeff_band[j]],
sizeof(s->prob->token[i][j]));
memcpy(s->prob->pred16x16, vp8_pred16x16_prob_inter, sizeof(s->prob->pred16x16));
memcpy(s->prob->pred8x8c , vp8_pred8x8c_prob_inter , sizeof(s->prob->pred8x8c));
memcpy(s->prob->mvc , vp8_mv_default_prob , sizeof(s->prob->mvc));
memset(&s->segmentation, 0, sizeof(s->segmentation));
}
if (!s->macroblocks_base || /* first frame */
width != s->avctx->width || height != s->avctx->height) {
if ((ret = update_dimensions(s, width, height)) < 0)
return ret;
}
ff_vp56_init_range_decoder(c, buf, header_size);
buf += header_size;
buf_size -= header_size;
if (s->keyframe) {
if (vp8_rac_get(c))
av_log(s->avctx, AV_LOG_WARNING, "Unspecified colorspace\n");
vp8_rac_get(c); // whether we can skip clamping in dsp functions
}
if ((s->segmentation.enabled = vp8_rac_get(c)))
parse_segment_info(s);
else
s->segmentation.update_map = 0; // FIXME: move this to some init function?
s->filter.simple = vp8_rac_get(c);
s->filter.level = vp8_rac_get_uint(c, 6);
s->filter.sharpness = vp8_rac_get_uint(c, 3);
if ((s->lf_delta.enabled = vp8_rac_get(c)))
if (vp8_rac_get(c))
update_lf_deltas(s);
if (setup_partitions(s, buf, buf_size)) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid partitions\n");
return AVERROR_INVALIDDATA;
}
get_quants(s);
if (!s->keyframe) {
update_refs(s);
s->sign_bias[VP56_FRAME_GOLDEN] = vp8_rac_get(c);
s->sign_bias[VP56_FRAME_GOLDEN2 /* altref */] = vp8_rac_get(c);
}
// if we aren't saving this frame's probabilities for future frames,
// make a copy of the current probabilities
if (!(s->update_probabilities = vp8_rac_get(c)))
s->prob[1] = s->prob[0];
s->update_last = s->keyframe || vp8_rac_get(c);
for (i = 0; i < 4; i++)
for (j = 0; j < 8; j++)
for (k = 0; k < 3; k++)
for (l = 0; l < NUM_DCT_TOKENS-1; l++)
if (vp56_rac_get_prob_branchy(c, vp8_token_update_probs[i][j][k][l])) {
int prob = vp8_rac_get_uint(c, 8);
for (m = 0; vp8_coeff_band_indexes[j][m] >= 0; m++)
s->prob->token[i][vp8_coeff_band_indexes[j][m]][k][l] = prob;
}
if ((s->mbskip_enabled = vp8_rac_get(c)))
s->prob->mbskip = vp8_rac_get_uint(c, 8);
if (!s->keyframe) {
s->prob->intra = vp8_rac_get_uint(c, 8);
s->prob->last = vp8_rac_get_uint(c, 8);
s->prob->golden = vp8_rac_get_uint(c, 8);
if (vp8_rac_get(c))
for (i = 0; i < 4; i++)
s->prob->pred16x16[i] = vp8_rac_get_uint(c, 8);
if (vp8_rac_get(c))
for (i = 0; i < 3; i++)
s->prob->pred8x8c[i] = vp8_rac_get_uint(c, 8);
// 17.2 MV probability update
for (i = 0; i < 2; i++)
for (j = 0; j < 19; j++)
if (vp56_rac_get_prob_branchy(c, vp8_mv_update_prob[i][j]))
s->prob->mvc[i][j] = vp8_rac_get_nn(c);
}
return 0;
}
| 27,147 |
qemu | 1c02e2a17104fe7fc11893125864dc0daf1e6d5b | 1 | static int load_refcount_block(BlockDriverState *bs,
int64_t refcount_block_offset)
{
BDRVQcowState *s = bs->opaque;
int ret;
if (cache_refcount_updates) {
ret = write_refcount_block(bs);
if (ret < 0) {
return ret;
}
}
BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_LOAD);
ret = bdrv_pread(bs->file, refcount_block_offset, s->refcount_block_cache,
s->cluster_size);
if (ret < 0) {
return ret;
}
s->refcount_block_cache_offset = refcount_block_offset;
return 0;
} | 27,149 |
qemu | 0b8b8753e4d94901627b3e86431230f2319215c4 | 1 | void qemu_coroutine_enter(Coroutine *co, void *opaque)
{
Coroutine *self = qemu_coroutine_self();
CoroutineAction ret;
trace_qemu_coroutine_enter(self, co, opaque);
if (co->caller) {
fprintf(stderr, "Co-routine re-entered recursively\n");
abort();
}
co->caller = self;
co->entry_arg = opaque;
ret = qemu_coroutine_switch(self, co, COROUTINE_ENTER);
qemu_co_queue_run_restart(co);
switch (ret) {
case COROUTINE_YIELD:
return;
case COROUTINE_TERMINATE:
trace_qemu_coroutine_terminate(co);
coroutine_delete(co);
return;
default:
abort();
}
}
| 27,150 |
FFmpeg | 7444cf9a9c0b8b2bba8198af2823521c654a48f4 | 1 | static int imc_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame_ptr, AVPacket *avpkt)
{
AVFrame *frame = data;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
int ret, i;
IMCContext *q = avctx->priv_data;
LOCAL_ALIGNED_16(uint16_t, buf16, [IMC_BLOCK_SIZE / 2]);
if (buf_size < IMC_BLOCK_SIZE * avctx->channels) {
av_log(avctx, AV_LOG_ERROR, "frame too small!\n");
return AVERROR_INVALIDDATA;
}
/* get output buffer */
frame->nb_samples = COEFFS;
if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
return ret;
for (i = 0; i < avctx->channels; i++) {
q->out_samples = (float *)frame->extended_data[i];
q->bdsp.bswap16_buf(buf16, (const uint16_t *) buf, IMC_BLOCK_SIZE / 2);
init_get_bits(&q->gb, (const uint8_t*)buf16, IMC_BLOCK_SIZE * 8);
buf += IMC_BLOCK_SIZE;
if ((ret = imc_decode_block(avctx, q, i)) < 0)
return ret;
}
if (avctx->channels == 2) {
q->fdsp.butterflies_float((float *)frame->extended_data[0],
(float *)frame->extended_data[1], COEFFS);
}
*got_frame_ptr = 1;
return IMC_BLOCK_SIZE * avctx->channels;
}
| 27,152 |
FFmpeg | 073c2593c9f0aa4445a6fc1b9b24e6e52a8cc2c1 | 1 | int init_vlc(VLC *vlc, int nb_bits, int nb_codes,
const void *bits, int bits_wrap, int bits_size,
const void *codes, int codes_wrap, int codes_size)
{
vlc->bits = nb_bits;
vlc->table = NULL;
vlc->table_allocated = 0;
vlc->table_size = 0;
#ifdef DEBUG_VLC
printf("build table nb_codes=%d\n", nb_codes);
#endif
if (build_table(vlc, nb_bits, nb_codes,
bits, bits_wrap, bits_size,
codes, codes_wrap, codes_size,
0, 0) < 0) {
av_free(vlc->table);
return -1;
}
return 0;
}
| 27,153 |
FFmpeg | 31fdf3065daceb31e12fd26a367445676d761180 | 1 | static void filter(AVFilterContext *ctx)
{
IDETContext *idet = ctx->priv;
int y, i;
int64_t alpha[2]={0};
int64_t delta=0;
Type type, best_type;
int match = 0;
for (i = 0; i < idet->csp->nb_components; i++) {
int w = idet->cur->video->w;
int h = idet->cur->video->h;
int refs = idet->cur->linesize[i];
if (i && i<3) {
w >>= idet->csp->log2_chroma_w;
h >>= idet->csp->log2_chroma_h;
}
for (y = 2; y < h - 2; y++) {
uint8_t *prev = &idet->prev->data[i][y*refs];
uint8_t *cur = &idet->cur ->data[i][y*refs];
uint8_t *next = &idet->next->data[i][y*refs];
alpha[ y &1] += idet->filter_line(cur-refs, prev, cur+refs, w);
alpha[(y^1)&1] += idet->filter_line(cur-refs, next, cur+refs, w);
delta += idet->filter_line(cur-refs, cur, cur+refs, w);
}
}
if (alpha[0] / (float)alpha[1] > idet->interlace_threshold){
type = TFF;
}else if(alpha[1] / (float)alpha[0] > idet->interlace_threshold){
type = BFF;
}else if(alpha[1] / (float)delta > idet->progressive_threshold){
type = PROGRSSIVE;
}else{
type = UNDETERMINED;
}
memmove(idet->history+1, idet->history, HIST_SIZE-1);
idet->history[0] = type;
best_type = UNDETERMINED;
for(i=0; i<HIST_SIZE; i++){
if(idet->history[i] != UNDETERMINED){
if(best_type == UNDETERMINED)
best_type = idet->history[i];
if(idet->history[i] == best_type) {
match++;
}else{
match=0;
break;
}
}
}
if(idet->last_type == UNDETERMINED){
if(match ) idet->last_type = best_type;
}else{
if(match>2) idet->last_type = best_type;
}
if (idet->last_type == TFF){
idet->cur->video->top_field_first = 1;
idet->cur->video->interlaced = 1;
}else if(idet->last_type == BFF){
idet->cur->video->top_field_first = 0;
idet->cur->video->interlaced = 1;
}else if(idet->last_type == PROGRSSIVE){
idet->cur->video->interlaced = 0;
}
idet->prestat [ type] ++;
idet->poststat[idet->last_type] ++;
av_log(ctx, AV_LOG_DEBUG, "Single frame:%s, Multi frame:%s\n", type2str(type), type2str(idet->last_type));
}
| 27,154 |
qemu | 45bbbb466cf4a6280076ea5a51f67ef5bedee345 | 1 | static void div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
uint64_t q, r, a1, a0;
int i, qb;
a0 = *plow;
a1 = *phigh;
if (a1 == 0) {
q = a0 / b;
r = a0 % b;
*plow = q;
*phigh = r;
} else {
/* XXX: use a better algorithm */
for(i = 0; i < 64; i++) {
a1 = (a1 << 1) | (a0 >> 63);
if (a1 >= b) {
a1 -= b;
qb = 1;
} else {
qb = 0;
}
a0 = (a0 << 1) | qb;
}
#if defined(DEBUG_MULDIV)
printf("div: 0x%016llx%016llx / 0x%016llx: q=0x%016llx r=0x%016llx\n",
*phigh, *plow, b, a0, a1);
#endif
*plow = a0;
*phigh = a1;
}
}
| 27,155 |
FFmpeg | 01ecb7172b684f1c4b3e748f95c5a9a494ca36ec | 1 | static float get_band_cost_UPAIR12_mips(struct AACEncContext *s,
PutBitContext *pb, const float *in,
const float *scaled, int size, int scale_idx,
int cb, const float lambda, const float uplim,
int *bits)
{
const float Q34 = ff_aac_pow34sf_tab[POW_SF2_ZERO - scale_idx + SCALE_ONE_POS - SCALE_DIV_512];
const float IQ = ff_aac_pow2sf_tab [POW_SF2_ZERO + scale_idx - SCALE_ONE_POS + SCALE_DIV_512];
int i;
float cost = 0;
int qc1, qc2, qc3, qc4;
int curbits = 0;
uint8_t *p_bits = (uint8_t *)ff_aac_spectral_bits[cb-1];
float *p_codes = (float *)ff_aac_codebook_vectors[cb-1];
for (i = 0; i < size; i += 4) {
const float *vec, *vec2;
int curidx, curidx2;
int sign1, count1, sign2, count2;
int *in_int = (int *)&in[i];
float *in_pos = (float *)&in[i];
float di0, di1, di2, di3;
int t0, t1, t2, t3, t4;
qc1 = scaled[i ] * Q34 + ROUND_STANDARD;
qc2 = scaled[i+1] * Q34 + ROUND_STANDARD;
qc3 = scaled[i+2] * Q34 + ROUND_STANDARD;
qc4 = scaled[i+3] * Q34 + ROUND_STANDARD;
__asm__ volatile (
".set push \n\t"
".set noreorder \n\t"
"ori %[t4], $zero, 12 \n\t"
"ori %[sign1], $zero, 0 \n\t"
"ori %[sign2], $zero, 0 \n\t"
"slt %[t0], %[t4], %[qc1] \n\t"
"slt %[t1], %[t4], %[qc2] \n\t"
"slt %[t2], %[t4], %[qc3] \n\t"
"slt %[t3], %[t4], %[qc4] \n\t"
"movn %[qc1], %[t4], %[t0] \n\t"
"movn %[qc2], %[t4], %[t1] \n\t"
"movn %[qc3], %[t4], %[t2] \n\t"
"movn %[qc4], %[t4], %[t3] \n\t"
"lw %[t0], 0(%[in_int]) \n\t"
"lw %[t1], 4(%[in_int]) \n\t"
"lw %[t2], 8(%[in_int]) \n\t"
"lw %[t3], 12(%[in_int]) \n\t"
"slt %[t0], %[t0], $zero \n\t"
"movn %[sign1], %[t0], %[qc1] \n\t"
"slt %[t2], %[t2], $zero \n\t"
"movn %[sign2], %[t2], %[qc3] \n\t"
"slt %[t1], %[t1], $zero \n\t"
"sll %[t0], %[sign1], 1 \n\t"
"or %[t0], %[t0], %[t1] \n\t"
"movn %[sign1], %[t0], %[qc2] \n\t"
"slt %[t3], %[t3], $zero \n\t"
"sll %[t0], %[sign2], 1 \n\t"
"or %[t0], %[t0], %[t3] \n\t"
"movn %[sign2], %[t0], %[qc4] \n\t"
"slt %[count1], $zero, %[qc1] \n\t"
"slt %[t1], $zero, %[qc2] \n\t"
"slt %[count2], $zero, %[qc3] \n\t"
"slt %[t2], $zero, %[qc4] \n\t"
"addu %[count1], %[count1], %[t1] \n\t"
"addu %[count2], %[count2], %[t2] \n\t"
".set pop \n\t"
: [qc1]"+r"(qc1), [qc2]"+r"(qc2),
[qc3]"+r"(qc3), [qc4]"+r"(qc4),
[sign1]"=&r"(sign1), [count1]"=&r"(count1),
[sign2]"=&r"(sign2), [count2]"=&r"(count2),
[t0]"=&r"(t0), [t1]"=&r"(t1), [t2]"=&r"(t2), [t3]"=&r"(t3),
[t4]"=&r"(t4)
: [in_int]"r"(in_int)
: "memory"
);
curidx = 13 * qc1;
curidx += qc2;
curidx2 = 13 * qc3;
curidx2 += qc4;
curbits += p_bits[curidx];
curbits += p_bits[curidx2];
curbits += upair12_sign_bits[curidx];
curbits += upair12_sign_bits[curidx2];
vec = &p_codes[curidx*2];
vec2 = &p_codes[curidx2*2];
__asm__ volatile (
".set push \n\t"
".set noreorder \n\t"
"lwc1 %[di0], 0(%[in_pos]) \n\t"
"lwc1 %[di1], 4(%[in_pos]) \n\t"
"lwc1 %[di2], 8(%[in_pos]) \n\t"
"lwc1 %[di3], 12(%[in_pos]) \n\t"
"abs.s %[di0], %[di0] \n\t"
"abs.s %[di1], %[di1] \n\t"
"abs.s %[di2], %[di2] \n\t"
"abs.s %[di3], %[di3] \n\t"
"lwc1 $f0, 0(%[vec]) \n\t"
"lwc1 $f1, 4(%[vec]) \n\t"
"lwc1 $f2, 0(%[vec2]) \n\t"
"lwc1 $f3, 4(%[vec2]) \n\t"
"nmsub.s %[di0], %[di0], $f0, %[IQ] \n\t"
"nmsub.s %[di1], %[di1], $f1, %[IQ] \n\t"
"nmsub.s %[di2], %[di2], $f2, %[IQ] \n\t"
"nmsub.s %[di3], %[di3], $f3, %[IQ] \n\t"
".set pop \n\t"
: [di0]"=&f"(di0), [di1]"=&f"(di1),
[di2]"=&f"(di2), [di3]"=&f"(di3)
: [in_pos]"r"(in_pos), [vec]"r"(vec),
[vec2]"r"(vec2), [IQ]"f"(IQ)
: "$f0", "$f1", "$f2", "$f3",
"memory"
);
cost += di0 * di0 + di1 * di1
+ di2 * di2 + di3 * di3;
}
if (bits)
*bits = curbits;
return cost * lambda + curbits;
}
| 27,156 |
FFmpeg | 56706ac0d5723cb549fec2602e798ab1bf6004cd | 1 | static int libopenjpeg_copy_unpacked16(AVCodecContext *avctx, const AVFrame *frame, opj_image_t *image)
{
int compno;
int x;
int y;
int width;
int height;
int *image_line;
int frame_index;
const int numcomps = image->numcomps;
uint16_t *frame_ptr;
for (compno = 0; compno < numcomps; ++compno) {
if (image->comps[compno].w > frame->linesize[compno]) {
av_log(avctx, AV_LOG_ERROR, "Error: frame's linesize is too small for the image\n");
return 0;
}
}
for (compno = 0; compno < numcomps; ++compno) {
width = avctx->width / image->comps[compno].dx;
height = avctx->height / image->comps[compno].dy;
frame_ptr = (uint16_t *)frame->data[compno];
for (y = 0; y < height; ++y) {
image_line = image->comps[compno].data + y * image->comps[compno].w;
frame_index = y * (frame->linesize[compno] / 2);
for (x = 0; x < width; ++x)
image_line[x] = frame_ptr[frame_index++];
for (; x < image->comps[compno].w; ++x) {
image_line[x] = image_line[x - 1];
}
}
for (; y < image->comps[compno].h; ++y) {
image_line = image->comps[compno].data + y * image->comps[compno].w;
for (x = 0; x < image->comps[compno].w; ++x) {
image_line[x] = image_line[x - image->comps[compno].w];
}
}
}
return 1;
}
| 27,157 |
FFmpeg | 7f526efd17973ec6d2204f7a47b6923e2be31363 | 1 | inline static void RENAME(hcscale)(uint16_t *dst, int dstWidth, uint8_t *src1, uint8_t *src2,
int srcW, int xInc, int flags, int canMMX2BeUsed, int16_t *hChrFilter,
int16_t *hChrFilterPos, int hChrFilterSize, void *funnyUVCode,
int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter,
int32_t *mmx2FilterPos)
{
if(srcFormat==IMGFMT_YUY2)
{
RENAME(yuy2ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
src1= formatConvBuffer;
src2= formatConvBuffer+2048;
}
else if(srcFormat==IMGFMT_UYVY)
{
RENAME(uyvyToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
src1= formatConvBuffer;
src2= formatConvBuffer+2048;
}
else if(srcFormat==IMGFMT_BGR32)
{
RENAME(bgr32ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
src1= formatConvBuffer;
src2= formatConvBuffer+2048;
}
else if(srcFormat==IMGFMT_BGR24)
{
RENAME(bgr24ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
src1= formatConvBuffer;
src2= formatConvBuffer+2048;
}
else if(srcFormat==IMGFMT_BGR16)
{
RENAME(bgr16ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
src1= formatConvBuffer;
src2= formatConvBuffer+2048;
}
else if(srcFormat==IMGFMT_BGR15)
{
RENAME(bgr15ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
src1= formatConvBuffer;
src2= formatConvBuffer+2048;
}
else if(srcFormat==IMGFMT_RGB32)
{
RENAME(rgb32ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
src1= formatConvBuffer;
src2= formatConvBuffer+2048;
}
else if(srcFormat==IMGFMT_RGB24)
{
RENAME(rgb24ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
src1= formatConvBuffer;
src2= formatConvBuffer+2048;
}
else if(isGray(srcFormat))
{
return;
}
#ifdef HAVE_MMX
// use the new MMX scaler if the mmx2 one can't be used (it's faster than the x86asm one)
if(!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
#else
if(!(flags&SWS_FAST_BILINEAR))
#endif
{
RENAME(hScale)(dst , dstWidth, src1, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
RENAME(hScale)(dst+2048, dstWidth, src2, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
}
else // Fast Bilinear upscale / crap downscale
{
#if defined(ARCH_X86) || defined(ARCH_X86_64)
#ifdef HAVE_MMX2
int i;
if(canMMX2BeUsed)
{
asm volatile(
"pxor %%mm7, %%mm7 \n\t"
"mov %0, %%"REG_c" \n\t"
"mov %1, %%"REG_D" \n\t"
"mov %2, %%"REG_d" \n\t"
"mov %3, %%"REG_b" \n\t"
"xor %%"REG_a", %%"REG_a" \n\t" // i
PREFETCH" (%%"REG_c") \n\t"
PREFETCH" 32(%%"REG_c") \n\t"
PREFETCH" 64(%%"REG_c") \n\t"
#ifdef ARCH_X86_64
#define FUNNY_UV_CODE \
"movl (%%"REG_b"), %%esi \n\t"\
"call *%4 \n\t"\
"movl (%%"REG_b", %%"REG_a"), %%esi\n\t"\
"add %%"REG_S", %%"REG_c" \n\t"\
"add %%"REG_a", %%"REG_D" \n\t"\
"xor %%"REG_a", %%"REG_a" \n\t"\
#else
#define FUNNY_UV_CODE \
"movl (%%"REG_b"), %%esi \n\t"\
"call *%4 \n\t"\
"addl (%%"REG_b", %%"REG_a"), %%"REG_c"\n\t"\
"add %%"REG_a", %%"REG_D" \n\t"\
"xor %%"REG_a", %%"REG_a" \n\t"\
#endif
FUNNY_UV_CODE
FUNNY_UV_CODE
FUNNY_UV_CODE
FUNNY_UV_CODE
"xor %%"REG_a", %%"REG_a" \n\t" // i
"mov %5, %%"REG_c" \n\t" // src
"mov %1, %%"REG_D" \n\t" // buf1
"add $4096, %%"REG_D" \n\t"
PREFETCH" (%%"REG_c") \n\t"
PREFETCH" 32(%%"REG_c") \n\t"
PREFETCH" 64(%%"REG_c") \n\t"
FUNNY_UV_CODE
FUNNY_UV_CODE
FUNNY_UV_CODE
FUNNY_UV_CODE
:: "m" (src1), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
"m" (funnyUVCode), "m" (src2)
: "%"REG_a, "%"REG_b, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
);
for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--)
{
// printf("%d %d %d\n", dstWidth, i, srcW);
dst[i] = src1[srcW-1]*128;
dst[i+2048] = src2[srcW-1]*128;
}
}
else
{
#endif
long xInc_shr16 = (long) (xInc >> 16);
int xInc_mask = xInc & 0xffff;
asm volatile(
"xor %%"REG_a", %%"REG_a" \n\t" // i
"xor %%"REG_b", %%"REG_b" \n\t" // xx
"xorl %%ecx, %%ecx \n\t" // 2*xalpha
".balign 16 \n\t"
"1: \n\t"
"mov %0, %%"REG_S" \n\t"
"movzbl (%%"REG_S", %%"REG_b"), %%edi \n\t" //src[xx]
"movzbl 1(%%"REG_S", %%"REG_b"), %%esi \n\t" //src[xx+1]
"subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
"imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
"shll $16, %%edi \n\t"
"addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
"mov %1, %%"REG_D" \n\t"
"shrl $9, %%esi \n\t"
"movw %%si, (%%"REG_D", %%"REG_a", 2)\n\t"
"movzbl (%5, %%"REG_b"), %%edi \n\t" //src[xx]
"movzbl 1(%5, %%"REG_b"), %%esi \n\t" //src[xx+1]
"subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
"imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
"shll $16, %%edi \n\t"
"addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
"mov %1, %%"REG_D" \n\t"
"shrl $9, %%esi \n\t"
"movw %%si, 4096(%%"REG_D", %%"REG_a", 2)\n\t"
"addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
"adc %3, %%"REG_b" \n\t" //xx+= xInc>>8 + carry
"add $1, %%"REG_a" \n\t"
"cmp %2, %%"REG_a" \n\t"
" jb 1b \n\t"
/* GCC-3.3 makes MPlayer crash on IA-32 machines when using "g" operand here,
which is needed to support GCC-4.0 */
#if defined(ARCH_X86_64) && ((__GNUC__ > 3) || ( __GNUC__ == 3 && __GNUC_MINOR__ >= 4))
:: "m" (src1), "m" (dst), "g" ((long)dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
#else
:: "m" (src1), "m" (dst), "m" ((long)dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
#endif
"r" (src2)
: "%"REG_a, "%"REG_b, "%ecx", "%"REG_D, "%esi"
);
#ifdef HAVE_MMX2
} //if MMX2 can't be used
#endif
#else
int i;
unsigned int xpos=0;
for(i=0;i<dstWidth;i++)
{
register unsigned int xx=xpos>>16;
register unsigned int xalpha=(xpos&0xFFFF)>>9;
dst[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha);
dst[i+2048]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha);
/* slower
dst[i]= (src1[xx]<<7) + (src1[xx+1] - src1[xx])*xalpha;
dst[i+2048]=(src2[xx]<<7) + (src2[xx+1] - src2[xx])*xalpha;
*/
xpos+=xInc;
}
#endif
}
}
| 27,158 |
FFmpeg | 243b9fea90aade8cf8197fb8f362ccc03c7f6295 | 1 | static int tak_read_header(AVFormatContext *s)
{
TAKDemuxContext *tc = s->priv_data;
AVIOContext *pb = s->pb;
GetBitContext gb;
AVStream *st;
uint8_t *buffer = NULL;
int ret;
st = avformat_new_stream(s, 0);
if (!st)
return AVERROR(ENOMEM);
st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
st->codec->codec_id = AV_CODEC_ID_TAK;
st->need_parsing = AVSTREAM_PARSE_FULL_RAW;
tc->mlast_frame = 0;
if (avio_rl32(pb) != MKTAG('t', 'B', 'a', 'K')) {
avio_seek(pb, -4, SEEK_CUR);
return 0;
}
while (!url_feof(pb)) {
enum TAKMetaDataType type;
int size;
type = avio_r8(pb) & 0x7f;
size = avio_rl24(pb);
switch (type) {
case TAK_METADATA_STREAMINFO:
case TAK_METADATA_LAST_FRAME:
case TAK_METADATA_ENCODER:
if (size <= 3)
return AVERROR_INVALIDDATA;
buffer = av_malloc(size - 3 + FF_INPUT_BUFFER_PADDING_SIZE);
if (!buffer)
return AVERROR(ENOMEM);
ffio_init_checksum(pb, tak_check_crc, 0xCE04B7U);
if (avio_read(pb, buffer, size - 3) != size - 3) {
av_freep(&buffer);
return AVERROR(EIO);
}
if (ffio_get_checksum(s->pb) != avio_rb24(pb)) {
av_log(s, AV_LOG_ERROR, "%d metadata block CRC error.\n", type);
if (s->error_recognition & AV_EF_EXPLODE) {
av_freep(&buffer);
return AVERROR_INVALIDDATA;
}
}
init_get_bits8(&gb, buffer, size - 3);
break;
case TAK_METADATA_MD5: {
uint8_t md5[16];
int i;
if (size != 19)
return AVERROR_INVALIDDATA;
ffio_init_checksum(pb, tak_check_crc, 0xCE04B7U);
avio_read(pb, md5, 16);
if (ffio_get_checksum(s->pb) != avio_rb24(pb)) {
av_log(s, AV_LOG_ERROR, "MD5 metadata block CRC error.\n");
if (s->error_recognition & AV_EF_EXPLODE)
return AVERROR_INVALIDDATA;
}
av_log(s, AV_LOG_VERBOSE, "MD5=");
for (i = 0; i < 16; i++)
av_log(s, AV_LOG_VERBOSE, "%02x", md5[i]);
av_log(s, AV_LOG_VERBOSE, "\n");
break;
}
case TAK_METADATA_END: {
int64_t curpos = avio_tell(pb);
if (pb->seekable) {
ff_ape_parse_tag(s);
avio_seek(pb, curpos, SEEK_SET);
}
tc->data_end += curpos;
return 0;
}
default:
ret = avio_skip(pb, size);
if (ret < 0)
return ret;
}
if (type == TAK_METADATA_STREAMINFO) {
TAKStreamInfo ti;
avpriv_tak_parse_streaminfo(&gb, &ti);
if (ti.samples > 0)
st->duration = ti.samples;
st->codec->bits_per_coded_sample = ti.bps;
if (ti.ch_layout)
st->codec->channel_layout = ti.ch_layout;
st->codec->sample_rate = ti.sample_rate;
st->codec->channels = ti.channels;
st->start_time = 0;
avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);
st->codec->extradata = buffer;
st->codec->extradata_size = size - 3;
buffer = NULL;
} else if (type == TAK_METADATA_LAST_FRAME) {
if (size != 11)
return AVERROR_INVALIDDATA;
tc->mlast_frame = 1;
tc->data_end = get_bits64(&gb, TAK_LAST_FRAME_POS_BITS) +
get_bits(&gb, TAK_LAST_FRAME_SIZE_BITS);
av_freep(&buffer);
} else if (type == TAK_METADATA_ENCODER) {
av_log(s, AV_LOG_VERBOSE, "encoder version: %0X\n",
get_bits_long(&gb, TAK_ENCODER_VERSION_BITS));
av_freep(&buffer);
}
}
return AVERROR_EOF;
} | 27,159 |
qemu | ea375f9ab8c76686dca0af8cb4f87a4eb569cad3 | 1 | static void cpu_pre_save(void *opaque)
{
CPUState *env = opaque;
int i;
cpu_synchronize_state(env);
/* FPU */
env->fpus_vmstate = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
env->fptag_vmstate = 0;
for(i = 0; i < 8; i++) {
env->fptag_vmstate |= ((!env->fptags[i]) << i);
}
#ifdef USE_X86LDOUBLE
env->fpregs_format_vmstate = 0;
#else
env->fpregs_format_vmstate = 1;
#endif
}
| 27,160 |
qemu | d87576e38df760ef1cb635197d51f207e2a8eda9 | 1 | static uint64_t ich_elrsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
GICv3CPUState *cs = icc_cs_from_env(env);
uint64_t value = 0;
int i;
for (i = 0; i < cs->num_list_regs; i++) {
uint64_t lr = cs->ich_lr_el2[i];
if ((lr & ICH_LR_EL2_STATE_MASK) == 0 &&
((lr & ICH_LR_EL2_HW) == 1 || (lr & ICH_LR_EL2_EOI) == 0)) {
value |= (1 << i);
}
}
trace_gicv3_ich_elrsr_read(gicv3_redist_affid(cs), value);
return value;
}
| 27,161 |
qemu | 72f0d0bf51362011c4d841a89fb8f5cfb16e0bf3 | 1 | static int mp_user_removexattr(FsContext *ctx,
const char *path, const char *name)
{
char *buffer;
int ret;
if (strncmp(name, "user.virtfs.", 12) == 0) {
/*
* Don't allow fetch of user.virtfs namespace
* in case of mapped security
*/
errno = EACCES;
return -1;
}
buffer = rpath(ctx, path);
ret = lremovexattr(buffer, name);
g_free(buffer);
return ret;
}
| 27,162 |
FFmpeg | 9d656110966fbdde0fd1d2e685f3ed3633ba3596 | 0 | static int decode_subframe_fixed(FLACContext *s, int channel, int pred_order)
{
int i;
av_log(s->avctx, AV_LOG_DEBUG, " SUBFRAME FIXED\n");
/* warm up samples */
av_log(s->avctx, AV_LOG_DEBUG, " warm up samples: %d\n", pred_order);
for (i = 0; i < pred_order; i++)
{
s->decoded[channel][i] = get_sbits(&s->gb, s->curr_bps);
// av_log(s->avctx, AV_LOG_DEBUG, " %d: %d\n", i, s->decoded[channel][i]);
}
if (decode_residuals(s, channel, pred_order) < 0)
return -1;
switch(pred_order)
{
case 0:
break;
case 1:
for (i = pred_order; i < s->blocksize; i++)
s->decoded[channel][i] += s->decoded[channel][i-1];
break;
case 2:
for (i = pred_order; i < s->blocksize; i++)
s->decoded[channel][i] += 2*s->decoded[channel][i-1]
- s->decoded[channel][i-2];
break;
case 3:
for (i = pred_order; i < s->blocksize; i++)
s->decoded[channel][i] += 3*s->decoded[channel][i-1]
- 3*s->decoded[channel][i-2]
+ s->decoded[channel][i-3];
break;
case 4:
for (i = pred_order; i < s->blocksize; i++)
s->decoded[channel][i] += 4*s->decoded[channel][i-1]
- 6*s->decoded[channel][i-2]
+ 4*s->decoded[channel][i-3]
- s->decoded[channel][i-4];
break;
default:
av_log(s->avctx, AV_LOG_ERROR, "illegal pred order %d\n", pred_order);
return -1;
}
return 0;
}
| 27,163 |
FFmpeg | 0634c5425306547e593bedbbbd2d982d7f0a27cf | 0 | static int aac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
const AVFrame *frame, int *got_packet_ptr)
{
AACEncContext *s = avctx->priv_data;
float **samples = s->planar_samples, *samples2, *la, *overlap;
ChannelElement *cpe;
SingleChannelElement *sce;
IndividualChannelStream *ics;
int i, its, ch, w, chans, tag, start_ch, ret, frame_bits;
int target_bits, rate_bits, too_many_bits, too_few_bits;
int ms_mode = 0, is_mode = 0, tns_mode = 0, pred_mode = 0;
int chan_el_counter[4];
FFPsyWindowInfo windows[AAC_MAX_CHANNELS];
if (s->last_frame == 2)
return 0;
/* add current frame to queue */
if (frame) {
if ((ret = ff_af_queue_add(&s->afq, frame)) < 0)
return ret;
}
copy_input_samples(s, frame);
if (s->psypp)
ff_psy_preprocess(s->psypp, s->planar_samples, s->channels);
if (!avctx->frame_number)
return 0;
start_ch = 0;
for (i = 0; i < s->chan_map[0]; i++) {
FFPsyWindowInfo* wi = windows + start_ch;
tag = s->chan_map[i+1];
chans = tag == TYPE_CPE ? 2 : 1;
cpe = &s->cpe[i];
for (ch = 0; ch < chans; ch++) {
float clip_avoidance_factor;
sce = &cpe->ch[ch];
ics = &sce->ics;
s->cur_channel = start_ch + ch;
overlap = &samples[s->cur_channel][0];
samples2 = overlap + 1024;
la = samples2 + (448+64);
if (!frame)
la = NULL;
if (tag == TYPE_LFE) {
wi[ch].window_type[0] = ONLY_LONG_SEQUENCE;
wi[ch].window_shape = 0;
wi[ch].num_windows = 1;
wi[ch].grouping[0] = 1;
/* Only the lowest 12 coefficients are used in a LFE channel.
* The expression below results in only the bottom 8 coefficients
* being used for 11.025kHz to 16kHz sample rates.
*/
ics->num_swb = s->samplerate_index >= 8 ? 1 : 3;
} else {
wi[ch] = s->psy.model->window(&s->psy, samples2, la, s->cur_channel,
ics->window_sequence[0]);
}
ics->window_sequence[1] = ics->window_sequence[0];
ics->window_sequence[0] = wi[ch].window_type[0];
ics->use_kb_window[1] = ics->use_kb_window[0];
ics->use_kb_window[0] = wi[ch].window_shape;
ics->num_windows = wi[ch].num_windows;
ics->swb_sizes = s->psy.bands [ics->num_windows == 8];
ics->num_swb = tag == TYPE_LFE ? ics->num_swb : s->psy.num_bands[ics->num_windows == 8];
ics->max_sfb = FFMIN(ics->max_sfb, ics->num_swb);
ics->swb_offset = wi[ch].window_type[0] == EIGHT_SHORT_SEQUENCE ?
ff_swb_offset_128 [s->samplerate_index]:
ff_swb_offset_1024[s->samplerate_index];
ics->tns_max_bands = wi[ch].window_type[0] == EIGHT_SHORT_SEQUENCE ?
ff_tns_max_bands_128 [s->samplerate_index]:
ff_tns_max_bands_1024[s->samplerate_index];
clip_avoidance_factor = 0.0f;
for (w = 0; w < ics->num_windows; w++)
ics->group_len[w] = wi[ch].grouping[w];
for (w = 0; w < ics->num_windows; w++) {
if (wi[ch].clipping[w] > CLIP_AVOIDANCE_FACTOR) {
ics->window_clipping[w] = 1;
clip_avoidance_factor = FFMAX(clip_avoidance_factor, wi[ch].clipping[w]);
} else {
ics->window_clipping[w] = 0;
}
}
if (clip_avoidance_factor > CLIP_AVOIDANCE_FACTOR) {
ics->clip_avoidance_factor = CLIP_AVOIDANCE_FACTOR / clip_avoidance_factor;
} else {
ics->clip_avoidance_factor = 1.0f;
}
apply_window_and_mdct(s, sce, overlap);
if (s->options.ltp && s->coder->update_ltp) {
s->coder->update_ltp(s, sce);
apply_window[sce->ics.window_sequence[0]](s->fdsp, sce, &sce->ltp_state[0]);
s->mdct1024.mdct_calc(&s->mdct1024, sce->lcoeffs, sce->ret_buf);
}
if (isnan(cpe->ch->coeffs[0])) {
av_log(avctx, AV_LOG_ERROR, "Input contains NaN\n");
return AVERROR(EINVAL);
}
avoid_clipping(s, sce);
}
start_ch += chans;
}
if ((ret = ff_alloc_packet2(avctx, avpkt, 8192 * s->channels, 0)) < 0)
return ret;
frame_bits = its = 0;
do {
init_put_bits(&s->pb, avpkt->data, avpkt->size);
if ((avctx->frame_number & 0xFF)==1 && !(avctx->flags & AV_CODEC_FLAG_BITEXACT))
put_bitstream_info(s, LIBAVCODEC_IDENT);
start_ch = 0;
target_bits = 0;
memset(chan_el_counter, 0, sizeof(chan_el_counter));
for (i = 0; i < s->chan_map[0]; i++) {
FFPsyWindowInfo* wi = windows + start_ch;
const float *coeffs[2];
tag = s->chan_map[i+1];
chans = tag == TYPE_CPE ? 2 : 1;
cpe = &s->cpe[i];
cpe->common_window = 0;
memset(cpe->is_mask, 0, sizeof(cpe->is_mask));
memset(cpe->ms_mask, 0, sizeof(cpe->ms_mask));
put_bits(&s->pb, 3, tag);
put_bits(&s->pb, 4, chan_el_counter[tag]++);
for (ch = 0; ch < chans; ch++) {
sce = &cpe->ch[ch];
coeffs[ch] = sce->coeffs;
sce->ics.predictor_present = 0;
sce->ics.ltp.present = 0;
memset(sce->ics.ltp.used, 0, sizeof(sce->ics.ltp.used));
memset(sce->ics.prediction_used, 0, sizeof(sce->ics.prediction_used));
memset(&sce->tns, 0, sizeof(TemporalNoiseShaping));
for (w = 0; w < 128; w++)
if (sce->band_type[w] > RESERVED_BT)
sce->band_type[w] = 0;
}
s->psy.bitres.alloc = -1;
s->psy.bitres.bits = s->last_frame_pb_count / s->channels;
s->psy.model->analyze(&s->psy, start_ch, coeffs, wi);
if (s->psy.bitres.alloc > 0) {
/* Lambda unused here on purpose, we need to take psy's unscaled allocation */
target_bits += s->psy.bitres.alloc
* (s->lambda / (avctx->global_quality ? avctx->global_quality : 120));
s->psy.bitres.alloc /= chans;
}
s->cur_type = tag;
for (ch = 0; ch < chans; ch++) {
s->cur_channel = start_ch + ch;
if (s->options.pns && s->coder->mark_pns)
s->coder->mark_pns(s, avctx, &cpe->ch[ch]);
s->coder->search_for_quantizers(avctx, s, &cpe->ch[ch], s->lambda);
}
if (chans > 1
&& wi[0].window_type[0] == wi[1].window_type[0]
&& wi[0].window_shape == wi[1].window_shape) {
cpe->common_window = 1;
for (w = 0; w < wi[0].num_windows; w++) {
if (wi[0].grouping[w] != wi[1].grouping[w]) {
cpe->common_window = 0;
break;
}
}
}
for (ch = 0; ch < chans; ch++) { /* TNS and PNS */
sce = &cpe->ch[ch];
s->cur_channel = start_ch + ch;
if (s->options.tns && s->coder->search_for_tns)
s->coder->search_for_tns(s, sce);
if (s->options.tns && s->coder->apply_tns_filt)
s->coder->apply_tns_filt(s, sce);
if (sce->tns.present)
tns_mode = 1;
if (s->options.pns && s->coder->search_for_pns)
s->coder->search_for_pns(s, avctx, sce);
}
s->cur_channel = start_ch;
if (s->options.intensity_stereo) { /* Intensity Stereo */
if (s->coder->search_for_is)
s->coder->search_for_is(s, avctx, cpe);
if (cpe->is_mode) is_mode = 1;
apply_intensity_stereo(cpe);
}
if (s->options.pred) { /* Prediction */
for (ch = 0; ch < chans; ch++) {
sce = &cpe->ch[ch];
s->cur_channel = start_ch + ch;
if (s->options.pred && s->coder->search_for_pred)
s->coder->search_for_pred(s, sce);
if (cpe->ch[ch].ics.predictor_present) pred_mode = 1;
}
if (s->coder->adjust_common_pred)
s->coder->adjust_common_pred(s, cpe);
for (ch = 0; ch < chans; ch++) {
sce = &cpe->ch[ch];
s->cur_channel = start_ch + ch;
if (s->options.pred && s->coder->apply_main_pred)
s->coder->apply_main_pred(s, sce);
}
s->cur_channel = start_ch;
}
if (s->options.mid_side) { /* Mid/Side stereo */
if (s->options.mid_side == -1 && s->coder->search_for_ms)
s->coder->search_for_ms(s, cpe);
else if (cpe->common_window)
memset(cpe->ms_mask, 1, sizeof(cpe->ms_mask));
apply_mid_side_stereo(cpe);
}
adjust_frame_information(cpe, chans);
if (s->options.ltp) { /* LTP */
for (ch = 0; ch < chans; ch++) {
sce = &cpe->ch[ch];
s->cur_channel = start_ch + ch;
if (s->coder->search_for_ltp)
s->coder->search_for_ltp(s, sce, cpe->common_window);
if (sce->ics.ltp.present) pred_mode = 1;
}
s->cur_channel = start_ch;
if (s->coder->adjust_common_ltp)
s->coder->adjust_common_ltp(s, cpe);
}
if (chans == 2) {
put_bits(&s->pb, 1, cpe->common_window);
if (cpe->common_window) {
put_ics_info(s, &cpe->ch[0].ics);
if (s->coder->encode_main_pred)
s->coder->encode_main_pred(s, &cpe->ch[0]);
if (s->coder->encode_ltp_info)
s->coder->encode_ltp_info(s, &cpe->ch[0], 1);
encode_ms_info(&s->pb, cpe);
if (cpe->ms_mode) ms_mode = 1;
}
}
for (ch = 0; ch < chans; ch++) {
s->cur_channel = start_ch + ch;
encode_individual_channel(avctx, s, &cpe->ch[ch], cpe->common_window);
}
start_ch += chans;
}
if (avctx->flags & CODEC_FLAG_QSCALE) {
/* When using a constant Q-scale, don't mess with lambda */
break;
}
/* rate control stuff
* allow between the nominal bitrate, and what psy's bit reservoir says to target
* but drift towards the nominal bitrate always
*/
frame_bits = put_bits_count(&s->pb);
rate_bits = avctx->bit_rate * 1024 / avctx->sample_rate;
rate_bits = FFMIN(rate_bits, 6144 * s->channels - 3);
too_many_bits = FFMAX(target_bits, rate_bits);
too_many_bits = FFMIN(too_many_bits, 6144 * s->channels - 3);
too_few_bits = FFMIN(FFMAX(rate_bits - rate_bits/4, target_bits), too_many_bits);
/* When using ABR, be strict (but only for increasing) */
too_few_bits = too_few_bits - too_few_bits/8;
too_many_bits = too_many_bits + too_many_bits/2;
if ( its == 0 /* for steady-state Q-scale tracking */
|| (its < 5 && (frame_bits < too_few_bits || frame_bits > too_many_bits))
|| frame_bits >= 6144 * s->channels - 3 )
{
float ratio = ((float)rate_bits) / frame_bits;
if (frame_bits >= too_few_bits && frame_bits <= too_many_bits) {
/*
* This path is for steady-state Q-scale tracking
* When frame bits fall within the stable range, we still need to adjust
* lambda to maintain it like so in a stable fashion (large jumps in lambda
* create artifacts and should be avoided), but slowly
*/
ratio = sqrtf(sqrtf(ratio));
ratio = av_clipf(ratio, 0.9f, 1.1f);
} else {
/* Not so fast though */
ratio = sqrtf(ratio);
}
s->lambda = FFMIN(s->lambda * ratio, 65536.f);
/* Keep iterating if we must reduce and lambda is in the sky */
if (ratio > 0.9f && ratio < 1.1f) {
break;
} else {
if (is_mode || ms_mode || tns_mode || pred_mode) {
for (i = 0; i < s->chan_map[0]; i++) {
// Must restore coeffs
chans = tag == TYPE_CPE ? 2 : 1;
cpe = &s->cpe[i];
for (ch = 0; ch < chans; ch++)
memcpy(cpe->ch[ch].coeffs, cpe->ch[ch].pcoeffs, sizeof(cpe->ch[ch].coeffs));
}
}
its++;
}
} else {
break;
}
} while (1);
if (s->options.ltp && s->coder->ltp_insert_new_frame)
s->coder->ltp_insert_new_frame(s);
put_bits(&s->pb, 3, TYPE_END);
flush_put_bits(&s->pb);
s->last_frame_pb_count = put_bits_count(&s->pb);
s->lambda_sum += s->lambda;
s->lambda_count++;
if (!frame)
s->last_frame++;
ff_af_queue_remove(&s->afq, avctx->frame_size, &avpkt->pts,
&avpkt->duration);
avpkt->size = put_bits_count(&s->pb) >> 3;
*got_packet_ptr = 1;
return 0;
}
| 27,165 |
qemu | 5f5a1318653c08e435cfa52f60b6a712815b659d | 1 | void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
uint32_t val = data;
if (addr > (vdev->config_len - sizeof(val)))
return;
stl_p(vdev->config + addr, val);
if (k->set_config) {
k->set_config(vdev, vdev->config);
}
}
| 27,166 |
qemu | b20e61e0d52eef57cf5db55087b16e0b5207e730 | 1 | int bdrv_open_image(BlockDriverState **pbs, const char *filename,
QDict *options, const char *bdref_key, int flags,
bool allow_none, Error **errp)
{
QDict *image_options;
int ret;
char *bdref_key_dot;
const char *reference;
assert(pbs);
assert(*pbs == NULL);
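    /* Split all options of the form "<bdref_key>.*" out into a dict of their own for the image. */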
bdref_key_dot = g_strdup_printf("%s.", bdref_key);
qdict_extract_subqdict(options, &image_options, bdref_key_dot);
g_free(bdref_key_dot);
reference = qdict_get_try_str(options, bdref_key);
if (!filename && !reference && !qdict_size(image_options)) {
if (allow_none) {
ret = 0;
} else {
error_setg(errp, "A block device must be specified for \"%s\"",
bdref_key);
ret = -EINVAL;
}
goto done;
}
ret = bdrv_open(pbs, filename, reference, image_options, flags, NULL, errp);
done:
qdict_del(options, bdref_key);
return ret;
} | 27,167 |
qemu | 4f4321c11ff6e98583846bfd6f0e81954924b003 | 1 | static void musb_packet(MUSBState *s, MUSBEndPoint *ep,
int epnum, int pid, int len, USBCallback cb, int dir)
{
int ret;
int idx = epnum && dir;
int ttype;
/* ep->type[0,1] contains:
* in bits 7:6 the speed (0 - invalid, 1 - high, 2 - full, 3 - slow)
* in bits 5:4 the transfer type (BULK / INT)
* in bits 3:0 the EP num
*/
ttype = epnum ? (ep->type[idx] >> 4) & 3 : 0;
ep->timeout[dir] = musb_timeout(ttype,
ep->type[idx] >> 6, ep->interval[idx]);
ep->interrupt[dir] = ttype == USB_ENDPOINT_XFER_INT;
ep->delayed_cb[dir] = cb;
ep->packey[dir].p.pid = pid;
/* A wild guess on the FADDR semantics... */
ep->packey[dir].p.devaddr = ep->faddr[idx];
ep->packey[dir].p.devep = ep->type[idx] & 0xf;
ep->packey[dir].p.data = (void *) ep->buf[idx];
ep->packey[dir].p.len = len;
ep->packey[dir].ep = ep;
ep->packey[dir].dir = dir;
if (s->port.dev)
ret = usb_handle_packet(s->port.dev, &ep->packey[dir].p);
else
ret = USB_RET_NODEV;
if (ret == USB_RET_ASYNC) {
ep->status[dir] = len;
return;
}
ep->status[dir] = ret;
musb_schedule_cb(&s->port, &ep->packey[dir].p);
}
| 27,168 |
qemu | 0b5538c300a56c3cfb33022840fe0b4968147e7a | 1 | bool st_set_trace_file(const char *file)
{
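    /* Stop tracing to the current file before replacing its name. */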
st_set_trace_file_enabled(false);
free(trace_file_name);
if (!file) {
if (asprintf(&trace_file_name, CONFIG_TRACE_FILE, getpid()) < 0) {
trace_file_name = NULL;
return false;
}
} else {
if (asprintf(&trace_file_name, "%s", file) < 0) {
trace_file_name = NULL;
return false;
}
}
st_set_trace_file_enabled(true);
return true;
}
| 27,170 |
qemu | a01672d3968cf91208666d371784110bfde9d4f8 | 1 | static int kvm_client_migration_log(struct CPUPhysMemoryClient *client,
int enable)
{
return kvm_set_migration_log(enable);
}
| 27,172 |
FFmpeg | 64b164f44abc232dbb125b36e2d00b54e1531ba7 | 1 | static AVFilterContext *parse_filter(const char **buf, AVFilterGraph *graph,
int index, AVClass *log_ctx)
{
char *opts = NULL;
char *name = consume_string(buf);
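    /* An optional "=options" string may follow the filter name. */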
if(**buf == '=') {
(*buf)++;
opts = consume_string(buf);
}
return create_filter(graph, index, name, opts, log_ctx);
}
| 27,173 |
FFmpeg | dec2fa8cc7089605d1d934d65dd2709cfe8aece2 | 1 | static AVCodec *AVCodecInitialize(enum AVCodecID codec_id)
{
AVCodec *res;
avcodec_register_all();
av_log_set_level(AV_LOG_PANIC);
res = avcodec_find_decoder(codec_id);
if (!res)
error("Failed to find decoder");
return res;
}
| 27,174 |
qemu | 0ec7b534821c8b287317f97cd98ee3f908bd3839 | 0 | static int poll_rest(gboolean poll_msgs, HANDLE *handles, gint nhandles,
GPollFD *fds, guint nfds, gint timeout)
{
DWORD ready;
GPollFD *f;
int recursed_result;
if (poll_msgs) {
/* Wait for either messages or handles
* -> Use MsgWaitForMultipleObjectsEx
*/
ready = MsgWaitForMultipleObjectsEx(nhandles, handles, timeout,
QS_ALLINPUT, MWMO_ALERTABLE);
if (ready == WAIT_FAILED) {
gchar *emsg = g_win32_error_message(GetLastError());
g_warning("MsgWaitForMultipleObjectsEx failed: %s", emsg);
g_free(emsg);
}
} else if (nhandles == 0) {
/* No handles to wait for, just the timeout */
if (timeout == INFINITE) {
ready = WAIT_FAILED;
} else {
SleepEx(timeout, TRUE);
ready = WAIT_TIMEOUT;
}
} else {
/* Wait for just handles
* -> Use WaitForMultipleObjectsEx
*/
ready =
WaitForMultipleObjectsEx(nhandles, handles, FALSE, timeout, TRUE);
if (ready == WAIT_FAILED) {
gchar *emsg = g_win32_error_message(GetLastError());
g_warning("WaitForMultipleObjectsEx failed: %s", emsg);
g_free(emsg);
}
}
if (ready == WAIT_FAILED) {
return -1;
} else if (ready == WAIT_TIMEOUT || ready == WAIT_IO_COMPLETION) {
return 0;
} else if (poll_msgs && ready == WAIT_OBJECT_0 + nhandles) {
for (f = fds; f < &fds[nfds]; ++f) {
if (f->fd == G_WIN32_MSG_HANDLE && f->events & G_IO_IN) {
f->revents |= G_IO_IN;
}
}
/* If we have a timeout, or no handles to poll, be satisfied
* with just noticing we have messages waiting.
*/
if (timeout != 0 || nhandles == 0) {
return 1;
}
/* If no timeout and handles to poll, recurse to poll them,
* too.
*/
recursed_result = poll_rest(FALSE, handles, nhandles, fds, nfds, 0);
return (recursed_result == -1) ? -1 : 1 + recursed_result;
} else if (/* QEMU: removed the following unneeded statement which causes
* a compiler warning: ready >= WAIT_OBJECT_0 && */
ready < WAIT_OBJECT_0 + nhandles) {
for (f = fds; f < &fds[nfds]; ++f) {
if ((HANDLE) f->fd == handles[ready - WAIT_OBJECT_0]) {
f->revents = f->events;
}
}
/* If no timeout and polling several handles, recurse to poll
* the rest of them.
*/
if (timeout == 0 && nhandles > 1) {
/* Remove the handle that fired */
int i;
if (ready < nhandles - 1) {
for (i = ready - WAIT_OBJECT_0 + 1; i < nhandles; i++) {
handles[i-1] = handles[i];
}
}
nhandles--;
recursed_result = poll_rest(FALSE, handles, nhandles, fds, nfds, 0);
return (recursed_result == -1) ? -1 : 1 + recursed_result;
}
return 1;
}
return 0;
}
| 27,175 |
qemu | 7a39fe588251ba042c91bf23d53b0ba820bf964c | 0 | int kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
{
/* Inject NMI */
if (env->interrupt_request & CPU_INTERRUPT_NMI) {
env->interrupt_request &= ~CPU_INTERRUPT_NMI;
DPRINTF("injected NMI\n");
kvm_vcpu_ioctl(env, KVM_NMI);
}
if (!kvm_irqchip_in_kernel()) {
/* Force the VCPU out of its inner loop to process the INIT request */
if (env->interrupt_request & CPU_INTERRUPT_INIT) {
env->exit_request = 1;
}
/* Try to inject an interrupt if the guest can accept it */
if (run->ready_for_interrupt_injection &&
(env->interrupt_request & CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK)) {
int irq;
env->interrupt_request &= ~CPU_INTERRUPT_HARD;
irq = cpu_get_pic_interrupt(env);
if (irq >= 0) {
struct kvm_interrupt intr;
intr.irq = irq;
/* FIXME: errors */
DPRINTF("injected interrupt %d\n", irq);
kvm_vcpu_ioctl(env, KVM_INTERRUPT, &intr);
}
}
/* If we have an interrupt but the guest is not ready to receive an
* interrupt, request an interrupt window exit. This will
* cause a return to userspace as soon as the guest is ready to
* receive interrupts. */
if ((env->interrupt_request & CPU_INTERRUPT_HARD)) {
run->request_interrupt_window = 1;
} else {
run->request_interrupt_window = 0;
}
DPRINTF("setting tpr\n");
run->cr8 = cpu_get_apic_tpr(env->apic_state);
}
return 0;
}
| 27,176 |
qemu | dcb2b9e1003a9179650b44c747faa4e5767ce92b | 0 | static void gen_msync(DisasContext *ctx)
{
/* interpreted as no-op */
}
| 27,177 |
qemu | a8170e5e97ad17ca169c64ba87ae2f53850dab4c | 0 | static void nvram_writeb (void *opaque, target_phys_addr_t addr, uint32_t value)
{
M48t59State *NVRAM = opaque;
m48t59_write(NVRAM, addr, value & 0xff);
}
| 27,178 |
qemu | b011f61931f0113b29b7cd7e921dd022e0b04834 | 0 | static int json_lexer_feed_char(JSONLexer *lexer, char ch, bool flush)
{
int char_consumed, new_state;
lexer->x++;
if (ch == '\n') {
lexer->x = 0;
lexer->y++;
}
do {
new_state = json_lexer[lexer->state][(uint8_t)ch];
char_consumed = !TERMINAL_NEEDED_LOOKAHEAD(lexer->state, new_state);
if (char_consumed) {
qstring_append_chr(lexer->token, ch);
}
switch (new_state) {
case JSON_OPERATOR:
case JSON_ESCAPE:
case JSON_INTEGER:
case JSON_FLOAT:
case JSON_KEYWORD:
case JSON_STRING:
lexer->emit(lexer, lexer->token, new_state, lexer->x, lexer->y);
case JSON_SKIP:
QDECREF(lexer->token);
lexer->token = qstring_new();
new_state = IN_START;
break;
case IN_ERROR:
QDECREF(lexer->token);
lexer->token = qstring_new();
new_state = IN_START;
return -EINVAL;
default:
break;
}
lexer->state = new_state;
} while (!char_consumed && !flush);
/* Do not let a single token grow to an arbitrarily large size,
* this is a security consideration.
*/
if (lexer->token->length > MAX_TOKEN_SIZE) {
lexer->emit(lexer, lexer->token, lexer->state, lexer->x, lexer->y);
QDECREF(lexer->token);
lexer->token = qstring_new();
lexer->state = IN_START;
}
return 0;
}
| 27,179 |
qemu | 1f01e50b8330c24714ddca5841fdbb703076b121 | 0 | static int coroutine_fn qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
{
BDRVQEDState *s = acb_to_s(acb);
int ret;
/* Cancel timer when the first allocating request comes in */
if (s->allocating_acb == NULL) {
qed_cancel_need_check_timer(s);
}
/* Freeze this request if another allocating write is in progress */
if (s->allocating_acb != acb || s->allocating_write_reqs_plugged) {
if (s->allocating_acb != NULL) {
qemu_co_queue_wait(&s->allocating_write_reqs, NULL);
assert(s->allocating_acb == NULL);
}
s->allocating_acb = acb;
return -EAGAIN; /* start over with looking up table entries */
}
acb->cur_nclusters = qed_bytes_to_clusters(s,
qed_offset_into_cluster(s, acb->cur_pos) + len);
qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);
if (acb->flags & QED_AIOCB_ZERO) {
/* Skip ahead if the clusters are already zero */
if (acb->find_cluster_ret == QED_CLUSTER_ZERO) {
return 0;
}
acb->cur_cluster = 1;
} else {
acb->cur_cluster = qed_alloc_clusters(s, acb->cur_nclusters);
}
if (qed_should_set_need_check(s)) {
s->header.features |= QED_F_NEED_CHECK;
ret = qed_write_header(s);
if (ret < 0) {
return ret;
}
}
if (!(acb->flags & QED_AIOCB_ZERO)) {
ret = qed_aio_write_cow(acb);
if (ret < 0) {
return ret;
}
}
return qed_aio_write_l2_update(acb, acb->cur_cluster);
}
| 27,180 |
qemu | d2cb36af2b0040d421b347e6e4e803e07220f78d | 0 | int qcow2_snapshot_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info)
{
BDRVQcow2State *s = bs->opaque;
QCowSnapshot *new_snapshot_list = NULL;
QCowSnapshot *old_snapshot_list = NULL;
QCowSnapshot sn1, *sn = &sn1;
int i, ret;
uint64_t *l1_table = NULL;
int64_t l1_table_offset;
if (s->nb_snapshots >= QCOW_MAX_SNAPSHOTS) {
return -EFBIG;
}
memset(sn, 0, sizeof(*sn));
/* Generate an ID */
find_new_snapshot_id(bs, sn_info->id_str, sizeof(sn_info->id_str));
/* Check that the ID is unique */
if (find_snapshot_by_id_and_name(bs, sn_info->id_str, NULL) >= 0) {
return -EEXIST;
}
/* Populate sn with passed data */
sn->id_str = g_strdup(sn_info->id_str);
sn->name = g_strdup(sn_info->name);
sn->disk_size = bs->total_sectors * BDRV_SECTOR_SIZE;
sn->vm_state_size = sn_info->vm_state_size;
sn->date_sec = sn_info->date_sec;
sn->date_nsec = sn_info->date_nsec;
sn->vm_clock_nsec = sn_info->vm_clock_nsec;
/* Allocate the L1 table of the snapshot and copy the current one there. */
l1_table_offset = qcow2_alloc_clusters(bs, s->l1_size * sizeof(uint64_t));
if (l1_table_offset < 0) {
ret = l1_table_offset;
goto fail;
}
sn->l1_table_offset = l1_table_offset;
sn->l1_size = s->l1_size;
l1_table = g_try_new(uint64_t, s->l1_size);
if (s->l1_size && l1_table == NULL) {
ret = -ENOMEM;
goto fail;
}
for(i = 0; i < s->l1_size; i++) {
l1_table[i] = cpu_to_be64(s->l1_table[i]);
}
ret = qcow2_pre_write_overlap_check(bs, 0, sn->l1_table_offset,
s->l1_size * sizeof(uint64_t));
if (ret < 0) {
goto fail;
}
ret = bdrv_pwrite(bs->file, sn->l1_table_offset, l1_table,
s->l1_size * sizeof(uint64_t));
if (ret < 0) {
goto fail;
}
g_free(l1_table);
l1_table = NULL;
/*
* Increase the refcounts of all clusters and make sure everything is
* stable on disk before updating the snapshot table to contain a pointer
* to the new L1 table.
*/
ret = qcow2_update_snapshot_refcount(bs, s->l1_table_offset, s->l1_size, 1);
if (ret < 0) {
goto fail;
}
/* Append the new snapshot to the snapshot list */
new_snapshot_list = g_new(QCowSnapshot, s->nb_snapshots + 1);
if (s->snapshots) {
memcpy(new_snapshot_list, s->snapshots,
s->nb_snapshots * sizeof(QCowSnapshot));
old_snapshot_list = s->snapshots;
}
s->snapshots = new_snapshot_list;
s->snapshots[s->nb_snapshots++] = *sn;
ret = qcow2_write_snapshots(bs);
if (ret < 0) {
g_free(s->snapshots);
s->snapshots = old_snapshot_list;
s->nb_snapshots--;
goto fail;
}
g_free(old_snapshot_list);
/* The VM state isn't needed any more in the active L1 table; in fact, it
* hurts by causing expensive COW for the next snapshot. */
qcow2_discard_clusters(bs, qcow2_vm_state_offset(s),
align_offset(sn->vm_state_size, s->cluster_size)
>> BDRV_SECTOR_BITS,
QCOW2_DISCARD_NEVER, false);
#ifdef DEBUG_ALLOC
{
BdrvCheckResult result = {0};
qcow2_check_refcounts(bs, &result, 0);
}
#endif
return 0;
fail:
g_free(sn->id_str);
g_free(sn->name);
g_free(l1_table);
return ret;
}
| 27,181 |
qemu | b2bedb214469af55179d907a60cd67fed6b0779e | 0 | static void superio_ioport_writeb(void *opaque, uint32_t addr, uint32_t data)
{
int can_write;
SuperIOConfig *superio_conf = opaque;
DPRINTF("superio_ioport_writeb address 0x%x val 0x%x \n", addr, data);
if (addr == 0x3f0) {
superio_conf->index = data & 0xff;
} else {
/* 0x3f1 */
switch (superio_conf->index) {
case 0x00 ... 0xdf:
case 0xe4:
case 0xe5:
case 0xe9 ... 0xed:
case 0xf3:
case 0xf5:
case 0xf7:
case 0xf9 ... 0xfb:
case 0xfd ... 0xff:
can_write = 0;
break;
default:
can_write = 1;
if (can_write) {
switch (superio_conf->index) {
case 0xe7:
if ((data & 0xff) != 0xfe) {
DPRINTF("chage uart 1 base. unsupported yet \n");
}
break;
case 0xe8:
if ((data & 0xff) != 0xbe) {
DPRINTF("chage uart 2 base. unsupported yet \n");
}
break;
default:
superio_conf->config[superio_conf->index] = data & 0xff;
}
}
}
superio_conf->config[superio_conf->index] = data & 0xff;
}
}
| 27,182 |
qemu | bb5fc20f7c1c65e95030da3629dd0d7a0cce38cd | 0 | static void monitor_handle_command1(void *opaque, const char *cmdline)
{
monitor_handle_command(cmdline);
if (!monitor_suspended)
monitor_start_input();
else
monitor_suspended = 2;
}
| 27,184 |
qemu | fd56e0612b6454a282fa6a953fdb09281a98c589 | 0 | static void do_pci_unregister_device(PCIDevice *pci_dev)
{
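    /* Detach the device from its bus and tear down its bus-master DMA address space. */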
pci_dev->bus->devices[pci_dev->devfn] = NULL;
pci_config_free(pci_dev);
if (memory_region_is_mapped(&pci_dev->bus_master_enable_region)) {
memory_region_del_subregion(&pci_dev->bus_master_container_region,
&pci_dev->bus_master_enable_region);
}
address_space_destroy(&pci_dev->bus_master_as);
}
| 27,185 |
qemu | b6dcbe086c77ec683f5ff0b693593cda1d61f3a1 | 0 | CPUState *ppc405cr_init (target_phys_addr_t ram_bases[4],
target_phys_addr_t ram_sizes[4],
uint32_t sysclk, qemu_irq **picp,
int do_init)
{
clk_setup_t clk_setup[PPC405CR_CLK_NB];
qemu_irq dma_irqs[4];
CPUState *env;
qemu_irq *pic, *irqs;
memset(clk_setup, 0, sizeof(clk_setup));
env = ppc4xx_init("405cr", &clk_setup[PPC405CR_CPU_CLK],
&clk_setup[PPC405CR_TMR_CLK], sysclk);
/* Memory mapped devices registers */
/* PLB arbitrer */
ppc4xx_plb_init(env);
/* PLB to OPB bridge */
ppc4xx_pob_init(env);
/* OBP arbitrer */
ppc4xx_opba_init(0xef600600);
/* Universal interrupt controller */
irqs = g_malloc0(sizeof(qemu_irq) * PPCUIC_OUTPUT_NB);
irqs[PPCUIC_OUTPUT_INT] =
((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_INT];
irqs[PPCUIC_OUTPUT_CINT] =
((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_CINT];
pic = ppcuic_init(env, irqs, 0x0C0, 0, 1);
*picp = pic;
/* SDRAM controller */
ppc4xx_sdram_init(env, pic[14], 1, ram_bases, ram_sizes, do_init);
/* External bus controller */
ppc405_ebc_init(env);
/* DMA controller */
dma_irqs[0] = pic[26];
dma_irqs[1] = pic[25];
dma_irqs[2] = pic[24];
dma_irqs[3] = pic[23];
ppc405_dma_init(env, dma_irqs);
/* Serial ports */
if (serial_hds[0] != NULL) {
serial_mm_init(0xef600300, 0, pic[0], PPC_SERIAL_MM_BAUDBASE,
serial_hds[0], 1, 1);
}
if (serial_hds[1] != NULL) {
serial_mm_init(0xef600400, 0, pic[1], PPC_SERIAL_MM_BAUDBASE,
serial_hds[1], 1, 1);
}
/* IIC controller */
ppc405_i2c_init(0xef600500, pic[2]);
/* GPIO */
ppc405_gpio_init(0xef600700);
/* CPU control */
ppc405cr_cpc_init(env, clk_setup, sysclk);
return env;
}
| 27,186 |
qemu | f53a829bb9ef14be800556cbc02d8b20fc1050a7 | 0 | int nbd_client_session_co_discard(NbdClientSession *client, int64_t sector_num,
int nb_sectors)
{
struct nbd_request request = { .type = NBD_CMD_TRIM };
struct nbd_reply reply;
ssize_t ret;
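    /* The server did not advertise TRIM support, so treat the discard as a no-op. */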
if (!(client->nbdflags & NBD_FLAG_SEND_TRIM)) {
return 0;
}
request.from = sector_num * 512;
request.len = nb_sectors * 512;
nbd_coroutine_start(client, &request);
ret = nbd_co_send_request(client, &request, NULL, 0);
if (ret < 0) {
reply.error = -ret;
} else {
nbd_co_receive_reply(client, &request, &reply, NULL, 0);
}
nbd_coroutine_end(client, &request);
return -reply.error;
}
| 27,188 |
qemu | d368ba4376b2c1c24175c74b3733b8fe64dbe8a6 | 0 | static void sdhci_do_adma(SDHCIState *s)
{
unsigned int n, begin, length;
const uint16_t block_size = s->blksize & 0x0fff;
ADMADescr dscr;
int i;
for (i = 0; i < SDHC_ADMA_DESCS_PER_DELAY; ++i) {
s->admaerr &= ~SDHC_ADMAERR_LENGTH_MISMATCH;
get_adma_description(s, &dscr);
DPRINT_L2("ADMA loop: addr=" TARGET_FMT_plx ", len=%d, attr=%x\n",
dscr.addr, dscr.length, dscr.attr);
if ((dscr.attr & SDHC_ADMA_ATTR_VALID) == 0) {
/* Indicate that error occurred in ST_FDS state */
s->admaerr &= ~SDHC_ADMAERR_STATE_MASK;
s->admaerr |= SDHC_ADMAERR_STATE_ST_FDS;
/* Generate ADMA error interrupt */
if (s->errintstsen & SDHC_EISEN_ADMAERR) {
s->errintsts |= SDHC_EIS_ADMAERR;
s->norintsts |= SDHC_NIS_ERR;
}
sdhci_update_irq(s);
return;
}
length = dscr.length ? dscr.length : 65536;
switch (dscr.attr & SDHC_ADMA_ATTR_ACT_MASK) {
case SDHC_ADMA_ATTR_ACT_TRAN: /* data transfer */
if (s->trnmod & SDHC_TRNS_READ) {
while (length) {
if (s->data_count == 0) {
for (n = 0; n < block_size; n++) {
s->fifo_buffer[n] = sd_read_data(s->card);
}
}
begin = s->data_count;
if ((length + begin) < block_size) {
s->data_count = length + begin;
length = 0;
} else {
s->data_count = block_size;
length -= block_size - begin;
}
dma_memory_write(&address_space_memory, dscr.addr,
&s->fifo_buffer[begin],
s->data_count - begin);
dscr.addr += s->data_count - begin;
if (s->data_count == block_size) {
s->data_count = 0;
if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) {
s->blkcnt--;
if (s->blkcnt == 0) {
break;
}
}
}
}
} else {
while (length) {
begin = s->data_count;
if ((length + begin) < block_size) {
s->data_count = length + begin;
length = 0;
} else {
s->data_count = block_size;
length -= block_size - begin;
}
dma_memory_read(&address_space_memory, dscr.addr,
&s->fifo_buffer[begin],
s->data_count - begin);
dscr.addr += s->data_count - begin;
if (s->data_count == block_size) {
for (n = 0; n < block_size; n++) {
sd_write_data(s->card, s->fifo_buffer[n]);
}
s->data_count = 0;
if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) {
s->blkcnt--;
if (s->blkcnt == 0) {
break;
}
}
}
}
}
s->admasysaddr += dscr.incr;
break;
case SDHC_ADMA_ATTR_ACT_LINK: /* link to next descriptor table */
s->admasysaddr = dscr.addr;
DPRINT_L1("ADMA link: admasysaddr=0x%lx\n", s->admasysaddr);
break;
default:
s->admasysaddr += dscr.incr;
break;
}
if (dscr.attr & SDHC_ADMA_ATTR_INT) {
DPRINT_L1("ADMA interrupt: admasysaddr=0x%lx\n", s->admasysaddr);
if (s->norintstsen & SDHC_NISEN_DMA) {
s->norintsts |= SDHC_NIS_DMA;
}
sdhci_update_irq(s);
}
/* ADMA transfer terminates if blkcnt == 0 or by END attribute */
if (((s->trnmod & SDHC_TRNS_BLK_CNT_EN) &&
(s->blkcnt == 0)) || (dscr.attr & SDHC_ADMA_ATTR_END)) {
DPRINT_L2("ADMA transfer completed\n");
if (length || ((dscr.attr & SDHC_ADMA_ATTR_END) &&
(s->trnmod & SDHC_TRNS_BLK_CNT_EN) &&
s->blkcnt != 0)) {
ERRPRINT("SD/MMC host ADMA length mismatch\n");
s->admaerr |= SDHC_ADMAERR_LENGTH_MISMATCH |
SDHC_ADMAERR_STATE_ST_TFR;
if (s->errintstsen & SDHC_EISEN_ADMAERR) {
ERRPRINT("Set ADMA error flag\n");
s->errintsts |= SDHC_EIS_ADMAERR;
s->norintsts |= SDHC_NIS_ERR;
}
sdhci_update_irq(s);
}
SDHCI_GET_CLASS(s)->end_data_transfer(s);
return;
}
}
/* we have unfinished business - reschedule to continue ADMA */
timer_mod(s->transfer_timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + SDHC_TRANSFER_DELAY);
}
| 27,189 |
qemu | a8170e5e97ad17ca169c64ba87ae2f53850dab4c | 0 | static uint32_t omap_sysctl_read8(void *opaque, target_phys_addr_t addr)
{
struct omap_sysctl_s *s = (struct omap_sysctl_s *) opaque;
int pad_offset, byte_offset;
int value;
switch (addr) {
case 0x030 ... 0x140: /* CONTROL_PADCONF - only used in the POP */
pad_offset = (addr - 0x30) >> 2;
byte_offset = (addr - 0x30) & (4 - 1);
value = s->padconf[pad_offset];
value = (value >> (byte_offset * 8)) & 0xff;
return value;
default:
break;
}
OMAP_BAD_REG(addr);
return 0;
}
| 27,190 |
qemu | d0b3e4f5f4a29d48250887e5c0b3c65bc4dc6d13 | 0 | abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
{
void *ptr, *prev;
abi_ulong addr;
int wrapped, repeat;
/* If 'start' == 0, then a default start address is used. */
if (start == 0) {
start = mmap_next_start;
} else {
start &= qemu_host_page_mask;
}
size = HOST_PAGE_ALIGN(size);
if (RESERVED_VA) {
return mmap_find_vma_reserved(start, size);
}
addr = start;
wrapped = repeat = 0;
prev = 0;
for (;; prev = ptr) {
/*
* Reserve needed memory area to avoid a race.
* It should be discarded using:
* - mmap() with MAP_FIXED flag
* - mremap() with MREMAP_FIXED flag
* - shmat() with SHM_REMAP flag
*/
ptr = mmap(g2h(addr), size, PROT_NONE,
MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
/* ENOMEM, if host address space has no memory */
if (ptr == MAP_FAILED) {
return (abi_ulong)-1;
}
/* Count the number of sequential returns of the same address.
This is used to modify the search algorithm below. */
repeat = (ptr == prev ? repeat + 1 : 0);
if (h2g_valid(ptr + size - 1)) {
addr = h2g(ptr);
if ((addr & ~TARGET_PAGE_MASK) == 0) {
/* Success. */
if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
mmap_next_start = addr + size;
}
return addr;
}
/* The address is not properly aligned for the target. */
switch (repeat) {
case 0:
/* Assume the result that the kernel gave us is the
first with enough free space, so start again at the
next higher target page. */
addr = TARGET_PAGE_ALIGN(addr);
break;
case 1:
/* Sometimes the kernel decides to perform the allocation
at the top end of memory instead. */
addr &= TARGET_PAGE_MASK;
break;
case 2:
/* Start over at low memory. */
addr = 0;
break;
default:
/* Fail. This unaligned block must the last. */
addr = -1;
break;
}
} else {
/* Since the result the kernel gave didn't fit, start
again at low memory. If any repetition, fail. */
addr = (repeat ? -1 : 0);
}
/* Unmap and try again. */
munmap(ptr, size);
/* ENOMEM if we checked the whole of the target address space. */
if (addr == -1ul) {
return (abi_ulong)-1;
} else if (addr == 0) {
if (wrapped) {
return (abi_ulong)-1;
}
wrapped = 1;
/* Don't actually use 0 when wrapping, instead indicate
               that we'd truly like an allocation in low memory. */
addr = (mmap_min_addr > TARGET_PAGE_SIZE
? TARGET_PAGE_ALIGN(mmap_min_addr)
: TARGET_PAGE_SIZE);
} else if (wrapped && addr >= start) {
return (abi_ulong)-1;
}
}
}
| 27,191 |
qemu | 079d0b7f1eedcc634c371fe05b617fdc55c8b762 | 0 | static int do_token_out(USBDevice *s, USBPacket *p)
{
assert(p->devep == 0);
switch(s->setup_state) {
case SETUP_STATE_ACK:
if (s->setup_buf[0] & USB_DIR_IN) {
s->setup_state = SETUP_STATE_IDLE;
/* transfer OK */
} else {
/* ignore additional output */
}
return 0;
case SETUP_STATE_DATA:
if (!(s->setup_buf[0] & USB_DIR_IN)) {
int len = s->setup_len - s->setup_index;
if (len > p->iov.size) {
len = p->iov.size;
}
usb_packet_copy(p, s->data_buf + s->setup_index, len);
s->setup_index += len;
if (s->setup_index >= s->setup_len)
s->setup_state = SETUP_STATE_ACK;
return len;
}
s->setup_state = SETUP_STATE_IDLE;
return USB_RET_STALL;
default:
return USB_RET_STALL;
}
}
| 27,192 |
qemu | 7399a337e4126f7c8c8af3336726f001378c4798 | 0 | int page_unprotect(target_ulong address, uintptr_t pc)
{
unsigned int prot;
PageDesc *p;
target_ulong host_start, host_end, addr;
/* Technically this isn't safe inside a signal handler. However we
know this only ever happens in a synchronous SEGV handler, so in
practice it seems to be ok. */
mmap_lock();
p = page_find(address >> TARGET_PAGE_BITS);
if (!p) {
mmap_unlock();
return 0;
}
/* if the page was really writable, then we change its
protection back to writable */
if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
host_start = address & qemu_host_page_mask;
host_end = host_start + qemu_host_page_size;
prot = 0;
for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
p = page_find(addr >> TARGET_PAGE_BITS);
p->flags |= PAGE_WRITE;
prot |= p->flags;
/* and since the content will be modified, we must invalidate
the corresponding translated code. */
if (tb_invalidate_phys_page(addr, pc)) {
mmap_unlock();
return 2;
}
#ifdef DEBUG_TB_CHECK
tb_invalidate_check(addr);
#endif
}
mprotect((void *)g2h(host_start), qemu_host_page_size,
prot & PAGE_BITS);
mmap_unlock();
return 1;
}
mmap_unlock();
return 0;
}
| 27,193 |
FFmpeg | 83fd377c94d8fbffdb3e69fb3efe1976ff897a88 | 0 | static void truncpasses(Jpeg2000EncoderContext *s, Jpeg2000Tile *tile)
{
int precno, compno, reslevelno, bandno, cblkno, lev;
Jpeg2000CodingStyle *codsty = &s->codsty;
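    /* Decide how many coding passes of each code block to keep, based on the rate-distortion lambda. */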
for (compno = 0; compno < s->ncomponents; compno++){
Jpeg2000Component *comp = tile->comp + compno;
for (reslevelno = 0, lev = codsty->nreslevels-1; reslevelno < codsty->nreslevels; reslevelno++, lev--){
Jpeg2000ResLevel *reslevel = comp->reslevel + reslevelno;
for (precno = 0; precno < reslevel->num_precincts_x * reslevel->num_precincts_y; precno++){
for (bandno = 0; bandno < reslevel->nbands ; bandno++){
int bandpos = bandno + (reslevelno > 0);
Jpeg2000Band *band = reslevel->band + bandno;
Jpeg2000Prec *prec = band->prec + precno;
for (cblkno = 0; cblkno < prec->nb_codeblocks_height * prec->nb_codeblocks_width; cblkno++){
Jpeg2000Cblk *cblk = prec->cblk + cblkno;
cblk->ninclpasses = getcut(cblk, s->lambda,
(int64_t)dwt_norms[codsty->transform][bandpos][lev] * (int64_t)band->i_stepsize >> 16);
}
}
}
}
}
}
| 27,195 |
qemu | 60fe637bf0e4d7989e21e50f52526444765c63b4 | 1 | uint64_t blk_mig_bytes_remaining(void)
{
return blk_mig_bytes_total() - blk_mig_bytes_transferred();
}
| 27,196 |
FFmpeg | 65d3359fb366ea265a8468d76a111cb7352f0b55 | 1 | static int init_tile(Jpeg2000DecoderContext *s, int tileno)
{
int compno;
int tilex = tileno % s->numXtiles;
int tiley = tileno / s->numXtiles;
Jpeg2000Tile *tile = s->tile + tileno;
if (!tile->comp)
return AVERROR(ENOMEM);
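    /* Tile coordinates on the reference grid, clipped to the image area. */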
tile->coord[0][0] = av_clip(tilex * s->tile_width + s->tile_offset_x, s->image_offset_x, s->width);
tile->coord[0][1] = av_clip((tilex + 1) * s->tile_width + s->tile_offset_x, s->image_offset_x, s->width);
tile->coord[1][0] = av_clip(tiley * s->tile_height + s->tile_offset_y, s->image_offset_y, s->height);
tile->coord[1][1] = av_clip((tiley + 1) * s->tile_height + s->tile_offset_y, s->image_offset_y, s->height);
for (compno = 0; compno < s->ncomponents; compno++) {
Jpeg2000Component *comp = tile->comp + compno;
Jpeg2000CodingStyle *codsty = tile->codsty + compno;
Jpeg2000QuantStyle *qntsty = tile->qntsty + compno;
int ret; // global bandno
comp->coord_o[0][0] = tile->coord[0][0];
comp->coord_o[0][1] = tile->coord[0][1];
comp->coord_o[1][0] = tile->coord[1][0];
comp->coord_o[1][1] = tile->coord[1][1];
if (compno) {
comp->coord_o[0][0] /= s->cdx[compno];
comp->coord_o[0][1] /= s->cdx[compno];
comp->coord_o[1][0] /= s->cdy[compno];
comp->coord_o[1][1] /= s->cdy[compno];
}
comp->coord[0][0] = ff_jpeg2000_ceildivpow2(comp->coord_o[0][0], s->reduction_factor);
comp->coord[0][1] = ff_jpeg2000_ceildivpow2(comp->coord_o[0][1], s->reduction_factor);
comp->coord[1][0] = ff_jpeg2000_ceildivpow2(comp->coord_o[1][0], s->reduction_factor);
comp->coord[1][1] = ff_jpeg2000_ceildivpow2(comp->coord_o[1][1], s->reduction_factor);
if (ret = ff_jpeg2000_init_component(comp, codsty, qntsty,
s->cbps[compno], s->cdx[compno],
s->cdy[compno], s->avctx))
return ret;
}
return 0;
}
| 27,197 |
FFmpeg | e56b0984103b981ec25fe8a22ef9c4905b9751dd | 1 | static int srt_write_packet(AVFormatContext *avf, AVPacket *pkt)
{
SRTContext *srt = avf->priv_data;
int write_ts = avf->streams[0]->codec->codec_id != AV_CODEC_ID_SRT;
srt->index++;
if (write_ts) {
int64_t s = pkt->pts, e, d = pkt->duration;
if (d <= 0)
/* For backward compatibility, fallback to convergence_duration. */
d = pkt->convergence_duration;
if (s == AV_NOPTS_VALUE || d < 0) {
av_log(avf, AV_LOG_ERROR, "Insufficient timestamps.\n");
return AVERROR(EINVAL);
}
e = s + d;
avio_printf(avf->pb, "%d\n%02d:%02d:%02d,%03d --> %02d:%02d:%02d,%03d\n",
srt->index,
(int)(s / 3600000), (int)(s / 60000) % 60,
(int)(s / 1000) % 60, (int)(s % 1000),
(int)(e / 3600000), (int)(e / 60000) % 60,
(int)(e / 1000) % 60, (int)(e % 1000));
}
avio_write(avf->pb, pkt->data, pkt->size);
if (write_ts)
avio_write(avf->pb, "\n\n", 2);
avio_flush(avf->pb);
return 0;
}
| 27,198 |
FFmpeg | bcaf64b605442e1622d16da89d4ec0e7730b8a8c | 0 | static int flac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
const AVFrame *frame, int *got_packet_ptr)
{
FlacEncodeContext *s;
int frame_bytes, out_bytes, ret;
s = avctx->priv_data;
/* when the last block is reached, update the header in extradata */
if (!frame) {
s->max_framesize = s->max_encoded_framesize;
av_md5_final(s->md5ctx, s->md5sum);
write_streaminfo(s, avctx->extradata);
return 0;
}
/* change max_framesize for small final frame */
if (frame->nb_samples < s->frame.blocksize) {
s->max_framesize = ff_flac_get_max_frame_size(frame->nb_samples,
s->channels,
avctx->bits_per_raw_sample);
}
init_frame(s, frame->nb_samples);
copy_samples(s, frame->data[0]);
channel_decorrelation(s);
remove_wasted_bits(s);
frame_bytes = encode_frame(s);
/* fallback to verbatim mode if the compressed frame is larger than it
would be if encoded uncompressed. */
if (frame_bytes < 0 || frame_bytes > s->max_framesize) {
s->frame.verbatim_only = 1;
frame_bytes = encode_frame(s);
if (frame_bytes < 0) {
av_log(avctx, AV_LOG_ERROR, "Bad frame count\n");
return frame_bytes;
}
}
if ((ret = ff_alloc_packet2(avctx, avpkt, frame_bytes)))
return ret;
out_bytes = write_frame(s, avpkt);
s->frame_count++;
s->sample_count += frame->nb_samples;
if ((ret = update_md5_sum(s, frame->data[0])) < 0) {
av_log(avctx, AV_LOG_ERROR, "Error updating MD5 checksum\n");
return ret;
}
if (out_bytes > s->max_encoded_framesize)
s->max_encoded_framesize = out_bytes;
if (out_bytes < s->min_framesize)
s->min_framesize = out_bytes;
avpkt->pts = frame->pts;
avpkt->duration = ff_samples_to_time_base(avctx, frame->nb_samples);
avpkt->size = out_bytes;
*got_packet_ptr = 1;
return 0;
}
| 27,200 |
FFmpeg | daa1ea049a9445b7bed03963cb789497065dd1eb | 0 | void ff_vp3_v_loop_filter_mmx(uint8_t *src, int stride, int *bounding_values)
{
__asm__ volatile(
"movq %0, %%mm6 \n\t"
"movq %1, %%mm4 \n\t"
"movq %2, %%mm2 \n\t"
"movq %3, %%mm1 \n\t"
VP3_LOOP_FILTER(%4)
"movq %%mm4, %1 \n\t"
"movq %%mm3, %2 \n\t"
: "+m" (*(uint64_t*)(src - 2*stride)),
"+m" (*(uint64_t*)(src - 1*stride)),
"+m" (*(uint64_t*)(src + 0*stride)),
"+m" (*(uint64_t*)(src + 1*stride))
: "m"(*(uint64_t*)(bounding_values+129))
);
}
| 27,202 |
FFmpeg | 253d0be6a1ecc343d29ff8e1df0ddf961ab9c772 | 0 | static int decode(AVCodecContext *avctx, void *data, int *data_size,
AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
const uint8_t *buf_end;
uint8_t segment_type;
int segment_length;
int i, ret;
av_dlog(avctx, "PGS sub packet:\n");
for (i = 0; i < buf_size; i++) {
av_dlog(avctx, "%02x ", buf[i]);
if (i % 16 == 15)
av_dlog(avctx, "\n");
}
if (i & 15)
av_dlog(avctx, "\n");
*data_size = 0;
/* Ensure that we have received at a least a segment code and segment length */
if (buf_size < 3)
return -1;
buf_end = buf + buf_size;
/* Step through buffer to identify segments */
while (buf < buf_end) {
segment_type = bytestream_get_byte(&buf);
segment_length = bytestream_get_be16(&buf);
av_dlog(avctx, "Segment Length %d, Segment Type %x\n", segment_length, segment_type);
if (segment_type != DISPLAY_SEGMENT && segment_length > buf_end - buf)
break;
switch (segment_type) {
case PALETTE_SEGMENT:
parse_palette_segment(avctx, buf, segment_length);
break;
case PICTURE_SEGMENT:
parse_picture_segment(avctx, buf, segment_length);
break;
case PRESENTATION_SEGMENT:
ret = parse_presentation_segment(avctx, buf, segment_length, avpkt->pts);
if (ret < 0)
return ret;
break;
case WINDOW_SEGMENT:
/*
* Window Segment Structure (No new information provided):
* 2 bytes: Unknown,
* 2 bytes: X position of subtitle,
* 2 bytes: Y position of subtitle,
* 2 bytes: Width of subtitle,
* 2 bytes: Height of subtitle.
*/
break;
case DISPLAY_SEGMENT:
*data_size = display_end_segment(avctx, data, buf, segment_length);
break;
default:
av_log(avctx, AV_LOG_ERROR, "Unknown subtitle segment type 0x%x, length %d\n",
segment_type, segment_length);
break;
}
buf += segment_length;
}
return buf_size;
}
| 27,204 |
FFmpeg | 6ec688e1bc76dd93151cbca1c340162ae4b10d77 | 0 | static int mov_finalize_stsd_codec(MOVContext *c, AVIOContext *pb,
AVStream *st, MOVStreamContext *sc)
{
if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
!st->codec->sample_rate && sc->time_scale > 1)
st->codec->sample_rate = sc->time_scale;
/* special codec parameters handling */
switch (st->codec->codec_id) {
#if CONFIG_DV_DEMUXER
case AV_CODEC_ID_DVAUDIO:
c->dv_fctx = avformat_alloc_context();
if (!c->dv_fctx) {
av_log(c->fc, AV_LOG_ERROR, "dv demux context alloc error\n");
return AVERROR(ENOMEM);
}
c->dv_demux = avpriv_dv_init_demux(c->dv_fctx);
if (!c->dv_demux) {
av_log(c->fc, AV_LOG_ERROR, "dv demux context init error\n");
return AVERROR(ENOMEM);
}
sc->dv_audio_container = 1;
st->codec->codec_id = AV_CODEC_ID_PCM_S16LE;
break;
#endif
/* no ifdef since parameters are always those */
case AV_CODEC_ID_QCELP:
st->codec->channels = 1;
// force sample rate for qcelp when not stored in mov
if (st->codec->codec_tag != MKTAG('Q','c','l','p'))
st->codec->sample_rate = 8000;
break;
case AV_CODEC_ID_AMR_NB:
st->codec->channels = 1;
/* force sample rate for amr, stsd in 3gp does not store sample rate */
st->codec->sample_rate = 8000;
break;
case AV_CODEC_ID_AMR_WB:
st->codec->channels = 1;
st->codec->sample_rate = 16000;
break;
case AV_CODEC_ID_MP2:
case AV_CODEC_ID_MP3:
/* force type after stsd for m1a hdlr */
st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
st->need_parsing = AVSTREAM_PARSE_FULL;
break;
case AV_CODEC_ID_GSM:
case AV_CODEC_ID_ADPCM_MS:
case AV_CODEC_ID_ADPCM_IMA_WAV:
case AV_CODEC_ID_ILBC:
st->codec->block_align = sc->bytes_per_frame;
break;
case AV_CODEC_ID_ALAC:
if (st->codec->extradata_size == 36) {
st->codec->channels = AV_RB8 (st->codec->extradata + 21);
st->codec->sample_rate = AV_RB32(st->codec->extradata + 32);
}
break;
case AV_CODEC_ID_VC1:
st->need_parsing = AVSTREAM_PARSE_FULL;
break;
default:
break;
}
return 0;
}
| 27,205 |
qemu | d9bce9d99f4656ae0b0127f7472db9067b8f84ab | 1 | void OPPROTO op_srli_T1 (void)
{
T1 = T1 >> PARAM1;
RETURN();
}
| 27,206 |
FFmpeg | 23b203014f5dbd85b75a6b97597be9c877cd3a1b | 1 | static int decode_pic_hdr(IVI45DecContext *ctx, AVCodecContext *avctx)
{
int pic_size_indx, i, p;
IVIPicConfig pic_conf;
if (get_bits(&ctx->gb, 18) != 0x3FFF8) {
av_log(avctx, AV_LOG_ERROR, "Invalid picture start code!\n");
return AVERROR_INVALIDDATA;
}
ctx->prev_frame_type = ctx->frame_type;
ctx->frame_type = get_bits(&ctx->gb, 3);
if (ctx->frame_type == 7) {
av_log(avctx, AV_LOG_ERROR, "Invalid frame type: %d\n", ctx->frame_type);
return AVERROR_INVALIDDATA;
}
#if IVI4_STREAM_ANALYSER
if (ctx->frame_type == FRAMETYPE_BIDIR)
ctx->has_b_frames = 1;
#endif
ctx->transp_status = get_bits1(&ctx->gb);
#if IVI4_STREAM_ANALYSER
if (ctx->transp_status) {
ctx->has_transp = 1;
}
#endif
/* unknown bit: Mac decoder ignores this bit, XANIM returns error */
if (get_bits1(&ctx->gb)) {
av_log(avctx, AV_LOG_ERROR, "Sync bit is set!\n");
return AVERROR_INVALIDDATA;
}
ctx->data_size = get_bits1(&ctx->gb) ? get_bits(&ctx->gb, 24) : 0;
/* null frames don't contain anything else so we just return */
if (ctx->frame_type >= FRAMETYPE_NULL_FIRST) {
av_dlog(avctx, "Null frame encountered!\n");
return 0;
}
/* Check key lock status. If enabled - ignore lock word. */
/* Usually we have to prompt the user for the password, but */
/* we don't do that because Indeo 4 videos can be decoded anyway */
if (get_bits1(&ctx->gb)) {
skip_bits_long(&ctx->gb, 32);
av_dlog(avctx, "Password-protected clip!\n");
}
pic_size_indx = get_bits(&ctx->gb, 3);
if (pic_size_indx == IVI4_PIC_SIZE_ESC) {
pic_conf.pic_height = get_bits(&ctx->gb, 16);
pic_conf.pic_width = get_bits(&ctx->gb, 16);
} else {
pic_conf.pic_height = ivi4_common_pic_sizes[pic_size_indx * 2 + 1];
pic_conf.pic_width = ivi4_common_pic_sizes[pic_size_indx * 2 ];
}
/* Decode tile dimensions. */
if (get_bits1(&ctx->gb)) {
pic_conf.tile_height = scale_tile_size(pic_conf.pic_height, get_bits(&ctx->gb, 4));
pic_conf.tile_width = scale_tile_size(pic_conf.pic_width, get_bits(&ctx->gb, 4));
#if IVI4_STREAM_ANALYSER
ctx->uses_tiling = 1;
#endif
} else {
pic_conf.tile_height = pic_conf.pic_height;
pic_conf.tile_width = pic_conf.pic_width;
}
/* Decode chroma subsampling. We support only 4:4 aka YVU9. */
if (get_bits(&ctx->gb, 2)) {
av_log(avctx, AV_LOG_ERROR, "Only YVU9 picture format is supported!\n");
return AVERROR_INVALIDDATA;
}
pic_conf.chroma_height = (pic_conf.pic_height + 3) >> 2;
pic_conf.chroma_width = (pic_conf.pic_width + 3) >> 2;
/* decode subdivision of the planes */
pic_conf.luma_bands = decode_plane_subdivision(&ctx->gb);
if (pic_conf.luma_bands)
pic_conf.chroma_bands = decode_plane_subdivision(&ctx->gb);
ctx->is_scalable = pic_conf.luma_bands != 1 || pic_conf.chroma_bands != 1;
if (ctx->is_scalable && (pic_conf.luma_bands != 4 || pic_conf.chroma_bands != 1)) {
av_log(avctx, AV_LOG_ERROR, "Scalability: unsupported subdivision! Luma bands: %d, chroma bands: %d\n",
pic_conf.luma_bands, pic_conf.chroma_bands);
return AVERROR_INVALIDDATA;
}
/* check if picture layout was changed and reallocate buffers */
if (ivi_pic_config_cmp(&pic_conf, &ctx->pic_conf)) {
if (ff_ivi_init_planes(ctx->planes, &pic_conf)) {
av_log(avctx, AV_LOG_ERROR, "Couldn't reallocate color planes!\n");
return AVERROR(ENOMEM);
}
ctx->pic_conf = pic_conf;
/* set default macroblock/block dimensions */
for (p = 0; p <= 2; p++) {
for (i = 0; i < (!p ? pic_conf.luma_bands : pic_conf.chroma_bands); i++) {
ctx->planes[p].bands[i].mb_size = !p ? (!ctx->is_scalable ? 16 : 8) : 4;
ctx->planes[p].bands[i].blk_size = !p ? 8 : 4;
}
}
if (ff_ivi_init_tiles(ctx->planes, ctx->pic_conf.tile_width,
ctx->pic_conf.tile_height)) {
av_log(avctx, AV_LOG_ERROR,
"Couldn't reallocate internal structures!\n");
return AVERROR(ENOMEM);
}
}
ctx->frame_num = get_bits1(&ctx->gb) ? get_bits(&ctx->gb, 20) : 0;
/* skip decTimeEst field if present */
if (get_bits1(&ctx->gb))
skip_bits(&ctx->gb, 8);
/* decode macroblock and block huffman codebooks */
if (ff_ivi_dec_huff_desc(&ctx->gb, get_bits1(&ctx->gb), IVI_MB_HUFF, &ctx->mb_vlc, avctx) ||
ff_ivi_dec_huff_desc(&ctx->gb, get_bits1(&ctx->gb), IVI_BLK_HUFF, &ctx->blk_vlc, avctx))
return AVERROR_INVALIDDATA;
ctx->rvmap_sel = get_bits1(&ctx->gb) ? get_bits(&ctx->gb, 3) : 8;
ctx->in_imf = get_bits1(&ctx->gb);
ctx->in_q = get_bits1(&ctx->gb);
ctx->pic_glob_quant = get_bits(&ctx->gb, 5);
/* TODO: ignore this parameter if unused */
ctx->unknown1 = get_bits1(&ctx->gb) ? get_bits(&ctx->gb, 3) : 0;
ctx->checksum = get_bits1(&ctx->gb) ? get_bits(&ctx->gb, 16) : 0;
/* skip picture header extension if any */
while (get_bits1(&ctx->gb)) {
av_dlog(avctx, "Pic hdr extension encountered!\n");
skip_bits(&ctx->gb, 8);
}
if (get_bits1(&ctx->gb)) {
av_log(avctx, AV_LOG_ERROR, "Bad blocks bits encountered!\n");
}
align_get_bits(&ctx->gb);
return 0;
} | 27,207 |
FFmpeg | 28f9ab7029bd1a02f659995919f899f84ee7361b | 0 | void ff_vp3_idct_add_altivec(uint8_t *dst, int stride, DCTELEM block[64])
{
LOAD_ZERO;
vec_u8 t, vdst;
vec_s16 vdst_16;
vec_u8 vdst_mask = vec_mergeh(vec_splat_u8(-1), vec_lvsl(0, dst));
IDCT_START
IDCT_1D(NOP, NOP)
TRANSPOSE8(b0, b1, b2, b3, b4, b5, b6, b7);
IDCT_1D(ADD8, SHIFT4)
#define ADD(a)\
vdst = vec_ld(0, dst);\
vdst_16 = (vec_s16)vec_perm(vdst, zero_u8v, vdst_mask);\
vdst_16 = vec_adds(a, vdst_16);\
t = vec_packsu(vdst_16, vdst_16);\
vec_ste((vec_u32)t, 0, (unsigned int *)dst);\
vec_ste((vec_u32)t, 4, (unsigned int *)dst);
ADD(b0) dst += stride;
ADD(b1) dst += stride;
ADD(b2) dst += stride;
ADD(b3) dst += stride;
ADD(b4) dst += stride;
ADD(b5) dst += stride;
ADD(b6) dst += stride;
ADD(b7)
}
| 27,208 |
qemu | 5923f85fb82df7c8c60a89458a5ae856045e5ab1 | 1 | static void qobject_output_type_uint64(Visitor *v, const char *name,
uint64_t *obj, Error **errp)
{
/* FIXME values larger than INT64_MAX become negative */
QObjectOutputVisitor *qov = to_qov(v);
qobject_output_add(qov, name, qnum_from_int(*obj));
}
| 27,209 |
FFmpeg | c69461d73797e02e7a3ab4316050c241fa91f53f | 1 | static int asf_read_replicated_data(AVFormatContext *s, ASFPacket *asf_pkt)
{
ASFContext *asf = s->priv_data;
AVIOContext *pb = s->pb;
int ret;
if (!asf_pkt->data_size) {
asf_pkt->data_size = asf_pkt->size_left = avio_rl32(pb); // read media object size
if (asf_pkt->data_size <= 0)
return AVERROR_INVALIDDATA;
if ((ret = av_new_packet(&asf_pkt->avpkt, asf_pkt->data_size)) < 0)
return ret;
} else
avio_skip(pb, 4); // reading of media object size is already done
asf_pkt->dts = avio_rl32(pb); // read presentation time
if (asf->rep_data_len && (asf->rep_data_len >= 8))
avio_skip(pb, asf->rep_data_len - 8); // skip replicated data
return 0;
}
| 27,210 |
qemu | e3d142d073d02f0a3a4aad79eb838c15b6f99c01 | 1 | float64 float64_muladd(float64 a, float64 b, float64 c, int flags STATUS_PARAM)
{
flag aSign, bSign, cSign, zSign;
int_fast16_t aExp, bExp, cExp, pExp, zExp, expDiff;
uint64_t aSig, bSig, cSig;
flag pInf, pZero, pSign;
uint64_t pSig0, pSig1, cSig0, cSig1, zSig0, zSig1;
int shiftcount;
flag signflip, infzero;
a = float64_squash_input_denormal(a STATUS_VAR);
b = float64_squash_input_denormal(b STATUS_VAR);
c = float64_squash_input_denormal(c STATUS_VAR);
aSig = extractFloat64Frac(a);
aExp = extractFloat64Exp(a);
aSign = extractFloat64Sign(a);
bSig = extractFloat64Frac(b);
bExp = extractFloat64Exp(b);
bSign = extractFloat64Sign(b);
cSig = extractFloat64Frac(c);
cExp = extractFloat64Exp(c);
cSign = extractFloat64Sign(c);
infzero = ((aExp == 0 && aSig == 0 && bExp == 0x7ff && bSig == 0) ||
(aExp == 0x7ff && aSig == 0 && bExp == 0 && bSig == 0));
/* It is implementation-defined whether the cases of (0,inf,qnan)
* and (inf,0,qnan) raise InvalidOperation or not (and what QNaN
* they return if they do), so we have to hand this information
* off to the target-specific pick-a-NaN routine.
*/
if (((aExp == 0x7ff) && aSig) ||
((bExp == 0x7ff) && bSig) ||
((cExp == 0x7ff) && cSig)) {
return propagateFloat64MulAddNaN(a, b, c, infzero STATUS_VAR);
}
if (infzero) {
float_raise(float_flag_invalid STATUS_VAR);
return float64_default_nan;
}
if (flags & float_muladd_negate_c) {
cSign ^= 1;
}
signflip = (flags & float_muladd_negate_result) ? 1 : 0;
/* Work out the sign and type of the product */
pSign = aSign ^ bSign;
if (flags & float_muladd_negate_product) {
pSign ^= 1;
}
pInf = (aExp == 0x7ff) || (bExp == 0x7ff);
pZero = ((aExp | aSig) == 0) || ((bExp | bSig) == 0);
if (cExp == 0x7ff) {
if (pInf && (pSign ^ cSign)) {
/* addition of opposite-signed infinities => InvalidOperation */
float_raise(float_flag_invalid STATUS_VAR);
return float64_default_nan;
}
/* Otherwise generate an infinity of the same sign */
return packFloat64(cSign ^ signflip, 0x7ff, 0);
}
if (pInf) {
return packFloat64(pSign ^ signflip, 0x7ff, 0);
}
if (pZero) {
if (cExp == 0) {
if (cSig == 0) {
/* Adding two exact zeroes */
if (pSign == cSign) {
zSign = pSign;
} else if (STATUS(float_rounding_mode) == float_round_down) {
zSign = 1;
} else {
zSign = 0;
}
return packFloat64(zSign ^ signflip, 0, 0);
}
/* Exact zero plus a denorm */
if (STATUS(flush_to_zero)) {
float_raise(float_flag_output_denormal STATUS_VAR);
return packFloat64(cSign ^ signflip, 0, 0);
}
}
/* Zero plus something non-zero : just return the something */
return packFloat64(cSign ^ signflip, cExp, cSig);
}
if (aExp == 0) {
normalizeFloat64Subnormal(aSig, &aExp, &aSig);
}
if (bExp == 0) {
normalizeFloat64Subnormal(bSig, &bExp, &bSig);
}
/* Calculate the actual result a * b + c */
/* Multiply first; this is easy. */
/* NB: we subtract 0x3fe where float64_mul() subtracts 0x3ff
* because we want the true exponent, not the "one-less-than"
* flavour that roundAndPackFloat64() takes.
*/
pExp = aExp + bExp - 0x3fe;
aSig = (aSig | LIT64(0x0010000000000000))<<10;
bSig = (bSig | LIT64(0x0010000000000000))<<11;
mul64To128(aSig, bSig, &pSig0, &pSig1);
if ((int64_t)(pSig0 << 1) >= 0) {
shortShift128Left(pSig0, pSig1, 1, &pSig0, &pSig1);
pExp--;
}
zSign = pSign ^ signflip;
/* Now [pSig0:pSig1] is the significand of the multiply, with the explicit
* bit in position 126.
*/
if (cExp == 0) {
if (!cSig) {
/* Throw out the special case of c being an exact zero now */
shift128RightJamming(pSig0, pSig1, 64, &pSig0, &pSig1);
return roundAndPackFloat64(zSign, pExp - 1,
pSig1 STATUS_VAR);
}
normalizeFloat64Subnormal(cSig, &cExp, &cSig);
}
/* Shift cSig and add the explicit bit so [cSig0:cSig1] is the
* significand of the addend, with the explicit bit in position 126.
*/
cSig0 = cSig << (126 - 64 - 52);
cSig1 = 0;
cSig0 |= LIT64(0x4000000000000000);
expDiff = pExp - cExp;
if (pSign == cSign) {
/* Addition */
if (expDiff > 0) {
/* scale c to match p */
shift128RightJamming(cSig0, cSig1, expDiff, &cSig0, &cSig1);
zExp = pExp;
} else if (expDiff < 0) {
/* scale p to match c */
shift128RightJamming(pSig0, pSig1, -expDiff, &pSig0, &pSig1);
zExp = cExp;
} else {
/* no scaling needed */
zExp = cExp;
}
/* Add significands and make sure explicit bit ends up in posn 126 */
add128(pSig0, pSig1, cSig0, cSig1, &zSig0, &zSig1);
if ((int64_t)zSig0 < 0) {
shift128RightJamming(zSig0, zSig1, 1, &zSig0, &zSig1);
} else {
zExp--;
}
shift128RightJamming(zSig0, zSig1, 64, &zSig0, &zSig1);
return roundAndPackFloat64(zSign, zExp, zSig1 STATUS_VAR);
} else {
/* Subtraction */
if (expDiff > 0) {
shift128RightJamming(cSig0, cSig1, expDiff, &cSig0, &cSig1);
sub128(pSig0, pSig1, cSig0, cSig1, &zSig0, &zSig1);
zExp = pExp;
} else if (expDiff < 0) {
shift128RightJamming(pSig0, pSig1, -expDiff, &pSig0, &pSig1);
sub128(cSig0, cSig1, pSig0, pSig1, &zSig0, &zSig1);
zExp = cExp;
zSign ^= 1;
} else {
zExp = pExp;
if (lt128(cSig0, cSig1, pSig0, pSig1)) {
sub128(pSig0, pSig1, cSig0, cSig1, &zSig0, &zSig1);
} else if (lt128(pSig0, pSig1, cSig0, cSig1)) {
sub128(cSig0, cSig1, pSig0, pSig1, &zSig0, &zSig1);
zSign ^= 1;
} else {
/* Exact zero */
zSign = signflip;
if (STATUS(float_rounding_mode) == float_round_down) {
zSign ^= 1;
}
return packFloat64(zSign, 0, 0);
}
}
--zExp;
/* Do the equivalent of normalizeRoundAndPackFloat64() but
* starting with the significand in a pair of uint64_t.
*/
if (zSig0) {
shiftcount = countLeadingZeros64(zSig0) - 1;
shortShift128Left(zSig0, zSig1, shiftcount, &zSig0, &zSig1);
if (zSig1) {
zSig0 |= 1;
}
zExp -= shiftcount;
} else {
shiftcount = countLeadingZeros64(zSig1) - 1;
zSig0 = zSig1 << shiftcount;
zExp -= (shiftcount + 64);
}
return roundAndPackFloat64(zSign, zExp, zSig0 STATUS_VAR);
}
}
| 27,211 |
qemu | 94ef4f337fb614f18b765a8e0e878a4c23cdedcd | 1 | static void vga_precise_update_retrace_info(VGACommonState *s)
{
int htotal_chars;
int hretr_start_char;
int hretr_skew_chars;
int hretr_end_char;
int vtotal_lines;
int vretr_start_line;
int vretr_end_line;
int dots;
#if 0
int div2, sldiv2;
#endif
int clocking_mode;
int clock_sel;
const int clk_hz[] = {25175000, 28322000, 25175000, 25175000};
int64_t chars_per_sec;
struct vga_precise_retrace *r = &s->retrace_info.precise;
htotal_chars = s->cr[VGA_CRTC_H_TOTAL] + 5;
hretr_start_char = s->cr[VGA_CRTC_H_SYNC_START];
hretr_skew_chars = (s->cr[VGA_CRTC_H_SYNC_END] >> 5) & 3;
hretr_end_char = s->cr[VGA_CRTC_H_SYNC_END] & 0x1f;
vtotal_lines = (s->cr[VGA_CRTC_V_TOTAL] |
(((s->cr[VGA_CRTC_OVERFLOW] & 1) |
((s->cr[VGA_CRTC_OVERFLOW] >> 4) & 2)) << 8)) + 2;
vretr_start_line = s->cr[VGA_CRTC_V_SYNC_START] |
((((s->cr[VGA_CRTC_OVERFLOW] >> 2) & 1) |
((s->cr[VGA_CRTC_OVERFLOW] >> 6) & 2)) << 8);
vretr_end_line = s->cr[VGA_CRTC_V_SYNC_END] & 0xf;
clocking_mode = (s->sr[VGA_SEQ_CLOCK_MODE] >> 3) & 1;
clock_sel = (s->msr >> 2) & 3;
dots = (s->msr & 1) ? 8 : 9;
chars_per_sec = clk_hz[clock_sel] / dots;
htotal_chars <<= clocking_mode;
r->total_chars = vtotal_lines * htotal_chars;
if (r->freq) {
r->ticks_per_char = NANOSECONDS_PER_SECOND / (r->total_chars * r->freq);
} else {
r->ticks_per_char = NANOSECONDS_PER_SECOND / chars_per_sec;
}
r->vstart = vretr_start_line;
r->vend = r->vstart + vretr_end_line + 1;
r->hstart = hretr_start_char + hretr_skew_chars;
r->hend = r->hstart + hretr_end_char + 1;
r->htotal = htotal_chars;
#if 0
div2 = (s->cr[VGA_CRTC_MODE] >> 2) & 1;
sldiv2 = (s->cr[VGA_CRTC_MODE] >> 3) & 1;
printf (
"hz=%f\n"
"htotal = %d\n"
"hretr_start = %d\n"
"hretr_skew = %d\n"
"hretr_end = %d\n"
"vtotal = %d\n"
"vretr_start = %d\n"
"vretr_end = %d\n"
"div2 = %d sldiv2 = %d\n"
"clocking_mode = %d\n"
"clock_sel = %d %d\n"
"dots = %d\n"
"ticks/char = %" PRId64 "\n"
"\n",
(double) NANOSECONDS_PER_SECOND / (r->ticks_per_char * r->total_chars),
htotal_chars,
hretr_start_char,
hretr_skew_chars,
hretr_end_char,
vtotal_lines,
vretr_start_line,
vretr_end_line,
div2, sldiv2,
clocking_mode,
clock_sel,
clk_hz[clock_sel],
dots,
r->ticks_per_char
);
#endif
}
| 27,212 |
qemu | fe345a3d5d7ed4bc2965c65542832b1fa785ae9d | 1 | static void diag288_timer_expired(void *dev)
{
qemu_log_mask(CPU_LOG_RESET, "Watchdog timer expired.\n");
watchdog_perform_action();
/* Reset the watchdog only if the guest was notified about expiry. */
switch (get_watchdog_action()) {
case WDT_DEBUG:
case WDT_NONE:
case WDT_PAUSE:
return;
}
wdt_diag288_reset(dev);
}
| 27,213 |
qemu | 04e00c92ef75629a241ebc50537f75de0867928d | 1 | void pcie_aer_inject_error_print(Monitor *mon, const QObject *data)
{
QDict *qdict;
int devfn;
assert(qobject_type(data) == QTYPE_QDICT);
qdict = qobject_to_qdict(data);
devfn = (int)qdict_get_int(qdict, "devfn");
monitor_printf(mon, "OK id: %s root bus: %s, bus: %x devfn: %x.%x\n",
qdict_get_str(qdict, "id"),
qdict_get_str(qdict, "root_bus"),
(int) qdict_get_int(qdict, "bus"),
PCI_SLOT(devfn), PCI_FUNC(devfn));
}
| 27,214 |
qemu | a5f533909e746ca6e534b232fb42c9c6fd81b468 | 1 | static void gen_branch(DisasContext *ctx, int insn_bytes)
{
if (ctx->hflags & MIPS_HFLAG_BMASK) {
int proc_hflags = ctx->hflags & MIPS_HFLAG_BMASK;
/* Branches completion */
ctx->hflags &= ~MIPS_HFLAG_BMASK;
ctx->bstate = BS_BRANCH;
save_cpu_state(ctx, 0);
/* FIXME: Need to clear can_do_io. */
switch (proc_hflags & MIPS_HFLAG_BMASK_BASE) {
case MIPS_HFLAG_FBNSLOT:
MIPS_DEBUG("forbidden slot");
gen_goto_tb(ctx, 0, ctx->pc + insn_bytes);
break;
case MIPS_HFLAG_B:
/* unconditional branch */
MIPS_DEBUG("unconditional branch");
if (proc_hflags & MIPS_HFLAG_BX) {
tcg_gen_xori_i32(hflags, hflags, MIPS_HFLAG_M16);
}
gen_goto_tb(ctx, 0, ctx->btarget);
break;
case MIPS_HFLAG_BL:
/* blikely taken case */
MIPS_DEBUG("blikely branch taken");
gen_goto_tb(ctx, 0, ctx->btarget);
break;
case MIPS_HFLAG_BC:
/* Conditional branch */
MIPS_DEBUG("conditional branch");
{
TCGLabel *l1 = gen_new_label();
tcg_gen_brcondi_tl(TCG_COND_NE, bcond, 0, l1);
gen_goto_tb(ctx, 1, ctx->pc + insn_bytes);
gen_set_label(l1);
gen_goto_tb(ctx, 0, ctx->btarget);
}
break;
case MIPS_HFLAG_BR:
/* unconditional branch to register */
MIPS_DEBUG("branch to register");
if (ctx->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) {
TCGv t0 = tcg_temp_new();
TCGv_i32 t1 = tcg_temp_new_i32();
tcg_gen_andi_tl(t0, btarget, 0x1);
tcg_gen_trunc_tl_i32(t1, t0);
tcg_temp_free(t0);
tcg_gen_andi_i32(hflags, hflags, ~(uint32_t)MIPS_HFLAG_M16);
tcg_gen_shli_i32(t1, t1, MIPS_HFLAG_M16_SHIFT);
tcg_gen_or_i32(hflags, hflags, t1);
tcg_temp_free_i32(t1);
tcg_gen_andi_tl(cpu_PC, btarget, ~(target_ulong)0x1);
} else {
tcg_gen_mov_tl(cpu_PC, btarget);
}
if (ctx->singlestep_enabled) {
save_cpu_state(ctx, 0);
gen_helper_0e0i(raise_exception, EXCP_DEBUG);
}
tcg_gen_exit_tb(0);
break;
default:
MIPS_DEBUG("unknown branch");
break;
}
}
}
| 27,215 |
FFmpeg | 3d2cd42f8a19460d1fd44771646b4411c3148383 | 0 | static int flac_encode_frame(AVCodecContext *avctx, uint8_t *frame,
int buf_size, void *data)
{
FlacEncodeContext *s;
const int16_t *samples = data;
int frame_bytes, out_bytes;
s = avctx->priv_data;
/* when the last block is reached, update the header in extradata */
if (!data) {
s->max_framesize = s->max_encoded_framesize;
av_md5_final(s->md5ctx, s->md5sum);
write_streaminfo(s, avctx->extradata);
return 0;
}
/* change max_framesize for small final frame */
if (avctx->frame_size < s->frame.blocksize) {
s->max_framesize = ff_flac_get_max_frame_size(avctx->frame_size,
s->channels, 16);
}
init_frame(s);
copy_samples(s, samples);
channel_decorrelation(s);
frame_bytes = encode_frame(s);
if (buf_size < frame_bytes) {
av_log(avctx, AV_LOG_ERROR, "output buffer too small\n");
return 0;
}
out_bytes = write_frame(s, frame, buf_size);
/* fallback to verbatim mode if the compressed frame is larger than it
would be if encoded uncompressed. */
if (out_bytes > s->max_framesize) {
s->frame.verbatim_only = 1;
frame_bytes = encode_frame(s);
if (buf_size < frame_bytes) {
av_log(avctx, AV_LOG_ERROR, "output buffer too small\n");
return 0;
}
out_bytes = write_frame(s, frame, buf_size);
}
s->frame_count++;
avctx->coded_frame->pts = s->sample_count;
s->sample_count += avctx->frame_size;
update_md5_sum(s, samples);
if (out_bytes > s->max_encoded_framesize)
s->max_encoded_framesize = out_bytes;
if (out_bytes < s->min_framesize)
s->min_framesize = out_bytes;
return out_bytes;
}
| 27,217 |
FFmpeg | 6f1ccca4ae3b93b6a2a820a7a0e72081ab35767c | 0 | static int dnxhd_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
DNXHDContext *ctx = avctx->priv_data;
ThreadFrame frame = { .f = data };
AVFrame *picture = data;
int first_field = 1;
int ret, i;
ff_dlog(avctx, "frame size %d\n", buf_size);
decode_coding_unit:
if ((ret = dnxhd_decode_header(ctx, picture, buf, buf_size, first_field)) < 0)
return ret;
if ((avctx->width || avctx->height) &&
(ctx->width != avctx->width || ctx->height != avctx->height)) {
av_log(avctx, AV_LOG_WARNING, "frame size changed: %dx%d -> %dx%d\n",
avctx->width, avctx->height, ctx->width, ctx->height);
first_field = 1;
}
if (avctx->pix_fmt != AV_PIX_FMT_NONE && avctx->pix_fmt != ctx->pix_fmt) {
av_log(avctx, AV_LOG_WARNING, "pix_fmt changed: %s -> %s\n",
av_get_pix_fmt_name(avctx->pix_fmt), av_get_pix_fmt_name(ctx->pix_fmt));
first_field = 1;
}
avctx->pix_fmt = ctx->pix_fmt;
ret = ff_set_dimensions(avctx, ctx->width, ctx->height);
if (ret < 0)
return ret;
if (first_field) {
if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
return ret;
picture->pict_type = AV_PICTURE_TYPE_I;
picture->key_frame = 1;
}
ctx->buf_size = buf_size - 0x280;
ctx->buf = buf + 0x280;
avctx->execute2(avctx, dnxhd_decode_row, picture, NULL, ctx->mb_height);
if (first_field && picture->interlaced_frame) {
buf += ctx->cid_table->coding_unit_size;
buf_size -= ctx->cid_table->coding_unit_size;
first_field = 0;
goto decode_coding_unit;
}
ret = 0;
for (i = 0; i < avctx->thread_count; i++) {
ret += ctx->rows[i].errors;
ctx->rows[i].errors = 0;
}
if (ret) {
av_log(ctx->avctx, AV_LOG_ERROR, "%d lines with errors\n", ret);
return AVERROR_INVALIDDATA;
}
*got_frame = 1;
return avpkt->size;
}
| 27,218 |
FFmpeg | 0ecca7a49f8e254c12a3a1de048d738bfbb614c6 | 1 | static void encode_gray_bitstream(HYuvContext *s, int count){
int i;
count/=2;
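/* Three paths below: first-pass mode only gathers symbol statistics, context mode updates the stats while writing codes, otherwise the Huffman codes are emitted directly. */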
if(s->flags&CODEC_FLAG_PASS1){
for(i=0; i<count; i++){
s->stats[0][ s->temp[0][2*i ] ]++;
s->stats[0][ s->temp[0][2*i+1] ]++;
}
}else if(s->context){
for(i=0; i<count; i++){
s->stats[0][ s->temp[0][2*i ] ]++;
put_bits(&s->pb, s->len[0][ s->temp[0][2*i ] ], s->bits[0][ s->temp[0][2*i ] ]);
s->stats[0][ s->temp[0][2*i+1] ]++;
put_bits(&s->pb, s->len[0][ s->temp[0][2*i+1] ], s->bits[0][ s->temp[0][2*i+1] ]);
}
}else{
for(i=0; i<count; i++){
put_bits(&s->pb, s->len[0][ s->temp[0][2*i ] ], s->bits[0][ s->temp[0][2*i ] ]);
put_bits(&s->pb, s->len[0][ s->temp[0][2*i+1] ], s->bits[0][ s->temp[0][2*i+1] ]);
}
}
}
| 27,219 |
qemu | f3c7d0389fe8a2792fd4c1cf151b885de03c8f62 | 1 | qemu_irq qemu_irq_invert(qemu_irq irq)
{
/* The default state for IRQs is low, so raise the output now. */
qemu_irq_raise(irq);
return qemu_allocate_irqs(qemu_notirq, irq, 1)[0];
}
| 27,220 |
FFmpeg | 53918a1c54b49e76c3ca517e2caada8372995712 | 1 | static int dca_exss_parse_asset_header(DCAContext *s)
{
int header_pos = get_bits_count(&s->gb);
int header_size;
int channels;
int embedded_stereo = 0;
int embedded_6ch = 0;
int drc_code_present;
int extensions_mask;
int i, j;
if (get_bits_left(&s->gb) < 16)
return -1;
/* We will parse just enough to get to the extensions bitmask with which
* we can set the profile value. */
header_size = get_bits(&s->gb, 9) + 1;
skip_bits(&s->gb, 3); // asset index
if (s->static_fields) {
if (get_bits1(&s->gb))
skip_bits(&s->gb, 4); // asset type descriptor
if (get_bits1(&s->gb))
skip_bits_long(&s->gb, 24); // language descriptor
if (get_bits1(&s->gb)) {
/* How can one fit 1024 bytes of text here if the maximum value
* for the asset header size field above was 512 bytes? */
int text_length = get_bits(&s->gb, 10) + 1;
if (get_bits_left(&s->gb) < text_length * 8)
return -1;
skip_bits_long(&s->gb, text_length * 8); // info text
}
skip_bits(&s->gb, 5); // bit resolution - 1
skip_bits(&s->gb, 4); // max sample rate code
channels = get_bits(&s->gb, 8) + 1;
if (get_bits1(&s->gb)) { // 1-to-1 channels to speakers
int spkr_remap_sets;
int spkr_mask_size = 16;
int num_spkrs[7];
if (channels > 2)
embedded_stereo = get_bits1(&s->gb);
if (channels > 6)
embedded_6ch = get_bits1(&s->gb);
if (get_bits1(&s->gb)) {
spkr_mask_size = (get_bits(&s->gb, 2) + 1) << 2;
skip_bits(&s->gb, spkr_mask_size); // spkr activity mask
}
spkr_remap_sets = get_bits(&s->gb, 3);
for (i = 0; i < spkr_remap_sets; i++) {
/* std layout mask for each remap set */
num_spkrs[i] = dca_exss_mask2count(get_bits(&s->gb, spkr_mask_size));
}
for (i = 0; i < spkr_remap_sets; i++) {
int num_dec_ch_remaps = get_bits(&s->gb, 5) + 1;
if (get_bits_left(&s->gb) < 0)
return -1;
for (j = 0; j < num_spkrs[i]; j++) {
int remap_dec_ch_mask = get_bits_long(&s->gb, num_dec_ch_remaps);
int num_dec_ch = av_popcount(remap_dec_ch_mask);
skip_bits_long(&s->gb, num_dec_ch * 5); // remap codes
}
}
} else {
skip_bits(&s->gb, 3); // representation type
}
}
drc_code_present = get_bits1(&s->gb);
if (drc_code_present)
get_bits(&s->gb, 8); // drc code
if (get_bits1(&s->gb))
skip_bits(&s->gb, 5); // dialog normalization code
if (drc_code_present && embedded_stereo)
get_bits(&s->gb, 8); // drc stereo code
if (s->mix_metadata && get_bits1(&s->gb)) {
skip_bits(&s->gb, 1); // external mix
skip_bits(&s->gb, 6); // post mix gain code
if (get_bits(&s->gb, 2) != 3) // mixer drc code
skip_bits(&s->gb, 3); // drc limit
else
skip_bits(&s->gb, 8); // custom drc code
if (get_bits1(&s->gb)) // channel specific scaling
for (i = 0; i < s->num_mix_configs; i++)
skip_bits_long(&s->gb, s->mix_config_num_ch[i] * 6); // scale codes
else
skip_bits_long(&s->gb, s->num_mix_configs * 6); // scale codes
for (i = 0; i < s->num_mix_configs; i++) {
if (get_bits_left(&s->gb) < 0)
return -1;
dca_exss_skip_mix_coeffs(&s->gb, channels, s->mix_config_num_ch[i]);
if (embedded_6ch)
dca_exss_skip_mix_coeffs(&s->gb, 6, s->mix_config_num_ch[i]);
if (embedded_stereo)
dca_exss_skip_mix_coeffs(&s->gb, 2, s->mix_config_num_ch[i]);
}
}
switch (get_bits(&s->gb, 2)) {
case 0: extensions_mask = get_bits(&s->gb, 12); break;
case 1: extensions_mask = DCA_EXT_EXSS_XLL; break;
case 2: extensions_mask = DCA_EXT_EXSS_LBR; break;
case 3: extensions_mask = 0; /* aux coding */ break;
}
/* not parsed further, we were only interested in the extensions mask */
if (get_bits_left(&s->gb) < 0)
return -1;
if (get_bits_count(&s->gb) - header_pos > header_size * 8) {
av_log(s->avctx, AV_LOG_WARNING, "Asset header size mismatch.\n");
return -1;
}
skip_bits_long(&s->gb, header_pos + header_size * 8 - get_bits_count(&s->gb));
if (extensions_mask & DCA_EXT_EXSS_XLL)
s->profile = FF_PROFILE_DTS_HD_MA;
else if (extensions_mask & (DCA_EXT_EXSS_XBR | DCA_EXT_EXSS_X96 |
DCA_EXT_EXSS_XXCH))
s->profile = FF_PROFILE_DTS_HD_HRA;
if (!(extensions_mask & DCA_EXT_CORE))
av_log(s->avctx, AV_LOG_WARNING, "DTS core detection mismatch.\n");
if ((extensions_mask & DCA_CORE_EXTS) != s->core_ext_mask)
av_log(s->avctx, AV_LOG_WARNING,
"DTS extensions detection mismatch (%d, %d)\n",
extensions_mask & DCA_CORE_EXTS, s->core_ext_mask);
return 0;
}
| 27,221 |
FFmpeg | ed08cbd7b172a1dba74230527ef761aafaf2fcce | 1 | void ff_aac_search_for_ltp(AACEncContext *s, SingleChannelElement *sce,
int common_window)
{
int w, g, w2, i, start = 0, count = 0;
int saved_bits = -(15 + FFMIN(sce->ics.max_sfb, MAX_LTP_LONG_SFB));
float *C34 = &s->scoefs[128*0], *PCD = &s->scoefs[128*1];
float *PCD34 = &s->scoefs[128*2];
const int max_ltp = FFMIN(sce->ics.max_sfb, MAX_LTP_LONG_SFB);
if (sce->ics.window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
if (sce->ics.ltp.lag) {
memset(&sce->lcoeffs[0], 0.0f, 3072*sizeof(sce->lcoeffs[0]));
memset(&sce->ics.ltp, 0, sizeof(LongTermPrediction));
}
return;
}
if (!sce->ics.ltp.lag)
return;
for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
start = 0;
for (g = 0; g < sce->ics.num_swb; g++) {
int bits1 = 0, bits2 = 0;
float dist1 = 0.0f, dist2 = 0.0f;
if (w*16+g > max_ltp) {
start += sce->ics.swb_sizes[g];
continue;
}
for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) {
int bits_tmp1, bits_tmp2;
FFPsyBand *band = &s->psy.ch[s->cur_channel].psy_bands[(w+w2)*16+g];
for (i = 0; i < sce->ics.swb_sizes[g]; i++)
PCD[i] = sce->coeffs[start+(w+w2)*128+i] - sce->lcoeffs[start+(w+w2)*128+i];
abs_pow34_v(C34, &sce->coeffs[start+(w+w2)*128], sce->ics.swb_sizes[g]);
abs_pow34_v(PCD34, PCD, sce->ics.swb_sizes[g]);
dist1 += quantize_band_cost(s, &sce->coeffs[start+(w+w2)*128], C34, sce->ics.swb_sizes[g],
sce->sf_idx[(w+w2)*16+g], sce->band_type[(w+w2)*16+g],
s->lambda/band->threshold, INFINITY, &bits_tmp1, NULL, 0);
dist2 += quantize_band_cost(s, PCD, PCD34, sce->ics.swb_sizes[g],
sce->sf_idx[(w+w2)*16+g],
sce->band_type[(w+w2)*16+g],
s->lambda/band->threshold, INFINITY, &bits_tmp2, NULL, 0);
bits1 += bits_tmp1;
bits2 += bits_tmp2;
}
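/* Keep the LTP residual for this band only if it wins on both distortion and bit cost. */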
if (dist2 < dist1 && bits2 < bits1) {
for (w2 = 0; w2 < sce->ics.group_len[w]; w2++)
for (i = 0; i < sce->ics.swb_sizes[g]; i++)
sce->coeffs[start+(w+w2)*128+i] -= sce->lcoeffs[start+(w+w2)*128+i];
sce->ics.ltp.used[w*16+g] = 1;
saved_bits += bits1 - bits2;
count++;
}
start += sce->ics.swb_sizes[g];
}
}
sce->ics.ltp.present = !!count && (saved_bits >= 0);
sce->ics.predictor_present = !!sce->ics.ltp.present;
/* Reset any marked sfbs */
if (!sce->ics.ltp.present && !!count) {
for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
start = 0;
for (g = 0; g < sce->ics.num_swb; g++) {
if (sce->ics.ltp.used[w*16+g]) {
for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) {
for (i = 0; i < sce->ics.swb_sizes[g]; i++) {
sce->coeffs[start+(w+w2)*128+i] += sce->lcoeffs[start+(w+w2)*128+i];
}
}
}
start += sce->ics.swb_sizes[g];
}
}
}
}
| 27,222 |
qemu | 84cab1e2f5be3ea6eaa65c9fc0422fb992946ce0 | 1 | static void gen_std(DisasContext *ctx)
{
int rs;
TCGv EA;
rs = rS(ctx->opcode);
if ((ctx->opcode & 0x3) == 0x2) {
#if defined(CONFIG_USER_ONLY)
gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
/* stq */
if (unlikely(ctx->mem_idx == 0)) {
gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
if (unlikely(rs & 1)) {
gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
return;
}
if (unlikely(ctx->le_mode)) {
/* Little-endian mode is not handled */
gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_LE);
return;
}
gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new();
gen_addr_imm_index(ctx, EA, 0x03);
gen_qemu_st64(ctx, cpu_gpr[rs], EA);
gen_addr_add(ctx, EA, EA, 8);
gen_qemu_st64(ctx, cpu_gpr[rs+1], EA);
tcg_temp_free(EA);
#endif
} else {
/* std / stdu */
if (Rc(ctx->opcode)) {
if (unlikely(rA(ctx->opcode) == 0)) {
gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
return;
}
}
gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new();
gen_addr_imm_index(ctx, EA, 0x03);
gen_qemu_st64(ctx, cpu_gpr[rs], EA);
if (Rc(ctx->opcode))
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
tcg_temp_free(EA);
}
}
| 27,223 |
FFmpeg | d256ed78ffe202a4dcc8d625becffc716bfa3977 | 1 | static int parse_vtrk(AVFormatContext *s,
FourxmDemuxContext *fourxm, uint8_t *buf, int size,
int left)
{
AVStream *st;
/* check that there is enough data */
if (size != vtrk_SIZE || left < size + 8) {
return AVERROR_INVALIDDATA;
}
/* allocate a new AVStream */
st = avformat_new_stream(s, NULL);
if (!st)
return AVERROR(ENOMEM);
avpriv_set_pts_info(st, 60, 1, fourxm->fps);
fourxm->video_stream_index = st->index;
st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
st->codec->codec_id = AV_CODEC_ID_4XM;
st->codec->extradata_size = 4;
st->codec->extradata = av_malloc(4);
AV_WL32(st->codec->extradata, AV_RL32(buf + 16));
st->codec->width = AV_RL32(buf + 36);
st->codec->height = AV_RL32(buf + 40);
return 0;
}
| 27,224 |
qemu | f11dc27bcc031b9dc90ffd82c503b55f83c246ca | 1 | static void free_test_data(test_data *data)
{
AcpiSdtTable *temp;
int i;
g_free(data->rsdt_tables_addr);
for (i = 0; i < data->tables->len; ++i) {
temp = &g_array_index(data->tables, AcpiSdtTable, i);
g_free(temp->aml);
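/* Presumably only temp files the test generated itself (named "aml-...") are unlinked below, so pre-existing reference blobs are left untouched. */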
if (temp->aml_file &&
!temp->tmp_files_retain &&
g_strstr_len(temp->aml_file, -1, "aml-")) {
unlink(temp->aml_file);
}
g_free(temp->aml_file);
g_free(temp->asl);
if (temp->asl_file &&
!temp->tmp_files_retain) {
unlink(temp->asl_file);
}
g_free(temp->asl_file);
}
g_array_free(data->tables, false);
}
| 27,225 |
FFmpeg | 7ab631261033a71a52563c3b23b6eef826eb5994 | 1 | static void draw_digit(int digit, uint8_t *dst, unsigned dst_linesize,
unsigned segment_width)
{
#define TOP_HBAR 1
#define MID_HBAR 2
#define BOT_HBAR 4
#define LEFT_TOP_VBAR 8
#define LEFT_BOT_VBAR 16
#define RIGHT_TOP_VBAR 32
#define RIGHT_BOT_VBAR 64
struct {
int x, y, w, h;
} segments[] = {
{ 1, 0, 5, 1 }, /* TOP_HBAR */
{ 1, 6, 5, 1 }, /* MID_HBAR */
{ 1, 12, 5, 1 }, /* BOT_HBAR */
{ 0, 1, 1, 5 }, /* LEFT_TOP_VBAR */
{ 0, 7, 1, 5 }, /* LEFT_BOT_VBAR */
{ 6, 1, 1, 5 }, /* RIGHT_TOP_VBAR */
{ 6, 7, 1, 5 } /* RIGHT_BOT_VBAR */
};
static const unsigned char masks[10] = {
/* 0 */ TOP_HBAR |BOT_HBAR|LEFT_TOP_VBAR|LEFT_BOT_VBAR|RIGHT_TOP_VBAR|RIGHT_BOT_VBAR,
/* 1 */ RIGHT_TOP_VBAR|RIGHT_BOT_VBAR,
/* 2 */ TOP_HBAR|MID_HBAR|BOT_HBAR|LEFT_BOT_VBAR |RIGHT_TOP_VBAR,
/* 3 */ TOP_HBAR|MID_HBAR|BOT_HBAR |RIGHT_TOP_VBAR|RIGHT_BOT_VBAR,
/* 4 */ MID_HBAR |LEFT_TOP_VBAR |RIGHT_TOP_VBAR|RIGHT_BOT_VBAR,
/* 5 */ TOP_HBAR|BOT_HBAR|MID_HBAR|LEFT_TOP_VBAR |RIGHT_BOT_VBAR,
/* 6 */ TOP_HBAR|BOT_HBAR|MID_HBAR|LEFT_TOP_VBAR|LEFT_BOT_VBAR |RIGHT_BOT_VBAR,
/* 7 */ TOP_HBAR |RIGHT_TOP_VBAR|RIGHT_BOT_VBAR,
/* 8 */ TOP_HBAR|BOT_HBAR|MID_HBAR|LEFT_TOP_VBAR|LEFT_BOT_VBAR|RIGHT_TOP_VBAR|RIGHT_BOT_VBAR,
/* 9 */ TOP_HBAR|BOT_HBAR|MID_HBAR|LEFT_TOP_VBAR |RIGHT_TOP_VBAR|RIGHT_BOT_VBAR,
};
unsigned mask = masks[digit];
int i;
draw_rectangle(0, dst, dst_linesize, segment_width, 0, 0, 8, 13);
for (i = 0; i < FF_ARRAY_ELEMS(segments); i++)
if (mask & (1<<i))
draw_rectangle(255, dst, dst_linesize, segment_width,
segments[i].x, segments[i].y, segments[i].w, segments[i].h);
}
| 27,226 |
FFmpeg | 4582e4c086bc36e20e3c0b85c8108cfa352e0d88 | 0 | static int query_formats(AVFilterGraph *graph, AVClass *log_ctx)
{
int i, j, ret;
int scaler_count = 0, resampler_count = 0;
int count_queried = 0, count_merged = 0, count_already_merged = 0,
count_delayed = 0;
for (i = 0; i < graph->nb_filters; i++) {
AVFilterContext *f = graph->filters[i];
if (formats_declared(f))
continue;
if (f->filter->query_formats)
ret = filter_query_formats(f);
else
ret = ff_default_query_formats(f);
if (ret < 0 && ret != AVERROR(EAGAIN))
return ret;
/* note: EAGAIN could indicate a partial success, not counted yet */
count_queried += ret >= 0;
}
/* go through and merge as many format lists as possible */
for (i = 0; i < graph->nb_filters; i++) {
AVFilterContext *filter = graph->filters[i];
for (j = 0; j < filter->nb_inputs; j++) {
AVFilterLink *link = filter->inputs[j];
int convert_needed = 0;
if (!link)
continue;
#define MERGE_DISPATCH(field, statement) \
if (!(link->in_ ## field && link->out_ ## field)) { \
count_delayed++; \
} else if (link->in_ ## field == link->out_ ## field) { \
count_already_merged++; \
} else { \
count_merged++; \
statement \
}
MERGE_DISPATCH(formats,
if (!ff_merge_formats(link->in_formats, link->out_formats,
link->type))
convert_needed = 1;
)
if (link->type == AVMEDIA_TYPE_AUDIO) {
MERGE_DISPATCH(channel_layouts,
if (!ff_merge_channel_layouts(link->in_channel_layouts,
link->out_channel_layouts))
convert_needed = 1;
)
MERGE_DISPATCH(samplerates,
if (!ff_merge_samplerates(link->in_samplerates,
link->out_samplerates))
convert_needed = 1;
)
}
#undef MERGE_DISPATCH
if (convert_needed) {
AVFilterContext *convert;
AVFilter *filter;
AVFilterLink *inlink, *outlink;
char scale_args[256];
char inst_name[30];
/* couldn't merge format lists. auto-insert conversion filter */
switch (link->type) {
case AVMEDIA_TYPE_VIDEO:
if (!(filter = avfilter_get_by_name("scale"))) {
av_log(log_ctx, AV_LOG_ERROR, "'scale' filter "
"not present, cannot convert pixel formats.\n");
return AVERROR(EINVAL);
}
snprintf(inst_name, sizeof(inst_name), "auto-inserted scaler %d",
scaler_count++);
av_strlcpy(scale_args, "0:0", sizeof(scale_args));
if (graph->scale_sws_opts) {
av_strlcat(scale_args, ":", sizeof(scale_args));
av_strlcat(scale_args, graph->scale_sws_opts, sizeof(scale_args));
}
if ((ret = avfilter_graph_create_filter(&convert, filter,
inst_name, scale_args, NULL,
graph)) < 0)
return ret;
break;
case AVMEDIA_TYPE_AUDIO:
if (!(filter = avfilter_get_by_name("aresample"))) {
av_log(log_ctx, AV_LOG_ERROR, "'aresample' filter "
"not present, cannot convert audio formats.\n");
return AVERROR(EINVAL);
}
snprintf(inst_name, sizeof(inst_name), "auto-inserted resampler %d",
resampler_count++);
scale_args[0] = '\0';
if (graph->aresample_swr_opts)
snprintf(scale_args, sizeof(scale_args), "%s",
graph->aresample_swr_opts);
if ((ret = avfilter_graph_create_filter(&convert, filter,
inst_name, graph->aresample_swr_opts,
NULL, graph)) < 0)
return ret;
break;
default:
return AVERROR(EINVAL);
}
if ((ret = avfilter_insert_filter(link, convert, 0, 0)) < 0)
return ret;
filter_query_formats(convert);
inlink = convert->inputs[0];
outlink = convert->outputs[0];
if (!ff_merge_formats( inlink->in_formats, inlink->out_formats, inlink->type) ||
!ff_merge_formats(outlink->in_formats, outlink->out_formats, outlink->type))
ret |= AVERROR(ENOSYS);
if (inlink->type == AVMEDIA_TYPE_AUDIO &&
(!ff_merge_samplerates(inlink->in_samplerates,
inlink->out_samplerates) ||
!ff_merge_channel_layouts(inlink->in_channel_layouts,
inlink->out_channel_layouts)))
ret |= AVERROR(ENOSYS);
if (outlink->type == AVMEDIA_TYPE_AUDIO &&
(!ff_merge_samplerates(outlink->in_samplerates,
outlink->out_samplerates) ||
!ff_merge_channel_layouts(outlink->in_channel_layouts,
outlink->out_channel_layouts)))
ret |= AVERROR(ENOSYS);
if (ret < 0) {
av_log(log_ctx, AV_LOG_ERROR,
"Impossible to convert between the formats supported by the filter "
"'%s' and the filter '%s'\n", link->src->name, link->dst->name);
return ret;
}
}
}
}
av_log(graph, AV_LOG_DEBUG, "query_formats: "
"%d queried, %d merged, %d already done, %d delayed\n",
count_queried, count_merged, count_already_merged, count_delayed);
if (count_delayed) {
AVBPrint bp;
if (count_queried || count_merged)
return AVERROR(EAGAIN);
av_bprint_init(&bp, 0, AV_BPRINT_SIZE_AUTOMATIC);
for (i = 0; i < graph->nb_filters; i++)
if (!formats_declared(graph->filters[i]))
av_bprintf(&bp, "%s%s", bp.len ? ", " : "",
graph->filters[i]->name);
av_log(graph, AV_LOG_ERROR,
"The following filters could not choose their formats: %s\n"
"Consider inserting the (a)format filter near their input or "
"output.\n", bp.str);
return AVERROR(EIO);
}
return 0;
}
| 27,227 |
FFmpeg | 486637af8ef29ec215e0e0b7ecd3b5470f0e04e5 | 0 | static inline void mix_2f_1r_to_stereo(AC3DecodeContext *ctx)
{
int i;
float (*output)[256] = ctx->audio_block.block_output;
for (i = 0; i < 256; i++) {
output[1][i] += output[2][i];
output[2][i] += output[3][i];
}
memset(output[3], 0, sizeof(output[3]));
}
| 27,228 |
FFmpeg | 851ded8918c977d8160c6617b69604f758cabf50 | 0 | static int decode_cabac_mb_cbp_chroma( H264Context *h) {
int ctx;
int cbp_a, cbp_b;
cbp_a = (h->left_cbp>>4)&0x03;
cbp_b = (h-> top_cbp>>4)&0x03;
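/* The CABAC context index depends on whether the left (cbp_a) and top (cbp_b) neighbours carry non-zero chroma CBP. */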
ctx = 0;
if( cbp_a > 0 ) ctx++;
if( cbp_b > 0 ) ctx += 2;
if( get_cabac( &h->cabac, &h->cabac_state[77 + ctx] ) == 0 )
return 0;
ctx = 4;
if( cbp_a == 2 ) ctx++;
if( cbp_b == 2 ) ctx += 2;
return 1 + get_cabac( &h->cabac, &h->cabac_state[77 + ctx] );
}
| 27,229 |
FFmpeg | cb65b32c97b06fc611b53c1ab77a2edbaadee84f | 0 | static int mp3_write_audio_packet(AVFormatContext *s, AVPacket *pkt)
{
MP3Context *mp3 = s->priv_data;
if (pkt && pkt->data && pkt->size >= 4) {
MPADecodeHeader c;
int av_unused base;
avpriv_mpegaudio_decode_header(&c, AV_RB32(pkt->data));
if (!mp3->initial_bitrate)
mp3->initial_bitrate = c.bit_rate;
if ((c.bit_rate == 0) || (mp3->initial_bitrate != c.bit_rate))
mp3->has_variable_bitrate = 1;
#ifdef FILTER_VBR_HEADERS
/* filter out XING and INFO headers. */
base = 4 + xing_offtbl[c.lsf == 1][c.nb_channels == 1];
if (base + 4 <= pkt->size) {
uint32_t v = AV_RB32(pkt->data + base);
if (MKBETAG('X','i','n','g') == v || MKBETAG('I','n','f','o') == v)
return 0;
}
/* filter out VBRI headers. */
base = 4 + 32;
if (base + 4 <= pkt->size && MKBETAG('V','B','R','I') == AV_RB32(pkt->data + base))
return 0;
#endif
if (mp3->xing_offset)
mp3_xing_add_frame(mp3, pkt);
}
return ff_raw_write_packet(s, pkt);
}
| 27,230 |
FFmpeg | c97f54020d5d55511e28622551f13233bd8ceb56 | 0 | int has_altivec(void)
{
#ifdef __AMIGAOS4__
ULONG result = 0;
extern struct ExecIFace *IExec;
IExec->GetCPUInfoTags(GCIT_VectorUnit, &result, TAG_DONE);
if (result == VECTORTYPE_ALTIVEC) return 1;
return 0;
#else /* __AMIGAOS4__ */
#ifdef SYS_DARWIN
int sels[2] = {CTL_HW, HW_VECTORUNIT};
int has_vu = 0;
size_t len = sizeof(has_vu);
int err;
err = sysctl(sels, 2, &has_vu, &len, NULL, 0);
if (err == 0) return (has_vu != 0);
#else /* SYS_DARWIN */
/* no Darwin, do it the brute-force way */
/* this is borrowed from the libmpeg2 library */
{
signal (SIGILL, sigill_handler);
if (sigsetjmp (jmpbuf, 1)) {
signal (SIGILL, SIG_DFL);
} else {
canjump = 1;
asm volatile ("mtspr 256, %0\n\t"
"vand %%v0, %%v0, %%v0"
:
: "r" (-1));
signal (SIGILL, SIG_DFL);
return 1;
}
}
#endif /* SYS_DARWIN */
return 0;
#endif /* __AMIGAOS4__ */
}
| 27,231 |
FFmpeg | dcc39ee10e82833ce24aa57926c00ffeb1948198 | 0 | static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
{
AVCodecContext *avctx = s->avctx;
Mpeg1Context *s1 = (Mpeg1Context *) s;
int ret;
if (s->picture_structure == PICT_FRAME)
s->first_field = 0;
else
s->first_field ^= 1;
/* start frame decoding */
if (s->first_field || s->picture_structure == PICT_FRAME) {
AVFrameSideData *pan_scan;
if ((ret = ff_mpv_frame_start(s, avctx)) < 0)
return ret;
ff_mpeg_er_frame_start(s);
/* first check if we must repeat the frame */
s->current_picture_ptr->f->repeat_pict = 0;
if (s->repeat_first_field) {
if (s->progressive_sequence) {
if (s->top_field_first)
s->current_picture_ptr->f->repeat_pict = 4;
else
s->current_picture_ptr->f->repeat_pict = 2;
} else if (s->progressive_frame) {
s->current_picture_ptr->f->repeat_pict = 1;
}
}
pan_scan = av_frame_new_side_data(s->current_picture_ptr->f,
AV_FRAME_DATA_PANSCAN,
sizeof(s1->pan_scan));
if (!pan_scan)
return AVERROR(ENOMEM);
memcpy(pan_scan->data, &s1->pan_scan, sizeof(s1->pan_scan));
if (s1->a53_caption) {
AVFrameSideData *sd = av_frame_new_side_data(
s->current_picture_ptr->f, AV_FRAME_DATA_A53_CC,
s1->a53_caption_size);
if (sd)
memcpy(sd->data, s1->a53_caption, s1->a53_caption_size);
av_freep(&s1->a53_caption);
}
if (s1->has_stereo3d) {
AVStereo3D *stereo = av_stereo3d_create_side_data(s->current_picture_ptr->f);
if (!stereo)
return AVERROR(ENOMEM);
*stereo = s1->stereo3d;
s1->has_stereo3d = 0;
}
if (s1->has_afd) {
AVFrameSideData *sd =
av_frame_new_side_data(s->current_picture_ptr->f,
AV_FRAME_DATA_AFD, 1);
if (!sd)
return AVERROR(ENOMEM);
*sd->data = s1->afd;
s1->has_afd = 0;
}
if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME))
ff_thread_finish_setup(avctx);
} else { // second field
int i;
if (!s->current_picture_ptr) {
av_log(s->avctx, AV_LOG_ERROR, "first field missing\n");
return AVERROR_INVALIDDATA;
}
if (s->avctx->hwaccel &&
(s->avctx->slice_flags & SLICE_FLAG_ALLOW_FIELD)) {
if (s->avctx->hwaccel->end_frame(s->avctx) < 0)
av_log(avctx, AV_LOG_ERROR,
"hardware accelerator failed to decode first field\n");
}
for (i = 0; i < 4; i++) {
s->current_picture.f->data[i] = s->current_picture_ptr->f->data[i];
if (s->picture_structure == PICT_BOTTOM_FIELD)
s->current_picture.f->data[i] +=
s->current_picture_ptr->f->linesize[i];
}
}
if (avctx->hwaccel) {
if ((ret = avctx->hwaccel->start_frame(avctx, buf, buf_size)) < 0)
return ret;
}
#if FF_API_XVMC
FF_DISABLE_DEPRECATION_WARNINGS
// ff_mpv_frame_start will call this function too,
// but we need to call it on every field
if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
if (ff_xvmc_field_start(s, avctx) < 0)
return -1;
FF_ENABLE_DEPRECATION_WARNINGS
#endif /* FF_API_XVMC */
return 0;
}
| 27,232 |
qemu | 21ce148c7ec71ee32834061355a5ecfd1a11f90f | 1 | static inline int cris_bound_w(int v, int b)
{
int r = v;
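/* Presumably relies on the CRIS "bound.w" instruction, which bounds r by the zero-extended 16-bit value of b. */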
asm ("bound.w\t%1, %0\n" : "+r" (r) : "ri" (b));
return r;
}
| 27,233 |
FFmpeg | 1cb0edb40b8e94e1a50ad40c40d43e34ed8435fe | 1 | static void mpeg_decode_picture_coding_extension(MpegEncContext *s)
{
s->full_pel[0] = s->full_pel[1] = 0;
s->mpeg_f_code[0][0] = get_bits(&s->gb, 4);
s->mpeg_f_code[0][1] = get_bits(&s->gb, 4);
s->mpeg_f_code[1][0] = get_bits(&s->gb, 4);
s->mpeg_f_code[1][1] = get_bits(&s->gb, 4);
s->intra_dc_precision = get_bits(&s->gb, 2);
s->picture_structure = get_bits(&s->gb, 2);
s->top_field_first = get_bits1(&s->gb);
s->frame_pred_frame_dct = get_bits1(&s->gb);
s->concealment_motion_vectors = get_bits1(&s->gb);
s->q_scale_type = get_bits1(&s->gb);
s->intra_vlc_format = get_bits1(&s->gb);
s->alternate_scan = get_bits1(&s->gb);
s->repeat_first_field = get_bits1(&s->gb);
s->chroma_420_type = get_bits1(&s->gb);
s->progressive_frame = get_bits1(&s->gb);
/* composite display not parsed */
dprintf("intra_dc_precion=%d\n", s->intra_dc_precision);
dprintf("picture_structure=%d\n", s->picture_structure);
dprintf("conceal=%d\n", s->concealment_motion_vectors);
dprintf("intra_vlc_format=%d\n", s->intra_vlc_format);
dprintf("alternate_scan=%d\n", s->alternate_scan);
dprintf("frame_pred_frame_dct=%d\n", s->frame_pred_frame_dct);
}
| 27,234 |
qemu | c5633d998a27502ad8cc10c2d46f91b02555ae7a | 1 | static int xen_remove_from_physmap(XenIOState *state,
hwaddr start_addr,
ram_addr_t size)
{
unsigned long i = 0;
int rc = 0;
XenPhysmap *physmap = NULL;
hwaddr phys_offset = 0;
physmap = get_physmapping(state, start_addr, size);
if (physmap == NULL) {
return -1;
}
phys_offset = physmap->phys_offset;
size = physmap->size;
DPRINTF("unmapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx", from ",
"%"HWADDR_PRIx"\n", phys_offset, phys_offset + size, start_addr);
size >>= TARGET_PAGE_BITS;
start_addr >>= TARGET_PAGE_BITS;
phys_offset >>= TARGET_PAGE_BITS;
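/* Presumably undoes the earlier physmap setup: each page is moved back from the mapped region to its original frame, one gmfn at a time. */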
for (i = 0; i < size; i++) {
unsigned long idx = start_addr + i;
xen_pfn_t gpfn = phys_offset + i;
rc = xc_domain_add_to_physmap(xen_xc, xen_domid, XENMAPSPACE_gmfn, idx, gpfn);
if (rc) {
fprintf(stderr, "add_to_physmap MFN %"PRI_xen_pfn" to PFN %"
PRI_xen_pfn" failed: %d\n", idx, gpfn, rc);
return -rc;
}
}
QLIST_REMOVE(physmap, list);
if (state->log_for_dirtybit == physmap) {
state->log_for_dirtybit = NULL;
}
free(physmap);
return 0;
}
| 27,235 |
FFmpeg | 3836af476534e6f84be7b3a19afce3530af50703 | 1 | static int mpegvideo_probe(AVProbeData *p)
{
uint32_t code= -1;
int pic=0, seq=0, slice=0, pspack=0, vpes=0, apes=0, res=0, sicle=0;
int i;
uint32_t last = 0;
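/* Count MPEG-1/2 start codes; the ratios of sequence/picture/slice headers below decide the probe score. */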
for(i=0; i<p->buf_size; i++){
code = (code<<8) + p->buf[i];
if ((code & 0xffffff00) == 0x100) {
switch(code){
case SEQ_START_CODE: seq++; break;
case PICTURE_START_CODE: pic++; break;
case PACK_START_CODE: pspack++; break;
case 0x1b6:
res++; break;
}
if (code >= SLICE_START_CODE && code <= 0x1af) {
if (last >= SLICE_START_CODE && last <= 0x1af) {
if (code >= last) slice++;
else sicle++;
}else{
if (code == SLICE_START_CODE) slice++;
else sicle++;
}
}
if ((code & 0x1f0) == VIDEO_ID) vpes++;
else if((code & 0x1e0) == AUDIO_ID) apes++;
last = code;
}
}
if(seq && seq*9<=pic*10 && pic*9<=slice*10 && !pspack && !apes && !res && slice > sicle) {
if(vpes) return AVPROBE_SCORE_EXTENSION / 4;
else return pic>1 ? AVPROBE_SCORE_EXTENSION + 1 : AVPROBE_SCORE_EXTENSION / 2; // +1 for .mpg
}
return 0;
}
| 27,236 |
qemu | d400fc018b326104d26d730e5cc8c36c1f662c34 | 1 | static void usb_ohci_init(OHCIState *ohci, DeviceState *dev,
int num_ports, dma_addr_t localmem_base,
char *masterbus, uint32_t firstport,
AddressSpace *as, Error **errp)
{
Error *err = NULL;
int i;
ohci->as = as;
if (usb_frame_time == 0) {
#ifdef OHCI_TIME_WARP
usb_frame_time = NANOSECONDS_PER_SECOND;
usb_bit_time = NANOSECONDS_PER_SECOND / (USB_HZ / 1000);
#else
usb_frame_time = NANOSECONDS_PER_SECOND / 1000;
if (NANOSECONDS_PER_SECOND >= USB_HZ) {
usb_bit_time = NANOSECONDS_PER_SECOND / USB_HZ;
} else {
usb_bit_time = 1;
}
#endif
trace_usb_ohci_init_time(usb_frame_time, usb_bit_time);
}
ohci->num_ports = num_ports;
if (masterbus) {
USBPort *ports[OHCI_MAX_PORTS];
for(i = 0; i < num_ports; i++) {
ports[i] = &ohci->rhport[i].port;
}
usb_register_companion(masterbus, ports, num_ports,
firstport, ohci, &ohci_port_ops,
USB_SPEED_MASK_LOW | USB_SPEED_MASK_FULL,
&err);
if (err) {
error_propagate(errp, err);
return;
}
} else {
usb_bus_new(&ohci->bus, sizeof(ohci->bus), &ohci_bus_ops, dev);
for (i = 0; i < num_ports; i++) {
usb_register_port(&ohci->bus, &ohci->rhport[i].port,
ohci, i, &ohci_port_ops,
USB_SPEED_MASK_LOW | USB_SPEED_MASK_FULL);
}
}
memory_region_init_io(&ohci->mem, OBJECT(dev), &ohci_mem_ops,
ohci, "ohci", 256);
ohci->localmem_base = localmem_base;
ohci->name = object_get_typename(OBJECT(dev));
usb_packet_init(&ohci->usb_packet);
ohci->async_td = 0;
ohci->eof_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
ohci_frame_boundary, ohci);
}
| 27,238 |
qemu | efec3dd631d94160288392721a5f9c39e50fb2bc | 1 | static void isabus_fdc_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = isabus_fdc_realize;
dc->fw_name = "fdc";
dc->no_user = 1;
dc->reset = fdctrl_external_reset_isa;
dc->vmsd = &vmstate_isa_fdc;
dc->props = isa_fdc_properties;
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
| 27,239 |
qemu | 277acfe8b38de35be8cb6e274678b5a7919c2d44 | 0 | static int64_t expr_unary(Monitor *mon)
{
int64_t n;
char *p;
int ret;
switch(*pch) {
case '+':
next();
n = expr_unary(mon);
break;
case '-':
next();
n = -expr_unary(mon);
break;
case '~':
next();
n = ~expr_unary(mon);
break;
case '(':
next();
n = expr_sum(mon);
if (*pch != ')') {
expr_error(mon, "')' expected");
}
next();
break;
case '\'':
pch++;
if (*pch == '\0')
expr_error(mon, "character constant expected");
n = *pch;
pch++;
if (*pch != '\'')
expr_error(mon, "missing terminating \' character");
next();
break;
case '$':
{
char buf[128], *q;
target_long reg=0;
pch++;
q = buf;
while ((*pch >= 'a' && *pch <= 'z') ||
(*pch >= 'A' && *pch <= 'Z') ||
(*pch >= '0' && *pch <= '9') ||
*pch == '_' || *pch == '.') {
if ((q - buf) < sizeof(buf) - 1)
*q++ = *pch;
pch++;
}
while (qemu_isspace(*pch))
pch++;
*q = 0;
ret = get_monitor_def(&reg, buf);
if (ret < 0)
expr_error(mon, "unknown register");
n = reg;
}
break;
case '\0':
expr_error(mon, "unexpected end of expression");
n = 0;
break;
default:
errno = 0;
n = strtoull(pch, &p, 0);
if (errno == ERANGE) {
expr_error(mon, "number too large");
}
if (pch == p) {
expr_error(mon, "invalid char in expression");
}
pch = p;
while (qemu_isspace(*pch))
pch++;
break;
}
return n;
}
| 27,240 |
qemu | b2bedb214469af55179d907a60cd67fed6b0779e | 0 | mst_fpga_writeb(void *opaque, target_phys_addr_t addr, uint32_t value)
{
mst_irq_state *s = (mst_irq_state *) opaque;
value &= 0xffffffff;
switch (addr) {
case MST_LEDDAT1:
s->leddat1 = value;
break;
case MST_LEDDAT2:
s->leddat2 = value;
break;
case MST_LEDCTRL:
s->ledctrl = value;
break;
case MST_GPSWR:
s->gpswr = value;
break;
case MST_MSCWR1:
s->mscwr1 = value;
break;
case MST_MSCWR2:
s->mscwr2 = value;
break;
case MST_MSCWR3:
s->mscwr3 = value;
break;
case MST_MSCRD:
s->mscrd = value;
break;
case MST_INTMSKENA: /* Mask interrupt */
s->intmskena = (value & 0xFEEFF);
qemu_set_irq(s->parent, s->intsetclr & s->intmskena);
break;
case MST_INTSETCLR: /* clear or set interrupt */
s->intsetclr = (value & 0xFEEFF);
qemu_set_irq(s->parent, s->intsetclr & s->intmskena);
break;
/* For PCMCIAx, allow changing only power and reset */
case MST_PCMCIA0:
s->pcmcia0 = (value & 0x1f) | (s->pcmcia0 & ~0x1f);
break;
case MST_PCMCIA1:
s->pcmcia1 = (value & 0x1f) | (s->pcmcia1 & ~0x1f);
break;
default:
printf("Mainstone - mst_fpga_writeb: Bad register offset "
"0x" TARGET_FMT_plx " \n", addr);
}
}
| 27,241 |
FFmpeg | 59163139679b0aa2cb84cd6d7a3f696ed5a5813a | 0 | static int get_std_framerate(int i){
if(i<60*12) return i*1001;
else return ((const int[]){24,30,60,12,15,48})[i-60*12]*1000*12;
}
| 27,243 |
FFmpeg | 912ce9dd2080c5837285a471d750fa311e09b555 | 0 | void ff_jpeg2000_cleanup(Jpeg2000Component *comp, Jpeg2000CodingStyle *codsty)
{
int reslevelno, bandno, precno;
for (reslevelno = 0;
comp->reslevel && reslevelno < codsty->nreslevels;
reslevelno++) {
Jpeg2000ResLevel *reslevel = comp->reslevel + reslevelno;
for (bandno = 0; bandno < reslevel->nbands; bandno++) {
Jpeg2000Band *band = reslevel->band + bandno;
for (precno = 0; precno < reslevel->num_precincts_x * reslevel->num_precincts_y; precno++) {
Jpeg2000Prec *prec = band->prec + precno;
av_freep(&prec->zerobits);
av_freep(&prec->cblkincl);
av_freep(&prec->cblk);
}
av_freep(&band->prec);
}
av_freep(&reslevel->band);
}
ff_dwt_destroy(&comp->dwt);
av_freep(&comp->reslevel);
av_freep(&comp->i_data);
av_freep(&comp->f_data);
}
| 27,244 |
qemu | 9307c4c1d93939db9b04117b654253af5113dc21 | 0 | static void do_cont(int argc, const char **argv)
{
vm_start();
}
| 27,245 |
qemu | 37654d9e6af84003982f8b9a5d59a4aef28e0a79 | 0 | static inline void _t_gen_mov_TN_env(TCGv tn, int offset)
{
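/* Sanity check only: an out-of-range env offset is reported on stderr, but the load below is still emitted. */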
if (offset > sizeof(CPUCRISState)) {
fprintf(stderr, "wrong load from env from off=%d\n", offset);
}
tcg_gen_ld_tl(tn, cpu_env, offset);
}
| 27,246 |
qemu | 1f0c461b82d5ec2664ca0cfc9548f80da87a8f8a | 0 | static const char *bdrv_get_parent_name(const BlockDriverState *bs)
{
BdrvChild *c;
const char *name;
/* If multiple parents have a name, just pick the first one. */
QLIST_FOREACH(c, &bs->parents, next_parent) {
if (c->role->get_name) {
name = c->role->get_name(c);
if (name && *name) {
return name;
}
}
}
return NULL;
}
| 27,248 |