instruction (stringclasses, 1 value) | input (stringlengths 90–139k) | output (stringlengths 16–138k) | __index_level_0__ (int64 165k–175k)
---|---|---|---|
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code only, no explanation. | Code: static void nsc_encode_sse2(NSC_CONTEXT* context, const BYTE* data,
UINT32 scanline)
{
nsc_encode_argb_to_aycocg_sse2(context, data, scanline);
if (context->ChromaSubsamplingLevel > 0)
{
nsc_encode_subsampling_sse2(context);
}
}
Commit Message: Fixed CVE-2018-8788
Thanks to Eyal Itkin from Check Point Software Technologies.
CWE ID: CWE-787 | static BOOL nsc_encode_sse2(NSC_CONTEXT* context, const BYTE* data,
UINT32 scanline)
{
nsc_encode_argb_to_aycocg_sse2(context, data, scanline);
if (context->ChromaSubsamplingLevel > 0)
{
nsc_encode_subsampling_sse2(context);
}
return TRUE;
}
| 169,291 |
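The CVE-2018-8788 fix converts the NSC encode pipeline to BOOL-returning functions so bounds-check failures can propagate to callers; this leaf wrapper has nothing that can fail yet, hence the unconditional return TRUE;. A minimal caller-side sketch of that convention, assuming FreeRDP's typedefs (the caller name and pipeline stage are hypothetical, not FreeRDP's actual code):

/* Hypothetical caller sketch: with BOOL-returning encoder stages, a
 * failed bounds check in any stage aborts the whole encode instead of
 * continuing into an out-of-bounds write (CWE-787). */
static BOOL nsc_encode_frame(NSC_CONTEXT* context, const BYTE* data,
                             UINT32 scanline)
{
    if (!nsc_encode_sse2(context, data, scanline))
        return FALSE; /* propagate the failure to the caller */

    /* ... subsequent pipeline stages, each checked the same way ... */
    return TRUE;
}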
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code only, no explanation. | Code: mem_log_init(const char* prog_name, const char *banner)
{
size_t log_name_len;
char *log_name;
if (__test_bit(LOG_CONSOLE_BIT, &debug)) {
log_op = stderr;
return;
}
if (log_op)
fclose(log_op);
log_name_len = 5 + strlen(prog_name) + 5 + 7 + 4 + 1; /* "/tmp/" + prog_name + "_mem." + PID + ".log" + '\0' */
log_name = malloc(log_name_len);
if (!log_name) {
log_message(LOG_INFO, "Unable to malloc log file name");
log_op = stderr;
return;
}
snprintf(log_name, log_name_len, "/tmp/%s_mem.%d.log", prog_name, getpid());
log_op = fopen(log_name, "a");
if (log_op == NULL) {
log_message(LOG_INFO, "Unable to open %s for appending", log_name);
log_op = stderr;
}
else {
int fd = fileno(log_op);
/* We don't want any children to inherit the log file */
fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
/* Make the log output line buffered. This was to ensure that
* children didn't inherit the buffer, but the CLOEXEC above
* should resolve that. */
setlinebuf(log_op);
fprintf(log_op, "\n");
}
free(log_name);
terminate_banner = banner;
}
Commit Message: When opening files for write, ensure they aren't symbolic links
Issue #1048 identified that if, for example, a non privileged user
created a symbolic link from /etc/keepalived.data to /etc/passwd,
writing to /etc/keepalived.data (which could be invoked via DBus)
would cause /etc/passwd to be overwritten.
This commit stops keepalived writing to pathnames where the ultimate
component is a symbolic link, by setting O_NOFOLLOW whenever opening
a file for writing.
This might break some setups, where, for example, /etc/keepalived.data
was a symbolic link to /home/fred/keepalived.data. If this was the case,
instead create a symbolic link from /home/fred/keepalived.data to
/tmp/keepalived.data, so that the file is still accessible via
/home/fred/keepalived.data.
There doesn't appear to be a way around this backward incompatibility,
since even checking if the pathname is a symbolic link prior to opening
for writing would create a race condition.
Signed-off-by: Quentin Armitage <[email protected]>
CWE ID: CWE-59 | mem_log_init(const char* prog_name, const char *banner)
{
size_t log_name_len;
char *log_name;
if (__test_bit(LOG_CONSOLE_BIT, &debug)) {
log_op = stderr;
return;
}
if (log_op)
fclose(log_op);
log_name_len = 5 + strlen(prog_name) + 5 + 7 + 4 + 1; /* "/tmp/" + prog_name + "_mem." + PID + ".log" + '\0' */
log_name = malloc(log_name_len);
if (!log_name) {
log_message(LOG_INFO, "Unable to malloc log file name");
log_op = stderr;
return;
}
snprintf(log_name, log_name_len, "/tmp/%s_mem.%d.log", prog_name, getpid());
log_op = fopen_safe(log_name, "a");
if (log_op == NULL) {
log_message(LOG_INFO, "Unable to open %s for appending", log_name);
log_op = stderr;
}
else {
int fd = fileno(log_op);
/* We don't want any children to inherit the log file */
fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
/* Make the log output line buffered. This was to ensure that
* children didn't inherit the buffer, but the CLOEXEC above
* should resolve that. */
setlinebuf(log_op);
fprintf(log_op, "\n");
}
free(log_name);
terminate_banner = banner;
}
| 168,995 |
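The fix swaps fopen for keepalived's fopen_safe, whose body is not shown in this row. A minimal sketch of what an fopen_safe-style wrapper might look like for the append mode used above, assuming the only requirement is the O_NOFOLLOW behavior the commit message describes (the helper name and mode handling are illustrative, not keepalived's actual implementation):

#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

/* Illustrative sketch: open with O_NOFOLLOW so a symbolic link in the
 * final path component fails with ELOOP instead of being followed,
 * then wrap the descriptor in a stdio stream for the fprintf callers. */
static FILE *fopen_safe_sketch(const char *path)
{
    int fd = open(path, O_WRONLY | O_CREAT | O_APPEND | O_NOFOLLOW,
                  S_IRUSR | S_IWUSR);

    if (fd == -1)
        return NULL; /* errno is ELOOP when path is a symlink */

    return fdopen(fd, "a");
}

Checking O_NOFOLLOW at open time also avoids the check-then-open race the commit message notes would exist with a separate symlink test.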
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code only, no explanation. | Code: coolkey_find_attribute(sc_card_t *card, sc_cardctl_coolkey_attribute_t *attribute)
{
u8 object_record_type;
CK_ATTRIBUTE_TYPE attr_type = attribute->attribute_type;
const u8 *obj = attribute->object->data;
const u8 *attr = NULL;
size_t buf_len = attribute->object->length;
coolkey_object_header_t *object_head;
int attribute_count,i;
attribute->attribute_data_type = SC_CARDCTL_COOLKEY_ATTR_TYPE_STRING;
attribute->attribute_length = 0;
attribute->attribute_value = NULL;
if (obj == NULL) {
/* cast away const so we can cache the data value */
int r = coolkey_fill_object(card, (sc_cardctl_coolkey_object_t *)attribute->object);
if (r < 0) {
return r;
}
obj = attribute->object->data;
}
/* should be a static assert so we catch this at compile time */
assert(sizeof(coolkey_object_header_t) >= sizeof(coolkey_v0_object_header_t));
/* make sure we have enough of the object to read the record_type */
if (buf_len <= sizeof(coolkey_v0_object_header_t)) {
return SC_ERROR_CORRUPTED_DATA;
}
object_head = (coolkey_object_header_t *)obj;
object_record_type = object_head->record_type;
/* make sure it's a type we recognize */
if ((object_record_type != COOLKEY_V1_OBJECT) && (object_record_type != COOLKEY_V0_OBJECT)) {
return SC_ERROR_CORRUPTED_DATA;
}
/*
* now loop through all the attributes in the list. first find the start of the list
*/
attr = coolkey_attribute_start(obj, object_record_type, buf_len);
if (attr == NULL) {
return SC_ERROR_CORRUPTED_DATA;
}
buf_len -= (attr-obj);
/* now get the count */
attribute_count = coolkey_get_attribute_count(obj, object_record_type, buf_len);
for (i=0; i < attribute_count; i++) {
size_t record_len = coolkey_get_attribute_record_len(attr, object_record_type, buf_len);
/* make sure we have the complete record */
if (buf_len < record_len) {
return SC_ERROR_CORRUPTED_DATA;
}
/* does the attribute match the one we are looking for */
if (attr_type == coolkey_get_attribute_type(attr, object_record_type, record_len)) {
/* yup, return it */
return coolkey_get_attribute_data(attr, object_record_type, record_len, attribute);
}
/* go to the next attribute on the list */
buf_len -= record_len;
attr += record_len;
}
/* not find in attribute list, check the fixed attribute record */
if (object_record_type == COOLKEY_V1_OBJECT) {
unsigned long fixed_attributes = bebytes2ulong(object_head->fixed_attributes_values);
return coolkey_get_attribute_data_fixed(attr_type, fixed_attributes, attribute);
}
return SC_ERROR_DATA_OBJECT_NOT_FOUND;
}
Commit Message: fixed out of bounds reads
Thanks to Eric Sesterhenn from X41 D-SEC GmbH
for reporting and suggesting security fixes.
CWE ID: CWE-125 | coolkey_find_attribute(sc_card_t *card, sc_cardctl_coolkey_attribute_t *attribute)
{
u8 object_record_type;
CK_ATTRIBUTE_TYPE attr_type = attribute->attribute_type;
const u8 *obj = attribute->object->data;
const u8 *attr = NULL;
size_t buf_len = attribute->object->length;
coolkey_object_header_t *object_head;
int attribute_count,i;
attribute->attribute_data_type = SC_CARDCTL_COOLKEY_ATTR_TYPE_STRING;
attribute->attribute_length = 0;
attribute->attribute_value = NULL;
if (obj == NULL) {
/* cast away const so we can cache the data value */
int r = coolkey_fill_object(card, (sc_cardctl_coolkey_object_t *)attribute->object);
if (r < 0) {
return r;
}
obj = attribute->object->data;
}
/* should be a static assert so we catch this at compile time */
assert(sizeof(coolkey_object_header_t) >= sizeof(coolkey_v0_object_header_t));
/* make sure we have enough of the object to read the record_type */
if (buf_len <= sizeof(coolkey_v0_object_header_t)) {
return SC_ERROR_CORRUPTED_DATA;
}
object_head = (coolkey_object_header_t *)obj;
object_record_type = object_head->record_type;
/* make sure it's a type we recognize */
if ((object_record_type != COOLKEY_V1_OBJECT) && (object_record_type != COOLKEY_V0_OBJECT)) {
return SC_ERROR_CORRUPTED_DATA;
}
/*
* now loop through all the attributes in the list. first find the start of the list
*/
attr = coolkey_attribute_start(obj, object_record_type, buf_len);
if (attr == NULL) {
return SC_ERROR_CORRUPTED_DATA;
}
buf_len -= (attr-obj);
/* now get the count */
attribute_count = coolkey_get_attribute_count(obj, object_record_type, buf_len);
for (i=0; i < attribute_count; i++) {
size_t record_len = coolkey_get_attribute_record_len(attr, object_record_type, buf_len);
/* make sure we have the complete record */
if (buf_len < record_len || record_len < 4) {
return SC_ERROR_CORRUPTED_DATA;
}
/* does the attribute match the one we are looking for */
if (attr_type == coolkey_get_attribute_type(attr, object_record_type, record_len)) {
/* yup, return it */
return coolkey_get_attribute_data(attr, object_record_type, record_len, attribute);
}
/* go to the next attribute on the list */
buf_len -= record_len;
attr += record_len;
}
/* not find in attribute list, check the fixed attribute record */
if (object_record_type == COOLKEY_V1_OBJECT) {
unsigned long fixed_attributes = bebytes2ulong(object_head->fixed_attributes_values);
return coolkey_get_attribute_data_fixed(attr_type, fixed_attributes, attribute);
}
return SC_ERROR_DATA_OBJECT_NOT_FOUND;
}
| 169,051 |
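Besides buf_len < record_len, the fix adds record_len < 4, presumably so every record at least covers its fixed header and the cursor always advances past it. A generic sketch of that record-walking guard (the record layout, field sizes, and function below are illustrative, not the actual coolkey format):

#include <stddef.h>

enum { MIN_RECORD_LEN = 4 }; /* assumed fixed-header size */

static int walk_records(const unsigned char *buf, size_t buf_len, int count)
{
    const unsigned char *p = buf;
    int i;

    for (i = 0; i < count; i++) {
        size_t record_len;

        if (buf_len < MIN_RECORD_LEN)
            return -1; /* cannot even read the record header */

        /* illustrative layout: 2-byte type, then 2-byte total length */
        record_len = (size_t) ((p[2] << 8) | p[3]);
        if (buf_len < record_len || record_len < MIN_RECORD_LEN)
            return -1; /* corrupt record: reject instead of reading on */

        buf_len -= record_len;
        p += record_len;
    }
    return 0;
}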
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code only, no explanation. | Code: ResourceHostMsg_Request CreateXHRRequestWithOrigin(const char* origin) {
ResourceHostMsg_Request request;
request.method = "GET";
request.url = GURL("http://bar.com/simple_page.html");
request.first_party_for_cookies = GURL(origin);
request.referrer_policy = blink::WebReferrerPolicyDefault;
request.headers = base::StringPrintf("Origin: %s\r\n", origin);
request.load_flags = 0;
request.origin_pid = 0;
request.resource_type = RESOURCE_TYPE_XHR;
request.request_context = 0;
request.appcache_host_id = kAppCacheNoHostId;
request.download_to_file = false;
request.should_reset_appcache = false;
request.is_main_frame = true;
request.parent_is_main_frame = false;
request.parent_render_frame_id = -1;
request.transition_type = ui::PAGE_TRANSITION_LINK;
request.allow_download = true;
return request;
}
Commit Message: Block a compromised renderer from reusing request ids.
BUG=578882
Review URL: https://codereview.chromium.org/1608573002
Cr-Commit-Position: refs/heads/master@{#372547}
CWE ID: CWE-362 | ResourceHostMsg_Request CreateXHRRequest(const char* url) {
ResourceHostMsg_Request request;
request.method = "GET";
request.url = GURL(url);
request.referrer_policy = blink::WebReferrerPolicyDefault;
request.load_flags = 0;
request.origin_pid = 0;
request.resource_type = RESOURCE_TYPE_XHR;
request.request_context = 0;
request.appcache_host_id = kAppCacheNoHostId;
request.download_to_file = false;
request.should_reset_appcache = false;
request.is_main_frame = true;
request.parent_is_main_frame = false;
request.parent_render_frame_id = -1;
request.transition_type = ui::PAGE_TRANSITION_LINK;
request.allow_download = true;
return request;
}
| 172,272 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code only, no explanation. | Code: xmlPushInput(xmlParserCtxtPtr ctxt, xmlParserInputPtr input) {
int ret;
if (input == NULL) return(-1);
if (xmlParserDebugEntities) {
if ((ctxt->input != NULL) && (ctxt->input->filename))
xmlGenericError(xmlGenericErrorContext,
"%s(%d): ", ctxt->input->filename,
ctxt->input->line);
xmlGenericError(xmlGenericErrorContext,
"Pushing input %d : %.30s\n", ctxt->inputNr+1, input->cur);
}
ret = inputPush(ctxt, input);
GROW;
return(ret);
}
Commit Message: libxml: XML_PARSER_EOF checks from upstream
BUG=229019
TBR=cpu
Review URL: https://chromiumcodereview.appspot.com/14053009
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@196804 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-119 | xmlPushInput(xmlParserCtxtPtr ctxt, xmlParserInputPtr input) {
int ret;
if (input == NULL) return(-1);
if (xmlParserDebugEntities) {
if ((ctxt->input != NULL) && (ctxt->input->filename))
xmlGenericError(xmlGenericErrorContext,
"%s(%d): ", ctxt->input->filename,
ctxt->input->line);
xmlGenericError(xmlGenericErrorContext,
"Pushing input %d : %.30s\n", ctxt->inputNr+1, input->cur);
}
ret = inputPush(ctxt, input);
if (ctxt->instate == XML_PARSER_EOF)
return(-1);
GROW;
return(ret);
}
| 171,309 |
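The upstream pattern behind this fix: any helper that can abort parsing sets the terminal XML_PARSER_EOF state, and callers re-check that state before macros such as GROW touch the parser's buffers again. A stripped-down sketch of the same stop-flag convention (the types and names are generic stand-ins, not libxml2's API):

/* Generic stop-flag sketch: a fatal error inside a nested call sets a
 * terminal state; the caller re-checks it before any further buffer
 * access, mirroring the XML_PARSER_EOF checks added upstream. */
typedef struct {
    int instate; /* PARSER_OK or PARSER_EOF */
    /* ... buffers, cursors ... */
} parser_t;

enum { PARSER_OK, PARSER_EOF };

static int parse_step(parser_t *ctxt, int (*push_input)(parser_t *))
{
    int ret = push_input(ctxt); /* may set ctxt->instate = PARSER_EOF */

    if (ctxt->instate == PARSER_EOF)
        return -1; /* never grow or dereference buffers after a fatal stop */

    /* the GROW-equivalent and further parsing would go here */
    return ret;
}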
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code only, no explanation. | Code: static Image *ReadTILEImage(const ImageInfo *image_info,
ExceptionInfo *exception)
{
Image
*image,
*tile_image;
ImageInfo
*read_info;
/*
Initialize Image structure.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
read_info=CloneImageInfo(image_info);
SetImageInfoBlob(read_info,(void *) NULL,0);
*read_info->magick='\0';
tile_image=ReadImage(read_info,exception);
read_info=DestroyImageInfo(read_info);
if (tile_image == (Image *) NULL)
return((Image *) NULL);
image=AcquireImage(image_info);
if ((image->columns == 0) || (image->rows == 0))
ThrowReaderException(OptionError,"MustSpecifyImageSize");
if (*image_info->filename == '\0')
ThrowReaderException(OptionError,"MustSpecifyAnImageName");
image->colorspace=tile_image->colorspace;
image->matte=tile_image->matte;
if (image->matte != MagickFalse)
(void) SetImageBackgroundColor(image);
(void) CopyMagickString(image->filename,image_info->filename,MaxTextExtent);
if (LocaleCompare(tile_image->magick,"PATTERN") == 0)
{
tile_image->tile_offset.x=0;
tile_image->tile_offset.y=0;
}
(void) TextureImage(image,tile_image);
tile_image=DestroyImage(tile_image);
if (image->colorspace == GRAYColorspace)
image->type=GrayscaleType;
return(GetFirstImageInList(image));
}
Commit Message:
CWE ID: CWE-119 | static Image *ReadTILEImage(const ImageInfo *image_info,
ExceptionInfo *exception)
{
Image
*image,
*tile_image;
ImageInfo
*read_info;
MagickBooleanType
status;
/*
Initialize Image structure.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
read_info=CloneImageInfo(image_info);
SetImageInfoBlob(read_info,(void *) NULL,0);
*read_info->magick='\0';
tile_image=ReadImage(read_info,exception);
read_info=DestroyImageInfo(read_info);
if (tile_image == (Image *) NULL)
return((Image *) NULL);
image=AcquireImage(image_info);
if ((image->columns == 0) || (image->rows == 0))
ThrowReaderException(OptionError,"MustSpecifyImageSize");
status=SetImageExtent(image,image->columns,image->rows);
if (status == MagickFalse)
{
InheritException(exception,&image->exception);
return(DestroyImageList(image));
}
if (*image_info->filename == '\0')
ThrowReaderException(OptionError,"MustSpecifyAnImageName");
image->colorspace=tile_image->colorspace;
image->matte=tile_image->matte;
if (image->matte != MagickFalse)
(void) SetImageBackgroundColor(image);
(void) CopyMagickString(image->filename,image_info->filename,MaxTextExtent);
if (LocaleCompare(tile_image->magick,"PATTERN") == 0)
{
tile_image->tile_offset.x=0;
tile_image->tile_offset.y=0;
}
(void) TextureImage(image,tile_image);
tile_image=DestroyImage(tile_image);
if (image->colorspace == GRAYColorspace)
image->type=GrayscaleType;
return(GetFirstImageInList(image));
}
| 168,610 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code only, no explanation. | Code: static MagickBooleanType ReadPSDLayersInternal(Image *image,
const ImageInfo *image_info,const PSDInfo *psd_info,
const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
char
type[4];
LayerInfo
*layer_info;
MagickSizeType
size;
MagickBooleanType
status;
register ssize_t
i;
ssize_t
count,
j,
number_layers;
size=GetPSDSize(psd_info,image);
if (size == 0)
{
/*
Skip layers & masks.
*/
(void) ReadBlobLong(image);
count=ReadBlob(image,4,(unsigned char *) type);
ReversePSDString(image,type,4);
status=MagickFalse;
if ((count == 0) || (LocaleNCompare(type,"8BIM",4) != 0))
return(MagickTrue);
else
{
count=ReadBlob(image,4,(unsigned char *) type);
ReversePSDString(image,type,4);
if ((count != 0) && (LocaleNCompare(type,"Lr16",4) == 0))
size=GetPSDSize(psd_info,image);
else
return(MagickTrue);
}
}
status=MagickTrue;
if (size != 0)
{
layer_info=(LayerInfo *) NULL;
number_layers=(short) ReadBlobShort(image);
if (number_layers < 0)
{
/*
The first alpha channel in the merged result contains the
transparency data for the merged result.
*/
number_layers=MagickAbsoluteValue(number_layers);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" negative layer count corrected for");
image->matte=MagickTrue;
}
/*
We only need to know if the image has an alpha channel
*/
if (skip_layers != MagickFalse)
return(MagickTrue);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image contains %.20g layers",(double) number_layers);
if (number_layers == 0)
ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers",
image->filename);
layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers,
sizeof(*layer_info));
if (layer_info == (LayerInfo *) NULL)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" allocation of LayerInfo failed");
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) ResetMagickMemory(layer_info,0,(size_t) number_layers*
sizeof(*layer_info));
for (i=0; i < number_layers; i++)
{
ssize_t
x,
y;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading layer #%.20g",(double) i+1);
layer_info[i].page.y=ReadBlobSignedLong(image);
layer_info[i].page.x=ReadBlobSignedLong(image);
y=ReadBlobSignedLong(image);
x=ReadBlobSignedLong(image);
layer_info[i].page.width=(size_t) (x-layer_info[i].page.x);
layer_info[i].page.height=(size_t) (y-layer_info[i].page.y);
layer_info[i].channels=ReadBlobShort(image);
if (layer_info[i].channels > MaxPSDChannels)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded",
image->filename);
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g",
(double) layer_info[i].page.x,(double) layer_info[i].page.y,
(double) layer_info[i].page.height,(double)
layer_info[i].page.width,(double) layer_info[i].channels);
for (j=0; j < (ssize_t) layer_info[i].channels; j++)
{
layer_info[i].channel_info[j].type=(short) ReadBlobShort(image);
layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info,
image);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" channel[%.20g]: type=%.20g, size=%.20g",(double) j,
(double) layer_info[i].channel_info[j].type,
(double) layer_info[i].channel_info[j].size);
}
count=ReadBlob(image,4,(unsigned char *) type);
ReversePSDString(image,type,4);
if ((count == 0) || (LocaleNCompare(type,"8BIM",4) != 0))
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer type was %.4s instead of 8BIM", type);
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
image->filename);
}
(void) ReadBlob(image,4,(unsigned char *) layer_info[i].blendkey);
ReversePSDString(image,layer_info[i].blendkey,4);
layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char)
ReadBlobByte(image));
layer_info[i].clipping=(unsigned char) ReadBlobByte(image);
layer_info[i].flags=(unsigned char) ReadBlobByte(image);
layer_info[i].visible=!(layer_info[i].flags & 0x02);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s",
layer_info[i].blendkey,(double) layer_info[i].opacity,
layer_info[i].clipping ? "true" : "false",layer_info[i].flags,
layer_info[i].visible ? "true" : "false");
(void) ReadBlobByte(image); /* filler */
size=ReadBlobLong(image);
if (size != 0)
{
MagickSizeType
combined_length,
length;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer contains additional info");
length=ReadBlobLong(image);
combined_length=length+4;
if (length != 0)
{
/*
Layer mask info.
*/
layer_info[i].mask.page.y=ReadBlobSignedLong(image);
layer_info[i].mask.page.x=ReadBlobSignedLong(image);
layer_info[i].mask.page.height=(size_t) (ReadBlobSignedLong(image)-
layer_info[i].mask.page.y);
layer_info[i].mask.page.width=(size_t) (ReadBlobSignedLong(image)-
layer_info[i].mask.page.x);
layer_info[i].mask.background=(unsigned char) ReadBlobByte(
image);
layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image);
if (!(layer_info[i].mask.flags & 0x01))
{
layer_info[i].mask.page.y=layer_info[i].mask.page.y-
layer_info[i].page.y;
layer_info[i].mask.page.x=layer_info[i].mask.page.x-
layer_info[i].page.x;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g",
(double) layer_info[i].mask.page.x,(double)
layer_info[i].mask.page.y,(double) layer_info[i].mask.page.width,
(double) layer_info[i].mask.page.height,(double)
((MagickOffsetType) length)-18);
/*
Skip over the rest of the layer mask information.
*/
if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
image->filename);
}
}
length=ReadBlobLong(image);
combined_length+=length+4;
if (length != 0)
{
/*
Layer blending ranges info.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer blending ranges: length=%.20g",(double)
((MagickOffsetType) length));
/*
We read it, but don't use it...
*/
for (j=0; j < (ssize_t) length; j+=8)
{
size_t blend_source=ReadBlobLong(image);
size_t blend_dest=ReadBlobLong(image);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" source(%x), dest(%x)",(unsigned int)
blend_source,(unsigned int) blend_dest);
}
}
/*
Layer name.
*/
length=(MagickSizeType) (unsigned char) ReadBlobByte(image);
combined_length+=length+1;
if (length > 0)
(void) ReadBlob(image,(size_t) length++,layer_info[i].name);
layer_info[i].name[length]='\0';
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer name: %s",layer_info[i].name);
if ((length % 4) != 0)
{
length=4-(length % 4);
combined_length+=length;
/* Skip over the padding of the layer name */
if (DiscardBlobBytes(image,length) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"UnexpectedEndOfFile",image->filename);
}
}
length=(MagickSizeType) size-combined_length;
if (length > 0)
{
unsigned char
*info;
if (length > GetBlobSize(image))
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"InsufficientImageDataInFile",image->filename);
}
layer_info[i].info=AcquireStringInfo((const size_t) length);
info=GetStringInfoDatum(layer_info[i].info);
(void) ReadBlob(image,(const size_t) length,info);
}
}
}
for (i=0; i < number_layers; i++)
{
if ((layer_info[i].page.width == 0) ||
(layer_info[i].page.height == 0))
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer data is empty");
if (layer_info[i].info != (StringInfo *) NULL)
layer_info[i].info=DestroyStringInfo(layer_info[i].info);
continue;
}
/*
Allocate layered image.
*/
layer_info[i].image=CloneImage(image,layer_info[i].page.width,
layer_info[i].page.height,MagickFalse,exception);
if (layer_info[i].image == (Image *) NULL)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" allocation of image for layer %.20g failed",(double) i);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
if (layer_info[i].info != (StringInfo *) NULL)
{
(void) SetImageProfile(layer_info[i].image,"psd:additional-info",
layer_info[i].info);
layer_info[i].info=DestroyStringInfo(layer_info[i].info);
}
}
if (image_info->ping == MagickFalse)
{
for (i=0; i < number_layers; i++)
{
if (layer_info[i].image == (Image *) NULL)
{
for (j=0; j < layer_info[i].channels; j++)
{
if (DiscardBlobBytes(image,(MagickSizeType)
layer_info[i].channel_info[j].size) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"UnexpectedEndOfFile",image->filename);
}
}
continue;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading data for layer %.20g",(double) i);
status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i],
exception);
if (status == MagickFalse)
break;
status=SetImageProgress(image,LoadImagesTag,i,(MagickSizeType)
number_layers);
if (status == MagickFalse)
break;
}
}
if (status != MagickFalse)
{
for (i=0; i < number_layers; i++)
{
if (layer_info[i].image == (Image *) NULL)
{
for (j=i; j < number_layers - 1; j++)
layer_info[j] = layer_info[j+1];
number_layers--;
i--;
}
}
if (number_layers > 0)
{
for (i=0; i < number_layers; i++)
{
if (i > 0)
layer_info[i].image->previous=layer_info[i-1].image;
if (i < (number_layers-1))
layer_info[i].image->next=layer_info[i+1].image;
layer_info[i].image->page=layer_info[i].page;
}
image->next=layer_info[0].image;
layer_info[0].image->previous=image;
}
layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
}
else
layer_info=DestroyLayerInfo(layer_info,number_layers);
}
return(status);
}
Commit Message: https://github.com/ImageMagick/ImageMagick/issues/714
CWE ID: CWE-834 | static MagickBooleanType ReadPSDLayersInternal(Image *image,
const ImageInfo *image_info,const PSDInfo *psd_info,
const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
char
type[4];
LayerInfo
*layer_info;
MagickSizeType
size;
MagickBooleanType
status;
register ssize_t
i;
ssize_t
count,
j,
number_layers;
size=GetPSDSize(psd_info,image);
if (size == 0)
{
/*
Skip layers & masks.
*/
(void) ReadBlobLong(image);
count=ReadBlob(image,4,(unsigned char *) type);
ReversePSDString(image,type,4);
status=MagickFalse;
if ((count == 0) || (LocaleNCompare(type,"8BIM",4) != 0))
return(MagickTrue);
else
{
count=ReadBlob(image,4,(unsigned char *) type);
ReversePSDString(image,type,4);
if ((count != 0) && (LocaleNCompare(type,"Lr16",4) == 0))
size=GetPSDSize(psd_info,image);
else
return(MagickTrue);
}
}
status=MagickTrue;
if (size != 0)
{
layer_info=(LayerInfo *) NULL;
number_layers=(short) ReadBlobShort(image);
if (number_layers < 0)
{
/*
The first alpha channel in the merged result contains the
transparency data for the merged result.
*/
number_layers=MagickAbsoluteValue(number_layers);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" negative layer count corrected for");
image->matte=MagickTrue;
}
/*
We only need to know if the image has an alpha channel
*/
if (skip_layers != MagickFalse)
return(MagickTrue);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image contains %.20g layers",(double) number_layers);
if (number_layers == 0)
ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers",
image->filename);
layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers,
sizeof(*layer_info));
if (layer_info == (LayerInfo *) NULL)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" allocation of LayerInfo failed");
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) ResetMagickMemory(layer_info,0,(size_t) number_layers*
sizeof(*layer_info));
for (i=0; i < number_layers; i++)
{
ssize_t
x,
y;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading layer #%.20g",(double) i+1);
layer_info[i].page.y=ReadBlobSignedLong(image);
layer_info[i].page.x=ReadBlobSignedLong(image);
y=ReadBlobSignedLong(image);
x=ReadBlobSignedLong(image);
layer_info[i].page.width=(size_t) (x-layer_info[i].page.x);
layer_info[i].page.height=(size_t) (y-layer_info[i].page.y);
layer_info[i].channels=ReadBlobShort(image);
if (layer_info[i].channels > MaxPSDChannels)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded",
image->filename);
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g",
(double) layer_info[i].page.x,(double) layer_info[i].page.y,
(double) layer_info[i].page.height,(double)
layer_info[i].page.width,(double) layer_info[i].channels);
for (j=0; j < (ssize_t) layer_info[i].channels; j++)
{
layer_info[i].channel_info[j].type=(short) ReadBlobShort(image);
layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info,
image);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" channel[%.20g]: type=%.20g, size=%.20g",(double) j,
(double) layer_info[i].channel_info[j].type,
(double) layer_info[i].channel_info[j].size);
}
count=ReadBlob(image,4,(unsigned char *) type);
ReversePSDString(image,type,4);
if ((count == 0) || (LocaleNCompare(type,"8BIM",4) != 0))
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer type was %.4s instead of 8BIM", type);
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
image->filename);
}
(void) ReadBlob(image,4,(unsigned char *) layer_info[i].blendkey);
ReversePSDString(image,layer_info[i].blendkey,4);
layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char)
ReadBlobByte(image));
layer_info[i].clipping=(unsigned char) ReadBlobByte(image);
layer_info[i].flags=(unsigned char) ReadBlobByte(image);
layer_info[i].visible=!(layer_info[i].flags & 0x02);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s",
layer_info[i].blendkey,(double) layer_info[i].opacity,
layer_info[i].clipping ? "true" : "false",layer_info[i].flags,
layer_info[i].visible ? "true" : "false");
(void) ReadBlobByte(image); /* filler */
size=ReadBlobLong(image);
if (size != 0)
{
MagickSizeType
combined_length,
length;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer contains additional info");
length=ReadBlobLong(image);
combined_length=length+4;
if (length != 0)
{
/*
Layer mask info.
*/
layer_info[i].mask.page.y=ReadBlobSignedLong(image);
layer_info[i].mask.page.x=ReadBlobSignedLong(image);
layer_info[i].mask.page.height=(size_t) (ReadBlobSignedLong(image)-
layer_info[i].mask.page.y);
layer_info[i].mask.page.width=(size_t) (ReadBlobSignedLong(image)-
layer_info[i].mask.page.x);
layer_info[i].mask.background=(unsigned char) ReadBlobByte(
image);
layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image);
if (!(layer_info[i].mask.flags & 0x01))
{
layer_info[i].mask.page.y=layer_info[i].mask.page.y-
layer_info[i].page.y;
layer_info[i].mask.page.x=layer_info[i].mask.page.x-
layer_info[i].page.x;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g",
(double) layer_info[i].mask.page.x,(double)
layer_info[i].mask.page.y,(double) layer_info[i].mask.page.width,
(double) layer_info[i].mask.page.height,(double)
((MagickOffsetType) length)-18);
/*
Skip over the rest of the layer mask information.
*/
if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
image->filename);
}
}
length=ReadBlobLong(image);
combined_length+=length+4;
if (length != 0)
{
/*
Layer blending ranges info.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer blending ranges: length=%.20g",(double)
((MagickOffsetType) length));
/*
We read it, but don't use it...
*/
for (j=0; j < (ssize_t) length; j+=8)
{
size_t blend_source=ReadBlobLong(image);
size_t blend_dest=ReadBlobLong(image);
if (EOFBlob(image) != MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"InsufficientImageDataInFile",image->filename);
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" source(%x), dest(%x)",(unsigned int)
blend_source,(unsigned int) blend_dest);
}
}
/*
Layer name.
*/
length=(MagickSizeType) (unsigned char) ReadBlobByte(image);
combined_length+=length+1;
if (length > 0)
(void) ReadBlob(image,(size_t) length++,layer_info[i].name);
layer_info[i].name[length]='\0';
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer name: %s",layer_info[i].name);
if ((length % 4) != 0)
{
length=4-(length % 4);
combined_length+=length;
/* Skip over the padding of the layer name */
if (DiscardBlobBytes(image,length) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"UnexpectedEndOfFile",image->filename);
}
}
length=(MagickSizeType) size-combined_length;
if (length > 0)
{
unsigned char
*info;
if (length > GetBlobSize(image))
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"InsufficientImageDataInFile",image->filename);
}
layer_info[i].info=AcquireStringInfo((const size_t) length);
info=GetStringInfoDatum(layer_info[i].info);
(void) ReadBlob(image,(const size_t) length,info);
}
}
}
for (i=0; i < number_layers; i++)
{
if ((layer_info[i].page.width == 0) ||
(layer_info[i].page.height == 0))
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer data is empty");
if (layer_info[i].info != (StringInfo *) NULL)
layer_info[i].info=DestroyStringInfo(layer_info[i].info);
continue;
}
/*
Allocate layered image.
*/
layer_info[i].image=CloneImage(image,layer_info[i].page.width,
layer_info[i].page.height,MagickFalse,exception);
if (layer_info[i].image == (Image *) NULL)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" allocation of image for layer %.20g failed",(double) i);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
if (layer_info[i].info != (StringInfo *) NULL)
{
(void) SetImageProfile(layer_info[i].image,"psd:additional-info",
layer_info[i].info);
layer_info[i].info=DestroyStringInfo(layer_info[i].info);
}
}
if (image_info->ping == MagickFalse)
{
for (i=0; i < number_layers; i++)
{
if (layer_info[i].image == (Image *) NULL)
{
for (j=0; j < layer_info[i].channels; j++)
{
if (DiscardBlobBytes(image,(MagickSizeType)
layer_info[i].channel_info[j].size) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"UnexpectedEndOfFile",image->filename);
}
}
continue;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading data for layer %.20g",(double) i);
status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i],
exception);
if (status == MagickFalse)
break;
status=SetImageProgress(image,LoadImagesTag,i,(MagickSizeType)
number_layers);
if (status == MagickFalse)
break;
}
}
if (status != MagickFalse)
{
for (i=0; i < number_layers; i++)
{
if (layer_info[i].image == (Image *) NULL)
{
for (j=i; j < number_layers - 1; j++)
layer_info[j] = layer_info[j+1];
number_layers--;
i--;
}
}
if (number_layers > 0)
{
for (i=0; i < number_layers; i++)
{
if (i > 0)
layer_info[i].image->previous=layer_info[i-1].image;
if (i < (number_layers-1))
layer_info[i].image->next=layer_info[i+1].image;
layer_info[i].image->page=layer_info[i].page;
}
image->next=layer_info[0].image;
layer_info[0].image->previous=image;
}
layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
}
else
layer_info=DestroyLayerInfo(layer_info,number_layers);
}
return(status);
}
| 167,760 |
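The one functional change in this row is the EOFBlob guard inside the blending-ranges loop: length comes straight from the file, so a huge declared length over a truncated blob previously made the loop spin across end-of-file reads (the CWE-834 uncontrolled loop; the real fix also releases layer_info before throwing). A self-contained restatement of the guard, using stdio in place of ImageMagick's blob API:

#include <stdio.h>

/* A file-supplied length must not drive reads past end-of-file: stop
 * as soon as the stream is exhausted instead of spinning until the
 * declared length is consumed. */
static int skip_blend_ranges(FILE *fp, unsigned long declared_length)
{
    unsigned long j;
    unsigned char pair[8]; /* one source/dest range pair per iteration */

    for (j = 0; j < declared_length; j += 8) {
        if (fread(pair, 1, sizeof(pair), fp) != sizeof(pair))
            return -1; /* truncated input: bail out, don't spin */
        /* values parsed but unused, as in the PSD coder */
    }
    return 0;
}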
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code only, no explanation. | Code: static int aesni_cbc_hmac_sha1_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
const unsigned char *in, size_t len)
{
EVP_AES_HMAC_SHA1 *key = data(ctx);
unsigned int l;
size_t plen = key->payload_length, iv = 0, /* explicit IV in TLS 1.1 and
* later */
sha_off = 0;
# if defined(STITCHED_CALL)
size_t aes_off = 0, blocks;
sha_off = SHA_CBLOCK - key->md.num;
# endif
key->payload_length = NO_PAYLOAD_LENGTH;
if (len % AES_BLOCK_SIZE)
return 0;
if (ctx->encrypt) {
if (plen == NO_PAYLOAD_LENGTH)
plen = len;
else if (len !=
((plen + SHA_DIGEST_LENGTH +
AES_BLOCK_SIZE) & -AES_BLOCK_SIZE))
return 0;
else if (key->aux.tls_ver >= TLS1_1_VERSION)
iv = AES_BLOCK_SIZE;
# if defined(STITCHED_CALL)
if (plen > (sha_off + iv)
&& (blocks = (plen - (sha_off + iv)) / SHA_CBLOCK)) {
SHA1_Update(&key->md, in + iv, sha_off);
aesni_cbc_sha1_enc(in, out, blocks, &key->ks,
ctx->iv, &key->md, in + iv + sha_off);
blocks *= SHA_CBLOCK;
aes_off += blocks;
sha_off += blocks;
key->md.Nh += blocks >> 29;
key->md.Nl += blocks <<= 3;
if (key->md.Nl < (unsigned int)blocks)
key->md.Nh++;
} else {
sha_off = 0;
}
# endif
sha_off += iv;
SHA1_Update(&key->md, in + sha_off, plen - sha_off);
if (plen != len) { /* "TLS" mode of operation */
if (in != out)
memcpy(out + aes_off, in + aes_off, plen - aes_off);
/* calculate HMAC and append it to payload */
SHA1_Final(out + plen, &key->md);
key->md = key->tail;
SHA1_Update(&key->md, out + plen, SHA_DIGEST_LENGTH);
SHA1_Final(out + plen, &key->md);
/* pad the payload|hmac */
plen += SHA_DIGEST_LENGTH;
for (l = len - plen - 1; plen < len; plen++)
out[plen] = l;
/* encrypt HMAC|padding at once */
aesni_cbc_encrypt(out + aes_off, out + aes_off, len - aes_off,
&key->ks, ctx->iv, 1);
} else {
aesni_cbc_encrypt(in + aes_off, out + aes_off, len - aes_off,
&key->ks, ctx->iv, 1);
}
} else {
union {
unsigned int u[SHA_DIGEST_LENGTH / sizeof(unsigned int)];
unsigned char c[32 + SHA_DIGEST_LENGTH];
} mac, *pmac;
/* arrange cache line alignment */
pmac = (void *)(((size_t)mac.c + 31) & ((size_t)0 - 32));
if (plen != NO_PAYLOAD_LENGTH) { /* "TLS" mode of operation */
size_t inp_len, mask, j, i;
unsigned int res, maxpad, pad, bitlen;
int ret = 1;
union {
unsigned int u[SHA_LBLOCK];
unsigned char c[SHA_CBLOCK];
} *data = (void *)key->md.data;
# if defined(STITCHED_DECRYPT_CALL)
unsigned char tail_iv[AES_BLOCK_SIZE];
int stitch = 0;
# endif
if ((key->aux.tls_aad[plen - 4] << 8 | key->aux.tls_aad[plen - 3])
>= TLS1_1_VERSION) {
if (len < (AES_BLOCK_SIZE + SHA_DIGEST_LENGTH + 1))
return 0;
/* omit explicit iv */
memcpy(ctx->iv, in, AES_BLOCK_SIZE);
in += AES_BLOCK_SIZE;
out += AES_BLOCK_SIZE;
len -= AES_BLOCK_SIZE;
} else if (len < (SHA_DIGEST_LENGTH + 1))
return 0;
# if defined(STITCHED_DECRYPT_CALL)
if (len >= 1024 && ctx->key_len == 32) {
/* decrypt last block */
memcpy(tail_iv, in + len - 2 * AES_BLOCK_SIZE,
AES_BLOCK_SIZE);
aesni_cbc_encrypt(in + len - AES_BLOCK_SIZE,
out + len - AES_BLOCK_SIZE, AES_BLOCK_SIZE,
&key->ks, tail_iv, 0);
stitch = 1;
} else
# endif
/* decrypt HMAC|padding at once */
aesni_cbc_encrypt(in, out, len, &key->ks, ctx->iv, 0);
/* figure out payload length */
pad = out[len - 1];
maxpad = len - (SHA_DIGEST_LENGTH + 1);
maxpad |= (255 - maxpad) >> (sizeof(maxpad) * 8 - 8);
maxpad |= (255 - maxpad) >> (sizeof(maxpad) * 8 - 8);
maxpad &= 255;
inp_len = len - (SHA_DIGEST_LENGTH + pad + 1);
mask = (0 - ((inp_len - len) >> (sizeof(inp_len) * 8 - 1)));
inp_len &= mask;
key->aux.tls_aad[plen - 1] = inp_len;
/* calculate HMAC */
key->md = key->head;
SHA1_Update(&key->md, key->aux.tls_aad, plen);
# if defined(STITCHED_DECRYPT_CALL)
if (stitch) {
blocks = (len - (256 + 32 + SHA_CBLOCK)) / SHA_CBLOCK;
aes_off = len - AES_BLOCK_SIZE - blocks * SHA_CBLOCK;
sha_off = SHA_CBLOCK - plen;
aesni_cbc_encrypt(in, out, aes_off, &key->ks, ctx->iv, 0);
SHA1_Update(&key->md, out, sha_off);
aesni256_cbc_sha1_dec(in + aes_off,
out + aes_off, blocks, &key->ks,
ctx->iv, &key->md, out + sha_off);
sha_off += blocks *= SHA_CBLOCK;
out += sha_off;
len -= sha_off;
inp_len -= sha_off;
key->md.Nl += (blocks << 3); /* at most 18 bits */
memcpy(ctx->iv, tail_iv, AES_BLOCK_SIZE);
}
# endif
# if 1
len -= SHA_DIGEST_LENGTH; /* amend mac */
if (len >= (256 + SHA_CBLOCK)) {
j = (len - (256 + SHA_CBLOCK)) & (0 - SHA_CBLOCK);
j += SHA_CBLOCK - key->md.num;
SHA1_Update(&key->md, out, j);
out += j;
len -= j;
inp_len -= j;
}
/* but pretend as if we hashed padded payload */
bitlen = key->md.Nl + (inp_len << 3); /* at most 18 bits */
# ifdef BSWAP4
bitlen = BSWAP4(bitlen);
# else
mac.c[0] = 0;
mac.c[1] = (unsigned char)(bitlen >> 16);
mac.c[2] = (unsigned char)(bitlen >> 8);
mac.c[3] = (unsigned char)bitlen;
bitlen = mac.u[0];
# endif
pmac->u[0] = 0;
pmac->u[1] = 0;
pmac->u[2] = 0;
pmac->u[3] = 0;
pmac->u[4] = 0;
for (res = key->md.num, j = 0; j < len; j++) {
size_t c = out[j];
mask = (j - inp_len) >> (sizeof(j) * 8 - 8);
c &= mask;
c |= 0x80 & ~mask & ~((inp_len - j) >> (sizeof(j) * 8 - 8));
data->c[res++] = (unsigned char)c;
if (res != SHA_CBLOCK)
continue;
/* j is not incremented yet */
mask = 0 - ((inp_len + 7 - j) >> (sizeof(j) * 8 - 1));
data->u[SHA_LBLOCK - 1] |= bitlen & mask;
sha1_block_data_order(&key->md, data, 1);
mask &= 0 - ((j - inp_len - 72) >> (sizeof(j) * 8 - 1));
pmac->u[0] |= key->md.h0 & mask;
pmac->u[1] |= key->md.h1 & mask;
pmac->u[2] |= key->md.h2 & mask;
pmac->u[3] |= key->md.h3 & mask;
pmac->u[4] |= key->md.h4 & mask;
res = 0;
}
for (i = res; i < SHA_CBLOCK; i++, j++)
data->c[i] = 0;
if (res > SHA_CBLOCK - 8) {
mask = 0 - ((inp_len + 8 - j) >> (sizeof(j) * 8 - 1));
data->u[SHA_LBLOCK - 1] |= bitlen & mask;
sha1_block_data_order(&key->md, data, 1);
mask &= 0 - ((j - inp_len - 73) >> (sizeof(j) * 8 - 1));
pmac->u[0] |= key->md.h0 & mask;
pmac->u[1] |= key->md.h1 & mask;
pmac->u[2] |= key->md.h2 & mask;
pmac->u[3] |= key->md.h3 & mask;
pmac->u[4] |= key->md.h4 & mask;
memset(data, 0, SHA_CBLOCK);
j += 64;
}
data->u[SHA_LBLOCK - 1] = bitlen;
sha1_block_data_order(&key->md, data, 1);
mask = 0 - ((j - inp_len - 73) >> (sizeof(j) * 8 - 1));
pmac->u[0] |= key->md.h0 & mask;
pmac->u[1] |= key->md.h1 & mask;
pmac->u[2] |= key->md.h2 & mask;
pmac->u[3] |= key->md.h3 & mask;
pmac->u[4] |= key->md.h4 & mask;
# ifdef BSWAP4
pmac->u[0] = BSWAP4(pmac->u[0]);
pmac->u[1] = BSWAP4(pmac->u[1]);
pmac->u[2] = BSWAP4(pmac->u[2]);
pmac->u[3] = BSWAP4(pmac->u[3]);
pmac->u[4] = BSWAP4(pmac->u[4]);
# else
for (i = 0; i < 5; i++) {
res = pmac->u[i];
pmac->c[4 * i + 0] = (unsigned char)(res >> 24);
pmac->c[4 * i + 1] = (unsigned char)(res >> 16);
pmac->c[4 * i + 2] = (unsigned char)(res >> 8);
pmac->c[4 * i + 3] = (unsigned char)res;
}
# endif
len += SHA_DIGEST_LENGTH;
# else
SHA1_Update(&key->md, out, inp_len);
res = key->md.num;
SHA1_Final(pmac->c, &key->md);
{
unsigned int inp_blocks, pad_blocks;
/* but pretend as if we hashed padded payload */
inp_blocks =
1 + ((SHA_CBLOCK - 9 - res) >> (sizeof(res) * 8 - 1));
res += (unsigned int)(len - inp_len);
pad_blocks = res / SHA_CBLOCK;
res %= SHA_CBLOCK;
pad_blocks +=
1 + ((SHA_CBLOCK - 9 - res) >> (sizeof(res) * 8 - 1));
for (; inp_blocks < pad_blocks; inp_blocks++)
sha1_block_data_order(&key->md, data, 1);
}
# endif
key->md = key->tail;
SHA1_Update(&key->md, pmac->c, SHA_DIGEST_LENGTH);
SHA1_Final(pmac->c, &key->md);
/* verify HMAC */
out += inp_len;
len -= inp_len;
# if 1
{
unsigned char *p = out + len - 1 - maxpad - SHA_DIGEST_LENGTH;
size_t off = out - p;
unsigned int c, cmask;
maxpad += SHA_DIGEST_LENGTH;
for (res = 0, i = 0, j = 0; j < maxpad; j++) {
c = p[j];
cmask =
((int)(j - off - SHA_DIGEST_LENGTH)) >> (sizeof(int) *
8 - 1);
res |= (c ^ pad) & ~cmask; /* ... and padding */
cmask &= ((int)(off - 1 - j)) >> (sizeof(int) * 8 - 1);
res |= (c ^ pmac->c[i]) & cmask;
i += 1 & cmask;
}
maxpad -= SHA_DIGEST_LENGTH;
res = 0 - ((0 - res) >> (sizeof(res) * 8 - 1));
ret &= (int)~res;
}
# else
for (res = 0, i = 0; i < SHA_DIGEST_LENGTH; i++)
res |= out[i] ^ pmac->c[i];
res = 0 - ((0 - res) >> (sizeof(res) * 8 - 1));
ret &= (int)~res;
/* verify padding */
pad = (pad & ~res) | (maxpad & res);
out = out + len - 1 - pad;
for (res = 0, i = 0; i < pad; i++)
res |= out[i] ^ pad;
res = (0 - res) >> (sizeof(res) * 8 - 1);
ret &= (int)~res;
# endif
return ret;
} else {
# if defined(STITCHED_DECRYPT_CALL)
if (len >= 1024 && ctx->key_len == 32) {
if (sha_off %= SHA_CBLOCK)
blocks = (len - 3 * SHA_CBLOCK) / SHA_CBLOCK;
else
blocks = (len - 2 * SHA_CBLOCK) / SHA_CBLOCK;
aes_off = len - blocks * SHA_CBLOCK;
aesni_cbc_encrypt(in, out, aes_off, &key->ks, ctx->iv, 0);
SHA1_Update(&key->md, out, sha_off);
aesni256_cbc_sha1_dec(in + aes_off,
out + aes_off, blocks, &key->ks,
ctx->iv, &key->md, out + sha_off);
sha_off += blocks *= SHA_CBLOCK;
out += sha_off;
len -= sha_off;
key->md.Nh += blocks >> 29;
key->md.Nl += blocks <<= 3;
if (key->md.Nl < (unsigned int)blocks)
key->md.Nh++;
} else
# endif
/* decrypt HMAC|padding at once */
aesni_cbc_encrypt(in, out, len, &key->ks, ctx->iv, 0);
SHA1_Update(&key->md, out, len);
}
}
return 1;
}
Commit Message:
CWE ID: CWE-310 | static int aesni_cbc_hmac_sha1_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
const unsigned char *in, size_t len)
{
EVP_AES_HMAC_SHA1 *key = data(ctx);
unsigned int l;
size_t plen = key->payload_length, iv = 0, /* explicit IV in TLS 1.1 and
* later */
sha_off = 0;
# if defined(STITCHED_CALL)
size_t aes_off = 0, blocks;
sha_off = SHA_CBLOCK - key->md.num;
# endif
key->payload_length = NO_PAYLOAD_LENGTH;
if (len % AES_BLOCK_SIZE)
return 0;
if (ctx->encrypt) {
if (plen == NO_PAYLOAD_LENGTH)
plen = len;
else if (len !=
((plen + SHA_DIGEST_LENGTH +
AES_BLOCK_SIZE) & -AES_BLOCK_SIZE))
return 0;
else if (key->aux.tls_ver >= TLS1_1_VERSION)
iv = AES_BLOCK_SIZE;
# if defined(STITCHED_CALL)
if (plen > (sha_off + iv)
&& (blocks = (plen - (sha_off + iv)) / SHA_CBLOCK)) {
SHA1_Update(&key->md, in + iv, sha_off);
aesni_cbc_sha1_enc(in, out, blocks, &key->ks,
ctx->iv, &key->md, in + iv + sha_off);
blocks *= SHA_CBLOCK;
aes_off += blocks;
sha_off += blocks;
key->md.Nh += blocks >> 29;
key->md.Nl += blocks <<= 3;
if (key->md.Nl < (unsigned int)blocks)
key->md.Nh++;
} else {
sha_off = 0;
}
# endif
sha_off += iv;
SHA1_Update(&key->md, in + sha_off, plen - sha_off);
if (plen != len) { /* "TLS" mode of operation */
if (in != out)
memcpy(out + aes_off, in + aes_off, plen - aes_off);
/* calculate HMAC and append it to payload */
SHA1_Final(out + plen, &key->md);
key->md = key->tail;
SHA1_Update(&key->md, out + plen, SHA_DIGEST_LENGTH);
SHA1_Final(out + plen, &key->md);
/* pad the payload|hmac */
plen += SHA_DIGEST_LENGTH;
for (l = len - plen - 1; plen < len; plen++)
out[plen] = l;
/* encrypt HMAC|padding at once */
aesni_cbc_encrypt(out + aes_off, out + aes_off, len - aes_off,
&key->ks, ctx->iv, 1);
} else {
aesni_cbc_encrypt(in + aes_off, out + aes_off, len - aes_off,
&key->ks, ctx->iv, 1);
}
} else {
union {
unsigned int u[SHA_DIGEST_LENGTH / sizeof(unsigned int)];
unsigned char c[32 + SHA_DIGEST_LENGTH];
} mac, *pmac;
/* arrange cache line alignment */
pmac = (void *)(((size_t)mac.c + 31) & ((size_t)0 - 32));
if (plen != NO_PAYLOAD_LENGTH) { /* "TLS" mode of operation */
size_t inp_len, mask, j, i;
unsigned int res, maxpad, pad, bitlen;
int ret = 1;
union {
unsigned int u[SHA_LBLOCK];
unsigned char c[SHA_CBLOCK];
} *data = (void *)key->md.data;
# if defined(STITCHED_DECRYPT_CALL)
unsigned char tail_iv[AES_BLOCK_SIZE];
int stitch = 0;
# endif
if ((key->aux.tls_aad[plen - 4] << 8 | key->aux.tls_aad[plen - 3])
>= TLS1_1_VERSION) {
if (len < (AES_BLOCK_SIZE + SHA_DIGEST_LENGTH + 1))
return 0;
/* omit explicit iv */
memcpy(ctx->iv, in, AES_BLOCK_SIZE);
in += AES_BLOCK_SIZE;
out += AES_BLOCK_SIZE;
len -= AES_BLOCK_SIZE;
} else if (len < (SHA_DIGEST_LENGTH + 1))
return 0;
# if defined(STITCHED_DECRYPT_CALL)
if (len >= 1024 && ctx->key_len == 32) {
/* decrypt last block */
memcpy(tail_iv, in + len - 2 * AES_BLOCK_SIZE,
AES_BLOCK_SIZE);
aesni_cbc_encrypt(in + len - AES_BLOCK_SIZE,
out + len - AES_BLOCK_SIZE, AES_BLOCK_SIZE,
&key->ks, tail_iv, 0);
stitch = 1;
} else
# endif
/* decrypt HMAC|padding at once */
aesni_cbc_encrypt(in, out, len, &key->ks, ctx->iv, 0);
/* figure out payload length */
pad = out[len - 1];
maxpad = len - (SHA_DIGEST_LENGTH + 1);
maxpad |= (255 - maxpad) >> (sizeof(maxpad) * 8 - 8);
maxpad |= (255 - maxpad) >> (sizeof(maxpad) * 8 - 8);
maxpad &= 255;
ret &= constant_time_ge(maxpad, pad);
inp_len = len - (SHA_DIGEST_LENGTH + pad + 1);
mask = (0 - ((inp_len - len) >> (sizeof(inp_len) * 8 - 1)));
inp_len &= mask;
key->aux.tls_aad[plen - 1] = inp_len;
/* calculate HMAC */
key->md = key->head;
SHA1_Update(&key->md, key->aux.tls_aad, plen);
# if defined(STITCHED_DECRYPT_CALL)
if (stitch) {
blocks = (len - (256 + 32 + SHA_CBLOCK)) / SHA_CBLOCK;
aes_off = len - AES_BLOCK_SIZE - blocks * SHA_CBLOCK;
sha_off = SHA_CBLOCK - plen;
aesni_cbc_encrypt(in, out, aes_off, &key->ks, ctx->iv, 0);
SHA1_Update(&key->md, out, sha_off);
aesni256_cbc_sha1_dec(in + aes_off,
out + aes_off, blocks, &key->ks,
ctx->iv, &key->md, out + sha_off);
sha_off += blocks *= SHA_CBLOCK;
out += sha_off;
len -= sha_off;
inp_len -= sha_off;
key->md.Nl += (blocks << 3); /* at most 18 bits */
memcpy(ctx->iv, tail_iv, AES_BLOCK_SIZE);
}
# endif
# if 1
len -= SHA_DIGEST_LENGTH; /* amend mac */
if (len >= (256 + SHA_CBLOCK)) {
j = (len - (256 + SHA_CBLOCK)) & (0 - SHA_CBLOCK);
j += SHA_CBLOCK - key->md.num;
SHA1_Update(&key->md, out, j);
out += j;
len -= j;
inp_len -= j;
}
/* but pretend as if we hashed padded payload */
bitlen = key->md.Nl + (inp_len << 3); /* at most 18 bits */
# ifdef BSWAP4
bitlen = BSWAP4(bitlen);
# else
mac.c[0] = 0;
mac.c[1] = (unsigned char)(bitlen >> 16);
mac.c[2] = (unsigned char)(bitlen >> 8);
mac.c[3] = (unsigned char)bitlen;
bitlen = mac.u[0];
# endif
pmac->u[0] = 0;
pmac->u[1] = 0;
pmac->u[2] = 0;
pmac->u[3] = 0;
pmac->u[4] = 0;
for (res = key->md.num, j = 0; j < len; j++) {
size_t c = out[j];
mask = (j - inp_len) >> (sizeof(j) * 8 - 8);
c &= mask;
c |= 0x80 & ~mask & ~((inp_len - j) >> (sizeof(j) * 8 - 8));
data->c[res++] = (unsigned char)c;
if (res != SHA_CBLOCK)
continue;
/* j is not incremented yet */
mask = 0 - ((inp_len + 7 - j) >> (sizeof(j) * 8 - 1));
data->u[SHA_LBLOCK - 1] |= bitlen & mask;
sha1_block_data_order(&key->md, data, 1);
mask &= 0 - ((j - inp_len - 72) >> (sizeof(j) * 8 - 1));
pmac->u[0] |= key->md.h0 & mask;
pmac->u[1] |= key->md.h1 & mask;
pmac->u[2] |= key->md.h2 & mask;
pmac->u[3] |= key->md.h3 & mask;
pmac->u[4] |= key->md.h4 & mask;
res = 0;
}
for (i = res; i < SHA_CBLOCK; i++, j++)
data->c[i] = 0;
if (res > SHA_CBLOCK - 8) {
mask = 0 - ((inp_len + 8 - j) >> (sizeof(j) * 8 - 1));
data->u[SHA_LBLOCK - 1] |= bitlen & mask;
sha1_block_data_order(&key->md, data, 1);
mask &= 0 - ((j - inp_len - 73) >> (sizeof(j) * 8 - 1));
pmac->u[0] |= key->md.h0 & mask;
pmac->u[1] |= key->md.h1 & mask;
pmac->u[2] |= key->md.h2 & mask;
pmac->u[3] |= key->md.h3 & mask;
pmac->u[4] |= key->md.h4 & mask;
memset(data, 0, SHA_CBLOCK);
j += 64;
}
data->u[SHA_LBLOCK - 1] = bitlen;
sha1_block_data_order(&key->md, data, 1);
mask = 0 - ((j - inp_len - 73) >> (sizeof(j) * 8 - 1));
pmac->u[0] |= key->md.h0 & mask;
pmac->u[1] |= key->md.h1 & mask;
pmac->u[2] |= key->md.h2 & mask;
pmac->u[3] |= key->md.h3 & mask;
pmac->u[4] |= key->md.h4 & mask;
# ifdef BSWAP4
pmac->u[0] = BSWAP4(pmac->u[0]);
pmac->u[1] = BSWAP4(pmac->u[1]);
pmac->u[2] = BSWAP4(pmac->u[2]);
pmac->u[3] = BSWAP4(pmac->u[3]);
pmac->u[4] = BSWAP4(pmac->u[4]);
# else
for (i = 0; i < 5; i++) {
res = pmac->u[i];
pmac->c[4 * i + 0] = (unsigned char)(res >> 24);
pmac->c[4 * i + 1] = (unsigned char)(res >> 16);
pmac->c[4 * i + 2] = (unsigned char)(res >> 8);
pmac->c[4 * i + 3] = (unsigned char)res;
}
# endif
len += SHA_DIGEST_LENGTH;
# else
SHA1_Update(&key->md, out, inp_len);
res = key->md.num;
SHA1_Final(pmac->c, &key->md);
{
unsigned int inp_blocks, pad_blocks;
/* but pretend as if we hashed padded payload */
inp_blocks =
1 + ((SHA_CBLOCK - 9 - res) >> (sizeof(res) * 8 - 1));
res += (unsigned int)(len - inp_len);
pad_blocks = res / SHA_CBLOCK;
res %= SHA_CBLOCK;
pad_blocks +=
1 + ((SHA_CBLOCK - 9 - res) >> (sizeof(res) * 8 - 1));
for (; inp_blocks < pad_blocks; inp_blocks++)
sha1_block_data_order(&key->md, data, 1);
}
# endif
key->md = key->tail;
SHA1_Update(&key->md, pmac->c, SHA_DIGEST_LENGTH);
SHA1_Final(pmac->c, &key->md);
/* verify HMAC */
out += inp_len;
len -= inp_len;
# if 1
{
unsigned char *p = out + len - 1 - maxpad - SHA_DIGEST_LENGTH;
size_t off = out - p;
unsigned int c, cmask;
maxpad += SHA_DIGEST_LENGTH;
for (res = 0, i = 0, j = 0; j < maxpad; j++) {
c = p[j];
cmask =
((int)(j - off - SHA_DIGEST_LENGTH)) >> (sizeof(int) *
8 - 1);
res |= (c ^ pad) & ~cmask; /* ... and padding */
cmask &= ((int)(off - 1 - j)) >> (sizeof(int) * 8 - 1);
res |= (c ^ pmac->c[i]) & cmask;
i += 1 & cmask;
}
maxpad -= SHA_DIGEST_LENGTH;
res = 0 - ((0 - res) >> (sizeof(res) * 8 - 1));
ret &= (int)~res;
}
# else
for (res = 0, i = 0; i < SHA_DIGEST_LENGTH; i++)
res |= out[i] ^ pmac->c[i];
res = 0 - ((0 - res) >> (sizeof(res) * 8 - 1));
ret &= (int)~res;
/* verify padding */
pad = (pad & ~res) | (maxpad & res);
out = out + len - 1 - pad;
for (res = 0, i = 0; i < pad; i++)
res |= out[i] ^ pad;
res = (0 - res) >> (sizeof(res) * 8 - 1);
ret &= (int)~res;
# endif
return ret;
} else {
# if defined(STITCHED_DECRYPT_CALL)
if (len >= 1024 && ctx->key_len == 32) {
if (sha_off %= SHA_CBLOCK)
blocks = (len - 3 * SHA_CBLOCK) / SHA_CBLOCK;
else
blocks = (len - 2 * SHA_CBLOCK) / SHA_CBLOCK;
aes_off = len - blocks * SHA_CBLOCK;
aesni_cbc_encrypt(in, out, aes_off, &key->ks, ctx->iv, 0);
SHA1_Update(&key->md, out, sha_off);
aesni256_cbc_sha1_dec(in + aes_off,
out + aes_off, blocks, &key->ks,
ctx->iv, &key->md, out + sha_off);
sha_off += blocks *= SHA_CBLOCK;
out += sha_off;
len -= sha_off;
key->md.Nh += blocks >> 29;
key->md.Nl += blocks <<= 3;
if (key->md.Nl < (unsigned int)blocks)
key->md.Nh++;
} else
# endif
/* decrypt HMAC|padding at once */
aesni_cbc_encrypt(in, out, len, &key->ks, ctx->iv, 0);
SHA1_Update(&key->md, out, len);
}
}
return 1;
}
| 165,214 |
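The single functional change is the added ret &= constant_time_ge(maxpad, pad);, which rejects impossible padding values without a secret-dependent branch (the Lucky-13 class of CWE-310 timing leaks). A sketch of how such a mask-returning comparison is typically built, in the style of OpenSSL's constant-time helpers (the exact header and names vary by version):

/* constant_time_ge(a, b) yields an all-ones mask when a >= b and an
 * all-zeros mask otherwise, so the caller can fold the check into
 * `ret` with a plain AND instead of a data-dependent branch. */
static unsigned int constant_time_msb(unsigned int a)
{
    return 0 - (a >> (sizeof(a) * 8 - 1)); /* replicate the sign bit */
}

static unsigned int constant_time_lt(unsigned int a, unsigned int b)
{
    return constant_time_msb(a ^ ((a ^ b) | ((a - b) ^ b)));
}

static unsigned int constant_time_ge(unsigned int a, unsigned int b)
{
    return ~constant_time_lt(a, b);
}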
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code only, no explanation. | Code: do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
unsigned char arg[128];
int ret = 0;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (*len < get_arglen[GET_CMDID(cmd)]) {
pr_err("get_ctl: len %u < %u\n",
*len, get_arglen[GET_CMDID(cmd)]);
return -EINVAL;
}
if (copy_from_user(arg, user, get_arglen[GET_CMDID(cmd)]) != 0)
return -EFAULT;
if (mutex_lock_interruptible(&__ip_vs_mutex))
return -ERESTARTSYS;
switch (cmd) {
case IP_VS_SO_GET_VERSION:
{
char buf[64];
sprintf(buf, "IP Virtual Server version %d.%d.%d (size=%d)",
NVERSION(IP_VS_VERSION_CODE), IP_VS_CONN_TAB_SIZE);
if (copy_to_user(user, buf, strlen(buf)+1) != 0) {
ret = -EFAULT;
goto out;
}
*len = strlen(buf)+1;
}
break;
case IP_VS_SO_GET_INFO:
{
struct ip_vs_getinfo info;
info.version = IP_VS_VERSION_CODE;
info.size = IP_VS_CONN_TAB_SIZE;
info.num_services = ip_vs_num_services;
if (copy_to_user(user, &info, sizeof(info)) != 0)
ret = -EFAULT;
}
break;
case IP_VS_SO_GET_SERVICES:
{
struct ip_vs_get_services *get;
int size;
get = (struct ip_vs_get_services *)arg;
size = sizeof(*get) +
sizeof(struct ip_vs_service_entry) * get->num_services;
if (*len != size) {
pr_err("length: %u != %u\n", *len, size);
ret = -EINVAL;
goto out;
}
ret = __ip_vs_get_service_entries(get, user);
}
break;
case IP_VS_SO_GET_SERVICE:
{
struct ip_vs_service_entry *entry;
struct ip_vs_service *svc;
union nf_inet_addr addr;
entry = (struct ip_vs_service_entry *)arg;
addr.ip = entry->addr;
if (entry->fwmark)
svc = __ip_vs_svc_fwm_get(AF_INET, entry->fwmark);
else
svc = __ip_vs_service_get(AF_INET, entry->protocol,
&addr, entry->port);
if (svc) {
ip_vs_copy_service(entry, svc);
if (copy_to_user(user, entry, sizeof(*entry)) != 0)
ret = -EFAULT;
ip_vs_service_put(svc);
} else
ret = -ESRCH;
}
break;
case IP_VS_SO_GET_DESTS:
{
struct ip_vs_get_dests *get;
int size;
get = (struct ip_vs_get_dests *)arg;
size = sizeof(*get) +
sizeof(struct ip_vs_dest_entry) * get->num_dests;
if (*len != size) {
pr_err("length: %u != %u\n", *len, size);
ret = -EINVAL;
goto out;
}
ret = __ip_vs_get_dest_entries(get, user);
}
break;
case IP_VS_SO_GET_TIMEOUT:
{
struct ip_vs_timeout_user t;
__ip_vs_get_timeouts(&t);
if (copy_to_user(user, &t, sizeof(t)) != 0)
ret = -EFAULT;
}
break;
case IP_VS_SO_GET_DAEMON:
{
struct ip_vs_daemon_user d[2];
memset(&d, 0, sizeof(d));
if (ip_vs_sync_state & IP_VS_STATE_MASTER) {
d[0].state = IP_VS_STATE_MASTER;
strlcpy(d[0].mcast_ifn, ip_vs_master_mcast_ifn, sizeof(d[0].mcast_ifn));
d[0].syncid = ip_vs_master_syncid;
}
if (ip_vs_sync_state & IP_VS_STATE_BACKUP) {
d[1].state = IP_VS_STATE_BACKUP;
strlcpy(d[1].mcast_ifn, ip_vs_backup_mcast_ifn, sizeof(d[1].mcast_ifn));
d[1].syncid = ip_vs_backup_syncid;
}
if (copy_to_user(user, &d, sizeof(d)) != 0)
ret = -EFAULT;
}
break;
default:
ret = -EINVAL;
}
out:
mutex_unlock(&__ip_vs_mutex);
return ret;
}
Commit Message: ipvs: Add boundary check on ioctl arguments
The ipvs code has a nifty system for doing the size of ioctl command
copies; it defines an array with values into which it indexes the cmd
to find the right length.
Unfortunately, the ipvs code forgot to check if the cmd was in the
range that the array provides, allowing for an index outside of the
array, which then gives a "garbage" result into the length, which
then gets used for copying into a stack buffer.
Fix this by adding sanity checks on these as well as the copy size.
[ [email protected]: adjusted limit to IP_VS_SO_GET_MAX ]
Signed-off-by: Arjan van de Ven <[email protected]>
Acked-by: Julian Anastasov <[email protected]>
Signed-off-by: Simon Horman <[email protected]>
Signed-off-by: Patrick McHardy <[email protected]>
CWE ID: CWE-119 | do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
unsigned char arg[128];
int ret = 0;
unsigned int copylen;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (cmd < IP_VS_BASE_CTL || cmd > IP_VS_SO_GET_MAX)
return -EINVAL;
if (*len < get_arglen[GET_CMDID(cmd)]) {
pr_err("get_ctl: len %u < %u\n",
*len, get_arglen[GET_CMDID(cmd)]);
return -EINVAL;
}
copylen = get_arglen[GET_CMDID(cmd)];
if (copylen > 128)
return -EINVAL;
if (copy_from_user(arg, user, copylen) != 0)
return -EFAULT;
if (mutex_lock_interruptible(&__ip_vs_mutex))
return -ERESTARTSYS;
switch (cmd) {
case IP_VS_SO_GET_VERSION:
{
char buf[64];
sprintf(buf, "IP Virtual Server version %d.%d.%d (size=%d)",
NVERSION(IP_VS_VERSION_CODE), IP_VS_CONN_TAB_SIZE);
if (copy_to_user(user, buf, strlen(buf)+1) != 0) {
ret = -EFAULT;
goto out;
}
*len = strlen(buf)+1;
}
break;
case IP_VS_SO_GET_INFO:
{
struct ip_vs_getinfo info;
info.version = IP_VS_VERSION_CODE;
info.size = IP_VS_CONN_TAB_SIZE;
info.num_services = ip_vs_num_services;
if (copy_to_user(user, &info, sizeof(info)) != 0)
ret = -EFAULT;
}
break;
case IP_VS_SO_GET_SERVICES:
{
struct ip_vs_get_services *get;
int size;
get = (struct ip_vs_get_services *)arg;
size = sizeof(*get) +
sizeof(struct ip_vs_service_entry) * get->num_services;
if (*len != size) {
pr_err("length: %u != %u\n", *len, size);
ret = -EINVAL;
goto out;
}
ret = __ip_vs_get_service_entries(get, user);
}
break;
case IP_VS_SO_GET_SERVICE:
{
struct ip_vs_service_entry *entry;
struct ip_vs_service *svc;
union nf_inet_addr addr;
entry = (struct ip_vs_service_entry *)arg;
addr.ip = entry->addr;
if (entry->fwmark)
svc = __ip_vs_svc_fwm_get(AF_INET, entry->fwmark);
else
svc = __ip_vs_service_get(AF_INET, entry->protocol,
&addr, entry->port);
if (svc) {
ip_vs_copy_service(entry, svc);
if (copy_to_user(user, entry, sizeof(*entry)) != 0)
ret = -EFAULT;
ip_vs_service_put(svc);
} else
ret = -ESRCH;
}
break;
case IP_VS_SO_GET_DESTS:
{
struct ip_vs_get_dests *get;
int size;
get = (struct ip_vs_get_dests *)arg;
size = sizeof(*get) +
sizeof(struct ip_vs_dest_entry) * get->num_dests;
if (*len != size) {
pr_err("length: %u != %u\n", *len, size);
ret = -EINVAL;
goto out;
}
ret = __ip_vs_get_dest_entries(get, user);
}
break;
case IP_VS_SO_GET_TIMEOUT:
{
struct ip_vs_timeout_user t;
__ip_vs_get_timeouts(&t);
if (copy_to_user(user, &t, sizeof(t)) != 0)
ret = -EFAULT;
}
break;
case IP_VS_SO_GET_DAEMON:
{
struct ip_vs_daemon_user d[2];
memset(&d, 0, sizeof(d));
if (ip_vs_sync_state & IP_VS_STATE_MASTER) {
d[0].state = IP_VS_STATE_MASTER;
strlcpy(d[0].mcast_ifn, ip_vs_master_mcast_ifn, sizeof(d[0].mcast_ifn));
d[0].syncid = ip_vs_master_syncid;
}
if (ip_vs_sync_state & IP_VS_STATE_BACKUP) {
d[1].state = IP_VS_STATE_BACKUP;
strlcpy(d[1].mcast_ifn, ip_vs_backup_mcast_ifn, sizeof(d[1].mcast_ifn));
d[1].syncid = ip_vs_backup_syncid;
}
if (copy_to_user(user, &d, sizeof(d)) != 0)
ret = -EFAULT;
}
break;
default:
ret = -EINVAL;
}
out:
mutex_unlock(&__ip_vs_mutex);
return ret;
}
| 165,957 |
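
The pattern behind this fix generalizes: validate an attacker-controlled command code before using it to index a length table, and cap the resulting copy length against the destination buffer. A minimal C++ sketch under those assumptions; kCmdLen, handle_get and the 128-byte buffer are invented for illustration, not ipvs API:

#include <cerrno>
#include <cstdio>
#include <cstring>

// Hypothetical per-command argument lengths, indexed by command code.
static const unsigned kCmdLen[] = { 8, 16, 64, 128 };
static const unsigned kCmdMax = sizeof(kCmdLen) / sizeof(kCmdLen[0]);

// Returns 0 on success, -EINVAL on a malformed request.
int handle_get(unsigned cmd, const void* user, unsigned user_len) {
    unsigned char arg[128];
    // Reject out-of-range commands *before* indexing the table; this is
    // the check the original ipvs code was missing.
    if (cmd >= kCmdMax)
        return -EINVAL;
    unsigned copylen = kCmdLen[cmd];
    // Defensive: never copy more than the stack buffer (or caller) holds.
    if (copylen > sizeof(arg) || user_len < copylen)
        return -EINVAL;
    std::memcpy(arg, user, copylen);
    std::printf("cmd %u: copied %u bytes\n", cmd, copylen);
    return 0;
}

int main() {
    unsigned char buf[128] = {};
    handle_get(2, buf, sizeof(buf));   // ok
    handle_get(99, buf, sizeof(buf));  // rejected: index out of range
}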
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static void put_crypt_info(struct fscrypt_info *ci)
{
if (!ci)
return;
key_put(ci->ci_keyring_key);
crypto_free_skcipher(ci->ci_ctfm);
kmem_cache_free(fscrypt_info_cachep, ci);
}
Commit Message: fscrypt: remove broken support for detecting keyring key revocation
Filesystem encryption ostensibly supported revoking a keyring key that
had been used to "unlock" encrypted files, causing those files to become
"locked" again. This was, however, buggy for several reasons, the most
severe of which was that when key revocation happened to be detected for
an inode, its fscrypt_info was immediately freed, even while other
threads could be using it for encryption or decryption concurrently.
This could be exploited to crash the kernel or worse.
This patch fixes the use-after-free by removing the code which detects
the keyring key having been revoked, invalidated, or expired. Instead,
an encrypted inode that is "unlocked" now simply remains unlocked until
it is evicted from memory. Note that this is no worse than the case for
block device-level encryption, e.g. dm-crypt, and it still remains
possible for a privileged user to evict unused pages, inodes, and
dentries by running 'sync; echo 3 > /proc/sys/vm/drop_caches', or by
simply unmounting the filesystem. In fact, one of those actions was
already needed anyway for key revocation to work even somewhat sanely.
This change is not expected to break any applications.
In the future I'd like to implement a real API for fscrypt key
revocation that interacts sanely with ongoing filesystem operations ---
waiting for existing operations to complete and blocking new operations,
and invalidating and sanitizing key material and plaintext from the VFS
caches. But this is a hard problem, and for now this bug must be fixed.
This bug affected almost all versions of ext4, f2fs, and ubifs
encryption, and it was potentially reachable in any kernel configured
with encryption support (CONFIG_EXT4_ENCRYPTION=y,
CONFIG_EXT4_FS_ENCRYPTION=y, CONFIG_F2FS_FS_ENCRYPTION=y, or
CONFIG_UBIFS_FS_ENCRYPTION=y). Note that older kernels did not use the
shared fs/crypto/ code, but due to the potential security implications
of this bug, it may still be worthwhile to backport this fix to them.
Fixes: b7236e21d55f ("ext4 crypto: reorganize how we store keys in the inode")
Cc: [email protected] # v4.2+
Signed-off-by: Eric Biggers <[email protected]>
Signed-off-by: Theodore Ts'o <[email protected]>
Acked-by: Michael Halcrow <[email protected]>
CWE ID: CWE-416 | static void put_crypt_info(struct fscrypt_info *ci)
{
if (!ci)
return;
crypto_free_skcipher(ci->ci_ctfm);
kmem_cache_free(fscrypt_info_cachep, ci);
}
| 168,283 |
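
The hazard the commit message describes, freeing a context while concurrent readers still hold it, is commonly avoided by tying the context's lifetime to a reference count rather than to a revocation event. A small hedged sketch; CryptInfo and decrypt_block are illustrative stand-ins, not the fscrypt API:

#include <cstdio>
#include <memory>
#include <string>

// Hypothetical stand-in for a per-inode crypto context.
struct CryptInfo {
    std::string key_material;
    ~CryptInfo() { std::printf("context destroyed\n"); }
};

// Holding a shared_ptr keeps the context alive for the duration of an
// operation even if another thread drops the owning reference meanwhile;
// the bug above freed the context outright while readers still used it.
void decrypt_block(std::shared_ptr<const CryptInfo> ci) {
    std::printf("decrypting with a %zu-byte key\n", ci->key_material.size());
}

int main() {
    auto owner = std::make_shared<CryptInfo>();
    owner->key_material.assign(32, 'k');
    auto in_use = owner;    // an in-flight operation takes its own reference
    owner.reset();          // "revocation" drops the owning reference...
    decrypt_block(in_use);  // ...but the context survives until readers finish
}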
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: GDataFileError GDataWapiFeedProcessor::FeedToFileResourceMap(
const std::vector<DocumentFeed*>& feed_list,
FileResourceIdMap* file_map,
int64* feed_changestamp,
FeedToFileResourceMapUmaStats* uma_stats) {
DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI));
DCHECK(uma_stats);
GDataFileError error = GDATA_FILE_OK;
uma_stats->num_regular_files = 0;
uma_stats->num_hosted_documents = 0;
uma_stats->num_files_with_entry_kind.clear();
for (size_t i = 0; i < feed_list.size(); ++i) {
const DocumentFeed* feed = feed_list[i];
if (i == 0) {
const Link* root_feed_upload_link =
feed->GetLinkByType(Link::RESUMABLE_CREATE_MEDIA);
if (root_feed_upload_link)
directory_service_->root()->set_upload_url(
root_feed_upload_link->href());
*feed_changestamp = feed->largest_changestamp();
DCHECK_GE(*feed_changestamp, 0);
}
for (ScopedVector<DocumentEntry>::const_iterator iter =
feed->entries().begin();
iter != feed->entries().end(); ++iter) {
DocumentEntry* doc = *iter;
GDataEntry* entry = GDataEntry::FromDocumentEntry(
NULL, doc, directory_service_);
if (!entry)
continue;
GDataFile* as_file = entry->AsGDataFile();
if (as_file) {
if (as_file->is_hosted_document())
++uma_stats->num_hosted_documents;
else
++uma_stats->num_regular_files;
++uma_stats->num_files_with_entry_kind[as_file->kind()];
}
FileResourceIdMap::iterator map_entry =
file_map->find(entry->resource_id());
if (map_entry != file_map->end()) {
LOG(WARNING) << "Found duplicate file "
<< map_entry->second->base_name();
delete map_entry->second;
file_map->erase(map_entry);
}
file_map->insert(
std::pair<std::string, GDataEntry*>(entry->resource_id(), entry));
}
}
if (error != GDATA_FILE_OK) {
STLDeleteValues(file_map);
}
return error;
}
Commit Message: Remove parent* arg from GDataEntry ctor.
* Remove static FromDocumentEntry from GDataEntry, GDataFile, GDataDirectory. Replace with InitFromDocumentEntry.
* Move common code from GDataFile::InitFromDocumentEntry and GDataDirectory::InitFromDocumentEntry to GDataEntry::InitFromDocumentEntry.
* Add GDataDirectoryService::FromDocumentEntry and use this everywhere.
* Make ctors of GDataFile, GDataDirectory private, so these must be created by GDataDirectoryService's CreateGDataFile and
CreateGDataDirectory. Make GDataEntry ctor protected.
BUG=141494
TEST=unit tests.
Review URL: https://chromiumcodereview.appspot.com/10854083
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@151008 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-399 | GDataFileError GDataWapiFeedProcessor::FeedToFileResourceMap(
const std::vector<DocumentFeed*>& feed_list,
FileResourceIdMap* file_map,
int64* feed_changestamp,
FeedToFileResourceMapUmaStats* uma_stats) {
DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI));
DCHECK(uma_stats);
GDataFileError error = GDATA_FILE_OK;
uma_stats->num_regular_files = 0;
uma_stats->num_hosted_documents = 0;
uma_stats->num_files_with_entry_kind.clear();
for (size_t i = 0; i < feed_list.size(); ++i) {
const DocumentFeed* feed = feed_list[i];
if (i == 0) {
const Link* root_feed_upload_link =
feed->GetLinkByType(Link::RESUMABLE_CREATE_MEDIA);
if (root_feed_upload_link)
directory_service_->root()->set_upload_url(
root_feed_upload_link->href());
*feed_changestamp = feed->largest_changestamp();
DCHECK_GE(*feed_changestamp, 0);
}
for (ScopedVector<DocumentEntry>::const_iterator iter =
feed->entries().begin();
iter != feed->entries().end(); ++iter) {
DocumentEntry* doc = *iter;
GDataEntry* entry = directory_service_->FromDocumentEntry(doc);
if (!entry)
continue;
GDataFile* as_file = entry->AsGDataFile();
if (as_file) {
if (as_file->is_hosted_document())
++uma_stats->num_hosted_documents;
else
++uma_stats->num_regular_files;
++uma_stats->num_files_with_entry_kind[as_file->kind()];
}
FileResourceIdMap::iterator map_entry =
file_map->find(entry->resource_id());
if (map_entry != file_map->end()) {
LOG(WARNING) << "Found duplicate file "
<< map_entry->second->base_name();
delete map_entry->second;
file_map->erase(map_entry);
}
file_map->insert(
std::pair<std::string, GDataEntry*>(entry->resource_id(), entry));
}
}
if (error != GDATA_FILE_OK) {
STLDeleteValues(file_map);
}
return error;
}
| 171,496 |
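
The design choice in this commit, private constructors plus a single owning factory, can be sketched in a few lines; Entry and DirectoryService below paraphrase the classes named above and are not the Chromium API:

#include <memory>
#include <string>

// Sketch of the "private constructor + owning factory" idiom the commit
// message describes; names are paraphrased, not the real classes.
class Entry {
 public:
    const std::string& id() const { return id_; }
 private:
    friend class DirectoryService;       // only the service may construct
    explicit Entry(std::string id) : id_(std::move(id)) {}
    std::string id_;
};

class DirectoryService {
 public:
    // Centralizing creation lets the service attach bookkeeping in one place.
    std::unique_ptr<Entry> CreateEntry(std::string id) {
        return std::unique_ptr<Entry>(new Entry(std::move(id)));
    }
};

int main() {
    DirectoryService svc;
    auto e = svc.CreateEntry("resource-123");
    return e->id() == "resource-123" ? 0 : 1;
}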
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: scoped_ptr<GDataEntry> GDataDirectoryService::FromProtoString(
const std::string& serialized_proto) {
GDataEntryProto entry_proto;
if (!entry_proto.ParseFromString(serialized_proto))
return scoped_ptr<GDataEntry>();
scoped_ptr<GDataEntry> entry;
if (entry_proto.file_info().is_directory()) {
entry.reset(new GDataDirectory(NULL, this));
if (!entry->FromProto(entry_proto)) {
NOTREACHED() << "FromProto (directory) failed";
entry.reset();
}
} else {
scoped_ptr<GDataFile> file(new GDataFile(NULL, this));
if (file->FromProto(entry_proto)) {
entry.reset(file.release());
} else {
NOTREACHED() << "FromProto (file) failed";
}
}
return entry.Pass();
}
Commit Message: Remove parent* arg from GDataEntry ctor.
* Remove static FromDocumentEntry from GDataEntry, GDataFile, GDataDirectory. Replace with InitFromDocumentEntry.
* Move common code from GDataFile::InitFromDocumentEntry and GDataDirectory::InitFromDocumentEntry to GDataEntry::InitFromDocumentEntry.
* Add GDataDirectoryService::FromDocumentEntry and use this everywhere.
* Make ctors of GDataFile, GDataDirectory private, so these must be created by GDataDirectoryService's CreateGDataFile and
CreateGDataDirectory. Make GDataEntry ctor protected.
BUG=141494
TEST=unit tests.
Review URL: https://chromiumcodereview.appspot.com/10854083
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@151008 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-399 | scoped_ptr<GDataEntry> GDataDirectoryService::FromProtoString(
const std::string& serialized_proto) {
GDataEntryProto entry_proto;
if (!entry_proto.ParseFromString(serialized_proto))
return scoped_ptr<GDataEntry>();
scoped_ptr<GDataEntry> entry;
if (entry_proto.file_info().is_directory()) {
entry.reset(CreateGDataDirectory());
if (!entry->FromProto(entry_proto)) {
NOTREACHED() << "FromProto (directory) failed";
entry.reset();
}
} else {
scoped_ptr<GDataFile> file(CreateGDataFile());
if (file->FromProto(entry_proto)) {
entry.reset(file.release());
} else {
NOTREACHED() << "FromProto (file) failed";
}
}
return entry.Pass();
}
| 171,488 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void PageInfo::GetSafeBrowsingStatusByMaliciousContentStatus(
security_state::MaliciousContentStatus malicious_content_status,
PageInfo::SafeBrowsingStatus* status,
base::string16* details) {
switch (malicious_content_status) {
case security_state::MALICIOUS_CONTENT_STATUS_NONE:
NOTREACHED();
break;
case security_state::MALICIOUS_CONTENT_STATUS_MALWARE:
*status = PageInfo::SAFE_BROWSING_STATUS_MALWARE;
*details = l10n_util::GetStringUTF16(IDS_PAGE_INFO_MALWARE_DETAILS);
break;
case security_state::MALICIOUS_CONTENT_STATUS_SOCIAL_ENGINEERING:
*status = PageInfo::SAFE_BROWSING_STATUS_SOCIAL_ENGINEERING;
*details =
l10n_util::GetStringUTF16(IDS_PAGE_INFO_SOCIAL_ENGINEERING_DETAILS);
break;
case security_state::MALICIOUS_CONTENT_STATUS_UNWANTED_SOFTWARE:
*status = PageInfo::SAFE_BROWSING_STATUS_UNWANTED_SOFTWARE;
*details =
l10n_util::GetStringUTF16(IDS_PAGE_INFO_UNWANTED_SOFTWARE_DETAILS);
break;
case security_state::MALICIOUS_CONTENT_STATUS_SIGN_IN_PASSWORD_REUSE:
#if defined(FULL_SAFE_BROWSING)
*status = PageInfo::SAFE_BROWSING_STATUS_SIGN_IN_PASSWORD_REUSE;
*details = password_protection_service_
? password_protection_service_->GetWarningDetailText(
PasswordReuseEvent::SIGN_IN_PASSWORD)
: base::string16();
#endif
break;
case security_state::MALICIOUS_CONTENT_STATUS_ENTERPRISE_PASSWORD_REUSE:
#if defined(FULL_SAFE_BROWSING)
*status = PageInfo::SAFE_BROWSING_STATUS_ENTERPRISE_PASSWORD_REUSE;
*details = password_protection_service_
? password_protection_service_->GetWarningDetailText(
PasswordReuseEvent::ENTERPRISE_PASSWORD)
: base::string16();
#endif
break;
case security_state::MALICIOUS_CONTENT_STATUS_BILLING:
*status = PageInfo::SAFE_BROWSING_STATUS_BILLING;
*details = l10n_util::GetStringUTF16(IDS_PAGE_INFO_BILLING_DETAILS);
break;
}
}
Commit Message: Revert "PageInfo: decouple safe browsing and TLS statii."
This reverts commit ee95bc44021230127c7e6e9a8cf9d3820760f77c.
Reason for revert: suspect causing unit_tests failure on Linux MSAN Tests:
https://ci.chromium.org/p/chromium/builders/ci/Linux%20MSan%20Tests/17649
PageInfoBubbleViewTest.ChangingFlashSettingForSiteIsRemembered
PageInfoBubbleViewTest.EnsureCloseCallback
PageInfoBubbleViewTest.NotificationPermissionRevokeUkm
PageInfoBubbleViewTest.OpenPageInfoBubbleAfterNavigationStart
PageInfoBubbleViewTest.SetPermissionInfo
PageInfoBubbleViewTest.SetPermissionInfoForUsbGuard
PageInfoBubbleViewTest.SetPermissionInfoWithPolicyUsbDevices
PageInfoBubbleViewTest.SetPermissionInfoWithUsbDevice
PageInfoBubbleViewTest.SetPermissionInfoWithUserAndPolicyUsbDevices
PageInfoBubbleViewTest.UpdatingSiteDataRetainsLayout
https://logs.chromium.org/logs/chromium/buildbucket/cr-buildbucket.appspot.com/8909718923797040064/+/steps/unit_tests/0/logs/Deterministic_failure:_PageInfoBubbleViewTest.ChangingFlashSettingForSiteIsRemembered__status_CRASH_/0
[ RUN ] PageInfoBubbleViewTest.ChangingFlashSettingForSiteIsRemembered
==9056==WARNING: MemorySanitizer: use-of-uninitialized-value
#0 0x561baaab15ec in PageInfoUI::GetSecurityDescription(PageInfoUI::IdentityInfo const&) const ./../../chrome/browser/ui/page_info/page_info_ui.cc:250:3
#1 0x561bab6a1548 in PageInfoBubbleView::SetIdentityInfo(PageInfoUI::IdentityInfo const&) ./../../chrome/browser/ui/views/page_info/page_info_bubble_view.cc:802:7
#2 0x561baaaab3bb in PageInfo::PresentSiteIdentity() ./../../chrome/browser/ui/page_info/page_info.cc:969:8
#3 0x561baaaa0a21 in PageInfo::PageInfo(PageInfoUI*, Profile*, TabSpecificContentSettings*, content::WebContents*, GURL const&, security_state::SecurityLevel, security_state::VisibleSecurityState const&) ./../../chrome/browser/ui/page_info/page_info.cc:344:3
#4 0x561bab69b6dd in PageInfoBubbleView::PageInfoBubbleView(views::View*, gfx::Rect const&, aura::Window*, Profile*, content::WebContents*, GURL const&, security_state::SecurityLevel, security_state::VisibleSecurityState const&, base::OnceCallback<void (views::Widget::ClosedReason, bool)>) ./../../chrome/browser/ui/views/page_info/page_info_bubble_view.cc:576:24
...
Original change's description:
> PageInfo: decouple safe browsing and TLS statii.
>
> Previously, the Page Info bubble maintained a single variable to
> identify all reasons that a page might have a non-standard status. This
> lead to the display logic making assumptions about, for instance, the
> validity of a certificate when the page was flagged by Safe Browsing.
>
> This CL separates out the Safe Browsing status from the site identity
> status so that the page info bubble can inform the user that the site's
> certificate is invalid, even if it's also flagged by Safe Browsing.
>
> Bug: 869925
> Change-Id: I34107225b4206c8f32771ccd75e9367668d0a72b
> Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1662537
> Reviewed-by: Mustafa Emre Acer <[email protected]>
> Reviewed-by: Bret Sepulveda <[email protected]>
> Auto-Submit: Joe DeBlasio <[email protected]>
> Commit-Queue: Joe DeBlasio <[email protected]>
> Cr-Commit-Position: refs/heads/master@{#671847}
[email protected],[email protected],[email protected]
Change-Id: I8be652952e7276bcc9266124693352e467159cc4
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: 869925
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1673985
Reviewed-by: Takashi Sakamoto <[email protected]>
Commit-Queue: Takashi Sakamoto <[email protected]>
Cr-Commit-Position: refs/heads/master@{#671932}
CWE ID: CWE-311 | void PageInfo::GetSiteIdentityByMaliciousContentStatus(
security_state::MaliciousContentStatus malicious_content_status,
PageInfo::SiteIdentityStatus* status,
base::string16* details) {
switch (malicious_content_status) {
case security_state::MALICIOUS_CONTENT_STATUS_NONE:
NOTREACHED();
break;
case security_state::MALICIOUS_CONTENT_STATUS_MALWARE:
*status = PageInfo::SITE_IDENTITY_STATUS_MALWARE;
*details = l10n_util::GetStringUTF16(IDS_PAGE_INFO_MALWARE_DETAILS);
break;
case security_state::MALICIOUS_CONTENT_STATUS_SOCIAL_ENGINEERING:
*status = PageInfo::SITE_IDENTITY_STATUS_SOCIAL_ENGINEERING;
*details =
l10n_util::GetStringUTF16(IDS_PAGE_INFO_SOCIAL_ENGINEERING_DETAILS);
break;
case security_state::MALICIOUS_CONTENT_STATUS_UNWANTED_SOFTWARE:
*status = PageInfo::SITE_IDENTITY_STATUS_UNWANTED_SOFTWARE;
*details =
l10n_util::GetStringUTF16(IDS_PAGE_INFO_UNWANTED_SOFTWARE_DETAILS);
break;
case security_state::MALICIOUS_CONTENT_STATUS_SIGN_IN_PASSWORD_REUSE:
#if defined(FULL_SAFE_BROWSING)
*status = PageInfo::SITE_IDENTITY_STATUS_SIGN_IN_PASSWORD_REUSE;
*details = password_protection_service_
? password_protection_service_->GetWarningDetailText(
PasswordReuseEvent::SIGN_IN_PASSWORD)
: base::string16();
#endif
break;
case security_state::MALICIOUS_CONTENT_STATUS_ENTERPRISE_PASSWORD_REUSE:
#if defined(FULL_SAFE_BROWSING)
*status = PageInfo::SITE_IDENTITY_STATUS_ENTERPRISE_PASSWORD_REUSE;
*details = password_protection_service_
? password_protection_service_->GetWarningDetailText(
PasswordReuseEvent::ENTERPRISE_PASSWORD)
: base::string16();
#endif
break;
case security_state::MALICIOUS_CONTENT_STATUS_BILLING:
*status = PageInfo::SITE_IDENTITY_STATUS_BILLING;
*details = l10n_util::GetStringUTF16(IDS_PAGE_INFO_BILLING_DETAILS);
break;
}
}
| 172,435 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void registerURL(const char* url, const char* file, const char* mimeType)
{
registerMockedURLLoad(KURL(m_baseUrl, url), WebString::fromUTF8(file), m_folder, WebString::fromUTF8(mimeType));
}
Commit Message: Revert 162155 "This review merges the two existing page serializ..."
Change r162155 broke the world even though it was landed using the CQ.
> This review merges the two existing page serializers, WebPageSerializerImpl and
> PageSerializer, into one, PageSerializer. In addition to this it moves all
> the old tests from WebPageNewSerializerTest and WebPageSerializerTest to the
> PageSerializerTest structure and splits out one test for MHTML into a new
> MHTMLTest file.
>
> Saving as 'Webpage, Complete', 'Webpage, HTML Only' and as MHTML when the
> 'Save Page as MHTML' flag is enabled now uses the same code, and should thus
> have the same feature set. Meaning that both modes now should be a bit better.
>
> Detailed list of changes:
>
> - PageSerializerTest: Prepare for more DTD test
> - PageSerializerTest: Remove now unneccesary input image test
> - PageSerializerTest: Remove unused WebPageSerializer/Impl code
> - PageSerializerTest: Move data URI morph test
> - PageSerializerTest: Move data URI test
> - PageSerializerTest: Move namespace test
> - PageSerializerTest: Move SVG Image test
> - MHTMLTest: Move MHTML specific test to own test file
> - PageSerializerTest: Delete duplicate XML header test
> - PageSerializerTest: Move blank frame test
> - PageSerializerTest: Move CSS test
> - PageSerializerTest: Add frameset/frame test
> - PageSerializerTest: Move old iframe test
> - PageSerializerTest: Move old elements test
> - Use PageSerizer for saving web pages
> - PageSerializerTest: Test for rewriting links
> - PageSerializer: Add rewrite link accumulator
> - PageSerializer: Serialize images in iframes/frames src
> - PageSerializer: XHTML fix for meta tags
> - PageSerializer: Add presentation CSS
> - PageSerializer: Rename out parameter
>
> BUG=
> [email protected]
>
> Review URL: https://codereview.chromium.org/68613003
[email protected]
Review URL: https://codereview.chromium.org/73673003
git-svn-id: svn://svn.chromium.org/blink/trunk@162156 bbb929c8-8fbe-4397-9dbb-9b2b20218538
CWE ID: CWE-119 | void registerURL(const char* url, const char* file, const char* mimeType)
| 171,574 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void SyncBackendHost::Initialize(
SyncFrontend* frontend,
const GURL& sync_service_url,
const syncable::ModelTypeSet& types,
net::URLRequestContextGetter* baseline_context_getter,
const SyncCredentials& credentials,
bool delete_sync_data_folder) {
if (!core_thread_.Start())
return;
frontend_ = frontend;
DCHECK(frontend);
registrar_.workers[GROUP_DB] = new DatabaseModelWorker();
registrar_.workers[GROUP_UI] = new UIModelWorker();
registrar_.workers[GROUP_PASSIVE] = new ModelSafeWorker();
if (CommandLine::ForCurrentProcess()->HasSwitch(
switches::kEnableSyncTypedUrls) || types.count(syncable::TYPED_URLS)) {
registrar_.workers[GROUP_HISTORY] =
new HistoryModelWorker(
profile_->GetHistoryService(Profile::IMPLICIT_ACCESS));
}
for (syncable::ModelTypeSet::const_iterator it = types.begin();
it != types.end(); ++it) {
registrar_.routing_info[(*it)] = GROUP_PASSIVE;
}
PasswordStore* password_store =
profile_->GetPasswordStore(Profile::IMPLICIT_ACCESS);
if (password_store) {
registrar_.workers[GROUP_PASSWORD] =
new PasswordModelWorker(password_store);
} else {
LOG_IF(WARNING, types.count(syncable::PASSWORDS) > 0) << "Password store "
<< "not initialized, cannot sync passwords";
registrar_.routing_info.erase(syncable::PASSWORDS);
}
registrar_.routing_info[syncable::NIGORI] = GROUP_PASSIVE;
core_->CreateSyncNotifier(baseline_context_getter);
InitCore(Core::DoInitializeOptions(
sync_service_url,
MakeHttpBridgeFactory(baseline_context_getter),
credentials,
delete_sync_data_folder,
RestoreEncryptionBootstrapToken(),
false));
}
Commit Message: Enable HistoryModelWorker by default, now that bug 69561 is fixed.
BUG=69561
TEST=Run sync manually and run integration tests, sync should not crash.
Review URL: http://codereview.chromium.org/7016007
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@85211 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-399 | void SyncBackendHost::Initialize(
SyncFrontend* frontend,
const GURL& sync_service_url,
const syncable::ModelTypeSet& types,
net::URLRequestContextGetter* baseline_context_getter,
const SyncCredentials& credentials,
bool delete_sync_data_folder) {
if (!core_thread_.Start())
return;
frontend_ = frontend;
DCHECK(frontend);
registrar_.workers[GROUP_DB] = new DatabaseModelWorker();
registrar_.workers[GROUP_UI] = new UIModelWorker();
registrar_.workers[GROUP_PASSIVE] = new ModelSafeWorker();
registrar_.workers[GROUP_HISTORY] = new HistoryModelWorker(
profile_->GetHistoryService(Profile::IMPLICIT_ACCESS));
for (syncable::ModelTypeSet::const_iterator it = types.begin();
it != types.end(); ++it) {
registrar_.routing_info[(*it)] = GROUP_PASSIVE;
}
PasswordStore* password_store =
profile_->GetPasswordStore(Profile::IMPLICIT_ACCESS);
if (password_store) {
registrar_.workers[GROUP_PASSWORD] =
new PasswordModelWorker(password_store);
} else {
LOG_IF(WARNING, types.count(syncable::PASSWORDS) > 0) << "Password store "
<< "not initialized, cannot sync passwords";
registrar_.routing_info.erase(syncable::PASSWORDS);
}
registrar_.routing_info[syncable::NIGORI] = GROUP_PASSIVE;
core_->CreateSyncNotifier(baseline_context_getter);
InitCore(Core::DoInitializeOptions(
sync_service_url,
MakeHttpBridgeFactory(baseline_context_getter),
credentials,
delete_sync_data_folder,
RestoreEncryptionBootstrapToken(),
false));
}
| 170,614 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static inline void set_socket_blocking(int s, int blocking)
{
int opts;
opts = fcntl(s, F_GETFL);
if (opts<0) APPL_TRACE_ERROR("set blocking (%s)", strerror(errno));
if(blocking)
opts &= ~O_NONBLOCK;
else opts |= O_NONBLOCK;
if (fcntl(s, F_SETFL, opts) < 0)
APPL_TRACE_ERROR("set blocking (%s)", strerror(errno));
}
Commit Message: DO NOT MERGE Fix potential DoS caused by delivering signal to BT process
Bug: 28885210
Change-Id: I63866d894bfca47464d6e42e3fb0357c4f94d360
Conflicts:
btif/co/bta_hh_co.c
btif/src/btif_core.c
Merge conflict resolution of ag/1161415 (referencing ag/1164670)
- Directly into mnc-mr2-release
CWE ID: CWE-284 | static inline void set_socket_blocking(int s, int blocking)
{
int opts;
opts = TEMP_FAILURE_RETRY(fcntl(s, F_GETFL));
if (opts<0) APPL_TRACE_ERROR("set blocking (%s)", strerror(errno));
if(blocking)
opts &= ~O_NONBLOCK;
else opts |= O_NONBLOCK;
if (TEMP_FAILURE_RETRY(fcntl(s, F_SETFL, opts)) < 0)
APPL_TRACE_ERROR("set blocking (%s)", strerror(errno));
}
| 173,466 |
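
TEMP_FAILURE_RETRY is glibc's macro for reissuing a syscall that fails with EINTR. The sketch below shows the same idea as a small C++ helper; retry_on_eintr and set_blocking are illustrative names, not part of the original code:

#include <cerrno>
#include <cstdio>
#include <fcntl.h>

// A minimal EINTR-retry helper in the spirit of TEMP_FAILURE_RETRY: a
// syscall interrupted by a signal fails with -1/EINTR and should simply
// be reissued, not treated as an error (the DoS vector the fix closes).
template <typename Fn>
auto retry_on_eintr(Fn fn) -> decltype(fn()) {
    decltype(fn()) rc;
    do {
        rc = fn();
    } while (rc == -1 && errno == EINTR);
    return rc;
}

static void set_blocking(int fd, bool blocking) {
    int opts = retry_on_eintr([&] { return fcntl(fd, F_GETFL); });
    if (opts < 0) { std::perror("F_GETFL"); return; }
    opts = blocking ? (opts & ~O_NONBLOCK) : (opts | O_NONBLOCK);
    if (retry_on_eintr([&] { return fcntl(fd, F_SETFL, opts); }) < 0)
        std::perror("F_SETFL");
}

int main() {
    set_blocking(0, true);  // stdin, as a harmless demonstration
}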
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static void die(const char *fmt, ...) {
va_list ap;
va_start(ap, fmt);
vprintf(fmt, ap);
if(fmt[strlen(fmt)-1] != '\n')
printf("\n");
exit(EXIT_FAILURE);
}
Commit Message: Merge Conflict Fix CL to lmp-mr1-release for ag/849478
DO NOT MERGE - libvpx: Pull from upstream
Current HEAD: 7105df53d7dc13d5e575bc8df714ec8d1da36b06
BUG=23452792
Change-Id: Ic78176fc369e0bacc71d423e0e2e6075d004aaec
CWE ID: CWE-119 | static void die(const char *fmt, ...) {
| 174,495 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: store_current_palette(png_store *ps, int *npalette)
{
/* This is an internal error (the call has been made outside a read
* operation.)
*/
if (ps->current == NULL)
store_log(ps, ps->pread, "no current stream for palette", 1);
/* The result may be null if there is no palette. */
*npalette = ps->current->npalette;
return ps->current->palette;
}
Commit Message: DO NOT MERGE Update libpng to 1.6.20
BUG:23265085
Change-Id: I85199805636d771f3597b691b63bc0bf46084833
(cherry picked from commit bbe98b40cda082024b669fa508931042eed18f82)
CWE ID: | store_current_palette(png_store *ps, int *npalette)
{
/* This is an internal error (the call has been made outside a read
* operation.)
*/
if (ps->current == NULL)
{
store_log(ps, ps->pread, "no current stream for palette", 1);
return NULL;
}
/* The result may be null if there is no palette. */
*npalette = ps->current->npalette;
return ps->current->palette;
}
| 173,703 |
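
The fix above is a plain null guard: report the internal error, then return a sentinel instead of falling through to the dereference. A self-contained sketch of the same shape; Stream and current_palette are illustrative, not libpng types:

#include <cstdio>

// Null-guard shape of the fix above: report the internal error, then
// return a sentinel instead of falling through to a dereference.
struct Stream {
    int npalette;
    const void* palette;
};

const void* current_palette(const Stream* cur, int* npalette) {
    if (cur == nullptr) {
        std::fprintf(stderr, "no current stream for palette\n");
        return nullptr;           // the early return the fix adds
    }
    *npalette = cur->npalette;
    return cur->palette;
}

int main() {
    int n = 0;
    return current_palette(nullptr, &n) == nullptr ? 0 : 1;
}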
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void ExtensionTtsPlatformImpl::clear_error() {
error_ = std::string();
}
Commit Message: Extend TTS extension API to support richer events returned from the engine
to the client. Previously we just had a completed event; this adds start,
word boundary, sentence boundary, and marker boundary. In addition,
interrupted and canceled, which were previously errors, now become events.
Mac and Windows implementations extended to support as many of these events
as possible.
BUG=67713
BUG=70198
BUG=75106
BUG=83404
TEST=Updates all TTS API tests to be event-based, and adds new tests.
Review URL: http://codereview.chromium.org/6792014
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@91665 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-20 | void ExtensionTtsPlatformImpl::clear_error() {
| 170,393 |
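
The event set the commit message lists can be sketched as a plain enum plus a dispatcher; the names below approximate the ones described and are not the actual extension API:

#include <cstdio>

// The richer event set described above, sketched as an enum plus a
// dispatcher; the names approximate the ones listed in the message.
enum class TtsEvent {
    kStart, kWordBoundary, kSentenceBoundary, kMarker,
    kCompleted, kInterrupted, kCanceled, kError,
};

// Interrupted/canceled are reported as plain events here, not as errors,
// which is the behavioral change the commit message calls out.
const char* describe(TtsEvent e) {
    switch (e) {
        case TtsEvent::kStart:            return "started";
        case TtsEvent::kWordBoundary:     return "word boundary";
        case TtsEvent::kSentenceBoundary: return "sentence boundary";
        case TtsEvent::kMarker:           return "marker";
        case TtsEvent::kCompleted:        return "completed";
        case TtsEvent::kInterrupted:      return "interrupted (not an error)";
        case TtsEvent::kCanceled:         return "canceled (not an error)";
        case TtsEvent::kError:            return "error";
    }
    return "unknown";
}

int main() { std::printf("%s\n", describe(TtsEvent::kInterrupted)); }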
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void RegisterProperties(const ImePropertyList& prop_list) {
current_ime_properties_ = prop_list;
FOR_EACH_OBSERVER(Observer, observers_,
PropertyListChanged(this,
current_ime_properties_));
}
Commit Message: Remove use of libcros from InputMethodLibrary.
BUG=chromium-os:16238
TEST==confirm that input methods work as before on the netbook. Also confirm that the chrome builds and works on the desktop as before.
Review URL: http://codereview.chromium.org/7003086
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@89142 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-399 | void RegisterProperties(const input_method::ImePropertyList& prop_list) {
current_ime_properties_ = prop_list;
FOR_EACH_OBSERVER(InputMethodLibrary::Observer, observers_,
PropertyListChanged(this,
current_ime_properties_));
}
| 170,501 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: ieee802_15_4_if_print(netdissect_options *ndo,
const struct pcap_pkthdr *h, const u_char *p)
{
u_int caplen = h->caplen;
u_int hdrlen;
uint16_t fc;
uint8_t seq;
uint16_t panid = 0;
if (caplen < 3) {
ND_PRINT((ndo, "[|802.15.4]"));
return caplen;
}
hdrlen = 3;
fc = EXTRACT_LE_16BITS(p);
seq = EXTRACT_LE_8BITS(p + 2);
p += 3;
caplen -= 3;
ND_PRINT((ndo,"IEEE 802.15.4 %s packet ", ftypes[FC_FRAME_TYPE(fc)]));
if (ndo->ndo_vflag)
ND_PRINT((ndo,"seq %02x ", seq));
/*
* Destination address and PAN ID, if present.
*/
switch (FC_DEST_ADDRESSING_MODE(fc)) {
case FC_ADDRESSING_MODE_NONE:
if (fc & FC_PAN_ID_COMPRESSION) {
/*
* PAN ID compression; this requires that both
* the source and destination addresses be present,
* but the destination address is missing.
*/
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
if (ndo->ndo_vflag)
ND_PRINT((ndo,"none "));
break;
case FC_ADDRESSING_MODE_RESERVED:
if (ndo->ndo_vflag)
ND_PRINT((ndo,"reserved destination addressing mode"));
return hdrlen;
case FC_ADDRESSING_MODE_SHORT:
if (caplen < 2) {
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
panid = EXTRACT_LE_16BITS(p);
p += 2;
caplen -= 2;
hdrlen += 2;
if (caplen < 2) {
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
if (ndo->ndo_vflag)
ND_PRINT((ndo,"%04x:%04x ", panid, EXTRACT_LE_16BITS(p + 2)));
p += 2;
caplen -= 2;
hdrlen += 2;
break;
case FC_ADDRESSING_MODE_LONG:
if (caplen < 2) {
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
panid = EXTRACT_LE_16BITS(p);
p += 2;
caplen -= 2;
hdrlen += 2;
if (caplen < 8) {
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
if (ndo->ndo_vflag)
ND_PRINT((ndo,"%04x:%s ", panid, le64addr_string(ndo, p)));
p += 8;
caplen -= 8;
hdrlen += 8;
break;
}
if (ndo->ndo_vflag)
ND_PRINT((ndo,"< "));
/*
* Source address and PAN ID, if present.
*/
switch (FC_SRC_ADDRESSING_MODE(fc)) {
case FC_ADDRESSING_MODE_NONE:
if (ndo->ndo_vflag)
ND_PRINT((ndo,"none "));
break;
case FC_ADDRESSING_MODE_RESERVED:
if (ndo->ndo_vflag)
ND_PRINT((ndo,"reserved source addressing mode"));
return 0;
case FC_ADDRESSING_MODE_SHORT:
if (!(fc & FC_PAN_ID_COMPRESSION)) {
/*
* The source PAN ID is not compressed out, so
* fetch it. (Otherwise, we'll use the destination
* PAN ID, fetched above.)
*/
if (caplen < 2) {
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
panid = EXTRACT_LE_16BITS(p);
p += 2;
caplen -= 2;
hdrlen += 2;
}
if (caplen < 2) {
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
if (ndo->ndo_vflag)
ND_PRINT((ndo,"%04x:%04x ", panid, EXTRACT_LE_16BITS(p)));
p += 2;
caplen -= 2;
hdrlen += 2;
break;
case FC_ADDRESSING_MODE_LONG:
if (!(fc & FC_PAN_ID_COMPRESSION)) {
/*
* The source PAN ID is not compressed out, so
* fetch it. (Otherwise, we'll use the destination
* PAN ID, fetched above.)
*/
if (caplen < 2) {
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
panid = EXTRACT_LE_16BITS(p);
p += 2;
caplen -= 2;
hdrlen += 2;
}
if (caplen < 8) {
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
if (ndo->ndo_vflag)
ND_PRINT((ndo,"%04x:%s ", panid, le64addr_string(ndo, p)));
p += 8;
caplen -= 8;
hdrlen += 8;
break;
}
if (!ndo->ndo_suppress_default_print)
ND_DEFAULTPRINT(p, caplen);
return hdrlen;
}
Commit Message: CVE-2017-13000/IEEE 802.15.4: Fix bug introduced two fixes prior.
We've already advanced the pointer past the PAN ID, if present; it now
points to the address, so don't add 2 to it.
This fixes a buffer over-read discovered by Forcepoint's security
researchers Otto Airamo & Antti Levomäki.
Add a test using the capture file supplied by the reporter(s).
CWE ID: CWE-125 | ieee802_15_4_if_print(netdissect_options *ndo,
const struct pcap_pkthdr *h, const u_char *p)
{
u_int caplen = h->caplen;
u_int hdrlen;
uint16_t fc;
uint8_t seq;
uint16_t panid = 0;
if (caplen < 3) {
ND_PRINT((ndo, "[|802.15.4]"));
return caplen;
}
hdrlen = 3;
fc = EXTRACT_LE_16BITS(p);
seq = EXTRACT_LE_8BITS(p + 2);
p += 3;
caplen -= 3;
ND_PRINT((ndo,"IEEE 802.15.4 %s packet ", ftypes[FC_FRAME_TYPE(fc)]));
if (ndo->ndo_vflag)
ND_PRINT((ndo,"seq %02x ", seq));
/*
* Destination address and PAN ID, if present.
*/
switch (FC_DEST_ADDRESSING_MODE(fc)) {
case FC_ADDRESSING_MODE_NONE:
if (fc & FC_PAN_ID_COMPRESSION) {
/*
* PAN ID compression; this requires that both
* the source and destination addresses be present,
* but the destination address is missing.
*/
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
if (ndo->ndo_vflag)
ND_PRINT((ndo,"none "));
break;
case FC_ADDRESSING_MODE_RESERVED:
if (ndo->ndo_vflag)
ND_PRINT((ndo,"reserved destination addressing mode"));
return hdrlen;
case FC_ADDRESSING_MODE_SHORT:
if (caplen < 2) {
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
panid = EXTRACT_LE_16BITS(p);
p += 2;
caplen -= 2;
hdrlen += 2;
if (caplen < 2) {
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
if (ndo->ndo_vflag)
ND_PRINT((ndo,"%04x:%04x ", panid, EXTRACT_LE_16BITS(p)));
p += 2;
caplen -= 2;
hdrlen += 2;
break;
case FC_ADDRESSING_MODE_LONG:
if (caplen < 2) {
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
panid = EXTRACT_LE_16BITS(p);
p += 2;
caplen -= 2;
hdrlen += 2;
if (caplen < 8) {
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
if (ndo->ndo_vflag)
ND_PRINT((ndo,"%04x:%s ", panid, le64addr_string(ndo, p)));
p += 8;
caplen -= 8;
hdrlen += 8;
break;
}
if (ndo->ndo_vflag)
ND_PRINT((ndo,"< "));
/*
* Source address and PAN ID, if present.
*/
switch (FC_SRC_ADDRESSING_MODE(fc)) {
case FC_ADDRESSING_MODE_NONE:
if (ndo->ndo_vflag)
ND_PRINT((ndo,"none "));
break;
case FC_ADDRESSING_MODE_RESERVED:
if (ndo->ndo_vflag)
ND_PRINT((ndo,"reserved source addressing mode"));
return 0;
case FC_ADDRESSING_MODE_SHORT:
if (!(fc & FC_PAN_ID_COMPRESSION)) {
/*
* The source PAN ID is not compressed out, so
* fetch it. (Otherwise, we'll use the destination
* PAN ID, fetched above.)
*/
if (caplen < 2) {
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
panid = EXTRACT_LE_16BITS(p);
p += 2;
caplen -= 2;
hdrlen += 2;
}
if (caplen < 2) {
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
if (ndo->ndo_vflag)
ND_PRINT((ndo,"%04x:%04x ", panid, EXTRACT_LE_16BITS(p)));
p += 2;
caplen -= 2;
hdrlen += 2;
break;
case FC_ADDRESSING_MODE_LONG:
if (!(fc & FC_PAN_ID_COMPRESSION)) {
/*
* The source PAN ID is not compressed out, so
* fetch it. (Otherwise, we'll use the destination
* PAN ID, fetched above.)
*/
if (caplen < 2) {
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
panid = EXTRACT_LE_16BITS(p);
p += 2;
caplen -= 2;
hdrlen += 2;
}
if (caplen < 8) {
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
if (ndo->ndo_vflag)
ND_PRINT((ndo,"%04x:%s ", panid, le64addr_string(ndo, p)));
p += 8;
caplen -= 8;
hdrlen += 8;
break;
}
if (!ndo->ndo_suppress_default_print)
ND_DEFAULTPRINT(p, caplen);
return hdrlen;
}
| 167,907 |
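
The one-character fix here (reading at p instead of p + 2) follows from cursor-style parsing: once the cursor has been advanced past a field, later reads must use the cursor directly, never a stale offset. A hedged sketch; Cursor and u16le are invented names:

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Cursor-style parser sketch: every successful read advances the cursor,
// so a later read must never re-apply an offset that was already consumed;
// doing so is exactly the "+ 2" over-read the commit message describes.
struct Cursor {
    const uint8_t* p;
    size_t left;
    bool u16le(uint16_t* out) {
        if (left < 2) return false;           // bounds check before the read
        *out = static_cast<uint16_t>(p[0] | (p[1] << 8));
        p += 2; left -= 2;                    // advance exactly once
        return true;
    }
};

int main() {
    const uint8_t frame[] = { 0x34, 0x12, 0x78, 0x56 };
    Cursor c{ frame, sizeof(frame) };
    uint16_t panid = 0, addr = 0;
    if (c.u16le(&panid) && c.u16le(&addr))    // second read uses the cursor
        std::printf("pan %04x addr %04x\n", (unsigned)panid, (unsigned)addr);
}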
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: bool ContainerNode::replaceChild(PassRefPtr<Node> newChild, Node* oldChild, ExceptionCode& ec, bool shouldLazyAttach)
{
ASSERT(refCount() || parentOrHostNode());
RefPtr<Node> protect(this);
ec = 0;
if (oldChild == newChild) // nothing to do
return true;
checkReplaceChild(newChild.get(), oldChild, ec);
if (ec)
return false;
if (!oldChild || oldChild->parentNode() != this) {
ec = NOT_FOUND_ERR;
return false;
}
#if ENABLE(MUTATION_OBSERVERS)
ChildListMutationScope mutation(this);
#endif
RefPtr<Node> next = oldChild->nextSibling();
RefPtr<Node> removedChild = oldChild;
removeChild(oldChild, ec);
if (ec)
return false;
if (next && (next->previousSibling() == newChild || next == newChild)) // nothing to do
return true;
checkReplaceChild(newChild.get(), oldChild, ec);
if (ec)
return false;
NodeVector targets;
collectChildrenAndRemoveFromOldParent(newChild.get(), targets, ec);
if (ec)
return false;
InspectorInstrumentation::willInsertDOMNode(document(), this);
for (NodeVector::const_iterator it = targets.begin(); it != targets.end(); ++it) {
Node* child = it->get();
if (next && next->parentNode() != this)
break;
if (child->parentNode())
break;
treeScope()->adoptIfNeeded(child);
forbidEventDispatch();
if (next)
insertBeforeCommon(next.get(), child);
else
appendChildToContainer(child, this);
allowEventDispatch();
updateTreeAfterInsertion(this, child, shouldLazyAttach);
}
dispatchSubtreeModifiedEvent();
return true;
}
Commit Message: https://bugs.webkit.org/show_bug.cgi?id=93587
Node::replaceChild() can create bad DOM topology with MutationEvent, Part 2
Reviewed by Kent Tamura.
Source/WebCore:
This is a followup of r124156. replaceChild() has yet another hidden
MutationEvent trigger. This change added a guard for it.
Test: fast/events/mutation-during-replace-child-2.html
* dom/ContainerNode.cpp:
(WebCore::ContainerNode::replaceChild):
LayoutTests:
* fast/events/mutation-during-replace-child-2-expected.txt: Added.
* fast/events/mutation-during-replace-child-2.html: Added.
git-svn-id: svn://svn.chromium.org/blink/trunk@125237 bbb929c8-8fbe-4397-9dbb-9b2b20218538
CWE ID: CWE-399 | bool ContainerNode::replaceChild(PassRefPtr<Node> newChild, Node* oldChild, ExceptionCode& ec, bool shouldLazyAttach)
{
ASSERT(refCount() || parentOrHostNode());
RefPtr<Node> protect(this);
ec = 0;
if (oldChild == newChild) // nothing to do
return true;
checkReplaceChild(newChild.get(), oldChild, ec);
if (ec)
return false;
if (!oldChild || oldChild->parentNode() != this) {
ec = NOT_FOUND_ERR;
return false;
}
#if ENABLE(MUTATION_OBSERVERS)
ChildListMutationScope mutation(this);
#endif
RefPtr<Node> next = oldChild->nextSibling();
RefPtr<Node> removedChild = oldChild;
removeChild(oldChild, ec);
if (ec)
return false;
if (next && (next->previousSibling() == newChild || next == newChild)) // nothing to do
return true;
checkReplaceChild(newChild.get(), oldChild, ec);
if (ec)
return false;
NodeVector targets;
collectChildrenAndRemoveFromOldParent(newChild.get(), targets, ec);
if (ec)
return false;
// Does this yet another check because collectChildrenAndRemoveFromOldParent() fires a MutationEvent.
checkReplaceChild(newChild.get(), oldChild, ec);
if (ec)
return false;
InspectorInstrumentation::willInsertDOMNode(document(), this);
for (NodeVector::const_iterator it = targets.begin(); it != targets.end(); ++it) {
Node* child = it->get();
if (next && next->parentNode() != this)
break;
if (child->parentNode())
break;
treeScope()->adoptIfNeeded(child);
forbidEventDispatch();
if (next)
insertBeforeCommon(next.get(), child);
else
appendChildToContainer(child, this);
allowEventDispatch();
updateTreeAfterInsertion(this, child, shouldLazyAttach);
}
dispatchSubtreeModifiedEvent();
return true;
}
| 170,321 |
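
The rule behind this fix is general: any step that can dispatch user script may invalidate earlier precondition checks, so the checks must be repeated afterwards. A minimal sketch with the listener modeled as a plain callback; Tree, replace_child and mutation_hook are illustrative, not the DOM API:

#include <cstdio>
#include <functional>

// Sketch of "re-validate after running arbitrary callbacks": any step that
// can dispatch user script (modeled here as a std::function hook) may
// invalidate earlier precondition checks, so the checks must be repeated.
struct Tree {
    int child_count = 1;
    std::function<void(Tree&)> mutation_hook;  // stands in for MutationEvent listeners

    bool check_replace_ok() const { return child_count > 0; }

    bool replace_child() {
        if (!check_replace_ok()) return false;    // initial check
        if (mutation_hook) mutation_hook(*this);  // may run user code!
        if (!check_replace_ok()) return false;    // the re-check the fix adds
        std::printf("replaced safely\n");
        return true;
    }
};

int main() {
    Tree t;
    t.mutation_hook = [](Tree& tree) { tree.child_count = 0; };  // hostile listener
    return t.replace_child() ? 1 : 0;  // returns 0: the re-check caught it
}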
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: stringprep (char *in,
size_t maxlen,
Stringprep_profile_flags flags,
const Stringprep_profile * profile)
{
int rc;
char *utf8 = NULL;
uint32_t *ucs4 = NULL;
size_t ucs4len, maxucs4len, adducs4len = 50;
do
{
uint32_t *newp;
free (ucs4);
ucs4 = stringprep_utf8_to_ucs4 (in, -1, &ucs4len);
maxucs4len = ucs4len + adducs4len;
newp = realloc (ucs4, maxucs4len * sizeof (uint32_t));
if (!newp)
return STRINGPREP_MALLOC_ERROR;
      ucs4 = newp;
      rc = stringprep_4i (ucs4, &ucs4len, maxucs4len, flags, profile);
      adducs4len += 50;
    }
  while (rc == STRINGPREP_TOO_SMALL_BUFFER);
Commit Message:
CWE ID: CWE-119 | stringprep (char *in,
size_t maxlen,
Stringprep_profile_flags flags,
const Stringprep_profile * profile)
{
int rc;
char *utf8 = NULL;
uint32_t *ucs4 = NULL;
size_t ucs4len, maxucs4len, adducs4len = 50;
do
{
uint32_t *newp;
free (ucs4);
ucs4 = stringprep_utf8_to_ucs4 (in, -1, &ucs4len);
if (ucs4 == NULL)
return STRINGPREP_ICONV_ERROR;
maxucs4len = ucs4len + adducs4len;
newp = realloc (ucs4, maxucs4len * sizeof (uint32_t));
if (!newp)
return STRINGPREP_MALLOC_ERROR;
      ucs4 = newp;
      rc = stringprep_4i (ucs4, &ucs4len, maxucs4len, flags, profile);
      adducs4len += 50;
    }
  while (rc == STRINGPREP_TOO_SMALL_BUFFER);
| 164,762 |
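
The function above combines two failure points: the conversion may return NULL, and realloc may fail mid-grow. A hedged, self-contained sketch of both checks; fake_convert and process are stand-ins, not libidn API:

#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>

// Toy conversion that, like the real one, may return NULL on failure.
static uint32_t* fake_convert(const char* in, size_t* outlen) {
    *outlen = std::strlen(in);
    auto* p = static_cast<uint32_t*>(std::calloc(*outlen + 1, sizeof(uint32_t)));
    if (p)
        for (size_t i = 0; i < *outlen; i++)
            p[i] = static_cast<unsigned char>(in[i]);
    return p;
}

int process(const char* in) {
    size_t len = 0, grow = 50;
    uint32_t* buf = fake_convert(in, &len);
    if (!buf)
        return -1;                                // the check the fix adds
    uint32_t* bigger = static_cast<uint32_t*>(
        std::realloc(buf, (len + grow) * sizeof(uint32_t)));
    if (!bigger) {
        std::free(buf);                           // keep old pointer to free it
        return -2;
    }
    buf = bigger;
    std::printf("processed %zu code points\n", len);
    std::free(buf);
    return 0;
}

int main() { return process("example"); }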
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static PHP_NAMED_FUNCTION(zif_zip_entry_read)
{
zval * zip_entry;
zend_long len = 0;
zip_read_rsrc * zr_rsrc;
zend_string *buffer;
int n = 0;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "r|l", &zip_entry, &len) == FAILURE) {
return;
}
if ((zr_rsrc = (zip_read_rsrc *)zend_fetch_resource(Z_RES_P(zip_entry), le_zip_entry_name, le_zip_entry)) == NULL) {
RETURN_FALSE;
}
if (len <= 0) {
len = 1024;
}
if (zr_rsrc->zf) {
buffer = zend_string_alloc(len, 0);
n = zip_fread(zr_rsrc->zf, ZSTR_VAL(buffer), ZSTR_LEN(buffer));
if (n > 0) {
ZSTR_VAL(buffer)[n] = '\0';
ZSTR_LEN(buffer) = n;
RETURN_NEW_STR(buffer);
} else {
zend_string_free(buffer);
RETURN_EMPTY_STRING()
}
} else {
RETURN_FALSE;
}
}
Commit Message: Fix bug #71923 - integer overflow in ZipArchive::getFrom*
CWE ID: CWE-190 | static PHP_NAMED_FUNCTION(zif_zip_entry_read)
{
zval * zip_entry;
zend_long len = 0;
zip_read_rsrc * zr_rsrc;
zend_string *buffer;
int n = 0;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "r|l", &zip_entry, &len) == FAILURE) {
return;
}
if ((zr_rsrc = (zip_read_rsrc *)zend_fetch_resource(Z_RES_P(zip_entry), le_zip_entry_name, le_zip_entry)) == NULL) {
RETURN_FALSE;
}
if (len <= 0) {
len = 1024;
}
if (zr_rsrc->zf) {
buffer = zend_string_safe_alloc(1, len, 0, 0);
n = zip_fread(zr_rsrc->zf, ZSTR_VAL(buffer), ZSTR_LEN(buffer));
if (n > 0) {
ZSTR_VAL(buffer)[n] = '\0';
ZSTR_LEN(buffer) = n;
RETURN_NEW_STR(buffer);
} else {
zend_string_free(buffer);
RETURN_EMPTY_STRING()
}
} else {
RETURN_FALSE;
}
}
| 167,380 |
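
zend_string_safe_alloc() differs from zend_string_alloc() by checking the size arithmetic for overflow before allocating. The sketch below shows that check in isolation; safe_alloc is an invented name, not the Zend API:

#include <cstdio>
#include <cstdlib>
#include <limits>

// Illustrative overflow-checked allocation size: validate count * size +
// extra before calling the allocator, because a large attacker-controlled
// length can wrap around size_t and yield a buffer far smaller than intended.
void* safe_alloc(std::size_t count, std::size_t size, std::size_t extra) {
    if (size != 0 &&
        count > (std::numeric_limits<std::size_t>::max() - extra) / size)
        return nullptr;                       // arithmetic would overflow
    return std::malloc(count * size + extra);
}

int main() {
    void* ok  = safe_alloc(1024, 1, 1);       // normal request
    void* bad = safe_alloc(std::numeric_limits<std::size_t>::max(), 2, 1);
    std::printf("ok=%p bad=%p\n", ok, bad);   // bad is a null pointer
    std::free(ok);
}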
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: WORD32 ih264d_video_decode(iv_obj_t *dec_hdl, void *pv_api_ip, void *pv_api_op)
{
/* ! */
dec_struct_t * ps_dec = (dec_struct_t *)(dec_hdl->pv_codec_handle);
WORD32 i4_err_status = 0;
UWORD8 *pu1_buf = NULL;
WORD32 buflen;
UWORD32 u4_max_ofst, u4_length_of_start_code = 0;
UWORD32 bytes_consumed = 0;
UWORD32 cur_slice_is_nonref = 0;
UWORD32 u4_next_is_aud;
UWORD32 u4_first_start_code_found = 0;
WORD32 ret = 0,api_ret_value = IV_SUCCESS;
WORD32 header_data_left = 0,frame_data_left = 0;
UWORD8 *pu1_bitstrm_buf;
ivd_video_decode_ip_t *ps_dec_ip;
ivd_video_decode_op_t *ps_dec_op;
ithread_set_name((void*)"Parse_thread");
ps_dec_ip = (ivd_video_decode_ip_t *)pv_api_ip;
ps_dec_op = (ivd_video_decode_op_t *)pv_api_op;
{
UWORD32 u4_size;
u4_size = ps_dec_op->u4_size;
memset(ps_dec_op, 0, sizeof(ivd_video_decode_op_t));
ps_dec_op->u4_size = u4_size;
}
ps_dec->pv_dec_out = ps_dec_op;
if(ps_dec->init_done != 1)
{
return IV_FAIL;
}
/*Data memory barries instruction,so that bitstream write by the application is complete*/
DATA_SYNC();
if(0 == ps_dec->u1_flushfrm)
{
if(ps_dec_ip->pv_stream_buffer == NULL)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DEC_FRM_BS_BUF_NULL;
return IV_FAIL;
}
if(ps_dec_ip->u4_num_Bytes <= 0)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DEC_NUMBYTES_INV;
return IV_FAIL;
}
}
ps_dec->u1_pic_decode_done = 0;
ps_dec_op->u4_num_bytes_consumed = 0;
ps_dec->ps_out_buffer = NULL;
if(ps_dec_ip->u4_size
>= offsetof(ivd_video_decode_ip_t, s_out_buffer))
ps_dec->ps_out_buffer = &ps_dec_ip->s_out_buffer;
ps_dec->u4_fmt_conv_cur_row = 0;
ps_dec->u4_output_present = 0;
ps_dec->s_disp_op.u4_error_code = 1;
ps_dec->u4_fmt_conv_num_rows = FMT_CONV_NUM_ROWS;
if(0 == ps_dec->u4_share_disp_buf
&& ps_dec->i4_decode_header == 0)
{
UWORD32 i;
if(ps_dec->ps_out_buffer->u4_num_bufs == 0)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DISP_FRM_ZERO_OP_BUFS;
return IV_FAIL;
}
for(i = 0; i < ps_dec->ps_out_buffer->u4_num_bufs; i++)
{
if(ps_dec->ps_out_buffer->pu1_bufs[i] == NULL)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DISP_FRM_OP_BUF_NULL;
return IV_FAIL;
}
if(ps_dec->ps_out_buffer->u4_min_out_buf_size[i] == 0)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |=
IVD_DISP_FRM_ZERO_OP_BUF_SIZE;
return IV_FAIL;
}
}
}
if(ps_dec->u4_total_frames_decoded >= NUM_FRAMES_LIMIT)
{
ps_dec_op->u4_error_code = ERROR_FRAME_LIMIT_OVER;
return IV_FAIL;
}
/* ! */
ps_dec->u4_ts = ps_dec_ip->u4_ts;
ps_dec_op->u4_error_code = 0;
ps_dec_op->e_pic_type = -1;
ps_dec_op->u4_output_present = 0;
ps_dec_op->u4_frame_decoded_flag = 0;
ps_dec->i4_frametype = -1;
ps_dec->i4_content_type = -1;
/*
* For field pictures, set the bottom and top picture decoded u4_flag correctly.
*/
{
if((TOP_FIELD_ONLY | BOT_FIELD_ONLY) == ps_dec->u1_top_bottom_decoded)
{
ps_dec->u1_top_bottom_decoded = 0;
}
}
ps_dec->u4_slice_start_code_found = 0;
/* In case the deocder is not in flush mode(in shared mode),
then decoder has to pick up a buffer to write current frame.
Check if a frame is available in such cases */
if(ps_dec->u1_init_dec_flag == 1 && ps_dec->u4_share_disp_buf == 1
&& ps_dec->u1_flushfrm == 0)
{
UWORD32 i;
WORD32 disp_avail = 0, free_id;
/* Check if at least one buffer is available with the codec */
/* If not then return to application with error */
for(i = 0; i < ps_dec->u1_pic_bufs; i++)
{
if(0 == ps_dec->u4_disp_buf_mapping[i]
|| 1 == ps_dec->u4_disp_buf_to_be_freed[i])
{
disp_avail = 1;
break;
}
}
if(0 == disp_avail)
{
/* If something is queued for display wait for that buffer to be returned */
ps_dec_op->u4_error_code = IVD_DEC_REF_BUF_NULL;
ps_dec_op->u4_error_code |= (1 << IVD_UNSUPPORTEDPARAM);
return (IV_FAIL);
}
while(1)
{
pic_buffer_t *ps_pic_buf;
ps_pic_buf = (pic_buffer_t *)ih264_buf_mgr_get_next_free(
(buf_mgr_t *)ps_dec->pv_pic_buf_mgr, &free_id);
if(ps_pic_buf == NULL)
{
UWORD32 i, display_queued = 0;
/* check if any buffer was given for display which is not returned yet */
for(i = 0; i < (MAX_DISP_BUFS_NEW); i++)
{
if(0 != ps_dec->u4_disp_buf_mapping[i])
{
display_queued = 1;
break;
}
}
/* If some buffer is queued for display, then codec has to singal an error and wait
for that buffer to be returned.
If nothing is queued for display then codec has ownership of all display buffers
and it can reuse any of the existing buffers and continue decoding */
if(1 == display_queued)
{
/* If something is queued for display wait for that buffer to be returned */
ps_dec_op->u4_error_code = IVD_DEC_REF_BUF_NULL;
ps_dec_op->u4_error_code |= (1
<< IVD_UNSUPPORTEDPARAM);
return (IV_FAIL);
}
}
else
{
/* If the buffer is with display, then mark it as in use and then look for a buffer again */
if(1 == ps_dec->u4_disp_buf_mapping[free_id])
{
ih264_buf_mgr_set_status(
(buf_mgr_t *)ps_dec->pv_pic_buf_mgr,
free_id,
BUF_MGR_IO);
}
else
{
/**
* Found a free buffer for present call. Release it now.
* Will be again obtained later.
*/
ih264_buf_mgr_release((buf_mgr_t *)ps_dec->pv_pic_buf_mgr,
free_id,
BUF_MGR_IO);
break;
}
}
}
}
if(ps_dec->u1_flushfrm && ps_dec->u1_init_dec_flag)
{
ih264d_get_next_display_field(ps_dec, ps_dec->ps_out_buffer,
&(ps_dec->s_disp_op));
if(0 == ps_dec->s_disp_op.u4_error_code)
{
ps_dec->u4_fmt_conv_cur_row = 0;
ps_dec->u4_fmt_conv_num_rows = ps_dec->s_disp_frame_info.u4_y_ht;
ih264d_format_convert(ps_dec, &(ps_dec->s_disp_op),
ps_dec->u4_fmt_conv_cur_row,
ps_dec->u4_fmt_conv_num_rows);
ps_dec->u4_fmt_conv_cur_row += ps_dec->u4_fmt_conv_num_rows;
ps_dec->u4_output_present = 1;
}
ih264d_release_display_field(ps_dec, &(ps_dec->s_disp_op));
ps_dec_op->u4_pic_wd = (UWORD32)ps_dec->u2_disp_width;
ps_dec_op->u4_pic_ht = (UWORD32)ps_dec->u2_disp_height;
ps_dec_op->u4_new_seq = 0;
ps_dec_op->u4_output_present = ps_dec->u4_output_present;
ps_dec_op->u4_progressive_frame_flag =
ps_dec->s_disp_op.u4_progressive_frame_flag;
ps_dec_op->e_output_format =
ps_dec->s_disp_op.e_output_format;
ps_dec_op->s_disp_frm_buf = ps_dec->s_disp_op.s_disp_frm_buf;
ps_dec_op->e4_fld_type = ps_dec->s_disp_op.e4_fld_type;
ps_dec_op->u4_ts = ps_dec->s_disp_op.u4_ts;
ps_dec_op->u4_disp_buf_id = ps_dec->s_disp_op.u4_disp_buf_id;
/*In the case of flush ,since no frame is decoded set pic type as invalid*/
ps_dec_op->u4_is_ref_flag = -1;
ps_dec_op->e_pic_type = IV_NA_FRAME;
ps_dec_op->u4_frame_decoded_flag = 0;
if(0 == ps_dec->s_disp_op.u4_error_code)
{
return (IV_SUCCESS);
}
else
return (IV_FAIL);
}
if(ps_dec->u1_res_changed == 1)
{
/*if resolution has changed and all buffers have been flushed, reset decoder*/
ih264d_init_decoder(ps_dec);
}
ps_dec->u4_prev_nal_skipped = 0;
ps_dec->u2_cur_mb_addr = 0;
ps_dec->u2_total_mbs_coded = 0;
ps_dec->u2_cur_slice_num = 0;
ps_dec->cur_dec_mb_num = 0;
ps_dec->cur_recon_mb_num = 0;
ps_dec->u4_first_slice_in_pic = 2;
ps_dec->u1_first_pb_nal_in_pic = 1;
ps_dec->u1_slice_header_done = 0;
ps_dec->u1_dangling_field = 0;
ps_dec->u4_dec_thread_created = 0;
ps_dec->u4_bs_deblk_thread_created = 0;
ps_dec->u4_cur_bs_mb_num = 0;
ps_dec->u4_start_recon_deblk = 0;
DEBUG_THREADS_PRINTF(" Starting process call\n");
ps_dec->u4_pic_buf_got = 0;
do
{
WORD32 buf_size;
pu1_buf = (UWORD8*)ps_dec_ip->pv_stream_buffer
+ ps_dec_op->u4_num_bytes_consumed;
u4_max_ofst = ps_dec_ip->u4_num_Bytes
- ps_dec_op->u4_num_bytes_consumed;
/* If dynamic bitstream buffer is not allocated and
* header decode is done, then allocate dynamic bitstream buffer
*/
if((NULL == ps_dec->pu1_bits_buf_dynamic) &&
(ps_dec->i4_header_decoded & 1))
{
WORD32 size;
void *pv_buf;
void *pv_mem_ctxt = ps_dec->pv_mem_ctxt;
size = MAX(256000, ps_dec->u2_pic_wd * ps_dec->u2_pic_ht * 3 / 2);
pv_buf = ps_dec->pf_aligned_alloc(pv_mem_ctxt, 128, size);
RETURN_IF((NULL == pv_buf), IV_FAIL);
ps_dec->pu1_bits_buf_dynamic = pv_buf;
ps_dec->u4_dynamic_bits_buf_size = size;
}
if(ps_dec->pu1_bits_buf_dynamic)
{
pu1_bitstrm_buf = ps_dec->pu1_bits_buf_dynamic;
buf_size = ps_dec->u4_dynamic_bits_buf_size;
}
else
{
pu1_bitstrm_buf = ps_dec->pu1_bits_buf_static;
buf_size = ps_dec->u4_static_bits_buf_size;
}
u4_next_is_aud = 0;
buflen = ih264d_find_start_code(pu1_buf, 0, u4_max_ofst,
&u4_length_of_start_code,
&u4_next_is_aud);
if(buflen == -1)
buflen = 0;
/* Ignore bytes beyond the allocated size of intermediate buffer */
buflen = MIN(buflen, buf_size);
bytes_consumed = buflen + u4_length_of_start_code;
ps_dec_op->u4_num_bytes_consumed += bytes_consumed;
{
UWORD8 u1_firstbyte, u1_nal_ref_idc;
if(ps_dec->i4_app_skip_mode == IVD_SKIP_B)
{
u1_firstbyte = *(pu1_buf + u4_length_of_start_code);
u1_nal_ref_idc = (UWORD8)(NAL_REF_IDC(u1_firstbyte));
if(u1_nal_ref_idc == 0)
{
/*skip non-reference frames*/
cur_slice_is_nonref = 1;
continue;
}
else
{
if(1 == cur_slice_is_nonref)
{
/*We have encountered a referenced frame, return to app*/
ps_dec_op->u4_num_bytes_consumed -=
bytes_consumed;
ps_dec_op->e_pic_type = IV_B_FRAME;
ps_dec_op->u4_error_code =
IVD_DEC_FRM_SKIPPED;
ps_dec_op->u4_error_code |= (1
<< IVD_UNSUPPORTEDPARAM);
ps_dec_op->u4_frame_decoded_flag = 0;
ps_dec_op->u4_size =
sizeof(ivd_video_decode_op_t);
/*signal the decode thread*/
ih264d_signal_decode_thread(ps_dec);
/* close deblock thread if it is not closed yet*/
if(ps_dec->u4_num_cores == 3)
{
ih264d_signal_bs_deblk_thread(ps_dec);
}
return (IV_FAIL);
}
}
}
}
if(buflen)
{
memcpy(pu1_bitstrm_buf, pu1_buf + u4_length_of_start_code,
buflen);
/* Decoder may read extra 8 bytes near end of the frame */
if((buflen + 8) < buf_size)
{
memset(pu1_bitstrm_buf + buflen, 0, 8);
}
u4_first_start_code_found = 1;
}
else
{
/*start code not found*/
if(u4_first_start_code_found == 0)
{
/*no start codes found in current process call*/
ps_dec->i4_error_code = ERROR_START_CODE_NOT_FOUND;
ps_dec_op->u4_error_code |= 1 << IVD_INSUFFICIENTDATA;
if(ps_dec->u4_pic_buf_got == 0)
{
ih264d_fill_output_struct_from_context(ps_dec,
ps_dec_op);
ps_dec_op->u4_error_code = ps_dec->i4_error_code;
ps_dec_op->u4_frame_decoded_flag = 0;
return (IV_FAIL);
}
else
{
ps_dec->u1_pic_decode_done = 1;
continue;
}
}
else
{
/* a start code has already been found earlier in the same process call*/
frame_data_left = 0;
continue;
}
}
ps_dec->u4_return_to_app = 0;
ret = ih264d_parse_nal_unit(dec_hdl, ps_dec_op,
pu1_bitstrm_buf, buflen);
if(ret != OK)
{
UWORD32 error = ih264d_map_error(ret);
ps_dec_op->u4_error_code = error | ret;
api_ret_value = IV_FAIL;
if((ret == IVD_RES_CHANGED)
|| (ret == IVD_MEM_ALLOC_FAILED)
|| (ret == ERROR_UNAVAIL_PICBUF_T)
|| (ret == ERROR_UNAVAIL_MVBUF_T)
|| (ret == ERROR_INV_SPS_PPS_T))
{
ps_dec->u4_slice_start_code_found = 0;
break;
}
if((ret == ERROR_INCOMPLETE_FRAME) || (ret == ERROR_DANGLING_FIELD_IN_PIC))
{
ps_dec_op->u4_num_bytes_consumed -= bytes_consumed;
api_ret_value = IV_FAIL;
break;
}
if(ret == ERROR_IN_LAST_SLICE_OF_PIC)
{
api_ret_value = IV_FAIL;
break;
}
}
if(ps_dec->u4_return_to_app)
{
/*We have encountered a referenced frame, return to app*/
ps_dec_op->u4_num_bytes_consumed -= bytes_consumed;
ps_dec_op->u4_error_code = IVD_DEC_FRM_SKIPPED;
ps_dec_op->u4_error_code |= (1 << IVD_UNSUPPORTEDPARAM);
ps_dec_op->u4_frame_decoded_flag = 0;
ps_dec_op->u4_size = sizeof(ivd_video_decode_op_t);
/*signal the decode thread*/
ih264d_signal_decode_thread(ps_dec);
/* close deblock thread if it is not closed yet*/
if(ps_dec->u4_num_cores == 3)
{
ih264d_signal_bs_deblk_thread(ps_dec);
}
return (IV_FAIL);
}
header_data_left = ((ps_dec->i4_decode_header == 1)
&& (ps_dec->i4_header_decoded != 3)
&& (ps_dec_op->u4_num_bytes_consumed
< ps_dec_ip->u4_num_Bytes));
frame_data_left = (((ps_dec->i4_decode_header == 0)
&& ((ps_dec->u1_pic_decode_done == 0)
|| (u4_next_is_aud == 1)))
&& (ps_dec_op->u4_num_bytes_consumed
< ps_dec_ip->u4_num_Bytes));
}
while(( header_data_left == 1)||(frame_data_left == 1));
if((ps_dec->u4_slice_start_code_found == 1)
&& (ret != IVD_MEM_ALLOC_FAILED)
&& ps_dec->u2_total_mbs_coded < ps_dec->u2_frm_ht_in_mbs * ps_dec->u2_frm_wd_in_mbs)
{
WORD32 num_mb_skipped;
WORD32 prev_slice_err;
pocstruct_t temp_poc;
WORD32 ret1;
num_mb_skipped = (ps_dec->u2_frm_ht_in_mbs * ps_dec->u2_frm_wd_in_mbs)
- ps_dec->u2_total_mbs_coded;
if(ps_dec->u4_first_slice_in_pic && (ps_dec->u4_pic_buf_got == 0))
prev_slice_err = 1;
else
prev_slice_err = 2;
ret1 = ih264d_mark_err_slice_skip(ps_dec, num_mb_skipped, ps_dec->u1_nal_unit_type == IDR_SLICE_NAL, ps_dec->ps_cur_slice->u2_frame_num,
&temp_poc, prev_slice_err);
if((ret1 == ERROR_UNAVAIL_PICBUF_T) || (ret1 == ERROR_UNAVAIL_MVBUF_T))
{
return IV_FAIL;
}
}
if((ret == IVD_RES_CHANGED)
|| (ret == IVD_MEM_ALLOC_FAILED)
|| (ret == ERROR_UNAVAIL_PICBUF_T)
|| (ret == ERROR_UNAVAIL_MVBUF_T)
|| (ret == ERROR_INV_SPS_PPS_T))
{
/* signal the decode thread */
ih264d_signal_decode_thread(ps_dec);
/* close deblock thread if it is not closed yet */
if(ps_dec->u4_num_cores == 3)
{
ih264d_signal_bs_deblk_thread(ps_dec);
}
/* don't consume bitstream for the change-in-resolution case */
if(ret == IVD_RES_CHANGED)
{
ps_dec_op->u4_num_bytes_consumed -= bytes_consumed;
}
return IV_FAIL;
}
if(ps_dec->u1_separate_parse)
{
/* If Format conversion is not complete,
complete it here */
if(ps_dec->u4_num_cores == 2)
{
/*do deblocking of all mbs*/
if((ps_dec->u4_nmb_deblk == 0) &&(ps_dec->u4_start_recon_deblk == 1) && (ps_dec->ps_cur_sps->u1_mb_aff_flag == 0))
{
UWORD32 u4_num_mbs,u4_max_addr;
tfr_ctxt_t s_tfr_ctxt;
tfr_ctxt_t *ps_tfr_cxt = &s_tfr_ctxt;
pad_mgr_t *ps_pad_mgr = &ps_dec->s_pad_mgr;
/*BS is done for all mbs while parsing*/
u4_max_addr = (ps_dec->u2_frm_wd_in_mbs * ps_dec->u2_frm_ht_in_mbs) - 1;
ps_dec->u4_cur_bs_mb_num = u4_max_addr + 1;
ih264d_init_deblk_tfr_ctxt(ps_dec, ps_pad_mgr, ps_tfr_cxt,
ps_dec->u2_frm_wd_in_mbs, 0);
u4_num_mbs = u4_max_addr
- ps_dec->u4_cur_deblk_mb_num + 1;
DEBUG_PERF_PRINTF("mbs left for deblocking= %d \n",u4_num_mbs);
if(u4_num_mbs != 0)
ih264d_check_mb_map_deblk(ps_dec, u4_num_mbs,
ps_tfr_cxt,1);
ps_dec->u4_start_recon_deblk = 0;
}
}
/*signal the decode thread*/
ih264d_signal_decode_thread(ps_dec);
/* close deblock thread if it is not closed yet*/
if(ps_dec->u4_num_cores == 3)
{
ih264d_signal_bs_deblk_thread(ps_dec);
}
}
DATA_SYNC();
if((ps_dec_op->u4_error_code & 0xff)
!= ERROR_DYNAMIC_RESOLUTION_NOT_SUPPORTED)
{
ps_dec_op->u4_pic_wd = (UWORD32)ps_dec->u2_disp_width;
ps_dec_op->u4_pic_ht = (UWORD32)ps_dec->u2_disp_height;
}
if(ps_dec->i4_header_decoded != 3)
{
ps_dec_op->u4_error_code |= (1 << IVD_INSUFFICIENTDATA);
}
if(ps_dec->i4_decode_header == 1 && ps_dec->i4_header_decoded != 3)
{
ps_dec_op->u4_error_code |= (1 << IVD_INSUFFICIENTDATA);
}
if(ps_dec->u4_prev_nal_skipped)
{
/*We have encountered a referenced frame, return to app*/
ps_dec_op->u4_error_code = IVD_DEC_FRM_SKIPPED;
ps_dec_op->u4_error_code |= (1 << IVD_UNSUPPORTEDPARAM);
ps_dec_op->u4_frame_decoded_flag = 0;
ps_dec_op->u4_size = sizeof(ivd_video_decode_op_t);
/* close deblock thread if it is not closed yet*/
if(ps_dec->u4_num_cores == 3)
{
ih264d_signal_bs_deblk_thread(ps_dec);
}
return (IV_FAIL);
}
if((ps_dec->u4_slice_start_code_found == 1)
&& (ERROR_DANGLING_FIELD_IN_PIC != i4_err_status))
{
/*
* For field pictures, set the bottom and top picture decoded u4_flag correctly.
*/
if(ps_dec->ps_cur_slice->u1_field_pic_flag)
{
if(1 == ps_dec->ps_cur_slice->u1_bottom_field_flag)
{
ps_dec->u1_top_bottom_decoded |= BOT_FIELD_ONLY;
}
else
{
ps_dec->u1_top_bottom_decoded |= TOP_FIELD_ONLY;
}
}
/* if a new frame is not found (if we are still getting slices from the previous frame)
* ih264d_deblock_display is not called. Such frames will not be added to reference /display
*/
if((ps_dec->ps_dec_err_status->u1_err_flag & REJECT_CUR_PIC) == 0)
{
/* Calling Function to deblock Picture and Display */
ret = ih264d_deblock_display(ps_dec);
if(ret != 0)
{
return IV_FAIL;
}
}
/* set to complete, as we don't support partial frame decode */
if(ps_dec->i4_header_decoded == 3)
{
ps_dec->u2_total_mbs_coded = ps_dec->ps_cur_sps->u2_max_mb_addr + 1;
}
/*Update the i4_frametype at the end of picture*/
if(ps_dec->ps_cur_slice->u1_nal_unit_type == IDR_SLICE_NAL)
{
ps_dec->i4_frametype = IV_IDR_FRAME;
}
else if(ps_dec->i4_pic_type == B_SLICE)
{
ps_dec->i4_frametype = IV_B_FRAME;
}
else if(ps_dec->i4_pic_type == P_SLICE)
{
ps_dec->i4_frametype = IV_P_FRAME;
}
else if(ps_dec->i4_pic_type == I_SLICE)
{
ps_dec->i4_frametype = IV_I_FRAME;
}
else
{
H264_DEC_DEBUG_PRINT("Shouldn't come here\n");
}
ps_dec->i4_content_type = ps_dec->ps_cur_slice->u1_field_pic_flag;
ps_dec->u4_total_frames_decoded = ps_dec->u4_total_frames_decoded + 2;
ps_dec->u4_total_frames_decoded = ps_dec->u4_total_frames_decoded
- ps_dec->ps_cur_slice->u1_field_pic_flag;
}
/* close deblock thread if it is not closed yet*/
if(ps_dec->u4_num_cores == 3)
{
ih264d_signal_bs_deblk_thread(ps_dec);
}
{
/* In case the decoder is configured to run in low delay mode,
* then get display buffer and then format convert.
* Note in this mode, format conversion does not run in parallel in a thread and adds to the codec cycles
*/
if((IVD_DECODE_FRAME_OUT == ps_dec->e_frm_out_mode)
&& ps_dec->u1_init_dec_flag)
{
ih264d_get_next_display_field(ps_dec, ps_dec->ps_out_buffer,
&(ps_dec->s_disp_op));
if(0 == ps_dec->s_disp_op.u4_error_code)
{
ps_dec->u4_fmt_conv_cur_row = 0;
ps_dec->u4_output_present = 1;
}
}
ih264d_fill_output_struct_from_context(ps_dec, ps_dec_op);
/* If Format conversion is not complete,
complete it here */
if(ps_dec->u4_output_present &&
(ps_dec->u4_fmt_conv_cur_row < ps_dec->s_disp_frame_info.u4_y_ht))
{
ps_dec->u4_fmt_conv_num_rows = ps_dec->s_disp_frame_info.u4_y_ht
- ps_dec->u4_fmt_conv_cur_row;
ih264d_format_convert(ps_dec, &(ps_dec->s_disp_op),
ps_dec->u4_fmt_conv_cur_row,
ps_dec->u4_fmt_conv_num_rows);
ps_dec->u4_fmt_conv_cur_row += ps_dec->u4_fmt_conv_num_rows;
}
ih264d_release_display_field(ps_dec, &(ps_dec->s_disp_op));
}
if(ps_dec->i4_decode_header == 1 && (ps_dec->i4_header_decoded & 1) == 1)
{
ps_dec_op->u4_progressive_frame_flag = 1;
if((NULL != ps_dec->ps_cur_sps) && (1 == (ps_dec->ps_cur_sps->u1_is_valid)))
{
if((0 == ps_dec->ps_sps->u1_frame_mbs_only_flag)
&& (0 == ps_dec->ps_sps->u1_mb_aff_flag))
ps_dec_op->u4_progressive_frame_flag = 0;
}
}
/*Data memory barrier instruction, so that the yuv write by the library is complete*/
DATA_SYNC();
H264_DEC_DEBUG_PRINT("The num bytes consumed: %d\n",
ps_dec_op->u4_num_bytes_consumed);
return api_ret_value;
}
Commit Message: Fixed error concealment when no MBs are decoded in the current pic
Bug: 29493002
Change-Id: I3fae547ddb0616b4e6579580985232bd3d65881e
CWE ID: CWE-284 | WORD32 ih264d_video_decode(iv_obj_t *dec_hdl, void *pv_api_ip, void *pv_api_op)
{
/* ! */
dec_struct_t * ps_dec = (dec_struct_t *)(dec_hdl->pv_codec_handle);
WORD32 i4_err_status = 0;
UWORD8 *pu1_buf = NULL;
WORD32 buflen;
UWORD32 u4_max_ofst, u4_length_of_start_code = 0;
UWORD32 bytes_consumed = 0;
UWORD32 cur_slice_is_nonref = 0;
UWORD32 u4_next_is_aud;
UWORD32 u4_first_start_code_found = 0;
WORD32 ret = 0,api_ret_value = IV_SUCCESS;
WORD32 header_data_left = 0,frame_data_left = 0;
UWORD8 *pu1_bitstrm_buf;
ivd_video_decode_ip_t *ps_dec_ip;
ivd_video_decode_op_t *ps_dec_op;
ithread_set_name((void*)"Parse_thread");
ps_dec_ip = (ivd_video_decode_ip_t *)pv_api_ip;
ps_dec_op = (ivd_video_decode_op_t *)pv_api_op;
{
UWORD32 u4_size;
u4_size = ps_dec_op->u4_size;
memset(ps_dec_op, 0, sizeof(ivd_video_decode_op_t));
ps_dec_op->u4_size = u4_size;
}
ps_dec->pv_dec_out = ps_dec_op;
if(ps_dec->init_done != 1)
{
return IV_FAIL;
}
/*Data memory barrier instruction, so that the bitstream write by the application is complete*/
DATA_SYNC();
if(0 == ps_dec->u1_flushfrm)
{
if(ps_dec_ip->pv_stream_buffer == NULL)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DEC_FRM_BS_BUF_NULL;
return IV_FAIL;
}
if(ps_dec_ip->u4_num_Bytes <= 0)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DEC_NUMBYTES_INV;
return IV_FAIL;
}
}
ps_dec->u1_pic_decode_done = 0;
ps_dec_op->u4_num_bytes_consumed = 0;
ps_dec->ps_out_buffer = NULL;
if(ps_dec_ip->u4_size
>= offsetof(ivd_video_decode_ip_t, s_out_buffer))
ps_dec->ps_out_buffer = &ps_dec_ip->s_out_buffer;
ps_dec->u4_fmt_conv_cur_row = 0;
ps_dec->u4_output_present = 0;
ps_dec->s_disp_op.u4_error_code = 1;
ps_dec->u4_fmt_conv_num_rows = FMT_CONV_NUM_ROWS;
if(0 == ps_dec->u4_share_disp_buf
&& ps_dec->i4_decode_header == 0)
{
UWORD32 i;
if(ps_dec->ps_out_buffer->u4_num_bufs == 0)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DISP_FRM_ZERO_OP_BUFS;
return IV_FAIL;
}
for(i = 0; i < ps_dec->ps_out_buffer->u4_num_bufs; i++)
{
if(ps_dec->ps_out_buffer->pu1_bufs[i] == NULL)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DISP_FRM_OP_BUF_NULL;
return IV_FAIL;
}
if(ps_dec->ps_out_buffer->u4_min_out_buf_size[i] == 0)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |=
IVD_DISP_FRM_ZERO_OP_BUF_SIZE;
return IV_FAIL;
}
}
}
if(ps_dec->u4_total_frames_decoded >= NUM_FRAMES_LIMIT)
{
ps_dec_op->u4_error_code = ERROR_FRAME_LIMIT_OVER;
return IV_FAIL;
}
/* ! */
ps_dec->u4_ts = ps_dec_ip->u4_ts;
ps_dec_op->u4_error_code = 0;
ps_dec_op->e_pic_type = -1;
ps_dec_op->u4_output_present = 0;
ps_dec_op->u4_frame_decoded_flag = 0;
ps_dec->i4_frametype = -1;
ps_dec->i4_content_type = -1;
/*
* For field pictures, set the bottom and top picture decoded u4_flag correctly.
*/
{
if((TOP_FIELD_ONLY | BOT_FIELD_ONLY) == ps_dec->u1_top_bottom_decoded)
{
ps_dec->u1_top_bottom_decoded = 0;
}
}
ps_dec->u4_slice_start_code_found = 0;
/* In case the decoder is not in flush mode (in shared mode),
then decoder has to pick up a buffer to write current frame.
Check if a frame is available in such cases */
if(ps_dec->u1_init_dec_flag == 1 && ps_dec->u4_share_disp_buf == 1
&& ps_dec->u1_flushfrm == 0)
{
UWORD32 i;
WORD32 disp_avail = 0, free_id;
/* Check if at least one buffer is available with the codec */
/* If not then return to application with error */
for(i = 0; i < ps_dec->u1_pic_bufs; i++)
{
if(0 == ps_dec->u4_disp_buf_mapping[i]
|| 1 == ps_dec->u4_disp_buf_to_be_freed[i])
{
disp_avail = 1;
break;
}
}
if(0 == disp_avail)
{
/* If something is queued for display, wait for that buffer to be returned */
ps_dec_op->u4_error_code = IVD_DEC_REF_BUF_NULL;
ps_dec_op->u4_error_code |= (1 << IVD_UNSUPPORTEDPARAM);
return (IV_FAIL);
}
while(1)
{
pic_buffer_t *ps_pic_buf;
ps_pic_buf = (pic_buffer_t *)ih264_buf_mgr_get_next_free(
(buf_mgr_t *)ps_dec->pv_pic_buf_mgr, &free_id);
if(ps_pic_buf == NULL)
{
UWORD32 i, display_queued = 0;
/* check if any buffer was given for display which is not returned yet */
for(i = 0; i < (MAX_DISP_BUFS_NEW); i++)
{
if(0 != ps_dec->u4_disp_buf_mapping[i])
{
display_queued = 1;
break;
}
}
/* If some buffer is queued for display, then codec has to signal an error and wait
for that buffer to be returned.
If nothing is queued for display then codec has ownership of all display buffers
and it can reuse any of the existing buffers and continue decoding */
if(1 == display_queued)
{
/* If something is queued for display, wait for that buffer to be returned */
ps_dec_op->u4_error_code = IVD_DEC_REF_BUF_NULL;
ps_dec_op->u4_error_code |= (1
<< IVD_UNSUPPORTEDPARAM);
return (IV_FAIL);
}
}
else
{
/* If the buffer is with display, then mark it as in use and then look for a buffer again */
if(1 == ps_dec->u4_disp_buf_mapping[free_id])
{
ih264_buf_mgr_set_status(
(buf_mgr_t *)ps_dec->pv_pic_buf_mgr,
free_id,
BUF_MGR_IO);
}
else
{
/**
* Found a free buffer for present call. Release it now.
* Will be again obtained later.
*/
ih264_buf_mgr_release((buf_mgr_t *)ps_dec->pv_pic_buf_mgr,
free_id,
BUF_MGR_IO);
break;
}
}
}
}
if(ps_dec->u1_flushfrm && ps_dec->u1_init_dec_flag)
{
ih264d_get_next_display_field(ps_dec, ps_dec->ps_out_buffer,
&(ps_dec->s_disp_op));
if(0 == ps_dec->s_disp_op.u4_error_code)
{
ps_dec->u4_fmt_conv_cur_row = 0;
ps_dec->u4_fmt_conv_num_rows = ps_dec->s_disp_frame_info.u4_y_ht;
ih264d_format_convert(ps_dec, &(ps_dec->s_disp_op),
ps_dec->u4_fmt_conv_cur_row,
ps_dec->u4_fmt_conv_num_rows);
ps_dec->u4_fmt_conv_cur_row += ps_dec->u4_fmt_conv_num_rows;
ps_dec->u4_output_present = 1;
}
ih264d_release_display_field(ps_dec, &(ps_dec->s_disp_op));
ps_dec_op->u4_pic_wd = (UWORD32)ps_dec->u2_disp_width;
ps_dec_op->u4_pic_ht = (UWORD32)ps_dec->u2_disp_height;
ps_dec_op->u4_new_seq = 0;
ps_dec_op->u4_output_present = ps_dec->u4_output_present;
ps_dec_op->u4_progressive_frame_flag =
ps_dec->s_disp_op.u4_progressive_frame_flag;
ps_dec_op->e_output_format =
ps_dec->s_disp_op.e_output_format;
ps_dec_op->s_disp_frm_buf = ps_dec->s_disp_op.s_disp_frm_buf;
ps_dec_op->e4_fld_type = ps_dec->s_disp_op.e4_fld_type;
ps_dec_op->u4_ts = ps_dec->s_disp_op.u4_ts;
ps_dec_op->u4_disp_buf_id = ps_dec->s_disp_op.u4_disp_buf_id;
/* In the case of flush, since no frame is decoded, set the pic type as invalid */
ps_dec_op->u4_is_ref_flag = -1;
ps_dec_op->e_pic_type = IV_NA_FRAME;
ps_dec_op->u4_frame_decoded_flag = 0;
if(0 == ps_dec->s_disp_op.u4_error_code)
{
return (IV_SUCCESS);
}
else
return (IV_FAIL);
}
if(ps_dec->u1_res_changed == 1)
{
/*if resolution has changed and all buffers have been flushed, reset decoder*/
ih264d_init_decoder(ps_dec);
}
ps_dec->u4_prev_nal_skipped = 0;
ps_dec->u2_cur_mb_addr = 0;
ps_dec->u2_total_mbs_coded = 0;
ps_dec->u2_cur_slice_num = 0;
ps_dec->cur_dec_mb_num = 0;
ps_dec->cur_recon_mb_num = 0;
ps_dec->u4_first_slice_in_pic = 2;
ps_dec->u1_first_pb_nal_in_pic = 1;
ps_dec->u1_slice_header_done = 0;
ps_dec->u1_dangling_field = 0;
ps_dec->u4_dec_thread_created = 0;
ps_dec->u4_bs_deblk_thread_created = 0;
ps_dec->u4_cur_bs_mb_num = 0;
ps_dec->u4_start_recon_deblk = 0;
DEBUG_THREADS_PRINTF(" Starting process call\n");
ps_dec->u4_pic_buf_got = 0;
do
{
WORD32 buf_size;
pu1_buf = (UWORD8*)ps_dec_ip->pv_stream_buffer
+ ps_dec_op->u4_num_bytes_consumed;
u4_max_ofst = ps_dec_ip->u4_num_Bytes
- ps_dec_op->u4_num_bytes_consumed;
/* If dynamic bitstream buffer is not allocated and
* header decode is done, then allocate dynamic bitstream buffer
*/
if((NULL == ps_dec->pu1_bits_buf_dynamic) &&
(ps_dec->i4_header_decoded & 1))
{
WORD32 size;
void *pv_buf;
void *pv_mem_ctxt = ps_dec->pv_mem_ctxt;
size = MAX(256000, ps_dec->u2_pic_wd * ps_dec->u2_pic_ht * 3 / 2);
pv_buf = ps_dec->pf_aligned_alloc(pv_mem_ctxt, 128, size);
RETURN_IF((NULL == pv_buf), IV_FAIL);
ps_dec->pu1_bits_buf_dynamic = pv_buf;
ps_dec->u4_dynamic_bits_buf_size = size;
}
if(ps_dec->pu1_bits_buf_dynamic)
{
pu1_bitstrm_buf = ps_dec->pu1_bits_buf_dynamic;
buf_size = ps_dec->u4_dynamic_bits_buf_size;
}
else
{
pu1_bitstrm_buf = ps_dec->pu1_bits_buf_static;
buf_size = ps_dec->u4_static_bits_buf_size;
}
u4_next_is_aud = 0;
buflen = ih264d_find_start_code(pu1_buf, 0, u4_max_ofst,
&u4_length_of_start_code,
&u4_next_is_aud);
if(buflen == -1)
buflen = 0;
/* Ignore bytes beyond the allocated size of intermediate buffer */
buflen = MIN(buflen, buf_size);
bytes_consumed = buflen + u4_length_of_start_code;
ps_dec_op->u4_num_bytes_consumed += bytes_consumed;
{
UWORD8 u1_firstbyte, u1_nal_ref_idc;
if(ps_dec->i4_app_skip_mode == IVD_SKIP_B)
{
u1_firstbyte = *(pu1_buf + u4_length_of_start_code);
u1_nal_ref_idc = (UWORD8)(NAL_REF_IDC(u1_firstbyte));
if(u1_nal_ref_idc == 0)
{
/*skip non-reference frames*/
cur_slice_is_nonref = 1;
continue;
}
else
{
if(1 == cur_slice_is_nonref)
{
/*We have encountered a referenced frame, return to app*/
ps_dec_op->u4_num_bytes_consumed -=
bytes_consumed;
ps_dec_op->e_pic_type = IV_B_FRAME;
ps_dec_op->u4_error_code =
IVD_DEC_FRM_SKIPPED;
ps_dec_op->u4_error_code |= (1
<< IVD_UNSUPPORTEDPARAM);
ps_dec_op->u4_frame_decoded_flag = 0;
ps_dec_op->u4_size =
sizeof(ivd_video_decode_op_t);
/*signal the decode thread*/
ih264d_signal_decode_thread(ps_dec);
/* close deblock thread if it is not closed yet*/
if(ps_dec->u4_num_cores == 3)
{
ih264d_signal_bs_deblk_thread(ps_dec);
}
return (IV_FAIL);
}
}
}
}
if(buflen)
{
memcpy(pu1_bitstrm_buf, pu1_buf + u4_length_of_start_code,
buflen);
/* Decoder may read extra 8 bytes near end of the frame */
if((buflen + 8) < buf_size)
{
memset(pu1_bitstrm_buf + buflen, 0, 8);
}
u4_first_start_code_found = 1;
}
else
{
/*start code not found*/
if(u4_first_start_code_found == 0)
{
/*no start codes found in current process call*/
ps_dec->i4_error_code = ERROR_START_CODE_NOT_FOUND;
ps_dec_op->u4_error_code |= 1 << IVD_INSUFFICIENTDATA;
if(ps_dec->u4_pic_buf_got == 0)
{
ih264d_fill_output_struct_from_context(ps_dec,
ps_dec_op);
ps_dec_op->u4_error_code = ps_dec->i4_error_code;
ps_dec_op->u4_frame_decoded_flag = 0;
return (IV_FAIL);
}
else
{
ps_dec->u1_pic_decode_done = 1;
continue;
}
}
else
{
/* a start code has already been found earlier in the same process call*/
frame_data_left = 0;
continue;
}
}
ps_dec->u4_return_to_app = 0;
ret = ih264d_parse_nal_unit(dec_hdl, ps_dec_op,
pu1_bitstrm_buf, buflen);
if(ret != OK)
{
UWORD32 error = ih264d_map_error(ret);
ps_dec_op->u4_error_code = error | ret;
api_ret_value = IV_FAIL;
if((ret == IVD_RES_CHANGED)
|| (ret == IVD_MEM_ALLOC_FAILED)
|| (ret == ERROR_UNAVAIL_PICBUF_T)
|| (ret == ERROR_UNAVAIL_MVBUF_T)
|| (ret == ERROR_INV_SPS_PPS_T))
{
ps_dec->u4_slice_start_code_found = 0;
break;
}
if((ret == ERROR_INCOMPLETE_FRAME) || (ret == ERROR_DANGLING_FIELD_IN_PIC))
{
ps_dec_op->u4_num_bytes_consumed -= bytes_consumed;
api_ret_value = IV_FAIL;
break;
}
if(ret == ERROR_IN_LAST_SLICE_OF_PIC)
{
api_ret_value = IV_FAIL;
break;
}
}
if(ps_dec->u4_return_to_app)
{
/*We have encountered a referenced frame, return to app*/
ps_dec_op->u4_num_bytes_consumed -= bytes_consumed;
ps_dec_op->u4_error_code = IVD_DEC_FRM_SKIPPED;
ps_dec_op->u4_error_code |= (1 << IVD_UNSUPPORTEDPARAM);
ps_dec_op->u4_frame_decoded_flag = 0;
ps_dec_op->u4_size = sizeof(ivd_video_decode_op_t);
/*signal the decode thread*/
ih264d_signal_decode_thread(ps_dec);
/* close deblock thread if it is not closed yet*/
if(ps_dec->u4_num_cores == 3)
{
ih264d_signal_bs_deblk_thread(ps_dec);
}
return (IV_FAIL);
}
header_data_left = ((ps_dec->i4_decode_header == 1)
&& (ps_dec->i4_header_decoded != 3)
&& (ps_dec_op->u4_num_bytes_consumed
< ps_dec_ip->u4_num_Bytes));
frame_data_left = (((ps_dec->i4_decode_header == 0)
&& ((ps_dec->u1_pic_decode_done == 0)
|| (u4_next_is_aud == 1)))
&& (ps_dec_op->u4_num_bytes_consumed
< ps_dec_ip->u4_num_Bytes));
}
while(( header_data_left == 1)||(frame_data_left == 1));
if((ps_dec->u4_slice_start_code_found == 1)
&& (ret != IVD_MEM_ALLOC_FAILED)
&& ps_dec->u2_total_mbs_coded < ps_dec->u2_frm_ht_in_mbs * ps_dec->u2_frm_wd_in_mbs)
{
WORD32 num_mb_skipped;
WORD32 prev_slice_err;
pocstruct_t temp_poc;
WORD32 ret1;
num_mb_skipped = (ps_dec->u2_frm_ht_in_mbs * ps_dec->u2_frm_wd_in_mbs)
- ps_dec->u2_total_mbs_coded;
if(ps_dec->u4_first_slice_in_pic && (ps_dec->u4_pic_buf_got == 0))
prev_slice_err = 1;
else
prev_slice_err = 2;
if(ps_dec->u4_first_slice_in_pic && (ps_dec->u2_total_mbs_coded == 0))
prev_slice_err = 1;
ret1 = ih264d_mark_err_slice_skip(ps_dec, num_mb_skipped, ps_dec->u1_nal_unit_type == IDR_SLICE_NAL, ps_dec->ps_cur_slice->u2_frame_num,
&temp_poc, prev_slice_err);
if((ret1 == ERROR_UNAVAIL_PICBUF_T) || (ret1 == ERROR_UNAVAIL_MVBUF_T))
{
return IV_FAIL;
}
}
if((ret == IVD_RES_CHANGED)
|| (ret == IVD_MEM_ALLOC_FAILED)
|| (ret == ERROR_UNAVAIL_PICBUF_T)
|| (ret == ERROR_UNAVAIL_MVBUF_T)
|| (ret == ERROR_INV_SPS_PPS_T))
{
/* signal the decode thread */
ih264d_signal_decode_thread(ps_dec);
/* close deblock thread if it is not closed yet */
if(ps_dec->u4_num_cores == 3)
{
ih264d_signal_bs_deblk_thread(ps_dec);
}
/* don't consume bitstream for the change-in-resolution case */
if(ret == IVD_RES_CHANGED)
{
ps_dec_op->u4_num_bytes_consumed -= bytes_consumed;
}
return IV_FAIL;
}
if(ps_dec->u1_separate_parse)
{
/* If Format conversion is not complete,
complete it here */
if(ps_dec->u4_num_cores == 2)
{
/*do deblocking of all mbs*/
if((ps_dec->u4_nmb_deblk == 0) &&(ps_dec->u4_start_recon_deblk == 1) && (ps_dec->ps_cur_sps->u1_mb_aff_flag == 0))
{
UWORD32 u4_num_mbs,u4_max_addr;
tfr_ctxt_t s_tfr_ctxt;
tfr_ctxt_t *ps_tfr_cxt = &s_tfr_ctxt;
pad_mgr_t *ps_pad_mgr = &ps_dec->s_pad_mgr;
/*BS is done for all mbs while parsing*/
u4_max_addr = (ps_dec->u2_frm_wd_in_mbs * ps_dec->u2_frm_ht_in_mbs) - 1;
ps_dec->u4_cur_bs_mb_num = u4_max_addr + 1;
ih264d_init_deblk_tfr_ctxt(ps_dec, ps_pad_mgr, ps_tfr_cxt,
ps_dec->u2_frm_wd_in_mbs, 0);
u4_num_mbs = u4_max_addr
- ps_dec->u4_cur_deblk_mb_num + 1;
DEBUG_PERF_PRINTF("mbs left for deblocking= %d \n",u4_num_mbs);
if(u4_num_mbs != 0)
ih264d_check_mb_map_deblk(ps_dec, u4_num_mbs,
ps_tfr_cxt,1);
ps_dec->u4_start_recon_deblk = 0;
}
}
/*signal the decode thread*/
ih264d_signal_decode_thread(ps_dec);
/* close deblock thread if it is not closed yet*/
if(ps_dec->u4_num_cores == 3)
{
ih264d_signal_bs_deblk_thread(ps_dec);
}
}
DATA_SYNC();
if((ps_dec_op->u4_error_code & 0xff)
!= ERROR_DYNAMIC_RESOLUTION_NOT_SUPPORTED)
{
ps_dec_op->u4_pic_wd = (UWORD32)ps_dec->u2_disp_width;
ps_dec_op->u4_pic_ht = (UWORD32)ps_dec->u2_disp_height;
}
if(ps_dec->i4_header_decoded != 3)
{
ps_dec_op->u4_error_code |= (1 << IVD_INSUFFICIENTDATA);
}
if(ps_dec->i4_decode_header == 1 && ps_dec->i4_header_decoded != 3)
{
ps_dec_op->u4_error_code |= (1 << IVD_INSUFFICIENTDATA);
}
if(ps_dec->u4_prev_nal_skipped)
{
/*We have encountered a referenced frame, return to app*/
ps_dec_op->u4_error_code = IVD_DEC_FRM_SKIPPED;
ps_dec_op->u4_error_code |= (1 << IVD_UNSUPPORTEDPARAM);
ps_dec_op->u4_frame_decoded_flag = 0;
ps_dec_op->u4_size = sizeof(ivd_video_decode_op_t);
/* close deblock thread if it is not closed yet*/
if(ps_dec->u4_num_cores == 3)
{
ih264d_signal_bs_deblk_thread(ps_dec);
}
return (IV_FAIL);
}
if((ps_dec->u4_slice_start_code_found == 1)
&& (ERROR_DANGLING_FIELD_IN_PIC != i4_err_status))
{
/*
* For field pictures, set the bottom and top picture decoded u4_flag correctly.
*/
if(ps_dec->ps_cur_slice->u1_field_pic_flag)
{
if(1 == ps_dec->ps_cur_slice->u1_bottom_field_flag)
{
ps_dec->u1_top_bottom_decoded |= BOT_FIELD_ONLY;
}
else
{
ps_dec->u1_top_bottom_decoded |= TOP_FIELD_ONLY;
}
}
/* if a new frame is not found (if we are still getting slices from the previous frame)
* ih264d_deblock_display is not called. Such frames will not be added to reference /display
*/
if((ps_dec->ps_dec_err_status->u1_err_flag & REJECT_CUR_PIC) == 0)
{
/* Calling Function to deblock Picture and Display */
ret = ih264d_deblock_display(ps_dec);
if(ret != 0)
{
return IV_FAIL;
}
}
/* set to complete, as we don't support partial frame decode */
if(ps_dec->i4_header_decoded == 3)
{
ps_dec->u2_total_mbs_coded = ps_dec->ps_cur_sps->u2_max_mb_addr + 1;
}
/*Update the i4_frametype at the end of picture*/
if(ps_dec->ps_cur_slice->u1_nal_unit_type == IDR_SLICE_NAL)
{
ps_dec->i4_frametype = IV_IDR_FRAME;
}
else if(ps_dec->i4_pic_type == B_SLICE)
{
ps_dec->i4_frametype = IV_B_FRAME;
}
else if(ps_dec->i4_pic_type == P_SLICE)
{
ps_dec->i4_frametype = IV_P_FRAME;
}
else if(ps_dec->i4_pic_type == I_SLICE)
{
ps_dec->i4_frametype = IV_I_FRAME;
}
else
{
H264_DEC_DEBUG_PRINT("Shouldn't come here\n");
}
ps_dec->i4_content_type = ps_dec->ps_cur_slice->u1_field_pic_flag;
ps_dec->u4_total_frames_decoded = ps_dec->u4_total_frames_decoded + 2;
ps_dec->u4_total_frames_decoded = ps_dec->u4_total_frames_decoded
- ps_dec->ps_cur_slice->u1_field_pic_flag;
}
/* close deblock thread if it is not closed yet*/
if(ps_dec->u4_num_cores == 3)
{
ih264d_signal_bs_deblk_thread(ps_dec);
}
{
/* In case the decoder is configured to run in low delay mode,
* then get display buffer and then format convert.
* Note in this mode, format conversion does not run in parallel in a thread and adds to the codec cycles
*/
if((IVD_DECODE_FRAME_OUT == ps_dec->e_frm_out_mode)
&& ps_dec->u1_init_dec_flag)
{
ih264d_get_next_display_field(ps_dec, ps_dec->ps_out_buffer,
&(ps_dec->s_disp_op));
if(0 == ps_dec->s_disp_op.u4_error_code)
{
ps_dec->u4_fmt_conv_cur_row = 0;
ps_dec->u4_output_present = 1;
}
}
ih264d_fill_output_struct_from_context(ps_dec, ps_dec_op);
/* If Format conversion is not complete,
complete it here */
if(ps_dec->u4_output_present &&
(ps_dec->u4_fmt_conv_cur_row < ps_dec->s_disp_frame_info.u4_y_ht))
{
ps_dec->u4_fmt_conv_num_rows = ps_dec->s_disp_frame_info.u4_y_ht
- ps_dec->u4_fmt_conv_cur_row;
ih264d_format_convert(ps_dec, &(ps_dec->s_disp_op),
ps_dec->u4_fmt_conv_cur_row,
ps_dec->u4_fmt_conv_num_rows);
ps_dec->u4_fmt_conv_cur_row += ps_dec->u4_fmt_conv_num_rows;
}
ih264d_release_display_field(ps_dec, &(ps_dec->s_disp_op));
}
if(ps_dec->i4_decode_header == 1 && (ps_dec->i4_header_decoded & 1) == 1)
{
ps_dec_op->u4_progressive_frame_flag = 1;
if((NULL != ps_dec->ps_cur_sps) && (1 == (ps_dec->ps_cur_sps->u1_is_valid)))
{
if((0 == ps_dec->ps_sps->u1_frame_mbs_only_flag)
&& (0 == ps_dec->ps_sps->u1_mb_aff_flag))
ps_dec_op->u4_progressive_frame_flag = 0;
}
}
/*Data memory barrier instruction, so that the yuv write by the library is complete*/
DATA_SYNC();
H264_DEC_DEBUG_PRINT("The num bytes consumed: %d\n",
ps_dec_op->u4_num_bytes_consumed);
return api_ret_value;
}
| 173,413 |
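
The entire behavioral diff in the ih264d entry above is the extra guard before ih264d_mark_err_slice_skip(): besides the pre-existing "no picture buffer was ever obtained" case, the fixed decoder also forces full-picture concealment when not a single macroblock of the current picture was decoded. A minimal standalone sketch of that selection logic, with field names borrowed from the decoder context — the helper name and the mode-1/mode-2 reading are illustrative, inferred from context, not part of libavc:

#include <stdint.h>

/* Hypothetical helper (not in libavc) isolating the concealment-mode choice.
 * prev_slice_err of 1 appears to mean "conceal from the start of the
 * picture", 2 "conceal from after an earlier good slice". */
static int32_t pick_prev_slice_err(int first_slice_in_pic,
                                   int pic_buf_got,
                                   uint32_t total_mbs_coded)
{
    int32_t prev_slice_err;

    /* Pre-fix logic: only "no picture buffer obtained" forced mode 1. */
    if (first_slice_in_pic && pic_buf_got == 0)
        prev_slice_err = 1;
    else
        prev_slice_err = 2;

    /* The fix: if zero MBs of the current picture were decoded, concealment
     * must also restart from the top, regardless of pic_buf_got, so the
     * skip routine never consumes per-slice state that was never set up. */
    if (first_slice_in_pic && total_mbs_coded == 0)
        prev_slice_err = 1;

    return prev_slice_err;
}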
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void DOMWindow::focus(LocalDOMWindow* incumbent_window) {
if (!GetFrame())
return;
Page* page = GetFrame()->GetPage();
if (!page)
return;
DCHECK(incumbent_window);
ExecutionContext* incumbent_execution_context =
incumbent_window->GetExecutionContext();
bool allow_focus = incumbent_execution_context->IsWindowInteractionAllowed();
if (allow_focus) {
incumbent_execution_context->ConsumeWindowInteraction();
} else {
DCHECK(IsMainThread());
allow_focus =
opener() && (opener() != this) &&
(ToDocument(incumbent_execution_context)->domWindow() == opener());
}
if (GetFrame()->IsMainFrame() && allow_focus)
page->GetChromeClient().Focus();
page->GetFocusController().FocusDocumentView(GetFrame(),
true /* notifyEmbedder */);
}
Commit Message: If a page calls |window.focus()|, kick it out of fullscreen.
BUG=776418, 800056
Change-Id: I1880fe600e4814c073f247c43b1c1ac80c8fc017
Reviewed-on: https://chromium-review.googlesource.com/852378
Reviewed-by: Nasko Oskov <[email protected]>
Reviewed-by: Philip Jägenstedt <[email protected]>
Commit-Queue: Avi Drissman <[email protected]>
Cr-Commit-Position: refs/heads/master@{#533790}
CWE ID: | void DOMWindow::focus(LocalDOMWindow* incumbent_window) {
if (!GetFrame())
return;
Page* page = GetFrame()->GetPage();
if (!page)
return;
DCHECK(incumbent_window);
ExecutionContext* incumbent_execution_context =
incumbent_window->GetExecutionContext();
bool allow_focus = incumbent_execution_context->IsWindowInteractionAllowed();
if (allow_focus) {
incumbent_execution_context->ConsumeWindowInteraction();
} else {
DCHECK(IsMainThread());
allow_focus =
opener() && (opener() != this) &&
(ToDocument(incumbent_execution_context)->domWindow() == opener());
}
if (GetFrame()->IsMainFrame() && allow_focus)
page->GetChromeClient().Focus(incumbent_window->GetFrame());
page->GetFocusController().FocusDocumentView(GetFrame(),
true /* notifyEmbedder */);
}
| 172,722 |
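
The one-line change in the DOMWindow::focus entry above is plumbing: ChromeClient::Focus() now receives the frame whose script requested focus, so the embedder can act on the requester — per the commit title, kicking that page out of fullscreen. The shape of the change, reduced to a plain-C sketch with stand-in types (none of these names are Blink's):

/* Stand-in types; the real interfaces are Blink's ChromeClient/LocalFrame. */
struct frame;

struct chrome_client_ops {
    /* Before: void (*focus)(void); - the embedder could not tell who asked.
     * After: the requesting frame travels with the call, so policy such as
     * "exit fullscreen for the requester" becomes possible. */
    void (*focus)(struct frame *requester);
};

static void request_focus(const struct chrome_client_ops *ops,
                          struct frame *incumbent_frame)
{
    ops->focus(incumbent_frame); /* mirrors Focus(incumbent_window->GetFrame()) */
}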
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: ConvolveFunctions(convolve_fn_t h8, convolve_fn_t h8_avg,
convolve_fn_t v8, convolve_fn_t v8_avg,
convolve_fn_t hv8, convolve_fn_t hv8_avg)
: h8_(h8), v8_(v8), hv8_(hv8), h8_avg_(h8_avg), v8_avg_(v8_avg),
hv8_avg_(hv8_avg) {}
Commit Message: Merge Conflict Fix CL to lmp-mr1-release for ag/849478
DO NOT MERGE - libvpx: Pull from upstream
Current HEAD: 7105df53d7dc13d5e575bc8df714ec8d1da36b06
BUG=23452792
Change-Id: Ic78176fc369e0bacc71d423e0e2e6075d004aaec
CWE ID: CWE-119 | ConvolveFunctions(convolve_fn_t h8, convolve_fn_t h8_avg,
| 174,503 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: png_set_IHDR(png_structp png_ptr, png_infop info_ptr,
png_uint_32 width, png_uint_32 height, int bit_depth,
int color_type, int interlace_type, int compression_type,
int filter_type)
{
png_debug1(1, "in %s storage function", "IHDR");
if (png_ptr == NULL || info_ptr == NULL)
return;
info_ptr->width = width;
info_ptr->height = height;
info_ptr->bit_depth = (png_byte)bit_depth;
info_ptr->color_type = (png_byte)color_type;
info_ptr->compression_type = (png_byte)compression_type;
info_ptr->filter_type = (png_byte)filter_type;
info_ptr->interlace_type = (png_byte)interlace_type;
png_check_IHDR (png_ptr, info_ptr->width, info_ptr->height,
info_ptr->bit_depth, info_ptr->color_type, info_ptr->interlace_type,
info_ptr->compression_type, info_ptr->filter_type);
if (info_ptr->color_type == PNG_COLOR_TYPE_PALETTE)
info_ptr->channels = 1;
else if (info_ptr->color_type & PNG_COLOR_MASK_COLOR)
info_ptr->channels = 3;
else
info_ptr->channels = 1;
if (info_ptr->color_type & PNG_COLOR_MASK_ALPHA)
info_ptr->channels++;
info_ptr->pixel_depth = (png_byte)(info_ptr->channels * info_ptr->bit_depth);
/* Check for potential overflow */
if (width > (PNG_UINT_32_MAX
>> 3) /* 8-byte RGBA pixels */
- 64 /* bigrowbuf hack */
- 1 /* filter byte */
- 7*8 /* rounding of width to multiple of 8 pixels */
- 8) /* extra max_pixel_depth pad */
info_ptr->rowbytes = (png_size_t)0;
else
info_ptr->rowbytes = PNG_ROWBYTES(info_ptr->pixel_depth, width);
}
Commit Message: third_party/libpng: update to 1.2.54
[email protected]
BUG=560291
Review URL: https://codereview.chromium.org/1467263003
Cr-Commit-Position: refs/heads/master@{#362298}
CWE ID: CWE-119 | png_set_IHDR(png_structp png_ptr, png_infop info_ptr,
png_uint_32 width, png_uint_32 height, int bit_depth,
int color_type, int interlace_type, int compression_type,
int filter_type)
{
png_debug1(1, "in %s storage function", "IHDR");
if (png_ptr == NULL || info_ptr == NULL)
return;
info_ptr->width = width;
info_ptr->height = height;
info_ptr->bit_depth = (png_byte)bit_depth;
info_ptr->color_type = (png_byte)color_type;
info_ptr->compression_type = (png_byte)compression_type;
info_ptr->filter_type = (png_byte)filter_type;
info_ptr->interlace_type = (png_byte)interlace_type;
png_check_IHDR (png_ptr, info_ptr->width, info_ptr->height,
info_ptr->bit_depth, info_ptr->color_type, info_ptr->interlace_type,
info_ptr->compression_type, info_ptr->filter_type);
if (info_ptr->color_type == PNG_COLOR_TYPE_PALETTE)
info_ptr->channels = 1;
else if (info_ptr->color_type & PNG_COLOR_MASK_COLOR)
info_ptr->channels = 3;
else
info_ptr->channels = 1;
if (info_ptr->color_type & PNG_COLOR_MASK_ALPHA)
info_ptr->channels++;
info_ptr->pixel_depth = (png_byte)(info_ptr->channels * info_ptr->bit_depth);
/* Check for potential overflow */
if (width > (PNG_UINT_32_MAX
>> 3) /* 8-byte RGBA pixels */
- 64 /* bigrowbuf hack */
- 1 /* filter byte */
- 7*8 /* rounding of width to multiple of 8 pixels */
- 8) /* extra max_pixel_depth pad */
{
info_ptr->rowbytes = (png_size_t)0;
png_error(png_ptr, "Image width is too large for this architecture");
}
else
info_ptr->rowbytes = PNG_ROWBYTES(info_ptr->pixel_depth, width);
}
| 172,182 |
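
Both versions of png_set_IHDR above compute the same conservative width ceiling; the fix changes what happens when it is exceeded, from silently storing rowbytes = 0 to raising png_error(), which in libpng longjmps out of the call. The guard in isolation, as a compilable sketch — validate_width is an invented name, and it returns an error code where the real fix aborts via png_error:

#include <stdint.h>
#include <stdio.h>

#define PNG_UINT_32_MAX 0xffffffffU

/* Invented helper mirroring the width check in png_set_IHDR above. */
static int validate_width(uint32_t width)
{
    uint32_t limit = (PNG_UINT_32_MAX >> 3) /* 8-byte RGBA pixels */
                     - 64                   /* bigrowbuf hack */
                     - 1                    /* filter byte */
                     - 7 * 8                /* width rounded up to 8 pixels */
                     - 8;                   /* extra max_pixel_depth pad */

    if (width > limit) {
        /* Old behaviour: rowbytes silently became 0 and decoding limped on.
         * Fixed behaviour: hard error, so no later code sees a bogus size. */
        fprintf(stderr, "Image width is too large for this architecture\n");
        return -1;
    }
    return 0;
}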
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static inline int check_entry_size_and_hooks(struct arpt_entry *e,
struct xt_table_info *newinfo,
const unsigned char *base,
const unsigned char *limit,
const unsigned int *hook_entries,
const unsigned int *underflows,
unsigned int valid_hooks)
{
unsigned int h;
int err;
if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 ||
(unsigned char *)e + sizeof(struct arpt_entry) >= limit) {
duprintf("Bad offset %p\n", e);
return -EINVAL;
}
if (e->next_offset
< sizeof(struct arpt_entry) + sizeof(struct xt_entry_target)) {
duprintf("checking: element %p size %u\n",
e, e->next_offset);
return -EINVAL;
}
err = check_entry(e);
if (err)
return err;
/* Check hooks & underflows */
for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
if (!(valid_hooks & (1 << h)))
continue;
if ((unsigned char *)e - base == hook_entries[h])
newinfo->hook_entry[h] = hook_entries[h];
if ((unsigned char *)e - base == underflows[h]) {
if (!check_underflow(e)) {
pr_err("Underflows must be unconditional and "
"use the STANDARD target with "
"ACCEPT/DROP\n");
return -EINVAL;
}
newinfo->underflow[h] = underflows[h];
}
}
/* Clear counters and comefrom */
e->counters = ((struct xt_counters) { 0, 0 });
e->comefrom = 0;
return 0;
}
Commit Message: netfilter: x_tables: make sure e->next_offset covers remaining blob size
Otherwise this function may read data beyond the ruleset blob.
Signed-off-by: Florian Westphal <[email protected]>
Signed-off-by: Pablo Neira Ayuso <[email protected]>
CWE ID: CWE-119 | static inline int check_entry_size_and_hooks(struct arpt_entry *e,
struct xt_table_info *newinfo,
const unsigned char *base,
const unsigned char *limit,
const unsigned int *hook_entries,
const unsigned int *underflows,
unsigned int valid_hooks)
{
unsigned int h;
int err;
if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 ||
(unsigned char *)e + sizeof(struct arpt_entry) >= limit ||
(unsigned char *)e + e->next_offset > limit) {
duprintf("Bad offset %p\n", e);
return -EINVAL;
}
if (e->next_offset
< sizeof(struct arpt_entry) + sizeof(struct xt_entry_target)) {
duprintf("checking: element %p size %u\n",
e, e->next_offset);
return -EINVAL;
}
err = check_entry(e);
if (err)
return err;
/* Check hooks & underflows */
for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
if (!(valid_hooks & (1 << h)))
continue;
if ((unsigned char *)e - base == hook_entries[h])
newinfo->hook_entry[h] = hook_entries[h];
if ((unsigned char *)e - base == underflows[h]) {
if (!check_underflow(e)) {
pr_err("Underflows must be unconditional and "
"use the STANDARD target with "
"ACCEPT/DROP\n");
return -EINVAL;
}
newinfo->underflow[h] = underflows[h];
}
}
/* Clear counters and comefrom */
e->counters = ((struct xt_counters) { 0, 0 });
e->comefrom = 0;
return 0;
}
| 167,210 |
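
The netfilter fix above adds one comparison: an entry is rejected unless the region it claims via next_offset also fits inside the ruleset blob, not just its fixed-size header. The bound, modelled standalone with simplified types — the real check additionally enforces alignment and a minimum next_offset, both visible in the code above:

#include <stddef.h>

/* Simplified stand-in for struct arpt_entry. */
struct entry {
    unsigned short next_offset; /* distance to the next entry in the blob */
};

static int entry_fits(const unsigned char *e, size_t hdr_size,
                      const unsigned char *limit)
{
    const struct entry *ent = (const struct entry *)e;

    if (e + hdr_size >= limit)        /* header itself truncated */
        return 0;
    if (e + ent->next_offset > limit) /* the added check: the claimed span
                                         must not run past the blob */
        return 0;
    return 1;
}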
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: int inet_sk_rebuild_header(struct sock *sk)
{
struct inet_sock *inet = inet_sk(sk);
struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0);
__be32 daddr;
int err;
/* Route is OK, nothing to do. */
if (rt)
return 0;
/* Reroute. */
daddr = inet->inet_daddr;
if (inet->opt && inet->opt->srr)
daddr = inet->opt->faddr;
rt = ip_route_output_ports(sock_net(sk), sk, daddr, inet->inet_saddr,
inet->inet_dport, inet->inet_sport,
sk->sk_protocol, RT_CONN_FLAGS(sk),
sk->sk_bound_dev_if);
if (!IS_ERR(rt)) {
err = 0;
sk_setup_caps(sk, &rt->dst);
} else {
err = PTR_ERR(rt);
/* Routing failed... */
sk->sk_route_caps = 0;
/*
* Other protocols have to map its equivalent state to TCP_SYN_SENT.
* DCCP maps its DCCP_REQUESTING state to TCP_SYN_SENT. -acme
*/
if (!sysctl_ip_dynaddr ||
sk->sk_state != TCP_SYN_SENT ||
(sk->sk_userlocks & SOCK_BINDADDR_LOCK) ||
(err = inet_sk_reselect_saddr(sk)) != 0)
sk->sk_err_soft = -err;
}
return err;
}
Commit Message: inet: add RCU protection to inet->opt
We lack proper synchronization to manipulate inet->opt ip_options
The problem is that ip_make_skb() calls ip_setup_cork() and
ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options),
without any protection against another thread manipulating inet->opt.
Another thread can change inet->opt pointer and free old one under us.
Use RCU to protect inet->opt (changed to inet->inet_opt).
Instead of handling atomic refcounts, just copy ip_options when
necessary, to avoid cache line dirtying.
We can't insert an rcu_head in struct ip_options since it's included in
skb->cb[], so this patch is large because I had to introduce a new
ip_options_rcu structure.
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Herbert Xu <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
CWE ID: CWE-362 | int inet_sk_rebuild_header(struct sock *sk)
{
struct inet_sock *inet = inet_sk(sk);
struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0);
__be32 daddr;
struct ip_options_rcu *inet_opt;
int err;
/* Route is OK, nothing to do. */
if (rt)
return 0;
/* Reroute. */
rcu_read_lock();
inet_opt = rcu_dereference(inet->inet_opt);
daddr = inet->inet_daddr;
if (inet_opt && inet_opt->opt.srr)
daddr = inet_opt->opt.faddr;
rcu_read_unlock();
rt = ip_route_output_ports(sock_net(sk), sk, daddr, inet->inet_saddr,
inet->inet_dport, inet->inet_sport,
sk->sk_protocol, RT_CONN_FLAGS(sk),
sk->sk_bound_dev_if);
if (!IS_ERR(rt)) {
err = 0;
sk_setup_caps(sk, &rt->dst);
} else {
err = PTR_ERR(rt);
/* Routing failed... */
sk->sk_route_caps = 0;
/*
* Other protocols have to map its equivalent state to TCP_SYN_SENT.
* DCCP maps its DCCP_REQUESTING state to TCP_SYN_SENT. -acme
*/
if (!sysctl_ip_dynaddr ||
sk->sk_state != TCP_SYN_SENT ||
(sk->sk_userlocks & SOCK_BINDADDR_LOCK) ||
(err = inet_sk_reselect_saddr(sk)) != 0)
sk->sk_err_soft = -err;
}
return err;
}
| 165,543 |
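
The kernel patch above replaces unsynchronized reads of inet->opt with the RCU pattern visible in the fixed function: snapshot the pointer under rcu_read_lock(), use only the snapshot, unlock. The update side — not shown in this function — publishes a new block and defers the free until readers drain. A userspace analogue using liburcu, which exports same-named primitives (assumptions: liburcu's default flavor, reader threads registered with rcu_register_thread(), and a single writer, so the writer's plain read of the shared pointer is safe):

#include <urcu.h>   /* liburcu; link with -lurcu */
#include <stdlib.h>

struct ip_opts { int srr; unsigned int faddr; };

static struct ip_opts *shared_opt;   /* plays the role of inet->inet_opt */

static unsigned int route_daddr(unsigned int daddr)
{
    struct ip_opts *opt;

    rcu_read_lock();
    opt = rcu_dereference(shared_opt); /* snapshot the pointer once */
    if (opt && opt->srr)
        daddr = opt->faddr;            /* only the snapshot is used */
    rcu_read_unlock();
    return daddr;
}

static void replace_opts(struct ip_opts *new_opt)  /* single writer assumed */
{
    struct ip_opts *old = shared_opt;

    rcu_assign_pointer(shared_opt, new_opt);
    synchronize_rcu(); /* wait until no reader can still hold 'old' */
    free(old);         /* the kernel version instead defers via the
                          rcu_head embedded in ip_options_rcu */
}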
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: mget(struct magic_set *ms, const unsigned char *s, struct magic *m,
size_t nbytes, size_t o, unsigned int cont_level, int mode, int text,
int flip, int recursion_level, int *printed_something,
int *need_separator, int *returnval)
{
uint32_t soffset, offset = ms->offset;
uint32_t lhs;
int rv, oneed_separator, in_type;
char *sbuf, *rbuf;
union VALUETYPE *p = &ms->ms_value;
struct mlist ml;
if (recursion_level >= 20) {
file_error(ms, 0, "recursion nesting exceeded");
return -1;
}
if (mcopy(ms, p, m->type, m->flag & INDIR, s, (uint32_t)(offset + o),
(uint32_t)nbytes, m) == -1)
return -1;
if ((ms->flags & MAGIC_DEBUG) != 0) {
fprintf(stderr, "mget(type=%d, flag=%x, offset=%u, o=%"
SIZE_T_FORMAT "u, " "nbytes=%" SIZE_T_FORMAT "u)\n",
m->type, m->flag, offset, o, nbytes);
mdebug(offset, (char *)(void *)p, sizeof(union VALUETYPE));
#ifndef COMPILE_ONLY
file_mdump(m);
#endif
}
if (m->flag & INDIR) {
int off = m->in_offset;
if (m->in_op & FILE_OPINDIRECT) {
const union VALUETYPE *q = CAST(const union VALUETYPE *,
((const void *)(s + offset + off)));
switch (cvt_flip(m->in_type, flip)) {
case FILE_BYTE:
off = q->b;
break;
case FILE_SHORT:
off = q->h;
break;
case FILE_BESHORT:
off = (short)((q->hs[0]<<8)|(q->hs[1]));
break;
case FILE_LESHORT:
off = (short)((q->hs[1]<<8)|(q->hs[0]));
break;
case FILE_LONG:
off = q->l;
break;
case FILE_BELONG:
case FILE_BEID3:
off = (int32_t)((q->hl[0]<<24)|(q->hl[1]<<16)|
(q->hl[2]<<8)|(q->hl[3]));
break;
case FILE_LEID3:
case FILE_LELONG:
off = (int32_t)((q->hl[3]<<24)|(q->hl[2]<<16)|
(q->hl[1]<<8)|(q->hl[0]));
break;
case FILE_MELONG:
off = (int32_t)((q->hl[1]<<24)|(q->hl[0]<<16)|
(q->hl[3]<<8)|(q->hl[2]));
break;
}
if ((ms->flags & MAGIC_DEBUG) != 0)
fprintf(stderr, "indirect offs=%u\n", off);
}
switch (in_type = cvt_flip(m->in_type, flip)) {
case FILE_BYTE:
if (OFFSET_OOB(nbytes, offset, 1))
return 0;
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = p->b & off;
break;
case FILE_OPOR:
offset = p->b | off;
break;
case FILE_OPXOR:
offset = p->b ^ off;
break;
case FILE_OPADD:
offset = p->b + off;
break;
case FILE_OPMINUS:
offset = p->b - off;
break;
case FILE_OPMULTIPLY:
offset = p->b * off;
break;
case FILE_OPDIVIDE:
offset = p->b / off;
break;
case FILE_OPMODULO:
offset = p->b % off;
break;
}
} else
offset = p->b;
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
case FILE_BESHORT:
if (OFFSET_OOB(nbytes, offset, 2))
return 0;
lhs = (p->hs[0] << 8) | p->hs[1];
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = lhs & off;
break;
case FILE_OPOR:
offset = lhs | off;
break;
case FILE_OPXOR:
offset = lhs ^ off;
break;
case FILE_OPADD:
offset = lhs + off;
break;
case FILE_OPMINUS:
offset = lhs - off;
break;
case FILE_OPMULTIPLY:
offset = lhs * off;
break;
case FILE_OPDIVIDE:
offset = lhs / off;
break;
case FILE_OPMODULO:
offset = lhs % off;
break;
}
} else
offset = lhs;
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
case FILE_LESHORT:
if (OFFSET_OOB(nbytes, offset, 2))
return 0;
lhs = (p->hs[1] << 8) | p->hs[0];
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = lhs & off;
break;
case FILE_OPOR:
offset = lhs | off;
break;
case FILE_OPXOR:
offset = lhs ^ off;
break;
case FILE_OPADD:
offset = lhs + off;
break;
case FILE_OPMINUS:
offset = lhs - off;
break;
case FILE_OPMULTIPLY:
offset = lhs * off;
break;
case FILE_OPDIVIDE:
offset = lhs / off;
break;
case FILE_OPMODULO:
offset = lhs % off;
break;
}
} else
offset = lhs;
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
case FILE_SHORT:
if (OFFSET_OOB(nbytes, offset, 2))
return 0;
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = p->h & off;
break;
case FILE_OPOR:
offset = p->h | off;
break;
case FILE_OPXOR:
offset = p->h ^ off;
break;
case FILE_OPADD:
offset = p->h + off;
break;
case FILE_OPMINUS:
offset = p->h - off;
break;
case FILE_OPMULTIPLY:
offset = p->h * off;
break;
case FILE_OPDIVIDE:
offset = p->h / off;
break;
case FILE_OPMODULO:
offset = p->h % off;
break;
}
}
else
offset = p->h;
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
case FILE_BELONG:
case FILE_BEID3:
if (OFFSET_OOB(nbytes, offset, 4))
return 0;
lhs = (p->hl[0] << 24) | (p->hl[1] << 16) |
(p->hl[2] << 8) | p->hl[3];
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = lhs & off;
break;
case FILE_OPOR:
offset = lhs | off;
break;
case FILE_OPXOR:
offset = lhs ^ off;
break;
case FILE_OPADD:
offset = lhs + off;
break;
case FILE_OPMINUS:
offset = lhs - off;
break;
case FILE_OPMULTIPLY:
offset = lhs * off;
break;
case FILE_OPDIVIDE:
offset = lhs / off;
break;
case FILE_OPMODULO:
offset = lhs % off;
break;
}
} else
offset = lhs;
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
case FILE_LELONG:
case FILE_LEID3:
if (OFFSET_OOB(nbytes, offset, 4))
return 0;
lhs = (p->hl[3] << 24) | (p->hl[2] << 16) |
(p->hl[1] << 8) | p->hl[0];
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = lhs & off;
break;
case FILE_OPOR:
offset = lhs | off;
break;
case FILE_OPXOR:
offset = lhs ^ off;
break;
case FILE_OPADD:
offset = lhs + off;
break;
case FILE_OPMINUS:
offset = lhs - off;
break;
case FILE_OPMULTIPLY:
offset = lhs * off;
break;
case FILE_OPDIVIDE:
offset = lhs / off;
break;
case FILE_OPMODULO:
offset = lhs % off;
break;
}
} else
offset = lhs;
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
case FILE_MELONG:
if (OFFSET_OOB(nbytes, offset, 4))
return 0;
lhs = (p->hl[1] << 24) | (p->hl[0] << 16) |
(p->hl[3] << 8) | p->hl[2];
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = lhs & off;
break;
case FILE_OPOR:
offset = lhs | off;
break;
case FILE_OPXOR:
offset = lhs ^ off;
break;
case FILE_OPADD:
offset = lhs + off;
break;
case FILE_OPMINUS:
offset = lhs - off;
break;
case FILE_OPMULTIPLY:
offset = lhs * off;
break;
case FILE_OPDIVIDE:
offset = lhs / off;
break;
case FILE_OPMODULO:
offset = lhs % off;
break;
}
} else
offset = lhs;
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
case FILE_LONG:
if (OFFSET_OOB(nbytes, offset, 4))
return 0;
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = p->l & off;
break;
case FILE_OPOR:
offset = p->l | off;
break;
case FILE_OPXOR:
offset = p->l ^ off;
break;
case FILE_OPADD:
offset = p->l + off;
break;
case FILE_OPMINUS:
offset = p->l - off;
break;
case FILE_OPMULTIPLY:
offset = p->l * off;
break;
case FILE_OPDIVIDE:
offset = p->l / off;
break;
case FILE_OPMODULO:
offset = p->l % off;
break;
}
} else
offset = p->l;
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
default:
break;
}
switch (in_type) {
case FILE_LEID3:
case FILE_BEID3:
offset = ((((offset >> 0) & 0x7f) << 0) |
(((offset >> 8) & 0x7f) << 7) |
(((offset >> 16) & 0x7f) << 14) |
(((offset >> 24) & 0x7f) << 21)) + 10;
break;
default:
break;
}
if (m->flag & INDIROFFADD) {
offset += ms->c.li[cont_level-1].off;
if (offset == 0) {
if ((ms->flags & MAGIC_DEBUG) != 0)
fprintf(stderr,
"indirect *zero* offset\n");
return 0;
}
if ((ms->flags & MAGIC_DEBUG) != 0)
fprintf(stderr, "indirect +offs=%u\n", offset);
}
if (mcopy(ms, p, m->type, 0, s, offset, nbytes, m) == -1)
return -1;
ms->offset = offset;
if ((ms->flags & MAGIC_DEBUG) != 0) {
mdebug(offset, (char *)(void *)p,
sizeof(union VALUETYPE));
#ifndef COMPILE_ONLY
file_mdump(m);
#endif
}
}
/* Verify we have enough data to match magic type */
switch (m->type) {
case FILE_BYTE:
if (OFFSET_OOB(nbytes, offset, 1))
return 0;
break;
case FILE_SHORT:
case FILE_BESHORT:
case FILE_LESHORT:
if (OFFSET_OOB(nbytes, offset, 2))
return 0;
break;
case FILE_LONG:
case FILE_BELONG:
case FILE_LELONG:
case FILE_MELONG:
case FILE_DATE:
case FILE_BEDATE:
case FILE_LEDATE:
case FILE_MEDATE:
case FILE_LDATE:
case FILE_BELDATE:
case FILE_LELDATE:
case FILE_MELDATE:
case FILE_FLOAT:
case FILE_BEFLOAT:
case FILE_LEFLOAT:
if (OFFSET_OOB(nbytes, offset, 4))
return 0;
break;
case FILE_DOUBLE:
case FILE_BEDOUBLE:
case FILE_LEDOUBLE:
if (OFFSET_OOB(nbytes, offset, 8))
return 0;
break;
case FILE_STRING:
case FILE_PSTRING:
case FILE_SEARCH:
if (OFFSET_OOB(nbytes, offset, m->vallen))
return 0;
break;
case FILE_REGEX:
if (nbytes < offset)
return 0;
break;
case FILE_INDIRECT:
if (offset == 0)
return 0;
if (nbytes < offset)
return 0;
sbuf = ms->o.buf;
soffset = ms->offset;
ms->o.buf = NULL;
ms->offset = 0;
rv = file_softmagic(ms, s + offset, nbytes - offset,
recursion_level, BINTEST, text);
if ((ms->flags & MAGIC_DEBUG) != 0)
fprintf(stderr, "indirect @offs=%u[%d]\n", offset, rv);
rbuf = ms->o.buf;
ms->o.buf = sbuf;
ms->offset = soffset;
if (rv == 1) {
if ((ms->flags & (MAGIC_MIME|MAGIC_APPLE)) == 0 &&
file_printf(ms, F(ms, m, "%u"), offset) == -1) {
free(rbuf);
return -1;
}
if (file_printf(ms, "%s", rbuf) == -1) {
free(rbuf);
return -1;
}
}
free(rbuf);
return rv;
case FILE_USE:
if (nbytes < offset)
return 0;
sbuf = m->value.s;
if (*sbuf == '^') {
sbuf++;
flip = !flip;
}
if (file_magicfind(ms, sbuf, &ml) == -1) {
file_error(ms, 0, "cannot find entry `%s'", sbuf);
return -1;
}
oneed_separator = *need_separator;
if (m->flag & NOSPACE)
*need_separator = 0;
rv = match(ms, ml.magic, ml.nmagic, s, nbytes, offset + o,
mode, text, flip, recursion_level, printed_something,
need_separator, returnval);
if (rv != 1)
*need_separator = oneed_separator;
return rv;
case FILE_NAME:
if (file_printf(ms, "%s", m->desc) == -1)
return -1;
return 1;
case FILE_DEFAULT: /* nothing to check */
case FILE_CLEAR:
default:
break;
}
if (!mconvert(ms, m, flip))
return 0;
return 1;
}
Commit Message: - reduce recursion level from 20 to 10 and make a symbolic constant for it.
- pull out the guts of saving and restoring the output buffer into functions
and take care not to overwrite the error message if an error happened.
CWE ID: CWE-399 | mget(struct magic_set *ms, const unsigned char *s, struct magic *m,
size_t nbytes, size_t o, unsigned int cont_level, int mode, int text,
int flip, int recursion_level, int *printed_something,
int *need_separator, int *returnval)
{
uint32_t offset = ms->offset;
uint32_t lhs;
file_pushbuf_t *pb;
int rv, oneed_separator, in_type;
char *rbuf;
union VALUETYPE *p = &ms->ms_value;
struct mlist ml;
if (recursion_level >= MAX_RECURSION_LEVEL) {
file_error(ms, 0, "recursion nesting exceeded");
return -1;
}
if (mcopy(ms, p, m->type, m->flag & INDIR, s, (uint32_t)(offset + o),
(uint32_t)nbytes, m) == -1)
return -1;
if ((ms->flags & MAGIC_DEBUG) != 0) {
fprintf(stderr, "mget(type=%d, flag=%x, offset=%u, o=%"
SIZE_T_FORMAT "u, " "nbytes=%" SIZE_T_FORMAT "u)\n",
m->type, m->flag, offset, o, nbytes);
mdebug(offset, (char *)(void *)p, sizeof(union VALUETYPE));
#ifndef COMPILE_ONLY
file_mdump(m);
#endif
}
if (m->flag & INDIR) {
int off = m->in_offset;
if (m->in_op & FILE_OPINDIRECT) {
const union VALUETYPE *q = CAST(const union VALUETYPE *,
((const void *)(s + offset + off)));
switch (cvt_flip(m->in_type, flip)) {
case FILE_BYTE:
off = q->b;
break;
case FILE_SHORT:
off = q->h;
break;
case FILE_BESHORT:
off = (short)((q->hs[0]<<8)|(q->hs[1]));
break;
case FILE_LESHORT:
off = (short)((q->hs[1]<<8)|(q->hs[0]));
break;
case FILE_LONG:
off = q->l;
break;
case FILE_BELONG:
case FILE_BEID3:
off = (int32_t)((q->hl[0]<<24)|(q->hl[1]<<16)|
(q->hl[2]<<8)|(q->hl[3]));
break;
case FILE_LEID3:
case FILE_LELONG:
off = (int32_t)((q->hl[3]<<24)|(q->hl[2]<<16)|
(q->hl[1]<<8)|(q->hl[0]));
break;
case FILE_MELONG:
off = (int32_t)((q->hl[1]<<24)|(q->hl[0]<<16)|
(q->hl[3]<<8)|(q->hl[2]));
break;
}
if ((ms->flags & MAGIC_DEBUG) != 0)
fprintf(stderr, "indirect offs=%u\n", off);
}
switch (in_type = cvt_flip(m->in_type, flip)) {
case FILE_BYTE:
if (OFFSET_OOB(nbytes, offset, 1))
return 0;
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = p->b & off;
break;
case FILE_OPOR:
offset = p->b | off;
break;
case FILE_OPXOR:
offset = p->b ^ off;
break;
case FILE_OPADD:
offset = p->b + off;
break;
case FILE_OPMINUS:
offset = p->b - off;
break;
case FILE_OPMULTIPLY:
offset = p->b * off;
break;
case FILE_OPDIVIDE:
offset = p->b / off;
break;
case FILE_OPMODULO:
offset = p->b % off;
break;
}
} else
offset = p->b;
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
case FILE_BESHORT:
if (OFFSET_OOB(nbytes, offset, 2))
return 0;
lhs = (p->hs[0] << 8) | p->hs[1];
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = lhs & off;
break;
case FILE_OPOR:
offset = lhs | off;
break;
case FILE_OPXOR:
offset = lhs ^ off;
break;
case FILE_OPADD:
offset = lhs + off;
break;
case FILE_OPMINUS:
offset = lhs - off;
break;
case FILE_OPMULTIPLY:
offset = lhs * off;
break;
case FILE_OPDIVIDE:
offset = lhs / off;
break;
case FILE_OPMODULO:
offset = lhs % off;
break;
}
} else
offset = lhs;
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
case FILE_LESHORT:
if (OFFSET_OOB(nbytes, offset, 2))
return 0;
lhs = (p->hs[1] << 8) | p->hs[0];
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = lhs & off;
break;
case FILE_OPOR:
offset = lhs | off;
break;
case FILE_OPXOR:
offset = lhs ^ off;
break;
case FILE_OPADD:
offset = lhs + off;
break;
case FILE_OPMINUS:
offset = lhs - off;
break;
case FILE_OPMULTIPLY:
offset = lhs * off;
break;
case FILE_OPDIVIDE:
offset = lhs / off;
break;
case FILE_OPMODULO:
offset = lhs % off;
break;
}
} else
offset = lhs;
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
case FILE_SHORT:
if (OFFSET_OOB(nbytes, offset, 2))
return 0;
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = p->h & off;
break;
case FILE_OPOR:
offset = p->h | off;
break;
case FILE_OPXOR:
offset = p->h ^ off;
break;
case FILE_OPADD:
offset = p->h + off;
break;
case FILE_OPMINUS:
offset = p->h - off;
break;
case FILE_OPMULTIPLY:
offset = p->h * off;
break;
case FILE_OPDIVIDE:
offset = p->h / off;
break;
case FILE_OPMODULO:
offset = p->h % off;
break;
}
}
else
offset = p->h;
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
case FILE_BELONG:
case FILE_BEID3:
if (OFFSET_OOB(nbytes, offset, 4))
return 0;
lhs = (p->hl[0] << 24) | (p->hl[1] << 16) |
(p->hl[2] << 8) | p->hl[3];
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = lhs & off;
break;
case FILE_OPOR:
offset = lhs | off;
break;
case FILE_OPXOR:
offset = lhs ^ off;
break;
case FILE_OPADD:
offset = lhs + off;
break;
case FILE_OPMINUS:
offset = lhs - off;
break;
case FILE_OPMULTIPLY:
offset = lhs * off;
break;
case FILE_OPDIVIDE:
offset = lhs / off;
break;
case FILE_OPMODULO:
offset = lhs % off;
break;
}
} else
offset = lhs;
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
case FILE_LELONG:
case FILE_LEID3:
if (OFFSET_OOB(nbytes, offset, 4))
return 0;
lhs = (p->hl[3] << 24) | (p->hl[2] << 16) |
(p->hl[1] << 8) | p->hl[0];
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = lhs & off;
break;
case FILE_OPOR:
offset = lhs | off;
break;
case FILE_OPXOR:
offset = lhs ^ off;
break;
case FILE_OPADD:
offset = lhs + off;
break;
case FILE_OPMINUS:
offset = lhs - off;
break;
case FILE_OPMULTIPLY:
offset = lhs * off;
break;
case FILE_OPDIVIDE:
offset = lhs / off;
break;
case FILE_OPMODULO:
offset = lhs % off;
break;
}
} else
offset = lhs;
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
case FILE_MELONG:
if (OFFSET_OOB(nbytes, offset, 4))
return 0;
lhs = (p->hl[1] << 24) | (p->hl[0] << 16) |
(p->hl[3] << 8) | p->hl[2];
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = lhs & off;
break;
case FILE_OPOR:
offset = lhs | off;
break;
case FILE_OPXOR:
offset = lhs ^ off;
break;
case FILE_OPADD:
offset = lhs + off;
break;
case FILE_OPMINUS:
offset = lhs - off;
break;
case FILE_OPMULTIPLY:
offset = lhs * off;
break;
case FILE_OPDIVIDE:
offset = lhs / off;
break;
case FILE_OPMODULO:
offset = lhs % off;
break;
}
} else
offset = lhs;
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
case FILE_LONG:
if (OFFSET_OOB(nbytes, offset, 4))
return 0;
if (off) {
switch (m->in_op & FILE_OPS_MASK) {
case FILE_OPAND:
offset = p->l & off;
break;
case FILE_OPOR:
offset = p->l | off;
break;
case FILE_OPXOR:
offset = p->l ^ off;
break;
case FILE_OPADD:
offset = p->l + off;
break;
case FILE_OPMINUS:
offset = p->l - off;
break;
case FILE_OPMULTIPLY:
offset = p->l * off;
break;
case FILE_OPDIVIDE:
offset = p->l / off;
break;
case FILE_OPMODULO:
offset = p->l % off;
break;
}
} else
offset = p->l;
if (m->in_op & FILE_OPINVERSE)
offset = ~offset;
break;
default:
break;
}
switch (in_type) {
case FILE_LEID3:
case FILE_BEID3:
offset = ((((offset >> 0) & 0x7f) << 0) |
(((offset >> 8) & 0x7f) << 7) |
(((offset >> 16) & 0x7f) << 14) |
(((offset >> 24) & 0x7f) << 21)) + 10;
break;
default:
break;
}
if (m->flag & INDIROFFADD) {
offset += ms->c.li[cont_level-1].off;
if (offset == 0) {
if ((ms->flags & MAGIC_DEBUG) != 0)
fprintf(stderr,
"indirect *zero* offset\n");
return 0;
}
if ((ms->flags & MAGIC_DEBUG) != 0)
fprintf(stderr, "indirect +offs=%u\n", offset);
}
if (mcopy(ms, p, m->type, 0, s, offset, nbytes, m) == -1)
return -1;
ms->offset = offset;
if ((ms->flags & MAGIC_DEBUG) != 0) {
mdebug(offset, (char *)(void *)p,
sizeof(union VALUETYPE));
#ifndef COMPILE_ONLY
file_mdump(m);
#endif
}
}
/* Verify we have enough data to match magic type */
switch (m->type) {
case FILE_BYTE:
if (OFFSET_OOB(nbytes, offset, 1))
return 0;
break;
case FILE_SHORT:
case FILE_BESHORT:
case FILE_LESHORT:
if (OFFSET_OOB(nbytes, offset, 2))
return 0;
break;
case FILE_LONG:
case FILE_BELONG:
case FILE_LELONG:
case FILE_MELONG:
case FILE_DATE:
case FILE_BEDATE:
case FILE_LEDATE:
case FILE_MEDATE:
case FILE_LDATE:
case FILE_BELDATE:
case FILE_LELDATE:
case FILE_MELDATE:
case FILE_FLOAT:
case FILE_BEFLOAT:
case FILE_LEFLOAT:
if (OFFSET_OOB(nbytes, offset, 4))
return 0;
break;
case FILE_DOUBLE:
case FILE_BEDOUBLE:
case FILE_LEDOUBLE:
if (OFFSET_OOB(nbytes, offset, 8))
return 0;
break;
case FILE_STRING:
case FILE_PSTRING:
case FILE_SEARCH:
if (OFFSET_OOB(nbytes, offset, m->vallen))
return 0;
break;
case FILE_REGEX:
if (nbytes < offset)
return 0;
break;
case FILE_INDIRECT:
if (offset == 0)
return 0;
if (nbytes < offset)
return 0;
if ((pb = file_push_buffer(ms)) == NULL)
return -1;
rv = file_softmagic(ms, s + offset, nbytes - offset,
recursion_level, BINTEST, text);
if ((ms->flags & MAGIC_DEBUG) != 0)
fprintf(stderr, "indirect @offs=%u[%d]\n", offset, rv);
rbuf = file_pop_buffer(ms, pb);
if (rbuf == NULL)
return -1;
if (rv == 1) {
if ((ms->flags & (MAGIC_MIME|MAGIC_APPLE)) == 0 &&
file_printf(ms, F(ms, m, "%u"), offset) == -1) {
free(rbuf);
return -1;
}
if (file_printf(ms, "%s", rbuf) == -1) {
free(rbuf);
return -1;
}
}
free(rbuf);
return rv;
case FILE_USE:
if (nbytes < offset)
return 0;
rbuf = m->value.s;
if (*rbuf == '^') {
rbuf++;
flip = !flip;
}
if (file_magicfind(ms, rbuf, &ml) == -1) {
file_error(ms, 0, "cannot find entry `%s'", rbuf);
return -1;
}
oneed_separator = *need_separator;
if (m->flag & NOSPACE)
*need_separator = 0;
rv = match(ms, ml.magic, ml.nmagic, s, nbytes, offset + o,
mode, text, flip, recursion_level, printed_something,
need_separator, returnval);
if (rv != 1)
*need_separator = oneed_separator;
return rv;
case FILE_NAME:
if (file_printf(ms, "%s", m->desc) == -1)
return -1;
return 1;
case FILE_DEFAULT: /* nothing to check */
case FILE_CLEAR:
default:
break;
}
if (!mconvert(ms, m, flip))
return 0;
return 1;
}
| 166,248 |
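Every arm of the switch above gates its read with OFFSET_OOB(nbytes, offset, width) before dereferencing p. A minimal C sketch of that idiom, assuming the usual file(1) semantics of "width bytes at offset must fit inside nbytes" (the real libmagic macro may differ in detail):

#include <stddef.h>
#include <stdint.h>

/* Stand-in for OFFSET_OOB (an assumption, not the libmagic macro): true when
 * reading `width` bytes at `offset` would run past the `nbytes` available.
 * Written as two comparisons so that offset + width cannot overflow. */
static int offset_oob(size_t nbytes, size_t offset, size_t width)
{
    return nbytes < width || offset > nbytes - width;
}

/* The FILE_BESHORT pattern above, reduced to one helper. */
static int read_be16(const unsigned char *buf, size_t nbytes,
                     size_t offset, uint16_t *out)
{
    if (offset_oob(nbytes, offset, 2))
        return 0;        /* mirrors the `return 0` no-match arms */
    *out = (uint16_t)((buf[offset] << 8) | buf[offset + 1]);
    return 1;
}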
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: AppModalDialog::~AppModalDialog() {
}
Commit Message: Fix a Windows crash bug with javascript alerts from extension popups.
BUG=137707
Review URL: https://chromiumcodereview.appspot.com/10828423
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@152716 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-20 | AppModalDialog::~AppModalDialog() {
CompleteDialog();
}
| 170,755 |
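The one-line fix above makes destruction run the completion path, so a dialog torn down without being answered still notifies its observers. A hedged C sketch of that invariant, with invented names (the real CompleteDialog() is a member of AppModalDialog):

#include <stddef.h>

struct dialog {
    int completed;
    void (*on_complete)(struct dialog *);   /* observer notification */
};

/* Idempotent: safe to call from both the normal close path and teardown. */
static void dialog_complete(struct dialog *d)
{
    if (d->completed)
        return;
    d->completed = 1;
    if (d->on_complete)
        d->on_complete(d);
}

/* Mirrors ~AppModalDialog(): never destroy without completing first,
 * so observers are not left referencing a half-dead dialog. */
static void dialog_destroy(struct dialog *d)
{
    dialog_complete(d);
    /* ... release remaining resources ... */
}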
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: gfx::Vector2d LayerTreeHost::DistributeScrollOffsetToViewports(
const gfx::Vector2d offset,
Layer* layer) {
DCHECK(layer);
if (layer != outer_viewport_scroll_layer_.get())
return offset;
gfx::Vector2d inner_viewport_offset =
inner_viewport_scroll_layer_->scroll_offset();
gfx::Vector2d outer_viewport_offset =
outer_viewport_scroll_layer_->scroll_offset();
if (offset == inner_viewport_offset + outer_viewport_offset) {
return outer_viewport_offset;
}
gfx::Vector2d max_outer_viewport_scroll_offset =
outer_viewport_scroll_layer_->MaxScrollOffset();
gfx::Vector2d max_inner_viewport_scroll_offset =
inner_viewport_scroll_layer_->MaxScrollOffset();
outer_viewport_offset = offset - inner_viewport_offset;
outer_viewport_offset.SetToMin(max_outer_viewport_scroll_offset);
outer_viewport_offset.SetToMax(gfx::Vector2d());
inner_viewport_offset = offset - outer_viewport_offset;
inner_viewport_offset.SetToMin(max_inner_viewport_scroll_offset);
inner_viewport_offset.SetToMax(gfx::Vector2d());
inner_viewport_scroll_layer_->SetScrollOffset(inner_viewport_offset);
return outer_viewport_offset;
}
Commit Message: Removed pinch viewport scroll offset distribution
The associated change in Blink makes the pinch viewport a proper
ScrollableArea meaning the normal path for synchronizing layer scroll
offsets is used.
This is a 2 sided patch, the other CL:
https://codereview.chromium.org/199253002/
BUG=349941
Review URL: https://codereview.chromium.org/210543002
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@260105 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-399 | gfx::Vector2d LayerTreeHost::DistributeScrollOffsetToViewports(
| 171,199 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: void LoginHtmlDialog::GetDialogSize(gfx::Size* size) const {
size->SetSize(width_, height_);
}
Commit Message: cros: The next 100 clang plugin errors.
BUG=none
TEST=none
TBR=dpolukhin
Review URL: http://codereview.chromium.org/7022008
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@85418 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-399 | void LoginHtmlDialog::GetDialogSize(gfx::Size* size) const {
bool LoginHtmlDialog::ShouldShowDialogTitle() const {
return true;
}
void LoginHtmlDialog::Observe(NotificationType type,
const NotificationSource& source,
const NotificationDetails& details) {
DCHECK(type.value == NotificationType::LOAD_COMPLETED_MAIN_FRAME);
if (bubble_frame_view_)
bubble_frame_view_->StopThrobber();
}
| 170,615 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
{
enum direction dir;
if(!(current->thread.flags & SPARC_FLAG_UNALIGNED) ||
(((insn >> 30) & 3) != 3))
goto kill_user;
dir = decode_direction(insn);
if(!ok_for_user(regs, insn, dir)) {
goto kill_user;
} else {
int err, size = decode_access_size(insn);
unsigned long addr;
if(floating_point_load_or_store_p(insn)) {
printk("User FPU load/store unaligned unsupported.\n");
goto kill_user;
}
addr = compute_effective_address(regs, insn);
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr);
switch(dir) {
case load:
err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),
regs),
size, (unsigned long *) addr,
decode_signedness(insn));
break;
case store:
err = do_int_store(((insn>>25)&0x1f), size,
(unsigned long *) addr, regs);
break;
case both:
/*
* This was supported in 2.4. However, we question
* the value of SWAP instruction across word boundaries.
*/
printk("Unaligned SWAP unsupported.\n");
err = -EFAULT;
break;
default:
unaligned_panic("Impossible user unaligned trap.");
goto out;
}
if (err)
goto kill_user;
else
advance(regs);
goto out;
}
kill_user:
user_mna_trap_fault(regs, insn);
out:
;
}
Commit Message: perf: Remove the nmi parameter from the swevent and overflow interface
The nmi parameter indicated if we could do wakeups from the current
context, if not, we would set some state and self-IPI and let the
resulting interrupt do the wakeup.
For the various event classes:
- hardware: nmi=0; PMI is in fact an NMI or we run irq_work_run from
the PMI-tail (ARM etc.)
- tracepoint: nmi=0; since tracepoint could be from NMI context.
- software: nmi=[0,1]; some, like the schedule thing, cannot
perform wakeups, and hence need 0.
As one can see, there is very little nmi=1 usage, and the down-side of
not using it is that on some platforms some software events can have a
jiffy delay in wakeup (when arch_irq_work_raise isn't implemented).
The up-side however is that we can remove the nmi parameter and save a
bunch of conditionals in fast paths.
Signed-off-by: Peter Zijlstra <[email protected]>
Cc: Michael Cree <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Deng-Cheng Zhu <[email protected]>
Cc: Anton Blanchard <[email protected]>
Cc: Eric B Munson <[email protected]>
Cc: Heiko Carstens <[email protected]>
Cc: Paul Mundt <[email protected]>
Cc: David S. Miller <[email protected]>
Cc: Frederic Weisbecker <[email protected]>
Cc: Jason Wessel <[email protected]>
Cc: Don Zickus <[email protected]>
Link: http://lkml.kernel.org/n/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
CWE ID: CWE-399 | asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
{
enum direction dir;
if(!(current->thread.flags & SPARC_FLAG_UNALIGNED) ||
(((insn >> 30) & 3) != 3))
goto kill_user;
dir = decode_direction(insn);
if(!ok_for_user(regs, insn, dir)) {
goto kill_user;
} else {
int err, size = decode_access_size(insn);
unsigned long addr;
if(floating_point_load_or_store_p(insn)) {
printk("User FPU load/store unaligned unsupported.\n");
goto kill_user;
}
addr = compute_effective_address(regs, insn);
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
switch(dir) {
case load:
err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),
regs),
size, (unsigned long *) addr,
decode_signedness(insn));
break;
case store:
err = do_int_store(((insn>>25)&0x1f), size,
(unsigned long *) addr, regs);
break;
case both:
/*
* This was supported in 2.4. However, we question
* the value of SWAP instruction across word boundaries.
*/
printk("Unaligned SWAP unsupported.\n");
err = -EFAULT;
break;
default:
unaligned_panic("Impossible user unaligned trap.");
goto out;
}
if (err)
goto kill_user;
else
advance(regs);
goto out;
}
kill_user:
user_mna_trap_fault(regs, insn);
out:
;
}
| 165,806 |
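The only functional edit in the fixed body is the dropped third argument to perf_sw_event(). A compilable sketch of the call-site change, with stand-in types and a locally mirrored enum value, since the real declarations live in the kernel's perf headers:

#include <stdint.h>

typedef uint64_t u64;
struct pt_regs;                                /* opaque in this sketch */
enum { PERF_COUNT_SW_ALIGNMENT_FAULTS = 7 };   /* mirrors the uapi value */

/* New-style prototype (sketch): no nmi flag; the event core now decides
 * how to defer wakeups (e.g. via irq_work) instead of the caller. */
void perf_sw_event(unsigned int event_id, u64 nr,
                   struct pt_regs *regs, u64 addr);

void report_alignment_fault(struct pt_regs *regs, unsigned long addr)
{
    /* before: perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr); */
    perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
}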
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: bool AppCacheDatabase::UpgradeSchema() {
return DeleteExistingAndCreateNewDatabase();
}
Commit Message: Reland "AppCache: Add padding to cross-origin responses."
This is a reland of 85b389caa7d725cdd31f59e9a2b79ff54804b7b7
Initialized CacheRecord::padding_size to 0.
Original change's description:
> AppCache: Add padding to cross-origin responses.
>
> Bug: 918293
> Change-Id: I4f16640f06feac009d6bbbb624951da6d2669f6c
> Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1488059
> Commit-Queue: Staphany Park <[email protected]>
> Reviewed-by: Victor Costan <[email protected]>
> Reviewed-by: Marijn Kruisselbrink <[email protected]>
> Cr-Commit-Position: refs/heads/master@{#644624}
Bug: 918293
Change-Id: Ie1d3f99c7e8a854d33255a4d66243da2ce16441c
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1539906
Reviewed-by: Victor Costan <[email protected]>
Commit-Queue: Staphany Park <[email protected]>
Cr-Commit-Position: refs/heads/master@{#644719}
CWE ID: CWE-200 | bool AppCacheDatabase::UpgradeSchema() {
// Start from scratch for versions that would require unsupported migrations.
if (meta_table_->GetVersionNumber() < 7)
return DeleteExistingAndCreateNewDatabase();
sql::Transaction transaction(db_.get());
if (!transaction.Begin())
return false;
if (!db_->Execute("ALTER TABLE Caches ADD COLUMN padding_size INTEGER"))
return false;
if (!db_->Execute("ALTER TABLE Entries ADD COLUMN padding_size INTEGER"))
return false;
meta_table_->SetVersionNumber(8);
meta_table_->SetCompatibleVersionNumber(8);
if (!AppCacheBackfillerVersion8(db_.get()).BackfillPaddingSizes())
return false;
return transaction.Commit();
}
| 172,983 |
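The fixed UpgradeSchema() shows the "rebuild if too old, otherwise migrate inside one transaction" shape. The same pattern in the raw sqlite3 C API, as a hedged sketch: delete_and_recreate() stands in for DeleteExistingAndCreateNewDatabase(), and the meta-table version bookkeeping and backfill are omitted.

#include <sqlite3.h>

int delete_and_recreate(sqlite3 *db);    /* stand-in, defined elsewhere */

static int upgrade_schema(sqlite3 *db, int version)
{
    if (version < 7)                     /* unsupported migration source */
        return delete_and_recreate(db);

    if (sqlite3_exec(db, "BEGIN", 0, 0, 0) != SQLITE_OK)
        return -1;

    /* Both ALTERs must land together or not at all. */
    if (sqlite3_exec(db, "ALTER TABLE Caches ADD COLUMN padding_size INTEGER",
                     0, 0, 0) != SQLITE_OK ||
        sqlite3_exec(db, "ALTER TABLE Entries ADD COLUMN padding_size INTEGER",
                     0, 0, 0) != SQLITE_OK) {
        sqlite3_exec(db, "ROLLBACK", 0, 0, 0);
        return -1;
    }
    return sqlite3_exec(db, "COMMIT", 0, 0, 0) == SQLITE_OK ? 0 : -1;
}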
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
compat_ulong_t, mode, compat_ulong_t __user *, nmask,
compat_ulong_t, maxnode, compat_ulong_t, flags)
{
long err = 0;
unsigned long __user *nm = NULL;
unsigned long nr_bits, alloc_size;
nodemask_t bm;
nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
if (nmask) {
err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
nm = compat_alloc_user_space(alloc_size);
err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
}
if (err)
return -EFAULT;
return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
}
Commit Message: mm/mempolicy.c: fix error handling in set_mempolicy and mbind.
In the case that compat_get_bitmap fails we do not want to copy the
bitmap to the user as it will contain uninitialized stack data and leak
sensitive data.
Signed-off-by: Chris Salls <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
CWE ID: CWE-388 | COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
compat_ulong_t, mode, compat_ulong_t __user *, nmask,
compat_ulong_t, maxnode, compat_ulong_t, flags)
{
unsigned long __user *nm = NULL;
unsigned long nr_bits, alloc_size;
nodemask_t bm;
nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
if (nmask) {
if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
return -EFAULT;
nm = compat_alloc_user_space(alloc_size);
if (copy_to_user(nm, nodes_addr(bm), alloc_size))
return -EFAULT;
}
return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
}
| 168,258 |
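The vulnerable version folded the compat_get_bitmap() result into err with |= and called copy_to_user() regardless, so a failed fetch still shipped the uninitialized on-stack nodemask to user space. A compilable C sketch of the bug class and the fixed ordering, with stand-in helpers:

#include <stddef.h>

int fill(unsigned long *bits);                   /* may fail, leaving bits unset */
int copy_out(void *user_dst, const void *src, size_t n);

/* Buggy shape: errors are accumulated, but the copy runs unconditionally,
 * leaking whatever stack garbage `bits` held when fill() failed. */
int leaky(void *user_dst)
{
    unsigned long bits[4];
    int err = fill(bits);
    err |= copy_out(user_dst, bits, sizeof(bits));
    return err ? -1 : 0;
}

/* Fixed shape: bail out before user memory is ever written. */
int safe(void *user_dst)
{
    unsigned long bits[4];
    if (fill(bits))
        return -1;
    if (copy_out(user_dst, bits, sizeof(bits)))
        return -1;
    return 0;
}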
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: get_linux_shareopts(const char *shareopts, char **plinux_opts)
{
int rc;
assert(plinux_opts != NULL);
*plinux_opts = NULL;
/* default options for Solaris shares */
(void) add_linux_shareopt(plinux_opts, "no_subtree_check", NULL);
(void) add_linux_shareopt(plinux_opts, "no_root_squash", NULL);
(void) add_linux_shareopt(plinux_opts, "mountpoint", NULL);
rc = foreach_nfs_shareopt(shareopts, get_linux_shareopts_cb,
plinux_opts);
if (rc != SA_OK) {
free(*plinux_opts);
*plinux_opts = NULL;
}
return (rc);
}
Commit Message: Move nfs.c:foreach_nfs_shareopt() to libshare.c:foreach_shareopt()
so that it can be (re)used in other parts of libshare.
CWE ID: CWE-200 | get_linux_shareopts(const char *shareopts, char **plinux_opts)
{
int rc;
assert(plinux_opts != NULL);
*plinux_opts = NULL;
/* default options for Solaris shares */
(void) add_linux_shareopt(plinux_opts, "no_subtree_check", NULL);
(void) add_linux_shareopt(plinux_opts, "no_root_squash", NULL);
(void) add_linux_shareopt(plinux_opts, "mountpoint", NULL);
rc = foreach_shareopt(shareopts, get_linux_shareopts_cb,
plinux_opts);
if (rc != SA_OK) {
free(*plinux_opts);
*plinux_opts = NULL;
}
return (rc);
}
| 170,134 |
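The commit hoists the option iterator into libshare so other protocols can reuse one tokenizer. A hedged sketch of what a foreach_shareopt()-style walker typically looks like; the signature is invented, not the ZFS one, and the input string must be writable because strtok_r() splits in place:

#include <string.h>

typedef int (*opt_cb)(const char *key, const char *val, void *ctx);

/* Walk "key=value,key2,..." and hand each pair to the callback; the first
 * nonzero return aborts the walk, like the SA_OK checks above. */
static int foreach_opt(char *opts, opt_cb cb, void *ctx)
{
    char *save = NULL;
    for (char *tok = strtok_r(opts, ",", &save); tok != NULL;
         tok = strtok_r(NULL, ",", &save)) {
        char *eq = strchr(tok, '=');
        if (eq != NULL)
            *eq = '\0';
        int rc = cb(tok, eq != NULL ? eq + 1 : NULL, ctx);
        if (rc != 0)
            return rc;
    }
    return 0;
}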
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: GURL SanitizeFrontendURL(
const GURL& url,
const std::string& scheme,
const std::string& host,
const std::string& path,
bool allow_query) {
std::vector<std::string> query_parts;
if (allow_query) {
for (net::QueryIterator it(url); !it.IsAtEnd(); it.Advance()) {
std::string value = SanitizeFrontendQueryParam(it.GetKey(),
it.GetValue());
if (!value.empty()) {
query_parts.push_back(
base::StringPrintf("%s=%s", it.GetKey().c_str(), value.c_str()));
}
}
}
std::string query =
query_parts.empty() ? "" : "?" + base::JoinString(query_parts, "&");
std::string constructed = base::StringPrintf("%s://%s%s%s",
scheme.c_str(), host.c_str(), path.c_str(), query.c_str());
GURL result = GURL(constructed);
if (!result.is_valid())
return GURL();
return result;
}
Commit Message: DevTools: move front-end URL handling to DevToolsUIBindingds
BUG=662859
Review-Url: https://codereview.chromium.org/2607833002
Cr-Commit-Position: refs/heads/master@{#440926}
CWE ID: CWE-200 | GURL SanitizeFrontendURL(
| 172,460 |
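The sanitizer in the input never edits the incoming URL in place; it rebuilds one from vetted components and re-validates the result. A hedged C sketch of that reconstruct-from-parts shape, where param_allowed() is an invented stand-in for SanitizeFrontendQueryParam():

#include <stdio.h>
#include <string.h>

/* Invented allowlist check; the real one knows the DevTools parameters. */
static int param_allowed(const char *key, const char *val)
{
    return strcmp(key, "panel") == 0 && strlen(val) < 32;
}

/* Rebuild scheme://host + path + vetted query; `path` carries its own
 * leading slash. Truncation is treated as failure rather than emitting a
 * partial URL. */
static int rebuild_url(char *out, size_t outsz,
                       const char *scheme, const char *host, const char *path,
                       const char **keys, const char **vals, size_t n)
{
    int len = snprintf(out, outsz, "%s://%s%s", scheme, host, path);
    if (len < 0 || (size_t)len >= outsz)
        return -1;
    char sep = '?';
    for (size_t i = 0; i < n; i++) {
        if (!param_allowed(keys[i], vals[i]))
            continue;
        int k = snprintf(out + len, outsz - (size_t)len, "%c%s=%s",
                         sep, keys[i], vals[i]);
        if (k < 0 || (size_t)k >= outsz - (size_t)len)
            return -1;
        len += k;
        sep = '&';
    }
    return 0;
}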
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: static void perform_gamma_scale16_tests(png_modifier *pm)
{
# ifndef PNG_MAX_GAMMA_8
# define PNG_MAX_GAMMA_8 11
# endif
# define SBIT_16_TO_8 PNG_MAX_GAMMA_8
/* Include the alpha cases here. Note that sbit matches the internal value
* used by the library - otherwise we will get spurious errors from the
* internal sbit style approximation.
*
* The threshold test is here because otherwise the 16 to 8 conversion will
* proceed *without* gamma correction, and the tests above will fail (but not
* by much) - this could be fixed, it only appears with the -g option.
*/
unsigned int i, j;
for (i=0; i<pm->ngamma_tests; ++i)
{
for (j=0; j<pm->ngamma_tests; ++j)
{
if (i != j &&
fabs(pm->gammas[j]/pm->gammas[i]-1) >= PNG_GAMMA_THRESHOLD)
{
gamma_transform_test(pm, 0, 16, 0, pm->interlace_type,
1/pm->gammas[i], pm->gammas[j], SBIT_16_TO_8,
pm->use_input_precision_16to8, 1 /*scale16*/);
if (fail(pm))
return;
gamma_transform_test(pm, 2, 16, 0, pm->interlace_type,
1/pm->gammas[i], pm->gammas[j], SBIT_16_TO_8,
pm->use_input_precision_16to8, 1 /*scale16*/);
if (fail(pm))
return;
gamma_transform_test(pm, 4, 16, 0, pm->interlace_type,
1/pm->gammas[i], pm->gammas[j], SBIT_16_TO_8,
pm->use_input_precision_16to8, 1 /*scale16*/);
if (fail(pm))
return;
gamma_transform_test(pm, 6, 16, 0, pm->interlace_type,
1/pm->gammas[i], pm->gammas[j], SBIT_16_TO_8,
pm->use_input_precision_16to8, 1 /*scale16*/);
if (fail(pm))
return;
}
}
}
}
Commit Message: DO NOT MERGE Update libpng to 1.6.20
BUG:23265085
Change-Id: I85199805636d771f3597b691b63bc0bf46084833
(cherry picked from commit bbe98b40cda082024b669fa508931042eed18f82)
CWE ID: | static void perform_gamma_scale16_tests(png_modifier *pm)
{
# ifndef PNG_MAX_GAMMA_8
# define PNG_MAX_GAMMA_8 11
# endif
# if defined PNG_MAX_GAMMA_8 || PNG_LIBPNG_VER < 10700
# define SBIT_16_TO_8 PNG_MAX_GAMMA_8
# else
# define SBIT_16_TO_8 16
# endif
/* Include the alpha cases here. Note that sbit matches the internal value
* used by the library - otherwise we will get spurious errors from the
* internal sbit style approximation.
*
* The threshold test is here because otherwise the 16 to 8 conversion will
* proceed *without* gamma correction, and the tests above will fail (but not
* by much) - this could be fixed, it only appears with the -g option.
*/
unsigned int i, j;
for (i=0; i<pm->ngamma_tests; ++i)
{
for (j=0; j<pm->ngamma_tests; ++j)
{
if (i != j &&
fabs(pm->gammas[j]/pm->gammas[i]-1) >= PNG_GAMMA_THRESHOLD)
{
gamma_transform_test(pm, 0, 16, 0, pm->interlace_type,
1/pm->gammas[i], pm->gammas[j], SBIT_16_TO_8,
pm->use_input_precision_16to8, 1 /*scale16*/);
if (fail(pm))
return;
gamma_transform_test(pm, 2, 16, 0, pm->interlace_type,
1/pm->gammas[i], pm->gammas[j], SBIT_16_TO_8,
pm->use_input_precision_16to8, 1 /*scale16*/);
if (fail(pm))
return;
gamma_transform_test(pm, 4, 16, 0, pm->interlace_type,
1/pm->gammas[i], pm->gammas[j], SBIT_16_TO_8,
pm->use_input_precision_16to8, 1 /*scale16*/);
if (fail(pm))
return;
gamma_transform_test(pm, 6, 16, 0, pm->interlace_type,
1/pm->gammas[i], pm->gammas[j], SBIT_16_TO_8,
pm->use_input_precision_16to8, 1 /*scale16*/);
if (fail(pm))
return;
}
}
}
}
| 173,681 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: int wasm_dis(WasmOp *op, const unsigned char *buf, int buf_len) {
op->len = 1;
op->op = buf[0];
if (op->op > 0xbf) {
return 1;
}
WasmOpDef *opdef = &opcodes[op->op];
switch (op->op) {
case WASM_OP_TRAP:
case WASM_OP_NOP:
case WASM_OP_ELSE:
case WASM_OP_RETURN:
case WASM_OP_DROP:
case WASM_OP_SELECT:
case WASM_OP_I32EQZ:
case WASM_OP_I32EQ:
case WASM_OP_I32NE:
case WASM_OP_I32LTS:
case WASM_OP_I32LTU:
case WASM_OP_I32GTS:
case WASM_OP_I32GTU:
case WASM_OP_I32LES:
case WASM_OP_I32LEU:
case WASM_OP_I32GES:
case WASM_OP_I32GEU:
case WASM_OP_I64EQZ:
case WASM_OP_I64EQ:
case WASM_OP_I64NE:
case WASM_OP_I64LTS:
case WASM_OP_I64LTU:
case WASM_OP_I64GTS:
case WASM_OP_I64GTU:
case WASM_OP_I64LES:
case WASM_OP_I64LEU:
case WASM_OP_I64GES:
case WASM_OP_I64GEU:
case WASM_OP_F32EQ:
case WASM_OP_F32NE:
case WASM_OP_F32LT:
case WASM_OP_F32GT:
case WASM_OP_F32LE:
case WASM_OP_F32GE:
case WASM_OP_F64EQ:
case WASM_OP_F64NE:
case WASM_OP_F64LT:
case WASM_OP_F64GT:
case WASM_OP_F64LE:
case WASM_OP_F64GE:
case WASM_OP_I32CLZ:
case WASM_OP_I32CTZ:
case WASM_OP_I32POPCNT:
case WASM_OP_I32ADD:
case WASM_OP_I32SUB:
case WASM_OP_I32MUL:
case WASM_OP_I32DIVS:
case WASM_OP_I32DIVU:
case WASM_OP_I32REMS:
case WASM_OP_I32REMU:
case WASM_OP_I32AND:
case WASM_OP_I32OR:
case WASM_OP_I32XOR:
case WASM_OP_I32SHL:
case WASM_OP_I32SHRS:
case WASM_OP_I32SHRU:
case WASM_OP_I32ROTL:
case WASM_OP_I32ROTR:
case WASM_OP_I64CLZ:
case WASM_OP_I64CTZ:
case WASM_OP_I64POPCNT:
case WASM_OP_I64ADD:
case WASM_OP_I64SUB:
case WASM_OP_I64MUL:
case WASM_OP_I64DIVS:
case WASM_OP_I64DIVU:
case WASM_OP_I64REMS:
case WASM_OP_I64REMU:
case WASM_OP_I64AND:
case WASM_OP_I64OR:
case WASM_OP_I64XOR:
case WASM_OP_I64SHL:
case WASM_OP_I64SHRS:
case WASM_OP_I64SHRU:
case WASM_OP_I64ROTL:
case WASM_OP_I64ROTR:
case WASM_OP_F32ABS:
case WASM_OP_F32NEG:
case WASM_OP_F32CEIL:
case WASM_OP_F32FLOOR:
case WASM_OP_F32TRUNC:
case WASM_OP_F32NEAREST:
case WASM_OP_F32SQRT:
case WASM_OP_F32ADD:
case WASM_OP_F32SUB:
case WASM_OP_F32MUL:
case WASM_OP_F32DIV:
case WASM_OP_F32MIN:
case WASM_OP_F32MAX:
case WASM_OP_F32COPYSIGN:
case WASM_OP_F64ABS:
case WASM_OP_F64NEG:
case WASM_OP_F64CEIL:
case WASM_OP_F64FLOOR:
case WASM_OP_F64TRUNC:
case WASM_OP_F64NEAREST:
case WASM_OP_F64SQRT:
case WASM_OP_F64ADD:
case WASM_OP_F64SUB:
case WASM_OP_F64MUL:
case WASM_OP_F64DIV:
case WASM_OP_F64MIN:
case WASM_OP_F64MAX:
case WASM_OP_F64COPYSIGN:
case WASM_OP_I32WRAPI64:
case WASM_OP_I32TRUNCSF32:
case WASM_OP_I32TRUNCUF32:
case WASM_OP_I32TRUNCSF64:
case WASM_OP_I32TRUNCUF64:
case WASM_OP_I64EXTENDSI32:
case WASM_OP_I64EXTENDUI32:
case WASM_OP_I64TRUNCSF32:
case WASM_OP_I64TRUNCUF32:
case WASM_OP_I64TRUNCSF64:
case WASM_OP_I64TRUNCUF64:
case WASM_OP_F32CONVERTSI32:
case WASM_OP_F32CONVERTUI32:
case WASM_OP_F32CONVERTSI64:
case WASM_OP_F32CONVERTUI64:
case WASM_OP_F32DEMOTEF64:
case WASM_OP_F64CONVERTSI32:
case WASM_OP_F64CONVERTUI32:
case WASM_OP_F64CONVERTSI64:
case WASM_OP_F64CONVERTUI64:
case WASM_OP_F64PROMOTEF32:
case WASM_OP_I32REINTERPRETF32:
case WASM_OP_I64REINTERPRETF64:
case WASM_OP_F32REINTERPRETI32:
case WASM_OP_F64REINTERPRETI64:
case WASM_OP_END:
{
snprintf (op->txt, R_ASM_BUFSIZE, "%s", opdef->txt);
}
break;
case WASM_OP_BLOCK:
case WASM_OP_LOOP:
case WASM_OP_IF:
{
st32 val = 0;
size_t n = read_i32_leb128 (buf + 1, buf + buf_len, &val);
if (!(n > 0 && n < buf_len)) goto err;
switch (0x80 - val) {
case R_BIN_WASM_VALUETYPE_EMPTY:
snprintf (op->txt, R_ASM_BUFSIZE, "%s", opdef->txt);
break;
case R_BIN_WASM_VALUETYPE_i32:
snprintf (op->txt, R_ASM_BUFSIZE, "%s (result i32)", opdef->txt);
break;
case R_BIN_WASM_VALUETYPE_i64:
snprintf (op->txt, R_ASM_BUFSIZE, "%s (result i64)", opdef->txt);
break;
case R_BIN_WASM_VALUETYPE_f32:
snprintf (op->txt, R_ASM_BUFSIZE, "%s (result f32)", opdef->txt);
break;
case R_BIN_WASM_VALUETYPE_f64:
snprintf (op->txt, R_ASM_BUFSIZE, "%s (result f64)", opdef->txt);
break;
default:
snprintf (op->txt, R_ASM_BUFSIZE, "%s (result ?)", opdef->txt);
break;
}
op->len += n;
}
break;
case WASM_OP_BR:
case WASM_OP_BRIF:
case WASM_OP_CALL:
{
ut32 val = 0;
size_t n = read_u32_leb128 (buf + 1, buf + buf_len, &val);
if (!(n > 0 && n < buf_len)) goto err;
snprintf (op->txt, R_ASM_BUFSIZE, "%s %d", opdef->txt, val);
op->len += n;
}
break;
case WASM_OP_BRTABLE:
{
ut32 count = 0, *table = NULL, def = 0;
size_t n = read_u32_leb128 (buf + 1, buf + buf_len, &count);
if (!(n > 0 && n < buf_len)) {
goto err;
}
if (!(table = calloc (count, sizeof (ut32)))) {
goto err;
}
int i = 0;
op->len += n;
for (i = 0; i < count; i++) {
n = read_u32_leb128 (buf + op->len, buf + buf_len, &table[i]);
if (!(op->len + n <= buf_len)) {
goto beach;
}
op->len += n;
}
n = read_u32_leb128 (buf + op->len, buf + buf_len, &def);
if (!(n > 0 && n + op->len < buf_len)) {
goto beach;
}
op->len += n;
snprintf (op->txt, R_ASM_BUFSIZE, "%s %d ", opdef->txt, count);
for (i = 0; i < count && strlen (op->txt) + 10 < R_ASM_BUFSIZE; i++) {
int optxtlen = strlen (op->txt);
snprintf (op->txt + optxtlen, R_ASM_BUFSIZE - optxtlen, "%d ", table[i]);
}
snprintf (op->txt + strlen (op->txt), R_ASM_BUFSIZE, "%d", def);
free (table);
break;
beach:
free (table);
goto err;
}
break;
case WASM_OP_CALLINDIRECT:
{
ut32 val = 0, reserved = 0;
size_t n = read_u32_leb128 (buf + 1, buf + buf_len, &val);
if (!(n > 0 && n < buf_len)) goto err;
op->len += n;
n = read_u32_leb128 (buf + op->len, buf + buf_len, &reserved);
if (!(n == 1 && op->len + n <= buf_len)) goto err;
reserved &= 0x1;
snprintf (op->txt, R_ASM_BUFSIZE, "%s %d %d", opdef->txt, val, reserved);
op->len += n;
}
break;
case WASM_OP_GETLOCAL:
case WASM_OP_SETLOCAL:
case WASM_OP_TEELOCAL:
case WASM_OP_GETGLOBAL:
case WASM_OP_SETGLOBAL:
{
ut32 val = 0;
size_t n = read_u32_leb128 (buf + 1, buf + buf_len, &val);
if (!(n > 0 && n < buf_len)) goto err;
snprintf (op->txt, R_ASM_BUFSIZE, "%s %d", opdef->txt, val);
op->len += n;
}
break;
case WASM_OP_I32LOAD:
case WASM_OP_I64LOAD:
case WASM_OP_F32LOAD:
case WASM_OP_F64LOAD:
case WASM_OP_I32LOAD8S:
case WASM_OP_I32LOAD8U:
case WASM_OP_I32LOAD16S:
case WASM_OP_I32LOAD16U:
case WASM_OP_I64LOAD8S:
case WASM_OP_I64LOAD8U:
case WASM_OP_I64LOAD16S:
case WASM_OP_I64LOAD16U:
case WASM_OP_I64LOAD32S:
case WASM_OP_I64LOAD32U:
case WASM_OP_I32STORE:
case WASM_OP_I64STORE:
case WASM_OP_F32STORE:
case WASM_OP_F64STORE:
case WASM_OP_I32STORE8:
case WASM_OP_I32STORE16:
case WASM_OP_I64STORE8:
case WASM_OP_I64STORE16:
case WASM_OP_I64STORE32:
{
ut32 flag = 0, offset = 0;
size_t n = read_u32_leb128 (buf + 1, buf + buf_len, &flag);
if (!(n > 0 && n < buf_len)) goto err;
op->len += n;
n = read_u32_leb128 (buf + op->len, buf + buf_len, &offset);
if (!(n > 0 && op->len + n <= buf_len)) goto err;
snprintf (op->txt, R_ASM_BUFSIZE, "%s %d %d", opdef->txt, flag, offset);
op->len += n;
}
break;
case WASM_OP_CURRENTMEMORY:
case WASM_OP_GROWMEMORY:
{
ut32 reserved = 0;
size_t n = read_u32_leb128 (buf + 1, buf + buf_len, &reserved);
if (!(n == 1 && n < buf_len)) goto err;
reserved &= 0x1;
snprintf (op->txt, R_ASM_BUFSIZE, "%s %d", opdef->txt, reserved);
op->len += n;
}
break;
case WASM_OP_I32CONST:
{
st32 val = 0;
size_t n = read_i32_leb128 (buf + 1, buf + buf_len, &val);
if (!(n > 0 && n < buf_len)) goto err;
snprintf (op->txt, R_ASM_BUFSIZE, "%s %" PFMT32d, opdef->txt, val);
op->len += n;
}
break;
case WASM_OP_I64CONST:
{
st64 val = 0;
size_t n = read_i64_leb128 (buf + 1, buf + buf_len, &val);
if (!(n > 0 && n < buf_len)) goto err;
snprintf (op->txt, R_ASM_BUFSIZE, "%s %" PFMT64d, opdef->txt, val);
op->len += n;
}
break;
case WASM_OP_F32CONST:
{
ut32 val = 0;
size_t n = read_u32_leb128 (buf + 1, buf + buf_len, &val);
if (!(n > 0 && n < buf_len)) goto err;
long double d = (long double)val;
snprintf (op->txt, R_ASM_BUFSIZE, "%s %" LDBLFMT, opdef->txt, d);
op->len += n;
}
break;
case WASM_OP_F64CONST:
{
ut64 val = 0;
size_t n = read_u64_leb128 (buf + 1, buf + buf_len, &val);
if (!(n > 0 && n < buf_len)) goto err;
long double d = (long double)val;
snprintf (op->txt, R_ASM_BUFSIZE, "%s %" LDBLFMT, opdef->txt, d);
op->len += n;
}
break;
default:
goto err;
}
return op->len;
err:
op->len = 1;
snprintf (op->txt, R_ASM_BUFSIZE, "invalid");
return op->len;
}
Commit Message: Fix #9969 - Stack overflow in wasm disassembler
CWE ID: CWE-119 | int wasm_dis(WasmOp *op, const unsigned char *buf, int buf_len) {
op->len = 1;
op->op = buf[0];
if (op->op > 0xbf) {
return 1;
}
WasmOpDef *opdef = &opcodes[op->op];
switch (op->op) {
case WASM_OP_TRAP:
case WASM_OP_NOP:
case WASM_OP_ELSE:
case WASM_OP_RETURN:
case WASM_OP_DROP:
case WASM_OP_SELECT:
case WASM_OP_I32EQZ:
case WASM_OP_I32EQ:
case WASM_OP_I32NE:
case WASM_OP_I32LTS:
case WASM_OP_I32LTU:
case WASM_OP_I32GTS:
case WASM_OP_I32GTU:
case WASM_OP_I32LES:
case WASM_OP_I32LEU:
case WASM_OP_I32GES:
case WASM_OP_I32GEU:
case WASM_OP_I64EQZ:
case WASM_OP_I64EQ:
case WASM_OP_I64NE:
case WASM_OP_I64LTS:
case WASM_OP_I64LTU:
case WASM_OP_I64GTS:
case WASM_OP_I64GTU:
case WASM_OP_I64LES:
case WASM_OP_I64LEU:
case WASM_OP_I64GES:
case WASM_OP_I64GEU:
case WASM_OP_F32EQ:
case WASM_OP_F32NE:
case WASM_OP_F32LT:
case WASM_OP_F32GT:
case WASM_OP_F32LE:
case WASM_OP_F32GE:
case WASM_OP_F64EQ:
case WASM_OP_F64NE:
case WASM_OP_F64LT:
case WASM_OP_F64GT:
case WASM_OP_F64LE:
case WASM_OP_F64GE:
case WASM_OP_I32CLZ:
case WASM_OP_I32CTZ:
case WASM_OP_I32POPCNT:
case WASM_OP_I32ADD:
case WASM_OP_I32SUB:
case WASM_OP_I32MUL:
case WASM_OP_I32DIVS:
case WASM_OP_I32DIVU:
case WASM_OP_I32REMS:
case WASM_OP_I32REMU:
case WASM_OP_I32AND:
case WASM_OP_I32OR:
case WASM_OP_I32XOR:
case WASM_OP_I32SHL:
case WASM_OP_I32SHRS:
case WASM_OP_I32SHRU:
case WASM_OP_I32ROTL:
case WASM_OP_I32ROTR:
case WASM_OP_I64CLZ:
case WASM_OP_I64CTZ:
case WASM_OP_I64POPCNT:
case WASM_OP_I64ADD:
case WASM_OP_I64SUB:
case WASM_OP_I64MUL:
case WASM_OP_I64DIVS:
case WASM_OP_I64DIVU:
case WASM_OP_I64REMS:
case WASM_OP_I64REMU:
case WASM_OP_I64AND:
case WASM_OP_I64OR:
case WASM_OP_I64XOR:
case WASM_OP_I64SHL:
case WASM_OP_I64SHRS:
case WASM_OP_I64SHRU:
case WASM_OP_I64ROTL:
case WASM_OP_I64ROTR:
case WASM_OP_F32ABS:
case WASM_OP_F32NEG:
case WASM_OP_F32CEIL:
case WASM_OP_F32FLOOR:
case WASM_OP_F32TRUNC:
case WASM_OP_F32NEAREST:
case WASM_OP_F32SQRT:
case WASM_OP_F32ADD:
case WASM_OP_F32SUB:
case WASM_OP_F32MUL:
case WASM_OP_F32DIV:
case WASM_OP_F32MIN:
case WASM_OP_F32MAX:
case WASM_OP_F32COPYSIGN:
case WASM_OP_F64ABS:
case WASM_OP_F64NEG:
case WASM_OP_F64CEIL:
case WASM_OP_F64FLOOR:
case WASM_OP_F64TRUNC:
case WASM_OP_F64NEAREST:
case WASM_OP_F64SQRT:
case WASM_OP_F64ADD:
case WASM_OP_F64SUB:
case WASM_OP_F64MUL:
case WASM_OP_F64DIV:
case WASM_OP_F64MIN:
case WASM_OP_F64MAX:
case WASM_OP_F64COPYSIGN:
case WASM_OP_I32WRAPI64:
case WASM_OP_I32TRUNCSF32:
case WASM_OP_I32TRUNCUF32:
case WASM_OP_I32TRUNCSF64:
case WASM_OP_I32TRUNCUF64:
case WASM_OP_I64EXTENDSI32:
case WASM_OP_I64EXTENDUI32:
case WASM_OP_I64TRUNCSF32:
case WASM_OP_I64TRUNCUF32:
case WASM_OP_I64TRUNCSF64:
case WASM_OP_I64TRUNCUF64:
case WASM_OP_F32CONVERTSI32:
case WASM_OP_F32CONVERTUI32:
case WASM_OP_F32CONVERTSI64:
case WASM_OP_F32CONVERTUI64:
case WASM_OP_F32DEMOTEF64:
case WASM_OP_F64CONVERTSI32:
case WASM_OP_F64CONVERTUI32:
case WASM_OP_F64CONVERTSI64:
case WASM_OP_F64CONVERTUI64:
case WASM_OP_F64PROMOTEF32:
case WASM_OP_I32REINTERPRETF32:
case WASM_OP_I64REINTERPRETF64:
case WASM_OP_F32REINTERPRETI32:
case WASM_OP_F64REINTERPRETI64:
case WASM_OP_END:
{
snprintf (op->txt, R_ASM_BUFSIZE, "%s", opdef->txt);
}
break;
case WASM_OP_BLOCK:
case WASM_OP_LOOP:
case WASM_OP_IF:
{
st32 val = 0;
size_t n = read_i32_leb128 (buf + 1, buf + buf_len, &val);
if (!(n > 0 && n < buf_len)) goto err;
switch (0x80 - val) {
case R_BIN_WASM_VALUETYPE_EMPTY:
snprintf (op->txt, R_ASM_BUFSIZE, "%s", opdef->txt);
break;
case R_BIN_WASM_VALUETYPE_i32:
snprintf (op->txt, R_ASM_BUFSIZE, "%s (result i32)", opdef->txt);
break;
case R_BIN_WASM_VALUETYPE_i64:
snprintf (op->txt, R_ASM_BUFSIZE, "%s (result i64)", opdef->txt);
break;
case R_BIN_WASM_VALUETYPE_f32:
snprintf (op->txt, R_ASM_BUFSIZE, "%s (result f32)", opdef->txt);
break;
case R_BIN_WASM_VALUETYPE_f64:
snprintf (op->txt, R_ASM_BUFSIZE, "%s (result f64)", opdef->txt);
break;
default:
snprintf (op->txt, R_ASM_BUFSIZE, "%s (result ?)", opdef->txt);
break;
}
op->len += n;
}
break;
case WASM_OP_BR:
case WASM_OP_BRIF:
case WASM_OP_CALL:
{
ut32 val = 0;
size_t n = read_u32_leb128 (buf + 1, buf + buf_len, &val);
if (!(n > 0 && n < buf_len)) goto err;
snprintf (op->txt, R_ASM_BUFSIZE, "%s %d", opdef->txt, val);
op->len += n;
}
break;
case WASM_OP_BRTABLE:
{
ut32 count = 0, *table = NULL, def = 0;
size_t n = read_u32_leb128 (buf + 1, buf + buf_len, &count);
if (!(n > 0 && n < buf_len)) {
goto err;
}
if (!(table = calloc (count, sizeof (ut32)))) {
goto err;
}
int i = 0;
op->len += n;
for (i = 0; i < count; i++) {
n = read_u32_leb128 (buf + op->len, buf + buf_len, &table[i]);
if (!(op->len + n <= buf_len)) {
goto beach;
}
op->len += n;
}
n = read_u32_leb128 (buf + op->len, buf + buf_len, &def);
if (!(n > 0 && n + op->len < buf_len)) {
goto beach;
}
op->len += n;
snprintf (op->txt, R_ASM_BUFSIZE, "%s %d ", opdef->txt, count);
char *txt = op->txt;
int txtLen = strlen (op->txt);
int txtLeft = R_ASM_BUFSIZE - txtLen;
txt += txtLen;
for (i = 0; i < count && txtLen + 10 < R_ASM_BUFSIZE; i++) {
snprintf (txt, txtLeft, "%d ", table[i]);
txtLen = strlen (txt);
txt += txtLen;
txtLeft -= txtLen;
}
snprintf (txt, txtLeft - 1, "%d", def);
free (table);
break;
beach:
free (table);
goto err;
}
break;
case WASM_OP_CALLINDIRECT:
{
ut32 val = 0, reserved = 0;
size_t n = read_u32_leb128 (buf + 1, buf + buf_len, &val);
if (!(n > 0 && n < buf_len)) goto err;
op->len += n;
n = read_u32_leb128 (buf + op->len, buf + buf_len, &reserved);
if (!(n == 1 && op->len + n <= buf_len)) goto err;
reserved &= 0x1;
snprintf (op->txt, R_ASM_BUFSIZE, "%s %d %d", opdef->txt, val, reserved);
op->len += n;
}
break;
case WASM_OP_GETLOCAL:
case WASM_OP_SETLOCAL:
case WASM_OP_TEELOCAL:
case WASM_OP_GETGLOBAL:
case WASM_OP_SETGLOBAL:
{
ut32 val = 0;
size_t n = read_u32_leb128 (buf + 1, buf + buf_len, &val);
if (!(n > 0 && n < buf_len)) goto err;
snprintf (op->txt, R_ASM_BUFSIZE, "%s %d", opdef->txt, val);
op->len += n;
}
break;
case WASM_OP_I32LOAD:
case WASM_OP_I64LOAD:
case WASM_OP_F32LOAD:
case WASM_OP_F64LOAD:
case WASM_OP_I32LOAD8S:
case WASM_OP_I32LOAD8U:
case WASM_OP_I32LOAD16S:
case WASM_OP_I32LOAD16U:
case WASM_OP_I64LOAD8S:
case WASM_OP_I64LOAD8U:
case WASM_OP_I64LOAD16S:
case WASM_OP_I64LOAD16U:
case WASM_OP_I64LOAD32S:
case WASM_OP_I64LOAD32U:
case WASM_OP_I32STORE:
case WASM_OP_I64STORE:
case WASM_OP_F32STORE:
case WASM_OP_F64STORE:
case WASM_OP_I32STORE8:
case WASM_OP_I32STORE16:
case WASM_OP_I64STORE8:
case WASM_OP_I64STORE16:
case WASM_OP_I64STORE32:
{
ut32 flag = 0, offset = 0;
size_t n = read_u32_leb128 (buf + 1, buf + buf_len, &flag);
if (!(n > 0 && n < buf_len)) goto err;
op->len += n;
n = read_u32_leb128 (buf + op->len, buf + buf_len, &offset);
if (!(n > 0 && op->len + n <= buf_len)) goto err;
snprintf (op->txt, R_ASM_BUFSIZE, "%s %d %d", opdef->txt, flag, offset);
op->len += n;
}
break;
case WASM_OP_CURRENTMEMORY:
case WASM_OP_GROWMEMORY:
{
ut32 reserved = 0;
size_t n = read_u32_leb128 (buf + 1, buf + buf_len, &reserved);
if (!(n == 1 && n < buf_len)) goto err;
reserved &= 0x1;
snprintf (op->txt, R_ASM_BUFSIZE, "%s %d", opdef->txt, reserved);
op->len += n;
}
break;
case WASM_OP_I32CONST:
{
st32 val = 0;
size_t n = read_i32_leb128 (buf + 1, buf + buf_len, &val);
if (!(n > 0 && n < buf_len)) goto err;
snprintf (op->txt, R_ASM_BUFSIZE, "%s %" PFMT32d, opdef->txt, val);
op->len += n;
}
break;
case WASM_OP_I64CONST:
{
st64 val = 0;
size_t n = read_i64_leb128 (buf + 1, buf + buf_len, &val);
if (!(n > 0 && n < buf_len)) goto err;
snprintf (op->txt, R_ASM_BUFSIZE, "%s %" PFMT64d, opdef->txt, val);
op->len += n;
}
break;
case WASM_OP_F32CONST:
{
ut32 val = 0;
size_t n = read_u32_leb128 (buf + 1, buf + buf_len, &val);
if (!(n > 0 && n < buf_len)) goto err;
long double d = (long double)val;
snprintf (op->txt, R_ASM_BUFSIZE, "%s %" LDBLFMT, opdef->txt, d);
op->len += n;
}
break;
case WASM_OP_F64CONST:
{
ut64 val = 0;
size_t n = read_u64_leb128 (buf + 1, buf + buf_len, &val);
if (!(n > 0 && n < buf_len)) goto err;
long double d = (long double)val;
snprintf (op->txt, R_ASM_BUFSIZE, "%s %" LDBLFMT, opdef->txt, d);
op->len += n;
}
break;
default:
goto err;
}
return op->len;
err:
op->len = 1;
snprintf (op->txt, R_ASM_BUFSIZE, "invalid");
return op->len;
}
| 169,230 |
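The overflow in the vulnerable wasm_dis() is the final append: snprintf(op->txt + strlen(op->txt), R_ASM_BUFSIZE, ...) advances the write position but still passes the full buffer size, so the effective bound extends past the end of op->txt. A minimal sketch of the bounded-append idiom the fix switches to, with an invented buffer size:

#include <stdio.h>
#include <string.h>

#define BUFSZ 64                     /* stand-in for R_ASM_BUFSIZE */

/* Append decimal values to `buf`, shrinking the remaining capacity as the
 * cursor advances so no write can pass buf + BUFSZ. */
static void append_table(char buf[BUFSZ], const unsigned *table, unsigned count)
{
    size_t used = strlen(buf);
    char *cur = buf + used;
    size_t left = BUFSZ - used;      /* >= 1 for a NUL-terminated buf */

    for (unsigned i = 0; i < count && left > 1; i++) {
        int n = snprintf(cur, left, "%u ", table[i]);
        if (n < 0 || (size_t)n >= left)
            break;                   /* truncated: stop appending */
        cur += n;
        left -= (size_t)n;
    }
}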
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: key_ref_t key_create_or_update(key_ref_t keyring_ref,
const char *type,
const char *description,
const void *payload,
size_t plen,
key_perm_t perm,
unsigned long flags)
{
struct keyring_index_key index_key = {
.description = description,
};
struct key_preparsed_payload prep;
struct assoc_array_edit *edit;
const struct cred *cred = current_cred();
struct key *keyring, *key = NULL;
key_ref_t key_ref;
int ret;
/* look up the key type to see if it's one of the registered kernel
* types */
index_key.type = key_type_lookup(type);
if (IS_ERR(index_key.type)) {
key_ref = ERR_PTR(-ENODEV);
goto error;
}
key_ref = ERR_PTR(-EINVAL);
if (!index_key.type->match || !index_key.type->instantiate ||
(!index_key.description && !index_key.type->preparse))
goto error_put_type;
keyring = key_ref_to_ptr(keyring_ref);
key_check(keyring);
key_ref = ERR_PTR(-ENOTDIR);
if (keyring->type != &key_type_keyring)
goto error_put_type;
memset(&prep, 0, sizeof(prep));
prep.data = payload;
prep.datalen = plen;
prep.quotalen = index_key.type->def_datalen;
prep.trusted = flags & KEY_ALLOC_TRUSTED;
prep.expiry = TIME_T_MAX;
if (index_key.type->preparse) {
ret = index_key.type->preparse(&prep);
if (ret < 0) {
key_ref = ERR_PTR(ret);
goto error_free_prep;
}
if (!index_key.description)
index_key.description = prep.description;
key_ref = ERR_PTR(-EINVAL);
if (!index_key.description)
goto error_free_prep;
}
index_key.desc_len = strlen(index_key.description);
key_ref = ERR_PTR(-EPERM);
if (!prep.trusted && test_bit(KEY_FLAG_TRUSTED_ONLY, &keyring->flags))
goto error_free_prep;
flags |= prep.trusted ? KEY_ALLOC_TRUSTED : 0;
ret = __key_link_begin(keyring, &index_key, &edit);
if (ret < 0) {
key_ref = ERR_PTR(ret);
goto error_free_prep;
}
/* if we're going to allocate a new key, we're going to have
* to modify the keyring */
ret = key_permission(keyring_ref, KEY_NEED_WRITE);
if (ret < 0) {
key_ref = ERR_PTR(ret);
goto error_link_end;
}
/* if it's possible to update this type of key, search for an existing
* key of the same type and description in the destination keyring and
* update that instead if possible
*/
if (index_key.type->update) {
key_ref = find_key_to_update(keyring_ref, &index_key);
if (key_ref)
goto found_matching_key;
}
/* if the client doesn't provide, decide on the permissions we want */
if (perm == KEY_PERM_UNDEF) {
perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
perm |= KEY_USR_VIEW;
if (index_key.type->read)
perm |= KEY_POS_READ;
if (index_key.type == &key_type_keyring ||
index_key.type->update)
perm |= KEY_POS_WRITE;
}
/* allocate a new key */
key = key_alloc(index_key.type, index_key.description,
cred->fsuid, cred->fsgid, cred, perm, flags);
if (IS_ERR(key)) {
key_ref = ERR_CAST(key);
goto error_link_end;
}
/* instantiate it and link it into the target keyring */
ret = __key_instantiate_and_link(key, &prep, keyring, NULL, &edit);
if (ret < 0) {
key_put(key);
key_ref = ERR_PTR(ret);
goto error_link_end;
}
key_ref = make_key_ref(key, is_key_possessed(keyring_ref));
error_link_end:
__key_link_end(keyring, &index_key, edit);
error_free_prep:
if (index_key.type->preparse)
index_key.type->free_preparse(&prep);
error_put_type:
key_type_put(index_key.type);
error:
return key_ref;
found_matching_key:
/* we found a matching key, so we're going to try to update it
* - we can drop the locks first as we have the key pinned
*/
__key_link_end(keyring, &index_key, edit);
key_ref = __key_update(key_ref, &prep);
goto error_free_prep;
}
Commit Message: KEYS: Remove key_type::match in favour of overriding default by match_preparse
A previous patch added a ->match_preparse() method to the key type. This is
allowed to override the function called by the iteration algorithm.
Therefore, we can just set a default that simply checks for an exact match of
the key description with the original criterion data and allow match_preparse
to override it as needed.
The key_type::match op is then redundant and can be removed, as can the
user_match() function.
Signed-off-by: David Howells <[email protected]>
Acked-by: Vivek Goyal <[email protected]>
CWE ID: CWE-476 | key_ref_t key_create_or_update(key_ref_t keyring_ref,
const char *type,
const char *description,
const void *payload,
size_t plen,
key_perm_t perm,
unsigned long flags)
{
struct keyring_index_key index_key = {
.description = description,
};
struct key_preparsed_payload prep;
struct assoc_array_edit *edit;
const struct cred *cred = current_cred();
struct key *keyring, *key = NULL;
key_ref_t key_ref;
int ret;
/* look up the key type to see if it's one of the registered kernel
* types */
index_key.type = key_type_lookup(type);
if (IS_ERR(index_key.type)) {
key_ref = ERR_PTR(-ENODEV);
goto error;
}
key_ref = ERR_PTR(-EINVAL);
if (!index_key.type->instantiate ||
(!index_key.description && !index_key.type->preparse))
goto error_put_type;
keyring = key_ref_to_ptr(keyring_ref);
key_check(keyring);
key_ref = ERR_PTR(-ENOTDIR);
if (keyring->type != &key_type_keyring)
goto error_put_type;
memset(&prep, 0, sizeof(prep));
prep.data = payload;
prep.datalen = plen;
prep.quotalen = index_key.type->def_datalen;
prep.trusted = flags & KEY_ALLOC_TRUSTED;
prep.expiry = TIME_T_MAX;
if (index_key.type->preparse) {
ret = index_key.type->preparse(&prep);
if (ret < 0) {
key_ref = ERR_PTR(ret);
goto error_free_prep;
}
if (!index_key.description)
index_key.description = prep.description;
key_ref = ERR_PTR(-EINVAL);
if (!index_key.description)
goto error_free_prep;
}
index_key.desc_len = strlen(index_key.description);
key_ref = ERR_PTR(-EPERM);
if (!prep.trusted && test_bit(KEY_FLAG_TRUSTED_ONLY, &keyring->flags))
goto error_free_prep;
flags |= prep.trusted ? KEY_ALLOC_TRUSTED : 0;
ret = __key_link_begin(keyring, &index_key, &edit);
if (ret < 0) {
key_ref = ERR_PTR(ret);
goto error_free_prep;
}
/* if we're going to allocate a new key, we're going to have
* to modify the keyring */
ret = key_permission(keyring_ref, KEY_NEED_WRITE);
if (ret < 0) {
key_ref = ERR_PTR(ret);
goto error_link_end;
}
/* if it's possible to update this type of key, search for an existing
* key of the same type and description in the destination keyring and
* update that instead if possible
*/
if (index_key.type->update) {
key_ref = find_key_to_update(keyring_ref, &index_key);
if (key_ref)
goto found_matching_key;
}
/* if the client doesn't provide, decide on the permissions we want */
if (perm == KEY_PERM_UNDEF) {
perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
perm |= KEY_USR_VIEW;
if (index_key.type->read)
perm |= KEY_POS_READ;
if (index_key.type == &key_type_keyring ||
index_key.type->update)
perm |= KEY_POS_WRITE;
}
/* allocate a new key */
key = key_alloc(index_key.type, index_key.description,
cred->fsuid, cred->fsgid, cred, perm, flags);
if (IS_ERR(key)) {
key_ref = ERR_CAST(key);
goto error_link_end;
}
/* instantiate it and link it into the target keyring */
ret = __key_instantiate_and_link(key, &prep, keyring, NULL, &edit);
if (ret < 0) {
key_put(key);
key_ref = ERR_PTR(ret);
goto error_link_end;
}
key_ref = make_key_ref(key, is_key_possessed(keyring_ref));
error_link_end:
__key_link_end(keyring, &index_key, edit);
error_free_prep:
if (index_key.type->preparse)
index_key.type->free_preparse(&prep);
error_put_type:
key_type_put(index_key.type);
error:
return key_ref;
found_matching_key:
/* we found a matching key, so we're going to try to update it
* - we can drop the locks first as we have the key pinned
*/
__key_link_end(keyring, &index_key, edit);
key_ref = __key_update(key_ref, &prep);
goto error_free_prep;
}
| 168,439 |
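The commit replaces a mandatory per-type match op with a default exact-description comparator that match_preparse() may override. A generic C sketch of that default-with-override pattern, using invented structure names rather than the real keyring types:

#include <string.h>

struct criterion {
    const char *description;
    int (*cmp)(const struct criterion *, const char *candidate);
};

/* Default comparator: exact match on the description string. */
static int cmp_exact(const struct criterion *c, const char *candidate)
{
    return strcmp(c->description, candidate) == 0;
}

/* Each type may install its own comparator during preparse; when it does
 * not, the exact-match default stands and no per-type op is required. */
static void prepare_match(struct criterion *c,
                          void (*type_override)(struct criterion *))
{
    c->cmp = cmp_exact;
    if (type_override != NULL)
        type_override(c);
}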
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: ChromeContentBrowserClient::CreateThrottlesForNavigation(
content::NavigationHandle* handle) {
std::vector<std::unique_ptr<content::NavigationThrottle>> throttles;
if (handle->IsInMainFrame()) {
throttles.push_back(
page_load_metrics::MetricsNavigationThrottle::Create(handle));
}
#if BUILDFLAG(ENABLE_PLUGINS)
std::unique_ptr<content::NavigationThrottle> flash_url_throttle =
FlashDownloadInterception::MaybeCreateThrottleFor(handle);
if (flash_url_throttle)
throttles.push_back(std::move(flash_url_throttle));
#endif
#if BUILDFLAG(ENABLE_SUPERVISED_USERS)
std::unique_ptr<content::NavigationThrottle> supervised_user_throttle =
SupervisedUserNavigationThrottle::MaybeCreateThrottleFor(handle);
if (supervised_user_throttle)
throttles.push_back(std::move(supervised_user_throttle));
#endif
#if defined(OS_ANDROID)
prerender::PrerenderContents* prerender_contents =
prerender::PrerenderContents::FromWebContents(handle->GetWebContents());
if (!prerender_contents && handle->IsInMainFrame()) {
throttles.push_back(
navigation_interception::InterceptNavigationDelegate::CreateThrottleFor(
handle));
}
throttles.push_back(InterceptOMADownloadNavigationThrottle::Create(handle));
#elif BUILDFLAG(ENABLE_EXTENSIONS)
if (handle->IsInMainFrame()) {
auto url_to_app_throttle =
PlatformAppNavigationRedirector::MaybeCreateThrottleFor(handle);
if (url_to_app_throttle)
throttles.push_back(std::move(url_to_app_throttle));
}
if (base::FeatureList::IsEnabled(features::kDesktopPWAWindowing)) {
if (base::FeatureList::IsEnabled(features::kDesktopPWAsLinkCapturing)) {
auto bookmark_app_experimental_throttle =
extensions::BookmarkAppExperimentalNavigationThrottle::
MaybeCreateThrottleFor(handle);
if (bookmark_app_experimental_throttle)
throttles.push_back(std::move(bookmark_app_experimental_throttle));
} else if (!base::FeatureList::IsEnabled(
features::kDesktopPWAsStayInWindow)) {
auto bookmark_app_throttle =
extensions::BookmarkAppNavigationThrottle::MaybeCreateThrottleFor(
handle);
if (bookmark_app_throttle)
throttles.push_back(std::move(bookmark_app_throttle));
}
}
if (base::FeatureList::IsEnabled(
features::kMimeHandlerViewInCrossProcessFrame)) {
auto plugin_frame_attach_throttle =
extensions::ExtensionsGuestViewMessageFilter::MaybeCreateThrottle(
handle);
if (plugin_frame_attach_throttle)
throttles.push_back(std::move(plugin_frame_attach_throttle));
}
#endif
#if defined(OS_CHROMEOS)
if (handle->IsInMainFrame()) {
if (merge_session_throttling_utils::ShouldAttachNavigationThrottle() &&
!merge_session_throttling_utils::AreAllSessionMergedAlready() &&
handle->GetURL().SchemeIsHTTPOrHTTPS()) {
throttles.push_back(MergeSessionNavigationThrottle::Create(handle));
}
auto url_to_apps_throttle =
chromeos::AppsNavigationThrottle::MaybeCreate(handle);
if (url_to_apps_throttle)
throttles.push_back(std::move(url_to_apps_throttle));
}
#endif
#if BUILDFLAG(ENABLE_EXTENSIONS)
throttles.push_back(
std::make_unique<extensions::ExtensionNavigationThrottle>(handle));
std::unique_ptr<content::NavigationThrottle> user_script_throttle =
extensions::ExtensionsBrowserClient::Get()
->GetUserScriptListener()
->CreateNavigationThrottle(handle);
if (user_script_throttle)
throttles.push_back(std::move(user_script_throttle));
#endif
#if BUILDFLAG(ENABLE_SUPERVISED_USERS)
std::unique_ptr<content::NavigationThrottle> supervised_user_nav_throttle =
SupervisedUserGoogleAuthNavigationThrottle::MaybeCreate(handle);
if (supervised_user_nav_throttle)
throttles.push_back(std::move(supervised_user_nav_throttle));
#endif
content::WebContents* web_contents = handle->GetWebContents();
if (auto* subresource_filter_client =
ChromeSubresourceFilterClient::FromWebContents(web_contents)) {
subresource_filter_client->MaybeAppendNavigationThrottles(handle,
&throttles);
}
#if !defined(OS_ANDROID)
std::unique_ptr<content::NavigationThrottle>
background_tab_navigation_throttle = resource_coordinator::
BackgroundTabNavigationThrottle::MaybeCreateThrottleFor(handle);
if (background_tab_navigation_throttle)
throttles.push_back(std::move(background_tab_navigation_throttle));
#endif
#if defined(SAFE_BROWSING_DB_LOCAL)
std::unique_ptr<content::NavigationThrottle>
password_protection_navigation_throttle =
safe_browsing::MaybeCreateNavigationThrottle(handle);
if (password_protection_navigation_throttle) {
throttles.push_back(std::move(password_protection_navigation_throttle));
}
#endif
std::unique_ptr<content::NavigationThrottle> pdf_iframe_throttle =
PDFIFrameNavigationThrottle::MaybeCreateThrottleFor(handle);
if (pdf_iframe_throttle)
throttles.push_back(std::move(pdf_iframe_throttle));
std::unique_ptr<content::NavigationThrottle> tab_under_throttle =
TabUnderNavigationThrottle::MaybeCreate(handle);
if (tab_under_throttle)
throttles.push_back(std::move(tab_under_throttle));
throttles.push_back(std::make_unique<PolicyBlacklistNavigationThrottle>(
handle, handle->GetWebContents()->GetBrowserContext()));
if (base::FeatureList::IsEnabled(features::kSSLCommittedInterstitials)) {
throttles.push_back(std::make_unique<SSLErrorNavigationThrottle>(
handle,
std::make_unique<CertificateReportingServiceCertReporter>(web_contents),
base::Bind(&SSLErrorHandler::HandleSSLError)));
}
std::unique_ptr<content::NavigationThrottle> https_upgrade_timing_throttle =
TypedNavigationTimingThrottle::MaybeCreateThrottleFor(handle);
if (https_upgrade_timing_throttle)
throttles.push_back(std::move(https_upgrade_timing_throttle));
#if !defined(OS_ANDROID)
std::unique_ptr<content::NavigationThrottle> devtools_throttle =
DevToolsWindow::MaybeCreateNavigationThrottle(handle);
if (devtools_throttle)
throttles.push_back(std::move(devtools_throttle));
std::unique_ptr<content::NavigationThrottle> new_tab_page_throttle =
NewTabPageNavigationThrottle::MaybeCreateThrottleFor(handle);
if (new_tab_page_throttle)
throttles.push_back(std::move(new_tab_page_throttle));
std::unique_ptr<content::NavigationThrottle>
google_password_manager_throttle =
GooglePasswordManagerNavigationThrottle::MaybeCreateThrottleFor(
handle);
if (google_password_manager_throttle)
throttles.push_back(std::move(google_password_manager_throttle));
#endif
std::unique_ptr<content::NavigationThrottle> previews_lite_page_throttle =
PreviewsLitePageDecider::MaybeCreateThrottleFor(handle);
if (previews_lite_page_throttle)
throttles.push_back(std::move(previews_lite_page_throttle));
if (base::FeatureList::IsEnabled(safe_browsing::kCommittedSBInterstitials)) {
throttles.push_back(
std::make_unique<safe_browsing::SafeBrowsingNavigationThrottle>(
handle));
}
#if defined(OS_WIN) || defined(OS_MACOSX) || \
(defined(OS_LINUX) && !defined(OS_CHROMEOS))
std::unique_ptr<content::NavigationThrottle> browser_switcher_throttle =
browser_switcher::BrowserSwitcherNavigationThrottle ::
MaybeCreateThrottleFor(handle);
if (browser_switcher_throttle)
throttles.push_back(std::move(browser_switcher_throttle));
#endif
return throttles;
}
Commit Message: [GuestView] - Introduce MimeHandlerViewAttachHelper
This CL is for the most part a mechanical change which extracts almost
all the frame-based MimeHandlerView code out of
ExtensionsGuestViewMessageFilter. This change both removes the current
clutter from EGVMF as well as fixes a race introduced when the
frame-based logic was added to EGVMF. The reason for the race was that
EGVMF is destroyed on the IO thread but all accesses to it (for
frame-based MHV) are from the UI.
[email protected],[email protected]
Bug: 659750, 896679, 911161, 918861
Change-Id: I6474b870e4d56daa68be03637bb633665d9f9dda
Reviewed-on: https://chromium-review.googlesource.com/c/1401451
Commit-Queue: Ehsan Karamad <[email protected]>
Reviewed-by: James MacLean <[email protected]>
Reviewed-by: Ehsan Karamad <[email protected]>
Cr-Commit-Position: refs/heads/master@{#621155}
CWE ID: CWE-362 | ChromeContentBrowserClient::CreateThrottlesForNavigation(
content::NavigationHandle* handle) {
std::vector<std::unique_ptr<content::NavigationThrottle>> throttles;
if (handle->IsInMainFrame()) {
throttles.push_back(
page_load_metrics::MetricsNavigationThrottle::Create(handle));
}
#if BUILDFLAG(ENABLE_PLUGINS)
std::unique_ptr<content::NavigationThrottle> flash_url_throttle =
FlashDownloadInterception::MaybeCreateThrottleFor(handle);
if (flash_url_throttle)
throttles.push_back(std::move(flash_url_throttle));
#endif
#if BUILDFLAG(ENABLE_SUPERVISED_USERS)
std::unique_ptr<content::NavigationThrottle> supervised_user_throttle =
SupervisedUserNavigationThrottle::MaybeCreateThrottleFor(handle);
if (supervised_user_throttle)
throttles.push_back(std::move(supervised_user_throttle));
#endif
#if defined(OS_ANDROID)
prerender::PrerenderContents* prerender_contents =
prerender::PrerenderContents::FromWebContents(handle->GetWebContents());
if (!prerender_contents && handle->IsInMainFrame()) {
throttles.push_back(
navigation_interception::InterceptNavigationDelegate::CreateThrottleFor(
handle));
}
throttles.push_back(InterceptOMADownloadNavigationThrottle::Create(handle));
#elif BUILDFLAG(ENABLE_EXTENSIONS)
if (handle->IsInMainFrame()) {
auto url_to_app_throttle =
PlatformAppNavigationRedirector::MaybeCreateThrottleFor(handle);
if (url_to_app_throttle)
throttles.push_back(std::move(url_to_app_throttle));
}
if (base::FeatureList::IsEnabled(features::kDesktopPWAWindowing)) {
if (base::FeatureList::IsEnabled(features::kDesktopPWAsLinkCapturing)) {
auto bookmark_app_experimental_throttle =
extensions::BookmarkAppExperimentalNavigationThrottle::
MaybeCreateThrottleFor(handle);
if (bookmark_app_experimental_throttle)
throttles.push_back(std::move(bookmark_app_experimental_throttle));
} else if (!base::FeatureList::IsEnabled(
features::kDesktopPWAsStayInWindow)) {
auto bookmark_app_throttle =
extensions::BookmarkAppNavigationThrottle::MaybeCreateThrottleFor(
handle);
if (bookmark_app_throttle)
throttles.push_back(std::move(bookmark_app_throttle));
}
}
if (base::FeatureList::IsEnabled(
features::kMimeHandlerViewInCrossProcessFrame)) {
auto plugin_frame_attach_throttle =
extensions::MimeHandlerViewAttachHelper::MaybeCreateThrottle(handle);
if (plugin_frame_attach_throttle)
throttles.push_back(std::move(plugin_frame_attach_throttle));
}
#endif
#if defined(OS_CHROMEOS)
if (handle->IsInMainFrame()) {
if (merge_session_throttling_utils::ShouldAttachNavigationThrottle() &&
!merge_session_throttling_utils::AreAllSessionMergedAlready() &&
handle->GetURL().SchemeIsHTTPOrHTTPS()) {
throttles.push_back(MergeSessionNavigationThrottle::Create(handle));
}
auto url_to_apps_throttle =
chromeos::AppsNavigationThrottle::MaybeCreate(handle);
if (url_to_apps_throttle)
throttles.push_back(std::move(url_to_apps_throttle));
}
#endif
#if BUILDFLAG(ENABLE_EXTENSIONS)
throttles.push_back(
std::make_unique<extensions::ExtensionNavigationThrottle>(handle));
std::unique_ptr<content::NavigationThrottle> user_script_throttle =
extensions::ExtensionsBrowserClient::Get()
->GetUserScriptListener()
->CreateNavigationThrottle(handle);
if (user_script_throttle)
throttles.push_back(std::move(user_script_throttle));
#endif
#if BUILDFLAG(ENABLE_SUPERVISED_USERS)
std::unique_ptr<content::NavigationThrottle> supervised_user_nav_throttle =
SupervisedUserGoogleAuthNavigationThrottle::MaybeCreate(handle);
if (supervised_user_nav_throttle)
throttles.push_back(std::move(supervised_user_nav_throttle));
#endif
content::WebContents* web_contents = handle->GetWebContents();
if (auto* subresource_filter_client =
ChromeSubresourceFilterClient::FromWebContents(web_contents)) {
subresource_filter_client->MaybeAppendNavigationThrottles(handle,
&throttles);
}
#if !defined(OS_ANDROID)
std::unique_ptr<content::NavigationThrottle>
background_tab_navigation_throttle = resource_coordinator::
BackgroundTabNavigationThrottle::MaybeCreateThrottleFor(handle);
if (background_tab_navigation_throttle)
throttles.push_back(std::move(background_tab_navigation_throttle));
#endif
#if defined(SAFE_BROWSING_DB_LOCAL)
std::unique_ptr<content::NavigationThrottle>
password_protection_navigation_throttle =
safe_browsing::MaybeCreateNavigationThrottle(handle);
if (password_protection_navigation_throttle) {
throttles.push_back(std::move(password_protection_navigation_throttle));
}
#endif
std::unique_ptr<content::NavigationThrottle> pdf_iframe_throttle =
PDFIFrameNavigationThrottle::MaybeCreateThrottleFor(handle);
if (pdf_iframe_throttle)
throttles.push_back(std::move(pdf_iframe_throttle));
std::unique_ptr<content::NavigationThrottle> tab_under_throttle =
TabUnderNavigationThrottle::MaybeCreate(handle);
if (tab_under_throttle)
throttles.push_back(std::move(tab_under_throttle));
throttles.push_back(std::make_unique<PolicyBlacklistNavigationThrottle>(
handle, handle->GetWebContents()->GetBrowserContext()));
if (base::FeatureList::IsEnabled(features::kSSLCommittedInterstitials)) {
throttles.push_back(std::make_unique<SSLErrorNavigationThrottle>(
handle,
std::make_unique<CertificateReportingServiceCertReporter>(web_contents),
base::Bind(&SSLErrorHandler::HandleSSLError)));
}
std::unique_ptr<content::NavigationThrottle> https_upgrade_timing_throttle =
TypedNavigationTimingThrottle::MaybeCreateThrottleFor(handle);
if (https_upgrade_timing_throttle)
throttles.push_back(std::move(https_upgrade_timing_throttle));
#if !defined(OS_ANDROID)
std::unique_ptr<content::NavigationThrottle> devtools_throttle =
DevToolsWindow::MaybeCreateNavigationThrottle(handle);
if (devtools_throttle)
throttles.push_back(std::move(devtools_throttle));
std::unique_ptr<content::NavigationThrottle> new_tab_page_throttle =
NewTabPageNavigationThrottle::MaybeCreateThrottleFor(handle);
if (new_tab_page_throttle)
throttles.push_back(std::move(new_tab_page_throttle));
std::unique_ptr<content::NavigationThrottle>
google_password_manager_throttle =
GooglePasswordManagerNavigationThrottle::MaybeCreateThrottleFor(
handle);
if (google_password_manager_throttle)
throttles.push_back(std::move(google_password_manager_throttle));
#endif
std::unique_ptr<content::NavigationThrottle> previews_lite_page_throttle =
PreviewsLitePageDecider::MaybeCreateThrottleFor(handle);
if (previews_lite_page_throttle)
throttles.push_back(std::move(previews_lite_page_throttle));
if (base::FeatureList::IsEnabled(safe_browsing::kCommittedSBInterstitials)) {
throttles.push_back(
std::make_unique<safe_browsing::SafeBrowsingNavigationThrottle>(
handle));
}
#if defined(OS_WIN) || defined(OS_MACOSX) || \
(defined(OS_LINUX) && !defined(OS_CHROMEOS))
std::unique_ptr<content::NavigationThrottle> browser_switcher_throttle =
browser_switcher::BrowserSwitcherNavigationThrottle ::
MaybeCreateThrottleFor(handle);
if (browser_switcher_throttle)
throttles.push_back(std::move(browser_switcher_throttle));
#endif
return throttles;
}
| 173,035 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void PrintWebViewHelper::OnPrintPreview(const base::DictionaryValue& settings) {
print_preview_context_.OnPrintPreview();
UMA_HISTOGRAM_ENUMERATION("PrintPreview.PreviewEvent",
PREVIEW_EVENT_REQUESTED, PREVIEW_EVENT_MAX);
if (!print_preview_context_.source_frame()) {
DidFinishPrinting(FAIL_PREVIEW);
return;
}
if (!UpdatePrintSettings(print_preview_context_.source_frame(),
print_preview_context_.source_node(), settings)) {
if (print_preview_context_.last_error() != PREVIEW_ERROR_BAD_SETTING) {
Send(new PrintHostMsg_PrintPreviewInvalidPrinterSettings(
routing_id(), print_pages_params_
? print_pages_params_->params.document_cookie
: 0));
notify_browser_of_print_failure_ = false; // Already sent.
}
DidFinishPrinting(FAIL_PREVIEW);
return;
}
if (print_pages_params_->params.is_first_request &&
!print_preview_context_.IsModifiable()) {
PrintHostMsg_SetOptionsFromDocument_Params options;
if (SetOptionsFromPdfDocument(&options))
Send(new PrintHostMsg_SetOptionsFromDocument(routing_id(), options));
}
is_print_ready_metafile_sent_ = false;
print_pages_params_->params.supports_alpha_blend = true;
bool generate_draft_pages = false;
if (!settings.GetBoolean(kSettingGenerateDraftData, &generate_draft_pages)) {
NOTREACHED();
}
print_preview_context_.set_generate_draft_pages(generate_draft_pages);
PrepareFrameForPreviewDocument();
}
Commit Message: Crash on nested IPC handlers in PrintWebViewHelper
Class is not designed to handle nested IPC. Regular flows also does not
expect them. Still during printing of plugging them may show message
boxes and start nested message loops.
For now we are going just crash. If stats show us that this case is
frequent we will have to do something more complicated.
BUG=502562
Review URL: https://codereview.chromium.org/1228693002
Cr-Commit-Position: refs/heads/master@{#338100}
CWE ID: | void PrintWebViewHelper::OnPrintPreview(const base::DictionaryValue& settings) {
CHECK_LE(ipc_nesting_level_, 1);
print_preview_context_.OnPrintPreview();
UMA_HISTOGRAM_ENUMERATION("PrintPreview.PreviewEvent",
PREVIEW_EVENT_REQUESTED, PREVIEW_EVENT_MAX);
if (!print_preview_context_.source_frame()) {
DidFinishPrinting(FAIL_PREVIEW);
return;
}
if (!UpdatePrintSettings(print_preview_context_.source_frame(),
print_preview_context_.source_node(), settings)) {
if (print_preview_context_.last_error() != PREVIEW_ERROR_BAD_SETTING) {
Send(new PrintHostMsg_PrintPreviewInvalidPrinterSettings(
routing_id(), print_pages_params_
? print_pages_params_->params.document_cookie
: 0));
notify_browser_of_print_failure_ = false; // Already sent.
}
DidFinishPrinting(FAIL_PREVIEW);
return;
}
if (print_pages_params_->params.is_first_request &&
!print_preview_context_.IsModifiable()) {
PrintHostMsg_SetOptionsFromDocument_Params options;
if (SetOptionsFromPdfDocument(&options))
Send(new PrintHostMsg_SetOptionsFromDocument(routing_id(), options));
}
is_print_ready_metafile_sent_ = false;
print_pages_params_->params.supports_alpha_blend = true;
bool generate_draft_pages = false;
if (!settings.GetBoolean(kSettingGenerateDraftData, &generate_draft_pages)) {
NOTREACHED();
}
print_preview_context_.set_generate_draft_pages(generate_draft_pages);
PrepareFrameForPreviewDocument();
}
| 171,876 |
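
The entire fix above is the added CHECK_LE on a nesting counter: the handler refuses to run re-entrantly when a plugin spins a nested message loop during printing. A minimal C sketch of the same guard (the counter name and flow are illustrative, not Chromium's actual code):

#include <assert.h>

static int ipc_nesting_level;   /* incremented on every IPC dispatch */

static void handle_print_preview(void)
{
    ipc_nesting_level++;
    /* Crash deliberately if we were re-entered from a nested message
     * loop, mirroring the CHECK_LE(ipc_nesting_level_, 1) above. */
    assert(ipc_nesting_level <= 1);
    /* ... normal message handling ... */
    ipc_nesting_level--;
}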
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void SoftHEVC::onQueueFilled(OMX_U32 portIndex) {
UNUSED(portIndex);
if (mSignalledError) {
return;
}
if (mOutputPortSettingsChange != NONE) {
return;
}
if (NULL == mCodecCtx) {
if (OK != initDecoder()) {
return;
}
}
if (outputBufferWidth() != mStride) {
/* Set the run-time (dynamic) parameters */
mStride = outputBufferWidth();
setParams(mStride);
}
List<BufferInfo *> &inQueue = getPortQueue(kInputPortIndex);
List<BufferInfo *> &outQueue = getPortQueue(kOutputPortIndex);
/* If input EOS is seen and decoder is not in flush mode,
* set the decoder in flush mode.
* There can be a case where EOS is sent along with last picture data
* In that case, only after decoding that input data, decoder has to be
* put in flush. This case is handled here */
if (mReceivedEOS && !mIsInFlush) {
setFlushMode();
}
while (!outQueue.empty()) {
BufferInfo *inInfo;
OMX_BUFFERHEADERTYPE *inHeader;
BufferInfo *outInfo;
OMX_BUFFERHEADERTYPE *outHeader;
size_t timeStampIx;
inInfo = NULL;
inHeader = NULL;
if (!mIsInFlush) {
if (!inQueue.empty()) {
inInfo = *inQueue.begin();
inHeader = inInfo->mHeader;
} else {
break;
}
}
outInfo = *outQueue.begin();
outHeader = outInfo->mHeader;
outHeader->nFlags = 0;
outHeader->nTimeStamp = 0;
outHeader->nOffset = 0;
if (inHeader != NULL && (inHeader->nFlags & OMX_BUFFERFLAG_EOS)) {
mReceivedEOS = true;
if (inHeader->nFilledLen == 0) {
inQueue.erase(inQueue.begin());
inInfo->mOwnedByUs = false;
notifyEmptyBufferDone(inHeader);
inHeader = NULL;
setFlushMode();
}
}
/* Get a free slot in timestamp array to hold input timestamp */
{
size_t i;
timeStampIx = 0;
for (i = 0; i < MAX_TIME_STAMPS; i++) {
if (!mTimeStampsValid[i]) {
timeStampIx = i;
break;
}
}
if (inHeader != NULL) {
mTimeStampsValid[timeStampIx] = true;
mTimeStamps[timeStampIx] = inHeader->nTimeStamp;
}
}
{
ivd_video_decode_ip_t s_dec_ip;
ivd_video_decode_op_t s_dec_op;
WORD32 timeDelay, timeTaken;
size_t sizeY, sizeUV;
if (!setDecodeArgs(&s_dec_ip, &s_dec_op, inHeader, outHeader, timeStampIx)) {
ALOGE("Decoder arg setup failed");
notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
mSignalledError = true;
return;
}
GETTIME(&mTimeStart, NULL);
/* Compute time elapsed between end of previous decode()
* to start of current decode() */
TIME_DIFF(mTimeEnd, mTimeStart, timeDelay);
IV_API_CALL_STATUS_T status;
status = ivdec_api_function(mCodecCtx, (void *)&s_dec_ip, (void *)&s_dec_op);
bool resChanged = (IVD_RES_CHANGED == (s_dec_op.u4_error_code & 0xFF));
GETTIME(&mTimeEnd, NULL);
/* Compute time taken for decode() */
TIME_DIFF(mTimeStart, mTimeEnd, timeTaken);
ALOGV("timeTaken=%6d delay=%6d numBytes=%6d", timeTaken, timeDelay,
s_dec_op.u4_num_bytes_consumed);
if (s_dec_op.u4_frame_decoded_flag && !mFlushNeeded) {
mFlushNeeded = true;
}
if ((inHeader != NULL) && (1 != s_dec_op.u4_frame_decoded_flag)) {
/* If the input did not contain picture data, then ignore
* the associated timestamp */
mTimeStampsValid[timeStampIx] = false;
}
if (mChangingResolution && !s_dec_op.u4_output_present) {
mChangingResolution = false;
resetDecoder();
resetPlugin();
continue;
}
if (resChanged) {
mChangingResolution = true;
if (mFlushNeeded) {
setFlushMode();
}
continue;
}
if ((0 < s_dec_op.u4_pic_wd) && (0 < s_dec_op.u4_pic_ht)) {
uint32_t width = s_dec_op.u4_pic_wd;
uint32_t height = s_dec_op.u4_pic_ht;
bool portWillReset = false;
handlePortSettingsChange(&portWillReset, width, height);
if (portWillReset) {
resetDecoder();
return;
}
}
if (s_dec_op.u4_output_present) {
outHeader->nFilledLen = (outputBufferWidth() * outputBufferHeight() * 3) / 2;
outHeader->nTimeStamp = mTimeStamps[s_dec_op.u4_ts];
mTimeStampsValid[s_dec_op.u4_ts] = false;
outInfo->mOwnedByUs = false;
outQueue.erase(outQueue.begin());
outInfo = NULL;
notifyFillBufferDone(outHeader);
outHeader = NULL;
} else {
/* If in flush mode and no output is returned by the codec,
* then come out of flush mode */
mIsInFlush = false;
/* If EOS was recieved on input port and there is no output
* from the codec, then signal EOS on output port */
if (mReceivedEOS) {
outHeader->nFilledLen = 0;
outHeader->nFlags |= OMX_BUFFERFLAG_EOS;
outInfo->mOwnedByUs = false;
outQueue.erase(outQueue.begin());
outInfo = NULL;
notifyFillBufferDone(outHeader);
outHeader = NULL;
resetPlugin();
}
}
}
if (inHeader != NULL) {
inInfo->mOwnedByUs = false;
inQueue.erase(inQueue.begin());
inInfo = NULL;
notifyEmptyBufferDone(inHeader);
inHeader = NULL;
}
}
}
Commit Message: SoftHEVC: Exit gracefully in case of decoder errors
Exit for error in allocation and unsupported resolutions
Bug: 28816956
Change-Id: Ieb830bedeb3a7431d1d21a024927df630f7eda1e
CWE ID: CWE-172 | void SoftHEVC::onQueueFilled(OMX_U32 portIndex) {
UNUSED(portIndex);
if (mSignalledError) {
return;
}
if (mOutputPortSettingsChange != NONE) {
return;
}
if (NULL == mCodecCtx) {
if (OK != initDecoder()) {
ALOGE("Failed to initialize decoder");
notify(OMX_EventError, OMX_ErrorUnsupportedSetting, 0, NULL);
mSignalledError = true;
return;
}
}
if (outputBufferWidth() != mStride) {
/* Set the run-time (dynamic) parameters */
mStride = outputBufferWidth();
setParams(mStride);
}
List<BufferInfo *> &inQueue = getPortQueue(kInputPortIndex);
List<BufferInfo *> &outQueue = getPortQueue(kOutputPortIndex);
/* If input EOS is seen and decoder is not in flush mode,
* set the decoder in flush mode.
* There can be a case where EOS is sent along with last picture data
* In that case, only after decoding that input data, decoder has to be
* put in flush. This case is handled here */
if (mReceivedEOS && !mIsInFlush) {
setFlushMode();
}
while (!outQueue.empty()) {
BufferInfo *inInfo;
OMX_BUFFERHEADERTYPE *inHeader;
BufferInfo *outInfo;
OMX_BUFFERHEADERTYPE *outHeader;
size_t timeStampIx;
inInfo = NULL;
inHeader = NULL;
if (!mIsInFlush) {
if (!inQueue.empty()) {
inInfo = *inQueue.begin();
inHeader = inInfo->mHeader;
} else {
break;
}
}
outInfo = *outQueue.begin();
outHeader = outInfo->mHeader;
outHeader->nFlags = 0;
outHeader->nTimeStamp = 0;
outHeader->nOffset = 0;
if (inHeader != NULL && (inHeader->nFlags & OMX_BUFFERFLAG_EOS)) {
mReceivedEOS = true;
if (inHeader->nFilledLen == 0) {
inQueue.erase(inQueue.begin());
inInfo->mOwnedByUs = false;
notifyEmptyBufferDone(inHeader);
inHeader = NULL;
setFlushMode();
}
}
/* Get a free slot in timestamp array to hold input timestamp */
{
size_t i;
timeStampIx = 0;
for (i = 0; i < MAX_TIME_STAMPS; i++) {
if (!mTimeStampsValid[i]) {
timeStampIx = i;
break;
}
}
if (inHeader != NULL) {
mTimeStampsValid[timeStampIx] = true;
mTimeStamps[timeStampIx] = inHeader->nTimeStamp;
}
}
{
ivd_video_decode_ip_t s_dec_ip;
ivd_video_decode_op_t s_dec_op;
WORD32 timeDelay, timeTaken;
size_t sizeY, sizeUV;
if (!setDecodeArgs(&s_dec_ip, &s_dec_op, inHeader, outHeader, timeStampIx)) {
ALOGE("Decoder arg setup failed");
notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
mSignalledError = true;
return;
}
GETTIME(&mTimeStart, NULL);
/* Compute time elapsed between end of previous decode()
* to start of current decode() */
TIME_DIFF(mTimeEnd, mTimeStart, timeDelay);
IV_API_CALL_STATUS_T status;
status = ivdec_api_function(mCodecCtx, (void *)&s_dec_ip, (void *)&s_dec_op);
bool unsupportedResolution =
(IVD_STREAM_WIDTH_HEIGHT_NOT_SUPPORTED == (s_dec_op.u4_error_code & 0xFF));
/* Check for unsupported dimensions */
if (unsupportedResolution) {
ALOGE("Unsupported resolution : %dx%d", mWidth, mHeight);
notify(OMX_EventError, OMX_ErrorUnsupportedSetting, 0, NULL);
mSignalledError = true;
return;
}
bool allocationFailed = (IVD_MEM_ALLOC_FAILED == (s_dec_op.u4_error_code & 0xFF));
if (allocationFailed) {
ALOGE("Allocation failure in decoder");
notify(OMX_EventError, OMX_ErrorUnsupportedSetting, 0, NULL);
mSignalledError = true;
return;
}
bool resChanged = (IVD_RES_CHANGED == (s_dec_op.u4_error_code & 0xFF));
GETTIME(&mTimeEnd, NULL);
/* Compute time taken for decode() */
TIME_DIFF(mTimeStart, mTimeEnd, timeTaken);
ALOGV("timeTaken=%6d delay=%6d numBytes=%6d", timeTaken, timeDelay,
s_dec_op.u4_num_bytes_consumed);
if (s_dec_op.u4_frame_decoded_flag && !mFlushNeeded) {
mFlushNeeded = true;
}
if ((inHeader != NULL) && (1 != s_dec_op.u4_frame_decoded_flag)) {
/* If the input did not contain picture data, then ignore
* the associated timestamp */
mTimeStampsValid[timeStampIx] = false;
}
if (mChangingResolution && !s_dec_op.u4_output_present) {
mChangingResolution = false;
resetDecoder();
resetPlugin();
continue;
}
if (resChanged) {
mChangingResolution = true;
if (mFlushNeeded) {
setFlushMode();
}
continue;
}
if ((0 < s_dec_op.u4_pic_wd) && (0 < s_dec_op.u4_pic_ht)) {
uint32_t width = s_dec_op.u4_pic_wd;
uint32_t height = s_dec_op.u4_pic_ht;
bool portWillReset = false;
handlePortSettingsChange(&portWillReset, width, height);
if (portWillReset) {
resetDecoder();
return;
}
}
if (s_dec_op.u4_output_present) {
outHeader->nFilledLen = (outputBufferWidth() * outputBufferHeight() * 3) / 2;
outHeader->nTimeStamp = mTimeStamps[s_dec_op.u4_ts];
mTimeStampsValid[s_dec_op.u4_ts] = false;
outInfo->mOwnedByUs = false;
outQueue.erase(outQueue.begin());
outInfo = NULL;
notifyFillBufferDone(outHeader);
outHeader = NULL;
} else {
/* If in flush mode and no output is returned by the codec,
* then come out of flush mode */
mIsInFlush = false;
/* If EOS was recieved on input port and there is no output
* from the codec, then signal EOS on output port */
if (mReceivedEOS) {
outHeader->nFilledLen = 0;
outHeader->nFlags |= OMX_BUFFERFLAG_EOS;
outInfo->mOwnedByUs = false;
outQueue.erase(outQueue.begin());
outInfo = NULL;
notifyFillBufferDone(outHeader);
outHeader = NULL;
resetPlugin();
}
}
}
if (inHeader != NULL) {
inInfo->mOwnedByUs = false;
inQueue.erase(inQueue.begin());
inInfo = NULL;
notifyEmptyBufferDone(inHeader);
inHeader = NULL;
}
}
}
| 173,517 |
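
The diff adds two early exits keyed on the low byte of the decoder's error code, turning fatal decoder states into a clean session failure instead of continued decoding. A reduced C sketch of that bail-out pattern (the enum values are invented for illustration; the real codes are ivd/ihevcd library constants):

enum { ERR_WIDTH_HEIGHT_UNSUPPORTED = 0x01, ERR_MEM_ALLOC_FAILED = 0x02 };

static int signalled_error;

/* Returns 0 to keep decoding, -1 once the session must stop cleanly. */
static int check_decoder_status(unsigned error_code)
{
    unsigned reason = error_code & 0xFF;    /* low byte carries the cause */

    if (reason == ERR_WIDTH_HEIGHT_UNSUPPORTED ||
        reason == ERR_MEM_ALLOC_FAILED) {
        signalled_error = 1;                /* report once, refuse new work */
        return -1;
    }
    return 0;
}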
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static int snd_ctl_tlv_ioctl(struct snd_ctl_file *file,
struct snd_ctl_tlv __user *_tlv,
int op_flag)
{
struct snd_card *card = file->card;
struct snd_ctl_tlv tlv;
struct snd_kcontrol *kctl;
struct snd_kcontrol_volatile *vd;
unsigned int len;
int err = 0;
if (copy_from_user(&tlv, _tlv, sizeof(tlv)))
return -EFAULT;
if (tlv.length < sizeof(unsigned int) * 2)
return -EINVAL;
down_read(&card->controls_rwsem);
kctl = snd_ctl_find_numid(card, tlv.numid);
if (kctl == NULL) {
err = -ENOENT;
goto __kctl_end;
}
if (kctl->tlv.p == NULL) {
err = -ENXIO;
goto __kctl_end;
}
vd = &kctl->vd[tlv.numid - kctl->id.numid];
if ((op_flag == 0 && (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_READ) == 0) ||
(op_flag > 0 && (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_WRITE) == 0) ||
(op_flag < 0 && (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_COMMAND) == 0)) {
err = -ENXIO;
goto __kctl_end;
}
if (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
if (vd->owner != NULL && vd->owner != file) {
err = -EPERM;
goto __kctl_end;
}
err = kctl->tlv.c(kctl, op_flag, tlv.length, _tlv->tlv);
if (err > 0) {
up_read(&card->controls_rwsem);
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_TLV, &kctl->id);
return 0;
}
} else {
if (op_flag) {
err = -ENXIO;
goto __kctl_end;
}
len = kctl->tlv.p[1] + 2 * sizeof(unsigned int);
if (tlv.length < len) {
err = -ENOMEM;
goto __kctl_end;
}
if (copy_to_user(_tlv->tlv, kctl->tlv.p, len))
err = -EFAULT;
}
__kctl_end:
up_read(&card->controls_rwsem);
return err;
}
Commit Message: ALSA: control: Don't access controls outside of protected regions
A control that is visible on the card->controls list can be freed at any time.
This means we must not access any of its memory while not holding the
controls_rw_lock. Otherwise we risk a use after free access.
Signed-off-by: Lars-Peter Clausen <[email protected]>
Acked-by: Jaroslav Kysela <[email protected]>
Cc: <[email protected]>
Signed-off-by: Takashi Iwai <[email protected]>
CWE ID: | static int snd_ctl_tlv_ioctl(struct snd_ctl_file *file,
struct snd_ctl_tlv __user *_tlv,
int op_flag)
{
struct snd_card *card = file->card;
struct snd_ctl_tlv tlv;
struct snd_kcontrol *kctl;
struct snd_kcontrol_volatile *vd;
unsigned int len;
int err = 0;
if (copy_from_user(&tlv, _tlv, sizeof(tlv)))
return -EFAULT;
if (tlv.length < sizeof(unsigned int) * 2)
return -EINVAL;
down_read(&card->controls_rwsem);
kctl = snd_ctl_find_numid(card, tlv.numid);
if (kctl == NULL) {
err = -ENOENT;
goto __kctl_end;
}
if (kctl->tlv.p == NULL) {
err = -ENXIO;
goto __kctl_end;
}
vd = &kctl->vd[tlv.numid - kctl->id.numid];
if ((op_flag == 0 && (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_READ) == 0) ||
(op_flag > 0 && (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_WRITE) == 0) ||
(op_flag < 0 && (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_COMMAND) == 0)) {
err = -ENXIO;
goto __kctl_end;
}
if (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
if (vd->owner != NULL && vd->owner != file) {
err = -EPERM;
goto __kctl_end;
}
err = kctl->tlv.c(kctl, op_flag, tlv.length, _tlv->tlv);
if (err > 0) {
struct snd_ctl_elem_id id = kctl->id;
up_read(&card->controls_rwsem);
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_TLV, &id);
return 0;
}
} else {
if (op_flag) {
err = -ENXIO;
goto __kctl_end;
}
len = kctl->tlv.p[1] + 2 * sizeof(unsigned int);
if (tlv.length < len) {
err = -ENOMEM;
goto __kctl_end;
}
if (copy_to_user(_tlv->tlv, kctl->tlv.p, len))
err = -EFAULT;
}
__kctl_end:
up_read(&card->controls_rwsem);
return err;
}
| 166,295 |
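
The one-line fix copies kctl->id to a stack variable before up_read(), so the notification never touches list memory after the lock is dropped and the control may have been freed. The same copy-then-unlock idiom in a self-contained C sketch (pthread types stand in for the kernel's rwsem):

#include <pthread.h>

struct elem_id { int numid; char name[44]; };

static pthread_rwlock_t controls_lock = PTHREAD_RWLOCK_INITIALIZER;

static void send_notification(const struct elem_id *id) { (void)id; }

static void notify_element(const struct elem_id *protected_id)
{
    struct elem_id copy;

    pthread_rwlock_rdlock(&controls_lock);
    copy = *protected_id;              /* snapshot while still protected */
    pthread_rwlock_unlock(&controls_lock);

    send_notification(&copy);          /* safe: uses only the private copy */
}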
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static int _server_handle_vCont(libgdbr_t *g, int (*cmd_cb) (void*, const char*, char*, size_t), void *core_ptr) {
char *action = NULL;
if (send_ack (g) < 0) {
return -1;
}
g->data[g->data_len] = '\0';
if (g->data[5] == '?') {
return send_msg (g, "vCont;c;s");
}
if (!(action = strtok (g->data, ";"))) {
return send_msg (g, "E01");
}
while (action = strtok (NULL, ";")) {
eprintf ("action: %s\n", action);
switch (action[0]) {
case 's':
if (cmd_cb (core_ptr, "ds", NULL, 0) < 0) {
send_msg (g, "E01");
return -1;
}
return send_msg (g, "OK");
case 'c':
if (cmd_cb (core_ptr, "dc", NULL, 0) < 0) {
send_msg (g, "E01");
return -1;
}
return send_msg (g, "OK");
default:
return send_msg (g, "E01");
}
}
}
Commit Message: Fix ext2 buffer overflow in r2_sbu_grub_memmove
CWE ID: CWE-787 | static int _server_handle_vCont(libgdbr_t *g, int (*cmd_cb) (void*, const char*, char*, size_t), void *core_ptr) {
char *action = NULL;
if (send_ack (g) < 0) {
return -1;
}
g->data[g->data_len] = '\0';
if (g->data[5] == '?') {
return send_msg (g, "vCont;c;s");
}
if (!(action = strtok (g->data, ";"))) {
return send_msg (g, "E01");
}
while (action = strtok (NULL, ";")) {
eprintf ("action: %s\n", action);
switch (action[0]) {
case 's':
if (cmd_cb (core_ptr, "ds", NULL, 0) < 0) {
send_msg (g, "E01");
return -1;
}
return send_msg (g, "OK");
case 'c':
if (cmd_cb (core_ptr, "dc", NULL, 0) < 0) {
send_msg (g, "E01");
return -1;
}
return send_msg (g, "OK");
default:
return send_msg (g, "E01");
}
}
return -1;
}
| 168,081 |
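
The only functional change in this row is the trailing return -1;: in the vulnerable version, control can fall off the end of the non-void function when the while loop exits normally, leaving the caller with an indeterminate value. A reduced illustration (hypothetical helper) of the hazard, which -Wreturn-type flags at compile time:

static int first_positive(const int *v, int n)
{
    for (int i = 0; i < n; i++) {
        if (v[i] > 0)
            return v[i];
    }
    return -1;   /* without this line the return value is undefined */
}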
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: header_seek (SF_PRIVATE *psf, sf_count_t position, int whence)
{
switch (whence)
{ case SEEK_SET :
if (position > SIGNED_SIZEOF (psf->header))
{ /* Too much header to cache so just seek instead. */
psf_fseek (psf, position, whence) ;
return ;
} ;
if (position > psf->headend)
psf->headend += psf_fread (psf->header + psf->headend, 1, position - psf->headend, psf) ;
psf->headindex = position ;
break ;
case SEEK_CUR :
if (psf->headindex + position < 0)
break ;
if (psf->headindex >= SIGNED_SIZEOF (psf->header))
{ psf_fseek (psf, position, whence) ;
return ;
} ;
if (psf->headindex + position <= psf->headend)
{ psf->headindex += position ;
break ;
} ;
if (psf->headindex + position > SIGNED_SIZEOF (psf->header))
{ /* Need to jump this without caching it. */
psf->headindex = psf->headend ;
psf_fseek (psf, position, SEEK_CUR) ;
break ;
} ;
psf->headend += psf_fread (psf->header + psf->headend, 1, position - (psf->headend - psf->headindex), psf) ;
psf->headindex = psf->headend ;
break ;
case SEEK_END :
default :
psf_log_printf (psf, "Bad whence param in header_seek().\n") ;
break ;
} ;
return ;
} /* header_seek */
Commit Message: src/ : Move to a variable length header buffer
Previously, the `psf->header` buffer was a fixed length specified by
`SF_HEADER_LEN` which was set to `12292`. This was problematic for
two reasons; this value was un-necessarily large for the majority
of files and too small for some others.
Now the size of the header buffer starts at 256 bytes and grows as
necessary up to a maximum of 100k.
CWE ID: CWE-119 | header_seek (SF_PRIVATE *psf, sf_count_t position, int whence)
{
switch (whence)
{ case SEEK_SET :
if (psf->header.indx + position >= psf->header.len)
psf_bump_header_allocation (psf, position) ;
if (position > psf->header.len)
{ /* Too much header to cache so just seek instead. */
psf_fseek (psf, position, whence) ;
return ;
} ;
if (position > psf->header.end)
psf->header.end += psf_fread (psf->header.ptr + psf->header.end, 1, position - psf->header.end, psf) ;
psf->header.indx = position ;
break ;
case SEEK_CUR :
if (psf->header.indx + position >= psf->header.len)
psf_bump_header_allocation (psf, position) ;
if (psf->header.indx + position < 0)
break ;
if (psf->header.indx >= psf->header.len)
{ psf_fseek (psf, position, whence) ;
return ;
} ;
if (psf->header.indx + position <= psf->header.end)
{ psf->header.indx += position ;
break ;
} ;
if (psf->header.indx + position > psf->header.len)
{ /* Need to jump this without caching it. */
psf->header.indx = psf->header.end ;
psf_fseek (psf, position, SEEK_CUR) ;
break ;
} ;
psf->header.end += psf_fread (psf->header.ptr + psf->header.end, 1, position - (psf->header.end - psf->header.indx), psf) ;
psf->header.indx = psf->header.end ;
break ;
case SEEK_END :
default :
psf_log_printf (psf, "Bad whence param in header_seek().\n") ;
break ;
} ;
return ;
} /* header_seek */
| 170,062 |
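
The commit replaces a fixed 12292-byte header array with a buffer that starts at 256 bytes and grows up to a 100k ceiling. A sketch of that grow-with-cap policy (names and the struct layout are assumptions, not libsndfile's internals):

#include <stdlib.h>

#define HDR_INITIAL 256
#define HDR_MAX     (100 * 1024)

struct header { unsigned char *ptr; size_t len, end, indx; };

static int header_reserve(struct header *h, size_t needed)
{
    size_t newlen = h->len ? h->len : HDR_INITIAL;
    unsigned char *p;

    if (needed > HDR_MAX)
        return -1;                 /* malformed file: refuse to grow */
    while (newlen < needed)
        newlen *= 2;               /* geometric growth */
    if (newlen > HDR_MAX)
        newlen = HDR_MAX;
    if ((p = realloc(h->ptr, newlen)) == NULL)
        return -1;
    h->ptr = p;
    h->len = newlen;
    return 0;
}

The cap matters as much as the growth: without it a crafted header could drive allocations arbitrarily high.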
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: FLAC__bool read_metadata_vorbiscomment_(FLAC__StreamDecoder *decoder, FLAC__StreamMetadata_VorbisComment *obj, unsigned length)
{
FLAC__uint32 i;
FLAC__ASSERT(FLAC__bitreader_is_consumed_byte_aligned(decoder->private_->input));
/* read vendor string */
if (length >= 8) {
length -= 8; /* vendor string length + num comments entries alone take 8 bytes */
FLAC__ASSERT(FLAC__STREAM_METADATA_VORBIS_COMMENT_ENTRY_LENGTH_LEN == 32);
if (!FLAC__bitreader_read_uint32_little_endian(decoder->private_->input, &obj->vendor_string.length))
return false; /* read_callback_ sets the state for us */
if (obj->vendor_string.length > 0) {
if (length < obj->vendor_string.length) {
obj->vendor_string.length = 0;
obj->vendor_string.entry = 0;
goto skip;
}
else
length -= obj->vendor_string.length;
if (0 == (obj->vendor_string.entry = safe_malloc_add_2op_(obj->vendor_string.length, /*+*/1))) {
decoder->protected_->state = FLAC__STREAM_DECODER_MEMORY_ALLOCATION_ERROR;
return false;
}
if (!FLAC__bitreader_read_byte_block_aligned_no_crc(decoder->private_->input, obj->vendor_string.entry, obj->vendor_string.length))
return false; /* read_callback_ sets the state for us */
obj->vendor_string.entry[obj->vendor_string.length] = '\0';
}
else
obj->vendor_string.entry = 0;
/* read num comments */
FLAC__ASSERT(FLAC__STREAM_METADATA_VORBIS_COMMENT_NUM_COMMENTS_LEN == 32);
if (!FLAC__bitreader_read_uint32_little_endian(decoder->private_->input, &obj->num_comments))
return false; /* read_callback_ sets the state for us */
/* read comments */
if (obj->num_comments > 100000) {
/* Possibly malicious file. */
obj->num_comments = 0;
return false;
}
if (obj->num_comments > 0) {
if (0 == (obj->comments = safe_malloc_mul_2op_p(obj->num_comments, /*times*/sizeof(FLAC__StreamMetadata_VorbisComment_Entry)))) {
decoder->protected_->state = FLAC__STREAM_DECODER_MEMORY_ALLOCATION_ERROR;
return false;
}
for (i = 0; i < obj->num_comments; i++) {
/* Initialize here just to make sure. */
obj->comments[i].length = 0;
obj->comments[i].entry = 0;
FLAC__ASSERT(FLAC__STREAM_METADATA_VORBIS_COMMENT_ENTRY_LENGTH_LEN == 32);
if (length < 4) {
obj->num_comments = i;
goto skip;
}
else
length -= 4;
if (!FLAC__bitreader_read_uint32_little_endian(decoder->private_->input, &obj->comments[i].length))
return false; /* read_callback_ sets the state for us */
if (obj->comments[i].length > 0) {
if (length < obj->comments[i].length) {
obj->num_comments = i;
goto skip;
}
else
length -= obj->comments[i].length;
if (0 == (obj->comments[i].entry = safe_malloc_add_2op_(obj->comments[i].length, /*+*/1))) {
decoder->protected_->state = FLAC__STREAM_DECODER_MEMORY_ALLOCATION_ERROR;
return false;
}
memset (obj->comments[i].entry, 0, obj->comments[i].length) ;
if (!FLAC__bitreader_read_byte_block_aligned_no_crc(decoder->private_->input, obj->comments[i].entry, obj->comments[i].length)) {
obj->num_comments = i;
goto skip;
}
obj->comments[i].entry[obj->comments[i].length] = '\0';
}
else
obj->comments[i].entry = 0;
}
}
else
obj->comments = 0;
}
skip:
if (length > 0) {
/* This will only happen on files with invalid data in comments */
if(!FLAC__bitreader_skip_byte_block_aligned_no_crc(decoder->private_->input, length))
return false; /* read_callback_ sets the state for us */
}
return true;
}
Commit Message: Avoid free-before-initialize vulnerability in heap
Bug: 27211885
Change-Id: Ib9c93bd9ffdde2a5f8d31a86f06e267dc9c152db
CWE ID: CWE-119 | FLAC__bool read_metadata_vorbiscomment_(FLAC__StreamDecoder *decoder, FLAC__StreamMetadata_VorbisComment *obj, unsigned length)
{
FLAC__uint32 i;
FLAC__ASSERT(FLAC__bitreader_is_consumed_byte_aligned(decoder->private_->input));
/* read vendor string */
if (length >= 8) {
length -= 8; /* vendor string length + num comments entries alone take 8 bytes */
FLAC__ASSERT(FLAC__STREAM_METADATA_VORBIS_COMMENT_ENTRY_LENGTH_LEN == 32);
if (!FLAC__bitreader_read_uint32_little_endian(decoder->private_->input, &obj->vendor_string.length))
return false; /* read_callback_ sets the state for us */
if (obj->vendor_string.length > 0) {
if (length < obj->vendor_string.length) {
obj->vendor_string.length = 0;
obj->vendor_string.entry = 0;
goto skip;
}
else
length -= obj->vendor_string.length;
if (0 == (obj->vendor_string.entry = safe_malloc_add_2op_(obj->vendor_string.length, /*+*/1))) {
decoder->protected_->state = FLAC__STREAM_DECODER_MEMORY_ALLOCATION_ERROR;
return false;
}
if (!FLAC__bitreader_read_byte_block_aligned_no_crc(decoder->private_->input, obj->vendor_string.entry, obj->vendor_string.length))
return false; /* read_callback_ sets the state for us */
obj->vendor_string.entry[obj->vendor_string.length] = '\0';
}
else
obj->vendor_string.entry = 0;
/* read num comments */
FLAC__ASSERT(FLAC__STREAM_METADATA_VORBIS_COMMENT_NUM_COMMENTS_LEN == 32);
if (!FLAC__bitreader_read_uint32_little_endian(decoder->private_->input, &obj->num_comments))
return false; /* read_callback_ sets the state for us */
/* read comments */
if (obj->num_comments > 100000) {
/* Possibly malicious file. */
obj->num_comments = 0;
return false;
}
if (obj->num_comments > 0) {
if (0 == (obj->comments = safe_malloc_mul_2op_p(obj->num_comments, /*times*/sizeof(FLAC__StreamMetadata_VorbisComment_Entry)))) {
decoder->protected_->state = FLAC__STREAM_DECODER_MEMORY_ALLOCATION_ERROR;
obj->num_comments = 0;
return false;
}
for (i = 0; i < obj->num_comments; i++) {
/* Initialize here just to make sure. */
obj->comments[i].length = 0;
obj->comments[i].entry = 0;
FLAC__ASSERT(FLAC__STREAM_METADATA_VORBIS_COMMENT_ENTRY_LENGTH_LEN == 32);
if (length < 4) {
obj->num_comments = i;
goto skip;
}
else
length -= 4;
if (!FLAC__bitreader_read_uint32_little_endian(decoder->private_->input, &obj->comments[i].length))
return false; /* read_callback_ sets the state for us */
if (obj->comments[i].length > 0) {
if (length < obj->comments[i].length) {
obj->num_comments = i;
goto skip;
}
else
length -= obj->comments[i].length;
if (0 == (obj->comments[i].entry = safe_malloc_add_2op_(obj->comments[i].length, /*+*/1))) {
decoder->protected_->state = FLAC__STREAM_DECODER_MEMORY_ALLOCATION_ERROR;
return false;
}
memset (obj->comments[i].entry, 0, obj->comments[i].length) ;
if (!FLAC__bitreader_read_byte_block_aligned_no_crc(decoder->private_->input, obj->comments[i].entry, obj->comments[i].length)) {
obj->num_comments = i;
goto skip;
}
obj->comments[i].entry[obj->comments[i].length] = '\0';
}
else
obj->comments[i].entry = 0;
}
}
else
obj->comments = 0;
}
skip:
if (length > 0) {
/* This will only happen on files with invalid data in comments */
if(!FLAC__bitreader_skip_byte_block_aligned_no_crc(decoder->private_->input, length))
return false; /* read_callback_ sets the state for us */
}
return true;
}
| 173,888 |
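
Besides memset-initializing each entry, the key added line is obj->num_comments = 0 on allocation failure: the destructor trusts the count, so a count that outruns the allocation makes it free uninitialized pointers. The invariant in a reduced C sketch (illustrative struct, not FLAC's):

#include <stdlib.h>

struct comments { unsigned num; char **entry; };

static int comments_alloc(struct comments *c, unsigned num)
{
    c->entry = calloc(num, sizeof(*c->entry));
    if (!c->entry) {
        c->num = 0;        /* never claim entries that were not allocated */
        return -1;
    }
    c->num = num;
    return 0;
}

static void comments_free(struct comments *c)
{
    for (unsigned i = 0; i < c->num; i++)
        free(c->entry[i]);             /* safe: count matches allocation */
    free(c->entry);
}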
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void receive_tcppacket(connection_t *c, const char *buffer, int len) {
vpn_packet_t outpkt;
outpkt.len = len;
if(c->options & OPTION_TCPONLY)
outpkt.priority = 0;
else
outpkt.priority = -1;
memcpy(outpkt.data, buffer, len);
receive_packet(c->node, &outpkt);
}
Commit Message: Drop packets forwarded via TCP if they are too big (CVE-2013-1428).
Normally all requests sent via the meta connections are checked so that they
cannot be larger than the input buffer. However, when packets are forwarded via
meta connections, they are copied into a packet buffer without checking whether
it fits into it. Since the packet buffer is allocated on the stack, this in
effect allows an authenticated remote node to cause a stack overflow.
This issue was found by Martin Schobert.
CWE ID: CWE-119 | void receive_tcppacket(connection_t *c, const char *buffer, int len) {
vpn_packet_t outpkt;
if(len > sizeof outpkt.data)
return;
outpkt.len = len;
if(c->options & OPTION_TCPONLY)
outpkt.priority = 0;
else
outpkt.priority = -1;
memcpy(outpkt.data, buffer, len);
receive_packet(c->node, &outpkt);
}
| 166,129 |
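
The fix is a single bounds check before memcpy into the stack-allocated packet. The rule it encodes, as a self-contained C sketch (the packet layout is illustrative):

#include <string.h>

struct pkt { int len; unsigned char data[1518]; };

static int fill_packet(struct pkt *p, const char *src, int len)
{
    /* Validate the attacker-controlled length against the destination's
     * real capacity (sizeof on the member, not a magic constant). */
    if (len < 0 || (size_t)len > sizeof(p->data))
        return -1;                     /* drop oversized input */
    memcpy(p->data, src, (size_t)len);
    p->len = len;
    return 0;
}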
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: BITMAP_UPDATE* update_read_bitmap_update(rdpUpdate* update, wStream* s)
{
UINT32 i;
BITMAP_UPDATE* bitmapUpdate = calloc(1, sizeof(BITMAP_UPDATE));
if (!bitmapUpdate)
goto fail;
if (Stream_GetRemainingLength(s) < 2)
goto fail;
Stream_Read_UINT16(s, bitmapUpdate->number); /* numberRectangles (2 bytes) */
WLog_Print(update->log, WLOG_TRACE, "BitmapUpdate: %"PRIu32"", bitmapUpdate->number);
if (bitmapUpdate->number > bitmapUpdate->count)
{
UINT16 count;
BITMAP_DATA* newdata;
count = bitmapUpdate->number * 2;
newdata = (BITMAP_DATA*) realloc(bitmapUpdate->rectangles,
sizeof(BITMAP_DATA) * count);
if (!newdata)
goto fail;
bitmapUpdate->rectangles = newdata;
ZeroMemory(&bitmapUpdate->rectangles[bitmapUpdate->count],
sizeof(BITMAP_DATA) * (count - bitmapUpdate->count));
bitmapUpdate->count = count;
}
/* rectangles */
for (i = 0; i < bitmapUpdate->number; i++)
{
if (!update_read_bitmap_data(update, s, &bitmapUpdate->rectangles[i]))
goto fail;
}
return bitmapUpdate;
fail:
free_bitmap_update(update->context, bitmapUpdate);
return NULL;
}
Commit Message: Fixed CVE-2018-8786
Thanks to Eyal Itkin from Check Point Software Technologies.
CWE ID: CWE-119 | BITMAP_UPDATE* update_read_bitmap_update(rdpUpdate* update, wStream* s)
{
UINT32 i;
BITMAP_UPDATE* bitmapUpdate = calloc(1, sizeof(BITMAP_UPDATE));
if (!bitmapUpdate)
goto fail;
if (Stream_GetRemainingLength(s) < 2)
goto fail;
Stream_Read_UINT16(s, bitmapUpdate->number); /* numberRectangles (2 bytes) */
WLog_Print(update->log, WLOG_TRACE, "BitmapUpdate: %"PRIu32"", bitmapUpdate->number);
if (bitmapUpdate->number > bitmapUpdate->count)
{
UINT32 count = bitmapUpdate->number * 2;
BITMAP_DATA* newdata = (BITMAP_DATA*) realloc(bitmapUpdate->rectangles,
sizeof(BITMAP_DATA) * count);
if (!newdata)
goto fail;
bitmapUpdate->rectangles = newdata;
ZeroMemory(&bitmapUpdate->rectangles[bitmapUpdate->count],
sizeof(BITMAP_DATA) * (count - bitmapUpdate->count));
bitmapUpdate->count = count;
}
/* rectangles */
for (i = 0; i < bitmapUpdate->number; i++)
{
if (!update_read_bitmap_data(update, s, &bitmapUpdate->rectangles[i]))
goto fail;
}
return bitmapUpdate;
fail:
free_bitmap_update(update->context, bitmapUpdate);
return NULL;
}
| 169,293 |
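
The patch widens count from UINT16 to UINT32: number is read from the wire as a 16-bit value, and number * 2 can exceed 65535, so storing it in a UINT16 wraps and the reallocated array ends up smaller than the rectangle count the loop later trusts. A compilable demonstration of the truncation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t number = 40000;        /* wire-supplied rectangle count */
    uint16_t narrow = number * 2;   /* wraps: 80000 mod 65536 = 14464 */
    uint32_t wide   = number * 2;   /* 80000, as the fixed code computes */

    printf("narrow=%u wide=%u\n", (unsigned)narrow, (unsigned)wide);
    return 0;
}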
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
struct ext2_xattr_header *header)
{
struct super_block *sb = inode->i_sb;
struct buffer_head *new_bh = NULL;
int error;
if (header) {
new_bh = ext2_xattr_cache_find(inode, header);
if (new_bh) {
/* We found an identical block in the cache. */
if (new_bh == old_bh) {
ea_bdebug(new_bh, "keeping this block");
} else {
/* The old block is released after updating
the inode. */
ea_bdebug(new_bh, "reusing block");
error = dquot_alloc_block(inode, 1);
if (error) {
unlock_buffer(new_bh);
goto cleanup;
}
le32_add_cpu(&HDR(new_bh)->h_refcount, 1);
ea_bdebug(new_bh, "refcount now=%d",
le32_to_cpu(HDR(new_bh)->h_refcount));
}
unlock_buffer(new_bh);
} else if (old_bh && header == HDR(old_bh)) {
/* Keep this block. No need to lock the block as we
don't need to change the reference count. */
new_bh = old_bh;
get_bh(new_bh);
ext2_xattr_cache_insert(new_bh);
} else {
/* We need to allocate a new block */
ext2_fsblk_t goal = ext2_group_first_block_no(sb,
EXT2_I(inode)->i_block_group);
int block = ext2_new_block(inode, goal, &error);
if (error)
goto cleanup;
ea_idebug(inode, "creating block %d", block);
new_bh = sb_getblk(sb, block);
if (unlikely(!new_bh)) {
ext2_free_blocks(inode, block, 1);
mark_inode_dirty(inode);
error = -ENOMEM;
goto cleanup;
}
lock_buffer(new_bh);
memcpy(new_bh->b_data, header, new_bh->b_size);
set_buffer_uptodate(new_bh);
unlock_buffer(new_bh);
ext2_xattr_cache_insert(new_bh);
ext2_xattr_update_super_block(sb);
}
mark_buffer_dirty(new_bh);
if (IS_SYNC(inode)) {
sync_dirty_buffer(new_bh);
error = -EIO;
if (buffer_req(new_bh) && !buffer_uptodate(new_bh))
goto cleanup;
}
}
/* Update the inode. */
EXT2_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;
inode->i_ctime = CURRENT_TIME_SEC;
if (IS_SYNC(inode)) {
error = sync_inode_metadata(inode, 1);
/* In case sync failed due to ENOSPC the inode was actually
* written (only some dirty data were not) so we just proceed
* as if nothing happened and cleanup the unused block */
if (error && error != -ENOSPC) {
if (new_bh && new_bh != old_bh) {
dquot_free_block_nodirty(inode, 1);
mark_inode_dirty(inode);
}
goto cleanup;
}
} else
mark_inode_dirty(inode);
error = 0;
if (old_bh && old_bh != new_bh) {
struct mb_cache_entry *ce;
/*
* If there was an old block and we are no longer using it,
* release the old block.
*/
ce = mb_cache_entry_get(ext2_xattr_cache, old_bh->b_bdev,
old_bh->b_blocknr);
lock_buffer(old_bh);
if (HDR(old_bh)->h_refcount == cpu_to_le32(1)) {
/* Free the old block. */
if (ce)
mb_cache_entry_free(ce);
ea_bdebug(old_bh, "freeing");
ext2_free_blocks(inode, old_bh->b_blocknr, 1);
mark_inode_dirty(inode);
/* We let our caller release old_bh, so we
* need to duplicate the buffer before. */
get_bh(old_bh);
bforget(old_bh);
} else {
/* Decrement the refcount only. */
le32_add_cpu(&HDR(old_bh)->h_refcount, -1);
if (ce)
mb_cache_entry_release(ce);
dquot_free_block_nodirty(inode, 1);
mark_inode_dirty(inode);
mark_buffer_dirty(old_bh);
ea_bdebug(old_bh, "refcount now=%d",
le32_to_cpu(HDR(old_bh)->h_refcount));
}
unlock_buffer(old_bh);
}
cleanup:
brelse(new_bh);
return error;
}
Commit Message: ext2: convert to mbcache2
The conversion is generally straightforward. We convert filesystem from
a global cache to per-fs one. Similarly to ext4 the tricky part is that
xattr block corresponding to found mbcache entry can get freed before we
get buffer lock for that block. So we have to check whether the entry is
still valid after getting the buffer lock.
Signed-off-by: Jan Kara <[email protected]>
Signed-off-by: Theodore Ts'o <[email protected]>
CWE ID: CWE-19 | ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
struct ext2_xattr_header *header)
{
struct super_block *sb = inode->i_sb;
struct buffer_head *new_bh = NULL;
int error;
struct mb2_cache *ext2_mb_cache = EXT2_SB(sb)->s_mb_cache;
if (header) {
new_bh = ext2_xattr_cache_find(inode, header);
if (new_bh) {
/* We found an identical block in the cache. */
if (new_bh == old_bh) {
ea_bdebug(new_bh, "keeping this block");
} else {
/* The old block is released after updating
the inode. */
ea_bdebug(new_bh, "reusing block");
error = dquot_alloc_block(inode, 1);
if (error) {
unlock_buffer(new_bh);
goto cleanup;
}
le32_add_cpu(&HDR(new_bh)->h_refcount, 1);
ea_bdebug(new_bh, "refcount now=%d",
le32_to_cpu(HDR(new_bh)->h_refcount));
}
unlock_buffer(new_bh);
} else if (old_bh && header == HDR(old_bh)) {
/* Keep this block. No need to lock the block as we
don't need to change the reference count. */
new_bh = old_bh;
get_bh(new_bh);
ext2_xattr_cache_insert(ext2_mb_cache, new_bh);
} else {
/* We need to allocate a new block */
ext2_fsblk_t goal = ext2_group_first_block_no(sb,
EXT2_I(inode)->i_block_group);
int block = ext2_new_block(inode, goal, &error);
if (error)
goto cleanup;
ea_idebug(inode, "creating block %d", block);
new_bh = sb_getblk(sb, block);
if (unlikely(!new_bh)) {
ext2_free_blocks(inode, block, 1);
mark_inode_dirty(inode);
error = -ENOMEM;
goto cleanup;
}
lock_buffer(new_bh);
memcpy(new_bh->b_data, header, new_bh->b_size);
set_buffer_uptodate(new_bh);
unlock_buffer(new_bh);
ext2_xattr_cache_insert(ext2_mb_cache, new_bh);
ext2_xattr_update_super_block(sb);
}
mark_buffer_dirty(new_bh);
if (IS_SYNC(inode)) {
sync_dirty_buffer(new_bh);
error = -EIO;
if (buffer_req(new_bh) && !buffer_uptodate(new_bh))
goto cleanup;
}
}
/* Update the inode. */
EXT2_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;
inode->i_ctime = CURRENT_TIME_SEC;
if (IS_SYNC(inode)) {
error = sync_inode_metadata(inode, 1);
/* In case sync failed due to ENOSPC the inode was actually
* written (only some dirty data were not) so we just proceed
* as if nothing happened and cleanup the unused block */
if (error && error != -ENOSPC) {
if (new_bh && new_bh != old_bh) {
dquot_free_block_nodirty(inode, 1);
mark_inode_dirty(inode);
}
goto cleanup;
}
} else
mark_inode_dirty(inode);
error = 0;
if (old_bh && old_bh != new_bh) {
/*
* If there was an old block and we are no longer using it,
* release the old block.
*/
lock_buffer(old_bh);
if (HDR(old_bh)->h_refcount == cpu_to_le32(1)) {
__u32 hash = le32_to_cpu(HDR(old_bh)->h_hash);
/*
* This must happen under buffer lock for
* ext2_xattr_set2() to reliably detect freed block
*/
mb2_cache_entry_delete_block(ext2_mb_cache,
hash, old_bh->b_blocknr);
/* Free the old block. */
ea_bdebug(old_bh, "freeing");
ext2_free_blocks(inode, old_bh->b_blocknr, 1);
mark_inode_dirty(inode);
/* We let our caller release old_bh, so we
* need to duplicate the buffer before. */
get_bh(old_bh);
bforget(old_bh);
} else {
/* Decrement the refcount only. */
le32_add_cpu(&HDR(old_bh)->h_refcount, -1);
dquot_free_block_nodirty(inode, 1);
mark_inode_dirty(inode);
mark_buffer_dirty(old_bh);
ea_bdebug(old_bh, "refcount now=%d",
le32_to_cpu(HDR(old_bh)->h_refcount));
}
unlock_buffer(old_bh);
}
cleanup:
brelse(new_bh);
return error;
}
| 169,984 |
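
The commit message calls out the tricky part: an mbcache entry can point at a block that is freed between lookup and lock, so validity must be re-checked under the buffer lock (and, symmetrically, deletion now happens under that same lock). The lookup/lock/revalidate shape in a reduced pthread sketch (all types here are illustrative, not the kernel's):

#include <stddef.h>
#include <pthread.h>

struct block { pthread_mutex_t lock; int in_use; unsigned hash; };

static struct block *acquire_cached(struct block *b, unsigned want_hash)
{
    pthread_mutex_lock(&b->lock);
    if (!b->in_use || b->hash != want_hash) {
        /* The entry went stale between lookup and lock: back off. */
        pthread_mutex_unlock(&b->lock);
        return NULL;
    }
    b->in_use++;                   /* now safe to take a reference */
    pthread_mutex_unlock(&b->lock);
    return b;
}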
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void NavigatorImpl::DidNavigate(
RenderFrameHostImpl* render_frame_host,
const FrameHostMsg_DidCommitProvisionalLoad_Params& params,
std::unique_ptr<NavigationHandleImpl> navigation_handle) {
FrameTree* frame_tree = render_frame_host->frame_tree_node()->frame_tree();
bool oopifs_possible = SiteIsolationPolicy::AreCrossProcessFramesPossible();
bool is_navigation_within_page = controller_->IsURLInPageNavigation(
params.url, params.origin, params.was_within_same_document,
render_frame_host);
if (is_navigation_within_page &&
render_frame_host !=
render_frame_host->frame_tree_node()
->render_manager()
->current_frame_host()) {
bad_message::ReceivedBadMessage(render_frame_host->GetProcess(),
bad_message::NI_IN_PAGE_NAVIGATION);
is_navigation_within_page = false;
}
if (ui::PageTransitionIsMainFrame(params.transition)) {
if (delegate_) {
if (delegate_->CanOverscrollContent()) {
if (!params.was_within_same_document)
controller_->TakeScreenshot();
}
delegate_->DidNavigateMainFramePreCommit(is_navigation_within_page);
}
if (!oopifs_possible)
frame_tree->root()->render_manager()->DidNavigateFrame(
render_frame_host, params.gesture == NavigationGestureUser);
}
render_frame_host->frame_tree_node()->SetCurrentOrigin(
params.origin, params.has_potentially_trustworthy_unique_origin);
render_frame_host->frame_tree_node()->SetInsecureRequestPolicy(
params.insecure_request_policy);
if (!is_navigation_within_page) {
render_frame_host->ResetContentSecurityPolicies();
render_frame_host->frame_tree_node()->ResetCspHeaders();
render_frame_host->frame_tree_node()->ResetFeaturePolicyHeader();
}
if (oopifs_possible) {
FrameTreeNode* frame = render_frame_host->frame_tree_node();
frame->render_manager()->DidNavigateFrame(
render_frame_host, params.gesture == NavigationGestureUser);
}
SiteInstanceImpl* site_instance = render_frame_host->GetSiteInstance();
if (!site_instance->HasSite() && ShouldAssignSiteForURL(params.url) &&
!params.url_is_unreachable) {
site_instance->SetSite(params.url);
}
if (ui::PageTransitionIsMainFrame(params.transition) && delegate_)
delegate_->SetMainFrameMimeType(params.contents_mime_type);
int old_entry_count = controller_->GetEntryCount();
LoadCommittedDetails details;
bool did_navigate = controller_->RendererDidNavigate(
render_frame_host, params, &details, is_navigation_within_page,
navigation_handle.get());
if (old_entry_count != controller_->GetEntryCount() ||
details.previous_entry_index !=
controller_->GetLastCommittedEntryIndex()) {
frame_tree->root()->render_manager()->SendPageMessage(
new PageMsg_SetHistoryOffsetAndLength(
MSG_ROUTING_NONE, controller_->GetLastCommittedEntryIndex(),
controller_->GetEntryCount()),
site_instance);
}
render_frame_host->frame_tree_node()->SetCurrentURL(params.url);
render_frame_host->SetLastCommittedOrigin(params.origin);
if (!params.url_is_unreachable)
render_frame_host->set_last_successful_url(params.url);
if (did_navigate && !is_navigation_within_page)
render_frame_host->ResetFeaturePolicy();
if (details.type != NAVIGATION_TYPE_NAV_IGNORE && delegate_) {
DCHECK_EQ(!render_frame_host->GetParent(),
did_navigate ? details.is_main_frame : false);
navigation_handle->DidCommitNavigation(params, did_navigate,
details.did_replace_entry,
details.previous_url, details.type,
render_frame_host);
navigation_handle.reset();
}
if (!did_navigate)
return; // No navigation happened.
RecordNavigationMetrics(details, params, site_instance);
if (delegate_) {
if (details.is_main_frame) {
delegate_->DidNavigateMainFramePostCommit(render_frame_host,
details, params);
}
delegate_->DidNavigateAnyFramePostCommit(
render_frame_host, details, params);
}
}
Commit Message: Correctly reset FP in RFHI whenever origin changes
Bug: 713364
Change-Id: Id8bb923750e20f3db6fc9358b1d44120513ac95f
CQ_INCLUDE_TRYBOTS=master.tryserver.chromium.linux:linux_site_isolation
Change-Id: Id8bb923750e20f3db6fc9358b1d44120513ac95f
Reviewed-on: https://chromium-review.googlesource.com/482380
Commit-Queue: Ian Clelland <[email protected]>
Reviewed-by: Charles Reis <[email protected]>
Cr-Commit-Position: refs/heads/master@{#466778}
CWE ID: CWE-254 | void NavigatorImpl::DidNavigate(
RenderFrameHostImpl* render_frame_host,
const FrameHostMsg_DidCommitProvisionalLoad_Params& params,
std::unique_ptr<NavigationHandleImpl> navigation_handle) {
FrameTree* frame_tree = render_frame_host->frame_tree_node()->frame_tree();
bool oopifs_possible = SiteIsolationPolicy::AreCrossProcessFramesPossible();
bool is_navigation_within_page = controller_->IsURLInPageNavigation(
params.url, params.origin, params.was_within_same_document,
render_frame_host);
if (is_navigation_within_page &&
render_frame_host !=
render_frame_host->frame_tree_node()
->render_manager()
->current_frame_host()) {
bad_message::ReceivedBadMessage(render_frame_host->GetProcess(),
bad_message::NI_IN_PAGE_NAVIGATION);
is_navigation_within_page = false;
}
if (ui::PageTransitionIsMainFrame(params.transition)) {
if (delegate_) {
if (delegate_->CanOverscrollContent()) {
if (!params.was_within_same_document)
controller_->TakeScreenshot();
}
delegate_->DidNavigateMainFramePreCommit(is_navigation_within_page);
}
if (!oopifs_possible)
frame_tree->root()->render_manager()->DidNavigateFrame(
render_frame_host, params.gesture == NavigationGestureUser);
}
render_frame_host->frame_tree_node()->SetCurrentOrigin(
params.origin, params.has_potentially_trustworthy_unique_origin);
render_frame_host->frame_tree_node()->SetInsecureRequestPolicy(
params.insecure_request_policy);
if (!is_navigation_within_page) {
render_frame_host->ResetContentSecurityPolicies();
render_frame_host->frame_tree_node()->ResetCspHeaders();
render_frame_host->frame_tree_node()->ResetFeaturePolicyHeader();
}
if (oopifs_possible) {
FrameTreeNode* frame = render_frame_host->frame_tree_node();
frame->render_manager()->DidNavigateFrame(
render_frame_host, params.gesture == NavigationGestureUser);
}
SiteInstanceImpl* site_instance = render_frame_host->GetSiteInstance();
if (!site_instance->HasSite() && ShouldAssignSiteForURL(params.url) &&
!params.url_is_unreachable) {
site_instance->SetSite(params.url);
}
if (ui::PageTransitionIsMainFrame(params.transition) && delegate_)
delegate_->SetMainFrameMimeType(params.contents_mime_type);
int old_entry_count = controller_->GetEntryCount();
LoadCommittedDetails details;
bool did_navigate = controller_->RendererDidNavigate(
render_frame_host, params, &details, is_navigation_within_page,
navigation_handle.get());
if (old_entry_count != controller_->GetEntryCount() ||
details.previous_entry_index !=
controller_->GetLastCommittedEntryIndex()) {
frame_tree->root()->render_manager()->SendPageMessage(
new PageMsg_SetHistoryOffsetAndLength(
MSG_ROUTING_NONE, controller_->GetLastCommittedEntryIndex(),
controller_->GetEntryCount()),
site_instance);
}
render_frame_host->frame_tree_node()->SetCurrentURL(params.url);
render_frame_host->SetLastCommittedOrigin(params.origin);
if (!params.url_is_unreachable)
render_frame_host->set_last_successful_url(params.url);
if (!is_navigation_within_page)
render_frame_host->ResetFeaturePolicy();
if (details.type != NAVIGATION_TYPE_NAV_IGNORE && delegate_) {
DCHECK_EQ(!render_frame_host->GetParent(),
did_navigate ? details.is_main_frame : false);
navigation_handle->DidCommitNavigation(params, did_navigate,
details.did_replace_entry,
details.previous_url, details.type,
render_frame_host);
navigation_handle.reset();
}
if (!did_navigate)
return; // No navigation happened.
RecordNavigationMetrics(details, params, site_instance);
if (delegate_) {
if (details.is_main_frame) {
delegate_->DidNavigateMainFramePostCommit(render_frame_host,
details, params);
}
delegate_->DidNavigateAnyFramePostCommit(
render_frame_host, details, params);
}
}
| 171,963 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void add_param_to_argv(char *parsestart, int line)
{
int quote_open = 0, escaped = 0, param_len = 0;
char param_buffer[1024], *curchar;
/* After fighting with strtok enough, here's now
	 * a 'real' parser. According to Rusty I'm now no
	 * longer a real hacker, but I can live with that */

	for (curchar = parsestart; *curchar; curchar++) {
if (quote_open) {
if (escaped) {
param_buffer[param_len++] = *curchar;
escaped = 0;
continue;
			} else if (*curchar == '\\') {
				escaped = 1;
				continue;
			} else if (*curchar == '"') {
				quote_open = 0;
				*curchar = '"';
			} else {
				param_buffer[param_len++] = *curchar;
				continue;
			}
		} else {
			if (*curchar == '"') {
				quote_open = 1;
				continue;
			}
		}

		switch (*curchar) {
		case '"':
			break;
		case ' ':
		case '\t':
		case '\n':
if (!param_len) {
/* two spaces? */
continue;
}
break;
default:
/* regular character, copy to buffer */
param_buffer[param_len++] = *curchar;
if (param_len >= sizeof(param_buffer))
xtables_error(PARAMETER_PROBLEM,
"Parameter too long!");
continue;
}
param_buffer[param_len] = '\0';
/* check if table name specified */
if ((param_buffer[0] == '-' &&
param_buffer[1] != '-' &&
strchr(param_buffer, 't')) ||
(!strncmp(param_buffer, "--t", 3) &&
!strncmp(param_buffer, "--table", strlen(param_buffer)))) {
xtables_error(PARAMETER_PROBLEM,
"The -t option (seen in line %u) cannot be used in %s.\n",
line, xt_params->program_name);
}
add_argv(param_buffer, 0);
param_len = 0;
	}
}
Commit Message:
CWE ID: CWE-119 | void add_param_to_argv(char *parsestart, int line)
{
int quote_open = 0, escaped = 0;
struct xt_param_buf param = {};
char *curchar;
/* After fighting with strtok enough, here's now
	 * a 'real' parser. According to Rusty I'm now no
	 * longer a real hacker, but I can live with that */

	for (curchar = parsestart; *curchar; curchar++) {
if (quote_open) {
if (escaped) {
add_param(¶m, curchar);
escaped = 0;
continue;
			} else if (*curchar == '\\') {
				escaped = 1;
				continue;
			} else if (*curchar == '"') {
				quote_open = 0;
				*curchar = '"';
			} else {
				add_param(&param, curchar);
				continue;
			}
		} else {
			if (*curchar == '"') {
				quote_open = 1;
				continue;
			}
		}

		switch (*curchar) {
		case '"':
			break;
		case ' ':
		case '\t':
		case '\n':
if (!param.len) {
/* two spaces? */
continue;
}
break;
default:
/* regular character, copy to buffer */
add_param(¶m, curchar);
continue;
}
param.buffer[param.len] = '\0';
/* check if table name specified */
if ((param.buffer[0] == '-' &&
param.buffer[1] != '-' &&
strchr(param.buffer, 't')) ||
(!strncmp(param.buffer, "--t", 3) &&
!strncmp(param.buffer, "--table", strlen(param.buffer)))) {
xtables_error(PARAMETER_PROBLEM,
"The -t option (seen in line %u) cannot be used in %s.\n",
line, xt_params->program_name);
}
add_argv(param.buffer, 0);
param.len = 0;
	}
}
| 164,750 |
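
The rewrite funnels every append through one helper operating on a struct xt_param_buf, so the bound is enforced in a single place instead of at each scattered param_buffer[param_len++] write. An approximation of that helper (the real one is in iptables' xshared.c; the exact check order here is an assumption):

#include <xtables.h>   /* xtables_error(), PARAMETER_PROBLEM */

struct xt_param_buf { char buffer[1024]; int len; };

static void add_param(struct xt_param_buf *param, const char *curchar)
{
    if (param->len >= (int)sizeof(param->buffer) - 1)
        xtables_error(PARAMETER_PROBLEM, "Parameter too long!");
    param->buffer[param->len++] = *curchar;   /* bounded append */
}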
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: jbig2_sd_release(Jbig2Ctx *ctx, Jbig2SymbolDict *dict)
{
int i;
if (dict == NULL)
return;
for (i = 0; i < dict->n_symbols; i++)
if (dict->glyphs[i])
jbig2_image_release(ctx, dict->glyphs[i]);
jbig2_free(ctx->allocator, dict->glyphs);
jbig2_free(ctx->allocator, dict);
}
Commit Message:
CWE ID: CWE-119 | jbig2_sd_release(Jbig2Ctx *ctx, Jbig2SymbolDict *dict)
{
uint32_t i;
if (dict == NULL)
return;
for (i = 0; i < dict->n_symbols; i++)
if (dict->glyphs[i])
jbig2_image_release(ctx, dict->glyphs[i]);
jbig2_free(ctx->allocator, dict->glyphs);
jbig2_free(ctx->allocator, dict);
}
| 165,503 |
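
The whole fix is changing the loop index from int to uint32_t to match n_symbols. With a signed index, i < dict->n_symbols converts i to unsigned for the comparison, and once n_symbols exceeds INT_MAX the i++ overflows signed int, which is undefined behavior. A reduced sketch of the safe form:

#include <stdint.h>

static uint64_t sum_bytes(const uint8_t *v, uint32_t n)
{
    uint64_t s = 0;

    for (uint32_t i = 0; i < n; i++)   /* index type matches the count */
        s += v[i];
    return s;
}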
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void CredentialManagerImpl::OnProvisionalSaveComplete() {
DCHECK(form_manager_);
DCHECK(client_->IsSavingAndFillingEnabledForCurrentPage());
const autofill::PasswordForm& form = form_manager_->pending_credentials();
if (form_manager_->IsPendingCredentialsPublicSuffixMatch()) {
form_manager_->Save();
return;
}
if (!form.federation_origin.unique()) {
for (auto* match : form_manager_->form_fetcher()->GetFederatedMatches()) {
if (match->username_value == form.username_value &&
match->federation_origin.IsSameOriginWith(form.federation_origin)) {
form_manager_->Update(*match);
return;
}
}
} else if (!form_manager_->IsNewLogin()) {
form_manager_->Update(*form_manager_->preferred_match());
return;
}
client_->PromptUserToSaveOrUpdatePassword(std::move(form_manager_), false);
}
Commit Message: Fix Credential Management API Store() for existing Credentials
This changes fixes the Credential Management API to correctly handle
storing of already existing credentials. In the previous version
`preferred_match()` was updated, which is not necessarily the credential
selected by the user.
Bug: 795878
Cq-Include-Trybots: master.tryserver.chromium.linux:linux_mojo
Change-Id: I269f465861f44cdd784f0ce077e755191d3bd7bd
Reviewed-on: https://chromium-review.googlesource.com/843022
Commit-Queue: Jan Wilken Dörrie <[email protected]>
Reviewed-by: Balazs Engedy <[email protected]>
Reviewed-by: Jochen Eisinger <[email protected]>
Reviewed-by: Maxim Kolosovskiy <[email protected]>
Cr-Commit-Position: refs/heads/master@{#526313}
CWE ID: CWE-125 | void CredentialManagerImpl::OnProvisionalSaveComplete() {
DCHECK(form_manager_);
DCHECK(client_->IsSavingAndFillingEnabledForCurrentPage());
const autofill::PasswordForm& form = form_manager_->pending_credentials();
if (form_manager_->IsPendingCredentialsPublicSuffixMatch()) {
form_manager_->Save();
return;
}
if (!form.federation_origin.unique()) {
for (auto* match : form_manager_->form_fetcher()->GetFederatedMatches()) {
if (match->username_value == form.username_value &&
match->federation_origin.IsSameOriginWith(form.federation_origin)) {
form_manager_->Update(*match);
return;
}
}
} else if (!form_manager_->IsNewLogin()) {
auto best_match = form_manager_->best_matches().find(form.username_value);
// NOTE: We can't use DCHECK_NE here, since std::map<>::iterator does not
// support operator<<.
DCHECK(best_match != form_manager_->best_matches().end());
form_manager_->Update(*best_match->second);
return;
}
client_->PromptUserToSaveOrUpdatePassword(std::move(form_manager_), false);
}
| 173,253 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: on_unregister_handler(TCMUService1HandlerManager1 *interface,
GDBusMethodInvocation *invocation,
gchar *subtype,
gpointer user_data)
{
struct tcmur_handler *handler = find_handler_by_subtype(subtype);
struct dbus_info *info = handler->opaque;
if (!handler) {
g_dbus_method_invocation_return_value(invocation,
g_variant_new("(bs)", FALSE,
"unknown subtype"));
return TRUE;
}
dbus_unexport_handler(handler);
tcmur_unregister_handler(handler);
g_bus_unwatch_name(info->watcher_id);
g_free(info);
g_free(handler);
g_dbus_method_invocation_return_value(invocation,
g_variant_new("(bs)", TRUE, "succeeded"));
return TRUE;
}
Commit Message: fixed local DoS when UnregisterHandler was called for a not existing handler
Any user with DBUS access could cause a SEGFAULT in tcmu-runner by
running something like this:
dbus-send --system --print-reply --dest=org.kernel.TCMUService1 /org/kernel/TCMUService1/HandlerManager1 org.kernel.TCMUService1.HandlerManager1.UnregisterHandler string:123
CWE ID: CWE-20 | on_unregister_handler(TCMUService1HandlerManager1 *interface,
GDBusMethodInvocation *invocation,
gchar *subtype,
gpointer user_data)
{
struct tcmur_handler *handler = find_handler_by_subtype(subtype);
struct dbus_info *info = handler ? handler->opaque : NULL;
if (!handler) {
g_dbus_method_invocation_return_value(invocation,
g_variant_new("(bs)", FALSE,
"unknown subtype"));
return TRUE;
}
dbus_unexport_handler(handler);
tcmur_unregister_handler(handler);
g_bus_unwatch_name(info->watcher_id);
g_free(info);
g_free(handler);
g_dbus_method_invocation_return_value(invocation,
g_variant_new("(bs)", TRUE, "succeeded"));
return TRUE;
}
| 167,630 |
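The bug, isolated: the vulnerable version computed `handler->opaque` one line before the NULL check, so an unknown subtype — reachable by any user with D-Bus access, per the reproduction above — dereferenced a NULL pointer. The safe ordering as a pattern, with an illustrative error helper:

struct tcmur_handler *h = find_handler_by_subtype(subtype);
if (h == NULL) {
	/* report and bail out before touching any field of h */
	return reply_unknown_subtype(invocation);	/* illustrative name */
}
struct dbus_info *info = h->opaque;	/* safe: h proven non-NULL */

The committed fix keeps a single declaration line by using the ternary `handler ? handler->opaque : NULL` instead, which is equivalent because `info` is never dereferenced before the check returns.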
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: mysqlnd_switch_to_ssl_if_needed(
MYSQLND_CONN_DATA * conn,
const MYSQLND_PACKET_GREET * const greet_packet,
const MYSQLND_OPTIONS * const options,
unsigned long mysql_flags
TSRMLS_DC
)
{
enum_func_status ret = FAIL;
const MYSQLND_CHARSET * charset;
MYSQLND_PACKET_AUTH * auth_packet;
DBG_ENTER("mysqlnd_switch_to_ssl_if_needed");
auth_packet = conn->protocol->m.get_auth_packet(conn->protocol, FALSE TSRMLS_CC);
if (!auth_packet) {
SET_OOM_ERROR(*conn->error_info);
goto end;
}
auth_packet->client_flags = mysql_flags;
auth_packet->max_packet_size = MYSQLND_ASSEMBLED_PACKET_MAX_SIZE;
if (options->charset_name && (charset = mysqlnd_find_charset_name(options->charset_name))) {
auth_packet->charset_no = charset->nr;
} else {
#if MYSQLND_UNICODE
auth_packet->charset_no = 200;/* utf8 - swedish collation, check mysqlnd_charset.c */
#else
auth_packet->charset_no = greet_packet->charset_no;
#endif
}
#ifdef MYSQLND_SSL_SUPPORTED
if ((greet_packet->server_capabilities & CLIENT_SSL) && (mysql_flags & CLIENT_SSL)) {
zend_bool verify = mysql_flags & CLIENT_SSL_VERIFY_SERVER_CERT? TRUE:FALSE;
DBG_INF("Switching to SSL");
if (!PACKET_WRITE(auth_packet, conn)) {
CONN_SET_STATE(conn, CONN_QUIT_SENT);
SET_CLIENT_ERROR(*conn->error_info, CR_SERVER_GONE_ERROR, UNKNOWN_SQLSTATE, mysqlnd_server_gone);
goto end;
}
conn->net->m.set_client_option(conn->net, MYSQL_OPT_SSL_VERIFY_SERVER_CERT, (const char *) &verify TSRMLS_CC);
if (FAIL == conn->net->m.enable_ssl(conn->net TSRMLS_CC)) {
goto end;
}
}
#endif
ret = PASS;
end:
PACKET_FREE(auth_packet);
DBG_RETURN(ret);
}
Commit Message:
CWE ID: CWE-284 | mysqlnd_switch_to_ssl_if_needed(
MYSQLND_CONN_DATA * conn,
const MYSQLND_PACKET_GREET * const greet_packet,
const MYSQLND_OPTIONS * const options,
unsigned long mysql_flags
TSRMLS_DC
)
{
enum_func_status ret = FAIL;
const MYSQLND_CHARSET * charset;
MYSQLND_PACKET_AUTH * auth_packet;
DBG_ENTER("mysqlnd_switch_to_ssl_if_needed");
auth_packet = conn->protocol->m.get_auth_packet(conn->protocol, FALSE TSRMLS_CC);
if (!auth_packet) {
SET_OOM_ERROR(*conn->error_info);
goto end;
}
auth_packet->client_flags = mysql_flags;
auth_packet->max_packet_size = MYSQLND_ASSEMBLED_PACKET_MAX_SIZE;
if (options->charset_name && (charset = mysqlnd_find_charset_name(options->charset_name))) {
auth_packet->charset_no = charset->nr;
} else {
#if MYSQLND_UNICODE
auth_packet->charset_no = 200;/* utf8 - swedish collation, check mysqlnd_charset.c */
#else
auth_packet->charset_no = greet_packet->charset_no;
#endif
}
#ifdef MYSQLND_SSL_SUPPORTED
if (mysql_flags & CLIENT_SSL) {
zend_bool server_has_ssl = (greet_packet->server_capabilities & CLIENT_SSL)? TRUE:FALSE;
if (server_has_ssl == FALSE) {
goto close_conn;
} else {
zend_bool verify = mysql_flags & CLIENT_SSL_VERIFY_SERVER_CERT? TRUE:FALSE;
DBG_INF("Switching to SSL");
if (!PACKET_WRITE(auth_packet, conn)) {
goto close_conn;
}
conn->net->m.set_client_option(conn->net, MYSQL_OPT_SSL_VERIFY_SERVER_CERT, (const char *) &verify TSRMLS_CC);
if (FAIL == conn->net->m.enable_ssl(conn->net TSRMLS_CC)) {
goto end;
}
}
}
#else
auth_packet->client_flags &= ~CLIENT_SSL;
if (!PACKET_WRITE(auth_packet, conn)) {
goto close_conn;
}
#endif
ret = PASS;
end:
PACKET_FREE(auth_packet);
DBG_RETURN(ret);
close_conn:
CONN_SET_STATE(conn, CONN_QUIT_SENT);
conn->m->send_close(conn TSRMLS_CC);
SET_CLIENT_ERROR(*conn->error_info, CR_SERVER_GONE_ERROR, UNKNOWN_SQLSTATE, mysqlnd_server_gone);
PACKET_FREE(auth_packet);
DBG_RETURN(ret);
}
| 165,275 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void btif_hl_select_monitor_callback(fd_set *p_cur_set ,fd_set *p_org_set) {
UNUSED(p_org_set);
BTIF_TRACE_DEBUG("entering %s",__FUNCTION__);
for (const list_node_t *node = list_begin(soc_queue);
node != list_end(soc_queue); node = list_next(node)) {
btif_hl_soc_cb_t *p_scb = list_node(node);
if (btif_hl_get_socket_state(p_scb) == BTIF_HL_SOC_STATE_W4_READ) {
if (FD_ISSET(p_scb->socket_id[1], p_cur_set)) {
BTIF_TRACE_DEBUG("read data state= BTIF_HL_SOC_STATE_W4_READ");
btif_hl_mdl_cb_t *p_dcb = BTIF_HL_GET_MDL_CB_PTR(p_scb->app_idx,
p_scb->mcl_idx, p_scb->mdl_idx);
assert(p_dcb != NULL);
if (p_dcb->p_tx_pkt) {
BTIF_TRACE_ERROR("Rcv new pkt but the last pkt is still not been"
" sent tx_size=%d", p_dcb->tx_size);
btif_hl_free_buf((void **) &p_dcb->p_tx_pkt);
}
p_dcb->p_tx_pkt = btif_hl_get_buf (p_dcb->mtu);
if (p_dcb) {
int r = (int)recv(p_scb->socket_id[1], p_dcb->p_tx_pkt,
p_dcb->mtu, MSG_DONTWAIT);
if (r > 0) {
BTIF_TRACE_DEBUG("btif_hl_select_monitor_callback send data r =%d", r);
p_dcb->tx_size = r;
BTIF_TRACE_DEBUG("btif_hl_select_monitor_callback send data tx_size=%d", p_dcb->tx_size );
BTA_HlSendData(p_dcb->mdl_handle, p_dcb->tx_size);
} else {
BTIF_TRACE_DEBUG("btif_hl_select_monitor_callback receive failed r=%d",r);
BTA_HlDchClose(p_dcb->mdl_handle);
}
}
}
}
}
if (list_is_empty(soc_queue))
BTIF_TRACE_DEBUG("btif_hl_select_monitor_queue is empty");
BTIF_TRACE_DEBUG("leaving %s",__FUNCTION__);
}
Commit Message: DO NOT MERGE Fix potential DoS caused by delivering signal to BT process
Bug: 28885210
Change-Id: I63866d894bfca47464d6e42e3fb0357c4f94d360
Conflicts:
btif/co/bta_hh_co.c
btif/src/btif_core.c
Merge conflict resolution of ag/1161415 (referencing ag/1164670)
- Directly into mnc-mr2-release
CWE ID: CWE-284 | void btif_hl_select_monitor_callback(fd_set *p_cur_set ,fd_set *p_org_set) {
UNUSED(p_org_set);
BTIF_TRACE_DEBUG("entering %s",__FUNCTION__);
for (const list_node_t *node = list_begin(soc_queue);
node != list_end(soc_queue); node = list_next(node)) {
btif_hl_soc_cb_t *p_scb = list_node(node);
if (btif_hl_get_socket_state(p_scb) == BTIF_HL_SOC_STATE_W4_READ) {
if (FD_ISSET(p_scb->socket_id[1], p_cur_set)) {
BTIF_TRACE_DEBUG("read data state= BTIF_HL_SOC_STATE_W4_READ");
btif_hl_mdl_cb_t *p_dcb = BTIF_HL_GET_MDL_CB_PTR(p_scb->app_idx,
p_scb->mcl_idx, p_scb->mdl_idx);
assert(p_dcb != NULL);
if (p_dcb->p_tx_pkt) {
BTIF_TRACE_ERROR("Rcv new pkt but the last pkt is still not been"
" sent tx_size=%d", p_dcb->tx_size);
btif_hl_free_buf((void **) &p_dcb->p_tx_pkt);
}
p_dcb->p_tx_pkt = btif_hl_get_buf (p_dcb->mtu);
if (p_dcb) {
int r = (int)TEMP_FAILURE_RETRY(recv(p_scb->socket_id[1], p_dcb->p_tx_pkt,
p_dcb->mtu, MSG_DONTWAIT));
if (r > 0) {
BTIF_TRACE_DEBUG("btif_hl_select_monitor_callback send data r =%d", r);
p_dcb->tx_size = r;
BTIF_TRACE_DEBUG("btif_hl_select_monitor_callback send data tx_size=%d", p_dcb->tx_size );
BTA_HlSendData(p_dcb->mdl_handle, p_dcb->tx_size);
} else {
BTIF_TRACE_DEBUG("btif_hl_select_monitor_callback receive failed r=%d",r);
BTA_HlDchClose(p_dcb->mdl_handle);
}
}
}
}
}
if (list_is_empty(soc_queue))
BTIF_TRACE_DEBUG("btif_hl_select_monitor_queue is empty");
BTIF_TRACE_DEBUG("leaving %s",__FUNCTION__);
}
| 173,441 |
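The entire fix is wrapping `recv()` in `TEMP_FAILURE_RETRY`, so a signal delivered to the Bluetooth process restarts the interrupted call instead of surfacing EINTR as a failure and closing the data channel. For reference, the macro as Android's Bionic libc defines it (a GNU statement expression):

#include <errno.h>

#define TEMP_FAILURE_RETRY(exp) ({		\
	__typeof__(exp) _rc;			\
	do {					\
		_rc = (exp);			\
	} while (_rc == -1 && errno == EINTR);	\
	_rc; })

The merge-conflict notes in the commit message suggest the same treatment was applied to blocking calls in btif_core.c and bta_hh_co.c as well.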
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: bool TextureManager::TextureInfo::ValidForTexture(
GLint face,
GLint level,
GLint xoffset,
GLint yoffset,
GLsizei width,
GLsizei height,
GLenum format,
GLenum type) const {
size_t face_index = GLTargetToFaceIndex(face);
if (level >= 0 && face_index < level_infos_.size() &&
static_cast<size_t>(level) < level_infos_[face_index].size()) {
const LevelInfo& info = level_infos_[GLTargetToFaceIndex(face)][level];
GLint right;
GLint top;
return SafeAdd(xoffset, width, &right) &&
SafeAdd(yoffset, height, &top) &&
xoffset >= 0 &&
yoffset >= 0 &&
right <= info.width &&
top <= info.height &&
format == info.internal_format &&
type == info.type;
}
return false;
}
Commit Message: Fix SafeAdd and SafeMultiply
BUG=145648,145544
Review URL: https://chromiumcodereview.appspot.com/10916165
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@155478 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-189 | bool TextureManager::TextureInfo::ValidForTexture(
GLint face,
GLint level,
GLint xoffset,
GLint yoffset,
GLsizei width,
GLsizei height,
GLenum format,
GLenum type) const {
size_t face_index = GLTargetToFaceIndex(face);
if (level >= 0 && face_index < level_infos_.size() &&
static_cast<size_t>(level) < level_infos_[face_index].size()) {
const LevelInfo& info = level_infos_[GLTargetToFaceIndex(face)][level];
int32 right;
int32 top;
return SafeAddInt32(xoffset, width, &right) &&
SafeAddInt32(yoffset, height, &top) &&
xoffset >= 0 &&
yoffset >= 0 &&
right <= info.width &&
top <= info.height &&
format == info.internal_format &&
type == info.type;
}
return false;
}
| 170,752 |
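The substantive change is from `GLint` temporaries with a generic `SafeAdd` to explicit `int32` temporaries with `SafeAddInt32`, pinning the overflow check to a fixed-width type. This record never shows the helper's body, so the following is an illustration of the technique rather than Chromium's actual code:

#include <stdint.h>

/* Store a + b in *result and return 1 iff the sum fits in int32_t;
 * return 0 without writing when it would wrap in either direction. */
static int safe_add_int32(int32_t a, int32_t b, int32_t *result)
{
	if (b > 0 && a > INT32_MAX - b)
		return 0;	/* would overflow */
	if (b < 0 && a < INT32_MIN - b)
		return 0;	/* would underflow */
	*result = a + b;
	return 1;
}

With `xoffset + width` checked this way, a hostile `width` can no longer wrap `right` around and slip past the `right <= info.width` bound.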
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: gss_get_mic_iov_length(OM_uint32 *minor_status, gss_ctx_id_t context_handle,
gss_qop_t qop_req, gss_iov_buffer_desc *iov,
int iov_count)
{
OM_uint32 status;
gss_union_ctx_id_t ctx;
gss_mechanism mech;
status = val_wrap_iov_args(minor_status, context_handle, 0, qop_req, NULL,
iov, iov_count);
if (status != GSS_S_COMPLETE)
return status;
/* Select the approprate underlying mechanism routine and call it. */
ctx = (gss_union_ctx_id_t)context_handle;
mech = gssint_get_mechanism(ctx->mech_type);
if (mech == NULL)
return GSS_S_BAD_MECH;
if (mech->gss_get_mic_iov_length == NULL)
return GSS_S_UNAVAILABLE;
status = mech->gss_get_mic_iov_length(minor_status, ctx->internal_ctx_id,
qop_req, iov, iov_count);
if (status != GSS_S_COMPLETE)
map_error(minor_status, mech);
return status;
}
Commit Message: Preserve GSS context on init/accept failure
After gss_init_sec_context() or gss_accept_sec_context() has created a
context, don't delete the mechglue context on failures from subsequent
calls, even if the mechanism deletes the mech-specific context (which
is allowed by RFC 2744 but not preferred). Check for union contexts
with no mechanism context in each GSS function which accepts a
gss_ctx_id_t.
CVE-2017-11462:
RFC 2744 permits a GSS-API implementation to delete an existing
security context on a second or subsequent call to
gss_init_sec_context() or gss_accept_sec_context() if the call results
in an error. This API behavior has been found to be dangerous,
leading to the possibility of memory errors in some callers. For
safety, GSS-API implementations should instead preserve existing
security contexts on error until the caller deletes them.
All versions of MIT krb5 prior to this change may delete acceptor
contexts on error. Versions 1.13.4 through 1.13.7, 1.14.1 through
1.14.5, and 1.15 through 1.15.1 may also delete initiator contexts on
error.
ticket: 8598 (new)
target_version: 1.15-next
target_version: 1.14-next
tags: pullup
CWE ID: CWE-415 | gss_get_mic_iov_length(OM_uint32 *minor_status, gss_ctx_id_t context_handle,
gss_qop_t qop_req, gss_iov_buffer_desc *iov,
int iov_count)
{
OM_uint32 status;
gss_union_ctx_id_t ctx;
gss_mechanism mech;
status = val_wrap_iov_args(minor_status, context_handle, 0, qop_req, NULL,
iov, iov_count);
if (status != GSS_S_COMPLETE)
return status;
/* Select the approprate underlying mechanism routine and call it. */
ctx = (gss_union_ctx_id_t)context_handle;
if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT)
return GSS_S_NO_CONTEXT;
mech = gssint_get_mechanism(ctx->mech_type);
if (mech == NULL)
return GSS_S_BAD_MECH;
if (mech->gss_get_mic_iov_length == NULL)
return GSS_S_UNAVAILABLE;
status = mech->gss_get_mic_iov_length(minor_status, ctx->internal_ctx_id,
qop_req, iov, iov_count);
if (status != GSS_S_COMPLETE)
map_error(minor_status, mech);
return status;
}
| 168,030 |
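The two inserted lines are the commit's policy in miniature: after a failed init/accept, the union context now survives with `internal_ctx_id == GSS_C_NO_CONTEXT`, and every function taking a `gss_ctx_id_t` must answer `GSS_S_NO_CONTEXT` rather than touch mechanism state that no longer exists. The caller-side hazard the message describes, sketched with the real GSS-API calls (the token-exchange loop is elided):

#include <gssapi/gssapi.h>

static void init_once(gss_name_t target, gss_buffer_t in_tok)
{
	OM_uint32 maj, min;
	gss_ctx_id_t ctx = GSS_C_NO_CONTEXT;
	gss_buffer_desc out_tok = GSS_C_EMPTY_BUFFER;

	maj = gss_init_sec_context(&min, GSS_C_NO_CREDENTIAL, &ctx, target,
				   GSS_C_NO_OID, 0, 0,
				   GSS_C_NO_CHANNEL_BINDINGS, in_tok,
				   NULL, &out_tok, NULL, NULL);
	/* ... exchange tokens and call gss_init_sec_context() again ... */

	/* Before the fix, a failing second call could delete ctx behind
	 * the caller's back, making this cleanup a double-free. */
	if (ctx != GSS_C_NO_CONTEXT)
		gss_delete_sec_context(&min, &ctx, GSS_C_NO_BUFFER);
	(void)maj;
}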
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static int fsmVerify(const char *path, rpmfi fi)
{
int rc;
int saveerrno = errno;
struct stat dsb;
mode_t mode = rpmfiFMode(fi);
rc = fsmStat(path, 1, &dsb);
if (rc)
return rc;
if (S_ISREG(mode)) {
/* HP-UX (and other os'es) don't permit unlink on busy files. */
char *rmpath = rstrscat(NULL, path, "-RPMDELETE", NULL);
rc = fsmRename(path, rmpath);
/* XXX shouldn't we take unlink return code here? */
if (!rc)
(void) fsmUnlink(rmpath);
else
rc = RPMERR_UNLINK_FAILED;
free(rmpath);
return (rc ? rc : RPMERR_ENOENT); /* XXX HACK */
} else if (S_ISDIR(mode)) {
if (S_ISDIR(dsb.st_mode)) return 0;
if (S_ISLNK(dsb.st_mode)) {
rc = fsmStat(path, 0, &dsb);
if (rc == RPMERR_ENOENT) rc = 0;
if (rc) return rc;
errno = saveerrno;
if (S_ISDIR(dsb.st_mode)) return 0;
}
} else if (S_ISLNK(mode)) {
if (S_ISLNK(dsb.st_mode)) {
char buf[8 * BUFSIZ];
size_t len;
rc = fsmReadLink(path, buf, 8 * BUFSIZ, &len);
errno = saveerrno;
if (rc) return rc;
if (rstreq(rpmfiFLink(fi), buf)) return 0;
}
} else if (S_ISFIFO(mode)) {
if (S_ISFIFO(dsb.st_mode)) return 0;
} else if (S_ISCHR(mode) || S_ISBLK(mode)) {
if ((S_ISCHR(dsb.st_mode) || S_ISBLK(dsb.st_mode)) &&
(dsb.st_rdev == rpmfiFRdev(fi))) return 0;
} else if (S_ISSOCK(mode)) {
if (S_ISSOCK(dsb.st_mode)) return 0;
}
/* XXX shouldn't do this with commit/undo. */
rc = fsmUnlink(path);
if (rc == 0) rc = RPMERR_ENOENT;
return (rc ? rc : RPMERR_ENOENT); /* XXX HACK */
}
Commit Message: Restrict following symlinks to directories by ownership (CVE-2017-7500)
Only follow directory symlinks owned by target directory owner or root.
This prevents privilege escalation from user-writable directories via
directory symlinks to privileged directories on package upgrade, while
still allowing admin to arrange disk usage with symlinks.
The rationale is that if you can create symlinks owned by user X you *are*
user X (or root), and if you also own directory Y you can do whatever with
it already, including change permissions. So when you create a symlink to
that directory, the link ownership acts as a simple stamp of authority that
you indeed want rpm to treat this symlink as it were the directory that
you own. Such a permission can only be given by you or root, which
is just the way we want it. Plus it's almost ridiculously simple as far
as rules go, compared to trying to calculate something from the
source vs destination directory permissions etc.
In the normal case, the user arranging diskspace with symlinks is indeed
root so nothing changes, the only real change here is to links created by
non-privileged users which should be few and far between in practise.
Unfortunately our test-suite runs as a regular user via fakechroot and
thus the testcase for this fails under the new rules. Adjust the testcase
to get the ownership straight and add a second case for the illegal
behavior, basically the same as the old one but with different expectations.
CWE ID: CWE-59 | static int fsmVerify(const char *path, rpmfi fi)
static int fsmVerify(const char *path, rpmfi fi, const struct stat *fsb)
{
int rc;
int saveerrno = errno;
struct stat dsb;
mode_t mode = rpmfiFMode(fi);
rc = fsmStat(path, 1, &dsb);
if (rc)
return rc;
if (S_ISREG(mode)) {
/* HP-UX (and other os'es) don't permit unlink on busy files. */
char *rmpath = rstrscat(NULL, path, "-RPMDELETE", NULL);
rc = fsmRename(path, rmpath);
/* XXX shouldn't we take unlink return code here? */
if (!rc)
(void) fsmUnlink(rmpath);
else
rc = RPMERR_UNLINK_FAILED;
free(rmpath);
return (rc ? rc : RPMERR_ENOENT); /* XXX HACK */
} else if (S_ISDIR(mode)) {
if (S_ISDIR(dsb.st_mode)) return 0;
if (S_ISLNK(dsb.st_mode)) {
uid_t luid = dsb.st_uid;
rc = fsmStat(path, 0, &dsb);
if (rc == RPMERR_ENOENT) rc = 0;
if (rc) return rc;
errno = saveerrno;
/* Only permit directory symlinks by target owner and root */
if (S_ISDIR(dsb.st_mode) && (luid == 0 || luid == fsb->st_uid))
return 0;
}
} else if (S_ISLNK(mode)) {
if (S_ISLNK(dsb.st_mode)) {
char buf[8 * BUFSIZ];
size_t len;
rc = fsmReadLink(path, buf, 8 * BUFSIZ, &len);
errno = saveerrno;
if (rc) return rc;
if (rstreq(rpmfiFLink(fi), buf)) return 0;
}
} else if (S_ISFIFO(mode)) {
if (S_ISFIFO(dsb.st_mode)) return 0;
} else if (S_ISCHR(mode) || S_ISBLK(mode)) {
if ((S_ISCHR(dsb.st_mode) || S_ISBLK(dsb.st_mode)) &&
(dsb.st_rdev == rpmfiFRdev(fi))) return 0;
} else if (S_ISSOCK(mode)) {
if (S_ISSOCK(dsb.st_mode)) return 0;
}
/* XXX shouldn't do this with commit/undo. */
rc = fsmUnlink(path);
if (rc == 0) rc = RPMERR_ENOENT;
return (rc ? rc : RPMERR_ENOENT); /* XXX HACK */
}
| 170,176 |
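The new `fsb` parameter carries the ownership the policy compares against: `luid` saves the symlink's owner before the follow-up `stat()` overwrites `dsb`, and the link is honored only when `luid == 0 || luid == fsb->st_uid`. The commit's rule as a standalone predicate:

#include <sys/types.h>

/* Follow a directory symlink only when its creator could already act on
 * the target: root, or the owner the directory is being installed as. */
static int symlink_trusted(uid_t link_owner, uid_t target_owner)
{
	return link_owner == 0 || link_owner == target_owner;
}

As the commit argues, link ownership works as a stamp of authority because only root or that user could have created the symlink in the first place.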
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void LayerTreeHost::PushPropertiesTo(LayerTreeImpl* tree_impl) {
tree_impl->set_needs_full_tree_sync(needs_full_tree_sync_);
needs_full_tree_sync_ = false;
if (hud_layer_.get()) {
LayerImpl* hud_impl = tree_impl->LayerById(hud_layer_->id());
tree_impl->set_hud_layer(static_cast<HeadsUpDisplayLayerImpl*>(hud_impl));
} else {
tree_impl->set_hud_layer(nullptr);
}
tree_impl->set_background_color(background_color_);
tree_impl->set_has_transparent_background(has_transparent_background_);
tree_impl->set_have_scroll_event_handlers(have_scroll_event_handlers_);
tree_impl->set_event_listener_properties(
EventListenerClass::kTouchStartOrMove,
event_listener_properties(EventListenerClass::kTouchStartOrMove));
tree_impl->set_event_listener_properties(
EventListenerClass::kMouseWheel,
event_listener_properties(EventListenerClass::kMouseWheel));
tree_impl->set_event_listener_properties(
EventListenerClass::kTouchEndOrCancel,
event_listener_properties(EventListenerClass::kTouchEndOrCancel));
if (page_scale_layer_ && inner_viewport_scroll_layer_) {
tree_impl->SetViewportLayersFromIds(
overscroll_elasticity_layer_ ? overscroll_elasticity_layer_->id()
: Layer::INVALID_ID,
page_scale_layer_->id(), inner_viewport_scroll_layer_->id(),
outer_viewport_scroll_layer_ ? outer_viewport_scroll_layer_->id()
: Layer::INVALID_ID);
DCHECK(inner_viewport_scroll_layer_->IsContainerForFixedPositionLayers());
} else {
tree_impl->ClearViewportLayers();
}
tree_impl->RegisterSelection(selection_);
bool property_trees_changed_on_active_tree =
tree_impl->IsActiveTree() && tree_impl->property_trees()->changed;
if (root_layer_ && property_trees_changed_on_active_tree) {
if (property_trees_.sequence_number ==
tree_impl->property_trees()->sequence_number)
tree_impl->property_trees()->PushChangeTrackingTo(&property_trees_);
else
tree_impl->MoveChangeTrackingToLayers();
}
tree_impl->SetPropertyTrees(&property_trees_);
tree_impl->PushPageScaleFromMainThread(
page_scale_factor_, min_page_scale_factor_, max_page_scale_factor_);
tree_impl->set_browser_controls_shrink_blink_size(
browser_controls_shrink_blink_size_);
tree_impl->set_top_controls_height(top_controls_height_);
tree_impl->set_bottom_controls_height(bottom_controls_height_);
tree_impl->PushBrowserControlsFromMainThread(top_controls_shown_ratio_);
tree_impl->elastic_overscroll()->PushFromMainThread(elastic_overscroll_);
if (tree_impl->IsActiveTree())
tree_impl->elastic_overscroll()->PushPendingToActive();
tree_impl->set_painted_device_scale_factor(painted_device_scale_factor_);
tree_impl->SetDeviceColorSpace(device_color_space_);
if (pending_page_scale_animation_) {
tree_impl->SetPendingPageScaleAnimation(
std::move(pending_page_scale_animation_));
}
DCHECK(!tree_impl->ViewportSizeInvalid());
tree_impl->set_has_ever_been_drawn(false);
}
Commit Message: (Reland) Discard compositor frames from unloaded web content
This is a reland of https://codereview.chromium.org/2707243005/ with a
small change to fix an uninitialized memory error that fails on MSAN
bots.
BUG=672847
[email protected], [email protected]
CQ_INCLUDE_TRYBOTS=master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_site_isolation
Review-Url: https://codereview.chromium.org/2731283003
Cr-Commit-Position: refs/heads/master@{#454954}
CWE ID: CWE-362 | void LayerTreeHost::PushPropertiesTo(LayerTreeImpl* tree_impl) {
tree_impl->set_needs_full_tree_sync(needs_full_tree_sync_);
needs_full_tree_sync_ = false;
if (hud_layer_.get()) {
LayerImpl* hud_impl = tree_impl->LayerById(hud_layer_->id());
tree_impl->set_hud_layer(static_cast<HeadsUpDisplayLayerImpl*>(hud_impl));
} else {
tree_impl->set_hud_layer(nullptr);
}
tree_impl->set_background_color(background_color_);
tree_impl->set_has_transparent_background(has_transparent_background_);
tree_impl->set_have_scroll_event_handlers(have_scroll_event_handlers_);
tree_impl->set_event_listener_properties(
EventListenerClass::kTouchStartOrMove,
event_listener_properties(EventListenerClass::kTouchStartOrMove));
tree_impl->set_event_listener_properties(
EventListenerClass::kMouseWheel,
event_listener_properties(EventListenerClass::kMouseWheel));
tree_impl->set_event_listener_properties(
EventListenerClass::kTouchEndOrCancel,
event_listener_properties(EventListenerClass::kTouchEndOrCancel));
if (page_scale_layer_ && inner_viewport_scroll_layer_) {
tree_impl->SetViewportLayersFromIds(
overscroll_elasticity_layer_ ? overscroll_elasticity_layer_->id()
: Layer::INVALID_ID,
page_scale_layer_->id(), inner_viewport_scroll_layer_->id(),
outer_viewport_scroll_layer_ ? outer_viewport_scroll_layer_->id()
: Layer::INVALID_ID);
DCHECK(inner_viewport_scroll_layer_->IsContainerForFixedPositionLayers());
} else {
tree_impl->ClearViewportLayers();
}
tree_impl->RegisterSelection(selection_);
bool property_trees_changed_on_active_tree =
tree_impl->IsActiveTree() && tree_impl->property_trees()->changed;
if (root_layer_ && property_trees_changed_on_active_tree) {
if (property_trees_.sequence_number ==
tree_impl->property_trees()->sequence_number)
tree_impl->property_trees()->PushChangeTrackingTo(&property_trees_);
else
tree_impl->MoveChangeTrackingToLayers();
}
tree_impl->SetPropertyTrees(&property_trees_);
tree_impl->PushPageScaleFromMainThread(
page_scale_factor_, min_page_scale_factor_, max_page_scale_factor_);
tree_impl->set_browser_controls_shrink_blink_size(
browser_controls_shrink_blink_size_);
tree_impl->set_top_controls_height(top_controls_height_);
tree_impl->set_bottom_controls_height(bottom_controls_height_);
tree_impl->PushBrowserControlsFromMainThread(top_controls_shown_ratio_);
tree_impl->elastic_overscroll()->PushFromMainThread(elastic_overscroll_);
if (tree_impl->IsActiveTree())
tree_impl->elastic_overscroll()->PushPendingToActive();
tree_impl->set_painted_device_scale_factor(painted_device_scale_factor_);
tree_impl->SetDeviceColorSpace(device_color_space_);
tree_impl->set_content_source_id(content_source_id_);
if (pending_page_scale_animation_) {
tree_impl->SetPendingPageScaleAnimation(
std::move(pending_page_scale_animation_));
}
DCHECK(!tree_impl->ViewportSizeInvalid());
tree_impl->set_has_ever_been_drawn(false);
}
| 172,395 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void SetPreviewDataForIndex(int index, const base::RefCountedBytes* data) {
if (index != printing::COMPLETE_PREVIEW_DOCUMENT_INDEX &&
index < printing::FIRST_PAGE_INDEX) {
return;
}
page_data_map_[index] = const_cast<base::RefCountedBytes*>(data);
}
Commit Message: Print preview: Use an ID instead of memory pointer string in WebUI.
BUG=144051
Review URL: https://chromiumcodereview.appspot.com/10870003
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@153342 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-200 | void SetPreviewDataForIndex(int index, const base::RefCountedBytes* data) {
if (IsInvalidIndex(index))
return;
page_data_map_[index] = const_cast<base::RefCountedBytes*>(data);
}
| 170,825 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void ExtensionInstalledBubbleGtk::ShowInternal() {
BrowserWindowGtk* browser_window =
BrowserWindowGtk::GetBrowserWindowForNativeWindow(
browser_->window()->GetNativeHandle());
GtkWidget* reference_widget = NULL;
if (type_ == BROWSER_ACTION) {
BrowserActionsToolbarGtk* toolbar =
browser_window->GetToolbar()->GetBrowserActionsToolbar();
if (toolbar->animating() && animation_wait_retries_-- > 0) {
MessageLoopForUI::current()->PostDelayedTask(
FROM_HERE,
base::Bind(&ExtensionInstalledBubbleGtk::ShowInternal, this),
base::TimeDelta::FromMilliseconds(kAnimationWaitMS));
return;
}
reference_widget = toolbar->GetBrowserActionWidget(extension_);
gtk_container_check_resize(GTK_CONTAINER(
browser_window->GetToolbar()->widget()));
if (reference_widget && !gtk_widget_get_visible(reference_widget)) {
reference_widget = gtk_widget_get_visible(toolbar->chevron()) ?
toolbar->chevron() : NULL;
}
} else if (type_ == PAGE_ACTION) {
LocationBarViewGtk* location_bar_view =
browser_window->GetToolbar()->GetLocationBarView();
location_bar_view->SetPreviewEnabledPageAction(extension_->page_action(),
true); // preview_enabled
reference_widget = location_bar_view->GetPageActionWidget(
extension_->page_action());
gtk_container_check_resize(GTK_CONTAINER(
browser_window->GetToolbar()->widget()));
DCHECK(reference_widget);
} else if (type_ == OMNIBOX_KEYWORD) {
LocationBarViewGtk* location_bar_view =
browser_window->GetToolbar()->GetLocationBarView();
reference_widget = location_bar_view->location_entry_widget();
DCHECK(reference_widget);
}
if (reference_widget == NULL)
reference_widget = browser_window->GetToolbar()->GetAppMenuButton();
GtkThemeService* theme_provider = GtkThemeService::GetFrom(
browser_->profile());
GtkWidget* bubble_content = gtk_hbox_new(FALSE, kHorizontalColumnSpacing);
gtk_container_set_border_width(GTK_CONTAINER(bubble_content), kContentBorder);
if (!icon_.isNull()) {
GdkPixbuf* pixbuf = gfx::GdkPixbufFromSkBitmap(&icon_);
gfx::Size size(icon_.width(), icon_.height());
if (size.width() > kIconSize || size.height() > kIconSize) {
if (size.width() > size.height()) {
size.set_height(size.height() * kIconSize / size.width());
size.set_width(kIconSize);
} else {
size.set_width(size.width() * kIconSize / size.height());
size.set_height(kIconSize);
}
GdkPixbuf* old = pixbuf;
pixbuf = gdk_pixbuf_scale_simple(pixbuf, size.width(), size.height(),
GDK_INTERP_BILINEAR);
g_object_unref(old);
}
GtkWidget* icon_column = gtk_vbox_new(FALSE, 0);
gtk_box_pack_start(GTK_BOX(bubble_content), icon_column, FALSE, FALSE,
kIconPadding);
GtkWidget* image = gtk_image_new_from_pixbuf(pixbuf);
g_object_unref(pixbuf);
gtk_box_pack_start(GTK_BOX(icon_column), image, FALSE, FALSE, 0);
}
GtkWidget* text_column = gtk_vbox_new(FALSE, kTextColumnVerticalSpacing);
gtk_box_pack_start(GTK_BOX(bubble_content), text_column, FALSE, FALSE, 0);
GtkWidget* heading_label = gtk_label_new(NULL);
string16 extension_name = UTF8ToUTF16(extension_->name());
base::i18n::AdjustStringForLocaleDirection(&extension_name);
std::string heading_text = l10n_util::GetStringFUTF8(
IDS_EXTENSION_INSTALLED_HEADING, extension_name,
l10n_util::GetStringUTF16(IDS_SHORT_PRODUCT_NAME));
char* markup = g_markup_printf_escaped("<span size=\"larger\">%s</span>",
heading_text.c_str());
gtk_label_set_markup(GTK_LABEL(heading_label), markup);
g_free(markup);
gtk_util::SetLabelWidth(heading_label, kTextColumnWidth);
gtk_box_pack_start(GTK_BOX(text_column), heading_label, FALSE, FALSE, 0);
if (type_ == PAGE_ACTION) {
GtkWidget* info_label = gtk_label_new(l10n_util::GetStringUTF8(
IDS_EXTENSION_INSTALLED_PAGE_ACTION_INFO).c_str());
gtk_util::SetLabelWidth(info_label, kTextColumnWidth);
gtk_box_pack_start(GTK_BOX(text_column), info_label, FALSE, FALSE, 0);
}
if (type_ == OMNIBOX_KEYWORD) {
GtkWidget* info_label = gtk_label_new(l10n_util::GetStringFUTF8(
IDS_EXTENSION_INSTALLED_OMNIBOX_KEYWORD_INFO,
UTF8ToUTF16(extension_->omnibox_keyword())).c_str());
gtk_util::SetLabelWidth(info_label, kTextColumnWidth);
gtk_box_pack_start(GTK_BOX(text_column), info_label, FALSE, FALSE, 0);
}
GtkWidget* manage_label = gtk_label_new(
l10n_util::GetStringUTF8(IDS_EXTENSION_INSTALLED_MANAGE_INFO).c_str());
gtk_util::SetLabelWidth(manage_label, kTextColumnWidth);
gtk_box_pack_start(GTK_BOX(text_column), manage_label, FALSE, FALSE, 0);
GtkWidget* close_column = gtk_vbox_new(FALSE, 0);
gtk_box_pack_start(GTK_BOX(bubble_content), close_column, FALSE, FALSE, 0);
close_button_.reset(CustomDrawButton::CloseButton(theme_provider));
g_signal_connect(close_button_->widget(), "clicked",
G_CALLBACK(OnButtonClick), this);
gtk_box_pack_start(GTK_BOX(close_column), close_button_->widget(),
FALSE, FALSE, 0);
BubbleGtk::ArrowLocationGtk arrow_location =
!base::i18n::IsRTL() ?
BubbleGtk::ARROW_LOCATION_TOP_RIGHT :
BubbleGtk::ARROW_LOCATION_TOP_LEFT;
gfx::Rect bounds = gtk_util::WidgetBounds(reference_widget);
if (type_ == OMNIBOX_KEYWORD) {
arrow_location =
!base::i18n::IsRTL() ?
BubbleGtk::ARROW_LOCATION_TOP_LEFT :
BubbleGtk::ARROW_LOCATION_TOP_RIGHT;
if (base::i18n::IsRTL())
bounds.Offset(bounds.width(), 0);
bounds.set_width(0);
}
bubble_ = BubbleGtk::Show(reference_widget,
&bounds,
bubble_content,
arrow_location,
true, // match_system_theme
true, // grab_input
theme_provider,
this);
}
Commit Message: [i18n-fixlet] Make strings branding specific in extension code.
IDS_EXTENSIONS_UNINSTALL
IDS_EXTENSIONS_INCOGNITO_WARNING
IDS_EXTENSION_INSTALLED_HEADING
IDS_EXTENSION_ALERT_ITEM_EXTERNAL And fix a $1 $1 bug.
IDS_EXTENSION_INLINE_INSTALL_PROMPT_TITLE
BUG=NONE
TEST=NONE
Review URL: http://codereview.chromium.org/9107061
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@118018 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-119 | void ExtensionInstalledBubbleGtk::ShowInternal() {
BrowserWindowGtk* browser_window =
BrowserWindowGtk::GetBrowserWindowForNativeWindow(
browser_->window()->GetNativeHandle());
GtkWidget* reference_widget = NULL;
if (type_ == BROWSER_ACTION) {
BrowserActionsToolbarGtk* toolbar =
browser_window->GetToolbar()->GetBrowserActionsToolbar();
if (toolbar->animating() && animation_wait_retries_-- > 0) {
MessageLoopForUI::current()->PostDelayedTask(
FROM_HERE,
base::Bind(&ExtensionInstalledBubbleGtk::ShowInternal, this),
base::TimeDelta::FromMilliseconds(kAnimationWaitMS));
return;
}
reference_widget = toolbar->GetBrowserActionWidget(extension_);
gtk_container_check_resize(GTK_CONTAINER(
browser_window->GetToolbar()->widget()));
if (reference_widget && !gtk_widget_get_visible(reference_widget)) {
reference_widget = gtk_widget_get_visible(toolbar->chevron()) ?
toolbar->chevron() : NULL;
}
} else if (type_ == PAGE_ACTION) {
LocationBarViewGtk* location_bar_view =
browser_window->GetToolbar()->GetLocationBarView();
location_bar_view->SetPreviewEnabledPageAction(extension_->page_action(),
true); // preview_enabled
reference_widget = location_bar_view->GetPageActionWidget(
extension_->page_action());
gtk_container_check_resize(GTK_CONTAINER(
browser_window->GetToolbar()->widget()));
DCHECK(reference_widget);
} else if (type_ == OMNIBOX_KEYWORD) {
LocationBarViewGtk* location_bar_view =
browser_window->GetToolbar()->GetLocationBarView();
reference_widget = location_bar_view->location_entry_widget();
DCHECK(reference_widget);
}
if (reference_widget == NULL)
reference_widget = browser_window->GetToolbar()->GetAppMenuButton();
GtkThemeService* theme_provider = GtkThemeService::GetFrom(
browser_->profile());
GtkWidget* bubble_content = gtk_hbox_new(FALSE, kHorizontalColumnSpacing);
gtk_container_set_border_width(GTK_CONTAINER(bubble_content), kContentBorder);
if (!icon_.isNull()) {
GdkPixbuf* pixbuf = gfx::GdkPixbufFromSkBitmap(&icon_);
gfx::Size size(icon_.width(), icon_.height());
if (size.width() > kIconSize || size.height() > kIconSize) {
if (size.width() > size.height()) {
size.set_height(size.height() * kIconSize / size.width());
size.set_width(kIconSize);
} else {
size.set_width(size.width() * kIconSize / size.height());
size.set_height(kIconSize);
}
GdkPixbuf* old = pixbuf;
pixbuf = gdk_pixbuf_scale_simple(pixbuf, size.width(), size.height(),
GDK_INTERP_BILINEAR);
g_object_unref(old);
}
GtkWidget* icon_column = gtk_vbox_new(FALSE, 0);
gtk_box_pack_start(GTK_BOX(bubble_content), icon_column, FALSE, FALSE,
kIconPadding);
GtkWidget* image = gtk_image_new_from_pixbuf(pixbuf);
g_object_unref(pixbuf);
gtk_box_pack_start(GTK_BOX(icon_column), image, FALSE, FALSE, 0);
}
GtkWidget* text_column = gtk_vbox_new(FALSE, kTextColumnVerticalSpacing);
gtk_box_pack_start(GTK_BOX(bubble_content), text_column, FALSE, FALSE, 0);
GtkWidget* heading_label = gtk_label_new(NULL);
string16 extension_name = UTF8ToUTF16(extension_->name());
base::i18n::AdjustStringForLocaleDirection(&extension_name);
std::string heading_text = l10n_util::GetStringFUTF8(
IDS_EXTENSION_INSTALLED_HEADING, extension_name);
char* markup = g_markup_printf_escaped("<span size=\"larger\">%s</span>",
heading_text.c_str());
gtk_label_set_markup(GTK_LABEL(heading_label), markup);
g_free(markup);
gtk_util::SetLabelWidth(heading_label, kTextColumnWidth);
gtk_box_pack_start(GTK_BOX(text_column), heading_label, FALSE, FALSE, 0);
if (type_ == PAGE_ACTION) {
GtkWidget* info_label = gtk_label_new(l10n_util::GetStringUTF8(
IDS_EXTENSION_INSTALLED_PAGE_ACTION_INFO).c_str());
gtk_util::SetLabelWidth(info_label, kTextColumnWidth);
gtk_box_pack_start(GTK_BOX(text_column), info_label, FALSE, FALSE, 0);
}
if (type_ == OMNIBOX_KEYWORD) {
GtkWidget* info_label = gtk_label_new(l10n_util::GetStringFUTF8(
IDS_EXTENSION_INSTALLED_OMNIBOX_KEYWORD_INFO,
UTF8ToUTF16(extension_->omnibox_keyword())).c_str());
gtk_util::SetLabelWidth(info_label, kTextColumnWidth);
gtk_box_pack_start(GTK_BOX(text_column), info_label, FALSE, FALSE, 0);
}
GtkWidget* manage_label = gtk_label_new(
l10n_util::GetStringUTF8(IDS_EXTENSION_INSTALLED_MANAGE_INFO).c_str());
gtk_util::SetLabelWidth(manage_label, kTextColumnWidth);
gtk_box_pack_start(GTK_BOX(text_column), manage_label, FALSE, FALSE, 0);
GtkWidget* close_column = gtk_vbox_new(FALSE, 0);
gtk_box_pack_start(GTK_BOX(bubble_content), close_column, FALSE, FALSE, 0);
close_button_.reset(CustomDrawButton::CloseButton(theme_provider));
g_signal_connect(close_button_->widget(), "clicked",
G_CALLBACK(OnButtonClick), this);
gtk_box_pack_start(GTK_BOX(close_column), close_button_->widget(),
FALSE, FALSE, 0);
BubbleGtk::ArrowLocationGtk arrow_location =
!base::i18n::IsRTL() ?
BubbleGtk::ARROW_LOCATION_TOP_RIGHT :
BubbleGtk::ARROW_LOCATION_TOP_LEFT;
gfx::Rect bounds = gtk_util::WidgetBounds(reference_widget);
if (type_ == OMNIBOX_KEYWORD) {
arrow_location =
!base::i18n::IsRTL() ?
BubbleGtk::ARROW_LOCATION_TOP_LEFT :
BubbleGtk::ARROW_LOCATION_TOP_RIGHT;
if (base::i18n::IsRTL())
bounds.Offset(bounds.width(), 0);
bounds.set_width(0);
}
bubble_ = BubbleGtk::Show(reference_widget,
&bounds,
bubble_content,
arrow_location,
true, // match_system_theme
true, // grab_input
theme_provider,
this);
}
| 170,982 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void ChangeCurrentInputMethod(const InputMethodDescriptor& new_input_method) {
if (current_input_method_.id != new_input_method.id) {
previous_input_method_ = current_input_method_;
current_input_method_ = new_input_method;
if (!input_method::SetCurrentKeyboardLayoutByName(
current_input_method_.keyboard_layout)) {
LOG(ERROR) << "Failed to change keyboard layout to "
<< current_input_method_.keyboard_layout;
}
ObserverListBase<Observer>::Iterator it(observers_);
Observer* first_observer = it.GetNext();
if (first_observer) {
first_observer->PreferenceUpdateNeeded(this,
previous_input_method_,
current_input_method_);
}
}
const size_t num_active_input_methods = GetNumActiveInputMethods();
FOR_EACH_OBSERVER(Observer, observers_,
InputMethodChanged(this,
current_input_method_,
num_active_input_methods));
}
Commit Message: Remove use of libcros from InputMethodLibrary.
BUG=chromium-os:16238
TEST==confirm that input methods work as before on the netbook. Also confirm that the chrome builds and works on the desktop as before.
Review URL: http://codereview.chromium.org/7003086
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@89142 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-399 | void ChangeCurrentInputMethod(const InputMethodDescriptor& new_input_method) {
void ChangeCurrentInputMethod(const input_method::InputMethodDescriptor&
new_input_method) {
if (current_input_method_.id != new_input_method.id) {
previous_input_method_ = current_input_method_;
current_input_method_ = new_input_method;
if (!input_method::SetCurrentKeyboardLayoutByName(
current_input_method_.keyboard_layout)) {
LOG(ERROR) << "Failed to change keyboard layout to "
<< current_input_method_.keyboard_layout;
}
ObserverListBase<InputMethodLibrary::Observer>::Iterator it(observers_);
InputMethodLibrary::Observer* first_observer = it.GetNext();
if (first_observer) {
first_observer->PreferenceUpdateNeeded(this,
previous_input_method_,
current_input_method_);
}
}
const size_t num_active_input_methods = GetNumActiveInputMethods();
FOR_EACH_OBSERVER(InputMethodLibrary::Observer, observers_,
InputMethodChanged(this,
current_input_method_,
num_active_input_methods));
}
| 170,478 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void BeginInstallWithManifestFunction::OnParseSuccess(
const SkBitmap& icon, DictionaryValue* parsed_manifest) {
CHECK(parsed_manifest);
icon_ = icon;
parsed_manifest_.reset(parsed_manifest);
std::string init_errors;
dummy_extension_ = Extension::Create(
FilePath(),
Extension::INTERNAL,
*static_cast<DictionaryValue*>(parsed_manifest_.get()),
Extension::NO_FLAGS,
&init_errors);
if (!dummy_extension_.get()) {
OnParseFailure(MANIFEST_ERROR, std::string(kInvalidManifestError));
return;
}
if (icon_.empty())
icon_ = Extension::GetDefaultIcon(dummy_extension_->is_app());
ShowExtensionInstallDialog(profile(),
this,
dummy_extension_.get(),
&icon_,
dummy_extension_->GetPermissionMessageStrings(),
ExtensionInstallUI::INSTALL_PROMPT);
}
Commit Message: Adding tests for new webstore beginInstallWithManifest method.
BUG=75821
TEST=none
Review URL: http://codereview.chromium.org/6900059
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@83080 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-20 | void BeginInstallWithManifestFunction::OnParseSuccess(
const SkBitmap& icon, DictionaryValue* parsed_manifest) {
CHECK(parsed_manifest);
icon_ = icon;
parsed_manifest_.reset(parsed_manifest);
std::string init_errors;
dummy_extension_ = Extension::Create(
FilePath(),
Extension::INTERNAL,
*static_cast<DictionaryValue*>(parsed_manifest_.get()),
Extension::NO_FLAGS,
&init_errors);
if (!dummy_extension_.get()) {
OnParseFailure(MANIFEST_ERROR, std::string(kInvalidManifestError));
return;
}
if (icon_.empty())
icon_ = Extension::GetDefaultIcon(dummy_extension_->is_app());
// In tests, we may have setup to proceed or abort without putting up the real
// confirmation dialog.
if (auto_confirm_for_tests != DO_NOT_SKIP) {
if (auto_confirm_for_tests == PROCEED)
this->InstallUIProceed();
else
this->InstallUIAbort();
return;
}
ShowExtensionInstallDialog(profile(),
this,
dummy_extension_.get(),
&icon_,
dummy_extension_->GetPermissionMessageStrings(),
ExtensionInstallUI::INSTALL_PROMPT);
}
| 170,405 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static int32_t scsi_send_command(SCSIRequest *req, uint8_t *buf)
{
SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
int32_t len;
uint8_t command;
uint8_t *outbuf;
int rc;
command = buf[0];
outbuf = (uint8_t *)r->iov.iov_base;
DPRINTF("Command: lun=%d tag=0x%x data=0x%02x", req->lun, req->tag, buf[0]);
#ifdef DEBUG_SCSI
{
int i;
for (i = 1; i < r->req.cmd.len; i++) {
printf(" 0x%02x", buf[i]);
}
printf("\n");
}
#endif
switch (command) {
case TEST_UNIT_READY:
case INQUIRY:
case MODE_SENSE:
case MODE_SENSE_10:
case RESERVE:
case RESERVE_10:
case RELEASE:
case RELEASE_10:
case START_STOP:
case ALLOW_MEDIUM_REMOVAL:
case READ_CAPACITY_10:
case READ_TOC:
case GET_CONFIGURATION:
case SERVICE_ACTION_IN_16:
case VERIFY_10:
rc = scsi_disk_emulate_command(r, outbuf);
if (rc < 0) {
return 0;
}
r->iov.iov_len = rc;
break;
case SYNCHRONIZE_CACHE:
bdrv_acct_start(s->bs, &r->acct, 0, BDRV_ACCT_FLUSH);
r->req.aiocb = bdrv_aio_flush(s->bs, scsi_flush_complete, r);
if (r->req.aiocb == NULL) {
scsi_flush_complete(r, -EIO);
}
return 0;
case READ_6:
case READ_10:
case READ_12:
case READ_16:
len = r->req.cmd.xfer / s->qdev.blocksize;
DPRINTF("Read (sector %" PRId64 ", count %d)\n", r->req.cmd.lba, len);
if (r->req.cmd.lba > s->max_lba)
goto illegal_lba;
r->sector = r->req.cmd.lba * s->cluster_size;
r->sector_count = len * s->cluster_size;
break;
case WRITE_6:
case WRITE_10:
case WRITE_12:
case WRITE_16:
case WRITE_VERIFY_10:
case WRITE_VERIFY_12:
case WRITE_VERIFY_16:
len = r->req.cmd.xfer / s->qdev.blocksize;
DPRINTF("Write %s(sector %" PRId64 ", count %d)\n",
(command & 0xe) == 0xe ? "And Verify " : "",
r->req.cmd.lba, len);
if (r->req.cmd.lba > s->max_lba)
goto illegal_lba;
r->sector = r->req.cmd.lba * s->cluster_size;
r->sector_count = len * s->cluster_size;
break;
case MODE_SELECT:
DPRINTF("Mode Select(6) (len %lu)\n", (long)r->req.cmd.xfer);
/* We don't support mode parameter changes.
Allow the mode parameter header + block descriptors only. */
if (r->req.cmd.xfer > 12) {
goto fail;
}
break;
case MODE_SELECT_10:
DPRINTF("Mode Select(10) (len %lu)\n", (long)r->req.cmd.xfer);
/* We don't support mode parameter changes.
Allow the mode parameter header + block descriptors only. */
if (r->req.cmd.xfer > 16) {
goto fail;
}
break;
case SEEK_6:
case SEEK_10:
DPRINTF("Seek(%d) (sector %" PRId64 ")\n", command == SEEK_6 ? 6 : 10,
r->req.cmd.lba);
if (r->req.cmd.lba > s->max_lba) {
goto illegal_lba;
}
break;
case WRITE_SAME_16:
len = r->req.cmd.xfer / s->qdev.blocksize;
DPRINTF("WRITE SAME(16) (sector %" PRId64 ", count %d)\n",
r->req.cmd.lba, len);
if (r->req.cmd.lba > s->max_lba) {
goto illegal_lba;
}
/*
* We only support WRITE SAME with the unmap bit set for now.
*/
if (!(buf[1] & 0x8)) {
goto fail;
}
rc = bdrv_discard(s->bs, r->req.cmd.lba * s->cluster_size,
len * s->cluster_size);
if (rc < 0) {
/* XXX: better error code ?*/
goto fail;
}
break;
case REQUEST_SENSE:
abort();
default:
DPRINTF("Unknown SCSI command (%2.2x)\n", buf[0]);
scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE));
return 0;
fail:
scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
return 0;
illegal_lba:
scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
return 0;
}
if (r->sector_count == 0 && r->iov.iov_len == 0) {
scsi_req_complete(&r->req, GOOD);
}
len = r->sector_count * 512 + r->iov.iov_len;
if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
return -len;
} else {
if (!r->sector_count)
r->sector_count = -1;
return len;
}
}
Commit Message: scsi-disk: lazily allocate bounce buffer
It will not be needed for reads and writes if the HBA provides a sglist.
In addition, this lets scsi-disk refuse commands with an excessive
allocation length, as well as limit memory on usual well-behaved guests.
Signed-off-by: Paolo Bonzini <[email protected]>
Signed-off-by: Kevin Wolf <[email protected]>
CWE ID: CWE-119 | static int32_t scsi_send_command(SCSIRequest *req, uint8_t *buf)
{
SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
int32_t len;
uint8_t command;
int rc;
command = buf[0];
DPRINTF("Command: lun=%d tag=0x%x data=0x%02x", req->lun, req->tag, buf[0]);
#ifdef DEBUG_SCSI
{
int i;
for (i = 1; i < r->req.cmd.len; i++) {
printf(" 0x%02x", buf[i]);
}
printf("\n");
}
#endif
switch (command) {
case TEST_UNIT_READY:
case INQUIRY:
case MODE_SENSE:
case MODE_SENSE_10:
case RESERVE:
case RESERVE_10:
case RELEASE:
case RELEASE_10:
case START_STOP:
case ALLOW_MEDIUM_REMOVAL:
case READ_CAPACITY_10:
case READ_TOC:
case GET_CONFIGURATION:
case SERVICE_ACTION_IN_16:
case VERIFY_10:
rc = scsi_disk_emulate_command(r);
if (rc < 0) {
return 0;
}
r->iov.iov_len = rc;
break;
case SYNCHRONIZE_CACHE:
bdrv_acct_start(s->bs, &r->acct, 0, BDRV_ACCT_FLUSH);
r->req.aiocb = bdrv_aio_flush(s->bs, scsi_flush_complete, r);
if (r->req.aiocb == NULL) {
scsi_flush_complete(r, -EIO);
}
return 0;
case READ_6:
case READ_10:
case READ_12:
case READ_16:
len = r->req.cmd.xfer / s->qdev.blocksize;
DPRINTF("Read (sector %" PRId64 ", count %d)\n", r->req.cmd.lba, len);
if (r->req.cmd.lba > s->max_lba)
goto illegal_lba;
r->sector = r->req.cmd.lba * s->cluster_size;
r->sector_count = len * s->cluster_size;
break;
case WRITE_6:
case WRITE_10:
case WRITE_12:
case WRITE_16:
case WRITE_VERIFY_10:
case WRITE_VERIFY_12:
case WRITE_VERIFY_16:
len = r->req.cmd.xfer / s->qdev.blocksize;
DPRINTF("Write %s(sector %" PRId64 ", count %d)\n",
(command & 0xe) == 0xe ? "And Verify " : "",
r->req.cmd.lba, len);
if (r->req.cmd.lba > s->max_lba)
goto illegal_lba;
r->sector = r->req.cmd.lba * s->cluster_size;
r->sector_count = len * s->cluster_size;
break;
case MODE_SELECT:
DPRINTF("Mode Select(6) (len %lu)\n", (long)r->req.cmd.xfer);
/* We don't support mode parameter changes.
Allow the mode parameter header + block descriptors only. */
if (r->req.cmd.xfer > 12) {
goto fail;
}
break;
case MODE_SELECT_10:
DPRINTF("Mode Select(10) (len %lu)\n", (long)r->req.cmd.xfer);
/* We don't support mode parameter changes.
Allow the mode parameter header + block descriptors only. */
if (r->req.cmd.xfer > 16) {
goto fail;
}
break;
case SEEK_6:
case SEEK_10:
DPRINTF("Seek(%d) (sector %" PRId64 ")\n", command == SEEK_6 ? 6 : 10,
r->req.cmd.lba);
if (r->req.cmd.lba > s->max_lba) {
goto illegal_lba;
}
break;
case WRITE_SAME_16:
len = r->req.cmd.xfer / s->qdev.blocksize;
DPRINTF("WRITE SAME(16) (sector %" PRId64 ", count %d)\n",
r->req.cmd.lba, len);
if (r->req.cmd.lba > s->max_lba) {
goto illegal_lba;
}
/*
* We only support WRITE SAME with the unmap bit set for now.
*/
if (!(buf[1] & 0x8)) {
goto fail;
}
rc = bdrv_discard(s->bs, r->req.cmd.lba * s->cluster_size,
len * s->cluster_size);
if (rc < 0) {
/* XXX: better error code ?*/
goto fail;
}
break;
case REQUEST_SENSE:
abort();
default:
DPRINTF("Unknown SCSI command (%2.2x)\n", buf[0]);
scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE));
return 0;
fail:
scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
return 0;
illegal_lba:
scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
return 0;
}
if (r->sector_count == 0 && r->iov.iov_len == 0) {
scsi_req_complete(&r->req, GOOD);
}
len = r->sector_count * 512 + r->iov.iov_len;
if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
return -len;
} else {
if (!r->sector_count)
r->sector_count = -1;
return len;
}
}
| 166,556 |
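The deleted `outbuf = (uint8_t *)r->iov.iov_base` was the eager half of the old scheme; after this commit, `scsi_disk_emulate_command(r)` allocates the bounce buffer itself, only when a command actually produces data and only up to a sanity limit. The shape of that idea, with illustrative names and an illustrative cap (QEMU's actual fields and constant differ):

#include <stdint.h>
#include <stdlib.h>

#define BOUNCE_BUF_MAX 65536	/* illustrative cap, not QEMU's constant */

static uint8_t *lazy_bounce_buffer(uint8_t **buf, size_t need)
{
	if (need > BOUNCE_BUF_MAX)
		return NULL;	/* refuse excessive allocation lengths */
	if (*buf == NULL)
		*buf = malloc(BOUNCE_BUF_MAX);	/* first use only */
	return *buf;	/* NULL if malloc failed; caller must check */
}

This is what lets the device model refuse commands with an excessive allocation length while well-behaved guests never pay for a buffer they don't use.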
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void vlan_setup(struct net_device *dev)
{
ether_setup(dev);
dev->priv_flags |= IFF_802_1Q_VLAN;
dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
dev->tx_queue_len = 0;
dev->netdev_ops = &vlan_netdev_ops;
dev->destructor = free_netdev;
dev->ethtool_ops = &vlan_ethtool_ops;
memset(dev->broadcast, 0, ETH_ALEN);
}
Commit Message: net: Audit drivers to identify those needing IFF_TX_SKB_SHARING cleared
After the last patch, We are left in a state in which only drivers calling
ether_setup have IFF_TX_SKB_SHARING set (we assume that drivers touching real
hardware call ether_setup for their net_devices and don't hold any state in
their skbs. There are a handful of drivers that violate this assumption of
course, and need to be fixed up. This patch identifies those drivers, and marks
them as not being able to support the safe transmission of skbs by clearning the
IFF_TX_SKB_SHARING flag in priv_flags
Signed-off-by: Neil Horman <[email protected]>
CC: Karsten Keil <[email protected]>
CC: "David S. Miller" <[email protected]>
CC: Jay Vosburgh <[email protected]>
CC: Andy Gospodarek <[email protected]>
CC: Patrick McHardy <[email protected]>
CC: Krzysztof Halasa <[email protected]>
CC: "John W. Linville" <[email protected]>
CC: Greg Kroah-Hartman <[email protected]>
CC: Marcel Holtmann <[email protected]>
CC: Johannes Berg <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
CWE ID: CWE-264 | void vlan_setup(struct net_device *dev)
{
ether_setup(dev);
dev->priv_flags |= IFF_802_1Q_VLAN;
dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
dev->tx_queue_len = 0;
dev->netdev_ops = &vlan_netdev_ops;
dev->destructor = free_netdev;
dev->ethtool_ops = &vlan_ethtool_ops;
memset(dev->broadcast, 0, ETH_ALEN);
}
| 165,736 |
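`ether_setup()` sets `IFF_TX_SKB_SHARING` among its defaults, so this one-line mask is the opt-out for a device type whose transmit path writes into the skb (VLAN rewrites headers). Each driver flagged by the audit applies the same pattern in its own setup routine; a minimal sketch with an illustrative function name:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static void my_stateful_dev_setup(struct net_device *dev)
{
	ether_setup(dev);	/* sets IFF_TX_SKB_SHARING by default */
	/* this device mutates skbs it transmits, so sharing is unsafe */
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
}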
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: vpx_codec_err_t Decoder::DecodeFrame(const uint8_t *cxdata, size_t size) {
vpx_codec_err_t res_dec;
InitOnce();
REGISTER_STATE_CHECK(
res_dec = vpx_codec_decode(&decoder_,
cxdata, static_cast<unsigned int>(size),
NULL, 0));
return res_dec;
}
Commit Message: Merge Conflict Fix CL to lmp-mr1-release for ag/849478
DO NOT MERGE - libvpx: Pull from upstream
Current HEAD: 7105df53d7dc13d5e575bc8df714ec8d1da36b06
BUG=23452792
Change-Id: Ic78176fc369e0bacc71d423e0e2e6075d004aaec
CWE ID: CWE-119 | vpx_codec_err_t Decoder::DecodeFrame(const uint8_t *cxdata, size_t size) {
return DecodeFrame(cxdata, size, NULL);
}
vpx_codec_err_t Decoder::DecodeFrame(const uint8_t *cxdata, size_t size,
void *user_priv) {
vpx_codec_err_t res_dec;
InitOnce();
API_REGISTER_STATE_CHECK(
res_dec = vpx_codec_decode(&decoder_,
cxdata, static_cast<unsigned int>(size),
user_priv, 0));
return res_dec;
}
| 174,534 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void CairoOutputDev::drawImage(GfxState *state, Object *ref, Stream *str,
int width, int height,
GfxImageColorMap *colorMap,
int *maskColors, GBool inlineImg)
{
unsigned char *buffer;
unsigned int *dest;
cairo_surface_t *image;
cairo_pattern_t *pattern;
int x, y;
ImageStream *imgStr;
Guchar *pix;
int i;
cairo_matrix_t matrix;
int is_identity_transform;
buffer = (unsigned char *)gmalloc (width * height * 4);
/* TODO: Do we want to cache these? */
imgStr = new ImageStream(str, width,
colorMap->getNumPixelComps(),
colorMap->getBits());
imgStr->reset();
/* ICCBased color space doesn't do any color correction
* so check its underlying color space as well */
is_identity_transform = colorMap->getColorSpace()->getMode() == csDeviceRGB ||
(colorMap->getColorSpace()->getMode() == csICCBased &&
((GfxICCBasedColorSpace*)colorMap->getColorSpace())->getAlt()->getMode() == csDeviceRGB);
if (maskColors) {
for (y = 0; y < height; y++) {
dest = (unsigned int *) (buffer + y * 4 * width);
pix = imgStr->getLine();
colorMap->getRGBLine (pix, dest, width);
for (x = 0; x < width; x++) {
for (i = 0; i < colorMap->getNumPixelComps(); ++i) {
if (pix[i] < maskColors[2*i] * 255||
pix[i] > maskColors[2*i+1] * 255) {
*dest = *dest | 0xff000000;
break;
}
}
pix += colorMap->getNumPixelComps();
dest++;
}
}
image = cairo_image_surface_create_for_data (buffer, CAIRO_FORMAT_ARGB32,
width, height, width * 4);
}
else {
for (y = 0; y < height; y++) {
dest = (unsigned int *) (buffer + y * 4 * width);
pix = imgStr->getLine();
colorMap->getRGBLine (pix, dest, width);
}
image = cairo_image_surface_create_for_data (buffer, CAIRO_FORMAT_RGB24,
width, height, width * 4);
}
if (image == NULL) {
delete imgStr;
return;
}
pattern = cairo_pattern_create_for_surface (image);
if (pattern == NULL) {
delete imgStr;
return;
}
LOG (printf ("drawImageMask %dx%d\n", width, height));
cairo_matrix_init_translate (&matrix, 0, height);
cairo_matrix_scale (&matrix, width, -height);
cairo_pattern_set_matrix (pattern, &matrix);
cairo_pattern_set_filter (pattern, CAIRO_FILTER_BILINEAR);
cairo_set_source (cairo, pattern);
cairo_paint (cairo);
if (cairo_shape) {
#if 0
cairo_rectangle (cairo_shape, 0., 0., width, height);
cairo_fill (cairo_shape);
#else
cairo_save (cairo_shape);
/* this should draw a rectangle the size of the image
* we use this instead of rect,fill because of the lack
* of EXTEND_PAD */
/* NOTE: this will multiply the edges of the image twice */
cairo_set_source (cairo_shape, pattern);
cairo_paint(cairo_shape);
cairo_restore (cairo_shape);
#endif
}
cairo_pattern_destroy (pattern);
cairo_surface_destroy (image);
free (buffer);
delete imgStr;
}
Commit Message:
CWE ID: CWE-189 | void CairoOutputDev::drawImage(GfxState *state, Object *ref, Stream *str,
int width, int height,
GfxImageColorMap *colorMap,
int *maskColors, GBool inlineImg)
{
unsigned char *buffer;
unsigned int *dest;
cairo_surface_t *image;
cairo_pattern_t *pattern;
int x, y;
ImageStream *imgStr;
Guchar *pix;
int i;
cairo_matrix_t matrix;
int is_identity_transform;
buffer = (unsigned char *)gmallocn3 (width, height, 4);
/* TODO: Do we want to cache these? */
imgStr = new ImageStream(str, width,
colorMap->getNumPixelComps(),
colorMap->getBits());
imgStr->reset();
/* ICCBased color space doesn't do any color correction
* so check its underlying color space as well */
is_identity_transform = colorMap->getColorSpace()->getMode() == csDeviceRGB ||
(colorMap->getColorSpace()->getMode() == csICCBased &&
((GfxICCBasedColorSpace*)colorMap->getColorSpace())->getAlt()->getMode() == csDeviceRGB);
if (maskColors) {
for (y = 0; y < height; y++) {
dest = (unsigned int *) (buffer + y * 4 * width);
pix = imgStr->getLine();
colorMap->getRGBLine (pix, dest, width);
for (x = 0; x < width; x++) {
for (i = 0; i < colorMap->getNumPixelComps(); ++i) {
if (pix[i] < maskColors[2*i] * 255||
pix[i] > maskColors[2*i+1] * 255) {
*dest = *dest | 0xff000000;
break;
}
}
pix += colorMap->getNumPixelComps();
dest++;
}
}
image = cairo_image_surface_create_for_data (buffer, CAIRO_FORMAT_ARGB32,
width, height, width * 4);
}
else {
for (y = 0; y < height; y++) {
dest = (unsigned int *) (buffer + y * 4 * width);
pix = imgStr->getLine();
colorMap->getRGBLine (pix, dest, width);
}
image = cairo_image_surface_create_for_data (buffer, CAIRO_FORMAT_RGB24,
width, height, width * 4);
}
if (image == NULL) {
delete imgStr;
return;
}
pattern = cairo_pattern_create_for_surface (image);
if (pattern == NULL) {
delete imgStr;
return;
}
LOG (printf ("drawImageMask %dx%d\n", width, height));
cairo_matrix_init_translate (&matrix, 0, height);
cairo_matrix_scale (&matrix, width, -height);
cairo_pattern_set_matrix (pattern, &matrix);
cairo_pattern_set_filter (pattern, CAIRO_FILTER_BILINEAR);
cairo_set_source (cairo, pattern);
cairo_paint (cairo);
if (cairo_shape) {
#if 0
cairo_rectangle (cairo_shape, 0., 0., width, height);
cairo_fill (cairo_shape);
#else
cairo_save (cairo_shape);
/* this should draw a rectangle the size of the image
* we use this instead of rect,fill because of the lack
* of EXTEND_PAD */
/* NOTE: this will multiply the edges of the image twice */
cairo_set_source (cairo_shape, pattern);
cairo_paint(cairo_shape);
cairo_restore (cairo_shape);
#endif
}
cairo_pattern_destroy (pattern);
cairo_surface_destroy (image);
free (buffer);
delete imgStr;
}
| 164,605 |
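The whole fix in the row above is the allocator: gmalloc(width * height * 4) multiplies file-controlled dimensions with no overflow check, while gmallocn3 validates the product before allocating. Below is a minimal compilable sketch of that pattern; the name checked_malloc3 is illustrative, and poppler's real helper may abort on overflow rather than return a null pointer as this sketch does.

#include <cstdlib>
#include <climits>

/* Overflow-checked three-factor allocator in the spirit of gmallocn3:
 * refuse to allocate when width*height*bpp would wrap, which is exactly
 * the CWE-189 hole in the original gmalloc(width * height * 4). */
static void *checked_malloc3(int a, int b, int c)
{
    if (a <= 0 || b <= 0 || c <= 0)
        return nullptr;
    if (a > INT_MAX / b)            /* a*b would overflow */
        return nullptr;
    if (a * b > INT_MAX / c)        /* (a*b)*c would overflow */
        return nullptr;
    return std::malloc((size_t)a * (size_t)b * (size_t)c);
}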
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: spnego_gss_unwrap_aead(OM_uint32 *minor_status,
gss_ctx_id_t context_handle,
gss_buffer_t input_message_buffer,
gss_buffer_t input_assoc_buffer,
gss_buffer_t output_payload_buffer,
int *conf_state,
gss_qop_t *qop_state)
{
OM_uint32 ret;
ret = gss_unwrap_aead(minor_status,
context_handle,
input_message_buffer,
input_assoc_buffer,
output_payload_buffer,
conf_state,
qop_state);
return (ret);
}
Commit Message: Fix SPNEGO context aliasing bugs [CVE-2015-2695]
The SPNEGO mechanism currently replaces its context handle with the
mechanism context handle upon establishment, under the assumption that
most GSS functions are only called after context establishment. This
assumption is incorrect, and can lead to aliasing violations for some
programs. Maintain the SPNEGO context structure after context
establishment and refer to it in all GSS methods. Add initiate and
opened flags to the SPNEGO context structure for use in
gss_inquire_context() prior to context establishment.
CVE-2015-2695:
In MIT krb5 1.5 and later, applications which call
gss_inquire_context() on a partially-established SPNEGO context can
cause the GSS-API library to read from a pointer using the wrong type,
generally causing a process crash. This bug may go unnoticed, because
the most common SPNEGO authentication scenario establishes the context
after just one call to gss_accept_sec_context(). Java server
applications using the native JGSS provider are vulnerable to this
bug. A carefully crafted SPNEGO packet might allow the
gss_inquire_context() call to succeed with attacker-determined
results, but applications should not make access control decisions
based on gss_inquire_context() results prior to context establishment.
CVSSv2 Vector: AV:N/AC:M/Au:N/C:N/I:N/A:C/E:POC/RL:OF/RC:C
[[email protected]: several bugfixes, style changes, and edge-case
behavior changes; commit message and CVE description]
ticket: 8244
target_version: 1.14
tags: pullup
CWE ID: CWE-18 | spnego_gss_unwrap_aead(OM_uint32 *minor_status,
gss_ctx_id_t context_handle,
gss_buffer_t input_message_buffer,
gss_buffer_t input_assoc_buffer,
gss_buffer_t output_payload_buffer,
int *conf_state,
gss_qop_t *qop_state)
{
OM_uint32 ret;
spnego_gss_ctx_id_t sc = (spnego_gss_ctx_id_t)context_handle;
if (sc->ctx_handle == GSS_C_NO_CONTEXT)
return (GSS_S_NO_CONTEXT);
ret = gss_unwrap_aead(minor_status,
sc->ctx_handle,
input_message_buffer,
input_assoc_buffer,
output_payload_buffer,
conf_state,
qop_state);
return (ret);
}
| 166,667 |
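The fixed function above no longer treats the caller's handle as the mechanism context: it casts back to the SPNEGO-private record, verifies a mechanism context actually exists, and forwards to it. A sketch of that record's shape follows; the field names are assumptions taken from the commit message (initiate/opened flags), not the real krb5 layout, and gss_ctx_id_t is reduced to a stand-in so the sketch compiles alone.

/* Stand-in for the GSS-API opaque context type. */
typedef void *gss_ctx_id_t;
#define GSS_C_NO_CONTEXT ((gss_ctx_id_t)0)

/* Illustrative shape of the SPNEGO-private context after the fix. */
struct spnego_gss_ctx_id_rec {
    int initiate;                 /* true if we are the initiator */
    int opened;                   /* true once establishment finished */
    gss_ctx_id_t ctx_handle;      /* the underlying mechanism context */
};
typedef struct spnego_gss_ctx_id_rec *spnego_gss_ctx_id_t;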
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static struct page *follow_page_pte(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmd, unsigned int flags)
{
struct mm_struct *mm = vma->vm_mm;
struct dev_pagemap *pgmap = NULL;
struct page *page;
spinlock_t *ptl;
pte_t *ptep, pte;
retry:
if (unlikely(pmd_bad(*pmd)))
return no_page_table(vma, flags);
ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
pte = *ptep;
if (!pte_present(pte)) {
swp_entry_t entry;
/*
* KSM's break_ksm() relies upon recognizing a ksm page
* even while it is being migrated, so for that case we
* need migration_entry_wait().
*/
if (likely(!(flags & FOLL_MIGRATION)))
goto no_page;
if (pte_none(pte))
goto no_page;
entry = pte_to_swp_entry(pte);
if (!is_migration_entry(entry))
goto no_page;
pte_unmap_unlock(ptep, ptl);
migration_entry_wait(mm, pmd, address);
goto retry;
}
if ((flags & FOLL_NUMA) && pte_protnone(pte))
goto no_page;
if ((flags & FOLL_WRITE) && !pte_write(pte)) {
pte_unmap_unlock(ptep, ptl);
return NULL;
}
page = vm_normal_page(vma, address, pte);
if (!page && pte_devmap(pte) && (flags & FOLL_GET)) {
/*
* Only return device mapping pages in the FOLL_GET case since
* they are only valid while holding the pgmap reference.
*/
pgmap = get_dev_pagemap(pte_pfn(pte), NULL);
if (pgmap)
page = pte_page(pte);
else
goto no_page;
} else if (unlikely(!page)) {
if (flags & FOLL_DUMP) {
/* Avoid special (like zero) pages in core dumps */
page = ERR_PTR(-EFAULT);
goto out;
}
if (is_zero_pfn(pte_pfn(pte))) {
page = pte_page(pte);
} else {
int ret;
ret = follow_pfn_pte(vma, address, ptep, flags);
page = ERR_PTR(ret);
goto out;
}
}
if (flags & FOLL_SPLIT && PageTransCompound(page)) {
int ret;
get_page(page);
pte_unmap_unlock(ptep, ptl);
lock_page(page);
ret = split_huge_page(page);
unlock_page(page);
put_page(page);
if (ret)
return ERR_PTR(ret);
goto retry;
}
if (flags & FOLL_GET) {
get_page(page);
/* drop the pgmap reference now that we hold the page */
if (pgmap) {
put_dev_pagemap(pgmap);
pgmap = NULL;
}
}
if (flags & FOLL_TOUCH) {
if ((flags & FOLL_WRITE) &&
!pte_dirty(pte) && !PageDirty(page))
set_page_dirty(page);
/*
* pte_mkyoung() would be more correct here, but atomic care
* is needed to avoid losing the dirty bit: it is easier to use
* mark_page_accessed().
*/
mark_page_accessed(page);
}
if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
/* Do not mlock pte-mapped THP */
if (PageTransCompound(page))
goto out;
/*
* The preliminary mapping check is mainly to avoid the
* pointless overhead of lock_page on the ZERO_PAGE
* which might bounce very badly if there is contention.
*
* If the page is already locked, we don't need to
* handle it now - vmscan will handle it later if and
* when it attempts to reclaim the page.
*/
if (page->mapping && trylock_page(page)) {
lru_add_drain(); /* push cached pages to LRU */
/*
* Because we lock page here, and migration is
* blocked by the pte's page reference, and we
* know the page is still mapped, we don't even
* need to check for file-cache page truncation.
*/
mlock_vma_page(page);
unlock_page(page);
}
}
out:
pte_unmap_unlock(ptep, ptl);
return page;
no_page:
pte_unmap_unlock(ptep, ptl);
if (!pte_none(pte))
return NULL;
return no_page_table(vma, flags);
}
Commit Message: mm: remove gup_flags FOLL_WRITE games from __get_user_pages()
This is an ancient bug that was actually attempted to be fixed once
(badly) by me eleven years ago in commit 4ceb5db9757a ("Fix
get_user_pages() race for write access") but that was then undone due to
problems on s390 by commit f33ea7f404e5 ("fix get_user_pages bug").
In the meantime, the s390 situation has long been fixed, and we can now
fix it by checking the pte_dirty() bit properly (and do it better). The
s390 dirty bit was implemented in abf09bed3cce ("s390/mm: implement
software dirty bits") which made it into v3.9. Earlier kernels will
have to look at the page state itself.
Also, the VM has become more scalable, and what used a purely
theoretical race back then has become easier to trigger.
To fix it, we introduce a new internal FOLL_COW flag to mark the "yes,
we already did a COW" rather than play racy games with FOLL_WRITE that
is very fundamental, and then use the pte dirty flag to validate that
the FOLL_COW flag is still valid.
Reported-and-tested-by: Phil "not Paul" Oester <[email protected]>
Acked-by: Hugh Dickins <[email protected]>
Reviewed-by: Michal Hocko <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Kees Cook <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Cc: Willy Tarreau <[email protected]>
Cc: Nick Piggin <[email protected]>
Cc: Greg Thelen <[email protected]>
Cc: [email protected]
Signed-off-by: Linus Torvalds <[email protected]>
CWE ID: CWE-362 | static struct page *follow_page_pte(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmd, unsigned int flags)
{
struct mm_struct *mm = vma->vm_mm;
struct dev_pagemap *pgmap = NULL;
struct page *page;
spinlock_t *ptl;
pte_t *ptep, pte;
retry:
if (unlikely(pmd_bad(*pmd)))
return no_page_table(vma, flags);
ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
pte = *ptep;
if (!pte_present(pte)) {
swp_entry_t entry;
/*
* KSM's break_ksm() relies upon recognizing a ksm page
* even while it is being migrated, so for that case we
* need migration_entry_wait().
*/
if (likely(!(flags & FOLL_MIGRATION)))
goto no_page;
if (pte_none(pte))
goto no_page;
entry = pte_to_swp_entry(pte);
if (!is_migration_entry(entry))
goto no_page;
pte_unmap_unlock(ptep, ptl);
migration_entry_wait(mm, pmd, address);
goto retry;
}
if ((flags & FOLL_NUMA) && pte_protnone(pte))
goto no_page;
if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
pte_unmap_unlock(ptep, ptl);
return NULL;
}
page = vm_normal_page(vma, address, pte);
if (!page && pte_devmap(pte) && (flags & FOLL_GET)) {
/*
* Only return device mapping pages in the FOLL_GET case since
* they are only valid while holding the pgmap reference.
*/
pgmap = get_dev_pagemap(pte_pfn(pte), NULL);
if (pgmap)
page = pte_page(pte);
else
goto no_page;
} else if (unlikely(!page)) {
if (flags & FOLL_DUMP) {
/* Avoid special (like zero) pages in core dumps */
page = ERR_PTR(-EFAULT);
goto out;
}
if (is_zero_pfn(pte_pfn(pte))) {
page = pte_page(pte);
} else {
int ret;
ret = follow_pfn_pte(vma, address, ptep, flags);
page = ERR_PTR(ret);
goto out;
}
}
if (flags & FOLL_SPLIT && PageTransCompound(page)) {
int ret;
get_page(page);
pte_unmap_unlock(ptep, ptl);
lock_page(page);
ret = split_huge_page(page);
unlock_page(page);
put_page(page);
if (ret)
return ERR_PTR(ret);
goto retry;
}
if (flags & FOLL_GET) {
get_page(page);
/* drop the pgmap reference now that we hold the page */
if (pgmap) {
put_dev_pagemap(pgmap);
pgmap = NULL;
}
}
if (flags & FOLL_TOUCH) {
if ((flags & FOLL_WRITE) &&
!pte_dirty(pte) && !PageDirty(page))
set_page_dirty(page);
/*
* pte_mkyoung() would be more correct here, but atomic care
* is needed to avoid losing the dirty bit: it is easier to use
* mark_page_accessed().
*/
mark_page_accessed(page);
}
if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
/* Do not mlock pte-mapped THP */
if (PageTransCompound(page))
goto out;
/*
* The preliminary mapping check is mainly to avoid the
* pointless overhead of lock_page on the ZERO_PAGE
* which might bounce very badly if there is contention.
*
* If the page is already locked, we don't need to
* handle it now - vmscan will handle it later if and
* when it attempts to reclaim the page.
*/
if (page->mapping && trylock_page(page)) {
lru_add_drain(); /* push cached pages to LRU */
/*
* Because we lock page here, and migration is
* blocked by the pte's page reference, and we
* know the page is still mapped, we don't even
* need to check for file-cache page truncation.
*/
mlock_vma_page(page);
unlock_page(page);
}
}
out:
pte_unmap_unlock(ptep, ptl);
return page;
no_page:
pte_unmap_unlock(ptep, ptl);
if (!pte_none(pte))
return NULL;
return no_page_table(vma, flags);
}
| 167,164 |
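The fixed code above (this is the Dirty COW fix, CVE-2016-5195) calls can_follow_write_pte(), a helper the patch introduces but which is not reproduced in the output. Its body below mirrors the helper the commit adds; the surrounding types and flag values are stand-ins so the sketch compiles outside the kernel.

/* Stand-ins so the sketch is self-contained; values are illustrative. */
typedef unsigned long pte_t;
#define FOLL_FORCE  0x10
#define FOLL_COW    0x4000   /* internal "a COW already happened" flag */
static bool pte_write(pte_t pte) { return (pte & 0x2)  != 0; }  /* stand-in */
static bool pte_dirty(pte_t pte) { return (pte & 0x40) != 0; }  /* stand-in */

/* FOLL_FORCE may write through an unwritable pte, but only after a COW
 * has happened and only while the pte is still dirty - the dirty bit is
 * what proves the COW result was not lost to the race. */
static bool can_follow_write_pte(pte_t pte, unsigned int flags)
{
    return pte_write(pte) ||
        ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
}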
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
unsigned x, y;
AVFilterContext *ctx = inlink->dst;
VignetteContext *s = ctx->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFrame *out;
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
if (s->eval_mode == EVAL_MODE_FRAME)
update_context(s, inlink, in);
if (s->desc->flags & AV_PIX_FMT_FLAG_RGB) {
uint8_t *dst = out->data[0];
const uint8_t *src = in ->data[0];
const float *fmap = s->fmap;
const int dst_linesize = out->linesize[0];
const int src_linesize = in ->linesize[0];
const int fmap_linesize = s->fmap_linesize;
for (y = 0; y < inlink->h; y++) {
uint8_t *dstp = dst;
const uint8_t *srcp = src;
for (x = 0; x < inlink->w; x++, dstp += 3, srcp += 3) {
const float f = fmap[x];
dstp[0] = av_clip_uint8(srcp[0] * f + get_dither_value(s));
dstp[1] = av_clip_uint8(srcp[1] * f + get_dither_value(s));
dstp[2] = av_clip_uint8(srcp[2] * f + get_dither_value(s));
}
dst += dst_linesize;
src += src_linesize;
fmap += fmap_linesize;
}
} else {
int plane;
for (plane = 0; plane < 4 && in->data[plane]; plane++) {
uint8_t *dst = out->data[plane];
const uint8_t *src = in ->data[plane];
const float *fmap = s->fmap;
const int dst_linesize = out->linesize[plane];
const int src_linesize = in ->linesize[plane];
const int fmap_linesize = s->fmap_linesize;
const int chroma = plane == 1 || plane == 2;
const int hsub = chroma ? s->desc->log2_chroma_w : 0;
const int vsub = chroma ? s->desc->log2_chroma_h : 0;
const int w = FF_CEIL_RSHIFT(inlink->w, hsub);
const int h = FF_CEIL_RSHIFT(inlink->h, vsub);
for (y = 0; y < h; y++) {
uint8_t *dstp = dst;
const uint8_t *srcp = src;
for (x = 0; x < w; x++) {
const double dv = get_dither_value(s);
if (chroma) *dstp++ = av_clip_uint8(fmap[x << hsub] * (*srcp++ - 127) + 127 + dv);
else *dstp++ = av_clip_uint8(fmap[x ] * *srcp++ + dv);
}
dst += dst_linesize;
src += src_linesize;
fmap += fmap_linesize << vsub;
}
}
}
return ff_filter_frame(outlink, out);
}
Commit Message: avfilter: fix plane validity checks
Fixes out of array accesses
Signed-off-by: Michael Niedermayer <[email protected]>
CWE ID: CWE-119 | static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
unsigned x, y;
AVFilterContext *ctx = inlink->dst;
VignetteContext *s = ctx->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFrame *out;
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
if (s->eval_mode == EVAL_MODE_FRAME)
update_context(s, inlink, in);
if (s->desc->flags & AV_PIX_FMT_FLAG_RGB) {
uint8_t *dst = out->data[0];
const uint8_t *src = in ->data[0];
const float *fmap = s->fmap;
const int dst_linesize = out->linesize[0];
const int src_linesize = in ->linesize[0];
const int fmap_linesize = s->fmap_linesize;
for (y = 0; y < inlink->h; y++) {
uint8_t *dstp = dst;
const uint8_t *srcp = src;
for (x = 0; x < inlink->w; x++, dstp += 3, srcp += 3) {
const float f = fmap[x];
dstp[0] = av_clip_uint8(srcp[0] * f + get_dither_value(s));
dstp[1] = av_clip_uint8(srcp[1] * f + get_dither_value(s));
dstp[2] = av_clip_uint8(srcp[2] * f + get_dither_value(s));
}
dst += dst_linesize;
src += src_linesize;
fmap += fmap_linesize;
}
} else {
int plane;
for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) {
uint8_t *dst = out->data[plane];
const uint8_t *src = in ->data[plane];
const float *fmap = s->fmap;
const int dst_linesize = out->linesize[plane];
const int src_linesize = in ->linesize[plane];
const int fmap_linesize = s->fmap_linesize;
const int chroma = plane == 1 || plane == 2;
const int hsub = chroma ? s->desc->log2_chroma_w : 0;
const int vsub = chroma ? s->desc->log2_chroma_h : 0;
const int w = FF_CEIL_RSHIFT(inlink->w, hsub);
const int h = FF_CEIL_RSHIFT(inlink->h, vsub);
for (y = 0; y < h; y++) {
uint8_t *dstp = dst;
const uint8_t *srcp = src;
for (x = 0; x < w; x++) {
const double dv = get_dither_value(s);
if (chroma) *dstp++ = av_clip_uint8(fmap[x << hsub] * (*srcp++ - 127) + 127 + dv);
else *dstp++ = av_clip_uint8(fmap[x ] * *srcp++ + dv);
}
dst += dst_linesize;
src += src_linesize;
fmap += fmap_linesize << vsub;
}
}
}
return ff_filter_frame(outlink, out);
}
| 166,008 |
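The only functional change in this row is the per-plane loop condition: the patch additionally requires a non-zero linesize before treating data[plane] as a real plane, since a mis-described frame can carry a non-null pointer with no valid stride. A small standalone illustration of that guard, with the frame reduced to a stand-in struct:

/* Stand-in frame descriptor; AVFrame carries the same two parallel
 * arrays for plane pointers and strides. */
struct FrameStandIn {
    unsigned char *data[4];
    int            linesize[4];
};

/* A plane is only safe to walk if it has BOTH a base pointer and a
 * non-zero stride; data[plane] alone (the old loop condition) lets the
 * per-plane loop run onto planes that were never really allocated. */
static int valid_plane_count(const struct FrameStandIn *f)
{
    int n = 0;
    while (n < 4 && f->data[n] && f->linesize[n])
        n++;
    return n;
}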
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group)
{
struct key_user *newowner, *zapowner = NULL;
struct key *key;
key_ref_t key_ref;
long ret;
kuid_t uid;
kgid_t gid;
uid = make_kuid(current_user_ns(), user);
gid = make_kgid(current_user_ns(), group);
ret = -EINVAL;
if ((user != (uid_t) -1) && !uid_valid(uid))
goto error;
if ((group != (gid_t) -1) && !gid_valid(gid))
goto error;
ret = 0;
if (user == (uid_t) -1 && group == (gid_t) -1)
goto error;
key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL,
KEY_NEED_SETATTR);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
goto error;
}
key = key_ref_to_ptr(key_ref);
/* make the changes with the locks held to prevent chown/chown races */
ret = -EACCES;
down_write(&key->sem);
if (!capable(CAP_SYS_ADMIN)) {
/* only the sysadmin can chown a key to some other UID */
if (user != (uid_t) -1 && !uid_eq(key->uid, uid))
goto error_put;
/* only the sysadmin can set the key's GID to a group other
* than one of those that the current process subscribes to */
if (group != (gid_t) -1 && !gid_eq(gid, key->gid) && !in_group_p(gid))
goto error_put;
}
/* change the UID */
if (user != (uid_t) -1 && !uid_eq(uid, key->uid)) {
ret = -ENOMEM;
newowner = key_user_lookup(uid);
if (!newowner)
goto error_put;
/* transfer the quota burden to the new user */
if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ?
key_quota_root_maxkeys : key_quota_maxkeys;
unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ?
key_quota_root_maxbytes : key_quota_maxbytes;
spin_lock(&newowner->lock);
if (newowner->qnkeys + 1 >= maxkeys ||
newowner->qnbytes + key->quotalen >= maxbytes ||
newowner->qnbytes + key->quotalen <
newowner->qnbytes)
goto quota_overrun;
newowner->qnkeys++;
newowner->qnbytes += key->quotalen;
spin_unlock(&newowner->lock);
spin_lock(&key->user->lock);
key->user->qnkeys--;
key->user->qnbytes -= key->quotalen;
spin_unlock(&key->user->lock);
}
atomic_dec(&key->user->nkeys);
atomic_inc(&newowner->nkeys);
if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
atomic_dec(&key->user->nikeys);
atomic_inc(&newowner->nikeys);
}
zapowner = key->user;
key->user = newowner;
key->uid = uid;
}
/* change the GID */
if (group != (gid_t) -1)
key->gid = gid;
ret = 0;
error_put:
up_write(&key->sem);
key_put(key);
if (zapowner)
key_user_put(zapowner);
error:
return ret;
quota_overrun:
spin_unlock(&newowner->lock);
zapowner = newowner;
ret = -EDQUOT;
goto error_put;
}
Commit Message: KEYS: Fix race between updating and finding a negative key
Consolidate KEY_FLAG_INSTANTIATED, KEY_FLAG_NEGATIVE and the rejection
error into one field such that:
(1) The instantiation state can be modified/read atomically.
(2) The error can be accessed atomically with the state.
(3) The error isn't stored unioned with the payload pointers.
This deals with the problem that the state is spread over three different
objects (two bits and a separate variable) and reading or updating them
atomically isn't practical, given that not only can uninstantiated keys
change into instantiated or rejected keys, but rejected keys can also turn
into instantiated keys - and someone accessing the key might not be using
any locking.
The main side effect of this problem is that what was held in the payload
may change, depending on the state. For instance, you might observe the
key to be in the rejected state. You then read the cached error, but if
the key semaphore wasn't locked, the key might've become instantiated
between the two reads - and you might now have something in hand that isn't
actually an error code.
The state is now KEY_IS_UNINSTANTIATED, KEY_IS_POSITIVE or a negative error
code if the key is negatively instantiated. The key_is_instantiated()
function is replaced with key_is_positive() to avoid confusion as negative
keys are also 'instantiated'.
Additionally, barriering is included:
(1) Order payload-set before state-set during instantiation.
(2) Order state-read before payload-read when using the key.
Further separate barriering is necessary if RCU is being used to access the
payload content after reading the payload pointers.
Fixes: 146aa8b1453b ("KEYS: Merge the type-specific data with the payload data")
Cc: [email protected] # v4.4+
Reported-by: Eric Biggers <[email protected]>
Signed-off-by: David Howells <[email protected]>
Reviewed-by: Eric Biggers <[email protected]>
CWE ID: CWE-20 | long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group)
{
struct key_user *newowner, *zapowner = NULL;
struct key *key;
key_ref_t key_ref;
long ret;
kuid_t uid;
kgid_t gid;
uid = make_kuid(current_user_ns(), user);
gid = make_kgid(current_user_ns(), group);
ret = -EINVAL;
if ((user != (uid_t) -1) && !uid_valid(uid))
goto error;
if ((group != (gid_t) -1) && !gid_valid(gid))
goto error;
ret = 0;
if (user == (uid_t) -1 && group == (gid_t) -1)
goto error;
key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL,
KEY_NEED_SETATTR);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
goto error;
}
key = key_ref_to_ptr(key_ref);
/* make the changes with the locks held to prevent chown/chown races */
ret = -EACCES;
down_write(&key->sem);
if (!capable(CAP_SYS_ADMIN)) {
/* only the sysadmin can chown a key to some other UID */
if (user != (uid_t) -1 && !uid_eq(key->uid, uid))
goto error_put;
/* only the sysadmin can set the key's GID to a group other
* than one of those that the current process subscribes to */
if (group != (gid_t) -1 && !gid_eq(gid, key->gid) && !in_group_p(gid))
goto error_put;
}
/* change the UID */
if (user != (uid_t) -1 && !uid_eq(uid, key->uid)) {
ret = -ENOMEM;
newowner = key_user_lookup(uid);
if (!newowner)
goto error_put;
/* transfer the quota burden to the new user */
if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ?
key_quota_root_maxkeys : key_quota_maxkeys;
unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ?
key_quota_root_maxbytes : key_quota_maxbytes;
spin_lock(&newowner->lock);
if (newowner->qnkeys + 1 >= maxkeys ||
newowner->qnbytes + key->quotalen >= maxbytes ||
newowner->qnbytes + key->quotalen <
newowner->qnbytes)
goto quota_overrun;
newowner->qnkeys++;
newowner->qnbytes += key->quotalen;
spin_unlock(&newowner->lock);
spin_lock(&key->user->lock);
key->user->qnkeys--;
key->user->qnbytes -= key->quotalen;
spin_unlock(&key->user->lock);
}
atomic_dec(&key->user->nkeys);
atomic_inc(&newowner->nkeys);
if (key->state != KEY_IS_UNINSTANTIATED) {
atomic_dec(&key->user->nikeys);
atomic_inc(&newowner->nikeys);
}
zapowner = key->user;
key->user = newowner;
key->uid = uid;
}
/* change the GID */
if (group != (gid_t) -1)
key->gid = gid;
ret = 0;
error_put:
up_write(&key->sem);
key_put(key);
if (zapowner)
key_user_put(zapowner);
error:
return ret;
quota_overrun:
spin_unlock(&newowner->lock);
zapowner = newowner;
ret = -EDQUOT;
goto error_put;
}
| 167,700 |
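The chown path above only needs "has this key ever been instantiated", so the fix compares state against KEY_IS_UNINSTANTIATED; elsewhere the commit replaces key_is_instantiated() with key_is_positive(). A compilable sketch of the consolidated state word and its accessors, with the key struct reduced to a stand-in and the kernel barrier noted in a comment:

/* state is one of: KEY_IS_UNINSTANTIATED, KEY_IS_POSITIVE, or a
 * negative errno caching the rejection reason. */
enum { KEY_IS_UNINSTANTIATED = 0, KEY_IS_POSITIVE = 1 };

struct key_standin { int state; /* plus payload, flags, ... */ };

/* In the kernel this read is smp_load_acquire(&key->state), pairing
 * with smp_store_release() at instantiation so a payload stored before
 * the state flip is visible after it; a plain load stands in here. */
static int key_read_state(const struct key_standin *key)
{
    return key->state;
}

static bool key_is_positive(const struct key_standin *key)
{
    return key_read_state(key) == KEY_IS_POSITIVE;
}

static bool key_is_negative(const struct key_standin *key)
{
    return key_read_state(key) < 0;
}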
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: OMX_ERRORTYPE SoftAMR::internalSetParameter(
OMX_INDEXTYPE index, const OMX_PTR params) {
switch (index) {
case OMX_IndexParamStandardComponentRole:
{
const OMX_PARAM_COMPONENTROLETYPE *roleParams =
(const OMX_PARAM_COMPONENTROLETYPE *)params;
if (mMode == MODE_NARROW) {
if (strncmp((const char *)roleParams->cRole,
"audio_decoder.amrnb",
OMX_MAX_STRINGNAME_SIZE - 1)) {
return OMX_ErrorUndefined;
}
} else {
if (strncmp((const char *)roleParams->cRole,
"audio_decoder.amrwb",
OMX_MAX_STRINGNAME_SIZE - 1)) {
return OMX_ErrorUndefined;
}
}
return OMX_ErrorNone;
}
case OMX_IndexParamAudioAmr:
{
const OMX_AUDIO_PARAM_AMRTYPE *aacParams =
(const OMX_AUDIO_PARAM_AMRTYPE *)params;
if (aacParams->nPortIndex != 0) {
return OMX_ErrorUndefined;
}
return OMX_ErrorNone;
}
case OMX_IndexParamAudioPcm:
{
const OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
(OMX_AUDIO_PARAM_PCMMODETYPE *)params;
if (pcmParams->nPortIndex != 1) {
return OMX_ErrorUndefined;
}
return OMX_ErrorNone;
}
default:
return SimpleSoftOMXComponent::internalSetParameter(index, params);
}
}
Commit Message: DO NOT MERGE Verify OMX buffer sizes prior to access
Bug: 27207275
Change-Id: I4412825d1ee233d993af0a67708bea54304ff62d
CWE ID: CWE-119 | OMX_ERRORTYPE SoftAMR::internalSetParameter(
OMX_INDEXTYPE index, const OMX_PTR params) {
switch (index) {
case OMX_IndexParamStandardComponentRole:
{
const OMX_PARAM_COMPONENTROLETYPE *roleParams =
(const OMX_PARAM_COMPONENTROLETYPE *)params;
if (!isValidOMXParam(roleParams)) {
return OMX_ErrorBadParameter;
}
if (mMode == MODE_NARROW) {
if (strncmp((const char *)roleParams->cRole,
"audio_decoder.amrnb",
OMX_MAX_STRINGNAME_SIZE - 1)) {
return OMX_ErrorUndefined;
}
} else {
if (strncmp((const char *)roleParams->cRole,
"audio_decoder.amrwb",
OMX_MAX_STRINGNAME_SIZE - 1)) {
return OMX_ErrorUndefined;
}
}
return OMX_ErrorNone;
}
case OMX_IndexParamAudioAmr:
{
const OMX_AUDIO_PARAM_AMRTYPE *aacParams =
(const OMX_AUDIO_PARAM_AMRTYPE *)params;
if (!isValidOMXParam(aacParams)) {
return OMX_ErrorBadParameter;
}
if (aacParams->nPortIndex != 0) {
return OMX_ErrorUndefined;
}
return OMX_ErrorNone;
}
case OMX_IndexParamAudioPcm:
{
const OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
(OMX_AUDIO_PARAM_PCMMODETYPE *)params;
if (!isValidOMXParam(pcmParams)) {
return OMX_ErrorBadParameter;
}
if (pcmParams->nPortIndex != 1) {
return OMX_ErrorUndefined;
}
return OMX_ErrorNone;
}
default:
return SimpleSoftOMXComponent::internalSetParameter(index, params);
}
}
| 174,193 |
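Every branch in the fixed function rejects the cast before touching any field, via isValidOMXParam. The core of the AOSP helper is a size check against the struct's self-reported nSize; the real helper also logs, so treat the following as a sketch with a stand-in parameter type.

#include <cstdint>

/* Stand-in: every OMX parameter struct begins with its own byte size. */
struct OMX_PARAM_STANDIN {
    uint32_t nSize;
    uint32_t nPortIndex;
};

/* Reject a caller-supplied buffer whose self-declared size is smaller
 * than the struct we are about to read it as; without this, the casts
 * in internalSetParameter read past the end of a short buffer. */
template <typename T>
static bool isValidOMXParam(const T *param)
{
    return param != nullptr && param->nSize >= sizeof(T);
}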
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static void oinf_entry_dump(GF_OperatingPointsInformation *ptr, FILE * trace)
{
u32 i, count;
if (!ptr) {
fprintf(trace, "<OperatingPointsInformation scalability_mask=\"Multiview|Spatial scalability|Auxilary|unknown\" num_profile_tier_level=\"\" num_operating_points=\"\" dependency_layers=\"\">\n");
fprintf(trace, " <ProfileTierLevel general_profile_space=\"\" general_tier_flag=\"\" general_profile_idc=\"\" general_profile_compatibility_flags=\"\" general_constraint_indicator_flags=\"\" />\n");
fprintf(trace, "<OperatingPoint output_layer_set_idx=\"\" max_temporal_id=\"\" layer_count=\"\" minPicWidth=\"\" minPicHeight=\"\" maxPicWidth=\"\" maxPicHeight=\"\" maxChromaFormat=\"\" maxBitDepth=\"\" frame_rate_info_flag=\"\" bit_rate_info_flag=\"\" avgFrameRate=\"\" constantFrameRate=\"\" maxBitRate=\"\" avgBitRate=\"\"/>\n");
fprintf(trace, "<Layer dependent_layerID=\"\" num_layers_dependent_on=\"\" dependent_on_layerID=\"\" dimension_identifier=\"\"/>\n");
fprintf(trace, "</OperatingPointsInformation>\n");
return;
}
fprintf(trace, "<OperatingPointsInformation");
fprintf(trace, " scalability_mask=\"%u (", ptr->scalability_mask);
switch (ptr->scalability_mask) {
case 2:
fprintf(trace, "Multiview");
break;
case 4:
fprintf(trace, "Spatial scalability");
break;
case 8:
fprintf(trace, "Auxilary");
break;
default:
fprintf(trace, "unknown");
}
fprintf(trace, ")\" num_profile_tier_level=\"%u\"", gf_list_count(ptr->profile_tier_levels) );
fprintf(trace, " num_operating_points=\"%u\" dependency_layers=\"%u\"", gf_list_count(ptr->operating_points), gf_list_count(ptr->dependency_layers));
fprintf(trace, ">\n");
count=gf_list_count(ptr->profile_tier_levels);
for (i = 0; i < count; i++) {
LHEVC_ProfileTierLevel *ptl = (LHEVC_ProfileTierLevel *)gf_list_get(ptr->profile_tier_levels, i);
fprintf(trace, " <ProfileTierLevel general_profile_space=\"%u\" general_tier_flag=\"%u\" general_profile_idc=\"%u\" general_profile_compatibility_flags=\"%X\" general_constraint_indicator_flags=\""LLX"\" />\n", ptl->general_profile_space, ptl->general_tier_flag, ptl->general_profile_idc, ptl->general_profile_compatibility_flags, ptl->general_constraint_indicator_flags);
}
count=gf_list_count(ptr->operating_points);
for (i = 0; i < count; i++) {
LHEVC_OperatingPoint *op = (LHEVC_OperatingPoint *)gf_list_get(ptr->operating_points, i);
fprintf(trace, "<OperatingPoint output_layer_set_idx=\"%u\"", op->output_layer_set_idx);
fprintf(trace, " max_temporal_id=\"%u\" layer_count=\"%u\"", op->max_temporal_id, op->layer_count);
fprintf(trace, " minPicWidth=\"%u\" minPicHeight=\"%u\"", op->minPicWidth, op->minPicHeight);
fprintf(trace, " maxPicWidth=\"%u\" maxPicHeight=\"%u\"", op->maxPicWidth, op->maxPicHeight);
fprintf(trace, " maxChromaFormat=\"%u\" maxBitDepth=\"%u\"", op->maxChromaFormat, op->maxBitDepth);
fprintf(trace, " frame_rate_info_flag=\"%u\" bit_rate_info_flag=\"%u\"", op->frame_rate_info_flag, op->bit_rate_info_flag);
if (op->frame_rate_info_flag)
fprintf(trace, " avgFrameRate=\"%u\" constantFrameRate=\"%u\"", op->avgFrameRate, op->constantFrameRate);
if (op->bit_rate_info_flag)
fprintf(trace, " maxBitRate=\"%u\" avgBitRate=\"%u\"", op->maxBitRate, op->avgBitRate);
fprintf(trace, "/>\n");
}
count=gf_list_count(ptr->dependency_layers);
for (i = 0; i < count; i++) {
u32 j;
LHEVC_DependentLayer *dep = (LHEVC_DependentLayer *)gf_list_get(ptr->dependency_layers, i);
fprintf(trace, "<Layer dependent_layerID=\"%u\" num_layers_dependent_on=\"%u\"", dep->dependent_layerID, dep->num_layers_dependent_on);
if (dep->num_layers_dependent_on) {
fprintf(trace, " dependent_on_layerID=\"");
for (j = 0; j < dep->num_layers_dependent_on; j++)
fprintf(trace, "%d ", dep->dependent_on_layerID[j]);
fprintf(trace, "\"");
}
fprintf(trace, " dimension_identifier=\"");
for (j = 0; j < 16; j++)
if (ptr->scalability_mask & (1 << j))
fprintf(trace, "%d ", dep->dimension_identifier[j]);
fprintf(trace, "\"/>\n");
}
fprintf(trace, "</OperatingPointsInformation>\n");
return;
}
Commit Message: fixed 2 possible heap overflows (inc. #1088)
CWE ID: CWE-125 | static void oinf_entry_dump(GF_OperatingPointsInformation *ptr, FILE * trace)
{
u32 i, count;
if (!ptr) {
fprintf(trace, "<OperatingPointsInformation scalability_mask=\"Multiview|Spatial scalability|Auxilary|unknown\" num_profile_tier_level=\"\" num_operating_points=\"\" dependency_layers=\"\">\n");
fprintf(trace, " <ProfileTierLevel general_profile_space=\"\" general_tier_flag=\"\" general_profile_idc=\"\" general_profile_compatibility_flags=\"\" general_constraint_indicator_flags=\"\" />\n");
fprintf(trace, "<OperatingPoint output_layer_set_idx=\"\" max_temporal_id=\"\" layer_count=\"\" minPicWidth=\"\" minPicHeight=\"\" maxPicWidth=\"\" maxPicHeight=\"\" maxChromaFormat=\"\" maxBitDepth=\"\" frame_rate_info_flag=\"\" bit_rate_info_flag=\"\" avgFrameRate=\"\" constantFrameRate=\"\" maxBitRate=\"\" avgBitRate=\"\"/>\n");
fprintf(trace, "<Layer dependent_layerID=\"\" num_layers_dependent_on=\"\" dependent_on_layerID=\"\" dimension_identifier=\"\"/>\n");
fprintf(trace, "</OperatingPointsInformation>\n");
return;
}
fprintf(trace, "<OperatingPointsInformation");
fprintf(trace, " scalability_mask=\"%u (", ptr->scalability_mask);
switch (ptr->scalability_mask) {
case 2:
fprintf(trace, "Multiview");
break;
case 4:
fprintf(trace, "Spatial scalability");
break;
case 8:
fprintf(trace, "Auxilary");
break;
default:
fprintf(trace, "unknown");
}
fprintf(trace, ")\" num_profile_tier_level=\"%u\"", gf_list_count(ptr->profile_tier_levels) );
fprintf(trace, " num_operating_points=\"%u\" dependency_layers=\"%u\"", gf_list_count(ptr->operating_points), gf_list_count(ptr->dependency_layers));
fprintf(trace, ">\n");
count=gf_list_count(ptr->profile_tier_levels);
for (i = 0; i < count; i++) {
LHEVC_ProfileTierLevel *ptl = (LHEVC_ProfileTierLevel *)gf_list_get(ptr->profile_tier_levels, i);
fprintf(trace, " <ProfileTierLevel general_profile_space=\"%u\" general_tier_flag=\"%u\" general_profile_idc=\"%u\" general_profile_compatibility_flags=\"%X\" general_constraint_indicator_flags=\""LLX"\" />\n", ptl->general_profile_space, ptl->general_tier_flag, ptl->general_profile_idc, ptl->general_profile_compatibility_flags, ptl->general_constraint_indicator_flags);
}
count=gf_list_count(ptr->operating_points);
for (i = 0; i < count; i++) {
LHEVC_OperatingPoint *op = (LHEVC_OperatingPoint *)gf_list_get(ptr->operating_points, i);
fprintf(trace, "<OperatingPoint output_layer_set_idx=\"%u\"", op->output_layer_set_idx);
fprintf(trace, " max_temporal_id=\"%u\" layer_count=\"%u\"", op->max_temporal_id, op->layer_count);
fprintf(trace, " minPicWidth=\"%u\" minPicHeight=\"%u\"", op->minPicWidth, op->minPicHeight);
fprintf(trace, " maxPicWidth=\"%u\" maxPicHeight=\"%u\"", op->maxPicWidth, op->maxPicHeight);
fprintf(trace, " maxChromaFormat=\"%u\" maxBitDepth=\"%u\"", op->maxChromaFormat, op->maxBitDepth);
fprintf(trace, " frame_rate_info_flag=\"%u\" bit_rate_info_flag=\"%u\"", op->frame_rate_info_flag, op->bit_rate_info_flag);
if (op->frame_rate_info_flag)
fprintf(trace, " avgFrameRate=\"%u\" constantFrameRate=\"%u\"", op->avgFrameRate, op->constantFrameRate);
if (op->bit_rate_info_flag)
fprintf(trace, " maxBitRate=\"%u\" avgBitRate=\"%u\"", op->maxBitRate, op->avgBitRate);
fprintf(trace, "/>\n");
}
count=gf_list_count(ptr->dependency_layers);
for (i = 0; i < count; i++) {
u32 j;
LHEVC_DependentLayer *dep = (LHEVC_DependentLayer *)gf_list_get(ptr->dependency_layers, i);
fprintf(trace, "<Layer dependent_layerID=\"%u\" num_layers_dependent_on=\"%u\"", dep->dependent_layerID, dep->num_layers_dependent_on);
if (dep->num_layers_dependent_on) {
fprintf(trace, " dependent_on_layerID=\"");
for (j = 0; j < dep->num_layers_dependent_on; j++)
fprintf(trace, "%d ", dep->dependent_on_layerID[j]);
fprintf(trace, "\"");
}
fprintf(trace, " dimension_identifier=\"");
for (j = 0; j < 16; j++)
if (ptr->scalability_mask & (1 << j))
fprintf(trace, "%d ", dep->dimension_identifier[j]);
fprintf(trace, "\"/>\n");
}
fprintf(trace, "</OperatingPointsInformation>\n");
return;
}
| 169,170 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static WebTransformationMatrix blendTransformOperations(const WebTransformOperation* from, const WebTransformOperation* to, double progress)
{
WebTransformationMatrix toReturn;
if (isIdentity(from) && isIdentity(to))
return toReturn;
WebTransformOperation::Type interpolationType = WebTransformOperation::WebTransformOperationIdentity;
if (isIdentity(to))
interpolationType = from->type;
else
interpolationType = to->type;
switch (interpolationType) {
case WebTransformOperation::WebTransformOperationTranslate: {
double fromX = isIdentity(from) ? 0 : from->translate.x;
double fromY = isIdentity(from) ? 0 : from->translate.y;
double fromZ = isIdentity(from) ? 0 : from->translate.z;
double toX = isIdentity(to) ? 0 : to->translate.x;
double toY = isIdentity(to) ? 0 : to->translate.y;
double toZ = isIdentity(to) ? 0 : to->translate.z;
toReturn.translate3d(blendDoubles(fromX, toX, progress),
blendDoubles(fromY, toY, progress),
blendDoubles(fromZ, toZ, progress));
break;
}
case WebTransformOperation::WebTransformOperationRotate: {
double axisX = 0;
double axisY = 0;
double axisZ = 1;
double fromAngle = 0;
double toAngle = isIdentity(to) ? 0 : to->rotate.angle;
if (shareSameAxis(from, to, axisX, axisY, axisZ, fromAngle))
toReturn.rotate3d(axisX, axisY, axisZ, blendDoubles(fromAngle, toAngle, progress));
else {
WebTransformationMatrix toMatrix;
if (!isIdentity(to))
toMatrix = to->matrix;
WebTransformationMatrix fromMatrix;
if (!isIdentity(from))
fromMatrix = from->matrix;
toReturn = toMatrix;
toReturn.blend(fromMatrix, progress);
}
break;
}
case WebTransformOperation::WebTransformOperationScale: {
double fromX = isIdentity(from) ? 1 : from->scale.x;
double fromY = isIdentity(from) ? 1 : from->scale.y;
double fromZ = isIdentity(from) ? 1 : from->scale.z;
double toX = isIdentity(to) ? 1 : to->scale.x;
double toY = isIdentity(to) ? 1 : to->scale.y;
double toZ = isIdentity(to) ? 1 : to->scale.z;
toReturn.scale3d(blendDoubles(fromX, toX, progress),
blendDoubles(fromY, toY, progress),
blendDoubles(fromZ, toZ, progress));
break;
}
case WebTransformOperation::WebTransformOperationSkew: {
double fromX = isIdentity(from) ? 0 : from->skew.x;
double fromY = isIdentity(from) ? 0 : from->skew.y;
double toX = isIdentity(to) ? 0 : to->skew.x;
double toY = isIdentity(to) ? 0 : to->skew.y;
toReturn.skewX(blendDoubles(fromX, toX, progress));
toReturn.skewY(blendDoubles(fromY, toY, progress));
break;
}
case WebTransformOperation::WebTransformOperationPerspective: {
double fromPerspectiveDepth = isIdentity(from) ? numeric_limits<double>::max() : from->perspectiveDepth;
double toPerspectiveDepth = isIdentity(to) ? numeric_limits<double>::max() : to->perspectiveDepth;
toReturn.applyPerspective(blendDoubles(fromPerspectiveDepth, toPerspectiveDepth, progress));
break;
}
case WebTransformOperation::WebTransformOperationMatrix: {
WebTransformationMatrix toMatrix;
if (!isIdentity(to))
toMatrix = to->matrix;
WebTransformationMatrix fromMatrix;
if (!isIdentity(from))
fromMatrix = from->matrix;
toReturn = toMatrix;
toReturn.blend(fromMatrix, progress);
break;
}
case WebTransformOperation::WebTransformOperationIdentity:
break;
}
return toReturn;
}
Commit Message: [chromium] We should accelerate all transformations, except when we must blend matrices that cannot be decomposed.
https://bugs.webkit.org/show_bug.cgi?id=95855
Reviewed by James Robinson.
Source/Platform:
WebTransformOperations are now able to report if they can successfully blend.
WebTransformationMatrix::blend now returns a bool if blending would fail.
* chromium/public/WebTransformOperations.h:
(WebTransformOperations):
* chromium/public/WebTransformationMatrix.h:
(WebTransformationMatrix):
Source/WebCore:
WebTransformOperations are now able to report if they can successfully blend.
WebTransformationMatrix::blend now returns a bool if blending would fail.
Unit tests:
AnimationTranslationUtilTest.createTransformAnimationWithNonDecomposableMatrix
AnimationTranslationUtilTest.createTransformAnimationWithNonInvertibleTransform
* platform/chromium/support/WebTransformOperations.cpp:
(WebKit::blendTransformOperations):
(WebKit::WebTransformOperations::blend):
(WebKit::WebTransformOperations::canBlendWith):
(WebKit):
(WebKit::WebTransformOperations::blendInternal):
* platform/chromium/support/WebTransformationMatrix.cpp:
(WebKit::WebTransformationMatrix::blend):
* platform/graphics/chromium/AnimationTranslationUtil.cpp:
(WebCore::WebTransformAnimationCurve):
Source/WebKit/chromium:
Added the following unit tests:
AnimationTranslationUtilTest.createTransformAnimationWithNonDecomposableMatrix
AnimationTranslationUtilTest.createTransformAnimationWithNonInvertibleTransform
* tests/AnimationTranslationUtilTest.cpp:
(WebKit::TEST):
(WebKit):
git-svn-id: svn://svn.chromium.org/blink/trunk@127868 bbb929c8-8fbe-4397-9dbb-9b2b20218538
CWE ID: CWE-119 | static bool blendTransformOperations(const WebTransformOperation* from, const WebTransformOperation* to, double progress, WebTransformationMatrix& result)
{
if (isIdentity(from) && isIdentity(to))
return true;
WebTransformOperation::Type interpolationType = WebTransformOperation::WebTransformOperationIdentity;
if (isIdentity(to))
interpolationType = from->type;
else
interpolationType = to->type;
switch (interpolationType) {
case WebTransformOperation::WebTransformOperationTranslate: {
double fromX = isIdentity(from) ? 0 : from->translate.x;
double fromY = isIdentity(from) ? 0 : from->translate.y;
double fromZ = isIdentity(from) ? 0 : from->translate.z;
double toX = isIdentity(to) ? 0 : to->translate.x;
double toY = isIdentity(to) ? 0 : to->translate.y;
double toZ = isIdentity(to) ? 0 : to->translate.z;
result.translate3d(blendDoubles(fromX, toX, progress),
blendDoubles(fromY, toY, progress),
blendDoubles(fromZ, toZ, progress));
break;
}
case WebTransformOperation::WebTransformOperationRotate: {
double axisX = 0;
double axisY = 0;
double axisZ = 1;
double fromAngle = 0;
double toAngle = isIdentity(to) ? 0 : to->rotate.angle;
if (shareSameAxis(from, to, axisX, axisY, axisZ, fromAngle))
result.rotate3d(axisX, axisY, axisZ, blendDoubles(fromAngle, toAngle, progress));
else {
WebTransformationMatrix toMatrix;
if (!isIdentity(to))
toMatrix = to->matrix;
WebTransformationMatrix fromMatrix;
if (!isIdentity(from))
fromMatrix = from->matrix;
result = toMatrix;
if (!result.blend(fromMatrix, progress))
return false;
}
break;
}
case WebTransformOperation::WebTransformOperationScale: {
double fromX = isIdentity(from) ? 1 : from->scale.x;
double fromY = isIdentity(from) ? 1 : from->scale.y;
double fromZ = isIdentity(from) ? 1 : from->scale.z;
double toX = isIdentity(to) ? 1 : to->scale.x;
double toY = isIdentity(to) ? 1 : to->scale.y;
double toZ = isIdentity(to) ? 1 : to->scale.z;
result.scale3d(blendDoubles(fromX, toX, progress),
blendDoubles(fromY, toY, progress),
blendDoubles(fromZ, toZ, progress));
break;
}
case WebTransformOperation::WebTransformOperationSkew: {
double fromX = isIdentity(from) ? 0 : from->skew.x;
double fromY = isIdentity(from) ? 0 : from->skew.y;
double toX = isIdentity(to) ? 0 : to->skew.x;
double toY = isIdentity(to) ? 0 : to->skew.y;
result.skewX(blendDoubles(fromX, toX, progress));
result.skewY(blendDoubles(fromY, toY, progress));
break;
}
case WebTransformOperation::WebTransformOperationPerspective: {
double fromPerspectiveDepth = isIdentity(from) ? numeric_limits<double>::max() : from->perspectiveDepth;
double toPerspectiveDepth = isIdentity(to) ? numeric_limits<double>::max() : to->perspectiveDepth;
result.applyPerspective(blendDoubles(fromPerspectiveDepth, toPerspectiveDepth, progress));
break;
}
case WebTransformOperation::WebTransformOperationMatrix: {
WebTransformationMatrix toMatrix;
if (!isIdentity(to))
toMatrix = to->matrix;
WebTransformationMatrix fromMatrix;
if (!isIdentity(from))
fromMatrix = from->matrix;
result = toMatrix;
if (!result.blend(fromMatrix, progress))
return false;
break;
}
case WebTransformOperation::WebTransformOperationIdentity:
break;
}
return true;
}
| 171,003 |
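With blend() now reporting failure, callers can probe whether two operations are blendable before committing to an accelerated animation. The sketch below is an illustrative caller, not WebKit's actual code, and relies on the declarations from the snippet above rather than being fully self-contained.

/* Probe a midpoint blend; a false return means a non-invertible or
 * non-decomposable matrix pair, so the caller should stay on the
 * software animation path. */
static bool canBlendPair(const WebTransformOperation *from,
                         const WebTransformOperation *to)
{
    WebTransformationMatrix scratch;
    return blendTransformOperations(from, to, 0.5, scratch);
}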
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static Image *ReadGROUP4Image(const ImageInfo *image_info,
ExceptionInfo *exception)
{
char
filename[MagickPathExtent];
FILE
*file;
Image
*image;
ImageInfo
*read_info;
int
c,
unique_file;
MagickBooleanType
status;
size_t
length;
ssize_t
offset,
strip_offset;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Write raw CCITT Group 4 wrapped as a TIFF image file.
*/
file=(FILE *) NULL;
unique_file=AcquireUniqueFileResource(filename);
if (unique_file != -1)
file=fdopen(unique_file,"wb");
if ((unique_file == -1) || (file == (FILE *) NULL))
ThrowImageException(FileOpenError,"UnableToCreateTemporaryFile");
length=fwrite("\111\111\052\000\010\000\000\000\016\000",1,10,file);
length=fwrite("\376\000\003\000\001\000\000\000\000\000\000\000",1,12,file);
length=fwrite("\000\001\004\000\001\000\000\000",1,8,file);
length=WriteLSBLong(file,image->columns);
length=fwrite("\001\001\004\000\001\000\000\000",1,8,file);
length=WriteLSBLong(file,image->rows);
length=fwrite("\002\001\003\000\001\000\000\000\001\000\000\000",1,12,file);
length=fwrite("\003\001\003\000\001\000\000\000\004\000\000\000",1,12,file);
length=fwrite("\006\001\003\000\001\000\000\000\000\000\000\000",1,12,file);
length=fwrite("\021\001\003\000\001\000\000\000",1,8,file);
strip_offset=10+(12*14)+4+8;
length=WriteLSBLong(file,(size_t) strip_offset);
length=fwrite("\022\001\003\000\001\000\000\000",1,8,file);
length=WriteLSBLong(file,(size_t) image_info->orientation);
length=fwrite("\025\001\003\000\001\000\000\000\001\000\000\000",1,12,file);
length=fwrite("\026\001\004\000\001\000\000\000",1,8,file);
length=WriteLSBLong(file,image->rows);
length=fwrite("\027\001\004\000\001\000\000\000\000\000\000\000",1,12,file);
offset=(ssize_t) ftell(file)-4;
length=fwrite("\032\001\005\000\001\000\000\000",1,8,file);
length=WriteLSBLong(file,(size_t) (strip_offset-8));
length=fwrite("\033\001\005\000\001\000\000\000",1,8,file);
length=WriteLSBLong(file,(size_t) (strip_offset-8));
length=fwrite("\050\001\003\000\001\000\000\000\002\000\000\000",1,12,file);
length=fwrite("\000\000\000\000",1,4,file);
length=WriteLSBLong(file,(long) image->resolution.x);
length=WriteLSBLong(file,1);
for (length=0; (c=ReadBlobByte(image)) != EOF; length++)
(void) fputc(c,file);
offset=(ssize_t) fseek(file,(ssize_t) offset,SEEK_SET);
length=WriteLSBLong(file,(unsigned int) length);
(void) fclose(file);
(void) CloseBlob(image);
image=DestroyImage(image);
/*
Read TIFF image.
*/
read_info=CloneImageInfo((ImageInfo *) NULL);
(void) FormatLocaleString(read_info->filename,MagickPathExtent,"%s",filename);
image=ReadTIFFImage(read_info,exception);
read_info=DestroyImageInfo(read_info);
if (image != (Image *) NULL)
{
(void) CopyMagickString(image->filename,image_info->filename,
MagickPathExtent);
(void) CopyMagickString(image->magick_filename,image_info->filename,
MagickPathExtent);
(void) CopyMagickString(image->magick,"GROUP4",MagickPathExtent);
}
(void) RelinquishUniqueFileResource(filename);
return(image);
}
Commit Message: https://github.com/ImageMagick/ImageMagick/issues/196
CWE ID: CWE-20 | static Image *ReadGROUP4Image(const ImageInfo *image_info,
ExceptionInfo *exception)
{
char
filename[MagickPathExtent];
FILE
*file;
Image
*image;
ImageInfo
*read_info;
int
c,
unique_file;
MagickBooleanType
status;
size_t
length;
ssize_t
offset,
strip_offset;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Write raw CCITT Group 4 wrapped as a TIFF image file.
*/
file=(FILE *) NULL;
unique_file=AcquireUniqueFileResource(filename);
if (unique_file != -1)
file=fdopen(unique_file,"wb");
if ((unique_file == -1) || (file == (FILE *) NULL))
ThrowImageException(FileOpenError,"UnableToCreateTemporaryFile");
length=fwrite("\111\111\052\000\010\000\000\000\016\000",1,10,file);
length=fwrite("\376\000\003\000\001\000\000\000\000\000\000\000",1,12,file);
length=fwrite("\000\001\004\000\001\000\000\000",1,8,file);
length=WriteLSBLong(file,image->columns);
length=fwrite("\001\001\004\000\001\000\000\000",1,8,file);
length=WriteLSBLong(file,image->rows);
length=fwrite("\002\001\003\000\001\000\000\000\001\000\000\000",1,12,file);
length=fwrite("\003\001\003\000\001\000\000\000\004\000\000\000",1,12,file);
length=fwrite("\006\001\003\000\001\000\000\000\000\000\000\000",1,12,file);
length=fwrite("\021\001\003\000\001\000\000\000",1,8,file);
strip_offset=10+(12*14)+4+8;
length=WriteLSBLong(file,(size_t) strip_offset);
length=fwrite("\022\001\003\000\001\000\000\000",1,8,file);
length=WriteLSBLong(file,(size_t) image_info->orientation);
length=fwrite("\025\001\003\000\001\000\000\000\001\000\000\000",1,12,file);
length=fwrite("\026\001\004\000\001\000\000\000",1,8,file);
length=WriteLSBLong(file,image->rows);
length=fwrite("\027\001\004\000\001\000\000\000\000\000\000\000",1,12,file);
offset=(ssize_t) ftell(file)-4;
length=fwrite("\032\001\005\000\001\000\000\000",1,8,file);
length=WriteLSBLong(file,(size_t) (strip_offset-8));
length=fwrite("\033\001\005\000\001\000\000\000",1,8,file);
length=WriteLSBLong(file,(size_t) (strip_offset-8));
length=fwrite("\050\001\003\000\001\000\000\000\002\000\000\000",1,12,file);
length=fwrite("\000\000\000\000",1,4,file);
length=WriteLSBLong(file,(long) image->resolution.x);
length=WriteLSBLong(file,1);
status=MagickTrue;
for (length=0; (c=ReadBlobByte(image)) != EOF; length++)
if (fputc(c,file) != c)
status=MagickFalse;
offset=(ssize_t) fseek(file,(ssize_t) offset,SEEK_SET);
length=WriteLSBLong(file,(unsigned int) length);
(void) fclose(file);
(void) CloseBlob(image);
image=DestroyImage(image);
/*
Read TIFF image.
*/
read_info=CloneImageInfo((ImageInfo *) NULL);
(void) FormatLocaleString(read_info->filename,MagickPathExtent,"%s",filename);
image=ReadTIFFImage(read_info,exception);
read_info=DestroyImageInfo(read_info);
if (image != (Image *) NULL)
{
(void) CopyMagickString(image->filename,image_info->filename,
MagickPathExtent);
(void) CopyMagickString(image->magick_filename,image_info->filename,
MagickPathExtent);
(void) CopyMagickString(image->magick,"GROUP4",MagickPathExtent);
}
(void) RelinquishUniqueFileResource(filename);
if (status == MagickFalse)
image=DestroyImage(image);
return(image);
}
| 168,627 |
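The fix in this row is narrow: track whether any fputc failed while spooling the raw CCITT stream into the temporary TIFF wrapper, and destroy the image on failure instead of decoding a silently truncated file. A standalone sketch of that checked-copy loop, with an assumed helper name:

#include <cstdio>

/* Copy one stream to another, reporting short writes instead of letting
 * a failed fputc silently truncate the spooled file; mirrors the status
 * tracking the patch adds around the ReadBlobByte/fputc loop. */
static bool copy_stream_checked(FILE *in, FILE *out)
{
    int c;
    bool ok = true;
    while ((c = std::fgetc(in)) != EOF) {
        if (std::fputc(c, out) != c)
            ok = false;            /* keep draining input, as the patch does */
    }
    return ok && !std::ferror(in);
}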
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: WORD32 ihevcd_decode(iv_obj_t *ps_codec_obj, void *pv_api_ip, void *pv_api_op)
{
WORD32 ret = IV_SUCCESS;
codec_t *ps_codec = (codec_t *)(ps_codec_obj->pv_codec_handle);
ivd_video_decode_ip_t *ps_dec_ip;
ivd_video_decode_op_t *ps_dec_op;
WORD32 proc_idx = 0;
WORD32 prev_proc_idx = 0;
/* Initialize error code */
ps_codec->i4_error_code = 0;
ps_dec_ip = (ivd_video_decode_ip_t *)pv_api_ip;
ps_dec_op = (ivd_video_decode_op_t *)pv_api_op;
{
UWORD32 u4_size = ps_dec_op->u4_size;
memset(ps_dec_op, 0, sizeof(ivd_video_decode_op_t));
ps_dec_op->u4_size = u4_size; //Restore size field
}
if(ps_codec->i4_init_done != 1)
{
ps_dec_op->u4_error_code |= 1 << IVD_FATALERROR;
ps_dec_op->u4_error_code |= IHEVCD_INIT_NOT_DONE;
return IV_FAIL;
}
if(ps_codec->u4_pic_cnt >= NUM_FRAMES_LIMIT)
{
ps_dec_op->u4_error_code |= 1 << IVD_FATALERROR;
ps_dec_op->u4_error_code |= IHEVCD_NUM_FRAMES_LIMIT_REACHED;
return IV_FAIL;
}
/* If reset flag is set, flush the existing buffers */
if(ps_codec->i4_reset_flag)
{
ps_codec->i4_flush_mode = 1;
}
/*Data memory barries instruction,so that bitstream write by the application is complete*/
/* In case the decoder is not in flush mode check for input buffer validity */
if(0 == ps_codec->i4_flush_mode)
{
if(ps_dec_ip->pv_stream_buffer == NULL)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DEC_FRM_BS_BUF_NULL;
return IV_FAIL;
}
if(ps_dec_ip->u4_num_Bytes <= MIN_START_CODE_LEN)
{
if((WORD32)ps_dec_ip->u4_num_Bytes > 0)
ps_dec_op->u4_num_bytes_consumed = ps_dec_ip->u4_num_Bytes;
else
ps_dec_op->u4_num_bytes_consumed = 0;
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DEC_NUMBYTES_INV;
return IV_FAIL;
}
}
#ifdef APPLY_CONCEALMENT
{
WORD32 num_mbs;
num_mbs = (ps_codec->i4_wd * ps_codec->i4_ht + 255) >> 8;
/* Reset MB Count at the beginning of every process call */
ps_codec->mb_count = 0;
memset(ps_codec->mb_map, 0, ((num_mbs + 7) >> 3));
}
#endif
if(0 == ps_codec->i4_share_disp_buf && ps_codec->i4_header_mode == 0)
{
UWORD32 i;
if(ps_dec_ip->s_out_buffer.u4_num_bufs == 0)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DISP_FRM_ZERO_OP_BUFS;
return IV_FAIL;
}
for(i = 0; i < ps_dec_ip->s_out_buffer.u4_num_bufs; i++)
{
if(ps_dec_ip->s_out_buffer.pu1_bufs[i] == NULL)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DISP_FRM_OP_BUF_NULL;
return IV_FAIL;
}
if(ps_dec_ip->s_out_buffer.u4_min_out_buf_size[i] == 0)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DISP_FRM_ZERO_OP_BUF_SIZE;
return IV_FAIL;
}
}
}
ps_codec->ps_out_buffer = &ps_dec_ip->s_out_buffer;
ps_codec->u4_ts = ps_dec_ip->u4_ts;
if(ps_codec->i4_flush_mode)
{
ps_dec_op->u4_pic_wd = ps_codec->i4_disp_wd;
ps_dec_op->u4_pic_ht = ps_codec->i4_disp_ht;
ps_dec_op->u4_new_seq = 0;
ps_codec->ps_disp_buf = (pic_buf_t *)ihevc_disp_mgr_get(
(disp_mgr_t *)ps_codec->pv_disp_buf_mgr, &ps_codec->i4_disp_buf_id);
/* In case of non-shared mode, then convert/copy the frame to output buffer */
/* Only if the codec is in non-shared mode or in shared mode but needs 420P output */
if((ps_codec->ps_disp_buf)
&& ((0 == ps_codec->i4_share_disp_buf)
|| (IV_YUV_420P
== ps_codec->e_chroma_fmt)))
{
process_ctxt_t *ps_proc = &ps_codec->as_process[prev_proc_idx];
if(0 == ps_proc->i4_init_done)
{
ihevcd_init_proc_ctxt(ps_proc, 0);
}
/* Set remaining number of rows to be processed */
ret = ihevcd_fmt_conv(ps_codec, &ps_codec->as_process[prev_proc_idx],
ps_dec_ip->s_out_buffer.pu1_bufs[0],
ps_dec_ip->s_out_buffer.pu1_bufs[1],
ps_dec_ip->s_out_buffer.pu1_bufs[2], 0,
ps_codec->i4_disp_ht);
ihevc_buf_mgr_release((buf_mgr_t *)ps_codec->pv_pic_buf_mgr,
ps_codec->i4_disp_buf_id, BUF_MGR_DISP);
}
ihevcd_fill_outargs(ps_codec, ps_dec_ip, ps_dec_op);
if(1 == ps_dec_op->u4_output_present)
{
WORD32 xpos = ps_codec->i4_disp_wd - 32 - LOGO_WD;
WORD32 ypos = ps_codec->i4_disp_ht - 32 - LOGO_HT;
if(ypos < 0)
ypos = 0;
if(xpos < 0)
xpos = 0;
INSERT_LOGO(ps_dec_ip->s_out_buffer.pu1_bufs[0],
ps_dec_ip->s_out_buffer.pu1_bufs[1],
ps_dec_ip->s_out_buffer.pu1_bufs[2], ps_codec->i4_disp_strd,
xpos,
ypos,
ps_codec->e_chroma_fmt,
ps_codec->i4_disp_wd,
ps_codec->i4_disp_ht);
}
if(NULL == ps_codec->ps_disp_buf)
{
/* If in flush mode and there are no more buffers to flush,
* check for the reset flag and reset the decoder */
if(ps_codec->i4_reset_flag)
{
ihevcd_init(ps_codec);
}
return (IV_FAIL);
}
return (IV_SUCCESS);
}
/* In case of shared mode, check if there is a free buffer for reconstruction */
if((0 == ps_codec->i4_header_mode) && (1 == ps_codec->i4_share_disp_buf))
{
WORD32 buf_status;
buf_status = 1;
if(ps_codec->pv_pic_buf_mgr)
buf_status = ihevc_buf_mgr_check_free((buf_mgr_t *)ps_codec->pv_pic_buf_mgr);
/* If there is no free buffer, then return with an error code */
if(0 == buf_status)
{
ps_dec_op->u4_error_code = IVD_DEC_REF_BUF_NULL;
ps_dec_op->u4_error_code |= (1 << IVD_UNSUPPORTEDPARAM);
return IV_FAIL;
}
}
ps_codec->i4_bytes_remaining = ps_dec_ip->u4_num_Bytes;
ps_codec->pu1_inp_bitsbuf = (UWORD8 *)ps_dec_ip->pv_stream_buffer;
ps_codec->s_parse.i4_end_of_frame = 0;
ps_codec->i4_pic_present = 0;
ps_codec->i4_slice_error = 0;
ps_codec->ps_disp_buf = NULL;
if(ps_codec->i4_num_cores > 1)
{
ithread_set_affinity(0);
}
while(MIN_START_CODE_LEN < ps_codec->i4_bytes_remaining)
{
WORD32 nal_len;
WORD32 nal_ofst;
WORD32 bits_len;
if(ps_codec->i4_slice_error)
{
slice_header_t *ps_slice_hdr_next = ps_codec->s_parse.ps_slice_hdr_base + (ps_codec->s_parse.i4_cur_slice_idx & (MAX_SLICE_HDR_CNT - 1));
WORD32 next_slice_addr = ps_slice_hdr_next->i2_ctb_x +
ps_slice_hdr_next->i2_ctb_y * ps_codec->s_parse.ps_sps->i2_pic_wd_in_ctb;
if(ps_codec->s_parse.i4_next_ctb_indx == next_slice_addr)
ps_codec->i4_slice_error = 0;
}
if(ps_codec->pu1_bitsbuf_dynamic)
{
ps_codec->pu1_bitsbuf = ps_codec->pu1_bitsbuf_dynamic;
ps_codec->u4_bitsbuf_size = ps_codec->u4_bitsbuf_size_dynamic;
}
else
{
ps_codec->pu1_bitsbuf = ps_codec->pu1_bitsbuf_static;
ps_codec->u4_bitsbuf_size = ps_codec->u4_bitsbuf_size_static;
}
nal_ofst = ihevcd_nal_search_start_code(ps_codec->pu1_inp_bitsbuf,
ps_codec->i4_bytes_remaining);
ps_codec->i4_nal_ofst = nal_ofst;
{
WORD32 bytes_remaining = ps_codec->i4_bytes_remaining - nal_ofst;
bytes_remaining = MIN((UWORD32)bytes_remaining, ps_codec->u4_bitsbuf_size);
ihevcd_nal_remv_emuln_bytes(ps_codec->pu1_inp_bitsbuf + nal_ofst,
ps_codec->pu1_bitsbuf,
bytes_remaining,
&nal_len, &bits_len);
/* Decoder may read upto 8 extra bytes at the end of frame */
/* These are not used, but still set them to zero to avoid uninitialized reads */
if(bits_len < (WORD32)(ps_codec->u4_bitsbuf_size - 8))
{
memset(ps_codec->pu1_bitsbuf + bits_len, 0, 2 * sizeof(UWORD32));
}
}
/* This may be used to update the offsets for tiles and entropy sync row offsets */
ps_codec->i4_num_emln_bytes = nal_len - bits_len;
ps_codec->i4_nal_len = nal_len;
ihevcd_bits_init(&ps_codec->s_parse.s_bitstrm, ps_codec->pu1_bitsbuf,
bits_len);
ret = ihevcd_nal_unit(ps_codec);
/* If the frame is incomplete and
* the bytes remaining is zero or a header is received,
* complete the frame treating it to be in error */
if(ps_codec->i4_pic_present &&
(ps_codec->s_parse.i4_next_ctb_indx != ps_codec->s_parse.ps_sps->i4_pic_size_in_ctb))
{
if((ps_codec->i4_bytes_remaining - (nal_len + nal_ofst) <= MIN_START_CODE_LEN) ||
(ps_codec->i4_header_in_slice_mode))
{
slice_header_t *ps_slice_hdr_next;
ps_codec->s_parse.i4_cur_slice_idx--;
if(ps_codec->s_parse.i4_cur_slice_idx < 0)
ps_codec->s_parse.i4_cur_slice_idx = 0;
ps_slice_hdr_next = ps_codec->s_parse.ps_slice_hdr_base + ((ps_codec->s_parse.i4_cur_slice_idx + 1) & (MAX_SLICE_HDR_CNT - 1));
ps_slice_hdr_next->i2_ctb_x = 0;
ps_slice_hdr_next->i2_ctb_y = ps_codec->s_parse.ps_sps->i2_pic_ht_in_ctb;
ps_codec->i4_slice_error = 1;
continue;
}
}
if(IHEVCD_IGNORE_SLICE == ret)
{
ps_codec->pu1_inp_bitsbuf += (nal_ofst + nal_len);
ps_codec->i4_bytes_remaining -= (nal_ofst + nal_len);
continue;
}
if((IVD_RES_CHANGED == ret) ||
(IHEVCD_UNSUPPORTED_DIMENSIONS == ret))
{
break;
}
/* Update bytes remaining and bytes consumed and input bitstream pointer */
/* Do not consume the NAL in the following cases */
/* Slice header reached during header decode mode */
/* TODO: Next picture's slice reached */
if(ret != IHEVCD_SLICE_IN_HEADER_MODE)
{
if((0 == ps_codec->i4_slice_error) ||
(ps_codec->i4_bytes_remaining - (nal_len + nal_ofst) <= MIN_START_CODE_LEN))
{
ps_codec->pu1_inp_bitsbuf += (nal_ofst + nal_len);
ps_codec->i4_bytes_remaining -= (nal_ofst + nal_len);
}
if(ret != IHEVCD_SUCCESS)
break;
if(ps_codec->s_parse.i4_end_of_frame)
break;
}
else
{
ret = IHEVCD_SUCCESS;
break;
}
/* Allocate dynamic bitstream buffer once SPS is decoded */
if((ps_codec->u4_allocate_dynamic_done == 0) && ps_codec->i4_sps_done)
{
WORD32 ret;
ret = ihevcd_allocate_dynamic_bufs(ps_codec);
if(ret != IV_SUCCESS)
{
/* Free any dynamic buffers that are allocated */
ihevcd_free_dynamic_bufs(ps_codec);
ps_codec->i4_error_code = IVD_MEM_ALLOC_FAILED;
ps_dec_op->u4_error_code |= 1 << IVD_FATALERROR;
ps_dec_op->u4_error_code |= IVD_MEM_ALLOC_FAILED;
return IV_FAIL;
}
}
BREAK_AFTER_SLICE_NAL();
}
if((ps_codec->u4_pic_cnt == 0) && (ret != IHEVCD_SUCCESS))
{
ps_codec->i4_error_code = ret;
ihevcd_fill_outargs(ps_codec, ps_dec_ip, ps_dec_op);
return IV_FAIL;
}
if(1 == ps_codec->i4_pic_present)
{
WORD32 i;
sps_t *ps_sps = ps_codec->s_parse.ps_sps;
ps_codec->i4_first_pic_done = 1;
/*TODO temporary fix: end_of_frame is checked before adding format conversion to job queue */
if(ps_codec->i4_num_cores > 1 && ps_codec->s_parse.i4_end_of_frame)
{
/* Add job queue for format conversion / frame copy for each ctb row */
/* Only if the codec is in non-shared mode or in shared mode but needs 420P output */
process_ctxt_t *ps_proc;
/* i4_num_cores - 1 contexts are currently being used by other threads */
ps_proc = &ps_codec->as_process[ps_codec->i4_num_cores - 1];
if((ps_codec->ps_disp_buf) &&
((0 == ps_codec->i4_share_disp_buf) || (IV_YUV_420P == ps_codec->e_chroma_fmt)))
{
/* If format conversion jobs were not issued in pic_init() add them here */
if((0 == ps_codec->u4_enable_fmt_conv_ahead) ||
(ps_codec->i4_disp_buf_id == ps_proc->i4_cur_pic_buf_id))
for(i = 0; i < ps_sps->i2_pic_ht_in_ctb; i++)
{
proc_job_t s_job;
IHEVCD_ERROR_T ret;
s_job.i4_cmd = CMD_FMTCONV;
s_job.i2_ctb_cnt = 0;
s_job.i2_ctb_x = 0;
s_job.i2_ctb_y = i;
s_job.i2_slice_idx = 0;
s_job.i4_tu_coeff_data_ofst = 0;
ret = ihevcd_jobq_queue((jobq_t *)ps_codec->s_parse.pv_proc_jobq,
&s_job, sizeof(proc_job_t), 1);
if(ret != (IHEVCD_ERROR_T)IHEVCD_SUCCESS)
return (WORD32)ret;
}
}
/* Reached end of frame : Signal terminate */
/* The terminate flag is checked only after all the jobs are dequeued */
ret = ihevcd_jobq_terminate((jobq_t *)ps_codec->s_parse.pv_proc_jobq);
while(1)
{
IHEVCD_ERROR_T ret;
proc_job_t s_job;
process_ctxt_t *ps_proc;
/* i4_num_cores - 1 contexts are currently being used by other threads */
ps_proc = &ps_codec->as_process[ps_codec->i4_num_cores - 1];
ret = ihevcd_jobq_dequeue((jobq_t *)ps_proc->pv_proc_jobq, &s_job,
sizeof(proc_job_t), 1);
if((IHEVCD_ERROR_T)IHEVCD_SUCCESS != ret)
break;
ps_proc->i4_ctb_cnt = s_job.i2_ctb_cnt;
ps_proc->i4_ctb_x = s_job.i2_ctb_x;
ps_proc->i4_ctb_y = s_job.i2_ctb_y;
ps_proc->i4_cur_slice_idx = s_job.i2_slice_idx;
if(CMD_PROCESS == s_job.i4_cmd)
{
ihevcd_init_proc_ctxt(ps_proc, s_job.i4_tu_coeff_data_ofst);
ihevcd_process(ps_proc);
}
else if(CMD_FMTCONV == s_job.i4_cmd)
{
sps_t *ps_sps = ps_codec->s_parse.ps_sps;
WORD32 num_rows = 1 << ps_sps->i1_log2_ctb_size;
if(0 == ps_proc->i4_init_done)
{
ihevcd_init_proc_ctxt(ps_proc, 0);
}
num_rows = MIN(num_rows, (ps_codec->i4_disp_ht - (s_job.i2_ctb_y << ps_sps->i1_log2_ctb_size)));
if(num_rows < 0)
num_rows = 0;
ihevcd_fmt_conv(ps_codec, ps_proc,
ps_dec_ip->s_out_buffer.pu1_bufs[0],
ps_dec_ip->s_out_buffer.pu1_bufs[1],
ps_dec_ip->s_out_buffer.pu1_bufs[2],
s_job.i2_ctb_y << ps_sps->i1_log2_ctb_size,
num_rows);
}
}
}
/* In case of non-shared mode and while running in single core mode, then convert/copy the frame to output buffer */
/* Only if the codec is in non-shared mode or in shared mode but needs 420P output */
else if((ps_codec->ps_disp_buf) && ((0 == ps_codec->i4_share_disp_buf) ||
(IV_YUV_420P == ps_codec->e_chroma_fmt)) &&
(ps_codec->s_parse.i4_end_of_frame))
{
process_ctxt_t *ps_proc = &ps_codec->as_process[proc_idx];
/* Set remaining number of rows to be processed */
ps_codec->s_fmt_conv.i4_num_rows = ps_codec->i4_disp_ht
- ps_codec->s_fmt_conv.i4_cur_row;
if(0 == ps_proc->i4_init_done)
{
ihevcd_init_proc_ctxt(ps_proc, 0);
}
if(ps_codec->s_fmt_conv.i4_num_rows < 0)
ps_codec->s_fmt_conv.i4_num_rows = 0;
ret = ihevcd_fmt_conv(ps_codec, ps_proc,
ps_dec_ip->s_out_buffer.pu1_bufs[0],
ps_dec_ip->s_out_buffer.pu1_bufs[1],
ps_dec_ip->s_out_buffer.pu1_bufs[2],
ps_codec->s_fmt_conv.i4_cur_row,
ps_codec->s_fmt_conv.i4_num_rows);
ps_codec->s_fmt_conv.i4_cur_row += ps_codec->s_fmt_conv.i4_num_rows;
}
DEBUG_DUMP_MV_MAP(ps_codec);
/* Mark MV Buf as needed for reference */
ihevc_buf_mgr_set_status((buf_mgr_t *)ps_codec->pv_mv_buf_mgr,
ps_codec->as_process[proc_idx].i4_cur_mv_bank_buf_id,
BUF_MGR_REF);
/* Mark pic buf as needed for reference */
ihevc_buf_mgr_set_status((buf_mgr_t *)ps_codec->pv_pic_buf_mgr,
ps_codec->as_process[proc_idx].i4_cur_pic_buf_id,
BUF_MGR_REF);
/* Mark pic buf as needed for display */
ihevc_buf_mgr_set_status((buf_mgr_t *)ps_codec->pv_pic_buf_mgr,
ps_codec->as_process[proc_idx].i4_cur_pic_buf_id,
BUF_MGR_DISP);
/* Insert the current picture as short term reference */
ihevc_dpb_mgr_insert_ref((dpb_mgr_t *)ps_codec->pv_dpb_mgr,
ps_codec->as_process[proc_idx].ps_cur_pic,
ps_codec->as_process[proc_idx].i4_cur_pic_buf_id);
/* If a frame was displayed (in non-shared mode), then release it from display manager */
if((0 == ps_codec->i4_share_disp_buf) && (ps_codec->ps_disp_buf))
ihevc_buf_mgr_release((buf_mgr_t *)ps_codec->pv_pic_buf_mgr,
ps_codec->i4_disp_buf_id, BUF_MGR_DISP);
/* Wait for threads */
for(i = 0; i < (ps_codec->i4_num_cores - 1); i++)
{
if(ps_codec->ai4_process_thread_created[i])
{
ithread_join(ps_codec->apv_process_thread_handle[i], NULL);
ps_codec->ai4_process_thread_created[i] = 0;
}
}
DEBUG_VALIDATE_PADDED_REGION(&ps_codec->as_process[proc_idx]);
if(ps_codec->u4_pic_cnt > 0)
{
DEBUG_DUMP_PIC_PU(ps_codec);
}
DEBUG_DUMP_PIC_BUFFERS(ps_codec);
/* Increment the number of pictures decoded */
ps_codec->u4_pic_cnt++;
}
ihevcd_fill_outargs(ps_codec, ps_dec_ip, ps_dec_op);
if(1 == ps_dec_op->u4_output_present)
{
WORD32 xpos = ps_codec->i4_disp_wd - 32 - LOGO_WD;
WORD32 ypos = ps_codec->i4_disp_ht - 32 - LOGO_HT;
if(ypos < 0)
ypos = 0;
if(xpos < 0)
xpos = 0;
INSERT_LOGO(ps_dec_ip->s_out_buffer.pu1_bufs[0],
ps_dec_ip->s_out_buffer.pu1_bufs[1],
ps_dec_ip->s_out_buffer.pu1_bufs[2], ps_codec->i4_disp_strd,
xpos,
ypos,
ps_codec->e_chroma_fmt,
ps_codec->i4_disp_wd,
ps_codec->i4_disp_ht);
}
return ret;
}
Commit Message: Handle invalid slice_address in slice header
If an invalid slice_address was parsed, it resulted in an incomplete
slice header during the decode stage. Fix this by not incrementing
slice_idx when a slice is ignored due to an error
Bug: 32322258
Change-Id: I8638d7094d65f4409faa9b9e337ef7e7b64505de
(cherry picked from commit f4f3556e04a9776bcc776523ae0763e7d0d5c668)
CWE ID: | WORD32 ihevcd_decode(iv_obj_t *ps_codec_obj, void *pv_api_ip, void *pv_api_op)
{
WORD32 ret = IV_SUCCESS;
codec_t *ps_codec = (codec_t *)(ps_codec_obj->pv_codec_handle);
ivd_video_decode_ip_t *ps_dec_ip;
ivd_video_decode_op_t *ps_dec_op;
WORD32 proc_idx = 0;
WORD32 prev_proc_idx = 0;
/* Initialize error code */
ps_codec->i4_error_code = 0;
ps_dec_ip = (ivd_video_decode_ip_t *)pv_api_ip;
ps_dec_op = (ivd_video_decode_op_t *)pv_api_op;
{
UWORD32 u4_size = ps_dec_op->u4_size;
memset(ps_dec_op, 0, sizeof(ivd_video_decode_op_t));
ps_dec_op->u4_size = u4_size; //Restore size field
}
if(ps_codec->i4_init_done != 1)
{
ps_dec_op->u4_error_code |= 1 << IVD_FATALERROR;
ps_dec_op->u4_error_code |= IHEVCD_INIT_NOT_DONE;
return IV_FAIL;
}
if(ps_codec->u4_pic_cnt >= NUM_FRAMES_LIMIT)
{
ps_dec_op->u4_error_code |= 1 << IVD_FATALERROR;
ps_dec_op->u4_error_code |= IHEVCD_NUM_FRAMES_LIMIT_REACHED;
return IV_FAIL;
}
/* If reset flag is set, flush the existing buffers */
if(ps_codec->i4_reset_flag)
{
ps_codec->i4_flush_mode = 1;
}
/*Data memory barries instruction,so that bitstream write by the application is complete*/
/* In case the decoder is not in flush mode check for input buffer validity */
if(0 == ps_codec->i4_flush_mode)
{
if(ps_dec_ip->pv_stream_buffer == NULL)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DEC_FRM_BS_BUF_NULL;
return IV_FAIL;
}
if(ps_dec_ip->u4_num_Bytes <= MIN_START_CODE_LEN)
{
if((WORD32)ps_dec_ip->u4_num_Bytes > 0)
ps_dec_op->u4_num_bytes_consumed = ps_dec_ip->u4_num_Bytes;
else
ps_dec_op->u4_num_bytes_consumed = 0;
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DEC_NUMBYTES_INV;
return IV_FAIL;
}
}
#ifdef APPLY_CONCEALMENT
{
WORD32 num_mbs;
num_mbs = (ps_codec->i4_wd * ps_codec->i4_ht + 255) >> 8;
/* Reset MB Count at the beginning of every process call */
ps_codec->mb_count = 0;
memset(ps_codec->mb_map, 0, ((num_mbs + 7) >> 3));
}
#endif
if(0 == ps_codec->i4_share_disp_buf && ps_codec->i4_header_mode == 0)
{
UWORD32 i;
if(ps_dec_ip->s_out_buffer.u4_num_bufs == 0)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DISP_FRM_ZERO_OP_BUFS;
return IV_FAIL;
}
for(i = 0; i < ps_dec_ip->s_out_buffer.u4_num_bufs; i++)
{
if(ps_dec_ip->s_out_buffer.pu1_bufs[i] == NULL)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DISP_FRM_OP_BUF_NULL;
return IV_FAIL;
}
if(ps_dec_ip->s_out_buffer.u4_min_out_buf_size[i] == 0)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DISP_FRM_ZERO_OP_BUF_SIZE;
return IV_FAIL;
}
}
}
ps_codec->ps_out_buffer = &ps_dec_ip->s_out_buffer;
ps_codec->u4_ts = ps_dec_ip->u4_ts;
if(ps_codec->i4_flush_mode)
{
ps_dec_op->u4_pic_wd = ps_codec->i4_disp_wd;
ps_dec_op->u4_pic_ht = ps_codec->i4_disp_ht;
ps_dec_op->u4_new_seq = 0;
ps_codec->ps_disp_buf = (pic_buf_t *)ihevc_disp_mgr_get(
(disp_mgr_t *)ps_codec->pv_disp_buf_mgr, &ps_codec->i4_disp_buf_id);
/* In case of non-shared mode, then convert/copy the frame to output buffer */
/* Only if the codec is in non-shared mode or in shared mode but needs 420P output */
if((ps_codec->ps_disp_buf)
&& ((0 == ps_codec->i4_share_disp_buf)
|| (IV_YUV_420P
== ps_codec->e_chroma_fmt)))
{
process_ctxt_t *ps_proc = &ps_codec->as_process[prev_proc_idx];
if(0 == ps_proc->i4_init_done)
{
ihevcd_init_proc_ctxt(ps_proc, 0);
}
/* Set remaining number of rows to be processed */
ret = ihevcd_fmt_conv(ps_codec, &ps_codec->as_process[prev_proc_idx],
ps_dec_ip->s_out_buffer.pu1_bufs[0],
ps_dec_ip->s_out_buffer.pu1_bufs[1],
ps_dec_ip->s_out_buffer.pu1_bufs[2], 0,
ps_codec->i4_disp_ht);
ihevc_buf_mgr_release((buf_mgr_t *)ps_codec->pv_pic_buf_mgr,
ps_codec->i4_disp_buf_id, BUF_MGR_DISP);
}
ihevcd_fill_outargs(ps_codec, ps_dec_ip, ps_dec_op);
if(1 == ps_dec_op->u4_output_present)
{
WORD32 xpos = ps_codec->i4_disp_wd - 32 - LOGO_WD;
WORD32 ypos = ps_codec->i4_disp_ht - 32 - LOGO_HT;
if(ypos < 0)
ypos = 0;
if(xpos < 0)
xpos = 0;
INSERT_LOGO(ps_dec_ip->s_out_buffer.pu1_bufs[0],
ps_dec_ip->s_out_buffer.pu1_bufs[1],
ps_dec_ip->s_out_buffer.pu1_bufs[2], ps_codec->i4_disp_strd,
xpos,
ypos,
ps_codec->e_chroma_fmt,
ps_codec->i4_disp_wd,
ps_codec->i4_disp_ht);
}
if(NULL == ps_codec->ps_disp_buf)
{
/* If in flush mode and there are no more buffers to flush,
* check for the reset flag and reset the decoder */
if(ps_codec->i4_reset_flag)
{
ihevcd_init(ps_codec);
}
return (IV_FAIL);
}
return (IV_SUCCESS);
}
/* In case of shared mode, check if there is a free buffer for reconstruction */
if((0 == ps_codec->i4_header_mode) && (1 == ps_codec->i4_share_disp_buf))
{
WORD32 buf_status;
buf_status = 1;
if(ps_codec->pv_pic_buf_mgr)
buf_status = ihevc_buf_mgr_check_free((buf_mgr_t *)ps_codec->pv_pic_buf_mgr);
/* If there is no free buffer, then return with an error code */
if(0 == buf_status)
{
ps_dec_op->u4_error_code = IVD_DEC_REF_BUF_NULL;
ps_dec_op->u4_error_code |= (1 << IVD_UNSUPPORTEDPARAM);
return IV_FAIL;
}
}
ps_codec->i4_bytes_remaining = ps_dec_ip->u4_num_Bytes;
ps_codec->pu1_inp_bitsbuf = (UWORD8 *)ps_dec_ip->pv_stream_buffer;
ps_codec->s_parse.i4_end_of_frame = 0;
ps_codec->i4_pic_present = 0;
ps_codec->i4_slice_error = 0;
ps_codec->ps_disp_buf = NULL;
if(ps_codec->i4_num_cores > 1)
{
ithread_set_affinity(0);
}
while(MIN_START_CODE_LEN < ps_codec->i4_bytes_remaining)
{
WORD32 nal_len;
WORD32 nal_ofst;
WORD32 bits_len;
if(ps_codec->i4_slice_error)
{
slice_header_t *ps_slice_hdr_next = ps_codec->s_parse.ps_slice_hdr_base + (ps_codec->s_parse.i4_cur_slice_idx & (MAX_SLICE_HDR_CNT - 1));
WORD32 next_slice_addr = ps_slice_hdr_next->i2_ctb_x +
ps_slice_hdr_next->i2_ctb_y * ps_codec->s_parse.ps_sps->i2_pic_wd_in_ctb;
if(ps_codec->s_parse.i4_next_ctb_indx == next_slice_addr)
ps_codec->i4_slice_error = 0;
}
if(ps_codec->pu1_bitsbuf_dynamic)
{
ps_codec->pu1_bitsbuf = ps_codec->pu1_bitsbuf_dynamic;
ps_codec->u4_bitsbuf_size = ps_codec->u4_bitsbuf_size_dynamic;
}
else
{
ps_codec->pu1_bitsbuf = ps_codec->pu1_bitsbuf_static;
ps_codec->u4_bitsbuf_size = ps_codec->u4_bitsbuf_size_static;
}
nal_ofst = ihevcd_nal_search_start_code(ps_codec->pu1_inp_bitsbuf,
ps_codec->i4_bytes_remaining);
ps_codec->i4_nal_ofst = nal_ofst;
{
WORD32 bytes_remaining = ps_codec->i4_bytes_remaining - nal_ofst;
bytes_remaining = MIN((UWORD32)bytes_remaining, ps_codec->u4_bitsbuf_size);
ihevcd_nal_remv_emuln_bytes(ps_codec->pu1_inp_bitsbuf + nal_ofst,
ps_codec->pu1_bitsbuf,
bytes_remaining,
&nal_len, &bits_len);
/* Decoder may read upto 8 extra bytes at the end of frame */
/* These are not used, but still set them to zero to avoid uninitialized reads */
if(bits_len < (WORD32)(ps_codec->u4_bitsbuf_size - 8))
{
memset(ps_codec->pu1_bitsbuf + bits_len, 0, 2 * sizeof(UWORD32));
}
}
/* This may be used to update the offsets for tiles and entropy sync row offsets */
ps_codec->i4_num_emln_bytes = nal_len - bits_len;
ps_codec->i4_nal_len = nal_len;
ihevcd_bits_init(&ps_codec->s_parse.s_bitstrm, ps_codec->pu1_bitsbuf,
bits_len);
ret = ihevcd_nal_unit(ps_codec);
/* If the frame is incomplete and
* the bytes remaining is zero or a header is received,
* complete the frame treating it to be in error */
if(ps_codec->i4_pic_present &&
(ps_codec->s_parse.i4_next_ctb_indx != ps_codec->s_parse.ps_sps->i4_pic_size_in_ctb))
{
if((ps_codec->i4_bytes_remaining - (nal_len + nal_ofst) <= MIN_START_CODE_LEN) ||
(ps_codec->i4_header_in_slice_mode))
{
slice_header_t *ps_slice_hdr_next;
ps_codec->s_parse.i4_cur_slice_idx--;
if(ps_codec->s_parse.i4_cur_slice_idx < 0)
ps_codec->s_parse.i4_cur_slice_idx = 0;
ps_slice_hdr_next = ps_codec->s_parse.ps_slice_hdr_base + ((ps_codec->s_parse.i4_cur_slice_idx + 1) & (MAX_SLICE_HDR_CNT - 1));
ps_slice_hdr_next->i2_ctb_x = 0;
ps_slice_hdr_next->i2_ctb_y = ps_codec->s_parse.ps_sps->i2_pic_ht_in_ctb;
ps_codec->i4_slice_error = 1;
continue;
}
}
if(IHEVCD_IGNORE_SLICE == ret)
{
ps_codec->s_parse.i4_cur_slice_idx = MAX(0, (ps_codec->s_parse.i4_cur_slice_idx - 1));
ps_codec->pu1_inp_bitsbuf += (nal_ofst + nal_len);
ps_codec->i4_bytes_remaining -= (nal_ofst + nal_len);
continue;
}
if((IVD_RES_CHANGED == ret) ||
(IHEVCD_UNSUPPORTED_DIMENSIONS == ret))
{
break;
}
/* Update bytes remaining and bytes consumed and input bitstream pointer */
/* Do not consume the NAL in the following cases */
/* Slice header reached during header decode mode */
/* TODO: Next picture's slice reached */
if(ret != IHEVCD_SLICE_IN_HEADER_MODE)
{
if((0 == ps_codec->i4_slice_error) ||
(ps_codec->i4_bytes_remaining - (nal_len + nal_ofst) <= MIN_START_CODE_LEN))
{
ps_codec->pu1_inp_bitsbuf += (nal_ofst + nal_len);
ps_codec->i4_bytes_remaining -= (nal_ofst + nal_len);
}
if(ret != IHEVCD_SUCCESS)
break;
if(ps_codec->s_parse.i4_end_of_frame)
break;
}
else
{
ret = IHEVCD_SUCCESS;
break;
}
/* Allocate dynamic bitstream buffer once SPS is decoded */
if((ps_codec->u4_allocate_dynamic_done == 0) && ps_codec->i4_sps_done)
{
WORD32 ret;
ret = ihevcd_allocate_dynamic_bufs(ps_codec);
if(ret != IV_SUCCESS)
{
/* Free any dynamic buffers that are allocated */
ihevcd_free_dynamic_bufs(ps_codec);
ps_codec->i4_error_code = IVD_MEM_ALLOC_FAILED;
ps_dec_op->u4_error_code |= 1 << IVD_FATALERROR;
ps_dec_op->u4_error_code |= IVD_MEM_ALLOC_FAILED;
return IV_FAIL;
}
}
BREAK_AFTER_SLICE_NAL();
}
if((ps_codec->u4_pic_cnt == 0) && (ret != IHEVCD_SUCCESS))
{
ps_codec->i4_error_code = ret;
ihevcd_fill_outargs(ps_codec, ps_dec_ip, ps_dec_op);
return IV_FAIL;
}
if(1 == ps_codec->i4_pic_present)
{
WORD32 i;
sps_t *ps_sps = ps_codec->s_parse.ps_sps;
ps_codec->i4_first_pic_done = 1;
/*TODO temporary fix: end_of_frame is checked before adding format conversion to job queue */
if(ps_codec->i4_num_cores > 1 && ps_codec->s_parse.i4_end_of_frame)
{
/* Add job queue for format conversion / frame copy for each ctb row */
/* Only if the codec is in non-shared mode or in shared mode but needs 420P output */
process_ctxt_t *ps_proc;
/* i4_num_cores - 1 contexts are currently being used by other threads */
ps_proc = &ps_codec->as_process[ps_codec->i4_num_cores - 1];
if((ps_codec->ps_disp_buf) &&
((0 == ps_codec->i4_share_disp_buf) || (IV_YUV_420P == ps_codec->e_chroma_fmt)))
{
/* If format conversion jobs were not issued in pic_init() add them here */
if((0 == ps_codec->u4_enable_fmt_conv_ahead) ||
(ps_codec->i4_disp_buf_id == ps_proc->i4_cur_pic_buf_id))
for(i = 0; i < ps_sps->i2_pic_ht_in_ctb; i++)
{
proc_job_t s_job;
IHEVCD_ERROR_T ret;
s_job.i4_cmd = CMD_FMTCONV;
s_job.i2_ctb_cnt = 0;
s_job.i2_ctb_x = 0;
s_job.i2_ctb_y = i;
s_job.i2_slice_idx = 0;
s_job.i4_tu_coeff_data_ofst = 0;
ret = ihevcd_jobq_queue((jobq_t *)ps_codec->s_parse.pv_proc_jobq,
&s_job, sizeof(proc_job_t), 1);
if(ret != (IHEVCD_ERROR_T)IHEVCD_SUCCESS)
return (WORD32)ret;
}
}
/* Reached end of frame : Signal terminate */
/* The terminate flag is checked only after all the jobs are dequeued */
ret = ihevcd_jobq_terminate((jobq_t *)ps_codec->s_parse.pv_proc_jobq);
while(1)
{
IHEVCD_ERROR_T ret;
proc_job_t s_job;
process_ctxt_t *ps_proc;
/* i4_num_cores - 1 contexts are currently being used by other threads */
ps_proc = &ps_codec->as_process[ps_codec->i4_num_cores - 1];
ret = ihevcd_jobq_dequeue((jobq_t *)ps_proc->pv_proc_jobq, &s_job,
sizeof(proc_job_t), 1);
if((IHEVCD_ERROR_T)IHEVCD_SUCCESS != ret)
break;
ps_proc->i4_ctb_cnt = s_job.i2_ctb_cnt;
ps_proc->i4_ctb_x = s_job.i2_ctb_x;
ps_proc->i4_ctb_y = s_job.i2_ctb_y;
ps_proc->i4_cur_slice_idx = s_job.i2_slice_idx;
if(CMD_PROCESS == s_job.i4_cmd)
{
ihevcd_init_proc_ctxt(ps_proc, s_job.i4_tu_coeff_data_ofst);
ihevcd_process(ps_proc);
}
else if(CMD_FMTCONV == s_job.i4_cmd)
{
sps_t *ps_sps = ps_codec->s_parse.ps_sps;
WORD32 num_rows = 1 << ps_sps->i1_log2_ctb_size;
if(0 == ps_proc->i4_init_done)
{
ihevcd_init_proc_ctxt(ps_proc, 0);
}
num_rows = MIN(num_rows, (ps_codec->i4_disp_ht - (s_job.i2_ctb_y << ps_sps->i1_log2_ctb_size)));
if(num_rows < 0)
num_rows = 0;
ihevcd_fmt_conv(ps_codec, ps_proc,
ps_dec_ip->s_out_buffer.pu1_bufs[0],
ps_dec_ip->s_out_buffer.pu1_bufs[1],
ps_dec_ip->s_out_buffer.pu1_bufs[2],
s_job.i2_ctb_y << ps_sps->i1_log2_ctb_size,
num_rows);
}
}
}
/* In case of non-shared mode and while running in single core mode, then convert/copy the frame to output buffer */
/* Only if the codec is in non-shared mode or in shared mode but needs 420P output */
else if((ps_codec->ps_disp_buf) && ((0 == ps_codec->i4_share_disp_buf) ||
(IV_YUV_420P == ps_codec->e_chroma_fmt)) &&
(ps_codec->s_parse.i4_end_of_frame))
{
process_ctxt_t *ps_proc = &ps_codec->as_process[proc_idx];
/* Set remaining number of rows to be processed */
ps_codec->s_fmt_conv.i4_num_rows = ps_codec->i4_disp_ht
- ps_codec->s_fmt_conv.i4_cur_row;
if(0 == ps_proc->i4_init_done)
{
ihevcd_init_proc_ctxt(ps_proc, 0);
}
if(ps_codec->s_fmt_conv.i4_num_rows < 0)
ps_codec->s_fmt_conv.i4_num_rows = 0;
ret = ihevcd_fmt_conv(ps_codec, ps_proc,
ps_dec_ip->s_out_buffer.pu1_bufs[0],
ps_dec_ip->s_out_buffer.pu1_bufs[1],
ps_dec_ip->s_out_buffer.pu1_bufs[2],
ps_codec->s_fmt_conv.i4_cur_row,
ps_codec->s_fmt_conv.i4_num_rows);
ps_codec->s_fmt_conv.i4_cur_row += ps_codec->s_fmt_conv.i4_num_rows;
}
DEBUG_DUMP_MV_MAP(ps_codec);
/* Mark MV Buf as needed for reference */
ihevc_buf_mgr_set_status((buf_mgr_t *)ps_codec->pv_mv_buf_mgr,
ps_codec->as_process[proc_idx].i4_cur_mv_bank_buf_id,
BUF_MGR_REF);
/* Mark pic buf as needed for reference */
ihevc_buf_mgr_set_status((buf_mgr_t *)ps_codec->pv_pic_buf_mgr,
ps_codec->as_process[proc_idx].i4_cur_pic_buf_id,
BUF_MGR_REF);
/* Mark pic buf as needed for display */
ihevc_buf_mgr_set_status((buf_mgr_t *)ps_codec->pv_pic_buf_mgr,
ps_codec->as_process[proc_idx].i4_cur_pic_buf_id,
BUF_MGR_DISP);
/* Insert the current picture as short term reference */
ihevc_dpb_mgr_insert_ref((dpb_mgr_t *)ps_codec->pv_dpb_mgr,
ps_codec->as_process[proc_idx].ps_cur_pic,
ps_codec->as_process[proc_idx].i4_cur_pic_buf_id);
/* If a frame was displayed (in non-shared mode), then release it from display manager */
if((0 == ps_codec->i4_share_disp_buf) && (ps_codec->ps_disp_buf))
ihevc_buf_mgr_release((buf_mgr_t *)ps_codec->pv_pic_buf_mgr,
ps_codec->i4_disp_buf_id, BUF_MGR_DISP);
/* Wait for threads */
for(i = 0; i < (ps_codec->i4_num_cores - 1); i++)
{
if(ps_codec->ai4_process_thread_created[i])
{
ithread_join(ps_codec->apv_process_thread_handle[i], NULL);
ps_codec->ai4_process_thread_created[i] = 0;
}
}
DEBUG_VALIDATE_PADDED_REGION(&ps_codec->as_process[proc_idx]);
if(ps_codec->u4_pic_cnt > 0)
{
DEBUG_DUMP_PIC_PU(ps_codec);
}
DEBUG_DUMP_PIC_BUFFERS(ps_codec);
/* Increment the number of pictures decoded */
ps_codec->u4_pic_cnt++;
}
ihevcd_fill_outargs(ps_codec, ps_dec_ip, ps_dec_op);
if(1 == ps_dec_op->u4_output_present)
{
WORD32 xpos = ps_codec->i4_disp_wd - 32 - LOGO_WD;
WORD32 ypos = ps_codec->i4_disp_ht - 32 - LOGO_HT;
if(ypos < 0)
ypos = 0;
if(xpos < 0)
xpos = 0;
INSERT_LOGO(ps_dec_ip->s_out_buffer.pu1_bufs[0],
ps_dec_ip->s_out_buffer.pu1_bufs[1],
ps_dec_ip->s_out_buffer.pu1_bufs[2], ps_codec->i4_disp_strd,
xpos,
ypos,
ps_codec->e_chroma_fmt,
ps_codec->i4_disp_wd,
ps_codec->i4_disp_ht);
}
return ret;
}
| 174,070 |
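Editor's note on the row above: the substantive change in the fixed function is rolling back i4_cur_slice_idx for an ignored slice while clamping it at zero, so the masked lookup into ps_slice_hdr_base can never index before the array base. A minimal standalone sketch of that clamp idiom (the harness itself is hypothetical):
#include <stdio.h>

#define MAX(a, b) (((a) > (b)) ? (a) : (b))

int main(void)
{
    int cur_slice_idx = 0;

    /* Roll back after an ignored slice, clamping at zero so a later
     * (idx & (MAX_SLICE_HDR_CNT - 1)) lookup stays inside the header array. */
    cur_slice_idx = MAX(0, cur_slice_idx - 1);

    printf("%d\n", cur_slice_idx); /* prints 0, not -1 */
    return 0;
}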
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void RenderFrameHostImpl::CreateMediaStreamDispatcherHost(
MediaStreamManager* media_stream_manager,
mojom::MediaStreamDispatcherHostRequest request) {
DCHECK_CURRENTLY_ON(BrowserThread::IO);
if (!media_stream_dispatcher_host_) {
media_stream_dispatcher_host_.reset(new MediaStreamDispatcherHost(
GetProcess()->GetID(), GetRoutingID(), media_stream_manager));
}
media_stream_dispatcher_host_->BindRequest(std::move(request));
}
Commit Message: Make MediaStreamDispatcherHost per-request instead of per-frame.
Instead of having RenderFrameHost own a single MSDH to handle all
requests from a frame, MSDH objects will be owned by a strong binding.
A consequence of this is that an additional requester ID is added to
requests to MediaStreamManager, so that an MSDH is able to cancel only
requests generated by it.
In practice, MSDH will continue to be per frame in most cases since
each frame normally makes a single request for an MSDH object.
This fixes a lifetime issue caused by the IO thread executing tasks
after the RenderFrameHost dies.
Drive-by: Fix some minor lint issues.
Bug: 912520
Change-Id: I52742ffc98b9fc57ce8e6f5093a61aed86d3e516
Reviewed-on: https://chromium-review.googlesource.com/c/1369799
Reviewed-by: Emircan Uysaler <[email protected]>
Reviewed-by: Ken Buchanan <[email protected]>
Reviewed-by: Olga Sharonova <[email protected]>
Commit-Queue: Guido Urdaneta <[email protected]>
Cr-Commit-Position: refs/heads/master@{#616347}
CWE ID: CWE-189 | void RenderFrameHostImpl::CreateMediaStreamDispatcherHost(
| 173,089 |
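Editor's note: the output field of this row is truncated after the signature. The commit message describes moving MediaStreamDispatcherHost ownership to a strong binding; a hedged reconstruction of that pattern is sketched below. The signature and ID plumbing are assumptions based on the message, not the actual patch, though mojo::MakeStrongBinding is the real Chromium API of that era.
#include <memory>
#include <utility>

#include "mojo/public/cpp/bindings/strong_binding.h"

// The binding owns the host, so the host's lifetime follows the message pipe
// rather than the RenderFrameHost, avoiding IO-thread tasks that outlive it.
void CreateMediaStreamDispatcherHost(
    int render_process_id,
    int render_frame_id,
    MediaStreamManager* media_stream_manager,
    mojom::MediaStreamDispatcherHostRequest request) {
  mojo::MakeStrongBinding(
      std::make_unique<MediaStreamDispatcherHost>(
          render_process_id, render_frame_id, media_stream_manager),
      std::move(request));
}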
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: jbig2_sd_new(Jbig2Ctx *ctx, int n_symbols)
{
Jbig2SymbolDict *new = NULL;
if (n_symbols < 0) {
jbig2_error(ctx, JBIG2_SEVERITY_FATAL, -1, "Negative number of symbols in symbol dict: %d", n_symbols);
return NULL;
}
new = jbig2_new(ctx, Jbig2SymbolDict, 1);
if (new != NULL) {
new->glyphs = jbig2_new(ctx, Jbig2Image *, n_symbols);
new->n_symbols = n_symbols;
} else {
jbig2_error(ctx, JBIG2_SEVERITY_FATAL, -1, "unable to allocate new empty symbol dict");
return NULL;
}
if (new->glyphs != NULL) {
memset(new->glyphs, 0, n_symbols * sizeof(Jbig2Image *));
} else {
jbig2_error(ctx, JBIG2_SEVERITY_FATAL, -1, "unable to allocate glyphs for new empty symbol dict");
jbig2_free(ctx->allocator, new);
return NULL;
}
return new;
}
Commit Message:
CWE ID: CWE-119 | jbig2_sd_new(Jbig2Ctx *ctx, uint32_t n_symbols)
{
Jbig2SymbolDict *new_dict = NULL;
if (n_symbols < 0) {
jbig2_error(ctx, JBIG2_SEVERITY_FATAL, -1, "Negative number of symbols in symbol dict: %d", n_symbols);
return NULL;
}
new_dict = jbig2_new(ctx, Jbig2SymbolDict, 1);
if (new_dict != NULL) {
new_dict->glyphs = jbig2_new(ctx, Jbig2Image *, n_symbols);
new_dict->n_symbols = n_symbols;
} else {
jbig2_error(ctx, JBIG2_SEVERITY_FATAL, -1, "unable to allocate new empty symbol dict");
return NULL;
}
if (new_dict->glyphs != NULL) {
memset(new_dict->glyphs, 0, n_symbols * sizeof(Jbig2Image *));
} else {
jbig2_error(ctx, JBIG2_SEVERITY_FATAL, -1, "unable to allocate glyphs for new empty symbol dict");
jbig2_free(ctx->allocator, new_dict);
return NULL;
}
return new_dict;
}
| 165,502 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: int fpm_stdio_init_child(struct fpm_worker_pool_s *wp) /* {{{ */
{
#ifdef HAVE_SYSLOG_H
if (fpm_globals.error_log_fd == ZLOG_SYSLOG) {
closelog(); /* ensure to close syslog not to interrupt with PHP syslog code */
} else
#endif
/* Notice: child cannot use master error_log
* because not aware when being reopen
* else, should use if (!fpm_use_error_log())
*/
if (fpm_globals.error_log_fd > 0) {
close(fpm_globals.error_log_fd);
}
fpm_globals.error_log_fd = -1;
zlog_set_fd(-1);
if (wp->listening_socket != STDIN_FILENO) {
if (0 > dup2(wp->listening_socket, STDIN_FILENO)) {
zlog(ZLOG_SYSERROR, "failed to init child stdio: dup2()");
return -1;
}
}
return 0;
}
/* }}} */
Commit Message: Fixed bug #73342
Directly listen on socket, instead of duping it to STDIN and
listening on that.
CWE ID: CWE-400 | int fpm_stdio_init_child(struct fpm_worker_pool_s *wp) /* {{{ */
{
#ifdef HAVE_SYSLOG_H
if (fpm_globals.error_log_fd == ZLOG_SYSLOG) {
closelog(); /* ensure to close syslog not to interrupt with PHP syslog code */
} else
#endif
/* Notice: child cannot use master error_log
* because not aware when being reopen
* else, should use if (!fpm_use_error_log())
*/
if (fpm_globals.error_log_fd > 0) {
close(fpm_globals.error_log_fd);
}
fpm_globals.error_log_fd = -1;
zlog_set_fd(-1);
return 0;
}
/* }}} */
| 169,452 |
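Editor's note: the heart of the fix above is a deletion, so the after state can look opaque on its own. What changes downstream is that the accept path uses the pool's own listening socket instead of fd 0. A hedged sketch of that consequence; fpm_accept_request and fpm_pool_socket are hypothetical names, not php-src functions:
#include <stddef.h>
#include <sys/socket.h>

struct fpm_worker_pool_s;                                 /* opaque here */
extern int fpm_pool_socket(struct fpm_worker_pool_s *wp); /* hypothetical accessor */

/* Previously the worker did dup2(listening_socket, STDIN_FILENO) and the
 * FastCGI layer accepted on fd 0, so anything that closed or reused fd 0
 * could disturb the accept loop. Accepting on the real fd removes that. */
static int fpm_accept_request(struct fpm_worker_pool_s *wp)
{
    return accept(fpm_pool_socket(wp), NULL, NULL);
}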
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: const CuePoint* Cues::GetNext(const CuePoint* pCurr) const
{
if (pCurr == NULL)
return NULL;
assert(pCurr->GetTimeCode() >= 0);
assert(m_cue_points);
assert(m_count >= 1);
#if 0
const size_t count = m_count + m_preload_count;
size_t index = pCurr->m_index;
assert(index < count);
CuePoint* const* const pp = m_cue_points;
assert(pp);
assert(pp[index] == pCurr);
++index;
if (index >= count)
return NULL;
CuePoint* const pNext = pp[index];
assert(pNext);
pNext->Load(m_pSegment->m_pReader);
#else
long index = pCurr->m_index;
assert(index < m_count);
CuePoint* const* const pp = m_cue_points;
assert(pp);
assert(pp[index] == pCurr);
++index;
if (index >= m_count)
return NULL;
CuePoint* const pNext = pp[index];
assert(pNext);
assert(pNext->GetTimeCode() >= 0);
#endif
return pNext;
}
Commit Message: libwebm: Pull from upstream
Rolling mkvparser from upstream, primarily to fix a bug causing parsing
failures with certain Opus WebM files.
Upstream commit hash of this pull: 574045edd4ecbeb802ee3f1d214b5510269852ae
The diff is large because of style cleanups done upstream. It was
verified that those cleanups introduced no breaking changes.
Change-Id: Ib6e907175484b4b0ae1b55ab39522ea3188ad039
CWE ID: CWE-119 | const CuePoint* Cues::GetNext(const CuePoint* pCurr) const
{
    if (pCurr == NULL)
        return NULL;
assert(pCurr->GetTimeCode() >= 0);
assert(m_cue_points);
assert(m_count >= 1);
#if 0
const size_t count = m_count + m_preload_count;
size_t index = pCurr->m_index;
assert(index < count);
CuePoint* const* const pp = m_cue_points;
assert(pp);
assert(pp[index] == pCurr);
++index;
if (index >= count)
return NULL;
CuePoint* const pNext = pp[index];
assert(pNext);
pNext->Load(m_pSegment->m_pReader);
#else
long index = pCurr->m_index;
assert(index < m_count);
CuePoint* const* const pp = m_cue_points;
assert(pp);
assert(pp[index] == pCurr);
++index;
if (index >= m_count)
return NULL;
CuePoint* const pNext = pp[index];
assert(pNext);
assert(pNext->GetTimeCode() >= 0);
#endif
return pNext;
}
| 174,346 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: atol10(const char *p, size_t char_cnt)
{
uint64_t l;
int digit;
l = 0;
digit = *p - '0';
while (digit >= 0 && digit < 10 && char_cnt-- > 0) {
l = (l * 10) + digit;
digit = *++p - '0';
}
return (l);
}
Commit Message: Do something sensible for empty strings to make fuzzers happy.
CWE ID: CWE-125 | atol10(const char *p, size_t char_cnt)
{
uint64_t l;
int digit;
if (char_cnt == 0)
return (0);
l = 0;
digit = *p - '0';
while (digit >= 0 && digit < 10 && char_cnt-- > 0) {
l = (l * 10) + digit;
digit = *++p - '0';
}
return (l);
}
| 167,767 |
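Editor's note: the guard matters because the old code dereferenced *p before consulting char_cnt, an out-of-bounds read whenever the caller passes a zero count with p at the end of a buffer (the CWE-125 above). A small hypothetical harness around the fixed function:
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Fixed version, copied from the row above. */
static uint64_t atol10(const char *p, size_t char_cnt)
{
    uint64_t l;
    int digit;

    if (char_cnt == 0)
        return (0);
    l = 0;
    digit = *p - '0';
    while (digit >= 0 && digit < 10 && char_cnt-- > 0) {
        l = (l * 10) + digit;
        digit = *++p - '0';
    }
    return (l);
}

int main(void)
{
    const char buf[] = "123";

    /* A zero count must return 0 without touching *p at all. */
    printf("%llu\n", (unsigned long long)atol10(buf + 3, 0)); /* 0 */
    printf("%llu\n", (unsigned long long)atol10(buf, 3));     /* 123 */
    return 0;
}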
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: base::SharedMemoryHandle CreateMSKPInSharedMemory() {
SkDynamicMemoryWStream stream;
sk_sp<SkDocument> doc = SkMakeMultiPictureDocument(&stream);
cc::SkiaPaintCanvas canvas(doc->beginPage(800, 600));
SkRect rect = SkRect::MakeXYWH(10, 10, 250, 250);
cc::PaintFlags flags;
flags.setAntiAlias(false);
flags.setColor(SK_ColorRED);
flags.setStyle(cc::PaintFlags::kFill_Style);
canvas.drawRect(rect, flags);
doc->endPage();
doc->close();
size_t len = stream.bytesWritten();
base::SharedMemoryCreateOptions options;
options.size = len;
options.share_read_only = true;
base::SharedMemory shared_memory;
if (shared_memory.Create(options) && shared_memory.Map(len)) {
stream.copyTo(shared_memory.memory());
return base::SharedMemory::DuplicateHandle(shared_memory.handle());
}
return base::SharedMemoryHandle();
}
Commit Message: Correct mojo::WrapSharedMemoryHandle usage
Fixes some incorrect uses of mojo::WrapSharedMemoryHandle which
were assuming that the call actually has any control over the memory
protection applied to a handle when mapped.
Where fixing usage is infeasible for this CL, TODOs are added to
annotate follow-up work.
Also updates the API and documentation to (hopefully) improve clarity
and avoid similar mistakes from being made in the future.
BUG=792900
Cq-Include-Trybots: master.tryserver.chromium.android:android_optional_gpu_tests_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel
Change-Id: I0578aaa9ca3bfcb01aaf2451315d1ede95458477
Reviewed-on: https://chromium-review.googlesource.com/818282
Reviewed-by: Wei Li <[email protected]>
Reviewed-by: Lei Zhang <[email protected]>
Reviewed-by: John Abd-El-Malek <[email protected]>
Reviewed-by: Daniel Cheng <[email protected]>
Reviewed-by: Sadrul Chowdhury <[email protected]>
Reviewed-by: Yuzhu Shen <[email protected]>
Reviewed-by: Robert Sesek <[email protected]>
Commit-Queue: Ken Rockot <[email protected]>
Cr-Commit-Position: refs/heads/master@{#530268}
CWE ID: CWE-787 | base::SharedMemoryHandle CreateMSKPInSharedMemory() {
SkDynamicMemoryWStream stream;
sk_sp<SkDocument> doc = SkMakeMultiPictureDocument(&stream);
cc::SkiaPaintCanvas canvas(doc->beginPage(800, 600));
SkRect rect = SkRect::MakeXYWH(10, 10, 250, 250);
cc::PaintFlags flags;
flags.setAntiAlias(false);
flags.setColor(SK_ColorRED);
flags.setStyle(cc::PaintFlags::kFill_Style);
canvas.drawRect(rect, flags);
doc->endPage();
doc->close();
size_t len = stream.bytesWritten();
base::SharedMemoryCreateOptions options;
options.size = len;
options.share_read_only = true;
base::SharedMemory shared_memory;
if (shared_memory.Create(options) && shared_memory.Map(len)) {
stream.copyTo(shared_memory.memory());
return shared_memory.GetReadOnlyHandle();
}
return base::SharedMemoryHandle();
}
| 172,857 |
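Editor's note: the corrected call hands out a handle the OS will only map read-only, rather than wrapping a writable one and hoping. Current Chromium usually expresses the same intent with base::ReadOnlySharedMemoryRegion; the sketch below assumes that newer API rather than the base::SharedMemory used in the patch:
#include <cstring>
#include <utility>

#include "base/memory/read_only_shared_memory_region.h"

// Write once through the private writable mapping, then hand out a region
// that consumers can only map read-only.
base::ReadOnlySharedMemoryRegion CreateReadOnlyCopy(const void* data,
                                                    size_t len) {
  base::MappedReadOnlyRegion mapped =
      base::ReadOnlySharedMemoryRegion::Create(len);
  if (!mapped.IsValid())
    return base::ReadOnlySharedMemoryRegion();
  std::memcpy(mapped.mapping.memory(), data, len);
  return std::move(mapped.region);
}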
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: u32 h264bsdInitDpb(
dpbStorage_t *dpb,
u32 picSizeInMbs,
u32 dpbSize,
u32 maxRefFrames,
u32 maxFrameNum,
u32 noReordering)
{
/* Variables */
u32 i;
/* Code */
ASSERT(picSizeInMbs);
ASSERT(maxRefFrames <= MAX_NUM_REF_PICS);
ASSERT(maxRefFrames <= dpbSize);
ASSERT(maxFrameNum);
ASSERT(dpbSize);
dpb->maxLongTermFrameIdx = NO_LONG_TERM_FRAME_INDICES;
dpb->maxRefFrames = MAX(maxRefFrames, 1);
if (noReordering)
dpb->dpbSize = dpb->maxRefFrames;
else
dpb->dpbSize = dpbSize;
dpb->maxFrameNum = maxFrameNum;
dpb->noReordering = noReordering;
dpb->fullness = 0;
dpb->numRefFrames = 0;
dpb->prevRefFrameNum = 0;
ALLOCATE(dpb->buffer, MAX_NUM_REF_IDX_L0_ACTIVE + 1, dpbPicture_t);
if (dpb->buffer == NULL)
return(MEMORY_ALLOCATION_ERROR);
H264SwDecMemset(dpb->buffer, 0,
(MAX_NUM_REF_IDX_L0_ACTIVE + 1)*sizeof(dpbPicture_t));
for (i = 0; i < dpb->dpbSize + 1; i++)
{
/* Allocate needed amount of memory, which is:
* image size + 32 + 15, where 32 cames from the fact that in ARM OpenMax
* DL implementation Functions may read beyond the end of an array,
* by a maximum of 32 bytes. And +15 cames for the need to align memory
* to 16-byte boundary */
ALLOCATE(dpb->buffer[i].pAllocatedData, (picSizeInMbs*384 + 32+15), u8);
if (dpb->buffer[i].pAllocatedData == NULL)
return(MEMORY_ALLOCATION_ERROR);
dpb->buffer[i].data = ALIGN(dpb->buffer[i].pAllocatedData, 16);
}
ALLOCATE(dpb->list, MAX_NUM_REF_IDX_L0_ACTIVE + 1, dpbPicture_t*);
ALLOCATE(dpb->outBuf, dpb->dpbSize+1, dpbOutPicture_t);
if (dpb->list == NULL || dpb->outBuf == NULL)
return(MEMORY_ALLOCATION_ERROR);
H264SwDecMemset(dpb->list, 0,
((MAX_NUM_REF_IDX_L0_ACTIVE + 1) * sizeof(dpbPicture_t*)) );
dpb->numOut = dpb->outIndex = 0;
return(HANTRO_OK);
}
Commit Message: Fix potential overflow
Bug: 28533562
Change-Id: I798ab24caa4c81f3ba564cad7c9ee019284fb702
CWE ID: CWE-119 | u32 h264bsdInitDpb(
dpbStorage_t *dpb,
u32 picSizeInMbs,
u32 dpbSize,
u32 maxRefFrames,
u32 maxFrameNum,
u32 noReordering)
{
/* Variables */
u32 i;
/* Code */
ASSERT(picSizeInMbs);
ASSERT(maxRefFrames <= MAX_NUM_REF_PICS);
ASSERT(maxRefFrames <= dpbSize);
ASSERT(maxFrameNum);
ASSERT(dpbSize);
// see comment in loop below about size calculation
if (picSizeInMbs > (UINT32_MAX - 32 - 15) / 384) {
ALOGE("b/28533562");
android_errorWriteLog(0x534e4554, "28533562");
return(MEMORY_ALLOCATION_ERROR);
}
dpb->maxLongTermFrameIdx = NO_LONG_TERM_FRAME_INDICES;
dpb->maxRefFrames = MAX(maxRefFrames, 1);
if (noReordering)
dpb->dpbSize = dpb->maxRefFrames;
else
dpb->dpbSize = dpbSize;
dpb->maxFrameNum = maxFrameNum;
dpb->noReordering = noReordering;
dpb->fullness = 0;
dpb->numRefFrames = 0;
dpb->prevRefFrameNum = 0;
ALLOCATE(dpb->buffer, MAX_NUM_REF_IDX_L0_ACTIVE + 1, dpbPicture_t);
if (dpb->buffer == NULL)
return(MEMORY_ALLOCATION_ERROR);
H264SwDecMemset(dpb->buffer, 0,
(MAX_NUM_REF_IDX_L0_ACTIVE + 1)*sizeof(dpbPicture_t));
for (i = 0; i < dpb->dpbSize + 1; i++)
{
/* Allocate needed amount of memory, which is:
* image size + 32 + 15, where 32 cames from the fact that in ARM OpenMax
* DL implementation Functions may read beyond the end of an array,
* by a maximum of 32 bytes. And +15 cames for the need to align memory
* to 16-byte boundary */
ALLOCATE(dpb->buffer[i].pAllocatedData, (picSizeInMbs*384 + 32+15), u8);
if (dpb->buffer[i].pAllocatedData == NULL)
return(MEMORY_ALLOCATION_ERROR);
dpb->buffer[i].data = ALIGN(dpb->buffer[i].pAllocatedData, 16);
}
ALLOCATE(dpb->list, MAX_NUM_REF_IDX_L0_ACTIVE + 1, dpbPicture_t*);
ALLOCATE(dpb->outBuf, dpb->dpbSize+1, dpbOutPicture_t);
if (dpb->list == NULL || dpb->outBuf == NULL)
return(MEMORY_ALLOCATION_ERROR);
H264SwDecMemset(dpb->list, 0,
((MAX_NUM_REF_IDX_L0_ACTIVE + 1) * sizeof(dpbPicture_t*)) );
dpb->numOut = dpb->outIndex = 0;
return(HANTRO_OK);
}
| 173,546 |
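Editor's note: the guard added in the fix is the standard pre-multiplication overflow check for unsigned arithmetic. Before computing a*b + c, reject any a greater than (UINT32_MAX - c) / b; integer division guarantees the surviving products fit. A standalone illustration (the function name is ours, not the decoder's):
#include <stdbool.h>
#include <stdint.h>

/* Stores a * b + c in *out and returns true only when the result fits in
 * uint32_t; mirrors the picSizeInMbs * 384 + 32 + 15 check above. If
 * a <= (UINT32_MAX - c) / b then a * b <= UINT32_MAX - c, so the final
 * addition cannot wrap either. */
static bool mul_add_u32_checked(uint32_t a, uint32_t b, uint32_t c,
                                uint32_t *out)
{
    if (b != 0 && a > (UINT32_MAX - c) / b)
        return false; /* a * b + c would overflow */
    *out = a * b + c;
    return true;
}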
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void UserSelectionScreen::FillUserMojoStruct(
const user_manager::User* user,
bool is_owner,
bool is_signin_to_add,
proximity_auth::mojom::AuthType auth_type,
const std::vector<std::string>* public_session_recommended_locales,
ash::mojom::LoginUserInfo* user_info) {
user_info->basic_user_info = ash::mojom::UserInfo::New();
user_info->basic_user_info->type = user->GetType();
user_info->basic_user_info->account_id = user->GetAccountId();
user_info->basic_user_info->display_name =
base::UTF16ToUTF8(user->GetDisplayName());
user_info->basic_user_info->display_email = user->display_email();
user_info->basic_user_info->avatar = BuildMojoUserAvatarForUser(user);
user_info->auth_type = auth_type;
user_info->is_signed_in = user->is_logged_in();
user_info->is_device_owner = is_owner;
user_info->can_remove = CanRemoveUser(user);
user_info->allow_fingerprint_unlock = AllowFingerprintForUser(user);
if (!is_signin_to_add) {
user_info->is_multiprofile_allowed = true;
} else {
GetMultiProfilePolicy(user, &user_info->is_multiprofile_allowed,
&user_info->multiprofile_policy);
}
if (user->GetType() == user_manager::USER_TYPE_PUBLIC_ACCOUNT) {
user_info->public_account_info = ash::mojom::PublicAccountInfo::New();
std::string domain;
if (GetEnterpriseDomain(&domain))
user_info->public_account_info->enterprise_domain = domain;
std::string selected_locale;
bool has_multiple_locales;
std::unique_ptr<base::ListValue> available_locales =
GetPublicSessionLocales(public_session_recommended_locales,
&selected_locale, &has_multiple_locales);
DCHECK(available_locales);
user_info->public_account_info->available_locales =
lock_screen_utils::FromListValueToLocaleItem(
std::move(available_locales));
user_info->public_account_info->default_locale = selected_locale;
user_info->public_account_info->show_advanced_view = has_multiple_locales;
}
}
Commit Message: cros: Check initial auth type when showing views login.
Bug: 859611
Change-Id: I0298db9bbf4aed6bd40600aef2e1c5794e8cd058
Reviewed-on: https://chromium-review.googlesource.com/1123056
Reviewed-by: Xiaoyin Hu <[email protected]>
Commit-Queue: Jacob Dufault <[email protected]>
Cr-Commit-Position: refs/heads/master@{#572224}
CWE ID: | void UserSelectionScreen::FillUserMojoStruct(
| 172,201 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: v8::Handle<v8::Value> V8DirectoryEntry::getFileCallback(const v8::Arguments& args)
{
INC_STATS("DOM.DirectoryEntry.getFile");
DirectoryEntry* imp = V8DirectoryEntry::toNative(args.Holder());
if (args.Length() < 1)
return V8Proxy::throwNotEnoughArgumentsError();
STRING_TO_V8PARAMETER_EXCEPTION_BLOCK(V8Parameter<WithUndefinedOrNullCheck>, path, args[0]);
if (args.Length() <= 1) {
imp->getFile(path);
return v8::Handle<v8::Value>();
}
RefPtr<WebKitFlags> flags;
if (!isUndefinedOrNull(args[1]) && args[1]->IsObject()) {
EXCEPTION_BLOCK(v8::Handle<v8::Object>, object, v8::Handle<v8::Object>::Cast(args[1]));
flags = WebKitFlags::create();
v8::Local<v8::Value> v8Create = object->Get(v8::String::New("create"));
if (!v8Create.IsEmpty() && !isUndefinedOrNull(v8Create)) {
EXCEPTION_BLOCK(bool, isCreate, v8Create->BooleanValue());
flags->setCreate(isCreate);
}
v8::Local<v8::Value> v8Exclusive = object->Get(v8::String::New("exclusive"));
if (!v8Exclusive.IsEmpty() && !isUndefinedOrNull(v8Exclusive)) {
EXCEPTION_BLOCK(bool, isExclusive, v8Exclusive->BooleanValue());
flags->setExclusive(isExclusive);
}
}
RefPtr<EntryCallback> successCallback;
if (args.Length() > 2 && !args[2]->IsNull() && !args[2]->IsUndefined()) {
if (!args[2]->IsObject())
return throwError(TYPE_MISMATCH_ERR, args.GetIsolate());
successCallback = V8EntryCallback::create(args[2], getScriptExecutionContext());
}
RefPtr<ErrorCallback> errorCallback;
if (args.Length() > 3 && !args[3]->IsNull() && !args[3]->IsUndefined()) {
if (!args[3]->IsObject())
return throwError(TYPE_MISMATCH_ERR, args.GetIsolate());
errorCallback = V8ErrorCallback::create(args[3], getScriptExecutionContext());
}
imp->getFile(path, flags, successCallback, errorCallback);
return v8::Handle<v8::Value>();
}
Commit Message: [V8] Pass Isolate to throwNotEnoughArgumentsError()
https://bugs.webkit.org/show_bug.cgi?id=86983
Reviewed by Adam Barth.
The objective is to pass Isolate around in V8 bindings.
This patch passes Isolate to throwNotEnoughArgumentsError().
No tests. No change in behavior.
* bindings/scripts/CodeGeneratorV8.pm:
(GenerateArgumentsCountCheck):
(GenerateEventConstructorCallback):
* bindings/scripts/test/V8/V8Float64Array.cpp:
(WebCore::Float64ArrayV8Internal::fooCallback):
* bindings/scripts/test/V8/V8TestActiveDOMObject.cpp:
(WebCore::TestActiveDOMObjectV8Internal::excitingFunctionCallback):
(WebCore::TestActiveDOMObjectV8Internal::postMessageCallback):
* bindings/scripts/test/V8/V8TestCustomNamedGetter.cpp:
(WebCore::TestCustomNamedGetterV8Internal::anotherFunctionCallback):
* bindings/scripts/test/V8/V8TestEventConstructor.cpp:
(WebCore::V8TestEventConstructor::constructorCallback):
* bindings/scripts/test/V8/V8TestEventTarget.cpp:
(WebCore::TestEventTargetV8Internal::itemCallback):
(WebCore::TestEventTargetV8Internal::dispatchEventCallback):
* bindings/scripts/test/V8/V8TestInterface.cpp:
(WebCore::TestInterfaceV8Internal::supplementalMethod2Callback):
(WebCore::V8TestInterface::constructorCallback):
* bindings/scripts/test/V8/V8TestMediaQueryListListener.cpp:
(WebCore::TestMediaQueryListListenerV8Internal::methodCallback):
* bindings/scripts/test/V8/V8TestNamedConstructor.cpp:
(WebCore::V8TestNamedConstructorConstructorCallback):
* bindings/scripts/test/V8/V8TestObj.cpp:
(WebCore::TestObjV8Internal::voidMethodWithArgsCallback):
(WebCore::TestObjV8Internal::intMethodWithArgsCallback):
(WebCore::TestObjV8Internal::objMethodWithArgsCallback):
(WebCore::TestObjV8Internal::methodWithSequenceArgCallback):
(WebCore::TestObjV8Internal::methodReturningSequenceCallback):
(WebCore::TestObjV8Internal::methodThatRequiresAllArgsAndThrowsCallback):
(WebCore::TestObjV8Internal::serializedValueCallback):
(WebCore::TestObjV8Internal::idbKeyCallback):
(WebCore::TestObjV8Internal::optionsObjectCallback):
(WebCore::TestObjV8Internal::methodWithNonOptionalArgAndOptionalArgCallback):
(WebCore::TestObjV8Internal::methodWithNonOptionalArgAndTwoOptionalArgsCallback):
(WebCore::TestObjV8Internal::methodWithCallbackArgCallback):
(WebCore::TestObjV8Internal::methodWithNonCallbackArgAndCallbackArgCallback):
(WebCore::TestObjV8Internal::overloadedMethod1Callback):
(WebCore::TestObjV8Internal::overloadedMethod2Callback):
(WebCore::TestObjV8Internal::overloadedMethod3Callback):
(WebCore::TestObjV8Internal::overloadedMethod4Callback):
(WebCore::TestObjV8Internal::overloadedMethod5Callback):
(WebCore::TestObjV8Internal::overloadedMethod6Callback):
(WebCore::TestObjV8Internal::overloadedMethod7Callback):
(WebCore::TestObjV8Internal::overloadedMethod11Callback):
(WebCore::TestObjV8Internal::overloadedMethod12Callback):
(WebCore::TestObjV8Internal::enabledAtRuntimeMethod1Callback):
(WebCore::TestObjV8Internal::enabledAtRuntimeMethod2Callback):
(WebCore::TestObjV8Internal::convert1Callback):
(WebCore::TestObjV8Internal::convert2Callback):
(WebCore::TestObjV8Internal::convert3Callback):
(WebCore::TestObjV8Internal::convert4Callback):
(WebCore::TestObjV8Internal::convert5Callback):
(WebCore::TestObjV8Internal::strictFunctionCallback):
(WebCore::V8TestObj::constructorCallback):
* bindings/scripts/test/V8/V8TestSerializedScriptValueInterface.cpp:
(WebCore::TestSerializedScriptValueInterfaceV8Internal::acceptTransferListCallback):
(WebCore::V8TestSerializedScriptValueInterface::constructorCallback):
* bindings/v8/ScriptController.cpp:
(WebCore::setValueAndClosePopupCallback):
* bindings/v8/V8Proxy.cpp:
(WebCore::V8Proxy::throwNotEnoughArgumentsError):
* bindings/v8/V8Proxy.h:
(V8Proxy):
* bindings/v8/custom/V8AudioContextCustom.cpp:
(WebCore::V8AudioContext::constructorCallback):
* bindings/v8/custom/V8DataViewCustom.cpp:
(WebCore::V8DataView::getInt8Callback):
(WebCore::V8DataView::getUint8Callback):
(WebCore::V8DataView::setInt8Callback):
(WebCore::V8DataView::setUint8Callback):
* bindings/v8/custom/V8DirectoryEntryCustom.cpp:
(WebCore::V8DirectoryEntry::getDirectoryCallback):
(WebCore::V8DirectoryEntry::getFileCallback):
* bindings/v8/custom/V8IntentConstructor.cpp:
(WebCore::V8Intent::constructorCallback):
* bindings/v8/custom/V8SVGLengthCustom.cpp:
(WebCore::V8SVGLength::convertToSpecifiedUnitsCallback):
* bindings/v8/custom/V8WebGLRenderingContextCustom.cpp:
(WebCore::getObjectParameter):
(WebCore::V8WebGLRenderingContext::getAttachedShadersCallback):
(WebCore::V8WebGLRenderingContext::getExtensionCallback):
(WebCore::V8WebGLRenderingContext::getFramebufferAttachmentParameterCallback):
(WebCore::V8WebGLRenderingContext::getParameterCallback):
(WebCore::V8WebGLRenderingContext::getProgramParameterCallback):
(WebCore::V8WebGLRenderingContext::getShaderParameterCallback):
(WebCore::V8WebGLRenderingContext::getUniformCallback):
(WebCore::vertexAttribAndUniformHelperf):
(WebCore::uniformHelperi):
(WebCore::uniformMatrixHelper):
* bindings/v8/custom/V8WebKitMutationObserverCustom.cpp:
(WebCore::V8WebKitMutationObserver::constructorCallback):
(WebCore::V8WebKitMutationObserver::observeCallback):
* bindings/v8/custom/V8WebSocketCustom.cpp:
(WebCore::V8WebSocket::constructorCallback):
(WebCore::V8WebSocket::sendCallback):
* bindings/v8/custom/V8XMLHttpRequestCustom.cpp:
(WebCore::V8XMLHttpRequest::openCallback):
git-svn-id: svn://svn.chromium.org/blink/trunk@117736 bbb929c8-8fbe-4397-9dbb-9b2b20218538
CWE ID: | v8::Handle<v8::Value> V8DirectoryEntry::getFileCallback(const v8::Arguments& args)
{
INC_STATS("DOM.DirectoryEntry.getFile");
DirectoryEntry* imp = V8DirectoryEntry::toNative(args.Holder());
if (args.Length() < 1)
return V8Proxy::throwNotEnoughArgumentsError(args.GetIsolate());
STRING_TO_V8PARAMETER_EXCEPTION_BLOCK(V8Parameter<WithUndefinedOrNullCheck>, path, args[0]);
if (args.Length() <= 1) {
imp->getFile(path);
return v8::Handle<v8::Value>();
}
RefPtr<WebKitFlags> flags;
if (!isUndefinedOrNull(args[1]) && args[1]->IsObject()) {
EXCEPTION_BLOCK(v8::Handle<v8::Object>, object, v8::Handle<v8::Object>::Cast(args[1]));
flags = WebKitFlags::create();
v8::Local<v8::Value> v8Create = object->Get(v8::String::New("create"));
if (!v8Create.IsEmpty() && !isUndefinedOrNull(v8Create)) {
EXCEPTION_BLOCK(bool, isCreate, v8Create->BooleanValue());
flags->setCreate(isCreate);
}
v8::Local<v8::Value> v8Exclusive = object->Get(v8::String::New("exclusive"));
if (!v8Exclusive.IsEmpty() && !isUndefinedOrNull(v8Exclusive)) {
EXCEPTION_BLOCK(bool, isExclusive, v8Exclusive->BooleanValue());
flags->setExclusive(isExclusive);
}
}
RefPtr<EntryCallback> successCallback;
if (args.Length() > 2 && !args[2]->IsNull() && !args[2]->IsUndefined()) {
if (!args[2]->IsObject())
return throwError(TYPE_MISMATCH_ERR, args.GetIsolate());
successCallback = V8EntryCallback::create(args[2], getScriptExecutionContext());
}
RefPtr<ErrorCallback> errorCallback;
if (args.Length() > 3 && !args[3]->IsNull() && !args[3]->IsUndefined()) {
if (!args[3]->IsObject())
return throwError(TYPE_MISMATCH_ERR, args.GetIsolate());
errorCallback = V8ErrorCallback::create(args[3], getScriptExecutionContext());
}
imp->getFile(path, flags, successCallback, errorCallback);
return v8::Handle<v8::Value>();
}
| 171,117 |
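Editor's note: the long call-site list boils down to one mechanical change, threading the v8::Isolate into the error helper instead of letting it consult the current isolate. A hedged reconstruction of the helper's new shape; the exact error type and message in V8Proxy.cpp may differ:
// Every generated binding now passes args.GetIsolate() to this helper.
v8::Handle<v8::Value> V8Proxy::throwNotEnoughArgumentsError(v8::Isolate* isolate)
{
    return throwError(TypeError, "Not enough arguments", isolate);
}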
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: CCLayerTreeHostTest()
: m_beginning(false)
, m_endWhenBeginReturns(false)
, m_running(false)
, m_timedOut(false)
{
m_webThread = adoptPtr(webKitPlatformSupport()->createThread("CCLayerTreeHostTest"));
WebCompositor::setThread(m_webThread.get());
#if USE(THREADED_COMPOSITING)
m_settings.enableCompositorThread = true;
#else
m_settings.enableCompositorThread = false;
#endif
}
Commit Message: [chromium] Fix shutdown race when posting main thread task to CCThreadProxy and enable tests
https://bugs.webkit.org/show_bug.cgi?id=70161
Reviewed by David Levin.
Source/WebCore:
Adds a weak pointer mechanism to cancel main thread tasks posted to CCThreadProxy instances from the compositor
thread. Previously there was a race condition where main thread tasks could run even after the CCThreadProxy was
destroyed.
This race does not exist in the other direction because when tearing down a CCThreadProxy we first post a quit
task to the compositor thread and then suspend execution of the main thread until all compositor tasks for the
CCThreadProxy have been drained.
Covered by the now-enabled CCLayerTreeHostTest* unit tests.
* WebCore.gypi:
* platform/graphics/chromium/cc/CCScopedMainThreadProxy.h: Added.
(WebCore::CCScopedMainThreadProxy::create):
(WebCore::CCScopedMainThreadProxy::postTask):
(WebCore::CCScopedMainThreadProxy::shutdown):
(WebCore::CCScopedMainThreadProxy::CCScopedMainThreadProxy):
(WebCore::CCScopedMainThreadProxy::runTaskIfNotShutdown):
* platform/graphics/chromium/cc/CCThreadProxy.cpp:
(WebCore::CCThreadProxy::CCThreadProxy):
(WebCore::CCThreadProxy::~CCThreadProxy):
(WebCore::CCThreadProxy::createBeginFrameAndCommitTaskOnCCThread):
* platform/graphics/chromium/cc/CCThreadProxy.h:
Source/WebKit/chromium:
Enables the CCLayerTreeHostTest* tests by default. Most tests are run twice, once in the single-thread and
once in the multiple-thread configuration. Some tests run only in the multiple-thread configuration because
they depend on the compositor thread scheduling draws by itself.
* tests/CCLayerTreeHostTest.cpp:
(::CCLayerTreeHostTest::timeout):
(::CCLayerTreeHostTest::clearTimeout):
(::CCLayerTreeHostTest::CCLayerTreeHostTest):
(::CCLayerTreeHostTest::onEndTest):
(::CCLayerTreeHostTest::TimeoutTask::TimeoutTask):
(::CCLayerTreeHostTest::TimeoutTask::clearTest):
(::CCLayerTreeHostTest::TimeoutTask::~TimeoutTask):
(::CCLayerTreeHostTest::TimeoutTask::Run):
(::CCLayerTreeHostTest::runTest):
(::CCLayerTreeHostTest::doBeginTest):
(::CCLayerTreeHostTestThreadOnly::runTest):
(::CCLayerTreeHostTestSetNeedsRedraw::commitCompleteOnCCThread):
git-svn-id: svn://svn.chromium.org/blink/trunk@97784 bbb929c8-8fbe-4397-9dbb-9b2b20218538
CWE ID: CWE-119 | CCLayerTreeHostTest()
: m_beginning(false)
, m_endWhenBeginReturns(false)
, m_timedOut(false)
{
m_webThread = adoptPtr(webKitPlatformSupport()->createThread("CCLayerTreeHostTest"));
WebCompositor::setThread(m_webThread.get());
#ifndef NDEBUG
CCProxy::setMainThread(currentThread());
#endif
ASSERT(CCProxy::isMainThread());
m_mainThreadProxy = CCScopedMainThreadProxy::create();
}
| 170,290 |
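Editor's note: the CCScopedMainThreadProxy introduced above can be understood as a ref-counted wrapper whose shutdown() turns still-queued main-thread tasks into no-ops. A simplified single-threaded sketch; std::atomic and the explicit queue stand in for WebKit's threading primitives and task runner:
#include <atomic>
#include <cstdio>
#include <functional>
#include <memory>
#include <vector>

// Tasks capture a shared_ptr to the proxy and re-check the shutdown flag at
// run time, so nothing executes against an already-destroyed CCThreadProxy.
class ScopedProxy : public std::enable_shared_from_this<ScopedProxy> {
public:
    void postTask(std::vector<std::function<void()>>& queue,
                  std::function<void()> task) {
        auto self = shared_from_this();
        queue.push_back([self, task = std::move(task)] {
            if (!self->m_shutdown.load())
                task(); // skipped once the owner has shut down
        });
    }
    void shutdown() { m_shutdown.store(true); }

private:
    std::atomic<bool> m_shutdown { false };
};

int main()
{
    auto proxy = std::make_shared<ScopedProxy>();
    std::vector<std::function<void()>> mainThreadQueue;

    proxy->postTask(mainThreadQueue, [] { std::puts("ran"); });
    proxy->shutdown();                   // owner is being torn down
    for (auto& t : mainThreadQueue) t(); // the queued task is now a no-op
    return 0;
}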
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static void worker_process(int fd, debugger_request_t& request) {
std::string tombstone_path;
int tombstone_fd = -1;
switch (request.action) {
case DEBUGGER_ACTION_DUMP_TOMBSTONE:
case DEBUGGER_ACTION_CRASH:
tombstone_fd = open_tombstone(&tombstone_path);
if (tombstone_fd == -1) {
ALOGE("debuggerd: failed to open tombstone file: %s\n", strerror(errno));
exit(1);
}
break;
case DEBUGGER_ACTION_DUMP_BACKTRACE:
break;
default:
ALOGE("debuggerd: unexpected request action: %d", request.action);
exit(1);
}
if (ptrace(PTRACE_ATTACH, request.tid, 0, 0) != 0) {
ALOGE("debuggerd: ptrace attach failed: %s", strerror(errno));
exit(1);
}
bool attach_gdb = should_attach_gdb(request);
if (attach_gdb) {
if (init_getevent() != 0) {
ALOGE("debuggerd: failed to initialize input device, not waiting for gdb");
attach_gdb = false;
}
}
std::set<pid_t> siblings;
if (!attach_gdb) {
ptrace_siblings(request.pid, request.tid, siblings);
}
std::unique_ptr<BacktraceMap> backtrace_map(BacktraceMap::Create(request.pid));
int amfd = -1;
std::unique_ptr<std::string> amfd_data;
if (request.action == DEBUGGER_ACTION_CRASH) {
amfd = activity_manager_connect();
amfd_data.reset(new std::string);
}
bool succeeded = false;
if (!drop_privileges()) {
ALOGE("debuggerd: failed to drop privileges, exiting");
_exit(1);
}
int crash_signal = SIGKILL;
succeeded = perform_dump(request, fd, tombstone_fd, backtrace_map.get(), siblings,
&crash_signal, amfd_data.get());
if (succeeded) {
if (request.action == DEBUGGER_ACTION_DUMP_TOMBSTONE) {
if (!tombstone_path.empty()) {
android::base::WriteFully(fd, tombstone_path.c_str(), tombstone_path.length());
}
}
}
if (attach_gdb) {
if (!send_signal(request.pid, 0, SIGSTOP)) {
ALOGE("debuggerd: failed to stop process for gdb attach: %s", strerror(errno));
attach_gdb = false;
}
}
if (!attach_gdb) {
activity_manager_write(request.pid, crash_signal, amfd, *amfd_data.get());
}
if (ptrace(PTRACE_DETACH, request.tid, 0, 0) != 0) {
ALOGE("debuggerd: ptrace detach from %d failed: %s", request.tid, strerror(errno));
}
for (pid_t sibling : siblings) {
ptrace(PTRACE_DETACH, sibling, 0, 0);
}
if (!attach_gdb && request.action == DEBUGGER_ACTION_CRASH) {
if (!send_signal(request.pid, request.tid, crash_signal)) {
ALOGE("debuggerd: failed to kill process %d: %s", request.pid, strerror(errno));
}
}
if (attach_gdb) {
wait_for_user_action(request);
activity_manager_write(request.pid, crash_signal, amfd, *amfd_data.get());
if (!send_signal(request.pid, 0, SIGCONT)) {
ALOGE("debuggerd: failed to resume process %d: %s", request.pid, strerror(errno));
}
uninit_getevent();
}
close(amfd);
exit(!succeeded);
}
Commit Message: debuggerd: verify that traced threads belong to the right process.
Fix two races in debuggerd's PTRACE_ATTACH logic:
1. The target thread in a crash dump request could exit between the
/proc/<pid>/task/<tid> check and the PTRACE_ATTACH.
2. Sibling threads could exit between listing /proc/<pid>/task and the
PTRACE_ATTACH.
Bug: http://b/29555636
Change-Id: I4dfe1ea30e2c211d2389321bd66e3684dd757591
CWE ID: CWE-264 | static void worker_process(int fd, debugger_request_t& request) {
std::string tombstone_path;
int tombstone_fd = -1;
switch (request.action) {
case DEBUGGER_ACTION_DUMP_TOMBSTONE:
case DEBUGGER_ACTION_CRASH:
tombstone_fd = open_tombstone(&tombstone_path);
if (tombstone_fd == -1) {
ALOGE("debuggerd: failed to open tombstone file: %s\n", strerror(errno));
exit(1);
}
break;
case DEBUGGER_ACTION_DUMP_BACKTRACE:
break;
default:
ALOGE("debuggerd: unexpected request action: %d", request.action);
exit(1);
}
if (!ptrace_attach_thread(request.pid, request.tid)) {
ALOGE("debuggerd: ptrace attach failed: %s", strerror(errno));
exit(1);
}
// DEBUGGER_ACTION_CRASH requests can come from arbitrary processes and the tid field in the
// request is sent from the other side. If an attacker can cause a process to be spawned with the
// pid of their process, they could trick debuggerd into dumping that process by exiting after
// sending the request. Validate the trusted request.uid/gid to defend against this.
if (request.action == DEBUGGER_ACTION_CRASH) {
pid_t pid;
uid_t uid;
gid_t gid;
if (get_process_info(request.tid, &pid, &uid, &gid) != 0) {
ALOGE("debuggerd: failed to get process info for tid '%d'", request.tid);
exit(1);
}
if (pid != request.pid || uid != request.uid || gid != request.gid) {
ALOGE(
"debuggerd: attached task %d does not match request: "
"expected pid=%d,uid=%d,gid=%d, actual pid=%d,uid=%d,gid=%d",
request.tid, request.pid, request.uid, request.gid, pid, uid, gid);
exit(1);
}
}
bool attach_gdb = should_attach_gdb(request);
if (attach_gdb) {
if (init_getevent() != 0) {
ALOGE("debuggerd: failed to initialize input device, not waiting for gdb");
attach_gdb = false;
}
}
std::set<pid_t> siblings;
if (!attach_gdb) {
ptrace_siblings(request.pid, request.tid, siblings);
}
std::unique_ptr<BacktraceMap> backtrace_map(BacktraceMap::Create(request.pid));
int amfd = -1;
std::unique_ptr<std::string> amfd_data;
if (request.action == DEBUGGER_ACTION_CRASH) {
amfd = activity_manager_connect();
amfd_data.reset(new std::string);
}
bool succeeded = false;
if (!drop_privileges()) {
ALOGE("debuggerd: failed to drop privileges, exiting");
_exit(1);
}
int crash_signal = SIGKILL;
succeeded = perform_dump(request, fd, tombstone_fd, backtrace_map.get(), siblings,
&crash_signal, amfd_data.get());
if (succeeded) {
if (request.action == DEBUGGER_ACTION_DUMP_TOMBSTONE) {
if (!tombstone_path.empty()) {
android::base::WriteFully(fd, tombstone_path.c_str(), tombstone_path.length());
}
}
}
if (attach_gdb) {
if (!send_signal(request.pid, 0, SIGSTOP)) {
ALOGE("debuggerd: failed to stop process for gdb attach: %s", strerror(errno));
attach_gdb = false;
}
}
if (!attach_gdb) {
activity_manager_write(request.pid, crash_signal, amfd, *amfd_data.get());
}
if (ptrace(PTRACE_DETACH, request.tid, 0, 0) != 0) {
ALOGE("debuggerd: ptrace detach from %d failed: %s", request.tid, strerror(errno));
}
for (pid_t sibling : siblings) {
ptrace(PTRACE_DETACH, sibling, 0, 0);
}
if (!attach_gdb && request.action == DEBUGGER_ACTION_CRASH) {
if (!send_signal(request.pid, request.tid, crash_signal)) {
ALOGE("debuggerd: failed to kill process %d: %s", request.pid, strerror(errno));
}
}
if (attach_gdb) {
wait_for_user_action(request);
activity_manager_write(request.pid, crash_signal, amfd, *amfd_data.get());
if (!send_signal(request.pid, 0, SIGCONT)) {
ALOGE("debuggerd: failed to resume process %d: %s", request.pid, strerror(errno));
}
uninit_getevent();
}
close(amfd);
exit(!succeeded);
}
| 173,408 |
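The second half of the fix validates the attached task against the trusted credentials on the request. A userspace sketch of what a get_process_info()-style helper might look like, parsing /proc/<tid>/status (the Tgid:/Uid:/Gid: keys are real procfs fields; the struct and function names are illustrative assumptions):

#include <fstream>
#include <sstream>
#include <string>
#include <sys/types.h>

struct ProcIdentity {
    pid_t tgid = -1;          // thread group (process) the tid belongs to
    uid_t uid = (uid_t)-1;    // real uid, first value on the Uid: line
    gid_t gid = (gid_t)-1;    // real gid, first value on the Gid: line
};

// Reads /proc/<tid>/status. Done after PTRACE_ATTACH has succeeded, so the
// tid can no longer be recycled out from under us while we inspect it.
bool readProcIdentity(pid_t tid, ProcIdentity* out) {
    std::ifstream status("/proc/" + std::to_string(tid) + "/status");
    std::string line;
    while (std::getline(status, line)) {
        std::istringstream in(line);
        std::string key;
        in >> key;
        if (key == "Tgid:")
            in >> out->tgid;
        else if (key == "Uid:")
            in >> out->uid;
        else if (key == "Gid:")
            in >> out->gid;
    }
    return out->tgid != -1 && out->uid != (uid_t)-1 && out->gid != (gid_t)-1;
}

Comparing the result against the pid/uid/gid that debuggerd took from the request closes both races: a recycled tid shows a different Tgid, and a forged crash request fails the uid/gid match.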
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static int inet6_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct inet_sock *inet;
struct ipv6_pinfo *np;
struct sock *sk;
struct inet_protosw *answer;
struct proto *answer_prot;
unsigned char answer_flags;
int try_loading_module = 0;
int err;
/* Look for the requested type/protocol pair. */
lookup_protocol:
err = -ESOCKTNOSUPPORT;
rcu_read_lock();
list_for_each_entry_rcu(answer, &inetsw6[sock->type], list) {
err = 0;
/* Check the non-wild match. */
if (protocol == answer->protocol) {
if (protocol != IPPROTO_IP)
break;
} else {
/* Check for the two wild cases. */
if (IPPROTO_IP == protocol) {
protocol = answer->protocol;
break;
}
if (IPPROTO_IP == answer->protocol)
break;
}
err = -EPROTONOSUPPORT;
}
if (err) {
if (try_loading_module < 2) {
rcu_read_unlock();
/*
* Be more specific, e.g. net-pf-10-proto-132-type-1
* (net-pf-PF_INET6-proto-IPPROTO_SCTP-type-SOCK_STREAM)
*/
if (++try_loading_module == 1)
request_module("net-pf-%d-proto-%d-type-%d",
PF_INET6, protocol, sock->type);
/*
* Fall back to generic, e.g. net-pf-10-proto-132
* (net-pf-PF_INET6-proto-IPPROTO_SCTP)
*/
else
request_module("net-pf-%d-proto-%d",
PF_INET6, protocol);
goto lookup_protocol;
} else
goto out_rcu_unlock;
}
err = -EPERM;
if (sock->type == SOCK_RAW && !kern &&
!ns_capable(net->user_ns, CAP_NET_RAW))
goto out_rcu_unlock;
sock->ops = answer->ops;
answer_prot = answer->prot;
answer_flags = answer->flags;
rcu_read_unlock();
WARN_ON(!answer_prot->slab);
err = -ENOBUFS;
sk = sk_alloc(net, PF_INET6, GFP_KERNEL, answer_prot, kern);
if (!sk)
goto out;
sock_init_data(sock, sk);
err = 0;
if (INET_PROTOSW_REUSE & answer_flags)
sk->sk_reuse = SK_CAN_REUSE;
inet = inet_sk(sk);
inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0;
if (SOCK_RAW == sock->type) {
inet->inet_num = protocol;
if (IPPROTO_RAW == protocol)
inet->hdrincl = 1;
}
sk->sk_destruct = inet_sock_destruct;
sk->sk_family = PF_INET6;
sk->sk_protocol = protocol;
sk->sk_backlog_rcv = answer->prot->backlog_rcv;
inet_sk(sk)->pinet6 = np = inet6_sk_generic(sk);
np->hop_limit = -1;
np->mcast_hops = IPV6_DEFAULT_MCASTHOPS;
np->mc_loop = 1;
np->pmtudisc = IPV6_PMTUDISC_WANT;
np->autoflowlabel = ip6_default_np_autolabel(sock_net(sk));
sk->sk_ipv6only = net->ipv6.sysctl.bindv6only;
/* Init the ipv4 part of the socket since we can have sockets
* using v6 API for ipv4.
*/
inet->uc_ttl = -1;
inet->mc_loop = 1;
inet->mc_ttl = 1;
inet->mc_index = 0;
inet->mc_list = NULL;
inet->rcv_tos = 0;
if (net->ipv4.sysctl_ip_no_pmtu_disc)
inet->pmtudisc = IP_PMTUDISC_DONT;
else
inet->pmtudisc = IP_PMTUDISC_WANT;
/*
* Increment only the relevant sk_prot->socks debug field, this changes
* the previous behaviour of incrementing both the equivalent to
* answer->prot->socks (inet6_sock_nr) and inet_sock_nr.
*
* This allows better debug granularity as we'll know exactly how many
* UDPv6, TCPv6, etc socks were allocated, not the sum of all IPv6
* transport protocol socks. -acme
*/
sk_refcnt_debug_inc(sk);
if (inet->inet_num) {
/* It assumes that any protocol which allows
* the user to assign a number at socket
* creation time automatically shares.
*/
inet->inet_sport = htons(inet->inet_num);
sk->sk_prot->hash(sk);
}
if (sk->sk_prot->init) {
err = sk->sk_prot->init(sk);
if (err) {
sk_common_release(sk);
goto out;
}
}
out:
return err;
out_rcu_unlock:
rcu_read_unlock();
goto out;
}
Commit Message: net: add validation for the socket syscall protocol argument
郭永刚 reported that one could simply crash the kernel as root by
using a simple program:
int socket_fd;
struct sockaddr_in addr;
addr.sin_port = 0;
addr.sin_addr.s_addr = INADDR_ANY;
addr.sin_family = 10;
socket_fd = socket(10,3,0x40000000);
connect(socket_fd , &addr,16);
AF_INET, AF_INET6 sockets actually only support 8-bit protocol
identifiers. inet_sock's skc_protocol field is sized accordingly, so
larger protocol identifiers simply have their higher bits cut off and
a zero stored in the protocol field.
This could lead to e.g. NULL function pointer because as a result of
the cut off inet_num is zero and we call down to inet_autobind, which
is NULL for raw sockets.
kernel: Call Trace:
kernel: [<ffffffff816db90e>] ? inet_autobind+0x2e/0x70
kernel: [<ffffffff816db9a4>] inet_dgram_connect+0x54/0x80
kernel: [<ffffffff81645069>] SYSC_connect+0xd9/0x110
kernel: [<ffffffff810ac51b>] ? ptrace_notify+0x5b/0x80
kernel: [<ffffffff810236d8>] ? syscall_trace_enter_phase2+0x108/0x200
kernel: [<ffffffff81645e0e>] SyS_connect+0xe/0x10
kernel: [<ffffffff81779515>] tracesys_phase2+0x84/0x89
I found no particular commit which introduced this problem.
CVE: CVE-2015-8543
Cc: Cong Wang <[email protected]>
Reported-by: 郭永刚 <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
CWE ID: | static int inet6_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct inet_sock *inet;
struct ipv6_pinfo *np;
struct sock *sk;
struct inet_protosw *answer;
struct proto *answer_prot;
unsigned char answer_flags;
int try_loading_module = 0;
int err;
if (protocol < 0 || protocol >= IPPROTO_MAX)
return -EINVAL;
/* Look for the requested type/protocol pair. */
lookup_protocol:
err = -ESOCKTNOSUPPORT;
rcu_read_lock();
list_for_each_entry_rcu(answer, &inetsw6[sock->type], list) {
err = 0;
/* Check the non-wild match. */
if (protocol == answer->protocol) {
if (protocol != IPPROTO_IP)
break;
} else {
/* Check for the two wild cases. */
if (IPPROTO_IP == protocol) {
protocol = answer->protocol;
break;
}
if (IPPROTO_IP == answer->protocol)
break;
}
err = -EPROTONOSUPPORT;
}
if (err) {
if (try_loading_module < 2) {
rcu_read_unlock();
/*
* Be more specific, e.g. net-pf-10-proto-132-type-1
* (net-pf-PF_INET6-proto-IPPROTO_SCTP-type-SOCK_STREAM)
*/
if (++try_loading_module == 1)
request_module("net-pf-%d-proto-%d-type-%d",
PF_INET6, protocol, sock->type);
/*
* Fall back to generic, e.g. net-pf-10-proto-132
* (net-pf-PF_INET6-proto-IPPROTO_SCTP)
*/
else
request_module("net-pf-%d-proto-%d",
PF_INET6, protocol);
goto lookup_protocol;
} else
goto out_rcu_unlock;
}
err = -EPERM;
if (sock->type == SOCK_RAW && !kern &&
!ns_capable(net->user_ns, CAP_NET_RAW))
goto out_rcu_unlock;
sock->ops = answer->ops;
answer_prot = answer->prot;
answer_flags = answer->flags;
rcu_read_unlock();
WARN_ON(!answer_prot->slab);
err = -ENOBUFS;
sk = sk_alloc(net, PF_INET6, GFP_KERNEL, answer_prot, kern);
if (!sk)
goto out;
sock_init_data(sock, sk);
err = 0;
if (INET_PROTOSW_REUSE & answer_flags)
sk->sk_reuse = SK_CAN_REUSE;
inet = inet_sk(sk);
inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0;
if (SOCK_RAW == sock->type) {
inet->inet_num = protocol;
if (IPPROTO_RAW == protocol)
inet->hdrincl = 1;
}
sk->sk_destruct = inet_sock_destruct;
sk->sk_family = PF_INET6;
sk->sk_protocol = protocol;
sk->sk_backlog_rcv = answer->prot->backlog_rcv;
inet_sk(sk)->pinet6 = np = inet6_sk_generic(sk);
np->hop_limit = -1;
np->mcast_hops = IPV6_DEFAULT_MCASTHOPS;
np->mc_loop = 1;
np->pmtudisc = IPV6_PMTUDISC_WANT;
np->autoflowlabel = ip6_default_np_autolabel(sock_net(sk));
sk->sk_ipv6only = net->ipv6.sysctl.bindv6only;
/* Init the ipv4 part of the socket since we can have sockets
* using v6 API for ipv4.
*/
inet->uc_ttl = -1;
inet->mc_loop = 1;
inet->mc_ttl = 1;
inet->mc_index = 0;
inet->mc_list = NULL;
inet->rcv_tos = 0;
if (net->ipv4.sysctl_ip_no_pmtu_disc)
inet->pmtudisc = IP_PMTUDISC_DONT;
else
inet->pmtudisc = IP_PMTUDISC_WANT;
/*
* Increment only the relevant sk_prot->socks debug field, this changes
* the previous behaviour of incrementing both the equivalent to
* answer->prot->socks (inet6_sock_nr) and inet_sock_nr.
*
* This allows better debug granularity as we'll know exactly how many
* UDPv6, TCPv6, etc socks were allocated, not the sum of all IPv6
* transport protocol socks. -acme
*/
sk_refcnt_debug_inc(sk);
if (inet->inet_num) {
/* It assumes that any protocol which allows
* the user to assign a number at socket
* creation time automatically shares.
*/
inet->inet_sport = htons(inet->inet_num);
sk->sk_prot->hash(sk);
}
if (sk->sk_prot->init) {
err = sk->sk_prot->init(sk);
if (err) {
sk_common_release(sk);
goto out;
}
}
out:
return err;
out_rcu_unlock:
rcu_read_unlock();
goto out;
}
| 166,565 |
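The root cause is an 8-bit field silently truncating a 32-bit argument, turning an arbitrary protocol number into 0. A standalone sketch of the truncation and of the range check the patch adds (userspace, not kernel code; IPPROTO_MAX is 256 as in the kernel headers):

#include <cstdint>
#include <cstdio>

#define IPPROTO_MAX 256  // protocol ids must fit the 8-bit sk_protocol field

int checked_protocol(int protocol) {
    if (protocol < 0 || protocol >= IPPROTO_MAX)
        return -1;  // the kernel patch returns -EINVAL here
    return protocol;
}

int main() {
    // The PoC in the commit message passes 0x40000000. Stored into an 8-bit
    // field it becomes 0, i.e. "unspecified protocol", which is what sends
    // the raw socket down the NULL inet_autobind path.
    uint8_t sk_protocol = (uint8_t)0x40000000;
    std::printf("stored protocol: %u\n", sk_protocol);           // 0
    std::printf("checked: %d\n", checked_protocol(0x40000000));  // -1
    return 0;
}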
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: TestPaintArtifact& TestPaintArtifact::ScrollHitTest(
DisplayItemClient& client,
scoped_refptr<const TransformPaintPropertyNode> scroll_offset) {
display_item_list_.AllocateAndConstruct<ScrollHitTestDisplayItem>(
client, DisplayItem::kScrollHitTest, std::move(scroll_offset));
return *this;
}
Commit Message: Reland "[CI] Make paint property nodes non-ref-counted"
This reverts commit 887383b30842d9d9006e11bb6932660a3cb5b1b7.
Reason for revert: Retry in M69.
Original change's description:
> Revert "[CI] Make paint property nodes non-ref-counted"
>
> This reverts commit 70fc0b018c9517558b7aa2be00edf2debb449123.
>
> Reason for revert: Caused bugs found by clusterfuzz
>
> Original change's description:
> > [CI] Make paint property nodes non-ref-counted
> >
> > Now all paint property nodes are owned by ObjectPaintProperties
> > (and LocalFrameView temporarily before removing non-RLS mode).
> > Others just use raw pointers or references.
> >
> > Bug: 833496
> > Cq-Include-Trybots: master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_layout_tests_slimming_paint_v2
> > Change-Id: I2d544fe153bb94698623248748df63c8aa2081ae
> > Reviewed-on: https://chromium-review.googlesource.com/1031101
> > Reviewed-by: Tien-Ren Chen <[email protected]>
> > Commit-Queue: Xianzhu Wang <[email protected]>
> > Cr-Commit-Position: refs/heads/master@{#554626}
>
> [email protected],[email protected],[email protected]
>
> Change-Id: I02bb50d6744cb81a797246a0116b677e80a3c69f
> No-Presubmit: true
> No-Tree-Checks: true
> No-Try: true
> Bug: 833496,837932,837943
> Cq-Include-Trybots: master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_layout_tests_slimming_paint_v2
> Reviewed-on: https://chromium-review.googlesource.com/1034292
> Reviewed-by: Xianzhu Wang <[email protected]>
> Commit-Queue: Xianzhu Wang <[email protected]>
> Cr-Commit-Position: refs/heads/master@{#554653}
[email protected],[email protected],[email protected]
# Not skipping CQ checks because original CL landed > 1 day ago.
Bug: 833496, 837932, 837943
Change-Id: I0b4ef70db1f1f211ba97c30d617225355c750992
Cq-Include-Trybots: master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_layout_tests_slimming_paint_v2
Reviewed-on: https://chromium-review.googlesource.com/1083491
Commit-Queue: Xianzhu Wang <[email protected]>
Reviewed-by: Xianzhu Wang <[email protected]>
Cr-Commit-Position: refs/heads/master@{#563930}
CWE ID: | TestPaintArtifact& TestPaintArtifact::ScrollHitTest(
DisplayItemClient& client,
const TransformPaintPropertyNode& scroll_offset) {
display_item_list_.AllocateAndConstruct<ScrollHitTestDisplayItem>(
client, DisplayItem::kScrollHitTest, scroll_offset);
return *this;
}
| 171,849 |
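The diff above is one corner of the broader ownership change the commit describes: paint property nodes stop being scoped_refptr-shared and become singly owned, with consumers borrowing by const reference. A sketch of the before/after shape, using invented stand-ins for the Blink types:

#include <memory>

struct TransformNode {};  // stand-in for a paint property node

// Before: callers shared ownership, so lifetime was diffuse.
//   void addItem(scoped_refptr<const TransformNode> node);

// After: exactly one owner holds the node; everyone else borrows.
class PropertyTreeOwner {
public:
    const TransformNode& transform() const { return *transform_; }

private:
    std::unique_ptr<TransformNode> transform_ =
        std::make_unique<TransformNode>();
};

class DisplayItem {
public:
    explicit DisplayItem(const TransformNode& node) : node_(&node) {}

private:
    const TransformNode* node_;  // raw borrow: the owner must outlive us
};

The trade-off is visible in the revert history quoted above: raw borrows are cheaper than refcounting, but any borrower that outlives the owner becomes a use-after-free, which is exactly the kind of bug the intermediate revert says clusterfuzz found.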
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: int ssl3_accept(SSL *s)
{
BUF_MEM *buf;
unsigned long alg_k,Time=(unsigned long)time(NULL);
void (*cb)(const SSL *ssl,int type,int val)=NULL;
int ret= -1;
int new_state,state,skip=0;
RAND_add(&Time,sizeof(Time),0);
ERR_clear_error();
clear_sys_error();
if (s->info_callback != NULL)
cb=s->info_callback;
else if (s->ctx->info_callback != NULL)
cb=s->ctx->info_callback;
/* init things to blank */
s->in_handshake++;
if (!SSL_in_init(s) || SSL_in_before(s)) SSL_clear(s);
if (s->cert == NULL)
{
SSLerr(SSL_F_SSL3_ACCEPT,SSL_R_NO_CERTIFICATE_SET);
return(-1);
}
#ifndef OPENSSL_NO_HEARTBEATS
/* If we're awaiting a HeartbeatResponse, pretend we
* already got and don't await it anymore, because
* Heartbeats don't make sense during handshakes anyway.
*/
if (s->tlsext_hb_pending)
{
s->tlsext_hb_pending = 0;
s->tlsext_hb_seq++;
}
#endif
for (;;)
{
state=s->state;
switch (s->state)
{
case SSL_ST_RENEGOTIATE:
s->renegotiate=1;
/* s->state=SSL_ST_ACCEPT; */
case SSL_ST_BEFORE:
case SSL_ST_ACCEPT:
case SSL_ST_BEFORE|SSL_ST_ACCEPT:
case SSL_ST_OK|SSL_ST_ACCEPT:
s->server=1;
if (cb != NULL) cb(s,SSL_CB_HANDSHAKE_START,1);
if ((s->version>>8) != 3)
{
SSLerr(SSL_F_SSL3_ACCEPT, ERR_R_INTERNAL_ERROR);
return -1;
}
if (!ssl_security(s, SSL_SECOP_VERSION, 0,
s->version, NULL))
{
SSLerr(SSL_F_SSL3_ACCEPT, SSL_R_VERSION_TOO_LOW);
return -1;
}
s->type=SSL_ST_ACCEPT;
if (s->init_buf == NULL)
{
if ((buf=BUF_MEM_new()) == NULL)
{
ret= -1;
goto end;
}
if (!BUF_MEM_grow(buf,SSL3_RT_MAX_PLAIN_LENGTH))
{
BUF_MEM_free(buf);
ret= -1;
goto end;
}
s->init_buf=buf;
}
if (!ssl3_setup_buffers(s))
{
ret= -1;
goto end;
}
s->init_num=0;
s->s3->flags &= ~TLS1_FLAGS_SKIP_CERT_VERIFY;
s->s3->flags &= ~SSL3_FLAGS_CCS_OK;
/* Should have been reset by ssl3_get_finished, too. */
s->s3->change_cipher_spec = 0;
if (s->state != SSL_ST_RENEGOTIATE)
{
/* Ok, we now need to push on a buffering BIO so that
* the output is sent in a way that TCP likes :-)
*/
if (!ssl_init_wbio_buffer(s,1)) { ret= -1; goto end; }
ssl3_init_finished_mac(s);
s->state=SSL3_ST_SR_CLNT_HELLO_A;
s->ctx->stats.sess_accept++;
}
else if (!s->s3->send_connection_binding &&
!(s->options & SSL_OP_ALLOW_UNSAFE_LEGACY_RENEGOTIATION))
{
/* Server attempting to renegotiate with
* client that doesn't support secure
* renegotiation.
*/
SSLerr(SSL_F_SSL3_ACCEPT, SSL_R_UNSAFE_LEGACY_RENEGOTIATION_DISABLED);
ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_HANDSHAKE_FAILURE);
ret = -1;
goto end;
}
else
{
/* s->state == SSL_ST_RENEGOTIATE,
* we will just send a HelloRequest */
s->ctx->stats.sess_accept_renegotiate++;
s->state=SSL3_ST_SW_HELLO_REQ_A;
}
break;
case SSL3_ST_SW_HELLO_REQ_A:
case SSL3_ST_SW_HELLO_REQ_B:
s->shutdown=0;
ret=ssl3_send_hello_request(s);
if (ret <= 0) goto end;
s->s3->tmp.next_state=SSL3_ST_SW_HELLO_REQ_C;
s->state=SSL3_ST_SW_FLUSH;
s->init_num=0;
ssl3_init_finished_mac(s);
break;
case SSL3_ST_SW_HELLO_REQ_C:
s->state=SSL_ST_OK;
break;
case SSL3_ST_SR_CLNT_HELLO_A:
case SSL3_ST_SR_CLNT_HELLO_B:
case SSL3_ST_SR_CLNT_HELLO_C:
ret=ssl3_get_client_hello(s);
if (ret <= 0) goto end;
#ifndef OPENSSL_NO_SRP
s->state = SSL3_ST_SR_CLNT_HELLO_D;
case SSL3_ST_SR_CLNT_HELLO_D:
{
int al;
if ((ret = ssl_check_srp_ext_ClientHello(s,&al)) < 0)
{
/* callback indicates further work to be done */
s->rwstate=SSL_X509_LOOKUP;
goto end;
}
if (ret != SSL_ERROR_NONE)
{
ssl3_send_alert(s,SSL3_AL_FATAL,al);
/* This is not really an error but the only means
for a client to detect whether srp is supported. */
if (al != TLS1_AD_UNKNOWN_PSK_IDENTITY)
SSLerr(SSL_F_SSL3_ACCEPT,SSL_R_CLIENTHELLO_TLSEXT);
ret = SSL_TLSEXT_ERR_ALERT_FATAL;
ret= -1;
goto end;
}
}
#endif
s->renegotiate = 2;
s->state=SSL3_ST_SW_SRVR_HELLO_A;
s->init_num=0;
break;
case SSL3_ST_SW_SRVR_HELLO_A:
case SSL3_ST_SW_SRVR_HELLO_B:
ret=ssl3_send_server_hello(s);
if (ret <= 0) goto end;
#ifndef OPENSSL_NO_TLSEXT
if (s->hit)
{
if (s->tlsext_ticket_expected)
s->state=SSL3_ST_SW_SESSION_TICKET_A;
else
s->state=SSL3_ST_SW_CHANGE_A;
}
#else
if (s->hit)
s->state=SSL3_ST_SW_CHANGE_A;
#endif
else
s->state = SSL3_ST_SW_CERT_A;
s->init_num = 0;
break;
case SSL3_ST_SW_CERT_A:
case SSL3_ST_SW_CERT_B:
/* Check if it is anon DH or anon ECDH, */
/* normal PSK or KRB5 or SRP */
if (!(s->s3->tmp.new_cipher->algorithm_auth & (SSL_aNULL|SSL_aKRB5|SSL_aSRP))
&& !(s->s3->tmp.new_cipher->algorithm_mkey & SSL_kPSK))
{
ret=ssl3_send_server_certificate(s);
if (ret <= 0) goto end;
#ifndef OPENSSL_NO_TLSEXT
if (s->tlsext_status_expected)
s->state=SSL3_ST_SW_CERT_STATUS_A;
else
s->state=SSL3_ST_SW_KEY_EXCH_A;
}
else
{
skip = 1;
s->state=SSL3_ST_SW_KEY_EXCH_A;
}
#else
}
else
skip=1;
s->state=SSL3_ST_SW_KEY_EXCH_A;
#endif
s->init_num=0;
break;
case SSL3_ST_SW_KEY_EXCH_A:
case SSL3_ST_SW_KEY_EXCH_B:
alg_k = s->s3->tmp.new_cipher->algorithm_mkey;
/* clear this, it may get reset by
* send_server_key_exchange */
if ((s->options & SSL_OP_EPHEMERAL_RSA)
#ifndef OPENSSL_NO_KRB5
&& !(alg_k & SSL_kKRB5)
#endif /* OPENSSL_NO_KRB5 */
)
/* option SSL_OP_EPHEMERAL_RSA sends temporary RSA key
* even when forbidden by protocol specs
* (handshake may fail as clients are not required to
* be able to handle this) */
s->s3->tmp.use_rsa_tmp=1;
else
s->s3->tmp.use_rsa_tmp=0;
/* only send if a DH key exchange, fortezza or
* RSA but we have a sign only certificate
*
* PSK: may send PSK identity hints
*
* For ECC ciphersuites, we send a serverKeyExchange
* message only if the cipher suite is either
* ECDH-anon or ECDHE. In other cases, the
* server certificate contains the server's
* public key for key exchange.
*/
if (s->s3->tmp.use_rsa_tmp
/* PSK: send ServerKeyExchange if PSK identity
* hint if provided */
#ifndef OPENSSL_NO_PSK
|| ((alg_k & SSL_kPSK) && s->ctx->psk_identity_hint)
#endif
#ifndef OPENSSL_NO_SRP
/* SRP: send ServerKeyExchange */
|| (alg_k & SSL_kSRP)
#endif
|| (alg_k & SSL_kDHE)
|| (alg_k & SSL_kECDHE)
|| ((alg_k & SSL_kRSA)
&& (s->cert->pkeys[SSL_PKEY_RSA_ENC].privatekey == NULL
|| (SSL_C_IS_EXPORT(s->s3->tmp.new_cipher)
&& EVP_PKEY_size(s->cert->pkeys[SSL_PKEY_RSA_ENC].privatekey)*8 > SSL_C_EXPORT_PKEYLENGTH(s->s3->tmp.new_cipher)
)
)
)
)
{
ret=ssl3_send_server_key_exchange(s);
if (ret <= 0) goto end;
}
else
skip=1;
s->state=SSL3_ST_SW_CERT_REQ_A;
s->init_num=0;
break;
case SSL3_ST_SW_CERT_REQ_A:
case SSL3_ST_SW_CERT_REQ_B:
if (/* don't request cert unless asked for it: */
!(s->verify_mode & SSL_VERIFY_PEER) ||
/* if SSL_VERIFY_CLIENT_ONCE is set,
* don't request cert during re-negotiation: */
((s->session->peer != NULL) &&
(s->verify_mode & SSL_VERIFY_CLIENT_ONCE)) ||
/* never request cert in anonymous ciphersuites
* (see section "Certificate request" in SSL 3 drafts
* and in RFC 2246): */
((s->s3->tmp.new_cipher->algorithm_auth & SSL_aNULL) &&
/* ... except when the application insists on verification
* (against the specs, but s3_clnt.c accepts this for SSL 3) */
!(s->verify_mode & SSL_VERIFY_FAIL_IF_NO_PEER_CERT)) ||
/* never request cert in Kerberos ciphersuites */
(s->s3->tmp.new_cipher->algorithm_auth & SSL_aKRB5) ||
/* don't request certificate for SRP auth */
(s->s3->tmp.new_cipher->algorithm_auth & SSL_aSRP)
/* With normal PSK Certificates and
* Certificate Requests are omitted */
|| (s->s3->tmp.new_cipher->algorithm_mkey & SSL_kPSK))
{
/* no cert request */
skip=1;
s->s3->tmp.cert_request=0;
s->state=SSL3_ST_SW_SRVR_DONE_A;
if (s->s3->handshake_buffer)
if (!ssl3_digest_cached_records(s))
return -1;
}
else
{
s->s3->tmp.cert_request=1;
ret=ssl3_send_certificate_request(s);
if (ret <= 0) goto end;
#ifndef NETSCAPE_HANG_BUG
s->state=SSL3_ST_SW_SRVR_DONE_A;
#else
s->state=SSL3_ST_SW_FLUSH;
s->s3->tmp.next_state=SSL3_ST_SR_CERT_A;
#endif
s->init_num=0;
}
break;
case SSL3_ST_SW_SRVR_DONE_A:
case SSL3_ST_SW_SRVR_DONE_B:
ret=ssl3_send_server_done(s);
if (ret <= 0) goto end;
s->s3->tmp.next_state=SSL3_ST_SR_CERT_A;
s->state=SSL3_ST_SW_FLUSH;
s->init_num=0;
break;
case SSL3_ST_SW_FLUSH:
/* This code originally checked to see if
* any data was pending using BIO_CTRL_INFO
* and then flushed. This caused problems
* as documented in PR#1939. The proposed
* fix doesn't completely resolve this issue
* as buggy implementations of BIO_CTRL_PENDING
* still exist. So instead we just flush
* unconditionally.
*/
s->rwstate=SSL_WRITING;
if (BIO_flush(s->wbio) <= 0)
{
ret= -1;
goto end;
}
s->rwstate=SSL_NOTHING;
s->state=s->s3->tmp.next_state;
break;
case SSL3_ST_SR_CERT_A:
case SSL3_ST_SR_CERT_B:
if (s->s3->tmp.cert_request)
{
ret=ssl3_get_client_certificate(s);
if (ret <= 0) goto end;
}
s->init_num=0;
s->state=SSL3_ST_SR_KEY_EXCH_A;
break;
case SSL3_ST_SR_KEY_EXCH_A:
case SSL3_ST_SR_KEY_EXCH_B:
ret=ssl3_get_client_key_exchange(s);
if (ret <= 0)
goto end;
if (ret == 2)
{
/* For the ECDH ciphersuites when
* the client sends its ECDH pub key in
* a certificate, the CertificateVerify
* message is not sent.
* Also for GOST ciphersuites when
* the client uses its key from the certificate
* for key exchange.
*/
#if defined(OPENSSL_NO_TLSEXT) || defined(OPENSSL_NO_NEXTPROTONEG)
s->state=SSL3_ST_SR_FINISHED_A;
#else
if (s->s3->next_proto_neg_seen)
s->state=SSL3_ST_SR_NEXT_PROTO_A;
else
s->state=SSL3_ST_SR_FINISHED_A;
#endif
s->init_num = 0;
}
else if (SSL_USE_SIGALGS(s))
{
s->state=SSL3_ST_SR_CERT_VRFY_A;
s->init_num=0;
if (!s->session->peer)
break;
/* For sigalgs freeze the handshake buffer
* at this point and digest cached records.
*/
if (!s->s3->handshake_buffer)
{
SSLerr(SSL_F_SSL3_ACCEPT,ERR_R_INTERNAL_ERROR);
return -1;
}
s->s3->flags |= TLS1_FLAGS_KEEP_HANDSHAKE;
if (!ssl3_digest_cached_records(s))
return -1;
}
else
{
int offset=0;
int dgst_num;
s->state=SSL3_ST_SR_CERT_VRFY_A;
s->init_num=0;
/* We need to get hashes here so if there is
* a client cert, it can be verified
* FIXME - digest processing for CertificateVerify
* should be generalized. But it is next step
*/
if (s->s3->handshake_buffer)
if (!ssl3_digest_cached_records(s))
return -1;
for (dgst_num=0; dgst_num<SSL_MAX_DIGEST;dgst_num++)
if (s->s3->handshake_dgst[dgst_num])
{
int dgst_size;
s->method->ssl3_enc->cert_verify_mac(s,EVP_MD_CTX_type(s->s3->handshake_dgst[dgst_num]),&(s->s3->tmp.cert_verify_md[offset]));
dgst_size=EVP_MD_CTX_size(s->s3->handshake_dgst[dgst_num]);
if (dgst_size < 0)
{
ret = -1;
goto end;
}
offset+=dgst_size;
}
}
break;
case SSL3_ST_SR_CERT_VRFY_A:
case SSL3_ST_SR_CERT_VRFY_B:
/*
* This *should* be the first time we enable CCS, but be
* extra careful about surrounding code changes. We need
* to set this here because we don't know if we're
* expecting a CertificateVerify or not.
*/
if (!s->s3->change_cipher_spec)
s->s3->flags |= SSL3_FLAGS_CCS_OK;
/* we should decide if we expected this one */
ret=ssl3_get_cert_verify(s);
if (ret <= 0) goto end;
#if defined(OPENSSL_NO_TLSEXT) || defined(OPENSSL_NO_NEXTPROTONEG)
s->state=SSL3_ST_SR_FINISHED_A;
#else
if (s->s3->next_proto_neg_seen)
s->state=SSL3_ST_SR_NEXT_PROTO_A;
else
s->state=SSL3_ST_SR_FINISHED_A;
#endif
s->init_num=0;
break;
#if !defined(OPENSSL_NO_TLSEXT) && !defined(OPENSSL_NO_NEXTPROTONEG)
case SSL3_ST_SR_NEXT_PROTO_A:
case SSL3_ST_SR_NEXT_PROTO_B:
/*
* Enable CCS for resumed handshakes with NPN.
* In a full handshake with NPN, we end up here through
* SSL3_ST_SR_CERT_VRFY_B, where SSL3_FLAGS_CCS_OK was
* already set. Receiving a CCS clears the flag, so make
* sure not to re-enable it to ban duplicates.
* s->s3->change_cipher_spec is set when a CCS is
* processed in s3_pkt.c, and remains set until
* the client's Finished message is read.
*/
if (!s->s3->change_cipher_spec)
s->s3->flags |= SSL3_FLAGS_CCS_OK;
ret=ssl3_get_next_proto(s);
if (ret <= 0) goto end;
s->init_num = 0;
s->state=SSL3_ST_SR_FINISHED_A;
break;
#endif
case SSL3_ST_SR_FINISHED_A:
case SSL3_ST_SR_FINISHED_B:
/*
* Enable CCS for resumed handshakes without NPN.
* In a full handshake, we end up here through
* SSL3_ST_SR_CERT_VRFY_B, where SSL3_FLAGS_CCS_OK was
* already set. Receiving a CCS clears the flag, so make
* sure not to re-enable it to ban duplicates.
* s->s3->change_cipher_spec is set when a CCS is
* processed in s3_pkt.c, and remains set until
* the client's Finished message is read.
*/
if (!s->s3->change_cipher_spec)
s->s3->flags |= SSL3_FLAGS_CCS_OK;
ret=ssl3_get_finished(s,SSL3_ST_SR_FINISHED_A,
SSL3_ST_SR_FINISHED_B);
if (ret <= 0) goto end;
if (s->hit)
s->state=SSL_ST_OK;
#ifndef OPENSSL_NO_TLSEXT
else if (s->tlsext_ticket_expected)
s->state=SSL3_ST_SW_SESSION_TICKET_A;
#endif
else
s->state=SSL3_ST_SW_CHANGE_A;
s->init_num=0;
break;
#ifndef OPENSSL_NO_TLSEXT
case SSL3_ST_SW_SESSION_TICKET_A:
case SSL3_ST_SW_SESSION_TICKET_B:
ret=ssl3_send_newsession_ticket(s);
if (ret <= 0) goto end;
s->state=SSL3_ST_SW_CHANGE_A;
s->init_num=0;
break;
case SSL3_ST_SW_CERT_STATUS_A:
case SSL3_ST_SW_CERT_STATUS_B:
ret=ssl3_send_cert_status(s);
if (ret <= 0) goto end;
s->state=SSL3_ST_SW_KEY_EXCH_A;
s->init_num=0;
break;
#endif
case SSL3_ST_SW_CHANGE_A:
case SSL3_ST_SW_CHANGE_B:
s->session->cipher=s->s3->tmp.new_cipher;
if (!s->method->ssl3_enc->setup_key_block(s))
{ ret= -1; goto end; }
ret=ssl3_send_change_cipher_spec(s,
SSL3_ST_SW_CHANGE_A,SSL3_ST_SW_CHANGE_B);
if (ret <= 0) goto end;
s->state=SSL3_ST_SW_FINISHED_A;
s->init_num=0;
if (!s->method->ssl3_enc->change_cipher_state(s,
SSL3_CHANGE_CIPHER_SERVER_WRITE))
{
ret= -1;
goto end;
}
break;
case SSL3_ST_SW_FINISHED_A:
case SSL3_ST_SW_FINISHED_B:
ret=ssl3_send_finished(s,
SSL3_ST_SW_FINISHED_A,SSL3_ST_SW_FINISHED_B,
s->method->ssl3_enc->server_finished_label,
s->method->ssl3_enc->server_finished_label_len);
if (ret <= 0) goto end;
s->state=SSL3_ST_SW_FLUSH;
if (s->hit)
{
#if defined(OPENSSL_NO_TLSEXT) || defined(OPENSSL_NO_NEXTPROTONEG)
s->s3->tmp.next_state=SSL3_ST_SR_FINISHED_A;
#else
if (s->s3->next_proto_neg_seen)
{
s->s3->tmp.next_state=SSL3_ST_SR_NEXT_PROTO_A;
}
else
s->s3->tmp.next_state=SSL3_ST_SR_FINISHED_A;
#endif
}
else
s->s3->tmp.next_state=SSL_ST_OK;
s->init_num=0;
break;
case SSL_ST_OK:
/* clean a few things up */
ssl3_cleanup_key_block(s);
BUF_MEM_free(s->init_buf);
s->init_buf=NULL;
/* remove buffering on output */
ssl_free_wbio_buffer(s);
s->init_num=0;
if (s->renegotiate == 2) /* skipped if we just sent a HelloRequest */
{
s->renegotiate=0;
s->new_session=0;
ssl_update_cache(s,SSL_SESS_CACHE_SERVER);
s->ctx->stats.sess_accept_good++;
/* s->server=1; */
s->handshake_func=ssl3_accept;
if (cb != NULL) cb(s,SSL_CB_HANDSHAKE_DONE,1);
}
ret = 1;
goto end;
/* break; */
default:
SSLerr(SSL_F_SSL3_ACCEPT,SSL_R_UNKNOWN_STATE);
ret= -1;
goto end;
/* break; */
}
if (!s->s3->tmp.reuse_message && !skip)
{
if (s->debug)
{
if ((ret=BIO_flush(s->wbio)) <= 0)
goto end;
}
if ((cb != NULL) && (s->state != state))
{
new_state=s->state;
s->state=state;
cb(s,SSL_CB_ACCEPT_LOOP,1);
s->state=new_state;
}
}
skip=0;
}
Commit Message: Only allow ephemeral RSA keys in export ciphersuites.
OpenSSL clients would tolerate temporary RSA keys in non-export
ciphersuites. It also had an option SSL_OP_EPHEMERAL_RSA which
enabled this server side. Remove both options as they are a
protocol violation.
Thanks to Karthikeyan Bhargavan for reporting this issue.
(CVE-2015-0204)
Reviewed-by: Matt Caswell <[email protected]>
CWE ID: CWE-310 | int ssl3_accept(SSL *s)
{
BUF_MEM *buf;
unsigned long alg_k,Time=(unsigned long)time(NULL);
void (*cb)(const SSL *ssl,int type,int val)=NULL;
int ret= -1;
int new_state,state,skip=0;
RAND_add(&Time,sizeof(Time),0);
ERR_clear_error();
clear_sys_error();
if (s->info_callback != NULL)
cb=s->info_callback;
else if (s->ctx->info_callback != NULL)
cb=s->ctx->info_callback;
/* init things to blank */
s->in_handshake++;
if (!SSL_in_init(s) || SSL_in_before(s)) SSL_clear(s);
if (s->cert == NULL)
{
SSLerr(SSL_F_SSL3_ACCEPT,SSL_R_NO_CERTIFICATE_SET);
return(-1);
}
#ifndef OPENSSL_NO_HEARTBEATS
/* If we're awaiting a HeartbeatResponse, pretend we
* already got and don't await it anymore, because
* Heartbeats don't make sense during handshakes anyway.
*/
if (s->tlsext_hb_pending)
{
s->tlsext_hb_pending = 0;
s->tlsext_hb_seq++;
}
#endif
for (;;)
{
state=s->state;
switch (s->state)
{
case SSL_ST_RENEGOTIATE:
s->renegotiate=1;
/* s->state=SSL_ST_ACCEPT; */
case SSL_ST_BEFORE:
case SSL_ST_ACCEPT:
case SSL_ST_BEFORE|SSL_ST_ACCEPT:
case SSL_ST_OK|SSL_ST_ACCEPT:
s->server=1;
if (cb != NULL) cb(s,SSL_CB_HANDSHAKE_START,1);
if ((s->version>>8) != 3)
{
SSLerr(SSL_F_SSL3_ACCEPT, ERR_R_INTERNAL_ERROR);
return -1;
}
if (!ssl_security(s, SSL_SECOP_VERSION, 0,
s->version, NULL))
{
SSLerr(SSL_F_SSL3_ACCEPT, SSL_R_VERSION_TOO_LOW);
return -1;
}
s->type=SSL_ST_ACCEPT;
if (s->init_buf == NULL)
{
if ((buf=BUF_MEM_new()) == NULL)
{
ret= -1;
goto end;
}
if (!BUF_MEM_grow(buf,SSL3_RT_MAX_PLAIN_LENGTH))
{
BUF_MEM_free(buf);
ret= -1;
goto end;
}
s->init_buf=buf;
}
if (!ssl3_setup_buffers(s))
{
ret= -1;
goto end;
}
s->init_num=0;
s->s3->flags &= ~TLS1_FLAGS_SKIP_CERT_VERIFY;
s->s3->flags &= ~SSL3_FLAGS_CCS_OK;
/* Should have been reset by ssl3_get_finished, too. */
s->s3->change_cipher_spec = 0;
if (s->state != SSL_ST_RENEGOTIATE)
{
/* Ok, we now need to push on a buffering BIO so that
* the output is sent in a way that TCP likes :-)
*/
if (!ssl_init_wbio_buffer(s,1)) { ret= -1; goto end; }
ssl3_init_finished_mac(s);
s->state=SSL3_ST_SR_CLNT_HELLO_A;
s->ctx->stats.sess_accept++;
}
else if (!s->s3->send_connection_binding &&
!(s->options & SSL_OP_ALLOW_UNSAFE_LEGACY_RENEGOTIATION))
{
/* Server attempting to renegotiate with
* client that doesn't support secure
* renegotiation.
*/
SSLerr(SSL_F_SSL3_ACCEPT, SSL_R_UNSAFE_LEGACY_RENEGOTIATION_DISABLED);
ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_HANDSHAKE_FAILURE);
ret = -1;
goto end;
}
else
{
/* s->state == SSL_ST_RENEGOTIATE,
* we will just send a HelloRequest */
s->ctx->stats.sess_accept_renegotiate++;
s->state=SSL3_ST_SW_HELLO_REQ_A;
}
break;
case SSL3_ST_SW_HELLO_REQ_A:
case SSL3_ST_SW_HELLO_REQ_B:
s->shutdown=0;
ret=ssl3_send_hello_request(s);
if (ret <= 0) goto end;
s->s3->tmp.next_state=SSL3_ST_SW_HELLO_REQ_C;
s->state=SSL3_ST_SW_FLUSH;
s->init_num=0;
ssl3_init_finished_mac(s);
break;
case SSL3_ST_SW_HELLO_REQ_C:
s->state=SSL_ST_OK;
break;
case SSL3_ST_SR_CLNT_HELLO_A:
case SSL3_ST_SR_CLNT_HELLO_B:
case SSL3_ST_SR_CLNT_HELLO_C:
ret=ssl3_get_client_hello(s);
if (ret <= 0) goto end;
#ifndef OPENSSL_NO_SRP
s->state = SSL3_ST_SR_CLNT_HELLO_D;
case SSL3_ST_SR_CLNT_HELLO_D:
{
int al;
if ((ret = ssl_check_srp_ext_ClientHello(s,&al)) < 0)
{
/* callback indicates further work to be done */
s->rwstate=SSL_X509_LOOKUP;
goto end;
}
if (ret != SSL_ERROR_NONE)
{
ssl3_send_alert(s,SSL3_AL_FATAL,al);
/* This is not really an error but the only means
for a client to detect whether srp is supported. */
if (al != TLS1_AD_UNKNOWN_PSK_IDENTITY)
SSLerr(SSL_F_SSL3_ACCEPT,SSL_R_CLIENTHELLO_TLSEXT);
ret = SSL_TLSEXT_ERR_ALERT_FATAL;
ret= -1;
goto end;
}
}
#endif
s->renegotiate = 2;
s->state=SSL3_ST_SW_SRVR_HELLO_A;
s->init_num=0;
break;
case SSL3_ST_SW_SRVR_HELLO_A:
case SSL3_ST_SW_SRVR_HELLO_B:
ret=ssl3_send_server_hello(s);
if (ret <= 0) goto end;
#ifndef OPENSSL_NO_TLSEXT
if (s->hit)
{
if (s->tlsext_ticket_expected)
s->state=SSL3_ST_SW_SESSION_TICKET_A;
else
s->state=SSL3_ST_SW_CHANGE_A;
}
#else
if (s->hit)
s->state=SSL3_ST_SW_CHANGE_A;
#endif
else
s->state = SSL3_ST_SW_CERT_A;
s->init_num = 0;
break;
case SSL3_ST_SW_CERT_A:
case SSL3_ST_SW_CERT_B:
/* Check if it is anon DH or anon ECDH, */
/* normal PSK or KRB5 or SRP */
if (!(s->s3->tmp.new_cipher->algorithm_auth & (SSL_aNULL|SSL_aKRB5|SSL_aSRP))
&& !(s->s3->tmp.new_cipher->algorithm_mkey & SSL_kPSK))
{
ret=ssl3_send_server_certificate(s);
if (ret <= 0) goto end;
#ifndef OPENSSL_NO_TLSEXT
if (s->tlsext_status_expected)
s->state=SSL3_ST_SW_CERT_STATUS_A;
else
s->state=SSL3_ST_SW_KEY_EXCH_A;
}
else
{
skip = 1;
s->state=SSL3_ST_SW_KEY_EXCH_A;
}
#else
}
else
skip=1;
s->state=SSL3_ST_SW_KEY_EXCH_A;
#endif
s->init_num=0;
break;
case SSL3_ST_SW_KEY_EXCH_A:
case SSL3_ST_SW_KEY_EXCH_B:
alg_k = s->s3->tmp.new_cipher->algorithm_mkey;
/*
* clear this, it may get reset by
* send_server_key_exchange
*/
s->s3->tmp.use_rsa_tmp=0;
/* only send if a DH key exchange, fortezza or
* RSA but we have a sign only certificate
*
* PSK: may send PSK identity hints
*
* For ECC ciphersuites, we send a serverKeyExchange
* message only if the cipher suite is either
* ECDH-anon or ECDHE. In other cases, the
* server certificate contains the server's
* public key for key exchange.
*/
if (0
/* PSK: send ServerKeyExchange if PSK identity
* hint if provided */
#ifndef OPENSSL_NO_PSK
|| ((alg_k & SSL_kPSK) && s->ctx->psk_identity_hint)
#endif
#ifndef OPENSSL_NO_SRP
/* SRP: send ServerKeyExchange */
|| (alg_k & SSL_kSRP)
#endif
|| (alg_k & SSL_kDHE)
|| (alg_k & SSL_kECDHE)
|| ((alg_k & SSL_kRSA)
&& (s->cert->pkeys[SSL_PKEY_RSA_ENC].privatekey == NULL
|| (SSL_C_IS_EXPORT(s->s3->tmp.new_cipher)
&& EVP_PKEY_size(s->cert->pkeys[SSL_PKEY_RSA_ENC].privatekey)*8 > SSL_C_EXPORT_PKEYLENGTH(s->s3->tmp.new_cipher)
)
)
)
)
{
ret=ssl3_send_server_key_exchange(s);
if (ret <= 0) goto end;
}
else
skip=1;
s->state=SSL3_ST_SW_CERT_REQ_A;
s->init_num=0;
break;
case SSL3_ST_SW_CERT_REQ_A:
case SSL3_ST_SW_CERT_REQ_B:
if (/* don't request cert unless asked for it: */
!(s->verify_mode & SSL_VERIFY_PEER) ||
/* if SSL_VERIFY_CLIENT_ONCE is set,
* don't request cert during re-negotiation: */
((s->session->peer != NULL) &&
(s->verify_mode & SSL_VERIFY_CLIENT_ONCE)) ||
/* never request cert in anonymous ciphersuites
* (see section "Certificate request" in SSL 3 drafts
* and in RFC 2246): */
((s->s3->tmp.new_cipher->algorithm_auth & SSL_aNULL) &&
/* ... except when the application insists on verification
* (against the specs, but s3_clnt.c accepts this for SSL 3) */
!(s->verify_mode & SSL_VERIFY_FAIL_IF_NO_PEER_CERT)) ||
/* never request cert in Kerberos ciphersuites */
(s->s3->tmp.new_cipher->algorithm_auth & SSL_aKRB5) ||
/* don't request certificate for SRP auth */
(s->s3->tmp.new_cipher->algorithm_auth & SSL_aSRP)
/* With normal PSK Certificates and
* Certificate Requests are omitted */
|| (s->s3->tmp.new_cipher->algorithm_mkey & SSL_kPSK))
{
/* no cert request */
skip=1;
s->s3->tmp.cert_request=0;
s->state=SSL3_ST_SW_SRVR_DONE_A;
if (s->s3->handshake_buffer)
if (!ssl3_digest_cached_records(s))
return -1;
}
else
{
s->s3->tmp.cert_request=1;
ret=ssl3_send_certificate_request(s);
if (ret <= 0) goto end;
#ifndef NETSCAPE_HANG_BUG
s->state=SSL3_ST_SW_SRVR_DONE_A;
#else
s->state=SSL3_ST_SW_FLUSH;
s->s3->tmp.next_state=SSL3_ST_SR_CERT_A;
#endif
s->init_num=0;
}
break;
case SSL3_ST_SW_SRVR_DONE_A:
case SSL3_ST_SW_SRVR_DONE_B:
ret=ssl3_send_server_done(s);
if (ret <= 0) goto end;
s->s3->tmp.next_state=SSL3_ST_SR_CERT_A;
s->state=SSL3_ST_SW_FLUSH;
s->init_num=0;
break;
case SSL3_ST_SW_FLUSH:
/* This code originally checked to see if
* any data was pending using BIO_CTRL_INFO
* and then flushed. This caused problems
* as documented in PR#1939. The proposed
* fix doesn't completely resolve this issue
* as buggy implementations of BIO_CTRL_PENDING
* still exist. So instead we just flush
* unconditionally.
*/
s->rwstate=SSL_WRITING;
if (BIO_flush(s->wbio) <= 0)
{
ret= -1;
goto end;
}
s->rwstate=SSL_NOTHING;
s->state=s->s3->tmp.next_state;
break;
case SSL3_ST_SR_CERT_A:
case SSL3_ST_SR_CERT_B:
if (s->s3->tmp.cert_request)
{
ret=ssl3_get_client_certificate(s);
if (ret <= 0) goto end;
}
s->init_num=0;
s->state=SSL3_ST_SR_KEY_EXCH_A;
break;
case SSL3_ST_SR_KEY_EXCH_A:
case SSL3_ST_SR_KEY_EXCH_B:
ret=ssl3_get_client_key_exchange(s);
if (ret <= 0)
goto end;
if (ret == 2)
{
/* For the ECDH ciphersuites when
* the client sends its ECDH pub key in
* a certificate, the CertificateVerify
* message is not sent.
* Also for GOST ciphersuites when
* the client uses its key from the certificate
* for key exchange.
*/
#if defined(OPENSSL_NO_TLSEXT) || defined(OPENSSL_NO_NEXTPROTONEG)
s->state=SSL3_ST_SR_FINISHED_A;
#else
if (s->s3->next_proto_neg_seen)
s->state=SSL3_ST_SR_NEXT_PROTO_A;
else
s->state=SSL3_ST_SR_FINISHED_A;
#endif
s->init_num = 0;
}
else if (SSL_USE_SIGALGS(s))
{
s->state=SSL3_ST_SR_CERT_VRFY_A;
s->init_num=0;
if (!s->session->peer)
break;
/* For sigalgs freeze the handshake buffer
* at this point and digest cached records.
*/
if (!s->s3->handshake_buffer)
{
SSLerr(SSL_F_SSL3_ACCEPT,ERR_R_INTERNAL_ERROR);
return -1;
}
s->s3->flags |= TLS1_FLAGS_KEEP_HANDSHAKE;
if (!ssl3_digest_cached_records(s))
return -1;
}
else
{
int offset=0;
int dgst_num;
s->state=SSL3_ST_SR_CERT_VRFY_A;
s->init_num=0;
/* We need to get hashes here so if there is
* a client cert, it can be verified
* FIXME - digest processing for CertificateVerify
* should be generalized. But it is next step
*/
if (s->s3->handshake_buffer)
if (!ssl3_digest_cached_records(s))
return -1;
for (dgst_num=0; dgst_num<SSL_MAX_DIGEST;dgst_num++)
if (s->s3->handshake_dgst[dgst_num])
{
int dgst_size;
s->method->ssl3_enc->cert_verify_mac(s,EVP_MD_CTX_type(s->s3->handshake_dgst[dgst_num]),&(s->s3->tmp.cert_verify_md[offset]));
dgst_size=EVP_MD_CTX_size(s->s3->handshake_dgst[dgst_num]);
if (dgst_size < 0)
{
ret = -1;
goto end;
}
offset+=dgst_size;
}
}
break;
case SSL3_ST_SR_CERT_VRFY_A:
case SSL3_ST_SR_CERT_VRFY_B:
/*
* This *should* be the first time we enable CCS, but be
* extra careful about surrounding code changes. We need
* to set this here because we don't know if we're
* expecting a CertificateVerify or not.
*/
if (!s->s3->change_cipher_spec)
s->s3->flags |= SSL3_FLAGS_CCS_OK;
/* we should decide if we expected this one */
ret=ssl3_get_cert_verify(s);
if (ret <= 0) goto end;
#if defined(OPENSSL_NO_TLSEXT) || defined(OPENSSL_NO_NEXTPROTONEG)
s->state=SSL3_ST_SR_FINISHED_A;
#else
if (s->s3->next_proto_neg_seen)
s->state=SSL3_ST_SR_NEXT_PROTO_A;
else
s->state=SSL3_ST_SR_FINISHED_A;
#endif
s->init_num=0;
break;
#if !defined(OPENSSL_NO_TLSEXT) && !defined(OPENSSL_NO_NEXTPROTONEG)
case SSL3_ST_SR_NEXT_PROTO_A:
case SSL3_ST_SR_NEXT_PROTO_B:
/*
* Enable CCS for resumed handshakes with NPN.
* In a full handshake with NPN, we end up here through
* SSL3_ST_SR_CERT_VRFY_B, where SSL3_FLAGS_CCS_OK was
* already set. Receiving a CCS clears the flag, so make
* sure not to re-enable it to ban duplicates.
* s->s3->change_cipher_spec is set when a CCS is
* processed in s3_pkt.c, and remains set until
* the client's Finished message is read.
*/
if (!s->s3->change_cipher_spec)
s->s3->flags |= SSL3_FLAGS_CCS_OK;
ret=ssl3_get_next_proto(s);
if (ret <= 0) goto end;
s->init_num = 0;
s->state=SSL3_ST_SR_FINISHED_A;
break;
#endif
case SSL3_ST_SR_FINISHED_A:
case SSL3_ST_SR_FINISHED_B:
/*
* Enable CCS for resumed handshakes without NPN.
* In a full handshake, we end up here through
* SSL3_ST_SR_CERT_VRFY_B, where SSL3_FLAGS_CCS_OK was
* already set. Receiving a CCS clears the flag, so make
* sure not to re-enable it to ban duplicates.
* s->s3->change_cipher_spec is set when a CCS is
* processed in s3_pkt.c, and remains set until
* the client's Finished message is read.
*/
if (!s->s3->change_cipher_spec)
s->s3->flags |= SSL3_FLAGS_CCS_OK;
ret=ssl3_get_finished(s,SSL3_ST_SR_FINISHED_A,
SSL3_ST_SR_FINISHED_B);
if (ret <= 0) goto end;
if (s->hit)
s->state=SSL_ST_OK;
#ifndef OPENSSL_NO_TLSEXT
else if (s->tlsext_ticket_expected)
s->state=SSL3_ST_SW_SESSION_TICKET_A;
#endif
else
s->state=SSL3_ST_SW_CHANGE_A;
s->init_num=0;
break;
#ifndef OPENSSL_NO_TLSEXT
case SSL3_ST_SW_SESSION_TICKET_A:
case SSL3_ST_SW_SESSION_TICKET_B:
ret=ssl3_send_newsession_ticket(s);
if (ret <= 0) goto end;
s->state=SSL3_ST_SW_CHANGE_A;
s->init_num=0;
break;
case SSL3_ST_SW_CERT_STATUS_A:
case SSL3_ST_SW_CERT_STATUS_B:
ret=ssl3_send_cert_status(s);
if (ret <= 0) goto end;
s->state=SSL3_ST_SW_KEY_EXCH_A;
s->init_num=0;
break;
#endif
case SSL3_ST_SW_CHANGE_A:
case SSL3_ST_SW_CHANGE_B:
s->session->cipher=s->s3->tmp.new_cipher;
if (!s->method->ssl3_enc->setup_key_block(s))
{ ret= -1; goto end; }
ret=ssl3_send_change_cipher_spec(s,
SSL3_ST_SW_CHANGE_A,SSL3_ST_SW_CHANGE_B);
if (ret <= 0) goto end;
s->state=SSL3_ST_SW_FINISHED_A;
s->init_num=0;
if (!s->method->ssl3_enc->change_cipher_state(s,
SSL3_CHANGE_CIPHER_SERVER_WRITE))
{
ret= -1;
goto end;
}
break;
case SSL3_ST_SW_FINISHED_A:
case SSL3_ST_SW_FINISHED_B:
ret=ssl3_send_finished(s,
SSL3_ST_SW_FINISHED_A,SSL3_ST_SW_FINISHED_B,
s->method->ssl3_enc->server_finished_label,
s->method->ssl3_enc->server_finished_label_len);
if (ret <= 0) goto end;
s->state=SSL3_ST_SW_FLUSH;
if (s->hit)
{
#if defined(OPENSSL_NO_TLSEXT) || defined(OPENSSL_NO_NEXTPROTONEG)
s->s3->tmp.next_state=SSL3_ST_SR_FINISHED_A;
#else
if (s->s3->next_proto_neg_seen)
{
s->s3->tmp.next_state=SSL3_ST_SR_NEXT_PROTO_A;
}
else
s->s3->tmp.next_state=SSL3_ST_SR_FINISHED_A;
#endif
}
else
s->s3->tmp.next_state=SSL_ST_OK;
s->init_num=0;
break;
case SSL_ST_OK:
/* clean a few things up */
ssl3_cleanup_key_block(s);
BUF_MEM_free(s->init_buf);
s->init_buf=NULL;
/* remove buffering on output */
ssl_free_wbio_buffer(s);
s->init_num=0;
if (s->renegotiate == 2) /* skipped if we just sent a HelloRequest */
{
s->renegotiate=0;
s->new_session=0;
ssl_update_cache(s,SSL_SESS_CACHE_SERVER);
s->ctx->stats.sess_accept_good++;
/* s->server=1; */
s->handshake_func=ssl3_accept;
if (cb != NULL) cb(s,SSL_CB_HANDSHAKE_DONE,1);
}
ret = 1;
goto end;
/* break; */
default:
SSLerr(SSL_F_SSL3_ACCEPT,SSL_R_UNKNOWN_STATE);
ret= -1;
goto end;
/* break; */
}
if (!s->s3->tmp.reuse_message && !skip)
{
if (s->debug)
{
if ((ret=BIO_flush(s->wbio)) <= 0)
goto end;
}
if ((cb != NULL) && (s->state != state))
{
new_state=s->state;
s->state=state;
cb(s,SSL_CB_ACCEPT_LOOP,1);
s->state=new_state;
}
}
skip=0;
}
| 166,753 |
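Stated as policy, the rule the fix enforces is short: a temporary RSA key in ServerKeyExchange is only legitimate when the negotiated suite is export-grade; in any other suite the client must use the certificate key, and a server must never volunteer one. A standalone sketch of the client-side acceptance check (the Cipher struct is a model, not OpenSSL's real bitmask):

#include <cstdio>

struct Cipher {
    bool kRSA;      // RSA key exchange
    bool isExport;  // 40/512-bit export-grade suite
};

// Client-side rule after the fix: a temporary RSA key is acceptable only in
// an export suite. Tolerating one in a strong suite is the FREAK downgrade:
// a factorable 512-bit key, signed by the server's real certificate.
bool acceptTempRsaKey(const Cipher& negotiated) {
    return negotiated.kRSA && negotiated.isExport;
}

int main() {
    Cipher exportSuite{true, true};
    Cipher strongSuite{true, false};
    std::printf("export suite: %d\n", acceptTempRsaKey(exportSuite));  // 1
    std::printf("strong suite: %d\n", acceptTempRsaKey(strongSuite));  // 0
    return 0;
}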
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void GDataCacheMetadataMap::Initialize(
const std::vector<FilePath>& cache_paths) {
AssertOnSequencedWorkerPool();
if (cache_paths.size() < GDataCache::NUM_CACHE_TYPES) {
LOG(ERROR) << "Size of cache_paths is invalid.";
return;
}
if (!GDataCache::CreateCacheDirectories(cache_paths))
return;
if (!ChangeFilePermissions(cache_paths[GDataCache::CACHE_TYPE_PERSISTENT],
S_IRWXU | S_IXGRP | S_IXOTH))
return;
DVLOG(1) << "Scanning directories";
ResourceIdToFilePathMap persistent_file_map;
ScanCacheDirectory(cache_paths,
GDataCache::CACHE_TYPE_PERSISTENT,
&cache_map_,
&persistent_file_map);
ResourceIdToFilePathMap tmp_file_map;
ScanCacheDirectory(cache_paths,
GDataCache::CACHE_TYPE_TMP,
&cache_map_,
&tmp_file_map);
ResourceIdToFilePathMap pinned_file_map;
ScanCacheDirectory(cache_paths,
GDataCache::CACHE_TYPE_PINNED,
&cache_map_,
&pinned_file_map);
ResourceIdToFilePathMap outgoing_file_map;
ScanCacheDirectory(cache_paths,
GDataCache::CACHE_TYPE_OUTGOING,
&cache_map_,
&outgoing_file_map);
RemoveInvalidFilesFromPersistentDirectory(persistent_file_map,
outgoing_file_map,
&cache_map_);
DVLOG(1) << "Directory scan finished";
}
Commit Message: Revert 144993 - gdata: Remove invalid files in the cache directories
Broke linux_chromeos_valgrind:
http://build.chromium.org/p/chromium.memory.fyi/builders/Chromium%20OS%20%28valgrind%29%285%29/builds/8628/steps/memory%20test%3A%20unit/logs/stdio
In theory, we shouldn't have any invalid files left in the
cache directories, but things can go wrong and invalid files
may be left if the device shuts down unexpectedly, for instance.
Besides, it's good to be defensive.
BUG=134862
TEST=added unit tests
Review URL: https://chromiumcodereview.appspot.com/10693020
[email protected]
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@145029 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-119 | void GDataCacheMetadataMap::Initialize(
const std::vector<FilePath>& cache_paths) {
AssertOnSequencedWorkerPool();
if (cache_paths.size() < GDataCache::NUM_CACHE_TYPES) {
DLOG(ERROR) << "Size of cache_paths is invalid.";
return;
}
if (!CreateCacheDirectories(cache_paths))
return;
if (!ChangeFilePermissions(cache_paths[GDataCache::CACHE_TYPE_PERSISTENT],
S_IRWXU | S_IXGRP | S_IXOTH))
return;
DVLOG(1) << "Scanning directories";
ScanCacheDirectory(cache_paths, GDataCache::CACHE_TYPE_PERSISTENT,
&cache_map_);
ScanCacheDirectory(cache_paths, GDataCache::CACHE_TYPE_TMP, &cache_map_);
// Then scan pinned and outgoing directories to update existing entries in
// cache map, or create new ones for pinned symlinks to /dev/null which target
// nothing.
// Pinned and outgoing directories should be scanned after the persistent
// directory as we'll add PINNED and DIRTY states respectively to the existing
// files in the persistent directory per the contents of the pinned and
// outgoing directories.
ScanCacheDirectory(cache_paths, GDataCache::CACHE_TYPE_PINNED, &cache_map_);
ScanCacheDirectory(cache_paths, GDataCache::CACHE_TYPE_OUTGOING, &cache_map_);
DVLOG(1) << "Directory scan finished";
}
| 170,865 |
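Both versions of the function preserve one ordering subtlety that the fixed code's comments spell out: pinned and outgoing directories must be scanned after persistent and tmp, because their entries only annotate files found in the earlier passes (or create bare PINNED placeholders for symlinks to /dev/null). A compressed sketch of that two-phase scan, treating the file stem as the resource id for simplicity (real gdata cache filenames carry more structure):

#include <filesystem>
#include <map>
#include <string>

namespace fs = std::filesystem;

enum StateBit { PRESENT = 1, PINNED = 2, DIRTY = 4 };
using CacheMap = std::map<std::string, int>;  // resource id -> state bits

// Phase 1 (persistent, tmp): every file found creates an entry.
void scanPrimary(const fs::path& dir, CacheMap& map) {
    for (const auto& entry : fs::directory_iterator(dir))
        map[entry.path().stem().string()] |= PRESENT;
}

// Phase 2 (pinned, outgoing): entries only annotate what phase 1 found,
// which is why this pass has to run second.
void scanAnnotating(const fs::path& dir, int bit, CacheMap& map) {
    for (const auto& entry : fs::directory_iterator(dir)) {
        const std::string id = entry.path().stem().string();
        auto it = map.find(id);
        if (it != map.end())
            it->second |= bit;
        else if (bit == PINNED)  // pinned symlink to /dev/null: no file yet
            map[id] = bit;
    }
}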
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static int ext2_fill_super(struct super_block *sb, void *data, int silent)
{
struct buffer_head * bh;
struct ext2_sb_info * sbi;
struct ext2_super_block * es;
struct inode *root;
unsigned long block;
unsigned long sb_block = get_sb_block(&data);
unsigned long logic_sb_block;
unsigned long offset = 0;
unsigned long def_mount_opts;
long ret = -EINVAL;
int blocksize = BLOCK_SIZE;
int db_count;
int i, j;
__le32 features;
int err;
err = -ENOMEM;
sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
if (!sbi)
goto failed;
sbi->s_blockgroup_lock =
kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
if (!sbi->s_blockgroup_lock) {
kfree(sbi);
goto failed;
}
sb->s_fs_info = sbi;
sbi->s_sb_block = sb_block;
spin_lock_init(&sbi->s_lock);
/*
* See what the current blocksize for the device is, and
* use that as the blocksize. Otherwise (or if the blocksize
* is smaller than the default) use the default.
* This is important for devices that have a hardware
* sectorsize that is larger than the default.
*/
blocksize = sb_min_blocksize(sb, BLOCK_SIZE);
if (!blocksize) {
ext2_msg(sb, KERN_ERR, "error: unable to set blocksize");
goto failed_sbi;
}
/*
* If the superblock doesn't start on a hardware sector boundary,
* calculate the offset.
*/
if (blocksize != BLOCK_SIZE) {
logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
offset = (sb_block*BLOCK_SIZE) % blocksize;
} else {
logic_sb_block = sb_block;
}
if (!(bh = sb_bread(sb, logic_sb_block))) {
ext2_msg(sb, KERN_ERR, "error: unable to read superblock");
goto failed_sbi;
}
/*
* Note: s_es must be initialized as soon as possible because
* some ext2 macro-instructions depend on its value
*/
es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
sbi->s_es = es;
sb->s_magic = le16_to_cpu(es->s_magic);
if (sb->s_magic != EXT2_SUPER_MAGIC)
goto cantfind_ext2;
/* Set defaults before we parse the mount options */
def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
if (def_mount_opts & EXT2_DEFM_DEBUG)
set_opt(sbi->s_mount_opt, DEBUG);
if (def_mount_opts & EXT2_DEFM_BSDGROUPS)
set_opt(sbi->s_mount_opt, GRPID);
if (def_mount_opts & EXT2_DEFM_UID16)
set_opt(sbi->s_mount_opt, NO_UID32);
#ifdef CONFIG_EXT2_FS_XATTR
if (def_mount_opts & EXT2_DEFM_XATTR_USER)
set_opt(sbi->s_mount_opt, XATTR_USER);
#endif
#ifdef CONFIG_EXT2_FS_POSIX_ACL
if (def_mount_opts & EXT2_DEFM_ACL)
set_opt(sbi->s_mount_opt, POSIX_ACL);
#endif
if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_PANIC)
set_opt(sbi->s_mount_opt, ERRORS_PANIC);
else if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_CONTINUE)
set_opt(sbi->s_mount_opt, ERRORS_CONT);
else
set_opt(sbi->s_mount_opt, ERRORS_RO);
sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid));
sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid));
set_opt(sbi->s_mount_opt, RESERVATION);
if (!parse_options((char *) data, sb))
goto failed_mount;
sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
((EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ?
MS_POSIXACL : 0);
sb->s_iflags |= SB_I_CGROUPWB;
if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV &&
(EXT2_HAS_COMPAT_FEATURE(sb, ~0U) ||
EXT2_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
EXT2_HAS_INCOMPAT_FEATURE(sb, ~0U)))
ext2_msg(sb, KERN_WARNING,
"warning: feature flags set on rev 0 fs, "
"running e2fsck is recommended");
/*
* Check feature flags regardless of the revision level, since we
* previously didn't change the revision level when setting the flags,
* so there is a chance incompat flags are set on a rev 0 filesystem.
*/
features = EXT2_HAS_INCOMPAT_FEATURE(sb, ~EXT2_FEATURE_INCOMPAT_SUPP);
if (features) {
ext2_msg(sb, KERN_ERR, "error: couldn't mount because of "
"unsupported optional features (%x)",
le32_to_cpu(features));
goto failed_mount;
}
if (!(sb->s_flags & MS_RDONLY) &&
(features = EXT2_HAS_RO_COMPAT_FEATURE(sb, ~EXT2_FEATURE_RO_COMPAT_SUPP))){
ext2_msg(sb, KERN_ERR, "error: couldn't mount RDWR because of "
"unsupported optional features (%x)",
le32_to_cpu(features));
goto failed_mount;
}
blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
if (sbi->s_mount_opt & EXT2_MOUNT_DAX) {
if (blocksize != PAGE_SIZE) {
ext2_msg(sb, KERN_ERR,
"error: unsupported blocksize for dax");
goto failed_mount;
}
if (!sb->s_bdev->bd_disk->fops->direct_access) {
ext2_msg(sb, KERN_ERR,
"error: device does not support dax");
goto failed_mount;
}
}
/* If the blocksize doesn't match, re-read the thing.. */
if (sb->s_blocksize != blocksize) {
brelse(bh);
if (!sb_set_blocksize(sb, blocksize)) {
ext2_msg(sb, KERN_ERR,
"error: bad blocksize %d", blocksize);
goto failed_sbi;
}
logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
offset = (sb_block*BLOCK_SIZE) % blocksize;
bh = sb_bread(sb, logic_sb_block);
if(!bh) {
ext2_msg(sb, KERN_ERR, "error: couldn't read"
"superblock on 2nd try");
goto failed_sbi;
}
es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
sbi->s_es = es;
if (es->s_magic != cpu_to_le16(EXT2_SUPER_MAGIC)) {
ext2_msg(sb, KERN_ERR, "error: magic mismatch");
goto failed_mount;
}
}
sb->s_maxbytes = ext2_max_size(sb->s_blocksize_bits);
sb->s_max_links = EXT2_LINK_MAX;
if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV) {
sbi->s_inode_size = EXT2_GOOD_OLD_INODE_SIZE;
sbi->s_first_ino = EXT2_GOOD_OLD_FIRST_INO;
} else {
sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
if ((sbi->s_inode_size < EXT2_GOOD_OLD_INODE_SIZE) ||
!is_power_of_2(sbi->s_inode_size) ||
(sbi->s_inode_size > blocksize)) {
ext2_msg(sb, KERN_ERR,
"error: unsupported inode size: %d",
sbi->s_inode_size);
goto failed_mount;
}
}
sbi->s_frag_size = EXT2_MIN_FRAG_SIZE <<
le32_to_cpu(es->s_log_frag_size);
if (sbi->s_frag_size == 0)
goto cantfind_ext2;
sbi->s_frags_per_block = sb->s_blocksize / sbi->s_frag_size;
sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
sbi->s_frags_per_group = le32_to_cpu(es->s_frags_per_group);
sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
if (EXT2_INODE_SIZE(sb) == 0)
goto cantfind_ext2;
sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb);
if (sbi->s_inodes_per_block == 0 || sbi->s_inodes_per_group == 0)
goto cantfind_ext2;
sbi->s_itb_per_group = sbi->s_inodes_per_group /
sbi->s_inodes_per_block;
sbi->s_desc_per_block = sb->s_blocksize /
sizeof (struct ext2_group_desc);
sbi->s_sbh = bh;
sbi->s_mount_state = le16_to_cpu(es->s_state);
sbi->s_addr_per_block_bits =
ilog2 (EXT2_ADDR_PER_BLOCK(sb));
sbi->s_desc_per_block_bits =
ilog2 (EXT2_DESC_PER_BLOCK(sb));
if (sb->s_magic != EXT2_SUPER_MAGIC)
goto cantfind_ext2;
if (sb->s_blocksize != bh->b_size) {
if (!silent)
ext2_msg(sb, KERN_ERR, "error: unsupported blocksize");
goto failed_mount;
}
if (sb->s_blocksize != sbi->s_frag_size) {
ext2_msg(sb, KERN_ERR,
"error: fragsize %lu != blocksize %lu"
"(not supported yet)",
sbi->s_frag_size, sb->s_blocksize);
goto failed_mount;
}
if (sbi->s_blocks_per_group > sb->s_blocksize * 8) {
ext2_msg(sb, KERN_ERR,
"error: #blocks per group too big: %lu",
sbi->s_blocks_per_group);
goto failed_mount;
}
if (sbi->s_frags_per_group > sb->s_blocksize * 8) {
ext2_msg(sb, KERN_ERR,
"error: #fragments per group too big: %lu",
sbi->s_frags_per_group);
goto failed_mount;
}
if (sbi->s_inodes_per_group > sb->s_blocksize * 8) {
ext2_msg(sb, KERN_ERR,
"error: #inodes per group too big: %lu",
sbi->s_inodes_per_group);
goto failed_mount;
}
if (EXT2_BLOCKS_PER_GROUP(sb) == 0)
goto cantfind_ext2;
sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
le32_to_cpu(es->s_first_data_block) - 1)
/ EXT2_BLOCKS_PER_GROUP(sb)) + 1;
db_count = (sbi->s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) /
EXT2_DESC_PER_BLOCK(sb);
sbi->s_group_desc = kmalloc (db_count * sizeof (struct buffer_head *), GFP_KERNEL);
if (sbi->s_group_desc == NULL) {
ext2_msg(sb, KERN_ERR, "error: not enough memory");
goto failed_mount;
}
bgl_lock_init(sbi->s_blockgroup_lock);
sbi->s_debts = kcalloc(sbi->s_groups_count, sizeof(*sbi->s_debts), GFP_KERNEL);
if (!sbi->s_debts) {
ext2_msg(sb, KERN_ERR, "error: not enough memory");
goto failed_mount_group_desc;
}
for (i = 0; i < db_count; i++) {
block = descriptor_loc(sb, logic_sb_block, i);
sbi->s_group_desc[i] = sb_bread(sb, block);
if (!sbi->s_group_desc[i]) {
for (j = 0; j < i; j++)
brelse (sbi->s_group_desc[j]);
ext2_msg(sb, KERN_ERR,
"error: unable to read group descriptors");
goto failed_mount_group_desc;
}
}
if (!ext2_check_descriptors (sb)) {
ext2_msg(sb, KERN_ERR, "group descriptors corrupted");
goto failed_mount2;
}
sbi->s_gdb_count = db_count;
get_random_bytes(&sbi->s_next_generation, sizeof(u32));
spin_lock_init(&sbi->s_next_gen_lock);
/* per filesystem reservation list head & lock */
spin_lock_init(&sbi->s_rsv_window_lock);
sbi->s_rsv_window_root = RB_ROOT;
/*
* Add a single, static dummy reservation to the start of the
* reservation window list --- it gives us a placeholder for
* append-at-start-of-list which makes the allocation logic
* _much_ simpler.
*/
sbi->s_rsv_window_head.rsv_start = EXT2_RESERVE_WINDOW_NOT_ALLOCATED;
sbi->s_rsv_window_head.rsv_end = EXT2_RESERVE_WINDOW_NOT_ALLOCATED;
sbi->s_rsv_window_head.rsv_alloc_hit = 0;
sbi->s_rsv_window_head.rsv_goal_size = 0;
ext2_rsv_window_add(sb, &sbi->s_rsv_window_head);
err = percpu_counter_init(&sbi->s_freeblocks_counter,
ext2_count_free_blocks(sb), GFP_KERNEL);
if (!err) {
err = percpu_counter_init(&sbi->s_freeinodes_counter,
ext2_count_free_inodes(sb), GFP_KERNEL);
}
if (!err) {
err = percpu_counter_init(&sbi->s_dirs_counter,
ext2_count_dirs(sb), GFP_KERNEL);
}
if (err) {
ext2_msg(sb, KERN_ERR, "error: insufficient memory");
goto failed_mount3;
}
/*
* set up enough so that it can read an inode
*/
sb->s_op = &ext2_sops;
sb->s_export_op = &ext2_export_ops;
sb->s_xattr = ext2_xattr_handlers;
#ifdef CONFIG_QUOTA
sb->dq_op = &dquot_operations;
sb->s_qcop = &dquot_quotactl_ops;
sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
#endif
root = ext2_iget(sb, EXT2_ROOT_INO);
if (IS_ERR(root)) {
ret = PTR_ERR(root);
goto failed_mount3;
}
if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
iput(root);
ext2_msg(sb, KERN_ERR, "error: corrupt root inode, run e2fsck");
goto failed_mount3;
}
sb->s_root = d_make_root(root);
if (!sb->s_root) {
ext2_msg(sb, KERN_ERR, "error: get root inode failed");
ret = -ENOMEM;
goto failed_mount3;
}
if (EXT2_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL))
ext2_msg(sb, KERN_WARNING,
"warning: mounting ext3 filesystem as ext2");
if (ext2_setup_super (sb, es, sb->s_flags & MS_RDONLY))
sb->s_flags |= MS_RDONLY;
ext2_write_super(sb);
return 0;
cantfind_ext2:
if (!silent)
ext2_msg(sb, KERN_ERR,
"error: can't find an ext2 filesystem on dev %s.",
sb->s_id);
goto failed_mount;
failed_mount3:
percpu_counter_destroy(&sbi->s_freeblocks_counter);
percpu_counter_destroy(&sbi->s_freeinodes_counter);
percpu_counter_destroy(&sbi->s_dirs_counter);
failed_mount2:
for (i = 0; i < db_count; i++)
brelse(sbi->s_group_desc[i]);
failed_mount_group_desc:
kfree(sbi->s_group_desc);
kfree(sbi->s_debts);
failed_mount:
brelse(bh);
failed_sbi:
sb->s_fs_info = NULL;
kfree(sbi->s_blockgroup_lock);
kfree(sbi);
failed:
return ret;
}
Commit Message: ext2: convert to mbcache2
The conversion is generally straightforward. We convert the filesystem from
a global cache to a per-fs one. As with ext4, the tricky part is that the
xattr block corresponding to a found mbcache entry can get freed before we
take the buffer lock for that block, so we have to check whether the entry
is still valid after acquiring the buffer lock.
Signed-off-by: Jan Kara <[email protected]>
Signed-off-by: Theodore Ts'o <[email protected]>
CWE ID: CWE-19 | static int ext2_fill_super(struct super_block *sb, void *data, int silent)
{
struct buffer_head * bh;
struct ext2_sb_info * sbi;
struct ext2_super_block * es;
struct inode *root;
unsigned long block;
unsigned long sb_block = get_sb_block(&data);
unsigned long logic_sb_block;
unsigned long offset = 0;
unsigned long def_mount_opts;
long ret = -EINVAL;
int blocksize = BLOCK_SIZE;
int db_count;
int i, j;
__le32 features;
int err;
err = -ENOMEM;
sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
if (!sbi)
goto failed;
sbi->s_blockgroup_lock =
kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
if (!sbi->s_blockgroup_lock) {
kfree(sbi);
goto failed;
}
sb->s_fs_info = sbi;
sbi->s_sb_block = sb_block;
spin_lock_init(&sbi->s_lock);
/*
* See what the current blocksize for the device is, and
* use that as the blocksize. Otherwise (or if the blocksize
* is smaller than the default) use the default.
* This is important for devices that have a hardware
* sectorsize that is larger than the default.
*/
blocksize = sb_min_blocksize(sb, BLOCK_SIZE);
if (!blocksize) {
ext2_msg(sb, KERN_ERR, "error: unable to set blocksize");
goto failed_sbi;
}
/*
* If the superblock doesn't start on a hardware sector boundary,
* calculate the offset.
*/
if (blocksize != BLOCK_SIZE) {
logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
offset = (sb_block*BLOCK_SIZE) % blocksize;
} else {
logic_sb_block = sb_block;
}
if (!(bh = sb_bread(sb, logic_sb_block))) {
ext2_msg(sb, KERN_ERR, "error: unable to read superblock");
goto failed_sbi;
}
/*
* Note: s_es must be initialized as soon as possible because
* some ext2 macro-instructions depend on its value
*/
es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
sbi->s_es = es;
sb->s_magic = le16_to_cpu(es->s_magic);
if (sb->s_magic != EXT2_SUPER_MAGIC)
goto cantfind_ext2;
/* Set defaults before we parse the mount options */
def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
if (def_mount_opts & EXT2_DEFM_DEBUG)
set_opt(sbi->s_mount_opt, DEBUG);
if (def_mount_opts & EXT2_DEFM_BSDGROUPS)
set_opt(sbi->s_mount_opt, GRPID);
if (def_mount_opts & EXT2_DEFM_UID16)
set_opt(sbi->s_mount_opt, NO_UID32);
#ifdef CONFIG_EXT2_FS_XATTR
if (def_mount_opts & EXT2_DEFM_XATTR_USER)
set_opt(sbi->s_mount_opt, XATTR_USER);
#endif
#ifdef CONFIG_EXT2_FS_POSIX_ACL
if (def_mount_opts & EXT2_DEFM_ACL)
set_opt(sbi->s_mount_opt, POSIX_ACL);
#endif
if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_PANIC)
set_opt(sbi->s_mount_opt, ERRORS_PANIC);
else if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_CONTINUE)
set_opt(sbi->s_mount_opt, ERRORS_CONT);
else
set_opt(sbi->s_mount_opt, ERRORS_RO);
sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid));
sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid));
set_opt(sbi->s_mount_opt, RESERVATION);
if (!parse_options((char *) data, sb))
goto failed_mount;
sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
((EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ?
MS_POSIXACL : 0);
sb->s_iflags |= SB_I_CGROUPWB;
if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV &&
(EXT2_HAS_COMPAT_FEATURE(sb, ~0U) ||
EXT2_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
EXT2_HAS_INCOMPAT_FEATURE(sb, ~0U)))
ext2_msg(sb, KERN_WARNING,
"warning: feature flags set on rev 0 fs, "
"running e2fsck is recommended");
/*
* Check feature flags regardless of the revision level, since we
* previously didn't change the revision level when setting the flags,
* so there is a chance incompat flags are set on a rev 0 filesystem.
*/
features = EXT2_HAS_INCOMPAT_FEATURE(sb, ~EXT2_FEATURE_INCOMPAT_SUPP);
if (features) {
ext2_msg(sb, KERN_ERR, "error: couldn't mount because of "
"unsupported optional features (%x)",
le32_to_cpu(features));
goto failed_mount;
}
if (!(sb->s_flags & MS_RDONLY) &&
(features = EXT2_HAS_RO_COMPAT_FEATURE(sb, ~EXT2_FEATURE_RO_COMPAT_SUPP))){
ext2_msg(sb, KERN_ERR, "error: couldn't mount RDWR because of "
"unsupported optional features (%x)",
le32_to_cpu(features));
goto failed_mount;
}
blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
if (sbi->s_mount_opt & EXT2_MOUNT_DAX) {
if (blocksize != PAGE_SIZE) {
ext2_msg(sb, KERN_ERR,
"error: unsupported blocksize for dax");
goto failed_mount;
}
if (!sb->s_bdev->bd_disk->fops->direct_access) {
ext2_msg(sb, KERN_ERR,
"error: device does not support dax");
goto failed_mount;
}
}
/* If the blocksize doesn't match, re-read the thing.. */
if (sb->s_blocksize != blocksize) {
brelse(bh);
if (!sb_set_blocksize(sb, blocksize)) {
ext2_msg(sb, KERN_ERR,
"error: bad blocksize %d", blocksize);
goto failed_sbi;
}
logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
offset = (sb_block*BLOCK_SIZE) % blocksize;
bh = sb_bread(sb, logic_sb_block);
if(!bh) {
ext2_msg(sb, KERN_ERR, "error: couldn't read"
"superblock on 2nd try");
goto failed_sbi;
}
es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
sbi->s_es = es;
if (es->s_magic != cpu_to_le16(EXT2_SUPER_MAGIC)) {
ext2_msg(sb, KERN_ERR, "error: magic mismatch");
goto failed_mount;
}
}
sb->s_maxbytes = ext2_max_size(sb->s_blocksize_bits);
sb->s_max_links = EXT2_LINK_MAX;
if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV) {
sbi->s_inode_size = EXT2_GOOD_OLD_INODE_SIZE;
sbi->s_first_ino = EXT2_GOOD_OLD_FIRST_INO;
} else {
sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
if ((sbi->s_inode_size < EXT2_GOOD_OLD_INODE_SIZE) ||
!is_power_of_2(sbi->s_inode_size) ||
(sbi->s_inode_size > blocksize)) {
ext2_msg(sb, KERN_ERR,
"error: unsupported inode size: %d",
sbi->s_inode_size);
goto failed_mount;
}
}
sbi->s_frag_size = EXT2_MIN_FRAG_SIZE <<
le32_to_cpu(es->s_log_frag_size);
if (sbi->s_frag_size == 0)
goto cantfind_ext2;
sbi->s_frags_per_block = sb->s_blocksize / sbi->s_frag_size;
sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
sbi->s_frags_per_group = le32_to_cpu(es->s_frags_per_group);
sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
if (EXT2_INODE_SIZE(sb) == 0)
goto cantfind_ext2;
sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb);
if (sbi->s_inodes_per_block == 0 || sbi->s_inodes_per_group == 0)
goto cantfind_ext2;
sbi->s_itb_per_group = sbi->s_inodes_per_group /
sbi->s_inodes_per_block;
sbi->s_desc_per_block = sb->s_blocksize /
sizeof (struct ext2_group_desc);
sbi->s_sbh = bh;
sbi->s_mount_state = le16_to_cpu(es->s_state);
sbi->s_addr_per_block_bits =
ilog2 (EXT2_ADDR_PER_BLOCK(sb));
sbi->s_desc_per_block_bits =
ilog2 (EXT2_DESC_PER_BLOCK(sb));
if (sb->s_magic != EXT2_SUPER_MAGIC)
goto cantfind_ext2;
if (sb->s_blocksize != bh->b_size) {
if (!silent)
ext2_msg(sb, KERN_ERR, "error: unsupported blocksize");
goto failed_mount;
}
if (sb->s_blocksize != sbi->s_frag_size) {
ext2_msg(sb, KERN_ERR,
"error: fragsize %lu != blocksize %lu"
"(not supported yet)",
sbi->s_frag_size, sb->s_blocksize);
goto failed_mount;
}
if (sbi->s_blocks_per_group > sb->s_blocksize * 8) {
ext2_msg(sb, KERN_ERR,
"error: #blocks per group too big: %lu",
sbi->s_blocks_per_group);
goto failed_mount;
}
if (sbi->s_frags_per_group > sb->s_blocksize * 8) {
ext2_msg(sb, KERN_ERR,
"error: #fragments per group too big: %lu",
sbi->s_frags_per_group);
goto failed_mount;
}
if (sbi->s_inodes_per_group > sb->s_blocksize * 8) {
ext2_msg(sb, KERN_ERR,
"error: #inodes per group too big: %lu",
sbi->s_inodes_per_group);
goto failed_mount;
}
if (EXT2_BLOCKS_PER_GROUP(sb) == 0)
goto cantfind_ext2;
sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
le32_to_cpu(es->s_first_data_block) - 1)
/ EXT2_BLOCKS_PER_GROUP(sb)) + 1;
db_count = (sbi->s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) /
EXT2_DESC_PER_BLOCK(sb);
sbi->s_group_desc = kmalloc (db_count * sizeof (struct buffer_head *), GFP_KERNEL);
if (sbi->s_group_desc == NULL) {
ext2_msg(sb, KERN_ERR, "error: not enough memory");
goto failed_mount;
}
bgl_lock_init(sbi->s_blockgroup_lock);
sbi->s_debts = kcalloc(sbi->s_groups_count, sizeof(*sbi->s_debts), GFP_KERNEL);
if (!sbi->s_debts) {
ext2_msg(sb, KERN_ERR, "error: not enough memory");
goto failed_mount_group_desc;
}
for (i = 0; i < db_count; i++) {
block = descriptor_loc(sb, logic_sb_block, i);
sbi->s_group_desc[i] = sb_bread(sb, block);
if (!sbi->s_group_desc[i]) {
for (j = 0; j < i; j++)
brelse (sbi->s_group_desc[j]);
ext2_msg(sb, KERN_ERR,
"error: unable to read group descriptors");
goto failed_mount_group_desc;
}
}
if (!ext2_check_descriptors (sb)) {
ext2_msg(sb, KERN_ERR, "group descriptors corrupted");
goto failed_mount2;
}
sbi->s_gdb_count = db_count;
get_random_bytes(&sbi->s_next_generation, sizeof(u32));
spin_lock_init(&sbi->s_next_gen_lock);
/* per filesystem reservation list head & lock */
spin_lock_init(&sbi->s_rsv_window_lock);
sbi->s_rsv_window_root = RB_ROOT;
/*
* Add a single, static dummy reservation to the start of the
* reservation window list --- it gives us a placeholder for
* append-at-start-of-list which makes the allocation logic
* _much_ simpler.
*/
sbi->s_rsv_window_head.rsv_start = EXT2_RESERVE_WINDOW_NOT_ALLOCATED;
sbi->s_rsv_window_head.rsv_end = EXT2_RESERVE_WINDOW_NOT_ALLOCATED;
sbi->s_rsv_window_head.rsv_alloc_hit = 0;
sbi->s_rsv_window_head.rsv_goal_size = 0;
ext2_rsv_window_add(sb, &sbi->s_rsv_window_head);
err = percpu_counter_init(&sbi->s_freeblocks_counter,
ext2_count_free_blocks(sb), GFP_KERNEL);
if (!err) {
err = percpu_counter_init(&sbi->s_freeinodes_counter,
ext2_count_free_inodes(sb), GFP_KERNEL);
}
if (!err) {
err = percpu_counter_init(&sbi->s_dirs_counter,
ext2_count_dirs(sb), GFP_KERNEL);
}
if (err) {
ext2_msg(sb, KERN_ERR, "error: insufficient memory");
goto failed_mount3;
}
#ifdef CONFIG_EXT2_FS_XATTR
sbi->s_mb_cache = ext2_xattr_create_cache();
if (!sbi->s_mb_cache) {
ext2_msg(sb, KERN_ERR, "Failed to create an mb_cache");
goto failed_mount3;
}
#endif
/*
* set up enough so that it can read an inode
*/
sb->s_op = &ext2_sops;
sb->s_export_op = &ext2_export_ops;
sb->s_xattr = ext2_xattr_handlers;
#ifdef CONFIG_QUOTA
sb->dq_op = &dquot_operations;
sb->s_qcop = &dquot_quotactl_ops;
sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
#endif
root = ext2_iget(sb, EXT2_ROOT_INO);
if (IS_ERR(root)) {
ret = PTR_ERR(root);
goto failed_mount3;
}
if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
iput(root);
ext2_msg(sb, KERN_ERR, "error: corrupt root inode, run e2fsck");
goto failed_mount3;
}
sb->s_root = d_make_root(root);
if (!sb->s_root) {
ext2_msg(sb, KERN_ERR, "error: get root inode failed");
ret = -ENOMEM;
goto failed_mount3;
}
if (EXT2_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL))
ext2_msg(sb, KERN_WARNING,
"warning: mounting ext3 filesystem as ext2");
if (ext2_setup_super (sb, es, sb->s_flags & MS_RDONLY))
sb->s_flags |= MS_RDONLY;
ext2_write_super(sb);
return 0;
cantfind_ext2:
if (!silent)
ext2_msg(sb, KERN_ERR,
"error: can't find an ext2 filesystem on dev %s.",
sb->s_id);
goto failed_mount;
failed_mount3:
if (sbi->s_mb_cache)
ext2_xattr_destroy_cache(sbi->s_mb_cache);
percpu_counter_destroy(&sbi->s_freeblocks_counter);
percpu_counter_destroy(&sbi->s_freeinodes_counter);
percpu_counter_destroy(&sbi->s_dirs_counter);
failed_mount2:
for (i = 0; i < db_count; i++)
brelse(sbi->s_group_desc[i]);
failed_mount_group_desc:
kfree(sbi->s_group_desc);
kfree(sbi->s_debts);
failed_mount:
brelse(bh);
failed_sbi:
sb->s_fs_info = NULL;
kfree(sbi->s_blockgroup_lock);
kfree(sbi);
failed:
return ret;
}
| 169,973 |
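The revalidation step called out in the ext2 commit message above is a general pattern for lock-free cache lookups: an object found without holding its own lock can be freed or recycled before that lock is acquired, so it must be re-checked once the lock is held. Below is a minimal, self-contained C sketch of the pattern; the names (xblock, cache_lookup, lookup_and_lock) are illustrative and are not the kernel's mbcache or buffer-layer API.

#include <pthread.h>
#include <stdio.h>

struct xblock {
    pthread_mutex_t lock;
    unsigned long   blkno;   /* block this slot currently holds */
    int             in_use;  /* cleared when the block is freed */
};

#define NSLOTS 4
static struct xblock table[NSLOTS];  /* stand-in for the buffer cache */

/* Unlocked lookup: cheap, but can race with concurrent frees. */
static struct xblock *cache_lookup(unsigned long blkno)
{
    for (int i = 0; i < NSLOTS; i++)
        if (table[i].blkno == blkno)
            return &table[i];
    return NULL;
}

/* Find a block via the cache, then revalidate it under its lock. */
static struct xblock *lookup_and_lock(unsigned long blkno)
{
    struct xblock *b = cache_lookup(blkno);

    if (b == NULL)
        return NULL;
    pthread_mutex_lock(&b->lock);
    /* The slot may have been freed or reused for a different block
     * while we waited for the lock; trust it only after this re-check,
     * just as the fixed xattr code re-checks its mbcache entry. */
    if (!b->in_use || b->blkno != blkno) {
        pthread_mutex_unlock(&b->lock);
        return NULL;
    }
    return b;  /* caller unlocks when finished */
}

int main(void)
{
    pthread_mutex_init(&table[0].lock, NULL);
    table[0].blkno = 42;
    table[0].in_use = 1;
    struct xblock *b = lookup_and_lock(42);
    printf("block 42 %s\n", b ? "locked and valid" : "gone");
    if (b)
        pthread_mutex_unlock(&b->lock);
    return 0;
}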
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: bool PluginServiceImpl::GetPluginInfo(int render_process_id,
int render_view_id,
ResourceContext* context,
const GURL& url,
const GURL& page_url,
const std::string& mime_type,
bool allow_wildcard,
bool* is_stale,
webkit::WebPluginInfo* info,
std::string* actual_mime_type) {
std::vector<webkit::WebPluginInfo> plugins;
std::vector<std::string> mime_types;
bool stale = GetPluginInfoArray(
url, mime_type, allow_wildcard, &plugins, &mime_types);
if (is_stale)
*is_stale = stale;
for (size_t i = 0; i < plugins.size(); ++i) {
if (!filter_ || filter_->IsPluginEnabled(render_process_id,
render_view_id,
context,
url,
page_url,
&plugins[i])) {
*info = plugins[i];
if (actual_mime_type)
*actual_mime_type = mime_types[i];
return true;
}
}
return false;
}
Commit Message: Follow-on fixes and naming changes for https://codereview.chromium.org/12086077/
BUG=172573
Review URL: https://codereview.chromium.org/12177018
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@180600 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-287 | bool PluginServiceImpl::GetPluginInfo(int render_process_id,
int render_view_id,
ResourceContext* context,
const GURL& url,
const GURL& page_url,
const std::string& mime_type,
bool allow_wildcard,
bool* is_stale,
webkit::WebPluginInfo* info,
std::string* actual_mime_type) {
std::vector<webkit::WebPluginInfo> plugins;
std::vector<std::string> mime_types;
bool stale = GetPluginInfoArray(
url, mime_type, allow_wildcard, &plugins, &mime_types);
if (is_stale)
*is_stale = stale;
for (size_t i = 0; i < plugins.size(); ++i) {
if (!filter_ || filter_->IsPluginAvailable(render_process_id,
render_view_id,
context,
url,
page_url,
&plugins[i])) {
*info = plugins[i];
if (actual_mime_type)
*actual_mime_type = mime_types[i];
return true;
}
}
return false;
}
| 171,475 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: bool GpuProcessHost::LaunchGpuProcess(const std::string& channel_id) {
if (!(gpu_enabled_ &&
GpuDataManagerImpl::GetInstance()->ShouldUseSoftwareRendering()) &&
!hardware_gpu_enabled_) {
SendOutstandingReplies();
return false;
}
const CommandLine& browser_command_line = *CommandLine::ForCurrentProcess();
CommandLine::StringType gpu_launcher =
browser_command_line.GetSwitchValueNative(switches::kGpuLauncher);
#if defined(OS_LINUX)
int child_flags = gpu_launcher.empty() ? ChildProcessHost::CHILD_ALLOW_SELF :
ChildProcessHost::CHILD_NORMAL;
#else
int child_flags = ChildProcessHost::CHILD_NORMAL;
#endif
FilePath exe_path = ChildProcessHost::GetChildPath(child_flags);
if (exe_path.empty())
return false;
CommandLine* cmd_line = new CommandLine(exe_path);
cmd_line->AppendSwitchASCII(switches::kProcessType, switches::kGpuProcess);
cmd_line->AppendSwitchASCII(switches::kProcessChannelID, channel_id);
if (kind_ == GPU_PROCESS_KIND_UNSANDBOXED)
cmd_line->AppendSwitch(switches::kDisableGpuSandbox);
static const char* const kSwitchNames[] = {
switches::kDisableBreakpad,
switches::kDisableGLMultisampling,
switches::kDisableGpuSandbox,
switches::kReduceGpuSandbox,
switches::kDisableSeccompFilterSandbox,
switches::kDisableGpuSwitching,
switches::kDisableGpuVsync,
switches::kDisableGpuWatchdog,
switches::kDisableImageTransportSurface,
switches::kDisableLogging,
switches::kEnableGPUServiceLogging,
switches::kEnableLogging,
#if defined(OS_MACOSX)
switches::kEnableSandboxLogging,
#endif
#if defined(OS_CHROMEOS)
switches::kEnableVaapi,
#endif
switches::kGpuNoContextLost,
switches::kGpuStartupDialog,
switches::kLoggingLevel,
switches::kNoSandbox,
switches::kTestGLLib,
switches::kTraceStartup,
switches::kV,
switches::kVModule,
};
cmd_line->CopySwitchesFrom(browser_command_line, kSwitchNames,
arraysize(kSwitchNames));
cmd_line->CopySwitchesFrom(
browser_command_line, switches::kGpuSwitches, switches::kNumGpuSwitches);
content::GetContentClient()->browser()->AppendExtraCommandLineSwitches(
cmd_line, process_->GetData().id);
GpuDataManagerImpl::GetInstance()->AppendGpuCommandLine(cmd_line);
if (cmd_line->HasSwitch(switches::kUseGL))
software_rendering_ =
(cmd_line->GetSwitchValueASCII(switches::kUseGL) == "swiftshader");
UMA_HISTOGRAM_BOOLEAN("GPU.GPUProcessSoftwareRendering", software_rendering_);
if (!gpu_launcher.empty())
cmd_line->PrependWrapper(gpu_launcher);
process_->Launch(
#if defined(OS_WIN)
FilePath(),
#elif defined(OS_POSIX)
false, // Never use the zygote (GPU plugin can't be sandboxed).
base::EnvironmentVector(),
#endif
cmd_line);
process_launched_ = true;
UMA_HISTOGRAM_ENUMERATION("GPU.GPUProcessLifetimeEvents",
LAUNCHED, GPU_PROCESS_LIFETIME_EVENT_MAX);
return true;
}
Commit Message: Revert 137988 - VAVDA is the hardware video decode accelerator for Chrome on Linux and ChromeOS for Intel CPUs (Sandy Bridge and newer).
This CL enables VAVDA acceleration for ChromeOS, both for HTML5 video and Flash.
The feature is currently hidden behind a command line flag and can be enabled by adding the --enable-vaapi parameter to command line.
BUG=117062
TEST=Manual runs of test streams.
Change-Id: I386e16739e2ef2230f52a0a434971b33d8654699
Review URL: https://chromiumcodereview.appspot.com/9814001
This is causing crbug.com/129103
[email protected]
Review URL: https://chromiumcodereview.appspot.com/10411066
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@138208 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: | bool GpuProcessHost::LaunchGpuProcess(const std::string& channel_id) {
if (!(gpu_enabled_ &&
GpuDataManagerImpl::GetInstance()->ShouldUseSoftwareRendering()) &&
!hardware_gpu_enabled_) {
SendOutstandingReplies();
return false;
}
const CommandLine& browser_command_line = *CommandLine::ForCurrentProcess();
CommandLine::StringType gpu_launcher =
browser_command_line.GetSwitchValueNative(switches::kGpuLauncher);
#if defined(OS_LINUX)
int child_flags = gpu_launcher.empty() ? ChildProcessHost::CHILD_ALLOW_SELF :
ChildProcessHost::CHILD_NORMAL;
#else
int child_flags = ChildProcessHost::CHILD_NORMAL;
#endif
FilePath exe_path = ChildProcessHost::GetChildPath(child_flags);
if (exe_path.empty())
return false;
CommandLine* cmd_line = new CommandLine(exe_path);
cmd_line->AppendSwitchASCII(switches::kProcessType, switches::kGpuProcess);
cmd_line->AppendSwitchASCII(switches::kProcessChannelID, channel_id);
if (kind_ == GPU_PROCESS_KIND_UNSANDBOXED)
cmd_line->AppendSwitch(switches::kDisableGpuSandbox);
static const char* const kSwitchNames[] = {
switches::kDisableBreakpad,
switches::kDisableGLMultisampling,
switches::kDisableGpuSandbox,
switches::kReduceGpuSandbox,
switches::kDisableSeccompFilterSandbox,
switches::kDisableGpuSwitching,
switches::kDisableGpuVsync,
switches::kDisableGpuWatchdog,
switches::kDisableImageTransportSurface,
switches::kDisableLogging,
switches::kEnableGPUServiceLogging,
switches::kEnableLogging,
#if defined(OS_MACOSX)
switches::kEnableSandboxLogging,
#endif
switches::kGpuNoContextLost,
switches::kGpuStartupDialog,
switches::kLoggingLevel,
switches::kNoSandbox,
switches::kTestGLLib,
switches::kTraceStartup,
switches::kV,
switches::kVModule,
};
cmd_line->CopySwitchesFrom(browser_command_line, kSwitchNames,
arraysize(kSwitchNames));
cmd_line->CopySwitchesFrom(
browser_command_line, switches::kGpuSwitches, switches::kNumGpuSwitches);
content::GetContentClient()->browser()->AppendExtraCommandLineSwitches(
cmd_line, process_->GetData().id);
GpuDataManagerImpl::GetInstance()->AppendGpuCommandLine(cmd_line);
if (cmd_line->HasSwitch(switches::kUseGL))
software_rendering_ =
(cmd_line->GetSwitchValueASCII(switches::kUseGL) == "swiftshader");
UMA_HISTOGRAM_BOOLEAN("GPU.GPUProcessSoftwareRendering", software_rendering_);
if (!gpu_launcher.empty())
cmd_line->PrependWrapper(gpu_launcher);
process_->Launch(
#if defined(OS_WIN)
FilePath(),
#elif defined(OS_POSIX)
false, // Never use the zygote (GPU plugin can't be sandboxed).
base::EnvironmentVector(),
#endif
cmd_line);
process_launched_ = true;
UMA_HISTOGRAM_ENUMERATION("GPU.GPUProcessLifetimeEvents",
LAUNCHED, GPU_PROCESS_LIFETIME_EVENT_MAX);
return true;
}
| 170,701 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: icmp6_nodeinfo_print(netdissect_options *ndo, u_int icmp6len, const u_char *bp, const u_char *ep)
{
const struct icmp6_nodeinfo *ni6;
const struct icmp6_hdr *dp;
const u_char *cp;
size_t siz, i;
int needcomma;
if (ep < bp)
return;
dp = (const struct icmp6_hdr *)bp;
ni6 = (const struct icmp6_nodeinfo *)bp;
siz = ep - bp;
switch (ni6->ni_type) {
case ICMP6_NI_QUERY:
if (siz == sizeof(*dp) + 4) {
/* KAME who-are-you */
ND_PRINT((ndo," who-are-you request"));
break;
}
ND_PRINT((ndo," node information query"));
ND_TCHECK2(*dp, sizeof(*ni6));
ni6 = (const struct icmp6_nodeinfo *)dp;
ND_PRINT((ndo," (")); /*)*/
switch (EXTRACT_16BITS(&ni6->ni_qtype)) {
case NI_QTYPE_NOOP:
ND_PRINT((ndo,"noop"));
break;
case NI_QTYPE_SUPTYPES:
ND_PRINT((ndo,"supported qtypes"));
i = EXTRACT_16BITS(&ni6->ni_flags);
if (i)
ND_PRINT((ndo," [%s]", (i & 0x01) ? "C" : ""));
break;
case NI_QTYPE_FQDN:
ND_PRINT((ndo,"DNS name"));
break;
case NI_QTYPE_NODEADDR:
ND_PRINT((ndo,"node addresses"));
i = ni6->ni_flags;
if (!i)
break;
/* NI_NODEADDR_FLAG_TRUNCATE undefined for query */
ND_PRINT((ndo," [%s%s%s%s%s%s]",
(i & NI_NODEADDR_FLAG_ANYCAST) ? "a" : "",
(i & NI_NODEADDR_FLAG_GLOBAL) ? "G" : "",
(i & NI_NODEADDR_FLAG_SITELOCAL) ? "S" : "",
(i & NI_NODEADDR_FLAG_LINKLOCAL) ? "L" : "",
(i & NI_NODEADDR_FLAG_COMPAT) ? "C" : "",
(i & NI_NODEADDR_FLAG_ALL) ? "A" : ""));
break;
default:
ND_PRINT((ndo,"unknown"));
break;
}
if (ni6->ni_qtype == NI_QTYPE_NOOP ||
ni6->ni_qtype == NI_QTYPE_SUPTYPES) {
if (siz != sizeof(*ni6))
if (ndo->ndo_vflag)
ND_PRINT((ndo,", invalid len"));
/*(*/
ND_PRINT((ndo,")"));
break;
}
/* XXX backward compat, icmp-name-lookup-03 */
if (siz == sizeof(*ni6)) {
ND_PRINT((ndo,", 03 draft"));
/*(*/
ND_PRINT((ndo,")"));
break;
}
switch (ni6->ni_code) {
case ICMP6_NI_SUBJ_IPV6:
if (!ND_TTEST2(*dp,
sizeof(*ni6) + sizeof(struct in6_addr)))
break;
if (siz != sizeof(*ni6) + sizeof(struct in6_addr)) {
if (ndo->ndo_vflag)
ND_PRINT((ndo,", invalid subject len"));
break;
}
ND_PRINT((ndo,", subject=%s",
ip6addr_string(ndo, ni6 + 1)));
break;
case ICMP6_NI_SUBJ_FQDN:
ND_PRINT((ndo,", subject=DNS name"));
cp = (const u_char *)(ni6 + 1);
if (cp[0] == ep - cp - 1) {
/* icmp-name-lookup-03, pascal string */
if (ndo->ndo_vflag)
ND_PRINT((ndo,", 03 draft"));
cp++;
ND_PRINT((ndo,", \""));
while (cp < ep) {
safeputchar(ndo, *cp);
cp++;
}
ND_PRINT((ndo,"\""));
} else
dnsname_print(ndo, cp, ep);
break;
case ICMP6_NI_SUBJ_IPV4:
if (!ND_TTEST2(*dp, sizeof(*ni6) + sizeof(struct in_addr)))
break;
if (siz != sizeof(*ni6) + sizeof(struct in_addr)) {
if (ndo->ndo_vflag)
ND_PRINT((ndo,", invalid subject len"));
break;
}
ND_PRINT((ndo,", subject=%s",
ipaddr_string(ndo, ni6 + 1)));
break;
default:
ND_PRINT((ndo,", unknown subject"));
break;
}
/*(*/
ND_PRINT((ndo,")"));
break;
case ICMP6_NI_REPLY:
if (icmp6len > siz) {
ND_PRINT((ndo,"[|icmp6: node information reply]"));
break;
}
needcomma = 0;
ni6 = (const struct icmp6_nodeinfo *)dp;
ND_PRINT((ndo," node information reply"));
ND_PRINT((ndo," (")); /*)*/
switch (ni6->ni_code) {
case ICMP6_NI_SUCCESS:
if (ndo->ndo_vflag) {
ND_PRINT((ndo,"success"));
needcomma++;
}
break;
case ICMP6_NI_REFUSED:
ND_PRINT((ndo,"refused"));
needcomma++;
if (siz != sizeof(*ni6))
if (ndo->ndo_vflag)
ND_PRINT((ndo,", invalid length"));
break;
case ICMP6_NI_UNKNOWN:
ND_PRINT((ndo,"unknown"));
needcomma++;
if (siz != sizeof(*ni6))
if (ndo->ndo_vflag)
ND_PRINT((ndo,", invalid length"));
break;
}
if (ni6->ni_code != ICMP6_NI_SUCCESS) {
/*(*/
ND_PRINT((ndo,")"));
break;
}
switch (EXTRACT_16BITS(&ni6->ni_qtype)) {
case NI_QTYPE_NOOP:
if (needcomma)
ND_PRINT((ndo,", "));
ND_PRINT((ndo,"noop"));
if (siz != sizeof(*ni6))
if (ndo->ndo_vflag)
ND_PRINT((ndo,", invalid length"));
break;
case NI_QTYPE_SUPTYPES:
if (needcomma)
ND_PRINT((ndo,", "));
ND_PRINT((ndo,"supported qtypes"));
i = EXTRACT_16BITS(&ni6->ni_flags);
if (i)
ND_PRINT((ndo," [%s]", (i & 0x01) ? "C" : ""));
break;
case NI_QTYPE_FQDN:
if (needcomma)
ND_PRINT((ndo,", "));
ND_PRINT((ndo,"DNS name"));
cp = (const u_char *)(ni6 + 1) + 4;
if (cp[0] == ep - cp - 1) {
/* icmp-name-lookup-03, pascal string */
if (ndo->ndo_vflag)
ND_PRINT((ndo,", 03 draft"));
cp++;
ND_PRINT((ndo,", \""));
while (cp < ep) {
safeputchar(ndo, *cp);
cp++;
}
ND_PRINT((ndo,"\""));
} else
dnsname_print(ndo, cp, ep);
if ((EXTRACT_16BITS(&ni6->ni_flags) & 0x01) != 0)
ND_PRINT((ndo," [TTL=%u]", EXTRACT_32BITS(ni6 + 1)));
break;
case NI_QTYPE_NODEADDR:
if (needcomma)
ND_PRINT((ndo,", "));
ND_PRINT((ndo,"node addresses"));
i = sizeof(*ni6);
while (i < siz) {
if (i + sizeof(struct in6_addr) + sizeof(int32_t) > siz)
break;
ND_PRINT((ndo," %s", ip6addr_string(ndo, bp + i)));
i += sizeof(struct in6_addr);
ND_PRINT((ndo,"(%d)", (int32_t)EXTRACT_32BITS(bp + i)));
i += sizeof(int32_t);
}
i = ni6->ni_flags;
if (!i)
break;
ND_PRINT((ndo," [%s%s%s%s%s%s%s]",
(i & NI_NODEADDR_FLAG_ANYCAST) ? "a" : "",
(i & NI_NODEADDR_FLAG_GLOBAL) ? "G" : "",
(i & NI_NODEADDR_FLAG_SITELOCAL) ? "S" : "",
(i & NI_NODEADDR_FLAG_LINKLOCAL) ? "L" : "",
(i & NI_NODEADDR_FLAG_COMPAT) ? "C" : "",
(i & NI_NODEADDR_FLAG_ALL) ? "A" : "",
(i & NI_NODEADDR_FLAG_TRUNCATE) ? "T" : ""));
break;
default:
if (needcomma)
ND_PRINT((ndo,", "));
ND_PRINT((ndo,"unknown"));
break;
}
/*(*/
ND_PRINT((ndo,")"));
break;
}
return;
trunc:
ND_PRINT((ndo, "[|icmp6]"));
}
Commit Message: CVE-2017-13041/ICMP6: Add more bounds checks.
This fixes a buffer over-read discovered by Kim Gwan Yeong.
Add a test using the capture file supplied by the reporter(s).
CWE ID: CWE-125 | icmp6_nodeinfo_print(netdissect_options *ndo, u_int icmp6len, const u_char *bp, const u_char *ep)
{
const struct icmp6_nodeinfo *ni6;
const struct icmp6_hdr *dp;
const u_char *cp;
size_t siz, i;
int needcomma;
if (ep < bp)
return;
dp = (const struct icmp6_hdr *)bp;
ni6 = (const struct icmp6_nodeinfo *)bp;
siz = ep - bp;
switch (ni6->ni_type) {
case ICMP6_NI_QUERY:
if (siz == sizeof(*dp) + 4) {
/* KAME who-are-you */
ND_PRINT((ndo," who-are-you request"));
break;
}
ND_PRINT((ndo," node information query"));
ND_TCHECK2(*dp, sizeof(*ni6));
ni6 = (const struct icmp6_nodeinfo *)dp;
ND_PRINT((ndo," (")); /*)*/
switch (EXTRACT_16BITS(&ni6->ni_qtype)) {
case NI_QTYPE_NOOP:
ND_PRINT((ndo,"noop"));
break;
case NI_QTYPE_SUPTYPES:
ND_PRINT((ndo,"supported qtypes"));
i = EXTRACT_16BITS(&ni6->ni_flags);
if (i)
ND_PRINT((ndo," [%s]", (i & 0x01) ? "C" : ""));
break;
case NI_QTYPE_FQDN:
ND_PRINT((ndo,"DNS name"));
break;
case NI_QTYPE_NODEADDR:
ND_PRINT((ndo,"node addresses"));
i = ni6->ni_flags;
if (!i)
break;
/* NI_NODEADDR_FLAG_TRUNCATE undefined for query */
ND_PRINT((ndo," [%s%s%s%s%s%s]",
(i & NI_NODEADDR_FLAG_ANYCAST) ? "a" : "",
(i & NI_NODEADDR_FLAG_GLOBAL) ? "G" : "",
(i & NI_NODEADDR_FLAG_SITELOCAL) ? "S" : "",
(i & NI_NODEADDR_FLAG_LINKLOCAL) ? "L" : "",
(i & NI_NODEADDR_FLAG_COMPAT) ? "C" : "",
(i & NI_NODEADDR_FLAG_ALL) ? "A" : ""));
break;
default:
ND_PRINT((ndo,"unknown"));
break;
}
if (ni6->ni_qtype == NI_QTYPE_NOOP ||
ni6->ni_qtype == NI_QTYPE_SUPTYPES) {
if (siz != sizeof(*ni6))
if (ndo->ndo_vflag)
ND_PRINT((ndo,", invalid len"));
/*(*/
ND_PRINT((ndo,")"));
break;
}
/* XXX backward compat, icmp-name-lookup-03 */
if (siz == sizeof(*ni6)) {
ND_PRINT((ndo,", 03 draft"));
/*(*/
ND_PRINT((ndo,")"));
break;
}
switch (ni6->ni_code) {
case ICMP6_NI_SUBJ_IPV6:
if (!ND_TTEST2(*dp,
sizeof(*ni6) + sizeof(struct in6_addr)))
break;
if (siz != sizeof(*ni6) + sizeof(struct in6_addr)) {
if (ndo->ndo_vflag)
ND_PRINT((ndo,", invalid subject len"));
break;
}
ND_PRINT((ndo,", subject=%s",
ip6addr_string(ndo, ni6 + 1)));
break;
case ICMP6_NI_SUBJ_FQDN:
ND_PRINT((ndo,", subject=DNS name"));
cp = (const u_char *)(ni6 + 1);
if (cp[0] == ep - cp - 1) {
/* icmp-name-lookup-03, pascal string */
if (ndo->ndo_vflag)
ND_PRINT((ndo,", 03 draft"));
cp++;
ND_PRINT((ndo,", \""));
while (cp < ep) {
safeputchar(ndo, *cp);
cp++;
}
ND_PRINT((ndo,"\""));
} else
dnsname_print(ndo, cp, ep);
break;
case ICMP6_NI_SUBJ_IPV4:
if (!ND_TTEST2(*dp, sizeof(*ni6) + sizeof(struct in_addr)))
break;
if (siz != sizeof(*ni6) + sizeof(struct in_addr)) {
if (ndo->ndo_vflag)
ND_PRINT((ndo,", invalid subject len"));
break;
}
ND_PRINT((ndo,", subject=%s",
ipaddr_string(ndo, ni6 + 1)));
break;
default:
ND_PRINT((ndo,", unknown subject"));
break;
}
/*(*/
ND_PRINT((ndo,")"));
break;
case ICMP6_NI_REPLY:
if (icmp6len > siz) {
ND_PRINT((ndo,"[|icmp6: node information reply]"));
break;
}
needcomma = 0;
ND_TCHECK2(*dp, sizeof(*ni6));
ni6 = (const struct icmp6_nodeinfo *)dp;
ND_PRINT((ndo," node information reply"));
ND_PRINT((ndo," (")); /*)*/
switch (ni6->ni_code) {
case ICMP6_NI_SUCCESS:
if (ndo->ndo_vflag) {
ND_PRINT((ndo,"success"));
needcomma++;
}
break;
case ICMP6_NI_REFUSED:
ND_PRINT((ndo,"refused"));
needcomma++;
if (siz != sizeof(*ni6))
if (ndo->ndo_vflag)
ND_PRINT((ndo,", invalid length"));
break;
case ICMP6_NI_UNKNOWN:
ND_PRINT((ndo,"unknown"));
needcomma++;
if (siz != sizeof(*ni6))
if (ndo->ndo_vflag)
ND_PRINT((ndo,", invalid length"));
break;
}
if (ni6->ni_code != ICMP6_NI_SUCCESS) {
/*(*/
ND_PRINT((ndo,")"));
break;
}
switch (EXTRACT_16BITS(&ni6->ni_qtype)) {
case NI_QTYPE_NOOP:
if (needcomma)
ND_PRINT((ndo,", "));
ND_PRINT((ndo,"noop"));
if (siz != sizeof(*ni6))
if (ndo->ndo_vflag)
ND_PRINT((ndo,", invalid length"));
break;
case NI_QTYPE_SUPTYPES:
if (needcomma)
ND_PRINT((ndo,", "));
ND_PRINT((ndo,"supported qtypes"));
i = EXTRACT_16BITS(&ni6->ni_flags);
if (i)
ND_PRINT((ndo," [%s]", (i & 0x01) ? "C" : ""));
break;
case NI_QTYPE_FQDN:
if (needcomma)
ND_PRINT((ndo,", "));
ND_PRINT((ndo,"DNS name"));
cp = (const u_char *)(ni6 + 1) + 4;
ND_TCHECK(cp[0]);
if (cp[0] == ep - cp - 1) {
/* icmp-name-lookup-03, pascal string */
if (ndo->ndo_vflag)
ND_PRINT((ndo,", 03 draft"));
cp++;
ND_PRINT((ndo,", \""));
while (cp < ep) {
safeputchar(ndo, *cp);
cp++;
}
ND_PRINT((ndo,"\""));
} else
dnsname_print(ndo, cp, ep);
if ((EXTRACT_16BITS(&ni6->ni_flags) & 0x01) != 0)
ND_PRINT((ndo," [TTL=%u]", EXTRACT_32BITS(ni6 + 1)));
break;
case NI_QTYPE_NODEADDR:
if (needcomma)
ND_PRINT((ndo,", "));
ND_PRINT((ndo,"node addresses"));
i = sizeof(*ni6);
while (i < siz) {
if (i + sizeof(struct in6_addr) + sizeof(int32_t) > siz)
break;
ND_PRINT((ndo," %s", ip6addr_string(ndo, bp + i)));
i += sizeof(struct in6_addr);
ND_PRINT((ndo,"(%d)", (int32_t)EXTRACT_32BITS(bp + i)));
i += sizeof(int32_t);
}
i = ni6->ni_flags;
if (!i)
break;
ND_PRINT((ndo," [%s%s%s%s%s%s%s]",
(i & NI_NODEADDR_FLAG_ANYCAST) ? "a" : "",
(i & NI_NODEADDR_FLAG_GLOBAL) ? "G" : "",
(i & NI_NODEADDR_FLAG_SITELOCAL) ? "S" : "",
(i & NI_NODEADDR_FLAG_LINKLOCAL) ? "L" : "",
(i & NI_NODEADDR_FLAG_COMPAT) ? "C" : "",
(i & NI_NODEADDR_FLAG_ALL) ? "A" : "",
(i & NI_NODEADDR_FLAG_TRUNCATE) ? "T" : ""));
break;
default:
if (needcomma)
ND_PRINT((ndo,", "));
ND_PRINT((ndo,"unknown"));
break;
}
/*(*/
ND_PRINT((ndo,")"));
break;
}
return;
trunc:
ND_PRINT((ndo, "[|icmp6]"));
}
| 167,834 |
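Every guard the tcpdump commit above adds (the ND_TCHECK()/ND_TCHECK2() calls visible in the fixed function) enforces one rule: never dereference a packet pointer until the remaining captured length has been verified. A self-contained C sketch of that rule follows; the names have_bytes and parse_record are illustrative, not tcpdump's API.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Nonzero iff at least n bytes remain in [p, pkt_end). Comparing the
 * remaining length, rather than computing p + n, keeps the check itself
 * free of pointer overflow. */
static int have_bytes(const uint8_t *p, const uint8_t *pkt_end, size_t n)
{
    return p <= pkt_end && (size_t)(pkt_end - p) >= n;
}

/* Parse a [1-byte length][payload] record without over-reading. */
static int parse_record(const uint8_t *p, const uint8_t *pkt_end,
                        uint8_t *out, size_t outsz)
{
    if (!have_bytes(p, pkt_end, 1))    /* guard the length byte */
        return -1;
    size_t len = *p++;
    if (!have_bytes(p, pkt_end, len))  /* guard the payload */
        return -1;
    if (len > outsz)                   /* guard the destination too */
        return -1;
    memcpy(out, p, len);
    return (int)len;
}

int main(void)
{
    const uint8_t pkt[] = { 3, 'a', 'b', 'c' };
    const uint8_t bad[] = { 200, 'x' };  /* length byte lies */
    uint8_t buf[8];
    printf("good: %d\n", parse_record(pkt, pkt + sizeof(pkt), buf, sizeof(buf)));
    printf("bad:  %d\n", parse_record(bad, bad + sizeof(bad), buf, sizeof(buf)));
    return 0;
}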
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: cJSON *cJSON_CreateFloat( double num )
{
cJSON *item = cJSON_New_Item();
if ( item ) {
item->type = cJSON_Number;
item->valuefloat = num;
item->valueint = num;
}
return item;
}
Commit Message: Fix a buffer overflow / heap corruption issue that could occur if a
malformed JSON string was passed on the control channel. This issue,
present in the cJSON library, was already fixed upstream, so was
addressed here in iperf3 by importing a newer version of cJSON (plus
local ESnet modifications).
Discovered and reported by Dave McDaniel, Cisco Talos.
Based on a patch by @dopheide-esnet, with input from @DaveGamble.
Cross-references: TALOS-CAN-0164, ESNET-SECADV-2016-0001,
CVE-2016-4303
(cherry picked from commit ed94082be27d971a5e1b08b666e2c217cf470a40)
Signed-off-by: Bruce A. Mah <[email protected]>
CWE ID: CWE-119 | cJSON *cJSON_CreateFloat( double num )
| 167,272 |
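The advisory above describes a heap overflow triggered by malformed JSON on the control channel. A common root cause in this bug class is sizing a decode buffer in one pass over the input while a second pass writes it, with the two passes disagreeing on malformed escape sequences. The sketch below is illustrative only (it is not cJSON's parser): it bounds every write against the allocation and rejects malformed escapes outright.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Decode backslash escapes into a fresh buffer, bounding every write
 * against the allocation instead of trusting a separate pre-scan.
 * Returns NULL on malformed input or allocation failure. */
static char *decode_escaped(const char *in, size_t inlen)
{
    char *out = malloc(inlen + 1);  /* output never exceeds input */
    size_t o = 0;

    if (out == NULL)
        return NULL;
    for (size_t i = 0; i < inlen; i++) {
        char c = in[i];
        if (c == '\\') {
            if (++i >= inlen)       /* dangling backslash: reject */
                goto bad;
            switch (in[i]) {
            case 'n':  c = '\n'; break;
            case 't':  c = '\t'; break;
            case '\\': case '"': c = in[i]; break;
            default:   goto bad;    /* unknown escape: reject */
            }
        }
        if (o >= inlen)             /* cannot happen, but cheap to keep */
            goto bad;
        out[o++] = c;
    }
    out[o] = '\0';
    return out;
bad:
    free(out);
    return NULL;
}

int main(void)
{
    char *s = decode_escaped("line1\\nline2", 12);
    if (s) {
        puts(s);
        free(s);
    }
    return 0;
}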