func
stringlengths 0
484k
| target
int64 0
1
| cwe
listlengths 0
4
| project
stringclasses 799
values | commit_id
stringlengths 40
40
| hash
float64 1,215,700,430,453,689,100,000,000B
340,281,914,521,452,260,000,000,000,000B
| size
int64 1
24k
| message
stringlengths 0
13.3k
|
---|---|---|---|---|---|---|---|
/*
 * Strip a leading virtio_net_hdr from the message being sent.
 *
 * Copies the header out of @msg's iterator into @vnet_hdr and shrinks
 * *@len by the header size; validation of the header contents is left
 * to __packet_snd_vnet_parse().
 *
 * Returns 0 on success, -EINVAL if the payload is shorter than the
 * header, or -EFAULT if the copy from the iterator fails.
 */
static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
				 struct virtio_net_hdr *vnet_hdr)
{
	const size_t hdr_len = sizeof(*vnet_hdr);

	if (*len < hdr_len)
		return -EINVAL;
	*len -= hdr_len;

	if (copy_from_iter(vnet_hdr, hdr_len, &msg->msg_iter) != hdr_len)
		return -EFAULT;

	return __packet_snd_vnet_parse(vnet_hdr, *len);
}
| 0 |
[
"CWE-416",
"CWE-362"
] |
linux
|
84ac7260236a49c79eede91617700174c2c19b0c
| 16,751,015,010,306,997,000,000,000,000,000,000,000 | 15 |
packet: fix race condition in packet_set_ring
When packet_set_ring creates a ring buffer it will initialize a
struct timer_list if the packet version is TPACKET_V3. This value
can then be raced by a different thread calling setsockopt to
set the version to TPACKET_V1 before packet_set_ring has finished.
This leads to a use-after-free on a function pointer in the
struct timer_list when the socket is closed as the previously
initialized timer will not be deleted.
The bug is fixed by taking lock_sock(sk) in packet_setsockopt when
changing the packet version while also taking the lock at the start
of packet_set_ring.
Fixes: f6fb8f100b80 ("af-packet: TPACKET_V3 flexible buffer implementation.")
Signed-off-by: Philip Pettersson <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
/*
  ReadTIFFImage() decodes a (possibly multi-frame) TIFF stream into an
  ImageMagick image list.  For each directory it reads the describing tags,
  picks a decode strategy (YCCK, strip, tile or generic RGBA) and imports
  the pixels through the quantum machinery.  Returns the first image in the
  list on success, or NULL / a thrown reader exception on failure.
*/
static Image *ReadTIFFImage(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
/* Release per-frame resources, close the TIFF handle, raise an exception. */
#define ThrowTIFFException(severity,message) \
{ \
  if (pixel_info != (MemoryInfo *) NULL) \
    pixel_info=RelinquishVirtualMemory(pixel_info); \
  if (quantum_info != (QuantumInfo *) NULL) \
    quantum_info=DestroyQuantumInfo(quantum_info); \
  TIFFClose(tiff); \
  ThrowReaderException(severity,message); \
}

  const char
    *option;

  float
    *chromaticity,
    x_position,
    y_position,
    x_resolution,
    y_resolution;

  Image
    *image;

  int
    tiff_status;

  MagickBooleanType
    more_frames,
    status;

  MagickSizeType
    number_pixels;

  MemoryInfo
    *pixel_info = (MemoryInfo *) NULL;

  QuantumInfo
    *quantum_info;

  QuantumType
    quantum_type;

  register ssize_t
    i;

  size_t
    pad;

  ssize_t
    y;

  TIFF
    *tiff;

  TIFFMethodType
    method;

  uint16
    compress_tag,
    bits_per_sample,
    endian,
    extra_samples,
    interlace,
    max_sample_value,
    min_sample_value,
    orientation,
    pages,
    photometric,
    *sample_info,
    sample_format,
    samples_per_pixel,
    units,
    value;

  uint32
    height,
    rows_per_strip,
    width;

  unsigned char
    *pixels;

  /* scratch slots handed to TIFFGetFieldDefaulted() for tags that return
     extra va_arg values we do not use */
  void
    *sans[2] = { NULL, NULL };

  /*
    Open image.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImage(image_info,exception);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /* libtiff warnings/errors are routed to this thread-local exception */
  (void) SetMagickThreadValue(tiff_exception,exception);
  tiff=TIFFClientOpen(image->filename,"rb",(thandle_t) image,TIFFReadBlob,
    TIFFWriteBlob,TIFFSeekBlob,TIFFCloseBlob,TIFFGetBlobSize,TIFFMapBlob,
    TIFFUnmapBlob);
  if (tiff == (TIFF *) NULL)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  if (exception->severity > ErrorException)
    {
      TIFFClose(tiff);
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  if (image_info->number_scenes != 0)
    {
      /*
        Generate blank images for subimage specification (e.g. image.tif[4]).
        We need to check the number of directories because it is possible that
        the subimage(s) are stored in the photoshop profile.
      */
      if (image_info->scene < (size_t) TIFFNumberOfDirectories(tiff))
        {
          for (i=0; i < (ssize_t) image_info->scene; i++)
          {
            status=TIFFReadDirectory(tiff) != 0 ? MagickTrue : MagickFalse;
            if (status == MagickFalse)
              {
                TIFFClose(tiff);
                image=DestroyImageList(image);
                return((Image *) NULL);
              }
            AcquireNextImage(image_info,image,exception);
            if (GetNextImageInList(image) == (Image *) NULL)
              {
                TIFFClose(tiff);
                image=DestroyImageList(image);
                return((Image *) NULL);
              }
            image=SyncNextImageInList(image);
          }
        }
    }
  more_frames=MagickTrue;
  do
  {
    DisableMSCWarning(4127)
    if (0 && (image_info->verbose != MagickFalse))
      TIFFPrintDirectory(tiff,stdout,MagickFalse);
    RestoreMSCWarning
    /*
      Read the tags describing this directory; missing mandatory tags mean
      a corrupt header.
    */
    photometric=PHOTOMETRIC_RGB;
    if ((TIFFGetField(tiff,TIFFTAG_IMAGEWIDTH,&width) != 1) ||
        (TIFFGetField(tiff,TIFFTAG_IMAGELENGTH,&height) != 1) ||
        (TIFFGetFieldDefaulted(tiff,TIFFTAG_PHOTOMETRIC,&photometric,sans) != 1) ||
        (TIFFGetFieldDefaulted(tiff,TIFFTAG_COMPRESSION,&compress_tag,sans) != 1) ||
        (TIFFGetFieldDefaulted(tiff,TIFFTAG_FILLORDER,&endian,sans) != 1) ||
        (TIFFGetFieldDefaulted(tiff,TIFFTAG_PLANARCONFIG,&interlace,sans) != 1) ||
        (TIFFGetFieldDefaulted(tiff,TIFFTAG_SAMPLESPERPIXEL,&samples_per_pixel,sans) != 1) ||
        (TIFFGetFieldDefaulted(tiff,TIFFTAG_BITSPERSAMPLE,&bits_per_sample,sans) != 1) ||
        (TIFFGetFieldDefaulted(tiff,TIFFTAG_SAMPLEFORMAT,&sample_format,sans) != 1) ||
        (TIFFGetFieldDefaulted(tiff,TIFFTAG_MINSAMPLEVALUE,&min_sample_value,sans) != 1) ||
        (TIFFGetFieldDefaulted(tiff,TIFFTAG_MAXSAMPLEVALUE,&max_sample_value,sans) != 1))
      {
        TIFFClose(tiff);
        ThrowReaderException(CorruptImageError,"ImproperImageHeader");
      }
    /* only 1..32 bit integer samples or 64-bit IEEE floats are accepted */
    if (((sample_format != SAMPLEFORMAT_IEEEFP) || (bits_per_sample != 64)) &&
        ((bits_per_sample <= 0) || (bits_per_sample > 32)))
      {
        TIFFClose(tiff);
        ThrowReaderException(CorruptImageError,"UnsupportedBitsPerPixel");
      }
    if (sample_format == SAMPLEFORMAT_IEEEFP)
      (void) SetImageProperty(image,"quantum:format","floating-point",
        exception);
    /*
      Record the photometric interpretation as an image property.
    */
    switch (photometric)
    {
      case PHOTOMETRIC_MINISBLACK:
      {
        (void) SetImageProperty(image,"tiff:photometric","min-is-black",
          exception);
        break;
      }
      case PHOTOMETRIC_MINISWHITE:
      {
        (void) SetImageProperty(image,"tiff:photometric","min-is-white",
          exception);
        break;
      }
      case PHOTOMETRIC_PALETTE:
      {
        (void) SetImageProperty(image,"tiff:photometric","palette",exception);
        break;
      }
      case PHOTOMETRIC_RGB:
      {
        (void) SetImageProperty(image,"tiff:photometric","RGB",exception);
        break;
      }
      case PHOTOMETRIC_CIELAB:
      {
        (void) SetImageProperty(image,"tiff:photometric","CIELAB",exception);
        break;
      }
      case PHOTOMETRIC_LOGL:
      {
        (void) SetImageProperty(image,"tiff:photometric","CIE Log2(L)",
          exception);
        break;
      }
      case PHOTOMETRIC_LOGLUV:
      {
        (void) SetImageProperty(image,"tiff:photometric","LOGLUV",exception);
        break;
      }
#if defined(PHOTOMETRIC_MASK)
      case PHOTOMETRIC_MASK:
      {
        (void) SetImageProperty(image,"tiff:photometric","MASK",exception);
        break;
      }
#endif
      case PHOTOMETRIC_SEPARATED:
      {
        (void) SetImageProperty(image,"tiff:photometric","separated",exception);
        break;
      }
      case PHOTOMETRIC_YCBCR:
      {
        (void) SetImageProperty(image,"tiff:photometric","YCBCR",exception);
        break;
      }
      default:
      {
        (void) SetImageProperty(image,"tiff:photometric","unknown",exception);
        break;
      }
    }
    if (image->debug != MagickFalse)
      {
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Geometry: %ux%u",
          (unsigned int) width,(unsigned int) height);
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Interlace: %u",
          interlace);
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          "Bits per sample: %u",bits_per_sample);
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          "Min sample value: %u",min_sample_value);
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          "Max sample value: %u",max_sample_value);
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Photometric "
          "interpretation: %s",GetImageProperty(image,"tiff:photometric",
          exception));
      }
    image->columns=(size_t) width;
    image->rows=(size_t) height;
    image->depth=(size_t) bits_per_sample;
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Image depth: %.20g",
        (double) image->depth);
    /*
      Determine endianness from the fill order / file byte order.
    */
    image->endian=MSBEndian;
    if (endian == FILLORDER_LSB2MSB)
      image->endian=LSBEndian;
#if defined(MAGICKCORE_HAVE_TIFFISBIGENDIAN)
    if (TIFFIsBigEndian(tiff) == 0)
      {
        (void) SetImageProperty(image,"tiff:endian","lsb",exception);
        image->endian=LSBEndian;
      }
    else
      {
        (void) SetImageProperty(image,"tiff:endian","msb",exception);
        image->endian=MSBEndian;
      }
#endif
    if ((photometric == PHOTOMETRIC_MINISBLACK) ||
        (photometric == PHOTOMETRIC_MINISWHITE))
      image->colorspace=GRAYColorspace;
    if (photometric == PHOTOMETRIC_SEPARATED)
      image->colorspace=CMYKColorspace;
    if (photometric == PHOTOMETRIC_CIELAB)
      image->colorspace=LabColorspace;
    /*
      Attach embedded profiles and textual properties.
    */
    status=TIFFGetProfiles(tiff,image,exception);
    if (status == MagickFalse)
      {
        TIFFClose(tiff);
        return(DestroyImageList(image));
      }
    status=TIFFGetProperties(tiff,image,exception);
    if (status == MagickFalse)
      {
        TIFFClose(tiff);
        return(DestroyImageList(image));
      }
    option=GetImageOption(image_info,"tiff:exif-properties");
    if (IsStringFalse(option) == MagickFalse) /* enabled by default */
      TIFFGetEXIFProperties(tiff,image,exception);
    /*
      Resolution, page offset and orientation.
    */
    if ((TIFFGetFieldDefaulted(tiff,TIFFTAG_XRESOLUTION,&x_resolution,sans) == 1) &&
        (TIFFGetFieldDefaulted(tiff,TIFFTAG_YRESOLUTION,&y_resolution,sans) == 1))
      {
        image->resolution.x=x_resolution;
        image->resolution.y=y_resolution;
      }
    if (TIFFGetFieldDefaulted(tiff,TIFFTAG_RESOLUTIONUNIT,&units,sans) == 1)
      {
        if (units == RESUNIT_INCH)
          image->units=PixelsPerInchResolution;
        if (units == RESUNIT_CENTIMETER)
          image->units=PixelsPerCentimeterResolution;
      }
    if ((TIFFGetFieldDefaulted(tiff,TIFFTAG_XPOSITION,&x_position,sans) == 1) &&
        (TIFFGetFieldDefaulted(tiff,TIFFTAG_YPOSITION,&y_position,sans) == 1))
      {
        image->page.x=(ssize_t) ceil(x_position*image->resolution.x-0.5);
        image->page.y=(ssize_t) ceil(y_position*image->resolution.y-0.5);
      }
    if (TIFFGetFieldDefaulted(tiff,TIFFTAG_ORIENTATION,&orientation,sans) == 1)
      image->orientation=(OrientationType) orientation;
    /*
      White point and primary chromaticities (libtiff returns pointers into
      its own storage, hence the NULL checks).
    */
    if (TIFFGetField(tiff,TIFFTAG_WHITEPOINT,&chromaticity) == 1)
      {
        if (chromaticity != (float *) NULL)
          {
            image->chromaticity.white_point.x=chromaticity[0];
            image->chromaticity.white_point.y=chromaticity[1];
          }
      }
    if (TIFFGetField(tiff,TIFFTAG_PRIMARYCHROMATICITIES,&chromaticity) == 1)
      {
        if (chromaticity != (float *) NULL)
          {
            image->chromaticity.red_primary.x=chromaticity[0];
            image->chromaticity.red_primary.y=chromaticity[1];
            image->chromaticity.green_primary.x=chromaticity[2];
            image->chromaticity.green_primary.y=chromaticity[3];
            image->chromaticity.blue_primary.x=chromaticity[4];
            image->chromaticity.blue_primary.y=chromaticity[5];
          }
      }
#if defined(MAGICKCORE_HAVE_TIFFISCODECCONFIGURED) || (TIFFLIB_VERSION > 20040919)
    if ((compress_tag != COMPRESSION_NONE) &&
        (TIFFIsCODECConfigured(compress_tag) == 0))
      {
        TIFFClose(tiff);
        ThrowReaderException(CoderError,"CompressNotSupported");
      }
#endif
    /*
      Map the libtiff compression tag onto ImageMagick's enumeration.
    */
    switch (compress_tag)
    {
      case COMPRESSION_NONE: image->compression=NoCompression; break;
      case COMPRESSION_CCITTFAX3: image->compression=FaxCompression; break;
      case COMPRESSION_CCITTFAX4: image->compression=Group4Compression; break;
      case COMPRESSION_JPEG:
      {
        image->compression=JPEGCompression;
#if defined(JPEG_SUPPORT)
        {
          char
            sampling_factor[MagickPathExtent];

          uint16
            horizontal,
            vertical;

          /* preserve the JPEG chroma subsampling so it can be re-used */
          tiff_status=TIFFGetField(tiff,TIFFTAG_YCBCRSUBSAMPLING,&horizontal,
            &vertical);
          if (tiff_status == 1)
            {
              (void) FormatLocaleString(sampling_factor,MagickPathExtent,
                "%dx%d",horizontal,vertical);
              (void) SetImageProperty(image,"jpeg:sampling-factor",
                sampling_factor,exception);
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                "Sampling Factors: %s",sampling_factor);
            }
        }
#endif
        break;
      }
      case COMPRESSION_OJPEG: image->compression=JPEGCompression; break;
#if defined(COMPRESSION_LZMA)
      case COMPRESSION_LZMA: image->compression=LZMACompression; break;
#endif
      case COMPRESSION_LZW: image->compression=LZWCompression; break;
      case COMPRESSION_DEFLATE: image->compression=ZipCompression; break;
      case COMPRESSION_ADOBE_DEFLATE: image->compression=ZipCompression; break;
#if defined(COMPRESSION_WEBP)
      case COMPRESSION_WEBP: image->compression=WebPCompression; break;
#endif
#if defined(COMPRESSION_ZSTD)
      case COMPRESSION_ZSTD: image->compression=ZstdCompression; break;
#endif
      default: image->compression=RLECompression; break;
    }
    quantum_info=(QuantumInfo *) NULL;
    if ((photometric == PHOTOMETRIC_PALETTE) &&
        (pow(2.0,1.0*bits_per_sample) <= MaxColormapSize))
      {
        size_t
          colors;

        colors=(size_t) GetQuantumRange(bits_per_sample)+1;
        if (AcquireImageColormap(image,colors,exception) == MagickFalse)
          {
            TIFFClose(tiff);
            ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
          }
      }
    value=(unsigned short) image->scene;
    if (TIFFGetFieldDefaulted(tiff,TIFFTAG_PAGENUMBER,&value,&pages,sans) == 1)
      image->scene=value;
    if (image->storage_class == PseudoClass)
      {
        size_t
          range;

        uint16
          *blue_colormap,
          *green_colormap,
          *red_colormap;

        /*
          Initialize colormap.
        */
        tiff_status=TIFFGetField(tiff,TIFFTAG_COLORMAP,&red_colormap,
          &green_colormap,&blue_colormap);
        if (tiff_status == 1)
          {
            if ((red_colormap != (uint16 *) NULL) &&
                (green_colormap != (uint16 *) NULL) &&
                (blue_colormap != (uint16 *) NULL))
              {
                range=255;  /* might be old style 8-bit colormap */
                for (i=0; i < (ssize_t) image->colors; i++)
                  if ((red_colormap[i] >= 256) || (green_colormap[i] >= 256) ||
                      (blue_colormap[i] >= 256))
                    {
                      range=65535;
                      break;
                    }
                for (i=0; i < (ssize_t) image->colors; i++)
                {
                  image->colormap[i].red=ClampToQuantum(((double)
                    QuantumRange*red_colormap[i])/range);
                  image->colormap[i].green=ClampToQuantum(((double)
                    QuantumRange*green_colormap[i])/range);
                  image->colormap[i].blue=ClampToQuantum(((double)
                    QuantumRange*blue_colormap[i])/range);
                }
              }
          }
      }
    /* a "ping" only wants metadata; skip the pixel decode entirely */
    if (image_info->ping != MagickFalse)
      {
        if (image_info->number_scenes != 0)
          if (image->scene >= (image_info->scene+image_info->number_scenes-1))
            break;
        goto next_tiff_frame;
      }
    status=SetImageExtent(image,image->columns,image->rows,exception);
    if (status == MagickFalse)
      {
        TIFFClose(tiff);
        return(DestroyImageList(image));
      }
    status=SetImageColorspace(image,image->colorspace,exception);
    status&=ResetImagePixels(image,exception);
    if (status == MagickFalse)
      {
        TIFFClose(tiff);
        return(DestroyImageList(image));
      }
    /*
      Allocate memory for the image and pixel buffer.
    */
    quantum_info=AcquireQuantumInfo(image_info,image);
    if (quantum_info == (QuantumInfo *) NULL)
      ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed");
    if (sample_format == SAMPLEFORMAT_UINT)
      status=SetQuantumFormat(image,quantum_info,UnsignedQuantumFormat);
    if (sample_format == SAMPLEFORMAT_INT)
      status=SetQuantumFormat(image,quantum_info,SignedQuantumFormat);
    if (sample_format == SAMPLEFORMAT_IEEEFP)
      status=SetQuantumFormat(image,quantum_info,FloatingPointQuantumFormat);
    if (status == MagickFalse)
      ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed");
    status=MagickTrue;
    switch (photometric)
    {
      case PHOTOMETRIC_MINISBLACK:
      {
        quantum_info->min_is_white=MagickFalse;
        break;
      }
      case PHOTOMETRIC_MINISWHITE:
      {
        quantum_info->min_is_white=MagickTrue;
        break;
      }
      default:
        break;
    }
    /*
      Interpret any extra samples as alpha (associated or unassociated).
    */
    tiff_status=TIFFGetFieldDefaulted(tiff,TIFFTAG_EXTRASAMPLES,&extra_samples,
      &sample_info,sans);
    if (tiff_status == 1)
      {
        (void) SetImageProperty(image,"tiff:alpha","unspecified",exception);
        if (extra_samples == 0)
          {
            if ((samples_per_pixel == 4) && (photometric == PHOTOMETRIC_RGB))
              image->alpha_trait=BlendPixelTrait;
          }
        else
          for (i=0; i < extra_samples; i++)
          {
            image->alpha_trait=BlendPixelTrait;
            if (sample_info[i] == EXTRASAMPLE_ASSOCALPHA)
              {
                SetQuantumAlphaType(quantum_info,AssociatedQuantumAlpha);
                (void) SetImageProperty(image,"tiff:alpha","associated",
                  exception);
              }
            else
              if (sample_info[i] == EXTRASAMPLE_UNASSALPHA)
                {
                  SetQuantumAlphaType(quantum_info,DisassociatedQuantumAlpha);
                  (void) SetImageProperty(image,"tiff:alpha","unassociated",
                    exception);
                }
          }
      }
    if (image->alpha_trait != UndefinedPixelTrait)
      (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
    /*
      Choose the decode strategy for this frame.
    */
    method=ReadGenericMethod;
    rows_per_strip=(uint32) image->rows;
    if (TIFFGetField(tiff,TIFFTAG_ROWSPERSTRIP,&rows_per_strip) == 1)
      {
        char
          buffer[MagickPathExtent];

        (void) FormatLocaleString(buffer,MagickPathExtent,"%u",
          (unsigned int) rows_per_strip);
        (void) SetImageProperty(image,"tiff:rows-per-strip",buffer,exception);
        method=ReadStripMethod;
        if (rows_per_strip > (uint32) image->rows)
          rows_per_strip=(uint32) image->rows;
      }
    if (TIFFIsTiled(tiff) != MagickFalse)
      method=ReadTileMethod;
    if (image->compression == JPEGCompression)
      method=GetJPEGMethod(image,tiff,photometric,bits_per_sample,
        samples_per_pixel);
    if ((photometric == PHOTOMETRIC_LOGLUV) ||
        (photometric == PHOTOMETRIC_YCBCR))
      method=ReadGenericMethod;
    quantum_info->endian=LSBEndian;
    quantum_type=RGBQuantum;
    if (TIFFScanlineSize(tiff) <= 0)
      ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed");
    /* sanity check: a scanline larger than the whole blob is bogus */
    if ((1.0*TIFFScanlineSize(tiff)) > (2.53*GetBlobSize(image)))
      ThrowTIFFException(CorruptImageError,"InsufficientImageDataInFile");
    number_pixels=MagickMax(TIFFScanlineSize(tiff),MagickMax((ssize_t)
      image->columns*samples_per_pixel*pow(2.0,ceil(log(bits_per_sample)/
      log(2.0))),image->columns*rows_per_strip));
    pixel_info=AcquireVirtualMemory(number_pixels,sizeof(uint32));
    if (pixel_info == (MemoryInfo *) NULL)
      ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed");
    pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info);
    (void) memset(pixels,0,number_pixels*sizeof(uint32));
    /*
      Derive the quantum type and per-pixel pad from sample layout.
    */
    quantum_type=IndexQuantum;
    pad=(size_t) MagickMax((ssize_t) samples_per_pixel-1,0);
    if (image->alpha_trait != UndefinedPixelTrait)
      {
        if (image->storage_class == PseudoClass)
          quantum_type=IndexAlphaQuantum;
        else
          quantum_type=samples_per_pixel == 1 ? AlphaQuantum : GrayAlphaQuantum;
      }
    else
      if (image->storage_class != PseudoClass)
        quantum_type=GrayQuantum;
    if ((samples_per_pixel > 2) && (interlace != PLANARCONFIG_SEPARATE))
      {
        pad=(size_t) MagickMax((size_t) samples_per_pixel-3,0);
        quantum_type=RGBQuantum;
        if (image->alpha_trait != UndefinedPixelTrait)
          {
            quantum_type=RGBAQuantum;
            pad=(size_t) MagickMax((size_t) samples_per_pixel-4,0);
          }
        if (image->colorspace == CMYKColorspace)
          {
            pad=(size_t) MagickMax((size_t) samples_per_pixel-4,0);
            quantum_type=CMYKQuantum;
            if (image->alpha_trait != UndefinedPixelTrait)
              {
                quantum_type=CMYKAQuantum;
                pad=(size_t) MagickMax((size_t) samples_per_pixel-5,0);
              }
          }
        status=SetQuantumPad(image,quantum_info,pad*((bits_per_sample+7) >> 3));
        if (status == MagickFalse)
          ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed");
      }
    switch (method)
    {
      case ReadYCCKMethod:
      {
        /*
          Convert YCC TIFF image.
        */
        for (y=0; y < (ssize_t) image->rows; y++)
        {
          register Quantum
            *magick_restrict q;

          register ssize_t
            x;

          unsigned char
            *p;

          tiff_status=TIFFReadPixels(tiff,0,y,(char *) pixels);
          if (tiff_status == -1)
            break;
          q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
          if (q == (Quantum *) NULL)
            break;
          p=pixels;
          /* YCCK -> CMYK using standard Rec. 601-style coefficients */
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            SetPixelCyan(image,ScaleCharToQuantum(ClampYCC((double) *p+
              (1.402*(double) *(p+2))-179.456)),q);
            SetPixelMagenta(image,ScaleCharToQuantum(ClampYCC((double) *p-
              (0.34414*(double) *(p+1))-(0.71414*(double ) *(p+2))+
              135.45984)),q);
            SetPixelYellow(image,ScaleCharToQuantum(ClampYCC((double) *p+
              (1.772*(double) *(p+1))-226.816)),q);
            SetPixelBlack(image,ScaleCharToQuantum((unsigned char) *(p+3)),q);
            q+=GetPixelChannels(image);
            p+=4;
          }
          if (SyncAuthenticPixels(image,exception) == MagickFalse)
            break;
          if (image->previous == (Image *) NULL)
            {
              status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
                image->rows);
              if (status == MagickFalse)
                break;
            }
        }
        break;
      }
      case ReadStripMethod:
      {
        register unsigned char
          *p;

        size_t
          extent;

        ssize_t
          stride,
          strip_id;

        tsize_t
          strip_size;

        unsigned char
          *strip_pixels;

        /*
          Convert stripped TIFF image.
        */
        extent=TIFFStripSize(tiff);
        /* over-allocate one extra row of slack for the quantum importer */
#if defined(TIFF_VERSION_BIG)
        extent+=image->columns*sizeof(uint64);
#else
        extent+=image->columns*sizeof(uint32);
#endif
        strip_pixels=(unsigned char *) AcquireQuantumMemory(extent,
          sizeof(*strip_pixels));
        if (strip_pixels == (unsigned char *) NULL)
          ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed");
        (void) memset(strip_pixels,0,extent*sizeof(*strip_pixels));
        stride=TIFFVStripSize(tiff,1);
        strip_id=0;
        p=strip_pixels;
        /* outer loop walks the planes when PLANARCONFIG_SEPARATE */
        for (i=0; i < (ssize_t) samples_per_pixel; i++)
        {
          size_t
            rows_remaining;

          switch (i)
          {
            case 0: break;
            case 1: quantum_type=GreenQuantum; break;
            case 2: quantum_type=BlueQuantum; break;
            case 3:
            {
              if (image->colorspace == CMYKColorspace)
                quantum_type=BlackQuantum;
              break;
            }
            case 4: quantum_type=AlphaQuantum; break;
          }
          rows_remaining=0;
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
            if (q == (Quantum *) NULL)
              break;
            if (rows_remaining == 0)
              {
                /* refill the buffer with the next encoded strip */
                strip_size=TIFFReadEncodedStrip(tiff,strip_id,strip_pixels,
                  TIFFStripSize(tiff));
                if (strip_size == -1)
                  break;
                rows_remaining=rows_per_strip;
                if ((y+rows_per_strip) > image->rows)
                  rows_remaining=(rows_per_strip-(y+rows_per_strip-
                    image->rows));
                p=strip_pixels;
                strip_id++;
              }
            (void) ImportQuantumPixels(image,(CacheView *) NULL,
              quantum_info,quantum_type,p,exception);
            p+=stride;
            rows_remaining--;
            if (SyncAuthenticPixels(image,exception) == MagickFalse)
              break;
            if (image->previous == (Image *) NULL)
              {
                status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
                  image->rows);
                if (status == MagickFalse)
                  break;
              }
          }
          /* chunky layout carries all samples in one pass */
          if ((samples_per_pixel > 1) && (interlace != PLANARCONFIG_SEPARATE))
            break;
        }
        strip_pixels=(unsigned char *) RelinquishMagickMemory(strip_pixels);
        break;
      }
      case ReadTileMethod:
      {
        register unsigned char
          *p;

        size_t
          extent;

        uint32
          columns,
          rows;

        unsigned char
          *tile_pixels;

        /*
          Convert tiled TIFF image.
        */
        if ((TIFFGetField(tiff,TIFFTAG_TILEWIDTH,&columns) != 1) ||
            (TIFFGetField(tiff,TIFFTAG_TILELENGTH,&rows) != 1))
          ThrowTIFFException(CoderError,"ImageIsNotTiled");
        if ((AcquireMagickResource(WidthResource,columns) == MagickFalse) ||
            (AcquireMagickResource(HeightResource,rows) == MagickFalse))
          ThrowTIFFException(ImageError,"WidthOrHeightExceedsLimit");
        number_pixels=(MagickSizeType) columns*rows;
        if (HeapOverflowSanityCheck(rows,sizeof(*tile_pixels)) != MagickFalse)
          ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed");
        extent=TIFFTileSize(tiff);
#if defined(TIFF_VERSION_BIG)
        extent+=columns*sizeof(uint64);
#else
        extent+=columns*sizeof(uint32);
#endif
        tile_pixels=(unsigned char *) AcquireQuantumMemory(extent,
          sizeof(*tile_pixels));
        if (tile_pixels == (unsigned char *) NULL)
          ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed");
        (void) memset(tile_pixels,0,extent*sizeof(*tile_pixels));
        /* outer loop walks the planes when PLANARCONFIG_SEPARATE */
        for (i=0; i < (ssize_t) samples_per_pixel; i++)
        {
          switch (i)
          {
            case 0: break;
            case 1: quantum_type=GreenQuantum; break;
            case 2: quantum_type=BlueQuantum; break;
            case 3:
            {
              if (image->colorspace == CMYKColorspace)
                quantum_type=BlackQuantum;
              break;
            }
            case 4: quantum_type=AlphaQuantum; break;
          }
          for (y=0; y < (ssize_t) image->rows; y+=rows)
          {
            register ssize_t
              x;

            size_t
              rows_remaining;

            /* clip the last tile row/column against the image edge */
            rows_remaining=image->rows-y;
            if ((ssize_t) (y+rows) < (ssize_t) image->rows)
              rows_remaining=rows;
            for (x=0; x < (ssize_t) image->columns; x+=columns)
            {
              size_t
                columns_remaining,
                row;

              columns_remaining=image->columns-x;
              if ((ssize_t) (x+columns) < (ssize_t) image->columns)
                columns_remaining=columns;
              if (TIFFReadTile(tiff,tile_pixels,(uint32) x,(uint32) y,0,i) == 0)
                break;
              p=tile_pixels;
              for (row=0; row < rows_remaining; row++)
              {
                register Quantum
                  *magick_restrict q;

                q=GetAuthenticPixels(image,x,y+row,columns_remaining,1,
                  exception);
                if (q == (Quantum *) NULL)
                  break;
                (void) ImportQuantumPixels(image,(CacheView *) NULL,
                  quantum_info,quantum_type,p,exception);
                p+=TIFFTileRowSize(tiff);
                if (SyncAuthenticPixels(image,exception) == MagickFalse)
                  break;
              }
            }
            if (image->previous == (Image *) NULL)
              {
                status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
                  image->rows);
                if (status == MagickFalse)
                  break;
              }
          }
          /* chunky layout carries all samples in one pass */
          if ((samples_per_pixel > 1) && (interlace != PLANARCONFIG_SEPARATE))
            break;
        }
        tile_pixels=(unsigned char *) RelinquishMagickMemory(tile_pixels);
        break;
      }
      case ReadGenericMethod:
      default:
      {
        MemoryInfo
          *generic_info = (MemoryInfo * ) NULL;

        register uint32
          *p;

        uint32
          *pixels;

        /*
          Convert generic TIFF image.
        */
        if (HeapOverflowSanityCheck(image->rows,sizeof(*pixels)) != MagickFalse)
          ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed");
        number_pixels=(MagickSizeType) image->columns*image->rows;
        generic_info=AcquireVirtualMemory(number_pixels,sizeof(uint32));
        if (generic_info == (MemoryInfo *) NULL)
          ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed");
        pixels=(uint32 *) GetVirtualMemoryBlob(generic_info);
        (void) TIFFReadRGBAImage(tiff,(uint32) image->columns,(uint32)
          image->rows,(uint32 *) pixels,0);
        /* TIFFReadRGBAImage delivers bottom-up; walk both buffers backwards */
        p=pixels+number_pixels-1;
        for (y=0; y < (ssize_t) image->rows; y++)
        {
          register ssize_t
            x;

          register Quantum
            *magick_restrict q;

          q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
          if (q == (Quantum *) NULL)
            break;
          q+=GetPixelChannels(image)*(image->columns-1);
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            SetPixelRed(image,ScaleCharToQuantum((unsigned char)
              TIFFGetR(*p)),q);
            SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
              TIFFGetG(*p)),q);
            SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
              TIFFGetB(*p)),q);
            if (image->alpha_trait != UndefinedPixelTrait)
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
                TIFFGetA(*p)),q);
            p--;
            q-=GetPixelChannels(image);
          }
          if (SyncAuthenticPixels(image,exception) == MagickFalse)
            break;
          if (image->previous == (Image *) NULL)
            {
              status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
                image->rows);
              if (status == MagickFalse)
                break;
            }
        }
        generic_info=RelinquishVirtualMemory(generic_info);
        break;
      }
    }
    pixel_info=RelinquishVirtualMemory(pixel_info);
    SetQuantumImageType(image,quantum_type);
next_tiff_frame:
    if (quantum_info != (QuantumInfo *) NULL)
      quantum_info=DestroyQuantumInfo(quantum_info);
    if (photometric == PHOTOMETRIC_CIELAB)
      DecodeLabImage(image,exception);
    if ((photometric == PHOTOMETRIC_LOGL) ||
        (photometric == PHOTOMETRIC_MINISBLACK) ||
        (photometric == PHOTOMETRIC_MINISWHITE))
      {
        image->type=GrayscaleType;
        if (bits_per_sample == 1)
          image->type=BilevelType;
      }
    /*
      Proceed to next image.
    */
    if (image_info->number_scenes != 0)
      if (image->scene >= (image_info->scene+image_info->number_scenes-1))
        break;
    more_frames=TIFFReadDirectory(tiff) != 0 ? MagickTrue : MagickFalse;
    if (more_frames != MagickFalse)
      {
        /*
          Allocate next image structure.
        */
        AcquireNextImage(image_info,image,exception);
        if (GetNextImageInList(image) == (Image *) NULL)
          {
            status=MagickFalse;
            break;
          }
        image=SyncNextImageInList(image);
        status=SetImageProgress(image,LoadImagesTag,image->scene-1,
          image->scene);
        if (status == MagickFalse)
          break;
      }
  } while ((status != MagickFalse) && (more_frames != MagickFalse));
  TIFFClose(tiff);
  if ((image_info->number_scenes != 0) &&
      (image_info->scene >= GetImageListLength(image)))
    status=MagickFalse;
  if (status == MagickFalse)
    return(DestroyImageList(image));
  TIFFReadPhotoshopLayers(image_info,image,exception);
  return(GetFirstImageInList(image));
}
| 0 |
[
"CWE-125"
] |
ImageMagick
|
824f344ceb823e156ad6e85314d79c087933c2a0
| 251,117,934,919,138,400,000,000,000,000,000,000,000 | 974 |
Check the type of the field before performing the multiplication (details in #2132)
|
/*
 * Build an L2CAP configuration request for "chan" into "data", which is
 * at most "data_size" bytes long.  Returns the number of bytes written.
 *
 * Every option helper is passed "endptr - ptr", the space remaining in
 * the output buffer, so option blocks can never be written past the end
 * of the caller's buffer.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	void *endptr = data + data_size;
	u16 size;

	BT_DBG("chan %p", chan);

	/* The mode was already negotiated by an earlier request/response. */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		/* Fall back to a mode the remote side supports. */
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only advertise the MTU when it differs from the default. */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* Remote supports neither ERTM nor streaming: nothing to add. */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* Leave room for the extended header, SDU length and FCS. */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		/* Leave room for the extended header, SDU length and FCS. */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
| 0 |
[
"CWE-787"
] |
linux
|
e860d2c904d1a9f38a24eb44c9f34b8f915a6ea3
| 1,173,180,223,522,877,500,000,000,000,000,000,000 | 121 |
Bluetooth: Properly check L2CAP config option output buffer length
Validate the output buffer length for L2CAP config requests and responses
to avoid overflowing the stack buffer used for building the option blocks.
Cc: [email protected]
Signed-off-by: Ben Seri <[email protected]>
Signed-off-by: Marcel Holtmann <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
i2f(UINT8 *out_, const UINT8 *in_, int xsize) {
    /* Convert a row of xsize 32-bit signed-integer pixels (4 bytes each)
       into 32-bit floats.  memcpy performs the type pun so we never read
       the raw bytes through an incompatible pointer (strict aliasing). */
    int i;
    for (i = 0; i < xsize; i++) {
        INT32 value;
        FLOAT32 converted;
        memcpy(&value, in_ + 4 * i, sizeof(value));
        converted = value;
        memcpy(out_ + 4 * i, &converted, sizeof(converted));
    }
}
| 0 |
[
"CWE-120"
] |
Pillow
|
518ee3722a99d7f7d890db82a20bd81c1c0327fb
| 323,462,447,412,896,530,000,000,000,000,000,000,000 | 10 |
Use snprintf instead of sprintf
|
get_messages_arg(expand_T *xp UNUSED, int idx)
{
    // Completion function for ":messages": the only candidate argument is
    // "clear"; returning NULL ends the enumeration.
    return idx == 0 ? (char_u *)"clear" : NULL;
}
| 0 |
[
"CWE-78"
] |
vim
|
8c62a08faf89663e5633dc5036cd8695c80f1075
| 152,902,621,175,833,180,000,000,000,000,000,000,000 | 6 |
patch 8.1.0881: can execute shell commands in rvim through interfaces
Problem: Can execute shell commands in rvim through interfaces.
Solution: Disable using interfaces in restricted mode. Allow for writing
file with writefile(), histadd() and a few others.
|
/*
 * Output the error message "str".
 * On Unix, or when the GUI is not (being) started, write directly to
 * stderr when it is an interactive terminal.  Otherwise append the text
 * to the "error_ga" grow array so it can be displayed later.
 */
mch_errmsg(char *str)
{
#if !defined(MSWIN) || defined(FEAT_GUI_MSWIN)
    int		len;
#endif

#if (defined(UNIX) || defined(FEAT_GUI)) && !defined(ALWAYS_USE_GUI) && !defined(VIMDLL)
    // On Unix use stderr if it's a tty.
    // When not going to start the GUI also use stderr.
    // On Mac, when started from Finder, stderr is the console.
    if (
# ifdef UNIX
#  ifdef MACOS_X
	    (isatty(2) && strcmp("/dev/console", ttyname(2)) != 0)
#  else
	    isatty(2)
#  endif
#  ifdef FEAT_GUI
	    ||
#  endif
# endif
# ifdef FEAT_GUI
	    !(gui.in_use || gui.starting)
# endif
	    )
    {
	fprintf(stderr, "%s", str);
	return;
    }
#endif

#if defined(MSWIN) && (!defined(FEAT_GUI_MSWIN) || defined(VIMDLL))
    // On Windows (console build, or DLL build running without the GUI)
    // hand the message to the console-specific writer.
# ifdef VIMDLL
    if (!(gui.in_use || gui.starting))
# endif
    {
	mch_errmsg_c(str);
	return;
    }
#endif

#if !defined(MSWIN) || defined(FEAT_GUI_MSWIN)
    // avoid a delay for a message that isn't there
    emsg_on_display = FALSE;

    // Append "str" (including its NUL) to the grow array for later display.
    len = (int)STRLEN(str) + 1;
    if (error_ga.ga_growsize == 0)
    {
	error_ga.ga_growsize = 80;
	error_ga.ga_itemsize = 1;
    }
    if (ga_grow(&error_ga, len) == OK)
    {
	mch_memmove((char_u *)error_ga.ga_data + error_ga.ga_len,
		(char_u *)str, len);
# ifdef UNIX
	// remove CR characters, they are displayed
	{
	    char_u	*p;

	    p = (char_u *)error_ga.ga_data + error_ga.ga_len;
	    for (;;)
	    {
		p = vim_strchr(p, '\r');
		if (p == NULL)
		    break;
		*p = ' ';
	    }
	}
# endif
	--len;		// don't count the NUL at the end
	error_ga.ga_len += len;
    }
#endif
}
| 0 |
[
"CWE-416"
] |
vim
|
9f1a39a5d1cd7989ada2d1cb32f97d84360e050f
| 67,064,606,906,317,030,000,000,000,000,000,000,000 | 75 |
patch 8.2.4040: keeping track of allocated lines is too complicated
Problem: Keeping track of allocated lines in user functions is too
complicated.
Solution: Instead of freeing individual lines keep them all until the end.
|
void X509Certificate::GetIssuerCert(const FunctionCallbackInfo<Value>& args) {
X509Certificate* cert;
ASSIGN_OR_RETURN_UNWRAP(&cert, args.Holder());
if (cert->issuer_cert_)
args.GetReturnValue().Set(cert->issuer_cert_->object());
}
| 0 |
[
"CWE-295"
] |
node
|
466e5415a2b7b3574ab5403acb87e89a94a980d1
| 170,920,178,649,592,080,000,000,000,000,000,000,000 | 6 |
crypto,tls: implement safe x509 GeneralName format
This change introduces JSON-compatible escaping rules for strings that
include X.509 GeneralName components (see RFC 5280). This non-standard
format avoids ambiguities and prevents injection attacks that could
previously lead to X.509 certificates being accepted even though they
were not valid for the target hostname.
These changes affect the format of subject alternative names and the
format of authority information access. The checkServerIdentity function
has been modified to safely handle the new format, eliminating the
possibility of injecting subject alternative names into the verification
logic.
Because each subject alternative name is only encoded as a JSON string
literal if necessary for security purposes, this change will only be
visible in rare cases.
This addresses CVE-2021-44532.
CVE-ID: CVE-2021-44532
PR-URL: https://github.com/nodejs-private/node-private/pull/300
Reviewed-By: Michael Dawson <[email protected]>
Reviewed-By: Rich Trott <[email protected]>
|
ftp_exec(ftpbuf_t *ftp, const char *cmd)
{
if (ftp == NULL) {
return 0;
}
if (!ftp_putcmd(ftp, "SITE EXEC", cmd)) {
return 0;
}
if (!ftp_getresp(ftp) || ftp->resp != 200) {
return 0;
}
return 1;
}
| 0 |
[
"CWE-189"
] |
php-src
|
ac2832935435556dc593784cd0087b5e576bbe4d
| 271,658,191,796,936,880,000,000,000,000,000,000,000 | 14 |
Fix bug #69545 - avoid overflow when reading list
|
QPDF::getRoot()
{
QPDFObjectHandle root = this->m->trailer.getKey("/Root");
if (! root.isDictionary())
{
throw QPDFExc(qpdf_e_damaged_pdf, this->m->file->getName(),
"", this->m->file->getLastOffset(),
"unable to find /Root dictionary");
}
return root;
}
| 0 |
[
"CWE-125"
] |
qpdf
|
1868a10f8b06631362618bfc85ca8646da4b4b71
| 83,565,039,780,894,575,000,000,000,000,000,000,000 | 11 |
Replace all atoi calls with QUtil::string_to_int
The latter catches underflow/overflow.
|
dwg_bmp (const Dwg_Data *restrict dwg, BITCODE_RL *restrict size)
{
BITCODE_RC i, num_pictures, type;
int found;
BITCODE_RL header_size, address, osize;
Bit_Chain dat = { 0 };
loglevel = dwg->opts & DWG_OPTS_LOGLEVEL;
*size = 0;
assert (dwg);
// copy the chain data. bit_* needs a full chain with opts and version
dat = *(Bit_Chain *)&dwg->thumbnail;
if (!dat.size || !dat.chain)
{
LOG_INFO ("no THUMBNAIL Image Data\n")
return NULL;
}
//dat.byte = 0; sentinel at 16
dat.bit = 0;
dat.opts = dwg->opts;
dat.from_version = dwg->header.from_version;
dat.version = dwg->header.version;
dat.fh = NULL;
#ifdef USE_TRACING
/* Before starting, set the logging level, but only do so once. */
if (!env_var_checked_p)
{
char *probe = getenv ("LIBREDWG_TRACE");
if (probe)
loglevel = atoi (probe);
env_var_checked_p = true;
}
#endif /* USE_TRACING */
osize = bit_read_RL (&dat); /* overall size of all images */
LOG_TRACE ("overall size: " FORMAT_RL " [RL]\n", osize);
if (osize > (dat.size - 4))
{
LOG_ERROR ("Preview overflow > %ld", dat.size - 4);
return NULL;
}
num_pictures = bit_read_RC (&dat);
LOG_INFO ("num_pictures: %d [RC]\n", (int)num_pictures)
found = 0;
header_size = 0;
for (i = 0; i < num_pictures; i++)
{
if (dat.byte > dat.size)
{
LOG_ERROR ("Preview overflow");
break;
}
type = bit_read_RC (&dat);
LOG_TRACE ("\t[%i] Code: %i [RC]\n", i, type)
address = bit_read_RL (&dat);
LOG_TRACE ("\t\tHeader data start: 0x%x [RL]\n", address)
if (type == 1)
{
header_size += bit_read_RL (&dat);
LOG_TRACE ("\t\tHeader data size: %i [RL]\n", header_size)
}
else if (type == 2 && found == 0)
{
*size = bit_read_RL (&dat);
found = 1;
LOG_INFO ("\t\tBMP size: %i [RL]\n", *size)
if (*size > (dat.size - 4))
{
LOG_ERROR ("BMP thumbnail overflow > %ld", dat.size - 4);
return NULL;
}
}
else if (type == 3)
{
osize = bit_read_RL (&dat);
LOG_INFO ("\t\tWMF size: %i [RL]\n", osize)
}
else if (type == 4) // type 4?
{
osize = bit_read_RL (&dat);
LOG_INFO ("\t\tPNG size: %i [RL]\n", osize)
}
else
{
osize = bit_read_RL (&dat);
LOG_TRACE ("\t\tSize of unknown type %i: %i [RL]\n", type, osize)
}
}
dat.byte += header_size;
if (*size)
LOG_TRACE ("BMP offset: %lu\n", dat.byte);
if (dat.byte > dat.size)
{
*size = 0;
LOG_ERROR ("Preview overflow");
return NULL;
}
if (*size > 0)
return (dat.chain + dat.byte);
else
return NULL;
}
| 0 |
[
"CWE-787"
] |
libredwg
|
ecf5183d8b3b286afe2a30021353b7116e0208dd
| 286,243,429,466,409,560,000,000,000,000,000,000,000 | 105 |
dwg_section_wtype: fix fuzzing overflow
with illegal and overlong section names. Fixes GH #349, #352
section names cannot be longer than 24
|
int curl_formget(struct curl_httppost *form, void *arg,
curl_formget_callback append)
{
CURLcode result;
curl_off_t size;
struct FormData *data, *ptr;
result = Curl_getformdata(NULL, &data, form, NULL, &size);
if(result)
return (int)result;
for(ptr = data; ptr; ptr = ptr->next) {
if((ptr->type == FORM_FILE) || (ptr->type == FORM_CALLBACK)) {
char buffer[8192];
size_t nread;
struct Form temp;
Curl_FormInit(&temp, ptr);
do {
nread = readfromfile(&temp, buffer, sizeof(buffer));
if((nread == (size_t) -1) ||
(nread > sizeof(buffer)) ||
(nread != append(arg, buffer, nread))) {
if(temp.fp)
fclose(temp.fp);
Curl_formclean(&data);
return -1;
}
} while(nread);
}
else {
if(ptr->length != append(arg, ptr->line, ptr->length)) {
Curl_formclean(&data);
return -1;
}
}
}
Curl_formclean(&data);
return 0;
}
| 0 |
[
"CWE-200"
] |
curl
|
b3875606925536f82fc61f3114ac42f29eaf6945
| 238,806,636,081,564,560,000,000,000,000,000,000,000 | 41 |
curl_easy_duphandle: CURLOPT_COPYPOSTFIELDS read out of bounds
When duplicating a handle, the data to post was duplicated using
strdup() when it could be binary and contain zeroes and it was not even
zero terminated! This caused read out of bounds crashes/segfaults.
Since the lib/strdup.c file no longer is easily shared with the curl
tool with this change, it now uses its own version instead.
Bug: http://curl.haxx.se/docs/adv_20141105.html
CVE: CVE-2014-3707
Reported-By: Symeon Paraschoudis
|
SPL_METHOD(FilesystemIterator, current)
{
spl_filesystem_object *intern = Z_SPLFILESYSTEM_P(getThis());
if (zend_parse_parameters_none() == FAILURE) {
return;
}
if (SPL_FILE_DIR_CURRENT(intern, SPL_FILE_DIR_CURRENT_AS_PATHNAME)) {
spl_filesystem_object_get_file_name(intern);
RETURN_STRINGL(intern->file_name, intern->file_name_len);
} else if (SPL_FILE_DIR_CURRENT(intern, SPL_FILE_DIR_CURRENT_AS_FILEINFO)) {
spl_filesystem_object_get_file_name(intern);
spl_filesystem_object_create_type(0, intern, SPL_FS_INFO, NULL, return_value);
} else {
ZVAL_OBJ(return_value, Z_OBJ_P(getThis()));
Z_ADDREF_P(return_value);
/*RETURN_STRING(intern->u.dir.entry.d_name, 1);*/
}
}
| 0 |
[
"CWE-74"
] |
php-src
|
a5a15965da23c8e97657278fc8dfbf1dfb20c016
| 105,039,416,605,996,550,000,000,000,000,000,000,000 | 20 |
Fix #78863: DirectoryIterator class silently truncates after a null byte
Since the constructor of DirectoryIterator and friends is supposed to
accepts paths (i.e. strings without NUL bytes), we must not accept
arbitrary strings.
|
void addDbAdminAnyDbPrivileges(PrivilegeVector* privileges) {
Privilege::addPrivilegeToPrivilegeVector(
privileges, Privilege(ResourcePattern::forClusterResource(), ActionType::listDatabases));
Privilege::addPrivilegeToPrivilegeVector(
privileges, Privilege(ResourcePattern::forAnyNormalResource(), dbAdminRoleActions));
ActionSet profileActions = readRoleActions;
profileActions.addAction(ActionType::convertToCapped);
profileActions.addAction(ActionType::createCollection);
profileActions.addAction(ActionType::dropCollection);
Privilege::addPrivilegeToPrivilegeVector(
privileges,
Privilege(ResourcePattern::forCollectionName("system.profile"), profileActions));
}
| 1 |
[
"CWE-20"
] |
mongo
|
865eccaf35aca29d1b71764d50227cdf853752d0
| 212,104,096,568,930,770,000,000,000,000,000,000,000 | 13 |
SERVER-36263 Bypassing operation validation in applyOps should require special privilege
|
static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
{
struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
if (c->page)
flush_slab(s, c);
unfreeze_partials(s, c);
}
| 0 |
[] |
linux
|
fd4d9c7d0c71866ec0c2825189ebd2ce35bd95b8
| 189,831,734,059,899,170,000,000,000,000,000,000,000 | 9 |
mm: slub: add missing TID bump in kmem_cache_alloc_bulk()
When kmem_cache_alloc_bulk() attempts to allocate N objects from a percpu
freelist of length M, and N > M > 0, it will first remove the M elements
from the percpu freelist, then call ___slab_alloc() to allocate the next
element and repopulate the percpu freelist. ___slab_alloc() can re-enable
IRQs via allocate_slab(), so the TID must be bumped before ___slab_alloc()
to properly commit the freelist head change.
Fix it by unconditionally bumping c->tid when entering the slowpath.
Cc: [email protected]
Fixes: ebe909e0fdb3 ("slub: improve bulk alloc strategy")
Signed-off-by: Jann Horn <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
ATPrepSetTableSpace(AlteredTableInfo *tab, Relation rel, char *tablespacename, LOCKMODE lockmode)
{
Oid tablespaceId;
/* Check that the tablespace exists */
tablespaceId = get_tablespace_oid(tablespacename, false);
/* Check permissions except when moving to database's default */
if (OidIsValid(tablespaceId) && tablespaceId != MyDatabaseTableSpace)
{
AclResult aclresult;
aclresult = pg_tablespace_aclcheck(tablespaceId, GetUserId(), ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, ACL_KIND_TABLESPACE, tablespacename);
}
/* Save info for Phase 3 to do the real work */
if (OidIsValid(tab->newTableSpace))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("cannot have multiple SET TABLESPACE subcommands")));
tab->newTableSpace = tablespaceId;
}
| 0 |
[
"CWE-362"
] |
postgres
|
5f173040e324f6c2eebb90d86cf1b0cdb5890f0a
| 60,171,508,079,599,620,000,000,000,000,000,000,000 | 25 |
Avoid repeated name lookups during table and index DDL.
If the name lookups come to different conclusions due to concurrent
activity, we might perform some parts of the DDL on a different table
than other parts. At least in the case of CREATE INDEX, this can be
used to cause the permissions checks to be performed against a
different table than the index creation, allowing for a privilege
escalation attack.
This changes the calling convention for DefineIndex, CreateTrigger,
transformIndexStmt, transformAlterTableStmt, CheckIndexCompatible
(in 9.2 and newer), and AlterTable (in 9.1 and older). In addition,
CheckRelationOwnership is removed in 9.2 and newer and the calling
convention is changed in older branches. A field has also been added
to the Constraint node (FkConstraint in 8.4). Third-party code calling
these functions or using the Constraint node will require updating.
Report by Andres Freund. Patch by Robert Haas and Andres Freund,
reviewed by Tom Lane.
Security: CVE-2014-0062
|
void ndpi_process_extra_packet(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow,
const unsigned char *packet, const unsigned short packetlen,
const u_int64_t current_tick_l, struct ndpi_id_struct *src, struct ndpi_id_struct *dst) {
if(flow == NULL)
return;
if(flow->server_id == NULL)
flow->server_id = dst; /* Default */
/* need at least 20 bytes for ip header */
if(packetlen < 20) {
return;
}
flow->packet.tick_timestamp_l = current_tick_l;
flow->packet.tick_timestamp = (u_int32_t)(current_tick_l / ndpi_str->ticks_per_second);
/* parse packet */
flow->packet.iph = (struct ndpi_iphdr *) packet;
/* we are interested in ipv4 packet */
/* set up the packet headers for the extra packet function to use if it wants */
if(ndpi_init_packet_header(ndpi_str, flow, packetlen) != 0)
return;
/* detect traffic for tcp or udp only */
flow->src = src, flow->dst = dst;
ndpi_connection_tracking(ndpi_str, flow);
/* call the extra packet function (which may add more data/info to flow) */
if(flow->extra_packets_func) {
if((flow->extra_packets_func(ndpi_str, flow)) == 0)
flow->check_extra_packets = 0;
if(++flow->num_extra_packets_checked == flow->max_extra_packets_to_check)
flow->extra_packets_func = NULL; /* Enough packets detected */
}
}
| 0 |
[
"CWE-125"
] |
nDPI
|
61066fb106efa6d3d95b67e47b662de208b2b622
| 324,161,332,207,234,450,000,000,000,000,000,000,000 | 38 |
Added check for heap buffer overflow read
|
static void vrend_pipe_resource_attach_iov(struct pipe_resource *pres,
const struct iovec *iov,
int iov_count,
UNUSED void *data)
{
struct vrend_resource *res = (struct vrend_resource *)pres;
res->iov = iov;
res->num_iovs = iov_count;
if (has_bit(res->storage_bits, VREND_STORAGE_HOST_SYSTEM_MEMORY)) {
vrend_write_to_iovec(res->iov, res->num_iovs, 0,
res->ptr, res->base.width0);
}
}
| 0 |
[
"CWE-787"
] |
virglrenderer
|
95e581fd181b213c2ed7cdc63f2abc03eaaa77ec
| 110,158,708,554,711,760,000,000,000,000,000,000,000 | 15 |
vrend: Add test to resource OOB write and fix it
v2: Also check that no depth != 1 has been send when none is due
Closes: #250
Signed-off-by: Gert Wollny <[email protected]>
Reviewed-by: Chia-I Wu <[email protected]>
|
static const struct nft_expr_type *__nft_expr_type_get(u8 family,
struct nlattr *nla)
{
const struct nft_expr_type *type;
list_for_each_entry(type, &nf_tables_expressions, list) {
if (!nla_strcmp(nla, type->name) &&
(!type->family || type->family == family))
return type;
}
return NULL;
}
| 0 |
[
"CWE-19"
] |
nf
|
a2f18db0c68fec96631c10cad9384c196e9008ac
| 261,267,948,734,725,970,000,000,000,000,000,000,000 | 12 |
netfilter: nf_tables: fix flush ruleset chain dependencies
Jumping between chains doesn't mix well with flush ruleset. Rules
from a different chain and set elements may still refer to us.
[ 353.373791] ------------[ cut here ]------------
[ 353.373845] kernel BUG at net/netfilter/nf_tables_api.c:1159!
[ 353.373896] invalid opcode: 0000 [#1] SMP
[ 353.373942] Modules linked in: intel_powerclamp uas iwldvm iwlwifi
[ 353.374017] CPU: 0 PID: 6445 Comm: 31c3.nft Not tainted 3.18.0 #98
[ 353.374069] Hardware name: LENOVO 5129CTO/5129CTO, BIOS 6QET47WW (1.17 ) 07/14/2010
[...]
[ 353.375018] Call Trace:
[ 353.375046] [<ffffffff81964c31>] ? nf_tables_commit+0x381/0x540
[ 353.375101] [<ffffffff81949118>] nfnetlink_rcv+0x3d8/0x4b0
[ 353.375150] [<ffffffff81943fc5>] netlink_unicast+0x105/0x1a0
[ 353.375200] [<ffffffff8194438e>] netlink_sendmsg+0x32e/0x790
[ 353.375253] [<ffffffff818f398e>] sock_sendmsg+0x8e/0xc0
[ 353.375300] [<ffffffff818f36b9>] ? move_addr_to_kernel.part.20+0x19/0x70
[ 353.375357] [<ffffffff818f44f9>] ? move_addr_to_kernel+0x19/0x30
[ 353.375410] [<ffffffff819016d2>] ? verify_iovec+0x42/0xd0
[ 353.375459] [<ffffffff818f3e10>] ___sys_sendmsg+0x3f0/0x400
[ 353.375510] [<ffffffff810615fa>] ? native_sched_clock+0x2a/0x90
[ 353.375563] [<ffffffff81176697>] ? acct_account_cputime+0x17/0x20
[ 353.375616] [<ffffffff8110dc78>] ? account_user_time+0x88/0xa0
[ 353.375667] [<ffffffff818f4bbd>] __sys_sendmsg+0x3d/0x80
[ 353.375719] [<ffffffff81b184f4>] ? int_check_syscall_exit_work+0x34/0x3d
[ 353.375776] [<ffffffff818f4c0d>] SyS_sendmsg+0xd/0x20
[ 353.375823] [<ffffffff81b1826d>] system_call_fastpath+0x16/0x1b
Release objects in this order: rules -> sets -> chains -> tables, to
make sure no references to chains are held anymore.
Reported-by: Asbjoern Sloth Toennesen <[email protected]>
Signed-off-by: Pablo Neira Ayuso <[email protected]>
|
void test_nghttp2_session_is_my_stream_id(void) {
nghttp2_session *session;
nghttp2_session_callbacks callbacks;
memset(&callbacks, 0, sizeof(nghttp2_session_callbacks));
nghttp2_session_server_new(&session, &callbacks, NULL);
CU_ASSERT(0 == nghttp2_session_is_my_stream_id(session, 0));
CU_ASSERT(0 == nghttp2_session_is_my_stream_id(session, 1));
CU_ASSERT(1 == nghttp2_session_is_my_stream_id(session, 2));
nghttp2_session_del(session);
nghttp2_session_client_new(&session, &callbacks, NULL);
CU_ASSERT(0 == nghttp2_session_is_my_stream_id(session, 0));
CU_ASSERT(1 == nghttp2_session_is_my_stream_id(session, 1));
CU_ASSERT(0 == nghttp2_session_is_my_stream_id(session, 2));
nghttp2_session_del(session);
}
| 0 |
[] |
nghttp2
|
0a6ce87c22c69438ecbffe52a2859c3a32f1620f
| 322,951,726,796,051,630,000,000,000,000,000,000,000 | 20 |
Add nghttp2_option_set_max_outbound_ack
|
bool Binary::has_encryption_info() const {
return has_command<EncryptionInfo>();
}
| 0 |
[
"CWE-703"
] |
LIEF
|
7acf0bc4224081d4f425fcc8b2e361b95291d878
| 72,675,717,517,399,885,000,000,000,000,000,000,000 | 3 |
Resolve #764
|
Error Box_hdlr::parse(BitstreamRange& range)
{
parse_full_box_header(range);
m_pre_defined = range.read32();
m_handler_type = range.read32();
for (int i=0;i<3;i++) {
m_reserved[i] = range.read32();
}
m_name = range.read_string();
return range.get_error();
}
| 0 |
[
"CWE-703"
] |
libheif
|
2710c930918609caaf0a664e9c7bc3dce05d5b58
| 200,913,358,089,732,000,000,000,000,000,000,000,000 | 15 |
force fraction to a limited resolution to finally solve those pesky numerical edge cases
|
static void __init of_unittest_find_node_by_name(void)
{
struct device_node *np;
const char *options, *name;
np = of_find_node_by_path("/testcase-data");
name = kasprintf(GFP_KERNEL, "%pOF", np);
unittest(np && !strcmp("/testcase-data", name),
"find /testcase-data failed\n");
of_node_put(np);
kfree(name);
/* Test if trailing '/' works */
np = of_find_node_by_path("/testcase-data/");
unittest(!np, "trailing '/' on /testcase-data/ should fail\n");
np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-a");
name = kasprintf(GFP_KERNEL, "%pOF", np);
unittest(np && !strcmp("/testcase-data/phandle-tests/consumer-a", name),
"find /testcase-data/phandle-tests/consumer-a failed\n");
of_node_put(np);
kfree(name);
np = of_find_node_by_path("testcase-alias");
name = kasprintf(GFP_KERNEL, "%pOF", np);
unittest(np && !strcmp("/testcase-data", name),
"find testcase-alias failed\n");
of_node_put(np);
kfree(name);
/* Test if trailing '/' works on aliases */
np = of_find_node_by_path("testcase-alias/");
unittest(!np, "trailing '/' on testcase-alias/ should fail\n");
np = of_find_node_by_path("testcase-alias/phandle-tests/consumer-a");
name = kasprintf(GFP_KERNEL, "%pOF", np);
unittest(np && !strcmp("/testcase-data/phandle-tests/consumer-a", name),
"find testcase-alias/phandle-tests/consumer-a failed\n");
of_node_put(np);
kfree(name);
np = of_find_node_by_path("/testcase-data/missing-path");
unittest(!np, "non-existent path returned node %pOF\n", np);
of_node_put(np);
np = of_find_node_by_path("missing-alias");
unittest(!np, "non-existent alias returned node %pOF\n", np);
of_node_put(np);
np = of_find_node_by_path("testcase-alias/missing-path");
unittest(!np, "non-existent alias with relative path returned node %pOF\n", np);
of_node_put(np);
np = of_find_node_opts_by_path("/testcase-data:testoption", &options);
unittest(np && !strcmp("testoption", options),
"option path test failed\n");
of_node_put(np);
np = of_find_node_opts_by_path("/testcase-data:test/option", &options);
unittest(np && !strcmp("test/option", options),
"option path test, subcase #1 failed\n");
of_node_put(np);
np = of_find_node_opts_by_path("/testcase-data/testcase-device1:test/option", &options);
unittest(np && !strcmp("test/option", options),
"option path test, subcase #2 failed\n");
of_node_put(np);
np = of_find_node_opts_by_path("/testcase-data:testoption", NULL);
unittest(np, "NULL option path test failed\n");
of_node_put(np);
np = of_find_node_opts_by_path("testcase-alias:testaliasoption",
&options);
unittest(np && !strcmp("testaliasoption", options),
"option alias path test failed\n");
of_node_put(np);
np = of_find_node_opts_by_path("testcase-alias:test/alias/option",
&options);
unittest(np && !strcmp("test/alias/option", options),
"option alias path test, subcase #1 failed\n");
of_node_put(np);
np = of_find_node_opts_by_path("testcase-alias:testaliasoption", NULL);
unittest(np, "NULL option alias path test failed\n");
of_node_put(np);
options = "testoption";
np = of_find_node_opts_by_path("testcase-alias", &options);
unittest(np && !options, "option clearing test failed\n");
of_node_put(np);
options = "testoption";
np = of_find_node_opts_by_path("/", &options);
unittest(np && !options, "option clearing root node test failed\n");
of_node_put(np);
}
| 0 |
[
"CWE-401"
] |
linux
|
e13de8fe0d6a51341671bbe384826d527afe8d44
| 269,748,048,192,916,800,000,000,000,000,000,000,000 | 98 |
of: unittest: fix memory leak in unittest_data_add
In unittest_data_add, a copy buffer is created via kmemdup. This buffer
is leaked if of_fdt_unflatten_tree fails. The release for the
unittest_data buffer is added.
Fixes: b951f9dc7f25 ("Enabling OF selftest to run without machine's devicetree")
Signed-off-by: Navid Emamdoost <[email protected]>
Reviewed-by: Frank Rowand <[email protected]>
Signed-off-by: Rob Herring <[email protected]>
|
static apr_status_t session_identity_decode(request_rec * r, session_rec * z)
{
char *last = NULL;
char *encoded, *pair;
const char *sep = "&";
/* sanity check - anything to decode? */
if (!z->encoded) {
return OK;
}
/* decode what we have */
encoded = apr_pstrdup(r->pool, z->encoded);
pair = apr_strtok(encoded, sep, &last);
while (pair && pair[0]) {
char *plast = NULL;
const char *psep = "=";
char *key = apr_strtok(pair, psep, &plast);
if (key && *key) {
char *val = apr_strtok(NULL, sep, &plast);
if (!val || !*val) {
apr_table_unset(z->entries, key);
}
else if (!ap_unescape_urlencoded(key) && !ap_unescape_urlencoded(val)) {
if (!strcmp(SESSION_EXPIRY, key)) {
z->expiry = (apr_time_t) apr_atoi64(val);
}
else {
apr_table_set(z->entries, key, val);
}
}
}
pair = apr_strtok(NULL, sep, &last);
}
z->encoded = NULL;
return OK;
}
| 0 |
[
"CWE-476"
] |
httpd
|
67bd9bfe6c38831e14fe7122f1d84391472498f8
| 210,721,734,400,095,360,000,000,000,000,000,000,000 | 39 |
mod_session: save one apr_strtok() in session_identity_decode().
When the encoding is invalid (missing '='), no need to parse further.
git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1887050 13f79535-47bb-0310-9956-ffa450edef68
|
do_one_arg(char_u *str)
{
char_u *p;
int inbacktick;
inbacktick = FALSE;
for (p = str; *str; ++str)
{
// When the backslash is used for escaping the special meaning of a
// character we need to keep it until wildcard expansion.
if (rem_backslash(str))
{
*p++ = *str++;
*p++ = *str;
}
else
{
// An item ends at a space not in backticks
if (!inbacktick && vim_isspace(*str))
break;
if (*str == '`')
inbacktick ^= TRUE;
*p++ = *str;
}
}
str = skipwhite(str);
*p = NUL;
return str;
}
| 0 |
[
"CWE-416",
"CWE-125"
] |
vim
|
6f98371532fcff911b462d51bc64f2ce8a6ae682
| 87,606,268,886,134,080,000,000,000,000,000,000,000 | 30 |
patch 8.2.3884: crash when clearing the argument list while using it
Problem: Crash when clearing the argument list while using it.
Solution: Lock the argument list for ":all".
|
qemuProcessPrepareSEVGuestInput(virDomainObjPtr vm)
{
virDomainSEVDefPtr sev = vm->def->sev;
if (!sev)
return 0;
VIR_DEBUG("Preparing SEV guest");
if (sev->dh_cert) {
if (qemuProcessSEVCreateFile(vm, "dh_cert", sev->dh_cert) < 0)
return -1;
}
if (sev->session) {
if (qemuProcessSEVCreateFile(vm, "session", sev->session) < 0)
return -1;
}
return 0;
}
| 0 |
[
"CWE-416"
] |
libvirt
|
1ac703a7d0789e46833f4013a3876c2e3af18ec7
| 129,067,543,907,980,150,000,000,000,000,000,000,000 | 21 |
qemu: Add missing lock in qemuProcessHandleMonitorEOF
qemuMonitorUnregister will be called in multiple threads (e.g. threads
in rpc worker pool and the vm event thread). In some cases, it isn't
protected by the monitor lock, which may lead to call g_source_unref
more than one time and a use-after-free problem eventually.
Add the missing lock in qemuProcessHandleMonitorEOF (which is the only
position missing lock of monitor I found).
Suggested-by: Michal Privoznik <[email protected]>
Signed-off-by: Peng Liang <[email protected]>
Signed-off-by: Michal Privoznik <[email protected]>
Reviewed-by: Michal Privoznik <[email protected]>
|
static int __init deferred_probe_timeout_setup(char *str)
{
int timeout;
if (!kstrtoint(str, 10, &timeout))
driver_deferred_probe_timeout = timeout;
return 1;
}
| 0 |
[
"CWE-787"
] |
linux
|
aa838896d87af561a33ecefea1caa4c15a68bc47
| 195,712,348,479,886,000,000,000,000,000,000,000,000 | 8 |
drivers core: Use sysfs_emit and sysfs_emit_at for show(device *...) functions
Convert the various sprintf fmaily calls in sysfs device show functions
to sysfs_emit and sysfs_emit_at for PAGE_SIZE buffer safety.
Done with:
$ spatch -sp-file sysfs_emit_dev.cocci --in-place --max-width=80 .
And cocci script:
$ cat sysfs_emit_dev.cocci
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- sprintf(buf,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- snprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- scnprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
expression chr;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- strcpy(buf, chr);
+ sysfs_emit(buf, chr);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- sprintf(buf,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- snprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- scnprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
- len += scnprintf(buf + len, PAGE_SIZE - len,
+ len += sysfs_emit_at(buf, len,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
expression chr;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
...
- strcpy(buf, chr);
- return strlen(buf);
+ return sysfs_emit(buf, chr);
}
Signed-off-by: Joe Perches <[email protected]>
Link: https://lore.kernel.org/r/3d033c33056d88bbe34d4ddb62afd05ee166ab9a.1600285923.git.joe@perches.com
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
static ut64 resolve_symbols_off(RDyldCache *cache, ut64 pa) {
struct MACH0_(mach_header) mh;
if (r_buf_fread_at (cache->buf, pa, (ut8*) &mh, "8i", 1) != sizeof (struct MACH0_(mach_header))) {
return 0;
}
if (mh.magic != MH_MAGIC_64 || mh.sizeofcmds == 0) {
return 0;
}
ut64 cmds_at = pa + sizeof (struct MACH0_(mach_header));
ut64 cursor = cmds_at;
ut64 end = cursor + mh.sizeofcmds;
while (cursor < end) {
ut32 cmd = r_buf_read_le32_at (cache->buf, cursor);
if (cmd == UT32_MAX) {
return 0;
}
ut32 cmdsize = r_buf_read_le32_at (cache->buf, cursor + sizeof (ut32));
if (cmdsize == UT32_MAX) {
return 0;
}
if (cmd == LC_SEGMENT || cmd == LC_SEGMENT_64) {
char segname[17];
segname[16] = 0;
if (r_buf_read_at (cache->buf, cursor + 2 * sizeof (ut32), (ut8 *)segname, 16) != 16) {
return 0;
}
if (!strncmp (segname, "__LINKEDIT", 16)) {
ut64 vmaddr = r_buf_read_le64_at (cache->buf, cursor + 2 * sizeof (ut32) + 16);
if (vmaddr == UT64_MAX) {
return 0;
}
ut32 i,j;
for (i = 0; i < cache->n_hdr; i++) {
cache_hdr_t *hdr = &cache->hdr[i];
ut64 hdr_offset = cache->hdr_offset[i];
ut32 maps_index = cache->maps_index[i];
for (j = 0; j < hdr->mappingCount; j++) {
ut64 map_start = cache->maps[maps_index + j].address;
ut64 map_end = map_start + cache->maps[maps_index + j].size;
if (vmaddr >= map_start && vmaddr < map_end) {
return hdr_offset;
}
}
}
}
}
cursor += cmdsize;
}
return 0;
}
| 0 |
[
"CWE-787"
] |
radare2
|
c84b7232626badd075caf3ae29661b609164bac6
| 37,100,242,247,173,953,000,000,000,000,000,000,000 | 51 |
Fix heap buffer overflow in dyldcache parser ##crash
* Reported by: Lazymio via huntr.dev
* Reproducer: dyldovf
|
node_is_empty (xmlNodePtr node)
{
if (node == NULL)
return TRUE;
if (node->type == XML_TEXT_NODE)
return node->content == NULL || node->content[0] == '\0';
return node->children == NULL;
}
| 0 |
[] |
gvfs
|
f81ff2108ab3b6e370f20dcadd8708d23f499184
| 64,837,298,483,289,120,000,000,000,000,000,000,000 | 10 |
dav: don't unescape the uri twice
path_equal tries to unescape path before comparing. Unfortunately
this function is used also for already unescaped paths. Therefore
unescaping can fail. This commit reverts changes which was done in
commit 50af53d and unescape just uris, which aren't unescaped yet.
https://bugzilla.gnome.org/show_bug.cgi?id=743298
|
/*
 * Fill in the IPv6 payload length of a locally generated packet and
 * pass it to the NF_INET_LOCAL_OUT netfilter hook, with dst_output as
 * the continuation.
 */
int __ip6_local_out(struct sk_buff *skb)
{
	int payload_len = skb->len - sizeof(struct ipv6hdr);

	/* Payloads too large for the 16-bit length field are advertised
	 * as zero in the fixed header. */
	if (payload_len > IPV6_MAXPLEN)
		payload_len = 0;
	ipv6_hdr(skb)->payload_len = htons(payload_len);

	return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
		       skb_dst(skb)->dev, dst_output);
}
| 0 |
[
"CWE-399"
] |
linux
|
75a493e60ac4bbe2e977e7129d6d8cbb0dd236be
| 216,634,090,562,411,550,000,000,000,000,000,000,000 | 12 |
ipv6: ip6_append_data_mtu did not care about pmtudisc and frag_size
If the socket had an IPV6_MTU value set, ip6_append_data_mtu lost track
of this when appending the second frame on a corked socket. This results
in the following splat:
[37598.993962] ------------[ cut here ]------------
[37598.994008] kernel BUG at net/core/skbuff.c:2064!
[37598.994008] invalid opcode: 0000 [#1] SMP
[37598.994008] Modules linked in: tcp_lp uvcvideo videobuf2_vmalloc videobuf2_memops videobuf2_core videodev media vfat fat usb_storage fuse ebtable_nat xt_CHECKSUM bridge stp llc ipt_MASQUERADE nf_conntrack_netbios_ns nf_conntrack_broadcast ip6table_mangle ip6t_REJECT nf_conntrack_ipv6 nf_defrag_ipv6 iptable_nat
+nf_nat_ipv4 nf_nat iptable_mangle nf_conntrack_ipv4 nf_defrag_ipv4 xt_conntrack nf_conntrack ebtable_filter ebtables ip6table_filter ip6_tables be2iscsi iscsi_boot_sysfs bnx2i cnic uio cxgb4i cxgb4 cxgb3i cxgb3 mdio libcxgbi ib_iser rdma_cm ib_addr iw_cm ib_cm ib_sa ib_mad ib_core iscsi_tcp libiscsi_tcp libiscsi
+scsi_transport_iscsi rfcomm bnep iTCO_wdt iTCO_vendor_support snd_hda_codec_conexant arc4 iwldvm mac80211 snd_hda_intel acpi_cpufreq mperf coretemp snd_hda_codec microcode cdc_wdm cdc_acm
[37598.994008] snd_hwdep cdc_ether snd_seq snd_seq_device usbnet mii joydev btusb snd_pcm bluetooth i2c_i801 e1000e lpc_ich mfd_core ptp iwlwifi pps_core snd_page_alloc mei cfg80211 snd_timer thinkpad_acpi snd tpm_tis soundcore rfkill tpm tpm_bios vhost_net tun macvtap macvlan kvm_intel kvm uinput binfmt_misc
+dm_crypt i915 i2c_algo_bit drm_kms_helper drm i2c_core wmi video
[37598.994008] CPU 0
[37598.994008] Pid: 27320, comm: t2 Not tainted 3.9.6-200.fc18.x86_64 #1 LENOVO 27744PG/27744PG
[37598.994008] RIP: 0010:[<ffffffff815443a5>] [<ffffffff815443a5>] skb_copy_and_csum_bits+0x325/0x330
[37598.994008] RSP: 0018:ffff88003670da18 EFLAGS: 00010202
[37598.994008] RAX: ffff88018105c018 RBX: 0000000000000004 RCX: 00000000000006c0
[37598.994008] RDX: ffff88018105a6c0 RSI: ffff88018105a000 RDI: ffff8801e1b0aa00
[37598.994008] RBP: ffff88003670da78 R08: 0000000000000000 R09: ffff88018105c040
[37598.994008] R10: ffff8801e1b0aa00 R11: 0000000000000000 R12: 000000000000fff8
[37598.994008] R13: 00000000000004fc R14: 00000000ffff0504 R15: 0000000000000000
[37598.994008] FS: 00007f28eea59740(0000) GS:ffff88023bc00000(0000) knlGS:0000000000000000
[37598.994008] CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b
[37598.994008] CR2: 0000003d935789e0 CR3: 00000000365cb000 CR4: 00000000000407f0
[37598.994008] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
[37598.994008] DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400
[37598.994008] Process t2 (pid: 27320, threadinfo ffff88003670c000, task ffff88022c162ee0)
[37598.994008] Stack:
[37598.994008] ffff88022e098a00 ffff88020f973fc0 0000000000000008 00000000000004c8
[37598.994008] ffff88020f973fc0 00000000000004c4 ffff88003670da78 ffff8801e1b0a200
[37598.994008] 0000000000000018 00000000000004c8 ffff88020f973fc0 00000000000004c4
[37598.994008] Call Trace:
[37598.994008] [<ffffffff815fc21f>] ip6_append_data+0xccf/0xfe0
[37598.994008] [<ffffffff8158d9f0>] ? ip_copy_metadata+0x1a0/0x1a0
[37598.994008] [<ffffffff81661f66>] ? _raw_spin_lock_bh+0x16/0x40
[37598.994008] [<ffffffff8161548d>] udpv6_sendmsg+0x1ed/0xc10
[37598.994008] [<ffffffff812a2845>] ? sock_has_perm+0x75/0x90
[37598.994008] [<ffffffff815c3693>] inet_sendmsg+0x63/0xb0
[37598.994008] [<ffffffff812a2973>] ? selinux_socket_sendmsg+0x23/0x30
[37598.994008] [<ffffffff8153a450>] sock_sendmsg+0xb0/0xe0
[37598.994008] [<ffffffff810135d1>] ? __switch_to+0x181/0x4a0
[37598.994008] [<ffffffff8153d97d>] sys_sendto+0x12d/0x180
[37598.994008] [<ffffffff810dfb64>] ? __audit_syscall_entry+0x94/0xf0
[37598.994008] [<ffffffff81020ed1>] ? syscall_trace_enter+0x231/0x240
[37598.994008] [<ffffffff8166a7e7>] tracesys+0xdd/0xe2
[37598.994008] Code: fe 07 00 00 48 c7 c7 04 28 a6 81 89 45 a0 4c 89 4d b8 44 89 5d a8 e8 1b ac b1 ff 44 8b 5d a8 4c 8b 4d b8 8b 45 a0 e9 cf fe ff ff <0f> 0b 66 0f 1f 84 00 00 00 00 00 66 66 66 66 90 55 48 89 e5 48
[37598.994008] RIP [<ffffffff815443a5>] skb_copy_and_csum_bits+0x325/0x330
[37598.994008] RSP <ffff88003670da18>
[37599.007323] ---[ end trace d69f6a17f8ac8eee ]---
While there, also check if path mtu discovery is activated for this
socket. The logic was adapted from ip6_append_data when first writing
on the corked socket.
This bug was introduced with commit
0c1833797a5a6ec23ea9261d979aa18078720b74 ("ipv6: fix incorrect ipsec
fragment").
v2:
a) Replace IPV6_PMTU_DISC_DO with IPV6_PMTUDISC_PROBE.
b) Don't pass ipv6_pinfo to ip6_append_data_mtu (suggestion by Gao
feng, thanks!).
c) Change mtu to unsigned int, else we get a warning about
non-matching types because of the min()-macro type-check.
Acked-by: Gao feng <[email protected]>
Cc: YOSHIFUJI Hideaki <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
/* Return the drive type to assume when auto-detection has nothing better.
 * The fallback value is configured on the owning floppy controller. */
static FloppyDriveType get_fallback_drive_type(FDrive *drv)
{
    return drv->fdctrl->fallback;
}
| 0 |
[
"CWE-787"
] |
qemu
|
defac5e2fbddf8423a354ff0454283a2115e1367
| 22,906,311,723,668,780,000,000,000,000,000,000,000 | 4 |
hw/block/fdc: Prevent end-of-track overrun (CVE-2021-3507)
Per the 82078 datasheet, if the end-of-track (EOT byte in
the FIFO) is more than the number of sectors per side, the
command is terminated unsuccessfully:
* 5.2.5 DATA TRANSFER TERMINATION
The 82078 supports terminal count explicitly through
the TC pin and implicitly through the underrun/over-
run and end-of-track (EOT) functions. For full sector
transfers, the EOT parameter can define the last
sector to be transferred in a single or multisector
transfer. If the last sector to be transferred is a par-
tial sector, the host can stop transferring the data in
mid-sector, and the 82078 will continue to complete
the sector as if a hardware TC was received. The
only difference between these implicit functions and
TC is that they return "abnormal termination" result
status. Such status indications can be ignored if they
were expected.
* 6.1.3 READ TRACK
This command terminates when the EOT specified
number of sectors have been read. If the 82078
does not find an I D Address Mark on the diskette
after the second· occurrence of a pulse on the
INDX# pin, then it sets the IC code in Status Regis-
ter 0 to "01" (Abnormal termination), sets the MA bit
in Status Register 1 to "1", and terminates the com-
mand.
* 6.1.6 VERIFY
Refer to Table 6-6 and Table 6-7 for information
concerning the values of MT and EC versus SC and
EOT value.
* Table 6·6. Result Phase Table
* Table 6-7. Verify Command Result Phase Table
Fix by aborting the transfer when EOT > # Sectors Per Side.
Cc: [email protected]
Cc: Hervé Poussineau <[email protected]>
Fixes: baca51faff0 ("floppy driver: disk geometry auto detect")
Reported-by: Alexander Bulekov <[email protected]>
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/339
Signed-off-by: Philippe Mathieu-Daudé <[email protected]>
Message-Id: <[email protected]>
Reviewed-by: Hanna Reitz <[email protected]>
Signed-off-by: Kevin Wolf <[email protected]>
|
njs_primitive_values_compare(njs_vm_t *vm, njs_value_t *val1, njs_value_t *val2)
{
    /*
     * Evaluate the "<" relation between two primitive values.
     * Returns 1 when val1 < val2, 0 when it is not, and -1 when the
     * operands are not comparable (either side converts to NaN).
     */
    double num1, num2;

    if (njs_fast_path(njs_is_numeric(val1))) {
        num1 = njs_number(val1);

        if (njs_fast_path(njs_is_numeric(val2))) {
            num2 = njs_number(val2);

        } else {
            /* Mixed operands: coerce the string side to a number. */
            num2 = njs_string_to_number(val2, 0);
        }

    } else if (njs_is_numeric(val2)) {
        num1 = njs_string_to_number(val1, 0);
        num2 = njs_number(val2);

    } else {
        /* Both operands are strings: compare them lexicographically. */
        return (njs_string_cmp(val1, val2) < 0) ? 1 : 0;
    }

    /* NaN and void values are not comparable with anything. */
    if (isnan(num1) || isnan(num2)) {
        return -1;
    }

    /* Infinities are handled correctly by comparison. */
    return (num1 < num2);
}
| 0 |
[
"CWE-703",
"CWE-754"
] |
njs
|
222d6fdcf0c6485ec8e175f3a7b70d650c234b4e
| 279,848,509,704,606,760,000,000,000,000,000,000,000 | 30 |
Fixed njs_vmcode_interpreter() when "toString" conversion fails.
Previously, while interpreting a user function, njs_vmcode_interpreter()
might return prematurely when an error happens. This is not correct
because the current frame has to be unwound (or exception caught)
first.
The fix is exit through only 5 appropriate exit points to ensure
proper unwinding.
This closes #467 issue on Github.
|
/* Expat end-of-element callback for the GraphML reader: unwinds the
 * per-element state pushed while the element was open and, for "attr"
 * elements, commits the accumulated name/value pair. */
static void endElementHandler(void *userData, const char *name)
{
    userdata_t *ud = (userdata_t *) userData;

    if (strcmp(name, "graph") == 0) {
	pop_subg();
	popString(&ud->elements);
	ud->closedElementType = TAG_GRAPH;
    } else if (strcmp(name, "node") == 0) {
	char *ele_name = topString(ud->elements);
	if (ud->closedElementType == TAG_GRAPH) {
	    /* NOTE(review): presumably the name was consumed by a nested
	     * graph, so the placeholder node (if any) is removed again. */
	    Agnode_t *node = agnode(root, ele_name, 0);
	    if (node) agdelete(root, node);
	}
	popString(&ud->elements);
	Current_class = TAG_GRAPH;
	N = 0;
	ud->closedElementType = TAG_NODE;
    } else if (strcmp(name, "edge") == 0) {
	Current_class = TAG_GRAPH;
	E = 0;
	ud->closedElementType = TAG_EDGE;
	ud->edgeinverted = FALSE;
    } else if (strcmp(name, "attr") == 0) {
	char *name;	/* shadows the parameter, which is known to be "attr" here */
	char *value;
	char buf[SMALLBUF] = GRAPHML_COMP;
	char *dynbuf = 0;

	ud->closedElementType = TAG_NONE;
	if (ud->compositeReadState) {
	    /* Composite attribute: prefix the name with GRAPHML_COMP,
	     * using the stack buffer when it fits and a heap buffer
	     * (freed below) otherwise. */
	    int len = sizeof(GRAPHML_COMP) + agxblen(&ud->xml_attr_name);
	    if (len <= SMALLBUF) {
		name = buf;
	    } else {
		name = dynbuf = N_NEW(len, char);
		strcpy(name, GRAPHML_COMP);
	    }
	    strcpy(name + sizeof(GRAPHML_COMP) - 1,
		   agxbuse(&ud->xml_attr_name));
	    value = agxbuse(&ud->composite_buffer);
	    agxbclear(&ud->xml_attr_value);
	    ud->compositeReadState = FALSE;
	} else {
	    name = agxbuse(&ud->xml_attr_name);
	    value = agxbuse(&ud->xml_attr_value);
	}
	/* Dispatch on where the attribute declaration applies. */
	switch (ud->globalAttrType) {
	case TAG_NONE:
	    setAttr(name, value, ud);
	    break;
	case TAG_NODE:
	    setGlobalNodeAttr(G, name, value, ud);
	    break;
	case TAG_EDGE:
	    setGlobalEdgeAttr(G, name, value, ud);
	    break;
	case TAG_GRAPH:
	    setGraphAttr(G, name, value, ud);
	    break;
	}
	if (dynbuf)
	    free(dynbuf);
	ud->globalAttrType = TAG_NONE;
    }
}
[
"CWE-476"
] |
graphviz
|
839085f8026afd6f6920a0c31ad2a9d880d97932
| 179,188,310,060,533,100,000,000,000,000,000,000,000 | 67 |
attempted fix for null pointer dereference on malformed input
|
// Select the server-side-encryption filter to insert in front of |cb|
// for an S3 PUT.  Leaving |filter| unset is a valid outcome: it means
// the data will be stored without encryption.  Returns 0 on success or
// a negative error code.
int RGWPutObj_ObjStore_S3::get_encrypt_filter(
    std::unique_ptr<rgw::putobj::DataProcessor> *filter,
    rgw::putobj::DataProcessor *cb)
{
  int res = 0;
  if (!multipart_upload_id.empty()) {
    // Multipart part upload: the crypto configuration was fixed when the
    // upload was initiated, so read it back from the upload's meta object.
    RGWMPObj mp(s->object.name, multipart_upload_id);
    rgw_obj obj;
    obj.init_ns(s->bucket, mp.get_meta(), RGW_OBJ_NS_MULTIPART);
    obj.set_in_extra_data(true);
    map<string, bufferlist> xattrs;
    res = get_obj_attrs(store, s, obj, xattrs);
    if (res == 0) {
      std::unique_ptr<BlockCrypt> block_crypt;
      /* We are adding to existing object.
       * We use crypto mode that configured as if we were decrypting. */
      res = rgw_s3_prepare_decrypt(s, xattrs, &block_crypt, crypt_http_responses);
      if (res == 0 && block_crypt != nullptr)
        filter->reset(new RGWPutObj_BlockEncrypt(s->cct, cb, std::move(block_crypt)));
    }
    /* it is ok, to not have encryption at all */
  }
  else
  {
    // Plain PUT: derive the crypto mode from the request itself.
    std::unique_ptr<BlockCrypt> block_crypt;
    res = rgw_s3_prepare_encrypt(s, attrs, nullptr, &block_crypt, crypt_http_responses);
    if (res == 0 && block_crypt != nullptr) {
      filter->reset(new RGWPutObj_BlockEncrypt(s->cct, cb, std::move(block_crypt)));
    }
  }
  return res;
}
| 0 |
[
"CWE-79"
] |
ceph
|
8f90658c731499722d5f4393c8ad70b971d05f77
| 34,038,945,946,805,463,000,000,000,000,000,000,000 | 32 |
rgw: reject unauthenticated response-header actions
Signed-off-by: Matt Benjamin <[email protected]>
Reviewed-by: Casey Bodley <[email protected]>
(cherry picked from commit d8dd5e513c0c62bbd7d3044d7e2eddcd897bd400)
|
// Check that the number of response body bytes actually received matches
// the advertised Content-Length.  Responses without a body, or without a
// known Content-Length (-1), are always considered valid.
bool Downstream::validate_response_recv_body_length() const {
  if (!expect_response_body() || resp_.fs.content_length == -1) {
    return true;
  }

  auto advertised = resp_.fs.content_length;
  auto received = resp_.recv_body_length;
  if (advertised == received) {
    return true;
  }

  if (LOG_ENABLED(INFO)) {
    DLOG(INFO, this) << "response invalid bodylen: content-length="
                     << advertised << ", received=" << received;
  }
  return false;
}
| 0 |
[] |
nghttp2
|
319d5ab1c6d916b6b8a0d85b2ae3f01b3ad04f2c
| 255,245,487,128,480,230,000,000,000,000,000,000,000 | 16 |
nghttpx: Fix request stall
Fix request stall if backend connection is reused and buffer is full.
|
/*
 * Sysfs store handler for a slab cache's "trace" attribute.
 *
 * Tracing a merged cache is going to give confusing results as well as
 * cause other issues like converting a mergeable cache into an
 * unmergeable one, so the update is refused for shared caches.
 *
 * A buffer starting with '1' enables tracing (and also clears
 * __CMPXCHG_DOUBLE); anything else disables it.
 */
static ssize_t trace_store(struct kmem_cache *s, const char *buf,
				size_t length)
{
	if (s->refcount > 1)
		return -EINVAL;

	s->flags &= ~SLAB_TRACE;
	if (buf[0] == '1') {
		s->flags &= ~__CMPXCHG_DOUBLE;
		s->flags |= SLAB_TRACE;
	}

	return length;
}
| 0 |
[] |
linux
|
fd4d9c7d0c71866ec0c2825189ebd2ce35bd95b8
| 166,241,882,322,097,120,000,000,000,000,000,000,000 | 18 |
mm: slub: add missing TID bump in kmem_cache_alloc_bulk()
When kmem_cache_alloc_bulk() attempts to allocate N objects from a percpu
freelist of length M, and N > M > 0, it will first remove the M elements
from the percpu freelist, then call ___slab_alloc() to allocate the next
element and repopulate the percpu freelist. ___slab_alloc() can re-enable
IRQs via allocate_slab(), so the TID must be bumped before ___slab_alloc()
to properly commit the freelist head change.
Fix it by unconditionally bumping c->tid when entering the slowpath.
Cc: [email protected]
Fixes: ebe909e0fdb3 ("slub: improve bulk alloc strategy")
Signed-off-by: Jann Horn <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
// Mutable header accessor.  Delegates to the const overload and casts
// away constness (the usual const/non-const forwarding idiom), keeping
// the lookup logic in a single place.
Header& Binary::header() {
  return const_cast<Header&>(static_cast<const Binary*>(this)->header());
}
| 0 |
[
"CWE-703"
] |
LIEF
|
7acf0bc4224081d4f425fcc8b2e361b95291d878
| 231,845,180,850,926,800,000,000,000,000,000,000,000 | 3 |
Resolve #764
|
// End-to-end HTTP/2 test: with the stop-iteration-and-continue test
// filter installed, a header-only request must still complete once the
// filter resumes iteration.
TEST_P(Http2IntegrationTest, PauseAndResumeHeadersOnly) {
  // Install the filter that stops iteration and continues afterwards.
  config_helper_.addFilter(R"EOF(
name: stop-iteration-and-continue-filter
typed_config:
  "@type": type.googleapis.com/google.protobuf.Empty
)EOF");
  initialize();

  // Send a request with no body and drive it through the fake upstream.
  codec_client_ = makeHttpConnection(lookupPort("http"));
  auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_);
  ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_));
  ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_));
  ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_));
  upstream_request_->encodeHeaders(default_response_headers_, true);
  response->waitForEndStream();
  ASSERT_TRUE(response->complete());
}
| 0 |
[
"CWE-400"
] |
envoy
|
0e49a495826ea9e29134c1bd54fdeb31a034f40c
| 280,994,759,660,334,650,000,000,000,000,000,000,000 | 19 |
http/2: add stats and stream flush timeout (#139)
This commit adds a new stream flush timeout to guard against a
remote server that does not open window once an entire stream has
been buffered for flushing. Additional stats have also been added
to better understand the codecs view of active streams as well as
amount of data buffered.
Signed-off-by: Matt Klein <[email protected]>
|
/*
 * Choose the mmap layout (legacy bottom-up or top-down) for a new mm,
 * applying an ASLR offset when the current task requested randomization
 * via PF_RANDOMIZE.
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long rnd;

	rnd = (current->flags & PF_RANDOMIZE) ? arch_mmap_rnd() : 0UL;

	mm->mmap_legacy_base = TASK_UNMAPPED_BASE + rnd;

	if (mmap_is_legacy()) {
		/* Legacy layout: allocate bottom-up from the legacy base. */
		mm->mmap_base = mm->mmap_legacy_base;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(rnd);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
| 0 |
[
"CWE-254"
] |
tip
|
8b8addf891de8a00e4d39fc32f93f7c5eb8feceb
| 179,749,139,684,576,870,000,000,000,000,000,000,000 | 17 |
x86/mm/32: Enable full randomization on i386 and X86_32
Currently on i386 and on X86_64 when emulating X86_32 in legacy mode, only
the stack and the executable are randomized but not other mmapped files
(libraries, vDSO, etc.). This patch enables randomization for the
libraries, vDSO and mmap requests on i386 and in X86_32 in legacy mode.
By default on i386 there are 8 bits for the randomization of the libraries,
vDSO and mmaps which only uses 1MB of VA.
This patch preserves the original randomness, using 1MB of VA out of 3GB or
4GB. We think that 1MB out of 3GB is not a big cost for having the ASLR.
The first obvious security benefit is that all objects are randomized (not
only the stack and the executable) in legacy mode which highly increases
the ASLR effectiveness, otherwise the attackers may use these
non-randomized areas. But also sensitive setuid/setgid applications are
more secure because currently, attackers can disable the randomization of
these applications by setting the ulimit stack to "unlimited". This is a
very old and widely known trick to disable the ASLR in i386 which has been
allowed for too long.
Another trick used to disable the ASLR was to set the ADDR_NO_RANDOMIZE
personality flag, but fortunately this doesn't work on setuid/setgid
applications because there is security checks which clear Security-relevant
flags.
This patch always randomizes the mmap_legacy_base address, removing the
possibility to disable the ASLR by setting the stack to "unlimited".
Signed-off-by: Hector Marco-Gisbert <[email protected]>
Acked-by: Ismael Ripoll Ripoll <[email protected]>
Acked-by: Kees Cook <[email protected]>
Acked-by: Arjan van de Ven <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: kees Cook <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
|
// Copy each protobuf message in |resources| into an owning
// Config::DecodedResourceImpl and collect both the owned objects and a
// reference vector inside a DecodedResourcesWrapper.  The resource name
// is read from |name_field| (default "name"); aliases and version are
// left empty.
static Config::DecodedResourcesWrapper decodeResources(std::vector<MessageType> resources,
                                                       const std::string& name_field = "name") {
  Config::DecodedResourcesWrapper decoded_resources;
  for (const auto& resource : resources) {
    auto owned_resource = std::make_unique<MessageType>(resource);
    decoded_resources.owned_resources_.emplace_back(new Config::DecodedResourceImpl(
        std::move(owned_resource), MessageUtil::getStringField(resource, name_field), {}, ""));
    decoded_resources.refvec_.emplace_back(*decoded_resources.owned_resources_.back());
  }
  return decoded_resources;
}
| 0 |
[] |
envoy
|
2c60632d41555ec8b3d9ef5246242be637a2db0f
| 207,076,755,134,695,240,000,000,000,000,000,000,000 | 11 |
http: header map security fixes for duplicate headers (#197)
Previously header matching did not match on all headers for
non-inline headers. This patch changes the default behavior to
always logically match on all headers. Multiple individual
headers will be logically concatenated with ',' similar to what
is done with inline headers. This makes the behavior effectively
consistent. This behavior can be temporary reverted by setting
the runtime value "envoy.reloadable_features.header_match_on_all_headers"
to "false".
Targeted fixes have been additionally performed on the following
extensions which make them consider all duplicate headers by default as
a comma concatenated list:
1) Any extension using CEL matching on headers.
2) The header to metadata filter.
3) The JWT filter.
4) The Lua filter.
Like primary header matching used in routing, RBAC, etc. this behavior
can be disabled by setting the runtime value
"envoy.reloadable_features.header_match_on_all_headers" to false.
Finally, the setCopy() header map API previously only set the first
header in the case of duplicate non-inline headers. setCopy() now
behaves similiarly to the other set*() APIs and replaces all found
headers with a single value. This may have had security implications
in the extauth filter which uses this API. This behavior can be disabled
by setting the runtime value
"envoy.reloadable_features.http_set_copy_replace_all_headers" to false.
Fixes https://github.com/envoyproxy/envoy-setec/issues/188
Signed-off-by: Matt Klein <[email protected]>
|
rsvg_release_node (RsvgDrawingCtx * ctx, RsvgNode *node)
{
    /* Pop NODE off the drawing context's stack of acquired nodes.
     * Releases must be strictly LIFO: NODE has to be the most recently
     * acquired entry.  A NULL node is silently ignored. */
    if (node == NULL)
        return;

    g_return_if_fail (ctx->acquired_nodes != NULL);
    g_return_if_fail (ctx->acquired_nodes->data == node);

    ctx->acquired_nodes = g_slist_remove (ctx->acquired_nodes, node);
}
| 0 |
[] |
librsvg
|
a51919f7e1ca9c535390a746fbf6e28c8402dc61
| 51,082,183,775,995,020,000,000,000,000,000,000,000 | 10 |
rsvg: Add rsvg_acquire_node()
This function does proper recursion checks when looking up resources
from URLs and thereby helps avoiding infinite loops when cyclic
references span multiple types of elements.
|
/*
 * Look up and validate the swap device referenced by ENTRY.
 *
 * On success, returns the matching swap_info_struct with swap_lock
 * held; the caller is responsible for dropping the lock.  On any
 * inconsistency (bad type, unused device, out-of-range or free slot)
 * a diagnostic is printed and NULL is returned with no lock held.
 */
static struct swap_info_struct *swap_info_get(swp_entry_t entry)
{
	struct swap_info_struct *p;
	unsigned long offset, type;

	if (!entry.val)
		goto out;
	type = swp_type(entry);
	if (type >= nr_swapfiles)
		goto bad_nofile;
	p = swap_info[type];
	if (!(p->flags & SWP_USED))
		goto bad_device;
	offset = swp_offset(entry);
	if (offset >= p->max)
		goto bad_offset;
	if (!p->swap_map[offset])
		goto bad_free;
	spin_lock(&swap_lock);
	return p;

	/* Error paths: report which sanity check failed, then fall out. */
bad_free:
	printk(KERN_ERR "swap_free: %s%08lx\n", Unused_offset, entry.val);
	goto out;
bad_offset:
	printk(KERN_ERR "swap_free: %s%08lx\n", Bad_offset, entry.val);
	goto out;
bad_device:
	printk(KERN_ERR "swap_free: %s%08lx\n", Unused_file, entry.val);
	goto out;
bad_nofile:
	printk(KERN_ERR "swap_free: %s%08lx\n", Bad_file, entry.val);
out:
	return NULL;
}
| 0 |
[
"CWE-264"
] |
linux-2.6
|
1a5a9906d4e8d1976b701f889d8f35d54b928f25
| 52,500,845,726,882,470,000,000,000,000,000,000,000 | 35 |
mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode
In some cases it may happen that pmd_none_or_clear_bad() is called with
the mmap_sem hold in read mode. In those cases the huge page faults can
allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a
false positive from pmd_bad() that will not like to see a pmd
materializing as trans huge.
It's not khugepaged causing the problem, khugepaged holds the mmap_sem
in write mode (and all those sites must hold the mmap_sem in read mode
to prevent pagetables to go away from under them, during code review it
seems vm86 mode on 32bit kernels requires that too unless it's
restricted to 1 thread per process or UP builds). The race is only with
the huge pagefaults that can convert a pmd_none() into a
pmd_trans_huge().
Effectively all these pmd_none_or_clear_bad() sites running with
mmap_sem in read mode are somewhat speculative with the page faults, and
the result is always undefined when they run simultaneously. This is
probably why it wasn't common to run into this. For example if the
madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page
fault, the hugepage will not be zapped, if the page fault runs first it
will be zapped.
Altering pmd_bad() not to error out if it finds hugepmds won't be enough
to fix this, because zap_pmd_range would then proceed to call
zap_pte_range (which would be incorrect if the pmd become a
pmd_trans_huge()).
The simplest way to fix this is to read the pmd in the local stack
(regardless of what we read, no need of actual CPU barriers, only
compiler barrier needed), and be sure it is not changing under the code
that computes its value. Even if the real pmd is changing under the
value we hold on the stack, we don't care. If we actually end up in
zap_pte_range it means the pmd was not none already and it was not huge,
and it can't become huge from under us (khugepaged locking explained
above).
All we need is to enforce that there is no way anymore that in a code
path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad
can run into a hugepmd. The overhead of a barrier() is just a compiler
tweak and should not be measurable (I only added it for THP builds). I
don't exclude different compiler versions may have prevented the race
too by caching the value of *pmd on the stack (that hasn't been
verified, but it wouldn't be impossible considering
pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines
and there's no external function called in between pmd_trans_huge and
pmd_none_or_clear_bad).
if (pmd_trans_huge(*pmd)) {
if (next-addr != HPAGE_PMD_SIZE) {
VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
split_huge_page_pmd(vma->vm_mm, pmd);
} else if (zap_huge_pmd(tlb, vma, pmd, addr))
continue;
/* fall through */
}
if (pmd_none_or_clear_bad(pmd))
Because this race condition could be exercised without special
privileges this was reported in CVE-2012-1179.
The race was identified and fully explained by Ulrich who debugged it.
I'm quoting his accurate explanation below, for reference.
====== start quote =======
mapcount 0 page_mapcount 1
kernel BUG at mm/huge_memory.c:1384!
At some point prior to the panic, a "bad pmd ..." message similar to the
following is logged on the console:
mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7).
The "bad pmd ..." message is logged by pmd_clear_bad() before it clears
the page's PMD table entry.
143 void pmd_clear_bad(pmd_t *pmd)
144 {
-> 145 pmd_ERROR(*pmd);
146 pmd_clear(pmd);
147 }
After the PMD table entry has been cleared, there is an inconsistency
between the actual number of PMD table entries that are mapping the page
and the page's map count (_mapcount field in struct page). When the page
is subsequently reclaimed, __split_huge_page() detects this inconsistency.
1381 if (mapcount != page_mapcount(page))
1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n",
1383 mapcount, page_mapcount(page));
-> 1384 BUG_ON(mapcount != page_mapcount(page));
The root cause of the problem is a race of two threads in a multithreaded
process. Thread B incurs a page fault on a virtual address that has never
been accessed (PMD entry is zero) while Thread A is executing an madvise()
system call on a virtual address within the same 2 MB (huge page) range.
virtual address space
.---------------------.
| |
| |
.-|---------------------|
| | |
| | |<-- B(fault)
| | |
2 MB | |/////////////////////|-.
huge < |/////////////////////| > A(range)
page | |/////////////////////|-'
| | |
| | |
'-|---------------------|
| |
| |
'---------------------'
- Thread A is executing an madvise(..., MADV_DONTNEED) system call
on the virtual address range "A(range)" shown in the picture.
sys_madvise
// Acquire the semaphore in shared mode.
down_read(¤t->mm->mmap_sem)
...
madvise_vma
switch (behavior)
case MADV_DONTNEED:
madvise_dontneed
zap_page_range
unmap_vmas
unmap_page_range
zap_pud_range
zap_pmd_range
//
// Assume that this huge page has never been accessed.
// I.e. content of the PMD entry is zero (not mapped).
//
if (pmd_trans_huge(*pmd)) {
// We don't get here due to the above assumption.
}
//
// Assume that Thread B incurred a page fault and
.---------> // sneaks in here as shown below.
| //
| if (pmd_none_or_clear_bad(pmd))
| {
| if (unlikely(pmd_bad(*pmd)))
| pmd_clear_bad
| {
| pmd_ERROR
| // Log "bad pmd ..." message here.
| pmd_clear
| // Clear the page's PMD entry.
| // Thread B incremented the map count
| // in page_add_new_anon_rmap(), but
| // now the page is no longer mapped
| // by a PMD entry (-> inconsistency).
| }
| }
|
v
- Thread B is handling a page fault on virtual address "B(fault)" shown
in the picture.
...
do_page_fault
__do_page_fault
// Acquire the semaphore in shared mode.
down_read_trylock(&mm->mmap_sem)
...
handle_mm_fault
if (pmd_none(*pmd) && transparent_hugepage_enabled(vma))
// We get here due to the above assumption (PMD entry is zero).
do_huge_pmd_anonymous_page
alloc_hugepage_vma
// Allocate a new transparent huge page here.
...
__do_huge_pmd_anonymous_page
...
spin_lock(&mm->page_table_lock)
...
page_add_new_anon_rmap
// Here we increment the page's map count (starts at -1).
atomic_set(&page->_mapcount, 0)
set_pmd_at
// Here we set the page's PMD entry which will be cleared
// when Thread A calls pmd_clear_bad().
...
spin_unlock(&mm->page_table_lock)
The mmap_sem does not prevent the race because both threads are acquiring
it in shared mode (down_read). Thread B holds the page_table_lock while
the page's map count and PMD table entry are updated. However, Thread A
does not synchronize on that lock.
====== end quote =======
[[email protected]: checkpatch fixes]
Reported-by: Ulrich Obergfell <[email protected]>
Signed-off-by: Andrea Arcangeli <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Dave Jones <[email protected]>
Acked-by: Larry Woodman <[email protected]>
Acked-by: Rik van Riel <[email protected]>
Cc: <[email protected]> [2.6.38+]
Cc: Mark Salter <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
/*
 * Reserve room for one ELF section inside a module region.
 *
 * Grows *SIZE by any arch-specific prefix, aligns the running size to
 * the section's alignment (treating 0 as 1), and advances *SIZE past
 * the section.  Returns the aligned offset at which the section will
 * be placed.
 */
static long get_offset(struct module *mod, unsigned int *size,
		       Elf_Shdr *sechdr, unsigned int section)
{
	unsigned int align = sechdr->sh_addralign ?: 1;
	long offset;

	*size += arch_mod_section_prepend(mod, section);
	offset = ALIGN(*size, align);
	*size = offset + sechdr->sh_size;

	return offset;
}
| 0 |
[
"CWE-362",
"CWE-347"
] |
linux
|
0c18f29aae7ce3dadd26d8ee3505d07cc982df75
| 88,415,677,812,688,880,000,000,000,000,000,000,000 | 10 |
module: limit enabling module.sig_enforce
Irrespective as to whether CONFIG_MODULE_SIG is configured, specifying
"module.sig_enforce=1" on the boot command line sets "sig_enforce".
Only allow "sig_enforce" to be set when CONFIG_MODULE_SIG is configured.
This patch makes the presence of /sys/module/module/parameters/sig_enforce
dependent on CONFIG_MODULE_SIG=y.
Fixes: fda784e50aac ("module: export module signature enforcement status")
Reported-by: Nayna Jain <[email protected]>
Tested-by: Mimi Zohar <[email protected]>
Tested-by: Jessica Yu <[email protected]>
Signed-off-by: Mimi Zohar <[email protected]>
Signed-off-by: Jessica Yu <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
/*
 * Build an ECN CWR (Congestion Window Reduced) chunk announcing
 * LOWEST_TSN for the given association.  Returns NULL if chunk
 * allocation fails.
 */
struct sctp_chunk *sctp_make_cwr(const struct sctp_association *asoc,
			    const __u32 lowest_tsn,
			    const struct sctp_chunk *chunk)
{
	struct sctp_chunk *retval;
	sctp_cwrhdr_t cwr;

	cwr.lowest_tsn = htonl(lowest_tsn);
	retval = sctp_make_chunk(asoc, SCTP_CID_ECN_CWR, 0,
				 sizeof(sctp_cwrhdr_t));

	if (!retval)
		goto nodata;

	retval->subh.ecn_cwr_hdr =
		sctp_addto_chunk(retval, sizeof(cwr), &cwr);

	/* RFC 2960 6.4 Multi-homed SCTP Endpoints
	 *
	 * An endpoint SHOULD transmit reply chunks (e.g., SACK,
	 * HEARTBEAT ACK, * etc.) to the same destination transport
	 * address from which it * received the DATA or control chunk
	 * to which it is replying.
	 *
	 * [Report a reduced congestion window back to where the ECNE
	 * came from.]
	 */
	if (chunk)
		retval->transport = chunk->transport;

nodata:
	return retval;
}
| 0 |
[
"CWE-20"
] |
linux-2.6
|
ba0166708ef4da7eeb61dd92bbba4d5a749d6561
| 12,146,725,829,360,233,000,000,000,000,000,000,000 | 33 |
sctp: Fix kernel panic while process protocol violation parameter
Since call to function sctp_sf_abort_violation() need paramter 'arg' with
'struct sctp_chunk' type, it will read the chunk type and chunk length from
the chunk_hdr member of chunk. But call to sctp_sf_violation_paramlen()
always with 'struct sctp_paramhdr' type's parameter, it will be passed to
sctp_sf_abort_violation(). This may cause kernel panic.
sctp_sf_violation_paramlen()
|-- sctp_sf_abort_violation()
|-- sctp_make_abort_violation()
This patch fixed this problem. This patch also fix two place which called
sctp_sf_violation_paramlen() with wrong paramter type.
Signed-off-by: Wei Yongjun <[email protected]>
Signed-off-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
// Write the big-number exponent |e| into |bio| as a "0x..." hexadecimal
// string and convert the BIO contents to a V8 value.  The exponent is
// read as a single machine word and split into two 32-bit halves so the
// format string stays portable.
MaybeLocal<Value> GetExponentString(
    Environment* env,
    const BIOPointer& bio,
    const BIGNUM* e) {
  const uint64_t word = static_cast<uint64_t>(BN_get_word(e));
  const uint32_t high = static_cast<uint32_t>(word >> 32);
  const uint32_t low = static_cast<uint32_t>(word);

  // Skip leading zero digits when the value fits in 32 bits.
  if (high == 0) {
    BIO_printf(bio.get(), "0x%x", low);
  } else {
    BIO_printf(bio.get(), "0x%x%08x", high, low);
  }
  return ToV8Value(env, bio);
}
| 0 |
[
"CWE-295"
] |
node
|
466e5415a2b7b3574ab5403acb87e89a94a980d1
| 201,793,618,877,221,450,000,000,000,000,000,000,000 | 14 |
crypto,tls: implement safe x509 GeneralName format
This change introduces JSON-compatible escaping rules for strings that
include X.509 GeneralName components (see RFC 5280). This non-standard
format avoids ambiguities and prevents injection attacks that could
previously lead to X.509 certificates being accepted even though they
were not valid for the target hostname.
These changes affect the format of subject alternative names and the
format of authority information access. The checkServerIdentity function
has been modified to safely handle the new format, eliminating the
possibility of injecting subject alternative names into the verification
logic.
Because each subject alternative name is only encoded as a JSON string
literal if necessary for security purposes, this change will only be
visible in rare cases.
This addresses CVE-2021-44532.
CVE-ID: CVE-2021-44532
PR-URL: https://github.com/nodejs-private/node-private/pull/300
Reviewed-By: Michael Dawson <[email protected]>
Reviewed-By: Rich Trott <[email protected]>
|
/* Choose the segment sequence number at which playback of @pls should
 * start or resume: reload stale live playlists, match the current
 * timestamp for finished (VOD) playlists, keep the current sequence
 * number when it is still within a live window, otherwise start near
 * the live edge or at the first segment. */
static int select_cur_seq_no(HLSContext *c, struct playlist *pls)
{
    int seq_no;

    /* Live playlist that has been idle past its reload interval:
     * refresh it before deciding where to resume. */
    if (!pls->finished && !c->first_packet &&
        av_gettime() - pls->last_load_time >= default_reload_interval(pls))
        /* reload the playlist since it was suspended */
        parse_playlist(c, pls->url, pls, NULL);

    /* If playback is already in progress (we are just selecting a new
     * playlist) and this is a complete file, find the matching segment
     * by counting durations. */
    if (pls->finished && c->cur_timestamp != AV_NOPTS_VALUE) {
        find_timestamp_in_playlist(c, pls, c->cur_timestamp, &seq_no);
        return seq_no;
    }

    if (!pls->finished) {
        if (!c->first_packet && /* we are doing a segment selection during playback */
            c->cur_seq_no >= pls->start_seq_no &&
            c->cur_seq_no < pls->start_seq_no + pls->n_segments)
            /* While spec 3.4.3 says that we cannot assume anything about the
             * content at the same sequence number on different playlists,
             * in practice this seems to work and doing it otherwise would
             * require us to download a segment to inspect its timestamps. */
            return c->cur_seq_no;

        /* If this is a live stream with more than 3 segments, start at the
         * third last segment. */
        if (pls->n_segments > 3)
            return pls->start_seq_no + pls->n_segments - 3;
    }

    /* Otherwise just start on the first segment. */
    return pls->start_seq_no;
}
| 0 |
[
"CWE-703"
] |
FFmpeg
|
7ba100d3e6e8b1e5d5342feb960a7f081d6e15af
| 261,819,606,607,484,500,000,000,000,000,000,000,000 | 36 |
avformat/hls: Fix DoS due to infinite loop
Fixes: loop.m3u
The default max iteration count of 1000 is arbitrary and ideas for a better solution are welcome
Found-by: Xiaohei and Wangchu from Alibaba Security Team
Previous version reviewed-by: Steven Liu <[email protected]>
Signed-off-by: Michael Niedermayer <[email protected]>
(cherry picked from commit 7ec414892ddcad88313848494b6fc5f437c9ca4a)
Signed-off-by: Michael Niedermayer <[email protected]>
|
/* Serialize a range of mode changes into a mode-letter string such as
 * "+ab-c", stopping before the letters plus their parameters would
 * exceed maxlinelen.
 *
 * @param list       Full list of mode changes.
 * @param maxlinelen Maximum combined length of mode letters and params.
 * @param beginit    Iterator to the first change to serialize.
 * @param lastit     Out: iterator one past the last change consumed, so
 *                   the caller can continue from there on the next line.
 * @return The mode-letter string (parameters are appended elsewhere).
 */
static std::string ToModeLetters(const Modes::ChangeList::List& list, std::string::size_type maxlinelen, Modes::ChangeList::List::const_iterator beginit, Modes::ChangeList::List::const_iterator& lastit)
{
	std::string ret;
	std::string::size_type paramlength = 0;
	char output_pm = '\0'; // current output state, '+' or '-'

	Modes::ChangeList::List::const_iterator i;
	for (i = beginit; i != list.end(); ++i)
	{
		const Modes::Change& item = *i;

		// Emit a '+'/'-' sign only when the direction changes.
		const char needed_pm = (item.adding ? '+' : '-');
		if (needed_pm != output_pm)
		{
			output_pm = needed_pm;
			ret.push_back(output_pm);
		}

		// Budget the parameter plus its separating space before
		// committing this mode letter.
		if (!item.param.empty())
			paramlength += item.param.length() + 1;
		if (ret.length() + 1 + paramlength > maxlinelen)
		{
			// Mode sequence is getting too long; drop a dangling
			// trailing '+'/'-' sign before stopping.
			const char c = *ret.rbegin();
			if ((c == '+') || (c == '-'))
				ret.erase(ret.size()-1);
			break;
		}

		ret.push_back(item.mh->GetModeChar());
	}

	lastit = i;
	return ret;
}
| 0 |
[
"CWE-200",
"CWE-732"
] |
inspircd
|
4350a11c663b0d75f8119743bffb7736d87abd4d
| 252,602,382,332,501,200,000,000,000,000,000,000,000 | 35 |
Fix sending malformed pong messages in some cases.
|
static void ims_pcu_process_async_firmware(const struct firmware *fw,
void *context)
{
struct ims_pcu *pcu = context;
int error;
if (!fw) {
dev_err(pcu->dev, "Failed to get firmware %s\n",
IMS_PCU_FIRMWARE_NAME);
goto out;
}
error = ihex_validate_fw(fw);
if (error) {
dev_err(pcu->dev, "Firmware %s is invalid\n",
IMS_PCU_FIRMWARE_NAME);
goto out;
}
mutex_lock(&pcu->cmd_mutex);
ims_pcu_handle_firmware_update(pcu, fw);
mutex_unlock(&pcu->cmd_mutex);
release_firmware(fw);
out:
complete(&pcu->async_firmware_done);
| 0 |
[
"CWE-703"
] |
linux
|
a0ad220c96692eda76b2e3fd7279f3dcd1d8a8ff
| 84,800,666,164,772,130,000,000,000,000,000,000,000 | 28 |
Input: ims-pcu - sanity check against missing interfaces
A malicious device missing interface can make the driver oops.
Add sanity checking.
Signed-off-by: Oliver Neukum <[email protected]>
CC: [email protected]
Signed-off-by: Dmitry Torokhov <[email protected]>
|
/* Return the number of valid (allocated) inodes in the filesystem.
 * Uses the positive-clamped percpu sum, so transient negative drift
 * from concurrent per-cpu updates is never reported. */
static inline s64 valid_inode_count(struct f2fs_sb_info *sbi)
{
	return percpu_counter_sum_positive(&sbi->total_valid_inode_count);
}
| 0 |
[
"CWE-476"
] |
linux
|
4969c06a0d83c9c3dc50b8efcdc8eeedfce896f6
| 44,536,434,122,044,110,000,000,000,000,000,000,000 | 4 |
f2fs: support swap file w/ DIO
Signed-off-by: Jaegeuk Kim <[email protected]>
|
int IsEXR(const char *filename) {
EXRVersion exr_version;
int ret = ParseEXRVersionFromFile(&exr_version, filename);
if (ret != TINYEXR_SUCCESS) {
return TINYEXR_ERROR_INVALID_HEADER;
}
return TINYEXR_SUCCESS;
}
| 0 |
[
"CWE-20",
"CWE-190"
] |
tinyexr
|
a685e3332f61cd4e59324bf3f669d36973d64270
| 173,186,955,335,776,370,000,000,000,000,000,000,000 | 10 |
Make line_no with too large value(2**20) invalid. Fixes #124
|
/* {{{ proto bool mb_ereg_search_init(string str [, string pattern [, string option]])
   Initialize a multibyte regex search: remember the subject string and
   optionally compile the search pattern. Resets the search position and
   any previous match regions. Returns TRUE on success, FALSE on an
   empty pattern or compile failure. */
PHP_FUNCTION(mb_ereg_search_init)
{
	size_t argc = ZEND_NUM_ARGS();
	zval *arg_str;
	char *arg_pattern = NULL, *arg_options = NULL;
	int arg_pattern_len = 0, arg_options_len = 0;
	OnigSyntaxType *syntax = NULL;
	OnigOptionType option;

	if (zend_parse_parameters(argc TSRMLS_CC, "z|ss", &arg_str, &arg_pattern, &arg_pattern_len, &arg_options, &arg_options_len) == FAILURE) {
		return;
	}

	if (argc > 1 && arg_pattern_len == 0) {
		php_error_docref(NULL TSRMLS_CC, E_WARNING, "Empty pattern");
		RETURN_FALSE;
	}

	option = MBREX(regex_default_options);
	syntax = MBREX(regex_default_syntax);

	if (argc == 3) {
		option = 0;
		_php_mb_regex_init_options(arg_options, arg_options_len, &option, &syntax, NULL);
	}

	if (argc > 1) {
		/* create regex pattern buffer */
		if ((MBREX(search_re) = php_mbregex_compile_pattern(arg_pattern, arg_pattern_len, option, MBREX(current_mbctype), syntax TSRMLS_CC)) == NULL) {
			RETURN_FALSE;
		}
	}

	/* Take our reference on the new subject BEFORE releasing the old
	 * one. If arg_str aliases the currently stored search string,
	 * destroying the old reference first could free the zval and then
	 * re-register the freed pointer, leading to a use-after-free /
	 * double free when the search state is torn down. */
	Z_ADDREF_P(arg_str);

	if (MBREX(search_str) != NULL) {
		zval_ptr_dtor(&MBREX(search_str));
		MBREX(search_str) = (zval *)NULL;
	}

	MBREX(search_str) = arg_str;
	/* Detach from the caller's zval unless it is a reference, so later
	 * mutations of the caller's variable do not affect the search. */
	SEPARATE_ZVAL_IF_NOT_REF(&MBREX(search_str));

	MBREX(search_pos) = 0;

	/* Drop any match regions from a previous search. */
	if (MBREX(search_regs) != NULL) {
		onig_region_free(MBREX(search_regs), 1);
		MBREX(search_regs) = (OnigRegion *) NULL;
	}

	RETURN_TRUE;
}
| 1 |
[
"CWE-415"
] |
php-src
|
5b597a2e5b28e2d5a52fc1be13f425f08f47cb62
| 228,275,562,367,383,840,000,000,000,000,000,000,000 | 51 |
Fix bug #72402: _php_mb_regex_ereg_replace_exec - double free
|
/* Reset every field of the global accounting record to zero. */
static void acct_clear(void)
{
    memset(&acct_info, 0, sizeof acct_info);
}
| 0 |
[
"CWE-20"
] |
qemu
|
0be839a2701369f669532ea5884c15bead1c6e08
| 44,364,575,970,592,110,000,000,000,000,000,000,000 | 4 |
migration: fix parameter validation on ram load
During migration, the values read from migration stream during ram load
are not validated. Especially offset in host_from_stream_offset() and
also the length of the writes in the callers of said function.
To fix this, we need to make sure that the [offset, offset + length]
range fits into one of the allocated memory regions.
Validating addr < len should be sufficient since data seems to always be
managed in TARGET_PAGE_SIZE chunks.
Fixes: CVE-2014-7840
Note: follow-up patches add extra checks on each block->host access.
Signed-off-by: Michael S. Tsirkin <[email protected]>
Reviewed-by: Paolo Bonzini <[email protected]>
Reviewed-by: Dr. David Alan Gilbert <[email protected]>
Signed-off-by: Amit Shah <[email protected]>
|
/*
 * Decide whether the capability identified by (type, idx) should be
 * emitted for the terminal-database flavor selected by the global
 * 'tversion'. Each historical flavor exposes only a prefix (or, for
 * BSD termcap, a lookup-table subset) of the modern capability tables.
 */
version_filter(PredType type, PredIdx idx)
/* filter out capabilities we may want to suppress */
{
    switch (tversion) {
    case V_ALLCAPS:		/* SVr4, XSI Curses */
	return (TRUE);

    case V_SVR1:		/* System V Release 1, Ultrix */
	switch (type) {
	case BOOLEAN:
	    return ((idx <= BOOL_IDX(xon_xoff)) ? TRUE : FALSE);
	case NUMBER:
	    return ((idx <= NUM_IDX(width_status_line)) ? TRUE : FALSE);
	case STRING:
	    return ((idx <= STR_IDX(prtr_non)) ? TRUE : FALSE);
	}
	break;

    case V_HPUX:		/* Hewlett-Packard */
	switch (type) {
	case BOOLEAN:
	    return ((idx <= BOOL_IDX(xon_xoff)) ? TRUE : FALSE);
	case NUMBER:
	    return ((idx <= NUM_IDX(label_width)) ? TRUE : FALSE);
	case STRING:
	    /* HP adds function keys and softkey-label strings beyond
	     * the SVr1 prefix. */
	    if (idx <= STR_IDX(prtr_non))
		return (TRUE);
	    else if (FNKEY(idx))	/* function keys */
		return (TRUE);
	    else if (idx == STR_IDX(plab_norm)
		     || idx == STR_IDX(label_on)
		     || idx == STR_IDX(label_off))
		return (TRUE);
	    else
		return (FALSE);
	}
	break;

    case V_AIX:			/* AIX */
	switch (type) {
	case BOOLEAN:
	    return ((idx <= BOOL_IDX(xon_xoff)) ? TRUE : FALSE);
	case NUMBER:
	    return ((idx <= NUM_IDX(width_status_line)) ? TRUE : FALSE);
	case STRING:
	    /* AIX adds function keys beyond the SVr1 prefix. */
	    if (idx <= STR_IDX(prtr_non))
		return (TRUE);
	    else if (FNKEY(idx))	/* function keys */
		return (TRUE);
	    else
		return (FALSE);
	}
	break;

	/* BSD termcap: include only capabilities that have a termcap
	 * equivalent, per the *_from_termcap tables (bounds-checked by
	 * OkIndex). */
#define is_termcap(type) (OkIndex(idx, type##_from_termcap) && \
			  type##_from_termcap[idx])

    case V_BSD:			/* BSD */
	switch (type) {
	case BOOLEAN:
	    return is_termcap(bool);
	case NUMBER:
	    return is_termcap(num);
	case STRING:
	    return is_termcap(str);
	}
	break;
    }

    return (FALSE);		/* pacify the compiler */
}
| 0 |
[
"CWE-125"
] |
ncurses
|
b025434573f466efe27862656a6a9d41dd2bd609
| 217,007,959,641,356,500,000,000,000,000,000,000,000 | 71 |
ncurses 6.1 - patch 20191012
+ amend recent changes to ncurses*-config and pc-files to filter out
Debian linker-flags (report by Sven Joachim, cf: 20150516).
+ clarify relationship between tic, infocmp and captoinfo in manpage.
+ check for invalid hashcode in _nc_find_type_entry and
_nc_find_name_entry.
> fix several errata in tic (reports/testcases by "zjuchenyuan"):
+ check for invalid hashcode in _nc_find_entry.
+ check for missing character after backslash in fmt_entry
+ check for acsc with odd length in dump_entry in check for one-one
mapping (cf: 20060415);
+ check length when converting from old AIX box_chars_1 capability,
overlooked in changes to eliminate strcpy (cf: 20001007).
+ amend the ncurses*-config and pc-files to take into account the rpath
|
/**
 * Invoked when the KAuth unmount helper action finishes for one share.
 *
 * On success, the reply's mount point is matched against the tracked
 * share list and any helper-side error message is reported (unless the
 * job runs silently). On a KAuth failure, the error code is reported
 * instead. Once all shares have been processed, slotFinishJob() is
 * scheduled after a 500 ms grace period so the operating system has
 * time to unregister the mounts before the mount table is re-read.
 */
void Smb4KUnmountJob::slotActionFinished(ActionReply reply)
{
  m_processed++;

  if (reply.succeeded())
  {
    QMutableListIterator<Smb4KShare *> it(m_shares);

    while(it.hasNext())
    {
      Smb4KShare *share = it.next();

      // Check if the unmount process reported an error
      QString errorMsg(reply.data()["mh_error_message"].toString().trimmed());

      if (QString::compare(share->canonicalPath(), reply.data()["mh_mountpoint"].toString()) == 0 &&
          !errorMsg.isEmpty() && !m_silent)
      {
        Smb4KNotification::unmountingFailed(share, errorMsg);
      }
      else
      {
        // Do nothing
      }
    }
  }
  else
  {
    // The auth action failed. Report this.
    if (!m_silent)
    {
      if (reply.type() == ActionReply::KAuthError)
      {
        Smb4KNotification::actionFailed(reply.errorCode());
      }
      else
      {
        Smb4KNotification::actionFailed();
      }
    }
    else
    {
      // Do nothing
    }
  }

  if (m_processed == m_shares.size())
  {
    // Give the operating system some time to process the unmounts
    // before we invoke KMountPoint::currentMountPoints(). It seems
    // that we need at least 500 ms, so that even slow systems have
    // the opportunity to unregister the mounts.
    QTimer::singleShot(500, this, SLOT(slotFinishJob()));
  }
}
| 0 |
[
"CWE-20"
] |
smb4k
|
71554140bdaede27b95dbe4c9b5a028a83c83cce
| 203,434,956,363,796,400,000,000,000,000,000,000,000 | 55 |
Find the mount/umount commands in the helper
Instead of trusting what we get passed in
CVE-2017-8849
|
/* Opportunistically prefetch sibling last-level SPTEs around @sptep
 * using the guest PTEs cached in @gw->prefetch_ptes. Only 4K-level
 * shadow pages are handled, and prefetching is skipped entirely while
 * MMU-notifier invalidations are in flight. */
static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
				u64 *sptep)
{
	struct kvm_mmu_page *sp;
	pt_element_t *gptep = gw->prefetch_ptes;
	u64 *spte;
	int i;

	sp = sptep_to_sp(sptep);
	if (sp->role.level > PG_LEVEL_4K)
		return;

	/*
	 * If addresses are being invalidated, skip prefetching to avoid
	 * accidentally prefetching those addresses.
	 */
	if (unlikely(vcpu->kvm->mmu_notifier_count))
		return;

	/* Direct shadow pages have no guest PTEs to consult. */
	if (sp->role.direct)
		return __direct_pte_prefetch(vcpu, sp, sptep);

	/* Align down to the start of the PTE_PREFETCH_NUM-sized window. */
	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
	spte = sp->spt + i;

	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
		if (spte == sptep)
			continue;

		/* Don't disturb SPTEs that are already mapped. */
		if (is_shadow_present_pte(*spte))
			continue;

		if (!FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i], true))
			break;
	}
}
| 0 |
[] |
linux
|
b1bd5cba3306691c771d558e94baa73e8b0b96b7
| 56,201,173,878,691,140,000,000,000,000,000,000,000 | 37 |
KVM: X86: MMU: Use the correct inherited permissions to get shadow page
When computing the access permissions of a shadow page, use the effective
permissions of the walk up to that point, i.e. the logic AND of its parents'
permissions. Two guest PxE entries that point at the same table gfn need to
be shadowed with different shadow pages if their parents' permissions are
different. KVM currently uses the effective permissions of the last
non-leaf entry for all non-leaf entries. Because all non-leaf SPTEs have
full ("uwx") permissions, and the effective permissions are recorded only
in role.access and merged into the leaves, this can lead to incorrect
reuse of a shadow page and eventually to a missing guest protection page
fault.
For example, here is a shared pagetable:
pgd[] pud[] pmd[] virtual address pointers
/->pmd1(u--)->pte1(uw-)->page1 <- ptr1 (u--)
/->pud1(uw-)--->pmd2(uw-)->pte2(uw-)->page2 <- ptr2 (uw-)
pgd-| (shared pmd[] as above)
\->pud2(u--)--->pmd1(u--)->pte1(uw-)->page1 <- ptr3 (u--)
\->pmd2(uw-)->pte2(uw-)->page2 <- ptr4 (u--)
pud1 and pud2 point to the same pmd table, so:
- ptr1 and ptr3 points to the same page.
- ptr2 and ptr4 points to the same page.
(pud1 and pud2 here are pud entries, while pmd1 and pmd2 here are pmd entries)
- First, the guest reads from ptr1 first and KVM prepares a shadow
page table with role.access=u--, from ptr1's pud1 and ptr1's pmd1.
"u--" comes from the effective permissions of pgd, pud1 and
pmd1, which are stored in pt->access. "u--" is used also to get
the pagetable for pud1, instead of "uw-".
- Then the guest writes to ptr2 and KVM reuses pud1 which is present.
The hypervisor set up a shadow page for ptr2 with pt->access is "uw-"
even though the pud1 pmd (because of the incorrect argument to
kvm_mmu_get_page in the previous step) has role.access="u--".
- Then the guest reads from ptr3. The hypervisor reuses pud1's
shadow pmd for pud2, because both use "u--" for their permissions.
Thus, the shadow pmd already includes entries for both pmd1 and pmd2.
- At last, the guest writes to ptr4. This causes no vmexit or pagefault,
because pud1's shadow page structures included an "uw-" page even though
its role.access was "u--".
Any kind of shared pagetable might have the similar problem when in
virtual machine without TDP enabled if the permissions are different
from different ancestors.
In order to fix the problem, we change pt->access to be an array, and
any access in it will not include permissions ANDed from child ptes.
The test code is: https://lore.kernel.org/kvm/[email protected]/
Remember to test it with TDP disabled.
The problem had existed long before the commit 41074d07c78b ("KVM: MMU:
Fix inherited permissions for emulated guest pte updates"), and it
is hard to find which is the culprit. So there is no fixes tag here.
Signed-off-by: Lai Jiangshan <[email protected]>
Message-Id: <[email protected]>
Cc: [email protected]
Fixes: cea0f0e7ea54 ("[PATCH] KVM: MMU: Shadow page table caching")
Signed-off-by: Paolo Bonzini <[email protected]>
|
/* Write-protect all SPTEs mapping @memslot at @start_level and above,
 * covering both the rmap-based shadow MMU (under mmu_lock held for
 * write) and the TDP MMU (under mmu_lock held for read), then flush
 * remote TLBs if anything was actually write-protected. */
void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
				      const struct kvm_memory_slot *memslot,
				      int start_level)
{
	bool flush = false;

	if (kvm_memslots_have_rmaps(kvm)) {
		write_lock(&kvm->mmu_lock);
		flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect,
					  start_level, KVM_MAX_HUGEPAGE_LEVEL,
					  false);
		write_unlock(&kvm->mmu_lock);
	}

	if (is_tdp_mmu_enabled(kvm)) {
		read_lock(&kvm->mmu_lock);
		flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, start_level);
		read_unlock(&kvm->mmu_lock);
	}

	/*
	 * Flush TLBs if any SPTEs had to be write-protected to ensure that
	 * guest writes are reflected in the dirty bitmap before the memslot
	 * update completes, i.e. before enabling dirty logging is visible to
	 * userspace.
	 *
	 * Perform the TLB flush outside the mmu_lock to reduce the amount of
	 * time the lock is held. However, this does mean that another CPU can
	 * now grab mmu_lock and encounter a write-protected SPTE while CPUs
	 * still have a writable mapping for the associated GFN in their TLB.
	 *
	 * This is safe but requires KVM to be careful when making decisions
	 * based on the write-protection status of an SPTE. Specifically, KVM
	 * also write-protects SPTEs to monitor changes to guest page tables
	 * during shadow paging, and must guarantee no CPUs can write to those
	 * page before the lock is dropped. As mentioned in the previous
	 * paragraph, a write-protected SPTE is no guarantee that CPU cannot
	 * perform writes. So to determine if a TLB flush is truly required, KVM
	 * will clear a separate software-only bit (MMU-writable) and skip the
	 * flush if-and-only-if this bit was already clear.
	 *
	 * See is_writable_pte() for more details.
	 */
	if (flush)
		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
}
| 0 |
[
"CWE-476"
] |
linux
|
9f46c187e2e680ecd9de7983e4d081c3391acc76
| 174,157,161,080,871,600,000,000,000,000,000,000,000 | 46 |
KVM: x86/mmu: fix NULL pointer dereference on guest INVPCID
With shadow paging enabled, the INVPCID instruction results in a call
to kvm_mmu_invpcid_gva. If INVPCID is executed with CR0.PG=0, the
invlpg callback is not set and the result is a NULL pointer dereference.
Fix it trivially by checking for mmu->invlpg before every call.
There are other possibilities:
- check for CR0.PG, because KVM (like all Intel processors after P5)
flushes guest TLB on CR0.PG changes so that INVPCID/INVLPG are a
nop with paging disabled
- check for EFER.LMA, because KVM syncs and flushes when switching
MMU contexts outside of 64-bit mode
All of these are tricky, go for the simple solution. This is CVE-2022-1789.
Reported-by: Yongkang Jia <[email protected]>
Cc: [email protected]
Signed-off-by: Paolo Bonzini <[email protected]>
|
/* Scan hash slot @hslot for sockets that conflict with @sk binding to
 * port @num in namespace @net.
 *
 * Two modes:
 *  - @bitmap == NULL: return 1 on the first genuine conflict, 0 if the
 *    port is free (a reuseport-compatible socket owned by the same uid
 *    does not count as a conflict).
 *  - @bitmap != NULL: never bail out early; instead record every
 *    conflicting port in the chain at bit (port >> log) and return 0,
 *    so the caller can pick any unmarked port.
 */
static int udp_lib_lport_inuse(struct net *net, __u16 num,
			       const struct udp_hslot *hslot,
			       unsigned long *bitmap,
			       struct sock *sk, unsigned int log)
{
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);

	sk_for_each(sk2, &hslot->head) {
		/* A conflict requires same netns, same port (when not in
		 * bitmap mode), incompatible SO_REUSEADDR settings, an
		 * overlapping device binding, and overlapping addresses. */
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (bitmap || udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    inet_rcv_saddr_equal(sk, sk2, true)) {
			if (sk2->sk_reuseport && sk->sk_reuseport &&
			    !rcu_access_pointer(sk->sk_reuseport_cb) &&
			    uid_eq(uid, sock_i_uid(sk2))) {
				/* Compatible reuseport socket: not a
				 * conflict in early-exit mode. */
				if (!bitmap)
					return 0;
			} else {
				if (!bitmap)
					return 1;
				__set_bit(udp_sk(sk2)->udp_port_hash >> log,
					  bitmap);
			}
		}
	}
	return 0;
}
| 0 |
[
"CWE-362"
] |
net
|
85f1bd9a7b5a79d5baa8bf44af19658f7bf77bfa
| 273,080,478,589,016,720,000,000,000,000,000,000,000 | 31 |
udp: consistently apply ufo or fragmentation
When iteratively building a UDP datagram with MSG_MORE and that
datagram exceeds MTU, consistently choose UFO or fragmentation.
Once skb_is_gso, always apply ufo. Conversely, once a datagram is
split across multiple skbs, do not consider ufo.
Sendpage already maintains the first invariant, only add the second.
IPv6 does not have a sendpage implementation to modify.
A gso skb must have a partial checksum, do not follow sk_no_check_tx
in udp_send_skb.
Found by syzkaller.
Fixes: e89e9cf539a2 ("[IPv4/IPv6]: UFO Scatter-gather approach")
Reported-by: Andrey Konovalov <[email protected]>
Signed-off-by: Willem de Bruijn <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
// Read-only accessor for the stored per-dimension order array.
VarDimArray order() const { return order_; }
| 0 |
[
"CWE-703",
"CWE-787"
] |
tensorflow
|
8ba6fa29cd8bf9cef9b718dc31c78c73081f5b31
| 147,308,762,391,533,570,000,000,000,000,000,000,000 | 1 |
Fix heap-buffer-overflow issue with `tf.raw_ops.SparseSplit`.
PiperOrigin-RevId: 371242872
Change-Id: I482bb3d12602c7c3cc9446f97fb9f584bb98e9a4
|
/* Register a configfs dependency on @item within the o2nm cluster
 * subsystem so the item cannot be removed while depended upon.
 * Propagates configfs_depend_item()'s return value. */
int o2nm_depend_item(struct config_item *item)
{
	return configfs_depend_item(&o2nm_cluster_group.cs_subsys, item);
}
| 0 |
[
"CWE-476",
"CWE-284"
] |
linux
|
853bc26a7ea39e354b9f8889ae7ad1492ffa28d2
| 147,762,345,765,483,400,000,000,000,000,000,000,000 | 4 |
ocfs2: subsystem.su_mutex is required while accessing the item->ci_parent
The subsystem.su_mutex is required while accessing the item->ci_parent,
otherwise, NULL pointer dereference to the item->ci_parent will be
triggered in the following situation:
add node delete node
sys_write
vfs_write
configfs_write_file
o2nm_node_store
o2nm_node_local_write
do_rmdir
vfs_rmdir
configfs_rmdir
mutex_lock(&subsys->su_mutex);
unlink_obj
item->ci_group = NULL;
item->ci_parent = NULL;
to_o2nm_cluster_from_node
node->nd_item.ci_parent->ci_parent
BUG since of NULL pointer dereference to nd_item.ci_parent
Moreover, the o2nm_cluster also should be protected by the
subsystem.su_mutex.
[[email protected]: v2]
Link: http://lkml.kernel.org/r/[email protected]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Alex Chen <[email protected]>
Reviewed-by: Jun Piao <[email protected]>
Reviewed-by: Joseph Qi <[email protected]>
Cc: Mark Fasheh <[email protected]>
Cc: Joel Becker <[email protected]>
Cc: Junxiao Bi <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
  // Writes one generated cross feature for batch `batch_index` at
  // position `cross_count` into the preallocated output tensors: the
  // indices matrix gets the (batch, position) pair and the values
  // vector gets the cross itself.
  // NOTE(review): assumes output_start_indices_[batch_index] + cross_count
  // stays within the allocated output extent — confirm against caller.
  void Update(const int64 batch_index, const int64 cross_count,
              const OutType& cross) const {
    const int64 output_index = output_start_indices_[batch_index] + cross_count;

    auto indices_matrix = indices_out_->matrix<int64>();
    indices_matrix(output_index, 0) = batch_index;
    indices_matrix(output_index, 1) = cross_count;

    auto value_vec = values_out_->vec<OutType>();
    value_vec(output_index) = cross;
  }
| 0 |
[
"CWE-843"
] |
tensorflow
|
b1cc5e5a50e7cee09f2c6eb48eb40ee9c4125025
| 13,218,461,637,199,190,000,000,000,000,000,000,000 | 11 |
Fix `tf.raw_ops.SparseCross` failing CHECK.
PiperOrigin-RevId: 368701671
Change-Id: Id805729dd9ba0bda36e4bb309408129b55fb649d
|
/* Common implementation behind ftruncate()/ftruncate64().
 *
 * @fd:     open file descriptor to truncate
 * @length: new file length; must be non-negative
 * @small:  non-zero for the 32-bit entry point, which caps @length at
 *          MAX_NON_LFS unless the file was opened with O_LARGEFILE
 *
 * Returns 0 on success or a negative errno.
 */
static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
{
	struct inode * inode;
	struct dentry *dentry;
	struct file * file;
	int error;

	error = -EINVAL;
	if (length < 0)
		goto out;
	error = -EBADF;
	file = fget(fd);
	if (!file)
		goto out;

	/* explicitly opened as large or we are on 64-bit box */
	if (file->f_flags & O_LARGEFILE)
		small = 0;

	dentry = file->f_path.dentry;
	inode = dentry->d_inode;
	/* Only regular files opened for writing may be truncated. */
	error = -EINVAL;
	if (!S_ISREG(inode->i_mode) || !(file->f_mode & FMODE_WRITE))
		goto out_putf;

	error = -EINVAL;
	/* Cannot ftruncate over 2^31 bytes without large file support */
	if (small && length > MAX_NON_LFS)
		goto out_putf;

	error = -EPERM;
	if (IS_APPEND(inode))
		goto out_putf;

	/* Honour mandatory locks, then let the LSM veto the truncate. */
	error = locks_verify_truncate(inode, file, length);
	if (!error)
		error = security_path_truncate(&file->f_path);
	if (!error)
		error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, file);
out_putf:
	fput(file);
out:
	return error;
}
| 0 |
[
"CWE-732"
] |
linux-stable
|
e57712ebebbb9db7d8dcef216437b3171ddcf115
| 5,414,046,205,397,051,000,000,000,000,000,000,000 | 44 |
merge fchmod() and fchmodat() guts, kill ancient broken kludge
The kludge in question is undocumented and doesn't work for 32bit
binaries on amd64, sparc64 and s390. Passing (mode_t)-1 as
mode had (since 0.99.14v and contrary to behaviour of any
other Unix, prescriptions of POSIX, SuS and our own manpages)
was kinda-sorta no-op. Note that any software relying on
that (and looking for examples shows none) would be visibly
broken on sparc64, where practically all userland is built
32bit. No such complaints noticed...
Signed-off-by: Al Viro <[email protected]>
|
/* {{{ proto mixed ZipArchive::open(string filename [, int flags])
   Open a zip archive at the given path. Returns TRUE on success or the
   libzip error code on failure. Any previously opened archive on this
   object is closed first. */
static ZIPARCHIVE_METHOD(open)
{
	struct zip *intern;
	char *filename;
	int filename_len;
	int err = 0;
	long flags = 0;
	char resolved_path[MAXPATHLEN];

	zval *this = getThis();
	ze_zip_object *ze_obj = NULL;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|l", &filename, &filename_len, &flags) == FAILURE) {
		return;
	}

	if (this) {
		/* We do not use ZIP_FROM_OBJECT, zip init function here */
		/* NOTE(review): ze_obj stays NULL when there is no $this;
		 * the dereferences below presume a method-call context —
		 * confirm this cannot be reached as a plain function. */
		ze_obj = (ze_zip_object*) zend_object_store_get_object(this TSRMLS_CC);
	}

	if (filename_len == 0) {
		php_error_docref(NULL TSRMLS_CC, E_WARNING, "Empty string as source");
		RETURN_FALSE;
	}

	/* Reject paths containing embedded NUL bytes: a shorter C-string
	 * length than the zval length means a NUL was smuggled in. */
	if (strlen(filename) != filename_len) {
		RETURN_FALSE;
	}

	if (ZIP_OPENBASEDIR_CHECKPATH(filename)) {
		RETURN_FALSE;
	}

	if (!expand_filepath(filename, resolved_path TSRMLS_CC)) {
		RETURN_FALSE;
	}

	if (ze_obj->za) {
		/* we already have an opened zip, free it */
		if (zip_close(ze_obj->za) != 0) {
			_zip_free(ze_obj->za);
		}
		ze_obj->za = NULL;
	}
	if (ze_obj->filename) {
		efree(ze_obj->filename);
		ze_obj->filename = NULL;
	}

	intern = zip_open(resolved_path, flags, &err);
	if (!intern || err) {
		RETURN_LONG((long)err);
	}
	ze_obj->filename = estrdup(resolved_path);
	ze_obj->filename_len = filename_len;
	ze_obj->za = intern;
	RETURN_TRUE;
}
| 0 |
[] |
php-src
|
ce96fd6b0761d98353761bf78d5bfb55291179fd
| 25,083,774,896,544,650,000,000,000,000,000,000,000 | 59 |
- fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs. Patch by Rasmus
|
/* Serialize the CompositionOffset ('ctts') box to the bitstream.
 * Version 1 boxes write signed 32-bit offsets; version 0 writes them
 * as unsigned. Returns GF_OK or the error from the full-box header
 * write. */
GF_Err ctts_Write(GF_Box *s, GF_BitStream *bs)
{
	GF_CompositionOffsetBox *ptr = (GF_CompositionOffsetBox *)s;
	GF_Err e = gf_isom_full_box_write(s, bs);
	u32 idx;

	if (e) return e;

	gf_bs_write_u32(bs, ptr->nb_entries);
	for (idx = 0; idx < ptr->nb_entries; idx++) {
		gf_bs_write_u32(bs, ptr->entries[idx].sampleCount);
		if (ptr->version) {
			gf_bs_write_int(bs, ptr->entries[idx].decodingOffset, 32);
		} else {
			gf_bs_write_u32(bs, (u32) ptr->entries[idx].decodingOffset);
		}
	}
	return GF_OK;
}
| 0 |
[
"CWE-400",
"CWE-401"
] |
gpac
|
d2371b4b204f0a3c0af51ad4e9b491144dd1225c
| 271,809,752,006,002,950,000,000,000,000,000,000,000 | 19 |
prevent dref memleak on invalid input (#1183)
|
/* Encapsulate @skb in a GENEVE-over-IPv6/UDP header and transmit it
 * via the tunnel's IPv6 socket. Returns 0 on success or a negative
 * errno from route lookup or header construction. */
static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
			    struct geneve_dev *geneve,
			    const struct ip_tunnel_info *info)
{
	bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
	struct geneve_sock *gs6 = rcu_dereference(geneve->sock6);
	const struct ip_tunnel_key *key = &info->key;
	struct dst_entry *dst = NULL;
	struct flowi6 fl6;
	__u8 prio, ttl;
	__be16 sport;
	int err;

	dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info);
	if (IS_ERR(dst))
		return PTR_ERR(dst);

	/* Account for the full tunnel overhead when checking path MTU. */
	skb_tunnel_check_pmtu(skb, dst, GENEVE_IPV6_HLEN + info->options_len);

	sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
	if (geneve->collect_md) {
		/* Metadata-collection mode: TOS/TTL come from the key. */
		prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
		ttl = key->ttl;
	} else {
		prio = ip_tunnel_ecn_encap(ip6_tclass(fl6.flowlabel),
					   ip_hdr(skb), skb);
		if (geneve->ttl_inherit)
			ttl = ip_tunnel_get_ttl(ip_hdr(skb), skb);
		else
			ttl = key->ttl;
		/* Fall back to the route's hop limit when no TTL is set. */
		ttl = ttl ? : ip6_dst_hoplimit(dst);
	}
	err = geneve_build_skb(dst, skb, info, xnet, sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	udp_tunnel6_xmit_skb(dst, gs6->sock->sk, skb, dev,
			     &fl6.saddr, &fl6.daddr, prio, ttl,
			     info->key.label, sport, geneve->info.key.tp_dst,
			     !(info->key.tun_flags & TUNNEL_CSUM));
	return 0;
}
| 0 |
[] |
net
|
6c8991f41546c3c472503dff1ea9daaddf9331c2
| 288,035,006,208,939,440,000,000,000,000,000,000,000 | 42 |
net: ipv6_stub: use ip6_dst_lookup_flow instead of ip6_dst_lookup
ipv6_stub uses the ip6_dst_lookup function to allow other modules to
perform IPv6 lookups. However, this function skips the XFRM layer
entirely.
All users of ipv6_stub->ip6_dst_lookup use ip_route_output_flow (via the
ip_route_output_key and ip_route_output helpers) for their IPv4 lookups,
which calls xfrm_lookup_route(). This patch fixes this inconsistent
behavior by switching the stub to ip6_dst_lookup_flow, which also calls
xfrm_lookup_route().
This requires some changes in all the callers, as these two functions
take different arguments and have different return types.
Fixes: 5f81bd2e5d80 ("ipv6: export a stub for IPv6 symbols used by vxlan")
Reported-by: Xiumei Mu <[email protected]>
Signed-off-by: Sabrina Dubroca <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
/* Propagate REG_LIVE_READ marks from @state up to @parent so the state
 * pruning logic knows which registers and stack slots the child
 * actually read. Write marks (REG_LIVE_WRITTEN) screen off propagation,
 * but only when @parent is the direct parent of @state. Returns true
 * if any new mark was set on @parent. */
static bool do_propagate_liveness(const struct bpf_verifier_state *state,
				  struct bpf_verifier_state *parent)
{
	bool writes = parent == state->parent; /* Observe write marks */
	bool touched = false; /* any changes made? */
	int i;

	if (!parent)
		return touched;

	/* Propagate read liveness of registers... */
	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
	/* We don't need to worry about FP liveness because it's read-only */
	for (i = 0; i < BPF_REG_FP; i++) {
		/* Already propagated: nothing to do. */
		if (parent->regs[i].live & REG_LIVE_READ)
			continue;
		/* A write in the child screens off reads below it. */
		if (writes && (state->regs[i].live & REG_LIVE_WRITTEN))
			continue;
		if (state->regs[i].live & REG_LIVE_READ) {
			parent->regs[i].live |= REG_LIVE_READ;
			touched = true;
		}
	}

	/* ... and stack slots */
	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
		    i < parent->allocated_stack / BPF_REG_SIZE; i++) {
		/* Only spilled-pointer slots carry liveness. */
		if (parent->stack[i].slot_type[0] != STACK_SPILL)
			continue;
		if (state->stack[i].slot_type[0] != STACK_SPILL)
			continue;
		if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ)
			continue;
		if (writes &&
		    (state->stack[i].spilled_ptr.live & REG_LIVE_WRITTEN))
			continue;
		if (state->stack[i].spilled_ptr.live & REG_LIVE_READ) {
			parent->stack[i].spilled_ptr.live |= REG_LIVE_READ;
			touched = true;
		}
	}
	return touched;
}
| 0 |
[
"CWE-20"
] |
linux
|
c131187db2d3fa2f8bf32fdf4e9a4ef805168467
| 61,074,828,502,477,850,000,000,000,000,000,000,000 | 41 |
bpf: fix branch pruning logic
when the verifier detects that register contains a runtime constant
and it's compared with another constant it will prune exploration
of the branch that is guaranteed not to be taken at runtime.
This is all correct, but malicious program may be constructed
in such a way that it always has a constant comparison and
the other branch is never taken under any conditions.
In this case such path through the program will not be explored
by the verifier. It won't be taken at run-time either, but since
all instructions are JITed the malicious program may cause JITs
to complain about using reserved fields, etc.
To fix the issue we have to track the instructions explored by
the verifier and sanitize instructions that are dead at run time
with NOPs. We cannot reject such dead code, since llvm generates
it for valid C code, since it doesn't do as much data flow
analysis as the verifier does.
Fixes: 17a5267067f3 ("bpf: verifier (add verifier core)")
Signed-off-by: Alexei Starovoitov <[email protected]>
Acked-by: Daniel Borkmann <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
|
/* Dispatch printing of one SDR record list entry according to its
 * record type. Record types without a printer are accepted silently.
 * Returns 0 or the error code of the type-specific print routine. */
ipmi_sdr_print_listentry(struct ipmi_intf *intf, struct sdr_record_list *entry)
{
	int rc = 0;

	switch (entry->type) {
	case SDR_RECORD_TYPE_FULL_SENSOR:
	case SDR_RECORD_TYPE_COMPACT_SENSOR:
		rc = ipmi_sdr_print_sensor_fc(intf, entry->record.common, entry->type);
		break;
	case SDR_RECORD_TYPE_EVENTONLY_SENSOR:
		rc = ipmi_sdr_print_sensor_eventonly(intf,
						     entry->record.eventonly);
		break;
	case SDR_RECORD_TYPE_GENERIC_DEVICE_LOCATOR:
		rc = ipmi_sdr_print_sensor_generic_locator(entry->record.
							   genloc);
		break;
	case SDR_RECORD_TYPE_FRU_DEVICE_LOCATOR:
		rc = ipmi_sdr_print_sensor_fru_locator(entry->record.fruloc);
		break;
	case SDR_RECORD_TYPE_MC_DEVICE_LOCATOR:
		rc = ipmi_sdr_print_sensor_mc_locator(entry->record.mcloc);
		break;
	case SDR_RECORD_TYPE_ENTITY_ASSOC:
		break;
	case SDR_RECORD_TYPE_OEM:
		rc = ipmi_sdr_print_sensor_oem(entry->record.oem);
		break;
	case SDR_RECORD_TYPE_DEVICE_ENTITY_ASSOC:
	case SDR_RECORD_TYPE_MC_CONFIRMATION:
	case SDR_RECORD_TYPE_BMC_MSG_CHANNEL_INFO:
		/* not implemented yet */
		break;
	}

	return rc;
}
| 0 |
[
"CWE-120"
] |
ipmitool
|
7ccea283dd62a05a320c1921e3d8d71a87772637
| 225,668,789,058,216,370,000,000,000,000,000,000,000 | 37 |
fru, sdr: Fix id_string buffer overflows
Final part of the fixes for CVE-2020-5208, see
https://github.com/ipmitool/ipmitool/security/advisories/GHSA-g659-9qxw-p7cp
9 variants of stack buffer overflow when parsing `id_string` field of
SDR records returned from `CMD_GET_SDR` command.
SDR record structs have an `id_code` field, and an `id_string` `char`
array.
The length of `id_string` is calculated as `(id_code & 0x1f) + 1`,
which can be larger than expected 16 characters (if `id_code = 0xff`,
then length will be `(0xff & 0x1f) + 1 = 32`).
In numerous places, this can cause stack buffer overflow when copying
into fixed buffer of size `17` bytes from this calculated length.
|
/*
 * Completes an asynchronous replace-readwrite operation, delegating to
 * the GFile implementation's replace_readwrite_finish vfunc.
 *
 * Returns: the result of the vfunc, or NULL on error (with @error set,
 * including legacy GSimpleAsyncResult-style errors).
 */
g_file_replace_readwrite_finish (GFile         *file,
                                 GAsyncResult  *res,
                                 GError       **error)
{
  GFileIface *vtable;

  g_return_val_if_fail (G_IS_FILE (file), NULL);
  g_return_val_if_fail (G_IS_ASYNC_RESULT (res), NULL);

  /* Old-style async results report their error here directly. */
  if (g_async_result_legacy_propagate_error (res, error))
    return NULL;

  vtable = G_FILE_GET_IFACE (file);
  return vtable->replace_readwrite_finish (file, res, error);
}
| 0 |
[
"CWE-362"
] |
glib
|
d8f8f4d637ce43f8699ba94c9b7648beda0ca174
| 92,483,065,870,928,230,000,000,000,000,000,000,000 | 15 |
gfile: Limit access to files when copying
file_copy_fallback creates new files with default permissions and
set the correct permissions after the operation is finished. This
might cause that the files can be accessible by more users during
the operation than expected. Use G_FILE_CREATE_PRIVATE for the new
files to limit access to those files.
|
}
} else {
GF_LOG(GF_LOG_WARNING, GF_LOG_DASH, ("[Dasher] Invalid descriptor %s, expecting '<' as first character\n", desc));
}
}
}
static void dasher_setup_set_defaults(GF_DasherCtx *ctx, GF_MPD_AdaptationSet *set)
{
u32 i, count;
Bool main_role_set = GF_FALSE;
//by default setup alignment
if (ctx->sseg) set->subsegment_alignment = ctx->align;
else set->segment_alignment = ctx->align;
//startWithSAP is set when the first packet comes in
//the rest depends on the various profiles/iop, to check
count = gf_list_count(set->representations);
for (i=0; i<count; i++) {
GF_MPD_Representation *rep = gf_list_get(set->representations, i);
GF_DashStream *ds = rep->playback.udta;
if (set->max_width < ds->width) set->max_width = ds->width;
if (set->max_height < ds->height) set->max_height = ds->height;
/* if (set->max_bandwidth < ds->rep->bandwidth) set->max_bandwidth = ds->rep->bandwidth;
if (set->max_framerate * ds->fps.den < ds->fps.num) set->max_framerate = (u32) (ds->fps.num / ds->fps.den);
*/
/*set trick mode*/
if (set->intra_only && (ds->stream_type==GF_STREAM_VISUAL)) {
char value[256];
GF_MPD_Descriptor* desc;
sprintf(value, "%d", ds->sync_as_id);
desc = gf_mpd_descriptor_new(NULL, "http://dashif.org/guidelines/trickmode", value);
gf_list_add(set->essential_properties, desc);
}
/*set role*/
if (ds->p_role) {
u32 j, role_count;
role_count = ds->p_role->value.string_list.nb_items;
for (j=0; j<role_count; j++) {
char *role = ds->p_role->value.string_list.vals[j];
GF_MPD_Descriptor *desc=NULL;
char *uri=NULL;
//all roles defined by dash 5th edition
if (!strcmp(role, "caption") || !strcmp(role, "subtitle") || !strcmp(role, "main")
|| !strcmp(role, "alternate") || !strcmp(role, "supplementary") || !strcmp(role, "commentary")
|| !strcmp(role, "dub") || !strcmp(role, "description") || !strcmp(role, "sign")
|| !strcmp(role, "metadata") || !strcmp(role, "enhanced-audio-intelligibility")
|| !strcmp(role, "emergency") || !strcmp(role, "forced-subtitle")
|| !strcmp(role, "easyreader") || !strcmp(role, "karaoke")
) {
uri = "urn:mpeg:dash:role:2011";
if (!strcmp(role, "main")) main_role_set = GF_TRUE;
} else {
char *sep = strrchr(role, ':');
if (sep) {
sep[0] = 0;
desc = gf_mpd_descriptor_new(NULL, role, sep+1);
sep[0] = ':';
} else {
GF_LOG(GF_LOG_WARNING, GF_LOG_DASH, ("[Dasher] Unrecognized role %s - using GPAC urn for schemaID\n", role));
uri = "urn:gpac:dash:role:2013";
}
}
if (!desc)
desc = gf_mpd_descriptor_new(NULL, uri, role);
gf_list_add(set->role, desc);
}
}
//set SRD
if (!i && ds->srd.z && ds->srd.w) {
char value[256];
GF_MPD_Descriptor *desc;
if (ds->dep_id) {
sprintf(value, "1,%d,%d,%d,%d", ds->srd.x, ds->srd.y, ds->srd.z, ds->srd.w);
desc = gf_mpd_descriptor_new(NULL, "urn:mpeg:dash:srd:2014", value);
gf_list_add(set->supplemental_properties, desc);
} else {
if (ds->tile_base) {
sprintf(value, "1,0,0,0,0,%d,%d", ds->srd.z, ds->srd.w);
} else {
const GF_PropertyValue *p = gf_filter_pid_get_property(ds->ipid, GF_PROP_PID_SRD_REF);
if (p) {
sprintf(value, "1,%d,%d,%d,%d,%d,%d", ds->srd.x, ds->srd.y, ds->srd.z, ds->srd.w, p->value.vec2i.x, p->value.vec2i.y);
} else {
sprintf(value, "1,%d,%d,%d,%d", ds->srd.x, ds->srd.y, ds->srd.z, ds->srd.w);
}
}
desc = gf_mpd_descriptor_new(NULL, "urn:mpeg:dash:srd:2014", value);
gf_list_add(set->essential_properties, desc);
}
}
//set HDR
if (ds->hdr_type > DASHER_HDR_NONE) {
char value[256];
GF_MPD_Descriptor* desc;
sprintf(value, "9");
desc = gf_mpd_descriptor_new(NULL, "urn:mpeg:mpegB:cicp:ColourPrimaries", value);
gf_list_add(set->essential_properties, desc);
sprintf(value, "9");
desc = gf_mpd_descriptor_new(NULL, "urn:mpeg:mpegB:cicp:MatrixCoefficients", value);
gf_list_add(set->essential_properties, desc);
if (ds->hdr_type==DASHER_HDR_PQ10) {
sprintf(value, "16");
desc = gf_mpd_descriptor_new(NULL, "urn:mpeg:mpegB:cicp:TransferCharacteristics", value);
gf_list_add(set->essential_properties, desc);
}
if (ds->hdr_type == DASHER_HDR_HLG) {
sprintf(value, "14");
desc = gf_mpd_descriptor_new(NULL, "urn:mpeg:mpegB:cicp:TransferCharacteristics", value);
gf_list_add(set->essential_properties, desc);
sprintf(value, "18");
desc = gf_mpd_descriptor_new(NULL, "urn:mpeg:mpegB:cicp:TransferCharacteristics", value);
gf_list_add(set->supplemental_properties, desc);
}
}
| 0 |
[
"CWE-787"
] |
gpac
|
ea1eca00fd92fa17f0e25ac25652622924a9a6a0
| 145,210,305,083,089,250,000,000,000,000,000,000,000 | 121 |
fixed #2138
|
/*
 * Allocate and/or initialize a MYSQL connection handle.
 *
 * If mysql is NULL a new handle is calloc()ed and flagged free_me so a
 * later close releases it; otherwise the caller-supplied structure is
 * zeroed in place.  Both the net extension and the client extension
 * blocks are allocated here, and the library defaults (charset, connect
 * timeout, method table, LOCAL INFILE policy) are installed.
 *
 * Returns the initialized handle, or 0/NULL on library-init or
 * allocation failure.
 *
 * BUGFIX: the error path used to leak mysql->net.extension (and, for a
 * caller-supplied handle, mysql->extension) when the second calloc()
 * failed; both blocks are now freed and reset before returning.
 */
mysql_init(MYSQL *mysql)
{
  if (mysql_server_init(0, NULL, NULL))
    return NULL;
  if (!mysql)
  {
    if (!(mysql=(MYSQL*) calloc(1, sizeof(MYSQL))))
      return 0;
    mysql->free_me=1;
    mysql->net.pvio= 0;
    mysql->net.extension= 0;
  }
  else
  {
    memset((char*) (mysql), 0, sizeof(*(mysql)));
    mysql->net.pvio= 0;
    mysql->free_me= 0;
    mysql->net.extension= 0;
  }
  if (!(mysql->net.extension= (struct st_mariadb_net_extension *)
                              calloc(1, sizeof(struct st_mariadb_net_extension))) ||
      !(mysql->extension= (struct st_mariadb_extension *)
                          calloc(1, sizeof(struct st_mariadb_extension))))
    goto error;
  mysql->options.report_data_truncation= 1;
  mysql->options.connect_timeout=CONNECT_TIMEOUT;
  mysql->charset= mysql_find_charset_name(MARIADB_DEFAULT_CHARSET);
  mysql->methods= &MARIADB_DEFAULT_METHODS;
  strcpy(mysql->net.sqlstate, "00000");
  mysql->net.last_error[0]= mysql->net.last_errno= mysql->net.extension->extended_errno= 0;

  if (ENABLED_LOCAL_INFILE != LOCAL_INFILE_MODE_OFF)
    mysql->options.client_flag|= CLIENT_LOCAL_FILES;
  mysql->extension->auto_local_infile= ENABLED_LOCAL_INFILE == LOCAL_INFILE_MODE_AUTO
                                       ? WAIT_FOR_QUERY : ALWAYS_ACCEPT;
  mysql->options.reconnect= 0;
  return mysql;
error:
  /* Release partial allocations so a failed init cannot leak, even for
   * a caller-supplied handle that we must not free ourselves. */
  free(mysql->net.extension);
  mysql->net.extension= NULL;
  free(mysql->extension);
  mysql->extension= NULL;
  if (mysql->free_me)
    free(mysql);
  return 0;
}
| 0 |
[
"CWE-20"
] |
mariadb-connector-c
|
2759b87d72926b7c9b5426437a7c8dd15ff57945
| 62,218,856,110,837,070,000,000,000,000,000,000,000 | 43 |
sanity checks for client-supplied OK packet content
reported by Matthias Kaiser, Apple Information Security
|
/**
 * Cancel the pending ZRTP protocol timer for this queue.
 *
 * Forwards the cancellation request, tagged with the literal "ZRTP"
 * identifier, to the shared timeout provider if one is installed.
 *
 * @return always 1.
 */
int32_t ZrtpQueue::cancelTimer() {
    std::string tag("ZRTP");

    if (staticTimeoutProvider)
        staticTimeoutProvider->cancelRequest(this, tag);

    return 1;
}
| 0 |
[
"CWE-119"
] |
ZRTPCPP
|
c8617100f359b217a974938c5539a1dd8a120b0e
| 174,118,827,033,163,750,000,000,000,000,000,000,000 | 7 |
Fix vulnerabilities found and reported by Mark Dowd
- limit length of memcpy
- limit number of offered algorithms in Hello packet
- length check in PING packet
- fix a small coding error
|
/*
 * Parse one "authid-rewrite*" directive from the slapd configuration and
 * hand it to the librewrite engine.
 *
 * fname/lineno identify the config source for error reporting.  argv[0]
 * carries the directive name with an "authid-" prefix that librewrite
 * does not understand, so the prefix is stripped before parsing and the
 * original pointer restored afterwards (the caller may reuse argv).
 *
 * Returns the rewrite_parse() status (0 on success).
 */
int slap_sasl_rewrite_config(
	const char *fname,
	int lineno,
	int argc,
	char **argv
)
{
	int rc;
	char *savearg0;

	/* init at first call */
	if ( sasl_rwinfo == NULL ) {
		/* NOTE(review): rewrite_info_init() can presumably return NULL
		 * on allocation failure, which is not checked here before
		 * rewrite_parse() -- TODO confirm against librewrite. */
 		sasl_rwinfo = rewrite_info_init( REWRITE_MODE_USE_DEFAULT );
	}

	/* strip "authid-" prefix for parsing */
	savearg0 = argv[0];
	argv[0] += STRLENOF( "authid-" );
	rc = rewrite_parse( sasl_rwinfo, fname, lineno, argc, argv );
	argv[0] = savearg0;

	return rc;
}
| 0 |
[
"CWE-617"
] |
openldap
|
02dfc32d658fadc25e4040f78e36592f6e1e1ca0
| 192,504,195,510,753,700,000,000,000,000,000,000,000 | 23 |
ITS#9406 fix debug msg
|
/*
 * Blank the foreground console.
 *
 * @entering_gfx: nonzero when called because the VT is switching into
 *	graphics mode; in that case the screen is saved and the driver's
 *	blank hook is invoked, but no blank timer or console_blank_hook
 *	processing takes place.
 *
 * Must be called with the console lock held (WARN_CONSOLE_UNLOCKED).
 * On success console_blanked records the blanked VT as fg_console + 1.
 */
void do_blank_screen(int entering_gfx)
{
	struct vc_data *vc = vc_cons[fg_console].d;
	int i;

	might_sleep();

	WARN_CONSOLE_UNLOCKED();

	if (console_blanked) {
		/* Already blanked; if a deferred VESA powerdown is pending,
		 * complete it now instead of blanking again. */
		if (blank_state == blank_vesa_wait) {
			blank_state = blank_off;
			vc->vc_sw->con_blank(vc, vesa_blank_mode + 1, 0);
		}
		return;
	}

	/* entering graphics mode? */
	if (entering_gfx) {
		hide_cursor(vc);
		save_screen(vc);
		vc->vc_sw->con_blank(vc, -1, 1);
		console_blanked = fg_console + 1;
		blank_state = blank_off;
		set_origin(vc);
		return;
	}

	blank_state = blank_off;

	/* don't blank graphics */
	if (vc->vc_mode != KD_TEXT) {
		console_blanked = fg_console + 1;
		return;
	}

	hide_cursor(vc);
	del_timer_sync(&console_timer);
	blank_timer_expired = 0;

	save_screen(vc);
	/* In case we need to reset origin, blanking hook returns 1 */
	i = vc->vc_sw->con_blank(vc, vesa_off_interval ? 1 : (vesa_blank_mode + 1), 0);
	console_blanked = fg_console + 1;
	if (i)
		set_origin(vc);

	/* An external blank hook (e.g. APM) may take over from here. */
	if (console_blank_hook && console_blank_hook(1))
		return;

	if (vesa_off_interval && vesa_blank_mode) {
		/* Soft blank now; schedule the deeper VESA powerdown later. */
		blank_state = blank_vesa_wait;
		mod_timer(&console_timer, jiffies + vesa_off_interval);
	}
	vt_event_post(VT_EVENT_BLANK, vc->vc_num, vc->vc_num);
}
| 0 |
[
"CWE-125"
] |
linux
|
3c4e0dff2095c579b142d5a0693257f1c58b4804
| 251,329,651,805,289,430,000,000,000,000,000,000,000 | 56 |
vt: Disable KD_FONT_OP_COPY
It's buggy:
On Fri, Nov 06, 2020 at 10:30:08PM +0800, Minh Yuan wrote:
> We recently discovered a slab-out-of-bounds read in fbcon in the latest
> kernel ( v5.10-rc2 for now ). The root cause of this vulnerability is that
> "fbcon_do_set_font" did not handle "vc->vc_font.data" and
> "vc->vc_font.height" correctly, and the patch
> <https://lkml.org/lkml/2020/9/27/223> for VT_RESIZEX can't handle this
> issue.
>
> Specifically, we use KD_FONT_OP_SET to set a small font.data for tty6, and
> use KD_FONT_OP_SET again to set a large font.height for tty1. After that,
> we use KD_FONT_OP_COPY to assign tty6's vc_font.data to tty1's vc_font.data
> in "fbcon_do_set_font", while tty1 retains the original larger
> height. Obviously, this will cause an out-of-bounds read, because we can
> access a smaller vc_font.data with a larger vc_font.height.
Further there was only one user ever.
- Android's loadfont, busybox and console-tools only ever use OP_GET
and OP_SET
- fbset documentation only mentions the kernel cmdline font: option,
not anything else.
- systemd used OP_COPY before release 232 published in Nov 2016
Now unfortunately the crucial report seems to have gone down with
gmane, and the commit message doesn't say much. But the pull request
hints at OP_COPY being broken
https://github.com/systemd/systemd/pull/3651
So in other words, this never worked, and the only project which
foolishly every tried to use it, realized that rather quickly too.
Instead of trying to fix security issues here on dead code by adding
missing checks, fix the entire thing by removing the functionality.
Note that systemd code using the OP_COPY function ignored the return
value, so it doesn't matter what we're doing here really - just in
case a lone server somewhere happens to be extremely unlucky and
running an affected old version of systemd. The relevant code from
font_copy_to_all_vcs() in systemd was:
/* copy font from active VT, where the font was uploaded to */
cfo.op = KD_FONT_OP_COPY;
cfo.height = vcs.v_active-1; /* tty1 == index 0 */
(void) ioctl(vcfd, KDFONTOP, &cfo);
Note this just disables the ioctl, garbage collecting the now unused
callbacks is left for -next.
v2: Tetsuo found the old mail, which allowed me to find it on another
archive. Add the link too.
Acked-by: Peilin Ye <[email protected]>
Reported-by: Minh Yuan <[email protected]>
References: https://lists.freedesktop.org/archives/systemd-devel/2016-June/036935.html
References: https://github.com/systemd/systemd/pull/3651
Cc: Greg KH <[email protected]>
Cc: Peilin Ye <[email protected]>
Cc: Tetsuo Handa <[email protected]>
Signed-off-by: Daniel Vetter <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
/*
 * h2v1 merged upsampling entry point: route to the internal
 * implementation specialized for the output colorspace's component
 * order and pixel extent.  Any unrecognized colorspace falls back to
 * the plain three-component implementation.
 */
h2v1_merged_upsample(j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
                     JDIMENSION in_row_group_ctr, JSAMPARRAY output_buf)
{
  int cs = cinfo->out_color_space;

  if (cs == JCS_EXT_RGB)
    extrgb_h2v1_merged_upsample_internal(cinfo, input_buf, in_row_group_ctr,
                                         output_buf);
  else if (cs == JCS_EXT_RGBX || cs == JCS_EXT_RGBA)
    extrgbx_h2v1_merged_upsample_internal(cinfo, input_buf, in_row_group_ctr,
                                          output_buf);
  else if (cs == JCS_EXT_BGR)
    extbgr_h2v1_merged_upsample_internal(cinfo, input_buf, in_row_group_ctr,
                                         output_buf);
  else if (cs == JCS_EXT_BGRX || cs == JCS_EXT_BGRA)
    extbgrx_h2v1_merged_upsample_internal(cinfo, input_buf, in_row_group_ctr,
                                          output_buf);
  else if (cs == JCS_EXT_XBGR || cs == JCS_EXT_ABGR)
    extxbgr_h2v1_merged_upsample_internal(cinfo, input_buf, in_row_group_ctr,
                                          output_buf);
  else if (cs == JCS_EXT_XRGB || cs == JCS_EXT_ARGB)
    extxrgb_h2v1_merged_upsample_internal(cinfo, input_buf, in_row_group_ctr,
                                          output_buf);
  else
    h2v1_merged_upsample_internal(cinfo, input_buf, in_row_group_ctr,
                                  output_buf);
}
| 0 |
[
"CWE-476"
] |
libjpeg-turbo
|
9120a247436e84c0b4eea828cb11e8f665fcde30
| 276,905,405,483,087,200,000,000,000,000,000,000,000 | 38 |
Fix jpeg_skip_scanlines() segfault w/merged upsamp
The additional segfault mentioned in #244 was due to the fact that
the merged upsamplers use a different private structure than the
non-merged upsamplers. jpeg_skip_scanlines() was assuming the latter, so
when merged upsampling was enabled, jpeg_skip_scanlines() clobbered one
of the IDCT method pointers in the merged upsampler's private structure.
For reasons unknown, the test image in #441 did not encounter this
segfault (too small?), but it encountered an issue similar to the one
fixed in 5bc43c7821df982f65aa1c738f67fbf7cba8bd69, whereby it was
necessary to set up a dummy postprocessing function in
read_and_discard_scanlines() when merged upsampling was enabled.
Failing to do so caused either a segfault in merged_2v_upsample() (due
to a NULL pointer being passed to jcopy_sample_rows()) or an error
("Corrupt JPEG data: premature end of data segment"), depending on the
number of scanlines skipped and whether the first scanline skipped was
an odd- or even-numbered row.
Fixes #441
Fixes #244 (for real this time)
|
/*
 * ETHTOOL_GRXNTUPLE handler: report the installed RX n-tuple filters to
 * user space as a list of human-readable strings of ETH_GSTRING_LEN
 * bytes each.
 *
 * If the driver implements ->get_rx_ntuple() it fills the buffer itself;
 * otherwise the core walks dev->ethtool_ntuple_list and formats one
 * descriptive line per filter attribute, counting lines in num_strings.
 *
 * NOTE(review): "data" is sized from the driver's get_sset_count()
 * result, but the fallback path advances "p" once per formatted line;
 * nothing visible here proves num_strings stays within gstrings.len,
 * and the gstrings.len * ETH_GSTRING_LEN multiplication is unchecked
 * against overflow -- worth confirming against callers/drivers.
 */
static int ethtool_get_rx_ntuple(struct net_device *dev, void __user *useraddr)
{
	struct ethtool_gstrings gstrings;
	const struct ethtool_ops *ops = dev->ethtool_ops;
	struct ethtool_rx_ntuple_flow_spec_container *fsc;
	u8 *data;
	char *p;
	int ret, i, num_strings = 0;

	if (!ops->get_sset_count)
		return -EOPNOTSUPP;

	if (copy_from_user(&gstrings, useraddr, sizeof(gstrings)))
		return -EFAULT;

	ret = ops->get_sset_count(dev, gstrings.string_set);
	if (ret < 0)
		return ret;

	gstrings.len = ret;

	data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
	if (!data)
		return -ENOMEM;

	if (ops->get_rx_ntuple) {
		/* driver-specific filter grab */
		ret = ops->get_rx_ntuple(dev, gstrings.string_set, data);
		goto copy;
	}

	/* default ethtool filter grab */
	i = 0;
	p = (char *)data;
	list_for_each_entry(fsc, &dev->ethtool_ntuple_list.list, list) {
		sprintf(p, "Filter %d:\n", i);
		p += ETH_GSTRING_LEN;
		num_strings++;

		/* First line: the flow type of this filter. */
		switch (fsc->fs.flow_type) {
		case TCP_V4_FLOW:
			sprintf(p, "\tFlow Type: TCP\n");
			p += ETH_GSTRING_LEN;
			num_strings++;
			break;
		case UDP_V4_FLOW:
			sprintf(p, "\tFlow Type: UDP\n");
			p += ETH_GSTRING_LEN;
			num_strings++;
			break;
		case SCTP_V4_FLOW:
			sprintf(p, "\tFlow Type: SCTP\n");
			p += ETH_GSTRING_LEN;
			num_strings++;
			break;
		case AH_ESP_V4_FLOW:
			sprintf(p, "\tFlow Type: AH ESP\n");
			p += ETH_GSTRING_LEN;
			num_strings++;
			break;
		case ESP_V4_FLOW:
			sprintf(p, "\tFlow Type: ESP\n");
			p += ETH_GSTRING_LEN;
			num_strings++;
			break;
		case IP_USER_FLOW:
			sprintf(p, "\tFlow Type: Raw IP\n");
			p += ETH_GSTRING_LEN;
			num_strings++;
			break;
		case IPV4_FLOW:
			sprintf(p, "\tFlow Type: IPv4\n");
			p += ETH_GSTRING_LEN;
			num_strings++;
			break;
		default:
			/* Unknown type: emit the marker line and skip the
			 * per-type detail lines below. */
			sprintf(p, "\tFlow Type: Unknown\n");
			p += ETH_GSTRING_LEN;
			num_strings++;
			goto unknown_filter;
		}

		/* now the rest of the filters */
		switch (fsc->fs.flow_type) {
		case TCP_V4_FLOW:
		case UDP_V4_FLOW:
		case SCTP_V4_FLOW:
			sprintf(p, "\tSrc IP addr: 0x%x\n",
				fsc->fs.h_u.tcp_ip4_spec.ip4src);
			p += ETH_GSTRING_LEN;
			num_strings++;
			sprintf(p, "\tSrc IP mask: 0x%x\n",
				fsc->fs.m_u.tcp_ip4_spec.ip4src);
			p += ETH_GSTRING_LEN;
			num_strings++;
			sprintf(p, "\tDest IP addr: 0x%x\n",
				fsc->fs.h_u.tcp_ip4_spec.ip4dst);
			p += ETH_GSTRING_LEN;
			num_strings++;
			sprintf(p, "\tDest IP mask: 0x%x\n",
				fsc->fs.m_u.tcp_ip4_spec.ip4dst);
			p += ETH_GSTRING_LEN;
			num_strings++;
			sprintf(p, "\tSrc Port: %d, mask: 0x%x\n",
				fsc->fs.h_u.tcp_ip4_spec.psrc,
				fsc->fs.m_u.tcp_ip4_spec.psrc);
			p += ETH_GSTRING_LEN;
			num_strings++;
			sprintf(p, "\tDest Port: %d, mask: 0x%x\n",
				fsc->fs.h_u.tcp_ip4_spec.pdst,
				fsc->fs.m_u.tcp_ip4_spec.pdst);
			p += ETH_GSTRING_LEN;
			num_strings++;
			sprintf(p, "\tTOS: %d, mask: 0x%x\n",
				fsc->fs.h_u.tcp_ip4_spec.tos,
				fsc->fs.m_u.tcp_ip4_spec.tos);
			p += ETH_GSTRING_LEN;
			num_strings++;
			break;
		case AH_ESP_V4_FLOW:
		case ESP_V4_FLOW:
			sprintf(p, "\tSrc IP addr: 0x%x\n",
				fsc->fs.h_u.ah_ip4_spec.ip4src);
			p += ETH_GSTRING_LEN;
			num_strings++;
			sprintf(p, "\tSrc IP mask: 0x%x\n",
				fsc->fs.m_u.ah_ip4_spec.ip4src);
			p += ETH_GSTRING_LEN;
			num_strings++;
			sprintf(p, "\tDest IP addr: 0x%x\n",
				fsc->fs.h_u.ah_ip4_spec.ip4dst);
			p += ETH_GSTRING_LEN;
			num_strings++;
			sprintf(p, "\tDest IP mask: 0x%x\n",
				fsc->fs.m_u.ah_ip4_spec.ip4dst);
			p += ETH_GSTRING_LEN;
			num_strings++;
			sprintf(p, "\tSPI: %d, mask: 0x%x\n",
				fsc->fs.h_u.ah_ip4_spec.spi,
				fsc->fs.m_u.ah_ip4_spec.spi);
			p += ETH_GSTRING_LEN;
			num_strings++;
			sprintf(p, "\tTOS: %d, mask: 0x%x\n",
				fsc->fs.h_u.ah_ip4_spec.tos,
				fsc->fs.m_u.ah_ip4_spec.tos);
			p += ETH_GSTRING_LEN;
			num_strings++;
			break;
		case IP_USER_FLOW:
			sprintf(p, "\tSrc IP addr: 0x%x\n",
				fsc->fs.h_u.raw_ip4_spec.ip4src);
			p += ETH_GSTRING_LEN;
			num_strings++;
			sprintf(p, "\tSrc IP mask: 0x%x\n",
				fsc->fs.m_u.raw_ip4_spec.ip4src);
			p += ETH_GSTRING_LEN;
			num_strings++;
			sprintf(p, "\tDest IP addr: 0x%x\n",
				fsc->fs.h_u.raw_ip4_spec.ip4dst);
			p += ETH_GSTRING_LEN;
			num_strings++;
			sprintf(p, "\tDest IP mask: 0x%x\n",
				fsc->fs.m_u.raw_ip4_spec.ip4dst);
			p += ETH_GSTRING_LEN;
			num_strings++;
			break;
		case IPV4_FLOW:
			sprintf(p, "\tSrc IP addr: 0x%x\n",
				fsc->fs.h_u.usr_ip4_spec.ip4src);
			p += ETH_GSTRING_LEN;
			num_strings++;
			sprintf(p, "\tSrc IP mask: 0x%x\n",
				fsc->fs.m_u.usr_ip4_spec.ip4src);
			p += ETH_GSTRING_LEN;
			num_strings++;
			sprintf(p, "\tDest IP addr: 0x%x\n",
				fsc->fs.h_u.usr_ip4_spec.ip4dst);
			p += ETH_GSTRING_LEN;
			num_strings++;
			sprintf(p, "\tDest IP mask: 0x%x\n",
				fsc->fs.m_u.usr_ip4_spec.ip4dst);
			p += ETH_GSTRING_LEN;
			num_strings++;
			sprintf(p, "\tL4 bytes: 0x%x, mask: 0x%x\n",
				fsc->fs.h_u.usr_ip4_spec.l4_4_bytes,
				fsc->fs.m_u.usr_ip4_spec.l4_4_bytes);
			p += ETH_GSTRING_LEN;
			num_strings++;
			sprintf(p, "\tTOS: %d, mask: 0x%x\n",
				fsc->fs.h_u.usr_ip4_spec.tos,
				fsc->fs.m_u.usr_ip4_spec.tos);
			p += ETH_GSTRING_LEN;
			num_strings++;
			sprintf(p, "\tIP Version: %d, mask: 0x%x\n",
				fsc->fs.h_u.usr_ip4_spec.ip_ver,
				fsc->fs.m_u.usr_ip4_spec.ip_ver);
			p += ETH_GSTRING_LEN;
			num_strings++;
			sprintf(p, "\tProtocol: %d, mask: 0x%x\n",
				fsc->fs.h_u.usr_ip4_spec.proto,
				fsc->fs.m_u.usr_ip4_spec.proto);
			p += ETH_GSTRING_LEN;
			num_strings++;
			break;
		}

		/* Lines common to every recognized flow type. */
		sprintf(p, "\tVLAN: %d, mask: 0x%x\n",
			fsc->fs.vlan_tag, fsc->fs.vlan_tag_mask);
		p += ETH_GSTRING_LEN;
		num_strings++;
		sprintf(p, "\tUser-defined: 0x%Lx\n", fsc->fs.data);
		p += ETH_GSTRING_LEN;
		num_strings++;
		sprintf(p, "\tUser-defined mask: 0x%Lx\n", fsc->fs.data_mask);
		p += ETH_GSTRING_LEN;
		num_strings++;
		if (fsc->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP)
			sprintf(p, "\tAction: Drop\n");
		else
			sprintf(p, "\tAction: Direct to queue %d\n",
				fsc->fs.action);
		p += ETH_GSTRING_LEN;
		num_strings++;
unknown_filter:
		i++;
	}

copy:
	/* indicate to userspace how many strings we actually have */
	gstrings.len = num_strings;
	ret = -EFAULT;
	if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
		goto out;
	useraddr += sizeof(gstrings);
	if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN))
		goto out;
	ret = 0;

out:
	kfree(data);
	return ret;
}
| 0 |
[
"CWE-190"
] |
linux-2.6
|
db048b69037e7fa6a7d9e95a1271a50dc08ae233
| 71,309,385,319,300,920,000,000,000,000,000,000,000 | 240 |
ethtool: Fix potential kernel buffer overflow in ETHTOOL_GRXCLSRLALL
On a 32-bit machine, info.rule_cnt >= 0x40000000 leads to integer
overflow and the buffer may be smaller than needed. Since
ETHTOOL_GRXCLSRLALL is unprivileged, this can presumably be used for at
least denial of service.
Signed-off-by: Ben Hutchings <[email protected]>
Cc: [email protected]
Signed-off-by: David S. Miller <[email protected]>
|
/*
 * Return the link currently active for selector @sel on node @n, or
 * NULL when no bearer is active for that slot.  The selector's lowest
 * bit picks one of the node's two active-link slots.
 */
static struct tipc_link *node_active_link(struct tipc_node *n, int sel)
{
	int slot = n->active_links[sel & 1];

	return (slot == INVALID_BEARER_ID) ? NULL : n->links[slot].link;
}
| 0 |
[] |
linux
|
0217ed2848e8538bcf9172d97ed2eeb4a26041bb
| 284,585,291,115,564,230,000,000,000,000,000,000,000 | 9 |
tipc: better validate user input in tipc_nl_retrieve_key()
Before calling tipc_aead_key_size(ptr), we need to ensure
we have enough data to dereference ptr->keylen.
We probably also want to make sure tipc_aead_key_size()
wont overflow with malicious ptr->keylen values.
Syzbot reported:
BUG: KMSAN: uninit-value in __tipc_nl_node_set_key net/tipc/node.c:2971 [inline]
BUG: KMSAN: uninit-value in tipc_nl_node_set_key+0x9bf/0x13b0 net/tipc/node.c:3023
CPU: 0 PID: 21060 Comm: syz-executor.5 Not tainted 5.11.0-rc7-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Call Trace:
__dump_stack lib/dump_stack.c:79 [inline]
dump_stack+0x21c/0x280 lib/dump_stack.c:120
kmsan_report+0xfb/0x1e0 mm/kmsan/kmsan_report.c:118
__msan_warning+0x5f/0xa0 mm/kmsan/kmsan_instr.c:197
__tipc_nl_node_set_key net/tipc/node.c:2971 [inline]
tipc_nl_node_set_key+0x9bf/0x13b0 net/tipc/node.c:3023
genl_family_rcv_msg_doit net/netlink/genetlink.c:739 [inline]
genl_family_rcv_msg net/netlink/genetlink.c:783 [inline]
genl_rcv_msg+0x1319/0x1610 net/netlink/genetlink.c:800
netlink_rcv_skb+0x6fa/0x810 net/netlink/af_netlink.c:2494
genl_rcv+0x63/0x80 net/netlink/genetlink.c:811
netlink_unicast_kernel net/netlink/af_netlink.c:1304 [inline]
netlink_unicast+0x11d6/0x14a0 net/netlink/af_netlink.c:1330
netlink_sendmsg+0x1740/0x1840 net/netlink/af_netlink.c:1919
sock_sendmsg_nosec net/socket.c:652 [inline]
sock_sendmsg net/socket.c:672 [inline]
____sys_sendmsg+0xcfc/0x12f0 net/socket.c:2345
___sys_sendmsg net/socket.c:2399 [inline]
__sys_sendmsg+0x714/0x830 net/socket.c:2432
__compat_sys_sendmsg net/compat.c:347 [inline]
__do_compat_sys_sendmsg net/compat.c:354 [inline]
__se_compat_sys_sendmsg+0xa7/0xc0 net/compat.c:351
__ia32_compat_sys_sendmsg+0x4a/0x70 net/compat.c:351
do_syscall_32_irqs_on arch/x86/entry/common.c:79 [inline]
__do_fast_syscall_32+0x102/0x160 arch/x86/entry/common.c:141
do_fast_syscall_32+0x6a/0xc0 arch/x86/entry/common.c:166
do_SYSENTER_32+0x73/0x90 arch/x86/entry/common.c:209
entry_SYSENTER_compat_after_hwframe+0x4d/0x5c
RIP: 0023:0xf7f60549
Code: 03 74 c0 01 10 05 03 74 b8 01 10 06 03 74 b4 01 10 07 03 74 b0 01 10 08 03 74 d8 01 00 00 00 00 00 51 52 55 89 e5 0f 34 cd 80 <5d> 5a 59 c3 90 90 90 90 8d b4 26 00 00 00 00 8d b4 26 00 00 00 00
RSP: 002b:00000000f555a5fc EFLAGS: 00000296 ORIG_RAX: 0000000000000172
RAX: ffffffffffffffda RBX: 0000000000000003 RCX: 0000000020000200
RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000
RBP: 0000000000000000 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000000
R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000
Uninit was created at:
kmsan_save_stack_with_flags mm/kmsan/kmsan.c:121 [inline]
kmsan_internal_poison_shadow+0x5c/0xf0 mm/kmsan/kmsan.c:104
kmsan_slab_alloc+0x8d/0xe0 mm/kmsan/kmsan_hooks.c:76
slab_alloc_node mm/slub.c:2907 [inline]
__kmalloc_node_track_caller+0xa37/0x1430 mm/slub.c:4527
__kmalloc_reserve net/core/skbuff.c:142 [inline]
__alloc_skb+0x2f8/0xb30 net/core/skbuff.c:210
alloc_skb include/linux/skbuff.h:1099 [inline]
netlink_alloc_large_skb net/netlink/af_netlink.c:1176 [inline]
netlink_sendmsg+0xdbc/0x1840 net/netlink/af_netlink.c:1894
sock_sendmsg_nosec net/socket.c:652 [inline]
sock_sendmsg net/socket.c:672 [inline]
____sys_sendmsg+0xcfc/0x12f0 net/socket.c:2345
___sys_sendmsg net/socket.c:2399 [inline]
__sys_sendmsg+0x714/0x830 net/socket.c:2432
__compat_sys_sendmsg net/compat.c:347 [inline]
__do_compat_sys_sendmsg net/compat.c:354 [inline]
__se_compat_sys_sendmsg+0xa7/0xc0 net/compat.c:351
__ia32_compat_sys_sendmsg+0x4a/0x70 net/compat.c:351
do_syscall_32_irqs_on arch/x86/entry/common.c:79 [inline]
__do_fast_syscall_32+0x102/0x160 arch/x86/entry/common.c:141
do_fast_syscall_32+0x6a/0xc0 arch/x86/entry/common.c:166
do_SYSENTER_32+0x73/0x90 arch/x86/entry/common.c:209
entry_SYSENTER_compat_after_hwframe+0x4d/0x5c
Fixes: e1f32190cf7d ("tipc: add support for AEAD key setting via netlink")
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Tuong Lien <[email protected]>
Cc: Jon Maloy <[email protected]>
Cc: Ying Xue <[email protected]>
Reported-by: syzbot <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
/*
 * Handle the "attachments" rcfile command.  The first token is a
 * disposition of the form {+|-}{attachment|inline} (the sign defaults
 * to '+' when absent); the remaining tokens name the patterns to add
 * to or remove from the matching list.  A disposition of "?" instead
 * dumps the current state of all four match lists to the screen.
 *
 * Returns 0 on success, -1 on a bad disposition (err filled in), or
 * the status of parse_attach_list() for the list update itself.
 */
static int parse_attachments (BUFFER *buf, BUFFER *s, unsigned long data, BUFFER *err)
{
  char *category;
  char op;
  LIST **listp = NULL;

  mutt_extract_token(buf, s, 0);
  if (!buf->data || *buf->data == '\0')
  {
    strfcpy(err->data, _("attachments: no disposition"), err->dsize);
    return -1;
  }

  category = buf->data;
  op = *category++;

  if (op == '?')
  {
    /* Interactive dump of the current allow/exclude lists. */
    mutt_endwin (NULL);
    fflush (stdout);
    printf("\nCurrent attachments settings:\n\n");
    print_attach_list(AttachAllow,   '+', "A");
    print_attach_list(AttachExclude, '-', "A");
    print_attach_list(InlineAllow,   '+', "I");
    print_attach_list(InlineExclude, '-', "I");
    set_option (OPTFORCEREDRAWINDEX);
    set_option (OPTFORCEREDRAWPAGER);
    mutt_any_key_to_continue (NULL);
    return 0;
  }

  if (op != '+' && op != '-')
  {
    /* No explicit sign: the whole token is the category; default to add. */
    op = '+';
    category--;
  }

  if (!ascii_strncasecmp(category, "attachment", strlen(category)))
    listp = (op == '+') ? &AttachAllow : &AttachExclude;
  else if (!ascii_strncasecmp(category, "inline", strlen(category)))
    listp = (op == '+') ? &InlineAllow : &InlineExclude;
  else
  {
    strfcpy(err->data, _("attachments: invalid disposition"), err->dsize);
    return -1;
  }

  return parse_attach_list(buf, s, listp, err);
}
| 0 |
[
"CWE-668"
] |
mutt
|
6d0624411a979e2e1d76af4dd97d03f47679ea4a
| 136,119,951,403,829,100,000,000,000,000,000,000,000 | 51 |
use a 64-bit random value in temporary filenames.
closes #3158
|
/*
 * bytes.rsplit() implementation: split from the right on sep (or on
 * runs of ASCII whitespace when sep is None), doing at most maxsplit
 * splits.  Returns a new list, or NULL on error.
 */
bytes_rsplit_impl(PyBytesObject*self, PyObject *sep, Py_ssize_t maxsplit)
/*[clinic end generated code: output=0b6570b977911d88 input=0f86c9f28f7d7b7b]*/
{
    const char *self_data = PyBytes_AS_STRING(self);
    Py_ssize_t self_len = PyBytes_GET_SIZE(self);
    Py_buffer sep_view;
    PyObject *result;

    if (maxsplit < 0)
        maxsplit = PY_SSIZE_T_MAX;

    /* sep == None selects whitespace splitting. */
    if (sep == Py_None)
        return stringlib_rsplit_whitespace((PyObject*) self, self_data,
                                           self_len, maxsplit);

    if (PyObject_GetBuffer(sep, &sep_view, PyBUF_SIMPLE) != 0)
        return NULL;

    result = stringlib_rsplit((PyObject*) self, self_data, self_len,
                              sep_view.buf, sep_view.len, maxsplit);
    PyBuffer_Release(&sep_view);
    return result;
}
| 0 |
[
"CWE-190"
] |
cpython
|
fd8614c5c5466a14a945db5b059c10c0fb8f76d9
| 158,018,262,246,898,800,000,000,000,000,000,000,000 | 21 |
bpo-30657: Fix CVE-2017-1000158 (#4664)
Fixes possible integer overflow in PyBytes_DecodeEscape.
Co-Authored-By: Jay Bosamiya <[email protected]>
|
/// http_parser callback: append one body chunk to the request body.
/// Returns 0 to tell http_parser to continue parsing.
static int on_body(http_parser* self_, const char* at, size_t length)
{
    auto* self = static_cast<HTTPParser*>(self_);
    auto& body = self->req.body;
    body.insert(body.end(), at, at + length);
    return 0;
}
| 0 |
[
"CWE-416"
] |
Crow
|
fba01dc76d6ea940ad7c8392e8f39f9647241d8e
| 97,612,870,950,272,080,000,000,000,000,000,000,000 | 6 |
Prevent HTTP pipelining which Crow doesn't support.
|
// Runs fused batch-norm forward on a 4-D or 5-D input.
//
// Inputs (in order): x, scale, offset, estimated_mean,
// estimated_variance, and an optional side input.  Outputs: y plus
// batch mean/variance and saved mean/(possibly inverse) variance for
// the backward pass.  A 5-D input is temporarily reshaped to 4-D by
// folding the two innermost spatial dims, then reshaped back.
virtual void ComputeWithReservedSpace(OpKernelContext* context,
                                      bool use_reserved_space) {
    Tensor x = context->input(0);
    const Tensor& scale = context->input(1);
    const Tensor& offset = context->input(2);
    const Tensor& estimated_mean = context->input(3);
    const Tensor& estimated_variance = context->input(4);
    const Tensor* side_input = has_side_input_ ? &context->input(5) : nullptr;

    // Rank checks on every input before any dimension is read.
    OP_REQUIRES(context, x.dims() == 4 || x.dims() == 5,
                errors::InvalidArgument("input must be 4 or 5-dimensional",
                                        x.shape().DebugString()));
    OP_REQUIRES(context, scale.dims() == 1,
                errors::InvalidArgument("scale must be 1-dimensional",
                                        scale.shape().DebugString()));
    OP_REQUIRES(context, offset.dims() == 1,
                errors::InvalidArgument("offset must be 1-dimensional",
                                        offset.shape().DebugString()));
    OP_REQUIRES(context, estimated_mean.dims() == 1,
                errors::InvalidArgument("estimated_mean must be 1-dimensional",
                                        estimated_mean.shape().DebugString()));
    OP_REQUIRES(
        context, estimated_variance.dims() == 1,
        errors::InvalidArgument("estimated_variance must be 1-dimensional",
                                estimated_variance.shape().DebugString()));
    // 5-D input: fold the inner two spatial dims so the functor sees 4-D.
    bool use_reshape = (x.dims() == 5);
    auto x_shape = x.shape();
    TensorShape dest_shape;
    if (use_reshape) {
      const int64_t in_batch = GetTensorDim(x, tensor_format_, 'N');
      int64_t in_planes = GetTensorDim(x, tensor_format_, '0');
      int64_t in_rows = GetTensorDim(x, tensor_format_, '1');
      int64_t in_cols = GetTensorDim(x, tensor_format_, '2');
      const int64_t in_depth = GetTensorDim(x, tensor_format_, 'C');
      dest_shape = ShapeFromFormat(tensor_format_, in_batch,
                                   {{in_planes, in_rows * in_cols}}, in_depth);
      OP_REQUIRES(context, x.CopyFrom(x, dest_shape),
                  errors::InvalidArgument("Error during tensor copy."));
    }

    // Per-channel parameter tensors must match x's channel count.
    const auto num_channels = GetTensorDim(x, tensor_format_, 'C');
    OP_REQUIRES(
        context, scale.NumElements() == num_channels,
        errors::InvalidArgument("scale must have the same number of elements "
                                "as the channels of x, got ",
                                scale.NumElements(), " and ", num_channels));
    OP_REQUIRES(
        context, offset.NumElements() == num_channels,
        errors::InvalidArgument("offset must have the same number of elements "
                                "as the channels of x, got ",
                                offset.NumElements(), " and ", num_channels));
    if (!is_training_ || exponential_avg_factor_ != 1.) {
      std::string prefix_msg = is_training_ ? "When exponential_avg_factor != 1"
                                            : "When is_training=false";
      OP_REQUIRES(context, estimated_mean.NumElements() == num_channels,
                  errors::InvalidArgument(
                      prefix_msg,
                      ", mean must have the same number "
                      "of elements as the channels of x, got ",
                      estimated_mean.NumElements(), " and ", num_channels));
      OP_REQUIRES(context, estimated_variance.NumElements() == num_channels,
                  errors::InvalidArgument(
                      prefix_msg,
                      ", variance must have the same "
                      "number of elements as the channels of x, got ",
                      estimated_variance.NumElements(), " and ", num_channels));
    }

    if (has_side_input_) {
      OP_REQUIRES(context, side_input->shape() == x.shape(),
                  errors::InvalidArgument(
                      "side_input shape must be equal to input shape: ",
                      side_input->shape().DebugString(),
                      " != ", x.shape().DebugString()));
    }

    if (activation_mode_ != FbnActivationMode::kIdentity) {
      // NOTE(ezhulenev): This requirement is coming from implementation
      // details of cudnnBatchNormalizationForwardTrainingEx.
      OP_REQUIRES(
          context, !is_training_ || num_channels % 4 == 0,
          errors::InvalidArgument("FusedBatchNorm with activation requires "
                                  "channel dimension to be a multiple of 4."));
    }

    // Allocate outputs, forwarding input buffers where shapes permit.
    Tensor* y = nullptr;
    auto alloc_shape = use_reshape ? dest_shape : x_shape;
    OP_REQUIRES_OK(context, context->forward_input_or_allocate_output(
                                {0}, 0, alloc_shape, &y));

    Tensor* batch_mean = nullptr;
    OP_REQUIRES_OK(context, context->forward_input_or_allocate_output(
                                {3}, 1, scale.shape(), &batch_mean));
    Tensor* batch_var = nullptr;
    OP_REQUIRES_OK(context, context->forward_input_or_allocate_output(
                                {4}, 2, scale.shape(), &batch_var));
    Tensor* saved_mean = nullptr;
    OP_REQUIRES_OK(context,
                   context->allocate_output(3, scale.shape(), &saved_mean));
    Tensor* saved_maybe_inv_var = nullptr;
    OP_REQUIRES_OK(context, context->allocate_output(4, scale.shape(),
                                                     &saved_maybe_inv_var));

    // Dispatch to the training or inference functor specialization.
    if (is_training_) {
      functor::FusedBatchNorm<Device, T, U, true>()(
          context, x, scale, offset, estimated_mean, estimated_variance,
          side_input, epsilon_, exponential_avg_factor_, activation_mode_, y,
          batch_mean, batch_var, saved_mean, saved_maybe_inv_var,
          tensor_format_, use_reserved_space);
    } else {
      functor::FusedBatchNorm<Device, T, U, false>()(
          context, x, scale, offset, estimated_mean, estimated_variance,
          side_input, epsilon_, exponential_avg_factor_, activation_mode_, y,
          batch_mean, batch_var, saved_mean, saved_maybe_inv_var,
          tensor_format_, use_reserved_space);
    }

    // Restore the original 5-D shape on the output if we folded it above.
    if (use_reshape) {
      OP_REQUIRES(context, y->CopyFrom(*y, x_shape),
                  errors::InvalidArgument("Error during tensor copy."));
    }
}
| 0 |
[
"CWE-125"
] |
tensorflow
|
aab9998916c2ffbd8f0592059fad352622f89cda
| 203,062,994,520,113,920,000,000,000,000,000,000,000 | 121 |
Add shape checks to FusedBatchNorm kernels.
PiperOrigin-RevId: 399755576
Change-Id: If8049fde109cc33badb5509d174b9b95aee1ea5e
|
xmlSetBufferAllocationScheme(xmlBufferAllocationScheme scheme) {
if ((scheme == XML_BUFFER_ALLOC_EXACT) ||
(scheme == XML_BUFFER_ALLOC_DOUBLEIT) ||
(scheme == XML_BUFFER_ALLOC_HYBRID))
xmlBufferAllocScheme = scheme;
}
| 0 |
[
"CWE-20"
] |
libxml2
|
bdd66182ef53fe1f7209ab6535fda56366bd7ac9
| 288,191,987,832,883,500,000,000,000,000,000,000,000 | 6 |
Avoid building recursive entities
For https://bugzilla.gnome.org/show_bug.cgi?id=762100
When we detect a recusive entity we should really not
build the associated data, moreover if someone bypass
libxml2 fatal errors and still tries to serialize a broken
entity make sure we don't risk to get ito a recursion
* parser.c: xmlParserEntityCheck() don't build if entity loop
were found and remove the associated text content
* tree.c: xmlStringGetNodeList() avoid a potential recursion
|
static int checkout_remaining_wd_items(
checkout_data *data,
git_iterator *workdir,
const git_index_entry *wd,
git_vector *spec)
{
int error = 0;
while (wd && !error)
error = checkout_action_wd_only(data, workdir, &wd, spec);
if (error == GIT_ITEROVER)
error = 0;
return error;
}
| 0 |
[
"CWE-20",
"CWE-706"
] |
libgit2
|
64c612cc3e25eff5fb02c59ef5a66ba7a14751e4
| 118,638,410,321,144,240,000,000,000,000,000,000,000 | 16 |
Protect against 8.3 "short name" attacks also on Linux/macOS
The Windows Subsystem for Linux (WSL) is getting increasingly popular,
in particular because it makes it _so_ easy to run Linux software on
Windows' files, via the auto-mounted Windows drives (`C:\` is mapped to
`/mnt/c/`, no need to set that up manually).
Unfortunately, files/directories on the Windows drives can be accessed
via their _short names_, if that feature is enabled (which it is on the
`C:` drive by default).
Which means that we have to safeguard even our Linux users against the
short name attacks.
Further, while the default options of CIFS/SMB-mounts seem to disallow
accessing files on network shares via their short names on Linux/macOS,
it _is_ possible to do so with the right options.
So let's just safe-guard against short name attacks _everywhere_.
Signed-off-by: Johannes Schindelin <[email protected]>
|
static int nfs4_proc_lookup(struct inode *dir, struct dentry *dentry,
struct nfs_fh *fhandle, struct nfs_fattr *fattr,
struct nfs4_label *label)
{
int status;
struct rpc_clnt *client = NFS_CLIENT(dir);
status = nfs4_proc_lookup_common(&client, dir, dentry, fhandle, fattr, label);
if (client != NFS_CLIENT(dir)) {
rpc_shutdown_client(client);
nfs_fixup_secinfo_attributes(fattr);
}
return status;
}
| 0 |
[
"CWE-787"
] |
linux
|
b4487b93545214a9db8cbf32e86411677b0cca21
| 130,426,534,996,720,150,000,000,000,000,000,000,000 | 14 |
nfs: Fix getxattr kernel panic and memory overflow
Move the buffer size check to decode_attr_security_label() before memcpy()
Only call memcpy() if the buffer is large enough
Fixes: aa9c2669626c ("NFS: Client implementation of Labeled-NFS")
Signed-off-by: Jeffrey Mitchell <[email protected]>
[Trond: clean up duplicate test of label->len != 0]
Signed-off-by: Trond Myklebust <[email protected]>
|
static double mp_complex_mul(_cimg_math_parser& mp) {
const double
*ptr1 = &_mp_arg(2) + 1, *ptr2 = &_mp_arg(3) + 1,
r1 = *(ptr1++), i1 = *ptr1,
r2 = *(ptr2++), i2 = *ptr2;
double *ptrd = &_mp_arg(1) + 1;
*(ptrd++) = r1*r2 - i1*i2;
*(ptrd++) = r1*i2 + r2*i1;
return cimg::type<double>::nan();
| 0 |
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
| 125,904,172,830,591,680,000,000,000,000,000,000,000 | 10 |
Fix other issues in 'CImg<T>::load_bmp()'.
|
int gnutls_x509_crt_get_raw_dn(gnutls_x509_crt_t cert, gnutls_datum_t * dn)
{
if (cert->raw_dn.size > 0) {
return _gnutls_set_datum(dn, cert->raw_dn.data, cert->raw_dn.size);
} else {
return _gnutls_x509_get_raw_field(cert->cert, "tbsCertificate.subject.rdnSequence", dn);
}
}
| 0 |
[
"CWE-295"
] |
gnutls
|
6e76e9b9fa845b76b0b9a45f05f4b54a052578ff
| 186,630,192,187,344,650,000,000,000,000,000,000,000 | 8 |
on certificate import check whether the two signature algorithms match
|
SDL_AllocPalette(int ncolors)
{
SDL_Palette *palette;
/* Input validation */
if (ncolors < 1) {
SDL_InvalidParamError("ncolors");
return NULL;
}
palette = (SDL_Palette *) SDL_malloc(sizeof(*palette));
if (!palette) {
SDL_OutOfMemory();
return NULL;
}
palette->colors =
(SDL_Color *) SDL_malloc(ncolors * sizeof(*palette->colors));
if (!palette->colors) {
SDL_free(palette);
return NULL;
}
palette->ncolors = ncolors;
palette->version = 1;
palette->refcount = 1;
SDL_memset(palette->colors, 0xFF, ncolors * sizeof(*palette->colors));
return palette;
}
| 0 |
[
"CWE-703",
"CWE-787"
] |
SDL
|
8c91cf7dba5193f5ce12d06db1336515851c9ee9
| 212,790,561,302,869,920,000,000,000,000,000,000,000 | 29 |
Always create a full 256-entry map in case color values are out of range
Fixes https://github.com/libsdl-org/SDL/issues/5042
|
static int atrtr_ioctl(unsigned int cmd, void *arg)
{
struct rtentry rt;
if (copy_from_user(&rt, arg, sizeof(rt)))
return -EFAULT;
switch (cmd) {
case SIOCDELRT:
if (rt.rt_dst.sa_family != AF_APPLETALK)
return -EINVAL;
return atrtr_delete(&((struct sockaddr_at *)
&rt.rt_dst)->sat_addr);
case SIOCADDRT: {
struct net_device *dev = NULL;
/*
* FIXME: the name of the device is still in user
* space, isn't it?
*/
if (rt.rt_dev) {
dev = __dev_get_by_name(rt.rt_dev);
if (!dev)
return -ENODEV;
}
return atrtr_create(&rt, dev);
}
}
return -EINVAL;
}
| 0 |
[] |
history
|
7ab442d7e0a76402c12553ee256f756097cae2d2
| 25,481,420,792,984,085,000,000,000,000,000,000,000 | 30 |
[DDP]: Convert to new protocol interface.
Convert ddp to the new protocol interface which means it has to
handle fragmented skb's. The only big change is in the checksum
routine which has to do more work (like skb_checksum).
Minor speedup is folding the carry to avoid a branch.
Tested against a 2.4 system and by running both code over
a range of packets.
|
long keyctl_keyring_unlink(key_serial_t id, key_serial_t ringid)
{
key_ref_t keyring_ref, key_ref;
long ret;
keyring_ref = lookup_user_key(ringid, 0, KEY_WRITE);
if (IS_ERR(keyring_ref)) {
ret = PTR_ERR(keyring_ref);
goto error;
}
key_ref = lookup_user_key(id, KEY_LOOKUP_FOR_UNLINK, 0);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
goto error2;
}
ret = key_unlink(key_ref_to_ptr(keyring_ref), key_ref_to_ptr(key_ref));
key_ref_put(key_ref);
error2:
key_ref_put(keyring_ref);
error:
return ret;
} /* end keyctl_keyring_unlink() */
| 0 |
[
"CWE-476"
] |
linux-2.6
|
9d1ac65a9698513d00e5608d93fca0c53f536c14
| 16,996,101,191,850,474,000,000,000,000,000,000,000 | 26 |
KEYS: Fix RCU no-lock warning in keyctl_session_to_parent()
There's an protected access to the parent process's credentials in the middle
of keyctl_session_to_parent(). This results in the following RCU warning:
===================================================
[ INFO: suspicious rcu_dereference_check() usage. ]
---------------------------------------------------
security/keys/keyctl.c:1291 invoked rcu_dereference_check() without protection!
other info that might help us debug this:
rcu_scheduler_active = 1, debug_locks = 0
1 lock held by keyctl-session-/2137:
#0: (tasklist_lock){.+.+..}, at: [<ffffffff811ae2ec>] keyctl_session_to_parent+0x60/0x236
stack backtrace:
Pid: 2137, comm: keyctl-session- Not tainted 2.6.36-rc2-cachefs+ #1
Call Trace:
[<ffffffff8105606a>] lockdep_rcu_dereference+0xaa/0xb3
[<ffffffff811ae379>] keyctl_session_to_parent+0xed/0x236
[<ffffffff811af77e>] sys_keyctl+0xb4/0xb6
[<ffffffff81001eab>] system_call_fastpath+0x16/0x1b
The code should take the RCU read lock to make sure the parents credentials
don't go away, even though it's holding a spinlock and has IRQ disabled.
Signed-off-by: David Howells <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
my_exit_policy_rejects(const tor_addr_t *addr,
uint16_t port,
const char **why_rejected)
{
if (router_compare_to_my_exit_policy(addr, port)) {
*why_rejected = "";
return 1;
} else if (tor_addr_family(addr) == AF_INET6 && !get_options()->IPv6Exit) {
*why_rejected = " (IPv6 address without IPv6Exit configured)";
return 1;
}
return 0;
}
| 0 |
[
"CWE-20",
"CWE-617"
] |
tor
|
79b59a2dfcb68897ee89d98587d09e55f07e68d7
| 193,922,593,309,672,760,000,000,000,000,000,000,000 | 13 |
TROVE-2017-004: Fix assertion failure in relay_send_end_cell_from_edge_
This fixes an assertion failure in relay_send_end_cell_from_edge_() when an
origin circuit and a cpath_layer = NULL were passed.
A service rendezvous circuit could do such a thing when a malformed BEGIN cell
is received but shouldn't in the first place because the service needs to send
an END cell on the circuit for which it can not do without a cpath_layer.
Fixes #22493
Reported-by: Roger Dingledine <[email protected]>
Signed-off-by: David Goulet <[email protected]>
|
PHP_FUNCTION(imagecolorset)
{
zval *IM;
long color, red, green, blue, alpha = 0;
int col;
gdImagePtr im;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rllll|l", &IM, &color, &red, &green, &blue, &alpha) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
col = color;
if (col >= 0 && col < gdImageColorsTotal(im)) {
im->red[col] = red;
im->green[col] = green;
im->blue[col] = blue;
im->alpha[col] = alpha;
} else {
RETURN_FALSE;
}
}
| 0 |
[
"CWE-703",
"CWE-189"
] |
php-src
|
2938329ce19cb8c4197dec146c3ec887c6f61d01
| 133,490,781,209,185,470,000,000,000,000,000,000,000 | 24 |
Fixed bug #66356 (Heap Overflow Vulnerability in imagecrop())
And also fixed the bug: arguments are altered after some calls
|
static void call_nt_transact_create(connection_struct *conn,
struct smb_request *req,
uint16 **ppsetup, uint32 setup_count,
char **ppparams, uint32 parameter_count,
char **ppdata, uint32 data_count,
uint32 max_data_count)
{
struct smb_filename *smb_fname = NULL;
char *fname = NULL;
char *params = *ppparams;
char *data = *ppdata;
/* Breakout the oplock request bits so we can set the reply bits separately. */
uint32 fattr=0;
SMB_OFF_T file_len = 0;
int info = 0;
files_struct *fsp = NULL;
char *p = NULL;
uint32 flags;
uint32 access_mask;
uint32 file_attributes;
uint32 share_access;
uint32 create_disposition;
uint32 create_options;
uint32 sd_len;
struct security_descriptor *sd = NULL;
uint32 ea_len;
uint16 root_dir_fid;
struct timespec create_timespec;
struct timespec c_timespec;
struct timespec a_timespec;
struct timespec m_timespec;
struct timespec write_time_ts;
struct ea_list *ea_list = NULL;
NTSTATUS status;
size_t param_len;
uint64_t allocation_size;
int oplock_request;
uint8_t oplock_granted;
struct case_semantics_state *case_state = NULL;
TALLOC_CTX *ctx = talloc_tos();
DEBUG(5,("call_nt_transact_create\n"));
/*
* If it's an IPC, use the pipe handler.
*/
if (IS_IPC(conn)) {
if (lp_nt_pipe_support()) {
do_nt_transact_create_pipe(
conn, req,
ppsetup, setup_count,
ppparams, parameter_count,
ppdata, data_count);
goto out;
}
reply_nterror(req, NT_STATUS_ACCESS_DENIED);
goto out;
}
/*
* Ensure minimum number of parameters sent.
*/
if(parameter_count < 54) {
DEBUG(0,("call_nt_transact_create - insufficient parameters (%u)\n", (unsigned int)parameter_count));
reply_nterror(req, NT_STATUS_INVALID_PARAMETER);
goto out;
}
flags = IVAL(params,0);
access_mask = IVAL(params,8);
file_attributes = IVAL(params,20);
share_access = IVAL(params,24);
create_disposition = IVAL(params,28);
create_options = IVAL(params,32);
sd_len = IVAL(params,36);
ea_len = IVAL(params,40);
root_dir_fid = (uint16)IVAL(params,4);
allocation_size = (uint64_t)IVAL(params,12);
#ifdef LARGE_SMB_OFF_T
allocation_size |= (((uint64_t)IVAL(params,16)) << 32);
#endif
/*
* we need to remove ignored bits when they come directly from the client
* because we reuse some of them for internal stuff
*/
create_options &= ~NTCREATEX_OPTIONS_MUST_IGNORE_MASK;
/* Ensure the data_len is correct for the sd and ea values given. */
if ((ea_len + sd_len > data_count)
|| (ea_len > data_count) || (sd_len > data_count)
|| (ea_len + sd_len < ea_len) || (ea_len + sd_len < sd_len)) {
DEBUG(10, ("call_nt_transact_create - ea_len = %u, sd_len = "
"%u, data_count = %u\n", (unsigned int)ea_len,
(unsigned int)sd_len, (unsigned int)data_count));
reply_nterror(req, NT_STATUS_INVALID_PARAMETER);
goto out;
}
if (sd_len) {
DEBUG(10, ("call_nt_transact_create - sd_len = %d\n",
sd_len));
status = unmarshall_sec_desc(ctx, (uint8_t *)data, sd_len,
&sd);
if (!NT_STATUS_IS_OK(status)) {
DEBUG(10, ("call_nt_transact_create: "
"unmarshall_sec_desc failed: %s\n",
nt_errstr(status)));
reply_nterror(req, status);
goto out;
}
}
if (ea_len) {
if (!lp_ea_support(SNUM(conn))) {
DEBUG(10, ("call_nt_transact_create - ea_len = %u but "
"EA's not supported.\n",
(unsigned int)ea_len));
reply_nterror(req, NT_STATUS_EAS_NOT_SUPPORTED);
goto out;
}
if (ea_len < 10) {
DEBUG(10,("call_nt_transact_create - ea_len = %u - "
"too small (should be more than 10)\n",
(unsigned int)ea_len ));
reply_nterror(req, NT_STATUS_INVALID_PARAMETER);
goto out;
}
/* We have already checked that ea_len <= data_count here. */
ea_list = read_nttrans_ea_list(talloc_tos(), data + sd_len,
ea_len);
if (ea_list == NULL) {
reply_nterror(req, NT_STATUS_INVALID_PARAMETER);
goto out;
}
}
srvstr_get_path(ctx, params, req->flags2, &fname,
params+53, parameter_count-53,
STR_TERMINATE, &status);
if (!NT_STATUS_IS_OK(status)) {
reply_nterror(req, status);
goto out;
}
if (file_attributes & FILE_FLAG_POSIX_SEMANTICS) {
case_state = set_posix_case_semantics(ctx, conn);
if (!case_state) {
reply_nterror(req, NT_STATUS_NO_MEMORY);
goto out;
}
}
status = filename_convert(ctx,
conn,
req->flags2 & FLAGS2_DFS_PATHNAMES,
fname,
0,
NULL,
&smb_fname);
TALLOC_FREE(case_state);
if (!NT_STATUS_IS_OK(status)) {
if (NT_STATUS_EQUAL(status,NT_STATUS_PATH_NOT_COVERED)) {
reply_botherror(req,
NT_STATUS_PATH_NOT_COVERED,
ERRSRV, ERRbadpath);
goto out;
}
reply_nterror(req, status);
goto out;
}
oplock_request = (flags & REQUEST_OPLOCK) ? EXCLUSIVE_OPLOCK : 0;
if (oplock_request) {
oplock_request |= (flags & REQUEST_BATCH_OPLOCK)
? BATCH_OPLOCK : 0;
}
/*
* Bug #6898 - clients using Windows opens should
* never be able to set this attribute into the
* VFS.
*/
file_attributes &= ~FILE_FLAG_POSIX_SEMANTICS;
status = SMB_VFS_CREATE_FILE(
conn, /* conn */
req, /* req */
root_dir_fid, /* root_dir_fid */
smb_fname, /* fname */
access_mask, /* access_mask */
share_access, /* share_access */
create_disposition, /* create_disposition*/
create_options, /* create_options */
file_attributes, /* file_attributes */
oplock_request, /* oplock_request */
allocation_size, /* allocation_size */
sd, /* sd */
ea_list, /* ea_list */
&fsp, /* result */
&info); /* pinfo */
if(!NT_STATUS_IS_OK(status)) {
if (open_was_deferred(req->mid)) {
/* We have re-scheduled this call, no error. */
return;
}
reply_openerror(req, status);
goto out;
}
/* Ensure we're pointing at the correct stat struct. */
TALLOC_FREE(smb_fname);
smb_fname = fsp->fsp_name;
/*
* If the caller set the extended oplock request bit
* and we granted one (by whatever means) - set the
* correct bit for extended oplock reply.
*/
if (oplock_request &&
(lp_fake_oplocks(SNUM(conn))
|| EXCLUSIVE_OPLOCK_TYPE(fsp->oplock_type))) {
/*
* Exclusive oplock granted
*/
if (flags & REQUEST_BATCH_OPLOCK) {
oplock_granted = BATCH_OPLOCK_RETURN;
} else {
oplock_granted = EXCLUSIVE_OPLOCK_RETURN;
}
} else if (fsp->oplock_type == LEVEL_II_OPLOCK) {
oplock_granted = LEVEL_II_OPLOCK_RETURN;
} else {
oplock_granted = NO_OPLOCK_RETURN;
}
file_len = smb_fname->st.st_ex_size;
/* Realloc the size of parameters and data we will return */
if (flags & EXTENDED_RESPONSE_REQUIRED) {
/* Extended response is 32 more byyes. */
param_len = 101;
} else {
param_len = 69;
}
params = nttrans_realloc(ppparams, param_len);
if(params == NULL) {
reply_nterror(req, NT_STATUS_NO_MEMORY);
goto out;
}
p = params;
SCVAL(p, 0, oplock_granted);
p += 2;
SSVAL(p,0,fsp->fnum);
p += 2;
if ((create_disposition == FILE_SUPERSEDE)
&& (info == FILE_WAS_OVERWRITTEN)) {
SIVAL(p,0,FILE_WAS_SUPERSEDED);
} else {
SIVAL(p,0,info);
}
p += 8;
fattr = dos_mode(conn, smb_fname);
if (fattr == 0) {
fattr = FILE_ATTRIBUTE_NORMAL;
}
/* Deal with other possible opens having a modified
write time. JRA. */
ZERO_STRUCT(write_time_ts);
get_file_infos(fsp->file_id, NULL, &write_time_ts);
if (!null_timespec(write_time_ts)) {
update_stat_ex_mtime(&smb_fname->st, write_time_ts);
}
/* Create time. */
create_timespec = get_create_timespec(conn, fsp, smb_fname);
a_timespec = smb_fname->st.st_ex_atime;
m_timespec = smb_fname->st.st_ex_mtime;
c_timespec = get_change_timespec(conn, fsp, smb_fname);
if (lp_dos_filetime_resolution(SNUM(conn))) {
dos_filetime_timespec(&create_timespec);
dos_filetime_timespec(&a_timespec);
dos_filetime_timespec(&m_timespec);
dos_filetime_timespec(&c_timespec);
}
put_long_date_timespec(conn->ts_res, p, create_timespec); /* create time. */
p += 8;
put_long_date_timespec(conn->ts_res, p, a_timespec); /* access time */
p += 8;
put_long_date_timespec(conn->ts_res, p, m_timespec); /* write time */
p += 8;
put_long_date_timespec(conn->ts_res, p, c_timespec); /* change time */
p += 8;
SIVAL(p,0,fattr); /* File Attributes. */
p += 4;
SOFF_T(p, 0, SMB_VFS_GET_ALLOC_SIZE(conn, fsp, &smb_fname->st));
p += 8;
SOFF_T(p,0,file_len);
p += 8;
if (flags & EXTENDED_RESPONSE_REQUIRED) {
SSVAL(p,2,0x7);
}
p += 4;
SCVAL(p,0,fsp->is_directory ? 1 : 0);
if (flags & EXTENDED_RESPONSE_REQUIRED) {
uint32 perms = 0;
p += 25;
if (fsp->is_directory ||
can_write_to_file(conn, smb_fname)) {
perms = FILE_GENERIC_ALL;
} else {
perms = FILE_GENERIC_READ|FILE_EXECUTE;
}
SIVAL(p,0,perms);
}
DEBUG(5,("call_nt_transact_create: open name = %s\n",
smb_fname_str_dbg(smb_fname)));
/* Send the required number of replies */
send_nt_replies(conn, req, NT_STATUS_OK, params, param_len, *ppdata, 0);
out:
return;
}
| 0 |
[
"CWE-189"
] |
samba
|
6ef0e33fe8afa0ebb81652b9d42b42d20efadf04
| 190,023,225,303,178,470,000,000,000,000,000,000,000 | 342 |
Fix bug #10010 - Missing integer wrap protection in EA list reading can cause server to loop with DOS.
Ensure we never wrap whilst adding client provided input.
CVE-2013-4124
Signed-off-by: Jeremy Allison <[email protected]>
|
static void out_of_memory(conn *c, char *ascii_error) {
const static char error_prefix[] = "SERVER_ERROR ";
const static int error_prefix_len = sizeof(error_prefix) - 1;
if (c->protocol == binary_prot) {
/* Strip off the generic error prefix; it's irrelevant in binary */
if (!strncmp(ascii_error, error_prefix, error_prefix_len)) {
ascii_error += error_prefix_len;
}
write_bin_error(c, PROTOCOL_BINARY_RESPONSE_ENOMEM, ascii_error, 0);
} else {
out_string(c, ascii_error);
}
}
| 0 |
[
"CWE-190"
] |
memcached
|
bd578fc34b96abe0f8d99c1409814a09f51ee71c
| 268,985,692,419,184,900,000,000,000,000,000,000,000 | 14 |
CVE reported by cisco talos
|
int2octets (unsigned char **r_frame, gcry_mpi_t value, size_t nbytes)
{
gpg_err_code_t rc;
size_t nframe, noff, n;
unsigned char *frame;
rc = _gcry_mpi_print (GCRYMPI_FMT_USG, NULL, 0, &nframe, value);
if (rc)
return rc;
if (nframe > nbytes)
return GPG_ERR_TOO_LARGE; /* Value too long to fit into NBYTES. */
noff = (nframe < nbytes)? nbytes - nframe : 0;
n = nframe + noff;
frame = mpi_is_secure (value)? xtrymalloc_secure (n) : xtrymalloc (n);
if (!frame)
return gpg_err_code_from_syserror ();
if (noff)
memset (frame, 0, noff);
nframe += noff;
rc = _gcry_mpi_print (GCRYMPI_FMT_USG, frame+noff, nframe-noff, NULL, value);
if (rc)
{
xfree (frame);
return rc;
}
*r_frame = frame;
return 0;
}
| 0 |
[
"CWE-203"
] |
libgcrypt
|
7c2943309d14407b51c8166c4dcecb56a3628567
| 314,290,283,015,757,880,000,000,000,000,000,000,000 | 30 |
dsa,ecdsa: Fix use of nonce, use larger one.
* cipher/dsa-common.c (_gcry_dsa_modify_k): New.
* cipher/pubkey-internal.h (_gcry_dsa_modify_k): New.
* cipher/dsa.c (sign): Use _gcry_dsa_modify_k.
* cipher/ecc-ecdsa.c (_gcry_ecc_ecdsa_sign): Likewise.
* cipher/ecc-gost.c (_gcry_ecc_gost_sign): Likewise.
CVE-id: CVE-2019-13627
GnuPG-bug-id: 4626
Signed-off-by: NIIBE Yutaka <[email protected]>
|
static int wait_serial_change(struct acm *acm, unsigned long arg)
{
int rv = 0;
DECLARE_WAITQUEUE(wait, current);
struct async_icount old, new;
if (arg & (TIOCM_DSR | TIOCM_RI | TIOCM_CD ))
return -EINVAL;
do {
spin_lock_irq(&acm->read_lock);
old = acm->oldcount;
new = acm->iocount;
acm->oldcount = new;
spin_unlock_irq(&acm->read_lock);
if ((arg & TIOCM_DSR) &&
old.dsr != new.dsr)
break;
if ((arg & TIOCM_CD) &&
old.dcd != new.dcd)
break;
if ((arg & TIOCM_RI) &&
old.rng != new.rng)
break;
add_wait_queue(&acm->wioctl, &wait);
set_current_state(TASK_INTERRUPTIBLE);
schedule();
remove_wait_queue(&acm->wioctl, &wait);
if (acm->disconnected) {
if (arg & TIOCM_CD)
break;
else
rv = -ENODEV;
} else {
if (signal_pending(current))
rv = -ERESTARTSYS;
}
} while (!rv);
return rv;
}
| 0 |
[
"CWE-703"
] |
linux
|
8835ba4a39cf53f705417b3b3a94eb067673f2c9
| 201,962,330,608,950,400,000,000,000,000,000,000,000 | 44 |
USB: cdc-acm: more sanity checking
An attack has become available which pretends to be a quirky
device circumventing normal sanity checks and crashes the kernel
by an insufficient number of interfaces. This patch adds a check
to the code path for quirky devices.
Signed-off-by: Oliver Neukum <[email protected]>
CC: [email protected]
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
static void x509_end_cert(const br_x509_class **ctx)
{
struct x509_context *x509 = (struct x509_context *)ctx;
x509->minimal.vtable->end_cert(&x509->minimal.vtable);
}
| 0 |
[
"CWE-290"
] |
curl
|
b09c8ee15771c614c4bf3ddac893cdb12187c844
| 248,904,295,643,254,700,000,000,000,000,000,000,000 | 6 |
vtls: add 'isproxy' argument to Curl_ssl_get/addsessionid()
To make sure we set and extract the correct session.
Reported-by: Mingtao Yang
Bug: https://curl.se/docs/CVE-2021-22890.html
CVE-2021-22890
|
static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_rdst *rdst, *fdst = NULL;
const struct ip_tunnel_info *info;
bool did_rsc = false;
struct vxlan_fdb *f;
struct ethhdr *eth;
__be32 vni = 0;
info = skb_tunnel_info(skb);
skb_reset_mac_header(skb);
if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) {
if (info && info->mode & IP_TUNNEL_INFO_BRIDGE &&
info->mode & IP_TUNNEL_INFO_TX) {
vni = tunnel_id_to_key32(info->key.tun_id);
} else {
if (info && info->mode & IP_TUNNEL_INFO_TX)
vxlan_xmit_one(skb, dev, vni, NULL, false);
else
kfree_skb(skb);
return NETDEV_TX_OK;
}
}
if (vxlan->cfg.flags & VXLAN_F_PROXY) {
eth = eth_hdr(skb);
if (ntohs(eth->h_proto) == ETH_P_ARP)
return arp_reduce(dev, skb, vni);
#if IS_ENABLED(CONFIG_IPV6)
else if (ntohs(eth->h_proto) == ETH_P_IPV6 &&
pskb_may_pull(skb, sizeof(struct ipv6hdr) +
sizeof(struct nd_msg)) &&
ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
struct nd_msg *m = (struct nd_msg *)(ipv6_hdr(skb) + 1);
if (m->icmph.icmp6_code == 0 &&
m->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
return neigh_reduce(dev, skb, vni);
}
#endif
}
eth = eth_hdr(skb);
f = vxlan_find_mac(vxlan, eth->h_dest, vni);
did_rsc = false;
if (f && (f->flags & NTF_ROUTER) && (vxlan->cfg.flags & VXLAN_F_RSC) &&
(ntohs(eth->h_proto) == ETH_P_IP ||
ntohs(eth->h_proto) == ETH_P_IPV6)) {
did_rsc = route_shortcircuit(dev, skb);
if (did_rsc)
f = vxlan_find_mac(vxlan, eth->h_dest, vni);
}
if (f == NULL) {
f = vxlan_find_mac(vxlan, all_zeros_mac, vni);
if (f == NULL) {
if ((vxlan->cfg.flags & VXLAN_F_L2MISS) &&
!is_multicast_ether_addr(eth->h_dest))
vxlan_fdb_miss(vxlan, eth->h_dest);
dev->stats.tx_dropped++;
kfree_skb(skb);
return NETDEV_TX_OK;
}
}
list_for_each_entry_rcu(rdst, &f->remotes, list) {
struct sk_buff *skb1;
if (!fdst) {
fdst = rdst;
continue;
}
skb1 = skb_clone(skb, GFP_ATOMIC);
if (skb1)
vxlan_xmit_one(skb1, dev, vni, rdst, did_rsc);
}
if (fdst)
vxlan_xmit_one(skb, dev, vni, fdst, did_rsc);
else
kfree_skb(skb);
return NETDEV_TX_OK;
}
| 0 |
[] |
net
|
6c8991f41546c3c472503dff1ea9daaddf9331c2
| 58,288,486,337,542,240,000,000,000,000,000,000,000 | 88 |
net: ipv6_stub: use ip6_dst_lookup_flow instead of ip6_dst_lookup
ipv6_stub uses the ip6_dst_lookup function to allow other modules to
perform IPv6 lookups. However, this function skips the XFRM layer
entirely.
All users of ipv6_stub->ip6_dst_lookup use ip_route_output_flow (via the
ip_route_output_key and ip_route_output helpers) for their IPv4 lookups,
which calls xfrm_lookup_route(). This patch fixes this inconsistent
behavior by switching the stub to ip6_dst_lookup_flow, which also calls
xfrm_lookup_route().
This requires some changes in all the callers, as these two functions
take different arguments and have different return types.
Fixes: 5f81bd2e5d80 ("ipv6: export a stub for IPv6 symbols used by vxlan")
Reported-by: Xiumei Mu <[email protected]>
Signed-off-by: Sabrina Dubroca <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static inline struct sk_buff *tcp_write_queue_prev(const struct sock *sk,
const struct sk_buff *skb)
{
return skb_queue_prev(&sk->sk_write_queue, skb);
}
| 0 |
[
"CWE-416",
"CWE-269"
] |
linux
|
bb1fceca22492109be12640d49f5ea5a544c6bb4
| 244,613,986,103,607,830,000,000,000,000,000,000,000 | 5 |
tcp: fix use after free in tcp_xmit_retransmit_queue()
When tcp_sendmsg() allocates a fresh and empty skb, it puts it at the
tail of the write queue using tcp_add_write_queue_tail()
Then it attempts to copy user data into this fresh skb.
If the copy fails, we undo the work and remove the fresh skb.
Unfortunately, this undo lacks the change done to tp->highest_sack and
we can leave a dangling pointer (to a freed skb)
Later, tcp_xmit_retransmit_queue() can dereference this pointer and
access freed memory. For regular kernels where memory is not unmapped,
this might cause SACK bugs because tcp_highest_sack_seq() is buggy,
returning garbage instead of tp->snd_nxt, but with various debug
features like CONFIG_DEBUG_PAGEALLOC, this can crash the kernel.
This bug was found by Marco Grassi thanks to syzkaller.
Fixes: 6859d49475d4 ("[TCP]: Abstract tp->highest_sack accessing & point to next skb")
Reported-by: Marco Grassi <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Ilpo Järvinen <[email protected]>
Cc: Yuchung Cheng <[email protected]>
Cc: Neal Cardwell <[email protected]>
Acked-by: Neal Cardwell <[email protected]>
Reviewed-by: Cong Wang <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) {
int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
if (file_index < 0) return MZ_FALSE;
return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size,
flags, pUser_read_buf,
user_read_buf_size);
}
| 0 |
[
"CWE-20",
"CWE-190"
] |
tinyexr
|
a685e3332f61cd4e59324bf3f669d36973d64270
| 289,089,316,300,135,500,000,000,000,000,000,000,000 | 9 |
Make line_no with too large value(2**20) invalid. Fixes #124
|
SpoolssReplyClosePrinter_r(tvbuff_t *tvb, int offset,
packet_info *pinfo, proto_tree *tree,
dcerpc_info *di, guint8 *drep)
{
/* Parse packet */
offset = dissect_nt_policy_hnd(
tvb, offset, pinfo, tree, di, drep, hf_hnd, NULL, NULL,
FALSE, FALSE);
offset = dissect_doserror(
tvb, offset, pinfo, tree, di, drep, hf_rc, NULL);
return offset;
}
| 0 |
[
"CWE-399"
] |
wireshark
|
b4d16b4495b732888e12baf5b8a7e9bf2665e22b
| 165,525,744,864,127,970,000,000,000,000,000,000,000 | 15 |
SPOOLSS: Try to avoid an infinite loop.
Use tvb_reported_length_remaining in dissect_spoolss_uint16uni. Make
sure our offset always increments in dissect_spoolss_keybuffer.
Change-Id: I7017c9685bb2fa27161d80a03b8fca4ef630e793
Reviewed-on: https://code.wireshark.org/review/14687
Reviewed-by: Gerald Combs <[email protected]>
Petri-Dish: Gerald Combs <[email protected]>
Tested-by: Petri Dish Buildbot <[email protected]>
Reviewed-by: Michael Mann <[email protected]>
|
static u32 temac_setoptions(struct net_device *ndev, u32 options)
{
struct temac_local *lp = netdev_priv(ndev);
struct temac_option *tp = &temac_options[0];
int reg;
unsigned long flags;
spin_lock_irqsave(lp->indirect_lock, flags);
while (tp->opt) {
reg = temac_indirect_in32_locked(lp, tp->reg) & ~tp->m_or;
if (options & tp->opt) {
reg |= tp->m_or;
temac_indirect_out32_locked(lp, tp->reg, reg);
}
tp++;
}
spin_unlock_irqrestore(lp->indirect_lock, flags);
lp->options |= options;
return 0;
}
| 0 |
[
"CWE-120",
"CWE-787"
] |
linux
|
c364df2489b8ef2f5e3159b1dff1ff1fdb16040d
| 304,577,154,130,698,070,000,000,000,000,000,000,000 | 21 |
net: ll_temac: Fix TX BD buffer overwrite
Just as the initial check, we need to ensure num_frag+1 buffers available,
as that is the number of buffers we are going to use.
This fixes a buffer overflow, which might be seen during heavy network
load. Complete lockup of TEMAC was reproducible within about 10 minutes of
a particular load.
Fixes: 84823ff80f74 ("net: ll_temac: Fix race condition causing TX hang")
Cc: [email protected] # v5.4+
Signed-off-by: Esben Haabendal <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
rsvg_title_handler_characters (RsvgSaxHandler * self, const char *ch, int len)
{
RsvgSaxHandlerTitle *z = (RsvgSaxHandlerTitle *) self;
RsvgHandle *ctx = z->ctx;
/* This isn't quite the correct behavior - in theory, any graphics
element may contain a title or desc element */
if (!ch || !len)
return;
if (!g_utf8_validate ((char *) ch, len, NULL)) {
char *utf8;
utf8 = rsvg_make_valid_utf8 ((char *) ch, len);
g_string_append (ctx->priv->title, utf8);
g_free (utf8);
} else {
g_string_append_len (ctx->priv->title, (char *) ch, len);
}
}
| 0 |
[] |
librsvg
|
34c95743ca692ea0e44778e41a7c0a129363de84
| 142,154,403,676,551,120,000,000,000,000,000,000,000 | 20 |
Store node type separately in RsvgNode
The node name (formerly RsvgNode:type) cannot be used to infer
the sub-type of RsvgNode that we're dealing with, since for unknown
elements we put type = node-name. This lead to a (potentially exploitable)
crash e.g. when the element name started with "fe" which tricked
the old code into considering it as a RsvgFilterPrimitive.
CVE-2011-3146
https://bugzilla.gnome.org/show_bug.cgi?id=658014
|
/* Parses an audio sample entry box (GF_MPEGAudioSampleEntryBox, including
 * the encrypted 'enca' flavor) from the bitstream. Handles both ISOBMFF
 * and QuickTime (QTFF) styled entries, and falls back to a brute-force
 * scan for an embedded 'esds' box when regular child-box parsing fails
 * (workaround for some malformed files).
 *
 * s  - box being populated; must be a GF_MPEGAudioSampleEntryBox
 * bs - source bitstream positioned at the start of the entry payload
 *
 * Returns GF_OK on success, or GF_ISOM_INVALID_FILE / GF_OUT_OF_MEM /
 * an error propagated from the sub-parsers on failure.
 */
GF_Err audio_sample_entry_box_read(GF_Box *s, GF_BitStream *bs)
{
	GF_MPEGAudioSampleEntryBox *ptr;
	char *data;
	u8 a, b, c, d;
	u32 i, size, v, nb_alnum;
	GF_Err e;
	u64 pos, start;

	ptr = (GF_MPEGAudioSampleEntryBox *)s;

	/* Peek the 16-bit sample-entry version field at offset 8: a non-zero
	   value indicates a QuickTime-style sound sample description. */
	start = gf_bs_get_position(bs);
	gf_bs_seek(bs, start + 8);
	v = gf_bs_read_u16(bs);
	if (v)
		ptr->qtff_mode = GF_ISOM_AUDIO_QTFF_ON_NOEXT;

	//try to disambiguate QTFF v1 and MP4 v1 audio sample entries ...
	if (v==1) {
		//go to end of ISOM audio sample entry, skip 4 byte (box size field), read 4 bytes (box type) and check if this looks like a box
		gf_bs_seek(bs, start + 8 + 20 + 4);
		a = gf_bs_read_u8(bs);
		b = gf_bs_read_u8(bs);
		c = gf_bs_read_u8(bs);
		d = gf_bs_read_u8(bs);
		/* Heuristic: a real child-box fourcc is mostly alphanumeric; if at
		   least 3 of the 4 bytes are, assume an ISOM entry (not QTFF). */
		nb_alnum = 0;
		if (isalnum(a)) nb_alnum++;
		if (isalnum(b)) nb_alnum++;
		if (isalnum(c)) nb_alnum++;
		if (isalnum(d)) nb_alnum++;
		if (nb_alnum>2) ptr->qtff_mode = GF_ISOM_AUDIO_QTFF_NONE;
	}

	/* Rewind and parse the common audio sample entry fields. */
	gf_bs_seek(bs, start);

	e = gf_isom_audio_sample_entry_read((GF_AudioSampleEntryBox*)s, bs);
	if (e) return e;
	/* Remember where child boxes start and how many bytes remain, so the
	   fallback 'esds' scan below can rewind here on parse failure. */
	pos = gf_bs_get_position(bs);
	size = (u32) s->size;

	//when cookie is set on bs, always convert qtff-style mp4a to isobmff-style
	//since the conversion is done in addBox and we don't have the bitstream there (arg...), flag the box
	if (gf_bs_get_cookie(bs) & GF_ISOM_BS_COOKIE_QT_CONV) {
		ptr->qtff_mode |= GF_ISOM_AUDIO_QTFF_CONVERT_FLAG;
	}

	e = gf_isom_box_array_read(s, bs);
	if (!e) {
		/* For encrypted entries, recover the original codec type from the
		   protection scheme info and propagate it into the 3GPP config. */
		if (s->type==GF_ISOM_BOX_TYPE_ENCA) {
			GF_ProtectionSchemeInfoBox *sinf = (GF_ProtectionSchemeInfoBox *) gf_isom_box_find_child(s->child_boxes, GF_ISOM_BOX_TYPE_SINF);
			if (sinf && sinf->original_format) {
				u32 type = sinf->original_format->data_format;
				switch (type) {
				case GF_ISOM_SUBTYPE_3GP_AMR:
				case GF_ISOM_SUBTYPE_3GP_AMR_WB:
				case GF_ISOM_SUBTYPE_3GP_EVRC:
				case GF_ISOM_SUBTYPE_3GP_QCELP:
				case GF_ISOM_SUBTYPE_3GP_SMV:
					if (ptr->cfg_3gpp) ptr->cfg_3gpp->cfg.type = type;
					break;
				}
			}
		}
		return GF_OK;
	}
	/* Child-box parsing failed: need at least 8 bytes (box size + fourcc)
	   for the scan; this check also keeps size-8 below from underflowing. */
	if (size<8) return GF_ISOM_INVALID_FILE;

	/*hack for some weird files (possibly recorded with live.com tools, needs further investigations)*/
	gf_bs_seek(bs, pos);
	data = (char*)gf_malloc(sizeof(char) * size);
	if (!data) return GF_OUT_OF_MEM;
	gf_bs_read_data(bs, data, size);
	/* Slide a window over the payload looking for an 'esds' fourcc at
	   offset i+4 (i.e. a box whose size field starts at offset i).
	   NOTE(review): data is char (possibly signed); the (u32) cast on
	   data[i+4] sign-extends for bytes >= 0x80 — presumably harmless since
	   the extra high bits are shifted out by GF_4CC's <<24; confirm against
	   the GF_4CC macro definition. */
	for (i=0; i<size-8; i++) {
		if (GF_4CC((u32)data[i+4], (u8)data[i+5], (u8)data[i+6], (u8)data[i+7]) == GF_ISOM_BOX_TYPE_ESDS) {
			GF_BitStream *mybs = gf_bs_new(data + i, size - i, GF_BITSTREAM_READ);
			/* Drop any partially-parsed esd before re-parsing from here. */
			if (ptr->esd) gf_isom_box_del_parent(&ptr->child_boxes, (GF_Box *)ptr->esd);
			ptr->esd = NULL;
			e = gf_isom_box_parse((GF_Box **)&ptr->esd, mybs);
			gf_bs_del(mybs);
			if (e==GF_OK) {
				/* Ownership of the parsed esd transfers to child_boxes. */
				if (!ptr->child_boxes) ptr->child_boxes = gf_list_new();
				gf_list_add(ptr->child_boxes, ptr->esd);
			} else if (ptr->esd) {
				gf_isom_box_del((GF_Box *)ptr->esd);
				ptr->esd = NULL;
			}
			break;
		}
	}
	gf_free(data);
	return e;
}
| 0 |
[
"CWE-476",
"CWE-787"
] |
gpac
|
b8f8b202d4fc23eb0ab4ce71ae96536ca6f5d3f8
| 123,512,647,535,188,890,000,000,000,000,000,000,000 | 94 |
fixed #1757
|
Subsets and Splits
CWE 416 & 19
The query filters records related to specific CWEs (Common Weakness Enumerations), providing a basic overview of entries with these vulnerabilities but without deeper analysis.
CWE Frequency in Train Set
Counts the occurrences of each CWE (Common Weakness Enumeration) in the dataset, providing a basic distribution but limited insight.