instruction (stringclasses, 1 value) | input (stringlengths, 31–235k) | output (class label, 2 classes)
---|---|---|
Categorize the following code snippet as vulnerable or not. True or False | static gboolean set_pinfo_desegment ( packet_info * pinfo , gint next_offset , gint addition_bytes_needed ) {
if ( pinfo -> can_desegment ) {
pinfo -> desegment_offset = next_offset ;
pinfo -> desegment_len = addition_bytes_needed ;
return TRUE ;
}
return FALSE ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | const EVP_CIPHER * EVP_aes_ ## keylen ## _ ## mode ( void ) \ {
return & aes_ ## keylen ## _ ## mode ;
}
# endif # if defined ( OPENSSL_CPUID_OBJ ) && ( defined ( __arm__ ) || defined ( __arm ) || defined ( __aarch64__ ) ) # include "arm_arch.h" # if __ARM_MAX_ARCH__ >= 7 # if defined ( BSAES_ASM ) # define BSAES_CAPABLE ( OPENSSL_armcap_P & ARMV7_NEON ) # endif # if defined ( VPAES_ASM ) # define VPAES_CAPABLE ( OPENSSL_armcap_P & ARMV7_NEON ) # endif # define HWAES_CAPABLE ( OPENSSL_armcap_P & ARMV8_AES ) # define HWAES_set_encrypt_key aes_v8_set_encrypt_key # define HWAES_set_decrypt_key aes_v8_set_decrypt_key # define HWAES_encrypt aes_v8_encrypt # define HWAES_decrypt aes_v8_decrypt # define HWAES_cbc_encrypt aes_v8_cbc_encrypt # define HWAES_ctr32_encrypt_blocks aes_v8_ctr32_encrypt_blocks # endif # endif # if defined ( HWAES_CAPABLE ) int HWAES_set_encrypt_key ( const unsigned char * userKey , const int bits , AES_KEY * key ) ;
int HWAES_set_decrypt_key ( const unsigned char * userKey , const int bits , AES_KEY * key ) ;
void HWAES_encrypt ( const unsigned char * in , unsigned char * out , const AES_KEY * key ) ;
void HWAES_decrypt ( const unsigned char * in , unsigned char * out , const AES_KEY * key ) ;
void HWAES_cbc_encrypt ( const unsigned char * in , unsigned char * out , size_t length , const AES_KEY * key , unsigned char * ivec , const int enc ) ;
void HWAES_ctr32_encrypt_blocks ( const unsigned char * in , unsigned char * out , size_t len , const AES_KEY * key , const unsigned char ivec [ 16 ] ) ;
void HWAES_xts_encrypt ( const unsigned char * inp , unsigned char * out , size_t len , const AES_KEY * key1 , const AES_KEY * key2 , const unsigned char iv [ 16 ] ) ;
void HWAES_xts_decrypt ( const unsigned char * inp , unsigned char * out , size_t len , const AES_KEY * key1 , const AES_KEY * key2 , const unsigned char iv [ 16 ] ) ;
# endif # define BLOCK_CIPHER_generic_pack ( nid , keylen , flags ) \ BLOCK_CIPHER_generic ( nid , keylen , 16 , 16 , cbc , cbc , CBC , flags | EVP_CIPH_FLAG_DEFAULT_ASN1 ) \ BLOCK_CIPHER_generic ( nid , keylen , 16 , 0 , ecb , ecb , ECB , flags | EVP_CIPH_FLAG_DEFAULT_ASN1 ) \ BLOCK_CIPHER_generic ( nid , keylen , 1 , 16 , ofb128 , ofb , OFB , flags | EVP_CIPH_FLAG_DEFAULT_ASN1 ) \ BLOCK_CIPHER_generic ( nid , keylen , 1 , 16 , cfb128 , cfb , CFB , flags | EVP_CIPH_FLAG_DEFAULT_ASN1 ) \ BLOCK_CIPHER_generic ( nid , keylen , 1 , 16 , cfb1 , cfb1 , CFB , flags ) \ BLOCK_CIPHER_generic ( nid , keylen , 1 , 16 , cfb8 , cfb8 , CFB , flags ) \ BLOCK_CIPHER_generic ( nid , keylen , 1 , 16 , ctr , ctr , CTR , flags ) static int aes_init_key ( EVP_CIPHER_CTX * ctx , const unsigned char * key , const unsigned char * iv , int enc ) {
int ret , mode ;
EVP_AES_KEY * dat = EVP_C_DATA ( EVP_AES_KEY , ctx ) ;
mode = EVP_CIPHER_CTX_mode ( ctx ) ;
if ( ( mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE ) && ! enc ) {
# ifdef HWAES_CAPABLE if ( HWAES_CAPABLE ) {
ret = HWAES_set_decrypt_key ( key , EVP_CIPHER_CTX_key_length ( ctx ) * 8 , & dat -> ks . ks ) ;
dat -> block = ( block128_f ) HWAES_decrypt ;
dat -> stream . cbc = NULL ;
# ifdef HWAES_cbc_encrypt if ( mode == EVP_CIPH_CBC_MODE ) dat -> stream . cbc = ( cbc128_f ) HWAES_cbc_encrypt ;
# endif }
else # endif # ifdef BSAES_CAPABLE if ( BSAES_CAPABLE && mode == EVP_CIPH_CBC_MODE ) {
ret = AES_set_decrypt_key ( key , EVP_CIPHER_CTX_key_length ( ctx ) * 8 , & dat -> ks . ks ) ;
dat -> block = ( block128_f ) AES_decrypt ;
dat -> stream . cbc = ( cbc128_f ) bsaes_cbc_encrypt ;
}
else # endif # ifdef VPAES_CAPABLE if ( VPAES_CAPABLE ) {
ret = vpaes_set_decrypt_key ( key , EVP_CIPHER_CTX_key_length ( ctx ) * 8 , & dat -> ks . ks ) ;
dat -> block = ( block128_f ) vpaes_decrypt ;
dat -> stream . cbc = mode == EVP_CIPH_CBC_MODE ? ( cbc128_f ) vpaes_cbc_encrypt : NULL ;
}
else # endif {
ret = AES_set_decrypt_key ( key , EVP_CIPHER_CTX_key_length ( ctx ) * 8 , & dat -> ks . ks ) ;
dat -> block = ( block128_f ) AES_decrypt ;
dat -> stream . cbc = mode == EVP_CIPH_CBC_MODE ? ( cbc128_f ) AES_cbc_encrypt : NULL ;
}
}
else # ifdef HWAES_CAPABLE if ( HWAES_CAPABLE ) {
ret = HWAES_set_encrypt_key ( key , EVP_CIPHER_CTX_key_length ( ctx ) * 8 , & dat -> ks . ks ) ;
dat -> block = ( block128_f ) HWAES_encrypt ;
dat -> stream . cbc = NULL ;
# ifdef HWAES_cbc_encrypt if ( mode == EVP_CIPH_CBC_MODE ) dat -> stream . cbc = ( cbc128_f ) HWAES_cbc_encrypt ;
else # endif # ifdef HWAES_ctr32_encrypt_blocks if ( mode == EVP_CIPH_CTR_MODE ) dat -> stream . ctr = ( ctr128_f ) HWAES_ctr32_encrypt_blocks ;
else # endif ( void ) 0 ;
}
else # endif # ifdef BSAES_CAPABLE if ( BSAES_CAPABLE && mode == EVP_CIPH_CTR_MODE ) {
ret = AES_set_encrypt_key ( key , EVP_CIPHER_CTX_key_length ( ctx ) * 8 , & dat -> ks . ks ) ;
dat -> block = ( block128_f ) AES_encrypt ;
dat -> stream . ctr = ( ctr128_f ) bsaes_ctr32_encrypt_blocks ;
}
else # endif # ifdef VPAES_CAPABLE if ( VPAES_CAPABLE ) {
ret = vpaes_set_encrypt_key ( key , EVP_CIPHER_CTX_key_length ( ctx ) * 8 , & dat -> ks . ks ) ;
dat -> block = ( block128_f ) vpaes_encrypt ;
dat -> stream . cbc = mode == EVP_CIPH_CBC_MODE ? ( cbc128_f ) vpaes_cbc_encrypt : NULL ;
}
else # endif {
ret = AES_set_encrypt_key ( key , EVP_CIPHER_CTX_key_length ( ctx ) * 8 , & dat -> ks . ks ) ;
dat -> block = ( block128_f ) AES_encrypt ;
dat -> stream . cbc = mode == EVP_CIPH_CBC_MODE ? ( cbc128_f ) AES_cbc_encrypt : NULL ;
# ifdef AES_CTR_ASM if ( mode == EVP_CIPH_CTR_MODE ) dat -> stream . ctr = ( ctr128_f ) AES_ctr32_encrypt ;
# endif }
if ( ret < 0 ) {
EVPerr ( EVP_F_AES_INIT_KEY , EVP_R_AES_KEY_SETUP_FAILED ) ;
return 0 ;
}
return 1 ;
}
static int aes_cbc_cipher ( EVP_CIPHER_CTX * ctx , unsigned char * out , const unsigned char * in , size_t len ) {
EVP_AES_KEY * dat = EVP_C_DATA ( EVP_AES_KEY , ctx ) ;
if ( dat -> stream . cbc ) ( * dat -> stream . cbc ) ( in , out , len , & dat -> ks , EVP_CIPHER_CTX_iv_noconst ( ctx ) , EVP_CIPHER_CTX_encrypting ( ctx ) ) ;
else if ( EVP_CIPHER_CTX_encrypting ( ctx ) ) CRYPTO_cbc128_encrypt ( in , out , len , & dat -> ks , EVP_CIPHER_CTX_iv_noconst ( ctx ) , dat -> block ) ;
else CRYPTO_cbc128_decrypt ( in , out , len , & dat -> ks , EVP_CIPHER_CTX_iv_noconst ( ctx ) , dat -> block ) ;
return 1 ;
}
static int aes_ecb_cipher ( EVP_CIPHER_CTX * ctx , unsigned char * out , const unsigned char * in , size_t len ) {
size_t bl = EVP_CIPHER_CTX_block_size ( ctx ) ;
size_t i ;
EVP_AES_KEY * dat = EVP_C_DATA ( EVP_AES_KEY , ctx ) ;
if ( len < bl ) return 1 ;
for ( i = 0 , len -= bl ;
i <= len ;
i += bl ) ( * dat -> block ) ( in + i , out + i , & dat -> ks ) ;
return 1 ;
}
static int aes_ofb_cipher ( EVP_CIPHER_CTX * ctx , unsigned char * out , const unsigned char * in , size_t len ) {
EVP_AES_KEY * dat = EVP_C_DATA ( EVP_AES_KEY , ctx ) ;
int num = EVP_CIPHER_CTX_num ( ctx ) ;
CRYPTO_ofb128_encrypt ( in , out , len , & dat -> ks , EVP_CIPHER_CTX_iv_noconst ( ctx ) , & num , dat -> block ) ;
EVP_CIPHER_CTX_set_num ( ctx , num ) ;
return 1 ;
}
static int aes_cfb_cipher ( EVP_CIPHER_CTX * ctx , unsigned char * out , const unsigned char * in , size_t len ) {
EVP_AES_KEY * dat = EVP_C_DATA ( EVP_AES_KEY , ctx ) ;
int num = EVP_CIPHER_CTX_num ( ctx ) ;
CRYPTO_cfb128_encrypt ( in , out , len , & dat -> ks , EVP_CIPHER_CTX_iv_noconst ( ctx ) , & num , EVP_CIPHER_CTX_encrypting ( ctx ) , dat -> block ) ;
EVP_CIPHER_CTX_set_num ( ctx , num ) ;
return 1 ;
}
static int aes_cfb8_cipher ( EVP_CIPHER_CTX * ctx , unsigned char * out , const unsigned char * in , size_t len ) {
EVP_AES_KEY * dat = EVP_C_DATA ( EVP_AES_KEY , ctx ) ;
int num = EVP_CIPHER_CTX_num ( ctx ) ;
CRYPTO_cfb128_8_encrypt ( in , out , len , & dat -> ks , EVP_CIPHER_CTX_iv_noconst ( ctx ) , & num , EVP_CIPHER_CTX_encrypting ( ctx ) , dat -> block ) ;
EVP_CIPHER_CTX_set_num ( ctx , num ) ;
return 1 ;
}
static int aes_cfb1_cipher ( EVP_CIPHER_CTX * ctx , unsigned char * out , const unsigned char * in , size_t len ) {
EVP_AES_KEY * dat = EVP_C_DATA ( EVP_AES_KEY , ctx ) ;
if ( EVP_CIPHER_CTX_test_flags ( ctx , EVP_CIPH_FLAG_LENGTH_BITS ) ) {
int num = EVP_CIPHER_CTX_num ( ctx ) ;
CRYPTO_cfb128_1_encrypt ( in , out , len , & dat -> ks , EVP_CIPHER_CTX_iv_noconst ( ctx ) , & num , EVP_CIPHER_CTX_encrypting ( ctx ) , dat -> block ) ;
EVP_CIPHER_CTX_set_num ( ctx , num ) ;
return 1 ;
}
while ( len >= MAXBITCHUNK ) {
int num = EVP_CIPHER_CTX_num ( ctx ) ;
CRYPTO_cfb128_1_encrypt ( in , out , MAXBITCHUNK * 8 , & dat -> ks , EVP_CIPHER_CTX_iv_noconst ( ctx ) , & num , EVP_CIPHER_CTX_encrypting ( ctx ) , dat -> block ) ;
EVP_CIPHER_CTX_set_num ( ctx , num ) ;
len -= MAXBITCHUNK ;
}
if ( len ) {
int num = EVP_CIPHER_CTX_num ( ctx ) ;
CRYPTO_cfb128_1_encrypt ( in , out , len * 8 , & dat -> ks , EVP_CIPHER_CTX_iv_noconst ( ctx ) , & num , EVP_CIPHER_CTX_encrypting ( ctx ) , dat -> block ) ;
EVP_CIPHER_CTX_set_num ( ctx , num ) ;
}
return 1 ;
}
static int aes_ctr_cipher ( EVP_CIPHER_CTX * ctx , unsigned char * out , const unsigned char * in , size_t len ) {
unsigned int num = EVP_CIPHER_CTX_num ( ctx ) ;
EVP_AES_KEY * dat = EVP_C_DATA ( EVP_AES_KEY , ctx ) ;
if ( dat -> stream . ctr ) CRYPTO_ctr128_encrypt_ctr32 ( in , out , len , & dat -> ks , EVP_CIPHER_CTX_iv_noconst ( ctx ) , EVP_CIPHER_CTX_buf_noconst ( ctx ) , & num , dat -> stream . ctr ) ;
else CRYPTO_ctr128_encrypt ( in , out , len , & dat -> ks , EVP_CIPHER_CTX_iv_noconst ( ctx ) , EVP_CIPHER_CTX_buf_noconst ( ctx ) , & num , dat -> block ) ;
EVP_CIPHER_CTX_set_num ( ctx , num ) ;
return 1 ;
}
BLOCK_CIPHER_generic_pack ( NID_aes , 128 , 0 ) BLOCK_CIPHER_generic_pack ( NID_aes , 192 , 0 ) BLOCK_CIPHER_generic_pack ( NID_aes , 256 , 0 ) static int aes_gcm_cleanup ( EVP_CIPHER_CTX * c ) {
EVP_AES_GCM_CTX * gctx = EVP_C_DATA ( EVP_AES_GCM_CTX , c ) ;
OPENSSL_cleanse ( & gctx -> gcm , sizeof ( gctx -> gcm ) ) ;
if ( gctx -> iv != EVP_CIPHER_CTX_iv_noconst ( c ) ) OPENSSL_free ( gctx -> iv ) ;
return 1 ;
}
static void ctr64_inc ( unsigned char * counter ) {
int n = 8 ;
unsigned char c ;
do {
-- n ;
c = counter [ n ] ;
++ c ;
counter [ n ] = c ;
if ( c ) return ;
}
while ( n ) ;
}
static int aes_gcm_ctrl ( EVP_CIPHER_CTX * c , int type , int arg , void * ptr ) {
EVP_AES_GCM_CTX * gctx = EVP_C_DATA ( EVP_AES_GCM_CTX , c ) ;
switch ( type ) {
case EVP_CTRL_INIT : gctx -> key_set = 0 ;
gctx -> iv_set = 0 ;
gctx -> ivlen = EVP_CIPHER_CTX_iv_length ( c ) ;
gctx -> iv = EVP_CIPHER_CTX_iv_noconst ( c ) ;
gctx -> taglen = - 1 ;
gctx -> iv_gen = 0 ;
gctx -> tls_aad_len = - 1 ;
return 1 ;
case EVP_CTRL_AEAD_SET_IVLEN : if ( arg <= 0 ) return 0 ;
if ( ( arg > EVP_MAX_IV_LENGTH ) && ( arg > gctx -> ivlen ) ) {
if ( gctx -> iv != EVP_CIPHER_CTX_iv_noconst ( c ) ) OPENSSL_free ( gctx -> iv ) ;
gctx -> iv = OPENSSL_malloc ( arg ) ;
if ( gctx -> iv == NULL ) return 0 ;
}
gctx -> ivlen = arg ;
return 1 ;
case EVP_CTRL_AEAD_SET_TAG : if ( arg <= 0 || arg > 16 || EVP_CIPHER_CTX_encrypting ( c ) ) return 0 ;
memcpy ( EVP_CIPHER_CTX_buf_noconst ( c ) , ptr , arg ) ;
gctx -> taglen = arg ;
return 1 ;
case EVP_CTRL_AEAD_GET_TAG : if ( arg <= 0 || arg > 16 || ! EVP_CIPHER_CTX_encrypting ( c ) || gctx -> taglen < 0 ) return 0 ;
memcpy ( ptr , EVP_CIPHER_CTX_buf_noconst ( c ) , arg ) ;
return 1 ;
case EVP_CTRL_GCM_SET_IV_FIXED : if ( arg == - 1 ) {
memcpy ( gctx -> iv , ptr , gctx -> ivlen ) ;
gctx -> iv_gen = 1 ;
return 1 ;
}
if ( ( arg < 4 ) || ( gctx -> ivlen - arg ) < 8 ) return 0 ;
if ( arg ) memcpy ( gctx -> iv , ptr , arg ) ;
if ( EVP_CIPHER_CTX_encrypting ( c ) && RAND_bytes ( gctx -> iv + arg , gctx -> ivlen - arg ) <= 0 ) return 0 ;
gctx -> iv_gen = 1 ;
return 1 ;
case EVP_CTRL_GCM_IV_GEN : if ( gctx -> iv_gen == 0 || gctx -> key_set == 0 ) return 0 ;
CRYPTO_gcm128_setiv ( & gctx -> gcm , gctx -> iv , gctx -> ivlen ) ;
if ( arg <= 0 || arg > gctx -> ivlen ) arg = gctx -> ivlen ;
memcpy ( ptr , gctx -> iv + gctx -> ivlen - arg , arg ) ;
ctr64_inc ( gctx -> iv + gctx -> ivlen - 8 ) ;
gctx -> iv_set = 1 ;
return 1 ;
case EVP_CTRL_GCM_SET_IV_INV : if ( gctx -> iv_gen == 0 || gctx -> key_set == 0 || EVP_CIPHER_CTX_encrypting ( c ) ) return 0 ;
memcpy ( gctx -> iv + gctx -> ivlen - arg , ptr , arg ) ;
CRYPTO_gcm128_setiv ( & gctx -> gcm , gctx -> iv , gctx -> ivlen ) ;
gctx -> iv_set = 1 ;
return 1 ;
case EVP_CTRL_AEAD_TLS1_AAD : if ( arg != EVP_AEAD_TLS1_AAD_LEN ) return 0 ;
memcpy ( EVP_CIPHER_CTX_buf_noconst ( c ) , ptr , arg ) ;
gctx -> tls_aad_len = arg ;
{
unsigned int len = EVP_CIPHER_CTX_buf_noconst ( c ) [ arg - 2 ] << 8 | EVP_CIPHER_CTX_buf_noconst ( c ) [ arg - 1 ] ;
if ( len < EVP_GCM_TLS_EXPLICIT_IV_LEN ) return 0 ;
len -= EVP_GCM_TLS_EXPLICIT_IV_LEN ;
if ( ! EVP_CIPHER_CTX_encrypting ( c ) ) {
if ( len < EVP_GCM_TLS_TAG_LEN ) return 0 ;
len -= EVP_GCM_TLS_TAG_LEN ;
}
EVP_CIPHER_CTX_buf_noconst ( c ) [ arg - 2 ] = len >> 8 ;
EVP_CIPHER_CTX_buf_noconst ( c ) [ arg - 1 ] = len & 0xff ;
}
return EVP_GCM_TLS_TAG_LEN ;
case EVP_CTRL_COPY : {
EVP_CIPHER_CTX * out = ptr ;
EVP_AES_GCM_CTX * gctx_out = EVP_C_DATA ( EVP_AES_GCM_CTX , out ) ;
if ( gctx -> gcm . key ) {
if ( gctx -> gcm . key != & gctx -> ks ) return 0 ;
gctx_out -> gcm . key = & gctx_out -> ks ;
}
if ( gctx -> iv == EVP_CIPHER_CTX_iv_noconst ( c ) ) gctx_out -> iv = EVP_CIPHER_CTX_iv_noconst ( out ) ;
else {
gctx_out -> iv = OPENSSL_malloc ( gctx -> ivlen ) ;
if ( gctx_out -> iv == NULL ) return 0 ;
memcpy ( gctx_out -> iv , gctx -> iv , gctx -> ivlen ) ;
}
return 1 ;
}
default : return - 1 ;
}
}
static int aes_gcm_init_key ( EVP_CIPHER_CTX * ctx , const unsigned char * key , const unsigned char * iv , int enc ) {
EVP_AES_GCM_CTX * gctx = EVP_C_DATA ( EVP_AES_GCM_CTX , ctx ) ;
if ( ! iv && ! key ) return 1 ;
if ( key ) {
do {
# ifdef HWAES_CAPABLE if ( HWAES_CAPABLE ) {
HWAES_set_encrypt_key ( key , EVP_CIPHER_CTX_key_length ( ctx ) * 8 , & gctx -> ks . ks ) ;
CRYPTO_gcm128_init ( & gctx -> gcm , & gctx -> ks , ( block128_f ) HWAES_encrypt ) ;
# ifdef HWAES_ctr32_encrypt_blocks gctx -> ctr = ( ctr128_f ) HWAES_ctr32_encrypt_blocks ;
# else gctx -> ctr = NULL ;
# endif break ;
}
else # endif # ifdef BSAES_CAPABLE if ( BSAES_CAPABLE ) {
AES_set_encrypt_key ( key , EVP_CIPHER_CTX_key_length ( ctx ) * 8 , & gctx -> ks . ks ) ;
CRYPTO_gcm128_init ( & gctx -> gcm , & gctx -> ks , ( block128_f ) AES_encrypt ) ;
gctx -> ctr = ( ctr128_f ) bsaes_ctr32_encrypt_blocks ;
break ;
}
else # endif # ifdef VPAES_CAPABLE if ( VPAES_CAPABLE ) {
vpaes_set_encrypt_key ( key , EVP_CIPHER_CTX_key_length ( ctx ) * 8 , & gctx -> ks . ks ) ;
CRYPTO_gcm128_init ( & gctx -> gcm , & gctx -> ks , ( block128_f ) vpaes_encrypt ) ;
gctx -> ctr = NULL ;
break ;
}
else # endif ( void ) 0 ;
AES_set_encrypt_key ( key , EVP_CIPHER_CTX_key_length ( ctx ) * 8 , & gctx -> ks . ks ) ;
CRYPTO_gcm128_init ( & gctx -> gcm , & gctx -> ks , ( block128_f ) AES_encrypt ) ;
# ifdef AES_CTR_ASM gctx -> ctr = ( ctr128_f ) AES_ctr32_encrypt ;
# else gctx -> ctr = NULL ;
# endif }
while ( 0 ) ;
if ( iv == NULL && gctx -> iv_set ) iv = gctx -> iv ;
if ( iv ) {
CRYPTO_gcm128_setiv ( & gctx -> gcm , iv , gctx -> ivlen ) ;
gctx -> iv_set = 1 ;
}
gctx -> key_set = 1 ;
}
else {
if ( gctx -> key_set ) CRYPTO_gcm128_setiv ( & gctx -> gcm , iv , gctx -> ivlen ) ;
else memcpy ( gctx -> iv , iv , gctx -> ivlen ) ;
gctx -> iv_set = 1 ;
gctx -> iv_gen = 0 ;
}
return 1 ;
}
static int aes_gcm_tls_cipher ( EVP_CIPHER_CTX * ctx , unsigned char * out , const unsigned char * in , size_t len ) {
EVP_AES_GCM_CTX * gctx = EVP_C_DATA ( EVP_AES_GCM_CTX , ctx ) ;
int rv = - 1 ;
if ( out != in || len < ( EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN ) ) return - 1 ;
if ( EVP_CIPHER_CTX_ctrl ( ctx , EVP_CIPHER_CTX_encrypting ( ctx ) ? EVP_CTRL_GCM_IV_GEN : EVP_CTRL_GCM_SET_IV_INV , EVP_GCM_TLS_EXPLICIT_IV_LEN , out ) <= 0 ) goto err ;
if ( CRYPTO_gcm128_aad ( & gctx -> gcm , EVP_CIPHER_CTX_buf_noconst ( ctx ) , gctx -> tls_aad_len ) ) goto err ;
in += EVP_GCM_TLS_EXPLICIT_IV_LEN ;
out += EVP_GCM_TLS_EXPLICIT_IV_LEN ;
len -= EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN ;
if ( EVP_CIPHER_CTX_encrypting ( ctx ) ) {
if ( gctx -> ctr ) {
size_t bulk = 0 ;
# if defined ( AES_GCM_ASM ) if ( len >= 32 && AES_GCM_ASM ( gctx ) ) {
if ( CRYPTO_gcm128_encrypt ( & gctx -> gcm , NULL , NULL , 0 ) ) return - 1 ;
bulk = AES_gcm_encrypt ( in , out , len , gctx -> gcm . key , gctx -> gcm . Yi . c , gctx -> gcm . Xi . u ) ;
gctx -> gcm . len . u [ 1 ] += bulk ;
}
# endif if ( CRYPTO_gcm128_encrypt_ctr32 ( & gctx -> gcm , in + bulk , out + bulk , len - bulk , gctx -> ctr ) ) goto err ;
}
else {
size_t bulk = 0 ;
# if defined ( AES_GCM_ASM2 ) if ( len >= 32 && AES_GCM_ASM2 ( gctx ) ) {
if ( CRYPTO_gcm128_encrypt ( & gctx -> gcm , NULL , NULL , 0 ) ) return - 1 ;
bulk = AES_gcm_encrypt ( in , out , len , gctx -> gcm . key , gctx -> gcm . Yi . c , gctx -> gcm . Xi . u ) ;
gctx -> gcm . len . u [ 1 ] += bulk ;
}
# endif if ( CRYPTO_gcm128_encrypt ( & gctx -> gcm , in + bulk , out + bulk , len - bulk ) ) goto err ;
}
out += len ;
CRYPTO_gcm128_tag ( & gctx -> gcm , out , EVP_GCM_TLS_TAG_LEN ) ;
rv = len + EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN ;
}
else {
if ( gctx -> ctr ) {
size_t bulk = 0 ;
# if defined ( AES_GCM_ASM ) if ( len >= 16 && AES_GCM_ASM ( gctx ) ) {
if ( CRYPTO_gcm128_decrypt ( & gctx -> gcm , NULL , NULL , 0 ) ) return - 1 ;
bulk = AES_gcm_decrypt ( in , out , len , gctx -> gcm . key , gctx -> gcm . Yi . c , gctx -> gcm . Xi . u ) ;
gctx -> gcm . len . u [ 1 ] += bulk ;
}
# endif if ( CRYPTO_gcm128_decrypt_ctr32 ( & gctx -> gcm , in + bulk , out + bulk , len - bulk , gctx -> ctr ) ) goto err ;
}
else {
size_t bulk = 0 ;
# if defined ( AES_GCM_ASM2 ) if ( len >= 16 && AES_GCM_ASM2 ( gctx ) ) {
if ( CRYPTO_gcm128_decrypt ( & gctx -> gcm , NULL , NULL , 0 ) ) return - 1 ;
bulk = AES_gcm_decrypt ( in , out , len , gctx -> gcm . key , gctx -> gcm . Yi . c , gctx -> gcm . Xi . u ) ;
gctx -> gcm . len . u [ 1 ] += bulk ;
}
# endif if ( CRYPTO_gcm128_decrypt ( & gctx -> gcm , in + bulk , out + bulk , len - bulk ) ) goto err ;
}
CRYPTO_gcm128_tag ( & gctx -> gcm , EVP_CIPHER_CTX_buf_noconst ( ctx ) , EVP_GCM_TLS_TAG_LEN ) ;
if ( CRYPTO_memcmp ( EVP_CIPHER_CTX_buf_noconst ( ctx ) , in + len , EVP_GCM_TLS_TAG_LEN ) ) {
OPENSSL_cleanse ( out , len ) ;
goto err ;
}
rv = len ;
}
err : gctx -> iv_set = 0 ;
gctx -> tls_aad_len = - 1 ;
return rv ;
}
static int aes_gcm_cipher ( EVP_CIPHER_CTX * ctx , unsigned char * out , const unsigned char * in , size_t len ) {
EVP_AES_GCM_CTX * gctx = EVP_C_DATA ( EVP_AES_GCM_CTX , ctx ) ;
if ( ! gctx -> key_set ) return - 1 ;
if ( gctx -> tls_aad_len >= 0 ) return aes_gcm_tls_cipher ( ctx , out , in , len ) ;
if ( ! gctx -> iv_set ) return - 1 ;
if ( in ) {
if ( out == NULL ) {
if ( CRYPTO_gcm128_aad ( & gctx -> gcm , in , len ) ) return - 1 ;
}
else if ( EVP_CIPHER_CTX_encrypting ( ctx ) ) {
if ( gctx -> ctr ) {
size_t bulk = 0 ;
# if defined ( AES_GCM_ASM ) if ( len >= 32 && AES_GCM_ASM ( gctx ) ) {
size_t res = ( 16 - gctx -> gcm . mres ) % 16 ;
if ( CRYPTO_gcm128_encrypt ( & gctx -> gcm , in , out , res ) ) return - 1 ;
bulk = AES_gcm_encrypt ( in + res , out + res , len - res , gctx -> gcm . key , gctx -> gcm . Yi . c , gctx -> gcm . Xi . u ) ;
gctx -> gcm . len . u [ 1 ] += bulk ;
bulk += res ;
}
# endif if ( CRYPTO_gcm128_encrypt_ctr32 ( & gctx -> gcm , in + bulk , out + bulk , len - bulk , gctx -> ctr ) ) return - 1 ;
}
else {
size_t bulk = 0 ;
# if defined ( AES_GCM_ASM2 ) if ( len >= 32 && AES_GCM_ASM2 ( gctx ) ) {
size_t res = ( 16 - gctx -> gcm . mres ) % 16 ;
if ( CRYPTO_gcm128_encrypt ( & gctx -> gcm , in , out , res ) ) return - 1 ;
bulk = AES_gcm_encrypt ( in + res , out + res , len - res , gctx -> gcm . key , gctx -> gcm . Yi . c , gctx -> gcm . Xi . u ) ;
gctx -> gcm . len . u [ 1 ] += bulk ;
bulk += res ;
}
# endif if ( CRYPTO_gcm128_encrypt ( & gctx -> gcm , in + bulk , out + bulk , len - bulk ) ) return - 1 ;
}
}
else {
if ( gctx -> ctr ) {
size_t bulk = 0 ;
# if defined ( AES_GCM_ASM ) if ( len >= 16 && AES_GCM_ASM ( gctx ) ) {
size_t res = ( 16 - gctx -> gcm . mres ) % 16 ;
if ( CRYPTO_gcm128_decrypt ( & gctx -> gcm , in , out , res ) ) return - 1 ;
bulk = AES_gcm_decrypt ( in + res , out + res , len - res , gctx -> gcm . key , gctx -> gcm . Yi . c , gctx -> gcm . Xi . u ) ;
gctx -> gcm . len . u [ 1 ] += bulk ;
bulk += res ;
}
# endif if ( CRYPTO_gcm128_decrypt_ctr32 ( & gctx -> gcm , in + bulk , out + bulk , len - bulk , gctx -> ctr ) ) return - 1 ;
}
else {
size_t bulk = 0 ;
# if defined ( AES_GCM_ASM2 ) if ( len >= 16 && AES_GCM_ASM2 ( gctx ) ) {
size_t res = ( 16 - gctx -> gcm . mres ) % 16 ;
if ( CRYPTO_gcm128_decrypt ( & gctx -> gcm , in , out , res ) ) return - 1 ;
bulk = AES_gcm_decrypt ( in + res , out + res , len - res , gctx -> gcm . key , gctx -> gcm . Yi . c , gctx -> gcm . Xi . u ) ;
gctx -> gcm . len . u [ 1 ] += bulk ;
bulk += res ;
}
# endif if ( CRYPTO_gcm128_decrypt ( & gctx -> gcm , in + bulk , out + bulk , len - bulk ) ) return - 1 ;
}
}
return len ;
}
else {
if ( ! EVP_CIPHER_CTX_encrypting ( ctx ) ) {
if ( gctx -> taglen < 0 ) return - 1 ;
if ( CRYPTO_gcm128_finish ( & gctx -> gcm , EVP_CIPHER_CTX_buf_noconst ( ctx ) , gctx -> taglen ) != 0 ) return - 1 ;
gctx -> iv_set = 0 ;
return 0 ;
}
CRYPTO_gcm128_tag ( & gctx -> gcm , EVP_CIPHER_CTX_buf_noconst ( ctx ) , 16 ) ;
gctx -> taglen = 16 ;
gctx -> iv_set = 0 ;
return 0 ;
}
}
# define CUSTOM_FLAGS ( EVP_CIPH_FLAG_DEFAULT_ASN1 \ | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_CUSTOM_CIPHER \ | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT \ | EVP_CIPH_CUSTOM_COPY ) BLOCK_CIPHER_custom ( NID_aes , 128 , 1 , 12 , gcm , GCM , EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS ) BLOCK_CIPHER_custom ( NID_aes , 192 , 1 , 12 , gcm , GCM , EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS ) BLOCK_CIPHER_custom ( NID_aes , 256 , 1 , 12 , gcm , GCM , EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS ) static int aes_xts_ctrl ( EVP_CIPHER_CTX * c , int type , int arg , void * ptr ) {
EVP_AES_XTS_CTX * xctx = EVP_C_DATA ( EVP_AES_XTS_CTX , c ) ;
if ( type == EVP_CTRL_COPY ) {
EVP_CIPHER_CTX * out = ptr ;
EVP_AES_XTS_CTX * xctx_out = EVP_C_DATA ( EVP_AES_XTS_CTX , out ) ;
if ( xctx -> xts . key1 ) {
if ( xctx -> xts . key1 != & xctx -> ks1 ) return 0 ;
xctx_out -> xts . key1 = & xctx_out -> ks1 ;
}
if ( xctx -> xts . key2 ) {
if ( xctx -> xts . key2 != & xctx -> ks2 ) return 0 ;
xctx_out -> xts . key2 = & xctx_out -> ks2 ;
}
return 1 ;
}
else if ( type != EVP_CTRL_INIT ) return - 1 ;
xctx -> xts . key1 = NULL ;
xctx -> xts . key2 = NULL ;
return 1 ;
}
static int aes_xts_init_key ( EVP_CIPHER_CTX * ctx , const unsigned char * key , const unsigned char * iv , int enc ) {
EVP_AES_XTS_CTX * xctx = EVP_C_DATA ( EVP_AES_XTS_CTX , ctx ) ;
if ( ! iv && ! key ) return 1 ;
if ( key ) do {
# ifdef AES_XTS_ASM xctx -> stream = enc ? AES_xts_encrypt : AES_xts_decrypt ;
# else xctx -> stream = NULL ;
# endif # ifdef HWAES_CAPABLE if ( HWAES_CAPABLE ) {
if ( enc ) {
HWAES_set_encrypt_key ( key , EVP_CIPHER_CTX_key_length ( ctx ) * 4 , & xctx -> ks1 . ks ) ;
xctx -> xts . block1 = ( block128_f ) HWAES_encrypt ;
# ifdef HWAES_xts_encrypt xctx -> stream = HWAES_xts_encrypt ;
# endif }
else {
HWAES_set_decrypt_key ( key , EVP_CIPHER_CTX_key_length ( ctx ) * 4 , & xctx -> ks1 . ks ) ;
xctx -> xts . block1 = ( block128_f ) HWAES_decrypt ;
# ifdef HWAES_xts_decrypt xctx -> stream = HWAES_xts_decrypt ;
# endif }
HWAES_set_encrypt_key ( key + EVP_CIPHER_CTX_key_length ( ctx ) / 2 , EVP_CIPHER_CTX_key_length ( ctx ) * 4 , & xctx -> ks2 . ks ) ;
xctx -> xts . block2 = ( block128_f ) HWAES_encrypt ;
xctx -> xts . key1 = & xctx -> ks1 ;
break ;
}
else # endif # ifdef BSAES_CAPABLE if ( BSAES_CAPABLE ) xctx -> stream = enc ? bsaes_xts_encrypt : bsaes_xts_decrypt ;
else # endif # ifdef VPAES_CAPABLE if ( VPAES_CAPABLE ) {
if ( enc ) {
vpaes_set_encrypt_key ( key , EVP_CIPHER_CTX_key_length ( ctx ) * 4 , & xctx -> ks1 . ks ) ;
xctx -> xts . block1 = ( block128_f ) vpaes_encrypt ;
}
else {
vpaes_set_decrypt_key ( key , EVP_CIPHER_CTX_key_length ( ctx ) * 4 , & xctx -> ks1 . ks ) ;
xctx -> xts . block1 = ( block128_f ) vpaes_decrypt ;
}
vpaes_set_encrypt_key ( key + EVP_CIPHER_CTX_key_length ( ctx ) / 2 , EVP_CIPHER_CTX_key_length ( ctx ) * 4 , & xctx -> ks2 . ks ) ;
xctx -> xts . block2 = ( block128_f ) vpaes_encrypt ;
xctx -> xts . key1 = & xctx -> ks1 ;
break ;
}
else # endif ( void ) 0 ;
if ( enc ) {
AES_set_encrypt_key ( key , EVP_CIPHER_CTX_key_length ( ctx ) * 4 , & xctx -> ks1 . ks ) ;
xctx -> xts . block1 = ( block128_f ) AES_encrypt ;
}
else {
AES_set_decrypt_key ( key , EVP_CIPHER_CTX_key_length ( ctx ) * 4 , & xctx -> ks1 . ks ) ;
xctx -> xts . block1 = ( block128_f ) AES_decrypt ;
}
AES_set_encrypt_key ( key + EVP_CIPHER_CTX_key_length ( ctx ) / 2 , EVP_CIPHER_CTX_key_length ( ctx ) * 4 , & xctx -> ks2 . ks ) ;
xctx -> xts . block2 = ( block128_f ) AES_encrypt ;
xctx -> xts . key1 = & xctx -> ks1 ;
}
while ( 0 ) ;
if ( iv ) {
xctx -> xts . key2 = & xctx -> ks2 ;
memcpy ( EVP_CIPHER_CTX_iv_noconst ( ctx ) , iv , 16 ) ;
}
return 1 ;
}
static int aes_xts_cipher ( EVP_CIPHER_CTX * ctx , unsigned char * out , const unsigned char * in , size_t len ) {
EVP_AES_XTS_CTX * xctx = EVP_C_DATA ( EVP_AES_XTS_CTX , ctx ) ;
if ( ! xctx -> xts . key1 || ! xctx -> xts . key2 ) return 0 ;
if ( ! out || ! in || len < AES_BLOCK_SIZE ) return 0 ;
if ( xctx -> stream ) ( * xctx -> stream ) ( in , out , len , xctx -> xts . key1 , xctx -> xts . key2 , EVP_CIPHER_CTX_iv_noconst ( ctx ) ) ;
else if ( CRYPTO_xts128_encrypt ( & xctx -> xts , EVP_CIPHER_CTX_iv_noconst ( ctx ) , in , out , len , EVP_CIPHER_CTX_encrypting ( ctx ) ) ) return 0 ;
return 1 ;
}
# define aes_xts_cleanup NULL # define XTS_FLAGS ( EVP_CIPH_FLAG_DEFAULT_ASN1 | EVP_CIPH_CUSTOM_IV \ | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT \ | EVP_CIPH_CUSTOM_COPY ) BLOCK_CIPHER_custom ( NID_aes , 128 , 1 , 16 , xts , XTS , XTS_FLAGS ) BLOCK_CIPHER_custom ( NID_aes , 256 , 1 , 16 , xts , XTS , XTS_FLAGS ) static int aes_ccm_ctrl ( EVP_CIPHER_CTX * c , int type , int arg , void * ptr ) {
EVP_AES_CCM_CTX * cctx = EVP_C_DATA ( EVP_AES_CCM_CTX , c ) ;
switch ( type ) {
case EVP_CTRL_INIT : cctx -> key_set = 0 ;
cctx -> iv_set = 0 ;
cctx -> L = 8 ;
cctx -> M = 12 ;
cctx -> tag_set = 0 ;
cctx -> len_set = 0 ;
cctx -> tls_aad_len = - 1 ;
return 1 ;
case EVP_CTRL_AEAD_TLS1_AAD : if ( arg != EVP_AEAD_TLS1_AAD_LEN ) return 0 ;
memcpy ( EVP_CIPHER_CTX_buf_noconst ( c ) , ptr , arg ) ;
cctx -> tls_aad_len = arg ;
{
uint16_t len = EVP_CIPHER_CTX_buf_noconst ( c ) [ arg - 2 ] << 8 | EVP_CIPHER_CTX_buf_noconst ( c ) [ arg - 1 ] ;
if ( len < EVP_CCM_TLS_EXPLICIT_IV_LEN ) return 0 ;
len -= EVP_CCM_TLS_EXPLICIT_IV_LEN ;
if ( ! EVP_CIPHER_CTX_encrypting ( c ) ) {
if ( len < cctx -> M ) return 0 ;
len -= cctx -> M ;
}
EVP_CIPHER_CTX_buf_noconst ( c ) [ arg - 2 ] = len >> 8 ;
EVP_CIPHER_CTX_buf_noconst ( c ) [ arg - 1 ] = len & 0xff ;
}
return cctx -> M ;
case EVP_CTRL_CCM_SET_IV_FIXED : if ( arg != EVP_CCM_TLS_FIXED_IV_LEN ) return 0 ;
memcpy ( EVP_CIPHER_CTX_iv_noconst ( c ) , ptr , arg ) ;
return 1 ;
case EVP_CTRL_AEAD_SET_IVLEN : arg = 15 - arg ;
case EVP_CTRL_CCM_SET_L : if ( arg < 2 || arg > 8 ) return 0 ;
cctx -> L = arg ;
return 1 ;
case EVP_CTRL_AEAD_SET_TAG : if ( ( arg & 1 ) || arg < 4 || arg > 16 ) return 0 ;
if ( EVP_CIPHER_CTX_encrypting ( c ) && ptr ) return 0 ;
if ( ptr ) {
cctx -> tag_set = 1 ;
memcpy ( EVP_CIPHER_CTX_buf_noconst ( c ) , ptr , arg ) ;
}
cctx -> M = arg ;
return 1 ;
case EVP_CTRL_AEAD_GET_TAG : if ( ! EVP_CIPHER_CTX_encrypting ( c ) || ! cctx -> tag_set ) return 0 ;
if ( ! CRYPTO_ccm128_tag ( & cctx -> ccm , ptr , ( size_t ) arg ) ) return 0 ;
cctx -> tag_set = 0 ;
cctx -> iv_set = 0 ;
cctx -> len_set = 0 ;
return 1 ;
case EVP_CTRL_COPY : {
EVP_CIPHER_CTX * out = ptr ;
EVP_AES_CCM_CTX * cctx_out = EVP_C_DATA ( EVP_AES_CCM_CTX , out ) ;
if ( cctx -> ccm . key ) {
if ( cctx -> ccm . key != & cctx -> ks ) return 0 ;
cctx_out -> ccm . key = & cctx_out -> ks ;
}
return 1 ;
}
default : return - 1 ;
}
}
static int aes_ccm_init_key ( EVP_CIPHER_CTX * ctx , const unsigned char * key , const unsigned char * iv , int enc ) {
EVP_AES_CCM_CTX * cctx = EVP_C_DATA ( EVP_AES_CCM_CTX , ctx ) ;
if ( ! iv && ! key ) return 1 ;
if ( key ) do {
# ifdef HWAES_CAPABLE if ( HWAES_CAPABLE ) {
HWAES_set_encrypt_key ( key , EVP_CIPHER_CTX_key_length ( ctx ) * 8 , & cctx -> ks . ks ) ;
CRYPTO_ccm128_init ( & cctx -> ccm , cctx -> M , cctx -> L , & cctx -> ks , ( block128_f ) HWAES_encrypt ) ;
cctx -> str = NULL ;
cctx -> key_set = 1 ;
break ;
}
else # endif # ifdef VPAES_CAPABLE if ( VPAES_CAPABLE ) {
vpaes_set_encrypt_key ( key , EVP_CIPHER_CTX_key_length ( ctx ) * 8 , & cctx -> ks . ks ) ;
CRYPTO_ccm128_init ( & cctx -> ccm , cctx -> M , cctx -> L , & cctx -> ks , ( block128_f ) vpaes_encrypt ) ;
cctx -> str = NULL ;
cctx -> key_set = 1 ;
break ;
}
# endif AES_set_encrypt_key ( key , EVP_CIPHER_CTX_key_length ( ctx ) * 8 , & cctx -> ks . ks ) ;
CRYPTO_ccm128_init ( & cctx -> ccm , cctx -> M , cctx -> L , & cctx -> ks , ( block128_f ) AES_encrypt ) ;
cctx -> str = NULL ;
cctx -> key_set = 1 ;
}
while ( 0 ) ;
if ( iv ) {
memcpy ( EVP_CIPHER_CTX_iv_noconst ( ctx ) , iv , 15 - cctx -> L ) ;
cctx -> iv_set = 1 ;
}
return 1 ;
}
static int aes_ccm_tls_cipher ( EVP_CIPHER_CTX * ctx , unsigned char * out , const unsigned char * in , size_t len ) {
EVP_AES_CCM_CTX * cctx = EVP_C_DATA ( EVP_AES_CCM_CTX , ctx ) ;
CCM128_CONTEXT * ccm = & cctx -> ccm ;
if ( out != in || len < ( EVP_CCM_TLS_EXPLICIT_IV_LEN + ( size_t ) cctx -> M ) ) return - 1 ;
if ( EVP_CIPHER_CTX_encrypting ( ctx ) ) memcpy ( out , EVP_CIPHER_CTX_buf_noconst ( ctx ) , EVP_CCM_TLS_EXPLICIT_IV_LEN ) ;
memcpy ( EVP_CIPHER_CTX_iv_noconst ( ctx ) + EVP_CCM_TLS_FIXED_IV_LEN , in , EVP_CCM_TLS_EXPLICIT_IV_LEN ) ;
len -= EVP_CCM_TLS_EXPLICIT_IV_LEN + cctx -> M ;
if ( CRYPTO_ccm128_setiv ( ccm , EVP_CIPHER_CTX_iv_noconst ( ctx ) , 15 - cctx -> L , len ) ) return - 1 ;
CRYPTO_ccm128_aad ( ccm , EVP_CIPHER_CTX_buf_noconst ( ctx ) , cctx -> tls_aad_len ) ;
in += EVP_CCM_TLS_EXPLICIT_IV_LEN ;
out += EVP_CCM_TLS_EXPLICIT_IV_LEN ;
if ( EVP_CIPHER_CTX_encrypting ( ctx ) ) {
if ( cctx -> str ? CRYPTO_ccm128_encrypt_ccm64 ( ccm , in , out , len , cctx -> str ) : CRYPTO_ccm128_encrypt ( ccm , in , out , len ) ) return - 1 ;
if ( ! CRYPTO_ccm128_tag ( ccm , out + len , cctx -> M ) ) return - 1 ;
return len + EVP_CCM_TLS_EXPLICIT_IV_LEN + cctx -> M ;
}
else {
if ( cctx -> str ? ! CRYPTO_ccm128_decrypt_ccm64 ( ccm , in , out , len , cctx -> str ) : ! CRYPTO_ccm128_decrypt ( ccm , in , out , len ) ) {
unsigned char tag [ 16 ] ;
if ( CRYPTO_ccm128_tag ( ccm , tag , cctx -> M ) ) {
if ( ! CRYPTO_memcmp ( tag , in + len , cctx -> M ) ) return len ;
}
}
OPENSSL_cleanse ( out , len ) ;
return - 1 ;
}
}
static int aes_ccm_cipher ( EVP_CIPHER_CTX * ctx , unsigned char * out , const unsigned char * in , size_t len ) {
EVP_AES_CCM_CTX * cctx = EVP_C_DATA ( EVP_AES_CCM_CTX , ctx ) ;
CCM128_CONTEXT * ccm = & cctx -> ccm ;
if ( ! cctx -> key_set ) return - 1 ;
if ( cctx -> tls_aad_len >= 0 ) return aes_ccm_tls_cipher ( ctx , out , in , len ) ;
if ( ! cctx -> iv_set ) return - 1 ;
if ( ! EVP_CIPHER_CTX_encrypting ( ctx ) && ! cctx -> tag_set ) return - 1 ;
if ( ! out ) {
if ( ! in ) {
if ( CRYPTO_ccm128_setiv ( ccm , EVP_CIPHER_CTX_iv_noconst ( ctx ) , 15 - cctx -> L , len ) ) return - 1 ;
cctx -> len_set = 1 ;
return len ;
}
if ( ! cctx -> len_set && len ) return - 1 ;
CRYPTO_ccm128_aad ( ccm , in , len ) ;
return len ;
}
if ( ! in ) return 0 ;
if ( ! cctx -> len_set ) {
if ( CRYPTO_ccm128_setiv ( ccm , EVP_CIPHER_CTX_iv_noconst ( ctx ) , 15 - cctx -> L , len ) ) return - 1 ;
cctx -> len_set = 1 ;
}
if ( EVP_CIPHER_CTX_encrypting ( ctx ) ) {
if ( cctx -> str ? CRYPTO_ccm128_encrypt_ccm64 ( ccm , in , out , len , cctx -> str ) : CRYPTO_ccm128_encrypt ( ccm , in , out , len ) ) return - 1 ;
cctx -> tag_set = 1 ;
return len ;
}
else {
int rv = - 1 ;
if ( cctx -> str ? ! CRYPTO_ccm128_decrypt_ccm64 ( ccm , in , out , len , cctx -> str ) : ! CRYPTO_ccm128_decrypt ( ccm , in , out , len ) ) {
unsigned char tag [ 16 ] ;
if ( CRYPTO_ccm128_tag ( ccm , tag , cctx -> M ) ) {
if ( ! CRYPTO_memcmp ( tag , EVP_CIPHER_CTX_buf_noconst ( ctx ) , cctx -> M ) ) rv = len ;
}
}
if ( rv == - 1 ) OPENSSL_cleanse ( out , len ) ;
cctx -> iv_set = 0 ;
cctx -> tag_set = 0 ;
cctx -> len_set = 0 ;
return rv ;
}
}
# define aes_ccm_cleanup NULL BLOCK_CIPHER_custom ( NID_aes , 128 , 1 , 12 , ccm , CCM , EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS ) | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static int qio_channel_websock_decode_payload ( QIOChannelWebsock * ioc , Error * * errp ) {
size_t i ;
size_t payload_len = 0 ;
uint32_t * payload32 ;
if ( ioc -> payload_remain ) {
if ( ioc -> encinput . offset < ioc -> payload_remain ) {
if ( ioc -> opcode & QIO_CHANNEL_WEBSOCK_CONTROL_OPCODE_MASK ) {
return QIO_CHANNEL_ERR_BLOCK ;
}
payload_len = ioc -> encinput . offset - ( ioc -> encinput . offset % 4 ) ;
}
else {
payload_len = ioc -> payload_remain ;
}
if ( payload_len == 0 ) {
return QIO_CHANNEL_ERR_BLOCK ;
}
ioc -> payload_remain -= payload_len ;
payload32 = ( uint32_t * ) ioc -> encinput . buffer ;
for ( i = 0 ;
i < payload_len / 4 ;
i ++ ) {
payload32 [ i ] ^= ioc -> mask . u ;
}
for ( i *= 4 ;
i < payload_len ;
i ++ ) {
ioc -> encinput . buffer [ i ] ^= ioc -> mask . c [ i % 4 ] ;
}
}
trace_qio_channel_websock_payload_decode ( ioc , ioc -> opcode , ioc -> payload_remain ) ;
if ( ioc -> opcode == QIO_CHANNEL_WEBSOCK_OPCODE_BINARY_FRAME ) {
if ( payload_len ) {
buffer_reserve ( & ioc -> rawinput , payload_len ) ;
buffer_append ( & ioc -> rawinput , ioc -> encinput . buffer , payload_len ) ;
}
}
else if ( ioc -> opcode == QIO_CHANNEL_WEBSOCK_OPCODE_CLOSE ) {
error_setg ( errp , "websocket closed by peer" ) ;
if ( payload_len ) {
qio_channel_websock_encode_buffer ( ioc , & ioc -> encoutput , QIO_CHANNEL_WEBSOCK_OPCODE_CLOSE , & ioc -> encinput ) ;
qio_channel_websock_write_wire ( ioc , NULL ) ;
qio_channel_shutdown ( ioc -> master , QIO_CHANNEL_SHUTDOWN_BOTH , NULL ) ;
}
else {
qio_channel_websock_write_close ( ioc , QIO_CHANNEL_WEBSOCK_STATUS_NORMAL , "peer requested close" ) ;
}
return - 1 ;
}
else if ( ioc -> opcode == QIO_CHANNEL_WEBSOCK_OPCODE_PING ) {
buffer_reset ( & ioc -> ping_reply ) ;
qio_channel_websock_encode_buffer ( ioc , & ioc -> ping_reply , QIO_CHANNEL_WEBSOCK_OPCODE_PONG , & ioc -> encinput ) ;
}
if ( payload_len ) {
buffer_advance ( & ioc -> encinput , payload_len ) ;
}
return 0 ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | IN_PROC_BROWSER_TEST_F ( MediaStreamPermissionTest , TestSecureOriginDenyIsSticky ) {
content : : WebContents * tab_contents = LoadTestPageInTab ( ) ;
EXPECT_TRUE ( content : : IsOriginSecure ( tab_contents -> GetLastCommittedURL ( ) ) ) ;
GetUserMediaAndDeny ( tab_contents ) ;
GetUserMediaAndExpectAutoDenyWithoutPrompt ( tab_contents ) ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | unpeer_node * create_unpeer_node ( address_node * addr ) {
unpeer_node * my_node ;
u_int u ;
char * pch ;
my_node = emalloc_zero ( sizeof ( * my_node ) ) ;
pch = addr -> address ;
while ( * pch && isdigit ( * pch ) ) pch ++ ;
if ( ! * pch && 1 == sscanf ( addr -> address , "%u" , & u ) && u <= ASSOCID_MAX ) {
my_node -> assocID = ( associd_t ) u ;
destroy_address_node ( addr ) ;
my_node -> addr = NULL ;
}
else {
my_node -> assocID = 0 ;
my_node -> addr = addr ;
}
return my_node ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static int dissect_btgatt_nordic_dfu_packet ( tvbuff_t * tvb , packet_info * pinfo _U_ , proto_tree * tree , void * data ) {
btatt_data_t * att_data = ( btatt_data_t * ) data ;
if ( bluetooth_gatt_has_no_parameter ( att_data -> opcode ) ) return - 1 ;
proto_tree_add_item ( tree , hf_gatt_nordic_dfu_packet , tvb , 0 , tvb_captured_length ( tvb ) , ENC_NA ) ;
return tvb_captured_length ( tvb ) ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | int dissect_ber_UTCTime ( gboolean implicit_tag , asn1_ctx_t * actx , proto_tree * tree , tvbuff_t * tvb , int offset , gint hf_id ) {
char outstr [ 33 ] ;
char * outstrptr = outstr ;
const guint8 * instr ;
gint8 ber_class ;
gboolean pc ;
gint32 tag ;
guint32 len , i , n ;
int hoffset ;
proto_item * cause ;
proto_tree * error_tree ;
const gchar * error_str = NULL ;
if ( ! implicit_tag ) {
hoffset = offset ;
offset = dissect_ber_identifier ( actx -> pinfo , tree , tvb , offset , & ber_class , & pc , & tag ) ;
offset = dissect_ber_length ( actx -> pinfo , tree , tvb , offset , & len , NULL ) ;
if ( ( ber_class != BER_CLASS_UNI ) || ( tag != BER_UNI_TAG_UTCTime ) ) {
tvb_ensure_bytes_exist ( tvb , hoffset , 2 ) ;
cause = proto_tree_add_string_format_value ( tree , hf_ber_error , tvb , offset , len , "utctime_expected" , "UTCTime expected but class:%s(%d) %s tag:%d was unexpected" , val_to_str_const ( ber_class , ber_class_codes , "Unknown" ) , ber_class , pc ? ber_pc_codes_short . true_string : ber_pc_codes_short . false_string , tag ) ;
expert_add_info ( actx -> pinfo , cause , & ei_ber_expected_utc_time ) ;
if ( decode_unexpected ) {
proto_tree * unknown_tree = proto_item_add_subtree ( cause , ett_ber_unknown ) ;
dissect_unknown_ber ( actx -> pinfo , tvb , hoffset , unknown_tree ) ;
}
return offset + len ;
}
}
else {
len = tvb_reported_length_remaining ( tvb , offset ) ;
}
if ( ( len < 10 ) || ( len > 19 ) ) {
error_str = wmem_strdup_printf ( wmem_packet_scope ( ) , "BER Error: UTCTime invalid length: %u" , len ) ;
instr = tvb_get_string_enc ( wmem_packet_scope ( ) , tvb , offset , len > 19 ? 19 : len , ENC_ASCII ) ;
goto malformed ;
}
instr = tvb_get_string_enc ( wmem_packet_scope ( ) , tvb , offset , len , ENC_ASCII ) ;
for ( i = 0 ;
i < 10 ;
i ++ ) {
if ( ( instr [ i ] < '0' ) || ( instr [ i ] > '9' ) ) {
error_str = "BER Error: malformed UTCTime encoding, " "first 10 octets have to contain YYMMDDhhmm in digits" ;
goto malformed ;
}
}
g_snprintf ( outstrptr , 15 , "%.2s-%.2s-%.2s %.2s:%.2s" , instr , instr + 2 , instr + 4 , instr + 6 , instr + 8 ) ;
outstrptr += 14 ;
if ( len >= 12 ) {
if ( ( instr [ i ] >= '0' ) && ( instr [ i ] <= '9' ) ) {
i ++ ;
if ( ( instr [ i ] >= '0' ) && ( instr [ i ] <= '9' ) ) {
i ++ ;
g_snprintf ( outstrptr , 4 , ":%.2s" , instr + 10 ) ;
outstrptr += 3 ;
}
else {
error_str = "BER Error: malformed UTCTime encoding, " "if 11th octet is a digit for seconds, " "the 12th octet has to be a digit, too" ;
goto malformed ;
}
}
}
switch ( instr [ i ] ) {
case 'Z' : if ( len != ( i + 1 ) ) {
error_str = "BER Error: malformed UTCTime encoding, " "there must be no further octets after \'Z\'" ;
goto malformed ;
}
g_snprintf ( outstrptr , 7 , " (UTC)" ) ;
i ++ ;
break ;
case '-' : case '+' : if ( len != ( i + 5 ) ) {
error_str = "BER Error: malformed UTCTime encoding, " "4 digits must follow on \'+\' resp. \'-\'" ;
goto malformed ;
}
for ( n = i + 1 ;
n < i + 5 ;
n ++ ) {
if ( ( instr [ n ] < '0' ) || ( instr [ n ] > '9' ) ) {
error_str = "BER Error: malformed UTCTime encoding, " "4 digits must follow on \'+\' resp. \'-\'" ;
goto malformed ;
}
}
g_snprintf ( outstrptr , 12 , " (UTC%c%.4s)" , instr [ i ] , instr + i + 1 ) ;
i += 5 ;
break ;
default : error_str = wmem_strdup_printf ( wmem_packet_scope ( ) , "BER Error: malformed UTCTime encoding, " "unexpected character in %dth octet, " "must be \'Z\', \'+\' or \'-\'" , i + 1 ) ;
goto malformed ;
break ;
}
if ( len != i ) {
error_str = wmem_strdup_printf ( wmem_packet_scope ( ) , "BER Error: malformed UTCTime encoding, %d unexpected character%s after %dth octet" , len - i , ( len == ( i - 1 ) ? "s" : "" ) , i ) ;
goto malformed ;
}
if ( hf_id >= 0 ) {
proto_tree_add_string ( tree , hf_id , tvb , offset , len , outstr ) ;
}
return offset + len ;
malformed : if ( hf_id >= 0 ) {
cause = proto_tree_add_string ( tree , hf_id , tvb , offset , len , instr ) ;
error_tree = proto_item_add_subtree ( cause , ett_ber_unknown ) ;
}
else {
error_tree = tree ;
}
cause = proto_tree_add_string_format ( error_tree , hf_ber_error , tvb , offset , len , "invalid_utctime" , "%s" , error_str ) ;
expert_add_info ( actx -> pinfo , cause , & ei_ber_invalid_format_utctime ) ;
return offset + len ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static int dissect_h245_ATMParameters ( tvbuff_t * tvb _U_ , int offset _U_ , asn1_ctx_t * actx _U_ , proto_tree * tree _U_ , int hf_index _U_ ) {
offset = dissect_per_sequence ( tvb , offset , actx , tree , hf_index , ett_h245_ATMParameters , ATMParameters_sequence ) ;
return offset ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | gx_color_index mem_mapped_map_rgb_color ( gx_device * dev , const gx_color_value cv [ ] ) {
gx_device_memory * const mdev = ( gx_device_memory * ) dev ;
byte br = gx_color_value_to_byte ( cv [ 0 ] ) ;
register const byte * pptr = mdev -> palette . data ;
int cnt = mdev -> palette . size ;
const byte * which = 0 ;
int best = 256 * 3 ;
if ( mdev -> color_info . num_components != 1 ) {
byte bg = gx_color_value_to_byte ( cv [ 1 ] ) ;
byte bb = gx_color_value_to_byte ( cv [ 2 ] ) ;
while ( ( cnt -= 3 ) >= 0 ) {
register int diff = * pptr - br ;
if ( diff < 0 ) diff = - diff ;
if ( diff < best ) {
int dg = pptr [ 1 ] - bg ;
if ( dg < 0 ) dg = - dg ;
if ( ( diff += dg ) < best ) {
int db = pptr [ 2 ] - bb ;
if ( db < 0 ) db = - db ;
if ( ( diff += db ) < best ) which = pptr , best = diff ;
}
}
if ( diff == 0 ) break ;
pptr += 3 ;
}
}
else {
while ( ( cnt -= 3 ) >= 0 ) {
register int diff = * pptr - br ;
if ( diff < 0 ) diff = - diff ;
if ( diff < best ) {
which = pptr , best = diff ;
}
if ( diff == 0 ) break ;
pptr += 3 ;
}
}
return ( gx_color_index ) ( ( which - mdev -> palette . data ) / 3 ) ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static void add_rr_to_tree ( proto_tree * rr_tree , tvbuff_t * tvb , int offset , const guchar * name , int namelen , int type , packet_info * pinfo , gboolean is_mdns ) {
proto_item * ttl_item ;
gchar * * srv_rr_info ;
if ( type == T_SRV ) {
srv_rr_info = g_strsplit ( name , "." , 3 ) ;
proto_tree_add_string ( rr_tree , hf_dns_srv_service , tvb , offset , namelen , srv_rr_info [ 0 ] ) ;
if ( srv_rr_info [ 1 ] ) {
proto_tree_add_string ( rr_tree , hf_dns_srv_proto , tvb , offset , namelen , srv_rr_info [ 1 ] ) ;
if ( srv_rr_info [ 2 ] ) {
proto_tree_add_string ( rr_tree , hf_dns_srv_name , tvb , offset , namelen , srv_rr_info [ 2 ] ) ;
}
}
g_strfreev ( srv_rr_info ) ;
}
else {
proto_tree_add_string ( rr_tree , hf_dns_rr_name , tvb , offset , namelen , name ) ;
}
offset += namelen ;
proto_tree_add_item ( rr_tree , hf_dns_rr_type , tvb , offset , 2 , ENC_BIG_ENDIAN ) ;
offset += 2 ;
if ( is_mdns ) {
proto_tree_add_item ( rr_tree , hf_dns_rr_class_mdns , tvb , offset , 2 , ENC_BIG_ENDIAN ) ;
proto_tree_add_item ( rr_tree , hf_dns_rr_cache_flush , tvb , offset , 2 , ENC_BIG_ENDIAN ) ;
}
else {
proto_tree_add_item ( rr_tree , hf_dns_rr_class , tvb , offset , 2 , ENC_BIG_ENDIAN ) ;
}
offset += 2 ;
ttl_item = proto_tree_add_item ( rr_tree , hf_dns_rr_ttl , tvb , offset , 4 , ENC_BIG_ENDIAN | ENC_TIME_TIMESPEC ) ;
if ( tvb_get_ntohl ( tvb , offset ) & 0x80000000 ) {
expert_add_info ( pinfo , ttl_item , & ei_ttl_negative ) ;
}
offset += 4 ;
proto_tree_add_item ( rr_tree , hf_dns_rr_len , tvb , offset , 2 , ENC_BIG_ENDIAN ) ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | IN_PROC_BROWSER_TEST_F ( SingleClientSessionsSyncTest , MAYBE_Sanity ) {
ASSERT_TRUE ( SetupSync ( ) ) << "SetupSync() failed." ;
ASSERT_TRUE ( CheckInitialState ( 0 ) ) ;
ScopedWindowMap old_windows ;
ASSERT_TRUE ( OpenTabAndGetLocalWindows ( 0 , GURL ( "http://127.0.0.1/bubba" ) , old_windows . GetMutable ( ) ) ) ;
ASSERT_TRUE ( AwaitCommitActivityCompletion ( GetSyncService ( ( 0 ) ) ) ) ;
SyncedSessionVector sessions ;
ASSERT_FALSE ( GetSessionData ( 0 , & sessions ) ) ;
ASSERT_EQ ( 0U , sessions . size ( ) ) ;
ScopedWindowMap new_windows ;
ASSERT_TRUE ( GetLocalWindows ( 0 , new_windows . GetMutable ( ) ) ) ;
ASSERT_TRUE ( WindowsMatch ( * old_windows . Get ( ) , * new_windows . Get ( ) ) ) ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static char * ext_t_0_wv_cspc_12 ( tvbuff_t * tvb _U_ , guint32 value , guint32 str_tbl _U_ ) {
char * str = wmem_strdup_printf ( wmem_packet_scope ( ) , "Common Value: '%s'" , val_to_str ( value , vals_wv_csp_12_element_value_tokens , "<Unknown WV-CSP 1.2 Common Value token 0x%X>" ) ) ;
return str ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | int ff_h263_get_gob_height ( MpegEncContext * s ) {
if ( s -> height <= 400 ) return 1 ;
else if ( s -> height <= 800 ) return 2 ;
else return 4 ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static inline void pxa2xx_rtc_swal1_tick ( void * opaque ) {
PXA2xxRTCState * s = ( PXA2xxRTCState * ) opaque ;
s -> rtsr |= ( 1 << 8 ) ;
pxa2xx_rtc_alarm_update ( s , s -> rtsr ) ;
pxa2xx_rtc_int_update ( s ) ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static int dissect_pcp_message_profile ( tvbuff_t * tvb , packet_info * pinfo , proto_tree * tree , int offset ) {
proto_item * pcp_profile_item ;
proto_tree * pcp_profile_tree ;
proto_item * pcp_profile_profile_item ;
proto_tree * pcp_profile_profile_tree ;
guint32 num_prof ;
guint32 i ;
col_append_fstr ( pinfo -> cinfo , COL_INFO , "[%s]" , val_to_str ( PCP_PDU_PROFILE , packettypenames , "Unknown Type:0x%02x" ) ) ;
pcp_profile_item = proto_tree_add_item ( tree , hf_pcp_profile , tvb , offset , - 1 , ENC_NA ) ;
pcp_profile_tree = proto_item_add_subtree ( pcp_profile_item , ett_pcp ) ;
proto_tree_add_item ( pcp_profile_tree , hf_pcp_ctxnum , tvb , offset , 4 , ENC_BIG_ENDIAN ) ;
offset += 4 ;
proto_tree_add_item ( pcp_profile_tree , hf_pcp_profile_g_state , tvb , offset , 4 , ENC_BIG_ENDIAN ) ;
offset += 4 ;
proto_tree_add_item ( pcp_profile_tree , hf_pcp_profile_numprof , tvb , offset , 4 , ENC_BIG_ENDIAN ) ;
num_prof = tvb_get_ntohl ( tvb , offset ) ;
offset += 4 ;
proto_tree_add_item ( pcp_profile_tree , hf_pcp_pdu_padding , tvb , offset , 4 , ENC_NA ) ;
offset += 4 ;
for ( i = 0 ;
i < num_prof ;
i ++ ) {
pcp_profile_profile_item = proto_tree_add_item ( pcp_profile_tree , hf_pcp_profile_profile , tvb , offset , 32 , ENC_NA ) ;
pcp_profile_profile_tree = proto_item_add_subtree ( pcp_profile_profile_item , ett_pcp ) ;
proto_tree_add_item ( pcp_profile_profile_tree , hf_pcp_instance_indom , tvb , offset , 4 , ENC_BIG_ENDIAN ) ;
offset += 4 ;
proto_tree_add_item ( pcp_profile_profile_tree , hf_pcp_profile_profile_state , tvb , offset , 4 , ENC_BIG_ENDIAN ) ;
offset += 4 ;
proto_tree_add_item ( pcp_profile_profile_tree , hf_pcp_profile_profile_numinst , tvb , offset , 4 , ENC_BIG_ENDIAN ) ;
offset += 4 ;
proto_tree_add_item ( pcp_profile_tree , hf_pcp_pdu_padding , tvb , offset , 4 , ENC_NA ) ;
offset += 4 ;
}
return offset ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static void pk_transaction_get_files_local ( PkTransaction * transaction , GVariant * params , GDBusMethodInvocation * context ) {
gboolean ret ;
GError * error_local = NULL ;
guint i ;
guint length ;
g_autoptr ( GError ) error = NULL ;
g_autofree gchar * content_type = NULL ;
g_autofree gchar * files_temp = NULL ;
g_autofree gchar * * full_paths = NULL ;
g_return_if_fail ( PK_IS_TRANSACTION ( transaction ) ) ;
g_return_if_fail ( transaction -> priv -> tid != NULL ) ;
g_variant_get ( params , "(^a&s)" , & full_paths ) ;
files_temp = pk_package_ids_to_string ( full_paths ) ;
g_debug ( "GetFilesLocal method called: %s" , files_temp ) ;
if ( ! pk_backend_is_implemented ( transaction -> priv -> backend , PK_ROLE_ENUM_GET_FILES_LOCAL ) ) {
g_set_error ( & error , PK_TRANSACTION_ERROR , PK_TRANSACTION_ERROR_NOT_SUPPORTED , "GetFilesLocal not supported by backend" ) ;
pk_transaction_set_state ( transaction , PK_TRANSACTION_STATE_ERROR ) ;
goto out ;
}
length = g_strv_length ( full_paths ) ;
if ( length == 0 ) {
g_set_error_literal ( & error , PK_TRANSACTION_ERROR , PK_TRANSACTION_ERROR_NUMBER_OF_PACKAGES_INVALID , "No filenames listed" ) ;
pk_transaction_set_state ( transaction , PK_TRANSACTION_STATE_ERROR ) ;
goto out ;
}
if ( length > PK_TRANSACTION_MAX_PACKAGES_TO_PROCESS ) {
g_set_error ( & error , PK_TRANSACTION_ERROR , PK_TRANSACTION_ERROR_NUMBER_OF_PACKAGES_INVALID , "Too many files to process (%i/%i)" , length , PK_TRANSACTION_MAX_PACKAGES_TO_PROCESS ) ;
pk_transaction_set_state ( transaction , PK_TRANSACTION_STATE_ERROR ) ;
goto out ;
}
length = g_strv_length ( full_paths ) ;
for ( i = 0 ;
i < length ;
i ++ ) {
ret = g_file_test ( full_paths [ i ] , G_FILE_TEST_EXISTS ) ;
if ( ! ret ) {
g_set_error ( & error , PK_TRANSACTION_ERROR , PK_TRANSACTION_ERROR_NO_SUCH_FILE , "No such file %s" , full_paths [ i ] ) ;
pk_transaction_set_state ( transaction , PK_TRANSACTION_STATE_ERROR ) ;
goto out ;
}
content_type = pk_transaction_get_content_type_for_file ( full_paths [ i ] , & error_local ) ;
if ( content_type == NULL ) {
g_set_error ( & error , PK_TRANSACTION_ERROR , PK_TRANSACTION_ERROR_MIME_TYPE_NOT_SUPPORTED , "Failed to get content type for file %s" , full_paths [ i ] ) ;
pk_transaction_set_state ( transaction , PK_TRANSACTION_STATE_ERROR ) ;
goto out ;
}
ret = pk_transaction_is_supported_content_type ( transaction , content_type ) ;
if ( ! ret ) {
g_set_error ( & error , PK_TRANSACTION_ERROR , PK_TRANSACTION_ERROR_MIME_TYPE_NOT_SUPPORTED , "MIME type '%s' not supported %s" , content_type , full_paths [ i ] ) ;
pk_transaction_set_state ( transaction , PK_TRANSACTION_STATE_ERROR ) ;
goto out ;
}
}
transaction -> priv -> cached_full_paths = g_strdupv ( full_paths ) ;
pk_transaction_set_role ( transaction , PK_ROLE_ENUM_GET_FILES_LOCAL ) ;
pk_transaction_set_state ( transaction , PK_TRANSACTION_STATE_READY ) ;
out : pk_transaction_dbus_return ( context , error ) ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | TEST_F ( BaseSearchProviderTest , PreserveAnswersWhenDeduplicating ) {
TemplateURLData data ;
data . SetURL ( "http://foo.com/url?bar={searchTerms}" ) ;
std : : unique_ptr < TemplateURL > template_url ( new TemplateURL ( data ) ) ;
TestBaseSearchProvider : : MatchMap map ;
base : : string16 query = base : : ASCIIToUTF16 ( "weather los angeles" ) ;
base : : string16 answer_contents = base : : ASCIIToUTF16 ( "some answer content" ) ;
base : : string16 answer_type = base : : ASCIIToUTF16 ( "2334" ) ;
std : : unique_ptr < SuggestionAnswer > answer ( new SuggestionAnswer ( ) ) ;
answer -> set_type ( 2334 ) ;
EXPECT_CALL ( * provider_ , GetInput ( _ ) ) . WillRepeatedly ( Return ( AutocompleteInput ( ) ) ) ;
EXPECT_CALL ( * provider_ , GetTemplateURL ( _ ) ) . WillRepeatedly ( Return ( template_url . get ( ) ) ) ;
SearchSuggestionParser : : SuggestResult more_relevant ( query , AutocompleteMatchType : : SEARCH_HISTORY , query , base : : string16 ( ) , base : : string16 ( ) , base : : string16 ( ) , base : : string16 ( ) , nullptr , std : : string ( ) , std : : string ( ) , false , 1300 , true , false , query ) ;
provider_ -> AddMatchToMap ( more_relevant , std : : string ( ) , TemplateURLRef : : NO_SUGGESTION_CHOSEN , false , false , & map ) ;
SearchSuggestionParser : : SuggestResult less_relevant ( query , AutocompleteMatchType : : SEARCH_SUGGEST , query , base : : string16 ( ) , base : : string16 ( ) , answer_contents , answer_type , SuggestionAnswer : : copy ( answer . get ( ) ) , std : : string ( ) , std : : string ( ) , false , 850 , true , false , query ) ;
provider_ -> AddMatchToMap ( less_relevant , std : : string ( ) , TemplateURLRef : : NO_SUGGESTION_CHOSEN , false , false , & map ) ;
ASSERT_EQ ( 1U , map . size ( ) ) ;
AutocompleteMatch match = map . begin ( ) -> second ;
ASSERT_EQ ( 1U , match . duplicate_matches . size ( ) ) ;
AutocompleteMatch duplicate = match . duplicate_matches [ 0 ] ;
EXPECT_EQ ( answer_contents , match . answer_contents ) ;
EXPECT_EQ ( answer_type , match . answer_type ) ;
EXPECT_TRUE ( answer -> Equals ( * match . answer ) ) ;
EXPECT_EQ ( AutocompleteMatchType : : SEARCH_HISTORY , match . type ) ;
EXPECT_EQ ( 1300 , match . relevance ) ;
EXPECT_EQ ( answer_contents , duplicate . answer_contents ) ;
EXPECT_EQ ( answer_type , duplicate . answer_type ) ;
EXPECT_TRUE ( answer -> Equals ( * duplicate . answer ) ) ;
EXPECT_EQ ( AutocompleteMatchType : : SEARCH_SUGGEST , duplicate . type ) ;
EXPECT_EQ ( 850 , duplicate . relevance ) ;
map . clear ( ) ;
base : : string16 answer_contents2 = base : : ASCIIToUTF16 ( "different answer" ) ;
base : : string16 answer_type2 = base : : ASCIIToUTF16 ( "8242" ) ;
std : : unique_ptr < SuggestionAnswer > answer2 ( new SuggestionAnswer ( ) ) ;
answer2 -> set_type ( 8242 ) ;
more_relevant = SearchSuggestionParser : : SuggestResult ( query , AutocompleteMatchType : : SEARCH_HISTORY , query , base : : string16 ( ) , base : : string16 ( ) , answer_contents2 , answer_type2 , SuggestionAnswer : : copy ( answer2 . get ( ) ) , std : : string ( ) , std : : string ( ) , false , 1300 , true , false , query ) ;
provider_ -> AddMatchToMap ( more_relevant , std : : string ( ) , TemplateURLRef : : NO_SUGGESTION_CHOSEN , false , false , & map ) ;
provider_ -> AddMatchToMap ( less_relevant , std : : string ( ) , TemplateURLRef : : NO_SUGGESTION_CHOSEN , false , false , & map ) ;
ASSERT_EQ ( 1U , map . size ( ) ) ;
match = map . begin ( ) -> second ;
ASSERT_EQ ( 1U , match . duplicate_matches . size ( ) ) ;
duplicate = match . duplicate_matches [ 0 ] ;
EXPECT_EQ ( answer_contents2 , match . answer_contents ) ;
EXPECT_EQ ( answer_type2 , match . answer_type ) ;
EXPECT_TRUE ( answer2 -> Equals ( * match . answer ) ) ;
EXPECT_EQ ( AutocompleteMatchType : : SEARCH_HISTORY , match . type ) ;
EXPECT_EQ ( 1300 , match . relevance ) ;
EXPECT_EQ ( answer_contents , duplicate . answer_contents ) ;
EXPECT_EQ ( answer_type , duplicate . answer_type ) ;
EXPECT_TRUE ( answer -> Equals ( * duplicate . answer ) ) ;
EXPECT_EQ ( AutocompleteMatchType : : SEARCH_SUGGEST , duplicate . type ) ;
EXPECT_EQ ( 850 , duplicate . relevance ) ;
} | 1True
|
Categorize the following code snippet as vulnerable or not. True or False | int main ( void ) {
static void ( * test_functions [ ] ) ( void ) = {
test_rfc822_parse_quoted_string , test_rfc822_parse_content_param , NULL }
;
return test_run ( test_functions ) ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | void vp9_dec_build_inter_predictors_sb ( MACROBLOCKD * xd , int mi_row , int mi_col , BLOCK_SIZE bsize ) {
int plane ;
const int mi_x = mi_col * MI_SIZE ;
const int mi_y = mi_row * MI_SIZE ;
for ( plane = 0 ;
plane < MAX_MB_PLANE ;
++ plane ) {
const BLOCK_SIZE plane_bsize = get_plane_block_size ( bsize , & xd -> plane [ plane ] ) ;
const int num_4x4_w = num_4x4_blocks_wide_lookup [ plane_bsize ] ;
const int num_4x4_h = num_4x4_blocks_high_lookup [ plane_bsize ] ;
const int bw = 4 * num_4x4_w ;
const int bh = 4 * num_4x4_h ;
if ( xd -> mi [ 0 ] -> mbmi . sb_type < BLOCK_8X8 ) {
int i = 0 , x , y ;
assert ( bsize == BLOCK_8X8 ) ;
for ( y = 0 ;
y < num_4x4_h ;
++ y ) for ( x = 0 ;
x < num_4x4_w ;
++ x ) dec_build_inter_predictors ( xd , plane , i ++ , bw , bh , * x , 4 * y , 4 , 4 , mi_x , mi_y ) ;
}
else {
dec_build_inter_predictors ( xd , plane , 0 , bw , bh , 0 , 0 , bw , bh , mi_x , mi_y ) ;
}
}
} | 1True
|
Categorize the following code snippet as vulnerable or not. True or False | static int estimate_qp ( MpegEncContext * s , int dry_run ) {
if ( s -> next_lambda ) {
s -> current_picture_ptr -> f . quality = s -> current_picture . f . quality = s -> next_lambda ;
if ( ! dry_run ) s -> next_lambda = 0 ;
}
else if ( ! s -> fixed_qscale ) {
s -> current_picture_ptr -> f . quality = s -> current_picture . f . quality = ff_rate_estimate_qscale ( s , dry_run ) ;
if ( s -> current_picture . f . quality < 0 ) return - 1 ;
}
if ( s -> adaptive_quant ) {
switch ( s -> codec_id ) {
case AV_CODEC_ID_MPEG4 : if ( CONFIG_MPEG4_ENCODER ) ff_clean_mpeg4_qscales ( s ) ;
break ;
case AV_CODEC_ID_H263 : case AV_CODEC_ID_H263P : case AV_CODEC_ID_FLV1 : if ( CONFIG_H263_ENCODER ) ff_clean_h263_qscales ( s ) ;
break ;
default : ff_init_qscale_tab ( s ) ;
}
s -> lambda = s -> lambda_table [ 0 ] ;
}
else s -> lambda = s -> current_picture . f . quality ;
update_qscale ( s ) ;
return 0 ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | char * cluster_conn_opts ( ClusterInfo * cluster ) {
static PQExpBuffer buf ;
if ( buf == NULL ) buf = createPQExpBuffer ( ) ;
else resetPQExpBuffer ( buf ) ;
if ( cluster -> sockdir ) {
appendPQExpBufferStr ( buf , "--host " ) ;
appendShellString ( buf , cluster -> sockdir ) ;
appendPQExpBufferChar ( buf , ' ' ) ;
}
appendPQExpBuffer ( buf , "--port %d --username " , cluster -> port ) ;
appendShellString ( buf , os_info . user ) ;
return buf -> data ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static int32_t u_printf_spellout_handler ( const u_printf_stream_handler * handler , void * context , ULocaleBundle * formatBundle , const u_printf_spec_info * info , const ufmt_args * args ) {
double num = ( double ) ( args [ 0 ] . doubleValue ) ;
UNumberFormat * format ;
UChar result [ UPRINTF_BUFFER_SIZE ] ;
UChar prefixBuffer [ UPRINTF_BUFFER_SIZE ] ;
int32_t prefixBufferLen = sizeof ( prefixBuffer ) ;
int32_t minDecimalDigits ;
int32_t maxDecimalDigits ;
int32_t resultLen ;
UErrorCode status = U_ZERO_ERROR ;
prefixBuffer [ 0 ] = 0 ;
format = u_locbund_getNumberFormat ( formatBundle , UNUM_SPELLOUT ) ;
if ( format == 0 ) return 0 ;
minDecimalDigits = unum_getAttribute ( format , UNUM_MIN_FRACTION_DIGITS ) ;
maxDecimalDigits = unum_getAttribute ( format , UNUM_MAX_FRACTION_DIGITS ) ;
if ( info -> fPrecision != - 1 ) {
unum_setAttribute ( format , UNUM_FRACTION_DIGITS , info -> fPrecision ) ;
}
else if ( info -> fAlt ) {
unum_setAttribute ( format , UNUM_FRACTION_DIGITS , 6 ) ;
}
else {
unum_setAttribute ( format , UNUM_FRACTION_DIGITS , 6 ) ;
}
if ( info -> fShowSign ) {
u_printf_set_sign ( format , info , prefixBuffer , & prefixBufferLen , & status ) ;
}
resultLen = unum_formatDouble ( format , num , result , UPRINTF_BUFFER_SIZE , 0 , & status ) ;
if ( U_FAILURE ( status ) ) {
resultLen = 0 ;
}
unum_setAttribute ( format , UNUM_MIN_FRACTION_DIGITS , minDecimalDigits ) ;
unum_setAttribute ( format , UNUM_MAX_FRACTION_DIGITS , maxDecimalDigits ) ;
if ( info -> fShowSign ) {
UErrorCode localStatus = U_ZERO_ERROR ;
u_printf_reset_sign ( format , info , prefixBuffer , & prefixBufferLen , & localStatus ) ;
}
return handler -> pad_and_justify ( context , info , result , resultLen ) ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | int parse_CRowsetProperties ( tvbuff_t * tvb , int offset , proto_tree * parent_tree , proto_tree * pad_tree _U_ , const char * fmt , ... ) {
proto_item * item ;
proto_tree * tree ;
const char * txt ;
va_list ap ;
va_start ( ap , fmt ) ;
txt = wmem_strdup_vprintf ( wmem_packet_scope ( ) , fmt , ap ) ;
va_end ( ap ) ;
tree = proto_tree_add_subtree ( parent_tree , tvb , offset , 0 , ett_CRowsetProperties , & item , txt ) ;
proto_tree_add_bitmask_with_flags ( tree , tvb , offset , hf_mswsp_bool_options , ett_mswsp_bool_options , mswsp_bool_options , ENC_LITTLE_ENDIAN , BMT_NO_APPEND ) ;
offset += 4 ;
proto_tree_add_item ( tree , hf_mswsp_crowsetprops_ulmaxopenrows , tvb , offset , 4 , ENC_LITTLE_ENDIAN ) ;
offset += 4 ;
proto_tree_add_item ( tree , hf_mswsp_crowsetprops_ulmemusage , tvb , offset , 4 , ENC_LITTLE_ENDIAN ) ;
offset += 4 ;
proto_tree_add_item ( tree , hf_mswsp_crowsetprops_cmaxresults , tvb , offset , 4 , ENC_LITTLE_ENDIAN ) ;
offset += 4 ;
proto_tree_add_item ( tree , hf_mswsp_crowsetprops_ccmdtimeout , tvb , offset , 4 , ENC_LITTLE_ENDIAN ) ;
offset += 4 ;
proto_item_set_end ( item , tvb , offset ) ;
return offset ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static void update_mbgraph_frame_stats ( VP9_COMP * cpi , MBGRAPH_FRAME_STATS * stats , YV12_BUFFER_CONFIG * buf , YV12_BUFFER_CONFIG * golden_ref , YV12_BUFFER_CONFIG * alt_ref ) {
MACROBLOCK * const x = & cpi -> mb ;
MACROBLOCKD * const xd = & x -> e_mbd ;
VP9_COMMON * const cm = & cpi -> common ;
int mb_col , mb_row , offset = 0 ;
int mb_y_offset = 0 , arf_y_offset = 0 , gld_y_offset = 0 ;
MV gld_top_mv = {
0 , 0 }
;
MODE_INFO mi_local ;
vp9_zero ( mi_local ) ;
x -> mv_row_min = - BORDER_MV_PIXELS_B16 ;
x -> mv_row_max = ( cm -> mb_rows - 1 ) * 8 + BORDER_MV_PIXELS_B16 ;
xd -> up_available = 0 ;
xd -> plane [ 0 ] . dst . stride = buf -> y_stride ;
xd -> plane [ 0 ] . pre [ 0 ] . stride = buf -> y_stride ;
xd -> plane [ 1 ] . dst . stride = buf -> uv_stride ;
xd -> mi [ 0 ] . src_mi = & mi_local ;
mi_local . mbmi . sb_type = BLOCK_16X16 ;
mi_local . mbmi . ref_frame [ 0 ] = LAST_FRAME ;
mi_local . mbmi . ref_frame [ 1 ] = NONE ;
for ( mb_row = 0 ;
mb_row < cm -> mb_rows ;
mb_row ++ ) {
MV gld_left_mv = gld_top_mv ;
int mb_y_in_offset = mb_y_offset ;
int arf_y_in_offset = arf_y_offset ;
int gld_y_in_offset = gld_y_offset ;
x -> mv_col_min = - BORDER_MV_PIXELS_B16 ;
x -> mv_col_max = ( cm -> mb_cols - 1 ) * 8 + BORDER_MV_PIXELS_B16 ;
xd -> left_available = 0 ;
for ( mb_col = 0 ;
mb_col < cm -> mb_cols ;
mb_col ++ ) {
MBGRAPH_MB_STATS * mb_stats = & stats -> mb_stats [ offset + mb_col ] ;
update_mbgraph_mb_stats ( cpi , mb_stats , buf , mb_y_in_offset , golden_ref , & gld_left_mv , alt_ref , mb_row , mb_col ) ;
gld_left_mv = mb_stats -> ref [ GOLDEN_FRAME ] . m . mv . as_mv ;
if ( mb_col == 0 ) {
gld_top_mv = gld_left_mv ;
}
xd -> left_available = 1 ;
mb_y_in_offset += 16 ;
gld_y_in_offset += 16 ;
arf_y_in_offset += 16 ;
x -> mv_col_min -= 16 ;
x -> mv_col_max -= 16 ;
}
xd -> up_available = 1 ;
mb_y_offset += buf -> y_stride * 16 ;
gld_y_offset += golden_ref -> y_stride * 16 ;
if ( alt_ref ) arf_y_offset += alt_ref -> y_stride * 16 ;
x -> mv_row_min -= 16 ;
x -> mv_row_max -= 16 ;
offset += cm -> mb_cols ;
}
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static void nvram_init ( M48t59State * nvram , uint8_t * macaddr , const char * cmdline , const char * boot_devices , ram_addr_t RAM_size , uint32_t kernel_size , int width , int height , int depth , int nvram_machine_id , const char * arch ) {
unsigned int i ;
uint32_t start , end ;
uint8_t image [ 0x1ff0 ] ;
struct OpenBIOS_nvpart_v1 * part_header ;
memset ( image , '\0' , sizeof ( image ) ) ;
start = 0 ;
part_header = ( struct OpenBIOS_nvpart_v1 * ) & image [ start ] ;
part_header -> signature = OPENBIOS_PART_SYSTEM ;
pstrcpy ( part_header -> name , sizeof ( part_header -> name ) , "system" ) ;
end = start + sizeof ( struct OpenBIOS_nvpart_v1 ) ;
for ( i = 0 ;
i < nb_prom_envs ;
i ++ ) end = OpenBIOS_set_var ( image , end , prom_envs [ i ] ) ;
image [ end ++ ] = '\0' ;
end = start + ( ( end - start + 15 ) & ~ 15 ) ;
OpenBIOS_finish_partition ( part_header , end - start ) ;
start = end ;
part_header = ( struct OpenBIOS_nvpart_v1 * ) & image [ start ] ;
part_header -> signature = OPENBIOS_PART_FREE ;
pstrcpy ( part_header -> name , sizeof ( part_header -> name ) , "free" ) ;
end = 0x1fd0 ;
OpenBIOS_finish_partition ( part_header , end - start ) ;
Sun_init_header ( ( struct Sun_nvram * ) & image [ 0x1fd8 ] , macaddr , nvram_machine_id ) ;
for ( i = 0 ;
i < sizeof ( image ) ;
i ++ ) m48t59_write ( nvram , i , image [ i ] ) ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static void dealloc_compressor_data ( VP9_COMP * cpi ) {
VP9_COMMON * const cm = & cpi -> common ;
int i ;
vpx_free ( cpi -> segmentation_map ) ;
cpi -> segmentation_map = NULL ;
vpx_free ( cm -> last_frame_seg_map ) ;
cm -> last_frame_seg_map = NULL ;
vpx_free ( cpi -> coding_context . last_frame_seg_map_copy ) ;
cpi -> coding_context . last_frame_seg_map_copy = NULL ;
vpx_free ( cpi -> complexity_map ) ;
cpi -> complexity_map = NULL ;
vpx_free ( cpi -> nmvcosts [ 0 ] ) ;
vpx_free ( cpi -> nmvcosts [ 1 ] ) ;
cpi -> nmvcosts [ 0 ] = NULL ;
cpi -> nmvcosts [ 1 ] = NULL ;
vpx_free ( cpi -> nmvcosts_hp [ 0 ] ) ;
vpx_free ( cpi -> nmvcosts_hp [ 1 ] ) ;
cpi -> nmvcosts_hp [ 0 ] = NULL ;
cpi -> nmvcosts_hp [ 1 ] = NULL ;
vpx_free ( cpi -> nmvsadcosts [ 0 ] ) ;
vpx_free ( cpi -> nmvsadcosts [ 1 ] ) ;
cpi -> nmvsadcosts [ 0 ] = NULL ;
cpi -> nmvsadcosts [ 1 ] = NULL ;
vpx_free ( cpi -> nmvsadcosts_hp [ 0 ] ) ;
vpx_free ( cpi -> nmvsadcosts_hp [ 1 ] ) ;
cpi -> nmvsadcosts_hp [ 0 ] = NULL ;
cpi -> nmvsadcosts_hp [ 1 ] = NULL ;
vp9_cyclic_refresh_free ( cpi -> cyclic_refresh ) ;
cpi -> cyclic_refresh = NULL ;
vp9_free_ref_frame_buffers ( cm ) ;
vp9_free_context_buffers ( cm ) ;
vp9_free_frame_buffer ( & cpi -> last_frame_uf ) ;
vp9_free_frame_buffer ( & cpi -> scaled_source ) ;
vp9_free_frame_buffer ( & cpi -> scaled_last_source ) ;
vp9_free_frame_buffer ( & cpi -> alt_ref_buffer ) ;
vp9_lookahead_destroy ( cpi -> lookahead ) ;
vpx_free ( cpi -> tok ) ;
cpi -> tok = 0 ;
vp9_free_pc_tree ( cpi ) ;
for ( i = 0 ;
i < cpi -> svc . number_spatial_layers ;
++ i ) {
LAYER_CONTEXT * const lc = & cpi -> svc . layer_context [ i ] ;
vpx_free ( lc -> rc_twopass_stats_in . buf ) ;
lc -> rc_twopass_stats_in . buf = NULL ;
lc -> rc_twopass_stats_in . sz = 0 ;
}
if ( cpi -> source_diff_var != NULL ) {
vpx_free ( cpi -> source_diff_var ) ;
cpi -> source_diff_var = NULL ;
}
for ( i = 0 ;
i < MAX_LAG_BUFFERS ;
++ i ) {
vp9_free_frame_buffer ( & cpi -> svc . scaled_frames [ i ] ) ;
}
vpx_memset ( & cpi -> svc . scaled_frames [ 0 ] , 0 , MAX_LAG_BUFFERS * sizeof ( cpi -> svc . scaled_frames [ 0 ] ) ) ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static gboolean has_problem ( NautilusDirectory * directory , NautilusFile * file , FileCheck problem ) {
GList * node ;
if ( file != NULL ) {
return ( * problem ) ( file ) ;
}
for ( node = directory -> details -> file_list ;
node != NULL ;
node = node -> next ) {
if ( ( * problem ) ( node -> data ) ) {
return TRUE ;
}
}
return FALSE ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static guint iax_circuit_hash ( gconstpointer v ) {
const iax_circuit_key * key = ( const iax_circuit_key * ) v ;
guint hash_val ;
hash_val = 0 ;
ADD_ADDRESS_TO_HASH ( hash_val , & key -> addr ) ;
hash_val += ( guint ) ( key -> ptype ) ;
hash_val += ( guint ) ( key -> port ) ;
hash_val += ( guint ) ( key -> callno ) ;
# ifdef DEBUG_HASHING g_debug ( "+++ Hashing key: %s, result %#x" , key_to_str ( key ) , hash_val ) ;
# endif return ( guint ) hash_val ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | void do_mkdir ( struct st_command * command ) {
int error ;
static DYNAMIC_STRING ds_dirname ;
const struct command_arg mkdir_args [ ] = {
{
"dirname" , ARG_STRING , TRUE , & ds_dirname , "Directory to create" }
}
;
DBUG_ENTER ( "do_mkdir" ) ;
check_command_args ( command , command -> first_argument , mkdir_args , sizeof ( mkdir_args ) / sizeof ( struct command_arg ) , ' ' ) ;
DBUG_PRINT ( "info" , ( "creating directory: %s" , ds_dirname . str ) ) ;
error = my_mkdir ( ds_dirname . str , 0777 , MYF ( MY_WME ) ) != 0 ;
handle_command_error ( command , error , my_errno ) ;
dynstr_free ( & ds_dirname ) ;
DBUG_VOID_RETURN ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static void dissect_dns_common ( tvbuff_t * tvb , packet_info * pinfo , proto_tree * tree , gboolean is_tcp , gboolean is_mdns , gboolean is_llmnr ) {
int offset = is_tcp ? 2 : 0 ;
int dns_data_offset ;
proto_tree * dns_tree = NULL , * field_tree ;
proto_item * ti , * tf ;
guint16 flags , opcode , rcode , quest , ans , auth , add ;
guint id ;
int cur_off ;
gboolean isupdate ;
conversation_t * conversation ;
dns_conv_info_t * dns_info ;
dns_transaction_t * dns_trans ;
wmem_tree_key_t key [ 3 ] ;
struct DnsTap * dns_stats ;
guint qtype = 0 ;
guint qclass = 0 ;
const guchar * name ;
int name_len ;
dns_data_offset = offset ;
col_clear ( pinfo -> cinfo , COL_INFO ) ;
id = tvb_get_ntohs ( tvb , offset + DNS_ID ) ;
flags = tvb_get_ntohs ( tvb , offset + DNS_FLAGS ) ;
opcode = ( guint16 ) ( ( flags & F_OPCODE ) >> OPCODE_SHIFT ) ;
rcode = ( guint16 ) ( flags & F_RCODE ) ;
col_add_fstr ( pinfo -> cinfo , COL_INFO , "%s%s 0x%04x " , val_to_str ( opcode , opcode_vals , "Unknown operation (%u)" ) , ( flags & F_RESPONSE ) ? " response" : "" , id ) ;
if ( flags & F_RESPONSE ) {
if ( rcode != RCODE_NOERROR ) {
col_append_str ( pinfo -> cinfo , COL_INFO , val_to_str ( rcode , rcode_vals , "Unknown error (%u)" ) ) ;
}
}
if ( opcode == OPCODE_UPDATE ) {
isupdate = TRUE ;
}
else {
isupdate = FALSE ;
}
if ( tree ) {
if ( is_llmnr ) {
ti = proto_tree_add_protocol_format ( tree , proto_dns , tvb , 0 , - 1 , "Link-local Multicast Name Resolution (%s)" , ( flags & F_RESPONSE ) ? "response" : "query" ) ;
}
else {
ti = proto_tree_add_protocol_format ( tree , proto_dns , tvb , 0 , - 1 , "Domain Name System (%s)" , ( flags & F_RESPONSE ) ? "response" : "query" ) ;
}
dns_tree = proto_item_add_subtree ( ti , ett_dns ) ;
}
conversation = find_or_create_conversation ( pinfo ) ;
dns_info = ( dns_conv_info_t * ) conversation_get_proto_data ( conversation , proto_dns ) ;
if ( ! dns_info ) {
dns_info = wmem_new ( wmem_file_scope ( ) , dns_conv_info_t ) ;
dns_info -> pdus = wmem_tree_new ( wmem_file_scope ( ) ) ;
conversation_add_proto_data ( conversation , proto_dns , dns_info ) ;
}
key [ 0 ] . length = 1 ;
key [ 0 ] . key = & id ;
key [ 1 ] . length = 1 ;
key [ 1 ] . key = & pinfo -> fd -> num ;
key [ 2 ] . length = 0 ;
key [ 2 ] . key = NULL ;
if ( ! pinfo -> fd -> flags . visited ) {
if ( ! ( flags & F_RESPONSE ) ) {
dns_trans = wmem_new ( wmem_file_scope ( ) , dns_transaction_t ) ;
dns_trans -> req_frame = pinfo -> fd -> num ;
dns_trans -> rep_frame = 0 ;
dns_trans -> req_time = pinfo -> fd -> abs_ts ;
dns_trans -> id = id ;
wmem_tree_insert32_array ( dns_info -> pdus , key , ( void * ) dns_trans ) ;
}
else {
dns_trans = ( dns_transaction_t * ) wmem_tree_lookup32_array_le ( dns_info -> pdus , key ) ;
if ( dns_trans ) {
if ( dns_trans -> id != id ) {
dns_trans = NULL ;
}
else {
dns_trans -> rep_frame = pinfo -> fd -> num ;
}
}
}
}
else {
dns_trans = ( dns_transaction_t * ) wmem_tree_lookup32_array_le ( dns_info -> pdus , key ) ;
if ( dns_trans && dns_trans -> id != id ) {
dns_trans = NULL ;
}
}
if ( ! dns_trans ) {
dns_trans = wmem_new ( wmem_packet_scope ( ) , dns_transaction_t ) ;
dns_trans -> req_frame = 0 ;
dns_trans -> rep_frame = 0 ;
dns_trans -> req_time = pinfo -> fd -> abs_ts ;
}
if ( ! ( flags & F_RESPONSE ) ) {
if ( dns_trans -> rep_frame ) {
proto_item * it ;
it = proto_tree_add_uint ( dns_tree , hf_dns_response_in , tvb , 0 , 0 , dns_trans -> rep_frame ) ;
PROTO_ITEM_SET_GENERATED ( it ) ;
}
}
else {
if ( dns_trans -> req_frame ) {
proto_item * it ;
nstime_t ns ;
it = proto_tree_add_uint ( dns_tree , hf_dns_response_to , tvb , 0 , 0 , dns_trans -> req_frame ) ;
PROTO_ITEM_SET_GENERATED ( it ) ;
nstime_delta ( & ns , & pinfo -> fd -> abs_ts , & dns_trans -> req_time ) ;
it = proto_tree_add_time ( dns_tree , hf_dns_time , tvb , 0 , 0 , & ns ) ;
PROTO_ITEM_SET_GENERATED ( it ) ;
}
}
if ( is_tcp ) {
proto_tree_add_item ( dns_tree , hf_dns_length , tvb , offset - 2 , 2 , ENC_BIG_ENDIAN ) ;
}
proto_tree_add_uint ( dns_tree , hf_dns_transaction_id , tvb , offset + DNS_ID , 2 , id ) ;
tf = proto_tree_add_item ( dns_tree , hf_dns_flags , tvb , offset + DNS_FLAGS , 2 , ENC_BIG_ENDIAN ) ;
proto_item_append_text ( tf , " %s" , val_to_str_const ( opcode , opcode_vals , "Unknown operation" ) ) ;
if ( flags & F_RESPONSE ) {
proto_item_append_text ( tf , " response, %s" , val_to_str_const ( rcode , rcode_vals , "Unknown error" ) ) ;
}
field_tree = proto_item_add_subtree ( tf , ett_dns_flags ) ;
proto_tree_add_item ( field_tree , hf_dns_flags_response , tvb , offset + DNS_FLAGS , 2 , ENC_BIG_ENDIAN ) ;
proto_tree_add_item ( field_tree , hf_dns_flags_opcode , tvb , offset + DNS_FLAGS , 2 , ENC_BIG_ENDIAN ) ;
if ( is_llmnr ) {
if ( flags & F_RESPONSE ) {
proto_tree_add_item ( field_tree , hf_dns_flags_conflict_response , tvb , offset + DNS_FLAGS , 2 , ENC_BIG_ENDIAN ) ;
}
else {
proto_tree_add_item ( field_tree , hf_dns_flags_conflict_query , tvb , offset + DNS_FLAGS , 2 , ENC_BIG_ENDIAN ) ;
}
proto_tree_add_item ( field_tree , hf_dns_flags_truncated , tvb , offset + DNS_FLAGS , 2 , ENC_BIG_ENDIAN ) ;
proto_tree_add_item ( field_tree , hf_dns_flags_tentative , tvb , offset + DNS_FLAGS , 2 , ENC_BIG_ENDIAN ) ;
if ( flags & F_RESPONSE ) {
proto_tree_add_item ( field_tree , hf_dns_flags_rcode , tvb , offset + DNS_FLAGS , 2 , ENC_BIG_ENDIAN ) ;
}
}
else {
if ( flags & F_RESPONSE ) {
proto_tree_add_item ( field_tree , hf_dns_flags_authoritative , tvb , offset + DNS_FLAGS , 2 , ENC_BIG_ENDIAN ) ;
}
proto_tree_add_item ( field_tree , hf_dns_flags_truncated , tvb , offset + DNS_FLAGS , 2 , ENC_BIG_ENDIAN ) ;
proto_tree_add_item ( field_tree , hf_dns_flags_recdesired , tvb , offset + DNS_FLAGS , 2 , ENC_BIG_ENDIAN ) ;
if ( flags & F_RESPONSE ) {
proto_tree_add_item ( field_tree , hf_dns_flags_recavail , tvb , offset + DNS_FLAGS , 2 , ENC_BIG_ENDIAN ) ;
}
proto_tree_add_item ( field_tree , hf_dns_flags_z , tvb , offset + DNS_FLAGS , 2 , ENC_BIG_ENDIAN ) ;
if ( flags & F_RESPONSE ) {
proto_tree_add_item ( field_tree , hf_dns_flags_authenticated , tvb , offset + DNS_FLAGS , 2 , ENC_BIG_ENDIAN ) ;
}
else if ( flags & F_AUTHENTIC ) {
proto_tree_add_item ( field_tree , hf_dns_flags_ad , tvb , offset + DNS_FLAGS , 2 , ENC_BIG_ENDIAN ) ;
}
proto_tree_add_item ( field_tree , hf_dns_flags_checkdisable , tvb , offset + DNS_FLAGS , 2 , ENC_BIG_ENDIAN ) ;
if ( flags & F_RESPONSE ) {
proto_tree_add_item ( field_tree , hf_dns_flags_rcode , tvb , offset + DNS_FLAGS , 2 , ENC_BIG_ENDIAN ) ;
}
}
quest = tvb_get_ntohs ( tvb , offset + DNS_QUEST ) ;
if ( tree ) {
if ( isupdate ) {
proto_tree_add_uint ( dns_tree , hf_dns_count_zones , tvb , offset + DNS_QUEST , 2 , quest ) ;
}
else {
proto_tree_add_uint ( dns_tree , hf_dns_count_questions , tvb , offset + DNS_QUEST , 2 , quest ) ;
}
}
ans = tvb_get_ntohs ( tvb , offset + DNS_ANS ) ;
if ( tree ) {
if ( isupdate ) {
proto_tree_add_uint ( dns_tree , hf_dns_count_prerequisites , tvb , offset + DNS_ANS , 2 , ans ) ;
}
else {
proto_tree_add_uint ( dns_tree , hf_dns_count_answers , tvb , offset + DNS_ANS , 2 , ans ) ;
}
}
auth = tvb_get_ntohs ( tvb , offset + DNS_AUTH ) ;
if ( tree ) {
if ( isupdate ) {
proto_tree_add_uint ( dns_tree , hf_dns_count_updates , tvb , offset + DNS_AUTH , 2 , auth ) ;
}
else {
proto_tree_add_uint ( dns_tree , hf_dns_count_auth_rr , tvb , offset + DNS_AUTH , 2 , auth ) ;
}
}
add = tvb_get_ntohs ( tvb , offset + DNS_ADD ) ;
if ( tree ) {
proto_tree_add_uint ( dns_tree , hf_dns_count_add_rr , tvb , offset + DNS_ADD , 2 , add ) ;
}
cur_off = offset + DNS_HDRLEN ;
dns_stats = wmem_new0 ( wmem_packet_scope ( ) , struct DnsTap ) ;
dns_stats -> packet_rcode = rcode ;
dns_stats -> packet_opcode = opcode ;
dns_stats -> packet_qr = flags >> 15 ;
get_dns_name_type_class ( tvb , cur_off , dns_data_offset , & name , & name_len , & qtype , & qclass ) ;
dns_stats -> packet_qtype = qtype ;
dns_stats -> packet_qclass = qclass ;
dns_stats -> payload_size = tvb_captured_length ( tvb ) ;
dns_stats -> nquestions = quest ;
dns_stats -> nanswers = ans ;
dns_stats -> nauthorities = auth ;
dns_stats -> nadditionals = add ;
if ( quest > 0 ) {
cur_off += dissect_query_records ( tvb , cur_off , dns_data_offset , quest , ( ! ( flags & F_RESPONSE ) ? pinfo -> cinfo : NULL ) , dns_tree , isupdate , is_mdns ) ;
dns_stats -> qname_len = name_len ;
dns_stats -> qname_labels = qname_labels_count ( name , name_len ) ;
}
if ( ans > 0 ) {
cur_off += dissect_answer_records ( tvb , cur_off , dns_data_offset , ans , ( ( flags & F_RESPONSE ) ? pinfo -> cinfo : NULL ) , dns_tree , ( isupdate ? "Prerequisites" : "Answers" ) , pinfo , is_mdns ) ;
}
if ( auth > 0 ) {
cur_off += dissect_answer_records ( tvb , cur_off , dns_data_offset , auth , NULL , dns_tree , ( isupdate ? "Updates" : "Authoritative nameservers" ) , pinfo , is_mdns ) ;
}
if ( add > 0 ) {
dissect_answer_records ( tvb , cur_off , dns_data_offset , add , NULL , dns_tree , "Additional records" , pinfo , is_mdns ) ;
}
tap_queue_packet ( dns_tap , pinfo , dns_stats ) ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static guint16 de_auth_resp_param_ext ( tvbuff_t * tvb , proto_tree * tree , packet_info * pinfo _U_ , guint32 offset , guint len , gchar * add_string _U_ , int string_len _U_ ) {
proto_tree_add_item ( tree , hf_gsm_a_dtap_xres , tvb , offset , len , ENC_NA ) ;
return ( len ) ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static void generate_psnr_packet ( VP9_COMP * cpi ) {
struct vpx_codec_cx_pkt pkt ;
int i ;
PSNR_STATS psnr ;
calc_psnr ( cpi -> Source , cpi -> common . frame_to_show , & psnr ) ;
for ( i = 0 ;
i < 4 ;
++ i ) {
pkt . data . psnr . samples [ i ] = psnr . samples [ i ] ;
pkt . data . psnr . sse [ i ] = psnr . sse [ i ] ;
pkt . data . psnr . psnr [ i ] = psnr . psnr [ i ] ;
}
pkt . kind = VPX_CODEC_PSNR_PKT ;
if ( is_two_pass_svc ( cpi ) ) cpi -> svc . layer_context [ cpi -> svc . spatial_layer_id ] . psnr_pkt = pkt . data . psnr ;
else vpx_codec_pkt_list_add ( cpi -> output_pkt_list , & pkt ) ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static struct ewah_bitmap * lookup_stored_bitmap ( struct stored_bitmap * st ) {
struct ewah_bitmap * parent ;
struct ewah_bitmap * composed ;
if ( st -> xor == NULL ) return st -> root ;
composed = ewah_pool_new ( ) ;
parent = lookup_stored_bitmap ( st -> xor ) ;
ewah_xor ( st -> root , parent , composed ) ;
ewah_pool_free ( st -> root ) ;
st -> root = composed ;
st -> xor = NULL ;
return composed ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static PyObject * string_index ( PyStringObject * self , PyObject * args ) {
Py_ssize_t result = string_find_internal ( self , args , + 1 ) ;
if ( result == - 2 ) return NULL ;
if ( result == - 1 ) {
PyErr_SetString ( PyExc_ValueError , "substring not found" ) ;
return NULL ;
}
return PyInt_FromSsize_t ( result ) ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | Expr * make_notclause ( Expr * notclause ) {
BoolExpr * expr = makeNode ( BoolExpr ) ;
expr -> boolop = NOT_EXPR ;
expr -> args = list_make1 ( notclause ) ;
expr -> location = - 1 ;
return ( Expr * ) expr ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static void _throttle_start ( int * active_rpc_cnt ) {
slurm_mutex_lock ( & throttle_mutex ) ;
while ( 1 ) {
if ( * active_rpc_cnt == 0 ) {
( * active_rpc_cnt ) ++ ;
break ;
}
# if 1 slurm_cond_wait ( & throttle_cond , & throttle_mutex ) ;
# else server_thread_decr ( ) ;
slurm_cond_wait ( & throttle_cond , & throttle_mutex ) ;
server_thread_incr ( ) ;
# endif }
slurm_mutex_unlock ( & throttle_mutex ) ;
if ( LOTS_OF_AGENTS ) usleep ( 1000 ) ;
else usleep ( 1 ) ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static Asn1Generic * DecodeAsn1DerIA5String ( const unsigned char * buffer , uint32_t max_size , uint8_t depth , uint32_t * errcode ) {
const unsigned char * d_ptr = buffer ;
uint32_t length , numbytes ;
Asn1Generic * a ;
unsigned char c ;
d_ptr ++ ;
c = d_ptr [ 0 ] ;
if ( ( c & ( 1 << 7 ) ) >> 7 == 0 ) {
length = c ;
d_ptr ++ ;
}
else {
numbytes = c & 0x7f ;
d_ptr ++ ;
if ( DecodeAsn1BuildValue ( & d_ptr , & length , numbytes , errcode ) == - 1 ) {
return NULL ;
}
}
if ( length > max_size ) return NULL ;
a = Asn1GenericNew ( ) ;
if ( a == NULL ) return NULL ;
a -> type = ASN1_IA5STRING ;
a -> strlen = length ;
a -> str = SCMalloc ( length + 1 ) ;
if ( a -> str == NULL ) {
SCFree ( a ) ;
return NULL ;
}
strlcpy ( a -> str , ( const char * ) d_ptr , length + 1 ) ;
d_ptr += length ;
a -> length = ( d_ptr - buffer ) ;
return a ;
} | 1True
|
Categorize the following code snippet as vulnerable or not. True or False | static union _zend_function * incomplete_class_get_method ( zval * * object , char * method , int method_len , const zend_literal * key TSRMLS_DC ) {
incomplete_class_message ( * object , E_ERROR TSRMLS_CC ) ;
return NULL ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | int tmx_init_pretran_table ( void ) {
int n ;
int pn ;
pn = get_max_procs ( ) ;
if ( pn <= 0 ) return - 1 ;
if ( _tmx_ptran_table != NULL ) return - 1 ;
n = - 1 ;
while ( pn >> ++ n > 0 ) ;
n -- ;
if ( n <= 1 ) n = 2 ;
if ( n > 8 ) n = 8 ;
_tmx_ptran_size = 1 << n ;
_tmx_ptran_table = ( pretran_slot_t * ) shm_malloc ( _tmx_ptran_size * sizeof ( pretran_slot_t ) ) ;
if ( _tmx_ptran_table == NULL ) {
LM_ERR ( "not enough shared memory\n" ) ;
return - 1 ;
}
memset ( _tmx_ptran_table , 0 , _tmx_ptran_size * sizeof ( pretran_slot_t ) ) ;
for ( n = 0 ;
n < _tmx_ptran_size ;
n ++ ) {
if ( lock_init ( & _tmx_ptran_table [ n ] . lock ) == NULL ) {
LM_ERR ( "cannot init the lock %d\n" , n ) ;
n -- ;
while ( n >= 0 ) {
lock_destroy ( & _tmx_ptran_table [ n ] . lock ) ;
n -- ;
}
shm_free ( _tmx_ptran_table ) ;
_tmx_ptran_table = 0 ;
_tmx_ptran_size = 0 ;
return - 1 ;
}
}
return 0 ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static int dissect_h245_INTEGER_1_65025 ( tvbuff_t * tvb _U_ , int offset _U_ , asn1_ctx_t * actx _U_ , proto_tree * tree _U_ , int hf_index _U_ ) {
offset = dissect_per_constrained_integer ( tvb , offset , actx , tree , hf_index , 1U , 65025U , NULL , FALSE ) ;
return offset ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static int ts_lua_http_config_int_get ( lua_State * L ) {
int conf ;
int64_t value ;
ts_lua_http_ctx * http_ctx ;
GET_HTTP_CONTEXT ( http_ctx , L ) ;
conf = luaL_checkinteger ( L , 1 ) ;
TSHttpTxnConfigIntGet ( http_ctx -> txnp , conf , & value ) ;
lua_pushnumber ( L , value ) ;
return 1 ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static void check_src_altref ( VP9_COMP * cpi , const struct lookahead_entry * source ) {
RATE_CONTROL * const rc = & cpi -> rc ;
if ( cpi -> oxcf . pass == 2 ) {
const GF_GROUP * const gf_group = & cpi -> twopass . gf_group ;
rc -> is_src_frame_alt_ref = ( gf_group -> update_type [ gf_group -> index ] == OVERLAY_UPDATE ) ;
}
else {
rc -> is_src_frame_alt_ref = cpi -> alt_ref_source && ( source == cpi -> alt_ref_source ) ;
}
if ( rc -> is_src_frame_alt_ref ) {
cpi -> alt_ref_source = NULL ;
cpi -> refresh_last_frame = 0 ;
}
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static int minima_cmp ( const void * a , const void * b ) {
const Minima * da = ( const Minima * ) a ;
const Minima * db = ( const Minima * ) b ;
return da -> height - db -> height ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | void * jas_calloc ( size_t num_elements , size_t element_size ) {
void * ptr ;
size_t size ;
if ( ! jas_safe_size_mul ( num_elements , element_size , & size ) ) {
return 0 ;
}
if ( ! ( ptr = jas_malloc ( size ) ) ) {
return 0 ;
}
memset ( ptr , 0 , size ) ;
return ptr ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static uint_fast32_t jpc_abstorelstepsize ( jpc_fix_t absdelta , int scaleexpn ) {
int p ;
uint_fast32_t mant ;
uint_fast32_t expn ;
int n ;
if ( absdelta < 0 ) {
abort ( ) ;
}
p = jpc_firstone ( absdelta ) - JPC_FIX_FRACBITS ;
n = 11 - jpc_firstone ( absdelta ) ;
mant = ( ( n < 0 ) ? ( absdelta >> ( - n ) ) : ( absdelta << n ) ) & 0x7ff ;
expn = scaleexpn - p ;
if ( scaleexpn < p ) {
abort ( ) ;
}
return JPC_QCX_EXPN ( expn ) | JPC_QCX_MANT ( mant ) ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static int h264_frame_start ( H264Context * h ) {
Picture * pic ;
int i , ret ;
const int pixel_shift = h -> pixel_shift ;
int c [ 4 ] = {
1 << ( h -> sps . bit_depth_luma - 1 ) , 1 << ( h -> sps . bit_depth_chroma - 1 ) , 1 << ( h -> sps . bit_depth_chroma - 1 ) , - 1 }
;
if ( ! ff_thread_can_start_frame ( h -> avctx ) ) {
av_log ( h -> avctx , AV_LOG_ERROR , "Attempt to start a frame outside SETUP state\n" ) ;
return - 1 ;
}
release_unused_pictures ( h , 1 ) ;
h -> cur_pic_ptr = NULL ;
i = find_unused_picture ( h ) ;
if ( i < 0 ) {
av_log ( h -> avctx , AV_LOG_ERROR , "no frame buffer available\n" ) ;
return i ;
}
pic = & h -> DPB [ i ] ;
pic -> reference = h -> droppable ? 0 : h -> picture_structure ;
pic -> f . coded_picture_number = h -> coded_picture_number ++ ;
pic -> field_picture = h -> picture_structure != PICT_FRAME ;
pic -> f . key_frame = 0 ;
pic -> mmco_reset = 0 ;
pic -> recovered = 0 ;
if ( ( ret = alloc_picture ( h , pic ) ) < 0 ) return ret ;
if ( ! h -> frame_recovered && ! h -> avctx -> hwaccel && ! ( h -> avctx -> codec -> capabilities & CODEC_CAP_HWACCEL_VDPAU ) ) avpriv_color_frame ( & pic -> f , c ) ;
h -> cur_pic_ptr = pic ;
unref_picture ( h , & h -> cur_pic ) ;
if ( CONFIG_ERROR_RESILIENCE ) {
h -> er . cur_pic = NULL ;
}
if ( ( ret = ref_picture ( h , & h -> cur_pic , h -> cur_pic_ptr ) ) < 0 ) return ret ;
if ( CONFIG_ERROR_RESILIENCE ) {
ff_er_frame_start ( & h -> er ) ;
h -> er . last_pic = h -> er . next_pic = NULL ;
}
assert ( h -> linesize && h -> uvlinesize ) ;
for ( i = 0 ;
i < 16 ;
i ++ ) {
h -> block_offset [ i ] = ( 4 * ( ( scan8 [ i ] - scan8 [ 0 ] ) & 7 ) << pixel_shift ) + 4 * h -> linesize * ( ( scan8 [ i ] - scan8 [ 0 ] ) >> 3 ) ;
h -> block_offset [ 48 + i ] = ( 4 * ( ( scan8 [ i ] - scan8 [ 0 ] ) & 7 ) << pixel_shift ) + 8 * h -> linesize * ( ( scan8 [ i ] - scan8 [ 0 ] ) >> 3 ) ;
}
for ( i = 0 ;
i < 16 ;
i ++ ) {
h -> block_offset [ 16 + i ] = h -> block_offset [ 32 + i ] = ( 4 * ( ( scan8 [ i ] - scan8 [ 0 ] ) & 7 ) << pixel_shift ) + 4 * h -> uvlinesize * ( ( scan8 [ i ] - scan8 [ 0 ] ) >> 3 ) ;
h -> block_offset [ 48 + 16 + i ] = h -> block_offset [ 48 + 32 + i ] = ( 4 * ( ( scan8 [ i ] - scan8 [ 0 ] ) & 7 ) << pixel_shift ) + 8 * h -> uvlinesize * ( ( scan8 [ i ] - scan8 [ 0 ] ) >> 3 ) ;
}
h -> cur_pic_ptr -> reference = 0 ;
h -> cur_pic_ptr -> field_poc [ 0 ] = h -> cur_pic_ptr -> field_poc [ 1 ] = INT_MAX ;
h -> next_output_pic = NULL ;
assert ( h -> cur_pic_ptr -> long_ref == 0 ) ;
return 0 ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static void rtc_write ( void * opaque , hwaddr addr , uint64_t val , unsigned size ) {
cpu_outw ( 0x71 , val & 0xff ) ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static void CONCAT ( send_hextile_tile_ , NAME ) ( VncState * vs , int x , int y , int w , int h , void * last_bg_ , void * last_fg_ , int * has_bg , int * has_fg ) {
uint8_t * row = ( ds_get_data ( vs -> ds ) + y * ds_get_linesize ( vs -> ds ) + x * ds_get_bytes_per_pixel ( vs -> ds ) ) ;
pixel_t * irow = ( pixel_t * ) row ;
int j , i ;
pixel_t * last_bg = ( pixel_t * ) last_bg_ ;
pixel_t * last_fg = ( pixel_t * ) last_fg_ ;
pixel_t bg = 0 ;
pixel_t fg = 0 ;
int n_colors = 0 ;
int bg_count = 0 ;
int fg_count = 0 ;
int flags = 0 ;
uint8_t data [ ( vs -> clientds . pf . bytes_per_pixel + 2 ) * 16 * 16 ] ;
int n_data = 0 ;
int n_subtiles = 0 ;
for ( j = 0 ;
j < h ;
j ++ ) {
for ( i = 0 ;
i < w ;
i ++ ) {
switch ( n_colors ) {
case 0 : bg = irow [ i ] ;
n_colors = 1 ;
break ;
case 1 : if ( irow [ i ] != bg ) {
fg = irow [ i ] ;
n_colors = 2 ;
}
break ;
case 2 : if ( irow [ i ] != bg && irow [ i ] != fg ) {
n_colors = 3 ;
}
else {
if ( irow [ i ] == bg ) bg_count ++ ;
else if ( irow [ i ] == fg ) fg_count ++ ;
}
break ;
default : break ;
}
}
if ( n_colors > 2 ) break ;
irow += ds_get_linesize ( vs -> ds ) / sizeof ( pixel_t ) ;
}
if ( n_colors > 1 && fg_count > bg_count ) {
pixel_t tmp = fg ;
fg = bg ;
bg = tmp ;
}
if ( ! * has_bg || * last_bg != bg ) {
flags |= 0x02 ;
* has_bg = 1 ;
* last_bg = bg ;
}
if ( ! * has_fg || * last_fg != fg ) {
flags |= 0x04 ;
* has_fg = 1 ;
* last_fg = fg ;
}
switch ( n_colors ) {
case 1 : n_data = 0 ;
break ;
case 2 : flags |= 0x08 ;
irow = ( pixel_t * ) row ;
for ( j = 0 ;
j < h ;
j ++ ) {
int min_x = - 1 ;
for ( i = 0 ;
i < w ;
i ++ ) {
if ( irow [ i ] == fg ) {
if ( min_x == - 1 ) min_x = i ;
}
else if ( min_x != - 1 ) {
hextile_enc_cord ( data + n_data , min_x , j , i - min_x , 1 ) ;
n_data += 2 ;
n_subtiles ++ ;
min_x = - 1 ;
}
}
if ( min_x != - 1 ) {
hextile_enc_cord ( data + n_data , min_x , j , i - min_x , 1 ) ;
n_data += 2 ;
n_subtiles ++ ;
}
irow += ds_get_linesize ( vs -> ds ) / sizeof ( pixel_t ) ;
}
break ;
case 3 : flags |= 0x18 ;
irow = ( pixel_t * ) row ;
if ( ! * has_bg || * last_bg != bg ) flags |= 0x02 ;
for ( j = 0 ;
j < h ;
j ++ ) {
int has_color = 0 ;
int min_x = - 1 ;
pixel_t color = 0 ;
for ( i = 0 ;
i < w ;
i ++ ) {
if ( ! has_color ) {
if ( irow [ i ] == bg ) continue ;
color = irow [ i ] ;
min_x = i ;
has_color = 1 ;
}
else if ( irow [ i ] != color ) {
has_color = 0 ;
# ifdef GENERIC vnc_convert_pixel ( vs , data + n_data , color ) ;
n_data += vs -> clientds . pf . bytes_per_pixel ;
# else memcpy ( data + n_data , & color , sizeof ( color ) ) ;
n_data += sizeof ( pixel_t ) ;
# endif hextile_enc_cord ( data + n_data , min_x , j , i - min_x , 1 ) ;
n_data += 2 ;
n_subtiles ++ ;
min_x = - 1 ;
if ( irow [ i ] != bg ) {
color = irow [ i ] ;
min_x = i ;
has_color = 1 ;
}
}
}
if ( has_color ) {
# ifdef GENERIC vnc_convert_pixel ( vs , data + n_data , color ) ;
n_data += vs -> clientds . pf . bytes_per_pixel ;
# else memcpy ( data + n_data , & color , sizeof ( color ) ) ;
n_data += sizeof ( pixel_t ) ;
# endif hextile_enc_cord ( data + n_data , min_x , j , i - min_x , 1 ) ;
n_data += 2 ;
n_subtiles ++ ;
}
irow += ds_get_linesize ( vs -> ds ) / sizeof ( pixel_t ) ;
}
* has_fg = 0 ;
if ( n_data > ( w * h * sizeof ( pixel_t ) ) ) {
n_colors = 4 ;
flags = 0x01 ;
* has_bg = 0 ;
}
default : break ;
}
if ( n_colors > 3 ) {
flags = 0x01 ;
* has_fg = 0 ;
* has_bg = 0 ;
n_colors = 4 ;
}
vnc_write_u8 ( vs , flags ) ;
if ( n_colors < 4 ) {
if ( flags & 0x02 ) vs -> write_pixels ( vs , last_bg , sizeof ( pixel_t ) ) ;
if ( flags & 0x04 ) vs -> write_pixels ( vs , last_fg , sizeof ( pixel_t ) ) ;
if ( n_subtiles ) {
vnc_write_u8 ( vs , n_subtiles ) ;
vnc_write ( vs , data , n_data ) ;
}
}
else {
for ( j = 0 ;
j < h ;
j ++ ) {
vs -> write_pixels ( vs , row , w * ds_get_bytes_per_pixel ( vs -> ds ) ) ;
row += ds_get_linesize ( vs -> ds ) ;
}
}
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static void vmport_class_initfn ( ObjectClass * klass , void * data ) {
DeviceClass * dc = DEVICE_CLASS ( klass ) ;
dc -> realize = vmport_realizefn ;
dc -> no_user = 1 ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static inline int ohci_put_hcca ( OHCIState * ohci , dma_addr_t addr , struct ohci_hcca * hcca ) {
return dma_memory_write ( ohci -> as , addr + ohci -> localmem_base + HCCA_WRITEBACK_OFFSET , ( char * ) hcca + HCCA_WRITEBACK_OFFSET , HCCA_WRITEBACK_SIZE ) ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static int test_source_cksum_offset ( xd3_stream * stream , int ignore ) {
xd3_source source ;
struct {
xoff_t cpos ;
xoff_t ipos ;
xoff_t size ;
usize_t input ;
xoff_t output ;
}
cksum_test [ ] = {
{
1 , 1 , 1 , 1 , 1 }
, # if XD3_USE_LARGEFILE64 {
0x100100000ULL , 0x100000000ULL , 0x100200000ULL , 0x00000000UL , 0x100000000ULL }
, {
0x100100000ULL , 0x100000000ULL , 0x100200000ULL , 0xF0000000UL , 0x0F0000000ULL }
, {
0x100200000ULL , 0x100100000ULL , 0x100200000ULL , 0x00300000UL , 0x000300000ULL }
, {
25771983104ULL , 25770000000ULL , 26414808769ULL , 2139216707UL , 23614053187ULL }
, # endif {
0 , 0 , 0 , 0 , 0 }
, }
, * test_ptr ;
stream -> src = & source ;
for ( test_ptr = cksum_test ;
test_ptr -> cpos ;
test_ptr ++ ) {
xoff_t r ;
stream -> srcwin_cksum_pos = test_ptr -> cpos ;
stream -> total_in = test_ptr -> ipos ;
r = xd3_source_cksum_offset ( stream , test_ptr -> input ) ;
CHECK ( r == test_ptr -> output ) ;
}
return 0 ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static int dissect_h245_AddConnectionResp ( tvbuff_t * tvb _U_ , int offset _U_ , asn1_ctx_t * actx _U_ , proto_tree * tree _U_ , int hf_index _U_ ) {
offset = dissect_per_sequence ( tvb , offset , actx , tree , hf_index , ett_h245_AddConnectionResp , AddConnectionResp_sequence ) ;
return offset ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static void imdct12 ( INTFLOAT * out , INTFLOAT * in ) {
INTFLOAT in0 , in1 , in2 , in3 , in4 , in5 , t1 , t2 ;
in0 = in [ 0 * 3 ] ;
in1 = in [ 1 * 3 ] + in [ 0 * 3 ] ;
in2 = in [ 2 * 3 ] + in [ 1 * 3 ] ;
in3 = in [ 3 * 3 ] + in [ 2 * 3 ] ;
in4 = in [ 4 * 3 ] + in [ 3 * 3 ] ;
in5 = in [ 5 * 3 ] + in [ 4 * 3 ] ;
in5 += in3 ;
in3 += in1 ;
in2 = MULH3 ( in2 , C3 , 2 ) ;
in3 = MULH3 ( in3 , C3 , 4 ) ;
t1 = in0 - in4 ;
t2 = MULH3 ( in1 - in5 , C4 , 2 ) ;
out [ 7 ] = out [ 10 ] = t1 + t2 ;
out [ 1 ] = out [ 4 ] = t1 - t2 ;
in0 += SHR ( in4 , 1 ) ;
in4 = in0 + in2 ;
in5 += 2 * in1 ;
in1 = MULH3 ( in5 + in3 , C5 , 1 ) ;
out [ 8 ] = out [ 9 ] = in4 + in1 ;
out [ 2 ] = out [ 3 ] = in4 - in1 ;
in0 -= in2 ;
in5 = MULH3 ( in5 - in3 , C6 , 2 ) ;
out [ 0 ] = out [ 5 ] = in0 - in5 ;
out [ 6 ] = out [ 11 ] = in0 + in5 ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static int msrle_decode_frame ( AVCodecContext * avctx , void * data , int * got_frame , AVPacket * avpkt ) {
const uint8_t * buf = avpkt -> data ;
int buf_size = avpkt -> size ;
MsrleContext * s = avctx -> priv_data ;
int istride = FFALIGN ( avctx -> width * avctx -> bits_per_coded_sample , 32 ) / 8 ;
int ret ;
s -> buf = buf ;
s -> size = buf_size ;
if ( ( ret = ff_reget_buffer ( avctx , & s -> frame ) ) < 0 ) {
av_log ( avctx , AV_LOG_ERROR , "reget_buffer() failed\n" ) ;
return ret ;
}
if ( avctx -> bits_per_coded_sample <= 8 ) {
const uint8_t * pal = av_packet_get_side_data ( avpkt , AV_PKT_DATA_PALETTE , NULL ) ;
if ( pal ) {
s -> frame . palette_has_changed = 1 ;
memcpy ( s -> pal , pal , AVPALETTE_SIZE ) ;
}
memcpy ( s -> frame . data [ 1 ] , s -> pal , AVPALETTE_SIZE ) ;
}
if ( avctx -> height * istride == avpkt -> size ) {
int linesize = avctx -> width * avctx -> bits_per_coded_sample / 8 ;
uint8_t * ptr = s -> frame . data [ 0 ] ;
uint8_t * buf = avpkt -> data + ( avctx -> height - 1 ) * istride ;
int i , j ;
for ( i = 0 ;
i < avctx -> height ;
i ++ ) {
if ( avctx -> bits_per_coded_sample == 4 ) {
for ( j = 0 ;
j < avctx -> width - 1 ;
j += 2 ) {
ptr [ j + 0 ] = buf [ j >> 1 ] >> 4 ;
ptr [ j + 1 ] = buf [ j >> 1 ] & 0xF ;
}
if ( avctx -> width & 1 ) ptr [ j + 0 ] = buf [ j >> 1 ] >> 4 ;
}
else {
memcpy ( ptr , buf , linesize ) ;
}
buf -= istride ;
ptr += s -> frame . linesize [ 0 ] ;
}
}
else {
bytestream2_init ( & s -> gb , buf , buf_size ) ;
ff_msrle_decode ( avctx , ( AVPicture * ) & s -> frame , avctx -> bits_per_coded_sample , & s -> gb ) ;
}
if ( ( ret = av_frame_ref ( data , & s -> frame ) ) < 0 ) return ret ;
* got_frame = 1 ;
return buf_size ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | void vp9_setup_in_frame_q_adj ( VP9_COMP * cpi ) {
VP9_COMMON * const cm = & cpi -> common ;
struct segmentation * const seg = & cm -> seg ;
vp9_clear_system_state ( ) ;
if ( cm -> frame_type == KEY_FRAME || cpi -> refresh_alt_ref_frame || ( cpi -> refresh_golden_frame && ! cpi -> rc . is_src_frame_alt_ref ) ) {
int segment ;
const int aq_strength = get_aq_c_strength ( cm -> base_qindex , cm -> bit_depth ) ;
const int active_segments = aq_c_active_segments [ aq_strength ] ;
vpx_memset ( cpi -> segmentation_map , 0 , cm -> mi_rows * cm -> mi_cols ) ;
vpx_memset ( cpi -> complexity_map , 0 , cm -> mi_rows * cm -> mi_cols ) ;
vp9_clearall_segfeatures ( seg ) ;
if ( cpi -> rc . sb64_target_rate < 256 ) {
vp9_disable_segmentation ( seg ) ;
return ;
}
vp9_enable_segmentation ( seg ) ;
seg -> abs_delta = SEGMENT_DELTADATA ;
vp9_disable_segfeature ( seg , 0 , SEG_LVL_ALT_Q ) ;
for ( segment = 1 ;
segment < active_segments ;
++ segment ) {
int qindex_delta = vp9_compute_qdelta_by_rate ( & cpi -> rc , cm -> frame_type , cm -> base_qindex , aq_c_q_adj_factor [ aq_strength ] [ segment ] , cm -> bit_depth ) ;
if ( ( cm -> base_qindex != 0 ) && ( ( cm -> base_qindex + qindex_delta ) == 0 ) ) {
qindex_delta = - cm -> base_qindex + 1 ;
}
if ( ( cm -> base_qindex + qindex_delta ) > 0 ) {
vp9_enable_segfeature ( seg , segment , SEG_LVL_ALT_Q ) ;
vp9_set_segdata ( seg , segment , SEG_LVL_ALT_Q , qindex_delta ) ;
}
}
}
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | void vp9_update_reference_frames ( VP9_COMP * cpi ) {
VP9_COMMON * const cm = & cpi -> common ;
if ( cm -> frame_type == KEY_FRAME ) {
ref_cnt_fb ( cm -> frame_bufs , & cm -> ref_frame_map [ cpi -> gld_fb_idx ] , cm -> new_fb_idx ) ;
ref_cnt_fb ( cm -> frame_bufs , & cm -> ref_frame_map [ cpi -> alt_fb_idx ] , cm -> new_fb_idx ) ;
}
else if ( vp9_preserve_existing_gf ( cpi ) ) {
int tmp ;
ref_cnt_fb ( cm -> frame_bufs , & cm -> ref_frame_map [ cpi -> alt_fb_idx ] , cm -> new_fb_idx ) ;
tmp = cpi -> alt_fb_idx ;
cpi -> alt_fb_idx = cpi -> gld_fb_idx ;
cpi -> gld_fb_idx = tmp ;
if ( is_two_pass_svc ( cpi ) ) {
cpi -> svc . layer_context [ 0 ] . gold_ref_idx = cpi -> gld_fb_idx ;
cpi -> svc . layer_context [ 0 ] . alt_ref_idx = cpi -> alt_fb_idx ;
}
}
else {
if ( cpi -> refresh_alt_ref_frame ) {
int arf_idx = cpi -> alt_fb_idx ;
if ( ( cpi -> oxcf . pass == 2 ) && cpi -> multi_arf_allowed ) {
const GF_GROUP * const gf_group = & cpi -> twopass . gf_group ;
arf_idx = gf_group -> arf_update_idx [ gf_group -> index ] ;
}
ref_cnt_fb ( cm -> frame_bufs , & cm -> ref_frame_map [ arf_idx ] , cm -> new_fb_idx ) ;
vpx_memcpy ( cpi -> interp_filter_selected [ ALTREF_FRAME ] , cpi -> interp_filter_selected [ 0 ] , sizeof ( cpi -> interp_filter_selected [ 0 ] ) ) ;
}
if ( cpi -> refresh_golden_frame ) {
ref_cnt_fb ( cm -> frame_bufs , & cm -> ref_frame_map [ cpi -> gld_fb_idx ] , cm -> new_fb_idx ) ;
if ( ! cpi -> rc . is_src_frame_alt_ref ) vpx_memcpy ( cpi -> interp_filter_selected [ GOLDEN_FRAME ] , cpi -> interp_filter_selected [ 0 ] , sizeof ( cpi -> interp_filter_selected [ 0 ] ) ) ;
else vpx_memcpy ( cpi -> interp_filter_selected [ GOLDEN_FRAME ] , cpi -> interp_filter_selected [ ALTREF_FRAME ] , sizeof ( cpi -> interp_filter_selected [ ALTREF_FRAME ] ) ) ;
}
}
if ( cpi -> refresh_last_frame ) {
ref_cnt_fb ( cm -> frame_bufs , & cm -> ref_frame_map [ cpi -> lst_fb_idx ] , cm -> new_fb_idx ) ;
if ( ! cpi -> rc . is_src_frame_alt_ref ) vpx_memcpy ( cpi -> interp_filter_selected [ LAST_FRAME ] , cpi -> interp_filter_selected [ 0 ] , sizeof ( cpi -> interp_filter_selected [ 0 ] ) ) ;
}
# if CONFIG_VP9_TEMPORAL_DENOISING if ( cpi -> oxcf . noise_sensitivity > 0 ) {
vp9_denoiser_update_frame_info ( & cpi -> denoiser , * cpi -> Source , cpi -> common . frame_type , cpi -> refresh_alt_ref_frame , cpi -> refresh_golden_frame , cpi -> refresh_last_frame ) ;
}
# endif } | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static int dissect_h245_BMPString_SIZE_1_128 ( tvbuff_t * tvb _U_ , int offset _U_ , asn1_ctx_t * actx _U_ , proto_tree * tree _U_ , int hf_index _U_ ) {
offset = dissect_per_BMPString ( tvb , offset , actx , tree , hf_index , 1 , 128 , FALSE ) ;
return offset ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static void _zip_write4 ( unsigned int i , FILE * fp ) {
putc ( i & 0xff , fp ) ;
putc ( ( i >> 8 ) & 0xff , fp ) ;
putc ( ( i >> 16 ) & 0xff , fp ) ;
putc ( ( i >> 24 ) & 0xff , fp ) ;
return ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | int qemuMonitorTextAttachPCIDiskController ( qemuMonitorPtr mon , const char * bus , virDomainDevicePCIAddress * guestAddr ) {
char * cmd = NULL ;
char * reply = NULL ;
int tryOldSyntax = 0 ;
int ret = - 1 ;
try_command : if ( virAsprintf ( & cmd , "pci_add %s storage if=%s" , ( tryOldSyntax ? "0" : "pci_addr=auto" ) , bus ) < 0 ) {
virReportOOMError ( ) ;
goto cleanup ;
}
if ( qemuMonitorHMPCommand ( mon , cmd , & reply ) < 0 ) {
qemuReportError ( VIR_ERR_OPERATION_FAILED , _ ( "cannot attach %s disk controller" ) , bus ) ;
goto cleanup ;
}
if ( qemuMonitorTextParsePciAddReply ( mon , reply , guestAddr ) < 0 ) {
if ( ! tryOldSyntax && strstr ( reply , "invalid char in expression" ) ) {
VIR_FREE ( reply ) ;
VIR_FREE ( cmd ) ;
tryOldSyntax = 1 ;
goto try_command ;
}
qemuReportError ( VIR_ERR_OPERATION_FAILED , _ ( "adding %s disk controller failed: %s" ) , bus , reply ) ;
goto cleanup ;
}
ret = 0 ;
cleanup : VIR_FREE ( cmd ) ;
VIR_FREE ( reply ) ;
return ret ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | proto_tree * proto_tree_get_root ( proto_tree * tree ) {
if ( ! tree ) return NULL ;
while ( tree -> parent ) {
tree = tree -> parent ;
}
return tree ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static int read_huffman_tables ( HYuvContext * s , const uint8_t * src , int length ) {
GetBitContext gb ;
int i ;
init_get_bits ( & gb , src , length * 8 ) ;
for ( i = 0 ;
i < 3 ;
i ++ ) {
if ( read_len_table ( s -> len [ i ] , & gb ) < 0 ) return - 1 ;
if ( generate_bits_table ( s -> bits [ i ] , s -> len [ i ] ) < 0 ) {
return - 1 ;
}
ff_free_vlc ( & s -> vlc [ i ] ) ;
init_vlc ( & s -> vlc [ i ] , VLC_BITS , 256 , s -> len [ i ] , 1 , 1 , s -> bits [ i ] , 4 , 4 , 0 ) ;
}
generate_joint_tables ( s ) ;
return ( get_bits_count ( & gb ) + 7 ) / 8 ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static int dissect_h245_MobileMultilinkReconfigurationCommand ( tvbuff_t * tvb _U_ , int offset _U_ , asn1_ctx_t * actx _U_ , proto_tree * tree _U_ , int hf_index _U_ ) {
offset = dissect_per_sequence ( tvb , offset , actx , tree , hf_index , ett_h245_MobileMultilinkReconfigurationCommand , MobileMultilinkReconfigurationCommand_sequence ) ;
return offset ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | int lcc_network_buffer_set_security_level ( lcc_network_buffer_t * nb , lcc_security_level_t level , const char * username , const char * password ) {
char * username_copy ;
char * password_copy ;
if ( level == NONE ) {
free ( nb -> username ) ;
free ( nb -> password ) ;
nb -> username = NULL ;
nb -> password = NULL ;
nb -> seclevel = NONE ;
lcc_network_buffer_initialize ( nb ) ;
return ( 0 ) ;
}
if ( ! have_gcrypt ( ) ) return ( ENOTSUP ) ;
username_copy = strdup ( username ) ;
password_copy = strdup ( password ) ;
if ( ( username_copy == NULL ) || ( password_copy == NULL ) ) {
free ( username_copy ) ;
free ( password_copy ) ;
return ( ENOMEM ) ;
}
free ( nb -> username ) ;
free ( nb -> password ) ;
nb -> username = username_copy ;
nb -> password = password_copy ;
nb -> seclevel = level ;
lcc_network_buffer_initialize ( nb ) ;
return ( 0 ) ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | void evhttp_connection_free ( struct evhttp_connection * evcon ) {
struct evhttp_request * req ;
if ( evcon -> fd != - 1 ) {
if ( evhttp_connected ( evcon ) && evcon -> closecb != NULL ) ( * evcon -> closecb ) ( evcon , evcon -> closecb_arg ) ;
}
while ( ( req = TAILQ_FIRST ( & evcon -> requests ) ) != NULL ) {
TAILQ_REMOVE ( & evcon -> requests , req , next ) ;
evhttp_request_free ( req ) ;
}
if ( evcon -> http_server != NULL ) {
struct evhttp * http = evcon -> http_server ;
TAILQ_REMOVE ( & http -> connections , evcon , next ) ;
}
if ( event_initialized ( & evcon -> close_ev ) ) event_del ( & evcon -> close_ev ) ;
if ( event_initialized ( & evcon -> ev ) ) event_del ( & evcon -> ev ) ;
if ( evcon -> fd != - 1 ) EVUTIL_CLOSESOCKET ( evcon -> fd ) ;
if ( evcon -> bind_address != NULL ) free ( evcon -> bind_address ) ;
if ( evcon -> address != NULL ) free ( evcon -> address ) ;
if ( evcon -> input_buffer != NULL ) evbuffer_free ( evcon -> input_buffer ) ;
if ( evcon -> output_buffer != NULL ) evbuffer_free ( evcon -> output_buffer ) ;
free ( evcon ) ;
} | 1True
|
Categorize the following code snippet as vulnerable or not. True or False | static time_t time_from_tm ( struct tm * t ) {
# if HAVE_TIMEGM return ( timegm ( t ) ) ;
# elif HAVE__MKGMTIME64 return ( _mkgmtime64 ( t ) ) ;
# else if ( mktime ( t ) == ( time_t ) - 1 ) return ( ( time_t ) - 1 ) ;
return ( t -> tm_sec + t -> tm_min * 60 + t -> tm_hour * 3600 + t -> tm_yday * 86400 + ( t -> tm_year - 70 ) * 31536000 + ( ( t -> tm_year - 69 ) / 4 ) * 86400 - ( ( t -> tm_year - 1 ) / 100 ) * 86400 + ( ( t -> tm_year + 299 ) / 400 ) * 86400 ) ;
# endif } | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static int dissect_h245_RequestMultiplexEntryRejectionDescriptionsCause ( tvbuff_t * tvb _U_ , int offset _U_ , asn1_ctx_t * actx _U_ , proto_tree * tree _U_ , int hf_index _U_ ) {
offset = dissect_per_choice ( tvb , offset , actx , tree , hf_index , ett_h245_RequestMultiplexEntryRejectionDescriptionsCause , RequestMultiplexEntryRejectionDescriptionsCause_choice , NULL ) ;
return offset ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static void display_sorted_nicks ( CHANNEL_REC * channel , GSList * nicklist ) {
WINDOW_REC * window ;
TEXT_DEST_REC dest ;
GString * str ;
GSList * tmp ;
char * format , * stripped , * prefix_format ;
char * aligned_nick , nickmode [ 2 ] = {
0 , 0 }
;
int * columns , cols , rows , last_col_rows , col , row , max_width ;
int item_extra , formatnum ;
window = window_find_closest ( channel -> server , channel -> visible_name , MSGLEVEL_CLIENTCRAP ) ;
max_width = window -> width ;
format = format_get_text ( MODULE_NAME , NULL , channel -> server , channel -> visible_name , TXT_NAMES_NICK , " " , "" ) ;
stripped = strip_codes ( format ) ;
item_extra = strlen ( stripped ) ;
g_free ( stripped ) ;
g_free ( format ) ;
if ( settings_get_int ( "names_max_width" ) > 0 && settings_get_int ( "names_max_width" ) < max_width ) max_width = settings_get_int ( "names_max_width" ) ;
format_create_dest ( & dest , channel -> server , channel -> visible_name , MSGLEVEL_CLIENTCRAP , NULL ) ;
format = format_get_line_start ( current_theme , & dest , time ( NULL ) ) ;
if ( format != NULL ) {
stripped = strip_codes ( format ) ;
max_width -= strlen ( stripped ) ;
g_free ( stripped ) ;
g_free ( format ) ;
}
prefix_format = format_get_text ( MODULE_NAME , NULL , channel -> server , channel -> visible_name , TXT_NAMES_PREFIX , channel -> visible_name ) ;
if ( prefix_format != NULL ) {
stripped = strip_codes ( prefix_format ) ;
max_width -= strlen ( stripped ) ;
g_free ( stripped ) ;
}
if ( max_width <= 0 ) {
max_width = 10 ;
}
cols = get_max_column_count ( nicklist , get_nick_length , max_width , settings_get_int ( "names_max_columns" ) , item_extra , 3 , & columns , & rows ) ;
nicklist = columns_sort_list ( nicklist , rows ) ;
last_col_rows = rows - ( cols * rows - g_slist_length ( nicklist ) ) ;
if ( last_col_rows == 0 ) last_col_rows = rows ;
str = g_string_new ( prefix_format ) ;
col = 0 ;
row = 0 ;
for ( tmp = nicklist ;
tmp != NULL ;
tmp = tmp -> next ) {
NICK_REC * rec = tmp -> data ;
if ( rec -> prefixes [ 0 ] ) nickmode [ 0 ] = rec -> prefixes [ 0 ] ;
else nickmode [ 0 ] = ' ' ;
aligned_nick = get_alignment ( rec -> nick , columns [ col ] - item_extra , ALIGN_PAD , ' ' ) ;
formatnum = rec -> op ? TXT_NAMES_NICK_OP : rec -> halfop ? TXT_NAMES_NICK_HALFOP : rec -> voice ? TXT_NAMES_NICK_VOICE : TXT_NAMES_NICK ;
format = format_get_text ( MODULE_NAME , NULL , channel -> server , channel -> visible_name , formatnum , nickmode , aligned_nick ) ;
g_string_append ( str , format ) ;
g_free ( aligned_nick ) ;
g_free ( format ) ;
if ( ++ col == cols ) {
printtext ( channel -> server , channel -> visible_name , MSGLEVEL_CLIENTCRAP , "%s" , str -> str ) ;
g_string_truncate ( str , 0 ) ;
if ( prefix_format != NULL ) g_string_assign ( str , prefix_format ) ;
col = 0 ;
row ++ ;
if ( row == last_col_rows ) cols -- ;
}
}
if ( prefix_format != NULL && str -> len > strlen ( prefix_format ) ) {
printtext ( channel -> server , channel -> visible_name , MSGLEVEL_CLIENTCRAP , "%s" , str -> str ) ;
}
g_slist_free ( nicklist ) ;
g_string_free ( str , TRUE ) ;
g_free_not_null ( columns ) ;
g_free_not_null ( prefix_format ) ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | void stp_print ( netdissect_options * ndo , const u_char * p , u_int length ) {
const struct stp_bpdu_ * stp_bpdu ;
u_int mstp_len ;
u_int spb_len ;
stp_bpdu = ( const struct stp_bpdu_ * ) p ;
if ( length < 4 ) goto trunc ;
ND_TCHECK ( stp_bpdu -> protocol_id ) ;
if ( EXTRACT_16BITS ( & stp_bpdu -> protocol_id ) ) {
ND_PRINT ( ( ndo , "unknown STP version, length %u" , length ) ) ;
return ;
}
ND_TCHECK ( stp_bpdu -> protocol_version ) ;
ND_PRINT ( ( ndo , "STP %s" , tok2str ( stp_proto_values , "Unknown STP protocol (0x%02x)" , stp_bpdu -> protocol_version ) ) ) ;
switch ( stp_bpdu -> protocol_version ) {
case STP_PROTO_REGULAR : case STP_PROTO_RAPID : case STP_PROTO_MSTP : case STP_PROTO_SPB : break ;
default : return ;
}
ND_TCHECK ( stp_bpdu -> bpdu_type ) ;
ND_PRINT ( ( ndo , ", %s" , tok2str ( stp_bpdu_type_values , "Unknown BPDU Type (0x%02x)" , stp_bpdu -> bpdu_type ) ) ) ;
switch ( stp_bpdu -> bpdu_type ) {
case STP_BPDU_TYPE_CONFIG : if ( length < sizeof ( struct stp_bpdu_ ) - 1 ) {
goto trunc ;
}
if ( ! stp_print_config_bpdu ( ndo , stp_bpdu , length ) ) goto trunc ;
break ;
case STP_BPDU_TYPE_RSTP : if ( stp_bpdu -> protocol_version == STP_PROTO_RAPID ) {
if ( length < sizeof ( struct stp_bpdu_ ) ) {
goto trunc ;
}
if ( ! stp_print_config_bpdu ( ndo , stp_bpdu , length ) ) goto trunc ;
}
else if ( stp_bpdu -> protocol_version == STP_PROTO_MSTP || stp_bpdu -> protocol_version == STP_PROTO_SPB ) {
if ( length < STP_BPDU_MSTP_MIN_LEN ) {
goto trunc ;
}
ND_TCHECK ( stp_bpdu -> v1_length ) ;
if ( stp_bpdu -> v1_length != 0 ) {
goto trunc ;
}
ND_TCHECK_16BITS ( p + MST_BPDU_VER3_LEN_OFFSET ) ;
mstp_len = EXTRACT_16BITS ( p + MST_BPDU_VER3_LEN_OFFSET ) ;
mstp_len += 2 ;
if ( length < ( sizeof ( struct stp_bpdu_ ) + mstp_len ) ) {
goto trunc ;
}
if ( ! stp_print_mstp_bpdu ( ndo , stp_bpdu , length ) ) goto trunc ;
if ( stp_bpdu -> protocol_version == STP_PROTO_SPB ) {
ND_TCHECK_16BITS ( p + MST_BPDU_VER3_LEN_OFFSET + mstp_len ) ;
spb_len = EXTRACT_16BITS ( p + MST_BPDU_VER3_LEN_OFFSET + mstp_len ) ;
spb_len += 2 ;
if ( length < ( sizeof ( struct stp_bpdu_ ) + mstp_len + spb_len ) || spb_len < SPB_BPDU_MIN_LEN ) {
goto trunc ;
}
if ( ! stp_print_spb_bpdu ( ndo , stp_bpdu , ( sizeof ( struct stp_bpdu_ ) + mstp_len ) ) ) goto trunc ;
}
}
break ;
case STP_BPDU_TYPE_TOPO_CHANGE : break ;
default : break ;
}
return ;
trunc : ND_PRINT ( ( ndo , "[|stp %d]" , length ) ) ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static int dissect_h245_MultimediaSystemControlMessage ( tvbuff_t * tvb _U_ , int offset _U_ , asn1_ctx_t * actx _U_ , proto_tree * tree _U_ , int hf_index _U_ ) {
offset = dissect_per_choice ( tvb , offset , actx , tree , hf_index , ett_h245_MultimediaSystemControlMessage , MultimediaSystemControlMessage_choice , NULL ) ;
return offset ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | void completion_last_message_rename ( const char * oldnick , const char * newnick ) {
LAST_MSG_REC * rec ;
g_return_if_fail ( oldnick != NULL ) ;
g_return_if_fail ( newnick != NULL ) ;
rec = last_msg_find ( global_lastmsgs , oldnick ) ;
if ( rec != NULL ) {
g_free ( rec -> nick ) ;
rec -> nick = g_strdup ( newnick ) ;
}
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | char * evhttp_htmlescape ( const char * html ) {
int i , new_size = 0 , old_size = strlen ( html ) ;
char * escaped_html , * p ;
char scratch_space [ 2 ] ;
for ( i = 0 ;
i < old_size ;
++ i ) new_size += strlen ( html_replace ( html [ i ] , scratch_space ) ) ;
p = escaped_html = malloc ( new_size + 1 ) ;
if ( escaped_html == NULL ) event_err ( 1 , "%s: malloc(%d)" , __func__ , new_size + 1 ) ;
for ( i = 0 ;
i < old_size ;
++ i ) {
const char * replaced = html_replace ( html [ i ] , scratch_space ) ;
strcpy ( p , replaced ) ;
p += strlen ( replaced ) ;
}
* p = '\0' ;
return ( escaped_html ) ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static __inline__ int TLV_CHECK_TYPE ( struct tlv_desc * tlv , __u16 type ) {
return ( ntohs ( tlv -> tlv_type ) == type ) ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static int ipv4_local_port_range ( ctl_table * table , int write , void __user * buffer , size_t * lenp , loff_t * ppos ) {
int ret ;
int range [ 2 ] ;
ctl_table tmp = {
. data = & range , . maxlen = sizeof ( range ) , . mode = table -> mode , . extra1 = & ip_local_port_range_min , . extra2 = & ip_local_port_range_max , }
;
inet_get_local_port_range ( range , range + 1 ) ;
ret = proc_dointvec_minmax ( & tmp , write , buffer , lenp , ppos ) ;
if ( write && ret == 0 ) {
if ( range [ 1 ] < range [ 0 ] ) ret = - EINVAL ;
else set_local_port_range ( range ) ;
}
return ret ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | void IGDstartelt ( void * d , const char * name , int l ) {
struct IGDdatas * datas = ( struct IGDdatas * ) d ;
memcpy ( datas -> cureltname , name , l ) ;
datas -> cureltname [ l ] = '\0' ;
datas -> level ++ ;
if ( ( l == 7 ) && ! memcmp ( name , "service" , l ) ) {
datas -> tmp . controlurl [ 0 ] = '\0' ;
datas -> tmp . eventsuburl [ 0 ] = '\0' ;
datas -> tmp . scpdurl [ 0 ] = '\0' ;
datas -> tmp . servicetype [ 0 ] = '\0' ;
}
} | 1True
|
Categorize the following code snippet as vulnerable or not. True or False | int qemuMonitorJSONGetBlockStatsParamsNumber ( qemuMonitorPtr mon , int * nparams ) {
int ret , i , num = 0 ;
virJSONValuePtr cmd = qemuMonitorJSONMakeCommand ( "query-blockstats" , NULL ) ;
virJSONValuePtr reply = NULL ;
virJSONValuePtr devices = NULL ;
virJSONValuePtr dev = NULL ;
virJSONValuePtr stats = NULL ;
if ( ! cmd ) return - 1 ;
ret = qemuMonitorJSONCommand ( mon , cmd , & reply ) ;
if ( ret == 0 ) ret = qemuMonitorJSONCheckError ( cmd , reply ) ;
if ( ret < 0 ) goto cleanup ;
ret = - 1 ;
devices = virJSONValueObjectGet ( reply , "return" ) ;
if ( ! devices || devices -> type != VIR_JSON_TYPE_ARRAY ) {
qemuReportError ( VIR_ERR_INTERNAL_ERROR , "%s" , _ ( "blockstats reply was missing device list" ) ) ;
goto cleanup ;
}
dev = virJSONValueArrayGet ( devices , 0 ) ;
if ( ! dev || dev -> type != VIR_JSON_TYPE_OBJECT ) {
qemuReportError ( VIR_ERR_INTERNAL_ERROR , "%s" , _ ( "blockstats device entry was not in expected format" ) ) ;
goto cleanup ;
}
if ( ( stats = virJSONValueObjectGet ( dev , "stats" ) ) == NULL || stats -> type != VIR_JSON_TYPE_OBJECT ) {
qemuReportError ( VIR_ERR_INTERNAL_ERROR , "%s" , _ ( "blockstats stats entry was not in expected format" ) ) ;
goto cleanup ;
}
for ( i = 0 ;
i < stats -> data . object . npairs ;
i ++ ) {
const char * key = stats -> data . object . pairs [ i ] . key ;
if ( STREQ ( key , "rd_bytes" ) || STREQ ( key , "rd_operations" ) || STREQ ( key , "rd_total_times_ns" ) || STREQ ( key , "wr_bytes" ) || STREQ ( key , "wr_operations" ) || STREQ ( key , "wr_total_times_ns" ) || STREQ ( key , "flush_operations" ) || STREQ ( key , "flush_total_times_ns" ) ) {
num ++ ;
}
else {
if ( STRNEQ ( key , "wr_highest_offset" ) ) VIR_DEBUG ( "Missed block stat: %s" , key ) ;
}
}
* nparams = num ;
ret = 0 ;
cleanup : virJSONValueFree ( cmd ) ;
virJSONValueFree ( reply ) ;
return ret ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | bool contain_leaked_vars ( Node * clause ) {
return contain_leaked_vars_walker ( clause , NULL ) ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static char * xml_entities ( const char * s ) {
struct strbuf buf = STRBUF_INIT ;
strbuf_addstr_xml_quoted ( & buf , s ) ;
return strbuf_detach ( & buf , NULL ) ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static bool is_outer_table ( TABLE_LIST * table , SELECT_LEX * select ) {
DBUG_ASSERT ( table -> select_lex != select ) ;
TABLE_LIST * tl ;
if ( table -> belong_to_view && table -> belong_to_view -> select_lex == select ) return FALSE ;
for ( tl = select -> master_unit ( ) -> derived ;
tl && tl -> is_merged_derived ( ) ;
select = tl -> select_lex , tl = select -> master_unit ( ) -> derived ) {
if ( tl -> select_lex == table -> select_lex ) return FALSE ;
}
return TRUE ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | IN_PROC_BROWSER_TEST_F ( FastUnloadTest , PRE_WindowCloseFinishesUnload ) {
NavigateToPage ( "no_listeners" ) ;
NavigateToPageInNewTab ( "unload_sleep_before_cookie" ) ;
EXPECT_EQ ( 2 , browser ( ) -> tab_strip_model ( ) -> count ( ) ) ;
EXPECT_EQ ( "" , GetCookies ( "no_listeners" ) ) ;
content : : WindowedNotificationObserver window_observer ( chrome : : NOTIFICATION_BROWSER_CLOSED , content : : NotificationService : : AllSources ( ) ) ;
chrome : : CloseWindow ( browser ( ) ) ;
window_observer . Wait ( ) ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static void * Type_Curve_Dup ( struct _cms_typehandler_struct * self , const void * Ptr , cmsUInt32Number n ) {
return ( void * ) cmsDupToneCurve ( ( cmsToneCurve * ) Ptr ) ;
cmsUNUSED_PARAMETER ( n ) ;
cmsUNUSED_PARAMETER ( self ) ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | const char * get_errname_from_code ( uint error_code ) {
const char * name ;
if ( ( name = get_errname_from_code ( error_code , global_error_names ) ) != unknown_error ) return name ;
return get_errname_from_code ( error_code , handler_error_names ) ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static MIMEFieldSDKHandle * sdk_alloc_field_handle ( TSMBuffer , MIMEHdrImpl * mh ) {
MIMEFieldSDKHandle * handle = mHandleAllocator . alloc ( ) ;
sdk_assert ( sdk_sanity_check_null_ptr ( ( void * ) handle ) == TS_SUCCESS ) ;
obj_init_header ( handle , HDR_HEAP_OBJ_FIELD_SDK_HANDLE , sizeof ( MIMEFieldSDKHandle ) , 0 ) ;
handle -> mh = mh ;
return handle ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | bool vmxnet_tx_pkt_parse ( struct VmxnetTxPkt * pkt ) {
return vmxnet_tx_pkt_parse_headers ( pkt ) && vmxnet_tx_pkt_rebuild_payload ( pkt ) ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static void vble_restore_plane ( VBLEContext * ctx , AVFrame * pic , int plane , int offset , int width , int height ) {
uint8_t * dst = pic -> data [ plane ] ;
uint8_t * val = ctx -> val + offset ;
int stride = pic -> linesize [ plane ] ;
int i , j , left , left_top ;
for ( i = 0 ;
i < height ;
i ++ ) {
for ( j = 0 ;
j < width ;
j ++ ) val [ j ] = ( val [ j ] >> 1 ) ^ - ( val [ j ] & 1 ) ;
if ( i ) {
left = 0 ;
left_top = dst [ - stride ] ;
ctx -> dsp . add_hfyu_median_prediction ( dst , dst - stride , val , width , & left , & left_top ) ;
}
else {
dst [ 0 ] = val [ 0 ] ;
for ( j = 1 ;
j < width ;
j ++ ) dst [ j ] = val [ j ] + dst [ j - 1 ] ;
}
dst += stride ;
val += width ;
}
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static uint16_t inv_bits ( uint16_t val , int nbits ) {
uint16_t res ;
if ( nbits <= 8 ) {
res = ff_reverse [ val ] >> ( 8 - nbits ) ;
}
else res = ( ( ff_reverse [ val & 0xFF ] << 8 ) + ( ff_reverse [ val >> 8 ] ) ) >> ( 16 - nbits ) ;
return res ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | SPL_METHOD ( SplFileObject , fseek ) {
spl_filesystem_object * intern = ( spl_filesystem_object * ) zend_object_store_get_object ( getThis ( ) TSRMLS_CC ) ;
long pos , whence = SEEK_SET ;
if ( zend_parse_parameters ( ZEND_NUM_ARGS ( ) TSRMLS_CC , "l|l" , & pos , & whence ) == FAILURE ) {
return ;
}
spl_filesystem_file_free_line ( intern TSRMLS_CC ) ;
RETURN_LONG ( php_stream_seek ( intern -> u . file . stream , pos , whence ) ) ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static int xhci_xfer_create_sgl ( XHCITransfer * xfer , int in_xfer ) {
XHCIState * xhci = xfer -> epctx -> xhci ;
int i ;
xfer -> int_req = false ;
pci_dma_sglist_init ( & xfer -> sgl , PCI_DEVICE ( xhci ) , xfer -> trb_count ) ;
for ( i = 0 ;
i < xfer -> trb_count ;
i ++ ) {
XHCITRB * trb = & xfer -> trbs [ i ] ;
dma_addr_t addr ;
unsigned int chunk = 0 ;
if ( trb -> control & TRB_TR_IOC ) {
xfer -> int_req = true ;
}
switch ( TRB_TYPE ( * trb ) ) {
case TR_DATA : if ( ( ! ( trb -> control & TRB_TR_DIR ) ) != ( ! in_xfer ) ) {
DPRINTF ( "xhci: data direction mismatch for TR_DATA\n" ) ;
goto err ;
}
case TR_NORMAL : case TR_ISOCH : addr = xhci_mask64 ( trb -> parameter ) ;
chunk = trb -> status & 0x1ffff ;
if ( trb -> control & TRB_TR_IDT ) {
if ( chunk > 8 || in_xfer ) {
DPRINTF ( "xhci: invalid immediate data TRB\n" ) ;
goto err ;
}
qemu_sglist_add ( & xfer -> sgl , trb -> addr , chunk ) ;
}
else {
qemu_sglist_add ( & xfer -> sgl , addr , chunk ) ;
}
break ;
}
}
return 0 ;
err : qemu_sglist_destroy ( & xfer -> sgl ) ;
xhci_die ( xhci ) ;
return - 1 ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | void * srtp_get_user_data ( srtp_t ctx ) {
return ctx -> user_data ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static ssize_t qio_channel_websock_readv ( QIOChannel * ioc , const struct iovec * iov , size_t niov , int * * fds , size_t * nfds , Error * * errp ) {
QIOChannelWebsock * wioc = QIO_CHANNEL_WEBSOCK ( ioc ) ;
size_t i ;
ssize_t got = 0 ;
ssize_t ret ;
if ( wioc -> io_err ) {
error_propagate ( errp , error_copy ( wioc -> io_err ) ) ;
return - 1 ;
}
if ( ! wioc -> rawinput . offset ) {
ret = qio_channel_websock_read_wire ( QIO_CHANNEL_WEBSOCK ( ioc ) , errp ) ;
if ( ret < 0 ) {
return ret ;
}
}
for ( i = 0 ;
i < niov ;
i ++ ) {
size_t want = iov [ i ] . iov_len ;
if ( want > ( wioc -> rawinput . offset - got ) ) {
want = ( wioc -> rawinput . offset - got ) ;
}
memcpy ( iov [ i ] . iov_base , wioc -> rawinput . buffer + got , want ) ;
got += want ;
if ( want < iov [ i ] . iov_len ) {
break ;
}
}
buffer_advance ( & wioc -> rawinput , got ) ;
qio_channel_websock_set_watch ( wioc ) ;
return got ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | attr_val * create_attr_sval ( int attr , char * s ) {
attr_val * my_val ;
my_val = emalloc_zero ( sizeof ( * my_val ) ) ;
my_val -> attr = attr ;
if ( NULL == s ) s = estrdup ( "" ) ;
my_val -> value . s = s ;
my_val -> type = T_String ;
return my_val ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static void dtap_cc_facility ( tvbuff_t * tvb , proto_tree * tree , packet_info * pinfo _U_ , guint32 offset , guint len ) {
guint32 curr_offset ;
guint32 consumed ;
guint curr_len ;
curr_offset = offset ;
curr_len = len ;
is_uplink = IS_UPLINK_TRUE ;
ELEM_MAND_LV ( GSM_A_PDU_TYPE_DTAP , DE_FACILITY , NULL ) ;
ELEM_OPT_TLV ( 0x7f , GSM_A_PDU_TYPE_DTAP , DE_SS_VER_IND , NULL ) ;
EXTRANEOUS_DATA_CHECK ( curr_len , 0 , pinfo , & ei_gsm_a_dtap_extraneous_data ) ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | unsigned char * gcry_pk_get_keygrip ( gcry_sexp_t key , unsigned char * array ) {
gcry_sexp_t list = NULL , l2 = NULL ;
gcry_pk_spec_t * pubkey = NULL ;
gcry_module_t module = NULL ;
pk_extra_spec_t * extraspec ;
const char * s ;
char * name = NULL ;
int idx ;
const char * elems ;
gcry_md_hd_t md = NULL ;
int okay = 0 ;
REGISTER_DEFAULT_PUBKEYS ;
list = gcry_sexp_find_token ( key , "public-key" , 0 ) ;
if ( ! list ) list = gcry_sexp_find_token ( key , "private-key" , 0 ) ;
if ( ! list ) list = gcry_sexp_find_token ( key , "protected-private-key" , 0 ) ;
if ( ! list ) list = gcry_sexp_find_token ( key , "shadowed-private-key" , 0 ) ;
if ( ! list ) return NULL ;
l2 = gcry_sexp_cadr ( list ) ;
gcry_sexp_release ( list ) ;
list = l2 ;
l2 = NULL ;
name = _gcry_sexp_nth_string ( list , 0 ) ;
if ( ! name ) goto fail ;
ath_mutex_lock ( & pubkeys_registered_lock ) ;
module = gcry_pk_lookup_name ( name ) ;
ath_mutex_unlock ( & pubkeys_registered_lock ) ;
if ( ! module ) goto fail ;
pubkey = ( gcry_pk_spec_t * ) module -> spec ;
extraspec = module -> extraspec ;
elems = pubkey -> elements_grip ;
if ( ! elems ) goto fail ;
if ( gcry_md_open ( & md , GCRY_MD_SHA1 , 0 ) ) goto fail ;
if ( extraspec && extraspec -> comp_keygrip ) {
if ( extraspec -> comp_keygrip ( md , list ) ) goto fail ;
}
else {
for ( idx = 0 , s = elems ;
* s ;
s ++ , idx ++ ) {
const char * data ;
size_t datalen ;
char buf [ 30 ] ;
l2 = gcry_sexp_find_token ( list , s , 1 ) ;
if ( ! l2 ) goto fail ;
data = gcry_sexp_nth_data ( l2 , 1 , & datalen ) ;
if ( ! data ) goto fail ;
snprintf ( buf , sizeof buf , "(1:%c%u:" , * s , ( unsigned int ) datalen ) ;
gcry_md_write ( md , buf , strlen ( buf ) ) ;
gcry_md_write ( md , data , datalen ) ;
gcry_sexp_release ( l2 ) ;
l2 = NULL ;
gcry_md_write ( md , ")" , 1 ) ;
}
}
if ( ! array ) {
array = gcry_malloc ( 20 ) ;
if ( ! array ) goto fail ;
}
memcpy ( array , gcry_md_read ( md , GCRY_MD_SHA1 ) , 20 ) ;
okay = 1 ;
fail : gcry_free ( name ) ;
gcry_sexp_release ( l2 ) ;
gcry_md_close ( md ) ;
gcry_sexp_release ( list ) ;
return okay ? array : NULL ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static int dissect_h245_MaximumHeaderIntervalReq ( tvbuff_t * tvb _U_ , int offset _U_ , asn1_ctx_t * actx _U_ , proto_tree * tree _U_ , int hf_index _U_ ) {
offset = dissect_per_sequence ( tvb , offset , actx , tree , hf_index , ett_h245_MaximumHeaderIntervalReq , MaximumHeaderIntervalReq_sequence ) ;
return offset ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static void rv34_pred_4x4_block ( RV34DecContext * r , uint8_t * dst , int stride , int itype , int up , int left , int down , int right ) {
uint8_t * prev = dst - stride + 4 ;
uint32_t topleft ;
if ( ! up && ! left ) itype = DC_128_PRED ;
else if ( ! up ) {
if ( itype == VERT_PRED ) itype = HOR_PRED ;
if ( itype == DC_PRED ) itype = LEFT_DC_PRED ;
}
else if ( ! left ) {
if ( itype == HOR_PRED ) itype = VERT_PRED ;
if ( itype == DC_PRED ) itype = TOP_DC_PRED ;
if ( itype == DIAG_DOWN_LEFT_PRED ) itype = DIAG_DOWN_LEFT_PRED_RV40_NODOWN ;
}
if ( ! down ) {
if ( itype == DIAG_DOWN_LEFT_PRED ) itype = DIAG_DOWN_LEFT_PRED_RV40_NODOWN ;
if ( itype == HOR_UP_PRED ) itype = HOR_UP_PRED_RV40_NODOWN ;
if ( itype == VERT_LEFT_PRED ) itype = VERT_LEFT_PRED_RV40_NODOWN ;
}
if ( ! right && up ) {
topleft = dst [ - stride + 3 ] * 0x01010101u ;
prev = ( uint8_t * ) & topleft ;
}
r -> h . pred4x4 [ itype ] ( dst , prev , stride ) ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | METHOD ( x509_t , get_serial , chunk_t , private_x509_cert_t * this ) {
return this -> serialNumber ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static int decode_cabac_field_decoding_flag ( H264Context * h ) {
const long mbb_xy = h -> mb_xy - 2L * h -> mb_stride ;
unsigned long ctx = 0 ;
ctx += h -> mb_field_decoding_flag & ! ! h -> mb_x ;
ctx += ( h -> cur_pic . f . mb_type [ mbb_xy ] >> 7 ) & ( h -> slice_table [ mbb_xy ] == h -> slice_num ) ;
return get_cabac_noinline ( & h -> cabac , & ( h -> cabac_state + 70 ) [ ctx ] ) ;
} | 1True
|
Categorize the following code snippet as vulnerable or not. True or False | static inline void init_interlaced_ref ( MpegEncContext * s , int ref_index ) {
MotionEstContext * const c = & s -> me ;
c -> ref [ 1 + ref_index ] [ 0 ] = c -> ref [ 0 + ref_index ] [ 0 ] + s -> linesize ;
c -> src [ 1 ] [ 0 ] = c -> src [ 0 ] [ 0 ] + s -> linesize ;
if ( c -> flags & FLAG_CHROMA ) {
c -> ref [ 1 + ref_index ] [ 1 ] = c -> ref [ 0 + ref_index ] [ 1 ] + s -> uvlinesize ;
c -> ref [ 1 + ref_index ] [ 2 ] = c -> ref [ 0 + ref_index ] [ 2 ] + s -> uvlinesize ;
c -> src [ 1 ] [ 1 ] = c -> src [ 0 ] [ 1 ] + s -> uvlinesize ;
c -> src [ 1 ] [ 2 ] = c -> src [ 0 ] [ 2 ] + s -> uvlinesize ;
}
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | void vp9_set_speed_features ( VP9_COMP * cpi ) {
SPEED_FEATURES * const sf = & cpi -> sf ;
VP9_COMMON * const cm = & cpi -> common ;
const VP9EncoderConfig * const oxcf = & cpi -> oxcf ;
int i ;
sf -> frame_parameter_update = 1 ;
sf -> mv . search_method = NSTEP ;
sf -> recode_loop = ALLOW_RECODE ;
sf -> mv . subpel_search_method = SUBPEL_TREE ;
sf -> mv . subpel_iters_per_step = 2 ;
sf -> mv . subpel_force_stop = 0 ;
sf -> optimize_coefficients = ! is_lossless_requested ( & cpi -> oxcf ) ;
sf -> mv . reduce_first_step_size = 0 ;
sf -> mv . auto_mv_step_size = 0 ;
sf -> mv . fullpel_search_step_param = 6 ;
sf -> comp_inter_joint_search_thresh = BLOCK_4X4 ;
sf -> adaptive_rd_thresh = 0 ;
sf -> use_lastframe_partitioning = LAST_FRAME_PARTITION_OFF ;
sf -> tx_size_search_method = USE_FULL_RD ;
sf -> use_lp32x32fdct = 0 ;
sf -> adaptive_motion_search = 0 ;
sf -> adaptive_pred_interp_filter = 0 ;
sf -> adaptive_mode_search = 0 ;
sf -> cb_pred_filter_search = 0 ;
sf -> cb_partition_search = 0 ;
sf -> motion_field_mode_search = 0 ;
sf -> alt_ref_search_fp = 0 ;
sf -> use_quant_fp = 0 ;
sf -> reference_masking = 0 ;
sf -> partition_search_type = SEARCH_PARTITION ;
sf -> less_rectangular_check = 0 ;
sf -> use_square_partition_only = 0 ;
sf -> auto_min_max_partition_size = NOT_IN_USE ;
sf -> max_partition_size = BLOCK_64X64 ;
sf -> min_partition_size = BLOCK_4X4 ;
sf -> adjust_partitioning_from_last_frame = 0 ;
sf -> last_partitioning_redo_frequency = 4 ;
sf -> constrain_copy_partition = 0 ;
sf -> disable_split_mask = 0 ;
sf -> mode_search_skip_flags = 0 ;
sf -> force_frame_boost = 0 ;
sf -> max_delta_qindex = 0 ;
sf -> disable_filter_search_var_thresh = 0 ;
sf -> adaptive_interp_filter_search = 0 ;
for ( i = 0 ;
i < TX_SIZES ;
i ++ ) {
sf -> intra_y_mode_mask [ i ] = INTRA_ALL ;
sf -> intra_uv_mode_mask [ i ] = INTRA_ALL ;
}
sf -> use_rd_breakout = 0 ;
sf -> skip_encode_sb = 0 ;
sf -> use_uv_intra_rd_estimate = 0 ;
sf -> allow_skip_recode = 0 ;
sf -> lpf_pick = LPF_PICK_FROM_FULL_IMAGE ;
sf -> use_fast_coef_updates = TWO_LOOP ;
sf -> use_fast_coef_costing = 0 ;
sf -> mode_skip_start = MAX_MODES ;
sf -> schedule_mode_search = 0 ;
sf -> use_nonrd_pick_mode = 0 ;
for ( i = 0 ;
i < BLOCK_SIZES ;
++ i ) sf -> inter_mode_mask [ i ] = INTER_ALL ;
sf -> max_intra_bsize = BLOCK_64X64 ;
sf -> reuse_inter_pred_sby = 0 ;
sf -> always_this_block_size = BLOCK_16X16 ;
sf -> search_type_check_frequency = 50 ;
sf -> encode_breakout_thresh = 0 ;
sf -> elevate_newmv_thresh = 0 ;
sf -> recode_tolerance = 25 ;
sf -> default_interp_filter = SWITCHABLE ;
sf -> tx_size_search_breakout = 0 ;
sf -> partition_search_breakout_dist_thr = 0 ;
sf -> partition_search_breakout_rate_thr = 0 ;
if ( oxcf -> mode == REALTIME ) set_rt_speed_feature ( cpi , sf , oxcf -> speed , oxcf -> content ) ;
else if ( oxcf -> mode == GOOD ) set_good_speed_feature ( cpi , cm , sf , oxcf -> speed ) ;
cpi -> full_search_sad = vp9_full_search_sad ;
cpi -> diamond_search_sad = oxcf -> mode == BEST ? vp9_full_range_search : vp9_diamond_search_sad ;
cpi -> refining_search_sad = vp9_refining_search_sad ;
if ( oxcf -> pass == 1 ) sf -> optimize_coefficients = 0 ;
if ( oxcf -> pass == 0 ) {
sf -> recode_loop = DISALLOW_RECODE ;
sf -> optimize_coefficients = 0 ;
}
if ( sf -> mv . subpel_search_method == SUBPEL_TREE ) {
cpi -> find_fractional_mv_step = vp9_find_best_sub_pixel_tree ;
}
else if ( sf -> mv . subpel_search_method == SUBPEL_TREE_PRUNED ) {
cpi -> find_fractional_mv_step = vp9_find_best_sub_pixel_tree_pruned ;
}
cpi -> mb . optimize = sf -> optimize_coefficients == 1 && oxcf -> pass != 1 ;
if ( sf -> disable_split_mask == DISABLE_ALL_SPLIT ) sf -> adaptive_pred_interp_filter = 0 ;
if ( ! cpi -> oxcf . frame_periodic_boost ) {
sf -> max_delta_qindex = 0 ;
}
if ( cpi -> encode_breakout && oxcf -> mode == REALTIME && sf -> encode_breakout_thresh > cpi -> encode_breakout ) cpi -> encode_breakout = sf -> encode_breakout_thresh ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static int dissect_h245_GenericMessage ( tvbuff_t * tvb _U_ , int offset _U_ , asn1_ctx_t * actx _U_ , proto_tree * tree _U_ , int hf_index _U_ ) {
# line 605 "../../asn1/h245/h245.cnf" void * priv_data = actx -> private_data ;
gef_ctx_t * gefx ;
gefx = gef_ctx_get ( actx -> private_data ) ;
if ( ! gefx ) {
gefx = gef_ctx_alloc ( NULL , "GenericMessage" ) ;
actx -> private_data = gefx ;
}
offset = dissect_per_sequence ( tvb , offset , actx , tree , hf_index , ett_h245_GenericMessage , GenericMessage_sequence ) ;
# line 615 "../../asn1/h245/h245.cnf" actx -> private_data = priv_data ;
return offset ;
} | 0False
|
Categorize the following code snippet as vulnerable or not. True or False | static void dtap_cc_congestion_control ( tvbuff_t * tvb , proto_tree * tree , packet_info * pinfo _U_ , guint32 offset , guint len ) {
guint32 curr_offset ;
guint32 consumed ;
guint curr_len ;
guint8 oct ;
proto_tree * subtree ;
curr_offset = offset ;
curr_len = len ;
is_uplink = IS_UPLINK_FALSE ;
oct = tvb_get_guint8 ( tvb , curr_offset ) ;
proto_tree_add_bits_item ( tree , hf_gsm_a_spare_bits , tvb , curr_offset << 3 , 4 , ENC_BIG_ENDIAN ) ;
subtree = proto_tree_add_subtree ( tree , tvb , curr_offset , 1 , ett_gsm_dtap_elem [ DE_CONGESTION ] , NULL , val_to_str_ext_const ( DE_CONGESTION , & gsm_dtap_elem_strings_ext , "" ) ) ;
proto_tree_add_uint_format_value ( subtree , hf_gsm_a_dtap_congestion_level , tvb , curr_offset , 1 , oct , "%s" , val_to_str_const ( oct & 0xF , gsm_a_dtap_congestion_level_vals , "Reserved" ) ) ;
curr_offset ++ ;
curr_len -- ;
if ( ( signed ) curr_len <= 0 ) return ;
ELEM_OPT_TLV ( 0x08 , GSM_A_PDU_TYPE_DTAP , DE_CAUSE , NULL ) ;
EXTRANEOUS_DATA_CHECK ( curr_len , 0 , pinfo , & ei_gsm_a_dtap_extraneous_data ) ;
} | 0False
|