instruction (stringclasses, 1 value) | input (stringlengths, 31 – 235k) | output (class label, 2 classes: 0False / 1True) |
---|---|---|
Categorize the following code snippet as vulnerable or not. True or False
|
TEST_F ( FullscreenControllerStateUnitTest , TwoFullscreenedTabsOneCaptured ) {
content : : WebContentsDelegate * const wc_delegate = static_cast < content : : WebContentsDelegate * > ( browser ( ) ) ;
ASSERT_TRUE ( wc_delegate -> EmbedsFullscreenWidget ( ) ) ;
AddTab ( browser ( ) , GURL ( url : : kAboutBlankURL ) ) ;
AddTab ( browser ( ) , GURL ( url : : kAboutBlankURL ) ) ;
content : : WebContents * const first_tab = browser ( ) -> tab_strip_model ( ) -> GetWebContentsAt ( 0 ) ;
content : : WebContents * const second_tab = browser ( ) -> tab_strip_model ( ) -> GetWebContentsAt ( 1 ) ;
browser ( ) -> tab_strip_model ( ) -> ActivateTabAt ( 0 , true ) ;
const gfx : : Size kCaptureSize ( 1280 , 720 ) ;
first_tab -> IncrementCapturerCount ( kCaptureSize ) ;
ASSERT_TRUE ( InvokeEvent ( TAB_FULLSCREEN_TRUE ) ) ;
EXPECT_FALSE ( browser ( ) -> window ( ) -> IsFullscreen ( ) ) ;
EXPECT_TRUE ( wc_delegate -> IsFullscreenForTabOrPending ( first_tab ) ) ;
EXPECT_FALSE ( wc_delegate -> IsFullscreenForTabOrPending ( second_tab ) ) ;
EXPECT_FALSE ( GetFullscreenController ( ) -> IsWindowFullscreenForTabOrPending ( ) ) ;
browser ( ) -> tab_strip_model ( ) -> ActivateTabAt ( 1 , true ) ;
ASSERT_TRUE ( InvokeEvent ( TAB_FULLSCREEN_TRUE ) ) ;
ASSERT_TRUE ( InvokeEvent ( WINDOW_CHANGE ) ) ;
EXPECT_TRUE ( browser ( ) -> window ( ) -> IsFullscreen ( ) ) ;
EXPECT_TRUE ( wc_delegate -> IsFullscreenForTabOrPending ( first_tab ) ) ;
EXPECT_TRUE ( wc_delegate -> IsFullscreenForTabOrPending ( second_tab ) ) ;
EXPECT_TRUE ( GetFullscreenController ( ) -> IsWindowFullscreenForTabOrPending ( ) ) ;
ASSERT_TRUE ( InvokeEvent ( TAB_FULLSCREEN_FALSE ) ) ;
ASSERT_TRUE ( InvokeEvent ( WINDOW_CHANGE ) ) ;
EXPECT_FALSE ( browser ( ) -> window ( ) -> IsFullscreen ( ) ) ;
EXPECT_TRUE ( wc_delegate -> IsFullscreenForTabOrPending ( first_tab ) ) ;
EXPECT_FALSE ( wc_delegate -> IsFullscreenForTabOrPending ( second_tab ) ) ;
EXPECT_FALSE ( GetFullscreenController ( ) -> IsWindowFullscreenForTabOrPending ( ) ) ;
browser ( ) -> tab_strip_model ( ) -> ActivateTabAt ( 0 , true ) ;
ASSERT_TRUE ( InvokeEvent ( TAB_FULLSCREEN_FALSE ) ) ;
EXPECT_FALSE ( browser ( ) -> window ( ) -> IsFullscreen ( ) ) ;
EXPECT_FALSE ( wc_delegate -> IsFullscreenForTabOrPending ( first_tab ) ) ;
EXPECT_FALSE ( wc_delegate -> IsFullscreenForTabOrPending ( second_tab ) ) ;
EXPECT_FALSE ( GetFullscreenController ( ) -> IsWindowFullscreenForTabOrPending ( ) ) ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static void jas_image_setbbox ( jas_image_t * image ) {
jas_image_cmpt_t * cmpt ;
int cmptno ;
int_fast32_t x ;
int_fast32_t y ;
if ( image -> numcmpts_ > 0 ) {
cmpt = image -> cmpts_ [ 0 ] ;
image -> tlx_ = cmpt -> tlx_ ;
image -> tly_ = cmpt -> tly_ ;
image -> brx_ = cmpt -> tlx_ + cmpt -> hstep_ * ( cmpt -> width_ - 1 ) + 1 ;
image -> bry_ = cmpt -> tly_ + cmpt -> vstep_ * ( cmpt -> height_ - 1 ) + 1 ;
for ( cmptno = 1 ;
cmptno < image -> numcmpts_ ;
++ cmptno ) {
cmpt = image -> cmpts_ [ cmptno ] ;
if ( image -> tlx_ > cmpt -> tlx_ ) {
image -> tlx_ = cmpt -> tlx_ ;
}
if ( image -> tly_ > cmpt -> tly_ ) {
image -> tly_ = cmpt -> tly_ ;
}
x = cmpt -> tlx_ + cmpt -> hstep_ * ( cmpt -> width_ - 1 ) + 1 ;
if ( image -> brx_ < x ) {
image -> brx_ = x ;
}
y = cmpt -> tly_ + cmpt -> vstep_ * ( cmpt -> height_ - 1 ) + 1 ;
if ( image -> bry_ < y ) {
image -> bry_ = y ;
}
}
}
else {
image -> tlx_ = 0 ;
image -> tly_ = 0 ;
image -> brx_ = 0 ;
image -> bry_ = 0 ;
}
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static ossl_inline STACK_OF ( t1 ) * sk_ ## t1 ## _dup ( const STACK_OF ( t1 ) * sk ) {
return ( STACK_OF ( t1 ) * ) OPENSSL_sk_dup ( ( const OPENSSL_STACK * ) sk ) ;
}
static ossl_inline STACK_OF ( t1 ) * sk_ ## t1 ## _deep_copy ( const STACK_OF ( t1 ) * sk , sk_ ## t1 ## _copyfunc copyfunc , sk_ ## t1 ## _freefunc freefunc ) {
return ( STACK_OF ( t1 ) * ) OPENSSL_sk_deep_copy ( ( const OPENSSL_STACK * ) sk , ( OPENSSL_sk_copyfunc ) copyfunc , ( OPENSSL_sk_freefunc ) freefunc ) ;
}
static ossl_inline sk_ ## t1 ## _compfunc sk_ ## t1 ## _set_cmp_func ( STACK_OF ( t1 ) * sk , sk_ ## t1 ## _compfunc compare ) {
return ( sk_ ## t1 ## _compfunc ) OPENSSL_sk_set_cmp_func ( ( OPENSSL_STACK * ) sk , ( OPENSSL_sk_compfunc ) compare ) ;
}
# define DEFINE_SPECIAL_STACK_OF ( t1 , t2 ) SKM_DEFINE_STACK_OF ( t1 , t2 , t2 )
# define DEFINE_STACK_OF ( t ) SKM_DEFINE_STACK_OF ( t , t , t )
# define DEFINE_SPECIAL_STACK_OF_CONST ( t1 , t2 ) SKM_DEFINE_STACK_OF ( t1 , const t2 , t2 )
# define DEFINE_STACK_OF_CONST ( t ) SKM_DEFINE_STACK_OF ( t , const t , t )
typedef char * OPENSSL_STRING ;
typedef const char * OPENSSL_CSTRING ;
DEFINE_SPECIAL_STACK_OF ( OPENSSL_STRING , char )
DEFINE_SPECIAL_STACK_OF_CONST ( OPENSSL_CSTRING , char )
| 1True
|
Categorize the following code snippet as vulnerable or not. True or False
|
static int dissect_h225_T_connectionParameters ( tvbuff_t * tvb _U_ , int offset _U_ , asn1_ctx_t * actx _U_ , proto_tree * tree _U_ , int hf_index _U_ ) {
offset = dissect_per_sequence ( tvb , offset , actx , tree , hf_index , ett_h225_T_connectionParameters , T_connectionParameters_sequence ) ;
return offset ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static void fix_dependencies ( ArchiveHandle * AH ) {
TocEntry * te ;
int i ;
for ( te = AH -> toc -> next ;
te != AH -> toc ;
te = te -> next ) {
te -> depCount = te -> nDeps ;
te -> revDeps = NULL ;
te -> nRevDeps = 0 ;
te -> par_prev = NULL ;
te -> par_next = NULL ;
}
repoint_table_dependencies ( AH ) ;
if ( AH -> version < K_VERS_1_11 ) {
for ( te = AH -> toc -> next ;
te != AH -> toc ;
te = te -> next ) {
if ( strcmp ( te -> desc , "BLOB COMMENTS" ) == 0 && te -> nDeps == 0 ) {
TocEntry * te2 ;
for ( te2 = AH -> toc -> next ;
te2 != AH -> toc ;
te2 = te2 -> next ) {
if ( strcmp ( te2 -> desc , "BLOBS" ) == 0 ) {
te -> dependencies = ( DumpId * ) pg_malloc ( sizeof ( DumpId ) ) ;
te -> dependencies [ 0 ] = te2 -> dumpId ;
te -> nDeps ++ ;
te -> depCount ++ ;
break ;
}
}
break ;
}
}
}
for ( te = AH -> toc -> next ;
te != AH -> toc ;
te = te -> next ) {
for ( i = 0 ;
i < te -> nDeps ;
i ++ ) {
DumpId depid = te -> dependencies [ i ] ;
if ( depid <= AH -> maxDumpId && AH -> tocsByDumpId [ depid ] != NULL ) AH -> tocsByDumpId [ depid ] -> nRevDeps ++ ;
else te -> depCount -- ;
}
}
for ( te = AH -> toc -> next ;
te != AH -> toc ;
te = te -> next ) {
if ( te -> nRevDeps > 0 ) te -> revDeps = ( DumpId * ) pg_malloc ( te -> nRevDeps * sizeof ( DumpId ) ) ;
te -> nRevDeps = 0 ;
}
for ( te = AH -> toc -> next ;
te != AH -> toc ;
te = te -> next ) {
for ( i = 0 ;
i < te -> nDeps ;
i ++ ) {
DumpId depid = te -> dependencies [ i ] ;
if ( depid <= AH -> maxDumpId && AH -> tocsByDumpId [ depid ] != NULL ) {
TocEntry * otherte = AH -> tocsByDumpId [ depid ] ;
otherte -> revDeps [ otherte -> nRevDeps ++ ] = te -> dumpId ;
}
}
}
for ( te = AH -> toc -> next ;
te != AH -> toc ;
te = te -> next ) {
te -> lockDeps = NULL ;
te -> nLockDeps = 0 ;
identify_locking_dependencies ( AH , te ) ;
}
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static void check_1_6_dummy ( kadm5_principal_ent_t entry , long mask , int n_ks_tuple , krb5_key_salt_tuple * ks_tuple , char * * passptr ) {
int i ;
char * password = * passptr ;
if ( password == NULL || ! ( mask & KADM5_ATTRIBUTES ) || ! ( entry -> attributes & KRB5_KDB_DISALLOW_ALL_TIX ) ) return ;
for ( i = 0 ;
( unsigned char ) password [ i ] == i + 1 ;
i ++ ) ;
if ( password [ i ] != '\0' || i != 255 ) return ;
* passptr = NULL ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
IN_PROC_BROWSER_TEST_F ( PageLoadMetricsBrowserTest , DocumentWriteAsync ) {
ASSERT_TRUE ( embedded_test_server ( ) -> Start ( ) ) ;
auto waiter = CreatePageLoadMetricsWaiter ( ) ;
waiter -> AddPageExpectation ( TimingField : : FIRST_CONTENTFUL_PAINT ) ;
ui_test_utils : : NavigateToURL ( browser ( ) , embedded_test_server ( ) -> GetURL ( "/page_load_metrics/document_write_async_script.html" ) ) ;
waiter -> Wait ( ) ;
histogram_tester_ . ExpectTotalCount ( internal : : kHistogramFirstContentfulPaint , 1 ) ;
histogram_tester_ . ExpectTotalCount ( internal : : kHistogramDocWriteBlockParseStartToFirstContentfulPaint , 0 ) ;
histogram_tester_ . ExpectTotalCount ( internal : : kHistogramDocWriteBlockCount , 0 ) ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static void s_aes_release ( stream_state * ss ) {
stream_aes_state * const state = ( stream_aes_state * ) ss ;
if ( state -> ctx != NULL ) gs_free_object ( state -> memory , state -> ctx , "aes context structure" ) ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
void free_last_set ( REP_SETS * sets ) {
sets -> count -- ;
sets -> extra ++ ;
return ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static void patch_instruction ( VAPICROMState * s , X86CPU * cpu , target_ulong ip ) {
CPUState * cs = CPU ( cpu ) ;
CPUX86State * env = & cpu -> env ;
VAPICHandlers * handlers ;
uint8_t opcode [ 2 ] ;
uint32_t imm32 ;
target_ulong current_pc = 0 ;
target_ulong current_cs_base = 0 ;
int current_flags = 0 ;
if ( smp_cpus == 1 ) {
handlers = & s -> rom_state . up ;
}
else {
handlers = & s -> rom_state . mp ;
}
if ( ! kvm_enabled ( ) ) {
cpu_restore_state ( env , env -> mem_io_pc ) ;
cpu_get_tb_cpu_state ( env , & current_pc , & current_cs_base , & current_flags ) ;
}
pause_all_vcpus ( ) ;
cpu_memory_rw_debug ( env , ip , opcode , sizeof ( opcode ) , 0 ) ;
switch ( opcode [ 0 ] ) {
case 0x89 : patch_byte ( env , ip , 0x50 + modrm_reg ( opcode [ 1 ] ) ) ;
patch_call ( s , env , ip + 1 , handlers -> set_tpr ) ;
break ;
case 0x8b : patch_byte ( env , ip , 0x90 ) ;
patch_call ( s , env , ip + 1 , handlers -> get_tpr [ modrm_reg ( opcode [ 1 ] ) ] ) ;
break ;
case 0xa1 : patch_call ( s , env , ip , handlers -> get_tpr [ 0 ] ) ;
break ;
case 0xa3 : patch_call ( s , env , ip , handlers -> set_tpr_eax ) ;
break ;
case 0xc7 : patch_byte ( env , ip , 0x68 ) ;
cpu_memory_rw_debug ( env , ip + 6 , ( void * ) & imm32 , sizeof ( imm32 ) , 0 ) ;
cpu_memory_rw_debug ( env , ip + 1 , ( void * ) & imm32 , sizeof ( imm32 ) , 1 ) ;
patch_call ( s , env , ip + 5 , handlers -> set_tpr ) ;
break ;
case 0xff : patch_byte ( env , ip , 0x50 ) ;
patch_call ( s , env , ip + 1 , handlers -> get_tpr_stack ) ;
break ;
default : abort ( ) ;
}
resume_all_vcpus ( ) ;
if ( ! kvm_enabled ( ) ) {
cs -> current_tb = NULL ;
tb_gen_code ( env , current_pc , current_cs_base , current_flags , 1 ) ;
cpu_resume_from_signal ( env , NULL ) ;
}
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
void vp9_lpf_vertical_8_dual_sse2 ( uint8_t * s , int p , const uint8_t * blimit0 , const uint8_t * limit0 , const uint8_t * thresh0 , const uint8_t * blimit1 , const uint8_t * limit1 , const uint8_t * thresh1 ) {
DECLARE_ALIGNED_ARRAY ( 16 , unsigned char , t_dst , 16 * 8 ) ;
unsigned char * src [ 2 ] ;
unsigned char * dst [ 2 ] ;
transpose8x16 ( s - 4 , s - 4 + p * 8 , p , t_dst , 16 ) ;
vp9_lpf_horizontal_8_dual_sse2 ( t_dst + 4 * 16 , 16 , blimit0 , limit0 , thresh0 , blimit1 , limit1 , thresh1 ) ;
src [ 0 ] = t_dst ;
src [ 1 ] = t_dst + 8 ;
dst [ 0 ] = s - 4 ;
dst [ 1 ] = s - 4 + p * 8 ;
transpose ( src , 16 , dst , p , 2 ) ;
}
| 1True
|
Categorize the following code snippet as vulnerable or not. True or False
|
static ASN1_STRING * obj_to_asn1str ( VALUE obj ) {
ASN1_STRING * str ;
StringValue ( obj ) ;
if ( ! ( str = ASN1_STRING_new ( ) ) ) ossl_raise ( eASN1Error , NULL ) ;
ASN1_STRING_set ( str , RSTRING_PTR ( obj ) , RSTRING_LENINT ( obj ) ) ;
return str ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
int doqueryex ( int opcode , associd_t associd , int auth , int qsize , const char * qdata , u_short * rstatus , int * rsize , const char * * rdata , int quiet ) {
int res ;
int done ;
if ( ! havehost ) {
fprintf ( stderr , "***No host open, use `host' command\n" ) ;
return - 1 ;
}
done = 0 ;
sequence ++ ;
again : res = sendrequest ( opcode , associd , auth , qsize , qdata ) ;
if ( res != 0 ) return res ;
res = getresponse ( opcode , associd , rstatus , rsize , rdata , done ) ;
if ( res > 0 ) {
if ( ! done && ( res == ERR_TIMEOUT || res == ERR_INCOMPLETE ) ) {
if ( res == ERR_INCOMPLETE ) {
sequence ++ ;
}
done = 1 ;
goto again ;
}
if ( ! quiet ) show_error_msg ( res , associd ) ;
}
return res ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
int nntp_check_msgid ( struct Context * ctx , const char * msgid ) {
struct NntpData * nntp_data = ctx -> data ;
char buf [ LONG_STRING ] ;
FILE * fp = mutt_file_mkstemp ( ) ;
if ( ! fp ) {
mutt_perror ( "mutt_file_mkstemp() failed!" ) ;
return - 1 ;
}
snprintf ( buf , sizeof ( buf ) , "HEAD %s\r\n" , msgid ) ;
int rc = nntp_fetch_lines ( nntp_data , buf , sizeof ( buf ) , NULL , fetch_tempfile , fp ) ;
if ( rc ) {
mutt_file_fclose ( & fp ) ;
if ( rc < 0 ) return - 1 ;
if ( mutt_str_strncmp ( "430" , buf , 3 ) == 0 ) return 1 ;
mutt_error ( "HEAD: %s" , buf ) ;
return - 1 ;
}
if ( ctx -> msgcount == ctx -> hdrmax ) mx_alloc_memory ( ctx ) ;
struct Header * hdr = ctx -> hdrs [ ctx -> msgcount ] = mutt_header_new ( ) ;
hdr -> data = mutt_mem_calloc ( 1 , sizeof ( struct NntpHeaderData ) ) ;
hdr -> env = mutt_rfc822_read_header ( fp , hdr , 0 , 0 ) ;
mutt_file_fclose ( & fp ) ;
if ( hdr -> env -> xref ) nntp_parse_xref ( ctx , hdr ) ;
else {
snprintf ( buf , sizeof ( buf ) , "STAT %s\r\n" , msgid ) ;
if ( nntp_query ( nntp_data , buf , sizeof ( buf ) ) < 0 ) {
mutt_header_free ( & hdr ) ;
return - 1 ;
}
sscanf ( buf + 4 , ANUM , & NHDR ( hdr ) -> article_num ) ;
}
hdr -> read = false ;
hdr -> old = false ;
hdr -> deleted = false ;
hdr -> changed = true ;
hdr -> received = hdr -> date_sent ;
hdr -> index = ctx -> msgcount ++ ;
mx_update_context ( ctx , 1 ) ;
return 0 ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
void configure_icount ( const char * option ) {
vmstate_register ( NULL , 0 , & vmstate_timers , & timers_state ) ;
if ( ! option ) {
return ;
}
icount_warp_timer = qemu_new_timer_ns ( rt_clock , icount_warp_rt , NULL ) ;
if ( strcmp ( option , "auto" ) != 0 ) {
icount_time_shift = strtol ( option , NULL , 0 ) ;
use_icount = 1 ;
return ;
}
use_icount = 2 ;
icount_time_shift = 3 ;
icount_rt_timer = qemu_new_timer_ms ( rt_clock , icount_adjust_rt , NULL ) ;
qemu_mod_timer ( icount_rt_timer , qemu_get_clock_ms ( rt_clock ) + 1000 ) ;
icount_vm_timer = qemu_new_timer_ns ( vm_clock , icount_adjust_vm , NULL ) ;
qemu_mod_timer ( icount_vm_timer , qemu_get_clock_ns ( vm_clock ) + get_ticks_per_sec ( ) / 10 ) ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
IN_PROC_BROWSER_TEST_F ( HttpsEngagementPageLoadMetricsBrowserTest , Navigate_Https ) {
StartHttpsServer ( false ) ;
NavigateTwiceInTabAndClose ( https_test_server_ -> GetURL ( "/simple.html" ) , GURL ( chrome : : kChromeUIVersionURL ) ) ;
histogram_tester_ . ExpectTotalCount ( internal : : kHttpEngagementHistogram , 0 ) ;
histogram_tester_ . ExpectTotalCount ( internal : : kHttpsEngagementHistogram , 1 ) ;
FakeUserMetricsUpload ( ) ;
histogram_tester_ . ExpectTotalCount ( internal : : kHttpsEngagementSessionPercentage , 1 ) ;
int32_t ratio_bucket = histogram_tester_ . GetAllSamples ( internal : : kHttpsEngagementSessionPercentage ) [ 0 ] . min ;
EXPECT_EQ ( 100 , ratio_bucket ) ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static int last_msg_cmp ( LAST_MSG_REC * m1 , LAST_MSG_REC * m2 ) {
return m1 -> time < m2 -> time ? 1 : - 1 ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
IN_PROC_BROWSER_TEST_F ( MessageCenterNotificationsTest , QueueWhenCenterVisible ) {
# if defined ( OS_WIN ) && defined ( USE_ASH )
if ( base : : CommandLine : : ForCurrentProcess ( ) -> HasSwitch ( switches : : kAshBrowserTests ) ) return ;
# endif
TestAddObserver observer ( message_center ( ) ) ;
TestDelegate * delegate ;
TestDelegate * delegate2 ;
manager ( ) -> Add ( CreateTestNotification ( "n" , & delegate ) , profile ( ) ) ;
const std : : string id_n = manager ( ) -> GetMessageCenterNotificationIdForTest ( "n" , profile ( ) ) ;
message_center ( ) -> SetVisibility ( message_center : : VISIBILITY_MESSAGE_CENTER ) ;
manager ( ) -> Add ( CreateTestNotification ( "n2" , & delegate2 ) , profile ( ) ) ;
const std : : string id_n2 = manager ( ) -> GetMessageCenterNotificationIdForTest ( "n2" , profile ( ) ) ;
EXPECT_EQ ( base : : StringPrintf ( "add-%s_update-%s_update-%s" , id_n . c_str ( ) , id_n . c_str ( ) , id_n . c_str ( ) ) , observer . log ( id_n ) ) ;
message_center ( ) -> SetVisibility ( message_center : : VISIBILITY_TRANSIENT ) ;
EXPECT_EQ ( base : : StringPrintf ( "add-%s" , id_n2 . c_str ( ) ) , observer . log ( id_n2 ) ) ;
delegate -> Release ( ) ;
delegate2 -> Release ( ) ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static void dissect_zcl_pwr_prof_getoverallschedpricersp ( tvbuff_t * tvb , proto_tree * tree , guint * offset ) {
proto_tree_add_item ( tree , hf_zbee_zcl_pwr_prof_currency , tvb , * offset , 2 , ENC_LITTLE_ENDIAN ) ;
* offset += 2 ;
proto_tree_add_item ( tree , hf_zbee_zcl_pwr_prof_price , tvb , * offset , 4 , ENC_LITTLE_ENDIAN ) ;
* offset += 4 ;
proto_tree_add_item ( tree , hf_zbee_zcl_pwr_prof_price_trailing_digit , tvb , * offset , 1 , ENC_NA ) ;
* offset += 1 ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
int send_xmessage_using_uids ( struct proclistlist * pll , char * message ) {
int num_users ;
int lokke ;
int * uids = get_userlist ( pll , & num_users ) ;
for ( lokke = 0 ;
lokke < num_users ;
lokke ++ ) {
char xauthpath [ 5000 ] ;
struct passwd * pass = getpwuid ( uids [ lokke ] ) ;
sprintf ( xauthpath , "%s/.Xauthority" , pass -> pw_dir ) ;
if ( send_xmessage ( xauthpath , message ) == 1 ) {
free ( uids ) ;
return 1 ;
}
}
free ( uids ) ;
return 0 ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static void encode_mv_component ( vp9_writer * w , int comp , const nmv_component * mvcomp , int usehp ) {
int offset ;
const int sign = comp < 0 ;
const int mag = sign ? - comp : comp ;
const int mv_class = vp9_get_mv_class ( mag - 1 , & offset ) ;
const int d = offset >> 3 ;
const int fr = ( offset >> 1 ) & 3 ;
const int hp = offset & 1 ;
assert ( comp != 0 ) ;
vp9_write ( w , sign , mvcomp -> sign ) ;
vp9_write_token ( w , vp9_mv_class_tree , mvcomp -> classes , & mv_class_encodings [ mv_class ] ) ;
if ( mv_class == MV_CLASS_0 ) {
vp9_write_token ( w , vp9_mv_class0_tree , mvcomp -> class0 , & mv_class0_encodings [ d ] ) ;
}
else {
int i ;
const int n = mv_class + CLASS0_BITS - 1 ;
for ( i = 0 ;
i < n ;
++ i ) vp9_write ( w , ( d >> i ) & 1 , mvcomp -> bits [ i ] ) ;
}
vp9_write_token ( w , vp9_mv_fp_tree , mv_class == MV_CLASS_0 ? mvcomp -> class0_fp [ d ] : mvcomp -> fp , & mv_fp_encodings [ fr ] ) ;
if ( usehp ) vp9_write ( w , hp , mv_class == MV_CLASS_0 ? mvcomp -> class0_hp : mvcomp -> hp ) ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static inline key_ref_t __key_update ( key_ref_t key_ref , struct key_preparsed_payload * prep ) {
struct key * key = key_ref_to_ptr ( key_ref ) ;
int ret ;
ret = key_permission ( key_ref , KEY_NEED_WRITE ) ;
if ( ret < 0 ) goto error ;
ret = - EEXIST ;
if ( ! key -> type -> update ) goto error ;
down_write ( & key -> sem ) ;
ret = key -> type -> update ( key , prep ) ;
if ( ret == 0 ) clear_bit ( KEY_FLAG_NEGATIVE , & key -> flags ) ;
up_write ( & key -> sem ) ;
if ( ret < 0 ) goto error ;
out : return key_ref ;
error : key_put ( key ) ;
key_ref = ERR_PTR ( ret ) ;
goto out ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static void copy_task_thread_func ( GTask * task , gpointer source_object , gpointer task_data , GCancellable * cancellable ) {
CopyMoveJob * job ;
CommonJob * common ;
SourceInfo source_info ;
TransferInfo transfer_info ;
char * dest_fs_id ;
GFile * dest ;
job = task_data ;
common = & job -> common ;
dest_fs_id = NULL ;
nautilus_progress_info_start ( job -> common . progress ) ;
scan_sources ( job -> files , & source_info , common , OP_KIND_COPY ) ;
if ( job_aborted ( common ) ) {
goto aborted ;
}
if ( job -> destination ) {
dest = g_object_ref ( job -> destination ) ;
}
else {
dest = g_file_get_parent ( job -> files -> data ) ;
}
verify_destination ( & job -> common , dest , & dest_fs_id , source_info . num_bytes ) ;
g_object_unref ( dest ) ;
if ( job_aborted ( common ) ) {
goto aborted ;
}
g_timer_start ( job -> common . time ) ;
memset ( & transfer_info , 0 , sizeof ( transfer_info ) ) ;
copy_files ( job , dest_fs_id , & source_info , & transfer_info ) ;
aborted : g_free ( dest_fs_id ) ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
TEST_F ( BrowsingDataRemoverImplTest , EarlyShutdown ) {
BrowsingDataRemoverImpl * remover = static_cast < BrowsingDataRemoverImpl * > ( BrowsingDataRemoverFactory : : GetForBrowserContext ( GetBrowserContext ( ) ) ) ;
InspectableCompletionObserver completion_observer ( remover ) ;
BrowsingDataRemoverCompletionInhibitor completion_inhibitor ;
remover -> RemoveAndReply ( base : : Time ( ) , base : : Time : : Max ( ) , BrowsingDataRemover : : REMOVE_HISTORY , BrowsingDataHelper : : UNPROTECTED_WEB , & completion_observer ) ;
completion_inhibitor . BlockUntilNearCompletion ( ) ;
EXPECT_TRUE ( remover -> is_removing ( ) ) ;
EXPECT_FALSE ( completion_observer . called ( ) ) ;
DestroyBrowserContext ( ) ;
EXPECT_TRUE ( completion_observer . called ( ) ) ;
completion_inhibitor . ContinueToCompletion ( ) ;
completion_observer . BlockUntilCompletion ( ) ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
int get_vp9_frame_buffer ( void * cb_priv , size_t min_size , vpx_codec_frame_buffer_t * fb ) {
int i ;
struct ExternalFrameBufferList * const ext_fb_list = ( struct ExternalFrameBufferList * ) cb_priv ;
if ( ext_fb_list == NULL ) return - 1 ;
for ( i = 0 ;
i < ext_fb_list -> num_external_frame_buffers ;
++ i ) {
if ( ! ext_fb_list -> ext_fb [ i ] . in_use ) break ;
}
if ( i == ext_fb_list -> num_external_frame_buffers ) return - 1 ;
if ( ext_fb_list -> ext_fb [ i ] . size < min_size ) {
free ( ext_fb_list -> ext_fb [ i ] . data ) ;
ext_fb_list -> ext_fb [ i ] . data = ( uint8_t * ) malloc ( min_size ) ;
if ( ! ext_fb_list -> ext_fb [ i ] . data ) return - 1 ;
ext_fb_list -> ext_fb [ i ] . size = min_size ;
}
fb -> data = ext_fb_list -> ext_fb [ i ] . data ;
fb -> size = ext_fb_list -> ext_fb [ i ] . size ;
ext_fb_list -> ext_fb [ i ] . in_use = 1 ;
fb -> priv = & ext_fb_list -> ext_fb [ i ] ;
return 0 ;
}
| 1True
|
Categorize the following code snippet as vulnerable or not. True or False
|
static void kvm_getput_reg ( __u64 * kvm_reg , target_ulong * qemu_reg , int set ) {
if ( set ) {
* kvm_reg = * qemu_reg ;
}
else {
* qemu_reg = * kvm_reg ;
}
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
TEST_F ( ProfileInfoCacheTest , DeleteProfile ) {
EXPECT_EQ ( 0u , GetCache ( ) -> GetNumberOfProfiles ( ) ) ;
base : : FilePath path_1 = GetProfilePath ( "path_1" ) ;
GetCache ( ) -> AddProfileToCache ( path_1 , ASCIIToUTF16 ( "name_1" ) , base : : string16 ( ) , 0 , std : : string ( ) ) ;
EXPECT_EQ ( 1u , GetCache ( ) -> GetNumberOfProfiles ( ) ) ;
base : : FilePath path_2 = GetProfilePath ( "path_2" ) ;
base : : string16 name_2 = ASCIIToUTF16 ( "name_2" ) ;
GetCache ( ) -> AddProfileToCache ( path_2 , name_2 , base : : string16 ( ) , 0 , std : : string ( ) ) ;
EXPECT_EQ ( 2u , GetCache ( ) -> GetNumberOfProfiles ( ) ) ;
GetCache ( ) -> DeleteProfileFromCache ( path_1 ) ;
EXPECT_EQ ( 1u , GetCache ( ) -> GetNumberOfProfiles ( ) ) ;
EXPECT_EQ ( name_2 , GetCache ( ) -> GetNameOfProfileAtIndex ( 0 ) ) ;
GetCache ( ) -> DeleteProfileFromCache ( path_2 ) ;
EXPECT_EQ ( 0u , GetCache ( ) -> GetNumberOfProfiles ( ) ) ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
int PEM_write_ ## name ( FILE * fp , type * x ) ;
# define DECLARE_PEM_write_fp_const ( name , type ) int PEM_write_ ## name ( FILE * fp , const type * x ) ;
# define DECLARE_PEM_write_cb_fp ( name , type ) int PEM_write_ ## name ( FILE * fp , type * x , const EVP_CIPHER * enc , unsigned char * kstr , int klen , pem_password_cb * cb , void * u ) ;
# endif
# define DECLARE_PEM_read_bio ( name , type ) type * PEM_read_bio_ ## name ( BIO * bp , type * * x , pem_password_cb * cb , void * u ) ;
# define DECLARE_PEM_write_bio ( name , type ) int PEM_write_bio_ ## name ( BIO * bp , type * x ) ;
# define DECLARE_PEM_write_bio_const ( name , type ) int PEM_write_bio_ ## name ( BIO * bp , const type * x ) ;
# define DECLARE_PEM_write_cb_bio ( name , type ) int PEM_write_bio_ ## name ( BIO * bp , type * x , const EVP_CIPHER * enc , unsigned char * kstr , int klen , pem_password_cb * cb , void * u ) ;
# define DECLARE_PEM_write ( name , type ) DECLARE_PEM_write_bio ( name , type ) DECLARE_PEM_write_fp ( name , type )
# define DECLARE_PEM_write_const ( name , type ) DECLARE_PEM_write_bio_const ( name , type ) DECLARE_PEM_write_fp_const ( name , type )
# define DECLARE_PEM_write_cb ( name , type ) DECLARE_PEM_write_cb_bio ( name , type ) DECLARE_PEM_write_cb_fp ( name , type )
# define DECLARE_PEM_read ( name , type ) DECLARE_PEM_read_bio ( name , type ) DECLARE_PEM_read_fp ( name , type )
# define DECLARE_PEM_rw ( name , type ) DECLARE_PEM_read ( name , type ) DECLARE_PEM_write ( name , type )
# define DECLARE_PEM_rw_const ( name , type ) DECLARE_PEM_read ( name , type ) DECLARE_PEM_write_const ( name , type )
# define DECLARE_PEM_rw_cb ( name , type ) DECLARE_PEM_read ( name , type ) DECLARE_PEM_write_cb ( name , type )
typedef int pem_password_cb ( char * buf , int size , int rwflag , void * userdata ) ;
int PEM_get_EVP_CIPHER_INFO ( char * header , EVP_CIPHER_INFO * cipher ) ;
int PEM_do_header ( EVP_CIPHER_INFO * cipher , unsigned char * data , long * len , pem_password_cb * callback , void * u ) ;
int PEM_read_bio ( BIO * bp , char * * name , char * * header , unsigned char * * data , long * len ) ;
# define PEM_FLAG_SECURE 0x1
# define PEM_FLAG_EAY_COMPATIBLE 0x2
# define PEM_FLAG_ONLY_B64 0x4
int PEM_read_bio_ex ( BIO * bp , char * * name , char * * header , unsigned char * * data , long * len , unsigned int flags ) ;
int PEM_bytes_read_bio_secmem ( unsigned char * * pdata , long * plen , char * * pnm , const char * name , BIO * bp , pem_password_cb * cb , void * u ) ;
int PEM_write_bio ( BIO * bp , const char * name , const char * hdr , const unsigned char * data , long len ) ;
int PEM_bytes_read_bio ( unsigned char * * pdata , long * plen , char * * pnm , const char * name , BIO * bp , pem_password_cb * cb , void * u ) ;
void * PEM_ASN1_read_bio ( d2i_of_void * d2i , const char * name , BIO * bp , void * * x , pem_password_cb * cb , void * u ) ;
int PEM_ASN1_write_bio ( i2d_of_void * i2d , const char * name , BIO * bp , void * x , const EVP_CIPHER * enc , unsigned char * kstr , int klen , pem_password_cb * cb , void * u ) ;
STACK_OF ( X509_INFO ) * PEM_X509_INFO_read_bio ( BIO * bp , STACK_OF ( X509_INFO ) * sk , pem_password_cb * cb , void * u ) ;
int PEM_X509_INFO_write_bio ( BIO * bp , X509_INFO * xi , EVP_CIPHER * enc , unsigned char * kstr , int klen , pem_password_cb * cd , void * u ) ;
# ifndef OPENSSL_NO_STDIO
int PEM_read ( FILE * fp , char * * name , char * * header , unsigned char * * data , long * len ) ;
int PEM_write ( FILE * fp , const char * name , const char * hdr , const unsigned char * data , long len ) ;
void * PEM_ASN1_read ( d2i_of_void * d2i , const char * name , FILE * fp , void * * x , pem_password_cb * cb , void * u ) ;
int PEM_ASN1_write ( i2d_of_void * i2d , const char * name , FILE * fp , void * x , const EVP_CIPHER * enc , unsigned char * kstr , int klen , pem_password_cb * callback , void * u ) ;
STACK_OF ( X509_INFO ) * PEM_X509_INFO_read ( FILE * fp , STACK_OF ( X509_INFO ) * sk , pem_password_cb * cb , void * u ) ;
# endif
int PEM_SignInit ( EVP_MD_CTX * ctx , EVP_MD * type ) ;
int PEM_SignUpdate ( EVP_MD_CTX * ctx , unsigned char * d , unsigned int cnt ) ;
int PEM_SignFinal ( EVP_MD_CTX * ctx , unsigned char * sigret , unsigned int * siglen , EVP_PKEY * pkey ) ;
int PEM_def_callback ( char * buf , int num , int rwflag , void * userdata ) ;
void PEM_proc_type ( char * buf , int type ) ;
void PEM_dek_info ( char * buf , const char * type , int len , char * str ) ;
# include < openssl / symhacks . h >
DECLARE_PEM_rw ( X509 , X509 )
DECLARE_PEM_rw ( X509_AUX , X509 )
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static int dissect_h245_RequestChannelCloseRejectCause ( tvbuff_t * tvb _U_ , int offset _U_ , asn1_ctx_t * actx _U_ , proto_tree * tree _U_ , int hf_index _U_ ) {
offset = dissect_per_choice ( tvb , offset , actx , tree , hf_index , ett_h245_RequestChannelCloseRejectCause , RequestChannelCloseRejectCause_choice , NULL ) ;
return offset ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
TEST_F ( NativeBackendLibsecretTest , RemoveLoginsSyncedBetween ) {
CheckRemoveLoginsBetween ( SYNCED ) ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static void prc_destroy ( jpc_enc_prc_t * prc ) {
jpc_enc_cblk_t * cblk ;
uint_fast32_t cblkno ;
if ( prc -> cblks ) {
for ( cblkno = 0 , cblk = prc -> cblks ;
cblkno < prc -> numcblks ;
++ cblkno , ++ cblk ) {
cblk_destroy ( cblk ) ;
}
jas_free ( prc -> cblks ) ;
}
if ( prc -> incltree ) {
jpc_tagtree_destroy ( prc -> incltree ) ;
}
if ( prc -> nlibtree ) {
jpc_tagtree_destroy ( prc -> nlibtree ) ;
}
if ( prc -> savincltree ) {
jpc_tagtree_destroy ( prc -> savincltree ) ;
}
if ( prc -> savnlibtree ) {
jpc_tagtree_destroy ( prc -> savnlibtree ) ;
}
}
| 1True
|
Categorize the following code snippet as vulnerable or not. True or False
|
tvbuff_t * ptvcursor_tvbuff ( ptvcursor_t * ptvc ) {
return ptvc -> tvb ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
clump_t * clump_splay_walk_bwd_init ( clump_splay_walker * sw , const gs_ref_memory_t * mem ) {
clump_t * cp = mem -> root ;
if ( cp ) {
SANITY_CHECK ( cp ) ;
sw -> from = SPLAY_FROM_RIGHT ;
while ( cp -> right ) {
cp = cp -> right ;
}
}
sw -> cp = cp ;
sw -> end = NULL ;
return cp ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
IN_PROC_BROWSER_TEST_F ( PageLoadMetricsBrowserTest , UseCounterFeaturesMixedContent ) {
net : : EmbeddedTestServer https_server ( net : : EmbeddedTestServer : : TYPE_HTTPS ) ;
https_server . AddDefaultHandlers ( base : : FilePath ( FILE_PATH_LITERAL ( "chrome/test/data" ) ) ) ;
ASSERT_TRUE ( https_server . Start ( ) ) ;
auto waiter = CreatePageLoadMetricsWaiter ( ) ;
waiter -> AddPageExpectation ( TimingField : : LOAD_EVENT ) ;
ui_test_utils : : NavigateToURL ( browser ( ) , https_server . GetURL ( "/page_load_metrics/use_counter_features.html" ) ) ;
waiter -> Wait ( ) ;
NavigateToUntrackedUrl ( ) ;
histogram_tester_ . ExpectBucketCount ( internal : : kFeaturesHistogramName , static_cast < int32_t > ( WebFeature : : kMixedContentAudio ) , 1 ) ;
histogram_tester_ . ExpectBucketCount ( internal : : kFeaturesHistogramName , static_cast < int32_t > ( WebFeature : : kMixedContentImage ) , 1 ) ;
histogram_tester_ . ExpectBucketCount ( internal : : kFeaturesHistogramName , static_cast < int32_t > ( WebFeature : : kMixedContentVideo ) , 1 ) ;
histogram_tester_ . ExpectBucketCount ( internal : : kFeaturesHistogramName , static_cast < int32_t > ( WebFeature : : kPageVisits ) , 1 ) ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
TEST_F ( ProtocolHandlerRegistryTest , TestIsSameOrigin ) {
ProtocolHandler ph1 = CreateProtocolHandler ( "mailto" , GURL ( "http://test.com/%s" ) ) ;
ProtocolHandler ph2 = CreateProtocolHandler ( "mailto" , GURL ( "http://test.com/updated-url/%s" ) ) ;
ProtocolHandler ph3 = CreateProtocolHandler ( "mailto" , GURL ( "http://other.com/%s" ) ) ;
ASSERT_EQ ( ph1 . url ( ) . GetOrigin ( ) == ph2 . url ( ) . GetOrigin ( ) , ph1 . IsSameOrigin ( ph2 ) ) ;
ASSERT_EQ ( ph1 . url ( ) . GetOrigin ( ) == ph2 . url ( ) . GetOrigin ( ) , ph2 . IsSameOrigin ( ph1 ) ) ;
ASSERT_EQ ( ph2 . url ( ) . GetOrigin ( ) == ph3 . url ( ) . GetOrigin ( ) , ph2 . IsSameOrigin ( ph3 ) ) ;
ASSERT_EQ ( ph3 . url ( ) . GetOrigin ( ) == ph2 . url ( ) . GetOrigin ( ) , ph3 . IsSameOrigin ( ph2 ) ) ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
int dtls1_retransmit_buffered_messages ( SSL * s ) {
pqueue * sent = s -> d1 -> sent_messages ;
piterator iter ;
pitem * item ;
hm_fragment * frag ;
int found = 0 ;
iter = pqueue_iterator ( sent ) ;
for ( item = pqueue_next ( & iter ) ;
item != NULL ;
item = pqueue_next ( & iter ) ) {
frag = ( hm_fragment * ) item -> data ;
if ( dtls1_retransmit_message ( s , ( unsigned short ) dtls1_get_queue_priority ( frag -> msg_header . seq , frag -> msg_header . is_ccs ) , & found ) <= 0 ) return - 1 ;
}
return 1 ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
void vp9_iht8x8_64_add_c ( const tran_low_t * input , uint8_t * dest , int stride , int tx_type ) {
int i , j ;
tran_low_t out [ 8 * 8 ] ;
tran_low_t * outptr = out ;
tran_low_t temp_in [ 8 ] , temp_out [ 8 ] ;
const transform_2d ht = IHT_8 [ tx_type ] ;
for ( i = 0 ;
i < 8 ;
++ i ) {
ht . rows ( input , outptr ) ;
input += 8 ;
outptr += 8 ;
}
for ( i = 0 ;
i < 8 ;
++ i ) {
for ( j = 0 ;
j < 8 ;
++ j ) temp_in [ j ] = out [ j * 8 + i ] ;
ht . cols ( temp_in , temp_out ) ;
for ( j = 0 ;
j < 8 ;
++ j ) dest [ j * stride + i ] = clip_pixel ( ROUND_POWER_OF_TWO ( temp_out [ j ] , 5 ) + dest [ j * stride + i ] ) ;
}
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
void proto_item_append_text ( proto_item * pi , const char * format , ... ) {
field_info * fi = NULL ;
size_t curlen ;
va_list ap ;
TRY_TO_FAKE_THIS_REPR_VOID ( pi ) ;
fi = PITEM_FINFO ( pi ) ;
if ( fi == NULL ) {
return ;
}
if ( ! PROTO_ITEM_IS_HIDDEN ( pi ) ) {
if ( fi -> rep == NULL ) {
ITEM_LABEL_NEW ( PNODE_POOL ( pi ) , fi -> rep ) ;
proto_item_fill_label ( fi , fi -> rep -> representation ) ;
}
curlen = strlen ( fi -> rep -> representation ) ;
if ( ITEM_LABEL_LENGTH > curlen ) {
va_start ( ap , format ) ;
g_vsnprintf ( fi -> rep -> representation + curlen , ITEM_LABEL_LENGTH - ( gulong ) curlen , format , ap ) ;
va_end ( ap ) ;
}
}
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static int decode_frame ( AVCodecContext * avctx , void * data , int * got_frame , AVPacket * avpkt ) {
const uint8_t * buf = avpkt -> data ;
int buf_size = avpkt -> size ;
NuvContext * c = avctx -> priv_data ;
AVFrame * picture = data ;
int orig_size = buf_size ;
int keyframe ;
int result , init_frame = ! avctx -> frame_number ;
enum {
NUV_UNCOMPRESSED = '0' , NUV_RTJPEG = '1' , NUV_RTJPEG_IN_LZO = '2' , NUV_LZO = '3' , NUV_BLACK = 'N' , NUV_COPY_LAST = 'L' }
comptype ;
if ( buf_size < 12 ) {
av_log ( avctx , AV_LOG_ERROR , "coded frame too small\n" ) ;
return AVERROR_INVALIDDATA ;
}
if ( buf [ 0 ] == 'D' && buf [ 1 ] == 'R' ) {
int ret ;
buf = & buf [ 12 ] ;
buf_size -= 12 ;
ret = get_quant ( avctx , c , buf , buf_size ) ;
if ( ret < 0 ) return ret ;
ff_rtjpeg_decode_init ( & c -> rtj , & c -> dsp , c -> width , c -> height , c -> lq , c -> cq ) ;
return orig_size ;
}
if ( buf [ 0 ] != 'V' || buf_size < 12 ) {
av_log ( avctx , AV_LOG_ERROR , "not a nuv video frame\n" ) ;
return AVERROR_INVALIDDATA ;
}
comptype = buf [ 1 ] ;
switch ( comptype ) {
case NUV_RTJPEG_IN_LZO : case NUV_RTJPEG : keyframe = ! buf [ 2 ] ;
break ;
case NUV_COPY_LAST : keyframe = 0 ;
break ;
default : keyframe = 1 ;
break ;
}
buf = & buf [ 12 ] ;
buf_size -= 12 ;
if ( comptype == NUV_RTJPEG_IN_LZO || comptype == NUV_LZO ) {
int outlen = c -> decomp_size , inlen = buf_size ;
if ( av_lzo1x_decode ( c -> decomp_buf , & outlen , buf , & inlen ) ) av_log ( avctx , AV_LOG_ERROR , "error during lzo decompression\n" ) ;
buf = c -> decomp_buf ;
buf_size = c -> decomp_size ;
}
if ( c -> codec_frameheader ) {
int w , h , q ;
if ( buf_size < RTJPEG_HEADER_SIZE || buf [ 4 ] != RTJPEG_HEADER_SIZE || buf [ 5 ] != RTJPEG_FILE_VERSION ) {
av_log ( avctx , AV_LOG_ERROR , "invalid nuv video frame\n" ) ;
return AVERROR_INVALIDDATA ;
}
w = AV_RL16 ( & buf [ 6 ] ) ;
h = AV_RL16 ( & buf [ 8 ] ) ;
q = buf [ 10 ] ;
if ( ( result = codec_reinit ( avctx , w , h , q ) ) < 0 ) return result ;
buf = & buf [ RTJPEG_HEADER_SIZE ] ;
buf_size -= RTJPEG_HEADER_SIZE ;
}
if ( keyframe && c -> pic . data [ 0 ] ) {
avctx -> release_buffer ( avctx , & c -> pic ) ;
init_frame = 1 ;
}
c -> pic . reference = 3 ;
c -> pic . buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_READABLE | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE ;
result = avctx -> reget_buffer ( avctx , & c -> pic ) ;
if ( result < 0 ) {
av_log ( avctx , AV_LOG_ERROR , "get_buffer() failed\n" ) ;
return result ;
}
if ( init_frame ) {
memset ( c -> pic . data [ 0 ] , 0 , avctx -> height * c -> pic . linesize [ 0 ] ) ;
memset ( c -> pic . data [ 1 ] , 0x80 , avctx -> height * c -> pic . linesize [ 1 ] / 2 ) ;
memset ( c -> pic . data [ 2 ] , 0x80 , avctx -> height * c -> pic . linesize [ 2 ] / 2 ) ;
}
c -> pic . pict_type = keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P ;
c -> pic . key_frame = keyframe ;
switch ( comptype ) {
case NUV_LZO : case NUV_UNCOMPRESSED : {
int height = c -> height ;
if ( buf_size < c -> width * height * 3 / 2 ) {
av_log ( avctx , AV_LOG_ERROR , "uncompressed frame too short\n" ) ;
height = buf_size / c -> width / 3 * 2 ;
}
copy_frame ( & c -> pic , buf , c -> width , height ) ;
break ;
}
case NUV_RTJPEG_IN_LZO : case NUV_RTJPEG : ff_rtjpeg_decode_frame_yuv420 ( & c -> rtj , & c -> pic , buf , buf_size ) ;
break ;
case NUV_BLACK : memset ( c -> pic . data [ 0 ] , 0 , c -> width * c -> height ) ;
memset ( c -> pic . data [ 1 ] , 128 , c -> width * c -> height / 4 ) ;
memset ( c -> pic . data [ 2 ] , 128 , c -> width * c -> height / 4 ) ;
break ;
case NUV_COPY_LAST : break ;
default : av_log ( avctx , AV_LOG_ERROR , "unknown compression\n" ) ;
return AVERROR_INVALIDDATA ;
}
* picture = c -> pic ;
* got_frame = 1 ;
return orig_size ;
}
| 1True
|
Categorize the following code snippet as vulnerable or not. True or False
|
static int addXMLCommand ( XMLRPCCmd * xml ) {
if ( XMLRPCCMD == NULL ) XMLRPCCMD = mowgli_patricia_create ( strcasecanon ) ;
mowgli_patricia_add ( XMLRPCCMD , xml -> name , xml ) ;
return XMLRPC_ERR_OK ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
int ff_rv34_get_start_offset ( GetBitContext * gb , int mb_size ) {
int i ;
for ( i = 0 ;
i < 5 ;
i ++ ) if ( rv34_mb_max_sizes [ i ] >= mb_size - 1 ) break ;
return rv34_mb_bits_sizes [ i ] ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
void var_query_set ( VAR * var , const char * query , const char * * query_end ) {
char * end = ( char * ) ( ( query_end && * query_end ) ? * query_end : query + strlen ( query ) ) ;
MYSQL_RES * res ;
MYSQL_ROW row ;
MYSQL * mysql = cur_con -> mysql ;
DYNAMIC_STRING ds_query ;
DBUG_ENTER ( "var_query_set" ) ;
LINT_INIT ( res ) ;
if ( ! mysql ) {
struct st_command command ;
memset ( & command , 0 , sizeof ( command ) ) ;
command . query = ( char * ) query ;
command . first_word_len = ( * query_end - query ) ;
command . first_argument = command . query + command . first_word_len ;
command . end = ( char * ) * query_end ;
command . abort_on_error = 1 ;
handle_no_active_connection ( & command , cur_con , & ds_res ) ;
DBUG_VOID_RETURN ;
}
while ( end > query && * end != '`' ) {
if ( * end && ( * end != ' ' && * end != '\t' && * end != '\n' && * end != ')' ) ) die ( "Spurious text after `query` expression" ) ;
-- end ;
}
if ( query == end ) die ( "Syntax error in query, missing '`'" ) ;
++ query ;
init_dynamic_string ( & ds_query , 0 , ( end - query ) + 32 , 256 ) ;
do_eval ( & ds_query , query , end , FALSE ) ;
if ( mysql_real_query ( mysql , ds_query . str , ds_query . length ) ) {
handle_error ( curr_command , mysql_errno ( mysql ) , mysql_error ( mysql ) , mysql_sqlstate ( mysql ) , & ds_res ) ;
dynstr_free ( & ds_query ) ;
eval_expr ( var , "" , 0 ) ;
DBUG_VOID_RETURN ;
}
if ( ! ( res = mysql_store_result ( mysql ) ) ) {
report_or_die ( "Query '%s' didn't return a result set" , ds_query . str ) ;
dynstr_free ( & ds_query ) ;
eval_expr ( var , "" , 0 ) ;
return ;
}
dynstr_free ( & ds_query ) ;
if ( ( row = mysql_fetch_row ( res ) ) && row [ 0 ] ) {
DYNAMIC_STRING result ;
uint i ;
ulong * lengths ;
init_dynamic_string ( & result , "" , 512 , 512 ) ;
lengths = mysql_fetch_lengths ( res ) ;
for ( i = 0 ;
i < mysql_num_fields ( res ) ;
i ++ ) {
if ( row [ i ] ) {
char * val = row [ i ] ;
int len = lengths [ i ] ;
if ( glob_replace_regex ) {
if ( ! multi_reg_replace ( glob_replace_regex , ( char * ) val ) ) {
val = glob_replace_regex -> buf ;
len = strlen ( val ) ;
}
}
if ( glob_replace ) replace_strings_append ( glob_replace , & result , val , len ) ;
else dynstr_append_mem ( & result , val , len ) ;
}
dynstr_append_mem ( & result , "\t" , 1 ) ;
}
end = result . str + result . length - 1 ;
eval_expr ( var , result . str , ( const char * * ) & end , false , false ) ;
dynstr_free ( & result ) ;
}
else eval_expr ( var , "" , 0 ) ;
mysql_free_result ( res ) ;
DBUG_VOID_RETURN ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
proto_tree * proto_item_get_subtree ( proto_item * pi ) {
field_info * fi ;
if ( ! pi ) return NULL ;
fi = PITEM_FINFO ( pi ) ;
if ( ( ! fi ) || ( fi -> tree_type == - 1 ) ) return NULL ;
return ( proto_tree * ) pi ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static int dissect_h225_OCTET_STRING_SIZE_1_256 ( tvbuff_t * tvb _U_ , int offset _U_ , asn1_ctx_t * actx _U_ , proto_tree * tree _U_ , int hf_index _U_ ) {
offset = dissect_per_octet_string ( tvb , offset , actx , tree , hf_index , 1 , 256 , FALSE , NULL ) ;
return offset ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
IN_PROC_BROWSER_TEST_F ( FramebustBlockBrowserTest , SimpleFramebust_Blocked ) {
ui_test_utils : : NavigateToURL ( browser ( ) , embedded_test_server ( ) -> GetURL ( "/iframe.html" ) ) ;
GURL child_url = embedded_test_server ( ) -> GetURL ( "a.com" , "/title1.html" ) ;
NavigateIframeToUrlWithoutGesture ( GetWebContents ( ) , "test" , child_url ) ;
content : : RenderFrameHost * child = content : : ChildFrameAt ( GetWebContents ( ) -> GetMainFrame ( ) , 0 ) ;
EXPECT_EQ ( child_url , child -> GetLastCommittedURL ( ) ) ;
GURL redirect_url = embedded_test_server ( ) -> GetURL ( "b.com" , "/title1.html" ) ;
base : : RunLoop block_waiter ;
blocked_url_added_closure_ = block_waiter . QuitClosure ( ) ;
child -> ExecuteJavaScriptForTests ( base : : ASCIIToUTF16 ( base : : StringPrintf ( "window.top.location = '%s';" , redirect_url . spec ( ) . c_str ( ) ) ) ) ;
block_waiter . Run ( ) ;
EXPECT_TRUE ( base : : ContainsValue ( GetFramebustTabHelper ( ) -> blocked_urls ( ) , redirect_url ) ) ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static void std_conv_pixmap ( fz_context * ctx , fz_pixmap * dst , fz_pixmap * src , fz_colorspace * prf , const fz_default_colorspaces * default_cs , const fz_color_params * color_params , int copy_spots ) {
float srcv [ FZ_MAX_COLORS ] ;
float dstv [ FZ_MAX_COLORS ] ;
int srcn , dstn ;
int k , i ;
size_t w = src -> w ;
int h = src -> h ;
ptrdiff_t d_line_inc = dst -> stride - w * dst -> n ;
ptrdiff_t s_line_inc = src -> stride - w * src -> n ;
int da = dst -> alpha ;
int sa = src -> alpha ;
fz_colorspace * ss = src -> colorspace ;
fz_colorspace * ds = dst -> colorspace ;
unsigned char * s = src -> samples ;
unsigned char * d = dst -> samples ;
if ( ( int ) w < 0 || h < 0 ) return ;
if ( color_params == NULL ) color_params = fz_default_color_params ( ctx ) ;
srcn = ss -> n ;
dstn = ds -> n ;
assert ( src -> w == dst -> w && src -> h == dst -> h ) ;
assert ( src -> n == srcn + sa ) ;
assert ( dst -> n == dstn + da ) ;
if ( d_line_inc == 0 && s_line_inc == 0 ) {
w *= h ;
h = 1 ;
}
if ( ( fz_colorspace_is_lab ( ctx , ss ) || fz_colorspace_is_lab_icc ( ctx , ss ) ) && srcn == 3 ) {
fz_color_converter cc ;
fz_find_color_converter ( ctx , & cc , NULL , ds , ss , color_params ) ;
while ( h -- ) {
size_t ww = w ;
while ( ww -- ) {
srcv [ 0 ] = * s ++ / 255.0f * 100 ;
srcv [ 1 ] = * s ++ - 128 ;
srcv [ 2 ] = * s ++ - 128 ;
cc . convert ( ctx , & cc , dstv , srcv ) ;
for ( k = 0 ;
k < dstn ;
k ++ ) * d ++ = dstv [ k ] * 255 ;
if ( da ) * d ++ = ( sa ? * s : 255 ) ;
s += sa ;
}
d += d_line_inc ;
s += s_line_inc ;
}
fz_drop_color_converter ( ctx , & cc ) ;
}
else if ( w * h < 256 ) {
fz_color_converter cc ;
fz_find_color_converter ( ctx , & cc , NULL , ds , ss , color_params ) ;
while ( h -- ) {
size_t ww = w ;
while ( ww -- ) {
for ( k = 0 ;
k < srcn ;
k ++ ) srcv [ k ] = * s ++ / 255.0f ;
cc . convert ( ctx , & cc , dstv , srcv ) ;
for ( k = 0 ;
k < dstn ;
k ++ ) * d ++ = dstv [ k ] * 255 ;
if ( da ) * d ++ = ( sa ? * s : 255 ) ;
s += sa ;
}
d += d_line_inc ;
s += s_line_inc ;
}
fz_drop_color_converter ( ctx , & cc ) ;
}
else if ( srcn == 1 ) {
unsigned char lookup [ FZ_MAX_COLORS * 256 ] ;
fz_color_converter cc ;
fz_find_color_converter ( ctx , & cc , NULL , ds , ss , color_params ) ;
for ( i = 0 ;
i < 256 ;
i ++ ) {
srcv [ 0 ] = i / 255.0f ;
cc . convert ( ctx , & cc , dstv , srcv ) ;
for ( k = 0 ;
k < dstn ;
k ++ ) lookup [ i * dstn + k ] = dstv [ k ] * 255 ;
}
fz_drop_color_converter ( ctx , & cc ) ;
while ( h -- ) {
size_t ww = w ;
while ( ww -- ) {
i = * s ++ ;
for ( k = 0 ;
k < dstn ;
k ++ ) * d ++ = lookup [ i * dstn + k ] ;
if ( da ) * d ++ = ( sa ? * s : 255 ) ;
s += sa ;
}
d += d_line_inc ;
s += s_line_inc ;
}
}
else {
fz_hash_table * lookup ;
unsigned char * color ;
unsigned char dummy = s [ 0 ] ^ 255 ;
unsigned char * sold = & dummy ;
unsigned char * dold ;
fz_color_converter cc ;
lookup = fz_new_hash_table ( ctx , 509 , srcn , - 1 , NULL ) ;
fz_find_color_converter ( ctx , & cc , NULL , ds , ss , color_params ) ;
fz_try ( ctx ) {
while ( h -- ) {
size_t ww = w ;
while ( ww -- ) {
if ( * s == * sold && memcmp ( sold , s , srcn ) == 0 ) {
sold = s ;
memcpy ( d , dold , dstn ) ;
d += dstn ;
s += srcn ;
if ( da ) * d ++ = ( sa ? * s : 255 ) ;
s += sa ;
}
else {
sold = s ;
dold = d ;
color = fz_hash_find ( ctx , lookup , s ) ;
if ( color ) {
memcpy ( d , color , dstn ) ;
s += srcn ;
d += dstn ;
if ( da ) * d ++ = ( sa ? * s : 255 ) ;
s += sa ;
}
else {
for ( k = 0 ;
k < srcn ;
k ++ ) srcv [ k ] = * s ++ / 255.0f ;
cc . convert ( ctx , & cc , dstv , srcv ) ;
for ( k = 0 ;
k < dstn ;
k ++ ) * d ++ = dstv [ k ] * 255 ;
fz_hash_insert ( ctx , lookup , s - srcn , d - dstn ) ;
if ( da ) * d ++ = ( sa ? * s : 255 ) ;
s += sa ;
}
}
}
d += d_line_inc ;
s += s_line_inc ;
}
}
fz_always ( ctx ) fz_drop_color_converter ( ctx , & cc ) ;
fz_catch ( ctx ) fz_rethrow ( ctx ) ;
fz_drop_hash_table ( ctx , lookup ) ;
}
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
void DefaultTTFEnglishNames ( struct ttflangname * dummy , SplineFont * sf ) {
time_t now ;
struct tm * tm ;
char buffer [ 200 ] ;
if ( dummy -> names [ ttf_copyright ] == NULL || * dummy -> names [ ttf_copyright ] == '\0' ) dummy -> names [ ttf_copyright ] = utf8_verify_copy ( sf -> copyright ) ;
if ( dummy -> names [ ttf_family ] == NULL || * dummy -> names [ ttf_family ] == '\0' ) dummy -> names [ ttf_family ] = utf8_verify_copy ( sf -> familyname ) ;
if ( dummy -> names [ ttf_subfamily ] == NULL || * dummy -> names [ ttf_subfamily ] == '\0' ) dummy -> names [ ttf_subfamily ] = utf8_verify_copy ( SFGetModifiers ( sf ) ) ;
if ( dummy -> names [ ttf_uniqueid ] == NULL || * dummy -> names [ ttf_uniqueid ] == '\0' ) {
time ( & now ) ;
tm = localtime ( & now ) ;
snprintf ( buffer , sizeof ( buffer ) , "%s : %s : %d-%d-%d" , BDFFoundry ? BDFFoundry : TTFFoundry ? TTFFoundry : "FontForge 2.0" , sf -> fullname != NULL ? sf -> fullname : sf -> fontname , tm -> tm_mday , tm -> tm_mon + 1 , tm -> tm_year + 1900 ) ;
dummy -> names [ ttf_uniqueid ] = copy ( buffer ) ;
}
if ( dummy -> names [ ttf_fullname ] == NULL || * dummy -> names [ ttf_fullname ] == '\0' ) dummy -> names [ ttf_fullname ] = utf8_verify_copy ( sf -> fullname ) ;
if ( dummy -> names [ ttf_version ] == NULL || * dummy -> names [ ttf_version ] == '\0' ) {
if ( sf -> subfontcnt != 0 ) sprintf ( buffer , "Version %f " , ( double ) sf -> cidversion ) ;
else if ( sf -> version != NULL ) sprintf ( buffer , "Version %.20s " , sf -> version ) ;
else strcpy ( buffer , "Version 1.0" ) ;
dummy -> names [ ttf_version ] = copy ( buffer ) ;
}
if ( dummy -> names [ ttf_postscriptname ] == NULL || * dummy -> names [ ttf_postscriptname ] == '\0' ) dummy -> names [ ttf_postscriptname ] = utf8_verify_copy ( sf -> fontname ) ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static Int decCompare ( const decNumber * lhs , const decNumber * rhs , Flag abs_c ) {
Int result ;
Int sigr ;
Int compare ;
result = 1 ;
if ( ISZERO ( lhs ) ) result = 0 ;
if ( abs_c ) {
if ( ISZERO ( rhs ) ) return result ;
if ( result == 0 ) return - 1 ;
}
else {
if ( result && decNumberIsNegative ( lhs ) ) result = - 1 ;
sigr = 1 ;
if ( ISZERO ( rhs ) ) sigr = 0 ;
else if ( decNumberIsNegative ( rhs ) ) sigr = - 1 ;
if ( result > sigr ) return + 1 ;
if ( result < sigr ) return - 1 ;
if ( result == 0 ) return 0 ;
}
if ( ( lhs -> bits | rhs -> bits ) & DECINF ) {
if ( decNumberIsInfinite ( rhs ) ) {
if ( decNumberIsInfinite ( lhs ) ) result = 0 ;
else result = - result ;
}
return result ;
}
if ( lhs -> exponent > rhs -> exponent ) {
const decNumber * temp = lhs ;
lhs = rhs ;
rhs = temp ;
result = - result ;
}
compare = decUnitCompare ( lhs -> lsu , D2U ( lhs -> digits ) , rhs -> lsu , D2U ( rhs -> digits ) , rhs -> exponent - lhs -> exponent ) ;
if ( compare != BADINT ) compare *= result ;
return compare ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static int dissect_pvfs2_mgmt_perf_mon_response ( tvbuff_t * tvb , proto_tree * tree , int offset ) {
guint32 perf_array_count , i ;
proto_tree_add_item ( tree , hf_pvfs_mgmt_perf_mon_response_suggested_next_id , tvb , offset , 4 , ENC_LITTLE_ENDIAN ) ;
offset += 4 ;
offset += 4 ;
offset = dissect_pvfs_uint64 ( tvb , tree , offset , hf_pvfs_end_time_ms , NULL ) ;
offset = dissect_pvfs_uint64 ( tvb , tree , offset , hf_pvfs_cur_time_ms , NULL ) ;
offset += 4 ;
perf_array_count = tvb_get_letohl ( tvb , offset ) ;
proto_tree_add_item ( tree , hf_pvfs_mgmt_perf_mon_response_perf_array_count , tvb , offset , 4 , ENC_LITTLE_ENDIAN ) ;
offset += 4 ;
for ( i = 0 ;
i < perf_array_count ;
i ++ ) offset = dissect_pvfs_mgmt_perf_stat ( tvb , tree , offset , i ) ;
return offset ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
int archive_string_append_from_wcs ( struct archive_string * as , const wchar_t * w , size_t len ) {
( void ) as ;
( void ) w ;
( void ) len ;
errno = ENOSYS ;
return ( - 1 ) ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
int ssl_choose_server_version ( SSL * s ) {
int server_version = s -> method -> version ;
int client_version = s -> client_version ;
const version_info * vent ;
const version_info * table ;
int disabled = 0 ;
switch ( server_version ) {
default : if ( version_cmp ( s , client_version , s -> version ) < 0 ) return SSL_R_WRONG_SSL_VERSION ;
return 0 ;
case TLS_ANY_VERSION : table = tls_version_table ;
break ;
case DTLS_ANY_VERSION : table = dtls_version_table ;
break ;
}
for ( vent = table ;
vent -> version != 0 ;
++ vent ) {
const SSL_METHOD * method ;
if ( vent -> smeth == NULL || version_cmp ( s , client_version , vent -> version ) < 0 ) continue ;
method = vent -> smeth ( ) ;
if ( ssl_method_error ( s , method ) == 0 ) {
s -> version = vent -> version ;
s -> method = method ;
return 0 ;
}
disabled = 1 ;
}
return disabled ? SSL_R_UNSUPPORTED_PROTOCOL : SSL_R_VERSION_TOO_LOW ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static inline int get_bits_diff ( MpegEncContext * s ) {
const int bits = put_bits_count ( & s -> pb ) ;
const int last = s -> last_bits ;
s -> last_bits = bits ;
return bits - last ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
op_array_table * get_op_array ( const gs_memory_t * mem , int size ) {
gs_main_instance * minst = get_minst_from_memory ( mem ) ;
return op_index_op_array_table ( minst -> i_ctx_p , size ) ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
proto_item * proto_tree_add_guid ( proto_tree * tree , int hfindex , tvbuff_t * tvb , gint start , gint length , const e_guid_t * value_ptr ) {
proto_item * pi ;
header_field_info * hfinfo ;
CHECK_FOR_NULL_TREE ( tree ) ;
TRY_TO_FAKE_THIS_ITEM ( tree , hfindex , hfinfo ) ;
DISSECTOR_ASSERT_FIELD_TYPE ( hfinfo , FT_GUID ) ;
pi = proto_tree_add_pi ( tree , hfinfo , tvb , start , & length ) ;
proto_tree_set_guid ( PNODE_FINFO ( pi ) , value_ptr ) ;
return pi ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
int cpu_exec ( CPUArchState * env ) {
CPUState * cpu = ENV_GET_CPU ( env ) ;
# if ! ( defined ( CONFIG_USER_ONLY ) && ( defined ( TARGET_M68K ) || defined ( TARGET_PPC ) || defined ( TARGET_S390X ) ) )
CPUClass * cc = CPU_GET_CLASS ( cpu ) ;
# endif
int ret , interrupt_request ;
TranslationBlock * tb ;
uint8_t * tc_ptr ;
tcg_target_ulong next_tb ;
if ( cpu -> halted ) {
if ( ! cpu_has_work ( cpu ) ) {
return EXCP_HALTED ;
}
cpu -> halted = 0 ;
}
current_cpu = cpu ;
smp_mb ( ) ;
if ( unlikely ( exit_request ) ) {
cpu -> exit_request = 1 ;
}
# if defined ( TARGET_I386 )
CC_SRC = env -> eflags & ( CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C ) ;
env -> df = 1 - ( 2 * ( ( env -> eflags >> 10 ) & 1 ) ) ;
CC_OP = CC_OP_EFLAGS ;
env -> eflags &= ~ ( DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C ) ;
# elif defined ( TARGET_SPARC )
# elif defined ( TARGET_M68K )
env -> cc_op = CC_OP_FLAGS ;
env -> cc_dest = env -> sr & 0xf ;
env -> cc_x = ( env -> sr >> 4 ) & 1 ;
# elif defined ( TARGET_ALPHA )
# elif defined ( TARGET_ARM )
# elif defined ( TARGET_UNICORE32 )
# elif defined ( TARGET_PPC )
env -> reserve_addr = - 1 ;
# elif defined ( TARGET_LM32 )
# elif defined ( TARGET_MICROBLAZE )
# elif defined ( TARGET_MIPS )
# elif defined ( TARGET_MOXIE )
# elif defined ( TARGET_OPENRISC )
# elif defined ( TARGET_SH4 )
# elif defined ( TARGET_CRIS )
# elif defined ( TARGET_S390X )
# elif defined ( TARGET_XTENSA )
# else
# error unsupported target CPU
# endif
env -> exception_index = - 1 ;
for ( ;
;
) {
if ( sigsetjmp ( env -> jmp_env , 0 ) == 0 ) {
if ( env -> exception_index >= 0 ) {
if ( env -> exception_index >= EXCP_INTERRUPT ) {
ret = env -> exception_index ;
if ( ret == EXCP_DEBUG ) {
cpu_handle_debug_exception ( env ) ;
}
break ;
}
else {
# if defined ( CONFIG_USER_ONLY )
# if defined ( TARGET_I386 )
cc -> do_interrupt ( cpu ) ;
# endif
ret = env -> exception_index ;
break ;
# else
cc -> do_interrupt ( cpu ) ;
env -> exception_index = - 1 ;
# endif
}
}
next_tb = 0 ;
for ( ;
;
) {
interrupt_request = cpu -> interrupt_request ;
if ( unlikely ( interrupt_request ) ) {
if ( unlikely ( env -> singlestep_enabled & SSTEP_NOIRQ ) ) {
interrupt_request &= ~ CPU_INTERRUPT_SSTEP_MASK ;
}
if ( interrupt_request & CPU_INTERRUPT_DEBUG ) {
cpu -> interrupt_request &= ~ CPU_INTERRUPT_DEBUG ;
env -> exception_index = EXCP_DEBUG ;
cpu_loop_exit ( env ) ;
}
# if defined ( TARGET_ARM ) || defined ( TARGET_SPARC ) || defined ( TARGET_MIPS ) || defined ( TARGET_PPC ) || defined ( TARGET_ALPHA ) || defined ( TARGET_CRIS ) || defined ( TARGET_MICROBLAZE ) || defined ( TARGET_LM32 ) || defined ( TARGET_UNICORE32 )
if ( interrupt_request & CPU_INTERRUPT_HALT ) {
cpu -> interrupt_request &= ~ CPU_INTERRUPT_HALT ;
cpu -> halted = 1 ;
env -> exception_index = EXCP_HLT ;
cpu_loop_exit ( env ) ;
}
# endif # if defined ( TARGET_I386 ) # if ! defined ( CONFIG_USER_ONLY ) if ( interrupt_request & CPU_INTERRUPT_POLL ) {
cpu -> interrupt_request &= ~ CPU_INTERRUPT_POLL ;
apic_poll_irq ( env -> apic_state ) ;
}
# endif if ( interrupt_request & CPU_INTERRUPT_INIT ) {
cpu_svm_check_intercept_param ( env , SVM_EXIT_INIT , 0 ) ;
do_cpu_init ( x86_env_get_cpu ( env ) ) ;
env -> exception_index = EXCP_HALTED ;
cpu_loop_exit ( env ) ;
}
else if ( interrupt_request & CPU_INTERRUPT_SIPI ) {
do_cpu_sipi ( x86_env_get_cpu ( env ) ) ;
}
else if ( env -> hflags2 & HF2_GIF_MASK ) {
if ( ( interrupt_request & CPU_INTERRUPT_SMI ) && ! ( env -> hflags & HF_SMM_MASK ) ) {
cpu_svm_check_intercept_param ( env , SVM_EXIT_SMI , 0 ) ;
cpu -> interrupt_request &= ~ CPU_INTERRUPT_SMI ;
do_smm_enter ( env ) ;
next_tb = 0 ;
}
else if ( ( interrupt_request & CPU_INTERRUPT_NMI ) && ! ( env -> hflags2 & HF2_NMI_MASK ) ) {
cpu -> interrupt_request &= ~ CPU_INTERRUPT_NMI ;
env -> hflags2 |= HF2_NMI_MASK ;
do_interrupt_x86_hardirq ( env , EXCP02_NMI , 1 ) ;
next_tb = 0 ;
}
else if ( interrupt_request & CPU_INTERRUPT_MCE ) {
cpu -> interrupt_request &= ~ CPU_INTERRUPT_MCE ;
do_interrupt_x86_hardirq ( env , EXCP12_MCHK , 0 ) ;
next_tb = 0 ;
}
else if ( ( interrupt_request & CPU_INTERRUPT_HARD ) && ( ( ( env -> hflags2 & HF2_VINTR_MASK ) && ( env -> hflags2 & HF2_HIF_MASK ) ) || ( ! ( env -> hflags2 & HF2_VINTR_MASK ) && ( env -> eflags & IF_MASK && ! ( env -> hflags & HF_INHIBIT_IRQ_MASK ) ) ) ) ) {
int intno ;
cpu_svm_check_intercept_param ( env , SVM_EXIT_INTR , 0 ) ;
cpu -> interrupt_request &= ~ ( CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ ) ;
intno = cpu_get_pic_interrupt ( env ) ;
qemu_log_mask ( CPU_LOG_TB_IN_ASM , "Servicing hardware INT=0x%02x\n" , intno ) ;
do_interrupt_x86_hardirq ( env , intno , 1 ) ;
next_tb = 0 ;
# if ! defined ( CONFIG_USER_ONLY ) }
else if ( ( interrupt_request & CPU_INTERRUPT_VIRQ ) && ( env -> eflags & IF_MASK ) && ! ( env -> hflags & HF_INHIBIT_IRQ_MASK ) ) {
int intno ;
cpu_svm_check_intercept_param ( env , SVM_EXIT_VINTR , 0 ) ;
intno = ldl_phys ( env -> vm_vmcb + offsetof ( struct vmcb , control . int_vector ) ) ;
qemu_log_mask ( CPU_LOG_TB_IN_ASM , "Servicing virtual hardware INT=0x%02x\n" , intno ) ;
do_interrupt_x86_hardirq ( env , intno , 1 ) ;
cpu -> interrupt_request &= ~ CPU_INTERRUPT_VIRQ ;
next_tb = 0 ;
# endif }
}
# elif defined ( TARGET_PPC ) if ( ( interrupt_request & CPU_INTERRUPT_RESET ) ) {
cpu_reset ( cpu ) ;
}
if ( interrupt_request & CPU_INTERRUPT_HARD ) {
ppc_hw_interrupt ( env ) ;
if ( env -> pending_interrupts == 0 ) {
cpu -> interrupt_request &= ~ CPU_INTERRUPT_HARD ;
}
next_tb = 0 ;
}
# elif defined ( TARGET_LM32 ) if ( ( interrupt_request & CPU_INTERRUPT_HARD ) && ( env -> ie & IE_IE ) ) {
env -> exception_index = EXCP_IRQ ;
cc -> do_interrupt ( cpu ) ;
next_tb = 0 ;
}
# elif defined ( TARGET_MICROBLAZE ) if ( ( interrupt_request & CPU_INTERRUPT_HARD ) && ( env -> sregs [ SR_MSR ] & MSR_IE ) && ! ( env -> sregs [ SR_MSR ] & ( MSR_EIP | MSR_BIP ) ) && ! ( env -> iflags & ( D_FLAG | IMM_FLAG ) ) ) {
env -> exception_index = EXCP_IRQ ;
cc -> do_interrupt ( cpu ) ;
next_tb = 0 ;
}
# elif defined ( TARGET_MIPS ) if ( ( interrupt_request & CPU_INTERRUPT_HARD ) && cpu_mips_hw_interrupts_pending ( env ) ) {
env -> exception_index = EXCP_EXT_INTERRUPT ;
env -> error_code = 0 ;
cc -> do_interrupt ( cpu ) ;
next_tb = 0 ;
}
# elif defined ( TARGET_OPENRISC ) {
int idx = - 1 ;
if ( ( interrupt_request & CPU_INTERRUPT_HARD ) && ( env -> sr & SR_IEE ) ) {
idx = EXCP_INT ;
}
if ( ( interrupt_request & CPU_INTERRUPT_TIMER ) && ( env -> sr & SR_TEE ) ) {
idx = EXCP_TICK ;
}
if ( idx >= 0 ) {
env -> exception_index = idx ;
cc -> do_interrupt ( cpu ) ;
next_tb = 0 ;
}
}
# elif defined ( TARGET_SPARC ) if ( interrupt_request & CPU_INTERRUPT_HARD ) {
if ( cpu_interrupts_enabled ( env ) && env -> interrupt_index > 0 ) {
int pil = env -> interrupt_index & 0xf ;
int type = env -> interrupt_index & 0xf0 ;
if ( ( ( type == TT_EXTINT ) && cpu_pil_allowed ( env , pil ) ) || type != TT_EXTINT ) {
env -> exception_index = env -> interrupt_index ;
cc -> do_interrupt ( cpu ) ;
next_tb = 0 ;
}
}
}
# elif defined ( TARGET_ARM ) if ( interrupt_request & CPU_INTERRUPT_FIQ && ! ( env -> uncached_cpsr & CPSR_F ) ) {
env -> exception_index = EXCP_FIQ ;
cc -> do_interrupt ( cpu ) ;
next_tb = 0 ;
}
if ( interrupt_request & CPU_INTERRUPT_HARD && ( ( IS_M ( env ) && env -> regs [ 15 ] < 0xfffffff0 ) || ! ( env -> uncached_cpsr & CPSR_I ) ) ) {
env -> exception_index = EXCP_IRQ ;
cc -> do_interrupt ( cpu ) ;
next_tb = 0 ;
}
# elif defined ( TARGET_UNICORE32 ) if ( interrupt_request & CPU_INTERRUPT_HARD && ! ( env -> uncached_asr & ASR_I ) ) {
env -> exception_index = UC32_EXCP_INTR ;
cc -> do_interrupt ( cpu ) ;
next_tb = 0 ;
}
# elif defined ( TARGET_SH4 ) if ( interrupt_request & CPU_INTERRUPT_HARD ) {
cc -> do_interrupt ( cpu ) ;
next_tb = 0 ;
}
# elif defined ( TARGET_ALPHA ) {
int idx = - 1 ;
switch ( env -> pal_mode ? 7 : env -> ps & PS_INT_MASK ) {
case 0 ... 3 : if ( interrupt_request & CPU_INTERRUPT_HARD ) {
idx = EXCP_DEV_INTERRUPT ;
}
case 4 : if ( interrupt_request & CPU_INTERRUPT_TIMER ) {
idx = EXCP_CLK_INTERRUPT ;
}
case 5 : if ( interrupt_request & CPU_INTERRUPT_SMP ) {
idx = EXCP_SMP_INTERRUPT ;
}
case 6 : if ( interrupt_request & CPU_INTERRUPT_MCHK ) {
idx = EXCP_MCHK ;
}
}
if ( idx >= 0 ) {
env -> exception_index = idx ;
env -> error_code = 0 ;
cc -> do_interrupt ( cpu ) ;
next_tb = 0 ;
}
}
# elif defined ( TARGET_CRIS ) if ( interrupt_request & CPU_INTERRUPT_HARD && ( env -> pregs [ PR_CCS ] & I_FLAG ) && ! env -> locked_irq ) {
env -> exception_index = EXCP_IRQ ;
cc -> do_interrupt ( cpu ) ;
next_tb = 0 ;
}
if ( interrupt_request & CPU_INTERRUPT_NMI ) {
unsigned int m_flag_archval ;
if ( env -> pregs [ PR_VR ] < 32 ) {
m_flag_archval = M_FLAG_V10 ;
}
else {
m_flag_archval = M_FLAG_V32 ;
}
if ( ( env -> pregs [ PR_CCS ] & m_flag_archval ) ) {
env -> exception_index = EXCP_NMI ;
cc -> do_interrupt ( cpu ) ;
next_tb = 0 ;
}
}
# elif defined ( TARGET_M68K ) if ( interrupt_request & CPU_INTERRUPT_HARD && ( ( env -> sr & SR_I ) >> SR_I_SHIFT ) < env -> pending_level ) {
env -> exception_index = env -> pending_vector ;
do_interrupt_m68k_hardirq ( env ) ;
next_tb = 0 ;
}
# elif defined ( TARGET_S390X ) && ! defined ( CONFIG_USER_ONLY ) if ( ( interrupt_request & CPU_INTERRUPT_HARD ) && ( env -> psw . mask & PSW_MASK_EXT ) ) {
cc -> do_interrupt ( cpu ) ;
next_tb = 0 ;
}
# elif defined ( TARGET_XTENSA ) if ( interrupt_request & CPU_INTERRUPT_HARD ) {
env -> exception_index = EXC_IRQ ;
cc -> do_interrupt ( cpu ) ;
next_tb = 0 ;
}
# endif if ( cpu -> interrupt_request & CPU_INTERRUPT_EXITTB ) {
cpu -> interrupt_request &= ~ CPU_INTERRUPT_EXITTB ;
next_tb = 0 ;
}
}
if ( unlikely ( cpu -> exit_request ) ) {
cpu -> exit_request = 0 ;
env -> exception_index = EXCP_INTERRUPT ;
cpu_loop_exit ( env ) ;
}
# if defined ( DEBUG_DISAS ) if ( qemu_loglevel_mask ( CPU_LOG_TB_CPU ) ) {
# if defined ( TARGET_I386 ) log_cpu_state ( env , CPU_DUMP_CCOP ) ;
# elif defined ( TARGET_M68K ) cpu_m68k_flush_flags ( env , env -> cc_op ) ;
env -> cc_op = CC_OP_FLAGS ;
env -> sr = ( env -> sr & 0xffe0 ) | env -> cc_dest | ( env -> cc_x << 4 ) ;
log_cpu_state ( env , 0 ) ;
# else log_cpu_state ( env , 0 ) ;
# endif }
# endif spin_lock ( & tcg_ctx . tb_ctx . tb_lock ) ;
tb = tb_find_fast ( env ) ;
if ( tcg_ctx . tb_ctx . tb_invalidated_flag ) {
next_tb = 0 ;
tcg_ctx . tb_ctx . tb_invalidated_flag = 0 ;
}
if ( qemu_loglevel_mask ( CPU_LOG_EXEC ) ) {
qemu_log ( "Trace %p [" TARGET_FMT_lx "] %s\n" , tb -> tc_ptr , tb -> pc , lookup_symbol ( tb -> pc ) ) ;
}
if ( next_tb != 0 && tb -> page_addr [ 1 ] == - 1 ) {
tb_add_jump ( ( TranslationBlock * ) ( next_tb & ~ TB_EXIT_MASK ) , next_tb & TB_EXIT_MASK , tb ) ;
}
spin_unlock ( & tcg_ctx . tb_ctx . tb_lock ) ;
cpu -> current_tb = tb ;
barrier ( ) ;
if ( likely ( ! cpu -> exit_request ) ) {
tc_ptr = tb -> tc_ptr ;
next_tb = cpu_tb_exec ( cpu , tc_ptr ) ;
switch ( next_tb & TB_EXIT_MASK ) {
case TB_EXIT_REQUESTED : tb = ( TranslationBlock * ) ( next_tb & ~ TB_EXIT_MASK ) ;
next_tb = 0 ;
break ;
case TB_EXIT_ICOUNT_EXPIRED : {
int insns_left ;
tb = ( TranslationBlock * ) ( next_tb & ~ TB_EXIT_MASK ) ;
insns_left = env -> icount_decr . u32 ;
if ( env -> icount_extra && insns_left >= 0 ) {
env -> icount_extra += insns_left ;
if ( env -> icount_extra > 0xffff ) {
insns_left = 0xffff ;
}
else {
insns_left = env -> icount_extra ;
}
env -> icount_extra -= insns_left ;
env -> icount_decr . u16 . low = insns_left ;
}
else {
if ( insns_left > 0 ) {
cpu_exec_nocache ( env , insns_left , tb ) ;
}
env -> exception_index = EXCP_INTERRUPT ;
next_tb = 0 ;
cpu_loop_exit ( env ) ;
}
break ;
}
default : break ;
}
}
cpu -> current_tb = NULL ;
}
}
else {
cpu = current_cpu ;
env = cpu -> env_ptr ;
}
}
# if defined ( TARGET_I386 ) env -> eflags = env -> eflags | cpu_cc_compute_all ( env , CC_OP ) | ( env -> df & DF_MASK ) ;
# elif defined ( TARGET_ARM ) # elif defined ( TARGET_UNICORE32 ) # elif defined ( TARGET_SPARC ) # elif defined ( TARGET_PPC ) # elif defined ( TARGET_LM32 ) # elif defined ( TARGET_M68K ) cpu_m68k_flush_flags ( env , env -> cc_op ) ;
env -> cc_op = CC_OP_FLAGS ;
env -> sr = ( env -> sr & 0xffe0 ) | env -> cc_dest | ( env -> cc_x << 4 ) ;
# elif defined ( TARGET_MICROBLAZE ) # elif defined ( TARGET_MIPS ) # elif defined ( TARGET_MOXIE ) # elif defined ( TARGET_OPENRISC ) # elif defined ( TARGET_SH4 ) # elif defined ( TARGET_ALPHA ) # elif defined ( TARGET_CRIS ) # elif defined ( TARGET_S390X ) # elif defined ( TARGET_XTENSA ) # else # error unsupported target CPU # endif current_cpu = NULL ;
return ret ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static void version ( ) {
fprintf ( stderr , "iodine IP over DNS tunneling server\n" ) ;
fprintf ( stderr , "Git version: %s\n" , GITREVISION ) ;
exit ( 0 ) ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static inline int is_specified_interface ( char * interface , const char * interface_prefix ) {
return ! strncmp ( interface , interface_prefix , strlen ( interface_prefix ) ) ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static int dca_decode_frame ( AVCodecContext * avctx , void * data , int * got_frame_ptr , AVPacket * avpkt ) {
AVFrame * frame = data ;
const uint8_t * buf = avpkt -> data ;
int buf_size = avpkt -> size ;
int lfe_samples ;
int num_core_channels = 0 ;
int i , ret ;
float * * samples_flt ;
DCAContext * s = avctx -> priv_data ;
int channels , full_channels ;
int core_ss_end ;
s -> xch_present = 0 ;
s -> dca_buffer_size = ff_dca_convert_bitstream ( buf , buf_size , s -> dca_buffer , DCA_MAX_FRAME_SIZE + DCA_MAX_EXSS_HEADER_SIZE ) ;
if ( s -> dca_buffer_size == AVERROR_INVALIDDATA ) {
av_log ( avctx , AV_LOG_ERROR , "Not a valid DCA frame\n" ) ;
return AVERROR_INVALIDDATA ;
}
init_get_bits ( & s -> gb , s -> dca_buffer , s -> dca_buffer_size * 8 ) ;
if ( ( ret = dca_parse_frame_header ( s ) ) < 0 ) {
return ret ;
}
avctx -> sample_rate = s -> sample_rate ;
avctx -> bit_rate = s -> bit_rate ;
s -> profile = FF_PROFILE_DTS ;
for ( i = 0 ;
i < ( s -> sample_blocks / 8 ) ;
i ++ ) {
if ( ( ret = dca_decode_block ( s , 0 , i ) ) ) {
av_log ( avctx , AV_LOG_ERROR , "error decoding block\n" ) ;
return ret ;
}
}
num_core_channels = s -> prim_channels ;
if ( s -> ext_coding ) s -> core_ext_mask = dca_ext_audio_descr_mask [ s -> ext_descr ] ;
else s -> core_ext_mask = 0 ;
core_ss_end = FFMIN ( s -> frame_size , s -> dca_buffer_size ) * 8 ;
if ( s -> core_ext_mask < 0 || s -> core_ext_mask & DCA_EXT_XCH ) {
s -> core_ext_mask = FFMAX ( s -> core_ext_mask , 0 ) ;
skip_bits_long ( & s -> gb , ( - get_bits_count ( & s -> gb ) ) & 31 ) ;
while ( core_ss_end - get_bits_count ( & s -> gb ) >= 32 ) {
uint32_t bits = get_bits_long ( & s -> gb , 32 ) ;
switch ( bits ) {
case 0x5a5a5a5a : {
int ext_amode , xch_fsize ;
s -> xch_base_channel = s -> prim_channels ;
xch_fsize = show_bits ( & s -> gb , 10 ) ;
if ( ( s -> frame_size != ( get_bits_count ( & s -> gb ) >> 3 ) - 4 + xch_fsize ) && ( s -> frame_size != ( get_bits_count ( & s -> gb ) >> 3 ) - 4 + xch_fsize + 1 ) ) continue ;
skip_bits ( & s -> gb , 10 ) ;
s -> core_ext_mask |= DCA_EXT_XCH ;
if ( ( ext_amode = get_bits ( & s -> gb , 4 ) ) != 1 ) {
av_log ( avctx , AV_LOG_ERROR , "XCh extension amode %d not" " supported!\n" , ext_amode ) ;
continue ;
}
dca_parse_audio_coding_header ( s , s -> xch_base_channel ) ;
for ( i = 0 ;
i < ( s -> sample_blocks / 8 ) ;
i ++ ) if ( ( ret = dca_decode_block ( s , s -> xch_base_channel , i ) ) ) {
av_log ( avctx , AV_LOG_ERROR , "error decoding XCh extension\n" ) ;
continue ;
}
s -> xch_present = 1 ;
break ;
}
case 0x47004a03 : s -> core_ext_mask |= DCA_EXT_XXCH ;
break ;
case 0x1d95f262 : {
int fsize96 = show_bits ( & s -> gb , 12 ) + 1 ;
if ( s -> frame_size != ( get_bits_count ( & s -> gb ) >> 3 ) - 4 + fsize96 ) continue ;
av_log ( avctx , AV_LOG_DEBUG , "X96 extension found at %d bits\n" , get_bits_count ( & s -> gb ) ) ;
skip_bits ( & s -> gb , 12 ) ;
av_log ( avctx , AV_LOG_DEBUG , "FSIZE96 = %d bytes\n" , fsize96 ) ;
av_log ( avctx , AV_LOG_DEBUG , "REVNO = %d\n" , get_bits ( & s -> gb , 4 ) ) ;
s -> core_ext_mask |= DCA_EXT_X96 ;
break ;
}
}
skip_bits_long ( & s -> gb , ( - get_bits_count ( & s -> gb ) ) & 31 ) ;
}
}
else {
skip_bits_long ( & s -> gb , core_ss_end - get_bits_count ( & s -> gb ) ) ;
}
if ( s -> core_ext_mask & DCA_EXT_X96 ) s -> profile = FF_PROFILE_DTS_96_24 ;
else if ( s -> core_ext_mask & ( DCA_EXT_XCH | DCA_EXT_XXCH ) ) s -> profile = FF_PROFILE_DTS_ES ;
if ( s -> dca_buffer_size - s -> frame_size > 32 && get_bits_long ( & s -> gb , 32 ) == DCA_HD_MARKER ) dca_exss_parse_header ( s ) ;
avctx -> profile = s -> profile ;
full_channels = channels = s -> prim_channels + ! ! s -> lfe ;
if ( s -> amode < 16 ) {
avctx -> channel_layout = dca_core_channel_layout [ s -> amode ] ;
if ( s -> xch_present && ( ! avctx -> request_channels || avctx -> request_channels > num_core_channels + ! ! s -> lfe ) ) {
avctx -> channel_layout |= AV_CH_BACK_CENTER ;
if ( s -> lfe ) {
avctx -> channel_layout |= AV_CH_LOW_FREQUENCY ;
s -> channel_order_tab = dca_channel_reorder_lfe_xch [ s -> amode ] ;
}
else {
s -> channel_order_tab = dca_channel_reorder_nolfe_xch [ s -> amode ] ;
}
}
else {
channels = num_core_channels + ! ! s -> lfe ;
s -> xch_present = 0 ;
if ( s -> lfe ) {
avctx -> channel_layout |= AV_CH_LOW_FREQUENCY ;
s -> channel_order_tab = dca_channel_reorder_lfe [ s -> amode ] ;
}
else s -> channel_order_tab = dca_channel_reorder_nolfe [ s -> amode ] ;
}
if ( channels > ! ! s -> lfe && s -> channel_order_tab [ channels - 1 - ! ! s -> lfe ] < 0 ) return AVERROR_INVALIDDATA ;
if ( avctx -> request_channels == 2 && s -> prim_channels > 2 ) {
channels = 2 ;
s -> output = DCA_STEREO ;
avctx -> channel_layout = AV_CH_LAYOUT_STEREO ;
}
}
else {
av_log ( avctx , AV_LOG_ERROR , "Non standard configuration %d !\n" , s -> amode ) ;
return AVERROR_INVALIDDATA ;
}
avctx -> channels = channels ;
frame -> nb_samples = 256 * ( s -> sample_blocks / 8 ) ;
if ( ( ret = ff_get_buffer ( avctx , frame , 0 ) ) < 0 ) {
av_log ( avctx , AV_LOG_ERROR , "get_buffer() failed\n" ) ;
return ret ;
}
samples_flt = ( float * * ) frame -> extended_data ;
if ( avctx -> channels < full_channels ) {
ret = av_samples_get_buffer_size ( NULL , full_channels - channels , frame -> nb_samples , avctx -> sample_fmt , 0 ) ;
if ( ret < 0 ) return ret ;
av_fast_malloc ( & s -> extra_channels_buffer , & s -> extra_channels_buffer_size , ret ) ;
if ( ! s -> extra_channels_buffer ) return AVERROR ( ENOMEM ) ;
ret = av_samples_fill_arrays ( ( uint8_t * * ) s -> extra_channels , NULL , s -> extra_channels_buffer , full_channels - channels , frame -> nb_samples , avctx -> sample_fmt , 0 ) ;
if ( ret < 0 ) return ret ;
}
for ( i = 0 ;
i < ( s -> sample_blocks / 8 ) ;
i ++ ) {
int ch ;
for ( ch = 0 ;
ch < channels ;
ch ++ ) s -> samples_chanptr [ ch ] = samples_flt [ ch ] + i * 256 ;
for ( ;
ch < full_channels ;
ch ++ ) s -> samples_chanptr [ ch ] = s -> extra_channels [ ch - channels ] + i * 256 ;
dca_filter_channels ( s , i ) ;
if ( ( s -> source_pcm_res & 1 ) && s -> xch_present ) {
float * back_chan = s -> samples_chanptr [ s -> channel_order_tab [ s -> xch_base_channel ] ] ;
float * lt_chan = s -> samples_chanptr [ s -> channel_order_tab [ s -> xch_base_channel - 2 ] ] ;
float * rt_chan = s -> samples_chanptr [ s -> channel_order_tab [ s -> xch_base_channel - 1 ] ] ;
s -> fdsp . vector_fmac_scalar ( lt_chan , back_chan , - M_SQRT1_2 , 256 ) ;
s -> fdsp . vector_fmac_scalar ( rt_chan , back_chan , - M_SQRT1_2 , 256 ) ;
}
}
lfe_samples = 2 * s -> lfe * ( s -> sample_blocks / 8 ) ;
for ( i = 0 ;
i < 2 * s -> lfe * 4 ;
i ++ ) s -> lfe_data [ i ] = s -> lfe_data [ i + lfe_samples ] ;
* got_frame_ptr = 1 ;
return buf_size ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
IN_PROC_BROWSER_TEST_F ( PageLoadMetricsBrowserTest , LoadingMetrics ) {
ASSERT_TRUE ( embedded_test_server ( ) -> Start ( ) ) ;
auto waiter = CreatePageLoadMetricsWaiter ( ) ;
waiter -> AddPageExpectation ( TimingField : : LOAD_TIMING_INFO ) ;
ui_test_utils : : NavigateToURL ( browser ( ) , embedded_test_server ( ) -> GetURL ( "/title1.html" ) ) ;
waiter -> Wait ( ) ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
int keyring_delete_keyblock ( KEYRING_HANDLE hd ) {
int rc ;
if ( ! hd -> found . kr ) return - 1 ;
if ( hd -> found . kr -> read_only ) return gpg_error ( GPG_ERR_EACCES ) ;
if ( ! hd -> found . n_packets ) {
rc = keyring_get_keyblock ( hd , NULL ) ;
if ( rc ) {
log_error ( "re-reading keyblock failed: %s\n" , gpg_strerror ( rc ) ) ;
return rc ;
}
if ( ! hd -> found . n_packets ) BUG ( ) ;
}
iobuf_close ( hd -> current . iobuf ) ;
hd -> current . iobuf = NULL ;
rc = do_copy ( 2 , hd -> found . kr -> fname , NULL , hd -> found . offset , hd -> found . n_packets ) ;
if ( ! rc ) {
hd -> found . kr = NULL ;
hd -> found . offset = 0 ;
}
return rc ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static int32_t u_scanf_ustring_handler ( UFILE * input , u_scanf_spec_info * info , ufmt_args * args , const UChar * fmt , int32_t * fmtConsumed , int32_t * argConverted ) {
UChar * arg = ( UChar * ) ( args [ 0 ] . ptrValue ) ;
UChar * alias = arg ;
int32_t count ;
int32_t skipped = 0 ;
UChar c ;
UBool isNotEOF = FALSE ;
if ( info -> fIsString ) {
skipped = u_scanf_skip_leading_ws ( input , info -> fPadChar ) ;
}
count = 0 ;
while ( ( info -> fWidth == - 1 || count < info -> fWidth ) && ( isNotEOF = ufile_getch ( input , & c ) ) && ( ! info -> fIsString || ( c != info -> fPadChar && ! u_isWhitespace ( c ) ) ) ) {
if ( ! info -> fSkipArg ) {
* alias ++ = c ;
}
++ count ;
}
if ( ! info -> fSkipArg ) {
if ( ( info -> fWidth == - 1 || count < info -> fWidth ) && isNotEOF ) {
u_fungetc ( c , input ) ;
}
if ( info -> fIsString ) {
* alias = 0x0000 ;
}
}
* argConverted = ! info -> fSkipArg ;
return count + skipped ;
}
| 1True
|
Categorize the following code snippet as vulnerable or not. True or False
|
static gboolean request_is_satisfied ( NautilusDirectory * directory , NautilusFile * file , Request request ) {
if ( REQUEST_WANTS_TYPE ( request , REQUEST_FILE_LIST ) && ! ( directory -> details -> directory_loaded && directory -> details -> directory_loaded_sent_notification ) ) {
return FALSE ;
}
if ( REQUEST_WANTS_TYPE ( request , REQUEST_DIRECTORY_COUNT ) ) {
if ( has_problem ( directory , file , lacks_directory_count ) ) {
return FALSE ;
}
}
if ( REQUEST_WANTS_TYPE ( request , REQUEST_FILE_INFO ) ) {
if ( has_problem ( directory , file , lacks_info ) ) {
return FALSE ;
}
}
if ( REQUEST_WANTS_TYPE ( request , REQUEST_FILESYSTEM_INFO ) ) {
if ( has_problem ( directory , file , lacks_filesystem_info ) ) {
return FALSE ;
}
}
if ( REQUEST_WANTS_TYPE ( request , REQUEST_DEEP_COUNT ) ) {
if ( has_problem ( directory , file , lacks_deep_count ) ) {
return FALSE ;
}
}
if ( REQUEST_WANTS_TYPE ( request , REQUEST_THUMBNAIL ) ) {
if ( has_problem ( directory , file , lacks_thumbnail ) ) {
return FALSE ;
}
}
if ( REQUEST_WANTS_TYPE ( request , REQUEST_MOUNT ) ) {
if ( has_problem ( directory , file , lacks_mount ) ) {
return FALSE ;
}
}
if ( REQUEST_WANTS_TYPE ( request , REQUEST_MIME_LIST ) ) {
if ( has_problem ( directory , file , lacks_mime_list ) ) {
return FALSE ;
}
}
if ( REQUEST_WANTS_TYPE ( request , REQUEST_LINK_INFO ) ) {
if ( has_problem ( directory , file , lacks_link_info ) ) {
return FALSE ;
}
}
return TRUE ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
void tmx_pretran_unlink ( void ) {
int slotid ;
if ( _tmx_proc_ptran == NULL ) return ;
slotid = _tmx_proc_ptran -> hid & ( _tmx_ptran_size - 1 ) ;
lock_get ( & _tmx_ptran_table [ slotid ] . lock ) ;
tmx_pretran_unlink_safe ( slotid ) ;
lock_release ( & _tmx_ptran_table [ slotid ] . lock ) ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static void writer_register_all ( void ) {
static int initialized ;
if ( initialized ) return ;
initialized = 1 ;
writer_register ( & default_writer ) ;
writer_register ( & compact_writer ) ;
writer_register ( & csv_writer ) ;
writer_register ( & flat_writer ) ;
writer_register ( & ini_writer ) ;
writer_register ( & json_writer ) ;
writer_register ( & xml_writer ) ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static int rawv6_bind ( struct sock * sk , struct sockaddr * uaddr , int addr_len ) {
struct inet_sock * inet = inet_sk ( sk ) ;
struct ipv6_pinfo * np = inet6_sk ( sk ) ;
struct sockaddr_in6 * addr = ( struct sockaddr_in6 * ) uaddr ;
__be32 v4addr = 0 ;
int addr_type ;
int err ;
if ( addr_len < SIN6_LEN_RFC2133 ) return - EINVAL ;
addr_type = ipv6_addr_type ( & addr -> sin6_addr ) ;
if ( addr_type == IPV6_ADDR_MAPPED ) return - EADDRNOTAVAIL ;
lock_sock ( sk ) ;
err = - EINVAL ;
if ( sk -> sk_state != TCP_CLOSE ) goto out ;
rcu_read_lock ( ) ;
if ( addr_type != IPV6_ADDR_ANY ) {
struct net_device * dev = NULL ;
if ( addr_type & IPV6_ADDR_LINKLOCAL ) {
if ( addr_len >= sizeof ( struct sockaddr_in6 ) && addr -> sin6_scope_id ) {
sk -> sk_bound_dev_if = addr -> sin6_scope_id ;
}
if ( ! sk -> sk_bound_dev_if ) goto out_unlock ;
err = - ENODEV ;
dev = dev_get_by_index_rcu ( sock_net ( sk ) , sk -> sk_bound_dev_if ) ;
if ( ! dev ) goto out_unlock ;
}
v4addr = LOOPBACK4_IPV6 ;
if ( ! ( addr_type & IPV6_ADDR_MULTICAST ) ) {
err = - EADDRNOTAVAIL ;
if ( ! ipv6_chk_addr ( sock_net ( sk ) , & addr -> sin6_addr , dev , 0 ) ) {
goto out_unlock ;
}
}
}
inet -> inet_rcv_saddr = inet -> inet_saddr = v4addr ;
ipv6_addr_copy ( & np -> rcv_saddr , & addr -> sin6_addr ) ;
if ( ! ( addr_type & IPV6_ADDR_MULTICAST ) ) ipv6_addr_copy ( & np -> saddr , & addr -> sin6_addr ) ;
err = 0 ;
out_unlock : rcu_read_unlock ( ) ;
out : release_sock ( sk ) ;
return err ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static void purple_sync_settings ( account_t * acc , PurpleAccount * pa ) {
PurplePlugin * prpl = purple_plugins_find_with_id ( pa -> protocol_id ) ;
PurplePluginProtocolInfo * pi = prpl -> info -> extra_info ;
GList * i ;
for ( i = pi -> protocol_options ;
i ;
i = i -> next ) {
PurpleAccountOption * o = i -> data ;
const char * name ;
set_t * s ;
name = purple_account_option_get_setting ( o ) ;
s = set_find ( & acc -> set , name ) ;
if ( s -> value == NULL ) {
continue ;
}
switch ( purple_account_option_get_type ( o ) ) {
case PURPLE_PREF_STRING : case PURPLE_PREF_STRING_LIST : purple_account_set_string ( pa , name , set_getstr ( & acc -> set , name ) ) ;
break ;
case PURPLE_PREF_INT : purple_account_set_int ( pa , name , set_getint ( & acc -> set , name ) ) ;
break ;
case PURPLE_PREF_BOOLEAN : purple_account_set_bool ( pa , name , set_getbool ( & acc -> set , name ) ) ;
break ;
default : break ;
}
}
if ( pi -> options & OPT_PROTO_MAIL_CHECK ) {
purple_account_set_check_mail ( pa , set_getbool ( & acc -> set , "mail_notifications" ) ) ;
}
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static int dissect_h225_Progress_UUIE ( tvbuff_t * tvb _U_ , int offset _U_ , asn1_ctx_t * actx _U_ , proto_tree * tree _U_ , int hf_index _U_ ) {
# line 414 "./asn1/h225/h225.cnf" h225_packet_info * h225_pi ;
offset = dissect_per_sequence ( tvb , offset , actx , tree , hf_index , ett_h225_Progress_UUIE , Progress_UUIE_sequence ) ;
# line 418 "./asn1/h225/h225.cnf" h225_pi = ( h225_packet_info * ) p_get_proto_data ( wmem_packet_scope ( ) , actx -> pinfo , proto_h225 , 0 ) ;
if ( h225_pi != NULL ) {
h225_pi -> cs_type = H225_PROGRESS ;
if ( contains_faststart ) {
char temp [ 50 ] ;
g_snprintf ( temp , 50 , "%s OLC (%s)" , val_to_str ( h225_pi -> cs_type , T_h323_message_body_vals , "<unknown>" ) , h225_pi -> frame_label ) ;
g_strlcpy ( h225_pi -> frame_label , temp , 50 ) ;
}
else g_snprintf ( h225_pi -> frame_label , 50 , "%s" , val_to_str ( h225_pi -> cs_type , T_h323_message_body_vals , "<unknown>" ) ) ;
}
return offset ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
int cont_schedule_handler ( TSCont contp , TSEvent event , void * ) {
if ( event == TS_EVENT_IMMEDIATE ) {
SDK_RPRINT ( SDK_ContSchedule_test , "TSContSchedule" , "TestCase1" , TC_PASS , "ok" ) ;
tc1_count ++ ;
}
else if ( event == TS_EVENT_TIMEOUT ) {
SDK_RPRINT ( SDK_ContSchedule_test , "TSContSchedule" , "TestCase2" , TC_PASS , "ok" ) ;
tc2_count ++ ;
}
else {
SDK_RPRINT ( SDK_ContSchedule_test , "TSContSchedule" , "TestCase1|2" , TC_FAIL , "received unexpected event number %d" , event ) ;
* SDK_ContSchedule_pstatus = REGRESSION_TEST_FAILED ;
return 0 ;
}
if ( ( tc1_count == 1 ) && ( tc2_count == 1 ) ) {
* SDK_ContSchedule_pstatus = REGRESSION_TEST_PASSED ;
}
else if ( tc1_count + tc2_count >= 2 ) {
* SDK_ContSchedule_pstatus = REGRESSION_TEST_FAILED ;
}
TSContDestroy ( contp ) ;
return 0 ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static ssize_t _warc_rdlen ( const char * buf , size_t bsz ) {
static const char _key [ ] = "\r\nContent-Length:" ;
const char * val , * eol ;
char * on = NULL ;
long int len ;
if ( ( val = xmemmem ( buf , bsz , _key , sizeof ( _key ) - 1U ) ) == NULL ) {
return - 1 ;
}
val += sizeof ( _key ) - 1U ;
if ( ( eol = _warc_find_eol ( val , buf + bsz - val ) ) == NULL ) {
return - 1 ;
}
while ( val < eol && ( * val == ' ' || * val == '\t' ) ) val ++ ;
if ( ! isdigit ( ( unsigned char ) * val ) ) return - 1 ;
len = strtol ( val , & on , 10 ) ;
if ( on != eol ) {
return - 1 ;
}
return ( size_t ) len ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static void optst ( struct vars * v , struct subre * t ) {
return ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static fz_iccprofile * fz_icc_from_cal ( fz_context * ctx , const fz_colorspace * cs ) {
fz_cal_colorspace * cal_data = cs -> data ;
fz_iccprofile * profile ;
if ( cal_data -> profile != NULL ) return cal_data -> profile ;
profile = fz_malloc_struct ( ctx , fz_iccprofile ) ;
fz_try ( ctx ) {
profile -> buffer = fz_new_icc_data_from_cal_colorspace ( ctx , cal_data ) ;
fz_md5_icc ( ctx , profile ) ;
cal_data -> profile = profile ;
}
fz_catch ( ctx ) {
fz_free ( ctx , profile ) ;
fz_rethrow ( ctx ) ;
}
return profile ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static afs_int32 addWildCards ( struct ubik_trans * tt , prlist * alist , afs_uint32 host ) {
afs_int32 temp ;
struct prentry tentry ;
prlist wlist ;
unsigned wild = htonl ( 0xffffff00 ) ;
struct in_addr iaddr ;
afs_int32 hostid ;
int size = 0 , i , code ;
int added = 0 ;
char hoststr [ 16 ] ;
while ( ( host = ( host & wild ) ) ) {
wild = htonl ( ntohl ( wild ) << 8 ) ;
iaddr . s_addr = host ;
code = NameToID ( tt , afs_inet_ntoa_r ( iaddr . s_addr , hoststr ) , & hostid ) ;
if ( code == PRSUCCESS && hostid != 0 ) {
temp = FindByID ( tt , hostid ) ;
if ( temp ) {
code = pr_ReadEntry ( tt , 0 , temp , & tentry ) ;
if ( code != PRSUCCESS ) continue ;
}
else continue ;
}
else continue ;
wlist . prlist_len = 0 ;
wlist . prlist_val = NULL ;
code = GetList ( tt , & tentry , & wlist , 0 ) ;
if ( code ) return code ;
added += wlist . prlist_len ;
for ( i = 0 ;
i < wlist . prlist_len ;
i ++ ) {
if ( ! inCPS ( * alist , wlist . prlist_val [ i ] ) ) if ( ( code = AddToPRList ( alist , & size , wlist . prlist_val [ i ] ) ) ) {
free ( wlist . prlist_val ) ;
return ( code ) ;
}
}
if ( wlist . prlist_val ) free ( wlist . prlist_val ) ;
}
if ( added ) qsort ( alist -> prlist_val , alist -> prlist_len , sizeof ( afs_int32 ) , IDCmp ) ;
return 0 ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static void _SCSUFromUnicodeWithOffsets ( UConverterFromUnicodeArgs * pArgs , UErrorCode * pErrorCode ) {
UConverter * cnv ;
SCSUData * scsu ;
const UChar * source , * sourceLimit ;
uint8_t * target ;
int32_t targetCapacity ;
int32_t * offsets ;
UBool isSingleByteMode ;
uint8_t dynamicWindow ;
uint32_t currentOffset ;
uint32_t c , delta ;
int32_t sourceIndex , nextSourceIndex ;
int32_t length ;
uint32_t offset ;
UChar lead , trail ;
int code ;
int8_t window ;
cnv = pArgs -> converter ;
scsu = ( SCSUData * ) cnv -> extraInfo ;
source = pArgs -> source ;
sourceLimit = pArgs -> sourceLimit ;
target = ( uint8_t * ) pArgs -> target ;
targetCapacity = ( int32_t ) ( pArgs -> targetLimit - pArgs -> target ) ;
offsets = pArgs -> offsets ;
isSingleByteMode = scsu -> fromUIsSingleByteMode ;
dynamicWindow = scsu -> fromUDynamicWindow ;
currentOffset = scsu -> fromUDynamicOffsets [ dynamicWindow ] ;
c = cnv -> fromUChar32 ;
sourceIndex = c == 0 ? 0 : - 1 ;
nextSourceIndex = 0 ;
loop : if ( isSingleByteMode ) {
if ( c != 0 && targetCapacity > 0 ) {
goto getTrailSingle ;
}
while ( source < sourceLimit ) {
if ( targetCapacity <= 0 ) {
* pErrorCode = U_BUFFER_OVERFLOW_ERROR ;
break ;
}
c = * source ++ ;
++ nextSourceIndex ;
if ( ( c - 0x20 ) <= 0x5f ) {
* target ++ = ( uint8_t ) c ;
if ( offsets != NULL ) {
* offsets ++ = sourceIndex ;
}
-- targetCapacity ;
}
else if ( c < 0x20 ) {
if ( ( 1UL << c ) & 0x2601 ) {
* target ++ = ( uint8_t ) c ;
if ( offsets != NULL ) {
* offsets ++ = sourceIndex ;
}
-- targetCapacity ;
}
else {
c |= SQ0 << 8 ;
length = 2 ;
goto outputBytes ;
}
}
else if ( ( delta = c - currentOffset ) <= 0x7f ) {
* target ++ = ( uint8_t ) ( delta | 0x80 ) ;
if ( offsets != NULL ) {
* offsets ++ = sourceIndex ;
}
-- targetCapacity ;
}
else if ( U16_IS_SURROGATE ( c ) ) {
if ( U16_IS_SURROGATE_LEAD ( c ) ) {
getTrailSingle : lead = ( UChar ) c ;
if ( source < sourceLimit ) {
trail = * source ;
if ( U16_IS_TRAIL ( trail ) ) {
++ source ;
++ nextSourceIndex ;
c = U16_GET_SUPPLEMENTARY ( c , trail ) ;
}
else {
* pErrorCode = U_ILLEGAL_CHAR_FOUND ;
goto endloop ;
}
}
else {
break ;
}
}
else {
* pErrorCode = U_ILLEGAL_CHAR_FOUND ;
goto endloop ;
}
if ( ( delta = c - currentOffset ) <= 0x7f ) {
* target ++ = ( uint8_t ) ( delta | 0x80 ) ;
if ( offsets != NULL ) {
* offsets ++ = sourceIndex ;
}
-- targetCapacity ;
}
else if ( ( window = getWindow ( scsu -> fromUDynamicOffsets , c ) ) >= 0 ) {
dynamicWindow = window ;
currentOffset = scsu -> fromUDynamicOffsets [ dynamicWindow ] ;
useDynamicWindow ( scsu , dynamicWindow ) ;
c = ( ( uint32_t ) ( SC0 + dynamicWindow ) << 8 ) | ( c - currentOffset ) | 0x80 ;
length = 2 ;
goto outputBytes ;
}
else if ( ( code = getDynamicOffset ( c , & offset ) ) >= 0 ) {
code -= 0x200 ;
dynamicWindow = getNextDynamicWindow ( scsu ) ;
currentOffset = scsu -> fromUDynamicOffsets [ dynamicWindow ] = offset ;
useDynamicWindow ( scsu , dynamicWindow ) ;
c = ( ( uint32_t ) SDX << 24 ) | ( ( uint32_t ) dynamicWindow << 21 ) | ( ( uint32_t ) code << 8 ) | ( c - currentOffset ) | 0x80 ;
length = 4 ;
goto outputBytes ;
}
else {
isSingleByteMode = FALSE ;
* target ++ = ( uint8_t ) SCU ;
if ( offsets != NULL ) {
* offsets ++ = sourceIndex ;
}
-- targetCapacity ;
c = ( ( uint32_t ) lead << 16 ) | trail ;
length = 4 ;
goto outputBytes ;
}
}
else if ( c < 0xa0 ) {
c = ( c & 0x7f ) | ( SQ0 + 1 ) << 8 ;
length = 2 ;
goto outputBytes ;
}
else if ( c == 0xfeff || c >= 0xfff0 ) {
c |= SQU << 16 ;
length = 3 ;
goto outputBytes ;
}
else {
if ( ( window = getWindow ( scsu -> fromUDynamicOffsets , c ) ) >= 0 ) {
if ( source >= sourceLimit || isInOffsetWindowOrDirect ( scsu -> fromUDynamicOffsets [ window ] , * source ) ) {
dynamicWindow = window ;
currentOffset = scsu -> fromUDynamicOffsets [ dynamicWindow ] ;
useDynamicWindow ( scsu , dynamicWindow ) ;
c = ( ( uint32_t ) ( SC0 + dynamicWindow ) << 8 ) | ( c - currentOffset ) | 0x80 ;
length = 2 ;
goto outputBytes ;
}
else {
c = ( ( uint32_t ) ( SQ0 + window ) << 8 ) | ( c - scsu -> fromUDynamicOffsets [ window ] ) | 0x80 ;
length = 2 ;
goto outputBytes ;
}
}
else if ( ( window = getWindow ( staticOffsets , c ) ) >= 0 ) {
c = ( ( uint32_t ) ( SQ0 + window ) << 8 ) | ( c - staticOffsets [ window ] ) ;
length = 2 ;
goto outputBytes ;
}
else if ( ( code = getDynamicOffset ( c , & offset ) ) >= 0 ) {
dynamicWindow = getNextDynamicWindow ( scsu ) ;
currentOffset = scsu -> fromUDynamicOffsets [ dynamicWindow ] = offset ;
useDynamicWindow ( scsu , dynamicWindow ) ;
c = ( ( uint32_t ) ( SD0 + dynamicWindow ) << 16 ) | ( ( uint32_t ) code << 8 ) | ( c - currentOffset ) | 0x80 ;
length = 3 ;
goto outputBytes ;
}
else if ( ( uint32_t ) ( c - 0x3400 ) < ( 0xd800 - 0x3400 ) && ( source >= sourceLimit || ( uint32_t ) ( * source - 0x3400 ) < ( 0xd800 - 0x3400 ) ) ) {
isSingleByteMode = FALSE ;
c |= SCU << 16 ;
length = 3 ;
goto outputBytes ;
}
else {
c |= SQU << 16 ;
length = 3 ;
goto outputBytes ;
}
}
c = 0 ;
sourceIndex = nextSourceIndex ;
}
}
else {
if ( c != 0 && targetCapacity > 0 ) {
goto getTrailUnicode ;
}
while ( source < sourceLimit ) {
if ( targetCapacity <= 0 ) {
* pErrorCode = U_BUFFER_OVERFLOW_ERROR ;
break ;
}
c = * source ++ ;
++ nextSourceIndex ;
if ( ( uint32_t ) ( c - 0x3400 ) < ( 0xd800 - 0x3400 ) ) {
if ( targetCapacity >= 2 ) {
* target ++ = ( uint8_t ) ( c >> 8 ) ;
* target ++ = ( uint8_t ) c ;
if ( offsets != NULL ) {
* offsets ++ = sourceIndex ;
* offsets ++ = sourceIndex ;
}
targetCapacity -= 2 ;
}
else {
length = 2 ;
goto outputBytes ;
}
}
else if ( ( uint32_t ) ( c - 0x3400 ) >= ( 0xf300 - 0x3400 ) ) {
if ( ! ( source < sourceLimit && ( uint32_t ) ( * source - 0x3400 ) < ( 0xd800 - 0x3400 ) ) ) {
if ( ( ( uint32_t ) ( c - 0x30 ) < 10 || ( uint32_t ) ( c - 0x61 ) < 26 || ( uint32_t ) ( c - 0x41 ) < 26 ) ) {
isSingleByteMode = TRUE ;
c |= ( ( uint32_t ) ( UC0 + dynamicWindow ) << 8 ) | c ;
length = 2 ;
goto outputBytes ;
}
else if ( ( window = getWindow ( scsu -> fromUDynamicOffsets , c ) ) >= 0 ) {
isSingleByteMode = TRUE ;
dynamicWindow = window ;
currentOffset = scsu -> fromUDynamicOffsets [ dynamicWindow ] ;
useDynamicWindow ( scsu , dynamicWindow ) ;
c = ( ( uint32_t ) ( UC0 + dynamicWindow ) << 8 ) | ( c - currentOffset ) | 0x80 ;
length = 2 ;
goto outputBytes ;
}
else if ( ( code = getDynamicOffset ( c , & offset ) ) >= 0 ) {
isSingleByteMode = TRUE ;
dynamicWindow = getNextDynamicWindow ( scsu ) ;
currentOffset = scsu -> fromUDynamicOffsets [ dynamicWindow ] = offset ;
useDynamicWindow ( scsu , dynamicWindow ) ;
c = ( ( uint32_t ) ( UD0 + dynamicWindow ) << 16 ) | ( ( uint32_t ) code << 8 ) | ( c - currentOffset ) | 0x80 ;
length = 3 ;
goto outputBytes ;
}
}
length = 2 ;
goto outputBytes ;
}
else if ( c < 0xe000 ) {
if ( U16_IS_SURROGATE_LEAD ( c ) ) {
getTrailUnicode : lead = ( UChar ) c ;
if ( source < sourceLimit ) {
trail = * source ;
if ( U16_IS_TRAIL ( trail ) ) {
++ source ;
++ nextSourceIndex ;
c = U16_GET_SUPPLEMENTARY ( c , trail ) ;
}
else {
* pErrorCode = U_ILLEGAL_CHAR_FOUND ;
goto endloop ;
}
}
else {
break ;
}
}
else {
* pErrorCode = U_ILLEGAL_CHAR_FOUND ;
goto endloop ;
}
if ( ( window = getWindow ( scsu -> fromUDynamicOffsets , c ) ) >= 0 && ! ( source < sourceLimit && ( uint32_t ) ( * source - 0x3400 ) < ( 0xd800 - 0x3400 ) ) ) {
isSingleByteMode = TRUE ;
dynamicWindow = window ;
currentOffset = scsu -> fromUDynamicOffsets [ dynamicWindow ] ;
useDynamicWindow ( scsu , dynamicWindow ) ;
c = ( ( uint32_t ) ( UC0 + dynamicWindow ) << 8 ) | ( c - currentOffset ) | 0x80 ;
length = 2 ;
goto outputBytes ;
}
else if ( source < sourceLimit && lead == * source && ( code = getDynamicOffset ( c , & offset ) ) >= 0 ) {
isSingleByteMode = TRUE ;
code -= 0x200 ;
dynamicWindow = getNextDynamicWindow ( scsu ) ;
currentOffset = scsu -> fromUDynamicOffsets [ dynamicWindow ] = offset ;
useDynamicWindow ( scsu , dynamicWindow ) ;
c = ( ( uint32_t ) UDX << 24 ) | ( ( uint32_t ) dynamicWindow << 21 ) | ( ( uint32_t ) code << 8 ) | ( c - currentOffset ) | 0x80 ;
length = 4 ;
goto outputBytes ;
}
else {
c = ( ( uint32_t ) lead << 16 ) | trail ;
length = 4 ;
goto outputBytes ;
}
}
else {
c |= UQU << 16 ;
length = 3 ;
goto outputBytes ;
}
c = 0 ;
sourceIndex = nextSourceIndex ;
}
}
endloop : scsu -> fromUIsSingleByteMode = isSingleByteMode ;
scsu -> fromUDynamicWindow = dynamicWindow ;
cnv -> fromUChar32 = c ;
pArgs -> source = source ;
pArgs -> target = ( char * ) target ;
pArgs -> offsets = offsets ;
return ;
outputBytes : if ( length <= targetCapacity ) {
if ( offsets == NULL ) {
switch ( length ) {
case 4 : * target ++ = ( uint8_t ) ( c >> 24 ) ;
U_FALLTHROUGH ;
case 3 : * target ++ = ( uint8_t ) ( c >> 16 ) ;
U_FALLTHROUGH ;
case 2 : * target ++ = ( uint8_t ) ( c >> 8 ) ;
U_FALLTHROUGH ;
case 1 : * target ++ = ( uint8_t ) c ;
U_FALLTHROUGH ;
default : break ;
}
}
else {
switch ( length ) {
case 4 : * target ++ = ( uint8_t ) ( c >> 24 ) ;
* offsets ++ = sourceIndex ;
U_FALLTHROUGH ;
case 3 : * target ++ = ( uint8_t ) ( c >> 16 ) ;
* offsets ++ = sourceIndex ;
U_FALLTHROUGH ;
case 2 : * target ++ = ( uint8_t ) ( c >> 8 ) ;
* offsets ++ = sourceIndex ;
U_FALLTHROUGH ;
case 1 : * target ++ = ( uint8_t ) c ;
* offsets ++ = sourceIndex ;
U_FALLTHROUGH ;
default : break ;
}
}
targetCapacity -= length ;
c = 0 ;
sourceIndex = nextSourceIndex ;
goto loop ;
}
else {
uint8_t * p ;
length -= targetCapacity ;
p = ( uint8_t * ) cnv -> charErrorBuffer ;
switch ( length ) {
case 4 : * p ++ = ( uint8_t ) ( c >> 24 ) ;
U_FALLTHROUGH ;
case 3 : * p ++ = ( uint8_t ) ( c >> 16 ) ;
U_FALLTHROUGH ;
case 2 : * p ++ = ( uint8_t ) ( c >> 8 ) ;
U_FALLTHROUGH ;
case 1 : * p = ( uint8_t ) c ;
U_FALLTHROUGH ;
default : break ;
}
cnv -> charErrorBufferLength = ( int8_t ) length ;
c >>= 8 * length ;
switch ( targetCapacity ) {
case 3 : * target ++ = ( uint8_t ) ( c >> 16 ) ;
if ( offsets != NULL ) {
* offsets ++ = sourceIndex ;
}
U_FALLTHROUGH ;
case 2 : * target ++ = ( uint8_t ) ( c >> 8 ) ;
if ( offsets != NULL ) {
* offsets ++ = sourceIndex ;
}
U_FALLTHROUGH ;
case 1 : * target ++ = ( uint8_t ) c ;
if ( offsets != NULL ) {
* offsets ++ = sourceIndex ;
}
U_FALLTHROUGH ;
default : break ;
}
targetCapacity = 0 ;
* pErrorCode = U_BUFFER_OVERFLOW_ERROR ;
c = 0 ;
goto endloop ;
}
}
| 1True
|
Categorize the following code snippet as vulnerable or not. True or False
|
static int parse_CSortSet ( tvbuff_t * tvb , int offset , proto_tree * parent_tree , proto_tree * pad_tree , const char * fmt , ... ) {
guint32 count , i ;
proto_item * item ;
proto_tree * tree ;
const char * txt ;
va_list ap ;
va_start ( ap , fmt ) ;
txt = wmem_strdup_vprintf ( wmem_packet_scope ( ) , fmt , ap ) ;
va_end ( ap ) ;
tree = proto_tree_add_subtree ( parent_tree , tvb , offset , 0 , ett_CSortSet , & item , txt ) ;
count = tvb_get_letohl ( tvb , offset ) ;
proto_tree_add_uint ( tree , hf_mswsp_cscortset_count , tvb , offset , 4 , count ) ;
offset += 4 ;
for ( i = 0 ;
i < count ;
i ++ ) {
offset = parse_padding ( tvb , offset , 4 , tree , "padding_sortArray[%u]" , i ) ;
offset = parse_CSort ( tvb , offset , tree , pad_tree , "sortArray[%u]" , i ) ;
}
proto_item_set_end ( item , tvb , offset ) ;
return offset ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static int dsa_param_print ( BIO * bp , const EVP_PKEY * pkey , int indent , ASN1_PCTX * ctx ) {
return do_dsa_print ( bp , pkey -> pkey . dsa , indent , 0 ) ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static int tgv_decode_frame ( AVCodecContext * avctx , void * data , int * got_frame , AVPacket * avpkt ) {
const uint8_t * buf = avpkt -> data ;
int buf_size = avpkt -> size ;
TgvContext * s = avctx -> priv_data ;
const uint8_t * buf_end = buf + buf_size ;
int chunk_type , ret ;
chunk_type = AV_RL32 ( & buf [ 0 ] ) ;
buf += EA_PREAMBLE_SIZE ;
if ( chunk_type == kVGT_TAG ) {
int pal_count , i ;
if ( buf + 12 > buf_end ) {
av_log ( avctx , AV_LOG_WARNING , "truncated header\n" ) ;
return AVERROR_INVALIDDATA ;
}
s -> width = AV_RL16 ( & buf [ 0 ] ) ;
s -> height = AV_RL16 ( & buf [ 2 ] ) ;
if ( s -> avctx -> width != s -> width || s -> avctx -> height != s -> height ) {
avcodec_set_dimensions ( s -> avctx , s -> width , s -> height ) ;
cond_release_buffer ( & s -> frame ) ;
cond_release_buffer ( & s -> last_frame ) ;
}
pal_count = AV_RL16 ( & buf [ 6 ] ) ;
buf += 12 ;
for ( i = 0 ;
i < pal_count && i < AVPALETTE_COUNT && buf + 2 < buf_end ;
i ++ ) {
s -> palette [ i ] = AV_RB24 ( buf ) ;
buf += 3 ;
}
}
if ( ( ret = av_image_check_size ( s -> width , s -> height , 0 , avctx ) ) < 0 ) return ret ;
FFSWAP ( AVFrame , s -> frame , s -> last_frame ) ;
if ( ! s -> frame . data [ 0 ] ) {
s -> frame . reference = 1 ;
s -> frame . buffer_hints = FF_BUFFER_HINTS_VALID ;
s -> frame . linesize [ 0 ] = s -> width ;
s -> frame . data [ 0 ] = av_malloc ( s -> width * s -> height ) ;
if ( ! s -> frame . data [ 0 ] ) return AVERROR ( ENOMEM ) ;
s -> frame . data [ 1 ] = av_malloc ( AVPALETTE_SIZE ) ;
if ( ! s -> frame . data [ 1 ] ) {
av_freep ( & s -> frame . data [ 0 ] ) ;
return AVERROR ( ENOMEM ) ;
}
}
memcpy ( s -> frame . data [ 1 ] , s -> palette , AVPALETTE_SIZE ) ;
if ( chunk_type == kVGT_TAG ) {
s -> frame . key_frame = 1 ;
s -> frame . pict_type = AV_PICTURE_TYPE_I ;
if ( unpack ( buf , buf_end , s -> frame . data [ 0 ] , s -> avctx -> width , s -> avctx -> height ) < 0 ) {
av_log ( avctx , AV_LOG_WARNING , "truncated intra frame\n" ) ;
return AVERROR_INVALIDDATA ;
}
}
else {
if ( ! s -> last_frame . data [ 0 ] ) {
av_log ( avctx , AV_LOG_WARNING , "inter frame without corresponding intra frame\n" ) ;
return buf_size ;
}
s -> frame . key_frame = 0 ;
s -> frame . pict_type = AV_PICTURE_TYPE_P ;
if ( tgv_decode_inter ( s , buf , buf_end ) < 0 ) {
av_log ( avctx , AV_LOG_WARNING , "truncated inter frame\n" ) ;
return AVERROR_INVALIDDATA ;
}
}
* got_frame = 1 ;
* ( AVFrame * ) data = s -> frame ;
return buf_size ;
}
| 1True
|
Categorize the following code snippet as vulnerable or not. True or False
|
INDIC_TABLE_ELEMENT_TYPE hb_indic_get_categories ( hb_codepoint_t u ) {
switch ( u >> 12 ) {
case 0x0u : if ( hb_in_range ( u , 0x0028u , 0x0040u ) ) return indic_table [ u - 0x0028u + indic_offset_0x0028u ] ;
if ( hb_in_range ( u , 0x00D0u , 0x00D8u ) ) return indic_table [ u - 0x00D0u + indic_offset_0x00d0u ] ;
if ( hb_in_range ( u , 0x0900u , 0x0DF8u ) ) return indic_table [ u - 0x0900u + indic_offset_0x0900u ] ;
if ( unlikely ( u == 0x00A0u ) ) return _ ( CP , x ) ;
break ;
case 0x1u : if ( hb_in_range ( u , 0x1000u , 0x10A0u ) ) return indic_table [ u - 0x1000u + indic_offset_0x1000u ] ;
if ( hb_in_range ( u , 0x1700u , 0x17F0u ) ) return indic_table [ u - 0x1700u + indic_offset_0x1700u ] ;
if ( hb_in_range ( u , 0x1900u , 0x1AA0u ) ) return indic_table [ u - 0x1900u + indic_offset_0x1900u ] ;
if ( hb_in_range ( u , 0x1B00u , 0x1C50u ) ) return indic_table [ u - 0x1B00u + indic_offset_0x1b00u ] ;
if ( hb_in_range ( u , 0x1CD0u , 0x1CF8u ) ) return indic_table [ u - 0x1CD0u + indic_offset_0x1cd0u ] ;
break ;
case 0x2u : if ( hb_in_range ( u , 0x2008u , 0x2018u ) ) return indic_table [ u - 0x2008u + indic_offset_0x2008u ] ;
if ( unlikely ( u == 0x25CCu ) ) return _ ( CP , x ) ;
break ;
case 0xAu : if ( hb_in_range ( u , 0xA800u , 0xAAF8u ) ) return indic_table [ u - 0xA800u + indic_offset_0xa800u ] ;
if ( hb_in_range ( u , 0xABC0u , 0xAC00u ) ) return indic_table [ u - 0xABC0u + indic_offset_0xabc0u ] ;
break ;
case 0x10u : if ( hb_in_range ( u , 0x10A00u , 0x10A48u ) ) return indic_table [ u - 0x10A00u + indic_offset_0x10a00u ] ;
break ;
case 0x11u : if ( hb_in_range ( u , 0x11000u , 0x110C0u ) ) return indic_table [ u - 0x11000u + indic_offset_0x11000u ] ;
if ( hb_in_range ( u , 0x11100u , 0x11238u ) ) return indic_table [ u - 0x11100u + indic_offset_0x11100u ] ;
if ( hb_in_range ( u , 0x112B0u , 0x11378u ) ) return indic_table [ u - 0x112B0u + indic_offset_0x112b0u ] ;
if ( hb_in_range ( u , 0x11480u , 0x114E0u ) ) return indic_table [ u - 0x11480u + indic_offset_0x11480u ] ;
if ( hb_in_range ( u , 0x11580u , 0x115C8u ) ) return indic_table [ u - 0x11580u + indic_offset_0x11580u ] ;
if ( hb_in_range ( u , 0x11600u , 0x116D0u ) ) return indic_table [ u - 0x11600u + indic_offset_0x11600u ] ;
break ;
default : break ;
}
return _ ( x , x ) ;
}
| 1True
|
Categorize the following code snippet as vulnerable or not. True or False
|
static void quit ( struct parse * pcmd , FILE * fp ) {
if ( havehost ) closesocket ( sockfd ) ;
exit ( 0 ) ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static gboolean async_job_start ( NautilusDirectory * directory , const char * job ) {
# ifdef DEBUG_ASYNC_JOBS char * key ;
# endif # ifdef DEBUG_START_STOP g_message ( "starting %s in %p" , job , directory -> details -> location ) ;
# endif g_assert ( async_job_count >= 0 ) ;
g_assert ( async_job_count <= MAX_ASYNC_JOBS ) ;
if ( async_job_count >= MAX_ASYNC_JOBS ) {
if ( waiting_directories == NULL ) {
waiting_directories = g_hash_table_new ( NULL , NULL ) ;
}
g_hash_table_insert ( waiting_directories , directory , directory ) ;
return FALSE ;
}
# ifdef DEBUG_ASYNC_JOBS {
char * uri ;
if ( async_jobs == NULL ) {
async_jobs = g_hash_table_new ( g_str_hash , g_str_equal ) ;
}
uri = nautilus_directory_get_uri ( directory ) ;
key = g_strconcat ( uri , ": " , job , NULL ) ;
if ( g_hash_table_lookup ( async_jobs , key ) != NULL ) {
g_warning ( "same job twice: %s in %s" , job , uri ) ;
}
g_free ( uri ) ;
g_hash_table_insert ( async_jobs , key , directory ) ;
}
# endif async_job_count += 1 ;
return TRUE ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static void pred_spatial_direct_motion ( H264Context * const h , int * mb_type ) {
int b8_stride = 2 ;
int b4_stride = h -> b_stride ;
int mb_xy = h -> mb_xy , mb_y = h -> mb_y ;
int mb_type_col [ 2 ] ;
const int16_t ( * l1mv0 ) [ 2 ] , ( * l1mv1 ) [ 2 ] ;
const int8_t * l1ref0 , * l1ref1 ;
const int is_b8x8 = IS_8X8 ( * mb_type ) ;
unsigned int sub_mb_type = MB_TYPE_L0L1 ;
int i8 , i4 ;
int ref [ 2 ] ;
int mv [ 2 ] ;
int list ;
assert ( h -> ref_list [ 1 ] [ 0 ] . reference & 3 ) ;
await_reference_mb_row ( h , & h -> ref_list [ 1 ] [ 0 ] , h -> mb_y + ! ! IS_INTERLACED ( * mb_type ) ) ;
# define MB_TYPE_16x16_OR_INTRA ( MB_TYPE_16x16 | MB_TYPE_INTRA4x4 | MB_TYPE_INTRA16x16 | MB_TYPE_INTRA_PCM ) for ( list = 0 ;
list < 2 ;
list ++ ) {
int left_ref = h -> ref_cache [ list ] [ scan8 [ 0 ] - 1 ] ;
int top_ref = h -> ref_cache [ list ] [ scan8 [ 0 ] - 8 ] ;
int refc = h -> ref_cache [ list ] [ scan8 [ 0 ] - 8 + 4 ] ;
const int16_t * C = h -> mv_cache [ list ] [ scan8 [ 0 ] - 8 + 4 ] ;
if ( refc == PART_NOT_AVAILABLE ) {
refc = h -> ref_cache [ list ] [ scan8 [ 0 ] - 8 - 1 ] ;
C = h -> mv_cache [ list ] [ scan8 [ 0 ] - 8 - 1 ] ;
}
ref [ list ] = FFMIN3 ( ( unsigned ) left_ref , ( unsigned ) top_ref , ( unsigned ) refc ) ;
if ( ref [ list ] >= 0 ) {
const int16_t * const A = h -> mv_cache [ list ] [ scan8 [ 0 ] - 1 ] ;
const int16_t * const B = h -> mv_cache [ list ] [ scan8 [ 0 ] - 8 ] ;
int match_count = ( left_ref == ref [ list ] ) + ( top_ref == ref [ list ] ) + ( refc == ref [ list ] ) ;
if ( match_count > 1 ) {
mv [ list ] = pack16to32 ( mid_pred ( A [ 0 ] , B [ 0 ] , C [ 0 ] ) , mid_pred ( A [ 1 ] , B [ 1 ] , C [ 1 ] ) ) ;
}
else {
assert ( match_count == 1 ) ;
if ( left_ref == ref [ list ] ) {
mv [ list ] = AV_RN32A ( A ) ;
}
else if ( top_ref == ref [ list ] ) {
mv [ list ] = AV_RN32A ( B ) ;
}
else {
mv [ list ] = AV_RN32A ( C ) ;
}
}
}
else {
int mask = ~ ( MB_TYPE_L0 << ( 2 * list ) ) ;
mv [ list ] = 0 ;
ref [ list ] = - 1 ;
if ( ! is_b8x8 ) * mb_type &= mask ;
sub_mb_type &= mask ;
}
}
if ( ref [ 0 ] < 0 && ref [ 1 ] < 0 ) {
ref [ 0 ] = ref [ 1 ] = 0 ;
if ( ! is_b8x8 ) * mb_type |= MB_TYPE_L0L1 ;
sub_mb_type |= MB_TYPE_L0L1 ;
}
if ( ! ( is_b8x8 | mv [ 0 ] | mv [ 1 ] ) ) {
fill_rectangle ( & h -> ref_cache [ 0 ] [ scan8 [ 0 ] ] , 4 , 4 , 8 , ( uint8_t ) ref [ 0 ] , 1 ) ;
fill_rectangle ( & h -> ref_cache [ 1 ] [ scan8 [ 0 ] ] , 4 , 4 , 8 , ( uint8_t ) ref [ 1 ] , 1 ) ;
fill_rectangle ( & h -> mv_cache [ 0 ] [ scan8 [ 0 ] ] , 4 , 4 , 8 , 0 , 4 ) ;
fill_rectangle ( & h -> mv_cache [ 1 ] [ scan8 [ 0 ] ] , 4 , 4 , 8 , 0 , 4 ) ;
* mb_type = ( * mb_type & ~ ( MB_TYPE_8x8 | MB_TYPE_16x8 | MB_TYPE_8x16 | MB_TYPE_P1L0 | MB_TYPE_P1L1 ) ) | MB_TYPE_16x16 | MB_TYPE_DIRECT2 ;
return ;
}
if ( IS_INTERLACED ( h -> ref_list [ 1 ] [ 0 ] . mb_type [ mb_xy ] ) ) {
if ( ! IS_INTERLACED ( * mb_type ) ) {
mb_y = ( h -> mb_y & ~ 1 ) + h -> col_parity ;
mb_xy = h -> mb_x + ( ( h -> mb_y & ~ 1 ) + h -> col_parity ) * h -> mb_stride ;
b8_stride = 0 ;
}
else {
mb_y += h -> col_fieldoff ;
mb_xy += h -> mb_stride * h -> col_fieldoff ;
}
goto single_col ;
}
else {
if ( IS_INTERLACED ( * mb_type ) ) {
mb_y = h -> mb_y & ~ 1 ;
mb_xy = h -> mb_x + ( h -> mb_y & ~ 1 ) * h -> mb_stride ;
mb_type_col [ 0 ] = h -> ref_list [ 1 ] [ 0 ] . mb_type [ mb_xy ] ;
mb_type_col [ 1 ] = h -> ref_list [ 1 ] [ 0 ] . mb_type [ mb_xy + h -> mb_stride ] ;
b8_stride = 2 + 4 * h -> mb_stride ;
b4_stride *= 6 ;
if ( IS_INTERLACED ( mb_type_col [ 0 ] ) != IS_INTERLACED ( mb_type_col [ 1 ] ) ) {
mb_type_col [ 0 ] &= ~ MB_TYPE_INTERLACED ;
mb_type_col [ 1 ] &= ~ MB_TYPE_INTERLACED ;
}
sub_mb_type |= MB_TYPE_16x16 | MB_TYPE_DIRECT2 ;
if ( ( mb_type_col [ 0 ] & MB_TYPE_16x16_OR_INTRA ) && ( mb_type_col [ 1 ] & MB_TYPE_16x16_OR_INTRA ) && ! is_b8x8 ) {
* mb_type |= MB_TYPE_16x8 | MB_TYPE_DIRECT2 ;
}
else {
* mb_type |= MB_TYPE_8x8 ;
}
}
else {
single_col : mb_type_col [ 0 ] = mb_type_col [ 1 ] = h -> ref_list [ 1 ] [ 0 ] . mb_type [ mb_xy ] ;
sub_mb_type |= MB_TYPE_16x16 | MB_TYPE_DIRECT2 ;
if ( ! is_b8x8 && ( mb_type_col [ 0 ] & MB_TYPE_16x16_OR_INTRA ) ) {
* mb_type |= MB_TYPE_16x16 | MB_TYPE_DIRECT2 ;
}
else if ( ! is_b8x8 && ( mb_type_col [ 0 ] & ( MB_TYPE_16x8 | MB_TYPE_8x16 ) ) ) {
* mb_type |= MB_TYPE_DIRECT2 | ( mb_type_col [ 0 ] & ( MB_TYPE_16x8 | MB_TYPE_8x16 ) ) ;
}
else {
if ( ! h -> sps . direct_8x8_inference_flag ) {
sub_mb_type += ( MB_TYPE_8x8 - MB_TYPE_16x16 ) ;
}
* mb_type |= MB_TYPE_8x8 ;
}
}
}
await_reference_mb_row ( h , & h -> ref_list [ 1 ] [ 0 ] , mb_y ) ;
l1mv0 = & h -> ref_list [ 1 ] [ 0 ] . motion_val [ 0 ] [ h -> mb2b_xy [ mb_xy ] ] ;
l1mv1 = & h -> ref_list [ 1 ] [ 0 ] . motion_val [ 1 ] [ h -> mb2b_xy [ mb_xy ] ] ;
l1ref0 = & h -> ref_list [ 1 ] [ 0 ] . ref_index [ 0 ] [ 4 * mb_xy ] ;
l1ref1 = & h -> ref_list [ 1 ] [ 0 ] . ref_index [ 1 ] [ 4 * mb_xy ] ;
if ( ! b8_stride ) {
if ( h -> mb_y & 1 ) {
l1ref0 += 2 ;
l1ref1 += 2 ;
l1mv0 += 2 * b4_stride ;
l1mv1 += 2 * b4_stride ;
}
}
if ( IS_INTERLACED ( * mb_type ) != IS_INTERLACED ( mb_type_col [ 0 ] ) ) {
int n = 0 ;
for ( i8 = 0 ;
i8 < 4 ;
i8 ++ ) {
int x8 = i8 & 1 ;
int y8 = i8 >> 1 ;
int xy8 = x8 + y8 * b8_stride ;
int xy4 = 3 * x8 + y8 * b4_stride ;
int a , b ;
if ( is_b8x8 && ! IS_DIRECT ( h -> sub_mb_type [ i8 ] ) ) continue ;
h -> sub_mb_type [ i8 ] = sub_mb_type ;
fill_rectangle ( & h -> ref_cache [ 0 ] [ scan8 [ i8 * 4 ] ] , 2 , 2 , 8 , ( uint8_t ) ref [ 0 ] , 1 ) ;
fill_rectangle ( & h -> ref_cache [ 1 ] [ scan8 [ i8 * 4 ] ] , 2 , 2 , 8 , ( uint8_t ) ref [ 1 ] , 1 ) ;
if ( ! IS_INTRA ( mb_type_col [ y8 ] ) && ! h -> ref_list [ 1 ] [ 0 ] . long_ref && ( ( l1ref0 [ xy8 ] == 0 && FFABS ( l1mv0 [ xy4 ] [ 0 ] ) <= 1 && FFABS ( l1mv0 [ xy4 ] [ 1 ] ) <= 1 ) || ( l1ref0 [ xy8 ] < 0 && l1ref1 [ xy8 ] == 0 && FFABS ( l1mv1 [ xy4 ] [ 0 ] ) <= 1 && FFABS ( l1mv1 [ xy4 ] [ 1 ] ) <= 1 ) ) ) {
a = b = 0 ;
if ( ref [ 0 ] > 0 ) a = mv [ 0 ] ;
if ( ref [ 1 ] > 0 ) b = mv [ 1 ] ;
n ++ ;
}
else {
a = mv [ 0 ] ;
b = mv [ 1 ] ;
}
fill_rectangle ( & h -> mv_cache [ 0 ] [ scan8 [ i8 * 4 ] ] , 2 , 2 , 8 , a , 4 ) ;
fill_rectangle ( & h -> mv_cache [ 1 ] [ scan8 [ i8 * 4 ] ] , 2 , 2 , 8 , b , 4 ) ;
}
if ( ! is_b8x8 && ! ( n & 3 ) ) * mb_type = ( * mb_type & ~ ( MB_TYPE_8x8 | MB_TYPE_16x8 | MB_TYPE_8x16 | MB_TYPE_P1L0 | MB_TYPE_P1L1 ) ) | MB_TYPE_16x16 | MB_TYPE_DIRECT2 ;
}
else if ( IS_16X16 ( * mb_type ) ) {
int a , b ;
fill_rectangle ( & h -> ref_cache [ 0 ] [ scan8 [ 0 ] ] , 4 , 4 , 8 , ( uint8_t ) ref [ 0 ] , 1 ) ;
fill_rectangle ( & h -> ref_cache [ 1 ] [ scan8 [ 0 ] ] , 4 , 4 , 8 , ( uint8_t ) ref [ 1 ] , 1 ) ;
if ( ! IS_INTRA ( mb_type_col [ 0 ] ) && ! h -> ref_list [ 1 ] [ 0 ] . long_ref && ( ( l1ref0 [ 0 ] == 0 && FFABS ( l1mv0 [ 0 ] [ 0 ] ) <= 1 && FFABS ( l1mv0 [ 0 ] [ 1 ] ) <= 1 ) || ( l1ref0 [ 0 ] < 0 && l1ref1 [ 0 ] == 0 && FFABS ( l1mv1 [ 0 ] [ 0 ] ) <= 1 && FFABS ( l1mv1 [ 0 ] [ 1 ] ) <= 1 && h -> x264_build > 33U ) ) ) {
a = b = 0 ;
if ( ref [ 0 ] > 0 ) a = mv [ 0 ] ;
if ( ref [ 1 ] > 0 ) b = mv [ 1 ] ;
}
else {
a = mv [ 0 ] ;
b = mv [ 1 ] ;
}
fill_rectangle ( & h -> mv_cache [ 0 ] [ scan8 [ 0 ] ] , 4 , 4 , 8 , a , 4 ) ;
fill_rectangle ( & h -> mv_cache [ 1 ] [ scan8 [ 0 ] ] , 4 , 4 , 8 , b , 4 ) ;
}
else {
int n = 0 ;
for ( i8 = 0 ;
i8 < 4 ;
i8 ++ ) {
const int x8 = i8 & 1 ;
const int y8 = i8 >> 1 ;
if ( is_b8x8 && ! IS_DIRECT ( h -> sub_mb_type [ i8 ] ) ) continue ;
h -> sub_mb_type [ i8 ] = sub_mb_type ;
fill_rectangle ( & h -> mv_cache [ 0 ] [ scan8 [ i8 * 4 ] ] , 2 , 2 , 8 , mv [ 0 ] , 4 ) ;
fill_rectangle ( & h -> mv_cache [ 1 ] [ scan8 [ i8 * 4 ] ] , 2 , 2 , 8 , mv [ 1 ] , 4 ) ;
fill_rectangle ( & h -> ref_cache [ 0 ] [ scan8 [ i8 * 4 ] ] , 2 , 2 , 8 , ( uint8_t ) ref [ 0 ] , 1 ) ;
fill_rectangle ( & h -> ref_cache [ 1 ] [ scan8 [ i8 * 4 ] ] , 2 , 2 , 8 , ( uint8_t ) ref [ 1 ] , 1 ) ;
assert ( b8_stride == 2 ) ;
if ( ! IS_INTRA ( mb_type_col [ 0 ] ) && ! h -> ref_list [ 1 ] [ 0 ] . long_ref && ( l1ref0 [ i8 ] == 0 || ( l1ref0 [ i8 ] < 0 && l1ref1 [ i8 ] == 0 && h -> x264_build > 33U ) ) ) {
const int16_t ( * l1mv ) [ 2 ] = l1ref0 [ i8 ] == 0 ? l1mv0 : l1mv1 ;
if ( IS_SUB_8X8 ( sub_mb_type ) ) {
const int16_t * mv_col = l1mv [ x8 * 3 + y8 * 3 * b4_stride ] ;
if ( FFABS ( mv_col [ 0 ] ) <= 1 && FFABS ( mv_col [ 1 ] ) <= 1 ) {
if ( ref [ 0 ] == 0 ) fill_rectangle ( & h -> mv_cache [ 0 ] [ scan8 [ i8 * 4 ] ] , 2 , 2 , 8 , 0 , 4 ) ;
if ( ref [ 1 ] == 0 ) fill_rectangle ( & h -> mv_cache [ 1 ] [ scan8 [ i8 * 4 ] ] , 2 , 2 , 8 , 0 , 4 ) ;
n += 4 ;
}
}
else {
int m = 0 ;
for ( i4 = 0 ;
i4 < 4 ;
i4 ++ ) {
const int16_t * mv_col = l1mv [ x8 * 2 + ( i4 & 1 ) + ( y8 * 2 + ( i4 >> 1 ) ) * b4_stride ] ;
if ( FFABS ( mv_col [ 0 ] ) <= 1 && FFABS ( mv_col [ 1 ] ) <= 1 ) {
if ( ref [ 0 ] == 0 ) AV_ZERO32 ( h -> mv_cache [ 0 ] [ scan8 [ i8 * 4 + i4 ] ] ) ;
if ( ref [ 1 ] == 0 ) AV_ZERO32 ( h -> mv_cache [ 1 ] [ scan8 [ i8 * 4 + i4 ] ] ) ;
m ++ ;
}
}
if ( ! ( m & 3 ) ) h -> sub_mb_type [ i8 ] += MB_TYPE_16x16 - MB_TYPE_8x8 ;
n += m ;
}
}
}
if ( ! is_b8x8 && ! ( n & 15 ) ) * mb_type = ( * mb_type & ~ ( MB_TYPE_8x8 | MB_TYPE_16x8 | MB_TYPE_8x16 | MB_TYPE_P1L0 | MB_TYPE_P1L1 ) ) | MB_TYPE_16x16 | MB_TYPE_DIRECT2 ;
}
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static uint16_t * encodeRunShort ( uint16_t * buffer , uint16_t * bufLimit , uint16_t value , int32_t length , UErrorCode * status ) {
int32_t num = 0 ;
if ( length < 4 ) {
int j = 0 ;
for ( ;
j < length ;
++ j ) {
if ( value == ( int32_t ) ESCAPE ) {
APPEND ( buffer , bufLimit , ESCAPE , num , status ) ;
}
APPEND ( buffer , bufLimit , value , num , status ) ;
}
}
else {
if ( length == ( int32_t ) ESCAPE ) {
if ( value == ( int32_t ) ESCAPE ) {
APPEND ( buffer , bufLimit , ESCAPE , num , status ) ;
}
APPEND ( buffer , bufLimit , value , num , status ) ;
-- length ;
}
APPEND ( buffer , bufLimit , ESCAPE , num , status ) ;
APPEND ( buffer , bufLimit , ( uint16_t ) length , num , status ) ;
APPEND ( buffer , bufLimit , ( uint16_t ) value , num , status ) ;
}
return buffer ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
TSAction TSNetConnectTransparent ( TSCont contp , sockaddr const * client_addr , sockaddr const * server_addr ) {
sdk_assert ( sdk_sanity_check_continuation ( contp ) == TS_SUCCESS ) ;
sdk_assert ( ats_is_ip ( server_addr ) ) ;
sdk_assert ( ats_ip_are_compatible ( client_addr , server_addr ) ) ;
NetVCOptions opt ;
opt . addr_binding = NetVCOptions : : FOREIGN_ADDR ;
opt . local_ip . assign ( client_addr ) ;
opt . local_port = ats_ip_port_host_order ( client_addr ) ;
FORCE_PLUGIN_SCOPED_MUTEX ( contp ) ;
return reinterpret_cast < TSAction > ( netProcessor . connect_re ( reinterpret_cast < INKContInternal * > ( contp ) , server_addr , & opt ) ) ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static inline PixelTrait GetPixelBlackTraits ( const Image * restrict image ) {
return ( image -> channel_map [ BlackPixelChannel ] . traits ) ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static void report_preparing_link_progress ( CopyMoveJob * link_job , int total , int left ) {
CommonJob * job ;
job = ( CommonJob * ) link_job ;
nautilus_progress_info_take_status ( job -> progress , f ( _ ( "Creating links in “%B”" ) , link_job -> destination ) ) ;
nautilus_progress_info_take_details ( job -> progress , f ( ngettext ( "Making link to %'d file" , "Making links to %'d files" , left ) , left ) ) ;
nautilus_progress_info_set_progress ( job -> progress , left , total ) ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
const char * TSUrlUserGet ( TSMBuffer bufp , TSMLoc obj , int * length ) {
return URLPartGet ( bufp , obj , length , & URL : : user_get ) ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static int pxa2xx_cppmnc_read ( CPUARMState * env , const ARMCPRegInfo * ri , uint64_t * value ) {
PXA2xxState * s = ( PXA2xxState * ) ri -> opaque ;
* value = s -> pmnc ;
return 0 ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static int ipvideo_decode_block_opcode_0xD ( IpvideoContext * s , AVFrame * frame ) {
int y ;
unsigned char P [ 2 ] ;
for ( y = 0 ;
y < 8 ;
y ++ ) {
if ( ! ( y & 3 ) ) {
P [ 0 ] = bytestream2_get_byte ( & s -> stream_ptr ) ;
P [ 1 ] = bytestream2_get_byte ( & s -> stream_ptr ) ;
}
memset ( s -> pixel_ptr , P [ 0 ] , 4 ) ;
memset ( s -> pixel_ptr + 4 , P [ 1 ] , 4 ) ;
s -> pixel_ptr += s -> stride ;
}
return 0 ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static cmsBool Type_MPE_Write ( struct _cms_typehandler_struct * self , cmsIOHANDLER * io , void * Ptr , cmsUInt32Number nItems ) {
cmsUInt32Number i , BaseOffset , DirectoryPos , CurrentPos ;
int inputChan , outputChan ;
cmsUInt32Number ElemCount ;
cmsUInt32Number * ElementOffsets = NULL , * ElementSizes = NULL , Before ;
cmsStageSignature ElementSig ;
cmsPipeline * Lut = ( cmsPipeline * ) Ptr ;
cmsStage * Elem = Lut -> Elements ;
cmsTagTypeHandler * TypeHandler ;
_cmsTagTypePluginChunkType * MPETypePluginChunk = ( _cmsTagTypePluginChunkType * ) _cmsContextGetClientChunk ( self -> ContextID , MPEPlugin ) ;
BaseOffset = io -> Tell ( io ) - sizeof ( _cmsTagBase ) ;
inputChan = cmsPipelineInputChannels ( Lut ) ;
outputChan = cmsPipelineOutputChannels ( Lut ) ;
ElemCount = cmsPipelineStageCount ( Lut ) ;
ElementOffsets = ( cmsUInt32Number * ) _cmsCalloc ( self -> ContextID , ElemCount , sizeof ( cmsUInt32Number ) ) ;
if ( ElementOffsets == NULL ) goto Error ;
ElementSizes = ( cmsUInt32Number * ) _cmsCalloc ( self -> ContextID , ElemCount , sizeof ( cmsUInt32Number ) ) ;
if ( ElementSizes == NULL ) goto Error ;
if ( ! _cmsWriteUInt16Number ( io , ( cmsUInt16Number ) inputChan ) ) goto Error ;
if ( ! _cmsWriteUInt16Number ( io , ( cmsUInt16Number ) outputChan ) ) goto Error ;
if ( ! _cmsWriteUInt32Number ( io , ( cmsUInt16Number ) ElemCount ) ) goto Error ;
DirectoryPos = io -> Tell ( io ) ;
for ( i = 0 ;
i < ElemCount ;
i ++ ) {
if ( ! _cmsWriteUInt32Number ( io , 0 ) ) goto Error ;
if ( ! _cmsWriteUInt32Number ( io , 0 ) ) goto Error ;
}
for ( i = 0 ;
i < ElemCount ;
i ++ ) {
ElementOffsets [ i ] = io -> Tell ( io ) - BaseOffset ;
ElementSig = Elem -> Type ;
TypeHandler = GetHandler ( ( cmsTagTypeSignature ) ElementSig , MPETypePluginChunk -> TagTypes , SupportedMPEtypes ) ;
if ( TypeHandler == NULL ) {
char String [ 5 ] ;
_cmsTagSignature2String ( String , ( cmsTagSignature ) ElementSig ) ;
cmsSignalError ( self -> ContextID , cmsERROR_UNKNOWN_EXTENSION , "Found unknown MPE type '%s'" , String ) ;
goto Error ;
}
if ( ! _cmsWriteUInt32Number ( io , ElementSig ) ) goto Error ;
if ( ! _cmsWriteUInt32Number ( io , 0 ) ) goto Error ;
Before = io -> Tell ( io ) ;
if ( ! TypeHandler -> WritePtr ( self , io , Elem , 1 ) ) goto Error ;
if ( ! _cmsWriteAlignment ( io ) ) goto Error ;
ElementSizes [ i ] = io -> Tell ( io ) - Before ;
Elem = Elem -> Next ;
}
CurrentPos = io -> Tell ( io ) ;
if ( ! io -> Seek ( io , DirectoryPos ) ) goto Error ;
for ( i = 0 ;
i < ElemCount ;
i ++ ) {
if ( ! _cmsWriteUInt32Number ( io , ElementOffsets [ i ] ) ) goto Error ;
if ( ! _cmsWriteUInt32Number ( io , ElementSizes [ i ] ) ) goto Error ;
}
if ( ! io -> Seek ( io , CurrentPos ) ) goto Error ;
if ( ElementOffsets != NULL ) _cmsFree ( self -> ContextID , ElementOffsets ) ;
if ( ElementSizes != NULL ) _cmsFree ( self -> ContextID , ElementSizes ) ;
return TRUE ;
Error : if ( ElementOffsets != NULL ) _cmsFree ( self -> ContextID , ElementOffsets ) ;
if ( ElementSizes != NULL ) _cmsFree ( self -> ContextID , ElementSizes ) ;
return FALSE ;
cmsUNUSED_PARAMETER ( nItems ) ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static void vp3_draw_horiz_band ( Vp3DecodeContext * s , int y ) {
int h , cy , i ;
int offset [ AV_NUM_DATA_POINTERS ] ;
if ( HAVE_THREADS && s -> avctx -> active_thread_type & FF_THREAD_FRAME ) {
int y_flipped = s -> flipped_image ? s -> avctx -> height - y : y ;
ff_thread_report_progress ( & s -> current_frame , y_flipped == s -> avctx -> height ? INT_MAX : y_flipped - 1 , 0 ) ;
}
if ( s -> avctx -> draw_horiz_band == NULL ) return ;
h = y - s -> last_slice_end ;
s -> last_slice_end = y ;
y -= h ;
if ( ! s -> flipped_image ) {
y = s -> avctx -> height - y - h ;
}
cy = y >> s -> chroma_y_shift ;
offset [ 0 ] = s -> current_frame . f -> linesize [ 0 ] * y ;
offset [ 1 ] = s -> current_frame . f -> linesize [ 1 ] * cy ;
offset [ 2 ] = s -> current_frame . f -> linesize [ 2 ] * cy ;
for ( i = 3 ;
i < AV_NUM_DATA_POINTERS ;
i ++ ) offset [ i ] = 0 ;
emms_c ( ) ;
s -> avctx -> draw_horiz_band ( s -> avctx , s -> current_frame . f , offset , y , 3 , h ) ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static int dissect_h245_T_aal5 ( tvbuff_t * tvb _U_ , int offset _U_ , asn1_ctx_t * actx _U_ , proto_tree * tree _U_ , int hf_index _U_ ) {
offset = dissect_per_sequence ( tvb , offset , actx , tree , hf_index , ett_h245_T_aal5 , T_aal5_sequence ) ;
return offset ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
enum mbfl_no_encoding mbfl_encoding_detector_judge ( mbfl_encoding_detector * identd ) {
const mbfl_encoding * encoding = mbfl_encoding_detector_judge2 ( identd ) ;
return ! encoding ? mbfl_no_encoding_invalid : encoding -> no_encoding ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static int pxa2xx_i2c_rx ( I2CSlave * i2c ) {
PXA2xxI2CSlaveState * slave = FROM_I2C_SLAVE ( PXA2xxI2CSlaveState , i2c ) ;
PXA2xxI2CState * s = slave -> host ;
if ( ( s -> control & ( 1 << 14 ) ) || ! ( s -> control & ( 1 << 6 ) ) ) return 0 ;
if ( s -> status & ( 1 << 0 ) ) {
s -> status |= 1 << 6 ;
}
pxa2xx_i2c_update ( s ) ;
return s -> data ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static void xan_unpack ( unsigned char * dest , int dest_len , const unsigned char * src , int src_len ) {
unsigned char opcode ;
int size ;
unsigned char * dest_org = dest ;
unsigned char * dest_end = dest + dest_len ;
GetByteContext ctx ;
bytestream2_init ( & ctx , src , src_len ) ;
while ( dest < dest_end && bytestream2_get_bytes_left ( & ctx ) ) {
opcode = bytestream2_get_byte ( & ctx ) ;
if ( opcode < 0xe0 ) {
int size2 , back ;
if ( ( opcode & 0x80 ) == 0 ) {
size = opcode & 3 ;
back = ( ( opcode & 0x60 ) << 3 ) + bytestream2_get_byte ( & ctx ) + 1 ;
size2 = ( ( opcode & 0x1c ) >> 2 ) + 3 ;
}
else if ( ( opcode & 0x40 ) == 0 ) {
size = bytestream2_peek_byte ( & ctx ) >> 6 ;
back = ( bytestream2_get_be16 ( & ctx ) & 0x3fff ) + 1 ;
size2 = ( opcode & 0x3f ) + 4 ;
}
else {
size = opcode & 3 ;
back = ( ( opcode & 0x10 ) << 12 ) + bytestream2_get_be16 ( & ctx ) + 1 ;
size2 = ( ( opcode & 0x0c ) << 6 ) + bytestream2_get_byte ( & ctx ) + 5 ;
}
if ( dest_end - dest < size + size2 || dest + size - dest_org < back || bytestream2_get_bytes_left ( & ctx ) < size ) return ;
bytestream2_get_buffer ( & ctx , dest , size ) ;
dest += size ;
av_memcpy_backptr ( dest , back , size2 ) ;
dest += size2 ;
}
else {
int finish = opcode >= 0xfc ;
size = finish ? opcode & 3 : ( ( opcode & 0x1f ) << 2 ) + 4 ;
if ( dest_end - dest < size || bytestream2_get_bytes_left ( & ctx ) < size ) return ;
bytestream2_get_buffer ( & ctx , dest , size ) ;
dest += size ;
if ( finish ) return ;
}
}
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static int dissect_pbb_tlvblock ( tvbuff_t * tvb , packet_info * pinfo , proto_tree * tree , guint offset , guint maxoffset , gint8 addrCount , guint tlvCat ) {
guint16 tlvblockLength ;
guint tlvblockEnd ;
proto_tree * tlvblock_tree = NULL ;
proto_tree * tlv_tree = NULL ;
proto_tree * tlv_flags_tree = NULL ;
proto_tree * tlvValue_tree = NULL ;
proto_item * tlvBlock_item = NULL ;
proto_item * tlv_item = NULL ;
proto_item * tlvFlags_item = NULL ;
proto_item * tlvValue_item = NULL ;
proto_item * ti = NULL ;
int tlvCount = 0 ;
int hf_packetbb_tlv_type = 0 ;
const value_string * tlv_type_vals = NULL ;
if ( maxoffset < offset + 2 ) {
proto_tree_add_expert_format ( tree , pinfo , & ei_packetbb_error , tvb , offset , maxoffset - offset , "Not enough octets for minimal tlvblock" ) ;
return maxoffset ;
}
tlvblockLength = tvb_get_ntohs ( tvb , offset ) ;
tlvblockEnd = offset + 2 + tlvblockLength ;
if ( maxoffset < tlvblockEnd ) {
proto_tree_add_expert_format ( tree , pinfo , & ei_packetbb_error , tvb , offset , maxoffset - offset , "Not enough octets for tlvblock" ) ;
return maxoffset ;
}
tlvBlock_item = proto_tree_add_item ( tree , hf_packetbb_tlvblock , tvb , offset , tlvblockEnd - offset , ENC_NA ) ;
tlvblock_tree = proto_item_add_subtree ( tlvBlock_item , ett_packetbb_tlvblock ) ;
proto_tree_add_item ( tlvblock_tree , hf_packetbb_tlvblock_length , tvb , offset , 2 , ENC_BIG_ENDIAN ) ;
offset += 2 ;
while ( offset < tlvblockEnd ) {
guint tlvStart , tlvLength ;
guint8 tlvType , tlvFlags , tlvExtType , indexStart , indexEnd ;
guint16 length = 0 ;
tlvStart = offset ;
tlvType = tvb_get_guint8 ( tvb , offset ++ ) ;
tlvFlags = tvb_get_guint8 ( tvb , offset ++ ) ;
indexStart = 0 ;
indexEnd = addrCount ? ( addrCount - 1 ) : 0 ;
tlvExtType = 0 ;
if ( ( tlvFlags & TLV_HAS_TYPEEXT ) != 0 ) {
tlvExtType = tvb_get_guint8 ( tvb , offset ++ ) ;
}
if ( ( tlvFlags & TLV_HAS_SINGLEINDEX ) != 0 ) {
indexStart = indexEnd = tvb_get_guint8 ( tvb , offset ++ ) ;
}
else if ( ( tlvFlags & TLV_HAS_MULTIINDEX ) != 0 ) {
indexStart = tvb_get_guint8 ( tvb , offset ++ ) ;
indexEnd = tvb_get_guint8 ( tvb , offset ++ ) ;
}
if ( ( tlvFlags & TLV_HAS_VALUE ) != 0 ) {
if ( ( tlvFlags & TLV_HAS_EXTLEN ) != 0 ) {
length = tvb_get_ntohs ( tvb , offset ++ ) ;
}
else {
length = tvb_get_guint8 ( tvb , offset ++ ) ;
}
}
tlvLength = offset - tlvStart + length ;
offset = tlvStart ;
tlv_item = proto_tree_add_item ( tlvBlock_item , hf_packetbb_tlv , tvb , tlvStart , tlvLength , ENC_NA ) ;
tlv_tree = proto_item_add_subtree ( tlv_item , ett_packetbb_tlv [ tlvType ] ) ;
if ( tlvCat == TLV_CAT_PACKET ) {
hf_packetbb_tlv_type = hf_packetbb_pkttlv_type ;
tlv_type_vals = pkttlv_type_vals ;
}
else if ( tlvCat == TLV_CAT_MESSAGE ) {
hf_packetbb_tlv_type = hf_packetbb_msgtlv_type ;
tlv_type_vals = msgtlv_type_vals ;
}
else {
hf_packetbb_tlv_type = hf_packetbb_addrtlv_type ;
tlv_type_vals = addrtlv_type_vals ;
}
if ( ( tlvFlags & TLV_HAS_TYPEEXT ) == 0 ) {
proto_item_append_text ( tlv_item , " (%s)" , val_to_str_const ( tlvType , tlv_type_vals , "Unknown type" ) ) ;
}
else {
proto_item_append_text ( tlv_item , " (%s / %d)" , val_to_str_const ( tlvType , tlv_type_vals , "Unknown type" ) , tlvExtType ) ;
}
proto_tree_add_item ( tlv_tree , hf_packetbb_tlv_type , tvb , offset ++ , 1 , ENC_BIG_ENDIAN ) ;
tlvFlags_item = proto_tree_add_item ( tlv_tree , hf_packetbb_tlv_flags , tvb , offset , 1 , ENC_BIG_ENDIAN ) ;
tlv_flags_tree = proto_item_add_subtree ( tlvFlags_item , ett_packetbb_tlv_flags ) ;
proto_tree_add_item ( tlv_flags_tree , hf_packetbb_tlv_flags_hastypext , tvb , offset , 1 , ENC_BIG_ENDIAN ) ;
proto_tree_add_item ( tlv_flags_tree , hf_packetbb_tlv_flags_hassingleindex , tvb , offset , 1 , ENC_BIG_ENDIAN ) ;
proto_tree_add_item ( tlv_flags_tree , hf_packetbb_tlv_flags_hasmultiindex , tvb , offset , 1 , ENC_BIG_ENDIAN ) ;
proto_tree_add_item ( tlv_flags_tree , hf_packetbb_tlv_flags_hasvalue , tvb , offset , 1 , ENC_BIG_ENDIAN ) ;
proto_tree_add_item ( tlv_flags_tree , hf_packetbb_tlv_flags_hasextlen , tvb , offset , 1 , ENC_BIG_ENDIAN ) ;
proto_tree_add_item ( tlv_flags_tree , hf_packetbb_tlv_flags_hasmultivalue , tvb , offset , 1 , ENC_BIG_ENDIAN ) ;
offset ++ ;
if ( ( tlvFlags & TLV_HAS_TYPEEXT ) != 0 ) {
proto_tree_add_item ( tlv_tree , hf_packetbb_tlv_typeext , tvb , offset ++ , 1 , ENC_BIG_ENDIAN ) ;
}
if ( addrCount > 0 ) {
if ( ( tlvFlags & TLV_HAS_SINGLEINDEX ) != 0 ) {
proto_tree_add_uint ( tlv_tree , hf_packetbb_tlv_indexstart , tvb , offset ++ , 1 , indexStart ) ;
ti = proto_tree_add_uint ( tlv_tree , hf_packetbb_tlv_indexend , tvb , offset , 0 , indexEnd ) ;
proto_item_append_text ( ti , " (implicit)" ) ;
}
else if ( ( tlvFlags & TLV_HAS_MULTIINDEX ) != 0 ) {
proto_tree_add_uint ( tlv_tree , hf_packetbb_tlv_indexstart , tvb , offset ++ , 1 , indexStart ) ;
proto_tree_add_uint ( tlv_tree , hf_packetbb_tlv_indexend , tvb , offset ++ , 1 , indexEnd ) ;
}
else {
ti = proto_tree_add_uint ( tlv_tree , hf_packetbb_tlv_indexstart , tvb , offset , 0 , indexStart ) ;
proto_item_append_text ( ti , " (implicit)" ) ;
ti = proto_tree_add_uint ( tlv_tree , hf_packetbb_tlv_indexend , tvb , offset , 0 , indexEnd ) ;
proto_item_append_text ( ti , " (implicit)" ) ;
}
}
if ( ( tlvFlags & TLV_HAS_VALUE ) != 0 ) {
if ( ( tlvFlags & TLV_HAS_EXTLEN ) != 0 ) {
proto_tree_add_uint ( tlv_tree , hf_packetbb_tlv_length , tvb , offset , 2 , length ) ;
offset += 2 ;
}
else {
proto_tree_add_uint ( tlv_tree , hf_packetbb_tlv_length , tvb , offset ++ , 1 , length ) ;
}
}
else {
ti = proto_tree_add_uint ( tlv_tree , hf_packetbb_tlv_length , tvb , offset , 0 , 0 ) ;
proto_item_append_text ( ti , " (implicit)" ) ;
}
if ( length > 0 ) {
tlvValue_item = proto_tree_add_item ( tlv_tree , hf_packetbb_tlv_value , tvb , offset , length , ENC_NA ) ;
if ( ( tlvFlags & TLV_HAS_MULTIVALUE ) == 0 ) {
offset += length ;
}
else {
int i ;
guint8 c = indexEnd - indexStart + 1 ;
tlvValue_tree = proto_item_add_subtree ( tlvValue_item , ett_packetbb_tlv_value ) ;
for ( i = indexStart ;
i <= indexEnd ;
i ++ ) {
proto_tree_add_item ( tlvValue_tree , hf_packetbb_tlv_multivalue , tvb , offset , length / c , ENC_NA ) ;
offset += ( length / c ) ;
}
}
}
tlvCount ++ ;
}
proto_item_append_text ( tlvBlock_item , " (%d TLVs)" , tlvCount ) ;
return offset ;
}
| 1True
|
Categorize the following code snippet as vulnerable or not. True or False
|
guint16 de_sup_codec_list ( tvbuff_t * tvb , proto_tree * tree , packet_info * pinfo _U_ , guint32 offset , guint len , gchar * add_string _U_ , int string_len _U_ ) {
guint32 curr_offset ;
guint8 length ;
proto_tree * subtree ;
guint8 sysid_counter ;
curr_offset = offset ;
sysid_counter = 0 ;
while ( len > ( curr_offset - offset ) ) {
sysid_counter ++ ;
proto_tree_add_item ( tree , hf_gsm_a_dtap_sysid , tvb , curr_offset , 1 , ENC_BIG_ENDIAN ) ;
curr_offset ++ ;
proto_tree_add_item ( tree , hf_gsm_a_dtap_bitmap_length , tvb , curr_offset , 1 , ENC_BIG_ENDIAN ) ;
length = tvb_get_guint8 ( tvb , curr_offset ) ;
curr_offset ++ ;
if ( length > 0 ) {
subtree = proto_tree_add_subtree_format ( tree , tvb , curr_offset , length , ett_gsm_dtap_elem [ DE_SUP_CODEC_LIST ] , NULL , "Codec Bitmap for SysID %u" , sysid_counter ) ;
proto_tree_add_item ( subtree , hf_gsm_a_dtap_codec_tdma_efr , tvb , curr_offset , 1 , ENC_BIG_ENDIAN ) ;
proto_tree_add_item ( subtree , hf_gsm_a_dtap_codec_umts_amr_2 , tvb , curr_offset , 1 , ENC_BIG_ENDIAN ) ;
proto_tree_add_item ( subtree , hf_gsm_a_dtap_codec_umts_amr , tvb , curr_offset , 1 , ENC_BIG_ENDIAN ) ;
proto_tree_add_item ( subtree , hf_gsm_a_dtap_codec_hr_amr , tvb , curr_offset , 1 , ENC_BIG_ENDIAN ) ;
proto_tree_add_item ( subtree , hf_gsm_a_dtap_codec_fr_amr , tvb , curr_offset , 1 , ENC_BIG_ENDIAN ) ;
proto_tree_add_item ( subtree , hf_gsm_a_dtap_codec_gsm_efr , tvb , curr_offset , 1 , ENC_BIG_ENDIAN ) ;
proto_tree_add_item ( subtree , hf_gsm_a_dtap_codec_gsm_hr , tvb , curr_offset , 1 , ENC_BIG_ENDIAN ) ;
proto_tree_add_item ( subtree , hf_gsm_a_dtap_codec_gsm_fr , tvb , curr_offset , 1 , ENC_BIG_ENDIAN ) ;
curr_offset ++ ;
length -- ;
if ( length > 0 ) {
proto_tree_add_bits_item ( subtree , hf_gsm_a_spare_bits , tvb , curr_offset << 3 , 2 , ENC_BIG_ENDIAN ) ;
proto_tree_add_item ( subtree , hf_gsm_a_dtap_codec_ohr_amr_wb , tvb , curr_offset , 1 , ENC_BIG_ENDIAN ) ;
proto_tree_add_item ( subtree , hf_gsm_a_dtap_codec_ofr_amr_wb , tvb , curr_offset , 1 , ENC_BIG_ENDIAN ) ;
proto_tree_add_item ( subtree , hf_gsm_a_dtap_codec_ohr_amr , tvb , curr_offset , 1 , ENC_BIG_ENDIAN ) ;
proto_tree_add_item ( subtree , hf_gsm_a_dtap_codec_umts_amr_wb , tvb , curr_offset , 1 , ENC_BIG_ENDIAN ) ;
proto_tree_add_item ( subtree , hf_gsm_a_dtap_codec_fr_amr_wb , tvb , curr_offset , 1 , ENC_BIG_ENDIAN ) ;
proto_tree_add_item ( subtree , hf_gsm_a_dtap_codec_pdc_efr , tvb , curr_offset , 1 , ENC_BIG_ENDIAN ) ;
curr_offset ++ ;
length -- ;
}
}
curr_offset = curr_offset + length ;
}
return ( curr_offset - offset ) ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
fz_colorspace * pdf_xobject_colorspace ( fz_context * ctx , pdf_xobject * xobj ) {
pdf_obj * group = pdf_dict_get ( ctx , xobj -> obj , PDF_NAME_Group ) ;
if ( group ) {
pdf_obj * cs = pdf_dict_get ( ctx , group , PDF_NAME_CS ) ;
if ( cs ) {
fz_colorspace * colorspace = NULL ;
fz_try ( ctx ) colorspace = pdf_load_colorspace ( ctx , cs ) ;
fz_catch ( ctx ) fz_warn ( ctx , "cannot load xobject colorspace" ) ;
return colorspace ;
}
}
return NULL ;
}
| 0False
|
Categorize the following code snippet as vulnerable or not. True or False
|
static ossl_inline int sk_ ## t1 ## _unshift ( STACK_OF ( t1 ) * sk , t2 * ptr ) {
return OPENSSL_sk_unshift ( ( OPENSSL_STACK * ) sk , ( const void * ) ptr ) ;
}
static ossl_inline t2 * sk_ ## t1 ## _pop ( STACK_OF ( t1 ) * sk ) {
return ( t2 * ) OPENSSL_sk_pop ( ( OPENSSL_STACK * ) sk ) ;
}
static ossl_inline t2 * sk_ ## t1 ## _shift ( STACK_OF ( t1 ) * sk ) {
return ( t2 * ) OPENSSL_sk_shift ( ( OPENSSL_STACK * ) sk ) ;
}
static ossl_inline void sk_ ## t1 ## _pop_free ( STACK_OF ( t1 ) * sk , sk_ ## t1 ## _freefunc freefunc ) {
OPENSSL_sk_pop_free ( ( OPENSSL_STACK * ) sk , ( OPENSSL_sk_freefunc ) freefunc ) ;
}
static ossl_inline int sk_ ## t1 ## _insert ( STACK_OF ( t1 ) * sk , t2 * ptr , int idx ) {
return OPENSSL_sk_insert ( ( OPENSSL_STACK * ) sk , ( const void * ) ptr , idx ) ;
}
static ossl_inline t2 * sk_ ## t1 ## _set ( STACK_OF ( t1 ) * sk , int idx , t2 * ptr ) {
return ( t2 * ) OPENSSL_sk_set ( ( OPENSSL_STACK * ) sk , idx , ( const void * ) ptr ) ;
}
static ossl_inline int sk_ ## t1 ## _find ( STACK_OF ( t1 ) * sk , t2 * ptr ) {
return OPENSSL_sk_find ( ( OPENSSL_STACK * ) sk , ( const void * ) ptr ) ;
}
static ossl_inline int sk_ ## t1 ## _find_ex ( STACK_OF ( t1 ) * sk , t2 * ptr ) {
return OPENSSL_sk_find_ex ( ( OPENSSL_STACK * ) sk , ( const void * ) ptr ) ;
}
static ossl_inline void sk_ ## t1 ## _sort ( STACK_OF ( t1 ) * sk ) {
OPENSSL_sk_sort ( ( OPENSSL_STACK * ) sk ) ;
}
static ossl_inline int sk_ ## t1 ## _is_sorted ( const STACK_OF ( t1 ) * sk ) {
return OPENSSL_sk_is_sorted ( ( const OPENSSL_STACK * ) sk ) ;
}
static ossl_inline STACK_OF ( t1 ) * sk_ ## t1 ## _dup ( const STACK_OF ( t1 ) * sk ) {
return ( STACK_OF ( t1 ) * ) OPENSSL_sk_dup ( ( const OPENSSL_STACK * ) sk ) ;
}
static ossl_inline STACK_OF ( t1 ) * sk_ ## t1 ## _deep_copy ( const STACK_OF ( t1 ) * sk , sk_ ## t1 ## _copyfunc copyfunc , sk_ ## t1 ## _freefunc freefunc ) {
return ( STACK_OF ( t1 ) * ) OPENSSL_sk_deep_copy ( ( const OPENSSL_STACK * ) sk , ( OPENSSL_sk_copyfunc ) copyfunc , ( OPENSSL_sk_freefunc ) freefunc ) ;
}
static ossl_inline sk_ ## t1 ## _compfunc sk_ ## t1 ## _set_cmp_func ( STACK_OF ( t1 ) * sk , sk_ ## t1 ## _compfunc compare ) {
return ( sk_ ## t1 ## _compfunc ) OPENSSL_sk_set_cmp_func ( ( OPENSSL_STACK * ) sk , ( OPENSSL_sk_compfunc ) compare ) ;
}
# define DEFINE_SPECIAL_STACK_OF ( t1 , t2 ) SKM_DEFINE_STACK_OF ( t1 , t2 , t2 ) # define DEFINE_STACK_OF ( t ) SKM_DEFINE_STACK_OF ( t , t , t ) # define DEFINE_SPECIAL_STACK_OF_CONST ( t1 , t2 ) SKM_DEFINE_STACK_OF ( t1 , const t2 , t2 ) # define DEFINE_STACK_OF_CONST ( t ) SKM_DEFINE_STACK_OF ( t , const t , t ) typedef char * OPENSSL_STRING ;
typedef const char * OPENSSL_CSTRING ;
DEFINE_SPECIAL_STACK_OF ( OPENSSL_STRING , char ) DEFINE_SPECIAL_STACK_OF_CONST ( OPENSSL_CSTRING , char ) typedef void * OPENSSL_BLOCK ;
DEFINE_SPECIAL_STACK_OF ( OPENSSL_BLOCK , void )
| 1True
|
Categorize the following code snippet as vulnerable or not. True or False
|
GType hb_gobject_ ## name ## _get_type ( void ) \ {
static gsize type_id = 0 ;
if ( g_once_init_enter ( & type_id ) ) {
GType id = g_boxed_type_register_static ( g_intern_static_string ( "hb_" # name "_t" ) , ( GBoxedCopyFunc ) copy_func , ( GBoxedFreeFunc ) free_func ) ;
g_once_init_leave ( & type_id , id ) ;
}
return type_id ;
\ }
# define HB_DEFINE_OBJECT_TYPE ( name ) HB_DEFINE_BOXED_TYPE ( name , hb_ ## name ## _reference , hb_ ## name ## _destroy ) ;
HB_DEFINE_OBJECT_TYPE ( buffer ) HB_DEFINE_OBJECT_TYPE ( blob ) HB_DEFINE_OBJECT_TYPE ( face ) HB_DEFINE_OBJECT_TYPE ( font ) HB_DEFINE_OBJECT_TYPE ( font_funcs ) HB_DEFINE_OBJECT_TYPE ( set )
| 1True
|
Categorize the following code snippet as vulnerable or not. True or False
|
static void show_object ( struct object * object , const struct name_path * path , const char * last , void * data ) {
struct bitmap * base = data ;
bitmap_set ( base , find_object_pos ( object -> oid . hash ) ) ;
mark_as_seen ( object ) ;
}
| 1True
|
Categorize the following code snippet as vulnerable or not. True or False
|
static void rac_normalise ( RangeCoder * c ) {
for ( ;
;
) {
c -> range <<= 8 ;
c -> low <<= 8 ;
if ( c -> src < c -> src_end ) {
c -> low |= * c -> src ++ ;
}
else if ( ! c -> low ) {
c -> got_error = 1 ;
return ;
}
if ( c -> range >= RAC_BOTTOM ) return ;
}
}
| 0False
|