instruction: string (1 unique value)
input: string (lengths 31 to 235k)
output: class label (2 classes)
Categorize the following code snippet as vulnerable or not. True or False
GType hb_gobject_ ## name ## _get_type ( void ) \ { static gsize type_id = 0 ; if ( g_once_init_enter ( & type_id ) ) { GType id = g_boxed_type_register_static ( g_intern_static_string ( "hb_" # name "_t" ) , ( GBoxedCopyFunc ) copy_func , ( GBoxedFreeFunc ) free_func ) ; g_once_init_leave ( & type_id , id ) ; } return type_id ; \ } # define HB_DEFINE_OBJECT_TYPE ( name ) HB_DEFINE_BOXED_TYPE ( name , hb_ ## name ## _reference , hb_ ## name ## _destroy ) ; # define HB_DEFINE_VALUE_TYPE ( name ) static hb_ ## name ## _t * _hb_ ## name ## _reference ( const hb_ ## name ## _t * l ) { hb_ ## name ## _t * c = ( hb_ ## name ## _t * ) calloc ( 1 , sizeof ( hb_ ## name ## _t ) ) ; if ( unlikely ( ! c ) ) return NULL ; * c = * l ; return c ; } static void _hb_ ## name ## _destroy ( hb_ ## name ## _t * l ) { free ( l ) ; } HB_DEFINE_BOXED_TYPE ( name , _hb_ ## name ## _reference , _hb_ ## name ## _destroy ) ; HB_DEFINE_OBJECT_TYPE ( buffer ) HB_DEFINE_OBJECT_TYPE ( blob ) HB_DEFINE_OBJECT_TYPE ( face ) HB_DEFINE_OBJECT_TYPE ( font ) HB_DEFINE_OBJECT_TYPE ( font_funcs )
False
Categorize the following code snippet as vulnerable or not. True or False
IN_PROC_BROWSER_TEST_F ( SessionRestorePageLoadMetricsBrowserTest , InitialVisibilityOfMultipleRestoredTabs ) { ui_test_utils : : NavigateToURL ( browser ( ) , GetTestURL ( ) ) ; ui_test_utils : : NavigateToURLWithDisposition ( browser ( ) , GetTestURL ( ) , WindowOpenDisposition : : NEW_BACKGROUND_TAB , ui_test_utils : : BROWSER_TEST_WAIT_FOR_NAVIGATION ) ; histogram_tester_ . ExpectTotalCount ( page_load_metrics : : internal : : kPageLoadStartedInForeground , 2 ) ; histogram_tester_ . ExpectBucketCount ( page_load_metrics : : internal : : kPageLoadStartedInForeground , false , 1 ) ; Browser * new_browser = QuitBrowserAndRestore ( browser ( ) ) ; ASSERT_NO_FATAL_FAILURE ( WaitForTabsToLoad ( new_browser ) ) ; TabStripModel * tab_strip = new_browser -> tab_strip_model ( ) ; ASSERT_TRUE ( tab_strip ) ; ASSERT_EQ ( 2 , tab_strip -> count ( ) ) ; histogram_tester_ . ExpectTotalCount ( page_load_metrics : : internal : : kPageLoadStartedInForeground , 4 ) ; histogram_tester_ . ExpectBucketCount ( page_load_metrics : : internal : : kPageLoadStartedInForeground , true , 2 ) ; histogram_tester_ . ExpectBucketCount ( page_load_metrics : : internal : : kPageLoadStartedInForeground , false , 2 ) ; }
False
Categorize the following code snippet as vulnerable or not. True or False
static void dissect_rsvp_juniper ( proto_tree * ti _U_ , proto_tree * rsvp_object_tree , tvbuff_t * tvb , int offset , int obj_length , int rsvp_class _U_ , int type ) { proto_item * hidden_item ; hidden_item = proto_tree_add_item ( rsvp_object_tree , hf_rsvp_filter [ RSVPF_JUNIPER ] , tvb , offset , obj_length , ENC_NA ) ; PROTO_ITEM_SET_HIDDEN ( hidden_item ) ; proto_tree_add_uint ( rsvp_object_tree , hf_rsvp_ctype , tvb , offset + 3 , 1 , type ) ; offset += 4 ; if ( type == 1 ) { guint tlvs , pad ; tlvs = tvb_get_ntohs ( tvb , offset ) ; proto_tree_add_item ( rsvp_object_tree , hf_rsvp_juniper_numtlvs , tvb , offset , 2 , ENC_BIG_ENDIAN ) ; offset += 2 ; pad = tvb_get_ntohs ( tvb , offset ) ; proto_tree_add_item ( rsvp_object_tree , hf_rsvp_juniper_padlength , tvb , offset , 2 , ENC_BIG_ENDIAN ) ; offset += 2 ; while ( tlvs > 0 ) { guint8 t , l ; t = tvb_get_guint8 ( tvb , offset ) ; proto_tree_add_item ( rsvp_object_tree , hf_rsvp_juniper_type , tvb , offset , 1 , ENC_BIG_ENDIAN ) ; offset += 1 ; l = tvb_get_guint8 ( tvb , offset ) ; proto_tree_add_item ( rsvp_object_tree , hf_rsvp_juniper_length , tvb , offset , 1 , ENC_BIG_ENDIAN ) ; offset += 1 ; switch ( t ) { case 0x01 : proto_tree_add_item ( rsvp_object_tree , hf_rsvp_juniper_attrib_cos , tvb , offset , l - 2 , ENC_BIG_ENDIAN ) ; offset += ( l - 2 ) ; break ; case 0x02 : proto_tree_add_item ( rsvp_object_tree , hf_rsvp_juniper_attrib_metric1 , tvb , offset , l - 2 , ENC_BIG_ENDIAN ) ; offset += ( l - 2 ) ; break ; case 0x04 : proto_tree_add_item ( rsvp_object_tree , hf_rsvp_juniper_attrib_metric2 , tvb , offset , l - 2 , ENC_BIG_ENDIAN ) ; offset += ( l - 2 ) ; break ; case 0x08 : proto_tree_add_item ( rsvp_object_tree , hf_rsvp_juniper_attrib_ccc_status , tvb , offset , l - 2 , ENC_BIG_ENDIAN ) ; offset += ( l - 2 ) ; break ; case 0x10 : proto_tree_add_item ( rsvp_object_tree , hf_rsvp_juniper_attrib_path , tvb , offset , l - 2 , ENC_BIG_ENDIAN ) ; offset += ( l - 2 ) ; break ; default : proto_tree_add_item ( rsvp_object_tree , hf_rsvp_juniper_attrib_unknown , tvb , offset , l - 2 , ENC_NA ) ; offset += ( l - 2 ) ; break ; } tlvs -- ; } proto_tree_add_item ( rsvp_object_tree , hf_rsvp_juniper_pad , tvb , offset , pad , ENC_NA ) ; } else if ( obj_length > 4 ) { proto_tree_add_item ( rsvp_object_tree , hf_rsvp_juniper_unknown , tvb , offset , obj_length , ENC_NA ) ; } }
False
Categorize the following code snippet as vulnerable or not. True or False
int recreate_table ( MI_CHECK * param , MI_INFO * * org_info , char * filename ) { int error ; MI_INFO info ; MYISAM_SHARE share ; MI_KEYDEF * keyinfo , * key , * key_end ; HA_KEYSEG * keysegs , * keyseg ; MI_COLUMNDEF * recdef , * rec , * end ; MI_UNIQUEDEF * uniquedef , * u_ptr , * u_end ; MI_STATUS_INFO status_info ; uint unpack , key_parts ; ha_rows max_records ; ulonglong file_length , tmp_length ; MI_CREATE_INFO create_info ; DBUG_ENTER ( "recreate_table" ) ; error = 1 ; info = * * org_info ; status_info = ( * org_info ) -> state [ 0 ] ; info . state = & status_info ; share = * ( * org_info ) -> s ; unpack = ( share . options & HA_OPTION_COMPRESS_RECORD ) && ( param -> testflag & T_UNPACK ) ; if ( ! ( keyinfo = ( MI_KEYDEF * ) my_alloca ( sizeof ( MI_KEYDEF ) * share . base . keys ) ) ) DBUG_RETURN ( 0 ) ; memcpy ( ( uchar * ) keyinfo , ( uchar * ) share . keyinfo , ( size_t ) ( sizeof ( MI_KEYDEF ) * share . base . keys ) ) ; key_parts = share . base . all_key_parts ; if ( ! ( keysegs = ( HA_KEYSEG * ) my_alloca ( sizeof ( HA_KEYSEG ) * ( key_parts + share . base . keys ) ) ) ) { my_afree ( ( uchar * ) keyinfo ) ; DBUG_RETURN ( 1 ) ; } if ( ! ( recdef = ( MI_COLUMNDEF * ) my_alloca ( sizeof ( MI_COLUMNDEF ) * ( share . base . fields + 1 ) ) ) ) { my_afree ( ( uchar * ) keyinfo ) ; my_afree ( ( uchar * ) keysegs ) ; DBUG_RETURN ( 1 ) ; } if ( ! ( uniquedef = ( MI_UNIQUEDEF * ) my_alloca ( sizeof ( MI_UNIQUEDEF ) * ( share . state . header . uniques + 1 ) ) ) ) { my_afree ( ( uchar * ) recdef ) ; my_afree ( ( uchar * ) keyinfo ) ; my_afree ( ( uchar * ) keysegs ) ; DBUG_RETURN ( 1 ) ; } memcpy ( ( uchar * ) recdef , ( uchar * ) share . rec , ( size_t ) ( sizeof ( MI_COLUMNDEF ) * ( share . base . fields + 1 ) ) ) ; for ( rec = recdef , end = recdef + share . base . fields ; rec != end ; rec ++ ) { if ( unpack && ! ( share . options & HA_OPTION_PACK_RECORD ) && rec -> type != FIELD_BLOB && rec -> type != FIELD_VARCHAR && rec -> type != FIELD_CHECK ) rec -> type = ( int ) FIELD_NORMAL ; } memcpy ( ( uchar * ) keysegs , ( uchar * ) share . keyparts , ( size_t ) ( sizeof ( HA_KEYSEG ) * ( key_parts + share . base . keys + share . state . header . uniques ) ) ) ; keyseg = keysegs ; for ( key = keyinfo , key_end = keyinfo + share . base . keys ; key != key_end ; key ++ ) { key -> seg = keyseg ; for ( ; keyseg -> type ; keyseg ++ ) { if ( param -> language ) keyseg -> language = param -> language ; } keyseg ++ ; } memcpy ( ( uchar * ) uniquedef , ( uchar * ) share . uniqueinfo , ( size_t ) ( sizeof ( MI_UNIQUEDEF ) * ( share . state . header . uniques ) ) ) ; for ( u_ptr = uniquedef , u_end = uniquedef + share . state . header . uniques ; u_ptr != u_end ; u_ptr ++ ) { u_ptr -> seg = keyseg ; keyseg += u_ptr -> keysegs + 1 ; } unpack = ( share . options & HA_OPTION_COMPRESS_RECORD ) && ( param -> testflag & T_UNPACK ) ; share . options &= ~ HA_OPTION_TEMP_COMPRESS_RECORD ; file_length = ( ulonglong ) mysql_file_seek ( info . dfile , 0L , MY_SEEK_END , MYF ( 0 ) ) ; tmp_length = file_length + file_length / 10 ; set_if_bigger ( file_length , param -> max_data_file_length ) ; set_if_bigger ( file_length , tmp_length ) ; set_if_bigger ( file_length , ( ulonglong ) share . base . max_data_file_length ) ; if ( share . options & HA_OPTION_COMPRESS_RECORD ) share . base . records = max_records = info . state -> records ; else if ( ! ( share . options & HA_OPTION_PACK_RECORD ) ) max_records = ( ha_rows ) ( file_length / share . base . 
pack_reclength ) ; else max_records = 0 ; ( void ) mi_close ( * org_info ) ; bzero ( ( char * ) & create_info , sizeof ( create_info ) ) ; create_info . max_rows = max_records ; create_info . reloc_rows = share . base . reloc ; create_info . old_options = ( share . options | ( unpack ? HA_OPTION_TEMP_COMPRESS_RECORD : 0 ) ) ; create_info . data_file_length = file_length ; create_info . auto_increment = share . state . auto_increment ; create_info . language = ( param -> language ? param -> language : share . state . header . language ) ; create_info . key_file_length = status_info . key_file_length ; create_info . with_auto_increment = TRUE ; if ( mi_create ( filename , share . base . keys - share . state . header . uniques , keyinfo , share . base . fields , recdef , share . state . header . uniques , uniquedef , & create_info , HA_DONT_TOUCH_DATA ) ) { mi_check_print_error ( param , "Got error %d when trying to recreate indexfile" , my_errno ) ; goto end ; } * org_info = mi_open ( filename , O_RDWR , ( param -> testflag & T_WAIT_FOREVER ) ? HA_OPEN_WAIT_IF_LOCKED : ( param -> testflag & T_DESCRIPT ) ? HA_OPEN_IGNORE_IF_LOCKED : HA_OPEN_ABORT_IF_LOCKED ) ; if ( ! * org_info ) { mi_check_print_error ( param , "Got error %d when trying to open re-created indexfile" , my_errno ) ; goto end ; } ( * org_info ) -> s -> options &= ~ HA_OPTION_READ_ONLY_DATA ; ( void ) _mi_readinfo ( * org_info , F_WRLCK , 0 ) ; ( * org_info ) -> state -> records = info . state -> records ; if ( share . state . create_time ) ( * org_info ) -> s -> state . create_time = share . state . create_time ; ( * org_info ) -> s -> state . unique = ( * org_info ) -> this_unique = share . state . unique ; ( * org_info ) -> state -> checksum = info . state -> checksum ; ( * org_info ) -> state -> del = info . state -> del ; ( * org_info ) -> s -> state . dellink = share . state . dellink ; ( * org_info ) -> state -> empty = info . state -> empty ; ( * org_info ) -> state -> data_file_length = info . state -> data_file_length ; if ( update_state_info ( param , * org_info , UPDATE_TIME | UPDATE_STAT | UPDATE_OPEN_COUNT ) ) goto end ; error = 0 ; end : my_afree ( ( uchar * ) uniquedef ) ; my_afree ( ( uchar * ) keyinfo ) ; my_afree ( ( uchar * ) recdef ) ; my_afree ( ( uchar * ) keysegs ) ; DBUG_RETURN ( error ) ; }
False
Categorize the following code snippet as vulnerable or not. True or False
static char * pathToFullPath ( const char * path , const char * source ) { int32_t length ; int32_t newLength ; char * fullPath ; int32_t n ; length = ( uint32_t ) ( uprv_strlen ( path ) + 1 ) ; newLength = ( length + 1 + ( int32_t ) uprv_strlen ( source ) ) ; fullPath = ( char * ) uprv_malloc ( newLength ) ; if ( source != NULL ) { uprv_strcpy ( fullPath , source ) ; uprv_strcat ( fullPath , U_FILE_SEP_STRING ) ; } else { fullPath [ 0 ] = 0 ; } n = ( int32_t ) uprv_strlen ( fullPath ) ; fullPath [ n ] = 0 ; uprv_strcat ( fullPath , path ) ; # if ( U_FILE_ALT_SEP_CHAR != U_TREE_ENTRY_SEP_CHAR ) # if ( U_FILE_ALT_SEP_CHAR != U_FILE_SEP_CHAR ) for ( ; fullPath [ n ] ; n ++ ) { if ( fullPath [ n ] == U_FILE_ALT_SEP_CHAR ) { fullPath [ n ] = U_FILE_SEP_CHAR ; } } # endif # endif # if ( U_FILE_SEP_CHAR != U_TREE_ENTRY_SEP_CHAR ) for ( ; fullPath [ n ] ; n ++ ) { if ( fullPath [ n ] == U_TREE_ENTRY_SEP_CHAR ) { fullPath [ n ] = U_FILE_SEP_CHAR ; } } # endif return fullPath ; }
False
Categorize the following code snippet as vulnerable or not. True or False
int getnetnum ( const char * hname , sockaddr_u * num , char * fullhost , int af ) { struct addrinfo hints , * ai = NULL ; ZERO ( hints ) ; hints . ai_flags = AI_CANONNAME ; # ifdef AI_ADDRCONFIG hints . ai_flags |= AI_ADDRCONFIG ; # endif if ( decodenetnum ( hname , num ) ) { if ( fullhost != NULL ) getnameinfo ( & num -> sa , SOCKLEN ( num ) , fullhost , LENHOSTNAME , NULL , 0 , 0 ) ; return 1 ; } else if ( getaddrinfo ( hname , "ntp" , & hints , & ai ) == 0 ) { INSIST ( sizeof ( * num ) >= ai -> ai_addrlen ) ; memcpy ( num , ai -> ai_addr , ai -> ai_addrlen ) ; if ( fullhost != NULL ) { if ( ai -> ai_canonname != NULL ) strlcpy ( fullhost , ai -> ai_canonname , LENHOSTNAME ) ; else getnameinfo ( & num -> sa , SOCKLEN ( num ) , fullhost , LENHOSTNAME , NULL , 0 , 0 ) ; } freeaddrinfo ( ai ) ; return 1 ; } fprintf ( stderr , "***Can't find host %s\n" , hname ) ; return 0 ; }
False
Categorize the following code snippet as vulnerable or not. True or False
void ff_MPV_common_end ( MpegEncContext * s ) { int i ; if ( s -> slice_context_count > 1 ) { for ( i = 0 ; i < s -> slice_context_count ; i ++ ) { free_duplicate_context ( s -> thread_context [ i ] ) ; } for ( i = 1 ; i < s -> slice_context_count ; i ++ ) { av_freep ( & s -> thread_context [ i ] ) ; } s -> slice_context_count = 1 ; } else free_duplicate_context ( s ) ; av_freep ( & s -> parse_context . buffer ) ; s -> parse_context . buffer_size = 0 ; av_freep ( & s -> bitstream_buffer ) ; s -> allocated_bitstream_buffer_size = 0 ; av_freep ( & s -> avctx -> stats_out ) ; av_freep ( & s -> ac_stats ) ; av_freep ( & s -> q_intra_matrix ) ; av_freep ( & s -> q_inter_matrix ) ; av_freep ( & s -> q_intra_matrix16 ) ; av_freep ( & s -> q_inter_matrix16 ) ; av_freep ( & s -> input_picture ) ; av_freep ( & s -> reordered_input_picture ) ; av_freep ( & s -> dct_offset ) ; if ( s -> picture ) { for ( i = 0 ; i < MAX_PICTURE_COUNT ; i ++ ) { free_picture_tables ( & s -> picture [ i ] ) ; ff_mpeg_unref_picture ( s , & s -> picture [ i ] ) ; } } av_freep ( & s -> picture ) ; free_picture_tables ( & s -> last_picture ) ; ff_mpeg_unref_picture ( s , & s -> last_picture ) ; free_picture_tables ( & s -> current_picture ) ; ff_mpeg_unref_picture ( s , & s -> current_picture ) ; free_picture_tables ( & s -> next_picture ) ; ff_mpeg_unref_picture ( s , & s -> next_picture ) ; free_picture_tables ( & s -> new_picture ) ; ff_mpeg_unref_picture ( s , & s -> new_picture ) ; free_context_frame ( s ) ; s -> context_initialized = 0 ; s -> last_picture_ptr = s -> next_picture_ptr = s -> current_picture_ptr = NULL ; s -> linesize = s -> uvlinesize = 0 ; }
False
Categorize the following code snippet as vulnerable or not. True or False
static int lag_decode_line ( LagarithContext * l , lag_rac * rac , uint8_t * dst , int width , int stride , int esc_count ) { int i = 0 ; int ret = 0 ; if ( ! esc_count ) esc_count = - 1 ; handle_zeros : if ( l -> zeros_rem ) { int count = FFMIN ( l -> zeros_rem , width - i ) ; memset ( dst + i , 0 , count ) ; i += count ; l -> zeros_rem -= count ; } while ( i < width ) { dst [ i ] = lag_get_rac ( rac ) ; ret ++ ; if ( dst [ i ] ) l -> zeros = 0 ; else l -> zeros ++ ; i ++ ; if ( l -> zeros == esc_count ) { int index = lag_get_rac ( rac ) ; ret ++ ; l -> zeros = 0 ; l -> zeros_rem = lag_calc_zero_run ( index ) ; goto handle_zeros ; } } return ret ; }
False
Categorize the following code snippet as vulnerable or not. True or False
void qdev_init_gpio_out ( DeviceState * dev , qemu_irq * pins , int n ) { assert ( dev -> num_gpio_out == 0 ) ; dev -> num_gpio_out = n ; dev -> gpio_out = pins ; }
False
Categorize the following code snippet as vulnerable or not. True or False
static inline void set_intra_mode_default ( AVSContext * h ) { if ( h -> stream_revision > 0 ) { h -> pred_mode_Y [ 3 ] = h -> pred_mode_Y [ 6 ] = NOT_AVAIL ; h -> top_pred_Y [ h -> mbx * 2 + 0 ] = h -> top_pred_Y [ h -> mbx * 2 + 1 ] = NOT_AVAIL ; } else { h -> pred_mode_Y [ 3 ] = h -> pred_mode_Y [ 6 ] = INTRA_L_LP ; h -> top_pred_Y [ h -> mbx * 2 + 0 ] = h -> top_pred_Y [ h -> mbx * 2 + 1 ] = INTRA_L_LP ; } }
False
Categorize the following code snippet as vulnerable or not. True or False
KEYDB_HANDLE keydb_new ( void ) { KEYDB_HANDLE hd ; int i , j ; if ( DBG_CLOCK ) log_clock ( "keydb_new" ) ; hd = xmalloc_clear ( sizeof * hd ) ; hd -> found = - 1 ; assert ( used_resources <= MAX_KEYDB_RESOURCES ) ; for ( i = j = 0 ; i < used_resources ; i ++ ) { switch ( all_resources [ i ] . type ) { case KEYDB_RESOURCE_TYPE_NONE : break ; case KEYDB_RESOURCE_TYPE_KEYRING : hd -> active [ j ] . type = all_resources [ i ] . type ; hd -> active [ j ] . token = all_resources [ i ] . token ; hd -> active [ j ] . u . kr = keyring_new ( all_resources [ i ] . token ) ; if ( ! hd -> active [ j ] . u . kr ) { xfree ( hd ) ; return NULL ; } j ++ ; break ; case KEYDB_RESOURCE_TYPE_KEYBOX : hd -> active [ j ] . type = all_resources [ i ] . type ; hd -> active [ j ] . token = all_resources [ i ] . token ; hd -> active [ j ] . u . kb = keybox_new_openpgp ( all_resources [ i ] . token , 0 ) ; if ( ! hd -> active [ j ] . u . kb ) { xfree ( hd ) ; return NULL ; } j ++ ; break ; } } hd -> used = j ; active_handles ++ ; return hd ; }
False
Categorize the following code snippet as vulnerable or not. True or False
static gchar get_priority ( const guint8 priority ) { static gchar priorities [ ] = "??VDIWEFS" ; if ( priority >= ( guint8 ) sizeof ( priorities ) ) return '?' ; return priorities [ priority ] ; }
False
Categorize the following code snippet as vulnerable or not. True or False
VirtIOS390Device * s390_virtio_bus_find_vring ( VirtIOS390Bus * bus , ram_addr_t mem , int * vq_num ) { VirtIOS390Device * _dev ; DeviceState * dev ; int i ; QLIST_FOREACH ( dev , & bus -> bus . children , sibling ) { _dev = ( VirtIOS390Device * ) dev ; for ( i = 0 ; i < VIRTIO_PCI_QUEUE_MAX ; i ++ ) { if ( ! virtio_queue_get_addr ( _dev -> vdev , i ) ) break ; if ( virtio_queue_get_addr ( _dev -> vdev , i ) == mem ) { if ( vq_num ) { * vq_num = i ; } return _dev ; } } } return NULL ; }
False
Categorize the following code snippet as vulnerable or not. True or False
static void cdxl_decode_rgb ( CDXLVideoContext * c , AVFrame * frame ) { uint32_t * new_palette = ( uint32_t * ) frame -> data [ 1 ] ; import_palette ( c , new_palette ) ; import_format ( c , frame -> linesize [ 0 ] , frame -> data [ 0 ] ) ; }
False
Categorize the following code snippet as vulnerable or not. True or False
static int dissect_h245_Mc_type ( tvbuff_t * tvb _U_ , int offset _U_ , asn1_ctx_t * actx _U_ , proto_tree * tree _U_ , int hf_index _U_ ) { offset = dissect_per_choice ( tvb , offset , actx , tree , hf_index , ett_h245_Mc_type , Mc_type_choice , NULL ) ; return offset ; }
False
Categorize the following code snippet as vulnerable or not. True or False
static inline int check_for_slice ( AVSContext * h ) { GetBitContext * gb = & h -> gb ; int align ; if ( h -> mbx ) return 0 ; align = ( - get_bits_count ( gb ) ) & 7 ; if ( ! align && ( show_bits ( gb , 8 ) == 0x80 ) ) align = 8 ; if ( ( show_bits_long ( gb , 24 + align ) & 0xFFFFFF ) == 0x000001 ) { skip_bits_long ( gb , 24 + align ) ; h -> stc = get_bits ( gb , 8 ) ; if ( h -> stc >= h -> mb_height ) return 0 ; decode_slice_header ( h , gb ) ; return 1 ; } return 0 ; }
False
Categorize the following code snippet as vulnerable or not. True or False
static void e1000e_set_eewr ( E1000ECore * core , int index , uint32_t val ) { uint32_t addr = ( val >> E1000_EERW_ADDR_SHIFT ) & E1000_EERW_ADDR_MASK ; uint32_t data = ( val >> E1000_EERW_DATA_SHIFT ) & E1000_EERW_DATA_MASK ; uint32_t flags = 0 ; if ( ( addr < E1000E_EEPROM_SIZE ) && ( val & E1000_EERW_START ) ) { core -> eeprom [ addr ] = data ; flags = E1000_EERW_DONE ; } core -> mac [ EERD ] = flags | ( addr << E1000_EERW_ADDR_SHIFT ) | ( data << E1000_EERW_DATA_SHIFT ) ; }
False
Categorize the following code snippet as vulnerable or not. True or False
static void vc1_apply_p_loop_filter ( VC1Context * v ) { MpegEncContext * s = & v -> s ; int i ; for ( i = 0 ; i < 6 ; i ++ ) { vc1_apply_p_v_loop_filter ( v , i ) ; } if ( s -> mb_x ) { for ( i = 0 ; i < 6 ; i ++ ) { vc1_apply_p_h_loop_filter ( v , i ) ; } if ( s -> mb_x == s -> mb_width - 1 ) { s -> mb_x ++ ; ff_update_block_index ( s ) ; for ( i = 0 ; i < 6 ; i ++ ) { vc1_apply_p_h_loop_filter ( v , i ) ; } } } }
False
Categorize the following code snippet as vulnerable or not. True or False
void free_pointer_array ( POINTER_ARRAY * pa ) { if ( pa -> typelib . count ) { pa -> typelib . count = 0 ; my_free ( pa -> typelib . type_names ) ; pa -> typelib . type_names = 0 ; my_free ( pa -> str ) ; } }
False
Categorize the following code snippet as vulnerable or not. True or False
hb_blob_t * hb_blob_create ( const char * data , unsigned int length , hb_memory_mode_t mode , void * user_data , hb_destroy_func_t destroy ) { hb_blob_t * blob ; if ( ! length || length >= 1u << 31 || data + length < data || ! ( blob = hb_object_create < hb_blob_t > ( ) ) ) { if ( destroy ) destroy ( user_data ) ; return hb_blob_get_empty ( ) ; } blob -> data = data ; blob -> length = length ; blob -> mode = mode ; blob -> user_data = user_data ; blob -> destroy = destroy ; if ( blob -> mode == HB_MEMORY_MODE_DUPLICATE ) { blob -> mode = HB_MEMORY_MODE_READONLY ; if ( ! _try_writable ( blob ) ) { hb_blob_destroy ( blob ) ; return hb_blob_get_empty ( ) ; } } return blob ; }
False
Categorize the following code snippet as vulnerable or not. True or False
static int dissect_h225_OCTET_STRING_SIZE_1_20 ( tvbuff_t * tvb _U_ , int offset _U_ , asn1_ctx_t * actx _U_ , proto_tree * tree _U_ , int hf_index _U_ ) { offset = dissect_per_octet_string ( tvb , offset , actx , tree , hf_index , 1 , 20 , FALSE , NULL ) ; return offset ; }
False
Categorize the following code snippet as vulnerable or not. True or False
int ssl3_get_record ( SSL * s ) { int ssl_major , ssl_minor , al ; int enc_err , n , i , ret = - 1 ; SSL3_RECORD * rr ; SSL3_BUFFER * rbuf ; SSL_SESSION * sess ; unsigned char * p ; unsigned char md [ EVP_MAX_MD_SIZE ] ; short version ; unsigned mac_size ; unsigned int num_recs = 0 ; unsigned int max_recs ; unsigned int j ; rr = RECORD_LAYER_get_rrec ( & s -> rlayer ) ; rbuf = RECORD_LAYER_get_rbuf ( & s -> rlayer ) ; max_recs = s -> max_pipelines ; if ( max_recs == 0 ) max_recs = 1 ; sess = s -> session ; do { if ( ( RECORD_LAYER_get_rstate ( & s -> rlayer ) != SSL_ST_READ_BODY ) || ( RECORD_LAYER_get_packet_length ( & s -> rlayer ) < SSL3_RT_HEADER_LENGTH ) ) { n = ssl3_read_n ( s , SSL3_RT_HEADER_LENGTH , SSL3_BUFFER_get_len ( rbuf ) , 0 , num_recs == 0 ? 1 : 0 ) ; if ( n <= 0 ) return ( n ) ; RECORD_LAYER_set_rstate ( & s -> rlayer , SSL_ST_READ_BODY ) ; p = RECORD_LAYER_get_packet ( & s -> rlayer ) ; if ( s -> server && RECORD_LAYER_is_first_record ( & s -> rlayer ) && ( p [ 0 ] & 0x80 ) && ( p [ 2 ] == SSL2_MT_CLIENT_HELLO ) ) { rr [ num_recs ] . type = SSL3_RT_HANDSHAKE ; rr [ num_recs ] . rec_version = SSL2_VERSION ; rr [ num_recs ] . length = ( ( p [ 0 ] & 0x7f ) << 8 ) | p [ 1 ] ; if ( rr [ num_recs ] . length > SSL3_BUFFER_get_len ( rbuf ) - SSL2_RT_HEADER_LENGTH ) { al = SSL_AD_RECORD_OVERFLOW ; SSLerr ( SSL_F_SSL3_GET_RECORD , SSL_R_PACKET_LENGTH_TOO_LONG ) ; goto f_err ; } if ( rr [ num_recs ] . length < MIN_SSL2_RECORD_LEN ) { al = SSL_AD_HANDSHAKE_FAILURE ; SSLerr ( SSL_F_SSL3_GET_RECORD , SSL_R_LENGTH_TOO_SHORT ) ; goto f_err ; } } else { if ( s -> msg_callback ) s -> msg_callback ( 0 , 0 , SSL3_RT_HEADER , p , 5 , s , s -> msg_callback_arg ) ; rr [ num_recs ] . type = * ( p ++ ) ; ssl_major = * ( p ++ ) ; ssl_minor = * ( p ++ ) ; version = ( ssl_major << 8 ) | ssl_minor ; rr [ num_recs ] . rec_version = version ; n2s ( p , rr [ num_recs ] . length ) ; if ( ! s -> first_packet && version != s -> version ) { SSLerr ( SSL_F_SSL3_GET_RECORD , SSL_R_WRONG_VERSION_NUMBER ) ; if ( ( s -> version & 0xFF00 ) == ( version & 0xFF00 ) && ! s -> enc_write_ctx && ! s -> write_hash ) { if ( rr -> type == SSL3_RT_ALERT ) { goto err ; } s -> version = ( unsigned short ) version ; } al = SSL_AD_PROTOCOL_VERSION ; goto f_err ; } if ( ( version >> 8 ) != SSL3_VERSION_MAJOR ) { if ( RECORD_LAYER_is_first_record ( & s -> rlayer ) ) { p = RECORD_LAYER_get_packet ( & s -> rlayer ) ; if ( strncmp ( ( char * ) p , "GET " , 4 ) == 0 || strncmp ( ( char * ) p , "POST " , 5 ) == 0 || strncmp ( ( char * ) p , "HEAD " , 5 ) == 0 || strncmp ( ( char * ) p , "PUT " , 4 ) == 0 ) { SSLerr ( SSL_F_SSL3_GET_RECORD , SSL_R_HTTP_REQUEST ) ; goto err ; } else if ( strncmp ( ( char * ) p , "CONNE" , 5 ) == 0 ) { SSLerr ( SSL_F_SSL3_GET_RECORD , SSL_R_HTTPS_PROXY_REQUEST ) ; goto err ; } SSLerr ( SSL_F_SSL3_GET_RECORD , SSL_R_WRONG_VERSION_NUMBER ) ; goto err ; } else { SSLerr ( SSL_F_SSL3_GET_RECORD , SSL_R_WRONG_VERSION_NUMBER ) ; al = SSL_AD_PROTOCOL_VERSION ; goto f_err ; } } if ( rr [ num_recs ] . length > SSL3_BUFFER_get_len ( rbuf ) - SSL3_RT_HEADER_LENGTH ) { al = SSL_AD_RECORD_OVERFLOW ; SSLerr ( SSL_F_SSL3_GET_RECORD , SSL_R_PACKET_LENGTH_TOO_LONG ) ; goto f_err ; } } } if ( rr [ num_recs ] . rec_version == SSL2_VERSION ) { i = rr [ num_recs ] . length + SSL2_RT_HEADER_LENGTH - SSL3_RT_HEADER_LENGTH ; } else { i = rr [ num_recs ] . 
length ; } if ( i > 0 ) { n = ssl3_read_n ( s , i , i , 1 , 0 ) ; if ( n <= 0 ) return ( n ) ; } RECORD_LAYER_set_rstate ( & s -> rlayer , SSL_ST_READ_HEADER ) ; if ( rr [ num_recs ] . rec_version == SSL2_VERSION ) { rr [ num_recs ] . input = & ( RECORD_LAYER_get_packet ( & s -> rlayer ) [ SSL2_RT_HEADER_LENGTH ] ) ; } else { rr [ num_recs ] . input = & ( RECORD_LAYER_get_packet ( & s -> rlayer ) [ SSL3_RT_HEADER_LENGTH ] ) ; } if ( rr [ num_recs ] . length > SSL3_RT_MAX_ENCRYPTED_LENGTH ) { al = SSL_AD_RECORD_OVERFLOW ; SSLerr ( SSL_F_SSL3_GET_RECORD , SSL_R_ENCRYPTED_LENGTH_TOO_LONG ) ; goto f_err ; } rr [ num_recs ] . data = rr [ num_recs ] . input ; rr [ num_recs ] . orig_len = rr [ num_recs ] . length ; rr [ num_recs ] . read = 0 ; num_recs ++ ; RECORD_LAYER_reset_packet_length ( & s -> rlayer ) ; RECORD_LAYER_clear_first_record ( & s -> rlayer ) ; } while ( num_recs < max_recs && rr [ num_recs - 1 ] . type == SSL3_RT_APPLICATION_DATA && SSL_USE_EXPLICIT_IV ( s ) && s -> enc_read_ctx != NULL && ( EVP_CIPHER_flags ( EVP_CIPHER_CTX_cipher ( s -> enc_read_ctx ) ) & EVP_CIPH_FLAG_PIPELINE ) && ssl3_record_app_data_waiting ( s ) ) ; if ( SSL_USE_ETM ( s ) && s -> read_hash ) { unsigned char * mac ; mac_size = EVP_MD_CTX_size ( s -> read_hash ) ; OPENSSL_assert ( mac_size <= EVP_MAX_MD_SIZE ) ; for ( j = 0 ; j < num_recs ; j ++ ) { if ( rr [ j ] . length < mac_size ) { al = SSL_AD_DECODE_ERROR ; SSLerr ( SSL_F_SSL3_GET_RECORD , SSL_R_LENGTH_TOO_SHORT ) ; goto f_err ; } rr [ j ] . length -= mac_size ; mac = rr [ j ] . data + rr [ j ] . length ; i = s -> method -> ssl3_enc -> mac ( s , & rr [ j ] , md , 0 ) ; if ( i < 0 || CRYPTO_memcmp ( md , mac , ( size_t ) mac_size ) != 0 ) { al = SSL_AD_BAD_RECORD_MAC ; SSLerr ( SSL_F_SSL3_GET_RECORD , SSL_R_DECRYPTION_FAILED_OR_BAD_RECORD_MAC ) ; goto f_err ; } } } enc_err = s -> method -> ssl3_enc -> enc ( s , rr , num_recs , 0 ) ; if ( enc_err == 0 ) { al = SSL_AD_DECRYPTION_FAILED ; SSLerr ( SSL_F_SSL3_GET_RECORD , SSL_R_BLOCK_CIPHER_PAD_IS_WRONG ) ; goto f_err ; } # ifdef SSL_DEBUG printf ( "dec %d\n" , rr -> length ) ; { unsigned int z ; for ( z = 0 ; z < rr -> length ; z ++ ) printf ( "%02X%c" , rr -> data [ z ] , ( ( z + 1 ) % 16 ) ? ' ' : '\n' ) ; } printf ( "\n" ) ; # endif if ( ( sess != NULL ) && ( s -> enc_read_ctx != NULL ) && ( EVP_MD_CTX_md ( s -> read_hash ) != NULL ) && ! SSL_USE_ETM ( s ) ) { unsigned char * mac = NULL ; unsigned char mac_tmp [ EVP_MAX_MD_SIZE ] ; mac_size = EVP_MD_CTX_size ( s -> read_hash ) ; OPENSSL_assert ( mac_size <= EVP_MAX_MD_SIZE ) ; for ( j = 0 ; j < num_recs ; j ++ ) { if ( rr [ j ] . orig_len < mac_size || ( EVP_CIPHER_CTX_mode ( s -> enc_read_ctx ) == EVP_CIPH_CBC_MODE && rr [ j ] . orig_len < mac_size + 1 ) ) { al = SSL_AD_DECODE_ERROR ; SSLerr ( SSL_F_SSL3_GET_RECORD , SSL_R_LENGTH_TOO_SHORT ) ; goto f_err ; } if ( EVP_CIPHER_CTX_mode ( s -> enc_read_ctx ) == EVP_CIPH_CBC_MODE ) { mac = mac_tmp ; ssl3_cbc_copy_mac ( mac_tmp , & rr [ j ] , mac_size ) ; rr [ j ] . length -= mac_size ; } else { rr [ j ] . length -= mac_size ; mac = & rr [ j ] . data [ rr [ j ] . 
length ] ; } i = s -> method -> ssl3_enc -> mac ( s , & rr [ j ] , md , 0 ) ; if ( i < 0 || mac == NULL || CRYPTO_memcmp ( md , mac , ( size_t ) mac_size ) != 0 ) enc_err = - 1 ; if ( rr -> length > SSL3_RT_MAX_COMPRESSED_LENGTH + mac_size ) enc_err = - 1 ; } } if ( enc_err < 0 ) { al = SSL_AD_BAD_RECORD_MAC ; SSLerr ( SSL_F_SSL3_GET_RECORD , SSL_R_DECRYPTION_FAILED_OR_BAD_RECORD_MAC ) ; goto f_err ; } for ( j = 0 ; j < num_recs ; j ++ ) { if ( s -> expand != NULL ) { if ( rr [ j ] . length > SSL3_RT_MAX_COMPRESSED_LENGTH ) { al = SSL_AD_RECORD_OVERFLOW ; SSLerr ( SSL_F_SSL3_GET_RECORD , SSL_R_COMPRESSED_LENGTH_TOO_LONG ) ; goto f_err ; } if ( ! ssl3_do_uncompress ( s , & rr [ j ] ) ) { al = SSL_AD_DECOMPRESSION_FAILURE ; SSLerr ( SSL_F_SSL3_GET_RECORD , SSL_R_BAD_DECOMPRESSION ) ; goto f_err ; } } if ( rr [ j ] . length > SSL3_RT_MAX_PLAIN_LENGTH ) { al = SSL_AD_RECORD_OVERFLOW ; SSLerr ( SSL_F_SSL3_GET_RECORD , SSL_R_DATA_LENGTH_TOO_LONG ) ; goto f_err ; } rr [ j ] . off = 0 ; if ( rr [ j ] . length == 0 ) { RECORD_LAYER_inc_empty_record_count ( & s -> rlayer ) ; if ( RECORD_LAYER_get_empty_record_count ( & s -> rlayer ) > MAX_EMPTY_RECORDS ) { al = SSL_AD_UNEXPECTED_MESSAGE ; SSLerr ( SSL_F_SSL3_GET_RECORD , SSL_R_RECORD_TOO_SMALL ) ; goto f_err ; } } else { RECORD_LAYER_reset_empty_record_count ( & s -> rlayer ) ; } } RECORD_LAYER_set_numrpipes ( & s -> rlayer , num_recs ) ; return 1 ; f_err : ssl3_send_alert ( s , SSL3_AL_FATAL , al ) ; err : return ret ; }
False
Categorize the following code snippet as vulnerable or not. True or False
void remoteDispatchOOMError ( remote_error * rerr ) { remoteDispatchStringError ( rerr , VIR_ERR_NO_MEMORY , "out of memory" ) ; }
False
Categorize the following code snippet as vulnerable or not. True or False
static int mimic_decode_frame ( AVCodecContext * avctx , void * data , int * got_frame , AVPacket * avpkt ) { const uint8_t * buf = avpkt -> data ; int buf_size = avpkt -> size ; int swap_buf_size = buf_size - MIMIC_HEADER_SIZE ; MimicContext * ctx = avctx -> priv_data ; GetByteContext gb ; int is_pframe ; int width , height ; int quality , num_coeffs ; int res ; if ( buf_size <= MIMIC_HEADER_SIZE ) { av_log ( avctx , AV_LOG_ERROR , "insufficient data\n" ) ; return AVERROR_INVALIDDATA ; } bytestream2_init ( & gb , buf , MIMIC_HEADER_SIZE ) ; bytestream2_skip ( & gb , 2 ) ; quality = bytestream2_get_le16u ( & gb ) ; width = bytestream2_get_le16u ( & gb ) ; height = bytestream2_get_le16u ( & gb ) ; bytestream2_skip ( & gb , 4 ) ; is_pframe = bytestream2_get_le32u ( & gb ) ; num_coeffs = bytestream2_get_byteu ( & gb ) ; bytestream2_skip ( & gb , 3 ) ; if ( ! ctx -> avctx ) { int i ; if ( ! ( width == 160 && height == 120 ) && ! ( width == 320 && height == 240 ) ) { av_log ( avctx , AV_LOG_ERROR , "invalid width/height!\n" ) ; return AVERROR_INVALIDDATA ; } ctx -> avctx = avctx ; avctx -> width = width ; avctx -> height = height ; avctx -> pix_fmt = AV_PIX_FMT_YUV420P ; for ( i = 0 ; i < 3 ; i ++ ) { ctx -> num_vblocks [ i ] = - ( ( - height ) >> ( 3 + ! ! i ) ) ; ctx -> num_hblocks [ i ] = width >> ( 3 + ! ! i ) ; } } else if ( width != ctx -> avctx -> width || height != ctx -> avctx -> height ) { av_log_missing_feature ( avctx , "resolution changing" , 1 ) ; return AVERROR_PATCHWELCOME ; } if ( is_pframe && ! ctx -> buf_ptrs [ ctx -> prev_index ] . data [ 0 ] ) { av_log ( avctx , AV_LOG_ERROR , "decoding must start with keyframe\n" ) ; return AVERROR_INVALIDDATA ; } ctx -> buf_ptrs [ ctx -> cur_index ] . reference = 1 ; ctx -> buf_ptrs [ ctx -> cur_index ] . pict_type = is_pframe ? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I ; if ( ( res = ff_thread_get_buffer ( avctx , & ctx -> buf_ptrs [ ctx -> cur_index ] ) ) < 0 ) { av_log ( avctx , AV_LOG_ERROR , "get_buffer() failed\n" ) ; return res ; } ctx -> next_prev_index = ctx -> cur_index ; ctx -> next_cur_index = ( ctx -> cur_index - 1 ) & 15 ; prepare_avpic ( ctx , & ctx -> flipped_ptrs [ ctx -> cur_index ] , & ctx -> buf_ptrs [ ctx -> cur_index ] ) ; ff_thread_finish_setup ( avctx ) ; av_fast_padded_malloc ( & ctx -> swap_buf , & ctx -> swap_buf_size , swap_buf_size ) ; if ( ! ctx -> swap_buf ) return AVERROR ( ENOMEM ) ; ctx -> dsp . bswap_buf ( ctx -> swap_buf , ( const uint32_t * ) ( buf + MIMIC_HEADER_SIZE ) , swap_buf_size >> 2 ) ; init_get_bits ( & ctx -> gb , ctx -> swap_buf , swap_buf_size << 3 ) ; res = decode ( ctx , quality , num_coeffs , ! is_pframe ) ; ff_thread_report_progress ( & ctx -> buf_ptrs [ ctx -> cur_index ] , INT_MAX , 0 ) ; if ( res < 0 ) { if ( ! ( avctx -> active_thread_type & FF_THREAD_FRAME ) ) { ff_thread_release_buffer ( avctx , & ctx -> buf_ptrs [ ctx -> cur_index ] ) ; return res ; } } * ( AVFrame * ) data = ctx -> buf_ptrs [ ctx -> cur_index ] ; * got_frame = 1 ; ctx -> prev_index = ctx -> next_prev_index ; ctx -> cur_index = ctx -> next_cur_index ; if ( ctx -> buf_ptrs [ ctx -> cur_index ] . data [ 0 ] ) ff_thread_release_buffer ( avctx , & ctx -> buf_ptrs [ ctx -> cur_index ] ) ; return buf_size ; }
True
Categorize the following code snippet as vulnerable or not. True or False
static bool cache_thread ( ) { mysql_mutex_assert_owner ( & LOCK_thread_count ) ; if ( cached_thread_count < thread_cache_size && ! abort_loop && ! kill_cached_threads ) { DBUG_PRINT ( "info" , ( "Adding thread to cache" ) ) ; cached_thread_count ++ ; # ifdef HAVE_PSI_INTERFACE if ( likely ( PSI_server != NULL ) ) PSI_server -> delete_current_thread ( ) ; # endif while ( ! abort_loop && ! wake_thread && ! kill_cached_threads ) mysql_cond_wait ( & COND_thread_cache , & LOCK_thread_count ) ; cached_thread_count -- ; if ( kill_cached_threads ) mysql_cond_signal ( & COND_flush_thread_cache ) ; if ( wake_thread ) { THD * thd ; wake_thread -- ; thd = thread_cache . get ( ) ; thd -> thread_stack = ( char * ) & thd ; ( void ) thd -> store_globals ( ) ; # ifdef HAVE_PSI_INTERFACE if ( likely ( PSI_server != NULL ) ) { PSI_thread * psi = PSI_server -> new_thread ( key_thread_one_connection , thd , thd -> thread_id ) ; if ( likely ( psi != NULL ) ) PSI_server -> set_thread ( psi ) ; } # endif thd -> mysys_var -> abort = 0 ; thd -> thr_create_utime = microsecond_interval_timer ( ) ; thd -> start_utime = thd -> thr_create_utime ; threads . append ( thd ) ; return ( 1 ) ; } } return ( 0 ) ; }
False
Categorize the following code snippet as vulnerable or not. True or False
static const struct ogg_codec * ogg_find_codec ( uint8_t * buf , int size ) { int i ; for ( i = 0 ; ogg_codecs [ i ] ; i ++ ) if ( size >= ogg_codecs [ i ] -> magicsize && ! memcmp ( buf , ogg_codecs [ i ] -> magic , ogg_codecs [ i ] -> magicsize ) ) return ogg_codecs [ i ] ; return NULL ; }
False
Categorize the following code snippet as vulnerable or not. True or False
static int dissect_h245_MiscellaneousCommand ( tvbuff_t * tvb _U_ , int offset _U_ , asn1_ctx_t * actx _U_ , proto_tree * tree _U_ , int hf_index _U_ ) { offset = dissect_per_sequence ( tvb , offset , actx , tree , hf_index , ett_h245_MiscellaneousCommand , MiscellaneousCommand_sequence ) ; return offset ; }
False
Categorize the following code snippet as vulnerable or not. True or False
static int rv34_decode_intra_mb_header ( RV34DecContext * r , int8_t * intra_types ) { MpegEncContext * s = & r -> s ; GetBitContext * gb = & s -> gb ; int mb_pos = s -> mb_x + s -> mb_y * s -> mb_stride ; int t ; r -> is16 = get_bits1 ( gb ) ; if ( r -> is16 ) { s -> current_picture_ptr -> f . mb_type [ mb_pos ] = MB_TYPE_INTRA16x16 ; r -> block_type = RV34_MB_TYPE_INTRA16x16 ; t = get_bits ( gb , 2 ) ; fill_rectangle ( intra_types , 4 , 4 , r -> intra_types_stride , t , sizeof ( intra_types [ 0 ] ) ) ; r -> luma_vlc = 2 ; } else { if ( ! r -> rv30 ) { if ( ! get_bits1 ( gb ) ) av_log ( s -> avctx , AV_LOG_ERROR , "Need DQUANT\n" ) ; } s -> current_picture_ptr -> f . mb_type [ mb_pos ] = MB_TYPE_INTRA ; r -> block_type = RV34_MB_TYPE_INTRA ; if ( r -> decode_intra_types ( r , gb , intra_types ) < 0 ) return - 1 ; r -> luma_vlc = 1 ; } r -> chroma_vlc = 0 ; r -> cur_vlcs = choose_vlc_set ( r -> si . quant , r -> si . vlc_set , 0 ) ; return rv34_decode_cbp ( gb , r -> cur_vlcs , r -> is16 ) ; }
True
Categorize the following code snippet as vulnerable or not. True or False
static void print_mpi_2 ( const char * text , const char * text2 , gcry_mpi_t a ) { gcry_error_t err ; char * buf ; void * bufaddr = & buf ; err = gcry_mpi_aprint ( GCRYMPI_FMT_HEX , bufaddr , NULL , a ) ; if ( err ) fprintf ( stderr , "%s%s: [error printing number: %s]\n" , text , text2 ? text2 : "" , gpg_strerror ( err ) ) ; else { fprintf ( stderr , "%s%s: %s\n" , text , text2 ? text2 : "" , buf ) ; gcry_free ( buf ) ; } }
False
Categorize the following code snippet as vulnerable or not. True or False
TSReturnCode TSTextLogObjectCreate ( const char * filename , int mode , TSTextLogObject * new_object ) { sdk_assert ( sdk_sanity_check_null_ptr ( ( void * ) filename ) == TS_SUCCESS ) ; sdk_assert ( sdk_sanity_check_null_ptr ( ( void * ) new_object ) == TS_SUCCESS ) ; if ( mode < 0 || mode >= TS_LOG_MODE_INVALID_FLAG ) { * new_object = nullptr ; return TS_ERROR ; } TextLogObject * tlog = new TextLogObject ( filename , Log : : config -> logfile_dir , ( bool ) mode & TS_LOG_MODE_ADD_TIMESTAMP , nullptr , Log : : config -> rolling_enabled , Log : : config -> collation_preproc_threads , Log : : config -> rolling_interval_sec , Log : : config -> rolling_offset_hr , Log : : config -> rolling_size_mb ) ; if ( tlog == nullptr ) { * new_object = nullptr ; return TS_ERROR ; } int err = ( mode & TS_LOG_MODE_DO_NOT_RENAME ? Log : : config -> log_object_manager . manage_api_object ( tlog , 0 ) : Log : : config -> log_object_manager . manage_api_object ( tlog ) ) ; if ( err != LogObjectManager : : NO_FILENAME_CONFLICTS ) { delete tlog ; * new_object = nullptr ; return TS_ERROR ; } * new_object = ( TSTextLogObject ) tlog ; return TS_SUCCESS ; }
False
Categorize the following code snippet as vulnerable or not. True or False
static int altinfo_hook_handler ( TSCont contp , TSEvent event , void * edata ) { AltInfoTestData * data = nullptr ; TSHttpTxn txnp = nullptr ; CHECK_SPURIOUS_EVENT ( contp , event , edata ) ; data = ( AltInfoTestData * ) TSContDataGet ( contp ) ; switch ( event ) { case TS_EVENT_HTTP_READ_REQUEST_HDR : txnp = ( TSHttpTxn ) edata ; TSSkipRemappingSet ( txnp , 1 ) ; TSHttpTxnReenable ( txnp , TS_EVENT_HTTP_CONTINUE ) ; break ; case TS_EVENT_HTTP_SELECT_ALT : { TSMBuffer clientreqbuf ; TSMBuffer cachereqbuf ; TSMBuffer cacherespbuf ; TSMLoc clientreqhdr ; TSMLoc cachereqhdr ; TSMLoc cacheresphdr ; TSHttpAltInfo infop = ( TSHttpAltInfo ) edata ; data -> run_at_least_once = true ; if ( TSHttpAltInfoClientReqGet ( infop , & clientreqbuf , & clientreqhdr ) != TS_SUCCESS ) { SDK_RPRINT ( data -> test , "TSHttpAltInfoClientReqGet" , "TestCase" , TC_FAIL , "TSHttpAltInfoClientReqGet doesn't return TS_SUCCESS" ) ; data -> test_passed_txn_alt_info_client_req_get = false ; } else { if ( ( clientreqbuf == reinterpret_cast < TSMBuffer > ( & ( ( ( HttpAltInfo * ) infop ) -> m_client_req ) ) ) && ( clientreqhdr == reinterpret_cast < TSMLoc > ( ( ( HttpAltInfo * ) infop ) -> m_client_req . m_http ) ) ) { SDK_RPRINT ( data -> test , "TSHttpAltInfoClientReqGet" , "TestCase" , TC_PASS , "ok" ) ; } else { SDK_RPRINT ( data -> test , "TSHttpAltInfoClientReqGet" , "TestCase" , TC_FAIL , "Value's Mismatch" ) ; data -> test_passed_txn_alt_info_client_req_get = false ; } } if ( TSHttpAltInfoCachedReqGet ( infop , & cachereqbuf , & cachereqhdr ) != TS_SUCCESS ) { SDK_RPRINT ( data -> test , "TSHttpAltInfoCachedReqGet" , "TestCase" , TC_FAIL , "TSHttpAltInfoCachedReqGet doesn't return TS_SUCCESS" ) ; data -> test_passed_txn_alt_info_cached_req_get = false ; } else { if ( ( cachereqbuf == reinterpret_cast < TSMBuffer > ( & ( ( ( HttpAltInfo * ) infop ) -> m_cached_req ) ) ) && ( cachereqhdr == reinterpret_cast < TSMLoc > ( ( ( HttpAltInfo * ) infop ) -> m_cached_req . m_http ) ) ) { SDK_RPRINT ( data -> test , "TSHttpAltInfoCachedReqGet" , "TestCase" , TC_PASS , "ok" ) ; } else { SDK_RPRINT ( data -> test , "TSHttpAltInfoCachedReqGet" , "TestCase" , TC_FAIL , "Value's Mismatch" ) ; data -> test_passed_txn_alt_info_cached_req_get = false ; } } if ( TSHttpAltInfoCachedRespGet ( infop , & cacherespbuf , & cacheresphdr ) != TS_SUCCESS ) { SDK_RPRINT ( data -> test , "TSHttpAltInfoCachedRespGet" , "TestCase" , TC_FAIL , "TSHttpAltInfoCachedRespGet doesn't return TS_SUCCESS" ) ; data -> test_passed_txn_alt_info_cached_resp_get = false ; } else { if ( ( cacherespbuf == reinterpret_cast < TSMBuffer > ( & ( ( ( HttpAltInfo * ) infop ) -> m_cached_resp ) ) ) && ( cacheresphdr == reinterpret_cast < TSMLoc > ( ( ( HttpAltInfo * ) infop ) -> m_cached_resp . 
m_http ) ) ) { SDK_RPRINT ( data -> test , "TSHttpAltInfoCachedRespGet" , "TestCase" , TC_PASS , "ok" ) ; } else { SDK_RPRINT ( data -> test , "TSHttpAltInfoCachedRespGet" , "TestCase" , TC_FAIL , "Value's Mismatch" ) ; data -> test_passed_txn_alt_info_cached_resp_get = false ; } } TSHttpAltInfoQualitySet ( infop , 0.5 ) ; SDK_RPRINT ( data -> test , "TSHttpAltInfoQualitySet" , "TestCase" , TC_PASS , "ok" ) ; } break ; case TS_EVENT_IMMEDIATE : case TS_EVENT_TIMEOUT : if ( data -> first_time == true ) { if ( ( data -> browser1 -> status == REQUEST_INPROGRESS ) || ( data -> browser2 -> status == REQUEST_INPROGRESS ) ) { TSContSchedule ( contp , 25 , TS_THREAD_POOL_DEFAULT ) ; return 0 ; } } else { if ( data -> browser3 -> status == REQUEST_INPROGRESS ) { TSContSchedule ( contp , 25 , TS_THREAD_POOL_DEFAULT ) ; return 0 ; } } { if ( data -> first_time == true ) { data -> first_time = false ; synserver_delete ( data -> os ) ; data -> os = nullptr ; synclient_txn_send_request ( data -> browser3 , data -> request3 ) ; TSHttpHookAdd ( TS_HTTP_SELECT_ALT_HOOK , contp ) ; TSContSchedule ( contp , 25 , TS_THREAD_POOL_DEFAULT ) ; return 0 ; } if ( ( data -> browser3 -> status == REQUEST_SUCCESS ) && ( data -> test_passed_txn_alt_info_client_req_get == true ) && ( data -> test_passed_txn_alt_info_cached_req_get == true ) && ( data -> test_passed_txn_alt_info_cached_resp_get == true ) && ( data -> test_passed_txn_alt_info_quality_set == true ) && ( data -> run_at_least_once == true ) ) { * ( data -> pstatus ) = REGRESSION_TEST_PASSED ; } else { if ( data -> run_at_least_once == false ) { SDK_RPRINT ( data -> test , "TSHttpAltInfo" , "All" , TC_FAIL , "Test not executed even once" ) ; } * ( data -> pstatus ) = REGRESSION_TEST_FAILED ; } synclient_txn_delete ( data -> browser1 ) ; synclient_txn_delete ( data -> browser2 ) ; synclient_txn_delete ( data -> browser3 ) ; TSfree ( data -> request1 ) ; TSfree ( data -> request2 ) ; TSfree ( data -> request3 ) ; data -> magic = MAGIC_DEAD ; TSfree ( data ) ; TSContDataSet ( contp , nullptr ) ; } break ; default : * ( data -> pstatus ) = REGRESSION_TEST_FAILED ; SDK_RPRINT ( data -> test , "TSHttpTxnCache" , "TestCase1" , TC_FAIL , "Unexpected event %d" , event ) ; break ; } return 0 ; }
False
Categorize the following code snippet as vulnerable or not. True or False
static int sb_has_motion ( const VP9_COMMON * cm , MODE_INFO * prev_mi_8x8 , const int motion_thresh ) { const int mis = cm -> mi_stride ; int block_row , block_col ; if ( cm -> prev_mi ) { for ( block_row = 0 ; block_row < 8 ; ++ block_row ) { for ( block_col = 0 ; block_col < 8 ; ++ block_col ) { const MODE_INFO * prev_mi = prev_mi_8x8 [ block_row * mis + block_col ] . src_mi ; if ( prev_mi ) { if ( abs ( prev_mi -> mbmi . mv [ 0 ] . as_mv . row ) > motion_thresh || abs ( prev_mi -> mbmi . mv [ 0 ] . as_mv . col ) > motion_thresh ) return 1 ; } } } } return 0 ; }
False
Categorize the following code snippet as vulnerable or not. True or False
SplineChar * PSCharStringToSplines ( uint8 * type1 , int len , struct pscontext * context , struct pschars * subrs , struct pschars * gsubrs , const char * name ) { int is_type2 = context -> is_type2 ; real stack [ 50 ] ; int sp = 0 , v ; real transient [ 32 ] ; SplineChar * ret = SplineCharCreate ( 2 ) ; SplinePointList * cur = NULL , * oldcur = NULL ; RefChar * r1 , * r2 , * rlast = NULL ; DBasePoint current ; real dx , dy , dx2 , dy2 , dx3 , dy3 , dx4 , dy4 , dx5 , dy5 , dx6 , dy6 ; SplinePoint * pt ; struct substate { unsigned char * type1 ; int len ; int subnum ; } pcstack [ 11 ] ; int pcsp = 0 ; StemInfo * hint , * hp ; real pops [ 30 ] ; int popsp = 0 ; int base , polarity ; real coord ; struct pschars * s ; int hint_cnt = 0 ; StemInfo * activeh = NULL , * activev = NULL , * sameh ; HintMask * pending_hm = NULL ; HintMask * counters [ 96 ] ; int cp = 0 ; real unblended [ 2 ] [ MmMax ] ; int last_was_b1 = false , old_last_was_b1 ; if ( ! is_type2 && context -> instance_count > 1 ) memset ( unblended , 0 , sizeof ( unblended ) ) ; ret -> name = copy ( name ) ; ret -> unicodeenc = - 1 ; ret -> width = ( int16 ) 0x8000 ; if ( name == NULL ) name = "unnamed" ; ret -> manualhints = true ; current . x = current . y = 0 ; while ( len > 0 ) { if ( sp > 48 ) { LogError ( _ ( "Stack got too big in %s\n" ) , name ) ; sp = 48 ; } base = 0 ; -- len ; if ( ( v = * type1 ++ ) >= 32 ) { if ( v <= 246 ) { stack [ sp ++ ] = v - 139 ; } else if ( v <= 250 ) { stack [ sp ++ ] = ( v - 247 ) * 256 + * type1 ++ + 108 ; -- len ; } else if ( v <= 254 ) { stack [ sp ++ ] = - ( v - 251 ) * 256 - * type1 ++ - 108 ; -- len ; } else { if ( len < 4 ) { LogError ( _ ( "Not enough data: %d < 4" ) , len ) ; len = 0 ; break ; } int val = ( * type1 << 24 ) | ( type1 [ 1 ] << 16 ) | ( type1 [ 2 ] << 8 ) | type1 [ 3 ] ; stack [ sp ++ ] = val ; type1 += 4 ; len -= 4 ; if ( is_type2 ) { # ifndef PSFixed_Is_TTF stack [ sp - 1 ] /= 65536. ; # else int mant = val & 0xffff ; stack [ sp - 1 ] = ( val >> 16 ) + mant / 65536. ; # endif } } } else if ( v == 28 ) { stack [ sp ++ ] = ( short ) ( ( type1 [ 0 ] << 8 ) | type1 [ 1 ] ) ; type1 += 2 ; len -= 2 ; } else if ( v == 12 ) { old_last_was_b1 = last_was_b1 ; last_was_b1 = false ; v = * type1 ++ ; -- len ; switch ( v ) { case 0 : if ( is_type2 ) LogError ( _ ( "%s\'s dotsection operator is deprecated for Type2\n" ) , name ) ; sp = 0 ; break ; case 1 : if ( sp < 6 ) LogError ( _ ( "Stack underflow on vstem3 in %s\n" ) , name ) ; if ( is_type2 ) LogError ( _ ( "%s\'s vstem3 operator is not supported for Type2\n" ) , name ) ; sameh = NULL ; if ( ! is_type2 ) sameh = SameH ( ret -> vstem , stack [ 0 ] + ret -> lsidebearing , stack [ 1 ] , unblended , 0 ) ; hint = HintNew ( stack [ 0 ] + ret -> lsidebearing , stack [ 1 ] ) ; hint -> hintnumber = sameh != NULL ? sameh -> hintnumber : hint_cnt ++ ; if ( activev == NULL ) activev = hp = hint ; else { for ( hp = activev ; hp -> next != NULL ; hp = hp -> next ) ; hp -> next = hint ; hp = hint ; } sameh = NULL ; if ( ! is_type2 ) sameh = SameH ( ret -> vstem , stack [ 2 ] + ret -> lsidebearing , stack [ 3 ] , unblended , 0 ) ; hp -> next = HintNew ( stack [ 2 ] + ret -> lsidebearing , stack [ 3 ] ) ; hp -> next -> hintnumber = sameh != NULL ? sameh -> hintnumber : hint_cnt ++ ; if ( ! is_type2 ) sameh = SameH ( ret -> vstem , stack [ 4 ] + ret -> lsidebearing , stack [ 5 ] , unblended , 0 ) ; hp -> next -> next = HintNew ( stack [ 4 ] + ret -> lsidebearing , stack [ 5 ] ) ; hp -> next -> next -> hintnumber = sameh != NULL ? 
sameh -> hintnumber : hint_cnt ++ ; if ( ! is_type2 && hp -> next -> next -> hintnumber < 96 ) { if ( pending_hm == NULL ) pending_hm = chunkalloc ( sizeof ( HintMask ) ) ; ( * pending_hm ) [ hint -> hintnumber >> 3 ] |= 0x80 >> ( hint -> hintnumber & 0x7 ) ; ( * pending_hm ) [ hint -> next -> hintnumber >> 3 ] |= 0x80 >> ( hint -> next -> hintnumber & 0x7 ) ; ( * pending_hm ) [ hint -> next -> next -> hintnumber >> 3 ] |= 0x80 >> ( hint -> next -> next -> hintnumber & 0x7 ) ; } hp = hp -> next -> next ; sp = 0 ; break ; case 2 : if ( sp < 6 ) LogError ( _ ( "Stack underflow on hstem3 in %s\n" ) , name ) ; if ( is_type2 ) LogError ( _ ( "%s\'s vstem3 operator is not supported for Type2\n" ) , name ) ; sameh = NULL ; if ( ! is_type2 ) sameh = SameH ( ret -> hstem , stack [ 0 ] , stack [ 1 ] , unblended , 0 ) ; hint = HintNew ( stack [ 0 ] , stack [ 1 ] ) ; hint -> hintnumber = sameh != NULL ? sameh -> hintnumber : hint_cnt ++ ; if ( activeh == NULL ) activeh = hp = hint ; else { for ( hp = activeh ; hp -> next != NULL ; hp = hp -> next ) ; hp -> next = hint ; hp = hint ; } sameh = NULL ; if ( ! is_type2 ) sameh = SameH ( ret -> hstem , stack [ 2 ] , stack [ 3 ] , unblended , 0 ) ; hp -> next = HintNew ( stack [ 2 ] , stack [ 3 ] ) ; hp -> next -> hintnumber = sameh != NULL ? sameh -> hintnumber : hint_cnt ++ ; sameh = NULL ; if ( ! is_type2 ) sameh = SameH ( ret -> hstem , stack [ 4 ] , stack [ 5 ] , unblended , 0 ) ; hp -> next -> next = HintNew ( stack [ 4 ] , stack [ 5 ] ) ; hp -> next -> next -> hintnumber = sameh != NULL ? sameh -> hintnumber : hint_cnt ++ ; if ( ! is_type2 && hp -> next -> next -> hintnumber < 96 ) { if ( pending_hm == NULL ) pending_hm = chunkalloc ( sizeof ( HintMask ) ) ; ( * pending_hm ) [ hint -> hintnumber >> 3 ] |= 0x80 >> ( hint -> hintnumber & 0x7 ) ; ( * pending_hm ) [ hint -> next -> hintnumber >> 3 ] |= 0x80 >> ( hint -> next -> hintnumber & 0x7 ) ; ( * pending_hm ) [ hint -> next -> next -> hintnumber >> 3 ] |= 0x80 >> ( hint -> next -> next -> hintnumber & 0x7 ) ; } hp = hp -> next -> next ; sp = 0 ; break ; case 6 : seac : if ( sp < 5 ) LogError ( _ ( "Stack underflow on seac in %s\n" ) , name ) ; if ( is_type2 ) { if ( v == 6 ) LogError ( _ ( "%s\'s SEAC operator is invalid for Type2\n" ) , name ) ; else LogError ( _ ( "%s\'s SEAC-like endchar operator is deprecated for Type2\n" ) , name ) ; } r1 = RefCharCreate ( ) ; r2 = RefCharCreate ( ) ; r2 -> transform [ 0 ] = 1 ; r2 -> transform [ 3 ] = 1 ; r2 -> transform [ 4 ] = stack [ 1 ] - ( stack [ 0 ] - ret -> lsidebearing ) ; r2 -> transform [ 5 ] = stack [ 2 ] ; r1 -> transform [ 0 ] = 1 ; r1 -> transform [ 3 ] = 1 ; r1 -> adobe_enc = stack [ 3 ] ; r2 -> adobe_enc = stack [ 4 ] ; if ( stack [ 3 ] < 0 || stack [ 3 ] >= 256 || stack [ 4 ] < 0 || stack [ 4 ] >= 256 ) { LogError ( _ ( "Reference encoding out of bounds in %s\n" ) , name ) ; r1 -> adobe_enc = 0 ; r2 -> adobe_enc = 0 ; } r1 -> next = r2 ; if ( rlast != NULL ) rlast -> next = r1 ; else ret -> layers [ ly_fore ] . 
refs = r1 ; ret -> changedsincelasthinted = true ; rlast = r2 ; sp = 0 ; break ; case 7 : if ( sp < 4 ) LogError ( _ ( "Stack underflow on sbw in %s\n" ) , name ) ; if ( is_type2 ) LogError ( _ ( "%s\'s sbw operator is not supported for Type2\n" ) , name ) ; ret -> lsidebearing = stack [ 0 ] ; ret -> width = stack [ 2 ] ; sp = 0 ; break ; case 5 : case 9 : case 14 : case 26 : if ( sp < 1 ) LogError ( _ ( "Stack underflow on unary operator in %s\n" ) , name ) ; switch ( v ) { case 5 : stack [ sp - 1 ] = ( stack [ sp - 1 ] == 0 ) ; break ; case 9 : if ( stack [ sp - 1 ] < 0 ) stack [ sp - 1 ] = - stack [ sp - 1 ] ; break ; case 14 : stack [ sp - 1 ] = - stack [ sp - 1 ] ; break ; case 26 : stack [ sp - 1 ] = sqrt ( stack [ sp - 1 ] ) ; break ; default : break ; } break ; case 3 : case 4 : case 10 : case 11 : case 12 : case 15 : case 24 : if ( sp < 2 ) LogError ( _ ( "Stack underflow on binary operator in %s\n" ) , name ) ; else switch ( v ) { case 3 : stack [ sp - 2 ] = ( stack [ sp - 1 ] != 0 && stack [ sp - 2 ] != 0 ) ; break ; case 4 : stack [ sp - 2 ] = ( stack [ sp - 1 ] != 0 || stack [ sp - 2 ] != 0 ) ; break ; case 10 : stack [ sp - 2 ] += stack [ sp - 1 ] ; break ; case 11 : stack [ sp - 2 ] -= stack [ sp - 1 ] ; break ; case 12 : stack [ sp - 2 ] /= stack [ sp - 1 ] ; break ; case 24 : stack [ sp - 2 ] *= stack [ sp - 1 ] ; break ; case 15 : stack [ sp - 2 ] = ( stack [ sp - 1 ] == stack [ sp - 2 ] ) ; break ; default : break ; } -- sp ; break ; case 22 : if ( sp < 4 ) LogError ( _ ( "Stack underflow on ifelse in %s\n" ) , name ) ; else { if ( stack [ sp - 2 ] > stack [ sp - 1 ] ) stack [ sp - 4 ] = stack [ sp - 3 ] ; sp -= 3 ; } break ; case 23 : do { stack [ sp ] = ( rand ( ) / ( RAND_MAX - 1 ) ) ; } while ( stack [ sp ] == 0 || stack [ sp ] > 1 ) ; ++ sp ; break ; case 16 : if ( is_type2 ) LogError ( _ ( "Type2 fonts do not support the Type1 callothersubrs operator" ) ) ; if ( sp < 2 || sp < 2 + stack [ sp - 2 ] ) { LogError ( _ ( "Stack underflow on callothersubr in %s\n" ) , name ) ; sp = 0 ; } else { int tot = stack [ sp - 2 ] , i , k , j ; popsp = 0 ; for ( k = sp - 3 ; k >= sp - 2 - tot ; -- k ) pops [ popsp ++ ] = stack [ k ] ; switch ( ( int ) stack [ sp - 1 ] ) { case 3 : { ret -> manualhints = false ; ret -> hstem = HintsAppend ( ret -> hstem , activeh ) ; activeh = NULL ; ret -> vstem = HintsAppend ( ret -> vstem , activev ) ; activev = NULL ; } break ; case 1 : { is_type2 = false ; if ( cur != NULL ) { oldcur = cur ; cur -> next = NULL ; } else LogError ( _ ( "Bad flex subroutine in %s\n" ) , name ) ; } break ; case 2 : { ; } break ; case 0 : if ( oldcur != NULL ) { SplinePointList * spl = oldcur -> next ; if ( spl != NULL && spl -> next != NULL && spl -> next -> next != NULL && spl -> next -> next -> next != NULL && spl -> next -> next -> next -> next != NULL && spl -> next -> next -> next -> next -> next != NULL && spl -> next -> next -> next -> next -> next -> next != NULL ) { BasePoint old_nextcp , mid_prevcp , mid , mid_nextcp , end_prevcp , end ; old_nextcp = spl -> next -> first -> me ; mid_prevcp = spl -> next -> next -> first -> me ; mid = spl -> next -> next -> next -> first -> me ; mid_nextcp = spl -> next -> next -> next -> next -> first -> me ; end_prevcp = spl -> next -> next -> next -> next -> next -> first -> me ; end = spl -> next -> next -> next -> next -> next -> next -> first -> me ; cur = oldcur ; if ( cur != NULL && cur -> first != NULL && ( cur -> first != cur -> last || cur -> first -> next == NULL ) ) { cur -> last -> nextcp = old_nextcp ; cur 
-> last -> nonextcp = false ; pt = chunkalloc ( sizeof ( SplinePoint ) ) ; pt -> hintmask = pending_hm ; pending_hm = NULL ; pt -> prevcp = mid_prevcp ; pt -> me = mid ; pt -> nextcp = mid_nextcp ; CheckMake ( cur -> last , pt ) ; SplineMake3 ( cur -> last , pt ) ; cur -> last = pt ; pt = chunkalloc ( sizeof ( SplinePoint ) ) ; pt -> prevcp = end_prevcp ; pt -> me = end ; pt -> nonextcp = true ; CheckMake ( cur -> last , pt ) ; SplineMake3 ( cur -> last , pt ) ; cur -> last = pt ; } else LogError ( _ ( "No previous point on path in curveto from flex 0 in %s\n" ) , name ) ; } else { pt = chunkalloc ( sizeof ( SplinePoint ) ) ; pt -> me . x = pops [ 1 ] ; pt -> me . y = pops [ 0 ] ; pt -> noprevcp = true ; pt -> nonextcp = true ; SplinePointListFree ( oldcur -> next ) ; oldcur -> next = NULL ; spl = NULL ; cur = oldcur ; if ( cur != NULL && cur -> first != NULL && ( cur -> first != cur -> last || cur -> first -> next == NULL ) ) { CheckMake ( cur -> last , pt ) ; SplineMake3 ( cur -> last , pt ) ; cur -> last = pt ; } else LogError ( _ ( "No previous point on path in lineto from flex 0 in %s\n" ) , name ) ; } -- popsp ; cur -> next = NULL ; SplinePointListsFree ( spl ) ; oldcur = NULL ; } else LogError ( _ ( "Bad flex subroutine in %s\n" ) , name ) ; is_type2 = context -> is_type2 ; break ; case 14 : case 15 : case 16 : case 17 : case 18 : { int cnt = stack [ sp - 1 ] - 13 ; if ( cnt == 5 ) cnt = 6 ; if ( context -> instance_count == 0 ) LogError ( _ ( "Attempt to use a multiple master subroutine in a non-mm font in %s.\n" ) , name ) ; else if ( tot != cnt * context -> instance_count ) LogError ( _ ( "Multiple master subroutine called with the wrong number of arguments in %s.\n" ) , name ) ; else { if ( cnt == 1 && ! is_type2 ) { if ( sp - 2 - tot >= 1 && ( ! old_last_was_b1 || stack [ 0 ] != Blend ( unblended [ 1 ] , context ) ) ) { unblended [ 0 ] [ 0 ] = stack [ 0 ] ; for ( i = 1 ; i < context -> instance_count ; ++ i ) unblended [ 0 ] [ i ] = 0 ; } else memcpy ( unblended , unblended + 1 , context -> instance_count * sizeof ( real ) ) ; for ( j = 0 ; j < context -> instance_count ; ++ j ) unblended [ 1 ] [ j ] = stack [ sp - 2 - tot + j ] ; } else if ( cnt == 2 && ! 
is_type2 ) { unblended [ 0 ] [ 0 ] = stack [ sp - 2 - tot ] ; unblended [ 1 ] [ 0 ] = stack [ sp - 2 - tot + 1 ] ; for ( i = 0 ; i < 2 ; ++ i ) for ( j = 1 ; j < context -> instance_count ; ++ j ) unblended [ i ] [ j ] = stack [ sp - 2 - tot + 2 + i * ( context -> instance_count - 1 ) + ( j - 1 ) ] ; } popsp = 0 ; for ( i = 0 ; i < cnt ; ++ i ) { double sum = stack [ sp - 2 - tot + i ] ; for ( j = 1 ; j < context -> instance_count ; ++ j ) sum += context -> blend_values [ j ] * stack [ sp - 2 - tot + cnt + i * ( context -> instance_count - 1 ) + j - 1 ] ; pops [ cnt - 1 - popsp ++ ] = sum ; } } } break ; } sp = k + 1 ; } break ; case 20 : if ( sp < 2 ) LogError ( _ ( "Too few items on stack for put in %s\n" ) , name ) ; else if ( stack [ sp - 1 ] < 0 || stack [ sp - 1 ] >= 32 ) LogError ( _ ( "Reference to transient memory out of bounds in put in %s\n" ) , name ) ; else { transient [ ( int ) stack [ sp - 1 ] ] = stack [ sp - 2 ] ; sp -= 2 ; } break ; case 21 : if ( sp < 1 ) LogError ( _ ( "Too few items on stack for get in %s\n" ) , name ) ; else if ( stack [ sp - 1 ] < 0 || stack [ sp - 1 ] >= 32 ) LogError ( _ ( "Reference to transient memory out of bounds in put in %s\n" ) , name ) ; else stack [ sp - 1 ] = transient [ ( int ) stack [ sp - 1 ] ] ; break ; case 17 : if ( popsp <= 0 ) LogError ( _ ( "Pop stack underflow on pop in %s\n" ) , name ) ; else stack [ sp ++ ] = pops [ -- popsp ] ; break ; case 18 : if ( sp > 0 ) -- sp ; break ; case 27 : if ( sp >= 1 ) { stack [ sp ] = stack [ sp - 1 ] ; ++ sp ; } break ; case 28 : if ( sp >= 2 ) { real temp = stack [ sp - 1 ] ; stack [ sp - 1 ] = stack [ sp - 2 ] ; stack [ sp - 2 ] = temp ; } break ; case 29 : if ( sp >= 1 ) { int index = stack [ -- sp ] ; if ( index < 0 || sp < index + 1 ) LogError ( _ ( "Index out of range in %s\n" ) , name ) ; else { stack [ sp ] = stack [ sp - index - 1 ] ; ++ sp ; } } break ; case 30 : if ( sp >= 2 ) { int j = stack [ sp - 1 ] , N = stack [ sp - 2 ] ; if ( N > sp || j >= N || j < 0 || N < 0 ) LogError ( _ ( "roll out of range in %s\n" ) , name ) ; else if ( j == 0 || N == 0 ) ; else { real * temp = malloc ( N * sizeof ( real ) ) ; int i ; for ( i = 0 ; i < N ; ++ i ) temp [ i ] = stack [ sp - N + i ] ; for ( i = 0 ; i < N ; ++ i ) stack [ sp - N + i ] = temp [ ( i + j ) % N ] ; free ( temp ) ; } } break ; case 33 : if ( is_type2 ) LogError ( _ ( "Type2 fonts do not support the Type1 setcurrentpoint operator" ) ) ; if ( sp < 2 ) LogError ( _ ( "Stack underflow on setcurrentpoint in %s\n" ) , name ) ; else { current . x = stack [ 0 ] ; current . 
y = stack [ 1 ] ; } sp = 0 ; break ; case 34 : case 35 : case 36 : case 37 : dy = dy3 = dy4 = dy5 = dy6 = 0 ; dx = stack [ base ++ ] ; if ( v != 34 ) dy = stack [ base ++ ] ; dx2 = stack [ base ++ ] ; dy2 = stack [ base ++ ] ; dx3 = stack [ base ++ ] ; if ( v != 34 && v != 36 ) dy3 = stack [ base ++ ] ; dx4 = stack [ base ++ ] ; if ( v != 34 && v != 36 ) dy4 = stack [ base ++ ] ; dx5 = stack [ base ++ ] ; if ( v == 34 ) dy5 = - dy2 ; else dy5 = stack [ base ++ ] ; switch ( v ) { real xt , yt ; case 35 : dx6 = stack [ base ++ ] ; dy6 = stack [ base ++ ] ; break ; case 34 : dx6 = stack [ base ++ ] ; break ; case 36 : dx6 = stack [ base ++ ] ; dy6 = - dy - dy2 - dy5 ; break ; case 37 : xt = dx + dx2 + dx3 + dx4 + dx5 ; yt = dy + dy2 + dy3 + dy4 + dy5 ; if ( xt < 0 ) xt = - xt ; if ( yt < 0 ) yt = - yt ; if ( xt > yt ) { dx6 = stack [ base ++ ] ; dy6 = - dy - dy2 - dy3 - dy4 - dy5 ; } else { dy6 = stack [ base ++ ] ; dx6 = - dx - dx2 - dx3 - dx4 - dx5 ; } break ; } if ( cur != NULL && cur -> first != NULL && ( cur -> first != cur -> last || cur -> first -> next == NULL ) ) { current . x = rint ( ( current . x + dx ) * 1024 ) / 1024 ; current . y = rint ( ( current . y + dy ) * 1024 ) / 1024 ; cur -> last -> nextcp . x = current . x ; cur -> last -> nextcp . y = current . y ; cur -> last -> nonextcp = false ; current . x = rint ( ( current . x + dx2 ) * 1024 ) / 1024 ; current . y = rint ( ( current . y + dy2 ) * 1024 ) / 1024 ; pt = chunkalloc ( sizeof ( SplinePoint ) ) ; pt -> hintmask = pending_hm ; pending_hm = NULL ; pt -> prevcp . x = current . x ; pt -> prevcp . y = current . y ; current . x = rint ( ( current . x + dx3 ) * 1024 ) / 1024 ; current . y = rint ( ( current . y + dy3 ) * 1024 ) / 1024 ; pt -> me . x = current . x ; pt -> me . y = current . y ; pt -> nonextcp = true ; CheckMake ( cur -> last , pt ) ; SplineMake3 ( cur -> last , pt ) ; cur -> last = pt ; current . x = rint ( ( current . x + dx4 ) * 1024 ) / 1024 ; current . y = rint ( ( current . y + dy4 ) * 1024 ) / 1024 ; cur -> last -> nextcp . x = current . x ; cur -> last -> nextcp . y = current . y ; cur -> last -> nonextcp = false ; current . x = rint ( ( current . x + dx5 ) * 1024 ) / 1024 ; current . y = rint ( ( current . y + dy5 ) * 1024 ) / 1024 ; pt = chunkalloc ( sizeof ( SplinePoint ) ) ; pt -> prevcp . x = current . x ; pt -> prevcp . y = current . y ; current . x = rint ( ( current . x + dx6 ) * 1024 ) / 1024 ; current . y = rint ( ( current . y + dy6 ) * 1024 ) / 1024 ; pt -> me . x = current . x ; pt -> me . y = current . y ; pt -> nonextcp = true ; CheckMake ( cur -> last , pt ) ; SplineMake3 ( cur -> last , pt ) ; cur -> last = pt ; } else LogError ( _ ( "No previous point on path in flex operator in %s\n" ) , name ) ; sp = 0 ; break ; default : LogError ( _ ( "Uninterpreted opcode 12,%d in %s\n" ) , v , name ) ; break ; } } else { last_was_b1 = false ; switch ( v ) { case 1 : case 18 : base = 0 ; if ( ( sp & 1 ) && ret -> width == ( int16 ) 0x8000 ) ret -> width = stack [ 0 ] ; if ( sp & 1 ) base = 1 ; if ( sp - base < 2 ) LogError ( _ ( "Stack underflow on hstem in %s\n" ) , name ) ; coord = 0 ; hp = NULL ; if ( activeh != NULL ) for ( hp = activeh ; hp -> next != NULL ; hp = hp -> next ) ; while ( sp - base >= 2 ) { sameh = NULL ; if ( ! is_type2 ) sameh = SameH ( ret -> hstem , stack [ base ] + coord , stack [ base + 1 ] , unblended , context -> instance_count ) ; hint = HintNew ( stack [ base ] + coord , stack [ base + 1 ] ) ; hint -> hintnumber = sameh != NULL ? 
sameh -> hintnumber : hint_cnt ++ ; if ( ! is_type2 && context -> instance_count != 0 ) { hint -> u . unblended = chunkalloc ( sizeof ( real [ 2 ] [ MmMax ] ) ) ; memcpy ( hint -> u . unblended , unblended , sizeof ( real [ 2 ] [ MmMax ] ) ) ; } if ( activeh == NULL ) activeh = hint ; else hp -> next = hint ; hp = hint ; if ( ! is_type2 && hint -> hintnumber < 96 ) { if ( pending_hm == NULL ) pending_hm = chunkalloc ( sizeof ( HintMask ) ) ; ( * pending_hm ) [ hint -> hintnumber >> 3 ] |= 0x80 >> ( hint -> hintnumber & 0x7 ) ; } base += 2 ; coord = hint -> start + hint -> width ; } sp = 0 ; break ; case 19 : case 20 : case 3 : case 23 : base = 0 ; if ( cur == NULL || v == 3 || v == 23 ) { if ( ( sp & 1 ) && is_type2 && ret -> width == ( int16 ) 0x8000 ) { ret -> width = stack [ 0 ] ; } if ( sp & 1 ) base = 1 ; if ( sp - base < 2 && v != 19 && v != 20 ) LogError ( _ ( "Stack underflow on vstem in %s\n" ) , name ) ; coord = ret -> lsidebearing ; hp = NULL ; if ( activev != NULL ) for ( hp = activev ; hp -> next != NULL ; hp = hp -> next ) ; while ( sp - base >= 2 ) { sameh = NULL ; if ( ! is_type2 ) sameh = SameH ( ret -> vstem , stack [ base ] + coord , stack [ base + 1 ] , unblended , context -> instance_count ) ; hint = HintNew ( stack [ base ] + coord , stack [ base + 1 ] ) ; hint -> hintnumber = sameh != NULL ? sameh -> hintnumber : hint_cnt ++ ; if ( ! is_type2 && context -> instance_count != 0 ) { hint -> u . unblended = chunkalloc ( sizeof ( real [ 2 ] [ MmMax ] ) ) ; memcpy ( hint -> u . unblended , unblended , sizeof ( real [ 2 ] [ MmMax ] ) ) ; } if ( ! is_type2 && hint -> hintnumber < 96 ) { if ( pending_hm == NULL ) pending_hm = chunkalloc ( sizeof ( HintMask ) ) ; ( * pending_hm ) [ hint -> hintnumber >> 3 ] |= 0x80 >> ( hint -> hintnumber & 0x7 ) ; } if ( activev == NULL ) activev = hint ; else hp -> next = hint ; hp = hint ; base += 2 ; coord = hint -> start + hint -> width ; } sp = 0 ; } if ( v == 19 || v == 20 ) { int bytes = ( hint_cnt + 7 ) / 8 ; if ( bytes > sizeof ( HintMask ) ) bytes = sizeof ( HintMask ) ; if ( v == 19 ) { ret -> hstem = HintsAppend ( ret -> hstem , activeh ) ; activeh = NULL ; ret -> vstem = HintsAppend ( ret -> vstem , activev ) ; activev = NULL ; if ( pending_hm == NULL ) pending_hm = chunkalloc ( sizeof ( HintMask ) ) ; memcpy ( pending_hm , type1 , bytes ) ; } else if ( cp < sizeof ( counters ) / sizeof ( counters [ 0 ] ) ) { counters [ cp ] = chunkalloc ( sizeof ( HintMask ) ) ; memcpy ( counters [ cp ] , type1 , bytes ) ; ++ cp ; } if ( bytes != hint_cnt / 8 ) { int mask = 0xff >> ( hint_cnt & 7 ) ; if ( type1 [ bytes - 1 ] & mask ) LogError ( _ ( "Hint mask (or counter mask) with too many hints in %s\n" ) , name ) ; } type1 += bytes ; len -= bytes ; } break ; case 14 : if ( ( sp & 1 ) && is_type2 && ret -> width == ( int16 ) 0x8000 ) ret -> width = stack [ 0 ] ; if ( context -> painttype != 2 ) closepath ( cur , is_type2 ) ; pcsp = 0 ; if ( sp == 4 ) { stack [ 4 ] = stack [ 3 ] ; stack [ 3 ] = stack [ 2 ] ; stack [ 2 ] = stack [ 1 ] ; stack [ 1 ] = stack [ 0 ] ; stack [ 0 ] = 0 ; sp = 5 ; goto seac ; } else if ( sp == 5 ) { stack [ 0 ] = 0 ; goto seac ; } goto done ; break ; case 13 : if ( sp < 2 ) LogError ( _ ( "Stack underflow on hsbw in %s\n" ) , name ) ; ret -> lsidebearing = stack [ 0 ] ; current . 
x = stack [ 0 ] ; ret -> width = stack [ 1 ] ; sp = 0 ; break ; case 9 : sp = 0 ; closepath ( cur , is_type2 ) ; break ; case 21 : case 22 : case 4 : if ( is_type2 ) { if ( ( ( v == 21 && sp == 3 ) || ( v != 21 && sp == 2 ) ) && ret -> width == ( int16 ) 0x8000 ) ret -> width = stack [ 0 ] ; if ( v == 21 && sp > 2 ) { stack [ 0 ] = stack [ sp - 2 ] ; stack [ 1 ] = stack [ sp - 1 ] ; sp = 2 ; } else if ( v != 21 && sp > 1 ) { stack [ 0 ] = stack [ sp - 1 ] ; sp = 1 ; } if ( context -> painttype != 2 ) closepath ( cur , true ) ; } case 5 : case 6 : case 7 : polarity = 0 ; base = 0 ; while ( base < sp ) { dx = dy = 0 ; if ( v == 5 || v == 21 ) { if ( sp < base + 2 ) { LogError ( _ ( "Stack underflow on rlineto/rmoveto in %s\n" ) , name ) ; break ; } dx = stack [ base ++ ] ; dy = stack [ base ++ ] ; } else if ( ( v == 6 && ! ( polarity & 1 ) ) || ( v == 7 && ( polarity & 1 ) ) || v == 22 ) { if ( sp <= base ) { LogError ( _ ( "Stack underflow on hlineto/hmoveto in %s\n" ) , name ) ; break ; } dx = stack [ base ++ ] ; } else { if ( sp <= base ) { LogError ( _ ( "Stack underflow on vlineto/vmoveto in %s\n" ) , name ) ; break ; } dy = stack [ base ++ ] ; } ++ polarity ; current . x = rint ( ( current . x + dx ) * 1024 ) / 1024 ; current . y = rint ( ( current . y + dy ) * 1024 ) / 1024 ; pt = chunkalloc ( sizeof ( SplinePoint ) ) ; pt -> hintmask = pending_hm ; pending_hm = NULL ; pt -> me . x = current . x ; pt -> me . y = current . y ; pt -> noprevcp = true ; pt -> nonextcp = true ; if ( v == 4 || v == 21 || v == 22 ) { if ( cur != NULL && cur -> first == cur -> last && cur -> first -> prev == NULL && is_type2 ) { cur -> first -> me . x = current . x ; cur -> first -> me . y = current . y ; SplinePointFree ( pt ) ; } else { SplinePointList * spl = chunkalloc ( sizeof ( SplinePointList ) ) ; spl -> first = spl -> last = pt ; if ( cur != NULL ) cur -> next = spl ; else ret -> layers [ ly_fore ] . splines = spl ; cur = spl ; } break ; } else { if ( cur != NULL && cur -> first != NULL && ( cur -> first != cur -> last || cur -> first -> next == NULL ) ) { CheckMake ( cur -> last , pt ) ; SplineMake3 ( cur -> last , pt ) ; cur -> last = pt ; } else LogError ( _ ( "No previous point on path in lineto in %s\n" ) , name ) ; if ( ! is_type2 ) break ; } } sp = 0 ; break ; case 25 : base = 0 ; while ( sp > base + 6 ) { current . x = rint ( ( current . x + stack [ base ++ ] ) * 1024 ) / 1024 ; current . y = rint ( ( current . y + stack [ base ++ ] ) * 1024 ) / 1024 ; if ( cur != NULL ) { pt = chunkalloc ( sizeof ( SplinePoint ) ) ; pt -> hintmask = pending_hm ; pending_hm = NULL ; pt -> me . x = current . x ; pt -> me . y = current . 
y ; pt -> noprevcp = true ; pt -> nonextcp = true ; CheckMake ( cur -> last , pt ) ; SplineMake3 ( cur -> last , pt ) ; cur -> last = pt ; } } case 24 : case 8 : case 31 : case 30 : case 27 : case 26 : polarity = 0 ; while ( sp > base + 2 ) { dx = dy = dx2 = dy2 = dx3 = dy3 = 0 ; if ( v == 8 || v == 25 || v == 24 ) { if ( sp < 6 + base ) { LogError ( _ ( "Stack underflow on rrcurveto in %s\n" ) , name ) ; base = sp ; } else { dx = stack [ base ++ ] ; dy = stack [ base ++ ] ; dx2 = stack [ base ++ ] ; dy2 = stack [ base ++ ] ; dx3 = stack [ base ++ ] ; dy3 = stack [ base ++ ] ; } } else if ( v == 27 ) { if ( sp < 4 + base ) { LogError ( _ ( "Stack underflow on hhcurveto in %s\n" ) , name ) ; base = sp ; } else { if ( ( sp - base ) & 1 ) dy = stack [ base ++ ] ; dx = stack [ base ++ ] ; dx2 = stack [ base ++ ] ; dy2 = stack [ base ++ ] ; dx3 = stack [ base ++ ] ; } } else if ( v == 26 ) { if ( sp < 4 + base ) { LogError ( _ ( "Stack underflow on hhcurveto in %s\n" ) , name ) ; base = sp ; } else { if ( ( sp - base ) & 1 ) dx = stack [ base ++ ] ; dy = stack [ base ++ ] ; dx2 = stack [ base ++ ] ; dy2 = stack [ base ++ ] ; dy3 = stack [ base ++ ] ; } } else if ( ( v == 31 && ! ( polarity & 1 ) ) || ( v == 30 && ( polarity & 1 ) ) ) { if ( sp < 4 + base ) { LogError ( _ ( "Stack underflow on hvcurveto in %s\n" ) , name ) ; base = sp ; } else { dx = stack [ base ++ ] ; dx2 = stack [ base ++ ] ; dy2 = stack [ base ++ ] ; dy3 = stack [ base ++ ] ; if ( sp == base + 1 ) dx3 = stack [ base ++ ] ; } } else { if ( sp < 4 + base ) { LogError ( _ ( "Stack underflow on vhcurveto in %s\n" ) , name ) ; base = sp ; } else { dy = stack [ base ++ ] ; dx2 = stack [ base ++ ] ; dy2 = stack [ base ++ ] ; dx3 = stack [ base ++ ] ; if ( sp == base + 1 ) dy3 = stack [ base ++ ] ; } } ++ polarity ; if ( cur != NULL && cur -> first != NULL && ( cur -> first != cur -> last || cur -> first -> next == NULL ) ) { current . x = rint ( ( current . x + dx ) * 1024 ) / 1024 ; current . y = rint ( ( current . y + dy ) * 1024 ) / 1024 ; cur -> last -> nextcp . x = current . x ; cur -> last -> nextcp . y = current . y ; cur -> last -> nonextcp = false ; current . x = rint ( ( current . x + dx2 ) * 1024 ) / 1024 ; current . y = rint ( ( current . y + dy2 ) * 1024 ) / 1024 ; pt = chunkalloc ( sizeof ( SplinePoint ) ) ; pt -> hintmask = pending_hm ; pending_hm = NULL ; pt -> prevcp . x = current . x ; pt -> prevcp . y = current . y ; current . x = rint ( ( current . x + dx3 ) * 1024 ) / 1024 ; current . y = rint ( ( current . y + dy3 ) * 1024 ) / 1024 ; pt -> me . x = current . x ; pt -> me . y = current . y ; pt -> nonextcp = true ; CheckMake ( cur -> last , pt ) ; SplineMake3 ( cur -> last , pt ) ; cur -> last = pt ; } else LogError ( _ ( "No previous point on path in curveto in %s\n" ) , name ) ; } if ( v == 24 ) { current . x = rint ( ( current . x + stack [ base ++ ] ) * 1024 ) / 1024 ; current . y = rint ( ( current . y + stack [ base ++ ] ) * 1024 ) / 1024 ; if ( cur != NULL ) { pt = chunkalloc ( sizeof ( SplinePoint ) ) ; pt -> hintmask = pending_hm ; pending_hm = NULL ; pt -> me . x = current . x ; pt -> me . y = current . 
y ; pt -> noprevcp = true ; pt -> nonextcp = true ; CheckMake ( cur -> last , pt ) ; SplineMake3 ( cur -> last , pt ) ; cur -> last = pt ; } } sp = 0 ; break ; case 29 : case 10 : if ( sp < 1 ) { LogError ( _ ( "Stack underflow on callsubr in %s\n" ) , name ) ; break ; } else if ( pcsp > 10 ) { LogError ( _ ( "Too many subroutine calls in %s\n" ) , name ) ; break ; } s = subrs ; if ( v == 29 ) s = gsubrs ; if ( s != NULL ) stack [ sp - 1 ] += s -> bias ; if ( s == NULL || stack [ sp - 1 ] >= s -> cnt || stack [ sp - 1 ] < 0 || s -> values [ ( int ) stack [ sp - 1 ] ] == NULL ) LogError ( _ ( "Subroutine number out of bounds in %s\n" ) , name ) ; else { pcstack [ pcsp ] . type1 = type1 ; pcstack [ pcsp ] . len = len ; pcstack [ pcsp ] . subnum = stack [ sp - 1 ] ; ++ pcsp ; type1 = s -> values [ ( int ) stack [ sp - 1 ] ] ; len = s -> lens [ ( int ) stack [ sp - 1 ] ] ; } if ( -- sp < 0 ) sp = 0 ; break ; case 11 : if ( pcsp < 1 ) LogError ( _ ( "return when not in subroutine in %s\n" ) , name ) ; else { -- pcsp ; type1 = pcstack [ pcsp ] . type1 ; len = pcstack [ pcsp ] . len ; } break ; case 16 : { int cnt , i , j ; if ( context -> instance_count == 0 ) LogError ( _ ( "Attempt to use a multiple master subroutine in a non-mm font.\n" ) ) ; else if ( sp < 1 || sp < context -> instance_count * stack [ sp - 1 ] + 1 ) LogError ( _ ( "Too few items on stack for blend in %s\n" ) , name ) ; else { if ( ! context -> blend_warn ) { LogError ( _ ( "Use of obsolete blend operator.\n" ) ) ; context -> blend_warn = true ; } cnt = stack [ sp - 1 ] ; sp -= context -> instance_count * stack [ sp - 1 ] + 1 ; for ( i = 0 ; i < cnt ; ++ i ) { for ( j = 1 ; j < context -> instance_count ; ++ j ) stack [ sp + i ] += context -> blend_values [ j ] * stack [ sp + cnt + i * ( context -> instance_count - 1 ) + j - 1 ] ; } sp += cnt ; } } break ; default : LogError ( _ ( "Uninterpreted opcode %d in %s\n" ) , v , name ) ; break ; } } } done : if ( pcsp != 0 ) LogError ( _ ( "end of subroutine reached with no return in %s\n" ) , name ) ; SCCategorizePoints ( ret ) ; ret -> hstem = HintsAppend ( ret -> hstem , activeh ) ; activeh = NULL ; ret -> vstem = HintsAppend ( ret -> vstem , activev ) ; activev = NULL ; if ( cp != 0 ) { int i ; ret -> countermasks = malloc ( cp * sizeof ( HintMask ) ) ; ret -> countermask_cnt = cp ; for ( i = 0 ; i < cp ; ++ i ) { memcpy ( & ret -> countermasks [ i ] , counters [ i ] , sizeof ( HintMask ) ) ; chunkfree ( counters [ i ] , sizeof ( HintMask ) ) ; } } if ( ! is_type2 && ! context -> painttype ) for ( cur = ret -> layers [ ly_fore ] . splines ; cur != NULL ; cur = cur -> next ) if ( cur -> first -> prev == NULL ) { CheckMake ( cur -> last , cur -> first ) ; SplineMake3 ( cur -> last , cur -> first ) ; cur -> last = cur -> first ; } for ( cur = ret -> layers [ ly_fore ] . splines ; cur != NULL ; cur = cur -> next ) SplineSetReverse ( cur ) ; if ( ret -> hstem == NULL && ret -> vstem == NULL ) ret -> manualhints = false ; if ( ! is_type2 && context -> instance_count != 0 ) { UnblendFree ( ret -> hstem ) ; UnblendFree ( ret -> vstem ) ; } ret -> hstem = HintCleanup ( ret -> hstem , true , context -> instance_count ) ; ret -> vstem = HintCleanup ( ret -> vstem , true , context -> instance_count ) ; SCGuessHHintInstancesList ( ret , ly_fore ) ; SCGuessVHintInstancesList ( ret , ly_fore ) ; ret -> hconflicts = StemListAnyConflicts ( ret -> hstem ) ; ret -> vconflicts = StemListAnyConflicts ( ret -> vstem ) ; if ( context -> instance_count == 1 && ! ret -> hconflicts && ! 
ret -> vconflicts ) SCClearHintMasks ( ret , ly_fore , false ) ; HintsRenumber ( ret ) ; if ( name != NULL && strcmp ( name , ".notdef" ) != 0 ) ret -> widthset = true ; return ( ret ) ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
static void add_all_files_to_work_queue ( NautilusDirectory * directory ) { GList * node ; NautilusFile * file ; for ( node = directory -> details -> file_list ; node != NULL ; node = node -> next ) { file = NAUTILUS_FILE ( node -> data ) ; nautilus_directory_add_file_to_work_queue ( directory , file ) ; } }
0False
Categorize the following code snippet as vulnerable or not. True or False
static void pdf_drop_xobject_imp ( fz_context * ctx , fz_storable * xobj_ ) { pdf_xobject * xobj = ( pdf_xobject * ) xobj_ ; pdf_drop_obj ( ctx , xobj -> obj ) ; fz_free ( ctx , xobj ) ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
static int s390_virtio_serial_init ( VirtIOS390Device * dev ) { VirtIOS390Bus * bus ; VirtIODevice * vdev ; int r ; bus = DO_UPCAST ( VirtIOS390Bus , bus , dev -> qdev . parent_bus ) ; vdev = virtio_serial_init ( ( DeviceState * ) dev , dev -> max_virtserial_ports ) ; if ( ! vdev ) { return - 1 ; } r = s390_virtio_device_init ( dev , vdev ) ; if ( ! r ) { bus -> console = dev ; } return r ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
void ntpd_time_stepped ( void ) { u_int saved_mon_enabled ; if ( MON_OFF != mon_enabled ) { saved_mon_enabled = mon_enabled ; mon_stop ( MON_OFF ) ; mon_start ( saved_mon_enabled ) ; } # ifdef SYS_WINNT win_time_stepped ( ) ; # endif }
0False
Categorize the following code snippet as vulnerable or not. True or False
int keyring_insert_keyblock ( KEYRING_HANDLE hd , KBNODE kb ) { int rc ; const char * fname ; if ( ! hd ) fname = NULL ; else if ( hd -> found . kr ) { fname = hd -> found . kr -> fname ; if ( hd -> found . kr -> read_only ) return gpg_error ( GPG_ERR_EACCES ) ; } else if ( hd -> current . kr ) { fname = hd -> current . kr -> fname ; if ( hd -> current . kr -> read_only ) return gpg_error ( GPG_ERR_EACCES ) ; } else fname = hd -> resource ? hd -> resource -> fname : NULL ; if ( ! fname ) return GPG_ERR_GENERAL ; iobuf_close ( hd -> current . iobuf ) ; hd -> current . iobuf = NULL ; rc = do_copy ( 1 , fname , kb , 0 , 0 ) ; if ( ! rc && kr_offtbl ) { update_offset_hash_table_from_kb ( kr_offtbl , kb , 0 ) ; } return rc ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
int ff_MPV_frame_start ( MpegEncContext * s , AVCodecContext * avctx ) { int i , ret ; Picture * pic ; s -> mb_skipped = 0 ; if ( s -> out_format != FMT_H264 || s -> codec_id == AV_CODEC_ID_SVQ3 ) { if ( s -> pict_type != AV_PICTURE_TYPE_B && s -> last_picture_ptr && s -> last_picture_ptr != s -> next_picture_ptr && s -> last_picture_ptr -> f . data [ 0 ] ) { ff_mpeg_unref_picture ( s , s -> last_picture_ptr ) ; } if ( ! s -> encoding ) { for ( i = 0 ; i < MAX_PICTURE_COUNT ; i ++ ) { if ( & s -> picture [ i ] != s -> last_picture_ptr && & s -> picture [ i ] != s -> next_picture_ptr && s -> picture [ i ] . reference && ! s -> picture [ i ] . needs_realloc ) { if ( ! ( avctx -> active_thread_type & FF_THREAD_FRAME ) ) av_log ( avctx , AV_LOG_ERROR , "releasing zombie picture\n" ) ; ff_mpeg_unref_picture ( s , & s -> picture [ i ] ) ; } } } } if ( ! s -> encoding ) { ff_release_unused_pictures ( s , 1 ) ; if ( s -> current_picture_ptr && s -> current_picture_ptr -> f . data [ 0 ] == NULL ) { pic = s -> current_picture_ptr ; } else { i = ff_find_unused_picture ( s , 0 ) ; if ( i < 0 ) { av_log ( s -> avctx , AV_LOG_ERROR , "no frame buffer available\n" ) ; return i ; } pic = & s -> picture [ i ] ; } pic -> reference = 0 ; if ( ! s -> droppable ) { if ( s -> codec_id == AV_CODEC_ID_H264 ) pic -> reference = s -> picture_structure ; else if ( s -> pict_type != AV_PICTURE_TYPE_B ) pic -> reference = 3 ; } pic -> f . coded_picture_number = s -> coded_picture_number ++ ; if ( ff_alloc_picture ( s , pic , 0 ) < 0 ) return - 1 ; s -> current_picture_ptr = pic ; s -> current_picture_ptr -> f . top_field_first = s -> top_field_first ; if ( s -> codec_id == AV_CODEC_ID_MPEG1VIDEO || s -> codec_id == AV_CODEC_ID_MPEG2VIDEO ) { if ( s -> picture_structure != PICT_FRAME ) s -> current_picture_ptr -> f . top_field_first = ( s -> picture_structure == PICT_TOP_FIELD ) == s -> first_field ; } s -> current_picture_ptr -> f . interlaced_frame = ! s -> progressive_frame && ! s -> progressive_sequence ; s -> current_picture_ptr -> field_picture = s -> picture_structure != PICT_FRAME ; } s -> current_picture_ptr -> f . pict_type = s -> pict_type ; s -> current_picture_ptr -> f . key_frame = s -> pict_type == AV_PICTURE_TYPE_I ; ff_mpeg_unref_picture ( s , & s -> current_picture ) ; if ( ( ret = ff_mpeg_ref_picture ( s , & s -> current_picture , s -> current_picture_ptr ) ) < 0 ) return ret ; if ( s -> codec_id != AV_CODEC_ID_H264 && s -> pict_type != AV_PICTURE_TYPE_B ) { s -> last_picture_ptr = s -> next_picture_ptr ; if ( ! s -> droppable ) s -> next_picture_ptr = s -> current_picture_ptr ; } av_dlog ( s -> avctx , "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n" , s -> last_picture_ptr , s -> next_picture_ptr , s -> current_picture_ptr , s -> last_picture_ptr ? s -> last_picture_ptr -> f . data [ 0 ] : NULL , s -> next_picture_ptr ? s -> next_picture_ptr -> f . data [ 0 ] : NULL , s -> current_picture_ptr ? s -> current_picture_ptr -> f . data [ 0 ] : NULL , s -> pict_type , s -> droppable ) ; if ( s -> codec_id != AV_CODEC_ID_H264 ) { if ( ( s -> last_picture_ptr == NULL || s -> last_picture_ptr -> f . 
data [ 0 ] == NULL ) && ( s -> pict_type != AV_PICTURE_TYPE_I || s -> picture_structure != PICT_FRAME ) ) { int h_chroma_shift , v_chroma_shift ; av_pix_fmt_get_chroma_sub_sample ( s -> avctx -> pix_fmt , & h_chroma_shift , & v_chroma_shift ) ; if ( s -> pict_type != AV_PICTURE_TYPE_I ) av_log ( avctx , AV_LOG_ERROR , "warning: first frame is no keyframe\n" ) ; else if ( s -> picture_structure != PICT_FRAME ) av_log ( avctx , AV_LOG_INFO , "allocate dummy last picture for field based first keyframe\n" ) ; i = ff_find_unused_picture ( s , 0 ) ; if ( i < 0 ) { av_log ( s -> avctx , AV_LOG_ERROR , "no frame buffer available\n" ) ; return i ; } s -> last_picture_ptr = & s -> picture [ i ] ; if ( ff_alloc_picture ( s , s -> last_picture_ptr , 0 ) < 0 ) { s -> last_picture_ptr = NULL ; return - 1 ; } memset ( s -> last_picture_ptr -> f . data [ 0 ] , 0 , avctx -> height * s -> last_picture_ptr -> f . linesize [ 0 ] ) ; memset ( s -> last_picture_ptr -> f . data [ 1 ] , 0x80 , ( avctx -> height >> v_chroma_shift ) * s -> last_picture_ptr -> f . linesize [ 1 ] ) ; memset ( s -> last_picture_ptr -> f . data [ 2 ] , 0x80 , ( avctx -> height >> v_chroma_shift ) * s -> last_picture_ptr -> f . linesize [ 2 ] ) ; ff_thread_report_progress ( & s -> last_picture_ptr -> tf , INT_MAX , 0 ) ; ff_thread_report_progress ( & s -> last_picture_ptr -> tf , INT_MAX , 1 ) ; } if ( ( s -> next_picture_ptr == NULL || s -> next_picture_ptr -> f . data [ 0 ] == NULL ) && s -> pict_type == AV_PICTURE_TYPE_B ) { i = ff_find_unused_picture ( s , 0 ) ; if ( i < 0 ) { av_log ( s -> avctx , AV_LOG_ERROR , "no frame buffer available\n" ) ; return i ; } s -> next_picture_ptr = & s -> picture [ i ] ; if ( ff_alloc_picture ( s , s -> next_picture_ptr , 0 ) < 0 ) { s -> next_picture_ptr = NULL ; return - 1 ; } ff_thread_report_progress ( & s -> next_picture_ptr -> tf , INT_MAX , 0 ) ; ff_thread_report_progress ( & s -> next_picture_ptr -> tf , INT_MAX , 1 ) ; } } if ( s -> codec_id != AV_CODEC_ID_H264 ) { if ( s -> last_picture_ptr ) { ff_mpeg_unref_picture ( s , & s -> last_picture ) ; if ( s -> last_picture_ptr -> f . data [ 0 ] && ( ret = ff_mpeg_ref_picture ( s , & s -> last_picture , s -> last_picture_ptr ) ) < 0 ) return ret ; } if ( s -> next_picture_ptr ) { ff_mpeg_unref_picture ( s , & s -> next_picture ) ; if ( s -> next_picture_ptr -> f . data [ 0 ] && ( ret = ff_mpeg_ref_picture ( s , & s -> next_picture , s -> next_picture_ptr ) ) < 0 ) return ret ; } assert ( s -> pict_type == AV_PICTURE_TYPE_I || ( s -> last_picture_ptr && s -> last_picture_ptr -> f . data [ 0 ] ) ) ; } if ( s -> picture_structure != PICT_FRAME && s -> out_format != FMT_H264 ) { int i ; for ( i = 0 ; i < 4 ; i ++ ) { if ( s -> picture_structure == PICT_BOTTOM_FIELD ) { s -> current_picture . f . data [ i ] += s -> current_picture . f . linesize [ i ] ; } s -> current_picture . f . linesize [ i ] *= 2 ; s -> last_picture . f . linesize [ i ] *= 2 ; s -> next_picture . f . 
linesize [ i ] *= 2 ; } } s -> err_recognition = avctx -> err_recognition ; if ( s -> mpeg_quant || s -> codec_id == AV_CODEC_ID_MPEG2VIDEO ) { s -> dct_unquantize_intra = s -> dct_unquantize_mpeg2_intra ; s -> dct_unquantize_inter = s -> dct_unquantize_mpeg2_inter ; } else if ( s -> out_format == FMT_H263 || s -> out_format == FMT_H261 ) { s -> dct_unquantize_intra = s -> dct_unquantize_h263_intra ; s -> dct_unquantize_inter = s -> dct_unquantize_h263_inter ; } else { s -> dct_unquantize_intra = s -> dct_unquantize_mpeg1_intra ; s -> dct_unquantize_inter = s -> dct_unquantize_mpeg1_inter ; } if ( s -> dct_error_sum ) { assert ( s -> avctx -> noise_reduction && s -> encoding ) ; update_noise_reduction ( s ) ; } if ( CONFIG_MPEG_XVMC_DECODER && s -> avctx -> xvmc_acceleration ) return ff_xvmc_field_start ( s , avctx ) ; return 0 ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
static int dissect_pcp_message_pmns_child ( tvbuff_t * tvb , packet_info * pinfo , proto_tree * tree , int offset ) { proto_item * pcp_pmns_child_item ; proto_tree * pcp_pmns_child_tree ; guint32 name_len ; pcp_pmns_child_item = proto_tree_add_item ( tree , hf_pcp_pmns_child , tvb , offset , - 1 , ENC_NA ) ; pcp_pmns_child_tree = proto_item_add_subtree ( pcp_pmns_child_item , ett_pcp ) ; col_append_fstr ( pinfo -> cinfo , COL_INFO , "[%s]" , val_to_str ( PCP_PDU_PMNS_CHILD , packettypenames , "Unknown Type:0x%02x" ) ) ; proto_tree_add_item ( pcp_pmns_child_tree , hf_pcp_pmns_subtype , tvb , offset , 4 , ENC_BIG_ENDIAN ) ; offset += 4 ; proto_tree_add_item ( pcp_pmns_child_tree , hf_pcp_pmns_namelen , tvb , offset , 4 , ENC_BIG_ENDIAN ) ; name_len = tvb_get_ntohl ( tvb , offset ) ; offset += 4 ; proto_tree_add_item ( pcp_pmns_child_tree , hf_pcp_pmns_name , tvb , offset , name_len , ENC_ASCII | ENC_NA ) ; offset += 4 ; return offset ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
static int decode_rgb24_frame ( CLLCContext * ctx , GetBitContext * gb , AVFrame * pic ) { AVCodecContext * avctx = ctx -> avctx ; uint8_t * dst ; int pred [ 3 ] ; int ret ; int i , j ; VLC vlc [ 3 ] ; pred [ 0 ] = 0x80 ; pred [ 1 ] = 0x80 ; pred [ 2 ] = 0x80 ; dst = pic -> data [ 0 ] ; skip_bits ( gb , 16 ) ; for ( i = 0 ; i < 3 ; i ++ ) { ret = read_code_table ( ctx , gb , & vlc [ i ] ) ; if ( ret < 0 ) { for ( j = 0 ; j <= i ; j ++ ) ff_free_vlc ( & vlc [ j ] ) ; av_log ( ctx -> avctx , AV_LOG_ERROR , "Could not read code table %d.\n" , i ) ; return ret ; } } for ( i = 0 ; i < avctx -> height ; i ++ ) { for ( j = 0 ; j < 3 ; j ++ ) read_rgb24_component_line ( ctx , gb , & pred [ j ] , & vlc [ j ] , & dst [ j ] ) ; dst += pic -> linesize [ 0 ] ; } for ( i = 0 ; i < 3 ; i ++ ) ff_free_vlc ( & vlc [ i ] ) ; return 0 ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
static void dtap_cc_retrieve_rej ( tvbuff_t * tvb , proto_tree * tree , packet_info * pinfo _U_ , guint32 offset , guint len ) { guint32 curr_offset ; guint32 consumed ; guint curr_len ; curr_offset = offset ; curr_len = len ; is_uplink = IS_UPLINK_FALSE ; ELEM_MAND_LV ( GSM_A_PDU_TYPE_DTAP , DE_CAUSE , NULL ) ; EXTRANEOUS_DATA_CHECK ( curr_len , 0 , pinfo , & ei_gsm_a_dtap_extraneous_data ) ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
static int dissect_h225_RegistrationReject ( tvbuff_t * tvb _U_ , int offset _U_ , asn1_ctx_t * actx _U_ , proto_tree * tree _U_ , int hf_index _U_ ) { offset = dissect_per_sequence ( tvb , offset , actx , tree , hf_index , ett_h225_RegistrationReject , RegistrationReject_sequence ) ; return offset ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
static int inode_has_perm ( const struct cred * cred , struct inode * inode , u32 perms , struct common_audit_data * adp ) { struct inode_security_struct * isec ; u32 sid ; validate_creds ( cred ) ; if ( unlikely ( IS_PRIVATE ( inode ) ) ) return 0 ; sid = cred_sid ( cred ) ; isec = inode -> i_security ; return avc_has_perm ( sid , isec -> sid , isec -> sclass , perms , adp ) ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
static krb5_error_code randkey_princ ( krb5_principal princ , krb5_boolean keepold , int n_ks , krb5_key_salt_tuple * ks ) { if ( keepold || ks ) { return kadm5_randkey_principal_3 ( handle , princ , keepold , n_ks , ks , NULL , NULL ) ; } else return kadm5_randkey_principal ( handle , princ , NULL , NULL ) ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
static void pitch_enhancer ( float * fixed_vector , float voice_fac ) { int i ; float cpe = 0.125 * ( 1 + voice_fac ) ; float last = fixed_vector [ 0 ] ; fixed_vector [ 0 ] -= cpe * fixed_vector [ 1 ] ; for ( i = 1 ; i < AMRWB_SFR_SIZE - 1 ; i ++ ) { float cur = fixed_vector [ i ] ; fixed_vector [ i ] -= cpe * ( last + fixed_vector [ i + 1 ] ) ; last = cur ; } fixed_vector [ AMRWB_SFR_SIZE - 1 ] -= cpe * last ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
static int dissect_CPMGetApproximatePosition ( tvbuff_t * tvb , packet_info * pinfo , proto_tree * parent_tree , gboolean in , void * data _U_ ) { gint offset = 16 ; proto_item * item ; proto_tree * tree ; item = proto_tree_add_item ( parent_tree , hf_mswsp_msg , tvb , offset , in ? 0 : - 1 , ENC_NA ) ; tree = proto_item_add_subtree ( item , ett_mswsp_msg ) ; proto_item_set_text ( item , "GetApproximatePosition%s" , in ? "In" : "Out" ) ; col_append_str ( pinfo -> cinfo , COL_INFO , "GetApproximatePosition" ) ; if ( in ) { proto_tree_add_item ( tree , hf_mswsp_msg_cpmgetapproxpos_hcursor , tvb , offset , 4 , ENC_LITTLE_ENDIAN ) ; offset += 4 ; proto_tree_add_item ( tree , hf_mswsp_msg_cpmgetapproxpos_chapt , tvb , offset , 4 , ENC_LITTLE_ENDIAN ) ; offset += 4 ; proto_tree_add_item ( tree , hf_mswsp_msg_cpmgetapproxpos_bmk , tvb , offset , 4 , ENC_LITTLE_ENDIAN ) ; } else { proto_tree_add_item ( tree , hf_mswsp_msg_cpmgetapproxpos_numerator , tvb , offset , 4 , ENC_LITTLE_ENDIAN ) ; offset += 4 ; proto_tree_add_item ( tree , hf_mswsp_msg_cpmgetapproxpos_denominator , tvb , offset , 4 , ENC_LITTLE_ENDIAN ) ; } return tvb_reported_length ( tvb ) ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
static uint64_t translate_prom_address ( void * opaque , uint64_t addr ) { hwaddr * base_addr = ( hwaddr * ) opaque ; return addr + * base_addr - PROM_VADDR ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
int qemuMonitorJSONMigrateCancel ( qemuMonitorPtr mon ) { int ret ; virJSONValuePtr cmd = qemuMonitorJSONMakeCommand ( "migrate_cancel" , NULL ) ; virJSONValuePtr reply = NULL ; if ( ! cmd ) return - 1 ; ret = qemuMonitorJSONCommand ( mon , cmd , & reply ) ; if ( ret == 0 ) ret = qemuMonitorJSONCheckError ( cmd , reply ) ; virJSONValueFree ( cmd ) ; virJSONValueFree ( reply ) ; return ret ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
MPI # ifdef M_DEBUG mpi_debug_alloc ( unsigned nlimbs , const char * info ) # else mpi_alloc ( unsigned nlimbs ) # endif { MPI a ; if ( DBG_MEMORY ) log_debug ( "mpi_alloc(%u)\n" , nlimbs * BITS_PER_MPI_LIMB ) ; # ifdef M_DEBUG a = m_debug_alloc ( sizeof * a , info ) ; a -> d = nlimbs ? mpi_debug_alloc_limb_space ( nlimbs , 0 , info ) : NULL ; # else a = xmalloc ( sizeof * a ) ; a -> d = nlimbs ? mpi_alloc_limb_space ( nlimbs , 0 ) : NULL ; # endif a -> alloced = nlimbs ; a -> nlimbs = 0 ; a -> sign = 0 ; a -> flags = 0 ; a -> nbits = 0 ; return a ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
static void calculate_path_table_size ( struct vdd * vdd ) { int depth , size ; struct path_table * pt ; pt = vdd -> pathtbl ; size = 0 ; for ( depth = 0 ; depth < vdd -> max_depth ; depth ++ ) { struct isoent * * ptbl ; int i , cnt ; if ( ( cnt = pt [ depth ] . cnt ) == 0 ) break ; ptbl = pt [ depth ] . sorted ; for ( i = 0 ; i < cnt ; i ++ ) { int len ; if ( ptbl [ i ] -> identifier == NULL ) len = 1 ; else len = ptbl [ i ] -> id_len ; if ( len & 0x01 ) len ++ ; size += 8 + len ; } } vdd -> path_table_size = size ; vdd -> path_table_block = ( ( size + PATH_TABLE_BLOCK_SIZE - 1 ) / PATH_TABLE_BLOCK_SIZE ) * ( PATH_TABLE_BLOCK_SIZE / LOGICAL_BLOCK_SIZE ) ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
static void fz_drop_link_imp ( fz_context * ctx , fz_storable * storable ) { fz_icclink * link = ( fz_icclink * ) storable ; fz_cmm_fin_link ( ctx , link ) ; fz_free ( ctx , link ) ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
static int dissect_h245_T_frameSequence ( tvbuff_t * tvb _U_ , int offset _U_ , asn1_ctx_t * actx _U_ , proto_tree * tree _U_ , int hf_index _U_ ) { offset = dissect_per_constrained_sequence_of ( tvb , offset , actx , tree , hf_index , ett_h245_T_frameSequence , T_frameSequence_sequence_of , 1 , 256 , FALSE ) ; return offset ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
static int set_bps_params ( AVCodecContext * avctx ) { switch ( avctx -> bits_per_coded_sample ) { case 8 : avctx -> sample_fmt = AV_SAMPLE_FMT_U8P ; break ; case 16 : avctx -> sample_fmt = AV_SAMPLE_FMT_S16P ; break ; case 24 : avctx -> sample_fmt = AV_SAMPLE_FMT_S32P ; break ; default : av_log ( avctx , AV_LOG_ERROR , "unsupported bits per sample: %d\n" , avctx -> bits_per_coded_sample ) ; return AVERROR_INVALIDDATA ; } avctx -> bits_per_raw_sample = avctx -> bits_per_coded_sample ; return 0 ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
static jboolean IsDownloadDangerous ( JNIEnv * env , const JavaParamRef < jclass > & clazz , const JavaParamRef < jstring > & filename ) { base : : FilePath path ( base : : android : : ConvertJavaStringToUTF8 ( env , filename ) ) ; return download_util : : GetFileDangerLevel ( path ) != download_util : : NOT_DANGEROUS ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
static int vc1_decode_p_mb_intfi ( VC1Context * v ) { MpegEncContext * s = & v -> s ; GetBitContext * gb = & s -> gb ; int i ; int mb_pos = s -> mb_x + s -> mb_y * s -> mb_stride ; int cbp = 0 ; int mqdiff , mquant ; int ttmb = v -> ttfrm ; int mb_has_coeffs = 1 ; int dmv_x , dmv_y ; int val ; int first_block = 1 ; int dst_idx , off ; int pred_flag ; int block_cbp = 0 , pat , block_tt = 0 ; int idx_mbmode = 0 ; mquant = v -> pq ; idx_mbmode = get_vlc2 ( gb , v -> mbmode_vlc -> table , VC1_IF_MBMODE_VLC_BITS , 2 ) ; if ( idx_mbmode <= 1 ) { s -> mb_intra = v -> is_intra [ s -> mb_x ] = 1 ; s -> current_picture . motion_val [ 1 ] [ s -> block_index [ 0 ] + v -> blocks_off ] [ 0 ] = 0 ; s -> current_picture . motion_val [ 1 ] [ s -> block_index [ 0 ] + v -> blocks_off ] [ 1 ] = 0 ; s -> current_picture . mb_type [ mb_pos + v -> mb_off ] = MB_TYPE_INTRA ; GET_MQUANT ( ) ; s -> current_picture . qscale_table [ mb_pos ] = mquant ; s -> y_dc_scale = s -> y_dc_scale_table [ mquant ] ; s -> c_dc_scale = s -> c_dc_scale_table [ mquant ] ; v -> s . ac_pred = v -> acpred_plane [ mb_pos ] = get_bits1 ( gb ) ; mb_has_coeffs = idx_mbmode & 1 ; if ( mb_has_coeffs ) cbp = 1 + get_vlc2 ( & v -> s . gb , v -> cbpcy_vlc -> table , VC1_ICBPCY_VLC_BITS , 2 ) ; dst_idx = 0 ; for ( i = 0 ; i < 6 ; i ++ ) { s -> dc_val [ 0 ] [ s -> block_index [ i ] ] = 0 ; v -> mb_type [ 0 ] [ s -> block_index [ i ] ] = 1 ; dst_idx += i >> 2 ; val = ( ( cbp >> ( 5 - i ) ) & 1 ) ; v -> a_avail = v -> c_avail = 0 ; if ( i == 2 || i == 3 || ! s -> first_slice_line ) v -> a_avail = v -> mb_type [ 0 ] [ s -> block_index [ i ] - s -> block_wrap [ i ] ] ; if ( i == 1 || i == 3 || s -> mb_x ) v -> c_avail = v -> mb_type [ 0 ] [ s -> block_index [ i ] - 1 ] ; vc1_decode_intra_block ( v , s -> block [ i ] , i , val , mquant , ( i & 4 ) ? v -> codingset2 : v -> codingset ) ; if ( ( i > 3 ) && ( s -> flags & CODEC_FLAG_GRAY ) ) continue ; v -> vc1dsp . vc1_inv_trans_8x8 ( s -> block [ i ] ) ; off = ( i & 4 ) ? 0 : ( ( i & 1 ) * 8 + ( i & 2 ) * 4 * s -> linesize ) ; off += v -> cur_field_type ? ( ( i & 4 ) ? s -> current_picture_ptr -> f . linesize [ 1 ] : s -> current_picture_ptr -> f . linesize [ 0 ] ) : 0 ; s -> dsp . put_signed_pixels_clamped ( s -> block [ i ] , s -> dest [ dst_idx ] + off , ( i & 4 ) ? s -> uvlinesize : s -> linesize ) ; } } else { s -> mb_intra = v -> is_intra [ s -> mb_x ] = 0 ; s -> current_picture . mb_type [ mb_pos + v -> mb_off ] = MB_TYPE_16x16 ; for ( i = 0 ; i < 6 ; i ++ ) v -> mb_type [ 0 ] [ s -> block_index [ i ] ] = 0 ; if ( idx_mbmode <= 5 ) { dmv_x = dmv_y = pred_flag = 0 ; if ( idx_mbmode & 1 ) { get_mvdata_interlaced ( v , & dmv_x , & dmv_y , & pred_flag ) ; } vc1_pred_mv ( v , 0 , dmv_x , dmv_y , 1 , v -> range_x , v -> range_y , v -> mb_type [ 0 ] , pred_flag , 0 ) ; vc1_mc_1mv ( v , 0 ) ; mb_has_coeffs = ! ( idx_mbmode & 2 ) ; } else { v -> fourmvbp = get_vlc2 ( gb , v -> fourmvbp_vlc -> table , VC1_4MV_BLOCK_PATTERN_VLC_BITS , 1 ) ; for ( i = 0 ; i < 6 ; i ++ ) { if ( i < 4 ) { dmv_x = dmv_y = pred_flag = 0 ; val = ( ( v -> fourmvbp >> ( 3 - i ) ) & 1 ) ; if ( val ) { get_mvdata_interlaced ( v , & dmv_x , & dmv_y , & pred_flag ) ; } vc1_pred_mv ( v , i , dmv_x , dmv_y , 0 , v -> range_x , v -> range_y , v -> mb_type [ 0 ] , pred_flag , 0 ) ; vc1_mc_4mv_luma ( v , i , 0 ) ; } else if ( i == 4 ) vc1_mc_4mv_chroma ( v , 0 ) ; } mb_has_coeffs = idx_mbmode & 1 ; } if ( mb_has_coeffs ) cbp = 1 + get_vlc2 ( & v -> s . 
gb , v -> cbpcy_vlc -> table , VC1_CBPCY_P_VLC_BITS , 2 ) ; if ( cbp ) { GET_MQUANT ( ) ; } s -> current_picture . qscale_table [ mb_pos ] = mquant ; if ( ! v -> ttmbf && cbp ) { ttmb = get_vlc2 ( gb , ff_vc1_ttmb_vlc [ v -> tt_index ] . table , VC1_TTMB_VLC_BITS , 2 ) ; } dst_idx = 0 ; for ( i = 0 ; i < 6 ; i ++ ) { s -> dc_val [ 0 ] [ s -> block_index [ i ] ] = 0 ; dst_idx += i >> 2 ; val = ( ( cbp >> ( 5 - i ) ) & 1 ) ; off = ( i & 4 ) ? 0 : ( i & 1 ) * 8 + ( i & 2 ) * 4 * s -> linesize ; if ( v -> cur_field_type ) off += ( i & 4 ) ? s -> current_picture_ptr -> f . linesize [ 1 ] : s -> current_picture_ptr -> f . linesize [ 0 ] ; if ( val ) { pat = vc1_decode_p_block ( v , s -> block [ i ] , i , mquant , ttmb , first_block , s -> dest [ dst_idx ] + off , ( i & 4 ) ? s -> uvlinesize : s -> linesize , ( i & 4 ) && ( s -> flags & CODEC_FLAG_GRAY ) , & block_tt ) ; block_cbp |= pat << ( i << 2 ) ; if ( ! v -> ttmbf && ttmb < 8 ) ttmb = - 1 ; first_block = 0 ; } } } if ( s -> mb_x == s -> mb_width - 1 ) memmove ( v -> is_intra_base , v -> is_intra , sizeof ( v -> is_intra_base [ 0 ] ) * s -> mb_stride ) ; return 0 ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
static int kq_dispatch ( struct event_base * base , void * arg , struct timeval * tv ) { struct kqop * kqop = arg ; struct kevent * changes = kqop -> changes ; struct kevent * events = kqop -> events ; struct event * ev ; struct timespec ts , * ts_p = NULL ; int i , res ; if ( tv != NULL ) { TIMEVAL_TO_TIMESPEC ( tv , & ts ) ; ts_p = & ts ; } res = kevent ( kqop -> kq , changes , kqop -> nchanges , events , kqop -> nevents , ts_p ) ; kqop -> nchanges = 0 ; if ( res == - 1 ) { if ( errno != EINTR ) { event_warn ( "kevent" ) ; return ( - 1 ) ; } return ( 0 ) ; } event_debug ( ( "%s: kevent reports %d" , __func__ , res ) ) ; for ( i = 0 ; i < res ; i ++ ) { int which = 0 ; if ( events [ i ] . flags & EV_ERROR ) { if ( events [ i ] . data == EBADF || events [ i ] . data == EINVAL || events [ i ] . data == ENOENT ) continue ; errno = events [ i ] . data ; return ( - 1 ) ; } if ( events [ i ] . filter == EVFILT_READ ) { which |= EV_READ ; } else if ( events [ i ] . filter == EVFILT_WRITE ) { which |= EV_WRITE ; } else if ( events [ i ] . filter == EVFILT_SIGNAL ) { which |= EV_SIGNAL ; } if ( ! which ) continue ; if ( events [ i ] . filter == EVFILT_SIGNAL ) { struct event_list * head = ( struct event_list * ) events [ i ] . udata ; TAILQ_FOREACH ( ev , head , ev_signal_next ) { event_active ( ev , which , events [ i ] . data ) ; } } else { ev = ( struct event * ) events [ i ] . udata ; if ( ! ( ev -> ev_events & EV_PERSIST ) ) ev -> ev_flags &= ~ EVLIST_X_KQINKERNEL ; event_active ( ev , which , 1 ) ; } } return ( 0 ) ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
bool mysql_drop_user ( THD * thd , List < LEX_USER > & list ) { int result ; String wrong_users ; LEX_USER * user_name , * tmp_user_name ; List_iterator < LEX_USER > user_list ( list ) ; TABLE_LIST tables [ GRANT_TABLES ] ; bool some_users_deleted = FALSE ; ulong old_sql_mode = thd -> variables . sql_mode ; bool save_binlog_row_based ; DBUG_ENTER ( "mysql_drop_user" ) ; save_binlog_row_based = thd -> current_stmt_binlog_row_based ; thd -> clear_current_stmt_binlog_row_based ( ) ; if ( ( result = open_grant_tables ( thd , tables ) ) ) { thd -> current_stmt_binlog_row_based = save_binlog_row_based ; DBUG_RETURN ( result != 1 ) ; } thd -> variables . sql_mode &= ~ MODE_PAD_CHAR_TO_FULL_LENGTH ; rw_wrlock ( & LOCK_grant ) ; VOID ( pthread_mutex_lock ( & acl_cache -> lock ) ) ; while ( ( tmp_user_name = user_list ++ ) ) { if ( ! ( user_name = get_current_user ( thd , tmp_user_name ) ) ) { result = TRUE ; continue ; } if ( handle_grant_data ( tables , 1 , user_name , NULL ) <= 0 ) { append_user ( & wrong_users , user_name ) ; result = TRUE ; continue ; } some_users_deleted = TRUE ; } rebuild_check_host ( ) ; VOID ( pthread_mutex_unlock ( & acl_cache -> lock ) ) ; if ( result ) my_error ( ER_CANNOT_USER , MYF ( 0 ) , "DROP USER" , wrong_users . c_ptr_safe ( ) ) ; if ( some_users_deleted ) result |= write_bin_log ( thd , FALSE , thd -> query ( ) , thd -> query_length ( ) ) ; rw_unlock ( & LOCK_grant ) ; close_thread_tables ( thd ) ; thd -> variables . sql_mode = old_sql_mode ; thd -> current_stmt_binlog_row_based = save_binlog_row_based ; DBUG_RETURN ( result ) ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
void proto_register_opcua ( void ) { static hf_register_info hf [ ] = { { & hf_opcua_fragments , { "Message fragments" , "opcua.fragments" , FT_NONE , BASE_NONE , NULL , 0x00 , NULL , HFILL } } , { & hf_opcua_fragment , { "Message fragment" , "opcua.fragment" , FT_FRAMENUM , BASE_NONE , NULL , 0x00 , NULL , HFILL } } , { & hf_opcua_fragment_overlap , { "Message fragment overlap" , "opcua.fragment.overlap" , FT_BOOLEAN , BASE_NONE , NULL , 0x00 , NULL , HFILL } } , { & hf_opcua_fragment_overlap_conflicts , { "Message fragment overlapping with conflicting data" , "opcua.fragment.overlap.conflicts" , FT_BOOLEAN , BASE_NONE , NULL , 0x00 , NULL , HFILL } } , { & hf_opcua_fragment_multiple_tails , { "Message has multiple tail fragments" , "opcua.fragment.multiple_tails" , FT_BOOLEAN , BASE_NONE , NULL , 0x00 , NULL , HFILL } } , { & hf_opcua_fragment_too_long_fragment , { "Message fragment too long" , "opcua.fragment.too_long_fragment" , FT_BOOLEAN , BASE_NONE , NULL , 0x00 , NULL , HFILL } } , { & hf_opcua_fragment_error , { "Message defragmentation error" , "opcua.fragment.error" , FT_FRAMENUM , BASE_NONE , NULL , 0x00 , NULL , HFILL } } , { & hf_opcua_fragment_count , { "Message fragment count" , "opcua.fragment.count" , FT_UINT32 , BASE_DEC , NULL , 0x00 , NULL , HFILL } } , { & hf_opcua_reassembled_in , { "Reassembled in" , "opcua.reassembled.in" , FT_FRAMENUM , BASE_NONE , NULL , 0x00 , NULL , HFILL } } , { & hf_opcua_reassembled_length , { "Reassembled length" , "opcua.reassembled.length" , FT_UINT32 , BASE_DEC , NULL , 0x00 , NULL , HFILL } } } ; static gint * ett [ ] = { & ett_opcua_extensionobject , & ett_opcua_nodeid , & ett_opcua_transport , & ett_opcua_fragment , & ett_opcua_fragments } ; proto_opcua = proto_register_protocol ( "OpcUa Binary Protocol" , "OpcUa" , "opcua" ) ; registerTransportLayerTypes ( proto_opcua ) ; registerSecurityLayerTypes ( proto_opcua ) ; registerApplicationLayerTypes ( proto_opcua ) ; registerSimpleTypes ( proto_opcua ) ; registerEnumTypes ( proto_opcua ) ; registerComplexTypes ( ) ; registerServiceTypes ( ) ; registerFieldTypes ( proto_opcua ) ; proto_register_subtree_array ( ett , array_length ( ett ) ) ; proto_register_field_array ( proto_opcua , hf , array_length ( hf ) ) ; reassembly_table_register ( & opcua_reassembly_table , & addresses_reassembly_table_functions ) ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
static void test_bug40365 ( void ) { uint rc , i ; MYSQL_STMT * stmt = 0 ; MYSQL_BIND my_bind [ 2 ] ; my_bool is_null [ 2 ] = { 0 } ; MYSQL_TIME tm [ 2 ] ; DBUG_ENTER ( "test_bug40365" ) ; rc = mysql_query ( mysql , "DROP TABLE IF EXISTS t1" ) ; myquery ( rc ) ; rc = mysql_query ( mysql , "CREATE TABLE t1(c1 DATETIME, \ c2 DATE)" ) ; myquery ( rc ) ; stmt = mysql_simple_prepare ( mysql , "INSERT INTO t1 VALUES(?, ?)" ) ; check_stmt ( stmt ) ; verify_param_count ( stmt , 2 ) ; memset ( my_bind , 0 , sizeof ( my_bind ) ) ; my_bind [ 0 ] . buffer_type = MYSQL_TYPE_DATETIME ; my_bind [ 1 ] . buffer_type = MYSQL_TYPE_DATE ; for ( i = 0 ; i < ( int ) array_elements ( my_bind ) ; i ++ ) { my_bind [ i ] . buffer = ( void * ) & tm [ i ] ; my_bind [ i ] . is_null = & is_null [ i ] ; } rc = mysql_stmt_bind_param ( stmt , my_bind ) ; check_execute ( stmt , rc ) ; for ( i = 0 ; i < ( int ) array_elements ( my_bind ) ; i ++ ) { tm [ i ] . neg = 0 ; tm [ i ] . second_part = 0 ; tm [ i ] . year = 2009 ; tm [ i ] . month = 2 ; tm [ i ] . day = 29 ; tm [ i ] . hour = 0 ; tm [ i ] . minute = 0 ; tm [ i ] . second = 0 ; } rc = mysql_stmt_execute ( stmt ) ; check_execute ( stmt , rc ) ; rc = mysql_commit ( mysql ) ; myquery ( rc ) ; mysql_stmt_close ( stmt ) ; stmt = mysql_simple_prepare ( mysql , "SELECT * FROM t1" ) ; check_stmt ( stmt ) ; rc = mysql_stmt_bind_result ( stmt , my_bind ) ; check_execute ( stmt , rc ) ; rc = mysql_stmt_execute ( stmt ) ; check_execute ( stmt , rc ) ; rc = mysql_stmt_store_result ( stmt ) ; check_execute ( stmt , rc ) ; rc = mysql_stmt_fetch ( stmt ) ; check_execute ( stmt , rc ) ; if ( ! opt_silent ) fprintf ( stdout , "\n" ) ; for ( i = 0 ; i < array_elements ( my_bind ) ; i ++ ) { if ( ! opt_silent ) fprintf ( stdout , "\ntime[%d]: %02d-%02d-%02d " , i , tm [ i ] . year , tm [ i ] . month , tm [ i ] . day ) ; DIE_UNLESS ( tm [ i ] . year == 0 ) ; DIE_UNLESS ( tm [ i ] . month == 0 ) ; DIE_UNLESS ( tm [ i ] . day == 0 ) ; } mysql_stmt_close ( stmt ) ; rc = mysql_commit ( mysql ) ; myquery ( rc ) ; DBUG_VOID_RETURN ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
static void decode_filter_coeffs ( TAKDecContext * s , int filter_order , int size , int filter_quant , int16_t * filter ) { GetBitContext * gb = & s -> gb ; int i , j , a , b ; int filter_tmp [ MAX_PREDICTORS ] ; int16_t predictors [ MAX_PREDICTORS ] ; predictors [ 0 ] = get_sbits ( gb , 10 ) ; predictors [ 1 ] = get_sbits ( gb , 10 ) ; predictors [ 2 ] = get_sbits ( gb , size ) << ( 10 - size ) ; predictors [ 3 ] = get_sbits ( gb , size ) << ( 10 - size ) ; if ( filter_order > 4 ) { int av_uninit ( code_size ) ; int code_size_base = size - get_bits1 ( gb ) ; for ( i = 4 ; i < filter_order ; i ++ ) { if ( ! ( i & 3 ) ) code_size = code_size_base - get_bits ( gb , 2 ) ; predictors [ i ] = get_sbits ( gb , code_size ) << ( 10 - size ) ; } } filter_tmp [ 0 ] = predictors [ 0 ] << 6 ; for ( i = 1 ; i < filter_order ; i ++ ) { int * p1 = & filter_tmp [ 0 ] ; int * p2 = & filter_tmp [ i - 1 ] ; for ( j = 0 ; j < ( i + 1 ) / 2 ; j ++ ) { int tmp = * p1 + ( predictors [ i ] * * p2 + 256 >> 9 ) ; * p2 = * p2 + ( predictors [ i ] * * p1 + 256 >> 9 ) ; * p1 = tmp ; p1 ++ ; p2 -- ; } filter_tmp [ i ] = predictors [ i ] << 6 ; } a = 1 << ( 32 - ( 15 - filter_quant ) ) ; b = 1 << ( ( 15 - filter_quant ) - 1 ) ; for ( i = 0 , j = filter_order - 1 ; i < filter_order / 2 ; i ++ , j -- ) { filter [ j ] = a - ( ( filter_tmp [ i ] + b ) >> ( 15 - filter_quant ) ) ; filter [ i ] = a - ( ( filter_tmp [ j ] + b ) >> ( 15 - filter_quant ) ) ; } }
0False
Categorize the following code snippet as vulnerable or not. True or False
static void json_print_section_footer ( WriterContext * wctx ) { JSONContext * json = wctx -> priv ; const struct section * section = wctx -> section [ wctx -> level ] ; if ( wctx -> level == 0 ) { json -> indent_level -- ; printf ( "\n} \n" ) ; } else if ( section -> flags & SECTION_FLAG_IS_ARRAY ) { printf ( "\n" ) ; json -> indent_level -- ; JSON_INDENT ( ) ; printf ( "]" ) ; } else { printf ( "%s" , json -> item_start_end ) ; json -> indent_level -- ; if ( ! json -> compact ) JSON_INDENT ( ) ; printf ( "} " ) ; } }
0False
Categorize the following code snippet as vulnerable or not. True or False
int wpa_decrypt_broadcast_key ( struct eapol_key_header * eapol_key , struct rsn_ie_header * rsn_ie , struct wpa_sa * sa ) { u_int8 * encrypted_key ; u_int16 key_len = 0 ; char tmp [ 512 ] ; ( void ) rsn_ie ; if ( sa -> algo == WPA_KEY_TKIP ) { key_len = ntohs ( eapol_key -> key_len ) ; } else if ( sa -> algo == WPA_KEY_CCMP ) { key_len = ntohs ( eapol_key -> key_data_len ) ; } if ( key_len > sizeof ( struct rsn_ie_header ) || key_len == 0 ) return - E_NOTHANDLED ; SAFE_CALLOC ( encrypted_key , key_len , sizeof ( u_int8 ) ) ; DEBUG_MSG ( "Encrypted Broadcast key: %s\n" , str_tohex ( encrypted_key , key_len , tmp , sizeof ( tmp ) ) ) ; DEBUG_MSG ( "KeyIV: %s\n" , str_tohex ( eapol_key -> key_IV , 16 , tmp , sizeof ( tmp ) ) ) ; DEBUG_MSG ( "decryption_key: %s\n" , str_tohex ( sa -> ptk + 16 , 16 , tmp , sizeof ( tmp ) ) ) ; # if 0 memcpy ( new_key , pEAPKey -> key_iv , 16 ) ; memcpy ( new_key + 16 , decryption_key , 16 ) ; DEBUG_DUMP ( "FullDecrKey:" , new_key , 32 ) ; if ( key_version == AIRPDCAP_WPA_KEY_VER_NOT_CCMP ) { guint8 dummy [ 256 ] ; rc4_state_struct rc4_state ; crypt_rc4_init ( & rc4_state , new_key , sizeof ( new_key ) ) ; crypt_rc4 ( & rc4_state , dummy , 256 ) ; crypt_rc4 ( & rc4_state , encrypted_key , key_len ) ; } else if ( key_version == AIRPDCAP_WPA_KEY_VER_AES_CCMP ) { guint8 key_found ; guint16 key_index ; guint8 * decrypted_data ; decrypted_data = ( guint8 * ) g_malloc ( key_len ) ; AES_unwrap ( decryption_key , 16 , encrypted_key , key_len , decrypted_data ) ; key_found = FALSE ; key_index = 0 ; while ( key_index < key_len && ! key_found ) { guint8 rsn_id ; rsn_id = decrypted_data [ key_index ] ; if ( rsn_id != 0xdd ) { key_index += decrypted_data [ key_index + 1 ] + 2 ; } else { key_found = TRUE ; } } if ( key_found ) { memcpy ( encrypted_key , decrypted_data + key_index + 8 , key_len - key_index - 8 ) ; } g_free ( decrypted_data ) ; } DEBUG_DUMP ( "Broadcast key:" , encrypted_key , key_len ) ; sa -> key = & dummy_key ; sa -> validKey = TRUE ; sa -> wpa . key_ver = key_version ; memset ( sa -> wpa . ptk , 0 , sizeof ( sa -> wpa . ptk ) ) ; memcpy ( sa -> wpa . ptk + 32 , szEncryptedKey , key_len ) ; g_free ( szEncryptedKey ) ; # endif SAFE_FREE ( encrypted_key ) ; return E_SUCCESS ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
static void sig_message_public ( SERVER_REC * server , const char * msg , const char * nick , const char * address , const char * target ) { CHANNEL_REC * channel ; int own ; channel = channel_find ( server , target ) ; if ( channel != NULL ) { own = nick_match_msg ( channel , msg , server -> nick ) ; CHANNEL_LAST_MSG_ADD ( channel , nick , own ) ; } }
1True
Categorize the following code snippet as vulnerable or not. True or False
static int sort_ft_key_write ( MI_SORT_PARAM * sort_param , const void * a ) { uint a_len , val_off , val_len , error ; uchar * p ; SORT_INFO * sort_info = sort_param -> sort_info ; SORT_FT_BUF * ft_buf = sort_info -> ft_buf ; SORT_KEY_BLOCKS * key_block = sort_info -> key_block ; val_len = HA_FT_WLEN + sort_info -> info -> s -> rec_reflength ; get_key_full_length_rdonly ( a_len , ( uchar * ) a ) ; if ( ! ft_buf ) { if ( ( sort_info -> info -> s -> base . key_reflength <= sort_info -> info -> s -> rec_reflength ) && ( sort_info -> info -> s -> options & ( HA_OPTION_PACK_RECORD | HA_OPTION_COMPRESS_RECORD ) ) ) ft_buf = ( SORT_FT_BUF * ) my_malloc ( sort_param -> keyinfo -> block_length + sizeof ( SORT_FT_BUF ) , MYF ( MY_WME ) ) ; if ( ! ft_buf ) { sort_param -> key_write = sort_key_write ; return sort_key_write ( sort_param , a ) ; } sort_info -> ft_buf = ft_buf ; goto word_init_ft_buf ; } get_key_full_length_rdonly ( val_off , ft_buf -> lastkey ) ; if ( ha_compare_text ( sort_param -> seg -> charset , ( ( uchar * ) a ) + 1 , a_len - 1 , ft_buf -> lastkey + 1 , val_off - 1 , 0 , 0 ) == 0 ) { if ( ! ft_buf -> buf ) { ft_buf -> count ++ ; return sort_insert_key ( sort_param , key_block , ( ( uchar * ) a ) + a_len , HA_OFFSET_ERROR ) ; } memcpy ( ft_buf -> buf , ( char * ) a + a_len , val_len ) ; ft_buf -> buf += val_len ; if ( ft_buf -> buf < ft_buf -> end ) return 0 ; p = ft_buf -> lastkey + val_off ; while ( key_block -> inited ) key_block ++ ; sort_info -> key_block = key_block ; sort_param -> keyinfo = & sort_info -> info -> s -> ft2_keyinfo ; ft_buf -> count = ( uint ) ( ft_buf -> buf - p ) / val_len ; for ( error = 0 ; ! error && p < ft_buf -> buf ; p += val_len ) error = sort_insert_key ( sort_param , key_block , p , HA_OFFSET_ERROR ) ; ft_buf -> buf = 0 ; return error ; } if ( ( error = sort_ft_buf_flush ( sort_param ) ) ) return error ; word_init_ft_buf : a_len += val_len ; memcpy ( ft_buf -> lastkey , a , a_len ) ; ft_buf -> buf = ft_buf -> lastkey + a_len ; ft_buf -> end = ft_buf -> lastkey + ( sort_param -> keyinfo -> block_length - 32 ) ; return 0 ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
TSReturnCode TSUrlCopy ( TSMBuffer dest_bufp , TSMLoc dest_obj , TSMBuffer src_bufp , TSMLoc src_obj ) { sdk_assert ( sdk_sanity_check_mbuffer ( src_bufp ) == TS_SUCCESS ) ; sdk_assert ( sdk_sanity_check_mbuffer ( dest_bufp ) == TS_SUCCESS ) ; sdk_assert ( sdk_sanity_check_url_handle ( src_obj ) == TS_SUCCESS ) ; sdk_assert ( sdk_sanity_check_url_handle ( dest_obj ) == TS_SUCCESS ) ; if ( ! isWriteable ( dest_bufp ) ) { return TS_ERROR ; } HdrHeap * s_heap , * d_heap ; URLImpl * s_url , * d_url ; s_heap = ( ( HdrHeapSDKHandle * ) src_bufp ) -> m_heap ; d_heap = ( ( HdrHeapSDKHandle * ) dest_bufp ) -> m_heap ; s_url = ( URLImpl * ) src_obj ; d_url = ( URLImpl * ) dest_obj ; url_copy_onto ( s_url , s_heap , d_url , d_heap , ( s_heap != d_heap ) ) ; return TS_SUCCESS ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
static void ctcp_msg_dcc_accept ( IRC_SERVER_REC * server , const char * data , const char * nick , const char * addr , const char * target , DCC_REC * chat ) { FILE_DCC_REC * dcc ; uoff_t size ; int pasv_id ; if ( ! dcc_ctcp_resume_parse ( DCC_GET_TYPE , data , nick , & dcc , & size , & pasv_id ) || ( dcc != NULL && DCC_GET ( dcc ) -> get_type != DCC_GET_RESUME ) ) { signal_emit ( "dcc error ctcp" , 5 , "ACCEPT" , data , nick , addr , target ) ; } else if ( dcc != NULL && dcc_resume_file_check ( dcc , server , size ) ) { if ( ! dcc_is_passive ( dcc ) ) dcc_get_connect ( DCC_GET ( dcc ) ) ; else dcc_get_passive ( DCC_GET ( dcc ) ) ; } }
0False
Categorize the following code snippet as vulnerable or not. True or False
void qemu_spice_display_init ( void ) { QemuConsole * con ; int i ; for ( i = 0 ; ; i ++ ) { con = qemu_console_lookup_by_index ( i ) ; if ( ! con || ! qemu_console_is_graphic ( con ) ) { break ; } if ( qemu_spice_have_display_interface ( con ) ) { continue ; } qemu_spice_display_init_one ( con ) ; } }
0False
Categorize the following code snippet as vulnerable or not. True or False
static struct archive_string * archive_string_append ( struct archive_string * as , const char * p , size_t s ) { if ( archive_string_ensure ( as , as -> length + s + 1 ) == NULL ) return ( NULL ) ; memmove ( as -> s + as -> length , p , s ) ; as -> length += s ; as -> s [ as -> length ] = 0 ; return ( as ) ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
TEST_F ( TemplateURLTest , ParseURLNestedParameter ) { TemplateURLData data ; data . SetURL ( "{ %s" ) ; TemplateURL url ( data ) ; TemplateURLRef : : Replacements replacements ; bool valid = false ; EXPECT_EQ ( "{ " , url . url_ref ( ) . ParseURL ( "{ { searchTerms} " , & replacements , NULL , & valid ) ) ; ASSERT_EQ ( 1U , replacements . size ( ) ) ; EXPECT_EQ ( 1U , replacements [ 0 ] . index ) ; EXPECT_EQ ( TemplateURLRef : : SEARCH_TERMS , replacements [ 0 ] . type ) ; EXPECT_TRUE ( valid ) ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
void lock_memory ( MI_CHECK * param __attribute__ ( ( unused ) ) ) { # ifdef SUN_OS if ( param -> opt_lock_memory ) { int success = mlockall ( MCL_CURRENT ) ; if ( geteuid ( ) == 0 && success != 0 ) mi_check_print_warning ( param , "Failed to lock memory. errno %d" , my_errno ) ; } # endif }
0False
Categorize the following code snippet as vulnerable or not. True or False
static xmlLinkPtr xmlListLinkSearch ( xmlListPtr l , void * data ) { xmlLinkPtr lk ; if ( l == NULL ) return ( NULL ) ; lk = xmlListLowerSearch ( l , data ) ; if ( lk == l -> sentinel ) return NULL ; else { if ( l -> linkCompare ( lk -> data , data ) == 0 ) return lk ; return NULL ; } }
1True
Categorize the following code snippet as vulnerable or not. True or False
int ff_set_systematic_pal2 ( uint32_t pal [ 256 ] , enum PixelFormat pix_fmt ) { int i ; for ( i = 0 ; i < 256 ; i ++ ) { int r , g , b ; switch ( pix_fmt ) { case PIX_FMT_RGB8 : r = ( i >> 5 ) * 36 ; g = ( ( i >> 2 ) & 7 ) * 36 ; b = ( i & 3 ) * 85 ; break ; case PIX_FMT_BGR8 : b = ( i >> 6 ) * 85 ; g = ( ( i >> 3 ) & 7 ) * 36 ; r = ( i & 7 ) * 36 ; break ; case PIX_FMT_RGB4_BYTE : r = ( i >> 3 ) * 255 ; g = ( ( i >> 1 ) & 3 ) * 85 ; b = ( i & 1 ) * 255 ; break ; case PIX_FMT_BGR4_BYTE : b = ( i >> 3 ) * 255 ; g = ( ( i >> 1 ) & 3 ) * 85 ; r = ( i & 1 ) * 255 ; break ; case PIX_FMT_GRAY8 : r = b = g = i ; break ; default : return AVERROR ( EINVAL ) ; } pal [ i ] = b + ( g << 8 ) + ( r << 16 ) ; } return 0 ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
header_field_info * proto_registrar_get_byname ( const char * field_name ) { header_field_info * hfinfo ; prefix_initializer_t pi ; if ( ! field_name ) return NULL ; if ( g_strcmp0 ( field_name , last_field_name ) == 0 ) { return last_hfinfo ; } hfinfo = ( header_field_info * ) g_hash_table_lookup ( gpa_name_map , field_name ) ; if ( hfinfo ) { g_free ( last_field_name ) ; last_field_name = g_strdup ( field_name ) ; last_hfinfo = hfinfo ; return hfinfo ; } if ( ! prefixes ) return NULL ; if ( ( pi = ( prefix_initializer_t ) g_hash_table_lookup ( prefixes , field_name ) ) != NULL ) { pi ( field_name ) ; g_hash_table_remove ( prefixes , field_name ) ; } else { return NULL ; } hfinfo = ( header_field_info * ) g_hash_table_lookup ( gpa_name_map , field_name ) ; if ( hfinfo ) { g_free ( last_field_name ) ; last_field_name = g_strdup ( field_name ) ; last_hfinfo = hfinfo ; } return hfinfo ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
static int decode_frame ( AVCodecContext * avctx , void * data , int * got_frame , AVPacket * avpkt ) { Indeo3DecodeContext * ctx = avctx -> priv_data ; const uint8_t * buf = avpkt -> data ; int buf_size = avpkt -> size ; int res ; res = decode_frame_headers ( ctx , avctx , buf , buf_size ) ; if ( res < 0 ) return res ; if ( res ) { * got_frame = 0 ; return buf_size ; } if ( ctx -> frame_flags & BS_NONREF && ( avctx -> skip_frame >= AVDISCARD_NONREF ) ) return 0 ; if ( ! ( ctx -> frame_flags & BS_KEYFRAME ) && avctx -> skip_frame >= AVDISCARD_NONKEY ) return 0 ; ctx -> buf_sel = ( ctx -> frame_flags >> BS_BUFFER ) & 1 ; if ( ( res = decode_plane ( ctx , avctx , ctx -> planes , ctx -> y_data_ptr , ctx -> y_data_size , 40 ) ) ) return res ; if ( ( res = decode_plane ( ctx , avctx , & ctx -> planes [ 1 ] , ctx -> u_data_ptr , ctx -> u_data_size , 10 ) ) ) return res ; if ( ( res = decode_plane ( ctx , avctx , & ctx -> planes [ 2 ] , ctx -> v_data_ptr , ctx -> v_data_size , 10 ) ) ) return res ; if ( ctx -> frame . data [ 0 ] ) avctx -> release_buffer ( avctx , & ctx -> frame ) ; ctx -> frame . reference = 0 ; if ( ( res = ff_get_buffer ( avctx , & ctx -> frame ) ) < 0 ) { av_log ( ctx -> avctx , AV_LOG_ERROR , "get_buffer() failed\n" ) ; return res ; } output_plane ( & ctx -> planes [ 0 ] , ctx -> buf_sel , ctx -> frame . data [ 0 ] , ctx -> frame . linesize [ 0 ] , avctx -> height ) ; output_plane ( & ctx -> planes [ 1 ] , ctx -> buf_sel , ctx -> frame . data [ 1 ] , ctx -> frame . linesize [ 1 ] , ( avctx -> height + 3 ) >> 2 ) ; output_plane ( & ctx -> planes [ 2 ] , ctx -> buf_sel , ctx -> frame . data [ 2 ] , ctx -> frame . linesize [ 2 ] , ( avctx -> height + 3 ) >> 2 ) ; * got_frame = 1 ; * ( AVFrame * ) data = ctx -> frame ; return buf_size ; }
1True
Categorize the following code snippet as vulnerable or not. True or False
static vpx_codec_frame_flags_t get_frame_pkt_flags ( const VP9_COMP * cpi , unsigned int lib_flags ) { vpx_codec_frame_flags_t flags = lib_flags << 16 ; if ( lib_flags & FRAMEFLAGS_KEY # if CONFIG_SPATIAL_SVC || ( is_two_pass_svc ( cpi ) && cpi -> svc . layer_context [ 0 ] . is_key_frame ) # endif ) flags |= VPX_FRAME_IS_KEY ; if ( cpi -> droppable ) flags |= VPX_FRAME_IS_DROPPABLE ; return flags ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
static void _proc_multi_msg ( uint32_t rpc_uid , slurm_msg_t * msg ) { slurm_msg_t sub_msg , response_msg ; ctld_list_msg_t * ctld_req_msg , ctld_resp_msg ; List full_resp_list = NULL ; Buf single_req_buf = NULL ; Buf ret_buf , resp_buf = NULL ; ListIterator iter = NULL ; int rc ; if ( ! msg -> conn ) { error ( "Security violation, REQUEST_CTLD_MULT_MSG RPC from uid=%d" , rpc_uid ) ; slurm_send_rc_msg ( msg , ESLURM_ACCESS_DENIED ) ; return ; } ctld_req_msg = ( ctld_list_msg_t * ) msg -> data ; full_resp_list = list_create ( _ctld_free_list_msg ) ; iter = list_iterator_create ( ctld_req_msg -> my_list ) ; while ( ( single_req_buf = list_next ( iter ) ) ) { slurm_msg_t_init ( & sub_msg ) ; if ( unpack16 ( & sub_msg . msg_type , single_req_buf ) || unpack_msg ( & sub_msg , single_req_buf ) ) { error ( "Sub-message unpack error for REQUEST_CTLD_MULT_MSG %u RPC" , sub_msg . msg_type ) ; ret_buf = _build_rc_buf ( SLURM_ERROR , msg -> protocol_version ) ; list_append ( full_resp_list , ret_buf ) ; continue ; } sub_msg . conn = msg -> conn ; sub_msg . auth_cred = msg -> auth_cred ; ret_buf = NULL ; if ( slurmctld_conf . debug_flags & DEBUG_FLAG_PROTOCOL ) { char * p = rpc_num2string ( sub_msg . msg_type ) ; info ( "%s: received opcode %s" , __func__ , p ) ; } switch ( sub_msg . msg_type ) { case REQUEST_PING : rc = SLURM_SUCCESS ; ret_buf = _build_rc_buf ( rc , msg -> protocol_version ) ; break ; case REQUEST_SIB_MSG : _slurm_rpc_sib_msg ( rpc_uid , & sub_msg ) ; ret_buf = _build_rc_buf ( SLURM_SUCCESS , msg -> protocol_version ) ; break ; default : error ( "%s: Unsupported Message Type:%s" , __func__ , rpc_num2string ( sub_msg . msg_type ) ) ; } ( void ) slurm_free_msg_data ( sub_msg . msg_type , sub_msg . data ) ; if ( ! ret_buf ) { ret_buf = _build_rc_buf ( SLURM_ERROR , msg -> protocol_version ) ; } list_append ( full_resp_list , ret_buf ) ; } list_iterator_destroy ( iter ) ; ctld_resp_msg . my_list = full_resp_list ; slurm_msg_t_init ( & response_msg ) ; response_msg . flags = msg -> flags ; response_msg . protocol_version = msg -> protocol_version ; response_msg . address = msg -> address ; response_msg . conn = msg -> conn ; response_msg . msg_type = RESPONSE_CTLD_MULT_MSG ; response_msg . data = & ctld_resp_msg ; slurm_send_node_msg ( msg -> conn_fd , & response_msg ) ; FREE_NULL_LIST ( full_resp_list ) ; free_buf ( resp_buf ) ; return ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
static int put_error ( MYSQL * con ) { return put_info ( mysql_error ( con ) , INFO_ERROR , mysql_errno ( con ) , mysql_sqlstate ( con ) ) ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
static void check_data_home ( const char * path ) { }
0False
Categorize the following code snippet as vulnerable or not. True or False
static void netbios_data_only_flags ( tvbuff_t * tvb , proto_tree * tree , int offset ) { proto_tree * field_tree ; proto_item * tf ; tf = proto_tree_add_item ( tree , hf_netb_flags , tvb , offset , 1 , ENC_LITTLE_ENDIAN ) ; field_tree = proto_item_add_subtree ( tf , ett_netb_flags ) ; proto_tree_add_item ( field_tree , hf_netb_flags_ack , tvb , offset , 1 , ENC_LITTLE_ENDIAN ) ; proto_tree_add_item ( field_tree , hf_netb_flags_ack_with_data , tvb , offset , 1 , ENC_LITTLE_ENDIAN ) ; proto_tree_add_item ( field_tree , hf_netb_flags_ack_expected , tvb , offset , 1 , ENC_LITTLE_ENDIAN ) ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
TEST_F ( ProtocolHandlerRegistryTest , TestIsEquivalentRegistered ) { ProtocolHandler ph1 = CreateProtocolHandler ( "test" , GURL ( "http://test/%s" ) ) ; ProtocolHandler ph2 = CreateProtocolHandler ( "test" , GURL ( "http://test/%s" ) ) ; registry ( ) -> OnAcceptRegisterProtocolHandler ( ph1 ) ; ASSERT_TRUE ( registry ( ) -> IsRegistered ( ph1 ) ) ; ASSERT_TRUE ( registry ( ) -> HasRegisteredEquivalent ( ph2 ) ) ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
static int cinepak_decode_frame ( AVCodecContext * avctx , void * data , int * got_frame , AVPacket * avpkt ) { const uint8_t * buf = avpkt -> data ; int ret = 0 , buf_size = avpkt -> size ; CinepakContext * s = avctx -> priv_data ; s -> data = buf ; s -> size = buf_size ; s -> frame . reference = 1 ; s -> frame . buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE ; if ( ( ret = avctx -> reget_buffer ( avctx , & s -> frame ) ) ) { av_log ( avctx , AV_LOG_ERROR , "reget_buffer() failed\n" ) ; return ret ; } if ( s -> palette_video ) { const uint8_t * pal = av_packet_get_side_data ( avpkt , AV_PKT_DATA_PALETTE , NULL ) ; if ( pal ) { s -> frame . palette_has_changed = 1 ; memcpy ( s -> pal , pal , AVPALETTE_SIZE ) ; } } cinepak_decode ( s ) ; if ( s -> palette_video ) memcpy ( s -> frame . data [ 1 ] , s -> pal , AVPALETTE_SIZE ) ; * got_frame = 1 ; * ( AVFrame * ) data = s -> frame ; return buf_size ; }
1True
Categorize the following code snippet as vulnerable or not. True or False
static int parse_CAggregSet ( tvbuff_t * tvb , int offset , proto_tree * parent_tree , proto_tree * pad_tree , const char * fmt , ... ) { guint32 cCount , i ; proto_item * item ; proto_tree * tree ; const char * txt ; va_list ap ; va_start ( ap , fmt ) ; txt = wmem_strdup_vprintf ( wmem_packet_scope ( ) , fmt , ap ) ; va_end ( ap ) ; tree = proto_tree_add_subtree ( parent_tree , tvb , offset , 0 , ett_CAggregSet , & item , txt ) ; cCount = tvb_get_letohl ( tvb , offset ) ; proto_tree_add_uint ( tree , hf_mswsp_caggregset_count , tvb , offset , 4 , cCount ) ; offset += 4 ; for ( i = 0 ; i < cCount ; i ++ ) { offset = parse_CAggregSpec ( tvb , offset , tree , pad_tree , "AggregSpecs[%u]" , i ) ; } proto_item_set_end ( item , tvb , offset ) ; return offset ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
static void print_version ( void ) { set_server_version ( ) ; printf ( "%s Ver %s for %s on %s (%s)\n" , my_progname , server_version , SYSTEM_TYPE , MACHINE_TYPE , MYSQL_COMPILATION_COMMENT ) ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
static void check_databases_are_compatible ( void ) { int newdbnum ; int olddbnum ; DbInfo * newdbinfo ; DbInfo * olddbinfo ; for ( newdbnum = 0 ; newdbnum < new_cluster . dbarr . ndbs ; newdbnum ++ ) { newdbinfo = & new_cluster . dbarr . dbs [ newdbnum ] ; for ( olddbnum = 0 ; olddbnum < old_cluster . dbarr . ndbs ; olddbnum ++ ) { olddbinfo = & old_cluster . dbarr . dbs [ olddbnum ] ; if ( strcmp ( newdbinfo -> db_name , olddbinfo -> db_name ) == 0 ) { check_locale_and_encoding ( olddbinfo , newdbinfo ) ; break ; } } } }
0False
Categorize the following code snippet as vulnerable or not. True or False
static void curses_connection_data_join ( void ) { char src [ MAX_ASCII_ADDR_LEN ] ; char dst [ MAX_ASCII_ADDR_LEN ] ; char title [ 64 ] ; DEBUG_MSG ( "curses_connection_data_join" ) ; if ( wdg_conndata ) { struct conn_object * tmp_conn = curr_conn ; wdg_destroy_object ( & wdg_conndata ) ; curses_destroy_conndata ( ) ; curr_conn = tmp_conn ; } curr_conn -> flags |= CONN_VIEWING ; wdg_create_object ( & wdg_conndata , WDG_COMPOUND , WDG_OBJ_WANT_FOCUS ) ; wdg_set_color ( wdg_conndata , WDG_COLOR_SCREEN , EC_COLOR ) ; wdg_set_color ( wdg_conndata , WDG_COLOR_WINDOW , EC_COLOR ) ; wdg_set_color ( wdg_conndata , WDG_COLOR_FOCUS , EC_COLOR_FOCUS ) ; wdg_set_color ( wdg_conndata , WDG_COLOR_TITLE , EC_COLOR_TITLE ) ; wdg_set_title ( wdg_conndata , "Connection data" , WDG_ALIGN_LEFT ) ; wdg_set_size ( wdg_conndata , 1 , 2 , - 1 , SYSMSG_WIN_SIZE - 1 ) ; wdg_create_object ( & wdg_join , WDG_SCROLL , 0 ) ; snprintf ( title , 64 , "%s:%d - %s:%d" , ip_addr_ntoa ( & curr_conn -> L3_addr1 , src ) , ntohs ( curr_conn -> L4_addr1 ) , ip_addr_ntoa ( & curr_conn -> L3_addr2 , dst ) , ntohs ( curr_conn -> L4_addr2 ) ) ; wdg_set_title ( wdg_join , title , WDG_ALIGN_LEFT ) ; wdg_set_color ( wdg_join , WDG_COLOR_TITLE , EC_COLOR_TITLE ) ; wdg_set_color ( wdg_join , WDG_COLOR_FOCUS , EC_COLOR_FOCUS ) ; wdg_set_size ( wdg_join , 2 , 3 , - 2 , SYSMSG_WIN_SIZE - 2 ) ; wdg_scroll_set_lines ( wdg_join , GBL_CONF -> connection_buffer / ( current_screen . cols / 2 ) ) ; wdg_compound_add ( wdg_conndata , wdg_join ) ; wdg_add_destroy_key ( wdg_conndata , CTRL ( 'Q' ) , curses_destroy_conndata ) ; wdg_compound_add_callback ( wdg_conndata , 'j' , curses_connection_data_split ) ; wdg_compound_add_callback ( wdg_conndata , 'k' , curses_connection_kill_wrapper ) ; wdg_compound_add_callback ( wdg_conndata , ' ' , curses_connection_data_help ) ; wdg_draw_object ( wdg_conndata ) ; wdg_set_focus ( wdg_conndata ) ; connbuf_print ( & curr_conn -> data , join_print ) ; conntrack_hook_conn_add ( curr_conn , join_print_po ) ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
static void ardp_parse_header ( tvbuff_t * tvb , packet_info * pinfo , alljoyn_ardp_tree_data * tree_data ) { guint8 flags , header_length ; gint eaklen , packet_length ; guint16 data_length ; packet_length = tvb_reported_length ( tvb ) ; flags = tvb_get_guint8 ( tvb , 0 ) ; tree_data -> syn = ( flags & ARDP_SYN ) != 0 ; tree_data -> ack = ( flags & ARDP_ACK ) != 0 ; tree_data -> eak = ( flags & ARDP_EAK ) != 0 ; tree_data -> rst = ( flags & ARDP_RST ) != 0 ; tree_data -> nul = ( flags & ARDP_NUL ) != 0 ; header_length = 2 * tvb_get_guint8 ( tvb , ARDP_HEADER_LEN_OFFSET ) ; if ( packet_length < ARDP_DATA_LENGTH_OFFSET + 2 ) { set_pinfo_desegment ( pinfo , 0 , ARDP_DATA_LENGTH_OFFSET + 2 - packet_length ) ; tree_data -> offset = ARDP_HEADER_LEN_OFFSET + 1 ; return ; } data_length = tvb_get_ntohs ( tvb , ARDP_DATA_LENGTH_OFFSET ) ; if ( packet_length < header_length + data_length ) { set_pinfo_desegment ( pinfo , 0 , header_length + data_length - packet_length ) ; tree_data -> offset = ARDP_DATA_LENGTH_OFFSET + 2 ; return ; } proto_tree_add_item ( tree_data -> alljoyn_tree , hf_ardp_syn_flag , tvb , tree_data -> offset , 1 , ENC_NA ) ; proto_tree_add_item ( tree_data -> alljoyn_tree , hf_ardp_ack_flag , tvb , tree_data -> offset , 1 , ENC_NA ) ; proto_tree_add_item ( tree_data -> alljoyn_tree , hf_ardp_eak_flag , tvb , tree_data -> offset , 1 , ENC_NA ) ; proto_tree_add_item ( tree_data -> alljoyn_tree , hf_ardp_rst_flag , tvb , tree_data -> offset , 1 , ENC_NA ) ; proto_tree_add_item ( tree_data -> alljoyn_tree , hf_ardp_nul_flag , tvb , tree_data -> offset , 1 , ENC_NA ) ; proto_tree_add_item ( tree_data -> alljoyn_tree , hf_ardp_unused_flag , tvb , tree_data -> offset , 1 , ENC_NA ) ; proto_tree_add_item ( tree_data -> alljoyn_tree , hf_ardp_version_field , tvb , tree_data -> offset , 1 , ENC_NA ) ; tree_data -> offset += 1 ; proto_tree_add_item ( tree_data -> alljoyn_tree , hf_ardp_hlen , tvb , tree_data -> offset , 1 , ENC_NA ) ; tree_data -> offset += 1 ; proto_tree_add_item ( tree_data -> alljoyn_tree , hf_ardp_src , tvb , tree_data -> offset , 2 , ENC_BIG_ENDIAN ) ; tree_data -> offset += 2 ; proto_tree_add_item ( tree_data -> alljoyn_tree , hf_ardp_dst , tvb , tree_data -> offset , 2 , ENC_BIG_ENDIAN ) ; tree_data -> offset += 2 ; proto_tree_add_item ( tree_data -> alljoyn_tree , hf_ardp_dlen , tvb , tree_data -> offset , 2 , ENC_BIG_ENDIAN ) ; tree_data -> offset += 2 ; proto_tree_add_item ( tree_data -> alljoyn_tree , hf_ardp_seq , tvb , tree_data -> offset , 4 , ENC_BIG_ENDIAN ) ; tree_data -> sequence = tvb_get_ntohl ( tvb , tree_data -> offset ) ; tree_data -> offset += 4 ; proto_tree_add_item ( tree_data -> alljoyn_tree , hf_ardp_ack , tvb , tree_data -> offset , 4 , ENC_BIG_ENDIAN ) ; tree_data -> acknowledge = tvb_get_ntohl ( tvb , tree_data -> offset ) ; tree_data -> offset += 4 ; if ( tree_data -> syn ) { proto_tree_add_item ( tree_data -> alljoyn_tree , hf_ardp_segmax , tvb , tree_data -> offset , 2 , ENC_BIG_ENDIAN ) ; tree_data -> offset += 2 ; proto_tree_add_item ( tree_data -> alljoyn_tree , hf_ardp_segbmax , tvb , tree_data -> offset , 2 , ENC_BIG_ENDIAN ) ; tree_data -> offset += 2 ; proto_tree_add_item ( tree_data -> alljoyn_tree , hf_ardp_dackt , tvb , tree_data -> offset , 4 , ENC_BIG_ENDIAN ) ; tree_data -> offset += 4 ; proto_tree_add_item ( tree_data -> alljoyn_tree , hf_ardp_options , tvb , tree_data -> offset , 2 , ENC_BIG_ENDIAN ) ; tree_data -> offset += 2 ; } else { proto_tree_add_item ( tree_data -> alljoyn_tree , hf_ardp_ttl , tvb , tree_data -> offset 
, 4 , ENC_BIG_ENDIAN ) ; tree_data -> offset += 4 ; proto_tree_add_item ( tree_data -> alljoyn_tree , hf_ardp_lcs , tvb , tree_data -> offset , 4 , ENC_BIG_ENDIAN ) ; tree_data -> offset += 4 ; proto_tree_add_item ( tree_data -> alljoyn_tree , hf_ardp_nsa , tvb , tree_data -> offset , 4 , ENC_BIG_ENDIAN ) ; tree_data -> offset += 4 ; proto_tree_add_item ( tree_data -> alljoyn_tree , hf_ardp_fss , tvb , tree_data -> offset , 4 , ENC_BIG_ENDIAN ) ; tree_data -> start_sequence = tvb_get_ntohl ( tvb , tree_data -> offset ) ; tree_data -> offset += 4 ; proto_tree_add_item ( tree_data -> alljoyn_tree , hf_ardp_fcnt , tvb , tree_data -> offset , 2 , ENC_BIG_ENDIAN ) ; tree_data -> fragment_count = tvb_get_ntohs ( tvb , tree_data -> offset ) ; tree_data -> offset += 2 ; eaklen = header_length - ARDP_FIXED_HDR_LEN ; if ( eaklen > 0 ) { if ( tree_data -> eak ) { proto_tree_add_item ( tree_data -> alljoyn_tree , hf_ardp_bmp , tvb , tree_data -> offset , eaklen , ENC_NA ) ; } tree_data -> offset += eaklen ; } } }
0False
Categorize the following code snippet as vulnerable or not. True or False
afs_int32 SPR_ChangeEntry ( struct rx_call * call , afs_int32 aid , char * name , afs_int32 oid , afs_int32 newid ) { afs_int32 code ; afs_int32 cid = ANONYMOUSID ; code = changeEntry ( call , aid , name , oid , newid , & cid ) ; osi_auditU ( call , PTS_ChgEntEvent , code , AUD_ID , aid , AUD_STR , name , AUD_LONG , oid , AUD_LONG , newid , AUD_END ) ; ViceLog ( 5 , ( "PTS_ChangeEntry: code %d cid %d aid %d name %s oid %d newid %d\n" , code , cid , aid , name , oid , newid ) ) ; return code ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
static int atrac1_decode_frame ( AVCodecContext * avctx , void * data , int * got_frame_ptr , AVPacket * avpkt ) { AVFrame * frame = data ; const uint8_t * buf = avpkt -> data ; int buf_size = avpkt -> size ; AT1Ctx * q = avctx -> priv_data ; int ch , ret ; GetBitContext gb ; if ( buf_size < 212 * avctx -> channels ) { av_log ( avctx , AV_LOG_ERROR , "Not enough data to decode!\n" ) ; return AVERROR_INVALIDDATA ; } frame -> nb_samples = AT1_SU_SAMPLES ; if ( ( ret = ff_get_buffer ( avctx , frame , 0 ) ) < 0 ) { av_log ( avctx , AV_LOG_ERROR , "get_buffer() failed\n" ) ; return ret ; } for ( ch = 0 ; ch < avctx -> channels ; ch ++ ) { AT1SUCtx * su = & q -> SUs [ ch ] ; init_get_bits ( & gb , & buf [ 212 * ch ] , 212 * 8 ) ; ret = at1_parse_bsm ( & gb , su -> log2_block_count ) ; if ( ret < 0 ) return ret ; ret = at1_unpack_dequant ( & gb , su , q -> spec ) ; if ( ret < 0 ) return ret ; ret = at1_imdct_block ( su , q ) ; if ( ret < 0 ) return ret ; at1_subband_synthesis ( q , su , ( float * ) frame -> extended_data [ ch ] ) ; } * got_frame_ptr = 1 ; return avctx -> block_align ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
void vp9_coef_tree_initialize ( ) { init_bit_trees ( ) ; vp9_tokens_from_tree ( vp9_coef_encodings , vp9_coef_tree ) ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
static void dumpcfffdselect ( SplineFont * sf , struct alltabs * at ) { int cid , k , lastfd , cnt ; int gid ; putc ( 3 , at -> fdselect ) ; putshort ( at -> fdselect , 0 ) ; for ( k = 0 ; k < sf -> subfontcnt ; ++ k ) if ( SCWorthOutputting ( sf -> subfonts [ k ] -> glyphs [ 0 ] ) ) break ; if ( k == sf -> subfontcnt ) -- k ; putshort ( at -> fdselect , 0 ) ; putc ( k , at -> fdselect ) ; lastfd = k ; cnt = 1 ; for ( gid = 1 ; gid < at -> gi . gcnt ; ++ gid ) { cid = at -> gi . bygid [ gid ] ; for ( k = 0 ; k < sf -> subfontcnt ; ++ k ) { if ( cid < sf -> subfonts [ k ] -> glyphcnt && SCWorthOutputting ( sf -> subfonts [ k ] -> glyphs [ cid ] ) ) break ; } if ( k == sf -> subfontcnt ) ; else { if ( k != lastfd ) { putshort ( at -> fdselect , gid ) ; putc ( k , at -> fdselect ) ; lastfd = k ; ++ cnt ; } } } putshort ( at -> fdselect , gid ) ; fseek ( at -> fdselect , 1 , SEEK_SET ) ; putshort ( at -> fdselect , cnt ) ; fseek ( at -> fdselect , 0 , SEEK_END ) ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
static int sockstat_seq_open ( struct inode * inode , struct file * file ) { return single_open_net ( inode , file , sockstat_seq_show ) ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
void vp9_lpf_vertical_8_sse2 ( unsigned char * s , int p , const unsigned char * blimit , const unsigned char * limit , const unsigned char * thresh , int count ) { DECLARE_ALIGNED_ARRAY ( 8 , unsigned char , t_dst , 8 * 8 ) ; unsigned char * src [ 1 ] ; unsigned char * dst [ 1 ] ; ( void ) count ; src [ 0 ] = s - 4 ; dst [ 0 ] = t_dst ; transpose ( src , p , dst , 8 , 1 ) ; vp9_lpf_horizontal_8_sse2 ( t_dst + 4 * 8 , 8 , blimit , limit , thresh , 1 ) ; src [ 0 ] = t_dst ; dst [ 0 ] = s - 4 ; transpose ( src , 8 , dst , p , 1 ) ; }
1True
Categorize the following code snippet as vulnerable or not. True or False
static inline int get_qscale ( MpegEncContext * s ) { int qscale = get_bits ( & s -> gb , 5 ) ; if ( s -> q_scale_type ) { return non_linear_qscale [ qscale ] ; } else { return qscale << 1 ; } }
0False
Categorize the following code snippet as vulnerable or not. True or False
static gboolean extcap_dumper_dump ( struct extcap_dumper extcap_dumper , char * buffer , gssize captured_length , gssize reported_length , time_t seconds , int nanoseconds ) { # ifdef ANDROIDDUMP_USE_LIBPCAP struct pcap_pkthdr pcap_header ; pcap_header . caplen = ( bpf_u_int32 ) captured_length ; pcap_header . len = ( bpf_u_int32 ) reported_length ; pcap_header . ts . tv_sec = seconds ; pcap_header . ts . tv_usec = nanoseconds / 1000 ; pcap_dump ( ( u_char * ) extcap_dumper . dumper . pcap , & pcap_header , buffer ) ; pcap_dump_flush ( extcap_dumper . dumper . pcap ) ; # else int err = 0 ; char * err_info ; struct wtap_pkthdr hdr ; hdr . presence_flags = WTAP_HAS_TS ; hdr . caplen = ( guint32 ) captured_length ; hdr . len = ( guint32 ) reported_length ; hdr . ts . secs = seconds ; hdr . ts . nsecs = ( int ) nanoseconds ; hdr . opt_comment = 0 ; hdr . opt_comment = NULL ; hdr . drop_count = 0 ; hdr . pack_flags = 0 ; hdr . rec_type = REC_TYPE_PACKET ; if ( extcap_dumper . encap == EXTCAP_ENCAP_BLUETOOTH_H4_WITH_PHDR ) { uint32_t * direction ; SET_DATA ( direction , value_u32 , buffer ) hdr . pseudo_header . bthci . sent = GINT32_FROM_BE ( * direction ) ? 0 : 1 ; hdr . len -= ( guint32 ) sizeof ( own_pcap_bluetooth_h4_header ) ; hdr . caplen -= ( guint32 ) sizeof ( own_pcap_bluetooth_h4_header ) ; buffer += sizeof ( own_pcap_bluetooth_h4_header ) ; hdr . pkt_encap = WTAP_ENCAP_BLUETOOTH_H4_WITH_PHDR ; } else if ( extcap_dumper . encap == EXTCAP_ENCAP_ETHERNET ) { hdr . pkt_encap = WTAP_ENCAP_ETHERNET ; } else { hdr . pkt_encap = WTAP_ENCAP_WIRESHARK_UPPER_PDU ; } if ( ! wtap_dump ( extcap_dumper . dumper . wtap , & hdr , ( const guint8 * ) buffer , & err , & err_info ) ) { errmsg_print ( "ERROR: Cannot dump: %s" , err_info ) ; return FALSE ; } wtap_dump_flush ( extcap_dumper . dumper . wtap ) ; # endif return TRUE ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
IN_PROC_BROWSER_TEST_F ( FramebustBlockBrowserTest , DisallowRadioButtonSelected ) { const GURL url = embedded_test_server ( ) -> GetURL ( "/iframe.html" ) ; ui_test_utils : : NavigateToURL ( browser ( ) , url ) ; auto * helper = GetFramebustTabHelper ( ) ; helper -> AddBlockedUrl ( url , base : : BindOnce ( & FramebustBlockBrowserTest : : OnClick , base : : Unretained ( this ) ) ) ; EXPECT_TRUE ( helper -> HasBlockedUrls ( ) ) ; HostContentSettingsMap * settings_map = HostContentSettingsMapFactory : : GetForProfile ( browser ( ) -> profile ( ) ) ; EXPECT_EQ ( CONTENT_SETTING_BLOCK , settings_map -> GetContentSetting ( url , GURL ( ) , CONTENT_SETTINGS_TYPE_POPUPS , std : : string ( ) ) ) ; { ContentSettingFramebustBlockBubbleModel framebust_block_bubble_model ( browser ( ) -> content_setting_bubble_model_delegate ( ) , GetWebContents ( ) , browser ( ) -> profile ( ) ) ; framebust_block_bubble_model . OnRadioClicked ( kDisallowRadioButtonIndex ) ; } EXPECT_EQ ( CONTENT_SETTING_BLOCK , settings_map -> GetContentSetting ( url , GURL ( ) , CONTENT_SETTINGS_TYPE_POPUPS , std : : string ( ) ) ) ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
void proto_register_zbee_zcl_met_idt ( void ) { static hf_register_info hf [ ] = { { & hf_zbee_zcl_met_idt_attr_id , { "Attribute" , "zbee_zcl_ha.metidt.attr_id" , FT_UINT16 , BASE_HEX , VALS ( zbee_zcl_met_idt_attr_names ) , 0x00 , NULL , HFILL } } , { & hf_zbee_zcl_met_idt_meter_type_id , { "Meter Type ID" , "zbee_zcl_ha.metidt.attr.meter_type.id" , FT_UINT16 , BASE_HEX , VALS ( zbee_zcl_met_idt_meter_type_names ) , 0x00 , NULL , HFILL } } , { & hf_zbee_zcl_met_idt_data_quality_id , { "Data Quality ID" , "zbee_zcl_ha.metidt.attr.data_quality.id" , FT_UINT16 , BASE_HEX , VALS ( zbee_zcl_met_idt_data_quality_names ) , 0x00 , NULL , HFILL } } } ; proto_zbee_zcl_met_idt = proto_register_protocol ( "ZigBee ZCL Meter Identification" , "ZCL Meter Identification" , ZBEE_PROTOABBREV_ZCL_METIDT ) ; proto_register_field_array ( proto_zbee_zcl_met_idt , hf , array_length ( hf ) ) ; register_dissector ( ZBEE_PROTOABBREV_ZCL_METIDT , dissect_zbee_zcl_met_idt , proto_zbee_zcl_met_idt ) ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
static int read_major_sync ( MLPDecodeContext * m , GetBitContext * gb ) { MLPHeaderInfo mh ; int substr , ret ; if ( ( ret = ff_mlp_read_major_sync ( m -> avctx , & mh , gb ) ) != 0 ) return ret ; if ( mh . group1_bits == 0 ) { av_log ( m -> avctx , AV_LOG_ERROR , "invalid/unknown bits per sample\n" ) ; return AVERROR_INVALIDDATA ; } if ( mh . group2_bits > mh . group1_bits ) { av_log ( m -> avctx , AV_LOG_ERROR , "Channel group 2 cannot have more bits per sample than group 1.\n" ) ; return AVERROR_INVALIDDATA ; } if ( mh . group2_samplerate && mh . group2_samplerate != mh . group1_samplerate ) { av_log ( m -> avctx , AV_LOG_ERROR , "Channel groups with differing sample rates are not currently supported.\n" ) ; return AVERROR_INVALIDDATA ; } if ( mh . group1_samplerate == 0 ) { av_log ( m -> avctx , AV_LOG_ERROR , "invalid/unknown sampling rate\n" ) ; return AVERROR_INVALIDDATA ; } if ( mh . group1_samplerate > MAX_SAMPLERATE ) { av_log ( m -> avctx , AV_LOG_ERROR , "Sampling rate %d is greater than the supported maximum (%d).\n" , mh . group1_samplerate , MAX_SAMPLERATE ) ; return AVERROR_INVALIDDATA ; } if ( mh . access_unit_size > MAX_BLOCKSIZE ) { av_log ( m -> avctx , AV_LOG_ERROR , "Block size %d is greater than the supported maximum (%d).\n" , mh . access_unit_size , MAX_BLOCKSIZE ) ; return AVERROR_INVALIDDATA ; } if ( mh . access_unit_size_pow2 > MAX_BLOCKSIZE_POW2 ) { av_log ( m -> avctx , AV_LOG_ERROR , "Block size pow2 %d is greater than the supported maximum (%d).\n" , mh . access_unit_size_pow2 , MAX_BLOCKSIZE_POW2 ) ; return AVERROR_INVALIDDATA ; } if ( mh . num_substreams == 0 ) return AVERROR_INVALIDDATA ; if ( m -> avctx -> codec_id == AV_CODEC_ID_MLP && mh . num_substreams > 2 ) { av_log ( m -> avctx , AV_LOG_ERROR , "MLP only supports up to 2 substreams.\n" ) ; return AVERROR_INVALIDDATA ; } if ( mh . num_substreams > MAX_SUBSTREAMS ) { av_log_ask_for_sample ( m -> avctx , "Number of substreams %d is larger than the maximum supported " "by the decoder.\n" , mh . num_substreams ) ; return AVERROR_PATCHWELCOME ; } m -> access_unit_size = mh . access_unit_size ; m -> access_unit_size_pow2 = mh . access_unit_size_pow2 ; m -> num_substreams = mh . num_substreams ; m -> max_decoded_substream = m -> num_substreams - 1 ; m -> avctx -> sample_rate = mh . group1_samplerate ; m -> avctx -> frame_size = mh . access_unit_size ; m -> avctx -> bits_per_raw_sample = mh . group1_bits ; if ( mh . group1_bits > 16 ) m -> avctx -> sample_fmt = AV_SAMPLE_FMT_S32 ; else m -> avctx -> sample_fmt = AV_SAMPLE_FMT_S16 ; m -> params_valid = 1 ; for ( substr = 0 ; substr < MAX_SUBSTREAMS ; substr ++ ) m -> substream [ substr ] . restart_seen = 0 ; if ( m -> avctx -> codec_id == AV_CODEC_ID_MLP ) { if ( ( substr = ( mh . num_substreams > 1 ) ) ) m -> substream [ 0 ] . ch_layout = AV_CH_LAYOUT_STEREO ; m -> substream [ substr ] . ch_layout = mh . channel_layout_mlp ; } else { if ( ( substr = ( mh . num_substreams > 1 ) ) ) m -> substream [ 0 ] . ch_layout = AV_CH_LAYOUT_STEREO ; if ( mh . num_substreams > 2 ) if ( mh . channel_layout_thd_stream2 ) m -> substream [ 2 ] . ch_layout = mh . channel_layout_thd_stream2 ; else m -> substream [ 2 ] . ch_layout = mh . channel_layout_thd_stream1 ; m -> substream [ substr ] . ch_layout = mh . channel_layout_thd_stream1 ; } return 0 ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
SPL_METHOD ( SplDoublyLinkedList , rewind ) { spl_dllist_object * intern = ( spl_dllist_object * ) zend_object_store_get_object ( getThis ( ) TSRMLS_CC ) ; if ( zend_parse_parameters_none ( ) == FAILURE ) { return ; } spl_dllist_it_helper_rewind ( & intern -> traverse_pointer , & intern -> traverse_position , intern -> llist , intern -> flags TSRMLS_CC ) ; }
0False
Categorize the following code snippet as vulnerable or not. True or False
static int decode_audio_block ( AC3DecodeContext * s , int blk ) { int fbw_channels = s -> fbw_channels ; int channel_mode = s -> channel_mode ; int i , bnd , seg , ch ; int different_transforms ; int downmix_output ; int cpl_in_use ; GetBitContext * gbc = & s -> gbc ; uint8_t bit_alloc_stages [ AC3_MAX_CHANNELS ] = { 0 } ; different_transforms = 0 ; if ( s -> block_switch_syntax ) { for ( ch = 1 ; ch <= fbw_channels ; ch ++ ) { s -> block_switch [ ch ] = get_bits1 ( gbc ) ; if ( ch > 1 && s -> block_switch [ ch ] != s -> block_switch [ 1 ] ) different_transforms = 1 ; } } if ( s -> dither_flag_syntax ) { for ( ch = 1 ; ch <= fbw_channels ; ch ++ ) { s -> dither_flag [ ch ] = get_bits1 ( gbc ) ; } } i = ! s -> channel_mode ; do { if ( get_bits1 ( gbc ) ) { s -> dynamic_range [ i ] = ( ( dynamic_range_tab [ get_bits ( gbc , 8 ) ] - 1.0 ) * s -> drc_scale ) + 1.0 ; } else if ( blk == 0 ) { s -> dynamic_range [ i ] = 1.0f ; } } while ( i -- ) ; if ( s -> eac3 && ( ! blk || get_bits1 ( gbc ) ) ) { s -> spx_in_use = get_bits1 ( gbc ) ; if ( s -> spx_in_use ) { int dst_start_freq , dst_end_freq , src_start_freq , start_subband , end_subband ; if ( s -> channel_mode == AC3_CHMODE_MONO ) { s -> channel_uses_spx [ 1 ] = 1 ; } else { for ( ch = 1 ; ch <= fbw_channels ; ch ++ ) s -> channel_uses_spx [ ch ] = get_bits1 ( gbc ) ; } dst_start_freq = get_bits ( gbc , 2 ) ; start_subband = get_bits ( gbc , 3 ) + 2 ; if ( start_subband > 7 ) start_subband += start_subband - 7 ; end_subband = get_bits ( gbc , 3 ) + 5 ; if ( end_subband > 7 ) end_subband += end_subband - 7 ; dst_start_freq = dst_start_freq * 12 + 25 ; src_start_freq = start_subband * 12 + 25 ; dst_end_freq = end_subband * 12 + 25 ; if ( start_subband >= end_subband ) { av_log ( s -> avctx , AV_LOG_ERROR , "invalid spectral extension " "range (%d >= %d)\n" , start_subband , end_subband ) ; return - 1 ; } if ( dst_start_freq >= src_start_freq ) { av_log ( s -> avctx , AV_LOG_ERROR , "invalid spectral extension " "copy start bin (%d >= %d)\n" , dst_start_freq , src_start_freq ) ; return - 1 ; } s -> spx_dst_start_freq = dst_start_freq ; s -> spx_src_start_freq = src_start_freq ; s -> spx_dst_end_freq = dst_end_freq ; decode_band_structure ( gbc , blk , s -> eac3 , 0 , start_subband , end_subband , ff_eac3_default_spx_band_struct , & s -> num_spx_bands , s -> spx_band_sizes ) ; } else { for ( ch = 1 ; ch <= fbw_channels ; ch ++ ) { s -> channel_uses_spx [ ch ] = 0 ; s -> first_spx_coords [ ch ] = 1 ; } } } if ( s -> spx_in_use ) { for ( ch = 1 ; ch <= fbw_channels ; ch ++ ) { if ( s -> channel_uses_spx [ ch ] ) { if ( s -> first_spx_coords [ ch ] || get_bits1 ( gbc ) ) { float spx_blend ; int bin , master_spx_coord ; s -> first_spx_coords [ ch ] = 0 ; spx_blend = get_bits ( gbc , 5 ) * ( 1.0f / 32 ) ; master_spx_coord = get_bits ( gbc , 2 ) * 3 ; bin = s -> spx_src_start_freq ; for ( bnd = 0 ; bnd < s -> num_spx_bands ; bnd ++ ) { int bandsize ; int spx_coord_exp , spx_coord_mant ; float nratio , sblend , nblend , spx_coord ; bandsize = s -> spx_band_sizes [ bnd ] ; nratio = ( ( float ) ( ( bin + ( bandsize >> 1 ) ) ) / s -> spx_dst_end_freq ) - spx_blend ; nratio = av_clipf ( nratio , 0.0f , 1.0f ) ; nblend = sqrtf ( 3.0f * nratio ) ; sblend = sqrtf ( 1.0f - nratio ) ; bin += bandsize ; spx_coord_exp = get_bits ( gbc , 4 ) ; spx_coord_mant = get_bits ( gbc , 2 ) ; if ( spx_coord_exp == 15 ) spx_coord_mant <<= 1 ; else spx_coord_mant += 4 ; spx_coord_mant <<= ( 25 - spx_coord_exp - master_spx_coord ) ; spx_coord = spx_coord_mant * ( 1.0f / ( 1 
<< 23 ) ) ; s -> spx_noise_blend [ ch ] [ bnd ] = nblend * spx_coord ; s -> spx_signal_blend [ ch ] [ bnd ] = sblend * spx_coord ; } } } else { s -> first_spx_coords [ ch ] = 1 ; } } } if ( s -> eac3 ? s -> cpl_strategy_exists [ blk ] : get_bits1 ( gbc ) ) { memset ( bit_alloc_stages , 3 , AC3_MAX_CHANNELS ) ; if ( ! s -> eac3 ) s -> cpl_in_use [ blk ] = get_bits1 ( gbc ) ; if ( s -> cpl_in_use [ blk ] ) { int cpl_start_subband , cpl_end_subband ; if ( channel_mode < AC3_CHMODE_STEREO ) { av_log ( s -> avctx , AV_LOG_ERROR , "coupling not allowed in mono or dual-mono\n" ) ; return - 1 ; } if ( s -> eac3 && get_bits1 ( gbc ) ) { av_log_missing_feature ( s -> avctx , "Enhanced coupling" , 1 ) ; return AVERROR_PATCHWELCOME ; } if ( s -> eac3 && s -> channel_mode == AC3_CHMODE_STEREO ) { s -> channel_in_cpl [ 1 ] = 1 ; s -> channel_in_cpl [ 2 ] = 1 ; } else { for ( ch = 1 ; ch <= fbw_channels ; ch ++ ) s -> channel_in_cpl [ ch ] = get_bits1 ( gbc ) ; } if ( channel_mode == AC3_CHMODE_STEREO ) s -> phase_flags_in_use = get_bits1 ( gbc ) ; cpl_start_subband = get_bits ( gbc , 4 ) ; cpl_end_subband = s -> spx_in_use ? ( s -> spx_src_start_freq - 37 ) / 12 : get_bits ( gbc , 4 ) + 3 ; if ( cpl_start_subband >= cpl_end_subband ) { av_log ( s -> avctx , AV_LOG_ERROR , "invalid coupling range (%d >= %d)\n" , cpl_start_subband , cpl_end_subband ) ; return - 1 ; } s -> start_freq [ CPL_CH ] = cpl_start_subband * 12 + 37 ; s -> end_freq [ CPL_CH ] = cpl_end_subband * 12 + 37 ; decode_band_structure ( gbc , blk , s -> eac3 , 0 , cpl_start_subband , cpl_end_subband , ff_eac3_default_cpl_band_struct , & s -> num_cpl_bands , s -> cpl_band_sizes ) ; } else { for ( ch = 1 ; ch <= fbw_channels ; ch ++ ) { s -> channel_in_cpl [ ch ] = 0 ; s -> first_cpl_coords [ ch ] = 1 ; } s -> first_cpl_leak = s -> eac3 ; s -> phase_flags_in_use = 0 ; } } else if ( ! s -> eac3 ) { if ( ! blk ) { av_log ( s -> avctx , AV_LOG_ERROR , "new coupling strategy must " "be present in block 0\n" ) ; return - 1 ; } else { s -> cpl_in_use [ blk ] = s -> cpl_in_use [ blk - 1 ] ; } } cpl_in_use = s -> cpl_in_use [ blk ] ; if ( cpl_in_use ) { int cpl_coords_exist = 0 ; for ( ch = 1 ; ch <= fbw_channels ; ch ++ ) { if ( s -> channel_in_cpl [ ch ] ) { if ( ( s -> eac3 && s -> first_cpl_coords [ ch ] ) || get_bits1 ( gbc ) ) { int master_cpl_coord , cpl_coord_exp , cpl_coord_mant ; s -> first_cpl_coords [ ch ] = 0 ; cpl_coords_exist = 1 ; master_cpl_coord = 3 * get_bits ( gbc , 2 ) ; for ( bnd = 0 ; bnd < s -> num_cpl_bands ; bnd ++ ) { cpl_coord_exp = get_bits ( gbc , 4 ) ; cpl_coord_mant = get_bits ( gbc , 4 ) ; if ( cpl_coord_exp == 15 ) s -> cpl_coords [ ch ] [ bnd ] = cpl_coord_mant << 22 ; else s -> cpl_coords [ ch ] [ bnd ] = ( cpl_coord_mant + 16 ) << 21 ; s -> cpl_coords [ ch ] [ bnd ] >>= ( cpl_coord_exp + master_cpl_coord ) ; } } else if ( ! blk ) { av_log ( s -> avctx , AV_LOG_ERROR , "new coupling coordinates must " "be present in block 0\n" ) ; return - 1 ; } } else { s -> first_cpl_coords [ ch ] = 1 ; } } if ( channel_mode == AC3_CHMODE_STEREO && cpl_coords_exist ) { for ( bnd = 0 ; bnd < s -> num_cpl_bands ; bnd ++ ) { s -> phase_flags [ bnd ] = s -> phase_flags_in_use ? get_bits1 ( gbc ) : 0 ; } } } if ( channel_mode == AC3_CHMODE_STEREO ) { if ( ( s -> eac3 && ! 
blk ) || get_bits1 ( gbc ) ) { s -> num_rematrixing_bands = 4 ; if ( cpl_in_use && s -> start_freq [ CPL_CH ] <= 61 ) { s -> num_rematrixing_bands -= 1 + ( s -> start_freq [ CPL_CH ] == 37 ) ; } else if ( s -> spx_in_use && s -> spx_src_start_freq <= 61 ) { s -> num_rematrixing_bands -- ; } for ( bnd = 0 ; bnd < s -> num_rematrixing_bands ; bnd ++ ) s -> rematrixing_flags [ bnd ] = get_bits1 ( gbc ) ; } else if ( ! blk ) { av_log ( s -> avctx , AV_LOG_WARNING , "Warning: " "new rematrixing strategy not present in block 0\n" ) ; s -> num_rematrixing_bands = 0 ; } } for ( ch = ! cpl_in_use ; ch <= s -> channels ; ch ++ ) { if ( ! s -> eac3 ) s -> exp_strategy [ blk ] [ ch ] = get_bits ( gbc , 2 - ( ch == s -> lfe_ch ) ) ; if ( s -> exp_strategy [ blk ] [ ch ] != EXP_REUSE ) bit_alloc_stages [ ch ] = 3 ; } for ( ch = 1 ; ch <= fbw_channels ; ch ++ ) { s -> start_freq [ ch ] = 0 ; if ( s -> exp_strategy [ blk ] [ ch ] != EXP_REUSE ) { int group_size ; int prev = s -> end_freq [ ch ] ; if ( s -> channel_in_cpl [ ch ] ) s -> end_freq [ ch ] = s -> start_freq [ CPL_CH ] ; else if ( s -> channel_uses_spx [ ch ] ) s -> end_freq [ ch ] = s -> spx_src_start_freq ; else { int bandwidth_code = get_bits ( gbc , 6 ) ; if ( bandwidth_code > 60 ) { av_log ( s -> avctx , AV_LOG_ERROR , "bandwidth code = %d > 60\n" , bandwidth_code ) ; return - 1 ; } s -> end_freq [ ch ] = bandwidth_code * 3 + 73 ; } group_size = 3 << ( s -> exp_strategy [ blk ] [ ch ] - 1 ) ; s -> num_exp_groups [ ch ] = ( s -> end_freq [ ch ] + group_size - 4 ) / group_size ; if ( blk > 0 && s -> end_freq [ ch ] != prev ) memset ( bit_alloc_stages , 3 , AC3_MAX_CHANNELS ) ; } } if ( cpl_in_use && s -> exp_strategy [ blk ] [ CPL_CH ] != EXP_REUSE ) { s -> num_exp_groups [ CPL_CH ] = ( s -> end_freq [ CPL_CH ] - s -> start_freq [ CPL_CH ] ) / ( 3 << ( s -> exp_strategy [ blk ] [ CPL_CH ] - 1 ) ) ; } for ( ch = ! cpl_in_use ; ch <= s -> channels ; ch ++ ) { if ( s -> exp_strategy [ blk ] [ ch ] != EXP_REUSE ) { s -> dexps [ ch ] [ 0 ] = get_bits ( gbc , 4 ) << ! ch ; if ( decode_exponents ( gbc , s -> exp_strategy [ blk ] [ ch ] , s -> num_exp_groups [ ch ] , s -> dexps [ ch ] [ 0 ] , & s -> dexps [ ch ] [ s -> start_freq [ ch ] + ! ! ch ] ) ) { av_log ( s -> avctx , AV_LOG_ERROR , "exponent out-of-range\n" ) ; return - 1 ; } if ( ch != CPL_CH && ch != s -> lfe_ch ) skip_bits ( gbc , 2 ) ; } } if ( s -> bit_allocation_syntax ) { if ( get_bits1 ( gbc ) ) { s -> bit_alloc_params . slow_decay = ff_ac3_slow_decay_tab [ get_bits ( gbc , 2 ) ] >> s -> bit_alloc_params . sr_shift ; s -> bit_alloc_params . fast_decay = ff_ac3_fast_decay_tab [ get_bits ( gbc , 2 ) ] >> s -> bit_alloc_params . sr_shift ; s -> bit_alloc_params . slow_gain = ff_ac3_slow_gain_tab [ get_bits ( gbc , 2 ) ] ; s -> bit_alloc_params . db_per_bit = ff_ac3_db_per_bit_tab [ get_bits ( gbc , 2 ) ] ; s -> bit_alloc_params . floor = ff_ac3_floor_tab [ get_bits ( gbc , 3 ) ] ; for ( ch = ! cpl_in_use ; ch <= s -> channels ; ch ++ ) bit_alloc_stages [ ch ] = FFMAX ( bit_alloc_stages [ ch ] , 2 ) ; } else if ( ! blk ) { av_log ( s -> avctx , AV_LOG_ERROR , "new bit allocation info must " "be present in block 0\n" ) ; return - 1 ; } } if ( ! s -> eac3 || ! blk ) { if ( s -> snr_offset_strategy && get_bits1 ( gbc ) ) { int snr = 0 ; int csnr ; csnr = ( get_bits ( gbc , 6 ) - 15 ) << 4 ; for ( i = ch = ! 
cpl_in_use ; ch <= s -> channels ; ch ++ ) { if ( ch == i || s -> snr_offset_strategy == 2 ) snr = ( csnr + get_bits ( gbc , 4 ) ) << 2 ; if ( blk && s -> snr_offset [ ch ] != snr ) { bit_alloc_stages [ ch ] = FFMAX ( bit_alloc_stages [ ch ] , 1 ) ; } s -> snr_offset [ ch ] = snr ; if ( ! s -> eac3 ) { int prev = s -> fast_gain [ ch ] ; s -> fast_gain [ ch ] = ff_ac3_fast_gain_tab [ get_bits ( gbc , 3 ) ] ; if ( blk && prev != s -> fast_gain [ ch ] ) bit_alloc_stages [ ch ] = FFMAX ( bit_alloc_stages [ ch ] , 2 ) ; } } } else if ( ! s -> eac3 && ! blk ) { av_log ( s -> avctx , AV_LOG_ERROR , "new snr offsets must be present in block 0\n" ) ; return - 1 ; } } if ( s -> fast_gain_syntax && get_bits1 ( gbc ) ) { for ( ch = ! cpl_in_use ; ch <= s -> channels ; ch ++ ) { int prev = s -> fast_gain [ ch ] ; s -> fast_gain [ ch ] = ff_ac3_fast_gain_tab [ get_bits ( gbc , 3 ) ] ; if ( blk && prev != s -> fast_gain [ ch ] ) bit_alloc_stages [ ch ] = FFMAX ( bit_alloc_stages [ ch ] , 2 ) ; } } else if ( s -> eac3 && ! blk ) { for ( ch = ! cpl_in_use ; ch <= s -> channels ; ch ++ ) s -> fast_gain [ ch ] = ff_ac3_fast_gain_tab [ 4 ] ; } if ( s -> frame_type == EAC3_FRAME_TYPE_INDEPENDENT && get_bits1 ( gbc ) ) { skip_bits ( gbc , 10 ) ; } if ( cpl_in_use ) { if ( s -> first_cpl_leak || get_bits1 ( gbc ) ) { int fl = get_bits ( gbc , 3 ) ; int sl = get_bits ( gbc , 3 ) ; if ( blk && ( fl != s -> bit_alloc_params . cpl_fast_leak || sl != s -> bit_alloc_params . cpl_slow_leak ) ) { bit_alloc_stages [ CPL_CH ] = FFMAX ( bit_alloc_stages [ CPL_CH ] , 2 ) ; } s -> bit_alloc_params . cpl_fast_leak = fl ; s -> bit_alloc_params . cpl_slow_leak = sl ; } else if ( ! s -> eac3 && ! blk ) { av_log ( s -> avctx , AV_LOG_ERROR , "new coupling leak info must " "be present in block 0\n" ) ; return - 1 ; } s -> first_cpl_leak = 0 ; } if ( s -> dba_syntax && get_bits1 ( gbc ) ) { for ( ch = ! cpl_in_use ; ch <= fbw_channels ; ch ++ ) { s -> dba_mode [ ch ] = get_bits ( gbc , 2 ) ; if ( s -> dba_mode [ ch ] == DBA_RESERVED ) { av_log ( s -> avctx , AV_LOG_ERROR , "delta bit allocation strategy reserved\n" ) ; return - 1 ; } bit_alloc_stages [ ch ] = FFMAX ( bit_alloc_stages [ ch ] , 2 ) ; } for ( ch = ! cpl_in_use ; ch <= fbw_channels ; ch ++ ) { if ( s -> dba_mode [ ch ] == DBA_NEW ) { s -> dba_nsegs [ ch ] = get_bits ( gbc , 3 ) + 1 ; for ( seg = 0 ; seg < s -> dba_nsegs [ ch ] ; seg ++ ) { s -> dba_offsets [ ch ] [ seg ] = get_bits ( gbc , 5 ) ; s -> dba_lengths [ ch ] [ seg ] = get_bits ( gbc , 4 ) ; s -> dba_values [ ch ] [ seg ] = get_bits ( gbc , 3 ) ; } bit_alloc_stages [ ch ] = FFMAX ( bit_alloc_stages [ ch ] , 2 ) ; } } } else if ( blk == 0 ) { for ( ch = 0 ; ch <= s -> channels ; ch ++ ) { s -> dba_mode [ ch ] = DBA_NONE ; } } for ( ch = ! cpl_in_use ; ch <= s -> channels ; ch ++ ) { if ( bit_alloc_stages [ ch ] > 2 ) { ff_ac3_bit_alloc_calc_psd ( s -> dexps [ ch ] , s -> start_freq [ ch ] , s -> end_freq [ ch ] , s -> psd [ ch ] , s -> band_psd [ ch ] ) ; } if ( bit_alloc_stages [ ch ] > 1 ) { if ( ff_ac3_bit_alloc_calc_mask ( & s -> bit_alloc_params , s -> band_psd [ ch ] , s -> start_freq [ ch ] , s -> end_freq [ ch ] , s -> fast_gain [ ch ] , ( ch == s -> lfe_ch ) , s -> dba_mode [ ch ] , s -> dba_nsegs [ ch ] , s -> dba_offsets [ ch ] , s -> dba_lengths [ ch ] , s -> dba_values [ ch ] , s -> mask [ ch ] ) ) { av_log ( s -> avctx , AV_LOG_ERROR , "error in bit allocation\n" ) ; return - 1 ; } } if ( bit_alloc_stages [ ch ] > 0 ) { const uint8_t * bap_tab = s -> channel_uses_aht [ ch ] ? 
ff_eac3_hebap_tab : ff_ac3_bap_tab ; s -> ac3dsp . bit_alloc_calc_bap ( s -> mask [ ch ] , s -> psd [ ch ] , s -> start_freq [ ch ] , s -> end_freq [ ch ] , s -> snr_offset [ ch ] , s -> bit_alloc_params . floor , bap_tab , s -> bap [ ch ] ) ; } } if ( s -> skip_syntax && get_bits1 ( gbc ) ) { int skipl = get_bits ( gbc , 9 ) ; while ( skipl -- ) skip_bits ( gbc , 8 ) ; } decode_transform_coeffs ( s , blk ) ; if ( s -> channel_mode == AC3_CHMODE_STEREO ) do_rematrixing ( s ) ; for ( ch = 1 ; ch <= s -> channels ; ch ++ ) { float gain = 1.0 / 4194304.0f ; if ( s -> channel_mode == AC3_CHMODE_DUALMONO ) { gain *= s -> dynamic_range [ 2 - ch ] ; } else { gain *= s -> dynamic_range [ 0 ] ; } s -> fmt_conv . int32_to_float_fmul_scalar ( s -> transform_coeffs [ ch ] , s -> fixed_coeffs [ ch ] , gain , 256 ) ; } if ( s -> spx_in_use && CONFIG_EAC3_DECODER ) { ff_eac3_apply_spectral_extension ( s ) ; } downmix_output = s -> channels != s -> out_channels && ! ( ( s -> output_mode & AC3_OUTPUT_LFEON ) && s -> fbw_channels == s -> out_channels ) ; if ( different_transforms ) { if ( s -> downmixed ) { s -> downmixed = 0 ; ac3_upmix_delay ( s ) ; } do_imdct ( s , s -> channels ) ; if ( downmix_output ) { s -> ac3dsp . downmix ( s -> outptr , s -> downmix_coeffs , s -> out_channels , s -> fbw_channels , 256 ) ; } } else { if ( downmix_output ) { s -> ac3dsp . downmix ( s -> xcfptr + 1 , s -> downmix_coeffs , s -> out_channels , s -> fbw_channels , 256 ) ; } if ( downmix_output && ! s -> downmixed ) { s -> downmixed = 1 ; s -> ac3dsp . downmix ( s -> dlyptr , s -> downmix_coeffs , s -> out_channels , s -> fbw_channels , 128 ) ; } do_imdct ( s , s -> out_channels ) ; } return 0 ; }
0False