Columns: idx (int64, 0-63k), question (string, lengths 61-4.03k), target (string, lengths 6-1.23k)
2,600
def total_marks ( self ) : total = 0 for answer in self . answers : for number , part in enumerate ( answer ) : if number > 0 : if part [ 2 ] > 0 : total += part [ 2 ] return total
Compute the total mark for the assessment .
2,601
def permute ( num ) : "Permutation for randomizing data order." if permute_data : return np . random . permutation ( num ) else : logging . warning ( "Warning not permuting data" ) return np . arange ( num )
Permutation for randomizing data order .
2,602
def discrete ( cats , name = 'discrete' ) : import json ks = list ( cats ) for key in ks : if isinstance ( key , bytes ) : cats [ key . decode ( 'utf-8' ) ] = cats . pop ( key ) return 'discrete(' + json . dumps ( [ cats , name ] ) + ')'
Return a class category that shows the encoding
2,603
def clear_cache ( dataset_name = None ) : dr = data_resources [ dataset_name ] if 'dirs' in dr : for dirs , files in zip ( dr [ 'dirs' ] , dr [ 'files' ] ) : for dir , file in zip ( dirs , files ) : path = os . path . join ( data_path , dataset_name , dir , file ) if os . path . exists ( path ) : logging . info ( "clear_cache: removing " + path ) os . unlink ( path ) for dir in dirs : path = os . path . join ( data_path , dataset_name , dir ) if os . path . exists ( path ) : logging . info ( "clear_cache: remove directory " + path ) os . rmdir ( path ) else : for file_list in dr [ 'files' ] : for file in file_list : path = os . path . join ( data_path , dataset_name , file ) if os . path . exists ( path ) : logging . info ( "clear_cache: remove " + path ) os . unlink ( path )
Remove a data set from the cache
2,604
def to_arff ( dataset , ** kwargs ) : pods_data = dataset ( ** kwargs ) vals = list ( kwargs . values ( ) ) for i , v in enumerate ( vals ) : if isinstance ( v , list ) : vals [ i ] = '|' . join ( v ) else : vals [ i ] = str ( v ) args = '_' . join ( vals ) n = dataset . __name__ if len ( args ) > 0 : n += '_' + args n = n . replace ( ' ' , '-' ) ks = pods_data . keys ( ) d = None if 'Y' in ks and 'X' in ks : d = pd . DataFrame ( pods_data [ 'X' ] ) if 'Xtest' in ks : d = d . append ( pd . DataFrame ( pods_data [ 'Xtest' ] ) , ignore_index = True ) if 'covariates' in ks : d . columns = pods_data [ 'covariates' ] dy = pd . DataFrame ( pods_data [ 'Y' ] ) if 'Ytest' in ks : dy = dy . append ( pd . DataFrame ( pods_data [ 'Ytest' ] ) , ignore_index = True ) if 'response' in ks : dy . columns = pods_data [ 'response' ] for c in dy . columns : if c not in d . columns : d [ c ] = dy [ c ] else : d [ 'y' + str ( c ) ] = dy [ c ] elif 'Y' in ks : d = pd . DataFrame ( pods_data [ 'Y' ] ) if 'Ytest' in ks : d = d . append ( pd . DataFrame ( pods_data [ 'Ytest' ] ) , ignore_index = True ) elif 'data' in ks : d = pd . DataFrame ( pods_data [ 'data' ] ) if d is not None : df2arff ( d , n , pods_data )
Take a pods data set and write it as an ARFF file
2,605
def epomeo_gpx ( data_set = 'epomeo_gpx' , sample_every = 4 ) : import gpxpy import gpxpy . gpx if not data_available ( data_set ) : download_data ( data_set ) files = [ 'endomondo_1' , 'endomondo_2' , 'garmin_watch_via_endomondo' , 'viewranger_phone' , 'viewranger_tablet' ] X = [ ] for file in files : gpx_file = open ( os . path . join ( data_path , 'epomeo_gpx' , file + '.gpx' ) , 'r' ) gpx = gpxpy . parse ( gpx_file ) segment = gpx . tracks [ 0 ] . segments [ 0 ] points = [ point for track in gpx . tracks for segment in track . segments for point in segment . points ] data = [ [ ( point . time - datetime . datetime ( 2013 , 8 , 21 ) ) . total_seconds ( ) , point . latitude , point . longitude , point . elevation ] for point in points ] X . append ( np . asarray ( data ) [ : : sample_every , : ] ) gpx_file . close ( ) if pandas_available : X = pd . DataFrame ( X [ 0 ] , columns = [ 'seconds' , 'latitude' , 'longitude' , 'elevation' ] ) X . set_index ( keys = 'seconds' , inplace = True ) return data_details_return ( { 'X' : X , 'info' : 'Data is an array containing time in seconds, latitude, longitude and elevation in that order.' } , data_set )
Data set of three GPS traces of the same movement on Mt Epomeo in Ischia . Requires gpxpy to run .
2,606
def pmlr ( volumes = 'all' , data_set = 'pmlr' ) : if not data_available ( data_set ) : download_data ( data_set ) proceedings_file = open ( os . path . join ( data_path , data_set , 'proceedings.yaml' ) , 'r' ) import yaml proceedings = yaml . load ( proceedings_file ) data_name_full = 'pmlr_volumes' data_resources [ data_name_full ] = data_resources [ data_set ] . copy ( ) data_resources [ data_name_full ] [ 'files' ] = [ ] data_resources [ data_name_full ] [ 'dirs' ] = [ ] data_resources [ data_name_full ] [ 'urls' ] = [ ] for entry in proceedings : if volumes == 'all' or entry [ 'volume' ] in volumes : file = entry [ 'yaml' ] . split ( '/' ) [ - 1 ] dir = 'v' + str ( entry [ 'volume' ] ) data_resources [ data_name_full ] [ 'files' ] . append ( [ file ] ) data_resources [ data_name_full ] [ 'dirs' ] . append ( [ dir ] ) data_resources [ data_name_full ] [ 'urls' ] . append ( data_resources [ data_set ] [ 'urls' ] [ 0 ] ) Y = [ ] if not data_available ( data_name_full ) : download_data ( data_name_full ) for entry in reversed ( proceedings ) : volume = entry [ 'volume' ] if volumes == 'all' or volume in volumes : file = entry [ 'yaml' ] . split ( '/' ) [ - 1 ] volume_file = open ( os . path . join ( data_path , data_name_full , 'v' + str ( volume ) , file ) , 'r' ) Y += yaml . load ( volume_file ) if pandas_available : Y = pd . DataFrame ( Y ) Y [ 'published' ] = pd . to_datetime ( Y [ 'published' ] ) Y [ 'issued' ] = Y [ 'issued' ] . apply ( lambda x : np . datetime64 ( datetime . datetime ( * x [ 'date-parts' ] ) ) ) Y [ 'author' ] = Y [ 'author' ] . apply ( lambda x : [ str ( author [ 'given' ] ) + ' ' + str ( author [ 'family' ] ) for author in x ] ) Y [ 'editor' ] = Y [ 'editor' ] . apply ( lambda x : [ str ( editor [ 'given' ] ) + ' ' + str ( editor [ 'family' ] ) for editor in x ] ) columns = list ( Y . columns ) columns [ 14 ] = datetime64_ ( 'published' ) columns [ 11 ] = datetime64_ ( 'issued' ) Y . columns = columns return data_details_return ( { 'Y' : Y , 'info' : 'Data is a pandas data frame containing each paper, its abstract, authors, volumes and venue.' } , data_set )
Abstracts from the Proceedings of Machine Learning Research
2,607
def lee_yeast_ChIP ( data_set = 'lee_yeast_ChIP' ) : if not data_available ( data_set ) : download_data ( data_set ) from pandas import read_csv dir_path = os . path . join ( data_path , data_set ) filename = os . path . join ( dir_path , 'binding_by_gene.tsv' ) S = read_csv ( filename , header = 1 , index_col = 0 , sep = '\t' ) transcription_factors = [ col for col in S . columns if col [ : 7 ] != 'Unnamed' ] annotations = S [ [ 'Unnamed: 1' , 'Unnamed: 2' , 'Unnamed: 3' ] ] S = S [ transcription_factors ] return data_details_return ( { 'annotations' : annotations , 'Y' : S , 'transcription_factors' : transcription_factors } , data_set )
Yeast ChIP data from Lee et al .
2,608
def osu_run1 ( data_set = 'osu_run1' , sample_every = 4 ) : path = os . path . join ( data_path , data_set ) if not data_available ( data_set ) : import zipfile download_data ( data_set ) zip = zipfile . ZipFile ( os . path . join ( data_path , data_set , 'run1TXT.ZIP' ) , 'r' ) for name in zip . namelist ( ) : zip . extract ( name , path ) from . import mocap Y , connect = mocap . load_text_data ( 'Aug210106' , path ) Y = Y [ 0 : - 1 : sample_every , : ] return data_details_return ( { 'Y' : Y , 'connect' : connect } , data_set )
Ohio State University's Run1 motion capture data set.
2,609
def toy_linear_1d_classification ( seed = default_seed ) : def sample_class ( f ) : p = 1. / ( 1. + np . exp ( - f ) ) c = np . random . binomial ( 1 , p ) c = np . where ( c , 1 , - 1 ) return c np . random . seed ( seed = seed ) x1 = np . random . normal ( - 3 , 5 , 20 ) x2 = np . random . normal ( 3 , 5 , 20 ) X = ( np . r_ [ x1 , x2 ] ) [ : , None ] return { 'X' : X , 'Y' : sample_class ( 2. * X ) , 'F' : 2. * X , 'covariates' : [ 'X' ] , 'response' : [ discrete ( { 'positive' : 1 , 'negative' : - 1 } ) ] , 'seed' : seed }
Simple classification data in one dimension for illustrating models .
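A minimal standalone sketch of the label-sampling step used above, assuming only numpy (the helper name and explicit seed handling are illustrative, not part of the original):

    import numpy as np

    def sample_class(f, seed=0):
        # squash latent values through a sigmoid, draw Bernoulli samples,
        # and map {0, 1} labels to {-1, +1}
        rng = np.random.RandomState(seed)
        p = 1.0 / (1.0 + np.exp(-f))
        c = rng.binomial(1, p)
        return np.where(c, 1, -1)

    X = np.linspace(-6.0, 6.0, 5)[:, None]
    print(sample_class(2.0 * X))    # column vector of -1/+1 labels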
2,610
def airline_delay ( data_set = 'airline_delay' , num_train = 700000 , num_test = 100000 , seed = default_seed ) : if not data_available ( data_set ) : download_data ( data_set ) dir_path = os . path . join ( data_path , data_set ) filename = os . path . join ( dir_path , 'filtered_data.pickle' ) import pandas as pd data = pd . read_pickle ( filename ) data . pop ( 'Year' ) Yall = data . pop ( 'ArrDelay' ) . values [ : , None ] Xall = data . values all_data = num_train + num_test Xall = Xall [ : all_data ] Yall = Yall [ : all_data ] np . random . seed ( seed = seed ) N_shuffled = permute ( Yall . shape [ 0 ] ) train , test = N_shuffled [ num_test : ] , N_shuffled [ : num_test ] X , Y = Xall [ train ] , Yall [ train ] Xtest , Ytest = Xall [ test ] , Yall [ test ] covariates = [ 'month' , 'day of month' , 'day of week' , 'departure time' , 'arrival time' , 'air time' , 'distance to travel' , 'age of aircraft / years' ] response = [ 'delay' ] return data_details_return ( { 'X' : X , 'Y' : Y , 'Xtest' : Xtest , 'Ytest' : Ytest , 'seed' : seed , 'info' : "Airline delay data used for demonstrating Gaussian processes for big data." , 'covariates' : covariates , 'response' : response } , data_set )
Airline delay data used in "Gaussian Processes for Big Data" by Hensman, Fusi and Lawrence.
2,611
def olympic_sprints ( data_set = 'rogers_girolami_data' ) : X = np . zeros ( ( 0 , 2 ) ) Y = np . zeros ( ( 0 , 1 ) ) cats = { } for i , dataset in enumerate ( [ olympic_100m_men , olympic_100m_women , olympic_200m_men , olympic_200m_women , olympic_400m_men , olympic_400m_women ] ) : data = dataset ( ) year = data [ 'X' ] time = data [ 'Y' ] X = np . vstack ( ( X , np . hstack ( ( year , np . ones_like ( year ) * i ) ) ) ) Y = np . vstack ( ( Y , time ) ) cats [ dataset . __name__ ] = i data [ 'X' ] = X data [ 'Y' ] = Y data [ 'info' ] = "Olympics sprint event winning for men and women to 2008. Data is from Rogers and Girolami's First Course in Machine Learning." return data_details_return ( { 'X' : X , 'Y' : Y , 'covariates' : [ decimalyear ( 'year' , '%Y' ) , discrete ( cats , 'event' ) ] , 'response' : [ 'time' ] , 'info' : "Olympics sprint event winning for men and women to 2008. Data is from Rogers and Girolami's First Course in Machine Learning." , 'output_info' : { 0 : '100m Men' , 1 : '100m Women' , 2 : '200m Men' , 3 : '200m Women' , 4 : '400m Men' , 5 : '400m Women' } } , data_set )
All Olympic sprint winning times for multiple-output prediction.
2,612
def movielens100k ( data_set = 'movielens100k' ) : if not data_available ( data_set ) : import zipfile download_data ( data_set ) dir_path = os . path . join ( data_path , data_set ) zip = zipfile . ZipFile ( os . path . join ( dir_path , 'ml-100k.zip' ) , 'r' ) for name in zip . namelist ( ) : zip . extract ( name , dir_path ) import pandas as pd encoding = 'latin-1' movie_path = os . path . join ( data_path , 'movielens100k' , 'ml-100k' ) items = pd . read_csv ( os . path . join ( movie_path , 'u.item' ) , index_col = 'index' , header = None , sep = '|' , names = [ 'index' , 'title' , 'date' , 'empty' , 'imdb_url' , 'unknown' , 'Action' , 'Adventure' , 'Animation' , 'Children' 's' , 'Comedy' , 'Crime' , 'Documentary' , 'Drama' , 'Fantasy' , 'Film-Noir' , 'Horror' , 'Musical' , 'Mystery' , 'Romance' , 'Sci-Fi' , 'Thriller' , 'War' , 'Western' ] , encoding = encoding ) users = pd . read_csv ( os . path . join ( movie_path , 'u.user' ) , index_col = 'index' , header = None , sep = '|' , names = [ 'index' , 'age' , 'sex' , 'job' , 'id' ] , encoding = encoding ) parts = [ 'u1.base' , 'u1.test' , 'u2.base' , 'u2.test' , 'u3.base' , 'u3.test' , 'u4.base' , 'u4.test' , 'u5.base' , 'u5.test' , 'ua.base' , 'ua.test' , 'ub.base' , 'ub.test' ] ratings = [ ] for part in parts : rate_part = pd . read_csv ( os . path . join ( movie_path , part ) , index_col = 'index' , header = None , sep = '\t' , names = [ 'user' , 'item' , 'rating' , 'index' ] , encoding = encoding ) rate_part [ 'split' ] = part ratings . append ( rate_part ) Y = pd . concat ( ratings ) return data_details_return ( { 'Y' : Y , 'film_info' : items , 'user_info' : users , 'info' : 'The Movielens 100k data' } , data_set )
Data set of movie ratings collected by the University of Minnesota and cleaned up for use .
2,613
def ceres ( data_set = 'ceres' ) : if not data_available ( data_set ) : download_data ( data_set ) import pandas as pd data = pd . read_csv ( os . path . join ( data_path , data_set , 'ceresData.txt' ) , index_col = 'Tag' , header = None , sep = '\t' , names = [ 'Tag' , 'Mittlere Sonnenzeit' , 'Gerade Aufstig in Zeit' , 'Gerade Aufstiegung in Graden' , 'Nordlich Abweich' , 'Geocentrische Laenger' , 'Geocentrische Breite' , 'Ort der Sonne + 20" Aberration' , 'Logar. d. Distanz' ] , parse_dates = True , dayfirst = False ) return data_details_return ( { 'data' : data } , data_set )
Twenty-two observations of the dwarf planet Ceres as observed by Giuseppe Piazzi and published in the September edition of the Monatliche Correspondenz in 1801. These were the measurements used by Gauss to fit a model of the planet's orbit, through which the planet was recovered three months later.
2,614
def access_elementusers ( self , elementuser_id , access_id = None , tenant_id = None , api_version = "v2.0" ) : if tenant_id is None and self . _parent_class . tenant_id : tenant_id = self . _parent_class . tenant_id elif not tenant_id : raise TypeError ( "tenant_id is required but not set or cached." ) cur_ctlr = self . _parent_class . controller if not access_id : url = str ( cur_ctlr ) + "/{}/api/tenants/{}/elementusers/{}/access" . format ( api_version , tenant_id , elementuser_id ) else : url = str ( cur_ctlr ) + "/{}/api/tenants/{}/elementusers/{}/access/{}" . format ( api_version , tenant_id , elementuser_id , access_id ) api_logger . debug ( "URL = %s" , url ) return self . _parent_class . rest_call ( url , "get" )
Get all accesses for a particular user
2,615
def logout ( self , api_version = "v2.0" ) : cur_ctlr = self . _parent_class . controller url = str ( cur_ctlr ) + "/{}/api/logout" . format ( api_version ) api_logger . debug ( "URL = %s" , url ) return self . _parent_class . rest_call ( url , "get" )
Logout current session
2,616
def use_token ( self , token = None ) : api_logger . info ( 'use_token function:' ) if not isinstance ( token , ( text_type , binary_type ) ) : api_logger . debug ( '"token" was not a text-style string: {}' . format ( text_type ( token ) ) ) return False session = self . _parent_class . expose_session ( ) session . cookies . clear ( ) self . _parent_class . add_headers ( { 'X-Auth-Token' : token } ) if self . interactive_update_profile_vars ( ) : if self . _parent_class . tenant_id : if self . interactive_tenant_update_vars ( ) : if self . _parent_class . is_esp : choose_status , chosen_client_id = self . interactive_client_choice ( ) if choose_status : clogin_resp = self . _parent_class . post . login_clients ( chosen_client_id , { } ) if clogin_resp . cgx_status : c_profile = self . interactive_update_profile_vars ( ) t_profile = self . interactive_tenant_update_vars ( ) if c_profile and t_profile : self . _parent_class . _password = None return True else : if t_profile : print ( "ESP Client Tenant detail retrieval failed." ) self . _parent_class . email = None self . _parent_class . _password = None return False else : print ( "ESP Client Login failed." ) self . _parent_class . email = None self . _parent_class . _password = None return False else : print ( "ESP Client Choice failed." ) self . _parent_class . email = None self . _parent_class . _password = None return False self . _parent_class . _password = None return True else : print ( "Tenant detail retrieval failed." ) self . _parent_class . email = None self . _parent_class . _password = None return False else : self . _parent_class . email = None self . _parent_class . _password = None return False api_logger . info ( "EMAIL = %s" , self . _parent_class . email ) api_logger . info ( "USER_ID = %s" , self . _parent_class . _user_id ) api_logger . info ( "USER ROLES = %s" , json . dumps ( self . _parent_class . roles ) ) api_logger . info ( "TENANT_ID = %s" , self . _parent_class . tenant_id ) api_logger . info ( "TENANT_NAME = %s" , self . _parent_class . tenant_name ) api_logger . info ( "TOKEN_SESSION = %s" , self . _parent_class . token_session ) return True
Function to use a static AUTH_TOKEN as auth for the constructor instead of the full login process.
2,617
def interactive_tenant_update_vars ( self ) : api_logger . info ( 'interactive_tenant_update_vars function:' ) tenant_resp = self . _parent_class . get . tenants ( self . _parent_class . tenant_id ) status = tenant_resp . cgx_status tenant_dict = tenant_resp . cgx_content if status : api_logger . debug ( "new tenant_dict: %s" , tenant_dict ) self . _parent_class . tenant_name = tenant_dict . get ( 'name' , self . _parent_class . tenant_id ) self . _parent_class . is_esp = tenant_dict . get ( 'is_esp' ) address_lookup = tenant_dict . get ( 'address' , None ) if address_lookup : tenant_address = address_lookup . get ( 'street' , "" ) + ", " tenant_address += ( str ( address_lookup . get ( 'street2' , "" ) ) + ", " ) tenant_address += ( str ( address_lookup . get ( 'city' , "" ) ) + ", " ) tenant_address += ( str ( address_lookup . get ( 'state' , "" ) ) + ", " ) tenant_address += ( str ( address_lookup . get ( 'post_code' , "" ) ) + ", " ) tenant_address += ( str ( address_lookup . get ( 'country' , "" ) ) + ", " ) else : tenant_address = "Unknown" self . _parent_class . address = tenant_address return True else : return False
Function to update the cloudgenix.API object with tenant login info. Run after login or client login.
2,618
def interactive_update_profile_vars ( self ) : profile = self . _parent_class . get . profile ( ) if profile . cgx_status : self . _parent_class . tenant_id = profile . cgx_content . get ( 'tenant_id' ) self . _parent_class . email = profile . cgx_content . get ( 'email' ) self . _parent_class . _user_id = profile . cgx_content . get ( 'id' ) self . _parent_class . roles = profile . cgx_content . get ( 'roles' , [ ] ) self . _parent_class . token_session = profile . cgx_content . get ( 'token_session' ) return True else : print ( "Profile retrieval failed." ) self . _parent_class . _password = None return False
Function to update the cloudgenix.API object with profile info. Run after login or client login.
2,619
def quick_menu ( self , banner , list_line_format , choice_list ) : invalid = True menu_int = - 1 while invalid : print ( banner ) for item_index , item_value in enumerate ( choice_list ) : print ( list_line_format . format ( item_index + 1 , * item_value ) ) menu_choice = compat_input ( "\nChoose a Number or (Q)uit: " ) if str ( menu_choice ) . lower ( ) in [ 'q' ] : print ( "Exiting.." ) self . _parent_class . get . logout ( ) sys . exit ( 0 ) try : menu_int = int ( menu_choice ) sanity = True except ValueError : print ( "ERROR: " , menu_choice ) sanity = False if sanity and 1 <= menu_int <= len ( choice_list ) : invalid = False else : print ( "Invalid input, needs to be between 1 and {0}.\n" . format ( len ( choice_list ) ) ) return choice_list [ int ( menu_int ) - 1 ]
Function to display a quick menu for user input
2,620
def check_sso_login ( self , operator_email , request_id ) : data = { "email" : operator_email , "requestId" : request_id } api_logger . info ( 'check_sso_login function:' ) response = self . _parent_class . post . login ( data = data ) if not response . cgx_content . get ( 'x_auth_token' ) : return response auth_region = self . _parent_class . parse_region ( response ) self . _parent_class . update_region_to_controller ( auth_region ) self . _parent_class . reparse_login_cookie_after_region_update ( response ) return response
Log in to the CloudGenix API and check whether SAML SSO has occurred. This function is used to poll for SAML SSO success while waiting.
2,621
def quick_confirm ( prompt , default_value ) : valid = False value = default_value . lower ( ) while not valid : input_val = compat_input ( prompt + "[{0}]: " . format ( default_value ) ) if input_val == "" : value = default_value . lower ( ) valid = True else : try : if input_val . lower ( ) in [ 'y' , 'n' ] : value = input_val . lower ( ) valid = True else : print ( "ERROR: enter 'Y' or 'N'." ) valid = False except ValueError : print ( "ERROR: enter 'Y' or 'N'." ) valid = False return value
Function to display a quick confirmation for user input
2,622
def quick_int_input ( prompt , default_value , min_val = 1 , max_val = 30 ) : valid = False num_val = default_value while not valid : input_val = compat_input ( prompt + "[{0}]: " . format ( default_value ) ) if input_val == "" : num_val = default_value valid = True else : try : num_val = int ( input_val ) if min_val <= num_val <= max_val : valid = True else : print ( "ERROR: must be between {0} and {1}." . format ( min_val , max_val ) ) valid = False except ValueError : print ( "ERROR: must be a number." ) valid = False return num_val
Function to display a quick question for integer user input
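A usage sketch for the function above (the prompt text and bounds are illustrative); it re-prompts until the input parses as an integer within [min_val, max_val]:

    timeout = quick_int_input("API timeout in seconds ", 10, min_val=1, max_val=300)
    print("Using timeout: {0}".format(timeout))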
2,623
def quick_str_input ( prompt , default_value ) : valid = False str_val = default_value while not valid : input_val = raw_input ( prompt + "[{0}]: " . format ( default_value ) ) if input_val == "" : str_val = default_value valid = True else : try : str_val = text_type ( input_val ) valid = True except ValueError : print ( "ERROR: must be text." ) valid = False return str_val
Function to display a quick question for text input .
2,624
def tran_hash ( self , a , b , c , n ) : return ( ( ( TRAN [ ( a + n ) & 255 ] ^ TRAN [ b ] * ( n + n + 1 ) ) + TRAN [ ( c ) ^ TRAN [ n ] ] ) & 255 )
implementation of the tran53 hash function
2,625
def process ( self , chunk ) : self . _digest = None if isinstance ( chunk , text_type ) : chunk = chunk . encode ( 'utf-8' ) for char in chunk : self . num_char += 1 if PY3 : c = char else : c = ord ( char ) if len ( self . window ) > 1 : self . acc [ self . tran_hash ( c , self . window [ 0 ] , self . window [ 1 ] , 0 ) ] += 1 if len ( self . window ) > 2 : self . acc [ self . tran_hash ( c , self . window [ 0 ] , self . window [ 2 ] , 1 ) ] += 1 self . acc [ self . tran_hash ( c , self . window [ 1 ] , self . window [ 2 ] , 2 ) ] += 1 if len ( self . window ) > 3 : self . acc [ self . tran_hash ( c , self . window [ 0 ] , self . window [ 3 ] , 3 ) ] += 1 self . acc [ self . tran_hash ( c , self . window [ 1 ] , self . window [ 3 ] , 4 ) ] += 1 self . acc [ self . tran_hash ( c , self . window [ 2 ] , self . window [ 3 ] , 5 ) ] += 1 self . acc [ self . tran_hash ( self . window [ 3 ] , self . window [ 0 ] , c , 6 ) ] += 1 self . acc [ self . tran_hash ( self . window [ 3 ] , self . window [ 2 ] , c , 7 ) ] += 1 if len ( self . window ) < 4 : self . window = [ c ] + self . window else : self . window = [ c ] + self . window [ : 3 ]
computes the hash of all of the trigrams in the chunk using a window of length 5
2,626
def from_file ( self , fname ) : f = open ( fname , "rb" ) data = f . read ( ) self . update ( data ) f . close ( )
read in a file and compute digest
2,627
def compare ( self , digest_2 , is_hex = False ) : if is_hex : digest_2 = convert_hex_to_ints ( digest_2 ) bit_diff = 0 for i in range ( len ( self . digest ) ) : bit_diff += POPC [ self . digest [ i ] ^ digest_2 [ i ] ] return 128 - bit_diff
Returns the difference between the Nilsimsa digest of the current object and a given digest.
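A standalone sketch of the scoring rule used by compare above, not the library API: the score is 128 minus the number of differing bits between two 32-byte digests, so identical digests score 128 and unrelated digests land near 0. POPC in the original is a precomputed popcount table, emulated here with bin().count:

    def nilsimsa_score(digest_a, digest_b):
        # popcount of the XOR of corresponding bytes, summed over the digest
        bit_diff = sum(bin(a ^ b).count("1") for a, b in zip(digest_a, digest_b))
        return 128 - bit_diff

    same = list(range(32))
    print(nilsimsa_score(same, same))        # 128
    print(nilsimsa_score(same, [0] * 32))    # lower score for differing digests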
2,628
def tenant_forgot_password_login ( self , data , tenant_id = None , api_version = "v2.0" ) : if tenant_id is None and self . _parent_class . tenant_id : tenant_id = self . _parent_class . tenant_id elif not tenant_id : raise TypeError ( "tenant_id is required but not set or cached." ) cur_ctlr = self . _parent_class . controller url = str ( cur_ctlr ) + "/{}/api/tenants/{}/login/password/forgot" . format ( api_version , tenant_id ) api_logger . debug ( "URL = %s" , url ) return self . _parent_class . rest_call ( url , "post" , data = data , sensitive = True )
Forgot password API
2,629
def is_valid_file ( parser , arg ) : if not os . path . exists ( arg ) : parser . error ( "File %s not found" % arg ) else : return arg
Verify the validity of the given file. Never trust the end user.
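A typical way to wire is_valid_file into argparse (usage sketch; the argument name is illustrative):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("code_file",
                        type=lambda arg: is_valid_file(parser, arg),
                        help="path to an existing source file")
    # parser.parse_args() exits with "File ... not found" for a missing path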
2,630
def getID ( code_file ) : json_path = ghostfolder + '/' + json_file if os . path . exists ( json_path ) : pass else : download_file ( 'https://ghostbin.com/languages.json' ) lang = detect_lang ( code_file ) json_data = json . load ( file ( json_path ) ) ID = '' for i in range ( len ( json_data ) ) : temp = len ( json_data [ i ] [ 'languages' ] ) for j in range ( temp ) : if json_data [ i ] [ 'languages' ] [ j ] [ 'name' ] . lower ( ) == lang . lower ( ) : ID = json_data [ i ] [ 'languages' ] [ j ] [ 'id' ] print ( 'Gotten language ID from \'languages.json\': {0}' . format ( ID ) ) return ID
Get the language ID of the input file language
2,631
def detect_lang ( path ) : blob = FileBlob ( path , os . getcwd ( ) ) if blob . is_text : print ( 'Programming language of the file detected: {0}' . format ( blob . language . name ) ) return blob . language . name else : print ( 'File not a text file. Exiting...' ) sys . exit ( )
Detect the language used in the given file .
2,632
def screenshot ( self , scale = None , quality = None ) : output_dir = BuiltIn ( ) . get_variable_value ( '${OUTPUTDIR}' ) ts = time . time ( ) st = datetime . datetime . fromtimestamp ( ts ) . strftime ( '%Y%m%d%H%M%S' ) screenshot_path = '%s%s%s.png' % ( output_dir , os . sep , st ) self . device . screenshot ( screenshot_path , scale , quality ) logger . info ( '\n<a href="%s">%s</a><br><img src="%s">' % ( screenshot_path , st , screenshot_path ) , html = True )
Take a screenshot of the device and log it in the report with a timestamp; scale sets the screenshot size and quality the screenshot quality (defaults: scale=1.0, quality=100).
2,633
def call ( self , obj , method , * args , ** selectors ) : func = getattr ( obj , method ) return func ( ** selectors )
This keyword can call an object method from the original python uiautomator.
2,634
def merge_sims ( oldsims , newsims , clip = None ) : if oldsims is None : result = newsims or [ ] elif newsims is None : result = oldsims else : result = sorted ( oldsims + newsims , key = lambda item : - item [ 1 ] ) if clip is not None : result = result [ : clip ] return result
Merge two precomputed similarity lists truncating the result to clip most similar items .
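A small worked example of merge_sims as defined above; items are (docid, score) pairs and the merge keeps the clip highest-scoring ones:

    old = [("a", 0.9), ("b", 0.5)]
    new = [("c", 0.7)]
    print(merge_sims(old, new, clip=2))    # [('a', 0.9), ('c', 0.7)]
    print(merge_sims(None, new))           # [('c', 0.7)]
    print(merge_sims(old, None))           # [('a', 0.9), ('b', 0.5)]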
2,635
def terminate ( self ) : try : self . id2sims . terminate ( ) except : pass import glob for fname in glob . glob ( self . fname + '*' ) : try : os . remove ( fname ) logger . info ( "deleted %s" % fname ) except Exception , e : logger . warning ( "failed to delete %s: %s" % ( fname , e ) ) for val in self . __dict__ . keys ( ) : try : delattr ( self , val ) except : pass
Delete all files created by this index, invalidating self. Use with care.
2,636
def update_ids ( self , docids ) : logger . info ( "updating %i id mappings" % len ( docids ) ) for docid in docids : if docid is not None : pos = self . id2pos . get ( docid , None ) if pos is not None : logger . info ( "replacing existing document %r in %s" % ( docid , self ) ) del self . pos2id [ pos ] self . id2pos [ docid ] = self . length try : del self . id2sims [ docid ] except : pass self . length += 1 self . id2sims . sync ( ) self . update_mappings ( )
Update the id -> pos mapping with new document ids.
2,637
def vec_by_id ( self , docid ) : pos = self . id2pos [ docid ] return self . qindex . vector_by_id ( pos )
Return indexed vector corresponding to document docid .
2,638
def merge ( self , other ) : other . qindex . normalize , other . qindex . num_best = False , self . topsims logger . info ( "updating old precomputed values" ) pos , lenself = 0 , len ( self . qindex ) for chunk in self . qindex . iter_chunks ( ) : for sims in other . qindex [ chunk ] : if pos in self . pos2id : docid = self . pos2id [ pos ] sims = self . sims2scores ( sims ) self . id2sims [ docid ] = merge_sims ( self . id2sims [ docid ] , sims , self . topsims ) pos += 1 if pos % 10000 == 0 : logger . info ( "PROGRESS: updated doc #%i/%i" % ( pos , lenself ) ) self . id2sims . sync ( ) logger . info ( "merging fresh index into optimized one" ) pos , docids = 0 , [ ] for chunk in other . qindex . iter_chunks ( ) : for vec in chunk : if pos in other . pos2id : self . qindex . add_documents ( [ vec ] ) docids . append ( other . pos2id [ pos ] ) pos += 1 self . qindex . save ( ) self . update_ids ( docids ) logger . info ( "precomputing most similar for the fresh index" ) pos , lenother = 0 , len ( other . qindex ) norm , self . qindex . normalize = self . qindex . normalize , False topsims , self . qindex . num_best = self . qindex . num_best , self . topsims for chunk in other . qindex . iter_chunks ( ) : for sims in self . qindex [ chunk ] : if pos in other . pos2id : docid = other . pos2id [ pos ] self . id2sims [ docid ] = self . sims2scores ( sims ) pos += 1 if pos % 10000 == 0 : logger . info ( "PROGRESS: precomputed doc #%i/%i" % ( pos , lenother ) ) self . qindex . normalize , self . qindex . num_best = norm , topsims self . id2sims . sync ( )
Merge documents from the other index . Update precomputed similarities in the process .
2,639
def doc2vec ( self , doc ) : bow = self . dictionary . doc2bow ( doc [ 'tokens' ] ) if self . method == 'lsi' : return self . lsi [ self . tfidf [ bow ] ] elif self . method == 'lda' : return self . lda [ bow ] elif self . method == 'lda_tfidf' : return self . lda [ self . tfidf [ bow ] ] elif self . method == 'logentropy' : return self . logent [ bow ]
Convert a single SimilarityDocument to vector .
2,640
def flush ( self , save_index = False , save_model = False , clear_buffer = False ) : if save_index : if self . fresh_index is not None : self . fresh_index . save ( self . location ( 'index_fresh' ) ) if self . opt_index is not None : self . opt_index . save ( self . location ( 'index_opt' ) ) if save_model : if self . model is not None : self . model . save ( self . location ( 'model' ) ) self . payload . commit ( ) if clear_buffer : if hasattr ( self , 'fresh_docs' ) : try : self . fresh_docs . terminate ( ) except : pass self . fresh_docs = SqliteDict ( journal_mode = JOURNAL_MODE ) self . fresh_docs . sync ( )
Commit all changes, clear all caches.
2,641
def close ( self ) : try : self . payload . close ( ) except : pass try : self . model . close ( ) except : pass try : self . fresh_index . close ( ) except : pass try : self . opt_index . close ( ) except : pass try : self . fresh_docs . terminate ( ) except : pass
Explicitly close open file handles, databases, etc.
2,642
def train ( self , corpus = None , method = 'auto' , clear_buffer = True , params = None ) : if corpus is not None : self . flush ( clear_buffer = True ) self . buffer ( corpus ) if not self . fresh_docs : msg = "train called but no training corpus specified for %s" % self logger . error ( msg ) raise ValueError ( msg ) if method == 'auto' : numdocs = len ( self . fresh_docs ) if numdocs < 1000 : logging . warning ( "too few training documents; using simple log-entropy model instead of latent semantic indexing" ) method = 'logentropy' else : method = 'lsi' if params is None : params = { } self . model = SimModel ( self . fresh_docs , method = method , params = params ) self . flush ( save_model = True , clear_buffer = clear_buffer )
Create an indexing model. Will overwrite the model if it already exists. All indexes become invalid because documents in them use a now-obsolete representation.
2,643
def index ( self , corpus = None , clear_buffer = True ) : if not self . model : msg = 'must initialize model for %s before indexing documents' % self . basename logger . error ( msg ) raise AttributeError ( msg ) if corpus is not None : self . flush ( clear_buffer = True ) self . buffer ( corpus ) if not self . fresh_docs : msg = "index called but no indexing corpus specified for %s" % self logger . error ( msg ) raise ValueError ( msg ) if not self . fresh_index : logger . info ( "starting a new fresh index for %s" % self ) self . fresh_index = SimIndex ( self . location ( 'index_fresh' ) , self . model . num_features ) self . fresh_index . index_documents ( self . fresh_docs , self . model ) if self . opt_index is not None : self . opt_index . delete ( self . fresh_docs . keys ( ) ) logger . info ( "storing document payloads" ) for docid in self . fresh_docs : payload = self . fresh_docs [ docid ] . get ( 'payload' , None ) if payload is None : break self . payload [ docid ] = payload self . flush ( save_index = True , clear_buffer = clear_buffer )
Permanently index all documents previously added via buffer or directly index documents from corpus if specified .
2,644
def drop_index ( self , keep_model = True ) : modelstr = "" if keep_model else "and model " logger . info ( "deleting similarity index " + modelstr + "from %s" % self . basename ) for index in [ self . fresh_index , self . opt_index ] : if index is not None : index . terminate ( ) self . fresh_index , self . opt_index = None , None if self . payload is not None : self . payload . close ( ) fname = self . location ( 'payload' ) try : if os . path . exists ( fname ) : os . remove ( fname ) logger . info ( "deleted %s" % fname ) except Exception , e : logger . warning ( "failed to delete %s" % fname ) self . payload = SqliteDict ( self . location ( 'payload' ) , autocommit = True , journal_mode = JOURNAL_MODE ) if not keep_model and self . model is not None : self . model . close ( ) fname = self . location ( 'model' ) try : if os . path . exists ( fname ) : os . remove ( fname ) logger . info ( "deleted %s" % fname ) except Exception , e : logger . warning ( "failed to delete %s" % fname ) self . model = None self . flush ( save_index = True , save_model = True , clear_buffer = True )
Drop all indexed documents. If keep_model is False, also drop the model.
2,645
def delete ( self , docids ) : logger . info ( "asked to drop %i documents" % len ( docids ) ) for index in [ self . opt_index , self . fresh_index ] : if index is not None : index . delete ( docids ) self . flush ( save_index = True )
Delete specified documents from the index .
2,646
def find_similar ( self , doc , min_score = 0.0 , max_results = 100 ) : logger . debug ( "received query call with %r" % doc ) if self . is_locked ( ) : msg = "cannot query while the server is being updated" logger . error ( msg ) raise RuntimeError ( msg ) sims_opt , sims_fresh = None , None for index in [ self . fresh_index , self . opt_index ] : if index is not None : index . topsims = max_results if isinstance ( doc , basestring ) : docid = doc if self . opt_index is not None and docid in self . opt_index : sims_opt = self . opt_index . sims_by_id ( docid ) if self . fresh_index is not None : vec = self . opt_index . vec_by_id ( docid ) sims_fresh = self . fresh_index . sims_by_vec ( vec , normalize = False ) elif self . fresh_index is not None and docid in self . fresh_index : sims_fresh = self . fresh_index . sims_by_id ( docid ) if self . opt_index is not None : vec = self . fresh_index . vec_by_id ( docid ) sims_opt = self . opt_index . sims_by_vec ( vec , normalize = False ) else : raise ValueError ( "document %r not in index" % docid ) else : if 'topics' in doc : vec = gensim . matutils . any2sparse ( doc [ 'topics' ] ) else : vec = self . model . doc2vec ( doc ) if self . opt_index is not None : sims_opt = self . opt_index . sims_by_vec ( vec ) if self . fresh_index is not None : sims_fresh = self . fresh_index . sims_by_vec ( vec ) merged = merge_sims ( sims_opt , sims_fresh ) logger . debug ( "got %s raw similars, pruning with max_results=%s, min_score=%s" % ( len ( merged ) , max_results , min_score ) ) result = [ ] for docid , score in merged : if score < min_score or 0 < max_results <= len ( result ) : break result . append ( ( docid , float ( score ) , self . payload . get ( docid , None ) ) ) return result
Find the max_results most similar articles in the index, each having a similarity score of at least min_score. The resulting list may be shorter than max_results if there are not enough matching documents.
2,647
def keys ( self ) : result = [ ] if self . fresh_index is not None : result += self . fresh_index . keys ( ) if self . opt_index is not None : result += self . opt_index . keys ( ) return result
Return ids of all indexed documents .
2,648
def check_session ( self ) : if self . session is None : if self . autosession : self . open_session ( ) else : msg = "must open a session before modifying %s" % self raise RuntimeError ( msg )
Make sure a session is open .
2,649
def open_session ( self ) : if self . session is not None : msg = "session already open; commit it or rollback before opening another one in %s" % self logger . error ( msg ) raise RuntimeError ( msg ) logger . info ( "opening a new session" ) logger . info ( "removing %s" % self . loc_session ) try : shutil . rmtree ( self . loc_session ) except : logger . info ( "failed to delete %s" % self . loc_session ) logger . info ( "cloning server from %s to %s" % ( self . loc_stable , self . loc_session ) ) shutil . copytree ( self . loc_stable , self . loc_session ) self . session = SimServer ( self . loc_session , use_locks = self . use_locks ) self . lock_update . acquire ( )
Open a new session to modify this server .
2,650
def buffer ( self , * args , ** kwargs ) : self . check_session ( ) result = self . session . buffer ( * args , ** kwargs ) return result
Buffer documents in the current session
2,651
def index ( self , * args , ** kwargs ) : self . check_session ( ) result = self . session . index ( * args , ** kwargs ) if self . autosession : self . commit ( ) return result
Index documents in the current session
2,652
def drop_index ( self , keep_model = True ) : self . check_session ( ) result = self . session . drop_index ( keep_model ) if self . autosession : self . commit ( ) return result
Drop all indexed documents from the session . Optionally drop model too .
2,653
def delete ( self , docids ) : self . check_session ( ) result = self . session . delete ( docids ) if self . autosession : self . commit ( ) return result
Delete documents from the current session .
2,654
def optimize ( self ) : self . check_session ( ) result = self . session . optimize ( ) if self . autosession : self . commit ( ) return result
Optimize the index for faster by-document-id queries.
2,655
def commit ( self ) : if self . session is not None : logger . info ( "committing transaction in %s" % self ) tmp = self . stable self . stable , self . session = self . session , None self . istable = 1 - self . istable self . write_istable ( ) tmp . close ( ) self . lock_update . release ( ) else : logger . warning ( "commit called but there's no open session in %s" % self )
Commit changes made by the latest session .
2,656
def terminate ( self ) : logger . info ( "deleting entire server %s" % self ) self . close ( ) try : shutil . rmtree ( self . basedir ) logger . info ( "deleted server under %s" % self . basedir ) for val in self . __dict__ . keys ( ) : try : delattr ( self , val ) except : pass except Exception , e : logger . warning ( "failed to delete SessionServer: %s" % ( e ) )
Delete all files created by this server, invalidating self. Use with care.
2,657
def find_similar ( self , * args , ** kwargs ) : if self . session is not None and self . autosession : self . commit ( ) return self . stable . find_similar ( * args , ** kwargs )
Find similar articles .
2,658
async def profile ( self , ctx , platform , name ) : player = await self . client . get_player ( platform , name ) solos = await player . get_solos ( ) await ctx . send ( "# of kills in solos for {}: {}" . format ( name , solos . kills . value ) )
Fetch a profile .
2,659
def generate_chunks ( data , chunk_size = DEFAULT_CHUNK_SIZE ) : iterator = iter ( repeated . getvalues ( data ) ) while True : chunk = list ( itertools . islice ( iterator , chunk_size ) ) if not chunk : return yield chunk
Yield chunk_size items from data at a time .
2,660
def reduce ( reducer , data , chunk_size = DEFAULT_CHUNK_SIZE ) : if not chunk_size : return finalize ( reducer , fold ( reducer , data ) ) chunks = generate_chunks ( data , chunk_size ) intermediate = fold ( reducer , next ( chunks ) ) for chunk in chunks : intermediate = merge ( reducer , intermediate , fold ( reducer , chunk ) ) return finalize ( reducer , intermediate )
Repeatedly call fold and merge on data and then finalize .
2,661
def conditions ( self ) : for idx in six . moves . range ( 1 , len ( self . children ) , 2 ) : yield ( self . children [ idx - 1 ] , self . children [ idx ] )
The if - else pairs .
2,662
def handle_noargs ( self , ** options ) : r = get_r ( ) since = datetime . utcnow ( ) - timedelta ( days = 1 ) metrics = { } categories = r . metric_slugs_by_category ( ) for category_name , slug_list in categories . items ( ) : metrics [ category_name ] = [ ] for slug in slug_list : metric_values = r . get_metric_history ( slug , since = since ) metrics [ category_name ] . append ( ( slug , metric_values ) ) template = "redis_metrics/email/report.{fmt}" data = { 'today' : since , 'metrics' : metrics , } message = render_to_string ( template . format ( fmt = 'txt' ) , data ) message_html = render_to_string ( template . format ( fmt = 'html' ) , data ) msg = EmailMultiAlternatives ( subject = "Redis Metrics Report" , body = message , from_email = settings . DEFAULT_FROM_EMAIL , to = [ email for name , email in settings . ADMINS ] ) msg . attach_alternative ( message_html , "text/html" ) msg . send ( )
Send report e-mails.
2,663
def add_tasks ( self , value ) : tasks = self . _validate_entities ( value ) self . _tasks . update ( tasks ) self . _task_count = len ( self . _tasks )
Adds tasks to the existing set of tasks of the Stage
2,664
def to_dict ( self ) : stage_desc_as_dict = { 'uid' : self . _uid , 'name' : self . _name , 'state' : self . _state , 'state_history' : self . _state_history , 'parent_pipeline' : self . _p_pipeline } return stage_desc_as_dict
Convert current Stage into a dictionary
2,665
def from_dict ( self , d ) : if 'uid' in d : if d [ 'uid' ] : self . _uid = d [ 'uid' ] if 'name' in d : if d [ 'name' ] : self . _name = d [ 'name' ] if 'state' in d : if isinstance ( d [ 'state' ] , str ) or isinstance ( d [ 'state' ] , unicode ) : if d [ 'state' ] in states . _stage_state_values . keys ( ) : self . _state = d [ 'state' ] else : raise ValueError ( obj = self . _uid , attribute = 'state' , expected_value = states . _stage_state_values . keys ( ) , actual_value = d [ 'state' ] ) else : raise TypeError ( entity = 'state' , expected_type = str , actual_type = type ( d [ 'state' ] ) ) else : self . _state = states . INITIAL if 'state_history' in d : if isinstance ( d [ 'state_history' ] , list ) : self . _state_history = d [ 'state_history' ] else : raise TypeError ( entity = 'state_history' , expected_type = list , actual_type = type ( d [ 'state_history' ] ) ) if 'parent_pipeline' in d : if isinstance ( d [ 'parent_pipeline' ] , dict ) : self . _p_pipeline = d [ 'parent_pipeline' ] else : raise TypeError ( entity = 'parent_pipeline' , expected_type = dict , actual_type = type ( d [ 'parent_pipeline' ] ) )
Create a Stage from a dictionary . The change is in inplace .
2,666
def _make_spec_file ( self ) : if issubclass ( BdistRPMCommand , object ) : spec_file = super ( BdistRPMCommand , self ) . _make_spec_file ( ) else : spec_file = bdist_rpm . _make_spec_file ( self ) if sys . version_info [ 0 ] < 3 : python_package = "python" else : python_package = "python3" description = [ ] summary = "" in_description = False python_spec_file = [ ] for line in spec_file : if line . startswith ( "Summary: " ) : summary = line elif line . startswith ( "BuildRequires: " ) : line = "BuildRequires: {0:s}-setuptools" . format ( python_package ) elif line . startswith ( "Requires: " ) : if python_package == "python3" : line = line . replace ( "python" , "python3" ) elif line . startswith ( "%description" ) : in_description = True elif line . startswith ( "%files" ) : line = "%files -f INSTALLED_FILES -n {0:s}-%{{name}}" . format ( python_package ) elif line . startswith ( "%prep" ) : in_description = False python_spec_file . append ( "%package -n {0:s}-%{{name}}" . format ( python_package ) ) python_spec_file . append ( "{0:s}" . format ( summary ) ) python_spec_file . append ( "" ) python_spec_file . append ( "%description -n {0:s}-%{{name}}" . format ( python_package ) ) python_spec_file . extend ( description ) elif in_description : if not description and not line : continue description . append ( line ) python_spec_file . append ( line ) return python_spec_file
Generates the text of an RPM spec file .
2,667
def resolve ( self , name ) : for scope in reversed ( self . scopes ) : try : return structured . resolve ( scope , name ) except ( KeyError , AttributeError ) : continue raise AttributeError ( name )
Call IStructured . resolve across all scopes and return first hit .
2,668
def reflect ( self , name ) : result = None for scope in reversed ( self . scopes ) : try : if isinstance ( scope , type ) : result = structured . reflect_static_member ( scope , name ) else : result = structured . reflect_runtime_member ( scope , name ) if result is not None : return result except ( NotImplementedError , KeyError , AttributeError ) : continue return protocol . AnyType
Reflect name starting with local scope all the way up to global .
2,669
def reflect_runtime_member ( self , name ) : for scope in reversed ( self . scopes ) : try : return structured . reflect_runtime_member ( scope , name ) except ( NotImplementedError , KeyError , AttributeError ) : continue return protocol . AnyType
Reflect name using ONLY runtime reflection .
2,670
def reflect_static_member ( cls , name ) : for scope in reversed ( cls . scopes ) : try : return structured . reflect_static_member ( scope , name ) except ( NotImplementedError , KeyError , AttributeError ) : continue return protocol . AnyType
Reflect name using ONLY static reflection .
2,671
def get_hostmap ( profile ) : hostmap = dict ( ) for entry in profile : if entry [ ru . EVENT ] == 'hostname' : hostmap [ entry [ ru . UID ] ] = entry [ ru . MSG ] return hostmap
We abuse the profile combination to also derive a pilot - host map which will tell us on what exact host each pilot has been running . To do so we check for the PMGR_ACTIVE advance event in agent_0 . prof and use the NTP sync info to associate a hostname .
2,672
def get_hostmap_deprecated ( profiles ) : hostmap = dict ( ) for pname , prof in profiles . iteritems ( ) : if not len ( prof ) : continue if not prof [ 0 ] [ ru . MSG ] : continue host , ip , _ , _ , _ = prof [ 0 ] [ ru . MSG ] . split ( ':' ) host_id = '%s:%s' % ( host , ip ) for row in prof : if 'agent_0.prof' in pname and row [ ru . EVENT ] == 'advance' and row [ ru . STATE ] == rps . PMGR_ACTIVE : hostmap [ row [ ru . UID ] ] = host_id break return hostmap
This method mangles combine_profiles and get_hostmap and is deprecated . At this point it only returns the hostmap
2,673
def categorize_metrics ( self ) : category = self . cleaned_data [ 'category_name' ] metrics = self . cleaned_data [ 'metrics' ] self . r . reset_category ( category , metrics )
Called only on a valid form this method will place the chosen metrics in the given catgory .
2,674
def match ( self , f , * args ) : try : match = f ( self . tokenizer , * args ) except StopIteration : return if match is None : return if not isinstance ( match , grammar . TokenMatch ) : raise TypeError ( "Invalid grammar function %r returned %r." % ( f , match ) ) self . matched = match return match
Match grammar function f against next token and set self . matched .
2,675
def reject ( self , f , * args ) : match = self . match ( f , * args ) if match : token = self . peek ( 0 ) raise errors . EfilterParseError ( query = self . tokenizer . source , token = token , message = "Was not expecting a %s here." % token . name )
Like match but throw a parse error if f matches .
2,676
def expect ( self , f , * args ) : match = self . accept ( f , * args ) if match : return match try : func_name = f . func_name except AttributeError : func_name = "<unnamed grammar function>" start , end = self . current_position ( ) raise errors . EfilterParseError ( query = self . tokenizer . source , start = start , end = end , message = "Was expecting %s here." % ( func_name ) )
Like accept but throws a parse error if f doesn t match .
2,677
def solve_var ( expr , vars ) : try : return Result ( structured . resolve ( vars , expr . value ) , ( ) ) except ( KeyError , AttributeError ) as e : raise errors . EfilterKeyError ( root = expr , key = expr . value , message = e , query = expr . source ) except ( TypeError , ValueError ) as e : if vars . locals is None : raise errors . EfilterNoneError ( root = expr , query = expr . source , message = "Trying to access member %r of a null." % expr . value ) else : raise errors . EfilterTypeError ( root = expr , query = expr . source , message = "%r (vars: %r)" % ( e , vars ) ) except NotImplementedError as e : raise errors . EfilterError ( root = expr , query = expr . source , message = "Trying to access member %r of an instance of %r." % ( expr . value , type ( vars ) ) )
Returns the value of the var named in the expression .
2,678
def solve_repeat ( expr , vars ) : try : result = repeated . meld ( * [ solve ( x , vars ) . value for x in expr . children ] ) return Result ( result , ( ) ) except TypeError : raise errors . EfilterTypeError ( root = expr , query = expr . source , message = "All values in a repeated value must be of the same type." )
Build a repeated value from subexpressions .
2,679
def solve_tuple ( expr , vars ) : result = tuple ( solve ( x , vars ) . value for x in expr . children ) return Result ( result , ( ) )
Build a tuple from subexpressions .
2,680
def solve_ifelse ( expr , vars ) : for condition , result in expr . conditions ( ) : if boolean . asbool ( solve ( condition , vars ) . value ) : return solve ( result , vars ) return solve ( expr . default ( ) , vars )
Evaluate conditions and return the one that matches .
2,681
def solve_map ( expr , vars ) : lhs_values , _ = __solve_for_repeated ( expr . lhs , vars ) def lazy_map ( ) : try : for lhs_value in repeated . getvalues ( lhs_values ) : yield solve ( expr . rhs , __nest_scope ( expr . lhs , vars , lhs_value ) ) . value except errors . EfilterNoneError as error : error . root = expr raise return Result ( repeated . lazy ( lazy_map ) , ( ) )
Solves the map - form by recursively calling its RHS with new vars .
2,682
def solve_let ( expr , vars ) : lhs_value = solve ( expr . lhs , vars ) . value if not isinstance ( lhs_value , structured . IStructured ) : raise errors . EfilterTypeError ( root = expr . lhs , query = expr . original , message = "The LHS of 'let' must evaluate to an IStructured. Got %r." % ( lhs_value , ) ) return solve ( expr . rhs , __nest_scope ( expr . lhs , vars , lhs_value ) )
Solves a let - form by calling RHS with nested scope .
2,683
def solve_filter ( expr , vars ) : lhs_values , _ = __solve_for_repeated ( expr . lhs , vars ) def lazy_filter ( ) : for lhs_value in repeated . getvalues ( lhs_values ) : if solve ( expr . rhs , __nest_scope ( expr . lhs , vars , lhs_value ) ) . value : yield lhs_value return Result ( repeated . lazy ( lazy_filter ) , ( ) )
Filter values on the LHS by evaluating RHS with each value .
2,684
def solve_sort ( expr , vars ) : lhs_values = repeated . getvalues ( __solve_for_repeated ( expr . lhs , vars ) [ 0 ] ) sort_expression = expr . rhs def _key_func ( x ) : return solve ( sort_expression , __nest_scope ( expr . lhs , vars , x ) ) . value results = ordered . ordered ( lhs_values , key_func = _key_func ) return Result ( repeated . meld ( * results ) , ( ) )
Sort values on the LHS by the value they yield when passed to RHS .
2,685
def solve_each ( expr , vars ) : lhs_values , _ = __solve_for_repeated ( expr . lhs , vars ) for lhs_value in repeated . getvalues ( lhs_values ) : result = solve ( expr . rhs , __nest_scope ( expr . lhs , vars , lhs_value ) ) if not result . value : return result . _replace ( value = False ) return Result ( True , ( ) )
Return True if RHS evaluates to a true value with each state of LHS .
2,686
def solve_cast ( expr , vars ) : lhs = solve ( expr . lhs , vars ) . value t = solve ( expr . rhs , vars ) . value if t is None : raise errors . EfilterTypeError ( root = expr , query = expr . source , message = "Cannot find type named %r." % expr . rhs . value ) if not isinstance ( t , type ) : raise errors . EfilterTypeError ( root = expr . rhs , query = expr . source , message = "%r is not a type and cannot be used with 'cast'." % ( t , ) ) try : cast_value = t ( lhs ) except TypeError : raise errors . EfilterTypeError ( root = expr , query = expr . source , message = "Invalid cast %s -> %s." % ( type ( lhs ) , t ) ) return Result ( cast_value , ( ) )
Get cast LHS to RHS .
2,687
def solve_isinstance ( expr , vars ) : lhs = solve ( expr . lhs , vars ) try : t = solve ( expr . rhs , vars ) . value except errors . EfilterKeyError : t = None if t is None : raise errors . EfilterTypeError ( root = expr . rhs , query = expr . source , message = "Cannot find type named %r." % expr . rhs . value ) if not isinstance ( t , type ) : raise errors . EfilterTypeError ( root = expr . rhs , query = expr . source , message = "%r is not a type and cannot be used with 'isa'." % ( t , ) ) return Result ( protocol . implements ( lhs . value , t ) , ( ) )
Typecheck whether LHS is type on the RHS .
2,688
def set_version ( mod_root ) : try : version_base = None version_detail = None src_root = os . path . dirname ( __file__ ) if not src_root : src_root = '.' with open ( src_root + '/VERSION' , 'r' ) as f : version_base = f . readline ( ) . strip ( ) p = sp . Popen ( 'cd %s ; ' 'test -z `git rev-parse --show-prefix` || exit -1; ' 'tag=`git describe --tags --always` 2>/dev/null ; ' 'branch=`git branch | grep -e "^*" | cut -f 2- -d " "` 2>/dev/null ; ' 'echo $tag@$branch' % src_root , stdout = sp . PIPE , stderr = sp . STDOUT , shell = True ) version_detail = str ( p . communicate ( ) [ 0 ] . strip ( ) ) version_detail = version_detail . replace ( 'detached from ' , 'detached-' ) version_detail = re . sub ( '[/ ]+' , '-' , version_detail ) version_detail = re . sub ( '[^[email protected]]+' , '' , version_detail ) if p . returncode != 0 or version_detail == '@' or 'git-error' in version_detail or 'not-a-git-repo' in version_detail or 'not-found' in version_detail or 'fatal' in version_detail : version = version_base elif '@' not in version_base : version = '%s-%s' % ( version_base , version_detail ) else : version = version_base path = '%s/%s' % ( src_root , mod_root ) with open ( path + "/VERSION" , "w" ) as f : f . write ( version + "\n" ) sdist_name = "%s-%s.tar.gz" % ( name , version ) sdist_name = sdist_name . replace ( '/' , '-' ) sdist_name = sdist_name . replace ( '@' , '-' ) sdist_name = sdist_name . replace ( '#' , '-' ) sdist_name = sdist_name . replace ( '_' , '-' ) if '--record' in sys . argv or 'bdist_egg' in sys . argv or 'bdist_wheel' in sys . argv : shutil . move ( "VERSION" , "VERSION.bak" ) shutil . copy ( "%s/VERSION" % path , "VERSION" ) os . system ( "python setup.py sdist" ) shutil . copy ( 'dist/%s' % sdist_name , '%s/%s' % ( mod_root , sdist_name ) ) shutil . move ( "VERSION.bak" , "VERSION" ) with open ( path + "/SDIST" , "w" ) as f : f . write ( sdist_name + "\n" ) return version_base , version_detail , sdist_name except Exception as e : raise RuntimeError ( 'Could not extract/set version: %s' % e )
mod_root a VERSION file containes the version strings is created in mod_root during installation . That file is used at runtime to get the version information .
2,689
def isgood ( name ) : if not isbad ( name ) : if name . endswith ( '.py' ) or name . endswith ( '.json' ) or name . endswith ( '.tar' ) : return True return False
Whether name should be installed
2,690
def meld ( * values ) : values = [ x for x in values if x is not None ] if not values : return None result = repeated ( * values ) if isrepeating ( result ) : return result return getvalue ( result )
Return the repeated value or the first value if there s only one .
2,691
def getvalue ( x ) : if isrepeating ( x ) : raise TypeError ( "Ambiguous call to getvalue for %r which has more than one value." % x ) for value in getvalues ( x ) : return value
Return the single value of x or raise TypError if more than one value .
2,692
def to_dict ( self ) : task_desc_as_dict = { 'uid' : self . _uid , 'name' : self . _name , 'state' : self . _state , 'state_history' : self . _state_history , 'pre_exec' : self . _pre_exec , 'executable' : self . _executable , 'arguments' : self . _arguments , 'post_exec' : self . _post_exec , 'cpu_reqs' : self . _cpu_reqs , 'gpu_reqs' : self . _gpu_reqs , 'lfs_per_process' : self . _lfs_per_process , 'upload_input_data' : self . _upload_input_data , 'copy_input_data' : self . _copy_input_data , 'link_input_data' : self . _link_input_data , 'move_input_data' : self . _move_input_data , 'copy_output_data' : self . _copy_output_data , 'move_output_data' : self . _move_output_data , 'download_output_data' : self . _download_output_data , 'stdout' : self . _stdout , 'stderr' : self . _stderr , 'exit_code' : self . _exit_code , 'path' : self . _path , 'tag' : self . _tag , 'parent_stage' : self . _p_stage , 'parent_pipeline' : self . _p_pipeline , } return task_desc_as_dict
Convert current Task into a dictionary
2,693
def keyword ( tokens , expected ) : try : token = next ( iter ( tokens ) ) except StopIteration : return if token and token . name == "symbol" and token . value . lower ( ) == expected : return TokenMatch ( None , token . value , ( token , ) )
Case - insensitive keyword match .
2,694
def multi_keyword ( tokens , keyword_parts ) : tokens = iter ( tokens ) matched_tokens = [ ] limit = len ( keyword_parts ) for idx in six . moves . range ( limit ) : try : token = next ( tokens ) except StopIteration : return if ( not token or token . name != "symbol" or token . value . lower ( ) != keyword_parts [ idx ] ) : return matched_tokens . append ( token ) return TokenMatch ( None , token . value , matched_tokens )
Match a case - insensitive keyword consisting of multiple tokens .
2,695
def prefix ( tokens , operator_table ) : operator , matched_tokens = operator_table . prefix . match ( tokens ) if operator : return TokenMatch ( operator , None , matched_tokens )
Match a prefix of an operator .
2,696
def infix ( tokens , operator_table ) : operator , matched_tokens = operator_table . infix . match ( tokens ) if operator : return TokenMatch ( operator , None , matched_tokens )
Match an infix of an operator .
2,697
def suffix ( tokens , operator_table ) : operator , matched_tokens = operator_table . suffix . match ( tokens ) if operator : return TokenMatch ( operator , None , matched_tokens )
Match a suffix of an operator .
2,698
def match_tokens ( expected_tokens ) : if isinstance ( expected_tokens , Token ) : def _grammar_func ( tokens ) : try : next_token = next ( iter ( tokens ) ) except StopIteration : return if next_token == expected_tokens : return TokenMatch ( None , next_token . value , ( next_token , ) ) elif isinstance ( expected_tokens , tuple ) : match_len = len ( expected_tokens ) def _grammar_func ( tokens ) : upcoming = tuple ( itertools . islice ( tokens , match_len ) ) if upcoming == expected_tokens : return TokenMatch ( None , None , upcoming ) else : raise TypeError ( "'expected_tokens' must be an instance of Token or a tuple " "thereof. Got %r." % expected_tokens ) return _grammar_func
Generate a grammar function that will match expected_tokens only .
2,699
def expression ( self , previous_precedence = 0 ) : lhs = self . atom ( ) return self . operator ( lhs , previous_precedence )
An expression is an atom or an infix expression .