Columns: idx (int64, values 0 to 63k), question (string, lengths 61 to 4.03k), target (string, lengths 6 to 1.23k).
60,800
def _get_service_names ( self ) : master_info = None connection_errors = [ ] for sentinel in self . _sentinel . sentinels : try : master_info = sentinel . sentinel_masters ( ) break except ( redis . ConnectionError , redis . TimeoutError ) as e : connection_errors . append ( 'Failed to connect to {} due to error: "{}".' . format ( sentinel , e ) ) continue if master_info is None : raise redis . ConnectionError ( 'Could not get master info from Sentinel\n{}:' . format ( '\n' . join ( connection_errors ) ) ) return list ( master_info . keys ( ) )
Get a list of service names from Sentinel. Tries Sentinel hosts until one succeeds; if none succeed, raises a ConnectionError.
60,801
def timid_relpath ( arg ) : from os . path import isabs , relpath , sep if isabs ( arg ) : result = relpath ( arg ) if result . count ( sep ) + 1 < arg . count ( sep ) : return result return arg
convert an argument to a relative path, carefully: only absolute paths are converted, and only when the relative form is actually shorter
60,802
def ensure_virtualenv ( args , return_values ) : def adjust_options ( options , args ) : venv_path = return_values . venv_path = args [ 0 ] if venv_path == DEFAULT_VIRTUALENV_PATH or options . prompt == '<dirname>' : from os . path import abspath , basename , dirname options . prompt = '(%s)' % basename ( dirname ( abspath ( venv_path ) ) ) if options . python is None : source_python = None else : source_python = virtualenv . resolve_interpreter ( options . python ) destination_python = venv_python ( venv_path ) if exists ( destination_python ) : reason = invalid_virtualenv_reason ( venv_path , source_python , destination_python , options ) if reason : info ( 'Removing invalidated virtualenv. (%s)' % reason ) run ( ( 'rm' , '-rf' , venv_path ) ) else : info ( 'Keeping valid virtualenv from previous run.' ) raise SystemExit ( 0 ) import virtualenv virtualenv . adjust_options = adjust_options from sys import argv argv [ : ] = ( 'virtualenv' , ) + args info ( colorize ( argv ) ) raise_on_failure ( virtualenv . main ) if return_values . venv_path is not None : run ( ( 'rm' , '-rf' , join ( return_values . venv_path , 'local' ) ) )
Ensure we have a valid virtualenv.
60,803
def touch ( filename , timestamp ) : if timestamp is not None : timestamp = ( timestamp , timestamp ) from os import utime utime ( filename , timestamp )
set the mtime of a file
60,804
def pip_faster ( venv_path , pip_command , install , bootstrap_deps ) : execfile_ ( venv_executable ( venv_path , 'activate_this.py' ) ) from os import environ environ [ 'PIP_DISABLE_PIP_VERSION_CHECK' ] = '1' run ( ( 'pip' , 'install' ) + bootstrap_deps ) run ( pip_command + install )
install and run pip-faster
60,805
def raise_on_failure ( mainfunc ) : try : errors = mainfunc ( ) if errors : exit ( errors ) except CalledProcessError as error : exit ( error . returncode ) except SystemExit as error : if error . code : raise except KeyboardInterrupt : exit ( 1 )
raise if and only if mainfunc fails
60,806
def cache_installed_wheels ( index_url , installed_packages ) : for installed_package in installed_packages : if not _can_be_cached ( installed_package ) : continue _store_wheel_in_cache ( installed_package . link . path , index_url )
After installation, pip tells us what it installed and from where.
60,807
def pip ( args ) : from sys import stdout stdout . write ( colorize ( ( 'pip' , ) + args ) ) stdout . write ( '\n' ) stdout . flush ( ) return pipmodule . _internal . main ( list ( args ) )
Run pip in-process.
60,808
def dist_to_req ( dist ) : try : from pip . _internal . operations . freeze import FrozenRequirement except ImportError : from pip import FrozenRequirement orig_name , dist . project_name = dist . project_name , dist . key result = FrozenRequirement . from_dist ( dist , [ ] ) dist . project_name = orig_name return result
Make a pip.FrozenRequirement from a pkg_resources distribution object
60,809
def req_cycle ( req ) : cls = req . __class__ seen = { req . name } while isinstance ( req . comes_from , cls ) : req = req . comes_from if req . name in seen : return True else : seen . add ( req . name ) return False
is this requirement cyclic?
60,810
def pretty_req ( req ) : from copy import copy req = copy ( req ) req . link = None req . satisfied_by = None return req
return a copy of a pip requirement that is a bit more readable at the expense of removing some of its data
60,811
def trace_requirements ( requirements ) : requirements = tuple ( pretty_req ( r ) for r in requirements ) working_set = fresh_working_set ( ) from collections import deque queue = deque ( requirements ) queued = { _package_req_to_pkg_resources_req ( req . req ) for req in queue } errors = [ ] result = [ ] while queue : req = queue . popleft ( ) logger . debug ( 'tracing: %s' , req ) try : dist = working_set . find_normalized ( _package_req_to_pkg_resources_req ( req . req ) ) except pkg_resources . VersionConflict as conflict : dist = conflict . args [ 0 ] errors . append ( 'Error: version conflict: {} ({}) <-> {}' . format ( dist , timid_relpath ( dist . location ) , req ) ) assert dist is not None , 'Should be unreachable in pip8+' result . append ( dist_to_req ( dist ) ) extras = [ extra for extra in req . extras if extra in dist . extras ] for sub_req in sorted ( dist . requires ( extras = extras ) , key = lambda req : req . key ) : sub_req = InstallRequirement ( sub_req , req ) if req_cycle ( sub_req ) : logger . warning ( 'Circular dependency! %s' , sub_req ) continue elif sub_req . req in queued : logger . debug ( 'already queued: %s' , sub_req ) continue else : logger . debug ( 'adding sub-requirement %s' , sub_req ) queue . append ( sub_req ) queued . add ( sub_req . req ) if errors : raise InstallationError ( '\n' . join ( errors ) ) return result
given an iterable of pip InstallRequirements, return the set of required packages given their transitive requirements.
60,812
def patch ( attrs , updates ) : orig = { } for attr , value in updates : orig [ attr ] = attrs [ attr ] attrs [ attr ] = value return orig
Perform a set of updates to an attribute dictionary and return the original values.
60,813
def patched ( attrs , updates ) : orig = patch ( attrs , updates . items ( ) ) try : yield orig finally : patch ( attrs , orig . items ( ) )
A context in which some attributes temporarily have a modified value.
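The patched generator above is used as a context manager elsewhere in this listing (the pipfaster_* helpers return it to a with-statement), so the original source presumably wraps it with contextlib.contextmanager. A minimal self-contained sketch of the same patch/restore pattern, with made-up attribute names:

import contextlib

def patch(attrs, updates):
    # Apply (attr, value) pairs to the mapping and remember the original values.
    orig = {}
    for attr, value in updates:
        orig[attr] = attrs[attr]
        attrs[attr] = value
    return orig

@contextlib.contextmanager
def patched(attrs, updates):
    # Temporarily apply the updates, then restore the saved originals on exit.
    orig = patch(attrs, updates.items())
    try:
        yield orig
    finally:
        patch(attrs, orig.items())

settings = {'TIMEOUT': 30, 'RETRIES': 3}  # hypothetical attribute dictionary
with patched(settings, {'TIMEOUT': 1}):
    assert settings['TIMEOUT'] == 1       # modified inside the context
assert settings['TIMEOUT'] == 30          # restored afterwards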
60,814
def pipfaster_packagefinder ( ) : try : from pip . _internal . cli import base_command except ImportError : from pip . _internal import basecommand as base_command return patched ( vars ( base_command ) , { 'PackageFinder' : FasterPackageFinder } )
Provide a short-circuited search when the requirement is pinned and appears on disk.
60,815
def pipfaster_download_cacher ( index_urls ) : from pip . _internal import download orig = download . _download_http_url patched_fn = get_patched_download_http_url ( orig , index_urls ) return patched ( vars ( download ) , { '_download_http_url' : patched_fn } )
vanilla pip stores a cache of the HTTP session, not the wheel files. We intercept the download and save those files into our cache.
60,816
def run ( self , options , args ) : if options . prune : previously_installed = pip_get_installed ( ) index_urls = [ options . index_url ] + options . extra_index_urls with pipfaster_download_cacher ( index_urls ) : requirement_set = super ( FasterInstallCommand , self ) . run ( options , args , ) required = requirement_set . requirements . values ( ) if not options . extra_index_urls : cache_installed_wheels ( options . index_url , requirement_set . successfully_downloaded ) if not options . ignore_dependencies : required = trace_requirements ( required ) if not options . prune : return requirement_set extraneous = ( reqnames ( previously_installed ) - reqnames ( required ) - reqnames ( trace_requirements ( [ install_req_from_line ( 'venv-update' ) ] ) ) - frozenset ( ( 'pkg-resources' , ) ) ) if extraneous : extraneous = sorted ( extraneous ) pip ( ( 'uninstall' , '--yes' ) + tuple ( extraneous ) )
update install options with caching values
60,817
def setEncoder ( self , encoder ) : if not encoder : self . _encoder = json . JSONEncoder ( ) else : self . _encoder = encoder self . _encode = self . _encoder . encode
Sets the client's encoder. encoder should be an instance of a json.JSONEncoder class
60,818
def setDecoder ( self , decoder ) : if not decoder : self . _decoder = json . JSONDecoder ( ) else : self . _decoder = decoder self . _decode = self . _decoder . decode
Sets the client's decoder. decoder should be an instance of a json.JSONDecoder class
60,819
def jsondel ( self , name , path = Path . rootPath ( ) ) : return self . execute_command ( 'JSON.DEL' , name , str_path ( path ) )
Deletes the JSON value stored at key name under path
60,820
def jsonget ( self , name , * args ) : pieces = [ name ] if len ( args ) == 0 : pieces . append ( Path . rootPath ( ) ) else : for p in args : pieces . append ( str_path ( p ) ) try : return self . execute_command ( 'JSON.GET' , * pieces ) except TypeError : return None
Get the object stored as a JSON value at key name. args is zero or more paths, and defaults to the root path
60,821
def jsonmget ( self , path , * args ) : pieces = [ ] pieces . extend ( args ) pieces . append ( str_path ( path ) ) return self . execute_command ( 'JSON.MGET' , * pieces )
Gets the objects stored as JSON values under path from the keys given in args
60,822
def jsonset ( self , name , path , obj , nx = False , xx = False ) : pieces = [ name , str_path ( path ) , self . _encode ( obj ) ] if nx and xx : raise Exception ( 'nx and xx are mutually exclusive: use one, the ' 'other or neither - but not both' ) elif nx : pieces . append ( 'NX' ) elif xx : pieces . append ( 'XX' ) return self . execute_command ( 'JSON.SET' , * pieces )
Set the JSON value at key name under the path to obj. nx: if set to True, set the value only if it does not exist. xx: if set to True, set the value only if it exists
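The JSON.* wrappers in this group look like the rejson-py Client API. A minimal usage sketch under that assumption; the connection details and key names are illustrative only:

from rejson import Client, Path

rj = Client(host='localhost', port=6379, decode_responses=True)
# Store a document at the root path, then read one field back.
rj.jsonset('person', Path.rootPath(), {'name': 'Ann', 'age': 30})
print(rj.jsonget('person', Path('.age')))  # -> 30
# nx/xx mirror SET semantics: create-only vs. update-only.
rj.jsonset('person', Path('.age'), 31, xx=True)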
60,823
def jsontype ( self , name , path = Path . rootPath ( ) ) : return self . execute_command ( 'JSON.TYPE' , name , str_path ( path ) )
Gets the type of the JSON value under path from key name
60,824
def jsonstrappend ( self , name , string , path = Path . rootPath ( ) ) : return self . execute_command ( 'JSON.STRAPPEND' , name , str_path ( path ) , self . _encode ( string ) )
Appends to the string JSON value under path at key name the provided string
60,825
def jsonstrlen ( self , name , path = Path . rootPath ( ) ) : return self . execute_command ( 'JSON.STRLEN' , name , str_path ( path ) )
Returns the length of the string JSON value under path at key name
60,826
def jsonarrappend ( self , name , path = Path . rootPath ( ) , * args ) : pieces = [ name , str_path ( path ) ] for o in args : pieces . append ( self . _encode ( o ) ) return self . execute_command ( 'JSON.ARRAPPEND' , * pieces )
Appends the objects args to the array under the path in key name
60,827
def jsonarrindex ( self , name , path , scalar , start = 0 , stop = - 1 ) : return self . execute_command ( 'JSON.ARRINDEX' , name , str_path ( path ) , self . _encode ( scalar ) , start , stop )
Returns the index of scalar in the JSON array under path at key name. The search can be limited using the optional inclusive start and exclusive stop indices.
60,828
def jsonarrinsert ( self , name , path , index , * args ) : pieces = [ name , str_path ( path ) , index ] for o in args : pieces . append ( self . _encode ( o ) ) return self . execute_command ( 'JSON.ARRINSERT' , * pieces )
Inserts the objects args to the array at index index under the path in key name
60,829
def jsonarrlen ( self , name , path = Path . rootPath ( ) ) : return self . execute_command ( 'JSON.ARRLEN' , name , str_path ( path ) )
Returns the length of the array JSON value under path at key name
60,830
def jsonarrpop ( self , name , path = Path . rootPath ( ) , index = - 1 ) : return self . execute_command ( 'JSON.ARRPOP' , name , str_path ( path ) , index )
Pops the element at index in the array JSON value under path at key name
60,831
def jsonarrtrim ( self , name , path , start , stop ) : return self . execute_command ( 'JSON.ARRTRIM' , name , str_path ( path ) , start , stop )
Trim the array JSON value under path at key name to the inclusive range given by start and stop
60,832
def jsonobjkeys ( self , name , path = Path . rootPath ( ) ) : return self . execute_command ( 'JSON.OBJKEYS' , name , str_path ( path ) )
Returns the key names in the dictionary JSON value under path at key name
60,833
def jsonobjlen ( self , name , path = Path . rootPath ( ) ) : return self . execute_command ( 'JSON.OBJLEN' , name , str_path ( path ) )
Returns the length of the dictionary JSON value under path at key name
60,834
def get_pg_info ( ) : from psycopg2 import connect , OperationalError log . debug ( "entered get_pg_info" ) try : conf = settings . DATABASES [ 'default' ] database = conf [ "NAME" ] user = conf [ "USER" ] host = conf [ "HOST" ] port = conf [ "PORT" ] password = conf [ "PASSWORD" ] except ( AttributeError , KeyError ) : log . error ( "No PostgreSQL connection info found in settings." ) return { "status" : NO_CONFIG } except TypeError : return { "status" : DOWN } log . debug ( "got past getting conf" ) try : start = datetime . now ( ) connection = connect ( database = database , user = user , host = host , port = port , password = password , connect_timeout = TIMEOUT_SECONDS , ) log . debug ( "at end of context manager" ) micro = ( datetime . now ( ) - start ) . microseconds connection . close ( ) except ( OperationalError , KeyError ) as ex : log . error ( "No PostgreSQL connection info found in settings. %s Error: %s" , conf , ex ) return { "status" : DOWN } log . debug ( "got to end of postgres check successfully" ) return { "status" : UP , "response_microseconds" : micro }
Check PostgreSQL connection.
60,835
def get_redis_info ( ) : from kombu . utils . url import _parse_url as parse_redis_url from redis import ( StrictRedis , ConnectionError as RedisConnectionError , ResponseError as RedisResponseError , ) for conf_name in ( 'REDIS_URL' , 'BROKER_URL' , 'CELERY_BROKER_URL' ) : if hasattr ( settings , conf_name ) : url = getattr ( settings , conf_name ) if url . startswith ( 'redis://' ) : break else : log . error ( "No redis connection info found in settings." ) return { "status" : NO_CONFIG } _ , host , port , _ , password , database , _ = parse_redis_url ( url ) start = datetime . now ( ) try : rdb = StrictRedis ( host = host , port = port , db = database , password = password , socket_timeout = TIMEOUT_SECONDS , ) info = rdb . info ( ) except ( RedisConnectionError , TypeError ) as ex : log . error ( "Error making Redis connection: %s" , ex . args ) return { "status" : DOWN } except RedisResponseError as ex : log . error ( "Bad Redis response: %s" , ex . args ) return { "status" : DOWN , "message" : "auth error" } micro = ( datetime . now ( ) - start ) . microseconds del rdb ret = { "status" : UP , "response_microseconds" : micro , } fields = ( "uptime_in_seconds" , "used_memory" , "used_memory_peak" ) ret . update ( { x : info [ x ] for x in fields } ) return ret
Check Redis connection.
60,836
def get_elasticsearch_info ( ) : from elasticsearch import ( Elasticsearch , ConnectionError as ESConnectionError ) if hasattr ( settings , 'ELASTICSEARCH_URL' ) : url = settings . ELASTICSEARCH_URL else : return { "status" : NO_CONFIG } start = datetime . now ( ) try : search = Elasticsearch ( url , request_timeout = TIMEOUT_SECONDS ) search . info ( ) except ESConnectionError : return { "status" : DOWN } del search micro = ( datetime . now ( ) - start ) . microseconds return { "status" : UP , "response_microseconds" : micro , }
Check Elasticsearch connection.
60,837
def get_celery_info ( ) : import celery if not getattr ( settings , 'USE_CELERY' , False ) : log . error ( "No celery config found. Set USE_CELERY in settings to enable." ) return { "status" : NO_CONFIG } start = datetime . now ( ) try : app = celery . Celery ( 'tasks' ) app . config_from_object ( 'django.conf:settings' , namespace = 'CELERY' ) app . connection ( ) . ensure_connection ( max_retries = 1 ) celery_stats = celery . task . control . inspect ( ) . stats ( ) if not celery_stats : log . error ( "No running Celery workers were found." ) return { "status" : DOWN , "message" : "No running Celery workers" } except Exception as exp : log . error ( "Error connecting to the backend: %s" , exp ) return { "status" : DOWN , "message" : "Error connecting to the backend" } return { "status" : UP , "response_microseconds" : ( datetime . now ( ) - start ) . microseconds }
Check celery availability
60,838
def get_certificate_info ( ) : if hasattr ( settings , 'MIT_WS_CERTIFICATE' ) and settings . MIT_WS_CERTIFICATE : mit_ws_certificate = settings . MIT_WS_CERTIFICATE else : return { "status" : NO_CONFIG } app_cert = OpenSSL . crypto . load_certificate ( OpenSSL . crypto . FILETYPE_PEM , ( mit_ws_certificate if not isinstance ( mit_ws_certificate , str ) else mit_ws_certificate . encode ( ) . decode ( 'unicode_escape' ) . encode ( ) ) ) app_cert_expiration = datetime . strptime ( app_cert . get_notAfter ( ) . decode ( 'ascii' ) , '%Y%m%d%H%M%SZ' ) date_delta = app_cert_expiration - datetime . now ( ) return { 'app_cert_expires' : app_cert_expiration . strftime ( '%Y-%m-%dT%H:%M:%S' ) , 'status' : UP if date_delta . days > 30 else DOWN }
checks app certificate expiry status
60,839
def _start ( self ) : if self . whoami is None : me = self . get_me ( ) if me . get ( 'ok' , False ) : self . whoami = me [ 'result' ] else : raise ValueError ( 'Bot Cannot request information, check ' 'api_key' )
Requests bot information based on the current api_key and sets self.whoami to a dictionary with the username, first_name and id of the configured bot.
60,840
def poll ( self , offset = None , poll_timeout = 600 , cooldown = 60 , debug = False ) : if self . config [ 'api_key' ] is None : raise ValueError ( 'config api_key is undefined' ) if offset or self . config . get ( 'offset' , None ) : self . offset = offset or self . config . get ( 'offset' , None ) self . _start ( ) while True : try : response = self . get_updates ( poll_timeout , self . offset ) if response . get ( 'ok' , False ) is False : raise ValueError ( response [ 'error' ] ) else : self . process_updates ( response ) except Exception as e : print ( 'Error: Unknown Exception' ) print ( e ) if debug : raise e else : time . sleep ( cooldown )
These should also be in the config section, but some are here for overrides
60,841
def get_attr ( obj , attr , default = None ) : if '.' not in attr : return getattr ( obj , attr , default ) else : L = attr . split ( '.' ) return get_attr ( getattr ( obj , L [ 0 ] , default ) , '.' . join ( L [ 1 : ] ) , default )
Recursively get an object's attribute. May use dot notation.
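A tiny self-contained example of the dotted lookup performed by get_attr; the class and attribute names are made up for illustration:

class Profile:
    display_name = 'Ann'

class User:
    profile = Profile()

user = User()
print(get_attr(user, 'profile.display_name', 'unknown'))  # -> 'Ann'
print(get_attr(user, 'profile.missing', 'unknown'))       # -> 'unknown' (default)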
60,842
def asset ( path ) : commit = bitcaster . get_full_version ( ) return mark_safe ( '{0}?{1}' . format ( _static ( path ) , commit ) )
Join the given path with the STATIC_URL setting.
60,843
def get_client_ip ( request ) : try : return request . META [ 'HTTP_X_FORWARDED_FOR' ] . split ( ',' ) [ 0 ] . strip ( ) except ( KeyError , IndexError ) : return request . META . get ( 'REMOTE_ADDR' )
Naively yank the first IP address in an X-Forwarded-For header and assume this is correct.
60,844
def _pack_image ( filename , max_size , form_field = 'image' , f = None ) : if f is None : try : if os . path . getsize ( filename ) > ( max_size * 1024 ) : raise TweepError ( 'File is too big, must be less than %skb.' % max_size ) except os . error as e : raise TweepError ( 'Unable to access file: %s' % e . strerror ) fp = open ( filename , 'rb' ) else : f . seek ( 0 , 2 ) if f . tell ( ) > ( max_size * 1024 ) : raise TweepError ( 'File is too big, must be less than %skb.' % max_size ) f . seek ( 0 ) fp = f file_type = mimetypes . guess_type ( filename ) if file_type is None : raise TweepError ( 'Could not determine file type' ) file_type = file_type [ 0 ] if file_type not in [ 'image/gif' , 'image/jpeg' , 'image/png' ] : raise TweepError ( 'Invalid file type for image: %s' % file_type ) if isinstance ( filename , six . text_type ) : filename = filename . encode ( 'utf-8' ) BOUNDARY = b'Tw3ePy' body = [ ] body . append ( b'--' + BOUNDARY ) body . append ( 'Content-Disposition: form-data; name="{0}";' ' filename="{1}"' . format ( form_field , filename ) . encode ( 'utf-8' ) ) body . append ( 'Content-Type: {0}' . format ( file_type ) . encode ( 'utf-8' ) ) body . append ( b'' ) body . append ( fp . read ( ) ) body . append ( b'--' + BOUNDARY + b'--' ) body . append ( b'' ) fp . close ( ) body = b'\r\n' . join ( body ) headers = { 'Content-Type' : 'multipart/form-data; boundary=Tw3ePy' , 'Content-Length' : str ( len ( body ) ) } return headers , body
Pack image from file into multipart-formdata post body
60,845
def channel_submit_row ( context ) : change = context [ 'change' ] is_popup = context [ 'is_popup' ] save_as = context [ 'save_as' ] show_save = context . get ( 'show_save' , True ) show_save_and_continue = context . get ( 'show_save_and_continue' , True ) can_delete = context [ 'has_delete_permission' ] can_add = context [ 'has_add_permission' ] can_change = context [ 'has_change_permission' ] ctx = Context ( context ) ctx . update ( { 'show_delete_link' : ( not is_popup and can_delete and change and context . get ( 'show_delete' , True ) ) , 'show_save_as_new' : not is_popup and change and save_as , 'show_save_and_add_another' : ( can_add and not is_popup and ( not save_as or context [ 'add' ] ) ) , 'show_save_and_continue' : ( not is_popup and can_change and show_save_and_continue ) , 'show_save' : show_save , } ) return ctx
Display the row of buttons for delete and save.
60,846
def get_setting ( self , name ) : notfound = object ( ) "get configuration from 'constance.config' first " value = getattr ( config , name , notfound ) if name . endswith ( '_WHITELISTED_DOMAINS' ) : if value : return value . split ( ',' ) else : return [ ] if value is notfound : value = getattr ( settings , name ) if name . endswith ( '_URL' ) : if isinstance ( value , Promise ) : value = force_text ( value ) value = resolve_url ( value ) return value
get configuration from 'constance.config' first
60,847
def debug ( self , request , message , extra_tags = '' , fail_silently = False ) : add ( self . target_name , request , constants . DEBUG , message , extra_tags = extra_tags , fail_silently = fail_silently )
Add a message with the DEBUG level.
60,848
def info ( self , request , message , extra_tags = '' , fail_silently = False ) : add ( self . target_name , request , constants . INFO , message , extra_tags = extra_tags , fail_silently = fail_silently )
Add a message with the INFO level.
60,849
def success ( self , request , message , extra_tags = '' , fail_silently = False ) : add ( self . target_name , request , constants . SUCCESS , message , extra_tags = extra_tags , fail_silently = fail_silently )
Add a message with the SUCCESS level.
60,850
def warning ( self , request , message , extra_tags = '' , fail_silently = False ) : add ( self . target_name , request , constants . WARNING , message , extra_tags = extra_tags , fail_silently = fail_silently )
Add a message with the WARNING level.
60,851
def error ( self , request , message , extra_tags = '' , fail_silently = False ) : add ( self . target_name , request , constants . ERROR , message , extra_tags = extra_tags , fail_silently = fail_silently )
Add a message with the ERROR level.
60,852
def signup ( request , signup_form = SignupForm , template_name = 'userena/signup_form.html' , success_url = None , extra_context = None ) : if userena_settings . USERENA_DISABLE_SIGNUP : raise PermissionDenied if userena_settings . USERENA_WITHOUT_USERNAMES and ( signup_form == SignupForm ) : signup_form = SignupFormOnlyEmail form = signup_form ( ) if request . method == 'POST' : form = signup_form ( request . POST , request . FILES ) if form . is_valid ( ) : user = form . save ( ) userena_signals . signup_complete . send ( sender = None , user = user ) if success_url : redirect_to = success_url else : redirect_to = reverse ( 'userena_signup_complete' , kwargs = { 'username' : user . username } ) if request . user . is_authenticated ( ) : logout ( request ) if ( userena_settings . USERENA_SIGNIN_AFTER_SIGNUP and not userena_settings . USERENA_ACTIVATION_REQUIRED ) : user = authenticate ( identification = user . email , check_password = False ) login ( request , user ) return redirect ( redirect_to ) if not extra_context : extra_context = dict ( ) extra_context [ 'form' ] = form return ExtraContextTemplateView . as_view ( template_name = template_name , extra_context = extra_context ) ( request )
Signup of an account.
60,853
def extend ( self , other ) : overlap = [ key for key in other . defaults if key in self . defaults ] if overlap : raise ValueError ( "Duplicate hyperparameter(s): %s" % " " . join ( overlap ) ) new = dict ( self . defaults ) new . update ( other . defaults ) return HyperparameterDefaults ( ** new )
Return a new HyperparameterDefaults instance containing the hyperparameters from the current instance combined with those from other.
60,854
def with_defaults ( self , obj ) : self . check_valid_keys ( obj ) obj = dict ( obj ) for ( key , value ) in self . defaults . items ( ) : if key not in obj : obj [ key ] = value return obj
Given a dict of hyperparameter settings, return a dict containing those settings augmented by the defaults for any keys missing from the dict.
60,855
def subselect ( self , obj ) : return dict ( ( key , value ) for ( key , value ) in obj . items ( ) if key in self . defaults )
Filter a dict of hyperparameter settings to only those keys defined in this HyperparameterDefaults.
60,856
def check_valid_keys ( self , obj ) : invalid_keys = [ x for x in obj if x not in self . defaults ] if invalid_keys : raise ValueError ( "No such model parameters: %s. Valid parameters are: %s" % ( " " . join ( invalid_keys ) , " " . join ( self . defaults ) ) )
Given a dict of hyperparameter settings, throw an exception if any keys are not defined in this HyperparameterDefaults instance.
60,857
def models_grid ( self , ** kwargs ) : self . check_valid_keys ( kwargs ) for ( key , value ) in kwargs . items ( ) : if not isinstance ( value , list ) : raise ValueError ( "All parameters must be lists, but %s is %s" % ( key , str ( type ( value ) ) ) ) parameters = dict ( ( key , [ value ] ) for ( key , value ) in self . defaults . items ( ) ) parameters . update ( kwargs ) parameter_names = list ( parameters ) parameter_values = [ parameters [ name ] for name in parameter_names ] models = [ dict ( zip ( parameter_names , model_values ) ) for model_values in itertools . product ( * parameter_values ) ] return models
Make a grid of models by taking the Cartesian product of all specified model parameter lists.
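A standalone sketch of the Cartesian-product expansion that models_grid performs, written with itertools only; the parameter names and values are invented for illustration:

import itertools

defaults = {'layer_size': 16, 'dropout': 0.0, 'activation': 'relu'}
overrides = {'layer_size': [16, 64], 'dropout': [0.0, 0.5]}  # every override must be a list

parameters = {key: [value] for key, value in defaults.items()}
parameters.update(overrides)
names = list(parameters)
models = [dict(zip(names, values))
          for values in itertools.product(*(parameters[name] for name in names))]
print(len(models))  # -> 4 models; 'activation' keeps its default in each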
60,858
def fixed_length_vector_encoded_sequences ( self , vector_encoding_name ) : cache_key = ( "fixed_length_vector_encoding" , vector_encoding_name ) if cache_key not in self . encoding_cache : index_encoded_matrix = amino_acid . index_encoding ( self . fixed_length_sequences . values , amino_acid . AMINO_ACID_INDEX ) vector_encoded = amino_acid . fixed_vectors_encoding ( index_encoded_matrix , amino_acid . ENCODING_DATA_FRAMES [ vector_encoding_name ] ) result = vector_encoded [ self . indices ] self . encoding_cache [ cache_key ] = result return self . encoding_cache [ cache_key ]
Encode alleles.
60,859
def index_encoding ( sequences , letter_to_index_dict ) : df = pandas . DataFrame ( iter ( s ) for s in sequences ) result = df . replace ( letter_to_index_dict ) return result . values
Encode a sequence of same-length strings to a matrix of integers of the same shape. The map from characters to integers is given by letter_to_index_dict.
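A quick self-contained check of the index-encoding idea, using a toy three-letter alphabet rather than the real amino-acid index:

import pandas

letter_to_index = {'A': 0, 'C': 1, 'D': 2}  # toy alphabet, not the real AMINO_ACID_INDEX
sequences = ['ACD', 'DCA']
df = pandas.DataFrame([list(s) for s in sequences])
encoded = df.replace(letter_to_index).values
print(encoded)  # -> [[0 1 2]
                #     [2 1 0]]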
60,860
def apply_hyperparameter_renames ( cls , hyperparameters ) : for ( from_name , to_name ) in cls . hyperparameter_renames . items ( ) : if from_name in hyperparameters : value = hyperparameters . pop ( from_name ) if to_name : hyperparameters [ to_name ] = value return hyperparameters
Handle hyperparameter renames.
60,861
def borrow_cached_network ( klass , network_json , network_weights ) : assert network_weights is not None key = klass . keras_network_cache_key ( network_json ) if key not in klass . KERAS_MODELS_CACHE : import keras . models network = keras . models . model_from_json ( network_json ) existing_weights = None else : ( network , existing_weights ) = klass . KERAS_MODELS_CACHE [ key ] if existing_weights is not network_weights : network . set_weights ( network_weights ) klass . KERAS_MODELS_CACHE [ key ] = ( network , network_weights ) def throw ( * args , ** kwargs ) : raise NotImplementedError ( "Do not call fit on cached model." ) network . fit = throw return network
Return a keras Model with the specified architecture and weights. As an optimization, when possible this will reuse architectures from a process-wide cache.
60,862
def network ( self , borrow = False ) : if self . _network is None and self . network_json is not None : self . load_weights ( ) if borrow : return self . borrow_cached_network ( self . network_json , self . network_weights ) else : import keras . models self . _network = keras . models . model_from_json ( self . network_json ) if self . network_weights is not None : self . _network . set_weights ( self . network_weights ) self . network_json = None self . network_weights = None return self . _network
Return the keras model associated with this predictor.
60,863
def load_weights ( self ) : if self . network_weights_loader : self . network_weights = self . network_weights_loader ( ) self . network_weights_loader = None
Load weights by evaluating self.network_weights_loader if needed.
60,864
def predict ( self , peptides , allele_encoding = None , batch_size = 4096 ) : assert self . prediction_cache is not None use_cache = ( allele_encoding is None and isinstance ( peptides , EncodableSequences ) ) if use_cache and peptides in self . prediction_cache : return self . prediction_cache [ peptides ] . copy ( ) x_dict = { 'peptide' : self . peptides_to_network_input ( peptides ) } if allele_encoding is not None : allele_input = self . allele_encoding_to_network_input ( allele_encoding ) x_dict [ 'allele' ] = allele_input network = self . network ( borrow = True ) raw_predictions = network . predict ( x_dict , batch_size = batch_size ) predictions = numpy . array ( raw_predictions , dtype = "float64" ) [ : , 0 ] result = to_ic50 ( predictions ) if use_cache : self . prediction_cache [ peptides ] = result return result
Predict affinities.
60,865
def make_scores ( ic50_y , ic50_y_pred , sample_weight = None , threshold_nm = 500 , max_ic50 = 50000 ) : y_pred = from_ic50 ( ic50_y_pred , max_ic50 ) try : auc = sklearn . metrics . roc_auc_score ( ic50_y <= threshold_nm , y_pred , sample_weight = sample_weight ) except ValueError as e : logging . warning ( e ) auc = numpy . nan try : f1 = sklearn . metrics . f1_score ( ic50_y <= threshold_nm , ic50_y_pred <= threshold_nm , sample_weight = sample_weight ) except ValueError as e : logging . warning ( e ) f1 = numpy . nan try : tau = scipy . stats . kendalltau ( ic50_y_pred , ic50_y ) [ 0 ] except ValueError as e : logging . warning ( e ) tau = numpy . nan return dict ( auc = auc , f1 = f1 , tau = tau )
Calculate AUC, F1, and Kendall Tau scores.
60,866
def variable_length_to_fixed_length_vector_encoding ( self , vector_encoding_name , left_edge = 4 , right_edge = 4 , max_length = 15 ) : cache_key = ( "fixed_length_vector_encoding" , vector_encoding_name , left_edge , right_edge , max_length ) if cache_key not in self . encoding_cache : fixed_length_sequences = ( self . sequences_to_fixed_length_index_encoded_array ( self . sequences , left_edge = left_edge , right_edge = right_edge , max_length = max_length ) ) result = amino_acid . fixed_vectors_encoding ( fixed_length_sequences , amino_acid . ENCODING_DATA_FRAMES [ vector_encoding_name ] ) assert result . shape [ 0 ] == len ( self . sequences ) self . encoding_cache [ cache_key ] = result return self . encoding_cache [ cache_key ]
Encode variable-length sequences using a fixed-length encoding designed for preserving the anchor positions of class I peptides.
60,867
def sequences_to_fixed_length_index_encoded_array ( klass , sequences , left_edge = 4 , right_edge = 4 , max_length = 15 ) : result = numpy . full ( fill_value = amino_acid . AMINO_ACID_INDEX [ 'X' ] , shape = ( len ( sequences ) , max_length ) , dtype = "int32" ) df = pandas . DataFrame ( { "peptide" : sequences } ) df [ "length" ] = df . peptide . str . len ( ) middle_length = max_length - left_edge - right_edge for ( length , sub_df ) in df . groupby ( "length" ) : if length < left_edge + right_edge : raise ValueError ( "Sequence '%s' (length %d) unsupported: length must be at " "least %d. There are %d total peptides with this length." % ( sub_df . iloc [ 0 ] . peptide , length , left_edge + right_edge , len ( sub_df ) ) ) if length > max_length : raise ValueError ( "Sequence '%s' (length %d) unsupported: length must be at " "most %d. There are %d total peptides with this length." % ( sub_df . iloc [ 0 ] . peptide , length , max_length , len ( sub_df ) ) ) fixed_length_sequences = numpy . stack ( sub_df . peptide . map ( lambda s : numpy . array ( [ amino_acid . AMINO_ACID_INDEX [ char ] for char in s ] ) ) . values ) num_null = max_length - length num_null_left = int ( math . ceil ( num_null / 2 ) ) num_middle_filled = middle_length - num_null middle_start = left_edge + num_null_left result [ sub_df . index , : left_edge ] = fixed_length_sequences [ : , : left_edge ] result [ sub_df . index , middle_start : middle_start + num_middle_filled ] = fixed_length_sequences [ : , left_edge : left_edge + num_middle_filled ] result [ sub_df . index , - right_edge : ] = fixed_length_sequences [ : , - right_edge : ] return result
Transform a sequence of strings, where each string is of length at least left_edge + right_edge and at most max_length, into strings of length max_length using a scheme designed to preserve the anchor positions of class I peptides.
60,868
def robust_mean ( log_values ) : if log_values . shape [ 1 ] <= 3 : return numpy . nanmean ( log_values , axis = 1 ) without_nans = numpy . nan_to_num ( log_values ) mask = ( ( ~ numpy . isnan ( log_values ) ) & ( without_nans <= numpy . nanpercentile ( log_values , 75 , axis = 1 ) . reshape ( ( - 1 , 1 ) ) ) & ( without_nans >= numpy . nanpercentile ( log_values , 25 , axis = 1 ) . reshape ( ( - 1 , 1 ) ) ) ) return ( without_nans * mask . astype ( float ) ) . sum ( 1 ) / mask . sum ( 1 )
Mean of values falling within the 25-75 percentiles.
60,869
def neural_networks ( self ) : result = [ ] for models in self . allele_to_allele_specific_models . values ( ) : result . extend ( models ) result . extend ( self . class1_pan_allele_models ) return result
List of the neural networks in the ensemble.
60,870
def merge ( cls , predictors ) : assert len ( predictors ) > 0 if len ( predictors ) == 1 : return predictors [ 0 ] allele_to_allele_specific_models = collections . defaultdict ( list ) class1_pan_allele_models = [ ] allele_to_fixed_length_sequence = predictors [ 0 ] . allele_to_fixed_length_sequence for predictor in predictors : for ( allele , networks ) in ( predictor . allele_to_allele_specific_models . items ( ) ) : allele_to_allele_specific_models [ allele ] . extend ( networks ) class1_pan_allele_models . extend ( predictor . class1_pan_allele_models ) return Class1AffinityPredictor ( allele_to_allele_specific_models = allele_to_allele_specific_models , class1_pan_allele_models = class1_pan_allele_models , allele_to_fixed_length_sequence = allele_to_fixed_length_sequence )
Merge the ensembles of two or more Class1AffinityPredictor instances.
60,871
def merge_in_place ( self , others ) : new_model_names = [ ] for predictor in others : for model in predictor . class1_pan_allele_models : model_name = self . model_name ( "pan-class1" , len ( self . class1_pan_allele_models ) ) self . class1_pan_allele_models . append ( model ) row = pandas . Series ( collections . OrderedDict ( [ ( "model_name" , model_name ) , ( "allele" , "pan-class1" ) , ( "config_json" , json . dumps ( model . get_config ( ) ) ) , ( "model" , model ) , ] ) ) . to_frame ( ) . T self . _manifest_df = pandas . concat ( [ self . manifest_df , row ] , ignore_index = True ) new_model_names . append ( model_name ) for allele in predictor . allele_to_allele_specific_models : if allele not in self . allele_to_allele_specific_models : self . allele_to_allele_specific_models [ allele ] = [ ] current_models = self . allele_to_allele_specific_models [ allele ] for model in predictor . allele_to_allele_specific_models [ allele ] : model_name = self . model_name ( allele , len ( current_models ) ) row = pandas . Series ( collections . OrderedDict ( [ ( "model_name" , model_name ) , ( "allele" , allele ) , ( "config_json" , json . dumps ( model . get_config ( ) ) ) , ( "model" , model ) , ] ) ) . to_frame ( ) . T self . _manifest_df = pandas . concat ( [ self . manifest_df , row ] , ignore_index = True ) current_models . append ( model ) new_model_names . append ( model_name ) self . clear_cache ( ) return new_model_names
Add the models present in other predictors into the current predictor.
60,872
def percentile_ranks ( self , affinities , allele = None , alleles = None , throw = True ) : if allele is not None : try : transform = self . allele_to_percent_rank_transform [ allele ] return transform . transform ( affinities ) except KeyError : msg = "Allele %s has no percentile rank information" % allele if throw : raise ValueError ( msg ) else : warnings . warn ( msg ) return numpy . ones ( len ( affinities ) ) * numpy . nan if alleles is None : raise ValueError ( "Specify allele or alleles" ) df = pandas . DataFrame ( { "affinity" : affinities } ) df [ "allele" ] = alleles df [ "result" ] = numpy . nan for ( allele , sub_df ) in df . groupby ( "allele" ) : df . loc [ sub_df . index , "result" ] = self . percentile_ranks ( sub_df . affinity , allele = allele , throw = throw ) return df . result . values
Return percentile ranks for the given ic50 affinities and alleles.
60,873
def calibrate_percentile_ranks ( self , peptides = None , num_peptides_per_length = int ( 1e5 ) , alleles = None , bins = None ) : if bins is None : bins = to_ic50 ( numpy . linspace ( 1 , 0 , 1000 ) ) if alleles is None : alleles = self . supported_alleles if peptides is None : peptides = [ ] lengths = range ( self . supported_peptide_lengths [ 0 ] , self . supported_peptide_lengths [ 1 ] + 1 ) for length in lengths : peptides . extend ( random_peptides ( num_peptides_per_length , length ) ) encoded_peptides = EncodableSequences . create ( peptides ) for ( i , allele ) in enumerate ( alleles ) : predictions = self . predict ( encoded_peptides , allele = allele ) transform = PercentRankTransform ( ) transform . fit ( predictions , bins = bins ) self . allele_to_percent_rank_transform [ allele ] = transform return encoded_peptides
Compute the cumulative distribution of ic50 values for a set of alleles over a large universe of random peptides, to enable computing quantiles in this distribution later.
60,874
def filter_networks ( self , predicate ) : allele_to_allele_specific_models = { } for ( allele , models ) in self . allele_to_allele_specific_models . items ( ) : allele_to_allele_specific_models [ allele ] = [ m for m in models if predicate ( m ) ] class1_pan_allele_models = [ m for m in self . class1_pan_allele_models if predicate ( m ) ] return Class1AffinityPredictor ( allele_to_allele_specific_models = allele_to_allele_specific_models , class1_pan_allele_models = class1_pan_allele_models , allele_to_fixed_length_sequence = self . allele_to_fixed_length_sequence , )
Return a new Class1AffinityPredictor containing a subset of this predictor's neural networks.
60,875
def model_select ( self , score_function , alleles = None , min_models = 1 , max_models = 10000 ) : if alleles is None : alleles = self . supported_alleles dfs = [ ] allele_to_allele_specific_models = { } for allele in alleles : df = pandas . DataFrame ( { 'model' : self . allele_to_allele_specific_models [ allele ] } ) df [ "model_num" ] = df . index df [ "allele" ] = allele df [ "selected" ] = False round_num = 1 while not df . selected . all ( ) and sum ( df . selected ) < max_models : score_col = "score_%2d" % round_num prev_score_col = "score_%2d" % ( round_num - 1 ) existing_selected = list ( df [ df . selected ] . model ) df [ score_col ] = [ numpy . nan if row . selected else score_function ( Class1AffinityPredictor ( allele_to_allele_specific_models = { allele : [ row . model ] + existing_selected } ) ) for ( _ , row ) in df . iterrows ( ) ] if round_num > min_models and ( df [ score_col ] . max ( ) < df [ prev_score_col ] . max ( ) ) : break ( best_model_index , ) = df . loc [ ( df [ score_col ] == df [ score_col ] . max ( ) ) ] . sample ( 1 ) . index df . loc [ best_model_index , "selected" ] = True round_num += 1 dfs . append ( df ) allele_to_allele_specific_models [ allele ] = list ( df . loc [ df . selected ] . model ) df = pandas . concat ( dfs , ignore_index = True ) new_predictor = Class1AffinityPredictor ( allele_to_allele_specific_models , metadata_dataframes = { "model_selection" : df , } ) return new_predictor
Perform model selection using a user-specified scoring function.
60,876
def to_series ( self ) : return pandas . Series ( self . cdf , index = [ numpy . nan ] + list ( self . bin_edges ) + [ numpy . nan ] )
Serialize the fit to a pandas.Series.
60,877
def get_default_class1_models_dir ( test_exists = True ) : if _MHCFLURRY_DEFAULT_CLASS1_MODELS_DIR : result = join ( get_downloads_dir ( ) , _MHCFLURRY_DEFAULT_CLASS1_MODELS_DIR ) if test_exists and not exists ( result ) : raise IOError ( "No such directory: %s" % result ) return result else : return get_path ( "models_class1" , "models" , test_exists = test_exists )
Return the absolute path to the default class1 models dir.
60,878
def get_current_release_downloads ( ) : downloads = ( get_downloads_metadata ( ) [ 'releases' ] [ get_current_release ( ) ] [ 'downloads' ] ) return OrderedDict ( ( download [ "name" ] , { 'downloaded' : exists ( join ( get_downloads_dir ( ) , download [ "name" ] ) ) , 'metadata' : download , } ) for download in downloads )
Return a dict of all available downloads in the current release.
60,879
def get_path ( download_name , filename = '' , test_exists = True ) : assert '/' not in download_name , "Invalid download: %s" % download_name path = join ( get_downloads_dir ( ) , download_name , filename ) if test_exists and not exists ( path ) : raise RuntimeError ( "Missing MHCflurry downloadable file: %s. " "To download this data, run:\n\tmhcflurry-downloads fetch %s\n" "in a shell." % ( quote ( path ) , download_name ) ) return path
Get the local path to a file in an MHCflurry download
60,880
def configure ( ) : global _DOWNLOADS_DIR global _CURRENT_RELEASE _CURRENT_RELEASE = None _DOWNLOADS_DIR = environ . get ( "MHCFLURRY_DOWNLOADS_DIR" ) if not _DOWNLOADS_DIR : metadata = get_downloads_metadata ( ) _CURRENT_RELEASE = environ . get ( "MHCFLURRY_DOWNLOADS_CURRENT_RELEASE" ) if not _CURRENT_RELEASE : _CURRENT_RELEASE = metadata [ 'current-release' ] current_release_compatability = ( metadata [ "releases" ] [ _CURRENT_RELEASE ] [ "compatibility-version" ] ) current_compatability = metadata [ "current-compatibility-version" ] if current_release_compatability != current_compatability : logging . warn ( "The specified downloads are not compatible with this version " "of the MHCflurry codebase. Downloads: release %s, " "compatability version: %d. Code compatability version: %d" % ( _CURRENT_RELEASE , current_release_compatability , current_compatability ) ) data_dir = environ . get ( "MHCFLURRY_DATA_DIR" ) if not data_dir : data_dir = user_data_dir ( "mhcflurry" , version = "4" ) _DOWNLOADS_DIR = join ( data_dir , _CURRENT_RELEASE ) logging . debug ( "Configured MHCFLURRY_DOWNLOADS_DIR: %s" % _DOWNLOADS_DIR )
Set up various global variables based on environment variables.
60,881
def make_worker_pool ( processes = None , initializer = None , initializer_kwargs_per_process = None , max_tasks_per_worker = None ) : if not processes : processes = cpu_count ( ) pool_kwargs = { 'processes' : processes , } if max_tasks_per_worker : pool_kwargs [ "maxtasksperchild" ] = max_tasks_per_worker if initializer : if initializer_kwargs_per_process : assert len ( initializer_kwargs_per_process ) == processes kwargs_queue = Queue ( ) kwargs_queue_backup = Queue ( ) for kwargs in initializer_kwargs_per_process : kwargs_queue . put ( kwargs ) kwargs_queue_backup . put ( kwargs ) pool_kwargs [ "initializer" ] = worker_init_entry_point pool_kwargs [ "initargs" ] = ( initializer , kwargs_queue , kwargs_queue_backup ) else : pool_kwargs [ "initializer" ] = initializer worker_pool = Pool ( ** pool_kwargs ) print ( "Started pool: %s" % str ( worker_pool ) ) pprint ( pool_kwargs ) return worker_pool
Convenience wrapper to create a multiprocessing.Pool.
60,882
def calibrate_percentile_ranks ( allele , predictor , peptides = None ) : global GLOBAL_DATA if peptides is None : peptides = GLOBAL_DATA [ "calibration_peptides" ] predictor . calibrate_percentile_ranks ( peptides = peptides , alleles = [ allele ] ) return { allele : predictor . allele_to_percent_rank_transform [ allele ] , }
Private helper function.
60,883
def set_keras_backend ( backend = None , gpu_device_nums = None , num_threads = None ) : os . environ [ "KERAS_BACKEND" ] = "tensorflow" original_backend = backend if not backend : backend = "tensorflow-default" if gpu_device_nums is not None : os . environ [ "CUDA_VISIBLE_DEVICES" ] = "," . join ( [ str ( i ) for i in gpu_device_nums ] ) if backend == "tensorflow-cpu" or gpu_device_nums == [ ] : print ( "Forcing tensorflow/CPU backend." ) os . environ [ "CUDA_VISIBLE_DEVICES" ] = "" device_count = { 'CPU' : 1 , 'GPU' : 0 } elif backend == "tensorflow-gpu" : print ( "Forcing tensorflow/GPU backend." ) device_count = { 'CPU' : 0 , 'GPU' : 1 } elif backend == "tensorflow-default" : print ( "Forcing tensorflow backend." ) device_count = None else : raise ValueError ( "Unsupported backend: %s" % backend ) import tensorflow from keras import backend as K if K . backend ( ) == 'tensorflow' : config = tensorflow . ConfigProto ( device_count = device_count ) config . gpu_options . allow_growth = True if num_threads : config . inter_op_parallelism_threads = num_threads config . intra_op_parallelism_threads = num_threads session = tensorflow . Session ( config = config ) K . set_session ( session ) else : if original_backend or gpu_device_nums or num_threads : warnings . warn ( "Only tensorflow backend can be customized. Ignoring " " customization. Backend: %s" % K . backend ( ) )
Configure the Keras backend to use GPU or CPU. Only tensorflow is supported.
60,884
def uproot ( tree ) : uprooted = tree . copy ( ) uprooted . parent = None for child in tree . all_children ( ) : uprooted . add_general_child ( child ) return uprooted
Take a subbranch of a tree and deep-copy the children of this subbranch into a new LabeledTree
60,885
def copy ( self ) : return LabeledTree ( udepth = self . udepth , depth = self . depth , text = self . text , label = self . label , children = self . children . copy ( ) if self . children != None else [ ] , parent = self . parent )
Deep Copy of a LabeledTree
60,886
def add_child ( self , child ) : self . children . append ( child ) child . parent = self self . udepth = max ( [ child . udepth for child in self . children ] ) + 1
Adds a branch to the current tree.
60,887
def lowercase ( self ) : if len ( self . children ) > 0 : for child in self . children : child . lowercase ( ) else : self . text = self . text . lower ( )
Lowercase all strings in this tree. Works recursively and in-place.
60,888
def inject_visualization_javascript ( tree_width = 1200 , tree_height = 400 , tree_node_radius = 10 ) : from . javascript import insert_sentiment_markup insert_sentiment_markup ( tree_width = tree_width , tree_height = tree_height , tree_node_radius = tree_node_radius )
In an IPython notebook, show SST trees using the same Javascript code as used by Jason Chuang's visualisations.
60,889
def create_tree_from_string ( line ) : depth = 0 current_word = "" root = None current_node = root for char in line : if char == '(' : if current_node is not None and len ( current_word ) > 0 : attribute_text_label ( current_node , current_word ) current_word = "" depth += 1 if depth > 1 : child = LabeledTree ( depth = depth ) current_node . add_child ( child ) current_node = child root . add_general_child ( child ) else : root = LabeledTree ( depth = depth ) root . add_general_child ( root ) current_node = root elif char == ')' : if len ( current_word ) > 0 : attribute_text_label ( current_node , current_word ) current_word = "" depth -= 1 if current_node . parent != None : current_node . parent . udepth = max ( current_node . udepth + 1 , current_node . parent . udepth ) current_node = current_node . parent else : current_word += char if depth != 0 : raise ParseError ( "Not an equal amount of closing and opening parentheses" ) return root
Parse and convert a string representation of an example into a LabeledTree data structure.
60,890
def import_tree_corpus ( path ) : tree_list = LabeledTreeCorpus ( ) with codecs . open ( path , "r" , "UTF-8" ) as f : for line in f : tree_list . append ( create_tree_from_string ( line ) ) return tree_list
Import a text file of treebank trees.
60,891
def load_sst ( path = None , url = 'http://nlp.stanford.edu/sentiment/trainDevTestTrees_PTB.zip' ) : if path is None : path = os . path . expanduser ( "~/stanford_sentiment_treebank/" ) makedirs ( path , exist_ok = True ) fnames = download_sst ( path , url ) return { key : import_tree_corpus ( value ) for key , value in fnames . items ( ) }
Download and read in the Stanford Sentiment Treebank dataset into a dictionary with train, dev, and test keys. The dictionary keys point to lists of LabeledTrees.
60,892
def labels ( self ) : labelings = OrderedDict ( ) for tree in self : for label , line in tree . to_labeled_lines ( ) : labelings [ line ] = label return labelings
Construct a dictionary of string -> labels
60,893
def to_file ( self , path , mode = "w" ) : with open ( path , mode = mode ) as f : for tree in self : for label , line in tree . to_labeled_lines ( ) : f . write ( line + "\n" )
Save the corpus to a text file in the original format.
60,894
def import_tree_corpus ( labels_path , parents_path , texts_path ) : with codecs . open ( labels_path , "r" , "UTF-8" ) as f : label_lines = f . readlines ( ) with codecs . open ( parents_path , "r" , "UTF-8" ) as f : parent_lines = f . readlines ( ) with codecs . open ( texts_path , "r" , "UTF-8" ) as f : word_lines = f . readlines ( ) assert len ( label_lines ) == len ( parent_lines ) assert len ( label_lines ) == len ( word_lines ) trees = [ ] for labels , parents , words in zip ( label_lines , parent_lines , word_lines ) : labels = [ int ( l ) + 2 for l in labels . strip ( ) . split ( " " ) ] parents = [ int ( l ) for l in parents . strip ( ) . split ( " " ) ] words = words . strip ( ) . split ( " " ) assert len ( labels ) == len ( parents ) trees . append ( read_tree ( parents , labels , words ) ) return trees
Import dataset from the TreeLSTM data generation scripts.
60,895
def assign_texts ( node , words , next_idx = 0 ) : if len ( node . children ) == 0 : node . text = words [ next_idx ] return next_idx + 1 else : for child in node . children : next_idx = assign_texts ( child , words , next_idx ) return next_idx
Recursively assign the words to nodes by finding and assigning strings to the leaves of a tree in left-to-right order.
60,896
def read_tree ( parents , labels , words ) : trees = { } root = None for i in range ( 1 , len ( parents ) + 1 ) : if not i in trees and parents [ i - 1 ] != - 1 : idx = i prev = None while True : parent = parents [ idx - 1 ] if parent == - 1 : break tree = LabeledTree ( ) if prev is not None : tree . add_child ( prev ) trees [ idx ] = tree tree . label = labels [ idx - 1 ] if trees . get ( parent ) is not None : trees [ parent ] . add_child ( tree ) break elif parent == 0 : root = tree break else : prev = tree idx = parent assert assign_texts ( root , words ) == len ( words ) return root
Take as input a list of integers for parents and labels, along with a list of words, and reconstruct a LabeledTree.
60,897
def set_initial_status ( self , configuration = None ) : super ( CognitiveOpDynModel , self ) . set_initial_status ( configuration ) for node in self . status : self . status [ node ] = np . random . random_sample ( ) self . initial_status = self . status . copy ( ) self . params [ 'nodes' ] [ 'cognitive' ] = { } T_range = ( self . params [ 'model' ] [ 'T_range_min' ] , self . params [ 'model' ] [ 'T_range_max' ] ) if self . params [ 'model' ] [ 'T_range_min' ] > self . params [ 'model' ] [ 'T_range_max' ] : T_range = ( self . params [ 'model' ] [ 'T_range_max' ] , self . params [ 'model' ] [ 'T_range_min' ] ) B_range = ( self . params [ 'model' ] [ 'B_range_min' ] , self . params [ 'model' ] [ 'B_range_max' ] ) if self . params [ 'model' ] [ 'B_range_min' ] > self . params [ 'model' ] [ 'B_range_max' ] : B_range = ( self . params [ 'model' ] [ 'B_range_max' ] , self . params [ 'model' ] [ 'B_range_min' ] ) s = float ( self . params [ 'model' ] [ 'R_fraction_negative' ] + self . params [ 'model' ] [ 'R_fraction_neutral' ] + self . params [ 'model' ] [ 'R_fraction_positive' ] ) R_distribution = ( self . params [ 'model' ] [ 'R_fraction_negative' ] / s , self . params [ 'model' ] [ 'R_fraction_neutral' ] / s , self . params [ 'model' ] [ 'R_fraction_positive' ] / s ) for node in self . graph . nodes ( ) : R_prob = np . random . random_sample ( ) if R_prob < R_distribution [ 0 ] : R = - 1 elif R_prob < ( R_distribution [ 0 ] + R_distribution [ 1 ] ) : R = 0 else : R = 1 self . params [ 'nodes' ] [ 'cognitive' ] [ node ] = ( R , B_range [ 0 ] + ( B_range [ 1 ] - B_range [ 0 ] ) * np . random . random_sample ( ) , T_range [ 0 ] + ( T_range [ 1 ] - T_range [ 0 ] ) * np . random . random_sample ( ) )
Override behaviour of methods in class DiffusionModel. Overwrites initial status using random real values. Generates random node profiles.
60,898
def add_node_configuration ( self , param_name , node_id , param_value ) : if param_name not in self . config [ 'nodes' ] : self . config [ 'nodes' ] [ param_name ] = { node_id : param_value } else : self . config [ 'nodes' ] [ param_name ] [ node_id ] = param_value
Set a parameter for a given node
60,899
def add_node_set_configuration ( self , param_name , node_to_value ) : for nid , val in future . utils . iteritems ( node_to_value ) : self . add_node_configuration ( param_name , nid , val )
Set a node parameter for a set of nodes
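These two methods appear to belong to NDlib's Configuration object; a hedged usage sketch under that assumption, with an illustrative parameter name and graph:

import networkx as nx
import ndlib.models.ModelConfig as mc

g = nx.erdos_renyi_graph(100, 0.1)
config = mc.Configuration()
# Give every node the same 'threshold' value in a single call.
node_thresholds = {node: 0.25 for node in g.nodes()}
config.add_node_set_configuration('threshold', node_thresholds)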