idx: int64, values 0 to 63k
question: string, lengths 61 to 4.03k
target: string, lengths 6 to 1.23k
61,100
def _validate_path ( path ) : if path is None : return None new_path = np . array ( sorted ( set ( path ) , reverse = True ) ) if new_path [ 0 ] != path [ 0 ] : print ( "Warning: Path must be sorted largest to smallest." ) return new_path
Sorts path values from largest to smallest.
61,101
def ebic ( self , gamma = 0 ) : if not self . is_fitted_ : return if not isinstance ( self . precision_ , list ) : return metrics . ebic ( self . sample_covariance_ , self . precision_ , self . n_samples_ , self . n_features_ , gamma = gamma , ) ebic_scores = [ ] for lidx , lam in enumerate ( self . path_ ) : ebic_scores . append ( metrics . ebic ( self . sample_covariance_ , self . precision_ [ lidx ] , self . n_samples_ , self . n_features_ , gamma = gamma , ) ) return np . array ( ebic_scores )
Compute EBIC scores for each model. If the model is not a path, a scalar score value is returned instead.
61,102
def ebic_select(self, gamma=0):
    # Selection only makes sense when a path of estimates is available.
    if not isinstance(self.precision_, list):
        raise ValueError("EBIC requires multiple models to select from.")
    if not self.is_fitted_:
        return
    ebic_scores = self.ebic(gamma=gamma)
    # Among all (numerically) minimal EBIC scores, return the largest index.
    min_indices = np.where(np.abs(ebic_scores - ebic_scores.min()) < 1e-10)
    return np.max(min_indices)
Uses the Extended Bayesian Information Criterion (EBIC) for model selection.
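A rough usage sketch, not taken from this file: it assumes skggm's path mode (QuicGraphicalLasso constructed with mode='path'), and the lambda grid and gamma value are only illustrative.

lams = np.logspace(np.log10(0.01), np.log10(1.0), num=20)
model = QuicGraphicalLasso(lam=1.0, mode="path", path=lams)  # illustrative setup
model.fit(X)
best_idx = model.ebic_select(gamma=0.1)   # index into the path, among EBIC minimisers
best_lam = model.lam_at_index(best_idx)   # the corresponding scaled lambda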
61,103
def quic_graph_lasso ( X , num_folds , metric ) : print ( "QuicGraphicalLasso + GridSearchCV with:" ) print ( " metric: {}" . format ( metric ) ) search_grid = { "lam" : np . logspace ( np . log10 ( 0.01 ) , np . log10 ( 1.0 ) , num = 100 , endpoint = True ) , "init_method" : [ "cov" ] , "score_metric" : [ metric ] , } model = GridSearchCV ( QuicGraphicalLasso ( ) , search_grid , cv = num_folds , refit = True ) model . fit ( X ) bmodel = model . best_estimator_ print ( " len(cv_lams): {}" . format ( len ( search_grid [ "lam" ] ) ) ) print ( " cv-lam: {}" . format ( model . best_params_ [ "lam" ] ) ) print ( " lam_scale_: {}" . format ( bmodel . lam_scale_ ) ) print ( " lam_: {}" . format ( bmodel . lam_ ) ) return bmodel . covariance_ , bmodel . precision_ , bmodel . lam_
Run QuicGraphicalLasso with mode='default' and use the standard scikit-learn GridSearchCV to find the best lambda.
61,104
def quic_graph_lasso_cv ( X , metric ) : print ( "QuicGraphicalLassoCV with:" ) print ( " metric: {}" . format ( metric ) ) model = QuicGraphicalLassoCV ( cv = 2 , n_refinements = 6 , n_jobs = 1 , init_method = "cov" , score_metric = metric , ) model . fit ( X ) print ( " len(cv_lams): {}" . format ( len ( model . cv_lams_ ) ) ) print ( " lam_scale_: {}" . format ( model . lam_scale_ ) ) print ( " lam_: {}" . format ( model . lam_ ) ) return model . covariance_ , model . precision_ , model . lam_
Run QuicGraphicalLassoCV on the data with the metric of choice.
61,105
def graph_lasso ( X , num_folds ) : print ( "GraphLasso (sklearn)" ) model = GraphLassoCV ( cv = num_folds ) model . fit ( X ) print ( " lam_: {}" . format ( model . alpha_ ) ) return model . covariance_ , model . precision_ , model . alpha_
Estimate the inverse covariance via the scikit-learn GraphLassoCV class.
61,106
def _quic_path ( X , path , X_test = None , lam = 0.5 , tol = 1e-6 , max_iter = 1000 , Theta0 = None , Sigma0 = None , method = "quic" , verbose = 0 , score_metric = "log_likelihood" , init_method = "corrcoef" , ) : S , lam_scale_ = _init_coefs ( X , method = init_method ) path = path . copy ( order = "C" ) if method == "quic" : ( precisions_ , covariances_ , opt_ , cputime_ , iters_ , duality_gap_ ) = quic ( S , lam , mode = "path" , tol = tol , max_iter = max_iter , Theta0 = Theta0 , Sigma0 = Sigma0 , path = path , msg = verbose , ) else : raise NotImplementedError ( "Only method='quic' has been implemented." ) if X_test is not None : S_test , lam_scale_test = _init_coefs ( X_test , method = init_method ) path_errors = [ ] for lidx , lam in enumerate ( path ) : path_errors . append ( _compute_error ( S_test , covariances_ [ lidx ] , precisions_ [ lidx ] , score_metric = score_metric , ) ) scores_ = [ - e for e in path_errors ] return covariances_ , precisions_ , scores_ return covariances_ , precisions_
Wrapper to compute the path for example X.
61,107
def lam_at_index ( self , lidx ) : if self . path_ is None : return self . lam * self . lam_scale_ return self . lam * self . lam_scale_ * self . path_ [ lidx ]
Compute the scaled lambda used at index lidx.
61,108
def _compute_ranks(X, winsorize=False, truncation=None, verbose=True):
    n_samples, n_features = X.shape
    Xrank = np.zeros(shape=X.shape)

    if winsorize:
        if truncation is None:
            # Default truncation level as described in Liu 2009.
            truncation = 1 / (4 * np.power(n_samples, 0.25) *
                              np.sqrt(np.pi * np.log(n_samples)))
        elif truncation > 1:
            truncation = min(1.0, truncation)  # clamp to at most 1.0

    for col in np.arange(n_features):
        Xrank[:, col] = rankdata(X[:, col], method="average")
        Xrank[:, col] /= n_samples
        if winsorize:
            if n_samples > 100 * n_features:
                Xrank[:, col] = n_samples * Xrank[:, col] / (n_samples + 1)
            else:
                lower_truncate = Xrank[:, col] <= truncation
                upper_truncate = Xrank[:, col] > 1 - truncation
                Xrank[lower_truncate, col] = truncation
                Xrank[upper_truncate, col] = 1 - truncation

    return Xrank
Transform each column into ranked data. Tied ranks are averaged. Ranks can optionally be winsorized as described in Liu 2009; otherwise this returns Tsukahara's scaled rank-based Z-estimator.
61,109
def spearman_correlation ( X , rowvar = False ) : Xrank = _compute_ranks ( X ) rank_correlation = np . corrcoef ( Xrank , rowvar = rowvar ) return 2 * np . sin ( rank_correlation * np . pi / 6 )
Computes the Spearman correlation estimate. This is effectively a bias-corrected Pearson correlation between rank-transformed columns of X.
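A minimal usage sketch, assuming numpy is imported as np as in the function above; the data is made up. The 2*sin(pi*rho/6) step is the usual correction mapping the Pearson correlation of ranks back onto an estimate of the underlying correlation.

rng = np.random.RandomState(0)
x = rng.normal(size=200)
X = np.column_stack([x, x + 0.5 * rng.normal(size=200)])  # two associated columns
corr = spearman_correlation(X)   # 2x2 matrix; the diagonal comes out exactly 1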
61,110
def kendalltau_correlation ( X , rowvar = False , weighted = False ) : if rowvar : X = X . T _ , n_features = X . shape rank_correlation = np . eye ( n_features ) for row in np . arange ( n_features ) : for col in np . arange ( 1 + row , n_features ) : if weighted : rank_correlation [ row , col ] , _ = weightedtau ( X [ : , row ] , X [ : , col ] , rank = False ) else : rank_correlation [ row , col ] , _ = kendalltau ( X [ : , row ] , X [ : , col ] ) rank_correlation = np . triu ( rank_correlation , 1 ) + rank_correlation . T return np . sin ( rank_correlation * np . pi / 2 )
Computes Kendall's tau correlation estimate. The option to use scipy.stats.weightedtau is not recommended, as the implementation does not appear to handle ties correctly.
61,111
def version ( self ) : request_params = dict ( self . request_params ) request_url = str ( self . request_url ) result = self . do_http_request ( 'get' , request_url , data = request_params , custom_header = str ( self . etiquette ) ) . json ( ) return result [ 'message-version' ]
This attribute retrieves the API version.
61,112
def count ( self ) : request_params = dict ( self . request_params ) request_url = str ( self . request_url ) request_params [ 'rows' ] = 0 result = self . do_http_request ( 'get' , request_url , data = request_params , custom_header = str ( self . etiquette ) ) . json ( ) return int ( result [ 'message' ] [ 'total-results' ] )
This method retrieves the total number of records resulting from a given query.
61,113
def url ( self ) : request_params = self . _escaped_pagging ( ) sorted_request_params = sorted ( [ ( k , v ) for k , v in request_params . items ( ) ] ) req = requests . Request ( 'get' , self . request_url , params = sorted_request_params ) . prepare ( ) return req . url
This attribute retrieves the URL that will be used as an HTTP request to the Crossref API.
61,114
def doi ( self , doi , only_message = True ) : request_url = build_url_endpoint ( '/' . join ( [ self . ENDPOINT , doi ] ) ) request_params = { } result = self . do_http_request ( 'get' , request_url , data = request_params , custom_header = str ( self . etiquette ) ) if result . status_code == 404 : return result = result . json ( ) return result [ 'message' ] if only_message is True else result
This method retrieves the DOI metadata related to a given DOI number.
61,115
def doi_exists ( self , doi ) : request_url = build_url_endpoint ( '/' . join ( [ self . ENDPOINT , doi ] ) ) request_params = { } result = self . do_http_request ( 'get' , request_url , data = request_params , only_headers = True , custom_header = str ( self . etiquette ) ) if result . status_code == 404 : return False return True
This method retrieves a boolean according to the existence of a Crossref DOI number. It returns False if the API returns a 404 status code.
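An illustrative call, assuming the surrounding Works client from the crossref library; the DOIs shown are placeholders.

works = Works()
works.doi_exists('10.1590/0102-311x00133115')    # -> True for a registered DOI
works.doi_exists('10.1590/this-does-not-exist')  # -> False (the API answers 404)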
61,116
def works ( self , funder_id ) : context = '%s/%s' % ( self . ENDPOINT , str ( funder_id ) ) return Works ( context = context )
This method retrieves an iterable of Works of the given funder.
61,117
def works ( self , member_id ) : context = '%s/%s' % ( self . ENDPOINT , str ( member_id ) ) return Works ( context = context )
This method retrieves an iterable of Works of the given member.
61,118
def all ( self ) : request_url = build_url_endpoint ( self . ENDPOINT , self . context ) request_params = dict ( self . request_params ) result = self . do_http_request ( 'get' , request_url , data = request_params , custom_header = str ( self . etiquette ) ) if result . status_code == 404 : raise StopIteration ( ) result = result . json ( ) for item in result [ 'message' ] [ 'items' ] : yield item
This method retrieves an iterator over all the available types.
61,119
def works ( self , type_id ) : context = '%s/%s' % ( self . ENDPOINT , str ( type_id ) ) return Works ( context = context )
This method retrieves an iterable of Works of the given type.
61,120
def works ( self , prefix_id ) : context = '%s/%s' % ( self . ENDPOINT , str ( prefix_id ) ) return Works ( context = context )
This method retrieves an iterable of Works of the given prefix.
61,121
def works ( self , issn ) : context = '%s/%s' % ( self . ENDPOINT , str ( issn ) ) return Works ( context = context )
This method retrieves an iterable of Works of the given journal.
61,122
def register_doi ( self , submission_id , request_xml ) : endpoint = self . get_endpoint ( 'deposit' ) files = { 'mdFile' : ( '%s.xml' % submission_id , request_xml ) } params = { 'operation' : 'doMDUpload' , 'login_id' : self . api_user , 'login_passwd' : self . api_key } result = self . do_http_request ( 'post' , endpoint , data = params , files = files , timeout = 10 , custom_header = str ( self . etiquette ) ) return result
This method registers a new DOI number in Crossref or updates some DOI metadata.
61,123
def _find_plugin_dir ( module_type ) : for install_dir in _get_plugin_install_dirs ( ) : candidate = os . path . join ( install_dir , module_type ) if os . path . isdir ( candidate ) : return candidate else : raise PluginCandidateError ( 'No plugin found for `{}` module in paths:\n{}' . format ( module_type , '\n' . join ( _get_plugin_install_dirs ( ) ) ) )
Find the directory containing the plugin definition for the given type. Do this by searching all the paths where plugins can live for a dir that matches the type name.
61,124
def merged_args_dicts ( global_args , subcommand_args ) : merged = global_args . copy ( ) for key , val in subcommand_args . items ( ) : if key not in merged : merged [ key ] = val elif type ( merged [ key ] ) is type ( val ) is bool : merged [ key ] = merged [ key ] or val else : raise RuntimeError ( "Unmergable args." ) return merged
We deal with docopt args from the top-level peru parse and the subcommand parse. We don't want False values for a flag in the subcommand to override True values if that flag was given at the top level. This function specifically handles that case.
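An illustrative merge with made-up docopt-style flags: a False flag from the subcommand does not clobber a True flag from the top level, and unseen keys are simply added.

global_args = {'--verbose': True, '--quiet': False}
subcommand_args = {'--verbose': False, '--force': True}
merged_args_dicts(global_args, subcommand_args)
# -> {'--verbose': True, '--quiet': False, '--force': True}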
61,125
def force_utf8_in_ascii_mode_hack ( ) : if sys . stdout . encoding == 'ANSI_X3.4-1968' : sys . stdout = open ( sys . stdout . fileno ( ) , mode = 'w' , encoding = 'utf8' , buffering = 1 ) sys . stderr = open ( sys . stderr . fileno ( ) , mode = 'w' , encoding = 'utf8' , buffering = 1 )
On systems without a UTF8 locale configured, Python will default to ASCII mode for stdout and stderr. This causes our fancy display to fail with encoding errors. In particular, you run into this if you try to run peru inside of Docker. This is a hack to force emitting UTF8 in that case. Hopefully it doesn't break anything important.
61,126
async def parse_target ( self , runtime , target_str ) : pipeline_parts = target_str . split ( RULE_SEPARATOR ) module = await self . resolve_module ( runtime , pipeline_parts [ 0 ] , target_str ) rules = [ ] for part in pipeline_parts [ 1 : ] : rule = await self . resolve_rule ( runtime , part ) rules . append ( rule ) return module , tuple ( rules )
A target is a pipeline of a module into zero or more rules, and each module and rule can itself be scoped with zero or more module names.
61,127
def _maybe_quote ( val ) : assert isinstance ( val , str ) , 'We should never set non-string values.' needs_quoting = False try : int ( val ) needs_quoting = True except Exception : pass try : float ( val ) needs_quoting = True except Exception : pass if needs_quoting : return '"{}"' . format ( val ) else : return val
All of our values should be strings. Usually those can be passed in as bare words, but if they're parseable as an int or float we need to quote them.
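A few illustrative calls (expected results shown in comments):

_maybe_quote('master')   # -> 'master'  (a bare word is fine)
_maybe_quote('42')       # -> '"42"'    (parses as an int, so it gets quoted)
_maybe_quote('1.5')      # -> '"1.5"'   (parses as a float, so it gets quoted)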
61,128
async def gather_coalescing_exceptions ( coros , display , * , verbose ) : exceptions = [ ] reprs = [ ] async def catching_wrapper ( coro ) : try : return ( await coro ) except Exception as e : exceptions . append ( e ) if isinstance ( e , PrintableError ) and not verbose : reprs . append ( e . message ) else : reprs . append ( traceback . format_exc ( ) ) return None if hasattr ( asyncio , 'ensure_future' ) : schedule = getattr ( asyncio , 'ensure_future' ) else : schedule = getattr ( asyncio , 'async' ) futures = [ schedule ( catching_wrapper ( coro ) ) for coro in coros ] results = await asyncio . gather ( * futures ) if exceptions : raise GatheredExceptions ( exceptions , reprs ) else : return results
The tricky thing about running multiple coroutines in parallel is what we're supposed to do when one of them raises an exception. The approach we're using here is to catch exceptions and keep waiting for other tasks to finish. At the end, we reraise a GatheredExceptions error if any exceptions were caught.
61,129
async def create_subprocess_with_handle ( command , display_handle , * , shell = False , cwd , ** kwargs ) : encoding = sys . stdout . encoding or 'utf8' decoder_factory = codecs . getincrementaldecoder ( encoding ) decoder = decoder_factory ( errors = 'replace' ) output_copy = io . StringIO ( ) with display_handle : stdin = asyncio . subprocess . DEVNULL stdout = asyncio . subprocess . PIPE stderr = asyncio . subprocess . STDOUT if shell : proc = await asyncio . create_subprocess_shell ( command , stdin = stdin , stdout = stdout , stderr = stderr , cwd = cwd , ** kwargs ) else : proc = await asyncio . create_subprocess_exec ( * command , stdin = stdin , stdout = stdout , stderr = stderr , cwd = cwd , ** kwargs ) while True : outputbytes = await proc . stdout . read ( 4096 ) if not outputbytes : break outputstr = decoder . decode ( outputbytes ) outputstr_unified = _unify_newlines ( outputstr ) display_handle . write ( outputstr_unified ) output_copy . write ( outputstr_unified ) returncode = await proc . wait ( ) if returncode != 0 : raise subprocess . CalledProcessError ( returncode , command , output_copy . getvalue ( ) ) if hasattr ( decoder , 'buffer' ) : assert not decoder . buffer , 'decoder nonempty: ' + repr ( decoder . buffer ) return output_copy . getvalue ( )
Writes subprocess output to a display handle as it comes in, and also returns a copy of it as a string. Throws if the subprocess returns an error. Note that cwd is a required keyword-only argument, on the theory that peru should never start child processes wherever I happen to be running right now.
61,130
def raises_gathered ( error_type ) : container = RaisesGatheredContainer ( ) try : yield container except GatheredExceptions as e : if len ( e . exceptions ) != 1 : raise inner = e . exceptions [ 0 ] if not isinstance ( inner , error_type ) : raise container . exception = inner
For use in tests. Many tests expect a single error to be thrown and want it to be of a specific type. This is a helper method for when that type is inside a gathered exception.
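A sketch of the intended test usage, assuming raises_gathered is wrapped as a context manager in its original module and that this runs inside an async test; failing_task is a hypothetical coroutine whose error ends up inside a GatheredExceptions.

with raises_gathered(ValueError) as cm:
    await gather_coalescing_exceptions([failing_task()], display, verbose=False)
assert isinstance(cm.exception, ValueError)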
61,131
def get_request_filename ( request ) : if 'Content-Disposition' in request . info ( ) : disposition = request . info ( ) [ 'Content-Disposition' ] pieces = re . split ( r'\s*;\s*' , disposition ) for piece in pieces : if piece . startswith ( 'filename=' ) : filename = piece [ len ( 'filename=' ) : ] if filename . startswith ( '"' ) : filename = filename [ 1 : ] if filename . endswith ( '"' ) : filename = filename [ : - 1 ] filename = filename . replace ( '\\"' , '"' ) return filename return os . path . basename ( urlsplit ( request . url ) . path ) or 'index.html'
Figure out the filename for an HTTP download.
61,132
def _extract_optional_list_field ( blob , name ) : value = _optional_list ( typesafe_pop ( blob , name , [ ] ) ) if value is None : raise ParserError ( '"{}" field must be a string or a list.' . format ( name ) ) return value
Handle optional fields that can be either a string or a list of strings.
61,133
def pop_all ( self ) : new_stack = type ( self ) ( ) new_stack . _exit_callbacks = self . _exit_callbacks self . _exit_callbacks = deque ( ) return new_stack
Preserve the context stack by transferring it to a new instance.
61,134
def callback ( self , callback , * args , ** kwds ) : _exit_wrapper = self . _create_cb_wrapper ( callback , * args , ** kwds ) _exit_wrapper . __wrapped__ = callback self . _push_exit_callback ( _exit_wrapper ) return callback
Registers an arbitrary callback and arguments. Cannot suppress exceptions.
61,135
def push_async_callback ( self , callback , * args , ** kwds ) : _exit_wrapper = self . _create_async_cb_wrapper ( callback , * args , ** kwds ) _exit_wrapper . __wrapped__ = callback self . _push_exit_callback ( _exit_wrapper , False ) return callback
Registers an arbitrary coroutine function and arguments. Cannot suppress exceptions.
61,136
async def Runtime ( args , env ) : 'This is the async constructor for the _Runtime class.' r = _Runtime ( args , env ) await r . _init_cache ( ) return r
This is the async constructor for the _Runtime class.
61,137
def find_project_file ( start_dir , basename ) : prefix = os . path . abspath ( start_dir ) while True : candidate = os . path . join ( prefix , basename ) if os . path . isfile ( candidate ) : return candidate if os . path . exists ( candidate ) : raise PrintableError ( "Found {}, but it's not a file." . format ( candidate ) ) if os . path . dirname ( prefix ) == prefix : raise PrintableError ( "Can't find " + basename ) prefix = os . path . dirname ( prefix )
Walk up the directory tree until we find a file of the given name.
61,138
def delete_if_error ( path ) : try : yield except Exception : if os . path . exists ( path ) : os . remove ( path ) raise
If any exception is raised inside the context, delete the file at the given path and allow the exception to continue.
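A sketch of the intended use (the generator above is presumably decorated with contextlib.contextmanager in its original module); write_download is a hypothetical step that may fail partway through.

with delete_if_error('/tmp/download.partial'):
    write_download('/tmp/download.partial')   # if this raises, the partial file
                                              # is removed and the error propagates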
61,139
def _format_file_lines ( files ) : LINES_TO_SHOW = 10 if len ( files ) <= LINES_TO_SHOW : lines = '\n' . join ( files ) else : lines = ( '\n' . join ( files [ : LINES_TO_SHOW - 1 ] ) + '\n...{} total' . format ( len ( files ) ) ) return lines
Given a list of filenames that we're about to print, limit it to a reasonable number of lines.
61,140
def git_env ( self ) : 'Set the index file and prevent git from reading global configs.' env = dict ( os . environ ) for var in [ "HOME" , "XDG_CONFIG_HOME" ] : env . pop ( var , None ) env [ "GIT_CONFIG_NOSYSTEM" ] = "true" env [ "GIT_INDEX_FILE" ] = os . path . abspath ( self . index_file ) return env
Set the index file and prevent git from reading global configs.
61,141
def load_states ( ) : from pkg_resources import resource_stream with resource_stream ( __name__ , 'states.pkl' ) as pklfile : for s in pickle . load ( pklfile ) : state = State ( ** s ) if state . is_obsolete : OBSOLETE . append ( state ) elif state . is_territory : TERRITORIES . append ( state ) else : STATES . append ( state ) if state . is_contiguous : STATES_CONTIGUOUS . append ( state ) if state . is_continental : STATES_CONTINENTAL . append ( state ) STATES_AND_TERRITORIES . append ( state ) globals ( ) [ state . abbr ] = state
Load state data from the pickle file distributed with this package.
61,142
def lookup ( val , field = None , use_cache = True ) : import jellyfish if field is None : if FIPS_RE . match ( val ) : field = 'fips' elif ABBR_RE . match ( val ) : val = val . upper ( ) field = 'abbr' else : val = jellyfish . metaphone ( val ) field = 'name_metaphone' cache_key = "%s:%s" % ( field , val ) if use_cache and cache_key in _lookup_cache : return _lookup_cache [ cache_key ] for state in STATES_AND_TERRITORIES : if val == getattr ( state , field ) : _lookup_cache [ cache_key ] = state return state
Semi-fuzzy state lookup. This method will make a best-effort attempt at finding the state based on the lookup value provided.
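Illustrative lookups relying on the field auto-detection above (results shown as comments; they assume the standard state data shipped with the package):

lookup('20')       # FIPS code            -> Kansas
lookup('ks')       # abbreviation         -> Kansas
lookup('Kansus')   # metaphone name match -> Kansas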
61,143
def query ( searchstr , outformat = FORMAT_BIBTEX , allresults = False ) : logger . debug ( "Query: {sstring}" . format ( sstring = searchstr ) ) searchstr = '/scholar?q=' + quote ( searchstr ) url = GOOGLE_SCHOLAR_URL + searchstr header = HEADERS header [ 'Cookie' ] = "GSP=CF=%d" % outformat request = Request ( url , headers = header ) response = urlopen ( request ) html = response . read ( ) html = html . decode ( 'utf8' ) tmp = get_links ( html , outformat ) result = list ( ) if not allresults : tmp = tmp [ : 1 ] for link in tmp : url = GOOGLE_SCHOLAR_URL + link request = Request ( url , headers = header ) response = urlopen ( request ) bib = response . read ( ) bib = bib . decode ( 'utf8' ) result . append ( bib ) return result
Query Google Scholar.
61,144
def get_links ( html , outformat ) : if outformat == FORMAT_BIBTEX : refre = re . compile ( r'<a href="https://scholar.googleusercontent.com(/scholar\.bib\?[^"]*)' ) elif outformat == FORMAT_ENDNOTE : refre = re . compile ( r'<a href="https://scholar.googleusercontent.com(/scholar\.enw\?[^"]*)"' ) elif outformat == FORMAT_REFMAN : refre = re . compile ( r'<a href="https://scholar.googleusercontent.com(/scholar\.ris\?[^"]*)"' ) elif outformat == FORMAT_WENXIANWANG : refre = re . compile ( r'<a href="https://scholar.googleusercontent.com(/scholar\.ral\?[^"]*)"' ) reflist = refre . findall ( html ) reflist = [ re . sub ( '&(%s);' % '|' . join ( name2codepoint ) , lambda m : chr ( name2codepoint [ m . group ( 1 ) ] ) , s ) for s in reflist ] return reflist
Return a list of reference links from the HTML.
61,145
def convert_pdf_to_txt ( pdf , startpage = None ) : if startpage is not None : startpageargs = [ '-f' , str ( startpage ) ] else : startpageargs = [ ] stdout = subprocess . Popen ( [ "pdftotext" , "-q" ] + startpageargs + [ pdf , "-" ] , stdout = subprocess . PIPE ) . communicate ( ) [ 0 ] if not isinstance ( stdout , str ) : stdout = stdout . decode ( ) return stdout
Convert a PDF file to text and return the text.
61,146
def pdflookup ( pdf , allresults , outformat , startpage = None ) : txt = convert_pdf_to_txt ( pdf , startpage ) txt = re . sub ( "\W" , " " , txt ) words = txt . strip ( ) . split ( ) [ : 20 ] gsquery = " " . join ( words ) bibtexlist = query ( gsquery , outformat , allresults ) return bibtexlist
Look a PDF up on Google Scholar and return BibTeX items.
61,147
def _get_bib_element ( bibitem , element ) : lst = [ i . strip ( ) for i in bibitem . split ( "\n" ) ] for i in lst : if i . startswith ( element ) : value = i . split ( "=" , 1 ) [ - 1 ] value = value . strip ( ) while value . endswith ( ',' ) : value = value [ : - 1 ] while value . startswith ( '{' ) or value . startswith ( '"' ) : value = value [ 1 : - 1 ] return value return None
Return element from bibitem or None.
61,148
def rename_file ( pdf , bibitem ) : year = _get_bib_element ( bibitem , "year" ) author = _get_bib_element ( bibitem , "author" ) if author : author = author . split ( "," ) [ 0 ] title = _get_bib_element ( bibitem , "title" ) l = [ i for i in ( year , author , title ) if i ] filename = "-" . join ( l ) + ".pdf" newfile = pdf . replace ( os . path . basename ( pdf ) , filename ) logger . info ( 'Renaming {in_} to {out}' . format ( in_ = pdf , out = newfile ) ) os . rename ( pdf , newfile )
Attempt to rename the PDF according to the bibitem.
61,149
def soup_maker ( fh ) : try : from bs4 import BeautifulSoup soup = BeautifulSoup ( fh , "lxml" ) for tag in soup . find_all ( ) : tag . name = tag . name . lower ( ) except ImportError : from BeautifulSoup import BeautifulStoneSoup soup = BeautifulStoneSoup ( fh ) return soup
Takes a file handle and returns a BeautifulSoup object.
61,150
def parse ( self , file_handle ) : xbrl_obj = XBRL ( ) if not hasattr ( file_handle , 'read' ) : file_handler = open ( file_handle ) else : file_handler = file_handle xbrl_file = XBRLPreprocessedFile ( file_handler ) xbrl = soup_maker ( xbrl_file . fh ) file_handler . close ( ) xbrl_base = xbrl . find ( name = re . compile ( "xbrl*:*" ) ) if xbrl . find ( 'xbrl' ) is None and xbrl_base is None : raise XBRLParserException ( 'The xbrl file is empty!' ) lookahead = xbrl . find ( name = re . compile ( "context" , re . IGNORECASE | re . MULTILINE ) ) . name if ":" in lookahead : self . xbrl_base = lookahead . split ( ":" ) [ 0 ] + ":" else : self . xbrl_base = "" return xbrl
parse is the main entry point for an XBRLParser. It takes a file handle.
61,151
def parseDEI ( self , xbrl , ignore_errors = 0 ) : dei_obj = DEI ( ) if ignore_errors == 2 : logging . basicConfig ( filename = '/tmp/xbrl.log' , level = logging . ERROR , format = '%(asctime)s %(levelname)s %(name)s %(message)s' ) logger = logging . getLogger ( __name__ ) else : logger = None trading_symbol = xbrl . find_all ( name = re . compile ( "(dei:tradingsymbol)" , re . IGNORECASE | re . MULTILINE ) ) dei_obj . trading_symbol = self . data_processing ( trading_symbol , xbrl , ignore_errors , logger , options = { 'type' : 'String' , 'no_context' : True } ) company_name = xbrl . find_all ( name = re . compile ( "(dei:entityregistrantname)" , re . IGNORECASE | re . MULTILINE ) ) dei_obj . company_name = self . data_processing ( company_name , xbrl , ignore_errors , logger , options = { 'type' : 'String' , 'no_context' : True } ) shares_outstanding = xbrl . find_all ( name = re . compile ( "(dei:entitycommonstocksharesoutstanding)" , re . IGNORECASE | re . MULTILINE ) ) dei_obj . shares_outstanding = self . data_processing ( shares_outstanding , xbrl , ignore_errors , logger , options = { 'type' : 'Number' , 'no_context' : True } ) public_float = xbrl . find_all ( name = re . compile ( "(dei:entitypublicfloat)" , re . IGNORECASE | re . MULTILINE ) ) dei_obj . public_float = self . data_processing ( public_float , xbrl , ignore_errors , logger , options = { 'type' : 'Number' , 'no_context' : True } ) return dei_obj
Parse DEI from our XBRL soup and return a DEI object.
61,152
def parseCustom ( self , xbrl , ignore_errors = 0 ) : custom_obj = Custom ( ) custom_data = xbrl . find_all ( re . compile ( '^((?!(us-gaap|dei|xbrll|xbrldi)).)*:\s*' , re . IGNORECASE | re . MULTILINE ) ) elements = { } for data in custom_data : if XBRLParser ( ) . is_number ( data . text ) : setattr ( custom_obj , data . name . split ( ':' ) [ 1 ] , data . text ) return custom_obj
Parse company custom entities from XBRL and return a Custom object.
61,153
def trim_decimals ( s , precision = - 3 ) : encoded = s . encode ( 'ascii' , 'ignore' ) str_val = "" if six . PY3 : str_val = str ( encoded , encoding = 'ascii' , errors = 'ignore' ) [ : precision ] else : if precision == 0 : str_val = str ( encoded ) else : str_val = str ( encoded ) [ : precision ] if len ( str_val ) > 0 : return float ( str_val ) else : return 0
Convert from scientific notation using precision.
61,154
def data_processing ( self , elements , xbrl , ignore_errors , logger , context_ids = [ ] , ** kwargs ) : options = kwargs . get ( 'options' , { 'type' : 'Number' , 'no_context' : False } ) if options [ 'type' ] == 'String' : if len ( elements ) > 0 : return elements [ 0 ] . text if options [ 'no_context' ] == True : if len ( elements ) > 0 and XBRLParser ( ) . is_number ( elements [ 0 ] . text ) : return elements [ 0 ] . text try : correct_elements = [ ] for element in elements : std = element . attrs [ 'contextref' ] if std in context_ids : correct_elements . append ( element ) elements = correct_elements if len ( elements ) > 0 and XBRLParser ( ) . is_number ( elements [ 0 ] . text ) : decimals = elements [ 0 ] . attrs [ 'decimals' ] if decimals is not None : attr_precision = decimals if xbrl . precision != 0 and xbrl . precison != attr_precision : xbrl . precision = attr_precision if elements : return XBRLParser ( ) . trim_decimals ( elements [ 0 ] . text , int ( xbrl . precision ) ) else : return 0 else : return 0 except Exception as e : if ignore_errors == 0 : raise XBRLParserException ( 'value extraction error' ) elif ignore_errors == 1 : return 0 elif ignore_errors == 2 : logger . error ( str ( e ) + " error at " + '' . join ( elements [ 0 ] . text ) )
Process an XBRL tag object and extract the correct value as stated by the context.
61,155
def by_name ( self ) : return { key . split ( preferences_settings . SECTION_KEY_SEPARATOR ) [ - 1 ] : value for key , value in self . all ( ) . items ( ) }
Return a dictionary with preference identifiers and values, but without the section name in the identifier.
61,156
def get_cache_key ( self , section , name ) : if not self . instance : return 'dynamic_preferences_{0}_{1}_{2}' . format ( self . model . __name__ , section , name ) return 'dynamic_preferences_{0}_{1}_{2}_{3}' . format ( self . model . __name__ , self . instance . pk , section , name , self . instance . pk )
Return the cache key corresponding to a given preference.
61,157
def from_cache ( self , section , name ) : cached_value = self . cache . get ( self . get_cache_key ( section , name ) , CachedValueNotFound ) if cached_value is CachedValueNotFound : raise CachedValueNotFound if cached_value == preferences_settings . CACHE_NONE_VALUE : cached_value = None return self . registry . get ( section = section , name = name ) . serializer . deserialize ( cached_value )
Return a preference raw_value from the cache.
61,158
def many_from_cache ( self , preferences ) : keys = { p : self . get_cache_key ( p . section . name , p . name ) for p in preferences } cached = self . cache . get_many ( list ( keys . values ( ) ) ) for k , v in cached . items ( ) : if v == preferences_settings . CACHE_NONE_VALUE : cached [ k ] = None return { p . identifier ( ) : p . serializer . deserialize ( cached [ k ] ) for p , k in keys . items ( ) if k in cached }
Return cached values for the given preferences; missing preferences will be skipped.
61,159
def all ( self ) : if not preferences_settings . ENABLE_CACHE : return self . load_from_db ( ) preferences = self . registry . preferences ( ) a = self . many_from_cache ( preferences ) if len ( a ) == len ( preferences ) : return a a . update ( self . load_from_db ( cache = True ) ) return a
Return a dictionary containing all preferences by section, loaded from the cache, or from the DB in case of a cold cache.
61,160
def load_from_db ( self , cache = False ) : a = { } db_prefs = { p . preference . identifier ( ) : p for p in self . queryset } for preference in self . registry . preferences ( ) : try : db_pref = db_prefs [ preference . identifier ( ) ] except KeyError : db_pref = self . create_db_pref ( section = preference . section . name , name = preference . name , value = preference . get ( 'default' ) ) else : if cache : self . to_cache ( db_pref ) a [ preference . identifier ( ) ] = db_pref . value return a
Return a dictionary of preferences by section, directly from the DB.
61,161
def validate_value ( self , value ) : field = self . instance . preference . setup_field ( ) value = field . to_python ( value ) field . validate ( value ) field . run_validators ( value ) return value
We call validation from the underlying form field.
61,162
def to_python ( cls , value , ** kwargs ) : if not value : return '' try : return str ( value ) except : pass try : return value . encode ( 'utf-8' ) except : pass raise cls . exception ( "Cannot deserialize value {0} tostring" . format ( value ) )
String deserialization: just return the value as a string.
61,163
def get_by_instance ( self , instance ) : for model , registry in self . items ( ) : try : instance_class = model . _meta . get_field ( 'instance' ) . remote_field . model if isinstance ( instance , instance_class ) : return registry except FieldDoesNotExist : pass return None
Return a preference registry using a model instance.
61,164
def register ( self , preference_class ) : preference = preference_class ( registry = self ) self . section_objects [ preference . section . name ] = preference . section try : self [ preference . section . name ] [ preference . name ] = preference except KeyError : self [ preference . section . name ] = collections . OrderedDict ( ) self [ preference . section . name ] [ preference . name ] = preference return preference_class
Store the given preference class in the registry.
61,165
def get ( self , name , section = None , fallback = False ) : try : _section , name = name . split ( preferences_settings . SECTION_KEY_SEPARATOR ) return self [ _section ] [ name ] except ValueError : pass try : return self [ section ] [ name ] except KeyError : if fallback : return self . _fallback ( section_name = section , pref_name = name ) raise NotFoundInRegistry ( "No such preference in {0} with section={1} and name={2}" . format ( self . __class__ . __name__ , section , name ) )
Returns a previously registered preference.
61,166
def manager ( self , ** kwargs ) : return PreferencesManager ( registry = self , model = self . preference_model , ** kwargs )
Return a preference manager that can be used to retrieve preference values.
61,167
def preferences ( self , section = None ) : if section is None : return [ self [ section ] [ name ] for section in self for name in self [ section ] ] else : return [ self [ section ] [ name ] for name in self [ section ] ]
Return a list of all registered preferences, or a list of preferences registered for a given section.
61,168
def get_queryset ( self ) : self . init_preferences ( ) queryset = super ( PreferenceViewSet , self ) . get_queryset ( ) section = self . request . query_params . get ( 'section' ) if section : queryset = queryset . filter ( section = section ) return queryset
We just ensure preferences are actually populated before fetching from the DB.
61,169
def bulk ( self , request , * args , ** kwargs ) : manager = self . get_manager ( ) errors = { } preferences = [ ] payload = request . data try : for identifier , value in payload . items ( ) : try : preferences . append ( self . queryset . model . registry . get ( identifier ) ) except exceptions . NotFoundInRegistry : errors [ identifier ] = 'invalid preference' except ( TypeError , AttributeError ) : return Response ( 'invalid payload' , status = 400 ) if errors : return Response ( errors , status = 400 ) queries = [ Q ( section = p . section . name , name = p . name ) for p in preferences ] query = queries [ 0 ] for q in queries [ 1 : ] : query |= q preferences_qs = self . get_queryset ( ) . filter ( query ) serializer_objects = [ ] for p in preferences_qs : s = self . get_serializer_class ( ) ( p , data = { 'value' : payload [ p . preference . identifier ( ) ] } ) serializer_objects . append ( s ) validation_errors = { } for s in serializer_objects : if s . is_valid ( ) : continue validation_errors [ s . instance . preference . identifier ( ) ] = s . errors if validation_errors : return Response ( validation_errors , status = 400 ) for s in serializer_objects : s . save ( ) return Response ( [ s . data for s in serializer_objects ] , status = 200 , )
Update multiple preferences at once.
61,170
def set_value ( self , value ) : self . raw_value = self . preference . serializer . serialize ( value )
Save serialized self.value to self.raw_value.
61,171
def delete_preferences ( queryset ) : deleted = [ ] for p in queryset : try : pref = p . registry . get ( section = p . section , name = p . name , fallback = False ) except NotFoundInRegistry : p . delete ( ) deleted . append ( p ) return deleted
Delete preference objects if they are not present in the registry. Return a list of deleted objects.
61,172
def create_deletion_handler ( preference ) : def delete_related_preferences ( sender , instance , * args , ** kwargs ) : queryset = preference . registry . preference_model . objects . filter ( name = preference . name , section = preference . section ) related_preferences = queryset . filter ( raw_value = preference . serializer . serialize ( instance ) ) related_preferences . delete ( ) return delete_related_preferences
Will generate a dynamic handler to purge related preferences on instance deletion.
61,173
def get_field_kwargs ( self ) : kwargs = self . field_kwargs . copy ( ) kwargs . setdefault ( 'label' , self . get ( 'verbose_name' ) ) kwargs . setdefault ( 'help_text' , self . get ( 'help_text' ) ) kwargs . setdefault ( 'widget' , self . get ( 'widget' ) ) kwargs . setdefault ( 'required' , self . get ( 'required' ) ) kwargs . setdefault ( 'initial' , self . initial ) kwargs . setdefault ( 'validators' , [ ] ) kwargs [ 'validators' ] . append ( self . validate ) return kwargs
Return a dict of arguments to use as parameters for the field class instantiation.
61,174
def get_api_field_data ( self ) : field = self . setup_field ( ) d = { 'class' : field . __class__ . __name__ , 'widget' : { 'class' : field . widget . __class__ . __name__ } } try : d [ 'input_type' ] = field . widget . input_type except AttributeError : d [ 'input_type' ] = None return d
Field data to serialize for use on the front-end side; for example, it will include the choices available for a choice field.
61,175
def commiter_factory ( config : dict ) -> BaseCommitizen : name : str = config [ "name" ] try : _cz = registry [ name ] ( config ) except KeyError : msg_error = ( "The commiter has not been found in the system.\n\n" f"Try running 'pip install {name}'\n" ) out . error ( msg_error ) raise SystemExit ( NO_COMMITIZEN_FOUND ) else : return _cz
Return the correct commitizen existing in the registry.
61,176
def generate_version ( current_version : str , increment : str , prerelease : Optional [ str ] = None ) -> Version : pre_version = prerelease_generator ( current_version , prerelease = prerelease ) semver = semver_generator ( current_version , increment = increment ) return Version ( f"{semver}{pre_version}" )
Based on the given increment, a proper semver will be generated.
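A couple of illustrative calls, following standard semver behaviour (results shown as comments):

generate_version('1.2.3', 'PATCH')   # -> Version('1.2.4')
generate_version('1.2.3', 'MINOR')   # -> Version('1.3.0')
generate_version('1.2.3', 'MAJOR')   # -> Version('2.0.0')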
61,177
def update_version_in_files ( current_version : str , new_version : str , files : list ) : for filepath in files : with open ( filepath , "r" ) as file : filedata = file . read ( ) filedata = filedata . replace ( current_version , new_version ) with open ( filepath , "w" ) as file : file . write ( filedata )
Change the old version to the new one in every file given.
61,178
def create_tag ( version : Union [ Version , str ] , tag_format : Optional [ str ] = None ) : if isinstance ( version , str ) : version = Version ( version ) if not tag_format : return version . public major , minor , patch = version . release prerelease = "" if version . is_prerelease : prerelease = f"{version.pre[0]}{version.pre[1]}" t = Template ( tag_format ) return t . safe_substitute ( version = version , major = major , minor = minor , patch = patch , prerelease = prerelease )
The tag and the software version might be different.
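A sketch of the substitution, using string.Template's $placeholder convention as in the code above:

create_tag('1.2.3')                                 # -> '1.2.3' (no format: the public version)
create_tag('1.2.3', tag_format='v$version')         # -> 'v1.2.3'
create_tag('1.2.3', tag_format='ver$major.$minor')  # -> 'ver1.2'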
61,179
def read_pyproject_conf ( data : str ) -> dict : doc = parse ( data ) try : return doc [ "tool" ] [ "commitizen" ] except exceptions . NonExistentKey : return { }
We expect to have a [tool.commitizen] section in pyproject, looking like the sketch below.
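A hypothetical pyproject.toml fragment and how it would be read; the keys shown are only illustrative, since everything under [tool.commitizen] is returned as-is.

example = '''
[tool.commitizen]
name = "cz_conventional_commits"
version = "0.1.0"
files = ["setup.py", "mypkg/__init__.py"]
'''
read_pyproject_conf(example)
# -> roughly {'name': 'cz_conventional_commits', 'version': '0.1.0', 'files': [...]}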
61,180
def read_raw_parser_conf ( data : str ) -> dict : config = configparser . ConfigParser ( allow_no_value = True ) config . read_string ( data ) try : _data : dict = dict ( config [ "commitizen" ] ) if "files" in _data : files = _data [ "files" ] _f = json . loads ( files ) _data . update ( { "files" : _f } ) return _data except KeyError : return { }
We expect to have a [commitizen] section in the INI-style config, like the sketch below.
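A hypothetical INI-style fragment for comparison; note that the files entry is given as a JSON list, which the code above decodes explicitly.

example = '''
[commitizen]
name = cz_conventional_commits
version = 0.1.0
files = ["setup.py", "mypkg/__init__.py"]
'''
read_raw_parser_conf(example)
# -> {'name': 'cz_conventional_commits', 'version': '0.1.0',
#     'files': ['setup.py', 'mypkg/__init__.py']}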
61,181
def set_key ( key : str , value : str ) -> dict : if not _conf . path : return { } if "toml" in _conf . path : with open ( _conf . path , "r" ) as f : parser = parse ( f . read ( ) ) parser [ "tool" ] [ "commitizen" ] [ key ] = value with open ( _conf . path , "w" ) as f : f . write ( parser . as_string ( ) ) else : parser = configparser . ConfigParser ( ) parser . read ( _conf . path ) parser [ "commitizen" ] [ key ] = value with open ( _conf . path , "w" ) as f : parser . write ( f ) return _conf . config
Set or update a key in the conf.
61,182
def close ( self ) : if self . fp : self . fp . close ( ) self . fp = None if self . fp_extra : self . fp_extra . close ( ) self . fp_extra = None self . ctype = None
Close a file pointer.
61,183
def get_compression_type ( self , file_name ) : ext = os . path . splitext ( file_name ) [ 1 ] if ext == '.gz' : self . ctype = 'gzip' elif ext == '.bz2' : self . ctype = 'bzip2' elif ext in ( '.xz' , '.lzma' ) : self . ctype = 'lzma' else : self . ctype = None
Determine compression type for a given file using its extension.
61,184
def do ( to_install ) : for solver in to_install : print ( 'preparing {0}' . format ( solver ) ) download_archive ( sources [ solver ] ) extract_archive ( sources [ solver ] [ - 1 ] , solver ) adapt_files ( solver ) patch_solver ( solver ) compile_solver ( solver )
Prepare all solvers specified in the command line.
61,185
def adapt_files ( solver ) : print ( "adapting {0}'s files" . format ( solver ) ) root = os . path . join ( 'solvers' , solver ) for arch in to_extract [ solver ] : arch = os . path . join ( root , arch ) extract_archive ( arch , solver , put_inside = True ) for fnames in to_move [ solver ] : old = os . path . join ( root , fnames [ 0 ] ) new = os . path . join ( root , fnames [ 1 ] ) os . rename ( old , new ) for f in to_remove [ solver ] : f = os . path . join ( root , f ) if os . path . isdir ( f ) : shutil . rmtree ( f ) else : os . remove ( f )
Rename and remove files whenever necessary.
61,186
def _map_extlit ( self , l ) : v = abs ( l ) if v in self . vmap . e2i : return int ( copysign ( self . vmap . e2i [ v ] , l ) ) else : self . topv += 1 self . vmap . e2i [ v ] = self . topv self . vmap . i2e [ self . topv ] = v return int ( copysign ( self . topv , l ) )
Map an external variable to an internal one if necessary.
61,187
def init ( self , bootstrap_with ) : formula = WCNF ( ) for to_hit in bootstrap_with : to_hit = list ( map ( lambda obj : self . idpool . id ( obj ) , to_hit ) ) formula . append ( to_hit ) for obj_id in six . iterkeys ( self . idpool . id2obj ) : formula . append ( [ - obj_id ] , weight = 1 ) if self . htype == 'rc2' : self . oracle = RC2 ( formula , solver = self . solver , adapt = False , exhaust = True , trim = 5 ) elif self . htype == 'lbx' : self . oracle = LBX ( formula , solver_name = self . solver , use_cld = True ) else : self . oracle = MCSls ( formula , solver_name = self . solver , use_cld = True )
This method serves for initializing the hitting set solver with a given list of sets to hit. Concretely, the hitting set problem is encoded into partial MaxSAT as outlined above, which is then fed either to a MaxSAT solver or an MCS enumerator.
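A rough standalone sketch of that encoding using PySAT primitives (the objects 'a', 'b', 'c' are made up): every set to hit becomes a hard clause over its objects' variables, and every object gets a unit soft clause preferring to leave it out of the hitting set.

from pysat.formula import IDPool, WCNF

pool, formula = IDPool(), WCNF()
for to_hit in [['a', 'b'], ['b', 'c']]:
    formula.append([pool.id(obj) for obj in to_hit])   # hard: each set must be hit
for obj_id in pool.id2obj:
    formula.append([-obj_id], weight=1)                # soft: prefer not to pick obj
# Feeding this to a MaxSAT solver (e.g. RC2) yields a smallest hitting set, here {'b'}.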
61,188
def get ( self ) : model = self . oracle . compute ( ) if model : if self . htype == 'rc2' : self . hset = filter ( lambda v : v > 0 , model ) else : self . hset = model return list ( map ( lambda vid : self . idpool . id2obj [ vid ] , self . hset ) )
This method computes and returns a hitting set. The hitting set is obtained using the underlying oracle operating on the MaxSAT problem formulation. The computed solution is mapped back to objects of the problem domain.
61,189
def hit ( self , to_hit ) : to_hit = list ( map ( lambda obj : self . idpool . id ( obj ) , to_hit ) ) new_obj = list ( filter ( lambda vid : vid not in self . oracle . vmap . e2i , to_hit ) ) self . oracle . add_clause ( to_hit ) for vid in new_obj : self . oracle . add_clause ( [ - vid ] , 1 )
This method adds a new set to hit to the hitting set solver. This is done by translating the input iterable of objects into a list of Boolean variables in the MaxSAT problem formulation.
61,190
def block ( self , to_block ) : to_block = list ( map ( lambda obj : self . idpool . id ( obj ) , to_block ) ) new_obj = list ( filter ( lambda vid : vid not in self . oracle . vmap . e2i , to_block ) ) self . oracle . add_clause ( [ - vid for vid in to_block ] ) for vid in new_obj : self . oracle . add_clause ( [ - vid ] , 1 )
The method serves for imposing a constraint forbidding the hitting set solver from computing a given hitting set. Each set to block is encoded as a hard clause in the MaxSAT problem formulation, which is then added to the underlying oracle.
61,191
def _compute ( self , approx ) : i = 0 while i < len ( approx ) : to_test = approx [ : i ] + approx [ ( i + 1 ) : ] sel , clid = approx [ i ] , self . vmap [ approx [ i ] ] if self . verbose > 1 : print ( 'c testing clid: {0}' . format ( clid ) , end = '' ) if self . oracle . solve ( assumptions = to_test ) : if self . verbose > 1 : print ( ' -> sat (keeping {0})' . format ( clid ) ) i += 1 else : if self . verbose > 1 : print ( ' -> unsat (removing {0})' . format ( clid ) ) approx = to_test
Deletion-based MUS extraction. Given an over-approximation of an MUS, i.e. an unsatisfiable core previously returned by a SAT oracle, the method represents a loop which at each iteration removes a clause from the core and checks whether the remaining clauses of the approximation are unsatisfiable together with the hard clauses.
61,192
def run ( self ) : prepare . do ( to_install ) distutils . command . build . build . run ( self )
Download, patch, and compile SAT solvers before building.
61,193
def add_clause ( self , clause , no_return = True ) : if self . solver : res = self . solver . add_clause ( clause , no_return ) if not no_return : return res
This method is used to add a single clause to the solver. An optional argument no_return controls whether or not to check the formula's satisfiability after adding the new clause.
61,194
def append_formula ( self , formula , no_return = True ) : if self . solver : res = self . solver . append_formula ( formula , no_return ) if not no_return : return res
This method can be used to add a given list of clauses into the solver.
61,195
def enum_models ( self , assumptions = [ ] ) : if self . glucose : done = False while not done : if self . use_timer : start_time = time . clock ( ) self . status = pysolvers . glucose41_solve ( self . glucose , assumptions ) if self . use_timer : self . call_time = time . clock ( ) - start_time self . accu_time += self . call_time model = self . get_model ( ) if model : self . add_clause ( [ - l for l in model ] ) yield model else : done = True
Iterate over models of the internal formula.
61,196
def propagate ( self , assumptions = [ ] , phase_saving = 0 ) : if self . maplesat : if self . use_timer : start_time = time . clock ( ) def_sigint_handler = signal . signal ( signal . SIGINT , signal . SIG_DFL ) st , props = pysolvers . maplechrono_propagate ( self . maplesat , assumptions , phase_saving ) def_sigint_handler = signal . signal ( signal . SIGINT , def_sigint_handler ) if self . use_timer : self . call_time = time . clock ( ) - start_time self . accu_time += self . call_time return bool ( st ) , props if props != None else [ ]
Propagate a given set of assumption literals.
61,197
def get_proof ( self ) : if self . maplesat and self . prfile : self . prfile . seek ( 0 ) return [ line . rstrip ( ) for line in self . prfile . readlines ( ) ]
Get a proof produced while deciding the formula.
61,198
def add_atmost ( self , lits , k , no_return = True ) : if self . minicard : res = pysolvers . minicard_add_am ( self . minicard , lits , k ) if res == False : self . status = False if not no_return : return res
Add a new atmost constraint to the solver's internal formula.
61,199
def id ( self , obj ) : vid = self . obj2id [ obj ] if vid not in self . id2obj : self . id2obj [ vid ] = obj return vid
The method is to be used to assign an integer variable ID for a given new object. If the object already has an ID, no new ID is created and the old one is returned instead.