idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
61,300
def build_ontologies(self, exclude_BNodes=False, return_string=False):
    """
    Extract ontology instances info from the graph, then create python
    objects for them (stored in self.all_ontologies).

    :param exclude_BNodes: if True, skip ontologies whose URI is a blank node
    :param return_string: unused in this body -- kept for API compatibility
    """
    out = []
    qres = self.sparqlHelper.getOntology()
    if qres:
        for candidate in qres:
            if isBlankNode(candidate[0]):
                if exclude_BNodes:
                    continue
                else:
                    # blank-node ontology: try to recover a usable URI,
                    # first from dc:identifier...
                    checkDC_ID = [x for x in self.rdflib_graph.objects(candidate[0], rdflib.namespace.DC.identifier)]
                    if checkDC_ID:
                        out += [Ontology(checkDC_ID[0], namespaces=self.namespaces), ]
                    else:
                        # ...otherwise from the VANN preferred-namespace annotations
                        vannprop = rdflib.URIRef("http://purl.org/vocab/vann/preferredNamespaceUri")
                        vannpref = rdflib.URIRef("http://purl.org/vocab/vann/preferredNamespacePrefix")
                        checkDC_ID = [x for x in self.rdflib_graph.objects(candidate[0], vannprop)]
                        if checkDC_ID:
                            checkDC_prefix = [x for x in self.rdflib_graph.objects(candidate[0], vannpref)]
                            if checkDC_prefix:
                                out += [Ontology(checkDC_ID[0], namespaces=self.namespaces, prefPrefix=checkDC_prefix[0])]
                            else:
                                out += [Ontology(checkDC_ID[0], namespaces=self.namespaces)]
            else:
                # regular URI-identified ontology
                out += [Ontology(candidate[0], namespaces=self.namespaces)]
    else:
        pass
    self.all_ontologies = out
    for onto in self.all_ontologies:
        # cache each ontology's triples and build its private mini-graph
        onto.triples = self.sparqlHelper.entityTriples(onto.uri)
        onto._buildGraph()
Extract ontology instances info from the graph then creates python objects for them .
61,301
def build_entity_from_uri(self, uri, ontospyClass=None):
    """
    Extract RDF statements having a URI as subject, then instantiate an
    RDF_Entity python object so that it can be queried further.

    :param uri: subject URI to look up
    :param ontospyClass: optional RDF_Entity subclass to instantiate instead
    :return: the entity instance, or None if the class check fails or the
        URI has no triples
    """
    if not ontospyClass:
        ontospyClass = RDF_Entity
    elif not issubclass(ontospyClass, RDF_Entity):
        click.secho("Error: <%s> is not a subclass of ontospy.RDF_Entity" % str(ontospyClass))
        return None
    else:
        pass
    qres = self.sparqlHelper.entityTriples(uri)
    if qres:
        entity = ontospyClass(rdflib.URIRef(uri), None, self.namespaces)
        entity.triples = qres
        entity._buildGraph()
        # attach rdf:type info (and its qname forms) when available
        test = entity.getValuesForProperty(rdflib.RDF.type)
        if test:
            entity.rdftype = test
            entity.rdftype_qname = [entity._build_qname(x) for x in test]
        return entity
    else:
        return None
Extract RDF statements having a URI as subject then instantiate the RDF_Entity Python object so that it can be queried further .
61,302
def printClassTree(self, element=None, showids=False, labels=False, showtype=False):
    """Pretty-print the class tree of an ontology to stdout.

    With no element given, every top-layer class is printed recursively.
    """
    TYPE_MARGIN = 11  # padding width for the rdf:type column
    roots = [element] if element else self.toplayer_classes
    for node in roots:
        printGenericTree(node, 0, showids, labels, showtype, TYPE_MARGIN)
Print nicely into stdout the class tree of an ontology
61,303
def printPropertyTree(self, element=None, showids=False, labels=False, showtype=False):
    """Pretty-print the property tree of an ontology to stdout.

    With no element given, every top-layer property is printed recursively.
    """
    TYPE_MARGIN = 18  # padding width for the rdf:type column
    roots = [element] if element else self.toplayer_properties
    for node in roots:
        printGenericTree(node, 0, showids, labels, showtype, TYPE_MARGIN)
Print nicely into stdout the property tree of an ontology
61,304
def add(self, text="", default_continuousAdd=True):
    """add some turtle text

    With no text (and default_continuousAdd on) this drops into the
    interactive continuous-add mode; otherwise the snippet is parsed into
    the graph, prefixed with all currently known namespace declarations.
    """
    if not text and default_continuousAdd:
        self.continuousAdd()
    else:
        # re-declare every known prefix so the snippet can use them freely
        pprefix = ""
        for x, y in self.rdflib_graph.namespaces():
            pprefix += "@prefix %s: <%s> . \n" % (x, y)
        # make sure the statement is terminated with a full stop
        if text and (not text.strip().endswith(".")):
            text += " ."
        # convenience shorthands: ' sub ' and ' class ' expand to rdfs/owl terms
        text = text.replace(" sub ", " rdfs:subClassOf ")
        text = text.replace(" class ", " owl:Class ")
        self.rdflib_graph.parse(data=pprefix + text, format="turtle")
add some turtle text
61,305
def rdf_source(self, aformat="turtle"):
    """Serialize the graph using the requested format.

    'dot' gets special handling; unsupported formats return an error string.
    """
    if aformat and aformat not in self.SUPPORTED_FORMATS:
        return "Sorry. Allowed formats are %s" % str(self.SUPPORTED_FORMATS)
    if aformat != "dot":
        return self.rdflib_graph.serialize(format=aformat)
    return self.__serializedDot()
Serialize graph using the format required
61,306
def omnigraffle(self):
    """tries to open an export directly in omnigraffle

    Writes the dot serialization to ~/tmp/turtle_sketch.dot, falling back
    to the current directory, then asks the OS to open it.
    """
    temp = self.rdf_source("dot")
    try:
        from os.path import expanduser
        home = expanduser("~")
        filename = home + "/tmp/turtle_sketch.dot"
        # context manager closes the handle even if the write fails
        # (the old code left the file object open on errors)
        with open(filename, "w") as f:
            f.write(temp)
    except OSError:
        filename = "turtle_sketch.dot"
        with open(filename, "w") as f:
            f.write(temp)
    try:
        os.system("open " + filename)  # macOS
    except OSError:
        os.system("start " + filename)  # windows fallback
tries to open an export directly in omnigraffle
61,307
def main():
    """standalone line script: launch the interactive ontology browser shell."""
    print("Ontospy " + VERSION)
    # NOTE(review): two separate Shell instances are created -- one only to
    # clear the screen, one to run the loop; confirm this is intended
    Shell()._clear_screen()
    print(Style.BRIGHT + "** Ontospy Interactive Ontology Browser " + VERSION + " **" + Style.RESET_ALL)
    Shell().cmdloop()
    raise SystemExit(1)
standalone line script
61,308
def _print(self, ms, style="TIP"):
    """abstraction for managing color printing"""
    palette = {
        'IMPORTANT': Style.BRIGHT,
        'TIP': Style.DIM,
        'URI': Style.BRIGHT,
        'TEXT': Fore.GREEN,
        'MAGENTA': Fore.MAGENTA,
        'BLUE': Fore.BLUE,
        'GREEN': Fore.GREEN,
        'RED': Fore.RED,
        'DEFAULT': Style.DIM,
    }
    try:
        print(palette[style] + ms + Style.RESET_ALL)
    except:
        # unknown style: fall back to the default decoration
        print(palette['DEFAULT'] + ms + Style.RESET_ALL)
abstraction for managing color printing
61,309
def _printM(self, messages):
    """print a two-part message (bright header + blue body) - for the moment used only by stats printout"""
    if len(messages) != 2:
        print("Not implemented")
        return
    head, body = messages
    print(Style.BRIGHT + head + Style.RESET_ALL + Fore.BLUE + body + Style.RESET_ALL)
print a list of strings - for the moment used only by stats printout
61,310
def _printDescription(self, hrlinetop=True):
    """generic method to print out a description

    If an entity is currently selected, show its type/URI/title/description;
    otherwise show an overview of the loaded graph and its ontologies.
    """
    if hrlinetop:
        self._print("----------------")
    NOTFOUND = "[not found]"
    if self.currentEntity:
        obj = self.currentEntity['object']
        label = obj.bestLabel() or NOTFOUND
        description = obj.bestDescription() or NOTFOUND
        print(Style.BRIGHT + "OBJECT TYPE: " + Style.RESET_ALL + Fore.BLACK + uri2niceString(obj.rdftype) + Style.RESET_ALL)
        # NOTE(review): 'unicode' is Python-2 only -- presumably aliased to
        # str in a compatibility layer elsewhere; confirm
        print(Style.BRIGHT + "URI : " + Style.RESET_ALL + Fore.GREEN + "<" + unicode(obj.uri) + ">" + Style.RESET_ALL)
        print(Style.BRIGHT + "TITLE : " + Style.RESET_ALL + Fore.BLACK + label + Style.RESET_ALL)
        print(Style.BRIGHT + "DESCRIPTION: " + Style.RESET_ALL + Fore.BLACK + description + Style.RESET_ALL)
    else:
        self._clear_screen()
        self._print("Graph: <" + self.current['fullpath'] + ">", 'TIP')
        self._print("----------------", "TIP")
        self._printStats(self.current['graph'])
        for obj in self.current['graph'].all_ontologies:
            print(Style.BRIGHT + "Ontology URI: " + Style.RESET_ALL + Fore.RED + "<%s>" % str(obj.uri) + Style.RESET_ALL)
            label = obj.bestLabel() or NOTFOUND
            description = obj.bestDescription() or NOTFOUND
            print(Style.BRIGHT + "Title : " + Style.RESET_ALL + Fore.BLACK + label + Style.RESET_ALL)
            print(Style.BRIGHT + "Description : " + Style.RESET_ALL + Fore.BLACK + description + Style.RESET_ALL)
    self._print("----------------", "TIP")
generic method to print out a description
61,311
def _next_ontology(self):
    """Dynamically retrieve the ontology following the current one, wrapping to the first."""
    active = self.current['file']
    try:
        position = self.all_ontologies.index(active)
        return self.all_ontologies[position + 1]
    except:
        # not found, or already at the end of the list: wrap around
        return self.all_ontologies[0]
Dynamically retrieves the next ontology in the list
61,312
def _load_ontology(self, filename, preview_mode=False):
    """Loads an ontology

    In normal mode the name is resolved against the local models folder and
    a pickled (cached) graph is reused when available; in preview mode the
    path is used as-is and the graph is always rebuilt.
    """
    if not preview_mode:
        fullpath = self.LOCAL_MODELS + filename
        g = manager.get_pickled_ontology(filename)
        if not g:
            # no cache yet: parse and pickle it now
            g = manager.do_pickle_ontology(filename)
    else:
        fullpath = filename
        filename = os.path.basename(os.path.normpath(fullpath))
        g = Ontospy(fullpath, verbose=True)
    self.current = {'file': filename, 'fullpath': fullpath, 'graph': g}
    self.currentEntity = None
    self._print_entity_intro(g)
Loads an ontology
61,313
def _select_property(self, line):
    """try to match a property and load it

    `line` may be empty (list all properties), a digit (select by index) or
    a text pattern; multiple matches trigger an interactive choice.
    """
    g = self.current['graph']
    if not line:
        out = g.all_properties
        using_pattern = False
    else:
        using_pattern = True
        if line.isdigit():
            line = int(line)
        out = g.get_property(line)
    if out:
        if type(out) == type([]):
            # several candidates: let the user pick one
            choice = self._selectFromList(out, using_pattern, "property")
            if choice:
                self.currentEntity = {'name': choice.locale or choice.uri, 'object': choice, 'type': 'property'}
        else:
            self.currentEntity = {'name': out.locale or out.uri, 'object': out, 'type': 'property'}
        if self.currentEntity:
            self._print_entity_intro(entity=self.currentEntity)
    else:
        print("not found")
try to match a property and load it
61,314
def _select_concept(self, line):
    """try to match a SKOS concept and load it

    `line` may be empty (list all concepts), a digit (select by index) or
    a text pattern; multiple matches trigger an interactive choice.
    """
    g = self.current['graph']
    if not line:
        out = g.all_skos_concepts
        using_pattern = False
    else:
        using_pattern = True
        if line.isdigit():
            line = int(line)
        out = g.get_skos(line)
    if out:
        if type(out) == type([]):
            # several candidates: let the user pick one
            choice = self._selectFromList(out, using_pattern, "concept")
            if choice:
                self.currentEntity = {'name': choice.locale or choice.uri, 'object': choice, 'type': 'concept'}
        else:
            self.currentEntity = {'name': out.locale or out.uri, 'object': out, 'type': 'concept'}
        if self.currentEntity:
            self._print_entity_intro(entity=self.currentEntity)
    else:
        print("not found")
try to match a concept and load it
61,315
def do_visualize(self, line):
    """Visualize an ontology - ie wrapper for export command"""
    if not self.current:
        self._help_noontology()
        return
    line = line.split()
    try:
        # ontodocs is an optional dependency
        from ..ontodocs.builder import action_visualize
    except:
        self._print("This command requires the ontodocs package: `pip install ontodocs`")
        return
    import webbrowser
    url = action_visualize(args=self.current['file'], fromshell=True)
    if url:
        webbrowser.open(url)
    return
Visualize an ontology - ie wrapper for export command
61,316
def do_import(self, line):
    """Import an ontology

    Subcommands: 'starter-pack' (bootstrap common ontologies), 'uri'
    (download from a URL), 'file' (copy a local path), 'repo' (browse
    online repositories). The local-ontologies list is refreshed afterwards.
    """
    line = line.split()
    if line and line[0] == "starter-pack":
        actions.action_bootstrap()
    elif line and line[0] == "uri":
        self._print("------------------\nEnter a valid graph URI: (e.g. http://www.w3.org/2009/08/skos-reference/skos.rdf)")
        var = input()
        if var:
            if var.startswith("http"):
                try:
                    actions.action_import(var)
                except:
                    self._print("OPS... An Unknown Error Occurred - Aborting installation of <%s>" % var)
            else:
                self._print("Not valid. TIP: URIs should start with 'http://'")
    elif line and line[0] == "file":
        self._print("------------------\nEnter a full file path: (e.g. '/Users/mike/Desktop/journals.ttl')")
        var = input()
        if var:
            try:
                actions.action_import(var)
            except:
                self._print("OPS... An Unknown Error Occurred - Aborting installation of <%s>" % var)
    elif line and line[0] == "repo":
        actions.action_webimport()
    else:
        self.help_import()
    # refresh the cached list of local ontologies
    self.all_ontologies = manager.get_localontologies()
    return
Import an ontology
61,317
def do_file(self, line):
    """Perform a file operation (rename or delete) on the local library."""
    valid_ops = self.FILE_OPTS
    if not self.all_ontologies:
        self._help_nofiles()
        return
    tokens = line.split()
    if not tokens or tokens[0] not in valid_ops:
        self.help_file()
        return
    command = tokens[0]
    if command == "rename":
        self._rename_file()
    elif command == "delete":
        self._delete_file()
Perform some file operation
61,318
def do_serialize(self, line):
    """Serialize an entity into an RDF flavour

    With an entity selected, serialize just that entity; otherwise
    serialize the whole graph. Defaults to turtle.
    """
    opts = self.SERIALIZE_OPTS
    if not self.current:
        self._help_noontology()
        return
    line = line.split()
    g = self.current['graph']
    if not line:
        line = ['turtle']
    if line[0] not in opts:
        self.help_serialize()
        return
    elif self.currentEntity:
        self.currentEntity['object'].printSerialize(line[0])
    else:
        # BUG FIX: rdf_source() takes 'aformat', not 'format' -- the old
        # keyword raised a TypeError at call time
        self._print(g.rdf_source(aformat=line[0]))
Serialize an entity into an RDF flavour
61,319
def do_back(self, line):
    "Go back one step. From entity => ontology; from ontology => ontospy top level."
    if not self.currentEntity:
        # no entity selected: drop the ontology, back to the top level
        self.current = None
        self.prompt = _get_prompt()
        return
    # drop the entity, stay on the current ontology
    self.currentEntity = None
    self.prompt = _get_prompt(self.current['file'])
Go back one step . From entity = > ontology ; from ontology = > ontospy top level .
61,320
def do_zen(self, line):
    """Inspiring quotes for the working ontologist"""
    _quote = random.choice(QUOTES)
    # NOTE(review): 'unicode' is Python-2 only -- presumably aliased to str
    # in a compatibility layer; confirm
    print(Style.DIM + unicode(_quote['text']))
    print(Style.BRIGHT + unicode(_quote['source']) + Style.RESET_ALL)
Inspiring quotes for the working ontologist
61,321
def complete_get(self, text, line, begidx, endidx):
    """completion hook for the get command"""
    options = self.GET_OPTS
    if not text:
        return options
    # keep only the options matching what was typed so far
    return [candidate for candidate in options if candidate.startswith(text)]
completion for get command
61,322
def complete_info(self, text, line, begidx, endidx):
    """completion hook for the info command"""
    opts = self.INFO_OPTS
    if not text:
        return opts
    # keep only the options matching what was typed so far
    return [candidate for candidate in opts if candidate.startswith(text)]
completion for info command
61,323
def build_D3treeStandard(old, MAX_DEPTH, level=1, toplayer=None):
    """Build the name/children/size json-like structure used by the d3 tree examples."""
    results = []
    if not old:
        old = toplayer
    for node in old:
        entry = {
            'qname': node.qname,
            'name': node.bestLabel(quotes=False).replace("_", " "),
            'objid': node.id,
        }
        kids = node.children()
        if kids and level < MAX_DEPTH:
            # inner node: record (padded) size and recurse into the children
            entry['size'] = len(kids) + 5
            entry['realsize'] = len(kids)
            entry['children'] = build_D3treeStandard(kids, MAX_DEPTH, level + 1)
        else:
            # leaf (or depth limit reached)
            entry['size'] = 1
            entry['realsize'] = 0
        results.append(entry)
    return results
For d3s examples all we need is a json with name children and size .. eg
61,324
def build_D3bubbleChart(old, MAX_DEPTH, level=1, toplayer=None):
    """Similar to the standard d3 tree, but nodes that have children are
    emitted twice: once as a sized row (so they are depicted explicitly,
    not just color coded) and once carrying the recursive 'children' key."""
    results = []
    if not old:
        old = toplayer
    for node in old:
        label = node.bestLabel(quotes=False).replace("_", " ")
        entry = {'qname': node.qname, 'name': label, 'objid': node.id}
        if node.children() and level < MAX_DEPTH:
            # duplicated row: same identity, but carrying the size info
            twin = {
                'qname': node.qname,
                'name': label,
                'objid': node.id,
                'size': len(node.children()) + 5,
                'realsize': len(node.children()),
            }
            results.append(twin)
            entry['children'] = build_D3bubbleChart(node.children(), MAX_DEPTH, level + 1)
        else:
            entry['size'] = 1
            entry['realsize'] = 0
        results.append(entry)
    return results
Similar to standard d3 but nodes with children need to be duplicated otherwise they are not depicted explicitly but just color coded
61,325
def infer_best_title(self):
    """Select something usable as a title for an ontospy graph."""
    ontologies = self.ontospy_graph.all_ontologies
    if ontologies:
        # best option: the URI of the first declared ontology
        return ontologies[0].uri
    sources = self.ontospy_graph.sources
    if sources:
        # fall back on the first source location
        return sources[0]
    return "Untitled"
Selects something usable as a title for an ontospy graph
61,326
def build(self, output_path=""):
    """method that should be inherited by all vis classes

    Prepares the output folder, copies the static assets, renders the
    templates and returns the URL/path of the main generated file.
    """
    self.output_path = self.checkOutputPath(output_path)
    self._buildStaticFiles()
    self.final_url = self._buildTemplates()
    printDebug("Done.", "comment")
    printDebug("=> %s" % (self.final_url), "comment")
    return self.final_url
method that should be inherited by all vis classes
61,327
def _buildTemplates(self):
    """Render the main template, save it, and return the resulting location.

    Subclasses adapt this to single-file or multi-file visualizations.
    """
    rendered = self._renderTemplate(self.template_name, extraContext=None)
    return self._save2File(rendered, self.main_file_name, self.output_path)
do all the things necessary to build the viz should be adapted to work for single - file viz or multi - files etc .
61,328
def _build_basic_context(self):
    """Return a standard dict used in django as a template context"""
    topclasses = self.ontospy_graph.toplayer_classes[:]
    # if the top layer is very small, pad it with the children of the
    # top classes so the landing page is not empty
    if len(topclasses) < 3:
        for topclass in self.ontospy_graph.toplayer_classes:
            for child in topclass.children():
                if child not in topclasses:
                    topclasses.append(child)
    if not self.static_url:
        self.static_url = "static/"
    context_data = {
        "STATIC_URL": self.static_url,
        "ontodocs_version": VERSION,
        "ontospy_graph": self.ontospy_graph,
        "topclasses": topclasses,
        "docs_title": self.title,
        "namespaces": self.ontospy_graph.namespaces,
        "stats": self.ontospy_graph.stats(),
        "sources": self.ontospy_graph.sources,
        "ontologies": self.ontospy_graph.all_ontologies,
        "classes": self.ontospy_graph.all_classes,
        "properties": self.ontospy_graph.all_properties,
        "objproperties": self.ontospy_graph.all_properties_object,
        "dataproperties": self.ontospy_graph.all_properties_datatype,
        "annotationproperties": self.ontospy_graph.all_properties_annotation,
        "skosConcepts": self.ontospy_graph.all_skos_concepts,
        "instances": []
    }
    return context_data
Return a standard dict used in django as a template context
61,329
def checkOutputPath(self, output_path):
    """Create or clean up output path

    If no path is given, one is derived from the default location plus the
    slugified title. An existing folder is wiped before being recreated.
    """
    if not output_path:
        # NOTE(review): 'unicode' is Python-2 only -- presumably aliased
        # in a compatibility layer; confirm
        output_path = os.path.join(self.output_path_DEFAULT, slugify(unicode(self.title)))
    if os.path.exists(output_path):
        # destructive: any previous build at this location is removed
        shutil.rmtree(output_path)
    os.makedirs(output_path)
    return output_path
Create or clean up output path
61,330
def highlight_code(self, ontospy_entity):
    """produce an html version of Turtle code with syntax highlighted using Pygments CSS"""
    try:
        html = highlight(ontospy_entity.rdf_source(), TurtleLexer(), HtmlFormatter())
        css = HtmlFormatter().get_style_defs('.highlight')
        return {"pygments_code": html, "pygments_code_css": css}
    except Exception as e:
        # highlighting is cosmetic: degrade gracefully
        printDebug("Error: Pygmentize Failed", "red")
        return {}
produce an html version of Turtle code with syntax highlighted using Pygments CSS
61,331
def query(self, q, format="", convert=True):
    """Generic SELECT query structure. `q` is the main body of the query.

    All known prefixes are prepended as PREFIX declarations before execution.
    """
    # BUG FIX: dict.iteritems() does not exist on Python 3 (this codebase
    # uses py3 built-ins like input()/print() elsewhere); items() works on both
    lines = ["PREFIX %s: <%s>" % (k, r) for k, r in self.prefixes.items()]
    lines.extend(q.split("\n"))
    query = "\n".join(lines)
    if self.verbose:
        print(query, "\n\n")
    return self.__doQuery(query, format, convert)
Generic SELECT query structure . q is the main body of the query .
61,332
def describe(self, uri, format="", convert=True):
    """A simple DESCRIBE query with no where arguments.
    `uri` is the resource you want to describe.
    """
    # BUG FIX: dict.iteritems() is Python-2 only; items() works on both 2 and 3
    lines = ["PREFIX %s: <%s>" % (k, r) for k, r in self.prefixes.items()]
    if uri.startswith("http://"):
        lines.extend(["DESCRIBE <%s>" % uri])
    else:
        # assume an already-prefixed qname
        lines.extend(["DESCRIBE %s" % uri])
    query = "\n".join(lines)
    if self.verbose:
        print(query, "\n\n")
    return self.__doQuery(query, format, convert)
A simple DESCRIBE query with no where arguments . uri is the resource you want to describe .
61,333
def __doQuery(self, query, format, convert):
    """Inner method that runs the prepared query, optionally converting the results."""
    self.__getFormat(format)
    self.sparql.setQuery(query)
    outcome = self.sparql.query()
    if convert:
        outcome = outcome.convert()
    return outcome
Inner method that does the actual query
61,334
def get_default_preds():
    """dynamically build autocomplete options based on an external file

    Loads the RDFS schema, extracts (qname, description) pairs for classes
    and properties, and appends the built-in shell commands.
    """
    g = ontospy.Ontospy(rdfsschema, text=True, verbose=False, hide_base_schemas=False)
    classes = [(x.qname, x.bestDescription()) for x in g.all_classes]
    properties = [(x.qname, x.bestDescription()) for x in g.all_properties]
    commands = [('exit', 'exits the terminal'), ('show', 'show current buffer')]
    return rdfschema + owlschema + classes + properties + commands
dynamically build autocomplete options based on an external file
61,335
def matcher(graph1, graph2, confidence=0.5, output_file="matching_results.csv", class_or_prop="classes", verbose=False):
    """takes two graphs and matches their classes (or properties) based on
    qname/label similarity, writing candidate pairs above `confidence` to a CSV.

    NOTE(review): relies on the Python-2 'unicode' builtin throughout --
    presumably aliased in a compatibility layer; confirm before running on py3.
    """
    printDebug("----------\nNow matching...")
    f = open(output_file, 'wt')
    counter = 0
    try:
        writer = csv.writer(f, quoting=csv.QUOTE_NONNUMERIC)
        writer.writerow(('name 1', 'name 2', 'uri 1', 'uri 2'))
        if class_or_prop == "classes":
            # all-pairs comparison of class labels
            for x in graph1.all_classes:
                l1 = unicode(x.bestLabel(qname_allowed=True))
                for y in graph2.all_classes:
                    l2 = unicode(y.bestLabel(qname_allowed=True))
                    if similar(l1, l2) > confidence:
                        counter += 1
                        row = [l1, l2, x.uri, y.uri]
                        writer.writerow([s.encode('utf8') if type(s) is unicode else s for s in row])
                        if verbose:
                            print("%s ==~== %s" % (l1, l2))
        elif class_or_prop == "properties":
            # all-pairs comparison of property labels
            for x in graph1.all_properties:
                l1 = unicode(x.bestLabel(qname_allowed=True))
                for y in graph2.all_properties:
                    l2 = unicode(y.bestLabel(qname_allowed=True))
                    if similar(l1, l2) > confidence:
                        counter += 1
                        row = [l1, l2, x.uri, y.uri]
                        writer.writerow([s.encode('utf8') if type(s) is unicode else s for s in row])
                        if verbose:
                            print("%s ==~== %s" % (l1, l2))
    finally:
        # make sure the CSV is closed even if matching fails midway
        f.close()
    printDebug("%d candidates found." % counter)
takes two graphs and matches their classes based on qname label etc ..
61,336
def safe_str(u, errors="replace"):
    """Encode the given text so it can be printed safely on the current stdout."""
    target_encoding = sys.stdout.encoding or "utf-8"
    return u.encode(target_encoding, errors)
Safely print the given string .
61,337
def OLD_printDebug(s, style=None):
    """util for printing in colors to the sys.stderr stream"""
    decorations = {
        "comment": Style.DIM,
        "important": Style.BRIGHT,
        "normal": Style.RESET_ALL,
        "red": Fore.RED,
        "green": Fore.GREEN,
    }
    if style in decorations:
        s = decorations[style] + s + Style.RESET_ALL
    try:
        print(s, file=sys.stderr)
    except:
        # printing is best-effort (e.g. encoding errors on odd terminals)
        pass
util for printing in colors to sys . stderr stream
61,338
def pprint2columns(llist, max_length=60):
    """Print a list of strings in two columns.

    llist: list of strings; max_length: if a column would be wider than
    this, fall back to single-column output. The caller's list is never
    modified (the old version padded it in place).
    """
    if len(llist) == 0:
        return None
    col_width = max(len(word) for word in llist) + 2
    # BUG FIX: pad a *copy* so the caller's list is not mutated
    if len(llist) % 2 != 0:
        llist = llist + [' ']
    if col_width > max_length:
        # words too long for two columns: print one per line
        for el in llist:
            print(el)
    else:
        half = len(llist) // 2
        column1 = llist[:half]
        column2 = llist[half:]
        for c1, c2 in zip(column1, column2):
            spacer = " " * (col_width - len(c1))
            print("%s%s%s" % (c1, spacer, c2))
llist = a list of strings max_length = if a word is longer than that for single col display
61,339
def playSound(folder, name=""):
    """as easy as that -- play a (random) sound file from a folder, best-effort"""
    try:
        if not name:
            # no file given: pick a random one from the folder
            candidates = [f for f in os.listdir(folder) if os.path.isfile(os.path.join(folder, f))]
            name = random.choice(candidates)
        subprocess.call(["afplay", folder + name])
    except:
        # sound is non-essential: swallow any error (missing folder, no afplay...)
        pass
as easy as that
61,340
def truncate(data, l=20):
    "truncate a string"
    if len(data) > l:
        return data[:l] + '..'
    return data
truncate a string
61,341
def printGenericTree(element, level=0, showids=True, labels=False, showtype=True, TYPE_MARGIN=18):
    """Print nicely into stdout the taxonomical tree of an ontology.

    :param element: entity whose subtree is printed (recursively)
    :param level: current indentation depth
    :param showids: prefix rows with the entity's numeric id
    :param labels: append the entity's best label, when available
    :param showtype: prefix rows with the (abbreviated) rdf type
    :param TYPE_MARGIN: padding width for the type column
    """
    ID_MARGIN = 5
    # abbreviations so the type column stays narrow
    SHORT_TYPES = {
        "rdf:Property": "rdf:Property",
        "owl:AnnotationProperty": "owl:Annot.Pr.",
        "owl:DatatypeProperty": "owl:DatatypePr.",
        "owl:ObjectProperty": "owl:ObjectPr.",
    }
    if showids:
        _id_ = Fore.BLUE + "[%d]%s" % (element.id, " " * (ID_MARGIN - len(str(element.id)))) + Fore.RESET
    elif showtype:
        _prop = uri2niceString(element.rdftype)
        try:
            prop = SHORT_TYPES[_prop]
        except:
            # type not in the abbreviation table: use it as-is
            prop = _prop
        _id_ = Fore.BLUE + "[%s]%s" % (prop, " " * (TYPE_MARGIN - len(prop))) + Fore.RESET
    else:
        _id_ = ""
    if labels:
        bestLabel = element.bestLabel(qname_allowed=False)
        if bestLabel:
            bestLabel = Fore.MAGENTA + " (\"%s\")" % bestLabel + Fore.RESET
    else:
        bestLabel = ""
    printDebug("%s%s%s%s" % (_id_, "-" * 4 * level, element.qname, bestLabel))
    for sub in element.children():
        printGenericTree(sub, (level + 1), showids, labels, showtype, TYPE_MARGIN)
Print nicely into stdout the taxonomical tree of an ontology .
61,342
def firstStringInList(literalEntities, prefLanguage="en"):
    """From a list of literals, return the one in `prefLanguage`; if no
    language information matches, return the first element.

    Returns "" for an empty list. Entities without a `language` attribute
    (e.g. plain strings) are now tolerated instead of raising AttributeError.
    """
    match = ""
    if len(literalEntities) == 1:
        match = literalEntities[0]
    elif len(literalEntities) > 1:
        for x in literalEntities:
            # ROBUSTNESS FIX: getattr default added, so objects without a
            # .language attribute no longer crash the lookup
            lang = getattr(x, 'language', None)
            if lang and lang == prefLanguage:
                match = x
        if not match:
            match = literalEntities[0]
    return match
from a list of literals returns the one in prefLanguage if no language specification is available return first element
61,343
def joinStringsInList(literalEntities, prefLanguage="en"):
    """From a list of literals, return the ones in `prefLanguage` joined
    with ' - '. If the desired language is not available, join them all.

    Returns "" for an empty list. Entities without a `language` attribute
    (e.g. plain strings) are now tolerated instead of raising AttributeError.
    """
    match = []
    if len(literalEntities) == 1:
        return literalEntities[0]
    elif len(literalEntities) > 1:
        for x in literalEntities:
            # ROBUSTNESS FIX: getattr default added, so objects without a
            # .language attribute no longer crash the lookup
            lang = getattr(x, 'language', None)
            if lang and lang == prefLanguage:
                match.append(x)
        if not match:
            # desired language not found: keep everything
            for x in literalEntities:
                match.append(x)
    return " - ".join([x for x in match])
from a list of literals returns the ones in prefLanguage joined up . if the desired language specification is not available join all up
61,344
def sortByNamespacePrefix(urisList, nsList):
    """Given an ordered list of namespace prefixes, order a list of uris
    so that uris belonging to earlier namespaces come first; anything not
    matching any namespace is appended at the end.
    """
    ordered = []  # renamed from 'exit', which shadowed the builtin
    urisList = sort_uri_list_by_name(urisList)
    for ns in nsList:
        # keep the (alphabetical) relative order within each namespace bucket
        ordered += [uri for uri in urisList if str(uri).startswith(str(ns))]
    # anything that matched no namespace goes last
    ordered += [uri for uri in urisList if uri not in ordered]
    return ordered
Given an ordered list of namespaces prefixes order a list of uris based on that . Eg
61,345
def sort_uri_list_by_name(uri_list, bypassNamespace=False):
    """Sorts a list of uris.

    With bypassNamespace, sort only on the fragment (after '#') or the last
    path segment, ignoring the namespace part.
    """

    def get_last_bit(uri_string):
        # fragment after '#' when present, otherwise the last path segment
        try:
            return uri_string.split("#")[1]
        except:
            return uri_string.split("/")[-1]

    try:
        if bypassNamespace:
            return sorted(uri_list, key=lambda u: get_last_bit(u.__str__()))
        return sorted(uri_list)
    except:
        print("Error in <sort_uri_list_by_name>: possibly a UnicodeEncodeError")
        return uri_list
Sorts a list of uris
61,346
def inferNamespacePrefix(aUri):
    """From a URI return the last bit, simulating a namespace prefix when rendering the ontology."""
    text = aUri.__str__()
    try:
        # drop any '#' then take the last path segment
        return text.replace("#", "").split("/")[-1]
    except:
        return ""
From a URI returns the last bit and simulates a namespace prefix when rendering the ontology .
61,347
def niceString2uri(aUriString, namespaces=None):
    """From a string possibly using a namespace qname ('prefix:name'),
    return an rdflib URIRef instance."""
    namespaces = namespaces or []
    for pair in namespaces:
        if pair[0] and aUriString.find(pair[0].__str__() + ":") == 0:
            # prefix matched: expand it against the namespace base
            local_name = aUriString.split(":")[1]
            return rdflib.term.URIRef(pair[1] + local_name)
    # no prefix matched: treat the string as a full URI
    return rdflib.term.URIRef(aUriString)
From a string representing a URI possibly with the namespace qname returns a URI instance .
61,348
def shellPrintOverview(g, opts={'labels': False}):
    """overview of graph, invoked from the command line

    Prints namespaces, ontologies, class/property/SKOS trees and SHACL
    shapes. NOTE: the mutable default argument is only read, never
    modified, so it is safe here (kept for API compatibility).
    """
    ontologies = g.all_ontologies
    try:
        labels = opts['labels']
    except:
        labels = False
    print(Style.BRIGHT + "Namespaces\n-----------" + Style.RESET_ALL)
    if g.namespaces:
        for p, u in g.namespaces:
            row = Fore.GREEN + "%s" % p + Fore.BLACK + " %s" % u + Fore.RESET
            print(row)
    else:
        printDebug("None found", "comment")
    print(Style.BRIGHT + "\nOntologies\n-----------" + Style.RESET_ALL)
    if ontologies:
        for o in ontologies:
            o.printTriples()
    else:
        printDebug("None found", "comment")
    print(Style.BRIGHT + "\nClasses\n" + "-" * 10 + Style.RESET_ALL)
    if g.all_classes:
        g.printClassTree(showids=False, labels=labels)
    else:
        printDebug("None found", "comment")
    print(Style.BRIGHT + "\nProperties\n" + "-" * 10 + Style.RESET_ALL)
    if g.all_properties:
        g.printPropertyTree(showids=False, labels=labels)
    else:
        printDebug("None found", "comment")
    print(Style.BRIGHT + "\nSKOS Concepts\n" + "-" * 10 + Style.RESET_ALL)
    if g.all_skos_concepts:
        g.printSkosTree(showids=False, labels=labels)
    else:
        printDebug("None found", "comment")
    print(Style.BRIGHT + "\nSHACL Shapes\n" + "-" * 10 + Style.RESET_ALL)
    if g.all_shapes:
        for x in g.all_shapes:
            printDebug("%s" % (x.qname))
    else:
        printDebug("None found", "comment")
overview of graph invoked from command line
61,349
def try_sort_fmt_opts(rdf_format_opts_list, uri):
    """Reorder the format options based on the uri's file-type suffix (if
    available), so the most likely serialization is tried first when parsing."""
    _, ext = os.path.splitext(uri)
    preferred = {
        ".ttl": ['turtle', 'n3', 'nt', 'json-ld', 'rdfa', 'xml'],
        ".turtle": ['turtle', 'n3', 'nt', 'json-ld', 'rdfa', 'xml'],
        ".xml": ['xml', 'turtle', 'n3', 'nt', 'json-ld', 'rdfa'],
        ".rdf": ['xml', 'turtle', 'n3', 'nt', 'json-ld', 'rdfa'],
        ".nt": ['n3', 'nt', 'turtle', 'xml', 'json-ld', 'rdfa'],
        ".n3": ['n3', 'nt', 'turtle', 'xml', 'json-ld', 'rdfa'],
        ".json": ['json-ld', 'rdfa', 'n3', 'nt', 'turtle', 'xml'],
        ".jsonld": ['json-ld', 'rdfa', 'n3', 'nt', 'turtle', 'xml'],
        ".rdfa": ['rdfa', 'json-ld', 'n3', 'nt', 'turtle', 'xml'],
    }
    if ext in preferred:
        # fresh list each call, matching the original's behavior
        return list(preferred[ext])
    # unknown extension: keep the caller's ordering untouched
    return rdf_format_opts_list
reorder fmt options based on uri file type suffix - if available - so to test most likely serialization first when parsing some RDF
61,350
def ask_visualization():
    """ask user which viz output to use

    Loops until the user picks a valid 1-based index (returned 0-based) or
    quits with 'q' (returns "").
    """
    printDebug("Please choose an output format for the ontology visualization: (q=quit)\n", "important")
    while True:
        text = ""
        for viz in VISUALIZATIONS_LIST:
            text += "%d) %s\n" % (VISUALIZATIONS_LIST.index(viz) + 1, viz['Title'])
        var = input(text + ">")
        if var == "q":
            return ""
        else:
            try:
                n = int(var) - 1
                test = VISUALIZATIONS_LIST[n]  # raises if the index is out of range
                return n
            except:
                printDebug("Invalid selection. Please try again.", "red")
                continue
ask user which viz output to use
61,351
def select_visualization(n):
    """get viz choice based on a 1-based numerical index; exit on invalid input"""
    try:
        index = int(n) - 1
        VISUALIZATIONS_LIST[index]  # validate the choice before returning it
        return index
    except:
        printDebug("Invalid viz-type option. Valid options are:", "red")
        show_types()
        raise SystemExit(1)
get viz choice based on numerical index
61,352
def action_analyze(sources, endpoint=None, print_opts=False, verbose=False, extra=False, raw=False):
    """Load up a model into ontospy and analyze it

    :param sources: list of file paths / URIs (or a single endpoint URL
        when `endpoint` is set)
    :param extra: keep base schemas and implicit types/predicates
    :param raw: just serialize the merged graph, skipping analysis
    """
    for x in sources:
        click.secho("Parsing %s..." % str(x), fg='white')
    if extra:
        hide_base_schemas = False
        hide_implicit_types = False
        hide_implicit_preds = False
    else:
        hide_base_schemas = True
        hide_implicit_types = True
        hide_implicit_preds = True
    if raw:
        # raw mode: dump the serialized graph and stop
        o = Ontospy(uri_or_path=sources, verbose=verbose, build_all=False)
        s = o.serialize()
        print(s)
        return
    elif endpoint:
        # SPARQL endpoint: classes/properties must be built explicitly
        g = Ontospy(sparql_endpoint=sources[0], verbose=verbose, hide_base_schemas=hide_base_schemas, hide_implicit_types=hide_implicit_types, hide_implicit_preds=hide_implicit_preds)
        printDebug("Extracting classes info")
        g.build_classes()
        printDebug("..done")
        printDebug("Extracting properties info")
        g.build_properties()
        printDebug("..done")
    else:
        g = Ontospy(uri_or_path=sources, verbose=verbose, hide_base_schemas=hide_base_schemas, hide_implicit_types=hide_implicit_types, hide_implicit_preds=hide_implicit_preds)
    shellPrintOverview(g, print_opts)
Load up a model into ontospy and analyze it
61,353
def action_listlocal(all_details=True):
    """select a file from the local repo

    Shows the local library (full table or two-column view) and loops until
    the user picks a valid 1-based index; returns the chosen ontology uri,
    or None on quit.
    """
    options = get_localontologies()
    counter = 1  # NOTE: unused -- kept as-is
    if not options:
        printDebug("Your local library is empty. Use 'ontospy lib --bootstrap' to add some ontologies to it.")
        return
    else:
        if all_details:
            _print_table_ontologies()
        else:
            _print2cols_ontologies()
        while True:
            printDebug("------------------\nSelect a model by typing its number: (enter=quit)", "important")
            var = input()
            if var == "" or var == "q":
                return None
            else:
                try:
                    _id = int(var)
                    ontouri = options[_id - 1]
                    printDebug("---------\nYou selected: " + ontouri + "\n---------", "green")
                    return ontouri
                except:
                    printDebug("Please enter a valid option.", "comment")
                    continue
select a file from the local repo
61,354
def action_import(location, verbose=True):
    """Import a single ontology (local path or URL) into the local repo.

    Downloads remote locations (following redirects) or copies local files
    into the models folder, parses the result with Ontospy, caches it, and
    returns the Ontospy graph, or None on failure.

    Fix: the Python 2 branch called ``urllib2.request`` (lowercase), which
    raised AttributeError and always forced the fallback path; it should be
    ``urllib2.Request``.
    """
    location = str(location)
    ONTOSPY_LOCAL_MODELS = get_home_location()
    fullpath = ""
    try:
        if location.startswith("www."):
            # normalize bare www. addresses to a full URL
            location = "http://%s" % str(location)
        if location.startswith("http"):
            headers = {'Accept': "application/rdf+xml"}
            try:
                # Python 2 style (fixed: Request, not request)
                req = urllib2.Request(location, headers=headers)
                res = urllib2.urlopen(req)
            except:
                # Python 3 fallback
                req = urllib.request.Request(location, headers=headers)
                res = urlopen(req)
            final_location = res.geturl()  # may differ after redirects
            printDebug("Saving data from <%s>" % final_location, "green")
            filename = location.replace("http://", "").replace("/", "_")
            if not filename.lower().endswith(('.rdf', '.owl', '.rdfs', '.ttl', '.n3')):
                filename = filename + ".rdf"
            fullpath = ONTOSPY_LOCAL_MODELS + "/" + filename
            file_ = open(fullpath, 'wb')
            file_.write(res.read())
            file_.close()
        else:
            if os.path.isfile(location):
                filename = location.split("/")[-1] or location.split("/")[-2]
                fullpath = ONTOSPY_LOCAL_MODELS + "/" + filename
                shutil.copy(location, fullpath)
            else:
                raise ValueError('The location specified is not a file.')
    except:
        printDebug("Error retrieving file. Please make sure <%s> is a valid location." % location, "important")
        if os.path.exists(fullpath):
            os.remove(fullpath)
        return None
    try:
        g = Ontospy(fullpath, verbose=verbose)
    except:
        g = None
        # remove the broken copy so the library stays clean
        if os.path.exists(fullpath):
            os.remove(fullpath)
        printDebug("Error parsing file. Please make sure %s contains valid RDF." % location, "important")
    if g:
        printDebug("Caching...", "red")
        do_pickle_ontology(filename, g)
        printDebug("----------\n...completed!", "important")
    return g
Import files into the local repo
61,355
def action_import_folder(location):
    """Import every non-hidden file found in a local folder.

    Fix: the original ``return action_import(filepath)`` inside the loop made
    the function stop after the first file, contradicting its purpose of
    importing all files. Now every file is imported; returns the Ontospy
    graph of the last import attempt (possibly None), or None when *location*
    is not a directory.
    """
    if os.path.isdir(location):
        onlyfiles = [f for f in os.listdir(location) if os.path.isfile(os.path.join(location, f))]
        result = None
        for file_name in onlyfiles:
            if not file_name.startswith("."):
                filepath = os.path.join(location, file_name)
                click.secho("\n---------\n" + filepath + "\n---------", fg='red')
                result = action_import(filepath)
        return result
    else:
        printDebug("Not a valid directory", "important")
        return None
Try to import all files from a local folder
61,356
def action_webimport(hrlinetop=False):
    """Interactively select an online ontology directory and import from it.

    Shows the available directories (LOV, prefix.cc), asks for an optional
    keyword filter, then delegates to the matching importer. Returns None if
    the user quits, True otherwise.
    """
    DIR_OPTIONS = {1: "http://lov.okfn.org", 2: "http://prefix.cc/popular/"}
    selection = None
    while True:
        if hrlinetop:
            printDebug("----------")
        text = "Please select which online directory to scan: (enter=quit)\n"
        for x in DIR_OPTIONS:
            text += "%d) %s\n" % (x, DIR_OPTIONS[x])
        var = input(text + "> ")
        if var == "q" or var == "":
            return None
        else:
            try:
                selection = int(var)
                test = DIR_OPTIONS[selection]  # KeyError here means invalid number
                break
            except:
                printDebug("Invalid selection. Please try again.", "important")
                continue
    printDebug("----------")
    text = "Search for a specific keyword? (enter=show all)\n"
    var = input(text + "> ")
    keyword = var
    try:
        if selection == 1:
            _import_LOV(keyword=keyword)
        elif selection == 2:
            _import_PREFIXCC(keyword=keyword)
    except:
        printDebug("Sorry, the online repository seems to be unreachable.")
    return True
select from the available online directories for import
61,357
def action_bootstrap(verbose=False):
    """Bootstrap the local repo with the predefined BOOTSTRAP_ONTOLOGIES list.

    Lists the ontologies, asks for confirmation, then imports each one.
    Returns True on completion, False if the user declines.
    """
    printDebug("The following ontologies will be imported:")
    printDebug("--------------")
    count = 0
    for s in BOOTSTRAP_ONTOLOGIES:
        count += 1
        print(count, "<%s>" % s)
    printDebug("--------------")
    printDebug("Note: this operation may take several minutes.")
    printDebug("Proceed? [Y/N]")
    var = input()
    if var == "y" or var == "Y":
        for uri in BOOTSTRAP_ONTOLOGIES:
            try:
                printDebug("--------------")
                action_import(uri, verbose)
            except:
                # best-effort: a failed ontology does not stop the others
                printDebug("OPS... An Unknown Error Occurred - Aborting Installation")
        printDebug("\n==========\n" + "Bootstrap command completed.", "important")
        return True
    else:
        printDebug("--------------")
        printDebug("Goodbye")
        return False
Bootstrap the local REPO with a few cool ontologies
61,358
def action_update_library_location(_location):
    """Set the folder that contains the models for the local library.

    Persists the new directory into config.ini under [models]/dir.
    Returns the location on success, None when it is not a directory.
    """
    printDebug("Old location: '%s'" % get_home_location(), "comment")
    if not os.path.isdir(_location):
        return None
    parser = SafeConfigParser()
    ini_path = ONTOSPY_LOCAL + '/config.ini'
    parser.read(ini_path)
    if not parser.has_section('models'):
        parser.add_section('models')
    parser.set('models', 'dir', _location)
    with open(ini_path, 'w') as handle:
        parser.write(handle)
    return _location
Sets the folder that contains models for the local library
61,359
def action_cache_reset():
    """Delete the cache folder, then re-generate cached versions of all
    models in the local repo.

    Fix: the cache was previously removed *before* the "Proceed? (y/n)"
    prompt, so answering "n" still destroyed the cache. The destructive
    rmtree now only runs after the user confirms.
    """
    printDebug()
    printDebug()
    ONTOSPY_LOCAL_MODELS = get_home_location()
    var = input(Style.BRIGHT + "=====\nProceed? (y/n) " + Style.RESET_ALL)
    if var == "y":
        # destructive step, deliberately gated behind the confirmation
        shutil.rmtree(ONTOSPY_LOCAL_CACHE_TOP)
        repo_contents = get_localontologies()
        print(Style.BRIGHT + "\n=====\n%d ontologies available in the local library\n=====" % len(repo_contents) + Style.RESET_ALL)
        for onto in repo_contents:
            fullpath = ONTOSPY_LOCAL_MODELS + "/" + onto
            try:
                print(Fore.RED + "\n=====\n" + onto + Style.RESET_ALL)
                print("Loading graph...")
                g = Ontospy(fullpath)
                print("Loaded ", fullpath)
            except:
                g = None
                print("Error parsing file. Please make sure %s contains valid RDF." % fullpath)
            if g:
                print("Caching...")
                do_pickle_ontology(onto, g)
        print(Style.BRIGHT + "===Completed===" + Style.RESET_ALL)
    else:
        print("Goodbye")
Delete all contents from the cache folder, then re-generate cached versions of all models in the local repo.
61,360
def compare_ordereddict(self, X, Y):
    """Compare two OrderedDict instances, additionally checking key order."""
    outcome = self.compare_dicts(X, Y)
    if isinstance(outcome, DeepExplanation):
        return outcome
    # contents already match; verify the keys appear in the same order
    for (x_key, _), (y_key, _) in zip(X.items(), Y.items()):
        if x_key != y_key:
            ctx = self.get_context()
            msg = "X{0} and Y{1} are in a different order".format(red(ctx.current_X_keys), green(ctx.current_Y_keys))
            return DeepExplanation(msg)
    return True
Compares two instances of an OrderedDict .
61,361
def stub(base_class=None, **attributes):
    """Create an instance of an on-the-fly class named ``<Base>Stub``,
    with the given keyword arguments set as class attributes.

    Fix: the ``__new__`` lambda forwarded keyword args with ``*kw`` (which
    unpacks the dict's *keys* as positional args); it now correctly uses
    ``**kw``.
    """
    if base_class is None:
        base_class = object
    members = {
        "__init__": lambda self: None,
        "__new__": lambda *args, **kw: object.__new__(*args, **kw),
        "__metaclass__": None,
    }
    members.update(attributes)
    # build the class and immediately instantiate it
    return type(f"{base_class.__name__}Stub", (base_class,), members)()
Creates a Python class on the fly, with the given keyword arguments set as class attributes accessible via `.attrname`.
61,362
def assertion(func):
    """Extend sure with a custom assertion method.

    Wraps *func* via assertionmethod, registers it on AssertionBuilder
    under its own name, and returns the wrapped function.
    """
    wrapped = assertionmethod(func)
    setattr(AssertionBuilder, wrapped.__name__, wrapped)
    return wrapped
Extend sure with a custom assertion method .
61,363
def chainproperty(func):
    """Extend sure with a custom chain property.

    Wraps *func* via assertionproperty, registers it on AssertionBuilder
    under its getter's name, and returns the wrapped property.
    """
    prop = assertionproperty(func)
    setattr(AssertionBuilder, prop.fget.__name__, prop)
    return prop
Extend sure with a custom chain property .
61,364
def equal(self, what, epsilon=None):
    """Deep-compare self.obj against the expected value *what*.

    Honors self.negative (asserting difference instead of equality).
    Returns True on success, raises AssertionError otherwise.
    """
    error = False
    comparison = None
    try:
        comparison = DeepComparison(self.obj, what, epsilon).compare()
    except AssertionError as exc:
        error = exc
    if isinstance(comparison, DeepExplanation):
        # comparison produced a structured explanation of the mismatch
        error = comparison.get_assertion(self.obj, what)
    if self.negative:
        if error:
            return True
        msg = '%s should differ from %s, but is the same thing'
        raise AssertionError(msg % (safe_repr(self.obj), safe_repr(what)))
    if not error:
        return True
    raise error
compares given object X with an expected Y object .
61,365
def find_dependencies(self, dependent_rev, recurse=None):
    """Find all dependencies of the given revision, recursively traversing
    the dependency tree if requested.

    Maintains a worklist (self.todo / self.todo_d) seeded with the starting
    commit; processed commits are recorded in self.done / self.done_d.
    Listeners are notified as commits are discovered and completed.
    """
    if recurse is None:
        recurse = self.options.recurse
    # NOTE(review): `recurse` is computed but not used in this body —
    # presumably consulted by callees/listeners; verify.
    try:
        dependent = self.get_commit(dependent_rev)
    except InvalidCommitish as e:
        abort(e.message())
    self.todo.append(dependent)
    self.todo_d[dependent.hex] = True
    first_time = True
    while self.todo:
        sha1s = [commit.hex[:8] for commit in self.todo]
        if first_time:
            self.logger.info("Initial TODO list: %s" % " ".join(sha1s))
            first_time = False
        else:
            self.logger.info(" TODO list now: %s" % " ".join(sha1s))
        dependent = self.todo.pop(0)
        dependent_sha1 = dependent.hex
        del self.todo_d[dependent_sha1]
        self.logger.info(" Processing %s from TODO list" % dependent_sha1[:8])
        if dependent_sha1 in self.done_d:
            # already fully processed on an earlier pass
            self.logger.info(" %s already done previously" % dependent_sha1)
            continue
        self.notify_listeners('new_commit', dependent)
        # only the first parent is examined here
        parent = dependent.parents[0]
        self.find_dependencies_with_parent(dependent, parent)
        self.done.append(dependent_sha1)
        self.done_d[dependent_sha1] = True
        self.logger.info(" Found all dependencies for %s" % dependent_sha1[:8])
        dependencies = self.dependencies.get(dependent_sha1, {})
        self.notify_listeners('dependent_done', dependent, dependencies)
    self.logger.info("Finished processing TODO list")
    self.notify_listeners('all_done')
Find all dependencies of the given revision recursively traversing the dependency tree if requested .
61,366
def find_dependencies_with_parent(self, dependent, parent):
    """Find all dependencies of *dependent* caused by *parent*.

    Called once per parent, so merge commits with multiple parents are
    handled via multiple calls. Each hunk of the diff is blamed against
    the parent to discover the commits the hunk's lines came from.
    """
    self.logger.info(" Finding dependencies of %s via parent %s" % (dependent.hex[:8], parent.hex[:8]))
    diff = self.repo.diff(parent, dependent, context_lines=self.options.context_lines)
    for patch in diff:
        path = patch.delta.old_file.path
        self.logger.info(" Examining hunks in %s" % path)
        for hunk in patch.hunks:
            self.blame_diff_hunk(dependent, parent, path, hunk)
Find all dependencies of the given revision caused by the given parent commit . This will be called multiple times for merge commits which have multiple parents .
61,367
def blame_diff_hunk(self, dependent, parent, path, hunk):
    """Run git blame on the parts of the hunk which exist in the older
    commit in the diff.

    The commits reported by blame are the ones *dependent* depends on:
    without their lines the hunk would not apply correctly.
    """
    line_range_before = "-%d,%d" % (hunk.old_start, hunk.old_lines)
    line_range_after = "+%d,%d" % (hunk.new_start, hunk.new_lines)
    self.logger.info(" Blaming hunk %s @ %s (listed below)" % (line_range_before, parent.hex[:8]))
    if not self.tree_lookup(path, parent):
        # file does not exist in the parent commit; nothing to blame
        return
    blame = self.run_blame(hunk, parent, path)
    dependent_sha1 = dependent.hex
    self.register_new_dependent(dependent, dependent_sha1)
    line_to_culprit = {}
    for line in blame.split('\n'):
        self.process_hunk_line(dependent, dependent_sha1, parent, path, line, line_to_culprit)
    self.debug_hunk(line_range_before, line_range_after, hunk, line_to_culprit)
Run git blame on the parts of the hunk which exist in the older commit in the diff . The commits generated by git blame are the commits which the newer commit in the diff depends on because without the lines from those commits the hunk would not apply correctly .
61,368
def tree_lookup(self, target_path, commit):
    """Navigate to the tree or blob pointed to by *target_path* in *commit*.

    Each git tree only lists its own directory, so the path is walked one
    segment at a time. Returns the final tree/blob, or None if any segment
    is missing or a non-tree is reached mid-path.
    """
    node = commit.tree
    walked = ''
    for segment in target_path.split("/"):
        if not isinstance(node, pygit2.Tree):
            self.logger.debug(" %s not a tree in %s" % (node, commit.hex[:8]))
            return None
        if segment not in node:
            self.logger.debug(" %s not in %s in %s" % (segment, walked, commit.hex[:8]))
            return None
        node = self.repo[node[segment].oid]
        walked = walked + '/' + segment if walked else segment
    return node
Navigate to the tree or blob object pointed to by the given target path for the given commit . This is necessary because each git tree only contains entries for the directory it refers to not recursively for all subdirectories .
61,369
def abbreviate_sha1(cls, sha1):
    """Uniquely abbreviate *sha1* using `git rev-parse --short`."""
    output = subprocess.check_output(['git', 'rev-parse', '--short', sha1], universal_newlines=True)
    return output.strip()
Uniquely abbreviates the given SHA1 .
61,370
def describe(cls, sha1):
    """Return a human-readable representation of *sha1* via `git describe`.

    Strips the heads/tags/remotes prefix and the trailing -g<sha> suffix;
    returns '' when no tag can describe the commit.
    """
    cmd = ['git', 'describe', '--all', '--long', sha1]
    try:
        raw = subprocess.check_output(cmd, stderr=subprocess.STDOUT, universal_newlines=True)
    except subprocess.CalledProcessError as err:
        if err.output.find('No tags can describe') != -1:
            return ''
        raise
    desc = raw.strip()
    desc = re.sub(r'^(heads|tags|remotes)/', '', desc)
    return re.sub(r'-g[0-9a-f]{7,}$', '', desc)
Returns a human - readable representation of the given SHA1 .
61,371
def refs_to(cls, sha1, repo):
    """Return the shorthand names of all refs pointing at *sha1*."""
    matching = []
    for refname in repo.listall_references():
        symref = repo.lookup_reference(refname)
        resolved = symref.resolve()
        # compare the commit the ref ultimately resolves to
        if repo.get(resolved.target).hex == sha1:
            matching.append(symref.shorthand)
    return matching
Returns all refs pointing to the given SHA1 .
61,372
def add_commit(self, commit):
    """Add *commit* to the commits array if it doesn't already exist,
    and return the commit's index in the array."""
    sha1 = commit.hex
    if sha1 in self._commits:
        return self._commits[sha1]
    title, separator, body = commit.message.partition("\n")
    # use a distinct name for the serialized dict (original shadowed `commit`)
    entry = {
        'explored': False,
        'sha1': sha1,
        'name': GitUtils.abbreviate_sha1(sha1),
        'describe': GitUtils.describe(sha1),
        'refs': GitUtils.refs_to(sha1, self.repo()),
        'author_name': commit.author.name,
        'author_mail': commit.author.email,
        'author_time': commit.author.time,
        'author_offset': commit.author.offset,
        'committer_name': commit.committer.name,
        'committer_mail': commit.committer.email,
        'committer_time': commit.committer.time,
        'committer_offset': commit.committer.offset,
        'title': title,
        'separator': separator,
        'body': body.lstrip("\n"),
    }
    self._json['commits'].append(entry)
    self._commits[sha1] = len(self._json['commits']) - 1
    return self._commits[sha1]
Adds the commit to the commits array if it doesn t already exist and returns the commit s index in the array .
61,373
def get(self, path, params=None, headers=None):
    """Perform a GET request, optionally providing query-string params."""
    resp = requests.get(self._url_for(path), params=params, headers=self._headers(headers))
    self._handle_errors(resp)
    return resp
Perform a GET request optionally providing query - string params .
61,374
def post(self, path, body, headers=None):
    """Perform a POST request; *body* is JSON-encoded before sending."""
    payload = json.dumps(body)
    resp = requests.post(self._url_for(path), data=payload, headers=self._headers(headers))
    self._handle_errors(resp)
    return resp
Perform a POST request providing a body which will be JSON - encoded .
61,375
def create(self, params=None, headers=None):
    """Create a creditor bank account.

    On an idempotent-creation conflict the already-existing resource is
    fetched and returned instead of raising.
    """
    path = '/creditor_bank_accounts'
    if params is not None:
        # wrap params under the API envelope key
        params = {self._envelope_key(): params}
    try:
        resp = self._perform_request('POST', path, params, headers, retry_failures=True)
    except errors.IdempotentCreationConflictError as err:
        return self.get(identity=err.conflicting_resource_id, params=params, headers=headers)
    return self._resource_for(resp)
Create a creditor bank account .
61,376
def list(self, params=None, headers=None):
    """List creditor bank accounts."""
    resp = self._perform_request('GET', '/creditor_bank_accounts', params, headers, retry_failures=True)
    return self._resource_for(resp)
List creditor bank accounts .
61,377
def get(self, identity, params=None, headers=None):
    """Fetch a single creditor bank account by its identity."""
    path = self._sub_url_params('/creditor_bank_accounts/:identity', {'identity': identity})
    resp = self._perform_request('GET', path, params, headers, retry_failures=True)
    return self._resource_for(resp)
Get a single creditor bank account .
61,378
def disable(self, identity, params=None, headers=None):
    """Disable a creditor bank account.

    Note: retry_failures is False for this action.
    """
    path = self._sub_url_params('/creditor_bank_accounts/:identity/actions/disable', {'identity': identity})
    if params is not None:
        params = {'data': params}
    resp = self._perform_request('POST', path, params, headers, retry_failures=False)
    return self._resource_for(resp)
Disable a creditor bank account .
61,379
def create(self, params=None, headers=None):
    """Create a mandate PDF."""
    if params is not None:
        # wrap params under the API envelope key
        params = {self._envelope_key(): params}
    resp = self._perform_request('POST', '/mandate_pdfs', params, headers, retry_failures=True)
    return self._resource_for(resp)
Create a mandate PDF .
61,380
def update(self, identity, params=None, headers=None):
    """Update a payment by its identity."""
    path = self._sub_url_params('/payments/:identity', {'identity': identity})
    if params is not None:
        # wrap params under the API envelope key
        params = {self._envelope_key(): params}
    resp = self._perform_request('PUT', path, params, headers, retry_failures=True)
    return self._resource_for(resp)
Update a payment .
61,381
def resolve_config(self):
    """Resolve configuration params into native namedtuple instances.

    Hue names are first mapped to their KEYWORDS values, then the hues,
    options and labels sections are frozen into namedtuples.
    """
    conf = self.load_config(self.force_default)
    for key in conf['hues']:
        conf['hues'][key] = getattr(KEYWORDS, conf['hues'][key])

    def as_tuples(name, obj):
        # freeze a dict into an immutable namedtuple
        return namedtuple(name, obj.keys())(**obj)

    self.hues = as_tuples('Hues', conf['hues'])
    self.opts = as_tuples('Options', conf['options'])
    self.labels = as_tuples('Labels', conf['labels'])
Resolve configuration params to native instances
61,382
def apply(funcs, stack):
    """Apply each function in *funcs* to the stack, left to right,
    passing the resulting stack on to the next function."""
    state = stack
    for fn in funcs:
        state = fn(state)
    return state
Apply functions to the stack, passing the resulting stack on to the next state.
61,383
def colorize(string, stack):
    """Apply optimal ANSI escape sequences (derived from *stack*) to *string*."""
    codes = optimize(stack)
    if not len(codes):
        # nothing to style; return the string untouched
        return string
    prefix = SEQ % ';'.join(map(str, codes))
    suffix = SEQ % STYLE.reset
    return prefix + string + suffix
Apply optimal ANSI escape sequences to the string .
61,384
def compute_agreement_score(num_matches, num1, num2):
    """Agreement score used as the criterion to match unit1 and unit2.

    Computed as matches divided by the union size (num1 + num2 - matches);
    returns 0 when the union is empty.
    """
    union_size = num1 + num2 - num_matches
    return num_matches / union_size if union_size != 0 else 0
Agreement score is used as a criteria to match unit1 and unit2 .
61,385
def collect_results(working_folder):
    """Collect sorting results found under working_folder/output_folders.

    Returns a nested dict: {recording_name: {sorter_name: result}}.
    """
    results = {}
    base = Path(working_folder) / 'output_folders'
    for rec_name in os.listdir(base):
        rec_dir = base / rec_name
        if not os.path.isdir(rec_dir):
            continue
        results[rec_name] = {}
        for sorter_name in os.listdir(rec_dir):
            out_dir = rec_dir / sorter_name
            if not os.path.isdir(out_dir):
                continue
            SorterClass = sorter_dict[sorter_name]
            results[rec_name][sorter_name] = SorterClass.get_result_from_folder(out_dir)
    return results
Collect results in a working_folder .
61,386
def run_sorter(sorter_name_or_class, recording, output_folder=None, delete_output_folder=False, grouping_property=None, parallel=False, debug=False, **params):
    """Run a spike sorter on *recording* and return the resulting extractor.

    *sorter_name_or_class* may be a registered sorter name (str) or one of
    the classes in sorter_full_list; anything else raises ValueError.
    """
    if isinstance(sorter_name_or_class, str):
        SorterClass = sorter_dict[sorter_name_or_class]
    elif sorter_name_or_class in sorter_full_list:
        SorterClass = sorter_name_or_class
    else:
        raise ValueError('Unknown sorter')
    sorter = SorterClass(recording=recording, output_folder=output_folder, grouping_property=grouping_property, parallel=parallel, debug=debug, delete_output_folder=delete_output_folder)
    sorter.set_params(**params)
    sorter.run()
    return sorter.get_result()
Generic function to run a sorter via function approach .
61,387
def compute_performance(SC, verbose=True, output='dict'):
    """Return performance metrics for a sorting comparison.

    All values are percentages derived from SC._counts. With
    output='dict' a plain dict is returned; 'pandas' returns a Series.
    """
    counts = SC._counts
    tot1 = counts['TOT_ST1']
    tp_rate = float(counts['TP']) / tot1 * 100
    cl_rate = float(counts['CL']) / tot1 * 100
    fn_rate = float(counts['FN']) / tot1 * 100
    fp_st1 = float(counts['FP']) / tot1 * 100
    fp_st2 = float(counts['FP']) / counts['TOT_ST2'] * 100
    # derived metrics, expressed as percentages of the rate totals
    performance = {
        'tp': tp_rate,
        'cl': cl_rate,
        'fn': fn_rate,
        'fp_st1': fp_st1,
        'fp_st2': fp_st2,
        'accuracy': tp_rate / (tp_rate + fn_rate + fp_st1) * 100,
        'sensitivity': tp_rate / (tp_rate + fn_rate) * 100,
        'precision': tp_rate / (tp_rate + fp_st1) * 100,
        'miss_rate': fn_rate / (tp_rate + fn_rate) * 100,
        'false_disc_rate': fp_st1 / (tp_rate + fp_st1) * 100,
    }
    if verbose:
        print(_txt_performance.format(**performance))
    if output == 'dict':
        return performance
    elif output == 'pandas':
        return pd.Series(performance)
Return some performance value for comparison .
61,388
def _complex_response_to_error_adapter(self, body):
    """Convert a list of error responses into ErrorDetails objects.

    Returns a tuple of (error list, meta object from the body).
    """
    meta = body.get('meta')
    details = [
        ErrorDetails(item['status'], item['code'], item['title'])
        for item in body.get('errors')
    ]
    return details, meta
Convert a list of error responses .
61,389
def _adapt_response(self, response):
    """Convert various error responses to standardized ErrorDetails.

    Delegates to the parent adapter, then returns only the first error
    together with the meta object.
    """
    errors, meta = super(ServerError, self)._adapt_response(response)
    return errors[0], meta
Convert various error responses to standardized ErrorDetails .
61,390
def _prepare(self):
    """Build a URL and return a PreparedRequest for this request.

    Raises UberIllegalState when the HTTP method is not allowed.
    """
    if self.method not in http.ALLOWED_METHODS:
        raise UberIllegalState('Unsupported HTTP Method.')
    headers = self._build_headers(self.method, self.auth_session)
    url = build_url(self.api_host, self.path)
    data, params = generate_data(self.method, self.args)
    return generate_prepared_request(self.method, url, headers, data, params, self.handlers)
Builds a URL and return a PreparedRequest .
61,391
def _send(self, prepared_request):
    """Send a PreparedRequest to the server and wrap the reply in a Response."""
    raw = Session().send(prepared_request)
    return Response(raw)
Send a PreparedRequest to the server .
61,392
def _build_headers(self, method, auth_session):
    """Create the request headers (auth + user agent, plus content headers
    for body-carrying methods).

    Raises UberIllegalState when the token type or token is invalid.
    """
    token_type = auth_session.token_type
    # prefer the server token; fall back to the OAuth2 access token
    token = auth_session.server_token or auth_session.oauth2credential.access_token
    if not self._authorization_headers_valid(token_type, token):
        raise UberIllegalState('Invalid token_type or token.')
    headers = {
        'Authorization': ' '.join([token_type, token]),
        'X-Uber-User-Agent': 'Python Rides SDK v{}'.format(LIB_VERSION),
    }
    if method in http.BODY_METHODS:
        headers.update(http.DEFAULT_CONTENT_HEADERS)
    return headers
Create headers for the request .
61,393
def authorization_code_grant_flow(credentials, storage_filename):
    """Get an access token through the Authorization Code Grant flow.

    Prompts the user to authorize in a browser, exchanges the pasted
    redirect URL for a session, persists the credential data to
    *storage_filename* as YAML, and returns a sandboxed UberRidesClient.
    Returns None if the code exchange fails.
    """
    auth_flow = AuthorizationCodeGrant(
        credentials.get('client_id'),
        credentials.get('scopes'),
        credentials.get('client_secret'),
        credentials.get('redirect_url'),
    )
    auth_url = auth_flow.get_authorization_url()
    login_message = 'Login as a driver and grant access by going to:\n\n{}\n'
    login_message = login_message.format(auth_url)
    response_print(login_message)
    redirect_url = 'Copy the URL you are redirected to and paste here:\n\n'
    result = input(redirect_url).strip()
    try:
        session = auth_flow.get_session(result)
    except (ClientError, UberIllegalState) as error:
        fail_print(error)
        return
    credential = session.oauth2credential
    credential_data = {
        'client_id': credential.client_id,
        'redirect_url': credential.redirect_url,
        'access_token': credential.access_token,
        'expires_in_seconds': credential.expires_in_seconds,
        'scopes': list(credential.scopes),
        'grant_type': credential.grant_type,
        'client_secret': credential.client_secret,
        'refresh_token': credential.refresh_token,
    }
    # persist the credential for later reuse
    with open(storage_filename, 'w') as yaml_file:
        yaml_file.write(safe_dump(credential_data, default_flow_style=False))
    return UberRidesClient(session, sandbox_mode=True)
Get an access token through Authorization Code Grant .
61,394
def _request_access_token(grant_type, client_id=None, client_secret=None, scopes=None, code=None, redirect_url=None, refresh_token=None):
    """Make an HTTP POST to the token endpoint to request an access token.

    Raises ClientError when the server does not answer with HTTP 200.
    """
    url = build_url(auth.AUTH_HOST, auth.ACCESS_TOKEN_PATH)
    if isinstance(scopes, set):
        # the endpoint expects a space-separated scope string
        scopes = ' '.join(scopes)
    payload = {
        'grant_type': grant_type,
        'client_id': client_id,
        'client_secret': client_secret,
        'scope': scopes,
        'code': code,
        'redirect_uri': redirect_url,
        'refresh_token': refresh_token,
    }
    response = post(url=url, data=payload)
    if response.status_code != codes.ok:
        message = 'Failed to request access token: {}.'
        raise ClientError(response, message.format(response.reason))
    return response
Make an HTTP POST to request an access token .
61,395
def refresh_access_token(credential):
    """Use a refresh token to request a new access token.

    For authorization-code credentials the refresh token is exchanged; for
    client-credentials grants a fresh token is simply requested. Returns a
    new Session; raises UberIllegalState for unsupported grant types.
    """
    if credential.grant_type == auth.AUTHORIZATION_CODE_GRANT:
        response = _request_access_token(
            grant_type=auth.REFRESH_TOKEN,
            client_id=credential.client_id,
            client_secret=credential.client_secret,
            redirect_url=credential.redirect_url,
            refresh_token=credential.refresh_token,
        )
        oauth2credential = OAuth2Credential.make_from_response(
            response=response,
            grant_type=credential.grant_type,
            client_id=credential.client_id,
            client_secret=credential.client_secret,
            redirect_url=credential.redirect_url,
        )
        return Session(oauth2credential=oauth2credential)
    elif credential.grant_type == auth.CLIENT_CREDENTIALS_GRANT:
        # no refresh token involved; just request a new token
        response = _request_access_token(
            grant_type=auth.CLIENT_CREDENTIALS_GRANT,
            client_id=credential.client_id,
            client_secret=credential.client_secret,
            scopes=credential.scopes,
        )
        oauth2credential = OAuth2Credential.make_from_response(
            response=response,
            grant_type=credential.grant_type,
            client_id=credential.client_id,
            client_secret=credential.client_secret,
        )
        return Session(oauth2credential=oauth2credential)
    message = '{} Grant Type does not support Refresh Tokens.'
    message = message.format(credential.grant_type)
    raise UberIllegalState(message)
Use a refresh token to request a new access token .
61,396
def _build_authorization_request_url(self, response_type, redirect_url, state=None):
    """Form the URL used to request an auth code or access token.

    Raises UberIllegalState for unsupported response types.
    """
    if response_type not in auth.VALID_RESPONSE_TYPES:
        message = '{} is not a valid response type.'
        raise UberIllegalState(message.format(response_type))
    # ordered so the query string is deterministic
    query = OrderedDict([
        ('scope', ' '.join(self.scopes)),
        ('state', state),
        ('redirect_uri', redirect_url),
        ('response_type', response_type),
        ('client_id', self.client_id),
    ])
    return build_url(auth.AUTH_HOST, auth.AUTHORIZE_PATH, query)
Form URL to request an auth code or access token .
61,397
def _extract_query(self, redirect_url):
    """Extract query parameters from a redirect URL.

    Implicit-grant flows carry the parameters in the URL fragment instead
    of the query string; each parameter is reduced to its first value.
    """
    parsed = urlparse(redirect_url)
    raw = parsed.fragment if isinstance(self, ImplicitGrant) else parsed.query
    return {key: values[0] for key, values in parse_qs(raw).items()}
Extract query parameters from a url .
61,398
def _generate_state_token ( self , length = 32 ) : choices = ascii_letters + digits return '' . join ( SystemRandom ( ) . choice ( choices ) for _ in range ( length ) )
Generate CSRF State Token .
61,399
def get_authorization_url(self):
    """Start the Authorization Code Grant by building the authorize URL."""
    return self._build_authorization_request_url(
        response_type=auth.CODE_RESPONSE_TYPE,
        redirect_url=self.redirect_url,
        state=self.state_token,
    )
Start the Authorization Code Grant process .