idx
int64 0
63k
| question
stringlengths 61
4.03k
| target
stringlengths 6
1.23k
|
---|---|---|
61,200 |
def _next(self):
    """Return the next free variable ID, skipping occupied intervals if any."""
    self.top += 1
    while self._occupied:
        lo, hi = self._occupied[0]
        if self.top < lo:
            break
        if self.top <= hi:
            # jump past the occupied interval
            self.top = hi + 1
        self._occupied.pop(0)
    return self.top
|
Get next variable ID . Skip occupied intervals if any .
|
61,201 |
def from_file(self, fname, comment_lead=['c'], compressed_with='use_ext'):
    """Read a CNF formula in DIMACS format from a file.

    :param fname: file name to read from.
    :param comment_lead: characters that start comment lines.
    :param compressed_with: compression type; 'use_ext' presumably detects
        gzip/bzip2/lzma from the file extension — handled by FileObject.
    """
    with FileObject(fname, mode='r', compression=compressed_with) as fobj:
        self.from_fp(fobj.fp, comment_lead)
|
Read a CNF formula from a file in the DIMACS format . A file name is expected as an argument . A default argument is comment_lead for parsing comment lines . A given file can be compressed by either gzip bzip2 or lzma .
|
61,202 |
def from_fp(self, file_pointer, comment_lead=['c']):
    """Read a CNF formula in DIMACS format from an open file pointer.

    :param file_pointer: open file-like object to read from.
    :param comment_lead: characters that start comment lines.
    """
    self.nv = 0
    self.clauses = []
    self.comments = []
    # 'p' is added so the problem line is not parsed as a clause
    comment_lead = tuple('p') + tuple(comment_lead)
    for line in file_pointer:
        line = line.strip()
        if line:
            if line[0] not in comment_lead:
                # drop the trailing '0' clause terminator
                cl = [int(l) for l in line.split()[:-1]]
                self.nv = max([abs(l) for l in cl] + [self.nv])
                self.clauses.append(cl)
            elif not line.startswith('p cnf '):
                # keep every comment except the problem line
                self.comments.append(line)
|
Read a CNF formula from a file pointer . A file pointer should be specified as an argument . The only default argument is comment_lead which can be used for parsing specific comment lines .
|
61,203 |
def from_clauses(self, clauses):
    """Deep-copy a list of clauses into this CNF object and update self.nv."""
    self.clauses = copy.deepcopy(clauses)
    for clause in self.clauses:
        self.nv = max(max(map(abs, clause), default=0), self.nv)
|
This methods copies a list of clauses into a CNF object .
|
61,204 |
def to_fp(self, file_pointer, comments=None):
    """Write the CNF formula in DIMACS format to an open file pointer.

    :param comments: optional extra comment lines to emit after the
        formula's own comments.
    """
    for comment in self.comments:
        print(comment, file=file_pointer)
    if comments:
        for comment in comments:
            print(comment, file=file_pointer)
    print('p cnf', self.nv, len(self.clauses), file=file_pointer)
    for clause in self.clauses:
        print(' '.join(map(str, clause)), '0', file=file_pointer)
|
The method can be used to save a CNF formula into a file pointer . The file pointer is expected as an argument . Additionally supplementary comment lines can be specified in the comments parameter .
|
61,205 |
def append(self, clause):
    """Add one clause to the CNF formula, updating self.nv."""
    for lit in clause:
        if abs(lit) > self.nv:
            self.nv = abs(lit)
    self.clauses.append(clause)
|
Add one more clause to CNF formula . This method additionally updates the number of variables i . e . variable self . nv used in the formula .
|
61,206 |
def from_fp(self, file_pointer, comment_lead=['c']):
    """Read a WCNF formula in DIMACS format from an open file pointer.

    :param file_pointer: open file-like object to read from.
    :param comment_lead: characters that start comment lines.
    """
    self.nv = 0
    self.hard = []
    self.soft = []
    self.wght = []
    self.topw = 0
    self.comments = []
    # 'p' is added so the problem line is not parsed as a clause
    comment_lead = tuple('p') + tuple(comment_lead)
    for line in file_pointer:
        line = line.strip()
        if line:
            if line[0] not in comment_lead:
                cl = [int(l) for l in line.split()[:-1]]
                # first integer of a WCNF clause line is its weight
                w = cl.pop(0)
                self.nv = max([abs(l) for l in cl] + [self.nv])
                if w >= self.topw:
                    # weight >= top weight marks a hard clause
                    self.hard.append(cl)
                else:
                    self.soft.append(cl)
                    self.wght.append(w)
            elif not line.startswith('p wcnf '):
                self.comments.append(line)
            else:
                # 'p wcnf nv ncls topw' — last token is the top weight
                self.topw = int(line.rsplit(' ', 1)[1])
|
Read a WCNF formula from a file pointer . A file pointer should be specified as an argument . The only default argument is comment_lead which can be used for parsing specific comment lines .
|
61,207 |
def to_fp(self, file_pointer, comments=None):
    """Write the WCNF formula in DIMACS format to an open file pointer.

    :param comments: optional extra comment lines to emit after the
        formula's own comments.
    """
    for comment in self.comments:
        print(comment, file=file_pointer)
    if comments:
        for comment in comments:
            print(comment, file=file_pointer)
    total = len(self.hard) + len(self.soft)
    print('p wcnf', self.nv, total, self.topw, file=file_pointer)
    # soft clauses carry their own weight; hard clauses use the top weight
    for i, clause in enumerate(self.soft):
        print(self.wght[i], ' '.join(map(str, clause)), '0', file=file_pointer)
    for clause in self.hard:
        print(self.topw, ' '.join(map(str, clause)), '0', file=file_pointer)
|
The method can be used to save a WCNF formula into a file pointer . The file pointer is expected as an argument . Additionally supplementary comment lines can be specified in the comments parameter .
|
61,208 |
def append(self, clause, weight=None):
    """Add one clause (soft when *weight* is truthy, hard otherwise) and
    update self.nv."""
    self.nv = max(max(map(abs, clause), default=0), self.nv)
    # NOTE: truthiness check kept — weight=0 is treated as hard, as before
    if weight:
        self.soft.append(clause)
        self.wght.append(weight)
    else:
        self.hard.append(clause)
|
Add one more clause to WCNF formula . This method additionally updates the number of variables i . e . variable self . nv used in the formula .
|
61,209 |
def from_fp(self, file_pointer, comment_lead=['c']):
    """Read a CNF+ formula (clauses plus AtMostK constraints) from an open
    file pointer.

    :param file_pointer: open file-like object to read from.
    :param comment_lead: characters that start comment lines.
    """
    self.nv = 0
    self.clauses = []
    self.atmosts = []
    self.comments = []
    # 'p' is added so the problem line is not parsed as a clause
    comment_lead = tuple('p') + tuple(comment_lead)
    for line in file_pointer:
        line = line.strip()
        if line:
            if line[0] not in comment_lead:
                if line[-1] == '0':
                    # ordinary clause, terminated by '0'
                    cl = [int(l) for l in line.split()[:-1]]
                    self.nv = max([abs(l) for l in cl] + [self.nv])
                    self.clauses.append(cl)
                else:
                    # cardinality constraint: 'l1 l2 ... <=|>= rhs'
                    items = [i for i in line.split()]
                    lits = [int(l) for l in items[:-2]]
                    rhs = int(items[-1])
                    self.nv = max([abs(l) for l in lits] + [self.nv])
                    if items[-2][0] == '>':
                        # normalize an at-least into an at-most by negating
                        # literals and complementing the bound
                        lits = list(map(lambda l: -l, lits))
                        rhs = len(lits) - rhs
                    self.atmosts.append([lits, rhs])
            elif not line.startswith('p cnf+ '):
                self.comments.append(line)
|
Read a CNF + formula from a file pointer . A file pointer should be specified as an argument . The only default argument is comment_lead which can be used for parsing specific comment lines .
|
61,210 |
def to_fp(self, file_pointer, comments=None):
    """Write the CNF+ formula in DIMACS-like format to an open file pointer.

    :param comments: optional extra comment lines to emit after the
        formula's own comments.
    """
    for comment in self.comments:
        print(comment, file=file_pointer)
    if comments:
        for comment in comments:
            print(comment, file=file_pointer)
    # the 'cnf+' header is only needed when AtMost constraints are present
    ftype = 'cnf+' if self.atmosts else 'cnf'
    total = len(self.clauses) + len(self.atmosts)
    print('p', ftype, self.nv, total, file=file_pointer)
    for clause in self.clauses:
        print(' '.join(map(str, clause)), '0', file=file_pointer)
    for lits, bound in self.atmosts:
        print(' '.join(map(str, lits)), '<=', bound, file=file_pointer)
|
The method can be used to save a CNF + formula into a file pointer . The file pointer is expected as an argument . Additionally supplementary comment lines can be specified in the comments parameter .
|
61,211 |
def append(self, clause, is_atmost=False):
    """Add a clause, or an AtMostK constraint of the form [lits, k] when
    *is_atmost* is set, updating self.nv."""
    lits = clause[0] if is_atmost else clause
    self.nv = max(max(map(abs, lits), default=0), self.nv)
    if is_atmost:
        self.atmosts.append(clause)
    else:
        self.clauses.append(clause)
|
Add a single clause or a single AtMostK constraint to CNF + formula . This method additionally updates the number of variables i . e . variable self . nv used in the formula .
|
61,212 |
def from_fp(self, file_pointer, comment_lead=['c']):
    """Read a WCNF+ formula (weighted clauses plus AtMostK constraints)
    from an open file pointer.

    :param file_pointer: open file-like object to read from.
    :param comment_lead: characters that start comment lines.
    """
    self.nv = 0
    self.hard = []
    self.atms = []
    self.soft = []
    self.wght = []
    self.topw = 0
    self.comments = []
    # 'p' is added so the problem line is not parsed as a clause
    comment_lead = tuple('p') + tuple(comment_lead)
    for line in file_pointer:
        line = line.strip()
        if line:
            if line[0] not in comment_lead:
                if line[-1] == '0':
                    # ordinary weighted clause; first integer is the weight
                    cl = [int(l) for l in line.split()[:-1]]
                    w = cl.pop(0)
                    self.nv = max([abs(l) for l in cl] + [self.nv])
                    if w >= self.topw:
                        self.hard.append(cl)
                    else:
                        self.soft.append(cl)
                        self.wght.append(w)
                else:
                    # cardinality constraint; items[0] is skipped —
                    # presumably the (top) weight token; confirm format
                    items = [i for i in line.split()]
                    lits = [int(l) for l in items[1:-2]]
                    rhs = int(items[-1])
                    self.nv = max([abs(l) for l in lits] + [self.nv])
                    if items[-2][0] == '>':
                        # normalize an at-least into an at-most
                        lits = list(map(lambda l: -l, lits))
                        rhs = len(lits) - rhs
                    self.atms.append([lits, rhs])
            elif not line.startswith('p wcnf+ '):
                self.comments.append(line)
            else:
                # 'p wcnf+ nv ncls topw' — last token is the top weight
                self.topw = int(line.rsplit(' ', 1)[1])
|
Read a WCNF + formula from a file pointer . A file pointer should be specified as an argument . The only default argument is comment_lead which can be used for parsing specific comment lines .
|
61,213 |
def append(self, clause, weight=None, is_atmost=False):
    """Add a weighted clause, or an AtMostK constraint [lits, k] when
    *is_atmost* is set, updating self.nv."""
    if is_atmost:
        self.nv = max(max(map(abs, clause[0]), default=0), self.nv)
        self.atms.append(clause)
        return
    self.nv = max(max(map(abs, clause), default=0), self.nv)
    # NOTE: truthiness check kept — weight=0 is treated as hard, as before
    if weight:
        self.soft.append(clause)
        self.wght.append(weight)
    else:
        self.hard.append(clause)
|
Add a single clause or a single AtMostK constraint to WCNF + formula . This method additionally updates the number of variables i . e . variable self . nv used in the formula .
|
61,214 |
def delete(self):
    """Explicitly destroy the internal SAT oracle, folding its accumulated
    solving time into self.time first."""
    if not self.oracle:
        return
    self.time += self.oracle.time_accum()
    self.oracle.delete()
    self.oracle = None
|
Explicit destructor of the internal SAT oracle .
|
61,215 |
def split_core(self, minw):
    """Split clauses in the core whenever necessary.

    For every core clause whose weight exceeds *minw*, a duplicate soft
    clause carrying the residual weight (w - minw) is created with a fresh
    selector, and the original clause's weight is lowered to minw.

    :param minw: minimum weight found in the current core.
    """
    for clid in self.core:
        sel = self.sels[clid]
        if self.wght[clid] > minw:
            # fresh variable serves as the selector of the split-off copy
            self.topv += 1
            cl_new = []
            for l in self.soft[clid]:
                if l != -sel:
                    cl_new.append(l)
                else:
                    # replace the old selector literal with the new one
                    cl_new.append(-self.topv)
            self.sels.append(self.topv)
            self.vmap[self.topv] = len(self.soft)
            self.soft.append(cl_new)
            # the copy keeps the residual weight; the original keeps minw
            self.wght.append(self.wght[clid] - minw)
            self.wght[clid] = minw
            self.scpy.append(True)
|
Split clauses in the core whenever necessary .
|
61,216 |
def relax_core(self):
    """Relax and bound the current unsatisfiable core.

    Each core clause gets a fresh relaxation variable, and an AtMost
    constraint over the relaxation variables is added to the hard part.
    A unit core is handled separately by remove_unit_core().
    """
    if len(self.core) > 1:
        rels = []
        for clid in self.core:
            # fresh relaxation variable appended to the soft clause
            self.topv += 1
            rels.append(self.topv)
            self.soft[clid].append(self.topv)
        # encode AtMost-k over the relaxation variables
        # (bound defaults to CardEnc.atmost's default — presumably 1)
        am1 = CardEnc.atmost(lits=rels, top_id=self.topv, encoding=self.cenc)
        for cl in am1.clauses:
            self.hard.append(cl)
        for am in am1.atmosts:
            # native AtMost constraints (if the encoding produced any)
            self.atm1.append(am)
        self.topv = am1.nv
    elif len(self.core) == 1:
        self.remove_unit_core()
|
Relax and bound the core .
|
61,217 |
def parse_options():
    """Parse command-line options.

    :returns: tuple (print_model, solver, verbose, args).
    Exits with status 1 on a bad option, 0 after printing usage.
    """
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hms:v',
                                   ['help', 'model', 'solver=', 'verbose'])
    except getopt.GetoptError as err:
        sys.stderr.write(str(err).capitalize())
        print_usage()
        sys.exit(1)
    # defaults
    solver = 'g4'
    verbose = 1
    print_model = False
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            print_usage()
            sys.exit(0)
        elif opt in ('-m', '--model'):
            print_model = True
        elif opt in ('-s', '--solver'):
            solver = str(arg)
        elif opt in ('-v', '--verbose'):
            # each -v increases verbosity by one
            verbose += 1
        else:
            assert False, 'Unhandled option: {0} {1}'.format(opt, arg)
    return print_model, solver, verbose, args
|
Parses command - line options .
|
61,218 |
def parse_formula(fml_file):
    """Parse and return a MaxSAT formula.

    Files whose name matches ``*.wcnf`` (optionally compressed with
    gz/bz2/lzma/xz) are parsed as WCNF; anything else is parsed as plain
    CNF and converted to a weighted formula.

    :param fml_file: path to the formula file.
    :returns: a WCNF formula object.
    """
    # raw string: '\.' in a non-raw literal is an invalid escape sequence
    # and raises a warning (eventually an error) on modern Python
    if re.search(r'\.wcnf(\.(gz|bz2|lzma|xz))?$', fml_file):
        fml = WCNF(from_file=fml_file)
    else:
        fml = CNF(from_file=fml_file).weighted()
    return fml
|
Parse and return MaxSAT formula .
|
61,219 |
def _init(self, formula):
    """Initialize the SAT oracle.

    Bootstraps a new incremental solver with the formula's hard clauses,
    then augments every soft clause with a fresh selector literal and adds
    it too. All selectors are collected in self.sels.

    :param formula: WCNF formula to load (its soft clauses are mutated
        in place by appending selectors).
    """
    self.oracle = Solver(name=self.solver, bootstrap_with=formula.hard,
                         incr=True, use_timer=True)
    for i, cl in enumerate(formula.soft):
        # fresh variable acts as the clause's selector
        self.topv += 1
        selv = self.topv
        cl.append(self.topv)
        self.oracle.add_clause(cl)
        self.sels.append(selv)
    if self.verbose > 1:
        print('c formula: {0} vars, {1} hard, {2} soft'.format(formula.nv, len(formula.hard), len(formula.soft)))
|
SAT oracle initialization . The method creates a new SAT oracle and feeds it with the formula s hard clauses . Afterwards all soft clauses of the formula are augmented with selector literals and also added to the solver . The list of all introduced selectors is stored in variable self . sels .
|
61,220 |
def _get_model_cost(self, formula, model):
    """Compute the MaxSAT cost of *model*: the sum of weights of soft
    clauses it falsifies."""
    model_set = set(model)
    cost = 0
    for i, clause in enumerate(formula.soft):
        # NOTE(review): literals are filtered by self.formula.nv rather than
        # the *formula* argument's nv — presumably the same object; confirm.
        in_range = [l for l in clause if abs(l) <= self.formula.nv]
        falsified = not any(l in model_set for l in in_range)
        cost += formula.wght[i] if falsified else 0
    return cost
|
Given a WCNF formula and a model the method computes the MaxSAT cost of the model i . e . the sum of weights of soft clauses that are unsatisfied by the model .
|
61,221 |
def parse_options():
    """Parse command-line options.

    :returns: tuple (adapt, blo, cmode, to_enum, exhaust, incr, minz,
        solver, trim, verbose, args).
    Exits with status 1 on a bad option, 0 after printing usage.
    """
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'ac:e:hilms:t:vx',
                                   ['adapt', 'comp=', 'enum=', 'exhaust',
                                    'help', 'incr', 'blo', 'minimize',
                                    'solver=', 'trim=', 'verbose'])
    except getopt.GetoptError as err:
        sys.stderr.write(str(err).capitalize())
        usage()
        sys.exit(1)
    # defaults
    adapt = False
    exhaust = False
    cmode = None
    to_enum = 1
    incr = False
    blo = False
    minz = False
    solver = 'g3'
    trim = 0
    verbose = 1
    for opt, arg in opts:
        if opt in ('-a', '--adapt'):
            adapt = True
        elif opt in ('-c', '--comp'):
            cmode = str(arg)
        elif opt in ('-e', '--enum'):
            to_enum = str(arg)
            if to_enum != 'all':
                to_enum = int(to_enum)
            else:
                # 'all' is encoded internally as 0
                to_enum = 0
        elif opt in ('-h', '--help'):
            usage()
            sys.exit(0)
        elif opt in ('-i', '--incr'):
            incr = True
        elif opt in ('-l', '--blo'):
            blo = True
        elif opt in ('-m', '--minimize'):
            minz = True
        elif opt in ('-s', '--solver'):
            solver = str(arg)
        elif opt in ('-t', '--trim'):
            trim = int(arg)
        elif opt in ('-v', '--verbose'):
            # each -v increases verbosity by one
            verbose += 1
        elif opt in ('-x', '--exhaust'):
            exhaust = True
        else:
            assert False, 'Unhandled option: {0} {1}'.format(opt, arg)
    return adapt, blo, cmode, to_enum, exhaust, incr, minz, solver, trim, verbose, args
|
Parses command - line option
|
61,222 |
def delete(self):
    """Explicitly destroy the internal SAT oracle and every totalizer
    object created during solving."""
    if not self.oracle:
        return
    self.oracle.delete()
    self.oracle = None
    # MiniCard handles cardinality natively, so no totalizers were built
    if self.solver != 'mc':
        for tot in six.itervalues(self.tobj):
            tot.delete()
|
Explicit destructor of the internal SAT oracle and all the totalizer objects creating during the solving process .
|
61,223 |
def trim_core(self):
    """Trim the previously extracted core up to self.trim times, stopping
    early at a fixed point (no further shrinkage)."""
    for _ in range(self.trim):
        self.oracle.solve(assumptions=self.core)
        shrunk = self.oracle.get_core()
        if len(shrunk) == len(self.core):
            break  # fixed point reached
        self.core = shrunk
|
This method trims a previously extracted unsatisfiable core at most a given number of times . If a fixed point is reached before that the method returns .
|
61,224 |
def minimize_core(self):
    """Shrink the extracted core toward an MUS over-approximation using
    simple deletion-based MUS extraction (with a conflict budget)."""
    if not (self.minz and len(self.core) > 1):
        return
    # try cheap (low-weight) literals first
    self.core = sorted(self.core, key=lambda l: self.wght[l])
    self.oracle.conf_budget(1000)
    i = 0
    while i < len(self.core):
        candidate = self.core[:i] + self.core[(i + 1):]
        # only a definite UNSAT answer (False, not None) lets us drop
        if self.oracle.solve_limited(assumptions=candidate) == False:
            self.core = candidate
        else:
            i += 1
|
Reduce a previously extracted core and compute an over - approximation of an MUS . This is done using the simple deletion - based MUS extraction algorithm .
|
61,225 |
def update_sum(self, assump):
    """Increase the bound of the totalizer sum associated with assumption
    literal *assump*.

    :param assump: assumption literal identifying the totalizer object.
    :returns: tuple (totalizer object, new bound).
    """
    t = self.tobj[assump]
    b = self.bnds[assump] + 1
    if self.solver != 'mc':
        # grow the totalizer encoding and feed new clauses to the oracle
        t.increase(ubound=b, top_id=self.topv)
        self.topv = t.top_id
        if t.nof_new:
            for cl in t.cnf.clauses[-t.nof_new:]:
                self.oracle.add_clause(cl)
    else:
        # MiniCard: use a native AtMost constraint instead of a totalizer
        rhs = len(t.lits)
        if b < rhs:
            if not t.rhs[b]:
                # fresh output variable for bound b
                self.topv += 1
                t.rhs[b] = self.topv
            # (rhs - b) copies of the negated output pad the constraint so
            # that setting the output literal enforces the bound
            amb = [[-t.rhs[b]] * (rhs - b) + t.lits, rhs]
            self.oracle.add_atmost(*amb)
    return t, b
|
The method is used to increase the bound for a given totalizer sum . The totalizer object is identified by the input parameter assump which is an assumption literal associated with the totalizer object .
|
61,226 |
def set_bound(self, tobj, rhs):
    """Register a new sum assumption literal enforcing bound *rhs* on
    totalizer *tobj* for subsequent oracle calls."""
    sum_lit = -tobj.rhs[rhs]
    self.tobj[sum_lit] = tobj
    self.bnds[sum_lit] = rhs
    self.wght[sum_lit] = self.minw
    self.sums.append(sum_lit)
|
Given a totalizer sum and its right - hand side to be enforced the method creates a new sum assumption literal which will be used in the following SAT oracle calls .
|
61,227 |
def filter_assumps(self):
    """Filter out garbage selectors and sums from the assumption lists and
    drop their entries from the bounds and weights mappings.

    Uses plain dict.items() instead of six.iteritems() — equivalent on
    both Python 2 and 3, and removes the needless six dependency here.
    """
    self.sels = [l for l in self.sels if l not in self.garbage]
    self.sums = [l for l in self.sums if l not in self.garbage]
    self.bnds = {l: b for l, b in self.bnds.items() if l not in self.garbage}
    self.wght = {l: w for l, w in self.wght.items() if l not in self.garbage}
    self.garbage.clear()
|
Filter out unnecessary selectors and sums from the list of assumption literals . The corresponding values are also removed from the dictionaries of bounds and weights .
|
61,228 |
def __get_path_to_mecab_config(self):
    """Locate the directory containing mecab-config via `which`."""
    raw = subprocess.check_output(['which', 'mecab-config'])
    if not six.PY2:
        # check_output returns bytes on Python 3
        raw = raw.decode(self.string_encoding)
    path_mecab_config_dir = raw.strip().replace('/mecab-config', '')
    logger.info(msg='mecab-config is detected at {}'.format(path_mecab_config_dir))
    return path_mecab_config_dir
|
Return the path to the directory that contains the mecab-config executable.
|
61,229 |
def __result_parser(self, analyzed_line, is_feature, is_surface):
    """Build a TokenizedResult from one analyzed MeCab output line
    (tab-separated surface and feature string)."""
    assert isinstance(analyzed_line, str)
    assert isinstance(is_feature, bool)
    assert isinstance(is_surface, bool)
    surface, features = analyzed_line.split('\t', 1)
    tuple_pos, word_stem = self.__feature_parser(features, surface)
    return TokenizedResult(
        node_obj=None,
        analyzed_line=analyzed_line,
        tuple_pos=tuple_pos,
        word_stem=word_stem,
        word_surface=surface,
        is_feature=is_feature,
        is_surface=is_surface)
|
Extract surface word and feature from analyzed line . Extracted elements are returned with TokenizedResult class
|
61,230 |
def __is_valid_pos(pos_tuple, valid_pos):
    """Return True iff *pos_tuple* starts with any tuple in *valid_pos*."""
    return any(pos_tuple[:len(prefix)] == prefix for prefix in valid_pos)
|
This function checks whether the token's POS is within the POS set that the user specified. Returns True if the token meets all conditions; otherwise returns False.
|
61,231 |
def filter_words(tokenized_obj, valid_pos, stopwords, check_field_name='stem'):
    """Filter out tokens the user does not want, by stopword list and/or
    POS condition.

    :param tokenized_obj: TokenizedSenetence holding the tokens.
    :param valid_pos: list of POS tuples to keep (empty = no POS filter).
    :param stopwords: list of stopwords to drop (empty = no stopword filter).
    :param check_field_name: 'stem' checks word stems against stopwords,
        anything else checks surface forms.
    :returns: FilteredObject with the surviving tokens.
    """
    assert isinstance(tokenized_obj, TokenizedSenetence)
    assert isinstance(valid_pos, list)
    assert isinstance(stopwords, list)
    filtered_tokens = []
    for token_obj in tokenized_obj.tokenized_objects:
        assert isinstance(token_obj, TokenizedResult)
        if check_field_name == 'stem':
            res_stopwords = __is_sotpwords(token_obj.word_stem, stopwords)
        else:
            res_stopwords = __is_sotpwords(token_obj.word_surface, stopwords)
        res_pos_condition = __is_valid_pos(token_obj.tuple_pos, valid_pos)
        # exactly one of the three cases applies per call; a token passes
        # only the condition(s) the caller actually supplied
        if valid_pos != [] and stopwords == []:
            if res_pos_condition:
                filtered_tokens.append(token_obj)
        if valid_pos == [] and stopwords != []:
            if res_stopwords is False:
                filtered_tokens.append(token_obj)
        if valid_pos != [] and stopwords != []:
            if res_stopwords is False and res_pos_condition:
                filtered_tokens.append(token_obj)
    filtered_object = FilteredObject(sentence=tokenized_obj.sentence,
                                     tokenized_objects=filtered_tokens,
                                     pos_condition=valid_pos,
                                     stopwords=stopwords)
    return filtered_object
|
This function filters out tokens that the user does not want to keep. The filtering conditions are stopwords and POS tags.
|
61,232 |
def __extend_token_object(self, token_object, is_denormalize=True, func_denormalizer=denormalize_text):
    """Turn a TokenizedResult into a token: the surface or stem string,
    optionally denormalized, optionally paired with its POS tuple."""
    assert isinstance(token_object, TokenizedResult)
    # `== True` comparisons preserved from the original flag handling
    if token_object.is_surface == True:
        word = token_object.word_surface
    else:
        word = token_object.word_stem
    if is_denormalize:
        word = func_denormalizer(word)
    if token_object.is_feature == True:
        return (word, token_object.tuple_pos)
    return word
|
This method creates dict object from token object .
|
61,233 |
def notify(notification, value=None, unset_environment=False):
    """Send a notification line to the systemd daemon.

    :raises TypeError: if *notification* is not a Notification member.
    :raises ValueError: if a value is supplied for a constant-only state.
    """
    if not isinstance(notification, Notification):
        raise TypeError("state must be an instance of Notification")
    state = notification.value
    if state.constant is not None and value:
        raise ValueError(
            "State %s should contain only constant value %r" % (state.name, state.constant),
            state.name, state.constant)
    payload = state.constant if state.constant is not None else state.type(value)
    line = "%s=%s" % (state.name, payload)
    log.debug("Send %r into systemd", line)
    try:
        return sd_notify(line, unset_environment)
    except Exception as e:
        # best-effort: notification failure is logged, not raised
        log.error("%s", e)
|
Send notification to systemd daemon
|
61,234 |
def expand_source_paths(paths):
    """Yield each path, substituting compiled files (.pyc/.pyo) with their
    source .py equivalent when that source exists on disk."""
    for src_path in paths:
        if src_path.endswith(('.pyc', '.pyo')):
            candidate = get_py_path(src_path)
            if os.path.exists(candidate):
                src_path = candidate
        yield src_path
|
Convert pyc files into their source equivalents .
|
61,235 |
def iter_module_paths(modules=None):
    """Yield absolute file paths of imported modules that exist on disk.

    Defaults to everything currently in sys.modules.
    """
    modules = modules or list(sys.modules.values())
    for module in modules:
        try:
            # ImportError kept: some lazy-loading modules raise it here
            filename = module.__file__
        except (AttributeError, ImportError):
            continue
        if filename is not None:
            abs_filename = os.path.abspath(filename)
            if os.path.isfile(abs_filename):
                yield abs_filename
|
Yield paths of all imported modules .
|
61,236 |
def update_paths(self):
    """Scan sys.modules for module paths not yet in our path set and start
    watching any new ones."""
    fresh = []
    with self.lock:
        for path in expand_source_paths(iter_module_paths()):
            if path not in self.paths:
                self.paths.add(path)
                fresh.append(path)
    if fresh:
        self.watch_paths(fresh)
|
Check sys . modules for paths to add to our path set .
|
61,237 |
def search_traceback(self, tb):
    """Inspect traceback *tb* for file paths not yet in our path set and
    start watching any new ones."""
    fresh = []
    with self.lock:
        for filename, lineno, funcname, text in traceback.extract_tb(tb):
            path = os.path.abspath(filename)
            if path not in self.paths:
                self.paths.add(path)
                fresh.append(path)
    if fresh:
        self.watch_paths(fresh)
|
Inspect a traceback for new paths to add to our path set .
|
61,238 |
def args_from_interpreter_flags():
    """Return command-line arguments reproducing the current settings of
    sys.flags and sys.warnoptions."""
    flag_opt_map = {
        'debug': 'd',
        'dont_write_bytecode': 'B',
        'no_user_site': 's',
        'no_site': 'S',
        'ignore_environment': 'E',
        'verbose': 'v',
        'bytes_warning': 'b',
        'quiet': 'q',
        'optimize': 'O',
    }
    args = []
    for flag, opt in flag_opt_map.items():
        count = getattr(sys.flags, flag, 0)
        if count > 0:
            # repeatable flags are emitted as e.g. -OO
            args.append('-' + opt * count)
    args.extend('-W' + warn_opt for warn_opt in sys.warnoptions)
    return args
|
Return a list of command - line arguments reproducing the current settings in sys . flags and sys . warnoptions .
|
61,239 |
def spawn(spec, kwargs, pass_fds=()):
    """Invoke a python function in a subprocess.

    Pickles (preparation data, spec, kwargs) down a pipe that the child
    reads from; the read end plus any *pass_fds* are made inheritable.

    :returns: the subprocess.Popen object for the child.
    """
    r, w = os.pipe()
    for fd in [r] + list(pass_fds):
        set_inheritable(fd, True)
    preparation_data = get_preparation_data()
    r_handle = get_handle(r)
    args, env = get_command_line(pipe_handle=r_handle)
    # close_fds=False so the inheritable pipe/fds survive into the child
    process = subprocess.Popen(args, env=env, close_fds=False)
    to_child = os.fdopen(w, 'wb')
    to_child.write(pickle.dumps([preparation_data, spec, kwargs]))
    to_child.close()
    return process
|
Invoke a python function in a subprocess .
|
61,240 |
def get_watchman_sockpath(binpath='watchman'):
    """Return the watchman socket path, from the WATCHMAN_SOCK environment
    variable or by asking the watchman binary."""
    env_path = os.getenv('WATCHMAN_SOCK')
    if env_path:
        return env_path
    cmd = [binpath, '--output-encoding=json', 'get-sockname']
    payload = json.loads(subprocess.check_output(cmd))
    return payload['sockname']
|
Find the watchman socket or raise .
|
61,241 |
def start_reloader(worker_path, reload_interval=1, shutdown_interval=default,
                   verbose=1, logger=None, monitor_factory=None,
                   worker_args=None, worker_kwargs=None, ignore_files=None,):
    """Start a monitor and fork a worker process executing the importable
    function at *worker_path*.

    If a reloader is already active in this process, it is returned
    unchanged instead of starting a new one.

    :param shutdown_interval: defaults (via the `default` sentinel) to
        *reload_interval*.
    """
    if is_active():
        return get_reloader()
    if logger is None:
        logger = DefaultLogger(verbose)
    if monitor_factory is None:
        monitor_factory = find_default_monitor_factory(logger)
    if shutdown_interval is default:
        shutdown_interval = reload_interval
    reloader = Reloader(worker_path=worker_path,
                        worker_args=worker_args,
                        worker_kwargs=worker_kwargs,
                        reload_interval=reload_interval,
                        shutdown_interval=shutdown_interval,
                        monitor_factory=monitor_factory,
                        logger=logger,
                        ignore_files=ignore_files,)
    return reloader.run()
|
Start a monitor and then fork a worker process which starts by executing the importable function at worker_path .
|
61,242 |
def run(self):
    """Execute the reloader forever, blocking the current thread.

    Loops restarting the worker; stops on KeyboardInterrupt and always
    exits the process with status 1 after restoring signals.
    """
    self._capture_signals()
    self._start_monitor()
    try:
        while True:
            # presumably _run_worker() returns falsy when no file change
            # triggered the exit, so we block until something changes
            if not self._run_worker():
                self._wait_for_changes()
            time.sleep(self.reload_interval)
    except KeyboardInterrupt:
        pass
    finally:
        self._stop_monitor()
        self._restore_signals()
        sys.exit(1)
|
Execute the reloader forever blocking the current thread .
|
61,243 |
def run_once(self):
    """Execute the worker exactly once, then tear down the monitor and
    restore signal handlers (also on KeyboardInterrupt)."""
    self._capture_signals()
    self._start_monitor()
    try:
        self._run_worker()
    except KeyboardInterrupt:
        return
    finally:
        self._stop_monitor()
        self._restore_signals()
|
Execute the worker once .
|
61,244 |
def malloc(self, key, shape, dtype):
    """Return a numpy array backed by the shared-memory block for *key*,
    (re)allocating the block if the shape or dtype differs."""
    cached = self._memory.get(key)
    if cached is None or cached.shape != shape or cached.dtype != dtype:
        self._memory[key] = Shmem(key, shape, dtype, self._uuid)
    return self._memory[key].np_array
|
Allocates a block of shared memory and returns a numpy array whose data corresponds with that block .
|
61,245 |
def package_info(pkg_name):
    """Print the metadata (platform, version, path) and worlds of the
    installed package named *pkg_name*."""
    indent = " "
    for config, _ in _iter_packages():
        if pkg_name == config["name"]:
            print("Package:", pkg_name)
            print(indent, "Platform:", config["platform"])
            print(indent, "Version:", config["version"])
            print(indent, "Path:", config["path"])
            print(indent, "Worlds:")
            for world in config["maps"]:
                world_info(world["name"], world_config=world, initial_indent=" ")
|
Prints the information of a package .
|
61,246 |
def world_info(world_name, world_config=None, initial_indent="", next_indent=" "):
    """Print the resolution, agents and sensors of a world.

    :param world_config: pre-resolved world dict; when None the installed
        packages are searched for *world_name*.
    :raises HolodeckException: if the world cannot be found.
    """
    if world_config is None:
        for config, _ in _iter_packages():
            for world in config["maps"]:
                if world["name"] == world_name:
                    world_config = world
    if world_config is None:
        raise HolodeckException("Couldn't find world " + world_name)
    # each nesting level adds one more indent step
    second_indent = initial_indent + next_indent
    agent_indent = second_indent + next_indent
    sensor_indent = agent_indent + next_indent
    print(initial_indent, world_config["name"])
    print(second_indent, "Resolution:", world_config["window_width"], "x", world_config["window_height"])
    print(second_indent, "Agents:")
    for agent in world_config["agents"]:
        print(agent_indent, "Name:", agent["agent_name"])
        print(agent_indent, "Type:", agent["agent_type"])
        print(agent_indent, "Sensors:")
        for sensor in agent["sensors"]:
            print(sensor_indent, sensor)
|
Gets and prints the information of a world .
|
61,247 |
def install(package_name):
    """Download and install a holodeck package into the worlds directory.

    :raises HolodeckException: for an unknown package name.
    """
    holodeck_path = util.get_holodeck_path()
    if package_name not in packages:
        raise HolodeckException("Unknown package name " + package_name)
    package_url = packages[package_name]
    print("Installing " + package_name + " at " + holodeck_path)
    install_path = os.path.join(holodeck_path, "worlds")
    binary_url = ("https://s3.amazonaws.com/holodeckworlds/"
                  + util.get_os_key() + "_" + package_url)
    _download_binary(binary_url, install_path)
    if os.name == "posix":
        _make_binary_excecutable(package_name, install_path)
|
Installs a holodeck package .
|
61,248 |
def remove(package_name):
    """Remove an installed holodeck package by deleting its directory tree.

    :raises HolodeckException: for an unknown package name.
    """
    if package_name not in packages:
        raise HolodeckException("Unknown package name " + package_name)
    for config, path in _iter_packages():
        if config["name"] == package_name:
            shutil.rmtree(path)
|
Removes a holodeck package .
|
61,249 |
def make(world_name, gl_version=GL_VERSION.OPENGL4, window_res=None, cam_res=None, verbose=False):
    """Create a holodeck environment for *world_name*.

    :param window_res: optional (width, height) of the render window.
    :param cam_res: optional (width, height) of the camera sensor.
    :raises HolodeckException: if the world name is not installed.
    """
    holodeck_worlds = _get_worlds_map()
    if world_name not in holodeck_worlds:
        raise HolodeckException("Invalid World Name")
    # shallow copy so the cached world config is not mutated
    param_dict = copy(holodeck_worlds[world_name])
    param_dict["start_world"] = True
    param_dict["uuid"] = str(uuid.uuid4())
    param_dict["gl_version"] = gl_version
    param_dict["verbose"] = verbose
    if window_res is not None:
        param_dict["window_width"] = window_res[0]
        param_dict["window_height"] = window_res[1]
    if cam_res is not None:
        param_dict["camera_width"] = cam_res[0]
        param_dict["camera_height"] = cam_res[1]
    return HolodeckEnvironment(**param_dict)
|
Creates a holodeck environment using the supplied world name .
|
61,250 |
def unlink(self):
    """Unlink the shared memory block in an OS-specific way.

    :raises HolodeckException: on operating systems other than posix/nt.
    """
    if os.name == "posix":
        self.__linux_unlink__()
    elif os.name == "nt":
        self.__windows_unlink__()
    else:
        raise HolodeckException("Currently unsupported os: " + os.name)
|
unlinks the shared memory
|
61,251 |
def add_number_parameters(self, number):
    """Append the given number (or each number in a list, recursively) to
    the internal parameter list as a JSON value fragment."""
    if isinstance(number, list):
        for item in number:
            self.add_number_parameters(item)
        return
    self._parameters.append("{ \"value\": " + str(number) + " }")
|
Add given number parameters to the internal list .
|
61,252 |
def add_string_parameters(self, string):
    """Append the given string (or each string in a list, recursively) to
    the internal parameter list as a JSON value fragment."""
    if isinstance(string, list):
        for item in string:
            self.add_string_parameters(item)
        return
    self._parameters.append("{ \"value\": \"" + string + "\" }")
|
Add given string parameters to the internal list .
|
61,253 |
def set_type(self, weather_type):
    """Set the weather type if it is a supported type.

    Bug fix: str.lower() returns a new string — the original discarded the
    result, so mixed-case input was never normalized before the has_type
    lookup.

    :param weather_type: weather type name, case-insensitive.
    """
    weather_type = weather_type.lower()
    if self.has_type(weather_type):
        self.add_string_parameters(weather_type)
|
Set the weather type .
|
61,254 |
def uav_example():
    """A basic example of how to use the UAV agent."""
    env = holodeck.make("UrbanCity")
    env.set_control_scheme("uav0", ControlSchemes.UAV_ROLL_PITCH_YAW_RATE_ALT)
    for i in range(10):
        env.reset()
        # presumably [roll, pitch, yaw-rate, altitude] per the control
        # scheme name — confirm against the ControlSchemes docs
        command = np.array([0, 0, 2, 10])
        for _ in range(1000):
            state, reward, terminal, _ = env.step(command)
            pixels = state[Sensors.PIXEL_CAMERA]
            velocity = state[Sensors.VELOCITY_SENSOR]
|
A basic example of how to use the UAV agent .
|
61,255 |
def sphere_example():
    """A basic example of how to use the sphere agent."""
    env = holodeck.make("MazeWorld")
    # scalar command — meaning depends on the sphere agent's scheme
    command = 2
    for i in range(10):
        env.reset()
        for _ in range(1000):
            state, reward, terminal, _ = env.step(command)
            pixels = state[Sensors.PIXEL_CAMERA]
            orientation = state[Sensors.ORIENTATION_SENSOR]
|
A basic example of how to use the sphere agent .
|
61,256 |
def android_example():
    """A basic example of how to use the android agent."""
    env = holodeck.make("AndroidPlayground")
    # 94-dimensional command vector — presumably one value per joint
    command = np.ones(94) * 10
    for i in range(10):
        env.reset()
        for j in range(1000):
            if j % 50 == 0:
                # flip the command sign periodically to animate the agent
                command *= -1
            state, reward, terminal, _ = env.step(command)
            pixels = state[Sensors.PIXEL_CAMERA]
            orientation = state[Sensors.ORIENTATION_SENSOR]
|
A basic example of how to use the android agent .
|
61,257 |
def multi_agent_example():
    """Demo of spawning a second UAV at runtime and commanding both via act/tick."""
    env = holodeck.make("UrbanCity")
    cmd0 = np.array([0, 0, -2, 10])
    cmd1 = np.array([0, 0, 5, 10])
    for _episode in range(10):
        env.reset()
        sensors = [Sensors.PIXEL_CAMERA, Sensors.LOCATION_SENSOR, Sensors.VELOCITY_SENSOR]
        agent = AgentDefinition("uav1", agents.UavAgent, sensors)
        env.spawn_agent(agent, [1, 1, 5])
        env.set_control_scheme("uav0", ControlSchemes.UAV_ROLL_PITCH_YAW_RATE_ALT)
        env.set_control_scheme("uav1", ControlSchemes.UAV_ROLL_PITCH_YAW_RATE_ALT)
        # one tick so the spawn command is applied before acting
        env.tick()
        env.act("uav0", cmd0)
        env.act("uav1", cmd1)
        for _frame in range(1000):
            states = env.tick()
            uav0_terminal = states["uav0"][Sensors.TERMINAL]
            uav1_reward = states["uav1"][Sensors.REWARD]
|
A basic example of using multiple agents
|
61,258 |
def world_command_examples():
    """Showcase world-manipulation commands: day time, day cycle, fog, weather, camera."""
    env = holodeck.make("MazeWorld")

    def run_and_reset(tick_count):
        # advance the world so the previous command's effect is visible, then reset
        for _ in range(tick_count):
            _ = env.tick()
        env.reset()

    run_and_reset(300)
    env.set_day_time(6)
    run_and_reset(300)
    env.start_day_cycle(5)
    run_and_reset(1500)
    env.set_fog_density(.25)
    run_and_reset(300)
    env.set_weather("rain")
    run_and_reset(500)
    env.set_weather("cloudy")
    run_and_reset(500)
    env.teleport_camera([1000, 1000, 1000], [0, 0, 0])
    run_and_reset(500)
|
A few examples to showcase commands for manipulating the worlds .
|
61,259 |
def editor_example():
    """Interact with a holodeck world running inside the Unreal editor
    (``start_world=False`` — most users of prebuilt binaries won't need this)."""
    sensors = [Sensors.PIXEL_CAMERA, Sensors.LOCATION_SENSOR, Sensors.VELOCITY_SENSOR]
    agent = AgentDefinition("uav0", agents.UavAgent, sensors)
    env = HolodeckEnvironment(agent, start_world=False)
    env.agents["uav0"].set_control_scheme(1)
    command = [0, 0, 10, 50]
    for _episode in range(10):
        env.reset()
        for _frame in range(1000):
            state, reward, terminal, _ = env.step(command)
|
This editor example shows how to interact with holodeck worlds while they are being built in the Unreal Engine . Most people that use holodeck will not need this .
|
61,260 |
def editor_multi_agent_example():
    """Interact with a multi-agent world running inside the Unreal editor
    (UE4 directly, not a prebuilt binary)."""
    agent_definitions = [
        AgentDefinition("uav0", agents.UavAgent, [Sensors.PIXEL_CAMERA, Sensors.LOCATION_SENSOR]),
        AgentDefinition("uav1", agents.UavAgent, [Sensors.LOCATION_SENSOR, Sensors.VELOCITY_SENSOR]),
    ]
    env = HolodeckEnvironment(agent_definitions, start_world=False)
    cmd0 = np.array([0, 0, -2, 10])
    cmd1 = np.array([0, 0, 5, 10])
    for _episode in range(10):
        env.reset()
        env.act("uav0", cmd0)
        env.act("uav1", cmd1)
        for _frame in range(1000):
            states = env.tick()
            uav0_terminal = states["uav0"][Sensors.TERMINAL]
            uav1_reward = states["uav1"][Sensors.REWARD]
|
This editor example shows how to interact with holodeck worlds that have multiple agents . This is specifically for when working with UE4 directly and not a prebuilt binary .
|
61,261 |
def get_holodeck_path():
    """Return the holodeck data directory.

    The ``HOLODECKPATH`` environment variable wins when set and non-empty;
    otherwise a per-OS default is used.

    Raises:
        NotImplementedError: on platforms other than Linux and Windows.
    """
    custom_path = os.environ.get("HOLODECKPATH", "")
    if custom_path != "":
        return custom_path
    if os.name == "posix":
        return os.path.expanduser("~/.local/share/holodeck")
    if os.name == "nt":
        return os.path.expanduser("~\\AppData\\Local\\holodeck")
    raise NotImplementedError("holodeck is only supported for Linux and Windows")
|
Gets the path of the holodeck environment
|
61,262 |
def convert_unicode(value):
    """Recursively encode ``unicode`` values to UTF-8 ``str``.

    Works around Python 2's ``json`` module loading text as ``unicode``
    instead of ``str``. NOTE(review): Python 2 only — ``dict.iteritems``
    and the ``unicode`` builtin do not exist on Python 3.
    """
    if isinstance(value, dict):
        # NOTE: the comprehension reuses the name ``value``, shadowing the parameter
        return {convert_unicode(key): convert_unicode(value) for key, value in value.iteritems()}
    elif isinstance(value, list):
        return [convert_unicode(item) for item in value]
    elif isinstance(value, unicode):
        return value.encode('utf-8')
    else:
        # ints, floats, None, already-bytes strings: pass through untouched
        return value
|
Resolves python 2 issue with json loading in unicode instead of string
|
61,263 |
def info(self):
    """Return a human-readable summary of the environment: each agent's
    name, type, and the sensors attached to it."""
    summary = "Agents:\n"
    for agent in self._all_agents:
        summary += "\tName: " + agent.name
        summary += "\n\tType: " + type(agent).__name__
        summary += "\n\t" + "Sensors:\n"
        for sensor in self._sensor_map[agent.name].keys():
            summary += "\t\t" + Sensors.name(sensor) + "\n"
    return summary
|
Returns a string with specific information about the environment . This information includes which agents are in the environment and which sensors they have .
|
61,264 |
def reset(self):
    """Reset the environment and return the initial state.

    Single-agent environments get that agent's state; multi-agent ones get
    a dict mapping agent name to state.
    """
    self._reset_ptr[0] = True
    self._commands.clear()
    # tick through the warm-up frames so the world settles before returning
    remaining = self._pre_start_steps + 1
    while remaining > 0:
        self.tick()
        remaining -= 1
    return self._default_state_fn()
|
Resets the environment and returns the state . If it is a single agent environment it returns that state for that agent . Otherwise it returns a dict from agent name to state .
|
61,265 |
def step(self, action):
    """Apply ``action`` to the main agent, tick once, and return its state.

    Primary interaction method for single-agent environments.
    """
    self._agent.act(action)
    self._handle_command_buffer()
    # release/acquire hands the frame to the simulator and waits for it back
    self._client.release()
    self._client.acquire()
    return self._get_single_state()
|
Supplies an action to the main agent and tells the environment to tick once . Primary mode of interaction for single agent environments .
|
61,266 |
def teleport(self, agent_name, location=None, rotation=None):
    """Teleport the named agent to ``location`` (in meters) with ``rotation``.

    Fixes two defects in the original:
    * ``location * 100`` repeated the sequence when a plain Python list was
      passed (list * int is repetition, not scaling); the conversion to a
      NumPy array makes the meters-to-centimeters scaling correct for both
      lists and arrays.
    * the documented ``location=None`` default crashed on ``None * 100``;
      ``None`` is now forwarded unchanged (the agent-side teleport skips
      the position when location is None).
    """
    scaled_location = None if location is None else np.array(location) * 100
    self.agents[agent_name].teleport(scaled_location, rotation)
    self.tick()
|
Teleports the target agent to any given location and applies a specific rotation .
|
61,267 |
def tick(self):
    """Advance the world one frame and return the full multi-agent state dict."""
    self._handle_command_buffer()
    # hand the frame to the simulator, then wait for it to come back
    self._client.release()
    self._client.acquire()
    return self._get_full_state()
|
Ticks the environment once . Normally used for multi - agent environments .
|
61,268 |
def add_state_sensors(self, agent_name, sensors):
    """Attach one sensor (or a list of sensors) to ``agent_name``'s state map.

    Only effective if the running world actually equips the agent with the
    sensor; this side allocates the shared-memory buffer for its readings.
    """
    if isinstance(sensors, list):
        for single_sensor in sensors:
            self.add_state_sensors(agent_name, single_sensor)
        return
    agent_sensors = self._sensor_map.setdefault(agent_name, dict())
    agent_sensors[sensors] = self._client.malloc(
        agent_name + "_" + Sensors.name(sensors),
        Sensors.shape(sensors),
        Sensors.dtype(sensors))
|
Adds a sensor to a particular agent . This only works if the world you are running also includes that particular sensor on the agent .
|
61,269 |
def spawn_agent(self, agent_definition, location):
    """Queue a spawn-agent command; it takes effect on the next tick/step,
    and the new agent is usable from the following frame."""
    self._should_write_to_command_buffer = True
    self._add_agents(agent_definition)
    spawn_cmd = SpawnAgentCommand(location, agent_definition.name, agent_definition.type)
    self._commands.add_command(spawn_cmd)
|
Queues a spawn agent command . It will be applied when tick or step is called next . The agent won t be able to be used until the next frame .
|
61,270 |
def set_fog_density(self, density):
    """Queue a fog-density change (applied on the next tick/step).

    Raises:
        HolodeckException: if ``density`` is outside [0, 1].
    """
    if not 0 <= density <= 1:
        raise HolodeckException("Fog density should be between 0 and 1")
    self._should_write_to_command_buffer = True
    self._commands.add_command(ChangeFogDensityCommand(density))
|
Queue up a change fog density command . It will be applied when tick or step is called next . By the next tick the exponential height fog in the world will have the new density . If there is no fog in the world it will be automatically created with the given density .
|
61,271 |
def set_day_time(self, hour):
    """Queue a day-time change (applied on the next tick/step).

    ``hour`` is wrapped modulo 24. Requires a skysphere/directional light
    in the world to have a visible effect.
    """
    self._should_write_to_command_buffer = True
    self._commands.add_command(DayTimeCommand(hour % 24))
|
Queue up a change day time command . It will be applied when tick or step is called next . By the next tick the lighting and the skysphere will be updated with the new hour . If there is no skysphere or directional light in the world the command will not function properly but will not cause a crash .
|
61,272 |
def start_day_cycle(self, day_length):
    """Queue a command to start the day cycle (applied on the next tick/step).

    A full day then lasts roughly ``day_length`` minutes.

    Fix: the original error message was garbled ("should be between above 0!").

    Raises:
        HolodeckException: if ``day_length`` is not positive.
    """
    if day_length <= 0:
        raise HolodeckException("The given day length must be above 0!")
    self._should_write_to_command_buffer = True
    command_to_send = DayCycleCommand(True)
    command_to_send.set_day_length(day_length)
    self._commands.add_command(command_to_send)
|
Queue up a day cycle command to start the day cycle . It will be applied when tick or step is called next . The sky sphere will now update each tick with an updated sun angle as it moves about the sky . The length of a day will be roughly equivalent to the number of minutes given .
|
61,273 |
def stop_day_cycle(self):
    """Queue a command to stop the day cycle where it is (applied on the next tick/step)."""
    self._should_write_to_command_buffer = True
    self._commands.add_command(DayCycleCommand(False))
|
Queue up a day cycle command to stop the day cycle . It will be applied when tick or step is called next . By the next tick day cycle will stop where it is .
|
61,274 |
def teleport_camera(self, location, rotation):
    """Queue a camera teleport; the camera's location and rotation are
    updated by the next tick."""
    self._should_write_to_command_buffer = True
    self._commands.add_command(TeleportCameraCommand(location, rotation))
|
Queue up a teleport camera command. It will be applied when tick or step is called next. By the next tick the camera's location and rotation will be updated.
|
61,275 |
def set_control_scheme(self, agent_name, control_scheme):
    """Set the control scheme for the named agent; prints a warning for
    unknown agent names (does not raise)."""
    if agent_name in self.agents:
        self.agents[agent_name].set_control_scheme(control_scheme)
    else:
        print("No such agent %s" % agent_name)
|
Set the control scheme for a specific agent .
|
61,276 |
def _handle_command_buffer ( self ) : if self . _should_write_to_command_buffer : self . _write_to_command_buffer ( self . _commands . to_json ( ) ) self . _should_write_to_command_buffer = False self . _commands . clear ( )
|
Checks if we should write to the command buffer writes all of the queued commands to the buffer and then clears the contents of the self . _commands list
|
61,277 |
def _write_to_command_buffer ( self , to_write ) : np . copyto ( self . _command_bool_ptr , True ) to_write += '0' input_bytes = str . encode ( to_write ) for index , val in enumerate ( input_bytes ) : self . _command_buffer_ptr [ index ] = val
|
Write input to the command buffer . Reformat input string to the correct format .
|
61,278 |
def teleport(self, location=None, rotation=None):
    """Teleport this agent: write location and/or rotation into the shared
    buffers and set the flag word (bit 1 = location, bit 2 = rotation)."""
    flags = 0
    if location is not None:
        np.copyto(self._teleport_buffer, location)
        flags |= 1
    if rotation is not None:
        np.copyto(self._rotation_buffer, rotation)
        flags |= 2
    self._teleport_bool_buffer[0] = flags
|
Teleports the agent to a specific location with a specific rotation .
|
61,279 |
def url(self, request=""):
    """Build the base URL with ``request`` appended (a single leading '/' is tolerated)."""
    trimmed = request[1:] if request.startswith("/") else request
    return "{}://{}/{}".format(self.scheme, self.host, trimmed)
|
Build the url with the appended request if provided .
|
61,280 |
def object_url(self, object_t, object_id=None, relation=None, **kwargs):
    """Build the query URL for an API object.

    Joins ``object_t``/``object_id``/``relation`` (skipping Nones) into the
    path, appends ``access_token`` when set, and sorts the query string.

    Raises:
        TypeError: if ``object_t`` is not a known object type.
    """
    if object_t not in self.objects_types:
        raise TypeError("{} is not a valid type".format(object_t))
    path = "/".join(str(part) for part in (object_t, object_id, relation) if part is not None)
    base_url = self.url(path)
    if self.access_token is not None:
        kwargs["access_token"] = str(self.access_token)
    if not kwargs:
        return base_url
    # stringify every value so urlencode output is stable
    stringified = {key: val if isinstance(val, str) else str(val) for key, val in kwargs.items()}
    return "{}?{}".format(base_url, urlencode(SortedDict.from_dict(stringified)))
|
Helper method to build the url to query to access the object passed as parameter
|
61,281 |
def get_album(self, object_id, relation=None, **kwargs):
    """Fetch the album resource identified by ``object_id`` (optionally a relation of it)."""
    return self.get_object("album", object_id, relation=relation, **kwargs)
|
Get the album with the provided id
|
61,282 |
def get_artist(self, object_id, relation=None, **kwargs):
    """Fetch the artist resource identified by ``object_id`` (optionally a relation of it)."""
    return self.get_object("artist", object_id, relation=relation, **kwargs)
|
Get the artist with the provided id
|
61,283 |
def search(self, query, relation=None, index=0, limit=25, **kwargs):
    """Search for tracks, albums, artists or users matching ``query``,
    paginated with ``index``/``limit``."""
    return self.get_object("search", relation=relation, q=query, index=index, limit=limit, **kwargs)
|
Search track album artist or user
|
61,284 |
def advanced_search(self, terms, relation=None, index=0, limit=25, **kwargs):
    """Advanced search: ``terms`` is a dict of field -> value, rendered as a
    sorted sequence of ``field:"value"`` clauses."""
    assert isinstance(terms, dict), "terms must be a dict"
    clauses = ['{}:"{}"'.format(field, value) for field, value in terms.items()]
    query = " ".join(sorted(clauses))
    return self.get_object("search", relation=relation, q=query, index=index, limit=limit, **kwargs)
|
Advanced search of track album or artist .
|
61,285 |
def asdict(self):
    """Convert this resource to a plain dict, recursing into nested
    ``Resource`` objects and lists of them."""
    converted = {}
    for field_name in self._fields:
        field_value = getattr(self, field_name)
        if isinstance(field_value, list):
            field_value = [element.asdict() if isinstance(element, Resource) else element
                           for element in field_value]
        elif isinstance(field_value, Resource):
            field_value = field_value.asdict()
        converted[field_name] = field_value
    return converted
|
Convert resource to dictionary
|
61,286 |
def get_relation(self, relation, **kwargs):
    """Load the given relation of this resource through the shared client."""
    client = self.client
    return client.get_object(self.type, self.id, relation, self, **kwargs)
|
Generic method to load the relation from any resource .
|
61,287 |
def iter_relation(self, relation, **kwargs):
    """Iterate over all items of a relation, fetching page after page until
    an empty page is returned."""
    offset = 0
    while True:
        page = self.get_relation(relation, index=offset, **kwargs)
        for entry in page:
            yield entry
        if not page:
            break
        offset += len(page)
|
Generic method to iterate relation from any resource .
|
61,288 |
def run(graph, save_on_github=False, main_entity=None):
    """Render the sigma.js HTML visualization template for an ontospy graph.

    2016-11-30

    :param graph: ontospy graph object (``classes``, ``sources``, toplayer lists, ...)
    :param save_on_github: unused in this function — presumably consumed by the caller; TODO confirm
    :param main_entity: unused in this function
    :return: rendered HTML, passed through ``safe_str``
    """
    try:
        ontology = graph.all_ontologies[0]
        uri = ontology.uri
    except:  # NOTE(review): bare except — any failure falls back to joining the source list
        ontology = None
        uri = ";".join([s for s in graph.sources])

    # NOTE(review): template file handle is never closed
    ontotemplate = open(ONTODOCS_VIZ_TEMPLATES + "sigmajs.html", "r")
    t = Template(ontotemplate.read())

    dict_graph = build_class_json(graph.classes)
    JSON_DATA_CLASSES = json.dumps(dict_graph)

    if False:
        # Dead code: never executed; leftover from an earlier D3-tree version.
        c_mylist = build_D3treeStandard(0, 99, 1, graph.toplayer_classes)
        p_mylist = build_D3treeStandard(0, 99, 1, graph.toplayer_properties)
        s_mylist = build_D3treeStandard(0, 99, 1, graph.toplayer_skos)
        c_total = len(graph.classes)
        p_total = len(graph.all_properties)
        s_total = len(graph.all_skos_concepts)
        JSON_DATA_CLASSES = json.dumps({'children': c_mylist, 'name': 'owl:Thing', 'id': "None"})
        JSON_DATA_PROPERTIES = json.dumps({'children': p_mylist, 'name': 'Properties', 'id': "None"})
        JSON_DATA_CONCEPTS = json.dumps({'children': s_mylist, 'name': 'Concepts', 'id': "None"})

    c = Context({
        "ontology": ontology,
        "main_uri": uri,
        "STATIC_PATH": ONTODOCS_VIZ_STATIC,
        "classes": graph.classes,
        "classes_TOPLAYER": len(graph.toplayer_classes),
        "properties": graph.all_properties,
        "properties_TOPLAYER": len(graph.toplayer_properties),
        "skosConcepts": graph.all_skos_concepts,
        "skosConcepts_TOPLAYER": len(graph.toplayer_skos),
        'JSON_DATA_CLASSES': JSON_DATA_CLASSES,
    })

    rnd = t.render(c)
    return safe_str(rnd)
|
2016 - 11 - 30
|
61,289 |
def _debugGraph ( self ) : print ( "Len of graph: " , len ( self . rdflib_graph ) ) for x , y , z in self . rdflib_graph : print ( x , y , z )
|
internal util to print out contents of graph
|
61,290 |
def load_uri(self, uri):
    """Load a single RDF resource (file path or URL) into this object's graph.

    Tries each configured serialization format in turn until one parses;
    records the uri in ``sources_valid`` or ``sources_invalid`` accordingly.
    """
    if self.verbose:
        printDebug("Reading: <%s>" % uri, fg="green")
    success = False
    # order the formats so the most likely one (by extension) is tried first
    sorted_fmt_opts = try_sort_fmt_opts(self.rdf_format_opts, uri)
    for f in sorted_fmt_opts:
        if self.verbose:
            printDebug(".. trying rdf serialization: <%s>" % f)
        try:
            if f == 'json-ld':
                # JSON-LD needs a ConjunctiveGraph to parse named graphs
                if self.verbose:
                    printDebug("Detected JSONLD - loading data into rdflib.ConjunctiveGraph()", fg='green')
                temp_graph = rdflib.ConjunctiveGraph()
            else:
                temp_graph = rdflib.Graph()
            temp_graph.parse(uri, format=f)
            if self.verbose:
                printDebug("..... success!", bold=True)
            success = True
            self.sources_valid += [uri]
            # merge the freshly parsed triples into the main graph
            self.rdflib_graph = self.rdflib_graph + temp_graph
            break
        except:  # NOTE(review): bare except — any parse failure just moves on to the next format
            temp = None
            if self.verbose:
                printDebug("..... failed")

    if not success == True:
        # every format failed: report and remember the bad source
        self.loading_failed(sorted_fmt_opts, uri=uri)
        self.sources_invalid += [uri]
|
Load a single resource into the graph for this object .
|
61,291 |
def print_summary(self):
    """Print load statistics: triple count plus which sources succeeded and failed."""
    ok, bad = self.sources_valid, self.sources_invalid
    if ok:
        printDebug("----------\nLoaded %d triples.\n----------" % len(self.rdflib_graph), fg='white')
        printDebug("RDF sources loaded successfully: %d of %d." % (len(ok), len(ok) + len(bad)), fg='green')
        for source in ok:
            printDebug("..... '" + source + "'", fg='white')
        printDebug("----------", fg='white')
    else:
        printDebug("Sorry - no valid RDF was found", fg='red')
    if bad:
        printDebug("----------\nRDF sources failed to load: %d.\n----------" % (len(bad)), fg='red')
        for source in bad:
            printDebug("-> " + source, fg="red")
|
print out stats about loading operation
|
61,292 |
def loading_failed(self, rdf_format_opts, uri=""):
    """Print the default error message when graph parsing fails completely."""
    target = " <%s>" % str(uri) if uri else ""
    printDebug("----------\nFatal error parsing graph%s\n(using RDF serializations: %s)" % (target, str(rdf_format_opts)), "red")
    printDebug("----------\nTIP: You can try one of the following RDF validation services\n<http://mowl-power.cs.man.ac.uk:8080/validator/validate>\n<http://www.ivan-herman.net/Misc/2008/owlrl/>")
    return
|
default message if we need to abort loading
|
61,293 |
def _build_qname(self, uri=None, namespaces=None):
    """Return a qualified name for ``uri`` (defaults to this entity's own
    uri and namespaces)."""
    return uri2niceString(uri or self.uri, namespaces or self.namespaces)
|
extracts a qualified name for a uri
|
61,294 |
def ancestors(self, cl=None, noduplicates=True):
    """Return all ancestors of ``cl`` (default: self) in the taxonomy,
    walking parents recursively; self-parented nodes stop the recursion."""
    if not cl:
        cl = self
    direct_parents = cl.parents()
    if not direct_parents:
        return []
    bag = []
    for parent in direct_parents:
        bag.append(parent)
        if parent.uri != cl.uri:
            # recurse upward, guarding against self-loops
            bag.extend(self.ancestors(parent, noduplicates))
    return remove_duplicates(bag) if noduplicates else bag
|
returns all ancestors in the taxonomy
|
61,295 |
def descendants(self, cl=None, noduplicates=True):
    """Return all descendants of ``cl`` (default: self) in the taxonomy,
    walking children recursively; self-childed nodes stop the recursion."""
    if not cl:
        cl = self
    direct_children = cl.children()
    if not direct_children:
        return []
    bag = []
    for child in direct_children:
        bag.append(child)
        if child.uri != cl.uri:
            # recurse downward, guarding against self-loops
            bag.extend(self.descendants(child, noduplicates))
    return remove_duplicates(bag) if noduplicates else bag
|
returns all descendants in the taxonomy
|
61,296 |
def annotations(self, qname=True):
    """Return all triples for this entity, sorted; by default resource URIs
    are rendered as qnames."""
    if not qname:
        return sorted(self.triples)
    return sorted([uri2niceString(subj, self.namespaces),
                   uri2niceString(pred, self.namespaces),
                   obj]
                  for subj, pred, obj in self.triples)
|
wrapper that returns all triples for an onto . By default resources URIs are transformed into qnames
|
61,297 |
def printStats(self):
    """Shortcut to print useful taxonomy/usage counts for interactive sessions."""
    printDebug("----------------")
    printDebug("Parents......: %d" % len(self.parents()))
    printDebug("Children.....: %d" % len(self.children()))
    printDebug("Ancestors....: %d" % len(self.ancestors()))
    printDebug("Descendants..: %d" % len(self.descendants()))
    printDebug("Domain of....: %d" % len(self.domain_of))
    printDebug("Range of.....: %d" % len(self.range_of))
    printDebug("Instances....: %d" % self.count())
    printDebug("----------------")
|
shortcut to pull out useful info for interactive use
|
61,298 |
def load_sparql(self, sparql_endpoint, verbose=False, hide_base_schemas=True, hide_implicit_types=True, hide_implicit_preds=True, credentials=None):
    """Set up a SPARQLStore backend as a virtual ontospy graph.

    :param sparql_endpoint: URL of the SPARQL endpoint to open
    :param credentials: optional ``(user, password)`` tuple for the store
    (``verbose`` and the ``hide_*`` flags are accepted but not used here)
    """
    try:
        graph = rdflib.ConjunctiveGraph('SPARQLUpdateStore')
        if credentials and type(credentials) == tuple:
            graph.store.setCredentials(credentials[0], credentials[1])
        graph.open(sparql_endpoint)
        self.rdflib_graph = graph
        self.sparql_endpoint = sparql_endpoint
        self.sources = [sparql_endpoint]
        self.sparqlHelper = SparqlHelper(self.rdflib_graph, self.sparql_endpoint)
        self.namespaces = sorted(self.rdflib_graph.namespaces())
    except:  # NOTE(review): bare except, but the error is logged and re-raised
        printDebug("Error trying to connect to Endpoint.")
        raise
|
Set up a SPARQLStore backend as a virtual ontospy graph
|
61,299 |
def build_all(self, verbose=False, hide_base_schemas=True, hide_implicit_types=True, hide_implicit_preds=True):
    """Extract all ontology entities from the RDF graph and construct Python
    representations of them.

    Runs the builders in dependency order: ontologies, classes, properties,
    SKOS concepts, SHACL shapes, then inferred properties.
    """
    if verbose:
        printDebug("Scanning entities...", "green")
        printDebug("----------", "comment")
    self.build_ontologies()
    if verbose:
        printDebug("Ontologies.........: %d" % len(self.all_ontologies), "comment")
    self.build_classes(hide_base_schemas, hide_implicit_types)
    if verbose:
        printDebug("Classes............: %d" % len(self.all_classes), "comment")
    self.build_properties(hide_implicit_preds)
    if verbose:
        printDebug("Properties.........: %d" % len(self.all_properties), "comment")
    if verbose:
        printDebug("..annotation.......: %d" % len(self.all_properties_annotation), "comment")
    if verbose:
        printDebug("..datatype.........: %d" % len(self.all_properties_datatype), "comment")
    if verbose:
        printDebug("..object...........: %d" % len(self.all_properties_object), "comment")
    self.build_skos_concepts()
    if verbose:
        printDebug("Concepts (SKOS)....: %d" % len(self.all_skos_concepts), "comment")
    self.build_shapes()
    if verbose:
        printDebug("Shapes (SHACL).....: %d" % len(self.all_shapes), "comment")
    # name-mangled private helper: must run after properties are built
    self.__computeInferredProperties()
    if verbose:
        printDebug("----------", "comment")
|
Extract all ontology entities from an RDF graph and construct Python representations of them .
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.