idx (int64, 0-63k) | question (string, lengths 61-4.03k) | target (string, lengths 6-1.23k) |
---|---|---|
1,400 |
def render_customizations ( self ) : disable_plugins = self . customize_conf . get ( 'disable_plugins' , [ ] ) if not disable_plugins : logger . debug ( "No site-specific plugins to disable" ) else : for plugin_dict in disable_plugins : try : self . dj . remove_plugin ( plugin_dict [ 'plugin_type' ] , plugin_dict [ 'plugin_name' ] ) logger . debug ( "site-specific plugin disabled -> Type:{} Name:{}" . format ( plugin_dict [ 'plugin_type' ] , plugin_dict [ 'plugin_name' ] ) ) except KeyError : logger . debug ( "Invalid custom configuration found for disable_plugins" ) enable_plugins = self . customize_conf . get ( 'enable_plugins' , [ ] ) if not enable_plugins : logger . debug ( "No site-specific plugins to enable" ) else : for plugin_dict in enable_plugins : try : self . dj . add_plugin ( plugin_dict [ 'plugin_type' ] , plugin_dict [ 'plugin_name' ] , plugin_dict [ 'plugin_args' ] ) logger . debug ( "site-specific plugin enabled -> Type:{} Name:{} Args: {}" . format ( plugin_dict [ 'plugin_type' ] , plugin_dict [ 'plugin_name' ] , plugin_dict [ 'plugin_args' ] ) ) except KeyError : logger . debug ( "Invalid custom configuration found for enable_plugins" )
|
Customize prod_inner for site specific customizations
|
1,401 |
def setup_json_capture ( osbs , os_conf , capture_dir ) : try : os . mkdir ( capture_dir ) except OSError : pass finally : osbs . os . _con . request = ResponseSaver ( capture_dir , os_conf . get_openshift_api_uri ( ) , os_conf . get_k8s_api_uri ( ) , osbs . os . _con . request ) . request
|
Only used for setting up the testing framework .
|
1,402 |
def _longest_val_in_column ( self , col ) : try : return max ( [ len ( x [ col ] ) for x in self . table if x [ col ] ] ) + 2 except KeyError : logger . error ( "there is no column %r" , col ) raise
|
get size of longest value in specific column
|
1,403 |
def _init ( self ) : self . col_count = len ( self . col_list ) self . col_longest = self . get_all_longest_col_lengths ( ) self . data_length = sum ( self . col_longest . values ( ) ) if self . terminal_width > 0 : self . total_free_space = ( self . terminal_width - self . data_length ) - self . col_count + 1 if self . total_free_space <= 0 : self . total_free_space = None else : self . default_column_space = self . total_free_space // self . col_count self . default_column_space_remainder = self . total_free_space % self . col_count logger . debug ( "total free space: %d, column space: %d, remainder: %d, columns: %d" , self . total_free_space , self . default_column_space , self . default_column_space_remainder , self . col_count ) else : self . total_free_space = None
|
initialize all values based on provided input
|
1,404 |
def _count_sizes ( self ) : format_list = [ ] header_sepa_format_list = [ ] self . col_widths = { } for col in self . col_list : col_length = self . col_longest [ col ] col_width = col_length + self . _separate ( ) format_list . append ( " {%s:%d} " % ( col , col_width - 2 ) ) header_sepa_format_list . append ( "{%s:%d}" % ( col , col_width ) ) self . col_widths [ col ] = col_width logger . debug ( "column widths %s" , self . col_widths ) self . format_str = "|" . join ( format_list ) self . header_format_str = "+" . join ( header_sepa_format_list ) self . header_data = { } for k in self . col_widths : self . header_data [ k ] = "-" * self . col_widths [ k ]
|
count all values needed to display whole table
|
1,405 |
def get_all_longest_col_lengths ( self ) : response = { } for col in self . col_list : response [ col ] = self . _longest_val_in_column ( col ) return response
|
iterate over all columns and get their longest values
|
1,406 |
def _separate ( self ) : if self . total_free_space is None : return 0 else : sepa = self . default_column_space if self . default_column_space_remainder > 0 : sepa += 1 self . default_column_space_remainder -= 1 logger . debug ( "remainder: %d, separator: %d" , self . default_column_space_remainder , sepa ) return sepa
|
get a width of separator for current column
|
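Rows 1,403 and 1,406 above together describe how spare terminal width is split into per-column separators. Below is a minimal standalone sketch of that arithmetic, with hypothetical column names and widths; the real class keeps this state on `self`.

```python
# Hypothetical inputs: three columns whose longest stored values are 10, 20,
# and 14 characters wide, rendered into an 80-character terminal.
col_longest = {"name": 10, "status": 20, "created": 14}
terminal_width = 80

col_count = len(col_longest)                                        # 3
data_length = sum(col_longest.values())                             # 44
total_free_space = (terminal_width - data_length) - col_count + 1   # 34
default_column_space = total_free_space // col_count                # 11
default_column_space_remainder = total_free_space % col_count       # 1

# _separate() hands each column the base space and spreads the remainder
# one extra character at a time until it runs out.
widths = {}
for col, longest in col_longest.items():
    sepa = default_column_space
    if default_column_space_remainder > 0:
        sepa += 1
        default_column_space_remainder -= 1
    widths[col] = longest + sepa

print(widths)   # {'name': 22, 'status': 31, 'created': 25}
```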
1,407 |
def render ( self ) : print ( self . format_str . format ( ** self . header ) , file = sys . stderr ) print ( self . header_format_str . format ( ** self . header_data ) , file = sys . stderr ) for row in self . data : print ( self . format_str . format ( ** row ) )
|
print provided table
|
1,408 |
def set_params ( self , ** kwargs ) : self . scratch = kwargs . get ( 'scratch' ) self . is_auto = kwargs . pop ( 'is_auto' , False ) self . isolated = kwargs . get ( 'isolated' ) self . osbs_api = kwargs . pop ( 'osbs_api' , None ) self . validate_build_variation ( ) self . base_image = kwargs . get ( 'base_image' ) self . platform_node_selector = kwargs . get ( 'platform_node_selector' , { } ) self . scratch_build_node_selector = kwargs . get ( 'scratch_build_node_selector' , { } ) self . explicit_build_node_selector = kwargs . get ( 'explicit_build_node_selector' , { } ) self . auto_build_node_selector = kwargs . get ( 'auto_build_node_selector' , { } ) self . isolated_build_node_selector = kwargs . get ( 'isolated_build_node_selector' , { } ) logger . debug ( "now setting params '%s' for user_params" , kwargs ) self . user_params . set_params ( ** kwargs ) self . source_registry = None self . organization = None
|
set parameters in the user parameters
|
1,409 |
def set_data_from_reactor_config ( self ) : reactor_config_override = self . user_params . reactor_config_override . value reactor_config_map = self . user_params . reactor_config_map . value data = None if reactor_config_override : data = reactor_config_override elif reactor_config_map : config_map = self . osbs_api . get_config_map ( reactor_config_map ) data = config_map . get_data_by_key ( 'config.yaml' ) if not data : if self . user_params . flatpak . value : raise OsbsValidationException ( "flatpak_base_image must be provided" ) else : return source_registry_key = 'source_registry' registry_organization_key = 'registries_organization' req_secrets_key = 'required_secrets' token_secrets_key = 'worker_token_secrets' flatpak_key = 'flatpak' flatpak_base_image_key = 'base_image' if source_registry_key in data : self . source_registry = data [ source_registry_key ] if registry_organization_key in data : self . organization = data [ registry_organization_key ] if self . user_params . flatpak . value : flatpack_base_image = data . get ( flatpak_key , { } ) . get ( flatpak_base_image_key , None ) if flatpack_base_image : self . base_image = flatpack_base_image self . user_params . base_image . value = flatpack_base_image else : raise OsbsValidationException ( "flatpak_base_image must be provided" ) required_secrets = data . get ( req_secrets_key , [ ] ) token_secrets = data . get ( token_secrets_key , [ ] ) self . _set_required_secrets ( required_secrets , token_secrets )
|
Sets data from reactor config
|
1,410 |
def _set_required_secrets ( self , required_secrets , token_secrets ) : if self . user_params . build_type . value == BUILD_TYPE_ORCHESTRATOR : required_secrets += token_secrets if not required_secrets : return secrets = self . template [ 'spec' ] [ 'strategy' ] [ 'customStrategy' ] . setdefault ( 'secrets' , [ ] ) existing = set ( secret_mount [ 'secretSource' ] [ 'name' ] for secret_mount in secrets ) required_secrets = set ( required_secrets ) already_set = required_secrets . intersection ( existing ) if already_set : logger . debug ( "secrets %s are already set" , already_set ) for secret in required_secrets - existing : secret_path = os . path . join ( SECRETS_PATH , secret ) logger . info ( "Configuring %s secret at %s" , secret , secret_path ) secrets . append ( { 'secretSource' : { 'name' : secret , } , 'mountPath' : secret_path , } )
|
Sets required secrets
|
1,411 |
def adjust_for_isolated ( self ) : if self . user_params . isolated . value : remove_plugins = [ ( "prebuild_plugins" , "check_and_set_rebuild" ) , ( "prebuild_plugins" , "stop_autorebuild_if_disabled" ) ] for when , which in remove_plugins : self . pt . remove_plugin ( when , which , 'removed from isolated build request' )
|
Remove certain plugins in order to handle the isolated build scenario .
|
1,412 |
def adjust_for_flatpak ( self ) : if self . user_params . flatpak . value : remove_plugins = [ ( "prebuild_plugins" , "resolve_composes" ) , ( "prepublish_plugins" , "squash" ) , ( "postbuild_plugins" , "pulp_push" ) , ( "postbuild_plugins" , "pulp_tag" ) , ( "postbuild_plugins" , "pulp_sync" ) , ( "exit_plugins" , "pulp_publish" ) , ( "exit_plugins" , "pulp_pull" ) , ( "exit_plugins" , "delete_from_registry" ) , ] for when , which in remove_plugins : self . pt . remove_plugin ( when , which , 'not needed for flatpak build' )
|
Remove plugins that don't work when building Flatpaks.
|
1,413 |
def render_customizations ( self ) : disable_plugins = self . pt . customize_conf . get ( 'disable_plugins' , [ ] ) if not disable_plugins : logger . debug ( 'No site-user specified plugins to disable' ) else : for plugin in disable_plugins : try : self . pt . remove_plugin ( plugin [ 'plugin_type' ] , plugin [ 'plugin_name' ] , 'disabled at user request' ) except KeyError : logger . info ( 'Invalid custom configuration found for disable_plugins' ) enable_plugins = self . pt . customize_conf . get ( 'enable_plugins' , [ ] ) if not enable_plugins : logger . debug ( 'No site-user specified plugins to enable"' ) else : for plugin in enable_plugins : try : msg = 'enabled at user request' self . pt . add_plugin ( plugin [ 'plugin_type' ] , plugin [ 'plugin_name' ] , plugin [ 'plugin_args' ] , msg ) except KeyError : logger . info ( 'Invalid custom configuration found for enable_plugins' )
|
Customize the template for site- and user-specified customizations.
|
1,414 |
def render_koji ( self ) : phase = 'prebuild_plugins' plugin = 'koji' if not self . pt . has_plugin_conf ( phase , plugin ) : return if self . user_params . yum_repourls . value : self . pt . remove_plugin ( phase , plugin , 'there is a yum repo user parameter' ) elif not self . pt . set_plugin_arg_valid ( phase , plugin , "target" , self . user_params . koji_target . value ) : self . pt . remove_plugin ( phase , plugin , 'no koji target supplied in user parameters' )
|
If there is a yum repo in user params, don't pick stuff from koji.
|
1,415 |
def render_check_and_set_platforms ( self ) : phase = 'prebuild_plugins' plugin = 'check_and_set_platforms' if not self . pt . has_plugin_conf ( phase , plugin ) : return if self . user_params . koji_target . value : self . pt . set_plugin_arg ( phase , plugin , "koji_target" , self . user_params . koji_target . value )
|
If the check_and_set_platforms plugin is present configure it
|
1,416 |
def get_all_build_configs_by_labels ( self , label_selectors ) : labels = [ '%s=%s' % ( field , value ) for field , value in label_selectors ] labels = ',' . join ( labels ) url = self . _build_url ( "buildconfigs/" , labelSelector = labels ) return self . _get ( url ) . json ( ) [ 'items' ]
|
Returns all build configs matching a given set of label selectors. It is up to the calling function to filter the results.
|
1,417 |
def get_build_config_by_labels ( self , label_selectors ) : items = self . get_all_build_configs_by_labels ( label_selectors ) if not items : raise OsbsException ( "Build config not found for labels: %r" % ( label_selectors , ) ) if len ( items ) > 1 : raise OsbsException ( "More than one build config found for labels: %r" % ( label_selectors , ) ) return items [ 0 ]
|
Returns a build config matching the given label selectors . This method will raise OsbsException if not exactly one build config is found .
|
1,418 |
def get_build_config_by_labels_filtered ( self , label_selectors , filter_key , filter_value ) : items = self . get_all_build_configs_by_labels ( label_selectors ) if filter_value is not None : build_configs = [ ] for build_config in items : match_value = graceful_chain_get ( build_config , * filter_key . split ( '.' ) ) if filter_value == match_value : build_configs . append ( build_config ) items = build_configs if not items : raise OsbsException ( "Build config not found for labels: %r" % ( label_selectors , ) ) if len ( items ) > 1 : raise OsbsException ( "More than one build config found for labels: %r" % ( label_selectors , ) ) return items [ 0 ]
|
Returns a build config matching the given label selectors filtering against another predetermined value . This method will raise OsbsException if not exactly one build config is found after filtering .
|
1,419 |
def stream_logs ( self , build_id ) : kwargs = { 'follow' : 1 } min_idle_timeout = 60 last_activity = time . time ( ) while True : buildlogs_url = self . _build_url ( "builds/%s/log/" % build_id , ** kwargs ) try : response = self . _get ( buildlogs_url , stream = 1 , headers = { 'Connection' : 'close' } ) check_response ( response ) for line in response . iter_lines ( ) : last_activity = time . time ( ) yield line except OsbsException as exc : if not isinstance ( exc . cause , ConnectionError ) : raise idle = time . time ( ) - last_activity logger . debug ( "connection closed after %ds" , idle ) if idle < min_idle_timeout : return since = int ( idle - 1 ) logger . debug ( "fetching logs starting from %ds ago" , since ) kwargs [ 'sinceSeconds' ] = since
|
stream logs from build
|
1,420 |
def list_builds ( self , build_config_id = None , koji_task_id = None , field_selector = None , labels = None ) : query = { } selector = '{key}={value}' label = { } if labels is not None : label . update ( labels ) if build_config_id is not None : label [ 'buildconfig' ] = build_config_id if koji_task_id is not None : label [ 'koji-task-id' ] = str ( koji_task_id ) if label : query [ 'labelSelector' ] = ',' . join ( [ selector . format ( key = key , value = value ) for key , value in label . items ( ) ] ) if field_selector is not None : query [ 'fieldSelector' ] = field_selector url = self . _build_url ( "builds/" , ** query ) return self . _get ( url )
|
List builds matching criteria
|
1,421 |
def create_resource_quota ( self , name , quota_json ) : url = self . _build_k8s_url ( "resourcequotas/" ) response = self . _post ( url , data = json . dumps ( quota_json ) , headers = { "Content-Type" : "application/json" } ) if response . status_code == http_client . CONFLICT : url = self . _build_k8s_url ( "resourcequotas/%s" % name ) response = self . _put ( url , data = json . dumps ( quota_json ) , headers = { "Content-Type" : "application/json" } ) check_response ( response ) return response
|
Prevent builds being scheduled and wait for running builds to finish .
|
1,422 |
def adjust_attributes_on_object ( self , collection , name , things , values , how ) : url = self . _build_url ( "%s/%s" % ( collection , name ) ) response = self . _get ( url ) logger . debug ( "before modification: %s" , response . content ) build_json = response . json ( ) how ( build_json [ 'metadata' ] , things , values ) response = self . _put ( url , data = json . dumps ( build_json ) , use_json = True ) check_response ( response ) return response
|
adjust labels or annotations on object
|
1,423 |
def update_annotations_on_build ( self , build_id , annotations ) : return self . adjust_attributes_on_object ( 'builds' , build_id , 'annotations' , annotations , self . _update_metadata_things )
|
set annotations on build object
|
1,424 |
def load ( self ) : formatter = JsonLinesTableFormatter ( self . load_dict ( ) ) formatter . accept ( self ) return formatter . to_table_data ( )
|
Extract tabular data as |TableData| instances from a Line-delimited JSON file. |load_source_desc_file|
|
1,425 |
def load ( self ) : import gspread from oauth2client . service_account import ServiceAccountCredentials self . _validate_table_name ( ) self . _validate_title ( ) scope = [ "https://spreadsheets.google.com/feeds" , "https://www.googleapis.com/auth/drive" ] credentials = ServiceAccountCredentials . from_json_keyfile_name ( self . source , scope ) gc = gspread . authorize ( credentials ) try : for worksheet in gc . open ( self . title ) . worksheets ( ) : self . _worksheet = worksheet self . __all_values = [ row for row in worksheet . get_all_values ( ) ] if self . _is_empty_sheet ( ) : continue try : self . __strip_empty_col ( ) except ValueError : continue value_matrix = self . __all_values [ self . _get_start_row_idx ( ) : ] try : headers = value_matrix [ 0 ] rows = value_matrix [ 1 : ] except IndexError : continue self . inc_table_count ( ) yield TableData ( self . make_table_name ( ) , headers , rows , dp_extractor = self . dp_extractor , type_hints = self . _extract_type_hints ( headers ) , ) except gspread . exceptions . SpreadsheetNotFound : raise OpenError ( "spreadsheet '{}' not found" . format ( self . title ) ) except gspread . exceptions . APIError as e : raise APIError ( e )
|
Load table data from a Google Spreadsheet .
|
1,426 |
def buildconfig_update ( orig , new , remove_nonexistent_keys = False ) : if isinstance ( orig , dict ) and isinstance ( new , dict ) : clean_triggers ( orig , new ) if remove_nonexistent_keys : missing = set ( orig . keys ( ) ) - set ( new . keys ( ) ) for k in missing : orig . pop ( k ) for k , v in new . items ( ) : if k == 'strategy' : remove_nonexistent_keys = True if isinstance ( orig . get ( k ) , dict ) and isinstance ( v , dict ) : buildconfig_update ( orig [ k ] , v , remove_nonexistent_keys ) else : orig [ k ] = v
|
Performs an update of the given orig BuildConfig with values from the new BuildConfig. Both BuildConfigs have to be represented as dicts.
|
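A simplified, standalone sketch of the merge behaviour of `buildconfig_update` above; the real function also calls `clean_triggers`, which is omitted here, and the BuildConfig fragments are hypothetical.

```python
def merge(orig, new, remove_nonexistent_keys=False):
    """Recursively update orig with new; under 'strategy', drop keys missing from new."""
    if remove_nonexistent_keys:
        for k in set(orig) - set(new):
            orig.pop(k)
    for k, v in new.items():
        if isinstance(orig.get(k), dict) and isinstance(v, dict):
            merge(orig[k], v, remove_nonexistent_keys or k == 'strategy')
        else:
            orig[k] = v

orig = {"metadata": {"name": "bc-1", "labels": {"old": "x"}},
        "spec": {"strategy": {"customStrategy": {"env": []}, "stale": 1}}}
new = {"metadata": {"labels": {"new": "y"}},
       "spec": {"strategy": {"customStrategy": {"env": [{"name": "A"}]}}}}
merge(orig, new)
# 'stale' is dropped under strategy, labels are merged, and the name is kept.
print(orig)
```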
1,427 |
def checkout_git_repo ( git_url , target_dir = None , commit = None , retry_times = GIT_MAX_RETRIES , branch = None , depth = None ) : tmpdir = tempfile . mkdtemp ( ) target_dir = target_dir or os . path . join ( tmpdir , "repo" ) try : yield clone_git_repo ( git_url , target_dir , commit , retry_times , branch , depth ) finally : shutil . rmtree ( tmpdir )
|
Clone the provided git repo to target_dir, optionally check out the provided commit, yield the ClonedRepoData, and delete the repo when finished.
|
1,428 |
def clone_git_repo ( git_url , target_dir = None , commit = None , retry_times = GIT_MAX_RETRIES , branch = None , depth = None ) : retry_delay = GIT_BACKOFF_FACTOR target_dir = target_dir or os . path . join ( tempfile . mkdtemp ( ) , "repo" ) commit = commit or "master" logger . info ( "cloning git repo '%s'" , git_url ) logger . debug ( "url = '%s', dir = '%s', commit = '%s'" , git_url , target_dir , commit ) cmd = [ "git" , "clone" ] if branch : cmd += [ "-b" , branch , "--single-branch" ] if depth : cmd += [ "--depth" , str ( depth ) ] elif depth : logger . warning ( "branch not provided for %s, depth setting ignored" , git_url ) depth = None cmd += [ git_url , target_dir ] logger . debug ( "cloning '%s'" , cmd ) repo_commit = '' repo_depth = None for counter in range ( retry_times + 1 ) : try : subprocess . check_output ( cmd , stderr = subprocess . STDOUT ) repo_commit , repo_depth = reset_git_repo ( target_dir , commit , depth ) break except subprocess . CalledProcessError as exc : if counter != retry_times : logger . info ( "retrying command '%s':\n '%s'" , cmd , exc . output ) time . sleep ( retry_delay * ( 2 ** counter ) ) else : raise OsbsException ( "Unable to clone git repo '%s' " "branch '%s'" % ( git_url , branch ) , cause = exc , traceback = sys . exc_info ( ) [ 2 ] ) return ClonedRepoData ( target_dir , repo_commit , repo_depth )
|
Clone the provided git repo to target_dir, optionally checking out the provided commit.
|
1,429 |
def reset_git_repo ( target_dir , git_reference , retry_depth = None ) : deepen = retry_depth or 0 base_commit_depth = 0 for _ in range ( GIT_FETCH_RETRY ) : try : if not deepen : cmd = [ 'git' , 'rev-list' , '--count' , git_reference ] base_commit_depth = int ( subprocess . check_output ( cmd , cwd = target_dir ) ) - 1 cmd = [ "git" , "reset" , "--hard" , git_reference ] logger . debug ( "Resetting current HEAD: '%s'" , cmd ) subprocess . check_call ( cmd , cwd = target_dir ) break except subprocess . CalledProcessError : if not deepen : raise OsbsException ( 'cannot find commit %s in repo %s' % ( git_reference , target_dir ) ) deepen *= 2 cmd = [ "git" , "fetch" , "--depth" , str ( deepen ) ] subprocess . check_call ( cmd , cwd = target_dir ) logger . debug ( "Couldn't find commit %s, increasing depth with '%s'" , git_reference , cmd ) else : raise OsbsException ( 'cannot find commit %s in repo %s' % ( git_reference , target_dir ) ) cmd = [ "git" , "rev-parse" , "HEAD" ] logger . debug ( "getting SHA-1 of provided ref '%s'" , git_reference ) commit_id = subprocess . check_output ( cmd , cwd = target_dir , universal_newlines = True ) commit_id = commit_id . strip ( ) logger . info ( "commit ID = %s" , commit_id ) final_commit_depth = None if not deepen : cmd = [ 'git' , 'rev-list' , '--count' , 'HEAD' ] final_commit_depth = int ( subprocess . check_output ( cmd , cwd = target_dir ) ) - base_commit_depth return commit_id , final_commit_depth
|
hard reset git clone in target_dir to given git_reference
|
1,430 |
def get_imagestreamtag_from_image ( image ) : ret = image ret = strip_registry_from_image ( image ) ret = ret . replace ( '/' , '-' ) if ret . find ( ':' ) == - 1 : ret += ":latest" return ret
|
Return an ImageStreamTag given a FROM value.
|
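A standalone sketch of the FROM-value-to-ImageStreamTag transform above; `strip_registry_from_image` is not shown in the row, so a simple host-stripping stand-in is assumed here.

```python
def strip_registry_from_image(image):
    # Stand-in: drop a leading "host[:port]/" component if one is present.
    first, _, rest = image.partition('/')
    return rest if rest and ('.' in first or ':' in first) else image

def get_imagestreamtag_from_image(image):
    ret = strip_registry_from_image(image)
    ret = ret.replace('/', '-')
    if ret.find(':') == -1:
        ret += ":latest"
    return ret

print(get_imagestreamtag_from_image("registry.example.com/osbs/base"))   # osbs-base:latest
print(get_imagestreamtag_from_image("osbs/base:rhel7"))                  # osbs-base:rhel7
```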
1,431 |
def get_time_from_rfc3339 ( rfc3339 ) : try : dt = dateutil . parser . parse ( rfc3339 , ignoretz = False ) return dt . timestamp ( ) except NameError : time_tuple = strptime ( rfc3339 , '%Y-%m-%dT%H:%M:%SZ' ) return timegm ( time_tuple )
|
Return the time (seconds since the epoch) from an RFC 3339-formatted time string.
|
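A quick check of the conversion above, assuming `python-dateutil` is installed (the fallback branch in the row is only hit when it is not).

```python
import dateutil.parser

dt = dateutil.parser.parse("2016-04-11T16:02:43Z", ignoretz=False)
print(dt.timestamp())   # 1460390563.0 -- seconds since the epoch for this UTC instant
```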
1,432 |
def make_name_from_git ( repo , branch , limit = 53 , separator = '-' , hash_size = 5 ) : branch = branch or 'unknown' full = urlparse ( repo ) . path . lstrip ( '/' ) + branch repo = git_repo_humanish_part_from_uri ( repo ) shaval = sha256 ( full . encode ( 'utf-8' ) ) . hexdigest ( ) hash_str = shaval [ : hash_size ] limit = limit - len ( hash_str ) - 1 sanitized = sanitize_strings_for_openshift ( repo , branch , limit , separator , False ) return separator . join ( filter ( None , ( sanitized , hash_str ) ) )
|
Return a name string representing the given git repo and branch, to be used as a build name.
|
1,433 |
def wrap_name_from_git ( prefix , suffix , * args , ** kwargs ) : prefix = '' . join ( filter ( VALID_BUILD_CONFIG_NAME_CHARS . match , list ( prefix ) ) ) suffix = '' . join ( filter ( VALID_BUILD_CONFIG_NAME_CHARS . match , list ( suffix ) ) ) kwargs [ 'limit' ] = kwargs . get ( 'limit' , 64 ) - len ( prefix ) - len ( suffix ) - 2 name_from_git = make_name_from_git ( * args , ** kwargs ) return '-' . join ( [ prefix , name_from_git , suffix ] )
|
Wraps the result of make_name_from_git in a prefix and suffix, adding separators for each.
|
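A standalone sketch of the naming scheme in rows 1,432 and 1,433 above; `git_repo_humanish_part_from_uri` and `sanitize_strings_for_openshift` are not shown in those rows, so simple stand-ins are assumed here and the printed name is only illustrative.

```python
import re
from hashlib import sha256
from urllib.parse import urlparse

def humanish(repo):
    # Stand-in for git_repo_humanish_part_from_uri: last path piece, ".git" dropped.
    return urlparse(repo).path.rstrip('/').rsplit('/', 1)[-1].replace('.git', '')

def sanitize(*parts, limit, separator):
    # Stand-in for sanitize_strings_for_openshift: lowercase, dashes only, truncated.
    joined = separator.join(re.sub(r'[^a-z0-9-]', '-', p.lower()) for p in parts if p)
    return joined[:limit].strip(separator)

def make_name_from_git(repo, branch, limit=53, separator='-', hash_size=5):
    branch = branch or 'unknown'
    full = urlparse(repo).path.lstrip('/') + branch
    hash_str = sha256(full.encode('utf-8')).hexdigest()[:hash_size]
    limit = limit - len(hash_str) - 1
    sanitized = sanitize(humanish(repo), branch, limit=limit, separator=separator)
    return separator.join(filter(None, (sanitized, hash_str)))

print(make_name_from_git("https://example.com/containers/osbs-client.git", "master"))
# e.g. "osbs-client-master-ab12c" -- the sanitized repo/branch part is capped so the
# whole name, including the 5-character hash suffix, stays within the 53-char limit.
```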
1,434 |
def get_name ( self , label_type ) : if label_type in self . _label_values : return self . _label_values [ label_type ] [ 0 ] else : return Labels . LABEL_NAMES [ label_type ] [ 0 ]
|
Returns the most preferred label name; if there isn't any correct name in the list, it will return the newest label name.
|
1,435 |
def get_new_names_by_old ( ) : newdict = { } for label_type , label_names in Labels . LABEL_NAMES . items ( ) : for oldname in label_names [ 1 : ] : newdict [ oldname ] = Labels . LABEL_NAMES [ label_type ] [ 0 ] return newdict
|
Return a dictionary of new label names indexed by old label name.
|
1,436 |
def kerberos_ccache_init ( principal , keytab_file , ccache_file = None ) : tgt_valid = False env = { "LC_ALL" : "C" } if ccache_file : env [ "KRB5CCNAME" ] = ccache_file rc , klist , _ = run ( [ "klist" ] , extraenv = env ) if rc == 0 : for line in klist . splitlines ( ) : m = re . match ( KLIST_TGT_RE , line ) if m : year = m . group ( "year" ) if len ( year ) == 2 : year = "20" + year expires = datetime . datetime ( int ( year ) , int ( m . group ( "month" ) ) , int ( m . group ( "day" ) ) , int ( m . group ( "hour" ) ) , int ( m . group ( "minute" ) ) , int ( m . group ( "second" ) ) ) if expires - datetime . datetime . now ( ) > datetime . timedelta ( hours = 1 ) : logger . debug ( "Valid TGT found, not renewing" ) tgt_valid = True break if not tgt_valid : logger . debug ( "Retrieving kerberos TGT" ) rc , out , err = run ( [ "kinit" , "-k" , "-t" , keytab_file , principal ] , extraenv = env ) if rc != 0 : raise OsbsException ( "kinit returned %s:\nstdout: %s\nstderr: %s" % ( rc , out , err ) ) if ccache_file : os . environ [ "KRB5CCNAME" ] = ccache_file
|
Checks whether the Kerberos credential cache has a ticket-granting ticket that is valid for at least an hour.
|
1,437 |
def load ( self ) : self . _validate ( ) formatter = SqliteTableFormatter ( self . source ) formatter . accept ( self ) return formatter . to_table_data ( )
|
Extract tabular data as |TableData| instances from a SQLite database file . |load_source_desc_file|
|
1,438 |
def get_data ( self ) : data = graceful_chain_get ( self . json , "data" ) if data is None : return { } data_dict = { } for key in data : if self . is_yaml ( key ) : data_dict [ key ] = yaml . load ( data [ key ] ) else : data_dict [ key ] = json . loads ( data [ key ] ) return data_dict
|
Find the data stored in the config_map
|
1,439 |
def get_data_by_key ( self , name ) : data = graceful_chain_get ( self . json , "data" ) if data is None or name not in data : return { } if self . is_yaml ( name ) : return yaml . load ( data [ name ] ) or { } return json . loads ( data [ name ] )
|
Find the object stored by a JSON string at key name
|
1,440 |
def list_builds ( self , field_selector = None , koji_task_id = None , running = None , labels = None ) : if running : running_fs = "," . join ( [ "status!={status}" . format ( status = status . capitalize ( ) ) for status in BUILD_FINISHED_STATES ] ) if not field_selector : field_selector = running_fs else : field_selector = ',' . join ( [ field_selector , running_fs ] ) response = self . os . list_builds ( field_selector = field_selector , koji_task_id = koji_task_id , labels = labels ) serialized_response = response . json ( ) build_list = [ ] for build in serialized_response [ "items" ] : build_list . append ( BuildResponse ( build , self ) ) return build_list
|
List builds with matching fields
|
1,441 |
def get_build_request ( self , build_type = None , inner_template = None , outer_template = None , customize_conf = None , arrangement_version = DEFAULT_ARRANGEMENT_VERSION ) : if build_type is not None : warnings . warn ( "build types are deprecated, do not use the build_type argument" ) validate_arrangement_version ( arrangement_version ) if not arrangement_version or arrangement_version < REACTOR_CONFIG_ARRANGEMENT_VERSION : build_request = BuildRequest ( build_json_store = self . os_conf . get_build_json_store ( ) , inner_template = inner_template , outer_template = outer_template , customize_conf = customize_conf ) else : build_request = BuildRequestV2 ( build_json_store = self . os_conf . get_build_json_store ( ) , outer_template = outer_template , customize_conf = customize_conf ) cpu_limit = self . build_conf . get_cpu_limit ( ) memory_limit = self . build_conf . get_memory_limit ( ) storage_limit = self . build_conf . get_storage_limit ( ) if ( cpu_limit is not None or memory_limit is not None or storage_limit is not None ) : build_request . set_resource_limits ( cpu = cpu_limit , memory = memory_limit , storage = storage_limit ) return build_request
|
return instance of BuildRequest or BuildRequestV2
|
1,442 |
def create_build_from_buildrequest ( self , build_request ) : build_request . set_openshift_required_version ( self . os_conf . get_openshift_required_version ( ) ) build = build_request . render ( ) response = self . os . create_build ( json . dumps ( build ) ) build_response = BuildResponse ( response . json ( ) , self ) return build_response
|
render provided build_request and submit build from it
|
1,443 |
def _get_image_stream_info_for_build_request ( self , build_request ) : image_stream = None image_stream_tag_name = None if build_request . has_ist_trigger ( ) : image_stream_tag_id = build_request . trigger_imagestreamtag image_stream_id , image_stream_tag_name = image_stream_tag_id . split ( ':' ) try : image_stream = self . get_image_stream ( image_stream_id ) . json ( ) except OsbsResponseException as x : if x . status_code != 404 : raise if image_stream : try : self . get_image_stream_tag ( image_stream_tag_id ) . json ( ) except OsbsResponseException as x : if x . status_code != 404 : raise return image_stream , image_stream_tag_name
|
Return ImageStream and ImageStreamTag name for base_image of build_request
|
1,444 |
def create_prod_build ( self , * args , ** kwargs ) : logger . warning ( "prod (all-in-one) builds are deprecated, " "please use create_orchestrator_build " "(support will be removed in version 0.54)" ) return self . _do_create_prod_build ( * args , ** kwargs )
|
Create a production build
|
1,445 |
def create_worker_build ( self , ** kwargs ) : missing = set ( ) for required in ( 'platform' , 'release' , 'arrangement_version' ) : if not kwargs . get ( required ) : missing . add ( required ) if missing : raise ValueError ( "Worker build missing required parameters: %s" % missing ) if kwargs . get ( 'platforms' ) : raise ValueError ( "Worker build called with unwanted platforms param" ) arrangement_version = kwargs [ 'arrangement_version' ] kwargs . setdefault ( 'inner_template' , WORKER_INNER_TEMPLATE . format ( arrangement_version = arrangement_version ) ) kwargs . setdefault ( 'outer_template' , WORKER_OUTER_TEMPLATE ) kwargs . setdefault ( 'customize_conf' , WORKER_CUSTOMIZE_CONF ) kwargs [ 'build_type' ] = BUILD_TYPE_WORKER try : return self . _do_create_prod_build ( ** kwargs ) except IOError as ex : if os . path . basename ( ex . filename ) == kwargs [ 'inner_template' ] : raise OsbsValidationException ( "worker invalid arrangement_version %s" % arrangement_version ) raise
|
Create a worker build
|
1,446 |
def create_orchestrator_build ( self , ** kwargs ) : if not self . can_orchestrate ( ) : raise OsbsOrchestratorNotEnabled ( "can't create orchestrate build " "when can_orchestrate isn't enabled" ) extra = [ x for x in ( 'platform' , ) if kwargs . get ( x ) ] if extra : raise ValueError ( "Orchestrator build called with unwanted parameters: %s" % extra ) arrangement_version = kwargs . setdefault ( 'arrangement_version' , self . build_conf . get_arrangement_version ( ) ) if arrangement_version < REACTOR_CONFIG_ARRANGEMENT_VERSION and not kwargs . get ( 'platforms' ) : raise ValueError ( 'Orchestrator build requires platforms param' ) kwargs . setdefault ( 'inner_template' , ORCHESTRATOR_INNER_TEMPLATE . format ( arrangement_version = arrangement_version ) ) kwargs . setdefault ( 'outer_template' , ORCHESTRATOR_OUTER_TEMPLATE ) kwargs . setdefault ( 'customize_conf' , ORCHESTRATOR_CUSTOMIZE_CONF ) kwargs [ 'build_type' ] = BUILD_TYPE_ORCHESTRATOR try : return self . _do_create_prod_build ( ** kwargs ) except IOError as ex : if os . path . basename ( ex . filename ) == kwargs [ 'inner_template' ] : raise OsbsValidationException ( "orchestrator invalid arrangement_version %s" % arrangement_version ) raise
|
Create an orchestrator build
|
1,447 |
def get_orchestrator_build_logs ( self , build_id , follow = False , wait_if_missing = False ) : logs = self . get_build_logs ( build_id = build_id , follow = follow , wait_if_missing = wait_if_missing , decode = True ) if logs is None : return if isinstance ( logs , GeneratorType ) : for entries in logs : for entry in entries . splitlines ( ) : yield LogEntry ( * self . _parse_build_log_entry ( entry ) ) else : for entry in logs . splitlines ( ) : yield LogEntry ( * self . _parse_build_log_entry ( entry ) )
|
provide logs from orchestrator build
|
1,448 |
def import_image_tags ( self , name , tags , repository , insecure = False ) : stream_import_file = os . path . join ( self . os_conf . get_build_json_store ( ) , 'image_stream_import.json' ) with open ( stream_import_file ) as f : stream_import = json . load ( f ) return self . os . import_image_tags ( name , stream_import , tags , repository , insecure )
|
Import image tags from specified container repository .
|
1,449 |
def ensure_image_stream_tag ( self , stream , tag_name , scheduled = False , source_registry = None , organization = None , base_image = None ) : img_stream_tag_file = os . path . join ( self . os_conf . get_build_json_store ( ) , 'image_stream_tag.json' ) with open ( img_stream_tag_file ) as f : tag_template = json . load ( f ) repository = None registry = None insecure = False if source_registry : registry = RegistryURI ( source_registry [ 'url' ] ) . docker_uri insecure = source_registry . get ( 'insecure' , False ) if base_image and registry : repository = self . _get_enclosed_repo_with_source_registry ( base_image , registry , organization ) return self . os . ensure_image_stream_tag ( stream , tag_name , tag_template , scheduled , repository = repository , insecure = insecure )
|
Ensures the tag is monitored in ImageStream
|
1,450 |
def create_image_stream ( self , name , docker_image_repository , insecure_registry = False ) : img_stream_file = os . path . join ( self . os_conf . get_build_json_store ( ) , 'image_stream.json' ) with open ( img_stream_file ) as f : stream = json . load ( f ) stream [ 'metadata' ] [ 'name' ] = name stream [ 'metadata' ] . setdefault ( 'annotations' , { } ) stream [ 'metadata' ] [ 'annotations' ] [ ANNOTATION_SOURCE_REPO ] = docker_image_repository if insecure_registry : stream [ 'metadata' ] [ 'annotations' ] [ ANNOTATION_INSECURE_REPO ] = 'true' return self . os . create_image_stream ( json . dumps ( stream ) )
|
Create an ImageStream object
|
1,451 |
def get_compression_extension ( self ) : build_request = BuildRequest ( build_json_store = self . os_conf . get_build_json_store ( ) ) inner = build_request . inner_template postbuild_plugins = inner . get ( 'postbuild_plugins' , [ ] ) for plugin in postbuild_plugins : if plugin . get ( 'name' ) == 'compress' : args = plugin . get ( 'args' , { } ) method = args . get ( 'method' , 'gzip' ) if method == 'gzip' : return '.gz' elif method == 'lzma' : return '.xz' raise OsbsValidationException ( "unknown compression method '%s'" % method ) return None
|
Find the filename extension for the docker save output which may or may not be compressed .
|
1,452 |
def create_config_map ( self , name , data ) : config_data_file = os . path . join ( self . os_conf . get_build_json_store ( ) , 'config_map.json' ) with open ( config_data_file ) as f : config_data = json . load ( f ) config_data [ 'metadata' ] [ 'name' ] = name data_dict = { } for key , value in data . items ( ) : data_dict [ key ] = json . dumps ( value ) config_data [ 'data' ] = data_dict response = self . os . create_config_map ( config_data ) config_map_response = ConfigMapResponse ( response . json ( ) ) return config_map_response
|
Create a ConfigMap object on the server.
|
1,453 |
def get_config_map ( self , name ) : response = self . os . get_config_map ( name ) config_map_response = ConfigMapResponse ( response . json ( ) ) return config_map_response
|
Get a ConfigMap object from the server
|
1,454 |
def wipe ( self ) : try : if os . path . isfile ( self . _dbpath ) : os . remove ( self . _dbpath ) except OSError : pass
|
Wipe the bolt database .
|
1,455 |
def metadata ( self , delete = False ) : if delete : return self . _session . delete ( self . __v1 ( ) + "/metadata" ) . json ( ) else : return self . _session . get ( self . __v1 ( ) + "/metadata" ) . json ( )
|
Gets the metadata .
|
1,456 |
def addDelay ( self , urlPattern = "" , delay = 0 , httpMethod = None ) : print ( "addDelay is deprecated please use delays instead" ) delay = { "urlPattern" : urlPattern , "delay" : delay } if httpMethod : delay [ "httpMethod" ] = httpMethod return self . delays ( delays = { "data" : [ delay ] } )
|
Adds delays .
|
1,457 |
def __enableProxy ( self ) : os . environ [ "HTTP_PROXY" ] = self . httpProxy ( ) os . environ [ "HTTPS_PROXY" ] = self . httpsProxy ( ) os . environ [ "REQUESTS_CA_BUNDLE" ] = os . path . join ( os . path . dirname ( os . path . abspath ( __file__ ) ) , "cert.pem" )
|
Set the required environment variables to enable the use of hoverfly as a proxy .
|
1,458 |
def __writepid ( self , pid ) : import tempfile d = tempfile . gettempdir ( ) name = os . path . join ( d , "hoverpy.%i.%i" % ( self . _proxyPort , self . _adminPort ) ) with open ( name , 'w' ) as f : f . write ( str ( pid ) ) logging . debug ( "writing to %s" % name )
|
HoverFly fails to launch if it's already running on the same ports, so we have to keep track of them using temp files named with the proxy port and admin port and containing the process's PID.
|
1,459 |
def __rmpid ( self ) : import tempfile d = tempfile . gettempdir ( ) name = os . path . join ( d , "hoverpy.%i.%i" % ( self . _proxyPort , self . _adminPort ) ) if os . path . exists ( name ) : os . unlink ( name ) logging . debug ( "deleting %s" % name )
|
Remove the PID file on shutdown; unfortunately this may not get called if not given the time to shut down.
|
1,460 |
def __start ( self ) : logging . debug ( "starting %i" % id ( self ) ) self . __kill_if_not_shut_properly ( ) self . FNULL = open ( os . devnull , 'w' ) flags = self . __flags ( ) cmd = [ hoverfly ] + flags if self . _showCmd : print ( cmd ) self . _process = Popen ( [ hoverfly ] + flags , stdin = self . FNULL , stdout = self . FNULL , stderr = subprocess . STDOUT ) start = time . time ( ) while time . time ( ) - start < 1 : try : url = "http://%s:%i/api/health" % ( self . _host , self . _adminPort ) r = self . _session . get ( url ) j = r . json ( ) up = "message" in j and "healthy" in j [ "message" ] if up : logging . debug ( "has pid %i" % self . _process . pid ) self . __writepid ( self . _process . pid ) return self . _process else : time . sleep ( 1 / 100.0 ) except : time . sleep ( 1 / 100.0 ) pass logging . error ( "Could not start hoverfly!" ) raise ValueError ( "Could not start hoverfly!" )
|
Start the hoverfly process .
|
1,461 |
def __stop ( self ) : if logging : logging . debug ( "stopping" ) self . _process . terminate ( ) self . _process . communicate ( ) self . _process = None self . FNULL . close ( ) self . FNULL = None self . __disableProxy ( ) self . __rmpid ( )
|
Stop the hoverfly process .
|
1,462 |
def __flags ( self ) : flags = [ ] if self . _capture : flags . append ( "-capture" ) if self . _spy : flags . append ( "-spy" ) if self . _dbpath : flags += [ "-db-path" , self . _dbpath ] flags += [ "-db" , "boltdb" ] else : flags += [ "-db" , "memory" ] if self . _synthesize : assert ( self . _middleware ) flags += [ "-synthesize" ] if self . _simulation : flags += [ "-import" , self . _simulation ] if self . _proxyPort : flags += [ "-pp" , str ( self . _proxyPort ) ] if self . _adminPort : flags += [ "-ap" , str ( self . _adminPort ) ] if self . _modify : flags += [ "-modify" ] if self . _verbose : flags += [ "-v" ] if self . _dev : flags += [ "-dev" ] if self . _metrics : flags += [ "-metrics" ] if self . _auth : flags += [ "-auth" ] if self . _middleware : flags += [ "-middleware" , self . _middleware ] if self . _cert : flags += [ "-cert" , self . _cert ] if self . _certName : flags += [ "-cert-name" , self . _certName ] if self . _certOrg : flags += [ "-cert-org" , self . _certOrg ] if self . _destination : flags += [ "-destination" , self . _destination ] if self . _key : flags += [ "-key" , self . _key ] if self . _dest : for i in range ( len ( self . _dest ) ) : flags += [ "-dest" , self . _dest [ i ] ] if self . _generateCACert : flags += [ "-generate-ca-cert" ] if not self . _tlsVerification : flags += [ "-tls-verification" , "false" ] logging . debug ( "flags:" + str ( flags ) ) return flags
|
Internal method . Turns arguments into flags .
|
1,463 |
def load ( self ) : formatter = JsonTableFormatter ( self . load_dict ( ) ) formatter . accept ( self ) return formatter . to_table_data ( )
|
Extract tabular data as |TableData| instances from a JSON file . |load_source_desc_file|
|
1,464 |
def load ( self ) : self . _validate ( ) self . _logger . logging_load ( ) self . encoding = get_file_encoding ( self . source , self . encoding ) with io . open ( self . source , "r" , encoding = self . encoding ) as fp : formatter = MediaWikiTableFormatter ( fp . read ( ) ) formatter . accept ( self ) return formatter . to_table_data ( )
|
Extract tabular data as |TableData| instances from a MediaWiki file . |load_source_desc_file|
|
1,465 |
def load ( self ) : self . _validate ( ) self . _logger . logging_load ( ) formatter = MediaWikiTableFormatter ( self . source ) formatter . accept ( self ) return formatter . to_table_data ( )
|
Extract tabular data as |TableData| instances from a MediaWiki text object . |load_source_desc_text|
|
1,466 |
def get_dock_json ( self ) : env_json = self . build_json [ 'spec' ] [ 'strategy' ] [ 'customStrategy' ] [ 'env' ] try : p = [ env for env in env_json if env [ "name" ] == "ATOMIC_REACTOR_PLUGINS" ] except TypeError : raise RuntimeError ( "\"env\" is not iterable" ) if len ( p ) <= 0 : raise RuntimeError ( "\"env\" misses key ATOMIC_REACTOR_PLUGINS" ) dock_json_str = p [ 0 ] [ 'value' ] dock_json = json . loads ( dock_json_str ) return dock_json
|
return dock json from existing build json
|
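A minimal, hypothetical build JSON showing what `get_dock_json` above extracts; only the env entry it reads is populated.

```python
import json

build_json = {
    "spec": {"strategy": {"customStrategy": {"env": [
        {"name": "ATOMIC_REACTOR_PLUGINS",
         "value": json.dumps({"prebuild_plugins": [{"name": "koji", "args": {}}]})},
    ]}}}
}

env_json = build_json['spec']['strategy']['customStrategy']['env']
plugins_env = [env for env in env_json if env["name"] == "ATOMIC_REACTOR_PLUGINS"]
dock_json = json.loads(plugins_env[0]['value'])
print(dock_json["prebuild_plugins"][0]["name"])   # koji
```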
1,467 |
def get_container_image_ids ( self ) : statuses = graceful_chain_get ( self . json , "status" , "containerStatuses" ) if statuses is None : return { } def remove_prefix ( image_id , prefix ) : if image_id . startswith ( prefix ) : return image_id [ len ( prefix ) : ] return image_id return { status [ 'image' ] : remove_prefix ( status [ 'imageID' ] , 'docker://' ) for status in statuses }
|
Find the image IDs the containers use .
|
1,468 |
def get_failure_reason ( self ) : reason_key = 'reason' cid_key = 'containerID' exit_key = 'exitCode' pod_status = self . json . get ( 'status' , { } ) statuses = pod_status . get ( 'containerStatuses' , [ ] ) for status in statuses : try : terminated = status [ 'state' ] [ 'terminated' ] exit_code = terminated [ 'exitCode' ] if exit_code != 0 : reason_dict = { exit_key : exit_code , } if 'containerID' in terminated : reason_dict [ cid_key ] = terminated [ 'containerID' ] for key in [ 'message' , 'reason' ] : try : reason_dict [ reason_key ] = terminated [ key ] break except KeyError : continue else : reason_dict [ reason_key ] = 'Exit code {code}' . format ( code = exit_code ) return reason_dict except KeyError : continue for key in [ 'message' , 'reason' ] : try : return { reason_key : pod_status [ key ] } except KeyError : continue return { reason_key : pod_status [ 'phase' ] }
|
Find the reason a pod failed
|
1,469 |
def get_error_message ( self ) : error_reason = self . get_error_reason ( ) if error_reason : error_message = error_reason . get ( 'pod' ) or None if error_message : return "Error in pod: %s" % error_message plugin = error_reason . get ( 'plugin' ) [ 0 ] or None error_message = error_reason . get ( 'plugin' ) [ 1 ] or None if error_message : return "Error in plugin %s: %s" % ( plugin , error_message ) else : return "Error in plugin %s" % plugin
|
Return an error message based on atomic-reactor's metadata.
|
1,470 |
def load ( self ) : import xlrd self . _validate ( ) self . _logger . logging_load ( ) try : workbook = xlrd . open_workbook ( self . source ) except xlrd . biffh . XLRDError as e : raise OpenError ( e ) for worksheet in workbook . sheets ( ) : self . _worksheet = worksheet if self . _is_empty_sheet ( ) : continue self . __extract_not_empty_col_idx ( ) try : start_row_idx = self . _get_start_row_idx ( ) except DataError : continue rows = [ self . __get_row_values ( row_idx ) for row_idx in range ( start_row_idx + 1 , self . _row_count ) ] self . inc_table_count ( ) headers = self . __get_row_values ( start_row_idx ) yield TableData ( self . _make_table_name ( ) , headers , rows , dp_extractor = self . dp_extractor , type_hints = self . _extract_type_hints ( headers ) , )
|
Extract tabular data as |TableData| instances from an Excel file . |spreadsheet_load_desc|
|
1,471 |
def load ( self ) : self . _validate ( ) self . _logger . logging_load ( ) self . encoding = get_file_encoding ( self . source , self . encoding ) self . _ltsv_input_stream = io . open ( self . source , "r" , encoding = self . encoding ) for data_matrix in self . _to_data_matrix ( ) : formatter = SingleJsonTableConverterA ( data_matrix ) formatter . accept ( self ) return formatter . to_table_data ( )
|
Extract tabular data as |TableData| instances from a LTSV file . |load_source_desc_file|
|
1,472 |
def load ( self ) : self . _validate ( ) self . _logger . logging_load ( ) self . _ltsv_input_stream = self . source . splitlines ( ) for data_matrix in self . _to_data_matrix ( ) : formatter = SingleJsonTableConverterA ( data_matrix ) formatter . accept ( self ) return formatter . to_table_data ( )
|
Extract tabular data as |TableData| instances from a LTSV text object . |load_source_desc_text|
|
1,473 |
def encode_args ( args , extra = False ) : if not args : return '' methodargs = ', ' . join ( [ encode ( a ) for a in args ] ) if extra : methodargs += ', ' return methodargs
|
Encode a list of arguments
|
1,474 |
def fill ( self , field , value ) : self . client . nowait ( 'browser.fill' , ( field , value ) ) return self
|
Fill a specified form field in the current document .
|
1,475 |
def value ( self , value ) : self . client . nowait ( 'set_field' , ( Literal ( 'browser' ) , self . element , value ) )
|
Used to set the value of form elements .
|
1,476 |
def fire ( self , event ) : self . browser . fire ( self . element , event ) return self
|
Fires a specified DOM event on the current node .
|
1,477 |
def _utf8_encode ( self , d ) : for k , v in d . items ( ) : if isinstance ( v , str ) : d [ k ] = v . encode ( 'utf8' ) . lower ( ) if isinstance ( v , list ) : for index , item in enumerate ( v ) : item = item . encode ( 'utf8' ) . lower ( ) v [ index ] = item if isinstance ( v , dict ) : d [ k ] = self . _utf8_encode ( v ) return d
|
Ensures all values are encoded in UTF-8 and converts them to lowercase.
|
1,478 |
def _bool_encode ( self , d ) : for k , v in d . items ( ) : if isinstance ( v , bool ) : d [ k ] = str ( v ) . lower ( ) return d
|
Converts bool values to lowercase strings
|
1,479 |
def parse_rosters ( self ) : lx_doc = self . html_doc ( ) if not self . __blocks : self . __pl_blocks ( lx_doc ) for t in [ 'home' , 'away' ] : self . rosters [ t ] = self . __clean_pl_block ( self . __blocks [ t ] ) return self if self . rosters else None
|
Parse the home and away game rosters
|
1,480 |
def parse_scratches ( self ) : lx_doc = self . html_doc ( ) if not self . __blocks : self . __pl_blocks ( lx_doc ) for t in [ 'aw_scr' , 'h_scr' ] : ix = 'away' if t == 'aw_scr' else 'home' self . scratches [ ix ] = self . __clean_pl_block ( self . __blocks [ t ] ) return self if self . scratches else None
|
Parse the home and away healthy scratches
|
1,481 |
def parse_coaches ( self ) : lx_doc = self . html_doc ( ) tr = lx_doc . xpath ( '//tr[@id="HeadCoaches"]' ) [ 0 ] for i , td in enumerate ( tr ) : txt = td . xpath ( './/text()' ) txt = ex_junk ( txt , [ '\n' , '\r' ] ) team = 'away' if i == 0 else 'home' self . coaches [ team ] = txt [ 0 ] return self if self . coaches else None
|
Parse the home and away coaches
|
1,482 |
def parse_officials ( self ) : lx_doc = self . html_doc ( ) off_parser = opm ( self . game_key . season ) self . officials = off_parser ( lx_doc ) return self if self . officials else None
|
Parse the officials
|
1,483 |
def is_valid ( self , domain , diagnose = False ) : return_status = [ ValidDiagnosis ( ) ] dns_checked = False try : dns . resolver . query ( domain , 'MX' ) dns_checked = True except ( dns . resolver . NXDOMAIN , dns . name . NameTooLong ) : return_status . append ( DNSDiagnosis ( 'NO_RECORD' ) ) if len ( domain . split ( '.' ) ) == 1 : dns_checked = True except dns . resolver . NoAnswer : return_status . append ( DNSDiagnosis ( 'NO_MX_RECORD' ) ) try : dns . resolver . query ( domain ) except dns . resolver . NoAnswer : return_status . append ( DNSDiagnosis ( 'NO_RECORD' ) ) except dns . resolver . NoNameservers : return_status . append ( DNSDiagnosis ( 'NO_NAMESERVERS' ) ) except ( dns . exception . Timeout , dns . resolver . Timeout ) : return_status . append ( DNSDiagnosis ( 'DNS_TIMEDOUT' ) ) if not dns_checked : atom_list = domain . split ( "." ) if len ( atom_list ) == 1 : return_status . append ( RFC5321Diagnosis ( 'TLD' ) ) try : float ( atom_list [ len ( atom_list ) - 1 ] [ 0 ] ) return_status . append ( RFC5321Diagnosis ( 'TLDNUMERIC' ) ) except ValueError : pass final_status = max ( return_status ) return final_status if diagnose else final_status == ValidDiagnosis ( )
|
Check whether a domain has a valid MX or A record .
|
1,484 |
def parse_plays_stream ( self ) : lx_doc = self . html_doc ( ) if lx_doc is not None : parser = PlayParser ( self . game_key . season , self . game_key . game_type ) plays = lx_doc . xpath ( '//tr[@class = "evenColor"]' ) for p in plays : p_obj = parser . build_play ( p ) self . plays . append ( p_obj ) yield p_obj
|
Generate and yield a stream of parsed plays. Useful for per-play processing.
|
1,485 |
def stack_files ( files , hemi , source , target ) : import csv import os import numpy as np fname = "sdist_%s_%s_%s.csv" % ( hemi , source , target ) filename = os . path . join ( os . getcwd ( ) , fname ) alldist = [ ] for dfile in files : alldist . append ( np . genfromtxt ( dfile , delimiter = ',' ) ) alldist = np . array ( alldist ) alldist . tofile ( filename , "," ) return filename
|
This function takes a list of files as input and vstacks them
|
1,486 |
def __html_rep ( self , game_key , rep_code ) : seas , gt , num = game_key . to_tuple ( ) url = [ self . __domain , "scores/htmlreports/" , str ( seas - 1 ) , str ( seas ) , "/" , rep_code , "0" , str ( gt ) , ( "%04i" % ( num ) ) , ".HTM" ] url = '' . join ( url ) return self . __open ( url )
|
Retrieves the nhl html reports for the specified game and report code
|
1,487 |
def to_char ( token ) : if ord ( token ) in _range ( 9216 , 9229 + 1 ) : token = _unichr ( ord ( token ) - 9216 ) return token
|
Transforms the ASCII control character symbols to their real char .
|
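A standalone check of the control-picture mapping above: code points U+2400-U+240D (9216-9229) are the visible symbols for NUL through CR, and subtracting 9216 recovers the real control character.

```python
def to_char(token):
    # U+2400 ("␀") .. U+240D ("␍") are printable stand-ins for NUL..CR.
    if ord(token) in range(9216, 9229 + 1):
        token = chr(ord(token) - 9216)
    return token

assert to_char("\u2400") == "\x00"   # symbol for NUL -> real NUL
assert to_char("\u240d") == "\r"     # symbol for CR  -> real carriage return
assert to_char("a") == "a"           # everything else passes through
```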
1,488 |
def load_freesurfer_label ( annot_input , label_name , cortex = None ) : if cortex is not None : print ( "Warning: cortex is not used to load the freesurfer label" ) labels , color_table , names = nib . freesurfer . read_annot ( annot_input ) names = [ i . decode ( 'utf-8' ) for i in names ] label_value = names . index ( label_name ) label_nodes = np . array ( np . where ( np . in1d ( labels , label_value ) ) , dtype = np . int32 ) return label_nodes
|
Get source node list for a specified freesurfer label .
|
1,489 |
def get_freesurfer_label ( annot_input , verbose = True ) : labels , color_table , names = nib . freesurfer . read_annot ( annot_input ) if verbose : print ( names ) return names
|
Print freesurfer label names .
|
1,490 |
def surf_keep_cortex ( surf , cortex ) : vertices , triangles = surf cortex_vertices = np . array ( vertices [ cortex ] , dtype = np . float64 ) cortex_triangles = triangles_keep_cortex ( triangles , cortex ) return cortex_vertices , cortex_triangles
|
Remove medial wall from cortical surface to ensure that shortest paths are only calculated through the cortex .
|
1,491 |
def triangles_keep_cortex ( triangles , cortex ) : input_shape = triangles . shape triangle_is_in_cortex = np . all ( np . reshape ( np . in1d ( triangles . ravel ( ) , cortex ) , input_shape ) , axis = 1 ) cortex_triangles_old = np . array ( triangles [ triangle_is_in_cortex ] , dtype = np . int32 ) new_index = np . digitize ( cortex_triangles_old . ravel ( ) , cortex , right = True ) cortex_triangles = np . array ( np . arange ( len ( cortex ) ) [ new_index ] . reshape ( cortex_triangles_old . shape ) , dtype = np . int32 ) return cortex_triangles
|
Remove triangles with nodes not contained in the cortex label array
|
1,492 |
def dist_calc ( surf , cortex , source_nodes ) : cortex_vertices , cortex_triangles = surf_keep_cortex ( surf , cortex ) translated_source_nodes = translate_src ( source_nodes , cortex ) data = gdist . compute_gdist ( cortex_vertices , cortex_triangles , source_indices = translated_source_nodes ) dist = recort ( data , surf , cortex ) del data return dist
|
Calculate exact geodesic distance along the cortical surface from a set of source nodes. dist_type specifies whether to calculate min, mean, median, or max distance values from a region-of-interest. If running only on a single node, defaults to min.
|
1,493 |
def zone_calc ( surf , cortex , source_nodes ) : cortex_vertices , cortex_triangles = surf_keep_cortex ( surf , cortex ) dist_vals = np . zeros ( ( len ( source_nodes ) , len ( cortex_vertices ) ) ) for x in range ( len ( source_nodes ) ) : translated_source_nodes = translate_src ( source_nodes [ x ] , cortex ) dist_vals [ x , : ] = gdist . compute_gdist ( cortex_vertices , cortex_triangles , source_indices = translated_source_nodes ) data = np . argsort ( dist_vals , axis = 0 ) [ 0 , : ] + 1 zone = recort ( data , surf , cortex ) del data return zone
|
Calculate closest nodes to each source node using exact geodesic distance along the cortical surface .
|
1,494 |
def load_module ( filename ) : basename = os . path . basename ( filename ) path = os . path . dirname ( filename ) sys . path . append ( path ) return __import__ ( os . path . splitext ( basename ) [ 0 ] )
|
Loads a module by filename
|
1,495 |
def make_machine_mapping ( machine_list ) : if machine_list is None : return { } else : mapping = { } for pair in machine_list : if ( constants . MACHINE_SEPARATOR not in pair ) or ( pair . count ( constants . MACHINE_SEPARATOR ) != 1 ) : raise ValueError ( "machine pairs must be passed as two strings separted by a %s" , constants . MACHINE_SEPARATOR ) ( logical , physical ) = pair . split ( constants . MACHINE_SEPARATOR ) mapping [ logical ] = physical return mapping
|
Convert the machine list argument from a list of names into a mapping of logical names to physical hosts . This is similar to the _parse_configs function but separated to provide the opportunity for extension and additional checking of machine access
|
1,496 |
def parse_config_list ( config_list ) : if config_list is None : return { } else : mapping = { } for pair in config_list : if ( constants . CONFIG_SEPARATOR not in pair ) or ( pair . count ( constants . CONFIG_SEPARATOR ) != 1 ) : raise ValueError ( "configs must be passed as two strings separted by a %s" , constants . CONFIG_SEPARATOR ) ( config , value ) = pair . split ( constants . CONFIG_SEPARATOR ) mapping [ config ] = value return mapping
|
Parse a list of configuration properties separated by =
|
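A simplified, standalone sketch of the `key=value` parsing above; the real helper reads the separator from a `constants` module, so '=' is hard-coded here.

```python
def parse_config_list(config_list, separator="="):
    # Turn ["key=value", ...] into {"key": "value", ...}, rejecting malformed pairs.
    if config_list is None:
        return {}
    mapping = {}
    for pair in config_list:
        if separator not in pair or pair.count(separator) != 1:
            raise ValueError("configs must be passed as two strings separated by a %s" % separator)
        config, value = pair.split(separator)
        mapping[config] = value
    return mapping

print(parse_config_list(["retries=3", "timeout=30"]))   # {'retries': '3', 'timeout': '30'}
```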
1,497 |
def deploy ( self , unique_id , configs = None ) : self . install ( unique_id , configs ) self . start ( unique_id , configs )
|
Deploys the service to the host . This should at least perform the same actions as install and start but may perform additional tasks as needed .
|
1,498 |
def undeploy ( self , unique_id , configs = None ) : self . stop ( unique_id , configs ) self . uninstall ( unique_id , configs )
|
Undeploys the service . This should at least perform the same actions as stop and uninstall but may perform additional tasks as needed .
|
1,499 |
def sleep ( self , unique_id , delay , configs = None ) : self . pause ( unique_id , configs ) time . sleep ( delay ) self . resume ( unique_id , configs )
|
Pauses the process for the specified delay and then resumes it
|