def format_field_by_match(self, value, match):
    groups = match.groups()
    fill, align, sign, sharp, zero, width, comma, prec, type_ = groups
    if not comma and not prec and type_ not in list('fF%'):
        return None
    if math.isnan(value) or math.isinf(value):
        return None
    locale = self.numeric_locale
    # Format number value.
    prefix = get_prefix(sign)
    if type_ == 'd':
        if prec is not None:
            raise ValueError('precision not allowed in '
                             'integer format specifier')
        string = format_number(value, 0, prefix, locale)
    elif type_ in 'fF%':
        format_ = format_percent if type_ == '%' else format_number
        string = format_(value, int(prec or DEFAULT_PREC), prefix, locale)
    else:
        # Don't handle otherwise.
        return None
    if not comma:
        # Formatted number always contains group symbols.
        # Remove the symbols if not required.
        string = remove_group_symbols(string, locale)
    if not (fill or align or zero or width):
        return string
    # Fix a layout.
    spec = ''.join([fill or u'', align or u'>', zero or u'', width or u''])
    return format(string, spec)
Formats a field by a Regex match of the format spec pattern.
def stdout_encode(u, default='utf-8'):
    # from http://stackoverflow.com/questions/3627793/best-output-type-and-
    # encoding-practices-for-repr-functions
    encoding = sys.stdout.encoding or default
    return u.encode(encoding, "replace").decode(encoding, "replace")
Encodes a given string with the proper standard out encoding. If sys.stdout.encoding isn't specified, this defaults to @default. @default: default encoding -> #str with standard out encoding
def get_terminal_width():
    # http://www.brandonrubin.me/2014/03/18/python-snippet-get-terminal-width/
    command = ['tput', 'cols']
    try:
        width = int(subprocess.check_output(command))
    except OSError as e:
        print(
            "Invalid Command '{0}': exit status ({1})".format(
                command[0], e.errno))
    except subprocess.CalledProcessError as e:
        print(
            "'{0}' returned non-zero exit status: ({1})".format(
                command, e.returncode))
    else:
        return width
-> #int width of the terminal window
def gen_rand_str(*size, use=None, keyspace=None):
    keyspace = keyspace or (string.ascii_letters + string.digits)
    keyspace = [char for char in keyspace]
    use = use or np.random
    if size:
        size = size if len(size) == 2 else (size[0], size[0] + 1)
    else:
        size = (6, 7)
    return ''.join(
        use.choice(keyspace) for _ in range(use.randint(*size)))
Generates a random string using random module specified in @use within the @keyspace @*size: #int size range for the length of the string @use: the random module to use @keyspace: #str chars allowed in the random string .. from redis_structures.debug import gen_rand_str gen_rand_str() # -> 'PRCpAq' gen_rand_str(1, 2) # -> 'Y' gen_rand_str(12, keyspace="abcdefg") # -> 'gaaacffbedf' ..
def rand_readable(*size, use=None, density=6):
    use = use or np.random
    keyspace = [c for c in string.ascii_lowercase if c != "l"]
    vowels = ("a", "e", "i", "o", "u")

    def use_vowel(density):
        # pick a vowel roughly once every @density characters
        # (the original was missing this return, so vowels were never chosen)
        return not use.randint(0, density)

    if size:
        size = size if len(size) == 2 else (size[0] - 1, size[0])
    else:
        size = (6, 7)
    return ''.join(
        use.choice(vowels if use_vowel(density) else keyspace)
        for _ in range(use.randint(*size)))
Generates a random string with readable characters using random module specified in @use @*size: #int size range for the length of the string @use: the random module to use @density: how often to include a vowel; expect a vowel roughly once every @density characters .. from redis_structures.debug import rand_readable rand_readable() # -> 'hyiaqk' rand_readable(15, 20) # -> 'oqspyywvhifsaikiaoi' rand_readable(15, 20, density=1) # -> 'oeuiueioieeioeeeue' rand_readable(15, 20, density=15) # -> 'ktgjabwdqhgeanh' ..
def get_parent_obj(obj):
    try:
        cls = get_class_that_defined_method(obj)
        if cls and cls != obj:
            return cls
    except AttributeError:
        pass
    if hasattr(obj, '__module__') and obj.__module__:
        try:
            module = importlib.import_module(obj.__module__)
            objname = get_obj_name(obj).split(".")
            owner = getattr(module, objname[-2])
            return getattr(owner, objname[-1])
        except Exception:
            try:
                return module
            except Exception:
                pass
    try:
        assert hasattr(obj, '__qualname__') or hasattr(obj, '__name__')
        objname = obj.__qualname__ if hasattr(obj, '__qualname__') \
            else obj.__name__
        objname = objname.split(".")
        assert len(objname) > 1
        return locate(".".join(objname[:-1]))
    except Exception:
        try:
            module = importlib.import_module(".".join(objname[:-1]))
            return module
        except Exception:
            pass
    return None
Gets the parent object that contains @obj @obj: any python object -> the parent object (class or module), or None if it cannot be determined .. from redis_structures.debug import get_parent_obj get_parent_obj(get_parent_obj) # -> <module 'redis_structures.debug' from> ..
def get_obj_name(obj, full=True):
    has_name_attr = hasattr(obj, '__name__')
    if has_name_attr and obj.__name__ == "<lambda>":
        try:
            src = whitespace_sub("", inspect.getsource(obj))\
                .replace("\n", "; ").strip(" <>")
        except OSError:
            src = obj.__name__
        return lambda_sub("", src)
    if hasattr(obj, '__qualname__') and obj.__qualname__:
        return obj.__qualname__.split(".")[-1]
    elif has_name_attr and obj.__name__:
        return obj.__name__.split(".")[-1]
    elif hasattr(obj, '__class__'):
        return str(obj.__class__.__name__).strip("<>")
    else:
        return str(obj.__repr__())
Gets the #str name of @obj @obj: any python object @full: #bool returns with parent name as well if True -> #str object name .. from redis_structures.debug import get_parent_obj get_obj_name(get_obj_name) # -> 'get_obj_name' get_obj_name(redis_structures.debug.Timer) # -> 'Timer' ..
def format(self):
    _bold = bold
    if not self.pretty:
        _bold = lambda x: x
    # Attach memory address and return
    _attrs = self._format_attrs()
    self.data = "<{}.{}({}){}>{}".format(
        self.obj.__module__ if hasattr(self.obj, "__module__")
        else "__main__",
        _bold(self.obj.__class__.__name__),
        _attrs,
        ":{}".format(hex(id(self.obj))) if self.address else "",
        _break + self.supplemental if self.supplemental else "")
    return stdout_encode(self.data)
Formats the __repr__ string -> #str containing __repr__ output
def randstr(self): return gen_rand_str( 2, 10, use=self.random, keyspace=list(self.keyspace))
-> #str result of :func:gen_rand_str
def set(self, size=1000):
    get_val = lambda: self._map_type()
    return set(get_val() for x in range(size))
Creates a random #set @size: #int number of random values to include in the set -> random #set
def list(self, size=1000, tree_depth=1):
    if not tree_depth:
        return self._map_type()
    return list(self.deque(size, tree_depth - 1) for x in range(size))
Creates a random #list @size: #int number of random values to include at each @tree_depth @tree_depth: #int nesting depth of the list, i.e. 1=|[value1, value2]| 2=|[[value1, value2], [value1, value2]]| -> random #list
def pretty_print(self, obj=None): print(self.pretty(obj if obj is not None else self.obj))
Formats and prints @obj or :prop:obj @obj: the object you'd like to prettify
def array(self):
    if self._intervals_len:
        if self._array_len != self._intervals_len:
            if not self._array_len:
                self._array = np.array(self.intervals) \
                    if hasattr(np, 'array') else self.intervals
            else:
                self._array = np.concatenate(
                    (self._array, self.intervals), axis=0) \
                    if hasattr(np, 'concatenate') else \
                    (self._array + self.intervals)
            self._array_len += len(self.intervals)
            self.intervals = []
        return self._array
    return []
Returns :prop:intervals as a numpy array and caches the result -> :class:numpy.array
def reset(self):
    self._start = 0
    self._first_start = 0
    self._stop = time.perf_counter()
    self._array = None
    self._array_len = 0
    self.intervals = []
    self._intervals_len = 0
Resets the time intervals
def time(self, intervals=1, *args, _show_progress=True, _print=True,
         _collect_garbage=False, **kwargs):
    self.reset()
    self.num_intervals = intervals
    for func in self.progress(self._callables):
        try:
            #: Don't ruin all timings if just one doesn't work
            t = Timer(
                func, _precision=self.precision,
                _parent_progressbar=self.progress)
            t.time(
                intervals, *args, _print=False,
                _show_progress=_show_progress,
                _collect_garbage=_collect_garbage,
                **kwargs)
        except Exception as e:
            print(RuntimeWarning(
                "{} with {}".format(colorize(
                    "{} failed".format(Look.pretty_objname(
                        func, color="yellow")), "yellow"), repr(e))))
        self._callable_results.append(t)
        self.progress.update()
    self.info(_print=_print)
    return self.results
Measures the execution time of :prop:_callables for @intervals @intervals: #int number of intervals to measure the execution time of the function for @*args: arguments to pass to the callable being timed @**kwargs: arguments to pass to the callable being timed @_show_progress: #bool whether or not to print a progress bar @_print: #bool whether or not to print the results of the timing @_collect_garbage: #bool whether or not to garbage collect while timing @_quiet: #bool whether or not to disable the print() function's ability to output to terminal during the timing -> #tuple of :class:Timer :prop:results of timing
def read(filename):
    import os
    here = os.path.dirname(os.path.abspath(__file__))
    with open(os.path.join(here, filename)) as fd:
        return fd.read()
Read a file relative to setup.py location.
def find_version(filename):
    import re
    content = read(filename)
    version_match = re.search(
        r"^__version__ = ['\"]([^'\"]*)['\"]", content, re.M
    )
    if version_match:
        return version_match.group(1)
    raise RuntimeError('Unable to find version string.')
Find package version in file.
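A minimal, self-contained illustration of the pattern this regex targets (the module contents here are made up):

import re

content = "__author__ = 'me'\n__version__ = '1.2.3'\n"
match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", content, re.M)
assert match and match.group(1) == '1.2.3'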
def find_requirements(filename):
    import string
    content = read(filename)
    requirements = []
    for line in content.splitlines():
        line = line.strip()
        if line and line[:1] in string.ascii_letters:
            requirements.append(line)
    return requirements
Find requirements in file.
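Since only lines that start with an ASCII letter are kept, comments, blank lines, and pip flags such as -r are dropped. A small sketch of the same filtering, with invented file contents:

import string

content = """# pinned deps
-r base.txt

requests>=2.0
numpy
"""
kept = [ln.strip() for ln in content.splitlines()
        if ln.strip() and ln.strip()[:1] in string.ascii_letters]
assert kept == ['requests>=2.0', 'numpy']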
def generate_uuid(basedata=None):
    if basedata is None:
        return str(uuid.uuid4())
    elif isinstance(basedata, str):
        # hashlib.md5 requires bytes, so encode the input string first
        checksum = hashlib.md5(basedata.encode('utf-8')).hexdigest()
        return '%8s-%4s-%4s-%4s-%12s' % (
            checksum[0:8], checksum[8:12], checksum[12:16],
            checksum[16:20], checksum[20:32])
Provides a _random_ UUID with no input, or a UUID4-format MD5 checksum of any input data provided
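The checksum path is deterministic, so the same input always yields the same UUID-shaped string; for example, the MD5 of 'hello' is 5d41402abc4b2a76b9719d911017c592:

assert generate_uuid('hello') == '5d41402a-bc4b-2a76-b971-9d911017c592'
assert generate_uuid() != generate_uuid()  # the no-input path is random (uuid4)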
def from_unix(cls, seconds, milliseconds=0):
    base = list(time.gmtime(seconds))[0:6]
    base.append(milliseconds * 1000)  # microseconds
    return cls(*base)
Produce a full |datetime.datetime| object from a Unix timestamp
def to_unix(cls, timestamp):
    if not isinstance(timestamp, datetime.datetime):
        raise TypeError('Time.milliseconds expects a datetime object')
    base = time.mktime(timestamp.timetuple())
    return base
Wrapper over time module to produce Unix epoch time as a float
def milliseconds_offset(cls, timestamp, now=None):
    if isinstance(timestamp, (int, float)):
        base = timestamp
    else:
        base = cls.to_unix(timestamp)
        base += (timestamp.microsecond / 1000000)
    if now is None:
        now = time.time()
    return (now - base) * 1000
Offset time (in milliseconds) from a |datetime.datetime| object to now
def fixUTF8(cls, data):
    # Ensure proper encoding for UA's servers...
    for key in data:
        if isinstance(data[key], str):
            data[key] = data[key].encode('utf-8')
    return data
Convert all strings to UTF-8
def alias(cls, typemap, base, *names):
    cls.parameter_alias[base] = (typemap, base)
    for i in names:
        cls.parameter_alias[i] = (typemap, base)
Declare an alternate (humane) name for a measurement protocol parameter
def consume_options(cls, data, hittype, args):
    opt_position = 0
    data['t'] = hittype  # integrate hit type parameter
    if hittype in cls.option_sequence:
        for expected_type, optname in cls.option_sequence[hittype]:
            if opt_position < len(args) and isinstance(
                    args[opt_position], expected_type):
                data[optname] = args[opt_position]
                opt_position += 1
Interpret sequential arguments related to known hittypes based on declared structures
def hittime(cls, timestamp=None, age=None, milliseconds=None):
    if isinstance(timestamp, (int, float)):
        return int(Time.milliseconds_offset(
            Time.from_unix(timestamp, milliseconds=milliseconds)))
    if isinstance(timestamp, datetime.datetime):
        return int(Time.milliseconds_offset(timestamp))
    if isinstance(age, (int, float)):
        return int(age * 1000) + (milliseconds or 0)
Returns an integer representing the milliseconds offset for a given hit (relative to now)
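A small usage sketch of the age path; the class name Tracker is assumed here for illustration, since only the classmethod body is shown above:

# a hit that happened 5 seconds and 250 milliseconds ago
assert Tracker.hittime(age=5, milliseconds=250) == 5 * 1000 + 250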
def set_timestamp(self, data):
    if 'hittime' in data:  # an absolute timestamp
        data['qt'] = self.hittime(timestamp=data.pop('hittime', None))
    if 'hitage' in data:  # a relative age (in seconds)
        data['qt'] = self.hittime(age=data.pop('hitage', None))
Interpret time-related options, apply queue-time parameter as needed
async def send(self, hittype, *args, **data):
    if hittype not in self.valid_hittypes:
        raise KeyError('Unsupported Universal Analytics Hit Type: {0}'.format(
            repr(hittype)))
    self.set_timestamp(data)
    self.consume_options(data, hittype, args)
    # process dictionary-object arguments of transient data
    for item in args:
        if isinstance(item, dict):
            for key, val in self.payload(item):
                data[key] = val
    # update only absent parameters
    for k, v in self.params.items():
        if k not in data:
            data[k] = v
    data = dict(self.payload(data))
    if self.hash_client_id:
        data['cid'] = generate_uuid(data['cid'])
    # Transmit the hit to Google...
    await self.http.send(data)
Transmit HTTP requests to Google Analytics using the measurement protocol
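A hedged usage sketch; the tracker construction is omitted, and the parameter names follow the conventions used above ('hitage' becomes the queue-time 'qt' parameter):

import asyncio

async def demo(tracker):
    # positional args are consumed per the declared option sequence;
    # keyword args become extra hit parameters
    await tracker.send('pageview', '/home', hitage=2)

# asyncio.run(demo(tracker))  # with a configured tracker instance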
def p_null_assignment(self, t):
    '''null_assignment : IDENT EQ NULL'''
    self.accu.add(Term('obs_vlabel', [self.name, "gen(\""+t[1]+"\")", "0"]))
null_assignment : IDENT EQ NULL
def p_plus_assignment(self, t):
    '''plus_assignment : IDENT EQ PLUS'''
    self.accu.add(Term('obs_vlabel', [self.name, "gen(\""+t[1]+"\")", "1"]))
plus_assignment : IDENT EQ PLUS
def p_minus_assignment(self, t):
    '''minus_assignment : IDENT EQ MINUS'''
    self.accu.add(Term('obs_vlabel', [self.name, "gen(\""+t[1]+"\")", "-1"]))
minus_assignment : IDENT EQ MINUS
def p_notplus_assignment(self, t):
    '''notplus_assignment : IDENT EQ NOTPLUS'''
    self.accu.add(Term('obs_vlabel', [self.name, "gen(\""+t[1]+"\")", "notPlus"]))
notplus_assignment : IDENT EQ NOTPLUS
def p_notminus_assignment(self, t):
    '''notminus_assignment : IDENT EQ NOTMINUS'''
    self.accu.add(Term('obs_vlabel', [self.name, "gen(\""+t[1]+"\")", "notMinus"]))
notminus_assignment : IDENT EQ NOTMINUS
def p_input_assignment(self, t):
    '''input_assignment : IDENT EQ INPUT'''
    self.accu.add(Term('input', [self.name, "gen(\""+t[1]+"\")"]))
input_assignment : IDENT EQ INPUT
def p_min_assignment(self, t):
    '''min_assignment : IDENT EQ MIN'''
    self.accu.add(Term('ismin', [self.name, "gen(\""+t[1]+"\")"]))
min_assignment : IDENT EQ MIN
def p_max_assignment(self, t):
    '''max_assignment : IDENT EQ MAX'''
    self.accu.add(Term('ismax', [self.name, "gen(\""+t[1]+"\")"]))
max_assignment : IDENT EQ MAX
def delete_service_version(self, service_id, service_version='default', mode='production'):
    '''
    delete_service_version(self, service_id, service_version='default', mode='production')

    Deletes a Service version from Opereto

    :Parameters:
    * *service_id* (`string`) -- Service identifier
    * *service_version* (`string`) -- Service version. Default is 'default'
    * *mode* (`string`) -- development/production. Default is production

    :return: success/failure

    :Example:
    .. code-block:: python

       opereto_client.delete_service_version('my_service_id')
    '''
    return self._call_rest_api('delete', '/services/'+service_id+'/'+mode+'/'+service_version,
                               error='Failed to delete service')
delete_service_version(self, service_id, service_version='default', mode='production') Deletes a Service version from Opereto :Parameters: * *service_id* (`string`) -- Service identifier * *service_version* (`string`) -- Service version. Default is 'default' * *mode* (`string`) -- development/production. Default is production :return: success/failure :Example: .. code-block:: python opereto_client.delete_service_version('my_service_id')
def verify_environment(self, environment_id):
    '''
    verify_environment(self, environment_id)

    Verifies validity of an existing environment

    :Parameters:
    * *environment_id* (`string`) -- Environment identifier

    :return: Success or errors in case the verification failed

    :Return Example:
    .. code-block:: json

       # verification failure
       {'errors': ['Topology key cluster_name is missing in environment specification'],
        'agents': {}, 'success': False, 'warnings': []}

       # verification success
       {'errors': [], 'agents': {}, 'success': True, 'warnings': []}
    '''
    request_data = {'id': environment_id}
    return self._call_rest_api('post', '/environments/verify', data=request_data,
                               error='Failed to verify environment.')
verify_environment(self, environment_id) Verifies validity of an existing environment :Parameters: * *environment_id* (`string`) -- Environment identifier :return: Success or errors in case the verification failed :Return Example: .. code-block:: json # verification failure {'errors': ['Topology key cluster_name is missing in environment specification'], 'agents': {}, 'success': False, 'warnings': []} # verification success {'errors': [], 'agents': {}, 'success': True, 'warnings': []}
def create_environment(self, topology_name, topology={}, id=None, **kwargs):
    '''
    create_environment(self, topology_name, topology={}, id=None, **kwargs)

    Create a new environment

    :Parameters:
    * *topology_name* (`string`) -- The topology identifier. Must be provided to create an environment.
    * *topology* (`object`) -- Topology data (must match the topology json schema)
    * *id* (`object`) -- The environment identifier. If none provided when creating environment,
      Opereto will automatically assign a unique identifier.

    :return: id of the created environment
    '''
    request_data = {'topology_name': topology_name, 'id': id, 'topology': topology, 'add_only': True}
    request_data.update(**kwargs)
    return self._call_rest_api('post', '/environments', data=request_data,
                               error='Failed to create environment')
create_environment(self, topology_name, topology={}, id=None, **kwargs) Create a new environment :Parameters: * *topology_name* (`string`) -- The topology identifier. Must be provided to create an environment. * *topology* (`object`) -- Topology data (must match the topology json schema) * *id* (`object`) -- The environment identifier. If none provided when creating environment, Opereto will automatically assign a unique identifier. :return: id of the created environment
def modify_environment(self, environment_id, **kwargs):
    '''
    modify_environment(self, environment_id, **kwargs)

    Modifies an existing environment

    :Parameters:
    * *environment_id* (`string`) -- The environment identifier

    Keywords args: The variables to change in the environment

    :return: id of the modified environment
    '''
    request_data = {'id': environment_id}
    request_data.update(**kwargs)
    return self._call_rest_api('post', '/environments', data=request_data,
                               error='Failed to modify environment')
modify_environment(self, environment_id, **kwargs) Modifies an existing environment :Parameters: * *environment_id* (`string`) -- The environment identifier Keywords args: The variables to change in the environment :return: id of the modified environment
def modify_agent(self, agent_id, **kwargs):
    '''
    modify_agent(self, agent_id, **kwargs)

    | Modifies agent information (like name)

    :Parameters:
    * *agent_id* (`string`) -- Identifier of an existing agent

    :Example:
    .. code-block:: python

       opereto_client = OperetoClient()
       opereto_client.modify_agent('agentId', name='my new name')
    '''
    request_data = {'id': agent_id}
    request_data.update(**kwargs)
    return self._call_rest_api('post', '/agents', data=request_data,
                               error='Failed to modify agent [%s]' % agent_id)
modify_agent(self, agent_id, **kwargs) | Modifies agent information (like name) :Parameters: * *agent_id* (`string`) -- Identifier of an existing agent :Example: .. code-block:: python opereto_client = OperetoClient() opereto_client.modify_agent('agentId', name='my new name')
def modify_process_summary(self, pid=None, text='', append=False):
    '''
    modify_process_summary(self, pid=None, text='', append=False)

    Modifies the summary text of the process execution

    :Parameters:
    * *pid* (`string`) -- Identifier of an existing process
    * *text* (`string`) -- summary text
    * *append* (`boolean`) -- True to append to summary. False to override it.
    '''
    pid = self._get_pid(pid)
    if append:
        current_summary = self.get_process_info(pid).get('summary') or ''
        modified_text = current_summary + '\n' + text
        text = modified_text
    request_data = {"id": pid, "data": str(text)}
    return self._call_rest_api('post', '/processes/'+pid+'/summary', data=request_data,
                               error='Failed to update process summary')
modify_process_summary(self, pid=None, text='', append=False) Modifies the summary text of the process execution :Parameters: * *pid* (`string`) -- Identifier of an existing process * *text* (`string`) -- summary text * *append* (`boolean`) -- True to append to summary. False to override it.
def stop_process(self, pids, status='success'):
    '''
    stop_process(self, pids, status='success')

    Stops a running process

    :Parameters:
    * *pids* (`string` or `list`) -- Identifier(s) of existing process(es)
    * *status* (`string`) -- the value the process will be terminated with.
      Any of the following possible values: success, failure, error, warning, terminated
    '''
    if status not in process_result_statuses:
        raise OperetoClientError('Invalid process result [%s]' % status)
    pids = self._get_pids(pids)
    for pid in pids:
        self._call_rest_api('post', '/processes/'+pid+'/terminate/'+status,
                            error='Failed to stop process')
stop_process(self, pids, status='success') Stops a running process :Parameters: * *pids* (`string` or `list`) -- Identifier(s) of existing process(es) * *status* (`string`) -- the value the process will be terminated with. Any of the following possible values: success, failure, error, warning, terminated
def get_process_status(self, pid=None):
    '''
    get_process_status(self, pid=None)

    Get current status of a process

    :Parameters:
    * *pid* (`string`) -- Identifier of an existing process
    '''
    pid = self._get_pid(pid)
    return self._call_rest_api('get', '/processes/'+pid+'/status',
                               error='Failed to fetch process status')
get_process_status(self, pid=None) Get current status of a process :Parameters: * *pid* (`string`) -- Identifier of an existing process
def get_process_flow(self, pid=None):
    '''
    get_process_flow(self, pid=None)

    Get process in flow context. The response returns a sub-tree of the whole flow
    containing the requested process, its direct children processes, and all ancestors.
    You can navigate within the flow backward and forward by running this call on the
    children or ancestors of a given process.

    :Parameters:
    * *pid* (`string`) -- Identifier of an existing process
    '''
    pid = self._get_pid(pid)
    return self._call_rest_api('get', '/processes/'+pid+'/flow',
                               error='Failed to fetch process information')
get_process_flow(self, pid=None) Get process in flow context. The response returns a sub-tree of the whole flow containing the requested process, its direct children processes, and all ancestors. You can navigate within the flow backward and forward by running this call on the children or ancestors of a given process. :Parameters: * *pid* (`string`) -- Identifier of an existing process
def get_process_rca(self, pid=None):
    '''
    get_process_rca(self, pid=None)

    Get the RCA tree of a given failed process. The RCA tree contains all failed
    child processes that caused the failure of the given process.

    :Parameters:
    * *pid* (`string`) -- Identifier of an existing process
    '''
    pid = self._get_pid(pid)
    return self._call_rest_api('get', '/processes/'+pid+'/rca',
                               error='Failed to fetch process information')
get_process_rca(self, pid=None) Get the RCA tree of a given failed process. The RCA tree contains all failed child processes that caused the failure of the given process. :Parameters: * *pid* (`string`) -- Identifier of an existing process
def get_process_info(self, pid=None):
    '''
    get_process_info(self, pid=None)

    Get process general information.

    :Parameters:
    * *pid* (`string`) -- Identifier of an existing process
    '''
    pid = self._get_pid(pid)
    return self._call_rest_api('get', '/processes/'+pid,
                               error='Failed to fetch process information')
get_process_info(self, pid=None) Get process general information. :Parameters: * *pid* (`string`) -- Identifier of an existing process
def get_process_log(self, pid=None, start=0, limit=1000):
    '''
    get_process_log(self, pid=None, start=0, limit=1000)

    Get process logs

    :Parameters:
    * *pid* (`string`) -- Identifier of an existing process
    * *start* (`int`) -- start index to retrieve logs from
    * *limit* (`int`) -- maximum number of entities to retrieve

    :return: Process log entries
    '''
    pid = self._get_pid(pid)
    data = self._call_rest_api('get', '/processes/'+pid+'/log?start={}&limit={}'.format(start, limit),
                               error='Failed to fetch process log')
    return data['list']
get_process_log(self, pid=None, start=0, limit=1000) Get process logs :Parameters: * *pid* (`string`) -- Identifier of an existing process * *start* (`int`) -- start index to retrieve logs from * *limit* (`int`) -- maximum number of entities to retrieve :return: Process log entries
def get_process_properties(self, pid=None, name=None):
    '''
    get_process_properties(self, pid=None, name=None)

    Get process properties (both input and output properties)

    :Parameters:
    * *pid* (`string`) -- Identifier of an existing process
    * *name* (`string`) -- optional - Property name
    '''
    pid = self._get_pid(pid)
    res = self._call_rest_api('get', '/processes/'+pid+'/properties',
                              error='Failed to fetch process properties')
    if name:
        try:
            return res[name]
        except KeyError:
            raise OperetoClientError(message='Invalid property [%s]' % name, code=404)
    else:
        return res
get_process_properties(self, pid=None, name=None) Get process properties (both input and output properties) :Parameters: * *pid* (`string`) -- Identifier of an existing process * *name* (`string`) -- optional - Property name
def wait_to_start(self, pids=[]):
    '''
    wait_to_start(self, pids=[])

    Wait for processes to start

    :Parameters:
    * *pids* (`list`) -- list of processes to wait to start
    '''
    actual_pids = self._get_pids(pids)
    return self.wait_for(pids=actual_pids,
                         status_list=process_result_statuses + ['in_process'])
wait_to_start(self, pids=[]) Wait for processes to start :Parameters: * *pids* (`list`) -- list of processes to wait to start
def wait_to_end(self, pids=[]):
    '''
    wait_to_end(self, pids=[])

    Wait for processes to finish

    :Parameters:
    * *pids* (`list`) -- list of processes to wait to finish
    '''
    actual_pids = self._get_pids(pids)
    return self.wait_for(pids=actual_pids, status_list=process_result_statuses)
wait_to_end(self, pids=[]) Wait for processes to finish :Parameters: * *pids* (`list`) -- list of processes to wait to finish
def get_process_runtime_cache(self, key, pid=None):
    '''
    get_process_runtime_cache(self, key, pid=None)

    Get a pre-defined run time parameter value

    :Parameters:
    * *key* (`string`) -- Identifier of the runtime cache
    * *pid* (`string`) -- Identifier of an existing process
    '''
    pid = self._get_pid(pid)
    value = self._call_rest_api('get', '/processes/'+pid+'/cache?key=%s' % key,
                                error='Failed to fetch process runtime cache')
    return value
get_process_runtime_cache(self, key, pid=None) Get a pre-defined run time parameter value :Parameters: * *key* (`string`) -- Identifier of the runtime cache * *pid* (`string`) -- Identifier of an existing process
def set_process_runtime_cache(self, key, value, pid=None):
    '''
    set_process_runtime_cache(self, key, value, pid=None)

    Set a process run time parameter

    :Parameters:
    * *key* (`string`) -- parameter key
    * *value* (`string`) -- parameter value
    * *pid* (`string`) -- optional - Identifier of an existing process
    '''
    pid = self._get_pid(pid)
    self._call_rest_api('post', '/processes/'+pid+'/cache',
                        data={'key': key, 'value': value},
                        error='Failed to modify process runtime cache')
set_process_runtime_cache(self, key, value, pid=None) Set a process run time parameter :Parameters: * *key* (`string`) -- parameter key * *value* (`string`) -- parameter value * *pid* (`string`) -- optional - Identifier of an existing process
def modify_product(self, product_id, name=None, description=None, attributes={}):
    '''
    modify_product(self, product_id, name=None, description=None, attributes={})

    Modify an existing product

    :Parameters:
    * *product_id* (`string`) -- identifier of an existing product
    * *name* (`string`) -- name of the product
    * *description* (`string`) -- product description
    * *attributes* (`object`) -- product attributes to modify
    '''
    request_data = {'id': product_id}
    if name:
        request_data['name'] = name
    if description:
        request_data['description'] = description
    if attributes:
        request_data['attributes'] = attributes
    return self._call_rest_api('post', '/products', data=request_data,
                               error='Failed to modify product')
modify_product(self, product_id, name=None, description=None, attributes={}) Modify an existing product :Parameters: * *product_id* (`string`) -- identifier of an existing product * *name* (`string`) -- name of the product * *description* (`string`) -- product description * *attributes* (`object`) -- product attributes to modify
def modify_qc(self, qc_id=None, **kwargs):
    '''
    modify_qc(self, qc_id=None, **kwargs)

    Modify a Quality Criteria

    :Parameters:
    * *qc_id* (`string`) -- The Quality criteria identifier
    '''
    if qc_id:
        request_data = {'id': qc_id}
        request_data.update(**kwargs)
        return self._call_rest_api('post', '/qc', data=request_data,
                                   error='Failed to modify criteria')
    else:
        return self.create_qc(**kwargs)
modify_qc(self, qc_id=None, **kwargs) Modify a Quality Criteria :Parameters: * *qc_id* (`string`) -- The Quality criteria identifier
def write(self):
    for entry in self._instream:
        if isinstance(entry, Feature):
            for feature in entry:
                if feature.num_children > 0 or feature.is_multi:
                    if feature.is_multi and feature != feature.multi_rep:
                        continue
                    self.feature_counts[feature.type] += 1
                    fid = '{}{}'.format(feature.type,
                                        self.feature_counts[feature.type])
                    feature.add_attribute('ID', fid)
                else:
                    feature.drop_attribute('ID')
        if isinstance(entry, Sequence) and not self._seq_written:
            print('##FASTA', file=self.outfile)
            self._seq_written = True
        print(repr(entry), file=self.outfile)
Pull features from the instream and write them to the output.
def datetime2yeardoy(time: Union[str, datetime.datetime]) -> Tuple[int, float]:
    T = np.atleast_1d(time)
    utsec = np.empty_like(T, float)
    yd = np.empty_like(T, int)
    for i, t in enumerate(T):
        if isinstance(t, np.datetime64):
            t = t.astype(datetime.datetime)
        elif isinstance(t, str):
            t = parse(t)
        utsec[i] = datetime2utsec(t)
        yd[i] = t.year * 1000 + int(t.strftime('%j'))
    return yd.squeeze()[()], utsec.squeeze()[()]
Inputs: T: Numpy 1-D array of datetime.datetime OR string for dateutil.parser.parse Outputs: yd: yyyyddd four digit year, 3 digit day of year (INTEGER) utsec: seconds from midnight utc
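For example, 2020 is a leap year, so 1 March is day 61, and 12:00 UTC is 43200 seconds after midnight:

yd, utsec = datetime2yeardoy('2020-03-01T12:00:00')
assert yd == 2020061 and utsec == 43200.0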
def yeardoy2datetime(yeardate: int,
                     utsec: Union[float, int] = None) -> datetime.datetime:
    if isinstance(yeardate, (tuple, list, np.ndarray)):
        if utsec is None:
            return np.asarray([yeardoy2datetime(y) for y in yeardate])
        elif isinstance(utsec, (tuple, list, np.ndarray)):
            return np.asarray([yeardoy2datetime(y, s)
                               for y, s in zip(yeardate, utsec)])
    yeardate = int(yeardate)
    yd = str(yeardate)
    if len(yd) != 7:
        raise ValueError('yyyyddd expected')
    year = int(yd[:4])
    assert 0 < year < 3000, 'year not in expected format'
    dt = datetime.datetime(year, 1, 1) + datetime.timedelta(days=int(yd[4:]) - 1)
    assert isinstance(dt, datetime.datetime)
    if utsec is not None:
        dt += datetime.timedelta(seconds=utsec)
    return dt
Inputs: yd: yyyyddd four digit year, 3 digit day of year (INTEGER 7 digits) outputs: t: datetime http://stackoverflow.com/questions/2427555/python-question-year-and-day-of-year-to-date
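This is the inverse of datetime2yeardoy above:

import datetime

assert yeardoy2datetime(2020061, 43200) == datetime.datetime(2020, 3, 1, 12, 0)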
def date2doy(time: Union[str, datetime.datetime]) -> Tuple[int, int]:
    T = np.atleast_1d(time)
    year = np.empty(T.size, dtype=int)
    doy = np.empty_like(year)
    for i, t in enumerate(T):
        yd = str(datetime2yeardoy(t)[0])
        year[i] = int(yd[:4])
        doy[i] = int(yd[4:])
    # %j is 1-indexed: 1..365 for normal years, 1..366 for leap years
    assert ((0 < doy) & (doy < 367)).all(), 'day of year must be 1 <= doy <= 366'
    return doy, year
Day of year is 1-indexed: 1..365 for a normal year, 1..366 for a leap year.
def datetime2gtd(time: Union[str, datetime.datetime, np.datetime64],
                 glon: Union[float, List[float], np.ndarray] = np.nan) -> Tuple[int, float, float]:
    T = np.atleast_1d(time)
    glon = np.asarray(glon)
    doy = np.empty_like(T, int)
    utsec = np.empty_like(T, float)
    stl = np.empty((T.size, *glon.shape))
    for i, t in enumerate(T):
        if isinstance(t, str):
            t = parse(t)
        elif isinstance(t, np.datetime64):
            t = t.astype(datetime.datetime)
        elif isinstance(t, (datetime.datetime, datetime.date)):
            pass
        else:
            raise TypeError('unknown time datatype {}'.format(type(t)))
        # %% day of year
        doy[i] = int(t.strftime('%j'))
        # %% seconds since utc midnight
        utsec[i] = datetime2utsec(t)
        stl[i, ...] = utsec[i] / 3600. + glon / 15.
    return doy, utsec, stl
Inputs: time: Numpy 1-D array of datetime.datetime OR string for dateutil.parser.parse glon: Numpy 2-D array of geodetic longitudes (degrees) Outputs: iyd: day of year utsec: seconds from midnight utc stl: local solar time
def datetime2utsec(t: Union[str, datetime.date, datetime.datetime,
                            np.datetime64]) -> float:
    if isinstance(t, (tuple, list, np.ndarray)):
        return np.asarray([datetime2utsec(T) for T in t])
    elif isinstance(t, datetime.date) and not isinstance(t, datetime.datetime):
        return 0.
    elif isinstance(t, np.datetime64):
        t = t.astype(datetime.datetime)
    elif isinstance(t, str):
        t = parse(t)
    return datetime.timedelta.total_seconds(
        t - datetime.datetime.combine(t.date(),
                                      datetime.datetime.min.time()))
input: datetime output: float utc seconds since THIS DAY'S MIDNIGHT
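Concretely:

import datetime

assert datetime2utsec('2020-03-01T12:00:00') == 43200.0
assert datetime2utsec(datetime.date(2020, 3, 1)) == 0.0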
def yeardec2datetime(atime: float) -> datetime.datetime:
    if isinstance(atime, (float, int)):  # typically a float
        year = int(atime)
        remainder = atime - year
        boy = datetime.datetime(year, 1, 1)
        eoy = datetime.datetime(year + 1, 1, 1)
        seconds = remainder * (eoy - boy).total_seconds()
        T = boy + datetime.timedelta(seconds=seconds)
        assert isinstance(T, datetime.datetime)
    elif isinstance(atime[0], float):
        return np.asarray([yeardec2datetime(t) for t in atime])
    else:
        raise TypeError('expecting float, not {}'.format(type(atime)))
    return T
Convert atime (a float) to DT.datetime This is the inverse of datetime2yeardec. assert dt2t(t2dt(atime)) == atime http://stackoverflow.com/questions/19305991/convert-fractional-years-to-a-real-date-in-python Authored by "unutbu" http://stackoverflow.com/users/190597/unutbu In Python, go from decimal year (YYYY.YYY) to datetime, and from datetime to decimal year.
def datetime2yeardec(time: Union[str, datetime.datetime, datetime.date]) -> float:
    if isinstance(time, str):
        t = parse(time)
    elif isinstance(time, datetime.datetime):
        t = time
    elif isinstance(time, datetime.date):
        t = datetime.datetime.combine(time, datetime.datetime.min.time())
    elif isinstance(time, (tuple, list, np.ndarray)):
        return np.asarray([datetime2yeardec(t) for t in time])
    else:
        raise TypeError('unknown input type {}'.format(type(time)))
    year = t.year
    boy = datetime.datetime(year, 1, 1)
    eoy = datetime.datetime(year + 1, 1, 1)
    return year + ((t - boy).total_seconds() / ((eoy - boy).total_seconds()))
Convert a datetime into a float. The integer part of the float should represent the year. Order should be preserved. If adate<bdate, then d2t(adate)<d2t(bdate) time distances should be preserved: If bdate-adate=ddate-cdate then dt2t(bdate)-dt2t(adate) = dt2t(ddate)-dt2t(cdate)
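Round-tripping through yeardec2datetime recovers the input up to floating-point rounding:

import datetime

t = datetime.datetime(2020, 7, 1, 6, 30)
roundtrip = yeardec2datetime(datetime2yeardec(t))
assert abs((roundtrip - t).total_seconds()) < 1e-3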
def randomdate(year: int) -> datetime.date:
    if calendar.isleap(year):
        doy = random.randrange(366)
    else:
        doy = random.randrange(365)
    return datetime.date(year, 1, 1) + datetime.timedelta(days=doy)
gives random date in year
def init_arg_names(obj):
    # doing something wildly hacky by pulling out the arguments to
    # __init__ or __new__ and hoping that they match fields defined on the
    # object
    try:
        init_code = obj.__init__.__func__.__code__
    except AttributeError:
        try:
            init_code = obj.__new__.__func__.__code__
        except AttributeError:
            # if object is a namedtuple then we can return its fields
            # as the required initial args
            if hasattr(obj, "_fields"):
                return obj._fields
            else:
                raise ValueError("Cannot determine args to %s.__init__" % (obj,))
    arg_names = init_code.co_varnames[:init_code.co_argcount]
    # drop self argument
    nonself_arg_names = arg_names[1:]
    return nonself_arg_names
Names of arguments to __init__ method of this object's class.
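For example, with a class whose __init__ arguments mirror its fields:

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

assert init_arg_names(Point(1, 2)) == ('x', 'y')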
def function_to_serializable_representation(fn):
    if type(fn) not in (FunctionType, BuiltinFunctionType):
        raise ValueError(
            "Can't serialize %s : %s, must be globally defined function" % (
                fn, type(fn),))
    if hasattr(fn, "__closure__") and fn.__closure__ is not None:
        raise ValueError("No serializable representation for closure %s" % (fn,))
    return {"__module__": get_module_name(fn), "__name__": fn.__name__}
Converts a Python function into a serializable representation. Does not currently work for methods or functions with closure data.
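Assuming get_module_name resolves to the function's __module__ (that helper is not shown above), a top-level function reduces to a two-key dict:

def greet():
    return "hi"

# e.g. {'__module__': '__main__', '__name__': 'greet'}
rep = function_to_serializable_representation(greet)
assert rep["__name__"] == "greet"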
def dict_to_serializable_repr(x):
    # list of JSON representations of hashable objects which were
    # used as keys in this dictionary
    serialized_key_list = []
    serialized_keys_to_names = {}
    # use the class of x rather just dict since we might want to convert
    # derived classes such as OrderedDict
    result = type(x)()
    for (k, v) in x.items():
        if not isinstance(k, string_types):
            # JSON does not support using complex types such as tuples
            # or user-defined objects with implementations of __hash__ as
            # keys in a dictionary so we must keep the serialized
            # representations of such values in a list and refer to indices
            # in that list
            serialized_key_repr = to_json(k)
            if serialized_key_repr in serialized_keys_to_names:
                k = serialized_keys_to_names[serialized_key_repr]
            else:
                k = index_to_serialized_key_name(len(serialized_key_list))
                serialized_keys_to_names[serialized_key_repr] = k
                serialized_key_list.append(serialized_key_repr)
        result[k] = to_serializable_repr(v)
    if len(serialized_key_list) > 0:
        # only include this list of serialized keys if we had any non-string
        # keys
        result[SERIALIZED_DICTIONARY_KEYS_FIELD] = serialized_key_list
    return result
Recursively convert values of dictionary to serializable representations. Convert non-string keys to JSON representations and replace them in the dictionary with indices of unique JSON strings (e.g. __1, __2, etc..).
def from_serializable_dict(x):
    if "__name__" in x:
        return _lookup_value(x.pop("__module__"), x.pop("__name__"))
    non_string_key_objects = [
        from_json(serialized_key)
        for serialized_key in x.pop(SERIALIZED_DICTIONARY_KEYS_FIELD, [])
    ]
    converted_dict = type(x)()
    for k, v in x.items():
        serialized_key_index = parse_serialized_keys_index(k)
        if serialized_key_index is not None:
            k = non_string_key_objects[serialized_key_index]
        converted_dict[k] = from_serializable_repr(v)
    if "__class__" in converted_dict:
        class_object = converted_dict.pop("__class__")
        if "__value__" in converted_dict:
            return class_object(converted_dict["__value__"])
        elif hasattr(class_object, "from_dict"):
            return class_object.from_dict(converted_dict)
        else:
            return class_object(**converted_dict)
    return converted_dict
Reconstruct a dictionary by recursively reconstructing all its keys and values. This is the most hackish part since we rely on key names such as __name__, __class__, __module__ as metadata about how to reconstruct an object. TODO: It would be cleaner to always wrap each object in a layer of type metadata and then have an inner dictionary which represents the flattened result of to_dict() for user-defined objects.
def to_dict(obj):
    if isinstance(obj, dict):
        return obj
    elif hasattr(obj, "to_dict"):
        return obj.to_dict()
    try:
        return simple_object_to_dict(obj)
    except Exception:
        raise ValueError(
            "Cannot convert %s : %s to dictionary" % (obj, type(obj)))
If the value isn't a primitive scalar or collection, then it needs to either implement to_dict (instances of Serializable) or have member data matching each required arg of __init__.
def to_serializable_repr(x):
    t = type(x)
    if isinstance(x, list):
        return list_to_serializable_repr(x)
    elif t in (set, tuple):
        return {
            "__class__": class_to_serializable_representation(t),
            "__value__": list_to_serializable_repr(x)
        }
    elif isinstance(x, dict):
        return dict_to_serializable_repr(x)
    elif isinstance(x, (FunctionType, BuiltinFunctionType)):
        return function_to_serializable_representation(x)
    elif type(x) is type:
        return class_to_serializable_representation(x)
    else:
        state_dictionary = to_serializable_repr(to_dict(x))
        state_dictionary["__class__"] = class_to_serializable_representation(
            x.__class__)
        return state_dictionary
Convert an instance of Serializable or a primitive collection containing such instances into serializable types.
def _check(self, accepted):
    # logging.debug('A check is now happening...')
    # for key in self.statediag[1].trans:
    #     logging.debug('transition to ' + `key` + " with "
    #                   + self.statediag[1].trans[key][0])
    total = []
    if 1 in self.quickresponse:
        total = total + self.quickresponse[1]
    if (1, 0) in self.quickresponse:
        total = total + self.quickresponse[(1, 0)]
    for key in total:
        if (key.id == 1 or key.id == (1, 0)) and key.type == 3:
            if accepted is None:
                if 2 in key.trans:
                    # print 'Found'
                    return key.trans[2]
            else:
                for state in accepted:
                    if (2, state) in key.trans:
                        # print 'Found'
                        return key.trans[(2, state)]
    return -1
_check for string existence
def printer(self):
    for key in self.statediag:
        if key.trans is not None and len(key.trans) > 0:
            print '****** ' + repr(key.id) + '(' + repr(key.type)\
                + ' on sym ' + repr(key.sym) + ') ******'
            print key.trans
Visualizes the current state
def init(self, states, accepted):
    self.statediag = []
    for key in states:
        self.statediag.append(states[key])
    self.quickresponse = {}
    self.quickresponse_types = {}
    self.quickresponse_types[0] = []
    self.quickresponse_types[1] = []
    self.quickresponse_types[2] = []
    self.quickresponse_types[3] = []
    self.quickresponse_types[4] = []
    for state in self.statediag:
        if state.id not in self.quickresponse:
            self.quickresponse[state.id] = [state]
        else:
            self.quickresponse[state.id].append(state)
        self.quickresponse_types[state.type].append(state)
    # self.printer()
    # raw_input('next stepA?')
    return self._stage(accepted, 0)
Initialization of the indexing dictionaries
def execute(filelocation, outpath, executable, args=None, switchArgs=None):
    procArgs = ['java', '-jar', executable]
    procArgs.extend(['-output_path', outpath])
    if args is not None:
        for arg in args:
            procArgs.extend(['-' + arg[0], arg[1]])
    if switchArgs is not None:
        procArgs.extend(['-' + arg for arg in switchArgs])
    procArgs.extend(aux.toList(filelocation))

    ## run it ##
    proc = subprocess.Popen(procArgs, stderr=subprocess.PIPE)
    ## but do not wait till the tool finishes, start displaying output immediately ##
    while True:
        out = proc.stderr.read(1)
        if out == '' and proc.poll() is not None:
            break
        if out != '':
            sys.stdout.write(out)
            sys.stdout.flush()
Executes the spectra-cluster-cli tool on Windows operating systems. :param filelocation: either a single mgf file path or a list of file paths. :param outpath: path of the output file, file must not exist :param executable: must specify the complete file path of the spectra-cluster-cli.jar file, supported version is 1.0.2 BETA. :param args: list of arguments containing a value, for details see the spectra-cluster-cli help. Arguments should be added as tuples or a list. For example: [('precursor_tolerance', '0.5'), ('rounds', '3')] :param switchArgs: list of arguments not containing a value, for details see the spectra-cluster-cli help. Arguments should be added as strings. For example: ['fast_mode', 'keep_binary_files']
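A hedged usage sketch with invented file paths, mirroring the argument conventions described above:

execute('spectra/sample.mgf', 'out/clusters.txt',
        'spectra-cluster-cli-1.0.2.jar',
        args=[('precursor_tolerance', '0.5'), ('rounds', '3')],
        switchArgs=['fast_mode'])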
def generate_example():
    cmd_args = sys.argv[1:]
    parser = argparse.ArgumentParser(description='Confpy example generator.')
    parser.add_argument(
        '--module',
        action='append',
        help='A python module which should be imported.',
    )
    parser.add_argument(
        '--file',
        action='append',
        help='A python file which should be evaled.',
    )
    parser.add_argument(
        '--format',
        default='JSON',
        choices=('JSON', 'INI'),
        help='The output format of the configuration file.',
    )
    args = parser.parse_args(cmd_args)
    for module in args.module or ():
        __import__(module)
    for source_file in args.file or ():
        # evaluating the file registers its options as a side effect
        cfg = pyfile.PythonFile(path=source_file).config
    cfg = config.Configuration()
    print(example.generate_example(cfg, ext=args.format))
Generate a configuration file example. This utility will load some number of Python modules which are assumed to register options with confpy and generate an example configuration file based on those options.
def count(self, val=True): return sum((elem.count(val) for elem in self._iter_components()))
Get the number of bits in the array with the specified value. Args: val: A boolean value to check against the array's value. Returns: An integer of the number of bits in the array equal to val.
def _api_group_for_type(cls):
    _groups = {
        (u"v1beta1", u"Deployment"): u"extensions",
        (u"v1beta1", u"DeploymentList"): u"extensions",
        (u"v1beta1", u"ReplicaSet"): u"extensions",
        (u"v1beta1", u"ReplicaSetList"): u"extensions",
    }
    key = (
        cls.apiVersion,
        cls.__name__.rsplit(u".")[-1],
    )
    group = _groups.get(key, None)
    return group
Determine which Kubernetes API group a particular PClass is likely to belong with. This is basically nonsense. The question being asked is wrong. An abstraction has failed somewhere. Fixing that will get rid of the need for this.
def response(request, status, obj):
    request.setResponseCode(status)
    request.responseHeaders.setRawHeaders(
        u"content-type", [u"application/json"],
    )
    body = dumps_bytes(obj)
    return body
Generate a response. :param IRequest request: The request being responded to. :param int status: The response status code to set. :param obj: Something JSON-dumpable to write into the response body. :return bytes: The response body to write out. eg, return this from a *render_* method.
def create(self, collection_name, obj):
    obj = self.agency.before_create(self, obj)
    new = self.agency.after_create(self, obj)
    updated = self.transform(
        [collection_name],
        lambda c: c.add(new),
    )
    return updated
Create a new object in the named collection. :param unicode collection_name: The name of the collection in which to create the object. :param IObject obj: A description of the object to create. :return _KubernetesState: A new state based on the current state but also containing ``obj``.
def replace(self, collection_name, old, new):
    self.agency.before_replace(self, old, new)
    updated = self.transform(
        [collection_name],
        lambda c: c.replace(old, new),
    )
    return updated
Replace an existing object with a new version of it. :param unicode collection_name: The name of the collection in which to replace an object. :param IObject old: A description of the object being replaced. :param IObject new: A description of the object to take the place of ``old``. :return _KubernetesState: A new state based on the current state but containing ``new`` in place of ``old``.
def delete(self, collection_name, obj): updated = self.transform( [collection_name], lambda c: obj.delete_from(c), ) return updated
Delete an existing object. :param unicode collection_name: The name of the collection from which to delete the object. :param IObject obj: A description of the object to delete. :return _KubernetesState: A new state based on the current state but not containing ``obj``.
def get_list_connections(self, environment, product, unique_name_list=None, is_except=False):
    return_list = []
    for item in self.connection_sets:
        if item['environment'] != environment or item['product'] != product:
            continue
        if unique_name_list:
            if not item['unique_name']:
                continue
            in_list = item['unique_name'] in unique_name_list
            # is_except inverts the alias filter: keep items *not* in the list.
            if in_list != is_except:
                return_list.append(item)
        else:
            return_list.append(item)
    return return_list
Gets the list of connections that satisfy the filter by environment, product and (optionally) unique DB names :param environment: Environment name :param product: Product name :param unique_name_list: list of unique db aliases :param is_except: take the connections with the aliases provided or, the other way around, take all the rest :return: list of dictionaries with connections
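A sketch of the expected data shape and filter behaviour; the field values below are illustrative:

connection_sets = [
    {'environment': 'dev', 'product': 'shop', 'unique_name': 'db1'},
    {'environment': 'dev', 'product': 'shop', 'unique_name': 'db2'},
    {'environment': 'prod', 'product': 'shop', 'unique_name': 'db1'},
]
# get_list_connections('dev', 'shop', ['db1'], is_except=False) keeps only
# the first entry; with is_except=True it keeps only the second.
# Entries with an empty 'unique_name' are skipped whenever a name list is given.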
def set_address(): global STATSD_ADDR connection_string = os.getenv('STATSD') if connection_string: url = urlparse.urlparse(connection_string) STATSD_ADDR = (url.hostname, url.port) else: STATSD_ADDR = (os.getenv('STATSD_HOST', 'localhost'), int(os.getenv('STATSD_PORT', 8125)))
Set the (host, port) to connect to from the environment. If the environment is updated, a call to this function will update the address this client connects to. This function will prefer to use the ``STATSD`` connection string environment variable, but will fall back to using the ``STATSD_HOST`` and ``STATSD_PORT``.
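For example, with a connection-string style environment (hostname and port are illustrative):

import os

os.environ['STATSD'] = 'udp://statsd.example.com:8125'
set_address()
# STATSD_ADDR is now ('statsd.example.com', 8125). Without STATSD set,
# the fallback is STATSD_HOST/STATSD_PORT, defaulting to ('localhost', 8125).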
def execution_timer(value):
    def _invoke(method, key_arg_position, *args, **kwargs):
        start_time = time.time()
        result = method(*args, **kwargs)
        duration = time.time() - start_time

        key = [method.__name__]
        if key_arg_position is not None:
            key.append(args[key_arg_position])
        add_timing('.'.join(key), value=duration)

        return result

    if isinstance(value, types.FunctionType):
        def wrapper(*args, **kwargs):
            return _invoke(value, None, *args, **kwargs)
        return wrapper
    else:
        def duration_decorator(func):
            def wrapper(*args, **kwargs):
                return _invoke(func, value, *args, **kwargs)
            return wrapper
        return duration_decorator
The ``execution_timer`` decorator allows for easy instrumentation of the duration of function calls, using the method name in the key. The following example would add duration timing with the key ``my_function``:

.. code:: python

    @statsd.execution_timer
    def my_function(foo):
        pass

You can also include a string argument value passed to your method as part of the key. Pass the index offset of the argument to specify which one to use. In the following example, calling ``my_function('foo', 'bar', 'baz')`` would produce the key ``my_function.baz``:

.. code:: python

    @statsd.execution_timer(2)
    def my_function(foo, bar, baz):
        pass
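Both decorator forms can be exercised like this (a sketch; the function names are illustrative, add_timing is the module's own helper):

@execution_timer
def fetch(url):
    pass

@execution_timer(0)
def fetch_by_name(name):
    pass

fetch('http://example.com')  # duration recorded under the key 'fetch'
fetch_by_name('users')       # duration recorded under 'fetch_by_name.users'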
def _send(key, value, metric_type): if STATSD_PREFIX: key = '.'.join([STATSD_PREFIX, key]) try: STATSD_SOCKET.sendto('{0}:{1}|{2}'.format(key, value, metric_type).encode(), STATSD_ADDR) except socket.error: LOGGER.exception(SOCKET_ERROR)
Send the specified value to the statsd daemon via UDP. :param str key: The metric key to report under (prefixed with ``STATSD_PREFIX`` when set) :param str value: The properly formatted statsd value :param str metric_type: The statsd metric type, e.g. ``c`` for counters or ``ms`` for timers
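The datagram follows the plain statsd text protocol, 'key:value|type'; for example (key names illustrative; STATSD_SOCKET and STATSD_ADDR come from the module's own setup):

_send('app.logins', 1, 'c')           # sends b'app.logins:1|c'
_send('app.render_time', 0.25, 'ms')  # sends b'app.render_time:0.25|ms'
# With STATSD_PREFIX set to 'prod', the first key becomes 'prod.app.logins'.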
def type_names(prefix, sizerange): namelist = [] for i in sizerange: namelist.append(prefix + str(i)) return tuple(namelist)
Helper for type name generation, like: bytes1 .. bytes32
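For example, the byte and integer type-name families can be produced like this (the size ranges are illustrative):

print(type_names('bytes', range(1, 33)))
# ('bytes1', 'bytes2', ..., 'bytes32')
print(type_names('uint', range(8, 257, 8)))
# ('uint8', 'uint16', ..., 'uint256')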
def type_names_mn(prefix, sizerangem, sizerangen):
    # sizes (in bits) are valid if (% 8 == 0) and (m + n <= 256);
    # the first condition is covered by passing proper sizerange{m,n}
    validpairs = [(m, n) for m in sizerangem for n in sizerangen if m + n <= 256]
    return tuple(prefix + str(m) + 'x' + str(n) for m, n in validpairs)
Helper for type name generation, like: fixed0x8 .. fixed0x256
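A quick check of the pair generation, using illustrative ranges (only pairs with m + n <= 256 survive):

names = type_names_mn('fixed', range(0, 257, 8), range(8, 257, 8))
print(names[0], names[-1])  # fixed0x8 fixed248x8
print(len(names))           # one entry per valid (m, n) pair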
def _get_lang(self, *args, **kwargs): if "lang" in kwargs: if kwargs["lang"] in self._available_languages: self.lang = kwargs["lang"]
Let users select a language via the ``lang`` keyword argument; unknown languages are silently ignored.
def notify(self, msg, color='green', notify='true', message_format='text'): self.message_dict = { 'message': msg, 'color': color, 'notify': notify, 'message_format': message_format, } if not self.debug: return requests.post( self.notification_url, json.dumps(self.message_dict), headers=self.headers ) else: print('HipChat message: <{}>'.format(msg)) return []
Send a notification to the specified HipChat room
def trial(path=TESTS_PATH, coverage=False):
    args = ['trial']
    if coverage:
        args.append('--coverage')
    args.append(path)
    print(args)
    local(' '.join(args))
Run tests using trial
def process_result_value(self, value, dialect):
    if value is not None:
        # exec() cannot rebind a local variable in Python 3, so evaluate
        # the stored repr inside an explicit namespace instead.
        namespace = {}
        exec("value = {}".format(value), namespace)
        value = namespace['value']
    return value
When SQLAlchemy gets the string representation from a ReprObjType column, it converts it to the python equivalent via exec.
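A round-trip sketch of the idea outside SQLAlchemy, with an illustrative stored repr:

stored = repr({'a': [1, 2]})  # what the column holds: "{'a': [1, 2]}"

# Rebuild the object the same way the method above does.
namespace = {}
exec("value = {}".format(stored), namespace)
print(namespace['value'])  # {'a': [1, 2]} -- a real dict again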
def make_regex(separator): return re.compile(r'(?:' + re.escape(separator) + r')?((?:[^' + re.escape(separator) + r'\\]|\\.)+)')
Utility function to create regexp for matching escaped separators in strings.
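For example, with ':' as the separator, escaped separators stay inside a single field:

splitter = make_regex(':')
print(splitter.findall(r'a:b\:c:d'))  # ['a', 'b\\:c', 'd']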
def strip_comments(text): regex = r'\s*(#|\/{2}).*$' regex_inline = r'(:?(?:\s)*([A-Za-z\d\.{}]*)|((?<=\").*\"),?)(?:\s)*(((#|(\/{2})).*)|)$' # noqa lines = text.split('\n') for index, line in enumerate(lines): if re.search(regex, line): if re.search(r'^' + regex, line, re.IGNORECASE): lines[index] = "" elif re.search(regex_inline, line): lines[index] = re.sub(regex_inline, r'\1', line) return '\n'.join(lines)
Comment stripper for JSON.
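A behaviour sketch on a small JSON-with-comments snippet (full-line comments become empty lines, inline comments after values are dropped):

text = '\n'.join([
    '{',
    '  "name": "foo", // inline comment',
    '  # full-line comment',
    '  "size": 1',
    '}',
])
print(strip_comments(text))
# {
#   "name": "foo",
#
#   "size": 1
# }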
def register(action): if isinstance(action, str): Action.register(Action(action)) elif isinstance(action, Action): Action.registered.add(action) else: for a in action: Action.register(a)
Action registration is used to support generating lists of permitted actions from a permission set and an object pattern. Only registered actions will be returned by such queries.
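A sketch of the shapes ``register`` accepts, assuming ``Action`` wraps an action-name string (its constructor is not shown here):

Action.register('read')                      # a bare name is wrapped first
Action.register(Action('write'))             # a single Action is added directly
Action.register([Action('list'), 'delete'])  # iterables are handled recursively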
def add(self, effect=None, act=None, obj=None, policy=None, policies=None): if policies is not None: for p in policies: self.add(policy=p) elif policy is not None: for e, a, o in policy: self.add(e, a, o) else: objc = obj.components if obj is not None else [] self.tree[act.components + objc] = effect
Insert an individual (effect, action, object) triple or all triples for a policy or list of policies.
def allow(self, act, obj=None): objc = obj.components if obj is not None else [] try: return self.tree[act.components + objc] == 'allow' except KeyError: return False
Determine whether a given action on a given object is allowed.
def permitted_actions(self, obj=None): return [a for a in Action.registered if self.allow(a, obj(str(a)) if obj is not None else None)]
Determine permitted actions for a given object pattern.
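Putting ``add``, ``allow`` and ``permitted_actions`` together in a hypothetical sketch; the ``PermissionSet``, ``Action`` and ``Object`` constructors are assumed, and only the methods above are taken from the code:

perms = PermissionSet()  # hypothetical container exposing the methods above
perms.add(effect='allow', act=Action('repo.read'), obj=Object('project/*'))

perms.allow(Action('repo.read'), Object('project/website'))   # True if the
# underlying tree matches 'project/website' against 'project/*'
perms.allow(Action('repo.write'), Object('project/website'))  # False: no triple

# obj here is a callable mapping an action name to an object pattern,
# matching the obj(str(a)) call in the method above.
perms.permitted_actions(lambda name: Object('project/website'))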
def subscribe(ws): while ws is not None: gevent.sleep(0.1) try: message = ws.receive() # expect function name to subscribe to if message: stream.register(ws, message) except WebSocketError: ws = None
WebSocket endpoint, used for live updates
def could_scope_out(self): return not self.waiting_for or \ isinstance(self.waiting_for, callable.EndOfStory) or \ self.is_breaking_a_loop()
Whether control could bubble up from the current scope. :return: True if we are not waiting for anything, are waiting for the end of the story, or are breaking a loop.
def get_child_story(self):
    logger.debug('# get_child_story')

    story_loop = self.compiled_story()
    if hasattr(story_loop, 'children_matcher') and not self.matched:
        return self.get_story_scope_child(story_loop)

    story_part = self.get_current_story_part()

    if not hasattr(story_part, 'get_child_by_validation_result'):
        logger.debug('# does not have get_child_by_validation_result')
        return None

    if isinstance(self.waiting_for, forking.SwitchOnValue):
        logger.debug('# switch on value')
        return story_part.get_child_by_validation_result(self.waiting_for.value)

    # for some base classes we can try to validate the result directly
    child_story = story_part.get_child_by_validation_result(self.waiting_for)
    if child_story:
        logger.debug('# child_story')
        logger.debug(child_story)
        return child_story

    stack_tail = self.stack_tail()
    if stack_tail['data'] is not None and not self.matched:
        validator = matchers.deserialize(stack_tail['data'])
        logger.debug('# validator')
        logger.debug(validator)
        logger.debug('# self.message')
        logger.debug(self.message)
        validation_result = validator.validate(self.message)
        logger.debug('# validation_result')
        logger.debug(validation_result)
        res = story_part.get_child_by_validation_result(validation_result)
        logger.debug('# res')
        logger.debug(res)
        # the message validated, but we could not find the right child story;
        # maybe we should use independent validators for each story here
        if res is None:
            return self.get_story_scope_child(story_part)
        else:
            return res

    return None
Try to find a child story that matches the message, and get its scope. :return: