def dettach_igw(self, req, id, driver):
    response = driver.dettach_igw(req.params)
    data = {
        'action': 'dettach_igw',
        'controller': 'network',
        'id': id,
        'cloud': req.environ['calplus.cloud'],
        'response': response
    }
    return data
Detach network from Internet gateway

:param req: Request object
def attach_vpngw(self, req, id, driver):
    vpngw = driver.get_vpngw(req.params, id)
    if vpngw is None:
        vpngw = driver.create_vpngw(req.params, id)
    response = driver.attach_vpngw(req.params, vpngw)
    data = {
        'action': 'attach_vpngw',
        'controller': 'network',
        'id': id,
        'cloud': req.environ['calplus.cloud'],
        'response': response
    }
    return data
Attach network to VPN gateway

:param req: Request object
def connectMSExchange(server):
    if not sspi:
        return False, 'No sspi module found.'

    # send the SMTP EHLO command
    code, response = server.ehlo()
    if code != SMTP_EHLO_OKAY:
        return False, 'Server did not respond to EHLO command.'

    sspi_client = sspi.ClientAuth('NTLM')

    # generate NTLM Type 1 message
    sec_buffer = None
    err, sec_buffer = sspi_client.authorize(sec_buffer)

    # noinspection PyShadowingBuiltins
    buffer = sec_buffer[0].Buffer
    ntlm_message = base64.encodestring(buffer).replace('\n', '')

    # send NTLM Type 1 message -- Authentication Request
    code, response = server.docmd('AUTH', 'NTLM ' + ntlm_message)

    # verify the NTLM Type 2 response -- Challenge Message
    if code != SMTP_AUTH_CHALLENGE:
        msg = 'Server did not respond as expected to NTLM negotiate message'
        return False, msg

    # generate NTLM Type 3 message
    err, sec_buffer = sspi_client.authorize(base64.decodestring(response))

    # noinspection PyShadowingBuiltins
    buffer = sec_buffer[0].Buffer
    ntlm_message = base64.encodestring(buffer).replace('\n', '')

    # send the NTLM Type 3 message -- Response Message
    code, response = server.docmd('', ntlm_message)
    if code != SMTP_AUTH_OKAY:
        return False, response

    return True, ''
Creates a connection for the given server to a Microsoft Exchange server.

:param server | <smtplib.SMTP>

:usage |>>> import smtplib
       |>>> import projex.notify
       |>>> smtp = smtplib.SMTP('mail.server.com')
       |>>> projex.notify.connectMSExchange(smtp)

:return (<bool> success, <str> reason)
def set_entries(self, entries: List[Tuple[str, str]], titles, resources):
    self.entries = []
    for flag, pagename in entries:
        title = titles[pagename].children[0]
        resource = resources.get(pagename, None)
        if resource and hasattr(resource, 'is_published') and not \
                resource.is_published:
            continue
        # Even if there is no resource for this tocentry, we can
        # use the toctree info
        self.entries.append(dict(
            title=title,
            href=pagename,
            resource=resource
        ))
    self.result_count = len(self.entries)
Provide the template with the data for the toc entries
def render(self, builder, context, sphinx_app: Sphinx):
    context['sphinx_app'] = sphinx_app
    context['toctree'] = self
    html = builder.templates.render(self.template + '.html', context)
    return html
Given a Sphinx builder and context with site in it, generate HTML
def associate_public_ip(self, instance_id, public_ip_id, private_ip=None):
    return self.driver.associate_public_ip(
        instance_id, public_ip_id, private_ip)
Associate an external IP
def deprecatedmethod(classname='', info=''):
    def decorated(func):
        @wraps(func)
        def wrapped(*args, **kwds):
            frame = last_frame = None
            try:
                frame = inspect.currentframe()
                last_frame = frame.f_back
                fname = last_frame.f_code.co_filename
                func_file = func.func_code.co_filename

                opts = {
                    'func': func.__name__,
                    'line': last_frame.f_lineno,
                    'file': fname,
                    'class': classname,
                    'info': info,
                    'package': projex.packageFromPath(func_file)
                }

                msg = 'Deprecated method called from %(file)s, line %(line)d.' \
                      '\n %(package)s.%(class)s.%(func)s is deprecated.' \
                      ' %(info)s' % opts

                logger.warning(errors.DeprecatedMethodWarning(msg))
            finally:
                del frame
                del last_frame

            return func(*args, **kwds)

        wrapped.__name__ = func.__name__
        wrapped.__doc__ = ':warning This method is deprecated! %s\n\n' % info
        if func.__doc__:
            wrapped.__doc__ += func.__doc__

        wrapped.__dict__.update(func.__dict__)
        wrapped.__dict__['func_type'] = 'deprecated method'

        return wrapped
    return decorated
Defines a particular method as being deprecated - the method will exist for
backwards compatibility, but will contain information as to how to update
code to become compatible with the current system.

Code that is deprecated will only be supported through the end of a minor
release cycle and will be cleaned during a major release upgrade.

:usage |from projex.decorators import deprecatedmethod
       |
       |class A(object):
       |    @deprecatedmethod('A', 'Use A.printout instead')
       |    def format(self):
       |        print 'test'
       |
       |    def printout(self):
       |        print 'new test'
def profiler(sorting=('tottime',), stripDirs=True, limit=20, path='',
             autoclean=True):
    def decorated(func):
        """ Wrapper function to handle the profiling options. """
        # create a call to the wrapping
        @wraps(func)
        def wrapped(*args, **kwds):
            """ Inner method for calling the profiler method. """
            # define the profile name
            filename = os.path.join(path, '%s.prof' % func.__name__)

            # create a profiler for the method to run through
            prof = hotshot.Profile(filename)
            results = prof.runcall(func, *args, **kwds)
            prof.close()

            # log the information about it
            stats = hotshot.stats.load(filename)
            if stripDirs:
                stats.strip_dirs()

            # we don't want to know about the arguments for this method
            stats.sort_stats(*sorting)
            stats.print_stats(limit)

            # remove the file if desired
            if autoclean:
                os.remove(filename)

            return results
        return wrapped
    return decorated
Creates a profile wrapper around a method to time all the operations that
it runs through. For more information, look into the hotshot Profile
documentation online for the built-in Python package.

:param sorting    <tuple> ( <key>, .. )
:param stripDirs  <bool>
:param limit      <int>
:param path       <str>
:param autoclean  <bool>

:usage |from projex.decorators import profiler
       |
       |class A:
       |    @profiler()  # must be called as a method
       |    def increment(amount, count=1):
       |        return amount + count
       |
       |a = A()
       |a.increment(10)
def retrymethod(count, sleep=0):
    def decorated(func):
        @wraps(func)
        def wrapped(*args, **kwds):
            # do the retry options
            for i in range(count - 1):
                try:
                    return func(*args, **kwds)
                except StandardError:
                    pass

                if sleep:
                    time.sleep(sleep)

            # run as standard
            return func(*args, **kwds)
        return wrapped
    return decorated
Defines a decorator method to wrap a method with a retry mechanism. An
attempt will be made to call the wrapped method the given number of times
based on the count value, waiting the number of seconds defined by the
sleep parameter between attempts. If the throw option is defined, then the
given error will be thrown after the final attempt fails.

:param count | <int>
       sleep | <int> | seconds
def launch_server(message_handler, options):
    logger = logging.getLogger(__name__)
    # if (options.debug):
    #     logger.setLevel(logging.DEBUG)
    # if not options.monitor_port:
    #     logger.warning(
    #         "Monitoring not enabled. No monitor-port option defined.")
    # else:
    #     threading.Thread(target=launch_monitor_server,
    #                      args=(options.host, options.monitor_port, logger)).start()

    # Create the server, binding to specified host on configured port
    # logger.info(
    #     'Starting server on host %s port %d Python version %s.%s.%s' %
    #     ((options.host, options.port) + sys.version_info[:3]))
    # server = ThreadedTCPServer((options.host, options.port),

    # Activate the server; this will keep running until you
    # interrupt the program with Ctrl-C
    try:
        while True:
            logger.debug('waiting for more data')
            if not message_handler.handle():
                break
        logger.warning("I/O stream closed from client")
    except KeyboardInterrupt:
        logger.info("I/O stream closed from client exiting...")
        os._exit(142)
    except:
        logger.exception("Error encountered handling message")
Launch a message server

:param message_handler: The handler to execute for each message
:param options: Application options for TCP, etc.
def validate(self, options):
    try:
        codecs.getencoder(options.char_encoding)
    except LookupError:
        self.parser.error("invalid 'char-encoding' %s" % options.char_encoding)
Validate the options or exit()
def parse_code(url):
    result = urlparse(url)
    query = parse_qs(result.query)
    # parse_qs maps each parameter to a list of values; the docstring
    # promises a single string, so return the first entry
    return query['code'][0]
Parse the code parameter from a URL

:param str url: URL to parse
:return: code query parameter
:rtype: str
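A quick usage sketch (the redirect URL below is illustrative, not from the source):

code = parse_code('https://example.com/callback?code=abc123&state=xyz')
# code == 'abc123'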
def user_token(scopes, client_id=None, client_secret=None, redirect_uri=None):
    webbrowser.open_new(authorize_url(client_id=client_id,
                                      redirect_uri=redirect_uri,
                                      scopes=scopes))
    code = parse_code(raw_input('Enter the URL that you were redirected to: '))
    return User(code, client_id=client_id, client_secret=client_secret,
                redirect_uri=redirect_uri)
Generate a user access token

:param List[str] scopes: Scopes to get
:param str client_id: Spotify Client ID
:param str client_secret: Spotify Client secret
:param str redirect_uri: Spotify redirect URI
:return: Generated access token
:rtype: User
def consume_file(self, infile):
    reader = tag.reader.GFF3Reader(infilename=infile)
    self.consume(reader)
Load the specified GFF3 file into memory.
def consume_seqreg(self, seqreg):
    if not isinstance(seqreg, tag.directive.Directive) or \
            seqreg.type != 'sequence-region':
        raise ValueError('expected ##sequence-region directive')
    if seqreg.seqid in self.declared_regions:
        msg = 'duplicate sequence region "{}"'.format(seqreg.seqid)
        raise ValueError(msg)
    self.declared_regions[seqreg.seqid] = seqreg.range.copy()
Load a :code:`##sequence-region` directive into memory.
def consume_feature(self, feature):
    if not isinstance(feature, tag.feature.Feature):
        raise ValueError('expected Feature object')
    self[feature.seqid][feature.start:feature.end] = feature
    if feature.seqid not in self.inferred_regions:
        self.inferred_regions[feature.seqid] = feature._range.copy()
    newrange = self.inferred_regions[feature.seqid].merge(feature._range)
    self.inferred_regions[feature.seqid].start = newrange.start
    self.inferred_regions[feature.seqid].end = newrange.end
Load a :code:`Feature` object into memory.
def consume(self, entrystream):
    for entry in entrystream:
        if isinstance(entry, tag.directive.Directive) and \
                entry.type == 'sequence-region':
            self.consume_seqreg(entry)
        elif isinstance(entry, tag.feature.Feature):
            self.consume_feature(entry)
Load a stream of entries into memory. Only Feature objects and sequence-region directives are loaded; all other entries are discarded.
def query(self, seqid, start, end, strict=True):
    return sorted([
        intvl.data for intvl in self[seqid].search(start, end, strict)
    ])
Query the index for features in the specified range.

:param seqid: ID of the sequence to query
:param start: start of the query interval
:param end: end of the query interval
:param strict: indicates whether query is strict containment or overlap
    (:code:`True` and :code:`False`, respectively)
def cli(ctx, stage):
    if not ctx.bubble:
        ctx.say_yellow(
            'There is no bubble present, will not show any transformer functions')
        raise click.Abort()

    rule_functions = get_registered_rule_functions()
    ctx.gbc.say('before loading functions:' + str(len(rule_functions)))
    load_rule_functions(ctx)
    ctx.gbc.say('after loading functions:' + str(len(rule_functions)))
    ctx.gbc.say('rule_functions:', stuff=rule_functions, verbosity=10)
    rule_functions.set_parent(ctx.gbc)
    for f in rule_functions:
        ctx.say('fun: ' + f, verbosity=1)
    ctx.gbc.say('funs: ', stuff=rule_functions.get_rule_functions(),
                verbosity=100)
    return True
Show the functions that are available, both bubble system and custom.
def to_utctimestamp(a_datetime):
    if a_datetime.tzinfo is None:
        delta = a_datetime - datetime(1970, 1, 1)
    else:
        delta = a_datetime - datetime(1970, 1, 1, tzinfo=utc)
    return delta.total_seconds()
Calculate the number of seconds since UTC 1970-01-01 00:00:00.

When:

- dt doesn't have tzinfo: assume it's a UTC time.
- dt has tzinfo: use the tzinfo.

WARNING: if your datetime object doesn't have ``tzinfo``, make sure it's
a UTC time, but **NOT a LOCAL TIME**.
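A minimal sanity check, assuming the function is imported from its module:

from datetime import datetime

to_utctimestamp(datetime(1970, 1, 2))  # naive datetime, assumed UTC -> 86400.0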
def to_utc(a_datetime, keep_utc_tzinfo=False):
    if a_datetime.tzinfo:
        utc_datetime = a_datetime.astimezone(utc)  # convert to utc time
        if keep_utc_tzinfo is False:
            utc_datetime = utc_datetime.replace(tzinfo=None)
        return utc_datetime
    else:
        return a_datetime
Convert a timezone-aware datetime to a UTC datetime. For a UTC datetime,
it does not matter whether the timezone info is kept.

:param a_datetime: a timezone-aware datetime (if not, it is returned unchanged)
:param keep_utc_tzinfo: whether to retain the UTC timezone information.
def utc_to_tz(utc_datetime, tzinfo, keep_tzinfo=False):
    tz_awared_datetime = utc_datetime.replace(tzinfo=utc).astimezone(tzinfo)
    if keep_tzinfo is False:
        tz_awared_datetime = tz_awared_datetime.replace(tzinfo=None)
    return tz_awared_datetime
Convert a UTC datetime to a timezone-aware local datetime.

:param utc_datetime: the UTC datetime to convert
:param tzinfo: the target timezone
:param keep_tzinfo: whether to retain the timezone information on the result
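A short sketch, assuming pytz supplies both the module's `utc` constant and the target tzinfo:

from datetime import datetime
import pytz  # assumed; the module's `utc` is compatible with pytz.utc

eastern = pytz.timezone('US/Eastern')
utc_to_tz(datetime(2018, 1, 1, 12, 0), eastern)  # -> naive datetime 2018-01-01 07:00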
def repr_data_size(size_in_bytes, precision=2):  # pragma: no cover
    if size_in_bytes < 1024:
        return "%s B" % size_in_bytes

    magnitude_of_data = ["B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"]
    index = 0
    while 1:
        index += 1
        size_in_bytes, mod = divmod(size_in_bytes, 1024)
        if size_in_bytes < 1024:
            break

    template = "{0:.%sf} {1}" % precision
    s = template.format(size_in_bytes + mod / 1024.0, magnitude_of_data[index])
    return s
Return a human readable string representation of a file size. Doesn't
support sizes greater than 1 EB.

For example:

- 100 bytes                     => 100 B
- 100,000 bytes                 => 97.66 KB
- 100,000,000 bytes             => 95.37 MB
- 100,000,000,000 bytes         => 93.13 GB
- 100,000,000,000,000 bytes     => 90.95 TB
- 100,000,000,000,000,000 bytes => 88.82 PB
...

Magnitude of data::

    1024         KB  kilobyte
    1024 ** 2    MB  megabyte
    1024 ** 3    GB  gigabyte
    1024 ** 4    TB  terabyte
    1024 ** 5    PB  petabyte
    1024 ** 6    EB  exabyte
    1024 ** 7    ZB  zettabyte
    1024 ** 8    YB  yottabyte
def update_slots(self, event):
    if isinstance(event, LexInputEvent):
        event_slots = event.currentIntent.slots
    elif isinstance(event, basestring) or isinstance(event, unicode) \
            or isinstance(event, str):
        event_slots = deepcopy(json.loads(event)['currentIntent']['slots'])
    else:
        event_slots = deepcopy(event['currentIntent']['slots'])

    for key, val in event_slots.items():
        if key not in self.dialogAction.slots._schema.fields:
            field = Field(key, types.StringType())
            self.dialogAction.slots._schema.append_field(field)
        self.dialogAction.slots[key] = val
:param event: the incoming Lex event or its JSON representation
:type event: LexInputEvent, str or dict
:return: None
def render_toctrees(kb_app: kb, sphinx_app: Sphinx, doctree: doctree,
                    fromdocname: str):
    # Only do any of this if toctree support is turned on in KaybeeSettings.
    # By default, this is off.
    settings: KaybeeSettings = sphinx_app.config.kaybee_settings
    if not settings.articles.use_toctree:
        return

    # Setup a template and context
    builder: StandaloneHTMLBuilder = sphinx_app.builder
    env: BuildEnvironment = sphinx_app.env

    # Toctree support. First, get the registered toctree class, if any
    registered_toctree = ToctreeAction.get_for_context(kb_app)
    for node in doctree.traverse(toctree):
        if node.attributes['hidden']:
            continue
        custom_toctree = registered_toctree(fromdocname)
        context = builder.globalcontext.copy()
        context['sphinx_app'] = sphinx_app

        # Get the toctree entries. We only handle one level of depth for
        # now. To go further, we need to recurse like sphinx's
        # adapters.toctree._toctree_add_classes function
        entries = node.attributes['entries']

        # The challenge here is that some items in a toctree
        # might not be resources in our "database". So we have
        # to ask Sphinx to get us the titles.
        custom_toctree.set_entries(entries, env.titles,
                                   sphinx_app.env.resources)
        output = custom_toctree.render(builder, context, sphinx_app)

        # Put the output into the node contents
        listing = [nodes.raw('', output, format='html')]
        node.replace_self(listing)
Look in doctrees for toctree nodes and replace them with a custom render
def stamp_excerpt(kb_app: kb, sphinx_app: Sphinx, doctree: doctree):
    # First, find out which resource this is. Won't be easy.
    resources = sphinx_app.env.resources
    confdir = sphinx_app.confdir
    source = PurePath(doctree.attributes['source'])

    # Get the relative path inside the docs dir, without .rst, then
    # get the resource
    docname = str(source.relative_to(confdir)).split('.rst')[0]
    resource = resources.get(docname)

    if resource:
        # Stamp the excerpt on the resource
        excerpt = getattr(resource.props, 'excerpt', False)
        auto_excerpt = getattr(resource.props, 'auto_excerpt', False)
        if excerpt:
            resource.excerpt = excerpt
        elif not auto_excerpt:
            resource.excerpt = None
        else:
            # Extract the excerpt based on the number of paragraphs
            # in auto_excerpt
            resource.excerpt = get_rst_excerpt(doctree, auto_excerpt)
Walk the tree and extract the excerpt into resource.excerpt
def bitfieldify(buff, count):
    databits = bitarray()
    databits.frombytes(buff)
    return databits[len(databits) - count:]
Extract a bitarray out of a bytes array. Some hardware devices read from the LSB to the MSB, but the bit types available prefer to put pad bits on the LSB side, completely changing the data. This function takes in bytes and the number of bits to extract starting from the LSB, and produces a bitarray of those bits.
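A minimal sketch of the LSB-side extraction, assuming the bitarray package:

from bitarray import bitarray

bits = bitfieldify(b'\x01\xff', 9)  # keep the 9 least significant bits
bits.to01()                         # -> '111111111'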
def build_byte_align_buff(bits):
    bitmod = len(bits) % 8
    if bitmod == 0:
        rdiff = bitarray()
    else:
        # KEEP bitarray
        rdiff = bitarray(8 - bitmod)
        rdiff.setall(False)
    return rdiff + bits
Pad the left side of a bitarray with 0s to align its length with byte boundaries. Args: bits: A bitarray to be padded and aligned. Returns: A newly aligned bitarray.
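A small example of the left padding, assuming the bitarray package:

from bitarray import bitarray

build_byte_align_buff(bitarray('10111')).to01()  # -> '00010111' (3 pad bits added)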
def create(self, name, cidr, **kwargs):
    return self.driver.create(name, cidr, **kwargs)
This function will create a user network. Within OpenStack, it will create
a network and a subnet. Within AWS, it will create a VPC and a subnet.

:param name: string
:param cidr: string, e.g. "10.0.0.0/24"
:param kwargs: dict
:return: dict
def find_whole_word(w):
    return re.compile(r'\b({0})\b'.format(w), flags=re.IGNORECASE).search
Scan through string looking for a location where this word produces a match, and return a corresponding MatchObject instance. Return None if no position in the string matches the pattern; note that this is different from finding a zero-length match at some point in the string.
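Example usage (the sample strings are illustrative):

find_whole_word('cat')('The cat sat.')         # -> MatchObject for 'cat'
find_whole_word('cat')('concatenate') is None  # -> True (no whole-word match)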
def parse(self, fp, headersonly=True):
    feedparser = FeedParser(self._class)
    feedparser._set_headersonly()

    try:
        mp = mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ)
    except:
        mp = fp
    data = ""
    # While parsing the header we can convert to us-ascii?
    while True:
        line = mp.readline()
        data = data + line.decode("us-ascii")
        if line == b"\n":
            break
    feedparser.feed(data)  # mp[0:5000])
    return feedparser.close()
Create a message structure from the data in a file.
def coerce(self, values):
    if isinstance(values, compat.basestring):
        values = tuple(value.strip() for value in values.split(','))

    # Create a list of options to store each value.
    opt_iter = tuple(copy.deepcopy(self._option) for value in values)
    for opt_obj, val in compat.zip(opt_iter, values):
        opt_obj.__set__(None, val)

    return opt_iter
Convert an iterable of literals to an iterable of options.

Args:
    values (iterable or string): An iterable of raw values to convert
        into options. If the value is a string it is assumed to be a
        comma separated list and will be split before processing.

Returns:
    iterable: An iterable of option values initialized with the raw
        values from `values`.

Raises:
    TypeError: If `values` is not iterable or string.
    TypeError: If the underlying option raises a TypeError.
    ValueError: If the underlying option raises a ValueError.
def get(self, name, default=None):
    option = self._options.get(name, None)
    if option is None:
        return default
    return option.__get__(self)
Fetch an option from the dictionary.

Args:
    name (str): The name of the option.
    default: The value to return if the name is missing.

Returns:
    any: The value stored by the option.

This method resolves the option to its value rather than returning the
option object itself. Use the 'options()' method or this object's iter
to get the raw options.
def set(self, name, value):
    if name not in self._options:
        raise AttributeError("Option {0} does not exist.".format(name))

    return self._options[name].__set__(self, value)
Set an option value.

Args:
    name (str): The name of the option.
    value: The value to set the option to.

Raises:
    AttributeError: If the name is not registered.
    TypeError: If the value is not a string or appropriate native type.
    ValueError: If the value is a string but cannot be coerced.
def register(self, name, option):
    if name in self._options:
        raise ValueError("Option {0} already exists.".format(name))

    if not isinstance(option, opt.Option):
        raise TypeError("Options must be of type Option.")

    self._options[name] = option
Register a new option with the namespace.

Args:
    name (str): The name to register the option under.
    option (option.Option): The option object to register.

Raises:
    TypeError: If the option is not an option.Option object.
    ValueError: If the name is already registered.
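A hypothetical sketch of the register/set/get round trip; the Namespace class name and the IntOption type are assumptions, not confirmed by the source:

ns = Namespace()                         # hypothetical container exposing these methods
ns.register('retries', opt.IntOption())  # assumed Option subclass from the opt module
ns.set('retries', '3')                   # string input is coerced by the option
ns.get('retries', default=0)             # -> 3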
def set(self, name, value):
    if name not in self._options:
        self.register(name, self._generator())
    return self._options[name].__set__(self, value)
Set an option value.

Args:
    name (str): The name of the option.
    value: The value to set the option to.

Raises:
    TypeError: If the value is not a string or appropriate native type.
    ValueError: If the value is a string but cannot be coerced.

If the name is not registered a new option will be created using the
option generator.
def GetCompressedFilesInDir(fileDir, fileList, ignoreDirList,
                            supportedFormatList=['.rar', ]):
    goodlogging.Log.Info("EXTRACT", "Parsing file directory: {0}".format(fileDir))
    if os.path.isdir(fileDir) is True:
        for globPath in glob.glob(os.path.join(fileDir, '*')):
            if os.path.splitext(globPath)[1] in supportedFormatList:
                fileList.append(globPath)
Get all supported files from given directory folder. Appends to given
file list.

Parameters
----------
fileDir : string
    File directory to search.

fileList : list
    List which any file matches will be added to.

ignoreDirList : list
    List of directories to ignore in recursive lookup (currently unused).

supportedFormatList : list [optional : default = ['.rar',]]
    List of supported file formats to search for.
def MultipartArchiving(firstPartExtractList, otherPartSkippedList, archiveDir,
                       otherPartFilePath=None):
    if otherPartFilePath is None:
        for filePath in list(otherPartSkippedList):
            MultipartArchiving(firstPartExtractList, otherPartSkippedList,
                               archiveDir, filePath)
    else:
        baseFileName = re.findall("(.+?)[.]part.+?rar", otherPartFilePath)[0]
        if baseFileName in firstPartExtractList:
            util.ArchiveProcessedFile(otherPartFilePath, archiveDir)
            if otherPartFilePath in otherPartSkippedList:
                otherPartSkippedList.remove(otherPartFilePath)
        elif otherPartFilePath not in otherPartSkippedList:
            otherPartSkippedList.append(otherPartFilePath)
Archive all parts of multi-part compressed file.

If file has been extracted (via part1) then move all subsequent parts
directly to archive directory. If file has not been extracted then if
part > 1 add to other part skipped list and only archive when the first
part is sent for archiving.

Parameters
----------
firstPartExtractList : list
    List of base file names whose first part has been extracted.

otherPartSkippedList : list
    List of file paths for parts (> 1) that have been skipped so far.

archiveDir : string
    Archive directory path.

otherPartFilePath : string [optional : default = None]
    Path to a single part file to archive or skip. If None, all entries
    in otherPartSkippedList are reprocessed.
def DoRarExtraction(rarArchive, targetFile, dstDir):
    try:
        rarArchive.extract(targetFile, dstDir)
    except BaseException as ex:
        goodlogging.Log.Info("EXTRACT", "Extract failed - Exception: {0}".format(ex))
        return False
    else:
        return True
RAR extraction with exception catching.

Parameters
----------
rarArchive : RarFile object
    RarFile object to extract.

targetFile : string
    Target file name.

dstDir : string
    Target directory.

Returns
----------
boolean
    False if rar extraction failed, otherwise True.
def GetRarPassword(skipUserInput):
    goodlogging.Log.Info("EXTRACT", "RAR file needs password to extract")
    if skipUserInput is False:
        prompt = "Enter password, 'x' to skip this file or 'exit' to quit this program: "
        response = goodlogging.Log.Input("EXTRACT", prompt)
        response = util.CheckEmptyResponse(response)
    else:
        response = 'x'

    if response.lower() == 'x':
        goodlogging.Log.Info("EXTRACT", "File extraction skipped without password")
        return False
    elif response.lower() == 'exit':
        goodlogging.Log.Fatal("EXTRACT", "Program terminated by user 'exit'")
    else:
        return response
Get password for rar archive from user input.

Parameters
----------
skipUserInput : boolean
    Set to skip user input.

Returns
----------
string or boolean
    If no password is given then returns False otherwise returns user
    response string.
def CheckPasswordReuse(skipUserInput):
    goodlogging.Log.Info("EXTRACT", "RAR files needs password to extract")
    if skipUserInput is False:
        prompt = "Enter 't' to reuse the last password for just this file, " \
                 "'a' to reuse for all subsequent files, " \
                 "'n' to enter a new password for this file " \
                 "or 's' to enter a new password for all files: "
        response = goodlogging.Log.Input("EXTRACT", prompt)
        response = util.ValidUserResponse(response, ('t', 'a', 'n', 's'))
    else:
        response = 'a'

    if response.lower() == 's':
        return -1
    if response.lower() == 'n':
        return 0
    elif response.lower() == 't':
        return 1
    elif response.lower() == 'a':
        return 2
Check with user for password reuse.

Parameters
----------
skipUserInput : boolean
    Set to skip user input.

Returns
----------
int
    Integer from -1 to 2 depending on user response.
def register(function=None, *, singleton=False, threadlocal=False, name=None):
    warnings.warn(
        (
            'Module level `register` decorator has been deprecated and will '
            'be removed in a future release. '
            'Use the Injector class instead'
        ),
        DeprecationWarning
    )

    def decorator(function):
        return manager.register(function, singleton=singleton,
                                threadlocal=threadlocal, name=name)

    if function:
        return decorator(function)
    else:
        return decorator
:deprecated: 1.0.0
    Use :class:`giveme.injector.Injector` instead.

Register a dependency factory in the dependency manager. The function
name is the name of the dependency. This can be used as a decorator.

Args:
    function (callable): The dependency factory function.
        Not needed when used as decorator.
    singleton (``bool``, optional): If ``True`` the given function is
        only called once during the application lifetime. Injectees will
        receive the already created instance when available.
        Defaults to ``False``.
    threadlocal (``bool``, optional): Same as singleton except the
        returned instance is available only to the thread that created
        it. Defaults to ``False``.
    name (``str``, optional): Overridden name for the dependency.
        Defaults to the name of the registered function.
def inject(function=None, **overridden_names):
    warnings.warn(
        (
            'Module level `inject` decorator has been deprecated and will '
            'be removed in a future release. '
            'Use the Injector class instead'
        ),
        DeprecationWarning
    )

    def decorator(function):
        @wraps(function)
        def wrapper(*args, **kwargs):
            signature = inspect.signature(function)
            params = signature.parameters
            if not params:
                return function(*args, **kwargs)
            for name, param in params.items():
                if param.kind not in (param.KEYWORD_ONLY,
                                      param.POSITIONAL_OR_KEYWORD):
                    continue
                if name in kwargs:
                    # Manual override, ignore it
                    continue
                try:
                    resolved_name = overridden_names.get(name, name)
                    kwargs[name] = manager.get_value(resolved_name)
                except KeyError:
                    pass
            return function(*args, **kwargs)
        return wrapper

    if function:
        return decorator(function)
    else:
        return decorator
:deprecated: 1.0.0
    Use :class:`giveme.injector.Injector` instead.

Inject dependencies into given function's arguments. By default the
injector looks for keyword arguments matching registered dependency names.

Example:
    @register
    def db_connection():
        return create_db_connection()

    @inject
    def save_thing(thing, db_connection=None):
        db_connection.store(thing)

Arbitrary arguments may also be mapped to specific dependency names by
passing them to the decorator as ``arg='dependency_name'``

Example:
    @inject(db='db_connection')
    def save_thing(thing, db=None):
        # `db_connection` injected as `db`

Args:
    function (callable): The function that accepts a dependency.
        Implicitly passed when used as a decorator.
    **overridden_names: Mappings of `function` arguments to dependency
        names in the form of ``function_argument='dependency name'``
def register(self, func, singleton=False, threadlocal=False, name=None):
    func._giveme_singleton = singleton
    func._giveme_threadlocal = threadlocal

    if name is None:
        name = func.__name__
    self._registered[name] = func

    return func
Register a dependency function
def get_value(self, name):
    factory = self._registered.get(name)
    if not factory:
        raise KeyError('Name not registered')

    if factory._giveme_singleton:
        if name in self._singletons:
            return self._singletons[name]
        self._singletons[name] = factory()
        return self._singletons[name]
    elif factory._giveme_threadlocal:
        if hasattr(self._threadlocals, name):
            return getattr(self._threadlocals, name)
        setattr(self._threadlocals, name, factory())
        return getattr(self._threadlocals, name)

    return factory()
Get return value of a dependency factory or a live singleton instance.
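A minimal sketch of the singleton behavior, assuming `manager` is an instance of the surrounding class:

def connection():
    return object()

manager.register(connection, singleton=True)
a = manager.get_value('connection')
b = manager.get_value('connection')
assert a is b  # the factory ran once; the instance is cached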
def execute(filelocation, args, outdir, filters=None,
            executable='msConvert.exe'):
    procArgs = [executable, filelocation]
    procArgs.extend(aux.toList(args))
    if filters is not None:
        for arg in aux.toList(filters):
            procArgs.extend(['--filter', arg])
    procArgs.extend(['-o', outdir])

    ## run it ##
    proc = subprocess.Popen(procArgs, stderr=subprocess.PIPE)

    ## do not wait until msConvert finishes, start displaying output immediately ##
    while True:
        out = proc.stderr.read(1)
        if out == '' and proc.poll() != None:
            break
        if out != '':
            sys.stdout.write(out)
            sys.stdout.flush()
Execute the msConvert tool on Windows operating systems.

:param filelocation: input file path
:param args: str() or list(), msConvert arguments, for details see the
    msConvert help below.
:param outdir: path of the output directory
:param filters: str() or list(), specify additional parameters and
    filters, for details see the msConvert help below.
:param executable: must specify the complete file path of the
    msConvert.exe if its location is not in the ``PATH`` environment
    variable.
def execute(filelocation, outformat, outdir, log=False,
            executable='RawConverter.exe'):
    assert outformat in ['ms1', 'ms2', 'ms3', 'mgf']
    args = [executable, filelocation, '--' + outformat,
            '--out_folder', outdir, '--select_mono_prec']

    ## run it ##
    proc = subprocess.Popen(args, cwd=os.path.dirname(executable),
                            stderr=subprocess.PIPE)

    ## do not wait until RawConverter finishes, start displaying output immediately ##
    while True:
        out = proc.stderr.read(1)
        if out == '' and proc.poll() != None:
            break
        if out != '':
            sys.stdout.write(out)
            sys.stdout.flush()
Execute the RawConverter tool on Windows operating systems.

:param filelocation: input file path
:param outformat: output format, must be one of the following:
    ms1, ms2, ms3, mgf
:param outdir: path of the output directory
:param log: #TODO
:param executable: must specify the complete file path of the
    RawConverter.exe if its location is not in the ``PATH`` environment
    variable.

.. note:
    Specifying the complete path to the executable is probably always
    necessary because RawConverter looks for the file "AveragineTable.txt"
    in the working directory.
def trace(fun, *a, **k):
    @wraps(fun)
    def tracer(*a, **k):
        ret = fun(*a, **k)
        print('trace:fun: %s\n    ret=%s\n    a=%s\n    k=%s\n' %
              (str(fun), str(ret), str(a), str(k)))
        return ret
    return tracer
Define a tracer for a rule function, for logging and statistics purposes.
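Example of decorating a rule function with the tracer (the decorated function is illustrative):

@trace
def add(a, b):
    return a + b

add(1, 2)  # prints the function, return value, and arguments, then returns 3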
def timer(fun, *a, **k):
    @wraps(fun)
    def timer(*a, **k):
        start = arrow.now()
        ret = fun(*a, **k)
        end = arrow.now()
        print('timer:fun: %s\n    start:%s, end:%s, took [%s]' %
              (str(fun), str(start), str(end), str(end - start)))
        return ret
    return timer
Define a timer for a rule function, for logging and statistics purposes.
def get_function(self, fun=None):
    sfun = str(fun)
    self.say('get_function:' + sfun, verbosity=100)
    if not fun:
        return NoRuleFunction()  # dummy to execute via no_fun
    if sfun in self._rule_functions:
        return self._rule_functions[sfun]
    else:
        self.add_function(name=sfun, fun=self.rule_function_not_found(fun))
        self.cry('fun(%s) not found, returning dummy' % (sfun), verbosity=10)

    if sfun in self._rule_functions:
        return self._rule_functions[sfun]
    else:
        self.rule_function_not_found(fun)
Get a function as a RuleFunction, or return a NoRuleFunction function.
def add_function(self, fun=None, name=None, fun_type=FUN_TYPE):
    if not name:
        if six.PY2:
            name = fun.func_name
        else:
            name = fun.__name__
    self.say('adding fun(%s)' % name, verbosity=50)
    self.say('adding fun_type:%s' % fun_type, verbosity=50)

    if self.function_exists(name):
        self.cry('overwriting :fun(%s)' % name, verbosity=10)

    self.say('added :' + name, verbosity=10)
    self._rule_functions[name] = RuleFunction(name, fun, fun_type)
    return True
Add a function, replacing any existing function registered under the same name.
def function_exists(self, fun):
    res = fun in self._rule_functions
    self.say('function exists:' + str(fun) + ':' + str(res), verbosity=10)
    return res
Check whether a function exists.
def rule_function_not_found(self, fun=None):
    sfun = str(fun)
    self.cry('rule_function_not_found:' + sfun)

    def not_found(*a, **k):
        return (sfun + ':rule_function_not_found', k.keys())
    return not_found
Any function that does not exist will be added as a dummy function that gathers its inputs, easing a possible future implementation.
def get_elem_type(elem):
    elem_type = None
    if isinstance(elem, list):
        if elem[0].get("type") == "radio":
            elem_type = "radio"
        else:
            raise ValueError(u"Unknown element type: {}".format(elem))
    elif elem.name == "select":
        elem_type = "select"
    elif elem.name == "input":
        elem_type = elem.get("type")
    else:
        raise ValueError(u"Unknown element type: {}".format(elem))

    # To be removed
    assert elem_type is not None

    return elem_type
Get elem type of soup selection :param elem: a soup element
def get_option_value(elem):
    value = elem.get("value")
    if value is None:
        value = elem.text.strip()
    if value is None or value == "":
        msg = u"Error parsing value from {}.".format(elem)
        raise ValueError(msg)

    return value
Get the value attribute, or if it doesn't exist the text content.

<option value="foo">bar</option> => "foo"
<option>bar</option>             => "bar"

:param elem: a soup element
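A minimal check with BeautifulSoup:

from bs4 import BeautifulSoup

soup = BeautifulSoup('<option value="foo">bar</option>', 'html.parser')
get_option_value(soup.option)  # -> 'foo'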
def parse_value(val):
    val = val.replace("%", " ")\
        .replace(" ", "")\
        .replace(",", ".")\
        .replace("st", "").strip()
    missing = ["Ejdeltagit", "N/A"]
    if val in missing:
        return val
    elif val == "":
        return None
    return float(val)
Parse values from html
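A few illustrative inputs, following the replacements the function performs:

parse_value('12,5 %')  # -> 12.5
parse_value('1 024')   # -> 1024.0
parse_value('N/A')     # -> 'N/A' (kept as a missing-value marker)
parse_value('')        # -> None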
def _get_html(self, url):
    self.log.info(u"/GET {}".format(url))
    r = requests.get(url)
    if hasattr(r, 'from_cache'):
        if r.from_cache:
            self.log.info("(from cache)")
    if r.status_code != 200:
        throw_request_err(r)

    return r.content
Get html from url
def _get_json(self, url):
    self.log.info(u"/GET " + url)
    r = requests.get(url)
    if hasattr(r, 'from_cache'):
        if r.from_cache:
            self.log.info("(from cache)")
    if r.status_code != 200:
        throw_request_err(r)

    return r.json()
Get json from url
def regions(self):
    regions = []
    elem = self.dimensions["region"].elem
    for option_elem in elem.find_all("option"):
        region = option_elem.text.strip()
        regions.append(region)

    return regions
Get a list of all regions
def _get_region_slug(self, id_or_label):
    # region = self.dimensions["region"].get(id_or_label)
    region = id_or_label

    slug = region\
        .replace(u" ", "-")\
        .replace(u"ö", "o")\
        .replace(u"Ö", "O")\
        .replace(u"ä", "a")\
        .replace(u"å", "a") + "s"

    EXCEPTIONS = {
        "Jamtland-Harjedalens": "Jamtlands",
        "Rikets": "Sveriges",
    }
    if slug in EXCEPTIONS:
        slug = EXCEPTIONS[slug]

    return slug
Get the regional slug to be used in url "Norrbotten" => "Norrbottens" :param id_or_label: Id or label of region
def _parse_result_page(self, url, payload, only_region=False):
    data = []
    try:
        if only_region:
            html = self.scraper._get_html(url)
        else:
            html = self.scraper._post_html(url, payload=payload)
    except RequestException500:
        self.scraper.log.warning(u"Status code 500 on {} with {}".format(url, payload))
        return None

    current_selection = self._get_current_selection(html)

    table = Datatable(html)
    data = []
    for row in table.data:
        region_or_unit_id, region_or_unit_label = row["region_or_unit"]
        if region_or_unit_label in self.regions:
            row["region"] = region_or_unit_label
            row["unit"] = None
        else:
            row["region"] = None
            row["unit"] = region_or_unit_label
        value = row["value"]
        row.pop("value", None)
        row.pop("region_or_unit", None)

        for dim in self.dimensions:
            if dim.id not in row:
                row[dim.id] = current_selection[dim.id][1]  # gets label

        data.append(Result(value, row))

    return data
Get data from a result page

:param url: url to query
:param payload: payload to pass
:param only_region: if True, fetch the page with a plain GET instead of a POST
:return: a dictlist with data
def elem_type(self):
    if not hasattr(self, "_elem_type"):
        self._elem_type = get_elem_type(self.elem)
    return self._elem_type
:returns: "select"|"radio"|"checkbox"
def default_value(self):
    if not hasattr(self, "_default_value"):
        if self.elem_type == "select":
            try:
                # Get option marked "selected"
                def_value = get_option_value(self.elem.select_one("[selected]"))
            except AttributeError:
                # ...or if that one doesn't exist get the first option
                def_value = get_option_value(self.elem.select_one("option"))
        elif self.elem_type == "checkbox":
            def_value = self.elem.get("value")
        elif self.elem_type == "radio":
            def_value = [x for x in self.elem
                         if x.has_attr("checked")][0].get("value")

        self._default_value = def_value
        assert def_value is not None

    return self._default_value
The default category when making a query
def measures(self):
    if self._measures is None:
        self._measures = get_unique([x["measure"] for x in self.data])
    return self._measures
Get a list of the measures of this datatable.

Measures can be "Antal Besök inom 7 dagar", "Måluppfyllelse vårdgarantin", etc.
def _parse_values(self):
    data = []
    if self.has_tabs:
        def _parse_tab_text(tab):
            # Annoying html in tabs
            if tab.select_one(".visible_normal"):
                return tab.select_one(".visible_normal").text
            else:
                return tab.text

        sub_table_ids = [_parse_tab_text(x)
                         for x in self.soup.select(".table_switch li")]
        sub_tables = self.soup.select(".dataTables_wrapper")
        assert len(sub_tables) == len(sub_table_ids)
        assert len(sub_tables) > 0

        for measure, table in zip(sub_table_ids, sub_tables):
            if self.has_horizontal_scroll:
                _data = self._parse_horizontal_scroll_table(table)
                for region, col, value in _data:
                    data.append({
                        "region_or_unit": region,
                        "select_period": col,  # Hardcode warning!
                        "measure": measure,
                    })
    else:
        if self.has_horizontal_scroll:
            raise NotImplementedError()

        if self.has_vertical_scroll:
            table = self.soup.select_one("#DataTables_Table_0_wrapper")
            _data = self._parse_vertical_scroll_table(table)
        else:
            table = self.soup.select(".chart.table.scrolling")[-1]
            _data = self._parse_regular_table(table)

        for region, measure, value in _data:
            data.append({
                "region_or_unit": region,
                "measure": measure,
                "value": value
            })

    return data
Get values
def _parse_horizontal_scroll_table(self, table_html):
    row_labels = [parse_text(x.text) for x in
                  table_html.select(".DTFC_LeftBodyWrapper tbody tr")]
    row_label_ids = [None] * len(row_labels)
    cols = [parse_text(x.text) for x in
            table_html.select(".dataTables_scrollHead th")]
    value_rows = table_html.select(".dataTables_scrollBody tbody tr")
    values = []
    for row_i, value_row in enumerate(value_rows):
        row_values = [parse_value(x.text) for x in value_row.select("td")]
        values.append(row_values)

    sheet = Sheet(zip(row_label_ids, row_labels), cols, values)

    return sheet.long_format
Get list of dicts from horizontally scrollable table
def as_dictlist(self):
    data = []
    for row_i, row in enumerate(self.row_index):
        for col_i, col in enumerate(self.col_index):
            value = self.values_by_row[row_i][col_i]
            data.append({
                "row": row,
                "col": col,
                "value": value,
            })
    return data
Returns a dictlist with values:

[
    {
        "row": "row_a",
        "col": "col_a",
        "value": 1,
    }
]
def is_json_file(filename, show_warnings=False):
    try:
        config_dict = load_config(filename, file_type="json")
        is_json = True
    except:
        is_json = False
    return is_json
Check if the configuration file type is JSON. Return a boolean indicating whether the file is JSON format or not.
def is_yaml_file(filename, show_warnings=False):
    if is_json_file(filename):
        return False
    try:
        config_dict = load_config(filename, file_type="yaml")
        if type(config_dict) == str:
            is_yaml = False
        else:
            is_yaml = True
    except:
        is_yaml = False
    return is_yaml
Check if the configuration file type is YAML. Return a boolean indicating whether the file is YAML format or not.
def is_ini_file(filename, show_warnings=False):
    try:
        config_dict = load_config(filename, file_type="ini")
        if config_dict == {}:
            is_ini = False
        else:
            is_ini = True
    except:
        is_ini = False
    return is_ini
Check if the configuration file type is INI. Return a boolean indicating whether the file is INI format or not.
def is_toml_file(filename, show_warnings=False):
    if is_yaml_file(filename):
        return False
    try:
        config_dict = load_config(filename, file_type="toml")
        is_toml = True
    except:
        is_toml = False
    return is_toml
Check if the configuration file type is TOML. Return a boolean indicating whether the file is TOML format or not.
def get_config_type(filename):
    if is_json_file(filename):
        return "json"
    elif is_ini_file(filename):
        return "ini"
    elif is_yaml_file(filename):
        return "yaml"
    elif is_toml_file(filename):
        return "toml"
    else:
        return False
Get the configuration file type: [JSON, YAML, INI, TOML]. Return the configuration file type (json, yaml, ini, toml) or False.
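A short usage sketch; note that detection is based on the file contents, not the extension (the file names here are hypothetical):

get_config_type('settings.json')  # -> 'json' if the contents parse as JSON
get_config_type('notes.bin')      # -> False if no parser accepts the file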
def _collect_settings(self, apps):
    contents = {}

    if apps:
        for app in apps:
            if app not in settings.INSTALLED_APPS:
                raise CommandError(
                    "Application '{0}' not in settings.INSTALLED_APPS".format(app))
    else:
        apps = settings.INSTALLED_APPS

    for app in apps:
        module = import_module(app)
        for module_dir in module.__path__:
            json_file = os.path.abspath(os.path.join(module_dir, self.json_file))
            if os.path.isfile(json_file):
                with open(json_file, 'r') as fp:
                    contents[app] = json.load(fp)

    return contents
Iterate over given apps or INSTALLED_APPS and collect the content of each app's settings file, which is expected to be in JSON format.
def required_unique(objects, key):
    keys = {}
    duplicate = set()
    for k in map(key, objects):
        keys[k] = keys.get(k, 0) + 1
        if keys[k] > 1:
            duplicate.add(k)
    if duplicate:
        return (False, u"Duplicate object keys: {}".format(duplicate))
    return (True, u"")
A pyrsistent invariant which requires all objects in the given iterable
to have a unique key.

:param objects: The objects to check.
:param key: A one-argument callable to compute the key of an object.

:return: An invariant failure if any two or more objects have the same
    key computed. An invariant success otherwise.
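Example with plain dicts as the objects (the data is illustrative):

required_unique([{'name': 'a'}, {'name': 'b'}], key=lambda o: o['name'])
# -> (True, u'')
required_unique([{'name': 'a'}, {'name': 'a'}], key=lambda o: o['name'])
# -> failure tuple whose message lists the duplicate key 'a'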
def item_by_name(self, name):
    for obj in self.items:
        if obj.metadata.name == name:
            return obj
    raise KeyError(name)
Find an item in this collection by its name metadata.

:param unicode name: The name of the object for which to search.

:raise KeyError: If no object matching the given name is found.
:return IObject: The object with the matching name.
def _fetch_dimensions(self, dataset):
    yield Dimension(u"date", label="Day of the month")
    yield Dimension(u"month", datatype="month", dialect="swedish")
    yield Dimension(u"year", datatype="year")
Declaring available dimensions like this is not mandatory, but nice, especially if they differ from dataset to dataset. If you are using a built in datatype, you can specify the dialect you are expecting, to have values normalized. This scraper will look for Swedish month names (e.g. 'Januari'), but return them according to the Statscraper standard ('january').
def _dct_from_mro(cls: type, attr_name: str) -> dict:
    d = {}
    for c in reversed(cls.mro()):
        d.update(getattr(c, attr_name, {}))
    return d
Get a merged dictionary from `cls` bases attribute `attr_name`. MRO defines importance (closest = strongest).
def _sorted_items(mapping: typing.Mapping) -> typing.Generator:
    to_yield = set(mapping)
    while to_yield:
        for key, values in mapping.items():
            if key not in to_yield or (to_yield - {key} & set(values)):
                # other keys left to yield before this one
                continue
            yield key, values
            to_yield.remove(key)
Given a mapping where values are iterables, yield first the items whose
values' contained references are not themselves pending keys:

Example:
    >>> dct = {'two': ('two', 'one', 'foo'), 'one': ('hi', 'six', 'net'),
    ...        'six': ('three', 'four'), 'foo': ['bar']}
    >>> for k, v in _sorted_items(dct):
    ...     print(k, v)
    ...
    six ('three', 'four')
    foo ['bar']
    one ('hi', 'six', 'net')
    two ('two', 'one', 'foo')
def _init_name_core(self, name: str):
    self.__regex = re.compile(rf'^{self._pattern}$')
    self.name = name
Runs whenever a new instance is initialized or `sep` is set.
def values(self) -> typing.Dict[str, str]:
    return {k: v for k, v in self._items if v is not None}
The field values of this object's name as a dictionary in the form of {field: value}.
def get_name(self, **values) -> str:
    if not values and self.name:
        return self.name
    if values:
        # if values are provided, solve compounds that may be affected
        for ck, cvs in _sorted_items(self.compounds):
            if ck in cvs and ck in values:
                # redefined compound name to outer scope e.g. fifth = (fifth, sixth)
                continue
            comp_values = [values.pop(cv, getattr(self, cv)) for cv in cvs]
            if None not in comp_values:
                values[ck] = ''.join(rf'{v}' for v in comp_values)
    return self._get_nice_name(**values)
Get a new name string from this object's name values.

:param values: Variable keyword arguments where the **key** should refer
    to a field on this object that will use the provided **value** to
    build the new name.
def cast_config(cls, config: typing.Mapping[str, str]) -> typing.Dict[str, str]:
    return {k: cls.cast(v, k) for k, v in config.items()}
Cast `config` to grouped regular expressions.
def appdataPath(appname):
    # determine Mac OS appdata location
    if sys.platform == 'darwin':
        # credit: MHL
        try:
            from AppKit import NSSearchPathForDirectoriesInDomains
            # NSApplicationSupportDirectory = 14
            # NSUserDomainMask = 1
            # True for expanding the tilde into a fully qualified path
            basepath = NSSearchPathForDirectoriesInDomains(14, 1, True)
            return os.path.join(basepath[0], appname)
        except (ImportError, AttributeError, IndexError):
            basepath = os.path.expanduser("~/Library/Application Support")
            return os.path.join(basepath, appname)

    # determine Windows OS appdata location
    elif sys.platform == 'win32':
        return os.path.join(os.environ.get('APPDATA'), appname)

    # determine Linux OS appdata location
    else:
        return os.path.expanduser(os.path.join('~', '.' + appname))
Returns the generic location for storing application data in a cross platform way. :return <str>
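Illustrative results per platform (actual paths depend on the environment):

appdataPath('MyTool')
# macOS:   ~/Library/Application Support/MyTool
# Windows: %APPDATA%\MyTool
# others:  ~/.MyTool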
def _execute_primitives(self, commands):
    for p in commands:
        if self._scanchain and self._scanchain._debug:
            print("  Executing", p)  # pragma: no cover
        p.execute(self)
Run a list of executable primitives on this controller, and distribute
the returned data to the associated TDOPromises.

Args:
    commands: A list of Executable Primitives to be run in order.
def pretty_version_text():
    version_lines = ["dtool, version {}".format(dtool_version)]
    version_lines.append("\nBase:")
    version_lines.append("dtoolcore, version {}".format(dtoolcore.__version__))
    version_lines.append("dtool-cli, version {}".format(__version__))

    # List the storage broker packages.
    version_lines.append("\nStorage brokers:")
    for ep in iter_entry_points("dtool.storage_brokers"):
        package = ep.module_name.split(".")[0]
        dyn_load_p = __import__(package)
        version = dyn_load_p.__version__
        storage_broker = ep.load()
        version_lines.append(
            "{}, {}, version {}".format(
                storage_broker.key,
                package.replace("_", "-"),
                version))

    # List the plugin packages.
    modules = [ep.module_name for ep in iter_entry_points("dtool.cli")]
    packages = set([m.split(".")[0] for m in modules])
    version_lines.append("\nPlugins:")
    for p in packages:
        dyn_load_p = __import__(p)
        version_lines.append(
            "{}, version {}".format(
                p.replace("_", "-"),
                dyn_load_p.__version__))

    return "\n".join(version_lines)
Return pretty version text listing all plugins.
def dtool(debug):
    level = logging.WARNING
    if debug:
        level = logging.DEBUG
    logging.basicConfig(
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        level=level)
Tool to work with datasets.
def add_nic(self, instance_id, net_id):
    # TODO: upgrade with port_id and fixed_ip in future
    self.client.servers.interface_attach(
        instance_id, None, net_id, None)
    return True
Add a Network Interface Controller
def delete_nic(self, instance_id, port_id):
    self.client.servers.interface_detach(instance_id, port_id)
    return True
Delete a Network Interface Controller
def list_nic(self, instance_id):
    # NOTE: interfaces is a list of novaclient.v2.servers.Server interfaces
    interfaces = self.client.servers.interface_list(instance_id)
    return interfaces
List all Network Interface Controllers
def associate_public_ip(self, instance_id, public_ip_id, private_ip=None):
    floating_ip = self.client.floating_ips.get(public_ip_id)
    floating_ip = floating_ip.to_dict()
    address = floating_ip.get('ip')
    self.client.servers.add_floating_ip(instance_id, address, private_ip)
    return True
Associate an external IP
def disassociate_public_ip(self, public_ip_id):
    floating_ip = self.client.floating_ips.get(public_ip_id)
    floating_ip = floating_ip.to_dict()
    instance_id = floating_ip.get('instance_id')
    address = floating_ip.get('ip')
    self.client.servers.remove_floating_ip(instance_id, address)
    return True
Disassociate an external IP
def split(self, bitindex):
    if bitindex < 0:
        raise ValueError("bitindex must be larger or equal to 0.")
    if bitindex > len(self):
        raise ValueError(
            "bitindex larger than the array's size. "
            "Len: %s; bitindex: %s" % (len(self), bitindex))

    if bitindex == 0:
        return None, self
    if bitindex == len(self):
        return self, None

    left = TDOPromise(self._chain, self._bitstart, bitindex,
                      _parent=self)
    # Starts at 0 because offset is for incoming data from
    # associated primitive, not location in parent.
    right = TDOPromise(self._chain, 0, len(self) - bitindex,
                       _parent=self)
    self._components = []
    self._addsub(left, 0)
    self._addsub(right, bitindex)
    return left, right
Split a promise into two promises at the provided index.

A common operation in JTAG is reading/writing to a register. During the
operation, the TMS pin must be low, but during the writing of the last
bit, the TMS pin must be high. Requiring all reads or writes to have full
arbitrary control over the TMS pin is unrealistic.

Splitting a promise into two sub promises is a way to mitigate this
issue. The final read bit is its own subpromise that can be associated
with a different primitive than the 'rest' of the subpromise.

Returns:
    Two TDOPromise instances: the 'Rest' and the 'Tail'. The 'Rest' is
    the first chunk of the original promise. The 'Tail' is a single bit
    sub promise for the final bit in the operation.

    If the 'Rest' would have a length of 0, None is returned.
def _fulfill(self, bits, ignore_nonpromised_bits=False):
    if self._allsubsfulfilled():
        if not self._components:
            if ignore_nonpromised_bits:
                self._value = bits[self._bitstartselective:
                                   self._bitstartselective + self._bitlength]
            else:
                self._value = bits[self._bitstart:self._bitend]
        else:
            self._value = self._components[0][0]._value
            for sub, offset in self._components[1:]:
                self._value += sub._value
        if self._parent is not None:
            self._parent._fulfill(None)
Supply the promise with the bits from its associated primitive's
execution.

The fulfillment process must walk the promise chain backwards until it
reaches the original promise and can supply the final value.

The data that comes in can either be a bit read for every bit written by
the associated primitive, or (if the primitive supports it) only the bits
that are used by promises. The ignore_nonpromised_bits flag specifies
which format the incoming data is in.

Args:
    bits: A bitarray (or compatible) containing the data read from the
        jtag controller's TDO pin.
    ignore_nonpromised_bits: A boolean specifying if only promised bits
        are being returned (and thus the 2nd index of the promise must
        be used for slicing the incoming data).
def makesubatoffset(self, bitoffset, *, _offsetideal=None):
    if _offsetideal is None:
        _offsetideal = bitoffset
    if bitoffset == 0:
        return self
    newpromise = TDOPromise(
        self._chain,
        self._bitstart + bitoffset,
        self._bitlength,
        _parent=self,
        bitstartselective=self._bitstartselective + _offsetideal
    )
    self._addsub(newpromise, 0)
    return newpromise
Create a copy of this promise with an offset, and use it as this
promise's child.

If this promise's primitive is being merged with another primitive, a
new subpromise may be required to keep track of the new offset of data
coming from the new primitive.

Args:
    bitoffset: An integer offset of the data in the new primitive.
    _offsetideal: An integer offset of the data in terms of bits
        actually used for promises. Used to calculate the start index
        to read if the associated primitive has arbitrary TDO control.

Returns:
    A TDOPromise registered with this promise, and with the correct
    offset.
def add(self, promise, bitoffset, *, _offsetideal=None):
    # This assumes that things are added in order.
    # Sorting or checking should likely be added.
    if _offsetideal is None:
        _offsetideal = bitoffset
    if isinstance(promise, TDOPromise):
        newpromise = promise.makesubatoffset(
            bitoffset, _offsetideal=_offsetideal)
        self._promises.append(newpromise)
    elif isinstance(promise, TDOPromiseCollection):
        for p in promise._promises:
            self.add(p, bitoffset, _offsetideal=_offsetideal)
Add a promise to the promise collection at an optional offset.

Args:
    promise: A TDOPromise to add to this collection.
    bitoffset: An integer offset for this new promise in the collection.
    _offsetideal: An integer offset for this new promise in the
        collection if the associated primitive supports arbitrary TDO
        control.
def split(self, bitindex):
    if bitindex < 0:
        raise ValueError("bitindex must be larger or equal to 0.")
    if bitindex == 0:
        return None, self

    lastend = 0
    split_promise = False
    for splitindex, p in enumerate(self._promises):
        if bitindex in range(lastend, p._bitstart):
            split_promise = False
            break
        if bitindex in range(p._bitstart, p._bitend):
            if bitindex - p._bitstart == 0:
                split_promise = False
            else:
                split_promise = True
            break
        lastend = p._bitend
    else:
        raise Exception("Should be impossible")

    processed_left = TDOPromiseCollection(self._chain)
    processed_right = TDOPromiseCollection(self._chain)

    if split_promise:
        left, right = p.split(bitindex - p._bitstart)

        for i in range(splitindex):
            processed_left.add(self._promises[i], 0)
        processed_left.add(left, 0)

        processed_right.add(right, 0)
        for tmpprim in self._promises[splitindex + 1:]:
            processed_right.add(tmpprim, -bitindex)
        return processed_left, processed_right
    else:
        for i in range(splitindex):
            processed_left.add(self._promises[i], 0)
        for i in range(splitindex, len(self._promises)):
            processed_right.add(self._promises[i], -bitindex)
        return processed_left, processed_right
Split a promise into two promises: a tail bit, and the 'rest'.

Same operation as the one on TDOPromise, except this works with a
collection of promises and splits the appropriate one.

Returns:
    The 'Rest' and the 'Tail'. The 'Rest' is a TDOPromiseCollection
    containing the first chunk of the original TDOPromiseCollection.
    The 'Tail' is a single bit sub promise for the final bit in the
    operation.

    If the 'Rest' would have a length of 0, None is returned.
def makesubatoffset(self, bitoffset, *, _offsetideal=None):
    if _offsetideal is None:
        _offsetideal = bitoffset
    if bitoffset == 0:
        return self
    newpromise = TDOPromiseCollection(self._chain)
    for promise in self._promises:
        newpromise.add(promise, bitoffset, _offsetideal=_offsetideal)
    return newpromise
Create a copy of this PromiseCollection with an offset applied to each
contained promise, and register each with their parent.

If this promise's primitive is being merged with another primitive, a
new subpromise may be required to keep track of the new offset of data
coming from the new primitive.

Args:
    bitoffset: An integer offset of the data in the new primitive.
    _offsetideal: An integer offset to use if the associated primitive
        supports arbitrary TDO control.

Returns:
    A new TDOPromiseCollection registered with this promise collection,
    and with the correct offset.
def call_jira_rest(self, url, user, password, method="GET", data=None):
    headers = {'content-type': 'application/json'}

    self._logger.debug('Connecting to Jira to call the following REST method {0}'.format(url))
    if method == "GET":
        response = requests.get(self.base_url + url,
                                auth=requests.auth.HTTPBasicAuth(user, password))
    elif method == "POST":
        response = requests.post(self.base_url + url, data=json.dumps(data),
                                 auth=requests.auth.HTTPBasicAuth(user, password),
                                 headers=headers)
    else:
        raise ValueError('method argument supports GET or POST values only')
    self._logger.debug('REST call successfully finalised')
    return response.json()
Make JIRA REST call

:param data: data for rest call
:param method: type of call: GET or POST for now
:param url: url to call
:param user: user for authentication
:param password: password for authentication
:return: the parsed JSON response
def cli(ctx, stage):
    if not ctx.bubble:
        ctx.say_yellow('There is no bubble present, ' +
                       'will not show any transformer rules')
        raise click.Abort()

    path = ctx.home + '/'
    RULES = None
    ctx.say('Stage:' + stage, verbosity=10)
    if stage in STAGES:
        if stage in ctx.cfg.CFG:
            STAGE = ctx.cfg.CFG[stage]
            ctx.say('Stage found:', stuff=STAGE, verbosity=100)
            if 'TRANSFORM' in STAGE:
                TRANSFORM = STAGE.TRANSFORM
                ctx.say('Transform found:', stuff=TRANSFORM, verbosity=100)
                if 'RULES' in TRANSFORM:
                    RULES = TRANSFORM.RULES
                    ctx.say('Rules found:', stuff=RULES, verbosity=100)

    if not RULES:
        ctx.say_red('There is no TRANSFORM.RULES in stage:' + stage)
        ctx.say_yellow('please check configuration in ' +
                       ctx.home + '/config/config.yaml')
        raise click.Abort()

    if type(RULES) == str and RULES.endswith('.bubble'):
        ctx.say('loading rules', verbosity=10)
        rules = get_bubble(ctx, path + RULES)
        rule_type = 'bubble'
        transformer = Transformer(rules=rules,
                                  rule_type=rule_type,
                                  bubble_path=path,
                                  verbose=ctx.get_verbose())
        rules = transformer._rules.get_rules()
        ctx.say('current number of rules:' + str(len(rules)), verbosity=1)
        for r in rules:
            ctx.say('rule: ' + str(r), verbosity=1)
        ctx.gbc.say('rules: ', stuff=rules, verbosity=100)
    else:
        ctx.say('no rules!')
    return True
Show transformer rules
def connectExec(connection, protocol, commandLine):
    deferred = connectSession(connection, protocol)

    @deferred.addCallback
    def requestSubsystem(session):
        return session.requestExec(commandLine)

    return deferred
Connect a Protocol to an ssh exec session