docstring: string, lengths 52 to 499
function: string, lengths 67 to 35.2k
__index_level_0__: int64, 52.6k to 1.16M
Check if a given date is a holiday.

Args:
    date (date, datetime or str): Date to be checked.

Returns:
    bool: True if the date is a holiday, False otherwise.

def isholiday(self, date):
    date = parsefun(date)
    if self.holidays:
        # i is the index of first holiday >= date
        i = bisect.bisect_left(self.holidays, date)
        if i == 0 and date < self.holidays[0]:
            warn('Holiday list exhausted at start, '
                 'isholiday(%s) output may be incorrect.' % date)
        elif i == len(self.holidays):
            warn('Holiday list exhausted at end, '
                 'isholiday(%s) output may be incorrect.' % date)
        elif self.holidays[i] == date:
            return True
    return False
818,995
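The bisect-based membership test above generalizes to any sorted list. A minimal standalone sketch (the holiday dates are made up for illustration):

import bisect
import datetime

holidays = [datetime.date(2024, 1, 1), datetime.date(2024, 7, 4),
            datetime.date(2024, 12, 25)]  # sorted, as isholiday() assumes
d = datetime.date(2024, 7, 4)
i = bisect.bisect_left(holidays, d)  # index of first holiday >= d
print(i < len(holidays) and holidays[i] == d)  # True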
Adjust date to last day of the month, regardless of work days.

Args:
    date (date, datetime or str): Date to be adjusted.

Returns:
    datetime: Adjusted date.

def caleom(date):
    date = parsefun(date)
    date += datetime.timedelta(days=32 - date.day)
    date -= datetime.timedelta(days=date.day)
    return date
819,002
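The two timedelta steps implement a classic end-of-month trick: adding 32 - day always lands in the following month (no month has more than 31 days), and subtracting the resulting day-of-month steps back to the last day of the original month. A worked trace, assuming a plain datetime.date input (the parsefun() call is skipped here):

import datetime

d = datetime.date(2024, 2, 10)
d += datetime.timedelta(days=32 - d.day)  # 2024-03-03, past February's end
d -= datetime.timedelta(days=d.day)       # 2024-02-29, last day of February
print(d)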
Generate business days between two dates, taking holidays into consideration.

Args:
    date1 (date, datetime or str): Date start of interval.
    date2 (date, datetime or str): Date end of interval, not included.

Note:
    All business days between date1 (inc) and date2 (exc) are returned,
    and date2 must be later than date1.

Yields:
    datetime: Business days in the specified range.

def range(self, date1, date2):
    date1 = self.adjust(parsefun(date1), FOLLOWING)
    date2 = parsefun(date2)
    holidays = []
    holidx = 0
    if len(self.holidays):
        index1 = bisect.bisect_left(self.holidays, date1)
        index2 = bisect.bisect_left(self.holidays, date2)
        if index2 > index1:
            holidays = self.holidays[index1:index2]
    datewk = date1.weekday()
    while date1 < date2:
        if (holidx < len(holidays)) and (holidays[holidx] == date1):
            holidx += 1
        else:
            yield date1
        date1 += datetime.timedelta(days=self.weekdaymap[datewk].offsetnext)
        datewk = self.weekdaymap[datewk].nextworkday
819,003
Set command line arguments as a source.

Parses the command line arguments described by the parameters.

Args:
    name: the long name of the argument (foo)
    short_name: the optional short name of the argument (f)
    type: the optional type of the argument, defaults to bool
    help: the optional help text for the argument

def argv(cls, name, short_name=None, type=None, help=None):
    cls.__hierarchy.append(argv.Argv(name, short_name, type, help))
819,061
Generate an identifier for a callable signal receiver.

This is used when disconnecting receivers, where we need to correctly
establish equivalence between the input receiver and the receivers assigned
to a signal.

Args:
    receiver: A callable object.

Returns:
    An identifier for the receiver.

def __make_id(receiver):
    if __is_bound_method(receiver):
        return (id(receiver.__func__), id(receiver.__self__))
    return id(receiver)
819,305
Return all signal handlers that are currently still alive for the input `signal`.

Args:
    signal: A signal name.

Returns:
    A list of callable receivers for the input signal.

def __live_receivers(signal):
    with __lock:
        __purge()
        receivers = [funcref() for funcref in __receivers[signal]]
    return receivers
819,307
Return ``True`` if the `method` is a bound method (attached to a class instance).

Args:
    method: A method or function type object.

def __is_bound_method(method):
    if not (hasattr(method, "__func__") and hasattr(method, "__self__")):
        return False
    # Bound methods have a __self__ attribute pointing to the owner instance
    return six.get_method_self(method) is not None
819,308
Register `receiver` method/function as a receiver for the `signal`. When the
signal is emitted, this receiver will be invoked along with all other
associated receivers.

Args:
    signal: A signal identifier (e.g., a signal name).
    receiver: A callable object to connect to the signal.

def connect(signal, receiver):
    __check_receiver(receiver)
    if __is_bound_method(receiver):
        ref = WeakMethod
    else:
        ref = weakref.ref
    with __lock:
        __purge()
        __receivers[signal].append(ref(receiver))
819,309
Disconnect the `receiver` from the signal identified by `signal`.

Args:
    signal: The signal identifier.
    receiver: The callable receiver to disconnect.

Returns:
    True if the receiver was successfully disconnected. False otherwise.

def disconnect(signal, receiver):
    inputkey = __make_id(receiver)
    with __lock:
        __purge()
        receivers = __receivers.get(signal)
        for idx in six.moves.range(len(receivers)):
            connected = receivers[idx]()
            if inputkey != __make_id(connected):
                continue
            del receivers[idx]
            return True  # receiver successfully disconnected!
    return False
819,310
Emit a signal by serially calling each registered signal receiver for the `signal`.

Note:
    The receiver must accept the *args and/or **kwargs that have been passed
    to it. The expected parameters are not dictated by mixbox.

Args:
    signal: A signal identifier or name.
    *args: A variable-length argument list to pass to the receiver.
    **kwargs: Keyword-arguments to pass to the receiver.

def emit(signal, *args, **kwargs):
    if signal not in __receivers:
        return
    receivers = __live_receivers(signal)
    for func in receivers:
        func(*args, **kwargs)
819,311
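Taken together, connect/emit/disconnect form a small pub-sub API. A usage sketch, assuming the functions above live in an importable module (here called signals; the double-underscore helpers are internal). Note that connect() stores weak references, so the receiver must be kept alive by the caller; a module-level function, as below, is fine:

import signals  # hypothetical module name for the code above

def on_saved(path, **kwargs):
    print("saved:", path)

signals.connect("file.saved", on_saved)
signals.emit("file.saved", "/tmp/report.csv")  # prints: saved: /tmp/report.csv
signals.disconnect("file.saved", on_saved)     # True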
Unset the TypedFields on the input `entity`.

Args:
    entity: A mixbox.Entity object.
    *types: A variable-length list of TypedField subclasses. If not
        provided, defaults to TypedField.

def unset(entity, *types):
    if not types:
        types = (TypedField,)
    fields = list(entity._fields.keys())
    remove = (x for x in fields if isinstance(x, types))
    for field in remove:
        del entity._fields[field]
819,937
Return True if the input TypedField `field` contains instance attributes that match the input parameters.

Args:
    field: A TypedField instance.
    params: A dictionary of TypedField instance attribute-to-value mappings.

Returns:
    True if the input TypedField matches the input parameters.

def _matches(field, params):
    fieldattrs = six.iteritems(params)
    return all(getattr(field, attr) == val for attr, val in fieldattrs)
819,938
Iterate over the input class members and yield its TypedFields.

Args:
    klass: A class (usually an Entity subclass).

Yields:
    (class attribute name, TypedField instance) tuples.

def iterfields(klass):
    is_field = lambda x: isinstance(x, TypedField)
    for name, field in inspect.getmembers(klass, predicate=is_field):
        yield name, field
819,939
Return all TypedFields found on the input `Entity` that were initialized with the input **kwargs.

Example:
    >>> find(myentity, multiple=True, type_=Foo)

Note:
    TypedField.__init__() can accept a string or a class as a type_
    argument, but this method expects a class.

Args:
    **kwargs: TypedField __init__ **kwargs to search on.

Returns:
    A list of TypedFields with matching **kwarg values.

def find(entity, **kwargs):
    try:
        typedfields = entity.typed_fields()
    except AttributeError:
        typedfields = iterfields(entity.__class__)
    matching = [x for x in typedfields if _matches(x, kwargs)]
    return matching
819,940
Return the TypedField value for the input `instance` and `owner`.

If the TypedField is a "multiple" field and hasn't been set yet, set the
field to an empty list and return it.

Args:
    instance: An instance of the `owner` class that this TypedField
        belongs to.
    owner: The TypedField owner class.

def __get__(self, instance, owner=None):
    if instance is None:
        return self
    elif self in instance._fields:
        return instance._fields[self]
    elif self.multiple:
        return instance._fields.setdefault(self, self._listfunc())
    else:
        return None
819,942
Get the namespace the given prefix maps to.

Args:
    prefix (str): The prefix.

Returns:
    str: The namespace, or None if the prefix isn't mapped to anything in
    this set.

def namespace_for_prefix(self, prefix):
    try:
        ni = self.__lookup_prefix(prefix)
    except PrefixNotFoundError:
        return None
    else:
        return ni.uri
820,294
Return a subset of this NamespaceSet containing only data for the given namespaces.

Args:
    ns_uris (iterable): An iterable of namespace URIs which select the
        namespaces for the subset.

Returns:
    The subset.

Raises:
    NamespaceNotFoundError: If any namespace URIs in `ns_uris` don't match
        any namespaces in this set.

def subset(self, ns_uris):
    sub_ns = NamespaceSet()
    for ns_uri in ns_uris:
        ni = self.__lookup_uri(ns_uri)
        new_ni = copy.deepcopy(ni)
        # We should be able to reach into details of our own
        # implementation on another obj, right?? This makes the subset
        # operation faster. We can set up the innards directly from a
        # cloned _NamespaceInfo.
        sub_ns._NamespaceSet__add_namespaceinfo(new_ni)
    return sub_ns
820,308
Return the version of the root element passed in.

Args:
    root (etree.Element)

Returns:
    distutils.StrictVersion

Raises:
    UnknownVersionError

def _get_version(self, root):
    # Note: STIX and MAEC use a "version" attribute. To support CybOX, a
    # subclass will need to combine "cybox_major_version",
    # "cybox_minor_version", and "cybox_update_version".
    version = self.get_version(root)
    if version:
        return StrictVersion(version)
    raise UnknownVersionError(
        "Unable to determine the version of the input document. No "
        "version information found on the root element."
    )
820,315
Ensure the root element is a supported version.

Args:
    root (etree.Element)

Raises:
    UnsupportedVersionError

def _check_version(self, root):
    version = self._get_version(root)
    supported = [StrictVersion(x) for x in self.supported_versions(root.tag)]
    if version in supported:
        return
    error = "Document version ({0}) not in supported versions ({1})"
    raise UnsupportedVersionError(
        message=error.format(version, supported),
        expected=supported,
        found=version
    )
820,316
Check that the XML element tree has a supported root element.

Args:
    root (etree.Element)

Raises:
    UnsupportedRootElementError

def _check_root_tag(self, root):
    supported = self.supported_tags()
    if root.tag in supported:
        return
    error = "Document root element ({0}) not one of ({1})"
    raise UnsupportedRootElementError(
        message=error.format(root.tag, supported),
        expected=supported,
        found=root.tag,
    )
820,317
Attempts to parse `value` into an instance of ``datetime.datetime``. If
`value` is ``None``, this function will return ``None``.

Args:
    value: A timestamp. This can be a string or datetime.datetime value.

def parse_datetime(value):
    if not value:
        return None
    elif isinstance(value, datetime.datetime):
        return value
    return dateutil.parser.parse(value)
820,347
Attempts to parse `value` into an instance of ``datetime.date``. If `value`
is ``None``, this function will return ``None``.

Args:
    value: A timestamp. This can be a string, datetime.date, or
        datetime.datetime value.

def parse_date(value):
    if not value:
        return None
    if isinstance(value, datetime.date):
        return value
    return parse_datetime(value).date()
820,348
Attempts to convert `value` into an ``xs:date`` string. If `value` is
``None``, ``None`` will be returned.

Args:
    value: A date value. This can be a string, datetime.date, or
        datetime.datetime object.

Returns:
    An ``xs:date`` formatted timestamp string.

def serialize_date(value):
    if not value:
        return None
    elif isinstance(value, datetime.datetime):
        return value.date().isoformat()
    elif isinstance(value, datetime.date):
        return value.isoformat()
    else:
        return parse_date(value).isoformat()
820,349
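The three date helpers round-trip between strings and date objects. A short sketch, assuming they are importable from the same module and that python-dateutil is installed:

import datetime

print(parse_datetime("2014-02-10T10:00:00"))  # datetime.datetime(2014, 2, 10, 10, 0)
print(parse_date("2014-02-10"))               # datetime.date(2014, 2, 10)
print(serialize_date(datetime.datetime(2014, 2, 10, 10, 0)))  # '2014-02-10'
print(serialize_date(None))                   # None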
Create an instance of the class associated with the `key` (xsi:type) and
initialize it with the *args and **kwargs.

Args:
    key: A class lookup key (see entity_class()).

Returns:
    An instance of the class associated with the `key`.

def instance(cls, key, *args, **kwargs):
    klass = cls.entity_class(key)
    return klass(*args, **kwargs)
820,513
Parse the dictionary and return an Entity instance.

This will attempt to extract type information from the input dictionary and
pass it to entity_class to resolve the correct class for the type.

Args:
    cls_dict: A dictionary representation of an Entity object.
    fallback_xsi_type: An xsi_type to use for string input, which has no
        properties to extract type information from.

Returns:
    An Entity instance.

def from_dict(cls, cls_dict, fallback_xsi_type=None):
    if not cls_dict:
        return None
    if isinstance(cls_dict, six.string_types):
        if not getattr(cls, "_convert_strings", False):
            return cls_dict
    try:
        typekey = cls.dictkey(cls_dict)
    except TypeError:
        typekey = fallback_xsi_type
    klass = cls.entity_class(typekey)
    return klass.from_dict(cls_dict)
820,514
Parse the generateDS object and return an Entity instance.

This will attempt to extract type information from the input object and
pass it to entity_class to resolve the correct class for the type.

Args:
    cls_obj: A generateDS object.

Returns:
    An Entity instance.

def from_obj(cls, cls_obj):
    if not cls_obj:
        return None
    typekey = cls.objkey(cls_obj)
    klass = cls.entity_class(typekey)
    return klass.from_obj(cls_obj)
820,515
Import the class referred to by the fully qualified class path.

Args:
    classpath: A full "foo.bar.MyClass" path to a class definition.

Returns:
    The class referred to by the classpath.

Raises:
    ImportError: If an error occurs while importing the module.
    AttributeError: If the class does not exist in the imported module.

def import_class(classpath):
    modname, classname = classpath.rsplit(".", 1)
    module = importlib.import_module(modname)
    klass = getattr(module, classname)
    return klass
820,597
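import_class() resolves a dotted path to a class object at runtime, which is how plugin-style code avoids hard imports. For example, with the standard library:

klass = import_class("collections.OrderedDict")
od = klass(a=1, b=2)
print(type(od).__name__)  # OrderedDict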
Attempt to return a Python class for the input class reference.

If `classref` is a class or None, return it. If `classref` is a Python
classpath (e.g., "foo.bar.MyClass"), import the class and return it.

Args:
    classref: A fully-qualified Python path to a class, or a Python class.

Returns:
    A class.

def resolve_class(classref):
    if classref is None:
        return None
    elif isinstance(classref, six.class_types):
        return classref
    elif isinstance(classref, six.string_types):
        return import_class(classref)
    else:
        raise ValueError("Unable to resolve class for '%s'" % classref)
820,598
Function decorator which checks that the decorated function is called with a set of required kwargs.

Args:
    *argnames: String keyword argument names.

Raises:
    ValueError: If a required kwarg is missing in the decorated function
        call.

def needkwargs(*argnames):
    required = set(argnames)

    def decorator(func):
        def inner(*args, **kwargs):
            missing = required - set(kwargs)
            if missing:
                err = "%s kwargs are missing." % list(missing)
                raise ValueError(err)
            return func(*args, **kwargs)
        return inner
    return decorator
820,599
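Usage of the decorator above; the inner wrapper checks kwargs before delegating (note it does not copy function metadata, so functools.wraps could be added):

@needkwargs("name", "age")
def register(**kwargs):
    return "%(name)s/%(age)s" % kwargs

print(register(name="ada", age=36))  # 'ada/36'
register(name="ada")                 # ValueError: ['age'] kwargs are missing.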
Removes all CDATA blocks from `text` if it contains them.

Note:
    If the text contains escaped XML characters outside of a CDATA block,
    they will be unescaped.

Args:
    text: A string containing one or more CDATA blocks.

Returns:
    An XML unescaped string with CDATA block qualifiers removed.

def strip_cdata(text):
    if not is_cdata(text):
        return text
    xml = "<e>{0}</e>".format(text)
    node = etree.fromstring(xml)
    return node.text
820,706
Wraps the input `text` in a ``<![CDATA[ ]]>`` block.

If the text contains CDATA sections already, they are stripped and replaced
by the application of an outer-most CDATA block.

Args:
    text: A string to wrap in a CDATA block.

Returns:
    The `text` value wrapped in ``<![CDATA[]]>``.

def cdata(text):
    if not text:
        return text
    if is_cdata(text):
        text = strip_cdata(text)
    escaped = "{0}{1}{2}".format(CDATA_START, text, CDATA_END)
    return escaped
820,707
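strip_cdata() and cdata() are inverses. A sketch assuming the module's is_cdata() helper and its CDATA_START/CDATA_END constants ("<![CDATA[" and "]]>"):

print(cdata("1 < 2"))                    # <![CDATA[1 < 2]]>
print(strip_cdata("<![CDATA[1 < 2]]>"))  # 1 < 2
print(cdata("<![CDATA[x]]>"))            # stripped, then re-wrapped: <![CDATA[x]]>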
Return True if the input value is valid for insertion into the inner list.

Args:
    value: An object about to be inserted.

def _is_valid(self, value):
    # Entities have an istypeof method that can perform more sophisticated
    # type checking.
    if hasattr(self._type, "istypeof"):
        return self._type.istypeof(value)
    else:
        return isinstance(value, self._type)
820,709
Attempt to set the value at position `key` to the `value`.

If the value is not the correct type, an attempt will be made to convert it
to the correct type.

Args:
    key: An index.
    value: A value to set.

def __setitem__(self, key, value):
    if not self._is_valid(value):
        value = self._fix_value(value)
    self._inner.__setitem__(key, value)
820,711
Get file from WeedFS. Returns file content. May be problematic for large
files as content is stored in memory.

Args:
    **fid**: File identifier <volume_id>,<file_name_hash>

Returns:
    Content of the file with provided fid or None if file doesn't exist on
    the server

.. versionadded:: 0.3.1

def get_file(self, fid):
    url = self.get_file_url(fid)
    return self.conn.get_raw_data(url)
823,433
Gets size of uploaded file, or None if file doesn't exist.

Args:
    **fid**: File identifier <volume_id>,<file_name_hash>

Returns:
    Int or None

def get_file_size(self, fid):
    url = self.get_file_url(fid)
    res = self.conn.head(url)
    if res is not None:
        size = res.headers.get("content-length", None)
        if size is not None:
            return int(size)
    return None
823,436
Checks if file with provided fid exists.

Args:
    **fid**: File identifier <volume_id>,<file_name_hash>

Returns:
    True if file exists. False if not.

def file_exists(self, fid):
    res = self.get_file_size(fid)
    if res is not None:
        return True
    return False
823,437
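The file helpers above hang off a WeedFS client. A hedged usage sketch, assuming pyseaweed's WeedFS class and a SeaweedFS master reachable on localhost:9333 (the file name is illustrative):

from pyseaweed import WeedFS

w = WeedFS("localhost", 9333)      # master host and port
fid = w.upload_file("report.txt")  # returns a fid such as "3,01637037d6"
if w.file_exists(fid):
    print(w.get_file_size(fid))    # size in bytes
    print(w.get_file(fid))         # whole content, held in memory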
Prepare headers for http communication. Return dict of headers to be used
in requests.

Args:
    **additional_headers**: (optional) Additional headers to be used with
        request

    .. versionadded:: 0.3.2

Returns:
    Headers dict. Keys and values are strings.

def _prepare_headers(self, additional_headers=None, **kwargs):
    user_agent = "pyseaweed/{version}".format(version=__version__)
    headers = {"User-Agent": user_agent}
    if additional_headers is not None:
        headers.update(additional_headers)
    return headers
823,572
Gets data from url as text. Returns content under the provided url as text.

Args:
    **url**: address of the wanted data

    **additional_headers**: (optional) Additional headers to be used with
        request

    .. versionadded:: 0.3.2

Returns:
    string

def get_data(self, url, *args, **kwargs):
    res = self._conn.get(url, headers=self._prepare_headers(**kwargs))
    if res.status_code == 200:
        return res.text
    else:
        return None
823,574
Gets data from url as bytes. Returns content under the provided url as
bytes, i.e. for binary data.

Args:
    **url**: address of the wanted data

    **additional_headers**: (optional) Additional headers to be used with
        request

    .. versionadded:: 0.3.2

Returns:
    bytes

def get_raw_data(self, url, *args, **kwargs):
    res = self._conn.get(url, headers=self._prepare_headers(**kwargs))
    if res.status_code == 200:
        return res.content
    else:
        return None
823,575
Uploads file to provided url. Returns contents as text.

Args:
    **url**: address where to upload file

    **filename**: Name of the uploaded file

    **file_stream**: file like object to upload

    **additional_headers**: (optional) Additional headers to be used with
        request

    .. versionadded:: 0.3.2

Returns:
    string

def post_file(self, url, filename, file_stream, *args, **kwargs):
    res = self._conn.post(url, files={filename: file_stream},
                          headers=self._prepare_headers(**kwargs))
    if res.status_code in (200, 201):
        return res.text
    else:
        return None
823,576
Deletes data under provided url. Returns status as boolean.

Args:
    **url**: address of file to be deleted

    **additional_headers**: (optional) Additional headers to be used with
        request

    .. versionadded:: 0.3.2

Returns:
    Boolean. True if request was successful. False if not.

def delete_data(self, url, *args, **kwargs):
    res = self._conn.delete(url, headers=self._prepare_headers(**kwargs))
    return res.status_code in (200, 202)
823,577
Custom compare function. Returns ``1`` if the first ``Piper`` instance is
upstream of the second ``Piper`` instance, ``-1`` if the first ``Piper`` is
downstream of the second ``Piper`` and ``0`` if the two ``Pipers`` are
independent.

Arguments:
    - piper1(``Piper``) ``Piper`` instance.
    - piper2(``Piper``) ``Piper`` instance.

def children_after_parents(self, piper1, piper2):
    if piper1 in self[piper2].deep_nodes():
        return 1
    elif piper2 in self[piper1].deep_nodes():
        return -1
    else:
        return 0
824,136
Removes a ``Piper`` from the ``Dagger`` instance.

Arguments:
    - piper(``Piper`` or id(``Piper``)) ``Piper`` instance or ``Piper``
      instance id.
    - forced(bool) [default: ``False``] If "forced" is ``True``, will not
      raise a ``DaggerError`` if the ``Piper`` has outgoing pipes and will
      also remove it.

def del_piper(self, piper, forced=False):
    self.log.debug('%s trying to delete piper %s' %
                   (repr(self), repr(piper)))
    try:
        piper = self.resolve(piper, forgive=False)
    except DaggerError:
        self.log.error('%s cannot resolve piper from %s' %
                       (repr(self), repr(piper)))
        raise DaggerError('%s cannot resolve piper from %s' %
                          (repr(self), repr(piper)))
    if self.incoming_edges(piper) and not forced:
        self.log.error('%s piper %s has down-stream pipers (use forced=True to override)' %
                       (repr(self), piper))
        raise DaggerError('%s piper %s has down-stream pipers (use forced=True to override)' %
                          (repr(self), piper))
    self.del_node(piper)
    self.log.debug('%s deleted piper %s' % (repr(self), piper))
824,146
Adds a sequence of ``Pipers`` instances to the ``Dagger`` in the specified
order. Takes optional arguments for ``Dagger.add_piper``.

Arguments:
    - pipers(sequence of valid ``add_piper`` arguments) Sequence of
      ``Pipers`` or valid ``Dagger.add_piper`` arguments to be added to the
      ``Dagger`` in the left to right order.

def add_pipers(self, pipers, *args, **kwargs):
    for piper in pipers:
        self.add_piper(piper, *args, **kwargs)
824,149
Deletes a sequence of ``Pipers`` instances from the ``Dagger`` in the
reverse of the specified order. Takes optional arguments for
``Dagger.del_piper``.

Arguments:
    - pipers(sequence of valid ``del_piper`` arguments) Sequence of
      ``Pipers`` or valid ``Dagger.del_piper`` arguments to be removed from
      the ``Dagger`` in the right to left order.

def del_pipers(self, pipers, *args, **kwargs):
    pipers.reverse()
    for piper in pipers:
        self.del_piper(piper, *args, **kwargs)
824,150
Adds a sequence of pipes to the ``Dagger`` in the specified order. Takes
optional arguments for ``Dagger.add_pipe``.

Arguments:
    - pipes(sequence of valid ``add_pipe`` arguments) Sequence of pipes or
      other valid ``Dagger.add_pipe`` arguments to be added to the
      ``Dagger`` in the left to right order.

def add_pipes(self, pipes, *args, **kwargs):
    for pipe in pipes:
        self.add_pipe(pipe, *args, **kwargs)
824,151
Deletes a sequence of pipes from the ``Dagger`` in the specified order.
Takes optional arguments for ``Dagger.del_pipe``.

Arguments:
    - pipes(sequence of valid ``del_pipe`` arguments) Sequence of pipes or
      other valid ``Dagger.del_pipe`` arguments to be removed from the
      ``Dagger`` in the left to right order.

def del_pipes(self, pipes, *args, **kwargs):
    for pipe in pipes:
        self.del_pipe(pipe, *args, **kwargs)
824,152
Saves pipeline as a Python source code file.

Arguments:
    - filename(``path``) Path to save the pipeline source code.

def save(self, filename):
    handle = open(filename, 'wb')
    handle.write(P_LAY % self._code())
    handle.close()
824,157
Instantiates (loads) pipeline from a source code file.

Arguments:
    - filename(``path``) location of the pipeline source code.

def load(self, filename):
    dir_name = os.path.dirname(filename)
    mod_name = os.path.basename(filename).split('.')[0]
    self.filename = mod_name
    sys.path.insert(0, dir_name)
    mod = __import__(mod_name)
    sys.path.remove(dir_name)  # do not pollute the path.
    pipers, xtras, pipes = mod.pipeline()
    self.add_pipers(pipers, xtras)
    self.add_pipes(pipes)
824,158
Waits (blocks) until a running pipeline finishes.

Arguments:
    - timeout(``int``) [default: ``None``] Specifies the timeout after
      which a ``RuntimeError`` will be raised. The default is to wait
      indefinitely for the pipeline to finish.

def wait(self, timeout=None):
    if self._started.isSet() and \
       self._running.isSet() and \
       not self._pausing.isSet():
        self._finished.wait(timeout)
    else:
        raise PlumberError
824,161
Connects the ``Piper`` instance to its upstream ``Pipers`` that should be
given as a sequence. This connects this ``Piper.inbox`` with the upstream
``Piper.outbox`` respecting any "consume", "spawn" and "produce" arguments.

Arguments:
    - inbox(sequence) sequence of ``Piper`` instances.

def connect(self, inbox):
    if self.started:
        self.log.error('Piper %s is started and cannot connect to %s.' %
                       (self, inbox))
        raise PiperError('Piper %s is started and cannot connect to %s.' %
                         (self, inbox))
    elif self.connected:
        self.log.error('Piper %s is connected and cannot connect to %s.' %
                       (self, inbox))
        raise PiperError('Piper %s is connected and cannot connect to %s.' %
                         (self, inbox))
    elif hasattr(self.imap, '_started') and self.imap._started.isSet():
        self.log.error('Piper %s cannot connect (NuMap is started).' % self)
        raise PiperError('Piper %s cannot connect (NuMap is started).' % self)
    else:
        # not started and not connected and NuMap not started
        self.log.debug('Piper %s connects to %s' % (self, inbox))
        # determine the stride with which result will be consumed from the
        # input.
        stride = self.imap.stride if hasattr(self.imap, 'stride') else 1
        # Tee input iterators. The idea is to create a promise object for a
        # tee. The actual teed iterator will be created on start. Each tee
        # is protected with a separate lock; the reasons for this are:
        # - tee objects are, as a collection, not thread safe
        # - tee objects might be next'ed from different threads; a single
        #   lock will not guarantee that a thread is allowed to finish its
        #   stride (a thread releases the next thread only if it finished a
        #   stride).
        teed = []
        for piper in inbox:
            if hasattr(piper, '_iter'):  # isinstance Piper?
                piper.tee_num += 1
                tee_lock = Lock()
                tee_lock.acquire()
                piper.tee_locks.append(tee_lock)
                piper = _TeePiper(piper, piper.tee_num - 1, stride)
            teed.append(_InputIterator(piper, self))
        # set how much to consume from input iterators.
        self.inbox = _Zip(*teed) if self.consume == 1 else \
            _Consume(_Zip(*teed), n=self.consume, stride=stride)
        # set how much to spawn.
        for i in xrange(self.spawn):
            self.imap_tasks.append(
                self.imap(self.worker, self.inbox)
                if self.imap is imap else
                self.imap(self.worker, self.inbox, timeout=self.timeout,
                          track=self.track))
        # chain the results together.
        outbox = _Chain(self.imap_tasks, stride=stride)
        # Make output
        # prd = ProduceFromSequence if self.produce_from_sequence else Produce
        if self.produce == 1:
            self.outbox = outbox
        elif self.repeat:
            self.outbox = _Repeat(outbox, n=self.produce, stride=stride)
        else:
            self.outbox = _Produce(outbox, n=self.produce, stride=stride)
        self.connected = True
    return self
824,166
Disconnects the ``Piper`` instance from its upstream ``Pipers`` or input
data if the ``Piper`` is the input node of a pipeline.

Arguments:
    - forced(``bool``) [default: ``False``] If ``True`` the ``Piper`` will
      try to forcefully remove all tasks (including the spawned ones) from
      the ``NuMap`` instance.

def disconnect(self, forced=False):
    if not self.connected:
        self.log.error('Piper %s is not connected and cannot be disconnected' % self)
        raise PiperError('Piper %s is not connected and cannot be disconnected' % self)
    elif self.started:
        self.log.error('Piper %s is started and cannot be disconnected (stop first)' % self)
        raise PiperError('Piper %s is started and cannot be disconnected (stop first)' % self)
    elif hasattr(self.imap, '_started') and self.imap._started.isSet():
        self.log.error('Piper %s cannot disconnect as its NuMap is started' % self)
        raise PiperError('Piper %s cannot disconnect as its NuMap is started' % self)
    else:
        # connected and not started
        if hasattr(self.imap, '_started'):
            if self.imap._tasks == []:
                # fully stopped
                pass
            elif self.imap_tasks[-1].task == len(self.imap._tasks) - 1:
                # the last task of this piper is the last task in the NuMap
                self.imap.pop_task(number=self.spawn)
            elif forced:
                # removes all tasks from the NuMap; can be called multiple
                # times.
                self.imap.pop_task(number=True)
            else:
                msg = 'Piper %s is not the last Piper added to the NuMap' % self
                self.log.error(msg)
                raise PiperError(msg)
        self.log.debug('Piper %s disconnected from %s' % (self, self.inbox))
        self.imap_tasks = []
        self.inbox = None
        self.outbox = None
        self.connected = False
824,168
Removes a previously added **task** from the ``NuMap`` instance.

Arguments:
    - number (``int`` or ``True``) A positive integer specifying the number
      of **tasks** to pop. If number is set ``True`` all **tasks** will be
      popped.

def pop_task(self, number):
    if not self._started.isSet():
        if number is True:
            self._tasks = []
            self._tasks_tracked = {}
        elif number > 0:
            last_task_id = len(self._tasks) - 1
            for i in xrange(number):
                self._tasks.pop()
                self._tasks_tracked.pop(last_task_id - i, None)
    else:
        log.error('%s cannot delete tasks (is started).' % self)
        raise RuntimeError('%s cannot delete tasks (is started).' % self)
824,315
Write language-specific script template to file.

Arguments:
    - fn(``string``) path to save the template to
    - lang('python', 'bash') which programming language

def write_template(fn, lang="python"):
    with open(fn, "wb") as fh:
        if lang == "python":
            fh.write(PY_TEMPLATE)
        elif lang == "bash":
            fh.write(SH_TEMPLATE)
824,354
Execute arbitrary scripts.

Arguments:
    - cfg(``dict``) script configuration dictionary

def script(inbox, cfg):
    script_name = cfg["id"]
    script_id = str(abs(hash((cfg["id"],) + tuple(inbox[0].values()))))[0:8]
    # LOG.log(mp.DEFAULT, "@papy;script %s:%s started" % (script_name, script_id))
    # LOG.log(mp.SUBDEFAULT, "@papy;%s:%s received: %s" % (script_name, script_id, inbox))
    args = {}
    args["params"] = dict(cfg["params"])
    args["in"] = {}
    for in_port in cfg["in"]:
        for inin_ports in inbox:
            in_path = inin_ports.get(in_port, None)
            if in_path is not None:
                # first matching input-output (including type) port is
                # linked; remaining ignored
                args["in"][in_port] = in_path
                break
    # check that all input ports are connected
    if len(args["in"]) < len(cfg["in"]):
        raise Exception("not all in_ports connected, got: %s" % (args["in"],))
    # create output file for out_ports
    args["out"] = {}
    out = {}
    for i, (out_port, out_ext) in enumerate(cfg["out"]):
        if cfg["in"] == tuple(out_port_ for out_port_, _ in cfg["out"]):
            pfx = args["in"][cfg["in"][i]].split("/")[-1].split(".")[0] + "_"
            base = cfg["id"]
        else:
            pfx = args["in"][cfg["in"][0]].split("/")[-1].split(".")[0] + "_"
            base = cfg["id"] + "-" + out_port
        if out_ext:
            out_path = cfg["dir"] + "/" + pfx + base + "." + out_ext
        else:
            out_path = cfg["dir"] + "/" + pfx + base
        args["out"][out_port] = out_path
        out[out_port] = out_path
    # evaluate and check for errors
    ret = _eval_script(cfg["evaluator"], cfg["preamble"], cfg["dir"],
                       cfg["executable"], cfg["script"], args)
    if ret[0] != 0:
        # LOG.error("@papy;%s:%s %s:%s:%s" % (script_name, script_id, ret[0],
        #                                     ret[1].replace("\n", "<br>"),
        #                                     ret[2].replace("\n", "<br>")))
        raise Exception(ret[0], cfg["script"], ret[1], ret[2])
    # LOG.log(mp.SUBDEFAULT, "@papy;%s:%s produced:%s" % (script_name, script_id, out))
    # LOG.log(mp.DEFAULT, "@papy;script %s:%s finished" % (script_name, script_id))
    return out
824,355
G square test for binary data.

Args:
    dm: the data matrix to be used (as a numpy.ndarray).
    x: the first node (as an integer).
    y: the second node (as an integer).
    s: the set of neighbouring nodes of x and y (as a set()).

Returns:
    p_val: the p-value of conditional independence.

def g_square_bin(dm, x, y, s):
    def _calculate_tlog(x, y, s, dof, dm):
        nijk = np.zeros((2, 2, dof))
        s_size = len(s)
        z = []
        for z_index in range(s_size):
            z.append(s.pop())
        for row_index in range(0, dm.shape[0]):
            i = dm[row_index, x]
            j = dm[row_index, y]
            k_index = 0
            for z_index in range(s_size):
                k_index += dm[row_index, z[z_index]] * int(pow(2, z_index))
            nijk[i, j, k_index] += 1
        nik = np.ndarray((2, dof))
        njk = np.ndarray((2, dof))
        for k_index in range(dof):
            nik[:, k_index] = nijk[:, :, k_index].sum(axis=1)
            njk[:, k_index] = nijk[:, :, k_index].sum(axis=0)
        nk = njk.sum(axis=0)
        tlog = np.zeros((2, 2, dof))
        tlog.fill(np.nan)
        for k in range(dof):
            tx = np.array([nik[:, k]]).T
            ty = np.array([njk[:, k]])
            tdijk = tx.dot(ty)
            tlog[:, :, k] = nijk[:, :, k] * nk[k] / tdijk
        return (nijk, tlog)

    _logger.debug('Edge %d -- %d with subset: %s' % (x, y, s))
    row_size = dm.shape[0]
    s_size = len(s)
    dof = int(pow(2, s_size))
    row_size_required = 10 * dof
    if row_size < row_size_required:
        _logger.warning('Not enough samples. %s is too small. Need %s.'
                        % (str(row_size), str(row_size_required)))
        return 1
    nijk = None
    if s_size < 6:
        if s_size == 0:
            nijk = np.zeros((2, 2))
            for row_index in range(0, dm.shape[0]):
                i = dm[row_index, x]
                j = dm[row_index, y]
                nijk[i, j] += 1
            tx = np.array([nijk.sum(axis=1)]).T
            ty = np.array([nijk.sum(axis=0)])
            tdij = tx.dot(ty)
            tlog = nijk * row_size / tdij
        if s_size > 0:
            nijk, tlog = _calculate_tlog(x, y, s, dof, dm)
    else:  # s_size >= 6
        nijk = np.zeros((2, 2, 1))
        i = dm[0, x]
        j = dm[0, y]
        k = []
        for z in s:
            k.append(dm[:, z])
        k = np.array(k).T
        parents_count = 1
        parents_val = np.array([k[0, :]])
        nijk[i, j, parents_count - 1] = 1
        for it_sample in range(1, row_size):
            is_new = True
            i = dm[it_sample, x]
            j = dm[it_sample, y]
            tcomp = parents_val[:parents_count, :] == k[it_sample, :]
            for it_parents in range(parents_count):
                if np.all(tcomp[it_parents, :]):
                    nijk[i, j, it_parents] += 1
                    is_new = False
                    break
            if is_new is True:
                parents_count += 1
                parents_val = np.r_[parents_val, [k[it_sample, :]]]
                nnijk = np.zeros((2, 2, parents_count))
                for p in range(parents_count - 1):
                    nnijk[:, :, p] = nijk[:, :, p]
                nnijk[i, j, parents_count - 1] = 1
                nijk = nnijk
        nik = np.ndarray((2, parents_count))
        njk = np.ndarray((2, parents_count))
        for k_index in range(parents_count):
            nik[:, k_index] = nijk[:, :, k_index].sum(axis=1)
            njk[:, k_index] = nijk[:, :, k_index].sum(axis=0)
        nk = njk.sum(axis=0)
        tlog = np.zeros((2, 2, parents_count))
        tlog.fill(np.nan)
        for k in range(parents_count):
            tX = np.array([nik[:, k]]).T
            tY = np.array([njk[:, k]])
            tdijk = tX.dot(tY)
            tlog[:, :, k] = nijk[:, :, k] * nk[k] / tdijk
    log_tlog = np.log(tlog)
    G2 = np.nansum(2 * nijk * log_tlog)
    # _logger.debug('dof = %d' % dof)
    # _logger.debug('nijk = %s' % nijk)
    # _logger.debug('tlog = %s' % tlog)
    # _logger.debug('log(tlog) = %s' % log_tlog)
    _logger.debug('G2 = %f' % G2)
    p_val = chi2.sf(G2, dof)
    _logger.info('p_val = %s' % str(p_val))
    return p_val
824,363
Removes a **node object** from the ``DictGraph``. Returns ``True`` if a
**node object** has been removed. If the **node object** is not in the
``DictGraph`` raises a ``KeyError``.

Arguments:
    - node(``object``) **node object** to be removed. Any hashable Python
      ``object``.

def del_node(self, node):
    for node_ in self.values():
        if node in node_:
            node_.pop(node)
    return bool(self.pop(node))
824,723
Adds an edge to the ``DictGraph``. An edge is just a pair of **node
objects**. If the **node objects** are not in the graph they are created.

Arguments:
    - edge(iterable) An ordered pair of **node objects**. The edge is
      assumed to have a direction from the first to the second **node
      object**.
    - double(``bool``) [default: ``False``] If ``True`` the reverse edge is
      also added.

def add_edge(self, edge, double=False):
    (left_entity, right_entity) = edge
    self.add_node(left_entity)
    self.add_node(right_entity)
    self[left_entity].update({right_entity: self[right_entity]})
    if double:
        self.add_edge((edge[1], edge[0]))
824,724
Removes an edge from the ``DictGraph``. An edge is a pair of **node
objects**. The **node objects** are not removed from the ``DictGraph``.

Arguments:
    - edge(``tuple``) An ordered pair of **node objects**. The edge is
      assumed to have a direction from the first to the second **node
      object**.
    - double(``bool``) [default: ``False``] If ``True`` the reverse edge is
      also removed.

def del_edge(self, edge, double=False):
    (left_entity, right_entity) = edge
    self[left_entity].pop(right_entity)
    if double:
        self.del_edge((edge[1], edge[0]))
824,725
Adds **node objects** to the graph.

Arguments:
    - nodes(iterable) Sequence of **node objects** to be added to the
      ``DictGraph``
    - xtras(iterable) [default: ``None``] Sequence of ``Node.xtra``
      dictionaries corresponding to the **node objects** being added. See:
      ``Graph.add_node``.

def add_nodes(self, nodes, xtras=None):
    for node, xtra in izip(nodes, (xtras or repeat(None))):
        self.add_node(node, xtra)
824,726
Adds edges to the graph. Takes optional arguments for
``DictGraph.add_edge``.

Arguments:
    - edges(iterable) Sequence of edges to be added to the ``DictGraph``.

def add_edges(self, edges, *args, **kwargs):
    for edge in edges:
        self.add_edge(edge, *args, **kwargs)
824,727
Removes edges from the graph. Takes optional arguments for
``DictGraph.del_edge``.

Arguments:
    - edges(iterable) Sequence of edges to be removed from the
      ``DictGraph``.

def del_edges(self, edges, *args, **kwargs):
    for edge in edges:
        self.del_edge(edge, *args, **kwargs)
824,728
Returns a ``tuple`` of all edges in the ``DictGraph``; an edge is a pair of
**node objects**.

Arguments:
    - nodes(iterable) [default: ``None``] iterable of **node objects**; if
      specified the edges will be limited to those outgoing from one of the
      specified nodes.

def edges(self, nodes=None):
    # If a Node has been directly updated (__not__ recommended)
    # then the Graph will not know the added nodes and therefore will
    # miss half of their edges.
    edges = set()
    for node in (nodes or self.iterkeys()):
        ends = self[node].nodes()
        edges.update([(node, end) for end in ends])
    return tuple(edges)
824,729
Returns a ``tuple`` of incoming edges for a **node object**.

Arguments:
    - node(``object``) **node object** present in the graph to be queried
      for incoming edges.

def incoming_edges(self, node):
    edges = self.edges()
    in_edges = []
    for out_node, in_node in edges:
        if node is in_node:
            in_edges.append((out_node, in_node))
    return tuple(in_edges)
824,730
Returns a ``tuple`` of outgoing edges for a **node object**.

Arguments:
    - node(``object``) **node object** present in the graph to be queried
      for outgoing edges.

def outgoing_edges(self, node):
    # TODO: make outgoing_edges less insane
    edges = self.edges()
    out_edges = []
    for out_node, in_node in edges:
        if node is out_node:
            out_edges.append((out_node, in_node))
    return tuple(out_edges)
824,731
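A small end-to-end sketch of the DictGraph edge API above, assuming the class is importable and constructible with no arguments (node objects here are short strings, which CPython interns, so the identity checks in incoming_edges/outgoing_edges behave as expected):

g = DictGraph()
g.add_edges([("a", "b"), ("a", "c"), ("b", "c")])
print(sorted(g.edges()))      # [('a', 'b'), ('a', 'c'), ('b', 'c')]
print(g.incoming_edges("c"))  # (('a', 'c'), ('b', 'c')) in some order
print(g.outgoing_edges("a"))  # (('a', 'b'), ('a', 'c')) in some order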
Passes inputs with indices in s. By default passes the whole inbox.

Arguments:
    - s(sequence) [default: ``None``] The default translates to a range for
      all inputs of the "inbox" i.e. ``range(len(inbox))``

def spasser(inbox, s=None):
    seq = (s or range(len(inbox)))
    return [input_ for i, input_ in enumerate(inbox) if i in seq]
824,782
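spasser() filters the inbox positionally; for example:

print(spasser(["a", "b", "c", "d"], s=[0, 2]))  # ['a', 'c']
print(spasser(["a", "b"]))                      # ['a', 'b'] (default passes everything)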
Zips inputs from inbox with indices in "s". By default zips the whole inbox
(all indices).

Arguments:
    - s(sequence) [default: ``None``]

def szipper(inbox, s=None):
    # TODO: test szipper
    return zip(*[input_ for i, input_ in enumerate(inbox) if i in s])
824,783
String joins input with indices in s.

Arguments:
    - s(sequence) [default: ``None``] ``tuple`` or ``list`` of indices of
      the elements which will be joined.
    - join(``str``) [default: ``""``] String which will join the elements
      of the inbox i.e. ``join.join()``.

def sjoiner(inbox, s=None, join=""):
    return join.join([input_ for i, input_ in enumerate(inbox) if i in s])
824,784
Creates a string generator from a stream (file handle) containing data
delimited by the delimiter strings. This is a stand-alone function and
should be used to feed external data into a pipeline.

Arguments:
    - handle(``file``) A file handle open for reading.
    - delimiter(``str``) [default: ``None``] The default means that items
      will be separated by two new-line characters i.e.: ``"\\n\\n"``.

def load_stream(handle, delimiter=None):
    delimiter = (delimiter or "") + "\n"
    while True:
        item = []
        while True:
            line = handle.readline()
            if line == "":
                # end of stream; return instead of raising StopIteration
                # inside a generator (PEP 479)
                return
            elif line == delimiter:
                if item:
                    break
            elif line != '\n':
                item.append(line)
        yield "".join(item)
824,786
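load_stream() splits a stream on blank lines by default (delimiter=None means items end at "\n\n"). A self-contained example using io.StringIO as the stream:

from io import StringIO

handle = StringIO("line1\nline2\n\nline3\n\n")
for item in load_stream(handle):
    print(repr(item))
# 'line1\nline2\n'
# 'line3\n'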
Creates a line generator from a stream (file handle) containing data in
lines.

Arguments:
    - follow(``bool``) [default: ``False``] If ``True`` follows the file
      after it finishes like 'tail -f'.
    - wait(``float``) [default: ``0.1``] time to wait in seconds between
      file polls.

def make_lines(handle, follow=False, wait=0.1):
    while True:
        line = handle.readline()
        if line:
            yield line
        elif follow:
            time.sleep(wait)
        else:
            # end of file; return instead of raising StopIteration (PEP 479)
            return
824,789
Initialize the corpus builder and its datastore.

Args:
    corpus_dir (str): The directory to save the generated corpus
    datastore_type (Optional[str]): Format to save generated corpus.
        Specify either 'file' or 'sqlite'.
    db_name (Optional[str]): Name of database if 'sqlite' is selected.

def __init__(self, corpus_dir, datastore_type='file', db_name='corpus.db'):
    self.g = Goose({'browser_user_agent': 'Mozilla', 'parser_class': 'soup'})
    # self.g = Goose({'browser_user_agent': 'Mozilla'})
    self.corpus_dir = corpus_dir
    self.datastore_type = datastore_type
    self.db_name = db_name
    self.stats = defaultdict(int)
    self._create_corpus_dir(self.corpus_dir)
    self.db = None
    if self.datastore_type == 'sqlite':
        self.db = self.corpus_dir + '/' + self.db_name
        self._set_up_db(self.db)
824,886
Read links and associated categories for specified articles in a text file,
separated by a space.

Args:
    file_path (str): The path to text file with news article links and
        category

Returns:
    articles: Array of tuples that contains article link & category,
    e.g. [('IPO', 'www.cs.columbia.edu')]

def read_links_file(self, file_path):
    articles = []
    with open(file_path) as f:
        for line in f:
            line = line.strip()
            # Ignore blank lines
            if len(line) != 0:
                link, category = line.split(' ')
                articles.append((category.rstrip(), link.strip()))
    return articles
824,888
Setup integration.

Register plug-ins and integrate into the host.

Arguments:
    console (bool): DEPRECATED
    port (int, optional): DEPRECATED

def setup(console=False, port=None):
    if self._has_been_setup:
        teardown()
    register_plugins()
    register_host()
    self._has_been_setup = True
    print("pyblish: Pyblish loaded successfully.")
825,969
Create new sketch.

Params:
    <int> width
    <int> depth
    <str> path
    <int> flags
    <int> seed

def create(self, width=0, depth=0, path=None, flags=0, seed=0):
    return self.create_method(self, width, depth, path, flags, seed)
825,990
Get key-value.

Params:
    <str> key
    <int> key_length

Return:
    <int> key_value

def get(self, key, key_length=0):
    if key_length < 1:
        key_length = len(key)
    return self.get_method(self, key, key_length)
825,992
Set value to key-value.

Params:
    <str> key
    <int> value
    <int> key_length

Return:
    <int> key_value

def set(self, key, value, key_length=0):
    if key_length < 1:
        key_length = len(key)
    if self.k:
        self._update(key, value)
    return self.set_method(self, key, key_length, value)
825,993
Add value to key-value.

Params:
    <str> key
    <int> value
    <int> key_length

Return:
    <int> key_value

def add(self, key, value, key_length=0):
    if key_length < 1:
        key_length = len(key)
    val = self.add_method(self, key, key_length, value)
    if self.k:
        self._update(key, value)
    return val
825,994
Increment key-value by one.

Params:
    <str> key
    <int> key_length

Return:
    <int> key_value

def inc(self, key, key_length=0):
    if key_length < 1:
        key_length = len(key)
    val = self.add_method(self, key, key_length, 1)
    if self.k:
        self._update(key, val)
    return val
825,995
Shrink sketch.

Params:
    <Sketch> src_sketch
    <int> width
    <str> path
    <int> flags

def shrink(self, src, width=0, path=None, flags=0):
    self.shrink_method(self, src, width, path, flags)
825,997
Merge two sketches.

Params:
    <Sketch> sketch
    <lambda> | <function> lhs_filter
    <lambda> | <function> rhs_filter

def merge(self, rhs, lhs_filter=None, rhs_filter=None):
    if lhs_filter or rhs_filter:
        get_ = self.get___method
        set_ = self.set___method
        max_value = _madoka.Sketch_max_value(self)
        for table_id in range(self.depth):
            for cell_id in range(self.width):
                lhs_val = get_(self, table_id, cell_id)
                rhs_val = get_(rhs, table_id, cell_id)
                if lhs_filter:
                    lhs_val = lhs_filter(lhs_val)
                if rhs_filter:
                    rhs_val = rhs_filter(rhs_val)
                if (lhs_val >= max_value) or (rhs_val >= (max_value - lhs_val)):
                    lhs_val = self.max_value
                else:
                    lhs_val += rhs_val
                set_(self, table_id, cell_id, lhs_val)
    else:
        self.merge_method(self, rhs)
    if rhs.k:
        for (key, val) in rhs.most_common(rhs.k):
            self._update(key, val)
825,998
Set values from dict.

Params:
    <dict <str> <int>> src_dict
    <str> method ('set' or 'add')

def fromdict(self, src_dict, method='set'):
    if method == 'set':
        _method = self.set_method
    else:
        _method = self.add_method
    if hasattr(src_dict, 'iteritems'):
        for (key, val) in src_dict.iteritems():
            _method(self, key, len(key), val)
            if self.k:
                self._update(key, val)
    else:
        for (key, val) in src_dict.items():
            _method(self, key, len(key), val)
            if self.k:
                self._update(key, val)
826,001
Create new sketch.

Params:
    <int> width
    <int> max_value
    <str> path
    <int> flags
    <int> seed

def create(self, width=0, max_value=0, path=None, flags=0, seed=0):
    return _madoka.Sketch_create(self, width, max_value, path, flags, seed)
826,005
Increment key-value.

Params:
    <str> key
    <int> key_length

Return:
    <int> key_value

def inc(self, key, key_length=0):
    if key_length < 1:
        key_length = len(key)
    return _madoka.Sketch_inc(self, key, key_length)
826,006
Shrink sketch.

Params:
    <Sketch> src_sketch
    <int> width
    <int> max_value
    <lambda> | <function> filter
    <str> path
    <int> flags

def shrink(self, src, width=0, max_value=0, filter_method=None, path=None,
           flags=0):
    if filter_method:
        get_ = _madoka.Sketch_get__
        set_ = _madoka.Sketch_set__
        new_sketch = Sketch(width, max_value, path, flags, src.seed)
        for table_id in range(SKETCH_DEPTH):
            for offset in range(width, src.width, width):
                for cell_id in range(width):
                    val = get_(src, table_id, offset + cell_id)
                    val = filter_method(val)
                    val = max_value if val > max_value else val
                    if val > get_(new_sketch, table_id, cell_id):
                        set_(new_sketch, table_id, cell_id, val)
        self.swap(new_sketch)
    else:
        _madoka.Sketch_shrink(self, src, width, max_value, None, path, flags)
826,007
The initializer sets up the stack-drift utility.

Args:
    kwargs: a dict of arguments; 'Stack' (required), 'Verbose', 'Profile'
        and 'Region' are used.

Raises:
    SystemError: if no stack name is given or client initialization fails.

def __init__(self, **kwargs):
    try:
        self.nap_time = int(os.environ.get('CSU_POLL_INTERVAL', 30))
    except Exception:
        self.nap_time = 15
    self._stack_name = kwargs.get('Stack')
    self._verbose = kwargs.get('Verbose', False)
    if not self._stack_name:
        logging.error('no stack name given, exiting')
        raise SystemError
    if not self._init_boto3_clients(kwargs.get('Profile'), kwargs.get('Region')):
        logging.error('client initialization failed, exiting')
        raise SystemError
826,065
The utility requires a boto3 client for CloudFormation.

Args:
    profile - an optional AWS credentials profile name
    region - an optional AWS region

Returns:
    Good or Bad; True or False

def _init_boto3_clients(self, profile, region):
    try:
        session = None
        if profile and region:
            session = boto3.session.Session(profile_name=profile,
                                            region_name=region)
        elif profile:
            session = boto3.session.Session(profile_name=profile)
        elif region:
            session = boto3.session.Session(region_name=region)
        else:
            session = boto3.session.Session()
        self._cloud_formation = session.client('cloudformation')
        return True
    except Exception as wtf:
        logging.error(wtf, exc_info=True)
        return False
826,066
Determine the drift of the stack.

Args:
    None

Returns:
    Good or Bad; True or False

def determine_drift(self):
    try:
        response = self._cloud_formation.detect_stack_drift(StackName=self._stack_name)
        drift_request_id = response.get('StackDriftDetectionId', None)
        if drift_request_id:
            logging.info('drift_request_id: %s - polling', drift_request_id)
            drift_calc_done = False
            while not drift_calc_done:
                time.sleep(self.nap_time)
                response = self._cloud_formation.describe_stack_drift_detection_status(
                    StackDriftDetectionId=drift_request_id
                )
                current_state = response.get('DetectionStatus', None)
                logging.info(
                    'describe_stack_drift_detection_status(): {}'.format(current_state)
                )
                drift_calc_done = current_state in CALC_DONE_STATES
            drift_answer = response.get('StackDriftStatus', 'UNKNOWN')
            logging.info('drift of {}: {}'.format(self._stack_name, drift_answer))
            if drift_answer == 'DRIFTED':
                if self._verbose:
                    self._print_drift_report()
                return False
            else:
                return True
        else:
            logging.warning('drift_request_id is None')
            return False
    except Exception as wtf:
        logging.error(wtf, exc_info=True)
        return False
826,067
Report the drift of the stack.

Args:
    None

Returns:
    Good or Bad; True or False

def _print_drift_report(self):
    try:
        response = self._cloud_formation.describe_stack_resources(StackName=self._stack_name)
        rows = []
        for resource in response.get('StackResources', []):
            row = []
            row.append(resource.get('LogicalResourceId', 'unknown'))
            row.append(resource.get('PhysicalResourceId', 'unknown'))
            row.append(resource.get('ResourceStatus', 'unknown'))
            row.append(resource.get('DriftInformation', {}).get('StackResourceDriftStatus', 'unknown'))
            rows.append(row)
        print('Drift Report:')
        print(tabulate(rows, headers=[
            'Logical ID',
            'Physical ID',
            'Resource Status',
            'Drift Info'
        ]))
    except Exception as wtf:
        logging.error(wtf, exc_info=True)
        return False
    return True
826,068
Parse a file-like object or string.

Args:
    file_or_string (file, str): File-like object or string.

Returns:
    ParseResults: instance of pyparsing parse results.

def parse(file_or_string):
    from mysqlparse.grammar.sql_file import sql_file_syntax
    if hasattr(file_or_string, 'read') and hasattr(file_or_string.read, '__call__'):
        return sql_file_syntax.parseString(file_or_string.read())
    elif isinstance(file_or_string, six.string_types):
        return sql_file_syntax.parseString(file_or_string)
    else:
        raise TypeError("Expected file-like or string object, but got '{type_name}' instead.".format(
            type_name=type(file_or_string).__name__,
        ))
826,073
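parse() accepts either a raw SQL string or an open file; both forms feed the same pyparsing grammar. A usage sketch, assuming the function is exposed as mysqlparse.parse (the file name is hypothetical):

import mysqlparse

result = mysqlparse.parse("ALTER TABLE foo ADD COLUMN bar INT NOT NULL;")
with open("migration.sql") as sql_file:
    result = mysqlparse.parse(sql_file)  # file-like objects are read() first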
Return True if the OS name in /etc/lsb-release of the host given by fabric
param `-H` is the same as given by argument, False otherwise. If arg
version_id is not None, only return True if it matches /etc/lsb-release
too.

Args:
    name: 'Debian GNU/Linux', 'Ubuntu'
    version_id(None or str): None, '14.04', (Ubuntu) '16.04', (Ubuntu)
        '8', (Debian)

def is_os(name, version_id=None):
    result = False
    os_release_infos = _fetch_os_release_infos()
    if name == os_release_infos.get('name', None):
        if version_id is None:
            result = True
        elif version_id == os_release_infos.get('version_id', None):
            result = True
    return result
826,209
Determine latest stable python versions and return them as a list of str.

Args:
    minors([<str>,..]): List of python minor versions as str, eg.
        ['2.6', '2.7', '3.3', '3.4', '3.5', '3.6']

Return example:
    ['2.6.9', '2.7.14', '3.3.7', '3.4.8', '3.5.5', '3.6.4']

def determine_latest_pythons(minors):
    # eg: ['2.6.9', '2.7.14', '3.3.7', '3.4.8', '3.5.5', '3.6.4']
    latests = []
    versions_str = fabric.api.local(flo(
        'pyenv install --list | tr -d [:blank:] | '
        'grep -P "^[\d\.]+$"'), capture=True)
    versions = versions_str.split()
    for minor in minors:
        candidates = [version for version in versions
                      if version.startswith(minor)]
        # sort version numbers: https://stackoverflow.com/a/2574090
        candidates.sort(key=lambda s: [int(u) for u in s.split('.')])
        latest = candidates[-1]
        latests.append(latest)
    print(latests)
    return latests
826,212
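The sort key in the candidates.sort() call is what makes '3.6.10' rank above '3.6.4'; plain string sorting would get it wrong:

versions = ["3.6.4", "3.6.10", "3.6.2"]
print(sorted(versions))  # ['3.6.10', '3.6.2', '3.6.4'] (lexicographic, wrong)
versions.sort(key=lambda s: [int(u) for u in s.split('.')])
print(versions)          # ['3.6.2', '3.6.4', '3.6.10'] (numeric, correct)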
Install or update Janus, a distribution of addons and mappings for vim.

More info: https://github.com/carlhuda/janus
Customization: https://github.com/carlhuda/janus/wiki/Customization

Args:
    uninstall: If not None, uninstall Janus and restore old vim config

def vim_janus(uninstall=None):
    if uninstall is not None:
        uninstall_janus()
    else:
        if not exists('~/.vim/janus'):
            print_msg('not installed => install')
            install_janus()
        else:
            print_msg('already installed => update')
            update_janus()
        customize_janus()
        show_files_used_by_vim_and_janus()
826,214
Helper function to facilitate upsert.

Args:
    ini_data - the dictionary of info to run upsert

Exit:
    0 - good
    1 - bad

def start_upsert(ini_data):
    stack_driver = CloudStackUtility(ini_data)
    poll_stack = not ini_data.get('no_poll', False)
    if stack_driver.upsert():
        logging.info('stack create/update was started successfully.')
        if poll_stack:
            stack_tool = None
            try:
                profile = ini_data.get('environment', {}).get('profile')
                if profile:
                    boto3_session = boto3.session.Session(profile_name=profile)
                else:
                    boto3_session = boto3.session.Session()
                region = ini_data['environment']['region']
                stack_name = ini_data['environment']['stack_name']
                cf_client = stack_driver.get_cloud_formation_client()
                if not cf_client:
                    cf_client = boto3_session.client('cloudformation',
                                                     region_name=region)
                stack_tool = StackTool(stack_name, region, cf_client)
            except Exception as wtf:
                logging.warning('there was a problem creating stack tool: {}'.format(wtf))
            if stack_driver.poll_stack():
                try:
                    logging.info('stack create/update was finished successfully.')
                    stack_tool.print_stack_info()
                except Exception as wtf:
                    logging.warning('there was a problem printing stack info: {}'.format(wtf))
                sys.exit(0)
            else:
                try:
                    logging.error('stack create/update did not go well.')
                    stack_tool.print_stack_events()
                except Exception as wtf:
                    logging.warning('there was a problem printing stack events: {}'.format(wtf))
                sys.exit(1)
    else:
        logging.error('start of stack create/update did not go well.')
        sys.exit(1)
826,227
Read the INI file.

Args:
    ini_file - path to the file

Returns:
    A dictionary of stuff from the INI file

Exits:
    1 - if problems are encountered

def read_config_info(ini_file):
    try:
        config = RawConfigParser()
        config.optionxform = lambda option: option
        config.read(ini_file)
        the_stuff = {}
        for section in config.sections():
            the_stuff[section] = {}
            for option in config.options(section):
                the_stuff[section][option] = config.get(section, option)
        return the_stuff
    except Exception as wtf:
        logging.error('Exception caught in read_config_info(): {}'.format(wtf))
        traceback.print_exc(file=sys.stdout)
        return sys.exit(1)
826,228
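read_config_info() returns the INI contents as a nested dict, preserving option case via the optionxform override. A self-contained run (file name and keys are illustrative):

with open("example.ini", "w") as fh:
    fh.write("[environment]\nregion = us-east-1\nstack_name = my-stack\n")

print(read_config_info("example.ini"))
# {'environment': {'region': 'us-east-1', 'stack_name': 'my-stack'}}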
StackTool is a simple tool to print some specific data about a
CloudFormation stack.

Args:
    stack_name - name of the stack of interest
    region - AWS region where the stack was created
    cf_client - a boto3 CloudFormation client

Returns:
    nothing

Raises:
    SystemError - if everything isn't just right

def __init__(self, stack_name, region, cf_client):
    try:
        self._stack_name = stack_name
        self._region = region
        self._cf_client = cf_client
    except Exception:
        raise SystemError
826,230
List resources from the given stack.

Args:
    None

Returns:
    A dictionary of resources, or None if things went sideways

def print_stack_info(self):
    try:
        rest_api_id = None
        deployment_found = False
        response = self._cf_client.describe_stack_resources(
            StackName=self._stack_name
        )
        print('\nThe following resources were created:')
        rows = []
        for resource in response['StackResources']:
            if resource['ResourceType'] == 'AWS::ApiGateway::RestApi':
                rest_api_id = resource['PhysicalResourceId']
            elif resource['ResourceType'] == 'AWS::ApiGateway::Deployment':
                deployment_found = True
            row = []
            row.append(resource['ResourceType'])
            row.append(resource['LogicalResourceId'])
            row.append(resource['PhysicalResourceId'])
            rows.append(row)
        print(tabulate(rows, headers=['Resource Type', 'Logical ID', 'Physical ID']))
        if rest_api_id and deployment_found:
            url = 'https://{}.execute-api.{}.amazonaws.com/{}'.format(
                rest_api_id,
                self._region,
                '<stage>'
            )
            print('\nThe deployed service can be found at this URL:')
            print('\t{}\n'.format(url))
        return response
    except Exception as wtf:
        print(wtf)
        return None
826,231
List events from the given stack.

Args:
    None

Returns:
    None

def print_stack_events(self):
    first_token = '7be7981bd6287dd8112305e8f3822a6f'
    keep_going = True
    next_token = first_token
    current_request_token = None
    rows = []
    try:
        while keep_going and next_token:
            if next_token == first_token:
                response = self._cf_client.describe_stack_events(
                    StackName=self._stack_name
                )
            else:
                response = self._cf_client.describe_stack_events(
                    StackName=self._stack_name,
                    NextToken=next_token
                )
            next_token = response.get('NextToken', None)
            for event in response['StackEvents']:
                row = []
                event_time = event.get('Timestamp')
                request_token = event.get('ClientRequestToken', 'unknown')
                if current_request_token is None:
                    current_request_token = request_token
                elif current_request_token != request_token:
                    keep_going = False
                    break
                row.append(event_time.strftime('%x %X'))
                row.append(event.get('LogicalResourceId'))
                row.append(event.get('ResourceStatus'))
                row.append(event.get('ResourceStatusReason', ''))
                rows.append(row)
        if len(rows) > 0:
            print('\nEvents for the current upsert:')
            print(tabulate(rows, headers=['Time', 'Logical ID', 'Status', 'Message']))
            return True
        else:
            print('\nNo stack events found\n')
    except Exception as wtf:
        print(wtf)
    return False
826,232
Write data to cache file.

Parameters:
    content - a data structure to save into the cache file
    filename - name of the cache file (resolved via get_cache_path())

def write(content, filename='cache'):
    cache_path = get_cache_path(filename)
    with open(cache_path, 'w') as file:
        if content is not None:
            json.dump(content, file, indent=3, sort_keys=True)
826,530
Recursively iterate through 'package_module' and add every fabric task to
the 'addon_module' keeping the task hierarchy.

Args:
    addon_module(types.ModuleType)
    package_module(types.ModuleType)
    package_name(str): Required, to avoid redundant addition of tasks

Return:
    None

def add_tasks_r(addon_module, package_module, package_name):
    module_dict = package_module.__dict__
    for attr_name, attr_val in module_dict.items():
        if isinstance(attr_val, fabric.tasks.WrappedCallableTask):
            addon_module.__dict__[attr_name] = attr_val
        elif attr_name != package_name \
                and isinstance(attr_val, types.ModuleType) \
                and attr_val.__name__.startswith('fabsetup_') \
                and attr_name.split('.')[-1] != package_name:
            submodule_name = flo('{addon_module.__name__}.{attr_name}')
            submodule = get_or_create_module_r(submodule_name)
            package_module = attr_val
            add_tasks_r(submodule, package_module, package_name)
            addon_module.__dict__[attr_name] = submodule
826,546
Load a fabsetup addon given by 'package_name' and hook it in the base task
namespace 'username'.

Args:
    username(str)
    package_name(str)
    _globals(dict): the globals() namespace of the fabric script.

Return:
    None

def load_addon(username, package_name, _globals):
    addon_module = get_or_create_module_r(username)
    package_module = __import__(package_name)
    add_tasks_r(addon_module, package_module, package_name)
    _globals.update({username: addon_module})
    del package_module
    del addon_module
826,547