code
stringlengths
52
7.75k
docs
stringlengths
1
5.85k
def launch_monitor_server(host, port, logger):
    """Launch a monitor server and serve requests forever.

    :param host: the host/interface to bind to
    :param port: the monitor port
    :param logger: the logger
    """
    logger.info('Starting monitor server on host %s port %d' % (host, port))
    ThreadedTCPServer((host, port), MonitorHandler).serve_forever()
def create_handler(cls, message_handler, buffer_size, logger):
    """Configure this handler class for a server run.

    Class variables are used because the framework creates a new handler
    instance for each connection.

    :param message_handler: the MessageHandler used to process each message.
    :param buffer_size: the TCP buffer size.
    :param logger: the global logger.
    :return: this class.
    """
    cls.BUFFER_SIZE = buffer_size
    cls.message_handler = message_handler
    cls.logger = logger
    # give the message handler its own logger, at the global logger's level
    handler_logger = logging.getLogger(message_handler.__class__.__name__)
    handler_logger.setLevel(logger.level)
    message_handler.logger = handler_logger
    return cls
def handle(self):
    """The required handle method: pump messages until the peer closes.

    Delegates each read to the class-level message handler; the socket is
    always closed via the finally block.
    """
    logger = StreamHandler.logger
    logger.debug("handling requests with message handler %s " % StreamHandler.message_handler.__class__.__name__)
    message_handler = StreamHandler.message_handler
    try:
        while True:
            logger.debug('waiting for more data')
            # the handler returns falsy when the connection is done
            if not message_handler.handle(self.request, StreamHandler.BUFFER_SIZE):
                break
        logger.warning("connection closed from %s" % (self.client_address[0]))
        # Fix: dropped the redundant close() here -- finally always closes.
    except Exception:
        # Fix: was a bare except, which would also swallow SystemExit
        # and KeyboardInterrupt.
        logger.exception("connection closed from %s" % (self.client_address[0]))
    finally:
        self.request.close()
def receiveError(self, reasonCode, description):
    """Called when the peer sends an SSH disconnect error message.

    Maps the reason code to a specific error class (falling back to
    DisconnectError) before notifying connectionClosed and the base class.
    """
    error_cls = disconnectErrors.get(reasonCode, DisconnectError)
    self.connectionClosed(error_cls(reasonCode, description))
    SSHClientTransport.receiveError(self, reasonCode, description)
def get_logger(name):
    """Special get_logger; *name* is usually the application name.

    :param name: logger name; may also be a python file name or path
                 (e.g. ``__file__``), in which case the module name is used.
    :return: the logger for the derived name
    """
    if name.endswith(".py"):
        # strip the extension, then any leading directories
        name = name[:-3]
        if os.sep in name:
            name = name.rsplit(os.sep, 1)[-1]
    return logging.getLogger(name)
def traceback_string():
    """Format the most recent traceback.

    Useful when a program has an overall try/except and wants to log the
    program trace.

    :return: formatted traceback string (or None if no traceback available)
    """
    exc_type, exc_value, exc_traceback = traceback.sys.exc_info()
    if exc_type is None:
        return None
    lines = [str(exc_value)]
    lines.extend(traceback.format_tb(exc_traceback))
    return "\n".join(lines)
def lookup_path(bin_name):
    """Resolve *bin_name* against a fixed list of system binary directories.

    Calls to external binaries can't depend on $PATH.

    :return: fully-qualified path of an executable file, or False if not found
    """
    search_dirs = ('/usr/local/sbin/', '/usr/local/bin/', '/usr/sbin/', '/usr/bin/')
    for directory in search_dirs:
        candidate = directory + bin_name
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            return candidate
    return False
def get_message_handler(self, message_handlers):
    """Look up the MessageHandler for the configured Encoder.

    :param message_handlers: a dictionary of MessageHandler keyed by encoder
    :return: a MessageHandler
    :raises NotImplementedError: when no handler exists for the encoder
    """
    encoder = self.options.encoder
    if encoder not in message_handlers:
        raise NotImplementedError('No RequestHandler defined for given encoder (%s).' % encoder)
    return message_handlers[encoder]
def handle(self, event):
    """Entry point to handle user events.

    Dispatches to the ``on_<event>`` method named by ``event.event``.

    :param event: received event (see Twitter's streaming message types).
    """
    callback = getattr(self, 'on_{event}'.format(event=event.event), None)
    # Fix: the original called a possibly-None callback, raising TypeError
    # for any event type without a handler; unknown events are now ignored.
    if callback is not None:
        callback(event)
def get_form(self, form_class=None):
    """Build the form for this update view.

    NOTE(review): intentionally raises until ProductStock is replaced by
    ProductUnique; the disabled initial-value logic is preserved below.
    """
    form = super(LineAlbaranUpdate, self).get_form(form_class)
    raise Exception("Cambiar ProductStock por ProductUnique")
    # ps = ProductStock.objects.filter(line_albaran=self.object).first()
    # if ps:
    #     # initial field
    #     form.fields['storage'].initial = ps.batch.zone.storage
    #     form.fields['zone'].initial = ps.batch.zone
    #     form.fields['batch'].initial = ps.batch
    return form
def _emplace_pmrna(mrnas, parent, strict=False): mrnas.sort(key=lambda m: (m.cdslen, m.get_attribute('ID'))) pmrna = mrnas.pop() if strict: parent.children = [pmrna] else: parent.children = [c for c in parent.children if c not in mrnas]
Retrieve the primary mRNA and discard all others.
def _emplace_transcript(transcripts, parent): transcripts.sort(key=lambda t: (len(t), t.get_attribute('ID'))) pt = transcripts.pop() parent.children = [pt]
Retrieve the primary transcript and discard all others.
def primary_mrna(entrystream, parenttype='gene'):
    """Select a single mRNA as representative for each protein-coding gene.

    The primary mRNA is the one with the longest translation product
    (ties broken by feature ID). All GFF3 entry types (pragmas, features,
    sequences, ...) pass through, but gene features are modified in place
    so that each has at most one mRNA child.

    >>> reader = tag.GFF3Reader(tag.pkgdata('pdom-withseq.gff3'))
    >>> filter = tag.transcript.primary_mrna(reader)
    >>> for gene in tag.select.features(filter, type='gene'):
    ...     assert gene.num_children == 1
    """
    for entry in entrystream:
        if not isinstance(entry, tag.Feature):
            yield entry
            continue
        for parent in tag.select.features(entry, parenttype, traverse=True):
            mrnas = [child for child in parent.children if child.type == 'mRNA']
            if mrnas:
                _emplace_pmrna(mrnas, parent)
        yield entry
def _get_primary_type(ttypes, parent, logstream=stderr): if len(ttypes) > 1: if logstream: # pragma: no branch message = '[tag::transcript::primary_transcript]' message += ' WARNING: feature {:s}'.format(parent.slug) message += ' has multiple associated transcript types' message += ' {}'.format(ttypes) print(message, file=logstream) if 'mRNA' not in ttypes: message = ( 'cannot resolve multiple transcript types if "mRNA" is' ' not one of those types {}'.format(ttypes) ) raise Exception(message) ttypes = ['mRNA'] return ttypes[0]
Check for multiple transcript types and, if possible, select one.
def parse_parent(docname):
    """Given a docname path, return the docname of its parent document."""
    if docname == 'index':
        return None  # the top of the Sphinx project
    lineage = docname.split('/')
    depth = len(lineage)
    if depth == 1:
        return 'index'  # non-index doc in root, e.g. about
    if depth == 2 and lineage[-1] == 'index':
        return 'index'  # blog/index, parent is the root
    if depth == 2:
        return lineage[0] + '/index'  # blog/about
    if lineage[-1] == 'index':
        return '/'.join(lineage[:-2]) + '/index'  # blog/sub/index
    return '/'.join(lineage[:-1]) + '/index'  # blog/sub/about
def parents(self, resources):
    """Walk up the hierarchy and return the list of ancestor resources.

    :param resources: mapping of docname -> resource
    """
    if self.docname == 'index':
        return []  # the root has no parents
    lineage = []
    ancestor = resources.get(self.parent)
    while ancestor is not None:
        lineage.append(ancestor)
        ancestor = resources.get(ancestor.parent)
    return lineage
def acquire(self, resources, prop_name):
    """Starting with self, walk up the parents until *prop_name* is found.

    Checks this instance's own props first, then each ancestor's
    ``acquireds`` (the per-rtype section before the "all" section).
    Returns None when nothing matches.
    """
    own = getattr(self.props, prop_name, None)
    if own:
        return own
    # Parents... can't call acquire recursively, as we have to keep
    # walking the acquireds chain.
    for ancestor in self.parents(resources):
        acquireds = ancestor.props.acquireds
        if not acquireds:
            continue
        for section in (self.rtype, 'all'):
            section_props = acquireds.get(section)
            if section_props:
                value = section_props.get(prop_name)
                if value:
                    return value
    return None
def template(self, resources):
    """Get the template from: YAML, hierarchy, or class.

    The built-in "resource" rtype works out-of-the-box without requiring
    a _templates/resource.html in the docs project: it falls back to the
    page.html that ships with Sphinx.
    """
    acquired = self.acquire(resources, 'template')
    if acquired:
        return acquired
    return 'page' if self.rtype == 'resource' else self.rtype
def find_prop_item(self, prop_name, prop_key, prop_value):
    """Look for a list prop with an item where key == value.

    Image props are a sequence of objects; return the first one whose
    *prop_key* attribute equals *prop_value*, or None if nothing matches.
    """
    items = getattr(self.props, prop_name, None)
    if not items:
        return None
    matches = (item for item in items if getattr(item, prop_key) == prop_value)
    return next(matches, None)
def default():
    """Get the default Shaman instance backed by ``data/trained.json``.

    The instance is created on first use and cached on the class.
    """
    if Shaman._default_instance is not None:
        return Shaman._default_instance
    data_path = (os.path.dirname(__file__) or '.') + '/data/trained.json'
    with open(data_path) as fp:
        trained_set = json.loads(fp.read())
    Shaman._default_instance = Shaman(trained_set)
    return Shaman._default_instance
def detect(self, code):
    """Detect the programming language of *code*.

    Scores each language via naive-Bayes-style keyword log-probabilities,
    then scales by pattern-match ratios, and finally converts the scores
    to percentiles. Returns (language, percentile) pairs, best first.

    NOTE(review): assumes trained_set has 'keywords' mapping
    keyword -> {language: probability} and 'patterns' mapping
    pattern -> {language: average ratio} -- confirm against the trainer.
    """
    keywords = KeywordFetcher.fetch(code)
    probabilities = {}
    for keyword in keywords:
        if keyword not in self.trained_set['keywords']:
            continue
        data = self.trained_set['keywords'][keyword]
        # Average probability of all languages
        p_avg = sum(data.values()) / len(data)
        for language, probability in data.items():
            # By Naive Bayes Classification
            p = probability / p_avg
            probabilities[language] = probabilities.get(language, 0) + math.log(1 + p)
    for pattern, data in self.trained_set['patterns'].items():
        matcher = PatternMatcher(pattern)
        p0 = matcher.getratio(code)
        for language, p_avg in data.items():
            if language not in probabilities:
                continue
            # closer pattern ratio to the trained average -> higher factor
            p = 1 - abs(p_avg - p0)
            probabilities[language] *= p
    # Convert `log` operated probability to percentile
    sum_val = 0
    for language, p in probabilities.items():
        sum_val += math.pow(math.e / 2, p)
    for language, p in probabilities.items():
        probabilities[language] = math.pow(math.e / 2, p) / sum_val * 100
    return sorted(probabilities.items(), key=lambda a: a[1], reverse=True)
def fetch(code):
    """Fetch keywords from *code* as a frequency map.

    Strings are stripped first; single-character tokens and pure numbers
    are ignored; a leading/trailing '-' or '*' is trimmed (pointers or
    negative numbers).
    """
    counts = {}
    stripped = KeywordFetcher._remove_strings(code)
    for token in KeywordFetcher.prog.findall(stripped):
        if len(token) <= 1:
            continue  # ignore single-length words
        if token.isdigit():
            continue  # ignore numbers
        if token[0] == '-' or token[0] == '*':
            token = token[1:]
        if token[-1] == '-' or token[-1] == '*':
            token = token[:-1]
        if len(token) <= 1:
            continue
        counts[token] = counts.get(token, 0) + 1
    return counts
def _remove_strings(code) : removed_string = "" is_string_now = None for i in range(0, len(code)-1) : append_this_turn = False if code[i] == "'" and (i == 0 or code[i-1] != '\\') : if is_string_now == "'" : is_string_now = None elif is_string_now == None : is_string_now = "'" append_this_turn = True elif code[i] == '"' and (i == 0 or code[i-1] != '\\') : if is_string_now == '"' : is_string_now = None elif is_string_now == None : is_string_now = '"' append_this_turn = True if is_string_now == None or append_this_turn == True : removed_string += code[i] return removed_string
Remove strings in code
def getratio(self, code):
    """Get the ratio of *code* matched by this pattern (0.0 - 1.0)."""
    if not code:
        return 0
    remainder = self.prog.sub('', code)
    return (len(code) - len(remainder)) / len(code)
def loadXmlProperty(self, xprop):
    """Load an XML <property> child of the root data being loaded.

    The property's first child element is deserialized through the data
    interface and stored under the property's name.

    :param xprop: <xml.etree.ElementTree.Element>
    """
    if xprop.tag != 'property':
        return
    value = self.dataInterface().fromXml(xprop[0])
    self._xmlData[xprop.get('name', '')] = value
def toXml(self, xparent=None):
    """Convert this object to XML (class name plus its properties).

    :param xparent: <xml.etree.ElementTree.Element> or None
    :return: <xml.etree.ElementTree.Element>
    """
    if xparent is None:
        xml = ElementTree.Element('object')
    else:
        xml = ElementTree.SubElement(xparent, 'object')
    xml.set('class', self.__class__.__name__)
    for name, value in self._xmlData.items():
        child = ElementTree.SubElement(xml, 'property')
        child.set('name', name)
        XmlDataIO.toXml(value, child)
    return xml
def fromXml(cls, xml):
    """Restore an object from XML, dispatching on the 'class' attribute.

    :param xml: <xml.etree.ElementTree.Element>
    :return: subclass instance of XmlObject (MissingXmlObject when the
             named class is unknown)
    """
    clsname = xml.get('class')
    if clsname:
        subcls = XmlObject.byName(clsname)
        inst = MissingXmlObject(clsname) if subcls is None else subcls()
    else:
        inst = cls()
    inst.loadXml(xml)
    return inst
def fromXml(cls, elem):
    """Convert an XML element to a Python object via the IO addon
    registered for the element's tag.

    :param elem: <xml.etree.ElementTree.Element> or None
    :return: deserialized value
    :raises RuntimeError: for an unsupported tag
    """
    if elem is None:
        return None
    addon = cls.byName(elem.tag)
    if not addon:
        raise RuntimeError('{0} is not a supported XML tag'.format(elem.tag))
    return addon.load(elem)
def toXml(cls, data, xparent=None):
    """Serialize *data* to XML via the IO addon for its type name.

    XmlObjects are stored under the 'object' tag; other values use their
    Python type name.

    :param data: value to serialize (None is passed through)
    :param xparent: parent element or None
    :return: <xml.etree.ElementTree.Element> or None
    :raises RuntimeError: for an unsupported type
    """
    if data is None:
        return None
    name = 'object' if isinstance(data, XmlObject) else type(data).__name__
    addon = cls.byName(name)
    if not addon:
        raise RuntimeError('{0} is not a supported XML tag'.format(name))
    return addon.save(data, xparent)
def save(self, data, xparent=None):
    """Serialize a bool to a <bool> element (text rendered via nstr).

    :param data: the boolean value
    :param xparent: parent element or None
    :return: <xml.etree.ElementTree.Element>
    """
    if xparent is None:
        elem = ElementTree.Element('bool')
    else:
        elem = ElementTree.SubElement(xparent, 'bool')
    elem.text = nstr(data)
    return elem
def load(self, elem):
    """Convert a <dict> element to a Python dict.

    Items missing a child value element map to None.

    :param elem: <xml.etree.ElementTree.Element>
    :return: <dict>
    """
    self.testTag(elem, 'dict')
    result = {}
    for xitem in elem:
        try:
            value = XmlDataIO.fromXml(xitem[0])
        except IndexError:
            value = None
        result[xitem.get('key')] = value
    return result
def save(self, data, xparent=None):
    """Serialize a dict to a <dict> element with one <item> per key.

    Keys are emitted in sorted order for deterministic output.

    :param data: the dict to serialize
    :param xparent: parent element or None
    :return: <xml.etree.ElementTree.Element>
    """
    if xparent is None:
        elem = ElementTree.Element('dict')
    else:
        elem = ElementTree.SubElement(xparent, 'dict')
    for key, value in sorted(data.items()):
        xitem = ElementTree.SubElement(elem, 'item')
        xitem.set('key', nstr(key))
        XmlDataIO.toXml(value, xitem)
    return elem
def load(self, elem):
    """Convert a <list> element to a Python list.

    :param elem: <xml.etree.ElementTree.Element>
    :return: <list>
    """
    self.testTag(elem, 'list')
    return [XmlDataIO.fromXml(xitem) for xitem in elem]
def save(self, data, xparent=None):
    """Serialize a list to a <list> element, one child per item.

    :param data: the list to serialize
    :param xparent: parent element or None
    :return: <xml.etree.ElementTree.Element>
    """
    if xparent is None:
        elem = ElementTree.Element('list')
    else:
        elem = ElementTree.SubElement(xparent, 'list')
    for item in data:
        XmlDataIO.toXml(item, elem)
    return elem
def load(self, elem):
    """Convert a <set> element to a Python set.

    :param elem: <xml.etree.ElementTree.Element>
    :return: <set>
    """
    self.testTag(elem, 'set')
    return {XmlDataIO.fromXml(xitem) for xitem in elem}
def load(self, elem):
    """Convert a <str> element to a Python string ('' when empty).

    :param elem: <xml.etree.ElementTree.Element>
    :return: <str>
    """
    self.testTag(elem, 'str')
    text = elem.text
    return '' if text is None else text
def template_substitute(text, **kwargs):
    """Replace ``{name}`` placeholders in *text* with keyword values.

    Placeholders without a corresponding keyword are left untouched.

    :param text: text to search and replace placeholders in.
    :param kwargs: placeholder key/value mapping.
    :return: potentially modified text with replaced placeholders.
    """
    for name, value in kwargs.items():
        text = text.replace("{%s}" % name, value)
    return text
def text_remove_empty_lines(text):
    """Whitespace normalization: drop empty lines, strip trailing whitespace."""
    kept = (line.rstrip() for line in text.splitlines() if line.strip())
    return "\n".join(kept)
def text_normalize(text):
    """Whitespace normalization.

    Strips empty lines and per-line leading/trailing whitespace, and
    normalizes line endings. Byte strings (e.g. encoded command output)
    are decoded first.
    """
    if isinstance(text, bytes):
        # -- MAYBE: command.output => bytes, encoded stream output.
        text = codecs.decode(text)
    stripped = [line.strip() for line in text.splitlines() if line.strip()]
    return "\n".join(stripped)
def _wva(values, weights): assert len(values) == len(weights) and len(weights) > 0 return sum([mul(*x) for x in zip(values, weights)]) / sum(weights)
Calculates a weighted average
def mode_interactive(options):
    """Interactive mode: prompt repeatedly for URLs to fetch.

    An empty line ends the session; results and failures are written out.
    """
    articles = set()
    failures = set()
    url = input('Enter a URL: ')
    while url != '':
        article = _get_article(url=url, bodyLines=options.bodyLines, debug=options.debug)
        if article:
            articles.add(article)
        else:
            failures.add(url)
        url = input('Enter a URL (press enter to end): ')
    _output(articles, options.outputFile, failures, options.failureFile)
def mode_clipboard_watch(options):
    """Clipboard watch mode: fetch each new URL copied to the clipboard.

    Polls the clipboard every 0.2s until CTRL+C, then writes results.
    """
    articles = set()
    failures = set()
    print('Hello, this is news-scraper. Copy a URL to start!')
    print('To quit, press CTRL+C in this window.\n')
    last_value = pyperclip.paste()
    while True:
        try:
            current = pyperclip.paste()
            if current != last_value:
                last_value = current
                print('Fetching article...')
                if options.debug:
                    print("Value changed: %s" % str(last_value)[:100])
                article = _get_article(url=last_value, bodyLines=options.bodyLines, debug=options.debug)
                if article:
                    articles.add(article)
                else:
                    failures.add(last_value)
            time.sleep(0.2)
        except KeyboardInterrupt:
            break
    _output(articles, options.outputFile, failures, options.failureFile)
def parse_connection_string_psycopg2(connection_string):
    """Parse a psycopg2-consumable connection string.

    Accepts either a libpq URL (postgresql://user:pw@host:port/db) or the
    "host=... dbname=..." keyword form.

    :param connection_string: the connection string
    :return: dictionary with connection string parts
    """
    parsed = urlparse(connection_string)
    if parsed.hostname:
        return {
            'host': parsed.hostname,
            'port': parsed.port,
            'dbname': parsed.path,
            'user': parsed.username,
            'password': parsed.password,
        }
    # keyword form: pick out each key=value pair by regex
    conn_prepared = {}
    token = r'[0-9a-zA-Z_.!@#$%^&*()~]+'
    pattern = re.compile(
        r'\bhost=(?P<host>{0})|dbname=(?P<dbname>{0})|port=(?P<port>{0})|'
        r'user=(?P<user>{0})|password=(?P<password>{0})\b'.format(token),
        re.IGNORECASE)
    for match in pattern.finditer(connection_string):
        for key, value in match.groupdict().items():
            if value:
                conn_prepared[key] = value
    return conn_prepared
def get_pgpm_db_version(cls, cur, schema_name='_pgpm'): cls.set_search_path(cur, schema_name) cur.execute("SELECT _find_schema('{0}', '{1}')" .format(schema_name, 'x')) # TODO: make it work with the way it's written below. currently throws error as func returns record # without column list # cur.callproc('_find_schema', [schema_name, 'x']) pgpm_v_ext = tuple(cur.fetchone()[0][1:-1].split(',')) return pgpm_v_ext[2], pgpm_v_ext[3], pgpm_v_ext[4]
returns current version of pgpm schema :return: tuple of major, minor and patch components of version
def create_db_schema(cls, cur, schema_name):
    """Build a CREATE SCHEMA statement and execute it on *cur*."""
    cur.execute("CREATE SCHEMA {0} ;\n".format(schema_name))
def grant_usage_privileges(cls, cur, schema_name, roles):
    """Grant USAGE on the schema and EXECUTE on its functions to *roles*.

    (The original docstring said "Sets search path", which was wrong.)
    """
    statement = ('GRANT USAGE ON SCHEMA {0} TO {1};'
                 'GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA {0} TO {1};'
                 .format(schema_name, roles))
    cur.execute(statement)
def grant_usage_install_privileges(cls, cur, schema_name, roles):
    """Grant DML on tables, EXECUTE on functions and USAGE/SELECT on
    sequences in the schema to *roles*.

    (The original docstring said "Sets search path", which was wrong.)
    """
    statement = ('GRANT SELECT, INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA {0} TO {1};'
                 'GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA {0} TO {1};'
                 'GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA {0} TO {1};'
                 .format(schema_name, roles))
    cur.execute(statement)
def grant_default_usage_install_privileges(cls, cur, schema_name, roles):
    """Alter default privileges in the schema so that future tables,
    functions and sequences are accessible to *roles*.

    (The original docstring said "Sets search path", which was wrong.)
    """
    statement = ('ALTER DEFAULT PRIVILEGES IN SCHEMA {0} '
                 'GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO {1};'
                 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} GRANT EXECUTE ON FUNCTIONS TO {1};'
                 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} '
                 'GRANT USAGE, SELECT ON SEQUENCES TO {1};'
                 .format(schema_name, roles))
    cur.execute(statement)
def revoke_all(cls, cur, schema_name, roles):
    """Revoke all privileges on the schema and its tables, sequences and
    functions from *roles*."""
    statement = ('REVOKE ALL ON SCHEMA {0} FROM {1};'
                 'REVOKE ALL ON ALL TABLES IN SCHEMA {0} FROM {1};'
                 'REVOKE ALL ON ALL SEQUENCES IN SCHEMA {0} FROM {1};'
                 'REVOKE ALL ON ALL FUNCTIONS IN SCHEMA {0} FROM {1};'.format(schema_name, roles))
    cur.execute(statement)
def schema_exists(cls, cur, schema_name):
    """Return True if *schema_name* exists in the database.

    NOTE(review): schema_name is interpolated directly into the SQL;
    callers must not pass untrusted input (a bound parameter would be
    safer).
    """
    cur.execute(
        "SELECT EXISTS (SELECT schema_name FROM information_schema.schemata "
        "WHERE schema_name = '{0}');".format(schema_name))
    return cur.fetchone()[0]
def pandas(self):
    """Return (and cache) this resultset as a Pandas dataframe."""
    if self._pandas is None:
        self._pandas = pd.DataFrame().from_records(self.list_of_dicts)
    return self._pandas
def translate(self, dialect):
    """Return a copy of this ResultSet translated to *dialect*.

    Each dimension value in each result is translated in place on the
    copy's results.
    """
    translated = copy(self)
    translated.dialect = dialect
    for result in translated:
        for dimval in result.dimensionvalues:
            dimval.value = dimval.translate(dialect)
    return translated
def append(self, val):
    """Connect a new result to the resultset.

    This is where the heavy lifting is done when creating results: a
    datatype is attached so each result can validate itself, dialects are
    normalized, and the result is linked back to this resultset and its
    dataset. Scraper authors just yield results; this method does the rest.

    NOTE(review): uses the Python-2-only ``unicode`` builtin.
    """
    val.resultset = self
    val.dataset = self.dataset

    # Check result dimensions against available dimensions for this dataset
    if val.dataset:
        dataset_dimensions = self.dataset.dimensions
        for k, v in val.raw_dimensions.items():
            # unknown keys get a fresh, anonymous Dimension
            if k not in dataset_dimensions:
                d = Dimension(k)
            else:
                d = dataset_dimensions[k]

            # Normalize if we have a datatype and a foreign dialect
            normalized_value = unicode(v)
            if d.dialect and d.datatype:
                if d.dialect in d.datatype.dialects:
                    for av in d.allowed_values:
                        # Not all allowed_value have all dialects
                        if unicode(v) in av.dialects.get(d.dialect, []):
                            normalized_value = av.value
                            # Use the first match; multiple matches are not
                            # supported -- this is by design.
                            break

            # Create DimensionValue object
            if isinstance(v, DimensionValue):
                dim = v
                v.value = normalized_value
            else:
                if k in dataset_dimensions:
                    dim = DimensionValue(normalized_value, d)
                else:
                    dim = DimensionValue(normalized_value, Dimension())

            val.dimensionvalues.append(dim)

        # Add the last list of dimension values to the ResultSet;
        # they will usually be the same for each result
        self.dimensionvalues = val.dimensionvalues

    super(ResultSet, self).append(val)
def tuple(self):
    """Tuple conversion to (value, dimensions), e.g.:
    (123, {"dimension_1": "foo", "dimension_2": "bar"})
    """
    dims = {dv.id: dv.value for dv in self.dimensionvalues}
    return (self.value, dims)
def allowed_values(self):
    """Return (and cache) the list of allowed values for this dimension.

    Raw values from the scraper are wrapped in DimensionValue objects.
    """
    if self._allowed_values is None:
        self._allowed_values = ValueList()
        for raw in self.scraper._fetch_allowed_values(self):
            if isinstance(raw, DimensionValue):
                self._allowed_values.append(raw)
            else:
                self._allowed_values.append(DimensionValue(raw, Dimension()))
    return self._allowed_values
def append(self, val):
    """Connect a new item to the scraper and record its collection path."""
    val.scraper = self.scraper
    val._collection_path = copy(self.collection._collection_path)
    val._collection_path.append(val)
    super(ItemList, self).append(val)
def _move_here(self):
    """Move the scraper cursor to this item, using the cheapest route."""
    cursor = self.scraper.current_item
    # already here?
    if self is cursor:
        return
    # a child?
    if cursor.items and self in cursor.items:
        self.scraper.move_to(self)
        return
    # a parent?
    if self is cursor.parent:
        self.scraper.move_up()
    # a sibling?
    if self.parent and self in self.parent.items:
        self.scraper.move_up()
        self.scraper.move_to(self)
        return
    # last resort: move to the top and walk all the way down again
    self.scraper.move_to_top()
    for step in self.path:
        self.scraper.move_to(step)
def items(self):
    """ItemList of children, fetched lazily from the scraper."""
    if self.scraper.current_item is not self:
        self._move_here()
    if self._items is None:
        self._items = ItemList()
        self._items.scraper = self.scraper
        self._items.collection = self
        for child in self.scraper._fetch_itemslist(self):
            child.parent = self
            # datasets inherit the scraper's dialect unless they set their own
            if child.type == TYPE_DATASET and child.dialect is None:
                child.dialect = self.scraper.dialect
            self._items.append(child)
    return self._items
def _hash(self): dump = dumps(self.query, sort_keys=True) if isinstance(dump, str): dump = dump.encode('utf-8') return md5(dump).hexdigest()
Return a hash for the current query. This hash is _not_ a unique representation of the dataset!
def fetch_next(self, query=None, **kwargs):
    """Generator yielding data one row (Result) at a time.

    The containing ResultSet can be reached via ``Result.resultset``,
    but should not be manipulated until this generator is exhausted,
    or you may see unexpected results.
    """
    if query:
        self.query = query

    hash_ = self._hash
    if hash_ in self._data:
        for result in self._data[hash_]:
            yield result
        # NOTE(review): execution continues past the cached results and
        # re-fetches below; confirm whether an early return was intended
        # here.

    if self.scraper.current_item is not self:
        self._move_here()

    self._data[hash_] = ResultSet()
    self._data[hash_].dialect = self.dialect
    self._data[hash_].dataset = self
    for result in self.scraper._fetch_data(self, query=self.query, **kwargs):
        self._data[hash_].append(result)
        yield result
def dimensions(self):
    """Available dimensions, if defined (fetched lazily)."""
    # First of all: select this dataset
    if self.scraper.current_item is not self:
        self._move_here()
    if self._dimensions is None:
        self._dimensions = DimensionList()
        for dim in self.scraper._fetch_dimensions(self):
            dim.dataset = self
            dim.scraper = self.scraper
            self._dimensions.append(dim)
    return self._dimensions
def shape(self):
    """Compute the shape of the dataset as (rows, cols)."""
    if not self.data:
        return (0, 0)
    return (len(self.data), len(self.dimensions))
def on(cls, hook):
    """Hook decorator: register the decorated function under *hook*."""
    def register(fn):
        cls._hooks[hook].append(fn)
        return fn
    return register
def move_to_top(self):
    """Move to the root item, firing "top" hooks; returns self."""
    self.current_item = self.root
    for hook in self._hooks["top"]:
        hook(self)
    return self
def move_up(self):
    """Move up one level in the hierarchy, unless already on top.

    Fires "up" hooks, and "top" hooks as well when the root is reached.
    Returns self.
    """
    parent = self.current_item.parent
    if parent is not None:
        self.current_item = parent
    for hook in self._hooks["up"]:
        hook(self)
    if self.current_item is self.root:
        for hook in self._hooks["top"]:
            hook(self)
    return self
def move_to(self, id_):
    """Select a child item by id (str), reference or index; fire hooks.

    :raises NoSuchItem: when the id does not resolve to a child.
    """
    if self.items:
        try:
            self.current_item = self.items[id_]
        except (StopIteration, IndexError, NoSuchItem):
            raise NoSuchItem
    for hook in self._hooks["select"]:
        hook(self, id_)
    return self
def descendants(self):
    """Recursively yield every dataset below the current item."""
    for item in self.current_item.items:
        self.move_to(item)
        if item.type == TYPE_COLLECTION:
            # recurse through the deprecated alias, as the original did
            for descendant in self.children:
                yield descendant
        else:
            yield item
        self.move_up()
def children(self):
    """Former, misleading name for :meth:`descendants` (deprecated)."""
    from warnings import warn
    warn("Deprecated. Use Scraper.descendants.", DeprecationWarning)
    for descendant in self.descendants:
        yield descendant
def make_python_name(s, default=None, number_prefix='N', encoding="utf-8"):
    """Return a text string that can be used as a legal python identifier.

    :Arguments:
        *s* string (``None``/'' fall back to *default*)
        *default* used if *s* is ``None`` or empty
        *number_prefix* string to prepend if *s* starts with a number
        *encoding* used to decode byte strings

    Fixes: the original called the Python-2-only ``unicode`` builtin,
    crashing on Python 3; the digit regex is now a raw string.
    """
    if s in ('', None):
        s = default
    s = str(s)
    s = re.sub(r"[^a-zA-Z0-9_]", "_", s)
    if re.match(r'\d', s) is not None:
        s = number_prefix + s
    # Return text on both Python 2 and 3: on py2, str is bytes and must
    # be decoded (matching the original unicode(s, encoding)); on py3,
    # s is already text.
    if isinstance(s, bytes):
        return s.decode(encoding)
    return s
def recarray(self):
    """Return the data as a :class:`numpy.recarray` built from this
    object's records and column names."""
    records = self.records
    return numpy.rec.fromrecords(records, names=self.names)
def main(arguments=None):
    """The main function for ``yaml_to_database.py`` when installed as a
    command-line tool.

    NOTE(review): Python 2 only (print statements, dict.iteritems,
    ``unicode``). The exec-based unpacking below creates local variables
    named after each command-line argument (e.g. ``pathToYaml``,
    ``deleteFlag``), which later code relies on.
    """
    # setup the command-line util settings
    su = tools(
        arguments=arguments,
        docString=__doc__,
        logLevel="WARNING",
        options_first=False,
        projectName=False
    )
    arguments, settings, log, dbConn = su.setup()

    # unpack remaining cl arguments using `exec` to setup the variable
    # names automatically
    for arg, val in arguments.iteritems():
        if arg[0] == "-":
            varname = arg.replace("-", "") + "Flag"
        else:
            varname = arg.replace("<", "").replace(">", "")
        if isinstance(val, str) or isinstance(val, unicode):
            exec(varname + " = '%s'" % (val,))
        else:
            exec(varname + " = %s" % (val,))
        if arg == "--dbConn":
            dbConn = val
        log.debug('%s = %s' % (varname, val,))

    if os.path.isfile(pathToYaml):
        from fundamentals.mysql import yaml_to_database
        # PARSE YAML FILE CONTENTS AND ADD TO DATABASE
        yaml2db = yaml_to_database(
            log=log,
            settings=settings,
            dbConn=dbConn
        )
        yaml2db.add_yaml_file_content_to_database(
            filepath=pathToYaml,
            deleteFile=deleteFlag
        )
        basename = os.path.basename(pathToYaml)
        print "Content of %(basename)s added to database" % locals()
    else:
        from fundamentals.mysql import yaml_to_database
        yaml2db = yaml_to_database(
            log=log,
            settings=settings,
            dbConn=dbConn,
            pathToInputDir=pathToYaml,
            deleteFiles=deleteFlag
        )
        yaml2db.ingest()
        print "Content of %(pathToYaml)s directory added to database" % locals()
    return
def ingest(self):
    """Ingest the contents of a directory of yaml files into a database.

    Every regular file whose name contains "yaml" (case-insensitive) in
    ``pathToInputDir`` is passed to add_yaml_file_content_to_database.

    :return: None
    """
    self.log.debug('starting the ``ingest`` method')
    for entry in os.listdir(self.pathToInputDir):
        full_path = os.path.join(self.pathToInputDir, entry)
        if os.path.isfile(full_path) and "yaml" in entry.lower():
            self.add_yaml_file_content_to_database(
                filepath=full_path,
                deleteFile=self.deleteFiles
            )
    self.log.debug('completed the ``ingest`` method')
    return None
def data_type(self, data_type):
    """Set the data_type of this Option.

    :param data_type: one of "string", "number", "date", "color", or None.
    :type: str
    :raises ValueError: for any other value.
    """
    valid = ["string", "number", "date", "color"]
    if data_type is not None and data_type not in valid:
        raise ValueError(
            "Invalid value for `data_type` ({0}), must be one of {1}"
            .format(data_type, valid)
        )
    self._data_type = data_type
def create_option(cls, option, **kwargs):
    """Create a new Option.

    Synchronous by default; passing ``async=True`` (via kwargs) makes the
    HTTP request asynchronous and the result is the request thread
    (retrieve the value with ``thread.get()``).

    :param Option option: attributes of option to create (required)
    :return: Option, or the request thread when async
    """
    kwargs['_return_http_data_only'] = True
    # Both branches of the original returned the helper's result directly;
    # async behaviour is handled inside the _with_http_info helper.
    return cls._create_option_with_http_info(option, **kwargs)
def delete_option_by_id(cls, option_id, **kwargs):
    """Delete an instance of Option by its ID.

    Synchronous by default; passing ``async=True`` (via kwargs) makes the
    HTTP request asynchronous and the result is the request thread.

    :param str option_id: ID of option to delete (required)
    :return: None, or the request thread when async
    """
    kwargs['_return_http_data_only'] = True
    # Both branches of the original returned the helper's result directly.
    return cls._delete_option_by_id_with_http_info(option_id, **kwargs)
def get_option_by_id(cls, option_id, **kwargs):
    """Return a single instance of Option by its ID.

    Synchronous by default; pass ``async=True`` through ``kwargs`` to get
    the request thread back instead of the result.

    :param str option_id: ID of option to return (required)
    :return: Option, or the request thread when called asynchronously
    """
    kwargs['_return_http_data_only'] = True
    helper = cls._get_option_by_id_with_http_info
    if kwargs.get('async'):
        # Asynchronous: hand the request thread back to the caller.
        return helper(option_id, **kwargs)
    # Synchronous: the helper returns the deserialized data directly.
    return helper(option_id, **kwargs)
Find Option Return single instance of Option by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_option_by_id(option_id, async=True) >>> result = thread.get() :param async bool :param str option_id: ID of option to return (required) :return: Option If the method is called asynchronously, returns the request thread.
def list_all_options(cls, **kwargs):
    """Return a page of Options.

    Synchronous by default; pass ``async=True`` through ``kwargs`` to get
    the request thread back instead of the result.

    :param int page: page number
    :param int size: page size
    :param str sort: page order
    :return: page[Option], or the request thread when called asynchronously
    """
    kwargs['_return_http_data_only'] = True
    helper = cls._list_all_options_with_http_info
    if kwargs.get('async'):
        # Asynchronous: hand the request thread back to the caller.
        return helper(**kwargs)
    # Synchronous: the helper returns the deserialized data directly.
    return helper(**kwargs)
List Options Return a list of Options This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_options(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[Option] If the method is called asynchronously, returns the request thread.
def replace_option_by_id(cls, option_id, option, **kwargs):
    """Replace all attributes of an Option.

    Synchronous by default; pass ``async=True`` through ``kwargs`` to get
    the request thread back instead of the result.

    :param str option_id: ID of option to replace (required)
    :param Option option: Attributes of option to replace (required)
    :return: Option, or the request thread when called asynchronously
    """
    kwargs['_return_http_data_only'] = True
    helper = cls._replace_option_by_id_with_http_info
    if kwargs.get('async'):
        # Asynchronous: hand the request thread back to the caller.
        return helper(option_id, option, **kwargs)
    # Synchronous: the helper returns the deserialized data directly.
    return helper(option_id, option, **kwargs)
Replace Option Replace all attributes of Option This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_option_by_id(option_id, option, async=True) >>> result = thread.get() :param async bool :param str option_id: ID of option to replace (required) :param Option option: Attributes of option to replace (required) :return: Option If the method is called asynchronously, returns the request thread.
def update_option_by_id(cls, option_id, option, **kwargs):
    """Update attributes of an Option.

    Synchronous by default; pass ``async=True`` through ``kwargs`` to get
    the request thread back instead of the result.

    :param str option_id: ID of option to update. (required)
    :param Option option: Attributes of option to update. (required)
    :return: Option, or the request thread when called asynchronously
    """
    kwargs['_return_http_data_only'] = True
    helper = cls._update_option_by_id_with_http_info
    if kwargs.get('async'):
        # Asynchronous: hand the request thread back to the caller.
        return helper(option_id, option, **kwargs)
    # Synchronous: the helper returns the deserialized data directly.
    return helper(option_id, option, **kwargs)
Update Option Update attributes of Option This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_option_by_id(option_id, option, async=True) >>> result = thread.get() :param async bool :param str option_id: ID of option to update. (required) :param Option option: Attributes of option to update. (required) :return: Option If the method is called asynchronously, returns the request thread.
def get_callable_signature_as_string(the_callable):
    """Return a string representing a callable's signature.

    It reads as if the callable had been declared at the prompt:

    >>> def foo(arg1, arg2, arg3='val1', arg4='val2', *args, **argd):
    ...     pass
    >>> get_callable_signature_as_string(foo)
    "def foo(arg1, arg2, arg3='val1', arg4='val2', *args, **argd)"

    :param the_callable: the callable to be analyzed.
    :type the_callable: function/callable.
    :return: the signature.
    """
    # inspect.getargspec was removed in Python 3.11; getfullargspec exposes
    # the same positional/varargs/varkw/defaults fields.
    spec = inspect.getfullargspec(the_callable)
    args, varargs, varkw, defaults = (
        spec.args, spec.varargs, spec.varkw, spec.defaults)
    tmp_args = list(args)
    args_dict = {}
    # Defaults align with the *last* positional args: pop both from the right.
    if defaults:
        defaults = list(defaults)
    else:
        defaults = []
    while defaults:
        args_dict[tmp_args.pop()] = defaults.pop()
    while tmp_args:
        args_dict[tmp_args.pop()] = None
    args_list = []
    for arg in args:
        if args_dict[arg] is not None:
            args_list.append("%s=%s" % (arg, repr(args_dict[arg])))
        else:
            args_list.append(arg)
    if varargs:
        args_list.append("*%s" % varargs)
    if varkw:
        args_list.append("**%s" % varkw)
    args_string = ', '.join(args_list)
    return "def %s(%s)" % (the_callable.__name__, args_string)
Return a string representing a callable. It is executed as if it would have been declared on the prompt. >>> def foo(arg1, arg2, arg3='val1', arg4='val2', *args, **argd): ... pass >>> get_callable_signature_as_string(foo) def foo(arg1, arg2, arg3='val1', arg4='val2', *args, **argd) :param the_callable: the callable to be analyzed. :type the_callable: function/callable. :return: the signature.
def get_callable_documentation(the_callable):
    """Return a boxed string with the callable's signature and docstring.

    :param the_callable: the callable to be analyzed.
    :type the_callable: function/callable.
    :return: the signature and docstring rendered in an ascii box.
    """
    docstring = the_callable.__doc__ or 'No documentation'
    return wrap_text_in_a_box(
        title=get_callable_signature_as_string(the_callable),
        body=docstring.replace('\n', '\n\n'),
        style='ascii_double')
Return a string with the callable signature and its docstring.

:param the_callable: the callable to be analyzed.
:type the_callable: function/callable.
:return: the signature followed by the docstring, wrapped in a box.
def register_extension_class(ext, base, *args, **kwargs):
    """Instantiate the given extension class and attach it to *base*.

    README: The expected protocol here is to instantiate the given
    extension, passing the base object as the first positional argument
    followed by *args and **kwargs, and then expose the instance as a
    public attribute of that base (leading underscores stripped from the
    extension name).
    """
    instance = ext.plugin(base, *args, **kwargs)
    public_name = ext.name.lstrip('_')
    setattr(base, public_name, instance)
Instantiate the given extension class and register as a public attribute of the given base. README: The expected protocol here is to instantiate the given extension and pass the base object as the first positional argument, then unpack args and kwargs as additional arguments to the extension's constructor.
def register_extension_method(ext, base, *args, **kwargs):
    """Bind the given extension method to *base* and expose it publicly.

    README: The expected protocol here is that the given extension method
    is an unbound function. It is bound to the specified base as a method
    and then set as a public attribute of that base (leading underscores
    stripped from the extension name).
    """
    method = create_bound_method(ext.plugin, base)
    public_name = ext.name.lstrip('_')
    setattr(base, public_name, method)
Register the given extension method as a public attribute of the given base. README: The expected protocol here is that the given extension method is an unbound function. It will be bound to the specified base as a method, and then set as a public attribute of that base.
def token_auto_auth(func):
    """Wrap class methods with automatic token re-authentication.

    When the wrapped method raises an authentication failure, a new token
    is requested and the original call is replayed exactly once. The
    wrapped method's class must expose the client object as ``_client``.
    """
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        try:
            return func(self, *args, **kwargs)
        except errors.AuthFailure:
            # Refresh authentication, then replay the original request once.
            self._client.auth._refresh()
            return func(self, *args, **kwargs)

    # TODO(TheDodd): match func call signature and docs.
    return wrapper
Wrap class methods with automatic token re-authentication. This wrapper will detect authentication failures coming from its wrapped method. When one is caught, it will request a new token, and simply replay the original request. The one constraint that this wrapper has is that the wrapped method's class must have the :py:class:`objectrocket.client.Client` object embedded in it as the property ``_client``. Such is the design of all current client operations layers.
def check_auth(args, role=None):
    """Check the user authentication against the DynamoDB people table.

    :param args: dict expected to carry 'email' and 'api_key'.
    :param role: optional collection of roles allowed to proceed.
    :return: dict with 'success' (bool), 'message' (str or None) and, on
        success, the 'user' record.
    """
    users = boto3.resource("dynamodb").Table(os.environ['people'])
    email = args.get('email', None)
    api_key = args.get('api_key', None)
    if not (email and api_key):
        mesg = "Invalid request: `email` and `api_key` are required"
        return {'success': False, 'message': mesg}
    record = users.get_item(Key={'email': email})
    if 'Item' not in record:
        return {'success': False, 'message': 'User does not exist.'}
    user = record['Item']
    if user['api_key'] != api_key:
        return {'success': False, 'message': 'API key was invalid.'}
    if role and user['role'] not in role:
        mesg = 'User is not authorized to make this change.'
        return {'success': False, 'message': mesg}
    return {'success': True, 'message': None, 'user': user}
Check the user authentication.
def lambda_handler(event, context):
    """Main handler: list all events for an authorized admin caller.

    :param event: Lambda event dict; must authenticate via check_auth.
    :param context: Lambda context (unused).
    :return: the auth failure dict, or {'success', 'events', 'eventsCount'}.
    """
    auth = check_auth(event, role=["admin"])
    if not auth['success']:
        return auth
    table = boto3.resource("dynamodb").Table(os.environ['database'])
    items = list(table.scan().get('Items', list()))
    return {'success': True, 'events': items, 'eventsCount': len(items)}
Main handler.
def get_theme_dir():
    """Return the absolute path of this package's ``theme`` directory.

    Intended for use when setting the ``html_theme_path`` option within
    Sphinx's ``conf.py`` file.
    """
    here = os.path.dirname(__file__)
    return os.path.abspath(os.path.join(here, "theme"))
Returns path to directory containing this package's theme. This is designed to be used when setting the ``html_theme_path`` option within Sphinx's ``conf.py`` file.
def create_discount_promotion(cls, discount_promotion, **kwargs):
    """Create a new DiscountPromotion.

    Synchronous by default; pass ``async=True`` through ``kwargs`` to get
    the request thread back instead of the result.

    :param DiscountPromotion discount_promotion: Attributes of
        discountPromotion to create (required)
    :return: DiscountPromotion, or the request thread when asynchronous
    """
    kwargs['_return_http_data_only'] = True
    helper = cls._create_discount_promotion_with_http_info
    if kwargs.get('async'):
        # Asynchronous: hand the request thread back to the caller.
        return helper(discount_promotion, **kwargs)
    # Synchronous: the helper returns the deserialized data directly.
    return helper(discount_promotion, **kwargs)
Create DiscountPromotion Create a new DiscountPromotion This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_discount_promotion(discount_promotion, async=True) >>> result = thread.get() :param async bool :param DiscountPromotion discount_promotion: Attributes of discountPromotion to create (required) :return: DiscountPromotion If the method is called asynchronously, returns the request thread.
def delete_discount_promotion_by_id(cls, discount_promotion_id, **kwargs):
    """Delete an instance of DiscountPromotion by its ID.

    Synchronous by default; pass ``async=True`` through ``kwargs`` to get
    the request thread back instead of the result.

    :param str discount_promotion_id: ID of discountPromotion to delete.
        (required)
    :return: None, or the request thread when asynchronous
    """
    kwargs['_return_http_data_only'] = True
    helper = cls._delete_discount_promotion_by_id_with_http_info
    if kwargs.get('async'):
        # Asynchronous: hand the request thread back to the caller.
        return helper(discount_promotion_id, **kwargs)
    # Synchronous: the helper returns the deserialized data directly.
    return helper(discount_promotion_id, **kwargs)
Delete DiscountPromotion Delete an instance of DiscountPromotion by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_discount_promotion_by_id(discount_promotion_id, async=True) >>> result = thread.get() :param async bool :param str discount_promotion_id: ID of discountPromotion to delete. (required) :return: None If the method is called asynchronously, returns the request thread.
def get_discount_promotion_by_id(cls, discount_promotion_id, **kwargs):
    """Return a single instance of DiscountPromotion by its ID.

    Synchronous by default; pass ``async=True`` through ``kwargs`` to get
    the request thread back instead of the result.

    :param str discount_promotion_id: ID of discountPromotion to return
        (required)
    :return: DiscountPromotion, or the request thread when asynchronous
    """
    kwargs['_return_http_data_only'] = True
    helper = cls._get_discount_promotion_by_id_with_http_info
    if kwargs.get('async'):
        # Asynchronous: hand the request thread back to the caller.
        return helper(discount_promotion_id, **kwargs)
    # Synchronous: the helper returns the deserialized data directly.
    return helper(discount_promotion_id, **kwargs)
Find DiscountPromotion Return single instance of DiscountPromotion by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_discount_promotion_by_id(discount_promotion_id, async=True) >>> result = thread.get() :param async bool :param str discount_promotion_id: ID of discountPromotion to return (required) :return: DiscountPromotion If the method is called asynchronously, returns the request thread.
def list_all_discount_promotions(cls, **kwargs):
    """Return a page of DiscountPromotions.

    Synchronous by default; pass ``async=True`` through ``kwargs`` to get
    the request thread back instead of the result.

    :param int page: page number
    :param int size: page size
    :param str sort: page order
    :return: page[DiscountPromotion], or the request thread when
        asynchronous
    """
    kwargs['_return_http_data_only'] = True
    helper = cls._list_all_discount_promotions_with_http_info
    if kwargs.get('async'):
        # Asynchronous: hand the request thread back to the caller.
        return helper(**kwargs)
    # Synchronous: the helper returns the deserialized data directly.
    return helper(**kwargs)
List DiscountPromotions Return a list of DiscountPromotions This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_discount_promotions(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[DiscountPromotion] If the method is called asynchronously, returns the request thread.
def replace_discount_promotion_by_id(cls, discount_promotion_id, discount_promotion, **kwargs):
    """Replace all attributes of a DiscountPromotion.

    Synchronous by default; pass ``async=True`` through ``kwargs`` to get
    the request thread back instead of the result.

    :param str discount_promotion_id: ID of discountPromotion to replace
        (required)
    :param DiscountPromotion discount_promotion: Attributes of
        discountPromotion to replace (required)
    :return: DiscountPromotion, or the request thread when asynchronous
    """
    kwargs['_return_http_data_only'] = True
    helper = cls._replace_discount_promotion_by_id_with_http_info
    if kwargs.get('async'):
        # Asynchronous: hand the request thread back to the caller.
        return helper(discount_promotion_id, discount_promotion, **kwargs)
    # Synchronous: the helper returns the deserialized data directly.
    return helper(discount_promotion_id, discount_promotion, **kwargs)
Replace DiscountPromotion Replace all attributes of DiscountPromotion This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_discount_promotion_by_id(discount_promotion_id, discount_promotion, async=True) >>> result = thread.get() :param async bool :param str discount_promotion_id: ID of discountPromotion to replace (required) :param DiscountPromotion discount_promotion: Attributes of discountPromotion to replace (required) :return: DiscountPromotion If the method is called asynchronously, returns the request thread.
def update_discount_promotion_by_id(cls, discount_promotion_id, discount_promotion, **kwargs):
    """Update attributes of a DiscountPromotion.

    Synchronous by default; pass ``async=True`` through ``kwargs`` to get
    the request thread back instead of the result.

    :param str discount_promotion_id: ID of discountPromotion to update.
        (required)
    :param DiscountPromotion discount_promotion: Attributes of
        discountPromotion to update. (required)
    :return: DiscountPromotion, or the request thread when asynchronous
    """
    kwargs['_return_http_data_only'] = True
    helper = cls._update_discount_promotion_by_id_with_http_info
    if kwargs.get('async'):
        # Asynchronous: hand the request thread back to the caller.
        return helper(discount_promotion_id, discount_promotion, **kwargs)
    # Synchronous: the helper returns the deserialized data directly.
    return helper(discount_promotion_id, discount_promotion, **kwargs)
Update DiscountPromotion Update attributes of DiscountPromotion This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_discount_promotion_by_id(discount_promotion_id, discount_promotion, async=True) >>> result = thread.get() :param async bool :param str discount_promotion_id: ID of discountPromotion to update. (required) :param DiscountPromotion discount_promotion: Attributes of discountPromotion to update. (required) :return: DiscountPromotion If the method is called asynchronously, returns the request thread.
def make_code_readable(s):
    """Add newlines at strategic places in code string for printing.

    Newlines are inserted after commas and around curly braces, but only
    when those characters appear OUTSIDE quoted string literals. All input
    characters are preserved in the output; in particular, a backslash and
    the character it escapes are emitted verbatim and never toggle the
    quote state (fixes the original, which silently dropped both).

    Args:
        s: str, piece of code. If not str, will attempt to convert to str.

    Returns:
        str
    """
    s = s if isinstance(s, str) else str(s)

    MAP = {",": ",\n", "{": "{\n ", "}": "\n}"}

    ll = []
    flag_single = False      # inside a single-quoted literal
    flag_double = False      # inside a double-quoted literal
    flag_backslash = False   # previous character was a backslash

    for ch in s:
        if flag_backslash:
            # Escaped character: emit verbatim and do not interpret it.
            flag_backslash = False
            ll.append(ch)
            continue
        if ch == "\\":
            # Keep the backslash itself; the next character is escaped.
            flag_backslash = True
            ll.append(ch)
            continue

        # Toggle quote state. An opening quote is appended below via the
        # in-string branch; a closing quote falls through to MAP.get (and
        # is appended unchanged, since quotes are not MAP keys).
        if flag_single:
            if ch == "'":
                flag_single = False
        elif not flag_double and ch == "'":
            flag_single = True
        if flag_double:
            if ch == '"':
                flag_double = False
        elif not flag_single and ch == '"':
            flag_double = True

        if flag_single or flag_double:
            ll.append(ch)
        else:
            ll.append(MAP.get(ch, ch))
    return "".join(ll)
Add newlines at strategic places in code string for printing. Args: s: str, piece of code. If not str, will attempt to convert to str. Returns: str
def chunk_string(string, length):
    """Split *string* into consecutive chunks of at most *length* chars.

    Returns a lazy generator; every chunk has exactly *length* characters
    except possibly the last one, which may be shorter.

    Reference: http://stackoverflow.com/questions/18854620
    """
    return (string[start:start + length]
            for start in range(0, len(string), length))
Splits a string into fixed-length chunks. This function returns a generator, using a generator comprehension. The generator returns the string sliced, from 0 + a multiple of the length of the chunks, to the length of the chunks + a multiple of the length of the chunks. Reference: http://stackoverflow.com/questions/18854620
def seconds2str(seconds):
    """Return a duration string such as "1h 05m 55s".

    Negative durations are rendered directly in seconds; NaN and infinity
    yield "NaN" and "Inf" respectively.
    """
    if seconds < 0:
        return "{0:.3g}s".format(seconds)
    if math.isnan(seconds):
        return "NaN"
    if math.isinf(seconds):
        return "Inf"

    minutes, secs = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    if hours >= 1:
        return "{0:g}h {1:02g}m {2:.3g}s".format(hours, minutes, secs)
    if minutes >= 1:
        return "{0:02g}m {1:.3g}s".format(minutes, secs)
    return "{0:.3g}s".format(secs)
Returns string such as 1h 05m 55s.
def make_fits_keys_dict(keys):
    """Return a dict translating *keys* to unique FITS header keywords.

    This is similar to Windows making up 8-character names for filenames
    longer than that. "The keyword names may be up to 8 characters long
    and can only contain uppercase letters A to Z, the digits 0 to 9, the
    hyphen, and the underscore character." [1]

    Collisions are resolved by overwriting the tail of the candidate with
    a zero-padded counter, growing the number of counter digits whenever
    the current width is exhausted.

    Arguments:
        keys -- list of strings

    Returns:
        dictionary whose keys are the elements of *keys* and whose values
        are made-up uppercase FITS keywords

    References:
        [1] http://fits.gsfc.nasa.gov/fits_primer.html
    """
    translation = {}
    used = []
    for original in keys:
        # Converts to valid FITS key according to reference [1] above.
        candidate = valid_fits_key(original)
        width = 1
        counter = -1
        counter_max = 9
        while candidate in used:
            counter += 1
            if counter > counter_max:
                # Exhausted this suffix width; restart with one more digit.
                counter = 0
                counter_max = counter_max * 10 + 9
                width += 1
            suffix = ("%0{0:d}d".format(width)) % counter
            candidate = candidate[:(8 - width)] + suffix
        translation[original] = candidate
        used.append(candidate)
    return translation
Returns a dictionary to translate to unique FITS header keys up to 8 characters long This is similar to Windows making up 8-character names for filenames that are longer than this "The keyword names may be up to 8 characters long and can only contain uppercase letters A to Z, the digits 0 to 9, the hyphen, and the underscore character." [1] Arguments: keys -- list of strings Returns: dictionary whose keys are the elements in the "keys" argument, and whose values are made-up uppercase names References: [1] http://fits.gsfc.nasa.gov/fits_primer.html
def valid_fits_key(key):
    """Make a valid key for a FITS header out of *key*.

    "The keyword names may be up to 8 characters long and can only contain
    uppercase letters A to Z, the digits 0 to 9, the hyphen, and the
    underscore character." (http://fits.gsfc.nasa.gov/fits_primer.html)

    The key is uppercased, disallowed characters are stripped, and the
    result is truncated to 8 characters.

    Raises:
        RuntimeError: if no valid character remains after filtering.
    """
    # Raw string: the original "[^A-Z0-9\-_]" relied on the invalid "\-"
    # escape sequence in a plain string literal (warning, and an error in
    # future Python versions).
    ret = re.sub(r"[^A-Z0-9\-_]", "", key.upper())[:8]
    if len(ret) == 0:
        raise RuntimeError("key '{0!s}' has no valid characters to be a key in a FITS header".format(key))
    return ret
Makes valid key for a FITS header "The keyword names may be up to 8 characters long and can only contain uppercase letters A to Z, the digits 0 to 9, the hyphen, and the underscore character." (http://fits.gsfc.nasa.gov/fits_primer.html)
def eval_fieldnames(string_, varname="fieldnames"):
    """Evaluate *string_*, which must evaluate to a list of strings.

    Field names are converted to uppercase before being returned.

    Args:
        string_: Python expression that evaluates to a list of strings.
        varname: name used in error messages.

    Returns:
        list of uppercase strings.

    Raises:
        RuntimeError: if the result is not a list, or not all elements
            are strings.
    """
    # SECURITY: eval() executes arbitrary code. Only call this on trusted
    # input (e.g. configuration authored by the user running the program).
    ff = eval(string_)
    if not isinstance(ff, list):
        raise RuntimeError("{0!s} must be a list".format(varname))
    if not all(isinstance(x, str) for x in ff):
        raise RuntimeError("{0!s} must be a list of strings".format(varname))
    return [x.upper() for x in ff]
Evaluates string_, must evaluate to list of strings. Also converts field names to uppercase
def module_to_dict(module):
    """Create a dictionary of the module attributes listed in __all__.

    Args:
        module: a module object that defines ``__all__``.

    Returns:
        {"(attribute name)": attribute, ...}
    """
    # getattr() is the idiomatic spelling of module.__getattribute__(key).
    return {name: getattr(module, name) for name in module.__all__}
Creates a dictionary whose keys are module.__all__ Returns: {"(attribute name)": attribute, ...}