def list_styles(self): known = sorted(self.defaults.known_styles) if not known: err_exit('No styles', 0) for style in known: if style == self.defaults.default_style: print(style, '(default)') else: print(style) sys.exit(0)
Print available styles and exit.
def render_file(self, filename): dirname, basename = split(filename) with changedir(dirname): infile = abspath(basename) outfile = abspath('.%s.html' % basename) self.docutils.publish_file(infile, outfile, self.styles) return outfile
Convert a reST file to HTML.
def render_long_description(self, dirname): with changedir(dirname): self.setuptools.check_valid_package() long_description = self.setuptools.get_long_description() outfile = abspath('.long-description.html') self.docutils.publish_string(long_description, outfile, self.styles) return outfile
Convert a package's long description to HTML.
def open_in_browser(self, outfile): if self.browser == 'default': webbrowser.open('file://%s' % outfile) else: browser = webbrowser.get(self.browser) browser.open('file://%s' % outfile)
Open the given HTML file in a browser.
def run(self): os.environ['JARN_RUN'] = '1' self.python.check_valid_python() args = self.parse_options(self.args) if args: arg = args[0] else: arg = os.curdir if arg: arg = expanduser(arg) if isfile(arg): outfile = self.render_file(arg) elif isdir(arg): outfile = self.render_long_description(arg) else: err_exit('No such file or directory: %s' % arg) self.open_in_browser(outfile)
Render and display Python package documentation.
def combine_pdf_as_bytes(pdfs: List[BytesIO]) -> bytes: writer = PdfWriter() for pdf in pdfs: writer.addpages(PdfReader(pdf).pages) bio = BytesIO() writer.write(bio) bio.seek(0) output = bio.read() bio.close() return output
Combine PDFs and return a byte-string with the result. Arguments --------- pdfs A list of BytesIO representations of PDFs
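A minimal usage sketch for the helper above; the file names are placeholders and the function is assumed to be importable from the surrounding module:

from io import BytesIO

# Read two PDFs from disk into BytesIO buffers, merge them with
# combine_pdf_as_bytes, and write the combined document back out.
with open("part1.pdf", "rb") as a, open("part2.pdf", "rb") as b:
    merged = combine_pdf_as_bytes([BytesIO(a.read()), BytesIO(b.read())])

with open("merged.pdf", "wb") as out:
    out.write(merged)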
def split(self, granularity_after_split, exclude_partial=True): if granularity_after_split == Granularity.DAY: return self.get_days() elif granularity_after_split == Granularity.WEEK: return self.get_weeks(exclude_partial) elif granularity_after_split == Granularity.MONTH: return self.get_months(exclude_partial) elif granularity_after_split == Granularity.QUARTER: return self.get_quarters(exclude_partial) elif granularity_after_split == Granularity.HALF_YEAR: return self.get_half_years(exclude_partial) elif granularity_after_split == Granularity.YEAR: return self.get_years(exclude_partial) else: raise Exception("Invalid granularity: %s" % granularity_after_split)
Split a period into a given granularity. Optionally include partial periods at the start and end of the period.
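For illustration, assuming `period` is an existing instance of the class this method belongs to, calls might look like this sketch (Granularity and exclude_partial come from the code above):

# Split an existing period into whole calendar months, dropping partial
# months at either end.
months = period.split(Granularity.MONTH, exclude_partial=True)

# Daily granularity ignores the exclude_partial flag, per the code above.
days = period.split(Granularity.DAY)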
def config_dict(config): return dict( (key, getattr(config, key)) for key in config.values )
Given a Sphinx config object, return a dictionary of config values.
def from_defn(cls, defn): "Return the first Repl subclass that works with this" instances = (subcl(defn) for subcl in cls.__subclasses__()) return next(filter(None, instances)f from_defn(cls, defn): "Return the first Repl subclass that works with this" instances = (subcl(defn) for subcl in cls.__subclasses__()) return next(filter(None, instances))
Return the first Repl subclass that works with this
def from_definition(cls, defn, names={}): repls = map(Repl.from_defn, defn.get('replace', [])) self = cls(repls) vars(self).update(names) vars(self).update(defn.get('using', {})) return self
A definition may contain the following members: - using: a dictionary of variables available for substitution - replace: a list of replacement definitions.
def data(self, index, role=Qt.DisplayRole): if not index.isValid(): return None if role == Qt.DisplayRole or role == Qt.EditRole: return ' ' if role == Qt.BackgroundColorRole: color = self.color_da[index.row(), index.column()].values return QtGui.QColor.fromRgbF(*color) return None
Cell content
def headerData(self, section, orientation, role=Qt.DisplayRole): if role != Qt.DisplayRole: return None if orientation == Qt.Vertical: return six.text_type(self.color_da.cmap[section].values) return super(ColormapModel, self).headerData(section, orientation, role)
Return header data
def get_colormap(cls, names=[], N=10, *args, **kwargs): names = safe_list(names) obj = cls(names, N, *args, **kwargs) vbox = obj.layout() buttons = QDialogButtonBox( QDialogButtonBox.Ok | QDialogButtonBox.Cancel, parent=obj) buttons.button(QDialogButtonBox.Ok).setEnabled(False) vbox.addWidget(buttons) buttons.accepted.connect(obj.accept) buttons.rejected.connect(obj.reject) obj.table.selectionModel().selectionChanged.connect( lambda indices: buttons.button(QDialogButtonBox.Ok).setEnabled( bool(indices))) accepted = obj.exec_() if accepted: return obj.table.chosen_colormap
Open a :class:`ColormapDialog` and get a colormap Parameters ---------- %(ColormapModel.parameters)s Other Parameters ---------------- ``*args, **kwargs`` Anything else that is passed to the ColormapDialog Returns ------- str or matplotlib.colors.Colormap Either the name of a standard colormap available via :func:`psy_simple.colors.get_cmap` or a colormap
def show_colormap(cls, names=[], N=10, show=True, *args, **kwargs): names = safe_list(names) obj = cls(names, N, *args, **kwargs) vbox = obj.layout() buttons = QDialogButtonBox(QDialogButtonBox.Close, parent=obj) buttons.rejected.connect(obj.close) vbox.addWidget(buttons) if show: obj.show() return obj
Show a colormap dialog Parameters ---------- %(show_colormaps.parameters.no_use_qt)s
def cmd_list(args): for penlist in penStore.data: puts(penlist + " (" + str(len(penStore.data[penlist])) + ")")
List all elements in pen
def cmd_all(args): for penlist in penStore.data: puts(penlist) with indent(4, ' -'): for penfile in penStore.data[penlist]: puts(penfile)
List everything recursively
def cmd_create(args): name = args.get(0) if name: penStore.createList(name) else: puts("not valid")
Creates a list
def cmd_touch_note(args): major = args.get(0) minor = args.get(1) if major in penStore.data: if minor is None: # show items in list for note in penStore.data[major]: puts(note) elif minor in penStore.data[major]: penStore.openNote(major, minor) else: penStore.createNote(major, minor) penStore.openNote(major, minor) else: puts("No list of that name.")
Create a note
def cmd_delete(args): major = args.get(0) minor = args.get(1) if major is not None: if major in penStore.data: if minor is None: if len(penStore.data[major]) > 0: if raw_input("are you sure (y/n)? ") not in ['y', 'Y', 'yes', 'Yes']: return ExitStatus.ABORT penStore.deleteList(major) puts("list deleted") elif minor in penStore.data[major]: penStore.deleteNote(major, minor) puts("note deleted") else: puts("no such note, sorry! (%s)" % minor) else: puts("no such list, sorry! (%s)" % major) else: print """ - pen: delete help ------------------------------------------------------------ pen delete <list> deletes list and all of its notes pen delete <list> <note> deletes note """
Deletes a list or a note
def restclient_admin_required(view_func): def wrapper(request, *args, **kwargs): template = 'access_denied.html' if hasattr(settings, 'RESTCLIENTS_ADMIN_AUTH_MODULE'): auth_func = import_string(settings.RESTCLIENTS_ADMIN_AUTH_MODULE) else: context = {'error_msg': ( "Your application must define an authorization function as " "RESTCLIENTS_ADMIN_AUTH_MODULE in settings.py.")} return render(request, template, context=context, status=401) service = args[0] if len(args) > 0 else None url = args[1] if len(args) > 1 else None if auth_func(request, service, url): return view_func(request, *args, **kwargs) return render(request, template, status=401) return login_required(function=wrapper)
View decorator that checks whether the user is permitted to view proxy restclients. Calls login_required in case the user is not authenticated.
def open_file(filepath): if sys.platform.startswith('darwin'): subprocess.Popen(('open', filepath)) elif os.name == 'nt': os.startfile(filepath) elif os.name == 'posix': subprocess.Popen(('xdg-open', filepath))
Open file with the default system app. Copied from https://stackoverflow.com/a/435669/1224456
def destination_heuristic(data): counter = collections.Counter() for entry in data: file_field = entry['fields'].get('file') if not file_field: continue path = os.path.dirname(file_field) counter[path] += 1 if not counter: # No paths found raise click.ClickException( 'Path finding heuristics failed: no paths in the database' ) # Find the paths that appear most often sorted_paths = sorted(counter, key=counter.get, reverse=True) groupby = itertools.groupby(sorted_paths, key=counter.get) _, group = next(groupby) # We know that there's at least one candidate. Make sure it's # the only one candidate = next(group) try: next(group) except StopIteration: return candidate else: raise click.ClickException( 'Path finding heuristics failed: ' 'there are multiple equally valid paths in the database' )
A heuristic to get the folder with all other files from bib, using majority vote.
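A small illustration of the majority vote, using hypothetical bib entries shaped like the ones the function reads (each entry carries its file path under entry['fields']['file']):

# Two of the three file fields live under /home/me/papers, so that
# directory wins the vote.
data = [
    {"fields": {"file": "/home/me/papers/a.pdf"}},
    {"fields": {"file": "/home/me/papers/b.pdf"}},
    {"fields": {"file": "/home/me/other/c.pdf"}},
]
assert destination_heuristic(data) == "/home/me/papers"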
def remove_entry(data, entry): ''' Remove an entry in place. ''' file_field = entry['fields'].get('file') if file_field: try: os.remove(file_field) except IOError: click.echo('This entry\'s file was missing') data.remove(entry)
Remove an entry in place.
def string_to_basename(s): ''' Converts to lowercase, removes non-alpha characters, and converts spaces to hyphens. ''' s = s.strip().lower() s = re.sub(r'[^\w\s-]', '', s) return re.sub(r'[\s-]+', '-', s)
Converts to lowercase, removes non-alpha characters, and converts spaces to hyphens.
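A couple of illustrative calls; the behaviour follows directly from the three regex steps above:

assert string_to_basename("  Hello, World!  ") == "hello-world"
assert string_to_basename("My 2nd Paper -- Draft") == "my-2nd-paper-draft"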
def editor(*args, **kwargs): ''' Wrapper for `click.edit` that raises an error when None is returned. ''' result = click.edit(*args, **kwargs) if result is None: msg = 'Editor exited without saving, command aborted' raise click.ClickException(msg) return result
Wrapper for `click.edit` that raises an error when None is returned.
def parse_gpx(gpx_element, gpxns=None): gpxns = gpxns if gpxns is not None else determine_gpx_namespace(gpx_element) if gpx_element.tag != gpxns+'gpx': raise ValueError("No gpx root element") version = gpx_element.attrib['version'] if version == '1.0': return parse_gpx_1_0(gpx_element, gpxns=gpxns) elif version == '1.1': return parse_gpx_1_1(gpx_element, gpxns=gpxns) else: raise ValueError("Cannot parse GPX version {0}".format(version))
Parse a GPX file into a GpxModel. Args: gpx_element: The root <gpx> element of an XML document containing a version attribute. GPX versions 1.0 and 1.1 are supported. gpxns: The XML namespace for GPX in Clarke notation (i.e. delimited by curly braces). Returns: A GpxModel representing the data from the supplies xml. Raises: ValueError: The supplied XML could not be parsed as GPX.
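A minimal usage sketch; the file name is a placeholder and the element tree is built with the standard library:

import xml.etree.ElementTree as ET

# Parse a GPX document from disk and hand its root <gpx> element to
# parse_gpx, which dispatches on the version attribute (1.0 or 1.1).
tree = ET.parse("track.gpx")
model = parse_gpx(tree.getroot())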
def backup_file(filename): if not os.path.exists(filename): return BACKUP_SUFFIX = ".sprinter.bak" backup_filename = filename + BACKUP_SUFFIX shutil.copyfile(filename, backup_filename)
create a backup of the file desired
def inject(self, filename, content): # ensure content always has one trailing newline content = _unicode(content).rstrip() + "\n" if filename not in self.inject_dict: self.inject_dict[filename] = "" self.inject_dict[filename] += content
add the injection content to the dictionary
def commit(self): self.logger.debug("Starting injections...") self.logger.debug("Injections dict is:") self.logger.debug(self.inject_dict) self.logger.debug("Clear list is:") self.logger.debug(self.clear_set) for filename, content in self.inject_dict.items(): content = _unicode(content) self.logger.debug("Injecting values into %s..." % filename) self.destructive_inject(filename, content) for filename in self.clear_set: self.logger.debug("Clearing injection from %s..." % filename) self.destructive_clear(filename)
commit the injections desired, overwriting any previous injections in the file.
def injected(self, filename): full_path = os.path.expanduser(filename) if not os.path.exists(full_path): return False with codecs.open(full_path, 'r+', encoding="utf-8") as fh: contents = fh.read() return self.wrapper_match.search(contents) is not None
Return true if the file has already been injected before.
def destructive_inject(self, filename, content): content = _unicode(content) backup_file(filename) full_path = self.__generate_file(filename) with codecs.open(full_path, 'r', encoding="utf-8") as f: new_content = self.inject_content(f.read(), content) with codecs.open(full_path, 'w+', encoding="utf-8") as f: f.write(new_content)
Injects the injections desired immediately. This should generally be run only during the commit phase, when no future injections will be done.
def __generate_file(self, file_path): file_path = os.path.expanduser(file_path) if not os.path.exists(os.path.dirname(file_path)): self.logger.debug("Directories missing! Creating directories for %s..." % file_path) os.makedirs(os.path.dirname(file_path)) if not os.path.exists(file_path): open(file_path, "w+").close() return file_path
Generate the file at the file_path desired. Creates any needed directories on the way. returns the absolute path of the file.
def in_noninjected_file(self, file_path, content): if os.path.exists(file_path): file_content = codecs.open(file_path, encoding="utf-8").read() file_content = self.wrapper_match.sub(u"", file_content) else: file_content = "" return file_content.find(content) != -1
Checks if a string exists in the file, sans the injected content
def inject_content(self, content, inject_string): inject_string = _unicode(inject_string) content = self.wrapper_match.sub("", _unicode(content)) if self.override_match: sprinter_overrides = self.override_match.search(content) if sprinter_overrides: content = self.override_match.sub("", content) sprinter_overrides = sprinter_overrides.groups()[0] else: sprinter_overrides = "" content += """ %s %s %s """ % (self.wrapper, inject_string.rstrip(), self.wrapper) if self.override_match: content += sprinter_overrides.rstrip() + "\n" return content
Inject inject_string into a text buffer, wrapped with #{{ wrapper }} comments if condition lambda is not satisfied or is None. Remove old instances of injects if they exist.
def clear_content(self, content): content = _unicode(content) return self.wrapper_match.sub("", content)
Clear the injected content from the content buffer, and return the results
def get_all_orders_ungrouped(self): for olist in self._orders.values(): for order in olist.orders: yield order
Uses a generator to return all orders within. :py:class:`MarketOrder` objects are yielded directly, instead of being grouped in :py:class:`MarketItemsInRegionList` instances. .. note:: This is a generator! :rtype: generator :returns: Generates a list of :py:class:`MarketOrder` instances.
def add_order(self, order): # This key is used to group the orders based on region. key = '%s_%s' % (order.region_id, order.type_id) if not self._orders.has_key(key): # We don't have any orders for this yet. Prep the region+item # combo by instantiating a new MarketItemsInRegionList for # the MarketOrders. self.set_empty_region( order.region_id, order.type_id, order.generated_at ) # The MarketOrder gets stuffed into the MarketItemsInRegionList for this # item+region combo. self._orders[key].add_order(order)
Adds a MarketOrder instance to the list of market orders contained within this order list. Does some behind-the-scenes magic to get it all ready for serialization. :param MarketOrder order: The order to add to this order list.
def set_empty_region(self, region_id, type_id, generated_at, error_if_orders_present=True): key = '%s_%s' % (region_id, type_id) if error_if_orders_present and self._orders.has_key(key): raise ItemAlreadyPresentError( "Orders already exist for the given region and type ID. " "Pass error_if_orders_present=False to disable this failsafe, " "if desired." ) self._orders[key] = MarketItemsInRegionList( region_id, type_id, generated_at)
Prepares for the given region+item combo by instantiating a :py:class:`MarketItemsInRegionList` instance, which will track region ID, type ID, and generated time. This is mostly used for the JSON deserialization process in case there are no orders for the given region+item combo. :param int region_id: The region ID. :param int type_id: The item's type ID. :param datetime.datetime generated_at: The time that the order set was generated. :keyword bool error_if_orders_present: If True, raise an exception if an order already exists for this item+region combo when this is called. This failsafe may be disabled by passing False here.
def add_entry(self, entry): # This key is used to group the orders based on region. key = '%s_%s' % (entry.region_id, entry.type_id) if not self._history.has_key(key): # We don't have any orders for this yet. Prep the region+item # combo by instantiating a new MarketItemsInRegionList for # the MarketOrders. self.set_empty_region( entry.region_id, entry.type_id, entry.generated_at ) # The MarketOrder gets stuffed into the MarketItemsInRegionList for this # item+region combo. self._history[key].add_entry(entry)
Adds a MarketHistoryEntry instance to the list of market history entries contained within this instance. Does some behind-the-scenes magic to get it all ready for serialization. :param MarketHistoryEntry entry: The history entry to add to instance.
def set_empty_region(self, region_id, type_id, generated_at, error_if_entries_present=True): key = '%s_%s' % (region_id, type_id) if error_if_entries_present and self._history.has_key(key): raise ItemAlreadyPresentError( "History entries already exist for the given region and type ID. " "Pass error_if_entries_present=False to disable this failsafe, " "if desired." ) self._history[key] = HistoryItemsInRegionList( region_id, type_id, generated_at)
Prepares for the given region+item combo by instantiating a :py:class:`HistoryItemsInRegionList` instance, which will track region ID, type ID, and generated time. This is mostly used for the JSON deserialization process in case there are no orders for the given region+item combo. :param int region_id: The region ID. :param int type_id: The item's type ID. :param datetime.datetime generated_at: The time that the order set was generated. :keyword bool error_if_entries_present: If True, raise an exception if an entry already exists for this item+region combo when this is called. This failsafe may be disabled by passing False here.
def _find_file(self, file_name: str, lookup_dir: Path) -> Path or None: '''Find a file in a directory by name. Check subdirectories recursively. :param file_name: Name of the file :lookup_dir: Starting directory :returns: Path to the found file or None if the file was not found :raises: FileNotFoundError ''' self.logger.debug(f'Trying to find the file {file_name} inside the directory {lookup_dir}') result = None for item in lookup_dir.rglob('*'): if item.name == file_name: result = item break else: raise FileNotFoundError(file_name) self.logger.debug(f'File found: {result}') return result
Find a file in a directory by name. Check subdirectories recursively. :param file_name: Name of the file :lookup_dir: Starting directory :returns: Path to the found file or None if the file was not found :raises: FileNotFoundError
def _shift_headings(self, content: str, shift: int) -> str: '''Shift Markdown headings in a string by a given value. The shift can be positive or negative. :param content: Markdown content :param shift: Heading shift :returns: Markdown content with headings shifted by ``shift`` ''' def _sub(heading): new_heading_level = len(heading.group('hashes')) + shift self.logger.debug(f'Shift heading level to {new_heading_level}, heading title: {heading.group("title")}') if new_heading_level <= 6: return f'{"#" * new_heading_level} {heading.group("title")}{heading.group("tail")}' else: self.logger.debug('New heading level is out of range, using bold paragraph text instead of heading') return f'**{heading.group("title")}**{heading.group("tail")}' return self._heading_pattern.sub(_sub, content)
Shift Markdown headings in a string by a given value. The shift can be positive or negative. :param content: Markdown content :param shift: Heading shift :returns: Markdown content with headings shifted by ``shift``
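An illustrative call, assuming `preprocessor` is an instance of the class that owns this method and its _heading_pattern matches ATX headings:

# Shifting by +1 turns '## Usage' into '### Usage', and by -1 back again;
# levels beyond 6 fall back to bold paragraph text as described above.
deeper = preprocessor._shift_headings("## Usage\n\nSome text\n", 1)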
def _find_top_heading_level(self, content: str) -> int: '''Find the highest level heading (i.e. having the least '#'s) in a Markdown string. :param content: Markdown content :returns: Maximum heading level detected; if no heading is found, 0 is returned ''' result = float('inf') for heading in self._heading_pattern.finditer(content): heading_level = len(heading.group('hashes')) if heading_level < result: result = heading_level self.logger.debug(f'Maximum heading level: {result}') return result if result < float('inf') else 0
Find the highest level heading (i.e. having the least '#'s) in a Markdown string. :param content: Markdown content :returns: Maximum heading level detected; if no heading is found, 0 is returned
def _adjust_image_paths(self, content: str, md_file_path: Path) -> str: '''Locate images referenced in a Markdown string and replace their paths with the absolute ones. :param content: Markdown content :param md_file_path: Path to the Markdown file containing the content :returns: Markdown content with absolute image paths ''' def _sub(image): image_caption = image.group('caption') image_path = md_file_path.parent / Path(image.group('path')) self.logger.debug( f'Updating image reference; user specified path: {image.group("path")}, ' + f'absolute path: {image_path}, caption: {image_caption}' ) return f'![{image_caption}]({image_path.absolute().as_posix()})' return self._image_pattern.sub(_sub, content)
Locate images referenced in a Markdown string and replace their paths with the absolute ones. :param content: Markdown content :param md_file_path: Path to the Markdown file containing the content :returns: Markdown content with absolute image paths
def create_view_from_dict(name, spec, template=None, cls=ActionsView): kwargs = dict(spec) if template is not None: kwargs.setdefault("template", template) actions = load_grouped_actions(kwargs, pop_keys=True) view = cls(name=name, **kwargs) if isinstance(view, ActionsView): view.actions.extend(actions) return view
Creates a view from a spec dict (typically, the YAML front-matter).
def _parse_argv(argv=copy(sys.argv)): cfg = DotDict() cfg_files = [] argv = argv[1:] # Skip command name while argv: arg = argv.pop(0) # split up arg in format --arg=val key_val = re.split('=| ', arg) arg = key_val[0] try: val = key_val[1] except IndexError: if len(argv) > 0 and argv[0][0] != '-': val = argv.pop(0) else: # No val available, probably a flag val = None if arg[0] == '-': key = arg.lstrip('-') if not val: val = True new_cfg = _dict_from_dotted(key, val) cfg = dict_merge(cfg, new_cfg) else: if arg.endswith(".yml"): cfg_files.append(arg) return cfg, cfg_files
return argv as a parsed dictionary, looks like the following: app --option1 likethis --option2 likethat --flag -> {'option1': 'likethis', 'option2': 'likethat', 'flag': True}
def _dict_from_dotted(key, val): split_key = key.split(".") split_key.reverse() for key_part in split_key: new_dict = DotDict() new_dict[key_part] = val val = new_dict return val
takes a key value pair like: key: "this.is.a.key" val: "the value" and returns a dictionary like: {"this": {"is": {"a": {"key": "the value" } } } }
def get_logger(name, CFG=None): logger = logging.getLogger(name) if CFG: # Make log directory if it doesn't exist for handler in CFG.get('handlers', {}).itervalues(): if 'filename' in handler: log_dir = os.path.dirname(handler['filename']) if not os.path.exists(log_dir): os.makedirs(log_dir) try: #TODO: This requires python 2.7 logging.config.dictConfig(CFG) except AttributeError: print >> sys.stderr, '"logging.config.dictConfig" doesn\'t seem to be supported in your python' raise return logger
set up logging for a service using the py 2.7 dictConfig
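A hedged sketch of the dictConfig-style CFG this expects; the handler layout below is an assumption for illustration, not taken from the project:

CFG = {
    "version": 1,
    "handlers": {
        "file": {
            "class": "logging.FileHandler",
            # the parent directory of this path is created by get_logger
            "filename": "logs/service.log",
        },
    },
    "root": {"handlers": ["file"], "level": "INFO"},
}
log = get_logger("my.service", CFG)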
def t_TITLE(self, token): ur'\#\s+<wca-title>(?P<title>.+)\n' token.value = token.lexer.lexmatch.group("title").decode("utf8") token.lexer.lineno += 1 return token
ur'\#\s+<wca-title>(?P<title>.+)\n'
def t_LABELDECL(self, token): ur'-\s<label>\s*\[(?P<label>.+?)\]\s*(?P<text>.+?)\n' label = token.lexer.lexmatch.group("label").decode("utf8") text = token.lexer.lexmatch.group("text").decode("utf8") token.value = (label, text) token.lexer.lineno += 1 return token
ur'-\s<label>\s*\[(?P<label>.+?)\]\s*(?P<text>.+?)\n'
def t_STATESHEADER(self, token): ur'\#\#\s+<states-list>(?P<title>[^<\n]*)\n' title = token.lexer.lexmatch.group("title").decode("utf8") token.value = title token.lexer.lineno += 1 return token
ur'\#\#\s+<states-list>(?P<title>[^<\n]*)\n'
def t_REGULATION(self, token): ur'(?P<indents>\s{4,})*-\s(?P<reg>[a-zA-Z0-9]+)\)\s*(?P<text>.+?[^ ])\n' indents = token.lexer.lexmatch.group("indents") indents = len(indents)/4 if indents else 0 reg = token.lexer.lexmatch.group("reg").decode("utf8") text = token.lexer.lexmatch.group("text").decode("utf8") token.value = (indents, reg, text) token.lexer.lineno += 1 return token
ur'(?P<indents>\s{4,})*-\s(?P<reg>[a-zA-Z0-9]+)\)\s*(?P<text>.+?[^ ])\n'
def t_GUIDELINE(self, token): ur'-\s(?P<reg>[a-zA-Z0-9]+[+]+)\)\s\[(?P<label>.+?)\]\s*(?P<text>.+?[^ ])\n' reg = token.lexer.lexmatch.group("reg").decode("utf8") text = token.lexer.lexmatch.group("text").decode("utf8") label = token.lexer.lexmatch.group("label").decode("utf8") token.value = (0, reg, text, label) token.lexer.lineno += 1 return token
ur'-\s(?P<reg>[a-zA-Z0-9]+[+]+)\)\s\[(?P<label>.+?)\]\s*(?P<text>.+?[^ ])\n'
def t_TEXT(self, token): ur'(?P<text>[^<#\n ].+?[^ ])(?=\n)' text = token.lexer.lexmatch.group("text").decode("utf8") token.value = text return token
ur'(?P<text>[^<#\n ].+?[^ ])(?=\n)'
def t_PARBREAK(self, token): ur'\n{2,}' token.lexer.lineno += len(token.value) return token
ur'\n{2,}'
def t_trailingwhitespace(self, token): ur'.+? \n' print "Error: trailing whitespace at line %s in text '%s'" % (token.lexer.lineno + 1, token.value[:-1]) token.lexer.lexerror = True token.lexer.skip(1)
ur'.+? \n'
def register_event(cls, event_name, event, method): log.info('@Registry.register_event `{}` with subscriber `{}`' .format(event_name, method.__name__)) if event_name not in cls._events: cls._events[event_name] = {} if event not in cls._events[event_name]: cls._events[event_name][event] = [] cls._events[event_name][event].append(method)
Register an event class on its name with a method to process it. :param event_name: name of the event. :param event: class of the event. :param method: a method used to process this event.
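A hypothetical registration call; the event name, event class and handler below are illustrative, not from the source:

# Route UserCreated events registered under 'user.created' to a handler.
Registry.register_event("user.created", UserCreated, send_welcome_email)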
def register_producer(cls, producer): log.info('@Registry.register_producer `{}`' .format(producer.__class__.__name__)) cls._producer = (cls._producer or producer)
Register a default producer for events to use. :param producer: the default producer to dispatch events on.
def exec_before_request_actions(actions, **kwargs): groups = ("before", "before_" + flask.request.method.lower()) return execute_actions(actions, limit_groups=groups, **kwargs)
Execute actions in the "before" and "before_METHOD" groups
def exec_after_request_actions(actions, response, **kwargs): current_context["response"] = response groups = ("after_" + flask.request.method.lower(), "after") try: rv = execute_actions(actions, limit_groups=groups, **kwargs) except ReturnValueException as e: rv = e.value if rv: return rv return response
Executes actions of the "after" and "after_METHOD" groups. A "response" var will be injected in the current context.
def full_exec_request_actions(actions, func=None, render_func=None): response = None try: exec_before_request_actions(actions, catch_context_exit=False) exec_request_actions(actions, catch_context_exit=False) if func: response = func() except ContextExitException as e: response = e.result except ReturnValueException as e: response = e.value if render_func and response is None: response = render_func() return exec_after_request_actions(actions, response)
Full process to execute before, during and after actions. If func is specified, it will be called after exec_request_actions() unless a ContextExitException was raised. If render_func is specified, it will be called after exec_request_actions() only if there is no response. exec_after_request_actions() is always called.
def as_view(url=None, methods=None, view_class=ActionsView, name=None, url_rules=None, **kwargs): def decorator(f): if url is not None: f = expose(url, methods=methods)(f) clsdict = {"name": name or f.__name__, "actions": getattr(f, "actions", None), "url_rules": url_rules or getattr(f, "urls", None)} if isinstance(f, WithActionsDecorator): f = f.func clsdict['func'] = f def constructor(self, **ctorkwargs): for k, v in kwargs.items(): if k not in ctorkwargs or ctorkwargs[k] is None: ctorkwargs[k] = v view_class.__init__(self, func=f, **ctorkwargs) clsdict["__init__"] = constructor return type(f.__name__, (view_class,), clsdict) return decorator
Decorator to transform a function into a view class. Be warned that this will replace the function with the view class.
def register(self, target): for rule, options in self.url_rules: target.add_url_rule(rule, self.name, self.dispatch_request, **options)
Registers url_rules on the blueprint
def view(self, *args, **kwargs): def decorator(f): kwargs.setdefault("view_class", self.view_class) return self.add_view(as_view(*args, **kwargs)(f)) return decorator
Decorator to automatically apply as_view decorator and register it.
def add_action_view(self, name, url, actions, **kwargs): view = ActionsView(name, url=url, self_var=self, **kwargs) if isinstance(actions, dict): for group, actions in actions.iteritems(): view.actions.extend(load_actions(actions, group=group or None)) else: view.actions.extend(load_actions(actions)) self.add_view(view) return view
Creates an ActionsView instance and registers it.
def main(argv: Optional[Sequence[str]] = None) -> None: parser = ArgumentParser(description="Convert Jupyter Notebook exams to PDFs") parser.add_argument( "--exam", type=int, required=True, help="Exam number to convert", dest="exam_num", ) parser.add_argument( "--time", type=str, required=True, help="Time of exam to convert" ) parser.add_argument( "--date", type=str, required=True, help="The date the exam will take place" ) args = parser.parse_args(argv) process(args.exam_num, args.time, args.date)
Parse arguments and process the exam assignment.
def get_profile_model(): auth_profile_module = getattr(settings, 'AUTH_PROFILE_MODULE', None) profile_model = None if auth_profile_module: # get the profile model. TODO: super flacky, refactor app_label, model = auth_profile_module.split('.') profile_model = getattr(__import__("%s.models" % app_label, \ globals(), locals(), [model, ], -1), model, None) return profile_model
Returns configured user profile model or None if not found
def is_on(self): return self.status not in (CONST.STATUS_OFF, CONST.STATUS_OFFLINE, CONST.STATUS_CLOSED, CONST.STATUS_OPEN)
Get sensor state. Assume offline or open (worst case).
def extract_meta(self, text): first_line = True metadata = [] content = [] metadata_parsed = False for line in text.split('\n'): if first_line: first_line = False if line.strip() != '---': raise MetaParseException('Invalid metadata') else: continue if line.strip() == '' and not metadata_parsed: continue if line.strip() == '---' and not metadata_parsed: # reached the last line metadata_parsed = True elif not metadata_parsed: metadata.append(line) else: content.append(line) content = '\n'.join(content) try: metadata = yaml.load('\n'.join(metadata)) except: raise content = text metadata = yaml.load('') return content, metadata
Takes the entire file as input. Reads the first YAML document as metadata and the rest of the document as text.
def set_defaults(self): for key, value in self.spec.items(): setattr(self, key.upper(), value.get("default", None))
Add each model entry with its default
def load_env(self): for key, value in self.spec.items(): if value['type'] in (dict, list): envar = (self.env_prefix + "_" + key).upper() try: envvar = env.json(envar, default=getattr(self, key.upper(), value.get('default'))) except ConfigurationError as _err: #pragma: no cover print(_err) self.log.critical(f"Error parsing json from env var. {os.environ.get(envar)}") print(envar) raise else: envvar = env((self.env_prefix + "_" + key).upper(), default=getattr(self, key.upper(), value.get('default')), cast=value['type']) setattr(self, key.upper(), envvar)
Load the model from environment variables
def parse_args(self): parser = ArgumentParser(description='', formatter_class=RawTextHelpFormatter) parser.add_argument("--generate", action="store", dest='generate', choices=['command', 'docker-run', 'docker-compose', 'ini', 'env', 'kubernetes', 'readme', 'drone-plugin'], help="Generate a template ") parser.add_argument("--settings", action="store", dest='settings', help="Specify a settings file. (ie settings.dev)") for key, value in self.spec.items(): if value['type'] in [str, int, float]: parser.add_argument(f"--{key.lower()}", action="store", dest=key, type=value['type'], choices=value.get("choices"), help=self.help(value)) elif value['type'] == bool: parser.add_argument(f"--{key.lower()}", action="store", dest=key, type=lambda x:bool(strtobool(x)), choices=value.get("choices"), help=self.help(value)) elif value['type'] == list: parser.add_argument(f"--{key.lower()}", action="store", dest=key, nargs='+', choices=value.get("choices"), help=self.help(value)) elif value['type'] == dict: parser.add_argument(f"--{key.lower()}", action="store", dest=key, type=json.loads, choices=value.get("choices"), help=self.help(value)) args, _unknown = parser.parse_known_args() return args
Parse the cli args Returns: args (namespace): The args
def add_args(self, args): for key, value in vars(args).items(): if value is not None: setattr(self, key.upper(), value)
Add the args Args: args (namespace): The commandline args
def load_ini(self, ini_file): if ini_file and not os.path.exists(ini_file): self.log.critical(f"Settings file specified but not found. {ini_file}") sys.exit(1) if not ini_file: ini_file = f"{self.cwd}/settings.ini" if os.path.exists(ini_file): config = configparser.RawConfigParser(allow_no_value=True) config.read(ini_file) for key, value in self.spec.items(): entry = None if value['type'] == str: entry = config.get("settings", option=key.lower(), fallback=None) elif value['type'] == bool: entry = config.getboolean("settings", option=key.lower(), fallback=None) elif value['type'] == int: entry = config.getint("settings", option=key.lower(), fallback=None) elif value['type'] == float: entry = config.getfloat("settings", option=key.lower(), fallback=None) elif value['type'] in [list, dict]: entries = config.get("settings", option=key.lower(), fallback=None) if entries: try: entry = json.loads(entries) except json.decoder.JSONDecodeError as _err: #pragma: no cover self.log.critical(f"Error parsing json from ini file. {entries}") sys.exit(1) if entry is not None: setattr(self, key.upper(), entry)
Load the contents from the ini file Args: ini_file (str): The file from which the settings should be loaded
def check_required(self): die = False for key, value in self.spec.items(): if not getattr(self, key.upper()) and value['required']: print(f"{key} is a required setting. " "Set via command-line params, env or file. " "For examples, try '--generate' or '--help'.") die = True if die: sys.exit(1)
Check all required settings have been provided
def generate(self): otype = getattr(self, 'GENERATE') if otype: if otype == 'env': self.generate_env() elif otype == "command": self.generate_command() elif otype == "docker-run": self.generate_docker_run() elif otype == "docker-compose": self.generate_docker_compose() elif otype == "kubernetes": self.generate_kubernetes() elif otype == 'ini': self.generate_ini() elif otype == 'readme': self.generate_readme() elif otype == 'drone-plugin': self.generate_drone_plugin() sys.exit(0)
Generate sample settings
def generate_env(self): for key in sorted(list(self.spec.keys())): if self.spec[key]['type'] in (dict, list): value = f"\'{json.dumps(self.spec[key].get('example', ''))}\'" else: value = f"{self.spec[key].get('example', '')}" print(f"export {self.env_prefix}_{key.upper()}={value}")
Generate sample environment variables
def generate_command(self): example = [] example.append(f"{sys.argv[0]}") for key in sorted(list(self.spec.keys())): if self.spec[key]['type'] == list: value = " ".join(self.spec[key].get('example', '')) elif self.spec[key]['type'] == dict: value = f"\'{json.dumps(self.spec[key].get('example', ''))}\'" else: value = self.spec[key].get('example', '') string = f" --{key.lower()} {value}" example.append(string) print(" \\\n".join(example))
Generate a sample command
def generate_docker_run(self): example = [] example.append("docker run -it") for key in sorted(list(self.spec.keys())): if self.spec[key]['type'] in (dict, list): value = f"\'{json.dumps(self.spec[key].get('example', ''))}\'" else: value = f"{self.spec[key].get('example', '')}" string = f" -e {self.env_prefix}_{key.upper()}={value}" example.append(string) example.append(" <container-name>") print(" \\\n".join(example))
Generate a sample docker run
def generate_docker_compose(self): example = {} example['app'] = {} example['app']['environment'] = [] for key in sorted(list(self.spec.keys())): if self.spec[key]['type'] in (dict, list): value = f"\'{json.dumps(self.spec[key].get('example', ''))}\'" else: value = f"{self.spec[key].get('example', '')}" example['app']['environment'].append(f"{self.env_prefix}_{key.upper()}={value}") print(yaml.dump(example, default_flow_style=False))
Generate a sample docker compose
def generate_ini(self): example = [] example.append("[settings]") for key in sorted(list(self.spec.keys())): if self.spec[key]['type'] in [list, dict]: value = json.dumps(self.spec[key].get('example', '')) else: value = self.spec[key].get('example', '') string = f"{key.lower()}={value}" example.append(string) print("\n".join(example))
Generate a sample ini
def generate_kubernetes(self): example = {} example['spec'] = {} example['spec']['containers'] = [] example['spec']['containers'].append({"name": '', "image": '', "env": []}) for key, value in self.spec.items(): if value['type'] in (dict, list): kvalue = f"\'{json.dumps(value.get('example', ''))}\'" else: kvalue = f"{value.get('example', '')}" entry = {"name": f"{self.env_prefix}_{key.upper()}", "value": kvalue} example['spec']['containers'][0]['env'].append(entry) print(yaml.dump(example, default_flow_style=False))
Generate a sample kubernetes container spec
def generate_drone_plugin(self): example = {} example['pipeline'] = {} example['pipeline']['appname'] = {} example['pipeline']['appname']['image'] = "" example['pipeline']['appname']['secrets'] = "" for key, value in self.spec.items(): if value['type'] in (dict, list): kvalue = f"\'{json.dumps(value.get('example', ''))}\'" else: kvalue = f"{value.get('example', '')}" example['pipeline']['appname'][key.lower()] = kvalue print(yaml.dump(example, default_flow_style=False))
Generate a sample drone plugin configuration
def generate_readme(self): print("## Examples of settings runtime params") print("### Command-line parameters") print("```") self.generate_command() print("```") print("### Environment variables") print("```") self.generate_env() print("```") print("### ini file") print("```") self.generate_ini() print("```") print("### docker run") print("```") self.generate_docker_run() print("```") print("### docker compose") print("```") self.generate_docker_compose() print("```") print("### kubernetes") print("```") self.generate_kubernetes() print("```") print("### drone plugin") print("```") self.generate_drone_plugin() print("```")
Generate a readme with all the generators
def file_exists(self, subdir, prefix, suffix): real_path = os.path.join(self.STATIC_DIR, self.DIR, subdir, prefix + suffix) return os.path.exists(real_path)
Returns true if the resource file exists, else False. Positional arguments: subdir -- sub directory name under the resource's main directory (e.g. css or js, or an empty string if the resource's directory structure is flat). prefix -- file name without the file extension. suffix -- file extension (if self.minify = True, includes .min before the extension).
def add_css(self, subdir, file_name_prefix): suffix_maxify = '.css' suffix_minify = '.min.css' if self.minify and self.file_exists(subdir, file_name_prefix, suffix_minify): self.resources_css.append(posixpath.join(self.DIR, subdir, file_name_prefix + suffix_minify)) elif self.file_exists(subdir, file_name_prefix, suffix_maxify): self.resources_css.append(posixpath.join(self.DIR, subdir, file_name_prefix + suffix_maxify)) else: file_path = os.path.join(self.STATIC_DIR, self.DIR, subdir, file_name_prefix + suffix_maxify) raise IOError('Resource file not found: {0}'.format(file_path))
Add a css file for this resource. If self.minify is True, checks if the .min.css file exists. If not, falls back to non-minified file. If that file also doesn't exist, IOError is raised. Positional arguments: subdir -- sub directory name under the resource's main directory (e.g. css or js, or an empty string). file_name_prefix -- file name without the file extension.
def read_dataframe_from_xls(desired_type: Type[T], file_path: str, encoding: str, logger: Logger, **kwargs) -> pd.DataFrame: return pd.read_excel(file_path, **kwargs)
We register this method rather than the other because pandas guesses the encoding by itself. Also, it is easier to put a breakpoint and debug by trying various options to find the good one (in streaming mode you just have one try and then the stream is consumed) :param desired_type: :param file_path: :param encoding: :param logger: :param kwargs: :return:
def read_df_or_series_from_csv(desired_type: Type[pd.DataFrame], file_path: str, encoding: str, logger: Logger, **kwargs) -> pd.DataFrame: if desired_type is pd.Series: # as recommended in http://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.from_csv.html # and from http://stackoverflow.com/questions/15760856/how-to-read-a-pandas-series-from-a-csv-file # TODO there should be a way to decide between row-oriented (squeeze=True) and col-oriented (index_col=0) # note : squeeze=true only works for row-oriented, so we dont use it. We rather expect that a row-oriented # dataframe would be convertible to a series using the df to series converter below if 'index_col' not in kwargs.keys(): one_col_df = pd.read_csv(file_path, encoding=encoding, index_col=0, **kwargs) else: one_col_df = pd.read_csv(file_path, encoding=encoding, **kwargs) if one_col_df.shape[1] == 1: return one_col_df[one_col_df.columns[0]] else: raise Exception('Cannot build a series from this csv: it has more than two columns (one index + one value).' ' Probably the parsing chain $read_df_or_series_from_csv => single_row_or_col_df_to_series$' 'will work, though.') else: return pd.read_csv(file_path, encoding=encoding, **kwargs)
Helper method to read a dataframe from a csv file. By default this is well suited for a dataframe with headers in the first row, for example a parameter dataframe. :param desired_type: :param file_path: :param encoding: :param logger: :param kwargs: :return:
def get_default_pandas_parsers() -> List[AnyParser]: return [SingleFileParserFunction(parser_function=read_dataframe_from_xls, streaming_mode=False, supported_exts={'.xls', '.xlsx', '.xlsm'}, supported_types={pd.DataFrame}, option_hints=pandas_parsers_option_hints_xls), SingleFileParserFunction(parser_function=read_df_or_series_from_csv, streaming_mode=False, supported_exts={'.csv', '.txt'}, supported_types={pd.DataFrame, pd.Series}, option_hints=pandas_parsers_option_hints_csv), ]
Utility method to return the default parsers able to parse a dataframe or series from a file. :return:
def dict_to_df(desired_type: Type[T], dict_obj: Dict, logger: Logger, orient: str = None, **kwargs) -> pd.DataFrame: if len(dict_obj) > 0: first_val = dict_obj[next(iter(dict_obj))] if isinstance(first_val, dict) or isinstance(first_val, list): # --'full' table # default is index orientation orient = orient or 'index' # if orient is 'columns': # return pd.DataFrame(dict_obj) # else: return pd.DataFrame.from_dict(dict_obj, orient=orient) else: # --scalar > single-row or single-col # default is columns orientation orient = orient or 'columns' if orient == 'columns': return pd.DataFrame(dict_obj, index=[0]) else: res = pd.DataFrame.from_dict(dict_obj, orient=orient) res.index.name = 'key' return res.rename(columns={0: 'value'}) else: # for empty dictionaries, orientation does not matter # but maybe we should still create a column 'value' in this empty dataframe ? return pd.DataFrame.from_dict(dict_obj)
Helper method to convert a dictionary into a dataframe. It supports both simple key-value dicts as well as true table dicts. For this it uses pd.DataFrame constructor or pd.DataFrame.from_dict intelligently depending on the case. The orientation of the resulting dataframe can be configured, or left to default behaviour. Default orientation is different depending on the contents: * 'index' for 2-level dictionaries, in order to align as much as possible with the natural way to express rows in JSON * 'columns' for 1-level (simple key-value) dictionaries, so as to preserve the data types of the scalar values in the resulting dataframe columns if they are different :param desired_type: :param dict_obj: :param logger: :param orient: the orientation of the resulting dataframe. :param kwargs: :return:
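Two illustrative calls showing the default orientations described above; the logger argument is unused by the function, so None is passed:

import pandas as pd

# A flat key-value dict becomes a single-row frame ('columns' orientation).
single_row = dict_to_df(pd.DataFrame, {"a": 1, "b": 2.5}, logger=None)

# A dict of dicts becomes one row per outer key ('index' orientation).
table = dict_to_df(pd.DataFrame, {"r1": {"a": 1}, "r2": {"a": 3}}, logger=None)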
def single_row_or_col_df_to_series(desired_type: Type[T], single_rowcol_df: pd.DataFrame, logger: Logger, **kwargs)\ -> pd.Series: if single_rowcol_df.shape[0] == 1: # one row return single_rowcol_df.transpose()[0] elif single_rowcol_df.shape[1] == 2 and isinstance(single_rowcol_df.index, pd.RangeIndex): # two columns but the index contains nothing but the row number : we can use the first column d = single_rowcol_df.set_index(single_rowcol_df.columns[0]) return d[d.columns[0]] elif single_rowcol_df.shape[1] == 1: # one column and one index d = single_rowcol_df return d[d.columns[0]] else: raise ValueError('Unable to convert provided dataframe to a series : ' 'expected exactly 1 row or 1 column, found : ' + str(single_rowcol_df.shape) + '')
Helper method to convert a dataframe with one row or one or two columns into a Series :param desired_type: :param single_rowcol_df: :param logger: :param kwargs: :return:
def single_row_or_col_df_to_dict(desired_type: Type[T], single_rowcol_df: pd.DataFrame, logger: Logger, **kwargs)\ -> Dict[str, str]: if single_rowcol_df.shape[0] == 1: return single_rowcol_df.transpose()[0].to_dict() # return {col_name: single_rowcol_df[col_name][single_rowcol_df.index.values[0]] for col_name in single_rowcol_df.columns} elif single_rowcol_df.shape[1] == 2 and isinstance(single_rowcol_df.index, pd.RangeIndex): # two columns but the index contains nothing but the row number : we can use the first column d = single_rowcol_df.set_index(single_rowcol_df.columns[0]) return d[d.columns[0]].to_dict() elif single_rowcol_df.shape[1] == 1: # one column and one index d = single_rowcol_df return d[d.columns[0]].to_dict() else: raise ValueError('Unable to convert provided dataframe to a parameters dictionary : ' 'expected exactly 1 row or 1 column, found : ' + str(single_rowcol_df.shape) + '')
Helper method to convert a dataframe with one row or one or two columns into a dictionary :param desired_type: :param single_rowcol_df: :param logger: :param kwargs: :return:
def get_default_pandas_converters() -> List[Union[Converter[Any, pd.DataFrame], Converter[pd.DataFrame, Any]]]: return [ConverterFunction(from_type=pd.DataFrame, to_type=dict, conversion_method=single_row_or_col_df_to_dict), ConverterFunction(from_type=dict, to_type=pd.DataFrame, conversion_method=dict_to_df, option_hints=dict_to_single_row_or_col_df_opts), ConverterFunction(from_type=pd.DataFrame, to_type=pd.Series, conversion_method=single_row_or_col_df_to_series)]
Utility method to return the default converters associated to dataframes (from dataframe to other type, and from other type to dataframe) :return:
def full_subgraph(self, vertices): subgraph_vertices = {v for v in vertices} subgraph_edges = {edge for v in subgraph_vertices for edge in self._out_edges[v] if self._heads[edge] in subgraph_vertices} subgraph_heads = {edge: self._heads[edge] for edge in subgraph_edges} subgraph_tails = {edge: self._tails[edge] for edge in subgraph_edges} return DirectedGraph._raw( vertices=subgraph_vertices, edges=subgraph_edges, heads=subgraph_heads, tails=subgraph_tails, )
Return the subgraph of this graph whose vertices are the given ones and whose edges are all the edges of the original graph between those vertices.
def _raw(cls, vertices, edges, heads, tails): self = object.__new__(cls) self._vertices = vertices self._edges = edges self._heads = heads self._tails = tails # For future use, map each vertex to its outward and inward edges. # These could be computed on demand instead of precomputed. self._out_edges = collections.defaultdict(set) self._in_edges = collections.defaultdict(set) for edge in self._edges: self._out_edges[self._tails[edge]].add(edge) self._in_edges[self._heads[edge]].add(edge) return self
Private constructor for direct construction of a DirectedGraph from its constituents.
def from_out_edges(cls, vertices, edge_mapper): vertices = set(vertices) edges = set() heads = {} tails = {} # Number the edges arbitrarily. edge_identifier = itertools.count() for tail in vertices: for head in edge_mapper[tail]: edge = next(edge_identifier) edges.add(edge) heads[edge] = head tails[edge] = tail return cls._raw( vertices=vertices, edges=edges, heads=heads, tails=tails, )
Create a DirectedGraph from a collection of vertices and a mapping giving the vertices that each vertex is connected to.
def from_edge_pairs(cls, vertices, edge_pairs): vertices = set(vertices) edges = set() heads = {} tails = {} # Number the edges arbitrarily. edge_identifier = itertools.count() for tail, head in edge_pairs: edge = next(edge_identifier) edges.add(edge) heads[edge] = head tails[edge] = tail return cls._raw( vertices=vertices, edges=edges, heads=heads, tails=tails, )
Create a DirectedGraph from a collection of vertices and a collection of pairs giving links between the vertices.
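A small usage sketch tying the constructors above together with full_subgraph:

# Three-vertex cycle built from explicit edge pairs.
g = DirectedGraph.from_edge_pairs([1, 2, 3], [(1, 2), (2, 3), (3, 1)])

# Induced subgraph on {1, 2}: only the 1 -> 2 edge survives, since the
# other two edges touch vertex 3.
sub = g.full_subgraph([1, 2])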
def annotated(self): annotated_vertices = { vertex: AnnotatedVertex( id=vertex_id, annotation=six.text_type(vertex), ) for vertex_id, vertex in zip(itertools.count(), self.vertices) } annotated_edges = [ AnnotatedEdge( id=edge_id, annotation=six.text_type(edge), head=annotated_vertices[self.head(edge)].id, tail=annotated_vertices[self.tail(edge)].id, ) for edge_id, edge in zip(itertools.count(), self.edges) ] return AnnotatedGraph( vertices=annotated_vertices.values(), edges=annotated_edges, )
Return an AnnotatedGraph with the same structure as this graph.
def execute_command(working_dir, cmd, env_dict): proc_env = os.environ.copy() proc_env["PATH"] = "{}:{}:.".format(proc_env["PATH"], working_dir) proc_env.update(env_dict) proc = subprocess.Popen( cmd, cwd=working_dir, env=proc_env, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) status = proc.wait() stdout, stderr = proc.communicate() if status: msg = ( "Non zero {} exit from command {}\n" "Stdout: {}\n" "Stderr: {}\n" ).format(status, cmd, stdout, stderr) LOGGER.error(msg) raise RuntimeError(msg) LOGGER.info(stdout)
execute_command: run the command provided in the working dir specified adding the env_dict settings to the execution environment :param working_dir: path to directory to execute command also gets added to the PATH :param cmd: Shell command to execute :param env_dict: dictionary of additional env vars to be passed to the subprocess environment
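A minimal usage sketch; the directory, command and variable below are placeholders:

# Run a shell command in /tmp with one extra environment variable; a nonzero
# exit status raises RuntimeError per the helper above.
execute_command("/tmp", "echo $GREETING", {"GREETING": "hello"})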
def load(self): if self.exists(): with open(self.dot_file, 'r') as handle: self.update(json.load(handle)) if self.options['context'] is not None: self['context'] = self.options['context'] else: self.options['context'] = self['context'] if self.options['defaults'] is not None: self['defaults'] = self.options['defaults'] else: self.options['defaults'] = self['defaults'] if self.options['output'] is not None: self['output'] = self.options['output'] if self.options.get('inclusive', False): self['inclusive'] = True if self.options.get('exclude', []): self['exclude'].extend(self.options['exclude']) if self['output'] is None: self['output'] = os.path.join(os.getcwd(), 'dockerstache-output') self['output_path'] = self.abs_output_dir() self['input_path'] = self.abs_input_dir() if self['context'] is not None: self['context_path'] = absolute_path(self['context']) if self['defaults'] is not None: self['defaults_path'] = absolute_path(self['defaults'])
Read the dotfile and populate self. opts will override the dotfile settings; make sure everything is synced in both opts and this object.