code
stringlengths
52
7.75k
docs
stringlengths
1
5.85k
def _check_signatures(lines, **kwargs): trusted = kwargs.get("trusted", ()) signatures = tuple(kwargs.get("signatures", ())) alt_signatures = tuple(kwargs.get("alt_signatures", ())) min_reviewers = kwargs.get("min_reviewers", 3) matching = [] errors = [] signatures += alt_signatures test_signatures = re.compile("^({0})".format("|".join(signatures))) test_alt_signatures = re.compile("^({0})".format("|".join(alt_signatures))) for i, line in lines: if signatures and test_signatures.search(line): if line.endswith("."): errors.append(("M191", i)) if not alt_signatures or not test_alt_signatures.search(line): matching.append(line) else: errors.append(("M102", i)) if not matching: errors.append(("M101", 1)) errors.append(("M100", 1)) elif len(matching) < min_reviewers: pattern = re.compile('|'.join(map(lambda x: '<' + re.escape(x) + '>', trusted))) trusted_matching = list(filter(None, map(pattern.search, matching))) if len(trusted_matching) == 0: errors.append(("M100", 1)) return errors
Check that the signatures are valid. There should be at least three signatures. If not, one of them should be a trusted developer/reviewer. Formatting supported being: [signature] full name <email@address> :param lines: lines (lineno, content) to verify. :type lines: list :param signatures: list of supported signature :type signatures: list :param alt_signatures: list of alternative signatures, not counted :type alt_signatures: list :param trusted: list of trusted reviewers, the e-mail address. :type trusted: list :param min_reviewers: minimal number of reviewers needed. (Default 3) :type min_reviewers: int :return: errors as in (code, line number, *args) :rtype: list
def check_message(message, **kwargs):
    """Check the commit message format.

    Rules: the first line must start with a component name and a short
    description, then bullet points are expected, and finally signatures.

    :param message: full commit message to check
    :param allow_empty: when True, an empty/whitespace message is accepted
    :return: errors sorted by error code, rendered as strings
    """
    if kwargs.pop("allow_empty", False) and (not message or message.isspace()):
        return []

    lines = re.split(r"\r\n|\r|\n", message)
    errors = _check_1st_line(lines[0], **kwargs)
    bullet_errors, signature_lines = _check_bullets(lines, **kwargs)
    errors += bullet_errors
    errors += _check_signatures(signature_lines, **kwargs)

    def _render(error):
        code, lineno = error[0], error[1]
        return "{0}: {1} {2}".format(
            lineno, code, _messages_codes[code].format(*error[2:]))

    return [_render(error) for error in sorted(errors, key=lambda e: e[0])]
Check the message format. Rules: - the first line must start by a component name - and a short description (52 chars), - then bullet points are expected - and finally signatures. :param components: compontents, e.g. ``('auth', 'utils', 'misc')`` :type components: `list` :param signatures: signatures, e.g. ``('Signed-off-by', 'Reviewed-by')`` :type signatures: `list` :param alt_signatures: alternative signatures, e.g. ``('Tested-by',)`` :type alt_signatures: `list` :param trusted: optional list of reviewers, e.g. ``('[email protected]',)`` :type trusted: `list` :param max_length: optional maximum line length (by default: 72) :type max_length: int :param max_first_line: optional maximum first line length (by default: 50) :type max_first_line: int :param allow_empty: optional way to allow empty message (by default: False) :type allow_empty: bool :return: errors sorted by line number :rtype: `list`
def _register_pyflakes_check():
    """Register the pyFlakes checker into the PEP8 set of checks."""
    from flake8_isort import Flake8Isort
    from flake8_blind_except import check_blind_except

    # Map pyflakes message classes to flake8-style codes, resolving
    # conflicts between pep8 and pyflakes numbering.
    codes = {
        "UnusedImport": "F401",
        "ImportShadowedByLoopVar": "F402",
        "ImportStarUsed": "F403",
        "LateFutureImport": "F404",
        "Redefined": "F801",
        "RedefinedInListComp": "F812",
        "UndefinedName": "F821",
        "UndefinedExport": "F822",
        "UndefinedLocal": "F823",
        "DuplicateArgument": "F831",
        "UnusedVariable": "F841",
    }
    for name, message_cls in vars(pyflakes.messages).items():
        if name[0].isupper() and message_cls.message:
            message_cls.tpl = "{0} {1}".format(
                codes.get(name, "F999"), message_cls.message)

    pep8.register_check(_PyFlakesChecker, codes=['F'])

    # FIXME parser hack: Flake8Isort needs its options registered on a
    # parser before it can be used as a check.
    parser = pep8.get_parser('', '')
    Flake8Isort.add_options(parser)
    options, args = parser.parse_args([])
    # end of hack

    pep8.register_check(Flake8Isort, codes=['I'])
    pep8.register_check(check_blind_except, codes=['B90'])
Register the pyFlakes checker into PEP8 set of checks.
def is_file_excluded(filename, excludes):
    """Check if the file should be excluded.

    :param filename: file name
    :param excludes: list of regexes to match (falsy entries are skipped)
    :return: True if the file should be excluded
    """
    # Generator instead of a materialized list: short-circuits on the
    # first matching exclude pattern.
    return any(exclude and re.match(exclude, filename) is not None
               for exclude in excludes)
Check if the file should be excluded. :param filename: file name :param excludes: list of regex to match :return: True if the file should be excluded
def check_pep8(filename, **kwargs):
    """Perform static analysis on the given file.

    :param filename: path of file to check.
    :param ignore: codes to ignore, e.g. ``('E111', 'E123')``
    :param select: codes to explicitly select.
    :param pyflakes: run the pyflakes checks too (default ``True``)
    :return: errors as a list of strings
    """
    options = {
        "ignore": kwargs.get("ignore"),
        "select": kwargs.get("select"),
    }
    # Register pyflakes-based checks once, lazily.
    if not _registered_pyflakes_check and kwargs.get("pyflakes", True):
        _register_pyflakes_check()

    checker = pep8.Checker(filename, reporter=_Report, **options)
    checker.check_all()

    # Errors are (line, col, code, text, check); sorted by line number.
    return ["{0}:{1}: {3}".format(*error)
            for error in sorted(checker.report.errors, key=lambda e: e[0])]
Perform static analysis on the given file. :param filename: path of file to check. :type filename: str :param ignore: codes to ignore, e.g. ``('E111', 'E123')`` :type ignore: `list` :param select: codes to explicitly select. :type select: `list` :param pyflakes: run the pyflakes checks too (default ``True``) :type pyflakes: bool :return: errors :rtype: `list` .. seealso:: :py:class:`pycodestyle.Checker`
def check_pydocstyle(filename, **kwargs):
    """Perform static analysis on the given file's docstrings.

    :param filename: path of file to check.
    :param ignore: codes to ignore, e.g. ('D400',)
    :param match: regex the basename has to match to be checked
    :param match_dir: regex every dir in the path should match
    :return: errors as a list of strings
    """
    ignore = kwargs.get("ignore")
    match = kwargs.get("match", None)
    match_dir = kwargs.get("match_dir", None)

    errors = []
    if match and not re.match(match, os.path.basename(filename)):
        return errors

    if match_dir:
        # FIXME the full path is checked here; if match_dir doesn't match
        # a (usually temporary) prefix of the path before the actual
        # application path, checks may be skipped when they should run.
        path = os.path.split(os.path.abspath(filename))[0]
        while path != "/":
            path, dirname = os.path.split(path)
            if not re.match(match_dir, dirname):
                return errors

    checker = pydocstyle.PEP257Checker()
    with open(filename) as fp:
        try:
            for error in checker.check_source(fp.read(), filename):
                if ignore is not None and error.code in ignore:
                    continue
                # Drop the colon after the code: "D400: x" -> "D400 x".
                message = re.sub("(D[0-9]{3}): ?(.*)", r"\1 \2",
                                 error.message)
                errors.append("{0}: {1}".format(error.line, message))
        except tokenize.TokenError as e:
            errors.append("{1}:{2} {0}".format(e.args[0], *e.args[1]))
        except pydocstyle.AllError as e:
            errors.append(str(e))
    return errors
Perform static analysis on the given file docstrings. :param filename: path of file to check. :type filename: str :param ignore: codes to ignore, e.g. ('D400',) :type ignore: `list` :param match: regex the filename has to match to be checked :type match: str :param match_dir: regex everydir in path should match to be checked :type match_dir: str :return: errors :rtype: `list` .. seealso:: `PyCQA/pydocstyle <https://github.com/GreenSteam/pydocstyle/>`_
def check_file(filename, **kwargs):
    """Perform static analysis on the given file.

    .. seealso::

        - :func:`.check_pep8`
        - :func:`.check_pydocstyle`
        - and :func:`.check_license`

    :param filename: path of file to check.
    :return: errors sorted by line number or None if file is excluded
    """
    excludes = kwargs.get("excludes", [])
    if is_file_excluded(filename, excludes):
        return None

    errors = []
    if filename.endswith(".py"):
        if kwargs.get("pep8", True):
            errors += check_pep8(filename, **kwargs)
        if kwargs.get("pydocstyle", True):
            errors += check_pydocstyle(filename, **kwargs)
        if kwargs.get("license", True):
            errors += check_license(filename, **kwargs)
    # Raw strings: the originals ("\.(tpl|html)$") relied on invalid
    # escape sequences, which newer Pythons warn about.
    elif re.search(r"\.(tpl|html)$", filename):
        errors += check_license(filename, **kwargs)
    elif re.search(r"\.(js|jsx|css|less)$", filename):
        errors += check_license(filename, python_style=False, **kwargs)

    def _line_number(value):
        # Errors look like "<lineno>:..."; unparsable ones sort first.
        try:
            return int(value.split(':', 1)[0])
        except ValueError:
            return 0

    return sorted(errors, key=_line_number)
Perform static analysis on the given file. .. seealso:: - :data:`.SUPPORTED_FILES` - :func:`.check_pep8` - :func:`.check_pydocstyle` - and :func:`.check_license` :param filename: path of file to check. :type filename: str :return: errors sorted by line number or None if file is excluded :rtype: `list`
def check_author(author, **kwargs):
    """Check the presence of the author in the AUTHORS/THANKS files.

    Rules: the author full name and email must appear in an AUTHORS file.

    :param author: "full name <email>" string to look for
    :param authors: names of AUTHORS files
    :param path: path to the repository home (defaults to cwd)
    :param exclude_author_names: authors that are never checked
    :return: errors as a list of strings
    """
    errors = []
    authors = kwargs.get("authors")
    if not authors:
        errors.append('1:A100: ' + _author_codes['A100'])
        return errors

    exclude_author_names = kwargs.get("exclude_author_names")
    if exclude_author_names and author in exclude_author_names:
        return []

    path = kwargs.get("path") or os.getcwd()

    author_files = [path + os.sep + afile for afile in authors]
    for afile, full_path in zip(authors, author_files):
        if not os.path.exists(full_path):
            errors.append('1:A101: ' + _author_codes['A101'].format(afile))
    if errors:
        return errors

    # grep -q exits non-zero when the author is found in none of the files.
    status = subprocess.Popen(['grep', '-q', author] + author_files,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE,
                              cwd=path).wait()
    if status:
        errors.append('1:A102: ' + _author_codes['A102'].format(author))
    return errors
Check the presence of the author in the AUTHORS/THANKS files. Rules: - the author full name and email must appear in AUTHORS file :param authors: name of AUTHORS files :type authors: `list` :param path: path to the repository home :type path: str :return: errors :rtype: `list`
def get_options(config=None):
    """Build the options dict from the config object.

    :param config: mapping with a ``get`` method; when ``None`` the
        module-level :mod:`.config` is used (a ``get`` shim is attached
        to it, since modules have no mapping interface).
    :return: dict containing only the options that are actually set
    """
    if config is None:
        from . import config
        config.get = lambda key, default=None: getattr(config, key, default)

    candidates = {
        "components": config.get("COMPONENTS"),
        "signatures": config.get("SIGNATURES"),
        "commit_msg_template": config.get("COMMIT_MSG_TEMPLATE"),
        "commit_msg_labels": config.get("COMMIT_MSG_LABELS"),
        "alt_signatures": config.get("ALT_SIGNATURES"),
        "trusted": config.get("TRUSTED_DEVELOPERS"),
        "pep8": config.get("CHECK_PEP8", True),
        "pydocstyle": config.get("CHECK_PYDOCSTYLE", True),
        "license": config.get("CHECK_LICENSE", True),
        "pyflakes": config.get("CHECK_PYFLAKES", True),
        "ignore": config.get("IGNORE"),
        "select": config.get("SELECT"),
        "match": config.get("PYDOCSTYLE_MATCH"),
        "match_dir": config.get("PYDOCSTYLE_MATCH_DIR"),
        "min_reviewers": config.get("MIN_REVIEWERS"),
        "colors": config.get("COLORS", True),
        "excludes": config.get("EXCLUDES", []),
        "authors": config.get("AUTHORS"),
        "exclude_author_names": config.get("EXCLUDE_AUTHOR_NAMES"),
    }
    # Drop unset options so callers' defaults still apply.
    return {key: value for key, value in candidates.items()
            if value is not None}
Build the options from the config object.
def run(self):
    """Yield (lineno, col, text, class) for each pyflakes message."""
    for message in self.messages:
        # Older pyflakes messages may lack a column attribute.
        column = getattr(message, 'col', 0)
        yield (message.lineno, column,
               (message.tpl % message.message_args),
               message.__class__)
Yield the error messages.
def error(self, line_number, offset, text, check):
    """Run the check and collect the error.

    The parent implementation returns the error code (or ``None`` when
    the error is filtered out); the original override swallowed that
    return value, so propagate it to keep the parent contract intact.

    :param line_number: 1-based line of the error
    :param offset: 0-based column of the error
    :param text: error message (prefixed with the code)
    :param check: the check callable that produced the error
    :return: the error code, or None when filtered out
    """
    code = super(_Report, self).error(line_number, offset, text, check)
    if code:
        # Columns are reported 1-based.
        self.errors.append((line_number, offset + 1, code, text, check))
    return code
Run the checks and collect the errors.
def prompt(prompt_string, default=None, secret=False, boolean=False, bool_type=None):
    """Prompt the user for a string, with an optional default value.

    * ``secret`` switches to a password (no-echo) prompt
    * ``boolean`` converts the return value to a bool, true when the
      answer starts with a "y"
    """
    if boolean or bool_type in BOOLEAN_DEFAULTS:
        if bool_type is None:
            bool_type = 'y_n'
        default_msg = BOOLEAN_DEFAULTS[bool_type][is_affirmative(default)]
    else:
        default_msg = " (default {val}): "
    prompt_string += default_msg.format(val=default) if default else ": "

    answer = getpass(prompt_string) if secret else input(prompt_string)
    answer = answer if answer else default
    if boolean:
        answer = answer.lower().startswith('y')
    return answer
Prompt user for a string, with a default value * secret converts to password prompt * boolean converts return value to boolean, checking for starting with a Y
def prop_unc(jc):
    """Propagate uncertainty: return ``J * C * J^T``.

    :param jc: two-tuple of Jacobian and covariance matrix
    :type jc: sequence

    The single packed argument makes this usable as the target of a
    multiprocessing pool.
    """
    jac, cov = jc
    return np.dot(np.dot(jac, cov), jac.T)
Propagate uncertainty. :param jc: the Jacobian and covariance matrix :type jc: sequence This method is mainly designed to be used as the target for a multiprocessing pool.
def partial_derivative(f, x, n, nargs, delta=DELTA):
    """Calculate partial derivative using central finite difference
    approximation.

    :param f: function
    :param x: sequence of arguments
    :param n: index of the argument the derivative is taken with respect to
    :param nargs: number of arguments
    :param delta: optional step size (default ``DELTA``)
    """
    dx = np.zeros((nargs, len(x[n])))
    # Step proportional to x[n], falling back to delta where x[n] is zero,
    # to avoid noise from machine precision.
    dx[n] += np.where(x[n], x[n] * delta, delta)
    # Apply central difference approximation; materialize the zip so the
    # two shifted argument sets stay indexable on Python 3 as well.
    x_dx = list(zip(*[xi + (dxi, -dxi) for xi, dxi in zip(x, dx)]))
    return (f(x_dx[0]) - f(x_dx[1])) / dx[n] / 2.0
Calculate partial derivative using central finite difference approximation. :param f: function :param x: sequence of arguments :param n: index of the argument the derivative is taken with respect to :param nargs: number of arguments :param delta: optional step size, default is :math:`\\epsilon^{1/3}` where :math:`\\epsilon` is machine precision
def jacobian(func, x, nf, nobs, *args, **kwargs):
    """Estimate Jacobian matrices df_i/dx_{j,k} where ``k`` are
    independent observations of ``x``.

    ``x`` must be 2-D: (number of arguments, number of observations).
    The function must also return a 2-D array: (number of returns,
    number of observations).  Constant arguments are passed through as
    additional positional or keyword arguments.

    :param func: function
    :param x: independent variables grouped by observation
    :param nf: number of returns in the output (1st dimension)
    :param nobs: number of observations in the output (2nd dimension)
    :return: Jacobian matrices for each observation
    """
    nargs = len(x)  # degrees of freedom
    f = lambda x_: func(x_, *args, **kwargs)
    j = np.zeros((nargs, nf, nobs))  # matrix of zeros
    for n in range(nargs):  # range: works on Python 2 and 3 (was xrange)
        j[n] = partial_derivative(f, x, n, nargs)
        # better to transpose J once than transpose each partial
        # derivative: j[:,:,n] = df.T
    return j.T
Estimate Jacobian matrices :math:`\\frac{\\partial f_i}{\\partial x_{j,k}}` where :math:`k` are independent observations of :math:`x`. The independent variable, :math:`x`, must be a numpy array with exactly 2 dimensions. The first dimension is the number of independent arguments, and the second dimensions is the number of observations. The function must return a Numpy array with exactly 2 dimensions. The first is the number of returns and the second dimension corresponds to the number of observations. If the input argument is 2-D then the output should also be 2-D Constant arguments can be passed as additional positional arguments or keyword arguments. If any constant argument increases the number of observations of the return value, tile the input arguments to match. Use :func:`numpy.atleast_2d` or :func:`numpy.reshape` to get the correct dimensions for scalars. :param func: function :param x: independent variables grouped by observation :param nf: number of return in output (1st dimension) :param nobs: number of observations in output (2nd dimension) :return: Jacobian matrices for each observation
def jflatten(j):
    """Flatten a 3-D Jacobian into a 2-D block-diagonal matrix.

    :param j: array of shape (nobs, nf, nargs)
    :return: array of shape (nf * nobs, nargs * nobs) with each
        observation's Jacobian on the block diagonal
    """
    nobs, nf, nargs = j.shape
    nrows, ncols = nf * nobs, nargs * nobs
    jflat = np.zeros((nrows, ncols))
    for n in range(nobs):  # range: Python 2/3 compatible (was xrange)
        r, c = n * nf, n * nargs
        jflat[r:(r + nf), c:(c + nargs)] = j[n]
    return jflat
Flatten 3_D Jacobian into 2-D.
def jtosparse(j):
    """Generate a sparse matrix from a 3-D block-diagonal Jacobian.

    :param j: array of shape (nobs, nf, nargs)
    :return: :class:`scipy.sparse.csr_matrix` of shape
        (nobs * nf, nobs * nargs)
    """
    data = j.flatten().tolist()
    nobs, nf, nargs = j.shape
    # Build (rows, cols) coordinates for every block entry.  ``list`` and
    # ``range`` keep this working on Python 3 (was bare zip / xrange).
    indices = list(zip(*[(r, c)
                         for n in range(nobs)
                         for r in range(n * nf, (n + 1) * nf)
                         for c in range(n * nargs, (n + 1) * nargs)]))
    return csr_matrix((data, indices), shape=(nobs * nf, nobs * nargs))
Generate sparse matrix coordinates from 3-D Jacobian.
def assign_handler(query, category):
    """Dispatch the user's query to the handler matching its category.

    :param query: the raw user query string
    :param category: classified category name for the query
    """
    if category == 'count lines':
        handler.lines(query)
    elif category == 'count words':
        handler.words(query)
    elif category == 'weather':
        web.weather(query)
    elif category == 'no match':
        web.generic(query)
    elif category == 'file info':
        handler.file_info(query)
    elif category == 'executable':
        handler.make_executable(query)
    elif category == 'search':
        handler.search(query)
    elif category == 'path':
        handler.add_to_path(query)
    elif category == 'uname':
        handler.system_info(query)
    else:
        # print() form works on both Python 2 and 3 (was a print statement)
        print('I\'m not able to understand your query')
assign_handler(query, category) -- assign the user's query to a particular category, and call the appropriate handler.
def get_file_name(query):
    """Return the first filename-looking token in *query*, or ``None``.

    A filename is any non-space run containing a dot followed by a 1-4
    character extension.

    :param query: user query string to scan
    :return: matched filename string, or None when nothing matches
    """
    match = re.search(r'\S*\.[\d\w]{1,4}', query)
    if match:
        return match.group()
    # No match: the previous fallback dereferenced the (None) match object
    # (match.start()/match.end()) and always crashed with AttributeError;
    # report "not found" instead.
    return None
get_file_name(query) -> filename -- return the filename found in a given query, found by matching a regular expression.
def get_path(query):
    """Return the path found in *query*, or ``None``.

    ``None`` is returned when the query holds no path-like token or when
    the matched path does not exist on disk.

    :param query: user query string to scan
    :return: matched existing file/directory path, or None
    """
    match = re.search(r'/(.*/)+(\S*(\.[\d\w]{1,4})?)', query)
    # Guard the no-match case: the original dereferenced None and raised
    # AttributeError for queries without a path.
    if match and (os.path.isfile(match.group()) or os.path.isdir(match.group())):
        return match.group()
    return None
get_path(query) -> pathname -- return the path found in a given query, found by matching a regular expression.
def get_readable_filesize(size):
    """Return a human readable file size for *size* given in bytes."""
    if size < 1024:
        return str(size) + ' bytes'
    scaled = size / 1024.0
    level = 1
    # Climb units until below 1024 or GB is reached.
    while scaled >= 1024 and level < 3:
        scaled = scaled / 1024
        level += 1
    unit = {1: ' KB', 2: ' MB', 3: ' GB'}[level]
    return str(round(scaled, 2)) + unit
get_readable_filesize(size) -> filesize -- return human readable filesize from given size in bytes.
def list(self, svc_rec=None, hostfilter=None, compromised=False):
    """List user accounts.

    :param svc_rec: db.t_services.id
    :param hostfilter: host filter
    :param compromised: show only compromised accounts
    :return: account rows as produced by ``accounts_list``
    """
    # Thin delegation to the transport layer.
    return self.send.accounts_list(svc_rec, hostfilter, compromised)
List user accounts :param svc_rec: db.t_services.id :param hostfilter: :param compromised: Show only compromised accounts :return: [acct.t_accounts.f_services_id, acct.t_hosts.f_ipaddr, acct.t_hosts.f_hostname, acct.t_accounts.id, acct.t_accounts.f_username, acct.t_accounts.f_fullname, acct.t_accounts.f_password, acct.t_accounts.f_compromised, acct.t_accounts.f_hash1, acct.t_accounts.f_hash1_type, acct.t_accounts.f_hash2, acct.t_accounts.f_hash2_type, acct.t_accounts.f_source, acct.t_accounts.f_uid, acct.t_accounts.f_gid, acct.t_accounts.f_level, acct.t_accounts.f_domain, acct.t_accounts.f_message, acct.t_accounts.f_lockout, acct.t_accounts.f_duration, acct.t_accounts.f_active, acct.t_accounts.f_description, acct.t_services.f_proto, acct.t_services.f_number, ]
def upload_file(self, service_rec=None, host_service=None, filename=None,
                pw_data=None, f_type=None, add_to_evidence=True):
    """Upload a password file.

    :param service_rec: db.t_services.id
    :param host_service: db.t_hosts.id
    :param filename: file name
    :param pw_data: content of the file
    :param f_type: type of the file
    :param add_to_evidence: True/False to add to t_evidence
    :return: (True/False, response message)
    """
    # Thin delegation to the transport layer.
    return self.send.accounts_upload_file(
        service_rec, host_service, filename, pw_data, f_type,
        add_to_evidence)
Upload a password file :param service_rec: db.t_services.id :param host_service: db.t_hosts.id :param filename: Filename :param pw_data: Content of file :param f_type: Type of file :param add_to_evidence: True/False to add to t_evidence :return: (True/False, Response Message)
def _columns_to_kwargs(conversion_table, columns, row): kwdict = {} counter = 0 for column in columns: # Map the column name to the correct MarketHistoryEntry kwarg. kwarg_name = conversion_table[column] # Set the kwarg to the correct value from the row. kwdict[kwarg_name] = row[counter] counter += 1 return kwdict
Given a list of column names, and a list of values (a row), return a dict of kwargs that may be used to instantiate a MarketHistoryEntry or MarketOrder object. :param dict conversion_table: The conversion table to use for mapping spec names to kwargs. :param list columns: A list of column names. :param list row: A list of values.
def parse_datetime(time_str):
    """Parse *time_str* into an aware UTC datetime with microseconds zeroed.

    Unified Uploader and EMK formats both carry no sub-second precision,
    so microseconds are dropped.

    :param str time_str: the date/time string to parse
    :rtype: datetime.datetime
    :raises ParseError: when the string is not a recognizable time
    """
    try:
        return dateutil.parser.parse(
            time_str
        ).replace(microsecond=0).astimezone(UTC_TZINFO)
    except ValueError:
        # Some kind of unrecognizable time string.
        raise ParseError("Invalid time string: %s" % time_str)
Wraps dateutil's parser function to set an explicit UTC timezone, and to make sure microseconds are 0. Unified Uploader format and EMK format both don't use microseconds at all. :param str time_str: The date/time str to parse. :rtype: datetime.datetime :returns: A parsed, UTC datetime.
def merge(self, po_file, source_files):
    """Extract all entries from the sources and merge them into *po_file*.

    :param string po_file: path of the po file to update
    :param list source_files: paths of the source files to process
    """
    # Write the extracted template into a temporary POT file.
    pot_file = tempfile.NamedTemporaryFile(
        mode='wb', prefix='rookout_', delete=False)
    pot_filename = pot_file.name
    slog.info('Create POT file [%s].', pot_filename)

    xgettext_cmd = [
        self._xgettext,
        "--package-name=main",
        "--package-version=0.1",
        "--default-domain=main",
        "--from-code=UTF-8",
        "-C", "-k_",
        "--output", pot_filename,
    ]
    txt = subprocess.check_output(xgettext_cmd + source_files,
                                  stderr=subprocess.STDOUT,
                                  universal_newlines=True)
    if txt:
        # xgettext only prints on failure.
        raise ChildProcessError(txt)

    slog.info('Start merge [%s] to [%s].', pot_filename, po_file)
    txt = subprocess.check_output([self._msgmerge, "-U", po_file,
                                   pot_filename],
                                  universal_newlines=True)
    slog.info(txt)

    pot_file.close()
    os.remove(pot_filename)
从源码中获取所有条目,合并到 po_file 中。 :param string po_file: 待写入的 po 文件路径。 :param list source_files : 所有待处理的原文件路径 list。
def fmt(self, po_file, mo_file):
    """Compile a po file into a mo file.

    :param string po_file: path of the po file to convert
    :param string mo_file: path of the target mo file
    """
    if not os.path.exists(po_file):
        slog.error('The PO file [%s] is non-existen!' % po_file)
        return
    output = subprocess.check_output(
        [self._msgfmt, '--check', "--strict", '--verbose',
         "--output-file", mo_file, po_file],
        stderr=subprocess.STDOUT,
        universal_newlines=True)
    slog.info(output)
将 po 文件转换成 mo 文件。 :param string po_file: 待转换的 po 文件路径。 :param string mo_file: 目标 mo 文件的路径。
def add_suffix(filename, suffix):
    """Add *suffix* to *filename*, before the file extension (if any)."""
    segments = filename.split("/")
    name_parts = segments[-1].split(".")
    # Attach to the stem: the part before the last extension, or the
    # whole name when there is no dot.
    stem_index = max(len(name_parts) - 2, 0)
    name_parts[stem_index] += suffix
    segments[-1] = ".".join(name_parts)
    return "/".join(segments)
ADD suffix TO THE filename (NOT INCLUDING THE FILE EXTENSION)
def find(self, pattern):
    """Recursively collect files whose name matches *pattern*.

    :param pattern: regular expression matched against the basename only
    :return: list of matching File objects (pre-order)
    """
    found = []

    def _walk(node):
        if re.match(pattern, node._filename.split("/")[-1]):
            found.append(node)
        if node.is_directory():
            for child in node.children:
                _walk(child)

    _walk(self)
    return found
:param pattern: REGULAR EXPRESSION TO MATCH NAME (NOT INCLUDING PATH) :return: LIST OF File OBJECTS THAT HAVE MATCHING NAME
def set_extension(self, ext):
    """Return a new File with the extension replaced by *ext*."""
    segments = self._filename.split("/")
    name_parts = segments[-1].split(".")
    if len(name_parts) == 1:
        # No extension yet: append one.
        name_parts.append(ext)
    else:
        name_parts[-1] = ext
    segments[-1] = ".".join(name_parts)
    return File("/".join(segments))
RETURN NEW FILE WITH GIVEN EXTENSION
def set_name(self, name):
    """Return a new File with the base name replaced by *name*.

    The extension, when present, is preserved.
    """
    segments = self._filename.split("/")
    name_parts = segments[-1].split(".")
    if len(name_parts) == 1:
        segments[-1] = name
    else:
        segments[-1] = name + "." + name_parts[-1]
    return File("/".join(segments))
RETURN NEW FILE WITH GIVEN NAME (EXTENSION, IF ANY, IS KEPT)
def backup_name(self, timestamp=None):
    """Return a filename that can serve as a backup for this file.

    :param timestamp: optional datetime used for the suffix; defaults
        to now
    """
    stamp = datetime2string(
        coalesce(timestamp, datetime.now()), "%Y%m%d_%H%M%S")
    return File.add_suffix(self._filename, stamp)
RETURN A FILENAME THAT CAN SERVE AS A BACKUP FOR THIS FILE
def read(self, encoding="utf8"):
    """Return the file content, decrypting when a key is set.

    :param encoding: text encoding used when not encrypted
    :return: file content as text (or decrypted payload)
    """
    with open(self._filename, "rb") as f:
        raw = f.read()
    if self.key:
        return get_module("mo_math.crypto").decrypt(raw, self.key)
    return raw.decode(encoding)
:param encoding: :return:
def read_zipfile(self, encoding='utf8'):
    """Read the FIRST file inside the zip archive at ``self.abspath``.

    :param encoding: text encoding of the member file
    :return: decoded content as a string, or ``None`` for an empty archive
    """
    from zipfile import ZipFile
    with ZipFile(self.abspath) as archive:
        # Only the first member is read; the unused enumerate counter of
        # the original is gone, and the member handle is closed too.
        for member_name in archive.namelist():
            with archive.open(member_name) as member:
                return member.read().decode(encoding)
READ FIRST FILE IN ZIP FILE :param encoding: :return: STRING
def append(self, content, encoding='utf8'):
    """Append *content* as one line at the end of this file."""
    if not self.parent.exists:
        self.parent.create()
    with open(self._filename, "ab") as output_stream:
        if not is_text(content):
            Log.error(u"expecting to write unicode only")
        output_stream.write(content.encode(encoding))
        output_stream.write(b"\n")
add a line to file
def url_param2value(param):
    """Convert URL query parameters into a Data dict.

    Repeated keys are collected into lists; bare keys map to ``True``.
    The duplicated ``param == None`` guard of the original is removed
    (``==`` is kept deliberately: Null compares equal to None).

    :param param: raw query string (without the leading "?")
    :return: Data mapping of decoded parameters
    """
    if param == None:
        return Null

    def _decode(v):
        # Percent-decode, then try to interpret the result as JSON.
        output = []
        i = 0
        while i < len(v):
            c = v[i]
            if c == "%":
                d = hex2chr(v[i + 1:i + 3])
                output.append(d)
                i += 3
            else:
                output.append(c)
                i += 1
        output = text_type("".join(output))
        try:
            return json2value(output)
        except Exception:
            pass
        return output

    query = Data()
    for p in param.split('&'):
        if not p:
            continue
        if p.find("=") == -1:
            k = p
            v = True
        else:
            k, v = p.split("=")
            v = _decode(v)

        u = query.get(k)
        if u is None:
            query[k] = v
        elif is_list(u):
            u += [v]
        else:
            query[k] = [u, v]
    return query
CONVERT URL QUERY PARAMETERS INTO DICT
def value2url_param(value):
    """Encode *value* as an ascii URL parameter fragment.

    :param value: data, text, bytes, or an iterable of values
    :return: ascii URL-encoded text
    """
    if value == None:
        Log.error("Can not encode None into a URL")

    if is_data(value):
        wrapped = wrap(value)
        return "&".join(
            value2url_param(k) + "=" + (
                value2url_param(v)
                if is_text(v)
                else value2url_param(value2json(v))
            )
            for k, v in wrapped.leaves()
        )
    if is_text(value):
        return "".join(_map2url[c] for c in value.encode('utf8'))
    if is_binary(value):
        return "".join(_map2url[c] for c in value)
    if hasattr(value, "__iter__"):
        return ",".join(value2url_param(v) for v in value)
    return str(value)
:param value: :return: ascii URL
def configfile_from_path(path, strict=True):
    """Get a ConfigFile object based on a file path.

    Inspects the file extension and returns the appropriate ConfigFile
    subclass initialized with the given path.

    Args:
        path (str): The file path which represents the configuration file.
        strict (bool): Whether or not to parse the file in strict mode.

    Returns:
        confpy.loaders.base.ConfigurationFile: The subclass specialized
            for the given file path.

    Raises:
        UnrecognizedFileExtension: If there is no loader for the path.
    """
    extension = path.rsplit('.', 1)[-1]
    conf_type = FILE_TYPES.get(extension)
    if not conf_type:
        raise exc.UnrecognizedFileExtension(
            "Cannot parse file of type {0}. Choices are {1}.".format(
                extension,
                FILE_TYPES.keys(),
            )
        )
    return conf_type(path=path, strict=strict)
Get a ConfigFile object based on a file path. This method will inspect the file extension and return the appropriate ConfigFile subclass initialized with the given path. Args: path (str): The file path which represents the configuration file. strict (bool): Whether or not to parse the file in strict mode. Returns: confpy.loaders.base.ConfigurationFile: The subclass which is specialized for the given file path. Raises: UnrecognizedFileExtension: If there is no loader for the path.
def configuration_from_paths(paths, strict=True):
    """Get a Configuration object based on multiple file paths.

    Args:
        paths (iter of str): An iterable of file paths which identify
            config files on the system.
        strict (bool): Whether or not to parse the files in strict mode.

    Returns:
        confpy.core.config.Configuration: The loaded configuration object.

    Raises:
        NamespaceNotRegistered: If a file contains an unknown namespace.
        OptionNotRegistered: If a file contains an unknown option under a
            valid namespace.
        UnrecognizedFileExtension: If there is no loader for a path.
    """
    # NOTE(review): each load appears to register onto a shared
    # Configuration, so returning only the last cfg presumably still
    # reflects all files, with later files overwriting earlier values —
    # confirm against confpy.core.config.  An empty ``paths`` raises
    # UnboundLocalError here.
    for path in paths:
        cfg = configfile_from_path(path, strict=strict).config
    return cfg
Get a Configuration object based on multiple file paths. Args: paths (iter of str): An iterable of file paths which identify config files on the system. strict (bool): Whether or not to parse the files in strict mode. Returns: confpy.core.config.Configuration: The loaded configuration object. Raises: NamespaceNotRegistered: If a file contains a namespace which is not defined. OptionNotRegistered: If a file contains an option which is not defined but resides under a valid namespace. UnrecognizedFileExtension: If there is no loader for a path.
def set_environment_var_options(config, env=None, prefix='CONFPY'):
    """Set configuration options from environment variables.

    Looks up ``<PREFIX>_<SECTION>_<OPTION>`` (upper case) for every option
    in *config* and overwrites the option when the variable is set.

    Args:
        config (confpy.core.config.Configuration): A configuration object
            which has been initialized with options.
        env (dict): Optional environment mapping; defaults to os.environ.
        prefix (str): Prefix prepended to all variables (default CONFPY).

    Returns:
        confpy.core.config.Configuration: The updated configuration object.
    """
    env = env or os.environ
    for section_name, section in config:
        for option_name, _ in section:
            env_key = '_'.join((prefix.upper(),
                                section_name.upper(),
                                option_name.upper()))
            value = env.get(env_key)
            if value:
                setattr(section, option_name, value)
    return config
Set any configuration options which have an environment var set. Args: config (confpy.core.config.Configuration): A configuration object which has been initialized with options. env (dict): Optional dictionary which contains environment variables. The default is os.environ if no value is given. prefix (str): The string prefix prepended to all environment variables. This value will be set to upper case. The default is CONFPY. Returns: confpy.core.config.Configuration: A configuration object with environment variables set. The pattern to follow when setting environment variables is: <PREFIX>_<SECTION>_<OPTION> Each value should be upper case and separated by underscores.
def set_cli_options(config, arguments=None):
    """Set configuration options from CLI arguments.

    Every option is exposed as ``--<section>_<option>`` (lower case); any
    flag present on the command line overwrites the configured value.

    Args:
        config (confpy.core.config.Configuration): A configuration object
            which has been initialized with options.
        arguments (iter of str): CLI arguments; defaults to sys.argv[1:].

    Returns:
        confpy.core.config.Configuration: The updated configuration object.
    """
    arguments = arguments or sys.argv[1:]

    def _flag(section_name, option_name):
        return '{0}_{1}'.format(section_name.lower(), option_name.lower())

    parser = argparse.ArgumentParser()
    for section_name, section in config:
        for option_name, _ in section:
            parser.add_argument('--{0}'.format(_flag(section_name,
                                                     option_name)))

    parsed, _ = parser.parse_known_args(arguments)
    parsed = vars(parsed)

    for section_name, section in config:
        for option_name, _ in section:
            value = parsed.get(_flag(section_name, option_name))
            if value:
                setattr(section, option_name, value)
    return config
Set any configuration options which have a CLI value set. Args: config (confpy.core.config.Configuration): A configuration object which has been initialized with options. arguments (iter of str): An iterable of strings which contains the CLI arguments passed. If nothing is give then sys.argv is used. Returns: confpy.core.config.Configuration: A configuration object with CLI values set. The pattern to follow when setting CLI values is: <section>_<option> Each value should be lower case and separated by underscores.
def check_for_missing_options(config):
    """Raise if any required option in *config* is still unset.

    Args:
        config (confpy.core.config.Configuration): The configuration
            object to validate.

    Returns:
        confpy.core.config.Configuration: The validated configuration.

    Raises:
        MissingRequiredOption: If any required option is not set.
            Required options with default values count as set.
    """
    for section_name, section in config:
        for option_name, option in section:
            if not option.required or option.value is not None:
                continue
            raise exc.MissingRequiredOption(
                "Option {0} in namespace {1} is required.".format(
                    option_name,
                    section_name,
                )
            )
    return config
Iter over a config and raise if a required option is still not set. Args: config (confpy.core.config.Configuration): The configuration object to validate. Raises: MissingRequiredOption: If any required options are not set in the configuration object. Required options with default values are considered set and will not cause this function to raise.
def parse_options(files, env_prefix='CONFPY', strict=True):
    """Parse configuration from files, environment, and the CLI.

    Args:
        files (iter of str): Configuration file paths, processed in order
            with later values overwriting earlier ones.
        env_prefix (str): Prefix for environment variables (default CONFPY).
        strict (bool): Whether or not to parse the files in strict mode.

    Returns:
        confpy.core.config.Configuration: The loaded configuration object.

    Raises:
        MissingRequiredOption: If a required option is not defined.
        NamespaceNotRegistered: If a file contains an unknown namespace.
        OptionNotRegistered: If a file contains an unknown option.
        UnrecognizedFileExtension: If there is no loader for a path.
    """
    # Same pipeline as the original, written as explicit stages:
    # files -> environment -> CLI -> validation.
    cfg = configuration_from_paths(paths=files, strict=strict)
    cfg = set_environment_var_options(config=cfg, prefix=env_prefix)
    cfg = set_cli_options(config=cfg)
    return check_for_missing_options(config=cfg)
Parse configuration options and return a configuration object. Args: files (iter of str): File paths which identify configuration files. These files are processed in order with values in later files overwriting values in earlier files. env_prefix (str): The static prefix prepended to all options when set as environment variables. The default is CONFPY. strict (bool): Whether or not to parse the files in strict mode. Returns: confpy.core.config.Configuration: The loaded configuration object. Raises: MissingRequiredOption: If a required option is not defined in any file. NamespaceNotRegistered: If a file contains a namespace which is not defined. OptionNotRegistered: If a file contains an option which is not defined but resides under a valid namespace. UnrecognizedFileExtension: If there is no loader for a path.
def buy_product(self, product_pk):
    """Return whether the customer has bought the given product.

    :param product_pk: primary key of the product
    :return: True when an invoice or ticket line references the product
    """
    # Idiom fix: return the boolean expression directly instead of the
    # if/else True/False of the original.
    return (
        self.invoice_sales.filter(
            line_invoice_sales__line_order__product__pk=product_pk
        ).exists()
        or self.ticket_sales.filter(
            line_ticket_sales__line_order__product__pk=product_pk
        ).exists()
    )
determina si el customer ha comprado un producto
def template(self):
    """Get the template name: from props (YAML) when set, otherwise the
    widget type — presuming that, like resources, a matching .html file
    exists in that directory."""
    configured = self.props.template
    if configured:
        return configured
    return self.wtype
Get the template from: YAML or class
def render(self, sphinx_app: Sphinx, context):
    """Generate this widget's HTML given a Sphinx builder and a context.

    Called from kaybee.plugins.widgets.handlers.render_widgets.
    """
    builder = sphinx_app.builder
    resource = sphinx_app.env.resources[self.docname]
    context['sphinx_app'] = sphinx_app
    context['widget'] = self
    context['resource'] = resource
    # make_context is optionally implemented on each concrete widget class.
    self.make_context(context, sphinx_app)
    # NOTE: builder.templates.render_string is an alternative here.
    return builder.templates.render(self.template + '.html', context)
Given a Sphinx builder and context with sphinx_app in it, generate HTML
def desc(t=None, reg=True):
    """Describe a class dependency.

    :param t: custom name/type; defaults to the underscored class name
    :param reg: register the class (True) or merely describe it (False)
    :return: class decorator
    """
    def decorated_fn(cls):
        if not inspect.isclass(cls):
            # The original returned ``NotImplemented(...)``, which itself
            # raises ``TypeError: 'NotImplementedType' object is not
            # callable``; raise an explicit, meaningful error instead.
            raise TypeError('For now we can only describe classes')
        name = t or camel_case_to_underscore(cls.__name__)[0]
        if reg:
            di.injector.register(name, cls)
        else:
            di.injector.describe(name, cls)
        return cls
    return decorated_fn
Describe Class Dependency :param reg: should we register this class as well :param t: custom type as well :return:
def add(self, key, value=None):
    """Add a new key to this enumerated type.

    :param key: <str>
    :param value: optional flag value; defaults to the next power of two
    :return: the value assigned to the key
    """
    if value is None:
        value = 2 ** len(self)
    self[key] = value
    # Mirror the mapping entry as an attribute for dotted access.
    setattr(self, key, self[key])
    return value
Adds the new key to this enumerated type. :param key | <str>
def all(self):
    """Return all the values joined together with bitwise OR.

    :return: <int>
    """
    combined = 0
    for _, flag in self.items():
        combined |= flag
    return combined
Returns all the values joined together. :return <int>
def base(self, value, recurse=True):
    """Return the root base for *value* from this enumeration.

    :param value: <variant> value whose base is looked up
    :param recurse: <bool> when False, only one level is resolved
    """
    bases = self._bases
    while value in bases:
        value = bases[value]
        if not recurse:
            break
    return value
Returns the root base for the given value from this enumeration. :param value | <variant> recurse | <bool>
def displayText(self, value, blank='', joiner=', '):
    """Return the display text for *value*: a joined list of the labels
    of all flags it contains, or *blank* when none match.

    :param value: <variant> flag combination (None yields '')
    :param blank: <str> text returned when no label matches
    :param joiner: <str> separator between labels
    :return: <str>
    """
    if value is None:
        return ''

    labels = []
    for key, flag in sorted(self.items(), key=lambda item: item[1]):
        if value & flag:
            labels.append(self._labels.get(flag, text.pretty(key)))
    return joiner.join(labels) or blank
Returns the display text for the value associated with the inputted text. This will result in a comma separated list of labels for the value, or the blank text provided if no text is found. :param value | <variant> blank | <str> joiner | <str> :return <str>
def extend(self, base, key, value=None):
    """Add a new definition extending an existing base type.

    Registers *key* as a new viable option and records its base so that
    ``enum.base`` can retrieve the root type later.

    :param base: <variant> value of the enumeration being extended
    :param key: <str> new key
    :param value: <variant> explicit value; auto-assigned when None
    """
    assigned = self.add(key, value)
    self._bases[assigned] = base
Adds a new definition to this enumerated type, extending the given base type. This will create a new key for the type and register it as a new viable option from the system, however, it will also register its base information so you can use enum.base to retrieve the root type. :param base | <variant> | value for this enumeration key | <str> | new key for the value value | <variant> | if None is supplied, it will be auto-assigned :usage |>>> from projex.enum import enum |>>> Types = enum('Integer', 'Boolean') |>>> Types.Integer |1 |>>> Types.Boolean |2 |>>> Types.extend(Types.Integer, 'BigInteger') |>>> Types.BigInteger |4 |>>> Types.base(Types.BigInteger) |1
def fromSet(self, values):
    """OR together the flag for each member name in *values*.

    :param values: <set> of member names
    :return: <int> combined flag value
    """
    combined = 0
    for name in values:
        combined |= self(name)
    return combined
Generates a flag value based on the given set of values. :param values: <set> :return: <int>
def label(self, value):
    """Return the display label for *value*.

    Falls back to a prettified version of the member name when no
    explicit label was set.

    :param value | <variant>
    :return <str>
    """
    explicit = self._labels.get(value)
    if explicit:
        return explicit
    return text.pretty(self(value))
Returns a pretty text version of the key for the inputted value. :param value | <variant> :return <str>
def labels(self):
    """Return the "user friendly" labels, ordered by value.

    :return <list> [ <str>, .. ]
    """
    ordered = sorted(self.items(), key=lambda pair: pair[1])
    return [self._labels.get(val) or text.pretty(key) for key, val in ordered]
Return a list of "user friendly" labels. :return <list> [ <str>, .. ]
def setLabel(self, value, label):
    """Set (or clear) the display label used for *value*.

    Passing a falsy *label* removes any custom label so the default
    pretty text is used again.

    :param value | <variant>
           label | <str>
    """
    if not label:
        self._labels.pop(value, None)
    else:
        self._labels[value] = label
Sets the label text for the inputted value. This will override the default pretty text label that is used for the key. :param value | <variant> label | <str>
def text(self, value, default=''):
    """Return the member name mapped to *value*.

    :param value: value to look up
    :param default: returned when no member matches
    :return <str>
    """
    for name, member_value in self.items():
        if member_value == value:
            return name
    return default
Returns the text for the inputted value. :return <str>
def toSet(self, flags):
    """Expand the *flags* bitmask into the set of member names it contains.

    :param flags: <int> combined flag value
    :return: <set> of member names
    """
    return set(key for key, value in self.items() if value & flags)
Expands a combined flag value into the set of keys whose values are contained in it. :param flags: <int> :return: <set>
def valueByLabel(self, label):
    """Return the value whose prettified key matches *label*.

    :param label <str>
    :return <int> matching value, or 0 when no label matches
    """
    # list() is required: in Python 3, dict.keys() returns a view that
    # does not support indexing, so the original keys[...] raised TypeError.
    keys = list(self.keys())
    labels = [text.pretty(key) for key in keys]
    if label in labels:
        return self[keys[labels.index(label)]]
    return 0
Determine a given value based on the inputted label. :param label <str> :return <int>
def cli(ctx, name, all):
    """Show example for doing some task in bubble (experimental).

    :param ctx: bubble CLI context; ``ctx.gbc.say`` is its debug logger
    :param name: name of a single example to show
    :param all: when truthy, show every registered example
    """
    # `all_examples_functions` is a module-level registry of examples;
    # verbosity=1000 means this only appears in deep-debug output.
    ctx.gbc.say('all_example_functions', stuff=all_examples_functions, verbosity=1000)
    for example in all_examples_functions:
        if all or (name and example['name'] == name):
            if all:
                ctx.gbc.say('example', stuff=example, verbosity=100)
                # reuse `name` so the banners below show the current example
                name = example['name']
            #click.echo_via_pager(example['fun']())
            click.echo("#"*80)
            click.echo("### start of bubble example: "+name)
            click.echo("#"*80)
            # each example entry maps 'fun' to a zero-arg callable
            # producing the example text
            click.echo(example['fun']())
            click.echo("#"*80)
            click.echo("### end of bubble example: "+name)
            click.echo("#"*80)
            click.echo()
        else:
            click.echo("available example: " + example['name'])
Show example for doing some task in bubble(experimental)
def check_if_alive(self):
    """Check if the content is available on the host server.

    Lazily evaluated: the HTTP request only happens when this is called.

    :return: True if the status URL (or the content URL as a fallback)
        answers with HTTP 200, False otherwise
    :raises URLError: if the resolved URL is malformed
    :rtype: bool
    """
    try:
        from urllib2 import urlopen, URLError, HTTPError
    except ImportError:
        # Python 3: urlopen lives in urllib.request, but the error
        # classes live in urllib.error — importing them from
        # urllib.request raises ImportError and broke this fallback.
        from urllib.request import urlopen
        from urllib.error import URLError, HTTPError

    if len(self.instance.STATUS_LINK):
        check_url = self.instance.STATUS_LINK % ({'content_uid': self.get_content_uid()})
    else:  # fallback
        check_url = self.instance.url

    try:
        response = urlopen(check_url)
    except (HTTPError, URLError):
        return False
    except ValueError:
        # str.format, not %-interpolation: the original
        # '%s'.format(...) produced the literal message 'Invalid URL: %s'.
        raise URLError('Invalid URL: {0}'.format(check_url))
    else:
        return True if response.code == 200 else False
Check if the content is available on the host server. Returns `True` if available, else `False`. This method is `lazy`-evaluated or only executes when called. :rtype: bool
def load_config_file(self):
    """Parse the file at ``self.CONFIG_FILE`` into ``self._config``.

    Missing sections/options are skipped, except ``user_stream`` which
    defaults to False when the [stream] section is absent.
    """
    # NOTE(review): SafeConfigParser is deprecated (removed in Py 3.12);
    # presumably configparser.ConfigParser is the intended modern class.
    config_parser = SafeConfigParser()
    config_parser.read(self.CONFIG_FILE)

    if config_parser.has_section('handlers'):
        self._config['handlers_package'] = config_parser.get('handlers', 'package')

    if config_parser.has_section('auth'):
        self._config['consumer_key'] = config_parser.get('auth', 'consumer_key')
        self._config['consumer_secret'] = config_parser.get('auth', 'consumer_secret')
        self._config['token_key'] = config_parser.get('auth', 'token_key')
        self._config['token_secret'] = config_parser.get('auth', 'token_secret')

    if config_parser.has_section('stream'):
        # stored as a real bool, not the raw option string
        self._config['user_stream'] = config_parser.get('stream', 'user_stream').lower() == 'true'
    else:
        self._config['user_stream'] = False

    if config_parser.has_option('general', 'min_seconds_between_errors'):
        # NOTE(review): kept as a string here — presumably cast to a
        # number by the consumer; confirm.
        self._config['min_seconds_between_errors'] = config_parser.get('general', 'min_seconds_between_errors')
    if config_parser.has_option('general', 'sleep_seconds_on_consecutive_errors'):
        self._config['sleep_seconds_on_consecutive_errors'] = config_parser.get(
            'general', 'sleep_seconds_on_consecutive_errors')
Parse configuration file and get config values.
def load_config_from_cli_arguments(self, *args, **kwargs):
    """Copy recognised CLI options into the configuration.

    :param dict kwargs: CLI options
    """
    for option in ('handlers_package', 'auth', 'user_stream',
                   'min_seconds_between_errors',
                   'sleep_seconds_on_consecutive_errors'):
        self._load_config_from_cli_argument(key=option, **kwargs)
Get config values of passed in CLI options. :param dict kwargs: CLI options
def validate_configs(self):
    """Ensure every required configuration key is present.

    :raises :class:`~responsebot.common.exceptions.MissingConfigError`:
        if a required config is missing
    """
    missing = [conf for conf in self.REQUIRED_CONFIGS if conf not in self._config]
    if missing:
        raise MissingConfigError('Missing required configuration %s' % missing[0])
Check that required config are set. :raises :class:`~responsebot.common.exceptions.MissingConfigError`: if a required config is missing
def get(self, id):
    """Fetch the raw dict data for *id* and build the item object from it."""
    payload = self.db.get_data(self.get_path, id=id)
    item_data = payload['Data'][self.name]
    return self._build_item(**item_data)
Gets the dict data and builds the item object.
def save(self, entity):
    """Map *entity* to a dict and persist it; returns the driver future."""
    assert isinstance(entity, Entity), " entity must have an instance of Entity"
    document = entity.as_dict()
    return self.__collection.save(document)
Maps entity to dict and returns future
def find_one(self, **kwargs):
    """Find a single document matching *kwargs* and map it to an entity.

    Executes the collection's find_one with the keyword args as filter,
    maps the resulting dict onto a fresh entity instance, and resolves
    the returned future with it.

    Example::

        manager = EntityManager(Product)
        product_saved = yield manager.find_one(_id=object_id)

    :return: future resolving to the mapped entity instance
    """
    future = TracebackFuture()

    def handle_response(result, error):
        # driver callback: propagate the error or map the raw document
        if error:
            future.set_exception(error)
        else:
            # NOTE(review): `result` may be None when nothing matches —
            # map_dict(None) behaviour is unverified from here.
            instance = self.__entity()
            instance.map_dict(result)
            future.set_result(instance)

    self.__collection.find_one(kwargs, callback=handle_response)
    return future
Returns future. Executes collection's find_one method based on keyword args maps result ( dict to instance ) and return future Example:: manager = EntityManager(Product) product_saved = yield manager.find_one(_id=object_id)
def find(self, **kwargs):
    """Find documents matching *kwargs* and map them to entity instances.

    Pass ``max_limit`` to bound how many documents are pulled over the
    network; every remaining kwarg becomes part of the query filter.

    Example::

        manager = EntityManager(Product)
        products = yield manager.find(age={'$gt': 17}, max_limit=100)

    :return: list of entity instances
    """
    max_limit = None
    if 'max_limit' in kwargs:
        # must be popped so it is not sent to the driver as a filter key
        max_limit = kwargs.pop('max_limit')
    cursor = self.__collection.find(kwargs)
    instances = []
    # `yield` here: this method runs inside a tornado-style coroutine
    for doc in (yield cursor.to_list(max_limit)):
        instance = self.__entity()
        instance.map_dict(doc)
        instances.append(instance)
    return instances
Returns List(typeof=). Executes collection's find method based on keyword args maps results ( dict to list of entity instances). Set max_limit parameter to limit the amount of data send back through network Example:: manager = EntityManager(Product) products = yield manager.find(age={'$gt': 17}, max_limit=100)
def update(self, entity):
    """Persist the current fields of *entity* via a ``$set`` update.

    Example::

        manager = EntityManager(Product)
        p = Product()
        p.name = 'new name'
        yield manager.update(p)

    :param entity: Entity instance to update (matched by its _id)
    :return: the driver future for the update operation
    """
    assert isinstance(entity, Entity), "Error: entity must have an instance of Entity"
    query = {'_id': entity._id}
    changes = {'$set': entity.as_dict()}
    return self.__collection.update(query, changes)
Executes collection's update method based on keyword args. Example:: manager = EntityManager(Product) p = Product() p.name = 'new name' p.description = 'new description' p.price = 300.0 yield manager.update(p)
async def get_poll(poll_id):
    """Fetch a strawpoll by id and wrap the response in a StrawPoll.

    Example::

        poll = await get_poll('11682852')

    :param poll_id: id of the poll to fetch
    :return: strawpy.StrawPoll object
    """
    # NOTE(review): module-level aiohttp.get() was removed in aiohttp 2.0;
    # this presumably targets an old aiohttp release — confirm before upgrading.
    async with aiohttp.get('{api_url}/{poll_id}'.format(api_url=api_url, poll_id=poll_id)) as r:
        # NOTE(review): awaiting the StrawPoll constructor implies it
        # returns an awaitable; verify against the StrawPoll class.
        return await StrawPoll(r)
Get a strawpoll. Example: poll = strawpy.get_poll('11682852') :param poll_id: :return: strawpy.Strawpoll object
async def create_poll(title, options, multi=True, permissive=True, captcha=False, dupcheck='normal'):
    """Create a strawpoll.

    Example::

        new_poll = await create_poll('Is Python the best?', ['Yes', 'No'])

    :param title: question/title of the poll
    :param options: list of answer strings
    :param multi: allow multiple answers per voter
    :param permissive: relax duplicate-vote restrictions
    :param captcha: require a captcha to vote
    :param dupcheck: duplicate-checking mode (e.g. 'normal')
    :return: strawpy.StrawPoll object
    """
    query = {
        'title': title,
        'options': options,
        'multi': multi,
        'permissive': permissive,
        'captcha': captcha,
        'dupcheck': dupcheck
    }
    # NOTE(review): module-level aiohttp.post() was removed in aiohttp 2.0;
    # this presumably targets an old aiohttp release — confirm before upgrading.
    async with aiohttp.post(api_url, data=json.dumps(query)) as r:
        return await StrawPoll(r)
Create a strawpoll. Example: new_poll = strawpy.create_poll('Is Python the best?', ['Yes', 'No']) :param title: :param options: :param multi: :param permissive: :param captcha: :param dupcheck: :return: strawpy.Strawpoll object
def raise_status(response):
    """Raise an exception if the request did not return a status of 200.

    Known client errors raise StrawPollException; anything else defers
    to the response's own raise_for_status.

    :param response: Request response body
    """
    status = response.status
    if status == 200:
        return
    known = {
        401: 'Unauthorized',
        403: 'Forbidden',
        404: 'Not Found',
    }
    if status in known:
        raise StrawPollException(known[status], response)
    response.raise_for_status()
Raise an exception if the request did not return a status code of 200. :param response: Request response body
def results_with_percent(self):
    """Zip options, votes and integer percentages together.

    :return: iterator of tuples (option, votes, percent)
    """
    # Hoisted: the original re-computed sum(self.votes) for every single
    # vote entry (accidental O(n^2)).
    total = sum(self.votes)
    if total > 0:
        percents = [int(float(v) / total * 100) for v in self.votes]
    else:
        percents = [0] * len(self.votes)
    return zip(self.options, self.votes, percents)
Zip options, votes and percents (as integers) together. :return: List of tuples (option, votes, percent)
def open(self, results=False):
    """Open this strawpoll in the default web browser.

    :param results: open the results page instead of the voting page
    """
    target = self.results_url if results else self.url
    webbrowser.open(target)
Open the strawpoll in a browser. Can specify to open the main or results page. :param results: True/False
def main():
    """Driver for the DFA Brzozowski algebraic method.

    Parses a ruleset file (default ``target.y`` or first CLI argument),
    minimizes the resulting automaton, and prints the regular expression
    obtained via Brzozowski's algebraic method.
    """
    argv = sys.argv
    if len(argv) < 2:
        targetfile = 'target.y'
    else:
        targetfile = argv[1]
    # Python 2 print statements converted to the print() function;
    # end=' ' keeps the old trailing-comma "stay on this line" behaviour.
    print('Parsing ruleset: ' + targetfile, end=' ')
    flex_a = Flexparser()
    mma = flex_a.yyparse(targetfile)
    print('OK')
    print('Perform minimization on initial automaton:', end=' ')
    mma.minimize()
    print('OK')
    print('Perform Brzozowski on minimal automaton:', end=' ')
    brzozowski_a = Brzozowski(mma)
    mma_regex = brzozowski_a.get_regex()
    print(mma_regex)
Testing function for DFA brzozowski algebraic method Operation
def _bfs_sort(self, start): pathstates = {} # maintain a queue of nodes to be visited. Both current and previous # node must be included. queue = [] # push the first path into the queue queue.append([0, start]) pathstates[start.stateid] = 0 while queue: # get the first node from the queue leaf = queue.pop(0) node = leaf[1] pathlen = leaf[0] # enumerate all adjacent nodes, construct a new path and push it # into the queue for arc in node.arcs: next_state = self.mma[arc.nextstate] if next_state.stateid not in pathstates: queue.append([pathlen + 1, next_state]) pathstates[next_state.stateid] = pathlen + 1 orderedstatesdict = OrderedDict( sorted( pathstates.items(), key=lambda x: x[1], reverse=False)) for state in self.mma.states: orderedstatesdict[state.stateid] = state orderedstates = [x[1] for x in list(orderedstatesdict.items())] return orderedstates
maintain a map of states distance using BFS Args: start (fst state): The initial DFA state Returns: list: An ordered list of DFA states using path distance
def star(self, input_string):
    """Apply the Kleene star to *input_string*.

    Args:
        input_string (str): expression the Kleene star is applied to
    Returns:
        str: "(expr)*", or "" for the epsilon/empty expressions
    """
    if input_string in (self.epsilon, self.empty):
        return ""
    return "(" + input_string + ")*"
Kleene star operation Args: input_string (str): The string that the kleene star will be made Returns: str: The applied Kleene star operation on the input string
def _brzozowski_algebraic_method_init(self): # Initialize B for state_a in self.mma.states: if state_a.final: self.B[state_a.stateid] = self.epsilon else: self.B[state_a.stateid] = self.empty # Initialize A for state_b in self.mma.states: self.A[state_a.stateid, state_b.stateid] = self.empty for arc in state_a.arcs: if arc.nextstate == state_b.stateid: self.A[state_a.stateid, state_b.stateid] = \ self.mma.isyms.find(arc.ilabel)
Initialize Brzozowski Algebraic Method
def load_mmd():
    """Locate and load the libMultiMarkdown shared library.

    Sets the module globals ``_MMD_LIB`` (loaded ctypes library, or None
    on failure) and ``_LIB_LOCATION`` (resolved library path).
    """
    global _MMD_LIB
    global _LIB_LOCATION
    try:
        lib_file = 'libMultiMarkdown' + SHLIB_EXT[platform.system()]
        _LIB_LOCATION = os.path.abspath(os.path.join(DEFAULT_LIBRARY_DIR, lib_file))
        if not os.path.isfile(_LIB_LOCATION):
            # fall back to whatever the system linker can find
            _LIB_LOCATION = ctypes.util.find_library('MultiMarkdown')
        _MMD_LIB = ctypes.cdll.LoadLibrary(_LIB_LOCATION)
    except Exception:
        # was a bare `except:` — at least don't swallow SystemExit /
        # KeyboardInterrupt; a missing library still degrades to None
        _MMD_LIB = None
Loads libMultiMarkdown for usage
def _expand_source(source, dname, fmt):
    """Expand *source* with MMD header/footer and transclusion.

    Keyword arguments:
    source -- string containing the MultiMarkdown text to expand
    dname -- base directory for transclusion references
    fmt -- format flag for converting transclusion statements

    Returns (expanded_text, manifest) where manifest lists the
    transcluded file names.
    """
    _MMD_LIB.g_string_new.restype = ctypes.POINTER(GString)
    _MMD_LIB.g_string_new.argtypes = [ctypes.c_char_p]
    src = source.encode('utf-8')
    gstr = _MMD_LIB.g_string_new(src)
    _MMD_LIB.prepend_mmd_header(gstr)
    _MMD_LIB.append_mmd_footer(gstr)
    # `manif` accumulates the names of the transcluded files
    manif = _MMD_LIB.g_string_new(b"")
    _MMD_LIB.transclude_source.argtypes = [ctypes.POINTER(GString), ctypes.c_char_p, ctypes.c_char_p, ctypes.c_int, ctypes.POINTER(GString)]
    _MMD_LIB.transclude_source(gstr, dname.encode('utf-8'), None, fmt, manif)
    # reading `.str` copies the C buffer into Python bytes, so freeing
    # the GStrings below is safe
    manifest_txt = manif.contents.str
    full_txt = gstr.contents.str
    _MMD_LIB.g_string_free(manif, True)
    _MMD_LIB.g_string_free(gstr, True)
    manifest_txt = [ii for ii in manifest_txt.decode('utf-8').split('\n') if ii]
    return full_txt.decode('utf-8'), manifest_txt
Expands source text to include headers, footers, and expands Multimarkdown transclusion directives. Keyword arguments: source -- string containing the Multimarkdown text to expand dname -- directory name to use as the base directory for transclusion references fmt -- format flag indicating which format to use to convert transclusion statements
def has_metadata(source, ext):
    """Return True when *source* contains MultiMarkdown metadata.

    Keyword arguments:
    source -- MultiMarkdown text to inspect
    ext -- extension bitfield used during the check
    """
    fn = _MMD_LIB.has_metadata
    fn.argtypes = [ctypes.c_char_p, ctypes.c_int]
    fn.restype = ctypes.c_bool
    return fn(source.encode('utf-8'), ext)
Returns a flag indicating if a given block of MultiMarkdown text contains metadata.
def convert(source, ext=COMPLETE, fmt=HTML, dname=None):
    """Convert a string of MultiMarkdown text to the requested format.

    Transclusion is performed when the COMPATIBILITY extension is not
    set and *dname* is provided (a file path is reduced to its directory).

    Keyword arguments:
    source -- string containing MultiMarkdown text
    ext -- extension bitfield to pass to the conversion process
    fmt -- flag indicating which output format to use
    dname -- transclusion base path; None bypasses transclusion
    """
    if dname and not ext & COMPATIBILITY:
        if os.path.isfile(dname):
            # a file path was given: use its directory as the base
            dname = os.path.abspath(os.path.dirname(dname))
        source, _ = _expand_source(source, dname, fmt)
    _MMD_LIB.markdown_to_string.argtypes = [ctypes.c_char_p, ctypes.c_ulong, ctypes.c_int]
    _MMD_LIB.markdown_to_string.restype = ctypes.c_char_p
    src = source.encode('utf-8')
    return _MMD_LIB.markdown_to_string(src, ext, fmt).decode('utf-8')
Converts a string of MultiMarkdown text to the requested format. Transclusion is performed if the COMPATIBILITY extension is not set, and dname is set to a valid directory Keyword arguments: source -- string containing MultiMarkdown text ext -- extension bitfield to pass to conversion process fmt -- flag indicating output format to use dname -- Path to use for transclusion - if None, transclusion functionality is bypassed
def convert_from(fname, ext=COMPLETE, fmt=HTML):
    """Read *fname*, convert it, transcluding relative to its directory.

    Keyword arguments:
    fname -- path of the document to convert
    ext -- extension bitfield to pass to the conversion process
    fmt -- flag indicating which output format to use
    """
    base_dir = os.path.abspath(os.path.dirname(fname))
    with open(fname, 'r') as fp:
        contents = fp.read()
    return convert(contents, ext, fmt, base_dir)
Reads in a file and performs MultiMarkdown conversion, with transclusion ocurring based on the file directory. Returns the converted string. Keyword arguments: fname -- Filename of document to convert ext -- extension bitfield to pass to conversion process fmt -- flag indicating output format to use
def manifest(txt, dname):
    """Return the list of files transcluded by *txt*, relative to *dname*."""
    expanded = _expand_source(txt, dname, HTML)
    return expanded[1]
Extracts the file manifest (list of transcluded files) for a body of text, using the given directory as the transclusion base.
def keys(source, ext=COMPLETE):
    """List the metadata keys present in *source*.

    Keyword arguments:
    source -- string containing MultiMarkdown text
    ext -- extension bitfield used while extracting
    """
    fn = _MMD_LIB.extract_metadata_keys
    fn.restype = ctypes.c_char_p
    fn.argtypes = [ctypes.c_char_p, ctypes.c_ulong]
    raw = fn(source.encode('utf-8'), ext)
    decoded = raw.decode('utf-8') if raw else ''
    return [line for line in decoded.split('\n') if line]
Extracts metadata keys from the provided MultiMarkdown text. Keyword arguments: source -- string containing MultiMarkdown text ext -- extension bitfield for extracting MultiMarkdown
def value(source, key, ext=COMPLETE):
    """Extract the metadata value stored under *key*.

    Keyword arguments:
    source -- string containing MultiMarkdown text
    key -- metadata key to extract
    ext -- extension bitfield used while processing
    """
    fn = _MMD_LIB.extract_metadata_value
    fn.restype = ctypes.c_char_p
    fn.argtypes = [ctypes.c_char_p, ctypes.c_ulong, ctypes.c_char_p]
    raw = fn(source.encode('utf-8'), ext, key.encode('utf-8'))
    return raw.decode('utf-8') if raw else ''
Extracts value for the specified metadata key from the given extension set. Keyword arguments: source -- string containing MultiMarkdown text ext -- extension bitfield for processing text key -- key to extract
def tweet(self, text, in_reply_to=None, filename=None, file=None):
    """Post a new tweet, optionally with attached media.

    :param text: the text to post
    :param in_reply_to: ID of the tweet to reply to
    :param filename: if `file` is not provided, read media from this path
    :param file: file object used instead of opening `filename`
        (`filename` is still required for MIME detection / form field)
    :return: Tweet object
    """
    if filename is not None:
        status = self._client.update_with_media(filename=filename, file=file, status=text,
                                                in_reply_to_status_id=in_reply_to)
    else:
        status = self._client.update_status(status=text, in_reply_to_status_id=in_reply_to)
    return Tweet(status._json)
Post a new tweet. :param text: the text to post :param in_reply_to: The ID of the tweet to reply to :param filename: If `file` param is not provided, read file from this path :param file: A file object, which will be used instead of opening `filename`. `filename` is still required, for MIME type detection and to use as a form field in the POST data :return: Tweet object
def retweet(self, id):
    """Retweet a tweet.

    :param id: ID of the tweet in question
    :return: True on success, False when the tweet no longer exists
    """
    try:
        self._client.retweet(id=id)
    except TweepError as e:
        if e.api_code == TWITTER_PAGE_DOES_NOT_EXISTS_ERROR:
            return False
        raise
    return True
Retweet a tweet. :param id: ID of the tweet in question :return: True if success, False otherwise
def get_tweet(self, id):
    """Get an existing tweet.

    :param id: ID of the tweet in question
    :return: Tweet object, or None when not found
    """
    try:
        status = self._client.get_status(id=id)
    except TweepError as e:
        if e.api_code == TWITTER_TWEET_NOT_FOUND_ERROR:
            return None
        raise
    return Tweet(status._json)
Get an existing tweet. :param id: ID of the tweet in question :return: Tweet object. None if not found
def get_user(self, id):
    """Get a user's info.

    :param id: ID of the user in question
    :return: User object, or None when not found
    """
    try:
        user = self._client.get_user(user_id=id)
    except TweepError as e:
        if e.api_code == TWITTER_USER_NOT_FOUND_ERROR:
            return None
        raise
    return User(user._json)
Get a user's info. :param id: ID of the user in question :return: User object. None if not found
def remove_tweet(self, id):
    """Delete a tweet.

    :param id: ID of the tweet in question
    :return: True on success, False when it does not exist or belongs
        to another user
    """
    recoverable = (TWITTER_PAGE_DOES_NOT_EXISTS_ERROR, TWITTER_DELETE_OTHER_USER_TWEET)
    try:
        self._client.destroy_status(id=id)
    except TweepError as e:
        if e.api_code in recoverable:
            return False
        raise
    return True
Delete a tweet. :param id: ID of the tweet in question :return: True if success, False otherwise
def follow(self, user_id, notify=False):
    """Follow a user.

    :param user_id: ID of the user in question
    :param notify: whether to notify the user about the following
    :return: the user that is followed
    """
    try:
        friendship = self._client.create_friendship(user_id=user_id, follow=notify)
    except TweepError as e:
        if e.api_code in [TWITTER_ACCOUNT_SUSPENDED_ERROR]:
            # account suspended: fall back to just fetching the user
            return self.get_user(user_id)
        raise
    return User(friendship._json)
Follow a user. :param user_id: ID of the user in question :param notify: whether to notify the user about the following :return: user that are followed
def unfollow(self, user_id):
    """Unfollow a user.

    :param user_id: ID of the user in question
    :return: the user that was unfollowed
    """
    friendship = self._client.destroy_friendship(user_id=user_id)
    return User(friendship._json)
Unfollow a user. :param user_id: ID of the user in question :return: The user that was unfollowed
def create_list(self, name, mode='public', description=None):
    """Create a list.

    :param name: name of the new list
    :param mode: :code:`'public'` (default) or :code:`'private'`
    :param description: description of the new list
    :return: the new list object
    :rtype: :class:`~responsebot.models.List`
    """
    created = self._client.create_list(name=name, mode=mode, description=description)
    return List(tweepy_list_to_json(created))
Create a list :param name: Name of the new list :param mode: :code:`'public'` (default) or :code:`'private'` :param description: Description of the new list :return: The new list object :rtype: :class:`~responsebot.models.List`
def destroy_list(self, list_id):
    """Destroy a list.

    :param list_id: list ID number
    :return: the destroyed list object
    :rtype: :class:`~responsebot.models.List`
    """
    destroyed = self._client.destroy_list(list_id=list_id)
    return List(tweepy_list_to_json(destroyed))
Destroy a list :param list_id: list ID number :return: The destroyed list object :rtype: :class:`~responsebot.models.List`
def update_list(self, list_id, name=None, mode=None, description=None):
    """Update a list.

    :param list_id: list ID number
    :param name: new name for the list
    :param mode: :code:`'public'` or :code:`'private'`
    :param description: new description of the list
    :return: the updated list object
    :rtype: :class:`~responsebot.models.List`
    """
    updated = self._client.update_list(list_id=list_id, name=name, mode=mode,
                                       description=description)
    return List(tweepy_list_to_json(updated))
Update a list :param list_id: list ID number :param name: New name for the list :param mode: :code:`'public'` (default) or :code:`'private'` :param description: New description of the list :return: The updated list object :rtype: :class:`~responsebot.models.List`
def list_timeline(self, list_id, since_id=None, max_id=None, count=20):
    """List the tweets of the specified list.

    :param list_id: list ID number
    :param since_id: only results more recent than this ID
    :param max_id: only results older than this ID
    :param count: number of results per page
    :return: list of :class:`~responsebot.models.Tweet` objects
    """
    statuses = self._client.list_timeline(list_id=list_id, since_id=since_id,
                                          max_id=max_id, count=count)
    return [Tweet(status._json) for status in statuses]
List the tweets of specified list. :param list_id: list ID number :param since_id: results will have ID greater than specified ID (more recent than) :param max_id: results will have ID less than specified ID (older than) :param count: number of results per page :return: list of :class:`~responsebot.models.Tweet` objects
def get_list(self, list_id):
    """Get info of the specified list.

    :param list_id: list ID number
    :return: :class:`~responsebot.models.List` object
    """
    found = self._client.get_list(list_id=list_id)
    return List(tweepy_list_to_json(found))
Get info of specified list :param list_id: list ID number :return: :class:`~responsebot.models.List` object
def add_list_member(self, list_id, user_id):
    """Add a user to a list.

    :param list_id: list ID number
    :param user_id: user ID number
    :return: :class:`~responsebot.models.List` object
    """
    updated = self._client.add_list_member(list_id=list_id, user_id=user_id)
    return List(tweepy_list_to_json(updated))
Add a user to list :param list_id: list ID number :param user_id: user ID number :return: :class:`~responsebot.models.List` object
def remove_list_member(self, list_id, user_id):
    """Remove a user from a list.

    :param list_id: list ID number
    :param user_id: user ID number
    :return: :class:`~responsebot.models.List` object
    """
    updated = self._client.remove_list_member(list_id=list_id, user_id=user_id)
    return List(tweepy_list_to_json(updated))
Remove a user from a list :param list_id: list ID number :param user_id: user ID number :return: :class:`~responsebot.models.List` object